// Flatten a quadratic statement, writing explicitly the generators
// (and optionally the witnesses) in simple vectors.
FlatQuadStmt::FlatQuadStmt(const PedersenContext& ped,
const QuadConstraint& cnstr, const PtxtVec& xes, const PtxtVec& ys) {
size_t n = cnstr.indexes.size();
size_t n2 = xes.size();
if (n<1 || n > 1UL<<20) {
throw std::runtime_error("FlatQuadStmt: proof size must be between 1 and 2^20");
}
if (ys.size() != n2 || (n2>0 && n2 != n)) {
throw std::runtime_error("FlatQuadStmt: witnesses,constraints must bf of unequal size, found "
+std::to_string(n)+","+std::to_string(n2)+","+std::to_string(ys.size()));
}
gs.reserve(n);
hs.reserve(n);
auto it1 = xes.begin();
auto it2 = ys.begin();
for (auto idx : cnstr.indexes) {
gs.push_back(ped.getG(idx));
hs.push_back(ped.getH(idx));
if (n2>0) {
if (it1->first != idx || it2->first != idx)
throw std::runtime_error("FlatQuadStmt: witness and constraint keys not equal");
wG.push_back(it1->second);
wH.push_back(it2->second);
++it1; ++it2;
}
}
equalsTo = cnstr.equalsTo;
#ifdef DEBUGGING
if (n2>0)
assert(equalsTo==CRV25519::innerProduct(wG.data(), wH.data(), n2));
#endif
} |
def simplify(self):
for component in list(nx.connected_components(self.G)):
counted = [s for s in component if self.counts[s]]
keep = set(counted)
for s0,s1 in combinations(counted,2):
for path in nx.all_shortest_paths(self.G,s0,s1):
keep.update(path)
for s in [s for s in component if s not in keep]:
self.G.remove_node(s) |
#include "PowerUpState.h"
#include <memory>
#include "Core/src/Exception.h"
#include "Communication/GeneralParams.h"
#include "ConfigParams.h"
#include "ClusterManager.h"
#include "ProcessingState.h"
using namespace std;
void PowerUpState::HandleState(StateContext &stateContext, CommandType commandType, const GeneralParams &params)
{
switch(commandType)
{
case CommandType::Init:
{
HandleInit(params);
stateContext.SetState(unique_ptr<State>(new ProcessingState()));
break;
}
case CommandType::Terminate:
{
State::HandleTerminate();
break;
}
case CommandType::GetTopK:
case CommandType::Index:
default:
{
throw core::Exception(SOURCE, "Unauthorized command was received - %s", commandType.ToString().c_str());
}
}
}
void PowerUpState::HandleInit(const GeneralParams& params)
{
ConfigParams::Instance().Load(params);
ClusterManager::Instance().HandleInit();
}
|
package search.binary;
class AllocateBooks {
public static void main(String[] args) {
}
}
|
def construct_circuit(self, mode='circuit', register=None):
dim = (self._num_qubits + 1)//2
if mode == 'vector':
state = np.zeros((2**(2*dim - 1)), dtype=complex)
for i in range(2**(dim-1)):
j1 = i << dim
j2 = i
state[j1 + j2] = 1
state[j1 + j2 + (1 << (dim-1))] = 1j
return state/np.linalg.norm(state)
elif mode == 'circuit':
if register is None:
register = QuantumRegister(2*dim - 1, name='q')
quantum_circuit = QuantumCircuit(register)
quantum_circuit.h(list(range(dim - 1, 2*dim - 1)))
quantum_circuit.s(dim - 1)
quantum_circuit.cx(list(range(dim, 2*dim - 1)), list(range(dim - 1)))
return quantum_circuit
else:
raise AquaError('Mode should be either "vector" or "circuit"') |
import React from "react";
import { Job } from "../types";
type Props = {
item: Job;
};
const JobItem = (props: Props) => {
const { item } = props;
return (
<div style={{ marginBottom: "60px" }}>
<p className="about-text">{item.employer}</p>
{item.department && (
<>
{" "}
<span>
<strong>{item.department}</strong>
</span>
{` - `}
</>
)}
<span>
<strong>{item.position}</strong>
</span>
<br />
<br />
<p>{item.details}</p>
</div>
);
};
export default JobItem;
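// A minimal usage sketch (hypothetical values; the Job fields shown are only the
// ones this component actually reads):
//   <JobItem item={{ employer: "Acme Corp", department: "R&D", position: "Engineer", details: "..." }} />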
|
#include <xc.h>
#include "LCD.h"
//Functions below must be defined for the interface
void LCDWriteByte(char c, char rs);
char LCDReadByte(char rs);
void LCDInitPort(void);
void LCDWrite8(char c, char rs); //Only needed if 4 bit mode is supported
//Private LCD Functions
void LCDCommand(unsigned char command);
void LCDInitCommand(unsigned char command);
void LCDWriteData(char c);
char LCDBusy(void);
char LCDReadData(void);
char LCDGetAC(void);
//Utility functions
unsigned char calculateBase(char line);
void wait(void);
void longDelay(void);
void shortDelay(void);
void LCDInit(void) {
char functionSet = 0b00111000;
LCDInitPort();
__delay_ms(10);
#if LCD_DATA_WIDTH == 4
LCDWrite8(functionSet, 0);
__delay_us(40);
LCDWrite8(functionSet, 0);
__delay_us(40);
functionSet = 0b00101000;
LCDWrite8(functionSet, 0);
__delay_us(40);
#endif
LCDInitCommand(functionSet); //Function set
__delay_us(40);
LCDInitCommand(0b00001100); //Display on, cursor and blink off
__delay_us(40);
LCDInitCommand(0b00000001); //Clear
__delay_us(1700);
LCDInitCommand(0b00000110); //Entry mode increment, no shift
__delay_us(40);
}
void LCDClear(void) {
LCDCommand(0b00000001);
longDelay();
}
void LCDPutStr(char *str) {
while (*str) {
LCDWriteData(*str);
++str;
}
}
void LCDWriteLine(const char *str, char line) {
if (line >= LCD_ROWS) {
return;
}
LCDSetPos(line, 0);
while (*str) {
if (*str == '\n') {
++line;
if (line == LCD_ROWS) {
line = 0;
}
}
if (*str == '\r' || *str == '\n') {
LCDSetPos(line, 0);
} else {
LCDWriteData(*str);
}
++str;
}
}
void LCDClearLine(char line) {
char c = 0;
if (line >= LCD_ROWS) {
return;
}
LCDSetPos(line, 0);
while (c < LCD_CHARS) {
LCDWriteData(' ');
++c;
}
}
void LCDPutChar(char c) {
LCDWriteData(c);
}
void LCDSetPos(int row, int col) {
int pos;
if (row < 0 || col < 0 || row >= LCD_ROWS || col >= LCD_CHARS) {
return;
}
pos = calculateBase(row) + col;
LCDCommand(128 + pos);
}
void LCDGetPos(int *row, int *col) {
char ac;
ac = LCDGetAC();
*row = (ac / 64);
if (LCD_ROWS > 2 && ac % 64 >= 20) {
++(*row);
}
*col = ac % 64;
if (LCD_ROWS > 2) {
*col = *col % 20;
}
}
void LCDLoadCustomChar(const char *pixels, char pattern) {
int rows = 8;
char ac;
if (pattern < 8) {
ac = LCDGetAC();
LCDCommand(64 + (pattern * 8));
while (rows > 0) {
LCDWriteData(*pixels);
++pixels;
--rows;
}
LCDCommand(128 + ac);
}
}
void LCDReadLine(char *str, char line) {
int i;
if (line >= LCD_ROWS) {
str[0] = '\0';
return;
}
LCDSetPos(line, 0);
for (i = 0; i < LCD_CHARS; ++i) {
str[i] = LCDReadData();
}
str[LCD_CHARS] = '\0';
}
char LCDGetChar(void) {
char c;
c = LCDReadData();
return c;
}
void LCDScroll(signed char dir) {
char str[LCD_CHARS + 1];
char ac;
signed char i;
ac = LCDGetAC();
if (dir == LCD_SCROLL_UP) {
for (i = 1; i < LCD_ROWS; ++i) {
LCDReadLine(str, i);
LCDWriteLine(str, i - 1);
}
LCDClearLine(LCD_ROWS - 1);
} else {
for (i = LCD_ROWS - 2; i >= 0; --i) {
LCDReadLine(str, i);
LCDWriteLine(str, i + 1);
}
LCDClearLine(0);
}
LCDCommand(128 + ac);
}
void LCDDisplay(char enableDisplay, char enableCursor, char blink) {
unsigned char command = 0b00001000;
if (enableDisplay) {
command |= 0b00000100;
}
if (enableCursor) {
command |= 0b00000010;
}
if (blink) {
command |= 0b00000001;
}
LCDCommand(command);
}
void LCDCommand(unsigned char command) {
wait();
LCDWriteByte(command, 0);
shortDelay();
}
void LCDInitCommand(unsigned char command) {
LCDWriteByte(command, 0);
}
void LCDWriteData(char c) {
wait();
LCDWriteByte(c, 1);
shortDelay();
}
char LCDBusy(void) {
char b;
b = LCDReadByte(0);
return (b & 0b10000000) != 0;
}
char LCDReadData(void) {
char c;
wait();
c = LCDReadByte(1);
shortDelay();
return c;
}
char LCDGetAC(void) {
char b;
wait();
b = LCDReadByte(0);
shortDelay();
return b & 0b01111111;
}
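// Maps a display line to its DDRAM base address. With these constants a 4x20
// HD44780-style module gets bases 0x00, 0x40, 0x14 and 0x54 (0, 64, 20, 84);
// the layout assumed here is a 20-character-per-line display.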
unsigned char calculateBase(char line) {
char base;
if (line % 2 == 0) {
base = line * 10;
} else {
base = 54 + line * 10;
}
return base;
}
void wait(void) {
#ifdef LCD_POLLING
while (LCDBusy());
#endif
}
void longDelay(void) {
#ifndef LCD_POLLING
__delay_us(1660);
#endif
}
void shortDelay(void) {
#ifndef LCD_POLLING
__delay_us(40);
#endif
}
#if defined LCD_MODE_EXP18
#define PORT_A 0x12
#define PORT_B 0x13
void WriteRegister(char, char);
void LCDWriteByte(char c, char rs) {
unsigned char comFlags = 0;
if (rs) {
comFlags = 0b10000000;
}
WriteRegister(PORT_A, comFlags);
WriteRegister(PORT_B, c);
WriteRegister(PORT_A, comFlags | 0b01000000);
WriteRegister(PORT_A, comFlags);
}
char LCDReadByte(char rs) {
return 0;
}
#if defined (__18F47J53)
#define SSPxCON1 SSP2CON1
#define SSPxSTATbits SSP2STATbits
#define SSPxIF SSP2IF
#define SSPxBUF SSP2BUF
#else
#define SSPxCON1 SSPCON1
#define SSPxSTATbits SSPSTATbits
#define SSPxIF SSPIF
#define SSPxBUF SSPBUF
#endif
void LCDInitPort(void) {
TRISAbits.TRISA2 = 0;
LATAbits.LATA2 = 1;
#if defined (__18F47J53)
EECON2 = 0x55;
EECON2 = 0xAA;
PPSCONbits.IOLOCK = 0;
RPOR2 = 10;
RPOR4 = 11;
EECON2 = 0x55;
EECON2 = 0xAA;
PPSCONbits.IOLOCK = 1;
TRISBbits.TRISB1 = 0;
TRISAbits.TRISA5 = 0;
LATAbits.LATA5 = 0;
LATBbits.LATB1 = 0;
#else
TRISCbits.TRISC3 = 0;
TRISCbits.TRISC5 = 0;
#endif
SSPxCON1 = 0x21;
SSPxSTATbits.CKE = 1;
SSPxIF = 0;
WriteRegister(0, 0); //Set port A to outputs
WriteRegister(1, 0); //Set port B to outputs
WriteRegister(PORT_A, 0); //Clear port A (E and RS pins)
}
//*****************************************************************
// Write to MCP23S17 register
//*****************************************************************
void WriteRegister(char reg, char b) {
LATAbits.LATA2 = 0;
SSPxBUF = 0x40;
while (!SSPxIF);
SSPxIF = 0;
SSPxBUF = reg;
while (!SSPxIF);
SSPxIF = 0;
SSPxBUF = b;
while (!SSPxIF);
SSPxIF = 0;
LATAbits.LATA2 = 1;
}
#endif
#if defined LCD_MODE_PMP
void LCDInitPort(void) {
//Data is PORTD
//RS = RB5, RW = RE0, E = RE1
TRISD = 0xff;
PMCONH = 0b00100011;
PMCONL = 0b00000011;
PMMODEH = 0b00000011;
PMMODEL = 0b00001000;
PMEH = 0;
PMEL = 0b00000001;
PMCONHbits.PMPEN = 1;
}
void LCDWriteByte(char c, char rs) {
PMADDRL = rs;
while (PMMODEHbits.BUSY);
PMDIN1L = c;
}
char LCDReadByte(char rs) {
char c;
PMADDRL = rs;
while (PMMODEHbits.BUSY);
c = PMDIN1L;
while (PMMODEHbits.BUSY);
c = PMDIN1L;
return c;
}
#endif
#if defined LCD_MODE_DIRECT
#if LCD_DATA_WIDTH == 8
#define ON_MASK 0b11111111
#define OFF_MASK 0b00000000
#elif LCD_DATA_WIDTH == 4
#define ON_MASK 0b11110000
#define OFF_MASK 0b00001111
#else
#error Invalid LCD_DATA_WIDTH
#endif
void LCDInitPort(void) {
LCD_LAT = LCD_LAT & OFF_MASK;
LCD_TRIS = LCD_TRIS & OFF_MASK;
LCD_RW = 0;
RW_TRIS = 0;
LCD_RS = 0;
RS_TRIS = 0;
LCD_E = 0;
E_TRIS = 0;
}
void LCDWriteByte(char c, char rs) {
LCD_LAT = (LCD_LAT & OFF_MASK) | (c & ON_MASK);
if (rs) {
LCD_RS = 1;
} else {
LCD_RS = 0;
}
LCD_RW = 0;
LCD_E = 1;
__delay_us(1);
LCD_E = 0;
#if LCD_DATA_WIDTH == 4
LCD_LAT = (LCD_LAT & OFF_MASK) | ((c << 4) & ON_MASK);
LCD_E = 1;
__delay_us(1);
LCD_E = 0;
#endif
}
void LCDWrite8(char c, char rs) {
LCD_LAT = (LCD_LAT & OFF_MASK) | (c & ON_MASK);
if (rs) {
LCD_RS = 1;
} else {
LCD_RS = 0;
}
LCD_RW = 0;
LCD_E = 1;
__delay_us(1);
LCD_E = 0;
}
char LCDReadByte(char rs) {
char b;
LCD_TRIS = LCD_TRIS | ON_MASK;
if (rs) {
LCD_RS = 1;
} else {
LCD_RS = 0;
}
LCD_RW = 1;
LCD_E = 1;
__delay_us(1);
b = LCD_PORT & ON_MASK;
LCD_E = 0;
#if LCD_DATA_WIDTH == 4
LCD_E = 1;
__delay_us(1);
b |= (LCD_PORT >> 4) & OFF_MASK;
LCD_E = 0;
#endif
LCD_TRIS = LCD_TRIS & OFF_MASK;
return b;
}
#endif
#if defined LCD_MODE_I2C
#define RS_ON 0b00000001
#define RW_ON 0b00000010
#define E_ON 0b00000100
#define E_OFF 0b11111011
#define BACKLIGHT_ON 0b00001000
#define BAUD ((_XTAL_FREQ / 400000) - 1)
#if MSSPx == 1
#define SSPxADD SSPADD
#define SSPxCON1bits SSPCON1bits
#define SSPxCON2bits SSPCON2bits
#define SSPxSTATbits SSPSTATbits
#define SSPxBUF SSPBUF
#define I2C_TRIS() (TRISC |= 0b00011000) //RC4=SDA, RC3=SCL
//#define I2C_TRIS() (TRISB |= 0b00000011) //RB0=SDA, RB1=SCL
#elif MSSPx == 2
#define SSPxADD SSP2ADD
#define SSPxCON1bits SSP2CON1bits
#define SSPxCON2bits SSP2CON2bits
#define SSPxSTATbits SSP2STATbits
#define SSPxBUF SSP2BUF
#define I2C_TRIS() (TRISD |= 0b01100000) //RD5=SDA, RD6=SCL
#else
#error Invalid MSSPx selection
#endif
void LCDInitPort(void) {
I2C_TRIS();
SSPxADD = BAUD; //100kHz
SSPxCON1bits.SSPM = 0b1000;
SSPxCON1bits.SSPEN = 1;
Nop();
SSPxCON2bits.SEN = 1;
while (SSPxCON2bits.SEN == 1);
SSPxBUF = LCD_I2C_ADDRESS;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxBUF = BACKLIGHT_ON;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxCON2bits.PEN = 1;
while (SSPxCON2bits.PEN == 1);
}
void LCDWrite8(char c, char rs) {
unsigned char dataByte;
unsigned char comFlags = BACKLIGHT_ON;
if (rs) {
comFlags |= RS_ON;
}
SSPxCON2bits.SEN = 1;
while (SSPxCON2bits.SEN == 1);
SSPxBUF = LCD_I2C_ADDRESS;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
dataByte = c & 0b11110000;
SSPxBUF = dataByte | E_ON | comFlags;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxBUF = dataByte | comFlags;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxCON2bits.PEN = 1;
while (SSPxCON2bits.PEN == 1);
}
void LCDWriteByte(char c, char rs) {
unsigned char dataByte;
unsigned char comFlags = BACKLIGHT_ON;
if (rs) {
comFlags |= RS_ON;
}
SSPxCON2bits.SEN = 1;
while (SSPxCON2bits.SEN == 1);
SSPxBUF = LCD_I2C_ADDRESS;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
dataByte = c & 0b11110000;
SSPxBUF = dataByte | E_ON | comFlags;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxBUF = dataByte | comFlags;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
dataByte = (c << 4) & 0b11110000;
SSPxBUF = dataByte | E_ON | comFlags;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxBUF = dataByte | comFlags;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxBUF = BACKLIGHT_ON;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxCON2bits.PEN = 1;
while (SSPxCON2bits.PEN == 1);
}
char LCDReadNibble(char rs) {
char b;
char comFlags = 0b11111000 | RW_ON;
if (rs) {
comFlags |= RS_ON;
}
SSPxCON2bits.SEN = 1; //Start
while (SSPxCON2bits.SEN == 1);
SSPxBUF = LCD_I2C_ADDRESS;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxBUF = comFlags;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxBUF = comFlags | E_ON;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxCON2bits.RSEN = 1; //restart
while (SSPxCON2bits.RSEN == 1);
SSPxBUF = LCD_I2C_ADDRESS | 1;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxCON2bits.RCEN = 1;
while (SSPxSTATbits.BF == 0); //Wait for byte
b = SSPxBUF & 0b11110000; //Upper nibble
SSPxCON2bits.ACKDT = 1;
SSPxCON2bits.ACKEN = 1; //Send NACK
while (SSPxCON2bits.ACKEN == 1);
SSPxCON2bits.RSEN = 1; //restart
while (SSPxCON2bits.RSEN == 1);
SSPxBUF = LCD_I2C_ADDRESS;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxBUF = comFlags;
while (SSPxSTATbits.BF || SSPxSTATbits.R_W);
SSPxCON2bits.PEN = 1; //stop
while (SSPxCON2bits.PEN == 1);
return b;
}
char LCDReadByte(char rs) {
unsigned char ub;
unsigned char lb;
ub = LCDReadNibble(rs);
lb = LCDReadNibble(rs);
return ub | (lb >> 4);
}
#endif |
/*
!==========================================================================
elemental function gsw_t_deriv_chem_potential_water_t_exact (sa, t, p)
!==========================================================================
!
! Calculates the temperature derivative of the chemical potential of water
! in seawater so that it is valid at exactly SA = 0.
!
! SA = Absolute Salinity [ g/kg ]
! t = in-situ temperature (ITS-90) [ deg C ]
! p = sea pressure [ dbar ]
! ( i.e. absolute pressure - 10.1325 dbar )
!
! chem_potential_water_dt = temperature derivative of the chemical
! potential of water in seawater [ J g^-1 K^-1 ]
!--------------------------------------------------------------------------
*/
double
gsw_t_deriv_chem_potential_water_t_exact(double sa, double t, double p)
{
GSW_TEOS10_CONSTANTS;
double g03_t, g08_sa_t, x, x2, y, z, g08_t, kg2g = 1e-3;
/*
! Note. The kg2g, a factor of 1e-3, is needed to convert the output of this
! function into units of J/g. See section (2.9) of the TEOS-10 Manual.
*/
x2 = gsw_sfac*sa;
x = sqrt(x2);
y = t*0.025;
z = p*rec_db2pa;
/* the input pressure (p) is sea pressure in units of dbar. */
g03_t = 5.90578347909402 + z*(-270.983805184062 +
z*(776.153611613101 + z*(-196.51255088122 + (28.9796526294175 -
2.13290083518327*z)*z))) +
y*(-24715.571866078 + z*(2910.0729080936 +
z*(-1513.116771538718 + z*(546.959324647056 +
z*(-111.1208127634436 + 8.68841343834394*z)))) +
y*(2210.2236124548363 + z*(-2017.52334943521 +
z*(1498.081172457456 + z*(-718.6359919632359 +
(146.4037555781616 - 4.9892131862671505*z)*z))) +
y*(-592.743745734632 + z*(1591.873781627888 +
z*(-1207.261522487504 + (608.785486935364 -
105.4993508931208*z)*z)) +
y*(290.12956292128547 + z*(-973.091553087975 +
z*(602.603274510125 + z*(-276.361526170076 +
32.40953340386105*z))) +
y*(-113.90630790850321 + y*(21.35571525415769 -
67.41756835751434*z) +
z*(381.06836198507096 + z*(-133.7383902842754 +
49.023632509086724*z)))))));
g08_t = x2*(168.072408311545 +
x*(-493.407510141682 + x*(543.835333000098 +
x*(-196.028306689776 + 36.7571622995805*x) +
y*(-137.1145018408982 + y*(148.10030845687618 +
y*(-68.5590309679152 + 12.4848504784754*y))) -
22.6683558512829*z) + z*(-175.292041186547 +
(83.1923927801819 - 29.483064349429*z)*z) +
y*(-86.1329351956084 + z*(766.116132004952 +
z*(-108.3834525034224 + 51.2796974779828*z)) +
y*(-30.0682112585625 - 1380.9597954037708*z +
y*(3.50240264723578 + 938.26075044542*z)))));
g08_sa_t = 1187.3715515697959 +
x*(-1480.222530425046 + x*(2175.341332000392 +
x*(-980.14153344888 + 220.542973797483*x) +
y*(-548.4580073635929 + y*(592.4012338275047 +
y*(-274.2361238716608 + 49.9394019139016*y))) -
90.6734234051316*z) + z*(-525.876123559641 +
(249.57717834054571 - 88.449193048287*z)*z) +
y*(-258.3988055868252 + z*(2298.348396014856 +
z*(-325.1503575102672 + 153.8390924339484*z)) +
y*(-90.2046337756875 - 4142.8793862113125*z +
y*(10.50720794170734 + 2814.78225133626*z))));
return (kg2g*((g03_t + g08_t)*0.025 - 0.5*gsw_sfac*0.025*sa*g08_sa_t));
}
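/*
 * A minimal usage sketch (not part of the library). The header name
 * "gswteos-10.h", the sample values and the GSW_USAGE_EXAMPLE guard are
 * assumptions for illustration only.
 */
#ifdef GSW_USAGE_EXAMPLE
#include <stdio.h>
#include "gswteos-10.h"
int main(void)
{
double sa = 35.0;  /* Absolute Salinity [ g/kg ] */
double t = 10.0;   /* in-situ temperature (ITS-90) [ deg C ] */
double p = 100.0;  /* sea pressure [ dbar ] */
printf("d(mu_w)/dt = %.9g J g^-1 K^-1\n",
gsw_t_deriv_chem_potential_water_t_exact(sa, t, p));
return 0;
}
#endif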
|
package com.example.demo.controller;
import com.example.demo.domain.Singer;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Pageable;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.stereotype.Repository;
@Repository
public interface SingerRepository extends JpaRepository<Singer,Long> {
Page<Singer> findAll(Pageable pageable);
}
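// A minimal usage sketch (hypothetical service code, not part of this file);
// PageRequest.of(page, size) is the usual Spring Data way to build a Pageable:
//   @Autowired private SingerRepository singerRepository;
//   Page<Singer> firstPage = singerRepository.findAll(PageRequest.of(0, 10));
//   firstPage.getContent().forEach(singer -> System.out.println(singer));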
|
/**
* This class handles communication with clients using NIO. There is one per
* client, but only one thread doing the communication.
*/
public class NIOServerCnxn implements Watcher, ServerCnxn {
private static final Logger LOG = Logger.getLogger(NIOServerCnxn.class);
private ConnectionBean jmxConnectionBean;
static public class Factory extends Thread {
static {
Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
public void uncaughtException(Thread t, Throwable e) {
LOG.error("Thread " + t + " died", e);
}
});
/**
* this is to avoid the jvm bug:
* NullPointerException in Selector.open()
* http://bugs.sun.com/view_bug.do?bug_id=6427854
*/
try {
Selector.open().close();
} catch(IOException ie) {
LOG.error("Selector failed to open", ie);
}
}
ZooKeeperServer zks;
final ServerSocketChannel ss;
final Selector selector = Selector.open();
/**
* We use this buffer to do efficient socket I/O. Since there is a single
* sender thread per NIOServerCnxn instance, we can use a member variable to
* only allocate it once.
*/
final ByteBuffer directBuffer = ByteBuffer.allocateDirect(64 * 1024);
final HashSet<NIOServerCnxn> cnxns = new HashSet<NIOServerCnxn>();
final HashMap<InetAddress, Set<NIOServerCnxn>> ipMap =
new HashMap<InetAddress, Set<NIOServerCnxn>>( );
int outstandingLimit = 1;
int maxClientCnxns = 10;
/**
* Construct a new server connection factory which will accept an unlimited number
* of concurrent connections from each client (up to the file descriptor
* limits of the operating system). startup(zks) must be called subsequently.
* @param port
* @throws IOException
*/
public Factory(InetSocketAddress addr) throws IOException {
this(addr, 0);
}
/**
* Constructs a new server connection factory where the number of concurrent connections
* from a single IP address is limited to maxcc (or unlimited if 0).
* startup(zks) must be called subsequently.
* @param port - the port to listen on for connections.
* @param maxcc - the number of concurrent connections allowed from a single client.
* @throws IOException
*/
public Factory(InetSocketAddress addr, int maxcc) throws IOException {
super("NIOServerCxn.Factory:" + addr);
setDaemon(true);
maxClientCnxns = maxcc;
this.ss = ServerSocketChannel.open();
ss.socket().setReuseAddress(true);
LOG.info("binding to port " + addr);
ss.socket().bind(addr);
ss.configureBlocking(false);
ss.register(selector, SelectionKey.OP_ACCEPT);
}
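// A minimal usage sketch (assumed calling code, not part of this class):
//   NIOServerCnxn.Factory factory =
//       new NIOServerCnxn.Factory(new InetSocketAddress(2181), 60);
//   factory.startup(zks); // starts the selector thread and hands it the ZooKeeperServer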
@Override
public void start() {
// ensure thread is started once and only once
if (getState() == Thread.State.NEW) {
super.start();
}
}
public void startup(ZooKeeperServer zks) throws IOException,
InterruptedException {
start();
zks.startdata();
zks.startup();
setZooKeeperServer(zks);
}
public void setZooKeeperServer(ZooKeeperServer zks) {
this.zks = zks;
if (zks != null) {
this.outstandingLimit = zks.getGlobalOutstandingLimit();
zks.setServerCnxnFactory(this);
} else {
this.outstandingLimit = 1;
}
}
public ZooKeeperServer getZooKeeperServer() {
return this.zks;
}
public InetSocketAddress getLocalAddress(){
return (InetSocketAddress)ss.socket().getLocalSocketAddress();
}
public int getLocalPort(){
return ss.socket().getLocalPort();
}
public int getMaxClientCnxns() {
return maxClientCnxns;
}
private void addCnxn(NIOServerCnxn cnxn) {
synchronized (cnxns) {
cnxns.add(cnxn);
synchronized (ipMap){
InetAddress addr = cnxn.sock.socket().getInetAddress();
Set<NIOServerCnxn> s = ipMap.get(addr);
if (s == null) {
// in general we will see 1 connection from each
// host, setting the initial cap to 2 allows us
// to minimize mem usage in the common case
// of 1 entry -- we need to set the initial cap
// to 2 to avoid rehash when the first entry is added
s = new HashSet<NIOServerCnxn>(2);
s.add(cnxn);
ipMap.put(addr,s);
} else {
s.add(cnxn);
}
}
}
}
protected NIOServerCnxn createConnection(SocketChannel sock,
SelectionKey sk) throws IOException {
return new NIOServerCnxn(zks, sock, sk, this);
}
private int getClientCnxnCount(InetAddress cl) {
// The ipMap lock covers both the map, and its contents
// (that is, the cnxn sets shouldn't be modified outside of
// this lock)
synchronized (ipMap) {
Set<NIOServerCnxn> s = ipMap.get(cl);
if (s == null) return 0;
return s.size();
}
}
public void run() {
while (!ss.socket().isClosed()) {
try {
selector.select(1000);
Set<SelectionKey> selected;
synchronized (this) {
selected = selector.selectedKeys();
}
ArrayList<SelectionKey> selectedList = new ArrayList<SelectionKey>(
selected);
Collections.shuffle(selectedList);
for (SelectionKey k : selectedList) {
if ((k.readyOps() & SelectionKey.OP_ACCEPT) != 0) {
SocketChannel sc = ((ServerSocketChannel) k
.channel()).accept();
InetAddress ia = sc.socket().getInetAddress();
int cnxncount = getClientCnxnCount(ia);
if (maxClientCnxns > 0 && cnxncount >= maxClientCnxns){
LOG.warn("Too many connections from " + ia
+ " - max is " + maxClientCnxns );
sc.close();
} else {
LOG.info("Accepted socket connection from "
+ sc.socket().getRemoteSocketAddress());
sc.configureBlocking(false);
SelectionKey sk = sc.register(selector,
SelectionKey.OP_READ);
NIOServerCnxn cnxn = createConnection(sc, sk);
sk.attach(cnxn);
addCnxn(cnxn);
}
} else if ((k.readyOps() & (SelectionKey.OP_READ | SelectionKey.OP_WRITE)) != 0) {
NIOServerCnxn c = (NIOServerCnxn) k.attachment();
c.doIO(k);
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Unexpected ops in select "
+ k.readyOps());
}
}
}
selected.clear();
} catch (RuntimeException e) {
LOG.warn("Ignoring unexpected runtime exception", e);
} catch (Exception e) {
LOG.warn("Ignoring exception", e);
}
}
clear();
LOG.info("NIOServerCnxn factory exited run method");
}
/**
* Clear all the connections in the selector.
*
* You must first close ss (the serversocketchannel) if you wish
* to block any new connections from being established.
*
*/
@SuppressWarnings("unchecked")
synchronized public void clear() {
selector.wakeup();
HashSet<NIOServerCnxn> cnxns;
synchronized (this.cnxns) {
cnxns = (HashSet<NIOServerCnxn>)this.cnxns.clone();
}
// got to clear all the connections that we have in the selector
for (NIOServerCnxn cnxn: cnxns) {
try {
// don't hold this.cnxns lock as deadlock may occur
cnxn.close();
} catch (Exception e) {
LOG.warn("Ignoring exception closing cnxn sessionid 0x"
+ Long.toHexString(cnxn.sessionId), e);
}
}
}
public void shutdown() {
try {
ss.close();
clear();
this.interrupt();
this.join();
} catch (InterruptedException e) {
LOG.warn("Ignoring interrupted exception during shutdown", e);
} catch (Exception e) {
LOG.warn("Ignoring unexpected exception during shutdown", e);
}
try {
selector.close();
} catch (IOException e) {
LOG.warn("Selector closing", e);
}
if (zks != null) {
zks.shutdown();
}
}
synchronized void closeSession(long sessionId) {
selector.wakeup();
closeSessionWithoutWakeup(sessionId);
}
@SuppressWarnings("unchecked")
private void closeSessionWithoutWakeup(long sessionId) {
HashSet<NIOServerCnxn> cnxns;
synchronized (this.cnxns) {
cnxns = (HashSet<NIOServerCnxn>)this.cnxns.clone();
}
for (NIOServerCnxn cnxn : cnxns) {
if (cnxn.sessionId == sessionId) {
try {
cnxn.close();
} catch (Exception e) {
LOG.warn("exception during session close", e);
}
break;
}
}
}
}
/**
* The buffer will cause the connection to be closed when we do a send.
*/
static final ByteBuffer closeConn = ByteBuffer.allocate(0);
final Factory factory;
/** The ZooKeeperServer for this connection. May be null if the server
* is not currently serving requests (for example if the server is not
* an active quorum participant).
*/
private final ZooKeeperServer zk;
private SocketChannel sock;
private SelectionKey sk;
boolean initialized;
ByteBuffer lenBuffer = ByteBuffer.allocate(4);
ByteBuffer incomingBuffer = lenBuffer;
LinkedBlockingQueue<ByteBuffer> outgoingBuffers = new LinkedBlockingQueue<ByteBuffer>();
int sessionTimeout;
ArrayList<Id> authInfo = new ArrayList<Id>();
/* Send close connection packet to the client, doIO will eventually
* close the underlying machinery (like socket, selectorkey, etc...)
*/
public void sendCloseSession() {
sendBuffer(closeConn);
}
/**
* send buffer without using the asynchronous
* calls to selector and then close the socket
* @param bb
*/
void sendBufferSync(ByteBuffer bb) {
try {
/* configure socket to be blocking
* so that we dont have to do write in
* a tight while loop
*/
sock.configureBlocking(true);
if (bb != closeConn) {
if (sock != null) {
sock.write(bb);
}
packetSent();
}
} catch (IOException ie) {
LOG.error("Error sending data synchronously ", ie);
}
}
void sendBuffer(ByteBuffer bb) {
try {
if (bb != closeConn) {
// We check if write interest here because if it is NOT set,
// nothing is queued, so we can try to send the buffer right
// away without waking up the selector
if ((sk.interestOps() & SelectionKey.OP_WRITE) == 0) {
try {
sock.write(bb);
} catch (IOException e) {
// we are just doing best effort right now
}
}
// if there is nothing left to send, we are done
if (bb.remaining() == 0) {
packetSent();
return;
}
}
synchronized(this.factory){
sk.selector().wakeup();
if (LOG.isTraceEnabled()) {
LOG.trace("Add a buffer to outgoingBuffers, sk " + sk
+ " is valid: " + sk.isValid());
}
outgoingBuffers.add(bb);
if (sk.isValid()) {
sk.interestOps(sk.interestOps() | SelectionKey.OP_WRITE);
}
}
} catch(Exception e) {
LOG.error("Unexpected Exception: ", e);
}
}
private static class CloseRequestException extends IOException {
private static final long serialVersionUID = -7854505709816442681L;
public CloseRequestException(String msg) {
super(msg);
}
}
private static class EndOfStreamException extends IOException {
private static final long serialVersionUID = -8255690282104294178L;
public EndOfStreamException(String msg) {
super(msg);
}
public String toString() {
return "EndOfStreamException: " + getMessage();
}
}
/** Read the request payload (everything following the length prefix) */
private void readPayload() throws IOException, InterruptedException {
if (incomingBuffer.remaining() != 0) { // have we read length bytes?
int rc = sock.read(incomingBuffer); // sock is non-blocking, so ok
if (rc < 0) {
throw new EndOfStreamException(
"Unable to read additional data from client sessionid 0x"
+ Long.toHexString(sessionId)
+ ", likely client has closed socket");
}
}
if (incomingBuffer.remaining() == 0) { // have we read length bytes?
packetReceived();
incomingBuffer.flip();
if (!initialized) {
readConnectRequest();
} else {
readRequest();
}
lenBuffer.clear();
incomingBuffer = lenBuffer;
}
}
void doIO(SelectionKey k) throws InterruptedException {
try {
if (sock == null) {
LOG.warn("trying to do i/o on a null socket for session:0x"
+ Long.toHexString(sessionId));
return;
}
if (k.isReadable()) {
int rc = sock.read(incomingBuffer);
if (rc < 0) {
throw new EndOfStreamException(
"Unable to read additional data from client sessionid 0x"
+ Long.toHexString(sessionId)
+ ", likely client has closed socket");
}
if (incomingBuffer.remaining() == 0) {
boolean isPayload;
if (incomingBuffer == lenBuffer) { // start of next request
incomingBuffer.flip();
isPayload = readLength(k);
incomingBuffer.clear();
} else {
// continuation
isPayload = true;
}
if (isPayload) { // not the case for 4letterword
readPayload();
}
else {
// four letter words take care
// need not do anything else
return;
}
}
}
if (k.isWritable()) {
// ZooLog.logTraceMessage(LOG,
// ZooLog.CLIENT_DATA_PACKET_TRACE_MASK
// "outgoingBuffers.size() = " +
// outgoingBuffers.size());
if (outgoingBuffers.size() > 0) {
// ZooLog.logTraceMessage(LOG,
// ZooLog.CLIENT_DATA_PACKET_TRACE_MASK,
// "sk " + k + " is valid: " +
// k.isValid());
/*
* This is going to reset the buffer position to 0 and the
* limit to the size of the buffer, so that we can fill it
* with data from the non-direct buffers that we need to
* send.
*/
ByteBuffer directBuffer = factory.directBuffer;
directBuffer.clear();
for (ByteBuffer b : outgoingBuffers) {
if (directBuffer.remaining() < b.remaining()) {
/*
* When we call put later, if the directBuffer is too
* small to hold everything, nothing will be copied,
* so we've got to slice the buffer if it's too big.
*/
b = (ByteBuffer) b.slice().limit(
directBuffer.remaining());
}
/*
* put() is going to modify the positions of both
* buffers, but we don't want to change the position of
* the source buffers (we'll do that after the send, if
* needed), so we save and reset the position after the
* copy
*/
int p = b.position();
directBuffer.put(b);
b.position(p);
if (directBuffer.remaining() == 0) {
break;
}
}
/*
* Do the flip: limit becomes position, position gets set to
* 0. This sets us up for the write.
*/
directBuffer.flip();
int sent = sock.write(directBuffer);
ByteBuffer bb;
// Remove the buffers that we have sent
while (outgoingBuffers.size() > 0) {
bb = outgoingBuffers.peek();
if (bb == closeConn) {
throw new CloseRequestException("close requested");
}
int left = bb.remaining() - sent;
if (left > 0) {
/*
* We only partially sent this buffer, so we update
* the position and exit the loop.
*/
bb.position(bb.position() + sent);
break;
}
packetSent();
/* We've sent the whole buffer, so drop the buffer */
sent -= bb.remaining();
outgoingBuffers.remove();
}
// ZooLog.logTraceMessage(LOG,
// ZooLog.CLIENT_DATA_PACKET_TRACE_MASK, "after send,
// outgoingBuffers.size() = " + outgoingBuffers.size());
}
synchronized(this.factory){
if (outgoingBuffers.size() == 0) {
if (!initialized
&& (sk.interestOps() & SelectionKey.OP_READ) == 0) {
throw new CloseRequestException("responded to info probe");
}
sk.interestOps(sk.interestOps()
& (~SelectionKey.OP_WRITE));
} else {
sk.interestOps(sk.interestOps()
| SelectionKey.OP_WRITE);
}
}
}
} catch (CancelledKeyException e) {
LOG.warn("Exception causing close of session 0x"
+ Long.toHexString(sessionId)
+ " due to " + e);
if (LOG.isDebugEnabled()) {
LOG.debug("CancelledKeyException stack trace", e);
}
close();
} catch (CloseRequestException e) {
// expecting close to log session closure
close();
} catch (EndOfStreamException e) {
LOG.warn(e); // tell user why
// expecting close to log session closure
close();
} catch (IOException e) {
LOG.warn("Exception causing close of session 0x"
+ Long.toHexString(sessionId)
+ " due to " + e);
if (LOG.isDebugEnabled()) {
LOG.debug("IOException stack trace", e);
}
close();
}
}
private void readRequest() throws IOException {
// We have the request, now process and setup for next
InputStream bais = new ByteBufferInputStream(incomingBuffer);
BinaryInputArchive bia = BinaryInputArchive.getArchive(bais);
RequestHeader h = new RequestHeader();
h.deserialize(bia, "header");
// Through the magic of byte buffers, txn will not be
// pointing
// to the start of the txn
incomingBuffer = incomingBuffer.slice();
if (h.getType() == OpCode.auth) {
AuthPacket authPacket = new AuthPacket();
ZooKeeperServer.byteBuffer2Record(incomingBuffer, authPacket);
String scheme = authPacket.getScheme();
AuthenticationProvider ap = ProviderRegistry.getProvider(scheme);
if (ap == null
|| (ap.handleAuthentication(this, authPacket.getAuth())
!= KeeperException.Code.OK)) {
if (ap == null) {
LOG.warn("No authentication provider for scheme: "
+ scheme + " has "
+ ProviderRegistry.listProviders());
} else {
LOG.warn("Authentication failed for scheme: " + scheme);
}
// send a response...
ReplyHeader rh = new ReplyHeader(h.getXid(), 0,
KeeperException.Code.AUTHFAILED.intValue());
sendResponse(rh, null, null);
// ... and close connection
sendCloseSession();
disableRecv();
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Authentication succeeded for scheme: "
+ scheme);
}
ReplyHeader rh = new ReplyHeader(h.getXid(), 0,
KeeperException.Code.OK.intValue());
sendResponse(rh, null, null);
}
return;
} else {
Request si = new Request(this, sessionId, h.getXid(), h.getType(), incomingBuffer, authInfo);
si.setOwner(ServerCnxn.me);
zk.submitRequest(si);
}
if (h.getXid() >= 0) {
synchronized (this) {
outstandingRequests++;
}
synchronized (this.factory) {
// check throttling
if (zk.getInProcess() > factory.outstandingLimit) {
if (LOG.isDebugEnabled()) {
LOG.debug("Throttling recv " + zk.getInProcess());
}
disableRecv();
// following lines should not be needed since we are
// already reading
// } else {
// enableRecv();
}
}
}
}
public void disableRecv() {
sk.interestOps(sk.interestOps() & (~SelectionKey.OP_READ));
}
public void enableRecv() {
if (sk.isValid()) {
int interest = sk.interestOps();
if ((interest & SelectionKey.OP_READ) == 0) {
sk.interestOps(interest | SelectionKey.OP_READ);
}
}
}
private void readConnectRequest() throws IOException, InterruptedException {
BinaryInputArchive bia = BinaryInputArchive
.getArchive(new ByteBufferInputStream(incomingBuffer));
ConnectRequest connReq = new ConnectRequest();
connReq.deserialize(bia, "connect");
if (LOG.isDebugEnabled()) {
LOG.debug("Session establishment request from client "
+ sock.socket().getRemoteSocketAddress()
+ " client's lastZxid is 0x"
+ Long.toHexString(connReq.getLastZxidSeen()));
}
if (zk == null) {
throw new IOException("ZooKeeperServer not running");
}
if (connReq.getLastZxidSeen() > zk.getZKDatabase().getDataTreeLastProcessedZxid()) {
String msg = "Refusing session request for client "
+ sock.socket().getRemoteSocketAddress()
+ " as it has seen zxid 0x"
+ Long.toHexString(connReq.getLastZxidSeen())
+ " our last zxid is 0x"
+ Long.toHexString(zk.getZKDatabase().getDataTreeLastProcessedZxid())
+ " client must try another server";
LOG.info(msg);
throw new CloseRequestException(msg);
}
sessionTimeout = connReq.getTimeOut();
byte passwd[] = connReq.getPasswd();
int minSessionTimeout = zk.getMinSessionTimeout();
if (sessionTimeout < minSessionTimeout) {
sessionTimeout = minSessionTimeout;
}
int maxSessionTimeout = zk.getMaxSessionTimeout();
if (sessionTimeout > maxSessionTimeout) {
sessionTimeout = maxSessionTimeout;
}
// We don't want to receive any packets until we are sure that the
// session is setup
disableRecv();
if (connReq.getSessionId() != 0) {
long clientSessionId = connReq.getSessionId();
LOG.info("Client attempting to renew session 0x"
+ Long.toHexString(clientSessionId)
+ " at " + sock.socket().getRemoteSocketAddress());
factory.closeSessionWithoutWakeup(clientSessionId);
setSessionId(clientSessionId);
zk.reopenSession(this, sessionId, passwd, sessionTimeout);
} else {
LOG.info("Client attempting to establish new session at "
+ sock.socket().getRemoteSocketAddress());
zk.createSession(this, passwd, sessionTimeout);
}
initialized = true;
}
private void packetReceived() {
stats.incrPacketsReceived();
if (zk != null) {
zk.serverStats().incrementPacketsReceived();
}
}
private void packetSent() {
stats.incrPacketsSent();
if (zk != null) {
zk.serverStats().incrementPacketsSent();
}
}
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int confCmd =
ByteBuffer.wrap("conf".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int consCmd =
ByteBuffer.wrap("cons".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int crstCmd =
ByteBuffer.wrap("crst".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int dumpCmd =
ByteBuffer.wrap("dump".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int enviCmd =
ByteBuffer.wrap("envi".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int getTraceMaskCmd =
ByteBuffer.wrap("gtmk".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int ruokCmd =
ByteBuffer.wrap("ruok".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int setTraceMaskCmd =
ByteBuffer.wrap("stmk".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int srvrCmd =
ByteBuffer.wrap("srvr".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int srstCmd =
ByteBuffer.wrap("srst".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int statCmd =
ByteBuffer.wrap("stat".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int wchcCmd =
ByteBuffer.wrap("wchc".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int wchpCmd =
ByteBuffer.wrap("wchp".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int wchsCmd =
ByteBuffer.wrap("wchs".getBytes()).getInt();
/*
* See <a href="{@docRoot}/../../../docs/zookeeperAdmin.html#sc_zkCommands">
* Zk Admin</a>. this link is for all the commands.
*/
private final static int mntrCmd = ByteBuffer.wrap("mntr".getBytes())
.getInt();
private final static HashMap<Integer, String> cmd2String =
new HashMap<Integer, String>();
// specify all of the commands that are available
static {
cmd2String.put(confCmd, "conf");
cmd2String.put(consCmd, "cons");
cmd2String.put(crstCmd, "crst");
cmd2String.put(dumpCmd, "dump");
cmd2String.put(enviCmd, "envi");
cmd2String.put(getTraceMaskCmd, "gtmk");
cmd2String.put(ruokCmd, "ruok");
cmd2String.put(setTraceMaskCmd, "stmk");
cmd2String.put(srstCmd, "srst");
cmd2String.put(srvrCmd, "srvr");
cmd2String.put(statCmd, "stat");
cmd2String.put(wchcCmd, "wchc");
cmd2String.put(wchpCmd, "wchp");
cmd2String.put(wchsCmd, "wchs");
cmd2String.put(mntrCmd, "mntr");
}
/**
* clean up the socket related to a command and also make sure we flush the
* data before we do that
*
* @param pwriter
* the pwriter for a command socket
*/
private void cleanupWriterSocket(PrintWriter pwriter) {
try {
if (pwriter != null) {
pwriter.flush();
pwriter.close();
}
} catch (Exception e) {
LOG.info("Error closing PrintWriter ", e);
} finally {
try {
close();
} catch (Exception e) {
LOG.error("Error closing a command socket ", e);
}
}
}
/**
* This class wraps the sendBuffer method of NIOServerCnxn. It is
* responsible for chunking up the response to a client. Rather
* than cons'ing up a response fully in memory, which may be large
* for some commands, this class chunks up the result.
*/
private class SendBufferWriter extends Writer {
private StringBuffer sb = new StringBuffer();
/**
* Check if we are ready to send another chunk.
* @param force force sending, even if not a full chunk
*/
private void checkFlush(boolean force) {
if ((force && sb.length() > 0) || sb.length() > 2048) {
sendBufferSync(ByteBuffer.wrap(sb.toString().getBytes()));
// clear our internal buffer
sb.setLength(0);
}
}
@Override
public void close() throws IOException {
if (sb == null) return;
checkFlush(true);
sb = null; // clear out the ref to ensure no reuse
}
@Override
public void flush() throws IOException {
checkFlush(true);
}
@Override
public void write(char[] cbuf, int off, int len) throws IOException {
sb.append(cbuf, off, len);
checkFlush(false);
}
}
private static final String ZK_NOT_SERVING =
"This ZooKeeper instance is not currently serving requests";
/**
* Set of threads for command ports. All the 4
* letter commands are run via a thread. Each class
* maps to a corresponding 4 letter command. CommandThread
* is the abstract class from which all the others inherit.
*/
private abstract class CommandThread extends Thread {
PrintWriter pw;
CommandThread(PrintWriter pw) {
this.pw = pw;
}
public void run() {
try {
commandRun();
} catch (IOException ie) {
LOG.error("Error in running command ", ie);
} finally {
cleanupWriterSocket(pw);
}
}
public abstract void commandRun() throws IOException;
}
private class RuokCommand extends CommandThread {
public RuokCommand(PrintWriter pw) {
super(pw);
}
@Override
public void commandRun() {
pw.print("imok");
}
}
private class TraceMaskCommand extends CommandThread {
TraceMaskCommand(PrintWriter pw) {
super(pw);
}
@Override
public void commandRun() {
long traceMask = ZooTrace.getTextTraceLevel();
pw.print(traceMask);
}
}
private class SetTraceMaskCommand extends CommandThread {
long trace = 0;
SetTraceMaskCommand(PrintWriter pw, long trace) {
super(pw);
this.trace = trace;
}
@Override
public void commandRun() {
pw.print(trace);
}
}
private class EnvCommand extends CommandThread {
EnvCommand(PrintWriter pw) {
super(pw);
}
@Override
public void commandRun() {
List<Environment.Entry> env = Environment.list();
pw.println("Environment:");
for(Environment.Entry e : env) {
pw.print(e.getKey());
pw.print("=");
pw.println(e.getValue());
}
}
}
private class ConfCommand extends CommandThread {
ConfCommand(PrintWriter pw) {
super(pw);
}
@Override
public void commandRun() {
if (zk == null) {
pw.println(ZK_NOT_SERVING);
} else {
zk.dumpConf(pw);
}
}
}
private class StatResetCommand extends CommandThread {
public StatResetCommand(PrintWriter pw) {
super(pw);
}
@Override
public void commandRun() {
if (zk == null) {
pw.println(ZK_NOT_SERVING);
}
else {
zk.serverStats().reset();
pw.println("Server stats reset.");
}
}
}
private class CnxnStatResetCommand extends CommandThread {
public CnxnStatResetCommand(PrintWriter pw) {
super(pw);
}
@Override
public void commandRun() {
if (zk == null) {
pw.println(ZK_NOT_SERVING);
} else {
synchronized(factory.cnxns){
for(NIOServerCnxn c : factory.cnxns){
c.getStats().reset();
}
}
pw.println("Connection stats reset.");
}
}
}
private class DumpCommand extends CommandThread {
public DumpCommand(PrintWriter pw) {
super(pw);
}
@Override
public void commandRun() {
if (zk == null) {
pw.println(ZK_NOT_SERVING);
}
else {
pw.println("SessionTracker dump:");
zk.sessionTracker.dumpSessions(pw);
pw.println("ephemeral nodes dump:");
zk.dumpEphemerals(pw);
}
}
}
private class StatCommand extends CommandThread {
int len;
public StatCommand(PrintWriter pw, int len) {
super(pw);
this.len = len;
}
@SuppressWarnings("unchecked")
@Override
public void commandRun() {
if (zk == null) {
pw.println(ZK_NOT_SERVING);
}
else {
pw.print("Zookeeper version: ");
pw.println(Version.getFullVersion());
if (len == statCmd) {
LOG.info("Stat command output");
pw.println("Clients:");
// clone should be faster than iteration
// ie give up the cnxns lock faster
HashSet<NIOServerCnxn> cnxnset;
synchronized(factory.cnxns){
cnxnset = (HashSet<NIOServerCnxn>)factory
.cnxns.clone();
}
for(NIOServerCnxn c : cnxnset){
((CnxnStats)c.getStats())
.dumpConnectionInfo(pw, true);
}
pw.println();
}
pw.print(zk.serverStats().toString());
pw.print("Node count: ");
pw.println(zk.getZKDatabase().getNodeCount());
}
}
}
private class ConsCommand extends CommandThread {
public ConsCommand(PrintWriter pw) {
super(pw);
}
@SuppressWarnings("unchecked")
@Override
public void commandRun() {
if (zk == null) {
pw.println(ZK_NOT_SERVING);
} else {
// clone should be faster than iteration
// ie give up the cnxns lock faster
HashSet<NIOServerCnxn> cnxns;
synchronized (factory.cnxns) {
cnxns = (HashSet<NIOServerCnxn>) factory.cnxns.clone();
}
for (NIOServerCnxn c : cnxns) {
((CnxnStats) c.getStats()).dumpConnectionInfo(pw, false);
}
pw.println();
}
}
}
private class WatchCommand extends CommandThread {
int len = 0;
public WatchCommand(PrintWriter pw, int len) {
super(pw);
this.len = len;
}
@Override
public void commandRun() {
if (zk == null) {
pw.println(ZK_NOT_SERVING);
} else {
DataTree dt = zk.getZKDatabase().getDataTree();
if (len == wchsCmd) {
dt.dumpWatchesSummary(pw);
} else if (len == wchpCmd) {
dt.dumpWatches(pw, true);
} else {
dt.dumpWatches(pw, false);
}
pw.println();
}
}
}
private class MonitorCommand extends CommandThread {
MonitorCommand(PrintWriter pw) {
super(pw);
}
@Override
public void commandRun() {
if(zk == null) {
pw.println(ZK_NOT_SERVING);
return;
}
ZKDatabase zkdb = zk.getZKDatabase();
ServerStats stats = zk.serverStats();
print("version", Version.getFullVersion());
print("avg_latency", stats.getAvgLatency());
print("max_latency", stats.getMaxLatency());
print("min_latency", stats.getMinLatency());
print("packets_received", stats.getPacketsReceived());
print("packets_sent", stats.getPacketsSent());
print("outstanding_requests", stats.getOutstandingRequests());
print("server_state", stats.getServerState());
print("znode_count", zkdb.getNodeCount());
print("watch_count", zkdb.getDataTree().getWatchCount());
print("ephemerals_count", zkdb.getDataTree().getEphemeralsCount());
print("approximate_data_size", zkdb.getDataTree().approximateDataSize());
OperatingSystemMXBean osMbean = ManagementFactory.getOperatingSystemMXBean();
if(osMbean != null && osMbean instanceof UnixOperatingSystemMXBean) {
UnixOperatingSystemMXBean unixos = (UnixOperatingSystemMXBean)osMbean;
print("open_file_descriptor_count", unixos.getOpenFileDescriptorCount());
print("max_file_descriptor_count", unixos.getMaxFileDescriptorCount());
}
if("leader".equals(stats.getServerState())) {
Leader leader = ((LeaderZooKeeperServer)zk).getLeader();
print("followers", leader.learners.size());
print("synced_followers", leader.forwardingFollowers.size());
print("pending_syncs", leader.pendingSyncs.size());
}
}
private void print(String key, long number) {
print(key, "" + number);
}
private void print(String key, String value) {
pw.print("zk_");
pw.print(key);
pw.print("\t");
pw.println(value);
}
}
/** Return true if a four letter word was found and responded to, otherwise false **/
private boolean checkFourLetterWord(final SelectionKey k, final int len)
throws IOException
{
// We take advantage of the limited size of the length to look
// for cmds. They are all 4 bytes, which fit inside an int
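// For example, the four ASCII bytes of "ruok" (0x72 0x75 0x6f 0x6b), read
// big-endian by ByteBuffer.getInt(), become the int 0x72756f6b, which is the
// value stored in ruokCmd above; a client can trigger it with something like
// "echo ruok | nc <host> 2181".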
String cmd = cmd2String.get(len);
if (cmd == null) {
return false;
}
LOG.info("Processing " + cmd + " command from "
+ sock.socket().getRemoteSocketAddress());
packetReceived();
/** cancel the selection key to remove the socket handling
* from selector. This is to prevent netcat problem wherein
* netcat immediately closes the sending side after sending the
* commands and still keeps the receiving channel open.
* The idea is to remove the selectionkey from the selector
* so that the selector does not notice the closed read on the
* socket channel and keep the socket alive to write the data to
* and makes sure to close the socket after its done writing the data
*/
if (k != null) {
try {
k.cancel();
} catch(Exception e) {
LOG.error("Error cancelling command selection key ", e);
}
}
final PrintWriter pwriter = new PrintWriter(
new BufferedWriter(new SendBufferWriter()));
if (len == ruokCmd) {
RuokCommand ruok = new RuokCommand(pwriter);
ruok.start();
return true;
} else if (len == getTraceMaskCmd) {
TraceMaskCommand tmask = new TraceMaskCommand(pwriter);
tmask.start();
return true;
} else if (len == setTraceMaskCmd) {
int rc = sock.read(incomingBuffer);
if (rc < 0) {
throw new IOException("Read error");
}
incomingBuffer.flip();
long traceMask = incomingBuffer.getLong();
ZooTrace.setTextTraceLevel(traceMask);
SetTraceMaskCommand setMask = new SetTraceMaskCommand(pwriter, traceMask);
setMask.start();
return true;
} else if (len == enviCmd) {
EnvCommand env = new EnvCommand(pwriter);
env.start();
return true;
} else if (len == confCmd) {
ConfCommand ccmd = new ConfCommand(pwriter);
ccmd.start();
return true;
} else if (len == srstCmd) {
StatResetCommand strst = new StatResetCommand(pwriter);
strst.start();
return true;
} else if (len == crstCmd) {
CnxnStatResetCommand crst = new CnxnStatResetCommand(pwriter);
crst.start();
return true;
} else if (len == dumpCmd) {
DumpCommand dump = new DumpCommand(pwriter);
dump.start();
return true;
} else if (len == statCmd || len == srvrCmd) {
StatCommand stat = new StatCommand(pwriter, len);
stat.start();
return true;
} else if (len == consCmd) {
ConsCommand cons = new ConsCommand(pwriter);
cons.start();
return true;
} else if (len == wchpCmd || len == wchcCmd || len == wchsCmd) {
WatchCommand wcmd = new WatchCommand(pwriter, len);
wcmd.start();
return true;
} else if (len == mntrCmd) {
MonitorCommand mntr = new MonitorCommand(pwriter);
mntr.start();
return true;
}
return false;
}
/** Reads the first 4 bytes of lenBuffer, which could be true length or
* four letter word.
*
* @param k selection key
* @return true if length read, otw false (wasn't really the length)
* @throws IOException if buffer size exceeds maxBuffer size
*/
private boolean readLength(SelectionKey k) throws IOException {
// Read the length, now get the buffer
int len = lenBuffer.getInt();
if (!initialized && checkFourLetterWord(k, len)) {
return false;
}
if (len < 0 || len > BinaryInputArchive.maxBuffer) {
throw new IOException("Len error " + len);
}
if (zk == null) {
throw new IOException("ZooKeeperServer not running");
}
incomingBuffer = ByteBuffer.allocate(len);
return true;
}
/**
* The number of requests that have been submitted but not yet responded to.
*/
int outstandingRequests;
/*
* (non-Javadoc)
*
* @see org.apache.zookeeper.server.ServerCnxnIface#getSessionTimeout()
*/
public int getSessionTimeout() {
return sessionTimeout;
}
/**
* This is the id that uniquely identifies the session of a client. Once
* this session is no longer active, the ephemeral nodes will go away.
*/
long sessionId;
static long nextSessionId = 1;
public NIOServerCnxn(ZooKeeperServer zk, SocketChannel sock,
SelectionKey sk, Factory factory) throws IOException {
this.zk = zk;
this.sock = sock;
this.sk = sk;
this.factory = factory;
sock.socket().setTcpNoDelay(true);
sock.socket().setSoLinger(true, 2);
InetAddress addr = ((InetSocketAddress) sock.socket()
.getRemoteSocketAddress()).getAddress();
authInfo.add(new Id("ip", addr.getHostAddress()));
sk.interestOps(SelectionKey.OP_READ);
}
@Override
public String toString() {
return "NIOServerCnxn object with sock = " + sock + " and sk = " + sk;
}
/*
* Close the cnxn and remove it from the factory cnxns list.
*
* This function returns immediately if the cnxn is not on the cnxns list.
*/
public void close() {
synchronized(factory.cnxns){
// if this is not in cnxns then it's already closed
if (!factory.cnxns.remove(this)) {
return;
}
synchronized (factory.ipMap) {
Set<NIOServerCnxn> s =
factory.ipMap.get(sock.socket().getInetAddress());
s.remove(this);
}
// unregister from JMX
try {
if(jmxConnectionBean != null){
MBeanRegistry.getInstance().unregister(jmxConnectionBean);
}
} catch (Exception e) {
LOG.warn("Failed to unregister with JMX", e);
}
jmxConnectionBean = null;
if (zk != null) {
zk.removeCnxn(this);
}
closeSock();
if (sk != null) {
try {
// need to cancel this selection key from the selector
sk.cancel();
} catch (Exception e) {
if (LOG.isDebugEnabled()) {
LOG.debug("ignoring exception during selectionkey cancel", e);
}
}
}
}
}
/**
* Close resources associated with the sock of this cnxn.
*/
private void closeSock() {
if (sock == null) {
return;
}
LOG.info("Closed socket connection for client "
+ sock.socket().getRemoteSocketAddress()
+ (sessionId != 0 ?
" which had sessionid 0x" + Long.toHexString(sessionId) :
" (no session established for client)"));
try {
/*
* The following sequence of code is stupid! You would think that
* only sock.close() is needed, but alas, it doesn't work that way.
* If you just do sock.close() there are cases where the socket
* doesn't actually close...
*/
sock.socket().shutdownOutput();
} catch (IOException e) {
// This is a relatively common exception that we can't avoid
if (LOG.isDebugEnabled()) {
LOG.debug("ignoring exception during output shutdown", e);
}
}
try {
sock.socket().shutdownInput();
} catch (IOException e) {
// This is a relatively common exception that we can't avoid
if (LOG.isDebugEnabled()) {
LOG.debug("ignoring exception during input shutdown", e);
}
}
try {
sock.socket().close();
} catch (IOException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("ignoring exception during socket close", e);
}
}
try {
sock.close();
// XXX The next line doesn't seem to be needed, but some posts
// to forums suggest that it is needed. Keep in mind if errors in
// this section arise.
// factory.selector.wakeup();
} catch (IOException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("ignoring exception during socketchannel close", e);
}
}
sock = null;
}
private final static byte fourBytes[] = new byte[4];
/*
* (non-Javadoc)
*
* @see org.apache.zookeeper.server.ServerCnxnIface#sendResponse(org.apache.zookeeper.proto.ReplyHeader,
* org.apache.jute.Record, java.lang.String)
*/
synchronized public void sendResponse(ReplyHeader h, Record r, String tag) {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
// Make space for length
BinaryOutputArchive bos = BinaryOutputArchive.getArchive(baos);
try {
baos.write(fourBytes);
bos.writeRecord(h, "header");
if (r != null) {
bos.writeRecord(r, tag);
}
baos.close();
} catch (IOException e) {
LOG.error("Error serializing response");
}
byte b[] = baos.toByteArray();
ByteBuffer bb = ByteBuffer.wrap(b);
bb.putInt(b.length - 4).rewind();
sendBuffer(bb);
if (h.getXid() > 0) {
synchronized(this){
outstandingRequests--;
}
// check throttling
synchronized (this.factory) {
if (zk.getInProcess() < factory.outstandingLimit
|| outstandingRequests < 1) {
sk.selector().wakeup();
enableRecv();
}
}
}
} catch(Exception e) {
LOG.warn("Unexpected exception. Destruction averted.", e);
}
}
/*
* (non-Javadoc)
*
* @see org.apache.zookeeper.server.ServerCnxnIface#process(org.apache.zookeeper.proto.WatcherEvent)
*/
synchronized public void process(WatchedEvent event) {
ReplyHeader h = new ReplyHeader(-1, -1L, 0);
if (LOG.isTraceEnabled()) {
ZooTrace.logTraceMessage(LOG, ZooTrace.EVENT_DELIVERY_TRACE_MASK,
"Deliver event " + event + " to 0x"
+ Long.toHexString(this.sessionId)
+ " through " + this);
}
// Convert WatchedEvent to a type that can be sent over the wire
WatcherEvent e = event.getWrapper();
sendResponse(h, e, "notification");
}
public void finishSessionInit(boolean valid) {
// register with JMX
try {
jmxConnectionBean = new ConnectionBean(this, zk);
MBeanRegistry.getInstance().register(jmxConnectionBean, zk.jmxServerBean);
} catch (Exception e) {
LOG.warn("Failed to register with JMX", e);
jmxConnectionBean = null;
}
try {
ConnectResponse rsp = new ConnectResponse(0, valid ? sessionTimeout
: 0, valid ? sessionId : 0, // send 0 if session is no
// longer valid
valid ? zk.generatePasswd(sessionId) : new byte[16]);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
BinaryOutputArchive bos = BinaryOutputArchive.getArchive(baos);
bos.writeInt(-1, "len");
rsp.serialize(bos, "connect");
baos.close();
ByteBuffer bb = ByteBuffer.wrap(baos.toByteArray());
bb.putInt(bb.remaining() - 4).rewind();
sendBuffer(bb);
if (!valid) {
LOG.info("Invalid session 0x"
+ Long.toHexString(sessionId)
+ " for client "
+ sock.socket().getRemoteSocketAddress()
+ ", probably expired");
sendCloseSession();
} else {
LOG.info("Established session 0x"
+ Long.toHexString(sessionId)
+ " with negotiated timeout " + sessionTimeout
+ " for client "
+ sock.socket().getRemoteSocketAddress());
}
// Now that the session is ready we can start receiving packets
synchronized (this.factory) {
sk.selector().wakeup();
enableRecv();
}
} catch (Exception e) {
LOG.warn("Exception while establishing session, closing", e);
close();
}
}
/*
* (non-Javadoc)
*
* @see org.apache.zookeeper.server.ServerCnxnIface#getSessionId()
*/
public long getSessionId() {
return sessionId;
}
public void setSessionId(long sessionId) {
this.sessionId = sessionId;
}
public ArrayList<Id> getAuthInfo() {
return authInfo;
}
public synchronized InetSocketAddress getRemoteAddress() {
if (sock == null) {
return null;
}
return (InetSocketAddress) sock.socket().getRemoteSocketAddress();
}
class CnxnStats implements ServerCnxn.Stats {
private final Date established = new Date();
private final AtomicLong packetsReceived = new AtomicLong();
private final AtomicLong packetsSent = new AtomicLong();
private long minLatency;
private long maxLatency;
private String lastOp;
private long lastCxid;
private long lastZxid;
private long lastResponseTime;
private long lastLatency;
private long count;
private long totalLatency;
CnxnStats() {
reset();
}
public synchronized void reset() {
packetsReceived.set(0);
packetsSent.set(0);
minLatency = Long.MAX_VALUE;
maxLatency = 0;
lastOp = "NA";
lastCxid = -1;
lastZxid = -1;
lastResponseTime = 0;
lastLatency = 0;
count = 0;
totalLatency = 0;
}
long incrPacketsReceived() {
return packetsReceived.incrementAndGet();
}
long incrPacketsSent() {
return packetsSent.incrementAndGet();
}
synchronized void updateForResponse(long cxid, long zxid, String op,
long start, long end)
{
// don't overwrite with "special" xids - we're interested
// in the clients last real operation
if (cxid >= 0) {
lastCxid = cxid;
}
lastZxid = zxid;
lastOp = op;
lastResponseTime = end;
long elapsed = end - start;
lastLatency = elapsed;
if (elapsed < minLatency) {
minLatency = elapsed;
}
if (elapsed > maxLatency) {
maxLatency = elapsed;
}
count++;
totalLatency += elapsed;
}
public Date getEstablished() {
return established;
}
public long getOutstandingRequests() {
synchronized (NIOServerCnxn.this) {
synchronized (NIOServerCnxn.this.factory) {
return outstandingRequests;
}
}
}
public long getPacketsReceived() {
return packetsReceived.longValue();
}
public long getPacketsSent() {
return packetsSent.longValue();
}
public synchronized long getMinLatency() {
return minLatency == Long.MAX_VALUE ? 0 : minLatency;
}
public synchronized long getAvgLatency() {
return count == 0 ? 0 : totalLatency / count;
}
public synchronized long getMaxLatency() {
return maxLatency;
}
public synchronized String getLastOperation() {
return lastOp;
}
public synchronized long getLastCxid() {
return lastCxid;
}
public synchronized long getLastZxid() {
return lastZxid;
}
public synchronized long getLastResponseTime() {
return lastResponseTime;
}
public synchronized long getLastLatency() {
return lastLatency;
}
/**
* Prints detailed stats information for the connection.
*
* @see dumpConnectionInfo(PrintWriter, boolean) for brief stats
*/
@Override
public String toString() {
StringWriter sw = new StringWriter();
PrintWriter pwriter = new PrintWriter(sw);
dumpConnectionInfo(pwriter, false);
pwriter.flush();
pwriter.close();
return sw.toString();
}
        /**
         * Print information about the connection to the given writer.
         *
         * @param pwriter the writer to print the connection details to
         * @param brief if true prints brief details, otherwise full detail
         */
public synchronized void
dumpConnectionInfo(PrintWriter pwriter, boolean brief)
{
Channel channel = sk.channel();
if (channel instanceof SocketChannel) {
pwriter.print(" ");
pwriter.print(((SocketChannel)channel).socket()
.getRemoteSocketAddress());
pwriter.print("[");
pwriter.print(sk.isValid() ? Integer.toHexString(sk.interestOps())
: "0");
pwriter.print("](queued=");
pwriter.print(getOutstandingRequests());
pwriter.print(",recved=");
pwriter.print(getPacketsReceived());
pwriter.print(",sent=");
pwriter.print(getPacketsSent());
if (!brief) {
long sessionId = getSessionId();
if (sessionId != 0) {
pwriter.print(",sid=0x");
pwriter.print(Long.toHexString(sessionId));
pwriter.print(",lop=");
pwriter.print(getLastOperation());
pwriter.print(",est=");
pwriter.print(getEstablished().getTime());
pwriter.print(",to=");
pwriter.print(getSessionTimeout());
long lastCxid = getLastCxid();
if (lastCxid >= 0) {
pwriter.print(",lcxid=0x");
pwriter.print(Long.toHexString(lastCxid));
}
pwriter.print(",lzxid=0x");
pwriter.print(Long.toHexString(getLastZxid()));
pwriter.print(",lresp=");
pwriter.print(getLastResponseTime());
pwriter.print(",llat=");
pwriter.print(getLastLatency());
pwriter.print(",minlat=");
pwriter.print(getMinLatency());
pwriter.print(",avglat=");
pwriter.print(getAvgLatency());
pwriter.print(",maxlat=");
pwriter.print(getMaxLatency());
}
}
pwriter.println(")");
}
}
}
private final CnxnStats stats = new CnxnStats();
public Stats getStats() {
return stats;
}
} |
// cmd/snps.go
package cmd
import (
"github.com/spf13/cobra"
"github.com/cov-ert/gofasta/pkg/snps"
)
var snpsReference string
var snpsQuery string
var snpsOutfile string
func init() {
rootCmd.AddCommand(snpCmd)
snpCmd.Flags().StringVarP(&snpsReference, "reference", "r", "", "Reference sequence, in fasta format")
snpCmd.Flags().StringVarP(&snpsQuery, "query", "q", "stdin", "Alignment of sequences to find snps in, in fasta format")
snpCmd.Flags().StringVarP(&snpsOutfile, "outfile", "o", "stdout", "Output to write")
}
var snpCmd = &cobra.Command{
Use: "snps",
Short: "Find snps relative to a reference",
Long: `Find snps relative to a reference.
Example usage:
gofasta snps -r reference.fasta -q alignment.fasta -o snps.csv
reference.fasta and alignment.fasta must be the same length.
The output is a csv-format file with one line per query sequence, and two columns:
'query' and 'SNPs', the second of which is a "|"-delimited list of snps in that query.
If query and outfile are not specified, the behaviour is to read the query alignment
from stdin and write the snps file to stdout, e.g. you could do this:
cat alignment.fasta | gofasta snps -r reference.fasta > snps.csv`,
RunE: func(cmd *cobra.Command, args []string) (err error) {
err = snps.SNPs(snpsReference, snpsQuery, snpsOutfile)
return
},
}
|
#include <graphic_syscalls.h>
#include <utils/string.h>
#include <renderer/renderer.h>
using namespace syscall;
void syscall::sys_fb_info(interrupts::s_registers* regs) {
renderer::framebuffer_t* user_address = (renderer::framebuffer_t*) regs->rbx;
*user_address = renderer::default_framebuffer;
} |
/*
* Copyright (c) 2011-2015, <NAME>. All Rights Reserved.
*
* This file is part of BoofCV (http://boofcv.org).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package boofcv.alg.distort.radtan;
import boofcv.struct.distort.PointTransform_F64;
import georegression.geometry.GeometryMath_F64;
import georegression.struct.point.Point2D_F64;
import org.ejml.data.DenseMatrix64F;
import org.ejml.ops.CommonOps;
/**
* Converts the observed distorted pixels into normalized image coordinates.
*
* @author <NAME>
*/
public class RemoveRadialPtoN_F64 implements PointTransform_F64 {
// principle point / image center
protected double cx, cy;
// other intrinsic parameters
protected double fx,fy,skew;
// distortion parameters
protected RadialTangential_F64 params;
// radial distortion magnitude
protected double sum;
// found tangential distortion
protected double tx,ty;
// inverse of camera calibration matrix
protected DenseMatrix64F K_inv = new DenseMatrix64F(3,3);
private double tol=1e-10;
public RemoveRadialPtoN_F64() {
}
public RemoveRadialPtoN_F64( double tol ) {
this.tol = tol;
}
public void setTolerance(double tol) {
this.tol = tol;
}
/**
* Specify camera calibration parameters
*
* @param fx Focal length x-axis in pixels
* @param fy Focal length y-axis in pixels
* @param skew skew in pixels
* @param cx camera center x-axis in pixels
	 * @param cy camera center y-axis in pixels
*/
public RemoveRadialPtoN_F64 setK(double fx, double fy, double skew, double cx, double cy ) {
this.fx = fx;
this.fy = fy;
this.skew = skew;
this.cx = cx;
this.cy = cy;
K_inv.set(0,0,fx);
K_inv.set(1,1,fy);
K_inv.set(0,1,skew);
K_inv.set(0,2,cx);
K_inv.set(1,2,cy);
K_inv.set(2,2,1);
CommonOps.invert(K_inv);
return this;
}
public RemoveRadialPtoN_F64 setDistortion( double[] radial, double t1, double t2 ) {
params = new RadialTangential_F64(radial,t1,t2);
return this;
}
/**
* Removes radial distortion
*
* @param x Distorted x-coordinate pixel
* @param y Distorted y-coordinate pixel
* @param out Undistorted normalized coordinate.
*/
@Override
public void compute(double x, double y, Point2D_F64 out) {
out.x = x;
out.y = y;
double radial[] = params.radial;
double t1 = params.t1,t2 = params.t2;
// initial estimate of undistorted point
GeometryMath_F64.mult(K_inv, out, out);
double origX = x = out.x;
double origY = y = out.y;
double prevSum = 0;
for( int iter = 0; iter < 20; iter++ ) {
// estimate the radial distance
double r2 = x*x + y*y;
double ri2 = r2;
sum = 0;
for( int i = 0; i < radial.length; i++ ) {
sum += radial[i]*ri2;
ri2 *= r2;
}
tx = 2*t1*x*y + t2*(r2 + 2*x*x);
ty = t1*(r2 + 2*y*y) + 2*t2*x*y;
x = (origX - tx)/(1+sum);
y = (origY - ty)/(1+sum);
if( Math.abs(prevSum-sum) <= tol ) {
break;
} else {
prevSum = sum;
}
}
out.set(x,y);
}
} |
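A minimal usage sketch for the class above. The intrinsic and distortion values (focal lengths, center, radial coefficients, t1/t2) are illustrative assumptions, not values taken from the original source:

// Hypothetical example: convert one observed (distorted) pixel into normalized image coordinates.
RemoveRadialPtoN_F64 pixelToNorm = new RemoveRadialPtoN_F64()
		.setK(500, 500, 0, 320, 240)                             // assumed camera intrinsics
		.setDistortion(new double[]{-0.25, 0.07}, 1e-3, -2e-3);  // assumed k1, k2, t1, t2

Point2D_F64 norm = new Point2D_F64();
pixelToNorm.compute(400, 300, norm);  // distorted pixel (400,300) -> (norm.x, norm.y)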
// Mage/src/main/java/mage/abilities/dynamicvalue/common/SubTypeAssignment.java
package mage.abilities.dynamicvalue.common;
import mage.abilities.dynamicvalue.RoleAssignment;
import mage.cards.Card;
import mage.constants.SubType;
import mage.game.Game;
import java.util.Set;
import java.util.stream.Collectors;
public class SubTypeAssignment extends RoleAssignment<SubType> {
public SubTypeAssignment(SubType... subTypes) {
super(subTypes);
}
@Override
protected Set<SubType> makeSet(Card card, Game game) {
return attributes
.stream()
.filter(subType -> card.hasSubtype(subType, game))
.collect(Collectors.toSet());
}
}
|
pub mod progressbar;
pub mod table;
pub mod text;
|
/**
* Try to perform the actual model enhancing.
*/
private static CtClass doModelModifications(byte[] byteCode, Version modelVersion, ClassLoader... loaders) {
if (!initiated) {
initiate();
}
CtClass cc = null;
LoaderClassPath[] classloaders = new LoaderClassPath[loaders.length];
try {
InputStream stream = new ByteArrayInputStream(byteCode);
cc = cp.makeClass(stream);
if (!JavassistUtils.hasAnnotation(cc, Model.class.getName())) {
return null;
}
LOGGER.debug("Model to enhance: {}", cc.getName());
for (int i = 0; i < loaders.length; i++) {
classloaders[i] = new LoaderClassPath(loaders[i]);
cp.appendClassPath(classloaders[i]);
}
doEnhancement(cc, modelVersion);
LOGGER.info("Finished model enhancing for class {}", cc.getName());
} catch (IOException e) {
LOGGER.error("IOException while trying to enhance model", e);
} catch (RuntimeException e) {
LOGGER.error("RuntimeException while trying to enhance model", e);
} catch (CannotCompileException e) {
LOGGER.error("CannotCompileException while trying to enhance model", e);
} catch (NotFoundException e) {
LOGGER.error("NotFoundException while trying to enhance model", e);
} catch (ClassNotFoundException e) {
LOGGER.error("ClassNotFoundException while trying to enhance model", e);
} finally {
for (int i = 0; i < loaders.length; i++) {
if (classloaders[i] != null) {
cp.removeClassPath(classloaders[i]);
}
}
}
return cc;
} |
Oversaturating synchronous CDMA systems using collaborative coding
Oversaturated synchronous CDMA system is proposed based on collaborative coding, where data bits of group of L + s users are jointly one-to-one mapped onto 2/sup L+s/ L-dimensional signal vectors. Instead of the unique signature per each user, the L-dimensional signature subspace is used to transmit bits of L + s users (s > 0). All signature subspaces are orthogonal to each other which simplifies the optimal receiver structure. With signal space dimension N, the number of users is K = N(1 + s/L). Preferable collaborative codes are found using the sphere packing theory for L = 2...5, s = 1, 2. Trade-off between oversaturation efficiency and energy loss/gain is evaluated against the conventional non-oversaturated orthogonal CDMA. |
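As an illustrative reading of the stated capacity relation (the numbers here are chosen for illustration and are not taken from the paper): with signal space dimension N = 64, group size L = 2 and s = 1 extra user per group, K = N(1 + s/L) = 64(1 + 1/2) = 96, i.e. 50% more users than a conventional orthogonal system of the same dimension.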
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.river.test.impl.outrigger.transaction;
import org.apache.river.qa.harness.Test;
import java.util.logging.Level;
// Test harness specific classes
import org.apache.river.qa.harness.TestException;
// All other imports
import java.rmi.*;
import java.io.File;
import net.jini.core.entry.Entry;
import net.jini.core.lease.Lease;
import net.jini.core.transaction.server.TransactionManager;
import net.jini.core.transaction.TransactionFactory;
import net.jini.core.transaction.Transaction;
import net.jini.space.JavaSpace;
import org.apache.river.test.share.TestBase;
/**
* Writes and entry, creates two transactions, T1 and T2, reads the
* entry under T1, reads the entry under T2, then performs a blocking
* take/takeIfExists under T1, and then commits/aborts T2. Repeats
* trying each combination of aborting/committing T1 and using
* take/takeIfExists.
*/
public class ReadReadTakeTest extends TestBase implements Test {
/** Space under test */
protected JavaSpace space;
/** Transaction Manager we are using */
protected TransactionManager txnMgr;
/** Entry to manipulate */
private SimpleEntry entry = new SimpleEntry("King", 1, 1);
/**
* Return a String which describes this test
*/
public String getDescription() {
return "Test Name = ReadReadTakeTest.";
}
/**
* Return an array of String whose elements comprise the
* categories to which this test belongs.
*/
public String[] getCategories() {
return new String[] {
"outrigger" };
}
public void run() throws Exception {
specifyServices(new Class[] {
TransactionManager.class, JavaSpace.class});
space = (JavaSpace) services[1];
txnMgr = (TransactionManager) services[0];
test(false, true);
test(false, false);
test(true, true);
test(true, false);
}
private void test(boolean commit, boolean useTake) throws Exception {
final String action = useTake?"take":"takeIfExists";
final String resolve = commit?"commit":"abort";
logger.log(Level.INFO, "Starting " + action + "/" + resolve + " trail");
final Lease el = space.write(entry, null, Lease.FOREVER);
addOutriggerLease(el, true);
logger.log(Level.INFO, "wrote entry");
final Transaction.Created txnHolder1 =
TransactionFactory.create(txnMgr, 1000 * 60 * 60);
final Transaction txn1 = txnHolder1.transaction;
addMahaloLease(txnHolder1.lease, true);
final Transaction.Created txnHolder2 =
TransactionFactory.create(txnMgr, 1000 * 60 * 60);
final Transaction txn2 = txnHolder2.transaction;
addMahaloLease(txnHolder2.lease, true);
if (null == space.read(entry, txn1, 0)) {
throw new TestException("Could not perform read 1");
}
logger.log(Level.INFO, "read under txn1");
if (null == space.read(entry, txn2, 0)) {
throw new TestException("Could not perform read 2");
}
logger.log(Level.INFO, "read under txn2");
final TakeThread takeThread = new TakeThread(txn1, useTake);
takeThread.start();
logger.log(Level.INFO, "started TakeThread");
Thread.sleep(15000);
takeThread.confirmCallInProgress();
logger.log(Level.INFO, "calling " + resolve);
if (commit)
txn2.commit(60000);
else
txn2.abort(60000);
logger.log(Level.INFO, resolve + " returned");
if (takeThread.waitOnTakeReturn(30000)) {
logger.log(Level.INFO, action + "/" + resolve + " trail ok");
} else {
logger.log(Level.INFO, action + "/" + resolve + " trail bad");
throw new TestException(action + "/" + resolve + " trail failed");
}
// Make the take role forward so we start clean
txn1.commit(60000);
}
private class TakeThread extends Thread {
private boolean takeCalled = false;
private boolean takeReturned = false;
private boolean resultOk;
private Transaction txn1;
private final boolean useTake;
private TakeThread(Transaction txn, boolean useTake) {
super("TakeThread");
this.useTake = useTake;
txn1 = txn;
}
private synchronized void confirmCallInProgress() throws TestException {
if (!takeCalled)
throw new
TestException("Advancing to txn2 resolution before take call");
if (takeReturned)
throw new
TestException("Advancing to txn2 resolution after take call");
}
private synchronized boolean waitOnTakeReturn(long timeout)
throws InterruptedException
{
long endTime = System.currentTimeMillis() + timeout;
if (endTime > Long.MAX_VALUE)
endTime = Long.MAX_VALUE;
while (!takeReturned) {
final long waitTime = endTime - System.currentTimeMillis();
if (waitTime > 0)
wait(waitTime);
else
break;
}
return resultOk;
}
public void run() {
synchronized (this) {
takeCalled = true;
}
Entry e = null;
try {
if (useTake)
e = space.take(entry, txn1, Long.MAX_VALUE);
else
e = space.takeIfExists(entry, txn1, Long.MAX_VALUE);
} catch (Throwable t) {
t.printStackTrace();
}
synchronized (this) {
resultOk = (e != null);
takeReturned = true;
notifyAll();
}
}
}
}
|
/**
* Convert the input argument into a string describing the time span.
*
* @param interv the time interval, in milliseconds
* @return a String describing the time span
*/
public static String millisToTimeSpan(final long interv)
{
StringBuffer buf = new StringBuffer(100);
long lMillis = interv;
if (lMillis <= 0L)
{
buf.append("0 seconds");
return buf.toString();
}
final long lYears = (long) (lMillis / MSECS_PER_YEAR);
lMillis = lMillis % MSECS_PER_YEAR;
final long lWeeks = (long) (lMillis / MSECS_PER_WEEK);
lMillis = lMillis % MSECS_PER_WEEK;
final long lDays = (long) (lMillis / MSECS_PER_DAY);
lMillis = lMillis % MSECS_PER_DAY;
final long lHours = (long) (lMillis / MSECS_PER_HOUR);
lMillis = lMillis % MSECS_PER_HOUR;
final long lMinutes = (long) (lMillis / MSECS_PER_MIN);
lMillis = lMillis % MSECS_PER_MIN;
final float fSeconds = (float) (((float) lMillis) / 1000.0F);
if (lYears > 0L)
{
buf.append(Long.toString(lYears)).append(" year");
if (lYears > 1L)
{
buf.append('s');
}
}
if (lWeeks > 0L)
{
if (buf.length() > 0)
{
buf.append(", ");
}
buf.append(Long.toString(lWeeks)).append(" week");
if (lWeeks > 1L)
{
buf.append('s');
}
}
if (lDays > 0L)
{
if (buf.length() > 0)
{
buf.append(", ");
}
buf.append(Long.toString(lDays)).append(" day");
if (lDays > 1L)
{
buf.append('s');
}
}
if (lHours > 0L)
{
if (buf.length() > 0)
{
buf.append(", ");
}
buf.append(Long.toString(lHours)).append(" hour");
if (lHours > 1L)
{
buf.append('s');
}
}
if (lMinutes > 0L)
{
if (buf.length() > 0)
{
buf.append(", ");
}
buf.append(Long.toString(lMinutes)).append(" minute");
if (lMinutes > 1L)
{
buf.append('s');
}
}
if (Float.compare(fSeconds, 0.0F) > 0)
{
if (buf.length() > 0)
{
buf.append(", ");
}
DecimalFormat df = new DecimalFormat();
df.setDecimalSeparatorAlwaysShown(false);
df.setMaximumFractionDigits(3);
buf.append(df.format((double) fSeconds)).append(" second");
if (Float.compare(fSeconds, 1.0F) != 0)
{
buf.append('s');
}
}
return buf.toString();
} |
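A small, hypothetical usage check for the helper above, assuming the usual values for the millisecond constants it references (MSECS_PER_DAY = 86,400,000, MSECS_PER_HOUR = 3,600,000, MSECS_PER_MIN = 60,000):

// 1 day + 1 hour + 1 minute + 1 second = 90,061,000 ms
System.out.println(millisToTimeSpan(90061000L)); // "1 day, 1 hour, 1 minute, 1 second"

// Fractional seconds are kept (up to 3 decimal places) and pluralized correctly
System.out.println(millisToTimeSpan(1500L));     // "1.5 seconds"
System.out.println(millisToTimeSpan(0L));        // "0 seconds"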
def isolated_labels(
self,
adata: AnnData,
adata_id: str,
batch_key: str,
group_key: str,
embed: str,
) -> None:
self.__check_key(adata_id)
self.metrics[adata_id]['il_sil'] = metrics.isolated_labels(adata,
group_key,
batch_key,
embed=embed,
iso_threshold=1,
cluster=False)
self.metrics[adata_id]['il_clus'] = metrics.isolated_labels(adata,
group_key,
batch_key,
embed=embed,
iso_threshold=1,
cluster=True) |
def manage_rundir(request, clean_up):
global STARTUP
if STARTUP:
if os.path.exists(run_dir):
shutil.rmtree(run_dir)
STARTUP = False
os.mkdir(run_dir)
def finalize_rundir():
if os.path.exists(run_dir) and clean_up:
shutil.rmtree(run_dir)
request.addfinalizer(finalize_rundir) |
/**
* @param args the command line arguments
*/
public static void main(String[] args) {
ArrayList<String> VariableA = new ArrayList<String>(Arrays.asList("red", "green"));
ArrayList<String> VariableB = new ArrayList<String>(Arrays.asList("A", "B", "C"));
ArrayList<String> VariableC = new ArrayList<String>(Arrays.asList("1", "2", "3", "4"));
ArrayList<ArrayList<String>> AofA = new ArrayList<ArrayList<String>>();
AofA.add(VariableA); AofA.add(VariableB); AofA.add(VariableC);
System.out.println("Array of Arrays: ToString(): " +AofA.toString());
ArrayList<String> optionsList = new ArrayList<String>();
recurse(optionsList, AofA, 0);
for (int i = 0 ; i < testCases.size() ; i++) {
System.out.println("Test Case " + (i+1) + ": " + testCases.get(i));
}
} |
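The main method above calls a recurse helper and reads from a testCases collection that are not shown in the snippet. A minimal sketch of what they could look like (a plain cartesian-product expansion; this is an assumption, not the original author's implementation):

    static ArrayList<String> testCases = new ArrayList<String>();

    static void recurse(ArrayList<String> chosen, ArrayList<ArrayList<String>> options, int depth) {
        if (depth == options.size()) {
            // One complete combination has been chosen, e.g. [red, A, 1]
            testCases.add(chosen.toString());
            return;
        }
        for (String value : options.get(depth)) {
            chosen.add(value);
            recurse(chosen, options, depth + 1);
            chosen.remove(chosen.size() - 1); // backtrack before trying the next value
        }
    }

With the three lists above this produces 2 x 3 x 4 = 24 test cases.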
We report 2 cases of portal vein thrombosis associated with a single point mutation in the factor V gene that replaces arginine in residue 506 with glutamine. This mutation induces abnormal resistance to the anticoagulant activity of activated protein C and increases the risk of deep vein thrombosis. Both patients had a personal and familial history of deep vein thrombosis. Intraabdominal neoplasia or infection, myeloproliferative disorder, antiphospholipid syndrome, paroxysmal nocturnal hemoglobinuria and coagulation inhibitor deficiency (antithrombin, proteins C and S) were excluded by exhaustive investigation. However, an abnormal resistance to activated protein C was found, and DNA analysis showed the factor V Arg506 to Gln mutation in both cases. Anticoagulant treatment was begun. A study of family history, made in one case, showed the same genetic disease in one of the relatives. Resistance to activated protein C with factor V gene mutation should be investigated in patients with portal vein thrombosis. A study of family history and anticoagulant treatment are justified for symptomatic patients.
#include <string>
#include <iostream>
#include <set>
#include <AMReX_ParmParse.H>
#include <AMReX_MultiFab.H>
#include <AMReX_DataServices.H>
#include <AMReX_BCRec.H>
#include <AMReX_Interpolater.H>
#include <WritePlotFile.H>
#include <AMReX_BLFort.H>
#include <mechanism.h>
#include <chemistry_file.H>
#include <EOS.H>
#include <util.H>
using namespace amrex;
static
void
print_usage (int,
char* argv[])
{
std::cerr << "usage:\n";
std::cerr << argv[0] << " infile infile=f1 [options] \n\tOptions:\n";
exit(1);
}
std::string
getFileRoot(const std::string& infile)
{
vector<std::string> tokens = Tokenize(infile,std::string("/"));
return tokens[tokens.size()-1];
}
int
main (int argc,
char* argv[])
{
Initialize(argc,argv);
{
if (argc < 2)
print_usage(argc,argv);
ParmParse pp;
if (pp.contains("help"))
print_usage(argc,argv);
if (pp.contains("verbose"))
AmrData::SetVerbose(true);
std::string plotFileName; pp.get("infile",plotFileName);
DataServices::SetBatchMode();
Amrvis::FileType fileType(Amrvis::NEWPLT);
DataServices dataServices(plotFileName, fileType);
if( ! dataServices.AmrDataOk()) {
DataServices::Dispatch(DataServices::ExitRequest, NULL);
// ^^^ this calls ParallelDescriptor::EndParallel() and exit()
}
AmrData& amrData = dataServices.AmrDataRef();
EOS::init();
int finestLevel = amrData.FinestLevel();
pp.query("finestLevel",finestLevel);
int Nlev = finestLevel + 1;
int idXin = -1;
int idTin = -1;
Vector<std::string> spec_names;
EOS::speciesNames(spec_names);
const Vector<std::string>& plotVarNames = amrData.PlotVarNames();
const std::string spName= "X(" + spec_names[0] + ")";
const std::string TName = "temp";
for (int i=0; i<plotVarNames.size(); ++i)
{
if (plotVarNames[i] == spName) idXin = i;
if (plotVarNames[i] == TName) idTin = i;
}
if (idXin<0 || idTin<0)
Print() << "Cannot find required data in pltfile" << std::endl;
const int idYout = 0;
const int idTout = NUM_SPECIES;
const int nCompIn = NUM_SPECIES + 1;
const int nCompOut = idYout + NUM_SPECIES + 1;
Vector<std::string> outNames(nCompOut);
Vector<std::string> inNames(nCompIn);
Vector<int> destFillComps(nCompIn);
const int idXlocal = 0; // Xs start here
const int idTlocal = NUM_SPECIES; // T start here
for (int i=0; i<NUM_SPECIES; ++i)
{
destFillComps[i] = idXlocal + i;
inNames[i] = "X(" + spec_names[i] + ")";
outNames[i] = "Y(" + spec_names[i] + ")";
}
destFillComps[idTlocal] = idTlocal;
inNames[idTlocal] = TName;
outNames[idTout] = TName;
Vector<std::unique_ptr<MultiFab>> outdata(Nlev);
const int nGrow = 0;
for (int lev=0; lev<Nlev; ++lev)
{
const BoxArray ba = amrData.boxArray(lev);
const DistributionMapping dm(ba);
outdata[lev].reset(new MultiFab(ba,dm,nCompOut,nGrow));
MultiFab indata(ba,dm,nCompIn,nGrow);
Print() << "Reading data for level " << lev << std::endl;
amrData.FillVar(indata,lev,inNames,destFillComps);
Print() << "Data has been read for level " << lev << std::endl;
for (MFIter mfi(indata,TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
const Box& bx = mfi.tilebox();
Array4<Real> const& X = indata.array(mfi);
Array4<Real> const& Tin = indata.array(mfi);
Array4<Real> const& Y = (*outdata[lev]).array(mfi);
Array4<Real> const& Tout = (*outdata[lev]).array(mfi);
AMREX_PARALLEL_FOR_3D ( bx, i, j, k,
{
Real Yl[NUM_SPECIES];
Real Xl[NUM_SPECIES];
for (int n=0; n<NUM_SPECIES; ++n) {
Xl[n] = X(i,j,k,idXlocal+n);
}
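                // EOS::X2Y converts mole fractions X to mass fractions Y; as general
                // background (not a claim about the EOS implementation details), the
                // standard relation is Y_i = X_i*W_i / sum_j(X_j*W_j), where W_i is
                // the molecular weight of species i.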
EOS::X2Y(Xl,Yl);
for (int n=0; n<NUM_SPECIES; ++n) {
Y(i,j,k,idYout+n) = Yl[n];
}
Tout(i,j,k,idTout) = Tin(i,j,k,idTlocal);
});
}
Print() << "Derive finished for level " << lev << std::endl;
}
std::string outfile(getFileRoot(plotFileName) + "_Y");
Print() << "Writing new data to " << outfile << std::endl;
const bool verb = false;
WritePlotFile(GetVecOfPtrs(outdata),amrData,outfile,verb,outNames);
}
Finalize();
return 0;
}
|
The structure of schizotypy, its relation to subdiagnoses of schizophrenia and to sex and age.
A growing amount of evidence suggests that the generally accepted division of schizophrenic symptomatology into positive and negative aspects should be extended to include a third major aspect, namely 'disorganization/social impairment'. As schizotypy can be seen as the non-pathological counterpart of schizophrenia, possibly brought about by the same 'schizotaxic' predisposition(s), it might be expected that the multidimensionality of schizotypy would reflect the tripartite structure seen in schizophrenia. Although the data from studies using scales to measure schizotypy do not clearly support this view, this is mainly because of the relative lack of comparability among the scales used in different studies. Results from the present study, which involve the factor analysis of items rather than scales derived from the testing of a large and diverse population of normal subjects, do, however, support the view that measures of schizotypy may be grouped in a parallel way to symptoms shown by populations of schizophrenic subjects. The suggestion may thus be made that the symptom groupings shown by schizophrenics may be seen as primary and not the secondary result of reactions to earlier phases of the illness. The role of sex and age in the determination of scores on schizotypic dimensions is also examined, and the results show that the sex and age differences found in subdiagnostic categories in schizophrenia are reflected in dimensions of schizotypy.
South Africa's involvement in the battle that toppled the former president of the Central African Republic (CAR), François Bozizé, has laid bare the country's cold war with France for control of Francophone Africa.
And fallout from the battle of the CAR capital, Bangui, now the subject of official enquiries, may result in that war heating up.
Sources in South Africa now say that the French forces that held the Bangui M'poko airport during the battle denied South African soldiers reinforcements and equipment, including urgently needed mortars and rocket-propelled grenades.
Soldiers were later told those weapons were held up at the airport, apparently because the French did not want to allow such weapons into a "peacekeeping" arena.
At least one soldier with intimate knowledge of the battle believes the presence of those weapons in the field could have saved lives.
South African National Defence Force spokesperson Brigadier General Xolani Mabanga denied claims that the SANDF sent extra personnel, arms or ammunition when the security situation deteriorated in Bangui. However, he said the SANDF found reports that the CAR international airport was under the control of French troops "very strange."
"This implies that they [French troops] will decide what goes in and out of the airport. You have to ask what the role was of the French in that nation."
Legitimate reason
One analyst believes the French may have had legitimate reason to allow the use of the airport for the treatment of South African wounded and the withdrawal of troops while not allowing reinforcements or weapons to flow into the CAR.
At the time, a force of about only 200 French troops was holding the airport, as several hundred troops had been moved to Mali.
"The French commander would have known he could not hold the airport against thousands of rebels," the analyst said. "The last thing he'd have wanted to do was to pick a fight."
The new allegations come after previous claims that France had backed the Seleka rebels, ensuring the toppling of Bozizé and South Africa's withdrawal.
Meanwhile, France is now said to believe that South Africa owes it a debt of gratitude for saving SANDF troops by providing medical assistance to wounded soldiers who had taken refuge at M'poko airport. Other unconfirmed reports said French soldiers also assisted in negotiating the release of South African soldiers who had been captured by the rebels.
Said Mabanga: "We were in CAR. They were in CAR. The SANDF was attacked by the rebels. Were the French attacked by the rebels? One has to ask: Were the French on the side of the rebels, or were the rebels on the side of the French? We had no members of the SANDF kept as captives of the rebels."
A source from the department of international relations said there were no plans to thank France.
"How do we say thank you to people who killed our soldiers? By supporting the rebels and Chad with weapons, they killed our soldiers. They will not get that thank you today, tomorrow or ever."
The French diplomatic source said France believed South Africa was at fault in Bangui last month, saying the country had disrespected the Libreville peace accord signed in January between the CAR government and rebels, which had also called for the withdrawal of foreign troops. "Instead of withdrawing, the South African troops stayed and contributed to derailing the process," said the source.
African solutions to African problems
The tension between the two countries is said to stem from South Africa's foreign policy, which seeks to help fellow African states to rid themselves of their former colonial masters, introduce constitutional democratic rule and develop the economies and infrastructure of the continent. South Africa's path of seeking "African solutions to African problems" is another factor that is displeasing to France, according to top government and international relations department sources.
France was opposed to these policies because a united Africa would render the French presence on the continent irrelevant.
"France has never really left Africa. It does not allow those countries to have decent armies so that it can always have a reason to intervene. South Africa wanted to professionalise the CAR army, but we were blocked from concluding that mandate by France," the departmental source said.
The source said France was behind the unsuccessful plans to foil Nkosazana Dlamini-Zuma's election as chairperson of the African Union Commission.
A diplomat from an African country with good relations with France said Francophone countries in Africa saw South Africa as a potential big brother. "Francophones are getting tired of French intervention, but unfortunately in Africa the only country ready to do the job is South Africa," the source said.
The diplomat said the leaders of former French colonies also felt indebted to France. "All those Francophone leaders were put in power by France. [Denis] Sassou [Nguesso, president of the Republic of Congo-Brazzaville]; Bozizé; [Côte d'Ivoire's president Alassane] Ouattara; and [Chad's president Idriss] Déby. They were helped to chase others, so they may be fearing that it will happen to them too. That is why they are keeping quiet."
At last week's summit in Chad, France was accused of backing the coup against Bozizé, said a Cameroonian source in Yaoundé.
Back and forth
"Behind the scenes, there were accusations and counteraccusations that some world powers were behind the coup. France and Chad were named," the source said.
Kwaku Nuamah, the assistant professor of international peace and conflict resolution at American University in Washington, said France had always seen itself as an African power.
"South Africa is going to bump up against traditional powers in Africa. France, more than other former colonial powers, is still deeply involved in Africa, so it is directly affected," Nuamah said.
He said African Francophone countries had "gone back and forth" in their commitment to their former colonial power, whereas Anglophone countries appeared to have moved on. "You have colonial powers still in charge in some states and they are suspicious of regional powers trying to occupy that same space."
A United States diplomatic cable made public by WikiLeaks shows that the US believed, in December 2006, on the eve of the SANDF deployment to the CAR, that France was annoyed by the move.
Said Nuamah: "France has stabilised these countries' currencies and is their biggest trading partner. For the elite, the question is: Who is more likely to protect your interests? France or South Africa? And the answer, for now at least, is France, which helps them more than it should. They have homes and bank accounts in France. Their children go to school there and they seek medical care there after wrecking the healthcare systems in their own countries."
Although South Africa sells its African agenda as a desire to ensure that countries are run along democratic lines, Nuamah said: "France doesn't really care that much about the quality of democracy in its former colonies."
Nuamah said Francophone Africa might warm to South Africa, "but you still need to give them a reason to switch patrons. And remember, these patronage relationships are not based on national-interest calculations. It's the interests of the elite that shape these relationships. So to come in and say, 'You must switch', South Africa needs to show the elites how it's going to protect their interests."
International relations department spokesperson Clayson Monyela denied there was any friction. "Our relations with France remain cordial. We co-operate well on a multilateral platform and on African issues."
A response from the French embassy had not been received by the time of going to press. |
In order to reduce pollution levels in the capital, the Delhi government has asked the Centre to make a provision in the law to impose a hefty fine on vehicles more than 15 years old entering the city.
Concerned over the increasing pollution levels, Union Environment Minister Prakash Javadekar on Monday called a meeting with the environment ministers of Haryana, Uttar Pradesh, Rajasthan and Delhi.
According to the city's government, there are thousands of old vehicles from other states, both light and heavy, which cause high levels of pollution.
"We have suggested Union Environment Minister to make such a provision in the law to impose a hefty fine on over 15-year-old vehicles entering the capital to discourage them from running here," Delhi Environment Minister Asim Ahmed Khan told PTI.
Union Environment Ministry has convened a meeting with environment ministers of Delhi, Haryana, Rajasthan and Uttar Pradesh on Monday to discuss the issue again.
Khan said that more than 15-year-old vehicles, be it from Haryana, Punjab, or Jammu & Kashmir, they should be challaned strictly when they enter the capital.
"There is an urgent need to reduce pollution levels in the national capital and keeping this in mind, we have to take some major decisions. In the past, several files to curb pollution have been prepared, but nothing happed on ground," he said.
Dismayed at the increasing level of air pollution in Delhi, National Green Tribunal had on November 26 barred all vehicles - private cars, bikes, commercial vehicles, buses and trucks - over 15 years old from plying in the capital. |
/**
* nfexp_callback_register - register a callback
* @h: library handler
* @cb: callback used to process expect received
* @data: data used by the callback, if any.
*
 * This function registers a callback to handle each expectation received.
 * In case of error -1 is returned and errno is set appropriately; otherwise
 * 0 is returned.
*
* Note that the data parameter is optional, if you do not want to pass any
* data to your callback, then use NULL.
*/
int nfexp_callback_register(struct nfct_handle *h,
enum nf_conntrack_msg_type type,
int (*cb)(enum nf_conntrack_msg_type type,
struct nf_expect *exp,
void *data),
void *data)
{
struct __data_container *container;
assert(h != NULL);
container = malloc(sizeof(struct __data_container));
if (!container)
return -1;
memset(container, 0, sizeof(struct __data_container));
h->expect_cb = cb;
container->h = h;
container->type = type;
container->data = data;
h->nfnl_cb.call = __expect_callback;
h->nfnl_cb.data = container;
h->nfnl_cb.attr_count = CTA_EXPECT_MAX;
nfnl_callback_register(h->nfnlssh_exp,
IPCTNL_MSG_EXP_NEW,
&h->nfnl_cb);
nfnl_callback_register(h->nfnlssh_exp,
IPCTNL_MSG_EXP_DELETE,
&h->nfnl_cb);
return 0;
} |
def decode_subst(msg, steps=4000, restarts=90):
msg = cat(allwords(msg))
candidates = [
hillclimb(encode(msg, key=cat(shuffled(alphabet))), logP3letters,
neighboring_msgs, steps) for _ in range(restarts)
]
p, words = max(segment2(c) for c in candidates)
return ' '.join(words) |
#include <gtest/gtest.h>
#include <string>
#include "max_substring.h"
using namespace std;
TEST(NormalCase, SameLength)
{
string str1 = {"abcdef"}, str2 = {"defabc"};
string res = get_max_substring(str1, str2);
ASSERT_TRUE((res == "abc") || (res == "def"));
}
TEST(NormalCase, DifferentLength)
{
string str1 = {"abcdefg"}, str2 = {"defgabc"};
string res = get_max_substring(str1, str2);
ASSERT_EQ(res, "defg");
}
TEST(CornerCase, OneEmpty)
{
string str1 = {"abcdef"}, str2;
string res = get_max_substring(str1, str2);
ASSERT_TRUE(res.empty());
}
TEST(CornerCase, BothEmpty)
{
string str1, str2;
string res = get_max_substring(str1, str2);
ASSERT_TRUE(res.empty());
}
int main(int argc, char *argv[])
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
|
/**
* Represents a region where an experiment takes place
*/
public class Region implements Serializable {
private String regionText;
private Location geoLocation;
private Double kmRadius;
public Region() {
this.regionText = "";
this.geoLocation = new Location();
this.kmRadius = null;
}
public Region(String text) {
this.regionText = text;
this.geoLocation = new Location();
this.kmRadius = null;
}
/**
* Gets the description of a region
*
* @return the description of a region
*/
public String getRegionText() {
return regionText;
}
/**
* Sets the description of a region
*
* @param regionText the description of a region to be set
*/
public void setRegionText(String regionText) {
this.regionText = regionText;
}
/**
* Gets the region radius in kilometres
*
* @return the region radius in kilometres
*/
public Double getKmRadius() {
return kmRadius;
}
/**
* Sets the region radius in kilometres
*
* @param kmRadius the region radius in kilometres to be set
*/
public void setKmRadius(Double kmRadius) {
this.kmRadius = kmRadius;
}
/**
* Gets the geo location of the experiment.
*
* @return The geo location of the experiment.
*/
public Location getGeoLocation() {
return geoLocation;
}
/**
* Sets the geo location of the experiment.
*
* @param geoLocation The location to set as the geo location of the experiment.
*/
public void setGeoLocation(Location geoLocation) {
this.geoLocation = geoLocation;
}
} |
#include<stdio.h>
int main()
{
    long long int T, N, K, i, c, b;
scanf("%lld",&T);
for(i=0;i<T;i++)
{
c=0;
scanf("%lld",&N);
scanf("%lld",&K);
while(N!=0)
{
if(N%K==0)
{
N=N/K;
c++;
}
else
{
b=N/K;
c=c+N-(b*K);
N=b*K;
}
}
printf("%lld\n",c);
}
}
|
import { ArraySchema, MapSchema } from "@colyseus/schema";
import { GamePlayState } from "./GamePlayState";
import { CardSuit, ICard } from "./ICard";
import { IChatMessage } from "./IChatMessage";
import { IPlayer } from "./IPlayer";
export interface ICourtPieceState {
players: MapSchema<IPlayer>;
currentPlayer: number;
playState: GamePlayState;
activeCards: MapSchema<ICard>;
roundNumber: number;
trumpSuit: CardSuit;
roundSuit: CardSuit;
trumpPlayerId: string;
winnerPlayerId: string;
chatMessages: ArraySchema<IChatMessage>;
gameWins: MapSchema<number>;
handWins: MapSchema<number>;
}
|
package chat_test
import (
"reflect"
"testing"
"github.com/PacktPublishing/Hands-On-Software-Engineering-with-Golang/Chapter04/chat"
)
func TestChatRoomBroadcast(t *testing.T) {
pub := new(spyPublisher)
room := chat.NewRoom(pub)
room.AddUser("bob")
room.AddUser("alice")
_ = room.Broadcast("hi")
// Check published entries
exp := []entry{
{user: "bob", message: "hi"},
{user: "alice", message: "hi"},
}
if got := pub.published; !reflect.DeepEqual(got, exp) {
t.Fatalf("expected the following messages:\n%#+v\ngot:\n%#+v", exp, got)
}
}
type entry struct {
user string
message string
}
type spyPublisher struct {
published []entry
}
func (p *spyPublisher) Publish(user, message string) error {
p.published = append(p.published, entry{user: user, message: message})
return nil
}
|
// backend/src/factories/upload-controller-factory.ts
import { UploadHelper } from '../helpers/upload-helper'
import { UploadController } from '../controllers/upload-controller'
import { LocalStorage } from '../adapters/local-storage-adapter'
export const makeUploadController = (): UploadController => {
const localStorage = new LocalStorage()
const uploadHelper = new UploadHelper(localStorage)
return new UploadController(uploadHelper)
}
|
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
struct Vertex
{
struct Edge *edgeList[100];
int edgeCount;
int id;
int status;
};
struct Edge
{
struct Vertex *end[2];
int w;
};
// Depth-first traversal starting from the given vertex v
void DFS(struct Vertex *v)
{
if (v->status != 0)
return;
printf("%d ", v->id);
v->status = 1;
for (int i = 0; i < v->edgeCount; i++)
{
if(v->edgeList[i]->end[0] == v->edgeList[i]->end[1])
continue;
if (v->edgeList[i]->end[0] == v)
DFS(v->edgeList[i]->end[1]);
else
DFS(v->edgeList[i]->end[0]);
}
}
void BFS(struct Vertex *v)
{
struct Vertex *visited[100];
int first = 0, rear = 0;
visited[rear++] = v, visited[first]->status = 1;
while (first != rear)
{
printf("%d ", visited[first]->id);
for (int i = 0; i < visited[first]->edgeCount; i++)
{
if (visited[first]->edgeList[i]->end[0] == visited[first]->edgeList[i]->end[1]) // ignore self loop
continue;
if (visited[first]->edgeList[i]->end[0] == visited[first])
{
if (visited[first]->edgeList[i]->end[1]->status != 0)
continue;
visited[first]->edgeList[i]->end[1]->status = 1;
visited[rear++] = visited[first]->edgeList[i]->end[1];
}
else
{
if (visited[first]->edgeList[i]->end[0]->status != 0)
continue;
visited[first]->edgeList[i]->end[0]->status = 1;
visited[rear++] = visited[first]->edgeList[i]->end[0];
}
}
first++;
}
}
void print(struct Vertex v[10])
{
int j, k;
for(j = 0;j < 10;j ++)
{
printf("%d :", v[j].id);
for(k = 0;k < v[j].edgeCount;k ++)
{
if(v[j].edgeList[k]->end[0] == &v[j])
printf("%d ", v[j].edgeList[k]->end[1]->id);
else
printf("%d ", v[j].edgeList[k]->end[0]->id);
}
printf("\n");
}
}
int main()
{
struct Vertex v[10];
struct Edge e[100];
int j, k, i, l;
srand(time(NULL));
    // Generate the vertices
for(j = 0;j < 10;j ++)
{
v[j].id = j + 1;
v[j].edgeCount = 0;
}
    // Generate the edges
    k = rand() % 20 + 1; // number of edges
for(j = 0;j < k;j ++)
{
        // First pick the two endpoints
i = rand() % 10;
l = rand() % 10;
        // Edge weight
e[j].w = rand() % 100;
        // Connect the edge to its two endpoints
e[j].end[0] = &v[i];
e[j].end[1] = &v[l];
printf("!!\n");
        // Add the edge to both endpoints' edge lists
v[i].edgeList[v[i].edgeCount] = &e[j];
v[i].edgeCount ++;
v[l].edgeList[v[l].edgeCount] = &e[j];
v[l].edgeCount ++;
}
print(v);
for(j = 0;j < 10;j ++)
{
BFS(&v[j]);
printf("\n-----------------------------------------------------------------\n");
DFS(&v[j]);
printf("\n=================================================================\n");
}
} |
from bs4 import BeautifulSoup


def extract_assets(html):
soup = BeautifulSoup(html, 'html.parser')
tags_and_attributes = (
('a', 'href'),
('link', 'href'),
('img', 'src'),
('script', 'src'),
)
assets = []
for tag, attribute in tags_and_attributes:
asset_elements = soup.find_all(tag)
assets.extend([
element.attrs[attribute]
for element in asset_elements
if attribute in element.attrs
])
return assets |
//Read and Display array elements
#include <stdio.h>
int main(void)
{
int a[20], n, i, *p;
p = a; //Pointer Initialization
printf("Enter the number of elements:\n");
scanf("%d", &n);
//Address of (ptr+i)
//Value stored at the address *(ptr+i)
printf("Enter the elements:\n");
for (i = 0; i < n; i++)
{
scanf("%d", (p + i)); //No need to give &(p+i)
}
printf("The Array elements are:\n");
for (i = 0; i < n; i++)
{
printf("%d\t", *(p + i));
}
} |
Islamic State (Isis) has released a propaganda song in Mandarin, aimed at recruiting Chinese Muslims, just two weeks after executing a hostage from the country. The recording is in the form of a 'nasheed' – an Islamic chant sung a cappella.
The four-minute song found its way to the internet via jihadist channels. Uighur Muslims have reportedly been targeted in the Chinese-language song, along with the entire Muslim community in the country.
In what is claimed to be near-perfect Mandarin pronunciation, the song roughly translates to: "The brilliance of Islam is etched in history. The purpose of our struggle is to let it shine again. Wake up! Muslim brother, now is the time to awaken. Take up your faith and courage, fulfil the lost doctrine."
The production quality is in no way inferior to the earlier releases of IS (Daesh), although no instruments have been used in the song as the Sunni group's harsh interpretation of Islam prohibits the use of music in songs. Extremist groups from the Middle East have previously tried to recruit Muslims in China, but it is unusual for them to release propaganda material in Mandarin.
It is unlikely that the recording would reach the Chinese public, as the country's cyberspace is heavily-censored. The Communist government in Beijing is critical of Muslims in the country, and Uighur separatists are in conflict with government forces in the restive Xinjiang region.
Meanwhile, officials in Beijing said they are not yet aware of the song. Foreign ministry spokesperson Hua Chunying told reporters during a routine press conference: "We hope we can safeguard the citizens of every country through international cooperation."
China has earlier expressed serious concern over the growing challenges posed by IS. It is estimated anywhere between 100 and 300 Chinese nationals are currently fighting in the conflict zones of Iraq and Syria on behalf of IS. |
/**
* Modify dataset in catalog.
*
* @param dataset
     * @return HTTP 200 OK if the dataset could be updated.
*/
@PreAuthorize("hasPermission(#catalogId, 'write')")
@CrossOrigin
@RequestMapping(value = "/{id}", method = PUT, consumes = APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_UTF8_VALUE)
public HttpEntity<Dataset> saveDataset(@PathVariable("catalogId") String catalogId, @PathVariable("id") String datasetId, @RequestBody Dataset dataset) {
logger.info("PUT requestbody dataset: " + dataset.toString());
dataset.setId(datasetId);
dataset.setCatalogId(catalogId);
if (!datasetRepository.findById(dataset.getId()).isPresent()) {
return ResponseEntity.notFound().build();
}
dataset.set_lastModified(Calendar.getInstance().getTime());
Dataset savedDataset = datasetRepository.save(dataset);
return new ResponseEntity<>(savedDataset, HttpStatus.OK);
} |
# openapi_core/testing/requests.py
"""OpenAPI core testing requests module"""
from urllib.parse import urljoin
from werkzeug.datastructures import Headers
from werkzeug.datastructures import ImmutableMultiDict
from openapi_core.validation.request.datatypes import OpenAPIRequest
from openapi_core.validation.request.datatypes import RequestParameters
class MockRequestFactory:
@classmethod
def create(
cls,
host_url,
method,
path,
path_pattern=None,
args=None,
view_args=None,
headers=None,
cookies=None,
data=None,
mimetype="application/json",
):
path_pattern = path_pattern or path
path = view_args or {}
query = ImmutableMultiDict(args or {})
header = Headers(headers or {})
cookie = ImmutableMultiDict(cookies or {})
parameters = RequestParameters(
path=path,
query=query,
header=header,
cookie=cookie,
)
method = method.lower()
body = data or ""
full_url_pattern = urljoin(host_url, path_pattern)
return OpenAPIRequest(
full_url_pattern=full_url_pattern,
method=method,
parameters=parameters,
body=body,
mimetype=mimetype,
)
|
A Method for Alternatives Ranking Using an OWA Operator Based on the Laplace Distribution
We consider the problem of representing a multiple-criteria (i.e., multiple heterogeneous measurements) object by a single value that we can use to compare and rank different objects. An intrinsic characteristic of the multiple criteria is their differing nature (e.g., high quality and low price), and thus the ranking process is heavily dependent on the decision-makers' preferences and viewpoints. The heterogeneous criteria make it difficult to find an overall value that represents the trade-offs of an object. For example, is it possible to represent the different criteria of a car by a single number and use this number to rank different cars in single and multiple decision-maker settings? To answer this question, we extend our proposed method to calculate a weight vector of the Ordered Weighted Average (OWA) operator based on the Laplace distribution and use it to illustrate how to rank a dataset of used cars, and we compare the results with six other OWA operators. In this paper, we prove the characteristics of the new operator and illustrate its benefits in single and multiple decision-making settings. Finally, to find out how well the new OWA operator can represent the information per object, we employ the values produced by the new OWA in a regression model to estimate the used car price.
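For background, the standard OWA aggregation (general textbook form, not the paper's specific construction) is OWA_w(a_1, ..., a_n) = w_1*b_1 + ... + w_n*b_n, where b_j is the j-th largest of the inputs a_i and the non-negative weights w_j sum to 1; the paper's contribution is a particular way of deriving the weight vector from the Laplace distribution. As an illustrative, assumed instance: with weights w = (0.5, 0.3, 0.2) and criteria scores a = (0.4, 0.9, 0.6), the ordered values are (0.9, 0.6, 0.4) and OWA = 0.5*0.9 + 0.3*0.6 + 0.2*0.4 = 0.71.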
import { expect } from "chai";
import { stub } from "sinon";
import * as React from "react";
import { shallow } from "enzyme";
import { Stats } from "../Stats";
import ErrorMessage from "../ErrorMessage";
import LoadingIndicator from "opds-web-client/lib/components/LoadingIndicator";
import LibraryStats from "../LibraryStats";
import { StatsData, LibraryStatsData, LibraryData } from "../../interfaces";
describe("Stats", () => {
const libraryStatsData: LibraryStatsData = {
patrons: {
total: 3456,
with_active_loans: 55,
with_active_loans_or_holds: 1234,
loans: 100,
holds: 2000,
},
inventory: {
titles: 54321,
licenses: 123456,
available_licenses: 100000,
},
collections: {
Overdrive: {
licensed_titles: 500,
open_access_titles: 10,
licenses: 350,
available_licenses: 100,
},
Bibliotheca: {
licensed_titles: 400,
open_access_titles: 0,
licenses: 300,
available_licenses: 170,
},
"Axis 360": {
licensed_titles: 300,
open_access_titles: 0,
licenses: 280,
available_licenses: 260,
},
"Open Bookshelf": {
licensed_titles: 0,
open_access_titles: 1200,
licenses: 0,
available_licenses: 0,
},
},
};
const totalStatsData = Object.assign({}, libraryStatsData, {
inventory: {
titles: 100000,
licenses: 234567,
available_licenses: 200000,
},
});
const statsData: StatsData = {
NYPL: libraryStatsData,
BPL: libraryStatsData,
total: totalStatsData,
};
const librariesData: LibraryData[] = [
{ short_name: "NYPL" },
{ short_name: "BPL" },
];
describe("rendering", () => {
let wrapper;
const fetchError = { status: 401, response: "test", url: "test url" };
let fetchStats;
let fetchLibraries;
beforeEach(() => {
fetchStats = stub().returns(
new Promise<void>((resolve, reject) => resolve())
);
fetchLibraries = stub().returns(
new Promise<void>((resolve, reject) => resolve())
);
wrapper = shallow(
<Stats
stats={statsData}
libraries={librariesData}
fetchStats={fetchStats}
fetchLibraries={fetchLibraries}
isLoaded={false}
/>
);
});
it("shows error message", () => {
let error = wrapper.find(ErrorMessage);
expect(error.length).to.equal(0);
wrapper.setProps({ fetchError });
error = wrapper.find(ErrorMessage);
expect(error.length).to.equal(1);
});
it("shows/hides loading indicator", () => {
let loading = wrapper.find(LoadingIndicator);
expect(loading.length).to.equal(1);
wrapper.setProps({ isLoaded: true });
loading = wrapper.find(LoadingIndicator);
expect(loading.length).to.equal(0);
});
it("shows LibraryStats", () => {
wrapper.setProps({ isLoaded: true, library: "NYPL" });
let libraryStats = wrapper.find(LibraryStats);
expect(libraryStats.length).to.equal(2);
expect(libraryStats.at(0).props().stats).to.deep.equal(libraryStatsData);
expect(libraryStats.at(0).props().library).to.deep.equal(
librariesData[0]
);
expect(libraryStats.at(1).props().stats).to.deep.equal(totalStatsData);
expect(libraryStats.at(1).props().library).to.be.undefined;
// No total stats.
wrapper.setProps({ stats: { NYPL: libraryStatsData } });
libraryStats = wrapper.find(LibraryStats);
expect(libraryStats.length).to.equal(1);
expect(libraryStats.at(0).props().stats).to.deep.equal(libraryStatsData);
expect(libraryStats.at(0).props().library).to.deep.equal(
librariesData[0]
);
// Still no total stats, since there's only one library.
wrapper.setProps({
stats: { NYPL: libraryStatsData, total: totalStatsData },
libraries: [librariesData[0]],
});
libraryStats = wrapper.find(LibraryStats);
expect(libraryStats.length).to.equal(1);
expect(libraryStats.at(0).props().stats).to.deep.equal(libraryStatsData);
expect(libraryStats.at(0).props().library).to.deep.equal(
librariesData[0]
);
// No library stats.
wrapper.setProps({
stats: { total: totalStatsData },
libraries: librariesData,
library: null,
});
libraryStats = wrapper.find(LibraryStats);
expect(libraryStats.length).to.equal(1);
expect(libraryStats.at(0).props().stats).to.deep.equal(totalStatsData);
expect(libraryStats.at(0).props().library).to.be.undefined;
});
});
});
|
// -*-c++-*- osgWidget - Code by: <NAME> (cubicool) 2007-2008
// $Id: Box.cpp 50 2008-05-06 05:06:36Z cubicool $
#include <osgDB/Registry>
#include <osgDB/Input>
#include <osgDB/Output>
#include <osgDB/FileUtils>
#include <osgWidget/Box>
bool osgWidget_Box_readData(osg::Object& /*obj*/, osgDB::Input& fr)
{
/*
osgWidget::Box& box = static_cast<osgWidgegt::Box&>(obj);
if(fr[0].matchWord("skeleton") and fr[1].isString()) iter = loadFile(
"skeleton",
&osgCal::osgWidget_Box::loadSkeleton,
model,
fr
);
if(fr[0].matchWord("animation") and fr[1].isString()) iter = loadFile(
"animation",
&osgCal::osgWidget_Box::loadAnimation,
model,
fr
);
if(fr[0].matchWord("mesh") and fr[1].isString()) iter = loadFile(
"mesh",
&osgCal::osgWidget_Box::loadMesh,
model,
fr
);
if(fr[0].matchWord("material") and fr[1].isString()) iter = loadFile(
"material",
&osgCal::osgWidget_Box::loadMaterial,
model,
fr
);
*/
osgWidget::warn() << "Box read" << std::endl;
return false;
}
bool osgWidget_Box_writeData(const osg::Object& /*obj*/, osgDB::Output& fw)
{
// const osgWidget::Box& model = static_cast<const osgWidget::Box&>(obj);
fw.indent() << fw.wrapString("Box stuff...") << std::endl;
return true;
}
/*
bool Model_readData(osg::Object& obj, osgDB::Input& fr) {
bool iter = false;
osgCal::Model& model = static_cast<osgCal::Model&>(obj);
osgCal::osgWidget_Box* core = static_cast<osgCal::osgWidget_Box*>(
fr.readObjectOfType(osgCal::osgWidget_Box("dummy"))
);
if(core) {
model.create(core);
iter = true;
}
if(fr.matchSequence("StartAnimation")) {
if(fr[1].isString()) {
int animation = core->getAnimationId(fr[1].getStr());
if(animation >= 0) model.startLoop(animation, 1.0f, 0.0f);
else osg::notify(osg::WARN)
<< "Couldn't start animation: " << fr[1].getStr()
<< std::endl
;
iter = true;
fr += 2;
}
}
return iter;
}
bool Model_writeData(const osg::Object& obj, osgDB::Output& fw) {
const osgCal::Model& model = static_cast<const osgCal::Model&>(obj);
fw.writeObject(*model.getosgWidget_Box());
return true;
}
osgDB::RegisterDotOsgWrapperProxy g_ModelProxy(
new osgCal::Model,
"Model",
"Object Node Model",
&Model_readData,
&Model_writeData
);
*/
osgDB::RegisterDotOsgWrapperProxy g_osgWidget_BoxProxy(
new osgWidget::Box("unset"),
"osgWidget::Box",
"Object Node Group Transform MatrixTransform osgWidget::Box",
&osgWidget_Box_readData,
&osgWidget_Box_writeData
);
|
/**
*
* RadialDFTLib
*
* RadialSrelSolver.cpp
*
* Created by Franco Moitzi on 3/3/23.
*
* Copyright (c) 2023 University of Leoben. All rights reserved.
*
*/
#include "RadialSrelSolver.hpp"
void lsms::RadialSrelOut(std::complex<double> E, int kappa,
std::complex<double> *P, std::complex<double> *Q,
const double *R, const double *Rp, const double *V,
int end, int sign_correction, double *beta) {
std::complex<double> yp[2];
std::complex<double> f0[2];
std::complex<double> f1[2];
std::complex<double> f2[2];
std::complex<double> f3[2];
std::complex<double> delta;
double lam;
std::complex<double> i0;
std::complex<double> i1;
std::complex<double> M11;
std::complex<double> M21;
std::complex<double> M12;
std::complex<double> M22;
std::complex<double> k = kappa * kappa + kappa;
double Z = -V[0] * R[0];
if (Z > 0.0001) {
*beta = std::sqrt(1 - (Z * Z) / (c * c) + (kappa * kappa + kappa));
} else {
*beta = abs(kappa);
}
double start_value = 1e-5;
if (Z > 0.0001) {
P[0] = start_value;
Q[0] = start_value * (k)*c / (Z * (*beta + 1));
} else {
int l = -kappa - 1;
P[0] = pow(start_value, (l + 1.0)) * pow(-1, sign_correction);
Q[0] = (l + 1.0) * pow(start_value, l) * pow(-1, sign_correction);
}
// Predictor Corrector Euler
cdirac_radial_func_nosoc(R[0], Rp[0], E, V[0], kappa, P[0], Q[0], f0);
yp[0] = P[0] + f0[0];
yp[1] = Q[0] + f0[1];
cdirac_radial_func_nosoc(R[1], Rp[1], E, V[1], kappa, yp[0], yp[1], f1);
P[1] = P[0] + (f0[0] + f1[0]) / 2.0;
Q[1] = Q[0] + (f0[1] + f1[1]) / 2.0;
// Adams Moulton 3
cdirac_radial_func_nosoc(R[1], Rp[1], E, V[1], kappa, P[1], Q[1], f1);
i0 = P[1] + (-f0[0] + 8.0 * f1[0]) / 12.0;
i1 = Q[1] + (-f0[1] + 8.0 * f1[1]) / 12.0;
lam = 5.0 / 12.0;
delta = 1.0 + lam * lam * Rp[2] * Rp[2] *
((E - V[2]) * (E - V[2]) / (c_2) + 2.0 * E - 2 * V[2] -
(k + 1.0) / (R[2] * R[2]));
M11 = 1 + lam * Rp[2] / R[2];
M12 = lam * Rp[2] * (E / c - V[2] / c + 2 * c);
M21 = lam * Rp[2] *
(-E / c + V[2] / c + (k) / (R[2] * R[2] * (E / c - V[2] / c + 2 * c)));
M22 = 1 - lam * Rp[2] / R[2];
P[2] = (M11 * i0 + M12 * i1) / delta;
Q[2] = (M21 * i0 + M22 * i1) / delta;
// Adams Moulton 4
cdirac_radial_func_nosoc(R[2], Rp[2], E, V[2], kappa, P[2], Q[2], f2);
i0 = P[2] + (f0[0] - 5.0 * f1[0] + 19.0 * f2[0]) / 24.0;
i1 = Q[2] + (f0[1] - 5.0 * f1[1] + 19.0 * f2[1]) / 24.0;
lam = 9.0 / 24.0;
delta = 1.0 + lam * lam * Rp[3] * Rp[3] *
((E - V[3]) * (E - V[3]) / (c_2) + 2.0 * E - 2 * V[3] -
(k + 1.0) / (R[3] * R[3]));
M11 = 1 + lam * Rp[3] / R[3];
M12 = lam * Rp[3] * (E / c - V[3] / c + 2 * c);
M21 = lam * Rp[3] *
(-E / c + V[3] / c + (k) / (R[3] * R[3] * (E / c - V[3] / c + 2 * c)));
M22 = 1 - lam * Rp[3] / R[3];
P[3] = (M11 * i0 + M12 * i1) / delta;
Q[3] = (M21 * i0 + M22 * i1) / delta;
// Adams Moulton 5
for (int idx = 3; idx < end - 1; idx++) {
cdirac_radial_func_nosoc(R[idx - 3], Rp[idx - 3], E, V[idx - 3], kappa,
P[idx - 3], Q[idx - 3], f0);
cdirac_radial_func_nosoc(R[idx - 2], Rp[idx - 2], E, V[idx - 2], kappa,
P[idx - 2], Q[idx - 2], f1);
cdirac_radial_func_nosoc(R[idx - 1], Rp[idx - 1], E, V[idx - 1], kappa,
P[idx - 1], Q[idx - 1], f2);
cdirac_radial_func_nosoc(R[idx], Rp[idx], E, V[idx], kappa, P[idx], Q[idx],
f3);
i0 = P[idx] +
1.0 / 720 *
(-19.0 * f0[0] + 106.0 * f1[0] - 264.0 * f2[0] + 646.0 * f3[0]);
i1 = Q[idx] +
1.0 / 720 *
(-19.0 * f0[1] + 106.0 * f1[1] - 264.0 * f2[1] + 646.0 * f3[1]);
lam = 251.0 / 720.0;
delta = 1.0 + lam * lam * Rp[idx + 1] * Rp[idx + 1] *
((E - V[idx + 1]) * (E - V[idx + 1]) / (c_2) + 2.0 * E -
2 * V[idx + 1] - (k + 1.0) / (R[idx + 1] * R[idx + 1]));
M11 = 1 + lam * Rp[idx + 1] / R[idx + 1];
M12 = lam * Rp[idx + 1] * (E / c - V[idx + 1] / c + 2 * c);
M21 = lam * Rp[idx + 1] *
(-E / c + V[idx + 1] / c +
(k) / (R[idx + 1] * R[idx + 1] *
(E / c - V[idx + 1] / c +
2 * c)));
M22 = 1 - lam * Rp[idx + 1] / R[idx + 1];
P[idx + 1] = (M11 * i0 + M12 * i1) / delta;
Q[idx + 1] = (M21 * i0 + M22 * i1) / delta;
}
}
void lsms::RadialSrelIn(std::complex<double> E, int kappa,
std::complex<double> *P,
std::complex<double> *Q,
const double *R,
const double *Rp,
const double *V,
int end,
std::complex<double> *P_last,
std::complex<double> *Q_last,
int stop) {
int idx = end - 1;
int start_imax = idx;
std::complex<double> f0[2], f1[2], f2[2];
std::complex<double> delta, lam, i0, i1;
std::complex<double> M11;
std::complex<double> M21;
std::complex<double> M12;
std::complex<double> M22;
const double min_tol = 1.0e-10;
std::complex<double> lambda = std::sqrt(-2.0 * E - E * E / (c_2));
  // Use asymptotics for the starting values
for (idx = end - 1; idx >= stop; idx--) {
P[idx] = exp(-lambda * R[idx]) / sqrt(-E / (E + 2 * c_2));
Q[idx] = -exp(-lambda * R[idx]);
if (std::abs(P[idx]) > min_tol) {
start_imax = idx;
break;
}
}
/*
* Calculate 3 more points and then use predictor corrector to improve the
* last point
*/
P[start_imax - 1] =
exp(-lambda * R[start_imax - 1]) / sqrt(-E / (E + 2 * c_2));
Q[start_imax - 1] = -exp(-lambda * R[start_imax - 1]);
P[start_imax - 2] =
exp(-lambda * R[start_imax - 2]) / sqrt(-E / (E + 2 * c_2));
Q[start_imax - 2] = -exp(-lambda * R[start_imax - 2]);
for (idx = start_imax; idx > stop + 3; --idx) {
cdirac_radial_func_nosoc(R[idx], Rp[idx], E, V[idx], kappa,
P[idx], Q[idx], f0);
cdirac_radial_func_nosoc(R[idx - 1], Rp[idx - 1], E, V[idx - 1], kappa,
P[idx - 1], Q[idx - 1], f1);
cdirac_radial_func_nosoc(R[idx - 2], Rp[idx - 2], E, V[idx - 2], kappa,
P[idx - 2], Q[idx - 2], f2);
i0 = P[idx - 2] - 1.0 / 24 * (f0[0] - 5.0 * f1[0] + 19.0 * f2[0]);
i1 = Q[idx - 2] - 1.0 / 24 * (f0[1] - 5.0 * f1[1] + 19.0 * f2[1]);
lam = -9.0 / 24.0;
delta = 1.0 +
lam * lam * Rp[idx - 3] * Rp[idx - 3] *
(-kappa * kappa / (R[idx - 3] * R[idx - 3]) -
(-E + V[idx - 3]) * (2 * c + (E - V[idx - 3]) / c) / c);
    M11 = -double(kappa) * lam * Rp[idx - 3] / R[idx - 3] + 1.0;
    M22 = double(kappa) * lam * Rp[idx - 3] / R[idx - 3] + 1.0;
    M21 = lam * Rp[idx - 3] * (-E + V[idx - 3]) / c;
    M12 = lam * Rp[idx - 3] * (2 * c + (E - V[idx - 3]) / c);
P[idx - 3] = (M11 * i0 + M12 * i1) / delta;
Q[idx - 3] = (M21 * i0 + M22 * i1) / delta;
}
idx = stop + 3;
cdirac_radial_func_nosoc(R[idx], Rp[idx], E, V[idx], kappa,
P[idx], Q[idx], f0);
cdirac_radial_func_nosoc(R[idx - 1], Rp[idx - 1], E, V[idx - 1], kappa,
P[idx - 1], Q[idx - 1], f1);
cdirac_radial_func_nosoc(R[idx - 2], Rp[idx - 2], E, V[idx - 2], kappa,
P[idx - 2], Q[idx - 2], f2);
i0 = P[idx - 2] - 1.0 / 24 * (f0[0] - 5.0 * f1[0] + 19.0 * f2[0]);
i1 = Q[idx - 2] - 1.0 / 24 * (f0[1] - 5.0 * f1[1] + 19.0 * f2[1]);
lam = -9.0 / 24.0;
  delta = 1.0 +
          lam * lam * Rp[idx - 3] * Rp[idx - 3] *
              (-kappa * kappa / (R[idx - 3] * R[idx - 3]) -
               (-E + V[idx - 3]) * (2 * c + (E - V[idx - 3]) / c) / c);
  M11 = -double(kappa) * lam * Rp[idx - 3] / R[idx - 3] + 1.0;
  M22 = double(kappa) * lam * Rp[idx - 3] / R[idx - 3] + 1.0;
  M21 = lam * Rp[idx - 3] * (-E + V[idx - 3]) / c;
  M12 = lam * Rp[idx - 3] * (2 * c + (E - V[idx - 3]) / c);
*P_last = (M11 * i0 + M12 * i1) / delta;
*Q_last = (M21 * i0 + M22 * i1) / delta;
}
|
<filename>lang/py/pylib/06/mmap/mmap_write_copy.py
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
import mmap
import shutil
import contextlib
#Copy the example file
shutil.copyfile('lorem.txt','lorem_copy.txt')
word='consectetuer'
reversed1=word[::-1]
with open('lorem_copy.txt','r+') as f:
with contextlib.closing(mmap.mmap(f.fileno(),0,access=mmap.ACCESS_COPY)) as m:
print'Memory Before:'
print m.readline().rstrip()
print'File Before:'
print f.readline().rstrip()
print
m.seek(0) #rewind
loc=m.find(word)
m[loc:loc+len(word)]=reversed1
m.seek(0)
print'Memory After:'
print m.readline().rstrip()
f.seek(0)
print'File After :'
print f.readline().rstrip()
|
/* Cydia - iPhone UIKit Front-End for Debian APT
* Copyright (C) 2008-2015 <NAME> (saurik)
*/
/* GNU General Public License, Version 3 {{{ */
/*
* Cydia is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License,
* or (at your option) any later version.
*
* Cydia is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Cydia. If not, see <http://www.gnu.org/licenses/>.
**/
/* }}} */
#ifndef CyteKit_URLProtocol_H
#define CyteKit_URLProtocol_H
#include <UIKit/UIKit.h>
@interface CyteURLProtocol : NSURLProtocol
+ (NSString *) scheme;
- (void) _returnPNGWithImage:(UIImage *)icon forRequest:(NSURLRequest *)request;
- (bool) loadForPath:(NSString *)path ofRequest:(NSURLRequest *)request;
@end
#endif//CyteKit_URLProtocol_H
|
/**
* Clears a value stored at the given index in this storage.
*
* @param index the index to clear a value at.
* @return {@code true} if this storage array is emptied, {@code false}
* otherwise.
*/
public boolean clear(short index) {
if (indexes == null) {
return clearDense(index);
} else {
return clearSparse(index);
}
} |
<filename>tests/column_test.py
import pytest
from dbjudge.structures.column import Column
from dbjudge import exceptions
def test_unsupported_ctype():
with pytest.raises(exceptions.InvalidColumnTypeError) as expected_exception:
Column('randomname', 'nosupptype')
assert 'Unsupported column type: nosupptype' in str(
expected_exception.value)
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package Controle;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.Date;
import javax.swing.JOptionPane;
/**
*
* @author Mr-Robot
*/
public class ControleValidacao {
ConectaBanco connex = new ConectaBanco();
int valida, operacao, senhaValidacao;
public void Validar(String senha){
connex.conexao();
try {
connex.executaSQL("select * from vencimento");
connex.rs.last();
valida = Integer.parseInt(connex.rs.getString("datavenc"));
operacao = (valida + 132) / 4;
senhaValidacao = Integer.parseInt(senha);
if(operacao == senhaValidacao){
int dia, mes, ano;
String AcertaMes, AcertaDia, ProximaSenha;
                // getting today's date
SimpleDateFormat df = new SimpleDateFormat("dd/MM/yyyy");
Date hoje = new Date();
String data = df.format(hoje);
char [] senhachar = data.toCharArray();
dia = Integer.parseInt("" + senhachar[0] + senhachar[1]);
mes = Integer.parseInt("" + senhachar[3] + senhachar[4]);
ano = Integer.parseInt("" + senhachar[6] + senhachar[7] + senhachar[8] + senhachar[9]);
                // looking for the error
//JOptionPane.showMessageDialog(null, "" + dia);
if(mes < 12){
mes++;
if(mes < 10){
AcertaMes = "0" + mes;
}else{
AcertaMes = "" + mes;
}
}else{
mes = 1;
ano++;
AcertaMes = "0" + mes;
}
if(dia < 10){
AcertaDia = "0" + dia;
}else{
AcertaDia = "" + dia;
}
ProximaSenha = AcertaDia + AcertaMes;
PreparedStatement pst = connex.conn.prepareStatement("insert into vencimento(datavenc)values(?)");
pst.setString(1, ProximaSenha);
pst.execute();
//JOptionPane.showMessageDialog(null, "" + ProximaSenha);
}else{
JOptionPane.showMessageDialog(null, "senha errada");
}
//JOptionPane.showMessageDialog(null, valida);
} catch (SQLException ex) {
JOptionPane.showMessageDialog(null, "Erro ao validar!\nERRO: " + ex);
}
connex.desconecta();
}
}
|
package client;
import java.net.URL;
import java.util.ArrayList;
import java.util.ResourceBundle;
import javafx.scene.Node;
import entites.Book;
import entites.LibrarianNew;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import javafx.event.ActionEvent;
import javafx.fxml.FXML;
import javafx.fxml.Initializable;
import javafx.scene.control.Button;
import javafx.scene.control.ChoiceBox;
import javafx.scene.control.TableColumn;
import javafx.scene.control.TableView;
import javafx.scene.control.TextField;
import javafx.scene.control.cell.PropertyValueFactory;
import javafx.scene.text.Text;
/**
 * ViewWorkersController is the controller that shows the workers we have in the database.
 * In this class we can search for workers by first name, last name, or ID.
 * After the search mode is chosen, we take the text entered by the user and send it to the
 * server to check whether the worker exists or not.
 * If it exists, we show it in the table view.
 *
 */
public class ViewWorkersController implements Initializable {
private ArrayList<String> message;
@FXML
private Text NoWorkersFound;
@FXML
private TableView<LibrarianNew> table;
@FXML
private TableColumn<LibrarianNew, String> LibrarianID;
@FXML
private TableColumn<LibrarianNew, String> FirstName;
@FXML
private TableColumn<LibrarianNew, String> LastName;
@FXML
private TableColumn<LibrarianNew, String> Email;
@FXML
private TableColumn<LibrarianNew, String> PhoneNumber;
@FXML
private TableColumn<LibrarianNew, String> Organization;
@FXML
private TextField SearchBox;
@FXML
private ChoiceBox<String> SearchOptions;
@FXML
private Button SearchBTN;
ClientConsole clientCon = new ClientConsole();
// @FXML
// void Exit(ActionEvent event) {
// ((Node) event.getSource()).getScene().getWindow().hide();
// }
	/*
	 * This method Exit closes the current page and returns to the previous GUI (page).
	 *
	 */
@FXML
void Exit(ActionEvent event) {
((Node) event.getSource()).getScene().getWindow().hide();
}
	/*
	 * This method finds workers using the selected search mode.
	 * We send the selected mode to the server together with the text we get from the user.
	 */
@FXML
void FindWorkers(ActionEvent event) throws ClassNotFoundException {
message = new ArrayList<String>();
if (SearchBox.getText().trim().isEmpty()) {
NoWorkersFound.setText("Empty Field");
} else {
NoWorkersFound.setText("");
switch ((String) SearchOptions.getValue()) {
case "By First Name": { // Search for workers By first name
message.add("FindWorkersByFirstName");
message.add(SearchBox.getText());
Object obj = (Object) message;
Object obj1 = new Object();
clientCon.execute(obj);
ArrayList<String> message = (ArrayList<String>) obj;
System.out.println(message + "final");
try {
Thread.currentThread().sleep(1200);
} catch (Exception e) {
System.out.println("Exception At AddNewSubscriberController in Function addNew");
}
obj1 = clientCon.Getrespond();
if (obj1 == null) {
NoWorkersFound.setText("No Workers Found");
} else {
ArrayList<LibrarianNew> message1 = (ArrayList<LibrarianNew>) obj1;
LibrarianID.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("UserID"));
FirstName.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("FirstName"));
LastName.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("LastName"));
Email.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("Email"));
PhoneNumber.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("PhoneNumber"));
Organization.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("Organization"));
ObservableList<LibrarianNew> toShow = FXCollections.observableArrayList();
toShow.addAll(message1);
table.setItems(toShow);
}
break;
}
// new case
case "By Last Name": { // Search for workers By first name
message.add("FindWorkersByLastName");
message.add(SearchBox.getText());
Object obj = (Object) message;
Object obj1 = new Object();
clientCon.execute(obj);
ArrayList<String> message = (ArrayList<String>) obj;
System.out.println(message + "final");
try {
Thread.currentThread().sleep(1200);
} catch (Exception e) {
System.out.println("Exception At ViewWorkersController in Function FindWorkers");
}
obj1 = clientCon.Getrespond();
if (obj1 == null) {
NoWorkersFound.setText("No Workers Found");
} else {
ArrayList<LibrarianNew> message1 = (ArrayList<LibrarianNew>) obj1;
LibrarianID.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("UserID"));
FirstName.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("FirstName"));
LastName.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("LastName"));
Email.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("Email"));
PhoneNumber.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("PhoneNumber"));
Organization.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("Organization"));
ObservableList<LibrarianNew> toShow = FXCollections.observableArrayList();
toShow.addAll(message1);
table.setItems(toShow);
}
break;
}
case "By ID": { // Search for workers By first name
message.add("FindWorkersByID");
message.add(SearchBox.getText());
Object obj = (Object) message;
Object obj1 = new Object();
clientCon.execute(obj);
ArrayList<String> message = (ArrayList<String>) obj;
System.out.println(message + "final");
try {
Thread.currentThread().sleep(1200);
} catch (Exception e) {
System.out.println("Exception At ViewWorkersController in Function FindWorkers");
}
obj1 = clientCon.Getrespond();
if (obj1 == null) {
NoWorkersFound.setText("No Workers Found");
} else {
ArrayList<LibrarianNew> message1 = (ArrayList<LibrarianNew>) obj1;
LibrarianID.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("UserID"));
FirstName.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("FirstName"));
LastName.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("LastName"));
Email.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("Email"));
PhoneNumber.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("PhoneNumber"));
Organization.setCellValueFactory(new PropertyValueFactory<LibrarianNew, String>("Organization"));
ObservableList<LibrarianNew> toShow = FXCollections.observableArrayList();
toShow.addAll(message1);
table.setItems(toShow);
}
break;
}
}
}
}
/*
*
* initialize the current screen
*/
@Override
public void initialize(URL arg0, ResourceBundle arg1) {
SearchOptions.getItems().add("By First Name");
SearchOptions.getItems().add("By Last Name");
SearchOptions.getItems().add("By ID");
SearchOptions.getSelectionModel().select(0);
// TODO Auto-generated method stub
}
} |
“Who are all these people interested in this?"
The question, asked by Shilpi Kumar, head of product commercialization at Filament, summed up the reaction to a Trusted IoT Alliance event sponsored by Cisco in San Jose last week. As the event showcased, the intersection of the Internet of Things (IoT) and blockchain is becoming more congested, with commercial products now live, instead of merely proofs of concept.
According to Kumar, only a year ago Filament, which sells IoT hardware connected to the blockchain, was often receiving confused, if not skeptical, looks when the company mentioned it was developing blockchain for IoT applications. And now, she said, looking out over a room of about 100 attendees, “there’s all these people sitting with us at that intersection.”
Not only are more entrepreneurs interested in the space, but, in the past three months, calls from corporate clients asking how the blockchain plays a role in Filament’s business model have increased as well, she said.
This increased interest could be due, in part, to the work that Bosch, Cisco and the other members of a previously unnamed group have done to create awareness of the many use cases for blockchain in the IoT industry.
But, according to Anoop Nannra, the senior leader and head of distributed ledger technology and blockchain incubation at Cisco’s Strategic Innovation Group, both IoT and blockchain are nascent industries where much collaboration must be done to ensure the industry succeeds.
“A number of blockchain IoT startups in this space came together and recognized common challenges ... that there’s a missing identity later in IoT and maybe blockchain could potentially solve that,” said Ryan Orr, CEO of Chronicled, a blockchain startup that’s also a part of the alliance.
He continued:
“But if we all took a different approach with smart contracts, we’d end up with [a bunch] of different startups and processes that would create confusion and not any one would take hold of the market.”
The alliance was set up late last year to develop common protocol 'primitives' to merge blockchain and IoT through smart contracts using ethereum, the Linux Foundation’s Hyperledger and JP Morgan Chase’s Quorum. And, although Orr believes there will be hundreds of IoT blockchains, the alliance’s hope is that it can create interoperability between those.
About half of the room consisted of blockchain aficionados, and half were individuals and companies representing the IoT space. According to Joe Pindar, CTO and director of product strategy at Gemalto, this diversity matters as the industry tries to weed through the froth and spin.
Get your engines ready
The alliance seems to have cut through some of the hype within the blockchain IoT space, at least in the automotive and transportation industry. Three presentations at the event focused on this sector, including a demo by Stephan Tual, founder and COO at Slock.it (of The DAO fame).
Slock.it’s Share and Charge project built autonomous electric-car charging stations using Raspberry Pis. The effort also allows owners of charging stations to share their energy reserves with others.
According to Tual, there are only 6,181 charging stations throughout Germany for a population of more than 82 million. But the number of Germans with electric cars and personal charging stations is estimated at over 45,000.
“By sharing charging stations with each other, you can double the number of charging stations in the country,” Tual said.
Slock.it’s app for the electric charging service is currently downloadable, with about a thousand charging stations currently connected. The project, which was built on ethereum but abstracted the cryptocurrency from consumers, was very popular and captured people’s attention, Tual claimed.
There were hurdles, however. Installing Raspberry Pis in charging stations came with its challenges, including needing to measure temperature changes so the machine wouldn’t shut down. Further, the public key infrastructure used to secure cryptocurrency transactions is not only difficult for consumers to understand, but also comes with serious consequences in the case of loss or theft.
And the tools available for blockchain are still young and underdeveloped, Tual said. For example, because the economic 'gas' price estimation is off, transactions on ethereum are not as cheap as the industry first touted. Gas is the term for the internal price for making a transaction or running a smart contract on ethereum.
Although Vitalik Buterin, the founder of ethereum, has said he’ll fix the issue with various hard forks, according to Tual, he argued a more sensible approach would be to initiate a state channel transaction, where each subsequent transaction and receipt is replaced by the next. This means “there's only one blockchain transaction instead of 147 ... so it’s way cheaper”.
“Block-ifying things is not straightforward.” Tual said. “But we’ve proved that we can do blockchain development outside a [proof-of-concept] and we’re quite proud of that.”
Other projects
Executives from Oaken Innovations and Bosch also presented automotive applications for blockchain in the IoT space.
Oaken demoed its tollbooth proof-of-concept, which uses IPFS and ethereum to allow Tesla cars to pay automatically at toll booths. The project won first place at a United Arab Emirates-sponsored blockchain hack recently.
“The car goes to the tollbooth and it’s a true machine-to-machine transaction, as they both have ethereum nodes inside,” said John Gerryts, cofounder and CEO of Oaken. “We were able to reduce transaction costs, going from the traditional card models with 2% to 4% transaction fees and reduce that to a 0.1% fee.”
And Bosch demoed its blockchain-based system to prevent odometer fraud.
According to Timo Gessmann, director of Bosch IoT Lab, nearly one in three cars' odometers are manipulated before they are sold. With a lower number of miles on a car, both individuals and car dealers can sell a vehicle for a higher price.
“Odometer numbers can be like a cryptocurrency,” said Gessmann during his presentation at the event.
The company is currently testing the project as a white-label solution, offering certificates that guarantee the odometer reading is correct, because the data has been recorded on a blockchain. The company has also developed CertifiCar, a consumer-facing mobile application that tracks car mileage.
Echoing Tual’s sentiments, Gessman said blockchain-based solutions are not easy builds. Ethereum transaction costs are high and it costs a considerable amount to build trust within consortia sharing data, he said.
The physical-digital link
Nearly all the other demos at the event focused on tracking the provenance of products along the supply chain.
BitSE demoed its work with VeChain to develop unique tags for all sorts of luxury goods.
Currently the company is working with the biggest wine importer in China. The company developed a tag embedded with a chip that sits on the top of the wine bottle. The chip monitors vibrations, temperature and other characteristics throughout the supply chain. And if someone wanted to tamper with the contents inside the bottle, upon opening it, they would destroy the unique tag.
The company also has its blockchain-based tags in luxury brands.
BitSE has more than two million product identities in production running on VeChain. Those products are worth about 450 million renminbi, according to DJ Qian, CEO of BitSE.
Qian said that, in June, the company will be announcing a particular luxury brand using BitSE’s tags, which will not only allow consumers and merchants to scan the tags with their phone to verify the authenticity of the piece, but will also allow them to interact with the brand.
Chronicled then presented its tamper-proof crypto-seal for packages, documents and other high-value goods, such as electronics and forensic evidence bags. Sam Radocchia, the chief product officer at Chronicled, said the company has been working with the pharmaceutical industry too.
Filament also demoed a sharing economy handheld drill. The drill contained what Filament calls “the path”, a small Bluetooth-enabled device with a built-in contract that allows people to lease the tool for a specific amount of time. When someone signs a digital lease, the information is sent to the drill and an LED light on the tool lights up green indicating it’s ready for use. When the lease expires, the drill ceases to function.
“It’s kind of gimmicky,” said Filament’s Kumar. “I don’t think we should have a sharing economy for drills [due to their short lifespan] ... but our clients are thinking about other industrial infrastructure.”
For instance, many construction companies are wondering if they can not only monetize the leasing of their large equipment, but also monetize the data that’s collected from those machines in use.
Experimentation and governance
While a legal entity has not yet been created, the alliance has set up a working governance structure.
So far, the alliance’s model will have 21 industry board seats with five executive board seats. Two in-person meetings per year will see the board review new proposals and allocate funding. And as the alliance grows, smaller committee working groups will be created to focus on particular niches.
“It was also important to show that there was some validation of the technical idea, that it makes sense to do a blockchain-agnostic IoT-oriented working group,” said Zaki Manian, founder of SKUChain, a DLT startup focused on supply chain, and one of the founding members of the alliance. “The initial thing we put some effort into was a basic proof-of-concept of this idea of a unified registry.”
The unified registry allows companies to put IoT device identities onto a blockchain and give each device its own private key. The API, which the alliance announced about a month ago, allows all these registries on separate blockchains to be unified.
“We clearly demonstrated with limited resources that we can make this work,” Manian said.
And it worked without recreating the wheel, said Nannra. Instead, the group developed a model for portability of identities and interoperability between blockchains.
Yet, the group’s mission isn’t to create standards – Orr thinks it’s too early for that. Instead, the aim is to build an open-source blockchain base layer via which collaborating companies can then compete with the applications built on top, he said.
According to Orr, the group is focusing on getting 15 pilots running in the next 12 months.
Image via Bailey Reutzel for CoinDesk |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
TH = raw_input()
TA = raw_input()
TEAMS = dict(h=TH, a=TA)
N = int(raw_input())
counts = {}
for i in xrange(N):
minute, team, player, card = raw_input().split()
k = (team, player)
counts.setdefault(k, 0)
if counts[k] >= 10:
continue
counts[k] += 1
if card == "r" or counts[k] >= 2:
print TEAMS[team], player, minute
counts[k] = 10
continue
|
def sort_docs(self, sort_field):
for doc in self.doc_list:
if not hasattr(doc, "doc_metadata"):
print "A doc in doc_list doesn't have metadata, "\
"so we can't sort on a metadata field!"
raise Exception
if not hasattr(doc.doc_metadata, sort_field):
print "A doc in doc_list doesn't have the metadata field: "\
"'{0}', so we can't sort on that field!".format(sort_field)
raise Exception
sorted_doc_list = sorted(self.doc_list,
key=lambda doc: getattr(doc.doc_metadata, sort_field))
subsets = []
subset_start = 0
for i in range(1, len(sorted_doc_list)):
prev_doc_field = getattr(sorted_doc_list[i-1].doc_metadata,
sort_field)
curr_doc_field = getattr(sorted_doc_list[i].doc_metadata,
sort_field)
if prev_doc_field != curr_doc_field:
subsets.append(sorted_doc_list[subset_start : i])
subset_start = i
if i == len(sorted_doc_list) - 1:
subsets.append(sorted_doc_list[subset_start:])
return subsets |
<filename>bower_components/ocrad-bower/ocrad-0.23-pre1/ucs.cc
/* GNU Ocrad - Optical Character Recognition program
Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
2012, 2013 <NAME>.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <cctype>
#include "ucs.h"
int UCS::base_letter( const int code )
{
switch( code )
{
case CAGRAVE:
case CAACUTE:
case CACIRCU:
case CATILDE:
case CADIAER:
case CARING : return 'A';
case CCCEDI : return 'C';
case CEGRAVE:
case CEACUTE:
case CECIRCU:
case CEDIAER: return 'E';
case CGBREVE: return 'G';
case CIGRAVE:
case CIACUTE:
case CICIRCU:
case CIDIAER:
case CIDOT : return 'I';
case CNTILDE: return 'N';
case COGRAVE:
case COACUTE:
case COCIRCU:
case COTILDE:
case CODIAER: return 'O';
case CSCEDI : return 'S';
case CUGRAVE:
case CUACUTE:
case CUCIRCU:
case CUDIAER: return 'U';
case CYACUTE: return 'Y';
case SAGRAVE:
case SAACUTE:
case SACIRCU:
case SATILDE:
case SADIAER:
case SARING : return 'a';
case SCCEDI : return 'c';
case SEGRAVE:
case SEACUTE:
case SECIRCU:
case SEDIAER: return 'e';
case SGBREVE: return 'g';
case SIGRAVE:
case SIACUTE:
case SICIRCU:
case SIDIAER:
case SINODOT: return 'i';
case SNTILDE: return 'n';
case SOGRAVE:
case SOACUTE:
case SOCIRCU:
case SOTILDE:
case SODIAER: return 'o';
case SSCEDI : return 's';
case SUGRAVE:
case SUACUTE:
case SUCIRCU:
case SUDIAER: return 'u';
case SYACUTE:
case SYDIAER: return 'y';
default: return 0;
}
}
int UCS::compose( const int letter, const int accent )
{
switch( letter )
{
case 'A': if( accent == '\'') return CAACUTE;
if( accent == '`' ) return CAGRAVE;
if( accent == '^' ) return CACIRCU;
if( accent == ':' ) return CADIAER; break;
case 'E': if( accent == '\'') return CEACUTE;
if( accent == '`' ) return CEGRAVE;
if( accent == '^' ) return CECIRCU;
if( accent == ':' ) return CEDIAER; break;
case 'G': return CGBREVE;
case '[':
case 'I': if( accent == '\'') return CIACUTE;
if( accent == '`' ) return CIGRAVE;
if( accent == '^' ) return CICIRCU;
if( accent == ':' ) return CIDIAER; break;
case 'N': if( accent != ':' ) return CNTILDE; break;
case 'O': if( accent == '\'') return COACUTE;
if( accent == '`' ) return COGRAVE;
if( accent == '^' ) return COCIRCU;
if( accent == ':' ) return CODIAER; break;
case 'S': return CSCARON;
case 'U':
case 'V': if( accent == '\'') return CUACUTE;
if( accent == '`' ) return CUGRAVE;
if( accent == '^' ) return CUCIRCU;
if( accent == ':' ) return CUDIAER; break;
case 'Z': return CZCARON;
case 'a': if( accent == '\'') return SAACUTE;
if( accent == '`' ) return SAGRAVE;
if( accent == '^' ) return SACIRCU;
if( accent == ':' ) return SADIAER; break;
case 'e': if( accent == '\'') return SEACUTE;
if( accent == '`' ) return SEGRAVE;
if( accent == '^' ) return SECIRCU;
if( accent == ':' ) return SEDIAER; break;
case '9':
case 'g': return SGBREVE;
case '|':
case ']':
case 'i':
case 'l': if( accent == '\'') return SIACUTE;
if( accent == '`' ) return SIGRAVE;
if( accent == '^' ) return SICIRCU;
if( accent == ':' ) return SIDIAER; break;
case 'n': if( accent != ':' ) return SNTILDE; break;
case 'o': if( accent == '\'') return SOACUTE;
if( accent == '`' ) return SOGRAVE;
if( accent == '^' ) return SOCIRCU;
if( accent == ':' ) return SODIAER; break;
case 's': return SSCARON;
case 'u':
case 'v': if( accent == '\'') return SUACUTE;
if( accent == '`' ) return SUGRAVE;
if( accent == '^' ) return SUCIRCU;
if( accent == ':' ) return SUDIAER; break;
case 'y': if( accent == '\'') return SYACUTE;
if( accent == ':' ) return SYDIAER; break;
case 'z': return SZCARON;
}
return 0;
}
bool UCS::isalnum( const int code )
{
return ( UCS::isalpha( code ) || UCS::isdigit( code ) );
}
bool UCS::isalpha( const int code )
{
return ( ( code < 128 && std::isalpha( code ) ) || base_letter( code ) );
}
bool UCS::isdigit( const int code )
{
return ( code <= '9' && code >= '0' );
}
bool UCS::ishigh( const int code )
{
if( isupper( code ) || isdigit( code ) ) return true;
switch( code )
{
case 'b': case 'd': case 'f': case 'g': case 'h': case 'i': case 'j':
case 'k': case 'l': case 'p': case 'q': case 't': case 'y': case '|':
return true;
default : return false;
}
}
bool UCS::islower( const int code )
{
if( code < 128 && std::islower( code ) ) return true;
const int base = base_letter( code );
return ( base && std::islower( base ) );
}
bool UCS::islower_ambiguous( const int code )
{
if( islower_small_ambiguous( code ) ) return true;
switch( code )
{
case 'k': case 'p': case SCCEDI:
case SIGRAVE: case SIACUTE: case SICIRCU: case SIDIAER:
case SOGRAVE: case SOACUTE: case SOCIRCU: case SOTILDE: case SODIAER:
case SUGRAVE: case SUACUTE: case SUCIRCU: case SUDIAER:
case SSCEDI: case SSCARON: case SZCARON:
return true;
default : return false;
}
}
bool UCS::islower_small( const int code )
{
if( code >= 128 || !std::islower( code ) ) return false;
switch( code )
{
case 'a': case 'c': case 'e': case 'm': case 'n': case 'o':
case 'r': case 's': case 'u': case 'v': case 'w': case 'x':
case 'z': return true;
default : return false;
}
}
bool UCS::islower_small_ambiguous( const int code )
{
if( code >= 128 || !std::islower( code ) ) return false;
switch( code )
{
case 'c': case 'o': case 's': case 'u': case 'v': case 'w':
case 'x': case 'z': return true;
default : return false;
}
}
bool UCS::isspace( const int code )
{
return ( code < 128 && std::isspace( code ) );
}
bool UCS::isupper( const int code )
{
if( code < 128 && std::isupper( code ) ) return true;
const int base = base_letter( code );
return ( base && std::isupper( base ) );
}
bool UCS::isvowel( int code )
{
if( code >= 128 ) code = base_letter( code );
if( !code || !std::isalpha( code ) ) return false;
code = std::tolower( code );
return ( code == 'a' || code == 'e' || code == 'i' ||
code == 'o' || code == 'u' );
}
unsigned char UCS::map_to_byte( const int code )
{
if( code < 0 ) return 0;
if( code < 256 ) return code;
switch( code )
{
case CGBREVE: return 0xD0;
case SGBREVE: return 0xF0;
case CIDOT : return 0xDD;
case SINODOT: return 0xFD;
case CSCEDI : return 0xDE;
case SSCEDI : return 0xFE;
case CSCARON: return 0xA6;
case SSCARON: return 0xA8;
case CZCARON: return 0xB4;
case SZCARON: return 0xB8;
case EURO : return 0xA4;
default : return 0;
}
}
const char * UCS::ucs_to_utf8( const int code )
{
static char s[7];
if( code < 0 || code > 0x7FFFFFFF ) { s[0] = 0; return s; } // invalid code
if( code < 128 ) { s[0] = code; s[1] = 0; return s; } // plain ascii
int i, mask;
if( code < 0x800 ) { i = 2; mask = 0xC0; } // 110X XXXX
else if( code < 0x10000 ) { i = 3; mask = 0xE0; } // 1110 XXXX
else if( code < 0x200000 ) { i = 4; mask = 0xF0; } // 1111 0XXX
else if( code < 0x4000000 ) { i = 5; mask = 0xF8; } // 1111 10XX
else { i = 6; mask = 0xFC; } // 1111 110X
s[i] = 0; --i;
int d = 0;
for( ; i > 0; --i, d+=6 )
s[i] = 0x80 | ( ( code >> d ) & 0x3F ); // 10XX XXXX
s[0] = mask | ( code >> d );
return s;
}
int UCS::to_nearest_digit( const int code )
{
switch( code )
{
case 'O':
case 'Q':
case 'o': return '0';
case '|':
case 'I':
case 'L':
case 'l':
case SINODOT: return '1';
case 'Z':
case 'z': return '2';
case 'A':
case 'q': return '4';
case 'S':
case 's': return '5';
case 'G':
case 'b':
case SOACUTE: return '6';
case 'J':
case 'T': return '7';
case '&':
case 'B': return '8';
case 'g': return '9';
default: return code;
}
}
int UCS::to_nearest_letter( const int code )
{
switch( code )
{
case '0': return 'O';
case '1': return 'l';
case '2': return 'Z';
case '4': return 'q';
case '5': return 'S';
case '6': return SOACUTE;
case '7': return 'I';
case '8': return 'B';
case '9': return 'g';
default: return code;
}
}
int UCS::toupper( const int code )
{
if( code < 128 ) return std::toupper( code );
switch( code )
{
case SAGRAVE: return CAGRAVE;
case SAACUTE: return CAACUTE;
case SACIRCU: return CACIRCU;
case SATILDE: return CATILDE;
case SADIAER: return CADIAER;
case SARING : return CARING;
case SCCEDI : return CCCEDI;
case SEGRAVE: return CEGRAVE;
case SEACUTE: return CEACUTE;
case SECIRCU: return CECIRCU;
case SEDIAER: return CEDIAER;
case SGBREVE: return CGBREVE;
case SIGRAVE: return CIGRAVE;
case SIACUTE: return CIACUTE;
case SICIRCU: return CICIRCU;
case SIDIAER: return CIDIAER;
case SNTILDE: return CNTILDE;
case SOGRAVE: return COGRAVE;
case SOACUTE: return COACUTE;
case SOCIRCU: return COCIRCU;
case SOTILDE: return COTILDE;
case SODIAER: return CODIAER;
case SSCEDI : return CSCEDI;
case SUGRAVE: return CUGRAVE;
case SUACUTE: return CUACUTE;
case SUCIRCU: return CUCIRCU;
case SUDIAER: return CUDIAER;
case SYACUTE: return CYACUTE;
default: return code;
}
}
|
def initialize(self):
if getattr(self, '_initialized', False):
return
out = getattr(self, '_output_layers', None)
if out is None:
self.initialize_layers()
self._check_for_unused_kwargs()
iter_funcs = self._create_iter_funcs(
self.layers_, self.objective, self.update,
self.y_tensor_type,
)
self.train_iter_, self.eval_iter_, self.predict_iter_ = iter_funcs
self._initialized = True |
s=list(input())
n=len(s)
l=[-1 for i in range(n+1)]
ct=0
ans=0
if s[0]=='<':l[0]=0
if s[n-1]=='>':l[n]=0
for i in range(n-1):
if s[i]=='>' and s[i+1]=='<':
l[i+1]=0
for i in range(n):
if s[i]=='<':
l[i+1]=l[i]+1
for i in range(n-1,-1,-1):
if s[i]=='>':
l[i]=l[i+1]+1
for i in range(n-1):
if s[i]=='<' and s[i+1]=='>':
l[i+1]=max(l[i]+1,l[i+2]+1)
print(sum(l)) |
/*------------------------------------------------------------------------------
* 2048.c
*
* File: main.c
* Author: <NAME>
* Date: 2021/04/28
*
* MIT License - Copyright (c) 2021 <NAME>
*----------------------------------------------------------------------------*/
#include "../includes/main.h"
#include "../includes/appState.h"
#include "../includes/controls.h"
#include "../includes/leaderboard.h"
#include "../includes/screen.h"
/*------------------------------------------------------------------------------
* Main app loop
*----------------------------------------------------------------------------*/
int main(){
pthread_t tId;
appState_t appState = getDefaultAppState();
pthread_create(&tId, NULL, screenThread, (void *)&tId);
appState.leaderboard = readLeaderboardFile();
do{
updateScreenState(&appState);
appState.screen.lastScreen = appState.screen.currentScreen;
appState.screen.forceClear = FALSE;
captureUserAction(&appState);
handleUserAction(&appState);
} while(appState.appStatus == STATUS_RUNNING);
return appState.appStatus;
}
|
use crate::errors::CollectionError;
use solana_program::{
account_info::AccountInfo,
entrypoint::ProgramResult,
msg,
program::{invoke, invoke_signed},
program_error::ProgramError,
pubkey::Pubkey,
system_instruction,
sysvar::{rent::Rent, Sysvar},
};
use std::convert::TryInto;
pub fn assert_owned_by(account: &AccountInfo, owner: &Pubkey) -> ProgramResult {
if account.owner != owner {
msg!(
"{} Owner Invalid, Expected {}, Got {}",
account.key,
owner,
account.owner
);
Err(CollectionError::IncorrectOwner.into())
} else {
Ok(())
}
}
pub fn assert_authority(account: &AccountInfo, authorities: Vec<Pubkey>) -> ProgramResult {
    if !authorities.contains(account.key) {
msg!(
"{} Authority Invalid",
account.key,
);
return Err(CollectionError::InvalidAuthority.into());
}
if !account.is_signer {
msg!(
"{} Authority is not a signer",
account.key,
);
return Err(CollectionError::AuthorityIsNotSigner.into());
}
Ok(())
}
#[inline(always)]
pub fn create_or_allocate_account_raw<'a>(
program_id: Pubkey,
new_account_info: &AccountInfo<'a>,
rent_sysvar_info: &AccountInfo<'a>,
system_program_info: &AccountInfo<'a>,
payer_info: &AccountInfo<'a>,
size: usize,
signer_seeds: &[&[u8]],
) -> Result<(), ProgramError> {
let rent = &Rent::from_account_info(rent_sysvar_info)?;
let required_lamports = rent
.minimum_balance(size)
.max(1)
.saturating_sub(new_account_info.lamports());
if required_lamports > 0 {
msg!("Transfer {} lamports to the new account", required_lamports);
invoke(
&system_instruction::transfer(&payer_info.key, new_account_info.key, required_lamports),
&[
payer_info.clone(),
new_account_info.clone(),
system_program_info.clone(),
],
)?;
}
let accounts = &[new_account_info.clone(), system_program_info.clone()];
msg!("Allocate space for the account");
invoke_signed(
&system_instruction::allocate(new_account_info.key, size.try_into().unwrap()),
accounts,
&[&signer_seeds],
)?;
msg!("Assign the account to the owning program");
invoke_signed(
&system_instruction::assign(new_account_info.key, &program_id),
accounts,
&[&signer_seeds],
)?;
msg!("Completed assignment!");
Ok(())
} |
/**
* List all the steps of the target BMR cluster.
*
* @param request The request containing the ID of target BMR cluster.
*
* @return The response containing the list of steps owned by the cluster.
*/
public ListStepsResponse listSteps(ListStepsRequest request) {
checkNotNull(request, "request should not be null.");
checkStringNotEmpty(request.getClusterId(), "The parameter clusterId should not be null or empty string.");
InternalRequest internalRequest =
this.createRequest(request, HttpMethodName.GET, CLUSTER, request.getClusterId(), STEP);
if (request.getMarker() != null) {
internalRequest.addParameter("marker", request.getMarker());
}
if (request.getMaxKeys() >= 0) {
internalRequest.addParameter("maxKeys", String.valueOf(request.getMaxKeys()));
}
return this.invokeHttpClient(internalRequest, ListStepsResponse.class);
} |
TL;DR
Hello Particle community,
I’m here to present some of the recent work I’ve been doing to add Rust support to the photon.
(Full disclaimer: I’m under contract to improve Rust’s embedded development story and one of the tasks involves adding Rust support to well known development boards and I picked the photon. Yay!)
My focus is making sure that the tooling and development experience for this board is as good as possible on the three major OSes. To that goal I have created a template Cargo project to get you writing code for your photon as quickly as possible. Give it a try! I have successfully tested it on Linux and on Windows and expect that it should also work well on macOS.
I have also been creating a safe API on top of the Firmware API, and already have parts of the Cloud API, the USB Serial API, the delay API and the digital output API working. Furthermore, I have created memory-safe abstractions for sharing data between the application thread and the cloud functions, so all the examples in the project template are written in 100% safe Rust!
Do let me know if anything doesn't work for you by opening an issue in this issue tracker. Also let me know if I should prioritize work on some particular API that's currently missing. And, by the way, you can help me with that: adding more low-level bindings is as easy as adding the function signature to this block as explained here; building safe APIs on top of that requires some discussion about the invariants we want to preserve and how we want to do error handling, so chime in on the discussion.
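To give a rough feel for what adding a binding involves, here is a minimal, purely illustrative sketch (the firmware function name and signature below are invented for this example, not taken from the real Firmware API). The raw C signature goes in an extern block, and a small safe wrapper keeps the unsafe call in one place:
// Hypothetical low-level binding: `hal_read_millivolts` is a made-up name,
// standing in for whatever firmware function you want to expose.
extern "C" {
    fn hal_read_millivolts(pin: u16) -> i32;
}
// Safe wrapper around the raw call, so callers never write `unsafe` themselves.
pub fn read_millivolts(pin: u16) -> Option<i32> {
    // SAFETY: the (hypothetical) firmware call only reads hardware state for this pin.
    let raw = unsafe { hal_read_millivolts(pin) };
    if raw < 0 { None } else { Some(raw) }
}
Most of the safe-API discussion is about exactly these choices: what the wrapper should return and how errors get reported.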
Happy Rusting!
cc @dbrgn @zach |
import { EventEmitter } from "events";
import { Task } from "./types/rabbitmq";
class UserDataListener extends EventEmitter {
constructor() {
super();
}
genKey(addr: string, cmd: Task) {
return addr + "+" + cmd;
}
}
export default UserDataListener;
|
// This callback function continuously executes and reads the image data
void process_image_callback(const sensor_msgs::Image img)
{
enum Pos {LEFT, MIDDLE, RIGHT, NOTSEEN};
int white_pixel = 255;
int scanned_pixel = 0;
int detected_pixel = 0;
int ball_volume = 0;
int ball_position = 0;
int left_marker = img.width / 3;
int forward_marker = 2 * img.width / 3;
int right_marker = img.width;
bool ball_detected = false;
float linear_x;
float angular_z;
float go_nowhere = 0.0;
float go_slow = 0.1;
float go_fast = 0.5;
int ball_refpoints [3];
int ball_ref_axis = 0;
int hysteresis = 10;
for (int i = 0; i < img.height; i++)
{
ball_volume = detected_pixel;
for (int j = 0; j < img.step; ++j)
{
scanned_pixel = (i*img.step)+j;
if (img.data[scanned_pixel] == white_pixel)
{
detected_pixel++;
if (detected_pixel == 1)
{
ball_refpoints[0] = i;
ball_ref_axis = j/3;
}
ball_detected = true;
break;
}
}
if (detected_pixel > 0 && ball_volume == detected_pixel){
ball_refpoints[2] = i;
ball_refpoints[1] = ball_refpoints[0] + (int)((ball_refpoints[2] - ball_refpoints[0]) / 2);
ball_detected = true;
ball_volume = detected_pixel;
break;
}
}
if (ball_detected)
{
if (ball_ref_axis < left_marker)
{
ball_position = LEFT;
}
else if (ball_ref_axis >= left_marker && ball_ref_axis < forward_marker)
{
ball_position = MIDDLE;
}
else if (ball_ref_axis >= forward_marker)
{
ball_position = RIGHT;
}
}
else
{
ball_position = NOTSEEN;
}
switch (ball_position)
{
case LEFT:
linear_x = go_nowhere;
angular_z = go_fast;
break;
case MIDDLE:
if(ball_ref_axis >= img.width/2 - hysteresis && ball_ref_axis <= img.width/2 + hysteresis)
{
linear_x = go_fast;
angular_z = go_nowhere;
}
else if (ball_ref_axis < img.width/2 - hysteresis)
{
linear_x = go_nowhere;
angular_z = go_slow;
}
else if (ball_ref_axis > img.width/2 + hysteresis)
{
linear_x = go_nowhere;
angular_z = -1 * go_slow;
}
break;
case RIGHT:
linear_x = go_nowhere;
angular_z = -1 * go_fast;
break;
default:
linear_x = go_nowhere;
angular_z = go_nowhere;
break;
}
drive_robot(linear_x, angular_z);
} |
<filename>src/main/java/com/objectspace/coorperation/util/SerializeUtil.java
package com.objectspace.coorperation.util;
import java.io.*;
/**
 * @Description: Serialization utility class; only objects that implement the Serializable interface can be serialized
* @Author: NoCortY
* @Date: 2019/10/4
*/
public class SerializeUtil {
/**
     * @Description: Serializes an object
* @Param: [obj]
* @return: byte[]
* @Author: NoCortY
* @Date: 2019/10/4
*/
public byte[] serialize(Object obj){
ObjectOutputStream oos = null;
ByteArrayOutputStream baos = null;
try{
baos = new ByteArrayOutputStream();
oos = new ObjectOutputStream(baos);
oos.writeObject(obj);
byte[] byteArray = baos.toByteArray();
return byteArray;
}catch(IOException e){
e.printStackTrace();
}
return null;
}
/**
     * @Description: Deserializes an object
* @Param: [byteArray]
* @return: java.lang.Object
* @Author: NoCortY
* @Date: 2019/10/4
*/
public Object unSerialize(byte[] byteArray){
ByteArrayInputStream bais = null;
try {
            // deserialize the byte array back into an object
bais = new ByteArrayInputStream(byteArray);
ObjectInputStream ois = new ObjectInputStream(bais);
return ois.readObject();
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
} |
/* crossTalksAndMerge() - merges the sorted segments arr[bIndex..mIndex] (left) and arr[mIndex+1..eIndex] (right) into a single sorted arr[bIndex..eIndex]
                          and returns the number of cross quality talks, which is found simultaneously as the merging is done. To do this, we note that
                          just before an element of right, arr[j] with j>mIndex, is inserted, exactly the remaining elements in left are greater than arr[j].
                          So, if i is the ptr of left used for merging, exactly mIndex - i + 1 cross talks are possible between arr[j] and left. So we find
                          the sum for all such j. Finally, we note that if any one of left or right is completely exhausted, then no additional cross talks
                          need to be included.
   ARG - int* arr, int* tempArr, size_t bIndex, size_t mIndex, size_t eIndex
RET - long crossTalks
*/
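/* Worked example: merging left = [2, 5] with right = [1, 3].
   When 1 is taken from right, both remaining left elements (2 and 5) exceed it, adding 2 cross talks;
   when 3 is taken, only 5 remains in left, adding 1 more, so the call returns 3 in total.
*/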
long BloomBrain::crossTalksAndMerge(int* arr,int* tempArr,size_t bIndex,size_t mIndex,size_t eIndex)
{
size_t leftPtr = bIndex;
size_t rightPtr = mIndex + 1;
size_t tempPtr = bIndex;
long crossTalks = 0;
while(leftPtr <= mIndex && rightPtr <= eIndex)
{
if(arr[leftPtr]<= arr[rightPtr])
{
tempArr[tempPtr++] = arr[leftPtr++];
}
else
{
crossTalks += mIndex - leftPtr + 1;
tempArr[tempPtr++] = arr[rightPtr++];
}
}
while(leftPtr <= mIndex)
tempArr[tempPtr++] = arr[leftPtr++];
while(rightPtr <= eIndex)
tempArr[tempPtr++] = arr[rightPtr++];
for(size_t i = bIndex;i<=eIndex;i++)
{
arr[i] = tempArr[i];
}
return crossTalks;
} |
///
/// For a given world <world> and a hypothetical robot position <position>
/// and a list of current real sensor values <real_sensor_values>
/// we compute the sensor values we would get (without any noise)
/// at the robot position <position>.
///
/// Already DURING the computation of these sensor values
/// we compute the sum of differences between these hypothetical sensor
/// values and the given real sensor values <real_sensor_values> and
/// if the sum reaches the threshold <max_allowed_difference>,
/// we stop the computation of the hypothetical sensor values
///
/// --> i.e., we then say: No! We cannot be at this hypothetical position
/// <position> since the sensor values we would get here are too
/// different compared to the actual real sensor values
/// <real_sensor_values>
///
bool Robot::check_hypothetical_position(Mat& world,
Point position,
double* real_sensor_values,
double max_allowed_difference
)
{
int sum_differences = 0;
for (int sensor_nr = 0; sensor_nr < nr_sensors; sensor_nr++)
{
double x = position.x;
double y = position.y;
double sensor_angle = sensor_angles[sensor_nr];
double sensor_dx = cos(orientation + sensor_angle);
double sensor_dy = sin(orientation + sensor_angle);
double sensor_startx = x + sensor_dx * radius;
double sensor_starty = y + sensor_dy * radius;
double sensor_max_dist = sensor_distances[sensor_nr];
int step;
for (step = 0; step < sensor_max_dist; step++)
{
double sx = sensor_startx + step*sensor_dx;
double sy = sensor_starty + step*sensor_dy;
if ((sx >= world.cols) || (sy >= world.rows) || (sx < 0) || (sy < 0))
break;
Vec3b pixel_color = world.at<Vec3b>((int)sy, (int)sx);
if ((pixel_color.val[0] == 0) && (pixel_color.val[1] == 0) && (pixel_color.val[2] == 0))
{
continue;
}
else
{
break;
}
}
int final_sensor_value = step;
int diff_to_real_sensor_value =
abs(final_sensor_value - (int)(real_sensor_values[sensor_nr]));
sum_differences += diff_to_real_sensor_value;
if (sum_differences > max_allowed_difference)
return false;
}
return true;
} |
/*-
* ============LICENSE_START=======================================================
* ONAP Policy Engine
* ================================================================================
* Copyright (C) 2018-2019 AT&T Intellectual Property. All rights reserved.
* ================================================================================
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ============LICENSE_END=========================================================
*/
package org.onap.policy.brms.api;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import org.onap.policy.api.PolicyException;
import org.onap.policy.brms.api.BrmsGateway.Factory;
import org.powermock.reflect.Whitebox;
public class BrmsGatewayTest {
private static final String FACTORY_FIELD = "factory";
private static Factory saveFactory;
private Thread thread;
@BeforeClass
public static void setUpBeforeClass() {
saveFactory = Whitebox.getInternalState(BrmsGateway.class, FACTORY_FIELD);
}
@AfterClass
public static void tearDownAfterClass() {
Whitebox.setInternalState(BrmsGateway.class, FACTORY_FIELD, saveFactory);
}
/**
* Installs a factory.
*/
@Before
public void setUp() {
thread = null;
Factory factory = new Factory() {
@Override
public BrmsHandler makeBrmsHandler(String configFile) throws PolicyException {
// Mock handler
return Mockito.mock(BrmsHandler.class);
}
@Override
public Thread makeThread(Runnable runnable) {
thread = super.makeThread(runnable);
return thread;
}
};
Whitebox.setInternalState(BrmsGateway.class, FACTORY_FIELD, factory);
}
/**
* Interrupts the thread, if there is one.
*/
@After
public void tearDown() throws InterruptedException {
if (thread != null) {
thread.interrupt();
thread.join(5000L);
assertFalse(thread.isAlive());
}
}
@Test
public void testFactory() throws InterruptedException {
assertNotNull(saveFactory);
assertNotNull(saveFactory.makeThread(() -> { }));
}
@Test
public void testGet() {
assertNull(BrmsGateway.getPolicyEngine());
}
@Test
public void testMain() throws Exception {
try {
final String[] args = new String[0];
BrmsGateway.main(args);
} catch (final Exception ex) {
fail("Not expected an exception: " + ex);
}
}
}
|
def on_enter(event_data):
pocs = event_data.model
pocs.next_state = 'scheduling'
try:
pocs.observatory.take_flat_fields()
except NotTwilightError:
pocs.logger.info("No longer twilight. Moving to next state.")
except NotSafeError:
pocs.logger.warning("Safety failed while taking twilight flats. Going to park.")
pocs.next_state = 'parking'
return
except Exception as err:
pocs.logger.error(f"Error taking flat fields: {err!r}. Continuing with states.") |
/**
* Tests the command-line arguments of the starter classes.
*
* @author BaseX Team 2005-12, BSD License
* @author Christian Gruen
*/
public abstract class MainTest {
/** Null output stream. */
static final PrintStream NULL = new PrintStream(new NullOutput());
/** Test database name. */
static final String NAME = Util.name(MainTest.class);
/** Input file. */
static final IOFile IN = new IOFile(Prop.TMP + NAME + ".in");
/** Drop test database. */
@AfterClass
public static void cleanUp() {
final Context ctx = new Context();
try {
new DropDB(NAME).execute(ctx);
} catch(final BaseXException ex) {
Util.debug(ex);
} finally {
ctx.close();
}
}
/**
* Runs a request with the specified arguments.
* @param args command-line arguments
* @return result
* @throws IOException I/O exception
*/
protected abstract String run(final String... args) throws IOException;
/**
* Runs a request and compares the result with the expected result.
* @param exp expected result
* @param args command-line arguments
* @throws IOException I/O exception
*/
final void equals(final String exp, final String... args)
throws IOException {
assertEquals(exp, run(args));
}
/**
* Runs a request and checks if the expected string is contained in the
* result.
* @param exp expected result
* @param args command-line arguments
* @throws IOException I/O exception
*/
final void contains(final String exp, final String... args)
throws IOException {
final String result = run(args);
if(!result.contains(exp)) {
fail('\'' + exp + "' not contained in '" + result + "'.");
}
}
} |
The first trip from Tel Aviv to Eilat is scheduled to take place in July 2015, and is anticipated to take exactly 7:06 minutes. Israeli entrepreneur Shai Agassi is leading the project together with Elon Musk in collaboration with a Japanese team.
Is the public transportation problem in Israel solved once and for all? Geektime learned that the Hyperloop project, initiated by serial entrepreneur and billionaire Elon Musk, CEO of Tesla and SpaceX, will launch in special collaboration with the Ministry of Transportation, Better Place and the Japanese firm Taigo. The project will cost about $10 billion. The Hyperloop will launch its first route from Tel Aviv to Eilat in July 2015, ahead of the local tourist season, which is expected to be particularly busy with visitors arriving to experience the new mode of transportation.
Israel first
According to information received by Geektime, secret talks began in August 2010 between the Ministry of Transportation and Elon Musk, who appreciated Israel’s advanced startup scene and visited Tel Aviv 12 times over the past two years in his private plane. According to sources, Transportation Minister Yisrael Katz was involved in the talks.
Geektime also learned that the project will be run in Israel by Shai Agassi, former CEO of Better Place, although during the negotiations with Musk he insisted that the Ministry of Transportation lead the project on behalf of the Israeli government. The Ministry of Transportation and Musk then agreed on appointing a Japanese company to handle the technological aspects of the project. The company was chosen personally by Musk in a secretive selection process conducted in Japan under heavy fog and in full cooperation with the Japanese authorities.
According to sources with knowledge of the selection process, Musk chose Japanese Taigo, headed by Ishimoto Tadahiro. While Agassi, together with the Ministry of Transportation was responsible for planning the infrastructure between Tel Aviv and Eilat, Tadahiro was responsible for building the Hyperloop and experimental components of the project. So far it has been a great success and has been based more or less on a document published by Musk in August 2013, knowing of course that the project will be taking shape in Israel.
100 passengers every 30 seconds
Geektime also learned that the first trips will have a driver supervisor through the end of the beta period of the project, which is expected to be completed around November 2015. Trips will then be completely automatic and unsupervised. The exact route of the Hyperloop and location of the infrastructure could not be published at this time, but we know that the project is under heavy security through a unique collaboration between special units.
The Hyperloop's pods will travel through long steel tubes with solar panels installed along their tops. In the first stage the small pods will carry passengers only, but sources close to the project told us that in the future larger cargo pods will be able to carry cars and even heavy trucks. The Hyperloop's schedule is based on pods carrying 10 passengers each: every 30 seconds about 10 pods will leave their stations and arrive, 24 hours a day, within the precise time frame of 7:06 minutes from Tel Aviv to Eilat. The travel time on the way back from Eilat to Tel Aviv will be extended by one second only and will take 7:07 minutes.
Drones instead of attendants
The high speed is possible thanks to the Hyperloop's movement through low-pressure tubes, which reduce friction and allow the loop to reach a maximum speed of 1,280 mph, although in practice the pods will not reach top speed given the relatively short distance travelled. Geektime also learned that the Japanese team worked on the construction of unique drones, developed by the French company Parrot, which will serve travelers soft drinks, coffee and cakes on request shortly before the start of the trip. Passengers will also be offered sodium chloride tablets in case they suffer from nausea. Ticket prices are likely to be higher than most public transportation, although thanks to investors the gap should be small.
Information provided to Geektime said travelers wishing to take part in the project's beta stage, which is expected to start in July 2015, can do so by signing up on a dedicated web page, which will be promoted through various media outlets in the country as part of a large-scale advertising campaign that will include billboards, radio and television advertising, and banners on major sites in Israel and abroad.
Elon Musk, Ishimoto Tadahiro, the Ministry of Transportation and Shai Agassi did not respond to requests for comment.
This post is an April Fools day joke. It is (sadly) not true. |
# solutions/practice/rps_randomnumber.py
# RPS game with random numbers
# import
from random import randint
# input
print('Welcome to RPS game!!')
player_choice = int(input('Enter your choice (0 - Rock | 1 - Scissors | 2 - Paper): '))
# processing & output
cpu_choice = randint(0,2)
if cpu_choice == 0:
print('CPU chose Rock')
elif cpu_choice == 1:
print('CPU chose Scissors')
else:
print('CPU chose Paper')
if player_choice == cpu_choice:
print('Tie Game!')
else:
if player_choice == 0:
if cpu_choice == 1:
print('Player 1 Wins!')
else:
print('CPU Wins!')
elif player_choice == 1:
if cpu_choice == 2:
print('Player 1 Wins!')
else:
print('CPU Wins!')
else:
if cpu_choice == 0:
print('Player 1 Wins!')
else:
print('CPU Wins!') |
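
# The script above assumes the player types 0, 1 or 2; any other value currently falls through
# to the "Paper" branch. A small optional guard, shown here as a sketch (the helper name and
# prompt text are illustrative, not part of the original exercise):
def read_player_choice():
    while True:
        raw = input('Enter your choice (0 - Rock | 1 - Scissors | 2 - Paper): ')
        if raw in ('0', '1', '2'):
            return int(raw)
        print('Invalid choice, please enter 0, 1 or 2.')
# player_choice = read_player_choice()  # would replace the bare int(input(...)) call above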
// -*- C++ -*-
//
// <NAME>, <NAME>
// orthologue
// (c) 1998-2020 all rights reserved
//
// configuration
#include <portinfo>
// externals
#include <iostream>
#include <pyre/journal.h>
#include <pyre/memory.h>
#include <pyre/grid.h>
// main
int main() {
// journal control
pyre::journal::debug_t debug("pyre.memory.direct");
// debug.activate();
// space
typedef double cell_t;
// layout
typedef std::array<int, 2> rep_t;
typedef pyre::grid::index_t<rep_t> index_t;
typedef pyre::grid::layout_t<index_t> layout_t;
// storage
typedef pyre::memory::view_t<cell_t> view_t;
// grid
typedef pyre::grid::grid_t<cell_t, layout_t, view_t> grid_t;
// make a channel
pyre::journal::debug_t channel("pyre.grid");
// make a common ordering
layout_t::packing_type packing {1u, 0u};
// make a shape for reference grid
layout_t::shape_type ref_shape {6, 4};
// make a layout
layout_t ref_layout {ref_shape, packing};
// allocate some memory for reference grid
cell_t * buffer = new cell_t[ref_layout.size()];
// initialize the memory with predictable values
for (layout_t::size_type i = 0; i < ref_layout.size(); ++i) {
buffer[i] = i;
}
// make reference grid
grid_t ref_grid {ref_layout, buffer};
// make a shape for a new grid
layout_t::shape_type new_shape {2, 4};
// make a layout
layout_t new_layout {new_shape, packing};
// allocate memory for new grid
cell_t * new_buffer = new cell_t[new_layout.size()];
// initialize the memory with zeros
std::fill(new_buffer, new_buffer + new_layout.size(), 0.0);
// make new grid
grid_t new_grid {new_layout, new_buffer};
// create slice indices for reference grid
const index_t ref_low = {2, 0};
const index_t ref_high = {4, 4};
const layout_t::slice_type ref_slice = {ref_low, ref_high, packing};
// create slice indices for new grid for setting values (slice whole grid for testing)
const index_t low = {0, 0};
const index_t high = {2, 4};
const layout_t::slice_type slice = {low, high, packing};
// assign values from reference view to new view
new_grid.view(slice) = ref_grid.view(ref_slice);
// loop over the grid
const double bias = ref_low[0] * ref_shape[1];
for (auto idx : new_grid.layout()) {
// get the value stored at this location
auto value = new_grid[idx];
// the expected value is the current offset as a double plus a bias from the view
grid_t::cell_type expected = new_grid.layout().offset(idx) + bias;
// if they are not the same
if (value != expected) {
// make a channel
pyre::journal::error_t error("pyre.grid");
// show me
error
<< pyre::journal::at(__HERE__)
<< "new_grid[" << idx << "]: " << value << " != " << expected
<< pyre::journal::endl;
// and bail
return 1;
}
}
// clean up
delete [] buffer;
delete [] new_buffer;
// all done
return 0;
}
// end of file
|
// m-moris/waf-java-samples
package org.pnop.waf.sample.act.sb.indicator;
import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.stereotype.Component;
import lombok.extern.slf4j.Slf4j;
@Slf4j
@Component("MyApplication2")
public class MyHealthIndicator2 implements HealthIndicator {
@Override
public Health health() {
int errorCode = check();
if (errorCode != 0) {
return Health
.down()
.withDetail("Error Code", errorCode)
.withDetail("PropertyA", 99999)
.withDetail("PropertyB", false)
.build();
}
return Health
.up()
.withDetail("PropertyX", "XYZ")
.withDetail("PropertyY", 84803)
.withDetail("PropertyZ", 10.012)
.build();
}
private int check() {
if (Math.random() < 0.5) {
log.warn("An error has occurred");
return 1;
}
log.info("check successful.");
return 0;
}
}
|
export const urlRegex = /^(ftp|http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-/]))?$/;
export function isValidUrl(value: string) {
return urlRegex.test(value);
}
|
By the summer of 1966 it had already been five years since the late President John F. Kennedy had committed the United States to a manned lunar landing by the end of the decade. While the US seemed to have had a slow start as the Soviet Union chalked up one important space first after another, by the summer of 1966 the American space program was making visible progress. In the beginning of June, NASA had landed Surveyor 1 on the lunar surface (see “Surveyor 1: NASA’s First Lunar Landing”). Two months later, Lunar Orbiter 1 reached the Moon and started a systematic mapping of potential lunar landing sites (see “Lunar Orbiter 1: America’s First Lunar Satellite”).
In the mean time, the manned Gemini missions were proceeding at an increasing rate helping NASA learn the skills as well as perfect the techniques and test the technologies required to perform the Apollo mission. In July 1966 NASA successfully flew the Gemini 10 mission which included a double rendezvous and a series of EVAs (see “Gemini 10: Dual Rendezvous in Space”). The final two Gemini flights were being prepared to finish the program by the end of 1966. In parallel, flight testing of the highly advanced Apollo spacecraft and their Saturn launch vehicles had already begun. In August 1966 there was one final automated Apollo test flight, designated AS-202, which remained to be flown before NASA would fly its first manned mission scheduled for February 1967 which would become known as “Apollo 1”.
The Hardware
At this stage of the Apollo program, there were two versions of the Apollo spacecraft being built by North American Aviation (which, after decades of corporate mergers, is now part of Boeing). The first variant, designated Block I, was meant for test flights in low Earth orbit in order to verify the basic Apollo CSM (Command-Service Module) design. Lessons learned from constructing and flying these versions would be incorporated into the improved Block II Apollo CSM which would include all of the equipment required to support a flight to the Moon. The Apollo CM (Command Module), which carried the astronauts during their mission and the recovery systems needed to return them safely to Earth, was conical in shape with a diameter of 3.9 meters and a height of 3.2 meters. The SM (Service Module), which included all the systems and consumables needed to support the astronauts and their mission, was a cylinder with the same diameter. Its appearance was dominated by the 91-kilonewton Aerojet AJ10-137 engine of the Service Propulsion System (SPS) which would be used for all major propulsive maneuvers after the Saturn launch vehicle had finished its task. The total height of the CSM was 11 meters and the Block I version had a nominal mass in excess of 20 metric tons.
The Apollo spacecraft was topped off by the launch escape system (LES) built by the Lockheed Propulsion Company (whose corporate parent is now part of Lockheed Martin). It consisted of a solid rocket motor assembly attached to the top of the CM by means of a truss framework with a total height of 9.9 meters and a mass of 4,200 kilograms. It was designed to pull the CM and its crew to safety in case of an abort situation during the earliest phase of launch and would be jettisoned during the burn of the Saturn second stage when it was no longer needed.
The launch vehicle for the Apollo missions in Earth orbit was the Saturn IB. The Saturn IB was a substantially improved version of the Saturn I originally developed by the team led by famed rocket engineer Wernher von Braun based at NASA’s Marshall Space Flight Center in Huntsville, Alabama. The first stage of this new launch vehicle, built by Chrysler and designated S-IB, was an updated version of the S-I stage successfully flown ten times between 1960 and 1965 during the Saturn I test flight program (see “The Last Launch of the Saturn I”). Like the S-I stage, the S-IB structure consisted of a set of eight 1.8-meter in diameter tanks holding LOX and RP-1 derived from the proven Redstone rocket clustered around a single 2.7-meter LOX tank adapted from the Jupiter rocket. A new set of eight swept-back fins as well as a host of other changes to the hardware and fabrication of the S-IB made it 9,000 kilograms lighter than the older S-I. The S-IB was powered by eight improved Rocketdyne H-1 engines whose total thrust at launch was increased from 6,683 to 7,120 kilonewtons.
By far the biggest change to create the Saturn IB was to the second stage designated S-IVB built by the Douglas Aircraft Company. Instead of six Pratt & Whitney RL-10 engines which produced a total of 400 kilonewtons of thrust on the S-IV stage used by the Saturn I, the new and larger S-IVB stage employed a single Rocketdyne J-2 engine to produce 890 kilonewtons of thrust using the same high energy propellant combination of liquid hydrogen and LOX used on the S-IV. The S-IVB stage also included a pair of auxiliary propulsion system modules which provided roll control during the burn of the J-2 as well as attitude control while coasting in orbit. This was topped off by the Instrument Unit (IU) which controlled both stages of the launch vehicle. In addition to carrying the CSM or LM (Lunar Module) into Earth orbit for initial test flights, the first launches also allowed flight testing of the nearly identical version of the S-IVB stage that would be employed as the third stage of the Saturn V which would send Apollo to the Moon.
A tapered adapter consisting of four panels connected the S-IVB stage to the Apollo CSM. During flights of the Saturn V, the LM would also be housed inside this adapter. The Saturn IB, without the payload, was 43.2 meters tall and was capable of placing about 21 metric tons into low Earth orbit. The total height of the Apollo-Saturn IB was 68.3 meters and it had a typical lift off mass of 598 metric tons.
Mission Objectives & Plan
The Apollo AS-201 mission launched on February 26, 1966 was the first spaceflight of the Block I CSM and the Saturn IB (see “The First Flight of the Apollo-Saturn IB”). While all the primary mission objectives were met, problems encountered during this 37-minute suborbital test flight, especially during the pair of burns of the SM’s SPS, forced a postponement of the AS-202 mission to resolve the issues. In the mean time, the AS-203 mission was launched out of the originally intended sequence on July 5. The objectives of the AS-203 mission, which was never intended to fly with an Apollo spacecraft, concentrated on testing the Saturn IB especially various design features of the S-IVB stage during orbital flight before the stage was flown on the Saturn V (see “AS-203: NASA’s Odd Apollo Mission”).
For this final unmanned test flight of the Apollo-Saturn IB, the primary objectives centered on a thorough testing of various spacecraft systems before the first manned flight in early 1967, designated AS-204 or, as it would become better known, Apollo 1. Like AS-201, the AS-202 mission objectives could be met with a suborbital flight. But instead of just a 37-minute mission as AS-201 flew, this would be a longer 93-minute flight which would end with a splashdown almost three quarters of the way around the globe in the Pacific Ocean about 28,700 kilometers downrange.
While the CSM-009 flown on the earlier AS-201 mission did not carry many systems required for a crewed flight, the payload for the AS-202 mission was the fully functional Block I CSM-011 more or less in the configuration intended for the first manned test flight. Notable differences included the omission of the three astronaut couches, some crew equipment and the post-landing cabin ventilation system. In lieu of a crew to control the spacecraft, CM-011 carried an electro-mechanical flight control sequencer to put the various systems through their paces during the flight. Also included were four cameras, three auxiliary batteries and flight-specific instrumentation. Unlike the AS-201 mission which relied on batteries for power, SM-011 carried two operational fuel cells (and one non-operational unit) capable of a maximum electrical power output of 2.84 kilowatts to test this vital hardware in space.
After the mission’s Saturn IB launch vehicle, designated SA-202, had finished its job at an altitude of 217 kilometers some 1,559 kilometers downrange, CSM-011 would separate from the spent S-IVB stage. The S-IVB stage would then conduct a test like that performed during the AS-203 mission where the pressure differential across the common bulkhead between the liquid hydrogen and LOX tanks is increased until the bulkhead fails in order to verify ground test results. Right after separating from the S-IVB, the SM would fire the main engine of the SPS for 215 seconds to boost the spacecraft’s apogee to 1,136 kilometers over South Africa about 41 minutes after launch. Since one of the important test objectives for this flight was to subject the CM’s heat shield to high heat loads, 25 minutes after apogee the SPS would be fired a second time for 88 seconds to increase the descent rate. This would be followed by two more short burns of the SPS in quick succession to test the rapid restart capability of the engine.
After separating from the SM, CM-011 would hit the atmosphere at a speed of 8,500 meters per second at a shallow angle of just -3.48°. After dipping to an altitude of 66.4 kilometers, the spacecraft’s guidance system would use the CM’s lift to steer back up out of the atmosphere to an altitude of 80.6 kilometers before descending to Earth for the final time at 7,150 meters per second. This double skip reentry profile would be used by the Apollo CM during its return from the Moon and this flight would provide actual flight data to verify the CM’s hypersonic aerodynamic properties derived earlier from wind tunnel testing. While this reentry profile subjected the CM heat shield to lower heating rates, it still had to contend with a total heat load of 260 megajoules per square meter. Although this heat load was just a fraction of that the CM would experience during a return from the Moon, it would still certify the CM for return to Earth from orbital flight.
After its double-skip reentry, the CM would splash down in the north central Pacific Ocean where it would be recovered. If the CM experienced a guidance system failure, it would follow a simpler ballistic reentry like on the AS-201 mission but splash down 1,600 kilometers uprange. In order to cover this and other contingencies including the uncertainties in the CM’s actual lift-to-drag ratio, the recovery area for the AS-202 mission was 6,500 kilometers long and 370 to 560 kilometers wide, cutting between the Caroline and Wake Islands, with an impact point centered 550 kilometers southeast of Wake Island. The recovery force would consist of the Essex-class aircraft carrier, the USS Hornet, a pair of destroyers and seven C-130 search aircraft.
The launch mass of CSM-011 for the AS-202 mission was 20,275 kilograms including 10,580 kilograms of SPS propellant as well as 327 kilograms of liquid hydrogen and LOX reactants for the fuel cells in the SM. This was almost five metric tons heavier than CSM-009 flown during the AS-201 mission making CSM-011 the heaviest crewed spacecraft prototype ever to fly into space up until that time. The total launch mass of the fully fueled Saturn IB and its Apollo spacecraft payload for this mission was 595.3 metric tons.
The AS-202 Flight
The first major piece of flight hardware to arrive at Cape Kennedy, Florida was the second stage of the launch vehicle, designated S-IVB-202, on January 31, 1966. This was followed by the first stage, S-IB-2, which arrived by barge a week later. The S-IB was erected on the pad at Launch Complex 34 (LC-34) on March 4 just six days after the launch of AS-201. The S-IVB-202 was added to the stack at LC-34 on March 10 with the addition of the IU the following day completing the rocket. The SA-202 launch vehicle was then put through a series of pre-launch tests over the following month. With LC-34 tied up for several months with preparations for the AS-202 mission, the newly renovated LC-37B was used to support the AS-203 which was being prepared in parallel for its mission.
Meanwhile, the components of CSM-011 arrived at the Cape in April and, after an initial inspection, the SM was taken to LC-16 for checks of its secondary propulsion system. This was followed by fuel cell installation and a series of tests including under vacuum conditions inside an altitude chamber. After the individual spacecraft modules were integrated in June, they were finally erected atop of SA-202 at LC-34 on July 2. Following more checks and the successful completion of a Flight Readiness Review on August 11, the AS-202 mission was ready for its first launch attempt at 12:30 PM EDT on August 25.
The countdown for the first launch attempt of the Apollo AS-202 mission started at 11:30 PM EDT on August 24. Ground computer issues and the threat of Hurricane Faith hitting the downrange tracking station in Antigua both affected the countdown. Finally at 1:15:32 PM EDT (17:15:32 GMT) on August 25, AS-202 lifted off from LC-34 into high broken cloud cover only 45½ minutes behind schedule. The last of the S-IB’s eight H-1 engines shut down two minutes, 23.47 seconds after launch – this was 1.27 seconds earlier than predicted but the ascending rocket was already travelling 21 meters per second faster than planned.
The trio of ullage motors on the S-IVB stage fired with the stage separation monitored by a pair of movie cameras on the spent S-IB stage. Despite some minor valve malfunctions in the recirculation system of the J-2, the S-IVB’s engine ignited and continued pushing the rocket and its payload on towards space. As the spent S-IB-2 stage continued along a ballistic arc reaching an altitude of 97.2 kilometers and brushing the edge of space, the cameras which recorded stage separation were ejected but only one was located and recovered.
Meanwhile, as the S-IVB-202 stage was near the beginning of its burn, the LES along with the boost protective cover of the CM was jettisoned two minutes and 49 seconds into the flight. Because of some issues controlling the propellant mixture ratio of the J-2 during what was supposed to be a seven minute, 35 second burn, the J-2 shut down almost 14 seconds earlier than planned with the ascending craft travelling only 0.6 meters per second faster and 90 meters lower than planned. At an altitude of 222 kilometers, the CSM separated from the spent S-IVB stage and 11 seconds later fired its SPS for 216.7 seconds.
These events were monitored live by a television camera mounted on the S-IVB transmitting via the Antigua tracking station. The spent S-IVB-202 proceeded to conduct its structural tests reaching a peak altitude of 269 kilometers some 2,858 kilometers down range 13 minutes and 20 seconds after launch. Telemetry finally ceased 141 seconds after apogee when the bulkhead between the S-IVB propellant tanks failed and the descending stage lost structural integrity. The debris burned up during reentry and the Antigua tracking station, with its tasks completed, was finally forced to shut down 45 minutes after launch due to Hurricane Faith.
After the first burn of the SPS, CSM-011 was travelling with a fixed space velocity of 7,772 meters per second at an altitude of 338.4 kilometers. As the Apollo approached apogee, the spacecraft turned its nose towards the Earth as part of a thermal test reaching a peak altitude of 1,143 kilometers at a mission elapsed time of 41 minutes and 14 seconds. The only issues during this time included a weaker than expected signal from the unified S-band communication system resulting in spotty telemetry from this system and when the glycol evaporator of the environmental control system ceased functioning for 54 minutes allowing its outlet temperature to exceed 24° C.
As AS-202 was descending back to Earth over the Indian Ocean, the CSM reoriented itself to align its nose with its velocity vector. Some 24 minutes, 42 seconds after reaching apogee, the SPS ignited for a second time at an altitude of 457.4 kilometers. This 89.2 second burn was followed nine seconds later by two brief 3.8 second burns of the SPS to test the system’s rapid restart capability. The end of the last SPS burn was one hour, 7 minutes and 51 seconds into the mission with CSM-011 at an altitude of 346.0 kilometers travelling at a fixed space speed of 8,449 meters per second. A couple of minutes later, the Apollo modules separated and the CM prepared itself for reentry.
The CM reached its entry interface at an altitude of 122 kilometers travelling at a fixed space velocity of 8,690 meters per second. The guidance system steered the CM through a double skip reentry initially dipping to an altitude of about 64.7 kilometers then rising up to 78.5 kilometers before making its final plunge back into the atmosphere. During the reentry, the heat shield is calculated to have reached a temperature of about 1,500° C while the interior never exceeded 21° C. CM-011 automatically deployed its parachutes and splashed down at 16.11° N, 168.90° E about 805 kilometers southwest of Wake Island at 18:48:34 GMT after a flight lasting one hour, 33 minutes and two seconds. Because the CM’s lift-to-drag ratio was about 15% lower than expected and the entry was 0.05° steeper than planned, the AS-202 CM had come down 380 kilometers short of its aim point.
Once the CM had been spotted by aircraft, rescue swimmers were deployed and attached a flotation collar to help stabilize the capsule. Ten hours after launch, the USS Hornet arrived and recovered CM-011. The Apollo CM was then shipped to North American Aviation’s facility in Downey, California, where engineers thoroughly studied the craft and its heat shield. Despite some minor issues, the AS-202 mission had achieved all of its objectives, thus human-rating the Saturn IB and the Apollo spacecraft for the first Apollo manned test flight scheduled for launch in six months. In the mean time, there was still much work to do but by the end of the summer of 1966, Apollo was getting ready for its next big step in the race to the Moon.
Follow Drew Ex Machina on Facebook.
Related Video
Here is a brief video showing the S-IVB-202 stage separation as viewed from inside the interstage.
This is an excellent 1968 NASA documentary with a detailed tutorial on Apollo’s double skip reentry technique (first demonstrated during the AS-202 mission) entitled “Apollo Atmospheric Entry Phase”.
Related Reading
“The First Flight of the Apollo-Saturn IB”, Drew Ex Machina, February 26, 2016 [Post]
“AS-203: NASA’s Odd Apollo Mission”, Drew Ex Machina, July 5, 2016 [Post]
General References
Roger E. Bilstein, Stages to Saturn: A Technological History of the Apollo/Saturn Launch Vehicles, University Press of Florida, 2003
Ernest Hillje, Entry Flight Aerodynamics From Apollo Mission AS-202, NASA TN D-4185, NASA Manned Space Center, October 1967
Alan Lawrie, Saturn I/IB The Complete Manufacturing and Testing Records, Apogee Books, 2008
Richard W. Orloff and David M. Harland, Apollo: The Definitive Sourcebook, Springer-Praxis, 2006
Pamelia Pack, AS-202 Launch Vehicle Operational Flight Trajectory, NASA TM X-53470, NASA Marshall Space Flight Center, June 3, 1966
“Apollo/Saturn 202”, NASA Press Release 66-213, August 21, 1966 |
/**
* Sends a custom message and informs {@link WebSocketObserver} instances of this new message.
*
* @param msg
* @throws IOException
*/
@Deprecated
public void sendAndNotify(WebSocketMessageDTO msg) throws IOException {
logger.debug("sending custom message");
WebSocketMessage message = createWebSocketMessage(msg);
if (message.forward(getOuputStream(msg))) {
notifyMessageObservers(message);
}
} |
class DataSource:
    '''The base class of data sources. The class defines the default behavior for data sources.'''

    def getUpdateItems(self):
        '''Returns a dictionary of items determined by the source for updates, as opposed to adds (to sinks).

        The dictionary contains key/value pairs in which the key represents the query to the document to
        update. The query is expected to be a valid query for the document id. The value is the document
        to update.'''
        return None
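
# A rough illustration of the contract described above: a hypothetical subclass that keeps its
# documents in memory and uses each document's id as the lookup query. The class name and field
# names are invented for the example.
class InMemoryDataSource(DataSource):
    def __init__(self, documents):
        self.documents = documents  # e.g. [{'_id': 1, 'name': 'alpha'}, {'_id': 2, 'name': 'beta'}]

    def getUpdateItems(self):
        # Key: the query identifying the document to update (here simply its id).
        # Value: the document that should replace the stored one.
        return {doc['_id']: doc for doc in self.documents}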
Wrath & Glory FAQ
What is Warhammer 40,000 Roleplay: Wrath & Glory?
Wrath & Glory is an officially licensed, brand-new roleplaying game set in the universe of Warhammer 40,000.
How is it related to the 8th edition of the tabletop miniatures game Warhammer 40,000?
The events that are occurring in the Dark Imperium of Warhammer 40,000 8th edition form the central backdrop for Wrath & Glory. Characters adventuring into the Dark Imperium witness firsthand the dire threat that the Great Rift brings to the galaxy.
What does it have to do with the FFG RPGs?
Wrath & Glory is an entirely separate game line from Dark Heresy, Rogue Trader, and the other RPGs developed by Fantasy Flight Games.
What system does Wrath & Glory use?
Wrath & Glory has a brand-new game system involving dice pools of d6s to represent your character’s abilities. The game focuses on highlighting brutal combat, fast action, and a deep immersion into the setting of the 41st Millennium.
What is the focus of Wrath & Glory’s game line?
Wrath & Glory allows players to take on the roles of characters from the Warhammer 40,000 setting, including intrepid agents of the Imperium of Man and even some of the classic alien races that populate the galaxy.
What are the first Wrath & Glory products?
The game line begins with one core rulebook that contains everything you need to get started in the grim darkness of the far future.
What else is on the horizon for Wrath & Glory?
Future releases in the Wrath & Glory line feature campaigns that revolve around distinct elements of the Warhammer 40,000 universe, each campaign focusing on a different experience.
Previous editions of Warhammer 40,000 Roleplay will be available on DriveThruRPG soon.
If you want to know more about Wrath & Glory, be sure to catch our Gen Con Seminar, where we’ll be discussing further details.
Or sign up for our email list! |
Indiana Secretary of State Connie Lawson says an array of steps have been taken to ensure the integrity of Indiana's election system spread out over 92 jurisdictions.
By BRIAN A. HOWEY
INDIANAPOLIS - Indiana Secretary of State Connie Lawson reassured Hoosier voters that Indiana’s election system has not been compromised by Russian hackers, saying it would be “next to impossible” for someone to invade and alter data in Indiana’s 92 election jurisdictions that could throw Nov. 8 outcomes into doubt.
“We check every log-in,” Lawson told Howey Politics Indiana on Friday, just hours after FBI Director James Comey urged states to make sure there are “deadbolt” locks on the system. “We’ve had 15 million log-ins and we’ve checked every one,” Lawson said. “We have not had any from the URLs that were in the FBI alert.”
On Wednesday, Comey told the House Judiciary Committee, “There have been a variety of scanning activities which is a preamble for potential intrusion activities as well as some attempted intrusions at voter database registrations beyond those we knew about in July and August. We are urging the states just to make sure that their deadbolts are thrown and their locks are on and to get the best information they can from DHS just to make sure their systems are secure.”
And Department of Homeland Security Sec. Jeh Johnson said that 18 states had requested cyber assistance from the federal government to prevent intrusions from what U.S. intelligence and congressional leaders say are hackers associated with the Russian government which is trying to discredit and disrupt the U.S. election system. Indiana is not one of those 18 states. Only two states, Illinois and Arizona, have actually been hacked.
Compounding the concern is that Republican presidential nominee Donald Trump has continually told his supporters that the “system is rigged,” casting further doubt about the integrity of a fair election.
“We have 92 election systems,” Lawson said of Indiana’s 92 counties. “We have five different voting systems. None of them are on the internet and none of them are networked together. I’m not saying it’s impossible, but with 9,000 election systems in the United States, I’m saying it is next to impossible” that the Nov. 8 election would be discredited or disrupted.
Lawson said that a reason Indiana has not joined the 18 states seeking federal assistance is “we have worked with the Department of Homeland Security, the FBI, the Federal Election Commission, the Department of Justice. We have engaged our vendors and our system managers.”
Lawson added that when someone goes into the state or county system to register to vote, what they’re seeing is “a mirror of the data base.” She said that the information isn’t entered in for 10 days, allowing the state to check the validity of the person. She added that there are continual audits and checks for anomalies. Officials also match up the number of voters on polling lists with those on individual polling machines.
“The last thing we want is for anyone to think they have a reason not to show up or that their vote won’t count,” said Lawson, who as secretary of state is Indiana’s chief elections officer.
Lawson noted that in a Sept. 28 letter from Congressional leaders Paul Ryan, Mitch McConnell, Nancy Pelosi and Harry Reid to the National Association of State Election Directors, they were told, “The states face the challenge of malefactors that are seeking to use cyberattacks to disrupt the administration of our elections. We urge the states to take full advantage of the robust public and private sector resources available to them to ensure that their network infrastructure is secure from attack. For over 200 years the states have overcome every challenge to ensure the smooth function of our democracy. We trust that you will take the steps necessary to meet the new challenges of the 21st Century by securing your election systems from cyberattacks.”
export interface DeleteRecurrence {
OrderID: string;
}
export interface Recurrence {
orderID: string;
paymentInfo: PaymentInfo;
recurring: Recurring;
billingInfo: BillingInfo;
shippingInfo: ShippingInfo;
}
interface PaymentInfo {
cardInfo: string;
creditCardNumber: string;
expirationMonth: string;
expirationYear: string;
chargeTotal: string;
}
interface Recurring {
processorID: string;
action: string;
installments: string;
nextFireDate?: string;
fireDay?: string;
period: string;
lastDate?: string;
lastAmount?: string;
}
interface BillingInfo {
name: string;
address1: string;
address2?: string;
city: string;
zip: string;
country: string;
email: string;
phone: string;
}
interface ShippingInfo {
name: string;
address1: string;
address2?: string;
city: string;
zip: string;
country: string;
email: string;
phone: string;
}
|
/*******************************************************************
MIT License
Copyright (c) 2016 BobbyAnguelov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*********************************************************************/
//standard includes
#include <iostream>
#include <vector>
#include <fstream>
#include <string>
#include <cstring> // strcpy_s / strtok_s used when loading weights
#include <cstdlib> // atof
#include <math.h>
//include definition file
#include "neuralNetwork.h"
using namespace std;
/*******************************************************************
* Constructor
********************************************************************/
neuralNetwork::neuralNetwork(int nI, int nH, int nO) : nInput(nI), nHidden(nH), nOutput(nO)
{
//create neuron lists
//--------------------------------------------------------------------------------------------------------
inputNeurons = new( double[nInput + 1] );
for ( int i=0; i < nInput; i++ ) inputNeurons[i] = 0;
//create input bias neuron
inputNeurons[nInput] = -1;
hiddenNeurons = new( double[nHidden + 1] );
for ( int i=0; i < nHidden; i++ ) hiddenNeurons[i] = 0;
//create hidden bias neuron
hiddenNeurons[nHidden] = -1;
outputNeurons = new( double[nOutput] );
for ( int i=0; i < nOutput; i++ ) outputNeurons[i] = 0;
//create weight lists (include bias neuron weights)
//--------------------------------------------------------------------------------------------------------
wInputHidden = new( double*[nInput + 1] );
for ( int i=0; i <= nInput; i++ )
{
wInputHidden[i] = new (double[nHidden]);
for ( int j=0; j < nHidden; j++ ) wInputHidden[i][j] = 0;
}
wHiddenOutput = new( double*[nHidden + 1] );
for ( int i=0; i <= nHidden; i++ )
{
wHiddenOutput[i] = new (double[nOutput]);
for ( int j=0; j < nOutput; j++ ) wHiddenOutput[i][j] = 0;
}
//initialize weights
//--------------------------------------------------------------------------------------------------------
initializeWeights();
}
/*******************************************************************
* Destructor
********************************************************************/
neuralNetwork::~neuralNetwork()
{
//delete neurons
delete[] inputNeurons;
delete[] hiddenNeurons;
delete[] outputNeurons;
//delete weight storage
for (int i=0; i <= nInput; i++) delete[] wInputHidden[i];
delete[] wInputHidden;
for (int j=0; j <= nHidden; j++) delete[] wHiddenOutput[j];
delete[] wHiddenOutput;
}
/*******************************************************************
* Load Neuron Weights
********************************************************************/
bool neuralNetwork::loadWeights(char* filename)
{
//open file for reading
fstream inputFile;
inputFile.open(filename, ios::in);
if ( inputFile.is_open() )
{
vector<double> weights;
string line = "";
//read data
while ( !inputFile.eof() )
{
getline(inputFile, line);
//process line
if (line.length() > 2 )
{
//store inputs
char* cstr = new char[line.size()+1];
char* t;
strcpy_s(cstr, line.size() + 1, line.c_str());
//tokenise
int i = 0;
char* nextToken = NULL;
t=strtok_s(cstr,",", &nextToken );
while ( t!=NULL )
{
weights.push_back( atof(t) );
//move token onwards
t = strtok_s(NULL,",", &nextToken );
i++;
}
//free memory
delete[] cstr;
}
}
//check if sufficient weights were loaded
if ( weights.size() != ( (nInput + 1) * nHidden + (nHidden + 1) * nOutput ) )
{
cout << endl << "Error - Incorrect number of weights in input file: " << filename << endl;
//close file
inputFile.close();
return false;
}
else
{
//set weights
int pos = 0;
for ( int i=0; i <= nInput; i++ )
{
for ( int j=0; j < nHidden; j++ )
{
wInputHidden[i][j] = weights[pos++];
}
}
for ( int i=0; i <= nHidden; i++ )
{
for ( int j=0; j < nOutput; j++ )
{
wHiddenOutput[i][j] = weights[pos++];
}
}
//print success
//cout << endl << "Neuron weights loaded successfuly from '" << filename << "'" << endl;
//close file
inputFile.close();
return true;
}
}
else
{
cout << endl << "Error - Weight input file '" << filename << "' could not be opened: " << endl;
return false;
}
}
/*******************************************************************
* Save Neuron Weights
********************************************************************/
bool neuralNetwork::saveWeights(char* filename)
{
//open file for reading
fstream outputFile;
outputFile.open(filename, ios::out);
if ( outputFile.is_open() )
{
outputFile.precision(50);
//output weights
for ( int i=0; i <= nInput; i++ )
{
for ( int j=0; j < nHidden; j++ )
{
outputFile << wInputHidden[i][j] << ",";
}
}
for ( int i=0; i <= nHidden; i++ )
{
for ( int j=0; j < nOutput; j++ )
{
outputFile << wHiddenOutput[i][j];
if ( i * nOutput + j + 1 != (nHidden + 1) * nOutput ) outputFile << ",";
}
}
//print success
cout << endl << "Neuron weights saved to '" << filename << "'" << endl;
//close file
outputFile.close();
return true;
}
else
{
cout << endl << "Error - Weight output file '" << filename << "' could not be created: " << endl;
return false;
}
}
/*******************************************************************
* Feed pattern through network and return results
********************************************************************/
int* neuralNetwork::feedForwardPattern(double *pattern)
{
feedForward(pattern);
//create copy of output results
int* results = new int[nOutput];
for (int i=0; i < nOutput; i++ ) results[i] = clampOutput(outputNeurons[i]);
return results;
}
/*******************************************************************
* Return the NN accuracy on the set
********************************************************************/
double neuralNetwork::getSetAccuracy( std::vector<dataEntry*>& set )
{
double incorrectResults = 0;
//for every training input array
for ( int tp = 0; tp < (int) set.size(); tp++)
{
//feed inputs through network and backpropagate errors
feedForward( set[tp]->pattern );
//correct pattern flag
bool correctResult = true;
//check all outputs against desired output values
for ( int k = 0; k < nOutput; k++ )
{
//set flag to false if desired and output differ
if ( clampOutput(outputNeurons[k]) != set[tp]->target[k] ) correctResult = false;
}
//inc training error for a incorrect result
if ( !correctResult ) incorrectResults++;
}//end for
//calculate error and return as percentage
return 100 - (incorrectResults/set.size() * 100);
}
/*******************************************************************
* Return the NN mean squared error on the set
********************************************************************/
double neuralNetwork::getSetMSE( std::vector<dataEntry*>& set )
{
double mse = 0;
//for every training input array
for ( int tp = 0; tp < (int) set.size(); tp++)
{
//feed inputs through network and backpropagate errors
feedForward( set[tp]->pattern );
//check all outputs against desired output values
for ( int k = 0; k < nOutput; k++ )
{
//sum all the MSEs together
mse += pow((outputNeurons[k] - set[tp]->target[k]), 2);
}
}//end for
//calculate error and return as percentage
return mse/(nOutput * set.size());
}
/*******************************************************************
* Initialize Neuron Weights
********************************************************************/
void neuralNetwork::initializeWeights()
{
//set range
double rH = 1/sqrt( (double) nInput);
double rO = 1/sqrt( (double) nHidden);
//set weights between input and hidden
//--------------------------------------------------------------------------------------------------------
for(int i = 0; i <= nInput; i++)
{
for(int j = 0; j < nHidden; j++)
{
//set weights to random values
wInputHidden[i][j] = ( ( (double)(rand()%100)+1)/100 * 2 * rH ) - rH;
}
}
//set weights between hidden and output
//--------------------------------------------------------------------------------------------------------
for(int i = 0; i <= nHidden; i++)
{
for(int j = 0; j < nOutput; j++)
{
//set weights to random values
wHiddenOutput[i][j] = ( ( (double)(rand()%100)+1)/100 * 2 * rO ) - rO;
}
}
}
/*******************************************************************
* Activation Function
********************************************************************/
inline double neuralNetwork::activationFunction( double x )
{
//sigmoid function
return 1/(1+exp(-x));
}
/*******************************************************************
* Output Clamping
********************************************************************/
inline int neuralNetwork::clampOutput( double x )
{
if ( x < 0.1 ) return 0;
else if ( x > 0.9 ) return 1;
else return -1;
}
/*******************************************************************
* Feed Forward Operation
********************************************************************/
void neuralNetwork::feedForward(double* pattern)
{
//set input neurons to input values
for(int i = 0; i < nInput; i++) inputNeurons[i] = pattern[i];
//for (int i = 0; i < nInput; i++) cout << pattern[i] << endl;
//Calculate Hidden Layer values - include bias neuron
//--------------------------------------------------------------------------------------------------------
for(int j=0; j < nHidden; j++)
{
//clear value
hiddenNeurons[j] = 0;
//get weighted sum of pattern and bias neuron
for( int i=0; i <= nInput; i++ ) hiddenNeurons[j] += inputNeurons[i] * wInputHidden[i][j];
//set to result of sigmoid
hiddenNeurons[j] = activationFunction( hiddenNeurons[j] );
}
//Calculating Output Layer values - include bias neuron
//--------------------------------------------------------------------------------------------------------
for(int k=0; k < nOutput; k++)
{
//clear value
outputNeurons[k] = 0;
//get weighted sum of pattern and bias neuron
for( int j=0; j <= nHidden; j++ ) outputNeurons[k] += hiddenNeurons[j] * wHiddenOutput[j][k];
//set to result of sigmoid
outputNeurons[k] = activationFunction( outputNeurons[k] );
}
}
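For readers who prefer matrix notation, the feed-forward pass implemented above — a bias-augmented weighted sum followed by a sigmoid at each layer — can be sketched in a few lines of NumPy. This is a rough translation for illustration only, not part of the original C++ project; the array shapes are assumptions chosen to mirror the member layout used in the class.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def feed_forward(pattern, w_input_hidden, w_hidden_output):
    # pattern: (nInput,), w_input_hidden: (nInput+1, nHidden), w_hidden_output: (nHidden+1, nOutput)
    inputs = np.append(pattern, -1.0)           # bias neuron fixed at -1, as in the C++ code
    hidden = sigmoid(inputs @ w_input_hidden)   # weighted sums into the hidden layer
    hidden = np.append(hidden, -1.0)            # hidden-layer bias neuron
    return sigmoid(hidden @ w_hidden_output)    # output activations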
|
/**
 * Checks if a tzhaar npc has been killed; if so, it checks whether it needs
 * to apply the tz-kek effect. If the tzKek spawn has been killed twice, or did not
 * need to be killed, killedTzhaar is called.
*
* @param i
* The npc.
*/
private void tzhaarDeathHandler(int i) {
if (isFightCaveNpc(i) && npcs[i].npcType != FightCaves.TZ_KEK)
killedTzhaar(i);
if (npcs[i].npcType == FightCaves.TZ_KEK) {
int p = npcs[i].killerId;
if (PlayerHandler.players[p] != null) {
Player c = PlayerHandler.players[p];
FightCaves.tzKekEffect(c, i);
}
}
} |
// RunWithConfig is used to run a watch plan, building a Consul client from the provided config
func (p *Plan) RunWithConfig(address string, conf *consulapi.Config) error {
logger := p.Logger
if logger == nil {
logger = newWatchLogger(p.LogOutput)
}
p.address = address
if conf == nil {
conf = consulapi.DefaultConfigWithLogger(logger)
}
conf.Address = address
conf.Datacenter = p.Datacenter
conf.Token = p.Token
client, err := consulapi.NewClient(conf)
if err != nil {
return fmt.Errorf("Failed to connect to agent: %v", err)
}
return p.RunWithClientAndHclog(client, logger)
} |
TRIM21 causes abnormal expression of IL-6 in oral lichen planus via the TRIB2-MAPK signal axis.
Oral lichen planus (OLP) is a common chronic inflammatory disease in the oral cavity, and has the risk of developing into oral squamous cell carcinoma (OSCC). It is necessary to discover the role of TRIM21 in the pathogenesis of OLP and its underlying mechanism.
METHODS
Western blot and qPCR assays were used to detect the effects of TRIM21 on cellular levels of ERK, p-ERK, AP-1, IL-6, TRIB2, IRF3, and IRF7, while co-immunoprecipitation was performed to verify the interaction between the TRIM21 and TRIB2 proteins. The TRIM21 effect on Th1/Th2 balance in T cells was also evaluated using ELISA.
RESULTS
The results of western blot showed that TRIM21 overexpression significantly increased p-ERK, c-fos, c-jun, IL-6 and TRIB2 levels in H9 cells (P<0.01 and P<0.001), while it inhibited IRF3 and IRF7 levels (P<0.05). On the other hand, TRIM21 did not regulate the phosphorylation of ERK or the mRNA expression of AP-1 and TRIB2. In addition, TRIM21 was linked to proteasome-mediated degradation in the TRIB2-ERK axis. TRIM21 also regulated the level of TRIB2 not only by inhibiting the ubiquitination of TRIB2, but also by affecting IL-6 through the ERK pathway.
CONCLUSION
TRIM21 caused abnormal expression of IL-6 in OLP via regulating TRIB2-MAPK signal axis, leading to the disrupted Th1/Th2 balance in T lymphocytes. |
use crate::pre_tokenizers::unicode_scripts::scripts::{get_script, Script};
use crate::tokenizer::{normalizer::Range, PreTokenizedString, PreTokenizer, Result};
use crate::utils::macro_rules_attribute;
#[derive(Clone, Debug, PartialEq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct UnicodeScripts;
impl UnicodeScripts {
pub fn new() -> Self {
Self {}
}
}
impl Default for UnicodeScripts {
fn default() -> Self {
Self::new()
}
}
// This code exists in the Unigram default IsValidSentencePiece.
// It could be integrated directly within `get_script` but I
// think it's kind of tricky to see those modifications later.
// I am guessing release mode will optimize this away anyway.
fn fixed_script(c: char) -> Script {
let raw_script = get_script(c);
if c as u32 == 0x30FC {
Script::Han
} else if c == ' ' {
Script::Any
} else {
match raw_script {
Script::Hiragana => Script::Han,
Script::Katakana => Script::Han,
script => script,
}
}
}
impl PreTokenizer for UnicodeScripts {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, normalized| {
let mut last_script = None;
let mut offset = 0;
let mut ranges: Vec<_> = normalized
.get()
.chars()
.filter_map(|c| {
let script = Some(fixed_script(c));
let result = if script != Some(Script::Any)
&& last_script != Some(Script::Any)
&& last_script != script
{
Some(offset)
} else {
None
};
offset += c.len_utf8();
if script != Some(Script::Any) {
last_script = script;
}
result
})
.collect();
ranges.push(normalized.get().len());
Ok(ranges
.windows(2)
.map(|item| {
normalized
.slice(Range::Normalized(item[0]..item[1]))
.expect("NormalizedString bad split")
})
.collect::<Vec<_>>())
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::OffsetReferential;
use crate::OffsetType;
#[test]
fn basic() {
let pretok = UnicodeScripts::default();
let mut pretokenized = PreTokenizedString::from("どこで生れ。Yes");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("どこで生れ", (0, 15)), ("。", (15, 18)), ("Yes", (18, 21))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("どこで生れ", (0, 15)), ("。", (15, 18)), ("Yes", (18, 21))]
);
}
#[test]
fn spaces_are_included_in_every_script() {
let pretok = UnicodeScripts::default();
let mut pretokenized = PreTokenizedString::from("Apples are りんご 林檎");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Apples are ", (0, 11)), ("りんご 林檎", (11, 27))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Apples are ", (0, 11)), ("りんご 林檎", (11, 27))]
);
}
#[test]
fn test_unicode_script() {
assert_eq!(Script::Han, fixed_script('京'));
assert_eq!(Script::Han, fixed_script('太'));
assert_eq!(Script::Han, fixed_script('い'));
assert_eq!(Script::Han, fixed_script('グ'));
assert_eq!(Script::Han, fixed_script('ー'));
assert_eq!(Script::Latin, fixed_script('a'));
assert_eq!(Script::Latin, fixed_script('A'));
assert_eq!(Script::Common, fixed_script('0'));
assert_eq!(Script::Common, fixed_script('$'));
assert_eq!(Script::Common, fixed_script('@'));
assert_eq!(Script::Common, fixed_script('-'));
assert_eq!(Script::Any, fixed_script(' '));
}
}
|
import numpy as np
import tensorflow as tf
import os
import deepdish as dd
import struct
from array import array
from tensorflow.python.framework import ops
from tensorflow.contrib.framework.python.framework import is_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import check_ops
def decode_image(contents, channels=None, name=None):
"""Convenience function for `decode_gif`, `decode_jpeg`, and `decode_png`.
Detects whether an image is a GIF, JPEG, or PNG, and performs the appropriate
operation to convert the input bytes `string` into a `Tensor` of type `uint8`.
Note: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`, as
opposed to `decode_jpeg` and `decode_png`, which return 3-D arrays
`[height, width, num_channels]`. Make sure to take this into account when
constructing your graph if you are intermixing GIF files with JPEG and/or PNG
files.
Args:
contents: 0-D `string`. The encoded image bytes.
channels: An optional `int`. Defaults to `0`. Number of color channels for
the decoded image.
name: A name for the operation (optional)
Returns:
`Tensor` with type `uint8` with shape `[height, width, num_channels]` for
JPEG and PNG images and shape `[num_frames, height, width, 3]` for GIF
images.
"""
with ops.name_scope(name, 'decode_image') as scope:
if channels not in (None, 0, 1, 3):
raise ValueError('channels must be in (None, 0, 1, 3)')
substr = tf.substr(contents, 0, 4)
def _gif():
# Create assert op to check that bytes are GIF decodable
is_gif = tf.equal(substr, b'\x47\x49\x46\x38', name='is_gif')
decode_msg = 'Unable to decode bytes as JPEG, PNG, or GIF'
assert_decode = control_flow_ops.Assert(is_gif, [decode_msg])
# Create assert to make sure that channels is not set to 1
# Already checked above that channels is in (None, 0, 1, 3)
gif_channels = 0 if channels is None else channels
good_channels = tf.not_equal(gif_channels, 1, name='check_channels')
channels_msg = 'Channels must be in (None, 0, 3) when decoding GIF images'
assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])
with ops.control_dependencies([assert_decode, assert_channels]):
return gen_image_ops.decode_gif(contents)
def _png():
return gen_image_ops.decode_png(contents, channels)
def check_png():
is_png = tf.equal(substr, b'\211PNG', name='is_png')
return control_flow_ops.cond(is_png, _png, _gif, name='cond_png')
def _jpeg():
return gen_image_ops.decode_jpeg(contents, channels)
is_jpeg = tf.logical_or(tf.equal(substr, b'\xff\xd8\xff\xe0', name='is_jpeg0'),
                            tf.equal(substr, b'\xff\xd8\xff\xe1', name='is_jpeg1'))
return control_flow_ops.cond(is_jpeg, _jpeg, check_png, name='cond_jpeg')
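# Added usage sketch (not part of the original file): how decode_image is used by
# the loaders below. The path argument here is hypothetical.
def _decode_image_example(image_path):
  contents = tf.read_file(image_path)
  # uint8 tensor; [H, W, 3] for JPEG/PNG, [num_frames, H, W, 3] for GIF.
  image = decode_image(contents, channels=3)
  return tf.cast(image, tf.float32) / 255.0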
VOC_CLASSES = [
'aeroplane',
'bicycle',
'bird',
'boat',
'bottle',
'bus',
'car',
'cat',
'chair',
'cow',
'diningtable',
'dog',
'horse',
'motorbike',
'person',
'pottedplant',
'sheep',
'sofa',
'train',
'tvmonitor',
]
def _assert(cond, ex_type, msg):
"""A polymorphic assert, works with tensors and boolean expressions.
If `cond` is not a tensor, behave like an ordinary assert statement, except
  that an empty list is returned. If `cond` is a tensor, return a list
containing a single TensorFlow assert op.
Args:
cond: Something evaluates to a boolean value. May be a tensor.
ex_type: The exception class to use.
msg: The error message.
Returns:
A list, containing at most one assert op.
"""
if is_tensor(cond):
return [logging_ops.Assert(cond, [msg])]
else:
if not cond:
raise ex_type(msg)
else:
return []
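# Added illustration (not part of the original file): with a plain Python boolean,
# _assert either raises or returns []; with a tensor, it returns a one-element list
# holding a tf.Assert op that can be attached via control dependencies.
def _assert_example(image, target_size):
  checks = _assert(math_ops.reduce_all(array_ops.shape(image) >= target_size),
                   ValueError, 'image is smaller than target_size')
  return control_flow_ops.with_dependencies(checks, image)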
def _ImageDimensions(images, static_only=True):
"""Returns the dimensions of an image tensor.
Args:
images: 4-D Tensor of shape `[batch, height, width, channels]`
static_only: Boolean, whether to return only static shape.
Returns:
list of integers `[batch, height, width, channels]`, when static shape is
fully defined or `static_only` is `True`.
list of integer scalar tensors `[batch, height, width, channels]`, when
static shape is not fully defined.
"""
# A simple abstraction to provide names for each dimension. This abstraction
# should make it simpler to switch dimensions in the future (e.g. if we ever
# want to switch height and width.)
if static_only or images.get_shape().is_fully_defined():
return images.get_shape().as_list()
else:
return tf.unstack(tf.shape(images))
def _Check3DImage(image, require_static=True):
"""Assert that we are working with properly shaped image.
Args:
image: 3-D Tensor of shape [height, width, channels]
require_static: If `True`, requires that all dimensions of `image` are
known and non-zero.
Raises:
ValueError: if `image.shape` is not a 3-vector.
Returns:
An empty list, if `image` has fully defined dimensions. Otherwise, a list
containing an assert op is returned.
"""
try:
image_shape = image.get_shape().with_rank(3)
except ValueError:
raise ValueError("'image' must be three-dimensional.")
if require_static and not image_shape.is_fully_defined():
raise ValueError("'image' must be fully defined.")
if any(x == 0 for x in image_shape):
raise ValueError("all dims of 'image.shape' must be > 0: %s" %
image_shape)
if not image_shape.is_fully_defined():
return [check_ops.assert_positive(array_ops.shape(image),
["all dims of 'image.shape' "
"must be > 0."])]
else:
return []
def pad_to_ensure_size(image, target_height, target_width):
"""Pads if below target size, but does nothing if above.
If `width` or `height` is smaller than the specified `target_width` or
`target_height` respectively, this op centrally pads with 0 along that
dimension.
Args:
image: 3-D tensor of shape `[height, width, channels]`
target_height: Target height.
target_width: Target width.
Raises:
ValueError: if `target_height` or `target_width` are zero or negative.
Returns:
Padded image of shape
    `[max(target_height, height), max(target_width, width), channels]`
"""
image = ops.convert_to_tensor(image, name='image')
assert_ops = []
assert_ops += _Check3DImage(image, require_static=False)
image = control_flow_ops.with_dependencies(assert_ops, image)
# `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks.
# Make sure our checks come first, so that error messages are clearer.
if is_tensor(target_height):
target_height = control_flow_ops.with_dependencies(assert_ops, target_height)
if is_tensor(target_width):
target_width = control_flow_ops.with_dependencies(assert_ops, target_width)
def max_(x, y):
if is_tensor(x) or is_tensor(y):
return math_ops.maximum(x, y)
else:
return max(x, y)
height, width, _ = _ImageDimensions(image, static_only=False)
width_diff = target_width - width
offset_crop_width = max_(-width_diff // 2, 0)
offset_pad_width = max_(width_diff // 2, 0)
height_diff = target_height - height
offset_crop_height = max_(-height_diff // 2, 0)
offset_pad_height = max_(height_diff // 2, 0)
# Maybe pad if needed.
resized = tf.image.pad_to_bounding_box(image, offset_pad_height, offset_pad_width,
max_(target_height, height), max_(target_width, width))
# In theory all the checks below are redundant.
if resized.get_shape().ndims is None:
raise ValueError('resized contains no shape.')
resized_height, resized_width, _ = \
_ImageDimensions(resized, static_only=False)
#assert_ops = []
#assert_ops += _assert(equal_(resized_height, target_height), ValueError,
#'resized height is not correct.')
#assert_ops += _assert(equal_(resized_width, target_width), ValueError,
#'resized width is not correct.')
#resized = control_flow_ops.with_dependencies(assert_ops, resized)
return resized
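# Added usage sketch (not part of the original file): pad a possibly-small image up
# to the crop size before tf.random_crop, as the batching helpers below do.
def _pad_then_crop_example(image, input_size=224, seed=0):
  padded = pad_to_ensure_size(image, input_size, input_size)
  return tf.random_crop(padded, (input_size, input_size, 3), seed=seed)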
def resize_to_ensure_size(image, target_height, target_width):
height, width, _ = _ImageDimensions(image, static_only=False)
#if height < target_height or width < target_width:
# Do not preserve aspect ratio
image4 = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image4, [tf.maximum(height, target_height), tf.maximum(width, target_width)])
return image[0]
def _voc_seg_load_file(path, epochs=None, shuffle=True, seed=0):
PASCAL_ROOT = os.environ['VOC_DIR']
filename_queue = tf.train.string_input_producer([path],
num_epochs=epochs, shuffle=shuffle, seed=seed)
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
image_path, seg_path = tf.decode_csv(value, record_defaults=[[''], ['']], field_delim=' ')
image_abspath = PASCAL_ROOT + image_path
seg_abspath = PASCAL_ROOT + seg_path
image_content = tf.read_file(image_abspath)
image = decode_image(image_content, channels=3)
image.set_shape([None, None, 3])
imgshape = tf.shape(image)[:2]
imgname = image_path
seg_content = tf.read_file(seg_abspath)
seg = tf.cast(tf.image.decode_png(seg_content, channels=1), tf.int32)
return image, seg, imgshape, imgname
def voc_seg_batching(path, batch_size, input_size, epochs=None,
shuffle=True, min_after_dequeue=250, seed=0,
pad255=False, num_threads=1, color_transform=None, random_mirror=False):
assert seed is not None, "Seed must be specified, to synchronize images and segmentation maps"
image, seg, imgshape, imgname = _voc_seg_load_file(path, epochs=epochs, shuffle=shuffle, seed=seed)
if pad255:
seg += 1
image = tf.cast(image, tf.float32) / 255.0
if color_transform is not None:
image = color_transform(image)
pad_image = pad_to_ensure_size(image, input_size, input_size)
pad_seg = pad_to_ensure_size(seg, input_size, input_size)
fixed_image = tf.random_crop(pad_image, (input_size, input_size, 3), seed=seed)
#fixed_image = tf.cast(fixed_image, tf.float32) / 255.0
if pad255:
fixed_seg = tf.random_crop(tf.cast(pad_seg, tf.int32), (input_size, input_size, 1), seed=seed)
fixed_seg = ((fixed_seg + 255) % 256)
else:
fixed_seg = tf.random_crop(pad_seg, (input_size, input_size, 1), seed=seed)
fixed_seg = tf.cast(fixed_seg, tf.int32)
if random_mirror:
assert seed is not None
fixed_image = tf.image.random_flip_left_right(fixed_image, seed=seed)
fixed_seg = tf.image.random_flip_left_right(fixed_seg, seed=seed)
capacity = min_after_dequeue + 3 * batch_size
if shuffle:
batch_image, batch_seg, batch_imgshape, batch_imgname = tf.train.shuffle_batch(
[fixed_image, fixed_seg, imgshape, imgname], batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue, num_threads=num_threads)
else:
batch_image, batch_seg, batch_imgshape, batch_imgname = tf.train.batch(
[fixed_image, fixed_seg, imgshape, imgname], batch_size=batch_size, capacity=capacity,
num_threads=num_threads)
batch_seg = tf.squeeze(batch_seg, [3])
return batch_image, batch_seg, batch_imgshape, batch_imgname
def _imagenet_load_file(path, epochs=None, shuffle=True, seed=0, subset='train', prepare_path=True):
IMAGENET_ROOT = os.environ.get('IMAGENET_DIR', '')
if not isinstance(path, list):
path = [path]
filename_queue = tf.train.string_input_producer(path,
num_epochs=epochs, shuffle=shuffle, seed=seed)
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
image_path, label_str = tf.decode_csv(value, record_defaults=[[''], ['']], field_delim=' ')
if prepare_path:
image_abspath = IMAGENET_ROOT + '/images/' + subset + image_path
else:
image_abspath = image_path
image_content = tf.read_file(image_abspath)
image = decode_image(image_content, channels=3)
image.set_shape([None, None, 3])
imgshape = tf.shape(image)[:2]
label = tf.string_to_number(label_str, out_type=tf.int32)
return image, label, imgshape, image_path
def _relpath_no_label_load_file(path, root_path, epochs=None, shuffle=True, seed=0):
filename_queue = tf.train.string_input_producer([path],
num_epochs=epochs, shuffle=shuffle, seed=seed)
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
#image_path, = tf.decode_csv(value, record_defaults=[['']], field_delim=' ')
image_path = value
image_abspath = root_path + '/' + image_path
image_content = tf.read_file(image_abspath)
image = decode_image(image_content, channels=3)
image.set_shape([None, None, 3])
imgshape = tf.shape(image)[:2]
return image, imgshape, image_path
def _abspath_no_label_load_file(path, epochs=None, shuffle=True, seed=0):
filename_queue = tf.train.string_input_producer([path],
num_epochs=epochs, shuffle=shuffle, seed=seed)
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
#image_path, = tf.decode_csv(value, record_defaults=[['']], field_delim=' ')
image_path = value
image_abspath = image_path
image_content = tf.read_file(image_abspath)
image = decode_image(image_content, channels=3)
image.set_shape([None, None, 3])
imgshape = tf.shape(image)[:2]
return image, imgshape, image_path
def do_center_crop(value, size, name=None):
  """Crops a centered portion of a tensor to a given size.
  Slices a shape `size` portion out of `value`, centered on the input tensor.
Requires `value.shape >= size`.
If a dimension should not be cropped, pass the full size of that dimension.
For example, RGB images can be cropped with
`size = [crop_height, crop_width, 3]`.
Args:
value: Input tensor to crop.
size: 1-D tensor with size the rank of `value`.
name: A name for this operation (optional).
Returns:
A cropped tensor of the same rank as `value` and shape `size`.
"""
# TODO(shlens): Implement edge case to guarantee output size dimensions.
# If size > value.shape, zero pad the result so that it always has shape
# exactly size.
from tensorflow.python.framework import dtypes
with ops.op_scope([value, size], name, "center_crop") as name:
value = ops.convert_to_tensor(value, name="value")
size = ops.convert_to_tensor(size, dtype=dtypes.int32, name="size")
shape = array_ops.shape(value)
check = logging_ops.Assert(
math_ops.reduce_all(shape >= size),
["Need value.shape >= size, got ", shape, size])
shape = control_flow_ops.with_dependencies([check], shape)
    # Center the crop: take an equal margin from both sides of each dimension.
    offset = shape // 2 - size // 2
    return array_ops.slice(value, offset, size, name=name)
def classification_resizing_batching(path, batch_size, input_size, epochs=None,
shuffle=True, center_crop=False, min_after_dequeue=250, seed=0,
num_threads=1, color_transform=None, random_mirror=False,
min_size=None, max_size=None,
subset='train', max_min_side=None):
assert seed is not None, "Seed must be specified, to synchronize images and segmentation maps"
image, label, imgshape, imgname = _imagenet_load_file(path, epochs=epochs, shuffle=shuffle, seed=seed, subset=subset,
prepare_path=subset is not None)
image = tf.cast(image, tf.float32) / 255.0
if color_transform is not None:
image = color_transform(image)
imgshape = tf.shape(image)
height, width, _ = tf.unstack(imgshape)
smaller_side = tf.to_float(tf.minimum(height, width))
if min_size is not None and max_size is not None:
scale = tf.random_uniform([], minval=tf.to_float(min_size) / smaller_side, maxval=tf.to_float(max_size) / smaller_side, seed=seed)
new_height = tf.to_int32(tf.to_float(height) * scale)
new_width = tf.to_int32(tf.to_float(width) * scale)
image = tf.squeeze(tf.image.resize_bilinear(tf.expand_dims(image, 0), [new_height, new_width]), [0])
else:
scale = tf.constant(1.0)
new_height = height
new_width = width
pad_image = pad_to_ensure_size(image, input_size, input_size)
if not center_crop:
fixed_image = tf.random_crop(pad_image, (input_size, input_size, 3), seed=seed)
else:
if max_min_side is not None:
            min_side = tf.random_uniform([], minval=input_size, maxval=max_min_side, dtype=tf.int32, seed=seed)
            # dd.image.resize expects a numpy array, not a tensor; rescale in-graph
            # so that the smaller side of the padded image equals min_side.
            cur_height, cur_width, _ = tf.unstack(tf.shape(pad_image))
            scale2 = tf.to_float(min_side) / tf.to_float(tf.minimum(cur_height, cur_width))
            pad_image = tf.squeeze(tf.image.resize_bilinear(
                tf.expand_dims(pad_image, 0),
                [tf.to_int32(tf.to_float(cur_height) * scale2),
                 tf.to_int32(tf.to_float(cur_width) * scale2)]), [0])
#height, width, _ = _ImageDimensions(pad_image, static_only=False)
fixed_image = array_ops.slice(pad_image, [new_height//2 - input_size//2,
new_width//2 - input_size//2, 0],
[input_size, input_size, 3])
#fixed_image = array_ops.slice(pad_image, [0, 0, 0],
#[input_size, input_size, 3])
#fixed_image = tf.image.crop_to_bounding_box(pad_image,
#0, 0, input_size, input_size)
if random_mirror:
assert seed is not None
fixed_image = tf.image.random_flip_left_right(fixed_image, seed=seed)
capacity = min_after_dequeue + 3 * batch_size
if shuffle:
batch_image, batch_label, batch_imgshape, batch_imgname = tf.train.shuffle_batch(
[fixed_image, label, imgshape, imgname], batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue, num_threads=num_threads)
else:
batch_image, batch_label, batch_imgshape, batch_imgname = tf.train.batch(
[fixed_image, label, imgshape, imgname], batch_size=batch_size, capacity=capacity,
num_threads=num_threads)
return batch_image, batch_label, batch_imgshape, batch_imgname
def classification_batching(path, batch_size, input_size, epochs=None,
shuffle=True, center_crop=False, min_after_dequeue=250, seed=0,
num_threads=10, color_transform=None, random_mirror=False,
subset='train', max_min_side=None):
assert seed is not None, "Seed must be specified, to synchronize images and segmentation maps"
image, label, imgshape, imgname = _imagenet_load_file(path, epochs=epochs, shuffle=shuffle, seed=seed, subset=subset,
prepare_path=subset is not None)
image = tf.cast(image, tf.float32) / 255.0
if color_transform is not None:
image = color_transform(image)
pad_image = pad_to_ensure_size(image, input_size, input_size)
if not center_crop:
fixed_image = tf.random_crop(pad_image, (input_size, input_size, 3), seed=seed)
else:
if max_min_side is not None:
            min_side = tf.random_uniform([], minval=input_size, maxval=max_min_side, dtype=tf.int32, seed=seed)
            # tf.image.resize_bilinear has no min_side argument and needs a 4-D batch;
            # rescale in-graph so that the smaller side equals min_side.
            cur_height, cur_width, _ = tf.unstack(tf.shape(pad_image))
            scale = tf.to_float(min_side) / tf.to_float(tf.minimum(cur_height, cur_width))
            pad_image = tf.squeeze(tf.image.resize_bilinear(
                tf.expand_dims(pad_image, 0),
                [tf.to_int32(tf.to_float(cur_height) * scale),
                 tf.to_int32(tf.to_float(cur_width) * scale)]), [0])
height, width, _ = _ImageDimensions(pad_image, static_only=False)
fixed_image = array_ops.slice(pad_image, [height//2 - input_size//2,
width//2 - input_size//2, 0],
[input_size, input_size, 3])
#fixed_image = array_ops.slice(pad_image, [0, 0, 0],
#[input_size, input_size, 3])
#fixed_image = tf.image.crop_to_bounding_box(pad_image,
#0, 0, input_size, input_size)
if random_mirror:
assert seed is not None
fixed_image = tf.image.random_flip_left_right(fixed_image, seed=seed)
capacity = min_after_dequeue + 3 * batch_size
if shuffle:
batch_image, batch_label, batch_imgshape, batch_imgname = tf.train.shuffle_batch(
[fixed_image, label, imgshape, imgname], batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue, num_threads=num_threads)
else:
batch_image, batch_label, batch_imgshape, batch_imgname = tf.train.batch(
[fixed_image, label, imgshape, imgname], batch_size=batch_size, capacity=capacity,
num_threads=num_threads)
return batch_image, batch_label, batch_imgshape, batch_imgname
def unlabeled_batching(path, batch_size, input_size, epochs=None,
shuffle=True, center_crop=False, min_after_dequeue=250, seed=0,
num_threads=1, color_transform=None, random_mirror=False,
resize_if_small=False, root_path=None):
assert seed is not None, "Seed must be specified, to synchronize images and segmentation maps"
if root_path is not None:
image, imgshape, imgname = _relpath_no_label_load_file(path, root_path, epochs=epochs, shuffle=shuffle, seed=seed)
else:
image, imgshape, imgname = _abspath_no_label_load_file(path, epochs=epochs, shuffle=shuffle, seed=seed)
image = tf.cast(image, tf.float32) / 255.0
if color_transform is not None:
image = color_transform(image)
if resize_if_small:
ensure_size = resize_to_ensure_size
else:
ensure_size = pad_to_ensure_size
pad_image = ensure_size(image, input_size, input_size)
if not center_crop:
fixed_image = tf.random_crop(pad_image, (input_size, input_size, 3), seed=seed)
else:
height, width, _ = _ImageDimensions(pad_image, static_only=False)
fixed_image = array_ops.slice(pad_image, [height//2 - input_size//2,
width//2 - input_size//2, 0],
[input_size, input_size, 3])
#fixed_image = array_ops.slice(pad_image, [0, 0, 0],
#[input_size, input_size, 3])
#fixed_image = tf.image.crop_to_bounding_box(pad_image,
#0, 0, input_size, input_size)
if random_mirror:
assert seed is not None
fixed_image = tf.image.random_flip_left_right(fixed_image, seed=seed)
capacity = min_after_dequeue + 3 * batch_size
if shuffle:
batch_image, batch_imgshape, batch_imgname = tf.train.shuffle_batch(
[fixed_image, imgshape, imgname], batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue, num_threads=num_threads)
else:
batch_image, batch_imgshape, batch_imgname = tf.train.batch(
[fixed_image, imgshape, imgname], batch_size=batch_size, capacity=capacity,
num_threads=num_threads)
return batch_image, batch_imgshape, batch_imgname
def voc2007_classification_generator_save(which, batch_size, input_size, outer_input_size,
shuffle=True,# seed=0,
color_transform=None, random_mirror=False):
path = os.path.expandvars('$VOC2007_DIR/ImageSets/Main')
# First load image list
fn = os.path.join(path, '{}.txt'.format(which))
imgids = np.genfromtxt(fn, dtype=np.int32)#[:100]
C = np.zeros((imgids.size, len(VOC_CLASSES)), dtype=np.float32)
for c, cls_name in enumerate(VOC_CLASSES):
fn = os.path.join(path, '{}_{}.txt'.format(cls_name, which))
data = np.genfromtxt(fn)#[:100]
C[:, c] = data[:, 1]
# Convert to 0.0, 0.5, 1.0
C = (C + 1) / 2
filenames = [os.path.expandvars('$VOC2007_DIR/JPEGImages/{:06d}.jpg').format(imgid)
for imgid in imgids]
rs = np.random.RandomState(0)
imgs = np.zeros((len(imgids), input_size, input_size, 3), dtype=np.float32)
for i, fn in enumerate(filenames):
if i % 200 == 0:
print(i)
img = dd.image.load(fn)
if color_transform is not None:
img = color_transform(img)
# Resize to smaller side
img = dd.image.resize(img, min_side=input_size)
img = dd.image.crop(img, (input_size, input_size))
imgs[i] = img
dd.io.save('{}.h5'.format(which), dict(data=imgs, labels=C))
if shuffle:
rs = np.random.RandomState()
while True:
II = rs.randint(len(imgs), size=batch_size)
ii, cc = imgs[II], C[II]
if random_mirror and rs.randint(2) == 1:
ii = ii[:, :, ::-1]
yield ii, cc
else:
for i in range(len(imgs)//batch_size):
ss = np.s_[i*batch_size:(i+1)*batch_size]
yield imgs[ss], C[ss]
def voc2007_classification_batching(which, batch_size, input_size,# outer_input_size=None,
shuffle=True, seed=None,
random_mirror=False,
min_scale=None, max_scale=None,
center_crop=False,
ignore_label=None,
min_after_dequeue=250, num_threads=10):
path = os.path.expandvars('$VOC2007_DIR/ImageSets/Main')
# First load image list
fn = os.path.join(path, '{}.txt'.format(which))
imgids = np.genfromtxt(fn, dtype=str)
C = np.zeros((imgids.size, len(VOC_CLASSES)), dtype=np.float32)
for c, cls_name in enumerate(VOC_CLASSES):
fn = os.path.join(path, '{}_{}.txt'.format(cls_name, which))
data = np.genfromtxt(fn)
C[:, c] = data[:, 1]
# Convert to 0.0, 0.5, 1.0
if ignore_label is not None:
# -1 -> 0
# 0 -> ignore_label
# 1 -> 1
C = tf.to_float(C == 1) * 1 + tf.to_float(C == 0) * ignore_label
else:
C = (C + 1) / 2
filenames = [os.path.expandvars('$VOC2007_DIR/JPEGImages/{}.jpg').format(imgid)
for imgid in imgids]
imgname, label = tf.train.slice_input_producer([filenames, C], shuffle=shuffle, seed=seed)
image_content = tf.read_file(imgname)
image = decode_image(image_content, channels=3)
image.set_shape([None, None, 3])
imgshape = tf.shape(image)
height, width, _ = tf.unstack(imgshape)
if min_scale is not None and max_scale is not None:
scale = tf.random_uniform([], minval=min_scale, maxval=max_scale, seed=seed)
new_height = tf.to_int32(tf.to_float(height) * scale)
new_width = tf.to_int32(tf.to_float(width) * scale)
image = tf.squeeze(tf.image.resize_bilinear(tf.expand_dims(image, 0), [new_height, new_width]), [0])
else:
        scale = tf.constant(1.0)
new_height = height
new_width = width
new_imgshape = tf.stack([new_height, new_width, 3])
image = pad_to_ensure_size(image, input_size, input_size)
#new_height = tf.maximum(new_height, input_size)
new_height, new_width, _ = tf.unstack(tf.shape(image))
if not center_crop:
fixed_image = tf.random_crop(image, (input_size, input_size, 3), seed=seed)
else:
fixed_image = array_ops.slice(image, [new_height//2 - input_size//2,
new_width//2 - input_size//2,
0],
[input_size, input_size, 3])
if random_mirror:
fixed_image = tf.image.random_flip_left_right(fixed_image, seed=seed)
fixed_image = tf.to_float(fixed_image) / 255.0
capacity = min_after_dequeue + 3 * batch_size
if shuffle:
batch_image, batch_label, batch_imgshape, batch_imgname, batch_scale = tf.train.shuffle_batch(
[fixed_image, label, new_imgshape, imgname, scale], batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue, num_threads=num_threads, seed=seed)
else:
batch_image, batch_label, batch_imgshape, batch_imgname, batch_scale = tf.train.batch(
[fixed_image, label, new_imgshape, imgname, scale], batch_size=batch_size, capacity=capacity,
num_threads=num_threads)
return batch_image, batch_label, batch_imgshape, batch_imgname, batch_scale
def voc2007_classification_generator(which, batch_size, input_size, outer_input_size=None,
shuffle=True, seed=None,
color_transform=None, random_mirror=False,
min_scale=None, max_scale=None,
return_filenames=False):
path = os.path.expandvars('$VOC2007_DIR/ImageSets/Main')
# First load image list
fn = os.path.join(path, '{}.txt'.format(which))
imgids = np.genfromtxt(fn, dtype=np.int32)#[:100]
C = np.zeros((imgids.size, len(VOC_CLASSES)), dtype=np.float32)
for c, cls_name in enumerate(VOC_CLASSES):
fn = os.path.join(path, '{}_{}.txt'.format(cls_name, which))
data = np.genfromtxt(fn)#[:100]
C[:, c] = data[:, 1]
# Convert to 0.0, 0.5, 1.0
C = (C + 1) / 2
filenames = [os.path.expandvars('$VOC2007_DIR/JPEGImages/{:06d}.jpg').format(imgid)
for imgid in imgids]
rs = np.random.RandomState(seed)
def load_image(imgid):
fn = os.path.expandvars('$VOC2007_DIR/JPEGImages/{:06d}.jpg').format(imgid)
img = dd.image.load(fn)
if color_transform is not None:
img = color_transform(img)
if min_scale is not None and max_scale is not None:
s = rs.uniform(min_scale, max_scale)
img = dd.image.resize_by_factor(img, factor=s)
# Resize to smaller side
img = pad_if_too_small(img, (input_size, input_size))
if outer_input_size is not None:
img = dd.image.resize(img, min_side=outer_input_size)
#img = dd.image.crop(img, (input_size, outer_input_size))
h, w = img.shape[:2]
if shuffle:
dh = rs.randint(h - input_size + 1)
dw = rs.randint(w - input_size + 1)
else:
dh = (h - input_size + 1) // 2
dw = (w - input_size + 1) // 2
img = img[dh:dh+input_size, dw:dw+input_size]
return img
if shuffle:
#rs = np.random.RandomState()
while True:
II = rs.randint(len(imgids), size=batch_size)
ii = np.array([
load_image(imgid) for imgid in imgids[II]
])
#ii, cc = imgs[II], C[II]
cc = C[II]
if random_mirror and rs.randint(2) == 1:
ii = ii[:, :, ::-1]
yield ii, cc
else:
all_II = np.arange(len(imgids))
for i in range(int(np.ceil(len(imgids) / batch_size))):
ss = np.s_[i*batch_size:(i+1)*batch_size]
II = all_II[ss]
xx = np.zeros((batch_size, input_size, input_size, 3), dtype=np.float32)
yy = np.zeros((batch_size, 20), dtype=np.float32)
xx[:len(imgids[II])] = np.array([
load_image(imgid) for imgid in imgids[II]
])
yy[:len(imgids[II])] = C[ss]
if return_filenames:
nn = [j for j in imgids[ss]]
nn += [0] * (batch_size - len(nn))
yield xx, yy, nn
else:
yield xx, yy
def pad_if_too_small(img, shape, value=0.0):
if img.shape[0] >= shape[0] and img.shape[1] >= shape[1]:
return img
else:
img0 = np.full((max(img.shape[0], shape[0]), max(img.shape[1], shape[1])) + img.shape[2:], value, dtype=np.float32)
img0[:img.shape[0], :img.shape[1]] = img
return img0
def voc2007_classification_generator_10crop(which, input_size, outer_input_size=None,
shuffle=True,# seed=0,
color_transform=None, random_mirror=False):
path = os.path.expandvars('$VOC2007_DIR/ImageSets/Main')
# First load image list
fn = os.path.join(path, '{}.txt'.format(which))
imgids = np.genfromtxt(fn, dtype=np.int32)#[:100]
C = np.zeros((imgids.size, len(VOC_CLASSES)), dtype=np.float32)
for c, cls_name in enumerate(VOC_CLASSES):
fn = os.path.join(path, '{}_{}.txt'.format(cls_name, which))
data = np.genfromtxt(fn)#[:100]
C[:, c] = data[:, 1]
# Convert to 0.0, 0.5, 1.0
C = (C + 1) / 2
filenames = [os.path.expandvars('$VOC2007_DIR/JPEGImages/{:06d}.jpg').format(imgid)
for imgid in imgids]
rs = np.random.RandomState(1234)
def load_image(imgid):
fn = os.path.expandvars('$VOC2007_DIR/JPEGImages/{:06d}.jpg').format(imgid)
img = dd.image.load(fn)
if color_transform is not None:
img = color_transform(img)
# Resize to smaller side
img = pad_if_too_small(img, (input_size, input_size))
if outer_input_size is not None:
img = dd.image.resize(img, min_side=outer_input_size)
return img
if shuffle:
assert 0
else:
all_II = np.arange(len(imgids))
for i in range(len(imgids)):
img = load_image(imgids[i])
h0, w0 = img.shape[0]//2 - input_size//2, img.shape[1]//2 - input_size//2
ii = []
ii.append(img[:input_size, :input_size])
ii.append(img[:input_size, -input_size:])
ii.append(img[-input_size:, -input_size:])
ii.append(img[-input_size:, :input_size])
ii.append(img[h0:h0+input_size, w0:w0+input_size])
img = img[:, ::-1]
ii.append(img[:input_size, :input_size])
ii.append(img[:input_size, -input_size:])
ii.append(img[-input_size:, -input_size:])
ii.append(img[-input_size:, :input_size])
ii.append(img[h0:h0+input_size, w0:w0+input_size])
yield np.array(ii), C[[i]]
def voc2007_classification_generator2(which, batch_size, input_size,
outer_input_size,
shuffle=True,# seed=0,
color_transform=None, random_mirror=False):
path = os.path.expandvars('$VOC2007_DIR/ImageSets/Main')
assert which in ['test', 'val']
imgs, C = dd.io.load('{}.h5'.format(which), ['/data', '/labels'])
if shuffle:
rs = np.random.RandomState()
while True:
II = rs.randint(len(imgs), size=batch_size)
ii, cc = imgs[II], C[II]
if random_mirror and rs.randint(2) == 1:
ii = ii[:, :, ::-1]
yield ii, cc
else:
for i in range(len(imgs)//batch_size):
ss = np.s_[i*batch_size:(i+1)*batch_size]
yield imgs[ss], C[ss]
"""
def voc_seg_trainval_batching(tr_path, vl_path, pick_val, batch_size,
input_size, epochs=None, shuffle=True, min_after_dequeue=100,
seed=0, pad255=False, num_threads=1):
assert seed is not None, ("Seed must be specified, to synchronize "
"images and segmentation maps")
tr_image, tr_seg, tr_imgshape, tr_imgname = _voc_seg_load_file(tr_path, epochs=epochs, shuffle=shuffle, seed=seed)
vl_image, vl_seg, vl_imgshape, vl_imgname = _voc_seg_load_file(vl_path, epochs=epochs, shuffle=shuffle, seed=seed)
if 1:
image = tf.cond(pick_val, lambda: vl_image, lambda: tr_image)
imgshape = tf.cond(pick_val, lambda: vl_imgshape, lambda: tr_imgshape)
imgname = tf.cond(pick_val, lambda: vl_imgname, lambda: tr_imgname)
seg = tf.cond(pick_val, lambda: vl_seg, lambda: tr_seg)
else:
image = vl_image
imgshape = vl_imgshape
imgname = vl_imgname
seg = vl_seg
#seg = tf.cast(tf.image.decode_png(seg_content, channels=1), tf.int32)
if pad255:
seg += 1
pad_image = pad_to_ensure_size(image, input_size, input_size)
pad_seg = pad_to_ensure_size(seg, input_size, input_size)
fixed_image = tf.random_crop(pad_image, (input_size, input_size, 3), seed=seed)
fixed_image = tf.cast(fixed_image, tf.float32) / 255.0
if pad255:
fixed_seg = tf.random_crop(tf.cast(pad_seg, tf.int32), (input_size, input_size, 1), seed=seed)
fixed_seg = ((fixed_seg + 255) % 256)
else:
fixed_seg = tf.random_crop(pad_seg, (input_size, input_size, 1), seed=seed)
fixed_seg = tf.cast(fixed_seg, tf.int32)
capacity = min_after_dequeue + 3 * batch_size
if shuffle:
batch_image, batch_seg, batch_imgshape, batch_imgname = tf.train.shuffle_batch(
[fixed_image, fixed_seg, imgshape, imgname], batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue, num_threads=num_threads)
else:
batch_image, batch_seg, batch_imgshape, batch_imgname = tf.train.batch(
[fixed_image, fixed_seg, imgshape, imgname], batch_size=batch_size, capacity=capacity,
num_threads=num_threads)
batch_seg = tf.squeeze(batch_seg, [3])
return batch_image, batch_seg, batch_imgshape, batch_imgname
"""
def _load_mnist(section="training", offset=0, count=None, ret='xy',
x_dtype=np.float64, y_dtype=np.int64, path=None):
"""
Loads MNIST files into a 3D numpy array.
You have to download the data separately from [MNIST]_. It is recommended
to set the environment variable ``MNIST_DIR`` to point to the folder where
    you put the data, so that you don't have to specify the path. On a Linux+bash
setup, this is done by adding the following to your ``.bashrc``::
export MNIST_DIR=/path/to/mnist
Parameters
----------
section : str
Either "training" or "testing", depending on which section you want to
load.
offset : int
Skip this many samples.
count : int or None
Try to load this many samples. Default is None, which loads until the
end.
ret : str
What information to return. See return values.
x_dtype : dtype
Type of samples. If ``np.uint8``, intensities lie in {0, 1, ..., 255}.
If a float type, then intensities lie in [0.0, 1.0].
y_dtype : dtype
Integer type to store labels.
path : str
Path to your MNIST datafiles. The default is ``None``, which will try
to take the path from your environment variable ``MNIST_DIR``. The data
can be downloaded from http://yann.lecun.com/exdb/mnist/.
Returns
-------
images : ndarray
Image data of shape ``(N, 28, 28)``, where ``N`` is the number of
images. Returned if ``ret`` contains ``'x'``.
labels : ndarray
Array of size ``N`` describing the labels. Returned if ``ret``
contains ``'y'``.
Examples
--------
Assuming that you have downloaded the MNIST database and set the
environment variable ``$MNIST_DIR`` point to the folder, this will load all
images and labels from the training set:
    >>> images, labels = _load_mnist('training')  # doctest: +SKIP
Load 100 samples from the testing set:
    >>> sevens = _load_mnist('testing', offset=200, count=100,
    ...                      ret='x')  # doctest: +SKIP
"""
# The files are assumed to have these names and should be found in 'path'
files = {
'training': ('train-images-idx3-ubyte',
'train-labels-idx1-ubyte',
60000),
'testing': ('t10k-images-idx3-ubyte',
't10k-labels-idx1-ubyte',
10000),
}
if count is None:
count = files[section][2] - offset
if path is None:
try:
path = os.environ['MNIST_DIR']
except KeyError:
            raise ValueError("Unspecified path requires the environment "
"variable $MNIST_DIR to be set")
try:
images_fname = os.path.join(path, files[section][0])
labels_fname = os.path.join(path, files[section][1])
except KeyError:
raise ValueError("Data set must be 'testing' or 'training'")
returns = ()
if 'x' in ret:
with open(images_fname, 'rb') as fimg:
magic_nr, size, d0, d1 = struct.unpack(">IIII", fimg.read(16))
fimg.seek(offset * d0 * d1, 1)
images_raw = array("B", fimg.read(count * d0 * d1))
images = np.asarray(images_raw, dtype=x_dtype).reshape(-1, d0, d1)
if x_dtype == np.uint8:
pass # already this type
elif x_dtype in (np.float16, np.float32, np.float64):
images /= 255.0
else:
raise ValueError("Unsupported value for x_dtype")
returns += (images,)
if 'y' in ret:
with open(labels_fname, 'rb') as flbl:
magic_nr, size = struct.unpack(">II", flbl.read(8))
flbl.seek(offset, 1)
labels_raw = array("b", flbl.read(count))
            labels = np.asarray(labels_raw, dtype=y_dtype)
returns += (labels,)
if len(returns) == 1:
return returns[0] # Don't return a tuple of one
else:
return returns
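# Added sketch (not part of the original file): load the first 100 test digits with
# the private loader above (requires $MNIST_DIR to point at the raw IDX files).
def _load_mnist_example():
    x, y = _load_mnist('testing', count=100, x_dtype=np.float32, y_dtype=np.int32)
    # x: (100, 28, 28) floats in [0, 1]; y: (100,) integer labels
    return x, y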
def mnist_batching(batch_size, subset='training', input_size=None,
num_threads=1):
xraw, y = _load_mnist(subset, x_dtype=np.float32, y_dtype=np.int32)
if input_size is None or input_size == xraw.shape[1]:
x = xraw
else:
x = np.zeros((xraw.shape[0], input_size, input_size, 1), dtype=np.float32)
w = (input_size - xraw.shape[1]) // 2
x[:, w:w+xraw.shape[1], w:w+xraw.shape[2]] = xraw[..., np.newaxis]
min_after_dequeue = 10
capacity = min_after_dequeue * 3 + batch_size
x1, y1 = tf.train.slice_input_producer([x, y], shuffle=True)
batch_x, batch_y = tf.train.shuffle_batch([x1, y1], batch_size=batch_size,
capacity=capacity, min_after_dequeue=min_after_dequeue,
num_threads=num_threads)
return batch_x, batch_y
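# Added usage sketch (not part of the original file): consuming the queue-based
# batching helpers above with TF 1.x queue runners.
def _mnist_batching_example():
    batch_x, batch_y = mnist_batching(32, subset='training', input_size=28)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            xs, ys = sess.run([batch_x, batch_y])  # xs: (32, 28, 28), ys: (32,)
        finally:
            coord.request_stop()
            coord.join(threads)
    return xs, ys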
|
// asts/asts.go
// Package asts contains utilities for constructing and manipulating ASTs.
package asts
import (
"fmt"
"github.com/jschaf/bibtex/ast"
"github.com/jschaf/bibtex/token"
"strconv"
"strings"
)
func UnparsedBraceText(s string) *ast.UnparsedText {
return &ast.UnparsedText{
Kind: token.BraceString,
Value: s,
}
}
// BraceTextExpr returns parsed text delimited by braces.
func BraceTextExpr(depth int, ss ...ast.Expr) *ast.ParsedText {
return &ast.ParsedText{
Depth: depth,
Delim: ast.BraceDelimiter,
Values: ss,
}
}
// BraceText returns parsed text delimited by braces.
// Uses the following strategies to convert each string into a text expression:
// - If the string is all whitespace, convert to ast.TextSpace.
// - If the string begins and ends with '$', convert to ast.TextMath.
// - If the string begins with '{' and ends with '}', convert to brace text
// recursively by removing the braces and splitting on space.
// - If the string is ',', convert to ast.TextComma.
// - Otherwise, convert to ast.Text.
func BraceText(depth int, ss ...string) *ast.ParsedText {
xs := make([]ast.Expr, len(ss))
for i, s := range ss {
xs[i] = ParseStringExpr(depth, s)
}
return &ast.ParsedText{
Depth: depth,
Delim: ast.BraceDelimiter,
Values: xs,
}
}
func ParseStringExpr(depth int, s string) ast.Expr {
switch {
case strings.TrimSpace(s) == "":
return WSpace()
case strings.HasPrefix(s, "$") && strings.HasSuffix(s, "$"):
bs := []byte(s)
return Math(string(bs[1 : len(bs)-1]))
case s == "~":
return NBSP()
case strings.HasPrefix(s, "{") && strings.HasSuffix(s, "}"):
bs := []byte(s)
innerString := string(bs[1 : len(bs)-1])
split := strings.Split(innerString, " ")
xs := make([]ast.Expr, len(split)*2-1)
idx := 0
for i, sp := range split {
innerExpr := ParseStringExpr(depth+1, sp)
xs[idx] = innerExpr
idx++
if i < len(split)-1 {
xs[idx] = WSpace()
idx++
}
}
return BraceTextExpr(depth+1, xs...)
case s == ",":
return Comma()
default:
return Text(s)
}
}
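// exampleParseStringExpr is an added sketch (not part of the original file)
// showing the conversion rules implemented by ParseStringExpr; the expected
// output strings follow the ExprString format defined below.
func exampleParseStringExpr() {
	fmt.Println(ExprString(ParseStringExpr(0, "  ")))    // <space>
	fmt.Println(ExprString(ParseStringExpr(0, "$x^2$"))) // $x^2$
	fmt.Println(ExprString(ParseStringExpr(0, "{a b}"))) // ParsedText[1, brace]("a", <space>, "b")
	fmt.Println(ExprString(ParseStringExpr(0, ",")))     // <comma>
	fmt.Println(ExprString(ParseStringExpr(0, "word")))  // "word"
}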
// QuotedTextExpr returns parsed text delimited by quotes.
func QuotedTextExpr(depth int, ss ...ast.Expr) *ast.ParsedText {
return &ast.ParsedText{
Depth: depth,
Delim: ast.QuoteDelimiter,
Values: ss,
}
}
// QuotedText returns parsed text delimited by quotes.
// Uses the following strategies to convert each string into a text expression:
// - If the string is all whitespace, convert to ast.TextSpace.
// - If the string begins and ends with '$', convert to ast.TextMath.
// - If the string begins with '{' and ends with '}', convert to brace text
// recursively by removing the braces and splitting on space.
// - If the string is ',', convert to ast.TextComma.
// - Otherwise, convert to ast.Text.
func QuotedText(depth int, ss ...string) *ast.ParsedText {
xs := make([]ast.Expr, len(ss))
for i, s := range ss {
xs[i] = ParseStringExpr(depth, s)
}
return QuotedTextExpr(depth, xs...)
}
func Text(s string) *ast.Text {
return &ast.Text{Kind: ast.TextContent, Value: s}
}
func WSpace() *ast.Text {
return &ast.Text{Kind: ast.TextSpace}
}
func NBSP() *ast.Text {
return &ast.Text{Kind: ast.TextNBSP}
}
func Math(x string) *ast.Text {
return &ast.Text{Kind: ast.TextMath, Value: x}
}
func Comma() *ast.Text {
return &ast.Text{Kind: ast.TextComma}
}
func UnparsedText(s string) ast.Expr {
return &ast.UnparsedText{
Kind: token.String,
Value: s,
}
}
func Ident(s string) ast.Expr {
return &ast.Ident{
Name: s,
Obj: nil,
}
}
func Concat(x, y ast.Expr) ast.Expr {
return &ast.ConcatExpr{X: x, Y: y}
}
func ExprString(x ast.Expr) string {
switch v := x.(type) {
case *ast.Ident:
return "Ident(" + v.Name + ")"
case *ast.Number:
return "Number(" + v.Value + ")"
case *ast.UnparsedText:
if v.Kind == token.String {
return "UnparsedText(\"" + v.Value + "\")"
} else {
return "UnparsedText({" + v.Value + "})"
}
case *ast.Text:
switch v.Kind {
case ast.TextSpace:
return "<space>"
case ast.TextNBSP:
return "<NBSP>"
case ast.TextHyphen:
return "<hyphen>"
case ast.TextComma:
return "<comma>"
case ast.TextMath:
return "$" + v.Value + "$"
case ast.TextContent:
return fmt.Sprintf("%q", v.Value)
default:
return "Text[" + v.Kind.String() + "](" + v.Value + ")"
}
case *ast.ParsedText:
sb := strings.Builder{}
delim := "quote"
if v.Delim == ast.BraceDelimiter {
delim = "brace"
}
sb.WriteString(fmt.Sprintf("ParsedText[%d, %s](", v.Depth, delim))
for i, val := range v.Values {
sb.WriteString(ExprString(val))
if i < len(v.Values)-1 {
sb.WriteString(", ")
}
}
sb.WriteString(")")
return sb.String()
case *ast.ConcatExpr:
return ExprString(v.X) + " # " + ExprString(v.Y)
default:
return fmt.Sprintf("UnknownExpr(%v)", v)
}
}
func WithBibType(s string) func(decl *ast.BibDecl) {
return func(b *ast.BibDecl) {
b.Type = s
}
}
func WithBibKeys(ts ...string) func(decl *ast.BibDecl) {
return func(b *ast.BibDecl) {
if len(ts) > 0 {
b.Key = &ast.Ident{Name: ts[0]}
ts = ts[1:]
}
for _, k := range ts {
b.ExtraKeys = append(b.ExtraKeys, &ast.Ident{Name: k})
}
}
}
func WithBibTags(key string, val ast.Expr, rest ...interface{}) func(decl *ast.BibDecl) {
if len(rest)%2 != 0 {
panic("WithBibTags must have even number of strings for key-val pairs")
}
for i := 0; i < len(rest); i += 2 {
k := rest[i]
v := rest[i+1]
if _, ok := k.(string); !ok {
panic("need string at index: " + strconv.Itoa(i))
}
if _, ok := v.(ast.Expr); !ok {
panic(fmt.Sprintf("need ast.Expr at index: %d of WithBibTags, got: %v", i+1, v))
}
}
return func(b *ast.BibDecl) {
b.Tags = append(b.Tags, &ast.TagStmt{
Name: key,
RawName: key,
Value: val,
})
for i := 0; i < len(rest); i += 2 {
k, v := rest[i].(string), rest[i+1].(ast.Expr)
tag := &ast.TagStmt{
Name: k,
RawName: k,
Value: v,
}
b.Tags = append(b.Tags, tag)
}
}
}
|
const { DropkiqEngine } = require('dropkiq')
import { BoundElement } from './BoundElement'
import tippy from 'tippy.js';
import { v4 as uuidv4 } from 'uuid';
const createDOMPurify = require('dompurify');
const DOMPurify = createDOMPurify(window);
enum ColumnType {
Boolean = 'ColumnTypes::Boolean',
DateTime = 'ColumnTypes::DateTime',
HasMany = 'ColumnTypes::HasMany',
HasOne = 'ColumnTypes::HasOne',
Numeric = 'ColumnTypes::Numeric',
String = 'ColumnTypes::String',
Text = 'ColumnTypes::Text',
YAML = 'ColumnTypes::YAML'
}
interface Suggestion {
active?: boolean
foreign_table_name: string | null
hint?: string
iconImageURLForSuggestion: string
insertionTemplate?: string
name: string
nameWithoutPrefix: string
prefix?: string
preview?: string
selectRange?: Array<number>
template: string
type: ColumnType
}
interface DropkiqOptions {
iframe?: HTMLIFrameElement
onRender?: (renderedDocument: string) => void
showHints?: () => boolean
showPreviews?: () => boolean
suggestionFilter?: (suggestions: Suggestion[]) => void
}
export class DropkiqUI {
public element: any;
public isCodeMirror: boolean;
public isAceEditor: boolean;
public boundElement: BoundElement;
public schema: object;
public context: object;
public scope: object;
public licenseKey: string;
public options: DropkiqOptions;
public showPreviews: Function;
public showHints: Function;
public suggestionFilter: Function;
public onRender: Function;
public iframe: any;
public document: any;
public window: any;
public pathSchema: [];
private dropkiqEngine: any;
private suggestionsArray: Array<Suggestion>;
private result: object;
private caretOffset: object;
private $ul: any;
private $header: any;
private $div: any;
private $poweredByDropkiq: any;
private $paywall: any;
private documentCallback: any;
constructor(element, schema: object, context: object, scope: object, licenseKey: string = "", options: DropkiqOptions = {}) {
this.schema = schema;
this.context = context;
this.scope = scope;
this.licenseKey = licenseKey;
this.options = options;
this.showPreviews = (typeof(options['showPreviews']) === 'function' ? options['showPreviews'] : () => true);
this.showHints = (typeof(options['showHints']) === 'function' ? options['showHints'] : () => true);
this.suggestionFilter = (typeof(options['suggestionFilter']) === 'function' ? options['suggestionFilter'] : () => {});
this.onRender = (typeof(options['onRender']) === 'function' ? options['onRender'] : () => {});
this.iframe = options['iframe'];
if(this.iframe){
this.window = this.iframe.contentWindow;
this.document = this.window.document;
} else {
this.window = window;
this.document = document;
}
this.element = element;
if(!this.window.dropkiqUIInstances){
this.window.dropkiqUIInstances = {};
}
let dropkiqUUID;
if(this.element.dataset){
dropkiqUUID = this.element.dataset.dropkiqUUID;
}
let existingInstance = this.window.dropkiqUIInstances[dropkiqUUID];
if(existingInstance){
return existingInstance;
} else {
if(this.element.dataset){
dropkiqUUID = uuidv4();
this.element.dataset.dropkiqUUID = dropkiqUUID;
this.window.dropkiqUIInstances[dropkiqUUID] = this;
}
}
this.isCodeMirror = typeof(this.element['doc']) === 'object';
this.isAceEditor = typeof(this.element['renderer']) === 'object';
this.boundElement = new BoundElement(this.element, this.window, this.document);
this.dropkiqEngine = new DropkiqEngine("", 0, schema, context, scope, this.licenseKey, {suggestionFilter: this.suggestionFilter});
this.suggestionsArray = [];
this.result = {};
this.caretOffset = {};
this.pathSchema = [];
this.$poweredByDropkiq = document.createElement("div");
this.$poweredByDropkiq.style.display = "none";
this.$poweredByDropkiq.style.padding = "5px";
this.$poweredByDropkiq.style.height = "24px";
this.$poweredByDropkiq.style.color = "#666666";
this.$poweredByDropkiq.style['font-size'] = "10px";
this.$poweredByDropkiq.style.background = "rgba(240,240,240,0.9)"
this.$poweredByDropkiq.style['text-align'] = "right"
let poweredByText = document.createTextNode("Powered by");
let $dropkiqImg = document.createElement("img");
$dropkiqImg.setAttribute('src', "https://app.dropkiq.com/plugin/dropkiq-sm.png")
$dropkiqImg.style.width = "48px";
$dropkiqImg.style.height = "10px";
$dropkiqImg.style['margin-left'] = "3px";
this.$poweredByDropkiq.appendChild(poweredByText);
this.$poweredByDropkiq.appendChild($dropkiqImg);
this.$paywall = document.createElement("div");
this.$paywall.style['font-size'] = "14px"
this.$paywall.style.padding = "10px"
this.$paywall.style['color'] = "#666666"
    this.$paywall.style.display = "none";
this.$ul = document.createElement("ul");
this.$header = document.createElement("div");
this.$header.setAttribute('class', 'dropkiq-header');
this.$div = document.createElement("div")
this.$div.setAttribute('id', 'dropkiq-autosuggest-menu');
this.$div.appendChild(this.$header);
this.$div.appendChild(this.$ul);
this.$div.appendChild(this.$paywall);
this.$div.appendChild(this.$poweredByDropkiq);
document.body.appendChild(this.$div);
let that = this;
that.documentCallback = function(){
that.closeMenu();
}
let scrollCallback = function(){
that.closeMenu();
}
let keydownCallback = function(e) {
if(that.suggestionsArray.length){
let suggestion;
switch (e.keyCode) {
case 27: // Esc key
that.closeMenu();
e.preventDefault();
return false;
break;
case 38: // up arrow
that.scrollToPrevious();
e.preventDefault();
return false;
break;
case 40: // down arrow
that.scrollToNext();
e.preventDefault();
return false;
break;
case 9: // tab
suggestion = that.suggestionsArray.find(function(suggestion){
return suggestion['active'];
});
that.insertSuggestion(suggestion);
e.preventDefault();
return false;
break;
case 13: // enter key
suggestion = that.suggestionsArray.find(function(suggestion){
return suggestion['active'];
});
that.insertSuggestion(suggestion);
e.preventDefault();
return false;
break;
default:
break;
}
}
// Auto-complete {{}} and {%%}
setTimeout(function(){
let result = that.boundElement.caretPositionWithDocumentInfo();
let selectionStart = result['selectionStart'];
let leftText = result['leftText'];
let rightText = result['rightText'];
let leftTwoCharacters = leftText.slice(-2);
let closeTagPattern = /^(\s+)?\}(.+)?/;
if (e.keyCode == 219 && e.shiftKey && (leftTwoCharacters[1] == "{" || leftTwoCharacters == "{")){
let textNode = that.boundElement.insertTextAtCaret("}");
that.boundElement.setCaretPosition(selectionStart, 0, 0, textNode, "");
that.element.focus();
} else if (e.keyCode == 53 && e.shiftKey && leftTwoCharacters == "{%" && closeTagPattern.test(rightText)){
let textNode = that.boundElement.insertTextAtCaret("%");
that.boundElement.setCaretPosition(selectionStart, 0, 0, textNode, "");
that.element.focus();
}
}, 25);
findResultsCallback(e);
};
let findResultsCallback = function(e){
if(typeof(e.stopImmediatePropagation) === 'function'){
e.stopImmediatePropagation();
}
setTimeout(function(){
that.findResults.apply(that);
}, 25);
}
let onBlurCallback = function(e){
let sel = that.window.getSelection();
let range = sel.getRangeAt(0);
that.boundElement.setExpiringCachedOnBlurRange(range);
}
if(this.isCodeMirror){
this.element.on('keydown', function(cm, e){ keydownCallback(e); });
this.element.on("mousedown", function(cm, e){ findResultsCallback(e); });
this.element.on("focus", function(cm, e){ findResultsCallback(e); });
this.element.on("blur", function(em, e){ onBlurCallback(e); });
this.element.on("scroll", function(em, e){ scrollCallback(); });
} else if(this.isAceEditor){
this.element.textInput.getElement()
.addEventListener('keydown', keydownCallback);
this.element.on("click", findResultsCallback);
this.element.on("focus", findResultsCallback);
this.element.on("blur", onBlurCallback);
this.element.session.on("changeScrollTop", scrollCallback);
this.element.session.on("changeScrollLeft", scrollCallback);
} else {
this.element.addEventListener('keydown', keydownCallback);
this.element.addEventListener("click", findResultsCallback);
this.element.addEventListener("focus", findResultsCallback);
this.element.addEventListener("blur", onBlurCallback);
this.element.addEventListener("scroll", scrollCallback);
}
}
public updateScope(scope: object) {
this.scope = scope;
this.dropkiqEngine.updateScope(this.scope);
}
public registerFilter(name: string, filter: Function, template: string, selectionRange: Array<number>, hint?: string){
this.dropkiqEngine.registerFilter(name, filter, template, selectionRange, hint);
}
public menuIsOpen(){
return (this.suggestionsArray.length > 0);
}
public closeMenu(){
this.removeDocumentEventListeners();
this.suggestionsArray = [];
this.renderSuggestions();
}
private removeDocumentEventListeners(){
document.removeEventListener('click', this.documentCallback);
if(this.document && this.document !== document){
this.document.removeEventListener('click', this.documentCallback);
}
}
private renderSuggestions(){
let prefix = this.result['prefix'];
this.$paywall.innerHTML = '';
this.$paywall.style.display = 'none';
this.$header.innerHTML = '';
this.$header.style.display = 'none';
this.$poweredByDropkiq.style.display = "none";
if(!this.dropkiqEngine.authorizer.authorized()){
this.$poweredByDropkiq.style.display = "block";
}
let lastPathNode;
if(this.pathSchema){
lastPathNode = this.pathSchema[this.pathSchema.length-1];
}
if(lastPathNode && lastPathNode.type === "ColumnTypes::HasOne"){
let imgUrl = "https://app.dropkiq.com/plugin/object.png";
let $icon = document.createElement("img");
$icon.setAttribute('src', imgUrl);
$icon.setAttribute('class', 'icon');
$icon.setAttribute('width', '16px');
$icon.setAttribute('height', '16px');
let $text = document.createElement("span");
$text.textContent = lastPathNode.name;
this.$header.appendChild($icon);
this.$header.appendChild($text);
this.$header.style.display = "block";
}
this.$div.style.top = `${this.caretOffset['top']}px`;
this.$div.style.left = `${this.caretOffset['left']}px`;
if(this.suggestionsArray.length){
this.$div.style.display = 'block';
} else {
this.$div.style.display = 'none';
}
this.$ul.innerHTML = '';
let that = this;
this.suggestionsArray.forEach(function(suggestion){
let $li = document.createElement("li");
let imgUrl = suggestion['iconImageURLForSuggestion'];
let $icon = document.createElement("img");
$icon.setAttribute('src', imgUrl);
$icon.setAttribute('class', 'icon');
$icon.setAttribute('width', '16px');
$icon.setAttribute('height', '16px');
let $entire = document.createElement("div");
$entire.setAttribute('class', "first-line");
let $extra = document.createElement("div");
$extra.setAttribute('class', 'extra');
let $remaining = document.createElement("span");
let $arrowSpan = document.createElement("img");
$arrowSpan.setAttribute('class', 'right-arrow');
$arrowSpan.setAttribute('src', "https://app.dropkiq.com/plugin/next-level.png");
$entire.appendChild($icon);
$entire.appendChild($arrowSpan);
if(prefix){
let $strong = document.createElement("strong");
$strong.textContent = prefix;
let suggestionName = suggestion['name'];
$remaining.textContent = (suggestionName.slice(prefix.length, suggestionName.length));
$entire.appendChild($strong);
} else {
$remaining.textContent = suggestion['name'];
}
$entire.appendChild($remaining);
$li.appendChild($entire);
$li.setAttribute('title', that.suggestionTitleText(suggestion))
if(suggestion['hint'] && that.showHints()){
let $hintSpan = document.createElement("div");
$hintSpan.setAttribute('class', 'hint-icon');
$hintSpan.setAttribute("data-tippy-content", suggestion['hint']);
$hintSpan.setAttribute("title", "");
let imgUrl = "https://app.dropkiq.com/plugin/question-circle.png";
let $hint = document.createElement("img");
$hint.setAttribute('src', imgUrl);
$hintSpan.appendChild($hint);
$li.appendChild($hintSpan);
}
if(suggestion['preview'] && that.showPreviews()){
let $head = document.createElement("p")
$head.textContent = "OUTPUT";
let $samp = document.createElement("div");
$samp.innerHTML = DOMPurify.sanitize(suggestion['preview']);
$extra.appendChild($head);
$extra.appendChild($samp);
$li.appendChild($extra);
}
if(suggestion['active']){$li.classList.add("active");}
that.$ul.appendChild($li);
$li.addEventListener('click', function(e){
that.insertSuggestion(suggestion);
});
});
let activeLi = this.$ul.querySelector('.active');
if(activeLi){
this.$ul.scrollTop = (activeLi.offsetTop - 50);
}
if(lastPathNode && lastPathNode.type === "ColumnTypes::HasOne" && !this.dropkiqEngine.authorizer.authorized()){
this.$ul.innerHTML = '';
this.$paywall.style.display = 'block';
let previewText = document.createElement('p');
let dropkiqSuggestion = this.suggestionsArray[0];
if(dropkiqSuggestion){
previewText.textContent = dropkiqSuggestion['hint'];
}
this.$paywall.appendChild(previewText);
let purchaseLinkP = document.createElement('p');
let purchaseLink = document.createElement('a');
purchaseLink.textContent = "Purchase to unlock"
purchaseLink.setAttribute('href', "http://dropkiq.com")
purchaseLinkP.appendChild(purchaseLink)
this.$paywall.appendChild(purchaseLinkP);
}
that.removeDocumentEventListeners();
setTimeout(function(){
document.addEventListener('click', that.documentCallback);
if(that.document && that.document !== document){
that.document.addEventListener('click', that.documentCallback);
}
}, 100);
tippy('.hint-icon');
}
private findResults(){
let result = this.boundElement.caretPositionWithDocumentInfo();
this.caretOffset = this.boundElement.getCaretPosition();
if(this.iframe){
var iframeRect = this.iframe.getBoundingClientRect();
this.caretOffset['top'] = (this.caretOffset['top'] + iframeRect.top);
this.caretOffset['left'] = (this.caretOffset['left'] + iframeRect.left);
}
try {
this.result = this.dropkiqEngine.update(result['allText'], result['selectionStart']);
} catch(error) {
this.closeMenu();
if (error.name === "ParseError") {
return false;
} else if (error.name === "RenderError") {
return false;
} else {
throw error;
}
}
this.onRender(this.result['renderedDocument']);
this.pathSchema = this.result['pathSchema'];
let emptyArray: Array<Object> = [];
this.suggestionsArray = this.result['suggestionsArray'] || emptyArray;
if(this.suggestionsArray.length > 0){
this.suggestionsArray = this.suggestionsArray.sort((a, b) => (a.name > b.name) ? 1 : -1);
this.suggestionsArray[0]['active'] = true;
}
this.renderSuggestions();
};
private insertSuggestion(suggestion){
let prefix = this.result['prefix'];
let suggestionText;
let caretPositionWithDocumentInfo;
if(suggestion.type === "ColumnTypes::Filter"){
caretPositionWithDocumentInfo = this.boundElement.caretPositionWithDocumentInfo();
suggestionText = suggestion['insertionTemplate'];
} else {
suggestionText = suggestion['name'];
}
let textToEnter = suggestionText.slice(prefix.length, suggestionText.length);
if(suggestion.type === "ColumnTypes::HasOne"){
textToEnter = (textToEnter + ".");
}
let textNode = this.boundElement.insertTextAtCaret(textToEnter);
if(suggestion.type === "ColumnTypes::Filter"){
let startSelect = suggestion['selectRange'][0];
let endSelect = suggestion['selectRange'][1];
this.boundElement.setCaretPosition(caretPositionWithDocumentInfo['selectionStart'], startSelect, endSelect, textNode, prefix);
}
this.boundElement.setFocus();
this.closeMenu();
let that = this;
setTimeout(function(){
that.findResults.apply(that);
}, 25);
};
private scrollToNext(){
let activeSuggestion = this.suggestionsArray.find(function(suggestion, index){
return suggestion['active'];
});
let activeIndex = this.suggestionsArray.indexOf(activeSuggestion);
this.suggestionsArray[activeIndex]['active'] = false;
if(this.suggestionsArray[activeIndex+1]){
this.suggestionsArray[activeIndex+1]['active'] = true;
} else {
this.suggestionsArray[0]['active'] = true;
}
this.renderSuggestions();
}
private scrollToPrevious(){
let activeSuggestion = this.suggestionsArray.find(function(suggestion, index){
return suggestion['active'];
});
let activeIndex = this.suggestionsArray.indexOf(activeSuggestion);
this.suggestionsArray[activeIndex]['active'] = false;
if(this.suggestionsArray[activeIndex-1]){
this.suggestionsArray[activeIndex-1]['active'] = true;
} else {
this.suggestionsArray[this.suggestionsArray.length-1]['active'] = true;
}
this.renderSuggestions();
}
private suggestionTitleText(suggestion): string{
let suggestionTexts = [suggestion['name']];
if(suggestion.preview){
suggestionTexts.push(`**OUTPUT** ${suggestion.preview}`)
}
if(suggestion['hint']){
suggestionTexts.push(`**HINT** ${suggestion['hint']}`)
}
return suggestionTexts.join(" ");
}
}
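// Added usage sketch (not part of the original file). Assumes a plain <textarea>
// element plus schema/context/scope objects shaped for DropkiqEngine; the selector
// and option values here are hypothetical.
export function exampleDropkiqUISetup(schema: object, context: object, scope: object): DropkiqUI {
  const textarea = document.querySelector('textarea#liquid-editor');
  return new DropkiqUI(textarea, schema, context, scope, "", {
    showHints: () => true,
    showPreviews: () => true,
    onRender: (renderedDocument: string) => console.log(renderedDocument),
  });
}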
|
/**
* The type Provider metamodel generator.
*
* @author Christos Tsakostas
*/
public class ProviderMetamodelGenerator extends AbstractMetamodelGenerator {
// ===============================================================================================
// DEPENDENCIES
// ===============================================================================================
private final PackageName rootPackageName;
private final ContextName contextName;
private final ProviderCollectionGenerator providerCollectionGenerator;
private final ProviderDetailGenerator providerDetailGenerator;
// ===============================================================================================
// CONSTRUCTOR(S)
// ===============================================================================================
/**
* Instantiates a new Provider metamodel generator.
*
* @param generationPath the generation path
* @param rootPackageName the root package name
* @param contextName the context name
* @param providerCollectionGenerator the provider collection generator
* @param providerDetailGenerator the provider detail generator
*/
public ProviderMetamodelGenerator(
Path generationPath,
PackageName rootPackageName,
ContextName contextName,
ProviderCollectionGenerator providerCollectionGenerator,
ProviderDetailGenerator providerDetailGenerator) {
super(generationPath);
this.rootPackageName = rootPackageName;
this.contextName = contextName;
this.providerCollectionGenerator = providerCollectionGenerator;
this.providerDetailGenerator = providerDetailGenerator;
}
// ===============================================================================================
// GETTERS
// ===============================================================================================
/**
* Gets root package name.
*
* @return the root package name
*/
public PackageName getRootPackageName() {
return rootPackageName;
}
/**
* Gets context name.
*
* @return the context name
*/
public ContextName getContextName() {
return contextName;
}
// ===============================================================================================
// OVERRIDES
// ===============================================================================================
@Override
public void generate(Set<MetamodelRepository<?>> modelRepositories) {
CoreRegistry.getMetamodelRepositoryResolver()
.resolve(modelRepositories, ProviderMetamodelRepository.class)
.getItems()
.forEach(
provider -> {
switch (provider.getProviderType()) {
case DETAIL:
providerDetailGenerator.generate(
provider, providerExportInfo(getGenerationPath(), provider));
break;
case COLLECTION:
providerCollectionGenerator.generate(
provider, providerExportInfo(getGenerationPath(), provider));
break;
default:
throw new IllegalStateException(
String.format("Unknown provider type=%s", provider.getProviderType()));
}
});
}
// ===============================================================================================
// PRIVATE
// ===============================================================================================
private ExportInfo providerExportInfo(Path generationPath, Provider provider) {
return ExportInfo.file(
Paths.get(
generationPath.toString(),
TextConverter.toLowerUnderscore(provider.getFeatureName().getText()),
DartFolderFileConstants.PROVIDERS),
String.format(
"%s%s",
TextConverter.toUpperCamel(provider.getName().getText()),
DartFolderFileConstants.DART_POSTFIX));
}
} |
/**
* Take the waste pile and turn it into the deck for the next round.
*
* @return {@code false} iff the game is over, otherwise {@code true}
*/
public boolean restartDeck() {
Preconditions.checkState(deck.isEmpty());
++passes;
if (isGameOver()) {
return doGameOver();
}
deck.addAll(Collections2.transform(waste, new Function<Card, Card>() {
public Card apply(Card input) {
input.flip();
return input;
}
}));
waste.clear();
didChange = false;
return true;
} |
/**
* Represents a highlight draped over the terrain.
*/
public class Highlight extends TriMesh
{
/** The position of the center of the highlight. */
public float x, y;
/** The layer of the highlight. */
public byte layer = 2;
/** If true, the highlight will be over pieces occupying the tile. */
public boolean overPieces;
/** If true, the highlight will be flat. */
public boolean flatten;
/** Whether or not the user is hovering over the highlight. */
public boolean hover;
/** Whether or not the user *can* hover over it. */
public boolean hoverable;
/** A specified height for the highlight. */
public int minElev = Integer.MIN_VALUE;
protected Highlight (
int x, int y, boolean overPieces, boolean flatten, int minElev)
{
this((x + 0.5f) * TILE_SIZE, (y + 0.5f) * TILE_SIZE, TILE_SIZE,
TILE_SIZE, true, overPieces, flatten, minElev);
}
protected Highlight (
int x, int y, boolean overPieces, boolean flatten, byte layer)
{
this((x + 0.5f) * TILE_SIZE, (y + 0.5f) * TILE_SIZE, TILE_SIZE,
TILE_SIZE, true, overPieces, flatten, layer, Integer.MIN_VALUE);
}
protected Highlight (float x, float y, float width, float height)
{
this(x, y, width, height, false, false, false, Integer.MIN_VALUE);
}
protected Highlight (float x, float y, float width, float height,
boolean onTile, boolean overPieces, boolean flatten, int minElev)
{
this(x, y, width, height, onTile,
overPieces, flatten, (byte)2, minElev);
}
protected Highlight (
float x, float y, float width, float height, boolean onTile,
boolean overPieces, boolean flatten, byte layer, int minElev)
{
super("highlight");
this.x = x;
this.y = y;
this.layer = layer;
this.overPieces = overPieces;
this.flatten = flatten;
this.minElev = minElev;
_width = width;
_height = height;
_onTile = onTile;
setLightCombineMode(LightState.OFF);
setRenderQueueMode(Renderer.QUEUE_TRANSPARENT);
setRenderState(RenderUtil.overlayZBuf);
setRenderState(RenderUtil.blendAlpha);
setRenderState(RenderUtil.backCull);
// set the vertices, which change according to position and terrain
if (_onTile) {
_vwidth = _vheight = BangBoard.HEIGHTFIELD_SUBDIVISIONS + 1;
} else {
_vwidth = (int)FastMath.ceil(_width / SUB_TILE_SIZE) + 2;
_vheight = (int)FastMath.ceil(_height / SUB_TILE_SIZE) + 2;
}
setVertexBuffer(0, BufferUtils.createFloatBuffer(
_vwidth * _vheight * 3));
// set the texture coords, which change for highlights not aligned
// with tiles
if (_onTile) {
if (_htbuf == null) {
_htbuf = BufferUtils.createFloatBuffer(
_vwidth * _vheight * 2);
float step = 1f / BangBoard.HEIGHTFIELD_SUBDIVISIONS;
for (int iy = 0; iy < _vheight; iy++) {
for (int ix = 0; ix < _vwidth; ix++) {
_htbuf.put(ix * step);
_htbuf.put(iy * step);
}
}
}
setTextureBuffer(0, _htbuf);
} else {
setTextureBuffer(0, BufferUtils.createFloatBuffer(
_vwidth * _vheight * 2));
}
setIndexBuffer(0, BufferUtils.createIntBuffer(
(_vwidth - 1) * (_vheight - 1) * 6));
// update the vertices, indices, and possibly the texture coords
setModelBound(new BoundingBox());
updateVertices();
}
/**
* Returns the x tile coordinate of this highlight.
*/
public int getTileX ()
{
return (int)(x / TILE_SIZE);
}
/**
* Returns the y tile coordinate of this highlight.
*/
public int getTileY ()
{
return (int)(y / TILE_SIZE);
}
/**
* Sets the position of this highlight in tile coordinates and updates
* it.
*/
public void setPosition (int x, int y)
{
setPosition((x + 0.5f) * TILE_SIZE, (y + 0.5f) * TILE_SIZE);
}
/**
* Sets the position of this highlight in world coordinates and
* updates it.
*/
public void setPosition (float x, float y)
{
this.x = x;
this.y = y;
updateVertices();
}
/**
* Sets the default and hover colors for this highlight.
*/
public void setColors (ColorRGBA defaultColor, ColorRGBA hoverColor)
{
_defaultColor = defaultColor;
_hoverColor = hoverColor;
updateHoverState();
}
/**
* Sets the default and hover textures for this highlight.
*/
public void setTextures (
TextureState defaultTexture, TextureState hoverTexture)
{
_defaultTexture = defaultTexture;
_hoverTexture = hoverTexture;
updateHoverState();
}
/**
* Sets the hover state of this highlight.
*/
public void setHover (boolean hover)
{
this.hover = hover;
updateHoverState();
}
/**
* Sets whether this highlight has normals.
*/
public void setHasNormals (boolean normals)
{
if (normals && getNormalBuffer(0) == null) {
setNormalBuffer(0, BufferUtils.createFloatBuffer(
_vwidth * _vheight * 3));
updateVertices();
} else {
setNormalBuffer(0, null);
}
}
/**
* Updates the vertices of the highlight to reflect a change in
* position or in the underlying terrain.
*/
public void updateVertices ()
{
if (_board == null) {
return;
}
FloatBuffer vbuf = getVertexBuffer(0),
nbuf = getNormalBuffer(0);
IntBuffer ibuf = getIndexBuffer(0);
ibuf.rewind();
// if we're putting highlights over pieces and there's a piece
// here, raise the highlight above it and make the center of the
// highlight its origin
int tx = getTileX(), ty = getTileY();
Vector3f offset = null;
getLocalTranslation().set(0f, 0f, 0f);
float height = 0f;
boolean flat = flatten && (_board.isBridge(tx, ty) ||
!_board.isTraversable(tx, ty));
int belev = _board.getElevation(tx, ty);
if (flat) {
if (minElev > Integer.MIN_VALUE) {
belev = minElev;
}
int maxelev = _board.getMaxHeightfieldElevation(tx, ty);
height = (Math.max(minElev, Math.max(belev, maxelev)) * _elevationScale);
} else if (_onTile && overPieces) {
int helev = _board.getHeightfieldElevation(tx, ty);
if (belev > helev) {
offset = new Vector3f(x, y, helev * _elevationScale);
getLocalTranslation().set(x, y, belev * _elevationScale);
}
}
float x0 = x - _width/2, y0 = y - _height/2;
int sx0 = (int)(x0 / SUB_TILE_SIZE),
sy0 = (int)(y0 / SUB_TILE_SIZE);
Vector3f vertex = new Vector3f();
for (int sy = sy0, sy1 = sy0 + _vheight, idx = 0; sy < sy1; sy++) {
for (int sx = sx0, sx1 = sx0 + _vwidth; sx < sx1; sx++) {
// set the normal if required
if (nbuf != null) {
if (flat) {
BufferUtils.setInBuffer(Vector3f.UNIT_Z, nbuf, idx);
} else {
getHeightfieldNormal(sx, sy, vertex);
BufferUtils.setInBuffer(vertex, nbuf, idx);
}
}
// set the vertex
getHeightfieldVertex(sx, sy, vertex);
if (flat) {
vertex.z = height;
} else {
if (offset != null) {
vertex.subtractLocal(offset);
}
}
vertex.z += layer * LAYER_OFFSET;
BufferUtils.setInBuffer(vertex, vbuf, idx++);
// update the index buffer according to the diagonalization
// toggles set by the splat blocks
if (sy == sy0 || sx == sx0) {
continue;
}
int ur = (sy-sy0)*_vwidth + (sx-sx0),
ul = ur - 1, lr = ur - _vwidth, ll = lr - 1;
if (_diags.length <= sy+1) {
log.warning("Attempting to access _diags out of range",
"_diags.length", _diags.length, "sy", sy, "sy0", sy0,
"sy1", sy1);
ibuf.put(ll); ibuf.put(ur); ibuf.put(ul);
ibuf.put(ll); ibuf.put(lr); ibuf.put(ur);
continue;
} else if (_diags[sy+1].length <= sx+1) {
log.warning("Attempting to access _diags out of range",
"_diags[sy+1].length", _diags[sy+1].length, "sx", sx,
"sx0", sx0, "sx1", sx1);
ibuf.put(ll); ibuf.put(ur); ibuf.put(ul);
ibuf.put(ll); ibuf.put(lr); ibuf.put(ur);
continue;
}
if (_diags[sy+1][sx+1]) {
ibuf.put(ul); ibuf.put(ll); ibuf.put(lr);
ibuf.put(ul); ibuf.put(lr); ibuf.put(ur);
} else {
ibuf.put(ll); ibuf.put(ur); ibuf.put(ul);
ibuf.put(ll); ibuf.put(lr); ibuf.put(ur);
}
}
}
updateModelBound();
setIsCollidable(flat || offset != null);
if (isCollidable()) {
updateCollisionTree();
}
// if the highlight is aligned with a tile, we're done; otherwise,
// we must update the texture coords as well
if (_onTile || flat) {
return;
}
FloatBuffer tbuf = getTextureBuffer(0, 0);
Vector2f tcoord = new Vector2f();
float sstep = SUB_TILE_SIZE / _width,
tstep = SUB_TILE_SIZE / _height,
s0 = (sx0 * SUB_TILE_SIZE - x0) / _width,
t0 = (sy0 * SUB_TILE_SIZE - y0) / _height;
for (int iy = 0, idx = 0; iy < _vheight; iy++) {
for (int ix = 0; ix < _vwidth; ix++) {
tcoord.set(s0 + ix * sstep, t0 + iy * tstep);
BufferUtils.setInBuffer(tcoord, tbuf, idx++);
}
}
}
/**
* Updates the state associated with the hover status.
*/
protected void updateHoverState ()
{
// here, we set by reference rather than by value, because the default color
// may be one of our special "throbbing" colors
setDefaultColor(hover ? _hoverColor : _defaultColor);
setRenderState(hover ? _hoverTexture : _defaultTexture);
updateRenderState();
}
@Override // documentation inherited
protected void setParent (Node parent)
{
super.setParent(parent);
updateRenderState();
}
/** If true, the highlight will always be aligned with a tile. */
protected boolean _onTile;
/** The dimensions of the highlight in world units. */
protected float _width, _height;
/** The dimensions of the highlight in vertices. */
protected int _vwidth, _vheight;
/** The colors for normal and hover modes. */
protected ColorRGBA _defaultColor = ColorRGBA.white,
_hoverColor = ColorRGBA.white;
/** The textures for normal and hover modes. */
protected TextureState _defaultTexture, _hoverTexture;
/** The zoffset for each layer. */
protected static final float LAYER_OFFSET = TILE_SIZE/1000;
} |
def _pyt_train(self, datatype):
defaults = integration_test_parser_defaults.copy()
defaults['datatype'] = datatype
defaults['shuffle'] = True
str_output, valid, test = testing_utils.train_model(defaults)
self.assertTrue(
solved_task(str_output, valid, test),
'Teacher could not teach seq2seq with args: {}; here is str_output: {}'.format(
defaults, str_output
),
) |
/*
* Called via glDeleteTexture to delete a texture object.
*/
void
tdfxTMFreeTexture(tdfxContextPtr fxMesa, struct gl_texture_object *tObj)
{
tdfxTexInfo *ti = TDFX_TEXTURE_DATA(tObj);
if (ti) {
tdfxTMMoveOutTM(fxMesa, tObj);
FREE(ti);
tObj->DriverData = NULL;
}
} |
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from jinja2 import Environment, FileSystemLoader, StrictUndefined
def main(output_fd, input_filename, *args):
# FileSystemLoader requires the path to the directory containing templates,
# not the file name of the template itself. We hang onto the leaf name
# which will shortly be passed to Environment.get_template.
(path, leaf) = os.path.split(input_filename)
# Jinja's default value for undefined is too permissive and would allow
# omissions to slip into the generated output. We set undefined to
# StrictUndefined to force Jinja to raise an exception any time a required
# value is missing.
env = Environment(
loader=FileSystemLoader(path, encoding="utf-8"),
autoescape=True,
undefined=StrictUndefined,
)
tpl = env.get_template(leaf)
context = dict()
# args should all be key=value pairs that will be added to the context.
# Note that all values are *strings*, so the Jinja template may need to
# convert them to other types during processing.
# (As in Python, the empty string is falsy, so simple boolean checks are possible)
for arg in args:
(k, v) = arg.split("=", 1)
context[k] = v
# Now run the template and send its output directly to output_fd
tpl.stream(context).dump(output_fd, encoding="utf-8")
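# A minimal usage sketch (hypothetical file names and key=value pairs, shown only to
# illustrate how main() is meant to be invoked -- in practice the build system supplies
# output_fd and the argument list):
#
#   with open("generated_config.h", "wb") as output_fd:
#       main(output_fd, "templates/config.h.in", "version=1.2.3", "debug=")
#
# Each trailing argument becomes a context entry, so the template can reference
# {{ version }} and {{ debug }}; remember the values are plain strings.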
|
/*
 This might seem under-tested; it was copied from another project of mine where it was thoroughly tested.
 Just for the sake of saving time the tests were not moved over.
TODO test
*/
@Slf4j
public class CassandraRepository<T> {
static final String MY_POD_NAMESPACE_ENV_VARIABLE = "MY_POD_NAMESPACE";
static final String MY_POD_NAMESPACE_DEFAULT_NAMESPACE = "default";
protected final Session session;
protected final Mapper<T> mapper;
protected final String table;
protected final String keyspace;
protected final ConsistencyLevel writeConsistency;
protected final ConsistencyLevel readConsistency;
protected final Class<T> clazz;
protected CassandraRepository(final Session session, final Class<T> clazz) {
this(session, clazz, ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM, null);
}
protected CassandraRepository(
final Session session,
final Class<T> clazz,
final ConsistencyLevel readConsistency,
final ConsistencyLevel writeConsistency,
final String keyspaceOverride) {
if (clazz.getAnnotation(Table.class) == null) {
throw new IllegalArgumentException(clazz.getName() + " does not have the " + Table.class.getName() + " annotation.");
}
if (!StringUtils.isEmpty(clazz.getAnnotation(Table.class).keyspace())) {
throw new IllegalArgumentException("@Table.keyspace is not supported anymore since we need to be able to dynamically set the" +
" keyspace. See Changelog lovebird commons for instructions");
}
this.session = session;
this.clazz = clazz;
this.readConsistency = readConsistency;
this.writeConsistency = writeConsistency;
this.table = clazz.getAnnotation(Table.class).name();
if (!StringUtils.isEmpty(keyspaceOverride)) {
keyspace = keyspaceOverride;
mapper = new MappingManager(session).mapper(clazz, keyspaceOverride);
} else {
if (StringUtils.isEmpty(session.getLoggedKeyspace())) {
throw new IllegalArgumentException("cannot resolve the keyspace for " + this.getClass().getName() +
" This should be configured on the session.");
}
keyspace = session.getLoggedKeyspace();
mapper = new MappingManager(session).mapper(clazz);
}
if (readConsistency != null) {
mapper.setDefaultGetOptions(consistencyLevel(readConsistency));
}
if (writeConsistency != null) {
mapper.setDefaultDeleteOptions(consistencyLevel(writeConsistency));
mapper.setDefaultSaveOptions(consistencyLevel(writeConsistency));
}
checkIsKeyspaceValid();
}
private void checkIsKeyspaceValid() {
String podNamespace = System.getenv(MY_POD_NAMESPACE_ENV_VARIABLE);
if (StringUtils.isEmpty(podNamespace)) {
return;
}
if (podNamespace.trim().equals(MY_POD_NAMESPACE_DEFAULT_NAMESPACE)) {
return;
}
if (!keyspace.startsWith(podNamespace)) {
throw new IllegalArgumentException("Unable to create CassandraRepository. Keyspace should be prefixed with " + podNamespace);
}
}
protected void save(final T entity, final Mapper.Option... options) {
try {
mapper.save(entity, options);
String auditMessageSuccess = String.format("Saved %s", table);
log.info(auditMessageSuccess, entity);
} catch (Throwable e) {
String auditErrorMessage = String.format("Error while saving %s", table);
log.error(auditErrorMessage, entity, e);
throw e;
}
}
protected void save(final T entity) {
this.save(entity, new Mapper.Option[0]);
}
protected void save(final List<T> entities) {
entities.forEach(this::save);
}
protected ResultSet executeDelete(final Delete delete) {
try {
ResultSet resultSet = session.execute(withConsistencyLevel(delete, writeConsistency));
String auditMessageSuccess = String.format("Delete query executed : %s ", delete.toString());
log.info(auditMessageSuccess);
return resultSet;
} catch (Throwable e) {
String auditErrorMessage = String.format("Error while executing delete query : %s", table);
log.error(auditErrorMessage, null, e);
throw e;
}
}
/**
* Mind that this method is keyspace unaware, so you will have to set your keyspace when building the statement.
* Only use this method when the select(Clause) method is insufficient.
*
* @param select The statement where the result will be mapped in a List of T.
* @return The mapped objects of T as a result of the select statement.
*/
protected List<T> select(final Statement select) {
Result<T> result = mapper.map(session.execute(withConsistencyLevel(select, readConsistency)));
return result.all();
}
protected Optional<T> selectOne(final Clause clause) {
ResultSet resultSet = session.execute(withConsistencyLevel(createSelect(clause), readConsistency));
Result<T> result = mapper.map(resultSet);
return Optional.ofNullable(result.one());
}
protected Select createSelect() {
return withConsistencyLevel(QueryBuilder.select().from(keyspace, table), readConsistency);
}
protected Select createSelect(final Clause clause) {
Select select = createSelect();
select.where(clause);
return select;
}
protected Delete createDelete() {
return withConsistencyLevel(QueryBuilder.delete().from(keyspace, table), writeConsistency);
}
protected Delete createDelete(final Clause clause) {
Delete delete = createDelete();
delete.where(clause);
return delete;
}
private <T extends Statement> T withConsistencyLevel(final T statement, final ConsistencyLevel level) {
if (level != null && statement.getConsistencyLevel() == null) {
statement.setConsistencyLevel(level);
}
return statement;
}
} |
/*
* nfs_async_stop_sig:
 * Wait for all outstanding putpage operations to complete. If a signal
 * is delivered we will abort and return non-zero. If we can put all the
 * pages we will return 0. This routine is called from nfs_unmount and
* nfs3_unmount to make these operations interruptible.
*/
int
nfs_async_stop_sig(struct vfs *vfsp)
{
mntinfo_t *mi = VFTOMI(vfsp);
ushort_t omax;
int rval;
mutex_enter(&mi->mi_async_lock);
omax = mi->mi_max_threads;
mi->mi_max_threads = 0;
NFS_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
while (mi->mi_threads[NFS_ASYNC_QUEUE] != 0 ||
mi->mi_threads[NFS_ASYNC_PGOPS_QUEUE] != 0) {
if (!cv_wait_sig(&mi->mi_async_cv, &mi->mi_async_lock))
break;
}
rval = (mi->mi_threads[NFS_ASYNC_QUEUE] != 0 ||
mi->mi_threads[NFS_ASYNC_PGOPS_QUEUE] != 0);
if (rval)
mi->mi_max_threads = omax;
mutex_exit(&mi->mi_async_lock);
return (rval);
} |
What is middleware?
What exactly is middleware? In real application when the request comes to the server it has to go through the different request handlers. For example, it could be authentication, validation, ACL, logging, caching and so on. Consider the request-response circle as an onion and when a request comes in, it has to go through the different layers of this onion, to get to the core. And every middleware is a layer of the onion. It is a callable object that receives the request and can modify it (or modify the response) before passing it to the next middleware in the chain (to the next layer of the onion).
Defining middleware
Let’s start with a simple server example:
<?php

use React\Http\Server;
use React\Http\Response;
use React\EventLoop\Factory;
use Psr\Http\Message\ServerRequestInterface;

$loop = Factory::create();

$server = new Server(function (ServerRequestInterface $request) {
    return new Response(200, ['Content-Type' => 'text/plain'], "Hello world\n");
});

$socket = new \React\Socket\Server('127.0.0.1:8000', $loop);
$server->listen($socket);

echo 'Listening on ' . str_replace('tcp:', 'http:', $socket->getAddress()) . "\n";

$loop->run();
This code represents a dummy server, that returns Hello world responses to all incoming requests. But for our needs it is OK. Now, what if we want to log all incoming requests? So, let’s add a line with echo :
<?php

$server = new Server(function (ServerRequestInterface $request) {
    echo date('Y-m-d H:i:s') . ' ' . $request->getMethod() . ' ' . $request->getUri() . PHP_EOL;

    return new Response(200, ['Content-Type' => 'text/plain'], "Hello world\n");
});
When we run our server and make a request to it (I use Curl in terminal) we see a log output on the server console:
And now we can extract this logging logic into the logging middleware. ReactPHP middleware:
- is a callable
- accepts ServerRequestInterface as the first argument and an optional callable as the second one
- returns a ResponseInterface (or any promise which can be consumed by Promise\resolve, resolving to a ResponseInterface)
- calls $next($request) to continue chaining to the next middleware, or returns a response explicitly to abort the chain
So, following these rules a logging middleware function will look like this:
<?php

$loggingMiddleware = function (ServerRequestInterface $request, callable $next) {
    echo date('Y-m-d H:i:s') . ' ' . $request->getMethod() . ' ' . $request->getUri() . PHP_EOL;

    return $next($request);
};
The server constructor can accept an array of callables, where we can pass our middleware:
<?php

$loggingMiddleware = function (ServerRequestInterface $request, callable $next) {
    echo date('Y-m-d H:i:s') . ' ' . $request->getMethod() . ' ' . $request->getUri() . PHP_EOL;

    return $next($request);
};

$server = new Server([
    $loggingMiddleware,
    function (ServerRequestInterface $request) {
        return new Response(200, ['Content-Type' => 'text/plain'], "Hello world\n");
    }
]);

$socket = new \React\Socket\Server('127.0.0.1:8000', $loop);
$server->listen($socket);

echo 'Listening on ' . str_replace('tcp:', 'http:', $socket->getAddress()) . "\n";

$loop->run();
This code does the same logging. When the request comes in our first $loggingMiddleware is executed. It prints out a log message to the server console and then passes a request object to the next middleware which returns a response and ends the chain. This is a very simple example and doesn’t show the real power of middleware when you have some complicated logic, where you modify request and response objects during the request-response life-cycle.
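As a quick illustration of modifying the request (a minimal sketch that is not part of the original example; the 'user' attribute and its value are made up), a middleware can attach data for later handlers. PSR-7 requests are immutable, so withAttribute() returns a new request instance which we pass on to $next, and a downstream handler reads it back with getAttribute():

<?php

$authMiddleware = function (ServerRequestInterface $request, callable $next) {
    // hypothetical example: pretend we resolved a user from a header
    $request = $request->withAttribute('user', 'guest');

    return $next($request);
};

$server = new Server([
    $authMiddleware,
    function (ServerRequestInterface $request) {
        $user = $request->getAttribute('user');

        return new Response(200, ['Content-Type' => 'text/plain'], "Hello $user\n");
    }
]);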
Attaching middleware to a server
For better understanding middleware we can use a simple video streaming server from one of the previous articles. Here is the source code for it:
<?php

$server = new Server(function (ServerRequestInterface $request) use ($loop) {
    $params = $request->getQueryParams();
    $file = $params['video'] ?? '';

    if (empty($file)) {
        return new Response(200, ['Content-Type' => 'text/plain'], 'Video streaming server');
    }

    $filePath = __DIR__ . DIRECTORY_SEPARATOR . 'media' . DIRECTORY_SEPARATOR . basename($file);
    @$fileStream = fopen($filePath, 'r');

    if (!$fileStream) {
        return new Response(404, ['Content-Type' => 'text/plain'], "Video $file doesn't exist on server.");
    }

    $video = new \React\Stream\ReadableResourceStream($fileStream, $loop);

    return new Response(200, ['Content-Type' => getMimeTypeByExtension($filePath)], $video);
});

$socket = new \React\Socket\Server('127.0.0.1:8000', $loop);
$server->listen($socket);

echo 'Listening on ' . str_replace('tcp:', 'http:', $socket->getAddress()) . "\n";

$loop->run();
How does it work? When you open your browser on URL 127.0.0.1:8000 and don’t provide any query params the server returns a blank page with Video streaming server message. To open a video in the browser you can specify video query param like this: http://127.0.0.1:8000/?video=bunny.mpg . If there is a file called bunny.mpg in server media directory, the server starts streaming this file. Very simple.
Note that this example uses fopen() for simplicity and demo purposes only! This should not be used in a truly asynchronous application because the filesystem is inherently blocking and each call could potentially take several seconds. Read this tutorial in case you need to work asynchronously with the filesystem in ReactPHP ecosystem.
getMimeTypeByExtension() is a custom function to detect file MIME type by its extension. You can find its implementation in Video streaming server article.
You can notice that this request handling logic can be separated into three parts:
- a plain text response, when there is no video query param.
- a 404 response, when a requested file is not found.
- a streaming response.
These three parts are good candidates for middleware. Let’s start with the first one: $queryParamMiddleware . It simply checks query params. If video param is present it passes the request to the next middleware, otherwise, it stops the chain and returns a plain text response:
<?php

$queryParamMiddleware = function (ServerRequestInterface $request, callable $next) {
    $params = $request->getQueryParams();

    if (!isset($params['video'])) {
        return new Response(200, ['Content-Type' => 'text/plain'], 'Video streaming server');
    }

    return $next($request);
};
Then, if the request has reached the second middleware, that means that we have video query param. So, we can check if a specified file exists on the server. If not we return 404 response, otherwise we continue chaining to the next middleware:
<?php

$checkFileExistsMiddleware = function (ServerRequestInterface $request, callable $next) {
    $file = $request->getQueryParams()['video'];
    $filePath = __DIR__ . DIRECTORY_SEPARATOR . 'media' . DIRECTORY_SEPARATOR . basename($file);
    @$fileStream = fopen($filePath, 'r');

    if (!$fileStream) {
        return new Response(404, ['Content-Type' => 'text/plain'], "Video $file doesn't exist on server.");
    }

    return $next($request);
};
I'm using fopen() here to check whether a file exists. The file_exists() call is blocking and may lead to race conditions, so it is not recommended in an asynchronous application.
And the last, third middleware opens a stream, wraps it into a ReactPHP \React\Stream\ReadableResourceStream object and returns it as a response body. This middleware doesn't accept a $next argument because it is the last middleware in our chain. But notice that it uses an event loop to create the \React\Stream\ReadableResourceStream object:
<?php

$videoStreamingMiddleware = function (ServerRequestInterface $request) use ($loop) {
    $file = $request->getQueryParams()['video'];
    $filePath = __DIR__ . DIRECTORY_SEPARATOR . 'media' . DIRECTORY_SEPARATOR . basename($file);
    $video = new \React\Stream\ReadableResourceStream(fopen($filePath, 'r'), $loop);

    return new Response(200, ['Content-Type' => getMimeTypeByExtension($filePath)], $video);
};
Now, having all these three middleware we can provide them to the Server constructor as an array:
<?php

$server = new Server([
    $queryParamMiddleware,
    $checkFileExistsMiddleware,
    $videoStreamingMiddleware
]);
The code looks much cleaner than having all this request handling logic in one callback. Our request-response cycle consists of three layers of the middleware onion:
$queryParamMiddleware
$checkFileExistsMiddleware
$videoStreamingMiddleware
When the request comes in it has to go through all these layers. And each layer decides whether to continue chaining or we are done and a response should be returned.
When middleware becomes too complicated it can be extracted into its own class that implements the magic __invoke() method. This allows customizing middleware on the fly.
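For example, here is a minimal sketch of such an invokable middleware class (the class name and its constructor parameter are made up for illustration); an instance is passed in the middleware array exactly like a closure:

<?php

class LoggingMiddleware
{
    private $format;

    public function __construct($format = 'Y-m-d H:i:s')
    {
        $this->format = $format;
    }

    public function __invoke(ServerRequestInterface $request, callable $next)
    {
        echo date($this->format) . ' ' . $request->getMethod() . ' ' . $request->getUri() . PHP_EOL;

        return $next($request);
    }
}

$server = new Server([
    new LoggingMiddleware('H:i:s'),
    function (ServerRequestInterface $request) {
        return new Response(200, ['Content-Type' => 'text/plain'], "Hello world\n");
    }
]);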
Modifying response
The PHP community has already standardized middleware under PSR-15 (HTTP Server Request Handlers), but ReactPHP doesn't provide any interfaces for middleware implementations. So, don't confuse PSR-15 middleware with ReactPHP HTTP middleware. As you can notice, ReactPHP middleware doesn't accept the response object, but only the request:
<?php

$myMiddleware = function (ServerRequestInterface $request, callable $next) {
    // ...
};
So, it may look like there is no way to modify the response. But that is not exactly true: it is a little tricky, but you can. Let me show how.
In this example we are going to add some headers to the response. We create a server with an array of two middleware: the first one is going to add a custom header to the resulting response, and the second one simply returns the response:
<?php

$server = new Server([
    function (ServerRequestInterface $request, callable $next) {
        // add custom header
    },
    function (ServerRequestInterface $request) {
        return new Response(200, ['Content-Type' => 'text/plain'], "Hello world\n");
    }
]);
So, how can we modify the response returned by the next middleware? We know that the $next variable represents the next middleware in the chain, so we can explicitly call it and pass a request object to it:
<?php

$server = new Server([
    function (ServerRequestInterface $request, callable $next) {
        return $next($request);
    },
    function (ServerRequestInterface $request) {
        return new Response(200, ['Content-Type' => 'text/plain'], "Hello world\n");
    }
]);
In this snippet, the first middleware simply continues the chain and returns the response from the next middleware. We can assign the Response from the next middleware to a variable, modify it and then return a modified response:
<?php

$server = new Server([
    function (ServerRequestInterface $request, callable $next) {
        $response = $next($request);

        return $response->withHeader('X-Custom', 'foo');
    },
    function (ServerRequestInterface $request) {
        return new Response(200, ['Content-Type' => 'text/plain'], "Hello world\n");
    }
]);
And now we can add our custom X-Custom header with foo value and check if everything works as expected:
I use Curl in terminal with -i flag to receive the response with headers. You can see that the server returns a response from the second middleware with Hello world message. And also response headers contain our X-Custom header.
Middleware under the hood of the Server
ReactPHP HTTP Component comes with three middleware implementations:
LimitConcurrentRequestsMiddleware
RequestBodyParserMiddleware
RequestBodyBufferMiddleware
All of them under the hood are included in Server class, so there is no need to explicitly pass them. Why these particular middleware? Because they are required to match PHP’s request behavior.
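If you ever want to be explicit about them (for example, to tune their parameters), instances can be passed in front of your own handlers. A minimal sketch, with the limits picked arbitrarily for illustration:

<?php

$server = new Server([
    new \React\Http\Middleware\LimitConcurrentRequestsMiddleware(100),
    new \React\Http\Middleware\RequestBodyBufferMiddleware(8 * 1024 * 1024), // 8 MiB, arbitrary
    new \React\Http\Middleware\RequestBodyParserMiddleware(),
    function (ServerRequestInterface $request) {
        return new Response(200, ['Content-Type' => 'text/plain'], "Hello world\n");
    }
]);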
Limiting concurrent requests
LimitConcurrentRequestsMiddleware can be used to limit how many next handlers can be executed concurrently. Server class tries to determine this number automatically according to your php.ini settings (it uses memory_limit and post_max_size values). But a predefined maximum number of pending handlers is 100 . This middleware has its own queue. If the number of pending handlers exceeds the allowed limit, the request goes to the queue and its streaming body is paused. Once one of the pending requests is done, the middleware fetches the oldest pending request from the queue and resumes its streaming body.
To demonstrate how it works, we can attach a timer for 2 seconds in one of the middleware to simulate a busy server:
<?php

$server = new Server([
    function (ServerRequestInterface $request, callable $next) use ($loop) {
        $deferred = new \React\Promise\Deferred();

        $loop->addTimer(2, function () use ($next, $request, $deferred) {
            echo 'Resolving request' . PHP_EOL;
            $deferred->resolve($next($request));
        });

        return $deferred->promise();
    },
    function (ServerRequestInterface $request) {
        return new Response(200, ['Content-Type' => 'text/plain'], "Hello world\n");
    }
]);
Then when running two parallel Curl requests we can see that they both are resolved with a delay of 2 seconds:
And now see what happens if we use LimitConcurrentRequestsMiddleware and set the limit to 1:
<?php

$server = new Server([
    new \React\Http\Middleware\LimitConcurrentRequestsMiddleware(1),
    function (ServerRequestInterface $request, callable $next) use ($loop) {
        $deferred = new \React\Promise\Deferred();

        $loop->addTimer(2, function () use ($next, $request, $deferred) {
            echo 'Resolving request' . PHP_EOL;
            $deferred->resolve($next($request));
        });

        return $deferred->promise();
    },
    function (ServerRequestInterface $request) {
        return new Response(200, ['Content-Type' => 'text/plain'], "Hello world\n");
    }
]);
The requests are queued. While the first request is being processed, the second one is stored in the middleware’s queue. After two seconds, when the first request is done, the second one is dispatched from the queue and then processed. In this way, we have actually removed concurrency and incoming requests are processed by server simply one by one.
Buffering request body
When a POST or PUT request reaches the HTTP server we can get access to its body by calling $request->getParsedBody(). This method returns an associative array that represents the parsed request body. Under the hood, the server receives a request whose body is a stream. So, behind the scenes, React\Http\Server first uses RequestBodyBufferMiddleware to buffer this stream in memory. The request is buffered until the end of its body has been reached, and then the next middleware in the chain is called with a complete, buffered request. That next middleware is RequestBodyParserMiddleware.
Parsing request body
When a request body is buffered it goes to RequestBodyParserMiddleware which actually parses form fields and file uploads. This middleware makes it possible to receive request params, when you call $request->getParsedBody() :
<?php

$server = new \React\Http\Server([
    function (ServerRequestInterface $request) {
        print_r($request->getParsedBody());

        return new Response(200, ['Content-Type' => 'text/plain'], "Hello world\n");
    }
]);
To check how it works we again use Curl from terminal and provide some form data: curl 127.0.0.1:8000 --data "param1=value1&param2=value2":
Under the hood this middleware parses requests that use Content-Type: application/x-www-form-urlencoded or Content-Type: multipart/form-data headers. That allows us to get access to both request body and uploads.
Uploading files
To get the uploaded files you can call $request->getUploadedFiles(). This method returns an array of Psr\Http\Message\UploadedFileInterface instances. Each upload from the submitted request is represented by its own instance:
<?php

$server = new \React\Http\Server([
    function (ServerRequestInterface $request) {
        $files = $request->getUploadedFiles();
        /** @var \Psr\Http\Message\UploadedFileInterface|null $file */
        $file = $files['file'] ?? null;

        return new Response(200, ['Content-Type' => 'text/plain'], $file ? $file->getClientFilename() : '');
    }
]);
For example, we can upload a simple text file from terminal curl 127.0.0.1:8000 -F "[email protected]" (flag -F means multipart/form-data ):
This code simply returns an uploaded file name as a response.
To store an uploaded file use UploadedFileInterface::getStream() method, which returns an instance of the Psr\Http\Message\StreamInterface . To get the contents of the uploaded file we can cast this object to a string. For example, this snippet opens a writable stream and stores the contents of the uploaded file in dest.txt :
<?php

$server = new \React\Http\Server([
    function (ServerRequestInterface $request) use ($loop) {
        /** @var \Psr\Http\Message\UploadedFileInterface[] $files */
        $files = $request->getUploadedFiles();
        $file = $files['file'] ?? null;

        if ($file) {
            $dest = new \React\Stream\WritableResourceStream(fopen('dest.txt', 'w'), $loop);
            $dest->write($file->getStream());
        }

        return new Response(200, ['Content-Type' => 'text/plain']);
    }
]);
Actually, UploadedFileInterface has a moveTo() method intended exactly for storing an upload. But this method is by its nature blocking (it is the recommended alternative to calling move_uploaded_file()), so it can't be used in an asynchronous application. That is why we have to use getStream(). Calling moveTo() will throw a RuntimeException.
Conclusion
A chain of middleware between the server and your application is a powerful tool to customize the way how your request and response behave. In addition to already built-in Server middleware, there is a list of third-party middleware created by ReactPHP community.
You can find examples from this article on GitHub.
This article is a part of the ReactPHP Series. |
/**
* Start the ProfileActivity to display information of the specified user
* @param userId user to display
*/
private void visitProfile(String userId) {
Intent profileIntent = new Intent(getContext(), ProfileActivity.class);
profileIntent.putExtra(ProfileActivity.PROFILE_VISIT_CODE, ProfileActivity.PROFILE_VISITING_MODE);
profileIntent.putExtra(ProfileActivity.PROFILE_ID_USER, userId);
startActivityForResult(profileIntent, ProfileActivity.VISIT_MODE_REQUEST_CODE);
} |
// MarshalJSON implements the json.Marshaller interface for type JobResponse.
func (j JobResponse) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
populateTimeRFC1123(objectMap, "endTimeUtc", j.EndTimeUTC)
populate(objectMap, "failureReason", j.FailureReason)
populate(objectMap, "jobId", j.JobID)
populate(objectMap, "parentJobId", j.ParentJobID)
populateTimeRFC1123(objectMap, "startTimeUtc", j.StartTimeUTC)
populate(objectMap, "status", j.Status)
populate(objectMap, "statusMessage", j.StatusMessage)
populate(objectMap, "type", j.Type)
return json.Marshal(objectMap)
} |
"""
This module contains some common functions used by wxPython-AUI to
manipulate colours, bitmaps, text, gradient shadings and custom
dragging images for AuiNotebook tabs.
"""
__author__ = "<NAME> <<EMAIL>>"
__date__ = "31 March 2009"
import wx
from aui_constants import *
if wx.Platform == "__WXMAC__":
import Carbon.Appearance
def BlendColour(fg, bg, alpha):
"""
Blends the two colour component `fg` and `bg` into one colour component, adding
an optional alpha channel.
:param `fg`: the first colour component;
:param `bg`: the second colour component;
:param `alpha`: an optional transparency value.
"""
result = bg + (alpha*(fg - bg))
if result < 0.0:
result = 0.0
if result > 255:
result = 255
return result
def StepColour(c, ialpha):
"""
Darken/lighten the input colour `c`.
:param `c`: a colour to darken/lighten;
:param `ialpha`: a transparency value.
"""
if ialpha == 100:
return c
r, g, b = c.Red(), c.Green(), c.Blue()
# ialpha is 0..200 where 0 is completely black
# and 200 is completely white and 100 is the same
# convert that to normal alpha 0.0 - 1.0
ialpha = min(ialpha, 200)
ialpha = max(ialpha, 0)
alpha = (ialpha - 100.0)/100.0
if ialpha > 100:
# blend with white
bg = 255
alpha = 1.0 - alpha # 0 = transparent fg 1 = opaque fg
else:
# blend with black
bg = 0
alpha = 1.0 + alpha # 0 = transparent fg 1 = opaque fg
r = BlendColour(r, bg, alpha)
g = BlendColour(g, bg, alpha)
b = BlendColour(b, bg, alpha)
return wx.Colour(r, g, b)
def LightContrastColour(c):
"""
Creates a new, lighter colour based on the input colour `c`.
:param `c`: the input colour to analyze.
"""
amount = 120
# if the colour is especially dark, then
# make the contrast even lighter
if c.Red() < 128 and c.Green() < 128 and c.Blue() < 128:
amount = 160
return StepColour(c, amount)
def ChopText(dc, text, max_size):
"""
Chops the input `text` if its size does not fit in `max_size`, by cutting the
text and adding ellipsis at the end.
:param `dc`: a `wx.DC` device context;
:param `text`: the text to chop;
:param `max_size`: the maximum size in which the text should fit.
"""
# first check if the text fits with no problems
x, y, dummy = dc.GetMultiLineTextExtent(text)
if x <= max_size:
return text
textLen = len(text)
last_good_length = 0
for i in xrange(textLen, -1, -1):
s = text[0:i]
s += "..."
x, y = dc.GetTextExtent(s)
last_good_length = i
if x < max_size:
break
ret = text[0:last_good_length] + "..."
return ret
def BitmapFromBits(bits, w, h, colour):
"""
BitmapFromBits() is a utility function that creates a
masked bitmap from raw bits (XBM format).
:param `bits`: a string containing the raw bits of the bitmap;
:param `w`: the bitmap width;
:param `h`: the bitmap height;
:param `colour`: the colour which will replace all white pixels in the
raw bitmap.
"""
img = wx.BitmapFromBits(bits, w, h).ConvertToImage()
img.Replace(0, 0, 0, 123, 123, 123)
img.Replace(255, 255, 255, colour.Red(), colour.Green(), colour.Blue())
img.SetMaskColour(123, 123, 123)
return wx.BitmapFromImage(img)
def IndentPressedBitmap(rect, button_state):
"""
Indents the input rectangle `rect` based on the value of `button_state`.
:param `rect`: an instance of wx.Rect;
:param `button_state`: an L{AuiNotebook} button state.
"""
if button_state == AUI_BUTTON_STATE_PRESSED:
rect.x += 1
rect.y += 1
return rect
def GetBaseColour():
"""
Returns the face shading colour on push buttons/backgrounds, mimicking as closely
as possible the platform UI colours.
"""
if wx.Platform == "__WXMAC__":
if hasattr(wx, 'MacThemeColour'):
base_colour = wx.MacThemeColour(Carbon.Appearance.kThemeBrushToolbarBackground)
else:
brush = wx.Brush(wx.BLACK)
brush.MacSetTheme(Carbon.Appearance.kThemeBrushToolbarBackground)
base_colour = brush.GetColour()
else:
base_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE)
# the base_colour is too pale to use as our base colour,
# so darken it a bit
if ((255-base_colour.Red()) +
(255-base_colour.Green()) +
(255-base_colour.Blue()) < 60):
base_colour = StepColour(base_colour, 92)
return base_colour
def MakeDisabledBitmap(bitmap):
"""
Convert the given image (in place) to a grayed-out version,
appropriate for a 'disabled' appearance.
:param `bitmap`: the bitmap to gray-out.
"""
anImage = bitmap.ConvertToImage()
factor = 0.7 # 0 < f < 1. Higher Is Grayer
if anImage.HasMask():
maskColour = (anImage.GetMaskRed(), anImage.GetMaskGreen(), anImage.GetMaskBlue())
else:
maskColour = None
data = map(ord, list(anImage.GetData()))
for i in range(0, len(data), 3):
pixel = (data[i], data[i+1], data[i+2])
pixel = MakeGray(pixel, factor, maskColour)
for x in range(3):
data[i+x] = pixel[x]
anImage.SetData(''.join(map(chr, data)))
return anImage.ConvertToBitmap()
def MakeGray(rgbTuple, factor, maskColour):
"""
Make a pixel grayed-out. If the pixel matches the `maskColour`, it won't be
changed.
:param `rgbTuple`: a tuple representing a pixel colour;
:param `factor`: a graying-out factor;
:param `maskColour`: a colour mask.
"""
if rgbTuple != maskColour:
r, g, b = rgbTuple
return map(lambda x: int((230 - x) * factor) + x, (r, g, b))
else:
return rgbTuple
def Clip(a, b, c):
"""
Clips the value in `a` based on the extremes `b` and `c`.
:param `a`: the value to analyze;
:param `b`: a minimum value;
:param `c`: a maximum value.
"""
return ((a < b and [b]) or [(a > c and [c] or [a])[0]])[0]
def LightColour(colour, percent):
"""
Brighten input `colour` by `percent`.
:param `colour`: the colour to be brightened;
:param `percent`: brightening percentage.
"""
end_colour = wx.WHITE
rd = end_colour.Red() - colour.Red()
gd = end_colour.Green() - colour.Green()
bd = end_colour.Blue() - colour.Blue()
high = 100
    # We take the percent way of the colour from colour -> white
i = percent
r = colour.Red() + ((i*rd*100)/high)/100
g = colour.Green() + ((i*gd*100)/high)/100
b = colour.Blue() + ((i*bd*100)/high)/100
return wx.Colour(r, g, b)
def PaneCreateStippleBitmap():
"""
Creates a stipple bitmap to be used in a `wx.Brush`.
This is used to draw sash resize hints.
"""
data = [0, 0, 0, 192, 192, 192, 192, 192, 192, 0, 0, 0]
img = wx.EmptyImage(2, 2)
counter = 0
for ii in xrange(2):
for jj in xrange(2):
img.SetRGB(ii, jj, data[counter], data[counter+1], data[counter+2])
counter = counter + 3
return img.ConvertToBitmap()
def DrawMACCloseButton(colour, backColour=None):
"""
Draws the wxMAC tab close button using `wx.GraphicsContext`.
:param `colour`: the colour to use to draw the circle;
:param `backColour`: the optional background colour for the circle.
"""
bmp = wx.EmptyBitmapRGBA(16, 16)
dc = wx.MemoryDC()
dc.SelectObject(bmp)
gc = wx.GraphicsContext.Create(dc)
gc.SetBrush(wx.Brush(colour))
path = gc.CreatePath()
path.AddCircle(6.5, 7, 6.5)
path.CloseSubpath()
gc.FillPath(path)
path = gc.CreatePath()
if backColour is not None:
pen = wx.Pen(backColour, 2)
else:
pen = wx.Pen("white", 2)
pen.SetCap(wx.CAP_BUTT)
pen.SetJoin(wx.JOIN_BEVEL)
gc.SetPen(pen)
path.MoveToPoint(3.5, 4)
path.AddLineToPoint(9.5, 10)
path.MoveToPoint(3.5, 10)
path.AddLineToPoint(9.5, 4)
path.CloseSubpath()
gc.DrawPath(path)
dc.SelectObject(wx.NullBitmap)
return bmp
def DarkenBitmap(bmp, caption_colour, new_colour):
"""
Darkens the input bitmap on wxMAC using the input colour.
:param `bmp`: the bitmap to be manipulated;
:param `caption_colour`: the colour of the pane caption;
:param `new_colour`: the colour used to darken the bitmap.
"""
image = bmp.ConvertToImage()
red = caption_colour.Red()/float(new_colour.Red())
green = caption_colour.Green()/float(new_colour.Green())
blue = caption_colour.Blue()/float(new_colour.Blue())
image = image.AdjustChannels(red, green, blue)
return image.ConvertToBitmap()
def DrawGradientRectangle(dc, rect, start_colour, end_colour, direction, offset=0, length=0):
"""
Draws a gradient-shaded rectangle.
:param `dc`: a `wx.DC` device context;
:param `rect`: the rectangle in which to draw the gradient;
:param `start_colour`: the first colour of the gradient;
:param `end_colour`: the second colour of the gradient;
:param `direction`: the gradient direction (horizontal or vertical).
"""
if direction == AUI_GRADIENT_VERTICAL:
dc.GradientFillLinear(rect, start_colour, end_colour, wx.SOUTH)
else:
dc.GradientFillLinear(rect, start_colour, end_colour, wx.EAST)
def FindFocusDescendant(ancestor):
"""
Find a window with the focus, that is also a descendant of the given window.
This is used to determine the window to initially send commands to.
:param `ancestor`: the window to check for ancestry.
"""
# Process events starting with the window with the focus, if any.
focusWin = wx.Window.FindFocus()
win = focusWin
# Check if this is a descendant of this frame.
# If not, win will be set to NULL.
while win:
if win == ancestor:
break
else:
win = win.GetParent()
if win is None:
focusWin = None
return focusWin
def GetLabelSize(dc, label, vertical):
"""
Returns the L{AuiToolBar} item label size.
:param `label`: the toolbar tool label;
:param `vertical`: whether the toolbar tool orientation is vertical or not.
"""
text_width = text_height = 0
# get the text height
dummy, text_height = dc.GetTextExtent("ABCDHgj")
# get the text width
if label.strip():
text_width, dummy = dc.GetTextExtent(label)
if vertical:
tmp = text_height
text_height = text_width
text_width = tmp
return wx.Size(text_width, text_height)
#---------------------------------------------------------------------------
# TabDragImage implementation
# This class handles the creation of a custom image when dragging
# AuiNotebook tabs
#---------------------------------------------------------------------------
class TabDragImage(wx.DragImage):
"""
This class handles the creation of a custom image in case of drag and
drop of a notebook tab.
"""
def __init__(self, notebook, page, button_state, tabArt):
"""
Default class constructor.
For internal use: do not call it in your code!
:param `notebook`: an instance of L{AuiNotebook};
:param `page`: the dragged L{AuiNotebook} page;
:param `button_state`: the state of the close button on the tab;
:param `tabArt`: an instance of L{AuiDefaultTabArt} or one of its derivations.
"""
self._backgroundColour = wx.NamedColour("pink")
self._bitmap = self.CreateBitmap(notebook, page, button_state, tabArt)
wx.DragImage.__init__(self, self._bitmap)
def CreateBitmap(self, notebook, page, button_state, tabArt):
"""
Actually creates the drag and drop bitmap.
:param `notebook`: an instance of L{AuiNotebook};
:param `page`: the dragged L{AuiNotebook} page;
:param `button_state`: the state of the close button on the tab;
:param `tabArt`: an instance of L{AuiDefaultTabArt} or one of its derivations.
"""
control = page.control
memory = wx.MemoryDC(wx.EmptyBitmap(1, 1))
tab_size, x_extent = tabArt.GetTabSize(memory, notebook, page.caption, page.bitmap, page.active,
button_state, control)
tab_width, tab_height = tab_size
rect = wx.Rect(0, 0, tab_width, tab_height)
bitmap = wx.EmptyBitmap(tab_width+1, tab_height+1)
memory.SelectObject(bitmap)
if wx.Platform == "__WXMAC__":
memory.SetBackground(wx.TRANSPARENT_BRUSH)
else:
memory.SetBackground(wx.Brush(self._backgroundColour))
memory.SetBackgroundMode(wx.TRANSPARENT)
memory.Clear()
paint_control = wx.Platform != "__WXMAC__"
tabArt.DrawTab(memory, notebook, page, rect, button_state, paint_control=paint_control)
memory.SetBrush(wx.TRANSPARENT_BRUSH)
memory.SetPen(wx.BLACK_PEN)
memory.DrawRoundedRectangle(0, 0, tab_width+1, tab_height+1, 2)
memory.SelectObject(wx.NullBitmap)
# Gtk and Windows unfortunatly don't do so well with transparent
# drawing so this hack corrects the image to have a transparent
# background.
if wx.Platform != '__WXMAC__':
timg = bitmap.ConvertToImage()
if not timg.HasAlpha():
timg.InitAlpha()
for y in xrange(timg.GetHeight()):
for x in xrange(timg.GetWidth()):
pix = wx.Colour(timg.GetRed(x, y),
timg.GetGreen(x, y),
timg.GetBlue(x, y))
if pix == self._backgroundColour:
timg.SetAlpha(x, y, 0)
bitmap = timg.ConvertToBitmap()
return bitmap
def GetDockingImage(direction, useAero, center):
"""
Returns the correct name of the docking bitmap depending on the input parameters.
:param `useAero`: whether L{AuiManager} is using Aero-style or Whidbey-style docking
images or not;
:param `center`: whether we are looking for the center diamond-shaped bitmap or not.
"""
suffix = (center and [""] or ["_single"])[0]
prefix = ""
if useAero == 2:
# Whidbey docking guides
prefix = "whidbey_"
elif useAero == 1:
# Aero docking style
prefix = "aero_"
if direction == wx.TOP:
bmp_unfocus = eval("%sup%s"%(prefix, suffix)).GetBitmap()
bmp_focus = eval("%sup_focus%s"%(prefix, suffix)).GetBitmap()
elif direction == wx.BOTTOM:
bmp_unfocus = eval("%sdown%s"%(prefix, suffix)).GetBitmap()
bmp_focus = eval("%sdown_focus%s"%(prefix, suffix)).GetBitmap()
elif direction == wx.LEFT:
bmp_unfocus = eval("%sleft%s"%(prefix, suffix)).GetBitmap()
bmp_focus = eval("%sleft_focus%s"%(prefix, suffix)).GetBitmap()
elif direction == wx.RIGHT:
bmp_unfocus = eval("%sright%s"%(prefix, suffix)).GetBitmap()
bmp_focus = eval("%sright_focus%s"%(prefix, suffix)).GetBitmap()
else:
bmp_unfocus = eval("%stab%s"%(prefix, suffix)).GetBitmap()
bmp_focus = eval("%stab_focus%s"%(prefix, suffix)).GetBitmap()
return bmp_unfocus, bmp_focus
def TakeScreenShot(rect):
"""
Takes a screenshot of the screen at given position and size (rect).
:param `rect`: the screen rectangle for which we want to take a screenshot.
"""
# Create a DC for the whole screen area
dcScreen = wx.ScreenDC()
# Create a Bitmap that will later on hold the screenshot image
# Note that the Bitmap must have a size big enough to hold the screenshot
# -1 means using the current default colour depth
bmp = wx.EmptyBitmap(rect.width, rect.height)
# Create a memory DC that will be used for actually taking the screenshot
memDC = wx.MemoryDC()
# Tell the memory DC to use our Bitmap
# all drawing action on the memory DC will go to the Bitmap now
memDC.SelectObject(bmp)
# Blit (in this case copy) the actual screen on the memory DC
# and thus the Bitmap
memDC.Blit( 0, # Copy to this X coordinate
0, # Copy to this Y coordinate
rect.width, # Copy this width
rect.height, # Copy this height
dcScreen, # From where do we copy?
rect.x, # What's the X offset in the original DC?
rect.y # What's the Y offset in the original DC?
)
# Select the Bitmap out of the memory DC by selecting a new
# uninitialized Bitmap
memDC.SelectObject(wx.NullBitmap)
return bmp
def RescaleScreenShot(bmp, thumbnail_size=200):
"""
    Rescales a bitmap to be `thumbnail_size` pixels wide (or tall) at maximum.
:param `bmp`: the bitmap to rescale;
:param `thumbnail_size`: the maximum size of every page thumbnail.
"""
bmpW, bmpH = bmp.GetWidth(), bmp.GetHeight()
img = bmp.ConvertToImage()
newW, newH = bmpW, bmpH
if bmpW > bmpH:
if bmpW > thumbnail_size:
ratio = bmpW/float(thumbnail_size)
newW, newH = int(bmpW/ratio), int(bmpH/ratio)
img.Rescale(newW, newH, wx.IMAGE_QUALITY_HIGH)
else:
if bmpH > thumbnail_size:
ratio = bmpH/float(thumbnail_size)
newW, newH = int(bmpW/ratio), int(bmpH/ratio)
img.Rescale(newW, newH, wx.IMAGE_QUALITY_HIGH)
newBmp = img.ConvertToBitmap()
otherBmp = wx.EmptyBitmap(newW+5, newH+5)
memDC = wx.MemoryDC()
memDC.SelectObject(otherBmp)
memDC.SetBackground(wx.WHITE_BRUSH)
memDC.Clear()
memDC.SetPen(wx.TRANSPARENT_PEN)
pos = 0
for i in xrange(5, 0, -1):
brush = wx.Brush(wx.Colour(50*i, 50*i, 50*i))
memDC.SetBrush(brush)
memDC.DrawRoundedRectangle(0, 0, newW+5-pos, newH+5-pos, 2)
pos += 1
memDC.DrawBitmap(newBmp, 0, 0, True)
# Select the Bitmap out of the memory DC by selecting a new
# uninitialized Bitmap
memDC.SelectObject(wx.NullBitmap)
return otherBmp
def GetSlidingPoints(rect, size, direction):
"""
Returns the point at which the sliding in and out of a minimized pane begins.
:param `rect`: the L{AuiToolBar} tool screen rectangle;
:param `size`: the pane window size;
:param `direction`: the pane docking direction.
"""
if direction == AUI_DOCK_LEFT:
startX, startY = rect.x + rect.width + 2, rect.y
elif direction == AUI_DOCK_TOP:
startX, startY = rect.x, rect.y + rect.height + 2
elif direction == AUI_DOCK_RIGHT:
startX, startY = rect.x - size.x - 2, rect.y
elif direction == AUI_DOCK_BOTTOM:
startX, startY = rect.x, rect.y - size.y - 2
else:
raise Exception("How did we get here?")
caption_height = wx.SystemSettings.GetMetric(wx.SYS_CAPTION_Y)
frame_border_x = wx.SystemSettings.GetMetric(wx.SYS_FRAMESIZE_X)
frame_border_y = wx.SystemSettings.GetMetric(wx.SYS_FRAMESIZE_Y)
stopX = size.x + caption_height + frame_border_x
stopY = size.x + frame_border_y
return startX, startY, stopX, stopY
def CopyAttributes(newArt, oldArt):
"""
Copies pens, brushes, colours and fonts from the old tab art to the new one.
:param `newArt`: the new instance of L{AuiDefaultTabArt};
:param `oldArt`: the old instance of L{AuiDefaultTabArt}.
"""
attrs = dir(oldArt)
for attr in attrs:
if attr.startswith("_") and (attr.endswith("_colour") or attr.endswith("_font") or \
attr.endswith("_font") or attr.endswith("_brush") or \
attr.endswith("Pen") or attr.endswith("_pen")):
setattr(newArt, attr, getattr(oldArt, attr))
return newArt
|