content (string, lengths 10 to 4.9M)
---|
def authenticate(screen_name_or_email_address: str, password: str) -> User:
    # Resolve the login identifier to a user ID.
    user_id = _find_user_id_by_screen_name_or_email_address(
        screen_name_or_email_address
    )
    if user_id is None:
        raise AuthenticationFailed()

    # The account must exist and be active.
    user = user_service.find_active_user(user_id)
    if user is None:
        raise AuthenticationFailed()

    if not password_service.is_password_valid_for_user(user.id, password):
        raise AuthenticationFailed()

    # Transparently upgrade outdated password hashes on successful login.
    password_service.migrate_password_hash_if_outdated(user.id, password)

    return user |
/**
 * Gets the session and resets it. After the reset, calls super
 * to return the updated session information.
 * @param toBuilder StringBuilder receiving the XML
 * @return true if the parent wrapper is to be included in the returned XML
 */
@Override
protected boolean onDoGetWebService(StringBuilder toBuilder)
{
    ISession loSession = getSession();
    loSession.reset();
    super.onDoGetWebService(toBuilder);
    return true;
} |
// WithSettings overrides the generator settings.
// The default comes from DefaultSettings().
func WithSettings(s Settings) Option {
	return func(g *Generator) {
		g.settings = s
	}
} |
package com.hltech.pact.gen.domain.client.model;
import lombok.Builder;
import lombok.Data;
import java.util.List;
@Data
@Builder
public class ClientMethodRepresentation {
    private RequestRepresentation requestRepresentation;
    private List<ResponseRepresentation> responseRepresentationList;
}
|
package com.qgclient.utils.http;
import android.content.Context;
import android.text.TextUtils;
import com.android.volley.AuthFailureError;
import com.android.volley.Cache;
import com.android.volley.DefaultRetryPolicy;
import com.android.volley.NetworkResponse;
import com.android.volley.Request;
import com.android.volley.RequestQueue;
import com.android.volley.Response;
import com.android.volley.toolbox.StringRequest;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import com.qgclient.utils.AppUtils;
import com.qgclient.utils.DebugUtils;
import com.qgclient.utils.NetUtils;
import com.qgclient.utils.NetworkUtils;
import com.qgclient.utils.R;
import com.qgclient.utils.ToastUtil;
import com.qgclient.utils.userauth.AuthCookie;
import org.apache.http.client.utils.URLEncodedUtils;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.protocol.HTTP;
import java.lang.reflect.Type;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* Created by Administrator on 2018/1/9.
*/
public class QGHttpClient {
private static volatile QGHttpClient instance; // volatile for safe double-checked locking
private RequestQueue requestQueue;
private DefaultRetryPolicy retryPolicy;
private QGHttpClient() {
}
public static QGHttpClient getInstance() {
if (instance == null) {
synchronized (QGHttpClient.class) {
// Double-checked locking: re-check inside the synchronized block so that
// concurrent callers cannot create a second instance.
if (instance == null) {
instance = new QGHttpClient();
}
}
}
return instance;
}
/**
 * Initialization.
 *
 * @param requestQueue1 the Volley request queue to use for all requests
 */
public void initHttpClient(RequestQueue requestQueue1) {
this.requestQueue = requestQueue1;
this.retryPolicy = new DefaultRetryPolicy(QGHttpConstant.getInstance().getDefault_timeout_ms(), QGHttpConstant.getInstance().getDefault_max_retries(), QGHttpConstant.getInstance().getDefault_backoff_mult());
}
public RequestQueue getRequestQueue() {
return requestQueue;
}
public void get(Context context, String requestUrl, HashMap<String, String> params, QGHttpHandler<?> handler) {
get(context, requestUrl, params, false, handler);
}
public void get(Context context, String requestUrl, HashMap<String, String> params, boolean isGetCookie, QGHttpHandler<?> handler) {
if (context == null || TextUtils.isEmpty(requestUrl)) {
if (handler != null) handler.onFailure("参数错误!", "-100001"); // "parameter error!"
} else {
String URL = getUrlWithQueryString(true, requestUrl, params);
DebugUtils.error(URL);
if (!NetworkUtils.hasNetWork(context)) {
// TODO: 2018/1/9 fetch cached data; if the cache is empty, report the network connection state
Cache.Entry cacheEntry = requestQueue.getCache().get(URL);
if (handler != null) {
if (cacheEntry != null) handler.onResponse(new String(cacheEntry.data));
else handler.onFailure(context.getString(R.string.check_network), "-100001");
}
} else {
MyStringRequest stringRequest = new MyStringRequest(URL, handler, handler);
stringRequest.setContext(context);
stringRequest.setGetCookie(isGetCookie);
stringRequest.setRetryPolicy(retryPolicy);
stringRequest.setTag(context);
requestQueue.add(stringRequest);
}
}
}
public void post(Context context, String requestUrl, HashMap<String, String> params, QGHttpHandler<?> handler) {
post(context, requestUrl, params, false, handler);
}
public void post(Context context, String requestUrl, HashMap<String, String> params, boolean isGetCookie, QGHttpHandler<?> handler) {
if (context == null || TextUtils.isEmpty(requestUrl)) {
if (handler != null) handler.onFailure("参数错误!", "-100001"); // "parameter error!"
} else {
DebugUtils.error(requestUrl);
if (!NetworkUtils.hasNetWork(context)) {
if (handler != null)
handler.onFailure(context.getString(R.string.check_network), "-100001");
} else {
MyStringRequest stringRequest = new MyStringRequest(Request.Method.POST, requestUrl, handler, handler);
stringRequest.setContext(context);
stringRequest.setGetCookie(isGetCookie);
stringRequest.setRetryPolicy(retryPolicy);
stringRequest.setTag(context);
requestQueue.add(stringRequest);
}
}
}
public String getUrlWithQueryString(boolean shouldEncodeUrl, String url, HashMap<String, String> params) {
if (shouldEncodeUrl)
url = url.replace(" ", "%20");
if (params != null) {
String paramString = getParamsList(params).trim();
if (!paramString.equals("") && !paramString.equals("?")) {
url += url.contains("?") ? "&" : "?";
url += paramString;
}
}
return url;
}
public String getParamsList(HashMap<String, String> params) {
List<BasicNameValuePair> lparams = new LinkedList<BasicNameValuePair>();
for (Map.Entry<String, String> entry : params.entrySet()) {
lparams.add(new BasicNameValuePair(entry.getKey(), entry.getValue()));
}
return URLEncodedUtils.format(lparams, HTTP.UTF_8);
}
}
|
typedef enum WebPPreset {
WEBP_PRESET_DEFAULT = 0,
WEBP_PRESET_PICTURE,
WEBP_PRESET_PHOTO,
WEBP_PRESET_DRAWING,
WEBP_PRESET_ICON,
WEBP_PRESET_TEXT
} WebPPreset;
typedef enum WEBP_CSP_MODE {
MODE_RGB = 0, MODE_RGBA = 1,
MODE_BGR = 2, MODE_BGRA = 3,
MODE_ARGB = 4, MODE_RGBA_4444 = 5,
MODE_RGB_565 = 6,
MODE_rgbA = 7,
MODE_bgrA = 8,
MODE_Argb = 9,
MODE_rgbA_4444 = 10,
MODE_YUV = 11, MODE_YUVA = 12,
MODE_LAST = 13
} WEBP_CSP_MODE;
typedef enum VP8StatusCode {
VP8_STATUS_OK = 0,
VP8_STATUS_OUT_OF_MEMORY,
VP8_STATUS_INVALID_PARAM,
VP8_STATUS_BITSTREAM_ERROR,
VP8_STATUS_UNSUPPORTED_FEATURE,
VP8_STATUS_SUSPENDED,
VP8_STATUS_USER_ABORT,
VP8_STATUS_NOT_ENOUGH_DATA
} VP8StatusCode;
struct WebPData {
const uint8_t* bytes;
size_t size;
...;
};
typedef struct WebPData WebPData;
struct WebPPicture;
typedef struct WebPPicture WebPPicture;
typedef int (*WebPWriterFunction)(const uint8_t* data, size_t data_size, const WebPPicture* picture);
struct WebPPicture {
int use_argb;
int width;
int height;
WebPWriterFunction writer;
void* custom_ptr;
...;
};
struct WebPRGBABuffer {
uint8_t* rgba;
int stride;
size_t size;
};
typedef struct WebPRGBABuffer WebPRGBABuffer;
struct WebPYUVABuffer {
uint8_t* y, *u, *v, *a;
int y_stride;
int u_stride, v_stride;
int a_stride;
size_t y_size;
size_t u_size, v_size;
size_t a_size;
};
typedef struct WebPYUVABuffer WebPYUVABuffer;
struct WebPBitstreamFeatures {
int width;
int height;
int has_alpha;
int has_animation;
int format;
...;
};
typedef struct WebPBitstreamFeatures WebPBitstreamFeatures;
struct WebPDecBuffer {
WEBP_CSP_MODE colorspace;
int width, height;
int is_external_memory;
union {
WebPRGBABuffer RGBA;
WebPYUVABuffer YUVA;
} u;
...;
};
typedef struct WebPDecBuffer WebPDecBuffer;
struct WebPDecoderOptions {
int use_threads;
...;
};
typedef struct WebPDecoderOptions WebPDecoderOptions;
struct WebPDecoderConfig {
WebPBitstreamFeatures input;
WebPDecBuffer output;
WebPDecoderOptions options;
...;
};
typedef struct WebPDecoderConfig WebPDecoderConfig;
// Image characteristics hint for the underlying encoder.
typedef enum WebPImageHint {
WEBP_HINT_DEFAULT = 0, // default preset.
WEBP_HINT_PICTURE, // digital picture, like portrait, inner shot
WEBP_HINT_PHOTO, // outdoor photograph, with natural lighting
WEBP_HINT_GRAPH, // Discrete tone image (graph, map-tile etc).
WEBP_HINT_LAST
} WebPImageHint;
// Compression parameters.
struct WebPConfig {
int lossless; // Lossless encoding (0=lossy(default), 1=lossless).
float quality; // between 0 and 100. For lossy, 0 gives the smallest
// size and 100 the largest. For lossless, this
// parameter is the amount of effort put into the
// compression: 0 is the fastest but gives larger
// files compared to the slowest, but best, 100.
int method; // quality/speed trade-off (0=fast, 6=slower-better)
WebPImageHint image_hint; // Hint for image type (lossless only for now).
int target_size; // if non-zero, set the desired target size in bytes.
// Takes precedence over the 'compression' parameter.
float target_PSNR; // if non-zero, specifies the minimal distortion to
// try to achieve. Takes precedence over target_size.
int segments; // maximum number of segments to use, in [1..4]
int sns_strength; // Spatial Noise Shaping. 0=off, 100=maximum.
int filter_strength; // range: [0 = off .. 100 = strongest]
int filter_sharpness; // range: [0 = off .. 7 = least sharp]
int filter_type; // filtering type: 0 = simple, 1 = strong (only used
// if filter_strength > 0 or autofilter > 0)
int autofilter; // Auto adjust filter's strength [0 = off, 1 = on]
int alpha_compression; // Algorithm for encoding the alpha plane (0 = none,
// 1 = compressed with WebP lossless). Default is 1.
int alpha_filtering; // Predictive filtering method for alpha plane.
// 0: none, 1: fast, 2: best. Default is 1.
int alpha_quality; // Between 0 (smallest size) and 100 (lossless).
// Default is 100.
int pass; // number of entropy-analysis passes (in [1..10]).
int show_compressed; // if true, export the compressed picture back.
// In-loop filtering is not applied.
int preprocessing; // preprocessing filter:
// 0=none, 1=segment-smooth, 2=pseudo-random dithering
int partitions; // log2(number of token partitions) in [0..3]. Default
// is set to 0 for easier progressive decoding.
int partition_limit; // quality degradation allowed to fit the 512k limit
// on prediction modes coding (0: no degradation,
// 100: maximum possible degradation).
int emulate_jpeg_size; // If true, compression parameters will be remapped
// to better match the expected output size from
// JPEG compression. Generally, the output size will
// be similar but the degradation will be lower.
int thread_level; // If non-zero, try and use multi-threaded encoding.
int low_memory; // If set, reduce memory usage (but increase CPU use).
int near_lossless; // Near lossless encoding [0 = max loss .. 100 = off
// (default)].
int exact; // if non-zero, preserve the exact RGB values under
// transparent area. Otherwise, discard this invisible
// RGB information for better compression. The default
// value is 0.
int use_delta_palette; // reserved for future lossless feature
int use_sharp_yuv; // if needed, use sharp (and slow) RGB->YUV conversion
int qmin; // minimum permissible quality factor
int qmax; // maximum permissible quality factor
...;
};
typedef struct WebPConfig WebPConfig;
struct WebPMemoryWriter {
uint8_t* mem;
size_t size;
...;
};
typedef struct WebPMemoryWriter WebPMemoryWriter;
struct WebPAnimEncoderOptions {
int minimize_size;
int kmin;
int kmax;
int allow_mixed;
int verbose;
...;
};
typedef struct WebPAnimEncoderOptions WebPAnimEncoderOptions;
struct WebPAnimDecoderOptions {
WEBP_CSP_MODE color_mode;
int use_threads;
...;
};
typedef struct WebPAnimDecoderOptions WebPAnimDecoderOptions;
struct WebPAnimInfo {
uint32_t canvas_width;
uint32_t canvas_height;
uint32_t loop_count;
uint32_t bgcolor;
uint32_t frame_count;
...;
};
typedef struct WebPAnimInfo WebPAnimInfo;
// Opaque objects
typedef struct WebPMux WebPMux;
typedef struct WebPAnimEncoder WebPAnimEncoder;
typedef struct WebPAnimDecoder WebPAnimDecoder;
int WebPPictureInit(WebPPicture* picture);
int WebPPictureAlloc(WebPPicture* picture);
int WebPPictureImportRGB(WebPPicture* picture, const uint8_t* rgb,
int rgb_stride);
int WebPPictureImportRGBA(WebPPicture* picture, const uint8_t* rgba,
int rgba_stride);
void WebPPictureFree(WebPPicture* picture);
int WebPInitDecoderConfig(WebPDecoderConfig* config);
VP8StatusCode WebPGetFeatures(const uint8_t* data, size_t data_size,
WebPBitstreamFeatures* features);
VP8StatusCode WebPDecode(const uint8_t* data, size_t data_size,
WebPDecoderConfig* config);
void WebPFreeDecBuffer(WebPDecBuffer* buffer);
int WebPConfigPreset(WebPConfig* config, WebPPreset preset, float quality);
int WebPValidateConfig(const WebPConfig* config);
int WebPEncode(const WebPConfig* config, WebPPicture* picture);
void WebPMemoryWriterInit(WebPMemoryWriter* writer);
int WebPMemoryWrite(const uint8_t* data, size_t data_size,
const WebPPicture* picture);
void WebPMemoryWriterClear(WebPMemoryWriter* writer);
void WebPFree(void* ptr);
void WebPDataInit(WebPData* webp_data);
void WebPDataClear(WebPData* webp_data);
int WebPAnimEncoderOptionsInit(WebPAnimEncoderOptions* enc_options);
WebPAnimEncoder* WebPAnimEncoderNew(int width, int height,
const WebPAnimEncoderOptions* enc_options);
int WebPAnimEncoderAdd(WebPAnimEncoder* enc, struct WebPPicture* frame,
int timestamp_ms, const struct WebPConfig* config);
int WebPAnimEncoderAssemble(WebPAnimEncoder* enc, WebPData* webp_data);
void WebPAnimEncoderDelete(WebPAnimEncoder* enc);
int WebPAnimDecoderOptionsInit(WebPAnimDecoderOptions* dec_options);
WebPAnimDecoder* WebPAnimDecoderNew(const WebPData* webp_data,
const WebPAnimDecoderOptions* dec_options);
int WebPAnimDecoderGetInfo(const WebPAnimDecoder* dec, WebPAnimInfo* info);
int WebPAnimDecoderHasMoreFrames(const WebPAnimDecoder* dec);
int WebPAnimDecoderGetNext(WebPAnimDecoder* dec, uint8_t** buf, int* timestamp);
void WebPAnimDecoderReset(WebPAnimDecoder* dec);
void WebPAnimDecoderDelete(WebPAnimDecoder* dec);
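The "...;" members above are cffi's syntax for partially specified structs, so declarations like these have to be compiled in cffi's API mode (set_source + compile) rather than loaded with ABI-mode dlopen(). A hypothetical build-script sketch follows; the module name, header list, and library names are assumptions, and CDEF is shortened here for illustration.

# Hypothetical sketch: compiling cdef declarations like the ones above with cffi.
from cffi import FFI

CDEF = "void WebPFree(void* ptr);"  # in practice: the full block of declarations shown above

ffibuilder = FFI()
ffibuilder.cdef(CDEF)
ffibuilder.set_source(
    "_webp_cffi",                 # name of the generated extension module (assumed)
    "#include <webp/decode.h>",   # add encode.h/mux.h/demux.h for the other APIs
    libraries=["webp"],           # add webpmux/webpdemux for the mux and animation APIs
)

if __name__ == "__main__":
    ffibuilder.compile(verbose=True)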
|
#![feature(get_mut_unchecked)]
#![feature(map_first_last)]
use crate::error::Error;
use pydis::opcode::py27::{self, Mnemonic, Standard};
use pydis::prelude::Opcode;
use rayon::prelude::*;
use py27_marshal::{Code, Obj};
use rayon::Scope;
use std::collections::HashMap;
use std::fmt::Debug;
use std::marker::PhantomData;
use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use strings::CodeObjString;
/// Representing code as a graph of basic blocks
pub mod code_graph;
/// Deobfuscation module
pub mod deob;
/// Errors
pub mod error;
/// Provides code for partially executing a code object and identifying const conditions
pub mod partial_execution;
/// Python VM
pub mod smallvm;
/// Management of Python strings for string dumping
pub mod strings;
#[derive(Debug)]
pub struct Deobfuscator<'a, O: Opcode<Mnemonic = py27::Mnemonic>> {
/// Input stream.
input: &'a [u8],
/// Whether to generate Graphviz (dot) graphs while deobfuscating
enable_dotviz_graphs: bool,
files_processed: AtomicUsize,
graphviz_graphs: HashMap<String, String>,
_opcode_phantom: PhantomData<O>,
}
impl<'a, O: Opcode<Mnemonic = py27::Mnemonic>> Deobfuscator<'a, O> {
/// Creates a new instance of a deobfuscator
pub fn new(input: &'a [u8]) -> Deobfuscator<'a, O> {
Deobfuscator {
input,
enable_dotviz_graphs: false,
files_processed: AtomicUsize::new(0),
graphviz_graphs: HashMap::new(),
_opcode_phantom: Default::default(),
}
}
/// Consumes the current Deobfuscator object and returns a new one with graph
/// output enabled.
pub fn enable_graphs(mut self) -> Deobfuscator<'a, O> {
self.enable_dotviz_graphs = true;
self
}
/// Deobfuscates this code object
pub fn deobfuscate(&self) -> Result<DeobfuscatedCodeObject, Error<O>> {
deobfuscate_codeobj::<O>(self.input, &self.files_processed, self.enable_dotviz_graphs)
}
/// Returns the generated graphviz graphs after [`deobfuscate`] has been called.
/// Keys are their filenames, values are the dot data.
pub fn graphs(&self) -> &HashMap<String, String> {
&self.graphviz_graphs
}
}
pub struct DeobfuscatedCodeObject {
/// Serialized code object with no header
pub data: Vec<u8>,
/// Graphs that were generated while deobfuscating this code object and any
/// nested objects. Keys represent file names and their deobfuscation pass
/// while the values represent the graphviz data in Dot format
pub graphs: HashMap<String, String>,
}
/// Deobfuscates a marshalled code object and returns either the deobfuscated code object
/// or the [`crate::error::Error`] encountered during execution
pub(crate) fn deobfuscate_codeobj<O: Opcode<Mnemonic = py27::Mnemonic>>(
data: &[u8],
files_processed: &AtomicUsize,
enable_dotviz_graphs: bool,
) -> Result<DeobfuscatedCodeObject, Error<O>> {
if let py27_marshal::Obj::Code(code) = py27_marshal::read::marshal_loads(data)? {
// This vector will contain the input code object and all nested objects
let mut results = vec![];
let mut mapped_names = HashMap::new();
let mut graphs = HashMap::new();
let out_results = Arc::new(Mutex::new(vec![]));
rayon::scope(|scope| {
deobfuscate_nested_code_objects::<O>(
Arc::clone(&code),
scope,
Arc::clone(&out_results),
files_processed,
enable_dotviz_graphs,
);
});
let out_results = Arc::try_unwrap(out_results)
.unwrap_or_else(|_| panic!("failed to unwrap deobfuscation results"))
.into_inner()
.unwrap();
for result in out_results {
let result = result?;
results.push((result.file_number, result.new_bytecode));
mapped_names.extend(result.mapped_function_names);
graphs.extend(result.graphviz_graphs);
}
// sort these items by their file number. ordering matters since our python code pulls the objects as a
// stack
results.sort_by(|a, b| a.0.cmp(&b.0));
let output_data = crate::deob::rename_vars(
data,
&mut results.iter().map(|result| result.1.as_slice()),
&mapped_names,
)
.unwrap();
Ok(DeobfuscatedCodeObject {
data: output_data,
graphs,
})
} else {
Err(Error::InvalidCodeObject)
}
}
pub(crate) struct DeobfuscatedBytecode {
pub(crate) file_number: usize,
pub(crate) new_bytecode: Vec<u8>,
pub(crate) mapped_function_names: HashMap<String, String>,
pub(crate) graphviz_graphs: HashMap<String, String>,
}
pub(crate) fn deobfuscate_nested_code_objects<O: Opcode<Mnemonic = py27::Mnemonic>>(
code: Arc<Code>,
scope: &Scope,
out_results: Arc<Mutex<Vec<Result<DeobfuscatedBytecode, Error<O>>>>>,
files_processed: &AtomicUsize,
enable_dotviz_graphs: bool,
) {
let file_number = files_processed.fetch_add(1, Ordering::Relaxed);
let task_code = Arc::clone(&code);
let thread_results = Arc::clone(&out_results);
scope.spawn(move |_scope| {
let res = crate::deob::deobfuscate_code::<O>(task_code, file_number, enable_dotviz_graphs);
thread_results.lock().unwrap().push(res);
});
// We need to find and replace the code sections which may also be in the const data
for c in code.consts.iter() {
if let Obj::Code(const_code) = c {
let thread_results = Arc::clone(&out_results);
let thread_code = Arc::clone(const_code);
// Call deobfuscate_bytecode first since the bytecode comes before consts and other data
deobfuscate_nested_code_objects::<O>(
thread_code,
scope,
thread_results,
files_processed,
enable_dotviz_graphs,
);
}
}
}
/// Dumps all strings from a Code object. This will go over all of the `names`, variable names (`varnames`),
/// `consts`, and all strings from any nested code objects.
pub fn dump_strings<'a>(
pyc_filename: &'a Path,
data: &[u8],
) -> Result<Vec<CodeObjString<'a>>, Error<Standard>> {
if let py27_marshal::Obj::Code(code) = py27_marshal::read::marshal_loads(data)? {
Ok(dump_codeobject_strings(pyc_filename, code))
} else {
Err(Error::InvalidCodeObject)
}
}
/// Dumps all strings from a Code object. This will go over all of the `names`, variable names (`varnames`),
/// `consts`, and all strings from any nested code objects.
fn dump_codeobject_strings(pyc_filename: &Path, code: Arc<Code>) -> Vec<CodeObjString> {
let new_strings = Mutex::new(vec![]);
code.names.par_iter().for_each(|name| {
new_strings.lock().unwrap().push(CodeObjString::new(
code.as_ref(),
pyc_filename,
crate::strings::StringType::Name,
name.to_string().as_ref(),
))
});
code.varnames.par_iter().for_each(|name| {
new_strings.lock().unwrap().push(CodeObjString::new(
code.as_ref(),
pyc_filename,
crate::strings::StringType::VarName,
name.to_string().as_ref(),
))
});
code.consts.as_ref().par_iter().for_each(|c| {
if let py27_marshal::Obj::String(s) = c {
new_strings.lock().unwrap().push(CodeObjString::new(
code.as_ref(),
pyc_filename,
crate::strings::StringType::Const,
s.to_string().as_ref(),
))
}
});
// Nested code objects may also appear in the const data
code.consts.par_iter().for_each(|c| {
if let Obj::Code(const_code) = c {
// Recurse into the nested code object to collect its strings as well
let mut strings = dump_codeobject_strings(pyc_filename, Arc::clone(&const_code));
new_strings.lock().unwrap().append(&mut strings);
}
});
new_strings.into_inner().unwrap()
}
|
#include <bits/stdc++.h>
#define LL long long
#define pb push_back
#define mp make_pair
#define fi first
#define sc second
using namespace std;
LL n,a,b,c;
int main() {
cin>>n>>a>>b>>c;
n%=4;
if(n==0){
cout<<"0"<<endl;
return 0;
}
LL x = 4-n;
if(x==1) cout<<min(a,min(b+c,3*c))<<endl;
else if(x==2) cout<<min(a*2,min(b,2*c))<<endl;
else if(x==3) cout<<min(a*3,min(a+b,c))<<endl;
return 0;
} |
/*
Accept connection requests from clients
*/
package handle
import (
"fmt"
"io"
"net"
"time"
"github.com/yushaona/gmessage/packet"
"github.com/yushaona/gmessage/server/job"
"github.com/yushaona/gmessage/server/cache"
"github.com/yushaona/gjson"
)
// Accept waits for client connections
func Accept(listen net.Listener) {
go cache.UserCache()
//go HandleMsgQueue()
for {
conn, err := listen.Accept() // TCP three-way handshake complete; ready to communicate
if err != nil {
fmt.Println(err.Error())
continue
}
go execute(conn)
}
}
func HandleData(param *gjson.GJSON) (result gjson.GJSON, err error) {
funcid := param.GetInt("funcid")
if funcid == 0 {
return result, fmt.Errorf("%s", "funcid is not exist")
}
return job.DoJob(funcid, param)
}
/*
Message data packet, consisting mainly of three fields:
id     the user ID to deliver the result to after processing; currently a user sends to itself
funcid which function to execute
data   the payload
{
"funcid":300,
"userid":"462262902976417792",
"session":"1",
"macaddr":"111231321-",
"type":0
}
*/
// func HandleMsgQueue() {
// for {
// select {
// case msgData := <-cache.MsgQueue:
// //result := "[" + msgData.ID + "]===" + msgData.Data // 将请求的数据包发给id指定的用户
// var param gjson.GJSON
// param.Load(msgData.Data)
// //fmt.Println("result", result)
// var msgResult gjson.GJSON
// msgResponse, err := HandleData(&param) //
// fmt.Println(msgResponse.ToString())
// if err != nil {
// msgResult.SetInt("code", 0)
// msgResult.SetString("info", err.Error())
// } else {
// msgResult.SetInt("code", 1)
// msgResult.SetString("info", "ok")
// msgResult.SetObject("data", msgResponse)
// }
// //cache.GetConn(msgData.ID).Write([]byte(msgResult.ToString()))
// }
// }
// }
func execute(c net.Conn) {
var user *cache.UserInfo = nil
var quitChannel = make(chan struct{})
cacheBuf := make([]byte, 0)  // bytes left over between reads (partial packet)
packetBuf := make([]byte, 0) // a complete, de-framed packet
buf := make([]byte, 1024)    // buffer for a single read from the stream
for {
n, err := c.Read(buf)
if err != nil { // a closed connection yields io.EOF, which is also non-nil
fmt.Println("connection read error:", err)
break
}
// de-frame the TCP byte stream into packets
packetBuf, cacheBuf = packet.UnPack(append(cacheBuf, buf[:n]...))
if len(packetBuf) > 0 {
fmt.Println("packetBuf", string(packetBuf))
var main gjson.GJSON
err = main.Load(string(packetBuf))
if err != nil {
fmt.Println(err)
continue
}
/*
{
"userid":"462262902976417792",
"funcid":300,
"session":"1",
"macaddr":"111231321-"
}
*/
funcid := main.GetInt("funcid")
switch funcid {
case 1: // user login
{
userid := main.GetString("userid")
if userid == "" {
var obj gjson.GJSON
obj.SetInt("code", 0)
obj.SetString("info", "userid不能为空")
c.Write([]byte(obj.ToString()))
continue
}
if user == nil { // a socket may initialize a user login only once -- the current socket is then bound to the user: a stateful TCP connection
passwd := main.GetString("passwd")
if passwd == "<PASSWORD>" {
user = cache.CreateUser(userid, c)
//updateTime = time.Now()
cache.UserEnterChannel <- user
var obj gjson.GJSON
obj.SetInt("code", 1)
obj.SetString("info", "ok")
c.Write([]byte(obj.ToString()))
go UserChannel(user)
go UserValid(c, quitChannel)
} else {
var obj gjson.GJSON
obj.SetInt("code", 0)
obj.SetString("info", "密码错误")
c.Write([]byte(obj.ToString()))
}
}
}
default:
{
if user == nil {
var obj gjson.GJSON
obj.SetInt("code", 0)
obj.SetString("info", "请登录")
c.Write([]byte(obj.ToString()))
} else {
if funcid != 2 { // funcid == 2 denotes a heartbeat packet
var msgResult gjson.GJSON
msgResponse, err := HandleData(&main) //
fmt.Println(msgResponse.ToString())
if err != nil {
msgResult.SetInt("code", 0)
msgResult.SetString("info", err.Error())
} else {
msgResult.SetInt("code", 1)
msgResult.SetString("info", "ok")
msgResult.SetObject("data", msgResponse)
}
// route the processed result to its target user -- a shared channel receives the data and dispatches it to the corresponding user's channel
cache.UserMessageChannel <- &cache.MsgData{ID: user.UserID, Data: msgResult.ToString()}
}
if funcid == 2 {
fmt.Println("心跳包")
}
quitChannel <- struct{}{}
}
}
}
}
}
// user logout
if user != nil {
cache.UserLeaveChannel <- user
}
}
// UserValid checks the validity of the connection
func UserValid(c net.Conn, quit <-chan struct{}) {
d := 1 * time.Minute
t := time.NewTimer(d) // if the client sends nothing within this interval, force the user offline
for {
select {
case <-t.C:
{
c.Close()
}
case <-quit:
{
t.Reset(d)
}
}
}
}
// UserChannel sends data to the user asynchronously
func UserChannel(user *cache.UserInfo) { // each user's data channel
for {
select {
case datastr := <-user.MsgChannel:
user.Conn.Write([]byte(datastr))
}
}
}
|
The Family Policy Institute of Washington recently spoke with some students from Seattle University and asked them simple questions about gender and the differences between men and women.
Here’s a description from their blog:
The second episode in FPIW’s wildly popular College Kids Say the Darndest Things series launched Monday afternoon, just hours after the federal government inserted themselves into the battle between the unrealistic social justice agenda and biological science. This episode, filmed near Seattle University, a Jesuit private school, asks college students if there is a difference between men and women. Some of the answers are hilarious, bizarre, and alarming. Is this really what our institutions of higher education are teaching?
News radio KEEL gets into specifics:
A new video of students at Washington’s Seattle University gives insight into how college-age Americans view gender identity. Repeatedly, students insisted that “gender identity” chosen by a person is more important than biological differences between the sexes. In fact, when asked if there is a difference between men and women, most seem truly stumped. Says one, “I don’t think there’s any one way to distinguish between men and women, and I don’t think it’s necessary.”
And another says, “Gender is a societal construct.”
Now watch the video and prepare to be amazed:
Everyone is trying so hard to give politically correct answers that no one states the obvious.
All I could think of was this classic scene from Kindergarten Cop:
Was that so difficult?
Featured image via YouTube. |
package cola.machine.game.myblocks.model.base;
import cola.machine.game.myblocks.manager.TextureManager;
import cola.machine.game.myblocks.model.textture.TextureInfo;
import com.alibaba.fastjson.JSONObject;
import com.dozenx.game.engine.element.model.BoxModel;
import com.dozenx.game.graphics.shader.ShaderManager;
import com.dozenx.game.opengl.util.ShaderConfig;
import com.dozenx.game.opengl.util.ShaderUtils;
import com.dozenx.game.opengl.util.Vao;
import com.dozenx.util.MapUtil;
import core.log.LogUtil;
import glmodel.*;
import java.util.ArrayList;
import java.util.List;
public class ObjBlock extends BaseBlock {
public TextureInfo front;
String fileName;
GLModel glModel ;
List<Float[]> metas = new ArrayList<Float[]>();
public TextureInfo getFront() {
return front;
}
public void setFront(TextureInfo front) {
this.front = front;
}
public ObjBlock(){
}
public ObjBlock(String name, int i, boolean b) {
super(name,i,b);
}
@Override
public void renderShader(ShaderConfig config, Vao vao, GL_Matrix matrix) {
GL_Vector[] dirAry = BoxModel.dirAry;
// fetch all of its vertices, colors and normals
GL_Mesh m = glModel.mesh;
GLMaterial[] materials = m.materials; // loaded from the .mtl file
GLMaterial mtl;
GL_Triangle t;
int currMtl = -1;
int i = 0;
// draw all triangles in object
ShaderUtils.glUse(config,config.getVao());
// ShaderUtils.glColor(1,1,1); // temporarily set its color value to black
for (i=0; i < m.triangles.length; ) {
t = m.triangles[i];
// activate new material and texture
currMtl = t.materialID;
mtl = (materials != null && materials.length>0 && currMtl >= 0)? materials[currMtl] : glModel.defaultMtl;
// mtl.apply();
if(mtl!=null&& mtl.textureFile!=null){
// ShaderUtils. glColor(mtl.diffuse.get(0),mtl.diffuse.get(1),mtl.diffuse.get(2));
ShaderUtils.bindTexture(TextureManager.getTextureInfo("human_body_front"));
}
for ( ; i < m.triangles.length && (t=m.triangles[i])!=null && currMtl == t.materialID; i++) {
// GL11.glTexCoord2f(t.uvw1.x, t.uvw1.y);
ShaderUtils.glTexCoord2f(t.uvw1.x, t.uvw1.y);
ShaderUtils.glNormal3f(t.norm1.x, t.norm1.y, t.norm1.z);
ShaderUtils.glVertex3f( (float)t.p1.pos.x, (float)t.p1.pos.y, (float)t.p1.pos.z);
ShaderUtils.glTexCoord2f(t.uvw2.x, t.uvw2.y);
ShaderUtils.glNormal3f(t.norm2.x, t.norm2.y, t.norm2.z);
ShaderUtils.glVertex3f( (float)t.p2.pos.x, (float)t.p2.pos.y, (float)t.p2.pos.z);
ShaderUtils.glTexCoord2f(t.uvw3.x, t.uvw3.y);
ShaderUtils.glNormal3f(t.norm3.x, t.norm3.y, t.norm3.z);
ShaderUtils.glVertex3f( (float)t.p3.pos.x, (float)t.p3.pos.y, (float)t.p3.pos.z);
}
}
glModel.renderShader();
}
@Override
public void renderShaderInGivexyzwht(ShaderConfig config, Vao vao, float parentX, float parentY, float parentZ, float childX, float childY, float childZ, float width, float height, float thick, boolean top, boolean bottom, boolean left, boolean right, boolean front, boolean back) {
}
// public void update(){
// GL_Vector[] dirAry = BoxModel.dirAry;
//
// if(top!=null) {
// ShaderUtils.draw3dImage(ShaderManager.terrainShaderConfig, ShaderManager.anotherShaderConfig.getVao(),x,y,z, points[4], points[5], points[6], points[7], dirAry[0], top);
// }
//
// if(bottom!=null) {
// ShaderUtils.draw3dImage(ShaderManager.terrainShaderConfig, ShaderManager.anotherShaderConfig.getVao(), x,y,z, points[3], points[2], points[1], points[0], dirAry[1], bottom);
// }
//
// if(front!=null) {
// ShaderUtils.draw3dImage(ShaderManager.terrainShaderConfig, ShaderManager.anotherShaderConfig.getVao(),x,y,z, points[0], points[1], points[5], points[4], dirAry[2], front);
// }
//
// if(back!=null) {
// ShaderUtils.draw3dImage(ShaderManager.terrainShaderConfig, ShaderManager.anotherShaderConfig.getVao(), x,y,z, points[2], points[3], points[7], points[6], dirAry[3], back);
// }
//
// if(left!=null) {
// ShaderUtils.draw3dImage(ShaderManager.terrainShaderConfig, ShaderManager.anotherShaderConfig.getVao(),x,y,z, points[3], points[0], points[4], points[7], dirAry[4], left);
// }
// if(right!=null) {
// ShaderUtils.draw3dImage(ShaderManager.terrainShaderConfig, ShaderManager.anotherShaderConfig.getVao(), x,y,z, points[1], points[2], points[6], points[5], dirAry[5], right);
// }
//}
public ObjBlock copy(){
ObjBlock block =new ObjBlock();
copyObjBlock( block);
return block;
}
public void copyObjBlock(ObjBlock block){
copyBaseBlock(block);
block.glModel =this.glModel.copyClone();
block.front=front;
}
@Override
public void render(ShaderConfig config, Vao vao, float x, float y, float z, boolean hastop, boolean hasbottom, boolean hasleft, boolean hasright, boolean hasfront, boolean hasback) {
GL_Vector[] dirAry = BoxModel.dirAry;
glModel.renderShader(config,vao,x,y,z);
}
public String toImageBlockString(){
StringBuffer buffer =new StringBuffer();
buffer .append(toBaseBlockString())
.append("front:'").append(this.front==null?"":this.front.name).append("',")
;
return buffer.toString();
}
public String toString(){
StringBuffer buffer =new StringBuffer();
buffer.append("{")
.append("blocktype:'imageblock',")
.append("obj:'"+fileName+"'")
.append("}");
return buffer.toString();
}
public static ObjBlock parse(JSONObject map){
ObjBlock objBlock =new ObjBlock();
parseObj(objBlock, map);
return objBlock;
}
public static void parseObj(ObjBlock objBlock,JSONObject map){
parse(objBlock,map);
String front = (String) map.get("front");
// imageBlock.name = name ;
//String bottom = (String) map.get("bottom");
//String allSide = MapUtil.getStringValue(map, "allSide");
//if(StringUtil.isNotEmpty(front)){
objBlock.front = TextureManager.getTextureInfo(front);
//objBlock.glModel=TextureManager.getObj(MapUtil.getStringValue(map,"obj"));
//}
String objName = MapUtil.getStringValue(map, "obj");
objBlock.glModel = TextureManager.getObj(objName);
if (objBlock.glModel == null) {
LogUtil.err("can't find " + objName);
objBlock.glModel = new GLModel(objName);
}
// GL_Mesh m = objBlock. glModel.mesh;
// for (int i=0; i < m.triangles.length; ) {
//
// t = m.triangles[i];
//
// // activate new material and texture
// // currMtl = t.materialID;
// // mtl = (materials != null && materials.length>0 && currMtl >= 0)? materials[currMtl] : defaultMtl;
//// mtl.apply();
//// if(mtl!=null){
//// // ShaderUtils. glColor(mtl.diffuse.get(0),mtl.diffuse.get(1),mtl.diffuse.get(2));
//// ShaderUtils.bindTexture(TextureManager.getTextureInfo("human_body_front"));
//// }
//GL_Triangle t;
// for ( ; i < m.triangles.length && (t=m.triangles[i])!=null; i++) {
// // GL11.glTexCoord2f(t.uvw1.x, t.uvw1.y);
// // veticesBuffer.put(x).put(y).put(z).put(normalX).put(normalY).put(normalZ).put(texCoordX).put(texCoordY).put(0).put(nowTextureId);//p1
//
// //Float[] floats =new Float[]{t.p1.pos.x, t.p1.pos.y, t.p1.pos.z,t.norm1.x, t.norm1.y, t.norm1.z,}
// ShaderUtils.glTexCoord2f(t.uvw1.x, t.uvw1.y);
// ShaderUtils.glNormal3f(t.norm1.x, t.norm1.y, t.norm1.z);
// objBlock.metas.add(ShaderUtils.getFloats((float) t.p1.pos.x, (float) t.p1.pos.y, (float) t.p1.pos.z));
//
// ShaderUtils.glTexCoord2f(t.uvw2.x, t.uvw2.y);
// ShaderUtils.glNormal3f(t.norm2.x, t.norm2.y, t.norm2.z);
// objBlock.metas.add(ShaderUtils.getFloats((float) t.p2.pos.x, (float) t.p2.pos.y, (float) t.p2.pos.z));
//
// ShaderUtils.glTexCoord2f(t.uvw3.x, t.uvw3.y);
// ShaderUtils.glNormal3f(t.norm3.x, t.norm3.y, t.norm3.z);
// objBlock.metas.add(ShaderUtils.getFloats((float) t.p3.pos.x, (float) t.p3.pos.y, (float) t.p3.pos.z));
// }
//
// }
}
public void rotateWithY(int degree){// angle 1~360
}
@Override
public void reComputePoints(GL_Matrix glMatrix) {
// for(int i=0;i<glModel.mesh.vertices.length;i++){
// glModel.mesh.vertices[i] =glMatrix.multiply();//glModel.mesh.vertices[i]
// }
// glModel.mesh.vertices
//this.points = BoxModel.getSmallPoint(0, 0, 0, width, height, thick);
// this.points = BoxModel.getSmallPoint(x,y,z,width,height,thick);
//GL_Matrix rotateMatrix = GL_Matrix.multiply(GL_Matrix.multiply(GL_Matrix.translateMatrix(width / 2, 0, thick / 2), GL_Matrix.rotateMatrix(0, this.dir * Constants.PI90, 0)), GL_Matrix.translateMatrix(-width / 2, 0, -thick / 2));
// GL_Matrix rotateMatrix = GL_Matrix.rotateMatrix(0,this.dir*3.14f/2,0);
// for (int i = 0; i < points.length; i++) {
// points[i] = rotateMatrix.multiply(rotateMatrix, points[i]);
//
// }
//GL_Mesh mesh = glModel.mesh;
glModel. mesh = glModel.mesh.makeClone();
for (int i = 0; i < glModel.mesh.vertices.length; i++) {
GL_Vertex gl_vertex = glModel.mesh.vertices[i];
//new GL_Vector(0,0,0);//
gl_vertex.pos = GL_Matrix.multiply(glMatrix, gl_vertex.pos); // rotate
}
}
}
|
Modelling of mechanical and filtration processes near the well with regard to anisotropy
A geomechanical approach to modeling deformation and seepage is presented. Three stages of modeling are described: choice of an appropriate mechanical model and its adaptation to the case in question, experimental determination of parameters of the model, simulation of processes of seepage for particular configurations of the well. The applied model allows describing the main specific characteristics of mechanical behavior of the collector: the influence of the pore pressure on deformation; the influence of not only shear but also comprehensive stresses and pore pressure on the transition to inelastic behavior; the appearance of inelastic volumetric deformation and its nontrivial dependence on the stress state; the anisotropy of elastic, strength and seepage properties; non-obvious dependence of permeability on the stress strain state. The model unites essential characteristics of Hill’s plastic flow theory for anisotropic materials and the Drucker–Prager theory for inelastic deformation of soils. The results of experimental determination of the involved parameters obtained using true triaxial loading system for the collector of Vladimir Filanovsky field in the Caspian Sea are presented.
Introduction
The process of hydrocarbon fluid seepage into productive wells is affected by many factors, one of which, and not the least significant, is the change in the pore structure, and hence in permeability, due to deformation processes in collectors governed by the stress state. The situation is aggravated by the back-influence of seepage on the rearrangement of the stress-strain state; hence it is necessary to solve a coupled elastic-plastic and filtration problem.
The approach of modeling used throughout the current research consists in the following key stages:
• choosing and, if necessary, developing an appropriate model of deformation and filtration;
• experimental determination of the parameters involved;
• solving the coupled mechanical (determining the stress-strain state) and seepage (determining the fluid flow and the production rate) problem on the basis of the chosen model and experimentally obtained parameters.
The results of the research on these three stages are described below for the particular case of the Filanovsky field collector. An adequate model should be able to describe the key characteristics of the process, among which we emphasize the following: (1) the influence of pore pressure on deformation; (2) the nonlinear stress-strain behavior with an essential role of inelastic ("plastic") strains, and the influence of the level of not only shear but also comprehensive stress (and pore pressure) on the transition to inelastic behavior; (3) the possible inelastic volumetric deformation and its nontrivial dependence on the stress state: at least the absence of proportionality between inelastic volumetric strain and volumetric stress (violation of the associative law); (4) the possible anisotropy of elastic, strength and seepage properties; (5) the non-obvious dependence of permeability on the stress-strain state.
All of these characteristics have been well described by a number of suggested theories. Thus, the influence of pore pressure on deformation (p. 1) is described by the theory of poroelasticity. The influence of comprehensive stress on the level of elastic/plastic transition (p. 2) is described by the Mohr-Coulomb and Drucker-Prager criteria. To describe the inelastic volumetric deformation nonproportional to the volumetric stress (p. 3), the concept of dilatancy was proposed, according to which the former is supposed to depend on the intensity of shear strain, with possible violation of the associative law. The anisotropy in the theory of plastic flow was taken into account by Hill, and the Hill theory was further modified to account for the difference in behavior in compression and in tension. The dependence of permeability on the stress state may vary in a wide range depending on the particular collector and is usually determined from experiments.
The above-mentioned models, which describe the listed characteristics of deformation and filtration, form a basis for solving geomechanical problems. Moreover, some of them have already been implemented in commercial codes. Meanwhile, each of these models describes some of the characteristics but not the others, which does not allow their direct application. Therefore, one of the main goals of the current study consists in the adoption and further development of these models, aiming at taking the whole set of the above-listed specific features into account.
Basic equations of the model
The fluid flow is described by Darcy's law, where $p$ is the pore pressure and $\kappa$ is the permeability, which may in general depend on the coordinate, the pore pressure, the stress state and other factors; an index after a comma stands for the derivative with respect to the corresponding coordinate. The stress-strain state is described by the laws of poroelasticity; their generalization to the case of plastic deformations may be written as follows. Here $\sigma_{ij}$ and $s_{ij}$ are components of the total and effective (acting on the solid skeleton) stress tensors; $\varepsilon^T_{ij}$, $\varepsilon^E_{ij}$, and $\varepsilon^P_{ij}$ are components of the total, elastic and plastic strain tensors; $u_i$ are components of the displacement vector; $\Lambda_{ijkl}$ are components of the elasticity tensor (in the case of elastic isotropy, they are expressed in terms of two constants); $0 \le \alpha_P \le 1$ is Biot's coefficient characterizing the structure of the porosity; for well-permeable collectors, $\alpha_P$ approaches unity from below. For practical reasons, one can set $\alpha_P = 1$. Before the criterion of elastic-plastic transition is reached, $\varepsilon^P_{ij} = 0$, and equations (1)-(5) together with the boundary conditions for stresses (or displacements) and the pore pressure form a closed system.
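The display equations referenced in this paragraph were lost in extraction. A plausible reconstruction from the surrounding definitions, following standard poroelasticity (the numbering matches the in-text reference to equations (1)-(5); sign conventions, and the absorption of the fluid viscosity into $\kappa$, are assumptions):

$$v_i = -\kappa\, p_{,i}, \tag{1}$$
$$\sigma_{ij,j} = 0, \tag{2}$$
$$\sigma_{ij} = s_{ij} - \alpha_P\, p\, \delta_{ij}, \tag{3}$$
$$\varepsilon^T_{ij} = \tfrac{1}{2}\,(u_{i,j} + u_{j,i}) = \varepsilon^E_{ij} + \varepsilon^P_{ij}, \tag{4}$$
$$s_{ij} = \Lambda_{ijkl}\, \varepsilon^E_{kl}. \tag{5}$$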
To describe the plastic deformation, one needs to specify the plasticity criterion and the plasticity potential. Consider a criterion of this type, modified to take account of the effects of poroelasticity (criterion (6)). Here $s_{ij}$ are components of the effective stress tensor in the coordinate frame related to the material axes of isotropy; $G^0_{(ij)}$, $L^0_{(ij)}$, $B^0_{(ij)}$ are material constants. For a transversely isotropic medium with the isotropy plane normal to $n_3$, the number of independent constants in (6) is reduced. In the isotropic case, neglecting the effects related to the pore pressure (replacing effective stresses with the total ones) reduces (6) to the Drucker-Prager criterion.
On the other hand, for a vanishing influence of normal stresses on the elastic-plastic transition and of the effects related to pore pressure, equation (6) reduces to the Hill criterion of anisotropic plasticity.
According to the plastic flow theory (modified here to take account of hydrostatic stresses), the actual state in the space of stresses during active loading remains on the yield surface (the generalization (9) of plasticity criterion (6)), where the dimensionless function $A(k)$ (with $A(0) = 1$) serves as a characteristic of hardening, supposed to be isotropic for the model under consideration; $k$ is the parameter of hardening and, in a widely used variant of the theory, $k$ is associated with the work of plastic strains (equation (10)). During active loading, stress growth is accompanied by plastic strain growth. In the frame of the plastic flow theory, the increment ("rate") of plastic strain $d\varepsilon^P_{ij}$ is expressed in terms of the plastic potential $Q$; see the flow rule below. Here $d\lambda$ is a yet unknown coefficient. In the classical theory of plasticity, equating the plastic potential with the yield surface (a relation referred to as the associative law) allows an elegant route to constructing constitutive laws capable of adequately describing the observed stress-strain relations. However, using the associative law to describe the deformation of rocks and soils leads to a rather strong discrepancy with observations: the predicted volumetric inelastic deformations are usually much greater than the actual ones. Therefore, the so-called non-associative laws with $Q \neq F$ are used to describe the deformation of rocks and soils.
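The flow rule itself was also dropped in extraction; given the definitions of $d\lambda$ and $Q$ in the text, it is presumably the standard one (the equation number (11) is inferred from the later reference to "the plastic potential (11)"):

$$d\varepsilon^P_{ij} = d\lambda\, \frac{\partial Q}{\partial s_{ij}}. \tag{11}$$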
Hereafter, the form of the plastic potential $Q$ is chosen similarly to the form of the yield function $F$ in equation (9), with dissimilar factors at the terms linear in the stresses (the potential (12)). In the case of vanishing or negligible inelastic volumetric strains (dilatancy), a corresponding restriction on these factors takes place. For a number of rocks and soils this relation, although not exact, serves as a rather accurate approximation.
To provide the deviator associativity, it is sufficient to set the deviatoric factors of the plastic potential equal to those of the yield function. The value of $d\lambda$ is determined from equation (9): the differential of $F$ should vanish,

$$\frac{\partial F}{\partial s_{ij}}\, ds_{ij} + \frac{\partial F}{\partial k}\, dk = 0.$$
The substitution of $dk$ from equation (10) into this relation yields $d\lambda$. Expressing the increment of plastic strain in terms of the plastic potential (11), one finds the resulting constitutive relation; see the reconstruction below. Here $H \equiv -\partial F/\partial k$ is a material function determined from experiments. In a first approximation it may be considered constant, $H = E_p^{-1}$, where $E_p$ is the plasticity modulus.
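The displayed result for $d\lambda$ is likewise missing. With the work-hardening choice $dk = s_{ij}\, d\varepsilon^P_{ij}$ (equation (10)), $H \equiv -\partial F/\partial k$, and the flow rule (11), the consistency condition above gives, as a reconstruction:

$$d\lambda = \frac{1}{H\, s_{kl}\, \partial Q/\partial s_{kl}}\, \frac{\partial F}{\partial s_{mn}}\, ds_{mn}, \qquad d\varepsilon^P_{ij} = \frac{\partial Q}{\partial s_{ij}}\; \frac{(\partial F/\partial s_{mn})\, ds_{mn}}{H\, s_{kl}\, \partial Q/\partial s_{kl}}.$$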
Experimental determination of the model parameters
The experimental part of the study consists in determining the model parameters. Experiments were carried out using the IPMech RAS Triaxial Independent Loading Test System (TILTS) on cubic specimens (40 × 40 × 40 mm) of one of the lithotypes of the Filanovsky field collector. Two types of loading program were used: uniaxial loading with lateral pressure, and generalized shear.
According to the loading program of the first type, the specimens were subjected to comprehensive pressure up to a certain value (2, 10, or 20 MPa) and then loaded along one of the axes up to the level of elastic/plastic transition in displacement-controlled mode. During the second loading program, the specimens experience volumetric compression up to the level corresponding to the in situ stress state, and then the loading continues along one of the axes, accompanied by unloading along another axis, so that the stress along the third axis remains constant; the average stress is conserved at this stage. The stress state constructed in this program corresponds to the state near the well.
The permeability was measured along the direction of the minimal stress during the whole loading period. The tests were carried out for two orientations of the minimal stress: along the layers and normal to the layers, which corresponds to two different points on the contour of a horizontal well. As a result, the dependence of permeability on the stress, and the yield stresses corresponding to these loading conditions, were obtained.
The results of tests according to the loading programs of the first type are presented in table 1. Figure 1 presents the stress-strain curves for the cases of the maximal stress aligned in the vertical (figure 1a) and one of the horizontal (figure 1b) directions. Figure 2 presents the dependence of permeability on the stress intensity. Figure 3 presents experimental and calculated data on the critical stress according to criterion (9).
The parameters of the yield criterion (6) were determined from the whole set of experiments of both types (the properties correspond to transverse isotropy (7)) by means of the least-squares method: $G^0_{(12)} = 2.7 \times 10^{-3}$, $G^0_{(13)} = G^0_{(23)} = 1.27 \times 10^{-3}$, $B^0_{(1)} = B^0_{(2)} = 1.95 \times 10^{-2}$, $B^0_{(3)} = 3.28 \times 10^{-2}$, $L^0_{(13)} = 1.0 \times 10^{-2}$. It was impossible to determine the last parameter from the available experiments; its value was chosen by analogy with samples of a similar lithotype. The parameters of the plastic potential (12) deviating from the corresponding parameters of the yield criterion are determined by formula (14).
[Figure 3 caption: value of the critical stress $S_{cr}$ depending on the lateral pressure for tests of the second type. Dots correspond to experimental points; the line corresponds to modelling.]
Simulation
Two variants of well faces are considered: a simple unsupported well, and an unsupported well with perforation. The former corresponds to a rather simple geometry, which allows comparing the simulation results with analytical ones. The latter corresponds to a more complex situation. In both cases, the normal stresses and pressure at the well contour were assumed to vanish. The stress state far from the well was taken to be hydrostatic compression of magnitude corresponding to the weight of the overlying rocks; the pore pressure far from the well was set to the hydrostatic value.
For each configuration, the finite element (FEM) simulation was carried out as follows.
1. The problem of fluid seepage was solved to obtain the first iteration of the pore pressure.
2. The problem of poroelastoplasticity was solved to obtain the distribution of effective stresses, elastic and plastic strains. This stage consisted of the following substages.
   2.1. For the calculated field of pore pressure, the uncoupled problem of poroelasticity was solved.
   2.2. The parameters of plasticity for each element were modified according to the calculated fields of the attained stresses and pore pressure levels.
   2.3. The problem of poroelastoplasticity was solved for the modified properties of the elements.
3. In accordance with the experimentally obtained law of the permeability dependence on the stress-strain state, the permeability of each element was modified according to the calculated stress field.
4. The problem of fluid seepage was solved, and the flow distribution and the production rate were obtained.
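The procedure above is a simple one-way-coupled fixed-point iteration between the seepage and mechanical solvers. A minimal sketch of this coupling logic follows; the solver callables are hypothetical placeholders (none of these names come from the paper), and only the orchestration is shown.

# Hypothetical sketch of the coupled seepage/poroelastoplasticity iteration.
from typing import Callable, List, Tuple

Field = List[float]  # stand-in type for a per-element field

def coupled_simulation(
    solve_seepage: Callable[[Field], Tuple[Field, Field]],  # permeability -> (pressure, flow)
    solve_poroelastoplasticity: Callable[[Field], Field],   # pressure -> stress
    permeability_law: Callable[[Field], Field],             # stress -> permeability
    initial_permeability: Field,
    n_iterations: int = 1,
) -> Tuple[Field, Field]:
    perm = initial_permeability
    stress: Field = []
    for _ in range(n_iterations):
        # 1. Seepage problem: current iteration of the pore pressure field.
        pressure, _ = solve_seepage(perm)
        # 2. Poroelastoplasticity for that pore pressure (internally: uncoupled
        #    poroelasticity, per-element update of plasticity parameters, then
        #    the elastic-plastic re-solve, as in substages 2.1-2.3).
        stress = solve_poroelastoplasticity(pressure)
        # 3. Update each element's permeability via the experimental stress law.
        perm = permeability_law(stress)
    # 4. Final seepage solve: flow distribution, from which the production
    #    rate is obtained.
    _, flow = solve_seepage(perm)
    return stress, flow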
The calculations were performed in 3-D using meshes with 22356 nodes and 44001 elements, corresponding (due to symmetry) to quarters of the initial domains. The geometrical properties were: the well radius R = 0.1 m; the length of the cut L = 0.46 m; the thickness of the cut h = 0.02 m (figure 4). The mechanical properties are listed in the preceding section.
The boundary conditions were the following: the normal stress and pore pressure on the outer boundary were 31 MPa and 13 MPa, respectively; the normal stress and pore pressure at the well and cut surfaces were zero. On the surfaces normal to the well, zero displacements and zero fluid flow were set. The production rates relative to the "ideal" well (for which the permeability is constant and equal to the initial permeability) are presented in table 2. The distributions of the stress intensity and plastic strains for both configurations are presented in figure 5.
Conclusion
The geomechanical approach to modeling deformation and seepage is presented with regard to the anisotropy of elastic and plastic properties and the dependence of the yield transition on the stress state. The model used allows describing the main specific characteristics of the mechanical behavior of the collector: the influence of pore pressure on deformation; the influence of not only shear but also comprehensive stress and pore pressure on the transition to inelastic behavior; the appearance of inelastic volumetric deformation and its nontrivial dependence on the stress state; the anisotropy of elastic, strength and seepage properties; the non-obvious dependence of the permeability on the stress-strain state. The model unites essential characteristics of Hill's plastic flow theory for anisotropic materials and the Drucker-Prager theory for inelastic deformation of soils.
The procedure of experimental determination of the involved parameters for the Filanovsky field using the experimental unit IPMech RAS Triaxial Independent Loading Test System (TILTS) is presented, and the experimental results are given.
The results of numerical simulation for the described model and the experimentally obtained parameters are presented. The cases of unsupported and perforated well ends are considered. The stress concentrations and the production rates were calculated.
The results of the study demonstrate that the proposed approach can be used to address geomechanical problems for optimizing technological processes. |
199 years of Parkinson disease – what have we learned and what is the path to the future?
In 1817, 199 years ago, James Parkinson described for the first time in ‘An Essay on the Shaking Palsy’ the symptoms of the disease that was later named Parkinson Disease. The current special issue of the Journal of Neurochemistry is dedicated to the discoveries and advances that have been made since, leading to a better understanding of this neurodegenerative disease and of potential treatment options. Reputed researchers cover various aspects from neuroanatomical basics; genetic and molecular risk factors such as LRRK2; the available cell and animal models that mimic crucial features of the pathophysiology; to clinical aspects and treatments, including deep brain stimulation. |
On N=1 4d Effective Couplings for F-theory and Heterotic Vacua
We show that certain superpotential and Kähler potential couplings of N=1 supersymmetric compactifications with branes or bundles can be computed from Hodge theory and mirror symmetry. This applies to F-theory on a Calabi-Yau four-fold and to three-fold compactifications of type II and heterotic strings with branes. The heterotic case includes a class of bundles on elliptic manifolds constructed by Friedman, Morgan and Witten. Mirror symmetry of the four-fold computes non-perturbative corrections to mirror symmetry on the three-folds, including D-instanton corrections. We also propose a physical interpretation for the observation by Warner that relates the deformation spaces of certain matrix factorizations and the periods of non-compact 4-folds that are ALE fibrations.
Introduction
Let $Z_B$ be a Calabi-Yau (CY) three-fold and $E$ a holomorphic bundle or sheaf on it. In a certain decoupling limit, where one neglects the backreaction of the full string theory on the degrees of freedom of the bundle, $E$ can describe either a (sub-)bundle of a heterotic string compactification on $Z_B$, a heterotic 5-brane, or a B-type brane in a type II compactification on $Z_B$. In the latter case we will also be interested in the geometry $(Z_A, L)$ associated to $(Z_B, E)$ by open string mirror symmetry, which consists of an A-type brane $L$ on the mirror three-fold $Z_A$ of $Z_B$. The contribution of the bundle to the space-time superpotential of a string compactification on $Z_B$ is, in a certain approximation, given by the holomorphic Chern-Simons functional (1.1), for both the heterotic bundle and the B-type brane. Here $\Omega$ is the holomorphic (3,0) form on $Z_B$ and $A$ is the (0,1) part of the connection on $E$. There is another superpotential proportional to the periods of $\Omega$, which, again in a certain approximation, is of the form (1.2). In the type II compactification on $Z_B$, $W_G$ is the superpotential induced by NS and RR 3-form fluxes, and $S$ is the complex dilaton. In heterotic compactifications, $W_G$ will be related below to the superpotential of a compactification on non-Kähler manifolds with H-flux. Depending on the type of string theory and its compactification, the combined superpotential (1.3) may be exact or subject to various quantum corrections.
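The displayed equations (1.1)-(1.3) did not survive extraction. From the surrounding definitions they are the standard holomorphic Chern-Simons and 3-form flux superpotentials; a reconstruction (overall normalizations and the precise flux combination are assumptions) would read:

$$W_{CS}(E) = \int_{Z_B} \Omega \wedge \operatorname{tr}\left( A \wedge \bar{\partial} A + \tfrac{2}{3}\, A \wedge A \wedge A \right), \tag{1.1}$$

$$W_G = \int_{Z_B} \Omega \wedge \left( H^{RR}_3 - S\, H^{NS}_3 \right), \tag{1.2}$$

$$W = W_{CS} + W_G. \tag{1.3}$$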
The purpose of this note is to show how the methods of mirror symmetry, when combined with Hodge theory, can be used to compute effective couplings of these heterotic/type II compactifications, including the superpotential and the Kähler potential.
Hodge theory enters in two steps: a 'classical' theory on the CY 3-fold, which computes the integrals on the 3-fold in (1.1) and (1.2), and a 'quantum' deformation of these 3-fold data defined by the (classical) Hodge variation on a 'dual' CY 4-fold. Physics-wise, the 4-fold geometry represents the compactification manifold of a dual F-theory or type IIA compactification. We will argue that the 4-fold result agrees with the 3-fold result when it should, but gives more general results, including the case when the heterotic 3-fold is not CY.
The first step on the three-fold can be realized by computing the Hodge variation on a relative cohomology group $H^3(Z_B, D)$, which captures the brane/bundle data in addition to the geometry of $Z_B$. This was shown previously in the context of B-type branes, and we generalize this relation here to heterotic 5-branes and general bundles, including the bundles on elliptically fibered 3-folds $Z_B$ constructed by Friedman, Morgan and Witten. The 'classical' Hodge theory on the 3-fold gives an explicit evaluation of the 3-fold integrals in (1.1) and (1.2) and a preferred choice of physical coordinates, which leads to the prediction of world-sheet corrections from sphere and disc instantons of the appropriately defined mirror theories.
The second step involves Hodge theory and mirror symmetry on a mirror pair of dual CY 4-folds. 4-folds enter the stage in two seemingly different ways, in remarkable parallel with the two appearances of (1.1) in heterotic and type II compactifications on $Z_B$. Firstly, through the duality of heterotic strings on an elliptically fibered CY 3-fold $Z_B$ to F-theory on a CY 4-fold $X_B$. This duality motivated the systematic construction of "heterotic" bundles on elliptically fibered $Z_B$. Secondly, 4-folds appear in the computation of brane superpotentials of type II strings via an "open-closed string duality", which associates a non-compact 4-fold geometry $X^{nc}_B$ to a B-type brane on a 3-fold $Z_B$. In this approach, the superpotential (1.1) of the brane compactification on $(Z_B, E)$ is computed from the periods of the holomorphic (4,0) form on the dual 4-fold $X^{nc}_B$. Moreover, mirror symmetry of 4-folds relates the sphere-instanton-corrected periods on the mirror 4-fold $X^{nc}_A$ of $X^{nc}_B$ to the disc-instanton-corrected superpotential of the compactification with the A-type brane $L$ on the mirror manifold $Z_A$ of $Z_B$. This surprising relation between mirror symmetry of the 4-folds $X^{nc}_A$ and $X^{nc}_B$ and open string mirror symmetry of the brane geometries $(Z_B, E)$ and $(Z_A, L)$ has been tested in various different contexts.
As we will argue below, these two 4-fold strands are in fact connected by a certain physical and geometrical limit that relates open-closed duality to heterotic/F-theory duality. 1 In this limit part of the bundle degrees of freedom decouples (in a physical sense) from the remaining compactification, and the type II brane and the heterotic bundle are identified. Geometrically, this can be viewed as a local mirror limit in the open string sector of type II strings or a local mirror limit for bundles considered in , respectively.
1 A related explanation of type II open-closed duality based on T-duality of 5-branes has been recently given in ref. .
In this limit, the F-theory/type IIA superpotential on the dual 4-fold X B reduces to the 'classical' type II/heterotic superpotential (1.3) on the 3-fold Z B , as has been observed previously in .
The result obtained from an F-theory/type IIA compactification on the dual 4-fold differs from the 3-fold result away from the decoupling limit. We assert that these deviations represent physical corrections to the dual type II/heterotic compactification from perturbative and instanton effects and describe how Hodge theory and mirror symmetry on the 4-fold provides a powerful computational tool to determine these perturbative and non-perturbative contributions. Depending on the point of view, the corrections computed by mirror symmetry of 4-folds describe world-sheet, D-brane or space-time instanton effects in the dual type II and heterotic compactifications.
Finally we discuss the type II/heterotic duality in the context of non-compact 4-folds that arise as two-dimensional ALE fibrations. For a particular choice of background fluxes these models admit a description in terms of certain Kazama-Suzuki coset models , whose deformation spaces coincide with those of matrix factorizations of N = 2 minimal models . We give a physical interpretation of this relation via type II/heterotic duality and we propose that this correspondence holds even more generally.
The organization of this note is as follows. In sect. 2 we discuss the application of Hodge theory to the evaluation of the Chern-Simons functional (1.1), with a focus on bundles on elliptic CY 3-folds constructed by Friedman, Morgan and Witten . For a perturbative bundle with structure group SU (N ) the superpotential captures obstructions to the deformation of the spectral cover Σ imposed by a certain choice of line bundle. We also discuss the case of a general structure group G and heterotic 5-branes. In sect. 3 we describe the decoupling limit in the type II and heterotic compactifications and use it to relate open-closed string duality to F-theory/heterotic duality, giving an explicit map between type II and heterotic compactifications. We discuss the relevant string dualities and the meaning of the quantum corrections in the dual theories. In sect. 4 we argue that the F-theory superpotential on the 4-fold captures more generally the heterotic superpotential for a bundle compactification on a generalized Calabi-Yau manifold, and we describe the map from the F-theory superpotential to the superpotential for heterotic bundles and heterotic 5-branes. In sect. 5 we extend the previous discussion to the Kähler potential and the twisted superpotential by studying the effective supergravity for the two-dimensional compactification of type IIA on the 4-fold and of heterotic strings on T 2 × Z B . In sect. 6 we start to demonstrate our techniques for an example of an N = 1 supersymmetric bundle compactification on the quintic. We discuss the perturbative heterotic theory and the general structure of the quantum corrections, and give explicit results for the example. In sect. 7 we consider other interesting examples, including heterotic 5-branes wrapping a curve in the base of the heterotic CY manifold and bundles with non-trivial Jacobians. In sect. 8 we connect, via heterotic/type II duality, the deformation spaces of certain matrix factorizations to the deformation spaces of type II on non-compact 4-folds that are ALE fibrations with fluxes. Sect. 9 contains our conclusions. In the appendix we present further technical details on the computations for the toric hypersurface examples analyzed in the main text.
2.1. Hodge variations in open-closed duality
In the approach of refs. , the superpotential of B-type brane compactifications with 5-brane charge on a Calabi-Yau Z B is computed from the mixed Hodge variation on a certain relative cohomology group H 3 (Z B , D). The superpotential is a linear combination of the period integrals of the relative (3,0) form Ω ∈ H 3,0 (Z B , D), eq. (2.1). The first term is the RR "flux" superpotential on 3-cycles γ Σ ∈ H 3 (Z B ) and the second term an off-shell version of the brane superpotential defined on 3-chains γ Σ with non-empty boundary. Note that the superpotential W II (Z B , D) associated with the Hodge bundle does not include the NS part of the type II flux potential.
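Schematically, and with charge labels N Σ , N̂ Σ that are assumed here for illustration, the relative period superpotential is of the form

$$ W^{II}(Z_B, D) \;=\; \sum_{\gamma_\Sigma \in H_3(Z_B)} N_\Sigma \int_{\gamma_\Sigma} \Omega \;+\; \sum_{\partial\gamma_\Sigma \neq 0} \hat N_\Sigma \int_{\gamma_\Sigma} \Omega \, , \qquad (2.1) $$

where the second sum runs over the 3-chains with boundary on D.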
The boundary ∂γ Σ is required to lie in a hypersurface D ⊂ Z B , ∂γ Σ ∈ H 2 (D). The moduli of the hypersurface D parametrize certain deformations of the brane configuration (Z B , E). Infinitesimally, the accessible deformations are described by the elements in (2.2). Mirror symmetry maps the B-type brane configuration (Z B , E) to an A-type brane configuration (Z A , L) on the mirror 3-fold Z A . The flat Gauss-Manin connection on H 3 (Z B , D) determines the mirror map z(t) between the complex structure moduli z of (Z B , E) and the Kähler moduli t of (Z A , L). Inserting the mirror map into (2.1) then gives the disc instanton corrected superpotential of the A-type geometry near a suitable large volume point of (Z A , L) .
The relative cohomology problem and open string mirror symmetry are related to absolute cohomology and mirror symmetry of CY 4-folds by a certain open-closed string duality . The constructions of these papers associate to a B-type brane compactification (Z B , E) and its mirror (Z A , L) a pair of non-compact mirror 4-folds (X nc A , X nc B ), such that the 4-fold "flux" superpotential agrees with the combined "flux" and brane superpotential (2.1) of the three-fold compactification, eq. (2.3). Here δ denotes universally a variation in the complex structure of the respective geometries, represented by the Gauss-Manin derivative and projecting onto pure pieces.
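In a minimal sketch, with Ñ Σ denoting the flux quanta on the 4-fold side (a notation assumed here), the equality of the two superpotentials reads

$$ W(X^{nc}_B) \;=\; \sum_\Sigma \tilde N_\Sigma \int_{\Gamma_\Sigma} \Omega^{4,0} \;=\; W^{II}(Z_B, D) \, . \qquad (2.3) $$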
The two maps α, β : H 4 hor (X nc B ) → H 3 (Z B , D) identify an element of H 4 hor (X nc B ) either with an element in H 3 (Z B ) of the closed string state space or with an element in H 2 (D) associated with the brane geometry i : D ֒→ Z B . These maps can be explicitly realized on the level of 4-fold period integrals by integrating out certain directions of the 4-cycles Γ Σ ∈ H 4 (X nc B ) . The map α : H 4 hor (X nc B ) → H 3 (Z B ) can be represented as an integration over a particular S 1 in X nc B and shifts the Hodge degree by (−1, 0). The other class of contours produces a delta function on the hypersurface D as in , and leads to the map β : H 4 hor (X nc B ) → H 2 (D), which shifts the Hodge degree by (−1, −1). Specifically, the infinitesimal deformations of the complex structure of X nc B split into the closed and open string deformations (2.2), as in (2.4). The above deformation problem is a priori unobstructed, but becomes obstructed by the superpotential (2.3) upon adding the appropriate "flux". In the brane geometry (Z B , E) this can be realized by a brane flux, adding a D5-charge γ̃ ∈ H 2 (D) . A non-trivial obstruction in the open string direction arises for an appropriate choice of the class γ̃. Restricting the open string moduli to the subspace where the class γ̃ remains of type (1,1) leads to a superpotential for the closed string moduli as in refs. . Note also that a class γ̃ in the image of i ∗ is always of type (1,1) and thus does not impose a restriction on the moduli of D, as the variation δW II of eq. (2.1) is automatically zero for a holomorphic boundary ∂Γ Σ .
2.2. Hodge variations for heterotic superpotentials
In the following we consider a similar Hodge theoretic approach to superpotentials of "heterotic" bundles on elliptically fibered Calabi-Yau manifolds constructed in .
In the framework of Friedman, Morgan and Witten, an SU (n) bundle E on an elliptically fibered CY 3-fold π Z B : Z B → B with section σ : B → Z B is described in terms of a spectral cover Σ, which is an n-fold cover π Σ : Σ → B, and certain twisting data specifying a line bundle on Σ. Fixing the projection of the second Chern class of E to the base B, the latter comprise a continuous part, related to the Jacobian of Σ, and a discrete part from elements γ as in eq. (2.6). In the duality to F-theory on a 4-fold X B , the elements of the Hodge spaces of the spectral cover are related to those on X B schematically as follows. The first line identifies the infinitesimal deformations of Σ with infinitesimal deformations of the 4-fold. The second relation relates the discrete data described by the class γ with 4-form flux in the F-theory compactification on X B . The last relation reflects the isomorphism of the Jacobian of Σ and the corresponding Jacobian in X B related to it by duality (see also ). Note that the heterotic/F-theory relation between H 4 (X B ) and H 2 (Σ) is formally given by the same (−1, −1) shift in Hodge degree as in the map β in the open-closed duality relation (2.4). As argued below, this similarity is not accidental, but a reflection of the fact that the heterotic and type II data can be related by the aforementioned decoupling limit.
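Assuming the uniform shift of Hodge type described in the text, the schematic correspondence and the defining property of γ read

$$ H^{2,0}(\Sigma) \;\leftrightarrow\; H^{3,1}(X_B) \, , \qquad \gamma \in H^{1,1}(\Sigma) \;\leftrightarrow\; G \in H^{2,2}(X_B) \, , \qquad H^{0,1}(\Sigma) \;\leftrightarrow\; H^{1,2}(X_B) \, , $$

$$ \gamma \in H^{1,1}(\Sigma, \mathbb{Z}) \quad \text{with} \quad \pi_{\Sigma *}\, \gamma \;=\; 0 \, , \qquad (2.6) $$

where the last condition, familiar from the spectral cover construction, ensures that the twist by γ does not modify c 1 (E).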
Again the deformations of the spectral cover Σ in H 2,0 (Σ) are unobstructed if γ is the "generic" (1,1) class discussed in . 2 Consider instead a class γ that is of type (1,1) only on a subspace ẑ = 0 of the deformation space. Twisting by γ then should obstruct the deformations of Σ away from ẑ = 0, which would destroy the property γ ∈ H 1,1 (Σ).
We propose that the heterotic superpotential describing this obstruction is captured by the chain integral (2.7), defined on the relative cohomology group H 3 (Z B , Σ) associated with the spectral cover Σ, with H 2 var (Σ) the mid-dimensional horizontal Hodge cohomology of Σ. Moreover the boundary 2-cycle C = ∂Γ ⊂ Σ is the cycle Poincaré dual to γ. The chain integral can then be computed from the Hodge variation on the relative cohomology group, as has been used in refs. to compute brane superpotentials in type II strings. As a first check on the relevance of the mixed Hodge variation on H 3 (Z B , Σ) for the heterotic theory, note that the deformation space H 2,0 (Σ) is indeed captured by the Hodge space H 2,1 (Z B , Σ), as in the type II case.
2 However, the existence of this class is a consequence of insisting on a section for π Σ : Σ → B.
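Assuming the same chain-integral form as in the type II case, the proposed superpotential is the period of the holomorphic (3,0) form over a 3-chain ending on the spectral cover,

$$ W_{het} \;=\; \int_{\Gamma} \Omega \, , \qquad \partial\Gamma \;=\; C \subset \Sigma \, , \qquad (2.7) $$

with C the 2-cycle Poincaré dual to γ in Σ.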
In the type II context, the mixed Hodge variation gives more physical information than just the superpotential, specifically appropriate coordinates on the deformation space, which lead to the interpretation of the superpotential as a disc instanton sum in the mirror A model. The physical interpretation of the corrections in the heterotic theory will be discussed below.
The expression (2.7) of the heterotic string can be argued for by relating it to the holomorphic Chern-Simons functional (1.1), which is the holomorphic superpotential for the bundle moduli in the heterotic string . Before turning to the derivation for a genuine CY 3-fold of holonomy SU (3), it is instructive to reflect on the argument by way of the simpler N = 2 supersymmetric case of dual compactifications of F-theory on K3 × K3 and of the heterotic string on T 2 × K3. The perturbative F-term superpotential associated with a heterotic flux on K3 in the i-th U (1) factor is W = A i ∫ C ω 2,0 , where A i is the Wilson line on T 2 , C the cycle Poincaré dual to the flux and ω 2,0 the holomorphic (2,0) form on the heterotic K3. In this simple case, the spectral cover is just points on the dual T 2 times K3, and the chain integral (2.7) over the holomorphic (3,0) form dz ∧ ω 2,0 becomes precisely this expression (see the sketch following this paragraph). There is also a simple generalization of this N = 2 superpotential to the case where the heterotic vacuum contains heterotic 5-branes , and this is also true for the N = 1 supersymmetric case studied below. The 5-brane superpotential is in fact the most straightforward part, starting from the results on type II brane superpotentials of refs. , as the brane deformations of the type II brane map to the brane deformations of the heterotic 5-brane in a simple way. The type II/heterotic map providing this identification and explicit examples will be discussed later on.
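Concretely, choosing the 3-chain to be the product Γ = I i × C, with I i an interval of length A i along the T 2 direction (a choice made here for illustration), the chain integral factorizes as

$$ \int_{\Gamma} dz \wedge \omega^{2,0} \;=\; \Big( \int_0^{A_i} dz \Big) \Big( \int_C \omega^{2,0} \Big) \;=\; A_i \int_C \omega^{2,0} \, , $$

reproducing the perturbative flux superpotential quoted above.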
2.3. Holomorphic Chern-Simons functional for heterotic bundles
The holomorphic Chern-Simons functional is (a projection of) the transgression of the Chern-Weil representation of the algebraic second Chern class for a supersymmetric vector bundle configuration. Thus, in order to establish for a supersymmetric heterotic bundle configuration that (1.1) agrees with eq. (2.7) on-shell, we need to show that the boundary 2-cycle C = ∂Γ of the 3-chain Γ in eq. (2.7) is given by a curve representing the algebraic second Chern class of the holomorphic heterotic vector bundle. The latter is encoded in the zero and pole structure of a global meromorphic section s E : Z → E of the supersymmetric holomorphic heterotic bundle E . This is described in ref. for a general SU (2) bundle and in ref. for a bundle associated with a matrix factorization.
To apply this reasoning to the SU (N ) bundles of , we need to construct an explicit representative for the algebraic Chern class. 4 As explained in ref. , the spectral cover Σ together with the class γ of eq. (2.6) defines the SU (n) bundle E over the elliptically fibered 3-fold Z as a push-forward from the fiberwise product Σ × B Z, as sketched below. Here π 2 is the projection to the second factor of the fiberwise product Σ × B Z of the 3-fold Z and of the spectral cover Σ over the common base B. P B is the restriction of the Poincaré bundle of the product Z × B Z to Σ × B Z, while S → Σ denotes a line bundle over the spectral cover Σ. 5 The bundle N ensures that the first Chern class c 1 (E) of the SU (n) bundle vanishes, and its explicit form is thoroughly analyzed in ref. . The holomorphic line bundle L γ with c 1 (L γ ) = γ governs the twisting associated to the class γ in (2.6), and it is responsible for the discussed obstructions to the deformations of the spectral cover Σ. Note that, due to the property (2.6), the line bundle L γ does not further modify the first Chern class c 1 (E) . 4 To avoid cluttering of notation, the heterotic manifold Z B is denoted simply by Z in the following argument. 5 For ease of notation its pull-back to Σ × B Z is also denoted by the same symbol S.
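A minimal sketch of the assumed Fourier-Mukai form of this spectral construction, in the notation of the surrounding text, is

$$ E \;=\; \pi_{2*} \big( \mathcal{P}_B \otimes \mathcal{S} \big) \, , \qquad \mathcal{S} \;=\; \mathcal{N} \otimes \mathcal{L}_\gamma \, , $$

where possible additional twists by bundles pulled back from the base are suppressed.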
In order to construct a section s E of the SU (n)-bundle, we need to push forward a global (meromorphic) section s R = s P · s S of the line bundle R = P B ⊗ S, which in turn is the product of a section s P of the Poincaré bundle P B and a section s S of the line bundle S. The Poincaré bundle is given by P B = O(∆ − Σ × B σ) ⊗ K B , where K B is the canonical bundle of the base (pulled back to Σ × B Z) and σ : B → Z the section of the elliptic fibration Z. Therefore the section s P = s K · s F can be chosen to be the product of the section s K of the canonical bundle of the base B and the section s F , which has a (simple) zero set along the diagonal divisor ∆ and a (simple) pole set along the divisor Σ × B σ. Finally, the zero set/pole set of the section s S is induced from the (algebraic) first Chern class c 1 (S) of the line bundle S over the spectral cover Σ.
Here we are in particular interested in the contribution from the line bundle L γ , whose global (meromorphic) section, extended to the fiber-product space Σ × B Z, is denoted by s γ . For an SU (n)-bundle the projection map π 2 is an n-fold branched cover of the 3-fold Z, and therefore in an open neighborhood U ⊂ B of the base the push-forward of the section s R takes the form of the global section (2.10), schematically s E = s K · (s 1 F s 1 S , . . . , s n F s n S ). As the section s K originates from the canonical bundle over the base, it appears as an overall pre-factor of the bundle section s E , while the entries s i F and s i S arise from the n sheets of the n-fold branched cover. The entries s i F restrict on the elliptic fiber to sections of O(p i ) ⊗ O(0) −1 , which have a simple zero at p i and a simple pole at 0. Here 0 denotes the distinguished point corresponding to the section σ : B → Z, and Σ i p i = 0 for SU (n). 6 The n entries s i S arise again from the section s S on the n different sheets. Since the section s S is induced from a line bundle over the spectral cover, the zeros/poles of the sections s i S correspond to co-dimension one sub-spaces on the base. Now we are ready to determine the algebraic Chern classes of the SU (n)-bundle E from the global section (2.10). By construction the first topological Chern class is trivial, which implies that also the first algebraic Chern class vanishes, since the Abel-Jacobi map is trivial for the simply-connected Calabi-Yau 3-folds discussed here. The second algebraic Chern class is determined by the "transverse zero/pole sets" of the section s E , which correspond to the co-dimension two cycles of the mutual zero/pole sets of distinct entries s i E and s j E . Since s i E = s i F · s i S , this computation exhibits c 2 (E) as a sum of three contributions: The joint vanishing of s i F and s j F is empty, since p i ≠ p j generically. The joint vanishing of s i S and s j S is a sum of fibers, which we may neglect since, moving in a rational family, they do not contribute to the superpotential. 7 Equivalently, we may use the relation ch 2 (E) = (1/2) c 1 (E) 2 − c 2 (E) between the second Chern class and the second Chern character ch 2 (E), which thanks to the vanishing of c 1 reduces to ch 2 (E) = −c 2 (E), to compute c 2 (E) from the transverse zero/pole sets of the local sections s k F and s k S of the same entry k. This will more directly lead to the desired boundary 2-cycle C = ∂Γ. (Again, we may neglect the self-intersections of s k F and s k S .) We focus now on the contribution c 2 (E γ ) to the second algebraic Chern class, which is associated to the intersection of the zero/pole sets of the local sections s k γ and the local sections s k F for k = 1, . . . , n. As argued, the obtained divisor is rationally equivalent to the (negative) boundary 2-cycle C arising from the Poincaré dual of the 2-form γ on the spectral cover Σ, and we obtain for the second algebraic Chern class c 2 (E γ ) = −[C], where we denote by [C] the cycle class which arises from embedding the two-cycle C of the spectral cover Σ into the Calabi-Yau 3-fold Z. Due to the property (2.6) the curve associated to c 2 (E γ ) is (up to a minus sign) rationally equivalent to the boundary of the same 3-chain Γ appearing in eq. (2.7). The other piece c 2 (V ), which is (locally) independent of the analyzed deformations of the spectral cover, is discussed in detail in ref. . In general it gives rise to a non-trivial second topological Chern class.
In a globally consistent heterotic string compactification this contribution is compensated by the second topological Chern class of the tangent bundle, as dictated by the anomaly equations of the heterotic string. 8 Thus, by reproducing the 3-chain Γ from the second algebraic Chern class of the holomorphic SU (n) bundles, the holomorphic Chern-Simons functional is demonstrated to be in agreement with the holomorphic superpotential (2.7). Analogously to the non-supersymmetric off-shell deformations of branes in type II compactifications , we propose that the correspondence between the superpotential (2.7) and the Chern-Simons functional persists even along deformations of the spectral cover which yield non-supersymmetric SU (n) bundle configurations.
7 An equivalent way to see this is to note that five-branes wrapped on the fiber of the elliptic threefold map under heterotic/F-theory duality to mobile D3-branes, which clearly have no superpotential.
To illustrate the presented construction, we briefly return to the N = 2 compactification of the heterotic string on T 2 × K3. For this example the spectral cover of an SU (n) bundle is just a collection of n points on the dual T 2 (times K3), as discussed above.
2.4. Chern-Simons vs. F-theory/heterotic duality
In the next section we will consider a dual F-theory compactification on a 4-fold and argue that mirror symmetry of the 4-fold computes interesting quantum corrections to the Chern-Simons functional. Here we want to motivate the 'classical' relation (2.12) between the 4-fold periods and the Chern-Simons functional (1.1). In (2.12), X B is a CY 4-fold which will support the F-theory compactification dual to the heterotic compactification on the 3-fold Z, and G A is a 4-form 'flux' related to the connection A of a bundle E → Z as described below. Moreover S is a distinguished complex structure modulus of the 4-fold X B such that Im S → ∞ imposes a so-called stable degeneration (s.d.) limit in the complex structure of X B . In this limit the 4-fold X B degenerates into two components intersecting over the elliptically fibered heterotic 3-fold Z → B 2 . The two 4-fold components X i are also fibered over the same base B 2 and capture (part of) the bundle data of the two E 8 factors of the heterotic string, respectively.
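Schematically, and with the normalization and the precise form of the corrections being assumptions here, the relation (2.12) reads

$$ \int_{X_B} \Omega^{4,0} \wedge G_A \;=\; CS(Z, A) \;+\; O\big( e^{2\pi i S} \big) \, , \qquad (2.12) $$

with the exponentially small terms parametrizing the finite S corrections to the stable degeneration limit.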
The idea is now to view Z as a complex boundary within one of the components X i and to apply a theorem of , which relates the holomorphic Chern-Simons functional on a 3-fold Z to an integral of the Pontryagin class of a connection A on an extension E → X ′ of the bundle E → Z defined over a Fano 4-fold X ′ . Here CS(Z, A) is short for the Chern-Simons functional on the r.h.s. of (2.12) without the finite S corrections. Moreover s ∈ H 0 (K −1 X ′ ) is a section of the anti-canonical bundle of X ′ whose zero set defines the 3-fold Z as a 'boundary' of X ′ . Now it is straightforward to show that the components X i of the degenerate F-theory 4-fold X ♯ are Fano in the sense required by the theorem, and moreover that the heterotic Calabi-Yau 3-fold Z can be defined as the zero set of appropriate sections s i of the anti-canonical bundles K −1 X i , as required by the theorem. This will be discussed in more detail in sect. 4.2, where we explicitly discuss hypersurface representations for X ♯ to match the F-theory/heterotic deformation spaces.
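In a schematic form suggested by the transgression argument (the overall normalization is an assumption), the theorem states

$$ CS(Z, A) \;\propto\; \int_{X'} \operatorname{tr}\big( F \wedge F \big) \wedge \Omega_s \, , \qquad (2.13) $$

where F is the curvature of the extended connection on E → X ′ and Ω s denotes the meromorphic (4,0) form on X ′ with a first-order pole along Z = {s = 0}, whose residue is the holomorphic 3-form on Z.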
The above line of argument then leads to a relation of the form (2.12), provided one identifies the 4-form flux G A with the Pontryagin class of a gauge connection A on an extension E of the bundle over the component X 1 . Up to terms of lower Hodge type, we shall have G A ≃ tr(F ∧ F). (2.14) Note that this identification of the 4-form flux is a non-trivial prediction of the outlined duality.
The real challenge posed by the relations (2.12), (2.14) is not the on-shell relation, which has been argued for in a special case in the previous section, but a proper off-shell extension of both sides. On the 4-fold side, the standard lore of string compactifications is to not fix the Hodge type of G, but rather to view the flux superpotential as a potential on the moduli space of the 4-fold X, which fixes the moduli to the critical locus. The idea is that the periods ∫ X Ω 4,0 on the l.h.s. of (2.12) have a well-defined meaning as the section of a bundle over the unobstructed complex structure moduli space M CS (X) of the 4-fold before turning on the flux; in particular they define the Kähler metric on M CS (X B ). In this way, viewing non-zero G as a 'perturbation' on top of an unobstructed moduli space, the section W (X B ) is considered as an off-shell potential for the fields parametrizing M CS (X).
Although it is not clear in general under which conditions it is valid to restrict the effective field theory to the fields parametrizing M CS (X) and to interpret W (X B ) as the relevant low energy potential for the light fields, this working definition of an off-shell deformation space seems to make sense in many situations. 9 The relation (2.12) suggests that it should be possible to give a sensible notion of a distinguished, finite-dimensional 'off-shell moduli space' for non-holomorphic bundles and to treat the obstruction induced by the Chern-Simons superpotential as some sort of 'perturbation' of an unobstructed problem. This is also suggested by the recent success in computing off-shell superpotentials for brane compactifications from open string mirror symmetry. We plan to return to these questions in the future.
3. Quantum corrected superpotentials in F-theory from mirror symmetry of 4-folds
In this section we show that the various Hodge theoretic computations of superpotentials in CY 3-fold and 4-fold compactifications discussed above are in some cases linked together by a chain of dualities. The unifying framework is the type IIA compactification on a pair (X A , X B ) of compact mirror CY 4-folds and its F-theory limits. As will be argued below, mirror symmetry of the 4-folds computes interesting quantum corrections, most notably D-instanton corrections to type II orientifolds and world-sheet corrections to heterotic (0,2) compactifications, which are hard to compute by other means at present. Another interesting connection is that to the heterotic superpotential for generalized Calabi-Yau manifolds. The purpose of this section is to study the general framework, which involves a somewhat involved chain of dualities, while explicit examples are given in sects. 6, 7. 9 There is a considerable literature on this subject. We suggest ref. for a justification in the context of type IIA flux compactifications on 3-folds, ref. in the type IIB context, ref. in non-geometric phases, and ref. for a recent general discussion.
3.1. Four-fold superpotentials: a first look at the quantum corrections
For orientation it is useful to keep in mind the concrete structure of the superpotential on compact 4-folds that we want to study, as it links the different dual theories discussed below at the level of effective supergravity. The compact 4-fold X B for the F-theory compactification is obtained from the non-compact 4-fold X nc B of the open-closed duality in eq. (2.3) by a simple compactification , discussed in more detail later on. In a certain decoupling limit defined in , the F-theory superpotential on X B reproduces the type II superpotential (2.1) plus further terms, cf. (3.1) below. The essential novelty in the superpotential of the compact 4-fold, as compared to the previous result (2.1), is the additional dependence on the new, distinguished complex structure modulus S of the compactification X B of X nc B . This modulus is identified in ref. with the decoupling limit Im S ∼ 1/g s → ∞ . (3.2) A similar weak coupling expansion of the 4-fold Kähler potential leads to a conjectural Kähler potential for the open-closed deformation space, as will be discussed in more detail in sect. 5.
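(3.1) should then be of the schematic form, with the pairing of the flux quanta N Σ , M Λ with the periods assumed here in analogy with (2.1):

$$ W_F(X_B) \;=\; \sum_{\Sigma} N_\Sigma\, \Pi_\Sigma(z, \hat z) \;+\; S \sum_{\Lambda} M_\Lambda\, \Pi_\Lambda(z) \;+\; \dots \, , \qquad (3.1) $$

where the first term reproduces the type II superpotential (2.1), the terms linear in S describe the NS fluxes, and the dots stand for the subleading corrections at finite S.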
Note that the flux terms ∼ S M Λ in the 4-fold superpotential W F (X B ) correspond to NS fluxes in the type II string on Z B , which were missing in (2.1). 10 In addition there are subleading corrections for finite S, denoted by the dots in (3.1), which include an infinite sum of exponentials with the characteristic weight e −1/g s of D-instantons. Before studying these corrections in detail, it is instructive to consider the dualities involved in the picture, which leads to a somewhat surprising reinterpretation of the open-closed duality of .
3.2. N = 1 duality chain
The relevant duality chain for understanding the quantum corrections in (3.1), and the relation to open-closed duality, relates the N = 1 supersymmetric compactifications in (3.3), where Z B is a CY 3-fold and (X A , X B ) a mirror pair of 4-folds which is related to the heterotic compactification on Z B by type IIA/heterotic duality. Here and in the following it is assumed that the 3-fold Z B and the 4-fold X B have suitable elliptic fibrations, in addition to the K3 fibration of X B required by heterotic/type IIA duality . This guarantees the existence of the F-theory dual in the last step. For an appropriate choice of bundle one can take the large volume limit of the T 2 factor to obtain the four-dimensional duality between heterotic strings on Z B and F-theory on X B .
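A sketch of the chain (3.3), assembled from the compactifications named in the surrounding text (the precise ordering of the steps is an assumption):

$$ \text{F-theory on } K3 \times Z_B \;\simeq\; \text{het. on } T^2 \times Z_B \;\simeq\; \text{IIA on } X_B \;\simeq\; \text{F-theory on } X_B \times T^2 \, , \qquad (3.3) $$

with type IIA on X A related to the third member of the chain by 4-fold mirror symmetry.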
The remainder of this section will center on the identification of the limit (3.2) in the various dual theories. Note that there are two different F-theory compactifications involved in the duality chain (3.3), namely on the manifolds K3 × Z B and X B × T 2 , respectively.
The identification (3.2) is associated with the F-theory compactification on K3 × Z B , or the type II orientifold on T 2 × Z B , in the orientifold limit . The decoupling limit also describes a certain limit of the heterotic compactification on the same 3-fold Z B , which will be identified as a large fiber limit of the elliptic fibration Z B below.
In order to make contact with the brane configuration (Z B , E) discussed in sect. 2.1, we combine the orientifold limit of F-theory with a particular Fourier-Mukai transformation (3.4). The relevant Fourier-Mukai transformation is discussed in detail in ref. . Heuristically, it implements T-duality in both directions of the torus T 2 to the dual torus Ť 2 , together with a fiberwise T-duality in both directions of the elliptic fibers of the 3-fold Z B to the 3-fold Ž B with dual elliptic fibers. This operation does not change the complex structure of the bulk geometry, but instead it transforms the brane configuration to the open-closed geometry (Z B , E). 11 These orientifold limits of F-theory, the type II and the heterotic compactifications on Z B can also be connected by a further chain of dualities: S-duality associates the type I to the heterotic string, T-duality on Ť 2 relates the type I compactification to the type II orientifold on T 2 × Z B , while the aforementioned Fourier-Mukai transformation, which realizes fiberwise T-duality, applied to the 3-fold Z B of the type I theory maps to the type II orientifold on Ť 2 × Ž B .
11 In this note, for ease of notation and to emphasize the relation to four-dimensional theories, N = 1 compactifications to two space-time dimensions also refer to low energy effective theories with four supercharges.
3.3. The decoupling limit as a stable degeneration
The meaning of the decoupling limit in the mirror pair (X A , X B ) of 4-folds and the dual heterotic string on Z B (×T 2 ) can be understood with the help of the following two propositions obtained in the study of F-theory/heterotic duality and mirror symmetry on toric 4-folds in ref. . It is shown there that 12 (C1) If F-theory on the 4-fold X B is dual to a heterotic compactification on a 3-fold Z B , then the mirror 4-fold X A is a fibration Z A → X A → P 1 , where the generic fiber Z A is the 3-fold mirror of Z B .
(C2) In the above situation, the large base limit in the Kähler moduli of the fibration X A → P 1 maps under mirror symmetry to a "stable degeneration" limit in the complex structure moduli of the mirror X B .
The first part applies, since the 4-fold duals constructed in the context of open-closed string duality have precisely the fibration structure required by (C1); indeed the mirror pair (X nc A , X nc B ) of open-closed dual 4-folds, dual to an A-brane geometry (Z A , L) and its mirror B-brane geometry (Z B , E), is constructed in refs. as a fibration π(L) : X nc A → C over the complex plane, where the generic fiber is the CY 3-fold Z A . The notation π(L) for the fiber projection is a reminder of the fact that the data of the bundle L are encoded in the singularity of the central fiber, as described in detail in refs. . The manifold X nc B may be defined as the 4-fold mirror of the fibration X nc A . Since the pair of compact 4-folds (X A , X B ) is obtained by a simple compactification of the base to a P 1 , it follows that the F-theory 4-fold X B has a mirror X A , which is a 3-fold fibration π : X A → P 1 with generic fiber Z A . In the multiple fibration structures, B 3 and B 2 denote the corresponding three- and two-dimensional base spaces of the elliptic fibrations X B → B 3 and Z B → B 2 ; note that the existence of these fibrations is a weaker requirement than F-theory/heterotic duality. For simplicity we impose in the following that X B is elliptically and K3 fibered, which implies that (C1) holds also in the reverse direction.
Part two of the proposition applies, since the decoupling limit Im S → ∞ in the complex structure of X B was defined in ref. as the mirror of the large base volume in the Kähler moduli of the fibration π : X A → P 1 . The image of this limit under the mirror map in the complex structure of X B is a local mirror limit in the sense of and effectively imposes the stable degeneration (s.d.) limit of X B studied in refs. . Under F-theory/heterotic duality, the s.d. limit maps to a large fiber limit of the heterotic string compactification on the elliptic fibration Z B , and this is the sought-for identification of the limit (3.2) in the heterotic string. Its meaning as a physical decoupling limit of a sector of the heterotic string can be understood from both the world-sheet and the effective supergravity points of view, as will be discussed in sect. 5. Explicit examples for the relation between the hypersurface geometries X B and Z B in the s.d. limit will be considered in sects. 6, 7.
3.4. Open-closed duality as a limit of F-theory/heterotic duality
The relation in (3.3) between the type II orientifold on Z B and type IIA on the 4-folds (X B , X A ) is similar to the open-closed duality of refs. . These papers claim to compute the type II superpotential for a B-type brane compactification on Z B with a given 5-brane charge from the periods of a dual (non-compact) 4-fold X nc B . As explained in refs. , this 5-brane charge can be generated by non-trivial fluxes on higher dimensional branes. The only difference to the type II orientifold on T 2 × Z B appearing in (3.3) is the extra T 2 compactification and the presence of 7-branes wrapping Z B , which does not change the superpotential associated with the 5-brane charge.
In the decoupling limit Im S → ∞, which sends X B to the non-compact manifold X nc B , the "local" B-type brane with 5-brane charge decouples from the global orientifold compactification and we recover the type II result W II (Z B ) in eq. (1.2). 13 Note that in this limit there are two different paths connecting the B-type orientifold to the non-compact open-closed string dual X nc B . The first one goes via the open-closed string duality of refs. , while the second goes via F-theory/heterotic/type IIA duality of eq. (3.3).
13 In the type II string without branes/orientifold, N Σ = 0 and the subleading corrections to the superpotential would be absent .
Commutativity of the diagram implies that for this special case, open-closed duality of refs. coincides with heterotic/F-theory duality in the decoupling limit.
Note that the duality (3.4) maps a D3 brane wrapping a curve C in Z B in the orientifold to a heterotic 5-brane wrapping the same curve C in the heterotic dual Z B . The heterotic 5-brane can be locally viewed as an M-theory 5-brane , which is in turn related to the type IIA 5-brane used in to derive open-closed string duality from T-duality.
The original observation of open-closed string duality of ref. is that it maps the disc instanton generated superpotential of the brane geometry (Z A , L) (mirror to (Z B , E)) to the sphere instanton generated superpotential of the dual 4-fold X nc A (mirror to X nc B ). At tree-level, this map is term by term, that is, it maps an individual Ooguri-Vafa invariant for a given class β ∈ H 2 (Z A , L) to a Gromov-Witten invariant for a related class β ′ ∈ H 2 (X nc A ). This genus zero correspondence left open the important question whether there is a full string duality that extends this relation between the 3-fold and the 4-fold data beyond the superpotential. From the above diagram we see that there is at least one true string duality which reduces to the open-closed string duality of refs. at g s = 0 and extends it to a true string duality: F-theory/heterotic duality!
3.5. Instanton corrections and mirror symmetry in F-theory
The above discussion has led to the qualitative identification of the dual interpretations of the expansion in (3.1) in terms of a weak coupling limit of the type II orientifold, a large fiber volume of the heterotic string on the elliptic fibration Z B , a stable degeneration limit of the F-theory 4-fold X B and a large base limit of the 3-fold fibration X A → P 1 .
We will now argue that the quantum corrections computed by 4-fold mirror symmetry can be tentatively assigned to the two 4-fold superpotentials of refs. as in (3.8). Here W (X B ) is the 4-fold superpotential of eq. (3.1), while W̃ (X B ) is the twisted superpotential associated with the type IIA compactification on X B . 14 The latter computes also the world-sheet instanton corrections to the large volume limit of the type II/heterotic compactification.
14 See the discussion in sect. 5 below.
The details of the argument are somewhat involved and may be skipped on a first reading. It is again instructive to first consider the simpler case of a closely related duality chain with N = 2 supersymmetry, eq. (3.9), where Z̃ V , Z H are two K3 manifolds and (X A , X B ) denotes a mirror pair of CY 3-folds; differently than in (3.3), mirror symmetry of the 3-folds exchanges the IIA compactification on X B with a type IIB compactification on X A . As before, we assume that the 3-fold X B is elliptically fibered, such that one can decompactify the T 2 of the heterotic string to obtain F-theory in six dimensions. Note that the N = 1 duality chain (3.3) can be heuristically thought of as a chain of dualities obtained by "fibering" (3.9) over P 1 , so that some observations from the N = 2 supersymmetric case will carry over to N = 1.
The two basic questions that we want to study in this simpler setup are the meaning of mirror symmetry in F-theory and the identification of quantum corrections computed by it. It will turn out that, under favorable conditions, the distinguished modulus S has a mirror partner ρ and mirror symmetry of the CY manifolds X A and X B exchanges the two weak coupling expansions in Im S and Im ρ.
The quantum corrections to the N = 2 supersymmetric duality chain (3.9) have a rich structure studied previously in . The F-theory superpotential for the K3 × K3 compactification, which arises in the effective N = 2 supergravity theory from certain gaugings in the hypermultiplet sector, can be written as a bilinear in the period integrals on the two K3 factors, eq. (3.10). Here G IΛ labels the 4-form flux in F-theory, decomposed on a basis {μ̃ Λ } for H 2 prim (Z̃ V ) and {µ I } for H 2 prim (Z H ) as G = Σ I,Λ G IΛ µ I ∧ μ̃ Λ . The periods on Z H depend on the N = 2 hypermultiplets and are mapped under duality to the type IIA/F-theory compactification on X B to the 3-fold periods, by a relation similar to (3.1), cf. (3.11). This equation describes how the periods on the F-theory 3-fold X B , defined on the basis γ I ∈ H 3 (X B , Z), compute finite S corrections to the periods on the 2-fold Z H of the dual type II compactification. As explained in the 4-fold case, (C2) says that these are corrections to the s.d. limit in the complex structure of X B .
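(3.10) and (3.11) should thus take the schematic form (the notation for the K3 period vectors is assumed here):

$$ W \;=\; \sum_{I,\Lambda} G_{I\Lambda}\, \Pi^I(Z_H)\, \tilde\Pi^\Lambda(\tilde Z_V) \, , \qquad (3.10) $$

$$ \Pi^I(Z_H) \;\longrightarrow\; \int_{\gamma_I} \Omega^{3,0}(X_B) \;=\; \Pi^I(Z_H) \;+\; O\big( e^{2\pi i S} \big) \, , \qquad (3.11) $$

with Π I and Π̃ Λ the periods of the holomorphic 2-forms of the two K3 factors.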
Note that (3.10) is apparently symmetric in the periods of the two K3 factors. This is somewhat misleading, as the periods on Z̃ V depend on N = 2 vector multiplets. 15 It was argued in that there is also a similar relation as (3.11) for the second period vector, cf. (3.12), where ρ is a distinguished vector multiplet related to the heterotic string coupling as discussed below. This relation describes corrections to the result (3.10) computed by the periods of the mirror manifold X A . Here it is understood that one uses mirror symmetry to map the periods of the holomorphic (3,0) form on H 3 (X A , Z), defined on an appropriate basis, to the 'Kähler periods' of X B . Note that these 'Kähler periods' of X B are the 3-fold equivalent of the integrals appearing in the twisted superpotential W̃ (X B ) in (3.8). However, replacing the K3 periods in (3.10) by the quantum corrected expressions (3.11), (3.12), we get a superpotential that is proportional to both the periods of the manifold X B and those of its mirror X A . It was argued in that this 'quadratic' superpotential in the 3-fold periods is in agreement with the S-duality of topological strings predicted in ref. . Similar expressions have been obtained in refs. from the study of type II compactifications on generalized CY manifolds.
By (C2), the limit Im S → ∞ is mirror to the large base limit of the fibration X A → P 1 , which is a K3 fibration by (C1) in the 3-fold case. By type IIA/heterotic duality, X B is also a K3 fibration X B → P 1 , and eq. (3.12) represents the large base limit Im ρ → ∞ of X B , where ρ is the Kähler volume of the base P 1 . By heterotic/type IIA duality, the Kähler volume of the base of X B is identified with the four-dimensional heterotic string coupling . Adding the identification of S provided by (C2), we get the heterotic interpretation (3.14) of the volumes V A/B of the base P 1 's of the fibrations X A/B → P 1 . Here V E het denotes the volume of the elliptic fiber of Z H in the heterotic compactification in (3.9). Clearly, mirror symmetry exchanges the two expansions (3.11) and (3.12) associated with a compactification on X A or on X B , respectively .
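(3.14) should thus amount to the identifications (the precise powers of the coupling are conventions assumed here)

$$ V_A \;\sim\; V_E^{het} \, , \qquad V_B \;\sim\; 1 / \big( g^{het}_{4d} \big)^2 \, , \qquad (3.14) $$

i.e. the base volume of X A measures the heterotic elliptic fiber volume, while the base volume of X B measures the inverse four-dimensional heterotic coupling.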
In the dual F-theory compactification on K3 × K3, mirror symmetry represents the exchange of the two K3 factors , which gives rise to two dual heterotic T 2 × K3 compactifications. Starting from the duality relation between M-theory on K3 × K3 and the heterotic string on , it is shown in ref. that the exchange of the two K3 factors in M-theory generates a certain Z 2 transformation (3.15) on the moduli of the two heterotic duals. Comparing with the relation (3.14) between the four-dimensional heterotic coupling and the volumes of the bases of the fibrations (X A , X B ), one concludes that the result of is in accord with the claim (C2) of and its consequence (3.15) in this case. It is reassuring to observe that these conclusions, reached by rather different arguments in refs. and , agree so nicely.
As further argued in , the expansion (3.12), computed from mirror symmetry of the 3-folds X B and X A , captures D3 instanton corrections to the orientifold on K3 × T 2 (or F-theory on K3 × K3). The basic instanton is a D3 brane wrapping K3, which is mapped under the duality (3.4) to a heterotic 5-brane instanton wrapping T 2 × K3.
In the type II orientifold, ρ is the K3 volume.
Compactifying the N = 2 chain on a further P 1 , the previous arguments lead to the assignments (3.8). In particular the identification of D3 instantons in continues to hold with the appropriate replacement of K3 by 4-cycles in Z B . The above argument based on (C2) is in fact independent of the dimension and can be phrased more generally as the following statement on mirror symmetry in F-theory. Let X B be an F-theory n-fold with heterotic dual (Z B , V B ), where V B denotes the gauge bundle. If the mirror X A of X B is also elliptically and K3 fibered, we have the relations (3.16) between the F-theory compactifications on (X A , X B ) and heterotic compactifications on (Z A , Z B ): under mirror symmetry, the s.d. limit and the large base limit are exchanged. Note that the two theories on the left and on the right are in general not dual, but become dual after further circle compactifications.
The simplest example is F-theory on a K3 X B , dual to the heterotic string on (T 2 , V G ), where V G denotes a flat gauge bundle on T 2 with structure group G. The eight-dimensional heterotic compactification has an unbroken gauge group H, where H is the centralizer of G in the ten-dimensional heterotic gauge group. In a further compactification on T 2 one has to choose a flat H bundle on the second T 2 . Assuming that the bundles factorize, one can exchange the two T 2 factors and thus H and G. In F-theory this exchange corresponds to mirror symmetry of K3, and this was used in to construct local mirrors of bundles on T 2 from local ADE singularities. The next simple example is the above N = 2 supersymmetric case, where X B is the 3-fold in (3.9), with a heterotic dual compactified on K3 × T 2 . Assuming a suitable factorization of the heterotic bundle, the action of 3-fold mirror symmetry maps to the exchange of the two K3 factors (Z̃ V , Z H ) in the dual F-theory compactification in (3.16).
In the heterotic string this symmetry relates two different K3 compactifications (Z H , V ) and (Z̃ V , V ′ ), which become dual after compactification on T 2 × S 1 . 16
16 One needs the T 2 compactification to get two type IIA compactifications on the mirror pair (X A , X B ), which become T-dual after a further circle compactification.
In the 4-fold case, the fibrations required by the above arguments are not guaranteed, since (C1) now implies that the 4-fold X A is a 3-fold fibration X A → P 1 (as opposed to the K3 fibration in the 3-fold case). If X A is K3 fibered, the N = 1 chain can be viewed as an N = 2 chain fibered over P 1 and the above arguments apply, leading to the assignment (3.8). In the other case, the large Im S expansion of W (X B ) always exists, but there is no corresponding large ρ expansion of the twisted superpotential W̃ (X B ).
4. Heterotic superpotential from F-theory/heterotic duality
Having identified the limit S → i∞ as a large fiber limit in the heterotic interpretation, the next elementary question is to identify the "flux quanta" of the 4-fold superpotential in the heterotic theory. The relevant heterotic superpotential is of the form (4.1), where H is the non-trivial NS 3-form flux and dJ̃ is often called the geometric flux of the non-Kähler compactification manifold. 17 It is instructive to examine first the fluxes of the heterotic string compactified on the N = 2 background T 2 × K3. For this particular geometry the analyzed fluxes induce a twisting of the two torus directions over the K3 base, resulting in a non-Kähler T 2 fibration over K3, as follows.
17 In the context of generalized Calabi-Yau spaces, J̃ and Ω̃ are in general not closed with respect to the de Rham differential d.
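(4.1) is assumed here to be the standard torsional flux superpotential, and the torsional constraint (4.2) referred to below its familiar on-shell companion (up to sign conventions):

$$ W_{het} \;=\; \int \tilde\Omega \wedge \big( H + i\, d\tilde J \big) \, , \qquad (4.1) $$

$$ H \;=\; i\, \big( \bar\partial - \partial \big)\, \tilde J \, . \qquad (4.2) $$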
In order to show the relation to the superpotential (4.1) we first construct the cohomological data characterizing the twisting. The constructed functions ε (k) : U αβγ → Z specify 2-cocycles in the Čech cohomology group Ȟ 2 (K3, Z). The classes ε (k) correspond to the Euler classes e (k) of the two circle bundles in the integral de Rham cohomology H 2 (K3, Z). 18 The non-Kähler manifold K̃ is equipped with the hermitian form J̃ and the holomorphic (3,0) form Ω̃, constructed from the data of the fibration as sketched below.
Here θ (k) , k = 1, 2, are the two 1-forms of the toroidal fibers, while J K3 is the (complexified) Kähler form and ω 2,0 is the holomorphic 2-form of the K3 base. 19 S is the (complexified) volume modulus of the T 2 fiber. 20
18 For details and background material on Čech cohomology and on the construction of the Euler classes we refer the interested reader, for instance, to ref. .
19 For simplicity, we ignore a warp factor in front of the Kähler form J K3 , as it is not relevant for the analysis of the superpotential. Also note that in our conventions the imaginary part of J̃ corresponds to the hermitian volume form.
20 The stabilization of volume moduli in the context of heterotic string compactifications with fluxes is also discussed in refs. .
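A minimal sketch of the assumed form of these data, in the spirit of the standard torus-bundle constructions over K3 (the placement of the modulus S is an assumption):

$$ \tilde J \;=\; S\, \theta^{(1)} \wedge \theta^{(2)} \;+\; J_{K3} \, , \qquad \tilde\Omega \;=\; \big( \theta^{(1)} + i\, \theta^{(2)} \big) \wedge \omega^{2,0} \, . $$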
As the two-forms dθ (k) restrict to the Euler classes e (k) on the K3 base, the non-Kähler 3-fold K̃ encodes background fluxes dJ̃ and H, where the H-flux is determined by imposing the torsional constraint (4.2) for the on-shell value S = i of the fiber volume. Then evaluating the superpotential (4.1) with these fluxes yields an expression whose structure is in agreement with the superpotential periods obtained in ref. .
The idea is now to generalize the construction by "twisting" the fibers of the elliptically fibered 3-fold π : Z B → B with a section σ : B → Z B , such that we arrive at the generalized Calabi-Yau 3-fold Z̃ B . In order to eventually relate the periods of the two manifolds Z B and Z̃ B , we first translate the 3-form cohomology of the 3-fold Z B to appropriate cohomology groups on the common base B. This is achieved with the Leray-Serre spectral sequence, which associates the cohomology of a fiber bundle to cohomology groups on the base.
Here the (pre-)sheaf H q on the base B is defined by assigning to each open set U the cohomology group H q (π −1 (U )) of its preimage. Due to the simple connectedness of the examined Calabi-Yau 3-fold Z B we arrive at the simplified relation H 3 (Z B , Z) ≃ Ȟ 2 (B, H 1 ). 21 Note that the (pre-)sheaf H 1 is not locally constant, because the dimension of the stalk of H 1 at a singular fiber differs from the dimension at a generic regular fiber.
In terms of the open covering U, a Čech cohomology element ε in Ȟ 2 (B, H 1 ) is a map that assigns to each triple intersection set U αβγ an element in H 1 (U αβγ ) and fulfills the cocycle condition on quadruple intersections U αβγδ , that is ρ δ ε αβγ − ρ γ ε αβδ + ρ β ε αγδ − ρ α ε βγδ = 0. The map ρ δ , for instance, is the pull-back induced from the inclusion ι δ : U αβγδ ֒→ U αβγ .
Then the cohomology element ε is called a 2-cocycle with coefficients in the (pre-)sheaf H 1 , and it is non-trivial if it does not arise from a 1-cochain on double intersections U αβ .
To proceed we assume that the generalized Calabi-Yau manifold Z̃ B is also fibered π̃ : Z̃ B → B over the same base B and that it arises from "twisting" the elliptic fibers of the 3-fold Z B . This "twist" is measured by the 1-cochain ϕ, which assigns to each double intersection U αβ an element in H 1 (U αβ ) ⊗ Z R and which captures the distortion of the angular variables of the 1-cycles in the elliptic fibers of the original 3-fold Z B .
In general the 1-cochain ϕ does not fulfill the cocycle condition, due to the periodicity of the angular variables of the 1-cycles. Instead we find on triple intersections U αβγ that the coboundary of ϕ is a non-trivial integral cocycle, which defines a 2-cocycle in Ȟ 2 (B, H 1 ) characterizing the "twist" of the 3-fold Z̃ B .
21 Strictly speaking the first relation is not an equality '≃' but an inclusion '⊆', because we ignore the "higher order corrections" from the spectral sequence. This implies that some of the elements on the right hand side might actually be trivial in H 3 (Z B , Z).
Here ξ α is now viewed as a 2-form on the open set π̃ −1 (U α ). Due to the "twist" the 2-forms ω F , which are defined on open sets, patch together to a global 2-form on the 3-fold Z̃ B .
Furthermore, as a consequence of eq. (4.5), we observe that the geometric flux dω F is captured in terms of the element s e in Ȟ 2 (B, H 1 ).
In order to evaluate the heterotic superpotential we insert these data into (4.1), arriving at the expression (4.7). In the last step we have again related the integral elements s H and s e to their Poincaré dual 3-cycles C H and C J in the original Calabi-Yau manifold Z B .
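(4.7) should then be of the schematic form, with the pairing of S and the geometric flux term assumed here in analogy with (3.1):

$$ W_{het} \;=\; \int_{C_H} \Omega \;+\; S \int_{C_J} \Omega \, , \qquad (4.7) $$

with C H and C J the 3-cycles Poincaré dual to the H-flux and to the geometric flux, respectively.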
In the context of heterotic string compactifications on the 3-fold Z B the presented arguments provide further evidence for the encountered structure of the closed-string periods in eq. (3.1). In particular we find that the complex modulus S should be identified with the complexified volume of the generic elliptic fiber.
There is, however, a cautionary remark overdue. We tacitly assumed that the manifold Z̃ B can be constructed by simply "twisting" the elliptic fibers of Z B . In general, however, we expect that such a construction is obstructed and additional modifications are necessary to arrive at a "true" generalized Calabi-Yau manifold. A detailed analysis of such obstructions is beyond the scope of this note. However, we believe that the outlined construction is still suitable to anticipate the (geometric) flux quanta which are responsible for the transition to the generalized Calabi-Yau manifold Z̃ B at leading order. From the duality perspective of the previous section we actually expect further corrections to the superpotential (4.7). These corrections should be suppressed in the large fiber limit Im S → ∞, and it is in this limit that we expect the "twisting" construction to become accurate.
4.2. Chern-Simons contribution to W F (X B )
The F-theory prediction from the last term in (3.1) is the equality, up to finite S corrections, of certain 4-fold period integrals on X B and the Chern-Simons superpotential on Z B , for an appropriate choice of G ∈ H 4 (X B ) and a connection on E → Z B . The general relation of this type has already been described in sect. 2.4, where we used that the 3-fold Z B may be viewed as a 'boundary' within the F-theory 4-fold X B in the s.d. limit.
Here we complete the argument and discuss the map of the deformation spaces by using hypersurface representations for X ♯ and Z B . This will also lead to a direct identification of the open-closed dual 4-fold geometries for type II branes and the local mirror geometries for (heterotic) bundles of .
To this end, we represent the s.d. limit X ♯ B of the F-theory 4-fold X B as a reducible fiber of a CY 5-fold W obtained by fibering X B over C as in . Let µ be the local coordinate on the base C which serves as a deformation space for the 4-fold fiber X B . We start from a Weierstrass form for W, sketched below, where f α (x k ) and g α (x k ) are functions of the coordinates on the two-dimensional base. Tuning the complex structure of W by choosing a α,β = 0 for α + β > 4 and b α,β = 0 for α + β > 6, the central fiber of W at µ = 0 acquires a non-minimal singularity at y = x = s = 0, which can be blown up by y = ρ 3 y, x = ρ 2 x, s = ρs, µ = ρµ, to obtain a new hypersurface. 22 The singular central fiber has been replaced by a fiber X ♯ = X 1 ∪ X 2 with two components X i defined by ρ = 0 and µ = 0, respectively. The component ρ = 0 is described by the hypersurface equation (4.9), where we have collected the terms with zero and positive powers in µ into the two polynomials p 0 and p + for later use. The hypersurface X 1 is a fibration X 1 → B 2 with fiber a rational elliptic surface S 1 . The expressions in (4.9) are sections of line bundles, specifically the anti-canonical bundle L = K −1 B 2 , a line bundle M over B 2 that enters the definition of the fibration X B → B 2 and a bundle N associated with a C * symmetry acting on the homogeneous coordinates (y, x, s, µ). The powers of the line bundles appearing in these sections are determined by these data. The hypersurface X 1 has a positive first Chern class c 1 (X 1 ) = c 1 (N ) and the CY 3-fold Z B is embedded in X 1 as the divisor µ = 0, verifying a claim that was needed in the argument of sect. 2.4. According to the picture of F-theory/heterotic duality developed in , the polynomial p + containing the positive powers in s describes part of the bundle data in a single E 8 factor of the heterotic string compactified on Z B . Using a different argument, based on the type IIA string compactified on fibrations of ADE singularities, more general n-fold geometries X̃ of the general form (4.9) have been obtained in as local mirror geometries of bundles with arbitrary structure group on elliptic fibrations. Mirror symmetry gives an entirely explicit map between the moduli of a given toric n-fold and the geometric data of a G bundle on a toric (n − 1)-fold Z B , which applies to any geometry X̃ of the form (4.9) . The application of these methods will be illustrated by way of selected examples in sects. 6 and 7.
22 The non-zero constants a α,β , b α,β are set to one in the following.
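A sketch of the assumed Weierstrass form with its double expansion in s and µ (the precise index structure of the coefficient functions is an assumption):

$$ y^2 \;=\; x^3 \;+\; x \sum_{\alpha, \beta} a_{\alpha,\beta}\, f_\alpha(x_k)\, s^\alpha \mu^\beta \;+\; \sum_{\alpha, \beta} b_{\alpha,\beta}\, g_\alpha(x_k)\, s^\alpha \mu^\beta \, , $$

so that the tuning a α,β = 0 for α + β > 4 and b α,β = 0 for α + β > 6 produces the non-minimal singularity of the central fiber at y = x = s = 0.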
A special case of the above discussion is the one where the heterotic gauge sector is not a smooth bundle, but includes also non-perturbative small instantons . The F-theory interpretation of these heterotic 5-branes as a blow-up of the base of the elliptic fibration X B → B 3 has been studied in detail in ; see also refs. for details in the case of toric hypersurfaces and ref. for an elegant discussion of the moduli space in M-theory.
From the point of view of Hodge variations and brane superpotentials this is in fact the simplest case, starting from the approach of , as the brane moduli of the type II side map to the moduli of the heterotic 5-brane. An explicit example from will be discussed in sect. 7.
4.3. Type II / heterotic map
The above argument also provides a means to describe an explicit map between a type II brane compactification on Z B and a heterotic bundle compactification on Z B . The key point is again the aforementioned relation (C2) between the large volume limit of the fibration π : X A → P 1 and the s.d. limit of the F-theory 4-fold X B . The relation between the F-theory 4-fold geometry, the heterotic bundle on Z B and the type II branes on Z B is concisely summarized by the diagram (4.11), whose vertical arrows represent mirror symmetry and whose horizontal arrows represent the large base, respectively the local, limit. The upper line indicates how the open-closed string dual X nc A (L) of an A-type bundle L on the 3-fold Z A sits in the compact 4-fold X A mirror to X B . The details of the bundle L are encoded in the toric resolution of the central fiber Z 0 A at the origin 0 ∈ C 1 , as described in terms of toric polyhedra in refs. . The limit consists of concentrating on a local neighbourhood of the point 0 ∈ P 1 and taking the large volume limit of the P 1 base.
The lower row describes how the heterotic bundle E on the elliptic manifold Z B dual to F-theory on X B is captured by a local mirror geometry of the form (4.9). Assuming that the large base/local limit commutes with mirror symmetry, the diagram is completed to the right by another vertical arrow, which represents local mirror symmetry of the non-compact manifolds. The mirror of the open-closed dual X nc A (L) has been previously called X nc B (E), and we see that commutativity of the diagram requires that the open-closed dual X nc B (E) is the same as the heterotic dual X̃(E). Indeed, the hypersurface equations for G = SU (N ) given in ref. for the heterotic 4-fold X̃ and in ref. for the open-closed 4-fold X nc B can both be written in the form (4.12), where v is a local coordinate defined on the cylinder related to s in (4.9). In both cases, the v 0 term specifies the 3-fold Z B on which the type II/heterotic string is compactified. In the type II context, Q(D) = 0 is the hypersurface D ⊂ Z B , which is part of the definition of the B-type brane . In the heterotic dual of , p + (Σ) = 0 specifies the SU (n) spectral cover .
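(4.12) is assumed here to be of the schematic form

$$ P(v) \;=\; p_0 \;+\; \sum_{k > 0} v^k\, p_k \;=\; 0 \, , \qquad (4.12) $$

where p 0 = 0 cuts out the 3-fold Z B and the positive powers of v encode the divisor Q(D) in the type II context, respectively the spectral cover data p + (Σ) in the heterotic context.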
The agreement of the local geometries dual to the type II/heterotic compactification on Z_B predicted by the commutativity of (4.11) is now obvious with the identification (4.13), sketched below. This map between the dual 4-folds in (4.12) can be interpreted as a geometric reflection of the physical fact that the decoupling limit identifies the heterotic and type II bundle data.
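Schematically, and up to normalizations assumed here, the identification (4.13) reads

$$ \text{type II/heterotic map:}\qquad Q(D)\;\longleftrightarrow\; p_+(\Sigma)\,,\qquad D\subset Z_B\;\longleftrightarrow\;\Sigma_+\,, $$

i.e. the defining polynomial of the brane divisor D is literally identified with the spectral cover polynomial of the heterotic bundle.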
Note that, with the identification (4.13), the proofs of refs. , which relate the relative periods H^3(Z_B, D) to the periods of the 4-fold X^{nc}_B in the context of open-closed duality, also carry over to the heterotic string setting for G = SU(N). More ambitiously, one would like to have an explicit relation between the 4-fold periods and the holomorphic Chern-Simons integral also for a heterotic bundle with general structure group G. The approach of refs.  gives an explicit map from the moduli of a G bundle on Z_B to a local mirror geometry X̃ for any G, and evaluation of the periods of X̃ gives the 4-fold side.
A computation on the heterotic side could proceed by a generalization of the arguments of sect. 2.3, e.g. by constructing the sections of the bundle from the more general approaches to G bundles described in . In sect. 8 we outline a possible alternative route, using a conjectural relation between two two-dimensional theories associated with the 3-fold and the 4-fold compactification.
Type II/heterotic duality in two space-time dimensions
In the previous sections we demonstrated the chain of dualities in eq. (3.3) by matching the holomorphic superpotentials of the various dual theories. In this section we supplement this analysis by relating the two-dimensional low energy effective theories of the type IIA compactifications on the 4-folds X_A and X_B to the dual heterotic compactification on T^2 × Z_B. Many aspects of the type II/heterotic duality at the level of the low energy effective action have already been examined in ref. . We extend this discussion here.
For the aforementioned string compactifications the low energy effective theory is described by two-dimensional N = (2, 2) supergravity.^23 Chiral multiplets ϕ and twisted chiral multiplets ϕ̃ comprise the dynamical degrees of freedom of these supergravity theories . In a dimensional reduction of four-dimensional N = 1 theories the two-dimensional chiral multiplets/twisted chiral multiplets arise from four-dimensional chiral multiplets/vector multiplets, respectively.
The scalar potential of the two-dimensional N = (2, 2) Lagrangian arises from the holomorphic chiral and twisted chiral superpotentials W(ϕ) and W̃(ϕ̃), and the kinetic terms are specified by the two-dimensional Kähler potential (5.1).^24 Here K^(2) and K̃^(2) can be thought of as individual Kähler potentials for the chiral and twisted chiral sectors. In this section we mainly focus on the Kähler potential (5.1) to further establish the type II/heterotic string duality of eq. (3.3).
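The split into the two sectors described in the text corresponds to the additively separated ansatz (the notation is assumed here)

$$ K^{(2)}\big(\phi,\bar\phi;\tilde\phi,\bar{\tilde\phi}\big)\;=\;K^{(2)}(\phi,\bar\phi)\;+\;\tilde K^{(2)}\big(\tilde\phi,\bar{\tilde\phi}\big)\,, $$

which, as footnote 24 below cautions, is not the most general form but suffices for the present purposes.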
Type IIA on Calabi-Yau fourfolds
The low energy degrees of freedom of type IIA compactifications on the Calabi-Yau 4-fold X are the twisted chiral multiplets T_A, A = 1, ..., h^{1,1}(X), and the chiral multiplets z_I, I = 1, ..., h^{3,1}(X).^25 They arise from the Kähler and the complex structure moduli of the 4-fold X.^26 The tree-level Kähler potential is then given by K^(2) = K^(2)_CS(z, z̄) + K̃^(2)_K(T, T̄), where the potential K_CS for the complex structure moduli is determined by (5.3) in terms of the holomorphic (4, 0) form Ω of the Calabi-Yau X. In the large radius regime the twisted potential K̃_K for the Kähler moduli reads as in (5.4), with K_ABCD the topological intersection numbers of the 4-fold X. The Kähler moduli T_A appear in the expansion of the complexified Kähler form, where B and J are the NS 2-form and the real Kähler form, respectively. Finally, in the presence of background fluxes, we obtain the holomorphic superpotentials (5.6). Here F_hor ∈ H^4_hor(X) is a non-trivial horizontal RR 4-form flux, whereas F_ver ∈ H^ev_ver(X) is a non-trivial even-dimensional vertical RR flux.^27 The twisted chiral superpotential W̃ receives non-perturbative worldsheet corrections away from the large radius point .

23 Note that these two-dimensional theories describe the effective space-time theory and not the two-dimensional field theory of the underlying microscopic string worldsheet.
24 This splitting of the Kähler potential does not represent the most general form. In fact, in general the target space metric need not even be Kähler . The given ansatz, however, suffices for our purposes.
25 In two dimensions the graviton and the dilaton are not dynamical .
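The potentials and superpotentials referred to in (5.3)-(5.6) are the standard 4-fold expressions; schematically, with normalizations assumed,

$$ e^{-K^{(2)}_{CS}} = \int_X \Omega\wedge\bar\Omega\,,\qquad e^{-\tilde K^{(2)}_{K}} = \frac{1}{4!}\,K_{ABCD}\,\mathrm{Im}\,T^A\,\mathrm{Im}\,T^B\,\mathrm{Im}\,T^C\,\mathrm{Im}\,T^D\,, $$
$$ B+iJ = \sum_A T^A\,\omega_A\,,\qquad W(z) = \int_X \Omega\wedge F_{hor}\,,\qquad \tilde W(T) = \int_X e^{B+iJ}\wedge F_{ver}\,, $$

the last two being the Gukov-Vafa-Witten superpotential and its twisted, vertical analogue.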
Type IIA on the Calabi-Yau 4-folds X_A and X_B
We now turn to the type IIA compactification on the special Calabi-Yau 4-fold X_A.
As discussed in sect. 4.1, the 4-fold geometry X_A is a fibration over the P^1 base, where the generic fiber is the Calabi-Yau 3-fold Z_A. Geometries of this type have been studied previously in , and we extend the discussion here to fibrations with singular fibers, which support the brane/bundle degrees of freedom in the context of open-closed/heterotic duality.
For the divisor D_S dual to the base this implies a relation involving c_3(X_A), the third Chern class of the 4-fold X_A, and χ(Z_A), the Euler characteristic of the 3-fold Z_A. Hence the divisor D_S is homologous to the generic (non-singular) fiber Z_A.
For type IIA compactified on the 4-fold X_A we are interested in the twisted chiral sector, and hence in the twisted Kähler potential (5.4). This means we need to obtain the intersection numbers of the fibered 4-fold X_A. We use arguments similar to those of ref. , where the intersection numbers of K3-fibered Calabi-Yau threefolds are determined.

27 The 6- and 8-forms are the magnetic dual fluxes to the RR 4- and 2-form fluxes in type IIA.
We denote by S the (complexified) Kähler modulus that measures the volume of the P^1 base, which is dual to the divisor D_S representing the generic fiber Z_A. Consider now a divisor H_a of the generic fiber Z_A. As we move this divisor about the base by mapping it to equivalent divisors in the neighboring generic fibers, we define a divisor D_a in the Calabi-Yau 4-fold X_A.^28 The remaining (inequivalent) divisors of the 4-fold X_A are associated to singular fibers, and we denote them by D̂_â.
The 2-forms ω_S, ω_a and ω̂_â, which are dual to the divisors D_S, D_a and D̂_â, now furnish a basis of the cohomology group H^2(X_A, Z), and we denote the corresponding (complexified) Kähler moduli by S, t_a and t̂_â. They measure the volume of the P^1 base, the volume of the 2-cycles in the generic 3-fold fiber Z_A, and the volume of the remaining 2-cycles arising from the degenerate fibers.
From this analysis we can extract the structure of the intersection numbers. Since D_S is a homology representative of the generic fiber, it intersects only the Calabi-Yau divisors D_a, according to the triple intersection numbers κ_abc of the 3-fold Z_A. The intersection numbers for divisors which do not involve D_S cannot be further specified by these general considerations. Therefore we find an intersection form where t'_α are the Kähler moduli (t_a, t̂_â) with quartic intersection numbers K'_αβγδ. The twisted Kähler potential for the 4-fold X_A then takes the form (5.8). The essential point here is that the leading term for large S involves only the moduli t_a associated with the bulk fields in the dual compactifications, whereas the brane/bundle degrees of freedom appear in the subleading term. In the decoupling limit Im S → ∞,

28 Due to monodromies with respect to the degenerate fibers, it may happen that two inequivalent divisors H_a and H_b are identified globally, and hence yield the same divisor D_a = D_b. Then we work on the 3-fold Z_A with monodromy-invariant (linear combinations of) divisors, such that only inequivalent divisors D_a are generated on the 4-fold X_A.
the kinetic terms derived from (5.8) factorize into the bulk and the bundle sector of the dual theories, illustrating the separation of the physical scales at which the fields in the two sectors fluctuate. In this limit, the backreaction of the (dual) bulk fields on the (dual) bundle fields vanishes and the latter fluctuate in the fixed background determined by the bulk fields. A more detailed treatment of the heterotic dual will be given below.
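Given the intersection structure just described, the large S behavior of the twisted potential (5.8) should be, schematically and with coefficients assumed,

$$ e^{-\tilde K^{(2)}} \;\sim\; \mathrm{Im}\,S\;\kappa_{abc}\,\mathrm{Im}\,t^a\,\mathrm{Im}\,t^b\,\mathrm{Im}\,t^c \;+\; K'_{\alpha\beta\gamma\delta}\,\mathrm{Im}\,t'^{\alpha}\,\mathrm{Im}\,t'^{\beta}\,\mathrm{Im}\,t'^{\gamma}\,\mathrm{Im}\,t'^{\delta}\,, $$

so that for Im S → ∞ the term cubic in the bulk moduli t^a dominates and the brane/bundle moduli t̂_â enter only at subleading order.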
Analogously to the three contributions to H^2(X_A, Z) distinguished above, we can decompose the even-dimensional fluxes F_V into three distinct classes, where the components f^(1) and f^(2) pull back to even-forms in H^ev(Z_A), while the fluxes f^(3) vanish upon pullback to the regular 3-fold fiber Z_A. With the vertical fluxes (5.9) the (semi-classical) twisted chiral superpotential W̃(X_A) simplifies accordingly, with the generators (ω_a, ω̂_â) collectively denoted by ω'_α. Next we turn to the chiral sector of type IIA strings compactified on the mirror 4-fold X_B. The Kähler potential (5.3) is then expressed in terms of the periods Π_Σ = ∫_{γ_Σ} Ω^{(4,0)} of the Calabi-Yau 4-fold X_B.
Heterotic string on T^2 × Z_B
The low energy effective action of the heterotic string compactified on the 4-fold T^2 × Z_B together with a (non-trivial) gauge bundle V has, in the large radius regime, the structure displayed in eq. (5.12). The chiral Kähler potential K_het coincides with the four-dimensional Kähler potential of the heterotic string compactified on the Calabi-Yau 3-fold Z_B with the gauge bundle V.
Apart from the heterotic dilaton, which is not a dynamical field in two dimensions , it comprises all the kinetic terms both for the chiral multiplets of the Kähler/complex structure moduli of the 3-fold Z_B and for the chiral multiplets from the gauge bundle V. In order to infer some qualitative information about the relevant kinetic terms of the moduli z_a and ẑ_â, we briefly discuss the general structure of the bosonic part of the four-dimensional low-energy effective heterotic action in the four-dimensional Einstein frame. Here R^(4) is the Einstein-Hilbert term and κ_4 is the four-dimensional gravitational coupling constant. C_ab and B_âb̂ denote the Kähler metrics of the chiral fields z_a and ẑ_â. For simplicity, cross terms among bulk and bundle moduli and the kinetic terms of other moduli fields are omitted. Note that the α' dependence of the bundle moduli is absorbed into the Kähler metric B_âb̂.
From a dimensional reduction point of view the bundle moduli ẑ_â arise from a Kaluza-Klein reduction of the ten-dimensional vector field A^(10), which in terms of four-dimensional coordinates x and internal coordinates y enjoys an expansion into a four-dimensional gauge field A^(4)(x) and internal vector fields v_â(y). The four-dimensional vector A^(4) gives rise to the Yang-Mills kinetic term, while the internal vector fields v_â are integrated out in the dimensional reduction process and yield the metric B_âb̂ of eq. (5.14). The volume factor V(Z_B) arises due to the Weyl rescaling to the four-dimensional Einstein frame, and it compensates the scaling of the (internal) measure d^6y √g_6. Thus the dimensionless quantity α'/ℓ^2, where ℓ is the length scale of the internal Calabi-Yau manifold Z_B, governs the magnitude of the kinetic terms B_âb̂.
As discussed in sect. 4.1, the decoupling limit Im S → ∞ defined in ref.  is mapped on the heterotic side to the large fiber limit of the elliptically fibered Calabi-Yau 3-fold Z_B → B. In order to work in a semi-classical regime, the volume V(B) of the base B, common to the K3 fibration X_B → B and the elliptic fibration Z_B → B, has to be taken large as well, due to the relations which follow from the six-dimensional relations λ_{II,6d} = λ^{-1}_{het,6d}, g_het = λ^{-2}_{II,6d} g_II . As we move away from the stable degeneration point in the dual type IIA description, the volume of the elliptic fiber in the 3-fold Z_B becomes finite while we keep the volume of the base large. Here ℓ_F is the length scale of the generic elliptic fiber and ℓ_B is the length scale of the base.
As a consequence, as we move away from the stable degeneration point, the bundle components which scale with the dimensionless quantity α'/ℓ_F^2 are the dominant contributions to the metric (5.14). The moduli of the spectral cover correspond on the (dual) elliptic fiber to vector fields v_â, which are contracted with the metric component scaling as g_F. Therefore the bundle moduli ẑ_â associated to the subbundle E of the spectral cover become relevant.
Thus, for the heterotic string compactification on the 3-fold Z_B with gauge bundle V, the complex structure/bundle moduli space of the pair (Z_B, E) is governed by the deformation problem of a family of Calabi-Yau 3-folds Z_B together with a family of spectral covers Σ_+.
As proposed in (3.1), this moduli dependence is encoded in the relative periods Π_Σ(z, ẑ) of the relative three-form cohomology H^3(Z_B, Σ_+), and therefore in the semi-classical regime the Kähler potential K^(4) of the complex structure/bundle moduli space of (Z_B, E) is expressed explicitly by eq. (5.16). The topological metric η_ΣΛ arises from the intersection matrix of the relative cycles γ_Σ.
This intersection matrix has a block structure, where η_{Z_B} is the topological metric of the absolute cohomology H^3(Z_B) and η̃_{Σ_+} is the topological metric of the variable cohomology sector H^2_var(Σ_+) of the relative cohomology group H^3(Z_B, Σ_+).
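In terms of these data, the semi-classical potential (5.16) should take the form, up to factors of i and overall normalization assumed here,

$$ e^{-K^{(4)}} \;=\; \bar\Pi^{\Sigma}\,\eta_{\Sigma\Lambda}\,\Pi^{\Lambda}\,,\qquad \eta \;=\; \begin{pmatrix} \eta_{Z_B} & 0\\ 0 & \tilde\eta_{\Sigma_+}\end{pmatrix}\,, $$

with the relative periods Π_Σ(z, ẑ) introduced above and the block-diagonal intersection form just described.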
Note that the structure of the Kähler potential (5.16) is also in agreement with the mirror Kähler potential of type IIA compactified on the 4-fold X_A. By the arguments of sect. 4, the Kähler modulus S of the P^1 base of the 4-fold X_A is related to the heterotic volume modulus of the elliptic fiber of the fibration Z_B → B. In the large base limit of X_A, alias the bundle decoupling limit of (Z_B, V), the leading order terms involve the Kähler moduli of the 3-fold fiber Z_A, respectively the complex structure moduli of the 3-fold Z_B. These moduli spaces are identified by mirror symmetry of the 3-fold mirror pair (Z_A, Z_B). The subleading terms for type IIA on X_A in eq. (5.8) should be compared to the subleading bundle moduli terms in eq. (5.16) on the heterotic side.
Finally we remark that, since the chiral sectors of the heterotic string compactification on T^2 × Z_B and on Z_B are equivalent (cf. eq. (5.12)), the identification of the chiral Kähler potentials in the type IIA/heterotic duality in two space-time dimensions carries over to the analogous identification of Kähler potentials in the F-theory/heterotic dual theories in four space-time dimensions discussed in sect. 4.
A heterotic bundle on the mirror of the quintic
Our first example will be an N = 1 supersymmetric compactification on the quintic in P^4 and its mirror. This was the first compact manifold for which disc instanton corrected brane superpotentials were computed from open string mirror symmetry in . This computation was confirmed by an A model computation in . An off-shell version of the superpotential was later obtained in , both in the relative cohomology approach, eq. (2.1), and from open-closed duality, eq. (2.3).
Heterotic string on the threefold in the decoupling limit
Here we follow the treatment in . In the framework of , the mirror pair (X_A, X_B) of toric hypersurfaces can be defined by a pair (∆, ∆*) of toric polyhedra, given in app. B.1 for the concrete example. The h^{1,1} = 3 Kähler moduli t_a, a = 1, 2, 3, of the fibration Z_A → X_A → P^1 describe the volume t = t_1 + t_2 of the generic quintic fiber Z_A, the volume S = t_3 of the base P^1, and one additional Kähler volume t̂ = t_2 measuring the volume of an exceptional divisor intersecting the singular fiber Z^0_A. This divisor is associated with the vertex ν_6 ⊂ ∆ in eq. (B.1), and its Kähler modulus represents an open string deformation of a toric A brane geometry (Z_A, L) of the class considered in .
The hypersurface equation for the mirror 4-fold X_B is given by the general expression (6.1). Here the sums over i and j run over the relevant integral points of the polyhedra ∆ and ∆*, respectively, and the a_i are complex coefficients that determine the complex structure of X_B.
A similar expression holds for the hypersurface equation of the mirror manifold X_A, with the roles of ∆ and ∆* exchanged.
Instead of writing the full expression, which would be too complicated due to the large number of relevant points of ∆*, we first write a simplified expression (6.2) in local coordinates that displays the quintic fibration of the mirror. Here v is a local coordinate on C* and the z_a are the three complex structure moduli of X_B, related to the aforementioned Kähler moduli of X_A by the mirror map, t_i = t_i(z). In the large volume limit the leading behavior is t_i(z) = (1/2πi) ln(z_i) + O(z). The special combination z_1 z_2 appearing above is mirror to the volume of the quintic fiber of π : X_A → P^1. We refer to app. B.1 for further details of the parametrization used here and in the following.
Although the above expression for P(X_B) is oversimplified (most of the coordinates x_j in (6.1) have been set to one), it suffices to illustrate the general structure and to sketch the effect of the decoupling limit, which, again simplifying, corresponds to setting z_3 = 0, removing the term ∼ p_− in (6.3).^29 This produces a hypersurface equation of the promised form (4.12). In particular, p_0(Z_B) = 0 defines the mirror of the quintic, which has a single complex structure deformation parametrized by z = z_1 z_2. The hypersurface D for the relative cohomology space H^3(Z_B, D), which specifies the Hodge variation problem, is defined by p_+ = 0, eq. (6.4). More precisely, the component of (6.4) relevant to the brane superpotential of refs.  lies in a patch with x_i ≠ 0 ∀ i, and, passing to appropriate local coordinates for this patch, the Hodge variation on D is equivalent to that on a quartic K3 surface in P^3.

29 A more precise description of this process as a local mirror limit is given in ref. .
The F-theory content of the toric hypersurface X_B and its heterotic dual are exposed in different local coordinates on the ambient space, which put the hypersurface equation into the form (6.5) studied in the context of F-theory/heterotic duality in . Here (Y, X, Z) are the coordinates on the elliptic fiber, a cubic in P^2. Again the zero set p_0 = 0 defines the 3-fold geometry Z_B, while the polynomials p_± specify the components Σ_± of the spectral cover of the heterotic bundle in the two E_8 factors. While p_− corresponds to the trivial spectral cover, p_+ describes the non-trivial component (6.6), where ρ is a third root of unity. Note that the spectral cover Σ_+ represents the most general polynomial of degree two invariant with respect to the orbifold group (6.7). As a consequence, the six zeros become just two distinct zeros in the elliptic fiber E, adding up to zero. Therefore the spectral cover describes an SU(2) bundle on the heterotic manifold Z_B. Alternatively, one may study the perturbative gauge symmetry of the heterotic compactification by analyzing the singularities of the elliptic fibration X_B. The result of this procedure, described in detail in the appendix, is that the bundle leads to the gauge symmetry breaking pattern (6.8), in agreement with a new component of the bundle with structure group SU(2).
Flux superpotential in the decoupling limit
To be more precise, the above discussion describes only the data of the bundle geometrized by F-theory and ignores the 'non-geometric' part of the bundle arising from fluxes on the 7-branes, which may lead to a larger structure group of the bundle, and thus a smaller gauge group of the compactification than the one described above .
In particular, to compute the heterotic superpotential (2.7), we have to specify the class γ of sect. 2.2, which determines the flux numbers N̂_Σ in (3.1), and thus the superpotential as a linear combination of the 4-fold periods. This is the heterotic analogue of choosing the 5-brane flux on the type II brane (6.4). Since eq. (4.13) identifies the type II open string brane modulus z_2 literally with the heterotic bundle modulus in the decoupling limit Im S → ∞, the relative cohomology space and the associated Hodge variation problem are identical to those studied in the context of type II branes in . Using the identification γ = γ̂ between the classes defined in (2.5) and (2.6), the heterotic superpotential in the decoupling limit is identical to that for the type II brane computed in sect. 5 of , see eq. (5.3). We now discuss the corrections to this result for finite Im S.
F-theory superpotential on the four-fold X_B
According to the arguments of sect. 3, Hodge theory on the F-theory 4-fold X_B computes further corrections to the superpotential of the type II/heterotic compactification for finite S. We will now perform a detailed study of the periods of X_B using mirror symmetry of the 4-folds (X_A, X_B).
Mirror symmetry is vital in two ways. Firstly, it allows one to determine the geometric A model expansion of the periods. In the present context we use this A model expansion to describe the superpotential W_F(X_B) near a large complex structure limit of X_B, which by the previous arguments describes the decoupling limit Im S → ∞ of the dual heterotic compactification (Z_B, E) near large complex structure of Z_B.^30 The methods of mirror symmetry for toric 4-fold hypersurfaces used in the following have been described in detail in , and we refer to these papers to avoid excessive repetition. We work at the large complex structure point of X_B defined by the values z_a = 0, a = 1, 2, 3, for the moduli in the hypersurface equation (6.2). This corresponds to a large volume phase t_a ∼ (1/2πi) ln(z_a) → i∞ in the Kähler moduli of the mirror manifold X_A generated by the charge vectors (6.9). The topological intersection data for this phase can be determined from toric geometry in the standard way, see  for examples. We refer to the appendix of  for details on this particular example and restrict ourselves here to quoting the quartic intersections (6.10). Here J = Σ_a t_a J_a = Σ_a ť_a J̌_a denotes the Kähler form on X_A, with J_a, a = 1, 2, 3, a basis of H^{1,1}(X_A) dual to the Mori cone defined by (6.9). In the above, we have introduced the linear combinations (6.11) and the corresponding basis {J̌_a} of H^{1,1}(X_A) to expose the simple dependence on the Kähler modulus ť_1 = Vol(Z_A) of the generic quintic fiber of π : X_A → P^1. The leading terms of the period vector Π_Σ = ∫_{γ_Σ} Ω for X_B in the limit z_a → 0 can be computed from the classical volumes of even-dimensional algebraic cycles in X_A,

30 The fact that the large complex structure limit of the 4-fold X_B implies a large complex structure limit of the dual heterotic 3-fold Z_B follows already from the hypersurface equation, eq. (6.5), and is explicit in the monodromy weight filtration of the 4-fold periods discussed below.
where γ_Σ ∈ H_4(X_B, Z) refers to a basis of primitive 4-cycles in X_B and γ̂_Σ to a basis for the 2q-dimensional algebraic cycles in H_{2q}(X_A), q = 0, ..., 4, related to the former by mirror symmetry. Except for q = 2, there are canonical basis elements for H_{2q}(X_A, Z), given by the class of a point, the class of X_A, the divisors dual to the generators J̌_a, and the curves dual to these divisors, respectively. On the subspace q = 2 we choose, as in , a basis built from the toric divisors. Here the D_i = {x_i = 0}, i = 0, ..., 8, are the toric divisors defined by the coordinates x_i on the ambient space for X_A (cf. eq. (6.1)), which correspond to the vertices of the polyhedron ∆ in (B.1). The classical volumes of these basis elements computed from the intersections (6.10) are collected in (6.12), where the first index q on Π_{q,·} denotes the complex dimension of the cycle.
The entries of the period vector Π(X B ) are solutions of the Picard-Fuchs system for the mirror manifold X B with the appropriate leading behavior (6.12) for z a → 0. The Picard-Fuchs operators can be derived from the toric GKZ system and are given in eq. (A.6) in the appendix.
The Gauss-Manin system for the period matrix imposes certain integrability conditions on the moduli dependence of the periods of a CY n-fold. For n = 2 these conditions imply that there are no instanton corrections on K3 and for n = 3 they imply the existence of a prepotential F for the periods. For n = 4 the periods can no longer be integrated to a prepotential, but still satisfy a set of integrability conditions discussed in ref. .
Applying the integrability conditions to the example, the leading behavior of Π near ť_3 = i∞ is captured by only seven functions, denoted by (1, ť_1, ť_2, F_t, W, F_0, T). The eleven solutions can be arranged into a period vector of the form (6.13), where the index q on Π_{q,·} now labels the monodromy weight filtration w.r.t. the large volume monodromy ť_a → ť_a + 1.
Since the decoupling limit sends the compact 4-fold X_B to its non-compact open-closed dual X^{nc}_B, these functions should reproduce the relative 3-fold periods on H^3(Z_B, D) by virtue of eq. (2.3). Indeed, the four functions (1, ť_1, F_t, F_0) converge to the four periods on H^3(Z_B), where F(t) = (5/6) t^3 + O(e^{2πit}) is the closed string prepotential on the mirror quintic.^31 The remaining three functions reproduce the three chain integrals on H^3(Z_B, D) with non-trivial ∂γ ∈ H_2(D); the superpotential W is conjectured to be the generating function of disc instantons in the type II mirror configuration (Z_A, L), similarly as F(t) is the generating function of closed string sphere instantons . In the above formula, β denotes the homology class of the disc and the N_β are the integral Ooguri-Vafa disc invariants .
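The disc instanton expansion referred to here is of the standard Ooguri-Vafa form; schematically, with q^β the exponentiated Kähler class of the disc,

$$ W \;=\; \sum_{\beta} N_{\beta} \sum_{k=1}^{\infty} \frac{q^{k\beta}}{k^{2}} \;=\; \sum_{\beta} N_{\beta}\,\mathrm{Li}_2\big(q^{\beta}\big)\,. $$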
Since the closed string period vector (6.11) appears twice in (6.13), with coefficients 1 and ť_3 = S, respectively, the leading terms of the eleven periods on X_B are proportional to the seven relative periods on H^3(Z_B, D).
A linear combination of these leading terms gives a large S expansion for the superpotential of the form (3.1).

31 Here and in the following we neglect terms in the geometric periods from polynomials of lower degree in ť_i.
Finite S corrections: perturbative contributions
There are two types of finite S contributions in the 4-fold periods which correct the 3-fold result: linear corrections ∼ S^{-1} and exponential corrections ∼ e^{2πiS}. In the type II orientifold, where Im S ∼ 1/g_s, the first should correspond to perturbative corrections.
These linear corrections are described by the three additional functions π_{2,1}, π_{3,1}, π_4 in (6.13), whose leading behavior as t_3 → i∞ is governed by certain functions f_{q,·}. An immediate observation is that these terms seem to break the naive S-duality symmetry of the type II string (and the T-duality of the heterotic string) even in the large S limit where one ignores the D-instanton corrections ∼ e^{2πiS}. The functions f_{q,·} vanish exponentially in the q̌_i = e^{2πiť_i} for i = 1, 2 near the large complex structure limit of Z_B, but contribute in the interior of the complex structure moduli space of Z_B.
Consider, e.g., the ratio of two periods corresponding to the central charges of an 'S-dual' pair of BPS domain walls with classical tension ∼ F_t; the functions f_{q,·} correct this ratio at finite S. In principle there are various possibilities regarding the fate of S-duality. Firstly, there could be a complicated field redefinition which corrects the relation Im S = 1/g_s away from the decoupling limit, such that there is an S-duality for a redefined field S̃ including these corrections. Such a redefinition is known to be relevant in four-dimensional N = 2 compactifications of the heterotic string, where one may define a perturbatively modular invariant dilaton . On the other hand, duality transformations often originate from monodromies of the periods in the Calabi-Yau moduli space, which generate simple transformations at a boundary of the moduli space, such as Im S = ∞, but correspond to complicated field transformations away from this boundary. Again, such a 'deformation' of a duality transformation is known to happen in the heterotic string . At this point we cannot decide between these options, or a simple breaking of S-duality, without a detailed study of the monodromy transformations in the three-dimensional moduli space of the 4-fold, which is beyond the scope of this work.
D-instanton corrections and Gromov-Witten invariants on the 4-fold
There are further exponential corrections ∼ e^{2πiS} to the moduli dependent functions in eqs. (6.13). Recall that we are considering here the classical periods of X_B, which describe the complex structure moduli space of the 4-fold X_B and complex deformations of the dual heterotic bundle compactification on Z_B. From the point of view of the type IIA compactification on X_B, obtained by compactifying F-theory on X_B × T^2, these are B model data and do not have an immediate instanton interpretation.
However, according to the identification of the decoupling limit in sect. 2, we expect these B model data to describe D-instanton corrections ∼ e^{-2π/g_s} to the type II orientifold on the 3-fold, see (3.3). Lacking a sufficient understanding of the aforementioned issue of field redefinitions, we will express the expansion in exponentials ∼ e^{2πiS} in terms of Gromov-Witten invariants, or rather in terms of integral invariants of Gopakumar-Vafa type, using the multi-cover formula for 4-folds given in . These invariants capture the world-sheet instanton expansion of the A model on the mirror X_A of X_B. Note that if the mirror pair (X_A, X_B) supports a duality of the type (3.16), then this expansion captures world-sheet and D-instanton corrections computed by the twisted superpotential W̃(X_A), according to the arguments in sect. 3.5. However, according to eq. (3.6) such a duality can only exist if the mirror 4-fold X_A is given in terms of a suitable fibration structure, which is not true for the quintic example of this section (since X_A is neither elliptically nor K3 fibered), but holds for other examples considered in sect. 7. The integral A model expansion of the 4-fold is defined by (6.17),^32 where Π_{2,γ} is one of the periods in the q = 2 sector, double logarithmic near the large complex structure limit z_a = 0, and p_2 is a degree two polynomial in the coordinates t_a defined by (6.9). Moreover, β is a label which in the A model on the mirror X_A specifies a homology class β ∈ H_2(X_A, Z) with exponentiated Kähler volume q^β = Π_a q_a^{n_a}, q_a = e^{2πit_a}. As discussed above, these Kähler moduli of X_A map under mirror symmetry to

32 The fact that this multi-cover formula for spheres in a 4-fold is formally the same as the multi-cover formula for discs in a 3-fold is at the heart of the open-closed duality of .
coordinates on the complex structure moduli space of the F-theory compactification on X_B,^33 and we use these coordinates to write an expansion for the B model on X_B.
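Since the multi-cover structure is formally the same as for discs (cf. footnote 32), the definition (6.17) should take the schematic form

$$ \Pi_{2,\gamma} \;=\; p_2(t_a) \;+\; \sum_{\beta} N^{\gamma}_{\beta}\,\mathrm{Li}_2\big(q^{\beta}\big)\,,\qquad q^{\beta} = \prod_a q_a^{n_a}\,, $$

with N^γ_β the integral invariants whose leading values are discussed next.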
We restrict ourselves here to discussing only the few leading coefficients N^γ_β for the three linearly independent q = 2 periods of X_B. We label the 'class' β by three integers (m, n, k), such that N^γ_β is the coefficient of the exponential exp(2πi(mt_1 + nt_2 + kt_3)) in the basis (6.9). Thus k is the exponent of e^{2πiS} in the expansion.
Deformation of the closed string prepotential F_t. The leading term of the period Π_{2,2} is the closed string prepotential (6.13). This period is mirror to a 4-cycle in the quintic fiber of X_A and depends only on the closed string variable t = t_1 + t_2 in the limit Im S → ∞. At leading order the expansion (6.17) preserves this structure; in other words, the mirror map t = t(z) for the closed string modulus t = t_1 + t_2 is the same as in the theory without branes, with z = z_1 z_2. This is no longer the case for finite S, as there are corrections to the mirror map of the form t(z_a) = t(z) + e^{2πiS} f(z, ẑ).
Deformation of the disc superpotential W(t, t̂). The leading term of the period Π_{2,3} is the brane superpotential of , which conjecturally computes the disc instanton expansion of an A type brane on the quintic.
Heterotic five-branes and non-trivial Jacobians
In this section we discuss a number of further examples to illustrate the duality relations and the application of the method. The geometries are mostly taken from , where the brane superpotential for B-type branes has already been computed. Since the superpotential (2.7) for the heterotic compactification on Z_B with the appropriate bundle E agrees with the brane superpotential in the decoupling limit, the explicit heterotic superpotential in this limit can be read off from the results of . The divisor classes of the 4-fold can be roughly divided into the following sets w.r.t. their meaning in the dual heterotic compactification on the CY 3-fold Z_B with bundle E (see ). Generic classes: the first set arises from the two generic classes of the K3 fiber Y of the K3 fibration:
1. The class E of the fiber of the elliptic fibration Y → P^1, which is also the elliptic fiber of X_B. This curve shrinks in the 4d F-theory limit and does not lead to a field in four dimensions;
2. The class F of the section of the elliptic fibration Y → P^1, which provides the universal tensor multiplet associated with the heterotic dilaton.
Fixing the heterotic 3-fold Z_B, one can still vary the 4-fold data in the last group to choose a bundle E. In the framework of toric geometry, this step can be made very explicit by using local mirror symmetry of bundles . Starting from the toric 3-fold polyhedron for Z_B one may 'geometrically engineer' the bundle in terms of a 4-fold polyhedron, by appropriately adding or removing exceptional divisors, as described in great detail in . By the type II/heterotic map (4.13), this is the counterpart of adding singular fibers to the mirror fibration X_A → P^1 in (3.5) to define a toric A type brane on the 3-fold mirror Z_A. As seen in the previous section, the quintic example of  corresponds to a perturbative heterotic bundle with structure group SU(2). Another example of a brane compactification taken from ref.  turns out to have a quite different interpretation.
In this case, the brane deformation of the type II string does not translate to a bundle modulus on the heterotic side under the type II/heterotic map (4.13), but rather to a brane modulus. On the heterotic side, this is a 5-brane representing a small instanton .
Let us first recall the brane geometry on the type II side, which is defined in  as a compactification of a non-compact brane in the non-compact CY O(−3) → P^2, i.e. the anti-canonical bundle of P^2. This example has been very well studied in the context of open string mirror symmetry in . The non-compact CY can be thought of as the large fiber limit of an elliptic fibration Z_A → P^2, which gives the interesting possibility to check the result obtained from the compact 4-fold against the disc instanton corrected 3-fold superpotential computed by different methods in . Indeed it was shown in  that 4-fold mirror symmetry reproduces the known results for the non-compact brane in the large fiber limit, including the normalization computed from the intersections of the 4-fold X_A. The local result is corrected by instanton corrections at finite fiber volume.^34 Two different 3-fold compactifications of O(−3) → P^2 were considered in , with different models for the elliptic fiber.^35 As the two examples produce very similar results, we discuss here the degree 18 case of  in some detail and only briefly comment on the differences for the degree 9 hypersurface below.

34 Note that this is a large fiber limit in the type IIA theory compactified on Z_A, not the previously discussed large fiber limit of the heterotic string compactified on Z_B.
35 A cubic in P^2 for the degree 9 and a sextic in P^2(1, 2, 3) for the degree 18 hypersurface.
The B-type brane is defined in . On the mirror side, the blow-up modulus corresponds to a new complex structure deformation parametrizing a holomorphic divisor in Z_B. As will be explained now, this deformation maps in the heterotic compactification to a modulus moving a heterotic 5-brane that wraps a curve C in the base B_2 of the 3-fold Z_B.
The hypersurface constraint (7.2) is already in the form to which the methods of  can be applied. The relevant component of p_+ deforming with the modulus ẑ lies in a patch with s, t, u ≠ 0 and is given by Σ_+ : Z^6 (t^6 u^6 + ẑ s^{12}) = 0.
Here the deformation ẑ does not involve the coordinates of the elliptic fiber, and therefore it does not correspond to a bundle modulus. Instead, this F-theory geometry describes heterotic 5-branes wrapping a curve C in the base B_2 of the heterotic compactification.
As described in detail in  (see also ref. ), F-theory describes these heterotic 5-branes by blow-ups of the P^1 bundle B_3 → B_2.
The toric 4-fold singularities associated with heterotic five-branes of type (7.2) were also studied in great detail in . In the present case, the 5-branes wrap a set of curves C in the elliptic fibration Z_B → B_2, defined by the zeros of the function f(s, t, u) = s^6 (t^6 u^6 + ẑ s^{12}). The deformation ẑ moves the branes on the second component, just as it moves the type II brane in the dual type II compactification on Z_B.
By the F-theory/heterotic dictionary developed in refs. , the above singularity describes a small E_8 instanton, which can be viewed as an M-theory/type IIA 5-brane . Note that there are also exceptional blow-up divisors in X_B associated with the 5-brane wrapping, which support the elements in H^{1,1}(X_B) dual to the world-volume tensor fields on the 5-branes . However, these Kähler blow-ups are not relevant for the purpose of computing the superpotential W(X_B).
The above conclusions may again be cross-checked by analyzing the perturbative gauge symmetry of the heterotic compactification, which in this case does not change for ẑ ≠ 0. The further discussion is as above, except for the gauge symmetry breaking pattern, which is in this case E_6 × E_6 → E_6 × E_6.
In the decoupling limit Im S → ∞, the heterotic superpotential for the 5-branes in these two cases agrees with the type II brane superpotential computed in sect. 3.2 and app. B of , respectively. See also sect. 5 of  for a reconsideration of the first case, with an identical result (Tab. 3a/5.2).
Non-trivial Jacobians: SU(2) bundle on a degree 9 hypersurface
A new aspect of another example of  is the appearance of a non-trivial Jacobian J(Σ) of the spectral surface, corresponding to non-zero h^{1,2}. In this case there are additional massless fields, associated with the Jacobian J(Σ) in the F-theory compactification and with the non-trivial Jacobian of Σ in the heterotic dual . The present example has been considered in sect. 3.3 of  and describes a brane compactification on the same degree nine hypersurface Z_A as in the previous section, but with a different gauge background. Z_A is defined as a hypersurface in the weighted projective space P^4(1, 1, 1, 3, 3), with Hodge numbers and Euler number as indicated. The numbers in brackets denote the non-toric deformations of Z_A, which are unavailable in the given hypersurface representation.
As familiar by now, the technical details on toric geometry are relegated to app. B.
The local form (6.2) of the hypersurface equation for X_B, exposing the elliptic fibration and the hypersurface Z_B, is

p_0 = a_1 Y^3 + a_2 X^3 + Z^3 (a_3 (stu)^3 + a_4 s^9 + a_5 t^9 + a_6 u^9) + a_0 YXZ stu.

Again the zero set p_0 = 0 defines the 3-fold geometry Z_B for the compactification of the type II/heterotic string, while the brane geometry considered in  is defined by the hypersurface D : p_+ = 0. By the type II/heterotic map (4.13), we reinterpret these equations in terms of a heterotic bundle on Z_B. While p_− corresponds to the trivial spectral cover, p_+ describes a component with non-trivial dependence on a single modulus ẑ, where ẑ is the brane/bundle deformation. As in the quintic case, Σ_+ may be identified with a component with structure group SU(2). This is confirmed by a study of the perturbative gauge symmetry of the heterotic compactification, which changes for ẑ ≠ 0. The Im S → ∞ limit of the heterotic superpotential for this bundle coincides with the type II result computed in .
ADE Singularities, Kazama-Suzuki models and matrix factorizations
In the above we have described how 4-fold mirror symmetry computes quantum corrections to the superpotential and the Kähler potential of supersymmetric compactifications to four and lower dimensions with four supercharges. Specifically, these corrections are expected to correspond to D(−1), D1, and D3 instanton contributions in the type II orientifold compactification on Z B and to world-sheet and space-time instanton corrections to a (0, 2) heterotic string compactification on the same manifold. At present, it is hard to concretely verify these predictions by an independent computation. A particularly neat way to find further evidence for our proposal (in the N = 2 supersymmetric situation) would be to establish a connection with refs. . In these works, considerable progress has been made in understanding corrections to the hyper-multiplet moduli, especially the interaction with mirror symmetry. It would be very interesting to study the overlap with the non-perturbative corrections discussed in the present paper. In this section, we discuss a different application of heterotic/F-theory duality which might be viewed as an interesting corroboration of our main statements, and is also of independent interest.
N = 2 supersymmetry
It is best again to begin with 8 supercharges. Consider a heterotic string compactification on a K3 manifold near an ADE singularity with a trivial gauge bundle on the blown-up 2-spheres. The hypermultiplet moduli space of this heterotic compactification receives α' corrections from perturbative and world-sheet instanton effects. It has been shown in  that for an A_1 singularity, the heterotic moduli space in the hyperkähler limit is given by the Atiyah-Hitchin manifold, which is also the moduli space of three-dimensional N = 4 SU(2) Yang-Mills theory. This relation between the moduli space of the heterotic string on a singular K3 and the moduli space of a three-dimensional gauge theory can be derived and generalized by studying the stable degeneration limit of the dual type IIA/F-theory 3-fold. Specifically, it is shown in refs.  that the 3-fold X_B dual to the heterotic string on an ADE singularity of type G, with a certain local behavior of the gauge bundle V, develops a singularity which 'geometrically engineers' a three-dimensional gauge theory of gauge group and matter content depending on G and V, see ref. . In connection with the N = 2 version of the decoupling limit Im S → ∞, eq. (3.11), this leads to a very concrete relation between the 3-fold periods and the world-sheet instanton corrections to the heterotic hypermultiplet moduli space in the hyperkähler limit. This could be explicitly checked against the known result, at least in the case dual to 3d SU(2) SYM theory.
N = 1 supersymmetry
The above situation has an interesting N = 1 counterpart. Namely, it has been conjectured in  that one may use the heterotic string on a certain 3-fold singularity to geometrically engineer (the moduli space of) interesting 2-dimensional field theories. The 3-fold singularities are of the type (8.1), where H(x_k) describes an ADE surface singularity. The idea is the obvious generalization of the above, by first applying heterotic/F-theory duality and then exploiting the relation of ref.  between similar 4-fold singularities and Kazama-Suzuki models. We here make this correspondence more precise.
Recall that the identification of  proceeded through the comparison of the vacuum and soliton structure of a type IIA compactification on a Calabi-Yau four-fold, with its superpotential from four-form flux, and the Landau-Ginzburg description of the deformed Kazama-Suzuki coset models . The four-folds relevant for this connection are local manifolds that are fibered by singular 2-dimensional ALE spaces and their deformations. The ADE type of the singularity in the fiber determines the numerator G of the N = 2 coset G/H, while the flux determines the denominator H and the level. More precisely, the fluxes studied in  are the minimal fluxes corresponding to a minuscule weight of G. These give rise to the so-called SLOHSS models (simply-laced, level one, Hermitian symmetric space), which form the subset of Kazama-Suzuki models admitting a Landau-Ginzburg description. This identification was checked for the A-series in ref.  and worked out in detail for D and E in ref. . It has remained an interesting question to identify the theories for non-minimal flux, see e.g. the conclusions of .
An important clue to address this question has come from the study of matrix factorizations and their deformation theory. In particular, it was observed in ref. , see also ref. , that the superpotential resulting from the deformation theory of certain matrix factorizations in N = 2 minimal models coincides with the Landau-Ginzburg potential of a corresponding SLOHSS model. More precisely, the matrix factorizations are associated with the fundamental weights of ADE simple Lie algebras via the standard McKay correspondence, and the relevant subset consists of those matrix factorizations corresponding to the minuscule weights. We argue that this coincidence of superpotentials can be explained via heterotic/F-theory/type II duality.
The missing link is provided by ref. . Among the results of this work is that the matrix factorizations of ADE minimal models can be used to describe bundles on partial resolutions (Grassmann blow-ups) of the threefold singularities of ADE type (8.1) that appear in the above-mentioned conjecture of ref. . The bundles have support only on the smooth part of the partial blow-up, which is important for applying the arguments of ref. .
The combination of the last three paragraphs suggests that we should couple the heterotic worldsheet to the matrix factorizations of ref. ! This can be implemented by using the (0, 2) linear sigma model, resp. (0, 2) Landau-Ginzburg models , along the lines of , in combination with an appropriate non-compact Landau-Ginzburg model to describe the fibration structure. The resulting strongly coupled heterotic worldsheet theories are conjectured to be dual to those 2d field theories that are engineered on the four-fold side. The ADE type of the minimal model is that of the fiber of the four-fold, while the fundamental weight specifies the choice of four-form flux.
As formulated, the above conjecture makes sense for all fundamental weights. The main testable prediction is thus the coincidence of the deformation superpotentials of the higher rank matrix factorizations corresponding to non-minuscule fundamental weights with the appropriate periods of the four-folds of refs. . Note that the Kazama-Suzuki models only appear for the minuscule weights, and that we have not covered the case of fluxes corresponding to non-fundamental weights. We plan to return to these questions in the near future.
Conclusions
In this note we study the variation of Hodge structure on the complex structure moduli spaces of certain Calabi-Yau 4-folds. These moduli spaces capture certain effective couplings of the N = 1 supergravity theory arising from the associated F-theory 4-fold compactification. Furthermore, through a chain of dualities we relate such F-theory scenarios to heterotic compactifications with non-trivial gauge bundles and small instanton 5-branes, and to type II compactifications with branes.
The connection to the heterotic string is made through the stable degeneration limit of the F-theory 4-fold . Taking this limit specifies the corresponding heterotic geometry. Due to the employed F-theory/heterotic duality, the resulting heterotic geometry is given in terms of elliptically fibered Calabi-Yau 3-folds. Furthermore, in the simplest cases, the geometric bundle moduli are described in terms of the spectral cover, which is also encoded in the 4-fold geometry in the stable degeneration limit . Alternatively, depending on the details of the F-theory 4-fold, we describe the moduli space of heterotic 5-branes instead of bundle moduli. On the other hand, the link to the open-closed type II string theories is achieved through the weak coupling limit , and it realizes the open-closed duality introduced in ref. .
Starting from the F-theory 4-fold geometry we discuss in detail non-trivial background fluxes and compute the N = 1 superpotential, which couples to the moduli fields described by the variation of Hodge structure. We trace these F-terms along the chain of dualities to the open-closed and heterotic string compactifications. For the heterotic string we find that, depending on the characteristics of the 4-fold flux quanta, these fluxes either deform the bulk geometry of the heterotic string to generalized Calabi-Yau manifolds , or they give rise to superpotential terms for the bundle/five-brane moduli fields. The superpotentials associated to the flux quanta encode obstructions to deformations of the spectral cover. Furthermore, we show that in the stable degeneration limit the holomorphic Chern-Simons functional of the heterotic gauge bundle gives rise to these F-terms for the geometric bundle moduli.
The underlying 4-fold description of the heterotic and the type II strings allows us to extract (non-perturbative) corrections to the stable degeneration limit and the weak coupling limit, respectively. We discuss the nature of these corrections, and we find that they encode world-sheet instanton, D-instanton and space-time instanton corrections, depending on the specific theory in the analyzed web of dualities. In order to exhibit the origin of these corrections we compare our analysis with the analogous N = 2 scenarios, which have been studied in detail in refs. .
Apart from these F-term couplings, we demonstrate that our techniques are also suitable for extracting the Kähler potentials for the metrics of the studied moduli spaces in appropriate semi-classical regimes. In ref.  the connection to the open-closed Kähler potential for 3-fold compactifications with 7-branes has been developed. Here, starting from the Kähler potential of the complex structure moduli space of the Calabi-Yau 4-fold, we also extract the corresponding Kähler potential associated to the combined moduli space of the complex structure and certain moduli of the heterotic gauge bundle. To leading order these Kähler potentials are in agreement with the results obtained by dimensional reduction of higher dimensional supergravity theories . In addition, our calculation predicts subleading corrections.
Thus, the duality relations used here, together with the presented computational techniques, offer novel tools to extract (non-perturbative) corrections to N = 1 string compactifications arising from F-theory, from heterotic strings, or from type II strings in the presence of branes. It would be interesting to confirm the anticipated quantum corrections by independent computations and to understand in greater detail the physics of the various (non-perturbative) corrections discussed here. In particular, our analysis suggests a connection to the quantum corrections in the hypermultiplet sector of N = 2 compactifications analyzed in refs. .
Our techniques should also be useful to address phenomenologically interesting questions in the context of F-theory, type II or heterotic string compactifications. As discussed in sects. 5 and 6, the finite S corrections to the superpotential capture the backreaction of the geometric moduli on the bundle moduli. Such corrections are a new and important ingredient in fixing the bundle moduli in phenomenological applications, as emphasized, e.g., in ref. . Thus the calculated (quantum corrected) superpotentials provide a starting point to investigate moduli stabilization and/or supersymmetry breaking for the class of models discussed here. In the context of the heterotic string it seems plausible that our approach can be extended to more general heterotic bundle configurations, which can be described in terms of monad constructions . Such an extension is not only interesting from a conceptual point of view, but it also gives a handle on analyzing the effective theory of phenomenologically appealing heterotic bundle configurations, as discussed, for instance, in ref. .
In section 8, we propose an explanation of, and conjecture an extension of, an observation originally made by Warner, which relates the deformation superpotential of matrix factorizations of minimal models to the flux superpotential of local four-folds near an ADE singularity. One of the results of this connection is the suggestion that (higher rank) matrix factorizations should also play a role in constructing the (0, 2) worldsheet theories of heterotic strings.
The presented approach of calculating deformation superpotentials by studying adequate Hodge problems is ultimately linked to the derivation of effective obstruction superpotentials with matrix factorization or, more generally, worldsheet techniques . While the latter approach leads to effective superpotentials up to field redefinitions, our computations give rise to effective superpotentials in terms of flat coordinates, due to the underlying integrability of the associated Hodge problem. It would be interesting to explore the physical origin of, and the necessary conditions for, the emergence of such a flat structure in the context of the deformation spaces studied in this note.
The distinguished local coordinate v = a/b on C* parametrizes a patch near the local singularity associated with the bundle/brane data for a Lie group G . For G = SU(n), v appears linearly, which leads to a substantial simplification of the Hodge variation problem, as described in the appendices of refs. .
Perturbative gauge symmetry of the heterotic string. The perturbative gauge symmetry of the dual heterotic string is determined by the singularities in the elliptic fibration of the K3 fiber Y. There is a simple technique to read off fibration structures of the CY 4-fold X_B from the toric polyhedra, described in refs. .
Namely, a fibration X_B → B_{4−n} with fiber a Calabi-Yau n-fold Y_n corresponds to the existence of a linear subspace H of codimension 4 − n, such that the integral points in the set H ∩ ∆* define the toric polyhedron of Y_n.
In the present case, the toric polyhedron ∆*_{K3} for the K3 fiber Y is given as the convex hull of the points in ∆* lying on the subspace H associated with the toric data (A.3). The Z_3 orbifold singularity is captured by r^3 = pq in terms of the invariant monomials p = Y^3 X'^3, q = Z^3 X'^3 and r = ZY X'^2. Then, to leading order, the singularities of the elliptic fiber E in the vicinity of a = 0 and of b = 0 are respectively given by

p_{a→0}(K3) = a^2 q + q^2 + qr + r^3,    p_{b→0}(K3) = b^2 q + q^2 + bqr + r^3.
Using a computer algebra system, such as ref. , it is straightforward to check that the polynomials p_{a→0}(K3) and p_{b→0}(K3) correspond to the ADE singularities SU(6) and E_6, respectively. In fact it turns out that the same answer is obtained by naively applying the method developed in refs.  for the standard model of the elliptic fiber, which implements the Kodaira classification of singular elliptic fibers in the language of toric polyhedra, such that the orbifold group is taken into account automatically. The polyhedron ∆*_{K3} splits into a top and a bottom piece Ξ_+ and Ξ_−, whose points build up the affine Dynkin diagrams of SU(6) and E_6, respectively. As asserted in , these toric vertices correspond to two ADE singularities of the same type, in agreement with the direct computation. Moreover, deleting the vertex ν_7 ∈ ∆, which is associated with the exceptional toric divisor that describes the brane/bundle modulus ẑ, the same analysis produces a K3 fiber with two ADE singularities of type E_6, leading to the pattern (6.8).
Moduli and Picard-Fuchs system
The moduli z_a are related to the parameters a_i in (6.1) by z_a = (−1)^{l^a_0} Π_i a_i^{l^a_i}, where the l^a_i are the charge vectors that define the phase of the linear sigma model for the mirror X_A. For the phase considered in , these are given in (6.9). The complex structure modulus z ∼ e^{2πit} mirror to the volume of the generic quintic fiber, the brane/bundle modulus ẑ ∼ e^{2πit̂} and the distinguished modulus z_S ∼ e^{2πiS} capturing the decoupling limit are given by

z = z_1 z_2 = − a_1 a_2 a_3 a_4 a_5 / a_0^5,    ẑ = z_2 = − a_1 a_6 / (a_0 a_7),    z_S = z_3 = a_7 a_8 / a_1^2.
A.2. Heterotic 5-branes
Degree 18 hypersurface in P^4(1, 1, 1, 6, 9). The polyhedra for the mirror pair (X_A, X_B) of 4-folds dual to the 3-fold compactifications on (Z_A, Z_B) are defined as the convex hull of the points given below. ∆ is the enhanced polyhedron for X^{nc}_A in Table 2 of , with the point ν_9 added in the compactification X_A of X^{nc}_A. The polyhedron ∆_3 for the 3-fold Z_A, defined as a degree 18 hypersurface in P^4(1, 1, 1, 6, 9), is given by the points on the hypersurface ν_{i,5} = 0, with the last entry deleted. The vertices of the dual polyhedron ∆*_3 of ∆_3 are given by the points of ∆* with ν*_{i,5} = 0 and one extra vertex (−12, 6, 1, 1). On the r.h.s. we have given the selection Ξ of points in ∆* used to define local coordinates in (7.2). The relation to the coordinates used there is Z = Z'ab, v = a/b.
The relevant phase of the Kähler cone is the one considered in . In the coordinates (A.5), the brane modulus in (6.6) is given by $\hat z = z$ |
/**
* Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
* See LICENSE in the project root for license information.
*/
package com.linkedin.parser.test.junit3.java;
import android.test.ActivityInstrumentationTestCase2;
public class JUnit3ActivityInstrumentationTestCase2 extends ActivityInstrumentationTestCase2 {
public JUnit3ActivityInstrumentationTestCase2() {
super(null);
}
public void testJUnit3ActivityInstrumentationTestCase2() throws Exception {
assertTrue(true);
}
}
|
{-# LANGUAGE NoImplicitPrelude, OverloadedStrings #-}
module Hc2Prosody.SecureFile (securelyOpenFile) where
import BasicPrelude
import Data.Text (unpack)
import System.IO (Handle, IOMode(ReadMode), openBinaryFile)
import System.Posix.Types (FileMode)
import System.Posix.Files
import Filesystem.Path.CurrentOS (encodeString, toText)
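-- | A file counts as secure when neither the group-read nor the other-read
-- bit is set, i.e. its mode intersected with those bits is empty.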
isSecureFile :: FilePath -> IO Bool
isSecureFile fp =
(== nullFileMode) . intersectFileModes insecureFilePermissions . fileMode <$> getFileStatus (encodeString fp)
where
insecureFilePermissions :: FileMode
insecureFilePermissions = unionFileModes groupReadMode otherReadMode
securelyOpenFile :: FilePath -> IO Handle
securelyOpenFile fp = do
secure <- liftIO $ isSecureFile fp
if secure then do
liftIO $ openBinaryFile (encodeString fp) ReadMode
else fail . unpack $ "File " ++ filenameText ++ " must not be readable by other users"
where
filenameText :: Text
    -- 'toText' returns 'Left' for a lossy decode, so annotate that case.
    filenameText = either (++ " [encoding issue]") id $ toText fp |
def _set_path_if_exists(self, healthcheck_json):
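    """Copy the path label's value into the health-check JSON and mark the
    protocol as HTTP when the label is present."""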
if self._label_exists(self.PATH_LABEL):
healthcheck_json['path'] = self._get_label_value(self.PATH_LABEL)
healthcheck_json['protocol'] = 'HTTP'
return healthcheck_json |
/**
* Add an array operation to the instruction list as marker on the code position.
*
* @param op
* the operation
* @param type
* the array/component type of the array
* @param javaCodePos
* the code position/offset in the Java method
* @param lineNumber
* the line number in the Java source code
*/
protected void addArrayInstruction( @Nonnull ArrayOperator op, @Nonnull AnyType type, int javaCodePos, int lineNumber ) {
boolean useGC = options.useGC();
if( useGC ) {
int idx;
switch( op ) {
case GET:
case GET_S:
case GET_U:
idx = StackInspector.findInstructionThatPushValue( instructions, 1, javaCodePos ).idx;
break;
case SET:
idx = StackInspector.findInstructionThatPushValue( instructions, 2, javaCodePos ).idx;
break;
case LEN:
idx = instructions.size();
break;
default:
idx = -1;
}
if( idx >= 0 ) {
ArrayType arrayType = types.arrayType( type );
instructions.add( idx, new WasmStructInstruction( StructOperator.GET, arrayType, arrayType.getNativeFieldName(), javaCodePos, lineNumber, types ) );
}
}
WasmArrayInstruction arrayInst = new WasmArrayInstruction( op, type, types, javaCodePos, lineNumber );
instructions.add( arrayInst );
SyntheticFunctionName name = arrayInst.createNonGcFunction( useGC );
if( name != null ) {
functions.markAsNeeded( name, !name.istStatic() );
functions.markAsImport( name, name.getAnnotation() );
}
} |
// Thin types
type ArchNameStr = string; // uncertain of format
type BCInstrStr = string; // uncertain of format
type BinaryStr = string; // uncertain of format, may be arch-dependent
type BlockId = number; // unsigned int
type CallDestStr = string; // uncertain of format
type CodeStr = string; // uncertain of format
type CodeLen = number; // uint32_t
type ConfigFileStr = string; // maybe empty, should I make this null if empty?
type CounterName = string; // uncertain of format
type DisasmString = string; // uncertain of format
type EventCount = number; // uint64_t
type ExtraString = string; // uncertain of format
type FileName = string; // uncertain of format
type FuncId = number; // uint32_t
type FuncName = string; // uncertain of format
type FuncString = string; // uncertain of format
type GuardString = string; // uncertain of format
type InstrId = number; // uint32_t
type InstrLen = number; // uint32_t
type LineNum = number; // int
type Offset = number; // int32_t
type Opcode = string; // Some sort of enum
type OptIndex = number; // int
type ProfCount = number; // uint64_t
type ProfileString = string; // uncertain of format
type RepoSchemaStr = string; // uncertain of format
type SHA1 = string; // SHA1.toString()
type SSATmpId = number; // uint32_t
type TCA = string; // unsigned char*, casted to void* for sformat
type TransId = number; // int32_t
type TypeString = string; // uncertain of format
type UnitFuncStr = string; // maybe fix? see TODO in tc-print.cpp
type TCDump = {
configFile: ConfigFileStr;
repoSchema: RepoSchemaStr;
translations: [Translation | null];
}
type Translation = {
transRec: TransRec;
blocks: [Block];
archName: ArchNameStr;
perfEvents: EventCounts;
regions: {
main: TCARegionInfo | null;
cold: TCARegionInfo | null;
frozen: TCARegionInfo | null;
};
transId: TransId;
ir_annotation: PrintIR_Unit | string;
}
type TransRec = {
id: TransId;
src: TransRecSrc;
kind: TransKind;
hasLoop: boolean;
aStart: TCA;
aLen: CodeLen;
coldStart: TCA;
coldLen: CodeLen;
frozenStart: TCA;
frozenLen: CodeLen;
}
type TransRecSrc = {
sha1: SHA1;
funcId: FuncId;
funcName: FuncName;
resumeMode: ResumeMode;
hasThis: boolean;
prologue: boolean;
bcStartOffset: Offset;
guards: [GuardString];
}
enum ResumeMode {
None,
Async,
GenIter,
}
enum TransKind {
TransAnchor,
TransInterp,
TransLive,
TransProfile,
TransOptimize,
TransLivePrologue,
TransProfPrologue,
TransOptPrologue,
TransInvalid,
}
type Block = {
sha1: SHA1;
start: Offset;
end: Offset;
unit: UnitFuncStr | null;
}
type EventType =
"cycles" |
"branch-misses" |
"L1-icache-misses" |
"L1-dcache-misses" |
"cache-misses" |
"LLC-store-misses" |
"iTLB-misses" |
"dTLB-misses" |
string; // Technically there can be user-defined events too
type EventCounts = {[event in EventType]: EventCount;}
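// Hypothetical example: since EventType includes the bare string arm, an
// EventCounts value may mix well-known and user-defined events, e.g.
// { "cycles": 120000, "branch-misses": 42, "my-custom-event": 7 }.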
type TCARegionInfo = {
tcRegion: TCRegion;
ranges: [TCARangeInfo];
}
enum TCRegion {
hot,
main,
profile,
cold,
frozen
}
type TCARangeInfo = {
start: TCA;
end: TCA;
bc: Offset | null;
sha1: SHA1 | null;
instrStr: BCInstrStr | null;
lineNum: LineNum | null;
disasm: [TCADisasmInfo];
ir_annotation?: {
area: Area;
start: TCA;
end: TCA;
instrId: InstrId;
blockId: BlockId;
};
}
type TCADisasmInfo = {
binary: BinaryStr;
callDest: CallDestStr;
code: CodeStr;
perfEvents: EventCounts;
ip: TCA;
instrLen: InstrLen;
}
enum Area {
Main,
Cold,
Frozen
}
type PrintIR_Unit = {
transContext: PrintIR_TransContext;
blocks: {[x in string]: PrintIR_Block;};
// This is actually a map from BlockId to Block, but with
// the BlockIds interpreted as strings for JSON object compatibility
inliningDecision: [PrintIR_InliningDecision];
}
type PrintIR_TransContext = {
kind: TransKind;
id: TransId;
optIndex: OptIndex;
srcKey: PrintIR_SrcKey;
funcName: FuncName;
sourceFile: FileName;
startLine: LineNum;
endLine: LineNum;
}
type PrintIR_SrcKey = {
funcStr: FuncString;
  unitStr: UnitFuncStr;
prologue: boolean;
offset: Offset;
  resumeMode: PrintIR_ResumeMode;
hasThis: boolean;
}
type PrintIR_ResumeMode = "" | "ra" | "rg"; // renamed from ResumeMode to avoid clashing with the enum above
type PrintIR_Block = {
id: BlockId;
isCatch: boolean;
hint: Hint;
profCount: ProfCount;
next: BlockId | null;
instrs: {[x in string]: PrintIR_Instr;};
// This is actually a map from InstrId to Instr, but with
// the InstrIds interpreted as strings for JSON object compatibility
}
enum Hint {
Unused,
Unlikely,
Neither,
Likely,
}
type PrintIR_Instr = {
rawMarker: FuncString | null;
phiPseudoInstrs: [PrintIR_PhiPseudoInstrs];
opcode: Opcode;
typeParam: TypeString | null;
guard: GuardString | null;
extra: ExtraString | null;
id: InstrId;
taken: BlockId | null;
tcRanges: [PrintIR_TCRange];
dsts: [PrintIR_SSATmp];
offset: Offset;
profileData: PrintIR_Profile;
srcs: [PrintIR_SSATmp] | null; // exactly one of srcs and counterName should
counterName: CounterName | null; // be defined
}
type PrintIR_PhiPseudoInstrs = {
srcs: [[PrintIR_SSATmp, BlockId]];
dst: PrintIR_SSATmp;
}
type PrintIR_SSATmp = {
id: SSATmpId;
type: TypeString;
}
type PrintIR_TCRange = {
area: Area;
start: TCA;
end: TCA;
disasm: string;
}
type PrintIR_Profile = {
offset: Offset;
name: ProfileString;
data: {profileType: ProfileType};
// the rest of the keys in "data" will depend on the value of "profileType"
}
enum ProfileType {
ArrayAccessProfile,
ArrayKindProfile,
CallTargetProfile,
ClsCnsProfile,
DecRefProfile,
IncRefProfile,
MethProfile,
ReleaseVVProfile,
SwitchProfile,
TypeProfile,
}
type PrintIR_InliningDecision = {
wasInlined: boolean;
offset: Offset;
callerName: FuncName | null;
calleeName: FuncName | null;
reason: string;
}
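// A minimal, hypothetical consumer of the shapes above, as a sanity check on
// how they compose. The dump file name and the use of Node's fs module are
// assumptions, not part of the tc-print output spec.
import { readFileSync } from "fs";

const dump: TCDump = JSON.parse(readFileSync("tc_dump.json", "utf8"));

// Count translations per kind; translation slots may be null (see TCDump).
const byKind = new Map<string, number>();
for (const translation of dump.translations) {
  if (translation === null) continue;
  const kind = TransKind[translation.transRec.kind]; // numeric enum -> name
  byKind.set(kind, (byKind.get(kind) ?? 0) + 1);
}
console.log(byKind);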
|
#include <iostream>
#include <algorithm>
#include <vector>
#include <stdio.h>
#include <queue>
using namespace std;
#define fi(a,b,c) for(int a=b;a<=c;a++)
#define fo(a,b,c) for(int a=b;a>=c;a--)
#define long long long
#define pii pair<int,int>
#define mp make_pair
const int N = 2e3+1;
const int Rw[2] = {0,1};
const int Cl[2] = {1,0};
string s;
int n, k;
int a[N][N], ck[N][N], d[N][N];
vector<int> ans;
vector<pii> tmp;
deque<pii> qu;
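// Approach: Bfs_Init computes, for every cell, the minimum number of
// non-'a' letters on a path from (1,1). Bfs then starts from the deepest
// cells reachable within k changes and extends the answer one anti-diagonal
// at a time, keeping only frontier cells carrying the lexicographically
// smallest next letter.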
void Clear(){
while (!qu.empty()) qu.pop_front();
}
void Bfs_Init(){
qu.push_back(mp(1,1));
ck[1][1] = true;
d[1][1] = (a[1][1] != 'a');
while (!qu.empty()){
int x = qu.front().first, y = qu.front().second; qu.pop_front();
fi(l,0,1){
int u = x+Rw[l], v = y+Cl[l];
if (u > n || v > n) continue;
d[u][v] = min(d[u][v], d[x][y] + (a[u][v]!='a'));
if (!ck[u][v] && d[u][v] <= k){
qu.push_back(mp(u,v));
ck[u][v] = true;
}
}
}
}
void Bfs(){
Clear();
if (k == 0){
qu.push_back(mp(1,1));
ans.push_back(a[1][1]);
}else{
int mx = 0;
fi(i,1,n)
fi(j,1,n)
if (d[i][j] <= k)
if (i+j-1 > mx){
mx = i+j-1; Clear();
qu.push_back(mp(i,j));
}else
if (i+j-1 == mx)
qu.push_back(mp(i,j));
if (qu.empty()){
fi(i,1,2*n-1)
ans.push_back('a');
return;
}else
fi(i,1,mx)
ans.push_back('a');
}
int mn = 1e9+1;
while (!qu.empty()){
int x = qu.front().first, y = qu.front().second; qu.pop_front();
fi(k,0,1){
int u = x+Rw[k], v = y+Cl[k];
if (u > n || v > n) continue;
if (ck[u][v]) continue;
tmp.push_back(mp(u,v));
ck[u][v] = true; mn = min(mn, a[u][v]);
}
if (qu.empty()){
if (tmp.empty()) continue;
ans.push_back(mn);
fi(i,0,tmp.size()-1)
if (a[tmp[i].first][tmp[i].second] == mn)
qu.push_back(tmp[i]);
mn = 'z'+1;
tmp.clear();
}
}
}
int main(){
cin >> n >> k;
fi(i,1,n){
cin >> s;
fi(j,1,n)
a[i][j] = s[j-1];
}
fi(i,1,n)
fi(j,1,n)
d[i][j] = 1e9+1;
    if (k != 0) Bfs_Init();
    Bfs();
fi(i,0,(int)ans.size()-1)
cout << (char)ans[i];
}
|
ALLEN PARK -- The Detroit Lions were determined not to go into the NFL draft today without their No. 1 target signed, sealed and delivered. They finally got their man.

The Lions signed Georgia quarterback Matthew Stafford to a six-year contract late Friday and, according to a league source, Stafford will receive $41.7 million in guaranteed money and $72 million overall. Stafford will earn about $12 million per season over his six-year deal, while Matt Ryan, who was the No. 3 pick last year to the Atlanta Falcons, had inked a deal worth about $11 million per season.

Negotiations between the Lions and Stafford ramped up in intensity Wednesday after Detroit had reached an agreement on contract terms with Wake Forest linebacker Aaron Curry. The Lions wanted to have a Plan B in place in case contract talks with Stafford -- their No. 1 target -- broke down. The Lions wanted to get a deal done with their top draft pick before the start of the draft at 4 p.m. Saturday.

This week, Lions general manager Martin Mayhew said he liked Detroit's chances of getting their potential No. 1 pick signed early. "I think very good," Mayhew said. "That was our plan -- we talked about that back at the combine -- and it's very important. We plan on getting something done prior to making that selection."

Under league rules, the Lions are the only team allowed to sign a potential draft pick to a contract before the start of the draft. "I think you have that opportunity to get that done. You know you have that player through the entire offseason, you don't have a contentious situation with the player, you don't have a holdout," Mayhew said. "The upside is that you definitely want to get that done when you've got the opportunity early."

Two years ago, the Oakland Raiders took quarterback JaMarcus Russell with the top overall choice and he was a holdout through the offseason and all of training camp. He started one game as a rookie. Mayhew said some players handle a holdout better than others, but the Lions are in a position where they do not have to deal with that issue.

"It varies from player to player. I think some guys are more affected by it than others," Mayhew said. "Some guys can miss two days and they'll be so far behind it'll be hard for them to catch up. Some guys can miss a week and jump right in. I think it's important to know we've got a player signed and he'll be here whenever the rookies come back, he'll be here throughout training camp. It's just one less thing for that player to have to worry about."

After the Lions get the situation at No. 1 settled, they still have a lot of work to do. They have five total picks in the top 82: No. 1, 20, 33, 65 and 82. The Lions have had the NFL's worst-ranked defense in each of the past two seasons, and while Mayhew wants to fill some roster holes on that side of the ball, he said the team will draft for value -- even if that means taking offensive players.

Detroit, which did not receive much action in terms of trading down from the No. 1 pick, will entertain offers to move down with its other picks. Mayhew said his philosophy would be to trade down and acquire more draft picks instead of trading up and losing them. |
/**
* Set of elements used to provide summary information on entries.
* <p>
* <strong>Constant fields:</strong>
* <ul>
* <li>
* {@linkplain com.tools20022.metamodel.MMMessageElementContainer#getMessageElements
* messageElements} =
* <ul>
* <li>
* {@linkplain com.tools20022.repository.msgpart.TotalTransactions6#mmTotalEntries
* TotalTransactions6.mmTotalEntries}</li>
* <li>
* {@linkplain com.tools20022.repository.msgpart.TotalTransactions6#mmTotalCreditEntries
* TotalTransactions6.mmTotalCreditEntries}</li>
* <li>
* {@linkplain com.tools20022.repository.msgpart.TotalTransactions6#mmTotalDebitEntries
* TotalTransactions6.mmTotalDebitEntries}</li>
* <li>
* {@linkplain com.tools20022.repository.msgpart.TotalTransactions6#mmTotalEntriesPerBankTransactionCode
* TotalTransactions6.mmTotalEntriesPerBankTransactionCode}</li>
* </ul>
* </li>
* <li>
* {@linkplain com.tools20022.metamodel.MMTopLevelDictionaryEntry#getDataDictionary
* dataDictionary} =
* {@linkplain com.tools20022.repository.GeneratedRepository#dataDict
* GeneratedRepository.dataDict}</li>
* <li>
* {@linkplain com.tools20022.metamodel.MMRepositoryConcept#getRegistrationStatus
* registrationStatus} =
* com.tools20022.metamodel.MMRegistrationStatus.REGISTERED</li>
* <li>{@linkplain com.tools20022.metamodel.MMRepositoryConcept#getName name} =
* "TotalTransactions6"</li>
* </ul>
*/
@XmlAccessorType(XmlAccessType.NONE)
@XmlType(name = "TotalTransactions6", propOrder = {"totalEntries", "totalCreditEntries", "totalDebitEntries", "totalEntriesPerBankTransactionCode"})
public class TotalTransactions6 {
final static private AtomicReference<MMMessageComponent> mmObject_lazy = new AtomicReference<>();
@XmlElement(name = "TtlNtries")
protected NumberAndSumOfTransactions4 totalEntries;
/**
*
<p>
* <strong>Constant fields:</strong>
* <ul>
* <li>{@linkplain com.tools20022.metamodel.MMMessageAssociationEnd#getType
* type} =
* {@linkplain com.tools20022.repository.msgpart.NumberAndSumOfTransactions4
* NumberAndSumOfTransactions4}</li>
* <li>
* {@linkplain com.tools20022.metamodel.MMMessageElement#getComponentContext
* componentContext} =
* {@linkplain com.tools20022.repository.msgpart.TotalTransactions6
* TotalTransactions6}</li>
* <li>{@linkplain com.tools20022.metamodel.MMMessageConstruct#getXmlTag
* xmlTag} = "TtlNtries"</li>
* <li>
* {@linkplain com.tools20022.metamodel.MMRepositoryConcept#getRegistrationStatus
* registrationStatus} =
* com.tools20022.metamodel.MMRegistrationStatus.PROVISIONALLY_REGISTERED</li>
* <li>{@linkplain com.tools20022.metamodel.MMRepositoryConcept#getName
* name} = "TotalEntries"</li>
* </ul>
*/
public static final MMMessageAssociationEnd<TotalTransactions6, Optional<NumberAndSumOfTransactions4>> mmTotalEntries = new MMMessageAssociationEnd<TotalTransactions6, Optional<NumberAndSumOfTransactions4>>() {
{
componentContext_lazy = LazyReference.create(() -> com.tools20022.repository.msgpart.TotalTransactions6.mmObject());
isDerived = false;
xmlTag = "TtlNtries";
registrationStatus = MMRegistrationStatus.PROVISIONALLY_REGISTERED;
name = "TotalEntries";
definition = "Specifies the total number and sum of debit and credit entries.";
maxOccurs = 1;
minOccurs = 0;
isComposite = true;
type_lazy = LazyReference.create(() -> NumberAndSumOfTransactions4.mmObject());
}
@Override
public Optional<NumberAndSumOfTransactions4> getValue(TotalTransactions6 obj) {
return obj.getTotalEntries();
}
@Override
public void setValue(TotalTransactions6 obj, Optional<NumberAndSumOfTransactions4> value) {
obj.setTotalEntries(value.orElse(null));
}
};
@XmlElement(name = "TtlCdtNtries")
protected NumberAndSumOfTransactions1 totalCreditEntries;
/**
*
<p>
* <strong>Constant fields:</strong>
* <ul>
* <li>{@linkplain com.tools20022.metamodel.MMMessageAssociationEnd#getType
* type} =
* {@linkplain com.tools20022.repository.msgpart.NumberAndSumOfTransactions1
* NumberAndSumOfTransactions1}</li>
* <li>
* {@linkplain com.tools20022.metamodel.MMMessageElement#getComponentContext
* componentContext} =
* {@linkplain com.tools20022.repository.msgpart.TotalTransactions6
* TotalTransactions6}</li>
* <li>{@linkplain com.tools20022.metamodel.MMMessageConstruct#getXmlTag
* xmlTag} = "TtlCdtNtries"</li>
* <li>
* {@linkplain com.tools20022.metamodel.MMRepositoryConcept#getRegistrationStatus
* registrationStatus} =
* com.tools20022.metamodel.MMRegistrationStatus.PROVISIONALLY_REGISTERED</li>
* <li>{@linkplain com.tools20022.metamodel.MMRepositoryConcept#getName
* name} = "TotalCreditEntries"</li>
* </ul>
*/
public static final MMMessageAssociationEnd<TotalTransactions6, Optional<NumberAndSumOfTransactions1>> mmTotalCreditEntries = new MMMessageAssociationEnd<TotalTransactions6, Optional<NumberAndSumOfTransactions1>>() {
{
componentContext_lazy = LazyReference.create(() -> com.tools20022.repository.msgpart.TotalTransactions6.mmObject());
isDerived = false;
xmlTag = "TtlCdtNtries";
registrationStatus = MMRegistrationStatus.PROVISIONALLY_REGISTERED;
name = "TotalCreditEntries";
definition = "Specifies the total number and sum of credit entries.";
maxOccurs = 1;
minOccurs = 0;
isComposite = true;
type_lazy = LazyReference.create(() -> NumberAndSumOfTransactions1.mmObject());
}
@Override
public Optional<NumberAndSumOfTransactions1> getValue(TotalTransactions6 obj) {
return obj.getTotalCreditEntries();
}
@Override
public void setValue(TotalTransactions6 obj, Optional<NumberAndSumOfTransactions1> value) {
obj.setTotalCreditEntries(value.orElse(null));
}
};
@XmlElement(name = "TtlDbtNtries")
protected NumberAndSumOfTransactions1 totalDebitEntries;
/**
*
<p>
* <strong>Constant fields:</strong>
* <ul>
* <li>{@linkplain com.tools20022.metamodel.MMMessageAssociationEnd#getType
* type} =
* {@linkplain com.tools20022.repository.msgpart.NumberAndSumOfTransactions1
* NumberAndSumOfTransactions1}</li>
* <li>
* {@linkplain com.tools20022.metamodel.MMMessageElement#getComponentContext
* componentContext} =
* {@linkplain com.tools20022.repository.msgpart.TotalTransactions6
* TotalTransactions6}</li>
* <li>{@linkplain com.tools20022.metamodel.MMMessageConstruct#getXmlTag
* xmlTag} = "TtlDbtNtries"</li>
* <li>
* {@linkplain com.tools20022.metamodel.MMRepositoryConcept#getRegistrationStatus
* registrationStatus} =
* com.tools20022.metamodel.MMRegistrationStatus.PROVISIONALLY_REGISTERED</li>
* <li>{@linkplain com.tools20022.metamodel.MMRepositoryConcept#getName
* name} = "TotalDebitEntries"</li>
* </ul>
*/
public static final MMMessageAssociationEnd<TotalTransactions6, Optional<NumberAndSumOfTransactions1>> mmTotalDebitEntries = new MMMessageAssociationEnd<TotalTransactions6, Optional<NumberAndSumOfTransactions1>>() {
{
componentContext_lazy = LazyReference.create(() -> com.tools20022.repository.msgpart.TotalTransactions6.mmObject());
isDerived = false;
xmlTag = "TtlDbtNtries";
registrationStatus = MMRegistrationStatus.PROVISIONALLY_REGISTERED;
name = "TotalDebitEntries";
definition = "Specifies the total number and sum of debit entries.";
maxOccurs = 1;
minOccurs = 0;
isComposite = true;
type_lazy = LazyReference.create(() -> NumberAndSumOfTransactions1.mmObject());
}
@Override
public Optional<NumberAndSumOfTransactions1> getValue(TotalTransactions6 obj) {
return obj.getTotalDebitEntries();
}
@Override
public void setValue(TotalTransactions6 obj, Optional<NumberAndSumOfTransactions1> value) {
obj.setTotalDebitEntries(value.orElse(null));
}
};
@XmlElement(name = "TtlNtriesPerBkTxCd")
protected List<TotalsPerBankTransactionCode5> totalEntriesPerBankTransactionCode;
/**
*
<p>
* <strong>Constant fields:</strong>
* <ul>
* <li>{@linkplain com.tools20022.metamodel.MMMessageAssociationEnd#getType
* type} =
* {@linkplain com.tools20022.repository.msgpart.TotalsPerBankTransactionCode5
* TotalsPerBankTransactionCode5}</li>
* <li>
* {@linkplain com.tools20022.metamodel.MMMessageElement#getComponentContext
* componentContext} =
* {@linkplain com.tools20022.repository.msgpart.TotalTransactions6
* TotalTransactions6}</li>
* <li>{@linkplain com.tools20022.metamodel.MMMessageConstruct#getXmlTag
* xmlTag} = "TtlNtriesPerBkTxCd"</li>
* <li>
* {@linkplain com.tools20022.metamodel.MMRepositoryConcept#getRegistrationStatus
* registrationStatus} =
* com.tools20022.metamodel.MMRegistrationStatus.PROVISIONALLY_REGISTERED</li>
* <li>{@linkplain com.tools20022.metamodel.MMRepositoryConcept#getName
* name} = "TotalEntriesPerBankTransactionCode"</li>
* </ul>
*/
public static final MMMessageAssociationEnd<TotalTransactions6, List<TotalsPerBankTransactionCode5>> mmTotalEntriesPerBankTransactionCode = new MMMessageAssociationEnd<TotalTransactions6, List<TotalsPerBankTransactionCode5>>() {
{
componentContext_lazy = LazyReference.create(() -> com.tools20022.repository.msgpart.TotalTransactions6.mmObject());
isDerived = false;
xmlTag = "TtlNtriesPerBkTxCd";
registrationStatus = MMRegistrationStatus.PROVISIONALLY_REGISTERED;
name = "TotalEntriesPerBankTransactionCode";
definition = "Specifies the total number and sum of entries per bank transaction code.";
minOccurs = 0;
isComposite = true;
type_lazy = LazyReference.create(() -> TotalsPerBankTransactionCode5.mmObject());
}
@Override
public List<TotalsPerBankTransactionCode5> getValue(TotalTransactions6 obj) {
return obj.getTotalEntriesPerBankTransactionCode();
}
@Override
public void setValue(TotalTransactions6 obj, List<TotalsPerBankTransactionCode5> value) {
obj.setTotalEntriesPerBankTransactionCode(value);
}
};
final static public MMMessageComponent mmObject() {
mmObject_lazy.compareAndSet(null, new MMMessageComponent() {
{
messageElements_lazy = LazyReference.create(() -> Arrays.asList(com.tools20022.repository.msgpart.TotalTransactions6.mmTotalEntries, com.tools20022.repository.msgpart.TotalTransactions6.mmTotalCreditEntries,
com.tools20022.repository.msgpart.TotalTransactions6.mmTotalDebitEntries, com.tools20022.repository.msgpart.TotalTransactions6.mmTotalEntriesPerBankTransactionCode));
dataDictionary_lazy = LazyReference.create(() -> GeneratedRepository.dataDict);
registrationStatus = MMRegistrationStatus.REGISTERED;
name = "TotalTransactions6";
definition = "Set of elements used to provide summary information on entries.";
}
});
return mmObject_lazy.get();
}
public Optional<NumberAndSumOfTransactions4> getTotalEntries() {
return Optional.ofNullable(totalEntries);
}
public TotalTransactions6 setTotalEntries(NumberAndSumOfTransactions4 totalEntries) {
this.totalEntries = totalEntries;
return this;
}
public Optional<NumberAndSumOfTransactions1> getTotalCreditEntries() {
return Optional.ofNullable(totalCreditEntries);
}
public TotalTransactions6 setTotalCreditEntries(NumberAndSumOfTransactions1 totalCreditEntries) {
this.totalCreditEntries = totalCreditEntries;
return this;
}
public Optional<NumberAndSumOfTransactions1> getTotalDebitEntries() {
return Optional.ofNullable(totalDebitEntries);
}
public TotalTransactions6 setTotalDebitEntries(NumberAndSumOfTransactions1 totalDebitEntries) {
this.totalDebitEntries = totalDebitEntries;
return this;
}
public List<TotalsPerBankTransactionCode5> getTotalEntriesPerBankTransactionCode() {
return totalEntriesPerBankTransactionCode == null ? totalEntriesPerBankTransactionCode = new ArrayList<>() : totalEntriesPerBankTransactionCode;
}
public TotalTransactions6 setTotalEntriesPerBankTransactionCode(List<TotalsPerBankTransactionCode5> totalEntriesPerBankTransactionCode) {
this.totalEntriesPerBankTransactionCode = Objects.requireNonNull(totalEntriesPerBankTransactionCode);
return this;
}
} |
/**
* <p>
* An instance of ZipMdrefManager holds the state needed to retrieve the
* contents of an external metadata stream referenced by an
* <code>mdRef</code> element in a Zipped up METS manifest.
* <p>
* Initialize it with the Content (ORIGINAL) Bundle containing all of the
* metadata bitstreams. Match an mdRef by finding the bitstream with the
* same name.
*/
protected static final class MdrefManager implements METSManifest.Mdref {
private File packageFile = null;
private PackageParameters params;
// constructor initializes from package file
private MdrefManager(File packageFile, PackageParameters params) {
super();
this.packageFile = packageFile;
this.params = params;
}
/**
* Make the contents of an external resource mentioned in an
* <code>mdRef</code> element available as an <code>InputStream</code>.
* See the <code>METSManifest.MdRef</code> interface for details.
*
* @param mdref the METS mdRef element to locate the input for.
* @return the input stream of its content.
* @throws MetadataValidationException if validation error
* @throws IOException if IO error
* @see METSManifest
*/
@Override
public InputStream getInputStream(Element mdref)
throws MetadataValidationException, IOException {
String path = METSManifest.getFileName(mdref);
if (packageFile == null) {
throw new MetadataValidationException(
"Failed referencing mdRef element, because there is no package specified.");
}
// Use the 'getFileInputStream()' method from the
// AbstractMETSIngester to retrieve the inputstream for the
// referenced external metadata file.
return AbstractMETSIngester.getFileInputStream(packageFile, params,
path);
}
} |
Judge Lucy Koh of California has rejected a proposed settlement by Apple, Google, and other companies that allegedly agreed to not poach or hire each other's employees. Court documents say that Koh said the $324 million settlement wasn't high enough to compensate for the lost wages employees may have suffered. The companies first proposed the settlement in April; now, they'll need to go back to the drawing board and come back with a higher number in order to avoid taking the issue to trial.
The case in question involves four companies: Apple, Google, Intel, and Adobe. Workers claim that from 2005 to 2009, company executives had routinely collaborated to keep from hiring employees away from their jobs — an anti-competitive practice. Emails from Steve Jobs, Eric Schmidt, and others seemed to show them asking each other to stop recruiting from each other's companies, entering into surreptitious "gentlemen's agreements." Judge Koh certified the case as a class-action lawsuit in October of last year, making over 64,000 workers eligible to receive damages to compensate them for potentially having their wages kept artificially low.
In the court filing, Judge Koh maintains that there is "ample evidence of an overarching conspiracy" between the companies. They've already settled with some of the plaintiffs, and Koh says that among other things, she's concerned that this settlement offers significantly less money to the rest of the employees. She suggests that the minimum amount should be $380 million, based on the amount that the companies paid out when settling previous complaints. The case has been going on for several years now, but if the companies agree to a higher number, it could finally come to a close. |
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.19.4
// source: synchronization/core/entry.proto
package core
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// EntryKind encodes the type of entry represented by an Entry object.
type EntryKind int32
const (
// EntryKind_Directory indicates a directory.
EntryKind_Directory EntryKind = 0
// EntryKind_File indicates a regular file.
EntryKind_File EntryKind = 1
// EntryKind_SymbolicLink indicates a symbolic link.
EntryKind_SymbolicLink EntryKind = 2
// EntryKind_Untracked indicates content (or the root of content) that is
// intentionally excluded from synchronization by Mutagen. This includes
// explicitly ignored content, content that is ignored due to settings (such
// as symbolic links in the "ignore" symbolic link mode), as well as content
// types that Mutagen doesn't understand and/or have a way to propagate
// (such as FIFOs and Unix domain sockets). This type of entry is not
// synchronizable.
EntryKind_Untracked EntryKind = 100
// EntryKind_Problematic indicates content (or the root of content) that
// would normally be synchronized, but which is currently inaccessible to
// scanning. This includes (but is not limited to) content that is modified
// concurrently with scanning, content that is inaccessible due to
// permissions, content that can't be read due to filesystem errors, content
// that cannot be properly encoded given the current settings (such as
// absolute symbolic links found when using the "portable" symbolic link
// mode), and content that Mutagen cannot scan or watch reliably (such as
// directories that are also mount points). This type of entry is not
// synchronizable.
EntryKind_Problematic EntryKind = 101
)
// Enum value maps for EntryKind.
var (
EntryKind_name = map[int32]string{
0: "Directory",
1: "File",
2: "SymbolicLink",
100: "Untracked",
101: "Problematic",
}
EntryKind_value = map[string]int32{
"Directory": 0,
"File": 1,
"SymbolicLink": 2,
"Untracked": 100,
"Problematic": 101,
}
)
func (x EntryKind) Enum() *EntryKind {
p := new(EntryKind)
*p = x
return p
}
func (x EntryKind) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (EntryKind) Descriptor() protoreflect.EnumDescriptor {
return file_synchronization_core_entry_proto_enumTypes[0].Descriptor()
}
func (EntryKind) Type() protoreflect.EnumType {
return &file_synchronization_core_entry_proto_enumTypes[0]
}
func (x EntryKind) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use EntryKind.Descriptor instead.
func (EntryKind) EnumDescriptor() ([]byte, []int) {
return file_synchronization_core_entry_proto_rawDescGZIP(), []int{0}
}
// Entry encodes a filesystem entry (e.g. a directory, a file, or a symbolic
// link). A nil Entry represents an absence of content. A zero-value Entry
// represents an empty Directory. Entry objects should be considered immutable
// and must not be modified.
type Entry struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Kind encodes the type of filesystem entry being represented.
Kind EntryKind `protobuf:"varint,1,opt,name=kind,proto3,enum=core.EntryKind" json:"kind,omitempty"`
// Contents represents a directory entry's contents. It must only be non-nil
// for directory entries.
Contents map[string]*Entry `protobuf:"bytes,5,rep,name=contents,proto3" json:"contents,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Digest represents the hash of a file entry's contents. It must only be
// non-nil for file entries.
Digest []byte `protobuf:"bytes,8,opt,name=digest,proto3" json:"digest,omitempty"`
// Executable indicates whether or not a file entry is marked as executable.
// It must only be set (if appropriate) for file entries.
Executable bool `protobuf:"varint,9,opt,name=executable,proto3" json:"executable,omitempty"`
// Target is the symbolic link target for symbolic link entries. It must be
// non-empty if and only if the entry is a symbolic link.
Target string `protobuf:"bytes,12,opt,name=target,proto3" json:"target,omitempty"`
// Problem indicates the relevant error for problematic content. It must be
// non-empty if and only if the entry represents problematic content.
Problem string `protobuf:"bytes,15,opt,name=problem,proto3" json:"problem,omitempty"`
}
func (x *Entry) Reset() {
*x = Entry{}
if protoimpl.UnsafeEnabled {
mi := &file_synchronization_core_entry_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Entry) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Entry) ProtoMessage() {}
func (x *Entry) ProtoReflect() protoreflect.Message {
mi := &file_synchronization_core_entry_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Entry.ProtoReflect.Descriptor instead.
func (*Entry) Descriptor() ([]byte, []int) {
return file_synchronization_core_entry_proto_rawDescGZIP(), []int{0}
}
func (x *Entry) GetKind() EntryKind {
if x != nil {
return x.Kind
}
return EntryKind_Directory
}
func (x *Entry) GetContents() map[string]*Entry {
if x != nil {
return x.Contents
}
return nil
}
func (x *Entry) GetDigest() []byte {
if x != nil {
return x.Digest
}
return nil
}
func (x *Entry) GetExecutable() bool {
if x != nil {
return x.Executable
}
return false
}
func (x *Entry) GetTarget() string {
if x != nil {
return x.Target
}
return ""
}
func (x *Entry) GetProblem() string {
if x != nil {
return x.Problem
}
return ""
}
var File_synchronization_core_entry_proto protoreflect.FileDescriptor
var file_synchronization_core_entry_proto_rawDesc = []byte{
0x0a, 0x20, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x12, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x22, 0x97, 0x02, 0x0a, 0x05, 0x45, 0x6e, 0x74,
0x72, 0x79, 0x12, 0x23, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4b, 0x69, 0x6e,
0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x35, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65,
0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x72, 0x65,
0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x45,
0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x16,
0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06,
0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74,
0x61, 0x62, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x65, 0x78, 0x65, 0x63,
0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x18,
0x0a, 0x07, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52,
0x07, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x1a, 0x48, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x74,
0x65, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x63, 0x6f, 0x72,
0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
0x38, 0x01, 0x2a, 0x56, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4b, 0x69, 0x6e, 0x64, 0x12,
0x0d, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x10, 0x00, 0x12, 0x08,
0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x79, 0x6d, 0x62,
0x6f, 0x6c, 0x69, 0x63, 0x4c, 0x69, 0x6e, 0x6b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x6e,
0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x10, 0x64, 0x12, 0x0f, 0x0a, 0x0b, 0x50, 0x72, 0x6f,
0x62, 0x6c, 0x65, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x10, 0x65, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x67, 0x65, 0x6e,
0x2d, 0x69, 0x6f, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f,
0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f,
0x63, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_synchronization_core_entry_proto_rawDescOnce sync.Once
file_synchronization_core_entry_proto_rawDescData = file_synchronization_core_entry_proto_rawDesc
)
func file_synchronization_core_entry_proto_rawDescGZIP() []byte {
file_synchronization_core_entry_proto_rawDescOnce.Do(func() {
file_synchronization_core_entry_proto_rawDescData = protoimpl.X.CompressGZIP(file_synchronization_core_entry_proto_rawDescData)
})
return file_synchronization_core_entry_proto_rawDescData
}
var file_synchronization_core_entry_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_synchronization_core_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_synchronization_core_entry_proto_goTypes = []interface{}{
(EntryKind)(0), // 0: core.EntryKind
(*Entry)(nil), // 1: core.Entry
nil, // 2: core.Entry.ContentsEntry
}
var file_synchronization_core_entry_proto_depIdxs = []int32{
0, // 0: core.Entry.kind:type_name -> core.EntryKind
2, // 1: core.Entry.contents:type_name -> core.Entry.ContentsEntry
1, // 2: core.Entry.ContentsEntry.value:type_name -> core.Entry
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_synchronization_core_entry_proto_init() }
func file_synchronization_core_entry_proto_init() {
if File_synchronization_core_entry_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_synchronization_core_entry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Entry); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_synchronization_core_entry_proto_rawDesc,
NumEnums: 1,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_synchronization_core_entry_proto_goTypes,
DependencyIndexes: file_synchronization_core_entry_proto_depIdxs,
EnumInfos: file_synchronization_core_entry_proto_enumTypes,
MessageInfos: file_synchronization_core_entry_proto_msgTypes,
}.Build()
File_synchronization_core_entry_proto = out.File
file_synchronization_core_entry_proto_rawDesc = nil
file_synchronization_core_entry_proto_goTypes = nil
file_synchronization_core_entry_proto_depIdxs = nil
}
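// Example (hypothetical; it would live in a separate consumer package, not in
// this generated file): construct an Entry and round-trip it through the wire
// format. The core import path matches the go_package option embedded in the
// descriptor above ("github.com/mutagen-io/mutagen/pkg/synchronization/core");
// proto is the standard google.golang.org/protobuf/proto runtime.
//
//	entry := &core.Entry{
//		Kind: core.EntryKind_Directory,
//		Contents: map[string]*core.Entry{
//			"run.sh": {Kind: core.EntryKind_File, Digest: []byte{0xde, 0xad, 0xbe, 0xef}, Executable: true},
//		},
//	}
//	data, err := proto.Marshal(entry)
//	if err != nil {
//		panic(err)
//	}
//	fmt.Printf("encoded %d bytes\n", len(data))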
|
The relationship between President Obama and Russian President Vladimir Putin appears to have reached the breaking point over the crisis in Ukraine.
Through a series of long telephone conversations, Obama and Putin have talked extensively behind the scenes about the fate of Crimea, with the United States repeatedly warning Russia against a grab for territory.
But Putin appears to be forging ahead, defying Obama’s calls for a diplomatic solution that would allow both sides to save face.
Now the U.S. and its allies are directly hitting some of Putin’s closest advisers with sanctions in a move intended to isolate and punish the Kremlin.
Foreign policy analysts warn the latest steps could be the beginning of a deep freeze in U.S.-Russian relations.
“This has the potential to end very badly,” said Richard Fontaine, president of the Center for a New American Security. “I don’t think this means war, but this could spell the beginning of a long period of extreme diplomatic alienation between the West and Russia.”
The tit-for-tat intensified on Monday, as Obama announced sanctions against Russians and Ukrainians deemed responsible for Russia’s incursion into Crimea, including close allies of Putin and former Ukrainian President Viktor Yanukovych.
Obama said that the U.S. would “calibrate our response based on whether Russia chooses to escalate or de-escalate the situation,” and administration officials did not rule out sanctioning Putin directly, an action rarely taken against a foreign head of state.
Putin was unbowed, and quickly issued a decree declaring Crimea a sovereign and independent nation.
He followed that up on Tuesday by signing a treaty that would annex Crimea.
In a speech to both houses of Russia's parliament, he argued the referendum on Crimea's secession was legal and that Russia could not abandon Crimea, an area with long ties to his country.
“We couldn’t leave Crimea in the lurch; otherwise we would be considered traitors,” Putin said, according to an interpreter. “[The West says] we are violating norms of international law. It’s good that they realize international laws still exist. It’s better late than never.”
Gary Berntsen, who served in the CIA Directorate of Operations between 1982 and 2005, said Putin is likely to take Crimea while working covertly to annex other parts of Ukraine.
"The Russians are unlikely to just roll into Ukraine with tanks. They will likely try to subvert the political process in other parts of Ukraine,” he said. "Putin’s made a very bold calculation [over Crimea] and he’s proceeding. He’s assessed that we won’t move against him militarily, and he’s moving ahead.”
John Bellinger III, who was legal adviser at the State Department under former Secretary Condoleezza Rice, said the Kremlin is likely to retaliate against the U.S. by leveling sanctions against American officials.
“I expect that Putin will announce retaliatory sanctions against certain U.S. officials, if not tomorrow, then soon thereafter,” said Bellinger, who currently heads an international law practice. “Putin is likely to be especially incensed by the sanctions against several of his close advisers.”
The Daily Beast reported Monday evening that the Kremlin was preparing sanctions that included Russian travel bans on several U.S. senators.
Obama and Putin have spoken by phone three times since Russia’s military intervention in Crimea, including a marathon 90-minute phone call. The latest conversation occurred on Sunday after the Crimea referendum, a vote the U.S. and allies say is not legitimate.
Obama warned on Monday that additional sanctions could be on the horizon — and White House officials would not rule out bringing down the hammer on Putin.
“We're not going to rule out individuals or rule out actions, except to say that there will be additional costs imposed on Russia, if Russia does not change direction here when it comes to how it's handling the situation in Ukraine,” White House spokesman Jay Carney said.
The sanctions announced Monday target the assets of seven individuals in the Russian government, in addition to four Ukrainian officials, and will prevent any American citizen from doing business with them.
“If they want to transact in dollars, they will be unable to do so,” said an administration official, who added that, in the past, individuals targeted for American sanctions “tend to find great difficulty in accessing financial services elsewhere.”
Fontaine said the U.S. is trying to increase pressure on the Kremlin with the sanctions while also providing an “off-ramp” to de-escalate the situation in Ukraine.
“The problem with that is Putin has shown absolutely no appetite whatsoever for any off-ramp,” Fontaine said.
“The question is, are these sanctions significant enough to impose a cost that he would find painful enough that he would reverse his position,” he added. “I don’t think he’s going to back off only in response to these sanctions. If this is the first of more things to come, then that’s a step in the right direction.”
The White House is coming under pressure from Republicans to turn up the heat on Putin.
Sen. John McCain (R-Ariz.) has criticized the Obama administration for not providing military assistance to Ukraine, and Monday called for admitting Georgia and Montenegro into NATO.
“The crisis in Ukraine calls for a far more significant response from the United States,” McCain said.
Sen. Bob Corker (Tenn.), the top Republican on the Foreign Relations Committee, pushed Obama to consider direct military assistance to Ukraine.
“So far, the administration’s calibrated actions have failed to affect Vladimir Putin’s decisions, and that has to change,” Corker said Monday.
While the Obama administration has focused on diplomatic efforts, the U.S. military has also bolstered its presence in Eastern Europe to send a message to both Russia and worried allies in Europe.
"Our actions in the region serve to demonstrate our commitment to our collective defense responsibilities and provide reassurance to our NATO allies," Pentagon spokeswoman Eileen Lainez said Monday. "We continue to support the diplomatic approach to the resolution of the crisis in Ukraine.”
So far, about 200 U.S. airmen and an additional 12 F-16s have arrived at Lask Air Base in Poland, according to European Command spokesman Navy Capt. Gregory Hicks. Flight operations were set to start Monday, but were postponed due to inclement weather.
NATO officials don’t expect to see near-term military “standoffs” with Russia, but are planning to bolster Ukrainian forces in the long term, a NATO official told The Hill.
NATO plans to help Ukrainian forces build capacity via joint exercises, advice and other unspecified things, the official said.
- Rebecca Shabad contributed.
This story was updated at 9:43 a.m. |
package org.lorislab.p6.process.model;
import com.fasterxml.jackson.annotation.JsonInclude;
import io.quarkus.runtime.annotations.RegisterForReflection;
import java.util.HashMap;
import java.util.Map;
@RegisterForReflection
@JsonInclude(JsonInclude.Include.NON_EMPTY)
public class ProcessDefinition {
public String id;
public String version;
public Map<String, String> labels = new HashMap<>();
public Map<String, Object> metadata = new HashMap<>();
public Map<String, Node> nodes = new HashMap<>();
@Override
public String toString() {
return "ProcessDefinition:" + id + ":" + version;
}
}
|
package com.jsonde.gui.action;
import com.jsonde.gui.sdedit.SdEditUIAdapter;
import net.sf.sdedit.icons.Icons;
import javax.swing.*;
import java.awt.event.ActionEvent;
/**
*
* @author admin
*
*/
public class CloseDiagramAction extends AbstractAction {
private SdEditUIAdapter sdEditUIAdapter;
public CloseDiagramAction(SdEditUIAdapter sdEditUIAdapter) {
this.sdEditUIAdapter = sdEditUIAdapter;
}
{
putValue(Action.SMALL_ICON,
new ImageIcon(
Icons.class.getResource("close.png")
));
putValue(Action.NAME, "Close Current Tab");
putValue(Action.SHORT_DESCRIPTION, "Close current tab");
}
public void actionPerformed(ActionEvent e) {
sdEditUIAdapter.getUserInterface().removeCurrentTab(false);
}
} |
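# NOTE: fragment of generated Destiny 2 manifest bindings. The enclosing
# module (not shown) is assumed to provide `import typing as t` and
# `import dataclasses as dt`, to decorate these classes as dataclasses, and
# to define the referenced definition classes, ManifestReference, and to_json.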
class DestinySocketTypeDefinition:
"""All Sockets have a "Type": a set of common properties that determine
when the socket allows Plugs to be inserted, what Categories of Plugs can
be inserted, and whether the socket is even visible at all given the
current game/character/account state.
See DestinyInventoryItemDefinition for more information about
Socketed items and Plugs.
"""
always_randomize_sockets: bool
avoid_duplicates_on_initialization: bool
currency_scalars: t.Sequence["DestinySocketTypeScalarMaterialRequirementEntry"]
display_properties: "DestinyDisplayPropertiesDefinition" = dt.field(
metadata={
"description": "There are fields for this display data, but they appear to be unpopulated as of now. I am not sure where in the UI these would show if they even were populated, but I will continue to return this data in case it becomes useful."
}
)
hash: int = dt.field(
metadata={
"description": """The unique identifier for this entity. Guaranteed to be unique for the type of entity, but not globally.
When entities refer to each other in Destiny content, it is this hash that they are referring to."""
}
)
hide_duplicate_reusable_plugs: bool
index: int = dt.field(
metadata={
"description": "The index of the entity as it was found in the investment tables."
}
)
insert_action: "DestinyInsertPlugActionDefinition" = dt.field(
metadata={
"description": "Defines what happens when a plug is inserted into sockets of this type."
}
)
is_preview_enabled: bool
overrides_ui_appearance: bool = dt.field(
metadata={
"description": "This property indicates if the socket type determines whether Emblem icons and nameplates should be overridden by the inserted plug item's icon and nameplate."
}
)
plug_whitelist: t.Sequence["DestinyPlugWhitelistEntryDefinition"] = dt.field(
metadata={
"description": """A list of Plug "Categories" that are allowed to be plugged into sockets of this type.
These should be compared against a given plug item's DestinyInventoryItemDefinition.plug.plugCategoryHash, which indicates the plug item's category.
If the plug's category matches any whitelisted plug, or if the whitelist is empty, it is allowed to be inserted."""
}
)
redacted: bool = dt.field(
metadata={
"description": "If this is true, then there is an entity with this identifier/type combination, but BNet is not yet allowed to show it. Sorry!"
}
)
socket_category_hash: ManifestReference["DestinySocketCategoryDefinition"]
visibility: "DestinySocketVisibility" = dt.field(
metadata={
"description": "Sometimes a socket isn't visible. These are some of the conditions under which sockets of this type are not visible. Unfortunately, the truth of visibility is much, much more complex. Best to rely on the live data for whether the socket is visible and enabled."
}
)
def to_json(self) -> t.Mapping[str, t.Any]:
return {
"displayProperties": to_json(self.display_properties),
"insertAction": to_json(self.insert_action),
"plugWhitelist": to_json(self.plug_whitelist),
"socketCategoryHash": to_json(self.socket_category_hash),
"visibility": to_json(self.visibility),
"alwaysRandomizeSockets": to_json(self.always_randomize_sockets),
"isPreviewEnabled": to_json(self.is_preview_enabled),
"hideDuplicateReusablePlugs": to_json(self.hide_duplicate_reusable_plugs),
"overridesUiAppearance": to_json(self.overrides_ui_appearance),
"avoidDuplicatesOnInitialization": to_json(
self.avoid_duplicates_on_initialization
),
"currencyScalars": to_json(self.currency_scalars),
"hash": to_json(self.hash),
"index": to_json(self.index),
"redacted": to_json(self.redacted),
} |
Try tuning in to next week's public hearings on the federal government's first-ever efficiency standards for medium- and heavy-duty trucks, where—surprisingly—it looks like all sides will be singing the same tune. Truckers like it, truck manufacturers like it, scientists like it, environmentalists like it . . . which should leave you wondering why the government needs to get involved at all.
The first thing to understand is that, while the new rules are being billed as anti-greenhouse-gas measures, in practice that translates to fuel-efficiency standards—something that President Bush's 2007 Energy Independence and Security Act had already committed to for heavy trucks. The trucking industry feared that President Obama's pledge to tackle its greenhouse gas emissions might translate into an across-the-board gas tax, or the mandatory adoption of specific emissions-reducing technologies. So the decision to regulate emissions by piggybacking on fleet-wide fuel-efficiency standards, leaving manufacturers plenty of flexibility, came as a relief.
The real question is, why have big rigs been exempt from standards for so long? After all, the corresponding rules for passenger cars and light trucks were passed back in 1975, in the wake of the Arab oil embargo, and first came into effect for 1978 model years. If you have any doubts about whether these rules made a difference, this Department of Energy data, which shows the mpg rise of passenger cars, vans, pick-ups and SUVs next to the static fuel economy of heavy-duty trucks, should dispel them:
And efficiency-impaired heavy trucks are by no means a minor part of the fuel-use and emissions picture. "These trucks represent only 4 percent of vehicles on the road, but they consume 20 percent of the fuel," Union of Concerned Scientists analyst Don Anair points out. They also contribute 6 percent of total U.S. greenhouse-gas emissions—and, crucially, they're the fastest-growing segment of the transportation sector.
The problem is that trucks are complicated: the new rules apply to pretty much everything over 8500 pounds, from the semis that pull long-haul trailers to school buses, fire trucks and pickups. A delivery truck that spends most of its time on the highway needs different rules from a garbage truck that stops every 10 yards. And "miles per gallon" is meaningless when you're hauling different loads. The new rules rely heavily on recommendations by a National Research Council report released earlier this year to divide trucks into three basic categories, and express the fuel standards as "gallons per thousand tons per mile" to take load into account.
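To make the units concrete with made-up numbers: read the metric as gallons per 1,000 ton-miles, so a tractor-trailer hauling 20 tons that burns 25 gallons over 500 miles has done 20 × 500 = 10,000 ton-miles and scores 25 / 10 = 2.5 gallons per thousand ton-miles. The same fuel burn with half the payload would score twice as badly, which is exactly why payload is folded into the standard.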
Costs and Benefits
The standards will be phased in gradually for the 2014 to 2018 model years, and they'll improve fuel efficiency by between 7 and 20 percent, depending on the truck type. That will save 500 million barrels of oil and 250 million metric tons of greenhouse gas, according to EPA calculations. The cost to implement the needed changes: $7.7 billion. The resulting benefits: $49 billion. Even if you ignore fuzzy concepts like "societal benefits," the fuel savings alone are expected to add up to $35 billion.
For individual trucks, the rules will typically add $200 to $400 to the sticker price of a 2014 model. Long-haul truckers will see larger hikes, projected at $5900 on average, but will end up saving $74,000 over the life of the truck.
That's the kind of promised return on investment that usually gets flagged by your spam filter, so it's worth asking whether the technology projections are realistic. As it turns out, you don't even need projections: the American Trucking Associations say the new standards will be largely met with "off-the-shelf technologies such as low-rolling-resistance tires, improved aerodynamics [and] reduced idling." The EPA already has a voluntary SmartWay certification that encourages truckers to adopt these measures, which can be as simple as narrowing the gap between tractor and trailer to reduce drag.
With standards so easily met, you'd expect environmental groups to be disappointed. The Union of Concerned Scientists does point out that, if the rules applied to trailers as well as trucks, big-rig fuel efficiency could easily be hiked by 35 percent rather than just 20 percent by 2017. But overall, most environmental groups seem pleased.
Instead, one of the few critical notes comes from Heavy Duty Trucking magazine. "Legislating fuel economy standards for truckers is like requiring bears to crap in the woods," one of its writers notes. "I'm thinking this first round of reductions is just to get us primed for what's to come."
Interestingly, the National Research Council report made a related point back in March:
"The choices that will be made over the course of the next few years will establish the regulatory design for [mid-size- and heavy-truck] fuel-consumption standards for the next several decades at least. While the stringency of the standards themselves may be revisited from time to time, the regulatory design elements (regulated parties, certification tests and procedures, compliance methods)—once established—are far more difficult to modify."
So we could haggle over the numbers if we really wanted to—whether the gas savings will really total $35 billion or whether they'll be just half that, whether the costs will be higher than forecast, and so on. But it's hard to escape the feeling that the specific numbers aren't really the point, at least this time round. That's why there will be so little bickering at next week's hearings: efficiency standards for heavy trucks were long overdue, and the interested parties have managed to hammer out a basic format that everyone can live with. The standards may be soft (for now), but their greatest significance is simply that they exist. |
The latest incarnation of Toronto's bike plan provoked some grumbling from the two-wheeling set when it was released last week, mostly for its recommendation that any plans for a bike lane on Bloor St. West be nixed.
But one part of the Bikeway Network report that went largely unnoticed was a Scarborough councillor's alarming request that bike lanes in her ward be removed completely, at an estimated cost to taxpayers of $210,000.
In a move many cycling advocates see as a major step in the wrong direction, Ward 35 Councillor and Rob Ford ally Michelle Berardinetti wants the bike lanes on Birchmount Rd. and Pharmacy Ave. gone, even though city staff concluded that the lanes have had no significant adverse effect on traffic flow since they were approved in 2008. According to city staff, restoring Birchmount Rd. to its original four-lane configuration would cost $90,000, and doing the same on Pharmacy Ave. would cost an additional $120,000. All this in the midst of the worst financial crisis the city has ever seen, or so Rob Ford would have you believe.
But before you hop on your fixie and head down to Councillor Berardinetti's office wielding bike chains and spanners, removing these lanes might not be as insane as it sounds, or at least not as expensive.
Berardinetti says she wants the lanes gone because that's what her constituents want. "That is the will of this community. This is democracy in action," she said.
The councillor says she's received calls from drivers and cyclists alike who don't like the bike lanes. According to her, the lanes don't lead anywhere and because the roads are busy, younger cyclists tend not to use them. She would prefer to see the city take action on building an off-road biking trail through St. Clair Ravine Park, which runs through her ward.
She also claims that removing the lanes won't cost the city a thing because Birchmount and Pharmacy are scheduled to be repaved anyway.
But Peter Noehammer, the city's director of transportation services, says he's not certain the bike lanes can be removed at the same time as the roads are repaved. "We're trying to combine the two activities, if feasible," he said. "We may not be doing all of the resurfacing routes all at once. In other stretches they're still in good shape so we wouldn't be planning to resurface them. We have had discussions about minimizing the costs but we haven't come to any final resolution on it yet."
Given the vagaries of public works projects, and the fact that not every part of the roads in question need to be repaved, it would be a miracle if removing the bike lanes actually cost zero dollars, as Berardinetti suggests.
While the existing bike lanes don't appear to join any major transportation hubs, the off-road trail Berardinetti favours has the advantage of linking Scarborough bike trails to the trail network in the centre of the city. But the on-road bike lanes and off-road trail options aren't mutually exclusive. The off-road trail is included in the most recent bike plan, and it's set to run through St. Clair Ravine Park, which would connect the lanes on Birchmount and Pharmacy and form a new piece of suburban cycling infrastructure. That is, if the two bike lanes survive Berardinetti's bid to wipe them out.
Besides the constituent concerns she cites, one also gets the sense Berardinetti objects to the bike lanes on ideological grounds. In her view they are relics from the David Miller era at City Hall, during which downtown councillors ruled the roost and unfairly imposed their city-building ideas on suburban councillors and their constituents.
"Had the previous councillor done community consultation, he never would have put these lanes in in the first place. So [removing them is] really righting a wrong," she said. "What's really ridiculous is we have a downtown ideology that's been shoved down the throats of Scarborough residents. That's not democracy."
The Bikeway Network update will be debated at the Public Works and Infrastructure Meeting on Thursday, and city staff have asked for direction from City Council on whether to remove the bike lanes on Pharmacy and Birchmount. |
Should he go to jail?
‘You are going to make your decision, and I am going to respect that’
“On my personal part, after the last five years that I have been through, it would be, I don’t think, too – I don’t want to go with brash, too exaggerated to say that, [the last five years] that was a little more than any prison system in America could put forth, if we are talking punishment for action. I read on the Internet somebody saying, ‘Well, if charges of desertion were pressed, then he would spend five years in prison.’ If you put it in that sense, realistically speaking, what I dealt with isn’t in the American prison system because we go to great extent of giving prisoners the luxuries of being human.
From my personal perspective, given the opportunity to move on with my life, which has been what has kept me going. The ability to get back to this life and being the perfectionist that I am, make up for all of the things I had to look at as regrets.
Do I need anything to move on with my life? Yes, obviously I need something as a human being. I can pull myself up like I did before. I started at the bottom before; I can start at the bottom. …
So your decision is going to be your decision. This is just coming from my side. I’m going to be, as a soldier, I am going to understand that as the commander, as the general, you have the big picture. You have to look at everything, details to the bigger picture, and your decisions are going to be made off of your understanding of the situation and your experience in all the years that you have been there. You are going to make your decision and I am going to respect that.” |
//-------------------------------------------------------------------------------
//!
//! Applies an AppliedTemperature element to the environment
//!
//! \param degrees_c -- The surface temperature, in Celsius, being applied to the body's surface.
//! \param surface_area_fraction -- What fraction [0.0,1.0] of the patient's surface area is covered by the applied temperature
//!
//! This function cannot be undone as written. SEThermalApplication supports a State member which can be set to off to terminate
//! an active applied temperature; you would simply need to pass the CDM::enumOnOff as an additional parameter to this function.
bool action_applied_temperature(std::unique_ptr<biogears::BioGearsEngine>& engine, double degrees_c, double surface_area_fraction)
{
auto thermalApplication = biogears::SEThermalApplication();
auto& heated_car_seat = thermalApplication.GetAppliedTemperature();
heated_car_seat.GetTemperature().SetValue(degrees_c, biogears::TemperatureUnit::C);
heated_car_seat.GetSurfaceAreaFraction().SetValue(surface_area_fraction);
if (thermalApplication.IsValid()) {
engine->ProcessAction(thermalApplication);
return true;
} else {
return false;
}
} |
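// Illustrative call site (a sketch, not from the BioGears docs; it assumes `engine`
// is an initialized std::unique_ptr<biogears::BioGearsEngine> with a loaded patient
// state, and the 40 C / 0.2 surface-area fraction are made-up values):
//
//   if (action_applied_temperature(engine, 40.0, 0.2)) {
//     engine->AdvanceModelTime(60.0, biogears::TimeUnit::s);
//   }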
/*
agent.go - Prototype notification agent
Copyright (c) 2015, 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package main
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/jimfenton/notif-agent/notif"
_ "github.com/lib/pq"
"io/ioutil"
"os"
)
type AgentDbCfg struct {
Host string `json:"host"`
User string `json:"user"`
Dbname string `json:"dbname"`
Password string `json:"password"`
}
// Find a user record by ID
func findUser(db *sql.DB, userID int, user *notif.Userinfo) error {
var twilioSID sql.NullString
var twilioToken sql.NullString
var twilioFrom sql.NullString
err := db.QueryRow(`SELECT id,email_username,email_server,email_port,email_authentication,email_security,twilio_sid,twilio_token,twilio_from,count,latest,created,user_id FROM userext WHERE user_id = $1`, userID).Scan(&user.Id,
&user.EmailUsername,
&user.EmailServer,
&user.EmailPort,
&user.EmailAuthentication,
&user.EmailSecurity,
&twilioSID,
&twilioToken,
&twilioFrom,
&user.Count,
&user.Latest,
&user.Created,
&user.UserID)
user.TwilioSID = twilioSID.String
user.TwilioToken = twilioToken.String
user.TwilioFrom = twilioFrom.String
return err
}
func findSite(db *sql.DB, site *notif.Siteinfo) error {
var twilioSID sql.NullString
var twilioToken sql.NullString
var twilioFrom sql.NullString
err := db.QueryRow(`SELECT twilio_sid,twilio_token,twilio_from FROM site`).Scan(&twilioSID,
&twilioToken,
&twilioFrom)
site.TwilioSID = twilioSID.String
site.TwilioToken = twilioToken.String
site.TwilioFrom = twilioFrom.String
return err
}
func main() {
var user notif.Userinfo
var site notif.Siteinfo
var adc AgentDbCfg
	dat, err := ioutil.ReadFile("/etc/notifs/agent.cfg") //keeps passwords out of source code
	if err != nil {
		fmt.Println("Can't read agent config file:", err)
		os.Exit(1)
	}
	err = json.Unmarshal(dat, &adc)
if err != nil {
fmt.Println("DB config unmarshal error:", err)
os.Exit(1)
}
// Database parameters are stored in JSON form in /etc/notifs/agent.cfg
// Sample configuration:
// {"host":"localhost","dbname":"notifs","user":"notifs","password":"<PASSWORD>"}
db, err := sql.Open("postgres", fmt.Sprintf("user=%s dbname=%s host=%s password=%s", adc.User, adc.Dbname, adc.Host, adc.Password))
if err != nil {
fmt.Println("Can't connect to database:", err)
os.Exit(1)
}
defer db.Close()
//Collect site configuration info
err = findSite(db, &site)
if err != nil {
fmt.Println("Can't retrieve site configuration info:", err) // non-fatal for now at least
}
// Channel for notif collectors
cc := make(chan notif.Notif, 10)
go collectNative(db, cc) //Listen for native notifs
for notif := range cc {
err := findUser(db, notif.UserID, &user)
if err != nil {
fmt.Println("Can't retrieve user info for push:", err) // non-fatal
} else {
ProcessRules(notif, db, user, site)
}
}
}
|
/**
* Form an RDD[(Array[Byte], Array[Byte])] from key-value pairs returned from Python.
* This is used by PySpark's shuffle operations.
*/
public class PairwiseRDD extends org.apache.spark.rdd.RDD<scala.Tuple2<java.lang.Object, byte[]>> {
public PairwiseRDD (org.apache.spark.rdd.RDD<byte[]> prev) { throw new RuntimeException(); }
public org.apache.spark.api.java.JavaPairRDD<java.lang.Object, byte[]> asJavaPairRDD () { throw new RuntimeException(); }
public scala.collection.Iterator<scala.Tuple2<java.lang.Object, byte[]>> compute (org.apache.spark.Partition split, org.apache.spark.TaskContext context) { throw new RuntimeException(); }
public org.apache.spark.Partition[] getPartitions () { throw new RuntimeException(); }
public scala.Option<org.apache.spark.Partitioner> partitioner () { throw new RuntimeException(); }
} |
// importPackage is a function that will be called by the type check package when it
// needs to import a Go package. 'dir' is the import path.
func (b *Builder) importPackage(dir string, userRequested bool) (*tc.Package, error) {
klog.V(5).Infof("importPackage %s", dir)
var pkgPath = importPathString(dir)
if buildPkg := b.buildPackages[dir]; buildPkg != nil {
canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath)
klog.V(5).Infof("importPackage %s, canonical path is %s", dir, canonicalPackage)
pkgPath = canonicalPackage
}
ignoreError := false
if _, found := b.parsed[pkgPath]; !found {
ignoreError = true
if err := b.addDir(dir, userRequested); err != nil {
if isErrPackageNotFound(err) {
klog.V(6).Info(err)
return nil, nil
}
return nil, err
}
if buildPkg := b.buildPackages[dir]; buildPkg != nil {
canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath)
klog.V(5).Infof("importPackage %s, canonical path is %s", dir, canonicalPackage)
pkgPath = canonicalPackage
}
}
b.userRequested[pkgPath] = userRequested || b.userRequested[pkgPath]
pkg, err := b.typeCheckPackage(pkgPath)
if err != nil {
switch {
case ignoreError && pkg != nil:
klog.V(2).Infof("type checking encountered some issues in %q, but ignoring.\n", pkgPath)
case !ignoreError && pkg != nil:
klog.V(2).Infof("type checking encountered some errors in %q\n", pkgPath)
return nil, err
default:
return nil, err
}
}
return pkg, nil
} |
import { Center, Text } from '@chakra-ui/react';
import React, { useEffect, useMemo, useState } from 'react';
import SkeletonLayout from '../../components/skeleton';
import OvenCard from '../../components/OvenCard/OvenCard';
import OvenSummary from '../../components/OvenSummary/OvenSummary';
import { useSortedOvensList } from '../../hooks/utilHooks';
import { useWallet } from '../../wallet/hooks';
import { useOvenDataByAddresses, useUserOvenData } from '../../api/queries';
import { getExternalOvens, removeExternalOven } from '../../utils/ovenUtils';
import { CTEZ_ADDRESS } from '../../utils/globals';
import { useAppDispatch, useAppSelector } from '../../redux/store';
import { setExternalOvens, setRemoveOven } from '../../redux/slices/OvenSlice';
import { AllOvenDatum } from '../../interfaces';
const MyOvensContainer: React.FC = () => {
const [{ pkh: userAddress }] = useWallet();
const dispatch = useAppDispatch();
const { data: myOvens, isLoading } = useUserOvenData(userAddress);
const removeTrackedOven = useAppSelector((state) => state.oven.removeOven);
const extOvensAddressesFromState = useAppSelector((state) => state.oven.extOvens);
const [extOvensAddresses, setExtOvensAddresses] = useState<string[]>(extOvensAddressesFromState);
useEffect(() => {
if (removeTrackedOven) {
const extOvensAddressesAfterRemoval = extOvensAddresses.filter(
(address) => address !== removeTrackedOven,
);
setExtOvensAddresses(extOvensAddressesAfterRemoval);
} else {
setExtOvensAddresses(extOvensAddressesFromState);
}
}, [removeTrackedOven, extOvensAddressesFromState]);
useEffect(() => {
if (userAddress && CTEZ_ADDRESS) {
dispatch(setExternalOvens(getExternalOvens(userAddress, CTEZ_ADDRESS)));
if (removeTrackedOven) {
removeExternalOven(userAddress, CTEZ_ADDRESS, removeTrackedOven);
dispatch(setRemoveOven(''));
}
}
}, [dispatch, userAddress, removeTrackedOven]);
const extOvens = useOvenDataByAddresses(extOvensAddresses);
const extOvensData = useMemo<AllOvenDatum[]>(() => {
return extOvens
.filter((oven) => !!oven.data)
.map((oven) => ({ ...(oven.data as AllOvenDatum), isImported: true }));
}, [extOvens, extOvensAddresses]);
const sortedOvens = useSortedOvensList([...(myOvens ?? []), ...extOvensData]);
if (userAddress == null) {
return (
<Center>
<Text>Connect your wallet to get started</Text>
</Center>
);
}
if (isLoading) {
return <SkeletonLayout component="OvenCard" />;
}
return (
<>
<OvenSummary ovens={sortedOvens || []} />
{sortedOvens?.map((oven) => (
<OvenCard key={oven.value.address} oven={oven} type="MyOvens" />
))}
</>
);
};
export default MyOvensContainer;
|
#include <c10/cuda/CUDAFunctions.h>
#include <c10/macros/Macros.h>
#include <limits>
namespace c10::cuda {
namespace {
// returns -1 on failure
int32_t driver_version() {
int driver_version = -1;
C10_CUDA_IGNORE_ERROR(cudaDriverGetVersion(&driver_version));
return driver_version;
}
int device_count_impl(bool fail_if_no_driver) {
int count = 0;
auto err = C10_CUDA_ERROR_HANDLED(c10::cuda::GetDeviceCount(&count));
if (err == cudaSuccess) {
return count;
}
// Clear out the error state, so we don't spuriously trigger someone else.
// (This shouldn't really matter, since we won't be running very much CUDA
// code in this regime.)
cudaError_t last_err C10_UNUSED = cudaGetLastError();
switch (err) {
case cudaErrorNoDevice:
// Zero devices is ok here
count = 0;
break;
case cudaErrorInsufficientDriver: {
auto version = driver_version();
if (version <= 0) {
if (!fail_if_no_driver) {
// No CUDA driver means no devices
count = 0;
break;
}
TORCH_CHECK(
false,
"Found no NVIDIA driver on your system. Please check that you "
"have an NVIDIA GPU and installed a driver from "
"http://www.nvidia.com/Download/index.aspx");
} else {
TORCH_CHECK(
false,
"The NVIDIA driver on your system is too old (found version ",
version,
"). Please update your GPU driver by downloading and installing "
"a new version from the URL: "
"http://www.nvidia.com/Download/index.aspx Alternatively, go to: "
"https://pytorch.org to install a PyTorch version that has been "
"compiled with your version of the CUDA driver.");
}
} break;
case cudaErrorInitializationError:
TORCH_CHECK(
false,
"CUDA driver initialization failed, you might not "
"have a CUDA gpu.");
break;
case cudaErrorUnknown:
TORCH_CHECK(
false,
"CUDA unknown error - this may be due to an "
"incorrectly set up environment, e.g. changing env "
"variable CUDA_VISIBLE_DEVICES after program start. "
"Setting the available devices to be zero.");
break;
#if C10_ASAN_ENABLED
case cudaErrorMemoryAllocation:
// In ASAN mode, we know that a cudaErrorMemoryAllocation error will
// pop up if compiled with NVCC (clang-cuda is fine)
TORCH_CHECK(
false,
"Got 'out of memory' error while trying to initialize CUDA. "
"CUDA with nvcc does not work well with ASAN and it's probably "
"the reason. We will simply shut down CUDA support. If you "
"would like to use GPUs, turn off ASAN.");
break;
#endif // C10_ASAN_ENABLED
default:
TORCH_CHECK(
false,
"Unexpected error from cudaGetDeviceCount(). Did you run "
"some cuda functions before calling NumCudaDevices() "
"that might have already set an error? Error ",
err,
": ",
cudaGetErrorString(err));
}
return count;
}
} // namespace
DeviceIndex device_count() noexcept {
// initialize number of devices only once
static int count = []() {
try {
auto result = device_count_impl(/*fail_if_no_driver=*/false);
TORCH_INTERNAL_ASSERT(
result <= std::numeric_limits<DeviceIndex>::max(),
"Too many CUDA devices, DeviceIndex overflowed");
return result;
} catch (const c10::Error& ex) {
// We don't want to fail, but still log the warning
// msg() returns the message without the stack trace
TORCH_WARN("CUDA initialization: ", ex.msg());
return 0;
}
}();
return static_cast<DeviceIndex>(count);
}
DeviceIndex device_count_ensure_non_zero() {
// Call the implementation every time to throw the exception
int count = device_count_impl(/*fail_if_no_driver=*/true);
// Zero gpus doesn't produce a warning in `device_count` but we fail here
TORCH_CHECK(count, "No CUDA GPUs are available");
return static_cast<DeviceIndex>(count);
}
DeviceIndex current_device() {
int cur_device = 0;
C10_CUDA_CHECK(c10::cuda::GetDevice(&cur_device));
return static_cast<DeviceIndex>(cur_device);
}
void set_device(DeviceIndex device) {
C10_CUDA_CHECK(c10::cuda::SetDevice(static_cast<int>(device)));
}
void device_synchronize() {
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_device_synchronization();
}
C10_CUDA_CHECK(cudaDeviceSynchronize());
}
// this function has to be called from callers performing cuda synchronizing
// operations, to raise proper error or warning
void warn_or_error_on_sync() {
if (warning_state().get_sync_debug_mode() == SyncDebugMode::L_ERROR) {
TORCH_CHECK(false, "called a synchronizing CUDA operation");
} else if (warning_state().get_sync_debug_mode() == SyncDebugMode::L_WARN) {
TORCH_WARN("called a synchronizing CUDA operation");
}
}
c10::optional<int64_t> getDeviceIndexWithPrimaryContext() {
// check current device first
int64_t current_device_index = current_device();
if (current_device_index >= 0) {
if (hasPrimaryContext(current_device_index)) {
return current_device_index;
}
}
for (const auto device_index : c10::irange(at::cuda::device_count())) {
if (device_index == current_device_index)
continue;
if (hasPrimaryContext(device_index)) {
return device_index;
}
}
return c10::nullopt;
}
namespace _internal {
bool dummyHasPrimaryContext(C10_UNUSED int64_t device_index) {
  TORCH_CHECK(false, "Should never be called");
}
bool (*hasPrimaryContext)(int64_t) = dummyHasPrimaryContext;
// Private api to be called from CUDAHooks.cpp
C10_CUDA_API void setHasPrimaryContext(bool (*func)(int64_t)) {
hasPrimaryContext = func ? func : dummyHasPrimaryContext;
}
} // namespace _internal
bool hasPrimaryContext(int64_t device_index) {
return _internal::hasPrimaryContext(device_index);
}
// Wrappers for raw CUDA device management functions
cudaError_t GetDeviceCount(int* dev_count) {
return cudaGetDeviceCount(dev_count);
}
// This is a codepath for CUDA 12 that comes with a critical change in behavior
// of `cudaSetDevice`. Unlike to previous CUDA versions that allocate context
// lazily CUDA 12.x eagerly allocates primary context the moment `cudaSetDevice`
// is called. This can lead to dramatic consequences and pollute the device
// memory in distributed runs. To avoid unnecessary context creation a new
// function called `MaybeSetDevice` was introduced. This function is to be
// called in device guard destructor and at the exit of torch.cuda.device
// context manager. The behavior of `MaybeSetDevice` is quite simple, it calls
// to `cudaSetDevice` if context already exist or if context was not allocated
// on targeted device it simply saves the device index. This way we can keep
// PyTorch backward compatible for applications like this:
//
// ```
// import torch
// x = torch.empty(1, device="cuda:1") # no CUDA context on cuda:0 after this call
// y = torch.empty(1, device="cuda")   # CUDA context is created on cuda:0
// ```
#if CUDA_VERSION >= 12000
thread_local int targetDeviceIndex = -1;
cudaError_t GetDevice(int* device) {
if (targetDeviceIndex >= 0) {
*device = targetDeviceIndex;
return cudaSuccess;
}
return cudaGetDevice(device);
}
cudaError_t SetDevice(int device) {
  TORCH_CHECK(device >= 0, "device id must be non-negative!");
targetDeviceIndex = -1;
int cur_device = -1;
C10_CUDA_CHECK(cudaGetDevice(&cur_device));
if (device == cur_device) {
return cudaSuccess;
}
return cudaSetDevice(device);
}
cudaError_t MaybeSetDevice(int device) {
if (hasPrimaryContext(device)) {
return c10::cuda::SetDevice(device);
}
targetDeviceIndex = device;
return cudaSuccess;
}
// This function always initializes the CUDA context
// on to_device
int ExchangeDevice(int to_device) {
int cur_device = targetDeviceIndex;
targetDeviceIndex = -1;
if (cur_device < 0) {
C10_CUDA_CHECK(cudaGetDevice(&cur_device));
if (to_device == cur_device) {
return cur_device;
}
}
C10_CUDA_CHECK(cudaSetDevice(to_device));
return cur_device;
}
// This function does not initialize the CUDA context
// on to_device if it does not already exist
int MaybeExchangeDevice(int to_device) {
int cur_device = -1;
C10_CUDA_CHECK(cudaGetDevice(&cur_device));
if (to_device == cur_device) {
return cur_device;
}
if (hasPrimaryContext(to_device)) {
C10_CUDA_CHECK(cudaSetDevice(to_device));
} else {
targetDeviceIndex = to_device;
}
return cur_device;
}
void SetTargetDevice() {
if (targetDeviceIndex >= 0) {
C10_CUDA_CHECK(c10::cuda::SetDevice(targetDeviceIndex));
}
}
#else
cudaError_t GetDevice(int* device) {
return cudaGetDevice(device);
}
cudaError_t SetDevice(int device) {
  TORCH_CHECK(device >= 0, "device id must be non-negative!");
int cur_device = -1;
C10_CUDA_CHECK(cudaGetDevice(&cur_device));
if (device == cur_device) {
return cudaSuccess;
}
return cudaSetDevice(device);
}
cudaError_t MaybeSetDevice(int device) {
return c10::cuda::SetDevice(device);
}
int ExchangeDevice(int to_device) {
int cur_device = -1;
C10_CUDA_CHECK(c10::cuda::GetDevice(&cur_device));
if (to_device == cur_device) {
return cur_device;
}
C10_CUDA_CHECK(cudaSetDevice(to_device));
return cur_device;
}
int MaybeExchangeDevice(int to_device) {
return c10::cuda::ExchangeDevice(to_device);
}
void SetTargetDevice() {
// no-op on CUDA version < 12.x
}
#endif
} // namespace c10::cuda
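// Illustrative caller (not part of this file): these wrappers are declared in
// CUDAFunctions.h, so external code can use them along these lines.
//
//   if (c10::cuda::device_count() > 0) {
//     c10::cuda::set_device(0);
//     // ... launch work on device 0 ...
//     c10::cuda::device_synchronize();
//   }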
|
import { Duration } from 'aws-cdk-lib';
import { Metric, Statistic } from 'aws-cdk-lib/aws-cloudwatch';
const enum Metrics {
NumberOfMessagesPublished = 'NumberOfMessagesPublished',
NumberOfNotificationsDelivered = 'NumberOfNotificationsDelivered',
NumberOfNotificationsFailed = 'NumberOfNotificationsFailed',
PublishSize = 'PublishSize'
}
const Namespace = 'AWS/SNS';
export class SnsMetricFactory {
metricNumberOfMessagesPublished(topicName: string) {
return this.metric(Metrics.NumberOfMessagesPublished, topicName).with({ statistic: Statistic.SUM });
}
metricNumberOfMessagesDelivered(topicName: string) {
return this.metric(Metrics.NumberOfNotificationsDelivered, topicName).with({ statistic: Statistic.SUM });
}
metricNumberOfNotificationsFailed(topicName: string) {
return this.metric(Metrics.NumberOfNotificationsFailed, topicName).with({ statistic: Statistic.SUM });
}
metricAverageMessageSizeInBytes(topicName: string) {
return this.metric(Metrics.PublishSize, topicName).with({ statistic: Statistic.AVERAGE });
}
protected metric(metric: Metrics, topicName: string) {
return new Metric({
metricName: metric,
namespace: Namespace,
period: Duration.minutes(5),
dimensionsMap: {
TopicName: topicName,
},
});
}
}
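// Illustrative usage (not part of the original file): wire the factory's
// failed-notification metric to a CloudWatch alarm. The stack and topic names
// are assumptions, not taken from this project.
import { Stack } from 'aws-cdk-lib';

const demoStack = new Stack();
new SnsMetricFactory()
  .metricNumberOfNotificationsFailed('orders-topic')
  .createAlarm(demoStack, 'OrdersTopicFailedNotifications', { threshold: 1, evaluationPeriods: 1 });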
|
def _wizard(
rcfile: Annotated[str, Arg("--file")] = None,
confirm: Annotated[bool, Arg("--confirm", action="store_true")] = False,
):
env = XSH.env
shell = XSH.shell.shell
xonshrcs = env.get("XONSHRC", [])
fname = xonshrcs[-1] if xonshrcs and rcfile is None else rcfile
no_wiz = os.path.join(env.get("XONSH_CONFIG_DIR"), "no-wizard")
w = make_xonfig_wizard(default_file=fname, confirm=confirm, no_wizard_file=no_wiz)
tempenv = {"PROMPT": "", "XONSH_STORE_STDOUT": False}
pv = wiz.PromptVisitor(w, store_in_history=False, multiline=False)
@contextlib.contextmanager
def force_hide():
if env.get("XONSH_STORE_STDOUT") and hasattr(shell, "_force_hide"):
orig, shell._force_hide = shell._force_hide, False
yield
shell._force_hide = orig
else:
yield
with force_hide(), env.swap(tempenv):
try:
pv.visit()
except (KeyboardInterrupt, Exception):
print()
print_exception() |
"The Master of Disguise" pants and wheezes and hurls itself exhausted across the finish line after barely 65 minutes of movie, and then follows it with 15 minutes of end credits in an attempt to clock in as a feature film. We get outtakes, deleted scenes, flubbed lines and all the other versions of the Credit Cookie, which was once a cute idea but is getting to be a bore.
The credits go on and on and on. The movie is like a party guest who thinks he is funny and is wrong. The end credits are like the same guest taking too long to leave. At one point they at last mercifully seemed to be over, and the projectionist even closed the curtains, but no: There was Dana Carvey, still visible against the red velvet, asking us what we were still doing in the theater. That is a dangerous question to ask after a movie like "The Master of Disguise." The movie is a desperate miscalculation. It gives poor Dana Carvey nothing to do that is really funny, and then expects us to laugh because he acts so goofy all the time. But acting funny is not funny. Acting in a situation that's funny--that's funny.
The plot: Carvey plays an Italian waiter named Pistachio Disguisey, who is unfamiliar with the First Law of Funny Names, which is that funny names in movies are rarely funny. Pistachio comes from a long line of masters of disguise. His father, Frabbrizio (James Brolin), having capped his career by successfully impersonating Bo Derek, retires and opens a New York restaurant. He doesn't tell his son about the family trade, but then, when he's kidnapped by his old enemy Bowman (Brent Spiner), Pistachio is told the family secret by his grandfather (Harold Gould).
Grandfather also gives him a crash course in disguise-craft after locating Frabbrizio's hidden workshop in the attic (a Disguisey's workshop, we learn, is known as a nest). There is now a scene representative of much of the movie, in which Pistachio puts on an inflatable suit, and it suddenly balloons so that he flies around the room and knocks over granddad. That scene may seem funny to kids. Real, real little, little kids.
Carvey of course is himself a skilled impersonator, and during the film we see him as a human turtle, Al Pacino from "Scarface," Robert Shaw from "Jaws," a man in a cherry suit, a man with a cow pie for a face, George W. Bush, and many other guises. In some cases the disguises are handled by using a double and then employing digital technology to make it appear as if the double's face is a latex mask that can be removed. In other cases, such as Bush, he simply impersonates him.
The plot helpfully supplies Pistachio with a girl named Jennifer (Jennifer Esposito) who becomes his sidekick in the search for Frabbrizio, and they visit a great many colorful locations. One of them is a secret headquarters where Bowman keeps his priceless trove of treasures, including the lunar landing module, which is used for one of those fight scenes where the hero dangles by one hand. The movie's director, Perry Andelin Blake, has been a production designer on 14 movies, including most of Adam Sandler's, and, to be sure, "The Master of Disguise" has an excellent production design. It is less successful at disguising itself as a comedy. |
Q, H, S, D = map(int, input().split())  # prices of 0.25 L, 0.5 L, 1 L and 2 L bottles
N = int(input())  # litres of ice tea to buy
ans = 0
per_litre = min(S, H * 2, Q * 4)  # cheapest way to buy exactly 1 L
if D == min(D, per_litre * 2):  # a 2 L bottle beats two litres' worth of smaller bottles
    ans += (N // 2) * D
    N %= 2
ans += N * per_litre
print(ans)
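# Sanity check (illustrative, not part of the original solution): exhaustive
# search over bottle counts; for small inputs it should agree with the greedy
# answer above.
def brute_force(Q, H, S, D, N):
    best = float("inf")
    for d in range(N // 2 + 1):               # 2 L bottles
        for s in range(N - 2 * d + 1):        # 1 L bottles
            rem = N - 2 * d - s               # litres still needed
            for h in range(rem * 2 + 1):      # 0.5 L bottles
                q = rem * 4 - h * 2           # 0.25 L bottles to finish exactly
                best = min(best, d * D + s * S + h * H + q * Q)
    return best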
|
#include "Printer.h"
#include <string>
#include <map>
#include <vector>
#include <iostream>
Printer::Printer(std::vector<std::map<std::string, int> >* combinations, std::map<int, std::string>* graphs, std::vector<std::string>* performanceCounters, std::string* sequenceIdParameter, std::map<std::string, std::map<int, std::vector<long long> > >* resultX, std::map<std::string, std::map<int, std::vector<long long> > >* resultY, std::string* unit) {
_combinations = combinations;
_graphs = graphs;
_performanceCounters = performanceCounters;
_sequenceIdParameter = sequenceIdParameter;
_resultX = resultX;
_resultY = resultY;
_unit = unit;
}
Printer::Printer(std::vector<std::map<std::string, int> >* combinations) {
_combinations = combinations;
}
void Printer::printResults() {
std::cout << std::endl << "Results:" << std::endl << "#############" << std::endl;
std::size_t test_series_id, positions;
std::map<int, std::string>::iterator it;
std::map<std::string, std::map<int, std::vector<long long> > > resultX = *_resultX;
std::vector<std::string> performanceCounters = *_performanceCounters;
for (size_t i = 0; i < _performanceCounters->size(); ++i) {
for (it = _graphs->begin(); it != _graphs->end(); it++) {
test_series_id = it->first;
positions = resultX[performanceCounters[i]][test_series_id].size();
for (std::size_t pos = 0; pos < positions; ++pos) {
std::cout << resultX[performanceCounters[i]][test_series_id][pos] << " " << *_sequenceIdParameter << " -> " << this->getValue(test_series_id, performanceCounters[i], pos) << " " << *_unit << std::endl;
}
}
}
}
long long Printer::getValue(size_t test_series_id, std::string perf_ctr, size_t pos) {
std::map<std::string, std::map<int, std::vector<long long> > > resultY = *_resultY;
return resultY[perf_ctr][test_series_id][pos];
}
void Printer::printCombinations() {
std::vector<std::map<std::string, int> > combinations = *_combinations;
std::map<std::string, int>::iterator it;
for (std::size_t i = 0; i < combinations.size(); ++i) {
for (it = combinations[i].begin(); it != combinations[i].end(); it++) {
std::cout << it->first << " - " << it->second << std::endl;
}
std::cout << "-------" << std::endl;
}
}
Printer::~Printer() {
}
|
///
/// In this test the query is simply a single edge, but the time constraints
/// are such that nothing matches.
///
BOOST_FIXTURE_TEST_CASE( test_single_edge_no_match, F )
{
size_t tableCapacity = 1000;
size_t resultCapacity = 1000;
size_t numNodes = 1;
size_t nodeId = 0;
MapType map(numNodes, nodeId, tableCapacity, resultCapacity, *csr, *csc);
auto query = std::make_shared<QueryType>(featureMap);
TimeEdgeExpression endTimeExpressionE1(endtimeFunction, e1,
equal_edge_operator, 0);
query->addExpression(*startY2Xboth);
query->addExpression(endTimeExpressionE1);
query->addExpression(*y2x);
query->finalize();
std::list<EdgeRequestType> edgeRequests;
Tuplizer tuplizer;
size_t n = 10000;
for(size_t i = 0; i < n; i++)
{
std::string str = generator->generate();
EdgeType edge = tuplizer(i, str);
double startTime = std::get<TimeSeconds>(edge.tuple);
if (query->satisfiesConstraints(0, edge.tuple, startTime))
{
QueryResultType result(query, edge);
map.add(result, edgeRequests);
}
}
BOOST_CHECK_EQUAL(edgeRequests.size(), 0);
BOOST_CHECK_EQUAL(map.getNumResults(), 0);
} |
/**
 * SDIndexView widget model
*
* @export
* @class SDIndexViewModel
*/
export default class SDIndexViewModel {
/**
 * Menu item collection
*
* @public
* @type {any[]}
* @memberof SDIndexViewModel
*/
public items: any[] = [
{
id: '541951a398d53d906d35436fcb39a9f3',
name: 'menuitem1',
text: '文件',
type: 'MENUITEM',
counterid: '',
tooltip: '文件',
expanded: false,
separator: false,
hidden: false,
hidesidebar: false,
opendefault: false,
iconcls: '',
icon: '',
textcls: '',
appfunctag: 'Auto1',
resourcetag: '',
authtag:'web-SDIndexView-menuitem1',
},
{
id: '<KEY>',
name: 'menuitem2',
text: '动态模型',
type: 'MENUITEM',
counterid: '',
tooltip: '动态模型',
expanded: false,
separator: false,
hidden: false,
hidesidebar: false,
opendefault: false,
iconcls: '',
icon: '',
textcls: '',
appfunctag: 'AppFunc2',
resourcetag: '',
authtag:'web-SDIndexView-menuitem2',
},
];
/**
 * App function collection
*
* @public
* @type {any[]}
* @memberof SDIndexViewModel
*/
public funcs: any[] = [
{
appfunctag: 'AppFunc2',
appfuncyype: 'APPVIEW',
openmode: '',
codename: 'metadynamicmodeldynainstgridview',
deResParameters: [],
routepath: '/sdindexview/:sdindexview?/metadynamicmodels/:metadynamicmodel?/dynainstgridview/:dynainstgridview?',
parameters: [
{ pathName: 'metadynamicmodels', parameterName: 'metadynamicmodel' },
{ pathName: 'dynainstgridview', parameterName: 'dynainstgridview' },
],
},
{
appfunctag: 'Auto1',
appfuncyype: 'APPVIEW',
openmode: '',
codename: 'sdfilegridview',
deResParameters: [],
routepath: '/sdindexview/:sdindexview?/sdfiles/:sdfile?/gridview/:gridview?',
parameters: [
{ pathName: 'sdfiles', parameterName: 'sdfile' },
{ pathName: 'gridview', parameterName: 'gridview' },
],
},
];
/**
 * Get all menu items
*
* @returns {any[]}
* @memberof SDIndexViewModel
*/
public getAppMenuItems(): any[] {
return this.items;
}
/**
 * Get all app functions
*
* @returns {any[]}
* @memberof SDIndexViewModel
*/
public getAppFuncs(): any[] {
return this.funcs;
}
} |
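// Illustrative usage (not part of the generated model): look up the route
// configuration for the menu item that points at app function 'Auto1'.
const model = new SDIndexViewModel();
const fileFunc = model.getAppFuncs().find((f) => f.appfunctag === 'Auto1');
// fileFunc?.routepath -> '/sdindexview/:sdindexview?/sdfiles/:sdfile?/gridview/:gridview?'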
import java.io.IOException;

/**
* Functional error thrown in case of a problem reading the source
* @author aro_tech
*
*/
public class UnableToReadSource extends Exception {
private static final long serialVersionUID = 1L;
private final IOException cause;
public UnableToReadSource(IOException cause) {
super();
this.cause = cause;
}
/**
* @return the cause
*/
@Override
public IOException getCause() {
return cause;
}
} |
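// Illustrative usage (the surrounding reader code is assumed, not taken from
// the original project):
//
//   try {
//       return Files.readAllLines(sourcePath);
//   } catch (IOException e) {
//       throw new UnableToReadSource(e);
//   }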
/*
* Routines to support hard-linking.
*
* Copyright (C) 1996 Andrew Tridgell
* Copyright (C) 1996 Paul Mackerras
* Copyright (C) 2002 Martin Pool <[email protected]>
* Copyright (C) 2004-2009 Wayne Davison
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, visit the http://fsf.org website.
*/
#include "rsync.h"
#include "inums.h"
#include "ifuncs.h"
extern int dry_run;
extern int list_only;
extern int am_sender;
extern int inc_recurse;
extern int do_xfers;
extern int link_dest;
extern int preserve_acls;
extern int preserve_xattrs;
extern int protocol_version;
extern int remove_source_files;
extern int stdout_format_has_i;
extern int maybe_ATTRS_REPORT;
extern int unsort_ndx;
extern char *basis_dir[MAX_BASIS_DIRS+1];
extern struct file_list *cur_flist;
#ifdef SUPPORT_HARD_LINKS
/* Starting with protocol 30, we use a simple hashtable on the sending side
* for hashing the st_dev and st_ino info. The receiving side gets told
* (via flags and a "group index") which items are hard-linked together, so
* we can avoid the pool of dev+inode data. For incremental recursion mode,
* the receiver will use a ndx hash to remember old pathnames. */
static struct hashtable *dev_tbl;
static struct hashtable *prior_hlinks;
static struct file_list *hlink_flist;
void init_hard_links(void)
{
if (am_sender || protocol_version < 30)
dev_tbl = hashtable_create(16, 1);
else if (inc_recurse)
prior_hlinks = hashtable_create(1024, 0);
}
struct ht_int64_node *idev_find(int64 dev, int64 ino)
{
static struct ht_int64_node *dev_node = NULL;
struct hashtable *tbl;
/* Note that some OSes have a dev == 0, so increment to avoid storing a 0. */
if (!dev_node || dev_node->key != dev+1) {
/* We keep a separate hash table of inodes for every device. */
dev_node = hashtable_find(dev_tbl, dev+1, 1);
if (!(tbl = dev_node->data)) {
tbl = dev_node->data = hashtable_create(512, 1);
if (DEBUG_GTE(HLINK, 3)) {
rprintf(FINFO,
"[%s] created hashtable for dev %s\n",
who_am_i(), big_num(dev));
}
}
} else
tbl = dev_node->data;
return hashtable_find(tbl, ino, 1);
}
void idev_destroy(void)
{
int i;
for (i = 0; i < dev_tbl->size; i++) {
struct ht_int32_node *node = HT_NODE(dev_tbl, dev_tbl->nodes, i);
if (node->data)
hashtable_destroy(node->data);
}
hashtable_destroy(dev_tbl);
}
static int hlink_compare_gnum(int *int1, int *int2)
{
struct file_struct *f1 = hlink_flist->sorted[*int1];
struct file_struct *f2 = hlink_flist->sorted[*int2];
int32 gnum1 = F_HL_GNUM(f1);
int32 gnum2 = F_HL_GNUM(f2);
if (gnum1 != gnum2)
return gnum1 > gnum2 ? 1 : -1;
return *int1 > *int2 ? 1 : -1;
}
static void match_gnums(int32 *ndx_list, int ndx_count)
{
int32 from, prev;
struct file_struct *file, *file_next;
struct ht_int32_node *node = NULL;
int32 gnum, gnum_next;
qsort(ndx_list, ndx_count, sizeof ndx_list[0],
(int (*)()) hlink_compare_gnum);
for (from = 0; from < ndx_count; from++) {
file = hlink_flist->sorted[ndx_list[from]];
gnum = F_HL_GNUM(file);
if (inc_recurse) {
node = hashtable_find(prior_hlinks, gnum, 1);
if (!node->data) {
if (!(node->data = new_array0(char, 5)))
out_of_memory("match_gnums");
assert(gnum >= hlink_flist->ndx_start);
file->flags |= FLAG_HLINK_FIRST;
prev = -1;
} else if (CVAL(node->data, 0) == 0) {
struct file_list *flist;
prev = IVAL(node->data, 1);
flist = flist_for_ndx(prev, NULL);
if (flist)
flist->files[prev - flist->ndx_start]->flags &= ~FLAG_HLINK_LAST;
else {
/* We skipped all prior files in this
* group, so mark this as a "first". */
file->flags |= FLAG_HLINK_FIRST;
prev = -1;
}
} else
prev = -1;
} else {
file->flags |= FLAG_HLINK_FIRST;
prev = -1;
}
for ( ; from < ndx_count-1; file = file_next, gnum = gnum_next, from++) { /*SHARED ITERATOR*/
file_next = hlink_flist->sorted[ndx_list[from+1]];
gnum_next = F_HL_GNUM(file_next);
if (gnum != gnum_next)
break;
F_HL_PREV(file) = prev;
/* The linked list uses over-the-wire ndx values. */
if (unsort_ndx)
prev = F_NDX(file);
else
prev = ndx_list[from] + hlink_flist->ndx_start;
}
if (prev < 0 && !inc_recurse) {
/* Disable hard-link bit and set DONE so that
* HLINK_BUMP()-dependent values are unaffected. */
file->flags &= ~(FLAG_HLINKED | FLAG_HLINK_FIRST);
file->flags |= FLAG_HLINK_DONE;
continue;
}
file->flags |= FLAG_HLINK_LAST;
F_HL_PREV(file) = prev;
if (inc_recurse && CVAL(node->data, 0) == 0) {
if (unsort_ndx)
prev = F_NDX(file);
else
prev = ndx_list[from] + hlink_flist->ndx_start;
SIVAL(node->data, 1, prev);
}
}
}
/* Analyze the hard-links in the file-list by creating a list of all the
* items that have hlink data, sorting them, and matching up identical
* values into clusters. These will be a single linked list from last
* to first when we're done. */
void match_hard_links(struct file_list *flist)
{
if (!list_only && flist->used) {
int i, ndx_count = 0;
int32 *ndx_list;
if (!(ndx_list = new_array(int32, flist->used)))
out_of_memory("match_hard_links");
for (i = 0; i < flist->used; i++) {
if (F_IS_HLINKED(flist->sorted[i]))
ndx_list[ndx_count++] = i;
}
hlink_flist = flist;
if (ndx_count)
match_gnums(ndx_list, ndx_count);
free(ndx_list);
}
if (protocol_version < 30)
idev_destroy();
}
static int maybe_hard_link(struct file_struct *file, int ndx,
char *fname, int statret, stat_x *sxp,
const char *oldname, STRUCT_STAT *old_stp,
const char *realname, int itemizing, enum logcode code)
{
if (statret == 0) {
if (sxp->st.st_dev == old_stp->st_dev
&& sxp->st.st_ino == old_stp->st_ino) {
if (itemizing) {
itemize(fname, file, ndx, statret, sxp,
ITEM_LOCAL_CHANGE | ITEM_XNAME_FOLLOWS,
0, "");
}
if (INFO_GTE(NAME, 2) && maybe_ATTRS_REPORT)
rprintf(FCLIENT, "%s is uptodate\n", fname);
file->flags |= FLAG_HLINK_DONE;
return 0;
}
}
if (atomic_create(file, fname, oldname, MAKEDEV(0, 0), sxp, statret == 0 ? DEL_FOR_FILE : 0)) {
if (itemizing) {
itemize(fname, file, ndx, statret, sxp,
ITEM_LOCAL_CHANGE | ITEM_XNAME_FOLLOWS, 0,
realname);
}
if (code != FNONE && INFO_GTE(NAME, 1))
rprintf(code, "%s => %s\n", fname, realname);
return 0;
}
return -1;
}
/* Figure out if a prior entry is still there or if we just have a
* cached name for it. */
static char *check_prior(struct file_struct *file, int gnum,
int *prev_ndx_p, struct file_list **flist_p)
{
struct file_struct *fp;
struct ht_int32_node *node;
int prev_ndx = F_HL_PREV(file);
while (1) {
struct file_list *flist;
if (prev_ndx < 0
|| (flist = flist_for_ndx(prev_ndx, NULL)) == NULL)
break;
fp = flist->files[prev_ndx - flist->ndx_start];
if (!(fp->flags & FLAG_SKIP_HLINK)) {
*prev_ndx_p = prev_ndx;
*flist_p = flist;
return NULL;
}
F_HL_PREV(file) = prev_ndx = F_HL_PREV(fp);
}
if (inc_recurse
&& (node = hashtable_find(prior_hlinks, gnum, 0)) != NULL) {
assert(node->data != NULL);
if (CVAL(node->data, 0) != 0) {
*prev_ndx_p = -1;
*flist_p = NULL;
return node->data;
}
/* The prior file must have been skipped. */
F_HL_PREV(file) = -1;
}
*prev_ndx_p = -1;
*flist_p = NULL;
return NULL;
}
/* Only called if FLAG_HLINKED is set and FLAG_HLINK_FIRST is not. Returns:
* 0 = process the file, 1 = skip the file, -1 = error occurred. */
int hard_link_check(struct file_struct *file, int ndx, char *fname,
int statret, stat_x *sxp, int itemizing,
enum logcode code)
{
STRUCT_STAT prev_st;
char namebuf[MAXPATHLEN], altbuf[MAXPATHLEN];
char *realname, *prev_name;
struct file_list *flist;
int gnum = inc_recurse ? F_HL_GNUM(file) : -1;
int prev_ndx;
prev_name = realname = check_prior(file, gnum, &prev_ndx, &flist);
if (!prev_name) {
struct file_struct *prev_file;
if (!flist) {
/* The previous file was skipped, so this one is
* treated as if it were the first in its group. */
if (DEBUG_GTE(HLINK, 2)) {
rprintf(FINFO, "hlink for %d (%s,%d): virtual first\n",
ndx, f_name(file, NULL), gnum);
}
return 0;
}
prev_file = flist->files[prev_ndx - flist->ndx_start];
/* Is the previous link not complete yet? */
if (!(prev_file->flags & FLAG_HLINK_DONE)) {
/* Is the previous link being transferred? */
if (prev_file->flags & FLAG_FILE_SENT) {
/* Add ourselves to the list of files that will
* be updated when the transfer completes, and
* mark ourself as waiting for the transfer. */
F_HL_PREV(file) = F_HL_PREV(prev_file);
F_HL_PREV(prev_file) = ndx;
file->flags |= FLAG_FILE_SENT;
cur_flist->in_progress++;
if (DEBUG_GTE(HLINK, 2)) {
rprintf(FINFO, "hlink for %d (%s,%d): waiting for %d\n",
ndx, f_name(file, NULL), gnum, F_HL_PREV(file));
}
return 1;
}
if (DEBUG_GTE(HLINK, 2)) {
rprintf(FINFO, "hlink for %d (%s,%d): looking for a leader\n",
ndx, f_name(file, NULL), gnum);
}
return 0;
}
/* There is a finished file to link with! */
if (!(prev_file->flags & FLAG_HLINK_FIRST)) {
/* The previous previous is FIRST when prev is not. */
prev_name = realname = check_prior(prev_file, gnum, &prev_ndx, &flist);
/* Update our previous pointer to point to the FIRST. */
F_HL_PREV(file) = prev_ndx;
}
if (!prev_name) {
int alt_dest;
assert(flist != NULL);
prev_file = flist->files[prev_ndx - flist->ndx_start];
/* F_HL_PREV() is alt_dest value when DONE && FIRST. */
alt_dest = F_HL_PREV(prev_file);
if (DEBUG_GTE(HLINK, 2)) {
rprintf(FINFO, "hlink for %d (%s,%d): found flist match (alt %d)\n",
ndx, f_name(file, NULL), gnum, alt_dest);
}
if (alt_dest >= 0 && dry_run) {
pathjoin(namebuf, MAXPATHLEN, basis_dir[alt_dest],
f_name(prev_file, NULL));
prev_name = namebuf;
realname = f_name(prev_file, altbuf);
} else {
prev_name = f_name(prev_file, namebuf);
realname = prev_name;
}
}
}
if (DEBUG_GTE(HLINK, 2)) {
rprintf(FINFO, "hlink for %d (%s,%d): leader is %d (%s)\n",
ndx, f_name(file, NULL), gnum, prev_ndx, prev_name);
}
if (link_stat(prev_name, &prev_st, 0) < 0) {
if (!dry_run || errno != ENOENT) {
rsyserr(FERROR_XFER, errno, "stat %s failed", full_fname(prev_name));
return -1;
}
/* A new hard-link will get a new dev & inode, so approximate
* those values in dry-run mode by zeroing them. */
memset(&prev_st, 0, sizeof prev_st);
}
if (statret < 0 && basis_dir[0] != NULL) {
/* If we match an alt-dest item, we don't output this as a change. */
char cmpbuf[MAXPATHLEN];
stat_x alt_sx;
int j = 0;
init_stat_x(&alt_sx);
do {
pathjoin(cmpbuf, MAXPATHLEN, basis_dir[j], fname);
if (link_stat(cmpbuf, &alt_sx.st, 0) < 0)
continue;
if (link_dest) {
if (prev_st.st_dev != alt_sx.st.st_dev
|| prev_st.st_ino != alt_sx.st.st_ino)
continue;
statret = 1;
if (stdout_format_has_i == 0
|| (!INFO_GTE(NAME, 2) && stdout_format_has_i < 2)) {
itemizing = 0;
code = FNONE;
if (INFO_GTE(NAME, 2) && maybe_ATTRS_REPORT)
rprintf(FCLIENT, "%s is uptodate\n", fname);
}
break;
}
if (!unchanged_file(cmpbuf, file, &alt_sx.st))
continue;
statret = 1;
if (unchanged_attrs(cmpbuf, file, &alt_sx))
break;
} while (basis_dir[++j] != NULL);
if (statret == 1) {
sxp->st = alt_sx.st;
#ifdef SUPPORT_ACLS
if (preserve_acls && !S_ISLNK(file->mode)) {
free_acl(sxp);
if (!ACL_READY(alt_sx))
get_acl(cmpbuf, sxp);
else {
sxp->acc_acl = alt_sx.acc_acl;
sxp->def_acl = alt_sx.def_acl;
alt_sx.acc_acl = alt_sx.def_acl = NULL;
}
}
#endif
#ifdef SUPPORT_XATTRS
if (preserve_xattrs) {
free_xattr(sxp);
if (!XATTR_READY(alt_sx))
get_xattr(cmpbuf, sxp);
else {
sxp->xattr = alt_sx.xattr;
alt_sx.xattr = NULL;
}
}
#endif
} else {
#ifdef SUPPORT_ACLS
if (preserve_acls)
free_acl(&alt_sx);
#endif
#ifdef SUPPORT_XATTRS
if (preserve_xattrs)
free_xattr(&alt_sx);
#endif
}
}
if (maybe_hard_link(file, ndx, fname, statret, sxp, prev_name, &prev_st,
realname, itemizing, code) < 0)
return -1;
if (remove_source_files == 1 && do_xfers)
send_msg_int(MSG_SUCCESS, ndx);
return 1;
}
int hard_link_one(struct file_struct *file, const char *fname,
const char *oldname, int terse)
{
if (do_link(oldname, fname) < 0) {
enum logcode code;
if (terse) {
if (!INFO_GTE(NAME, 1))
return 0;
code = FINFO;
} else
code = FERROR_XFER;
rsyserr(code, errno, "link %s => %s failed",
full_fname(fname), oldname);
return 0;
}
file->flags |= FLAG_HLINK_DONE;
return 1;
}
void finish_hard_link(struct file_struct *file, const char *fname, int fin_ndx,
STRUCT_STAT *stp, int itemizing, enum logcode code,
int alt_dest)
{
stat_x prev_sx;
STRUCT_STAT st;
char prev_name[MAXPATHLEN], alt_name[MAXPATHLEN];
const char *our_name;
struct file_list *flist;
int prev_statret, ndx, prev_ndx = F_HL_PREV(file);
if (stp == NULL && prev_ndx >= 0) {
if (link_stat(fname, &st, 0) < 0) {
rsyserr(FERROR_XFER, errno, "stat %s failed",
full_fname(fname));
return;
}
stp = &st;
}
/* FIRST combined with DONE means we were the first to get done. */
file->flags |= FLAG_HLINK_FIRST | FLAG_HLINK_DONE;
F_HL_PREV(file) = alt_dest;
if (alt_dest >= 0 && dry_run) {
pathjoin(alt_name, MAXPATHLEN, basis_dir[alt_dest],
f_name(file, NULL));
our_name = alt_name;
} else
our_name = fname;
init_stat_x(&prev_sx);
while ((ndx = prev_ndx) >= 0) {
int val;
flist = flist_for_ndx(ndx, "finish_hard_link");
file = flist->files[ndx - flist->ndx_start];
file->flags = (file->flags & ~FLAG_HLINK_FIRST) | FLAG_HLINK_DONE;
prev_ndx = F_HL_PREV(file);
F_HL_PREV(file) = fin_ndx;
prev_statret = link_stat(f_name(file, prev_name), &prev_sx.st, 0);
val = maybe_hard_link(file, ndx, prev_name, prev_statret, &prev_sx,
our_name, stp, fname, itemizing, code);
flist->in_progress--;
#ifdef SUPPORT_ACLS
if (preserve_acls)
free_acl(&prev_sx);
#endif
#ifdef SUPPORT_XATTRS
if (preserve_xattrs)
free_xattr(&prev_sx);
#endif
if (val < 0)
continue;
if (remove_source_files == 1 && do_xfers)
send_msg_int(MSG_SUCCESS, ndx);
}
if (inc_recurse) {
int gnum = F_HL_GNUM(file);
struct ht_int32_node *node = hashtable_find(prior_hlinks, gnum, 0);
if (node == NULL) {
rprintf(FERROR, "Unable to find a hlink node for %d (%s)\n", gnum, f_name(file, prev_name));
exit_cleanup(RERR_MESSAGEIO);
}
if (node->data == NULL) {
rprintf(FERROR, "Hlink node data for %d is NULL (%s)\n", gnum, f_name(file, prev_name));
exit_cleanup(RERR_MESSAGEIO);
}
if (CVAL(node->data, 0) != 0) {
rprintf(FERROR, "Hlink node data for %d already has path=%s (%s)\n",
gnum, (char*)node->data, f_name(file, prev_name));
exit_cleanup(RERR_MESSAGEIO);
}
free(node->data);
if (!(node->data = strdup(our_name)))
out_of_memory("finish_hard_link");
}
}
int skip_hard_link(struct file_struct *file, struct file_list **flist_p)
{
struct file_list *flist;
int prev_ndx;
file->flags |= FLAG_SKIP_HLINK;
if (!(file->flags & FLAG_HLINK_LAST))
return -1;
check_prior(file, F_HL_GNUM(file), &prev_ndx, &flist);
if (prev_ndx >= 0) {
file = flist->files[prev_ndx - flist->ndx_start];
if (file->flags & (FLAG_HLINK_DONE|FLAG_FILE_SENT))
return -1;
file->flags |= FLAG_HLINK_LAST;
*flist_p = flist;
}
return prev_ndx;
}
#endif
|
export class EmailQuery {
email: string;
userId: number;
userProfile: UserProfile;
}
export type UserProfile = {
name: string
dateOfBirth: Date
} |
export class ApiResponse {
public code:string;
public message:string;
public data:any;
public meta:any;
}
export const ResponseCodes = {
SUCCESS:{
code:'000',
message:'success'
},
FAILED:{
code:'001',
message:'failed'
},
NO_RECORD_FOUND:{
code:'404',
message:'no record found'
},
AUTHENTICATION_FAILED:{
code:'002',
message:'authentication failed',
},
ACCESS_TOKEN_EXPIRED:{
code:'003',
message:'access token has expired'
},
INACTIVE_ACCOUNT:{
code:'004',
message:'inactive account'
}
}
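// Illustrative helper (not part of the original file): build a success
// envelope from a service result using the codes above.
export function successResponse(data: any, meta: any = null): ApiResponse {
  const response = new ApiResponse();
  response.code = ResponseCodes.SUCCESS.code;
  response.message = ResponseCodes.SUCCESS.message;
  response.data = data;
  response.meta = meta;
  return response;
}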
|
Not a day goes by without someone dragging an iconic movie out of the vaults of history to be tarted up and sold back to the movie-going public. Today is a day and it's the turn of Terry Gilliam's Time Bandits!
Rumoured to be on the cards from as early as 2006, Variety are now reporting that Guy Collins and Michael Ryan, both involved in the original production, are planning to push forward with the remake. What's more, they are promising "a bigscreen kids action franchise"! Fantastic!
The original Time Bandits told the story of a young boy who accidentally joins a group of time travelling dwarves as they plunder different periods in history looking for treasure. DOES THAT SOUND FAMILIAR TO ANYONE? I am of course suggesting that Time Bandits' fantastic tale of thieving, epoch-hopping dwarves is, in fact, a suitable analogue for the dubious practices of Hollywood production studios in 2011! Burn!
// Listens to a server socket. On incoming request, forward it to the host.
// static
void* Server::ServerThread(void* arg) {
Server* server = reinterpret_cast<Server*>(arg);
while (!g_killed) {
int forwarder_index = server->GetFreeForwarderIndex();
if (forwarder_index < 0) {
LOG(ERROR) << "Too many forwarders";
continue;
}
struct sockaddr_in addr;
socklen_t addr_len = sizeof(addr);
int socket = HANDLE_EINTR(accept(server->socket_,
reinterpret_cast<sockaddr*>(&addr),
&addr_len));
if (socket < 0) {
LOG(ERROR) << "Failed to accept: " << strerror(errno);
break;
}
tools::DisableNagle(socket);
int host_socket = tools::ConnectAdbHostSocket(server->forward_to_);
if (host_socket >= 0) {
fcntl(socket, F_SETFL, fcntl(socket, F_GETFL) | O_NONBLOCK);
fcntl(host_socket, F_SETFL, fcntl(host_socket, F_GETFL) | O_NONBLOCK);
ForwarderInfo* forwarder_info = server->GetForwarderInfo(forwarder_index);
time_t now = time(NULL);
forwarder_info->start_time = now;
forwarder_info->socket1 = socket;
forwarder_info->socket1_last_byte_time = now;
forwarder_info->socket1_bytes = 0;
forwarder_info->socket2 = host_socket;
forwarder_info->socket2_last_byte_time = now;
forwarder_info->socket2_bytes = 0;
pthread_t thread;
pthread_create(&thread, NULL, ForwarderThread,
new ForwarderThreadInfo(server, forwarder_index));
} else {
CloseSocket(socket);
}
}
CloseSocket(server->socket_);
server->socket_ = -1;
return NULL;
} |
from typing import List

# Atom, HornClause, Literal, Term, TRUE_LITERAL, SUBSTITUTION_NAME,
# TYPE_PREDICATE_NAME and append_variables_from_atom are assumed to be
# provided by the surrounding project.
def build_substitution_clause(initial_clause, predicate_types):
    """Build the substitution clause and its companion type clause for `initial_clause`."""
terms: List[Term] = []
append_variables_from_atom(initial_clause.head, predicate_types, terms)
body: List[Literal] = []
if not initial_clause.body:
body.append(TRUE_LITERAL)
else:
for literal in initial_clause.body:
append_variables_from_atom(literal, predicate_types, terms)
body = initial_clause.body
substitution_head = Atom(SUBSTITUTION_NAME, *terms)
substitution_clause = HornClause(substitution_head, *body)
type_clause = HornClause(
Atom(TYPE_PREDICATE_NAME, *terms),
Literal(substitution_head), Literal(initial_clause.head))
return substitution_clause, type_clause |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from v2ray.com.core.app.proxyman.command import command_pb2 as v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2
class HandlerServiceStub(object):
"""Missing associated documentation comment in .proto file"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.AddInbound = channel.unary_unary(
'/v2ray.core.app.proxyman.command.HandlerService/AddInbound',
request_serializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AddInboundRequest.SerializeToString,
response_deserializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AddInboundResponse.FromString,
)
self.RemoveInbound = channel.unary_unary(
'/v2ray.core.app.proxyman.command.HandlerService/RemoveInbound',
request_serializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.RemoveInboundRequest.SerializeToString,
response_deserializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.RemoveInboundResponse.FromString,
)
self.AlterInbound = channel.unary_unary(
'/v2ray.core.app.proxyman.command.HandlerService/AlterInbound',
request_serializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AlterInboundRequest.SerializeToString,
response_deserializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AlterInboundResponse.FromString,
)
self.AddOutbound = channel.unary_unary(
'/v2ray.core.app.proxyman.command.HandlerService/AddOutbound',
request_serializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AddOutboundRequest.SerializeToString,
response_deserializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AddOutboundResponse.FromString,
)
self.RemoveOutbound = channel.unary_unary(
'/v2ray.core.app.proxyman.command.HandlerService/RemoveOutbound',
request_serializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.RemoveOutboundRequest.SerializeToString,
response_deserializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.RemoveOutboundResponse.FromString,
)
self.AlterOutbound = channel.unary_unary(
'/v2ray.core.app.proxyman.command.HandlerService/AlterOutbound',
request_serializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AlterOutboundRequest.SerializeToString,
response_deserializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AlterOutboundResponse.FromString,
)
class HandlerServiceServicer(object):
"""Missing associated documentation comment in .proto file"""
def AddInbound(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RemoveInbound(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AlterInbound(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddOutbound(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RemoveOutbound(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AlterOutbound(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_HandlerServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'AddInbound': grpc.unary_unary_rpc_method_handler(
servicer.AddInbound,
request_deserializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AddInboundRequest.FromString,
response_serializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AddInboundResponse.SerializeToString,
),
'RemoveInbound': grpc.unary_unary_rpc_method_handler(
servicer.RemoveInbound,
request_deserializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.RemoveInboundRequest.FromString,
response_serializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.RemoveInboundResponse.SerializeToString,
),
'AlterInbound': grpc.unary_unary_rpc_method_handler(
servicer.AlterInbound,
request_deserializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AlterInboundRequest.FromString,
response_serializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AlterInboundResponse.SerializeToString,
),
'AddOutbound': grpc.unary_unary_rpc_method_handler(
servicer.AddOutbound,
request_deserializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AddOutboundRequest.FromString,
response_serializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AddOutboundResponse.SerializeToString,
),
'RemoveOutbound': grpc.unary_unary_rpc_method_handler(
servicer.RemoveOutbound,
request_deserializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.RemoveOutboundRequest.FromString,
response_serializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.RemoveOutboundResponse.SerializeToString,
),
'AlterOutbound': grpc.unary_unary_rpc_method_handler(
servicer.AlterOutbound,
request_deserializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AlterOutboundRequest.FromString,
response_serializer=v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AlterOutboundResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'v2ray.core.app.proxyman.command.HandlerService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class HandlerService(object):
"""Missing associated documentation comment in .proto file"""
@staticmethod
def AddInbound(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v2ray.core.app.proxyman.command.HandlerService/AddInbound',
v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AddInboundRequest.SerializeToString,
v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AddInboundResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RemoveInbound(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v2ray.core.app.proxyman.command.HandlerService/RemoveInbound',
v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.RemoveInboundRequest.SerializeToString,
v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.RemoveInboundResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AlterInbound(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v2ray.core.app.proxyman.command.HandlerService/AlterInbound',
v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AlterInboundRequest.SerializeToString,
v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AlterInboundResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AddOutbound(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v2ray.core.app.proxyman.command.HandlerService/AddOutbound',
v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AddOutboundRequest.SerializeToString,
v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AddOutboundResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RemoveOutbound(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v2ray.core.app.proxyman.command.HandlerService/RemoveOutbound',
v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.RemoveOutboundRequest.SerializeToString,
v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.RemoveOutboundResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AlterOutbound(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v2ray.core.app.proxyman.command.HandlerService/AlterOutbound',
v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AlterOutboundRequest.SerializeToString,
v2ray_dot_com_dot_core_dot_app_dot_proxyman_dot_command_dot_command__pb2.AlterOutboundResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
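
For reference, a minimal sketch of how this generated stub might be driven from client code; the listener address, the `tag` field value, and the use of an insecure channel are illustrative assumptions, not part of the generated module:

# Hypothetical client snippet for the generated HandlerServiceStub.
import grpc
from v2ray.com.core.app.proxyman.command import command_pb2

channel = grpc.insecure_channel('127.0.0.1:10085')  # assumed API listener address
stub = HandlerServiceStub(channel)
request = command_pb2.RemoveInboundRequest(tag='my-inbound')  # 'tag' assumed from the service shape
response = stub.RemoveInbound(request)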
package main
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/url"
"regexp"
"strconv"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/secretsmanager"
)
// Key validation regexp
var keyRegexp = regexp.MustCompile(`^\w+$`)
var quotesRegexp = regexp.MustCompile(`"`)
type handlerFunc func(*url.URL) *Collection
var handlers = map[string]handlerFunc{
"s3": LoadItemsFromS3,
"sm": LoadItemsFromSecretsManager,
}
// Item is a representation of a single config key and value
type Item struct {
Key, Value string
}
// FormattedKey formats the key for snagsby
func (i *Item) FormattedKey() string {
return strings.ToUpper(i.Key)
}
// Collection is a collection of single key value items and the source. If
// there were source processing errors they'll be saved in .Error
type Collection struct {
Items map[string]*Item
Source string
Error error
}
// NewCollection initializes a collection
func NewCollection(source string, err error) *Collection {
return &Collection{
Source: source,
Items: make(map[string]*Item),
Error: err,
}
}
// AppendItem will add an item to the internal Items map if the key
// validates. If the key doesn't validate an error will be returned and no
// item will be written.
func (c *Collection) AppendItem(key, val string) error {
if !keyRegexp.MatchString(key) {
return errors.New(key + " contains invalid characters")
}
key = strings.ToUpper(key)
c.Items[key] = &Item{Key: key, Value: val}
return nil
}
// Len returns the number of items in the collection
func (c *Collection) Len() int {
return len(c.Items)
}
// AsMap represents the collection as a map[string]string
func (c *Collection) AsMap() map[string]string {
out := make(map[string]string)
for _, i := range c.Items {
out[i.FormattedKey()] = i.Value
}
return out
}
// GetItemString will return the value of a item by key
func (c *Collection) GetItemString(key string) (string, bool) {
item, ok := c.Items[key]
if !ok {
return "", false
}
return item.Value, true
}
// ReadItemsFromReader will read in items from an io.Reader into the collection
// Items map
func (c *Collection) ReadItemsFromReader(r io.Reader) error {
var f map[string]interface{}
if err := json.NewDecoder(r).Decode(&f); err != nil {
c.Error = err
return err
}
for k, v := range f {
switch vv := v.(type) {
case string:
c.AppendItem(k, vv)
case float64:
c.AppendItem(k, strconv.FormatFloat(vv, 'f', -1, 64))
case bool:
var b string
if vv {
b = "1"
} else {
b = "0"
}
c.AppendItem(k, b)
}
}
return nil
}
func getAwsSession() (*session.Session, error) {
return session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
})
}
// LoadItemsFromSecretsManager loads data from aws secrets manager
func LoadItemsFromSecretsManager(source *url.URL) *Collection {
secrets := NewCollection(source.String(), nil)
sess, sessionError := getAwsSession()
if sessionError != nil {
secrets.Error = sessionError
return secrets
}
region := source.Query().Get("region")
config := aws.Config{}
if region != "" {
config.Region = aws.String(region)
}
secretName := fmt.Sprintf("%s%s", source.Host, source.Path)
svc := secretsmanager.New(sess, &config)
input := &secretsmanager.GetSecretValueInput{
SecretId: aws.String(secretName),
}
// Add version stage
versionStage := source.Query().Get("version-stage")
if versionStage != "" {
input.VersionStage = aws.String(versionStage)
}
versionID := source.Query().Get("version-id")
if versionID != "" {
input.VersionId = aws.String(versionID)
}
result, err := svc.GetSecretValue(input)
if err != nil {
secrets.Error = err
return secrets
}
secrets.ReadItemsFromReader(strings.NewReader(*result.SecretString))
return secrets
}
// LoadItemsFromS3 loads data from an s3 source
func LoadItemsFromS3(source *url.URL) *Collection {
secrets := NewCollection(source.String(), nil)
sess, sessionError := getAwsSession()
if sessionError != nil {
secrets.Error = sessionError
return secrets
}
region := source.Query().Get("region")
config := aws.Config{}
if region != "" {
config.Region = aws.String(region)
}
svc := s3.New(sess, &config)
result, s3err := svc.GetObject(&s3.GetObjectInput{
Bucket: aws.String(source.Host),
Key: aws.String(source.Path),
})
if s3err != nil {
secrets.Error = s3err
return secrets
}
defer result.Body.Close()
secrets.ReadItemsFromReader(result.Body)
return secrets
}
// LoadItemsFromSource will find an appropriate handler and return a collection
func LoadItemsFromSource(source *url.URL) *Collection {
handler, ok := handlers[source.Scheme]
if ok {
return handler(source)
}
col := NewCollection(
source.String(),
fmt.Errorf("No handler found for %s", source.String()),
)
return col
}
Studies on the human T-lymphocyte population. I. The development and characterization of a specific anti-human T-cell antibody.
The IgG fraction of a goat antibody prepared against human infant thymus cells and extensively absorbed with pooled human red blood cells, insolubilized human Ig and cells from a patient with chronic lymphocytic leukemia (ATG), was used to study human peripheral lymphocyte populations. ATG inhibited the response of human peripheral lymphocytes to concanavalin A (Con A) but not to pokeweed mitogen (PWM). The F(ab′)2 and Fab′ fragments of ATG were as effective as ATG in inhibiting Con A-induced 3H-thymidine incorporation. ATG did not interact with Con A or the receptor for Con A on the lymphocyte but was inhibitory in the absence of complement as a result of its apparent selective cytotoxicity. The antibody also inhibited 3H-thymidine incorporation in the one-way mixed lymphocyte reaction, again without affecting the response of these cells to PWM. The specificity of ATG for the T-lymphocyte population was also suggested by fluorescent labeling experiments which indicated that ATG reacted with only 50 to 80% of human peripheral lymphocytes. These results suggest that ATG is directed toward antigens unique to the human T-lymphocyte population. Moreover, it appears that ATG can be used to study human T-lymphocyte responses and to follow the isolation of T-specific antigens.
//
// Created by <NAME> on 16/03/2018.
//
/**
* Series
*
* This module defines a Series class that is inspired by a python pandas Series class.
*
*
* Style guide when extending Series
* =====================================
* Where possible, this class will behave the same as a pandas Series. The supported features
* of this class will be a subset of what is available by pandas.Series, and the typing system of C++ will be used to
* full effect here where it makes sense.
*
*/
#include "Series.h"
#include "SeriesMask.h"
#include "numc.h"
namespace polars {
using SeriesMask = polars::SeriesMask;
Series::Series() = default;
// todo; check for 1-D series & that the lengths match
Series::Series(const arma::vec &v, const arma::vec &t) : t(t), v(v) {
//assert(t.n_cols == 1 && v.n_cols == 1);
//assert(t.n_rows == v.n_rows);
};
/**
* Converting constructor - this takes a SeriesMask and creates a Series from it.
*
* This is intentionally implicit (not marked explicit) so that a function expecting a Series can be passed a
* SeriesMask and it will be automatically converted since this is a loss-less process.
*/
Series::Series(const SeriesMask &sm) : t(sm.index()), v(arma::conv_to<arma::vec>::from(sm.values())) {}
Series Series::from_vect(const std::vector<double> &t_v, const std::vector<double> &v_v) {
return Series(arma::conv_to<arma::vec>::from(v_v), arma::conv_to<arma::vec>::from(t_v));
}
Series Series::from_map(const std::map<double, double> &iv_map) {
arma::vec index(iv_map.size());
arma::vec values(iv_map.size());
int i = 0;
for (auto& pair : iv_map) {
index[i] = pair.first;
values[i] = pair.second;
++i;
}
return {values, index};
}
// Series [op] int methods
SeriesMask Series::operator==(const int rhs) const {
arma::vec rhs_vec = arma::ones(this->size()) * rhs;
arma::vec abs_diff = arma::abs(values() - rhs_vec);
        // Even though the rhs is an int, the lhs is a double and may be arbitrarily close to it, so a loose tolerance like 0.1 won't do.
double threshold = 1E-50;
return SeriesMask(abs_diff < threshold, index());
}
SeriesMask Series::operator!=(const int rhs) const { // TODO implement as negation of operator==
arma::vec rhs_vec = arma::ones(this->size()) * rhs;
arma::vec abs_diff = arma::abs(values() - rhs_vec);
        // Even though the rhs is an int, the lhs is a double and may be arbitrarily close to it, so a loose tolerance like 0.1 won't do.
double threshold = 1E-50;
return SeriesMask(abs_diff > threshold, index());
}
// Series [op] Series methods
SeriesMask Series::operator==(const Series &rhs) const {
// TODO: make this fast enough to always check at runtime
//assert(!arma::any(index() != rhs.index())); // Use not any != to handle empty array case
return SeriesMask(values() == rhs.values(), index());
}
SeriesMask Series::operator!=(const Series &rhs) const {
// TODO: make this fast enough to always check at runtime
//assert(!arma::any(index() != rhs.index())); // Use not any != to handle empty array case
return SeriesMask(values() != rhs.values(), index());
}
SeriesMask Series::operator>(const Series &rhs) const {
//assert(!arma::any(index() != rhs.index())); // Use not any != to handle empty array case
return SeriesMask(values() > rhs.values(), index());
}
SeriesMask Series::operator<(const Series &rhs) const {
//assert(!arma::any(index() != rhs.index())); // Use not any != to handle empty array case
return polars::SeriesMask(values() < rhs.values(), index());
}
Series Series::operator+(const Series &rhs) const {
//assert(!arma::any(index() != rhs.index())); // Use not any != to handle empty array case
return polars::Series(values() + rhs.values(), index());
}
Series Series::operator-(const Series &rhs) const {
//assert(!arma::any(index() != rhs.index())); // Use not any != to handle empty array case
return polars::Series(values() - rhs.values(), index());
}
Series Series::operator*(const Series &rhs) const {
//assert(!arma::any(index() != rhs.index())); // Use not any != to handle empty array case
return polars::Series(values() % rhs.values(), index());
}
// Series [op] double methods
SeriesMask Series::operator>(const double &rhs) const {
return SeriesMask(values() > (arma::ones(size()) * rhs), index());
}
SeriesMask Series::operator<(const double &rhs) const {
return SeriesMask(values() < (arma::ones(size()) * rhs), index());
}
SeriesMask Series::operator>=(const double &rhs) const {
return SeriesMask(values() >= (arma::ones(size()) * rhs), index());
}
SeriesMask Series::operator<=(const double &rhs) const {
return SeriesMask(values() <= (arma::ones(size()) * rhs), index());
}
Series Series::operator+(const double &rhs) const {
return Series(values() + rhs, index());
}
Series Series::operator-(const double &rhs) const {
return Series(values() - rhs, index());
}
Series Series::operator*(const double &rhs) const {
return Series(values() * rhs, index());
}
// todo; do we need a flavor that *doesn't* take account of NANs?
bool Series::equals(const Series &rhs) const {
if ((index().n_rows != rhs.index().n_rows)) return false;
if ((index().n_cols != rhs.index().n_cols)) return false;
if (!polars::numc::equal_handling_nans(values(), rhs.values())) return false;
if (any(index() != rhs.index())) return false;
return true;
}
// todo; do we need a flavor that *doesn't* take account of NANs?
bool Series::almost_equals(const Series &rhs) const {
if ((index().n_rows != rhs.index().n_rows)) return false;
if ((index().n_cols != rhs.index().n_cols)) return false;
if (!polars::numc::almost_equal_handling_nans(values(), rhs.values())) return false;
if (any(index() != rhs.index())) return false;
return true;
}
// Location.
Series Series::iloc(int from, int to, int step) const {
if(empty() || (from == to)){
return Series();
}
arma::uvec pos;
int effective_from;
int effective_to;
if(from < 0){
effective_from = values().size() + from;
} else {
effective_from = from;
}
if(to < 0){
effective_to = values().size() + to - 1;
} else if(to == 0) {
effective_to = to;
} else {
effective_to = to - 1;
}
pos = arma::regspace<arma::uvec>(effective_from, step, effective_to);
if(pos.size() > size()){
pos = pos.subvec(0, size() - 1);
}
return Series(values().elem(pos),index().elem(pos));
}
// TODO: Add slicing logic of the form .iloc(int start, int stop, int step=1) so it can be called like ser.iloc(0, -10).
Series Series::iloc(const arma::uvec &pos) const {
return Series(values().elem(pos), index().elem(pos));
}
double Series::iloc(arma::uword pos) const {
arma::vec val = values().elem(arma::uvec{pos});
return val[0];
}
// by label of indices
Series Series::loc(const arma::vec &index_labels) const {
std::vector<int> indices;
for (int j = 0; j < index_labels.n_elem; j++) {
arma::uvec idx = arma::find(index() == index_labels[j]);
if (!idx.empty()) {
indices.push_back(idx[0]);
}
}
if (indices.empty()) {
return Series();
} else {
arma::uvec indices_v = arma::conv_to<arma::uvec>::from(indices);
return Series(values().elem(indices_v), index().elem(indices_v));
}
}
Series Series::loc(arma::uword pos) const {
arma::uvec idx = arma::find(index() == pos);
if (!idx.empty()) {
return Series(values(), index()).iloc(idx);
} else {
return Series();
}
}
Series Series::where(const SeriesMask &condition, double other) const {
arma::vec result = values();
result.elem(find((!condition).values())).fill(other);
return Series(result, index());
}
Series Series::diff() const {
arma::uword resultSize = values().size();
arma::vec resultv(resultSize);
double previousValue = NAN;
for (arma::uword idx = 0; idx < resultSize; idx++) {
resultv[idx] = values()[idx] - previousValue;
previousValue = values()[idx];
}
return Series(resultv, index());
}
Series Series::abs() const {
return Series(arma::abs(values()), index());
}
double Series::quantile(double q) const {
return polars::numc::quantile(values(), q);
}
Series Series::fillna(double value) const {
arma::vec vals = values();
vals.replace(arma::datum::nan, value);
return Series(vals, index());
}
Series Series::dropna() const {
// Get indices of finite elements
arma::uvec indices = arma::sort(arma::join_cols(
arma::find_finite(values()),
arma::find(arma::abs(values()) == arma::datum::inf))
);
return Series(values().elem(indices), index().elem(indices));
}
arma::vec calculate_window_weights(
polars::WindowProcessor::WindowType win_type,
arma::uword windowSize
) {
switch (win_type) {
case (polars::WindowProcessor::WindowType::none):
return arma::ones(windowSize);
case (polars::WindowProcessor::WindowType::triang):
return polars::numc::triang(windowSize);
default:
return arma::ones(windowSize);
}
}
// TODO: This method needs to be re-factored.
arma::vec _ewm_correction(const arma::vec &results, const arma::vec &vals,
polars::WindowProcessor::WindowType win_type) {
        /* Shifts the result of a rolling average computed with an exponential window so that the
         * normalisation is correct, allowing ewm to reuse the rolling machinery implemented here.
         * This matches pandas ewm in its default case. */
arma::uvec res_fin = arma::find_finite(results);
if (results.empty() || res_fin.empty()){
arma::vec effective_results(vals.size());
effective_results.fill(NAN);
return effective_results;
}
if (win_type == polars::WindowProcessor::WindowType::expn) {
// Correction to match pandas ewm - shift by one.
arma::vec effective_results = results;
arma::vec results_ewm;
int missing_initial_nans = 0;
// Check if it was a case of originally front NANs - no added padding
arma::uvec finite_values_idx = arma::find_finite(vals);
if(finite_values_idx[0] != 0){
// we had originally some NANs.
missing_initial_nans = finite_values_idx[0];
}
arma::vec effective_vals = vals.subvec(missing_initial_nans, vals.size() - 1);
// For the cases in which window_size < size we need to correct
if( (results.size() > effective_vals.size())){
effective_results = effective_results.elem( arma::find_finite(effective_results) );
effective_results = effective_results.head(effective_vals.size());
}
if ( effective_results.at(0) == effective_vals[0] ){
// difference of 1 for center = False.
results_ewm = effective_results;
} else {
results_ewm.copy_size(effective_results) ;
results_ewm.at(0) = vals[0];
for (int j = 1; j < results_ewm.n_elem; j++) {
results_ewm[j] = effective_results[j - 1];
}
}
if(missing_initial_nans > 0){
auto added_nans = vals.size() - results_ewm.size();
arma::vec pn(added_nans);
pn.fill(NAN);
return arma::join_cols(pn, results_ewm);
} else {
return results_ewm;
}
} else {
return results;
}
}
// TODO: Combine cases here with those in input for exponential case.
polars::Series _window_size_correction(int window_size, bool center, const polars::Series &input){
// This is required because of relative size of window size vs array size.
if(input.size() == 1){
return input;
}
auto n = window_size / 2;
        if(input.size() % 2 == 0){ n = n - 1; }
// Get index delta
auto ts = input.index();
double delta = std::ceil(std::abs(ts(1) - ts(0)));
// Vector with n nans
arma::vec multi_nan(n);
multi_nan.fill(NAN);
// Vector with single nan
arma::vec single_nan(1);
single_nan.fill(NAN);
// Base case is padding goes to the right
arma::vec new_input = arma::join_cols(input.values(), multi_nan);
auto start_idx = ts(0);
auto end_idx = ts(ts.size()-1) + n * delta;
auto effective_size = n + input.size();
if (center == false){
if( window_size > input.size() + 1 ){
new_input = arma::join_cols(multi_nan, input.values());
start_idx = start_idx - n * delta;
end_idx = end_idx - n * delta;
} else if (n == 1) {
new_input = arma::join_cols(single_nan, arma::join_cols(single_nan, input.values()));
start_idx = start_idx - 2 * delta;
end_idx = end_idx - delta;
effective_size = effective_size + 1;
} else if (n == 2) {
new_input = arma::join_cols(single_nan, arma::join_cols(input.values(), single_nan));
start_idx = start_idx - delta;
end_idx = end_idx - delta;
}
}
arma::vec new_timestamps = arma::linspace(start_idx, end_idx, effective_size);
return Series(new_input, new_timestamps);
}
// TODO: Refactor this method and combine with window_size_correction since similar logic
polars::Series _ewm_input_correction(const polars::Series &input){
// only gets called when window_size < input.size() and we have win_type = expn
Series new_input = input;
if(input.empty() || input.dropna().empty()){
return input;
}
// remove front NANs
if(std::isnan(input.values()(0))){
arma::uvec finite_input_idx = arma::find_finite(input.values());
auto number_of_nans = finite_input_idx(0);
new_input = input.iloc(number_of_nans, input.size());
}
arma::vec v(new_input.size());
v.fill(NAN);
arma::vec new_values = arma::join_cols(v, new_input.values());
// Get new timestamps
auto ts = new_input.index();
auto delta = std::ceil(std::abs(input.index()(1) - input.index()(0))); // use original input
auto new_index_0 = ts(0) - new_input.size() * delta;
arma::vec new_timestamps = arma::linspace(new_index_0, ts(ts.size()-1), 2 * new_input.size());
return Series(new_values, new_timestamps);
}
std::tuple<int, int, int, int> _get_interval_edges(int windowSize, int inputSize, bool symmetric, int centerIdx) {
arma::uword centerOffset = round(((float) windowSize - 1) / 2.0);
arma::sword leftIdx = centerIdx - centerOffset;
arma::sword rightIdx = centerIdx - centerOffset + windowSize - 1;
arma::sword weightLeftIdx = 0;
arma::sword weightRightIdx = windowSize - 1;
if (symmetric) {
// This option works for odd windows only.
if (leftIdx < 0) {
arma::sword left_err = leftIdx;
arma::sword right_err = windowSize - 1 - centerIdx - centerOffset;
weightLeftIdx = weightLeftIdx - left_err;
weightRightIdx = weightRightIdx - right_err;
rightIdx = rightIdx - right_err;
leftIdx = leftIdx - left_err;
}
if (rightIdx >= inputSize) {
arma::sword r_clipped = rightIdx - inputSize;
arma::sword left_err = -r_clipped - 1;
arma::sword right_err = rightIdx - (inputSize - 1);
weightLeftIdx = weightLeftIdx - left_err;
weightRightIdx = weightRightIdx - right_err;
leftIdx = leftIdx - left_err;
rightIdx = rightIdx - right_err;
}
} else {
if (leftIdx < 0) {
arma::sword left_err = leftIdx;
weightLeftIdx = weightLeftIdx - left_err;
leftIdx = leftIdx - left_err;
}
if (rightIdx >= inputSize) {
arma::sword right_err = rightIdx - (inputSize - 1);
weightRightIdx = weightRightIdx - right_err;
rightIdx = rightIdx - right_err;
}
}
return {leftIdx, rightIdx, weightLeftIdx, weightRightIdx};
}
Series _align_to_left(Series input, int windowSize) {
// Only need to realign for center=False and windowSize > input.size()
        arma::vec moved_values(windowSize - 1);
        moved_values.fill(NAN);
auto ceil_half = std::ceil((windowSize-1.0)/2.0);
auto rolling_start_idx = ceil_half;
auto rolling_end_idx = input.size() - (windowSize - ceil_half);
arma::vec rolling_values = input.values().subvec(rolling_start_idx, rolling_end_idx);
moved_values.insert_rows(windowSize-1, rolling_values);
return polars::Series(moved_values, input.index());
};
Series
Series::ewm(SeriesSize windowSize, SeriesSize minPeriods, bool center, double alpha) const {
arma::vec input_values = v;
arma::vec input_idx = t;
Series padded_input = _ewm_input_correction(*this);
input_values = padded_input.values();
input_idx = padded_input.index();
// Set window size to be the same as the size of the array
windowSize = this->size();
if (minPeriods == 0) {
minPeriods = windowSize;
}
arma::vec resultv(arma::size(input_values));
for (arma::uword centerIdx = 0; centerIdx < input_idx.size(); centerIdx++) {
int leftIdx, rightIdx, weightLeftIdx, weightRightIdx;
std::tie(
leftIdx, rightIdx, weightLeftIdx, weightRightIdx
) = _get_interval_edges(windowSize, input_idx.size(), false, centerIdx);
arma::vec values = input_values.subvec(leftIdx, rightIdx);
// Define weights vector required for specific windows
arma::vec exponential_weights = reverse(
polars::numc::exponential(windowSize, -1. / log(1 - alpha), false, 0)
);
arma::vec weights(arma::size(values));
weights = exponential_weights.subvec(weightLeftIdx, weightRightIdx);
const Series subSeries = Series(values, input_idx.subvec(leftIdx, rightIdx));
if (subSeries.finiteSize() >= minPeriods) {
resultv(centerIdx) = polars::ExpMean().processWindow(subSeries, weights);
} else {
resultv(centerIdx) = polars::ExpMean().defaultValue();
}
}
Series result = Series(
polars::_ewm_correction(resultv, v, polars::WindowProcessor::WindowType::expn), t);
        if(size() > 0 && windowSize > size()){
return Series(result.values().head(size()), t);
} else {
return result;
}
}
// todo; allow passing in transformation function rather than WindowProcessor.
Series
Series::rolling(SeriesSize windowSize, const polars::WindowProcessor &processor, SeriesSize minPeriods,
bool center, bool symmetric, polars::WindowProcessor::WindowType win_type) const {
//assert(center); // todo; implement center:false
//assert(windowSize > 0);
//assert(windowSize % 2 == 0); // TODO: Make symmetric = true work for even windows. See tests for reference.
arma::vec input_values = v;
arma::vec input_idx = t;
if((size() > 0 && windowSize > size())) {
// This deals with issues of alignment that arises with non linear windows.
Series padded_input = _window_size_correction(windowSize, center, *this);
input_values = padded_input.values();
input_idx = padded_input.index();
}
if (minPeriods == 0) {
minPeriods = windowSize;
}
arma::vec resultv(arma::size(input_values));
// roll a window [left,right], of up to size windowSize, centered on centerIdx, and hand to processor if there are minPeriods finite values.
for (arma::uword centerIdx = 0; centerIdx < input_idx.size(); centerIdx++) {
int leftIdx, rightIdx, weightLeftIdx, weightRightIdx;
std::tie(
leftIdx, rightIdx, weightLeftIdx, weightRightIdx
) = _get_interval_edges(windowSize, input_idx.size(), symmetric, centerIdx);
arma::vec values = input_values.subvec(leftIdx, rightIdx);
// Define weights vector required for specific windows
arma::vec weights(arma::size(values));
weights = polars::calculate_window_weights(win_type, windowSize).subvec(weightLeftIdx, weightRightIdx);
const Series subSeries = Series(values, input_idx.subvec(leftIdx, rightIdx));
if ( subSeries.finiteSize() >= minPeriods) {
resultv(centerIdx) = processor.processWindow(subSeries, weights);
} else {
resultv(centerIdx) = processor.defaultValue();
}
}
Series result = Series(resultv, t);
        if(size() > 0 && windowSize > size()){
return Series(result.values().head(size()), t);
} else {
if( (center == true) || (windowSize <= 2) ){
return result;
} else {
return _align_to_left(result, windowSize);
}
}
}
Window Series::rolling(SeriesSize windowSize,
SeriesSize minPeriods,
bool center,
bool symmetric,
polars::WindowProcessor::WindowType win_type) const {
return Window((*this), windowSize, minPeriods, center, symmetric, win_type);
};
Rolling Series::rolling(SeriesSize windowSize,
SeriesSize minPeriods,
bool center,
bool symmetric) const {
return Rolling((*this), windowSize, minPeriods, center, symmetric);
};
Series Series::clip(double lower_limit, double upper_limit) const {
SeriesMask upper = SeriesMask(values() < upper_limit, index());
SeriesMask lower = SeriesMask(values() > lower_limit, index());
return Series(values(), index()).where(upper, upper_limit).where(lower, lower_limit);
};
Series Series::pow(double power) const {
return Series(arma::pow(values(), power), index());
}
int Series::count() const {
return finiteSize();
}
double Series::sum() const {
arma::vec finites = finiteValues();
if (finites.size() == 0) {
return NAN;
} else {
return arma::sum(finites);
}
}
double Series::mean() const {
arma::vec finites = finiteValues();
if (finites.size() == 0) {
return NAN;
} else {
return arma::mean(finites);
}
}
double Series::std(int ddof) const {
arma::vec finites = finiteValues();
if (ddof < 0) {
ddof = 0;
}
auto n = finites.size();
if (n <= ddof) {
return NAN;
} else {
auto dev = (*this) - this->mean();
auto squared_deviation = dev.pow(2);
return std::pow(squared_deviation.sum() / (n - ddof), 0.5);
}
}
Series::SeriesSize Series::size() const {
//assert(index().size() == values().size());
return index().size();
}
arma::vec Series::finiteValues() const {
return values().elem(find_finite(values()));
}
Series::SeriesSize Series::finiteSize() const {
//assert(index().size() == values().size());
return finiteValues().size();
}
// done this way so default copy / assignment works.
// todo; make copies of indices share memory as they are const?
const arma::vec Series::index() const {
return t;
}
const arma::vec Series::values() const {
return v;
}
bool Series::equal(const Series &lhs, const Series &rhs) {
return lhs.equals(rhs);
}
bool Series::almost_equal(const Series &lhs, const Series &rhs) {
return lhs.almost_equals(rhs);
}
bool Series::not_equal(const Series &lhs, const Series &rhs) {
return !lhs.equals(rhs);
}
Series Series::apply(double (*f)(double)) const {
arma::vec vals = values();
vals.transform([=](double val) { return (f(val)); });
return Series(vals, index());
}
Series Series::index_as_series() const {
return Series(index(), index());
}
std::map<double, double> Series::to_map() const {
std::map<double, double> m;
// put pairs into map
for (int i = 0; i < size(); i++) {
m.insert(std::make_pair(index()[i], values()[i]));
}
return m;
}
bool Series::empty() const {
        return (index().is_empty() && values().is_empty());
}
// TODO: Modify head once iloc has been refactored to accept slicing logic.
Series Series::head(int n) const {
Series ser(values(), index());
if(n >= ser.size()){
return ser;
} else {
arma::uvec indices = arma::conv_to<arma::uvec>::from(polars::numc::arange(0, n));
return ser.iloc(indices);
}
}
// TODO: Modify tail once iloc has been refactored to accept slicing logic.
Series Series::tail(int n) const {
Series ser(values(), index());
if(n >= ser.size()){
return ser;
} else {
arma::uword l = ser.size() - n;
arma::uvec indices = arma::conv_to<arma::uvec>::from(polars::numc::arange(l, ser.size()));
return ser.iloc(indices);
}
}
Series Series::arctan2(const Series &lhs, const Series &rhs) {
arma::vec x = lhs.values();
arma::vec y = rhs.values();
arma::vec result = numc::arctan2(x, y);
return Series(result, lhs.index());
}
Series Series::concat(const Series &lhs, const Series &rhs) {
arma::vec expanded_values = lhs.values();
arma::vec expanded_indices = lhs.index();
expanded_values.insert_rows(lhs.values().size(), rhs.values());
expanded_indices.insert_rows(lhs.values().size(), rhs.index());
return Series(expanded_values, expanded_indices);
}
/**
* Add support for pretty printing of a Series object.
* @param os the output stream that will be written to
* @param ts the Series instance to output
* @return the ostream for further piping
*/
std::ostream &operator<<(std::ostream &os, const Series &ts) {
if(ts.size() >= 5){
os << "Series:\nindices\n" << ts.head(5).index() << "values\n" << ts.head(5).values();
os << "\n....\n";
os << "Series:\nindices\n" << ts.tail(5).index() << "values\n" << ts.tail(5).values();
return os;
} else {
return os << "Series:\nindices\n" << ts.index() << "values\n" << ts.values();
}
}
}; // polars
/*
Copyright (c) 2014, Colorado State University
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall the copyright holder or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused and on
any theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use of this
software, even if advised of the possibility of such damage.
*/
package edu.colostate.cs.galileo.event;
import edu.colostate.cs.galileo.serialization.SerializationInputStream;
import edu.colostate.cs.galileo.serialization.SerializationOutputStream;
import java.io.IOException;
/**
* Encapsulates a raw (byte[] based) event that includes a String representing
* the event synopsis. This can be used to essentially 'tag' particular blobs
* of data without writing specific events.
*
* @author malensek
*/
public class EventWithSynopsis implements Event {
private String synopsis;
private byte[] data;
private boolean compress = false;
public EventWithSynopsis(String synopsis, byte[] data) {
this.synopsis = synopsis;
this.data = data;
}
public String getSynopsis() {
return this.synopsis;
}
public byte[] getPayload() {
return this.data;
}
/**
* Enables compression when serializing this event. When deserializing,
* this setting has no effect.
*/
public void enableCompression() {
this.compress = true;
}
/**
* Disables compression when serializing this event. This is the default
* behavior. When deserializing, this setting has no effect.
*/
public void disableCompression() {
this.compress = false;
}
@Deserialize
public EventWithSynopsis(SerializationInputStream in)
throws IOException {
this.synopsis = in.readString();
this.data = in.readCompressableField();
}
@Override
public void serialize(SerializationOutputStream out)
throws IOException {
out.writeString(synopsis);
out.writeCompressableField(data, compress);
}
}
// newK8sControllerUpdaterViaPod returns a k8sControllerUpdater based on the parent kind of a pod
func newK8sControllerUpdaterViaPod(clusterScraper *cluster.ClusterScraper, pod *api.Pod, ormClient *resourcemapping.ORMClient) (*k8sControllerUpdater, error) {
ownerInfo, _, _, err := clusterScraper.GetPodControllerInfo(pod, true)
if err != nil {
return nil, fmt.Errorf("failed to get parent info of pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
if discoveryutil.IsOwnerInfoEmpty(ownerInfo) {
return nil, fmt.Errorf("pod %s/%s does not have controller", pod.Namespace, pod.Name)
}
return newK8sControllerUpdater(clusterScraper, ormClient, ownerInfo.Kind, ownerInfo.Name, pod.Name, pod.Namespace)
}
import { Injectable } from '@angular/core';
import { i18nUS } from '../../i18n/en-us';
import { i18nZH } from '../../i18n/zh-cn';
/**
 * Supported UI language types for internationalization
 */
export const enum LANGUAGE_TYPE {
  // ZH means the UI language is Chinese
ZH = 0,
  // EN means the UI language is English
EN = 1,
}
@Injectable({
providedIn: 'root'
})
export class I18nService {
constructor() { }
  /** Shared helper for detecting the VS Code UI language type.
   * Returns LANGUAGE_TYPE.EN when the UI language is English,
   * LANGUAGE_TYPE.ZH when the UI language is anything other than English,
   * and LANGUAGE_TYPE.ZH by default.
   */
public static getLang() {
const language: string = ((self as any).webviewSession || {}).getItem('language');
if (language) {
return language.indexOf('en') !== -1 ? LANGUAGE_TYPE.EN : LANGUAGE_TYPE.ZH;
} else {
return LANGUAGE_TYPE.ZH;
}
}
/**
* I18n
*/
I18n(): any {
const lang: string = (self as any).webviewSession.getItem('language');
if (lang) {
return lang === 'zh-cn' ? i18nZH : i18nUS;
} else {
return i18nZH;
}
}
  /**
   * I18nReplace
   * @param i18nText template string containing {0}-style placeholders
   * @param contentObj object whose values fill the placeholders in order
   */
I18nReplace(i18nText: any, contentObj: any) {
const matchedArr = i18nText.match(/\{\d+\}/g);
let replaceStr = i18nText;
    if (matchedArr && matchedArr.length > 0) {
const keys = Object.keys(contentObj);
for (let i = 0; i < matchedArr.length; i++) {
const value = contentObj[keys[i]];
replaceStr = replaceStr.replace(/\{\d+\}/, value);
}
}
return replaceStr;
}
  /**
   * Determines whether an element's text overflows, based on its width
   * @param $element the element to measure
   * @param width optional width to compare against (defaults to the element's own width)
   */
isEleTextOverflow($element: any, width?: any) {
if (!$element || $element.length < 1) {
return false;
}
const $tempNode = $element.clone();
$tempNode.css({
overflow: 'visible',
position: 'absolute',
visibility: 'hidden',
'max-width': 'none',
width: 'auto'
});
$('body').append($tempNode);
const tempNodeWidth = $tempNode.width();
$tempNode.remove();
if (width) {
return tempNodeWidth > width;
}
return tempNodeWidth > $element.width();
}
}
#!/usr/bin/python3
"""
Exercise 9.6. Write a function called is_abecedarian that returns True if the letters in a word
appear in alphabetical order (double letters are ok). How many abecedarian words are there?
"""
def is_abecedarian(word):
i = 0
while i < len(word)-1:
if word[i+1] < word[i]:
return False
i = i+1
return True
fin = open('words.txt')
count = 0
for line in fin:
    word = line.strip()  # strip the trailing newline so it doesn't break the comparison
    if is_abecedarian(word):
        print(word)
        count = count + 1
print('Total: ' + str(count))
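
For comparison, the same predicate can be written as a single expression over adjacent letter pairs; a sketch of an equivalent version:

def is_abecedarian_zip(word):
    # Every adjacent pair of letters must be in non-decreasing order.
    return all(a <= b for a, b in zip(word, word[1:]))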
The problem, if I am being honest, is me.
I'm to blame. My fault. My bad, as the kids used to say.
Not just me, mind you. There are a lot of us. But if you want to point your digital finger at someone, look right here. It's on me. I only ask that you use proper spelling in the comments.
The issue at hand, of course, is information. Specifically, fantasy football information. There are gobs of it. Enormous mountains delivered daily to whatever device you have, wherever you are. Twitter and nonstop tickers scrolling by and text alerts from your 15 different apps. At the Worldwide Leader, we have people analyzing every dropback, every two-tight end set, every hamstring tweak, every coach's press conference. Is there a guy in your league who doesn't follow Adam or Mort? Exactly. You don't even need me to use their full names to know whom I'm talking about.
But back when fantasy football started, there was no internet, no mobile phones, no breaking alerts ... West Coast guys had to wait until Wednesday to score their games by hand because the Monday Night game ended too late to make the newspaper (if you don't know what that is, ask your dad). There were no websites to check your stats and, frankly ... no guys like me.
That's what I mean when I say I am to blame. Before you played the game online, before there was a 24-hour news cycle, before there was ever such a thing as a fantasy football analyst, getting information actually meant having an advantage. Which receiver the QB likes, which handcuff you want for your stud running back, who's gonna get the goal-line looks. If you didn't live in that NFL team's market, that info was hard to get.
That is no longer an issue. Everything you could ever want to know is available. Need to know Tom Brady's pet's name? (It's a pit bull named Lua). How about what Jamaal Charles drives? (It's a Lamborghini Gallardo LP-550 2). Or even what Vincent Jackson just had for lunch? (Tuna tartare -- it was delicious). There is no piece of information you can't find out and find out quickly. And that's before you even get to the stats. Stats about players and teams and trends. Stats about situations and schemes, and stats about which stats are more statistically relevant than other stats.
Getting intel is no longer the concern. There's no advantage because everyone has access to the same information. If you are in any kind of real league in which people are paying attention, the only advantage might be a faster Internet connection or a better smartphone to make a quick pick up in the dying seconds before game time. Otherwise, it's 10 or 12 guys looking at all the same names, numbers and rankings.
The key to winning, then, is parsing that data. Figuring out what to believe and what to ignore. Because, as I'll be the first to tell you every season, stats can say anything you want them to.
Consider the case of these two quarterbacks.
"QB1" was a fantasy stud last year. He finished as a top-10 quarterback and had more 30-point games than any quarterback not named Peyton or Drew. In fact, according to Tristan H. Cockcroft's 2013 consistency rankings, those two quarterbacks (Peyton Manning and Drew Brees) were the only two who had more "stud" games than this guy. ("Stud" being defined as a QB who was top-two at his position for the week). His attempts, completions, touchdowns, yards, QB rating and QBR have improved every year he's been in the league. He has one of the most talented receiving corps around, including the leading wide receiver in end zone catch percentage (among qualified wideouts). Top 10 in the NFL in pass attempts last year, he's the leader of a high-octane offense that was sixth in the league in total points. And he has started every game of his career, so it's easy to see why this 4,000-yard passer was a high draft pick. He's coming off his best professional year ever, and considering he's still fairly young, the best is yet to come. Draft him high, and ride the wave.
On the other hand, "QB2" is being drafted well outside the top 10 this year, and it's no shock why. Per Tristan's same consistency rankings, Geno Smith, Eli Manning and Chad Henne were the only quarterbacks who were "stiffs" more often last season. ("Stiff" being defined as someone who ranked among the worst at his position, thus making almost any waiver wire option a better choice.) His interceptions have increased every year he has been in the league, his completion percentage decreased from the previous season, and his QBR was just four tenths of a point better than Ryan Fitzpatrick's. I repeat: Ryan Fitzpatrick. It's not just fantasy owners who have questions about this quarterback. His own team hasn't signed him to an extension yet, and in fact, he will be a free agent after this season. Considering how QB-starved the NFL is, it speaks volumes that his team is willing to let him walk. With the fifth most interceptions in the NFL last year, it's not surprising his team just hired a new offensive coordinator known for running the ball; in his latest job as a playcaller, this coach was top four in the NFL in rush attempts and rush yards. Hand the ball off and don't lose this for us, they seem to be saying. Something you don't want them to say about your quarterback as a fantasy owner. Look elsewhere.
Now, everything I wrote about for each player is 100 percent true. So tell me ... which QB do you want?
Before you answer, you should know that both quarterbacks are Andy Dalton.
Yeah.
You see, I can talk up or talk down anyone; I just have to choose the right stats for the job. Or just ask John Parolin and Zach Rodgers of ESPN Stats & Information to get me the right numbers for the job, as I did at many different points while writing this column. They are both stats studs. Everything you're about to read is heavily researched and thought out -- a 100 percent true, can't be argued with, fully vetted fact.
But they're only some of the facts. The facts that support whatever opinion I have of a player. Listen, there's very little in this world that I am good at, but one thing at which I am truly fantastic? Manipulating stats to tell the story I want to tell. For instance, in a little bit I'm going to use some Scott Linehan stats to talk up Tony Romo's prospects. When I do, I will conveniently leave out the not very impressive numbers from Matthew Stafford's first two seasons and the season Linehan was calling plays for Gus Frerotte.
I'm going to do that because Stafford's first two years were marred by injury, and Gus Frerotte wasn't very good. Calling a lot of passing plays does no good if the guy passing can't make the throws. So I'll leave those stats out because I don't think they are relevant (or helpful) to the point I'm trying to lead you to, which is that Scott Linehan is going to help Tony Romo have top-10 fantasy numbers this year.
If you're having a bit of deja vu, it's because I make this same confession at the top of this column every year. I want to be truthful about everything, so I happily cop to trying to manipulate you because I feel it's important. Extremely important. Throughout this preseason, you will have countless analysts give you all sorts of reasons to draft this guy or avoid that one, so I want you to be aware that every stat thrown at you is really just reflective of an opinion. Your job? Figure out which analysts you trust and whose thinking aligns with yours, question everyone and everything you hear, take it all in, and then make your own call.
Ultimately, that's all any of us is doing: taking a small piece of a big picture and making a call.
Everything that follows is completely accurate. Some is about players, some about tendencies, and not a damn bit of it tells the whole story.
These are 100 facts you need to know before you draft. What you do with them is up to you.
1. Over the past two seasons, when Rob Gronkowski is off the field, Tom Brady's completion percentage is 59, his yards per attempt is 6.8, and he has a 26-to-11 touchdown-to-interception ratio.
2. Over the past two years, when Rob Gronkowski has played, Tom Brady completes 65 percent of his passes and has 7.7 yards per attempt and a 33-to-8 touchdown-to-interception ratio.
Things just didn't go Tom Brady's way in 2013, and that's a fact.
3. Last year, Patriots wide receivers were tackled at the 1-yard line eight times.
4. That was three more times than any other team.
5. If you regress that to the league average (3.3), Tom Brady would have had four additional touchdown passes and would have finished as the eighth best fantasy QB.
6. Last year, only one quarterback had more drops from his pass-catchers than Tom Brady.
7. The quarterback who had more passes dropped than Tom Brady? Matthew Stafford. Detroit had 46 drops last season -- 10 more than any other team.
8. Those 46 drops accounted for 7.5 percent of Detroit's total targets, which is also highest in the NFL.
9. Last year, Golden Tate was targeted 94 times.
10. And had two drops. Two.
11. With Scott Linehan calling plays as his offensive coordinator from 2002 to 2004, no quarterback had more fantasy points than Daunte Culpepper's 888.
12. With Scott Linehan calling plays as his offensive coordinator from 2011 to 2013, only four quarterbacks had more fantasy points than Matthew Stafford's 863.
13. In 2014, Scott Linehan will call plays for Tony Romo, who is currently being drafted outside the top 10 at QB.
14. Matt Ryan was under pressure on 154 pass attempts last season.
15. That was the most in the NFL and 31 more than the next QB.
16. Prior to last season, Ryan had never attempted more than 94 passes under pressure.
17. This offseason, the Falcons drafted RT Jake Matthews in the first round, added Jon Asamoah from Kansas City, got Sam Baker back from injury and hired Mike Tice as the offensive line coach.
18. During the first five weeks of the past season (the only time Julio Jones and Roddy White were on the field together), Ryan was sixth among QBs in fantasy points.
19. On passes of 15 yards or more, when targeting Kelvin Benjamin, Florida State quarterback Jameis Winston was 23-of-41 (56.1 percent) and averaged 16.5 yards per attempt.
20. Kelvin Benjamin had eight touchdown receptions of 15 or more yards, which tied for the most of any player from an automatic qualifying conference.
21. During Cam Newton's career, Panthers wide receivers have had the seventh deepest average target distance in the league.
22. Per Pro Football Focus, Steve Smith's catch percentage of 62.1 was 52nd among wide receivers this past season.
23. If Cam Newton does not finish this year as a top four fantasy quarterback, it will be the first time in his career that he fails to do so.
24. Including playoffs, without Michael Crabtree in the lineup, Colin Kaepernick has a 66.6 QBR (out of 100), 7.5 yards per attempt, a completion percentage of 56 and an 8.0 sack percentage.
25. Including playoffs, with Michael Crabtree in the lineup, Colin Kaepernick has a 75.5 QBR, 7.8 yards per attempt, a 59.4 completion percentage and a 5.5 sack percentage.
26. If you combine the 2013 fantasy points of Jay Cutler and Josh McCown, the Bears combo QB was the third highest scoring QB.
27. Prior to getting injured in the Week 7 game against Washington, Jay Cutler was tied for sixth in total fantasy points among quarterbacks.
28. Based on 2013 blitz percentage, Jay Cutler and the Bears will face the least blitz-heavy schedule (28.7 percent) in the NFL.
29. Jay Cutler's Total QBR of 75.0 against four or fewer rushers was sixth best in the NFL this past season.
30. Against extra pressure, Cutler's QBR is 57.1, 16th-best in the NFL.
31. In five of his six years as an offensive coordinator, Kyle Shanahan's teams have been ninth or better in pass attempts.
It's hard not to get excited about Johnny Manziel's upside.
32. In two years in the SEC, Johnny Manziel averaged 3,910 passing yards and 32 touchdowns per year. He completed 73.5 percent of his passes from the pocket.
33. Manziel also averaged 1,085 yards rushing and 15 rushing touchdowns.
34. In Cam Newton's final season against the SEC, he threw for 2,854 yards and 30 TD. He ran for 1,473 yards and 20 touchdowns.
35. If you scored Newton's final season in ESPN standard fantasy points, he would have averaged 35.8 per game.
36. And Johnny Manziel would have averaged 37.2.
37. Over the past five years, New York Giants running backs have the second most rushing touchdowns (74) and the 11th most rushing yards (8,337) among NFL corps.
38. Over that span, the Giants have run the ball on 55 percent of their plays from inside the 10-yard line, the seventh highest rate in the NFL.
39. Rashad Jennings is currently being drafted outside the top 20 of running backs.
40. At the end of 2012, there were four players with active streaks of at least three seasons of 250-plus rushes: Steven Jackson, Chris Johnson, Ray Rice and Arian Foster.
41. Jackson, Johnson and Rice all had the lowest yards per carry of their careers, and Foster had the lowest per-game rushing average of his career in 2013.
42. Of those four, only Chris Johnson played all 16 games.
43. Since 2001, there have been 47 instances of a player coming off three straight seasons of 250-plus rushes. The average fantasy output for those 47 the following season was 137.9 points, and that includes six seasons of LaDainian Tomlinson.
44. If you take Tomlinson out, that number dives to 118 points per season.
45. Last year, 118 points from a running back ranked 23rd among the position.
46. Heading into the 2014 season, Marshawn Lynch and Frank Gore (plus the aforementioned Chris Johnson) have an active streak of three straight seasons with at least 250 rushes.
47. More Lynch: Since his first game with Seattle (during the 2010 season) and including postseason games, Lynch has the most carries in the NFL and 53 more than second-place Ray Rice.
48. Last year, 79 percent of Knowshon Moreno's runs came with six or fewer men in the box.
49. Broncos running backs combined for 18 touchdowns in the 2013 regular season.
50. That included 14 rushing touchdowns inside the opponents' 10-yard line, good for second in the NFL.
51. Eight of those touchdowns and 13 of the 18 touchdowns were by Knowshon Moreno, now of the Miami Dolphins.
52. In the second half of last season, Montee Ball was fourth in the NFL in yards after contact per rush (2.55).
53. Over the past three seasons, the New Orleans Saints have targeted running backs 571 times.
54. That's the most in the NFL and 73 more running back targets than the next team.
55. Among New Orleans Saints running backs last year, Darren Sproles had 71 receptions on 89 targets, Mark Ingram had seven, Travis Cadet had two, and Khiry Robinson had zero.
56. And Pierre Thomas had 77 receptions on 84 targets.
57. All of the above running backs will be back with the New Orleans Saints.
58. Except Darren Sproles.
59. An ESPN Stats & Information study of running back fantasy production from 2001 to 2013, over the presumed five-year length of a rookie contract and using a minimum of 100 rushes to qualify, shows that on average the most productive season is the third year.
60. Third-year running backs averaged 150 fantasy points, most of any of the current rookie contract years.
61. Alfred Morris, Doug Martin, Trent Richardson and Lamar Miller are among the notable third-year backs this season.
62. In 2009 -- his third year -- Adrian Peterson caught a career-high 43 balls and had the second highest scoring fantasy season of his career.
63. In 2012, Adrian Peterson caught 40 balls, the second highest in his career and the only other time he had 40 or more receptions in a year. It was the best fantasy season of his career.
64. He played all 16 games in both 2009 and 2012, something he has done only three times in seven NFL seasons.
65. In Norv Turner's final three seasons with the Chargers, they ranked second in the NFL in targets and receptions by running backs, second only to the Saints.
66. Over the past three years, among running backs with at least 175 rushes, only three running backs have a higher yards after contact per rush than ... Toby Gerhart.
67. From 2010 to 2011, only three teams in the NFL had more rushing attempts or more rushing touchdowns, and no team in the NFL had more rushing yards than the Oakland Raiders, who had Hue Jackson first as offensive coordinator and then head coach.
68. Last year, there was only one running back in the NFL who had at least 15 red zone opportunities (rushes and targets) without scoring a touchdown: C.J. Spiller.
69. Meanwhile, only one running back with at least 15 red zone opportunities converted at least one-third of them into touchdowns: Donald Brown, who was six for 18 (33 percent).
70. More red zone: Ryan Mathews and Danny Woodhead converted just 13 of 71 opportunities in the red zone (18 percent).
71. Last year, Dez Bryant was tackled inside the 5-yard line seven times, most in the NFL.
72. If Dez had scored on three of those plays, he still would have tied for the seventh-most plays ending with a tackle inside the 5.
73. But he would have tied Calvin Johnson in fantasy points.
While you were ooh-ing and aah-ing at Josh Gordon's big games in the second half, Antonio Brown was also busy putting up seven double-digit performances in the season's final nine weeks. Charles LeClaire/USA TODAY Sports
74. During the second half of last season, only Josh Gordon had more fantasy points among wide receivers than ... Antonio Brown.
75. Among Steelers wideouts last year, Emmanuel Sanders and Jerricho Cotchery combined for 16 receiving touchdowns.
76. With Emmanuel Sanders now playing for the Broncos and Jerricho Cotchery now playing for the Panthers, the remaining Steelers with the most receiving touchdowns after Antonio Brown last year are Heath Miller, Matt Spaeth, Will Johnson and Derek Moye. They each had one.
77. In Joe Flacco's first five NFL seasons, he completed 39 percent of deep (15-plus yards) passes. He averaged 9.4 deep touchdowns to only four deep interceptions.
78. Last season, Joe Flacco completed only 28 percent of deep passes, with just three deep touchdowns and nine interceptions.
79. Last season, Torrey Smith set career highs in targets, receptions and yards on deep throws ... and yet had zero deep touchdowns.
80. Torrey Smith had at least five deep touchdowns in both of his first two NFL seasons.
81. If Torrey Smith had had just three deep touchdowns last year, he would've tied Pierre Garcon in fantasy points (14th).
82. This marks the third consecutive season I have talked up Torrey Smith. I might have a problem.
83. Only Dez Bryant and Brandon Marshall had more touchdown catches on end zone throws than ... Marvin Jones, who was 9 for 9.
84. Jones was the only player in the league with a perfect catch percentage on end zone throws (minimum 5). A.J. Green, incidentally, was 7-for-23.
85. In 2012, 6-foot-3, 225-pound Josh Gordon's rookie year, he caught 53.2 percent of his targets, averaged 6.0 yards after catch per reception and averaged 18.8 targets per touchdown reception.
86. In 2013, 6-foot-2, 220-pound Cordarrelle Patterson's rookie year, he caught 60 percent of his targets, averaged 6.2 yards after catch per reception and averaged 18.8 targets per touchdown reception.
87. During the final five weeks of the past season, Cordarrelle Patterson was tied with Demaryius Thomas and A.J. Green for fourth in fantasy points among wide receivers.
88. Last year, Josh Gordon's offensive coordinator was Norv Turner. This year, Norv Turner is the offensive coordinator for Cordarrelle Patterson's Vikings.
89. Over the final eight weeks of the season, only four wide receivers had more receptions than Kendall Wright's 51: Pierre Garcon, Julian Edelman, Josh Gordon and Andre Johnson.
90. Wright caught 67.1 percent of his targets over that span, fourth among the 31 wideouts who had at least 50 targets.
91. Only nine wide receivers had at least 90 receptions last year. Eight of them had at least five touchdowns, with an average of nine scores per player. The ninth, Kendall Wright, had two.
92. Give Kendall Wright five total touchdowns instead of two (and assume the three touchdowns are at least a total of 10 yards combined), and he's a top-20 wide receiver tied with T.Y. Hilton and Torrey Smith.
93. Over the second half of the past season, only Jimmy Graham had more tight end targets than ... Greg Olsen.
94. After Olsen, the current Panther with the most receptions last year is Mike Tolbert, with 27.
95. Since 2007, the top tight end in Norv Turner's offense has never scored fewer than seven touchdowns and only once has fallen short of 700 yards. This year, Norv's top tight end is 6-foot-6 Kyle Rudolph. |
// GetFunctions gets the functions property value.
func (m *Workbook) GetFunctions()(*WorkbookFunctions) {
if m == nil {
return nil
} else {
return m.functions
}
} |
# malib/utils/metrics.py
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from typing import Sequence, List, Dict, Any
from operator import mul
from functools import reduce
from malib import settings
from malib.utils.typing import AgentID, MetricType, PolicyID, MetricEntry
from malib.utils.aggregators import Aggregator
def to_metric_entry(data: Dict[str, Any], prefix=""):
"""Convert a dict of metrics to a dict or metric entries.
:param Dict[str,Any] data: Raw metrics dict.
:return: A dict of metric entries.
"""
res: Dict[str, MetricEntry] = {}
for k, v in data.items():
if isinstance(v, MetricEntry):
res[k] = v
else:
res[k] = MetricEntry(
value=v,
agg="mean",
tag=f"{prefix}/{k}",
log=settings.STATISTIC_FEEDBACK,
)
return res
class Metric(metaclass=ABCMeta):
def __init__(self, agents: List[AgentID]):
self._agents = agents
self._episode_data = dict()
self._statistics = dict()
# single for sequential, vector for simultaneous
self._mode = "single"
@property
def step_mode(self) -> str:
return self._mode
@abstractmethod
def step(self, agent_id, policy_id, **kwargs) -> None:
"""Record sampled data and save to do evaluation."""
@abstractmethod
def parse(self, agent_filter=None) -> Dict[AgentID, Dict[str, MetricEntry]]:
"""Parse episode data and filter with given keys (agent level)"""
@staticmethod
def merge_parsed(
agent_result_seq: Sequence[Dict[AgentID, Any]]
) -> Dict[AgentID, Dict[str, float]]:
"""Merge multiple evaluated results."""
def reset(self, mode="single"):
self._mode = mode
self._episode_data = dict()
self._statistics = dict()
class SimpleMetrics(Metric):
def __init__(self, agents: List[AgentID]):
super(SimpleMetrics, self).__init__(agents)
self._episode_data = {
MetricType.REWARD: defaultdict(lambda: []),
}
self._statistics = defaultdict(
lambda: defaultdict(
lambda: MetricEntry(value=0, agg="mean", tag="", log=False)
)
)
self._pids = {}
def step(self, agent_id, policy_id, **kwargs) -> None:
self._episode_data[MetricType.REWARD][agent_id].append(kwargs["reward"])
self._pids[agent_id] = policy_id
def parse(self, agent_filter=None) -> Dict[AgentID, Dict[str, MetricEntry]]:
for item_key, agent_data in self._episode_data.items():
# if filter is not None use filter else agents
for aid in agent_filter or self._agents:
if item_key == MetricType.REWARD:
if self.step_mode == "vector":
agent_data[aid] = [
sum(e) / max(1, len(e)) for e in agent_data[aid]
]
self._statistics[aid][MetricType.REWARD] = MetricEntry(
value=sum(agent_data[aid]),
agg=Aggregator.MEAN,
tag=f"{self._pids[aid]}/{MetricType.REWARD}"
if self._pids.get(aid) is not None
else MetricType.REWARD,
log=True,
)
return self._statistics
@staticmethod
def merge_parsed(
agent_result_seq: Sequence,
) -> Dict[AgentID, Dict[str, MetricEntry]]:
"""Aggregates a sequence of evaluated results in average, and return an agent dict."""
agent_res = {}
for agent_result in agent_result_seq:
for agent_id, result in agent_result.items():
if agent_res.get(agent_id, None) is None:
tmp = result[MetricType.REWARD]
agent_res[agent_id] = {
MetricType.REWARD: MetricEntry(
                            value=0.0,  # start from zero; the average is accumulated below
agg=tmp.agg,
tag=tmp.tag,
log=tmp.log,
)
}
agent_res[agent_id][MetricType.REWARD].value += result[
MetricType.REWARD
].value / len(agent_result_seq)
return agent_res
def reset(self, mode: str = "single"):
self._mode = mode
self._episode_data = {
MetricType.REWARD: defaultdict(lambda: []),
}
self._statistics = defaultdict(
lambda: defaultdict(
lambda: MetricEntry(value=0, agg="mean", tag="", log=False)
)
)
class JointDistMetric(Metric):
class Meta:
REWARD = MetricType.REWARD
ACTION_DIST = "action_dist"
def __init__(self, agents: List[AgentID]):
# must be list here
agents = list(agents)
super(JointDistMetric, self).__init__(agents)
self._episode_data = {
MetricType.REWARD: defaultdict(lambda: []),
"action_dist": defaultdict(lambda: []),
}
self._statistics = defaultdict(
lambda: defaultdict(
lambda: MetricEntry(value=0, agg="mean", tag="", log=False)
)
)
self._pids = {}
def step(self, agent_id, policy_id, **kwargs):
self._episode_data[self.Meta.REWARD][agent_id].append(kwargs[MetricType.REWARD])
self._episode_data[self.Meta.ACTION_DIST][agent_id].append(
kwargs[self.Meta.ACTION_DIST]
)
self._pids[agent_id] = policy_id
def _cum_reward_on_joint_dist(self, main, others):
"""Calculate cumulative reward using joint policy distribution"""
rewards = self._episode_data[MetricType.REWARD][main]
all_dist = self._episode_data[self.Meta.ACTION_DIST]
main_dist = [0.0 for _ in range(len(all_dist[main]))]
if len(others):
for i, _ in enumerate(main_dist):
main_dist[i] = reduce(mul, [1.0] + [all_dist[o][i] for o in others])
else:
# return all ones
main_dist = [1.0] * len(main_dist)
        # head rewards from sequential mode are unused; only the trailing entries align with the dists
total_reward = sum(
[r * dist for dist, r in zip(main_dist, rewards[-len(main_dist) :])]
)
return total_reward
def parse(self, agent_filter=None) -> Dict[AgentID, Dict[str, MetricEntry]]:
"""Parse episode data, return an agent wise MetricEntry dictionary"""
# if filter is not None use filter else agents
for i, aid in enumerate(agent_filter or self._agents):
others = self._agents[:i] + self._agents[i + 1 :]
if self._pids.get(aid) is not None:
prefix = f"{aid}/{self._pids[aid]}"
else:
prefix = f"{aid}"
self._statistics[aid][MetricType.REWARD] = MetricEntry(
value=self._cum_reward_on_joint_dist(aid, others),
agg="mean",
tag=f"{prefix}/{MetricType.REWARD}",
log=True,
)
return self._statistics
@staticmethod
def merge_parsed(
agent_result_seq: Sequence[Dict[AgentID, Any]]
) -> Dict[AgentID, Dict[str, float]]:
agent_res = {}
for agent_result in agent_result_seq:
for agent_id, result in agent_result.items():
if agent_res.get(agent_id, None) is None:
agent_res[agent_id] = {
MetricType.REWARD: 0,
}
if isinstance(result[MetricType.REWARD], MetricEntry):
e = result[MetricType.REWARD].value
else:
e = result[MetricType.REWARD]
agent_res[agent_id][MetricType.REWARD] += e / len(agent_result_seq)
return agent_res
    def reset(self, mode: str = "single"):
        self._mode = mode
        self._episode_data = {
            MetricType.REWARD: defaultdict(lambda: []),
            self.Meta.ACTION_DIST: defaultdict(lambda: []),
        }
        self._statistics = defaultdict(lambda: {MetricType.REWARD: 0.0})
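# Registry of available metric handlers, keyed by short name.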
METRIC_TYPES = {"simple": SimpleMetrics, "jointdist": JointDistMetric}
def get_metric(metric_type: str):
"""Return a metric handler with given name.
:param str metric_type: Registered metric type.
"""
return METRIC_TYPES[metric_type]
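# A minimal usage sketch of the handlers above; the agent ids, policy ids and
# reward values are made up for illustration.
if __name__ == "__main__":
    metric = get_metric("simple")(agents=["agent_0", "agent_1"])
    for reward in [1.0, 0.5, 2.0]:
        metric.step("agent_0", "policy_0", reward=reward)
        metric.step("agent_1", "policy_1", reward=-reward)
    stats = metric.parse()
    # Each entry is a MetricEntry whose value is the episode return.
    print(stats["agent_0"][MetricType.REWARD].value)  # 3.5
    print(stats["agent_1"][MetricType.REWARD].value)  # -3.5
    metric.reset()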
|
/**
* Class to test the {@link NonBlockingRouter}
*/
public class NonBlockingRouterTest {
private static final int MAX_PORTS_PLAIN_TEXT = 3;
private static final int MAX_PORTS_SSL = 3;
private static final int CHECKOUT_TIMEOUT_MS = 1000;
private static final int REQUEST_TIMEOUT_MS = 1000;
private static final int PUT_REQUEST_PARALLELISM = 3;
private static final int PUT_SUCCESS_TARGET = 2;
private static final int GET_REQUEST_PARALLELISM = 2;
private static final int GET_SUCCESS_TARGET = 1;
private static final int DELETE_REQUEST_PARALLELISM = 3;
private static final int DELETE_SUCCESS_TARGET = 2;
private static final int AWAIT_TIMEOUT_MS = 2000;
private static final int PUT_CONTENT_SIZE = 1000;
private int maxPutChunkSize = PUT_CONTENT_SIZE;
private final Random random = new Random();
private NonBlockingRouter router;
private PutManager putManager;
private GetManager getManager;
private DeleteManager deleteManager;
private AtomicReference<MockSelectorState> mockSelectorState = new AtomicReference<MockSelectorState>();
private final MockTime mockTime;
private final MockClusterMap mockClusterMap;
  // Request params.
BlobProperties putBlobProperties;
byte[] putUserMetadata;
byte[] putContent;
ReadableStreamChannel putChannel;
/**
* Initialize parameters common to all tests.
* @throws Exception
*/
public NonBlockingRouterTest() throws Exception {
mockTime = new MockTime();
mockClusterMap = new MockClusterMap();
NonBlockingRouter.currentOperationsCount.set(0);
}
@After
public void after() {
Assert.assertEquals(0, NonBlockingRouter.currentOperationsCount.get());
}
/**
   * Constructs and returns a {@link Properties} instance with the defaults required for instantiating
   * the {@link NonBlockingRouter}.
   * @param routerDataCenter the name of the local data center to set for the router.
   * @return the created Properties instance.
*/
private Properties getNonBlockingRouterProperties(String routerDataCenter) {
Properties properties = new Properties();
properties.setProperty("router.hostname", "localhost");
properties.setProperty("router.datacenter.name", routerDataCenter);
properties.setProperty("router.put.request.parallelism", Integer.toString(PUT_REQUEST_PARALLELISM));
properties.setProperty("router.put.success.target", Integer.toString(PUT_SUCCESS_TARGET));
properties.setProperty("router.max.put.chunk.size.bytes", Integer.toString(maxPutChunkSize));
properties.setProperty("router.get.request.parallelism", Integer.toString(GET_REQUEST_PARALLELISM));
properties.setProperty("router.get.success.target", Integer.toString(GET_SUCCESS_TARGET));
properties.setProperty("router.delete.request.parallelism", Integer.toString(DELETE_REQUEST_PARALLELISM));
properties.setProperty("router.delete.success.target", Integer.toString(DELETE_SUCCESS_TARGET));
properties.setProperty("router.connection.checkout.timeout.ms", Integer.toString(CHECKOUT_TIMEOUT_MS));
properties.setProperty("router.request.timeout.ms", Integer.toString(REQUEST_TIMEOUT_MS));
properties.setProperty("clustermap.cluster.name", "test");
properties.setProperty("clustermap.datacenter.name", "dc1");
properties.setProperty("clustermap.host.name", "localhost");
return properties;
}
/**
* Construct {@link Properties} and {@link MockServerLayout} and initialize and set the
* router with them.
*/
private void setRouter() throws IOException {
setRouter(getNonBlockingRouterProperties("DC1"), new MockServerLayout(mockClusterMap));
}
/**
* Initialize and set the router with the given {@link Properties} and {@link MockServerLayout}
* @param props the {@link Properties}
* @param mockServerLayout the {@link MockServerLayout}
*/
private void setRouter(Properties props, MockServerLayout mockServerLayout) throws IOException {
    VerifiableProperties verifiableProperties = new VerifiableProperties(props);
router = new NonBlockingRouter(new RouterConfig(verifiableProperties), new NonBlockingRouterMetrics(mockClusterMap),
new MockNetworkClientFactory(verifiableProperties, null, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL,
CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime), new LoggingNotificationSystem(), mockClusterMap,
mockTime);
}
private void setOperationParams() {
putBlobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time,
Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM));
putUserMetadata = new byte[10];
random.nextBytes(putUserMetadata);
putContent = new byte[PUT_CONTENT_SIZE];
random.nextBytes(putContent);
putChannel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(putContent));
}
/**
* Test the {@link NonBlockingRouterFactory}
*/
@Test
public void testNonBlockingRouterFactory() throws Exception {
Properties props = getNonBlockingRouterProperties("NotInClusterMap");
    VerifiableProperties verifiableProperties = new VerifiableProperties(props);
try {
router = (NonBlockingRouter) new NonBlockingRouterFactory(verifiableProperties, mockClusterMap,
new LoggingNotificationSystem(), null).getRouter();
Assert.fail("NonBlockingRouterFactory instantiation should have failed because the router datacenter is not in "
+ "the cluster map");
} catch (IllegalStateException e) {
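      // expected: the router data center is not in the cluster map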
}
props = getNonBlockingRouterProperties("DC1");
    verifiableProperties = new VerifiableProperties(props);
router = (NonBlockingRouter) new NonBlockingRouterFactory(verifiableProperties, mockClusterMap,
new LoggingNotificationSystem(), null).getRouter();
assertExpectedThreadCounts(2, 1);
router.close();
assertExpectedThreadCounts(0, 0);
}
/**
* Test Router with a single scaling unit.
*/
@Test
public void testRouterBasic() throws Exception {
setRouter();
assertExpectedThreadCounts(2, 1);
setOperationParams();
// More extensive test for puts present elsewhere - these statements are here just to exercise the flow within the
// NonBlockingRouter class, and to ensure that operations submitted to a router eventually completes.
String blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel).get();
router.getBlob(blobId, new GetBlobOptionsBuilder().build()).get();
router.getBlob(blobId, new GetBlobOptionsBuilder().operationType(GetBlobOptions.OperationType.BlobInfo).build())
.get();
router.deleteBlob(blobId, null).get();
try {
router.getBlob(blobId, new GetBlobOptionsBuilder().build()).get();
} catch (ExecutionException e) {
RouterException r = (RouterException) e.getCause();
Assert.assertEquals("BlobDeleted error is expected", RouterErrorCode.BlobDeleted, r.getErrorCode());
}
router.getBlob(blobId, new GetBlobOptionsBuilder().getOption(GetOption.Include_Deleted_Blobs).build()).get();
router.getBlob(blobId, new GetBlobOptionsBuilder().getOption(GetOption.Include_All).build()).get();
router.close();
assertExpectedThreadCounts(0, 0);
//submission after closing should return a future that is already done.
assertClosed();
}
/**
* Test behavior with various null inputs to router methods.
* @throws Exception
*/
@Test
public void testNullArguments() throws Exception {
setRouter();
assertExpectedThreadCounts(2, 1);
setOperationParams();
try {
router.getBlob(null, new GetBlobOptionsBuilder().build());
Assert.fail("null blobId should have resulted in IllegalArgumentException");
} catch (IllegalArgumentException expected) {
}
try {
router.getBlob("", null);
Assert.fail("null options should have resulted in IllegalArgumentException");
} catch (IllegalArgumentException expected) {
}
try {
router.putBlob(putBlobProperties, putUserMetadata, null);
Assert.fail("null channel should have resulted in IllegalArgumentException");
} catch (IllegalArgumentException expected) {
}
try {
router.putBlob(null, putUserMetadata, putChannel);
Assert.fail("null blobProperties should have resulted in IllegalArgumentException");
} catch (IllegalArgumentException expected) {
}
try {
router.deleteBlob(null, null);
Assert.fail("null blobId should have resulted in IllegalArgumentException");
} catch (IllegalArgumentException expected) {
}
// null user metadata should work.
router.putBlob(putBlobProperties, null, putChannel).get();
router.close();
assertExpectedThreadCounts(0, 0);
//submission after closing should return a future that is already done.
assertClosed();
}
/**
* Test router put operation in a scenario where there are no partitions available.
*/
@Test
public void testRouterPartitionsUnavailable() throws Exception {
setRouter();
setOperationParams();
mockClusterMap.markAllPartitionsUnavailable();
try {
router.putBlob(putBlobProperties, putUserMetadata, putChannel).get();
Assert.fail("Put should have failed if there are no partitions");
} catch (Exception e) {
RouterException r = (RouterException) e.getCause();
Assert.assertEquals("Should have received AmbryUnavailable error", RouterErrorCode.AmbryUnavailable,
r.getErrorCode());
}
router.close();
assertExpectedThreadCounts(0, 0);
assertClosed();
}
/**
* Test router put operation in a scenario where there are partitions, but none in the local DC.
* This should not ideally happen unless there is a bad config, but the router should be resilient and
* just error out these operations.
*/
@Test
public void testRouterNoPartitionInLocalDC() throws Exception {
// set the local DC to invalid, so that for puts, no partitions are available locally.
Properties props = getNonBlockingRouterProperties("invalidDC");
setRouter(props, new MockServerLayout(mockClusterMap));
setOperationParams();
try {
router.putBlob(putBlobProperties, putUserMetadata, putChannel).get();
Assert.fail("Put should have failed if there are no partitions");
} catch (Exception e) {
RouterException r = (RouterException) e.getCause();
Assert.assertEquals(RouterErrorCode.UnexpectedInternalError, r.getErrorCode());
}
router.close();
assertExpectedThreadCounts(0, 0);
assertClosed();
}
/**
* Test RequestResponseHandler thread exit flow. If the RequestResponseHandlerThread exits on its own (due to a
* Throwable), then the router gets closed immediately along with the completion of all the operations.
*/
@Test
public void testRequestResponseHandlerThreadExitFlow() throws Exception {
Properties props = getNonBlockingRouterProperties("DC1");
    VerifiableProperties verifiableProperties = new VerifiableProperties(props);
MockClusterMap mockClusterMap = new MockClusterMap();
MockTime mockTime = new MockTime();
router = new NonBlockingRouter(new RouterConfig(verifiableProperties), new NonBlockingRouterMetrics(mockClusterMap),
new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL,
CHECKOUT_TIMEOUT_MS, new MockServerLayout(mockClusterMap), mockTime), new LoggingNotificationSystem(),
mockClusterMap, mockTime);
assertExpectedThreadCounts(2, 1);
setOperationParams();
mockSelectorState.set(MockSelectorState.ThrowExceptionOnAllPoll);
Future future = router.putBlob(putBlobProperties, putUserMetadata, putChannel);
try {
while (!future.isDone()) {
mockTime.sleep(1000);
Thread.yield();
}
future.get();
Assert.fail("The operation should have failed");
} catch (ExecutionException e) {
Assert.assertEquals(RouterErrorCode.OperationTimedOut, ((RouterException) e.getCause()).getErrorCode());
}
setOperationParams();
mockSelectorState.set(MockSelectorState.ThrowThrowableOnSend);
future = router.putBlob(putBlobProperties, putUserMetadata, putChannel);
Thread requestResponseHandlerThreadRegular = TestUtils.getThreadByThisName("RequestResponseHandlerThread-0");
Thread requestResponseHandlerThreadBackground =
TestUtils.getThreadByThisName("RequestResponseHandlerThread-backgroundDeleter");
if (requestResponseHandlerThreadRegular != null) {
requestResponseHandlerThreadRegular.join(NonBlockingRouter.SHUTDOWN_WAIT_MS);
}
if (requestResponseHandlerThreadBackground != null) {
requestResponseHandlerThreadBackground.join(NonBlockingRouter.SHUTDOWN_WAIT_MS);
}
try {
future.get();
Assert.fail("The operation should have failed");
} catch (ExecutionException e) {
Assert.assertEquals(RouterErrorCode.RouterClosed, ((RouterException) e.getCause()).getErrorCode());
}
assertClosed();
    // Ensure that both operations failed with the right exceptions.
Assert.assertEquals("No ChunkFiller Thread should be running after the router is closed", 0,
TestUtils.numThreadsByThisName("ChunkFillerThread"));
Assert.assertEquals("No RequestResponseHandler should be running after the router is closed", 0,
TestUtils.numThreadsByThisName("RequestResponseHandlerThread"));
Assert.assertEquals("All operations should have completed", 0, router.getOperationsCount());
}
/**
* Test that if a composite blob put fails, the successfully put data chunks are deleted.
*/
@Test
public void testUnsuccessfulPutDataChunkDelete() throws Exception {
// Ensure there are 4 chunks.
maxPutChunkSize = PUT_CONTENT_SIZE / 4;
Properties props = getNonBlockingRouterProperties("DC1");
    VerifiableProperties verifiableProperties = new VerifiableProperties(props);
MockClusterMap mockClusterMap = new MockClusterMap();
MockTime mockTime = new MockTime();
MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
// Since this test wants to ensure that successfully put data chunks are deleted when the overall put operation
// fails, it uses a notification system to track the deletions.
final CountDownLatch deletesDoneLatch = new CountDownLatch(2);
final Map<String, String> blobsThatAreDeleted = new HashMap<>();
LoggingNotificationSystem deleteTrackingNotificationSystem = new LoggingNotificationSystem() {
@Override
public void onBlobDeleted(String blobId, String serviceId) {
blobsThatAreDeleted.put(blobId, serviceId);
deletesDoneLatch.countDown();
}
};
router = new NonBlockingRouter(new RouterConfig(verifiableProperties), new NonBlockingRouterMetrics(mockClusterMap),
new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL,
CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime), deleteTrackingNotificationSystem, mockClusterMap,
mockTime);
setOperationParams();
List<DataNodeId> dataNodeIds = mockClusterMap.getDataNodeIds();
List<ServerErrorCode> serverErrorList = new ArrayList<>();
// There are 4 chunks for this blob.
// All put operations make one request to each local server as there are 3 servers overall in the local DC.
// Set the state of the mock servers so that they return success for the first 2 requests in order to succeed
// the first two chunks.
serverErrorList.add(ServerErrorCode.No_Error);
serverErrorList.add(ServerErrorCode.No_Error);
// fail requests for third and fourth data chunks including the slipped put attempts:
serverErrorList.add(ServerErrorCode.Unknown_Error);
serverErrorList.add(ServerErrorCode.Unknown_Error);
serverErrorList.add(ServerErrorCode.Unknown_Error);
serverErrorList.add(ServerErrorCode.Unknown_Error);
// all subsequent requests (no more puts, but there will be deletes) will succeed.
for (DataNodeId dataNodeId : dataNodeIds) {
MockServer server = mockServerLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort());
server.setServerErrors(serverErrorList);
}
// Submit the put operation and wait for it to fail.
try {
router.putBlob(putBlobProperties, putUserMetadata, putChannel).get();
} catch (ExecutionException e) {
Assert.assertEquals(RouterErrorCode.AmbryUnavailable, ((RouterException) e.getCause()).getErrorCode());
}
// Now, wait until the deletes of the successfully put blobs are complete.
Assert.assertTrue("Deletes should not take longer than " + AWAIT_TIMEOUT_MS,
deletesDoneLatch.await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
for (Map.Entry<String, String> blobIdAndServiceId : blobsThatAreDeleted.entrySet()) {
Assert.assertEquals("Unexpected service ID for deleted blob",
BackgroundDeleteRequest.SERVICE_ID_PREFIX + putBlobProperties.getServiceId(), blobIdAndServiceId.getValue());
}
router.close();
assertClosed();
Assert.assertEquals("All operations should have completed", 0, router.getOperationsCount());
}
/**
* Test that if a composite blob is deleted, the data chunks are eventually deleted. Also check the service IDs used
* for delete operations.
*/
@Test
public void testCompositeBlobDataChunksDelete() throws Exception {
// Ensure there are 4 chunks.
maxPutChunkSize = PUT_CONTENT_SIZE / 4;
Properties props = getNonBlockingRouterProperties("DC1");
    VerifiableProperties verifiableProperties = new VerifiableProperties(props);
RouterConfig routerConfig = new RouterConfig(verifiableProperties);
MockClusterMap mockClusterMap = new MockClusterMap();
MockTime mockTime = new MockTime();
MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
// metadata blob + data chunks.
final AtomicReference<CountDownLatch> deletesDoneLatch = new AtomicReference<>();
final Map<String, String> blobsThatAreDeleted = new HashMap<>();
LoggingNotificationSystem deleteTrackingNotificationSystem = new LoggingNotificationSystem() {
@Override
public void onBlobDeleted(String blobId, String serviceId) {
blobsThatAreDeleted.put(blobId, serviceId);
deletesDoneLatch.get().countDown();
}
};
router = new NonBlockingRouter(routerConfig, new NonBlockingRouterMetrics(mockClusterMap),
new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL,
CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime), deleteTrackingNotificationSystem, mockClusterMap,
mockTime);
setOperationParams();
String blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel).get();
String deleteServiceId = "delete-service";
Set<String> blobsToBeDeleted = getBlobsInServers(mockServerLayout);
int getRequestCount = mockServerLayout.getCount(RequestOrResponseType.GetRequest);
// The second iteration is to test the case where the blob was already deleted.
// The third iteration is to test the case where the blob has expired.
for (int i = 0; i < 3; i++) {
if (i == 2) {
        // Create a clean cluster and put another blob that immediately expires.
setOperationParams();
putBlobProperties = new BlobProperties(-1, "serviceId", "memberId", "contentType", false, 0,
Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM));
blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel).get();
Set<String> allBlobsInServer = getBlobsInServers(mockServerLayout);
allBlobsInServer.removeAll(blobsToBeDeleted);
blobsToBeDeleted = allBlobsInServer;
}
blobsThatAreDeleted.clear();
deletesDoneLatch.set(new CountDownLatch(5));
router.deleteBlob(blobId, deleteServiceId, null).get();
Assert.assertTrue("Deletes should not take longer than " + AWAIT_TIMEOUT_MS,
deletesDoneLatch.get().await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
Assert.assertTrue("All blobs in server are deleted", blobsThatAreDeleted.keySet().containsAll(blobsToBeDeleted));
Assert.assertTrue("Only blobs in server are deleted", blobsToBeDeleted.containsAll(blobsThatAreDeleted.keySet()));
for (Map.Entry<String, String> blobIdAndServiceId : blobsThatAreDeleted.entrySet()) {
String expectedServiceId = blobIdAndServiceId.getKey().equals(blobId) ? deleteServiceId
: BackgroundDeleteRequest.SERVICE_ID_PREFIX + deleteServiceId;
Assert.assertEquals("Unexpected service ID for deleted blob", expectedServiceId, blobIdAndServiceId.getValue());
}
// For 1 chunk deletion attempt, 1 background operation for Get is initiated which results in 2 Get Requests at
// the servers.
getRequestCount += 2;
Assert.assertEquals("Only one attempt of chunk deletion should have been done", getRequestCount,
mockServerLayout.getCount(RequestOrResponseType.GetRequest));
}
deletesDoneLatch.set(new CountDownLatch(5));
router.deleteBlob(blobId, null, null).get();
Assert.assertTrue("Deletes should not take longer than " + AWAIT_TIMEOUT_MS,
deletesDoneLatch.get().await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
router.close();
assertClosed();
Assert.assertEquals("All operations should have completed", 0, router.getOperationsCount());
}
/**
* Return the blob ids of all the blobs in the servers in the cluster.
* @param mockServerLayout the {@link MockServerLayout} representing the cluster.
* @return a Set of blob id strings of the blobs in the servers in the cluster.
*/
private Set<String> getBlobsInServers(MockServerLayout mockServerLayout) {
Set<String> blobsInServers = new HashSet<>();
for (MockServer mockServer : mockServerLayout.getMockServers()) {
blobsInServers.addAll(mockServer.getBlobs().keySet());
}
return blobsInServers;
}
/**
* Test to ensure that for simple blob deletions, no additional background delete operations
* are initiated.
*/
@Test
public void testSimpleBlobDelete() throws Exception {
    // Ensure the blob is a simple blob with exactly one chunk.
maxPutChunkSize = PUT_CONTENT_SIZE;
Properties props = getNonBlockingRouterProperties("DC1");
    VerifiableProperties verifiableProperties = new VerifiableProperties(props);
MockClusterMap mockClusterMap = new MockClusterMap();
MockTime mockTime = new MockTime();
MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
String deleteServiceId = "delete-service";
    // Track deletes initiated through the notification system.
final AtomicInteger deletesInitiated = new AtomicInteger();
final AtomicReference<String> receivedDeleteServiceId = new AtomicReference<>();
LoggingNotificationSystem deleteTrackingNotificationSystem = new LoggingNotificationSystem() {
@Override
public void onBlobDeleted(String blobId, String serviceId) {
deletesInitiated.incrementAndGet();
receivedDeleteServiceId.set(serviceId);
}
};
router = new NonBlockingRouter(new RouterConfig(verifiableProperties), new NonBlockingRouterMetrics(mockClusterMap),
new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL,
CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime), deleteTrackingNotificationSystem, mockClusterMap,
mockTime);
setOperationParams();
String blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel).get();
router.deleteBlob(blobId, deleteServiceId, null).get();
long waitStart = SystemTime.getInstance().milliseconds();
while (router.getBackgroundOperationsCount() != 0
&& SystemTime.getInstance().milliseconds() < waitStart + AWAIT_TIMEOUT_MS) {
Thread.sleep(1000);
}
Assert.assertEquals("All background operations should be complete ", 0, router.getBackgroundOperationsCount());
Assert.assertEquals("Only the original blob deletion should have been initiated", 1, deletesInitiated.get());
Assert.assertEquals("The delete service ID should match the expected value", deleteServiceId,
receivedDeleteServiceId.get());
router.close();
assertClosed();
Assert.assertEquals("All operations should have completed", 0, router.getOperationsCount());
}
/**
* Test that multiple scaling units can be instantiated, exercised and closed.
*/
@Test
public void testMultipleScalingUnit() throws Exception {
final int SCALING_UNITS = 3;
Properties props = getNonBlockingRouterProperties("DC1");
props.setProperty("router.scaling.unit.count", Integer.toString(SCALING_UNITS));
setRouter(props, new MockServerLayout(mockClusterMap));
assertExpectedThreadCounts(SCALING_UNITS + 1, SCALING_UNITS);
// Submit a few jobs so that all the scaling units get exercised.
for (int i = 0; i < SCALING_UNITS * 10; i++) {
setOperationParams();
router.putBlob(putBlobProperties, putUserMetadata, putChannel).get();
}
router.close();
assertExpectedThreadCounts(0, 0);
//submission after closing should return a future that is already done.
setOperationParams();
assertClosed();
}
/**
* Response handling related tests for all operation managers.
*/
@Test
public void testResponseHandling() throws Exception {
Properties props = getNonBlockingRouterProperties("DC1");
    VerifiableProperties verifiableProperties = new VerifiableProperties(props);
setOperationParams();
final List<ReplicaId> failedReplicaIds = new ArrayList<>();
final AtomicInteger successfulResponseCount = new AtomicInteger(0);
final AtomicBoolean invalidResponse = new AtomicBoolean(false);
ResponseHandler mockResponseHandler = new ResponseHandler(mockClusterMap) {
@Override
public void onEvent(ReplicaId replicaId, Object e) {
if (e instanceof ServerErrorCode) {
if (e == ServerErrorCode.No_Error) {
successfulResponseCount.incrementAndGet();
} else {
invalidResponse.set(true);
}
} else {
failedReplicaIds.add(replicaId);
}
}
};
// Instantiate a router just to put a blob successfully.
MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
setRouter(props, mockServerLayout);
setOperationParams();
// More extensive test for puts present elsewhere - these statements are here just to exercise the flow within the
// NonBlockingRouter class, and to ensure that operations submitted to a router eventually completes.
String blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel).get();
router.close();
for (MockServer mockServer : mockServerLayout.getMockServers()) {
mockServer.setServerErrorForAllRequests(ServerErrorCode.No_Error);
}
NetworkClient networkClient =
new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL,
CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime).getNetworkClient();
putManager = new PutManager(mockClusterMap, mockResponseHandler, new LoggingNotificationSystem(),
new RouterConfig(verifiableProperties), new NonBlockingRouterMetrics(mockClusterMap),
new RouterCallback(networkClient, new ArrayList<BackgroundDeleteRequest>()), "0", mockTime);
OperationHelper opHelper = new OperationHelper(OperationType.PUT);
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, null, successfulResponseCount,
invalidResponse, -1);
// Test that if a failed response comes before the operation is completed, failure detector is notified.
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, null, successfulResponseCount,
invalidResponse, 0);
// Test that if a failed response comes after the operation is completed, failure detector is notified.
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, null, successfulResponseCount,
invalidResponse, PUT_REQUEST_PARALLELISM - 1);
testNoResponseNoNotification(opHelper, failedReplicaIds, null, successfulResponseCount, invalidResponse);
testResponseDeserializationError(opHelper, networkClient, null);
opHelper = new OperationHelper(OperationType.GET);
getManager = new GetManager(mockClusterMap, mockResponseHandler, new RouterConfig(verifiableProperties),
new NonBlockingRouterMetrics(mockClusterMap),
new RouterCallback(networkClient, new ArrayList<BackgroundDeleteRequest>()), mockTime);
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount,
invalidResponse, -1);
// Test that if a failed response comes before the operation is completed, failure detector is notified.
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount,
invalidResponse, 0);
// Test that if a failed response comes after the operation is completed, failure detector is notified.
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount,
invalidResponse, GET_REQUEST_PARALLELISM - 1);
testNoResponseNoNotification(opHelper, failedReplicaIds, blobId, successfulResponseCount, invalidResponse);
testResponseDeserializationError(opHelper, networkClient, blobId);
opHelper = new OperationHelper(OperationType.DELETE);
deleteManager = new DeleteManager(mockClusterMap, mockResponseHandler, new LoggingNotificationSystem(),
new RouterConfig(verifiableProperties), new NonBlockingRouterMetrics(mockClusterMap),
new RouterCallback(null, new ArrayList<BackgroundDeleteRequest>()), mockTime);
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount,
invalidResponse, -1);
// Test that if a failed response comes before the operation is completed, failure detector is notified.
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount,
invalidResponse, 0);
// Test that if a failed response comes after the operation is completed, failure detector is notified.
testFailureDetectorNotification(opHelper, networkClient, failedReplicaIds, blobId, successfulResponseCount,
invalidResponse, DELETE_REQUEST_PARALLELISM - 1);
testNoResponseNoNotification(opHelper, failedReplicaIds, blobId, successfulResponseCount, invalidResponse);
testResponseDeserializationError(opHelper, networkClient, blobId);
putManager.close();
getManager.close();
deleteManager.close();
}
/**
* Test that failure detector is correctly notified for all responses regardless of the order in which successful
* and failed responses arrive.
* @param opHelper the {@link OperationHelper}
* @param networkClient the {@link NetworkClient}
* @param failedReplicaIds the list that will contain all the replicas for which failure was notified.
* @param blobId the id of the blob to get/delete. For puts, this will be null.
* @param successfulResponseCount the AtomicInteger that will contain the count of replicas for which success was
* notified.
* @param invalidResponse the AtomicBoolean that will contain whether an unexpected failure was notified.
* @param indexToFail if greater than 0, the index representing which response for which failure is to be simulated.
* For example, if index is 0, then the first response will be failed.
* If the index is -1, no responses will be failed, and successful responses will be returned to
* the operation managers.
*/
private void testFailureDetectorNotification(OperationHelper opHelper, NetworkClient networkClient,
List<ReplicaId> failedReplicaIds, String blobId, AtomicInteger successfulResponseCount,
AtomicBoolean invalidResponse, int indexToFail) throws Exception {
failedReplicaIds.clear();
successfulResponseCount.set(0);
invalidResponse.set(false);
mockSelectorState.set(MockSelectorState.Good);
FutureResult futureResult = opHelper.submitOperation(blobId);
int requestParallelism = opHelper.requestParallelism;
List<RequestInfo> allRequests = new ArrayList<>();
long loopStartTimeMs = SystemTime.getInstance().milliseconds();
while (allRequests.size() < requestParallelism) {
if (loopStartTimeMs + AWAIT_TIMEOUT_MS < SystemTime.getInstance().milliseconds()) {
Assert.fail("Waited too long for requests.");
}
opHelper.pollOpManager(allRequests);
}
ReplicaId replicaIdToFail =
indexToFail == -1 ? null : ((RouterRequestInfo) allRequests.get(indexToFail)).getReplicaId();
for (RequestInfo requestInfo : allRequests) {
ResponseInfo responseInfo;
if (replicaIdToFail != null && replicaIdToFail.equals(((RouterRequestInfo) requestInfo).getReplicaId())) {
responseInfo = new ResponseInfo(requestInfo, NetworkClientErrorCode.NetworkError, null);
} else {
List<RequestInfo> requestInfoListToSend = new ArrayList<>();
requestInfoListToSend.add(requestInfo);
List<ResponseInfo> responseInfoList;
loopStartTimeMs = SystemTime.getInstance().milliseconds();
do {
if (loopStartTimeMs + AWAIT_TIMEOUT_MS < SystemTime.getInstance().milliseconds()) {
Assert.fail("Waited too long for the response.");
}
responseInfoList = networkClient.sendAndPoll(requestInfoListToSend, 10);
requestInfoListToSend.clear();
} while (responseInfoList.size() == 0);
responseInfo = responseInfoList.get(0);
}
opHelper.handleResponse(responseInfo);
}
// Poll once again so that the operation gets a chance to complete.
allRequests.clear();
opHelper.pollOpManager(allRequests);
futureResult.get(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS);
if (indexToFail == -1) {
Assert.assertEquals("Successful notification should have arrived for replicas that were up",
opHelper.requestParallelism, successfulResponseCount.get());
Assert.assertEquals("Failure detector should not have been notified", 0, failedReplicaIds.size());
Assert.assertFalse("There should be no notifications of any other kind", invalidResponse.get());
} else {
Assert.assertEquals("Failure detector should have been notified", 1, failedReplicaIds.size());
Assert.assertEquals("Failed notification should have arrived for the failed replica", replicaIdToFail,
failedReplicaIds.get(0));
Assert.assertEquals("Successful notification should have arrived for replicas that were up",
opHelper.requestParallelism - 1, successfulResponseCount.get());
Assert.assertFalse("There should be no notifications of any other kind", invalidResponse.get());
}
}
/**
* Test that failure detector is not notified when the router times out requests.
* @param opHelper the {@link OperationHelper}
* @param failedReplicaIds the list that will contain all the replicas for which failure was notified.
* @param blobId the id of the blob to get/delete. For puts, this will be null.
* @param successfulResponseCount the AtomicInteger that will contain the count of replicas for which success was
* notified.
* @param invalidResponse the AtomicBoolean that will contain whether an unexpected failure was notified.
*/
private void testNoResponseNoNotification(OperationHelper opHelper, List<ReplicaId> failedReplicaIds, String blobId,
AtomicInteger successfulResponseCount, AtomicBoolean invalidResponse) throws Exception {
failedReplicaIds.clear();
successfulResponseCount.set(0);
invalidResponse.set(false);
FutureResult futureResult = opHelper.submitOperation(blobId);
List<RequestInfo> allRequests = new ArrayList<>();
long loopStartTimeMs = SystemTime.getInstance().milliseconds();
while (!futureResult.isDone()) {
if (loopStartTimeMs + AWAIT_TIMEOUT_MS < SystemTime.getInstance().milliseconds()) {
Assert.fail("Waited too long for requests.");
}
opHelper.pollOpManager(allRequests);
mockTime.sleep(REQUEST_TIMEOUT_MS + 1);
}
Assert.assertEquals("Successful notification should not have arrived for replicas that were up", 0,
successfulResponseCount.get());
Assert.assertEquals("Failure detector should not have been notified", 0, failedReplicaIds.size());
Assert.assertFalse("There should be no notifications of any other kind", invalidResponse.get());
}
/**
* Test that operations succeed even in the presence of responses that are corrupt and fail to deserialize.
* @param opHelper the {@link OperationHelper}
* @param networkClient the {@link NetworkClient}
* @param blobId the id of the blob to get/delete. For puts, this will be null.
* @throws Exception
*/
private void testResponseDeserializationError(OperationHelper opHelper, NetworkClient networkClient, String blobId)
throws Exception {
mockSelectorState.set(MockSelectorState.Good);
FutureResult futureResult = opHelper.submitOperation(blobId);
int requestParallelism = opHelper.requestParallelism;
List<RequestInfo> allRequests = new ArrayList<>();
long loopStartTimeMs = SystemTime.getInstance().milliseconds();
while (allRequests.size() < requestParallelism) {
if (loopStartTimeMs + AWAIT_TIMEOUT_MS < SystemTime.getInstance().milliseconds()) {
Assert.fail("Waited too long for requests.");
}
opHelper.pollOpManager(allRequests);
}
List<ResponseInfo> responseInfoList = new ArrayList<>();
loopStartTimeMs = SystemTime.getInstance().milliseconds();
do {
if (loopStartTimeMs + AWAIT_TIMEOUT_MS < SystemTime.getInstance().milliseconds()) {
Assert.fail("Waited too long for the response.");
}
responseInfoList.addAll(networkClient.sendAndPoll(allRequests, 10));
allRequests.clear();
} while (responseInfoList.size() < requestParallelism);
// corrupt the first response.
ByteBuffer response = responseInfoList.get(0).getResponse();
byte b = response.get(response.limit() - 1);
response.put(response.limit() - 1, (byte) ~b);
for (ResponseInfo responseInfo : responseInfoList) {
opHelper.handleResponse(responseInfo);
}
allRequests.clear();
opHelper.pollOpManager(allRequests);
try {
futureResult.get(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS);
} catch (ExecutionException e) {
Assert.fail("Operation should have succeeded with one corrupt response");
}
}
/**
* Assert that the number of ChunkFiller and RequestResponseHandler threads running are as expected.
* @param expectedRequestResponseHandlerCount the expected number of ChunkFiller and RequestResponseHandler threads.
* @param expectedChunkFillerCount the expected number of ChunkFiller threads.
*/
private void assertExpectedThreadCounts(int expectedRequestResponseHandlerCount, int expectedChunkFillerCount) {
Assert.assertEquals("Number of RequestResponseHandler threads running should be as expected",
expectedRequestResponseHandlerCount, TestUtils.numThreadsByThisName("RequestResponseHandlerThread"));
Assert.assertEquals("Number of chunkFiller threads running should be as expected", expectedChunkFillerCount,
TestUtils.numThreadsByThisName("ChunkFillerThread"));
if (expectedRequestResponseHandlerCount == 0) {
Assert.assertFalse("Router should be closed if there are no worker threads running", router.isOpen());
Assert.assertEquals("All operations should have completed if the router is closed", 0,
router.getOperationsCount());
}
}
/**
* Assert that submission after closing the router returns a future that is already done and an appropriate
* exception.
*/
private void assertClosed() {
Future<String> future = router.putBlob(putBlobProperties, putUserMetadata, putChannel);
Assert.assertTrue(future.isDone());
RouterException e = (RouterException) ((FutureResult<String>) future).error();
Assert.assertEquals(e.getErrorCode(), RouterErrorCode.RouterClosed);
}
/**
* Enum for the three operation types.
*/
private enum OperationType {
PUT, GET, DELETE,
}
/**
* A helper class to abstract away the details about specific operation manager.
*/
private class OperationHelper {
final OperationType opType;
int requestParallelism = 0;
/**
* Construct an OperationHelper object with the associated type.
* @param opType the type of operation.
*/
OperationHelper(OperationType opType) {
this.opType = opType;
switch (opType) {
case PUT:
requestParallelism = PUT_REQUEST_PARALLELISM;
break;
case GET:
requestParallelism = GET_REQUEST_PARALLELISM;
break;
case DELETE:
requestParallelism = DELETE_REQUEST_PARALLELISM;
break;
}
}
/**
* Submit a put, get or delete operation based on the associated {@link OperationType} of this object.
* @param blobId the blobId to get or delete. For puts, this is ignored.
* @return the {@link FutureResult} associated with the submitted operation.
*/
FutureResult submitOperation(String blobId) {
FutureResult futureResult = null;
switch (opType) {
case PUT:
futureResult = new FutureResult<String>();
ReadableStreamChannel putChannel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(putContent));
putManager.submitPutBlobOperation(putBlobProperties, putUserMetadata, putChannel, futureResult, null);
break;
case GET:
final FutureResult getFutureResult = new FutureResult<GetBlobResultInternal>();
getManager.submitGetBlobOperation(blobId, new GetBlobOptionsInternal(
new GetBlobOptionsBuilder().operationType(GetBlobOptions.OperationType.BlobInfo).build(), false),
new Callback<GetBlobResultInternal>() {
@Override
public void onCompletion(GetBlobResultInternal result, Exception exception) {
getFutureResult.done(result, exception);
}
});
futureResult = getFutureResult;
break;
case DELETE:
futureResult = new FutureResult<Void>();
deleteManager.submitDeleteBlobOperation(blobId, null, futureResult, null);
break;
}
NonBlockingRouter.currentOperationsCount.incrementAndGet();
return futureResult;
}
/**
* Poll the associated operation manager.
* @param requestInfos the list of {@link RequestInfo} to pass in the poll call.
*/
void pollOpManager(List<RequestInfo> requestInfos) {
switch (opType) {
case PUT:
putManager.poll(requestInfos);
break;
case GET:
getManager.poll(requestInfos);
break;
case DELETE:
deleteManager.poll(requestInfos);
break;
}
}
/**
* Hand over a responseInfo to the operation manager.
* @param responseInfo the {@link ResponseInfo} to hand over.
*/
void handleResponse(ResponseInfo responseInfo) {
switch (opType) {
case PUT:
putManager.handleResponse(responseInfo);
break;
case GET:
getManager.handleResponse(responseInfo);
break;
case DELETE:
deleteManager.handleResponse(responseInfo);
break;
}
}
}
} |
CryptoBlaze: A partially homomorphic processor with multiple instructions and non-deterministic encryption support
Homomorphic computing has been suggested as a method to secure processing on insecure servers. One of the drawbacks of homomorphic processing is the enormous execution time taken to process even the simplest of operations. In this paper, we propose a processor with hardware support for homomorphic processing. The proposed processor, named CryptoBlaze, has eight additional specialized instructions and hardware to support computation on encrypted data. For the first time, we show that it is possible to build a hardware implementation of a processor with multiple instructions, support for non-deterministic Paillier encryption, and partially homomorphic processing. The system was implemented and tested on an FPGA with three benchmarks. The design space with differing security parameters was explored and results are presented. CryptoBlaze is at least 10X faster than the state of the art.
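For context, the Paillier scheme the abstract refers to is non-deterministic (each encryption draws fresh randomness, so equal plaintexts yield different ciphertexts) and additively homomorphic: multiplying two ciphertexts yields an encryption of the sum of the plaintexts. The Python sketch below illustrates only that textbook property; the parameters are toy values and nothing here reflects CryptoBlaze's actual implementation.
import random
from math import gcd

# Toy Paillier parameters (illustrative only; real deployments use >= 2048-bit n).
p, q = 293, 433
n = p * q
n2 = n * n
g = n + 1                                       # standard simplification for the generator
lam = (p - 1) * (q - 1) // gcd(p - 1, q - 1)    # lcm(p - 1, q - 1)
mu = pow(lam, -1, n)                            # valid because g = n + 1 (needs Python >= 3.8)

def encrypt(m):
    """Encrypt m < n; fresh randomness makes ciphertexts non-deterministic."""
    r = random.randrange(1, n)
    while gcd(r, n) != 1:
        r = random.randrange(1, n)
    return (pow(g, m, n2) * pow(r, n, n2)) % n2

def decrypt(c):
    x = pow(c, lam, n2)
    return (((x - 1) // n) * mu) % n

c1, c2 = encrypt(20), encrypt(22)
assert decrypt((c1 * c2) % n2) == 42            # Enc(a) * Enc(b) decrypts to a + b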
The Brisbane Cricket Ground, commonly known as the Gabba,[1][2] is a major sports stadium in Brisbane, the capital of Queensland, Australia. The nickname Gabba derives from the suburb of Woolloongabba, in which it is located.
The land on which the ground sits was set aside for use as a cricket ground in 1895 and the first match was held on the site on 19 December 1896, between Parliament and The Press. Prior to this, cricket was played at a ground in the area then known as Green Hills (beside Countess Street Petrie Terrace opposite the Victoria Barracks – now occupied by the Northern Busway),[3] since at least the early 1860s.[4]
The Gabba shared first-class cricket matches with the Exhibition Ground until 1931. The first Sheffield Shield match at the Gabba was scheduled to be played between 31 January 1931 and 4 February 1931, but it was washed out without a ball being bowled. The first Test match at the Gabba was played between Australia and South Africa between 27 November and 3 December 1931.
Over the years, the Gabba has hosted athletics, Australian rules football, baseball, concerts, cricket, cycling, rugby league, rugby union, soccer and pony and greyhound races.
Between 1993 and 2005, the Gabba was redeveloped in six stages at a cost of A$128,000,000. The dimensions of the playing field are now 170.6 metres (east-west) by 149.9 metres (north-south) to accommodate the playing of Australian Football at elite level. The seating capacity of the ground is now 42,000; however, for international cricket matches, the seating capacity is reduced to around 36,000 due to new bigger screens and the addition of a pool deck, as well as wider sight screens.[5]
On 15 December 2016, Australia hosted Pakistan for the first day-night Test at the Gabba,[6] and the first Australian day-night Test hosted outside the Adelaide Oval.
Sports played at the ground
Cricket
The Gabba in 1899
The First Test between Australia and England is played nowadays at Brisbane. Nobody seems to know why, and all sorts of arguments are ventilated for and against more cricket Tests on the Woolloongabba ground. I am all in favour of robbing Queensland of its greatest cricketing occasion, for the ground depresses. It is not a cricket ground at all. It is a concentration camp! Wire fences abound. Spectators are herded and sorted out into lots as though for all the world this was a slave market and not a game of cricket. The stands are of wood and filthy to sit on. The dining rooms are barns, without a touch of colour or a picture on the wall. Everywhere there is dust and dirt...Forgive me if I am bitter about the Woolloongabba ground...the city has many good points, and the people who live there are generous and hospitable to the highest degree, but once one goes to the cricket ground the advantages are overwhelmingly lost in the mass of rules and regulations...[7] – John Kay, 1950–51 Ashes series
The Gabba is used from October to March for cricket and is home to the Queensland Cricket Association, the Queensland Cricketers Club and the Queensland Bulls cricket team. The venue usually hosts the first Test match of the season each November in addition to a number of international one-day matches usually held in January. The pitch is usually fast and bouncy.
The Gabba's amenities were greatly improved in the 1980s from a very basic standard, especially in comparison with the other Australian cricket grounds. Test cricket was first played at the ground in November 1931, the first Test of the series between Australia and South Africa. In December 1960, Test cricket's first-ever Tied Test took place at the ground when Richie Benaud's Australian team tied with Frank Worrell's West Indian side. Queensland clinched its first-ever Sheffield Shield title with victory over South Australia in the final at the ground in March 1995.
The Gabba was the first Australian venue to host an International Twenty20 cricket match.[8]
In November 1968 Colin Milburn scored 243, including 181 in the two-hour afternoon session, in a Sheffield Shield match for Western Australia against Queensland.[9]
For the first day of the first Test of the 2010–11 Ashes series between Australia and England the Gabba was almost sold out.[10] Australia's Michael Clarke holds the record for number of runs scored in one Test innings at the Gabba with 259 not out, breaking the previous record set by Alastair Cook.[11]
Australia has a formidable Test match record at the ground. In the 55 matches played at the ground, Australia has won 33, drawn 13, tied 1 and lost 8, and has not lost at the Gabba in 28 matches, a record dating back to 1988.[12] England have a notoriously poor record at the Gabba, and have only won two Test matches at the ground since the end of the Second World War. Many of their defeats have been heavy[13] and only seven England players have scored centuries at the ground.
Australian rules football

Australian Football Premiership Finals at the Gabba, 1907
An Australian Football Match at the Gabba in 2008.
The grave of David Newitt, who died from injuries received in a cycling race at the Gabba in 1922.
The Gabba was the home ground for the Brisbane Bears from 1993 to 1996 and since 1997 has been the home of the Brisbane Lions AFL team. The record crowd for an Australian rules football match is 37,224 between the Brisbane Lions and Collingwood in Round 15 of the 2005 AFL season.
Australian football has a long association with the ground. The Queensland Football League, a precursor to AFL Queensland played matches at the Gabba from 1905 to 1914, 1959 to 1971, and in the late 1970s and early 1980s. AFLQ matches resumed in 1993 as curtain-raiser events to AFL games, along with occasional AFLQ Grand Finals.
Interstate games, including the 1961 national carnival have also been played there, as was a demonstration game during the 1982 Commonwealth Games. In 1991 the Gabba was host to Queensland's only victory over a Victorian side.
Soccer
In the early 1900s, the Gabba hosted numerous matches between Australia and various touring nations.[14] During the 1950s and 1960s the Gabba hosted soccer matches for English first division and Scottish clubs including Blackpool FC, Everton FC, Manchester United and Heart of Midlothian.[15] The Chinese and South African national teams also played at the ground. During the 2000 Summer Olympics, the Gabba hosted association football group games.[16]
Rugby league
On 8 May 1909 the first match of rugby league was played in Brisbane at the Gabba. Norths played against Souths before a handful of spectators at the ground.[17] The Gabba hosted its first rugby league Test match on 26 June 1909, when Australia defeated New Zealand Māori 16–13.[18] The Kangaroos continued to play Tests at this venue until 1956, and a ground record crowd of 47,800 people saw Australia play Great Britain in 1954. From 1932 to 1959 the Gabba was also used to host interstate matches, and it hosted international rugby league finals from 1909 to 2003.
Rugby league test matches
The Gabba hosted 11 rugby league test matches between 1912 and 1956.[19]
Rugby union
The Gabba has hosted six rugby union Test matches.
2000 Olympic Games
The Gabba hosted seven games of the 2000 Olympic Games Men's Football tournament including a Quarter final match.
Greyhound racing
Greyhound racing was also conducted at the Gabba prior to the redevelopment.[20]
Awards
In 2009 as part of the Q150 celebrations, the Gabba was announced as one of the Q150 Icons of Queensland for its role as a "structure and engineering feat".[21]
Largest crowds at the Gabba
VFL/AFL records
Gallery
Test match between Australia and South Africa at the Gabba in Nov 2012
The Gabba prior to redevelopment
Shane Warne in action at the Gabba
The Gabba in 2006–07 Ashes series
Just announced: STG Presents The Glitch Mob “Love Death Immortality” tour on Saturday, May 3rd at the Showbox Market!
The Love Death Immortality album is the first album from the Glitch Mob since Drink the Sea was released in 2010. This is an extremely special opportunity to experience these glitch legends headlining a set in a small and intimate venue (please do not move this one to the Showbox Sodo). Ana Sia and Penthouse Penthouse will provide opening support. Tickets are on sale NOW, and this show will sell out, so buy your tickets ASAP (ticket purchase information below)!
Check out the first track from the Love Death Immortality album, previewed online now:
Based on the previewed tracks, it appears that The Glitch Mob has remained true to their unique style and sound. With “Can’t Kill Us” ushering in a heavier feel, we can’t wait to hear what the rest of the album has in store. The Love Death Immortality album will be released on February 11th, 2014. Preorder your copy online here.
Event Links & More Information:
Facebook Event Page (Glitch Mob)
Facebook Event Page (STG Presents)
Purchase presale tickets online (purchase tickets at the box office to save $ on service fees)
STG Presents website – event information
The Glitch Mob
with Ana Sia and Penthouse Penthouse
Showbox Market, 1426 1st Ave, Seattle, Washington 98101
Saturday, May 3, 2014
Doors at 8:00pm, show at 9:00pm
Ages: All ages to enter, 21 & over to drink
Ticket prices starting at $21.50
Stay up to date on upcoming public electronic dance music events in the Seattle area by following the EmeraldCityEDM Event Calendar.
Press Release: The Glitch Mob’s Love Death Immortality, to be released February 11, 2014, is one of 2014’s most anticipated electronic-music albums. The Glitch Mob’s core members consist of Justin Boreta (aka Boreta), Ed Ma (aka edIT) and Joshua Mayer (aka Ooah). Together, this supergroup trio of instrumentalist/producers have in Love Death Immortality created the ideal follow-up to The Glitch Mob’s 2010 debut full-length LP, Drink The Sea. That album proved the group’s breakout: Drink The Sea spent numerous weeks atop the iTunes Electronic chart – and still remains in its top 10, nearly three years since its release. Put out completely independently on the group’s own Glass Air imprint (which will also handle Love Death Immortality), Drink The Sea has sold over 80,000 copies and counting.
In an era of disposable dance-music singles, Love Death Immortality is, like Drink The Sea, created with the intent to flow as an album – one designed to be listened to from start to finish, taking the listener on an aural voyage in the process. However, it’s clear from the songs’ dynamism that The Glitch Mob is itching to take Love Death Immortality to its natural home on the stage, in all its crowd-pleasing glory. That’s exactly what’s going to happen when The Glitch Mob kick off their biggest tour yet in March 2014: accompanied by massive production from Martin Phillips of Bionic League – who’s developed staggering concert visuals for the likes of Daft Punk, Kanye West, and Deadmau5 – expect the Mob to dominate festivals and large venues well throughout the new year. |
import React from 'react'
import { GoogleCallback } from '@xrengine/client-core/src/user/components/Oauth/GoogleCallback'
export const GoogleHomePage = () => <GoogleCallback />
export default GoogleHomePage
|
WHAT was first sniffily thought of as little more than a passing fad has been heartily embraced by our city and burgers are here to stay.
We’ve fallen under the spell of charred meat in a bun, served everywhere from fine-dining restaurants to travelling food trucks.
Our best burgers have many things in common: the beef is first-rate, whether wagyu or black angus, and usually grass-fed.
The accompaniments are made from scratch, whether house-made pickles or tomato relish.
Buns are sourced from artisan bakers. And, importantly, everything is freshly made and cooked to order.
Because when it comes to these new-wave burgers, fast food is now definitely worth the wait.
Fat Bob's: The Jackie O burger, wrapped in a chip basket, with a beer from Fat Bob's on Cochranes Rd, Moorabbin.
1. Jackie O ($15.50) at Fat Bob’s
It’s an adventure to find Fat Bob’s hidden in an industrial estate in Moorabbin, but it’s not just about the unusual location. When you get there, the burgers are great. With its vintage neon signs and all-American vibe, in Fat Bob’s case it’s the destination, not the journey.
MAIN EVENT: The Jackie O takes a 180g beef patty (made from grass-fed happy cows), cooks it medium, melts fontina cheese over it and whacks it between a toasted pain de mie bun (from Bakery Lievito) filled with tomato, lettuce and onion. Mustard, tomato and Fat Bob’s special sauce (a chutney and mayo mix) add zing to this beaut burger that delivers from first bite to last.
ON THE SIDE: Zucchini chips are a guilt-busting addition, and there are 20 beers from around the world to wash it all down.
DON’T WANT BEEF? Kewpie mayo, slaw and coriander give an Asian twist to the Victa, a chicken burger with a difference.
80 Cochranes Rd, Moorabbin; ph: 9555-0909
fatbobs.com.au
The Oz burger with a Merrywell Spider and chips from The Merrywell at Crown Riverside.
2. Oz Burger ($14) at The Merrywell
At Crown’s pub on the river promenade the signs in the window pronounce their burgers “Pink and Juicy”. And that they most certainly are.
MAIN EVENT: Get the napkins ready – Merrywell’s burgers are puddle-forming juicy. The fat Black Angus patty, made from a 70/30 mix of chuck/rump and fat, simply seasoned and cooked medium-rare, would rate just on its own, but add in a runny-yolked fried egg, melted cheddar, little chunks of sweet pineapple and bursts of pickled beetroot, and you’ve got a flag-flying burger Australia can be proud of. The soft damper bun eschews the brioche craze and the burger – and its fans – are all the better for it.
ON THE SIDE: Take a cherry cola float and some Merrywell chips that you dunk in bacon aioli (brilliant!).
DON’T WANT BEEF? The Fishmonger, with pickled fennel and chipotle mayo, is a punchy take on fish in a bun.
Cnr Clarendon St and Crown Riverside, city; ph: 9292 7468
themerrywell.com.au
Melbourne’s top 10 best burgers
3. Wagyu Burger ($15.90) at South Melbourne Trader
Near the South Melbourne Market this unassuming little cafe is turning out one of the best burgers in town.
MAIN EVENT: Since it arrived on the menu eight months ago, the wagyu burger has “gone gangbusters” – and with very good reason. The patty – cooked pink, gloriously juicy – is simply seasoned and speaks for itself. It’s supported by some house-made Jamaican jerk sauce that adds a perfectly pitched amount of heat, while the house pickles add terrific tart crunch.
ON THE SIDE: Hand cut chips – skin on, fried to a crisp – come with the burger and are also great.
DON’T WANT BEEF? The barramundi burger with tarragon relish is a perfect pescetarian substitute.
14/111 Cecil St, South Melbourne; ph 9696 3938
tsmt.com.au
Tuckshop Takeaway in Hawthorn Rd, Caulfield.
4. Major Burger ($12) at Tuckshop Takeaway
When two Fat Duck alumni decide it’s time to trade in their fine-dining aprons to flip burgers, it’s time for the rest of us to take notice. In just six months Clinton and Karina Serex have established Tuck Shop’s reputation as the home of some of Melbourne’s best burgers, with a side of lip-smacking nostalgia thrown in. Redskin milkshake, anyone?
MAIN EVENT: Two choices: the minor or the major. Both come on a Noisette milk brioche bun and have lettuce, tomato, onion, pickles, cheddar and Tuck Shop’s special sauce (a mix of tomato sauce, mustard and mayo) but the Major also comes with a free-range egg and bacon. Clinton makes about 180 patties each morning from three different cuts of grass-fed Gippsland beef and there’s no back-up supply so once they’re sold out – usually by about 8pm – it’s fish and chips up the road.
ON THE SIDE: Tuck Shop calls its chips ‘cuts’, which are SA-grown desiree potatoes that are hand-cut, skin-on and thrice-cooked. Sweet tooths are sorted thanks to Karina’s daily selection of retro lolly-inspired cakes, tarts and soft serve ice cream.
DON’T WANT BEEF? Try the ‘veggie-wedgie’ burger, with a patty made from beetroot, lentils and brown rice.
273 Hawthorn Rd, Caulfield North; ph 0431 406 580
twitter.com/tuckshop_melb
The Umami burger with chips, dipping sauces and ‘slaw from Nshry on Beaconsfield Pde, Albert Park.
5. Umami Burger ($22) at Nshry
With its prime beachfront real estate overlooking the bay, you’d be forgiven for thinking Nshry might leave the heavy lifting to the view. Thankfully their burgers put paid to any such notion.
MAIN EVENT: Named after the fifth taste (“pleasant savoury”) the Umami Burger takes a 200g wagyu patty, gives it a “rub” of dried mushrooms and cooks it to a juicy medium rare while gruyere melts atop. It joins caramelised onions on the base of a toasted brioche bun, while a large parmesan crisp adds a great dose of salty, cheesy crunch. It’s so good it’s no wonder this is the most popular item on the menu by a mile.
ON THE SIDE: Served with great beer-battered chips that beg to be dunked into the spicy chipotle, wasabi mayo or tomato sauces that come with, a side of slaw and a few cornichons complete this very pretty picture.
DON’T WANT BEEF? With miso pumpkin, crisp fried shallots, daikon and wasabi mayo, the slow braised pork burger is a riot of colour and flavour.
129A Beaconsfield Pde, Albert Park; ph: 9682 1077
nshry.com.au
The Royale with cheese beef burger from Royale Brothers in Church St, Brighton.
6. Royale with Cheese ($12) at Royale Brothers
Thanks to Pulp Fiction, the Royale with Cheese has been immortalised around the world. The boys from Brighton’s The Pantry have taken it as inspiration for their latest venture that opened in December.
MAIN EVENT: Served in wax paper and a brown paper bag, this burger captures all that is right with its namesake and then raises it to new heights. Pickles, onion, mustard and mayo provide sweet-sharp saucy back-up to the patty’s lead. Lettuce is a controversial addition to the glossy sweet milk bun – though not unwelcome – while the Royale sauce adds a surprising touch of spicy heat.
ON THE SIDE: Coney Island cocktail, jalapeño salsa, onion gravy or more of that spicy tomato sauce are all available to dip the good chips into.
DON’T WANT BEEF? You could try the fish, chicken or jerk pork versions, but really, here it’s all about the Royales.
rear 1 Church St, Brighton; ph: 9005 4242
royalebrothers.com.au
The Classic Beef Burger from Local Burger Co on Rathdowne Street, Carlton.
7. Classic beef burger ($10) at Local Burger Co
Since opening four months ago, Local Burger Co co-owner Evan Kipping says they’ve been “strapped to a rocket”. Locals have packed the corner store since day dot, queuing for their old-school burgers and jam jar shakes.
MAIN EVENT: The Classic is a burger of days gone by made with a modern eye for quality ingredients. A good-sized patty made from chuck and brisket with a 70/30 ratio of meat to fat – Australian beef, of course – is teamed with thick tomato slices, a bit of lettuce, house-pickled onion and the “Local Burger Co sauce” (pickles, mayo and tomato sauce). It’s a walk down memory lane that tastes better than the original ever did.
ON THE SIDE: Sweet potato fries make for a nice twist, especially when washed down with a vanilla malt shake.
DON’T WANT BEEF? Haloumi, eggplant and Granny Kipping’s tomato relish turn the mushroom burger into a meat-free marvel.
687 Rathdowne St, Carlton North; ph: 9347 7561
thelocalburgerco.com.au
Double Patty Smash burger with Kraft cheese from Rockwell and Sons on Smith St, Collingwood.
8. Double Patty Smash Burger ($11) at Rockwell and Sons
Dude food gets the clever treatment at Rockwell and Sons, but it’s their sandwiches that keep the crowds coming back.
MAIN EVENT: The double patty smash burger is a juicy, oozy triumph, celebrating – and elevating – the most humble of ingredients: the Kraft single. On toasted brioche, two medium-grilled patties are each dripping with the melted plastic cheese and slathered with a hot-pink Russian-sauce-like concoction. Apart from a few crunchy gherkins thrown in for good measure, there’s nothing to distract from its meaty, cheesy goodness.
ON THE SIDE: Craft beer, of course.
DON’T WANT BEEF? The pressed rib sandwich, with dill, fennel and onion, delivers pork with just the right amount of posh.
288 Smith St, Collingwood; ph: 8415 0700
rockwellandsons.com.au
The New York Burger from New York Minute on Mt Alexander Rd, Moonee Ponds.
9. New York ($12) at New York Minute
This little cafe in Moonee Ponds is bringing a slice of America to our ‘burbs in a big way.
MAIN EVENT: The New York is the burger you always wanted to make at home but didn’t know how. It’s a big, rustic take on the famed two-all-beef-patties-special-sauce-lettuce-cheese-pickles-onions-on-a-sesame-seed-bun. Free-range wagyu, spinach and toasted Mr Pita buns ramp up the ingredients, while the combination of their “special sauce” and pickles delivers sharp sweetness. It’s a two-hand event in the style of a classic that over-delivers on quality and flavour.
ON THE SIDE: Add some fries and a shake and you’ll have a meal that’ll keep you happy for days.
DON’T WANT BEEF? Spinach, bacon, cheese, tomato and sweet chilli mayo elevates the chicken schnitzel burger to the heavens.
491 Mt Alexander Rd, Moonee Ponds; ph: 9043 1838
newyork-minute.com.au
Classic burger from the Burger Lounge in Main Rd, Eltham.
10. Classic ($9.50) at Burger Lounge
For the past couple of years Gerry and Florie Mustafa have been serving the good burghers of Eltham with their very good burgers – a “healthy” take on fast food. And don’t the people love them for it.
MAIN EVENT: The seed-topped sugar-free sourdough bun (from La Madre Bakery) holds a manageably hefty patty made from free-range Gippsland beef. Tomato, lettuce and red onion up the colour, while a good splodge of aioli and tomato relish adds saucy goodness. It’s simple, done well. You can jazz up their burgers any number of ways, from cheese – Swiss, tasty, even King Island brie or blue – to roasted pumpkin or caramelised pear.
ON THE SIDE: Handmade onion rings with garlic aioli or thick cut chips with rosemary and salt hit the spot with precision.
DON’T WANT BEEF? There are heaps of lamb, fish and veg options (more than 20 all up), but the crumbed chicken, with honey mustard mayo, Swiss cheese and caramelised onion is hard to go past.
902 Main Rd, Eltham; ph: 9431 4500
theburgerlounge.com.au
Michael and Sunette Adendorff thought something was wrong with their hire car's GPS as they drove around in circles in the Wellington suburb, looking for the £90-a-night Majestic Hotel on Royal Parade.
When they pulled into the local chemist's shop to ask directions, they were shocked to discover that Eastbourne (population 4,600), New Zealand, does not even have a hotel.
Shop assistant Linda Burke said: "They just walked in and asked me where Royal Parade was, with the Majestic Hotel.
"I said: Oh no, there's no hotel here.
"I looked at it and said: That's in the UK, that's in England.
"He checked on the internet and said he did think it was funny they charged him in pounds."
Ms Burke rang around but discovered all the local bed and breakfast places were full, so she offered them a room for the night in her house.
The couple, who were exploring New Zealand while visiting the country to watch South Africa play in the Rugby World Cup, had mistakenly booked into the hotel in Eastbourne, Sussex, on the internet.
"I booked into the right hotel, just in the wrong country," Mr Adendorff told the Dominion Post newspaper.
Despite the good-natured ribbing they received, the couple said Eastbourne was very nice and the locals were friendly.
"I don't know how Eastbourne in England popped up in the comparisons on the internet," Mr Adendorff said.
They were unable to get a refund from the Majestic Hotel because their cancellation was at such short notice. |
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

/**
 * Class that should be inherited by all base messages.
 *
 * Serializable, ReadBuffer, Codec, CodecException and ErrorCode are assumed to be
 * project-local types available from the same package.
 */
public abstract class SerializableBase extends Serializable
{
/**
* The ReadBuffer that stores the original data used to deserialize this message
*
* It is only set in base messages on which 'deserialize ( ReadBuffer )' is called
* (and only when the entire message is available - so deserialize returns 0 or -1)
*/
private ReadBuffer orgDataBuf = null;
/**
* Creates a copy of the original read buffer object
*
* It can be used by messages inheriting this base message to deserialize.
* It does NOT copy the data itself, just the 'ReadBuffer' object,
* which will use the same memory as the original object.
*/
protected ReadBuffer getBufferDescCopy()
{
if ( orgDataBuf == null )
{
return null;
}
// We want to return a new object, so if anything modifies the offset it
// happens in the copy, and not in the original!
return new ReadBuffer ( orgDataBuf );
}
/**
* Serializes entire message to the buffer
*
* It calls serializeData from inheriting classes and also encodes the total message's length.
* @param toStream The stream to serialize the data to
* @throws java.io.IOException, CodecException
*/
public void serializeWithLength ( OutputStream toStream ) throws IOException, CodecException
{
assert toStream != null;
if ( toStream == null )
{
throw new CodecException ( ErrorCode.InvalidParameter );
}
ByteArrayOutputStream buf = new ByteArrayOutputStream();
serializeData ( buf );
if ( buf.size() < 0 )
{
throw new CodecException ( ErrorCode.TooMuchData );
}
Codec.appendField ( toStream, ( Integer ) buf.size(), 0 );
buf.writeTo ( toStream );
}
/**
* It reads a single message from the input stream
*
* @param fromStream The stream to read the message from
* @return The number of bytes consumed, or -1 if the end of the stream was reached before
* the entire message could be read.
* @throws IOException, CodecException
*/
public int deserializeWithLength ( InputStream fromStream ) throws IOException, CodecException
{
byte[] buf = null;
int dataSize = 0;
int bufSize = 0;
while ( true )
{
// The deserializeWithLength() call below returns a positive value when it has all
// the bytes needed to deserialize the message. The returned value is the number of
// consumed bytes.
// When the value is negative, it means that there was not enough data in the buffer.
// The number of (at least) missing bytes is the absolute value of the (negative) number
// returned. Here we focus on the case where there are some bytes missing,
// so we use 'missingBytes' variable to represent this.
// However, at first this variable actually stores both the number of consumed bytes
// (if it's positive), and the number of missing bytes (if it's negative).
int missingBytes = deserializeWithLength ( buf, 0, dataSize );
// If missingBytes > 0, then it means that the value stored in missingBytes
// is actually the number of 'consumed bytes', and that we now have a complete message.
// Let's report the number of bytes we just consumed!
if ( missingBytes > 0 )
{
return missingBytes;
}
// missingBytes is <= 0, which means there were not enough bytes to deserialize the message
// Let's make the 'missingBytes' to store the number of bytes missing as a positive value.
missingBytes *= -1;
// From this point, missingBytes is positive, and stores the number of bytes that
// are still missing (at least, this number could change in the future - this is because
// we may not have enough bytes to decode even the message length, so the deserialize
// could report the number of missing bytes it knows for sure it's going to need,
// and, in the future, once it has the entire header, it will report the actual number
// of missing bytes, that will most likely be larger).
// deserialize above should not return 0!
assert missingBytes > 0;
if ( dataSize + missingBytes > bufSize )
{
bufSize = dataSize + missingBytes;
// At the beginning we want to allocate a little more space,
// just so we could avoid reallocating it after every byte of the message header!
if ( bufSize < 16 )
{
bufSize = 16;
}
byte[] oldBuf = buf;
buf = new byte[ bufSize ];
if ( dataSize > 0 )
{
System.arraycopy ( oldBuf, 0, buf, 0, dataSize );
}
}
while ( missingBytes > 0 )
{
int r = fromStream.read ( buf, dataSize, missingBytes );
if ( r < 0 )
{
return -1;
}
dataSize += r;
missingBytes -= r;
}
}
}
/**
* Deserializes the entire message from the buffer
*
* It calls deserializeData from inheriting classes and also decodes the total message's length.
* @param buffer The buffer to deserialize the data from
* @param offset The offset in the buffer to start deserializing from
* @return Negative value means that more bytes are needed. The number of missing bytes is the absolute
* value of the number returned. Positive value means that the entire message was deserialized,
* and that the returned number of bytes were consumed
* @throws CodecException
*/
public int deserializeWithLength ( byte[] buffer, int offset ) throws CodecException
{
if ( buffer == null || offset < 0 || offset > buffer.length )
{
throw new CodecException ( ErrorCode.InvalidParameter );
}
return deserializeWithLength ( buffer, offset, buffer.length - offset );
}
/**
* Deserializes the entire message from the buffer
*
* It calls deserializeData from inheriting classes and also decodes the total message's length.
* @param buffer The buffer to deserialize the data from.
* @param offset The offset in the buffer to start deserializing from.
* @param length The number of bytes (starting from the offset) that can be deserialized.
* @return Negative value means that more bytes are needed. The number of missing bytes is the absolute
* value of the number returned. Positive value means that the entire message was deserialized,
* and that the returned number of bytes were consumed
* @throws CodecException
*/
public int deserializeWithLength ( byte[] buffer, final int offset, final int length ) throws CodecException
{
orgDataBuf = null;
if ( length < 0 )
{
throw new CodecException ( ErrorCode.InvalidParameter );
}
// Min length - 2 bytes, one for the field header, one for the 'length' field
if ( length < 2 )
{
return -1 * ( 2 - length );
}
if ( buffer == null || offset < 0 || length < 0 || offset + length > buffer.length )
{
throw new CodecException ( ErrorCode.InvalidParameter );
}
assert length >= 0;
int payloadSize = 0;
ReadBuffer readBuffer = new ReadBuffer ( buffer, offset, offset + length );
try
{
// This DOES modify the offset
Codec.FieldHeader hdr = Codec.readFieldHeader ( readBuffer );
// This is not the code we expected!
if ( hdr.fieldId != 0 )
{
throw new CodecException ( ErrorCode.ProtocolError );
}
// This DOES modify the offset
payloadSize = Codec.readInteger ( hdr, readBuffer );
assert readBuffer.getOffset() > offset;
if ( payloadSize < 0 )
{
throw new CodecException ( ErrorCode.ProtocolError );
}
}
catch ( CodecException e )
{
if ( e.getErrorCode() == ErrorCode.IncompleteData )
{
// We still are missing some data. We don't know how much, so
// lets just say there is one byte missing!
return -1;
}
// Otherwise just pass this error up!
throw e;
}
// We don't have the entire message yet!
// Report the number of missing bytes.
if ( readBuffer.getOffset() + payloadSize > offset + length )
{
return -1 * ( readBuffer.getOffset() + payloadSize - ( offset + length ) );
}
assert offset < readBuffer.getOffset();
assert readBuffer.getOffset() + payloadSize <= offset + length;
assert readBuffer.getOffset() + payloadSize <= buffer.length;
assert readBuffer.getOffset() + payloadSize <= readBuffer.getSize();
readBuffer.setSize ( readBuffer.getOffset() + payloadSize );
deserializeBase ( readBuffer );
// deserializeBase should NOT modify the buffer...
return payloadSize + ( readBuffer.getOffset() - offset );
}
/**
* Deserializes an entire, single, message from the buffer
*
* It calls deserializeData from inheriting classes, but does NOT decode the total message's length.
* It assumes that the entire data passed for deserializing is a single message.
*
* @param readBuffer The buffer to deserialize the data from. Its offset is NOT modified.
* @return True if the message has been successfully deserialized,
* False if an unknown field was found, and throws an exception on errors
* @throws CodecException
*/
public boolean deserializeBase ( ReadBuffer readBuffer ) throws CodecException
{
orgDataBuf = null;
// deserializeData should NOT modify the buffer...
boolean ret = deserializeData ( readBuffer );
// Lets keep the buffer for later - other messages will be able to deserialize
// themselves using this base message
orgDataBuf = new ReadBuffer ( readBuffer );
return ret;
}
/**
* Deserializes this serializable using its base serializable
*
* It is protected and should be used by base message's deserializeFromBase function.
* It does not perform sanity checks (except for the existence of the original buffer).
* If it succeeds, a reference to the original buffer from the baseSerializable
* will be stored in this object as well.
*
* @param baseSerializable The base serializable to deserialize the data from
* @return True if this serializable has been successfully deserialized and it used the entire available data.
* False means that although the serializable has been deserialized properly,
* there are some additional, unknown fields that have not been deserialized.
* If there is a deserialization error it throws one of the exceptions.
* @throws CodecException
*/
protected boolean deserializeFromBaseSerializable ( SerializableBase baseSerializable ) throws CodecException
{
orgDataBuf = null;
return deserializeBase ( baseSerializable.getBufferDescCopy() );
}
} |
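The length-prefix contract above (a positive return value means "message complete, this many bytes consumed"; a negative value means "at least this many bytes still missing") can be summarized with a small, hypothetical Python sketch. A fixed 4-byte big-endian prefix stands in for the project's Codec field encoding, which the real class uses instead:

import struct

def frame(payload: bytes) -> bytes:
    # mirrors serializeWithLength(): length first, then the payload
    return struct.pack(">I", len(payload)) + payload

def try_unframe(buf: bytes):
    # mirrors deserializeWithLength(): negative = missing bytes, positive = consumed
    if len(buf) < 4:
        return -(4 - len(buf)), None           # header itself incomplete
    (length,) = struct.unpack(">I", buf[:4])
    if len(buf) < 4 + length:
        return -(4 + length - len(buf)), None  # payload incomplete
    return 4 + length, buf[4:4 + length]

buf = frame(b"hello")
print(try_unframe(buf[:3]))   # (-1, None): one more header byte needed
print(try_unframe(buf))       # (9, b'hello')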
// Assumed imports for a support-library-era Android project (the exact packages
// are an assumption, not taken from the original repository):
import android.content.Intent;
import android.graphics.Color;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v4.view.MenuItemCompat;
import android.support.v4.view.ViewPager;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.SearchView;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.TextView;

import java.io.UnsupportedEncodingException;

import butterknife.BindView;
import butterknife.ButterKnife;
import com.astuetz.PagerSlidingTabStrip;
import com.loopj.android.http.AsyncHttpResponseHandler;
import cz.msebera.android.httpclient.Header;
import de.hdodenhof.circleimageview.CircleImageView;

/**
 * Created by pranavkonduru on 10/28/16.
 */
public class TweetListActivity extends AppCompatActivity implements View.OnClickListener {
@BindView(R.id.viewpager) ViewPager viewPager;
@BindView(R.id.tabs) PagerSlidingTabStrip tabsStrip;
@BindView(R.id.toolbar) Toolbar toolbar;
@BindView(R.id.tv_toolbar_title) TextView tvToolbarTitle;
@BindView(R.id.iv_user_profile_image) CircleImageView civUserProfile;
private static final String TAG = "TweetListActivity";
private HootFragmentPagerAdapter adapter;
private TwitterClient twitterClient;
private String userScreenName;
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
Log.d(TAG, "onCreate: ");
setContentView(R.layout.activity_tweetlist);
ButterKnife.bind(this);
twitterClient = Hoot.getRestClient();
adapter = new HootFragmentPagerAdapter(getSupportFragmentManager());
adapter.addFragment(TimelineFragment.getInstance(), "Tweets");
adapter.addFragment(MentionsFragment.getInstance(), "Mentions");
viewPager.setAdapter(adapter);
// Attach the view pager to the tab strip
tabsStrip.setViewPager(viewPager);
setSupportActionBar(toolbar);
tvToolbarTitle.setText("Home");
tvToolbarTitle.setTextColor(Color.BLACK);
toolbar.setBackgroundColor(Color.WHITE);
if (getSupportActionBar() != null) {
//getSupportActionBar().setBackgroundDrawable(new ColorDrawable(Color.WHITE));
getSupportActionBar().setDisplayShowTitleEnabled(false);
//getSupportActionBar().setDisplayUseLogoEnabled(true);
loadUserProfile();
}
}
private void loadUserProfile() {
ProfileModel userProfile = ProfileModel.getUserProfile();
if (userProfile == null) {
if(Utility.isNetworkAvailable(this)) {
Log.d(TAG, "loadUserProfile: Network is available. Getting profile");
twitterClient.getUserProfile(new AsyncHttpResponseHandler() {
@Override
public void onSuccess(int statusCode, Header[] headers, byte[] responseBody) {
try {
loadImage(ProfileModel.parseJSON(new String(responseBody, "UTF-8")));
Log.d(TAG, "onSuccess: ");
} catch (UnsupportedEncodingException e) {
Log.e(TAG, "onSuccess: ", e);
e.printStackTrace();
}
}
@Override
public void onFailure(int statusCode, Header[] headers, byte[] responseBody, Throwable error) {
Log.e(TAG, "onFailure: ", error.getCause());
}
});
} else {
Log.d(TAG, "loadUserProfile: Network not available");
}
} else {
Log.d(TAG, "loadUserProfile: Profile found. Loading it");
loadImage(userProfile);
}
}
void loadImage(ProfileModel model){
userScreenName = model.screen_name;
Utility.loadImage(this, model.profile_image_url_https, civUserProfile, false);
civUserProfile.setOnClickListener(this);
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
getMenuInflater().inflate(R.menu.menu_tweet_detail, menu);
final MenuItem searchItem = menu.findItem(R.id.miSearch);
final SearchView searchView = (SearchView) MenuItemCompat.getActionView(searchItem);
searchView.setOnQueryTextListener(new SearchView.OnQueryTextListener() {
@Override
public boolean onQueryTextSubmit(String query) {
// Fetch the data remotely
// fetchBooks(query);
// Reset SearchView
searchView.clearFocus();
searchView.setQuery("", false);
searchView.setIconified(true);
searchItem.collapseActionView();
Intent intent = new Intent(TweetListActivity.this, SearchActivity.class);
intent.putExtra("search_item", query);
startActivity(intent);
// Set activity title to search query
// BookListActivity.this.setTitle(query);
return true;
}
@Override
public boolean onQueryTextChange(String s) {
return false;
}
});
return true;
}
@Override
public void onClick(View view) {
switch (view.getId()){
case R.id.iv_user_profile_image:
Log.d(TAG, "onClick: User profile clicked. Loading it");
Intent intent = new Intent(this, UserTimelineActivity.class);
intent.putExtra("screen_name", userScreenName);
startActivity(intent);
break;
}
}
} |
package com.laytonsmith.abstraction.enums.bukkit;
import com.laytonsmith.abstraction.Implementation;
import com.laytonsmith.abstraction.enums.EnumConvertor;
import com.laytonsmith.abstraction.enums.MCDragType;
import com.laytonsmith.annotations.abstractionenum;
import org.bukkit.event.inventory.DragType;
/**
*
* @author MariuszT
*/
@abstractionenum(
implementation = Implementation.Type.BUKKIT,
forAbstractEnum = MCDragType.class,
forConcreteEnum = DragType.class)
public class BukkitMCDragType extends EnumConvertor<MCDragType, DragType> {
private static com.laytonsmith.abstraction.enums.bukkit.BukkitMCDragType instance;
public static com.laytonsmith.abstraction.enums.bukkit.BukkitMCDragType getConvertor() {
if (instance == null) {
instance = new com.laytonsmith.abstraction.enums.bukkit.BukkitMCDragType();
}
return instance;
}
}
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
"""
For each query: given a segment [a, b] on a line and a blocked interval
[c - r, c + r], print the total length of [a, b] left uncovered.
"""
t = int(input())
def solve(a,b,c,r):
left = c-r
right = c+r
if b<= left or a >= right:
print(b-a)
elif left <= a and b <= right:
print(0)
elif a <= left and right <= b:
print(b-a-(right-left))
elif left <= a:
print(b-right)
else:
print(left-a)
for i in range(t):
a, b, c, r = map(int,input().split())
if a > b:
a, b = (b, a)
solve(a,b,c,r) |
h,w=(int(x) for x in input().split())
s=[]
for i in range(h):
s.append(input())
for i in range(h):
res=[]
for j in range(w):
if s[i][j]==".":
ini_i=i if i==0 else i-1
fin_i=i if i==h-1 else i+1
ini_j=j if j==0 else j-1
fin_j=j if j==w-1 else j+1
count=0
for x in range(ini_i,fin_i+1):
for y in range(ini_j,fin_j+1):
if s[x][y]=="#":
count+=1
res.append(str(count))
else:
res.append("#")
print("".join(res)) |
import { Component, OnInit, Input } from '@angular/core';
import { Country } from 'src/app/models/country';
@Component({
selector: 'app-country-info',
templateUrl: './country-info.component.html',
styleUrls: ['./country-info.component.scss']
})
export class CountryInfoComponent implements OnInit {
@Input() selectedCountry: Country = new Country();
constructor() { }
ngOnInit() {
}
}
|
/**
* Created by katzelda on 5/14/18.
*/
public class SSSG1Validator extends AbstractValidatorPlugin<Substance> {
@Override
public void validate(Substance objnew, Substance objold, ValidatorCallback callback) {
SpecifiedSubstanceGroup1Substance cs = (SpecifiedSubstanceGroup1Substance) objnew;
if (cs.specifiedSubstance == null) {
callback.addMessage(GinasProcessingMessage
.ERROR_MESSAGE("Specified substance must have a specified substance component"));
return;
}
if (cs.specifiedSubstance.constituents== null || cs.specifiedSubstance.constituents.isEmpty()) {
callback.addMessage(GinasProcessingMessage
.ERROR_MESSAGE("Specified substance must have at least 1 constituent"));
} else {
cs.specifiedSubstance.constituents.stream()
.filter(c->c.substance==null)
.findAny()
.ifPresent(missingSubstance->{
callback.addMessage(GinasProcessingMessage
.ERROR_MESSAGE("Specified substance constituents must have an associated substance record"));
});
ValidationUtils.validateReference(cs, cs.specifiedSubstance, callback, ValidationUtils.ReferenceAction.FAIL);
}
}
} |
#pragma once
#include <torch/csrc/WindowsTorchApiMacro.h>
#include <torch/csrc/jit/codegen/cuda/ir_interface_nodes.h>
#include <torch/csrc/jit/codegen/cuda/type.h>
//
// The operations defined in this header are intended as user-facing functions.
// The user will provide the necessary input TensorViews and the function will
// create the correct intermediate nodes and return the output TensorViews.
//
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
struct ForwardNormResult {
TensorView* output = nullptr;
TensorView* mean = nullptr;
TensorView* invstd = nullptr;
};
struct BackwardNormResult {
TensorView* grad_input = nullptr;
TensorView* grad_weight = nullptr;
TensorView* grad_bias = nullptr;
};
TORCH_CUDA_CU_API TensorView* softmax(TensorView* x, int dim);
TORCH_CUDA_CU_API TensorView* softmax_backward(
TensorView* dy,
TensorView* y,
const int dim,
TensorView* x);
TORCH_CUDA_CU_API ForwardNormResult layer_norm(
TensorView* x,
const std::vector<int64_t>& norm_shape,
TensorView* weight,
TensorView* bias,
Val* eps);
TORCH_CUDA_CU_API ForwardNormResult layer_norm(
TensorView* x,
const size_t kNormShapeNumDims,
TensorView* weight,
TensorView* bias,
Val* eps);
TORCH_CUDA_CU_API BackwardNormResult layer_norm_backward(
TensorView* dy,
TensorView* x,
const std::vector<int64_t>& norm_shape,
TensorView* mean,
TensorView* rstd,
TensorView* weight,
TensorView* bias,
const std::vector<bool>& output_mask);
TORCH_CUDA_CU_API ForwardNormResult batch_norm(
TensorView* x,
TensorView* weight,
TensorView* bias,
TensorView* running_mean,
TensorView* running_var,
const bool kTraining,
Val* momentum,
Val* eps);
TORCH_CUDA_CU_API BackwardNormResult batch_norm_backward(
TensorView* x,
TensorView* dy,
TensorView* weight,
TensorView* running_mean,
TensorView* running_var,
TensorView* save_mean,
TensorView* save_invstd,
const bool kTraining,
Val* eps,
const std::vector<bool>& output_mask);
TORCH_CUDA_CU_API ForwardNormResult instance_norm(
TensorView* x,
TensorView* weight,
TensorView* bias,
TensorView* running_mean,
TensorView* running_var,
const bool kUseInputStats,
Val* momentum,
Val* eps);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
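For reference, the three forward-normalization declarations above all implement variants of the same transform, differing only in which axes the statistics are taken over; with the returned mean and invstd being the saved statistics, the standard normalization is

$$ y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} \cdot \gamma + \beta $$

where $\gamma$ and $\beta$ correspond to the weight and bias TensorViews.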
|
package cmd
import (
"context"
"encoding/json"
"fmt"
url2 "github.com/AccumulateNetwork/accumulate/internal/url"
"github.com/AccumulateNetwork/accumulate/protocol"
acmeapi "github.com/AccumulateNetwork/accumulate/types/api"
"github.com/spf13/cobra"
)
var pageCmd = &cobra.Command{
Use: "page",
Short: "Create and manage Keys, Books, and Pages",
Run: func(cmd *cobra.Command, args []string) {
var out string
var err error
if len(args) == 2 {
if args[0] == "get" {
out, err = GetAndPrintKeyPage(args[1])
} else {
fmt.Println("Usage:")
PrintKeyPageGet()
PrintKeyPageCreate()
PrintKeyUpdate()
}
} else if len(args) > 3 {
if args[0] == "create" {
out, err = CreateKeyPage(args[1], args[2:])
} else if args[0] == "key" {
switch arg := args[1]; arg {
case "update":
out, err = KeyPageUpdate(args[2], protocol.UpdateKey, args[3:])
case "add":
out, err = KeyPageUpdate(args[2], protocol.AddKey, args[3:])
case "remove":
out, err = KeyPageUpdate(args[2], protocol.RemoveKey, args[3:])
default:
fmt.Println("Usage:")
PrintKeyPageCreate()
PrintKeyUpdate()
}
} else {
PrintPage()
}
} else {
PrintPage()
}
printOutput(cmd, out, err)
},
}
func PrintKeyPageGet() {
fmt.Println(" accumulate page get [URL] Get existing Key Page by URL")
}
func PrintKeyPageCreate() {
fmt.Println(" accumulate page create [actor adi url] [signing key name] [key index (optional)] [key height (optional)] [new key page url] [public key 1] ... [public key hex or name n + 1] Create new key page with 1 to N+1 public keys")
fmt.Println("\t\t example usage: accumulate key page create acc://RedWagon redKey5 acc://RedWagon/RedPage1 redKey1 redKey2 redKey3")
}
func PrintKeyUpdate() {
fmt.Println(" accumulate page key update [key page url] [signing key name] [key index (optional)] [key height (optional)] [old key name] [new public key or name] Update key in a key page with a new public key")
fmt.Println("\t\t example usage: accumulate page key update acc://RedWagon/RedPage1 redKey1 redKey2 redKey3")
fmt.Println(" accumulate page key add [key page url] [signing key name] [key index (optional)] [key height (optional)] [new key name] Add key to a key page")
fmt.Println("\t\t example usage: accumulate page key add acc://RedWagon/RedPage1 redKey1 redKey2 ")
fmt.Println(" accumulate page key remove [key page url] [signing key name] [key index (optional)] [key height (optional)] [old key name] Remove key from a key page")
fmt.Println("\t\t example usage: accumulate page key remove acc://RedWagon/RedPage1 redKey1 redKey2")
}
func PrintPage() {
PrintKeyPageCreate()
PrintKeyPageGet()
PrintKeyUpdate()
}
func GetAndPrintKeyPage(url string) (string, error) {
str, _, err := GetKeyPage(url)
if err != nil {
return "", fmt.Errorf("error retrieving key page for %s, %v", url, err)
}
res := acmeapi.APIDataResponse{}
err = json.Unmarshal(str, &res)
if err != nil {
return "", err
}
return PrintQueryResponse(&res)
}
func GetKeyPage(url string) ([]byte, *protocol.KeyPage, error) {
s, err := GetUrl(url, "sig-spec")
if err != nil {
return nil, nil, err
}
res := acmeapi.APIDataResponse{}
err = json.Unmarshal(s, &res)
if err != nil {
return nil, nil, err
}
ss := protocol.KeyPage{}
err = json.Unmarshal(*res.Data, &ss)
if err != nil {
return nil, nil, err
}
return s, &ss, nil
}
// CreateKeyPage create a new key page
func CreateKeyPage(page string, args []string) (string, error) {
pageUrl, err := url2.Parse(page)
if err != nil {
PrintKeyPageCreate()
return "", err
}
args, si, privKey, err := prepareSigner(pageUrl, args)
if err != nil {
PrintKeyBookCreate()
return "", err
}
if len(args) < 2 {
PrintKeyPageCreate()
return "", fmt.Errorf("invalid number of arguments")
}
	newUrl, err := url2.Parse(args[0])
	if err != nil {
		PrintKeyPageCreate()
		return "", err
	}
	keyLabels := args[1:]
	// when creating a key page you need to have the keys already generated and labeled.
if newUrl.Authority != pageUrl.Authority {
return "", fmt.Errorf("page url to create (%s) doesn't match the authority adi (%s)", newUrl.Authority, pageUrl.Authority)
}
css := protocol.CreateKeyPage{}
ksp := make([]*protocol.KeySpecParams, len(keyLabels))
css.Url = newUrl.String()
css.Keys = ksp
for i := range keyLabels {
ksp := protocol.KeySpecParams{}
pk, err := LookupByLabel(keyLabels[i])
if err != nil {
//now check to see if it is a valid key hex, if so we can assume that is the public key.
ksp.PublicKey, err = pubKeyFromString(keyLabels[i])
if err != nil {
return "", fmt.Errorf("key name %s, does not exist in wallet, nor is it a valid public key", keyLabels[i])
}
} else {
ksp.PublicKey = pk[32:]
}
css.Keys[i] = &ksp
}
data, err := json.Marshal(css)
if err != nil {
PrintKeyPageCreate()
return "", err
}
dataBinary, err := css.MarshalBinary()
if err != nil {
PrintKeyPageCreate()
return "", err
}
nonce := nonceFromTimeNow()
params, err := prepareGenTx(data, dataBinary, pageUrl, si, privKey, nonce)
if err != nil {
PrintKeyPageCreate()
return "", err
}
var res acmeapi.APIDataResponse
if err := Client.Request(context.Background(), "create-sig-spec", params, &res); err != nil {
return PrintJsonRpcError(err)
}
ar := ActionResponse{}
err = json.Unmarshal(*res.Data, &ar)
if err != nil {
return "", fmt.Errorf("error unmarshalling create adi result, %v", err)
}
return ar.Print()
}
func resolveKey(key string) ([]byte, error) {
ret, err := getPublicKey(key)
if err != nil {
ret, err = pubKeyFromString(key)
if err != nil {
PrintKeyUpdate()
return nil, fmt.Errorf("key %s, does not exist in wallet, nor is it a valid public key", key)
}
}
return ret, err
}
func KeyPageUpdate(actorUrl string, op protocol.KeyPageOperation, args []string) (string, error) {
u, err := url2.Parse(actorUrl)
if err != nil {
return "", err
}
args, si, privKey, err := prepareSigner(u, args)
if err != nil {
PrintKeyUpdate()
return "", err
}
var newKey []byte
var oldKey []byte
ukp := protocol.UpdateKeyPage{}
ukp.Operation = op
switch op {
case protocol.UpdateKey:
if len(args) < 2 {
PrintKeyUpdate()
return "", fmt.Errorf("invalid number of arguments")
}
oldKey, err = resolveKey(args[0])
if err != nil {
PrintKeyUpdate()
return "", err
}
newKey, err = resolveKey(args[1])
if err != nil {
PrintKeyUpdate()
return "", err
}
case protocol.AddKey:
if len(args) < 1 {
PrintKeyUpdate()
return "", fmt.Errorf("invalid number of arguments")
}
newKey, err = resolveKey(args[0])
if err != nil {
PrintKeyUpdate()
return "", err
}
case protocol.RemoveKey:
if len(args) < 1 {
PrintKeyUpdate()
return "", fmt.Errorf("invalid number of arguments")
}
oldKey, err = resolveKey(args[0])
if err != nil {
PrintKeyUpdate()
return "", err
}
}
ukp.Key = oldKey[:]
ukp.NewKey = newKey[:]
data, err := json.Marshal(&ukp)
if err != nil {
return "", err
}
dataBinary, err := ukp.MarshalBinary()
if err != nil {
return "", err
}
nonce := nonceFromTimeNow()
params, err := prepareGenTx(data, dataBinary, u, si, privKey, nonce)
if err != nil {
return "", err
}
var res acmeapi.APIDataResponse
if err := Client.Request(context.Background(), "update-key-page", params, &res); err != nil {
return PrintJsonRpcError(err)
}
ar := ActionResponse{}
err = json.Unmarshal(*res.Data, &ar)
if err != nil {
return "", fmt.Errorf("error unmarshalling create adi result, %v", err)
}
return ar.Print()
}
|
/*
* Interface to system's page allocator. No need to hold the
* kmem_cache_node ->list_lock.
*
* If we requested dmaable memory, we will get it. Even if we
* did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable.
*/
static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
int nodeid)
{
struct folio *folio;
struct slab *slab;
flags |= cachep->allocflags;
folio = (struct folio *) __alloc_pages_node(nodeid, flags, cachep->gfporder);
if (!folio) {
slab_out_of_memory(cachep, flags, nodeid);
return NULL;
}
slab = folio_slab(folio);
account_slab(slab, cachep->gfporder, cachep, flags);
__folio_set_slab(folio);
if (sk_memalloc_socks() && page_is_pfmemalloc(folio_page(folio, 0)))
slab_set_pfmemalloc(slab);
return slab;
} |
package org.eclipse.epsilon.common.dt.launching.tabs;
import org.eclipse.epsilon.common.util.StringProperties;
import org.eclipse.epsilon.eol.types.EolAnyType;
import org.eclipse.epsilon.eol.types.EolPrimitiveType;
import org.eclipse.epsilon.eol.types.EolType;
public class ParameterConfiguration {
protected String name;
protected String value = "";
protected String type;
public ParameterConfiguration() {}
public ParameterConfiguration(StringProperties properties) {
this.name = properties.getProperty("name");
this.type = properties.getProperty("type");
this.value = properties.getProperty("value");
}
public StringProperties toStringProperties() {
StringProperties p = new StringProperties();
p.put("name", name);
p.put("type", type);
p.put("value", value);
return p;
}
public ParameterConfiguration(String name, String value, String type) {
super();
this.name = name;
this.value = value;
this.type = type;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public EolType getEolType() {
if ("String".equals(type)) return EolPrimitiveType.String;
else if ("Integer".equals(type)) return EolPrimitiveType.Integer;
else if ("Real".equals(type)) return EolPrimitiveType.Real;
else if ("Boolean".equals(type)) return EolPrimitiveType.Boolean;
return EolAnyType.Instance;
}
public Object getCastedValue() {
if ("String".equals(type)) return value + "";
else if ("Integer".equals(type)) {
try { return Integer.parseInt(value); }
catch (Exception ex) { return 0; }
}
else if ("Boolean".equals(type)) {
try { return Boolean.parseBoolean(value); }
catch (Exception ex) { return false; }
}
else if ("Real".equals(type)) {
if (value.endsWith("d")) {
try { return Double.parseDouble(value); }
catch (Exception ex) { return 0.0d; }
}
else {
try { return Float.parseFloat(value); }
catch (Exception ex) { return 0.0f; }
}
}
else return value;
}
}
|
/// Returns the ascent ceiling of the model.
pub(crate) fn find_ascent_ceiling(&self, gf_override: Option<f64>) -> f64 {
let mut ceilings: [f64; TISSUE_COUNT] = [0.0; TISSUE_COUNT];
let gf = match gf_override {
Some(t) => t,
None => {
if self.first_deco_depth != usize::MAX {
self.gf_at_depth(self.first_deco_depth)
}
else {
self.gf_low
}
},
};
for (idx, val) in ceilings.iter_mut().enumerate() {
let a = self.tissue_a_value(idx);
let b = self.tissue_b_value(idx);
*val = self.tissue_ceiling(gf, idx, a, b)
}
ceilings.iter().cloned().fold(core::f64::NAN, f64::max)
} |
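The body of `tissue_ceiling(gf, idx, a, b)` is not shown here, but under the usual Bühlmann gradient-factor model (an assumption about this codebase, not something the snippet confirms) the per-tissue ceiling would be

$$ P_{\text{ceiling}} = \frac{P_{\text{tissue}} - a \cdot gf}{\frac{gf}{b} + 1 - gf} $$

The final fold takes the deepest (most restrictive) of the per-tissue values; seeding it with NAN is safe because Rust's `f64::max` ignores NaN operands, so the first computed ceiling always replaces it.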
The Art of Governance and the Political Grammar of Legitimacy: Tales of the State for a Unified Korea
This inquiry demonstrates that the political legitimacy of a certain society is historically determined, reflects specific institutional and contextual features, and employs a variety of meanings. These meanings can describe both a state of affairs and a process that ultimately involves justifications for legitimate agents and socio-political structures. This paper attempts to understand how the meanings of political legitimacy are conceptualized in society. As a case study, it asks: What are the conditions for the existence of political legitimacy and how have they been constructed? How is political legitimacy endorsed in South Korea today, and how does it differ from the past? This paper applies a deconstructive theory of political legitimacy that explores a distinctively modern style, or 'art of governance', that has an all-encompassing as well as an individualized effect upon its constituencies. By this approach, this paper argues that the concept of unification does not have a solid significance in the real world; rather, it is an imaginary idea imposed by the dominant elite class, one that is constantly reinterpreted and transformed in its political context.
package main
import "fmt"
func main() {
x := 9
y := 10
for i := 0; i < 3; i++ {
if x < y {
fmt.Printf("%d is less than %d\n", x, y)
		} else if x > y {
			fmt.Printf("%d is greater than %d\n", x, y)
		} else {
fmt.Printf("%d is equal to %d\n", x, y)
}
x = x + 1
}
color := "red"
switch color {
case "red":
fmt.Println("color is red")
case "green":
fmt.Println("color is green")
default:
fmt.Println("color is not green or red")
}
} |
/* Huffman encoding for each region based on category and power_index */
static int16_t vector_huffman(int16_t category,
int16_t power_index,
int16_t *raw_mlt_ptr,
uint32_t *word_ptr)
{
int16_t inv_of_step_size_times_std_dev;
int16_t j;
int16_t n;
int16_t k;
int16_t number_of_region_bits;
int16_t number_of_non_zero;
int16_t vec_dim;
int16_t num_vecs;
int16_t kmax;
int16_t kmax_plus_one;
int16_t index;
int16_t signs_index;
const int16_t *bitcount_table_ptr;
const uint16_t *code_table_ptr;
int32_t code_bits;
int16_t number_of_code_bits;
uint32_t current_word;
int16_t current_word_bits_free;
int32_t acca;
int32_t accb;
int16_t temp;
int16_t mytemp;
int16_t myacca;
vec_dim = vector_dimension[category];
num_vecs = number_of_vectors[category];
kmax = max_bin[category];
kmax_plus_one = add(kmax, 1);
current_word = 0L;
current_word_bits_free = 32;
number_of_region_bits = 0;
bitcount_table_ptr = table_of_bitcount_tables[category];
code_table_ptr = table_of_code_tables[category];
acca = L_mult(step_size_inverse_table[category], standard_deviation_inverse_table[power_index]);
acca = L_shr(acca, 1);
acca = L_add(acca, 4096);
acca = L_shr(acca, 13);
mytemp = acca & 0x3;
acca = L_shr(acca, 2);
inv_of_step_size_times_std_dev = (int16_t) acca;
for (n = 0; n < num_vecs; n++)
{
index = 0;
signs_index = 0;
number_of_non_zero = 0;
for (j = 0; j < vec_dim; j++)
{
k = abs_s(*raw_mlt_ptr);
acca = L_mult(k, inv_of_step_size_times_std_dev);
acca = L_shr(acca, 1);
myacca = (int16_t) L_mult(k, mytemp);
myacca = (int16_t) L_shr(myacca, 1);
myacca = (int16_t) L_add(myacca, int_dead_zone_low_bits[category]);
myacca = (int16_t) L_shr(myacca, 2);
acca = L_add(acca, int_dead_zone[category]);
acca = L_add(acca, myacca);
acca = L_shr(acca, 13);
k = (int16_t) acca;
if (k != 0)
{
number_of_non_zero = add(number_of_non_zero, 1);
signs_index = shl(signs_index, 1);
if (*raw_mlt_ptr > 0)
signs_index = add(signs_index, 1);
temp = sub(k, kmax);
if (temp > 0)
k = kmax;
}
acca = L_shr(L_mult(index, (kmax_plus_one)), 1);
index = (int16_t) acca;
index = add(index, k);
raw_mlt_ptr++;
}
code_bits = *(code_table_ptr + index);
number_of_code_bits = add((*(bitcount_table_ptr + index)), number_of_non_zero);
number_of_region_bits = add(number_of_region_bits, number_of_code_bits);
acca = code_bits << number_of_non_zero;
accb = signs_index;
acca = L_add(acca, accb);
code_bits = acca;
j = current_word_bits_free - number_of_code_bits;
if (j >= 0)
{
acca = code_bits << j;
current_word = L_add(current_word, acca);
current_word_bits_free = j;
}
else
{
j = negate(j);
acca = L_shr(code_bits, j);
current_word = L_add(current_word, acca);
*word_ptr++ = current_word;
current_word_bits_free = 32 - j;
current_word = code_bits << current_word_bits_free;
}
}
*word_ptr++ = current_word;
return number_of_region_bits;
} |
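The word-packing portion of the routine above can be read in isolation. Here is a hypothetical Python rendering of that inner mechanism (not the codec itself): variable-length codes are appended into 32-bit words through a running (current_word, bits_free) pair, flushing a word whenever it fills.

def pack_codes(codes):
    # codes: iterable of (value, bit_count) pairs; returns a list of 32-bit words
    words = []
    current_word, bits_free = 0, 32
    for value, nbits in codes:
        if bits_free >= nbits:
            current_word |= value << (bits_free - nbits)
            bits_free -= nbits
        else:
            spill = nbits - bits_free
            current_word |= value >> spill            # high bits finish this word
            words.append(current_word)
            bits_free = 32 - spill
            current_word = (value << bits_free) & 0xFFFFFFFF  # low bits start the next
    words.append(current_word)                        # final partial word, like *word_ptr++
    return words

print([hex(w) for w in pack_codes([(0b101, 3), (0xABCDE, 20), (0x3FF, 10)])])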
Envelope extraction from composite shapes for shape retrieval
The analysis of composite shapes has recently received an increasing amount of research attention. Unlike a silhouette, a composite shape rarely contains a complete envelope. In this paper, we propose a novel envelope extraction algorithm for composite shapes based on the Delaunay triangulation. By analyzing the spatial relationships among the individual contour components and their concavities, we establish new models to describe the envelope edges and their corresponding local enclosed regions. These models are then used to extract accurate envelopes for composite shapes. We apply the extracted envelopes to improve the shape classification used in shape retrieval. The experimental results show that our algorithm effectively boosts existing shape retrieval algorithms.
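The paper's edge models are not spelled out in the abstract, but the underlying Delaunay-based idea can be illustrated with a small sketch (an assumption-laden stand-in, not the authors' algorithm): triangulate the points of all component contours, discard triangles whose circumradius exceeds a threshold in the style of an alpha shape, and keep the edges that border exactly one surviving triangle as the envelope.

# A minimal alpha-shape-style sketch of Delaunay-based envelope extraction.
# Illustrative only: it assumes SciPy is available and that the composite
# shape is given as a 2-D point set sampled from its component contours.
import numpy as np
from scipy.spatial import Delaunay

def envelope_edges(points, alpha=1.0):
    """Return index pairs of the boundary edges of an alpha-shape-style envelope."""
    tri = Delaunay(points)
    edge_count = {}
    for simplex in tri.simplices:
        a, b, c = points[simplex]
        # Circumradius R = (|bc| * |ac| * |ab|) / (4 * area); drop large triangles.
        la = np.linalg.norm(b - c)
        lb = np.linalg.norm(a - c)
        lc = np.linalg.norm(a - b)
        area = 0.5 * abs((b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0]))
        if area == 0.0 or (la * lb * lc) / (4.0 * area) > 1.0 / alpha:
            continue
        for e in ((simplex[0], simplex[1]), (simplex[1], simplex[2]), (simplex[0], simplex[2])):
            key = tuple(sorted(e))
            edge_count[key] = edge_count.get(key, 0) + 1
    # Envelope edges belong to exactly one surviving triangle.
    return [e for e, n in edge_count.items() if n == 1]

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    pts = rng.random((200, 2))
    print(len(envelope_edges(pts, alpha=3.0)), "envelope edges")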
#!/usr/bin/env python3
# author: @netmanchris
""" Copyright 2015 Hewlett Packard Enterprise Development LP
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# This section imports required libraries
import json
import requests
HEADERS = {'Accept': 'application/json', 'Content-Type':
'application/json', 'Accept-encoding': 'application/json'}
#auth = None
"""
This section contains functions which operate at the device level
"""
def get_dev_details(ip_address, auth, url):
"""Takes string input of IP address to issue RESTUL call to HP IMC
:param ip_address: string object of dotted decimal notation of IPv4 address
:return: dictionary of device details
>>> get_dev_details('10.101.0.1')
{'symbolLevel': '2', 'typeName': 'Cisco 2811', 'location': 'changed this too', 'status': '1', 'sysName': 'Cisco2811.haw.int', 'id': '30', 'symbolType': '3', 'symbolId': '1032', 'sysDescription': '', 'symbolName': 'Cisco2811.haw.int', 'mask': '255.255.255.0', 'label': 'Cisco2811.haw.int', 'symbolDesc': '', 'sysOid': '1.3.6.1.4.1.9.1.576', 'contact': 'changed this too', 'statusDesc': 'Normal', 'parentId': '1', 'categoryId': '0', 'topoIconName': 'iconroute', 'mac': '00:1b:d4:47:1e:68', 'devCategoryImgSrc': 'router', 'link': {'@rel': 'self', '@href': 'http://10.101.0.202:8080/imcrs/plat/res/device/30', '@op': 'GET'}, 'ip': '10.101.0.1'}
>>> get_dev_details('8.8.8.8')
Device not found
'Device not found'
"""
# checks to see if the imc credentials are already available
get_dev_details_url = "/imcrs/plat/res/device?resPrivilegeFilter=false&ip=" + \
str(ip_address) + "&start=0&size=1000&orderBy=id&desc=false&total=false"
f_url = url + get_dev_details_url
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=HEADERS)
# r.status_code
try:
if r.status_code == 200:
dev_details = (json.loads(r.text))
if len(dev_details) == 0:
print("Device not found")
return "Device not found"
elif type(dev_details['device']) == list:
for i in dev_details['device']:
if i['ip'] == ip_address:
dev_details = i
return dev_details
elif type(dev_details['device']) == dict:
return dev_details['device']
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_dev_details: An Error has occured"
def get_dev_interface(devid, auth, url):
"""
Function takes devid as input to RESTFUL call to HP IMC platform
:param devid: requires devid as the only input
:return: list object which contains a dictionary per interface
"""
# checks to see if the imc credentials are already available
get_dev_interface_url = "/imcrs/plat/res/device/" + str(devid) + \
"/interface?start=0&size=1000&desc=false&total=false"
f_url = url + get_dev_interface_url
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=HEADERS)
# r.status_code
try:
if r.status_code == 200:
int_list = (json.loads(r.text))['interface']
return int_list
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_dev_interface: An Error has occured"
def get_dev_run_config(devid, auth, url):
"""
function takes the devId of a specific device and issues a RESTFUL call to get the most current running config
file as known by the HP IMC Base Platform ICC module for the target device.
:param devid: int or str value of the target device
:return: str which contains the entire content of the target device running configuration. If the device is not
currently supported in the HP IMC Base Platform ICC module, this call returns a string of "This feature is not
supported on this device"
"""
# checks to see if the imc credentials are already available
get_dev_run_url = "/imcrs/icc/deviceCfg/" + str(devid) + "/currentRun"
f_url = url + get_dev_run_url
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=HEADERS)
# print (r.status_code)
try:
if r.status_code == 200:
try:
run_conf = (json.loads(r.text))['content']
return run_conf
            except (KeyError, ValueError):
                return "This feature is not supported on this device"
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_dev_run_config: An Error has occured"
def get_dev_start_config(devId, auth, url):
"""
function takes the devId of a specific device and issues a RESTFUL call to get the most current startup config
file as known by the HP IMC Base Platform ICC module for the target device.
:param devId: int or str value of the target device
:return: str which contains the entire content of the target device startup configuration. If the device is not
currently supported in the HP IMC Base Platform ICC module, this call returns a string of "This feature is not
supported on this device"
"""
# checks to see if the imc credentials are already available
get_dev_run_url = "/imcrs/icc/deviceCfg/" + str(devId) + "/currentStart"
f_url = url + get_dev_run_url
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=HEADERS)
try:
if r.status_code == 200:
try:
start_conf = (json.loads(r.text))['content']
return start_conf
            except (KeyError, ValueError):
                return "Startup config is not supported on this device"
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_dev_run_config: An Error has occured"
"""
This section contains functions which operate at the interface level
"""
def get_interface_details(devId, ifIndex, auth, url):
    """
    Function takes devId and ifIndex of a specific device and interface and issues a
    RESTful call to the HP IMC platform.
    :param devId: int or str value of the target device
    :param ifIndex: int or str value of the target interface
    :return: dictionary of interface details
    """
    get_interface_details_url = "/imcrs/plat/res/device/" + str(devId) + "/interface/" + str(ifIndex)
    f_url = url + get_interface_details_url
    # creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=HEADERS)
# r.status_code
try:
if r.status_code == 200:
dev_details = (json.loads(r.text))
return dev_details
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_interface_details: An Error has occured"
def set_inteface_down(devid, ifindex, auth, url):
    """
    Function takes devid and ifindex of a specific device and interface and issues a
    RESTful call to "shut" the specified interface on the target device.
    :param devid: int or str value of the target device
    :param ifindex: int or str value of the target interface
    :return: HTTP status code 204 with no values.
    """
set_int_down_url = "/imcrs/plat/res/device/" + str(devid) + "/interface/" + str(ifindex) + "/down"
f_url = url + set_int_down_url
try:
r = requests.put(f_url, auth=auth,
headers=HEADERS) # creates the URL using the payload variable as the contents
print(r.status_code)
if r.status_code == 204:
return r.status_code
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " set_inteface_down: An Error has occured"
def set_inteface_up(devid, ifindex, auth, url):
    """
    Function takes devid and ifindex of a specific device and interface and issues a
    RESTful call to "undo shut" the specified interface on the target device.
    :param devid: int or str value of the target device
    :param ifindex: int or str value of the target interface
    :return: HTTP status code 204 with no values.
    """
set_int_up_url = "/imcrs/plat/res/device/" + str(devid) + "/interface/" + str(ifindex) + "/up"
f_url = url + set_int_up_url
try:
r = requests.put(f_url, auth=auth,
headers=HEADERS) # creates the URL using the payload variable as the contents
if r.status_code == 204:
return r.status_code
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " set_inteface_up: An Error has occured"
|
package org.jm.demo.rxjava.condition;
import io.reactivex.Observable;
import io.reactivex.functions.Consumer;
import java.util.ArrayList;
import java.util.concurrent.TimeUnit;
/**
 * @author jiangming
 * <p>
 * Amb: given two or more Observables, emit all the items from only the first
 * of these Observables to emit an item or notification.
 */
public class Amb {
public static void main(String[] args) throws Exception {
        // Create the source Observables
        Observable<Integer> delayObservable = Observable.range(1, 5)
                .delay(100, TimeUnit.MILLISECONDS); // delay emission by 100 ms
        Observable<Integer> rangeObservable = Observable.range(6, 5);
        // Collect the Observables in a List
        ArrayList<Observable<Integer>> list = new ArrayList<>();
        list.add(delayObservable);
        list.add(rangeObservable);
        // Collect the Observables in an array
Observable<Integer>[] array = new Observable[2];
array[0] = delayObservable;
array[1] = rangeObservable;
        /**
         * 1. ambWith(ObservableSource<? extends T> other)
         * Compared against another Observable, emits only the data of
         * whichever Observable signals first.
         */
rangeObservable.ambWith(delayObservable)
.subscribe(new Consumer<Integer>() {
@Override
public void accept(Integer integer) throws Exception {
System.out.println("--> accept(1): " + integer);
}
});
System.in.read();
System.out.println("------------------------------------------------");
        /**
         * 2. amb(Iterable<? extends ObservableSource<? extends T>> sources)
         * Accepts an Iterable of Observables and emits only the data of the
         * Observable in the collection that signals first.
         */
Observable.amb(list)
.subscribe(new Consumer<Integer>() {
@Override
public void accept(Integer integer) throws Exception {
System.out.println("--> accept(2): " + integer);
}
});
System.in.read();
System.out.println("------------------------------------------------");
        /**
         * 3. ambArray(ObservableSource<? extends T>... sources)
         * Accepts an array of Observables and emits only the data of the
         * Observable in the array that signals first.
         */
Observable.ambArray(array)
.subscribe(new Consumer<Integer>() {
@Override
public void accept(Integer integer) throws Exception {
System.out.println("--> accept(3): " + integer);
}
});
System.in.read();
}
}
|
#ifndef DISTANCES_TRIANGULARQUADRATURE_H
#define DISTANCES_TRIANGULARQUADRATURE_H
#include <type_traits>
#include <Eigen/Core>
namespace sdot{
namespace distances{
/**
@class TriangularQuadrature
@ingroup Quadrature
Contains functions that return quadrature rules for the unit triangle [(0,0), (1,0), (1,1)]
Maximum degree is 7.
This adapts the "Strang" rules reported at https://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tri/quadrature_rules_tri.html
*/
class TriangularQuadrature{
public:
static std::pair<Eigen::Matrix2Xd, Eigen::VectorXd> Get(unsigned int degree);
static std::pair<Eigen::Matrix2Xd, Eigen::VectorXd> Centroid();
static std::pair<Eigen::Matrix2Xd, Eigen::VectorXd> Strang1();
static std::pair<Eigen::Matrix2Xd, Eigen::VectorXd> Strang3();
static std::pair<Eigen::Matrix2Xd, Eigen::VectorXd> Strang5();
static std::pair<Eigen::Matrix2Xd, Eigen::VectorXd> Strang7();
static std::pair<Eigen::Matrix2Xd, Eigen::VectorXd> Strang9();
static std::pair<Eigen::Matrix2Xd, Eigen::VectorXd> Strang10();
    /** Uses quadrature to compute the integral of a function f over a triangle
specified by corner points pt1, pt2, pt3 in counter-clockwise order.
*/
template<typename FunctionType>
static typename std::invoke_result<FunctionType,Eigen::Vector2d>::type Integrate(FunctionType f,
Eigen::Vector2d const& pt1,
Eigen::Vector2d const& pt2,
Eigen::Vector2d const& pt3)
{
Eigen::Matrix2d A(2,2); // 2x2 matrix transforming reference coordinates to spatial coordinates
A << pt2(0)-pt1(0), pt3(0)-pt1(0),
pt2(1)-pt1(1), pt3(1)-pt1(1);
double jacDet = (pt2(0)-pt1(0))*(pt3(1)-pt1(1)) - (pt3(0)-pt1(0))*(pt2(1)-pt1(1));
Eigen::Matrix2Xd quadPts;
Eigen::VectorXd quadWts;
std::tie(quadPts, quadWts) = TriangularQuadrature::Get(7);
typename std::invoke_result<FunctionType,Eigen::Vector2d>::type output = quadWts(0)*f(pt1 + A*quadPts.col(0));
for(int i=1; i<quadWts.size(); ++i){
output += quadWts(i)*f(pt1 + A*quadPts.col(i)); // map pt in reference triangle to real coordinates
}
output *= jacDet;
return output;
};
};
}
}
#endif
|
// Test of SharedSemiFuture specific details.
TEST(SharedFuture, ModificationsArePrivate) {
FUTURE_SUCCESS_TEST([] { return 1; },
                        [](auto&& fut) {
const auto exec = InlineRecursiveCountingExecutor::make();
const auto shared = std::move(fut).share();
const auto checkFunc = [](int&& i) {
ASSERT_EQ(i, 1);
return ++i;
};
auto fut1 = shared.thenRunOn(exec).then(checkFunc);
auto fut2 = shared.thenRunOn(exec).then(checkFunc);
ASSERT_EQ(fut1.get(), 2);
ASSERT_EQ(fut2.get(), 2);
ASSERT_NE(&fut1.get(), &fut2.get());
auto fut3 = shared.thenRunOn(exec).then(checkFunc);
ASSERT_EQ(fut3.get(), 2);
ASSERT_NE(&fut3.get(), &fut1.get());
ASSERT_NE(&fut3.get(), &fut2.get());
});
} |
#!/usr/bin/env python
from choose_receipts import choose_receipts, print_selection_result
def main():
s = '''61.19
70.81
59.92
107.97
540.21
43.04
99.79
126.06
147.52
77.56
92.29
47.64
118.46
47.32
72.71
70.84
59.87
118.71
25.33
25.46
3431.21
25.42
7.74
204.34
48.94
0.03
309.71
375.58
1016.14
870.48
16.48
2749.14
1695.8
184.37'''
values = s.split('\n')
result, exact = choose_receipts(values, ['8403.61'])
print()
print_selection_result(result, exact)
# One possible exact solution is:
# sum of all chooses = 8403.61 ✔
# 8403.61: (61.19, 77.56, 3431.21, 204.34, 2749.14, 1695.8, 184.37)
# remain: (70.81, 59.92, 107.97, 540.21, 43.04, 99.79, 126.06, 147.52, 92.29, 47.64, 118.46, 47.32, 72.71, 70.84, 59.87, 118.71, 25.33, 25.46, 25.42, 7.74, 48.94, 0.03, 309.71, 375.58, 1016.14, 870.48, 16.48)
if __name__ == '__main__':
main()
|
import * as React from 'react';
import * as urlParse from 'url-parse';
import AppRoute from './AppRoute';
import matchPath from './matchPath';
import { recordAssets } from './handleAssets';
import { ICESTSRK_NOT_FOUND } from './constant';
import { setCache } from './cache';
type RouteType = 'pushState' | 'replaceState';
export interface AppRouterProps {
onRouteChange?: (
pathname: string,
query: object,
hash?: string,
type?: RouteType | 'init' | 'popstate',
) => void;
ErrorComponent?: any;
LoadingComponent?: any;
NotFoundComponent?: any;
useShadow?: boolean;
}
interface AppRouterState {
url: string;
forceRenderCount: number;
}
interface OriginalStateFunction {
(state: any, title: string, url?: string): void;
}
export default class AppRouter extends React.Component<AppRouterProps, AppRouterState> {
private originalPush: OriginalStateFunction = window.history.pushState;
private originalReplace: OriginalStateFunction = window.history.replaceState;
static defaultProps = {
ErrorComponent: <div>js bundle loaded error</div>,
NotFoundComponent: <div>NotFound</div>,
useShadow: false,
};
constructor(props: AppRouterProps) {
super(props);
this.state = {
url: location.href,
forceRenderCount: 0,
};
recordAssets();
}
componentDidMount() {
this.hijackHistory();
this.handleRouteChange(location.href, 'init');
// render NotFoundComponent eventListener
window.addEventListener('icestark:not-found', () => {
this.setState({ url: ICESTSRK_NOT_FOUND });
});
}
componentWillUnmount() {
this.unHijackHistory();
}
/**
* Hijack window.history
*/
hijackHistory = (): void => {
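    // Wrap pushState/replaceState so programmatic navigations also flow
    // through handleStateChange before notifying onRouteChange.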
window.history.pushState = (state: any, title: string, url?: string, ...rest) => {
this.originalPush.apply(window.history, [state, title, url, ...rest]);
this.handleStateChange(state, url, 'pushState');
};
window.history.replaceState = (state: any, title: string, url?: string, ...rest) => {
this.originalReplace.apply(window.history, [state, title, url, ...rest]);
this.handleStateChange(state, url, 'replaceState');
};
window.addEventListener('popstate', this.handlePopState, false);
};
/**
* Unhijacking history
*/
unHijackHistory = (): void => {
window.history.pushState = this.originalPush;
window.history.replaceState = this.originalReplace;
window.removeEventListener('popstate', this.handlePopState, false);
};
/**
* Trigger statechange: pushState | replaceState
*/
handleStateChange = (state: any, url: string, routeType?: RouteType): void => {
// deal with forceRender
if (state && (state.forceRender || (state.state && state.state.forceRender))) {
const { forceRenderCount } = this.state;
this.setState({ url, forceRenderCount: forceRenderCount + 1 });
} else {
this.setState({ url });
}
this.handleRouteChange(url, routeType);
};
/**
* Trigger popstate
*/
handlePopState = (): void => {
const url = location.href;
this.setState({ url });
this.handleRouteChange(url, 'popstate');
};
/**
* Trigger onRouteChange
*/
handleRouteChange = (url: string, type: RouteType | 'init' | 'popstate'): void => {
const { pathname, query, hash } = urlParse(url, true);
this.props.onRouteChange(pathname, query, hash, type);
};
render() {
const { NotFoundComponent, ErrorComponent, LoadingComponent, useShadow, children } = this.props;
const { url, forceRenderCount } = this.state;
const { pathname, query } = urlParse(url, true);
const { localUrl } = query;
let match: any = null;
let element: any;
React.Children.forEach(children, child => {
if (match == null && React.isValidElement(child)) {
element = child;
const { path } = child.props as any;
match = path ? matchPath(pathname, { ...child.props }) : null;
}
});
const extraProps: any = {
ErrorComponent,
LoadingComponent,
useShadow,
forceRenderCount,
};
if (localUrl) {
extraProps.url = localUrl;
}
let realComponent: any = null;
if (match) {
const { path, basename } = element.props as any;
setCache('basename', basename || (Array.isArray(path) ? path[0] : path));
realComponent = React.cloneElement(element, extraProps);
} else {
realComponent = (
<AppRoute
path={ICESTSRK_NOT_FOUND}
url={ICESTSRK_NOT_FOUND}
NotFoundComponent={NotFoundComponent}
useShadow={useShadow}
/>
);
}
return realComponent;
}
}
|
The Assad regime has massed approximately 170 tanks near the Turkish border, according to an unconfirmed report on Friday by a general in the Free Syrian Army.
General Mustafa al-Sheikh told Reuters that the tanks are now located 30 kilometers (19 miles) from the border, northeast of Aleppo.
“They either want to move toward the border to confront Turkish troops stationed there, or they are planning to attack rebels in towns near the border,” al-Sheikh said.
The move comes after Turkey on Thursday sent anti-aircraft guns, rocket launchers and other fortifications to its frontier with Syria, marking an escalation in hostilities between the two countries.
Last week, Assad’s forces shot down a Turkish F-4 jet which they claimed had made an illegal foray into Syrian airspace. The Syrian government later claimed that the plane may have been shot down because it was mistaken for being Israeli. |
Since I’ve started adding woodworking power tools to my bookbinding workshop, I wanted to try one thing. Okay, there were many things that became possible since that moment, but that one was special.
What instruments do you use to trim your books? Initially I used just a simple utility knife, or a bookbinding knife, which is even easier to use because it is sharpened on one side only. Then I sharpened a chisel to make a sort of makeshift bookbinding plough with one of the presses I already had. A bit later I built a simple traditional plough.
But trimming a book with a router? That’s something different!
I couldn’t just take a book and trim it with a router — that would’t result in a straight line. I had to build some sort of a guide and use a router bit with a bearing to follow that guide. For that reason, I’ve made something very similar to bookbinding presses I sell at my Etsy store. It had to have a thinner side, because the only straight router bit with a bearing I had was a pretty short one.
The tool I use in this video was made specially for the experiment from some scrap plywood, so don’t mind that it is not as straight as it should be.
Then I had to accurately tighten the book in the press. It should be set precisely to result in the same width of the book block on the top and the bottom after trimming. One of the sides of the press serves as a guide for the bearing.
The trimming process itself took almost no time. I was thinking about the experiment for many months. I’ve been to my workshop several times to prepare all the things: cut the plywood, put the nuts inside, glue the boards together, etc. As you can see in this video, it took only several seconds to trim the book.
I was pretty surprised by the result! Considering all the things: that the press wasn’t ideal and that the router bit was not in the best shape, book edge turned to be almost perfect!
I should have added some scrap cardboard on both sides of the book, to minimize the tearing of the endleaves. That’s the thing I often forget to do. Both when making books and woodworking.
However, that’s nothing that couldn’t be fixed with a piece of sandpaper.
/**
* Wireless response that requires multiple receive transmissions before
* completion
*
* @param <T>
* the data type
*/
public abstract class MultiRxData<T> extends RxRawData<T> {
/**
* Constructor
*
* @param remoteNode
* the {@linkplain RemoteNode}
* @param status
* the initial {@linkplain Status}
* @param signalStrength
* the signal strength
* @param data
* the data
*/
public MultiRxData(final RemoteNode remoteNode, final Status status,
final int signalStrength, final T data) {
super(remoteNode, status, signalStrength, data);
}
/**
* @return true when the transmission of data has timed out
*/
public boolean hasTimedOut() {
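        // 120000 ms: consider the transmission stale two minutes after creation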
return Calendar.getInstance().getTimeInMillis()
- getCreatedTime().getTimeInMillis() > 120000;
}
} |
// Listen binds the given address, receives messages and sends them to the out channel
func (rcv *UDP) Listen(addr *net.UDPAddr) error {
var err error
rcv.conn, err = net.ListenUDP("udp", addr)
if err != nil {
return err
}
go func() {
ticker := time.NewTicker(time.Minute)
defer ticker.Stop()
for {
select {
case <-ticker.C:
metricsReceived := atomic.LoadUint32(&rcv.metricsReceived)
atomic.AddUint32(&rcv.metricsReceived, -metricsReceived)
rcv.Stat("udp.metricsReceived", float64(metricsReceived))
incompleteReceived := atomic.LoadUint32(&rcv.incompleteReceived)
atomic.AddUint32(&rcv.incompleteReceived, -incompleteReceived)
rcv.Stat("udp.incompleteReceived", float64(incompleteReceived))
errors := atomic.LoadUint32(&rcv.errors)
atomic.AddUint32(&rcv.errors, -errors)
rcv.Stat("udp.errors", float64(errors))
logrus.WithFields(logrus.Fields{
"metricsReceived": metricsReceived,
"incompleteReceived": incompleteReceived,
"errors": errors,
}).Info("[udp] doCheckpoint()")
case <-rcv.exit:
rcv.conn.Close()
return
}
}
}()
go func() {
defer rcv.conn.Close()
var buf [2048]byte
var data *bytes.Buffer
lines := newIncompleteStorage()
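            // UDP datagrams may end mid-line; keep the partial tail per peer
            // and prepend it to that peer's next datagram.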
for {
rlen, peer, err := rcv.conn.ReadFromUDP(buf[:])
if err != nil {
if strings.Contains(err.Error(), "use of closed network connection") {
break
}
atomic.AddUint32(&rcv.errors, 1)
logrus.Error(err)
continue
}
prev := lines.pop(peer.String())
if prev != nil {
data = bytes.NewBuffer(prev)
data.Write(buf[:rlen])
} else {
data = bytes.NewBuffer(buf[:rlen])
}
for {
line, err := data.ReadBytes('\n')
if err != nil {
if err == io.EOF {
if len(line) > 0 {
if rcv.logIncomplete {
logIncomplete(peer, buf[:rlen], line)
}
lines.store(peer.String(), line)
atomic.AddUint32(&rcv.incompleteReceived, 1)
}
} else {
atomic.AddUint32(&rcv.errors, 1)
logrus.Error(err)
}
break
}
if len(line) > 0 {
if msg, err := points.ParseText(string(line)); err != nil {
atomic.AddUint32(&rcv.errors, 1)
logrus.Info(err)
} else {
atomic.AddUint32(&rcv.metricsReceived, 1)
rcv.out <- msg
}
}
}
}
}()
return nil
} |
package org.vosk;
import com.sun.jna.Native;
import com.sun.jna.Pointer;
public class LibVosk {
static {
Native.register(LibVosk.class, "vosk");
}
public static native void vosk_set_log_level(int level);
public static native Pointer vosk_model_new(String path);
public static native void vosk_model_free(Pointer model);
public static native Pointer vosk_spk_model_new(String path);
public static native void vosk_spk_model_free(Pointer model);
public static native Pointer vosk_recognizer_new(Model model, float sample_rate);
public static native Pointer vosk_recognizer_new_spk(Pointer model, Pointer spkModel, float sample_rate);
public static native Pointer vosk_recognizer_new_grm(Pointer model, float sample_rate, String grammar);
public static native boolean vosk_recognizer_accept_waveform(Pointer recognizer, byte[] data, int len);
public static native boolean vosk_recognizer_accept_waveform_s(Pointer recognizer, short[] data, int len);
public static native boolean vosk_recognizer_accept_waveform_f(Pointer recognizer, float[] data, int len);
public static native String vosk_recognizer_result(Pointer recognizer);
public static native String vosk_recognizer_final_result(Pointer recognizer);
public static native String vosk_recognizer_partial_result(Pointer recognizer);
public static native void vosk_recognizer_free(Pointer recognizer);
public static void setLogLevel(LogLevel loglevel) {
vosk_set_log_level(loglevel.getValue());
}
}
|
// Code generated by MockGen. DO NOT EDIT.
// Source: x/params/proposal_handler_test.go
// Package testutil is a generated GoMock package.
package testutil
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockStakingKeeper is a mock of StakingKeeper interface.
type MockStakingKeeper struct {
ctrl *gomock.Controller
recorder *MockStakingKeeperMockRecorder
}
// MockStakingKeeperMockRecorder is the mock recorder for MockStakingKeeper.
type MockStakingKeeperMockRecorder struct {
mock *MockStakingKeeper
}
// NewMockStakingKeeper creates a new mock instance.
func NewMockStakingKeeper(ctrl *gomock.Controller) *MockStakingKeeper {
mock := &MockStakingKeeper{ctrl: ctrl}
mock.recorder = &MockStakingKeeperMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockStakingKeeper) EXPECT() *MockStakingKeeperMockRecorder {
return m.recorder
}
// MaxValidators mocks base method.
func (m *MockStakingKeeper) MaxValidators(ctx context.Context) (uint32, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "MaxValidators", ctx)
ret0, _ := ret[0].(uint32)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// MaxValidators indicates an expected call of MaxValidators.
func (mr *MockStakingKeeperMockRecorder) MaxValidators(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxValidators", reflect.TypeOf((*MockStakingKeeper)(nil).MaxValidators), ctx)
}
|
/**
 * Adds a running task event to the corresponding buffer and waits until an available Runner exists.
*
* @param taskEventInfo task event info
*/
public static synchronized void waitForAvailableRunner(TaskEventInfo taskEventInfo) {
TaskMode taskMode = taskEventInfo.getTaskEvent().getContext().getTaskInfo().getType().getTaskMode();
String nodeLabel = taskEventInfo.getTaskEvent().getContext().getTaskInfo().getNodeLabel();
if (nodeLabel == null) {
nodeLabel = "";
}
switch (taskMode) {
case process:
if (!processTaskEventBuffer.containsKey(nodeLabel)) {
processTaskEventBuffer.put(nodeLabel, new PriorityBlockingQueue<TaskEventInfo>());
}
processTaskEventBuffer.get(nodeLabel).add(taskEventInfo);
break;
case thread:
if (!threadTaskEventBuffer.containsKey(nodeLabel)) {
threadTaskEventBuffer.put(nodeLabel, new PriorityBlockingQueue<TaskEventInfo>());
}
threadTaskEventBuffer.get(nodeLabel).add(taskEventInfo);
break;
default:
LOGGER.warn("unknown task mode:" + taskMode + ", discard event:" + taskEventInfo.getId());
}
} |
True love is alive in the Amazon! Before the 90 Days couple Paul Staehle and Karine Martins had a very public falling out this weekend over alleged racy texts between Karine and someone named Alex, and it appeared that Paul might be packing his trunk collection and heading to the “bodock” to catch a ride to the airport and a flight back to the US. But, Karine and Paul took to Instagram Live on Monday and revealed that #Paurine are back together!
Unfortunately, it was Karine who did all of the talking, so most viewers were just like Paul (and me) and didn't understand a word she was saying. Fortunately, there were some viewers who spoke Portuguese, and one of them shared some details about what Karine said during the streaming session in a 90 Day Fiance fan group.
Karine confirmed that the two did break up, but says it was all a misunderstanding over her friend Alex. Karine also reveals that Paul is very jealous and checks her phone often — which sounds about right.
In addition, the “reporter” (for whom English appears to be a second language) says that Paul is currently living in a rented house with Karine in Brazil after they “canceled the visa process.” Karine reportedly reveals that they “are trying a different visa,” but the process is expensive.
I’m not sure what other visa options are available, unless it is Paul looking to relocate to Brazil. That prospect would jibe with his previous comments about Karine having no interest in relocating to the US or learning English. Then again, it could just mean that they have re-applied for the K-1 visa. This is apparently a very common thing as the application are often denied one or more times before being successful. [Cue: Loren soapbox session.] And it could be that Paul recently being taken off probation for his felony arson conviction might increase their chances of being approved the second time around.
Regardless of what their visa status is, Paul reportedly plans on remaining in Brazil until December, unless they split up again.
Speaking of splitting up and not remaining in Brazil, Paul seemed to suggest that was about to happen after their most recent spat. “Mary Edna Norris Staehle see you soon,” Paul wrote to his mother on Facebook on Friday.
In honor of Paul and Karine’s reconciliation, I thought I would honor them by reversing our relationship poop water animated gif:
Congratulations Paul and Karine! We look forward to seeing all of this play out on TLC in a few months. |