import './gateway/server';
import './popularity/server';
import './product/server';
/** Looks for (multiplier)cyclo/spiro/vonBaeyer tags before a chain
 * and replaces them with a group with the appropriate SMILES.
 * Note that only simple spiro tags are handled at this stage, i.e. not dispiro.
* @param group A group which is potentially a chain
* @throws ComponentGenerationException
*/
private void processRings(Element group) throws ComponentGenerationException {
Element previous = OpsinTools.getPreviousSiblingIgnoringCertainElements(group, new String[]{LOCANT_EL});
if(previous != null) {
String previousElType = previous.getName();
if(previousElType.equals(SPIRO_EL)){
processSpiroSystem(group, previous);
} else if(previousElType.equals(VONBAEYER_EL)) {
processVonBaeyerSystem(group, previous);
}
else if(previousElType.equals(CYCLO_EL)) {
processCyclisedChain(group, previous);
}
}
}
/*
* This header is generated by classdump-dyld 1.0
* on Sunday, September 27, 2020 at 11:35:41 AM Mountain Standard Time
* Operating System: Version 14.0 (Build 18A373)
* Image Source: /System/Library/Frameworks/CoreServices.framework/CoreServices
* classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by <NAME>.
*/
#import <CoreServices/CoreServices-Structs.h>
@class _LSDatabase;
@interface _LSBindingForLog : NSObject {
_LSDatabase* _db;
LSBinding* _binding;
}
-(id)redactedDescription;
-(id)initWithState:(const State*)arg1 binding:(const LSBinding*)arg2;
-(id)description;
@end
/**
* Retrieves the subdirectories of this directory.
*
 * @return Vector of subdirectories, returned by value (no manual cleanup required).
*/
vector<Directory> Directory::GetSubDirectories() {
HANDLE hFind;
WIN32_FIND_DATA fndData;
vector<Directory> arr;
hFind = FindFirstFile(this->Concatenate(L"\\*").ToString(), &fndData);
while (hFind != INVALID_HANDLE_VALUE) {
// dwFileAttributes is a bitmask, so test the directory bit rather than
// comparing for equality; also skip the "." and ".." pseudo-entries.
if ((fndData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0
&& wcscmp(fndData.cFileName, L".") != 0
&& wcscmp(fndData.cFileName, L"..") != 0) {
arr.push_back(Directory(this->Concatenate(fndData.cFileName)));
}
if (FindNextFile(hFind, &fndData) == 0) {
if (GetLastError() == ERROR_NO_MORE_FILES) {
FindClose(hFind);
hFind = INVALID_HANDLE_VALUE;
} else {
MessageBox(NULL, L"Error while listing directory", L"Error",
MB_OK | MB_ICONERROR);
return vector<Directory>();
}
}
}
return arr;
}
export const mediators = {}
export const actions = {}
package com.dtstack.taier.develop.service.develop.impl;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.JSONPath;
import com.dtstack.taier.common.enums.EScheduleJobType;
import com.dtstack.taier.dao.domain.BatchTaskParam;
import com.dtstack.taier.dao.domain.DsInfo;
import com.dtstack.taier.dao.domain.Task;
import com.dtstack.taier.develop.service.datasource.impl.DatasourceService;
import com.dtstack.taier.develop.service.datasource.impl.DsInfoService;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
/**
 * Executes Hadoop-related job types.
* Date: 2019/5/17
* Company: www.dtstack.com
*
* @author xuchao
*/
@Service
public class HadoopJobExeService {
private static final Logger LOG = LoggerFactory.getLogger(HadoopJobExeService.class);
@Autowired
private BatchTaskParamService batchTaskParamService;
@Autowired
private DsInfoService dsInfoService;
@Autowired
private DatasourceService datasourceService;
private static final String LINE_SEPARATOR = System.getProperty("line.separator");
public static final String JOB_ARGS_TEMPLATE = "-jobid %s -job %s";
private static final String ADD_FILE_FORMAT = "ADD JAR WITH %s AS %s;";
private static final String EXT_REF_RESOURCE_ARGS_TMPL = " extRefResource %s ";
private static final String OPERATE_MODEL = "operateModel";
private static final String FILES_ARG = "--files";
private static final String CMD_OPT = "--cmd-opts";
private static final Map<Integer, String> PY_VERSION_MAP = new HashMap<>(2);
static {
PY_VERSION_MAP.put(2, " 2.x ");
PY_VERSION_MAP.put(3, " 3.x ");
}
public void readyForTaskStartTrigger(Map<String, Object> actionParam, Long dtuicTenantId, Task task) throws Exception {
String sql = task.getSqlText();
sql = sql == null ? "" : sql;
String taskParams = task.getTaskParams();
String taskExeArgs = null;
JSONObject syncJob = JSON.parseObject(task.getSqlText());
taskParams = replaceSyncParll(taskParams, parseSyncChannel(syncJob));
String job = syncJob.getString("job");
//TODO: merge (this mirrors the submit logic of real-time tasks; merge once the scheduler exposes a supporting interface)
// In wizard mode, fill in the data source info from the sourceId in the job, so every run picks up the latest connection info
job = datasourceService.setJobDataSourceInfo(job, dtuicTenantId, syncJob.getIntValue("createModel"));
//TODO: why does checkSyncJobParams need to special-case hadoopConfig?
if (Objects.equals(task.getTaskType(), EScheduleJobType.SYNC.getVal())) {
List<BatchTaskParam> taskParam = batchTaskParamService.getTaskParam(task.getId());
batchTaskParamService.checkParams(batchTaskParamService.checkSyncJobParams(job), taskParam);
}
actionParam.put("job", job);
//Set the concrete type of the data source being written to
setWriterDataSourceType(actionParam, job);
if (taskExeArgs != null) {
actionParam.put("exeArgs", taskExeArgs);
}
actionParam.put("sqlText", sql);
actionParam.put("taskParams", taskParams);
}
private Integer parseSyncChannel(JSONObject syncJob) {
//Parse out the parallelism --- a sync job consumes resources equal to parallelism * 1
try {
JSONObject jobJson = syncJob.getJSONObject("job").getJSONObject("job");
JSONObject settingJson = jobJson.getJSONObject("setting");
JSONObject speedJson = settingJson.getJSONObject("speed");
return speedJson.getInteger("channel");
} catch (Exception e) {
LOG.error("failed to parse sync job channel, falling back to the default", e);
//Default to 1
return 1;
}
}
public String replaceSyncParll(String taskParams, int parallelism) throws IOException {
Properties properties = new Properties();
properties.load(new ByteArrayInputStream(taskParams.getBytes(StandardCharsets.UTF_8)));
properties.put("mr.job.parallelism", parallelism);
StringBuilder sb = new StringBuilder();
for (Map.Entry<Object, Object> tmp : properties.entrySet()) {
sb.append(tmp.getKey())
.append(" = ")
.append(tmp.getValue())
.append(LINE_SEPARATOR);
}
return sb.toString();
}
/**
* TODO: determine whether this can be removed
* Sets the data source type of the data source being written to.
*
* @param actionParam
* @param job
*/
private void setWriterDataSourceType(Map<String, Object> actionParam, String job) {
try {
Object sourceIdObject = JSONPath.eval(JSON.parseObject(job), "$.job.content[0].writer.parameter.sourceIds[0]");
if (sourceIdObject != null && StringUtils.isNotBlank(sourceIdObject.toString())) {
DsInfo data = dsInfoService.getOneById(Long.valueOf(sourceIdObject.toString()));
if (Objects.nonNull(data)) {
actionParam.put("dataSourceType", data.getDataTypeCode());
}
}
} catch (Exception e) {
LOG.info("get write datasource error {} ", job, e);
}
}
}
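The replaceSyncParll method above round-trips the task params through java.util.Properties just to override one key. As a minimal illustration of the same transformation in Python (assuming plain `key = value` lines; the Java version additionally honors the full Properties escaping rules):

def replace_sync_parallelism(task_params: str, parallelism: int) -> str:
    """Override mr.job.parallelism in a properties-style string."""
    props = {}
    for line in task_params.splitlines():
        if "=" in line:
            key, _, value = line.partition("=")
            props[key.strip()] = value.strip()
    props["mr.job.parallelism"] = str(parallelism)
    return "\n".join(f"{k} = {v}" for k, v in props.items())

print(replace_sync_parallelism("queue = default", 3))
# queue = default
# mr.job.parallelism = 3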
package com.edgar.module.sys.dao;
import com.edgar.core.repository.AbstractDaoTemplate;
import com.edgar.module.sys.repository.domain.SysMenuRoute;
import com.edgar.module.sys.repository.querydsl.QSysMenuRoute;
import com.mysema.query.sql.RelationalPathBase;
import org.springframework.stereotype.Repository;
/**
 * DAO for menu-route associations.
*
* @author <NAME>
* @version 1.0
*/
@Repository
public class SysMenuRouteDao extends AbstractDaoTemplate<Integer, SysMenuRoute> {
@Override
public RelationalPathBase<?> getPathBase() {
return QSysMenuRoute.sysMenuRoute;
}
}
Characters: Mutsuki, Koharu (Main); Torii, Akira (Main); Ninomiya, Natsuki (Supporting); Sasagase, Mika (Supporting)
More reviews

Jul 12, 2014 · 24 of 24 chapters read · Overall Rating: 4 · CrayonPop420 (All reviews)
39 people found this review helpful · Overall 4 · Story 5 · Art 6 · Character 5 · Enjoyment 3

After reading this manga, I can honestly say I never want to hear the word "senpai" again. In this short 24-chapter manga you probably hear the word senpai a thousand times. It got to the point where I wanted to kill myself every time I saw the word. And trust me, on each page it's there at least 30 times. Despite this, I still continued with it because of the creepy vibe I got from it.

Koharu no Hibi is a romance school manga with a twist where an obsessed loli, Matsuki, falls in love with a nerdy MC named Akira, to the point where she is stalking him, gluing her hands to his, taping 1000 pictures of him to her walls, and even going as far as to dig through trash cans for the water bottle he just drank because it still has his saliva on it. Now if you're into this sort of thing, then maybe this is the manga for you.

It has its quirks where you're like "haha, Matsuki being a stalking yandere loli is sort of cute", but it wears off quickly because the author has no imagination, and therefore every chapter is of the loli grabbing onto the MC and saying senpai a thousand times. No, seriously, she says it every fucking moment no matter what is happening. I'm pretty sure it's the only word she knows. The way I think of it, Matsuki is a nonviolent version of Yuno, but one hundred times more annoying. Seriously, fuck this loli.

To make it worse, the male character is just your typical harem-esque character with no real defining features and is never daring. He kind of just goes along with whatever is happening, so it's not even worth talking about him, seeing as this whole manga is based around a loli yandere who is only capable of saying senpai.

The enjoyment I had for this was sort of high for the initial 5 chapters, with her being a creepy stalker, but its charm wore off fast. What happened in 24 chapters could have been done in 15 or less. The fact that it had 24 chapters just shows that the author was attempting to milk it for all it was worth. The only problem is, it was never worth milking in the first place.

Overall this manga is pretty damn horrible. I only advise reading this if you have some sort of fetish where you want to be called senpai by some loli. But even then you will still have a hard time finishing it with its bland plot. I dare anyone to count how many times senpai is said in this manga. I'm pretty sure it's in the thousands.

Mar 31, 2013 · 24 of 24 chapters read · Overall Rating: 9 · SAimNE (All reviews)
15 people found this review helpful · Overall 9 · Story 9 · Art 8 · Character 10 · Enjoyment 9

Well, figured there should be a second review on the story now that it's done. Overall the story (what's there of it) is exactly what it should be and does its job well considering the 24 chapters that the manga has, and I would easily place it at a 9 for the genre (don't expect a primarily comedy/romance to match up with adventure/dramas).

As for art, I'll agree with the previous reviewer that it wasn't really anything special. I still kind of liked it, so I'll rate it around an 8 (6 for normal, with an extra 2 points added to the average due to the 10s on the yandere moments and the reactions to them), but for normal scenes it's not anything that will wow you. Though Koharu's image suits her really well.

Will also agree that all the way through, the characters are the main selling point of this series. They have about as much depth as you can get in 24 chapters, and Koharu's interactions with all of them are profoundly interesting... and at some points slightly scary :| ... seriously, this manga does sweetness like no other... it's almost impossible not to swoon over the cute, almost innocent romance they have going most of the time, but lurking behind it at all times are some over-the-top, holy-crap-wtfbbq moments... most of which are overall a positive, others less so >_>. As for the other characters, the childhood friends (both the present two and the one that pops in later) are really the only other significant influences on the story... there are other people, but even after multiple reads I don't really remember any of them, and as mentioned before, one even goes so far as to provoke a yandere. Every galge game player in the world was instinctively hoping they had saved after seeing that :|. As for the supporters, both are pretty interesting in their own right but never quite in the spotlight... personally I'm pretty sure they have their own thing going on, but it just never had time to make the manga; hoping for an omake or something to show how they turned out. Anyway, long story short: while there isn't quite a large cast, the ones present give off a near-perfect feeling for the main storyline, and definitely make the reading experience much better.

To sum it up: if you want the standard diabetes-causing cute romance, this is probably not for you... you'll think it is at first, but it's not... Koharu is just too extreme a character for that. If, however, you want an adorable romance with some major sudden twists thrown in, then you've found the gold mine. Personally I love yanderes, so this review may be a bit biased, but overall I place this at a 9/10... or more precisely probably a 9.5. I've read the story three times so far and will probably do so again. If you think you can handle Koharu, then by all means give this a try.

Jun 18, 2015 · 24 of 24 chapters read · Overall Rating: 8 · Jhale (All reviews)
9 people found this review helpful · Overall 8 · Story 8 · Art 7 · Character 8 · Enjoyment 9

Koharu no Hibi is one of my favorite mangas right now because it is one of the few mangas based on a stalker/yandere, and it is perhaps the only one about a completely realistic relationship with one.

Story: 8 - Koharu no Hibi is the most vanilla manga I have ever read, even though it is about such a controversial and sometimes (to some people) disgusting topic. The story revolves completely around romance and subtlety. A lot of the time, you will feel like the main character is reacting to something in an underwhelming or overwhelming way, but when you think about things, the turn of events and how they are dealt with is usually reasonable.
The biggest success with this manga is that it explores the ideation of a stalker and how romance would be with one in a realistic yet simplified light. Even better than that, most chapters introduce a new way to look at the relationship and understand the mind of a stalker. Some of the scenes are really beautiful even though everything is lighthearted and simple (almost like a children's manga).
Another interesting thing is that this book is considered Shoujo targeted for men (the magazine it was published in was for that demographic, but I think it got discontinued). It is definitely not as flowery in presentation or as emotionally sensitive with its protagonist as a normal Shoujo manga, but it definitely feels like reading Shoujo.
The biggest thing that people might consider gross is that the main love interest is probably three or more years younger than the main character. She likely starts at the age of 13 or 12, while the main character is very tall and perhaps 15 or 16 years old. In Japan, I suppose that this stuff is sort of acceptable, but it kind of ruins a lot of the enjoyment you could get from this manga.
The climax of this manga is really fun. I think it succeeds in making Koharu a Yandere without doing what a Yandere would normally do. Also, it feels nice to see the ending, especially given how it unconventionally approaches intimacy.
The biggest flaw of this manga is that there is a bit of fluff despite there not being many chapters. Some of the chapters might not feel meaningful at all except for the interesting, romantic, funny or sexy stalker quirk that shows up in every chapter.
Art: 7 - The drawings of Koharu no Hibi are simple most of the time, but the art that is shown is not terrible-looking and has consistent detail. There are a few, rare, well-drawn picturesque scenes to enjoy.
What I like about Koharu (the love interest) in design is that she is not drawn to be pretty. I hate how female protagonists usually look in manga, and it was nice to see such a human-looking character. Oddly enough, there is no real reason that she looks more realistic and imperfect. Anyway, stalkers are known to be pretty ugly, so I think that this was an important detail for the author to have included.
The two male characters in the manga look almost the same and that is a bit annoying. Other people are not drawn very often after the manga progresses and I think that subtracted some of the strength in the environment. The other female characters however, are perfectly drawn, especially considering their roles in the story. One of the later characters looks slightly unique and very enticing and she really spiced up the story with her appearances.
However, there are a lot of terrible, terrible panels that almost make me forget about the good things in the art style. What I am talking about are the close-ups of Koharu, which usually look like the same drawing again and again. The technique used in drawing the eyes just looks really ugly and fake and weird and does not portray any emotion. Even though it sounds like that would make sense given who Koharu is, this is definitely just a bad taste in dramatic panels that happens very excessively at the beginning of the series. I am really, really glad that it starts to dwindle down in the later chapters.
Character: 8 - The characters are pretty well rounded. The main character provides the apprehension the story needs to avoid completely sexualizing the Yandere (or women in general) as manga usually does. This more down-to-Earth precedent is essential to Koharu no Hibi and I am really glad that the main character is who he is. Also, he is eased into love and he is pretty relatable in the common sense yet acceptance of the taboo inherent in most of us. He can talk to Koharu in a fatherly way that actually makes Koharu a productive member of society. It is a really cool dynamic that I would not mind being reused by another author.
Koharu is a really fun character, of course. She seems like she will be a static character, but she is actually receptive to the person who she loves (which probably makes her the most sympathetic stalker in literature). Koharu will violate personal space and do unsanitary or ambiguously unsanitary things every day. She will become a total time sink for Senpai if he is not able to remedy her. The bento boxes she makes "Senpai" each day are made with her love, and perhaps there is something lost in translation that makes it impossible to know what is being implied after the concept is introduced.
One will wonder whether or not this is exactly how a lot of stalkers act. I believe that stalkers really are this way, but I have never encountered one before. I really feel like they are villainized waaay too much. Just like in other crimes, stalking is simply under-reported for women because it is more acceptable. Men are usually dangerous stalkers, but I still think that some could be like Koharu.
Natsuki really makes this manga wonderful. She is the common Shoujo manga character who pushes the main characters to love each other and uses the term "lovebird" all of the time. The fact that she is in a manga about stalkers would be enough, but she is not deterred by Koharu's actions, which makes it all very cute and fun to watch. I just love Natsuki so much because of how vital she is in changing Akira's (the main character) mind about falling in love with a stalker.
Natsuki's boyfriend is kind of funny. He is completely silent most of the time, but he usually says something to comedic effect. In spite of that, he is always serious and soft in presence, but he totally submits to or helps enforce Natsuki's whims. It is really funny to see how Akira can not turn Natsuki down because of him.
The character who comes into the manga later on is a huge spoiler, but I will just say that she appears to be perfect fanservice at first, but then is taken in a really cool-minded, constructive direction.
Enjoyment: 9 - Oh my god, this is just my ideal dream world! The combination of everything is so beautiful. If only more characters and polyamory could have been introduced! This manga can be rather boring and lacking, but the good moments are just invaluable and I do not care about the faults that much.
Overall: 8 - I would love to buy some manga and support this author soon. This is yet another mangaka who has surprised me with the deep thought that can be given in examining the stalker/yandere relationship in real life. To support him would show that writing about such 'crazy' societal romances is a profitable, worthwhile choice.

Mar 10, 2014 · 24 of 24 chapters read · Overall Rating: 8 · algifaria (All reviews)
3 people found this review helpful · Overall 8 · Story 7 · Art 8 · Character 7 · Enjoyment 8

First, sorry for my English :p

Sweet story, a bit creepy, and it scared me better than a horror manga; I didn't even know whether to smile or cringe. Koharu and Akira are one of the weirdest couples I have ever seen (or just Koharu, lol). Really, I don't know what I would do if I got a girl like Koharu... pfftttt, but Akira, you are the best or the worst.

Koharu's quickly changing emotions and the things Akira did sent my tension up and down. Their daily life (or just Koharu's) is really interesting in many ways.

Really good art. I really enjoyed it. Koharu's character design is officially cute and creepy at the same time, lol. I didn't know whether it was cute or creepy, but I still like it.

At least I really enjoyed reading this manga. I don't think Koharu is a bad person or an extreme girl; she is just a little bit overboard (I know I'm wrong).

8/10
More recommendations
Mirai Nikki 3 Users
Ashita Dorobou 1 User
Choku! 1 User
Mousugu Shinu Hito 1 User
Watari-kun no xx ga Houkai Sunzen 1 User
More discussions
Poll: Koharu no Hibi Chapter 24 Discussion (started by TiamatNM, Nov 18, 2012; 37 replies; last reply by Confucius, Nov 8, 2018 3:11 PM)
Poll: Koharu no Hibi Chapter 19 Discussion (started by notsureifsrs, Jan 17, 2012; 12 replies; last reply by JeremiahOrange, Apr 15, 2018 10:34 PM)
// NewAuthService returns implementation of authorization service.
func NewAuthService(rateLimiter *limiter.RateLimiter, ipList repository.IPStorable) service.AuthService {
return &authService{
rl: rateLimiter,
ipList: ipList,
}
}
Dame Judi Dench has said that pursuing a healthy sex life should not be neglected by her fellow octogenarians.
The 82-year-old actor is once again taking on the role of Queen Victoria in her latest film Victoria and Abdul, which explores the relationship the monarch struck up with an Indian clerk, Abdul Karim, in the latter years of her life.
Speaking to the Radio Times, Dench said Victoria’s infatuation with the clerk was an interesting exploration of how romance and intimacy were not just the domain of the young.
Speaking of her own experiences, the Oscar-winning actor said: “Well, of course, you still feel desire. Does that ever go? To the older reader, I would say: ‘Don’t give up.’”
The film, directed by Stephen Frears, traces how Victoria, then 68, became besotted with Karim, 24, who taught her Urdu and became her close friend and confidante, much to the anger of her children and servants who felt she was betraying the strict social hierarchies of the time. After her death, most correspondence between the pair was destroyed, and Karim was sent back to India.
While there is no evidence their relationship was sexual, the surviving letters between the pair and her diaries, some of which are written in Urdu, reveal they discussed intimate details of their lives, including sexual positions to help Karim’s wife get pregnant.
Dame Judi Dench and co-star Ali Fazal walk the red carpet before the Victoria and Abdul screening at the Venice film festival on Sunday. Photograph: Getty
The film is based on a book written by Shrabani Basu, who spent months uncovering 13 volumes of Victoria’s journals and the journals of Karim. Basu said of the relationship: “Queen Victoria liked tall men by her side. I don’t think it was sexual in any sense although there is an intimacy.”
Dench’s romantic life was recently revived by a new relationship with conservationist David Mills, 73. Her husband of 30 years, Michael Williams, died from cancer in 2001, and she has spoken recently of how her relationship with Mills had given her a renewed lust for life.
She told Good Housekeeping: “One hot night during the summer we swam and had a glass of champagne in the garden and I said: ‘This is so fantastic’. I get overexcited about things. I love having a laugh.”
Dench ruled out the idea of getting married again. She said: “He’s not going to propose. No, no, no. Let’s just pull ourselves together and be our age. I have a jolly nice friend. I don’t know what the word is because I don’t like the word partner. Chap? Will chap do?”
Dench’s advocating for older generations to keep their sexual desire alive was echoed in a recent report by Manchester University’s school of social sciences, which found that the emotional side of sex appeared more fulfilling for people over 80.
Another study found that 74% of women and 72% of men between 75 and 85 said satisfactory sex was essential to maintaining a relationship.
import logging

import fuzzy  # project-local fuzzy-matching helper, assumed importable here


def name2sid(index, name, filename):
    match = fuzzy.find(name, index)
    logging.info('matches for %s : %s', name, match)
    if match and match[0][1] > 1:
        return '%06d' % int(match[0][0])
    raise ValueError("Could not find sid for '%s' in %s" % (name, filename))
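The `fuzzy` module above is project-local, so its contract has to be inferred from the call site: `fuzzy.find(name, index)` appears to return `(key, score)` pairs, best match first, with a score above 1 meaning a confident match. A self-contained stand-in under those assumptions, sketched with difflib:

import difflib

def find(name, index):
    # Hypothetical stand-in for fuzzy.find: score each label in the index
    # against the query and return (key, score) pairs, best match first.
    # Scores are scaled to [0, 2] so that > 1 roughly means "confident".
    scored = [(key, 2 * difflib.SequenceMatcher(None, name.lower(), label.lower()).ratio())
              for key, label in index.items()]
    scored.sort(key=lambda kv: kv[1], reverse=True)
    return scored

print(find("Main St", {"42": "Main Street", "43": "Maple Street"}))
# '42' (Main Street) ranks first, with a score above 1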
import { Component, OnInit, OnDestroy } from '@angular/core';
import { AuthService } from 'src/app/services/auth.service';
import { IdentityProvider } from 'src/app/model/identityProvider';
import { UtilService } from 'src/app/services/util.servcie';
import { takeUntil } from 'rxjs/operators';
import { Subject } from 'rxjs';
import { TranslateService } from '@ngx-translate/core';
import { appConfig } from 'src/app/app.config';
@Component({
selector: 'app-select-supplier',
templateUrl: './select-supplier.component.html',
styleUrls: ['./select-supplier.component.scss']
})
export class SelectSupplierComponent implements OnInit, OnDestroy {
private _unsubscribe: Subject<void> = new Subject();
public eIdSelector: IdentityProvider[] = [];
public selectedIdentifier: string;
public showLogin = false;
public selectedIdentityProvider: IdentityProvider;
public get locale() {
return this._translate.currentLang;
}
constructor(
private _authService: AuthService,
private _utilservice: UtilService,
private _translate: TranslateService
) {}
ngOnInit() {
this._utilservice.translateErrorCodes();
localStorage.clear();
localStorage.setItem('isSend', 'true');
this._getProviders();
}
public getSelectedidentifier(event) {
const selectedValue = event.value;
this.selectedIdentityProvider = selectedValue;
this.showLogin = true;
}
public useLanguage(language: string) {
this._utilservice.useLanguage(language);
}
public get profileUrl() {
return appConfig.profileUrl;
}
private _getProviders() {
this._authService
.getIdentityProviders()
.pipe(takeUntil(this._unsubscribe))
.subscribe((res: IdentityProvider[]) => {
this.eIdSelector = res;
});
}
ngOnDestroy() {
this._unsubscribe.next();
this._unsubscribe.complete();
}
}
import React from 'react';
import Thumbs from '../Thumbs';
import { CarouselProps, CarouselState } from './types';
export default class Carousel extends React.Component<CarouselProps, CarouselState> {
private thumbsRef?;
private carouselWrapperRef?;
private listRef?;
private itemsRef?;
private timer?;
private animationHandler;
static displayName: string;
static defaultProps: CarouselProps;
constructor(props: CarouselProps);
componentDidMount(): void;
componentDidUpdate(prevProps: CarouselProps, prevState: CarouselState): void;
componentWillUnmount(): void;
setThumbsRef: (node: Thumbs) => void;
setCarouselWrapperRef: (node: HTMLDivElement) => void;
setListRef: (node: HTMLElement | HTMLUListElement) => void;
setItemsRef: (node: HTMLElement, index: number) => void;
setupCarousel(): void;
destroyCarousel(): void;
setupAutoPlay(): void;
destroyAutoPlay(): void;
bindEvents(): void;
unbindEvents(): void;
autoPlay: () => void;
clearAutoPlay: () => void;
resetAutoPlay: () => void;
stopOnHover: () => void;
startOnLeave: () => void;
forceFocus(): void;
isFocusWithinTheCarousel: () => boolean;
navigateWithKeyboard: (e: KeyboardEvent) => void;
updateSizes: () => void;
setMountState: () => void;
handleClickItem: (index: number, item: React.ReactNode) => void;
/**
* On Change handler, Passes the index and React node to the supplied onChange prop
* @param index of the carousel item
* @param item React node of the item being changed
*/
handleOnChange: (index: number, item: React.ReactNode) => void;
handleClickThumb: (index: number, item: React.ReactNode) => void;
onSwipeStart: (event: React.TouchEvent) => void;
onSwipeEnd: (event: React.TouchEvent) => void;
onSwipeMove: (delta: {
x: number;
y: number;
}, event: React.TouchEvent) => boolean;
/**
* Decrements the selectedItem index a number of positions through the children list
* @param positions
* @param fromSwipe
*/
decrement: (positions?: number) => void;
/**
* Increments the selectedItem index a number of positions through the children list
* @param positions
* @param fromSwipe
*/
increment: (positions?: number) => void;
/**
* Moves the selected item to the position provided
* @param position
* @param fromSwipe
*/
moveTo: (position?: number | undefined) => void;
onClickNext: () => void;
onClickPrev: () => void;
onSwipeForward: () => void;
onSwipeBackwards: () => void;
changeItem: (newIndex: number) => (e: React.MouseEvent | React.KeyboardEvent) => void;
/**
* This function is called when you want to 'select' a new item, or rather move to a 'selected' item
* It also handles the onChange callback wrapper
* @param state state object with updated selected item, and swiping bool if relevant
*/
selectItem: (state: Pick<CarouselState, 'selectedItem' | 'swiping'>) => void;
getInitialImage: () => HTMLImageElement;
getVariableItemHeight: (position: number) => number | null;
renderItems(isClone?: boolean): JSX.Element[];
renderControls(): JSX.Element | null;
renderStatus(): JSX.Element | null;
renderThumbs(): JSX.Element | null;
render(): JSX.Element | null;
}
//# sourceMappingURL=index.d.ts.map
/*
* Copyright 2018 JDCLOUD.COM
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
 * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*
*
*
* Contact:
*
* NOTE: This class is auto generated by the jdcloud code generator program.
*/
package com.jdcloud.sdk.service.censor.model;
import java.util.List;
import java.util.ArrayList;
/**
* qualityResultDetail
*/
public class QualityResultDetail implements java.io.Serializable {
private static final long serialVersionUID = 1L;
/**
 * Basic image information
*/
private List<MetaInfoItem> metaInfo;
/**
 * Aesthetics score in [0, 1]; higher means more aesthetically pleasing. As a rule of thumb, above 0.5 can be considered good and below 0.3 poor, depending on the business scenario
*/
private Float aestheticsRate;
/**
 * Sharpness score in [0, 1]; higher means sharper. As a rule of thumb, above 0.5 can be considered good and below 0.3 poor, depending on the business scenario
*/
private Float sharpnessRate;
/**
 * Image border information
*/
private List<BoarderInfoItem> boarderInfo;
/**
 * Image background information
*/
private List<BackgroundInfoItem> backgroundInfo;
/**
 * get basic image information
*
* @return
*/
public List<MetaInfoItem> getMetaInfo() {
return metaInfo;
}
/**
 * set basic image information
*
* @param metaInfo
*/
public void setMetaInfo(List<MetaInfoItem> metaInfo) {
this.metaInfo = metaInfo;
}
/**
 * get aesthetics score in [0, 1]; higher is better (roughly above 0.5 good, below 0.3 poor, depending on the business scenario)
*
* @return
*/
public Float getAestheticsRate() {
return aestheticsRate;
}
/**
 * set aesthetics score in [0, 1]; higher is better (roughly above 0.5 good, below 0.3 poor, depending on the business scenario)
*
* @param aestheticsRate
*/
public void setAestheticsRate(Float aestheticsRate) {
this.aestheticsRate = aestheticsRate;
}
/**
 * get sharpness score in [0, 1]; higher is sharper (roughly above 0.5 good, below 0.3 poor, depending on the business scenario)
*
* @return
*/
public Float getSharpnessRate() {
return sharpnessRate;
}
/**
 * set sharpness score in [0, 1]; higher is sharper (roughly above 0.5 good, below 0.3 poor, depending on the business scenario)
*
* @param sharpnessRate
*/
public void setSharpnessRate(Float sharpnessRate) {
this.sharpnessRate = sharpnessRate;
}
/**
 * get image border information
*
* @return
*/
public List<BoarderInfoItem> getBoarderInfo() {
return boarderInfo;
}
/**
 * set image border information
*
* @param boarderInfo
*/
public void setBoarderInfo(List<BoarderInfoItem> boarderInfo) {
this.boarderInfo = boarderInfo;
}
/**
 * get image background information
*
* @return
*/
public List<BackgroundInfoItem> getBackgroundInfo() {
return backgroundInfo;
}
/**
 * set image background information
*
* @param backgroundInfo
*/
public void setBackgroundInfo(List<BackgroundInfoItem> backgroundInfo) {
this.backgroundInfo = backgroundInfo;
}
/**
 * set basic image information
*
* @param metaInfo
*/
public QualityResultDetail metaInfo(List<MetaInfoItem> metaInfo) {
this.metaInfo = metaInfo;
return this;
}
/**
 * set aesthetics score in [0, 1]; higher is better (roughly above 0.5 good, below 0.3 poor, depending on the business scenario)
*
* @param aestheticsRate
*/
public QualityResultDetail aestheticsRate(Float aestheticsRate) {
this.aestheticsRate = aestheticsRate;
return this;
}
/**
 * set sharpness score in [0, 1]; higher is sharper (roughly above 0.5 good, below 0.3 poor, depending on the business scenario)
*
* @param sharpnessRate
*/
public QualityResultDetail sharpnessRate(Float sharpnessRate) {
this.sharpnessRate = sharpnessRate;
return this;
}
/**
 * set image border information
*
* @param boarderInfo
*/
public QualityResultDetail boarderInfo(List<BoarderInfoItem> boarderInfo) {
this.boarderInfo = boarderInfo;
return this;
}
/**
 * set image background information
*
* @param backgroundInfo
*/
public QualityResultDetail backgroundInfo(List<BackgroundInfoItem> backgroundInfo) {
this.backgroundInfo = backgroundInfo;
return this;
}
/**
 * add item to basic image information
*
* @param metaInfo
*/
public void addMetaInfo(MetaInfoItem metaInfo) {
if (this.metaInfo == null) {
this.metaInfo = new ArrayList<>();
}
this.metaInfo.add(metaInfo);
}
/**
 * add item to image border information
*
* @param boarderInfo
*/
public void addBoarderInfo(BoarderInfoItem boarderInfo) {
if (this.boarderInfo == null) {
this.boarderInfo = new ArrayList<>();
}
this.boarderInfo.add(boarderInfo);
}
/**
 * add item to image background information
*
* @param backgroundInfo
*/
public void addBackgroundInfo(BackgroundInfoItem backgroundInfo) {
if (this.backgroundInfo == null) {
this.backgroundInfo = new ArrayList<>();
}
this.backgroundInfo.add(backgroundInfo);
}
}
import { of } from "rxjs";
// tslint:disable-next-line:no-var-requires
const stripJsonComments = require("strip-json-comments");
import { SettingsService } from "app/services";
// tslint:disable-next-line:no-var-requires
const defaultSettings = JSON.parse(stripJsonComments(require("app/components/settings/default-settings.json")));
export class MockSettingsService {
public static asProvider() {
return { provide: SettingsService, useValue: new MockSettingsService() };
}
public settings = defaultSettings;
public settingsObs = of(defaultSettings);
}
def range(self):
    return Stats.range(self.sample)
Vitamin D hydroxylases CYP2R1, CYP27B1 and CYP24A1 in renal cell carcinoma
There is increasing evidence that vitamin D metabolites influence carcinogenesis. Besides its role in mineral homoeostasis, calcitriol, the active metabolite of vitamin D (1,25(OH)2D3), is known to possess antiproliferative, proapoptotic and immunomodulatory effects in cancer. Concerning the synthesis of vitamin D, the hydroxylases CYP2R1, CYP27B1 and CYP24A1 play a critical role, and the latter molecule determines the biological half-life of 1,25(OH)2D3, which is synthesized in the proximal renal tubules.
package com.ikalagaming.graphics.graph;
import com.ikalagaming.graphics.SceneItem;
import org.joml.Matrix4f;
import org.joml.Vector3f;
/**
* Bundles together the projection and world matrix.
*
* @author <NAME>
*
*/
public class Transformation {
private final Matrix4f modelViewMatrix;
private final Matrix4f projectionMatrix;
private final Matrix4f viewMatrix;
/**
* Create a new transformation.
*/
public Transformation() {
this.modelViewMatrix = new Matrix4f();
this.projectionMatrix = new Matrix4f();
this.viewMatrix = new Matrix4f();
}
/**
 * Calculate and return the view matrix for a model based on a camera's view
* matrix.
*
* @param sceneItem The item we are calculating the view matrix for.
 * @param cameraViewMatrix The camera's view matrix.
* @return The view matrix for a model.
*/
public Matrix4f getModelViewMatrix(SceneItem sceneItem,
Matrix4f cameraViewMatrix) {
Vector3f rotation = sceneItem.getRotation();
this.modelViewMatrix.identity().translate(sceneItem.getPosition())
.rotateX((float) Math.toRadians(-rotation.x))
.rotateY((float) Math.toRadians(-rotation.y))
.rotateZ((float) Math.toRadians(-rotation.z))
.scale(sceneItem.getScale());
Matrix4f currentView = new Matrix4f(cameraViewMatrix);
return currentView.mul(this.modelViewMatrix);
}
/**
* Calculate and return the projection matrix.
*
* @param fov Field of view, in radians.
* @param width The width of the screen.
* @param height The height of the screen.
* @param zNear The distance from the camera to the near plane.
* @param zFar The distance from the camera to the far plane.
* @return The appropriate projection matrix.
*/
public final Matrix4f getProjectionMatrix(float fov, float width,
float height, float zNear, float zFar) {
float aspectRatio = width / height;
this.projectionMatrix.identity();
this.projectionMatrix.perspective(fov, aspectRatio, zNear, zFar);
return this.projectionMatrix;
}
/**
* Calculate the view matrix for a camera.
*
* @param camera The camera to calculate the view matrix for.
* @return The appropriate view matrix.
*/
public Matrix4f getViewMatrix(Camera camera) {
Vector3f cameraPos = camera.getPosition();
Vector3f rotation = camera.getRotation();
this.viewMatrix.identity();
/**
 * Rotate first, then translate, so that we rotate the camera
* around itself and not the origin.
*/
this.viewMatrix
.rotate((float) Math.toRadians(rotation.x), new Vector3f(1, 0, 0))
.rotate((float) Math.toRadians(rotation.y), new Vector3f(0, 1, 0));
this.viewMatrix.translate(-cameraPos.x, -cameraPos.y, -cameraPos.z);
return this.viewMatrix;
}
}
// SetCurrentLimit specifies the output current limit in Amperes.
//
// SetCurrentLimit implements the setter for the read-write IviDCPwrBase
// Attribute Current Limit described in Section 4.2.1 of IVI-4.4: IviDCPwr
// Class Specification.
func (ch *Channel) SetCurrentLimit(limit float64) error {
if ch.currentLimitBehavior == dcpwr.CurrentRegulate {
return ch.Set("CURR %f;:CURR:PROT MAX\n", limit)
} else if ch.currentLimitBehavior == dcpwr.CurrentTrip {
return ch.Set("CURR %f;:CURR:PROT %f\n", limit, limit)
}
return errors.New("current limit behavior not set")
} |
#include <cstdio>
using namespace std;
int main() {
    // Greedy change-making over the canonical denominations 100, 20, 10, 5, 1:
    // take as many of each coin as possible, largest first.
    const int denoms[] = {100, 20, 10, 5, 1};
    int n, ans = 0;
    scanf("%d", &n);
    for (int d : denoms) {
        ans += n / d;
        n -= n / d * d;
    }
    printf("%d", ans);
    return 0;
}
|
use super::scheduler::Worker;
use yaar_lock::ThreadEvent;
#[cfg(feature = "time")]
use super::time::Clock;
#[cfg(feature = "io")]
use yaar_reactor::Reactor;
pub trait Platform: Sync + Sized + 'static {
type CpuAffinity;
type ThreadEvent: ThreadEvent;
#[cfg(feature = "time")]
type Clock: Clock;
#[cfg(feature = "io")]
type Reactor: Reactor;
type NodeLocalData;
type WorkerLocalData;
type ThreadLocalData: Default;
fn tls_slot(new_value: Option<usize>) -> Option<usize>; // return type truncated in the source; Option<usize> (the previous slot value) is an assumption
fn spawn_thread(
&self,
worker: &Worker<Self>,
affinity: &Option<Self::CpuAffinity>,
parameter: usize,
f: extern "C" fn(param: usize),
) -> bool;
fn on_system_event(&self, _event: SystemEvent<Self>) {}
}
pub enum SystemEvent<P: Platform> {
}
|
guti01 A B
guti01 C D
/** Run selected function as many times as needed
* with the array sizes chosen by the user
* @param choice the function to be run
* @return 2-D table with array size and time of each run
*/
private static long[][] processMenuChoice(int choice) throws InterruptedException
{
StopWatch timer = new StopWatch();
System.out.print("Number of runs? ");
int rounds = scan.nextInt();
long[][] table = new long[rounds][2];
for (int k = 0; k < rounds; k++)
{
System.out.print("Size of array? ");
int size = scan.nextInt();
int[] arr = random1DArray(size);
timer.reset();
timer.start();
if (choice == 1)
traverseArray(arr);
else if (choice == 2)
selectionSortArray(arr);
else if (choice == 3)
mystery1(arr);
else
mystery2(arr);
timer.stop();
long time = timer.getElapsedTime();
table[k][0] = size;
table[k][1] = time;
}
return table;
}
/**
* Eviction period.
*
* @param evictionPeriod the eviction period
* @return the off heap cache builder
*/
public OffHeapCacheBuilder evictionPeriod(final long evictionPeriod) {
checkPositive(evictionPeriod, "eviction period must be positive");
this.evictionPeriod = evictionPeriod;
return this;
}
import android.view.View;
import android.view.ViewGroup;
import android.widget.BaseAdapter;

import java.util.ArrayList;
import java.util.List;

/**
 * Created by wilsonsouza on 10/6/16.
 */
public class DeviceAdapter extends BaseAdapter
{
private java.util.List<DeviceNode> m_NodeList;
//-----------------------------------------------------------------------------------------------------------------//
public DeviceAdapter()
{
m_NodeList = new ArrayList<DeviceNode>();
}
//-----------------------------------------------------------------------------------------------------------------//
@Override
public int getCount()
{
return m_NodeList.size();
}
//-----------------------------------------------------------------------------------------------------------------//
@Override
public Object getItem(int position)
{
return m_NodeList.get(position);
}
//-----------------------------------------------------------------------------------------------------------------//
@Override
public long getItemId(int position)
{
return position;
}
//-----------------------------------------------------------------------------------------------------------------//
@Override
public View getView(int position, View convertView, ViewGroup parent)
{
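// NOTE: view construction is elided in the original; a complete adapter
// would inflate and bind a row layout for each DeviceNode here.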
return convertView;
}
//-----------------------------------------------------------------------------------------------------------------//
public void add(String szName, String szAddress, boolean bPaired)
{
m_NodeList.add( new DeviceNode(szName, szAddress, bPaired));
}
//-----------------------------------------------------------------------------------------------------------------//
public void clear()
{
m_NodeList.clear();
}
//-----------------------------------------------------------------------------------------------------------------//
public DeviceNode find(String szAddress)
{
for(DeviceNode i: m_NodeList)
{
if(szAddress.equals(i.GetAddress()))
{
return i;
}
}
return null;
}
//-----------------------------------------------------------------------------------------------------------------//
public DeviceNode search(String szName)
{
for(DeviceNode i: m_NodeList)
{
if(szName.equals(i.GetName()))
{
return i;
}
}
return null;
}
//-----------------------------------------------------------------------------------------------------------------//
public List<DeviceNode> getItems()
{
return m_NodeList;
}
}
# -*- coding: utf-8 -*-
"""
Pygments string assert utility tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
from string_asserts import StringTests
class TestStringTests(StringTests, unittest.TestCase):
def test_startswith_correct(self):
self.assertStartsWith("AAA", "A")
# @unittest.expectedFailure not supported by nose
def test_startswith_incorrect(self):
self.assertRaises(AssertionError, self.assertStartsWith, "AAA", "B")
# @unittest.expectedFailure not supported by nose
def test_startswith_short(self):
self.assertRaises(AssertionError, self.assertStartsWith, "A", "AA")
def test_endswith_correct(self):
self.assertEndsWith("AAA", "A")
# @unittest.expectedFailure not supported by nose
def test_endswith_incorrect(self):
self.assertRaises(AssertionError, self.assertEndsWith, "AAA", "B")
# @unittest.expectedFailure not supported by nose
def test_endswith_short(self):
self.assertRaises(AssertionError, self.assertEndsWith, "A", "AA")
|
/* Answer non-zero on success
*/
uintptr_t
try_scan(char **scan_start, const char *search_string)
{
char *scan_string = *scan_start;
size_t search_length = strlen(search_string);
if (strlen(scan_string) < search_length) {
return 0;
}
if (0 == j9_cmdla_strnicmp(scan_string, search_string, search_length)) {
*scan_start = &scan_string[search_length];
return 1;
}
return 0;
} |
import math

a = input()
d = a[::-1]  # reversed copy of the input
ans = 0
c = math.floor(len(a) / 2)
# Count mismatched character pairs between the first half of the string
# and its reverse; the even- and odd-length cases use the same loop.
for i in range(c):
    if a[i] != d[i]:
        ans += 1
print(ans)
def from_esc50(cls, spec, data_path):
    assert isinstance(spec, audio_spec.YAMNetSpec)
    return NotImplemented
package vagrantutil
import "testing"
const version174 = `
1459267732,,version-installed,1.7.4
1459267732,,version-latest,1.8.1
`
const version181 = `
1459268273,,ui,output,Installed Version: 1.8.1
1459268273,,version-installed,1.8.1
1459268273,,ui,output,Latest Version: 1.8.1
1459268273,,version-latest,1.8.1
1459268273,,ui,success, \nYou're running an up-to-date version of Vagrant!
`
func TestParseRecordsAndData(t *testing.T) {
cases := []struct {
raw string
ver string
}{
{version174, "1.7.4"}, // i=0
{version181, "1.8.1"}, // i=1
}
for i, cas := range cases {
rec, err := parseRecords(cas.raw)
if err != nil {
t.Errorf("%d: parseRecords()=%s", i, err)
continue
}
ver, err := parseData(rec, "version-installed")
if err != nil {
t.Errorf("%d: parseData()=%d", i, err)
continue
}
if ver != cas.ver {
t.Errorf("%d: got %q, want %q", i, ver, cas.ver)
}
}
}
|
Applying Innovative Models for Forecasting Small-Area Peak Electrical Loads
The number of distributed generators is currently increasing, and the electrical industry is trending toward regional supply-demand balancing and resource integration. A model that can forecast small-area peak electrical loads is therefore an indispensable part of power infrastructure. This study constructs an innovative model for forecasting small-area peak electrical loads. The main aspects considered were the accuracy of the forecasting model and the convenience of follow-up maintenance and management of the model and data. This study used yearly peak load values and total power data from substations to construct regression tree models. These served as models for the small-area peak electrical load of substation districts in the Taipower distribution systems. The errors of these forecasting models were substantially smaller than those of the least squares model originally used by Taipower to forecast peak load, and the addition of exogenous factors was unnecessary. Additionally, our results were superior regardless of whether one-time or incremental model updates were adopted for the data, which confirms the usability of our models.
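As a rough illustration of the approach described above (this is not the authors' code; the file name, column names, and tree depth are hypothetical), a regression tree for substation peak load could be fitted as follows:

# Illustrative sketch only: fit a regression tree to yearly substation data.
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_percentage_error

df = pd.read_csv("substation_yearly.csv")           # hypothetical data file
X, y = df[["total_power"]], df["peak_load"]         # hypothetical column names
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
tree = DecisionTreeRegressor(max_depth=4).fit(X_tr, y_tr)
print("MAPE:", mean_absolute_percentage_error(y_te, tree.predict(X_te)))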
package profile
import (
"github.com/labstack/echo"
"net/http"
)
func Assemble(e *echo.Echo, man Manager) {
h := &handler{
man: man,
}
g := e.Group("/profile")
g.GET("/reviews", h.reviews)
}
type handler struct {
man Manager
}
func (h *handler) reviews(c echo.Context) error {
ctx := c.Request().Context()
userID := "from ctx"
reviews, err := h.man.GetReviews(ctx, userID)
if err != nil {
return err
}
return c.JSON(http.StatusOK, reviews)
} |
/**
* Checks if <code>this</code> property can be developed.
* @return <code>true</code> if property can be developed.
*/
public boolean canDevelop() {
        /*
         If this property is already fully developed, a hotel may be added
         only when every property in the same color group is also fully
         developed, so check the house count of each same-color property.
        */
boolean canDevelop = true;
if(this.getHouseCount() == FULLY_DEVELOPED)
{
ArrayList <Property> asTemp = new ArrayList <>();
getOwner().getAssets().forEach(asset -> {
if (asset instanceof Property) {
asTemp.add((Property) asset);
}
});
asTemp.removeIf(prop -> prop.getColor() != this.getColor());
for (Property property : asTemp) {
if (property.getHouseCount() != FULLY_DEVELOPED) {
canDevelop = false;
break;
}
}
}
if (this.getHouseCount() >= HOTEL ||
this.getFootCount() < getFootMax())
{
canDevelop = false;
}
return canDevelop;
} |
/*input
5 3
1
2 motarack
2 mike
1
2 light
*/
#include <bits/stdc++.h>
#define pii pair<int, int>
#define F first
#define S second
#define int long long
using namespace std;
const int MX = 5e5 + 100, oo = 1ll << 60;
pair<int, string> que[MX];
int GG[100][100];
signed main () {
ios_base::sync_with_stdio(false);
cin.tie(0);
cout.tie(0);
int ptr = 0;
int n, m; cin >> n >> m;
unordered_map<string, int> mp;
for(int i = 1; i <= n; i ++) {
cin >> que[i].F;
if(que[i].F == 2) {
cin >> que[i].S;
if(mp.find(que[i].S) == mp.end()) {
mp[que[i].S] = ++ ptr;
}
}
}
for(int i = 1; i <= n; i ++) {
// cout << i << endl;
if(que[i].F == 2) {
// cout << i << endl;
set<int> s;
for(int j = i; j <= n; j ++) {
if(que[j].F == 1) {
i = j;
break;
}
s.insert(mp[que[j].S]);
}
vector<int> Yeee;
for(auto it: s) Yeee.push_back(it);
for(int i = 0; i < Yeee.size(); i ++) {
for(int j = i + 1; j < Yeee.size(); j ++) {
int a = Yeee[i];
int b = Yeee[j];
GG[a][b] = GG[b][a] = true;
// cout << a << ' ' << b << endl;
}
}
}
}
srand(time(0));
int ans = 0;
vector<int> Yeee;
for(int i = 1; i <= m; i ++) Yeee.push_back(i);
while(clock() / (double) CLOCKS_PER_SEC < 1.9) {
vector<int> OK;
for(auto i: Yeee) {
bool NO = false;
for(auto j: OK) {
if(GG[i][j]) NO = true;
}
if(!NO) OK.push_back(i);
}
ans = max(ans, (int) OK.size());
random_shuffle(Yeee.begin(), Yeee.end());
}
cout << ans;
} |
/*
* Open a subcommand with suitable error messaging
*/
static FILE *
popen_check(const char *command, const char *mode)
{
FILE *cmdfd;
fflush(stdout);
fflush(stderr);
errno = 0;
cmdfd = popen(command, mode);
if (cmdfd == NULL)
fprintf(stderr, _("%s: could not execute command \"%s\": %s\n"),
progname, command, strerror(errno));
return cmdfd;
} |
/* ----------------------------------------------------------------------------
* Copyright (C) 2021 European Space Agency
* European Space Operations Centre
* Darmstadt
* Germany
* ----------------------------------------------------------------------------
* System : ESA NanoSat MO Framework
* ----------------------------------------------------------------------------
* Licensed under the European Space Agency Public License, Version 2.0
* You may not use this file except in compliance with the License.
*
* Except as expressly set forth in this License, the Software is provided to
* You on an "as is" basis and without warranties of any kind, including without
* limitation merchantability, fitness for a particular purpose, absence of
* defects or errors, accuracy or non-infringement of intellectual property rights.
*
* See the License for the specific language governing permissions and
* limitations under the License.
* ----------------------------------------------------------------------------
*/
package esa.mo.com.impl.provider;
import esa.mo.com.impl.util.HelperCOM;
import java.io.Serializable;
import org.ccsds.moims.mo.com.archive.structures.ArchiveDetails;
import org.ccsds.moims.mo.com.structures.ObjectDetails;
import org.ccsds.moims.mo.com.structures.ObjectId;
import org.ccsds.moims.mo.com.structures.ObjectType;
import org.ccsds.moims.mo.mal.structures.FineTime;
import org.ccsds.moims.mo.mal.structures.Identifier;
import org.ccsds.moims.mo.mal.structures.IdentifierList;
import org.ccsds.moims.mo.mal.structures.URI;
/**
* This class used to be a direct Entity for the db, now it is just a COM Object carrier.
*
* @author <NAME>
*/
public class ArchivePersistenceObject implements Serializable
{
private final ObjectType objectType;
private final IdentifierList domainId;
private final Long objId;
private final ObjectId sourceLink;
private final Long relatedLink;
private final String network;
private final Long timestampArchiveDetails;
private final String providerURI;
// private final Element obj;
// The Element wrapping was removed!
private final Object object;
public ArchivePersistenceObject(final ObjectType objectType, final IdentifierList domain,
final Long objId, final ArchiveDetails archiveDetails, final Object object)
{
this.objectType = objectType;
this.domainId = domain;
this.objId = objId;
this.providerURI = (archiveDetails.getProvider() != null)
? archiveDetails.getProvider().getValue() : null;
this.network = (archiveDetails.getNetwork() != null)
? archiveDetails.getNetwork().getValue() : null;
this.timestampArchiveDetails = (archiveDetails.getTimestamp() != null)
? archiveDetails.getTimestamp().getValue() : 0;
this.sourceLink = archiveDetails.getDetails().getSource();
this.relatedLink = archiveDetails.getDetails().getRelated();
// this.obj = (Element) HelperAttributes.javaType2Attribute(object);
this.object = object;
}
public ObjectType getObjectType()
{
return this.objectType;
}
public Long getObjectTypeId()
{
return HelperCOM.generateSubKey(this.objectType);
}
public IdentifierList getDomain()
{
return this.domainId;
}
public Long getObjectId()
{
return this.objId;
}
public ArchiveDetails getArchiveDetails()
{
final Identifier net = (this.network == null) ? null : new Identifier(network);
final URI uri = (this.providerURI == null) ? null : new URI(providerURI);
return new ArchiveDetails(objId, new ObjectDetails(relatedLink, sourceLink),
net, new FineTime(timestampArchiveDetails), uri);
}
public Object getObject()
{
// return HelperAttributes.attribute2JavaType(obj);
return this.object;
}
}
|
Parameter Correlation of Proximate Analysis and Ultimate Analysis of the Calorific Value of Coal
Jambi Province has abundant coal potential owing to its location in the Jambi Sub-basin, part of the South Sumatra Basin. As one of the coal mining companies in Jambi Province, PT Kamalindo Sompurna mines coal to be marketed and sold directly to predetermined consumers. If the chemical properties of the coal are tested continuously through proximate and ultimate analyses and correlated with its calorific value, the company can market or utilize the coal either directly or indirectly. Coal samples were taken by the channel sampling method in the same seam; 48 samples were obtained, one per 5 cm interval. The samples were then reduced to 5 composite samples based on fixed intervals. The calorific value of coal in the research area ranges from 4300 to 5300 cal/g, and this value was correlated with the results of the proximate and ultimate analyses using simple linear regression. Among the 9 analysis parameters, the correlations indicate that the volatile matter, ash, fixed carbon, and oxygen contents correlate strongly with calorific value, with correlation coefficients of 0.98, -0.93, 0.88, and -0.89, respectively. Based on these parameters and correlation values, the researchers can provide references for the use of the coal in processing and as fuel, both in the power generation industry and in the processing of cement raw materials.
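For reference, the kind of simple linear regression used to obtain such correlation coefficients can be reproduced in a few lines (the data values below are made up for illustration and are not the study's measurements):

# Illustration only: Pearson correlation / simple linear regression between
# one proximate-analysis parameter and calorific value (hypothetical values).
from scipy import stats

volatile_matter = [38.1, 40.2, 42.5, 44.0, 45.3]   # %, hypothetical
calorific_value = [4350, 4600, 4900, 5100, 5280]   # cal/g, hypothetical
res = stats.linregress(volatile_matter, calorific_value)
print(f"r = {res.rvalue:.2f}, slope = {res.slope:.1f} cal/g per % VM")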
#pragma once
namespace parsium {
struct {} constexpr recursive;
}
|
import json
from typing import Any, Dict, List
import psutil # type: ignore
from ..sensor import ArgDict, Sensor
class SystemSensor(Sensor):
_argtypes: ArgDict = {
"partitions": str,
"diskthresh": float
}
def __init__(self, partitions: str = "", diskthresh: float = 0.0,
**kwargs):
super().__init__(**kwargs)
if partitions:
self._partitions = partitions.split(',')
else:
self._partitions = []
self._diskthresh = diskthresh
def fire(self):
        stats: Dict[str, Any] = {}
with open("/sys/class/thermal/thermal_zone0/temp", "r") as f:
temp = int(f.read())
f_temp = float(temp) / 1000.0
stats["cpu_temp"] = round(f_temp, 1)
stats["cpu_pct"] = round(psutil.cpu_percent(), 1)
stats["mem_used_pct"] = round(psutil.virtual_memory().percent, 1)
if self._diskthresh != 0.0:
warnings: List[str] = []
if self._partitions:
for part in self._partitions:
if psutil.disk_usage(part).percent > self._diskthresh:
warnings.append(part)
else:
for part in psutil.disk_partitions():
usage = psutil.disk_usage(part.mountpoint)
if usage.percent > self._diskthresh:
warnings.append(part.mountpoint)
if warnings:
stats["disk_full"] = warnings
self._loop.publish(self.name, json.dumps(stats))
|
Side Splitting Failure of RC Beams and Columns under Seismic Action as a Form of Shear Failure
Recently, bond splitting failure prior to the yielding of stirrups has attracted more attention for reinforced concrete (RC) structures located in seismic areas, due to the increased popularity of high strength steel. However, bond splitting failure is complicated, particularly in RC beams with multiple layers of reinforcement with different cutoffs. Pullout tests indicate that the bond strengths of bars in the second (inner) layer of RC beams are weaker than those in the first (outer) layer. In contrast, test results of RC beams indicate that the bond strengths of cutoff bars in the second layer are larger than those in the first layer. To examine this contradiction, previous pullout tests in which deformed bars were embedded in concrete were reviewed. In contrast to the conventional method of evaluating the surface bond strength of each bar, a new method for evaluating bond resistance is developed, in which the discussion focuses on the shear strength at a potential failure plane below the reinforcement layer. The proposed method shows good results when compared with test results of RC beams and columns that failed in bond splitting prior to yielding of longitudinal reinforcement, with an average ratio of measured-to-predicted failure stress of 1.23 and a coefficient of variation of 14%. In contrast, the ACI 318-19 shear equation slightly underestimated some of the test results of single-layered beams, with an average ratio of measured-to-predicted failure stress of 0.98 and a coefficient of variation of 15%. These findings suggest that side splitting failure of RC beams and columns under seismic action can be treated as shear failure.
Introduction
A photograph shows a beam in a nine-story building after the Great East Japan Earthquake, 2011. A horizontal crack is prominent along the bottom reinforcement. Similar cracks were observed in numerous beams of this building (Nagaya et al. 2013). This type of failure, which differs from a traditional flexural or shear failure, is also observed in the laboratory and is called "splitting bond failure" (Fujii and Morita 1982). This paper deals with such a failure.
Bond refers to the interaction between concrete and reinforcement. Studies of bond date back more than a century. In 1913, Abrams (1913) discussed many factors that affect bond strength (such as cover thickness) using 1511 pullout specimens, most of which were cylindrical with a bar at the center. Goto (1971) tested tensile specimens, each a single bar embedded in a long concrete prism and pulled at both ends. He reported fine cracks in front of each rib deformation of a bar as a result of bond stress. Fujii and Morita (1982), and Otani and Maeda (1994) tested pullout specimens as shown in Fig. 1a, each with a single layer of reinforcement comprising several bars. In this figure, the bars were bonded to the concrete between A and B, and small inclined cracks were observed in this region.
The tests of Fujii and Morita (1982), and Otani and Maeda (1994) provide a useful basis for introducing nomenclature and behavior. In these tests, the tensile stress of each bar was inferred using strain gages (σ_1 in Fig. 2a). Bond strength was defined as the average shear strength at the interface of the concrete and reinforcement (τ_bu in Fig. 2a), computed from the bar stress σ_1 and the embedment length l_d. The bond stress causes the radial expansion shown in Fig. 2c, which causes a splitting bond failure as shown in Fig. 2d. Based on such observations, AIJ Codes (AIJ 1999, 2018) provide bond-splitting strength formulas considering the effects of concrete strength, cover, spacing, and transverse reinforcement. ACI 318 (ACI 2019) similarly provides a minimum development-length requirement, which is mostly based on studies from the 1970s (Jirsa et al. 1979). More recently, bond strength in lap splices has been the subject of much more attention (Canbay and Frosch 2005; Hardisty et al. 2015).
One difference between AIJ and ACI is the treatment of multiple layers of bars. When a large amount of longitudinal reinforcement is needed in a beam or column, reinforcing bars are arranged in two or more layers as shown in Figs. 2b and 3a. In pullout tests of deformed bars, bars arranged in multiple layers have been observed to have weaker bond strengths (τ bu1 and τ bu2 in Fig. 1b) than bars in a single layer (τ bu in Fig. 1a) (Masuda et al. 1994;Ohyado et al. 1991;Nishimura and Onishi 2018). In the AIJ RC Standard (AIJ 2018) and the Inelastic Concept Guidelines (AIJ 1999), the bond strength in the second (inner) layer is reduced by 40% to account for this phenomenon. ACI 318 does not include such a factor, and multiple layers of reinforcing steel are treated the same.
The other difference between AIJ and ACI is the scope of formulas for bond strength. In AIJ, the formu-las have been applied to both columns and beams since 1990 (AIJ 2018), because the importance of bond splitting failure (Fig. 1) of columns and beams has been recognized in Japan since the 1980s (Muguruma and Watanabe 1984). In ACI 318, bond strength formulas have long been used to determine the minimum requirement of development and splice lengths of reinforcing bars terminated in beams. But for the first time in 2019, ACI 318 Section 18.7.4.3 for Earthquake-Resistant Structures was revised so that the formulas are also applied to bars of columns to avoid bond splitting failure during earthquakes (ACI 2019;Ichinose 1995;Sokoli and Ghannoum 2016). Figure 3a conceptually shows a beam with two layers of reinforcement subjected to anti-symmetric bending and shear which failed in bond before flexural yielding. In Fig. 3a, T σ 1 and C σ 1 are the tensile and compressive stresses in the outer (first) layer at both beam ends, and T σ 2 and C σ 2 are those in the inner (second) layer, respectively. Figure 3b shows stress diagram for the bar in the top two layers, where the stress in the second layer (light gray) is stacked on the stress in the first layer (dark gray). Note that the compressive stresses in the bars are smaller than the tensile stresses because of the contribution of the concrete in compression. Figure 3c shows the distribution of bond stress. The observed bond stresses of inner bars (Ito et al. 2013;Shinohara and Murakami 2015) at failure (τ b2 in Fig. 3c) are like those observed in pullout tests of specimens with two layers (τ bu2 in Fig. 1b). Figure 3 shows an idealized case in which the longitudinal reinforcement is continuous, but in actuality bars are terminated at different points along a span depending on demands and available lengths. Bond of multilayered beams with such bar terminations is more complex than this ideal. Figure 4a illustrates the case shown in Fig. 3 but with bars terminated near the center of the span. The bending moment is assumed to be the same as that in Fig. 3a. An idealized stress distribution of the bars is shown in Fig. 4b, where the sum of the stresses of the outer and inner bars is the same as those in Fig. 3b because the bending moment is equal. One different feature in Fig. 4b compared with Fig. 3b is that the tensile stress of the inner bars is zero at section B, where the bars are terminated. This stress distribution results in larger bond stresses in the inner bars between B and A as shown in Fig. 4c, where the bond stress of the outer bars is zero. Despite these larger bond stresses, bond failures have seldom been observed in the region be-tween B and A (Ito et al. 2013;Shinohara and Murakami 2015). Splitting bond failure usually occurs between B' and B, as shown in Fig. 4a. The observed bond stresses of such bars between B' and B are similar to those observed in the pullout test with a single layer (τ bu in Fig. 2a) (Ito et al. 2013;Shinohara and Murakami 2015).
In this paper, the authors examine bond capacity of bars arranged in two or more layers from a different perspective. The conventional definition (defining bond stress as the shear stress on the surface of a bar) is effective for pullout of a bar embedded in solid concrete. For bond failures like those shown in Figs. 1 through 4, however, evaluating shear stress on the potential splitting plane of multi-layered bars may be more convenient. In this paper, previous pullout test results and bending-shear test results are reviewed in order to evaluate bond performance of RC beams with multiple layers of longitudinal bars including cut-off bars. Application to RC columns is also discussed.
Beside the side-splitting bond failure shown in Fig. 2d, corner splitting failure may occur if one-half of the bar spacing is larger than the clear cover (Jirsa et al. 1979). However, this paper deals with side splitting only, and not corner splitting, for the following two reasons: (1) corner splitting is rarely reported in tests of beams and columns with continuous bars or in reconnaissance reports of seismic damage, probably because the corner splitting strength is large enough unless the bar diameter is too large; (2) corner splitting can be easily avoided by the conventional method.
(Figs. 3 and 4, panel (a): R/C beam subjected to bending and shear; the figures label the first-layer bars, the boundaries with columns, and sections A, A', B, B'; Fig. 3b plots Tσ_1 for continuous bars.)
Review of pull out test results
In this paper, the observed bond splitting failures shown in Figs. 2a and 2b are simplified as shown in Figs. 5a and 5b. When there are fewer bars in the second layer than in the first layer, the failure may occur in the first layer as shown in Fig. 5c (bond failure at the first layer of double-layered bars; the figure distinguishes the actual and potential failure planes). For simplicity, the failures shown in Figs. 5a and 5b will be discussed first; the case of Fig. 5c will be considered later. Fig. 6a shows the proposed concept of the failure. Slip of the bars after splitting is assumed to cause failure at the dark region below the splitting plane in Fig. 6a. Shear stress on the failure plane is defined as shown in Fig. 6b, with the shear strength denoted as τ_su = (T_s1 + T_s2)/(b × l_d).
This definition is quite different from the conventional definition shown in Figs. 2a and 2b, with bond strength denoted as τ bu .
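In display form, the proposed plane-shear strength already defined above is

\tau_{su} \;=\; \frac{T_{s1}+T_{s2}}{b\,l_d},

with T_{s1} and T_{s2} the total bar tensions in the first and second layers, b the member width, and l_d the embedment length (this is a restatement of the inline definition, not an additional equation from the paper).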
There have been many pullout tests of deformed bars arranged in single and double layers. Table 1 summarizes data of tests conducted in Japan on specimens like those shown in Fig. 2. The specimens of Masuda et al. (1994) listed in the first row of Table 1 had a special feature: two bars in the first layer and three bars in the second layer as shown inside Fig. 7. Six specimens were tested, where the parameter was the ratio of T s1 /T s2 from 0 to 0.67 as shown in the horizontal axis of Fig. 7: e.g., in the case of the specimen with T s1 /T s2 = 0, only the bars in the second layer were pulled. No transverse reinforcement was provided. All the specimens failed in bond-splitting at the second layer. The solid squares in Fig. 7 show the bond stress in the second layer bars τ bu2 defined by Fig. 2b, which decreases as T s1 /T s2 increases. The open squares in Fig. 7 show the shear stress of the failure plane τ su defined by Fig. 6b. The τ su slightly increases as T s1 /T s2 increases for lower T s1 /T s2 than around 0.3, and becomes constant for larger T s1 /T s2 . For simplicity, we assume that the shear strength on the failure plane at the second layer is constant even if the bond stress around the first layer changes.
Next, the difference in τ_su between single and double layers of reinforcement is examined. In Fig. 8, the vertical axis is τ_su and the horizontal axis is the transverse reinforcement ratio ρ_t, which is defined as ρ_t = a_w/(b·s) (Eq. 1), where a_w is the cross-sectional area of the transverse bars crossing a horizontal section of area b·s, b is the beam width, and s is the spacing of the transverse reinforcement. "Top" and "bottom" in Fig. 8 refer to the locations of the longitudinal bars with reference to the casting direction of the concrete. Lightweight concrete with a specified compressive strength of f'_c = 36 MPa (5200 psi) was used for the specimens so that the bond strengths would be smaller. Other properties of the specimens are summarized in the second row of Table 1 (Ohyado et al. 1991). As shown in Fig. 8, the shear strengths (τ_su) of the top bars (the triangles) were smaller than those of the bottom bars (the squares); this effect is well known and is introduced in many design codes (ACI 2019; AIJ 2018). More importantly, the shear strengths of the double-layered specimens (the solid lines) were larger than those of the single-layered specimens (the broken lines). In the authors' understanding, this effect is attributable to the difference in failure pattern: vertical cracks (D and E in Figs. 2d or 5a) can form in the cover concrete of single-layered specimens, resulting in convex-upwards deformation of the cover concrete, whereas these cracks rarely occur in the case of double-layered specimens, as shown in Fig. 6a.
Regarding the effect of transverse reinforcement in Fig. 8, the slope of τ_su with respect to ρ_t is almost the same even if the number of layers and the location of the bars are different. Similar results can be seen in Fig. 9, where the axes are the same as in Fig. 8, but the test data come from different specimens (Nishimura and Onishi 2018 in Table 1) made of normal-weight concrete with a range of compressive strengths. (Notes to Table 1: b is the width of the specimen; ρ_t is the transverse reinforcement ratio (Eq. 1); l_d is the embedment length of the bars; T_s1 and T_s2 are the maximums of the total tension load on the bars in the 1st and 2nd layers, respectively; see Fig. 3.)
Fig. 9 shows that the slopes of τ_su with respect to ρ_t are similar even if the concrete strengths are different. Figure 10 shows how concrete weight affects the slope of τ_su with respect to ρ_t, where the axes and the markers of the specimens are the same as those in Figs. 8 and 9. At a reinforcement ratio of 0.2%, the curves for lightweight concrete and normal-weight concrete intersect, but the strength of the lightweight concrete was 36 MPa compared with 21-24 MPa for the normal-weight concrete. Not surprisingly, this suggests that the shear strength of lightweight concrete is smaller than that of normal-weight concrete of the same compressive strength. In addition, as shown in Fig. 10, the slopes of shear strength with respect to transverse reinforcement ratio for lightweight concretes are shallower than those for normal-weight concrete: that is, transverse reinforcement may be less effective at increasing shear strength in lightweight concrete.
The above observations can be summarized as follows.
(1) Bond strength (τ bu ) in the second layer is sensitive to bond stress in the first layer, but shear strength (τ su ) is insensitive to changes in bond stress in the first layer (Fig. 7).
(2) While τ su of the double-layered specimens are larger than those of the single-layered specimens, the slopes of τ su with respect to ρ t are almost the same even if the number of layers, casting location of the bars, and concrete strength are different (Figs. 8 and 9).
(3) The τ su of lightweight concrete is lower than that of normal weight-concrete of the same concrete strength. In addition, the slope of τ su with respect to ρ t of lightweight concrete is shallower than that of normal-weight concrete (Fig. 10).
Bond capacity formula
Considering the observations above, the following formula is developed for the shear strength, τ su . As discussed in Appendix, this formula is based on the equation for bond strength (τ bu ) in AIJ Inelastic Concept Guidelines (AIJ 1999) which is similar to the equation given by Jirsa et al. (1979), a basis of ACI 318 (ACI 2019).
In Eq. 2, a_1 and a_2 are the areas of the first- and second-layer bars, respectively; f'_c is the compressive strength of concrete (MPa); N_t is the number of longitudinal bars next to vertical legs of transverse reinforcement (Fig. 11); N is the total number of longitudinal bars; and ρ_t is the transverse reinforcement ratio (Eq. 1). Figure 12 compares the test results of Table 1 and the calculations by Eq. 2. The vertical axis is the ratio of the test result to the calculation, and the horizontal axis is the transverse reinforcement ratio. The markers of the specimens are the same as in Figs. 7, 8, 9, and 10. The specimens denoted by white square markers in Fig. 12 had slits from B to C shown in Fig. 7 to induce failure at that location, which could explain why the results were less conservative than the other specimens. Overall, the calculations show good agreement with the test results, with a mean ratio of test result to calculation of 1.02 and a coefficient of variation of 13%.
Shear resistance of beam and column corresponding to side splitting
In the preceding sections, failures of pullout specimens were discussed. In this section, side splitting failures in beams subjected to anti-symmetric bending and shear are discussed. Figure 13a shows a simplified side-splitting failure in a beam with two continuous layers of longitudinal bars, corresponding to Fig. 3a. (Legend of Fig. 11: bars are distinguished by whether or not they are next to a tie; in the example, the total number of bars is N = 10 and the number of bars next to ties is N_t = 8.) In a beam with cutoff bars as shown in Fig. 4a, two types of failure, shown in Figs. 13b and 13c, are theoretically possible. However, the failure in Fig. 13b is less probable than that in Fig. 13c, as will be discussed later. Figure 14a shows a truss model for the shear resistance of the part of the beam between the two end regions, as defined by Schlaich et al. (1987), where the effect of flexural cracking or dowel action can be neglected. The shear stress of the failure plane in the second layer is assumed to reach τ_su as shown in Fig. 14b. The angle of the concrete strut depends on the tensile stress in the transverse reinforcement. Because of equilibrium, however, the shear stress in the cross-section of the beam is equal to the shear stress on the horizontal failure plane, as shown in Fig. 14c. As discussed in the previous section, the shear stress on the potential failure plane in the first (outer) layer (τ_su' in Fig. 14b) is smaller than that on the actual failure plane at the second layer, which results in the stepped diagram shown in Fig. 14c. The distribution in Fig. 14c can be simplified as shown in Fig. 14d; it is assumed that τ_su is distributed over the cross-sectional area b·j_e, where b is the beam width and j_e is the distance between the centroids of the top and the bottom longitudinal bars. This leads to the shear capacity equation V_su = τ_su·b·j_e (Eq. 5). As mentioned in the Appendix, a beam is expected to fail in the first layer when 1 + a_2/a_1 < 1.5, that is, when the number of bars in the second layer is less than half of the number of bars in the first layer. However, Eq. 5 is also applicable to this case, because τ_su in Eq. 2 represents the shear stress at the second layer. Figure 15 shows a beam in which all the bars in the second layer are terminated in the span. In this case, shear capacities can be calculated at two sections: (1) the capacity at section X (Fig. 15, left), associated with the failure in Fig. 13c, given by Eq. 5 with the depth at that section, V_suX = τ_su·b·j_eX (Eq. 6a); (2) the capacity at section Y (Fig. 15, right), associated with the failure in Fig. 13b, given by V_suY = τ_su·b·j_eY (Eq. 6b). The shear capacity of the beam would be the smaller of these two values. However, in most cases, V_suX is smaller than V_suY, because α_l in Eq. 4 is usually larger than the ratio j_eY/j_eX. Thus, the failure in Fig. 13c is more probable than that in Fig. 13b. This tendency is reported in all the studies dealing with beams with cutoff bars in Table 2 (Ito et al. 2013; Murakami 2014, 2015; Suzuki et al. 2015; Kim et al. 2002; Miyata et al. 2018). This procedure is applicable to a beam in which some of the bars in the second layer are terminated in the span; in this case, j_eX is the distance between the centroids of the top and the bottom longitudinal bars at section X. Table 2 includes such a beam. Takami and Eto (1999) conducted tests of beams with more than two layers of longitudinal bars, as shown at the top of Fig. 16; the test parameter was the number of bar layers, from one to five. Four deformed bars were arranged in each layer, and the beams were subjected to anti-symmetric bending.
The concrete strength of specimens 1-4 was identical (68.4 MPa = 9900 psi) and that of specimen 5 was slightly higher (73.7 MPa = 10700 psi). All the specimens failed in bond splitting at the innermost layer before yielding of the longitudinal bars. The light-gray bars in Fig. 16 show the test results: the strengths of the beams with two to five layers were similar to each other and were larger than that of the beam with one layer. The dark-gray bars in Fig. 16 show the calculation results using Eqs. 2 and 5, where j_e for the multi-layered beams is assumed to be the distance between the centroids of bars in the extreme two layers of the top and the bottom longitudinal bars. The calculations agree reasonably with the test results.
In short, it is proposed that the shear capacity of a beam with multiple layers of bars including cut-off bars is given as follows.
(1) The shear capacity of beam, V su , is the minimum value calculated by Eq. 5 at every section in the span (Fig. 15).
(2) The shear stress on the innermost potential failure plane, τ_su, is calculated by Eq. 2. (3) The depth j_e is assumed to be the distance between the centroids of bars in the most extreme two layers (the first and the second layers) of the top and the bottom longitudinal bars (Figs. 14d and 16). In order to grasp the meaning of Eqs. 2 and 5, let us consider the following case: λ = 1 (normal-weight concrete), α_t = 0.9 (top bar), and N_t/N = 0.5 (Fig. 11). If we assume α_l = 1.0 (single layer) and j_e/d = 0.9, we get Eq. 7a; if we assume α_l = 1.5 (multi-layer) and j_e/d = 0.8, we get Eq. 7b.
Table 2: Beam specimens that failed in bond-splitting in previous studies.
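Steps (1)-(3) above can be condensed into a single expression (a restatement of the procedure, not an additional equation from the paper):

V_{su} \;=\; \min_{x \,\in\, \mathrm{span}} \; \tau_{su}(x)\, b\, j_e(x),

where τ_su(x) is evaluated from Eq. 2 for the bar arrangement present at section x.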
The purple and green broken lines in Fig. 17a show the relationships given by Eqs. 7a and 7b, respectively. They are parallel to each other.
If we assume a beam with a tensile reinforcement ratio of 2% and moderate shear reinforcement, the shear strength given by ACI 318-19 can be plotted as well (Eq. 8). The black line in Fig. 17a shows this relationship, where the upper limit of the second term is considered. Size effect is neglected for simplicity. Note that the inclination of the black line is steeper than those of the broken lines, because the coefficient 220 in Eqs. 7a and 7b is smaller than f_yt of a grade 40 bar (f_yt = 276 MPa).
The blue and red solid lines in Fig. 17b show the relationship between shear strength and reinforcement given by ACI 318-19 for normal- and high-strength shear reinforcement, respectively. Figure 17b implies that beams with high-strength shear reinforcement and a single layer of bars tend to fail in bond splitting. Figures 17a and 17b can be interpreted as follows: (1) Concrete in a beam such as those in Figs. 14 or 15 is subjected to the shear stress shown in Fig. 18a.
(2) If stirrups are scarce and have moderate yield strength, the tensile failure shown in Fig. 18b may occur with the yielding of the stirrups, which corresponds to the region of small ρ_t in Fig. 17.
(3) If stirrups are abundant and the strength of the concrete is insufficient, diagonal compression failure (Fig. 18c) or vertical sliding shear failure (Fig. 18d) may occur, which corresponds to the region of large ρ_t in Fig. 17. (4) If stirrups are moderate and have high yield strength, horizontal shear failure (Fig. 18e) at the plane of the longitudinal reinforcement may occur, as discussed in this paper.
Verification
In order to verify the proposed method, fifty-seven beam specimens were chosen from previous studies (Ito et al. 2013; Murakami 2014, 2015; Takami and Eto 1999; Suzuki et al. 2015; Kamitani et al. 2001; Katori et al. 1988; Kaminosono et al. 1992; Kim et al. 2002; Masuda et al. 1996; Takeuchi et al. 1993; Miyata et al. 2018), as summarized in Table 2. All the specimens were subjected to anti-symmetric bending and failed in bond-splitting before the yielding of the transverse reinforcement. More than 70% of the beam specimens listed in Table 2 had stirrups with yield strengths higher than 700 MPa (= 100 ksi). Specimens that failed in shear were not included in the table for the following reasons: (1) There are many methods to predict the shear strength of beams.
(2) The proposed method can be used in combination with any of those methods.
(3) If we include specimens that failed in shear, we need to compute their strength in combination with a particular method for shear, which makes the verification process unclear because it then depends on the accuracy of the method for shear. Figure 19a shows the verification for the beam specimens with single-layered continuous reinforcement. The numerator V_exp on the vertical axis of Fig. 19a is the observed strength of each specimen, normalized by the shear force at flexural yielding, V_f, computed from the yield moment (in the customary approximation, V_f = M_f/l_s with M_f ≈ 0.9 A_s f_y d). In these expressions, M_f is the bending moment at yield; l_s is one half of the clear span of each beam specimen; f_y is the yield strength of the tensile reinforcement; A_s is the total cross-sectional area of the tensile reinforcement; and d is the distance between the centroid of the tensile reinforcement and the compressive extreme fiber of the concrete.
The numerator V_su on the horizontal axis of Fig. 19a shows the shear capacities calculated in accordance with Eq. 5. The mean value and the coefficient of variation (c.o.v.) of the ratios of measured to calculated strengths (V_exp/V_su) are also shown in the figures. Good agreement is observed except for the data enclosed by two circles. The two specimens enclosed by the red circle in Fig. 19a had special features: low-strength concrete (27 and 29 MPa = 4000 psi) and a high shear reinforcement ratio (1.2%); this effect is discussed later. The concrete strength of the specimen enclosed by the blue circle in Fig. 19a was again low (23 MPa = 3300 psi). Figure 19b is similar to Fig. 19a except that the numerator V_n on the horizontal axis shows the shear capacities calculated by the ACI 318-19 provisions, where V_c is based on the detailed method considering the effect of the tensile reinforcement ratio, and V_s is computed considering the limitation of 550 MPa (80 ksi) for shear reinforcement according to Table 20.2.2.4(a) in the provisions. The mean value in Fig. 19b (0.98) is smaller than unity, indicating that the ACI 318-19 provisions are not conservative.
The vertical axis of Fig. 19c represents the ratio of the observed strength V exp and the capacity calculated by ACI 318-19 provisions V n . The horizontal axis is the shear reinforcement ratio of each specimen divided by ρ t0 defined by the orange circle in Fig. 17a. The V exp /V n ratio is the lowest around ρ t /ρ t0 = 1.0; this agrees with Fig. 17a because the broken line is most distant from the solid line at ρ t = ρ t0 .
Figures 20a through 20c are for multi-layered beams with continuous reinforcement. Figures 21a through 21c are for double-layered beams with cut-off bars in the second layer. The mean value in Fig. 20b (1.16) is larger than that in Fig. 19b (0.98); this tendency agrees with Fig. 17b. The mean value in Fig. 21b (1.07) is smaller than that in Fig. 20b (1.16); this tendency agrees with the fact that most of the beam specimens with cut-off bars listed in Table 2 had continuous bars only in the first (outer) layer and exhibited bond failure at the first layer, as shown in Fig. 13. For the proposed method (Figs. 19a-21a), the mean ratios of measured-to-calculated capacity are larger than 1.1 and the c.o.v.'s are smaller than those of ACI 318. Figure 21 does not include beam specimens with extremely short development lengths that cause pullout of the cutoff bars; traditional provisions for development length are still required to prevent such pullout. Table 3 lists column specimens that were subjected to anti-symmetric bending and failed in bond-splitting before the yielding of the transverse reinforcement. Figure 22a shows the comparisons between the test results and the calculations; the denominator V_f includes the effect of axial force. The red circle in Fig. 22a indicates the data point with V_exp/V_su = 0.82, which is smaller than any data in Figs. 19a-21a. This specimen had special features, highlighted in yellow in Table 3: low-strength concrete (23 MPa = 3400 psi) and a high shear reinforcement ratio (1.1%). As shown in Fig. 17, the proposed method does not have an upper limit, unlike the conventional shear strength equation. The red circle in Fig. 22c indicates V_exp/V_n = 0.94 for this data point. The ratios V_exp/min(V_n, V_su) for the data enclosed by the red and blue circles in Fig. 19 are also larger than 0.91. It is therefore concluded that the combined use of the current shear strength equations (V_n) and the proposed method (V_su) provides acceptable predictions. (Notes to Table 3: P is the applied axial force; A_g is the gross area of the column section.)
Further research needs
As indicated by the red circles in Figs. 19a and 22a, Eq. 2 should have an upper limit. The limit may be dependent on α t and α l , which are smaller for top bars and larger for multiple-layer bars, respectively, as opposed to the upper limit for shear in ACI 318 (ACI 2019).
The proposed method has been verified only for specimens tested under anti-symmetric bending shear with uniform shear force. However, the shear force of any beam is not uniform because of gravity loads. If we consider the discussion by Marti (1986), applying Eq. 5 to a beam with large gravity loads and small seismic force seems too conservative. Further discussion is needed.
The proposed method has not been verified for specimens with slab, prestress or circular section. Further verification is recommended.
The last topic is ductility. The proposed method does not prevent splitting bond failure after flexural yielding. Further research is needed whether a similar approach is possible to prevent such failure.
Conclusion
Pullout test results from previous studies were reviewed in this paper and were used as a basis for developing a new method to evaluate bond splitting strength. As an alternative to the traditional method, in which the bond stress on the bar surface is evaluated for each bar, Equation 2 was proposed to evaluate the shear capacity of a failure plane below the bond-splitting plane (Fig. 6). The formula was used to calculate the shear capacities of RC beams with or without cut-off bars (Eq. 5). The method was successfully applied to most of the beam and column specimens that failed in bond splitting prior to the yielding of the longitudinal reinforcement. It is concluded that side splitting failure of RC beams and columns under seismic action can be treated as shear failure. The provisions in ACI 318-19 (ACI 2019) slightly underestimated some of the test results of single-layered beams (Fig. 19b).
package main
import (
"fmt"
"log"
"golang.org/x/crypto/bcrypt"
)
func main() {
pass := "<PASSWORD>"
hashedPass, err := hashPassword(pass)
if err != nil {
panic(err)
}
fmt.Println("Hash: ", hashedPass)
if err := comparePassword(pass, hashedPass); err != nil {
log.Fatalln("Incorrect password")
}
fmt.Println("Logged in")
}
func hashPassword(password string) ([]byte, error) {
return bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
}
func comparePassword(password string, hashedPassword []byte) error {
return bcrypt.CompareHashAndPassword(hashedPassword, []byte(password))
}
|
import codecs
import sys


def create_console_string_writer(stream=None, encoding='utf-8'):
    """Wrap a binary-capable stream so text is written with the given encoding."""
    stream = stream or sys.stdout
    return codecs.getwriter(encoding)(stream.buffer)
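# Illustrative usage (assumes the wrapped stream exposes a binary .buffer,
# as sys.stdout does in a normal Python 3 console):
#   writer = create_console_string_writer()
#   writer.write("héllo\n")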
class QuotecastParser:
"""Handle the payload returned from this endpoint :
"https://degiro.quotecast.vwdservices.com/CORS/{session_id}"
OVERALL EXPLANATION
The endpoint aforementioned returns financial data measurement
such as :
* LastPrice of a Stock
* LastVolume of a Stock
* ...
This endpoint use HTTP 1.1 protocol.
Since there are no way to retrieve data asynchronously from
HTTP 1.1, this endpoint uses Long-Polling method to retrieve data
as soon as they are available.
Thus to consume this endpoint one needs to recall it as soon as a
response has been received.
If no data updates are available a HEARTBEAT signal is sent every
5 seconds.
    The session timeout is approximately 15 seconds.
PAYLOAD DATA DESCRIPTION
The payload sent from this endpoint can contain three kind of
objects :
* DATA
* HEARTBEAT
* MATCHING
A DATA is an object with two elements :
* REFERENCE
* VALUE
A HEARTBEAT is signal sent every 5 seconds if no data updates
are available.
A MATCHING is an object with three elements :
* REFERENCE
* VWD_ID
* LABEL
The REFERENCE is a unique identifier in Degiro's Quotecast API
which refers to the financial data.
The VALUE is the value of the financial data.
THE VWD_ID is the id of the product (etf, option, stock, warrant...)
from which we retrieve measurements (LastPrice, LastVolume...).
The LABEL is the name of the measurements that we retrieve.
The MATCHING table is only sent at the first call of this endpoint.
PAYLOAD DATA STRUCTURE
The payload is a list of messages serialized in JSON, as follows :
```json
[
MESSAGE_1,
MESSAGE_2,
...
MESSAGE_X
]
```
What is called a MESSAGE here above, is an object with the two
attributes :
* "m"
* "v"
We can denote a message like this :
```python
MESSAGE = {
'm': MESSAGE_TYPE
'v': [CODE1, CODE2]
}
```
Here we have the following properties:
* m contains a MESSAGE_TYPE
* MESSAGE_TYPE is a str
* v is a list
* v contains 2 elements
* CODE1 is a str or an int
* CODE2 is a float
    The MESSAGE_TYPE indicates the type of message; it can take the
    following values:
* "a_req" : subscription
* "a_rel" : unsubscription
* "d" : rejected subscription
* "h" : heartbeat
* "sr" : session invalid
* "ue" : data not available
* "un" : numeric data
* "us" : string data
* ... (this list may not be exhaustive)
Depending on the MESSAGE_TYPE different kind of information are stored
inside :
* CODE1
* CODE2
If MESSAGE_TYPE = "a_req" or "a_rel" :
* CODE1 : contains the product's VWD_ID and the PARAMETER_NAME.
* CODE2 : contains the REFERENCE for the parameter in CODE1.
* Example of MESSAGE :
```json
{
"m": "a_req",
"v": ["365004197.B10Volume", 624239]
}
```
"365004197.B10Volume" <=> CODE1
365004197 <=> VWD_ID
B10Volume <=> PARAMETER_NAME
624239 <=> REFERENCE
If MESSAGE_TYPE = "un" or "us :
* CODE1 : contains the reference number.
* CODE2 : contains the value of the information referenced by
CODE1.
* Example of MESSAGE :
```json
{
"m": "a_req",
"v": [624239, 115.85]
}
```
624239 <=> CODE1
115.85 <=> CODE2
"""
@classmethod
def ticker_to_df(
cls,
ticker: Ticker,
column_list: List[str] = None,
) -> pd.DataFrame:
"""Converts a ticker to a "pandas.DataFrame".
Args:
ticker (Ticker):
Ticker to convert.
column_list (List[str]):
Mandatory fields : will be set to "None" if empty.
Default to [].
Returns:
pandas.DataFrame:
"pandas.DataFrame" containing the metrics.
Each row depicts a specific product.
Each column depicts a specific metric.
"""
if column_list is None:
column_list = list()
ticker_dict = cls.ticker_to_dict(
ticker=ticker,
column_list=column_list,
)
df = pd.DataFrame(ticker_dict.values())
return df
@staticmethod
def ticker_to_dict(
ticker: Ticker,
column_list: List[str] = None,
) -> Dict[str, Dict[str, Union[str, int]]]:
# VWD_ID # METRICS : NAME / VALUE
"""Converts a ticker to a "dict".
Args:
ticker (Ticker):
Ticker to convert.
column_list (List[str]):
Mandatory fields : will be set to "None" if empty.
Default to [].
Returns:
Dict[Union[str, int], Dict[str, Union[str, int]]]:
Dict containing all the metrics grouped by "vwd_id".
"""
if column_list is None:
column_list = list()
empty_list = [None] * len(column_list)
empty_metrics = dict(zip(column_list, empty_list))
empty_metrics[
"response_datetime"
] = ticker.metadata.response_datetime.ToJsonString()
empty_metrics["request_duration"] = (
ticker.metadata.request_duration.ToMicroseconds() / 10 ** 6
)
        ticker_dict: Dict[str, Dict[str, Union[str, int]]] = dict()
for product in ticker.products:
ticker_dict[product] = empty_metrics.copy()
ticker_dict[product]["vwd_id"] = product
ticker_dict[product].update(ticker.products[product].metrics)
return ticker_dict
@staticmethod
def build_ticker_from_quotecast(
quotecast: Quotecast,
references: Dict[int, List[str]] = None,
ticker: Ticker = None,
) -> Ticker:
"""Build or update a Ticker metrics using a Quotecast object.
Only the metrics which can be converted to float are supported.
But that should be enough to handle all the real use cases.
This was done to :
* Keep the Ticker structure simple and light.
* Have better performances during processing.
Args:
quotecast (Quotecast):
Object containing the raw metrics.
ticker (Ticker, optional):
Object to update with the new metrics.
Defaults to Ticker().
references (Dict[int, List[str]], optional):
                The references dictionary is a registry.
It links the products :
* reference
* vwd_id
* metric
Here is an example of how to populate it :
references[reference] = [vwd_id, metric]
Defaults to dict().
Raises:
AttributeError:
If the subscription is rejected.
Or if the metric is unknown.
Returns:
Ticker: New or updated Ticker.
"""
if references is None:
references = dict()
if ticker is None:
ticker = Ticker()
# SETUP PRODUCTS & METRICS
message_array = json.loads(quotecast.json_data)
for message in message_array:
if message["m"] == "un":
reference = message["v"][0]
value = message["v"][1]
product, metric = references[reference]
ticker.products[product].metrics[metric] = value
elif message["m"] == "us":
reference = message["v"][0]
value = message["v"][1]
product, metric = references[reference]
if value[4] == "-":
date = datetime.datetime.strptime(
value,
"%Y-%m-%d",
)
value = datetime.datetime.timestamp(date)
ticker.products[product].metrics[metric] = value
elif value[2] == ":":
time = datetime.time.fromisoformat(value)
value = time.hour * 3600 + time.minute * 60 + time.second
ticker.products[product].metrics[metric] = value
else:
# NOT CONVERTIBLE TO FLOAT
raise RuntimeWarning(
"Unsupported string metric : " f"{metric} = {message}"
)
elif message["m"] == "a_req":
references[message["v"][1]] = message["v"][0].rsplit(
sep=".",
maxsplit=1,
)
elif message["m"] == "a_rel":
delete_list = []
for reference in references:
if ".".join(references[reference]) == message["v"][0]:
delete_list.append(reference)
for reference in delete_list:
del references[reference]
elif message["m"] == "h":
pass
elif message["m"] == "ue":
pass
elif message["m"] == "d":
raise AttributeError(f"Subscription rejected : {message}")
else:
raise AttributeError(f"Unknown metric : {message}")
# SETUP PRODUCT LIST
ticker.product_list.extend(ticker.products)
# SETUP METADATA
ticker.metadata.MergeFrom(quotecast.metadata)
return ticker
@property
def references(self) -> Dict[int, List[str]]:
return self.__references
@property
def ticker(self) -> Ticker:
return self.__ticker
@property
def ticker_df(self) -> pd.DataFrame:
ticker = self.__ticker
ticker_df = self.ticker_to_df(ticker=ticker)
return ticker_df
@property
def ticker_dict(
self,
) -> Dict[str, Dict[str, Union[str, int]]]:
# VWD_ID # METRICS : NAME / VALUE
ticker = self.__ticker
ticker_dict = self.ticker_to_dict(ticker=ticker)
return ticker_dict
def __init__(self, forward_fill: bool = False):
"""
Args:
forward_fill (bool, optional):
Whether or not we want to fill the new Ticker with
previous received metrics.
Default to False.
"""
self.__forward_fill = forward_fill
self.__metrics_storage = MetricsStorage()
        self.__references: Dict[int, List[str]] = dict()
self.__ticker = Ticker()
self.__logger = logging.getLogger(self.__module__)
def put_quotecast(self, quotecast: Quotecast):
forward_fill = self.__forward_fill
metrics_storage = self.__metrics_storage
references = self.__references
ticker = self.build_ticker_from_quotecast(
quotecast=quotecast,
ticker=Ticker(),
references=references,
)
if forward_fill is True:
metrics_storage.fill_ticker(ticker=ticker)
self.__ticker = ticker
def rebuild_request(self) -> Quotecast.Request:
"""Rebuild the request from history (self.__references).
Returns:
Quotecast.Request:
Request matching data-stream.
"""
references = self.references
request = Quotecast.Request()
for vwd_id, metric in references.values():
request.subscriptions[vwd_id].append(metric)
return request |
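# Hypothetical usage sketch (assumes `quotecast` is a Quotecast message whose
# json_data and metadata were already filled from the Degiro endpoint):
#   parser = QuotecastParser(forward_fill=True)
#   parser.put_quotecast(quotecast=quotecast)
#   df = parser.ticker_df   # one row per vwd_id, one column per metric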
import type { Story, Meta } from "@storybook/react"
import React from "react"
import { withContainer, argTypes, argHidden } from "../../helpers/stories"
import Page, {
PageProps,
PageTitle,
Heading1,
Heading2,
Heading3,
ColumnBreak,
} from "./Page"
export default {
title: "Components/Page",
component: Page,
decorators: [withContainer],
argTypes: {
...argTypes,
story: argHidden,
},
} as Meta
export const Basic: Story<PageProps> = (args) => (
<>
<Page {...args}>
<PageTitle>Page 1 Title</PageTitle>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate
velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint
occaecat cupidatat non proident, sunt in culpa qui officia deserunt
mollit anim id est laborum.
</p>
<ColumnBreak />
<Heading1>Heading 1</Heading1>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate
velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint
occaecat cupidatat non proident, sunt in culpa qui officia deserunt
mollit anim id est laborum.
</p>
<Heading2>Heading 2</Heading2>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate
velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint
occaecat cupidatat non proident, sunt in culpa qui officia deserunt
mollit anim id est laborum.
</p>
<Heading3>Heading 3</Heading3>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate
velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint
occaecat cupidatat non proident, sunt in culpa qui officia deserunt
mollit anim id est laborum.
</p>
</Page>
<Page {...args} number={args.number && args.number + 1}>
<PageTitle>Page 2 Title</PageTitle>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate
velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint
occaecat cupidatat non proident, sunt in culpa qui officia deserunt
mollit anim id est laborum.
</p>
<ColumnBreak />
<Heading1>Heading 1</Heading1>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate
velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint
occaecat cupidatat non proident, sunt in culpa qui officia deserunt
mollit anim id est laborum.
</p>
<Heading2>Heading 2</Heading2>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate
velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint
occaecat cupidatat non proident, sunt in culpa qui officia deserunt
mollit anim id est laborum.
</p>
<Heading3>Heading 3</Heading3>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate
velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint
occaecat cupidatat non proident, sunt in culpa qui officia deserunt
mollit anim id est laborum.
</p>
</Page>
</>
)
|
import java.util.HashMap;
import java.util.Map;
import java.util.Scanner;
/**
* Created by Lux on 17.07.2017.
*/
public class Alphabet {
public static void main(String[] args) {
Scanner sc = new Scanner(System.in);
char[] str1 = sc.nextLine().toCharArray();
char[] str2 = sc.nextLine().toCharArray();
char[] str3 = sc.nextLine().toCharArray();
Map<Character, Character> map = new HashMap<>();
for (int i = 0; i < 26; i++) {
map.put(str1[i], str2[i]);
}
for (char aStr3 : str3) {
            if (aStr3 >= 'A' && aStr3 <= 'Z') {
                // map keys are lowercase letters; convert, look up, convert back
                System.out.print((char) (map.get((char) (aStr3 + 32)) - 32));
            } else if (aStr3 >= '0' && aStr3 <= '9') {
                System.out.print(aStr3);
            } else {
                System.out.print(map.get(aStr3));
}
}
}
}
|
/**
 * Inclusive: only files of this type will be considered. Once an inclusive
 * type is added, the exclusive types are no longer taken into account.
*
* For example: text/plain, image/gif, video/mpeg, audio/mid,
* application/msword
*
 * @param mimeType the MIME type to add; empty strings are ignored
*/
public void addInclusiveMimeType(String mimeType) {
        if (!mimeType.isEmpty()) {
inclusiveMimeTypes.add(mimeType);
}
}
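
// For context, a minimal self-contained sketch of how such a filter might be
// used. The MimeTypeFilter class, its exclusiveMimeTypes counterpart and the
// accepts() check are illustrative assumptions, not the original implementation.
import java.util.HashSet;
import java.util.Set;

class MimeTypeFilter {
    private final Set<String> inclusiveMimeTypes = new HashSet<>();
    private final Set<String> exclusiveMimeTypes = new HashSet<>();

    public void addInclusiveMimeType(String mimeType) {
        if (!mimeType.isEmpty()) {
            inclusiveMimeTypes.add(mimeType);
        }
    }

    public void addExclusiveMimeType(String mimeType) {
        if (!mimeType.isEmpty()) {
            exclusiveMimeTypes.add(mimeType);
        }
    }

    // Inclusive types, if any are present, take precedence over the exclusive list.
    public boolean accepts(String mimeType) {
        if (!inclusiveMimeTypes.isEmpty()) {
            return inclusiveMimeTypes.contains(mimeType);
        }
        return !exclusiveMimeTypes.contains(mimeType);
    }
} |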
package gaia.block;
import net.minecraft.block.SoundType;
import net.minecraft.block.material.Material;
public class BlockVanilla extends BlockBase {
public BlockVanilla(Material material, String blockName, float hardness, float resistance, SoundType soundType) {
super(material, blockName);
setHardness(hardness);
setResistance(resistance);
setSoundType(soundType);
}
} |
/*
* Copyright 2005, <NAME>, <EMAIL>.
* Distributed under the terms of the MIT License.
*
* Implements the MessagingTargetSet interface for AppInfoLists, so that
* no other representation (array/list) is needed to feed them into the
* MessageDeliverer.
*/
#ifndef APP_INFO_LIST_MESSAGING_TARGET_SET_H
#define APP_INFO_LIST_MESSAGING_TARGET_SET_H
#include "AppInfoList.h"
#include "MessageDeliverer.h"
class RosterAppInfo;
class AppInfoListMessagingTargetSet : public MessagingTargetSet {
public:
AppInfoListMessagingTargetSet(AppInfoList &list,
bool skipRegistrar = true);
virtual ~AppInfoListMessagingTargetSet();
virtual bool HasNext() const;
virtual bool Next(port_id &port, int32 &token);
virtual void Rewind();
virtual bool Filter(const RosterAppInfo *info);
private:
void _SkipFilteredOutInfos();
AppInfoList &fList;
AppInfoList::Iterator fIterator;
bool fSkipRegistrar;
};
#endif // APP_INFO_LIST_MESSAGING_TARGET_SET_H
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
from collections import deque
def main():
h, w = map(int, input().strip().split())
c_h, c_w = map(int, input().strip().split())
d_h, d_w = map(int, input().strip().split())
maze = [list(input().strip()) for _ in range(h)]
visited = [[0] * w for _ in range(h)]
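    # 0-1 BFS: adjacent walks cost 0, warp moves within the surrounding 5x5
    # box cost 1. Zero-cost moves are pushed to the same end of the deque that
    # pop() reads from, one-cost moves to the opposite end, so states are
    # expanded in nondecreasing cost order.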
def bfs():
dq = deque([(c_h-1, c_w-1, 0)])
while dq:
(ih, iw, cnt) = dq.pop()
if visited[ih][iw]:
continue
visited[ih][iw] = 1
if ih == d_h-1 and iw == d_w-1:
return cnt
for dh in range(-2, 3):
for dw in range(-2, 3):
if not (0 <= ih + dh < h and 0 <= iw + dw < w) \
or visited[ih + dh][iw + dw] == 1 \
or maze[ih + dh][iw + dw] == '#':
continue
if (dh, dw) in [(0, 1), (0, -1), (1, 0), (-1, 0)]:
dq.append((ih + dh, iw + dw, cnt))
else:
dq.appendleft((ih + dh, iw + dw, cnt+1))
return -1
print(bfs())
if __name__=='__main__':
main()
|
// Helper for {batch,layer,group}_norms below only
// Computes the jvp for `input * weight + bias` where weight and bias may be undefined
// Possibly modifies the input inplace
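// Concretely: for y = input * weight + bias, the forward tangent is
// dy = input_t * weight_p + input_p * weight_t + bias_t (undefined terms dropped).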
static Tensor _affine_jvp(
const c10::optional<Tensor>& input_p, Tensor& input_t,
const Tensor& weight_p, const Tensor& weight_t,
const Tensor& bias_t) {
TORCH_INTERNAL_ASSERT(input_p.has_value() == weight_p.defined());
if (weight_p.defined()) {
if (areAnyTensorSubclassLike({input_p.value(), input_t, weight_p, weight_t}) || input_t._is_zerotensor() || weight_t._is_zerotensor()) {
input_t = input_t * weight_p + input_p.value() * weight_t;
} else {
input_t *= weight_p;
auto temp = input_p.value();
temp *= weight_t;
input_t += temp;
}
}
if (bias_t.defined()) {
if (areAnyTensorSubclassLike({input_t, bias_t}) || input_t._is_zerotensor()) {
input_t = input_t + bias_t;
} else {
input_t += bias_t;
}
}
return input_t;
} |
High Court of Australia Transcripts
Ceremonial - Farewell to French CJ - Melbourne [2016] HCATrans 281 (18 November 2016)
Last Updated: 18 November 2016
[2016] HCATrans 281
H I G H C O U R T O F A U S T R A L I A
FAREWELL TO
THE HONOURABLE CHIEF JUSTICE ROBERT SHENTON FRENCH AC
AT
MELBOURNE
ON
FRIDAY, 18 NOVEMBER 2016, AT 9.32 AM
FRENCH CJ
KIEFEL J
GORDON J
FRENCH CJ: Yes, Ms Batrouney.
MS J.J. BATROUNEY: If it please the Court.
I appear on behalf of the Victorian Bar and seek leave to address the Court, very briefly, to mark this, the final sitting of your Honour Chief Justice French in Melbourne. This is not the occasion for a formal farewell, but we could not let the moment go unnoticed.
Your Honour was first appointed to the Federal Court at the age of 39. You have given over 30 years’ service to the judiciary. The Victorian Bar would like to sincerely thank you for that service.
The “French” High Court has been harmonious, collaborative and egalitarian. At the Melbourne Law School events last week, your Honour refused to even describe yourself as the first among equals. Our members have enjoyed appearing before your Honour. In his famous “Anthem”, the late Leonard Cohen wrote:
Forget your perfect offering
There is a crack in everything
That’s how the light gets in
While your Honour has always received a perfect offering, at least from the Victorian Bar, if there was light to be let in it would be.
There has always been a respectful and courteous relationship between your Honour and the Victorian Bar and you have always been ready to support our endeavours. A few weeks ago your Honour delivered the keynote address at the opening of the Victorian Bar ABA Conference. In 2014, your Honour opened the substantial extension to Owen Dixon Chambers West. You delivered the keynote opening address at our CPD Conference in 2012.
Your Honour and her Honour Mrs French have often supported us by attending the Victorian Bar annual dinner and in 2012 your Honour was the principal speaker. We sincerely appreciate your support.
On behalf of the Victorian Bar I wish you and her Honour Mrs French well in the exciting challenges and opportunities you will share in the next chapter of your illustrious careers.
If it please the Court.
FRENCH CJ: Thank you, Ms Batrouney, and my thanks to the Victorian Bar on whose behalf you have made your remarks.
I congratulate you on your election as President, and your colleagues on their election to the Bar Council. As I have often said, the voluntary service of members of the profession in that way is indispensable to its independence.
I have sat in Melbourne on many occasions over the past 30 years, as a trial and appellate judge in the Federal Court, and on special leave applications as Chief Justice. I have developed a considerable respect for the quality of the advocacy provided by the Bar here, as well as its commitment to access to justice which was evidenced by the Pro Bono Awards which I attended on Wednesday evening.
It has been a privilege to serve as both a Federal Court judge, and for the last eight years or so as Chief Justice. I leave the role with anticipation for a new phase, but I have very much enjoyed my 30 years, and not least in Melbourne.
Thank you.
MS BATROUNEY: Thank you, your Honour. If the Court pleases.
AT 9.36 AM THE COURT ADJOURNED
|
import React, { useState, useEffect } from 'react';
import { AutoComplete, Modal, Form, Input, Divider, Select, Button, Tooltip, Row, Col, InputNumber, Collapse } from 'antd';
import * as actions from '../../actions';
import '../../container/agent-management/index.less';
import { connect } from "react-redux";
import { IFormProps, DataSourceItemType } from '../../interface/common';
import { flowUnitList } from '../../constants/common';
import { IAgentHostSet, IEditOpHostsParams, IOpAgent, IReceivers } from '../../interface/agent';
import { getHostDetails, getHostMachineZone, editOpHosts, getAgentDetails, editOpAgent, getReceivers, getReceiversTopic } from '../../api/agent'
import MonacoEditor from '../../component/editor/monacoEditor';
import { setLimitUnit, judgeEmpty } from '../../lib/utils';
import { UpOutlined, DownOutlined } from '@ant-design/icons';
import { regString128 } from '../../constants/reg';
const { Panel } = Collapse;
const { Option } = Select;
const { TextArea } = Input;
const mapStateToProps = (state: any) => ({
params: state.modal.params,
});
interface IModifyHostParams {
hostObj: IAgentHostSet;
getData: any;
}
const ModifyHost = (props: { dispatch: any, params: IModifyHostParams }) => {
// console.log('props---', props.params);
const handleModifyCancel = () => {
props.dispatch(actions.setModalId(''));
}
return (
<Modal
title="编辑"
visible={true}
footer={null}
onCancel={handleModifyCancel}
>
<div className="modify-agent-list">
<WrappedHostConfigurationForm
dispatch={props.dispatch}
params={props.params}
/>
<WrappedAgentConfigurationForm
dispatch={props.dispatch}
params={props.params}
/>
</div>
</Modal>
)
}
const modifyAgentListLayout = {
labelCol: { span: 7 },
wrapperCol: { span: 16 },
};
interface IDispatch {
dispatch: any;
params: IModifyHostParams;
}
const HostConfigurationForm = (props: IFormProps & IDispatch) => {
const { getFieldDecorator } = props.form;
let { hostObj, getData } = props.params;
const [hostDetail, setHostDetail] = useState(hostObj);
const [machineZones, setMachineZones] = useState([] as string[]);
const getMachineZonesList = () => {
const zonesList = machineZones.map((ele: string) => { return { value: ele, text: ele } });
return zonesList;
}
const handleHostSubmit = (e: any) => {
e.preventDefault();
props.form.validateFields((err: any, values: any) => {
if (err) { return false; }
const params = {
department: hostDetail?.department || '',
id: hostDetail?.hostId,
machineZone: values?.machineZone || '',
} as IEditOpHostsParams;
return editOpHosts(params).then((res: any) => {
props.dispatch(actions.setModalId(''));
Modal.success({ title: '保存成功!', okText: '确认', onOk: () => getData() });
}).catch((err: any) => {
// console.log(err);
});
});
};
const getMachineZones = () => {
getHostMachineZone().then((res: string[]) => {
const zones = res.filter(ele => !!judgeEmpty(ele));
setMachineZones(zones);
}).catch((err: any) => {
// console.log(err);
});
}
const getHostDetail = () => {
getHostDetails(hostObj?.hostId).then((res: IAgentHostSet) => {
setHostDetail(res);
}).catch((err: any) => {
// console.log(err);
});
}
useEffect(() => {
getHostDetail();
getMachineZones();
}, []);
return (
<Form
className="host-configuration"
{...modifyAgentListLayout}
onSubmit={handleHostSubmit}
>
<div className="agent-list-head">
<b>主机配置</b>
<Button type="primary" htmlType="submit">确认</Button>
</div>
<Divider />
<Form.Item label="主机名">
{getFieldDecorator('hostName', {
initialValue: hostDetail?.hostName,
})(
<span>{hostDetail?.hostName}</span>,
)}
</Form.Item>
<Form.Item label="主机IP">
{getFieldDecorator('ip', {
initialValue: hostDetail?.ip,
})(
<span>{hostDetail?.ip}</span>,
)}
</Form.Item>
{hostDetail?.container === 1 &&
<Form.Item label="宿主机名">
{getFieldDecorator('parentHostName', {
initialValue: hostDetail?.parentHostName,
})(
<span>{hostDetail?.parentHostName}</span>,
)}
</Form.Item>}
<Form.Item label="所属机房">
{getFieldDecorator('machineZone', {
initialValue: hostDetail?.machineZone,
rules: [{
required: false,
validator: (rule: any, value: string, cb: any) => {
if (!new RegExp(regString128).test(value)) {
rule.message = '最大长度限制128位'
cb('最大长度限制128位')
} else {
cb()
}
},
}],
})(
<AutoComplete
placeholder="请选择或输入"
dataSource={getMachineZonesList()}
children={<Input />}
/>,
)}
</Form.Item>
</Form>
)
}
const WrappedHostConfigurationForm = Form.create<IFormProps & IDispatch>()(HostConfigurationForm);
const AgentConfigurationForm = (props: IFormProps & IDispatch) => {
const { getFieldDecorator } = props.form;
const { hostObj, getData } = props.params;
const [activeKeys, setActiveKeys] = useState([] as string[]);
const [agentDetail, setAgentDetail] = useState({} as IOpAgent);
const [receivers, setReceivers] = useState([] as IReceivers[]);
const [errorReceivers, setErrorReceivers] = useState([] as IReceivers[]);
const [receiverTopic, setReceiverTopic] = useState([] as DataSourceItemType[]);
const [errorTopic, setErrorTopic] = useState([] as DataSourceItemType[]);
const handleAgentSubmit = (e: any) => {
e.preventDefault();
props.form.validateFields((err: any, values: any) => {
if (err) {
collapseCallBack(['high']);
return false;
}
const params = {
metricsProducerConfiguration: values?.metricsProducerConfiguration,
errorLogsProducerConfiguration: values?.errorLogsProducerConfiguration,
advancedConfigurationJsonString: values?.advancedConfigurationJsonString,
byteLimitThreshold: values?.byteLimitThreshold * values?.unit,
cpuLimitThreshold: values?.cpuLimitThreshold,
errorLogsSendReceiverId: values?.errorLogsSendReceiverId,
errorLogsSendTopic: values?.errorLogsSendTopic,
metricsSendReceiverId: values?.metricsSendReceiverId,
metricsSendTopic: values?.metricsSendTopic,
id: agentDetail.id,
} as IOpAgent;
return editOpAgent(params).then((res: IOpAgent) => {
props.dispatch(actions.setModalId(''));
Modal.success({ title: '保存成功!', okText: '确认', onOk: () => getData() });
}).catch((err: any) => {
// console.log(err);
});
});
};
const collapseCallBack = (key: any) => {
setActiveKeys(key);
}
const onReceiverChange = (value: number) => {
        // getReceiverTopic(value); // re-enable once the Kafka cluster integration is ready
}
const onErrorChange = (value: number) => {
        // getReceiverTopic(value, true); // re-enable once the Kafka cluster integration is ready
}
const getReceiversList = () => {
getReceivers().then((res: IReceivers[]) => {
setReceivers(res);
setErrorReceivers(res);
}).catch((err: any) => {
// console.log(err);
});
}
const getReceiverTopic = (id: number, judge?: boolean) => {
getReceiversTopic(id).then((res: string[]) => {
const topics = res?.map(ele => { return { text: ele, value: ele } });
judge ? setErrorTopic(topics) : setReceiverTopic(topics);
}).catch((err: any) => {
// console.log(err);
});
}
const getAgentDetail = () => {
getAgentDetails(hostObj?.agentId).then((res: IOpAgent) => {
setAgentDetail(res);
}).catch((err: any) => {
// console.log(err);
});
}
useEffect(() => {
if (hostObj.agentId) {
getAgentDetail();
getReceiversList();
}
}, []);
return (
<Form
className="agent-configuration"
{...modifyAgentListLayout}
onSubmit={handleAgentSubmit}
>
<div className="agent-list-head">
<b>Agent配置</b>
{hostObj.agentId && <Button type="primary" htmlType="submit">确认</Button>}
</div>
<Divider />
{hostObj.agentId ? <>
<Form.Item label="CPU核数上限">
{getFieldDecorator('cpuLimitThreshold', {
initialValue: agentDetail?.cpuLimitThreshold,
rules: [{ required: true, message: '请输入' }],
})(
<InputNumber min={1} placeholder="请输入" />,
)}
<span> 核</span>
</Form.Item>
{/* <Form.Item label="出口流量上限">
<Row>
<Col span={8}>
{getFieldDecorator('byteLimitThreshold', {
initialValue: setLimitUnit(agentDetail?.byteLimitThreshold, 1)?.maxBytesPerLogEvent,
rules: [{ required: true, message: '请输入' }],
})(
<InputNumber min={1} placeholder="请输入" />,
)}
</Col>
<Col span={6}>
<Form.Item>
{getFieldDecorator('unit', {
initialValue: setLimitUnit(agentDetail?.byteLimitThreshold, 1)?.flowunit,
rules: [{ required: true, message: '请输入' }],
})(
<Select>
{flowUnitList.map((v, index) => (
<Option key={index} value={v.value}>{v.label}</Option>
))}
</Select>,
)}
</Form.Item>
</Col>
</Row>
</Form.Item> */}
<Collapse
bordered={false}
expandIconPosition="right"
onChange={collapseCallBack}
activeKey={activeKeys?.length ? ['high'] : []}
>
<Panel
header={<h3>高级配置</h3>}
extra={<a>{activeKeys?.length ? <>收起 <UpOutlined /></> : <>展开 <DownOutlined /></>}</a>}
showArrow={false}
key="high"
>
<Row>
<Form.Item label="指标流接收集群">
{getFieldDecorator('metricsSendReceiverId', {
initialValue: agentDetail?.metricsSendReceiverId,
rules: [{ required: true, message: '请选择' }],
})(
<Select onChange={onReceiverChange}>
{receivers.map((v: IReceivers, index: number) => (
<Option key={index} value={v.id}>
{v.kafkaClusterName.length > 15 ? <Tooltip placement="bottomLeft" title={v.kafkaClusterName}>{v.kafkaClusterName}</Tooltip> : v.kafkaClusterName}
</Option>
))}
</Select>,
)}
</Form.Item>
<Form.Item label="指标流接收Topic">
{getFieldDecorator('metricsSendTopic', {
initialValue: agentDetail?.metricsSendTopic,
rules: [{ required: true, message: '请输入' }],
})(
<AutoComplete
placeholder="请选择或输入"
dataSource={receiverTopic}
children={<Input />}
/>,
)}
</Form.Item>
<Form.Item label="生产端属性">
{getFieldDecorator('metricsProducerConfiguration', {
initialValue: agentDetail?.metricsProducerConfiguration,
rules: [{ message: '请输入', pattern: /^[-\w]{1,1024}$/, }],
})(
<TextArea placeholder="默认值,如修改,覆盖相应生产端配置" />,
)}
</Form.Item>
<Form.Item label="错误日志接收集群">
{getFieldDecorator('errorLogsSendReceiverId', {
initialValue: agentDetail?.errorLogsSendReceiverId,
rules: [{ required: true, message: '请选择' }],
})(
<Select onChange={onErrorChange}>
{errorReceivers.map((v: IReceivers, index: number) => (
<Option key={index} value={v.id}>
{v.kafkaClusterName.length > 15 ? <Tooltip placement="bottomLeft" title={v.kafkaClusterName}>{v.kafkaClusterName}</Tooltip> : v.kafkaClusterName}
</Option>
))}
</Select>,
)}
</Form.Item>
<Form.Item label="错误日志接收Topic">
{getFieldDecorator('errorLogsSendTopic', {
initialValue: agentDetail?.errorLogsSendTopic,
rules: [{ required: true, message: '请输入' }],
})(
<AutoComplete
placeholder="请选择或输入"
dataSource={errorTopic}
children={<Input />}
/>,
)}
</Form.Item>
<Form.Item label="生产端属性">
{getFieldDecorator('errorLogsProducerConfiguration', {
                initialValue: agentDetail?.errorLogsProducerConfiguration,
rules: [{ message: '请输入', pattern: /^[-\w]{1,1024}$/, }],
})(
<TextArea placeholder="默认值,如修改,覆盖相应生产端配置" />,
)}
</Form.Item>
<Form.Item label="配置信息">
{getFieldDecorator('advancedConfigurationJsonString', {
initialValue: agentDetail?.advancedConfigurationJsonString || '',
rules: [{ required: true, message: '请输入' }],
})(
<MonacoEditor {...props} />
)}
</Form.Item>
</Row>
</Panel>
</Collapse>
</> : <p className='agent-installed'>该主机未安装Agent</p>}
</Form>
)
}
const WrappedAgentConfigurationForm = Form.create<IFormProps & IDispatch>()(AgentConfigurationForm);
export default connect(mapStateToProps)(ModifyHost);
|
// NewSyncInstrument implements metric.MeterImpl.
func (u *uniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
u.lock.Lock()
defer u.lock.Unlock()
impl, err := u.checkUniqueness(descriptor)
if err != nil {
return nil, err
} else if impl != nil {
return impl.(metric.SyncImpl), nil
}
syncInst, err := u.impl.NewSyncInstrument(descriptor)
if err != nil {
return nil, err
}
u.state[keyOf(descriptor)] = syncInst
return syncInst, nil
} |
Applebee’s build-your-sampler offers a combo platter of two to five appetizers. If you order the combination of cheeseburger egg rolls, pretzels and beer cheese dip, chicken quesadillas, boneless buffalo wings and spinach and artichoke dip, you’re staring down a whopping 3,390 calories, 11,650 milligrams of sodium and 65 grams of fat.
According to the Center for Science in the Public Interest , Applebee’s boasts one of the unhealthiest menu items in America. Unfortunately, this food bomb lands on your table before you even order an entree.
Even if you and your friends split that platter four ways, each diner would still consume about 850 calories ― and that’s before the entrees arrive. One chicken tenders basket at Applebee’s can add another 1,160 calories and 2,540 milligrams of sodium to your meal. Throw in a blue ribbon brownie bite for dessert and you basically ate for two.
“At Applebee’s we offer our guests a wide variety of items to tailor each visit to their specific dining needs,” Applebee’s executive director of communications Steve Coe wrote in a statement to The Huffington Post. He pointed out that the appetizer combo plates in question are designed to share ― and that calorie-conscious customers can choose from a menu of low-calorie options.
Feast your eyes upon what chain restaurants hath wrought, America. May the gods of fast casual dining have mercy on our souls.
This post has been updated to include additional quotes. |
package net.coding.program;
import android.app.ActivityManager;
import android.app.Application;
import android.content.Context;
import android.util.Log;
import com.baidu.mapapi.SDKInitializer;
import com.loopj.android.http.AsyncHttpClient;
import com.loopj.android.http.JsonHttpResponseHandler;
import com.nostra13.universalimageloader.cache.disc.naming.Md5FileNameGenerator;
import com.nostra13.universalimageloader.core.ImageLoader;
import com.nostra13.universalimageloader.core.ImageLoaderConfiguration;
import com.nostra13.universalimageloader.core.assist.QueueProcessingType;
import net.coding.program.common.Unread;
import net.coding.program.model.AccountInfo;
import net.coding.program.model.UserObject;
import net.coding.program.third.MyImageDownloader;
import java.util.List;
/**
* Created by cc191954 on 14-8-9.
*/
public class MyApp extends Application {
public static float sScale;
public static int sWidthDp;
public static int sWidthPix;
public static int sEmojiNormal;
public static int sEmojiMonkey;
public static UserObject sUserObject;
public static Unread sUnread;
public static boolean sMainCreate = false;
public static void setMainActivityState(boolean create) {
sMainCreate = create;
}
public static boolean getMainActivityState() {
return sMainCreate;
}
@Override
public void onCreate() {
super.onCreate();
initImageLoader(this);
        try { // May throw on x86 devices, probably because Baidu does not ship an x86 .so library
            // Initialize the Baidu LBS SDK only in the main process
if (this.getPackageName().equals(getProcessName(this))) {
SDKInitializer.initialize(this);
}
} catch (Exception e) {}
sScale = getResources().getDisplayMetrics().density;
sWidthPix = getResources().getDisplayMetrics().widthPixels;
sWidthDp = (int) (sWidthPix / sScale);
sEmojiNormal = getResources().getDimensionPixelSize(R.dimen.emoji_normal);
sEmojiMonkey = getResources().getDimensionPixelSize(R.dimen.emoji_monkey);
sUserObject = AccountInfo.loadAccount(this);
sUnread = new Unread();
}
public static void initImageLoader(Context context) {
ImageLoaderConfiguration config = new ImageLoaderConfiguration.Builder(context)
.threadPriority(Thread.NORM_PRIORITY - 2)
.denyCacheImageMultipleSizesInMemory()
.diskCacheFileNameGenerator(new Md5FileNameGenerator())
.diskCacheSize(50 * 1024 * 1024) // 50 Mb
.diskCacheFileCount(300)
.imageDownloader(new MyImageDownloader(context))
.tasksProcessingOrder(QueueProcessingType.LIFO)
// .writeDebugLogs() // Remove for release app
.diskCacheExtraOptions(sWidthPix / 3, sWidthPix / 3, null)
.build();
ImageLoader.getInstance().init(config);
}
private static String getProcessName(Context context) {
ActivityManager actMgr = (ActivityManager) context.getSystemService(Context.ACTIVITY_SERVICE);
List<ActivityManager.RunningAppProcessInfo> appList = actMgr.getRunningAppProcesses();
for (ActivityManager.RunningAppProcessInfo info : appList) {
if (info.pid == android.os.Process.myPid()) {
return info.processName;
}
}
return "";
}
}
|
Extension Method of Safety Evaluation and Its Application
The extension method based on entropy weight is a new safety evaluation method. The entropy weight, an objective weight, is determined by entropy theory, which avoids the subjectivity involved in assigning weights to the many factors in the lower hierarchy. The comprehensive correlation degree is taken as the evaluation criterion, so the subjectivity of the evaluation model is also avoided. The method was applied to the safety evaluation of fully mechanized mining faces in the 6th mine of Pingdingshan Coal Group Corporation Ltd., yielding a safety ranking of all fully mechanized mining faces and the safety differences among them. Safety problems in the man, machine and environment aspects of the faces are identified, and corresponding countermeasures to improve safety are put forward.
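To make the entropy-weight step concrete, the sketch below follows the standard formulation: for m objects and n indicators, e_j = -k * sum_i p_ij * ln(p_ij) with k = 1/ln(m), and w_j proportional to (1 - e_j). It assumes a non-negative indicator matrix with at least one positive entry per column; all names are illustrative and not taken from the paper.
// Minimal entropy-weight sketch: rows are evaluated objects (e.g. mining
// faces), columns are indicators. Values are assumed non-negative, with at
// least one positive entry per column.
public class EntropyWeight {
    public static double[] weights(double[][] x) {
        int m = x.length, n = x[0].length;
        double k = 1.0 / Math.log(m);
        double[] e = new double[n];
        for (int j = 0; j < n; j++) {
            double colSum = 0;
            for (int i = 0; i < m; i++) {
                colSum += x[i][j];
            }
            for (int i = 0; i < m; i++) {
                double p = x[i][j] / colSum;
                if (p > 0) {
                    e[j] -= k * p * Math.log(p); // 0 * ln(0) is treated as 0
                }
            }
        }
        double[] w = new double[n];
        double total = 0;
        for (int j = 0; j < n; j++) {
            total += 1 - e[j]; // divergence degree of indicator j
        }
        for (int j = 0; j < n; j++) {
            w[j] = (1 - e[j]) / total; // normalized entropy weight
        }
        return w;
    }
} |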
// DeleteObjects will take one or more paths, and delete them from the s3 file system
func (s3fs *S3FS) DeleteObjects(path ...string) error {
svc := s3.New(s3fs.session)
objects := make([]*s3.ObjectIdentifier, 0, len(path))
for _, p := range path {
s3Path := strings.TrimPrefix(p, "/")
object := &s3.ObjectIdentifier{
Key: aws.String(s3Path),
}
objects = append(objects, object)
}
input := &s3.DeleteObjectsInput{
Bucket: aws.String(s3fs.config.S3Bucket),
Delete: &s3.Delete{
Objects: objects,
Quiet: aws.Bool(false),
},
}
_, err := svc.DeleteObjects(input)
return err
} |
/**
* A version of InvWrapper which can handle the special requirements of WorldlyContainers in addition to still handling regular Containers
*/
private static class WorldlyInvWrapper extends InvWrapper{
//Because WorldlyContainer adds extra methods to control item movement, we need to adjust the wrapper to use them
private final Direction direction;
public WorldlyInvWrapper(Container inv, Direction accessDirection){
super(inv);
this.direction = accessDirection;
}
@Nonnull
@Override
public ItemStack insertItem(int slot, @Nonnull ItemStack stack, boolean simulate){
if(getInv() instanceof WorldlyContainer wInv && !wInv.canPlaceItemThroughFace(slot, stack, direction)){
return stack;
}
return super.insertItem(slot, stack, simulate);
}
@Nonnull
@Override
public ItemStack extractItem(int slot, int amount, boolean simulate){
if(getInv() instanceof WorldlyContainer wInv && !wInv.canTakeItemThroughFace(slot, wInv.getItem(slot), direction)){
return ItemStack.EMPTY;
}
return super.extractItem(slot, amount, simulate);
}
} |
/*
* OnlineIndexerBuildIndexTest.java
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2015-2019 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.apple.foundationdb.record.provider.foundationdb;
import com.apple.foundationdb.async.AsyncUtil;
import com.apple.foundationdb.async.MoreAsyncUtil;
import com.apple.foundationdb.async.RangeSet;
import com.apple.foundationdb.record.IndexState;
import com.apple.foundationdb.record.TestRecords1Proto;
import com.apple.foundationdb.record.logging.KeyValueLogMessage;
import com.apple.foundationdb.record.logging.LogMessageKeys;
import com.apple.foundationdb.record.logging.TestLogMessageKeys;
import com.apple.foundationdb.record.metadata.Index;
import com.apple.foundationdb.record.metadata.Key;
import com.apple.foundationdb.record.query.RecordQuery;
import com.apple.foundationdb.record.query.plan.plans.RecordQueryPlan;
import com.apple.foundationdb.synchronizedsession.SynchronizedSessionLockedException;
import com.apple.foundationdb.tuple.ByteArrayUtil2;
import com.apple.foundationdb.tuple.Tuple;
import com.google.protobuf.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.Function;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assumptions.assumeFalse;
/**
* A base class for testing building indexes with {@link OnlineIndexer#buildIndex()} (or similar APIs).
*/
abstract class OnlineIndexerBuildIndexTest extends OnlineIndexerTest {
private static final Logger LOGGER = LoggerFactory.getLogger(OnlineIndexerBuildIndexTest.class);
private boolean safeBuild;
OnlineIndexerBuildIndexTest(boolean safeBuild) {
this.safeBuild = safeBuild;
}
void singleRebuild(@Nonnull List<TestRecords1Proto.MySimpleRecord> records, @Nullable List<TestRecords1Proto.MySimpleRecord> recordsWhileBuilding,
int agents, boolean overlap, boolean splitLongRecords,
@Nonnull Index index, @Nonnull Runnable beforeBuild, @Nonnull Runnable afterBuild, @Nonnull Runnable afterReadable) {
LOGGER.info(KeyValueLogMessage.of("beginning rebuild test",
TestLogMessageKeys.RECORDS, records.size(),
LogMessageKeys.RECORDS_WHILE_BUILDING, recordsWhileBuilding == null ? 0 : recordsWhileBuilding.size(),
TestLogMessageKeys.AGENTS, agents,
TestLogMessageKeys.OVERLAP, overlap,
TestLogMessageKeys.SPLIT_LONG_RECORDS, splitLongRecords,
TestLogMessageKeys.INDEX, index)
);
final FDBStoreTimer timer = new FDBStoreTimer();
final FDBRecordStoreTestBase.RecordMetaDataHook onlySplitHook = metaDataBuilder -> {
if (splitLongRecords) {
metaDataBuilder.setSplitLongRecords(true);
metaDataBuilder.removeIndex("MySimpleRecord$str_value_indexed");
}
};
final FDBRecordStoreTestBase.RecordMetaDataHook hook = metaDataBuilder -> {
onlySplitHook.apply(metaDataBuilder);
metaDataBuilder.addIndex("MySimpleRecord", index);
};
LOGGER.info(KeyValueLogMessage.of("inserting elements prior to test",
TestLogMessageKeys.RECORDS, records.size()));
openSimpleMetaData(onlySplitHook);
try (FDBRecordContext context = openContext()) {
for (TestRecords1Proto.MySimpleRecord record : records) {
// Check presence first to avoid overwriting version information of previously added records.
Tuple primaryKey = Tuple.from(record.getRecNo());
if (recordStore.loadRecord(primaryKey) == null) {
recordStore.saveRecord(record);
}
}
context.commit();
}
LOGGER.info(KeyValueLogMessage.of("running before build for test"));
beforeBuild.run();
openSimpleMetaData(hook);
LOGGER.info(KeyValueLogMessage.of("adding index", TestLogMessageKeys.INDEX, index));
openSimpleMetaData(hook);
final boolean isAlwaysReadable;
try (FDBRecordContext context = openContext()) {
// If it is a safe build, it should work without setting the index state to write-only, which will be taken
// care of by OnlineIndexer.
if (!safeBuild) {
LOGGER.info(KeyValueLogMessage.of("marking write-only", TestLogMessageKeys.INDEX, index));
recordStore.markIndexWriteOnly(index).join();
}
isAlwaysReadable = safeBuild && recordStore.isIndexReadable(index);
context.commit();
}
LOGGER.info(KeyValueLogMessage.of("creating online index builder",
TestLogMessageKeys.INDEX, index,
TestLogMessageKeys.RECORD_TYPES, metaData.recordTypesForIndex(index),
LogMessageKeys.SUBSPACE, ByteArrayUtil2.loggable(subspace.pack()),
LogMessageKeys.LIMIT, 20,
TestLogMessageKeys.RECORDS_PER_SECOND, OnlineIndexer.DEFAULT_RECORDS_PER_SECOND * 100));
final OnlineIndexer.Builder builder = OnlineIndexer.newBuilder()
.setDatabase(fdb)
.setMetaData(metaData)
.setIndex(index)
.setSubspace(subspace)
.setConfigLoader(old -> {
OnlineIndexer.Config.Builder conf = OnlineIndexer.Config.newBuilder()
.setMaxLimit(20)
.setMaxRetries(Integer.MAX_VALUE)
.setRecordsPerSecond(OnlineIndexer.DEFAULT_RECORDS_PER_SECOND * 100);
if (ThreadLocalRandom.current().nextBoolean()) {
// randomly enable the progress logging to ensure that it doesn't throw exceptions,
// or otherwise disrupt the build.
LOGGER.info("Setting progress log interval");
conf.setProgressLogIntervalMillis(0);
}
return conf.build();
}).setTimer(timer);
if (ThreadLocalRandom.current().nextBoolean()) {
LOGGER.info("Setting priority to DEFAULT");
builder.setPriority(FDBTransactionPriority.DEFAULT);
}
if (fdb.isTrackLastSeenVersion()) {
LOGGER.info("Setting weak read semantics");
builder.setWeakReadSemantics(new FDBDatabase.WeakReadSemantics(0L, Long.MAX_VALUE, true));
}
if (!safeBuild) {
builder.setIndexStatePrecondition(OnlineIndexer.IndexStatePrecondition.ERROR_IF_DISABLED_CONTINUE_IF_WRITE_ONLY);
builder.setUseSynchronizedSession(false);
}
try (OnlineIndexer indexBuilder = builder.build()) {
CompletableFuture<Void> buildFuture;
LOGGER.info(KeyValueLogMessage.of("building index",
TestLogMessageKeys.INDEX, index,
TestLogMessageKeys.AGENT, agents,
LogMessageKeys.RECORDS_WHILE_BUILDING, recordsWhileBuilding == null ? 0 : recordsWhileBuilding.size(),
TestLogMessageKeys.OVERLAP, overlap));
if (agents == 1) {
buildFuture = indexBuilder.buildIndexAsync(false);
} else {
if (overlap) {
CompletableFuture<?>[] futures = new CompletableFuture<?>[agents];
for (int i = 0; i < agents; i++) {
final int agent = i;
futures[i] = safeBuild ?
indexBuilder.buildIndexAsync(false)
.exceptionally(exception -> {
// (agents - 1) of the agents should stop with SynchronizedSessionLockedException
// because the other one is already working on building the index.
if (exception.getCause() instanceof SynchronizedSessionLockedException) {
LOGGER.info(KeyValueLogMessage.of("Detected another worker processing this index",
TestLogMessageKeys.INDEX, index,
TestLogMessageKeys.AGENT, agent), exception);
return null;
} else {
throw new CompletionException(exception);
}
}) :
indexBuilder.buildIndexAsync(false);
}
buildFuture = CompletableFuture.allOf(futures);
} else {
// Safe builds do not support building ranges yet.
assumeFalse(safeBuild);
buildFuture = indexBuilder.buildEndpoints().thenCompose(tupleRange -> {
if (tupleRange != null) {
long start = tupleRange.getLow().getLong(0);
long end = tupleRange.getHigh().getLong(0);
CompletableFuture<?>[] futures = new CompletableFuture<?>[agents];
for (int i = 0; i < agents; i++) {
long itrStart = start + (end - start) / agents * i;
long itrEnd = (i == agents - 1) ? end : start + (end - start) / agents * (i + 1);
LOGGER.info(KeyValueLogMessage.of("building range",
TestLogMessageKeys.INDEX, index,
TestLogMessageKeys.AGENT, i,
TestLogMessageKeys.BEGIN, itrStart,
TestLogMessageKeys.END, itrEnd));
futures[i] = indexBuilder.buildRange(
Key.Evaluated.scalar(itrStart),
Key.Evaluated.scalar(itrEnd));
}
return CompletableFuture.allOf(futures);
} else {
return AsyncUtil.DONE;
}
});
}
}
if (safeBuild) {
buildFuture = MoreAsyncUtil.composeWhenComplete(
buildFuture,
(result, ex) -> indexBuilder.checkNoOngoingOnlineIndexBuildsAsync(),
fdb::mapAsyncToSyncException);
}
if (recordsWhileBuilding != null && recordsWhileBuilding.size() > 0) {
int i = 0;
while (i < recordsWhileBuilding.size()) {
List<TestRecords1Proto.MySimpleRecord> thisBatch = recordsWhileBuilding.subList(i, Math.min(i + 30, recordsWhileBuilding.size()));
fdb.run(context -> {
FDBRecordStore store = recordStore.asBuilder().setContext(context).build();
thisBatch.forEach(store::saveRecord);
return null;
});
i += 30;
}
}
buildFuture.join();
// if a record is added to a range that has already been built, it will not be counted, otherwise,
// it will.
long additionalScans = 0;
if (recordsWhileBuilding != null && recordsWhileBuilding.size() > 0) {
additionalScans += (long)recordsWhileBuilding.size();
}
try (FDBRecordContext context = openContext()) {
IndexBuildState indexBuildState = context.asyncToSync(FDBStoreTimer.Waits.WAIT_GET_INDEX_BUILD_STATE,
IndexBuildState.loadIndexBuildStateAsync(recordStore, index));
IndexState indexState = indexBuildState.getIndexState();
if (isAlwaysReadable) {
assertEquals(IndexState.READABLE, indexState);
} else {
assertEquals(IndexState.WRITE_ONLY, indexState);
assertEquals(indexBuilder.getTotalRecordsScanned(), indexBuildState.getRecordsScanned());
// Count index is not defined so we cannot determine the records in total from it.
assertNull(indexBuildState.getRecordsInTotal());
}
}
assertThat(indexBuilder.getTotalRecordsScanned(),
allOf(
greaterThanOrEqualTo((long)records.size()),
lessThanOrEqualTo((long)records.size() + additionalScans)
));
}
KeyValueLogMessage msg = KeyValueLogMessage.build("building index - completed", TestLogMessageKeys.INDEX, index);
msg.addKeysAndValues(timer.getKeysAndValues());
LOGGER.info(msg.toString());
LOGGER.info(KeyValueLogMessage.of("running post build checks", TestLogMessageKeys.INDEX, index));
        // Do not check afterBuild if this is a safe build and the index was readable before the build: the tests may
        // expect queries not to use the index (since it would normally not be readable yet), but in that case queries
        // do use the index because it is already readable.
if (!isAlwaysReadable) {
afterBuild.run();
}
LOGGER.info(KeyValueLogMessage.of("verifying range set emptiness", TestLogMessageKeys.INDEX, index));
try (FDBRecordContext context = openContext()) {
RangeSet rangeSet = new RangeSet(recordStore.indexRangeSubspace(metaData.getIndex(index.getName())));
System.out.println("Range set for " + records.size() + " records:\n" + rangeSet.rep(context.ensureActive()).join());
assertEquals(Collections.emptyList(), rangeSet.missingRanges(context.ensureActive()).asList().join());
context.commit();
}
LOGGER.info(KeyValueLogMessage.of("marking index readable", TestLogMessageKeys.INDEX, index));
try (FDBRecordContext context = openContext()) {
boolean updated = recordStore.markIndexReadable(index).join();
if (isAlwaysReadable) {
assertFalse(updated);
} else {
assertTrue(updated);
}
context.commit();
}
afterReadable.run();
LOGGER.info(KeyValueLogMessage.of("ending rebuild test",
TestLogMessageKeys.RECORDS, records.size(),
LogMessageKeys.RECORDS_WHILE_BUILDING, recordsWhileBuilding == null ? 0 : recordsWhileBuilding.size(),
TestLogMessageKeys.AGENTS, agents,
TestLogMessageKeys.OVERLAP, overlap,
TestLogMessageKeys.SPLIT_LONG_RECORDS, splitLongRecords,
TestLogMessageKeys.INDEX, index)
);
}
<T> void executeQuery(@Nonnull RecordQuery query, @Nonnull String planString, @Nonnull List<T> expected, @Nonnull Function<FDBQueriedRecord<Message>, T> projection) {
RecordQueryPlan plan = planner.plan(query);
assertEquals(planString, plan.toString());
List<T> retrieved = recordStore.executeQuery(plan).map(projection).asList().join();
assertEquals(expected, retrieved);
}
void executeQuery(@Nonnull RecordQuery query, @Nonnull String planString, @Nonnull List<Message> expected) {
executeQuery(query, planString, expected, FDBQueriedRecord::getRecord);
}
<K,V extends Message> Map<K,List<Message>> group(@Nonnull List<V> values, @Nonnull Function<V, K> keyFunction) {
Map<K,List<Message>> map = new HashMap<>();
for (V value : values) {
K key = keyFunction.apply(value);
if (map.containsKey(key)) {
map.get(key).add(value);
} else {
List<Message> toAdd = new ArrayList<>();
toAdd.add(value);
map.put(key, toAdd);
}
}
return map;
}
@Nonnull
List<TestRecords1Proto.MySimpleRecord> updated(@Nonnull List<TestRecords1Proto.MySimpleRecord> origRecords, @Nonnull List<TestRecords1Proto.MySimpleRecord> addedRecords) {
Map<Long,TestRecords1Proto.MySimpleRecord> lastRecordWithKey = new HashMap<>();
for (TestRecords1Proto.MySimpleRecord record : origRecords) {
if (record.hasRecNo()) {
lastRecordWithKey.put(record.getRecNo(), record);
} else {
lastRecordWithKey.put(null, record);
}
}
for (TestRecords1Proto.MySimpleRecord record : addedRecords) {
if (record.hasRecNo()) {
lastRecordWithKey.put(record.getRecNo(), record);
} else {
lastRecordWithKey.put(null, record);
}
}
List<TestRecords1Proto.MySimpleRecord> updatedRecords = new ArrayList<>(lastRecordWithKey.size());
for (TestRecords1Proto.MySimpleRecord record : lastRecordWithKey.values()) {
updatedRecords.add(record);
}
updatedRecords.sort(Comparator.comparingLong(TestRecords1Proto.MySimpleRecord::getRecNo));
return updatedRecords;
}
FDBStoredRecord<Message> createStoredMessage(@Nonnull TestRecords1Proto.MySimpleRecord record) {
return FDBStoredRecord.newBuilder()
.setPrimaryKey(Tuple.from(record.getRecNo()))
.setRecordType(recordStore.getRecordMetaData().getRecordType("MySimpleRecord"))
.setRecord(record)
.build();
}
}
|
/**
 * Holds instances of a given service <T> and allows their instantiation, reuse and disposal.
 *
 * Leverages a JMS queue that stores a number of messages corresponding to the limit of how many services can be created for a given push network.
 *
 * A message is borrowed from this queue when a new service is created. The message is returned to this queue when the service is destroyed.
 *
 * That ensures that no more service instances than the given limit are created across the entire cluster of servers.
*
* @param <T> the type of the service
*/
public abstract class AbstractServiceHolder<T> {
private final Map<Key, ConcurrentLinkedQueue<DisposableReference<T>>> queueMap = new ConcurrentHashMap<>();
private final int instanceLimit;
private final long instanceAcquiringTimeoutInMillis;
private final long serviceDisposalDelayInMillis;
@Inject
private JmsClient jmsClient;
@Inject
private ServiceDisposalScheduler serviceDisposalScheduler;
/**
* Returns the Queue used as a counter of free services.
*
* This queue is populated with number of messages corresponding to limit of how many services can be created for given push network.
* The message is borrowed from this queue when new service is created.
* The message is returned to this queue when service is destroyed.
* That ensures that there won't be created more service instances in entire cluster of servers than given limit.
*
* @return the Queue used as a counter of free services.
*/
public abstract Queue getFreeServiceSlotQueue();
/**
     * Creates a new service holder.
     *
     * @param instanceLimit how many instances can be created
     * @param instanceAcquiringTimeoutInMillis the timeout after which the holder gives up waiting and may return null
* @param serviceDisposalDelayInMillis how long the service instance will be held until it is disposed for inactivity
*/
public AbstractServiceHolder(int instanceLimit, long instanceAcquiringTimeoutInMillis, long serviceDisposalDelayInMillis) {
this.instanceLimit = instanceLimit;
this.instanceAcquiringTimeoutInMillis = instanceAcquiringTimeoutInMillis;
this.serviceDisposalDelayInMillis = serviceDisposalDelayInMillis;
}
public void initialize(final String pushMessageInformationId, final String variantID) {
for (int i = 0; i < instanceLimit; i++) {
returnServiceSlotToQueue(pushMessageInformationId, variantID);
}
}
public void destroy(final String pushMessageInformationId, final String variantID) {
for (int i = 0; i < instanceLimit; i++) {
if (borrowServiceSlotFromQueue(pushMessageInformationId, variantID) == null) {
return;
}
}
}
/**
     * Returns a service for the given parameters, or uses the service constructor to instantiate a new one.
     *
     * The number of created or queued services is limited to the configured {@link #instanceLimit}.
     *
     * The call blocks until a service is available or the configured {@link #instanceAcquiringTimeoutInMillis} elapses.
     *
     * If no service is available when the timeout expires, the holder returns null.
*
* @param pushMessageInformationId the push message id
* @param variantID the variant
* @param constructor the service constructor
     * @return the service instance; or null if the instance limit was reached and no services are queued for reuse
*/
public T dequeueOrCreateNewService(final String pushMessageInformationId, final String variantID, ServiceConstructor<T> constructor) {
T instance = dequeue(pushMessageInformationId, variantID);
if (instance != null) {
return instance;
}
// there is no cached instance, try to establish one
if (borrowServiceSlotFromQueue(pushMessageInformationId, variantID) != null) {
// we have borrowed a service, we can create new instance
return constructor.construct();
}
return null;
}
/**
* Dequeues the service instance if there is one available, otherwise returns null
* @param pushMessageInformationId the push message id
* @param variantID the variant
* @return the service instance or null if no instance is queued
*/
public T dequeue(final String pushMessageInformationId, final String variantID) {
ConcurrentLinkedQueue<DisposableReference<T>> concurrentLinkedQueue = getCache(pushMessageInformationId, variantID);
DisposableReference<T> serviceHolder;
// poll queue for new instance
while ((serviceHolder = concurrentLinkedQueue.poll()) != null) {
T serviceInstance = serviceHolder.get();
// holder may hold expired instance
if (serviceInstance != null) {
return serviceInstance;
}
}
return null;
}
/**
     * Allows queueing a used and freed-up service into the cache so that it can be reused by another consumer.
*
* @param pushMessageInformationId the push message
* @param variantID the variant
* @param service the used and freed up service
* @param destroyer the instance of {@link ServiceDestroyer} used to destroy service instance
*/
public void queueFreedUpService(final String pushMessageInformationId, final String variantID, final T service, final ServiceDestroyer<T> destroyer) {
ServiceDestroyer<T> destroyAndReturnServiceSlot = new ServiceDestroyer<T>() {
@Override
public void destroy(T instance) {
destroyer.destroy(instance);
returnServiceSlotToQueue(pushMessageInformationId, variantID);
}
};
DisposableReference<T> disposableReference = new DisposableReference<>(service, destroyAndReturnServiceSlot);
serviceDisposalScheduler.scheduleForDisposal(disposableReference, serviceDisposalDelayInMillis);
getCache(pushMessageInformationId, variantID).add(disposableReference);
}
/**
     * Frees up the counter of created services, allowing waiting consumers to create new services within the limit.
     *
     * A freed-up service is one that died, disconnected, or otherwise can no longer be used.
*
* @param pushMessageInformationId the push message
* @param variantID the variant
*/
public void freeUpSlot(final String pushMessageInformationId, final String variantID) {
returnServiceSlotToQueue(pushMessageInformationId, variantID);
}
protected Object borrowServiceSlotFromQueue(String pushMessageInformationId, String variantID) {
return jmsClient.receive().withSelector("variantID = '%s'", variantID).withTimeout(instanceAcquiringTimeoutInMillis).from(getFreeServiceSlotQueue());
}
protected void returnServiceSlotToQueue(String pushMessageInformationId, String variantID) {
jmsClient.send(pushMessageInformationId + ":" + variantID).withProperty("variantID", variantID).to(getFreeServiceSlotQueue());
}
private ConcurrentLinkedQueue<DisposableReference<T>> getCache(String pushMessageInformationId, String variantID) {
return getOrCreateQueue(new Key(pushMessageInformationId, variantID));
}
private ConcurrentLinkedQueue<DisposableReference<T>> getOrCreateQueue(Key key) {
ConcurrentLinkedQueue<DisposableReference<T>> queue = queueMap.get(key);
if (queue == null) {
queue = queueMap.putIfAbsent(key, new ConcurrentLinkedQueue<>());
queue = queueMap.get(key);
}
return queue;
}
/**
* The key that is used to store a queue instance in the map.
*/
private static class Key {
private String pushMessageInformationId;
private String variantId;
        Key(String pushMessageInformationId, String variantID) {
if (pushMessageInformationId == null) {
throw new NullPointerException("pushMessageInformationId");
}
if (variantID == null) {
throw new NullPointerException("variant or its variantID cant be null");
}
this.pushMessageInformationId = pushMessageInformationId;
this.variantId = variantID;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((pushMessageInformationId == null) ? 0 : pushMessageInformationId.hashCode());
result = prime * result + ((variantId == null) ? 0 : variantId.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Key other = (Key) obj;
if (pushMessageInformationId == null) {
if (other.pushMessageInformationId != null)
return false;
} else if (!pushMessageInformationId.equals(other.pushMessageInformationId))
return false;
if (variantId == null) {
if (other.variantId != null)
return false;
} else if (!variantId.equals(other.variantId))
return false;
return true;
}
}
}
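
// A hypothetical usage sketch of the holder above. ApnsServiceHolder, ApnsClient
// and the JNDI queue name are illustrative assumptions, not part of the original
// code; the lambdas assume ServiceConstructor and ServiceDestroyer are
// single-method interfaces, as their usage above suggests.
import javax.jms.Queue;
import javax.naming.InitialContext;
import javax.naming.NamingException;

public class ApnsServiceHolder extends AbstractServiceHolder<ApnsClient> {

    public ApnsServiceHolder() {
        // At most 10 clients cluster-wide, wait up to 5s for a free slot,
        // dispose idle clients after 15s of inactivity.
        super(10, 5000L, 15000L);
    }

    @Override
    public Queue getFreeServiceSlotQueue() {
        try {
            // The queue name is an assumption for this sketch.
            return (Queue) new InitialContext().lookup("java:/queue/FreeApnsServiceSlots");
        } catch (NamingException e) {
            throw new IllegalStateException(e);
        }
    }

    public void send(String pushMessageInformationId, String variantId, String payload) {
        ApnsClient client = dequeueOrCreateNewService(pushMessageInformationId, variantId,
                () -> new ApnsClient());
        if (client == null) {
            return; // all slots are busy cluster-wide; the caller may retry later
        }
        try {
            client.push(payload);
        } finally {
            // Return the client for reuse; it is disposed after the configured delay.
            queueFreedUpService(pushMessageInformationId, variantId, client,
                    instance -> instance.close());
        }
    }
} |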
Oral Explanations in University Teaching: The Role of Projector Constructions in Spoken German
This study explores the nature of oral explanations in German university teaching and focuses particularly on projections, which are a widely-used feature in order to guide the students’ attention. Projector constructions facilitate the production and organization of complex statements and allow the speaker to draw the students’ attention to crucial pieces of information. Furthermore, projections facilitate the drawing of inferences for the listener: The projector phrase opens up semantic and syntactic slots that need to be filled, so that the number of possible contents to follow is restricted. In German, the placement of the conjugated verb in the projected unit plays a crucial role for this construction. |
/*
* Created on 11. jan.. 2007
*
* Copyright (c) 2005, <NAME> <<EMAIL> near strategoxt.org>
*
* Licensed under the GNU Lesser General Public License, v2.1
*/
package org.spoofax.interpreter.library.ssl;
import java.io.IOException;
import java.io.Reader;
import org.spoofax.interpreter.core.IContext;
import org.spoofax.interpreter.core.InterpreterException;
import org.spoofax.interpreter.library.AbstractPrimitive;
import org.spoofax.interpreter.stratego.Strategy;
import org.spoofax.interpreter.terms.IStrategoTerm;
import org.spoofax.terms.util.TermUtils;
public class SSL_fgetc extends AbstractPrimitive {
SSL_fgetc() {
super("SSL_fgetc", 0, 1);
}
@Override
public boolean call(IContext env, Strategy[] svars, IStrategoTerm[] tvars)
throws InterpreterException {
if(!TermUtils.isInt(tvars[0]))
return false;
SSLLibrary or = (SSLLibrary) env.getOperatorRegistry(SSLLibrary.REGISTRY_NAME);
Reader in = or.getIOAgent().getReader(TermUtils.toJavaInt(tvars[0]));
int r = -1;
try {
r = in.read();
} catch(IOException e) {
throw new InterpreterException(e);
}
if(r == -1)
return false;
env.setCurrent(env.getFactory().makeInt(r));
return true;
}
}
|
//Initialising the possibilities and pointers.
//It also finds squares where only one number can fit,
//so we can then permanently fix those numbers in their
//respective squares.
void init_board(single board[][SIDE]){
    int i, j;
for (i = 0; i < SIDE ; i++){
for (j = 0 ; j < SIDE ; j++){
if(board[i][j].num==0){
set_max_poss(board, i, j);
find_possibilities(board, i, j);
board[i][j].iter = board[i][j].possible.begin();
if (board[i][j].possible.size() == 2){
board[i][j].num = *board[i][j].iter;
}
}
}
}
} |
export class AccountInfo {
public address: string;
public balance: number;
public label: string;
constructor() {
this.address = null;
this.balance = null;
this.label = null;
}
}
|
#![deny(trivial_casts, trivial_numeric_casts)]
#![feature(type_ascription)]
fn main() {
let lugubrious = 12i32 as i32;
//~^ ERROR trivial numeric cast
let haunted: &u32 = &99;
let _ = haunted as *const u32;
//~^ ERROR trivial cast
}
|
// Init initializes service configurator.
func (sc *ServiceConfigurator) Init() error {
sc.natGlobalCfg = &nat.Nat44Global{
Forwarding: true,
}
return nil
} |
# ETL JSON files into CSV and PostgreSQL
# run once
# pip install csvkit
# initdb datamena
# postgres -D datamena/
# createdb tweetreplies
import os, csv, json
tweets = os.listdir('./all_tweets')
with open('./all_tweets/origin_tweets.csv', 'w') as csv1:
originTweets = csv.writer(csv1)
originTweets.writerow(['tweetid','timestamp','printname','screenname','verified','body','quotescreenname','quotetext','likes','retweets'])
with open('./all_tweets/replies.csv', 'w') as csv2:
replyTweets = csv.writer(csv2)
replyTweets.writerow(['tweetid','convoid','timestamp','printname','screenname','verified','mentions','cards','body','lang','links','likes','retweets'])
seenOrigins = {}
seenReplies = {}
for tweet in tweets:
if '.json' in tweet:
d = json.loads(open('./all_tweets/' + tweet, 'r').read())
origin = d['origin']
tweetid = origin[0]
if tweetid not in seenOrigins:
originTweets.writerow(origin)
seenOrigins[tweetid] = True
replies = d['replies']
for reply in replies:
tweetid = reply[0]
if tweetid not in seenReplies:
replyTweets.writerow(reply)
seenReplies[tweetid] = True
os.system('csvsql --insert --overwrite --db postgresql:///tweetreplies ./all_tweets/origin_tweets.csv')
os.system('csvsql --insert --overwrite --db postgresql:///tweetreplies ./all_tweets/replies.csv')
# psql tweetreplies
"""
DROP TABLE IF EXISTS origins;
DROP TABLE IF EXISTS combined;
CREATE TABLE origins AS (
SELECT tweetid AS originid, timestamp AS origintime, printname AS originname,
screenname AS originsn, verified AS originverified, body AS originbody,
quotescreenname, quotetext, likes AS originlikes, retweets AS originretweets
FROM origin_tweets
);
CREATE TABLE combined AS (
SELECT * FROM replies
JOIN origins ON replies.convoid = origins.originid
);
"""
|
/*
* Copyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include <stdio.h>
#include <string.h>
#include <openssl/rand.h>
#include <openssl/asn1t.h>
#include "internal/numbers.h"
#include "testutil.h"
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wunused-function"
#endif
#ifdef __clang__
# pragma clang diagnostic ignored "-Wunused-function"
#endif
/* Badly coded ASN.1 INTEGER zero wrapped in a sequence */
static unsigned char t_invalid_zero[] = {
0x30, 0x02, /* SEQUENCE tag + length */
0x02, 0x00 /* INTEGER tag + length */
};
#if OPENSSL_API_COMPAT < 0x10200000L
/* LONG case ************************************************************* */
typedef struct {
long test_long;
} ASN1_LONG_DATA;
ASN1_SEQUENCE(ASN1_LONG_DATA) = {
ASN1_EMBED(ASN1_LONG_DATA, test_long, LONG),
} static_ASN1_SEQUENCE_END(ASN1_LONG_DATA)
IMPLEMENT_STATIC_ASN1_ENCODE_FUNCTIONS(ASN1_LONG_DATA)
IMPLEMENT_STATIC_ASN1_ALLOC_FUNCTIONS(ASN1_LONG_DATA)
static int test_long(void)
{
const unsigned char *p = t_invalid_zero;
ASN1_LONG_DATA *dectst =
d2i_ASN1_LONG_DATA(NULL, &p, sizeof(t_invalid_zero));
if (dectst == NULL)
return 0; /* Fail */
ASN1_LONG_DATA_free(dectst);
return 1;
}
#endif
/* INT32 case ************************************************************* */
typedef struct {
int32_t test_int32;
} ASN1_INT32_DATA;
ASN1_SEQUENCE(ASN1_INT32_DATA) = {
ASN1_EMBED(ASN1_INT32_DATA, test_int32, INT32),
} static_ASN1_SEQUENCE_END(ASN1_INT32_DATA)
IMPLEMENT_STATIC_ASN1_ENCODE_FUNCTIONS(ASN1_INT32_DATA)
IMPLEMENT_STATIC_ASN1_ALLOC_FUNCTIONS(ASN1_INT32_DATA)
static int test_int32(void)
{
const unsigned char *p = t_invalid_zero;
ASN1_INT32_DATA *dectst =
d2i_ASN1_INT32_DATA(NULL, &p, sizeof(t_invalid_zero));
if (dectst == NULL)
return 0; /* Fail */
ASN1_INT32_DATA_free(dectst);
return 1;
}
/* UINT32 case ************************************************************* */
typedef struct {
uint32_t test_uint32;
} ASN1_UINT32_DATA;
ASN1_SEQUENCE(ASN1_UINT32_DATA) = {
ASN1_EMBED(ASN1_UINT32_DATA, test_uint32, UINT32),
} static_ASN1_SEQUENCE_END(ASN1_UINT32_DATA)
IMPLEMENT_STATIC_ASN1_ENCODE_FUNCTIONS(ASN1_UINT32_DATA)
IMPLEMENT_STATIC_ASN1_ALLOC_FUNCTIONS(ASN1_UINT32_DATA)
static int test_uint32(void)
{
const unsigned char *p = t_invalid_zero;
ASN1_UINT32_DATA *dectst =
d2i_ASN1_UINT32_DATA(NULL, &p, sizeof(t_invalid_zero));
if (dectst == NULL)
return 0; /* Fail */
ASN1_UINT32_DATA_free(dectst);
return 1;
}
/* INT64 case ************************************************************* */
typedef struct {
int64_t test_int64;
} ASN1_INT64_DATA;
ASN1_SEQUENCE(ASN1_INT64_DATA) = {
ASN1_EMBED(ASN1_INT64_DATA, test_int64, INT64),
} static_ASN1_SEQUENCE_END(ASN1_INT64_DATA)
IMPLEMENT_STATIC_ASN1_ENCODE_FUNCTIONS(ASN1_INT64_DATA)
IMPLEMENT_STATIC_ASN1_ALLOC_FUNCTIONS(ASN1_INT64_DATA)
static int test_int64(void)
{
const unsigned char *p = t_invalid_zero;
ASN1_INT64_DATA *dectst =
d2i_ASN1_INT64_DATA(NULL, &p, sizeof(t_invalid_zero));
if (dectst == NULL)
return 0; /* Fail */
ASN1_INT64_DATA_free(dectst);
return 1;
}
/* UINT64 case ************************************************************* */
typedef struct {
uint64_t test_uint64;
} ASN1_UINT64_DATA;
ASN1_SEQUENCE(ASN1_UINT64_DATA) = {
ASN1_EMBED(ASN1_UINT64_DATA, test_uint64, UINT64),
} static_ASN1_SEQUENCE_END(ASN1_UINT64_DATA)
IMPLEMENT_STATIC_ASN1_ENCODE_FUNCTIONS(ASN1_UINT64_DATA)
IMPLEMENT_STATIC_ASN1_ALLOC_FUNCTIONS(ASN1_UINT64_DATA)
static int test_uint64(void)
{
const unsigned char *p = t_invalid_zero;
ASN1_UINT64_DATA *dectst =
d2i_ASN1_UINT64_DATA(NULL, &p, sizeof(t_invalid_zero));
if (dectst == NULL)
return 0; /* Fail */
ASN1_UINT64_DATA_free(dectst);
return 1;
}
int setup_tests(void)
{
#if OPENSSL_API_COMPAT < 0x10200000L
ADD_TEST(test_long);
#endif
ADD_TEST(test_int32);
ADD_TEST(test_uint32);
ADD_TEST(test_int64);
ADD_TEST(test_uint64);
return 1;
}
|
/**
* Integration tests showing basic CRUD operations through
* {@link org.springframework.data.couchbase.core.ReactiveCouchbaseOperations}.
*
* @author Mark Paluch
* @author Denis Rosa
*/
@RunWith(SpringRunner.class)
@SpringBootTest
public class ReactiveJavaCouchbaseOperationsIntegrationTests {
@ClassRule //
public static CouchbaseAvailableRule COUCHBASE = CouchbaseAvailableRule.onLocalhost();
@Autowired
ReactiveCouchbaseOperations operations;
@Autowired
CouchbaseOperations couchbaseOperations;
@Before
public void before() {
if (couchbaseOperations.existsById().one("LH")) {
couchbaseOperations.removeById().one("LH");
}
}
/**
* Find all {@link Airline}s, applying the {@code _class} filter.
*/
@Test
public void shouldFindByAll() {
operations.findByQuery(Airline.class).all() //
.count() //
.as(StepVerifier::create) //
.assertNext(count -> {
assertThat(count).isGreaterThan(100);
}) //
.verifyComplete();
}
/**
* Created elements are emitted by {@link ReactiveCouchbaseOperations#upsertById(Class)}.
*/
@Test
public void shouldCreateAirline() {
Airline airline = new Airline();
airline.setId("LH");
airline.setIata("LH");
airline.setIcao("DLH");
airline.setCallsign("Lufthansa");
airline.setName("Lufthansa");
airline.setCountry("Germany");
Mono<Airline> airlineMono = operations.upsertById(Airline.class)
.one(airline) //
.map(Airline::getId) //
.flatMap(id -> operations.findById(Airline.class).one(id));
airlineMono.as(StepVerifier::create) //
.expectNext(airline).verifyComplete();
}
} |
/*
 * Restores persisted app session information from disk. The persisted file is
 * always deleted afterwards, so the in-memory map becomes authoritative whether
 * or not the read succeeds.
 *
 * Reconstructed in source form from the decompiler's instruction dump; the
 * Log.w call in the catch block is inferred from the string-building code the
 * decompiler retained.
 */
@SuppressWarnings("unchecked")
private static void restoreAppSessionInformation(android.content.Context context) {
    synchronized (staticLock) {
        if (!isLoaded) {
            java.io.ObjectInputStream ois = null;
            try {
                ois = new java.io.ObjectInputStream(
                        context.openFileInput("AppEventsLogger.persistedsessioninfo"));
                appSessionInfoMap = (java.util.HashMap<AccessTokenAppIdPair, FacebookTimeSpentData>)
                        ois.readObject();
                com.facebook.internal.Logger.log(
                        com.facebook.LoggingBehavior.APP_EVENTS,
                        "AppEvents",
                        "App session info loaded");
            } catch (java.io.FileNotFoundException e) {
                // No persisted session info; fall through to the cleanup below.
            } catch (Exception e) {
                android.util.Log.w(TAG,
                        "Got unexpected exception restoring app session info: " + e.toString());
            } finally {
                com.facebook.internal.Utility.closeQuietly(ois);
                context.deleteFile("AppEventsLogger.persistedsessioninfo");
                if (appSessionInfoMap == null) {
                    appSessionInfoMap =
                            new java.util.HashMap<AccessTokenAppIdPair, FacebookTimeSpentData>();
                }
                // The cache file is deleted regardless of the outcome, so always
                // treat the session information as loaded.
                isLoaded = true;
                hasChanges = false;
            }
        }
    }
} |
/**
* Represent a bridge between two islands in the game Bridges.
* <p>
* Defined as immutable object, following:
* https://docs.oracle.com/javase/tutorial/essential/concurrency/imstrat.html
*
* @author Maik Messerschmidt
*/
final public class Bridge implements Comparable<Bridge> {
final private Island first;
final private Island second;
final private boolean isDouble;
/**
* Create a new Bridge instance.
*
* @param island1 - The first island of the bridge.
* @param island2 - The second island of the bridge.
* @param isDouble - Whether or not the bridge is a double bridge.
* @throws IllegalArgumentException if one of the islands is null or the islands don't match in either x or y coordinate.
*/
public Bridge(Island island1, Island island2, boolean isDouble) throws IllegalArgumentException {
if (island1 == null || island2 == null)
throw new IllegalArgumentException("Bridge constructor needs two Islands, not null.");
if (island1.getX() != island2.getX() && island1.getY() != island2.getY())
throw new IllegalArgumentException("Islands must match in x or y coordinate.");
this.isDouble = isDouble;
this.first = island1;
this.second = island2;
}
/**
* @return The first island of the bridge.
*/
public Island getFirstIsland() {
return first;
}
/**
* @return The second island of the bridge.
*/
public Island getSecondIsland() {
return second;
}
/**
* @return Whether or not this is a double bridge.
*/
public boolean isDouble() {
return isDouble;
}
/**
* Check, if the given island is connected by this bridge.
*
* @param island the island to check for.
* @return true, if it is the case, false otherwise.
*/
public boolean hasIsland(Island island) {
return first.equals(island) || second.equals(island);
}
/**
* @return The bridge as a String (used for debugging only).
*/
@Override
public String toString() {
return "Bridge(" + first + ", " + second + ", " + isDouble + ")";
}
/**
* @return true, if the other object is a bridge and connects the
* same islands and is also a double (or a single) bridge,
* false otherwise.
*/
@Override
public boolean equals(Object other) {
if (other instanceof Bridge)
return equals((Bridge) other);
else
return false;
}
/**
* Check, if this bridge is equal to another bridge.
*
* @param other The other bridge.
* @return true, if the bridge connects the
* same islands and is also a double (or a single) bridge,
* false otherwise.
*/
public boolean equals(Bridge other) {
if (other == null)
return false;
else
return compareTo(other) == 0;
}
/**
* @return a hashCode for this bridge.
*/
@Override
public int hashCode() {
Set<Island> islands = new HashSet<Island>(
Arrays.asList(new Island[]{first, second}));
return Objects.hash(islands, isDouble);
}
/**
* Return if the other bridge crosses (or covers) this bridge.
*
* @param other The other bridge.
* @return true, if the other bridge crosses this one.
*/
public boolean crosses(Bridge other) {
/* case 1:
* (x3, y3)
* |
* (x1, y1) ---------+------- (x2, y2)
* |
* |
* (x4, y4)
*
*
* case 2:
* (x1, y1)
* |
* (x3, y3) ---------+------- (x4, y4)
* |
* |
* (x2, y2)
*/
int x1 = first.getX();
int y1 = first.getY();
int x2 = second.getX();
int y2 = second.getY();
int x3 = other.getFirstIsland().getX();
int y3 = other.getFirstIsland().getY();
int x4 = other.getSecondIsland().getX();
int y4 = other.getSecondIsland().getY();
return (between(x1, x3, x2) && between(y3, y1, y4)) ||
(between(x3, x1, x4) && between(y1, y3, y2));
}
/**
* Check, if any of the given bridges crosses this one.
*
* @param others A list of other bridges.
* @return true, if any of the other bridges crosses this bridge, false otherwise.
*/
public boolean crosses(List<Bridge> others) {
for (Bridge other : others) {
if (crosses(other))
return true;
}
return false;
}
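    /*
     * Usage sketch (illustrative, not from the original code base; assumes an
     * Island(x, y) constructor):
     *
     *   Bridge b1 = new Bridge(new Island(0, 1), new Island(4, 1), false);
     *   Bridge b2 = new Bridge(new Island(2, 0), new Island(2, 3), false);
     *   b1.crosses(b2); // true: the vertical bridge passes through (2, 1)
     */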
/**
* Helper method which checks, if the mid value lies between start and end.
*
* @param start - first (or last) value.
* @param mid - value, that has to be in the middle of the other two.
* @param end - last (or first) value.
* @return true, if mid lies between start and end (not including).
*/
private boolean between(int start, int mid, int end) {
return ((start < mid && mid < end) || (start > mid && mid > end));
}
/**
* Return the total number of bridges in the given list.
* Double bridges count as two, single bridges count as one.
*
* @param bridges The list of bridges to be counted.
* @return The total number of bridges.
*/
public static int count(List<Bridge> bridges) {
int count = 0;
for (Bridge b : bridges) {
if (b.isDouble())
count += 2;
else
count += 1;
}
return count;
}
/**
* Return if this bridge is covered by the given list
* (which means a bridge - either single or double - with
* the same islands exists).
*
* @param bridges The list of bridges to search in.
* @return Whether or not a bridge with the same start and end are present.
*/
public boolean isCovered(List<Bridge> bridges) {
if (bridges.contains(this))
return true;
else if (bridges.contains(new Bridge(first, second, !isDouble)))
return true;
else
return false;
}
/**
* Compares this bridge to another.
* <br><br>
* A bridge is less than another bridge
* if it owns the first island within a sorted
* list of the islands of both bridges,
* or, when the islands are equal, if the boolean value of its isDouble()
* is less than that of the other bridge's isDouble().
*
* @return -1, if bridge is smaller than other,
* 0, if bridge equals other,
* +1, if bridge is greater than other.
*/
@Override
public int compareTo(Bridge other) {
List<Island> islands = Arrays.asList(new Island[]{
this.getFirstIsland(),
this.getSecondIsland(),
other.getFirstIsland(),
other.getSecondIsland()
});
Collections.sort(islands);
for (Island island : islands) {
if (hasIsland(island) && !other.hasIsland(island))
return -1;
else if (other.hasIsland(island) && !hasIsland(island))
return 1;
}
return Boolean.compare(isDouble, other.isDouble);
}
/**
* Check, if the given island is cut by this bridge.
*
* @param island The island to be checked.
* @return true, if the island is cut, false otherwise.
*/
public boolean cuts(Island island) {
int dx = second.getX() - first.getX();
if (dx == 0)
return island.getX() == first.getX()
&& between(first.getY(), island.getY(), second.getY());
else
return island.getY() == first.getY()
&& between(first.getX(), island.getX(), second.getX());
}
} |
import classnames from 'classnames';
import styles from './Badge.module.css';
type BadgeProps = {
content: string;
size?: 'small' | 'medium';
rectangular?: boolean;
backgroundColor: string;
};
/**
* A Badge with test included.
*
* @param content Text to show on the Badge
* @param size Can be `medium` or `small`. Default is `medium`
* @param rectangular If the badge is rectangular or rounded. Default is `false`
* @param backgroundColor Background of the Badge.
* @returns The rendered badge element.
*/
export function Badge({
content,
size = 'medium',
rectangular = false,
backgroundColor,
}: BadgeProps) {
const badgeInlineStyles = { backgroundColor: backgroundColor };
const badgeClasses = classnames(
styles.badge,
{ [styles.small]: size === 'small' },
{ [styles.rectangular]: rectangular },
);
return (
<div className={badgeClasses} style={badgeInlineStyles}>
{content}
</div>
);
}
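// Usage sketch (illustrative; the color value is an arbitrary example):
//
//   <Badge content="New" size="small" rectangular backgroundColor="#e91e63" />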
|
/**
* Adapts a BitstampBalance to an AccountInfo
*
* @param bitstampBalance The Bitstamp balance
* @param userName The user name
* @return The account info
*/
public static AccountInfo adaptAccountInfo(BitstampBalance bitstampBalance, String userName) {
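    // Amount in withdrawal = total balance - available - reserved, per currency.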
final BigDecimal usdWithdrawing = bitstampBalance.getUsdBalance().subtract(bitstampBalance.getUsdAvailable())
.subtract(bitstampBalance.getUsdReserved());
final BigDecimal eurWithdrawing = bitstampBalance.getEurBalance().subtract(bitstampBalance.getEurAvailable())
.subtract(bitstampBalance.getEurReserved());
final BigDecimal btcWithdrawing = bitstampBalance.getBtcBalance().subtract(bitstampBalance.getBtcAvailable())
.subtract(bitstampBalance.getBtcReserved());
Balance usdBalance = new Balance(Currency.USD, bitstampBalance.getUsdBalance(), bitstampBalance.getUsdAvailable(),
bitstampBalance.getUsdReserved(), ZERO, ZERO, usdWithdrawing, ZERO);
Balance eurBalance = new Balance(Currency.EUR, bitstampBalance.getEurBalance(), bitstampBalance.getEurAvailable(),
bitstampBalance.getEurReserved(), ZERO, ZERO, eurWithdrawing, ZERO);
Balance btcBalance = new Balance(Currency.BTC, bitstampBalance.getBtcBalance(), bitstampBalance.getBtcAvailable(),
bitstampBalance.getBtcReserved(), ZERO, ZERO, btcWithdrawing, ZERO);
return new AccountInfo(userName, bitstampBalance.getFee(), new Wallet(usdBalance, eurBalance, btcBalance));
} |
import classNames from 'classnames';
import type { PropsWithChildren } from 'react';
type FooterProps = PropsWithChildren<{
className?: string;
}>;
const Footer = ({ children, ...props }: FooterProps) => {
const className = classNames('p-4 pt-6', props.className);
return (
<footer className={className} style={{ gridArea: 'footer' }}>
{children}
</footer>
);
};
export default Footer;
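// Usage sketch (illustrative):
//
//   <Footer className="text-sm">© 2024 Example</Footer>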
|
#include <bitset>
using namespace std;

// N -> No. of states, M -> Size of input alphabet.
// Assumed globals (defined elsewhere): N, MAX_NFA_STATES, and NFAstates[],
// where transitions[0] lists a state's epsilon transitions, terminated by -1.
// Finds the epsilon closure of the NFA state "state" and stores it into "closure".
void epsilonClosure(int state, bitset<MAX_NFA_STATES> &closure)
{
for (int i = 0; i < N && NFAstates[state].transitions[0][i] != -1; i++)
if (closure[NFAstates[state].transitions[0][i]] == 0)
{
closure[NFAstates[state].transitions[0][i]] = 1;
epsilonClosure(NFAstates[state].transitions[0][i], closure);
}
} |
/**
* Returns an effectively unlimited stream of pseudorandom {@code
* long} values from this generator and/or one split from it.
*
* @implNote This method is implemented to be equivalent to {@code
* longs(Long.MAX_VALUE)}.
*
* @return a stream of pseudorandom {@code long} values
*/
public LongStream longs() {
return StreamSupport.longStream
(new RandomLongsSpliterator
(this, 0L, Long.MAX_VALUE, Long.MAX_VALUE, 0L),
false);
} |
"""
Copies in a lot of code from discord.py because I want direct access to rate limit information
This works with internals I shouldn't be accessing, so hopefully discord.py adds a public way to get rate-limit info soon
"""
import asyncio
import datetime
import logging
import discord
import discord.http
from discord import utils
from discord.errors import HTTPException, Forbidden, NotFound
log = logging.getLogger('discord')
global_over = None
class RateLimitInfo(object):
def __init__(self):
self.limit = None
self.remaining = None
self.reset = None
self.now = None
def __str__(self):
return 'Limit {0}, Remaining {1}, Reset {2}, Now {3}, Delta {4}'.format(
self.limit, self.remaining, self.reset, self.now, self.time_until_reset)
@property
def time_until_reset(self) -> datetime.timedelta:
delta = self.reset - self.now
if delta.total_seconds() >= 0:
return delta
else:
return datetime.timedelta(seconds=0)
# noinspection PyProtectedMember
@asyncio.coroutine
def send_and_get_rate_limit(
client: discord.Client,
channel: discord.Channel,
content: str
):
global global_over
global_over = asyncio.Event(loop=asyncio.get_event_loop())
global_over.set()
channel_id, guild_id = yield from client._resolve_destination(channel)
rate_limit_info = RateLimitInfo()
data = yield from send_message(client.http, channel_id, content, rate_limit_info)
channel = client.get_channel(data.get('channel_id'))
# noinspection PyArgumentList
message = client.connection._create_message(channel=channel, **data)
return message, rate_limit_info
@asyncio.coroutine
def send_message(
self: discord.http.HTTPClient,
channel_id,
content,
rate_limit_info,
*,
tts=False,
embed=None
):
r = discord.http.Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id)
payload = {}
if content:
payload['content'] = content
if tts:
payload['tts'] = True
if embed:
payload['embed'] = embed
return request(self, r, rate_limit_info, json=payload)
@asyncio.coroutine
def request(
self: discord.http.HTTPClient,
route: discord.http.Route,
rate_limit_info: RateLimitInfo,
*,
header_bypass_delay=None,
**kwargs
):
global global_over
bucket = route.bucket
method = route.method
url = route.url
lock = self._locks.get(bucket)
if lock is None:
lock = asyncio.Lock(loop=self.loop)
if bucket is not None:
self._locks[bucket] = lock
# header creation
headers = {
'User-Agent': self.user_agent,
}
if self.token is not None:
headers['Authorization'] = 'Bot ' + self.token if self.bot_token else self.token
# some checking if it's a JSON request
if 'json' in kwargs:
headers['Content-Type'] = 'application/json'
kwargs['data'] = utils.to_json(kwargs.pop('json'))
kwargs['headers'] = headers
if not global_over.is_set():
# wait until the global lock is complete
yield from global_over.wait()
yield from lock
with discord.http.MaybeUnlock(lock) as maybe_lock:
for tries in range(5):
r = yield from self.session.request(method, url, **kwargs)
log.debug(self.REQUEST_LOG.format(method=method, url=url, status=r.status, json=kwargs.get('data')))
try:
# even errors have text involved in them so this is safe to call
data = yield from discord.http.json_or_text(r)
# check if we have rate limit header information
rate_limit_info.remaining = r.headers.get('X-Ratelimit-Remaining')
if rate_limit_info.remaining is not None:
rate_limit_info.limit = r.headers['X-Ratelimit-Limit']
rate_limit_info.now = discord.http.parsedate_to_datetime(r.headers['Date'])
rate_limit_info.reset = datetime.datetime.fromtimestamp(
int(r.headers['X-Ratelimit-Reset']),
datetime.timezone.utc
)
if rate_limit_info.remaining == '0' and r.status != 429:
# we've depleted our current bucket
if header_bypass_delay is None:
now = discord.http.parsedate_to_datetime(r.headers['Date'])
reset = datetime.datetime.fromtimestamp(
int(r.headers['X-Ratelimit-Reset']),
datetime.timezone.utc
)
delta = (reset - now).total_seconds()
else:
delta = header_bypass_delay
fmt = 'A rate limit bucket has been exhausted (bucket: {bucket}, retry: {delta}).'
log.info(fmt.format(bucket=bucket, delta=delta))
maybe_lock.defer()
self.loop.call_later(delta, lock.release)
# the request was successful so just return the text/json
if 300 > r.status >= 200:
log.debug(self.SUCCESS_LOG.format(method=method, url=url, text=data))
return data
# we are being rate limited
if r.status == 429:
fmt = 'We are being rate limited. Retrying in {:.2} seconds. Handled under the bucket "{}"'
# sleep a bit
retry_after = data['retry_after'] / 1000.0
log.info(fmt.format(retry_after, bucket))
# check if it's a global rate limit
is_global = data.get('global', False)
if is_global:
log.info('Global rate limit has been hit. Retrying in {:.2} seconds.'.format(retry_after))
global_over.clear()
yield from asyncio.sleep(retry_after, loop=self.loop)
log.debug('Done sleeping for the rate limit. Retrying...')
# release the global lock now that the
# global rate limit has passed
if is_global:
global_over.set()
log.debug('Global rate limit is now over.')
continue
# we've received a 502, unconditional retry
if r.status == 502 and tries <= 5:
yield from asyncio.sleep(1 + tries * 2, loop=self.loop)
continue
# the usual error cases
if r.status == 403:
raise Forbidden(r, data)
elif r.status == 404:
raise NotFound(r, data)
else:
raise HTTPException(r, data)
finally:
# clean-up just in case
yield from r.release()
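# Usage sketch (illustrative; assumes a connected discord.Client and a Channel):
#
#     message, info = yield from send_and_get_rate_limit(client, channel, 'hello')
#     print(info)  # e.g. "Limit 5, Remaining 4, Reset ..., Now ..., Delta ..."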
|
// common/util/ptr_vec.hpp
#pragma once
#include <cassert>
#include <memory>
#include <type_traits>
#include <vector>
#include "algorithm.hpp"
namespace cloth::util {
/// An iterator wrapper that dereferences twice.
template<typename Iter>
struct double_iterator {
using wrapped = Iter;
using value_type = std::decay_t<decltype(*std::declval<typename wrapped::value_type>())>;
using difference_type = typename wrapped::difference_type;
using reference = value_type&;
using pointer = value_type*;
using iterator_category = std::random_access_iterator_tag;
using self_t = double_iterator<Iter>;
double_iterator(wrapped w) : _iter(std::move(w)) {}
double_iterator() : _iter() {}
reference operator*() const
{
return (**_iter);
}
pointer operator->() const
{
return &(**_iter);
}
self_t& operator++()
{
_iter.operator++();
return *this;
}
self_t operator++(int i)
{
return _iter.operator++(i);
}
self_t& operator--()
{
_iter.operator--();
return *this;
}
self_t operator--(int i)
{
return _iter.operator--(i);
}
auto operator==(const self_t& rhs) const noexcept
{
return _iter == rhs._iter;
}
auto operator!=(const self_t& rhs) const noexcept
{
return _iter != rhs._iter;
}
auto operator<(const self_t& rhs) const noexcept
{
return _iter < rhs._iter;
}
auto operator>(const self_t& rhs) const noexcept
{
return _iter > rhs._iter;
}
auto operator<=(const self_t& rhs) const noexcept
{
return _iter <= rhs._iter;
}
auto operator>=(const self_t& rhs) const noexcept
{
return _iter >= rhs._iter;
}
self_t operator+(difference_type d) const noexcept
{
return _iter + d;
}
self_t operator-(difference_type d) const noexcept
{
return _iter - d;
}
auto operator-(const self_t& rhs) const noexcept
{
return _iter - rhs._iter;
}
self_t& operator+=(difference_type d)
{
_iter += d;
return *this;
}
self_t& operator-=(difference_type d)
{
_iter -= d;
return *this;
}
operator wrapped&()
{
return _iter;
}
operator const wrapped&() const
{
return _iter;
}
wrapped& data()
{
return _iter;
}
const wrapped& data() const
{
return _iter;
}
private:
wrapped _iter;
};
template<typename Iter>
auto operator+(typename double_iterator<Iter>::difference_type diff, double_iterator<Iter> iter)
{
return iter + diff;
}
/// To avoid clients being moved, they are stored in unique_ptrs, which are
/// moved around in a vector. This class is purely for convenience, to still
/// have iterator semantics, and a few other utility functions
template<typename T>
struct ptr_vec {
using value_type = T;
std::vector<std::unique_ptr<value_type>> _order;
using iterator = double_iterator<typename decltype(_order)::iterator>;
using const_iterator = double_iterator<typename decltype(_order)::const_iterator>;
using reverse_iterator = double_iterator<typename decltype(_order)::reverse_iterator>;
using const_reverse_iterator =
double_iterator<typename decltype(_order)::const_reverse_iterator>;
value_type& push_back(const value_type& v)
{
auto ptr = std::make_unique<value_type>(v);
auto res = ptr.get();
_order.push_back(std::move(ptr));
return *res;
}
value_type& push_back(value_type&& v)
{
auto ptr = std::make_unique<value_type>(std::move(v));
auto res = ptr.get();
_order.push_back(std::move(ptr));
return *res;
}
value_type& push_back(std::unique_ptr<value_type> ptr)
{
auto res = ptr.get();
_order.push_back(std::move(ptr));
return *res;
}
template<typename... Args>
value_type& emplace_back(Args&&... args)
{
return push_back(std::make_unique<value_type>(std::forward<Args>(args)...));
}
std::unique_ptr<value_type> erase(const value_type& v)
{
auto iter =
std::find_if(_order.begin(), _order.end(), [&v](auto&& uptr) { return uptr.get() == &v; });
if (iter != _order.end()) {
auto uptr = std::move(*iter);
_order.erase(iter);
return uptr;
}
return nullptr;
}
iterator rotate_to_back(const value_type& v)
{
auto iter =
std::find_if(_order.begin(), _order.end(), [&v](auto&& uptr) { return uptr.get() == &v; });
return rotate_to_back(iter);
}
iterator rotate_to_back(iterator iter)
{
if (iter != _order.end()) {
{
return std::rotate(iter.data(), iter.data() + 1, _order.end());
}
}
return end();
}
iterator rotate_to_front(const value_type& v)
{
auto iter =
std::find_if(_order.begin(), _order.end(), [&v](auto&& uptr) { return uptr.get() == &v; });
return rotate_to_front(iter);
}
iterator rotate_to_front(iterator iter)
{
if (iter != _order.end()) {
{
return std::rotate(_order.begin(), iter.data(), iter.data() + 1);
}
}
return end();
}
std::size_t size() const noexcept
{
return _order.size();
}
bool empty() const noexcept
{
return _order.empty();
}
std::size_t capacity() const noexcept
{
return _order.capacity();
}
std::size_t max_size() const noexcept
{
return _order.max_size();
}
void reserve(std::size_t new_cap)
{
_order.reserve(new_cap);
}
void shrink_to_fit()
{
_order.shrink_to_fit();
}
value_type& operator[](std::size_t n)
{
return *_order[n];
}
const value_type& operator[](std::size_t n) const
{
return *_order[n];
}
value_type& at(std::size_t n)
{
return *_order.at(n);
}
const value_type& at(std::size_t n) const
{
return *_order.at(n);
}
iterator begin()
{
return _order.begin();
}
iterator end()
{
return _order.end();
}
const_iterator begin() const
{
return _order.begin();
}
const_iterator end() const
{
return _order.end();
}
reverse_iterator rbegin()
{
return _order.rbegin();
}
reverse_iterator rend()
{
return _order.rend();
}
const_reverse_iterator rbegin() const
{
return _order.rbegin();
}
const_reverse_iterator rend() const
{
return _order.rend();
}
value_type& front()
{
return *_order.front();
}
value_type& back()
{
return *_order.back();
}
const value_type& front() const
{
return *_order.front();
}
const value_type& back() const
{
return *_order.back();
}
std::vector<std::unique_ptr<value_type>>& underlying() {
return _order;
}
};
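// Usage sketch (illustrative; Widget is a placeholder type):
//
//   cloth::util::ptr_vec<Widget> widgets;
//   Widget& w = widgets.emplace_back();   // constructed inside a unique_ptr
//   for (Widget& each : widgets) { /* iterates by reference, not by pointer */ }
//   widgets.rotate_to_front(w);           // reorders pointers; w itself never moves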
template<typename T, typename T2>
std::unique_ptr<T> erase_this(ptr_vec<T>& vec, T2* el)
{
return vec.erase(*el);
}
template<typename T, typename T2>
std::unique_ptr<T> erase_this(ptr_vec<T>& vec, T2& el)
{
return vec.erase(el);
}
template<typename T>
struct shared_ptr_vec {
using value_type = T;
std::vector<std::shared_ptr<value_type>> _order;
using iterator = double_iterator<typename decltype(_order)::iterator>;
using const_iterator = double_iterator<typename decltype(_order)::const_iterator>;
using reverse_iterator = double_iterator<typename decltype(_order)::reverse_iterator>;
using const_reverse_iterator =
double_iterator<typename decltype(_order)::const_reverse_iterator>;
value_type& push_back(const value_type& v)
{
auto ptr = std::make_shared<value_type>(v);
auto res = ptr.get();
_order.push_back(std::move(ptr));
return *res;
}
value_type& push_back(value_type&& v)
{
auto ptr = std::make_shared<value_type>(std::move(v));
auto res = ptr.get();
_order.push_back(std::move(ptr));
return *res;
}
value_type& push_back(std::shared_ptr<value_type> ptr)
{
auto res = ptr.get();
_order.push_back(std::move(ptr));
return *res;
}
template<typename... Args>
value_type& emplace_back(Args&&... args)
{
return push_back(std::make_shared<value_type>(std::forward<Args>(args)...));
}
std::shared_ptr<value_type> erase(const value_type& v)
{
auto iter =
std::find_if(_order.begin(), _order.end(), [&v](auto&& uptr) { return uptr.get() == &v; });
if (iter != _order.end()) {
auto uptr = std::move(*iter);
_order.erase(iter);
return uptr;
}
return nullptr;
}
iterator rotate_to_back(const value_type& v)
{
auto iter =
std::find_if(_order.begin(), _order.end(), [&v](auto&& uptr) { return uptr.get() == &v; });
return rotate_to_back(iter);
}
iterator rotate_to_back(iterator iter)
{
if (iter != _order.end()) {
{
return std::rotate(iter.data(), iter.data() + 1, _order.end());
}
}
return end();
}
iterator rotate_to_front(const value_type& v)
{
auto iter =
std::find_if(_order.begin(), _order.end(), [&v](auto&& uptr) { return uptr.get() == &v; });
return rotate_to_front(iter);
}
iterator rotate_to_front(iterator iter)
{
if (iter != _order.end()) {
{
return std::rotate(_order.begin(), iter.data(), iter.data() + 1);
}
}
return end();
}
std::size_t size() const noexcept
{
return _order.size();
}
bool empty() const noexcept
{
return _order.empty();
}
std::size_t capacity() const noexcept
{
return _order.capacity();
}
std::size_t max_size() const noexcept
{
return _order.max_size();
}
void reserve(std::size_t new_cap)
{
_order.reserve(new_cap);
}
void shrink_to_fit()
{
_order.shrink_to_fit();
}
value_type& operator[](std::size_t n)
{
return *_order[n];
}
const value_type& operator[](std::size_t n) const
{
return *_order[n];
}
value_type& at(std::size_t n)
{
return *_order.at(n);
}
const value_type& at(std::size_t n) const
{
return *_order.at(n);
}
iterator begin()
{
return _order.begin();
}
iterator end()
{
return _order.end();
}
const_iterator begin() const
{
return _order.begin();
}
const_iterator end() const
{
return _order.end();
}
reverse_iterator rbegin()
{
return _order.rbegin();
}
reverse_iterator rend()
{
return _order.rend();
}
const_reverse_iterator rbegin() const
{
return _order.rbegin();
}
const_reverse_iterator rend() const
{
return _order.rend();
}
value_type& front()
{
return *_order.front();
}
value_type& back()
{
return *_order.back();
}
const value_type& front() const
{
return *_order.front();
}
const value_type& back() const
{
return *_order.back();
}
std::vector<std::shared_ptr<value_type>>& underlying() {
return _order;
}
};
template<typename T, typename T2>
std::shared_ptr<T> erase_this(shared_ptr_vec<T>& vec, T2* el)
{
return vec.erase(*el);
}
template<typename T, typename T2>
std::shared_ptr<T> erase_this(shared_ptr_vec<T>& vec, T2& el)
{
return vec.erase(el);
}
template<typename T>
struct non_null_ptr {
non_null_ptr() = delete;
constexpr non_null_ptr(T* ptr) : _ptr(ptr)
{
assert(ptr != nullptr);
}
non_null_ptr(std::nullptr_t) = delete;
constexpr non_null_ptr(const non_null_ptr&) = default;
constexpr non_null_ptr(non_null_ptr&&) = default;
constexpr non_null_ptr& operator=(const non_null_ptr&) = default;
constexpr non_null_ptr& operator=(non_null_ptr&&) = default;
constexpr T& operator*() const noexcept
{
return *_ptr;
}
constexpr T* operator->() const noexcept
{
return _ptr;
}
constexpr operator T*() noexcept
{
return _ptr;
}
constexpr operator T* const() const noexcept
{
return _ptr;
}
private:
T* _ptr;
};
template<typename T>
struct ref_vec {
using value_type = T;
std::vector<value_type*> _order;
using iterator = double_iterator<typename decltype(_order)::iterator>;
using const_iterator = double_iterator<typename decltype(_order)::const_iterator>;
using reverse_iterator = double_iterator<typename decltype(_order)::reverse_iterator>;
using const_reverse_iterator =
double_iterator<typename decltype(_order)::const_reverse_iterator>;
ref_vec() = default;
ref_vec(std::initializer_list<value_type*> lst) : _order {lst} { };
template<typename InputIter, typename = std::enable_if_t<std::is_same_v<decltype(*std::declval<InputIter>()), value_type&>>>
ref_vec(InputIter iter1, InputIter iter2) {
_order.reserve(std::distance(iter1, iter2));
std::transform(iter1, iter2, std::back_inserter(_order), [] (auto& v) {return &v; });
}
template<typename Range, typename = std::enable_if_t<std::is_same_v<decltype(*std::declval<Range>().begin()), value_type&>>>
ref_vec(Range&& rng) : ref_vec (std::begin(rng), std::end(rng)) { }
value_type& push_back(value_type& v)
{
_order.push_back(&v);
return v;
}
value_type& push_back(non_null_ptr<value_type> ptr)
{
_order.push_back(ptr);
return *ptr;
}
value_type& emplace_back(value_type& v)
{
return push_back(v);
}
  // ref_vec does not own its elements, so erase returns the raw pointer
  // (or nullptr) rather than a unique_ptr.
  value_type* erase(const value_type& v)
  {
    auto iter =
      std::find_if(_order.begin(), _order.end(), [&v](auto&& ptr) { return ptr == &v; });
    if (iter != _order.end()) {
      auto* ptr = *iter;
      _order.erase(iter);
      return ptr;
    }
    return nullptr;
  }
iterator rotate_to_back(const value_type& v)
{
auto iter =
std::find_if(_order.begin(), _order.end(), [&v](auto&& ptr) { return ptr == &v; });
return rotate_to_back(iter);
}
iterator rotate_to_back(iterator iter)
{
if (iter != _order.end()) {
{
return std::rotate(iter.data(), iter.data() + 1, _order.end());
}
}
return end();
}
iterator rotate_to_front(const value_type& v)
{
auto iter =
std::find_if(_order.begin(), _order.end(), [&v](auto&& ptr) { return ptr == &v; });
return rotate_to_front(iter);
}
iterator rotate_to_front(iterator iter)
{
if (iter != _order.end()) {
{
return std::rotate(_order.begin(), iter.data(), iter.data() + 1);
}
}
return end();
}
std::size_t size() const noexcept
{
return _order.size();
}
bool empty() const noexcept
{
return _order.empty();
}
std::size_t capacity() const noexcept
{
return _order.capacity();
}
std::size_t max_size() const noexcept
{
return _order.max_size();
}
void reserve(std::size_t new_cap)
{
_order.reserve(new_cap);
}
void shrink_to_fit()
{
_order.shrink_to_fit();
}
value_type& operator[](std::size_t n)
{
return *_order[n];
}
const value_type& operator[](std::size_t n) const
{
return *_order[n];
}
value_type& at(std::size_t n)
{
return *_order.at(n);
}
const value_type& at(std::size_t n) const
{
return *_order.at(n);
}
iterator begin()
{
return _order.begin();
}
iterator end()
{
return _order.end();
}
const_iterator begin() const
{
return _order.begin();
}
const_iterator end() const
{
return _order.end();
}
reverse_iterator rbegin()
{
return _order.rbegin();
}
reverse_iterator rend()
{
return _order.rend();
}
const_reverse_iterator rbegin() const
{
return _order.rbegin();
}
const_reverse_iterator rend() const
{
return _order.rend();
}
value_type& front()
{
return *_order.front();
}
value_type& back()
{
return *_order.back();
}
const value_type& front() const
{
return *_order.front();
}
const value_type& back() const
{
return *_order.back();
}
std::vector<value_type*>& underlying() {
return _order;
}
};
} // namespace cloth::util
// namespace ranges {
//
// template<typename T>
// struct enable_view<cloth::util::ptr_vec<T>> : std::false_type {};
//
// template<typename T>
// struct enable_view<cloth::util::ref_vec<T>> : std::false_type {};
//
// } // namespace ranges
|
Improvements and Future Challenges for the Research Infrastructure in the Field 'Measuring Cognitive Ability'
The assessment of cognitive abilities is critical in large-scale survey studies that aim at elucidating the longitudinal interplay between the individual’s cognitive potential and socio-economic variables. The format of such studies calls for assessment methods which can not only be administered economically but also display high (psychometric) measurement quality. In consideration of recent theoretical and empirical advances in intelligence research, we recommend the implementation of tests drawing on working memory in large-scale survey studies. Working memory is a limited-capacity system for temporary storage and processing of information and is currently discussed as the key cognitive system underlying intellectual abilities. Four types of working memory tests are described as examples and critically evaluated with regard to their psychometric quality and the need for further evaluation. |
Mattel's Attempt To Claim Ownership Of Bratz Comes Back To Bite Them: Now They May Owe $88.5 Million
from the blowback dept
Wow. For years, we've been following the legal battle between toy giant Mattel and toy upstart MGA concerning the ownership of Bratz dolls -- the first dolls in years to seriously compete with Mattel's classic Barbie doll. If you haven't been following it, a guy who worked for Mattel came up with the idea for the Bratz dolls. At Mattel he was not involved in designing dolls, and he claims that he did all the work in his spare time, not on company time. He then left and went to MGA, which agreed to make the Bratz dolls, which quickly became a huge success story. Mattel claimed that, under the guy's contractual agreement with Mattel, anything he invented belonged to them. The original district court ruling sided with Mattel and the judge (amazingly) ordered that Mattel should get all Bratz dolls including future plans for dolls. That made absolutely no sense to us. At most, if the determination was that the original designs were Mattel's, the company should get access to the original designs, and maybe some early dolls. But everything after that had nothing, whatsoever, to do with Mattel. Thankfully, Judge Kozinski on the 9th Circuit came to the rescue and wrote a fantastic ruling explaining all of this to the district court, and sent the case back for a new trial. As part of that, MGA also filed some counterclaims against Mattel, including the claim that Mattel illegally spied on MGA and copied trade secrets from the company through questionable means. When these counterclaims were filed, I actually suggested that it was silly and distracting from the larger point... which I still stand by. However, from a karmic perspective, it's interesting to see that the new jury has rejected nearly all of the claims against MGA, but sided with MGA on the trade secrets claim, and suggested an award of $88.5 million from Mattel to MGA -- an amount that MGA is going to ask the court to double for punitive reasons. To summarize: in the course of a few short years, Mattel went from losing in the marketplace to MGA, to winning a court case that gave it total control over the competing product... to now not having control and having to pay MGA potentially millions. Of course, this isn't over yet. Mattel has already asked the court for a brand new trial, and if that doesn't work, it says it's going to appeal the case, even if some "industry analysts" are apparently telling Mattel the company should just drop it. I have a feeling we haven't yet seen the end of this case, however.
Filed Under: barbie, bratz, copyright, dolls
Companies: mattel, mga |
Panel Session 4: Mobile Web Services Trend Perspectives
Web services are finding their way into mobile devices in several disparate islands: We can find WS proxies connected via proprietary wireless connectors to mobile devices (Blackberry MDS), the beginnings of a Web service consumer stack in mobile Java (JSR172), Web service identity federation stacks built into smartphone operating systems (Series 60, Windows Live for Mobile), Web services "Lite" in the form of Ajax (Opera) on browsers and widgets for mobile devices). However, challenging issues abound: (1) Where should we terminate a Web service - at a proxy or on the mobile device? (2) Are there any compelling reasons to make a mobile device a Web service consumer or provider itself? (3) Can mobile Web services realize the potential of multivendor interoperability? (4) When will we see seamless interoperability between enterprise Web service platforms (for example J2EE and Vista WCF and their mobile counterparts J2ME and Windows Mobile)? (5) What are the mobility versions of the Web 2.0 scenarios of social networking and collaboration? This panel of experts from organizations (Microsoft, Sun, RIM, Opera, Motorola) leading these developments will debate their visions of the mobile Web services future |
/**
* Contains configuration data for the UnboundID provider.
*
* @author Middleware Services
*/
public class UnboundIDProviderConfig extends ProviderConfig<Control>
{
/** Connection options. */
private LDAPConnectionOptions connectionOptions;
/** socket factory for ldap connections. */
private SocketFactory socketFactory;
/** socket factory for ldaps and startTLS connections. */
private SSLSocketFactory sslSocketFactory;
/** Search result codes to ignore. */
private ResultCode[] searchIgnoreResultCodes;
/** Default constructor. */
public UnboundIDProviderConfig()
{
setOperationExceptionResultCodes(ResultCode.SERVER_DOWN);
setControlProcessor(new ControlProcessor<>(new UnboundIDControlHandler()));
searchIgnoreResultCodes = new ResultCode[] {
ResultCode.TIME_LIMIT_EXCEEDED,
ResultCode.SIZE_LIMIT_EXCEEDED,
ResultCode.REFERRAL,
};
}
/**
* Returns the connection options.
*
* @return ldap connection options
*/
public LDAPConnectionOptions getConnectionOptions()
{
return connectionOptions;
}
/**
* Sets the connection options.
*
* @param options ldap connection options
*/
public void setConnectionOptions(final LDAPConnectionOptions options)
{
connectionOptions = options;
}
/**
* Returns the socket factory to use for LDAP connections.
*
* @return socket factory
*/
public SocketFactory getSocketFactory()
{
return socketFactory;
}
/**
* Sets the socket factory to use for LDAP connections.
*
* @param sf socket factory
*/
public void setSocketFactory(final SocketFactory sf)
{
checkImmutable();
logger.trace("setting socketFactory: {}", sf);
socketFactory = sf;
}
/**
* Returns the SSL socket factory to use for LDAPS and startTLS connections.
*
* @return SSL socket factory
*/
public SSLSocketFactory getSSLSocketFactory()
{
return sslSocketFactory;
}
/**
* Sets the SSL socket factory to use for LDAPS and startTLS connections.
*
* @param sf socket factory
*/
public void setSSLSocketFactory(final SSLSocketFactory sf)
{
checkImmutable();
logger.trace("setting sslSocketFactory: {}", sf);
sslSocketFactory = sf;
}
/**
* Returns the search ignore result codes.
*
* @return result codes to ignore
*/
public ResultCode[] getSearchIgnoreResultCodes()
{
return searchIgnoreResultCodes;
}
/**
* Sets the search ignore result codes.
*
* @param codes to ignore
*/
public void setSearchIgnoreResultCodes(final ResultCode[] codes)
{
checkImmutable();
logger.trace("setting searchIgnoreResultCodes: {}", Arrays.toString(codes));
searchIgnoreResultCodes = codes;
}
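  /*
   * Usage sketch (illustrative):
   *
   *   UnboundIDProviderConfig config = new UnboundIDProviderConfig();
   *   config.setConnectionOptions(new LDAPConnectionOptions());
   *   config.setSSLSocketFactory((SSLSocketFactory) SSLSocketFactory.getDefault());
   */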
@Override
public String toString()
{
return
String.format(
"[%s@%d::operationExceptionResultCodes=%s, properties=%s, " +
"connectionStrategy=%s, controlProcessor=%s, connectionOptions=%s, " +
"socketFactory=%s, sslSocketFactory=%s, searchIgnoreResultCodes=%s]",
getClass().getName(),
hashCode(),
Arrays.toString(getOperationExceptionResultCodes()),
getProperties(),
getConnectionStrategy(),
getControlProcessor(),
connectionOptions,
socketFactory,
sslSocketFactory,
Arrays.toString(searchIgnoreResultCodes));
}
} |
#include <stdio.h>
#include <string.h>

int main()
{
    char s[200001];
    int t;
    scanf("%d", &t);
    for (int i = 0; i < t; i++) {
        scanf("%s", s);
        int len = strlen(s);   /* hoisted: calling strlen in the loop test is O(n^2) */
        int open_paren = 0;    /* '(' seen but not yet matched */
        int open_brack = 0;    /* '[' seen but not yet matched */
        int matched = 0;       /* matched ')' and ']' characters */
        for (int j = 0; j < len; j++) {
            if (s[j] == '(') {
                open_paren++;
            } else if (s[j] == ')' && open_paren > 0) {
                open_paren--;
                matched++;
            } else if (s[j] == '[') {
                open_brack++;
            } else if (s[j] == ']' && open_brack > 0) {
                open_brack--;
                matched++;
            }
        }
        printf("%d\n", matched);
    }
    return 0;
}
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The passive components to version discovery.
The Discover object in discover.py contains functions that can create objects
on your behalf. These functions are not usable from within the keystoneauth1
library because you will get dependency resolution issues.
The Discover object in this file provides the querying components of Discovery.
This includes functions like url_for which allow you to retrieve URLs and the
raw data specified in version discovery responses.
"""
import copy
import re
import os_service_types
import six
from six.moves import urllib
from keystoneauth1 import _utils as utils
from keystoneauth1 import exceptions
_LOGGER = utils.get_logger(__name__)
LATEST = float('inf')
_SERVICE_TYPES = os_service_types.ServiceTypes()
def _str_or_latest(val):
"""Convert val to a string, handling LATEST => 'latest'.
:param val: An int or the special value LATEST.
:return: A string representation of val. If val was LATEST, the return is
'latest'.
"""
return 'latest' if val == LATEST else str(val)
def _int_or_latest(val):
"""Convert val to an int or the special value LATEST.
:param val: An int()-able, or the string 'latest', or the special value
LATEST.
:return: An int, or the special value LATEST
"""
return LATEST if val == 'latest' or val == LATEST else int(val)
def get_version_data(session, url, authenticated=None):
"""Retrieve raw version data from a url.
The return is a list of dicts of the form::
[{
'status': 'STABLE',
'id': 'v2.3',
'links': [
{
'href': 'http://network.example.com/v2.3',
'rel': 'self',
},
{
'href': 'http://network.example.com/',
'rel': 'collection',
},
],
'min_version': '2.0',
'max_version': '2.7',
},
...,
]
Note:
The maximum microversion may be specified by `max_version` or `version`,
the former superseding the latter.
All `*version` keys are optional.
Other keys and 'links' entries are permitted, but ignored.
:param session: A Session object that can be used for communication.
:type session: keystoneauth1.session.Session
:param string url: Endpoint or discovery URL from which to retrieve data.
:param bool authenticated: Include a token in the discovery call.
(optional) Defaults to None.
:return: A list of dicts containing version information.
:rtype: list(dict)
"""
headers = {'Accept': 'application/json'}
try:
resp = session.get(url, headers=headers, authenticated=authenticated)
except exceptions.Unauthorized:
resp = session.get(url, headers=headers, authenticated=True)
try:
body_resp = resp.json()
except ValueError:
pass
else:
# Swift returns the list of containers for an account on an
# authenticated GET from /, not a version document. To our knowledge
# it's the only thing returning a [] here - and that's ok.
if isinstance(body_resp, list):
raise exceptions.DiscoveryFailure(
'Invalid Response - List returned instead of dict')
# In the event of querying a root URL we will get back a list of
# available versions.
try:
return body_resp['versions']['values']
except (KeyError, TypeError):
pass
# Most servers don't have a 'values' element so accept a simple
# versions dict if available.
try:
return body_resp['versions']
except KeyError:
pass
# Otherwise if we query an endpoint like /v2.0 then we will get back
# just the one available version.
try:
return [body_resp['version']]
except KeyError:
pass
# Older Ironic does not actually return a discovery document for the
# single version discovery endpoint, which confuses the single-version
# fallback logic. While there are no known other services returning
# min/max ranges using headers instead of body, this is done in a
# non-Ironic specific manner just in case.
# The existence of this support should not be an indication to any
# OpenStack services that they should ADD this.
if 'id' in body_resp:
body_resp['status'] = Status.CURRENT
for header in resp.headers:
# We lose the case-insensitive quality here
header = header.lower()
if not header.startswith('x-openstack'):
continue
# Once the body starts having these values, stop overriding
# with the header values
if header.endswith('api-minimum-version'):
body_resp.setdefault('min_version', resp.headers[header])
if header.endswith('api-maximum-version'):
body_resp.setdefault('version', resp.headers[header])
return [body_resp]
err_text = resp.text[:50] + '...' if len(resp.text) > 50 else resp.text
raise exceptions.DiscoveryFailure('Invalid Response - Bad version data '
'returned: %s' % err_text)
def normalize_version_number(version):
"""Turn a version representation into a tuple.
Examples:
The following all produce a return value of (1, 0)::
1, '1', 'v1', [1], (1,), ['1'], 1.0, '1.0', 'v1.0', (1, 0)
The following all produce a return value of (1, 20, 3)::
'v1.20.3', '1.20.3', (1, 20, 3), ['1', '20', '3']
The following all produce a return value of (LATEST, LATEST)::
'latest', 'vlatest', ('latest', 'latest'), (LATEST, LATEST)
The following all produce a return value of (2, LATEST)::
'2.latest', 'v2.latest', (2, LATEST), ('2', 'latest')
:param version: A version specifier in any of the following forms:
String, possibly prefixed with 'v', containing one or more numbers
*or* the string 'latest', separated by periods. Examples: 'v1',
'v1.2', '1.2.3', '123', 'latest', '1.latest', 'v1.latest'.
Integer. This will be assumed to be the major version, with a minor
version of 0.
Float. The integer part is assumed to be the major version; the
decimal part the minor version.
Non-string iterable comprising integers, integer strings, the string
'latest', or the special value LATEST.
Examples: (1,), [1, 2], ('12', '34', '56'), (LATEST,), (2, 'latest')
:return: A tuple of len >= 2 comprising integers and/or LATEST.
:raises TypeError: If the input version cannot be interpreted.
"""
# Copy the input var so the error presents the original value
ver = version
# If it's a non-string iterable, turn it into a string for subsequent
# processing. This ensures at least 1 decimal point if e.g. [1] is given.
if not isinstance(ver, six.string_types):
try:
ver = '.'.join(map(_str_or_latest, ver))
except TypeError:
# Not an iterable
pass
# If it's a numeric or an integer as a string then normalize it to a
# float string. This ensures 1 decimal point.
# If it's a float as a string, don't do that, the split/map below will do
# what we want. (Otherwise, we wind up with 3.20 -> (3, 2))
if isinstance(ver, six.string_types):
# trim the v from a 'v2.0' or similar
ver = ver.lstrip('v')
try:
# If version is a pure int, like '1' or '200' this will produce
# a stringified version with a .0 added. If it's any other number,
# such as '1.1' - int(version) raises an Exception
ver = str(float(int(ver)))
except ValueError:
pass
# If it's an int or float, turn it into a float string
elif isinstance(ver, (int, float)):
ver = _str_or_latest(float(ver))
# At this point, we should either have a string that contains numbers with
# at least one decimal point, or something decidedly else.
# if it's a string from above break it on .
try:
ver = ver.split('.')
except AttributeError:
# Not a string
pass
# Handle special case variants of just 'latest'
if ver == 'latest' or tuple(ver) == ('latest',):
return LATEST, LATEST
    # It's either an iterable, or something else that makes us sad.
try:
return tuple(map(_int_or_latest, ver))
except (TypeError, ValueError):
pass
raise TypeError('Invalid version specified: %s' % version)
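# Illustrative results (mirroring the docstring above):
#
#   normalize_version_number('v1.2')     == (1, 2)
#   normalize_version_number(1)          == (1, 0)
#   normalize_version_number('latest')   == (LATEST, LATEST)
#   normalize_version_number('2.latest') == (2, LATEST)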
def _normalize_version_args(
version, min_version, max_version, service_type=None):
# The sins of our fathers become the blood on our hands.
# If a user requests an old-style service type such as volumev2, then they
# are inherently requesting the major API version 2. It's not a good
# interface, but it's the one that was imposed on the world years ago
# because the client libraries all hid the version discovery document.
# In order to be able to ensure that a user who requests volumev2 does not
# get a block-storage endpoint that only provides v3 of the block-storage
# service, we need to pull the version out of the service_type. The
# service-types-authority will prevent the growth of new monstrosities such
# as this, but in order to move forward without breaking people, we have
# to just cry in the corner while striking ourselves with thorned branches.
# That said, for sure only do this hack for officially known service_types.
if (service_type and
_SERVICE_TYPES.is_known(service_type) and
service_type[-1].isdigit() and
service_type[-2] == 'v'):
implied_version = normalize_version_number(service_type[-1])
else:
implied_version = None
if version and (min_version or max_version):
raise ValueError(
"version is mutually exclusive with min_version and max_version")
if version:
# Explode this into min_version and max_version
min_version = normalize_version_number(version)
max_version = (min_version[0], LATEST)
if implied_version:
if min_version[0] != implied_version[0]:
raise exceptions.ImpliedVersionMismatch(
service_type=service_type,
implied=implied_version,
given=version_to_string(version))
return min_version, max_version
if min_version == 'latest':
if max_version not in (None, 'latest'):
raise ValueError(
"min_version is 'latest' and max_version is {max_version}"
" but is only allowed to be 'latest' or None".format(
max_version=max_version))
max_version = 'latest'
# Normalize e.g. empty string to None
min_version = min_version or None
max_version = max_version or None
if min_version:
min_version = normalize_version_number(min_version)
# If min_version was specified but max_version was not, max is latest.
max_version = normalize_version_number(max_version or 'latest')
# NOTE(efried): We should be doing this instead:
# max_version = normalize_version_number(max_version or 'latest')
# However, see first NOTE(jamielennox) in EndpointData._set_version_info.
if max_version:
max_version = normalize_version_number(max_version)
if None not in (min_version, max_version) and max_version < min_version:
raise ValueError("min_version cannot be greater than max_version")
if implied_version:
if min_version:
if min_version[0] != implied_version[0]:
raise exceptions.ImpliedMinVersionMismatch(
service_type=service_type,
implied=implied_version,
given=version_to_string(min_version))
else:
min_version = implied_version
# If 'latest' is provided with a versioned service-type like
# volumev2 - the user wants the latest of volumev2, not the latest
# of block-storage.
if max_version and max_version[0] != LATEST:
if max_version[0] != implied_version[0]:
raise exceptions.ImpliedMaxVersionMismatch(
service_type=service_type,
implied=implied_version,
given=version_to_string(max_version))
else:
max_version = (implied_version[0], LATEST)
return min_version, max_version
def version_to_string(version):
"""Turn a version tuple into a string.
:param tuple version: A version represented as a tuple of ints. As a
special case, a tuple member may be LATEST, which
translates to 'latest'.
:return: A version represented as a period-delimited string.
"""
# Special case
if all(ver == LATEST for ver in version):
return 'latest'
return ".".join(map(_str_or_latest, version))
def version_between(min_version, max_version, candidate):
"""Determine whether a candidate version is within a specified range.
:param min_version: The minimum version that is acceptable.
None/empty indicates no lower bound.
:param max_version: The maximum version that is acceptable.
None/empty indicates no upper bound.
:param candidate: Candidate version to test. May not be None/empty.
:return: True if candidate is between min_version and max_version; False
otherwise.
:raises ValueError: If candidate is None.
:raises TypeError: If any input cannot be normalized.
"""
if not candidate:
raise ValueError("candidate is required.")
candidate = normalize_version_number(candidate)
# Normalize up front to validate any malformed inputs
if min_version:
min_version = normalize_version_number(min_version)
if max_version:
max_version = normalize_version_number(max_version)
# If the candidate is less than the min_version, it's not a match.
# No min_version means no lower bound.
if min_version and candidate < min_version:
return False
# If the candidate is higher than the max_version, it's not a match.
# No max_version means no upper bound.
if max_version and candidate > max_version:
return False
return True
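def _example_version_between():
    # Illustrative bounds checks; None means "no bound on this side".
    # The version values are arbitrary examples.
    assert version_between((2, 0), (3, LATEST), (2, 37))
    assert version_between(None, (1, 9), (1, 5))
    assert not version_between((2, 0), None, (1, 9))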
def version_match(required, candidate):
"""Test that an available version satisfies the required version.
To be suitable a version must be of the same major version as required
and be at least a match in minor/patch level.
eg. 3.3 is a match for a required 3.1 but 4.1 is not.
:param tuple required: the version that must be met.
:param tuple candidate: the version to test against required.
:returns: True if candidate is suitable False otherwise.
:rtype: bool
"""
# major versions must be the same (e.g. even though v2 is a lower
# version than v3 we can't use it if v2 was requested)
if candidate[0] != required[0]:
return False
# prevent selecting a minor version less than what is required
if candidate < required:
return False
return True
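def _example_version_match():
    # The docstring's example, spelled out: 3.3 satisfies a required 3.1,
    # but 4.1 does not because the major version differs.
    assert version_match((3, 1), (3, 3))
    assert not version_match((3, 1), (4, 1))
    # A lower minor version than required is also rejected.
    assert not version_match((3, 4), (3, 2))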
def _latest_soft_match(required, candidate):
if not required:
return False
if LATEST not in required:
return False
if all(part == LATEST for part in required):
return True
if required[0] == candidate[0] and required[1] == LATEST:
return True
# TODO(efried): Do we need to handle >2-part version numbers here?
return False
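def _example_latest_soft_match():
    # Illustrative: an all-LATEST request soft-matches any candidate, and a
    # major-pinned (N, LATEST) request soft-matches candidates in major N.
    # The candidate tuples are arbitrary examples.
    assert _latest_soft_match((LATEST, LATEST), (2, 1))
    assert _latest_soft_match((2, LATEST), (2, 35))
    assert not _latest_soft_match((2, LATEST), (3, 0))
    assert not _latest_soft_match(None, (2, 1))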
def _combine_relative_url(discovery_url, version_url):
# NOTE(jamielennox): urllib.parse.urljoin allows the url to be relative
# or even protocol-less. The additional trailing '/' makes urljoin respect
# the current path as canonical even if the url doesn't include it. for
# example a "v2" path from http://host/admin should resolve as
# http://host/admin/v2 where it would otherwise be host/v2. This has no
# effect on absolute urls.
url = urllib.parse.urljoin(discovery_url.rstrip('/') + '/', version_url)
    # Sadly, version discovery documents commonly have the scheme
    # and netloc broken.
parsed_version_url = urllib.parse.urlparse(url)
parsed_discovery_url = urllib.parse.urlparse(discovery_url)
    # The services can override the version_url with some config options
    # (for example, in Keystone, Cinder and Glance the option is
    # "public_endpoint"; in Nova and Neutron it is "compute_link_prefix"
    # and "network_link_prefix"). In that case it is hard to distinguish
    # which part of version_url is useful for discovery_url, so here we
    # just get the version from version_url and then add it to the
    # discovery_url if needed.
path = parsed_version_url.path
if parsed_discovery_url.netloc != parsed_version_url.netloc:
version = version_url.rstrip('/').split('/')[-1]
url_path = parsed_discovery_url.path.rstrip('/')
if not url_path.endswith(version):
path = url_path + '/' + version
if version_url.endswith('/'):
# add '/' back to keep backward compatibility.
path = path + '/'
else:
path = parsed_discovery_url.path
return urllib.parse.ParseResult(
parsed_discovery_url.scheme,
parsed_discovery_url.netloc,
path,
parsed_version_url.params,
parsed_version_url.query,
parsed_version_url.fragment).geturl()
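def _example_combine_relative_url():
    # Illustrative: when a version link points at a different host (the
    # broken-netloc case described above), the version segment is grafted
    # onto the discovery URL's host and path. The hostnames here are
    # assumptions for demonstration only.
    combined = _combine_relative_url('http://catalog-host/volume',
                                     'http://internal-host/v3/')
    assert combined == 'http://catalog-host/volume/v3/'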
def _version_from_url(url):
if not url:
return url
url = urllib.parse.urlparse(url)
for part in reversed(url.path.split('/')):
try:
# All integer project ids can parse as valid versions. In URLs
# all known instances of versions start with a v. So check to make
# sure the url part starts with 'v', then check that it parses
# as a valid version.
if part[0] != 'v':
continue
return normalize_version_number(part)
except Exception:
pass
return None
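def _example_version_from_url():
    # Illustrative: only path segments that start with 'v' and parse as a
    # version are considered (assuming normalize_version_number, defined
    # earlier in this module, strips a leading 'v'), so project ids and
    # other segments are skipped. URLs are demonstration values.
    assert _version_from_url('http://host/compute/v2.1/some-project') == (2, 1)
    assert _version_from_url('http://host/no/version/here') is None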
class Status(object):
CURRENT = 'CURRENT'
SUPPORTED = 'SUPPORTED'
DEPRECATED = 'DEPRECATED'
EXPERIMENTAL = 'EXPERIMENTAL'
UNKNOWN = 'UNKNOWN'
KNOWN = (CURRENT, SUPPORTED, DEPRECATED, EXPERIMENTAL)
@classmethod
def normalize(cls, raw_status):
"""Turn a status into a canonical status value.
If the status from the version discovery document does not match one
of the known values, it will be set to 'UNKNOWN'.
:param str raw_status: Status value from a discovery document.
:returns: A canonicalized version of the status. Valid values
are CURRENT, SUPPORTED, DEPRECATED, EXPERIMENTAL and UNKNOWN
:rtype: str
"""
status = raw_status.upper()
if status == 'STABLE':
status = cls.CURRENT
if status not in cls.KNOWN:
status = cls.UNKNOWN
return status
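def _example_status_normalize():
    # Illustrative: 'stable' is folded into CURRENT; anything unrecognised
    # becomes UNKNOWN rather than raising.
    assert Status.normalize('stable') == Status.CURRENT
    assert Status.normalize('deprecated') == Status.DEPRECATED
    assert Status.normalize('shiny') == Status.UNKNOWN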
class Discover(object):
CURRENT_STATUSES = ('stable', 'current', 'supported')
DEPRECATED_STATUSES = ('deprecated',)
EXPERIMENTAL_STATUSES = ('experimental',)
def __init__(self, session, url, authenticated=None):
self._url = url
self._data = get_version_data(session, url,
authenticated=authenticated)
def raw_version_data(self, allow_experimental=False,
allow_deprecated=True, allow_unknown=False):
"""Get raw version information from URL.
Raw data indicates that only minimal validation processing is performed
on the data, so what is returned here will be the data in the same
format it was received from the endpoint.
:param bool allow_experimental: Allow experimental version endpoints.
:param bool allow_deprecated: Allow deprecated version endpoints.
:param bool allow_unknown: Allow endpoints with an unrecognised status.
:returns: The endpoints returned from the server that match the
criteria.
:rtype: list
"""
versions = []
for v in self._data:
try:
status = v['status']
except KeyError:
_LOGGER.warning('Skipping over invalid version data. '
'No stability status in version.')
continue
status = status.lower()
if status in self.CURRENT_STATUSES:
versions.append(v)
elif status in self.DEPRECATED_STATUSES:
if allow_deprecated:
versions.append(v)
elif status in self.EXPERIMENTAL_STATUSES:
if allow_experimental:
versions.append(v)
elif allow_unknown:
versions.append(v)
return versions
def version_data(self, reverse=False, **kwargs):
"""Get normalized version data.
Return version data in a structured way.
:param bool reverse: Reverse the list. reverse=true will mean the
returned list is sorted from newest to oldest
version.
:returns: A list of :class:`VersionData` sorted by version number.
:rtype: list(VersionData)
"""
data = self.raw_version_data(**kwargs)
versions = []
for v in data:
try:
version_str = v['id']
except KeyError:
_LOGGER.info('Skipping invalid version data. Missing ID.')
continue
try:
links = v['links']
except KeyError:
_LOGGER.info('Skipping invalid version data. Missing links')
continue
version_number = normalize_version_number(version_str)
# collect microversion information
# NOTE(efried): Some existing discovery documents (e.g. from nova
# v2.0 in the pike release) include *version keys with "" (empty
# string) values, expecting them to be treated the same as if the
# keys were absent.
min_microversion = v.get('min_version') or None
if min_microversion:
min_microversion = normalize_version_number(min_microversion)
max_microversion = v.get('max_version')
if not max_microversion:
max_microversion = v.get('version') or None
if max_microversion:
max_microversion = normalize_version_number(max_microversion)
next_min_version = v.get('next_min_version') or None
if next_min_version:
next_min_version = normalize_version_number(next_min_version)
not_before = v.get('not_before') or None
self_url = None
collection_url = None
for link in links:
try:
rel = link['rel']
url = _combine_relative_url(self._url, link['href'])
except (KeyError, TypeError):
_LOGGER.info('Skipping invalid version link. '
'Missing link URL or relationship.')
continue
if rel.lower() == 'self':
self_url = url
elif rel.lower() == 'collection':
collection_url = url
if not self_url:
_LOGGER.info('Skipping invalid version data. '
'Missing link to endpoint.')
continue
versions.append(
VersionData(version=version_number,
url=self_url,
collection=collection_url,
min_microversion=min_microversion,
max_microversion=max_microversion,
next_min_version=next_min_version,
not_before=not_before,
status=Status.normalize(v['status']),
raw_status=v['status']))
versions.sort(key=lambda v: v['version'], reverse=reverse)
return versions
def version_string_data(self, reverse=False, **kwargs):
"""Get normalized version data with versions as strings.
Return version data in a structured way.
:param bool reverse: Reverse the list. reverse=true will mean the
returned list is sorted from newest to oldest
version.
:returns: A list of :class:`VersionData` sorted by version number.
:rtype: list(VersionData)
"""
version_data = self.version_data(reverse=reverse, **kwargs)
for version in version_data:
for key in ('version', 'min_microversion', 'max_microversion'):
if version[key]:
version[key] = version_to_string(version[key])
return version_data
def data_for(self, version, **kwargs):
"""Return endpoint data for a version.
NOTE: This method raises a TypeError if version is None. It is
        kept for backwards compatibility. New code should use
versioned_data_for instead.
:param tuple version: The version is always a minimum version in the
same major release as there should be no compatibility issues with
using a version newer than the one asked for.
:returns: the endpoint data for a URL that matches the required version
(the format is described in version_data) or None if no
match.
:rtype: dict
"""
version = normalize_version_number(version)
for data in self.version_data(reverse=True, **kwargs):
# Since the data is reversed, the latest version is first. If
# latest was requested, return it.
if _latest_soft_match(version, data['version']):
return data
if version_match(version, data['version']):
return data
return None
def url_for(self, version, **kwargs):
"""Get the endpoint url for a version.
NOTE: This method raises a TypeError if version is None. It is
        kept for backwards compatibility. New code should use
versioned_url_for instead.
:param tuple version: The version is always a minimum version in the
same major release as there should be no compatibility issues with
using a version newer than the one asked for.
:returns: The url for the specified version or None if no match.
:rtype: str
"""
data = self.data_for(version, **kwargs)
return data['url'] if data else None
def versioned_data_for(self, url=None,
min_version=None, max_version=None,
**kwargs):
"""Return endpoint data for the service at a url.
min_version and max_version can be given either as strings or tuples.
:param string url: If url is given, the data will be returned for the
endpoint data that has a self link matching the url.
:param min_version: The minimum endpoint version that is acceptable. If
min_version is given with no max_version it is as if max version is
'latest'. If min_version is 'latest', max_version may only be
'latest' or None.
:param max_version: The maximum endpoint version that is acceptable. If
min_version is given with no max_version it is as if max version is
'latest'. If min_version is 'latest', max_version may only be
'latest' or None.
:returns: the endpoint data for a URL that matches the required version
(the format is described in version_data) or None if no
match.
:rtype: dict
"""
min_version, max_version = _normalize_version_args(
None, min_version, max_version)
no_version = not max_version and not min_version
version_data = self.version_data(reverse=True, **kwargs)
# If we don't have to check a min_version, we can short
# circuit anything else
if (max_version == (LATEST, LATEST) and
(not min_version or min_version == (LATEST, LATEST))):
# because we reverse we can just take the first entry
return version_data[0]
if url:
url = url.rstrip('/') + '/'
if no_version and not url:
# because we reverse we can just take the first entry
return version_data[0]
# Version data is in order from highest to lowest, so we return
# the first matching entry
for data in version_data:
if url and data['url'] and data['url'].rstrip('/') + '/' == url:
return data
if _latest_soft_match(min_version, data['version']):
return data
# Only validate version bounds if versions were specified
if min_version and max_version and version_between(
min_version, max_version, data['version']):
return data
# If there is no version requested and we could not find a matching
# url in the discovery doc, that means we've got an unversioned
# endpoint in the catalog and the user is requesting version data
# so that they know what version they got. We can return the first
# entry from version_data, because the user hasn't requested anything
# different.
if no_version and url and len(version_data) > 0:
return version_data[0]
# We couldn't find a match.
return None
def versioned_url_for(self, min_version=None, max_version=None, **kwargs):
"""Get the endpoint url for a version.
min_version and max_version can be given either as strings or tuples.
:param min_version: The minimum version that is acceptable. If
min_version is given with no max_version it is as if max version
is 'latest'.
:param max_version: The maximum version that is acceptable. If
min_version is given with no max_version it is as if max version is
'latest'.
:returns: The url for the specified version or None if no match.
:rtype: str
"""
data = self.versioned_data_for(min_version=min_version,
max_version=max_version, **kwargs)
return data['url'] if data else None
class VersionData(dict):
"""Normalized Version Data about an endpoint."""
def __init__(
self,
version,
url,
collection=None,
max_microversion=None,
min_microversion=None,
next_min_version=None,
not_before=None,
status='CURRENT',
raw_status=None):
super(VersionData, self).__init__()
self['version'] = version
self['url'] = url
self['collection'] = collection
self['max_microversion'] = max_microversion
self['min_microversion'] = min_microversion
self['next_min_version'] = next_min_version
self['not_before'] = not_before
self['status'] = status
self['raw_status'] = raw_status
@property
def version(self):
"""The normalized version of the endpoint."""
return self.get('version')
@property
def url(self):
"""The url for the endpoint."""
return self.get('url')
@property
def collection(self):
"""The URL for the discovery document.
May be None.
"""
return self.get('collection')
@property
def min_microversion(self):
"""The minimum microversion supported by the endpoint.
May be None.
"""
return self.get('min_microversion')
@property
def max_microversion(self):
"""The maximum microversion supported by the endpoint.
May be None.
"""
return self.get('max_microversion')
@property
def status(self):
"""A canonicalized version of the status.
Valid values are CURRENT, SUPPORTED, DEPRECATED and EXPERIMENTAL.
"""
return self.get('status')
@property
def raw_status(self):
"""The status as provided by the server."""
return self.get('raw_status')
class EndpointData(object):
"""Normalized information about a discovered endpoint.
Contains url, version, microversion, interface and region information.
This is essentially the data contained in the catalog and the version
discovery documents about an endpoint that is used to select the endpoint
desired by the user. It is returned so that a user can know which qualities
a discovered endpoint had, in case their request allowed for a range of
possibilities.
"""
def __init__(self,
catalog_url=None,
service_url=None,
service_type=None,
service_name=None,
service_id=None,
region_name=None,
interface=None,
endpoint_id=None,
raw_endpoint=None,
api_version=None,
major_version=None,
min_microversion=None,
max_microversion=None,
next_min_version=None,
not_before=None,
status=None):
self.catalog_url = catalog_url
self.service_url = service_url
self.service_type = service_type
self.service_name = service_name
self.service_id = service_id
self.interface = interface
self.region_name = region_name
self.endpoint_id = endpoint_id
self.raw_endpoint = raw_endpoint
self.major_version = major_version
self.min_microversion = min_microversion
self.max_microversion = max_microversion
self.next_min_version = next_min_version
self.not_before = not_before
self.status = status
self._saved_project_id = None
self._catalog_matches_version = False
self._catalog_matches_exactly = False
self._disc = None
self.api_version = api_version or _version_from_url(self.url)
def __copy__(self):
"""Return a new EndpointData based on this one."""
new_data = EndpointData(
catalog_url=self.catalog_url,
service_url=self.service_url,
service_type=self.service_type,
service_name=self.service_name,
service_id=self.service_id,
region_name=self.region_name,
interface=self.interface,
endpoint_id=self.endpoint_id,
raw_endpoint=self.raw_endpoint,
api_version=self.api_version,
major_version=self.major_version,
min_microversion=self.min_microversion,
max_microversion=self.max_microversion,
next_min_version=self.next_min_version,
not_before=self.not_before,
status=self.status,
)
# Save cached discovery object - but we don't want to
# actually provide a constructor argument
new_data._disc = self._disc
new_data._saved_project_id = self._saved_project_id
return new_data
def __str__(self):
"""Produce a string like EndpointData{key=val, ...}, for debugging."""
str_attrs = (
'api_version', 'catalog_url', 'endpoint_id', 'interface',
'major_version', 'max_microversion', 'min_microversion',
'next_min_version', 'not_before', 'raw_endpoint', 'region_name',
'service_id', 'service_name', 'service_type', 'service_url', 'url')
return "%s{%s}" % (self.__class__.__name__, ', '.join(
["%s=%s" % (attr, getattr(self, attr)) for attr in str_attrs]))
@property
def url(self):
return self.service_url or self.catalog_url
def get_current_versioned_data(self, session, allow=None, cache=None,
project_id=None):
"""Run version discovery on the current endpoint.
A simplified version of get_versioned_data, get_current_versioned_data
runs discovery but only on the endpoint that has been found already.
It can be useful in some workflows where the user wants version
information about the endpoint they have.
:param session: A session object that can be used for communication.
:type session: keystoneauth1.session.Session
:param dict allow: Extra filters to pass when discovering API
versions. (optional)
:param dict cache: A dict to be used for caching results in
addition to caching them on the Session.
(optional)
:param string project_id: ID of the currently scoped project. Used for
removing project_id components of URLs from
the catalog. (optional)
:returns: A new EndpointData with the requested versioned data.
:rtype: :py:class:`keystoneauth1.discover.EndpointData`
:raises keystoneauth1.exceptions.discovery.DiscoveryFailure: If the
appropriate versioned data
could not be discovered.
"""
min_version, max_version = _normalize_version_args(
self.api_version, None, None)
return self.get_versioned_data(
session=session, allow=allow, cache=cache, allow_version_hack=True,
discover_versions=True,
min_version=min_version, max_version=max_version)
def get_versioned_data(self, session, allow=None, cache=None,
allow_version_hack=True, project_id=None,
discover_versions=True,
min_version=None, max_version=None):
"""Run version discovery for the service described.
Performs Version Discovery and returns a new EndpointData object with
information found.
min_version and max_version can be given either as strings or tuples.
:param session: A session object that can be used for communication.
:type session: keystoneauth1.session.Session
:param dict allow: Extra filters to pass when discovering API
versions. (optional)
:param dict cache: A dict to be used for caching results in
addition to caching them on the Session.
(optional)
:param bool allow_version_hack: Allow keystoneauth to hack up catalog
URLS to support older schemes.
(optional, default True)
:param string project_id: ID of the currently scoped project. Used for
removing project_id components of URLs from
the catalog. (optional)
:param bool discover_versions: Whether to get version metadata from
the version discovery document even
                                       if it's not necessary to fulfill the
major version request. (optional,
defaults to True)
:param min_version: The minimum version that is acceptable. If
min_version is given with no max_version it is as
if max version is 'latest'.
:param max_version: The maximum version that is acceptable. If
min_version is given with no max_version it is as
if max version is 'latest'.
:returns: A new EndpointData with the requested versioned data.
:rtype: :py:class:`keystoneauth1.discover.EndpointData`
:raises keystoneauth1.exceptions.discovery.DiscoveryFailure: If the
appropriate versioned data
could not be discovered.
"""
min_version, max_version = _normalize_version_args(
None, min_version, max_version)
if not allow:
allow = {}
# This method should always return a new EndpointData
new_data = copy.copy(self)
new_data._set_version_info(
session=session, allow=allow, cache=cache,
allow_version_hack=allow_version_hack, project_id=project_id,
discover_versions=discover_versions, min_version=min_version,
max_version=max_version)
return new_data
def get_all_version_string_data(self, session, project_id=None):
"""Return version data for all versions discovery can find.
:param string project_id: ID of the currently scoped project. Used for
removing project_id components of URLs from
the catalog. (optional)
:returns: A list of :class:`VersionData` sorted by version number.
:rtype: list(VersionData)
"""
versions = []
for vers_url in self._get_discovery_url_choices(project_id=project_id):
try:
d = get_discovery(session, vers_url)
except Exception as e:
# Ignore errors here - we're just searching for one of the
# URLs that will give us data.
_LOGGER.debug(
"Failed attempt at discovery on %s: %s", vers_url, str(e))
continue
for version in d.version_string_data():
versions.append(version)
break
return versions or self._infer_version_data(project_id)
def _infer_version_data(self, project_id=None):
"""Return version data dict for when discovery fails.
:param string project_id: ID of the currently scoped project. Used for
removing project_id components of URLs from
the catalog. (optional)
:returns: A list of :class:`VersionData` sorted by version number.
:rtype: list(VersionData)
"""
version = self.api_version
if version:
version = version_to_string(self.api_version)
url = self.url.rstrip("/")
if project_id and url.endswith(project_id):
url, _ = self.url.rsplit('/', 1)
url += "/"
return [VersionData(url=url, version=version)]
def _set_version_info(self, session, allow=None, cache=None,
allow_version_hack=True, project_id=None,
discover_versions=False,
min_version=None, max_version=None):
match_url = None
no_version = not max_version and not min_version
if no_version and not discover_versions:
# NOTE(jamielennox): This may not be the best thing to default to
# but is here for backwards compatibility. It may be worth
# defaulting to the most recent version.
return
elif no_version and discover_versions:
# We want to run discovery, but we don't want to find different
# endpoints than what's in the catalog
allow_version_hack = False
match_url = self.url
if project_id:
self.project_id = project_id
discovered_data = None
# Maybe we've run discovery in the past and have a document that can
# satisfy the request without further work
if self._disc:
discovered_data = self._disc.versioned_data_for(
min_version=min_version, max_version=max_version,
url=match_url, **allow)
if not discovered_data:
self._run_discovery(
session=session, cache=cache,
min_version=min_version, max_version=max_version,
project_id=project_id, allow_version_hack=allow_version_hack,
discover_versions=discover_versions)
if not self._disc:
return
discovered_data = self._disc.versioned_data_for(
min_version=min_version, max_version=max_version,
url=match_url, **allow)
if not discovered_data:
if min_version and not max_version:
raise exceptions.DiscoveryFailure(
"Minimum version {min_version} was not found".format(
min_version=version_to_string(min_version)))
elif max_version and not min_version:
raise exceptions.DiscoveryFailure(
"Maximum version {max_version} was not found".format(
max_version=version_to_string(max_version)))
elif min_version and max_version:
raise exceptions.DiscoveryFailure(
"No version found between {min_version}"
" and {max_version}".format(
min_version=version_to_string(min_version),
max_version=version_to_string(max_version)))
else:
raise exceptions.DiscoveryFailure(
"No version data found remotely at all")
self.min_microversion = discovered_data['min_microversion']
self.max_microversion = discovered_data['max_microversion']
self.next_min_version = discovered_data['next_min_version']
self.not_before = discovered_data['not_before']
self.api_version = discovered_data['version']
self.status = discovered_data['status']
# TODO(mordred): these next two things should be done by Discover
# in versioned_data_for.
discovered_url = discovered_data['url']
# NOTE(jamielennox): urljoin allows the url to be relative or even
# protocol-less. The additional trailing '/' make urljoin respect
# the current path as canonical even if the url doesn't include it.
# for example a "v2" path from http://host/admin should resolve as
# http://host/admin/v2 where it would otherwise be host/v2.
# This has no effect on absolute urls returned from url_for.
url = urllib.parse.urljoin(self._disc._url.rstrip('/') + '/',
discovered_url)
# If we had to pop a project_id from the catalog_url, put it back on
if self._saved_project_id:
url = urllib.parse.urljoin(url.rstrip('/') + '/',
self._saved_project_id)
self.service_url = url
def _run_discovery(self, session, cache, min_version, max_version,
project_id, allow_version_hack, discover_versions):
tried = set()
for vers_url in self._get_discovery_url_choices(
project_id=project_id,
allow_version_hack=allow_version_hack,
min_version=min_version,
max_version=max_version):
if self._catalog_matches_exactly and not discover_versions:
# The version we started with is correct, and we don't want
# new data
return
if vers_url in tried:
continue
tried.add(vers_url)
try:
self._disc = get_discovery(
session, vers_url,
cache=cache,
authenticated=False)
break
except (exceptions.DiscoveryFailure,
exceptions.HttpError,
exceptions.ConnectionError):
continue
if not self._disc:
# We couldn't find a version discovery document anywhere.
if self._catalog_matches_version:
# But - the version in the catalog is fine.
self.service_url = self.catalog_url
return
# NOTE(jamielennox): The logic here is required for backwards
# compatibility. By itself it is not ideal.
if allow_version_hack:
# NOTE(jamielennox): If we can't contact the server we
# fall back to just returning the URL from the catalog. This
# is backwards compatible behaviour and used when there is no
# other choice. Realistically if you have provided a version
# you should be able to rely on that version being returned or
# the request failing.
_LOGGER.warning(
'Failed to contact the endpoint at %s for '
'discovery. Fallback to using that endpoint as '
'the base url.', self.url)
return
else:
# NOTE(jamielennox): If you've said no to allow_version_hack
# and we can't determine the actual URL this is a failure
# because we are specifying that the deployment must be up to
# date enough to properly specify a version and keystoneauth
# can't deliver.
raise exceptions.DiscoveryFailure(
"Version requested but version discovery document was not"
" found and allow_version_hack was False")
def _get_discovery_url_choices(
self, project_id=None, allow_version_hack=True,
min_version=None, max_version=None):
"""Find potential locations for version discovery URLs.
min_version and max_version are already normalized, so will either be
None or a tuple.
"""
url = urllib.parse.urlparse(self.url.rstrip('/'))
url_parts = url.path.split('/')
# First, check to see if the catalog url ends with a project id
# We need to remove it and save it for later if it does
if project_id and url_parts[-1].endswith(project_id):
self._saved_project_id = url_parts.pop()
elif not project_id:
# Peek to see if -2 is a version. If so, -1 is a project_id,
# even if we don't know that at this point in the call stack
try:
normalize_version_number(url_parts[-2])
self._saved_project_id = url_parts.pop()
except (IndexError, TypeError):
pass
catalog_discovery = versioned_discovery = None
# Next, check to see if the url indicates a version and if that
# version either matches our version request or is within the
# range requested. If so, we can start by trying the given url
# as it has a high potential for success.
try:
url_version = normalize_version_number(url_parts[-1])
versioned_discovery = urllib.parse.ParseResult(
url.scheme,
url.netloc,
'/'.join(url_parts),
url.params,
url.query,
url.fragment).geturl()
except TypeError:
pass
else:
# `is_between` means version bounds were specified *and* the URL
# version is between them.
is_between = min_version and max_version and version_between(
min_version, max_version, url_version)
exact_match = (is_between and max_version and
max_version[0] == url_version[0])
high_match = (is_between and max_version and
max_version[1] != LATEST and
version_match(max_version, url_version))
if exact_match or is_between:
self._catalog_matches_version = True
self._catalog_matches_exactly = exact_match
# The endpoint from the catalog matches the version request
# We construct a URL minus any project_id, but we don't
# return it just yet. It's a good option, but unless we
# have an exact match or match the max requested, we want
# to try for an unversioned endpoint first.
catalog_discovery = urllib.parse.ParseResult(
url.scheme,
url.netloc,
'/'.join(url_parts),
url.params,
url.query,
url.fragment).geturl().rstrip('/') + '/'
# If we found a viable catalog endpoint and it's
# an exact match or matches the max, go ahead and give
# it a go.
if catalog_discovery and (high_match or exact_match):
yield catalog_discovery
catalog_discovery = None
url_parts.pop()
if allow_version_hack:
# If there were projects or versions in the url they are now gone.
# That means we're left with what should be the unversioned url.
hacked_url = urllib.parse.ParseResult(
url.scheme,
url.netloc,
'/'.join(url_parts),
url.params,
url.query,
url.fragment).geturl()
# Since this is potentially us constructing a base URL from the
# versioned URL - we need to make sure it has a trailing /. But
# we only want to do that if we have built a new URL - not if
# we're using the one from the catalog
if hacked_url != self.catalog_url:
hacked_url = hacked_url.strip('/') + '/'
yield hacked_url
# If we have a catalog discovery url, it either means we didn't
# return it earlier because it wasn't an exact enough match, or
# that we did and it failed. We don't double-request things when
# consuming this, so it's safe to return it here in case we didn't
# already return it.
if catalog_discovery:
yield catalog_discovery
# NOTE(mordred): For backwards compatibility people might have
# added version hacks using the version hack system. The logic
# above should handle most cases, so by the time we get here it's
# most likely to be a no-op
yield self._get_catalog_discover_hack()
elif versioned_discovery and self._saved_project_id:
# We popped a project_id but are either avoiding version hacks
# or we didn't request a version. That means we still want to fetch
# the document from the "catalog url" - but the catalog url is has
# a project_id suffix so is likely not going to work for us. Try
# fetching from the project-less versioned endpoint.
yield versioned_discovery
# As a final fallthrough case, return the actual unmodified url from
# the catalog.
yield self.catalog_url
def _get_catalog_discover_hack(self):
"""Apply the catalog hacks and figure out an unversioned endpoint.
This function is internal to keystoneauth1.
:returns: A url that has been transformed by the regex hacks that
match the service_type.
"""
return _VERSION_HACKS.get_discover_hack(self.service_type, self.url)
def get_discovery(session, url, cache=None, authenticated=False):
"""Return the discovery object for a URL.
Check the session and the plugin cache to see if we have already
performed discovery on the URL and if so return it, otherwise create
a new discovery object, cache it and return it.
    NOTE: This function is expected to be used by keystoneauth and should not
    be needed by users as part of normal usage. A normal user should use
    get_endpoint or get_endpoint_data on `keystoneauth.session.Session`, or
    endpoint_filters on `keystoneauth.session.Session`. However, should the
    user need to perform direct discovery for some reason, this function
    should be used so that the discovery caching is used.
:param session: A session object to discover with.
:type session: keystoneauth1.session.Session
:param str url: The url to lookup.
:param dict cache:
A dict to be used for caching results, in addition to caching them
on the Session. (optional) Defaults to None.
:param bool authenticated:
        Include a token in the discovery call. (optional) Defaults to False.
        If set to None, a token will be used if an auth plugin is installed.
:raises keystoneauth1.exceptions.discovery.DiscoveryFailure:
if for some reason the lookup fails.
:raises keystoneauth1.exceptions.http.HttpError:
An error from an invalid HTTP response.
:returns: A discovery object with the results of looking up that URL.
:rtype: :py:class:`keystoneauth1.discover.Discovery`
"""
# There are between one and three different caches. The user may have
# passed one in. There is definitely one on the session, and there is
# one on the auth plugin if the Session has an auth plugin.
caches = []
# If a cache was passed in, check it first.
if cache is not None:
caches.append(cache)
# If the session has a cache, check it second, since it could have been
# provided by the user at Session creation time.
if hasattr(session, '_discovery_cache'):
caches.append(session._discovery_cache)
# Finally check the auth cache associated with the Session.
if session.auth and hasattr(session.auth, '_discovery_cache'):
caches.append(session.auth._discovery_cache)
# https://example.com and https://example.com/ should be treated the same
# for caching purposes.
parsed_url = urllib.parse.urlparse(url)
if parsed_url.path in ('', '/'):
url = urllib.parse.ParseResult(
parsed_url.scheme,
parsed_url.netloc,
'',
parsed_url.params,
parsed_url.query,
parsed_url.fragment).geturl()
for cache in caches:
disc = cache.get(url)
if disc:
break
else:
disc = Discover(session, url, authenticated=authenticated)
# Whether we get one from fetching or from cache, set it in the
# caches. This assures that if we combine sessions and auth plugins
# that we don't make unnecessary calls.
if disc:
for cache in caches:
cache[url] = disc
return disc
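def _example_get_discovery(session):
    # Illustrative usage sketch, not part of the module API. The URL is an
    # assumption, not a real endpoint. The passed-in dict is consulted and
    # populated alongside the session and auth-plugin caches described
    # above, so repeated lookups avoid extra round trips.
    local_cache = {}
    disc = get_discovery(session, 'https://cloud.example.com/identity',
                         cache=local_cache, authenticated=False)
    return disc.versioned_data_for(min_version='3.0')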
class _VersionHacks(object):
"""A container to abstract the list of version hacks.
This could be done as simply a dictionary but is abstracted like this to
make for easier testing.
"""
def __init__(self):
self._discovery_data = {}
def add_discover_hack(self, service_type, old, new=''):
"""Add a new hack for a service type.
:param str service_type: The service_type in the catalog.
:param re.RegexObject old: The pattern to use.
:param str new: What to replace the pattern with.
"""
hacks = self._discovery_data.setdefault(service_type, [])
hacks.append((old, new))
def get_discover_hack(self, service_type, url):
"""Apply the catalog hacks and figure out an unversioned endpoint.
:param str service_type: the service_type to look up.
:param str url: The original url that came from a service_catalog.
:returns: Either the unversioned url or the one from the catalog
to try.
"""
for old, new in self._discovery_data.get(service_type, []):
new_string, number_of_subs_made = old.subn(new, url)
if number_of_subs_made > 0:
return new_string
return url
_VERSION_HACKS = _VersionHacks()
_VERSION_HACKS.add_discover_hack('identity', re.compile('/v2.0/?$'), '/')
def add_catalog_discover_hack(service_type, old, new):
"""Add a version removal rule for a particular service.
Originally deployments of OpenStack would contain a versioned endpoint in
the catalog for different services. E.g. an identity service might look
like ``http://localhost:5000/v2.0``. This is a problem when we want to use
a different version like v3.0 as there is no way to tell where it is
located. We cannot simply change all service catalogs either so there must
be a way to handle the older style of catalog.
This function adds a rule for a given service type that if part of the URL
matches a given regular expression in *old* then it will be replaced with
the *new* value. This will replace all instances of old with new. It should
therefore contain a regex anchor.
For example the included rule states::
add_catalog_version_hack('identity', re.compile('/v2.0/?$'), '/')
so if the catalog retrieves an *identity* URL that ends with /v2.0 or
/v2.0/ then it should replace it simply with / to fix the user's catalog.
:param str service_type: The service type as defined in the catalog that
the rule will apply to.
:param re.RegexObject old: The regular expression to search for and replace
if found.
:param str new: The new string to replace the pattern with.
"""
_VERSION_HACKS.add_discover_hack(service_type, old, new)
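def _example_catalog_discover_hack():
    # Illustrative: the built-in identity rule registered above rewrites a
    # versioned v2.0 identity endpoint to its unversioned root, while
    # unmatched service types or URLs come back unchanged. The localhost
    # URL is a demonstration value.
    url = _VERSION_HACKS.get_discover_hack('identity',
                                           'http://localhost:5000/v2.0')
    assert url == 'http://localhost:5000/'
    assert _VERSION_HACKS.get_discover_hack(
        'compute', 'http://x/v2.0') == 'http://x/v2.0'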
|
<reponame>billyeatcookies/Biscuit
from .diff_viewer import DiffViewer
from .editor import Editor
from .image_viewer import ImageViewer |
// EqualPvcLists compares two PersistentVolumeClaim lists. When ignoreRandomPostfixInName is true, the last 6 chars of the name (e.g. '-abc12') are ignored during comparison.
func EqualPvcLists(pvcList1, pvcList2 *[]corev1.PersistentVolumeClaim, ignoreRandomPostfixInName bool) (bool, error) {
if len(*pvcList1) != len(*pvcList2) {
return false, nil
}
pvcMap1 := getPersistentVolumeClaimMap(pvcList1, ignoreRandomPostfixInName)
pvcMap2 := getPersistentVolumeClaimMap(pvcList2, ignoreRandomPostfixInName)
for pvcName, pvc1 := range pvcMap1 {
pvc2, ok := pvcMap2[pvcName]
if !ok {
return false, fmt.Errorf("PVS not found by name '%s' in second list", pvcName)
}
if equal, err := EqualPvcs(pvc1, pvc2, ignoreRandomPostfixInName); err != nil || !equal {
return false, err
}
}
return true, nil
} |
/**
* Each assertion name now points to a typechecked Expr rather than an
* untypechecked Exp.
*/
private JoinableList<Err> resolveAssertions(A4Reporter rep, JoinableList<Err> errors, List<ErrorWarning> warns) throws Err {
Context cx = new Context(this, warns);
for (Map.Entry<String,Expr> e : asserts.entrySet()) {
Expr expr = e.getValue();
expr = cx.check(expr).resolve_as_formula(warns);
if (expr.errors.isEmpty()) {
e.setValue(expr);
rep.typecheck("Assertion " + e.getKey() + ": " + expr.type() + "\n");
} else
errors = errors.make(expr.errors);
}
return errors;
} |
def environment_overrides(self, architecture):
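        # Technique sketch: dump the environment, run vcvarsall.bat for the
        # chosen architecture in the same cmd.exe process, dump again, and
        # keep only the variables the script added or changed. The '!!!'
        # marker separates the two dumps in the captured output.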
options = [architecture, 'x86_'+architecture, 'amd64_'+architecture]
configuration = next(option for option in options if os.path.exists(os.path.join(self.__vc_root(), 'bin', option)))
with TempDir() as d:
path = os.path.join(d, 'script.cmd')
with open(path, 'wb') as f:
f.write('\r\n'.join([
'@set',
'@echo !!!',
'@call "{}" {}'.format(os.path.join(self.__vc_root(), 'vcvarsall.bat'), configuration),
'@set',
]).encode())
lines = subprocess.check_output(['cmd', '/c', 'call', path]).decode().splitlines()
before = {}
overrides = {}
is_after = False
for line in lines:
if line == '!!!':
is_after = True
continue
name = str(line.split('=', 1)[0])
value = str(line.split('=', 1)[1])
if is_after:
if name not in before or before[name] != value:
overrides[name] = value
else:
before[name] = value
return overrides |
package systemstat
import (
"fmt"
"strconv"
)
// FreeBsdTop runs FreeBSD's top(1) in batch mode and parses its output into CPU and memory statistics.
func FreeBsdTop() (cpu *Cpu, memory *Memory, err error) {
freeBsdTop := &freeBsdTopParser{
cpu: &Cpu{},
memory: &Memory{},
}
err = newCommand(`(CPU\s+(\d+)?:\s+(\d+)(?:\.(\d+))?% user,\s+(\d+)(?:\.(\d+))?% nice,\s+(\d+)(?:\.(\d+))?% system,\s+(\d+)(?:\.(\d+))?% interrupt,\s+(\d+)(?:\.(\d+))?% idle\n)(?:(Mem:(?:\s+(\d+)([BKMGTE]) Active[,\n])?(?:\s+(\d+)([BKMGTE]) Inact[,\n])?(?:\s+(\d+)([BKMGTE]) Laundry[,\n])?(?:\s+(\d+)([BKMGTE]) Wired[,\n])?(?:\s+(\d+)([BKMGTE]) Buf[,\n])?(?:\s+(\d+)([BKMGTE]) Free[,\n])?)((.|\n)*(Swap:(?:\s+(\d+)([BKMGTE]) Total[,\n])?(?:\s+(\d+)([BKMGTE]) Free[,\n])?(?:\s+(\d+)(?:\.(\d+))?% Inuse[,\n])?(?:\s+(\d+)([BKMGTE]) In[,\n])?(?:\s+(\d+)([BKMGTE]) Out[,\n])?))?)?`,
freeBsdTop,
"top",
"-b", "-P", "0")
if err != nil {
err = fmt.Errorf("FreeBsdTop: %s", err)
return
}
cpu = freeBsdTop.cpu
memory = freeBsdTop.memory
return
}
type freeBsdTopParser struct {
cpu *Cpu
memory *Memory
}
func (c *freeBsdTopParser) Parse(parsedList [][]string) (err error) {
if c.cpu == nil {
c.cpu = &Cpu{}
}
if c.memory == nil {
c.memory = &Memory{Unit: "byte"}
}
respCpu, respMemory := c.cpu, c.memory
var totalSum, usedSum int64
var cpuDetailInx int
for _, row := range parsedList {
if len(row) != 39 {
err = fmt.Errorf("freeBsdTopParser.Parse: parsed list length mismatch, want: %d, have: %d", 39, len(row))
return
}
if len(row[1]) > 0 {
var total, available, used, idle1, idle2 int64
idle1, err = strconv.ParseInt(row[11], 10, 64)
if err != nil {
err = fmt.Errorf("freeBsdTopParser.Parse: must be a number at %d index: %s", 11, row[11])
return
}
if len(row[12]) > 0 {
idle2, err = strconv.ParseInt(row[12], 10, 64)
if err != nil {
err = fmt.Errorf("freeBsdTopParser.Parse: must be a number at %d index: %s", 12, row[12])
return
}
}
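			// top prints idle as "dd.d%"; the parser works in tenths of
			// a percent (a 0-1000 scale) so the single decimal digit is
			// kept without floating point: used = 1000 - (idle1*10 + idle2).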
total = 1000
available = idle1*10 + idle2
used = total - available
if len(respCpu.DetailList) == cpuDetailInx {
respCpu.DetailList = append(respCpu.DetailList, &CpuDetail{})
}
respCpuDetail := respCpu.DetailList[cpuDetailInx]
respCpuDetail.Name = "CPU " + row[2]
respCpuDetail.Percent = calcPercent(total, used)
totalSum += total
usedSum += used
cpuDetailInx++
}
if len(row[13]) > 0 {
var active, inactive, laundry, wired, buf, free int64
if len(row[14]) > 0 {
active, err = strconv.ParseInt(row[14], 10, 64)
if err != nil {
err = fmt.Errorf("freeBsdTopParser.Parse: must be a number at %d index: %s", 14, row[14])
return
}
active *= toByte(row[15])
}
if len(row[16]) > 0 {
inactive, err = strconv.ParseInt(row[16], 10, 64)
if err != nil {
err = fmt.Errorf("freeBsdTopParser.Parse: must be a number at %d index: %s", 16, row[16])
return
}
inactive *= toByte(row[17])
}
if len(row[18]) > 0 {
laundry, err = strconv.ParseInt(row[18], 10, 64)
if err != nil {
err = fmt.Errorf("freeBsdTopParser.Parse: must be a number at %d index: %s", 18, row[18])
return
}
laundry *= toByte(row[19])
}
if len(row[20]) > 0 {
wired, err = strconv.ParseInt(row[20], 10, 64)
if err != nil {
err = fmt.Errorf("freeBsdTopParser.Parse: must be a number at %d index: %s", 20, row[20])
return
}
wired *= toByte(row[21])
}
if len(row[22]) > 0 {
buf, err = strconv.ParseInt(row[22], 10, 64)
if err != nil {
err = fmt.Errorf("freeBsdTopParser.Parse: must be a number at %d index: %s", 22, row[22])
return
}
buf *= toByte(row[23])
}
if len(row[24]) > 0 {
free, err = strconv.ParseInt(row[24], 10, 64)
if err != nil {
err = fmt.Errorf("freeBsdTopParser.Parse: must be a number at %d index: %s", 24, row[24])
return
}
free *= toByte(row[25])
}
used := active + inactive + laundry + wired + buf
respMemory.Total = used + free
respMemory.Used = used
respMemory.Percent = calcPercent(respMemory.Total, respMemory.Used)
}
}
respCpu.Percent = calcPercent(totalSum, usedSum)
return
}
// toByte returns the multiplier, in bytes, for a top(1) unit suffix.
// Unknown suffixes return 0. 'E' is treated as exabytes (1024^6); this
// assumes top's suffix ladder B, K, M, G, T, (P,) E even though this
// parser's regex does not capture 'P'.
func toByte(unit string) int64 {
	switch unit {
	case "B":
		return 1
	case "K":
		return 1 << 10
	case "M":
		return 1 << 20
	case "G":
		return 1 << 30
	case "T":
		return 1 << 40
	case "E":
		return 1 << 60
	}
	return 0
}
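// Example (illustrative): a "12M" reading from top corresponds to
// 12 * toByte("M") = 12 * 1024 * 1024 bytes; an unrecognised suffix yields
// a multiplier of 0 and therefore zeroes the reading.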
|
import android.os.Parcel;
import android.os.Parcelable;

/**
* @since 2020
* @author Huawei DTSE India
*/
public class CourseContentDataModel implements Parcelable {
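    /**
     * The Course name.
     */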
private String courseName;
/**
* The Course url.
*/
private String courseUrl;
/**
* The Course id.
*/
private String courseId;
/**
* Instantiates a new Course content data model.
*
* @param courseName the course name
* @param courseUrl the course url
* @param courseId the course id
*/
public CourseContentDataModel(String courseName, String courseUrl, String courseId) {
this.courseId = courseId;
this.courseName = courseName;
this.courseUrl = courseUrl;
}
/**
* Gets course name.
*
* @return the course name
*/
public String getCourseName() {
return courseName;
}
/**
* Sets course name.
*
* @param courseName the course name
*/
public void setCourseName(String courseName) {
this.courseName = courseName;
}
/**
* Gets course url.
*
* @return the course url
*/
public String getCourseUrl() {
return courseUrl;
}
/**
* Sets course url.
*
* @param courseUrl the course url
*/
public void setCourseUrl(String courseUrl) {
this.courseUrl = courseUrl;
}
/**
* Gets course id.
*
* @return the course id
*/
public String getCourseId() {
return courseId;
}
/**
* Sets course id.
*
* @param courseId the course id
*/
public void setCourseId(String courseId) {
this.courseId = courseId;
}
@Override
public int describeContents() {
return 0;
}
@Override
public void writeToParcel(Parcel dest, int flags) {
dest.writeString(this.courseName);
dest.writeString(this.courseUrl);
dest.writeString(this.courseId);
}
/**
* Instantiates a new Course content data model.
*
* @param in the in
*/
protected CourseContentDataModel(Parcel in) {
this.courseName = in.readString();
this.courseUrl = in.readString();
this.courseId = in.readString();
}
/**
* The constant CREATOR.
*/
public static final Parcelable.Creator<CourseContentDataModel> CREATOR =
new Parcelable.Creator<CourseContentDataModel>() {
@Override
public CourseContentDataModel createFromParcel(Parcel source) {
return new CourseContentDataModel(source);
}
@Override
public CourseContentDataModel[] newArray(int size) {
return new CourseContentDataModel[size];
}
};
} |
import pandas as pd
from ..core.project_manager import ProjectManager
from uquake.grid.nlloc import VelocityGrid3D, VelocityGridEnsemble
from uquake.nlloc.nlloc import Srces, Site, Observations
from uquake.core.event import (Catalog, Origin, Arrival, Pick,
WaveformStreamID, Ray, ResourceIdentifier)
from uquake.core import UTCDateTime
from uquake.core.logging import logger
import numpy as np
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
import dataclasses
from pydantic import BaseModel, conlist
from enum import Enum
from pydantic.typing import List, Union
from datetime import datetime
from uuid import uuid4
from functools import partial
import scipy as sc
from scipy.sparse import csr_matrix
from ..tomography import data as ekdata
__cpu_count__ = cpu_count()
class Phase(str, Enum):
p = 'P'
s = 'S'
def __str__(self):
return self.value
def __expr__(self):
return self.__str__()
def __call__(self):
return self.__str__()
class EventData(BaseModel):
location: conlist(float, min_items=3, max_items=3)
location_correction: conlist(float, min_items=3, max_items=3) = [0, 0, 0]
resource_id: str=None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resource_id = ResourceIdentifier()
@property
def loc(self):
return np.array(self.location)
@property
def id(self):
return self.resource_id.id
def __str__(self):
return f'{self.id},{self.location},{self.location_correction}'
def __repr__(self):
return str(self)
class EventEnsemble(object):
    def __init__(self, events: List[EventData] = None):
        # a fresh list per instance; a mutable default argument would be
        # shared across instances and mutated by append()
        self.events = list(events) if events else []
        self.dict = {}
        self.ids = np.arange(0, len(self.events))
        for event in self.events:
            self.dict[event.id] = event
def __len__(self):
return len(self.events)
def __getitem__(self, items):
if isinstance(items, (list, np.ndarray)):
events = []
for item in items:
if isinstance(item, str):
for event in self.events:
if event.id == item:
events.append(event)
elif isinstance(item, int):
events.append(self.events[item])
else:
raise TypeError
return EventEnsemble(events=events)
elif isinstance(items, str):
for event in self.events:
if event.id == items:
return EventEnsemble(events=[event])
elif isinstance(items, int):
return EventEnsemble(events=[self.events[items]])
else:
raise TypeError
def __str__(self):
out_str = ''
for event in self.events:
out_str += f'{event}\n'
return out_str
def __repr__(self):
return str(self)
def select(self, ids: Union[List[int], int]):
return self[ids]
def append(self, event: EventData):
self.events.append(event)
self.dict[event.id] = event
@property
def locs(self):
locs = []
for event in self.events:
locs.append(event.location)
return locs
    def to_ek_event_table_data(self):
        ev_data = [(event.resource_id, event_id, event.location,
                    event.location_correction) for event, event_id
                   in zip(self.events, self.ids)]
        # NOTE: assumes ekdata.EKEventTable accepts the assembled rows
        return ekdata.EKEventTable(ev_data)
class ArrivalTimeData(BaseModel):
event_id: str
site_id: str
phase: Phase
resource_id: str=None
arrival_time: datetime
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resource_id = ResourceIdentifier()
@property
def id(self):
return self.resource_id.id
def __add__(self, other):
if not isinstance(other, type(self)):
raise TypeError(f'operation not permitted between {type(self)} '
f'and {type(other)}')
if self.event_id != other.event_id:
raise ValueError(f'operation only permitted between two objects'
f'with the same event id')
return self.arrival_time + other.arrival_time
def __repr__(self):
return f'event id: {self.event_id}, site id: {self.site_id}, ' \
f'arrival time:{self.arrival_time}'
class ArrivalTimeEnsemble():
    def __init__(self, arrival_times: List[ArrivalTimeData] = None):
        # avoid a shared mutable default; append() mutates this list
        self.arrival_times = list(arrival_times) if arrival_times else []
def append(self, arrival_time):
if not isinstance(arrival_time, ArrivalTimeData):
raise TypeError
self.arrival_times.append(arrival_time)
def __repr__(self):
out_str = ''
for arrival_time in self.arrival_times:
out_str += f'{str(arrival_time)}\n'
return out_str
def __getitem__(self, items):
if isinstance(items, (list, np.ndarray)):
arrival_times = []
for arrival_time in self.arrival_times:
if arrival_time.id in list(items):
arrival_times.append(arrival_time)
return ArrivalTimeEnsemble(arrival_times=arrival_times)
elif isinstance(items, str):
for arrival_time in self.arrival_times:
if arrival_time.id == items:
return arrival_time
else:
raise TypeError
def to_dict(self):
out_dict = {'id': [],
'event_id': [],
'site_id': [],
'arrival_time': [],
'phase': [],
'resource_id': []}
for arrival_time in self.arrival_times:
out_dict['id'].append(arrival_time.id)
out_dict['event_id'].append(arrival_time.event_id)
out_dict['site_id'].append(arrival_time.site_id)
out_dict['arrival_time'].append(arrival_time.arrival_time)
out_dict['phase'].append(arrival_time.phase)
out_dict['resource_id'].append(arrival_time.resource_id)
return out_dict
@property
def event_ids(self):
event_ids = [arrival_time.event_id for arrival_time
in self.arrival_times]
return list(np.unique(event_ids))
@property
def site_ids(self):
site_ids = [arrival_time.site_id for arrival_time
in self.arrival_times]
return list(np.unique(site_ids))
def groupby(self, column, **kwargs):
df = pd.DataFrame(self.to_dict()).set_index('id')
return df.groupby(column, **kwargs)
class TomoRay(Ray):
def __init__(self, ray: Ray):
self.__dict__ = {}
for key in ray.__dict__.keys():
self.__dict__[key] = ray.__dict__[key]
        dist = np.array([0] + list(np.linalg.norm(np.diff(self.nodes,
                                                          axis=0), axis=1)))
self.dist = np.cumsum(dist)
self.int_x = sc.interpolate.interp1d(self.dist, self.nodes[:, 0])
self.int_y = sc.interpolate.interp1d(self.dist, self.nodes[:, 1])
self.int_z = sc.interpolate.interp1d(self.dist, self.nodes[:, 2])
self.velocity = None
self.epsilon = 10
self.threshold = 0.1
self.sensitivity_kernel = None
def add_velocity(self, velocity: VelocityGrid3D):
self.velocity = velocity
x, y, z, v = velocity.flattens()
self.vel_x = x
self.vel_y = y
self.vel_z = z
self.flatten_vel = v
    def set_epsilon(self, epsilon):
        self.epsilon = epsilon
def sensitivity(self, distance_along_ray):
"""
measures the sensitivity with respect to the velocity model parameters
:NOTE: currently, the sensitivity assumes a rbf inverse distance
interpolation kernel
"""
from time import time
t0 = time()
location = self.interpolate_coordinate(distance_along_ray)
result = self.rbf_interpolation_sensitivity(location)
# result = self.velocity.rbf_interpolation_sensitivity(location,
# self.epsilon,
# threshold=
# self.threshold)
t1 = time()
        logger.info(f'done measuring sensitivity in {t1 - t0}')
return result
# return result.reshape(np.prod(result.shape))
def interpolate_coordinate(self, distance_along_ray):
"""
return the coordinates at a given distance along the ray
"""
if not (0 <= distance_along_ray <= np.max(self.dist)):
raise ValueError(f'the value for "distance_along_ray" must be'
f'larger or equal to 0 and smaller or equal to '
f'{np.max(self.dist)}')
return np.array([self.int_x(distance_along_ray),
self.int_y(distance_along_ray),
self.int_z(distance_along_ray)])
def rbf_interpolation_sensitivity(self, location):
"""
calculate the sensitivity of each element given a location
:param location: location in model space at which the interpolation
occurs
:param epsilon: the standard deviation of the gaussian kernel
:param threshold: threshold relative to the maximum value below which
the weights are considered 0.
:rparam: the sensitivity matrix
"""
# calculating the distance between the location and every grid points
dist = np.sqrt((self.vel_x - location[0]) ** 2 +
(self.vel_y - location[1]) ** 2 +
(self.vel_z - location[2]) ** 2)
sensitivity = np.exp(-(dist / self.epsilon) ** 2)
sensitivity[sensitivity < np.max(sensitivity) * self.threshold] = 0
# sensitivity = sensitivity / np.sum(sensitivity)
return sensitivity
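    # The weight of a grid node at distance d from the query point is
    # exp(-(d / epsilon) ** 2), a Gaussian radial basis function, so epsilon
    # acts as a correlation length in model units and `threshold` sparsifies
    # the kernel by zeroing weights below threshold * max(weight).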
def integrate_sensitivity(self, velocity: VelocityGrid3D, epsilon=10,
threshold=0.1, max_it=100, compress_matrix=True):
from time import time
t0 = time()
self.set_epsilon(epsilon)
self.add_velocity(velocity)
self.threshold = threshold
y0 = np.zeros(np.prod(velocity.shape))
slowness = 1 / velocity.data.reshape(y0.shape)
sensitivity = y0
for i, dl in enumerate(np.diff(self.dist)):
distance_along_ray = self.dist[i] + dl
sensitivity += self.sensitivity(distance_along_ray) * slowness * dl
# sensitivity = sensitivity / np.sum(sensitivity) * self.travel_time
if compress_matrix:
sensitivity = csr_matrix(sensitivity)
self.sensitivity_kernel = sensitivity
t1 = time()
logger.info(f'done calculating sensitivity in {t1 - t0:0.2f} seconds')
return
# to calculate frechet just making sure the sensitivity is normalized,
# the sensitivity is expressed in s (time)
# integrator = sc.integrate.RK45(self.sensitivity, 0, y0, self.length)
#
# for i in range(0, max_it):
# integrator.step()
# if integrator.status == 'finished':
# break
#
# return integrator.y / np.sum(integrator.y)
class TomoRayEnsemble(object):
    def __init__(self, events: EventEnsemble, arrivals: ArrivalTimeEnsemble,
                 rays: List[Ray] = None):
        self.events = events
        self.arrivals = arrivals
        self.rays = list(rays) if rays else []
        self.site_ids = []
        for ray in self.rays:
            self.site_ids.append(ray.site_code)
# for loc
    def append(self, ray: Ray):
        self.rays.append(ray)
class Tomography(ProjectManager):
def __init__(self, base_projects_path, project_name, network_code,
use_srces=False, solve_velocity=True, solve_location=True,
**kwargs):
self.events = None
self.observations = None
self.rays = None
self.solve_location = solve_location
self.solve_velocity = solve_velocity
        super().__init__(base_projects_path, project_name, network_code,
                         use_srces=use_srces, **kwargs)
        self.paths.tomography = self.paths.root / 'tomography'
def synthetic(self, dims=[100, 100, 100],
origin=[0, 0, 0],
spacing=[10, 10, 10],
p_mean=5000, p_std=200, s_mean=3000, s_std=125,
model_smoothing=3,
nsites=20, nevents=1000,
                  travel_time_completeness=0.7, perturbation=0.005,
random_seed=None, multi_threaded=True):
self.__add_random_velocities__(dims, origin, spacing,
p_mean, p_std, s_mean, s_std,
model_smoothing,
random_seed=random_seed)
self.__add_random_sites__(nsites, random_seed=random_seed)
self.__add_random_events__(nevents, random_seed=random_seed)
self.init_travel_time_grids(multi_threaded=multi_threaded)
self.__add_random_travel_times__()
def __add_random_velocities__(self, dims, origin, spacing,
p_mean, p_std, s_mean, s_std,
smoothing, random_seed=None,
multi_threaded=True):
p_velocity = VelocityGrid3D(self.network_code, dims, origin,
spacing)
s_velocity = VelocityGrid3D(self.network_code, dims, origin,
spacing, phase='S')
p_velocity.fill_random(p_mean, p_std, smoothing, random_seed)
if random_seed is not None:
random_seed += 10
s_velocity.fill_random(s_mean, s_std, smoothing, random_seed)
# self.add_velocity(p_velocity)
# self.add_velocity(s_velocity)
self.add_velocities(VelocityGridEnsemble(p_velocity, s_velocity),
initialize_travel_times=False)
def __add_random_sites__(self, nstations, random_seed=None):
"""
create random stations
"""
if not self.has_p_velocity():
logger.info('The project does not contain a p wave velocity...'
' exiting')
return
if not self.has_s_velocity():
logger.info('The project does not contain a s wave velocity...'
' exiting')
return
sta_locs = self.p_velocity.generate_random_points_in_grid(
nstations, seed=random_seed)
sites = [Site(label=f'STA{i:02d}', x=sta_loc[0], y=sta_loc[1],
z=sta_loc[2]) for i, sta_loc in enumerate(sta_locs)]
self.add_srces(Srces(sites), initialize_travel_time=False)
def __add_random_events__(self, nevents, random_seed=None):
if not self.has_p_velocity():
logger.info('The project does not contain a p wave velocity...'
' exiting')
return
if not self.has_s_velocity():
logger.info('The project does not contain a s wave velocity...'
' exiting')
return
events = self.p_velocity.\
generate_random_points_in_grid(nevents, seed=random_seed)
self.events = EventEnsemble()
        for event in events:
            self.events.append(EventData(location=list(event)))
return self.events
    def __add_random_travel_times__(self, completeness=0.6,
                                    min_observations=5,
                                    p_pick_error=0.001,
                                    s_pick_error=0.001):
        """
        Generate random travel time observations.

        :param completeness: average fraction of event-sensor-phase
        pairs that receive an observation
        :param min_observations: minimum number of observations kept
        for each event
        :param p_pick_error: standard deviation, in seconds, of the
        Gaussian perturbation added to the P travel times
        :param s_pick_error: standard deviation, in seconds, of the
        Gaussian perturbation added to the S travel times
        """

        origin_time = UTCDateTime()
        self.arrival_times = ArrivalTimeEnsemble()

        perturbation = {'P': p_pick_error,
                        'S': s_pick_error}

        for event_id in self.events.dict.keys():
            candidates = []
            for sensor in self.srces:
                for phase in ['P', 'S']:
                    travel_time = self.travel_times.select(
                        sensor.label, phase=phase)[0].interpolate(
                        self.events.dict[event_id].loc)[0]

                    travel_time += np.random.randn() * perturbation[phase]
                    pick_time = origin_time + travel_time

                    candidates.append(
                        ArrivalTimeData(event_id=event_id,
                                        site_id=sensor.label,
                                        arrival_time=pick_time,
                                        phase=phase))

            # keep, on average, a fraction `completeness` of the candidate
            # observations, but never fewer than `min_observations` per event
            keep = np.random.rand(len(candidates)) < completeness
            n_min = min(min_observations, len(candidates))
            if keep.sum() < n_min:
                forced = np.random.choice(len(candidates), n_min,
                                          replace=False)
                keep[forced] = True

            for arrival, keep_it in zip(candidates, keep):
                if keep_it:
                    self.arrival_times.append(arrival)
@staticmethod
    def ray_tracer(velocity, data):
        travel_time_grid, arrival_id, loc = data

        ray = travel_time_grid.ray_tracer(loc)
        ray.arrival_id = arrival_id

        tomo_ray = TomoRay(ray)
        tomo_ray.integrate_sensitivity(velocity)
        return tomo_ray
    def ray_tracing(self, cpu_utilisation=0.9):
        """
        Calculate the rays for every site-event pair and integrate the
        sensitivity along each ray.
        """

        num_threads = int(np.ceil(cpu_utilisation * __cpu_count__))

        arrival_time_grouped = self.arrival_times.groupby(
            ['site_id', 'phase'])

        rays = []
        for site_id in tqdm(self.arrival_times.site_ids):
            for phase in Phase:
                df = arrival_time_grouped.get_group((site_id, phase.value))
                tt = self.travel_times.select(site_id,
                                              phase=phase.value)[0]
                locs = self.events[df.event_id.values].locs
                arrival_ids = df.resource_id.values
                tts = [tt] * len(arrival_ids)

                data = [(travel_time, arrival_id, loc)
                        for travel_time, arrival_id, loc
                        in zip(tts, arrival_ids, locs)]

                if phase.value == 'P':
                    velocity = self.p_velocity
                else:
                    velocity = self.s_velocity

                ray_tracer = partial(self.ray_tracer, velocity)

                with Pool(num_threads) as pool:
                    rays_tmp = list(tqdm(pool.imap(ray_tracer, data),
                                         total=len(data)))

                rays.extend(rays_tmp)

        return rays
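
    # A minimal driver sketch (hypothetical names): trace the rays for a
    # synthetic project and inspect one sensitivity kernel.
    #
    #   tomo = Tomography('/tmp/projects', 'demo', 'NET')
    #   tomo.synthetic(nsites=5, nevents=50, random_seed=1)
    #   rays = tomo.ray_tracing(cpu_utilisation=0.5)
    #   kernel = rays[0].sensitivity_kernel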
|
<filename>src/main/java/com/jmsoftware/datastructuresandalgorithms/DataStructuresAndAlgorithmsApplication.java
package com.jmsoftware.datastructuresandalgorithms;
import com.jmsoftware.datastructuresandalgorithms.common.configuration.ProjectProperty;
import com.jmsoftware.datastructuresandalgorithms.common.configuration.ServerConfiguration;
import lombok.extern.slf4j.Slf4j;
import lombok.val;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import java.time.Duration;
import java.time.Instant;
import java.util.TimeZone;
/**
* <h1>DataStructuresAndAlgorithmsApplication</h1>
* <p>
 * Bootstrap and entry point of the Data Structures and Algorithms application.
*
* @author <NAME> (鍾俊), e-mail: <EMAIL>
* @date 5/25/20 7:50 PM
**/
@Slf4j
@SpringBootApplication
public class DataStructuresAndAlgorithmsApplication {
private static final String LINE_SEPARATOR = System.lineSeparator();
private static ProjectProperty projectProperty;
private static ServerConfiguration serverConfiguration;
public DataStructuresAndAlgorithmsApplication(ProjectProperty projectProperty,
ServerConfiguration serverConfiguration) {
DataStructuresAndAlgorithmsApplication.projectProperty = projectProperty;
DataStructuresAndAlgorithmsApplication.serverConfiguration = serverConfiguration;
}
public static void main(String[] args) {
val startInstant = Instant.now();
SpringApplication.run(DataStructuresAndAlgorithmsApplication.class, args);
val endInstant = Instant.now();
val duration = Duration.between(startInstant, endInstant);
log.info("🥳 Congratulations! 🎉");
log.info("🖥 {}@{} started!", projectProperty.getProjectArtifactId(), projectProperty.getVersion());
log.info("⏳ Deployment duration: {} seconds ({} ms)", duration.getSeconds(), duration.toMillis());
log.info("⏰ App started at {} (timezone - {})", endInstant, TimeZone.getDefault().getDisplayName());
log.info("{} App running at{} - Local: http://localhost:{}{}/{} - Network: {}/",
LINE_SEPARATOR, LINE_SEPARATOR, serverConfiguration.getServerPort(), projectProperty.getContextPath(),
LINE_SEPARATOR, serverConfiguration.getBaseUrl());
}
}
|
// Do executes WebAuthn.getCredentials against the provided context.
//
// returns:
// credentials
func (p *GetCredentialsParams) Do(ctx context.Context) (credentials []*Credential, err error) {
var res GetCredentialsReturns
err = cdp.Execute(ctx, CommandGetCredentials, p, &res)
if err != nil {
return nil, err
}
return res.Credentials, nil
} |
<filename>apiMangos/src/main/java/cl/vk/api/mangos/dao/ConfigurationDao.java
package cl.vk.api.mangos.dao;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import cl.vk.api.mangos.conf.ServerParam;
import cl.vk.api.mangos.dto.Configuration;
import cl.vk.api.mangos.dto.ConfigurationResponse;
import cl.vk.api.mangos.dto.GetConfiguration;
import cl.vk.api.mangos.parameter.ApiParameters;
import cl.vk.mangos.transmision.ConfigurationUtils;
import cl.vk.mangos.transmision.JWTutils;
public class ConfigurationDao extends JWTutils {
    private static final Logger LOGGER = LoggerFactory.getLogger(ConfigurationDao.class);
@Value("${mangos.security}")
protected Boolean seguridad;
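    // Read the security flag from the shared configuration so the DAO also
    // works when created outside the Spring context (assumption: the
    // "mangos.security" key mirrors the injected @Value above).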
public ConfigurationDao() {
if(ConfigurationUtils.getPropiedades().get("mangos.security") != null)
this.seguridad = (Boolean) ConfigurationUtils.getPropiedades().get("mangos.security");
}
public ConfigurationResponse setConfiguration(String jwt, Configuration input) {
ConfigurationResponse response = new ConfigurationResponse();
if (!this.seguridad || validarJWT(jwt)) {
try {
ServerParam.setPass(input.getPass());
ServerParam.setPort(input.getPort());
ServerParam.setStatus(true);
ServerParam.setUrl(input.getUrl());
ServerParam.setUser(input.getUser());
response.setCod("0");
response.setDsc("Configuracion exitosa");
} catch (Exception e) {
response.setCod("1");
response.setDsc("Error al setear las configuraciones");
LOGGER.error("Error al setear los parametros ", e);
}
} else
{
response.setCod("1");
response.setDsc("Acceso no autorizado");
}
return response;
}
public GetConfiguration getConf(String jwt) {
GetConfiguration response = new GetConfiguration();
if (!this.seguridad || validarJWT(jwt)) {
ApiParameters parametros = new ApiParameters();
parametros.setPass(ServerParam.getPass());
parametros.setPort(ServerParam.getPort());
parametros.setUrl(ServerParam.getUrl());
parametros.setUser(ServerParam.getUser());
response.setParametros(parametros);
}
return response;
}
}
|
/**
 * Test that an exception is thrown for a missing endTime attribute in test_case_endtime.xml.
*/
TEST(XMLReader, test_case_missing_endtime) {
EXPECT_THROW(XMLReader<2> reader("../../tests/XMLReader/input/test_case_endtime.xml"), std::invalid_argument);
EXPECT_THROW(XMLReader<3> reader("../../tests/XMLReader/input/test_case_endtime.xml"), std::invalid_argument);
} |
/**
* The XML implementation of an SnapshotSerializer. It is capable of
* transforming a Snapshot object into a serialized XML representation.
*
* @date Oct 20, 2004
*/
public class SnapshotSerializerXML implements SnapshotSerializer{
@Override
public String serializeSnapshot(Snapshot snapshot) throws TransformationException {
String result = null;
if(snapshot == null){
            throw new TransformationException("Supplied Snapshot must not be null.");
}
if(snapshot instanceof ReaderSnapshot){
result = "<readerSnapshot><id>" + snapshot.getId() + "</id></readerSnapshot>";
} else {
result = "<writerSnapshot><id>" + snapshot.getId() + "</id></writerSnapshot>";
}
return result;
}
} |
/*
* Remove \DDD constructs from the input. See RFC 1035, section 5.1.
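 *
 * For example, "a\065b" collapses to "aAb" (decimal 065 is 'A'); a value
 * above 255 such as \999 drops the backslash and keeps the digits, with a
 * warning. Returns the length of the resulting string.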
*/
static size_t
zoctet(char *text)
{
char *s;
char *p;
for (s = p = text; *s; ++s, ++p) {
assert(p <= s);
if (s[0] != '\\') {
*p = *s;
} else if (isdigit((int)s[1]) && isdigit((int)s[2]) && isdigit((int)s[3])) {
int val = (hexdigit_to_int(s[1]) * 100 +
hexdigit_to_int(s[2]) * 10 +
hexdigit_to_int(s[3]));
if (0 <= val && val <= 255) {
s += 3;
*p = val;
} else {
zc_warning("text escape \\DDD overflow");
*p = *++s;
}
} else if (s[1] != '\0') {
*p = *++s;
} else {
zc_warning("trailing backslash ignored");
--p;
}
}
*p = '\0';
return p - text;
} |
/**
 * Match a Resolvable to a Rule.
* @param object resolvable
* @return true if rule matches
*/
public final boolean match(final Resolvable object) {
if (object != null) {
try {
return (evaluator.match(expr, object));
} catch (ExpressionParserException e) {
e.printStackTrace();
}
}
return (false);
} |
def formatGazeData(input_dir):
    """
    Load raw SMI gaze data and return a tuple (gaze_df, frame_timestamps),
    with gaze positions normalized by the world camera frame size.
    """
    raw_df = pd.read_table(join(input_dir, 'SMI_raw.txt'))

    ts = raw_df['Time'] / 1000
vid = cv2.VideoCapture(join(input_dir, 'SMI_worldCamera.avi'))
if OPENCV3:
vidSize = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
else:
vidSize = (int(vid.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)), int(vid.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
vid.release()
norm_pos_x = raw_df['B POR X [px]'] / vidSize[0]
norm_pos_y = raw_df['B POR Y [px]'] / vidSize[1]
frameIndices = np.zeros(shape=raw_df.shape[0])
frameCounter = 0
prevLabel = raw_df.Frame.iloc[0]
for i, frame in enumerate(raw_df.Frame):
if frame != prevLabel:
frameCounter += 1
frameIndices[i] = frameCounter
prevLabel = frame
frame_idx = pd.Series(frameIndices.astype(int), name='frame_idx')
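    # confidence is 0 during blinks and 1 otherwise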
conf = np.zeros(shape=raw_df.shape[0])
for i, eventLabel in enumerate(raw_df['B Event Info']):
if eventLabel == 'Blink':
conf[i] = 0
else:
conf[i] = 1
gaze_df = pd.DataFrame({'timestamp': ts, 'frame_idx': frame_idx,
'norm_pos_x': norm_pos_x, 'norm_pos_y': norm_pos_y,
'confidence': conf})
colOrder = ['timestamp', 'frame_idx', 'confidence', 'norm_pos_x', 'norm_pos_y']
frame_timestamps = getVidFrameTimestamps(join(input_dir, 'SMI_worldCamera.avi'))
return gaze_df[colOrder], frame_timestamps |
<reponame>aviarytech/dids
import { SecretTypeNotFound } from "../src/Secret";
import { JSONSecretResolver } from "../src";
import { JsonWebKey } from "@aviarytech/crypto-core";
test("secret resolver can resolve a JsonWebKey2020 JSON file", async () => {
const secretFile = require("../__fixtures__/JsonWebKey2020.example.json");
const resolver = new JSONSecretResolver(secretFile);
const secret = await resolver.resolve(secretFile["id"]);
const jwk = await secret.asJsonWebKey();
expect(secret.id).toBe("did:web:example.com#key-0");
expect(secret.type).toBe("JsonWebKey2020");
expect(jwk.privateKeyJwk.crv).toBe("Ed25519");
expect(jwk.privateKeyJwk.d).toBe(
"<KEY>"
);
expect(jwk.privateKeyJwk.kty).toBe("OKP");
expect(jwk.privateKeyJwk.x).toBe(
"<KEY>"
);
});
test("secret resolver can resolve a X25519KeyAgreementKey2019 JSON file", async () => {
const secretFile = require("../__fixtures__/X25519KeyAgreementKey2019.example.json");
const resolver = new JSONSecretResolver(secretFile);
const secret = await resolver.resolve(secretFile["id"]);
const jwk = await secret.asJsonWebKey();
expect(secret.id).toBe("did:web:example.com#key-1");
expect(secret.type).toBe("X25519KeyAgreementKey2019");
expect(secret.privateKeyBase58).toBe(
"<KEY>"
);
expect(jwk.privateKeyJwk.crv).toBe("X25519");
expect(jwk.privateKeyJwk.kty).toBe("OKP");
expect(jwk.privateKeyJwk.d).toBe(
"<KEY>"
);
});
test("secret throws error when doesn't support a key type", async () => {
try {
const resolver = new JSONSecretResolver({
id: "did:web:example.com#key-1",
type: "BAR",
});
} catch (e) {
expect(e instanceof SecretTypeNotFound).toBeTruthy();
}
});
|