date
stringlengths 10
10
| nb_tokens
int64 60
629k
| text_size
int64 234
1.02M
| content
stringlengths 234
1.02M
|
---|---|---|---|
2018/03/21 | 3,142 | 11,778 | <issue_start>username_0: How do I use DOTGenerator to convert a parse tree to DOT/graphviz format in ANTLR4?
I found [this related question](https://stackoverflow.com/questions/34832518/antlr4-dotgenerator-example) but the only answer uses TreeViewer to display the tree in a JPanel and that's not what I'm after. [This other question](https://stackoverflow.com/questions/48890618/is-it-still-possible-to-visualise-parsetrees-via-graphviz-dot-in-antlr-4) is exactly what I need but it didn't get answered. Everything else I stumbled upon relates to DOTTreeGenerator from ANTLR3 and it's not helpful.
I'm using Java with the ANTLR4 plugin for IntelliJ.<issue_comment>username_1: I have a small project that has all kind of utility methods w.r.t. ANTLR4 grammar debugging/testing. I haven't found the time to provide it of some proper documentation so that I can put it on Github. But here's a part of it responsible for creating a DOT file from a grammar.
Stick it all in a single file called `Main.java` (and of course generate the lexer and parser for `Expression.g4`), and you will see a DOT string being printed to your console:
```java
import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.tree.ParseTree;
import java.util.*;
// Demo entry point: parse an arithmetic expression with the ANTLR-generated
// ExpressionLexer/ExpressionParser and print the parse tree as a Graphviz
// DOT string on stdout.
// NOTE(review): ExpressionLexer and ExpressionParser must first be generated
// from the Expression.g4 grammar quoted below, or this will not compile.
public class Main {
public static void main(String[] args) {
/*
// Expression.g4
grammar Expression;
expression
: '-' expression
| expression ('*' | '/') expression
| expression ('+' | '-') expression
| '(' expression ')'
| NUMBER
| VARIABLE
;
NUMBER
: [0-9]+ ( '.' [0-9]+ )?
;
VARIABLE
: [a-zA-Z] [a-zA-Z0-9]+
;
SPACE
: [ \t\r\n] -> skip
;
*/
// The input expression to parse.
String source = "3 + 42 * (PI - 3.14159)";
// Standard ANTLR plumbing: characters -> lexer -> token stream -> parser.
ExpressionLexer lexer = new ExpressionLexer(CharStreams.fromString(source));
ExpressionParser parser = new ExpressionParser(new CommonTokenStream(lexer));
// Wrap the parse tree; displaySymbolicName=false shows raw token text only.
SimpleTree tree = new SimpleTree.Builder()
.withParser(parser)
.withParseTree(parser.expression())
.withDisplaySymbolicName(false)
.build();
// DOT options: a graph title, and circles for token (lexer-rule) nodes.
DotOptions options = new DotOptions.Builder()
.withParameters(" labelloc=\"t\";\n label=\"Expression Tree\";\n\n")
.withLexerRuleShape("circle")
.build();
System.out.println(new DotTreeRepresentation().display(tree, options));
}
}
class DotTreeRepresentation {
public String display(SimpleTree tree) {
return display(tree, DotOptions.DEFAULT);
}
public String display(SimpleTree tree, DotOptions options) {
return display(new InOrderTraversal().traverse(tree), options);
}
public String display(List nodes, DotOptions options) {
StringBuilder builder = new StringBuilder("graph tree {\n\n");
Map nodeNameMap = new HashMap<>();
int nodeCount = 0;
if (options.parameters != null) {
builder.append(options.parameters);
}
for (SimpleTree.Node node : nodes) {
nodeCount++;
String nodeName = String.format("node\_%s", nodeCount);
nodeNameMap.put(node, nodeName);
builder.append(String.format(" %s [label=\"%s\", shape=%s];\n",
nodeName,
node.getLabel().replace("\"", "\\\""),
node.isTokenNode() ? options.lexerRuleShape : options.parserRuleShape));
}
builder.append("\n");
for (SimpleTree.Node node : nodes) {
String name = nodeNameMap.get(node);
for (SimpleTree.Node child : node.getChildren()) {
String childName = nodeNameMap.get(child);
builder.append(" ").append(name).append(" -- ").append(childName).append("\n");
}
}
return builder.append("}\n").toString();
}
}
/**
 * Flattens a {@link SimpleTree} into a node list via in-order traversal:
 * first child, then the node itself, then the remaining children.
 *
 * NOTE(review): the raw {@code List} types in the original listing were
 * generics stripped by HTML extraction; restored to {@code List<SimpleTree.Node>}.
 */
class InOrderTraversal {

    /**
     * @param tree the tree to flatten; must not be null
     * @return the tree's nodes in in-order sequence
     * @throws IllegalArgumentException if {@code tree} is null
     */
    public List<SimpleTree.Node> traverse(SimpleTree tree) {
        if (tree == null)
            throw new IllegalArgumentException("tree == null");

        List<SimpleTree.Node> nodes = new ArrayList<>();
        traverse(tree.root, nodes);
        return nodes;
    }

    private void traverse(SimpleTree.Node node, List<SimpleTree.Node> nodes) {
        // Visit the first child before the node itself (in-order),
        // then the rest of the children.
        if (node.hasChildren()) {
            traverse(node.getChildren().get(0), nodes);
        }

        nodes.add(node);

        for (int i = 1; i < node.getChildCount(); i++) {
            traverse(node.getChild(i), nodes);
        }
    }
}
/**
 * Immutable rendering options for the DOT output, created via {@link Builder}.
 *
 * NOTE(review): the original listing used markdown-escaped identifiers such as
 * {@code DEFAULT\_PARAMETERS}, which is not valid Java; the underscores are
 * restored here.
 */
class DotOptions {

    public static final DotOptions DEFAULT = new DotOptions.Builder().build();

    public static final String DEFAULT_PARAMETERS = null;
    public static final String DEFAULT_LEXER_RULE_SHAPE = "box";
    public static final String DEFAULT_PARSER_RULE_SHAPE = "ellipse";

    public static class Builder {

        private String parameters = DEFAULT_PARAMETERS;
        private String lexerRuleShape = DEFAULT_LEXER_RULE_SHAPE;
        private String parserRuleShape = DEFAULT_PARSER_RULE_SHAPE;

        /** Raw text pasted verbatim inside the graph block; may be null. */
        public DotOptions.Builder withParameters(String parameters) {
            this.parameters = parameters;
            return this;
        }

        /** DOT shape for token (lexer-rule) nodes; must not be null at build time. */
        public DotOptions.Builder withLexerRuleShape(String lexerRuleShape) {
            this.lexerRuleShape = lexerRuleShape;
            return this;
        }

        /** DOT shape for parser-rule nodes; must not be null at build time. */
        public DotOptions.Builder withParserRuleShape(String parserRuleShape) {
            this.parserRuleShape = parserRuleShape;
            return this;
        }

        /** @throws IllegalStateException if either shape has been set to null */
        public DotOptions build() {
            if (lexerRuleShape == null)
                throw new IllegalStateException("lexerRuleShape == null");
            if (parserRuleShape == null)
                throw new IllegalStateException("parserRuleShape == null");
            return new DotOptions(parameters, lexerRuleShape, parserRuleShape);
        }
    }

    public final String parameters;
    public final String lexerRuleShape;
    public final String parserRuleShape;

    private DotOptions(String parameters, String lexerRuleShape, String parserRuleShape) {
        this.parameters = parameters;
        this.lexerRuleShape = lexerRuleShape;
        this.parserRuleShape = parserRuleShape;
    }
}
/**
 * A lightweight, parser-independent copy of an ANTLR 4 parse tree.
 *
 * NOTE(review): the original listing lost its generic type parameters
 * (raw {@code List}/{@code Set}) to HTML extraction; they are restored here.
 * Requires the ANTLR 4 runtime ({@code Parser}, {@code ParseTree},
 * {@code RuleContext}, {@code CommonToken}) on the classpath.
 */
class SimpleTree {

    /** Fluent builder; a Parser and a ParseTree are mandatory. */
    public static class Builder {

        private Parser parser = null;
        private ParseTree parseTree = null;
        private Set<Integer> ignoredTokenTypes = new HashSet<>();
        private boolean displaySymbolicName = true;

        /** @throws IllegalStateException if parser or parseTree was not set */
        public SimpleTree build() {
            if (parser == null) {
                throw new IllegalStateException("parser == null");
            }
            if (parseTree == null) {
                throw new IllegalStateException("parseTree == null");
            }
            return new SimpleTree(parser, parseTree, ignoredTokenTypes, displaySymbolicName);
        }

        public SimpleTree.Builder withParser(Parser parser) {
            this.parser = parser;
            return this;
        }

        public SimpleTree.Builder withParseTree(ParseTree parseTree) {
            this.parseTree = parseTree;
            return this;
        }

        /** Token types (ANTLR token-type constants) to omit from the tree. */
        public SimpleTree.Builder withIgnoredTokenTypes(Integer... ignoredTokenTypes) {
            this.ignoredTokenTypes = new HashSet<>(Arrays.asList(ignoredTokenTypes));
            return this;
        }

        /** If true, leaf labels read "SYMBOLIC_NAME: 'text'"; otherwise just the text. */
        public SimpleTree.Builder withDisplaySymbolicName(boolean displaySymbolicName) {
            this.displaySymbolicName = displaySymbolicName;
            return this;
        }
    }

    public final SimpleTree.Node root;

    private SimpleTree(Parser parser, ParseTree parseTree, Set<Integer> ignoredTokenTypes, boolean displaySymbolicName) {
        this.root = new SimpleTree.Node(parser, parseTree, ignoredTokenTypes, displaySymbolicName);
    }

    public SimpleTree(SimpleTree.Node root) {
        if (root == null)
            throw new IllegalArgumentException("root == null");
        this.root = root;
    }

    /** Deep copy of the whole tree. */
    public SimpleTree copy() {
        return new SimpleTree(root.copy());
    }

    /** Renders the tree as a LISP-style s-expression. */
    public String toLispTree() {
        StringBuilder builder = new StringBuilder();
        toLispTree(this.root, builder);
        return builder.toString().trim();
    }

    private void toLispTree(SimpleTree.Node node, StringBuilder builder) {
        if (node.isLeaf()) {
            builder.append(node.getLabel()).append(" ");
        }
        else {
            builder.append("(").append(node.label).append(" ");
            for (SimpleTree.Node child : node.children) {
                toLispTree(child, builder);
            }
            builder.append(") ");
        }
    }

    @Override
    public String toString() {
        return String.format("%s", this.root);
    }

    /** One tree node: either a parser-rule node or a token (leaf) node. */
    public static class Node {

        protected String label;
        protected int level;
        protected boolean isTokenNode;
        protected List<SimpleTree.Node> children;

        /** Root constructor: labels itself with the root rule name, then copies the tree. */
        Node(Parser parser, ParseTree parseTree, Set<Integer> ignoredTokenTypes, boolean displaySymbolicName) {
            this(parser.getRuleNames()[((RuleContext) parseTree).getRuleIndex()], 0, false);
            traverse(parseTree, this, parser, ignoredTokenTypes, displaySymbolicName);
        }

        public Node(String label, int level, boolean isTokenNode) {
            this.label = label;
            this.level = level;
            this.isTokenNode = isTokenNode;
            this.children = new ArrayList<>();
        }

        /** Replaces this node's content with {@code node}'s, splicing in its children. */
        public void replaceWith(SimpleTree.Node node) {
            this.label = node.label;
            this.level = node.level;
            this.isTokenNode = node.isTokenNode;
            this.children.remove(node);
            this.children.addAll(node.children);
        }

        /** Deep copy of this subtree. */
        public SimpleTree.Node copy() {
            SimpleTree.Node copy = new SimpleTree.Node(this.label, this.level, this.isTokenNode);
            for (SimpleTree.Node child : this.children) {
                copy.children.add(child.copy());
            }
            return copy;
        }

        /** Re-assigns depth values starting at {@code level} for this subtree. */
        public void normalizeLevels(int level) {
            this.level = level;
            for (SimpleTree.Node child : children) {
                child.normalizeLevels(level + 1);
            }
        }

        public boolean hasChildren() {
            return !children.isEmpty();
        }

        public boolean isLeaf() {
            return !hasChildren();
        }

        public int getChildCount() {
            return children.size();
        }

        public SimpleTree.Node getChild(int index) {
            return children.get(index);
        }

        public int getLevel() {
            return level;
        }

        public String getLabel() {
            return label;
        }

        public boolean isTokenNode() {
            return isTokenNode;
        }

        /** Defensive copy so callers cannot mutate this node's child list. */
        public List<SimpleTree.Node> getChildren() {
            return new ArrayList<>(children);
        }

        private void traverse(ParseTree parseTree, SimpleTree.Node parent, Parser parser, Set<Integer> ignoredTokenTypes, boolean displaySymbolicName) {
            List<SimpleTree.ParseTreeParent> todo = new ArrayList<>();

            for (int i = 0; i < parseTree.getChildCount(); i++) {
                ParseTree child = parseTree.getChild(i);

                if (child.getPayload() instanceof CommonToken) {
                    CommonToken token = (CommonToken) child.getPayload();

                    if (!ignoredTokenTypes.contains(token.getType())) {
                        // Escape whitespace (and quotes in symbolic mode) so
                        // the label renders on one line in the DOT output.
                        String tempText = displaySymbolicName ?
                                String.format("%s: '%s'",
                                        parser.getVocabulary().getSymbolicName(token.getType()),
                                        token.getText()
                                                .replace("\r", "\\r")
                                                .replace("\n", "\\n")
                                                .replace("\t", "\\t")
                                                .replace("'", "\\'")) :
                                String.format("%s",
                                        token.getText()
                                                .replace("\r", "\\r")
                                                .replace("\n", "\\n")
                                                .replace("\t", "\\t"));

                        if (parent.label == null) {
                            parent.label = tempText;
                        }
                        else {
                            parent.children.add(new SimpleTree.Node(tempText, parent.level + 1, true));
                        }
                    }
                }
                else {
                    SimpleTree.Node node = new SimpleTree.Node(parser.getRuleNames()[((RuleContext) child).getRuleIndex()], parent.level + 1, false);
                    parent.children.add(node);
                    todo.add(new SimpleTree.ParseTreeParent(child, node));
                }
            }

            // Children are linked first, then their subtrees are traversed.
            for (SimpleTree.ParseTreeParent wrapper : todo) {
                traverse(wrapper.parseTree, wrapper.parent, parser, ignoredTokenTypes, displaySymbolicName);
            }
        }

        @Override
        public String toString() {
            return String.format("{label=%s, level=%s, isTokenNode=%s, children=%s}", label, level, isTokenNode, children);
        }
    }

    /** Pairs a yet-to-be-visited ParseTree with its already-created Node. */
    private static class ParseTreeParent {

        final ParseTree parseTree;
        final SimpleTree.Node parent;

        private ParseTreeParent(ParseTree parseTree, SimpleTree.Node parent) {
            this.parseTree = parseTree;
            this.parent = parent;
        }
    }
}
```
And if you paste the output in a [DOT viewer](https://dreampuf.github.io/GraphvizOnline/), you will get this:
[](https://i.stack.imgur.com/eCXBN.png)
Upvotes: 3 [selected_answer]<issue_comment>username_2: You may also want to look at alternatives. DOT graphs aren't the pretties among possible graph representations. Maybe you like an svg graph instead? If so have a look at the [ANTLR4 grammar extension for Visual Studio Code,](https://marketplace.visualstudio.com/items?itemName=mike-lischke.vscode-antlr4) which generates and exports an svg graphic with the click of a mouse button (and you can style that with own CSS code).
[](https://i.stack.imgur.com/Yy4YY.png)
Upvotes: 1 |
2018/03/21 | 3,926 | 5,361 | <issue_start>username_0: I have this data set, I want to filter the rows that meet at least one requirement, that at least have a value less than 0.001 in any column of padj (padj.1,padj.2 etc.)
```
log2FoldChange padj log2FoldChange.1 padj.1 log2FoldChange.2 padj.2 log2FoldChange.3 padj.3
AT1G01010 -0.006572090 9.998981e-01 -0.309669318 9.999662e-01 0.438877524 8.309090e-01 0.561092027 5.682979e-01
AT1G01020 0.017360429 9.998981e-01 0.073469652 9.999662e-01 -0.069928545 9.563338e-01 0.125792938 8.207208e-01
AT1G01030 -0.085871384 9.998981e-01 -0.325335794 9.999662e-01 -0.399593376 6.736412e-01 0.441444783 4.456086e-01
AT1G01040 -0.060245264 9.998981e-01 -0.232347149 7.785949e-01 0.122877554 7.833058e-01 -0.069458761 8.334858e-01
AT1G01050 -0.052917601 9.998981e-01 0.014403301 9.999662e-01 0.148566949 8.249185e-01 -0.282904657 2.909831e-01
AT1G01060 0.047175423 9.998981e-01 -0.144865979 8.393350e-01 0.164981232 2.485552e-01 -0.349949093 4.481910e-04
AT1G01080 0.008088133 9.998981e-01 -0.076365069 9.999662e-01 0.074804359 8.919594e-01 -0.129912325 5.543105e-01
AT1G01090 0.056330307 9.998981e-01 -0.014582717 9.999662e-01 0.034778584 9.455045e-01 -0.024959676 9.186005e-01
AT1G01100 -0.060240311 9.998981e-01 0.142628251 9.999662e-01 0.012350145 9.886096e-01 0.214901315 3.117450e-01
AT1G01110 0.013833213 9.998981e-01 1.153241053 3.916769e-01 0.612459109 5.970020e-01 -1.326637272 1.308788e-02
AT1G01120 0.103551581 9.998981e-01 -0.318902875 3.272385e-01 0.234447384 3.078595e-01 -0.683988096 1.880000e-08
AT1G01130 0.189351439 9.998981e-01 -0.462637322 9.999662e-01 -0.170228839 9.550798e-01 0.434854000 7.426886e-01
AT1G01140 -0.005671191 9.998981e-01 -0.313300814 2.749158e-01 -1.053242873 2.890000e-18 1.185748730 3.650000e-26
AT1G01160 -0.074044311 9.998981e-01 0.099783450 9.999662e-01 -0.082539415 9.512494e-01 0.093031718 8.912551e-01
AT1G01170 0.037769428 9.998981e-01 -0.013651764 9.999662e-01 -0.117681977 8.691103e-01 0.068321467 8.790404e-01
AT1G01180 0.248586221 9.998981e-01 -0.659122042 2.585560e-01 0.018832015 9.916358e-01 0.027246808 9.725611e-01
AT1G01210 -0.082339205 9.998981e-01 0.337750634 9.999662e-01 0.108632580 9.549792e-01 -0.255164413 7.487148e-01
AT1G01220 0.074423950 9.998981e-01 -0.184146594 9.999662e-01 0.108415967 8.895097e-01 -0.109752150 7.787131e-01
AT1G01225 -0.157080073 9.998981e-01 0.088379552 9.999662e-01 0.368176470 7.468810e-01 -0.051841400 9.582530e-01
AT1G01230 0.124919482 9.998981e-01 0.038120336 9.999662e-01 -0.038279670 9.724470e-01 0.156677162 6.437230e-01
AT1G01240 0.101444575 9.998981e-01 0.041297975 9.999662e-01 -0.020448865 9.855355e-01 0.105263537 7.626743e-01
AT1G01250 0.065966313 9.998981e-01 -0.091808753 9.999662e-01 0.141797906 9.673330e-01 1.565726558 4.506212e-03
AT1G01260 0.471038145 9.998981e-01 1.528887763 1.200000e-18 -0.346919177 1.656233e-01 -0.810355958 2.440000e-07
AT1G01290 0.118170716 9.998981e-01 -0.047075401 9.999662e-01 -0.056431451 9.684373e-01 0.246362831 5.652973e-01
```
This is the desired output
```
log2FoldChange padj log2FoldChange.1 padj.1 log2FoldChange.2 padj.2 log2FoldChange.3 padj.3
AT1G01060 0.047175423 9.998981e-01 -0.144865979 8.393350e-01 0.164981232 2.485552e-01 -0.349949093 4.481910e-04
AT1G01120 0.103551581 9.998981e-01 -0.318902875 3.272385e-01 0.234447384 3.078595e-01 -0.683988096 1.880000e-08
AT1G01140 -0.005671191 9.998981e-01 -0.313300814 2.749158e-01 -1.053242873 2.890000e-18 1.185748730 3.650000e-26
AT1G01260 0.471038145 9.998981e-01 1.528887763 1.200000e-18 -0.346919177 1.656233e-01 -0.810355958 2.440000e-07
```
Regards and thanks<issue_comment>username_1: We can use `filter_at` with `dplyr`. If there are row names, then it is better to add that as a separate column before doing the piping as the row names are removed.
```
# Keep rows where ANY padj* column is below the 0.001 cutoff.
library(tidyverse)
df1 %>%
# filter_at() drops row names, so lift them into a regular column first
rownames_to_column('rn') %>%
# matches("padj") selects padj, padj.1, padj.2, ...; any_vars() ORs the tests
# NOTE(review): filter_at()/any_vars() are superseded by if_any() in current dplyr
filter_at(vars(matches("padj")), any_vars(. < 0.001))
# rn log2FoldChange padj log2FoldChange.1 padj.1 log2FoldChange.2 padj.2 log2FoldChange.3 padj.3
#1 AT1G01060 0.047175423 0.9998981 -0.1448660 8.393350e-01 0.1649812 2.485552e-01 -0.3499491 4.48191e-04
#2 AT1G01120 0.103551581 0.9998981 -0.3189029 3.272385e-01 0.2344474 3.078595e-01 -0.6839881 1.88000e-08
#3 AT1G01140 -0.005671191 0.9998981 -0.3133008 2.749158e-01 -1.0532429 2.890000e-18 1.1857487 3.65000e-26
#4 AT1G01260 0.471038145 0.9998981 1.5288878 1.200000e-18 -0.3469192 1.656233e-01 -0.8103560 2.44000e-07
```
Upvotes: 1 <issue_comment>username_2: I think you can do this with the which function.
try this, it worked for me:
```
# Base-R equivalent: keep rows where at least one padj column is significant.
# NOTE(review): the original compared with `> 0.001`, which keeps rows where a
# padj EXCEEDS the cutoff — the opposite of what the question asks — and it
# skipped the first padj column; both are corrected here.
i = which((data$padj < 0.001) | (data$padj.1 < 0.001) | (data$padj.2 < 0.001) | (data$padj.3 < 0.001))
data[i, ]
```
Upvotes: 0 |
2018/03/21 | 767 | 1,594 | <issue_start>username_0: I have two numbers : 7,8 and 6,8.
When I multiply these numbers I get #VALUE!. I know this happens because Excel thinks these numbers are strings because of the comma. But I cannot use the separator . (point) because in my task the numbers are written with , (comma). I tried different number functions, but all of them convert numbers with a point (e.g. 7.8, 6.8). What do I need to do? Thanks for any advice.<issue_comment>username_1: We can use `filter_at` with `dplyr`. If there are row names, then it is better to add that as a separate column before doing the piping as the row names are removed.
```
library(tidyverse)
df1 %>%
rownames_to_column('rn') %>%
filter_at(vars(matches("padj")), any_vars(. < 0.001))
# rn log2FoldChange padj log2FoldChange.1 padj.1 log2FoldChange.2 padj.2 log2FoldChange.3 padj.3
#1 AT1G01060 0.047175423 0.9998981 -0.1448660 8.393350e-01 0.1649812 2.485552e-01 -0.3499491 4.48191e-04
#2 AT1G01120 0.103551581 0.9998981 -0.3189029 3.272385e-01 0.2344474 3.078595e-01 -0.6839881 1.88000e-08
#3 AT1G01140 -0.005671191 0.9998981 -0.3133008 2.749158e-01 -1.0532429 2.890000e-18 1.1857487 3.65000e-26
#4 AT1G01260 0.471038145 0.9998981 1.5288878 1.200000e-18 -0.3469192 1.656233e-01 -0.8103560 2.44000e-07
```
Upvotes: 1 <issue_comment>username_2: I think you can do this with the which function.
try this, it worked for me:
```
i = which((data$padj.1 > 0.001) | (data$padj.2 > 0.001)|(data$padj.3 > 0.001) )
data[i,]
```
Upvotes: 0 |
2018/03/21 | 996 | 3,114 | <issue_start>username_0: So I have this header given to me
```
int mystrcpy(char *c, const char* s);
```
And I have to implement the strcpy function myself. But when I do it and run the program, the console stops working, which I believe is a sign of a memory access violation.
I tried this:
```
int mystrcpy(char *c, const char* s)
{
int i=0;
for(i=0;i
```
Full code:
```
#include
#include
using namespace std;
class Persoana
{
char \*nume;
int an;
float inaltime;
public:
int my\_strcpy(char \*c, const char\*s);
};
int Persoana::my\_strcpy(char \*c, const char\* s)
{
//code I need to insert
return 0;
}
int main()
{
Persoana a;
cout << endl;
cout<
```<issue_comment>username_1: The for loop
```
for(i=0; i < strlen(s); i++)
```
aborts when *i < strlen(s)* becomes *false*, which is the case when i is equal to *strlen(s)*. So that will be the value of i when the loop ends.
C strings are NULL-terminated as you know, so you need *strlen(s) + 1* bytes reserved for c. Since you increment i again before writing the '\0' character, you're using *strlen(s) + 2* bytes starting at c.
If c is exactly the size that's needed (*strlen(s) + 1*), that *may* lead to an access violation, since you're writing past the end of the allocated memory.
So instead of
```
c[++i]=NULL;
```
write
```
c[i]='\0';
```
Hope this makes sense!
Upvotes: -1 <issue_comment>username_2: Your loop itself is OK (but inefficient, since you call `strlen()` on every iteration). The real problem is when you go to insert the null terminator at the end of the copied string. You are incrementing `i` again before inserting the terminator. DON'T do that. `i` is already at the correct index after the loop ends, so just use it as-is:
```
// Copies the NUL-terminated string s into the buffer c (which the caller must
// have sized to at least strlen(s) + 1 bytes), echoes the copy to stdout, and
// returns 0.
// NOTE(review): the original had a stray ';' after the signature, which makes
// the definition invalid C++; it is removed here.
int mystrcpy(char *c, const char* s)
{
    int i, len = strlen(s);  // hoist strlen() out of the loop condition
    for (i = 0; i < len; ++i)
        c[i] = s[i];
    c[i] = '\0';             // i is already the terminator's index — no ++ here
    cout << c;
    return 0;
}
That being said, the simplest way to implement `strcpy` without using `i` at all is as follows:
```
// Pointer-walking strcpy clone: copies s — including the terminating NUL —
// into c, prints the copied string to stdout, and returns 0.
// The caller must provide a buffer of at least strlen(s) + 1 bytes.
int mystrcpy(char *c, const char* s)
{
    char *dst = c;
    for (;;) {
        char ch = *s++;
        *dst++ = ch;
        if (ch == '\0')   // the NUL has just been copied; stop
            break;
    }
    cout << c;
    return 0;
}
If you remove the `cout` then it becomes simpler:
```
// Minimal strcpy clone: copies characters from s to c up to and including the
// terminating NUL. Always returns 0. The destination buffer must hold at
// least strlen(s) + 1 bytes.
int mystrcpy(char *c, const char* s)
{
    do {
        *c = *s;
        ++c;
        ++s;
    } while (*(c - 1) != '\0');  // stop once the NUL has been copied
    return 0;
}
No matter what, make sure that when you call `mystrcpy()`, `c` is pointing to a `char[]` buffer that is allocated to at least `strlen(s)+1` chars in size, otherwise the code will have *undefined behavior*.
Upvotes: 0 <issue_comment>username_3: Other posters have posted implementations of `strcpy` - Why you are using this in C++ code? That is an interesting question as C++ very rarely uses C style strings
The other problem is with its usage:
```
int main()
{
Persoana a;
cout << endl;
cout<
```
The strings "maria" and "george" are read only. Instead create an empty read/ write string as shown below that is long enough - i.e. 7 characters (do not forget the null character)
So the code should be
```
int main()
{
Persoana a;
char copy_of_string[7];
cout << endl;
cout<
```
Upvotes: 2 [selected_answer] |
2018/03/21 | 1,152 | 4,015 | <issue_start>username_0: I'm trying to test my axios API functions in React.
Found this question here: [how do i test axios in jest](https://stackoverflow.com/questions/45016033/how-do-i-test-axios-in-jest) which pointed to using `axios-mock-adapter`
```
// Jest test showing axios-mock-adapter usage: intercept the HTTP layer and
// assert that chatbot.sendMessage resolves with the mocked payload.
import axios from 'axios';
import MockAdapter from 'axios-mock-adapter';
import chatbot from './chatbot';
describe('Chatbot', () => {
it('returns data when sendMessage is called', done => {
// Attach the mock to the global axios instance; all GETs to the URL below
// will now return the canned response instead of hitting the network.
var mock = new MockAdapter(axios);
const data = { response: true };
mock.onGet('https://us-central1-hutoma-backend.cloudfunctions.net/chat').reply(200, data);
// Async assertion: done() signals Jest once the promise resolves.
chatbot.sendMessage(0, 'any').then(response => {
expect(response).toEqual(data);
done();
});
});
});
---
The real function:
```
/**
* Retrieve all Akamai images
* @param {String} akamai Akamai url
* @return {Thenable} Resolved: Akamai images
*/
export const callGetAkamai = () =>
makeRequest('/akamai', 'GET')
.catch(defaultCatch('callGetAkamai'));
```
My test:
```
import axios from 'axios';
import MockAdapter from 'axios-mock-adapter';
import { callGetAkamai } from './api';
describe('GetAkamai', () => {
it('returns data when callGetAkamai is called', (done) => {
console.log('MockAdapter', MockAdapter);
const mock = new MockAdapter(axios);
// const mock = axios.create({
// baseURL: 'https://us-central1-hutoma-backend.cloudfunctions.net/chat/'
// });
const data = { response: true };
mock.onGet('https://us-central1-hutoma-backend.cloudfunctions.net/chat').reply(200, data);
callGetAkamai().then((response) => {
expect(response).toEqual(data);
done();
});
});
});
```
[](https://i.stack.imgur.com/3o8b7.png)<issue_comment>username_1: Are you mocking `axios` already? I have run into this issue myself, and after looking in all the wrong places, I realized I was already mocking `axios` with `jest`.
Put the following snippet in your [setupTestFrameworkScriptFile](https://jestjs.io/docs/en/configuration.html#setuptestframeworkscriptfile-string):
```
const mockNoop = () => new Promise(() => {});
// Notice how `create` was not being mocked here...
jest.mock('axios', () => ({
default: mockNoop,
get: mockNoop,
post: mockNoop,
put: mockNoop,
delete: mockNoop,
patch: mockNoop
}));
```
While you might be able to do both, if you are using the `axios-mock-adapter`, you might want to remove your other mocks (and skip the snippet above).
Upvotes: 5 [selected_answer]<issue_comment>username_2: Adding this here since it's the first hit on google to the question and the answer selected doesn't really answer the question.
This problem typically happens when you are already mocking axios (very likely within a `__mocks__` folder).
With jest, you can explicitly unmock, then call this `axios-mock-adapter`.
```
jest.unmock('axios');
import axios from 'axios';
import MockAdapter from 'axios-mock-adapter';
...
```
`axios-mock-adapter` gives nice, flexible apis when working with external requests. However it doesn't globally prevent your app from making external calls that can be triggered by a test in a different component.
So I found both using `axios-mock-adapter` and doing a manual mock in the `__mocks__` folder equally helpful.
Upvotes: 4 <issue_comment>username_3: Your error is due to the fact that you are using a new instance of axios with a custom config **axios.create([config])**
```
const instance = axios.create({
baseURL: 'https://some-domain.com/api/',
timeout: 1000,
headers: {'X-Custom-Header': 'foobar'}
});
```
But in the test you are testing axios
```
import axios from 'axios';
import MockAdapter from 'axios-mock-adapter';
const mock = new MockAdapter(axios);
```
If you are creating a new axios instance, then you need to test it, but not axios itself.
```
const mock = new MockAdapter(instance)
```
Upvotes: 0 |
2018/03/21 | 3,356 | 11,967 | <issue_start>username_0: I am trying to build my ionic 3 application and deploy and live reload on my device for that reason I am trying following command:
```
ionic cordova run ios --device --prod -lcs
```
But after running that command, I get below error:
```
error: exportArchive: No profiles for 'io.ionic.starter' were found
```
and
```
"No profiles for 'io.ionic.starter' were found" UserInfo={NSLocalizedDescription=No profiles for 'io.ionic.starter' were found, NSLocalizedRecoverySuggestion=Xcode couldn't find any iOS App Development provisioning profiles matching 'io.ionic.starter'. Automatic signing is disabled and unable to generate a profile. To enable automatic signing, pass -allowProvisioningUpdates to xcodebuild.}
```
It says `To enable automatic signing, pass -allowProvisioningUpdates to xcodebuild.` but how in cordova?
Some additional error output. As you can see below, first `Archive Succeeded` but then it failed.
```
** ARCHIVE SUCCEEDED **
2018-03-21 15:34:28.042 xcodebuild[1673:18415] [MT] IDEDistribution: -[IDEDistributionLogging _createLoggingBundleAtPath:]: Created bundle at path '/var/folders/<KEY>T/RemoteWorkersEvents_2018-03-21_15-34-28.034.xcdistributionlogs'.
2018-03-21 15:34:31.900 xcodebuild[1673:18415] [MT] IDEDistribution: Step failed: : Error Domain=IDEDistributionSigningAssetStepErrorDomain Code=0 "Locating signing assets failed." UserInfo={NSLocalizedDescription=Locating signing assets failed., IDEDistributionSigningAssetStepUnderlyingErrors=(
"Error Domain=IDEProfileLocatorErrorDomain Code=1 \"No profiles for 'io.ionic.starter' were found\" UserInfo={NSLocalizedDescription=No profiles for 'io.ionic.starter' were found, NSLocalizedRecoverySuggestion=Xcode couldn't find any iOS App Development provisioning profiles matching 'io.ionic.starter'. Automatic signing is disabled and unable to generate a profile. To enable automatic signing, pass -allowProvisioningUpdates to xcodebuild.}"
)}
error: exportArchive: No profiles for 'io.ionic.starter' were found
Error Domain=IDEProfileLocatorErrorDomain Code=1 "No profiles for 'io.ionic.starter' were found" UserInfo={NSLocalizedDescription=No profiles for 'io.ionic.starter' were found, NSLocalizedRecoverySuggestion=Xcode couldn't find any iOS App Development provisioning profiles matching 'io.ionic.starter'. Automatic signing is disabled and unable to generate a profile. To enable automatic signing, pass -allowProvisioningUpdates to xcodebuild.}
\*\* EXPORT FAILED \*\*
(node:1256) UnhandledPromiseRejectionWarning: Error code 70 for command: xcodebuild with args: -exportArchive,-archivePath,RemoteWorkersEvents.xcarchive,-exportOptionsPlist,/Users/zafar/Documents/Projects/dne/ne-frontend/platforms/ios/exportOptions.plist,-exportPath,/Users/zafar/Documents/Projects/dne/ne-frontend/platforms/ios/build/device
(node:1256) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). (rejection id: 1)
(node:1256) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code.
```
**Here is more output**
```
Signing Identity: "iPhone Developer: <EMAIL> (7V8DET9FFF)"
*****------>>>>> Provisioning Profile: "iOS Team Provisioning Profile: io.ionic.dne" // <--- Here it says different profile which is correct because I set it up in xcode for this project.
(c37524bf-b317-4957-85a4-01877cfac047)
/usr/bin/codesign --force --sign 0180A41A2E305542B7292E256F380F50BADCD73B --entitlements /Users/zafar/Library/Developer/Xcode/DerivedData/RemoteWorkersEvents-dxaeeftqppxbkmehuratdfbvdnom/Build/Intermediates.noindex/ArchiveIntermediates/RemoteWorkersEvents/IntermediateBuildFilesPath/RemoteWorkersEvents.build/Debug-iphoneos/RemoteWorkersEvents.build/RemoteWorkersEvents.app.xcent --timestamp=none /Users/zafar/Library/Developer/Xcode/DerivedData/RemoteWorkersEvents-dxaeeftqppxbkmehuratdfbvdnom/Build/Intermediates.noindex/ArchiveIntermediates/RemoteWorkersEvents/InstallationBuildProductsLocation/Applications/RemoteWorkersEvents.app
Validate /Users/zafar/Library/Developer/Xcode/DerivedData/RemoteWorkersEvents-dxaeeftqppxbkmehuratdfbvdnom/Build/Intermediates.noindex/ArchiveIntermediates/RemoteWorkersEvents/InstallationBuildProductsLocation/Applications/RemoteWorkersEvents.app
cd /Users/zafar/Documents/Projects/dne/ne-frontend/platforms/ios
export PATH="/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/usr/bin:/Applications/Xcode.app/Contents/Developer/usr/bin:/Users/zafar/Documents/Projects/dne/ne-frontend/node_modules/.bin:/Users/zafar/.nvm/versions/node/v9.6.1/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin"
export PRODUCT_TYPE=com.apple.product-type.application
builtin-validationUtility /Users/zafar/Library/Developer/Xcode/DerivedData/RemoteWorkersEvents-dxaeeftqppxbkmehuratdfbvdnom/Build/Intermediates.noindex/ArchiveIntermediates/RemoteWorkersEvents/InstallationBuildProductsLocation/Applications/RemoteWorkersEvents.app
Touch build/device/RemoteWorkersEvents.app.dSYM
cd /Users/zafar/Documents/Projects/dne/ne-frontend/platforms/ios
export PATH="/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/usr/bin:/Applications/Xcode.app/Contents/Developer/usr/bin:/Users/zafar/Documents/Projects/dne/ne-frontend/node_modules/.bin:/Users/zafar/.nvm/versions/node/v9.6.1/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin"
/usr/bin/touch -c /Users/zafar/Documents/Projects/dne/ne-frontend/platforms/ios/build/device/RemoteWorkersEvents.app.dSYM
** ARCHIVE SUCCEEDED **
2018-03-24 13:47:59.274 xcodebuild[3032:92302] [MT] IDEDistribution: -[IDEDistributionLogging _createLoggingBundleAtPath:]: Created bundle at path '/var/<KEY>T/RemoteWorkersEvents_2018-03-24_13-47-59.269.xcdistributionlogs'.
2018-03-24 13:48:02.328 xcodebuild[3032:92302] [MT] IDEDistribution: Step failed: : Error Domain=IDEDistributionSigningAssetStepErrorDomain Code=0 "Locating signing assets failed." UserInfo={NSLocalizedDescription=Locating signing assets failed., IDEDistributionSigningAssetStepUnderlyingErrors=(
"Error Domain=IDEProfileLocatorErrorDomain Code=1 \"No profiles for 'io.ionic.starter' were found\" UserInfo={NSLocalizedDescription=No profiles for 'io.ionic.starter' were found, NSLocalizedRecoverySuggestion=Xcode couldn't find any iOS App Development provisioning profiles matching 'io.ionic.starter'. Automatic signing is disabled and unable to generate a profile. To enable automatic signing, pass -allowProvisioningUpdates to xcodebuild.}"
)}
*****------>>>>> error: exportArchive: No profiles for 'io.ionic.starter' were found // <--- Here it says a different profile, which is the default, but I changed it to the one above
Error Domain=IDEProfileLocatorErrorDomain Code=1 "No profiles for 'io.ionic.starter' were found" UserInfo={NSLocalizedDescription=No profiles for 'io.ionic.starter' were found, NSLocalizedRecoverySuggestion=Xcode couldn't find any iOS App Development provisioning profiles matching 'io.ionic.starter'. Automatic signing is disabled and unable to generate a profile. To enable automatic signing, pass -allowProvisioningUpdates to xcodebuild.}
** EXPORT FAILED **
(node:2553) UnhandledPromiseRejectionWarning: Error code 70 for command: xcodebuild with args: -exportArchive,-archivePath,RemoteWorkersEvents.xcarchive,-exportOptionsPlist,/Users/zafar/Documents/Projects/dne/ne-frontend/platforms/ios/exportOptions.plist,-exportPath,/Users/zafar/Documents/Projects/dne/ne-frontend/platforms/ios/build/device
(node:2553) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). (rejection id: 1)
(node:2553) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code.
```
Please check the lines that begin with `*****------>>>>>`. Those are the two lines I am concerned about, and I think something weird is happening there. Any idea what is happening here and how can I fix this?
>
> iOS developers need to generate a provisioning profile to code sign
> their apps for testing. The good news is that, as of iOS 9, you can
> develop and test your apps on your iOS device without a paid Apple
> Developer account. This is particularly great for developers who want
> to try out mobile development with Ionic since it saves the cost but
> still provides a lot of the features of having a full Apple Developer
> account. For a full breakdown of the features included, check out
> [Apple’s docs](https://developer.apple.com/library/ios/documentation/IDEs/Conceptual/AppDistributionGuide/SupportedCapabilities/SupportedCapabilities.html#//apple_ref/doc/uid/TP40012582-CH38-SW1).
>
>
>
**Basic Requirements to run your iOS app:**
* MacOS (10.10 or higher)
* Xcode 7 or higher
* iOS 9
* A free Apple ID or paid Apple Developer account
**Need to Generate Certificate & Profile:**
Open your Xcode > Go to Preferences > Account > Add account
[](https://i.stack.imgur.com/S392E.png)
Add Your Apple account credential:
[](https://i.stack.imgur.com/xpoyF.png)
[](https://i.stack.imgur.com/P5NvX.png)
**Now Come back to the ionic project and follow below steps:**
* Run a build (production or debug) of your app with below command
$ionic cordova build ios --prod
* Open the `.xcworkspace` file from path `platforms/ios/` in Xcode.
**For Profile Selection from XCode follow below steps:**
Now Go to your App Setting (By clicking on App name in Left Menu panel)
[](https://i.stack.imgur.com/ZyFLl.png)
Check "Automatically manage signing" (by checking this, Xcode will create a profile automatically)
After setting all this now your app is ready to test on the device there are 2 ways to run your app on the device.
1. Directly run your app from XCode by hitting on the play button on the top left corner.
2. run `$ionic cordova run ios --device` through your ionic project terminal.
Hope above steps will help to run your app on the iOS device.
Useful links :
1. [How to deploy app on device (android & iOS)](https://ionicframework.com/docs/intro/deploying/)
2. [Apple Certificate Sign in flow](https://help.apple.com/xcode/mac/current/#/dev60b6fbbc7)
Upvotes: 2 <issue_comment>username_2: I had the same issue before and I fixed it by actually archive and validate the ios app using xcode GUI. That creates the provisioning profile you need if you check the "automatic managing signing". (P.s. If that fails to validate, then it is because of some other reason which you need to google) After all the necessary provisioning profiles are automatically created by xcode. Then, try run the command again. It should work this time! I hope that can solve your problem.
Upvotes: 2 <issue_comment>username_3: just add quotes between the name bundle identifier!
ex:
"io.ionic.starter"
or
-io.ionic.starter-
Upvotes: 0 <issue_comment>username_4: I had the same issue, and I solved it by writing my email, but instead of the "@" I wrote a dash "-". That gave me the certificate and now it is working.
Upvotes: 0 |
2018/03/21 | 1,107 | 4,113 | <issue_start>username_0: What is the recommended way to essentially embed a List element within another List element?
For Example, here are snippets from my two data model classes so you can see what I'm trying to do:
```
// Example data model: a person who owns a collection of sales orders.
class Person
{
// Unique identifier for the person.
int Id;
string LastName;
string FirstName;
// Orders placed by this person.
// NOTE(review): the generic argument appears stripped by the site's
// formatting -- presumably this was List<SalesOrders>; confirm.
List Orders;
}
```
and
```
// Example data model: a single sales order belonging to a Person.
class SalesOrders
{
string OrderNumber;
// Customer purchase-order reference.
string CustomerPO;
// Date the order was placed.
DateTime OrderDate;
}
```
Can this be done, and is it advisable or is there another method for doing this?
I need to be able to create a List element for a particular person, and then load their sales order data into their list element in the same method/routine.<issue_comment>username_1: To deploy and test an app on an iOS device you need to create a developer certificate and a provisioning profile.
>
> iOS developers need to generate a provisioning profile to code sign
> their apps for testing. The good news is that, as of iOS 9, you can
> develop and test your apps on your iOS device without a paid Apple
> Developer account. This is particularly great for developers who want
> to try out mobile development with Ionic since it saves the cost but
> still provides a lot of the features of having a full Apple Developer
> account. For a full breakdown of the features included, check out
> [Apple’s docs](https://developer.apple.com/library/ios/documentation/IDEs/Conceptual/AppDistributionGuide/SupportedCapabilities/SupportedCapabilities.html#//apple_ref/doc/uid/TP40012582-CH38-SW1).
>
>
>
**Basic Requirements to run your iOS app:**
* MacOS (10.10 or higher)
* Xcode 7 or higher
* iOS 9
* A free Apple ID or paid Apple Developer account
**Need to Generate Certificate & Profile:**
Open your Xcode > Go to Preferences > Account > Add account
[](https://i.stack.imgur.com/S392E.png)
Add Your Apple account credential:
[](https://i.stack.imgur.com/xpoyF.png)
[](https://i.stack.imgur.com/P5NvX.png)
**Now Come back to the ionic project and follow below steps:**
* Run a build (production or debug) of your app with below command
$ionic cordova build ios --prod
* Open the `.xcworkspace` file from path `platforms/ios/` in Xcode.
**For Profile Selection from XCode follow below steps:**
Now Go to your App Setting (By clicking on App name in Left Menu panel)
[](https://i.stack.imgur.com/ZyFLl.png)
Check "Automatically manage signing" (by checking this, Xcode will create a profile automatically)
After setting all this now your app is ready to test on the device there are 2 ways to run your app on the device.
1. Directly run your app from XCode by hitting on the play button on the top left corner.
2. run `$ionic cordova run ios --device` through your ionic project terminal.
Hope above steps will help to run your app on the iOS device.
Useful links :
1. [How to deploy app on device (android & iOS)](https://ionicframework.com/docs/intro/deploying/)
2. [Apple Certificate Sign in flow](https://help.apple.com/xcode/mac/current/#/dev60b6fbbc7)
Upvotes: 2 <issue_comment>username_2: I had the same issue before and I fixed it by actually archive and validate the ios app using xcode GUI. That creates the provisioning profile you need if you check the "automatic managing signing". (P.s. If that fails to validate, then it is because of some other reason which you need to google) After all the necessary provisioning profiles are automatically created by xcode. Then, try run the command again. It should work this time! I hope that can solve your problem.
Upvotes: 2 <issue_comment>username_3: just add quotes between the name bundle identifier!
ex:
"io.ionic.starter"
or
-io.ionic.starter-
Upvotes: 0 <issue_comment>username_4: I had the same issue, and I solved it by writing my email, but instead of the "@" I wrote a dash "-". That gave me the certificate and now it is working.
Upvotes: 0 |
2018/03/21 | 714 | 2,904 | <issue_start>username_0: If a program relies on command line arguments as user input, like for example asking for an amount of shirts to order, a shirt size, and a color selection. I know these values would be stored in `args[0]`, `args[1]`, and `args[2]`. But how would I manage cases in which one or more of those arguments isn't given by the user if I'm expecting to receive those values in that order. Like if the amount of shirts to order isn't given, is there a way that I can initialize that value to a default value of 1?<issue_comment>username_1: There are two common approaches for optional command line arguments
1. Use a flag like `java MyClass -q5`. Parsing these kinds of arguments is challenging to do on your own. I suggest using a library such as [commons-cli](https://mvnrepository.com/artifact/commons-cli/commons-cli/1.3.1).
2. Put the optional arguments at the end. This way if the user supplies only 2 arguments, you know that these are the size and the color.
Upvotes: 1 <issue_comment>username_2: You get very little flexibility with command line arguments: the environment communicates them to you as a `String[]` array, which implies that individual arguments have no names, only indexes.
You need to decide how to handle cases when one or more command-line arguments are missing, but keep in mind that it is not possible to "skip" arguments in the middle: whenever an argument is omitted, it is always the trailing argument. For example, if the user calls your program with parameters `XXL GREEN` these would be placed in `args[0]` and `args[1]`, while `args[2]` would be missing; the parameters would not be placed in `args[1]` and `args[2]`.
One simple choice is to treat situations when fewer than three arguments are specified as errors. If this is not flexible enough, you could parse parameters individually, and try to infer what they "mean". For example, if you see `"S"`, `"M"`, `"L"`, `"XL"`, or `"XXL"` in any position, your program may take it as an indication of size.
Upvotes: 2 [selected_answer]<issue_comment>username_3: One trick off the top of my head would be to fix the length of arguments or play around it if there's a pattern.
For your example size is always a number so you can loop around the arguments and reserve the numeric one to your `length`.
For size you can prompt user to use `X,M,L,XL` so let's say it becomes maximum of two character length then you can again loop around arguments and store it as your `size`. For `color` you can make it string and greater than 2 character like `RED, GREEN` and assign it to your `color` argument. If you want to go further then you can reserve the keywords for your `size` to be one of `S,M,L,XXL,XL` etc and check every argument against it before assigning to `color` argument.
Either way you might have got the point by now, play around the arguments if there's a pattern and assign it.
Upvotes: 0 |
2018/03/21 | 816 | 2,527 | <issue_start>username_0: GETTING ERROR for the below macro script. Need extract date, name and address in a single table. Since there is only one value of date, the second value returns null.
However I need the same value to be present in all records of extract date
```
' Collect the repeating Application nodes, the two per-application text
' values, and the single ExtractDate node.
Set ApplicationsNode = oXMLFile.SelectNodes("/Extract/Applications/Application")
Set extractnodes = oXMLFile.SelectNodes("/Extract/ExtractDate")
Set NameNode = oXMLFile.SelectNodes("/Extract/Applications/Application/Name/text()")
Set AddrNode = oXMLFile.SelectNodes("/Extract/Applications/Application/Addr/text()")

' There is only ONE ExtractDate node, so read it once, outside the loop:
' indexing extractnodes(i) returns Nothing for every i > 0, which caused
' the original error. .Text (not .NodeValue) is used because this is an
' element node, whose NodeValue is Null.
Extract = extractnodes(0).Text

For i = 0 To (ApplicationsNode.Length - 1)
    Name = NameNode(i).NodeValue
    Addr = AddrNode(i).NodeValue
    ' Repeat the same extract date on every output row.
    mainWorkBook.Sheets("Sheet1").Range("A" & i + 2).Value = Extract
    mainWorkBook.Sheets("Sheet1").Range("C" & i + 2).Value = Name
    mainWorkBook.Sheets("Sheet1").Range("D" & i + 2).Value = Addr
Next
```
Input XML:
```
<Extract>
    <ExtractDate>2018-02-21 10:01:01</ExtractDate>
    <Applications>
        <Application>
            <Name>1234</Name>
            <Addr>700ST</Addr>
        </Application>
        <Application>
            <Name>123466</Name>
            <Addr>277AVD</Addr>
        </Application>
    </Applications>
</Extract>
```<issue_comment>username_1: Try using the below portion of elements within `content.xml` file in desktop and run the below code.
within `content.xml` file:
```
<Extract>
    <ExtractDate>2018-02-21 10:01:01</ExtractDate>
    <Applications>
        <Application>
            <Name>1234</Name>
            <Addr>700ST</Addr>
        </Application>
        <Application>
            <Name>123466</Name>
            <Addr>277AVD</Addr>
        </Application>
    </Applications>
</Extract>
```
Script to parse the value from:
```
Sub DemoXML()
    ' Load content.xml from this workbook's folder and write one worksheet
    ' row per <Application> node: extract date, name and address.
    Dim post As Object  ' current <Application> node
    Dim r As Long       ' output row counter (previously undeclared)

    With CreateObject("MSXML2.DOMDocument")
        .async = False: .validateOnParse = False
        .Load ThisWorkbook.path & "\content.xml"
        For Each post In .SelectNodes("//Extract//Application")
            r = r + 1
            ' The single <ExtractDate> sits two levels up from each
            ' <Application>, so the same date is repeated on every row.
            Cells(r, 1) = post.ParentNode.ParentNode.FirstChild.Text
            Cells(r, 2) = post.SelectNodes(".//Name")(0).Text
            Cells(r, 3) = post.SelectNodes(".//Addr")(0).Text
        Next post
    End With
End Sub
```
Populated results:
```
2/21/2018 10:01 1234 700ST
2/21/2018 10:01 123466 277AVD
```
Upvotes: 1 <issue_comment>username_2: Better answer already given but here is a variation:
```
Option Explicit

Public Sub DemoXML()
    ' Load the XML file and output one row per <Application>, repeating the
    ' single <ExtractDate> value in column A of every row.
    Dim post As Object          ' current <Application> node
    Dim R As Long               ' output row counter
    Dim dateString As String    ' shared extract date (one per document)

    With CreateObject("MSXML2.DOMDocument")
        .async = False: .validateOnParse = False
        .Load "C:\Users\User\Desktop\random.xml"
        ' Read the one-and-only ExtractDate up front.
        dateString = .SelectSingleNode("//ExtractDate").Text
        For Each post In .SelectNodes("//Application")
            R = R + 1: Cells(R, 1) = dateString
            ' <Name> and <Addr> are the first and last children of <Application>.
            Cells(R, 2) = post.FirstChild.Text
            Cells(R, 3) = post.LastChild.Text
        Next post
    End With
End Sub
```
Upvotes: 0 |
2018/03/21 | 8,006 | 18,170 | <issue_start>username_0: In Visual Studio I am able to make a connection to Jira which returns the string (shortened to one instance for the example)
```
"expand": "operations,versionedRepresentations,editmeta,changelog,renderedFields",
"id": "15237",
"self": "https://companyName.atlassian.net/rest/agile/1.0/issue/15237",
"key": "THU-219",
"fields": {
"customfield_10110": null,
"fixVersions": [],
"customfield_10111": null,
"customfield_10112": null,
"resolution": null,
"customfield_10113": [
"com.atlassian.greenhopper.service.sprint.Sprint@a34aea2[id=53,rapidViewId=9,state=CLOSED,name=THU Sprint 7,goal=Survive!,startDate=2018-03-07T22:33:01.297Z,endDate=2018-03-21T22:33:00.000Z,completeDate=2018-03-20T19:16:10.159Z,sequence=53]",
"com.atlassian.greenhopper.service.sprint.Sprint@28b8057[id=58,rapidViewId=9,state=ACTIVE,name=THU Sprint 8,goal=make sure fixes fix ticket,startDate=2018-03-20T19:16:10.291Z,endDate=2018-04-03T19:16:00.000Z,completeDate=,sequence=58]",
"com.atlassian.greenhopper.service.sprint.Sprint@3e1efb35[id=43,rapidViewId=9,state=CLOSED,name=THU Sprint 5,goal=,startDate=2018-02-06T20:44:37.751Z,endDate=2018-02-20T20:44:00.000Z,completeDate=2018-02-20T20:15:10.688Z,sequence=43]",
"com.atlassian.greenhopper.service.sprint.Sprint@2ce8e5a0[id=48,rapidViewId=9,state=CLOSED,name=THU Sprint 6,goal=,startDate=2018-02-20T20:15:01.461Z,endDate=2018-03-06T20:15:00.000Z,completeDate=2018-03-07T22:32:22.792Z,sequence=48]"
],
"customfield\_10114": "1|hzz39z:",
"customfield\_10500": null,
"customfield\_10104": null,
"customfield\_10105": null,
"customfield\_10501": null,
"customfield\_10106": null,
"customfield\_10502": null,
"customfield\_10503": null,
"customfield\_10107": null,
"customfield\_10108": null,
"customfield\_10109": null,
"lastViewed": null,
"epic": {
"id": 15229,
"key": "THU-211",
"self": "https://companyName.atlassian.net/rest/agile/1.0/epic/15229",
"name": "project Name",
"summary": "Epic encompassing all TBD-related issues",
"color": {
"key": "color\_2"
},
"done": false
},
"priority": {
"self": "https://nebook.atlassian.net/rest/api/2/priority/1",
"iconUrl": "https://companyName.atlassian.net/images/icons/priorities/highest.svg",
"name": "Highest",
"id": "1"
},
"customfield\_10100": "2018-01-23T17:12:27.999-0600",
"customfield\_10101": null,
"customfield\_10102": null,
"labels": [
"user's <NAME>"
],
"customfield\_10103": null,
"customfield\_10731": null,
"customfield\_10610": null,
"customfield\_10611": null,
"customfield\_10733": null,
"customfield\_10612": null,
"customfield\_10613": null,
"timeestimate": null,
"customfield\_10614": null,
"aggregatetimeoriginalestimate": null,
"customfield\_10735": null,
"customfield\_10615": null,
"versions": [],
"customfield\_10616": null,
"customfield\_10617": null,
"issuelinks": [
{
"id": "14337",
"self": "https://companyName.atlassian.net/rest/api/2/issueLink/14337",
"type": {
"id": "10000",
"name": "Blocks",
"inward": "is blocked by",
"outward": "blocks",
"self": "https://companyName.atlassian.net/rest/api/2/issueLinkType/10000"
},
"inwardIssue": {
"id": "18233",
"key": "THU-289",
"self": "https://companyName.atlassian.net/rest/api/2/issue/18233",
"fields": {
"summary": "summary",
"status": {
"self": "https://companyName.atlassian.net/rest/api/2/status/10804",
"description": "",
"iconUrl": "https://companyName.atlassian.net/images/icons/statuses/generic.png",
"name": "Code Review",
"id": "10804",
"statusCategory": {
"self": "https://companyName.atlassian.net/rest/api/2/statuscategory/4",
"id": 4,
"key": "indeterminate",
"colorName": "yellow",
"name": "In Progress"
}
},
"priority": {
"self": "https://companyName.atlassian.net/rest/api/2/priority/1",
"iconUrl": "https://companyName.atlassian.net/images/icons/priorities/highest.svg",
"name": "Highest",
"id": "1"
},
"issuetype": {
"self": "https://companyName.atlassian.net/rest/api/2/issuetype/10300",
"id": "10300",
"description": "Created by Jira Agile - do not edit or delete. Issue type for a user story.",
"iconUrl": "https://companyName.atlassian.net/images/icons/issuetypes/story.svg",
"name": "Story",
"subtask": false
}
}
}
},
{
"id": "14336",
"self": "https://companyName.atlassian.net/rest/api/2/issueLink/14336",
"type": {
"id": "10001",
"name": "Cloners",
"inward": "is cloned by",
"outward": "clones",
"self": "https://companyName.atlassian.net/rest/api/2/issueLinkType/10001"
},
"inwardIssue": {
"id": "18233",
"key": "THU-289",
"self": "https://companyName.atlassian.net/rest/api/2/issue/18233",
"fields": {
"summary": "summary",
"status": {
"self": "https://companyName.atlassian.net/rest/api/2/status/10804",
"description": "",
"iconUrl": "https://companyName.atlassian.net/images/icons/statuses/generic.png",
"name": "Code Review",
"id": "10804",
"statusCategory": {
"self": "https://companyName.atlassian.net/rest/api/2/statuscategory/4",
"id": 4,
"key": "indeterminate",
"colorName": "yellow",
"name": "In Progress"
}
},
"priority": {
"self": "https://companyName.atlassian.net/rest/api/2/priority/1",
"iconUrl": "https://companyName.atlassian.net/images/icons/priorities/highest.svg",
"name": "Highest",
"id": "1"
},
"issuetype": {
"self": "https://companyName.atlassian.net/rest/api/2/issuetype/10300",
"id": "10300",
"description": "Created by Jira Agile - do not edit or delete. Issue type for a user story.",
"iconUrl": "https://companyName.atlassian.net/images/icons/issuetypes/story.svg",
"name": "Story",
"subtask": false
}
}
}
}
],
"assignee": null,
"status": {
"self": "https://companyName.atlassian.net/rest/api/2/status/10804",
"description": "",
"iconUrl": "https://companyName.atlassian.net/images/icons/statuses/generic.png",
"name": "Code Review",
"id": "10804",
"statusCategory": {
"self": "https://companyName.atlassian.net/rest/api/2/statuscategory/4",
"id": 4,
"key": "indeterminate",
"colorName": "yellow",
"name": "In Progress"
}
},
"components": [],
"customfield\_10730": null,
"customfield\_10600": null,
"customfield\_10601": null,
"customfield\_10602": null,
"aggregatetimeestimate": null,
"customfield\_10603": null,
"customfield\_10604": null,
"customfield\_10605": null,
"customfield\_10727": null,
"customfield\_10606": null,
"customfield\_10728": null,
"customfield\_10607": null,
"customfield\_10608": null,
"customfield\_10729": null,
"customfield\_10609": null,
"creator": {
"self": "https://companyName.atlassian.net/rest/api/2/user?username=name",
"name": "name",
"key": "key",
"accountId": "accountId",
"emailAddress": "email",
"avatarUrls": {
"48x48": "https://avatar-cdn.atlassian.com/40f53dffbd45ed6f545a40ec91d4843f?s=48&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2F40f53dffbd45ed6f545a40ec91d4843f%3Fd%3Dmm%26s%3D48%26noRedirect%3Dtrue",
"24x24": "https://avatar-cdn.atlassian.com/40f53dffbd45ed6f545a40ec91d4843f?s=24&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2F40f53dffbd45ed6f545a40ec91d4843f%3Fd%3Dmm%26s%3D24%26noRedirect%3Dtrue",
"16x16": "https://avatar-cdn.atlassian.com/40f53dffbd45ed6f545a40ec91d4843f?s=16&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2F40f53dffbd45ed6f545a40ec91d4843f%3Fd%3Dmm%26s%3D16%26noRedirect%3Dtrue",
"32x32": "https://avatar-cdn.atlassian.com/40f53dffbd45ed6f545a40ec91d4843f?s=32&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2F40f53dffbd45ed6f545a40ec91d4843f%3Fd%3Dmm%26s%3D32%26noRedirect%3Dtrue"
},
"displayName": "name",
"active": true,
"timeZone": "America/Chicago"
},
"subtasks": [],
"reporter": {
"self": "https://companyName.atlassian.net/rest/api/2/user?username=name",
"name": "name",
"key": "key",
"accountId": "557058:ec<KEY>",
"emailAddress": "email",
"avatarUrls": {
"48x48": "https://avatar-cdn.atlassian.com/40f53dffbd45ed6f545a40ec91d4843f?s=48&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2F40f53dffbd45ed6f545a40ec91d4843f%3Fd%3Dmm%26s%3D48%26noRedirect%3Dtrue",
"24x24": "https://avatar-cdn.atlassian.com/40f53dffbd45ed6f545a40ec91d4843f?s=24&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2F40f53dffbd45ed6f545a40ec91d4843f%3Fd%3Dmm%26s%3D24%26noRedirect%3Dtrue",
"16x16": "https://avatar-cdn.atlassian.com/40f53dffbd45ed6f545a40ec91d4843f?s=16&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2F40f53dffbd45ed6f545a40ec91d4843f%3Fd%3Dmm%26s%3D16%26noRedirect%3Dtrue",
"32x32": "https://avatar-cdn.atlassian.com/40f53dffbd45ed6f545a40ec91d4843f?s=32&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2F40f53dffbd45ed6f545a40ec91d4843f%3Fd%3Dmm%26s%3D32%26noRedirect%3Dtrue"
},
"displayName": "name",
"active": true,
"timeZone": "America/Chicago"
},
"aggregateprogress": {
"progress": 0,
"total": 0
},
"customfield\_10711": null,
"customfield\_10712": null,
"closedSprints": [
{
"id": 53,
"self": "https://companyName.atlassian.net/rest/agile/1.0/sprint/53",
"state": "closed",
"name": "THU Sprint 7",
"startDate": "2018-03-07T22:33:01.297Z",
"endDate": "2018-03-21T22:33:00.000Z",
"completeDate": "2018-03-20T19:16:10.159Z",
"originBoardId": 9,
"goal": "Survive!"
},
{
"id": 43,
"self": "https://companyName.atlassian.net/rest/agile/1.0/sprint/43",
"state": "closed",
"name": "THU Sprint 5",
"startDate": "2018-02-06T20:44:37.751Z",
"endDate": "2018-02-20T20:44:00.000Z",
"completeDate": "2018-02-20T20:15:10.688Z",
"originBoardId": 9,
"goal": ""
},
{
"id": 48,
"self": "https://companyName.atlassian.net/rest/agile/1.0/sprint/48",
"state": "closed",
"name": "THU Sprint 6",
"startDate": "2018-02-20T20:15:01.461Z",
"endDate": "2018-03-06T20:15:00.000Z",
"completeDate": "2018-03-07T22:32:22.792Z",
"originBoardId": 9,
"goal": ""
}
],
"progress": {
"progress": 0,
"total": 0
},
"votes": {
"self": "https://companyName.atlassian.net/rest/api/2/issue/THU-219/votes",
"votes": 0,
"hasVoted": false
},
"worklog": {
"startAt": 0,
"maxResults": 20,
"total": 0,
"worklogs": []
},
"issuetype": {
"self": "https://companyName.atlassian.net/rest/api/2/issuetype/10300",
"id": "10300",
"description": "Created by Jira Agile - do not edit or delete. Issue type for a user story.",
"iconUrl": "https://companyName.atlassian.net/images/icons/issuetypes/story.svg",
"name": "Story",
"subtask": false
},
"timespent": null,
"sprint": {
"id": 58,
"self": "https://companyName.atlassian.net/rest/agile/1.0/sprint/58",
"state": "active",
"name": "THU Sprint 8",
"startDate": "2018-03-20T19:16:10.291Z",
"endDate": "2018-04-03T19:16:00.000Z",
"originBoardId": 9,
"goal": "make sure fixes fix ticket"
},
"project": {
"self": "https://nebook.atlassian.net/rest/api/2/project/11510",
"id": "11510",
"key": "THU",
"name": "name",
"projectTypeKey": "software",
"avatarUrls": {
"48x48": "https://nebook.atlassian.net/secure/projectavatar?avatarId=10324",
"24x24": "https://nebook.atlassian.net/secure/projectavatar?size=small&avatarId=10324",
"16x16": "https://nebook.atlassian.net/secure/projectavatar?size=xsmall&avatarId=10324",
"32x32": "https://nebook.atlassian.net/secure/projectavatar?size=medium&avatarId=10324"
}
},
"aggregatetimespent": null,
"resolutiondate": null,
"workratio": -1,
"watches": {
"self": "https://companyName.atlassian.net/rest/api/2/issue/THU-219/watchers",
"watchCount": 2,
"isWatching": false
},
"created": "2017-12-14T18:01:28.505-0600",
"customfield\_10300": null,
"customfield\_10301": null,
"updated": "2018-03-20T14:16:13.195-0500",
"timeoriginalestimate": null,
"description": "description",
"timetracking": {},
"customfield\_10401": [],
"customfield\_10402": null,
"customfield\_10006": "THU-211",
"customfield\_10403": null,
"security": null,
"customfield\_10007": null,
"attachment": [],
"flagged": false,
"summary": "summary",
"customfield\_10000": "{}",
"customfield\_10001": null,
"customfield\_10002": null,
"customfield\_10400": null,
"environment": null,
"duedate": null,
"comment": {
"comments": [
{
"self": "https://companyName.atlassian.net/rest/api/2/issue/15237/comment/23890",
"id": "23890",
"author": {
"self": "https://companyName.atlassian.net/rest/api/2/user?username=name",
"name": "name",
"key": "key",
"accountId": "accountId",
"emailAddress": "email",
"avatarUrls": {
"48x48": "https://avatar-cdn.atlassian.com/b232453b514d10d65f986fe7f2df592c?s=48&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2Fb232453b514d10d65f986fe7f2df592c%3Fd%3Dmm%26s%3D48%26noRedirect%3Dtrue",
"24x24": "https://avatar-cdn.atlassian.com/b232453b514d10d65f986fe7f2df592c?s=24&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2Fb232453b514d10d65f986fe7f2df592c%3Fd%3Dmm%26s%3D24%26noRedirect%3Dtrue",
"16x16": "https://avatar-cdn.atlassian.com/b232453b514d10d65f986fe7f2df592c?s=16&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2Fb232453b514d10d65f986fe7f2df592c%3Fd%3Dmm%26s%3D16%26noRedirect%3Dtrue",
"32x32": "https://avatar-cdn.atlassian.com/b232453b514d10d65f986fe7f2df592c?s=32&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2Fb232453b514d10d65f986fe7f2df592c%3Fd%3Dmm%26s%3D32%26noRedirect%3Dtrue"
},
"displayName": "name",
"active": true,
"timeZone": "America/Chicago"
},
"body": "text describing the issue",
"updateAuthor": {
"self": "https://companyName.atlassian.net/rest/api/2/user?username=name",
"name": "name",
"key": "key",
"accountId": "557058:d4a31d0e-7944-488b-9f89-fcfec87b95ac",
"emailAddress": "email",
"avatarUrls": {
"48x48": "https://avatar-cdn.atlassian.com/b232453b514d10d65f986fe7f2df592c?s=48&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2Fb232453b514d10d65f986fe7f2df592c%3Fd%3Dmm%26s%3D48%26noRedirect%3Dtrue",
"24x24": "https://avatar-cdn.atlassian.com/b232453b514d10d65f986fe7f2df592c?s=24&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2Fb232453b514d10d65f986fe7f2df592c%3Fd%3Dmm%26s%3D24%26noRedirect%3Dtrue",
"16x16": "https://avatar-cdn.atlassian.com/b232453b514d10d65f986fe7f2df592c?s=16&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2Fb232453b514d10d65f986fe7f2df592c%3Fd%3Dmm%26s%3D16%26noRedirect%3Dtrue",
"32x32": "https://avatar-cdn.atlassian.com/b232453b514d10d65f986fe7f2df592c?s=32&d=https%3A%2F%2Fsecure.gravatar.com%2Favatar%2Fb232453b514d10d65f986fe7f2df592c%3Fd%3Dmm%26s%3D32%26noRedirect%3Dtrue"
},
"displayName": "name",
"active": true,
"timeZone": "America/Chicago"
},
"created": "2018-01-23T17:12:27.999-0600",
"updated": "2018-01-23T17:12:27.999-0600"
}
],
"maxResults": 1,
"total": 1,
"startAt": 0
}
}
},
```
I have been able to find some examples on here that deal with this issue however I have not been able to find anything that deals with repeated key words like my JSON data has.
The only real information I care about is the initial "Key" value (In this example "THU-219") and the status "name" field which is under "fields" -> "issuelinks" -> "inwardIssue" -> "fields" -> "status" -> "name"
I would like to be able to print out in the form of
Key: THU-219 \n
Status: "Code Review"
To achieve this I have attempted this code (after I have made a secure connection), where respStr is the entire string which contains the snippet above in the same format for each issue. Am I creating my classes wrong? Because I am unable to get the expected data. I am aware that there is an option in Visual Studio to "paste as JSON classes" but I'm getting an error that won't allow me to utilize this feature, so I need a way to accomplish this without relying on that paste special feature.
```
// NOTE(review): generic type arguments appear to have been stripped by the
// site's formatting (e.g. DeserializeObject<...>, KeyValuePair<...,...>,
// Dictionary<...,...>), so the code below is not compilable as shown.
String respStr = response.Content;
// Deserialize the raw Jira REST response into the model classes below.
JiraObject jira = JsonConvert.DeserializeObject(respStr);
foreach (KeyValuePairkvp in jira.Issues)
{
Console.WriteLine("Key:" + kvp.Value.Keys);
// NOTE(review): semicolon missing on the next line in the original post.
Console.WriteLine("Status:" + kvp.Value.Status)
Console.WriteLine();
}
Console.ReadKey();
}
}
// Root object of the Jira search response.
class JiraObject
{
[JsonProperty("issues")]
// NOTE(review): in the JSON shown, "issues" is an array of issue objects,
// not a key/value map -- presumably a List of issues is wanted; confirm.
public Dictionary Issues { get; set; }
}
class issues
{
[JsonProperty("key")]
// NOTE(review): "key" is a plain string in the JSON ("THU-219"),
// so a Dictionary mapping looks wrong here.
public Dictionary Keys { get; set; }
[JsonProperty("fields")]
public Dictionary fields { get; set; }
}
class Key
{
public Key Keys { get; set; }
}
class Fields
{
[JsonProperty("issuelinks")]
// NOTE(review): "issuelinks" is an array in the JSON shown.
public Dictionary issueLinks { get; set; }
}
class issuelinks
{
[JsonProperty("inwardIssue")]
public Dictionary inwardIssues { get; set; }
}
class inwardIssue
{
[JsonProperty("fields")]
public Dictionary inwardFields { get; set; }
}
class inwardFields
{
[JsonProperty("status")]
public Dictionary status { get; set; }
}
class status
{
[JsonProperty("name")]
// NOTE(review): "name" is a plain string in the JSON ("Code Review").
public Dictionary name { get; set; }
}
class name
{
public name names { get; set; }
}
```<issue_comment>username_1: Try using the below portion of elements within the `content.xml` file on your desktop and run the code below.
within `content.xml` file:
```
2018-02-21 10:01:01
1234
700ST
123466
277AVD
```
Script to parse the value from:
```
Sub DemoXML()
Dim post As Object
With CreateObject("MSXML2.DOMDocument")
.async = False: .validateOnParse = False
.Load (ThisWorkbook.path & "\content.xml")
For Each post In .SelectNodes("//Extract//Application")
r = r + 1: Cells(r, 1) = post.ParentNode.ParentNode.FirstChild.Text
Cells(r, 2) = post.SelectNodes(".//Name")(0).Text
Cells(r, 3) = post.SelectNodes(".//Addr")(0).Text
Next post
End With
End Sub
```
Populated results:
```
2/21/2018 10:01 1234 700ST
2/21/2018 10:01 123466 277AVD
```
Upvotes: 1 <issue_comment>username_2: A better answer was already given, but here is a variation:
```
Option Explicit
Public Sub DemoXML()
Dim post As Object, R As Long, C As Long, dateString As String
With CreateObject("MSXML2.DOMDocument")
.async = False: .validateOnParse = False
.Load "C:\Users\User\Desktop\random.xml"
dateString = .SelectSingleNode("//ExtractDate").Text
For Each post In .SelectNodes("//Application")
R = R + 1: Cells(R, 1) = dateString
Cells(R, 2) = post.FirstChild.Text
Cells(R, 3) = post.LastChild.Text
Next post
End With
End Sub
```
Upvotes: 0 |
2018/03/21 | 498 | 1,949 | <issue_start>username_0: I'm trying to send s3event message to rabbitmq by invoking AWS lambda function. I have configured SQS as my dead letter queue(DLQ).
I know the message is sent to the DLQ when there is a failure in the invocation of the lambda, or in situations like timeouts or resource constraints.
My question is, I want to send an event message to the DLQ from inside the lambda function on a certain condition, for example if RabbitMQ is down or some other condition of my interest.
Is there any possibility for the same? Should I throw an exception, or is there some other better approach to send the event message to the DLQ?
I'm using Java for development and connecting to RabbitMQ from my lambda function.<issue_comment>username_1: In the `DLQ` setting of a Lambda you specify an SNS topic or an SQS queue. In your setup you have configured the DLQ to be an SQS queue. This is a regular SQS queue. Using the SQS Java SDK you can post a message to that SQS queue.
Here are a few references:
<https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-send-message.html>
To get the Queue URL you can use these:
<https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_GetQueueUrl.html>
Or through Java:
<https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/sqs/AmazonSQSClient.html#getQueueUrl-java.lang.String->
Upvotes: 1 <issue_comment>username_2: The DLQ is simply an SQS queue, so you could [send a message](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html) to it like you would any other queue. You would want it to be formatted the same way that Lambda natively puts messages in the DLQ, so that whatever processing you have on the DLQ can perform the same way for all messages. You would want to ensure that you treat the lambda as successfully executed in this instance, though, so that the normal DLQ process doesn't pick up the same message twice.
Upvotes: 4 [selected_answer] |
2018/03/21 | 782 | 2,359 | <issue_start>username_0: Consider this simple program:
`fails.c`:
```
#include
int main(){
int i = 10;
if (i == 10)
int j = 11;
return 0;
}
```
That fails to compile (`gcc fails.c`), giving this error:
```
fails.c: In function ‘main’:
fails.c:7:3: error: expected expression before ‘int’
int j = 11;
^
```
But this one goes through just fine:
```
#include
int main(){
int i = 10;
if (i == 10){
int j = 11;
}
return 0;
}
```
I figured that the workaround is to put those `{}` in. But I wish to know why this is required.
Why does it behave this way, when something like `printf` is acceptable?
```
#include
int main(){
int i = 10;
if (i == 10)
printf("some text\n");
return 0;
}
```<issue_comment>username_1: This is because `if` must be followed by a statement:
[C99/6.8.4](https://busybox.net/~landley/c99-draft.html#6.8.4)
>
> if ( expression ) statement
>
>
>
However, a [declaration](https://busybox.net/~landley/c99-draft.html#6.7) is **not** a statement:
[C99/6.8](https://busybox.net/~landley/c99-draft.html#statement)
>
> statement:
>
>
> labeled-statement
>
>
> compound-statement
>
>
> expression-statement
>
>
> selection-statement
>
>
> iteration-statement
>
>
> jump-statement
>
>
>
When put inside a `{}`, it is a compound-statement, thus ok.
Upvotes: 4 [selected_answer]<issue_comment>username_2: There is a difference between *declaration* and *statement*.
```
int j = 11;
```
is a declaration. An `if` statement must be followed by a statement. Putting `{}` after the `if` results in a compound statement, and a compound statement may contain declarations.
Upvotes: 1 <issue_comment>username_3: It seems you compile your ANSI C code with the C89 standard. And, unlike its successor C99, it requires all variables to be declared at the beginning of the scope. So, having `int j = 11;` somewhere in between other statements contradicts that C89 rule. If you open a new scope just before that `int j = 11;`, it's back to OK.
The actual reason for this C89 limitation was likely an attempt to simplify memory management for stack-allocated variables. For instance, compare it with the Pascal language, which requires all variables to be declared in a special `var` section before the code (so it's even more strict than C89).
Upvotes: 0 |
2018/03/21 | 663 | 2,098 | <issue_start>username_0: I want to check if two svg elements have a certain fill color and then move another svg element. Can't get it to work.
```
function calculate() {
var state1 = document.getElementById("#stateButton1"),
maturity1 = document.getElementById("#maturityButton1");
if ((state1.style.fill = "#e1532d") && (maturity1.style.fill = "#e1532d")) {
TweenMax.to('#star,#star1', 0.1, {y:0});
TweenMax.to('#star,#star1', 0.1, {y:-290});
}
```<issue_comment>username_1: This is because `if` must be followed by a statement:
[C99/6.8.4](https://busybox.net/~landley/c99-draft.html#6.8.4)
>
> if ( expression ) statement
>
>
>
However, a [declaration](https://busybox.net/~landley/c99-draft.html#6.7) is **not** a statement:
[C99/6.8](https://busybox.net/~landley/c99-draft.html#statement)
>
> statement:
>
>
> labeled-statement
>
>
> compound-statement
>
>
> expression-statement
>
>
> selection-statement
>
>
> iteration-statement
>
>
> jump-statement
>
>
>
When put inside a `{}`, it is a compound-statement, thus ok.
Upvotes: 4 [selected_answer]<issue_comment>username_2: There is a difference between *declaration* and *statement*.
```
int j = 11;
```
is a declaration. `if` statement shall be followed by a statement. Putting `{}` after `if` statement results in a compound statement. A compound statement can have no other statement in it or can have a declaration.
Upvotes: 1 <issue_comment>username_3: It seems you compile your ANSI C code with C89 standard. And, unlike his successor C99, it requires all variables to be declared at the beginning of the scope. So, having `int j = 11;` somewhere in-between other statements just contradicts that C89 rule. If you open a new scope just before that `int j = 11;`, it's back to OK.
Actual reason of such C89 limitation should be an attempt to simplify memory management for stack-allocable variables. For instance, compare it with Pascal language that requires all variables to be declared in the special `var` section before the code (so, it's even more strict that C89).
Upvotes: 0 |
2018/03/21 | 1,128 | 4,428 | <issue_start>username_0: I am trying to add a force touch automation for an iOS App. I looked into the Apple docs for the same problem but cannot find anything useful. We can do the force touch from the assistive touch but I am looking for something as simple as the tap() action. Is there anything that we can use for forceTouch?
Any help will be greatly appreciated. Thanks!<issue_comment>username_1: how about using `press(forDuration)`?
`press(forDuration:)`
Sends a long press gesture to a hittable point computed for the element, holding for the specified duration.
<https://developer.apple.com/documentation/xctest/xcuielement/1618663-press>
Upvotes: 0 <issue_comment>username_2: >
> The raw force value associated with a touch is available in the [force](https://developer.apple.com/documentation/uikit/uitouch/1618110-force) property of the [UITouch](https://developer.apple.com/documentation/uikit/uitouch) object. You can compare that value against the value in the [maximumPossibleForce](https://developer.apple.com/documentation/uikit/uitouch/1618121-maximumpossibleforce) property to determine the relative amount of force.
>
>
>
```
//Without this import line, you'll get compiler errors when implementing your touch methods since they aren't part of the UIGestureRecognizer superclass
//Without this import line, you'll get compiler errors when implementing your touch methods since they aren't part of the UIGestureRecognizer superclass
import UIKit.UIGestureRecognizerSubclass
//Since 3D Touch isn't available before iOS 9, we can use the availability APIs to ensure no one uses this class for earlier versions of the OS.
@available(iOS 9.0, *)
public class ForceTouchGestureRecognizer: UIGestureRecognizer {
//Because we don't know what the maximum force will always be for a UITouch, the force property here will be normalized to a value between 0.0 and 1.0.
public private(set) var force: CGFloat = 0.0
public var maximumForce: CGFloat = 4.0
convenience init() {
self.init(target: nil, action: nil)
}
//We override the initializer because UIGestureRecognizer's cancelsTouchesInView property is true by default. If you were to, say, add this recognizer to a tableView's cell, it would prevent didSelectRowAtIndexPath from getting called. Thanks for finding this bug, <NAME>!
public override init(target: Any?, action: Selector?) {
super.init(target: target, action: action)
cancelsTouchesInView = false
}
public override func touchesBegan(_ touches: Set, with event: UIEvent) {
super.touchesBegan(touches, with: event)
normalizeForceAndFireEvent(.began, touches: touches)
}
public override func touchesMoved(_ touches: Set, with event: UIEvent) {
super.touchesMoved(touches, with: event)
normalizeForceAndFireEvent(.changed, touches: touches)
}
public override func touchesEnded(_ touches: Set, with event: UIEvent) {
super.touchesEnded(touches, with: event)
normalizeForceAndFireEvent(.ended, touches: touches)
}
public override func touchesCancelled(_ touches: Set, with event: UIEvent) {
super.touchesCancelled(touches, with: event)
normalizeForceAndFireEvent(.cancelled, touches: touches)
}
private func normalizeForceAndFireEvent(_ state: UIGestureRecognizerState, touches: Set) {
//Putting a guard statement here to make sure we don't fire off our target's selector event if a touch doesn't exist to begin with.
guard let firstTouch = touches.first else { return }
//Just in case the developer set a maximumForce that is higher than the touch's maximumPossibleForce, I'm setting the maximumForce to the lower of the two values.
maximumForce = min(firstTouch.maximumPossibleForce, maximumForce)
//Now that I have a proper maximumForce, I'm going to use that and normalize it so the developer can use a value between 0.0 and 1.0.
force = firstTouch.force / maximumForce
//Our properties are now ready for inspection by the developer. By setting the UIGestureRecognizer's state property, the system will automatically send the target the selector message that this recognizer was initialized with.
self.state = state
}
//This function is called automatically by UIGestureRecognizer when our state is set to .Ended. We want to use this function to reset our internal state.
public override func reset() {
super.reset()
force = 0.0
}
}
```
Upvotes: 2 |
2018/03/21 | 1,083 | 4,162 | <issue_start>username_0: I created an empty project.
**Startup.cs**
```
public void ConfigureServices(IServiceCollection services)
{
services.AddLocalization(s => s.ResourcesPath = "Resources");
var supportedCultures = new CultureInfo[]
{
new CultureInfo("de-CH"),
new CultureInfo("en-GB"),
};
services.Configure(s =>
{
s.SupportedCultures = supportedCultures;
s.SupportedUICultures = supportedCultures;
s.DefaultRequestCulture = new RequestCulture(culture: "de-CH", uiCulture: "de-CH");
});
services.AddMvc()
.AddViewLocalization(LanguageViewLocationExpanderFormat.Suffix)
.AddDataAnnotationsLocalization();
}
public void Configure(IApplicationBuilder app, IHostingEnvironment env, ILoggerFactory loggerFactory)
{
app.UseStaticFiles();
// Using localization
var locOptions = app.ApplicationServices.GetService>();
app.UseRequestLocalization(locOptions.Value);
app.UseMvc();
}
```
**Folder Structure**
```
Resources
|
|--Controllers
| HomeController.de.resx
| HomeController.en.resx
| HomeController.resx
```
**Controller**
```
public class HomeController : Controller
{
private readonly IStringLocalizer \_stringLocalizer;
public HomeController(IStringLocalizer stringLocalizer)
{
\_stringLocalizer = stringLocalizer;
}
public IActionResult Index()
{
string testValue = \_stringLocalizer["Test"];
return View();
}
}
```
I'm new to ASP.NET Core, and I'm just trying to understand why testValue always returns "Test"; it's a bit confusing. Am I doing something wrong? I will be happy if you help me.<issue_comment>username_1: Two different errors here prevent correct loading of localized resources.
1. You set incorrect `ResourcesPath` in `AddLocalization()` call. Since your resx files are placed in `Resources/Controllers` directory, you should replace call
```
services.AddLocalization(s => s.ResourcesPath = "Resources");
```
with
```
services.AddLocalization(s => s.ResourcesPath = "Resources/Controllers");
```
2. You use incorrect name for resx files. Check [Resource file naming](https://learn.microsoft.com/en-us/aspnet/core/fundamentals/localization#resource-file-naming) section in [Globalization and localization in ASP.NET Core](https://learn.microsoft.com/en-us/aspnet/core/fundamentals/localization) article:
>
> Resources are named for the full type name of their class minus the
> assembly name. For example, a French resource in a project whose main
> assembly is `LocalizationWebsite.Web.dll` for the class
> `LocalizationWebsite.Web.Startup` would be named *Startup.fr.resx*. A
> resource for the class
> `LocalizationWebsite.Web.Controllers.HomeController` would be named
> *Controllers.HomeController.fr.resx*. If your targeted class's namespace
> isn't the same as the assembly name you will need the full type name.
> For example, in the sample project a resource for the type
> `ExtraNamespace.Tools` would be named *ExtraNamespace.Tools.fr.resx*.
>
>
>
So if your assembly is called `TestMvcApplication` and `HomeController` resides in namespace `TestMvcApplication.Controllers`, then you should call your resx files in the following way:
```
Resources
|
|--Controllers
| Controllers.HomeController.de.resx
| Controllers.HomeController.en.resx
| Controllers.HomeController.resx
```
I believe after you make the above changes to your project, localization will work OK.
Upvotes: 2 <issue_comment>username_2: Just add the package **Microsoft.Extensions.Localization**
After doing that, it works.
The ResourcesPath is optional, and if you leave it null, the resource file organization style is the same as in a classic ASP.NET application (resource files live in the same location as the target classes).
Upvotes: 5 [selected_answer]<issue_comment>username_3: I fixed it by making sure that my folder structure reflected the namespace I used when working with resource designer files!
[](https://i.stack.imgur.com/4bTZT.png)
Upvotes: 1 <issue_comment>username_4: Make sure the "**SharedResources.cs**" has the same namespace as the project's name that contains it.
Upvotes: 0 |
2018/03/21 | 973 | 3,923 | <issue_start>username_0: I am trying to get started with mobx-react-forms but am running into an error:
```
Unhandled Rejection (Error): Validation Error: Invalid Field Instance
-> const form = new Form();
```
Here is my code:
```
import React, { Component } from 'react';
import { inject, observer } from 'mobx-react';
import { Form as BaseForm } from 'mobx-react-form';
import validatorjs from 'validatorjs';
class Form extends BaseForm{
plugins() {
return { dvr: validatorjs };
}
setup(){
return {
fields: [{
name: 'amount',
label: 'Amount',
}]
}
}
hooks(){
return {
onSuccess(form) {
alert('Form is valid! Send the request here.');
// get field values
console.log('Form Values!', form.values());
},
onError(form) {
alert('Form has errors!');
// get all form errors
console.log('All form errors', form.errors());
}
};
}
}
@observer
class PaymentForm extends Component {
render() {
const form = new Form();
return (
Payment Form
------------
{form.$('amount').label}
{form.$('amount').error}
)
}
}
export default PaymentForm;
```<issue_comment>username_1: Two different errors here prevent correct loading of localized resources.
1. You set incorrect `ResourcesPath` in `AddLocalization()` call. Since your resx files are placed in `Resources/Controllers` directory, you should replace call
```
services.AddLocalization(s => s.ResourcesPath = "Resources");
```
with
```
services.AddLocalization(s => s.ResourcesPath = "Resources/Controllers");
```
2. You use incorrect name for resx files. Check [Resource file naming](https://learn.microsoft.com/en-us/aspnet/core/fundamentals/localization#resource-file-naming) section in [Globalization and localization in ASP.NET Core](https://learn.microsoft.com/en-us/aspnet/core/fundamentals/localization) article:
>
> Resources are named for the full type name of their class minus the
> assembly name. For example, a French resource in a project whose main
> assembly is `LocalizationWebsite.Web.dll` for the class
> `LocalizationWebsite.Web.Startup` would be named *Startup.fr.resx*. A
> resource for the class
> `LocalizationWebsite.Web.Controllers.HomeController` would be named
> *Controllers.HomeController.fr.resx*. If your targeted class's namespace
> isn't the same as the assembly name you will need the full type name.
> For example, in the sample project a resource for the type
> `ExtraNamespace.Tools` would be named *ExtraNamespace.Tools.fr.resx*.
>
>
>
So if your assembly is called `TestMvcApplication` and `HomeController` resides in namespace `TestMvcApplication.Controllers`, then you should call your resx files in the following way:
```
Resources
|
|--Controllers
| Controllers.HomeController.de.resx
| Controllers.HomeController.en.resx
| Controllers.HomeController.resx
```
I believe after you make the above changes to your project, localization will work OK.
Upvotes: 2 <issue_comment>username_2: Just add the package **Microsoft.Extensions.Localization**
After doing that, it works.
The ResourcesPath is optional, and if you leave it null, the resource file organization style is the same as in a classic ASP.NET application (resource files live in the same location as the target classes).
Upvotes: 5 [selected_answer]<issue_comment>username_3: I fixed it by making sure that my folder structure reflected the namespace I used when working with resource designer files!
[](https://i.stack.imgur.com/4bTZT.png)
Upvotes: 1 <issue_comment>username_4: Make sure the "**SharedResources.cs**" has the same namespace as the project's name that contains it.
Upvotes: 0 |
2018/03/21 | 1,030 | 3,565 | <issue_start>username_0: [Chrome invalidates a valid manifest](https://i.stack.imgur.com/Jv6cW.png)
While importing a valid unpacked extension
(manifest confirmed by <https://manifest-validator.appspot.com/>),
this error appears, and the extension does not get loaded.
>
> Only one of 'browser\_action', 'page\_action', and 'app' can be
> specified.
>
>
>
The manifest does not contain a duplicate of either of the keys mentioned in the error.
**manifest.json**
```
{
"applications": {
"gecko": {
"id": "<EMAIL>",
"strict_min_version": "42.0"
}
},
"background": {
"scripts": ["jquery.js", "my-background.js"],
"page": "my-background.html"
},
"browser_action": {
"default_icon": "userInterface/browser_action_button/airplay_icon.svg",
"default_title": "LightDictionary",
"default_popup": "userInterface/browser_action_button/popup.html"
},
"commands": {
"_execute_browser_action": {
"suggested_key": {
"default": "Ctrl+Shift+Y"
}
}
},
"content_security_policy": "script-src 'self' https://example.com; object-src 'self'",
"content_scripts": [
{
"exclude_matches": ["*://developer.mozilla.org/*"],
"matches": ["*://*.mozilla.org/*"],
"js": ["borderify.js"]
}
],
"default_locale": "en",
"description": "none",
"icons": {
"48": "userInterface/browser_action_button/airplay_icon.svg",
"96": "userInterface/browser_action_button/airplay_icon.svg"
},
"manifest_version": 2,
"name": "LightDictionary",
"page_action": {
"default_icon": {
"19": "userInterface/browser_action_button/airplay_icon.svg",
"38": "userInterface/browser_action_button/airplay_icon.svg"
},
"default_title": "LightDictionary",
"default_popup": "userInterface/browser_action_button/popup.html"
},
"permissions": ["webNavigation"],
"version": "0.1",
"web_accessible_resources": ["images/my-image.png"]
}
```<issue_comment>username_1: I have talked to a Mozilla MDN maintainer on their IRC channel, and I came to the conclusion that the so-called "cross-browser extension" manifest.json published on:
* <https://developer.mozilla.org/en-US/Add-ons/WebExtensions/manifest.json>
was incompatible with Chrome and only works in the Firefox browser, because:
Chrome has strict checks on the manifest, and the way Chrome handles the manifest differs from the way Firefox does. Chrome is slow to adopt technologies that are supported by Firefox.
So, the only way to make the manifest cross-browser compatible is:
1. to take [quick manifest.json example suggested by MDN](https://developer.mozilla.org/en-US/Add-ons/WebExtensions/manifest.json)
2. load it into Chrome ([chrome://extensions](http://chrome://extensions), Turn on developer mode, Load Unpacked)
3. check the errors and remove what is asked by the chrome.
Things to keep in mind:
* Chrome does not support .svg format icons, which leads to the specified icon not being shown. While Firefox does support it, it is advisable not to use SVG for a cross-browser extension.
Further comment to read: <https://hacks.mozilla.org/2017/06/cross-browser-extensions-available-now-in-firefox/#comment-21268>
Upvotes: 1 <issue_comment>username_2: You can only have one of the properties that it specifies in the error.
>
> Only one of 'browser\_action', 'page\_action', and 'app' can be specified.
>
>
>
You have both a **browser\_action** property & a **page\_action** property in your json object.
Remove one of these from the object to fix it.
Upvotes: 0 |
2018/03/21 | 557 | 1,922 | <issue_start>username_0: I'm working on a refactor where I have to change my classes from hibernate xml to annotations (JPA annotations preferred but hibernate okay). One of my entities uses the hibernate idbag feature combined with the element feature and a join table.
hibernate xml:
```
table\_a\_seq
table\_a\_b\_seq
```
The class looks like this:
```
public class EntityA {
Long id;
Collection entityBIds;
}
```
Schema:
```
table_a
table_a_id number(13,0)
table_b
table_b_id number(13,0)
table_a_b
table_a_b_id number(13,0)
fk_table_a_id number(13,0)
fk_table_b_id number(13,0)
```
How would I use annotations to implement this? Note that this is a pretty complex system and I want to minimize the changes that I have to make aside from the annotations.<issue_comment>username_1: What you want is a one-to-many relation with a join table.
See <https://en.wikibooks.org/wiki/Java_Persistence/OneToMany> §1.5 *Join Table*
Your table model is not practical with both tables `table_a` and `table_a_b`. But changing it would be costly I guess.
I hope your java model is more flexible...
Define an `EntityB` with only a `Long id`. In `EntityA` have a `Collection entityBs` and adapt the implementation of your *getEntityBIds / setEntityBIds / addEntityBId* / etc. to access it and convert as required. Of course hoping the field `entityBIds` was private and thus not used outside the entity.
Upvotes: 1 <issue_comment>username_2: I discovered the answer!
```
@ElementCollection(targetClass = Long.class, fetch = FetchType.EAGER)
@CollectionTable(name = "TABLE_A_B", joinColumns = @JoinColumn(name="fk_table_b_id"))
@Column(name = "fk_table_a_id")
private Collection entityBIds;
```
Of course doing this is less than elegant, but I needed the simplest way to convert to annotations without breaking the complex code surrounding the entities.
Upvotes: 1 [selected_answer] |
2018/03/21 | 514 | 1,938 | <issue_start>username_0: I have a `docker-compose` file for running several containers including Logstash. I have mapped the mounted `sincedb` as in the snippet:
```
logstash:
build:
context: logstash/
volumes:
- ./tmp/logstash/sincedb:/usr/share/logstash/sincedb
```
The Logstash container has some permission errors, in particular with accessing sincedb as shown in the error snippet below:
```
Error: Permission denied - /usr/share/logstash/sincedb/sincedb
Exception: Errno::EACCES
```
I tried to execute chmod within the container, but I get the errors below:
```
bash-4.2$ chmod o+wx /usr/share/logstash/sincedb/
chmod: changing permissions of ‘/usr/share/logstash/sincedb/’: Operation not permitted
```
Is there a way to overcome this permission issue?<issue_comment>username_1: You could try to use Docker named volumes. Currently you are just mounting a host folder into the container.
Sample docker-compose.yml with named volumes
```
version: '3.5'
services:
logstash:
build:
context: logstash/
volumes:
- logstash_data:/usr/share/logstash/sincedb
volumes:
logstash_data: # optionaly define more parameters
```
then you can see the named volume with command
```
docker volume ls
```
Upvotes: 0 <issue_comment>username_2: I was able to resolve the issue by setting proper permissions on the host folder, which maps to the folder in the docker container. By issuing the command `chmod -R 757` against the folder, access was possible. However, this was a temporary measure; I later discovered the correct permissions can be set in the `docker-compose.yml` file by appending `:rw` at the end of the specific line, like so:
```
volumes:
- logstash_data:/usr/share/logstash/sincedb:rw
```
This effectively maintained the permissions across rebuilds, which the earlier-mentioned method could not do (in addition to its security implications).
Upvotes: 4 [selected_answer] |
2018/03/21 | 631 | 2,489 | <issue_start>username_0: I'm wanting to implement an Observable / Subject with 3 particular attributes
1. Remember last emitted value and be able to surface it via a getter (BehaviorSubject)
2. Only emit when value changes
3. It must have a strong type such that the getter is known to be available by a consumer (aka. BehaviorSubject.getValue())
I'm thinking of just extending BehaviorSubject but want to make sure I'm not introducing any potential gotchas based on my novice understanding.
```
export class DistinctUntilChangedBehaviorSubject extends BehaviorSubject {
constructor(
initialValue: T,
private \_distinctKeySelector?: (value: T) => TValue,
private \_comparer?: \_Comparer
) {
super(initialValue);
}
public subscribe() {
// I'm particularly interested in knowing if this has any gotchas.
// Mostly things like creating subscriptions that don't get disposed as expected.
return super.distinctUntilChanged(
this.\_distinctKeySelector,
this.\_comparer
).subscribe.apply(this, arguments);
}
}
```
So 2 questions:
1. Does this seem like a reasonable approach / are there any gotchas here?
2. Is there another preferred way of doing this?<issue_comment>username_1: I do not really know why, but I tend to prefer composition over extension.
So I would do something along these lines
```
import {BehaviorSubject} from 'rxjs';
export class BehaviourSubjectAugmented {
bs: BehaviorSubject;
constructor(initialValue: T, private comparer: (p: T, q: T) => boolean) {
this.bs = new BehaviorSubject(initialValue);
}
getValue() {
return this.bs.getValue();
}
asObservable() {
return this.bs.asObservable()
.distinctUntilChanged(this.comparer);
}
complete() {
return this.bs.complete();
}
next(value: T) {
return this.bs.next(value);
}
}
```
Upvotes: 2 <issue_comment>username_2: It turns out my original idea causes a "call stack exceeded" issue. I'm assuming that distinctUntilChanged must call subscribe internally, thus causing infinite recursion.
I ended up finding a simpler way to get what I needed by simply adding a method to an ISubject instance.
```
function distinctUntilChangedBehaviorSubject(
initialValue: number
): ISubject & { getValue(): number } {
const observer = new BehaviorSubject(initialValue);
const observable = observer.distinctUntilChanged();
const subject: ISubject = Subject.create(
observer,
observable
);
return Object.assign(
subject,
{
getValue: () => observer.getValue()
}
);
}
```
Upvotes: 1 |
2018/03/21 | 478 | 1,627 | <issue_start>username_0: Suppose I have a file like this:
```
EN;05;UK;55;EN;66;US;87;US;89;EN;66;UK;87;
```
I want remove all the `EN` occurrence, so the final string should be:
```
UK;55;US;87;US;89;UK;87;
```
I can remove the `EN` using `string.Replace("EN", "")`, but how do I also remove the number?<issue_comment>username_1: I do not really know why, but I tend to prefer composition over extension.
So I would do something along these lines
```
import {BehaviorSubject} from 'rxjs';
export class BehaviourSubjectAugmented {
bs: BehaviorSubject;
constructor(initialValue: T, private comparer: (p: T, q: T) => boolean) {
this.bs = new BehaviorSubject(initialValue);
}
getValue() {
return this.bs.getValue();
}
asObservable() {
return this.bs.asObservable()
.distinctUntilChanged(this.comparer);
}
complete() {
return this.bs.complete();
}
next(value: T) {
return this.bs.next(value);
}
}
```
Upvotes: 2 <issue_comment>username_2: It turns out my original idea causes a "call stack exceeded" issue. I'm assuming that distinctUntilChanged must call subscribe internally, thus causing infinite recursion.
I ended up finding a simpler way to get what I needed by simply adding a method to an ISubject instance.
```
function distinctUntilChangedBehaviorSubject(
initialValue: number
): ISubject & { getValue(): number } {
const observer = new BehaviorSubject(initialValue);
const observable = observer.distinctUntilChanged();
const subject: ISubject = Subject.create(
observer,
observable
);
return Object.assign(
subject,
{
getValue: () => observer.getValue()
}
);
}
```
Upvotes: 1 |
2018/03/21 | 1,757 | 4,466 | <issue_start>username_0: I have a data frame in H2O (called df1.hex) and I am trying to add new columns to this data frame using h2o.cbind. I am using h2o 3.18.0.4.
The code that I have shown below is only a simplified version of what I am trying to do. In reality, I am adding new columns to the df1.hex data frame based on various conditions. The bottomline is that I would like to be able to use 'h2o.cbind' whenever I need to append new columns to df1.hex. So, I would have to call h2o.cbind multiple times during my program. The real dataset that I am operating on is too big for me to do all this in R and then export it into h2o.
Consider the code below:
```
# Let's load H2O and start up an H2O cluster
library(h2o)
h2o.init()
# Initialize a data frame with a column 'y'
df1 = data.frame(y=c('A', 'B', 'C'))
df1.hex = as.h2o(df1)
print(df1.hex)
# Need to append additional columns to df1.hex named x1, x2 etc...
for (i in 1:2) {
df2 = data.frame(x=c(1*i, 2*i, 3*i))
colnames(df2) = c(paste("x", i, sep='')) # x1, x2 etc...
df2.hex = as.h2o(df2)
print(paste("Iteration: ", i, ": Adding df2.hex...", sep=''))
print(df2.hex)
df1.hex = h2o.cbind(df1.hex, df2.hex) # Append x(i) to df1.hex data frame
}
print("The final dataset df1.hex: ")
print(df1.hex)
h2o.shutdown(prompt=FALSE)
```
The output is as follows:
```
> print(df1.hex)
y
1 A
2 B
3 C
[1] "Iteration: 1: Adding df2.hex..."
x1
1 1
2 2
3 3
[1] "Iteration: 2: Adding df2.hex..."
x2
1 2
2 4
3 6
[3 rows x 1 column]
[1] "The final dataset df1.hex: "
> print(df1.hex)
y x2 x20
1 A 2 2
2 B 4 4
3 C 6 6
```
Even though I was appending two new columns named x1 and x2, the final version of df1.hex contains two columns named x2 and x20. Why did that happen?
Also, the x1 column completely disappeared. I only see the column x2 appearing twice.
How can I fix my code to name my columns x1 and x2 and have the correct values in those columns as I originally intended?
Thanks.
Karthik.<issue_comment>username_1: It could be that the the `cbind` is only binding the last run element, basically, resulting in two 'x2' columns and by making it unique the column names could have changed to 'x20'. One approach would be to assign it to a `list` and then `cbind`.
```
#initialize a `list` of length 2
lst <- vector("list", 2)
for (i in 1:2) {
#create the h2o dataset and assign it to each list element
lst[[i]] <- as.h2o(data.frame(x=c(1*i, 2*i, 3*i)))
#change the column names of the h2o dataset
names(lst[[i]]) <- paste0("x", i)
}
#do the cbind outside the loop
do.call(h2o.cbind, c(df1.hex, lst))
# y x1 x2
#1 A 1 2
#2 B 2 4
#3 C 3 6
#[3 rows x 3 columns]
```
---
Or this can be done in pipe (`%>%`) with `tidyverse` function
```
library(tidyverse)
map(1:2, ~ tibble(x = (1:3) * .x) %>%
set_names(., paste0("x", .x)) %>%
as.h2o) %>%
append(df1.hex, .) %>%
do.call(h2o.cbind, .)
# y x1 x2
#1 A 1 2
#2 B 2 4
#3 C 3 6
#[3 rows x 3 columns]
```
Upvotes: 2 <issue_comment>username_2: Ok. I was able to resolve the issue.
I just replaced the following code in my original post:
```
df1.hex = h2o.cbind(df1.hex, df2.hex) # Append x(i) to df1.hex data frame
```
with this...
```
x.hex = h2o.cbind(df1.hex, df2.hex)
df1.hex = h2o.assign(x.hex, 'df1')
```
I am not sure, but it may have something to do with how h2o stores data internally.
The full code is shown below:
```
# Let's load H2O and start up an H2O cluster
library(h2o)
h2o.init()
# Initialize a data frame with a column 'y'
df1 = data.frame(y=c('A', 'B', 'C'))
df1.hex = as.h2o(df1)
print(df1.hex)
# Need to append additional columns to df1.hex named x1, x2 etc...
for (i in 1:2) {
df2 = data.frame(x=c(1*i, 2*i, 3*i))
colnames(df2) = c(paste("x", i, sep='')) # x1, x2 etc...
df2.hex = as.h2o(df2)
print(paste("Iteration: ", i, ": Adding df2.hex...", sep=''))
print(df2.hex)
# df1.hex = h2o.cbind(df1.hex, df2.hex) # Append x(i) to df1.hex data frame
x.hex = h2o.cbind(df1.hex, df2.hex)
df1.hex = h2o.assign(x.hex, 'df1')
}
print("The final dataset df1.hex: ")
print(df1.hex)
h2o.shutdown(prompt=FALSE)
```
Now, I do get the desired output:
```
> print("The final dataset df1.hex: ")
[1] "The final dataset df1.hex: "
> print(df1.hex)
y x1 x2
1 A 1 2
2 B 2 4
3 C 3 6
[3 rows x 3 columns]
>
```
Cheers!
username_2
Upvotes: 2 |
2018/03/21 | 2,044 | 7,419 | <issue_start>username_0: I understand one always needs a message for committing the changes but when and why do I need to also tag a commit? Let's say I made some changes and I commit using
```
git add -A
git commit -m "add feature 1"
```
and now
```
git tag -a -m "includes feature 1" v0.1
```
The question is when does this make sense.<issue_comment>username_1: It would make sense to specify a tag when you release a version of the software you're producing.
You could then do:
`git tag -a v1.0 -m "Release Version 1.0"`
Like my comment mentioned, you do not have to tag after every commit, like you mention in your post, you can also create lightweight tags, if you don't want to include a message. This would look like:
`git tag v1.0`
Hope this helps.
Upvotes: 5 [selected_answer]<issue_comment>username_2: First, let's define the difference between a *commit*, a *branch name* and a *tag name*. Then I'll outsource the rest of this to the existing question and excellent answer at [What is the difference between an annotated and unannotated tag?](https://stackoverflow.com/q/11514075/1256452) Note that a *lightweight tag* (un-annotated) does not take a message; only an *annotated* tag takes a message.
### Defining a commit
A *commit* is an *object* in a Git repository. Git repository objects are mostly-permanent and completely read-only, and are consistency-checked so that they are incorruptible. There are four kinds of objects: commits (which we'll describe in detail in a moment), *trees*, *blobs*, and *annotated tag objects*. All four kinds are identified by hash IDs.
A *blob* is, very roughly, where Git stores your file's contents: if your `README` file says `I am a readme file`, the string `I am a readme file` is stored as a *blob*. A *tree* object is, slightly oversimplified, the thing that stores the file's *name* along with the blob hash ID of the file's contents. Hence, in order for commit `1234567...` to store your `README` file, Git has a tree that says "file `README` has contents from blob `c3c2a8983de728ffcf8f0ccaad014349925f23e6`".1
In any case, each commit stores one *tree* hash ID, which is the saved snapshot of your source files2 at the time you made the commit. It also stores the hash ID of the *parent* commit(s) of this commit, so that Git can work backwards through the history to find all commits. It stores your name and email address and a timestamp, as the person who made the commit and when.3
After these required items—tree, parent(s), author, and committer—Git adds your *log message*, which is entirely arbitrary as far as Git itself is concerned. You don't have to supply any log message at all, and if you do supply one, it does not have to make sense. It will, however, be *shown* to you, and to other people, when they explore the history—the commits—stored in the repository.
### Commits form chains, which is history
Because each commit records its earlier *parent* commit, the set of commits forms a chain that can be read backwards:
```
A <-B <-C <--master
```
Git can start with a *name*, like the branch name `master`, to get the hash ID of commit `C` (whatever its real hash ID may be: I use `C` here as shorthand instead of the actual hash). This hash ID is unique to commit `C`: no other commit anywhere has that same hash ID.4 Commit `C` itself stores the hash ID of commit `B`, so from `C` we can find `B`. `B` stores the hash ID of commit `A`, so from `B` we can find `A`.
This particular repository has only three commits: `A` is the *root* commit, the first one we ever made, so it has no parent, and that lets us stop following history.
### Branch and tag names find commits
But this also tells us what a branch name is and does: **A *branch name* is a name that identifies a commit, by the commit's hash ID. A *tag name* does the same thing.** So now we might wonder: what's the difference? When should we use a branch name, and when should we use a tag name?
Aside from the issue of name spaces,6 the main difference is that Git will let us get "on" a branch name, using `git checkout`. Once we are on such a branch, creating a *new* commit has a side effect: it *changes the hash ID associated with the branch name*. If we check out `master` and make a *new commit*, the parent ID stored in the new commit is the ID of `C`. Whatever the *new* commit's hash ID is, as computed by Git, that new hash ID goes into the name `master`, so that we now have:
```
A--B--C--D <-- master
```
The name `master` now *points to* new commit `D`, which points back to `C`, which points back to `B`, and so on. The branch has grown, and the *branch name* points to the *latest* commit. Each commit points back to a previous one.
You cannot get "on" a tag like this. The *tag name* `v1.0`, once it's set to point to commit `C`, continues to point to commit `C`, forever. So this is the main difference between a branch name and a tag name in Git: **A branch name changes over time, and even does this automatically when you make commits. A tag name should never7 change.**
---
1`c3c2a8983de728ffcf8f0ccaad014349925f23e6` is the hash ID of the content that reads `I am a readme file`. You can find this by running the following shell command:
```
$ echo 'I am a readme file' | git hash-object -t blob --stdin
c3c2a8983de728ffcf8f0ccaad014349925f23e6
```
Note that any blob object anywhere in any Git repository in the universe has this hash ID if it has this content! The content must consist of just those 19 bytes, including the terminating newline (with no carriage-return).
2More precisely, it's the saved *index*, aka *staging area* aka *cache*, as written out by `git write-tree`.
3Git stores these twice, once as the *author* of the commit and again as the *committer*. The two become different if you are, e.g., taking someone else's commit that they emailed you, and inserting it into a repository: then the *author* is the one who sent you the commit, but you are the *committer*. There are other situations in which the two become different. For the most part, nobody cares much, but run `git log --pretty=fuller` to see both.
4Hash IDs *can* repeat in *different* repositories, but only if the two repositories are never brought together. To make sure that commit hash IDs are unique, Git includes those time-stamps. This ensures even if you make the *same* commit you made before, with the same tree and same parent and same log message, if you make it at a different *time*, it's a different *commit*. (If you make it at the *same* time, then it's really exactly the same, and why do you care if you make it once or make it twice?5)
5There is a potential reason to care, but it's quite obscure and mostly irrelevant.
6Technically, a branch name is a *reference* that starts with `refs/heads/`, e.g., `refs/heads/master` is full spelling of the `master` branch. A tag name is a reference that starts with `refs/tags/`, such as `refs/tags/v1.0`. These are the reference *name spaces*, which include `refs/remotes/`, `refs/notes/`, `refs/replace/`, and `refs/stash` as well.
7Well, hardly ever. Some people really want a "floating tag". You *can* do this in Git, if you are careful and know what you are doing. There's no real point, though: if you want a name that moves, use a branch name. That's what they do. If you want a name that doesn't move, use a tag name. That's what *they* do.
Upvotes: 5 |
2018/03/21 | 537 | 2,132 | <issue_start>username_0: I am working over a Virtual Machine using Microsoft Azure, and I installed an app into the IIS, however the when I want to replace the files I've transferred via FTP (From my Local computer to a VM folder) the IIS does not refresh the changes. These are the steps I've run:
1. Site -- Add new website
2. Fill up all the required fields
3. Start the app
but what I can see in the browser is old application, I tried to modified the code but the changes never displayed in the screen.
Note: it is a web service which I try to modified.
Note: I've tried, iisreset, stop the web site, re-start it, re-start the server, deleting the web site, re-create the web site but nothing works.
I am using 4.5.0 in my web app, in [Web.config].
Windows Server 2016.
IIS 10.
Is there something which I am doing incorrectly?<issue_comment>username_1: Check to be absolutely sure that you are overwriting the original files. I have seen some ftp clients show a very small status window that can make it easy to assume the files are transferring due to the flood of messages streaming by when in reality there are permissions issues preventing you from overwriting files. Expand the logging window for whatever client you're using so that you can confirm for certain that your files are actually transmitting. If they are, maybe you're dropping them in the wrong folder.
Upvotes: 0 <issue_comment>username_2: The issue was that there is something in VS2015 solution which does not allow to refresh the web service once the new files are updated via FTP. What I did was to install the <https://learn.microsoft.com/en-us/visualstudio/debugger/remote-debugging-aspnet-on-a-remote-iis-7-5-computer> and configure the Visual Studio to Deploy the web service to IIS in the Virtual Machine. This was the link which helped a lot.
<https://learn.microsoft.com/en-us/aspnet/web-forms/overview/deployment/visual-studio-web-deployment/deploying-to-iis>
**RECOMMENDATION**
DO NOT deploy a web site or service using FTP, it is better to set the ports in the VM and run the deployment from VS properly.
Upvotes: 2 [selected_answer] |
2018/03/21 | 394 | 1,626 | <issue_start>username_0: The boost test library supports various logging options, for example: `--log_level=all` or `log_level=test_suite`.
And the `ctest` test driver program has the option `--output-on-failure` which nicely outputs the messages from *boost test*.
Is it possible to enable the above output for *passed* tests too?<issue_comment>username_1: Check to be absolutely sure that you are overwriting the original files. I have seen some ftp clients show a very small status window that can make it easy to assume the files are transferring due to the flood of messages streaming by when in reality there are permissions issues preventing you from overwriting files. Expand the logging window for whatever client you're using so that you can confirm for certain that your files are actually transmitting. If they are, maybe you're dropping them in the wrong folder.
Upvotes: 0 <issue_comment>username_2: The issue was that there is something in VS2015 solution which does not allow to refresh the web service once the new files are updated via FTP. What I did was to install the <https://learn.microsoft.com/en-us/visualstudio/debugger/remote-debugging-aspnet-on-a-remote-iis-7-5-computer> and configure the Visual Studio to Deploy the web service to IIS in the Virtual Machine. This was the link which helped a lot.
<https://learn.microsoft.com/en-us/aspnet/web-forms/overview/deployment/visual-studio-web-deployment/deploying-to-iis>
**RECOMMENDATION**
DO NOT deploy a web site or service using FTP, it is better to set the ports in the VM and run the deployment from VS properly.
Upvotes: 2 [selected_answer] |
2018/03/21 | 735 | 2,396 | <issue_start>username_0: I have a dataframe (table), that includes frequency counts (Freq) of 2 levels (F, I) of a categorical variable (Fert).
>
> table[1:10]
>
>
>
```
FemID Sperm Week Fert Freq
1: 269 High 1 F 4
2: 269 High 1 I 5
3: 273 High 1 F 6
4: 274 High 1 I 1
5: 275 High 1 I 1
6: 276 High 1 I 1
7: 278 Low 1 I 1
8: 280 Low 1 I 1
9: 281 Low 1 I 1
10: 282 Low 1 I 5
```
I would like to convert this to a dataframe in which the two levels of Fert (I and F) are separate variables for each value of FemID, with 0 for missing counts of a level, like so:
```
FemID Sperm Week Fert Infert
1: 269 High 1 4 5
2: 273 High 1 6 0
3: 274 High 1 1 0
4: 275 High 1 1 0
5: 276 High 1 1 0
```
Thoughts or suggestions? I feel like a loop is required, but I'm not sure how to go about setting it up for this. Perhaps there are two parts, one that creates the two new variables and one that fills in the 0's?<issue_comment>username_1: Check to be absolutely sure that you are overwriting the original files. I have seen some ftp clients show a very small status window that can make it easy to assume the files are transferring due to the flood of messages streaming by when in reality there are permissions issues preventing you from overwriting files. Expand the logging window for whatever client you're using so that you can confirm for certain that your files are actually transmitting. If they are, maybe you're dropping them in the wrong folder.
Upvotes: 0 <issue_comment>username_2: The issue was that there is something in VS2015 solution which does not allow to refresh the web service once the new files are updated via FTP. What I did was to install the <https://learn.microsoft.com/en-us/visualstudio/debugger/remote-debugging-aspnet-on-a-remote-iis-7-5-computer> and configure the Visual Studio to Deploy the web service to IIS in the Virtual Machine. This was the link which helped a lot.
<https://learn.microsoft.com/en-us/aspnet/web-forms/overview/deployment/visual-studio-web-deployment/deploying-to-iis>
**RECOMMENDATION**
DO NOT deploy a web site or service using FTP, it is better to set the ports in the VM and run the deployment from VS properly.
Upvotes: 2 [selected_answer] |
2018/03/21 | 717 | 2,414 | <issue_start>username_0: I'd like to use a Method defined in the Mongoose Model after saving the retrieved Object to a Session. Its not working though. Is it normal that these methods get lost after storing it to the session?
### Calling Method from Mongoose Model works fine:
```
Puppies.findOne({_id:123}).then(puppy => puppy.bark()) // WOOF WOOF
```
### Storing Model in Session and calling method fails:
```
// First Request
Puppies.findOne({_id:123}).then(puppy => {
req.session.puppy = puppy
})
// Second Request somewhere else in the app
app.use(function(req,res,next){
req.session.puppy.bark() // req.session.puppy.bark is not a function
})
```<issue_comment>username_1: I've got the exact issue, but I believe what happens is that when you're storing the variable in session, it's being toObject()'d, causing it to become a simple JavaScript object, instead of remaining as an instance of Model. I've used `Model.hydrate` as a means of recreating this Model instance.
```
app.use(function(req,res,next){
let puppyModel = mongoose.model("puppy");
let puppy = puppyModel.hydrate(req.session.puppy);
puppy.bark() // Awooo
});
```
This essentially is creating a new Model and then filling it with all the relevant information so it acts a clone.
Because it is needing all the relevant information to make an update (including \_id if you have it), I believe you may need to extend the toObject function to return getters/virtuals:
```
puppySchema.set('toObject', { getters: true, virtuals: true });
```
Else, when it attempts to save, and it's missing the \_id field, it won't be able to save it.
I do hope someone else can provide a nicer method of doing this and/or explain why when storing it it *has* to be converted to an object and can't remain as an instance of Model.
Upvotes: 1 <issue_comment>username_2: I think what username_1 said was correct. Finally worked around it by just using [mongoose statics](http://mongoosejs.com/docs/guide#statics):
### puppy.model.js
```
schema.statics.bark(puppy) {
console.log(puppy.sound)
}
```
### Storing Model in Session and getting desired effect via static:
```
// First Request, storing Puppy in Session
Puppy.findOne({_id:123}).then(puppy => {
req.session.puppy = puppy
})
// Second Request somewhere else in the app
app.use(function(req,res,next){
Puppy.bark(req.session.puppy) // WOOF WOOF
})
```
Upvotes: 0 |
2018/03/21 | 775 | 2,571 | <issue_start>username_0: Below is my JSON
```
[{ "id": "1",
"name" : "rob",
"Lastname":"Xyz"
},
{ "id": "2",
"name" : "xyz",
"Lastname":"abc"
}]
```
I have a form where user will enter his first name and last name, what i am trying to achieve here is to check and see if user input info is available in JSON. if yes update with user information else add new object using .push()<issue_comment>username_1: Maybe this will help you:
```
export class Test {
// Objects array
myObjects = [{ "id": "1",
"name" : "rob",
"Lastname":"Xyz"
},
{ "id": "2",
"name" : "xyz",
"Lastname":"abc"
}]
// Method responsible for finding an object who has the name passed in as a parameter
getIndexByName(name: string): number{
let index: number;
this.myObjects.forEach(object => {
if(object.name == name){ // Do your filtering here
index = this.myObjects.indexOf(object);
}
})
return index;
}
updateObject(obj: any){
let index = this.getIndexByName(obj.name);
if(index){
this.myObjects.splice(index,1,obj); // This is the usual function that I use when I find myself needing to 'update' something in an array
}
}
}
```
You can use it like this:
```
let user = {"id": "2", "name" : "xyz", "Lastname":"yjk"};
this.updateObject(user);
```
Upvotes: 2 [selected_answer]<issue_comment>username_2: ```
interface User {
id: string;
name: string;
Lastname: string;
}
class UsersCollection {
private id = 15;
constructor(private _users: Array = []) {
}
public create(name: string, lastName: string): User {
let user = {
id: this.id++,
name: name,
Lastname: lastName
};
this.\_users.push(user);
return user;
}
public searchUser(name: string, lastName: string): User | null {
return this.\_users.find((user) => {
return user.name == name && user.Lastname == lastName;
})
}
public getOrCreate(name: string, lastName: string) {
let user = this.searchUser(name, lastName);
if (user) {
return user;
}
return this.create(name, lastName);
}
public size(){
return this.\_users.length;
}
}
let users = new UsersCollection([{
'id': '1',
'name': 'rob',
'Lastname': 'Xyz'
},
{
'id': '2',
'name': 'xyz',
'Lastname': 'abc'
}]);
// Existing user
console.log(users.getOrCreate('rob','Xyz'));
console.log(users.size() == 2);
// Add new one
console.log(users.getOrCreate('rob','123'));
console.log(users.size() == 3);
```
Upvotes: 0 |
2018/03/21 | 2,599 | 7,836 | <issue_start>username_0: There is no overflow property for parent elements. The parent element has a set height. The navigation bar simply won't be sticky no matter what I try. It doesn't work with JavaScript either. I must be missing something. Below is the code. Somebody please tell me what I did wrong:
```css
/* Body */
body{
margin: 0;
position: relative;
height: 100%;
}
/* Header */
.header{
font-family: sans-serif;
font-weight: 900;
background-color: #04042b;
color: #de215a;
text-align: center;
font-size: 200%;
width: 100%;
}
.header h1{
margin: 0;
line-height: 1;
padding-top: 4%;
padding-bottom: 4%;
}
/* Navigation Bar */
.navbar{
background-color: #000005;
position: sticky;
top: 0;
overflow: hidden;
}
.navbar a{
float: left;
color: #FFFFE0;
text-align: center;
text-decoration: none;
font-size: 130%;
padding: 2%;
}
.navbar a:hover{
background-color:#E6E6FA ;
color: black;
}
.navbar a.active{
background-color: #4682B4;
color: white;
}
article img{
height: 100%
}
```
```html
Stuff
Stuff
=====
Home
Projects
News
About
Forum
Contact
Funding&FAQ
### Sticky Navigation Example
The navbar will stick to the top when you reach its scroll
position.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera fabulas
ut quo. Atqui causae gloriatur ius te, id agam omnis evertitur eum.
Affert laboramus repudiandae nec et. Inciderint efficiantur his ad.
Eum no molestiae voluptatibus.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera fabulas
ut quo. Atqui causae gloriatur ius te, id agam omnis evertitur eum.
Affert laboramus repudiandae nec et. Inciderint efficiantur his ad.
Eum no molestiae voluptatibus.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera fabulas
ut quo. Atqui causae gloriatur ius te, id agam omnis evertitur eum.
Affert laboramus repudiandae nec et. Inciderint efficiantur his ad.
Eum no molestiae voluptatibus.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera fabulas
ut quo. Atqui causae gloriatur ius te, id agam omnis evertitur eum.
Affert laboramus repudiandae nec et. Inciderint efficiantur his ad.
Eum no molestiae voluptatibus.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera
fabulas ut quo. Atqui causae gloriatur ius te, id agam omnis
evertitur eum. Affert laboramus repudiandae nec et. Inciderint
efficiantur his ad. Eum no molestiae voluptatibus.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera fabulas
ut quo. Atqui causae gloriatur ius te, id agam omnis evertitur eum.
Affert laboramus repudiandae nec et. Inciderint efficiantur his ad.
Eum no molestiae voluptatibus.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera fabulas
ut quo. Atqui causae gloriatur ius te, id agam omnis evertitur eum.
Affert laboramus repudiandae nec et. Inciderint efficiantur his ad.
Eum no molestiae voluptatibus.
```<issue_comment>username_1: Try moving the styles you have for `.navbar` to the parent `nav` root level element (so it sticks to the parent `body`).
```
nav {
background-color: #000005;
position: sticky;
top: 0;
overflow: hidden;
}
```
```css
/* Body */
body{
margin: 0;
position: relative;
height: 100%;
}
/* Header */
.header{
font-family: sans-serif;
font-weight: 900;
background-color: #04042b;
color: #de215a;
text-align: center;
font-size: 200%;
width: 100%;
}
.header h1{
margin: 0;
line-height: 1;
padding-top: 4%;
padding-bottom: 4%;
}
/* Navigation Bar */
nav{
background-color: #000005;
position: sticky;
top: 0;
overflow: hidden;
}
.navbar a{
float: left;
color: #FFFFE0;
text-align: center;
text-decoration: none;
font-size: 130%;
padding: 2%;
}
.navbar a:hover{
background-color:#E6E6FA ;
color: black;
}
.navbar a.active{
background-color: #4682B4;
color: white;
}
article img{
height: 100%
}
```
```html
Stuff
Stuff
=====
Home
Projects
News
About
Forum
Contact
Funding&FAQ
### Sticky Navigation Example
The navbar will stick to the top when you reach its scroll
position.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera fabulas
ut quo. Atqui causae gloriatur ius te, id agam omnis evertitur eum.
Affert laboramus repudiandae nec et. Inciderint efficiantur his ad.
Eum no molestiae voluptatibus.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera fabulas
ut quo. Atqui causae gloriatur ius te, id agam omnis evertitur eum.
Affert laboramus repudiandae nec et. Inciderint efficiantur his ad.
Eum no molestiae voluptatibus.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera fabulas
ut quo. Atqui causae gloriatur ius te, id agam omnis evertitur eum.
Affert laboramus repudiandae nec et. Inciderint efficiantur his ad.
Eum no molestiae voluptatibus.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera fabulas
ut quo. Atqui causae gloriatur ius te, id agam omnis evertitur eum.
Affert laboramus repudiandae nec et. Inciderint efficiantur his ad.
Eum no molestiae voluptatibus.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera
fabulas ut quo. Atqui causae gloriatur ius te, id agam omnis
evertitur eum. Affert laboramus repudiandae nec et. Inciderint
efficiantur his ad. Eum no molestiae voluptatibus.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera fabulas
ut quo. Atqui causae gloriatur ius te, id agam omnis evertitur eum.
Affert laboramus repudiandae nec et. Inciderint efficiantur his ad.
Eum no molestiae voluptatibus.
Some text to enable scrolling.. Lorem ipsum dolor sit amet, illum
definitiones no quo, maluisset concludaturque et eum, altera fabulas
ut quo. Atqui causae gloriatur ius te, id agam omnis evertitur eum.
Affert laboramus repudiandae nec et. Inciderint efficiantur his ad.
Eum no molestiae voluptatibus.
```
Upvotes: 2 <issue_comment>username_2: Position `sticky` depends on the parent container. Move `nav` inside your `navbar`.
```
Stuff
=====
Home
Projects
...
```
Upvotes: 2 <issue_comment>username_3: If I understand well, when you are scrolling you want header and stays on top.
You should add to `.header` position fixed and `.navbar` position: fixed;
top: 160px;. Finally to `article` you should add the summary of height from `.header` and `.navbar`. In your case 232px.
```
/* Header */
.header{
font-family: sans-serif;
font-weight: 900;
background-color: #04042b;
color: #de215a;
text-align: center;
font-size: 200%;
width: 100%;
position:fixed;
top:0;
}
/* Navigation Bar */
.navbar{
background-color: #000005;
position: fixed;
top: 160px;
overflow: hidden;
width:100%;
}
article {
padding-top:232px;
}
```
Upvotes: 0 |
2018/03/21 | 1,005 | 3,550 | <issue_start>username_0: This is a method in a class I have created called Movie.
I changed the code to this now, so it should only return a value when it leaves the for loop, but it is still giving the wrong answer.
```
public int compareTo(Movie other){
String a = this.name.toLowerCase();
String b = other.name.toLowerCase();
int shorter = 0;
if (a.length()>b.length())
shorter = b.length();
else
shorter = a.length();
int diff = 2;
while (diff !=0){
for (int i = 0; i < shorter; i++) {
//a0)
diff = -1;
else if (diff == 0 && a.length()b.length())
diff = -1;
else
diff = 0;
}
}
return diff;}
```
The directions say: Write a compareTo: with the following signature public int compareTo(Movie other). Returns 0 if this movie's name is lexicographically equal to other movie's name; -1 If this movie's name is less than the other movie's name lexicographically; 1 If this movie's name is greater than the other movie's name lexicographically.<issue_comment>username_1: Your implementation is flawed.
Consider two titles: `Gone With the Wind` and `Go West!`. These share the same first two letters, so `diff` will be 0 for those two. Once we hit the third character, 'n' and ' ', is where we need to do the comparison.
To do this, we iterate until we find a letter that's different and then stop:
```
int diff;
for(int i = 0; i < shorterWord.length(); i++) {
diff = shorterWord.charAt(i) - longerWord.charAt(i);
if(diff != 0) {
break;
}
}
return diff;
```
But wait! What if all of the letters in the shorter word are in the longer word as well? Like, what if your titles are `Saw` and `Saw II` -- what then?
If `diff` is still 0 after exiting the loop, we know that either the two words are the same, or we ran into this situation, so we can add a quick check:
```
if(!shorterWord.equals(longerWord)) {
// The words are not the same, so handle this how you will
}
```
(I didn't implement this because I'm not sure how you actually want to handle it. Probably always set `diff = 1` or `diff = -1` depending on which word is longer, would make sense.)
The reason yours isn't working is two fold. You have a structure like this:
```
diff = 2;
while(diff != 0) {
for (int i = 0; i < shorter; i++) {
// set diff
}
}
```
The problem is that the while loop will only evaluate the `diff` condition once it exits the for-loop. This makes no sense -- diff will be set the comparison of the last letter of the shorter word only when you exit the for-loop.
Upvotes: 0 <issue_comment>username_2: this can be further optimized for speed (which would get it similar to Java's String.compareTo()), but...
(feel free to downvote this answer 'cause it sounds like a 'homework problem' question)
```
public int compareTo(Movie other) {
String tn = this.name;
String on = other.name;
// quick check - equality - safe to leave out if equals() is cheating
// but guard against division by zero below.
if (tn.equals(on)) {
return 0;
}
for (int i = 0; i < Math.min(tn.length(), on.length()); i++) {
int tc = (int)tn.charAt(i);
int oc = (int)on.charAt(i);
if (tc != oc) {
// return larger char...
return toOne(tc - oc);
}
}
// ...or return larger string
return toOne(tn.length() - on.length());
}
// just converts any +-N to respective +-1
private int toOne(int c) {
return c / Math.abs(c);
}
```
Upvotes: -1 |
2018/03/21 | 1,659 | 4,582 | <issue_start>username_0: I'm trying to select values that have broken the record high or low values. I'm comparing to a DataFrame that has the high and low values for each day as two separate columns. The end goal is to graph a scatterplot of the (date, value) that are the new record values against a line graph of the old record values (using matplotlib.)
Here's an example dataset.
```
new_data = {'Date': ['1/1/2015', '1/2/2015', '1/3/2015', '1/4/2015', '1/5/2015'],
'new_low': [10, 25, 24, 21, 15],
'new_high': [35, 37, 38, 55, 47]}
record_data = {'Day': ['1/1', '1/2', '1/3', '1/4', '1/5'],
'record_low': [12, 28, 21, 25, 15],
'record_high': [30, 40, 36, 57, 46]}
df_new = pd.DataFrame(new_data)
df_new.set_index('Date', inplace=True)
df_record = pd.DataFrame(record_data)
df_record.set_index('Day', inplace=True)
```
So it would look like this
```
new_low new_high (new_data)
Date
1/1/2015 10 35
1/2/2015 25 37
1/3/2015 24 38
1/4/2015 21 55
1/5/2015 15 47
record_low record_high (record_data)
Date
1/1 12 30
1/2 28 40
1/3 21 36
1/4 25 57
1/5 15 46
```
I want the result to look along this line.
```
Date Record Value
0 1/1/2015 10
1 1/2/2015 25
2 1/4/2015 21
3 1/1/2015 35
4 1/3/2015 38
5 1/5/2015 47
```
Since I need to use the result with matplotlib to make a scatterplot, I will need a list of x-values and y-values to enter. My example result was a dataframe that I made, but it doesn't need to be. I could use two separate arrays or even a list of tuples that I could un`zip` into lists of x and y.
I feel like there should be some simple/elegant way to do this with mapping, but I'm not experienced enough to find it and I haven't been able to find an answer elsewhere.
I'm also having some issues with how to enter the record data with just a month and day as a datestamp, so I've just set them all to the same year. It works for my visualization, but I would rather not do that to the data.<issue_comment>username_1: There's probably a better answer out there, but you could merge the two DataFrames together, and then determine if the df\_new value is a record by comparing the columns.
I wouldn't set the dates as an index, just keep them as a column. It makes it a little bit nicer. If they are your indices, then do this first:
```
import pandas as pd
df_new['Date'] = df_new.index
df_record['Day'] = df_record.index
```
Then:
```
df_new['day'] = pd.to_datetime(df_new.Date).dt.day
df_new['month'] = pd.to_datetime(df_new.Date).dt.month
df_record['day'] = pd.to_datetime(df_record.Day, format='%M/%d').dt.day
df_record['month'] = pd.to_datetime(df_record.Day, format='%M/%d').dt.month
```
Merge the DataFrames and drop the columns we no longer need:
```
df = df_new.merge(df_record, on=['month', 'day']).drop(columns=['month', 'day', 'Day'])
```
Then check if a value is a record. If so, create a new DataFrame with the record values:
```
record_low = df.X_x < df.X_y
record_high = df.Y_x > df.Y_y
pd.DataFrame({'Date': df[record_low]['Date'].tolist() + df[record_high]['Date'].tolist(),
'Record Value': df[record_low]['X_x'].tolist() + df[record_high]['Y_x'].tolist()})
Date Record Value
0 1/1/2015 10
1 1/2/2015 25
2 1/4/2015 21
3 1/1/2015 35
4 1/3/2015 38
5 1/5/2015 47
```
Upvotes: 0 <issue_comment>username_2: Edited to address comments
This is a solution assuming data is read in from a file and avoids merging the two dfs to compare them (note the reindex step).
```
# # skip the header and ensure the same naming of the columns
# # df_record has Date in format mon/day
df_record = pd.read_csv('record_data.tsv', sep='\t',
skiprows=1, names=['Date','X', 'Y'], index_col = 'Date')
# #df_new has Date in format 'month/day/year'
df_new = pd.read_csv('new_data.tsv', sep='\t', skiprows=1, names=['Date','X', 'Y'])
df_new = df_new.set_index(df_new['Date'].apply(lambda x: "/".join(x.split('/')[:-1]))).drop('Date', axis = 1)
df_new = df_new.reindex(df_record.index)
# compare the columns
tdfX = (df_new['X'] < df_record['X'])
tdfY = (df_new['Y'] > df_record['Y'])
# get the data that is a new record
df_plot = pd.concat([df_new.loc[tdfY[tdfY].index, 'Y'], df_new.loc[tdfX[tdfX].index, 'X']]).to_frame('Record').reset_index()
```
Upvotes: 2 [selected_answer] |
2018/03/21 | 2,584 | 9,809 | <issue_start>username_0: My client is send a post application/json that has a accessToken in the json file. How do I verify the user and get the user id?
Here is my api.php file:
```
<?php
use Illuminate\Http\Request;
/* API Routes */
Route::get('/user', function (Request $request) {
return $request->user();
})->middleware('auth:api');
Route::post('/client', function (Request $request) {
$data = $request->json()->all();
return $data;
})->middleware('auth:api');
```
In the $data array i can see the accessToken.
```
[user] => Array
(
[accessToken] => iOiJSUzI1NiIsImp0aSI6I...
)
```
It send back HTTP 401 : Unauthorized
Any help would be great. Thanks<issue_comment>username_1: Is your client able to send the access token in the header?
```
Authorization: Bearer <access token>
```
If so, you can use the `auth:api` that you already have in your code, here you can see how to call it with an example using Guzzle:
<https://laravel.com/docs/5.6/passport#protecting-routes>
```
$response = $client->request('GET', '/api/user', [
'headers' => [
'Accept' => 'application/json',
'Authorization' => 'Bearer '.$accessToken,
],
]);
```
Otherwise, and against my advice because it's more standard and secure to use the header Authorization, you may have two (not very appropriate) alternatives:
1. You can create a middleware to check if there is no Authorization in the header, but in the body, and if so, move it to the header before the `auth:api` middleware (but be sure to run this middleware first).
2. Remove the `auth:api` middleware and authenticate either creating your own middleware or in the controller itself.
Documentation about Laravel's middleware:
<https://laravel.com/docs/5.6/middleware>
Here you can find more info about Laravel's out of the box authentication:
<https://laravel.com/docs/5.6/authentication>
Note: Be sure that the documentation version and your Laravel's version match.
More info about Bearer Authentication:
<https://developer.mozilla.org/en-US/docs/Web/HTTP/Authentication>
This is an example (not tested) of how the middleware could work:
```
php
namespace App\Http\Middleware;
use Closure;
use Illuminate\Support\Facades\Auth;
class BodyAuthenticate
{
/**
* Handle an incoming request.
*
* @param \Illuminate\Http\Request $request
* @param \Closure $next
* @param string|null $guard
*
* @return mixed
*/
public function handle($request, Closure $next, $guard = null)
{
        if (!Auth::guard($guard)->check()
&& null !== ($token = $request->json('access.user.accessToken', null))) {
$request->headers->add([
'Authorization' => 'Bearer ' . $token,
]);
}
return $next($request);
}
}
```
You can also have a look at the Passport Middleware code here:
<https://github.com/laravel/passport/blob/5.0/src/Http/Middleware/CreateFreshApiToken.php>
You have different ways to register your middleware:
<https://laravel.com/docs/5.6/middleware#registering-middleware>
So you have to edit this file:
<https://github.com/laravel/laravel/blob/master/app/Http/Kernel.php>
Depending on your API needs, you may do something like:
```
protected $routeMiddleware = [
...
'auth.body' => \App\Http\Middleware\BodyAuthenticate::class,
];
```
And then you can add this middleware in your route:
```
Route::post('/client', function (Request $request) {
$data = $request->json()->all();
return $data;
})->middleware('auth.body', 'auth:api');
```
Or make something more global (if all API calls require token auth) adding the middlewares to the api middleware group (in within `App\Http\Kernel` Class too):
```
'api' => [
'throttle:60,1',
'bindings',
'auth.body',
'auth:api',
],
```
Then, if the token sent match with any token in your database, the auth singleton will return the user who owns it. You can get that user like:
<https://laravel.com/docs/5.6/authentication#retrieving-the-authenticated-user>
```
use Illuminate\Support\Facades\Auth;
// Get the currently authenticated user...
$user = Auth::user();
// Get the currently authenticated user's ID...
$id = Auth::id();
```
Keep in mind that the client has to send the token in every single call (is not a session).
So you can protect the routes:
1. As the doc suggest (in the route or the controller):
<https://laravel.com/docs/5.6/authentication#protecting-routes>
```
Route::get('client', function () {
// Only authenticated users may enter...
})->middleware('auth.body', 'auth:api');
```
Or in the controller:
```
public function __construct()
{
$this->middleware('auth.body', 'auth:api');
}
```
2. With a group route:
```
Route::middleware(['auth.body', 'auth:api'])->group(function () {
Route::get('client', function () {
// Uses first & second Middleware
});
Route::post('client', function (Request $request) {
// Uses first & second Middleware
$data = $request->json()->all();
return $data;
});
Route::get('client/user/profile', function () {
// Uses first & second Middleware
});
});
```
3. If you edited `App\Http\Kernel` to add the middlewares globally (you don't need a group):
```
Route::get('client', function () {
// Uses first & second Middleware
});
Route::post('client', function (Request $request) {
// Uses first & second Middleware
$data = $request->json()->all();
return $data;
});
Route::get('client/user/profile', function () {
// Uses first & second Middleware
});
```
Tip: you can use the groups to add, not only `middleware` but also other interesting parameters such as controllers `namespace`, `domain`, naming alias prefix with `as`, or URI `path` prefix.
Example:
```
Route::group([
'namespace' => 'Client', // Loads from App\Http\Controllers\Client
'domain' => 'client.domain.com',
'as' => 'client::', // Check with `php artisan route:list --name=client`
'middleware' => ['auth.body', 'auth:api'],
'prefix' => 'api',
], function () {
// Uses first & second Middleware
// GET https://client.domain.com/api/
Route::get('/', function () {
// ...
});
// Uses first & second Middleware
// GET https://client.domain.com/api/profile
Route::get('client/profile', function () {
$user = Auth::user();
// ...
});
// Uses first & second Middleware
// POST https://client.domain.com/api/profile
Route::post('client/profile', function (Request $request) {
// ...
});
// Uses first & second Middleware
// App\Http\Controllers\Client\PhotoController
// @link: https://laravel.com/docs/5.6/controllers#resource-controllers
// GET /photos index photos.index
// GET /photos/create create photos.create
// POST /photos store photos.store
// GET /photos/{photo} show photos.show
// GET /photos/{photo}/edit edit photos.edit
// PUT/PATCH /photos/{photo} update photos.update
// DELETE /photos/{photo} destroy photos.destroy
Route::resource('photos', 'PhotoController');
//...
});
```
Notice that, if you edited `App\Http\Kernel` to add the middlewares globally, you don't need the `middleware` in the group array.
Upvotes: 2 <issue_comment>username_2: Thanks To Gonxalo and fwartner See:<https://laracasts.com/discuss/channels/laravel/laravel-53-with-passport-get-current-user-with-personal-access-token>
If the Access Token is Not in your header then do what Gonxalo says above.
Then add fwartner suggestion to get your user id.
The Alexa Skill doesn't send the accessToken in the header it send it in the body. I hope this helps someone down the road.
Here is my api.php:
```
<?php
use Illuminate\Http\Request;
/*
| Here is where you can register API routes for your application. These
| routes are loaded by the RouteServiceProvider within a group which
| is assigned the "api" middleware group. Enjoy building your API!
|
*/
Route::get('/user', function (Request $request) {
return $request->user();
})->middleware('auth:api');
Route::post('/alexa', function (Request $request) {
$data = $request->json()->all();
$jsonArray = json_decode(json_encode($data),true);
$user = auth()->guard('api')->user();
$userid =$user->id;
$JsonOut = GetJsonResponse();
return $JsonOut;
})->middleware('auth.body', 'auth:api');
function GetJsonResponse(){
$NextNumber = 1;
$EndSession = "true";
$SpeakPhrase = "Alexa Success.";
$ReturnValue= '
{
"version": "1.0",
"sessionAttributes": {
"countActionList": {
"read": true,
"category": true,
"currentTask": "none",
"currentStep": '.$NextNumber.'
}
},
"response": {
"outputSpeech": {
"type": "PlainText",
"text": "' . $SpeakPhrase . '"
},
"reprompt": {
"outputSpeech": {
"type": "PlainText",
"text": "Say next item to continue."
}
},
"shouldEndSession": ' . $EndSession . '
}
}';
return $ReturnValue;
}
```
Also, I added a header to the BodyAuthenticate.php middleware
$request->headers->add(['Accept' => 'application/json' ]);
$request->headers->add(['Authorization' => 'Bearer ' . $token, ]);
Upvotes: 0 |
2018/03/21 | 2,621 | 9,999 | <issue_start>username_0: I have a function using the library ngx-translate which returns an observable. So to use it I subscribe to the method and get the value I need. After that I want to do a return but I would like that the return happen after the value has been gotten.
This is my code:
```
test(textePage: string[], params?: Object){
let textToSpeech: string;
this._translateService.get(textePage).subscribe(res => {
textToSpeech = (Object.keys(res).map(e=>res[e])).join('');
});
return textToSpeech;
}
```
I would like something like:
```
test(textePage: string[], params?: Object){
let textToSpeech: string;
this._translateService.get(textePage).subscribe(res => {
textToSpeech = (Object.keys(res).map(e=>res[e])).join('');
return textToSpeech;
});
}
```
But I know it is not the best way.
Thanks a lot.<issue_comment>username_1: Is your client able to send the access token in the header?
```
Authorization: Bearer <access token>
```
If so, you can use the `auth:api` that you already have in your code, here you can see how to call it with an example using Guzzle:
<https://laravel.com/docs/5.6/passport#protecting-routes>
```
$response = $client->request('GET', '/api/user', [
'headers' => [
'Accept' => 'application/json',
'Authorization' => 'Bearer '.$accessToken,
],
]);
```
Otherwise, and against my advice because it's more standard and secure to use the header Authorization, you may have two (not very appropriate) alternatives:
1. You can create a middleware to check if there is no Authorization in the header, but in the body, and if so, move it to the header before the `auth:api` middleware (but be sure to run this middleware first).
2. Remove the `auth:api` middleware and authenticate either creating your own middleware or in the controller itself.
Documentation about Laravel's middleware:
<https://laravel.com/docs/5.6/middleware>
Here you can find more info about Laravel's out of the box authentication:
<https://laravel.com/docs/5.6/authentication>
Note: Be sure that the documentation version and your Laravel's version match.
More info about Bearer Authentication:
<https://developer.mozilla.org/en-US/docs/Web/HTTP/Authentication>
This is an example (not tested) of how the middleware could work:
```
<?php
namespace App\Http\Middleware;
use Closure;
use Illuminate\Support\Facades\Auth;
class BodyAuthenticate
{
/**
* Handle an incoming request.
*
* @param \Illuminate\Http\Request $request
* @param \Closure $next
* @param string|null $guard
*
* @return mixed
*/
public function handle($request, Closure $next, $guard = null)
{
        if (!Auth::guard($guard)->check()
&& null !== ($token = $request->json('access.user.accessToken', null))) {
$request->headers->add([
'Authorization' => 'Bearer ' . $token,
]);
}
return $next($request);
}
}
```
You can also have a look at the Passport Middleware code here:
<https://github.com/laravel/passport/blob/5.0/src/Http/Middleware/CreateFreshApiToken.php>
You have different ways to register your middleware:
<https://laravel.com/docs/5.6/middleware#registering-middleware>
So you have to edit this file:
<https://github.com/laravel/laravel/blob/master/app/Http/Kernel.php>
Depending on your API needs, you may do something like:
```
protected $routeMiddleware = [
...
'auth.body' => \App\Http\Middleware\BodyAuthenticate::class,
];
```
And then you can add this middleware in your route:
```
Route::post('/client', function (Request $request) {
$data = $request->json()->all();
return $data;
})->middleware('auth.body', 'auth:api');
```
Or make something more global (if all API calls require token auth) adding the middlewares to the api middleware group (in within `App\Http\Kernel` Class too):
```
'api' => [
'throttle:60,1',
'bindings',
'auth.body',
'auth:api',
],
```
Then, if the token sent match with any token in your database, the auth singleton will return the user who owns it. You can get that user like:
<https://laravel.com/docs/5.6/authentication#retrieving-the-authenticated-user>
```
use Illuminate\Support\Facades\Auth;
// Get the currently authenticated user...
$user = Auth::user();
// Get the currently authenticated user's ID...
$id = Auth::id();
```
Keep in mind that the client has to send the token in every single call (is not a session).
So you can protect the routes:
1. As the doc suggest (in the route or the controller):
<https://laravel.com/docs/5.6/authentication#protecting-routes>
```
Route::get('client', function () {
// Only authenticated users may enter...
})->middleware('auth.body', 'auth:api');
```
Or in the controller:
```
public function __construct()
{
$this->middleware('auth.body', 'auth:api');
}
```
2. With a group route:
```
Route::middleware(['auth.body', 'auth:api'])->group(function () {
Route::get('client', function () {
// Uses first & second Middleware
});
Route::post('client', function (Request $request) {
// Uses first & second Middleware
$data = $request->json()->all();
return $data;
});
Route::get('client/user/profile', function () {
// Uses first & second Middleware
});
});
```
3. If you edited `App\Http\Kernel` to add the middlewares globally (you don't need a group):
```
Route::get('client', function () {
// Uses first & second Middleware
});
Route::post('client', function (Request $request) {
// Uses first & second Middleware
$data = $request->json()->all();
return $data;
});
Route::get('client/user/profile', function () {
// Uses first & second Middleware
});
```
Tip: you can use the groups to add, not only `middleware` but also other interesting parameters such as controllers `namespace`, `domain`, naming alias prefix with `as`, or URI `path` prefix.
Example:
```
Route::group([
'namespace' => 'Client', // Loads from App\Http\Controllers\Client
'domain' => 'client.domain.com',
'as' => 'client::', // Check with `php artisan route:list --name=client`
'middleware' => ['auth.body', 'auth:api'],
'prefix' => 'api',
], function () {
// Uses first & second Middleware
// GET https://client.domain.com/api/
Route::get('/', function () {
// ...
});
// Uses first & second Middleware
// GET https://client.domain.com/api/profile
Route::get('client/profile', function () {
$user = Auth::user();
// ...
});
// Uses first & second Middleware
// POST https://client.domain.com/api/profile
Route::post('client/profile', function (Request $request) {
// ...
});
// Uses first & second Middleware
// App\Http\Controllers\Client\PhotoController
// @link: https://laravel.com/docs/5.6/controllers#resource-controllers
// GET /photos index photos.index
// GET /photos/create create photos.create
// POST /photos store photos.store
// GET /photos/{photo} show photos.show
// GET /photos/{photo}/edit edit photos.edit
// PUT/PATCH /photos/{photo} update photos.update
// DELETE /photos/{photo} destroy photos.destroy
Route::resource('photos', 'PhotoController');
//...
});
```
Notice that, if you edited `App\Http\Kernel` to add the middlewares globally, you don't need the `middleware` in the group array.
Upvotes: 2 <issue_comment>username_2: Thanks To Gonxalo and fwartner See:<https://laracasts.com/discuss/channels/laravel/laravel-53-with-passport-get-current-user-with-personal-access-token>
If the Access Token is Not in your header then do what Gonxalo says above.
Then add fwartner suggestion to get your user id.
The Alexa Skill doesn't send the accessToken in the header it send it in the body. I hope this helps someone down the road.
Here is my api.php:
```
<?php
use Illuminate\Http\Request;
/*
| Here is where you can register API routes for your application. These
| routes are loaded by the RouteServiceProvider within a group which
| is assigned the "api" middleware group. Enjoy building your API!
|
*/
Route::get('/user', function (Request $request) {
return $request->user();
})->middleware('auth:api');
Route::post('/alexa', function (Request $request) {
$data = $request->json()->all();
$jsonArray = json_decode(json_encode($data),true);
$user = auth()->guard('api')->user();
$userid =$user->id;
$JsonOut = GetJsonResponse();
return $JsonOut;
})->middleware('auth.body', 'auth:api');
function GetJsonResponse(){
$NextNumber = 1;
$EndSession = "true";
$SpeakPhrase = "Alexa Success.";
$ReturnValue= '
{
"version": "1.0",
"sessionAttributes": {
"countActionList": {
"read": true,
"category": true,
"currentTask": "none",
"currentStep": '.$NextNumber.'
}
},
"response": {
"outputSpeech": {
"type": "PlainText",
"text": "' . $SpeakPhrase . '"
},
"reprompt": {
"outputSpeech": {
"type": "PlainText",
"text": "Say next item to continue."
}
},
"shouldEndSession": ' . $EndSession . '
}
}';
return $ReturnValue;
}
```
Also, I added a header to the BodyAuthenticate.php middleware
$request->headers->add(['Accept' => 'application/json' ]);
$request->headers->add(['Authorization' => 'Bearer ' . $token, ]);
Upvotes: 0 |
2018/03/21 | 915 | 3,126 | <issue_start>username_0: Trying to test `axios` calls and trying the `moxios` package.
`"axios": "^0.16.2",`
`"moxios": "^0.4.0",`
Found here: <https://github.com/axios/moxios>
Following there example, but my test errors out on the `moxios.install()` line:
```
import axios from 'axios'
import moxios from 'moxios'
import sinon from 'sinon'
import { equal } from 'assert'
describe('mocking axios requests', function () {
describe('across entire suite', function () {
beforeEach(function () {
// import and pass your custom axios instance to this method
moxios.install()
})
```
My actual test
--------------
```
import axios from 'axios';
import moxios from 'moxios';
import sinon from 'sinon';
import { equal } from 'assert';
const akamaiData = {
name: 'akamai'
};
describe('mocking axios requests', () => {
describe('across entire suite', () => {
beforeEach(() => {
// import and pass your custom axios instance to this method
moxios.install();
});
afterEach(() => {
// import and pass your custom axios instance to this method
moxios.uninstall();
});
it('should stub requests', (done) => {
moxios.stubRequest('/akamai', {
status: 200,
response: {
name: 'akamai'
}
});
// const onFulfilled = sinon.spy();
// axios.get('/akamai').then(onFulfilled);
//
// moxios.wait(() => {
// equal(onFulfilled.getCall(0).args[0], akamaiData);
// done();
// });
});
});
});
```
[](https://i.stack.imgur.com/xKsWw.png)
I did find this closed issue here, however the fix "passing `axios` into the `moxios.install(axios)` function did not work"
<https://github.com/axios/moxios/issues/15><issue_comment>username_1: I was having the same problem. It turned out I had an `axios.js` file in my `__mocks__` folder (leftover from a different attempt at mocking axios). That mock file took over the actual axios code -- but moxios needs the *real* axios code to function properly. When I removed the `axios.js` file from the `__mocks__` folder, moxios worked as advertised.
Upvotes: 3 <issue_comment>username_2: Turns out I did not need `moxios`, in my test I did not want to make an actual API call... just needed to make sure that function was called. Fixed it with a test function.
```
import { makeRequest } from 'utils/services';
import { getImages } from './akamai';
global.console = { error: jest.fn() };
jest.mock('utils/services', () => ({
makeRequest: jest.fn(() => Promise.resolve({ data: { foo: 'bar' } }))
}));
describe('Akamai getImages', () => {
it('should make a request when we get images', () => {
getImages();
expect(makeRequest).toHaveBeenCalledWith('/akamai', 'GET');
});
});
```
Upvotes: -1 [selected_answer]<issue_comment>username_3: For me it was about ES module interop. Try one of the two workarounds:
* Try change `import moxios from 'moxios'` to `import * as moxios from 'moxios'`.
* Set `esModuleInterop` to `true` in `tsconfig.json`.
Upvotes: 1 |
2018/03/21 | 535 | 1,849 | <issue_start>username_0: Current default global logging level is set to **INFO** in *JRE\_HOME/lib/logging.properties* file.
I run the following from the command line to over-ride and set the level to FINE:
```
mvn test -Dtest=ABC -Djava.util.logging.ConsoleHandler.level=FINE
```
And, I use the below in my code:
```
logger.fine("Logging works for fine");
```
The above message doesn't get printed in the output.
If I change it to the below line, it prints successfully.
```
logger.info("Logging works for fine");
```
What am I missing?<issue_comment>username_1: I was having the same problem. It turned out I had an `axios.js` file in my `__mocks__` folder (leftover from a different attempt at mocking axios). That mock file took over the actual axios code -- but moxios needs the *real* axios code to function properly. When I removed the `axios.js` file from the `__mocks__` folder, moxios worked as advertised.
Upvotes: 3 <issue_comment>username_2: Turns out I did not need `moxios`, in my test I did not want to make an actual API call... just needed to make sure that function was called. Fixed it with a test function.
```
import { makeRequest } from 'utils/services';
import { getImages } from './akamai';
global.console = { error: jest.fn() };
jest.mock('utils/services', () => ({
makeRequest: jest.fn(() => Promise.resolve({ data: { foo: 'bar' } }))
}));
describe('Akamai getImages', () => {
it('should make a request when we get images', () => {
getImages();
expect(makeRequest).toHaveBeenCalledWith('/akamai', 'GET');
});
});
```
Upvotes: -1 [selected_answer]<issue_comment>username_3: For me it was about ES module interop. Try one of the two workarounds:
* Try change `import moxios from 'moxios'` to `import * as moxios from 'moxios'`.
* Set `esModuleInterop` to `true` in `tsconfig.json`.
Upvotes: 1 |
2018/03/21 | 927 | 2,678 | <issue_start>username_0: I'm trying to generate a string which looks like this:
```
Nadya's Cell: (415) 123-4567
Jim's Cell: (617) 123-4567
```
where the names and phone numbers vary and need to be interpolated in, and the phone numbers should be aligned. For this example, I've used the following template:
```
name1 = "Nadya"
name2 = "Jim"
phone_number1 = "(415) 123-4567"
phone_number2 = "(617) 123-4567"
string = "{name1}'s Cell: {phone_number1}\n{name2}'s Cell: {phone_number2}".format(**locals())
```
Instead of adding the spaces in manually, I'd like the total width of the string to adapt to the longest of `name1` and `name2`.
So far, all I've been able to come up with following <https://docs.python.org/3.6/library/string.html> is the following:
```
max_length = max(len(name1), len(name2))
# This should use max_length and contain "'s Cell:"
string = "{name1:<7}{phone_number1}\n{name2:<7}{phone_number2}".format(**locals())
print(string)
```
which produces
```
Nadya (415) 123-4567
Jim (617) 123-4567
```
The problem is that the total width, `7`, is still hard-coded into the template, and I don't see how to add the `'s Cell:` after the name because this produces spaces between the name and the apostrophe. Any ideas how to tackle this?<issue_comment>username_1: You can use nested format specifiers, an often overlooked feature:
```
max_length = max(len(name1), len(name2)) + 2
string = """{name1:<{max_length}}{phone_number1}
{name2:<{max_length}}{phone_number2}""".format(**locals())
print(string)
# output:
# Nadya (415) 123-4567
# Jim (617) 123-4567
```
The [documentation](https://docs.python.org/3/library/string.html#format-string-syntax) only mentions them in passing, which probably explains why they're relatively unknown:
>
> A format\_spec field can also include nested replacement fields within
> it. These nested replacement fields may contain a field name,
> conversion flag and format specification, but deeper nesting is not
> allowed. The replacement fields within the format\_spec are substituted
> before the format\_spec string is interpreted. This allows the
> formatting of a value to be dynamically specified.
>
>
>
Upvotes: 3 [selected_answer]<issue_comment>username_2: Here is another solution using f-strings, following <https://www.python.org/dev/peps/pep-0498/#format-specifiers>:
```
name1 += "'s Cell:"
name2 += "'s Cell:"
max_length = max(len(name1), len(name2))
string = f"{name1:{max_length+2}}{phone_number1}\n{name2:{max_length+2}}{phone_number2}"
print(string)
```
which produces
```
Nadya's Cell: (415) 123-4567
Jim's Cell: (617) 123-4567
```
as required.
Upvotes: 0 |
2018/03/21 | 725 | 2,969 | <issue_start>username_0: I'm hoping I'm just missing something simple because I've been looking at this for too long, but I'm stumped.
I have a form with inputs bound to vuejs. I have a group of 2 radio buttons for selecting the "gender", and the binding is working perfectly. If I click on either of the radio buttons, I can see the data change in the vue component inspector.
But I'm trying to change the radio buttons to a Bootstrap 4 button group, and can't seem to get the v-model binding to work. No matter what I try, the gender\_id in my vue data is not getting updated when I click either of the buttons in the button group.
The form input values are being fed in through vue component properties, but for simplicity, my data for the radio buttons/button group would look like this:
```
export default {
data() {
return {
genders: {
1: "Men's",
2: "Women's"
},
gender_id: {
type: Number,
default: null
}
}
}
}
```
Here is the code I have for the radio button version (which is working properly):
```
Gender:
{{ gender }}
```
Here is the button group version that is not properly binding to the gender\_id data in vue.
```
Gender:
{{ gender }}
```
I've been using the following Boostrap 4 documentation to try to get this working.
<https://getbootstrap.com/docs/4.0/components/buttons/#checkbox-and-radio-buttons>
In the documentation for button groups they don't even include the value property of the radio inputs, whereas they do include it in the documentation for form radio buttons.
<https://getbootstrap.com/docs/4.0/components/forms/#checkboxes-and-radios>
Is this for simplicity or do button groups of radio buttons not even return the value of the checked button?
I see other threads stating that button groups are not meant to function as radio buttons, but if that's true for BS4, then why would Bootstrap have button groups with radio buttons as they do in their documentation referenced above? If you can't retrieve the checked state, then why not just use a `<button>` instead of an `<input type="radio">`?
Any ideas as to what I'm doing wrong and/or not understanding correctly?
Thanks so much!<issue_comment>username_1: Nothing is actually wrong your use of `v-model` here.
However: you must add the class `"active"` to the that wraps each radio-button .
See [this fiddle](https://jsfiddle.net/username_1/3e9ax7sy/) for a working example.
Is that what you're after?
Upvotes: 2 <issue_comment>username_2: Thanks so much to @username_1 for his helpful insights.
The issue was related to vue and bootstrap both trying to apply javascript to the buttons in the button group.
To get around this issue, it was as simple as removing `data-toggle="buttons"` from the button group. By removing the data-toggle attribute, the bootstrap js is not applied and vue can manage the button group.
Upvotes: 6 [selected_answer] |
2018/03/21 | 785 | 2,416 | <issue_start>username_0: I have two dictionaries:
```
dict1 = {'cm': {'fill': {'user': {'registration': {'flag': 'f'}}}}}
dict2 = {'cm': {'fill': {'user': {'url': 'www.example.com'}}}}
```
The output I want:
```
dict3 = {'cm': {'fill': {'user':{'registration': {'flag': 'f'}}, {'url': 'www.example.com'}}}
```
Here is what I have tried so far:
```
dict3 = {**dict1, **dict2} # This does not work. It only gives me a `dict1`.
```
The problem is that `dict1` and `dict2` can have many embedded keys.
Any idea how to do this?<issue_comment>username_1: If the structure of your dictionaries is consistent, you can use `collections.defaultdict` with a nested approach.
It's possible to convert the nested `defaultdict` to regular `dict` objects. But this may not be necessary for your use case.
```
from collections import defaultdict
dict1 = {'cm': {'fill': {'user': {'registration': {'flag': 'f'}}}}}
dict2 = {'cm': {'fill': {'user': {'url': 'www.example.com'}}}}
rec_dd = lambda: defaultdict(rec_dd)
d = rec_dd()
for i in [dict1, dict2]:
d['cm']['fill']['user'].update(i['cm']['fill']['user'])
# defaultdict(>,
# {'cm': defaultdict(>,
# {'fill': defaultdict(>,
# {'user': defaultdict(>,
# {'registration': {'flag': 'f'},
# 'url': 'www.example.com'})})})})
```
Upvotes: 1 <issue_comment>username_2: Correct: that merge won't do what you want. You have accumulation to do multiple levels down. I suspect that the easiest way for you to get what you want -- merging at unspecified (arbitrary) depth -- is to write a recursive routine to do the merge you want.
```
def dict_merge(d1, d2):
for key in d1:
if key in d2:
one_merge = dict_merge(d1[key], d2[key])
else:
one_merge = d1[key]
for ... # also pick up keys in d2 that are not in d1.
```
I'll leave it to you whether to handle this logic with set intersection and difference.
Upvotes: 2 <issue_comment>username_3: You can use recursion while `zip`ing the data:
```
dict1 = {'cm': {'fill': {'user': {'registration': {'flag': 'f'}}}}}
dict2 = {'cm': {'fill': {'user': {'url': 'www.example.com'}}}}
def update_dict(d1, d2):
return {a:update_dict(b, d) if a == c and list(b.keys())[0] == list(d.keys())[0] else {**b, **d} for [a, b], [c, d] in zip(d1.items(), d2.items())}
```
Output:
```
{'cm': {'fill': {'user': {'registration': {'flag': 'f'}, 'url': 'www.example.com'}}}}
```
Upvotes: 0 |
2018/03/21 | 922 | 3,070 | <issue_start>username_0: I have a DateType input in the function. I would like to exclude Saturday and Sunday and get the next week day, if the input date falls on the weekend, otherwise it should give the next day's date
Example:
Input: Monday 1/1/2017 output: 1/2/2017 (which is Tuesday)
Input: Saturday 3/4/2017 output: 3/5/2017 (which is Monday)
I have gone through <https://spark.apache.org/docs/2.0.2/api/java/org/apache/spark/sql/functions.html> but I don't see a ready made function, so I think it will need to be created.
So far I have something that is:
```
val nextWeekDate = udf {(startDate: DateType) =>
val day= date_format(startDate,'E'
if(day=='Sat' or day=='Sun'){
nextWeekDate = next_day(startDate,'Mon')
}
else{
nextWeekDate = date_add(startDate, 1)
}
}
```
Need help to get it valid and working.<issue_comment>username_1: Using dates as strings:
```
import java.time.{DayOfWeek, LocalDate}
import java.time.format.DateTimeFormatter
// If that is your format date
object MyFormat {
val formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd")
}
object MainSample {
import MyFormat._
def main(args: Array[String]): Unit = {
import java.sql.Date
import org.apache.spark.sql.types.{DateType, IntegerType}
import spark.implicits._
import org.apache.spark.sql.types.{ StringType, StructField, StructType }
import org.apache.spark.sql.functions._
implicit val spark: SparkSession =
SparkSession
.builder()
.appName("YourApp")
.config("spark.master", "local")
.getOrCreate()
val someData = Seq(
Row(1,"2013-01-30"),
Row(2,"2012-01-01")
)
val schema = List(StructField("id", IntegerType), StructField("date",StringType))
val sourceDF = spark.createDataFrame(spark.sparkContext.parallelize(someData), StructType(schema))
sourceDF.show()
val _udf = udf { (dt: String) =>
// Parse your date, dt is a string
val localDate = LocalDate.parse(dt, formatter)
// Check the week day and add days in each case
val newDate = if ((localDate.getDayOfWeek == DayOfWeek.SATURDAY)) {
localDate.plusDays(2)
} else if (localDate.getDayOfWeek == DayOfWeek.SUNDAY) {
localDate.plusDays(1)
} else {
localDate.plusDays(1)
}
newDate.toString
}
sourceDF.withColumn("NewDate", _udf('date)).show()
}
}
```
Upvotes: 3 [selected_answer]<issue_comment>username_2: Here's a much simpler answer that's defined in [spark-daria](https://github.com/Mrusername_2/spark-daria):
```scala
def nextWeekday(col: Column): Column = {
val d = dayofweek(col)
val friday = lit(6)
val saturday = lit(7)
when(col.isNull, null)
.when(d === friday || d === saturday, next_day(col,"Mon"))
.otherwise(date_add(col, 1))
}
```
You always want to stick with the native Spark functions whenever possible. This [post](https://mungingdata.com/apache-spark/week-end-start-dayofweek-next-day/) explains the derivation of this function in greater detail.
Upvotes: 1 |
2018/03/21 | 6,110 | 19,506 | <issue_start>username_0: I have a Play web app that I am working on and I would like to be able to deploy it as a fat jar using sbt assembly. While running the application in debug i have no problems with guice library being recognized as it is added in my build.sbt file: `libraryDependencies += guice`. I have created my sbt assembly MergeStrategy configuration and ran sbt assembly and successfully compiled the jar file for the project, However when i attempt to run the jar i get the following Exception in console:
```
Oops, cannot start the server.
java.lang.RuntimeException: No application loader is configured. Please
configure an application loader either using the play.application.loader
configuration property, or by depending on a module that configures one. You
can add the Guice support module by adding "libraryDependencies += guice" to
your build.sbt.
at scala.sys.package$.error(package.scala:27)
at play.api.ApplicationLoader$.play$api$ApplicationLoader$$loaderNotFound(ApplicationLoader.scala:44)
at play.api.ApplicationLoader$.apply(ApplicationLoader.scala:70)
at play.core.server.ProdServerStart$.start(ProdServerStart.scala:50)
at play.core.server.ProdServerStart$.main(ProdServerStart.scala:25)
at play.core.server.ProdServerStart.main(ProdServerStart.scala)
```
and here is my build.sbt file:
```
name := """Product-Inventory"""
organization := "com.example"
version := "1.0-SNAPSHOT"
lazy val root = (project in file(".")).enablePlugins(PlayScala)
scalaVersion := "2.12.3"
//libraryDependencies += Defaults.sbtPluginExtra("com.eed3si9n" % "sbt-assembly" % "0.7.4", "0.12.0-M2", "2.9.1")
// https://mvnrepository.com/artifact/net.ruippeixotog/scala-scraper
libraryDependencies += "net.ruippeixotog" %% "scala-scraper" % "2.0.0"
libraryDependencies += guice //GUICE IS ADDED HERE
libraryDependencies += "org.scalatestplus.play" %% "scalatestplus-play" % "3.1.2" % Test
libraryDependencies += "org.scala-lang.modules" %% "scala-async" % "0.9.7"
libraryDependencies += jdbc
libraryDependencies += "postgresql" % "postgresql" % "9.1-901.jdbc4"
// Adds additional packages into Twirl
//TwirlKeys.templateImports += "com.example.controllers._"
// Adds additional packages into conf/routes
// play.sbt.routes.RoutesKeys.routesImport += "com.example.binders._"
mainClass in assembly := Some("play.core.server.ProdServerStart")
fullClasspath in assembly +=
Attributed.blank(PlayKeys.playPackageAssets.value)
assemblyMergeStrategy in assembly := {
case PathList("META-INF", xs @ _*) => MergeStrategy.discard
case referenceOverrides if referenceOverrides.contains("reference-
overrides.conf") => MergeStrategy.concat
case PathList("reference-overrides.conf") => MergeStrategy.concat
case x => {
MergeStrategy.first
}
}
```
Since I have already added guice I am not sure what has happened or how to fix this. Could this possibly be due to an incorrect MergeStrategy? Or is it something else? Below I am also including the list of files merged and merge strategies applied in case this is of any help.
```
[info] Merging files...
[warn] Merging 'META-INF\CHANGES' with strategy 'discard'
[warn] Merging 'META-INF\COPYRIGHT.html' with strategy 'discard'
[warn] Merging 'META-INF\DEPENDENCIES' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_commons-codec-1.10.txt' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_commons-io-2.5.txt' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_commons-lang3-3.6.txt' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_commons-logging-1.2.txt' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_guice-4.1.0' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_guice-assistedinject-4.1.0' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_httpclient-4.5.3' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_httpcore-4.4.6' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_httpmime-4.5.3' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_jackson-annotations-2.8.11' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_jackson-core-2.8.11' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_jackson-databind-2.8.11' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_jackson-datatype-jsr310-2.8.11' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_joda-convert-1.2.txt' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_joda-time-2.9.9.txt' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_jsoup-1.10.2' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_jta-1.1.txt' with strategy 'discard'
[warn] Merging 'META-INF\LICENSE_xercesImpl-2.11.0' with strategy 'discard'
[warn] Merging 'META-INF\MANIFEST.MF' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_commons-codec-1.10.txt' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_commons-io-2.5.txt' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_commons-lang3-3.6.txt' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_commons-logging-1.2.txt' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_guice-4.1.0' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_guice-assistedinject-4.1.0' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_httpclient-4.5.3' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_httpcore-4.4.6' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_httpmime-4.5.3' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_jackson-core-2.8.11' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_jackson-databind-2.8.11' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_joda-convert-1.2.txt' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_joda-time-2.9.9.txt' with strategy 'discard'
[warn] Merging 'META-INF\NOTICE_xercesImpl-2.11.0' with strategy 'discard'
[warn] Merging 'META-INF\README_jsoup-1.10.2' with strategy 'discard'
[warn] Merging 'META-INF\maven\cglib\cglib\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\cglib\cglib\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\ch.qos.logback\logback-classic\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\ch.qos.logback\logback-classic\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\ch.qos.logback\logback-core\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\ch.qos.logback\logback-core\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.fasterxml.jackson.core\jackson-annotations\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.fasterxml.jackson.core\jackson-annotations\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.fasterxml.jackson.core\jackson-core\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.fasterxml.jackson.core\jackson-core\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.fasterxml.jackson.core\jackson-databind\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.fasterxml.jackson.core\jackson-databind\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.fasterxml.jackson.datatype\jackson-datatype-jdk8\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.fasterxml.jackson.datatype\jackson-datatype-jdk8\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.fasterxml.jackson.datatype\jackson-datatype-jsr310\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.fasterxml.jackson.datatype\jackson-datatype-jsr310\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.google.errorprone\error_prone_annotations\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.google.errorprone\error_prone_annotations\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.google.guava\guava\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.google.guava\guava\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.google.j2objc\j2objc-annotations\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.google.j2objc\j2objc-annotations\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.googlecode.usc\jdbcdslog\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.googlecode.usc\jdbcdslog\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.jolbox\bonecp\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.jolbox\bonecp\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.zaxxer\HikariCP\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\com.zaxxer\HikariCP\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\commons-codec\commons-codec\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\commons-codec\commons-codec\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\commons-io\commons-io\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\commons-io\commons-io\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\commons-logging\commons-logging\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\commons-logging\commons-logging\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\io.jsonwebtoken\jjwt\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\io.jsonwebtoken\jjwt\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\joda-time\joda-time\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\joda-time\joda-time\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\net.sourceforge.cssparser\cssparser\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\net.sourceforge.cssparser\cssparser\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\net.sourceforge.htmlunit\htmlunit\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\net.sourceforge.htmlunit\htmlunit\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\net.sourceforge.htmlunit\neko-htmlunit\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\net.sourceforge.htmlunit\neko-htmlunit\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.apache.commons\commons-lang3\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.apache.commons\commons-lang3\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.apache.httpcomponents\httpclient\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.apache.httpcomponents\httpclient\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.apache.httpcomponents\httpcore\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.apache.httpcomponents\httpcore\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.apache.httpcomponents\httpmime\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.apache.httpcomponents\httpmime\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.codehaus.mojo\animal-sniffer-annotations\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.codehaus.mojo\animal-sniffer-annotations\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty.websocket\websocket-api\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty.websocket\websocket-api\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty.websocket\websocket-client\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty.websocket\websocket-client\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty.websocket\websocket-common\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty.websocket\websocket-common\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty\jetty-client\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty\jetty-client\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty\jetty-http\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty\jetty-http\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty\jetty-io\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty\jetty-io\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty\jetty-util\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.eclipse.jetty\jetty-util\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.joda\joda-convert\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.joda\joda-convert\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.jsoup\jsoup\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.jsoup\jsoup\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.slf4j\jcl-over-slf4j\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.slf4j\jcl-over-slf4j\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.slf4j\jul-to-slf4j\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.slf4j\jul-to-slf4j\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.slf4j\slf4j-api\pom.properties' with strategy 'discard'
[warn] Merging 'META-INF\maven\org.slf4j\slf4j-api\pom.xml' with strategy 'discard'
[warn] Merging 'META-INF\resources\webjars\product-inventory\1.0-SNAPSHOT\javascripts\main.js' with strategy 'discard'
[warn] Merging 'META-INF\resources\webjars\product-inventory\1.0-SNAPSHOT\stylesheets\bootstrap.min.css' with strategy 'discard'
[warn] Merging 'META-INF\resources\webjars\product-inventory\1.0-SNAPSHOT\stylesheets\main.css' with strategy 'discard'
[warn] Merging 'META-INF\services\com.fasterxml.jackson.core.JsonFactory' with strategy 'discard'
[warn] Merging 'META-INF\services\com.fasterxml.jackson.core.ObjectCodec' with strategy 'discard'
[warn] Merging 'META-INF\services\com.fasterxml.jackson.databind.Module' with strategy 'discard'
[warn] Merging 'META-INF\services\java.sql.Driver' with strategy 'discard'
[warn] Merging 'META-INF\services\javax.servlet.ServletContainerInitializer' with strategy 'discard'
[warn] Merging 'META-INF\services\javax.xml.datatype.DatatypeFactory' with strategy 'discard'
[warn] Merging 'META-INF\services\javax.xml.parsers.DocumentBuilderFactory' with strategy 'discard'
[warn] Merging 'META-INF\services\javax.xml.parsers.SAXParserFactory' with strategy 'discard'
[warn] Merging 'META-INF\services\javax.xml.stream.XMLEventFactory' with strategy 'discard'
[warn] Merging 'META-INF\services\javax.xml.transform.TransformerFactory' with strategy 'discard'
[warn] Merging 'META-INF\services\javax.xml.validation.SchemaFactory' with strategy 'discard'
[warn] Merging 'META-INF\services\javax.xml.xpath.XPathFactory' with strategy 'discard'
[warn] Merging 'META-INF\services\org.apache.commons.logging.LogFactory' with strategy 'discard'
[warn] Merging 'META-INF\services\org.apache.xalan.extensions.bsf.BSFManager' with strategy 'discard'
[warn] Merging 'META-INF\services\org.apache.xml.dtm.DTMManager' with strategy 'discard'
[warn] Merging 'META-INF\services\org.eclipse.jetty.http.HttpFieldPreEncoder' with strategy 'discard'
[warn] Merging 'META-INF\services\org.eclipse.jetty.websocket.api.extensions.Extension' with strategy 'discard'
[warn] Merging 'META-INF\services\org.w3c.dom.DOMImplementationSourceList' with strategy 'discard'
[warn] Merging 'META-INF\services\org.xml.sax.driver' with strategy 'discard'
[warn] Merging 'org\apache\commons\logging\Log.class' with strategy 'first'
[warn] Merging 'org\apache\commons\logging\LogConfigurationException.class' with strategy 'first'
[warn] Merging 'org\apache\commons\logging\LogFactory.class' with strategy 'first'
[warn] Merging 'org\apache\commons\logging\impl\NoOpLog.class' with strategy 'first'
[warn] Merging 'org\apache\commons\logging\impl\SimpleLog$1.class' with strategy 'first'
[warn] Merging 'org\apache\commons\logging\impl\SimpleLog.class' with strategy 'first'
[warn] Merging 'play\reference-overrides.conf' with strategy 'concat'
[warn] Merging 'reference.conf' with strategy 'first'
[warn] Strategy 'concat' was applied to a file
[warn] Strategy 'discard' was applied to 137 files
[warn] Strategy 'first' was applied to 7 files
[info] Assembly up to date: C:\Users\zemcd\Extras\Play_Apps\product-inventory\target\scala-2.12\Product-Inventory-assembly-1.0-SNAPSHOT.jar
[success] Total time: 15 s, completed Mar 21, 2018 11:19:27 AM
```
this line was also included: `[info] Including from cache: play-guice_2.12-2.6.12.jar` and this one as well `[info] Including from cache: guice-4.1.0.jar`. If these two jars are causing conflict how should this be resolved? remove jar? or modify merge strategy? Any other clues to what the problem could be?<issue_comment>username_1: It looks like Play has their own `dist` function that uses the sbt native packager plugin under the hood. I was able to get my program working by following the instructions here: <https://www.playframework.com/documentation/2.7.x/Deploying#Using-the-dist-task>
Upvotes: 0 <issue_comment>username_2: Play [documents](https://www.playframework.com/documentation/2.7.x/Deploying#Using-the-SBT-assembly-plugin) the following merge strategy to be used with `sbt-assembly`:
```
assemblyMergeStrategy in assembly := {
case manifest if manifest.contains("MANIFEST.MF") =>
// We don't need manifest files since sbt-assembly will create
// one with the given settings
MergeStrategy.discard
case referenceOverrides if referenceOverrides.contains("reference-overrides.conf") =>
// Keep the content for all reference-overrides.conf files
MergeStrategy.concat
case x =>
// For all the other files, use the default sbt-assembly merge strategy
val oldStrategy = (assemblyMergeStrategy in assembly).value
oldStrategy(x)
}
```
[play-scala-sbt-assembly-example](https://github.com/mario-galic/play-scala-sbt-assembly-example) is a working example demonstrating the necessary configuration.
Upvotes: 2 <issue_comment>username_3: For Play 2.8 the only solution that worked for me is a variant of the [official solution](https://www.playframework.com/documentation/2.8.x/Deploying#Using-the-sbt-assembly-plugin):
```scala
assembly / assemblyMergeStrategy := {
// otherwise basic settings are missing
// e.g. missing play.server.http.port and play.server.dir
case r if r.startsWith("reference.conf") => MergeStrategy.concat
case manifest if manifest.contains("MANIFEST.MF") =>
// We don't need manifest files since sbt-assembly will create
// one with the given settings
MergeStrategy.discard
case referenceOverrides
if referenceOverrides.contains("reference-overrides.conf") =>
// Keep the content for all reference-overrides.conf files
MergeStrategy.concat
case x => MergeStrategy.first
// leads to "deduplicate: different file contents found in the following" errors:
// case x =>
// // For all the other files, use the default sbt-assembly merge strategy
// val oldStrategy = (assembly / assemblyMergeStrategy).value
// oldStrategy(x)
}
```
Upvotes: 0 |
2018/03/21 | 260 | 1,020 | <issue_start>username_0: For some reason I just can't get an Amazon Aurora DB launched. I haven't launched one before but have read many Amazon help / instruction pages. Launching other Amazon products did work well after some digging. Any suggestions?
Error:
Access denied to Performance Insights (Service: AmazonRDS; Status Code: 400; Error Code: InvalidParameterCombination; Request ID: 8ef6c7b9-be54-4bd8-aa87-XXXXXXXX)
<http://prntscr.com/iug951><issue_comment>username_1: Today it works.. selected the same settings as yesterday. All i did different was omit dashes (-) from the database name and other stuff you have to name. If that was the actual cause of the 3h headache yesterday Amazon really sucks it just doesn't tell you that instead of showing a cryptic error message.
Upvotes: 1 <issue_comment>username_2: I just had the same issue with the same error message, restarting the setup process from the start (with a database name without a dash in it), fixed the issue.
Upvotes: 0 |
2018/03/21 | 551 | 1,859 | <issue_start>username_0: In my increaseCount method I have 3 different ways of increasing the count. I thought the first method could be used twice but it doesn't work as it seems to merge the setState in the callback. What's the proper way to use a callback function and why does the arrow notation work? How is prevState.count being defined? It is never set to 0
```js
import React from "react";
import { render } from "react-dom";
const styles = {
fontFamily: "sans-serif",
textAlign: "center"
};
class App extends React.Component {
constructor(props) {
super(props);
this.state = {
count: 0
};
}
increaseCount() {
console.log(this);
this.setState({ count: this.state.count }, function() {
this.setState({ count: this.state.count + 1 });
});
this.setState({ count: this.state.count + 1 });
this.setState(prevState=>({ count: prevState.count + 1 }));
}
render() {
return (
this.increaseCount()}>Increase
{this.state.count}
------------------
);
}
}
render(, document.getElementById("root"));
```
```
this.setState(prevState=>({count: prevState.count + 1}))
```<issue_comment>username_1: ```
this.setState(function(prevState) {
return {count: prevState.count + 1};
});
```
<https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Functions/Arrow_functions>
Upvotes: 2 <issue_comment>username_2: ```
this.setState(function (prevState) {
return {count: prevState.count + 1};
});
```
>
> <https://reactjs.org/docs/react-component.html#setstate>
>
>
>
<https://codepen.io/anon/pen/BrRzgB?editors=1011>
Both prevState and props received by the updater function are guaranteed to be up-to-date. The output of the updater is shallowly merged with prevState.
>
> setState(updater[, callback])
>
>
>
Upvotes: 2 [selected_answer] |
2018/03/21 | 549 | 1,896 | <issue_start>username_0: I'm creating a online shop. I need to have 3 items per row with some space left over. So my idea was a column of 9 with 3 child columns of. Then I have 3 columns left over from the 9 for whaterever else I choose.
My issue is in a few categories I have 4/5 items. So they all appear on 1 line. How can I solved this.
```
@foreach ($items as $item)

@endforeach
```<issue_comment>username_1: You don't have to use all 12 columns. 12 columns is just the maximum that's available. There is a number of ways to achieve the desired spacing between columns when you don't utilize all 12 of the available columns like it is in your case.
Two of the possible options would be to use the `justify-content-around` class or the `justify-content-between` class on the row like so (click "run code snippet" below and expand to full page):
```html
### row justify-content-around
Column 1
Column 2
Column 3
### row justify-content-between
Column 1
Column 2
Column 3
```
As you can see, no unnecessary nesting is needed there.
That's because those classes take advantage of the flexbox properties.
Upvotes: 0 <issue_comment>username_2: Every row in bootstrap is 12 cols. It does not matter if you take you top row only 9 cols. Inside the 9 cols row you will have 12 cols again. Best thing in your case to do is calculate floor 12/<#items>. Then user the value to determine your col size.
Upvotes: 1 <issue_comment>username_3: You can use the [auto-layout columns](http://getbootstrap.com/docs/4.0/layout/grid/#auto-layout-columns) which are **not restricted to 12**.
Use `.col-auto` for columns that size to their content..
```
![]()
![]()
![]()
... to n
```
***or,*** use `.col` for columns that evenly fill across the row..
```
![]()
![]()
... to n
```
Demo: <https://www.codeply.com/go/wqoiLlSUc5>
Upvotes: 2 |
2018/03/21 | 930 | 2,998 | <issue_start>username_0: I am beginning my study in motion, using requestAnimationFrame. I wanted to put a circle on the canvas and then set that circle into motion with a click of a button. I have achieved it, accidentally, with the following code. But I don’t get it. What I was expecting to see with this code was a circle painted on the canvas, then another circle painted on top of that circle when the button was pressed. The first circle would remain on the screen, in stationary position, while the other circle went into motion. Again, I have accidentally achieved what I was going for, but I don’t want to build off this because it seems so wrong. What do I need to do to correct my code so that the circle appears on screen, then is set in motion with the button click?
```
var canvas = document.querySelector('canvas');
var c = canvas.getContext('2d');
// Creates a ball in location (but why does it disappear?)
c.beginPath();
c.lineWidth = 5;
c.arc(145, 100, 75, 0, Math.PI \* 2, true);
c.stroke();
var y = 100;
function ballDrop(){
requestAnimationFrame(ballDrop);
c.clearRect(0, 0, 300, 800);
// Create Ball
c.beginPath();
c.lineWidth = 5;
c.arc(145, y, 75, 0, Math.PI \* 2, true);
c.stroke();
y += 1;
}
```<issue_comment>username_1: Your code performs as expected because:
1. You draw an initial circle on the canvas
2. You click a button which executes `ballDrop`
3. Within `ballDrop` you clear the context draw area, which removes all previous paints. This is why your original circle is gone.
A few notes:
* You don't need to keep setting `lineWidth` unless you plan on changing it for that `context`.
* You should move `requestAnimationFrame` to the end of your function. This is mostly for clarity, as `requestAnimationFrame` is asynchronous (like `setTimeout`), so functionality won't really be affected.
**Example** <https://jsfiddle.net/m503wa4g/6/>
```
Document
Drop
var canvas = document.querySelector('canvas');
var c = canvas.getContext('2d');
// Creates a ball in location (but why does it disappear?)
c.beginPath();
c.lineWidth = 5;
c.arc(145, 100, 75, 0, Math.PI \* 2, true);
c.stroke();
var y = 100;
function ballDrop() {
c.clearRect(0, 0, 300, 800);
// Create Ball
c.beginPath();
c.arc(145, y, 75, 0, Math.PI \* 2, true);
c.stroke();
y += 1;
requestAnimationFrame(ballDrop);
}
```
Upvotes: 1 <issue_comment>username_2: ```
function ballDrop(){
// start the animation that runs forever
requestAnimationFrame(ballDrop);
// clear the canvas so nothing is on the canvas
// this is why the circle disappears
c.clearRect(0, 0, 300, 800);
// Create first Ball again so it doesn't disappear
c.beginPath();
c.lineWidth = 5;
c.arc(145, 100, 75, 0, Math.PI * 2, true);
c.stroke();
// Create Dropping Ball that is 1 pixel down (y+=1)
c.beginPath();
c.lineWidth = 5;
c.arc(145, y, 75, 0, Math.PI * 2, true);
c.stroke();
y += 1;
}
```
Upvotes: 0 |
2018/03/21 | 845 | 2,366 | <issue_start>username_0: My current json readable format in file is .
`{"delete" : {
"_index" : "production",
"_type" : "listings",
"_id" : "f170321064",
"_version" : 6,
"result" : "deleted",
"_shards" : {
"total" : 1,
"successful" : 1,
"failed" : 0
},
"_seq_no" : 175987,
"_primary_term" : 1,
"status" : 200
}
}`
How to convert like below
```
{"delete" : {"_index" : "production-cire","_type" : "listings","_id" : "424-l-81694-f1703210641700147","_version" : 6,"result" : "deleted","_shards" : {"total" : 1,"successful" : 1,"failed" : 0},"_seq_no" : 175987,"_primary_term" : 1,"status" : 200}}
```<issue_comment>username_1: Your code performs as expected because:
1. You draw an initial circle on the canvas
2. You click a button which executes `ballDrop`
3. Within `ballDrop` you clear the context draw area, which removes all previous paints. This is why your original circle is gone.
A few notes:
\* You don't need to keep setting `lineWidth` unless you plan on changing it for that `context`
\* You should move `requestAnimationFrame` to the end of your function. This is mostly for clarity, as `requestAnimationFrame` is asynchronous (like `setTimeout`) so functionality won't really be affected.
**Example** <https://jsfiddle.net/m503wa4g/6/>
```
Document
Drop
var canvas = document.querySelector('canvas');
var c = canvas.getContext('2d');
// Creates a ball in location (but why does it disappear?)
c.beginPath();
c.lineWidth = 5;
c.arc(145, 100, 75, 0, Math.PI \* 2, true);
c.stroke();
var y = 100;
function ballDrop() {
c.clearRect(0, 0, 300, 800);
// Create Ball
c.beginPath();
c.arc(145, y, 75, 0, Math.PI \* 2, true);
c.stroke();
y += 1;
requestAnimationFrame(ballDrop);
}
```
Upvotes: 1 <issue_comment>username_2: ```
function ballDrop(){
// start the animation that runs forever
requestAnimationFrame(ballDrop);
// clear the canvas so nothing is on the canvas
// this is why the circle disappears
c.clearRect(0, 0, 300, 800);
// Create first Ball again so it doesn't disappear
c.beginPath();
c.lineWidth = 5;
c.arc(145, 100, 75, 0, Math.PI * 2, true);
c.stroke();
// Create Dropping Ball that is 1 pixel down (y+=1)
c.beginPath();
c.lineWidth = 5;
c.arc(145, y, 75, 0, Math.PI * 2, true);
c.stroke();
y += 1;
}
```
Upvotes: 0 |
2018/03/21 | 596 | 1,999 | <issue_start>username_0: I have simple asynchronous application written on `aiohttp`. I need to extend `app` instance on server startup or shutdown, but signals doesn't work at all (the function never executes):
```py
from aiohttp import web
app = web.Application()
async def on_startup(app):
app['key'] = "need to save something here"
app.on_startup.append(on_startup)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
srv = loop.run_until_complete(asyncio.gather(
loop.create_server(app.make_handler(), host='0.0.0.0', port=8000)
))
loop.run_forever()
```
How can I extend the `app` instance via callback? Does somebody have an idea?
P.S. I use stable `aiohttp` version (3.0.9).<issue_comment>username_1: Try the below if there is no reason you should use low-level API like `make_handler()`, it will work with `on_startup` signal .
```
if __name__ == "__main__":
web.run_app(app, host='0.0.0.0', port=8000)
```
`run_app()` will use the `get_event_loop()` internally for the loop it use.
Upvotes: 2 <issue_comment>username_2: I spend a time in search of solution... and I found it! I decide to explore the `web.run_app()` method to understand how it works. So, this method use `AppRunner().setup()` to configure application before it will be running. I'm not sure that it's the best solution, but it works :) Well, the final code looks like the following:
```py
from aiohttp import web
app = web.Application()
async def on_startup(app):
app['key'] = "need to save something here"
app.on_startup.append(on_startup)
# Create an instance of the application runner
runner = web.AppRunner(app, handle_signals=True)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
srv = loop.run_until_complete(asyncio.gather(
runner.setup(), # Configure the application before run the server
loop.create_server(app.make_handler(), host='0.0.0.0', port=8000)
))
loop.run_forever()
```
Upvotes: 2 [selected_answer] |
2018/03/21 | 954 | 3,934 | <issue_start>username_0: I'm trying to implement a simple CQRS/event sourcing proof of concept on top of [Kafka streams](https://kafka.apache.org/documentation/streams/) (as described in <https://www.confluent.io/blog/event-sourcing-using-apache-kafka/>)
I have 4 basic parts:
1. `commands` topic, which uses the aggregate ID as the key for sequential processing of commands per aggregate
2. `events` topic, to which every change in aggregate state are published (again, key is the aggregate ID). This topic has a retention policy of "never delete"
3. A [KTable](https://docs.confluent.io/current/streams/concepts.html#ktable) to reduce aggregate state and save it to a state store
```
events topic stream ->
group to a Ktable by aggregate ID ->
reduce aggregate events to current state ->
materialize as a state store
```
4. commands processor - commands stream, left joined with aggregate state KTable. For each entry in the resulting stream, use a function `(command, state) => events` to produce resulting events and publish them to the `events` topic
The question is - is there a way to make sure I have the latest version of the aggregate in the state store?
I want to reject a command if violates business rules (for example - a command to modify the entity is not valid if the entity was marked as deleted). But if a `DeleteCommand` is published followed by a `ModifyCommand` right after it, the delete command will produce the `DeletedEvent`, but when the `ModifyCommand` is processed, the loaded state from the state store might not reflect that yet and conflicting events will be published.
I don't mind sacrificing command processing throughput, I'd rather get the consistency guarantees (since everything is grouped by the same key and should end up in the same partition)
Hope that was clear :) Any suggestions?<issue_comment>username_1: I don't think Kafka is good for CQRS and Event sourcing yet, *the way you described it*, because it lacks a (simple) way of ensuring protection from concurrent writes. This [article](https://medium.com/serialized-io/apache-kafka-is-not-for-event-sourcing-81735c3cf5c) talks about this in details.
What I mean by *the way you described it* is the fact that you expect a command to generate zero or more events or to fail with an exception; this is the classical CQRS with Event sourcing. Most of the people expect this kind of Architecture.
You could have Event sourcing however in a different style. Your Command handlers could yield events for *every* command that is received (i.e. `DeleteWasAccepted`). Then, an Event handler could *eventually* handle that Event in an Event sourced way (by rebuilding Aggregate's state from its event stream) and emit other Events (i.e. `ItemDeleted` or `ItemDeletionWasRejected`). So, commands are fired-and-forget, sent async, the client does not wait for an immediate response. It waits however for an Event describing the outcome of its command execution.
An important aspect is that the Event handler must process events from the same Aggregate in a serial way (exactly once and in order). This can be implemented using a single Kafka Consumer Group. You can see about this architecture in this [video](https://youtu.be/HLfl2ccS304).
Upvotes: 3 <issue_comment>username_2: A possible solution I came up with is to implement a sort of optimistic locking mechanism:
1. Add an `expectedVersion` field on the commands
2. Use the KTable `Aggregator` to increase the version of the aggregate snapshot for each handled event
3. Reject commands if the `expectedVersion` doesn't match the snapshot's aggregate version
This seems to provide the semantics I'm looking for
Upvotes: 0 <issue_comment>username_3: Please read this article by my colleague Jesper. Kafka is a great product but actually not a good fit at all for event sourcing
<https://medium.com/serialized-io/apache-kafka-is-not-for-event-sourcing-81735c3cf5c>
Upvotes: 1 |
2018/03/21 | 685 | 2,757 | <issue_start>username_0: I've been trying to assign a value to a variable and then use the same variable and value outside of this switch/case statement.
It says that the variable outside of the `switch` statement is not defined locally, and was wondering if it is possible to make this variable global.
Here is my code so far:
```
bool play;
string choice;
string guess;
int intChoice;
int intguess;
do
{
Console.WriteLine("1: New game\n2:quit");
choice = Console.ReadLine();
intChoice = Convert.ToInt32(choice);
switch (intChoice)
{
case 1:
play = true;
break;
case 2:
play = false;
break;
}
Console.WriteLine(play);
```
*(please note that I know that I can use if statements but I want to know how switches and cases work!)*<issue_comment>username_1: The problem will be that play may not be assigned if it falls through the switch statement (eg case 3:). either init the variable to bool play=false at start.. or add a default case to the switch statement
Upvotes: 3 <issue_comment>username_2: The problem is that `play` will not be defined if the input is something other than `1` or `2`. You want to make sure you handle every possible user input if you are going to allow it. What if a user inputs `4`? What will happen? What about `Quit`?
You have a few options. You can change your switch/case to an if/else block, which will be fine if you don't plan on extending the options much:
```
if (intChoice == 1)
play = true;
else
play = false;
```
You can also add a `default` statement to your switch like so:
```
switch (intChoice)
{
case 1:
play = true;
break;
case 2:
play = false;
break;
default:
//Handle invalid inputs
play = false;
break;
}
Console.WriteLine(play);
```
This gives you the option to add more `intChoice`s later without making it hard to extend. There is some repeated code in this simple case.
You can also set `bool play = false;` at the very top of your code so that it is initialized for every case.
Upvotes: 4 [selected_answer]<issue_comment>username_3: The variable has not been initialized properly if it another case besides the 1 and 2 cases. What you need is a default case to initialize the variable in case anything else falls thru the switch. It is also good practice of using switch...case.
Another good practice would be defining the initial value of the variable so an exception does not happen.
Changing your variable scope from local to global (if in this case its scope is local to a greater scope than of the switch case) wont change anything. Since the scope only defines the 'life expectancy' of the variable.
Upvotes: 1 |
2018/03/21 | 424 | 1,291 | <issue_start>username_0: I'm trying to align a line of text to the bottom of the screen with this code.
```css
body,
html {
height: 100%;
}
div.BottomAligned {
width: 100%;
height: 100%;
display: flex;
align-items: flex-end;
justify-content: center;
overflow: hidden;
}
```
```html
Line1
Line2
TextBottom
```
However, the "TextBottom" text is shown *below* the visible portion of the screen, so the height of the Main div is actually bigger than 100% of the window size. Any ideas how to fix this?<issue_comment>username_1: Maybe something like this:
```css
body,
html {
height: 100%;
}
body {
margin:0; /* Don't forget to remove default margin */
}
#Main {
display: flex; /* Main should be the flex container */
flex-direction:column;
}
div.BottomAligned {
margin-top:auto; /* Push items to bottom */
text-align:center;
}
```
```html
Line1
Line2
TextBottom
```
Upvotes: 3 [selected_answer]<issue_comment>username_2: In case you don't want to use flexbox, you can just replace the CSS with the below:
```
body,
html {
margin: 0;
padding: 0;
}
div.BottomAligned {
width: 100%;
display: block;
position:absolute;
bottom:0;
left:0;
}
#Main {
position: relative;
height:100vh;
}
```
Upvotes: 0 |
2018/03/21 | 443 | 1,412 | <issue_start>username_0: I'm looking to accept date input into a script and running into the conundrum of how to differentiate between 040507 // 04052007 and 050407 // 05042007 when user intends April 5th, 2007 (or May 4th, 2007). The US tends to follow the first form, but other countries the second.
I know I can use IP/GPS in some instances, but I'm looking for a method that works offline, maybe from system location/language?
I'm primarily looking for a Windows solution, but surely others will be useful in future/to others.
NB I'm not considering timezone a good option, as different countries in the same timezone can use different conventions.<issue_comment>username_1: Maybe something like this:
```css
body,
html {
height: 100%;
}
body {
margin:0; /* Don't forget to remove default margin */
}
#Main {
display: flex; /* Main should be the flex container */
flex-direction:column;
}
div.BottomAligned {
margin-top:auto; /* Push items to bottom */
text-align:center;
}
```
```html
Line1
Line2
TextBottom
```
Upvotes: 3 [selected_answer]<issue_comment>username_2: In case you don't want to use flexbox, you can just replace the CSS with the below:
```
body,
html {
margin: 0;
padding: 0;
}
div.BottomAligned {
width: 100%;
display: block;
position:absolute;
bottom:0;
left:0;
}
#Main {
position: relative;
height:100vh;
}
```
Upvotes: 0 |
2018/03/21 | 360 | 1,188 | <issue_start>username_0: Given a list of scalar values, how can we split the list into K evenly-sized groups such that the groups have similar distributions? Note that simplicity is strongly favored over efficiency.
I am currently doing:
```
sort values
create K empty groups: group_1, ... group_k
while values is not empty:
for group in groups:
group.add(values.pop())
if values is empty:
break
```<issue_comment>username_1: Maybe something like this:
```css
body,
html {
height: 100%;
}
body {
margin:0; /* Don't forget to remove default margin */
}
#Main {
display: flex; /* Main should be the flex container */
flex-direction:column;
}
div.BottomAligned {
margin-top:auto; /* Push items to bottom */
text-align:center;
}
```
```html
Line1
Line2
TextBottom
```
Upvotes: 3 [selected_answer]<issue_comment>username_2: In case you don't want to use flexbox, you can just replace the CSS with the below:
```
body,
html {
margin: 0;
padding: 0;
}
div.BottomAligned {
width: 100%;
display: block;
position:absolute;
bottom:0;
left:0;
}
#Main {
position: relative;
height:100vh;
}
```
Upvotes: 0 |
2018/03/21 | 481 | 1,652 | <issue_start>username_0: In my Spring Boot 1.5.10 application with Spring Data REST and HATEOAS, I have a `ResourceProcessor` bean with an `@Autowired` service, like:
```
@Bean
public ResourceProcessor> orderResourceProcessor() {
return new ResourceProcessor>() {
@Autowired
private OrderHandler orderHandler;
@Override
public Resource process(Resource resource) {
Order order = resource.getContent();
Payment payment = orderHandler.payment(order);
resource.add(makeLink(payment));
return resource;
}
private Link makelink(Payment payment) {
return new Link(/\*...\*/);
}
};
}
```
When the `@Autowired` service is added, the resource processor bean is no longer triggered, unfortunately; i.e., when `OrderHandler` is commented out, the resource processor runs as it should.
Can a `ResourceProcessor` use `@Autowired` services; and, if so, what's the right way to construct it?<issue_comment>username_1: Maybe something like this:
```css
body,
html {
height: 100%;
}
body {
margin:0; /* Don't forget to remove default margin */
}
#Main {
display: flex; /* Main should be the flex container */
flex-direction:column;
}
div.BottomAligned {
margin-top:auto; /* Push items to bottom */
text-align:center;
}
```
```html
Line1
Line2
TextBottom
```
Upvotes: 3 [selected_answer]<issue_comment>username_2: In case you don't want to use flexbox, you can just replace the CSS with the below:
```
body,
html {
margin: 0;
padding: 0;
}
div.BottomAligned {
width: 100%;
display: block;
position:absolute;
bottom:0;
left:0;
}
#Main {
position: relative;
height:100vh;
}
```
Upvotes: 0 |
2018/03/21 | 1,468 | 6,205 | <issue_start>username_0: When using `SERIALIZABLE` transactions to implement a pattern of inserting a value into a database only if it does not already exist, I have observed a significant difference between MySQL and PostgreSQL in their definition of the `SERIALIZABLE` isolation level.
Consider the following table:
```
CREATE TABLE person (
person_id INTEGER PRIMARY KEY AUTO_INCREMENT,
name VARCHAR NOT NULL
);
```
And the following insertion code, run in concurrently on two connections:
```
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
START TRANSACTION;
SELECT person_id FROM person WHERE name = '<NAME>';
-- sync point: both transactions should run through here before proceeding to
-- demonstrate the effect
-- 0 results, so we will insert
INSERT INTO person (name) VALUES ('<NAME>');
SELECT last_insert_id();
COMMIT;
```
In PostgreSQL (after appropriate translation of the SQL), the effect is as I expect: only one of the transactions can successfully commit. This is consistent with my understanding of SERIALIZABLE as described by PostgreSQL, and other sources quoting from the ANSI standard: there exists a serial execution of the transactions that would produce the same effect. There is no serial execution of these two transactions that returns 0 results for the search and then adds the entry.
In MySQL 5.7, both transactions succeed and there are 2 ‘Bob Ross’ entries in the table. The MySQL documentation defines `SERIALIZABLE` in terms of prohibiting dirty reads, nonrepeatable reads, and phantom reads; it makes no reference to the existence of a serial execution.
SQLite also correctly pevents double insertion, at least in its default mode, due to its conservative locking strategy.
My question: **Is MySQL's behavior in this case correct, or is it violating the SQL standard by allowing these transactions to both succeed?** I suspect the answer may hinge on the definition of ‘effect’ — does observing an empty result set from the first `SELECT` count as an ‘effect’ for the purposes of two serial executions having the same effect?
A couple other comments to help scope this question:
* I know I could achieve the desired behavior in MySQL by first doing an insert with `ON CONFLICT IGNORE`, and then doing the select. I am trying to understand why the equivalent standard SQL is not exhibiting the same behavior in both engines.
* I know that I could probably also fix it by putting a unique constraint on the `name` field, which would arguably be a better data model anyway. But the core question still remains: why do *these* transactions both succeed?<issue_comment>username_1: The SQL standard says in chapter **4.35.4 Isolation levels of SQL-transactions** (emphasis mine):
>
> The execution of concurrent SQL-transactions at isolation level SERIALIZABLE is guaranteed to be serializable. **A serializable execution is defined to be an execution of the operations of concurrently executing SQL-transactions that produces the same effect as some serial execution of those same SQL-transactions.** A serial execution is one in which each SQL-transaction executes to completion before the next SQL-transaction begins.
>
>
>
A little further down, it goes on to confuse the issue:
>
> The isolation level specifies the kind of phenomena that can occur during the execution of concurrent SQL-transactions. The following phenomena are possible:
>
>
> [skipping definition of ***P1* (“Dirty read”)**, ***P2* (“Non-repeatable read”)** and ***P3* (“Phantom”)**]
>
>
> The four isolation levels guarantee that each SQL-transaction will be executed completely or not at all, and that no updates will be lost. The isolation levels are different with respect to phenomena *P1*, *P2*, and *P3*. Table 8, “SQL-transaction isolation levels and the three phenomena” specifies the phenomena that are possible and not
> possible for a given isolation level.
>
>
>
> ```none
> +------------------+--------------+--------------+--------------+
> | Level | P1 | P2 | P3 |
> +------------------+--------------+--------------+--------------+
> | READ UNCOMMITTED | Possible | Possible | Possible |
> +------------------+--------------+--------------+--------------+
> | READ COMMITTED | Not Possible | Possible | Possible |
> +------------------+--------------+--------------+--------------+
> | REPEATABLE READ | Not Possible | Not Possible | Possible |
> +------------------+--------------+--------------+--------------+
> | SERIALIZABLE | Not Possible | Not Possible | Not Possible |
> +------------------+--------------+--------------+--------------+
>
> ```
>
> NOTE 53 — The exclusion of these phenomena for SQL-transactions executing at isolation level SERIALIZABLE is a consequence of the requirement that such transactions be serializable.
>
>
>
This wording has had the unfortunate consequence that many implementors decided that it is enough to exclude direy reads, non-repeatable reads and phantom reads to correctly implement the `SERIALIZABLE` isolation level, even though the note clarifies that this is **not the definition, but a consequence of the definition**.
So I would argue that MySQL is wrong, but it is not alone: Oracle database interprets `SERIALIZABLE` in the same fashion.
Upvotes: 4 [selected_answer]<issue_comment>username_2: I can't reproduce this in MySQL 5.7. Other transaction always gets an error:
>
> ERROR 1213 (40001): Deadlock found when trying to get lock;
>
>
>
The reason is that the SELECT does not use indexed column in the WHERE-part so it sets s-locks to every row it found, gap-s-lock to every gap between rows found and next-key locks to the positive infinity after the last row found. So in this situation concurrent INSERTs are not possible.
One possible reason to the results you got might be this:
```
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
```
It sets isolation level only for the **next transaction**. If you executed even a single SELECT after that, the isolation level changed back to normal (REPEATABLE READ).
Better to use
```
SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
```
Upvotes: 2 |
2018/03/21 | 347 | 1,059 | <issue_start>username_0: Hi im trying to get an input that is contained in a TD using the `find()` function.
But i'm getting an error.
**This is my code:**
```
var ItxExtend={
CurrentSelectedIds : "",
IsSelected:false,
HiddenFieldId:"",
CurrentSelectedIds:"",
CheckBoxClass:"",
HiddenFieldRowId :"",
DivToInitId :"",
SeleccionarCheckbox :function (TrFromTableInPages){
var $tds = $(TrFromTableInPages).find("td");
var $ik = $tds[0];
var $c = $ik.find(this.CheckBoxClass);
var $item =$ik.find(this.HiddenFieldRowId).val();
```
}
>
> Error:
> customJS.js:54 Uncaught TypeError: $ik.find is not a function
>
>
><issue_comment>username_1: `$tds[0];` returns a raw dom element, not a jQuery object, so you cannot do jQuery methods off of it. Change it to `$tds.eq(0);` and you will be able to use the find off of it.
Upvotes: 1 <issue_comment>username_2: ```
var $ik = $tds[0];
```
Here, you're selecting the element itself, not the jquery-wrapped element. Instead, try
```
$($tds[0]).find( ...
```
Upvotes: 1 [selected_answer] |
2018/03/21 | 739 | 2,708 | <issue_start>username_0: I need to pass a json object to function, which is as mentioned below in a href, but this JS code is not getting evaluated. So can anyone suggest a workaroud or a solution for this?
```
function function_test(option,jsonObj){
displayMessage(str);
}
function function_prepare_div(){
var str ="";
var jsonResposneObj = getJson();//function to get jsonResponseObj
for(i=0;i1. " + jsonResposneObj.dataObj[i].objName + "";
}
return str;
}
```
P.S. I cannot return the jsonResponse after function call.<issue_comment>username_1: Instead of using inlined JS, append the element using proper DOM methods, and then attach an event listener.
Eg something like
```
const a = container.appendChild(document.createElement('a'));
a.href = function_test(1, jsonResponse);
a.textContent = '1. ' + function_test(1, jsonResponse);
```
(make sure `function_test` returns a URL)
Upvotes: 0 <issue_comment>username_2: **Functions (or any JavaScript for that matter) don't belong in an in the first place.**
Hyperlinks are for navigation, not JavaScript hooks. Using them just to trigger some JavaScript is an incorrect use of the tag. It was commonplace 20+ years ago (before we had web standards), but is woefully outdated and downright incorrect today. Using a hyperlink to trigger JavaScript is semantically incorrect and can cause issues for people who rely on assistive technologies (like screen readers) to navigate a page.
Just about any element that is valid in the `body` of a web page supports a `click` event and most are better suited to what you want to do.
What you need to do is register your function as the callback to the `click` event of some element, like this:
```js
// An example of a JSON response
var jsonResponse = '{"key1":10,"key2":true,"key3":"foo"}';
// Get reference to any element that supports a click event that
// can be safely used as a JavaScript trigger
var span = document.getElementById("fakeLink");
// Set up an event handling callback function for the click event of the element
span.addEventListener("click", function(){
// Call the function and pass it any arguments needed
processJSON(1, jsonResponse, this);
});
// Do whatever you need to do in this function:
function processJSON(val, json, el){
console.log(val, json);
el.textContent = val + json;
}
```
```css
/* Make element look & feel like a hyperlink */
#fakeLink { cursor:pointer; text-decoration:underline; }
#fakeLink:active { color:red; }
```
```html
Click Me
```
Upvotes: 0 <issue_comment>username_3: ```js
var json = {
foo: 'bar'
};
var func = function(data) {
alert(data.foo);
}
$('body').append("BUTTON");
```
Upvotes: -1 |
2018/03/21 | 787 | 2,750 | <issue_start>username_0: Need to create some factory method inside Angular 5 service based on generic type T passed to this service. How to get name of generic type "T"?
```
@Injectable()
export class SomeService {
someModel: T;
constructor(protected userService: UserService) {
let user = this.userService.getLocalUser();
let type: new () => T;
console.log(typeof(type)) // returns "undefined"
console.log(type instanceof MyModel) // returns "false"
let model = new T(); // doesn't compile, T refers to a type, but used as a value
// I also tried to initialize type, but compiler says that types are different and can't be assigned
let type: new () => T = {}; // doesn't compile, {} is not assignable to type T
}
}
// This is how this service is supposed to be initialized
class SomeComponent {
constructor(service: SomeService) {
let modelName = this.service.getSomeInfoAboutInternalModel();
}
}
```<issue_comment>username_1: Genrics are used for type validation
```
class Array{
pop:() => T;
unshift:(v:T) => void;
}
let numbers: Array = ['1212']; //error
let strings: Array = ['1','2','3']; //work
class Product{
}
let products: Array = [new Product(), new Product()]; //works
```
Upvotes: 1 <issue_comment>username_2: You cannot instantiate a class based on generic types only.
I mean, if you want this:
```
function createInstance(): T {...}
```
It is not possible, because, it would transpile into this:
```
function createInstance() {...}
```
Which, as you can see, cannot be parametrized in any way.
The closest you can get to what you want is this:
```
function createInstance(type: new() => T): T {
return new type();
}
```
Then, if you have a class with a parameterless constructor:
```
class C1 {
name: string;
constructor() { name = 'my name'; }
}
```
You can now do this:
```
createInstance(C1); // returns an object { name: '<NAME>' }
```
This works perfectly and the compiler gives you correct type information.
The reason I'm using `new() => T` as the type for `type`, is to indicate that you must pass a constructor function with no parameters that must return a type T. The class itself is exactly that. In this case, if you have
```
class C2 {
constructor(private name: string) {}
}
```
And you do
```
createInstance(C2);
```
the compiler will throw an error.
You can, however, generalise the `createInstance` function so it works for objects with any number of parameters:
```
function createInstance2(type: new (...args) => T, ...args: any[]): T
{
return new type(...args);
}
```
Now:
```
createInstance(C1); // returns { name: '<NAME>'}
createInstance(C2, 'John'); // returns {name: 'John'}
```
I hope this serves you.
Upvotes: 4 [selected_answer] |
2018/03/21 | 1,434 | 5,015 | <issue_start>username_0: I would like to store the initialization values for elements in a tuple inside a separate tuple, so that I can use the same values as a preset for other tuples of the respective type.
It is very important to my program that the constructors of the tuple elements are called in left-to-right order (otherwise it would at best turn out very confusing).
Here is a simplified version of my program:
```
#include
// Elements are the end points of a Widget hierarchy
struct Element
{
using initer\_t = int;
Element( const initer\_t pIniter )
:data{ pIniter }
{
printf("Creating %i\n", data);
}
const initer\_t data;
};
// A Widget class stores any number of Elements and/or other Widget instances
template
struct Widget
{
using initer\_t = std::tuple;
Widget( const initer\_t pIniter )
:elements{ pIniter }
{}
const std::tuple elements;
};
int main()
{
using Button = Widget;
using ButtonList = Widget;
Button::initer\_t basic\_button\_initer{ 0, 1 }; // presets for Buttons
Button::initer\_t other\_button\_initer{ 2, 3 };
ButtonList::initer\_t buttonlist\_initer{ basic\_button\_initer, other\_button\_initer, 4 }; //a preset for a ButtonList
ButtonList buttonlist{ buttonlist\_initer };
return 0;
}
```
So I am initializing the `std::tuple elements` member of `Widget` with a `std::tuple` in `Widget`'s constructor initialization list.
This is supposed to initialize each element of `elements` with its corresponding initialization value of the type defined by `initer_t` using the values in pIniter.
The `initer_t` type is a type for each member of a Widget hierarchy(for example a `Widget` or an `Element`), which is the type that the hierarchy member should be initialized with.
But the order in which this happens is right-to-left, while I need it in left-to-right.
The output of the program is
```
Creating 4
Creating 3
Creating 2
Creating 1
Creating 0
```
But I want to reverse this order.
How can I do this in this case?<issue_comment>username_1: There is no requirement in the standard for the order of `std::tuple` member initialisation, I am afraid.
You can iterate over a `tuple` in a specific order though, e.g.:
```
#include
#include
#include
#include
int main()
{
auto a = std::make\_tuple(true, 42, 3.14, "abc");
boost::fusion::for\_each(a, [](auto& value) {
std::cout << value << '\n';
});
}
```
Outputs:
```
1
42
3.14
abc
```
Upvotes: 2 <issue_comment>username_2: For anyone interested in a solution, I came up with a way to control the initialization order and retain the constness of `elements`:
```
#include
template
struct construct
{
template
static constexpr const std::tuple
drop\_head\_impl( const std::index\_sequence ns,
const std::tuple tup )
{
return std::tuple( std::get( tup )... );
}
template
static constexpr const std::tuple
drop\_head( const std::tuple tup )
{
return drop\_head\_impl( std::make\_index\_sequence(), tup );
}
template
static constexpr const std::tuple
func\_impl( const std::tuple initer )
{
return std::tuple( { std::get<0>( initer ) } );
}
template
static constexpr const std::tuple
func\_impl( const std::tuple initer )
{
std::tuple head( { std::get<0>( initer ) } );
return std::tuple\_cat( head, func\_impl( drop\_head(initer) ) );
}
static constexpr const std::tuple
func( const std::tuple initer )
{
return func\_impl( initer );
}
};
// Elements are the end points of a Widget hierarchy
struct Element
{
using initer\_t = int;
Element( const initer\_t pIniter )
:data{ pIniter }
{
printf( "Creating %i\n", data );
}
const initer\_t data;
};
// A Widget class stores any number of Elements and/or other Widget instances
template
struct Widget
{
using initer\_t = std::tuple;
Widget( const initer\_t pIniter )
:elements( construct::func( pIniter ) )
{}
const std::tuple elements;
};
int main()
{
using Button = Widget;
using ButtonList = Widget;
Button::initer\_t basic\_button\_initer{ 0, 1 }; // presets for Buttons
Button::initer\_t other\_button\_initer{ 2, 3 };
ButtonList::initer\_t buttonlist\_initer{ basic\_button\_initer, other\_button\_initer, 4 }; //a preset for a ButtonList
ButtonList buttonlist{ buttonlist\_initer };
return 0;
}
```
The `construct` structure takes the tuple of `initer_t`s (initer), constructs a tuple containing the first element of `Elems...` using the first element of initer, then drops the first element of initer and passes the remaining tuple to itself, which causes a tuple with the next element of `Elems...` to be constructed using the next element in initer. This recursion is stopped by an overload of `func_impl` for a tuple with one element which simply constructs that element from its `initer_t` in a tuple and returns it. This single-element tuple gets concatenated to the tuple with the previous element, the result gets returned to the higher level and is concatenated to the single-element tuple there and so on.
Upvotes: 2 [selected_answer] |
2018/03/21 | 627 | 2,266 | <issue_start>username_0: I'm trying to link three files using g++. The files are `simulation.o`, `lattice.o` and `thermodynamics.o`.
They're a bit long, but the gist of it is. I have a makefile:
```
main: simulation.o thermodynamics.o lattice.o
g++ simulation.o thermodynamics.o lattice.o
simulation.o: simulation.cpp lattice.o lattice.h thermodynamics.o thermodynamics.h
g++ -std=c++11 simulation.cpp -o simulation.o -c
thermodynamics.o: thermodynamics.cpp
g++ -std=c++11 thermodynamics.cpp -o thermodynamics.o -lgsl -c
lattice.o: lattice.cpp
g++ -std=c++11 lattice.cpp -o lattice.o -c
```
It passes the compile stage, but never links them. For each method I need from a different file, it simply says that it's undefined, and and refuses to find them.
The classes and methods are all defined in the `.h` files. But for some reason I can define an external function but not an external class.<issue_comment>username_1: It fails to link because your makefile uses linker flags when compiling. Whereas the linker flags must be used when linking.
Corrections:
```
CXXFLAGS := -std=c++11 -Wall -Wextra
main: simulation.o thermodynamics.o lattice.o
g++ -o main simulation.o thermodynamics.o lattice.o -lgsl
simulation.o: simulation.cpp lattice.h thermodynamics.h
g++ -c ${CXXFLAGS} -o simulation.o simulation.cpp
thermodynamics.o: thermodynamics.cpp thermodynamics.h
g++ -c ${CXXFLAGS} -o thermodynamics.o thermodynamics.cpp
lattice.o: lattice.cpp lattice.h
g++ -c ${CXXFLAGS} -o lattice.o lattice.cpp
```
Upvotes: 1 <issue_comment>username_2: I figured out what went wrong. Had to do with the way I've split the classes between files.
I declared the classes in the `.h` files, and then redeclared them inside `.cpp` files. Instead I should have filled the cpp files with implementations of the form `class::method(params)`.
Also I didn't `#include` the `.h` file inside the `.cpp`file.
Lastly, I also had the wrong order of linkage: as pointed out by @username_1, the order of linking matters. I should have linked the files all at once, and not at the final stage.
Thanks for everyone that answered!
Upvotes: 0 |
2018/03/21 | 822 | 2,799 | <issue_start>username_0: I have a `string s="java"` and a `mapM`. `M` contains
`(Cpp,1), (jAvA,2), (Cobol,3)`. I have to check if `string s` matches (case insensitive) with any key in map. `Is there any better way than iterating through entire map and check for each record`? In this example string s should match with second record in map, as it is case insensitive matching. In following example we are using `compareInterval()` with `sort()`. Is there any way that we can use any function with stricmp() and use map M and string s and perform case insensitive matching efficiently?
```
// A C++ program to demonstrate STL sort() using
// our own comparator
#include
using namespace std;
// An interval has start time and end time
struct Interval
{
int start, end;
};
// Compares two intervals according to staring times.
bool compareInterval(Interval i1, Interval i2)
{
return (i1.start < i2.start);
}
int main()
{
Interval arr[] = { {6,8}, {1,9}, {2,4}, {4,7} };
int n = sizeof(arr)/sizeof(arr[0]);
// sort the intervals in increasing order of
// start time
sort(arr, arr+n, compareInterval);
cout << "Intervals sorted by start time : \n";
for (int i=0; i
```
}<issue_comment>username_1: >
> Is there any better way than iterating through entire map and check for each record?
>
>
>
It needs to be done since you cannot use `std::map::find`, which would be more efficient. You can use `std::find_if` to reduce some of the boilerplate code, but it still means that you are iterating over each item of the `map`.
But more importantly, if you are going to search for a key using a case insensitive comparison, it will be better to also create the map using a case insensitive compare function/functor. Otherwise, it's possible to have multiple elements in the `map` such as:
```
(jAvA, 2)
(java, 20)
(JAVA, 30)
```
Then the search function will not necessarily get you the item you would like have.
Upvotes: 0 <issue_comment>username_2: You can use a case-insensitive comparator for your map:
```
struct CaseInsensitiveLess
{
    // Strict-weak-ordering comparator that ignores ASCII case, so a std::map
    // keyed with it treats "java", "jAvA" and "JAVA" as the same key.
    // operator() is const-qualified because std::map invokes its stored
    // comparator from const member functions such as find().
    bool operator()(std::string lhs, std::string rhs) const
    {
        // std::tolower has undefined behaviour when passed a negative char
        // value, so route the argument through unsigned char first.
        auto lower = [](char c) {
            return static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
        };
        std::transform(lhs.begin(), lhs.end(), lhs.begin(), lower);
        std::transform(rhs.begin(), rhs.end(), rhs.begin(), lower);
        return lhs < rhs;
    }
};
int main()
{
std::map foo = {
{"Cpp", 1}, {"jAvA", 2}, {"Cobol", 3}
};
std::cout << foo["java"] << '\n';
}
```
[Live Demo](http://coliru.stacked-crooked.com/a/20293057e37e0c40)
This converts strings to lower case when comparing them, so `"java"`, `"Java"`, `"jAvA"`, `"JAVA"`, etc. will all be considered the same string when inserting into the map or looking up values in it.
Upvotes: 2 |
2018/03/21 | 1,053 | 3,595 | <issue_start>username_0: Mac here, running Docker Community Edition Version 17.12.0-ce-mac49 (21995).
I have Dockerized a web app with a Dockerfile like so:
```
FROM openjdk:8
RUN mkdir /opt/myapp
ADD build/libs/myapp.jar /opt/myapp
ADD application.yml /opt/myapp
ADD logback.groovy /opt/myapp
WORKDIR /opt/myapp
EXPOSE 9200
ENTRYPOINT ["java", "-Dspring.config=.", "-jar", "myapp.jar"]
```
I then build that image like so:
```
docker build -t myapp .
```
I then run a container of that image like so:
```
docker run -it -p 9200:9200 --net="host" --env-file ~/myapp-local.env --name myapp myapp
```
In the console I see the app start up without any errors, and all seems to be well. Even my metrics publishes (which publish heartbeat and other health metrics every 20 seconds) are printing to the console as I would expect them to. Everything seems to be fine.
Except when I go to run a `curl` against my app from another terminal/session:
```
curl -i -H "Content-Type: application/json" -X POST -d '{"username":"heyitsme","password":"<PASSWORD>"}' http://localhost:9200/v1/auth/signIn
curl: (7) Failed to connect to localhost port 9200: Connection refused
```
Now, if this were a situation where the `/v1/auth/signIn` path wasn't valid, or if there was something wrong with my request entity/payload, the server would pick up on it and send an error (I assure you; as I can confirm this exact same `curl` works when I run the server outside of Docker as just a standalone service).
So this is *definitely* a situation where the `curl` command can't connect to `localhost:9200`. Again, when I run my app outside of Docker, that same `curl` command works perfectly, so I know my app *is* trying to standup on port 9200.
Any ideas as to what could be going wrong here, or how I could begin troubleshooting?<issue_comment>username_1: Problem seems to be in the network mode you are running the container.
Quick test: log in to your container and run the curl command there; hopefully it works. That would isolate the problem to the request not being forwarded from the host to the container.
Try running your container on the default bridge network and test.
Refer to this [blog](http://www.dasblinkenlichten.com/docker-networking-101-host-mode/) for details on the network modes in docker
TLDR; You will need to add an IPtables entry to allow the traffic to enter your container.
Upvotes: 3 [selected_answer]<issue_comment>username_2: Check that your app bind to 0.0.0.0:9200 and not localhost:9200 or something similar
Upvotes: 2 <issue_comment>username_3: The way you run your container has 2 conflicting parts:
1. `-p 9200:9200` says: "publish (bind) port `9200` of the container to port `9200` of the host"
2. `--net="host"` says: "use the host's networking stack"
According to Docker for Mac - Networking docs / [Known limitations, use cases, and workarounds](https://docs.docker.com/docker-for-mac/networking/#known-limitations-use-cases-and-workarounds), you should only publish a port:
>
> ### I want to connect to a container from the Mac
>
>
> Port forwarding works for localhost; `--publish`, `-p`, or `-P` all work. Ports exposed from Linux are forwarded to the Mac.
>
>
> Our current recommendation is to publish a port, or to connect from another container. This is what you need to do even on Linux if the container is on an overlay network, not a bridge network, as these are not routed.
>
>
> The command to run the nginx webserver shown in Getting Started is an example of this.
>
>
>
> ```
> $ docker run -d -p 80:80 --name webserver nginx
>
> ```
>
>
Upvotes: 3 |
2018/03/21 | 536 | 2,039 | <issue_start>username_0: I have created a simple task based on the Google Cloud Platform ["update counter" push task example](https://github.com/GoogleCloudPlatform/python-docs-samples/tree/master/appengine/standard/taskqueue/counter). All I want to do is log that it has been invoked to the Stackdriver logs.
```
from google.cloud import logging
logging_client = logging.Client()
log_name = 'service-log'
logger = logging_client.logger(log_name)
import webapp2
class UpdateCounterHandler(webapp2.RequestHandler):
def post(self):
amount = int(self.request.get('amount'))
logger.log_text('Service startup task done.')
app = webapp2.WSGIApplication([
('/update_counter', UpdateCounterHandler)
], debug=True)
```
After deploying this and invoking it, there is an error. In the logs online it says:
```
from google.cloud import logging
ImportError: No module named cloud
```
This isn't a local version, but one that I've deployed. It's hard for me to believe that I have to actually install python libraries into the production runtime. (I can't even imagine that I can.)<issue_comment>username_1: As the [root readme](https://github.com/GoogleCloudPlatform/python-docs-samples/tree/master/appengine/standard#user-content-deploying-the-samples) states:
>
> Many samples require extra libraries to be installed. If there is a `requirements.txt`, you will need to install the dependencies with `pip`.
>
>
>
Try adding the library as explained [here](https://stackoverflow.com/a/39815014/3615567).
Upvotes: 1 <issue_comment>username_2: When using `logging` from the Python standard library in App Engine, the logs also end up in Stackdriver. So you could use `import logging` instead of `from google.cloud import logging`.
When you are specifically interested in using the `google.cloud.logging` library, then it needs to be installed to a project folder `./lib` as referred by username_1: [here](https://cloud.google.com/logging/docs/reference/libraries#client-libraries-install-python "here")
Upvotes: 0 |
2018/03/21 | 1,005 | 3,051 | <issue_start>username_0: I am trying to iterate over an array that's similar to the below structure, and for each object that has the same id, I want them combined into a single object but with an array of all the 'names' associated with that id. My Array is pre-sorted by id and I can't use JQuery, only vanilla JS.
Convert this:
```
var array = [
{id=1, name = "Orange"},
{id=1, name = "Blue"},
{id=1, name = "Green"},
{id=2, name = "Blue"},
{id=3, name = "Orange"},
{id=3, name = "Blue"}
]
```
To this:
```
var newArray = [
{id=1, nameList = [Orange, Blue, Green]},
{id=2, nameList = [Blue]},
{id=3, namelist = [Orange, Blue]}
]
```
I was trying to do something like this by comparing the first object to the next object in the array, then setting the first object to the second object, but got stuck because 'secondObject' wasn't actually the next object in the array.
```
for (var i in array) {
firstObject = array[i];
secondObject = array[i + 1];
if (firstObject.id === secondObject.id) {
firstObject['nameList'] = [firstObject.name + ',' + secondObject.name]
} else {
continue;
}
firstObject = secondObject;
}
```
I have to do this with a couple thousand id's, and most of the id's are repeated with different names, but there are a few single id objects like id 2.<issue_comment>username_1: An alternative is using the function `reduce`
```js
var array = [ {id:1, name : "Orange"}, {id:1, name : "Blue"}, {id:1, name : "Green"}, {id:2, name : "Blue"}, {id:3, name : "Orange"}, {id:3, name : "Blue"}]
var result = Object.values(array.reduce((a, c) => {
(a[c.id] || (a[c.id] = {id: c.id, nameList: []})).nameList.push(c.name);
return a;
}, {}));
console.log(result);
```
```css
.as-console-wrapper { max-height: 100% !important; top: 0; }
```
Upvotes: 3 [selected_answer]<issue_comment>username_2: If you don't have to do it in-place, you can just create another object and put things into that directly.
```
var map = {}
for (var obj in array) {
var id = obj.id;
var name = obj.name;
if (!map[id]) {
map[id] = [];
}
map[id].push(name);
}
```
Then iterate across the map to build up your result:
```
var result = [];
Object.keys(map).forEach(function(key) {
result.push[key, map[key]];
}
```
Upvotes: 0 <issue_comment>username_3: Because of the sorted items, you could check the last item in the result array and if different from the actual item generate a new result object. Later push `name`.
```js
var array = [{ id: 1, name: "Orange" }, { id: 1, name: "Blue" }, { id: 1, name: "Green" }, { id: 2, name: "Blue" }, { id: 3, name: "Orange" }, { id: 3, name: "Blue" }],
grouped = array.reduce(function (r, { id, name }) {
if (!r.length || r[r.length - 1].id !== id) {
r.push({ id, namelist: [] });
}
r[r.length - 1].namelist.push(name);
return r;
}, []);
console.log(grouped);
```
```css
.as-console-wrapper { max-height: 100% !important; top: 0; }
```
Upvotes: 1 |
2018/03/21 | 1,032 | 3,554 | <issue_start>username_0: I'm working on creating an `SCNNode` instance that consists of an `SCNPlane` - type geometry, with a video playing on the surface of the node. This is what I have so far:
```
let plane = SCNPlane(width: 5, height: 5)
node = SCNNode(geometry: plane)
GlobalScene.shared.rootNode.addChildNode(node!)
let urlStr = Bundle.main.path(forResource: "test", ofType: "mov")
videoURL = NSURL(fileURLWithPath: urlStr!)
player = AVPlayer(playerItem: AVPlayerItem(url: videoURL! as URL))
let videoNode = SKVideoNode(avPlayer: player!)
player?.actionAtItemEnd = .none
let spritescene = SKScene(size: CGSize(width: 122, height: 431))
videoNode.size.width=spritescene.size.width
videoNode.size.height=spritescene.size.height
spritescene.addChild(videoNode)
plane.firstMaterial?.diffuse.contents = spritescene
```
The overall functionality so far works great! What I'm trying to figure out is, how to I get the `node`'s ENTIRE surface to be made up of the video ? So far, I've only gotten it to appear in the single corner shown below: [](https://i.stack.imgur.com/cONcy.png)
**EDIT**: it looks like the issue is that I'm only setting the first material of the plane node (rather than all of them...) which is why I'm seeing the other 3 "quadrants" as blank.
**EDIT2**: That first conclusion may not be correct - if I set:
```
plane.firstMaterial?.diffuse.contents = NSColor.green
```
I get the following result:
[](https://i.stack.imgur.com/CEPjt.png)
So...why won't that work when applying the contents of a `SpriteKit` scene?<issue_comment>username_1: With the help of this [gist](https://gist.github.com/glaurent/aad82c4185f3c92f21dc), I was able to solve my issue; which ended up involving both scaling the video node properly, as well as setting the position properly. My working code is below:
```
GlobalScene.shared.rootNode.addChildNode(node!)
let urlStr = Bundle.main.path(forResource: "test", ofType: "mov")
videoURL = NSURL(fileURLWithPath: urlStr!)
player = AVPlayer(playerItem: AVPlayerItem(url: videoURL! as URL))
videoSpriteKitNode = SKVideoNode(avPlayer: player!)
player?.actionAtItemEnd = .none
NotificationCenter.default.addObserver(
self,
selector: #selector(self.playerItemDidReachEnd),
name: NSNotification.Name.AVPlayerItemDidPlayToEndTime,
object: player?.currentItem)
let spriteKitScene = SKScene(size: CGSize(width: 1276.0 / 2.0, height: 712.0 / 2.0))
videoSpriteKitNode?.size.width=spriteKitScene.size.width
videoSpriteKitNode?.size.height=spriteKitScene.size.height
node?.geometry?.firstMaterial?.diffuse.contentsTransform = SCNMatrix4Translate(SCNMatrix4MakeScale(1, -1, 1), 0, 1, 0)
videoSpriteKitNode?.position = CGPoint(x: spriteKitScene.size.width / 2.0, y: spriteKitScene.size.height / 2.0)
videoSpriteKitNode?.size = spriteKitScene.size
videoSpriteKitNode?.play()
spriteKitScene.addChild(videoSpriteKitNode!)
plane?.firstMaterial?.diffuse.contents = spriteKitScene
```
Upvotes: 0 <issue_comment>username_2: Using a `SKScene` and a `SKVideoNode` is not necessary. You can directly set the `AVPlayer` as the contents of a `SCNMaterialProperty` instance. This will allow for better performance and will avoid having to deal with scaling and positioning the SpriteKit elements.
Upvotes: 3 [selected_answer] |
2018/03/21 | 595 | 2,085 | <issue_start>username_0: I am looking to convert an array of one type to another.
As an example, let's say we have an array of type `byte` and would like a new array of type `logic [5:0]`.
I am having difficulty doing this while not trying to lose data.
Thanks,
George<issue_comment>username_1: With the help of this [gist](https://gist.github.com/glaurent/aad82c4185f3c92f21dc), I was able to solve my issue; which ended up involving both scaling the video node properly, as well as setting the position properly. My working code is below:
```
GlobalScene.shared.rootNode.addChildNode(node!)
let urlStr = Bundle.main.path(forResource: "test", ofType: "mov")
videoURL = NSURL(fileURLWithPath: urlStr!)
player = AVPlayer(playerItem: AVPlayerItem(url: videoURL! as URL))
videoSpriteKitNode = SKVideoNode(avPlayer: player!)
player?.actionAtItemEnd = .none
NotificationCenter.default.addObserver(
self,
selector: #selector(self.playerItemDidReachEnd),
name: NSNotification.Name.AVPlayerItemDidPlayToEndTime,
object: player?.currentItem)
let spriteKitScene = SKScene(size: CGSize(width: 1276.0 / 2.0, height: 712.0 / 2.0))
videoSpriteKitNode?.size.width=spriteKitScene.size.width
videoSpriteKitNode?.size.height=spriteKitScene.size.height
node?.geometry?.firstMaterial?.diffuse.contentsTransform = SCNMatrix4Translate(SCNMatrix4MakeScale(1, -1, 1), 0, 1, 0)
videoSpriteKitNode?.position = CGPoint(x: spriteKitScene.size.width / 2.0, y: spriteKitScene.size.height / 2.0)
videoSpriteKitNode?.size = spriteKitScene.size
videoSpriteKitNode?.play()
spriteKitScene.addChild(videoSpriteKitNode!)
plane?.firstMaterial?.diffuse.contents = spriteKitScene
```
Upvotes: 0 <issue_comment>username_2: Using a `SKScene` and a `SKVideoNode` is not necessary. You can directly set the `AVPlayer` as the contents of a `SCNMaterialProperty` instance. This will allow for better performance and will avoid having to deal with scaling and positioning the SpriteKit elements.
Upvotes: 3 [selected_answer] |
2018/03/21 | 798 | 2,907 | <issue_start>username_0: I have this query that returns a list of student objects:
```
query = db.session.query(Student).filter(Student.is_deleted == false())
query = query.options(joinedload('project'))
query = query.options(joinedload('image'))
query = query.options(joinedload('student_locator_map'))
query = query.options(subqueryload('attached_addresses'))
query = query.options(subqueryload('student_meta'))
query = query.order_by(Student.student_last_name, Student.student_first_name,
Student.student_middle_name, Student.student_grade, Student.student_id)
query = query.filter(filter_column == field_value)
students = query.all()
```
The query itself does not take much time. The problem is converting all these objects (can be 5000+) to Python dicts. It takes over a minute with this many objects. Currently, the code loops through the objects and converts them using `to_dict()`. I have also tried `__dict__`, which was much faster, but it does not seem to convert all relational objects.
How can I convert all these Student objects and related objects quickly?<issue_comment>username_1: With the help of this [gist](https://gist.github.com/glaurent/aad82c4185f3c92f21dc), I was able to solve my issue; which ended up involving both scaling the video node properly, as well as setting the position properly. My working code is below:
```
GlobalScene.shared.rootNode.addChildNode(node!)
let urlStr = Bundle.main.path(forResource: "test", ofType: "mov")
videoURL = NSURL(fileURLWithPath: urlStr!)
player = AVPlayer(playerItem: AVPlayerItem(url: videoURL! as URL))
videoSpriteKitNode = SKVideoNode(avPlayer: player!)
player?.actionAtItemEnd = .none
NotificationCenter.default.addObserver(
self,
selector: #selector(self.playerItemDidReachEnd),
name: NSNotification.Name.AVPlayerItemDidPlayToEndTime,
object: player?.currentItem)
let spriteKitScene = SKScene(size: CGSize(width: 1276.0 / 2.0, height: 712.0 / 2.0))
videoSpriteKitNode?.size.width=spriteKitScene.size.width
videoSpriteKitNode?.size.height=spriteKitScene.size.height
node?.geometry?.firstMaterial?.diffuse.contentsTransform = SCNMatrix4Translate(SCNMatrix4MakeScale(1, -1, 1), 0, 1, 0)
videoSpriteKitNode?.position = CGPoint(x: spriteKitScene.size.width / 2.0, y: spriteKitScene.size.height / 2.0)
videoSpriteKitNode?.size = spriteKitScene.size
videoSpriteKitNode?.play()
spriteKitScene.addChild(videoSpriteKitNode!)
plane?.firstMaterial?.diffuse.contents = spriteKitScene
```
Upvotes: 0 <issue_comment>username_2: Using a `SKScene` and a `SKVideoNode` is not necessary. You can directly set the `AVPlayer` as the contents of a `SCNMaterialProperty` instance. This will allow for better performance and will avoid having to deal with scaling and positioning the SpriteKit elements.
Upvotes: 3 [selected_answer] |
2018/03/21 | 678 | 2,372 | <issue_start>username_0: I have two `UserControl` with just one/two textblocks.On my window, i add the first `UserControl` couple o' times in a for each loop and the 2nd user control,i just add it in the end,which mean i have only 1 `UserControl2`.
```
public class test
{
UserControl1 btn = new UserControl1;
private void thread1()
{
foreach (var item in mycollection)///i am not including the actual iteration target because it is a class and then the post might be too huge
{
mystack.Children.Add(btn);
}
mystack.Children.Add(new UserControl2);
}
```
Note that i am adding only the `UserControl1` in the `foreach` loop but i am adding `UserControl2` outside the loop,which means i am adding it for once.
Anyway,i may iterate through all the controls i added to `mystack` in a `foreach` loop like :
```
foreach (var control in mystack.Children)
{
////My codes here
}
```
As i mentioned earlier,there are 2 types of `UserControl`s added to the `StackPanel`.How do i only iterate through one type of `UserControl` only ? I mean what if i want to iterate through only the `UserControl1` s from the `Stackpanel`(mystack) ?
I tried something like :
```
private void thread2()
{
foreach (UserControl1 control in mystack.Children)
{
}
//////Or
for (i = o; i <= mystack.children - 1; i++)
{
btn.height = 10 /// my other codes here :)
}}
```
But both of them throw `Unable to cast object of type UserControl2 to UserControl1` exception which means it is iterating through all the controls(both `UserControl1` and `UserControl2`) :(
See the first code black in the post?Some might suggest doing whatever i wanna do in the first `foreach` loop but i cant,this has to be done in different threads(first loop in `thread1` and the rest in `thread2`...So,how should i achieve this ?<issue_comment>username_1: What if do some reflection:
```
Type t = typeof(yourControl);
if (t == typeof(int)) // or whatever you need
// Some code here
```
typeof takes a type name (which you specify at compile time).
Or you can use GetType too:
```
if(yourcontrol.GetType() == typeof(yourControl))
//code here
```
GetType gets the runtime type of an instance.
Upvotes: -1 <issue_comment>username_2: Use method OfType to filter children by their type
```
foreach (UserControl1 control in mystack.Children.OfType())
{
}
```
Upvotes: 2 [selected_answer] |
2018/03/21 | 385 | 1,251 | <issue_start>username_0: ```
CREATE TRIGGER printMoneyUSA
AFTER UPDATE ON USA
REFERENCING
OLD ROW AS old
NEW ROW AS new
FOR EACH ROW
BEGIN
IF old.SUPPLY_OF_CURRENCY = 0 THEN
SET new.SUPPLY_OF_CURRENCY = 30000000000;
END IF;
END;//
```
Hello, I keep getting a syntax error for the 'REFERENCING' syntax that says:
>
> You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near
>
>
>
> ```
> 'REFERENCING
> OLD ROW AS old
> NEW ROW AS new
>
> FOR EACH ROW BEGI' at line 3.
>
> ```
>
>
Can someone point out what I'm doing wrong? Thanks!<issue_comment>username_1: What if do some reflection:
```
Type t = typeof(yourControl);
if (t == typeof(int)) // or whatever you need
// Some code here
```
typeof takes a type name (which you specify at compile time).
Or you can use GetType too:
```
if(yourcontrol.GetType() == typeof(yourControl))
//code here
```
GetType gets the runtime type of an instance.
Upvotes: -1 <issue_comment>username_2: Use method OfType to filter children by their type
```
foreach (UserControl1 control in mystack.Children.OfType())
{
}
```
Upvotes: 2 [selected_answer] |
2018/03/21 | 696 | 2,426 | <issue_start>username_0: ```
* [Heading](#)
* [Heading Long](#)
```<issue_comment>username_1: You can use [onorientationchange](https://developer.mozilla.org/en-US/docs/Web/Events/orientationchange) event like the following:
```js
methods: {
detectOrientationChange() {
switch(window.orientation) {
case -90 || 90:
// landscape
this.mobile = false;
break;
default:
// portrait
this.mobile = true;
break;
}
}
},
mounted() {
this.$nextTick(() => {
window.addEventListener('onorientationchange', this.detectOrientationChange)
}
},
created() {
this.detectOrientationChange(); // when instance is created
}
```
**Note**: As the event has been deprecated, it can only be used with mobile browsers as of writing this.
---
To detect screen orientation on current browsers [check this post](https://stackoverflow.com/questions/67961837/deprecated-window-orientationchange-event).
Upvotes: 2 <issue_comment>username_2: Check this library : <https://github.com/apertureless/vue-breakpoints>
```
* [Heading](#)
* [Heading Long](#)
```
or simply go with `media queries` (<https://www.w3schools.com/css/css3_mediaqueries_ex.asp>)
**CSS :**
```
@media screen and (max-width: 600px) {
#app ul il:first-of-type {
visibility: visible;
}
#app ul il:last-of-type {
visibility: hidden;
}
}
@media screen and (max-width: 992px) {
#app ul il:first-of-type {
visibility: hidden;
}
#app ul il:last-of-type {
visibility: visible;
}
}
```
Of course, it's up to you to decide what to show and what to hide at each breakpoint — I hope this helps.
Upvotes: 3 [selected_answer]<issue_comment>username_3: If you are using [Vuetify](https://vuetifyjs.com/en/customization/breakpoints/), you can programmatically adjust the data value based on the built in `breakpoints` of xs, sm, md, lg, xl (as specified in [Material Design](https://material.io/design/layout/responsive-layout-grid.html#breakpoints)) as follows:
```
computed: {
mobile() {
return this.$vuetify.breakpoint.sm
},
}
```
`mobile` will change to `true` as soon as the screen width is less than 600px.
Your code would then be something like this (I also moved the `if` statement to be directly on the `-` element):
```
* [Heading](#)
* [Heading Long](#)
```
Upvotes: 3 |
2018/03/21 | 657 | 2,425 | <issue_start>username_0: I'm looking to write a PowerShell script which validates whether a list of emails within a file are valid against the Active Directory forest. I have a script which works for my specific domain, but it doesn't find emails associated to other domains within the corporate forest.
```
foreach($line in Get-Content C:\path\emails.txt) {
if(Get-ADUser -Filter "EmailAddress -like '$line'") {
"$line is valid"
}
else {
"$line is invalid"
}
}
```
Result:
```
<EMAIL> is valid
<EMAIL> is valid
<EMAIL> is invalid
```
<EMAIL> returns invalid because it's part of another domain, but I'd like it to return valid since it's part of the corporate forest.<issue_comment>username_1: Try specifying the server (domain controller) for the other domains:
```
$DC = 'DC_name'
foreach($line in Get-Content C:\path\emails.txt) {
if(Get-ADUser -Filter "mail -like '$line*'" -Server $DC ) {
"$line is valid"
}
else {
"$line is invalid"
}
}
```
Upvotes: 1 <issue_comment>username_2: Use the `Server` parameter, but specify the port `3268` since that is the port used for the global catalog. The global catalog is the listing for your whole forest.
```
Get-ADUser -Filter "EmailAddress -like '$line'" -Server domain.com:3268
```
Where `domain.com` is the DNS name of any domain in your forest.
Here are the various ports used for Active Directory:
* 389: LDAP - single domain only (the default if you don't specify anything)
* 3268: Global Catalog - your whole forest
* 636: LDAP over SSL
* 3269: Global Catalog over SSL
Upvotes: 0 <issue_comment>username_3: The following solution worked. It takes elements of the proposed solutions and cycles through all the domain controllers. I would have preferred to leverage the Get-ADForest cmdlet in a way to prevent the need to specify DCs, but this suffices for the problem's purpose.
```
# For every e-mail address in the input file, probe each domain controller in
# turn until one of them knows a user with that address.
foreach($line in Get-Content C:\path\emails.txt) {
    foreach($DC in Get-Content C:\path\DCs.txt){
        # Ask this DC for a user whose EmailAddress matches the current line.
        if(Get-ADUser -Filter "EmailAddress -like '$line'" -Server $DC) {
            "$line FOUND in $DC"
            # Record the hit, then stop probing further DCs for this address.
            Add-Content -Path "C:\path\validemails.txt" -Value "$line : $DC"
            break
        }
        else {
            "$line not found in $DC"
        }
    }
}
Upvotes: 1 [selected_answer] |
2018/03/21 | 675 | 2,761 | <issue_start>username_0: I'm trying to determine if there's a practical way to prevent duplicate rows from being inserted into a table using Azure SQL DW when the table already holds billions of rows (say 20 billion).
The root cause of needing this is that the source of the data is a third party that sends over supposedly unique data, but sometimes sends duplicates which have no identifying key. I unfortunately have no idea if we've already received the data they're sending.
What I've tried is to create a table that contains a row hash column (pre-calculated from several other columns) and distribute the data based on that row hash. For example:
```
CREATE TABLE [SomeFact]
(
Row_key BIGINT NOT NULL IDENTITY,
EventDate DATETIME NOT NULL,
EmailAddress NVARCHAR(200) NOT NULL,
-- other rows
RowHash BINARY(16) NOT NULL
)
WITH
(
DISTRIBUTION = HASH(RowHash)
)
```
The insert SQL is approximately:
```
INSERT INTO [SomeFact]
(
EmailAddress,
EventDate,
-- Other rows
RowHash
)
SELECT
temp.EmailAddress,
temp.EventDate,
-- Other rows
temp.RowHash
FROM #StagingTable temp
WHERE NOT EXISTS (SELECT 1 FROM [SomeFact] f WHERE f.RowHash = temp.RowHash);
```
Unfortunately, this is just too slow. I added some statistics and even created a secondary index on RowHash and inserts of any real size (10 million rows, for example) won't run successfully without erroring due to transaction sizes. I've also tried batches of 50,000 and those too are simply too slow.<issue_comment>username_1: Two things I can think of that wouldn't have the singleton records you have in your query would be to
* Outer join your staging table with the fact table and filter on NULL values. Assuming you're using Clustered Column Store in your fact table, this should be much less expensive than the above.
* Do a CTAS with a Select Distinct from the existing fact table, and a Select Distinct from the staging table joined together with a UNION.
My gut says the first option will be faster, but you'll probably want to look at the query plan and test both approaches.
Upvotes: 1 <issue_comment>username_2: Can you partition the 'main' table by EventDate and, assuming new data has a recent EventDate, CTAS out only the partitions that include the EventDate's of the new data, then 'Merge' the data with CTAS / UNION of the 'old' and 'new' data into a table with the same partition schema (UNION will remove the duplicates) or use the INSERT method you developed against the smaller table, then swap the partition(s) back into the 'main' table.
Note - There is a new option on the partition swap command that allows you to directly 'swap in' a partition in one step: "WITH (TRUNCATE\_TARGET = ON)".
Upvotes: 0 |
2018/03/21 | 1,231 | 4,072 | <issue_start>username_0: The `ndiff` function from `difflib` allows a nice interface to detect differences in lines. It does a great job when the lines are close enough:
```
>>> print '\n'.join(list(ndiff(['foo*'], ['foot'], )))
- foo*
? ^
+ foot
? ^
```
But when the lines are too dissimilar, the rich reporting is no longer possible:
```
>>> print '\n'.join(list(ndiff(['foo'], ['foo*****'], )))
- foo
+ foo*****
```
This is the use case I am hitting, and I am trying to find ways to use `ndiff` (or the underlying class `Differ`) to force the reporting even if the strings are too dissimilar.
For the failing example, I would like to have a result like:
```
>>> print '\n'.join(list(ndiff(['foo'], ['foo*****'], )))
- foo
+ foo*****
? +++++
```<issue_comment>username_1: It seems what you want to do here is not to compare across multiple lines, but across strings. You can then pass your strings directly, without a list, and you should get a behaviour close to the one you are looking for.
```
>>> print ('\n'.join(list(ndiff('foo', 'foo*****'))))
f
o
o
+ *
+ *
+ *
+ *
+ *
```
Even though the output format is not the exact one you are looking for, it encapsulates the correct information. We can make an output adapter to give the correct format.
```
def adapter(out):
chars = []
symbols = []
for c in out:
chars.append(c[2])
symbols.append(c[0])
return ''.join(chars), ''.join(symbols)
```
This can be used like so.
```
>>> print ('\n'.join(adapter(ndiff('foo', 'foo*****'))))
foo*****
+++++
```
Upvotes: 0 <issue_comment>username_2: The function responsible for printing the context (i.e. those lines starting with `?`) is [`Differ._fancy_replace`](https://github.com/python/cpython/blob/3.6/Lib/difflib.py#L928). That function works by checking whether the two lines are equal by at least 75% (see the `cutoff` variable). Unfortunately, that 75% cutoff is hard-coded and cannot be changed.
What I can suggest is to subclass `Differ` and provide a version of `_fancy_replace` that simply ignores the cutoff. Here it is:
```
from difflib import Differ, SequenceMatcher
class FullContextDiffer(Differ):
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
"""
Copied and adapted from https://github.com/python/cpython/blob/3.6/Lib/difflib.py#L928
"""
best_ratio = 0
cruncher = SequenceMatcher(self.charjunk)
for j in range(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in range(alo, ahi):
ai = a[i]
if ai == bj:
continue
cruncher.set_seq1(ai)
if cruncher.real_quick_ratio() > best_ratio and \
cruncher.quick_ratio() > best_ratio and \
cruncher.ratio() > best_ratio:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
yield from self._fancy_helper(a, alo, best_i, b, blo, best_j)
aelt, belt = a[best_i], b[best_j]
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
raise ValueError('unknown tag %r' % (tag,))
yield from self._qformat(aelt, belt, atags, btags)
yield from self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi)
```
And here is an example of how it works:
```
a = [
'foo',
'bar',
'foobar',
]
b = [
'foo',
'bar',
'barfoo',
]
print('\n'.join(FullContextDiffer().compare(a, b)))
# Output:
#
# foo
# bar
# - foobar
# ? ---
#
# + barfoo
# ? +++
```
Upvotes: 2 [selected_answer] |
2018/03/21 | 1,132 | 3,700 | <issue_start>username_0: I have this function in my user.rb Model :
```
def change_key
self.public_key = params[:public_key]
end
```
I want to call this function from a script on Views (new.html.erb) like this :
```
change_key(x);
```
and I want self.public\_key to get the x value
The result I get is :
>
> undefined local variable or method `params' for
>
>
>
>
Any thoughts??<issue_comment>username_1: It seems what you want to do here is not to compare across multiple lines, but across strings. You can then pass your strings directly, without a list, and you should get a behaviour close to the one you are looking for.
```
>>> print ('\n'.join(list(ndiff('foo', 'foo*****'))))
f
o
o
+ *
+ *
+ *
+ *
+ *
```
Even though the output format is not the exact one you are looking for, it encapsulates the correct information. We can make an output adapter to give the correct format.
```
def adapter(out):
chars = []
symbols = []
for c in out:
chars.append(c[2])
symbols.append(c[0])
return ''.join(chars), ''.join(symbols)
```
This can be used like so.
```
>>> print ('\n'.join(adapter(ndiff('foo', 'foo*****'))))
foo*****
+++++
```
Upvotes: 0 <issue_comment>username_2: The function responsible for printing the context (i.e. those lines starting with `?`) is [`Differ._fancy_replace`](https://github.com/python/cpython/blob/3.6/Lib/difflib.py#L928). That function works by checking whether the two lines are equal by at least 75% (see the `cutoff` variable). Unfortunately, that 75% cutoff is hard-coded and cannot be changed.
What I can suggest is to subclass `Differ` and provide a version of `_fancy_replace` that simply ignores the cutoff. Here it is:
```
from difflib import Differ, SequenceMatcher
class FullContextDiffer(Differ):
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
"""
Copied and adapted from https://github.com/python/cpython/blob/3.6/Lib/difflib.py#L928
"""
best_ratio = 0
cruncher = SequenceMatcher(self.charjunk)
for j in range(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in range(alo, ahi):
ai = a[i]
if ai == bj:
continue
cruncher.set_seq1(ai)
if cruncher.real_quick_ratio() > best_ratio and \
cruncher.quick_ratio() > best_ratio and \
cruncher.ratio() > best_ratio:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
yield from self._fancy_helper(a, alo, best_i, b, blo, best_j)
aelt, belt = a[best_i], b[best_j]
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
raise ValueError('unknown tag %r' % (tag,))
yield from self._qformat(aelt, belt, atags, btags)
yield from self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi)
```
And here is an example of how it works:
```
a = [
'foo',
'bar',
'foobar',
]
b = [
'foo',
'bar',
'barfoo',
]
print('\n'.join(FullContextDiffer().compare(a, b)))
# Output:
#
# foo
# bar
# - foobar
# ? ---
#
# + barfoo
# ? +++
```
Upvotes: 2 [selected_answer] |
2018/03/21 | 873 | 3,453 | <issue_start>username_0: I cannot find documentation on how to invoke onChange for radio buttons using react-bootstrap. I left out the button which submits the form. It works with every other field, like text inputs, so I left that out.
For each, teacherRate and overallRate, each radio button has a value of 1, 2, 3 but I am not sure how to tie that in.
I also know that I cannot have the values be the same for each category.
I am not looking to do Button Groups.
I looked online for similar answers but cannot find any. There was one person who posted their problem like mine but answered later saying he implemented react-bootstrap but did not post his solution. I cannot reply as I do not have enough points.
```
class Assignment extends Component {
constructor(props){
super(props)
this.state = {
form: {
teacherRate: '',
overallRate: ''
}
}
}
handleChange(event){
const formState = Object.assign({}, this.state.form)
formState[event.target.name] = event.target.value
this.setState({form: formState})
}
render() {
return (
Rate the Teacher
1{' '}
2{' '}
3{' '}
Overall Rating
1{' '}
2{' '}
3{' '}
);
}
}
```<issue_comment>username_1: The onChange event is being fired, but your handleChange function is not doing what you are expecting.
```
handleChange(event){
const formState = Object.assign({}, this.state.form)
formState[event.target.name] = event.target.value
this.setState({form: formState})
}
```
If you take a look in a debugger, event.target does not have a name or value attribute for a check box. It simply has a checked attribute. You will have to find another way to get the information you are looking for out of the target.
Upvotes: 1 <issue_comment>username_2: To piggyback off the previous poster, in your Radio buttons you have `value={this.state.form.adminRate}` but adminRate is never defined in
```
this.state = {
form: {
teacherRate: '',
overallRate: ''
}
```
So when you return `formState[event.target.name] = event.target.value` from your handleChange function it's not inputting any value into the `event.target.name`. So you may just want to input 1, 2, and 3 as the value in the corresponding buttons.
Also even if it did input values, `event.target.name` in your Radio buttons are adminRate so you'd run into the this.state problem again, so you'd have to convert `name={adminRate}` to `name={teacherRate}` in the first FormGroup buttons so that the formState object in
```
handleChange(event){
const formState = Object.assign({}, this.state.form)
formState[event.target.name] = event.target.value
this.setState({form: formState})
}
```
points to the teacherRate property inside the form object defined in this.state when you call `this.setState({form: formState})` at the end of the function.
I don't know the relevance of using the 'Overall Rating' radio buttons so I can't assist with the second FormGroup but it would basically be the same logic as the first. You'd need `Math` logic to get the average number for the overall rating though.
Upvotes: 1 <issue_comment>username_3: You need to set the `checked` attribute on the element as a response to some state/props, the following is a snippet from a Formik / react-bootstrap form:
```
setStatus({ upload\_radio: e.target.value })}
>
```
Upvotes: 0 |
2018/03/21 | 403 | 1,580 | <issue_start>username_0: When I tried to display a related object in Twig, and that relation is not present because the ID is in the parent entity, but the related entity was not in the current database, Symfony throws a 500 error
>
> // EntityNotFoundException Twig\_Error\_Runtime
> An exception has been thrown during the rendering of a template
> ("Entity of type 'App\Entity\Location' for IDs id(265) was not
> found").
>
>
>
I'd like to be able to ignore this error and instead display something like "Related object missing".
It seemed like this could be solved by some checking in Twig, but checking if the relation is null or not defined does not work - they both still find the relation, but when a property is called on the related entity, the exception is thrown.
Has anyone solved this problem already?<issue_comment>username_1: You could check if the entity exists in a Twig extension
Something like:
```
public function isRelatedEntityDefined($entity)
{
try {
if(isset($entity->getSomeField()) return true;
} catch (EntityNotFoundException $e) {
return false;
}
}
```
Upvotes: 2 <issue_comment>username_2: Have a look at this subject
[On delete cascade with doctrine2](https://stackoverflow.com/questions/6328535/on-delete-cascade-with-doctrine2)
Isn't the problem from your annotation ?
On your owning side the ID is still defined but the entity doesn't exist anymore.
You should do something like this :
```
* @JoinColumn(name="locationId", referencedColumnName="id", onDelete="set null")
```
Upvotes: 0 |
2018/03/21 | 919 | 3,346 | <issue_start>username_0: Working on getting development environment setup in Minikube and ran across an issue pulling images from the `https://quay.io/v2/` registry.
I have ran the command:
`eval $(minikube docker-env)` .
Which allows me to build my local `Dockerfile` in Minikube and it does a great job with that and deployments work great with local images.
I then used helm to install
`helm install stable/mssql-linux` .
Which worked fine and its image points to this `microsoft/mssql-server-linux:2017-CU3` [HERE](https://hub.docker.com/r/microsoft/mssql-server-linux/)
I am also working with [redis-ha](https://github.com/kubernetes/charts/tree/master/stable/redis-ha) and installed like so:
`helm install stable/redis-ha --set="rbac.create=false"`
The `rbac.create=false` seems to allow it to install in Minikube without causing all sorts of issues. However, despite creating deployments and services...the deployments ultimately fail because it cant pull the image.
I get the following error:
`Failed to pull image "quay.io/smile/redis:4.0.8r0": rpc error: code = Unknown desc = Error response from daemon: Get https://quay.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)`
The deployments point to this [registry](https://quay.io/repository/smile/redis?tab=info) image: `quay.io/smile/redis:4.0.8r0`
I have changed my DNS pretty much everywhere I could to point to `8.8.8.8` as it seems like it can't resolve the URL. It could also just be that I need to add the registry someplace? I kind of feel that it's registry-specific since the Minikube docker daemon appears to be able to pull from `docker hub` but not `quay.io`.
If I use a terminal that is not running `eval $(minikube docker-env)` and use the docker daemon on my host computer I can pull the `quay.io/smile/redis:4.0.8r0` image just fine...ssh into minikube and try and it cant pull.
**Minikube version**
`minikube version: v0.25.0`
**Docker for Mac**
`Version 17.12.0-ce-mac55 (23011)`<issue_comment>username_1: >
> as it seems like it cant resolve the URL
>
>
>
What lead you to believe that, when the error clearly states that it has a `Client.Timeout exceeded while awaiting headers`? It resolved the registry to an IP address, and even apparently opened a network connection to what it thinks is the registry's IP and port. But after that, the networking stack in minikube did not, in fact, allow the traffic out. Observe that the error wasn't DNS, and it wasn't connection refused, it was connection timed out. That is almost always a firewall-esque behavior.
That smells very, very much like a corporate HTTP proxy, since your machine can interact with the Internet but minikube cannot.
There are a ton of troubleshooting steps one could go through, however, if you are interested in a very quick win, you can, from your working host computer, run `docker save quay.io/smile/redis:4.0.8r0 | ssh-into-minikube "docker load"` and treat minikube as if it were airgapped.
Upvotes: 1 <issue_comment>username_2: I don't know what the underlying reason was... perhaps Minikube was just being fragile, but I ended up:
Removing minikube
`rm -rf ~/.minikube`
Running start again
`minikube start --vm-driver=hyperkit`
Reran init `helm init`
Now everything is pulling as it should....
Upvotes: 0 |
2018/03/21 | 555 | 1,988 | <issue_start>username_0: I am currently doing a little project which involves using a pre-defined function I graphed the function, and I am trying to find an x-value answer using a provided y-coordinate. I'm sure there may be other ways, but I must use this particular function. Below is what I currently have:
```
F[L_] := (ArcTan[(L + 80)/25]/Pi + 0.6)*(0.55 -
0.4*Cos[(Pi/100)*(L + 10)])
FindRoot[F[L] == 0.8, {x, 55}]
```
I am sure that my function is correct, as I am able to retrieve a graph, which looks right for the function.
I am unsure if maybe I should try changing my '55' value?
Here is the error that I receive from the program:
```
FindRoot::nlnum
```<issue_comment>username_1: >
> as it seems like it cant resolve the URL
>
>
>
What lead you to believe that, when the error clearly states that it has a `Client.Timeout exceeded while awaiting headers`? It resolved the registry to an IP address, and even apparently opened a network connection to what it thinks is the registry's IP and port. But after that, the networking stack in minikube did not, in fact, allow the traffic out. Observe that the error wasn't DNS, and it wasn't connection refused, it was connection timed out. That is almost always a firewall-esque behavior.
That smells very, very much like a corporate HTTP proxy, since your machine can interact with the Internet but minikube cannot.
There are a ton of troubleshooting steps one could go through, however, if you are interested in a very quick win, you can, from your working host computer, run `docker save quay.io/smile/redis:4.0.8r0 | ssh-into-minikube "docker load"` and treat minikube as if it were airgapped.
Upvotes: 1 <issue_comment>username_2: I don't know what the underlying reason was... perhaps Minikube was just being fragile, but I ended up:
Removing minikube
`rm -rf ~/.minikube`
Running start again
`minikube start --vm-driver=hyperkit`
Reran init `helm init`
Now everything is pulling as it should....
Upvotes: 0 |
2018/03/21 | 695 | 2,645 | <issue_start>username_0: I wrote a simple PowerShell script to retrieve a list of servers' last boot time and output the results to grid view. The results are immediately shown in the grid window but and comes to a short pause whenever a server is not responding to the get command, either due to WMI not running or class not registered. It then displays the error in PS and move to the next server.
Now, the results aren't helpful unless the "not responding" servers are shown in the results windows.
```
$servers = ('serverx','serverb')
Get-WmiObject -Class Win32_OperatingSystem -ComputerName $servers |
select csname, @{LABEL='LastBootUpTime';EXPRESSION={$_.ConvertToDateTime($_.LastBootupTime)}},
@{LABEL='LocalTime';EXPRESSION={$_.ConvertToDateTime($_.LocalDateTime)}},
@{LABEL='UpTime';EXPRESSION={(Get-Date) - $_.ConvertToDateTime($_.LastBootupTime)}},
@{LABEL='OS';EXPRESSION={$_.Caption}} |
Out-GridView
```
Errors type shown in PS window in Red:
1. Get-WmiObject : Class not registered (Exception from HRESULT: 0x80040154 (REGDB\_E\_CLASSNOTREG)) At line:1 char:12
2. Get-WmiObject : The RPC server is unavailable. (Exception from HRESULT: 0x800706BA) At line:1 char:12
Edit: How can I output the good results along with the names of the servers that responded with an error?
> as it seems like it cant resolve the URL
>
>
>
What lead you to believe that, when the error clearly states that it has a `Client.Timeout exceeded while awaiting headers`? It resolved the registry to an IP address, and even apparently opened a network connection to what it thinks is the registry's IP and port. But after that, the networking stack in minikube did not, in fact, allow the traffic out. Observe that the error wasn't DNS, and it wasn't connection refused, it was connection timed out. That is almost always a firewall-esque behavior.
That smells very, very much like a corporate HTTP proxy, since your machine can interact with the Internet but minikube cannot.
There are a ton of troubleshooting steps one could go through, however, if you are interested in a very quick win, you can, from your working host computer, run `docker save quay.io/smile/redis:4.0.8r0 | ssh-into-minikube "docker load"` and treat minikube as if it were airgapped.
Upvotes: 1 <issue_comment>username_2: I don't know what the underlying reason was... perhaps Minikube was just being fragile, but I ended up:
Removing minikube
`rm -rf ~/.minikube`
Running start again
`minikube start --vm-driver=hyperkit`
Reran init `helm init`
Now everything is pulling as it should....
Upvotes: 0 |
2018/03/21 | 685 | 2,469 | <issue_start>username_0: How do I return the human readable element of a Choice field in a Serializer Class. Sample code below.
```
from rest_framework import serializers
from model_utils import Choices
from django.utils.translation import ugettext_lazy as _
COMPANY_TYPE = Choices(
(1, 'Public', _('Public Company')),
(2, 'Private', _('Private Company')),
(3, 'Other', _('Other Type')),
)
class CompanySerializer(serializers.ModelSerializer):
company_type = serializers.ChoiceField(choices=COMPANY_TYPE)
company_type_name = serializers.ReadOnlyField(source=COMPANY_TYPE[1]) # <=== This is the issue
class Meta:
model = Company
fields = ('id', 'title', 'company_type', 'company_type_name')
```
If say an entry in the company table has `company_type = 1`, and a user makes an API request, I want to include the extra field of `company_type_name` with the value `Public Company`.
So the issue is am unable to pass the current value of `company_type` to the serializer so that it can return the String value of the Choice Field.<issue_comment>username_1: From the [DRF Oficial DC](http://www.django-rest-framework.org/api-guide/fields/#choice-selection-fields) the `choices` must be a **list of valid values**, or a **list of (key, display\_name) tuples**
So your choices must be in following format,
```
COMPANY_TYPE = (
(1, 'Public'),
(2, 'Private'),
(3, 'Other'),
)
```
NB : `model_utils.Choices` does the same thing
I think you need a `SerializerMethodField` with `read_only=True` rather than a `ReadOnlyField`. So Change your serializer as below,
```
class CompanySerializer(serializers.ModelSerializer):
def get_company_type_name(self, obj):
return COMPANY_TYPE.__dict__.get('_display_map').get(obj['company_type'])
company_type = serializers.ChoiceField(choices=COMPANY_TYPE)
company_type_name = serializers.SerializerMethodField(read_only=True, source='get_company_type_name')
class Meta:
model = Company
fields = ('id', 'title', 'company_type', 'company_type_name')
```
Upvotes: 3 <issue_comment>username_2: You can do it with a method field and [get\_FOO\_display()](https://docs.djangoproject.com/en/dev/ref/models/instances/#django.db.models.Model.get_FOO_display)
```
company_type_name = serializers.SerializerMethodField()
def get_company_type_name(self, obj):
return obj.get_company_type_display()
```
Upvotes: 4 [selected_answer] |
2018/03/21 | 952 | 3,161 | <issue_start>username_0: I'm trying to find a way to password protect an entire Joomla site before going live the with site, with a temporary login page. I cannot do it in Joomla CMS for various reasons.
So I'm trying to find a way in .htaccess or in Apache `/etc/apache2/sites-available/example.com.conf`
I have root access to server.
Right now I use `AuthUserFile` and would like to remove this and go semi-live. Joomla runs from index.php and my PHP temporary login page runs from index2.php. I will change Apache configuration file to look for index2.php as first file.
index2.php:
```
php
$msg = '' ;
if (isset($_POST['Submit']) && !empty($_POST['username']) && !empty($_POST['password'])) {
if ($_POST['username'] == 'myusername' && $_POST['password'] == '<PASSWORD>') {
$_SESSION['valid'] = true;
$_SESSION['timeout'] = time();
$_SESSION['username'] = 'myusername';
//echo 'You have entered valid use name and password';
header('Location: https://example.com/index.php');
} else {
$msg = '<p style="color:#212529;"Wrong username or password.';
}
}?>
```
How can I restrict access to the entire Joomla site if `$_SESSION['valid'] = true;` is not valid set?
Is this even possible?
I'm not looking for a super secure way. It's just a login page for pre-access users before going live. A way for them to get in and look at my site.
Best regards,
Henrik<issue_comment>username_1: I will give a temporary solution :
**Use cookies** , so when the user in this stage :
```
if ($_POST['username'] == 'myusername' && $_POST['password'] == '<PASSWORD>') {
$_SESSION['valid'] = true;
$_SESSION['timeout'] = time();
$_SESSION['username'] = 'myusername';
//echo 'You have entered valid use name and password';
header('Location: https://example.com/index.php');
```
Before `$_SESSION['valid'] = true;` put this :
```
setcookie("user", "pass");
setcookie("user", "pass", time()+3600);
```
It is only for one day , then replace this :
`$msg = 'Wrong username or password.
';`
With this :
```
$msg = 'Please Login
';
```
The line above is suggestion to give good user experience even for few days .
Go to `.htaccess` in main root and put this :
```
RewriteEngine on
RewriteCond %{HTTP_COOKIE} !user=pass [NC]
RewriteCond %{REQUEST_URI} !^/index2.php
RewriteRule ^(.*)$ http://yourwebsite/index2.php [R=302,L]
```
Keep `R=302` as it is , it says it is temporary redirection and that what you need .
So , right now , no one will be able to explore a website unless passing `index2.php` check.
**Note:** clear browser cache then test.
Upvotes: 2 [selected_answer]<issue_comment>username_2: It looks like the way to go!
```
RewriteEngine On
RewriteCond %{HTTP_COOKIE} !user=pass [NC]
RewriteCond %{REQUEST_URI} !^/index2.php
RewriteCond %{REQUEST_FILENAME} !\.(?:css|jpg|png)$ [NC]
RewriteRule ^(.*)$ https://www.example.com/index2.php [R=302,L]
```
In Joomla's default `.htaccess` there is a lot of stuff, but right after `RewriteEngine On` I added this and included `css|jpg|png` to get my page looking nice. Below that I left all the other Joomla stuff. What do you think about that?
Upvotes: 0 |
2018/03/21 | 482 | 1,815 | <issue_start>username_0: I am trying to verify if the state of a check box is checked or not if the state is false click if not do something else. Should i use .getAttribute() or .getState(). I think I should use get state.
```
WebElement checkbox = driver.findElement(By.name("companyOptionsForm:mandateVerificationTotals"));
if (checkbox.getState() == true) {
//do something
} else if (checkbox.getState()== false) {
//do something
}
```
The HTML of the Check box
```
< input name = "companyOptionsForm:mandateVerificationTotals"
type = "checkbox"role = "checkbox"aria - checked = "true"
class = "dijitReset dijitCheckBoxInput"data - dojo - attach - point = "focusNode"
data - dojo - attach - event = "ondijitclick:_onClick"
value = "on"tabindex = "0"id = "mandateVerificationTotals"
checked = "checked" style = "user-select: none;" >
```
However when i use .getState() eclipse shows a red line under it.<issue_comment>username_1: do something like this:
```
WebElement checkbox = driver.findElement(By.name("companyOptionsForm:mandateVerificationTotals"));
if (checkbox.isSelected()) {
//do something
} else {
//do something else
}
```
Upvotes: 3 [selected_answer]<issue_comment>username_2: An AWT Checkbox has a method called getState(), but not a method called isSelected().
A Swing JCheckBox has a method called isSelected(), but not one called getState().
And as @Arthur showed, do not compare a boolean to true of false; just use
`if (booleanVariable)` or `if (!booleanVariable)`
Upvotes: 2 <issue_comment>username_3: You can also use `javascript` to verify if checkbox is selected
```
public boolean isChecked(WebElement element) {
return (boolean) ((JavascriptExecutor) driver).executeScript("return arguments[0].checked", element);
}
```
Upvotes: 1 |
2018/03/21 | 224 | 795 | <issue_start>username_0: ```
$ git checkout sidebar
fatal: unknown style 'diff5' given for 'merge.conflictstyle'
```
I am trying to change branch in my Git repository. Why do I get this error? How to fix it?<issue_comment>username_1: **`diff5` is not a valid setting for `merge.conflictstyle`.**
You should use `merge`:
```
git config merge.conflictstyle merge
```
...or `diff3`:
```
git config merge.conflictstyle diff3
```
[More information in Git's documentation](https://git-scm.com/docs/git-merge#git-merge-mergeconflictStyle)
Upvotes: 2 <issue_comment>username_2: git has an option to display merge conflicts in diff3 format (by default it only displays the two files to be merged). You can enable it like so:
```
$ git config --global merge.conflictstyle diff3
```
Upvotes: 0 |
2018/03/21 | 512 | 1,721 | <issue_start>username_0: I am learning angular ionic and currently attempting to use the angular forms, FormGroup, and FormBuilder features to validate some form data.
I have four fields that I would like to validate in relation to one another, but I am not sure how to write a validator that can see the value of other fields.
The relevant fields are:
`startDate, startTime, endDate, endTime.`
Because these are ionic datetime inputs, they are always going to be in "ISO 8601" format. So example values might be:
```
startDate: "2018-03-28"
startTime: "18:52"
```
My goal is to validate each of these so that they are valid if the start time and date is before the end time and date. For example all four fields are valid when:
```
startDate is "2018-03-28",
startTime is "14:30",
endDate is "2018-03-28", and
endTime is "18:00"
```
And all four fields are invalid when, for example:
```
startDate is "2018-03-28",
startTime is "14:30",
endDate is "2018-02-24", and
endTime is "23:00"
```
How can I go about comparing the values of these fields using the angular form features?
Any help would be greatly appreciated!<issue_comment>username_1: **`diff5` is not a valid setting for `merge.conflictstyle`.**
You should use `merge`:
```
git config merge.conflictstyle merge
```
...or `diff3`:
```
git config merge.conflictstyle diff3
```
[More information in Git's documentation](https://git-scm.com/docs/git-merge#git-merge-mergeconflictStyle)
Upvotes: 2 <issue_comment>username_2: git has an option to display merge conflicts in diff3 format (by default it only displays the two files to be merged). You can enable it like so:
```
$ git config --global merge.conflictstyle diff3
```
Upvotes: 0 |
2018/03/21 | 1,905 | 6,855 | <issue_start>username_0: I'm trying to execute a [Python script](https://pastebin.com/jSjn1aaD), but I am getting the following error:
```
Process finished with exit code 139 (interrupted by signal 11: SIGSEGV)
```
I'm using python 3.5.2 on a Linux Mint 18.1 Serena OS
Can someone tell me why this happens, and how can I solve?<issue_comment>username_1: The SIGSEGV signal indicates a "[segmentation violation](https://en.wikipedia.org/wiki/Segmentation_fault)" or a "segfault". More or less, this equates to a read or write of a memory address that's not mapped in the process.
This indicates a bug in your program. In a Python program, this is either a bug in the interpreter or in an extension module being used (and the latter is the most common cause).
To fix the problem, you have several options. One option is to produce a minimal, self-contained, complete example which replicates the problem and then submit it as a bug report to the maintainers of the extension module it uses.
Another option is to try to track down the cause yourself. [gdb](https://community.linuxmint.com/software/view/gdb) is a valuable tool in such an endeavor, as is a debug build of Python and all of the extension modules in use.
After you have gdb installed, you can use it to run your Python program:
```
gdb --args python
```
And then use gdb commands to track down the problem. If you use `run` then your program will run until it would have crashed and you will have a chance to inspect the state using other gdb commands.
Upvotes: 7 [selected_answer]<issue_comment>username_2: When I encounter this problem, I realize there are some memory issues. I rebooted PC and solved it.
Upvotes: 3 <issue_comment>username_3: After some time I discovered that I was running a new TensorFlow version that gives errors on older computers. I solved the problem by downgrading the TensorFlow version to 1.4
Upvotes: 3 <issue_comment>username_4: Another possible cause (which I encountered today) is that you're trying to read/write a file which is open. In this case, simply closing the file and rerunning the script solved the issue.
Upvotes: 4 <issue_comment>username_5: found on other page.
interpreter: python 3.8
cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade\_frontalface\_default.xml")
this solved issue for me.
I was getting SIGSEGV with 2.7, upgraded my Python to 3.8, then got a different error with OpenCV, and found the answer on [OpenCV 4.0.0 SystemError: returned a result with an error set](https://stackoverflow.com/questions/54273050/opencv-4-0-0-systemerror-class-cv2-cascadeclassifier-returned-a-result-with).
but eventually one line of code fixed it.
Upvotes: -1 <issue_comment>username_6: This can also be the case if your C program (e.g. using `cpython`) is trying to access a variable out of bounds
```
ctypedef struct ReturnRows:
double[10] your_value
cdef ReturnRows s_ReturnRows # Allocate memory for the struct
s_ReturnRows.your_value = [0] * 12
```
will fail with
```
Process finished with exit code 139 (interrupted by signal 11: SIGSEGV)
```
Upvotes: 3 <issue_comment>username_7: I received the same error when trying to connect to an Oracle DB using the `pyodbc` module:
```
connection = pyodbc.connect()
```
The error occurred on the following occasions:
* The DB connection has been opened multiple times in the same python
file
* While in debug mode a breakpoint has been reached
while the connection to the DB being open
The error message could be avoided with the following approaches:
* Open the DB only once and reuse the connection at all needed places
* Properly close the DB connection after using it
Hope, that will help anyone!
Upvotes: 1 <issue_comment>username_8: Deleting the Python interpreter and the 'venv' folder solved my error.
Upvotes: 2 <issue_comment>username_9: **11 : SIGSEGV** - This signal arises when a memory segment is illegally accessed.
There is a module named **signal** in Python through which you can handle this kind of OS signal.
If you want to ignore this **SIGSEGV** signal, you can do this:
```
signal.signal(signal.SIGSEGV, signal.SIG_IGN)
```
However, ignoring the signal can cause some inappropriate behaviours to your code, so it is better to handle the **SIGSEGV** signal with your defined handler like this:
```
def SIGSEGV_signal_arises(signalNum, stack):
print(f"{signalNum} : SIGSEGV arises")
# Your code
signal.signal(signal.SIGSEGV, SIGSEGV_signal_arises)
```
Upvotes: 1 <issue_comment>username_10: For me, I was using the OpenCV library to apply SIFT.
In my code, I replaced cv2.SIFT() to cv2.SIFT\_create() and the problem is gone.
Upvotes: 2 <issue_comment>username_11: I encountered this problem when I was trying to run my code on an external GPU which was disconnected. I set `os.environ['PYOPENCL_CTX']=2` where GPU 2 was not connected. So I just needed to change the code to os.environ`['PYOPENCL_CTX'] = 1`.
Upvotes: 0 <issue_comment>username_12: For me these three lines of code already reproduced the error, **no matter how much free memory was available**:
```
import numpy as np
from sklearn.cluster import KMeans
X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])
kmeans = KMeans(n_clusters=1, random_state=0).fit(X)
```
I could solve the issue by removing an reinstalling the `scikit-learn` package. A very similar solution to [this](https://stackoverflow.com/a/64841196/11671205).
Upvotes: 0 <issue_comment>username_13: This can also occur if trying to compound threads using concurrent.futures. For example, calling .map inside another .map call.
This can be solved by removing one of the .map calls.
Upvotes: 0 <issue_comment>username_14: I had the same issue working with kmeans from scikit-learn.
Upgrading from scikit-learn 1.0 to 1.0.2 solved it for me.
Upvotes: 0 <issue_comment>username_15: This issue is often caused by incompatible libraries in your environment. In my case, it was the `pyspark` library.
Upvotes: 0 <issue_comment>username_16: I got this error in PHP, while running PHPUnit. The reason was a circular dependency.
Upvotes: 2 <issue_comment>username_17: In my case, reverting my most recent conda installs fixed the situation.
Upvotes: 0 <issue_comment>username_18: I got this error when importing monai. It was solved after I created a new conda environment. Possible reasons I could imagine were either that there were some conflict between different packages, or maybe that my environment name was the same as the package name I wanted to import (monai).
Upvotes: 0 <issue_comment>username_19: in my case it was a pickled file, specifically a pandas DataFrame.
deleting the pickled file fixed the issue.
similar to this:
```
from pandas import DataFrame
df = DataFrame()
# somewhere
df.from_pickle('my_path.p')
# somewhere later
df.to_pickle('my_path.p')
```
Upvotes: 0 |
2018/03/21 | 790 | 2,581 | <issue_start>username_0: This is an additional question regarding the [original answer](https://stackoverflow.com/questions/22016509/change-parent-div-height-depending-on-child-absolute-div-height-using-javascript) (before the top positioning broke it) found at the bottom.
My problem is that once the css property `top` is used, the div is then offset and bleeds out of the wrapping parent div.
```js
$().ready(function(){
ch = $('#child-div').height();
$('#parent-div').css({
height : ch + 50 + 'px'
})
});
```
```css
#parent-div{
min-height: 400px;
width: 250px;
background-color: yellow;
position: relative;
}
#child-div{
background-color: red;
opacity: .8;
position: absolute;
height: auto;
top: 200px;
}
```
```html
Test 2
Test 2
Test 2
Test 2
Test 2
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
test
```<issue_comment>username_1: Just add an extra 200 to your height in JQuery to offset..
```
$().ready(function(){
ch = $('#child-div').height();
$('#parent-div').css({
height : ch + 250 + 'px'
})
});
```
Upvotes: -1 <issue_comment>username_2: Adding `50` isn't enough if the child element has a `top` of `200`.
Also, in JavaScript, any expression that contains:
```
"someString" + someNumber
```
will cause the number to be converted to a string and the result will be the concatenated string. If the only operator in the expression is `+` (as it is in your case, the expression will be evaluated left to right and, in your case, that's OK, but it does open up your code to potential bugs down the line.
It's better to organize the expression so that the math gets done first and then concatenate the `"px"`.
```js
$(function(){
ch = $('#child-div').height();
$('#parent-div').css({
// Do the math first, then concatenate
height: (ch + 200) + 'px'
});
});
```
```css
#parent-div{
min-height:400px;
width:250px;
background-color:yellow;
position:relative;
}
#child-div{
background-color:red;
opacity: .8;
position:absolute;
top: 200px;
}
```
```html
Test 2
Test 2
Test 2
Test 2
Test 2
test
test
test
test
test
test
test
test
test
```
Upvotes: 1 |
2018/03/21 | 913 | 3,353 | <issue_start>username_0: ```
class Cities(models.Model):
city_main_image = models.FileField()
city_name = models.CharField(max_length=200)
city_info = models.CharField(max_length=1000)
city_images = models.FileField()
```
In my models.py I have Cities class and I want to upload several images for this class variable, to be clearly for `city_images` variable how can I do this? Are there any way to do this or not?<issue_comment>username_1: One way you could do this is to make a `CityImage` model, which would allow you to make a `ForeignKey` to the `City` model, with a `related_name=images` for the reverse lookup.
```py
class City(models.Model):
name = models.CharField(max_length=200)
info = models.CharField(max_length=1000)
@property
def main_image(self):
try:
return self.images.get(primary=True)
except CityImage.DoesNotExist:
pass
except CityImage.MultipleObjectsReturned:
# Handle this case, the application should ensure only one is set to `True` at a time...
class CityImage(models.Model):
city = models.ForeignKey(City, related_name='images')
primary = models.BooleanField(default=False)
image = models.FileField()
```
Upvotes: 0 <issue_comment>username_2: Few notes about your code before answering your question.
1) Stick to singular model names, City rather than Cities.
2) Don't repeat the model name in every field, so you get "main\_image",
"name", "info", "images".
3) Use ImageField for images rather than FileField.
4) No need to have 2 fields for main\_image and images. You can add an extra field to make the image a main image or not.
Now, to answer your question, you need to read about relations in an SQL database.
To use relations between your models with django's ORM, look at <https://docs.djangoproject.com/en/2.0/ref/models/fields/#django.db.models.ForeignKey>
You have 2 options: `ForeignKey` or `ManyToManyField`. Stick with the `ForeignKey` as you don't need a many to many relation.
So you'll have something like the following:
```
class City(models.Model):
...
class CityImage(models.Model):
image = models.ImageField(...)
city = models.ForeignKey(City) # you might need to add on_delete parameter depending on your django version.
```
or
```
class CityImage(models.Model):
image = models.ImageField(...)
class City(models.Model):
...
images = models.ManyToManyField(CityImage)
```
Upvotes: 2 [selected_answer]<issue_comment>username_3: ```
class Cities(models.Model):
city_main_image = models.FileField()
city_name = models.CharField(max_length=200)
city_info = models.CharField(max_length=1000)
class CityImages(models.Model):
city_id = models.ForeignKey(Cities)
city_images = models.FileField()
```
Now each of your city in Cities can have one or more images in another model called CityImages. If we talk in terms of tables then the primary key for a row in table Cities would be associated to one or more rows in table city\_images. I will strongly suggest you to go through official introductory tutorial of django. Also I personally find [this tutorial](https://simpleisbetterthancomplex.com/series/2017/09/18/a-complete-beginners-guide-to-django-part-1.html) very helpful for beginners. Just in case it helps.
Upvotes: 1 |
2018/03/21 | 1,308 | 4,572 | <issue_start>username_0: I have a table `MAIN` which has duplicates on `ID` field as below:
```
ID SYSTEM FLAG FIRST_NAME LAST_NAME TERMDATE
A021 Alpha Y JOHN DOE null
A021 Beta N JOHN DOE 05-Jun-17
C045 Beta Y PETER PARKER null
C045 Omega N PETER PARKER 05-Jan-17
D078 Alpha N TONY STARK 07-Dec-17
D078 Gamma Y TONY STARK null
X039 Gamma Y STEVE ROGERS null
X039 Gamma Y STEVE ROGERS null
```
As you can see I have duplicates in ID field. I want to keep the records with null data in MAIN table and move others to a duplicate table. So I want my MAIN table's output to look like:
```
ID SYSTEM FLAG FIRST_NAME LAST_NAME TERMDATE
A021 Alpha Y JOHN DOE null
C045 Beta Y PETER PARKER null
D078 Gamma Y TONY STARK null
X039 Gamma Y STEVE ROGERS null
```
And the duplicates should be moved to a DUPLICATE TABLE which should look like:
```
ID SYSTEM FLAG FIRST_NAME LAST_NAME TERMDATE
A021 Beta N JOHN DOE 05-Jun-17
C045 Omega N PETER PARKER 05-Jan-17
D078 Alpha N TONY STARK 07-Dec-17
```
**To be noted the records that are exact duplicates were not moved to duplicate table e.x. X039 would be deleted entirely and not moved to duplicate table.**
I can't come to a script that will achieve this result.<issue_comment>username_1: One way you could do this is to make a `CityImage` model, which would allow you to make a `ForeignKey` to the `City` model, with a `related_name=images` for the reverse lookup.
```py
class City(models.Model):
name = models.CharField(max_length=200)
info = models.CharField(max_length=1000)
@property
def main_image(self):
try:
return self.images.get(primary=True)
except CityImage.DoesNotExist:
pass
except CityImage.MultipleObjectsReturned:
# Handle this case, the application should ensure only one is set to `True` at a time...
class CityImage(models.Model):
city = models.ForeignKey(City, related_name='images')
primary = models.BooleanField(default=False)
image = models.FileField()
```
Upvotes: 0 <issue_comment>username_2: Few notes about your code before answering your question.
1) Stick to singular model names, City rather than Cities.
2) Don't repeat the model name in every field, so you get "main\_image",
"name", "info", "images".
3) Use ImageField for images rather than FileField.
4) No need to have 2 fields for main\_image and images. You can add an extra field to make the image a main image or not.
Now, to answer your question, you need to read about relations in an SQL database.
To use relations between your models with django's ORM, look at <https://docs.djangoproject.com/en/2.0/ref/models/fields/#django.db.models.ForeignKey>
You have 2 options: `ForeignKey` or `ManyToManyField`. Stick with the `ForeignKey` as you don't need a many to many relation.
So you'll have something like the following:
```
class City(models.Model):
...
class CityImage(models.Model):
image = models.ImageField(...)
city = models.ForeignKey(City) # you might need to add on_delete parameter depending on your django version.
```
or
```
class CityImage(models.Model):
image = models.ImageField(...)
class City(models.Model):
...
images = models.ManyToManyField(CityImage)
```
Upvotes: 2 [selected_answer]<issue_comment>username_3: ```
class Cities(models.Model):
city_main_image = models.FileField()
city_name = models.CharField(max_length=200)
city_info = models.CharField(max_length=1000)
class CityImages(models.Model):
city_id = models.ForeignKey(Cities)
city_images = models.FileField()
```
Now each of your city in Cities can have one or more images in another model called CityImages. If we talk in terms of tables then the primary key for a row in table Cities would be associated to one or more rows in table city\_images. I will strongly suggest you to go through official introductory tutorial of django. Also I personally find [this tutorial](https://simpleisbetterthancomplex.com/series/2017/09/18/a-complete-beginners-guide-to-django-part-1.html) very helpful for beginners. Just in case it helps.
Upvotes: 1 |
2018/03/21 | 997 | 2,923 | <issue_start>username_0: I am trying to find the number of distinct numbers from input which are not equal to 0. n is in 1-100 range. The numbers in the array are in the 0-600 range.
<http://codeforces.com/problemset/problem/937/A>
For this question, I wrote a code:
```
#include
#include
int main(void)
{
int n, count = 0, i;
scanf("%d", &n);
int ar[n], ar2[601];
memset(ar2, 0, 600 \* sizeof(int));
for (i = 0; i < n; i++) {
scanf("%d ", &ar[i]);
if (ar2[ar[i]] == 0)
ar2[ar[i]] = 1;
}
for (i = 1; i < 601; i++) {
if (ar2[i] != 0)
count++;
}
printf("%d",count);
return 0;
}
```
for the first test case (4 1 3 3 2) , it outputs the right answer 3 in ideone.com 's gcc 6.3, but outputs 4 in gcc 5.1 which is used at codeforces.
Why does this happen, and how can I prevent this ?
(I think it's because of memset, but I'm not sure.)<issue_comment>username_1: You are defining an array of size `n` before the value of `n` has beed determined (note that you `scanf` the value of `n` later). This is undefined behaviour, such that different compilers may give different results, and even starting your program on your machine may give different results (including crashes).
instead of
```
int n, count = 0, i;
int ar[n];
...
```
write
```
int n, count = 0, i;
scanf("%d", &n);
int ar[n], ar2[601] = { 0 };
```
At least the malformed array should then be solved, and `ar2` is completely initialized with `0`. You can get rid of your `memset`, which initialized only 600 items (instead of 601) anyway.
Upvotes: 2 [selected_answer]<issue_comment>username_2: Here is a quicker solution to the problem
```
#include
int main()
{
bool seen\_number\_before[601] = { false };
int count = 0;
seen\_number\_before[0] = true;
int n;
scanf("%d", &n); // Should do error checking here
for (int i = 0; i < n; ++i) {
int v;
scanf("%d", v); // More error checking
if (!seen\_number\_before[v]) // Not seen it before
seen\_number\_before[v] = true; // Mark it as seen
++count; // Add to the count
}
}
printf("%d\n", count);
return 0;
}
```
}
Upvotes: 1 <issue_comment>username_3: There are some errors in your code, from a[n] when n is not defined.
To check errors try compiling with some useful options:
```
gcc -Wall code.c -o code -g
```
the -Wall is for Warning all and the -g is used for debug on valgrind (useful tool to check memory leak and other errors).
Also I suggest you to name properly every var in your code, could helpful for a large size of code base.
This is my solution,
```
#include
#include
int main(){
int n, count = 0;
scanf("%d", &n);
int \*a = malloc(n \* sizeof(int));
int hash[600] = { 0 };
for(int i=0; i
```
it can be optimized in time, using only one for, and/or in memory by creating an hashset of int, and every int can store 32 bit and do some bitwise operations, so if the nth bit is 1, count++, otherwise don't do nothing.
Upvotes: 0 |
2018/03/21 | 1,615 | 5,337 | <issue_start>username_0: I am using ASP.NET Core 2.0 with Microsoft.AspNetCore.SignalR (1.0.0-preview1-final).
I have an issue deploying my application using IIS (with Kestrel).
When I run the server in localhost using IIS Express, everything works as expected, but when I try to run on IIS (Windows server or local host windows 7), the negotiate call fail to *404*
>
> POST <http://localhost/notification/negotiate> 404 (Not Found)
>
>
>
[](https://i.stack.imgur.com/juqwA.png)
I tried configuring IIS with different topics found online, but no success. (, extension-less url with <https://support.microsoft.com/en-gb/help/980368/a-update-is-available-that-enables-certain-iis-7-0-or-iis-7-5-handlers>)
But I am still pretty sure the issue is related to my IIS configuration since the entire project works fine on IIS Express...
This is the `web.config` file used on the test server
(I removed the different debug tries, since it was not working...)
```
xml version="1.0" encoding="utf-8"?
```
Part of the code used in `Configure` method
```
app.UseSignalR(route =>
{
route.MapHub("/notification");
});
app.UseMvc(routes =>
{
routes.MapRoute(
name: "default",
template: "{controller=Home}/{action=Index}/{id?}");
routes.MapSpaFallbackRoute(
name: "spa-fallback",
defaults: new { controller = "Home", action = "Index" });
});
```
Part of the code used in `ConfigureServices` method
```
services.AddMvc()
services.AddSignalR();
services.AddSession();
```
Angular application code
```
constructor() {
/** ... **/
this.hubConnection = new HubConnection('/notification', {
transport: TransportType.LongPolling
});
this.hubConnection.on('Update', (data: string) => {
let res = new DefaultNotificationInfo(data);
/** ... **/
});
this.hubConnection.start()
.then(() => {
this.join().then(next => {
// nothing to do on join, the socket is running
}).catch(error => {
console.error(error)
});
})
.catch(err => {
console.error(err)
});
}
private join(): Promise {
return this.hubConnection.invoke('Join');
}
```
---
**Versions informations :**
IIS : **7.5**
APP net package.json:
```
{
/** ... **/
"@aspnet/signalr": "^1.0.0-preview1-update1",
"@aspnet/signalr-client": "^1.0.0-alpha2-final",
/** ... **/
}
```
API asp net nuget version : **(1.0.0-preview1-final)**
---
After reading
<https://blogs.msdn.microsoft.com/webdev/2017/09/14/announcing-signalr-for-asp-net-core-2-0/> and
<https://blogs.msdn.microsoft.com/webdev/2018/02/27/asp-net-core-2-1-0-preview1-getting-started-with-signalr/>, I am pretty sure I've covered everything from their tutorial/quick-start
Is there something I missed?
---
Update 1
--------
After finding this : <https://stackoverflow.com/a/43014264/3198096>
I tried to enable the LoadUserProfile property of my ApplicationPool using this link <https://blogs.msdn.microsoft.com/vijaysk/2009/03/08/iis-7-tip-3-you-can-now-load-the-user-profile-of-the-application-pool-identity/>
But this is still failing to 404.
I also tried to "Attach to process" on my application to see if there was any logs corresponding to my `Hub` or to the `/negotiate` url, but there was none.
*(also added a screenshot of the 404 result)*
Update 2
--------
This is an extract of package.config from the Web.App
[](https://i.stack.imgur.com/vfUP7.png)
Update 3
--------
This is the configuration of the server hosting the applications.
You can see 3 different website. All of them have almost the same configuration. (Just the port changing, which define 3 different test environment access)
[](https://i.stack.imgur.com/oqUBM.png)
And this is the basic localhost developer IIS, which I guess i generated by Visual Studio.
[](https://i.stack.imgur.com/SBYuz.png)<issue_comment>username_1: Do you have `app.UseSPA()` in your middlewares pipeline? if so try adding it after `app.UseSignalR()` like this
```
app.UseSignalR(routes =>
{
routes.MapHub("/hubs/test");
});
app.UseSpa((o) =>
{
if (env.IsDevelopment())
{
o.UseProxyToSpaDevelopmentServer("http://localhost:4200");
}
});
```
<https://github.com/aspnet/SignalR/issues/1511>
Upvotes: 2 <issue_comment>username_2: There is a very similar issue on [github](https://github.com/aspnet/SignalR/issues/1647). If the website isn't hosted on the root (ex. by using virtual directories or something similar), SignalR will use the wrong URL at the moment of writing. For example, if your "website-root" is on
```
http://YOUR_URL/test/index.html
```
you would have to use
```
this.hubConnection = new HubConnection('/test/notification', {
transport: TransportType.LongPolling,
logger: signalR.LogLevel.Trace
});
```
If the above doesn't work, I suggest you to add logging to your client app (as shown above) and your server. This will show you what exactly goes wrong.
Upvotes: 2 [selected_answer] |
2018/03/21 | 651 | 2,236 | <issue_start>username_0: I am creating a package that goes through a few folder paths and loads excel files. The file paths for the excel files are as follows. The files are located in "a" folder which is named the same from year to year, however, the "xy\*" folder name changes depending on the year. So I want SSIS to look for the excel files by searching through the H drive that has the "a" folder. I looked at System.IO.Directory.GetDirectories(string, string,SearchOptions) but that only gives me the first subfolder. for instance H:\x\xy2017. any help is much appreciated.
H:\x\xy2017\z\xy2017\a
H:\x\xy2017\z\xy2017\a
H:\x\xy2018\z\xy2018\a
This is the C# code I used in Script task to recursively loop through the folders. I am not sure what I am doing wrong.
```
Dts.Variables["User::varFolderPath1"].Value = Directory.GetFiles(@"S:\HEDIS", "*DMHM Lead Results*", SearchOption.AllDirectories);
foreach (Object obj in Dts.Variables["User::varFolderPath1"].Value.ToString())
{
Console.WriteLine(Dts.Variables["User::varFolderPath1"].Value.ToString());
}
```<issue_comment>username_1: Do you have `app.UseSPA()` in your middlewares pipeline? if so try adding it after `app.UseSignalR()` like this
```
app.UseSignalR(routes =>
{
routes.MapHub("/hubs/test");
});
app.UseSpa((o) =>
{
if (env.IsDevelopment())
{
o.UseProxyToSpaDevelopmentServer("http://localhost:4200");
}
});
```
<https://github.com/aspnet/SignalR/issues/1511>
Upvotes: 2 <issue_comment>username_2: There is a very similar issue on [github](https://github.com/aspnet/SignalR/issues/1647). If the website isn't hosted on the root (ex. by using virtual directories or something similar), SignalR will use the wrong URL at the moment of writing. For example, if your "website-root" is on
```
http://YOUR_URL/test/index.html
```
you would have to use
```
this.hubConnection = new HubConnection('/test/notification', {
transport: TransportType.LongPolling,
logger: signalR.LogLevel.Trace
});
```
If the above doesn't work, I suggest you to add logging to your client app (as shown above) and your server. This will show you what exactly goes wrong.
Upvotes: 2 [selected_answer] |
2018/03/21 | 579 | 2,000 | <issue_start>username_0: I'm using the below config in nginx to proxy RDP connection:
```
server {
listen 80;
server_name domain.com;
location / {
proxy_pass http://192.168.0.100:3389;
}
}
```
but the connection doesn't go through. My guess is that the problem is `http` in `proxy_pass`. Googling "Nginx RDP" didn't yield much.
Anyone knows if it's possible and if yes how?<issue_comment>username_1: Well actually you are right the `http` is the problem but not exactly that one in your code block. Lets explain it a bit:
In your `nginx.conf` file you have something similar to this:
```
http {
...
...
...
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}
```
So everything you write in your conf files are inside this `http` block/scope. But **rdp** is not **http** is a different protocol.
The only workaround I know for nginx to handle this is to work on `tcp` level.
So inside in your `nginx.conf` and outside the `http` block you have to declare the `stream` block like this:
```
stream {
# ...
server {
listen 80;
proxy_pass 192.168.0.100:3389;
}
}
```
With the above configuration just proxying your backend on tcp layer with a cost of course. As you may notice its missing the `server_name` attribute you can't use it in the `stream` scope, plus you lose all the logging functionality that comes on the `http` level.
For more info on this topic check the [docs](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/)
Upvotes: 5 [selected_answer]<issue_comment>username_2: For anyone who is looking to load balance RDP connection using Nginx, here is what I did:
Configure nginx as you normally would, to reroute HTTP(S) traffic to your desired server.
On that server, install [myrtille](https://github.com/cedrozor/myrtille) (it needs IIS and .Net 4.5) and you'll be able to RDP into your server from a browser!
Upvotes: 0 |
2018/03/21 | 1,910 | 6,634 | <issue_start>username_0: I am trying to use iText 7.1.1 to convert a TIFF image to PDF file with multiple pages. Thanks for those to get me started with this article [Create PDF from TIFF image using iText](https://stackoverflow.com/questions/7721447/creating-pdf-from-tiff-image-using-itext/13345673#13345673). However, it is iText 5.5.x and I have trouble to duplicate it in iText 7.
I did find `TiffImageData.getNumberOfPages(raf)` to replace `int pages = TiffImage.getNumberOfPages(rafa)`.
However, I am not able to replace `TiffImage.getTiffImage(rafa, i)` in iText7. Do I need to use `new Image(ImageDataFactory.createTiff(...))`. Appreciate any suggestion(s).
iText 5.5.x code
```
import java.io.FileOutputStream;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import com.itextpdf.text.Document;
import com.itextpdf.text.Image;
import com.itextpdf.text.Rectangle;
import com.itextpdf.text.io.FileChannelRandomAccessSource;
import com.itextpdf.text.pdf.PdfWriter;
import com.itextpdf.text.pdf.RandomAccessFileOrArray;
import com.itextpdf.text.pdf.codec.TiffImage;
public class Test1 {
public static void main(String[] args) throws Exception {
RandomAccessFile aFile = new RandomAccessFile("/myfolder/origin.tif", "r");
FileChannel inChannel = aFile.getChannel();
FileChannelRandomAccessSource fcra = new FileChannelRandomAccessSource(inChannel);
Document document = new Document();
PdfWriter.getInstance(document, new FileOutputStream("/myfolder/destination.pdf"));
document.open();
RandomAccessFileOrArray rafa = new RandomAccessFileOrArray(fcra);
int pages = TiffImage.getNumberOfPages(rafa);
Image image;
for (int i = 1; i <= pages; i++) {
image = TiffImage.getTiffImage(rafa, i);
Rectangle pageSize = new Rectangle(image.getWidth(), image.getHeight());
document.setPageSize(pageSize);
document.newPage();
document.add(image);
}
document.close();
aFile.close();
}
```<issue_comment>username_1: >
> Do I need to use new Image( ImageDataFactory.createTiff(...))
>
>
>
Yes.
You want this: [ImageDataFactory.createTiff(bytes, recoverFromImageError, page, direct)](http://itextsupport.com/apidocs/itext7/7.1.1/com/itextpdf/io/image/ImageDataFactory.html#createTiff-java.net.URL-boolean-int-boolean-)
Then you would open a new PDF, loop through the TIFF pages and:
1. Get the TIFF image size
2. Create a new page in the PDF matching the TIFF page size
3. Add the TIFF image to the new PDF page
Here is a note from <NAME> on using TIFF with iText 7: [How to avoid an exception when importing a TIFF file?](https://developers.itextpdf.com/fr/node/3128)
I see you probably want fully working code. Here you go:
```
import com.itextpdf.io.image.ImageData;
import com.itextpdf.io.image.ImageDataFactory;
import com.itextpdf.io.image.TiffImageData;
import com.itextpdf.io.source.RandomAccessFileOrArray;
import com.itextpdf.io.source.RandomAccessSourceFactory;
import com.itextpdf.kernel.geom.PageSize;
import com.itextpdf.kernel.geom.Rectangle;
import com.itextpdf.kernel.pdf.PdfDocument;
import com.itextpdf.kernel.pdf.PdfPage;
import com.itextpdf.kernel.pdf.PdfWriter;
import com.itextpdf.kernel.pdf.canvas.PdfCanvas;
public class TiffToPdf {
public static void main(String[] args) throws IOException {
Path tiffFile = Paths.get("/myfolder/origin.tiff");
RandomAccessFileOrArray raf = new RandomAccessFileOrArray(new RandomAccessSourceFactory().createBestSource(tiffFile.toString()));
int tiffPages = TiffImageData.getNumberOfPages(raf);
raf.close();
try (PdfDocument output = new PdfDocument(new PdfWriter("/myfolder/destination.pdf"))) {
for (int page = 1; page <= tiffPages; page++) {
ImageData tiffImage = ImageDataFactory.createTiff(tiffFile.toUri().toURL(), true, page, true);
Rectangle tiffPageSize = new Rectangle(tiffImage.getWidth(), tiffImage.getHeight());
PdfPage newPage = output.addNewPage(new PageSize(tiffPageSize));
PdfCanvas canvas = new PdfCanvas(newPage);
canvas.addImage(tiffImage, tiffPageSize, false);
}
}
}
}
```
Some might suggest you use the high level API to achieve this a little more cleanly but this should be sufficient for your question.
Upvotes: 4 [selected_answer]<issue_comment>username_2: This is the same above but in vb.net.
It converts a multipage TIFF to a PDF.
```
Imports System.IO
Imports iTextSharp.text
Imports iTextSharp.text.pdf
Sub ConvertTIFF2PDF(ByVal inFile As String, ByVal outFile As String)
Dim pdfDoc As PdfDocument = New PdfDocument(New PdfWriter(outFile))
Dim doc As Document = New Document(pdfDoc)
Dim aFile = New RandomAccessFileOrArray(New RandomAccessSourceFactory().CreateBestSource(inFile.ToString))
Dim tiffPages = TiffImageData.GetNumberOfPages(aFile)
Dim uri As System.Uri = New Uri(inFile)
For i As Integer = 1 To tiffPages
Console.WriteLine("tiffPages: " & (i) & " of " & tiffPages.ToString)
Dim tiffImage = ImageDataFactory.CreateTiff(uri, False, i, False)
Dim tiffPageSize = New Geom.Rectangle(tiffImage.GetWidth(), tiffImage.GetHeight())
Dim newPage = pdfDoc.AddNewPage(New PageSize(tiffPageSize))
Dim canvas As PdfCanvas = New PdfCanvas(newPage)
canvas.AddImage(tiffImage, tiffPageSize, False)
Next
doc.Close()
pdfDoc.Close()
aFile.Close()
End Sub
```
Upvotes: 2 <issue_comment>username_3: It's Just the C# Version :
```
public void ConvertTIFF2PDF(string inFile, string outFile)
{
iTextSharp.text.Document document = new iTextSharp.text.Document(iTextSharp.text.PageSize.A4, 0, 0, 0, 0);
iTextSharp.text.pdf.PdfWriter writer = iTextSharp.text.pdf.PdfWriter.GetInstance(document, new FileStream(outFile, FileMode.Open));
Bitmap bm = new Bitmap(inFile);
int total = bm.GetFrameCount(FrameDimension.Page);
document.Open();
iTextSharp.text.pdf.PdfContentByte cb = writer.DirectContent;
for (int k = 0; k < total; ++k)
{
bm.SelectActiveFrame(FrameDimension.Page, k);
iTextSharp.text.Image img = iTextSharp.text.Image.GetInstance(bm, ImageFormat.Bmp);
// scale the image to fit in the page
img.ScalePercent(72f / img.DpiX * 100);
img.SetAbsolutePosition(0, 0);
cb.AddImage(img);
document.NewPage();
}
document.Close();
}
```
Upvotes: 0 |
2018/03/21 | 1,821 | 6,110 | <issue_start>username_0: I have the following dataset:

I want to go through the ID's in the `action_ID` column and check if it is in the `value` column. If it is, I want to see if the associated variable is a `Comment` or not. If it is a `Comment`, I will add 1 to the count of the number of comments for that ID into a new data frame, called `final`, which consists of the `action_ID` and number of comments.
This is the code I have written so far:
```
final = data.frame(action_ID = c(1001,981,734,985))
for (x in shares$action_ID) {
if ((x %in% shares$value) & (shares$variable[shares$value == x] =="Comment")){
final$num_comments[final$action_ID == x] =+ 1
}else {
final$num_comments[final$action_ID == x] =+ 0
}
}
```
Whenever I run it doesn't work. I tried to debug it by just looking at the first condition in the if-statement and it turns out for some reason the if statement isn't really working. Every value in `action_ID` gets outputted. I also try using any which didn't work either.
```
for (x in shares$action_ID){
print(x)
if (any(shares$value == x)){ # & (shares$variable[shares$value == x]== "Comment")){
print(x)
}
}
```
output:
```
[1] "734"
[1] "1001"
[1] "1001"
[1] "985"
[1] "981"
```
Thanks for any help!!
EDIT:
I don't think I was very clear about the output, I am trying to create a table that will give a count of the number of posts that have 0 comments, 1 comment, 2 comments, etc.<issue_comment>username_1: >
> Do I need to use new Image( ImageDataFactory.createTiff(...))
>
>
>
Yes.
You want this: [ImageDataFactory.createTiff(bytes, recoverFromImageError, page, direct)](http://itextsupport.com/apidocs/itext7/7.1.1/com/itextpdf/io/image/ImageDataFactory.html#createTiff-java.net.URL-boolean-int-boolean-)
Then you would open a new PDF, loop through the TIFF pages and:
1. Get the TIFF image size
2. Create a new page in the PDF matching the TIFF page size
3. Add the TIFF image to the new PDF page
Here is a note from <NAME> on using TIFF with iText 7: [How to avoid an exception when importing a TIFF file?](https://developers.itextpdf.com/fr/node/3128)
I see you probably want fully working code. Here you go:
```
import com.itextpdf.io.image.ImageData;
import com.itextpdf.io.image.ImageDataFactory;
import com.itextpdf.io.image.TiffImageData;
import com.itextpdf.io.source.RandomAccessFileOrArray;
import com.itextpdf.io.source.RandomAccessSourceFactory;
import com.itextpdf.kernel.geom.PageSize;
import com.itextpdf.kernel.geom.Rectangle;
import com.itextpdf.kernel.pdf.PdfDocument;
import com.itextpdf.kernel.pdf.PdfPage;
import com.itextpdf.kernel.pdf.PdfWriter;
import com.itextpdf.kernel.pdf.canvas.PdfCanvas;
public class TiffToPdf {
public static void main(String[] args) throws IOException {
Path tiffFile = Paths.get("/myfolder/origin.tiff");
RandomAccessFileOrArray raf = new RandomAccessFileOrArray(new RandomAccessSourceFactory().createBestSource(tiffFile.toString()));
int tiffPages = TiffImageData.getNumberOfPages(raf);
raf.close();
try (PdfDocument output = new PdfDocument(new PdfWriter("/myfolder/destination.pdf"))) {
for (int page = 1; page <= tiffPages; page++) {
ImageData tiffImage = ImageDataFactory.createTiff(tiffFile.toUri().toURL(), true, page, true);
Rectangle tiffPageSize = new Rectangle(tiffImage.getWidth(), tiffImage.getHeight());
PdfPage newPage = output.addNewPage(new PageSize(tiffPageSize));
PdfCanvas canvas = new PdfCanvas(newPage);
canvas.addImage(tiffImage, tiffPageSize, false);
}
}
}
}
```
Some might suggest you use the high level API to achieve this a little more cleanly but this should be sufficient for your question.
Upvotes: 4 [selected_answer]<issue_comment>username_2: This is the same as above, but in VB.NET.
It converts a multipage TIFF to a PDF.
```
Imports System.IO
Imports iTextSharp.text
Imports iTextSharp.text.pdf
Sub ConvertTIFF2PDF(ByVal inFile As String, ByVal outFile As String)
Dim pdfDoc As PdfDocument = New PdfDocument(New PdfWriter(outFile))
Dim doc As Document = New Document(pdfDoc)
Dim aFile = New RandomAccessFileOrArray(New RandomAccessSourceFactory().CreateBestSource(inFile.ToString))
Dim tiffPages = TiffImageData.GetNumberOfPages(aFile)
Dim uri As System.Uri = New Uri(inFile)
For i As Integer = 1 To tiffPages
Console.WriteLine("tiffPages: " & (i) & " of " & tiffPages.ToString)
Dim tiffImage = ImageDataFactory.CreateTiff(uri, False, i, False)
Dim tiffPageSize = New Geom.Rectangle(tiffImage.GetWidth(), tiffImage.GetHeight())
Dim newPage = pdfDoc.AddNewPage(New PageSize(tiffPageSize))
Dim canvas As PdfCanvas = New PdfCanvas(newPage)
canvas.AddImage(tiffImage, tiffPageSize, False)
Next
doc.Close()
pdfDoc.Close()
aFile.Close()
End Sub
```
Upvotes: 2 <issue_comment>username_3: It's just the C# version:
```
public void ConvertTIFF2PDF(string inFile, string outFile)
{
iTextSharp.text.Document document = new iTextSharp.text.Document(iTextSharp.text.PageSize.A4, 0, 0, 0, 0);
iTextSharp.text.pdf.PdfWriter writer = iTextSharp.text.pdf.PdfWriter.GetInstance(document, new FileStream(outFile, FileMode.Open));
Bitmap bm = new Bitmap(inFile);
int total = bm.GetFrameCount(FrameDimension.Page);
document.Open();
iTextSharp.text.pdf.PdfContentByte cb = writer.DirectContent;
for (int k = 0; k < total; ++k)
{
bm.SelectActiveFrame(FrameDimension.Page, k);
iTextSharp.text.Image img = iTextSharp.text.Image.GetInstance(bm, ImageFormat.Bmp);
// scale the image to fit in the page
img.ScalePercent(72f / img.DpiX * 100);
img.SetAbsolutePosition(0, 0);
cb.AddImage(img);
document.NewPage();
}
document.Close();
}
```
Upvotes: 0 |
2018/03/21 | 1,008 | 3,649 | <issue_start>username_0: I am trying to troubleshoot an old TCL accounting script called GOTS - Grant Of The System. What it does is creates a time stamped logfile entry for each user login and another for the logout. The problem is it is not creating the second log file entry on logout. I think I tracked down the area where it is going wrong and I have attached it here. FYI the log file exists and it does not exit with the error "GOTS was called incorrectly!!". It should be executing the `if then` for `[string match "$argv" "end_session"]`
This software runs properly on RHEL Linux 6.9 but fails as described on Centos 7. I am thinking that there is a system variable or difference in the `$argv` argument vector for the different systems that creates this behavior.
Am I correct in suspecting `$argv` and if not does anyone see the true problem?
How do I print or display the $argv values on logout?
```
# Find out if we're beginning or ending a session
if { [string match "$argv" "end_session"] } {
if { ![file writable $Log] } {
onErrorNotify "4 LOG"
}
set ifd [open $Log a]
puts $ifd "[clock format [clock seconds]]\t$Instrument\t$LogName\t$GroupName"
close $ifd
unset ifd
exit 0
} elseif { [string match "$argv" "begin_session"] == 0 } {
puts stderr "GOTS was called incorrectly!!"
exit -1
}
```
end\_session is populated by the /etc/gdm/PostSession/Default file
```html
#!/bin/sh
### Begin GOTS PostSession
# Do not run GOTS if root is logging out
if test "${USER}" == "root" ; then
exit 0
fi
/usr/local/lib/GOTS/gots end_session > /var/tmp/gots_postsession.log 2> /var/tmp/gots_postsession.log
exit 0
### End GOTS PostSession
```
This is the postsession log file:
```html
Application initialization failed: couldn't connect to display ":1"
Error in startup script: invalid command name "option"
while executing
"option add *Font "-adobe-new century schoolbook-medium-r-*-*-*-140-*-*-*-*-*-*""
(file "/usr/local/lib/GOTS/gots" line 26)
```
After a lot of troubleshooting we have determined that for whatever reason Centos is not allowing part of the /etc/gdm/PostSession/default file to execute:
fi
```
/usr/local/lib/GOTS/gots end_session
```
But it does update the PostSession.log file as it should .. . Does anyone have any idea what could be interfering with only part of the PostSession/default?<issue_comment>username_1: >
> Does anyone have any idea what could be interfering with PostSession/default?
>
>
>
Could it be that you are hitting [Bug 851769](https://bugzilla.redhat.com/show_bug.cgi?id=851769)?
That said, am I correct in stating that, as your investigation shows, this is not a Tcl-related issue or question anymore?
Upvotes: 2 <issue_comment>username_2: So it turns out that our script has certain elements that depend upon the Xserver running on logout to display some of the GUI error messages. This from:
[Gnome Configuration](https://help.gnome.org/admin/gdm/stable/configuration.html.en#scripting)
"When a user terminates their session, GDM will run the PostSession script. Note that the Xserver will have been stopped by the time this script is run, so it should not be accessed.
Note that the PostSession script will be run even when the display fails to respond due to an I/O error or similar. Thus, there is no guarantee that X applications will work during script execution."
We are having to rewrite those error message callouts so they simply write the errors to a file instead of depending on the display. The errors are for things that should be there in the beginning anyway.
Upvotes: 1 [selected_answer] |
2018/03/21 | 366 | 1,406 | <issue_start>username_0: Im not sure how to express it so I posted a picture in link below.
[It should look like this](https://i.stack.imgur.com/5Hxbr.png)<issue_comment>username_1: >
> Does anyone have any idea what could be interfering with PostSession/default?
>
>
>
Could it be that you are hitting [Bug 851769](https://bugzilla.redhat.com/show_bug.cgi?id=851769)?
That said, am I correct in stating that, as your investigation shows, this is not a Tcl-related issue or question anymore?
Upvotes: 2 <issue_comment>username_2: So it turns out that our script has certain elements that depend upon the Xserver running on logout to display some of the GUI error messages. This from:
[Gnome Configuration](https://help.gnome.org/admin/gdm/stable/configuration.html.en#scripting)
"When a user terminates their session, GDM will run the PostSession script. Note that the Xserver will have been stopped by the time this script is run, so it should not be accessed.
Note that the PostSession script will be run even when the display fails to respond due to an I/O error or similar. Thus, there is no guarantee that X applications will work during script execution."
We are having to rewrite those error message callouts so they simply write the errors to a file instead of depending on the display. The errors are for things that should be there in the beginning anyway.
Upvotes: 1 [selected_answer] |
2018/03/21 | 632 | 2,296 | <issue_start>username_0: I'm specifically talking about the Tag model, which I have no much experience with. The code goes like this:
```
@register_snippet
class ArticleTag(index.Indexed,Tag):
class Meta:
proxy=True
search_fields = [
index.SearchField('name', partial_match=True),
index.SearchField('slug', partial_match=True),
]
```
The Tag model has two fields, 'name' and 'slug'. But now I want to add a third custom field named 'type' that will be simply a CharField.
I tried modifying it like this:
```
@register_snippet
class ArticleTag(index.Indexed,Tag):
class Meta:
proxy=True
search_fields = [
index.SearchField('name', partial_match=True),
index.SearchField('slug', partial_match=True),
]
merge_to = models.CharField(max_length=500, blank=True, null=True)
panels = [
FieldPanel('name'),
FieldPanel('slug'),
FieldPanel('type'),
]
```
However the server yields:
```
ERRORS:
?: (models.E017) Proxy model 'ArticleTag' contains model fields.
```
How can I achieve what I am trying to do?<issue_comment>username_1: >
> Does anyone have any idea what could be interfering with PostSession/default?
>
>
>
Could it be that you are hitting [Bug 851769](https://bugzilla.redhat.com/show_bug.cgi?id=851769)?
That said, am I correct in stating that, as your investigation shows, this is not a Tcl-related issue or question anymore?
Upvotes: 2 <issue_comment>username_2: So it turns out that our script has certain elements that depend upon the Xserver running on logout to display some of the GUI error messages. This from:
[Gnome Configuration](https://help.gnome.org/admin/gdm/stable/configuration.html.en#scripting)
"When a user terminates their session, GDM will run the PostSession script. Note that the Xserver will have been stopped by the time this script is run, so it should not be accessed.
Note that the PostSession script will be run even when the display fails to respond due to an I/O error or similar. Thus, there is no guarantee that X applications will work during script execution."
We are having to rewrite those error message callouts so they simply write the errors to a file instead of depending on the display. The errors are for things that should be there in the beginning anyway.
Upvotes: 1 [selected_answer] |
2018/03/21 | 937 | 3,436 | <issue_start>username_0: I am trying to make another div right under the existing div in the HTML
```
Media Player
makeOscarPlayer(document.getElementById("my-video"))
Hello!
function makeOscarPlayer(){
var div = document.createElement("div");
div.innerHTML = `
hello
`
}
```
can someone explain to me what I am doing wrong? I am a self-taught developer sorry if my code is not perfectly organized still learning<issue_comment>username_1: You need to append that new element to a specific parent, in your case to `my-video`.
The function **[`appendChild`](https://developer.mozilla.org/en-US/docs/Web/API/Node/appendChild)** appends the new element to a parent element.
```js
function makeOscarPlayer(parent) {
var div = document.createusername_1ment("div");
div.innerHTML = 'Hello from username_1';
parent.appendChild(div);
}
makeOscarPlayer(document.getusername_1mentById("my-video"))
```
```css
#my-player {
border: 1px dashed green;
padding: 5px;
margin: 5px;
width: 300px;
background-color: #f1f1f1
}
#my-video div {
border: 1px dashed green;
padding: 5px;
margin: 5px;
width: 200px;
font-weight: 700;
}
```
```html
Hello!
```
Upvotes: 1 <issue_comment>username_2: It's a good start, but you're calling the function incorrectly and your function isn't adding anything to the page.
we use `appendChild` to add a node to the page.
In your function you create and add text to a div, but you don't return the node you made(*and also you didn't close your line of code with a semi-colon so I added that too*) but this should work:
```
Media Player
Hello!
function makeOscarPlayer() {
var div = document.createElement("div");
div.innerHTML = `hello`;
return div;
}
document.getElementById("my-video").appendChild(makeOscarPlayer())
```
```js
function makeOscarPlayer() {
var div = document.createElement("div");
div.innerHTML = `hello`;
return div;
}
document.getElementById("my-video").appendChild(makeOscarPlayer())
```
```html
Media Player
Hello!
```
Upvotes: 0 <issue_comment>username_3: You are calling the `makeOscarPlayer()` function before you are creating it.
You need to wrap the `makeOscarPlayer()` function declaration in a script tag.
You are passing in `document.getElementById("my-video")` as a parameter to `makeOscarPlayer()`, but there is no HTML element with an id of 'my-video'. You are giving the function a parameter of `null`, while the function declaration has no parameters.
You need to tell the script where to put the new element. To do that, you grab an existing element and use `parentNode` and `insertBefore`
Here is a barebones version that I got working for your reference:
```
Media Player
Hello!
function makeOscarPlayer(){
var div = document.createElement("div");
div.innerHTML = `hello`;
// This grabs the element that you want to create a new element by
var existingDiv = document.getElementById("my-player");
// This tells the script where to put the new element
existingDiv.parentNode.insertBefore( div, existingDiv.nextSibling);
}
// Must be called in the same script block or after the script holding the function declaration is loaded
makeOscarPlayer();
```
For more information on how `parentNode` and `insertBefore` work, see [this Stack Overflow question](https://stackoverflow.com/a/4793630/6884283)
Upvotes: 2 [selected_answer] |
2018/03/21 | 1,190 | 4,201 | <issue_start>username_0: I want to add a dialog to my application, but for some reason it is not showing.
The dialog component:
```ts
import { Component, OnInit } from '@angular/core';
import {MatDialogRef} from "@angular/material";
@Component({
selector: 'app-connect-to-player-dialog',
templateUrl: './connect-to-player-dialog.component.html',
styleUrls: ['./connect-to-player-dialog.component.css']
})
export class ConnectToPlayerDialogComponent implements OnInit {
constructor() { }
ngOnInit() {
}
}
```
The method used:
```ts
connectToPlayer() {
const dialogConfig = new MatDialogConfig();
dialogConfig.disableClose = true;
dialogConfig.autoFocus = true;
this.dialog.open(ConnectToPlayerDialogComponent, dialogConfig);
}
```
And I also added the dialog to `entryComponents`:
```ts
entryComponents: [ConnectToPlayerDialogComponent]
```
I have no idea what could be missing... Thank you for any help!
Full Code:
```ts
import {Component, OnInit} from '@angular/core';
import {PLAYERS} from '../mock-players';
import {Router} from "@angular/router";
import {AlertService, AuthenticationService} from "../_services";
import {HttpClient} from "@angular/common/http";
import {AuthGuard} from "../_guards";
import {MatDialog, MatDialogConfig, MatDialogRef} from '@angular/material';
import {ConnectToPlayerDialogComponent} from "../connect-to-player-dialog/connect-to-player-dialog.component";
@Component({
selector: 'app-profile',
templateUrl: './profile.component.html',
styleUrls: ['./profile.component.css']
})
export class ProfileComponent implements OnInit {
players = PLAYERS;
c: MatDialogRef;
constructor(private autheGuard: AuthGuard,
private authenticationService: AuthenticationService,
private alertService: AlertService,
private http: HttpClient,
private router: Router,
private dialog: MatDialog) {
}
ngOnInit() {
this.loadPlayers();
this.connectToPlayer();
}
loadPlayers() {
this.authenticationService.loadPlayersForPlayer(this.autheGuard.getUser().identityStr)
.subscribe(
data => {
this.players = data;
this.players.unshift(this.autheGuard.getUser());
//if (data.identityStr) {
//localStorage.setItem('currentUser', JSON.stringify(data));
//PdHeaderComponent.updateUserStatus.next(true);
//this.router.navigate(['']);
// } else {
//}
// this.alertService.success('Successfully logged in');
},
error => {
let errormsg: string = 'Unexspected Error occured.';
if (error.code == 401) {
errormsg = 'Wrong Playername or Password.';
}
this.alertService.error(errormsg);
});
}
connectToPlayer() {
const dialogConfig = new MatDialogConfig();
dialogConfig.disableClose = true;
dialogConfig.autoFocus = true;
this.c = this.dialog.open(ConnectToPlayerDialogComponent, {
width: '400px'});
}
}
```
Template:
```html
connect-to-player-dialog works!
```
Exception:
```
ERROR Error: Found the synthetic listener @slideDialog.start. Please include either "BrowserAnimationsModule" or "NoopAnimationsModule" in your application.
at checkNoSyntheticProp (platform-browser.js:2991)
at DefaultDomRenderer2.listen (platform-browser.js:2975)
at DebugRenderer2.listen (core.js:15124)
at listenToElementOutputs (core.js:10307)
at createViewNodes (core.js:13416)
at createRootView (core.js:13339)
at callWithDebugContext (core.js:14740)
at Object.debugCreateRootView [as createRootView] (core.js:14041)
at ComponentFactory_.create (core.js:10960)
at ComponentFactoryBoundToModule.create (core.js:3922)
```<issue_comment>username_1: That may help:
* make sure you injected `public dialog: MatDialog` in component
* inject `public dialogRef: MatDialogRef, @Inject(MAT\_DIALOG\_DATA) public data: any` in your dialog component
* instead of `new Config` try to pass as second parameter to open method this and check if it's working `{ width: '250px' }`
Upvotes: 0 <issue_comment>username_2: As in the chat mentioned: The import of the BrowserAnimationsModule was missing
```ts
import {BrowserAnimationsModule} from '@angular/platform-browser/animations';
@NgModule({
// ...
imports: [BrowserAnimationsModule],
// ...
})
```
Upvotes: 4 [selected_answer] |
2018/03/21 | 1,031 | 4,062 | <issue_start>username_0: I have two models where `employee` have relation with `person` model but `person` have no relation with `employee` model.
Like:
```
class Person(models.Model):
name = models.CharField(max_length=100)
address = models.CharField(max_length=100)
class Employee(models.Model):
person = models.ForeignKey(Person, related_name='person_info')
code = models.CharField()
```
In such cases I want `code` field data in person serializer.
---
I can solved this with writing method in person model or using **SerializerMethodField** in person serializer
like this:
```
def get_employee_code(self):
return Employee.objects.get(person=self).id
```
and add this as source in person serializer
```
employee_code = serializers.CharField(source='get_employee_code')
```
Or adding employee serializer into person serialiszer
```
class PersonSerializer(serializers.ModelSerializer):
employee = EmployeeSerializer()
class Meta:
model = Person
fields = ('name', 'address', 'employee')
```
---
But i was trying to do this with **reverse relation** but i can't. I have tried like this, it gives an error
**Serializer:**
```
class PersonSerializer(serializers.ModelSerializer):
employee_code = serializers.CharField(source='person_info.code')
class Meta:
model = Person
fields = ('name', 'address', 'employee_code')
```
How can I solve this with a reverse relation?<issue_comment>username_1: You can access the reverse relation with a custom **SerializerMethodField()**
```
class PersonSerializer(serializers.ModelSerializer):
employee_code = serializers.SerializerMethodField()
def get_employee_code(self, obj):
return obj.person_info.code
class Meta:
model = Person
fields = ('name', 'address', 'employee_code')
```
Upvotes: 1 <issue_comment>username_2: At the moment because you are using a ForeignKey field on the person attribute, it means that its returning a list when you access the reverse relation.
One solution would be to use a slug related field, though this must have `many` and `read_only` set to True, and will return a list because of the ForeignKey field.
```
class PersonSerializer(serializers.ModelSerializer):
employee_code = serializers.SlugRelatedField(
source='person_info',
slug_field='code',
many=True,
read_only=True,
)
class Meta:
model = Person
fields = ('name', 'address', 'employee_code')
```
The other option is to change your ForeignKey into a OneToOneField, which would still need `read_only` set to True but it will not return a list.
```
class Person(models.Model):
name = models.CharField(max_length=100)
address = models.CharField(max_length=100)
class Employee(models.Model):
person = models.OneToOneField(Person, related_name='person_info')
code = models.CharField()
class PersonSerializer(serializers.ModelSerializer):
employee_code = serializers.SlugRelatedField(
source='person_info',
slug_field='code',
read_only=True,
)
class Meta:
model = Person
fields = ('name', 'address', 'employee_code')
```
Or, if you don't want to change the ForeignKey, you could add a `employee_code` property method to the model instead to return the first employee code in the `person_info` relation.
```
class Person(models.Model):
name = models.CharField(max_length=100)
address = models.CharField(max_length=100)
@property
def employee_code(self):
employees = self.person_info.filter()
if employees.exists():
return employees.first().code
return ''
class Employee(models.Model):
person = models.OneToOneField(Person, related_name='person_info')
code = models.CharField()
class PersonSerializer(serializers.ModelSerializer):
employee_code = serializers.CharField(
read_only=True,
)
class Meta:
model = Person
fields = ('name', 'address', 'employee_code')
```
Upvotes: 3 |
2018/03/21 | 351 | 1,157 | <issue_start>username_0: I am learning MySQL and stumbled upon a problem after installation of MySQL. I switched to sql mode and from there I tried to connect to root@localhost but after I inputted the password, it says there is no such Host. I tried other host names, but the same results show where it says
>
> ERROR: 2005: No such host is known 'hostname'
>
>
>
I even tried to make a simple table, but when I enter it, I get the error that says ERROR: Not Connected, which was expected. How exactly do I get through this? I am a bit lost despite having everything installed.
I use commands such as:
>
> mysql-sql> \connect 127.0.01;
>
>
>
From there I enter a password, with no success.<issue_comment>username_1: You can try "127.0.01" instead of localhost.
Upvotes: 0 <issue_comment>username_2: Check the list of hosts names inside this file
**C:\Windows\System32\drivers\etc\hosts**
If you can't see this **127.0.0.1 localhost** add it on new line on bottom.
If it's there but the line start with **#** remove this symbol.
Upvotes: 2 <issue_comment>username_3: I think, Felipe meant ***\connect [email protected]*** instead of 127.0.01
Upvotes: 0 |
2018/03/21 | 1,266 | 4,930 | <issue_start>username_0: It is possible to change table names of the IdentityUser, IdentityRole,... tables.
See [How can I change the table names when using Visual Studio 2013 ASP.NET Identity?](https://stackoverflow.com/questions/19460386/how-can-i-change-the-table-names-when-using-visual-studio-2013-asp-net-identity)
```
protected override void OnModelCreating(System.Data.Entity.DbModelBuilder modelBuilder)
{
base.OnModelCreating(modelBuilder);
modelBuilder.Entity().ToTable("MyUsers");
}
```
But when I create new migration:
```
dotnet ef migrations add new ApplicationIdentity
```
The migration is generated for the original names:
```
migrationBuilder.CreateTable(
name: "AspNetUsers",
columns: table => new
{
Id = table.Column(nullable: false)
.Annotation("SqlServer:ValueGenerationStrategy", SqlServerValueGenerationStrategy.IdentityColumn),
AccessFailedCount = table.Column(nullable: false),
ConcurrencyStamp = table.Column(nullable: true),
Email = table.Column(maxLength: 256, nullable: true),
EmailConfirmed = table.Column(nullable: false),
LockoutEnabled = table.Column(nullable: false),
LockoutEnd = table.Column(nullable: true),
NormalizedEmail = table.Column(maxLength: 256, nullable: true),
NormalizedUserName = table.Column(maxLength: 256, nullable: true),
PasswordHash = table.Column(nullable: true),
PhoneNumber = table.Column(nullable: true),
PhoneNumberConfirmed = table.Column(nullable: false),
SecurityStamp = table.Column(nullable: true),
TwoFactorEnabled = table.Column(nullable: false),
UserName = table.Column(maxLength: 256, nullable: true)
},
constraints: table =>
{
table.PrimaryKey("PK\_AspNetUsers", x => x.Id);
});
```
It is possible to change generated migration manually, but is there any trick how to generate migration with correct names?<issue_comment>username_1: You can do this on your DbContext:
```
protected override void OnModelCreating(ModelBuilder builder)
{
base.OnModelCreating(builder);
builder.Entity(entity =>
{
entity.ToTable(name: "Users");
entity.Property(e => e.Id).HasColumnName("UserId");
});
}
```
Will create this migration:
```
migrationBuilder.CreateTable(
name: "Users",
columns: table => new
{
UserId = table.Column(nullable: false),
AccessFailedCount = table.Column(nullable: false),
ConcurrencyStamp = table.Column(nullable: true),
Email = table.Column(maxLength: 256, nullable: true),
EmailConfirmed = table.Column(nullable: false),
LockoutEnabled = table.Column(nullable: false),
LockoutEnd = table.Column(nullable: true),
NormalizedEmail = table.Column(maxLength: 256, nullable: true),
NormalizedUserName = table.Column(maxLength: 256, nullable: true),
PasswordHash = table.Column(nullable: true),
PhoneNumber = table.Column(nullable: true),
PhoneNumberConfirmed = table.Column(nullable: false),
SecurityStamp = table.Column(nullable: true),
TwoFactorEnabled = table.Column(nullable: false),
UserName = table.Column(maxLength: 256, nullable: true)
},
constraints: table =>
{
table.PrimaryKey("PK\_Users", x => x.UserId);
});
```
Upvotes: -1 <issue_comment>username_2: You need to add the migration manually:
* Add Empty migration file by using the below command
```
dotnet ef migrations add rename_tables
```
* A new file will add to migration and you need to change it as the below
```
public partial class rename_tables : Migration
{
protected override void Up(MigrationBuilder migrationBuilder)
{
migrationBuilder.RenameTable(name: "AspNetRoleClaims", newName: "RoleClaims");
migrationBuilder.RenameTable(name: "AspNetRoles", newName: "Roles");
migrationBuilder.RenameTable(name: "AspNetUserLogins", newName: "UserLogins");
migrationBuilder.RenameTable(name: "AspNetUserRoles", newName: "UserRoles");
migrationBuilder.RenameTable(name: "AspNetUsers", newName: "Users");
migrationBuilder.RenameTable(name: "AspNetUserTokens", newName: "UserTokens");
migrationBuilder.RenameTable(name: "AspNetUserClaims", newName: "UserClaims");
}
protected override void Down(MigrationBuilder migrationBuilder)
{
migrationBuilder.RenameTable(name: "RoleClaims", newName: "AspNetRoleClaims");
migrationBuilder.RenameTable(name: "Roles", newName: "AspNetRoles");
migrationBuilder.RenameTable(name: "UserLogins", newName: "AspNetUserLogins");
migrationBuilder.RenameTable(name: "UserRoles", newName: "AspNetUserRoles");
migrationBuilder.RenameTable(name: "Users", newName: "AspNetUsers");
migrationBuilder.RenameTable(name: "UserTokens", newName: "AspNetUserTokens");
migrationBuilder.RenameTable(name: "AspNetUserClaims", newName: "AspNetUserClaims");
}
}
```
* Finally you need to apply the migration
```
dotnet ef database update
```
* This will not change the constraint names, but you can also add that to your migration file
Upvotes: 0 |
Subsets and Splits