id | question | title | tags | accepted_answer
---|---|---|---|---|
_codereview.116836 | I want to know what you think about the structure of this code. If you think that there is some structural improvement to be made, tell me! Here is a GitHub link: https://github.com/LucianoPolit/LPDropdownMenu. Here is the code: //// LPDropdownMenu.swift// Created by Luciano Polit on 1/8/16.// Copyright (c) 2016 Luciano Polit. All rights reserved.//import UIKit@objc protocol LPDropdownMenuViewDelegate { optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, shouldSwipeToPosition position: CGFloat) -> Bool optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, shouldTouchToPosition position: CGFloat) -> Bool optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, shouldDoubleTouchToPosition position: CGFloat) -> Bool optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, shouldAproximateToPosition position: CGFloat) -> Bool optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, shouldFinishSlidingAtPosition position: CGFloat) -> Bool optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, willSwipeToPosition position: CGFloat) optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, willTouchToPosition position: CGFloat) optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, willDoubleTouchToPosition position: CGFloat) optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, willAproximateToPosition position: CGFloat) optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, willFinishSlidingAtPosition position: CGFloat) optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, didSwipeToPosition position: CGFloat) optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, didTouchToPosition position: CGFloat) optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, didDoubleTouchToPosition position: CGFloat) optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, didAproximateToPosition position: CGFloat) optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, didFinishSlidingAtPosition position: CGFloat) optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, didStartMovingAtPosition position: CGFloat) optional func dropdownMenuView(dropdownMenuView: LPDropdownMenuView, didFinishMovingAtPosition position: CGFloat)}class LPDropdownMenuView: UIView { private var panGestureRecognizer: UIPanGestureRecognizer! private var tapGestureRecognizer: UITapGestureRecognizer! private var doubleTapGestureRecognizer: UITapGestureRecognizer! private var movingCondition: Bool = false var delegate: LPDropdownMenuViewDelegate? var freeSlideable: Bool = true var swipeable: Bool = true var touchable: Bool = true var doubleTouchable: Bool = true var autoSetProperties: Bool = true var marginProximity: CGFloat = 50 var swipeSensibility: CGFloat = 600 var minPosition: CGFloat! var maxPosition: CGFloat! var contentFrame: CGRect! { didSet { setProperties() } } var barView: UIView! { willSet { if (self.barView != nil) { self.barView.removeFromSuperview() } } didSet { let height = barView.frame.size.height + (contentFrame.maxY - barView.frame.maxY) frame = CGRectMake(barView.frame.origin.x, barView.frame.origin.y, barView.frame.size.width, height) barView.frame.origin.y = 0 addSubview(barView) setPosition(frame.origin.y) setProperties() setGestures() } } var menuView: UIView!
{ willSet { if (self.menuView != nil) { self.menuView.removeFromSuperview() } } didSet { if (!(menuView is UIScrollView)) { let newValue = menuView newValue.frame.origin.y = 0 let height = frame.size.height - barView.frame.size.height menuView = UIScrollView(frame: CGRectMake(0, barView.frame.size.height, newValue.frame.size.width, height)) (menuView as! UIScrollView).contentSize = CGSize(width: newValue.frame.size.width, height: newValue.frame.size.height) (menuView as! UIScrollView).bounces = false menuView.addSubview(newValue) } else { let height = frame.size.height - barView.frame.size.height menuView.frame = CGRectMake(0, barView.frame.size.height, menuView.frame.size.width, height) } addSubview(menuView) } } init (frame: CGRect, barView: UIView, menuView: UIView) { super.init(frame: frame) self.contentFrame = frame initView(barView, menuView: menuView) } override init(frame: CGRect) { super.init(frame: frame) self.contentFrame = frame let barView = DefaultBarView(frame: CGRectMake(0, frame.size.height - 60, frame.size.width, 60)) let menuView = DefaultMenuView(frame: CGRectMake(0, barView.frame.size.height, frame.size.width, 600)) initView(barView, menuView: menuView) } required init?(coder aDecoder: NSCoder) { fatalError("init(coder:) has not been implemented") } private func initView(barView: UIView, menuView: UIView) { self.initGestures() self.barView = barView self.menuView = menuView }}extension LPDropdownMenuView { func getPosition() -> CGFloat { return frame.origin.y } func setProperties() { if (contentFrame != nil && autoSetProperties) { minPosition = contentFrame.maxY / 3 maxPosition = contentFrame.maxY - barView.frame.size.height } } func setPosition(position: CGFloat) { self.frame.origin.y = position self.frame.size.height = self.contentFrame.size.height - position if (menuView != nil) { self.menuView.frame.size.height = self.contentFrame.size.height - self.barView.frame.size.height - position } } func setPositionWithAnimation(position: CGFloat, time: NSTimeInterval, completion: ((Void) -> Void)?)
{ UIView.animateWithDuration(time, delay: 0, options: .CurveEaseOut, animations: { self.frame.origin.y = position self.frame.size.height = self.contentFrame.size.height - position self.menuView.frame.size.height = self.contentFrame.size.height - position - self.barView.frame.size.height }, completion: { _ in if (completion != nil) { completion!() } }) }}extension LPDropdownMenuView { private func initGestures() { doubleTapGestureRecognizer = UITapGestureRecognizer(target: self, action: "handleDoubleTapGestures:") doubleTapGestureRecognizer.numberOfTapsRequired = 2 tapGestureRecognizer = UITapGestureRecognizer(target: self, action: "handleTapGestures:") tapGestureRecognizer.numberOfTapsRequired = 1 tapGestureRecognizer.requireGestureRecognizerToFail(doubleTapGestureRecognizer) panGestureRecognizer = UIPanGestureRecognizer(target: self, action: "handlePanGestures:") panGestureRecognizer.minimumNumberOfTouches = 1 panGestureRecognizer.maximumNumberOfTouches = 1 } private func setGestures() { barView.addGestureRecognizer(doubleTapGestureRecognizer) barView.addGestureRecognizer(tapGestureRecognizer) barView.addGestureRecognizer(panGestureRecognizer) } internal func handleDoubleTapGestures(sender: UITapGestureRecognizer) { if (doubleTouchable) { let minPosition = contentFrame.origin.y let maxPosition = contentFrame.maxY - barView.frame.size.height let fartherPosition = getFartherPosition(minPosition, maxPosition: maxPosition) if (delegate?.dropdownMenuView?(self, shouldDoubleTouchToPosition: fartherPosition) != false) { delegate?.dropdownMenuView?(self, willDoubleTouchToPosition: fartherPosition) setPositionWithAnimation(fartherPosition, time: 0.3, completion: { delegate?.dropdownMenuView?(self, didDoubleTouchToPosition: fartherPosition) }) } } } internal func handleTapGestures(sender: UITapGestureRecognizer) { if (touchable) { let fartherPosition = getFartherPosition(minPosition, maxPosition: maxPosition) if (delegate?.dropdownMenuView?(self, shouldTouchToPosition: fartherPosition) != false) { delegate?.dropdownMenuView?(self, willTouchToPosition: fartherPosition) setPositionWithAnimation(fartherPosition, time: 0.3, completion: { delegate?.dropdownMenuView?(self, didTouchToPosition: fartherPosition) }) } } } internal func handlePanGestures(sender: UIPanGestureRecognizer) { let location = sender.locationInView(sender.view!.superview!)
if (sender.state != .Ended && sender.state != .Failed) { if (!movingCondition) { movingCondition = true delegate?.dropdownMenuView?(self, didStartMovingAtPosition: frame.origin.y) } let minPosition = contentFrame.origin.y let maxPosition = contentFrame.maxY - barView.frame.size.height let newPosition = frame.origin.y + location.y - (sender.view!.frame.size.height / 2) if (newPosition > minPosition && newPosition < maxPosition) { setPosition(newPosition) } else { if (newPosition < minPosition) { setPosition(minPosition) } else { setPosition(maxPosition) } } } if (sender.state == .Ended) { movingCondition = false delegate?.dropdownMenuView?(self, didFinishMovingAtPosition: frame.origin.y) if (checkMarginProximity(contentFrame.origin.y)) { if (delegate?.dropdownMenuView?(self, shouldAproximateToPosition: contentFrame.origin.y) != false) { delegate?.dropdownMenuView?(self, willAproximateToPosition: contentFrame.origin.y) setPositionWithAnimation(contentFrame.origin.y, time: 0.3, completion: { delegate?.dropdownMenuView?(self, didAproximateToPosition: contentFrame.origin.y) }) return } } if (checkMarginProximity(contentFrame.maxY - barView.frame.size.height)) { let proximatePosition = contentFrame.maxY - barView.frame.size.height if (delegate?.dropdownMenuView?(self, shouldAproximateToPosition: proximatePosition) != false) { delegate?.dropdownMenuView?(self, willAproximateToPosition: proximatePosition) setPositionWithAnimation(contentFrame.maxY - barView.frame.size.height, time: 0.3, completion: { delegate?.dropdownMenuView?(self, didAproximateToPosition: proximatePosition) }) return } } if (swipeable) { let swipePosition = getSwipePosition(sender.velocityInView(sender.view)) if (swipePosition != nil) { let swipeCondition = delegate?.dropdownMenuView?(self, shouldSwipeToPosition: swipePosition!) != false if (swipeCondition) { delegate?.dropdownMenuView?(self, willSwipeToPosition: swipePosition!) setPositionWithAnimation(swipePosition!, time: 0.3, completion: { delegate?.dropdownMenuView?(self, didSwipeToPosition: swipePosition!) }) return } } } if (freeSlideable && delegate?.dropdownMenuView?(self, shouldFinishSlidingAtPosition: frame.origin.y) == false) { let nearestPosition = getNearestPosition() delegate?.dropdownMenuView?(self, willAproximateToPosition: nearestPosition) setPositionWithAnimation(nearestPosition, time: 0.3, completion: { delegate?.dropdownMenuView?(self, didAproximateToPosition: nearestPosition) }) return } if (!freeSlideable) { let nearestPosition = getNearestPosition() delegate?.dropdownMenuView?(self, willAproximateToPosition: nearestPosition) setPositionWithAnimation(nearestPosition, time: 0.3, completion: { delegate?.dropdownMenuView?(self, didAproximateToPosition: nearestPosition) }) return } delegate?.dropdownMenuView?(self, willFinishSlidingAtPosition: frame.origin.y) delegate?.dropdownMenuView?(self, didFinishSlidingAtPosition: frame.origin.y) } } private func checkMarginProximity(position: CGFloat) -> Bool { if (frame.origin.y < position + marginProximity && frame.origin.y > position - marginProximity) { return true } return false } private func getNearestPosition() -> CGFloat { let currentPosition = frame.origin.y + (barView.frame.size.height / 2) let positions = [contentFrame.origin.y, minPosition, maxPosition, contentFrame.maxY] var nearestPosition: CGFloat! var distance: CGFloat! 
for position in positions { if (distance == nil || distance > abs(currentPosition - position)) { distance = abs(currentPosition - position) nearestPosition = position } } return nearestPosition } private func getSwipePosition(velocity: CGPoint) -> CGFloat? { if (velocity.y > swipeSensibility) { return maxPosition } if (velocity.y < -swipeSensibility) { if (frame.origin.y < minPosition) { return contentFrame.origin.y } else { return minPosition } } return nil } private func getFartherPosition(minPosition: CGFloat, maxPosition: CGFloat) -> CGFloat { let rangeMinPosition: CGFloat = 1.5 let rangeMaxPosition: CGFloat = 0.5 let distanceFromMin = (frame.origin.y - minPosition) * rangeMinPosition let distanceFromMax = (maxPosition - frame.origin.y) * rangeMaxPosition if (distanceFromMin > distanceFromMax) { return minPosition } else { return maxPosition } }}class DefaultBarView: UIView { override init(frame: CGRect) { super.init(frame: frame) backgroundColor = UIColor.blueColor() initLabel() } required init?(coder aDecoder: NSCoder) { fatalError("init(coder:) has not been implemented") } private func initLabel() { let label = UILabel(frame: CGRectMake(0, 0, frame.size.width, frame.size.height)) label.textAlignment = .Center label.textColor = UIColor.whiteColor() label.font = UIFont.systemFontOfSize(22) label.text = "Something cool here!" addSubview(label) }}class DefaultMenuView: UIView { override init(frame: CGRect) { super.init(frame: frame) backgroundColor = UIColor.grayColor() initLabel() } required init?(coder aDecoder: NSCoder) { fatalError("init(coder:) has not been implemented") } private func initLabel() { let label = UILabel(frame: CGRectMake(0, 0, frame.size.width, frame.size.height)) label.textAlignment = .Center label.textColor = UIColor.whiteColor() label.font = UIFont.systemFontOfSize(22) label.text = "An amazing view here!" addSubview(label) }}The code works perfectly, but I want your opinion! | Swift menu code | ios;swift;gui | There is a whole lot of code here, but I actually want to focus strictly on the protocol that we've created for delegates of this menu. There are a few things that immediately jump out to me, but the first thing, I'll illustrate with a screenshot. The protocol has a total of 17 methods, all of which start with the same 69 characters. Now, that alone is not that problematic. If we take a look at other protocols (even, some of Apple's, like table views and collection views), we'll see the same thing. But here, the problem is, we have provided no comments and the organization of the methods leaves a lot to be desired. Let's compare, for example, the documentation for UITableViewDelegate: Notice the organization into more useful categories. Your organization is this: should happen, will happen, did happen. And then a final did happen section that is somewhat inexplicably separated from the other did happen methods... but actually makes some sense as its own section (if we take a look at the Apple organization). I'd rather see your protocol organized into six sections of roughly 3 methods each. The natural organization looks more like this: swiping (should swipe, will swipe, did swipe); touching (should touch, will touch, did touch); etc. Moreover, I would expect every single one of these method declarations to have appropriate Apple docs with them. This, for example, is what the shouldSwipeToPosition method might have: Your methods are well named, but I'm certainly going to expect documentation like this for a delegate protocol.
It's the biggest clue I have as to exactly how and when this method works. Your description should even be more detailed than what I've provided. Consider, for example, the documentation that Apple provides for a similar protocol method found in UIKit: This documentation is on their website, but it's also available via Xcode in the same manner as I showed in the screenshot demonstrating an example of the documentation you should be providing. You've dumped everything into a single file. For what you've provided, I'd expect six different files. This means reconsidering your access modifiers. The thing is, you've designed this with the idea in mind that the user will copy & paste this file into their project and just go... the thing is, that's a terrible way to bring 3rd party code into my project. When I bring code into my projects, I expect to bring it in via Cocoapods. As a rule (with the rare exception), I completely pass over libraries that aren't Cocoapod compatible. It's important to note that if our code IS being used via a Cocoapod, then only the things marked as public are seen by the outside world. This is nice. It allows us to access code across our entire library, allowing us to use multiple files, and still preventing the outside world from calling these directly. It is something you should consider seriously. |
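To make the expectation concrete, here is the kind of header-doc comment the review is asking for on shouldSwipeToPosition; this is a sketch in Apple's documentation style, not the library's actual docs:

```swift
/// Asks the delegate whether the menu view should swipe to the given position.
///
/// - parameter dropdownMenuView: The menu view that is about to swipe.
/// - parameter position: The y-coordinate the menu view would swipe to.
/// - returns: true to allow the swipe; false to cancel it.
```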
_scicomp.2244 | I have matrices $A$ and $G$. $A$ is sparse and is $n\times n$ with $n$ very large (can be on the order of several million.) $G$ is an $n\times m$ tall matrix with $m$ rather small ($1 \lt m \lt 1000$) and each column can only have a single $1$ entry with the rest being $0$'s, such that $G^TG = I$. $A$ is huge, so it is really tough to invert, and I can solve a linear system such as $Ax = b$ iteratively using a Krylov subspace method such as $\mathrm{BiCGStab}(l)$, but I do not have $A^{-1}$ explicitly. I want to solve a system of the form: $(G^TA^{-1}G)x = b$, where $x$ and $b$ are $m$-length vectors. One way to do it is to use an iterative algorithm within an iterative algorithm to solve for $A^{-1}$ for each iteration of the outer iterative algorithm. This would be extremely computationally expensive, however. I was wondering if there is a computationally easier way to go about solving this problem. | Solving $(G^TA^{-1}G)x = b$ without inverting $A$ | linear algebra;numerics;linear solver;sparse;krylov method | Introduce the vector $y:=-A^{-1}Gx$ and solve the large coupled system $Ay+Gx=0$, $G^Ty=-b$ for $(y,x)$ simultaneously, using an iterative method. If $A$ is symmetric (as seems likely though you don't state it explicitly) then the system is symmetric (but indefinite, though quasidefinite if $A$ is positive definite), which might help you to choose an appropriate method. (relevant keywords: KKT matrix, quasidefinite matrix). Edit: As $A$ is complex symmetric, so is the augmented matrix, but there is no quasidefiniteness. You can however use the $Ax$ routine to compute $A^*x=\overline{A\overline x}$; therefore you could adapt a method such as QMR ftp://ftp.math.ucla.edu/pub/camreport/cam92-19.pdf (designed for real systems, but you can easily rewrite it for complex systems, using the adjoint in place of the transpose) to solve your problem. Edit2: Actually, the (0,1)-structure of $G$ means that you can eliminate $x$ and the components of $G^Ty$ symbolically, thus ending up with a smaller system to solve. This means messing with the structure of $A$, and pays off only when $A$ is given explicitly in sparse format rather than as a linear operator. |
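A sketch of the suggested augmented-system approach, using SciPy for illustration (GMRES is just one of several applicable iterative methods; for a matrix-free $A$ one would wrap the blocks in a LinearOperator instead of forming K explicitly):

```python
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

def solve_reduced(A, G, b):
    """Solve (G^T A^{-1} G) x = b via the coupled system A y + G x = 0, G^T y = -b."""
    n = A.shape[0]
    K = sp.bmat([[A, G], [G.T, None]], format='csr')  # None stands for the zero block
    rhs = np.concatenate([np.zeros(n), -np.asarray(b)])
    sol, info = spla.gmres(K, rhs)
    if info != 0:
        raise RuntimeError("GMRES did not converge")
    return sol[n:]  # the x component of the stacked solution (y, x)
```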
_codereview.8797 | This is not homework. I am trying to learn OOD principles and design patterns myself. Suppose I have a problem like this: A book-shop buys and sells two types of books: Non-technical {Title, Author, Price} and Technical {Title, Author, Price, CD}. Also, a customer gets a CD when he buys a Technical book. A CD object is defined as CD {Title, Price}. A Non-technical book's price will be only the price of the book. A Technical book's price will be the sum of the price of the book and the CD. Create a C# program to show the following info:
Total number of book Bought & Price: XXX & XXX.XX
Total number of book Sold & Price: XXX & XXX.XX
Total Technical Book Sold & Price: XXX & XXX.XX
Total Non-technical Book sold & Price: XXX & XXX.XX
I have designed the program like this: abstract class Publication{ public virtual string Title { get; set; } public virtual double Price { get; set; }}class CD : Publication{}abstract class Book : Publication{ public virtual string Author { get; set; }}class TechnicalBook : Book{ public CD Cd { get; set; } public override double Price { get { return (base.Price + Cd.Price); } }}class NonTechnicalbook : Book{}abstract class Shop{ private IDictionary<string, Book> boughtDictionary; private IDictionary<string, Book> soldDictionary; public Shop() { boughtDictionary = new Dictionary<string, Book>(); soldDictionary = new Dictionary<string, Book>(); } public virtual void Buy(Book item) { boughtDictionary.Add(item.Title, item); } public virtual void Sell(string title) { Book book = boughtDictionary[title]; boughtDictionary.Remove(book.Title); soldDictionary.Add(book.Title, book); } public virtual int GetBoughtBookCount() { return boughtDictionary.Count; } public virtual double GetBoughtBookPrice() { double price = 0.0; foreach (string title in boughtDictionary.Keys) { price = price + boughtDictionary[title].Price; } return price; } public virtual int GetSoldBookCount() { return boughtDictionary.Count; } public virtual double GetSoldBookPrice() { double price = 0.0; foreach (string title in soldDictionary.Keys) { price = price + soldDictionary[title].Price; } return price; } public virtual double GetTotalBookCount() { return this.GetBoughtBookCount() + this.GetSoldBookCount(); } public virtual double GetTotalBookPrice() { return this.GetBoughtBookPrice() + this.GetSoldBookPrice(); } public virtual void Show() { Console.WriteLine("Total number of books Bought & Price: " + this.GetTotalBookCount() + " & " + this.GetTotalBookPrice()); Console.WriteLine("Total number of books Sold & Price: " + this.GetSoldBookCount() + " & " + this.GetSoldBookPrice()); }}Does this design conform to the Open-Closed principle? Now I am unable to understand how to separate Technical and Non-technical books at this point. | Bookstore classes design | c# | null |
_cstheory.19073 | I am given as input a DAG $G$ of $n$ vertices where each vertex $x$ is additionally labeled with some $S(x) \subseteq \{1, \ldots, n\}$. A topological sort of $G$ is a bijection $f$ from the vertices of $G$ to $\{1, \ldots, n\}$ such that for all $x$, $y$, if there is a path from $x$ to $y$ in $G$ then $f(x) \leq f(y)$. I wish to decide whether there exists a topological sort of $G$ such that for all $x$, $f(x) \in S(x)$. What is the complexity of this decision problem? [Notes: Clearly this is in NP. If you look at the graph of allowed vertex/position pairs, with undirected edges between pairings that conflict because they violate the order, you get a graph of disjoint cliques where you want to pick at most one pair per clique, at most one pair per position and at most one pair per vertex -- it seems related to 3-dimensional matching but I can't see if it is still hard with the additional structure of this specific problem.] | Complexity of topological sort with constrained positions | sorting;directed acyclic graph;partial order;matching;order theory | I think this problem is NP-hard. I try to sketch a reduction from MinSAT. In the MinSAT problem we are given a CNF and our goal is to minimize the number of satisfied clauses. This problem is NP-hard, see e.g., http://epubs.siam.org/doi/abs/10.1137/S0895480191220836?journalCode=sjdmec Divide the vertices into two groups - some will represent literals, the others clauses, so $n=2v+c$ where $v$ is the number of variables of the CNF (usually denoted by $n$) and $c$ is the number of clauses. Direct an edge from each literal-vertex to the clause-vertex where it occurs. Define $S$ for a literal-vertex that represents $x_i$ as $\{i,i+v+k\}$ (where $k$ is an arbitrary parameter), so either $f(x_i)=i$ and $f(\bar x_i)=i+v+k$ or $f(\bar x_i)=i$ and $f(x_i)=i+v+k$. For each clause-vertex, let $S=\{v+1,\ldots,v+k,2v+k+1,\ldots,n\}$, so $k$ of the clause-vertices are ``small''. Now the CNF has an assignment where at least $k$ clauses are false if and only if your problem can be solved for the above instance. The MinSAT problem is exactly to test whether a CNF formula $\varphi$ has an assignment that makes at least $k$ clauses false, so this shows that your problem is NP-hard. To help you understand this reduction, here's the intuition: small labels ($1,2,\dots,v+k$) correspond to the truth value False, and large labels ($v+k+1,\dots,2v+k$) correspond to True. The constraints for literal-vertices ensure that each $x_i$ is either True or False and that $\overline{x_i}$ has the opposite truth value. The edges ensure that if any literal is True, then all clause-vertices containing it are assigned True as well. (In contrast, if all literals in a clause are assigned False, then this graph structure allows the clause-vertex to be assigned either False or True.) Finally, the choice of $k$ ensures that $k$ of the clause-vertices are assigned False and $c-k$ of them are assigned True. So, if there is a valid topological sort of this graph, then there is an assignment to the variables that makes at least $k$ of the clauses of $\varphi$ false (all of the clause-vertices that were assigned False, plus possibly some of the ones that were assigned True).
Conversely, if there is an assignment to the variables that makes at least $k$ of the clauses of $\varphi$ false, then there is a valid topological sort of this graph (we fill in the labels for the literal-vertices in the obvious way; and for each clause of $\varphi$ that is true, we give its corresponding clause-vertex a label that corresponds to True; the other clause-vertices can receive labels corresponding to an arbitrary truth value). |
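For very small instances, the problem definition can be checked directly by brute force; a sketch (exponential time, useful only for testing intuition about the reduction):

```python
from itertools import permutations

def has_constrained_topsort(n, edges, S):
    """edges: (u, v) pairs with u before v; S[v]: set of labels allowed for vertex v."""
    for labels in permutations(range(1, n + 1)):
        f = dict(zip(range(n), labels))
        # Checking edges suffices: f(u) <= f(v) along edges implies it along paths.
        if all(f[u] <= f[v] for u, v in edges) and all(f[v] in S[v] for v in range(n)):
            return True
    return False
```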
_unix.294083 | I have a file with multiple pieces of information in different columns for the same ID:
first | x | y | z
second | x | y
third | x
I want something like this:
first | x
first | y
first | z
second | x
second | y
third | x | Copy First Column Id to different lines | text processing | null |
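One way to produce that output, sketched in Python (the file name is a placeholder):

```python
with open('file.txt') as fh:
    for line in fh:
        fields = [f.strip() for f in line.split('|')]
        for value in fields[1:]:
            print(fields[0], '|', value)
```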
_webapps.1485 | Google, Bing and the others have an image search option, but what I'm looking for is the reverse: when I submit a photo, I want this web service to tell me in which places the image is located, and show me, if possible, the versions with higher resolution. Does a web app like this exist? | Reverse image search engine | webapp rec;search;images | You want to use TinEye: TinEye is a reverse image search engine. It finds out where an image came from, how it is being used, if modified versions of the image exist, or if there is a higher resolution version. |
_softwareengineering.193578 | I asked a question on StackOverflow in November about separating a very large application into layers and tiers: https://stackoverflow.com/questions/13342626/net-divorcing-layers. The previous developer included data logic and business logic in the business logic layer. My question is about the tiers element. I researched on here and concluded that it is better to separate layers into tiers contained in separate DLLs, i.e. the presentation layer, business logic layer and data access layer all have separate DLLs, as described here: https://stackoverflow.com/questions/13342626/net-divorcing-layers. This seems to be consistent with what I learnt at university. However, since then all the examples I am finding online suggest having two tiers (one for the presentation layer and one tier for the BLL and DAL). Are there any specific criteria that developers use to decide whether or not to use three tiers? I am using ADO.NET and have a shared SQL Helper class in the data access layer. The SQLHelper class is similar to this but for VB.NET: http://www.sharpdeveloper.net/source/SqlHelper-Source-Code-cs.html | VB.NET: Two tiers for three layers or three tiers for three layers | design patterns;vb.net | Are there any specific criteria that developers use to decide whether or not to use three tiers? Whether or not the software layer must be capable of running on a separate machine. That's the difference between a tier and a layer; a tier is a hardware abstraction. While the concepts of layer and tier are often used interchangeably, one fairly common point of view is that there is indeed a difference. This view holds that a layer is a logical structuring mechanism for the elements that make up the software solution, while a tier is a physical structuring mechanism for the system infrastructure. http://en.wikipedia.org/wiki/Multitier_architecture |
_cs.2680 | I've been reading a bit of the literature lately, and have found some rather interesting data structures. I have researched various different methods of getting update times down to $\mathcal{O}(1)$ worst-case update time [1-7]. Recently I began looking into lock-free data structures, to support efficient concurrent access. Have any of these worst-case $\mathcal{O}(1)$ update-time techniques been used in the implementation of lock-free data structures? I ask because, to me, they seem like the obvious practical extension of this theoretical enhancement.
1. Tarjan, Robert Endre. Updating a Balanced Search Tree in O(1) Rotations. Information Processing Letters 16, no. 5 (1983): 253-257.
2. Driscoll, J. R., N. Sarnak, D. D. Sleator, and R. E. Tarjan. Making Data Structures Persistent. In Proceedings of the Eighteenth Annual ACM Symposium on Theory of Computing, 109-121. STOC 86. New York, NY, USA: ACM, 1986.
3. Levcopoulos, C., and Mark H. Overmars. A Balanced Search Tree with O(1) Worst Case Update Time. Acta Inf. 26, no. 3 (November 1988): 269-277.
4. Fleischer, Rudolf. A Simple Balanced Search Tree With O(1) Worst-Case Update Time.
5. Dietz, Paul F., and Rajeev Raman. A Constant Update Time Finger Search Tree. Information Processing Letters 52, no. 3 (1994): 147-154.
6. Lagogiannis, George, Christos Makris, Yannis Panagis, Spyros Sioutas, and Kostas Tsichlas. New Dynamic Balanced Search Trees with Worst-case Constant Update Time. J. Autom. Lang. Comb. 8, no. 4 (July 2003): 607-632.
7. Brodal, Gerth Stølting, George Lagogiannis, Christos Makris, Athanasios Tsakalidis, and Kostas Tsichlas. Optimal Finger Search Trees in the Pointer Machine. J. Comput. Syst. Sci. 67, no. 2 (September 2003): 381-418. | Lock-free, constant update-time concurrent tree data-structures? | reference request;data structures;time complexity;concurrency;search trees | null |
_unix.346723 | I've barely installed something on my new droplet on Digital Ocean but the freespace in HDD of 20 Gb has evaporated already. 0% is left. Not to mention that I have the same kind of a droplet with the same configuration and it has quite a bit of free space left.How can I find out what possibly takes the free space on my new droplet? | What has taken all free space on my server? | ubuntu;hard disk | null |
_webmaster.14878 | Possible Duplicate:Which Content Management System (CMS) should I use? Are there any plugable django blog apps that can be embeded into existing pages or templates. It doesn't have to be a full fledged blogging engine. The features I am looking for are:Displaying multiple stories with 'read more' button. Display should be like news items on a news-agregator, without the regular features of blogs like commenting or social networking buttons.Url for each storySupport for pagination to limit the number of stories that can apper on the home page.Loadable on to the existing pages or templates.If there are no such apps, suggestions for how the modules (views, models, urls) can be written are welcome. | Django plugable news story application | looking for a script;blog;django | null |
_unix.159624 | I'm having trouble executing a Perl script through the Unix shell using Perl's system command.I've had more complex regex commands I had to adjust accordingly to convert from Unix to Perl, and they're working fine, but I can't seem to figure out what's missing here.I've tested with Mobaxterm v7.1 and Putty 0.60.I have a file (file.txt):23445 dir1/dir2/dir321343 dir1/dir2/dir3/dir4/file.B254322 dir1/dir2/dir3/file3.P53223 dir1/dir2/dir3/dir4/dir523412 dir1/dir2/dir3/dir4/dir5/file5.xsd54166 dir1/dir2/file6.AB64544 dir1/dir2/fil7.ABEFrom this file I'm trying to grep only the lines with files (exclude directories: lines 1 and 4), and store them in a new file:system(grep '^.*\.[a-zA-Z0-9][a-zA-Z0-9]*\$' file.txt > file2.txt);file2.txt doesn't change from file.txt and still contains the directories.However, this command works through Unix shell (without the backslash before the $):grep '^.*\.[a-zA-Z0-9][a-zA-Z0-9]*$' file.txt > file2.txtfile2.txt output:21343 dir1/dir2/dir3/dir4/file.B254322 dir1/dir2/dir3/file3.P23412 dir1/dir2/dir3/dir4/dir5/file5.xsd54166 dir1/dir2/file6.AB64544 dir1/dir2/fil7.ABEI have to escape the $ character in the Perl system command of course so that Perl doesn't read it as an unknown string. Other than that, what needs to be adjusted in the system command? | Regex works in shell but not through Perl script | shell script;grep;regular expression;perl;quoting | In double quotes, you need to backslash backslashes, i.e. double the backslash before the dot.system(grep '^.*\\.[a-zA-Z0-9][a-zA-Z0-9]*\$' file.txt > file2.txt);# ^# |# Here. |
_webmaster.15066 | I am looking for an ad manager script/system (open source or paid) which can deliver different ads based on the IP - country of the visitors. Would be nice if you can name one if you know! | IP - Country based ad delivery | advertising;country specific;ip address | I managed to do this using OpenX and GeoIP package installed. Thanks everyone. |
_scicomp.10720 | If $A\in R^{m\times n}, b\in R^m, c\in R^n$,if I need to solve the least square problems via SVD of $A$ and $A^T$, i.e.I need to solve the least square solutions to following linear systems via SVD:$Ax=b_i,\quad i=1,2,3,\cdots$$A^Ty=c_j,\;j=1,2,3,\cdots$it is also possible to do these via householderQr().solve() in Eigen, but it is instable; So I want to do this via SVD decomposition of $A$ and $A^T$; obviously, the SVD of $A^T$ can be obtained by the transpose of that for $A$.My question is, how can I implement it in Eigen so that only SVD on $A$ is Ok to solve both the two type least square problems?int m=100,n=50;VectorXd b=VectorXd::randome(m),c=VectorXd::randome(n);MatrixXd A=MatrixXd::random(m,n);Eigen::JacobiSVD<Eigen::MatrixXd> _svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV); MatrixXd x = _svd.solve(b);A.transposeInPlace();// How can I save this sentence below which implements the SVD of $A^T$ Eigen::JacobiSVD<Eigen::MatrixXd> _svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);MatrixXd y = _svd.solve(c); | How can I reuse the SVD of matrix A to solve LS problems for both A and its transpose via Eigen C++? | c++;least squares;svd;eigen;dense matrix | I do not have too much experience using eigen, but when you are solving the systems with the SVD decomposition, actually you are doing the followin:$ A x = USV^T x = b $and you use the SVD decomposition of A to isolate x$x = VS^{-1}U^T b$.The same for the other system:$ A^T y = VSU^T y = c $and you use the SVD decomposition of A to isolate x$y = US^{-1}V^T c$.So with eigen you should be able to recover a pointer to the matrices $U$, $S$ and $V$ as something similar to thatMatrixUType* U = _svd.matrixU()MatrixVType* V = _svd.matrixV()SingularValuesType* S = _svd.singularValues()And then use some functions from the library to apply the operations to $c$ (matrix multiplications, traspose multimplication, and diagonal inversion) to obtain $y$.I am sorry I can not give you more implementation details, but it think that it can help. |
_codereview.26444 | I made a template pipe and filters to replace an old pipe and filters implementation that used inheritance and had a very heavy base class.#include <iostream>#include <boost/bind.hpp>#include <boost/function.hpp>#include <list>template <typename Data>class pipeline { typedef boost::function<Data(Data)> filter_function; typedef std::list<filter_function> filter_list; filter_list m_list;public: template<typename T> void add(T t) { m_list.push_back(boost::bind(&T::filter, t, _1)); } Data run(Data data) { typedef typename filter_list::iterator iter; iter end = m_list.end(); for(iter it = m_list.begin(); it != end; ++it) { data = (*it)(data); } return data; }};struct foo { int filter(int i) { return i + 1; }};int main(int argc, _TCHAR* argv[]){ pipeline<int> pipe; foo f; pipe.add(f); std::cout << pipe.run(0); char c; std::cin >> c;}Except the fact that add() is a template, are there any issues anyone sees with this approach? | Generic pipe and filters | c++;template;inheritance;boost | null |
_codereview.75505 | This is a Markov text generator I've created in JavaScript. I'm pretty sure the term Markov applies, anyway.The way the generator works is like this: it first divides the source text into unique chunks of length n (specified via the order parameter, greater = more coherent result), and, for each chunk, determines which characters follow it how often. When generating a new text, it looks at the last n characters of what it's generated so far, then randomly selects one of its possible followers and appends it to the text.(The last bit is apparently slightly different from the Dissociated press algorithm, which would output each follower with the same probability and not account for how often they occur.)Long story short, here's my code. I'd mainly like to know if I'm following the common best practices as far as object orientation goes, I've had little experience with it before, but any kind of feedback is appreciated.function Markov(sourceText, order) { this.sourceText = sourceText; this.order = order; this._setupFrequencies();}Markov.prototype = { _setupFrequencies: function() { this.frequencies = {}; // for each substring of length <order>, // create an array of characters that can follow it for (var i = 0; i < this.sourceText.length - (this.order - 1); i++) { var chunk = this.sourceText.substr(i, this.order); if (!this.frequencies.hasOwnProperty(chunk)) { var followers = []; for (var k = 0; k < this.sourceText.length - (this.order - 1); k++) { if (this.sourceText.substr(k, this.order) == chunk) { follower = this.sourceText.substr(k + this.order, 1); followers.push(follower); } } this.frequencies[chunk] = followers; } } }, _getRandomChar: function(chunk) { if (!this.frequencies.hasOwnProperty(chunk)) { return ''; } var followers = this.frequencies[chunk]; var randIndex = Math.floor(Math.random() * followers.length); return followers[randIndex]; }, _getRandomChunk: function() { var randIndex = Math.floor(Math.random() * this.sourceText.length); return this.sourceText.substr(randIndex, this.order); }, generateText: function(length) { if (this.sourceText.length <= this.order) { return ''; } var text = this._getRandomChunk(); // take the last <order> characters from the generated string, // select one of its possible followers, and append it for (var i = this.order; i < length; i++) { var currentChunk = text.substr(text.length - this.order); var newChar = this._getRandomChar(currentChunk); if (newChar != '') { // the last chunk of the source has no follower text += newChar; } else { text += this._getRandomChunk(); } } return text; }}It would then be used like so:var text = Call me Ishmael. Some years agonever mind how long preciselyhaving little or no money in my purse, and nothing particular to interest me on shore, I thought I would sail about a little and see the watery part of the world.;var markov = new Markov(text, 3);console.log(markov.generateText(100));// results:// no money interest me Ishmael. Some or no money in my particular to in my purse, and sail about a l// y in my purse, I the would sail about a little on shore, and see thought I would sail about a little// Some Ishmael. Some Ishmael. Some Ishmael. Some Ishmael. Some years agonever mind see the worl | Markov text generator | javascript;object oriented;design patterns | null |
_webmaster.79580 | I'm new to configuring API web servers and I'm trying to enable URL rewriting for the server so that a user can go to my public DNS.com/getAlbum to access a method within my api.php file (an assumption, I'm still learning this all). I was given the command cd /etc/apache2/mods-enabled; ln -s ../mods/rewrite.load rewrite.load, however when I run that, it says it fails to create the symbolic link rewrite.load: File exists. I checked this file and it contains the same code located in mods-available/rewrite.load, so I think there is something incorrect in apache2.conf. I can also update this to include any needed info. How do I enable URL rewriting to get my API working? | Enabling URL rewriting for API | apache;php;webserver;api | First, the easier way to enable a module is with a2enmod, so you could do a2enmod rewrite. But that 'file exists' error you're getting probably means it's already enabled. Make sure to do an apache2ctl graceful to activate the new config. |
_cs.53238 | I am learning about knowledge representation in my intro to AI course and one of the key ideas has to do with isomorphic vs homomorphic representations. The examples I find when I google around are mainly on the topic of mathematical graphs, which are over my head. Can someone give me a simple, non-mathematical example of homomorphic vs isomorphic knowledge representation to help me wrap my head around the idea? | What is a good example to illustrate the difference between isomorphic and homomorphic representations? | terminology;artificial intelligence;knowledge representation | null |
_webmaster.25226 | I never knew about the idea of a CSS framework until I found Twitter's Bootstrap (http://twitter.github.com/bootstrap/) on StumbleUpon. What are some of the other good CSS frameworks out there? | What are some CSS3 frameworks available? | css;css3;css framework | null |
_unix.109253 | I want to move all my 14.5 TB of media drives (not OS) to a combined LVM file system due to constant problems arranging things to fit into multiple smaller file systems. My question is: if, after setup, any of the 6 drives moves to a different location (/dev/sd*), is that going to be a problem? I have always mounted them based on UUID, but I don't know LVM well enough to know how it works with multiple drives. I know I can still mount the file system based on UUID, but I want to make sure LVM is not going to get messed up finding the individual parts of the system. I have to ask this since, for some reason, if I reboot with USB drives inserted they get lower sd* letters than some of the media drives, and it causes those media drives to be rearranged for that boot only. PS: I maintain off-site backups of my media so I'm not too worried about one drive failing and breaking things. I only mention this since my Google searches on LVM always have someone trying to talk the person out of it because one problem loses everything. | How does LVM find drives after setup | linux;lvm | Each LVM object (physical volume, volume group, logical volume) has a UUID. LVM doesn't care where physical volumes are located and will assemble them as long as it can find them. By default, LVM (specifically vgscan, invoked from an init script) scans all likely-looking block devices at boot time. You can define filters in /etc/lvm/lvm.conf. As long as you don't define restrictive filters, it doesn't matter how you connect your drives. You can even move partitions around while the system isn't running and LVM will still know how to assemble them. You hardly ever need to interact with LVM's UUIDs. Usually you would refer to a volume group by name and to a logical volume by its name inside its containing volume group. |
_codereview.109483 | My script pushes zeros to the end of an int array. I'd like to be reviewed on efficiency and style, and obviously if there is a bug I would like to know.
def nonzero(a):
    ''' given an array of ints, push all zeros to the end '''
    zeros = [0 for i in range(a.count(0))]
    x = [i for i in a if i != 0]
    x.extend(zeros)
    return(x) | Push all zeros to tail of int array | python | null |
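A slightly tighter variant for comparison (one pass to collect the nonzeros, then pad with the right number of zeros, avoiding the extra count pass):

```python
def nonzero(a):
    """Return a new list with all zeros of `a` moved to the end."""
    nonzeros = [x for x in a if x != 0]
    return nonzeros + [0] * (len(a) - len(nonzeros))
```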
_cstheory.30570 | RJ Lipton conjectures a link between group growth theory and complexity theory. Group growth theory has undergone rapid advances in the last decade and has many surface similarities/parallels with complexity theory. (Of course, there are other deep known links between TCS and group theory.) Has anyone seen a link between group growth theory and complexity theory? E.g., have any complexity class separations been found expressible in this framework? (E.g., conceivably there could be an intermediate link via boolean circuits.) | possible bridge between group growth theory and complexity theory? | cc.complexity theory;reference request;complexity classes;big picture;gr.group theory | There has been some recent work in terms of characterizing automorphism groups of strongly regular graphs using asymptotic group theory (e.g. this paper), which (for many reasons) is likely very closely related to the complexity of algorithms on strongly regular graphs that use group-theoretic methods, although exploiting such properties algorithmically is still an open question. |
_codereview.119265 | I have a little project where I am implementing an immutable linked list (not only...) based on a pair structure like the LISP cons cell. The linked list works well and is basically wrapping two variables in a closure in an object; the variables are head and rest (think LISP's car and cdr). I have also added some useful functions like map, fold, forEach, merge, reverse and sort among others. All work well except for sort, as it only works with the default compare function. I know that the reason is how I append the list back together after a comparison. Does anyone have a better idea of how this might be done? Merge sort? Also, suggestions on other parts of the project are welcome! :-)var list = function (arr) { var listBuilder = function (arr, i) { if (i + 1 === arr.length) return pair(arr[i], nil); else return pair(arr[i], listBuilder(arr, i + 1)); }; return arr.length > 0 ? listBuilder(arr, 0) : nil;};var pair = function (a, b) { var cons = function (a, b) { var p = function (p) { //base closure to hold the data return p ? a : b; }; p.head = function () { return this(true); }; p.rest = function () { return this(false); }; p.equal = function (a) { return this === a; }; p.toString = function () { return '( ' + p.head() + ' , ' + p.rest() + ' )'; }; p.len = function () { if(nil.equal(p)) return 0; else return nil.equal(p.rest()) ? 1 : 1 + p.rest().len(); }; p.get = function (i) { if(i <= 0){ return p.head(); }else if(nil.equal(p.rest())){ return nil; }else return p.rest().get(i - 1); }; p.append = function(l) { if(nil.equal(p)) return l; if(nil.equal(p.rest())) return pair(p.head(),l); else return pair(p.head(),p.rest().append(l)); } p.map = function (fn) { return p.merge(fn, pair); }; p.fold = function (fn) { return p.merge(function (e) { return e; }, fn); } p.forEach = function (fn) { if (!nil.equal(p)) { fn(p.head()); p.rest().forEach(fn); } }; p.merge = function (modifierFn, concatFn) { if(nil.equal(p)) return nil; else if (nil.equal(p.rest())) return modifierFn(p.head()); else return concatFn(modifierFn(p.head()), p.rest().merge(modifierFn, concatFn)); }; p.reverse = function () { if (nil.equal(p.rest())) return p; else return p.rest().reverse().append( list([p.head()]) ); }; p.sort = function(cmp){ //quick-sort var pivot = p.head(); var left=nil,right=nil; cmp = cmp === undefined ? function(a,b){return a < b;}: cmp; //defaults to numerical less than var partion = function(l){ if(cmp(l.head(), pivot)) left = pair(l.head(),left); else right = pair(l.head(),right); if(!nil.equal(l.rest())) partion(l.rest()); }; if(!nil.equal(p) && !nil.equal(p.rest())){ partion(p.rest()); return left.sort().append(pair(pivot,nil)).append(right.sort()); }else{ return p; } }; return p; }; return cons(a, b);};var nil = pair(null, null);nil.toString = function () { return 'nil';};//use casesvar l = list([213, 342, 654, 543, 213, 321, 54365, 564, 3221, 45, 7, 8, 53, 6542, 24, 8, 523, 543, 984]);var append = function(a,b){ return a+ ' ' +b;};alert(l.sort().reverse().fold(append)); | Quick-sort a linked list? | javascript;linked list;functional programming | null |
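A likely cause of the sort problem described above: the recursive calls in sort drop the comparator, so the sub-lists fall back to the default numeric compare. Threading cmp through, as sketched here, should make custom comparators work:

```js
// Inside p.sort, pass cmp to the recursive calls:
return left.sort(cmp).append(pair(pivot, nil)).append(right.sort(cmp));
```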
_unix.42484 | I use Cheese as my webcam software. I am trying to figure out a way to have it :StartClick a picExitwhen a script is executed. The script should not be asking for permissions and there shouldn't be any interruptions. So far,#!/bin/bash cheeseI could only get it to do step 1. How do I do steps 2 and 3?The doc files don't mention an such option and I don't want to change the source code.(I don't mind camorama either) | Having Cheese automatically take a pic and quit | camera | null |
_unix.95903 | I've been using the terminal for almost everything: in fact, I often don't even log in through the interface, I use the tty1 and go to the web with text-browsers. So, the external drive doesn't auto-mount, and I use sudo mount /dev/sdb1 /mnt/JMCF125_DE to mount it. It works, but listing shows there's a difference. The files' description when auto-mounting via the GUI (Unity on Ubuntu) looks like:-rw------- 1 jmcf125 jmcf125In manual mount, the same files' properties look like this:-rwxrwxrwx 1 root rootWhich makes sense since I had to use sudo to mount. But how come the system doesn't have to? How can my mounts work exaclty like the systems'? Also, I heard every action in the GUI goes through a background shell: can I see what commands are printed there? | Why does a manual mount set different file ownership? | ubuntu;permissions;mount;automounting | The default GUI uses Gvfs to mount removable drives and other dynamic filesystems. Gvfs requires D-Bus. You can launch D-Bus outside of an X11 environemnt, but it's tricky. If you have D-Bus running, you can make gvfs mounts from the command line with gvfs-mount.The program pmount provides a convenient way to mount removable drives without requiring sudo. Pmount is setuid root, so it can mount whatever it wants, but it only allows a whitelist of devices and mount points so it can safely be called by any user.It is not true that every action in the GUI goes through a background shell. A few do but most don't. |
_unix.49332 | I use Debian Wheezy on EFI motherboard and need ntldr module in GRUB2 to load bootmgr of Windows 7 installer, because the way it starts on its own (apparently, using the boot sector of the USB flash drive the installer is on) it only installs Windows on MBR-formatted disk. When I install GRUB using grub-install it won't add ntldr.mod to the GRUB modules folder and can't insmod it.Why? When I only download GRUB package without installation (apt-get download...), the module can be found there. If I add the .mod file from the downloaded package to the installed GRUB's modules folder and then insmod ntldr from the GRUB command line, it says something about wrong ELF magic (?).How to do it forcibly?Is there another way to boot the Windows installer in the GPT-mode, asI don't want to format the whole disk into MBR. | ntldr.mod missing from GRUB2 | debian;windows;grub2 | I figured out the correct bootloader of Windows is hidden somewhere in the large packed files that come on the installation image. It can be unpacked, put into right boot directory and then loaded with GRUB2 chainloader as usually. I don't get why despite having right loader Microsoft hides it somewhere deep and places the strange one into default boot dir.It worked for me (though, I downloaded the file provided on the instructions page I found because it was quite some pain to unpack it). Unfortunately, I don't remember details, I found manual somewhere on the web, but the general idea is described. |
_softwareengineering.241309 | When implementing the Builder Pattern, I often find myself confused about when to let building fail, and I even manage to take different stands on the matter every few days. First some explanation: With failing early I mean that building an object should fail as soon as an invalid parameter is passed in, so inside the SomeObjectBuilder. With failing late I mean that building an object can only fail on the build() call that implicitly calls a constructor of the object to be built. Then some arguments: In favor of failing late: A builder class should be no more than a class that simply holds values. Moreover, it leads to less code duplication. In favor of failing early: A general approach in software programming is that you want to detect issues as early as possible, and therefore the most logical place to check would be in the builder class' constructor, 'setters', and ultimately in the build method. What is the general consensus about this? | Builder Pattern: When to fail? | java;design patterns | Let's look at the options, where we can place the validation code:
1. Inside the setters in the builder.
2. Inside the build() method.
3. Inside the constructed entity: it will be invoked in the build() method when the entity is being created.
Option 1 allows us to detect problems earlier, but there can be complicated cases when we can validate input only having the full context, thus doing at least part of validation in the build() method. Thus, choosing option 1 will lead to inconsistent code, with part of the validation being done in one place and another part being done in another place. Option 2 isn't significantly worse than option 1, because, usually, setters in the builder are invoked right before the build(), especially in fluent interfaces. Thus, it's still possible to detect a problem early enough in most cases. However, if the builder is not the only way to create an object, it will lead to duplication of validation code, because you'll need to have it everywhere you create an object. The most logical solution in this case will be to put validation as close to the created object as possible, that is, inside of it. And this is option 3. From a SOLID point of view, putting validation in the builder also violates SRP: the builder class already has the responsibility of aggregating the data to construct an object. Validation is establishing contracts on its own internal state; it's a new responsibility to check the state of another object. Thus, from my point of view, not only is it better to fail late from a design perspective, but it's also better to fail inside the constructed entity rather than in the builder itself. UPD: this comment reminded me of one more possibility, when validation inside the builder (option 1 or 2) makes sense. It does make sense if the builder has its own contracts on the objects it is creating. For example, assume that we have a builder that constructs a string with specific content, say, a list of number ranges 1-2,3-4,5-6. This builder may have a method like addRange(int min, int max). The resulting string does not know anything about these numbers, nor should it have to. The builder itself defines the format of the string and the constraints on the numbers. Thus, the method addRange(int,int) must validate the input numbers and throw an exception if max is less than min. That said, the general rule will be to validate only the contracts defined by the builder itself. |
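A small sketch of the answer's range-list example (hypothetical names): the builder enforces its own contract in add_range, while any invariants of the finished object would live in the object itself:

```python
class RangeListBuilder:
    def __init__(self):
        self._ranges = []

    def add_range(self, lo, hi):
        if lo > hi:  # the builder's own contract, checked early
            raise ValueError("lo must not exceed hi")
        self._ranges.append((lo, hi))
        return self

    def build(self):
        # The produced string knows nothing about the numeric constraints.
        return ",".join("%d-%d" % (lo, hi) for lo, hi in self._ranges)
```

For example, RangeListBuilder().add_range(1, 2).add_range(3, 4).build() yields "1-2,3-4", while add_range(4, 3) fails immediately in the builder.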
_softwareengineering.290963 | I heard that SQL is a Turing-complete language (for example: https://stackoverflow.com/a/7580013/2604170). I am just curious whether it would be possible to create an independent web framework like Ruby on Rails or Flask (Python) in pure SQL (SQL would replace Python or Ruby or Java, for example). It's just an idea, but I think it would be really interesting if something like this could work. I apologize if it's a stupid question. Thanks in advance. | Web framework in pure SQL | sql;frameworks;web | No, because you need at least basic IO capabilities to be able to serve up web content. Assuming you finesse that by using something else to put in front of your database server then, yes, technically that would be possible. It would just be horrible. |
_computergraphics.5251 | I'm studying PBR and the book introduces the potential equation. They said the potential equation can describe shooting algorithms better, and $W(x, \theta)$ describes the fraction of the radiance $L(x, \theta)$. But I could not understand the concept. Is $W(x, \theta)$ just a fraction such as $\frac{1}{3}$ (I know the original equation is quite complicated, an integral) used to reduce the radiance $L$ that is derived from a light source? (Because $L$ would be diminished if it collides with objects.) | What is the potential equation in PBR? | physically based;pbr;photo realistic;realistic | null |
_softwareengineering.211519 | I am new to Branching and Merging but I have been tasked with making future development on an application possible while still allowing bug fixes to production. Usually I am the only developer on the application unless I am on leave. I have watched a Pluralsight video on Branching and done some forum/stack reading. I was hoping someone could take a look at my solution proposal and critique it. I am concerned I will cause more problems than solutions if I get this wrong. As of Version 1.1.0.0 I have introduced a branching system for future development. Version 1.1.0.0 is our production branch. No changes should appear here except bug fixes. Version 1.2.0.0 is the next version and our development branch. After completing a development, the development branch will merge to the Application (trunk). The application will be deployed for testing. After sign-off, the development branch becomes production and the previous version branch will be removed. A new branch will be created for the next version. For bug fixing, the bugs are fixed against the production branch and merged to the application, so when the development branch merges down it also obtains those fixes. | TFS Branching Advice | version control;team foundation server;branching;merging | Sounds like 'scenario 2: branch for release' as described by Microsoft's guides. It's probably the most commonly used branching strategy when using TFS: you have a Main branch from which you make a Dev branch for development; when you're ready to merge back, you merge onto Main, perform some testing and then either release from the Main branch, or more commonly, take the Main onto a Release branch so you can make changes to an individual release without having to worry about further updates that have been merged onto Main getting in the way. Some people never make bug fixes directly on the production branch; they branch from it, make the bug fixes and then merge them back when they've been tested and verified. It makes things easier if you decide not to go with the bug fixes, or have to revert them later, as each bug fix release will be a single 'merge' revision onto your release branch rather than a heap of commits. So, in summary: I would go with 3 branches, one for dev, one for QA (which is also the main line) and another branch for releases. You can tag each release revision if you know you do not have to maintain old versions, but if you do then that Release branch should be a release branch per release, or major version, of your product. With git and similar systems, you tend to see more of a feature or team branch strategy as merging is a lot easier than on TFS. With something like svn, you tend to see both types. |
_unix.29220 | I have what I believe is a system file, /etc/cron.daily/ntpupdate, which runs ntpdate ntp.ubuntu.com daily to sync with the network time. Every day it generates output very similar to this:

/etc/cron.daily/ntpupdate:
16 Jan 06:30:42 ntpdate[21446]: step time server 91.189.94.4 offset -12.646804 sec

I'm not positive what the 91.189.94.4 means, but I'm pretty sure -12.646804 sec means that my server is off by around 12 seconds. But I don't know why it is off by around the same amount every day. This is an Amazon EC2 instance running Ubuntu. I can only guess that either it is losing/gaining 12 seconds per day, or something else is syncing the time with another clock that is off by 12 seconds and then I am re-syncing it. What should I do to try and track this down further? I don't see any other cron jobs in the /etc/cron.* directories or in the users' cron jobs...

UPDATE: Just thought I'd share that I started running this hourly to see if there would be a big jump at a certain hour. This is the hourly output:

16 Jan 15:17:04 ntpdate[8346]: adjust time server 91.189.94.4 offset -0.464418 sec

So apparently every hour the clock is off by around half a second, which is consistent with the clock being off by around 12 seconds each day (24 hours). Guess the clock is just running fast! Thanks! | Why is my EC2 server's time off by ~10 seconds every day? | linux;ubuntu;synchronization;time | There are a number of factors that might make a software clock run slow or fast. Clocks on virtual servers are especially prone to a whole class of these problems; 12 seconds a day is pretty bad until you come across virtual boxes with clocks that run at 180-200% speed! Clocks on laptops that suspend can suffer from time-keeping issues too. You should consider dropping ntpdate in favour of ntpd. The package name is ntp on Debian (and presumably Ubuntu too). The NTP daemon keeps your time in sync a lot more proactively than a cron job, synchronising with one or more other NTP servers and keeping your clock much more accurate. It's another implementation of the same protocol ntpdate uses, except ntpd monitors the time continuously. If you don't want the (very small) overhead of ntpd, you might consider running ntpdate once an hour; assuming you're 0.5s off every hour, that should be sufficient.
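A minimal sketch of the switch the answer recommends, using Debian/Ubuntu package names (the peer list is whatever your distribution configures):

    sudo apt-get install ntp     # installs and starts the ntpd daemon
    ntpq -p                      # list peers; watch the offset/jitter columns shrink

Unlike the daily cron job, ntpd also disciplines the clock's rate, so a fast-running clock like this one gets its frequency corrected rather than being repeatedly stepped.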
_codereview.59325 | I am trying to represent questions in quizzes. The database is MySQL, but I am using fairly generic SQL. The assumption is that varchar cannot handle more than 255 characters, so I am supporting longer text by having multiple text components. There are four primitive kinds of questions: multiple choice, multiple answer, fill in the blank, and editText. Multiple choice questions have multiple answers, with one being right. Multiple answer questions have multiple answers, with some being wrong and some being right; the student is expected to pick multiple answers and avoid getting incorrect ones. Fill in the blank allows a string answer which is compared against the answer (or answers). For editText, a block of text is loaded and must be modified by the student. An editText can exceed 255 characters, so it cannot use a single varchar. In addition to the base types, there are also multi-part questions where each part can be a question. My approach is as follows. A Quiz is a collection of CompoundQuestions in a particular order (this table is not included because it is not relevant to the rest of the question). A CompoundQuestion is a sequence of text, graphics, and questions. Some compound questions could have zero questions (text only) while other CompoundQuestions could have many; most will have just one.

CREATE TABLE CompoundQuestion(
    cqid integer(8) primary key
);

CREATE TABLE CompoundQuestionText(
    cqid integer(8),
    seq integer(4),
    primary key (cqid, seq),
    text varchar(255),
    imgname varchar(255)
);

CREATE TABLE Questions (
    qid integer(8) primary key,
    qtype integer(4),
    cqid integer(8),
    seq integer(4)
);

CREATE TABLE QuestionText (
    qid integer(8),
    seq integer(4),
    primary key(qid,seq),
    text varchar(255)
);

CREATE TABLE Answers (
    qid integer(8),
    aid integer(4),
    primary key (qid,aid),
    text varchar(255),
    img varchar(255)
);

Is this a reasonable approach? Is there a better way to handle columns larger than 255 characters? Blobs add complication and can't be searched, right? Suppose more types are added. Do extra fields just get added in new tables and joined to the base question table? If so, every kind of question then requires a different query. Is there any better way to do that? Ideally, I would like to be able to write a stored procedure that loads all questions in a particular quiz. The two approaches that occur to me are selecting each question and, based on the type, creating the object, or selecting all the objects of each type. If the system expands to 10-20 different types of questions, what would be the best way to implement this query? | SQL design for representing a two-level hierarchy of objects with containment | sql;mysql | null
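To make the stored-procedure question concrete, here is a hedged sketch of the kind of query this schema already supports: it pulls every question of one CompoundQuestion together with its (possibly multi-row) text, leaving type-specific columns out:

    SELECT q.qid, q.qtype, q.seq AS question_seq,
           qt.seq AS text_seq, qt.text
    FROM Questions q
    JOIN QuestionText qt ON qt.qid = q.qid
    WHERE q.cqid = 42          -- hypothetical compound question id
    ORDER BY q.seq, qt.seq;

Answers join on Answers.qid the same way; only the interpretation of the rows depends on qtype.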
_codereview.84526 | I would like to present for review my (much) revised batch templating utility which had it's humble beginnings here in a previous post. As I mentioned there, this program is my entry into python programming. I am trying to grow the simple script from my previous post into a more robust utility. Questions I am hoping to answer with this post:Is the overall structure sound?Is my use of exceptions correct?Is my documentation OK? This was my first intro into docstrings and I have worked hard to make them as complete as possible.One part that bugs me is the try block in the main() function.First, this whole block should probably be a separate function? I left it in main() since it's the meat of the program.Secondly, there seems to be a lot of code between the try: and the except: and I know this should be minimized, but I couldn't come up with a better method.Program InputsThe program takes as inputs two required files, a CSV data file and a template file and an optional appended file. The output of the program is a set of rendered files, one for each data row in the CSV file. The CSV data file contains a header row which is used as a set keys mapped to tags inside of the template file. For every row in the data file, each data item associated with the keys is substituted with the tag in the template file, the appended file added (with some added tags for *.js files) and the rendered file written to disk. Pretty straight forward I think. The main docstring illustrates a quick example.Template SyntaxThe program uses Python's string.Template() string substitution method which utilizes the $ replacement syntax with the added requirement of mandating the optional (to the method) { and } curly braces. So, for a particular Key from the data file header row, the template tag would be ${Key}.Wall of CodeI think the docstrings explain pretty well what all is going on...A simple batch templating utility for Python.Initially conceived as a patch to quickly generate small HTML files fromcatalog data. The program takes as inputs two (2) required files, a CSVdata file and a template file (see below) and the option to append athird file. Output of the program is a set of rendered files, one foreach data row in the CSV data file.USAGE: Current rendition of program uses a simple guided prompt interface to walk user through process.**SPECIAL WARNING** This program copies template file and appended file to strings which means they will both be loaded fully into memory. Common sense should be exercised when dealing with extremely large files.CSV DATA FILE: Data File shall contain a header row. Header row contains the keys that will be used to render the output files. Keys shall not contain spaces. There shall be a corresponding tag in the template file for each key in the CSV Data File. File can contain any (reasonable) number of data rows and columns. Each item in a row is swapped out with the tag in the template file which corresponds to appropriate key from the header row. There will be one output file generated for each row in the data file.TEMPLATE FILE: The template file is basically a copy of the desired output file with tags placed wherever a particular piece of data from the CSV Data File should be placed in the output. Syntax: The program uses Python's string.Template() string substitution method which utilizes the `$` replacement syntax. The program further restricts the syntax requiring the use of the optional `{` and `}` curly braces surrounding tags. 
So, for a particular 'Key' from the data file header row, the template tag would be ${Key}.APPENDED FILE: The appended file is strictly copied _ver batum_ to the end of the rendered output file. There is really no restriction on the appended file other than special warning above. Special Feature: If the appended file is a Javascript file (detected using the *.js file extension), the program will add appropriate opening and closing HTML tags.QUICK EXAMPLE: Assume CSV Data File: <some_file.csv> stockID,color,material,url 340,Blue,80% Wool / 20% Acrylic,http://placehold.it/400 275,brown,100% Cotton,http://placehold.it/600 Assume Template File: <another_file.html> <h1>Stock ID: ${stockID}</h1> <ul> <li>${color}</li> <li>${material}</li> </ul> <img src='${url}'> Assume ...Appended File? --> No Output file 1 = 'listing-340.html' <h1>Stock ID: 340</h1> <ul> <li>Blue</li> <li>80% Wool / 20% Acrylic</li> </ul> <img src='http://placehold.it/400'> Output file 2 = 'listing-340.html' <h1>Stock ID: 275</h1> <ul> <li>brown</li> <li>100% Cotton</li> </ul> <img src='http://placehold.it/600'>Author: Chris E. Pearson (christoper.e.pearson.1 at gmail dot com)Copyright (c) Chris E. Pearson, 2015License: TBDimport osimport reimport csvimport stringdef main(): A simple batch templating utility for Python. See main docstring for details. # Collect input file names and contents for text files. fname_data = prompt_filename('Data File') fname_template = prompt_filename('Template File') fcontents_template = get_contents(fname_template) fname_appended, fcontents_appended = get_appended() # Validate the inputs tag_set = set(re.findall('\${(\S+)}', fcontents_template)) primary_key, key_set = get_keys(fname_data) validate_inputs(tag_set, key_set) validated_template = string.Template(fcontents_template) # Generate the output try: # This seems like a lot to put in a try statement...? with open(fname_data) as f: reader = csv.DictReader(f) f_count = 0 for row in reader: # Create output filename output_filename = ('Listing_{}.html'.format(row[primary_key])) f_count += 1 print('File #{}: {}'.format(f_count, output_filename)) # Prep string output_main = validated_template.substitute(row) write_string = '{}{}'.format(output_main, fcontents_appended) # Write File with open(output_filename, 'w') as f_out: f_out.write(write_string) except OSError: print('No such file {!r}. Check file name and path and try again.' .format(fname)) raise else: print('{} of {} files created'.format(str(f_count), str(reader.line_num-1)))def prompt_filename(fclass): Prompt user for a filename for given file classification. Args: fclass (string): A descriptive string describing the type of file for which the filename is requested. _e.g._ 'Template File' Returns: filename (string) while True: filename = input('Enter {0} --> '.format(fclass)) if os.path.isfile(filename): return filename else: print('No such file: {!r}.'.format(filename)) print('Please enter a valid file name') continuedef get_contents(fname): Return contents of file `fname` as a string if file exists. Args: fname (string): Name of the file to be opened and returned as a string. Returns: text_file (string): The entire contents of `fname` read in as a string. Exceptions: OSError: informs user that fname is invalid. try: with open(fname) as f: text_file = f.read() except OSError: print('No such file {!r}. Check file name and path and try again.' .format(fname)) raise else: return text_filedef get_appended(): Ask user if appended file and prompt filename if so. 
Returns: fname_appended (string) Filename for appended file. fcontents_appended (string) The entire contents of `fname_appended` as a string. Exceptions: OSError: Raised by function prompt_filename informs user that fname is invalid. See Also: Function: prompt_filename Function: get_contents prompt_for_appended = input('Is there an appended file? --> ') if prompt_for_appended.lower().startswith('y'): fname_appended = prompt_filename('Appended File') fcontents_appended = get_contents(fname_appended) if fname_appended.lower().endswith('.js'): open_tag = '<script type=text/javascript>' close_tag = '</script>' fcontents_appended = '\n{0}\n{1}\n{2}'.format(open_tag, fcontents_appended, close_tag) else: fname_appended = None fcontents_appended = '' return fname_appended, fcontents_appendeddef get_keys(fname): Get key set as header row of given CSV file and get primary key. Given a CSV data file `fname`, return the header row from file as a set of keys. Also return the primary key for the data file. The primary key is simply the header for the first column. Args: fname (string): Name of the CSV file for which the keys are needed. Returns: primary_key (string) Header value of first column in given CSV file. key_set (set of strings) A set comprised of all header row values for given CSV file. Exceptions: OSError: informs user that fname is invalid. try: with open(fname) as f: key_list = f.readline().strip().split(',') except OSError: print('No such file {!r}. Check file name and path and try again.' .format(fname)) raise else: primary_key = key_list[0] key_set = set(key_list) return primary_key, key_setdef validate_spaces(item_set): Read through a set of strings and checks for spaces. The function takes a set of strings and searches through each string looking for spaces. If a space is found, string is appended to a list. Once all strings are searched, if any spaces found, print error with generated list and terminate program. Args: item_set (set of strings) Returns: None Exceptions: A `KeyingError` is raised if any spaces are detected in the data file key set. bad_items = [] for item in item_set: if ' ' in item: bad_items.append(item) if bad_items != []: try: raise KeyingError('Keys cannot contain spaces.') except KeyingError as e: print(e) print('Please correct these keys:\n', bad_items) # quit() raisedef validate_inputs(tag_set, key_set): Validate template tag_set against data file key_set. Validates the key_set from a given data file against the tag_set from the corresponding template file, first checking the key set for lack of spaces and then checking if the two sets are equivalent. If either condition is not met, an exception will be raised and the program will terminate. Args: tag_set (set of strings) key_set (set of strings) Returns: None Exceptions: A `KeyingError` is raised by function `validate_spaces` if any spaces are detected in the data file key set. A `MisMatchError` is raised if the two input sets are not equivalent. 
See also: Function: validate_spaces try: validate_spaces(key_set) except KeyingError as e: print('Goodbye') quit() if key_set != tag_set: try: raise MisMatchError('Tags and keys do not match') except MisMatchError as e: print(e) if tag_set - key_set == set(): print('missing tags for key(s):', key_set - tag_set) print('(or tag(s) contains spaces)') else: print('Check template file tags for key(s):', key_set - tag_set) print('Template shows:', tag_set - key_set) print('Goodbye') quit()class KeyingError(Exception): def __init__(self, arg): self.arg = argclass MisMatchError(Exception): def __init__(self, arg): self.arg = argif __name__ == '__main__': main() | Simple Batch Templating Utility in Python | python;beginner;template | null |
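The mechanism the whole utility rests on is string.Template with the braced syntax; a minimal standalone sketch (data invented for illustration):

    from string import Template

    row = {'stockID': '340', 'color': 'Blue'}
    tpl = Template('<h1>Stock ID: ${stockID}</h1>\n<li>${color}</li>')
    print(tpl.substitute(row))  # raises KeyError when a tag has no matching key

That KeyError behaviour is why validating the tag set against the CSV header row up front, as the program does, pays off.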
_webmaster.74736 | I recently made my website https and it's previous url (http) had many Facebook likes which I would like to migrate to the new https url. The only change to the url was the https, no domain change or anything else.I followed the migration steps advised by Facebook but ran into the following errors (according to developers.facebook.com's Open Graph Object Debugger):Critical Errors That Must Be Fixed.Could Not Follow Redirect Path.Using data from https://www.myurl.com because there was an error following the redirect path.Errors That Must Be Fixed.Circular Redirect Path.Circular redirect path detected (see 'Redirect Path' section for details).Could Not Follow Redirect.URL requested a HTTP redirect, but it could not be followed.To find the object, these are the redirects we had to follow.original http://www.myurl.com302 https://www.myurl.comog:url http://www.myurl.comI have no idea how to fix this. Obviously because of the http to https change I have a 301 redirect from http to https on my .htaccess which I think is what is causing the problem. Any ideas folks?Update...It appears like what I need to do is exclude Facebook's crawler (how do I identify that?) from following my http://www.myurl.com to https://www.myurl.com redirect. My .htaccess file looks like this at the moment...RewriteEngine OnRewriteCond %{HTTPS} offRewriteCond %{REQUEST_URI} !^/contact-us\.phpRewriteRule ^ https://%{HTTP_HOST}%{REQUEST_URI}RewriteCond %{HTTPS} onRewriteRule ^contact-us\.php http://%{HTTP_HOST}/contact-us.php [NC,L,R=301]I have http to https redirected with exception of one php page. But how do I exclude Facebook's crawler from automatically going to https? | Excluding Facebook Crawler From Following Htaccess Redirects? | htaccess;https;facebook;social sharing buttons | null |
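One way to attack this, sketched rather than battle-tested: Facebook's crawler identifies itself with the user agents facebookexternalhit and Facebot, so the HTTP-to-HTTPS rule can simply skip it, letting the crawler keep resolving the old http URLs (and their like counts) while everyone else gets redirected:

    RewriteEngine On
    RewriteCond %{HTTPS} off
    RewriteCond %{HTTP_USER_AGENT} !facebookexternalhit [NC]
    RewriteCond %{HTTP_USER_AGENT} !Facebot [NC]
    RewriteRule ^ https://%{HTTP_HOST}%{REQUEST_URI} [L,R=301]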
_reverseengineering.2127 | I am reverse engineering some code from which IDA has generated the following disassembly. These specific lines of code are just for illustrative purposes. Notice that the third line does not call a specifc function by its name but rather by its address.mov rcx, [rsp+128h+var_D8] // reg CX gets the address at stack pointer+128h+var_D8 bytes mov r8, [rcx] // the address at reg CX is stored to reg r8call qword ptr [r8 + 18h] // at address rax+18h, call function defined by qword bytesI'm interested in determining which function is being called. What mechanisms, tools, tricks, etc. can I use to determine which function in the dissassembly a call qword ptr <address> is referring to? I'm up for trying other disassembler programs.From an answer to my previous question, this is known as an indirect call or (perhaps a virtual function call). The disassembly has many of these, so how do I resolve them? In addition, IDA has identified hundreds of functions. How do I go about figuring out which one was actually being called during any given indirect call (or virtual call)? | How to identify function calls in IDA Pro's disassembly? | ida;disassembly;virtual functions | The easiest way to find out the function in question would probably be by dynamic analysis. You can easily do this by placing a breakpoint on that instruction in a debugger and examining the registers. A more general solution would probably involve some scripting to record all calls and add that information to the IDA database. Funcap plugin does something similar if not exactly what you are looking for:This script records function calls (and returns) across an executable using IDA debugger API, along with all the arguments passed. It dumps the info to a text file, and also inserts it into IDA's inline comments. This way, static analysis that usually follows the behavioral runtime analysis when analyzing malware, can be directly fed with runtime info such as decrypted strings returned in function's arguments. |
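One concrete way to do that dynamic analysis inside IDA itself, sketched with IDAPython (IDA 7.x names; the address is a made-up example): break on the indirect call, then read the register and the slot it indexes to learn the real target:

    import idc

    ea = 0x140001234                # hypothetical address of `call qword ptr [r8+18h]`
    idc.add_bpt(ea)                 # set a breakpoint, then run the debugger to it

    # once the debugger is stopped at ea:
    r8 = idc.get_reg_value("r8")
    target = idc.read_dbg_qword(r8 + 0x18)
    print(hex(target), idc.get_func_name(target))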
_softwareengineering.54060 | So I know HTML5 is the replacement for Flash... however, it's obviously not mainstream just yet... but what else is it supposed to replace? I ask this because web dev has always been a secret kind of side-passion of mine (even though I am a firmware programmer mostly doing C/Java stuff), and I want to pursue a side thing doing web design (I know basic XHTML/CSS + some CSS3). But what exactly is, I guess, pointless to study? JavaScript, I assume, will always be a huge part of web design? (HTML5 isn't replacing that, is it?) What about Ajax and CSS itself? And then there's Flash... not sure if that's really worth putting effort into? Also there's Adobe Flex/AIR... I'm a bit confused, if you can't tell. | So with HTML5 what exactly.....is supposed to become phased out | web development;html5;web design | HTML 5 is not a replacement for Flash. Its various technologies provide alternative mechanisms for doing many or even all of the things that you can do in a Flash app, but it's not correct to say that it is a drop-in replacement for Flash. Both CSS and JavaScript are still extremely important technologies in the HTML 5 toolkit. You still need to style your interfaces, and CSS is how you do it. You still need to provide client-side programmability, and JavaScript is how you do it. In fact, JavaScript becomes even more important than it already is in HTML 5. The HTML 5 standard defines a very rich and powerful set of services, and they are all exposed and programmed against with JavaScript. Check out one of the many HTML 5 demo web sites to get a better idea of what it is all about. It's probably worth mentioning that Google Gears is one product that is very definitely being phased out because it will be replaced by HTML 5. I can't think of anything else that is being made truly obsolete by it.
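To make the 'programmed against with JavaScript' point concrete, here is about the smallest HTML 5 replacement for a Flash-style drawing, just markup plus script:

    <canvas id="c" width="120" height="80"></canvas>
    <script>
      var ctx = document.getElementById("c").getContext("2d");
      ctx.fillStyle = "steelblue";
      ctx.fillRect(10, 10, 100, 60);  // drawn by script, no plugin involved
    </script>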
_unix.245067 | I tried to upgrade MySQL 5.5 to MariaDB by removing MySQL altogether. Then I installed MariaDB 10.1 as follows: sudo apt-get install software-properties-commonsudo apt-key adv --recv-keys --keyserver keyserver.ubuntu.com 0xcbcb082a1bb943dbsudo add-apt-repository 'deb [arch=amd64,i386] http://sfo1.mirrors.digitalocean.com/mariadb/repo/10.1/debian jessie main'sudo apt-get updatesudo apt-get install mariadb-serverservice apache2 restartBut afterwards, phpMyAdmin did not load, and was returning 404. The app I was using was loading fine, and I was able to connect to the Database via command line. and mysql -V returned mariadb version.So I removed phpMyAdmin, and MariaDB as follows: service mysql stopapt-get --purge remove mysql*mv /etc/mysql/ /tmp/mysql_configs/apt-get remove --purge mysql*apt-get autoremoveapt-get autocleanapt-get purge phpmyadminapt-get autoremove phpmyadminservice apache2 restartapt-get updatecleared the mariadb entries in the sources file: /etc/apt/sources.listNow, each time I try to apt-get install anything it returns me an error: apt-get install mysql-server mysql-clientReading package lists... DoneBuilding dependency treeReading state information... DoneYou might want to run 'apt-get -f install' to correct these:The following packages have unmet dependencies: mariadb-server-10.0 : Depends: mariadb-client-10.0 (>= 10.0.22-0+deb8u1) but it is not going to be installed Depends: mariadb-server-core-10.0 (>= 10.0.22-0+deb8u1) but it is not going to be installed PreDepends: mariadb-common but it is not going to be installed Breaks: mysql-server mysql-client : Depends: mysql-client-5.5 mysql-server : Depends: mysql-server-5.5 but it is not going to be installedE: Unmet dependencies. Try 'apt-get -f install' with no packages (or specify a solution).This erros is for anything I try to install. And I did try to apt-get -f install. Any ideas what I messed up and how to fix it? Notes:lsb_release -daNo LSB modules are available.Distributor ID: DebianDescription: Debian GNU/Linux 8.2 (jessie)Release: 8.2Codename: jessieapache2 -vServer version: Apache/2.4.10 (Debian)Server built: Aug 28 2015 16:28:08 | MariaDB Dependencies issue after isntall/remove | debian;mysql;dependencies;mariadb;phpmyadmin | null |
_codereview.20041 | Background:A synchronous grammar is a like two context-free grammars connected in parallel. It is used for translation. For example, here is a small synchronous grammar that can be used for translating between natural language text and semantic representation:== {verb} ==* I offer {noun}. / OFFER({noun})* Will you accept {noun}? / QUERY({noun})* I offer no company car. / OFFER(Leased Car=Without leased car)== {noun} ==* {number} % pension / Pension Fund={number}%* A salary of {number} NIS / Salary={number}* A company car / Leased Car=With leased carThe headings ({verb}, {noun}) are the nonterminals of the grammar. under each nonterminal is a list of translations enabled by this nonterminal.Starting from the nonterminal {verb}, we can create, from the above grammar, the following 7 translations:I offer no company car. => [OFFER(Leased Car=Without leased car)]I offer A company car. => [OFFER(Leased Car=With leased car)]Will you accept A salary of {number} NIS? => [QUERY(Salary={number})]I offer {number} % pension. => [OFFER(Pension Fund={number}%)]I offer A salary of {number} NIS. => [OFFER(Salary={number})]Will you accept A company car? => [QUERY(Leased Car=With leased car)]Will you accept {number} % pension? => [QUERY(Pension Fund={number}%)]The translations are many-to-many (i.e. there can be more than one translation to each source string, and vice-versa). So, each set of translations for a specific nonterminal is represented by a multimap (we use a class ValueSetMap<String, String> for representing a many-to-many map). An entire grammar is represented by a map of such multimaps: Map<String, ValueSetMap<String, String>>. It maps a nonterminal to its multimap of translations.Here is some Java code I wrote, for expanding a grammar into a flat multimap of translations. 
It works for the above example and some more complicated examples, but I wonder if it really covers all cases.public class GrammarExpander { public GrammarExpander(Map<String, ValueSetMap<String,String>> grammarMap) { this.grammarMap = grammarMap; this.expandedGrammarMap = new HashMap<String,ValueSetMap<String,String>>(); } public ValueSetMap<String, String> expand(String startNonterminal, int maxDepth) { if (expandedGrammarMap.containsKey(startNonterminal)) return expandedGrammarMap.get(startNonterminal); Set<String> nonterminals = grammarMap.keySet(); ValueSetMap<String, String> translationsFromStartNonterminal = grammarMap.get(startNonterminal); if (translationsFromStartNonterminal==null) throw new NullPointerException(No translations from startNonterminal + startNonterminal); // don't expand nonterminal anymore - prevent infinite recursion if (maxDepth<=0) return translationsFromStartNonterminal; for (String nonterminal: nonterminals) { // expand each nonterminal in turn ValueSetMap<String,String> newTranslations = new SimpleValueSetMap<String,String>(); for (String source: translationsFromStartNonterminal.keySet()) { for (String target: translationsFromStartNonterminal.get(source)) { // source contains nonterminal - expand it recursively if (source.contains(nonterminal) || target.contains(nonterminal)) { ValueSetMap<String, String> expansions = this.expand(nonterminal, maxDepth-1); for (String expansionSource: expansions.keySet()) for (String expansionTarget: expansions.get(expansionSource)) newTranslations.put( source.replace(nonterminal, expansionSource), target.replace(nonterminal, expansionTarget)); } else { newTranslations.put(source, target); } } } translationsFromStartNonterminal = newTranslations; } expandedGrammarMap.put(startNonterminal, translationsFromStartNonterminal); return translationsFromStartNonterminal; } /* * protected zone */ protected Map<String, ValueSetMap<String, String>> grammarMap; protected Map<String, ValueSetMap<String, String>> expandedGrammarMap;} | Expanding a synchronous grammar | java;grammar | NullPointerExceptionIt should be IllegalArgumentException.Consider just returning empty collection of expansion.It greatly improves composeability of your code.Think about making invalid states unrepresentable.(though I cannot think a trivial way of doing this, in this case.)In the following statement translation is recursively expanded if either of source and target contains nonterminal.Whereas the comment says thatthe translation is expanded if source contains nonterminal// source contains nonterminal - expand it recursivelyif (source.contains(nonterminal) || target.contains(nonterminal)) { Misleading comments are often a sign of a buggy algorithm.Moreover in the given examples it is observed that source contains a nonterminal iff target also contains nonterminalMaybe this should be a constraint of the grammar.Moreover source.contains(nonterminal) suggest thatyou implicitly assume nonterminals, that is the keys of grammarMapstart and end with {, }. and nonterminal names do not contain {}, and there may be more constraints. Consider keys: noun, pronoun, {pronoun}, {{noun}}. 
See the looming trouble? The keys should be validated in the creation of the grammar. Some javadoc comments would also be helpful, if not sufficient: even if you will be the only user this code ever has, you will forget these implicit assumptions in no time. In accordance with the above advice, consider using abstract data types instead of meaningless primitive types (java.util.Collection, String, etc.), so that your multilevel loops read more like

for (Translation translation : translations) {
    grammar.substituteOneLevel(translation);
}

// in grammar.substituteOneLevel(translation)
for (Nonterminal nonterminal : translation.getNonterminals()) {
    for (Expansion expansion : grammar.getExpansions(nonterminal)) {
        result.add(translation.substitute(nonterminal, expansion));
    }
}

or some such. Apart from the improved readability and clarity of your business (academic) logic that comes from using the nouns and verbs of your domain, you can also disallow a grammar from having expansions that contain nonterminals the grammar itself does not contain (your NullPointerException). Efficiency-wise, one of my objections is this:

for (String nonterminal: nonterminals) { // expand each nonterminal in turn

Why try to expand nonterminals that are not contained in any of the current translations? Expanding a nonterminal for a fixed number of steps does not seem very useful. What is your use case? Do you have a running test case that demonstrates the setup of the grammar and the manner in which it will be used? Even if expanding a nonterminal for a fixed number of steps is what you want, you probably also want some indication of whether you have exhausted all possible expansions (whether none of your expansions contain nonterminals). Of course, possibly more useful queries with these kinds of grammars are: What are the possible translations of this input? What are the possible inputs that translate to this output? Is this grammar finite? Why don't you use a Prolog or Lisp variant? Have a look at Clojure. It is a Lisp variant that runs on the JVM. It has a logic library, which I believe can be more useful than Java.
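A sketch of that abstract-data-type suggestion with all names hypothetical; once a Translation knows how to substitute one nonterminal, the expander's loops carry almost no string plumbing:

    // Hypothetical immutable value type; substitutions return new pairs
    public final class Translation {
        private final String source, target;

        public Translation(String source, String target) {
            this.source = source;
            this.target = target;
        }

        public Translation substitute(String nonterminal, Translation expansion) {
            return new Translation(source.replace(nonterminal, expansion.source),
                                   target.replace(nonterminal, expansion.target));
        }

        public boolean containsNonterminal(String nonterminal) {
            return source.contains(nonterminal) || target.contains(nonterminal);
        }
    }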
_hardwarecs.6396 | Any recommendations on a fanless mini-ITX board with a 4x-16x PCI-E slot? Something a few years old ideally, as I need to keep costs down. My current set-up is a fanless Asus J1800-C, which runs really well but only has a 1x PCI-E slot. I've set up OpenBSD on this system with no problems so far and was going to go on and repurpose a Cisco EA2700 as a wireless AP/ethernet switch... but I have decided a one-box solution would be better. I acquired an Intel 4-port GB Ethernet card... then to my horror realised it required a 4x PCI-E slot as a minimum. I've been out of the system-building game for so long I hadn't realised the slot had changed so much! The board must have a low-wattage, passively cooled CPU; a Celeron would be fine, as with the J1800-C. It must also have 1 gigabit ethernet port. It would ideally take my 4GB of RAM, which is laptop-style DDR3-1333 and low voltage (1.35V), and ideally have at least 1 USB3 port, 1 gigabit ethernet port, and one SATA3 port. 5 years old at most, as it needs more juice than a simple router/firewall requires, due to running snort and various other in-line traffic sniffing stuff. I'm currently running the machine on a 90W pico-PSU, and it's stable, but I suspect that won't be sufficient when the Intel 4-port ethernet card is added, as it requires about 10W. So the lower the power draw of the CPU/motherboard, the better. Thank you. | Fanless mini-itx motherboard with a 4x-16x PCI-e slot for OpenBSD project? | motherboard;router;ethernet;mini pc | null
_unix.79887 | I have file with IP address and netmask IP'smy target is to cut the netmask IP's from the file_with_IPs.txt and paste them to another file as file_with_only_netmask_ips.txtremark - netmask IP can be any combination of netmask IP , and can start for example from xxx.xxx.xxx.xxx/1 until xxx.xxx.xxx.xxx/32 for example 10.140.4.11 10.140.4.110 255.255.0.0 255.255.255.0 10.219.39.188 10.219.39.200 10.219.39.189 10.219.39.145 10.140.4.12 10.140.4.120 10.219.39.138 10.219.39.140 10.219.39.139 10.219.39.239 255.0.0.0 255.255.0.0 255.255.255.128 255.255.255.192so finally I will have in file_with_IPs.txt file 10.140.4.11 10.140.4.110 10.219.39.188 10.219.39.200 10.219.39.189 10.219.39.145 10.140.4.12 10.140.4.120 10.219.39.138 10.219.39.140 10.219.39.139 10.219.39.239and in file_with_only_netmask_ips.txt I will have only the netmask IPs as the following: 255.255.0.0 255.255.255.0 255.0.0.0 255.255.0.0 255.255.255.128 255.255.255.192Please advice what the best way to separate the netmask IPs from the ordinary IP's ?I need to write the procedure with ksh shell , and I need to run this process on Linux and Solaris machinesremark perl one linear , sed and awk can be in ksh script | linux & solaris - separate netmask IP's from ordinary IP's | linux;sed;solaris;perl;ksh | One way with awk: awk '{ for(i=1;i<=NF;i++) { if($i~/^255/) { netmask[NR]=i>1?netmask[NR]\t$i:$i } else { regular[NR]=i>1?regular[NR]\t$i:$i } }}END { for(i=1;i<=NR;i++) { if (regular[i]) { print regular[i] > file_with_IPs.txt } if (netmask[i]) { print netmask[i] > file_with_only_netmask_ips.txt } }}' fileTest:$ lsfile$ cat file10.140.4.11 10.140.4.110 255.255.0.0 255.255.255.0 10.219.39.188 10.219.39.20010.219.39.189 10.219.39.14510.140.4.12 10.140.4.12010.219.39.138 10.219.39.14010.219.39.139 10.219.39.239255.0.0.0 255.255.0.0255.255.255.128 255.255.255.192$ awk '{> for(i=1;i<=NF;i++) {> if($i~/^255/) {> netmask[NR]=i>1?netmask[NR]\t$i:$i> }> else { > regular[NR]=i>1?regular[NR]\t$i:$i> }> }> }> END {> for(i=1;i<=NR;i++) {> if (regular[i]) {> print regular[i] > file_with_IPs.txt> }> if (netmask[i]) {> print netmask[i] > file_with_only_netmask_ips.txt> }> }> }' file$ lsfile file_with_IPs.txt file_with_only_netmask_ips.txt$ cat file_with_IPs.txt 10.140.4.11 10.140.4.11010.219.39.188 10.219.39.20010.219.39.189 10.219.39.14510.140.4.12 10.140.4.12010.219.39.138 10.219.39.14010.219.39.139 10.219.39.239$ cat file_with_only_netmask_ips.txt 255.255.0.0 255.255.255.0255.0.0.0 255.255.0.0255.255.255.128 255.255.255.192 |
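If keeping the addresses grouped by input row does not matter, a plain pipeline is shorter; note it emits one address per line, unlike the awk above, which preserves the row layout (assumes space- or tab-separated fields, as in the sample):

    tr -s ' \t' '\n\n' < file | grep    '^255\.' > file_with_only_netmask_ips.txt
    tr -s ' \t' '\n\n' < file | grep -v '^255\.' | grep . > file_with_IPs.txt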
_unix.166022 | I'm not an expert on computers in general or Linux in particular, so if I'm vague on something please let me know and I'll try to elaborate. I have an old computer running Red Hat 5.0. It had a Windows dual-boot (98 or XP - I forget which), but I never used it much and a few years ago when it said it was corrupted I threw up my hands and said good riddance. However, I believe it also had some version of Fedora and maybe something else. I say maybe, because I seem to be locked out of the boot menu. After it boots up, it hangs for 4 seconds saying something like, Press any key for options. Ordinarily, I'd be able to press a key and choose my OS. Today, however, I pushed a key and nothing happened. It did its typical sequence 3, 2, 1, then booted normally. I tried rebooting a couple times, and the same thing happened. My questions are these:Is there something I've screwed up somewhere that's causing this? Or is it probably just something to do with an ancient machine? Is there some sort of troubleshooting I can do to check for corruption? Is there a way to restart my computer and boot it from a CD or DVD directly, e.g. terminal command? That's what I was trying to do originally when I found the problem. Thanks! | How do I boot my computer from a Live-DVD from the terminal? | rhel;boot;livecd;bios | You can reboot the computer with a terminal command, but you can't give it a terminal command that tells it what device to reboot into. Once the machine reboots control is passed to the BIOS, which then decides what device to boot from. Some BIOSes will automatically offer to boot from a bootable CD/DVD if it detects one, but not all.So when the machine starts (or restarts) you need to press whatever key your BIOS recognises as the BIOS boot menu key, if it has one. Failing that, you need to press the key which lets you get into the BIOS setup so you can select the bootable devices and the boot order. It's a Good Idea to make a CD / DVD drive the first bootable device.To be somewhat vaguely on-topic, I guess I should've mentioned the terminal command(s) used to reboot. :) Check out the man pages for shutdown and halt, the halt man page also mentions its synonyms reboot and poweroff....I suppose corruption on your hard drive(s) could be stopping you from booting the various bootable partitions on the system, but at this stage I'm more inclined to think the problem is BIOS-related. If you haven't done so already, take the machine apart and give it a good clean. Remove the RAM cards from their slots and make sure there's no corrosion on the connectors - a pencil eraser can be used to remove minor corrosion spots. A tiny bit of solvent (like rubbing alcohol) on a cotton bud (Q Tip) may be necessary for more stubborn spots. Do the same with any other removable cards. As I mentioned in the comments, it's probably a good idea to replace the CMOS battery.To test that your RAM is healthy, run memtest (aka memtest86 or memtst86). It's probably already installed, and is generally included on any Linux live CD /DVD (maybe in the boot/ directory). If you suspect there's a problem with your hard drive partitions, run fsck on them. And you may also like to use the badblocks program. See their man pages for details, but if anything is unclear, please ask. |
_softwareengineering.264750 | So I am developing a Web Application and I have some concerns about the database. I use a lot of boolean settings at the user table with the form privacy_allow_1, privacy_allow_that, privacy_allow_this etc etc. As they keep stacking would it be just better to use a different table just for the privacy settings and link it to the users?What are the prons and cons between the two options? | Should I do a different SQL table for user privacy settings or just keep it at the users table? | mysql;efficiency | Create another table with a schema similar touseridPrivacyTypeIdallowedandPrivacyTypeIdPrivacyTypeThis structure will allow you to add\remove\deactivate permissions without any DDL changes |
_unix.326421 | On a fresh Fedora 24 install, I want to disable NetworkManager since I have a static IP and hate daemons. If I do this after startup (as root), everything works fine: ifconfig enp1s0 192.168.0.3 netmask 255.255.0.0 ip route add default via 192.168.0.1 What's the equivalent /etc/sysconfig/network-scripts/ifcfg-enp1s0 files? Following https://stackoverflow.com/questions/21432620/how-to-setup-static-ip-in-fedora-19 (which I realize is Fedora 19, not 24, but probably should work anyway), I did: DEVICE=enp1s0 NM_CONTROLLED=no NAME=enp1s0 ONBOOT=yes TYPE=Ethernet BOOTPROTO=none DEFROUTE=yes IPV4_FAILURE_FATAL=no IPADDR=192.168.0.3 NETMASK=255.255.0.0 BROADCAST=192.168.255.255 PEERDNS=yes PEERROUTES=yes IPV6INIT=no IPV6_AUTOCONF=yes IPV6_DEFROUTE=yes IPV6_PEERDNS=yes IPV6_PEERROUTES=yes IPV6_FAILURE_FATAL=no IPV6_ADDR_GEN_MODE=stable-privacy UUID=[masked though unlikely private] AUTOCONNECT_PRIORITY=-999 but this doesn't work. If I reboot with the above, I get network is unreachable. I realize I could simply run my commands in a startup script, but am trying to do things the right way for now. | How to make ifcfg script run the exact commands I need? | networking;fedora;routing;ifconfig | null |
_webmaster.74520 | When I used the Google Analytics Core Reporting API to make queries to retrieve data from a GA view I saw quite some big changes in metrics such as sessions, transactions and number of newusers in comparison to the GA UI. Finally I found out that it was due to the fact that I included the number of users in my query. When I removed that, all metrics returned exactly the same numbers as in the GA UI. Now my question is, does someone know why this is the case?Query: Dim's: date, keyword Metrics: sessions, newUsers, transactions, transactionRevenue, users | Differences in Core Reporting API metrics due to users metric | google analytics;api;users | null |
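For reference, the v3 Core Reporting request being described looks roughly like this (view id and dates are placeholders); dropping ga:users from the metrics list is the change that made the numbers line up with the UI:

    GET https://www.googleapis.com/analytics/v3/data/ga
        ?ids=ga:12345678
        &start-date=2015-01-01&end-date=2015-01-31
        &dimensions=ga:date,ga:keyword
        &metrics=ga:sessions,ga:newUsers,ga:transactions,ga:transactionRevenue,ga:users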
_ai.1806 | AI systems today are very capable machines, and recently the area of Natural Language Processing and Response has been exploding with innovation, as well as the fundamental algorithmic structure of AI machines.I am asking if, given these recent breakthroughs, have any AI systems been developed that are able to (preferably with some measure of success) knowingly lie to humans about facts that it knows?Note, what I'm asking goes beyond the canonical discussions of the Turing Test. I'm asking of machines that can 'understand' facts and then formulate a lie against this fact, perhaps using other facts to produce a believable 'cover-up' as part of the lie.E.G.: CIA supercomputer is stolen by spies and they try to use the computer to do things, but the computer keeps saying it's missing dependencies though it really isn't or gives correct-looking but wrong answers knowingly. Or gives incorrect location of a person, knowing that the person frequents some place but isn't there at the moment. Doesn't have to be this sophisticated, of course. | Have any AI systems yet been developed that can knowingly lie to / deceive a human? | nlp;human like | The Saturday Papers: Would AI Lie To You? is a blog post summarizing a research paper called Toward Characters Who Observe, Tell, Misremember, and Lie. This research paper details some researchers' plans to implement mental models for NPCs in video games. NPCs will gather information about the world, and convey that knowledge to other people (including human players). However, they will also misremember that knowledge (either mutating that knowledge or just forgetting about it), or even lie:As a subject of conversation gets brought up, a character may convey false informationmore precisely, information that she herself does not believeto her interlocutor. Currently, this happens probabilistically according to a characters affinity toward the interlocutor, and the misinformation is randomly chosen. Later on in the research paper, they detailed their future plans for lying:Currently, lies are only stored in the knowledge of characters who receive them, but we plan to have characters who tell them also keep track of them so that they can reason about past lies when constructing subse- quent ones. While characters currently only lie about other characters, we plan to also implement self-centered lying (DePaulo 2004), e.g., characters lying about their job titles or relationships with other characters. Finally, we envision characters who discover they have been lied to revising their affinities toward the liars, or even confronting them.The research paper also detailed how other video game developers attempted to create lying NPCs, with an emphasis on how their system differs:TALE-SPIN characters may lie to one another (Meehan 1976, 183-84), though rather arbitrarily, as in our current system implementation. GOLEM implements a blocks world variant in which agents deceive others to achieve goals (Castelfranchi, Falcone, and De Rosis 1998), while Mouth of Truth uses a probabilistic representation of character belief to fuel agent deception in a variant of Turings imitation game (De Rosis et al. 2003). In Christian (2004), a deception planner injects inaccurate world state into the beliefs of a target agent so that she may unwittingly carry out actions that fulfill ulterior goals of a deceiving agent. Lastly, agents in Reiss (2012) extension to FAtiMA employ multiple levels of theory of mind to deceive one another in the party game Werewolf. 
While all of the above systems showcase characters who perceiveand in some cases, deceiveother characters, none appear to support the following key components of our system: knowledge propagation and memory fallibility. ...Like a few other systems noted above, Dwarf Fortress also features characters who autonomously lie. When a character commits a crime, she may falsely implicate someone else in a witness report to a sheriff, to protect herself or even to frame an enemy. These witness reports, however, are only seen by the player; characters dont give false witness reports to each other. They may, however, lie about their opinions, for instance, out of fear of repercussions from criticizing a leader. Finally, Dwarf Fortress does not currently model issues of memory fallibilityAdams is wary that such phenomena would appear to arise from bugs if not artfully expressed to the player. |
_softwareengineering.96785 | I was surprised when I heard that Pascal didn't originally have units. If I recall correctly, they were introduced in Turbo Pascal 4. Did other Pascal versions have units prior to that? How long was it before other Pascals got units? | Was it in Turbo Pascal 4 that units were introduced? | history;pascal | The Pascal programming language didn't have any modularity construct. Early versions of Turbo Pascal didn't have them either; Turbo Pascal 3 consisted of about 40kB of code (including the editor), so there wasn't any room for advanced language features. Units were introduced in Turbo Pascal 4.0. The concept was certainly not new (Modula-2, a successor of Pascal, had a similar primitive module system at the time, and so had other languages before), and the term "compilation unit" was also already in common use.
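For readers who never met them, a Turbo Pascal 4 unit is a small thing: the interface section is what other code sees, and the implementation section is hidden:

    unit Greetings;

    interface
    procedure SayHello;

    implementation
    procedure SayHello;
    begin
      WriteLn('Hello from a unit')
    end;

    end.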
_cs.42900 | My notes on MESI state that there are a few courses of action when we experience a local cache miss on read, depending on the global state of the data (whether other copies already exist, and the state they exist in). I'm having some trouble understanding how we know the state of the data when we miss on a read. As far as I'm aware, MESI data is attached to each cache line, so if we do not have a copy of the required data (we missed on the read), how do we know the relevant MESI data, so we can make the decision on how to react to the miss (whether we should read from memory, check for shared/modified copies, etc.)? | Local Cache miss using MESI Coherence Protocol | cpu cache | In cache coherence protocols there needs to be a single location where the global state can be figured out. There are two ways to implement this location. The first is to use a directory. The second way is to broadcast all information on a shared bus.

If you are using a directory, then the cache that misses sends a message to the directory node that knows the global state for that cache line. The directory records the list of caches that have a copy of the cache line and what the state is at each of those caches. (This is easier than it sounds at first: the line is either invalid everywhere, modified in exactly one cache, exclusive in exactly one cache, or shared in more than one cache.)

In a directory-based coherence protocol, when a cache misses it sends a message to the directory for that line. If the directory sees that the line is invalid everywhere, it forwards the request to the main memory, which responds to the cache, and the directory updates its state so that it knows the cache has an exclusive copy.

If the directory sees that the line is modified or exclusive in another cache, it forwards the request to that cache, changes the global state to shared, and (in most versions of the algorithm) saves the identifiers of the two caches that are now sharing the line.

If the directory sees that the line is in state shared, it forwards the request to one of the other caches sharing the line, and adds the requester to the list of sharers.

Directory-based cache coherence is elegant and scalable, but requires more than the necessary number of message hops for small systems, so for small systems a hack is used to replace the directory. Instead of a directory, all requests from caches, and responses to requests (from other caches or from memory), are placed on a shared bus. Every message is broadcast to all the other nodes on the bus. So in the case of a read miss: the cache that missed broadcasts a request for the data in state shared. All the other caches and the main memory are listening to the bus (this is called snooping). If one of the other caches has the line in state modified or exclusive, it changes its state to shared and sends the data back on the bus to the requester. If several of the other caches have the line in state shared, then they will all try to send the data back on the bus to the requester (and the bus arbiter will ignore all but one of the replies). Finally, if none of the other caches have a copy of the line, then the main memory will respond with the data. The cache that made the request sees that the responder was main memory rather than another cache, so it puts the newly received line in state exclusive.
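A toy sketch of the directory's read-miss decision described above; real protocols also track sharer lists and message traffic, this only shows the state logic:

    typedef enum { INVALID, SHARED, EXCLUSIVE, MODIFIED } mesi_state;

    /* New global state of a line once a read miss has been serviced. */
    mesi_state after_read_miss(mesi_state global)
    {
        switch (global) {
        case INVALID:   return EXCLUSIVE; /* memory supplies the only copy      */
        case EXCLUSIVE:                   /* owner forwards; both now share     */
        case MODIFIED:  return SHARED;    /* owner writes back, then shares     */
        case SHARED:    return SHARED;    /* one sharer forwards; add requester */
        }
        return INVALID;                   /* unreachable */
    }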
_unix.23944 | I am trying to run the statistics software Stata 11 on Ubuntu 11.10 as a regular user, and I get the following error message:

bash: xstata: Permission denied

The user privileges seem OK to me as a Linux newbie, though:

-rwxr-x--x 1 root root 16177752 2009-08-27 16:29 xstata*

I would very much appreciate some advice on how to resolve this issue! | Permission denied when starting binary despite rwx privilege | bash;permissions;executable | In the ls output you can see the file owner (root) and group (root). The user privileges apply to the file owner (rwx), the file group (r-x), and others (--x). Because you are not root (and I suppose that you are not in the root group), only the 'others' triplet (--x) applies to you. Thus you can run the file, but not read it; if xstata is a wrapper script, as it commonly is, the shell has to read it in order to execute it, hence the error. As a quick fix, try chmod +r xstata; this gives the read permission to all.
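Decoding the listing in the question field by field, plus a narrower fix than chmod +r (granting read to 'others' only):

    -rwxr-x--x 1 root root 16177752 2009-08-27 16:29 xstata
    #  rwx = owner (root)   r-x = group (root)   --x = others (you)
    sudo chmod o+r xstata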
_codereview.37708 | Task: Create a matrix of checkboxes. The user must be able to select only 1 checkbox in a row, or unselect all of them.

Solution:

<!DOCTYPE html>
<html>
    <head>
        <meta http-equiv="Content-Type" content="text/html;charset=utf-8" >
        <script type="text/javascript">
            function switch_checkbox(id_selected, id_pair) {
                // if the current checkbox is checked - uncheck the 2nd checkbox in this row
                if (document.getElementById(id_selected).checked == true) {
                    document.getElementById(id_pair).checked = false;
                }
            }
        </script>
    </head>
    <body>
        <form method="post" id="" action="send.php">
            <input type="checkbox" name="group1[]" id="1-1" value="1-1" onChange="switch_checkbox('1-1', '1-2');"/>
            <input type="checkbox" name="group2[]" id="1-2" value="1-2" onChange="switch_checkbox('1-2', '1-1');"/>
            <br />
            <input type="checkbox" name="group1[]" id="2-1" value="2-1" onChange="switch_checkbox('2-1', '2-2');"/>
            <input type="checkbox" name="group2[]" id="2-2" value="2-2" onChange="switch_checkbox('2-2', '2-1');"/>
        </form>
    </body>
</html>

As you see, the solution is very primitive. Is it possible to make it even simpler? | Matrix of checkboxes - only 1 allowed in a row | javascript;html;matrix | Assuming an unchecked starting condition, you can eliminate a parameter and thus drop the conditional:

function switch_checkbox(id_pair) {
    document.getElementById(id_pair).checked = false;
}
...
<input type="checkbox" name="group1[]" id="1-1" value="1-1" onChange="switch_checkbox('1-2');"/>
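A variation that drops the hard-coded ID pairs, so rows of any width work without editing the handler; a sketch assuming each row's checkboxes share a parent element:

    <script type="text/javascript">
    function uncheck_siblings(current) {
        var boxes = current.parentNode.getElementsByTagName('input');
        for (var i = 0; i < boxes.length; i++) {
            if (boxes[i] !== current && boxes[i].type === 'checkbox') {
                boxes[i].checked = false;
            }
        }
    }
    </script>
    <div>
        <input type="checkbox" name="group1[]" onchange="uncheck_siblings(this)" />
        <input type="checkbox" name="group2[]" onchange="uncheck_siblings(this)" />
    </div>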
_unix.285274 | I'm wondering how I can persist my perfectly configured desktop system. I have:

- installed a fresh new Debian
- installed all the applications and tools I need (from vim to Eclipse)
- overridden my system's bashrc/.profile etc.
- installed and configured my WM (fluxbox and themes)

This was a lot of work, and I'm looking for a way to save/export that state. I want to be able to recover my system after a reinstall or duplicate it (in case of using it in VMs). I was thinking of exporting the list of installed packages as well as some dot-files, but I'm not sure that this is the best choice. What would you recommend? | Save a perfectly configured desktop | backup;administration;persistence | If you did it correctly then all your changes are in your $HOME. Just copy that $HOME folder around and you're done. If you don't want to copy the entire folder, then ~/.config is a good place to start, but you're better off just copying over the entire folder.
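A minimal Debian-flavoured version of 'package list plus dotfiles' for anyone who wants a lighter snapshot than a full $HOME copy (adjust the dotfile list to taste):

    dpkg --get-selections > my-packages.txt
    tar czf dotfiles.tgz -C "$HOME" .bashrc .profile .fluxbox .config

    # on the freshly installed machine:
    sudo dpkg --set-selections < my-packages.txt
    sudo apt-get dselect-upgrade
    tar xzf dotfiles.tgz -C "$HOME"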
_softwareengineering.306703 | How do you make an class that properly warns a developer in the future that they've made a mistake somewhere in their implementation that resulted in an object that gets deconstructed in a state that prevents the release of it's resources?Background:I recently upgraded to Visual Studio 2015 and began reloading and compiling code for a game engine I'm working on and ran into a new series of warnings warning C4297: '*': function assumed not to throw an exception but does. A quick search revealed a C++ convention that I'd missed and the reasons behind said convention: destructors should not throw exceptions. I also can't really argue with the reasons, but I'm also not sure how to work around the problem.Within OpenGL a Context basically holds all of the state information for the OpenGL engine. Only one thread may have a context at any given time and each thread may only have one current context. When the engine starts it creates the context and then relinquishes control over the context and starts up another thread which picks it up and proceeds to handle the graphics rendering for the engine. To handle all of this, I created a graphics engine class that uses semantics similar to a mutex to claim and relinquish the graphics engine and make sure that no mistakes are made that might some day result in someone attempting to do things with a context that it doesn't own.During destruction, the graphics engine and a number of other classes that rely on it all check to make sure that the current thread has claimed the graphics engine before they perform actions that are necessary in their destruction. If the thread didn't have the graphics context claimed, the destructor was throwing. My goal was really to provide some basic protection against the class being used improperly on accident in the future, not to make the graphics engine thread-safe. Now... I'm uncertain of how best to handle this.I've contemplated just switching over to a mutex-based approach which I could use to block access to the graphics context until a thread was done, possibly making the graphics engine class fully multi-threading capable (not that I can understand why you'd want to perform multi-threading with an OpenGL context, as the calls needed to do so are expensive enough to negate any benefit you might get out of it from what I understand).The most tempting option has been to just log an error terminate any thread that attempts to misuse the class. Unfortunately, I can't find an OS-independent way of terminating just the current thread. If I was to go this route, I'd have to look up OS-appropriate ways to terminate the current threads.I'm also not certain that I'm not being overly paranoid. Maybe I should just document the proper use of the class and if someone misuses it let them and hope that they're able to figure out why their application isn't doing what it's supposed to. I'm also worried about myself being the fool who misuses the class some day in the future. | OpenGL, multithreading, and throwing destructors | c++;multithreading;opengl | So I understand that you want to be able to check for these potential problems with the thread not owning the context, but how will throwing an exception help? How do you plan to recover from the problem? And at what point in the program? My guess is that in general you can't fix the problem by the time this has happened, its just a major structural error in the program. 
So why not just make the best error report you can and then call std::abort, instead of throwing an exception?Throwing exceptions from destructors violates the fundamental idea of C++ error handling, which is that destructors are used to clean up objects when an exception is thrown and the stack is being unwound. If one of those destructors throws an exception during stack unwinding for a different exception, so that there are two unresolved exceptions at the same scope, your program is terminated. In C++11, to make it safer, your program usually terminates whenever a destructor throws an exception, unless you take special boiler-plate steps to allow it. There are lots of other big problems with destructors that throw -- I have yet to see a situation where it's a good idea, or expedient in any way to make a throwing destructor. |
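A sketch of the report-and-abort destructor described above; every identifier here is hypothetical, only the shape matters:

    #include <cstdlib>
    #include <iostream>

    GraphicsEngine::~GraphicsEngine()
    {
        if (!ownedByCurrentThread()) {           // hypothetical ownership check
            std::cerr << "fatal: GraphicsEngine destroyed by a thread that"
                         " does not own the GL context\n";
            std::abort();                        // structural bug: report and die
        }
        releaseContextResources();               // hypothetical cleanup; must not throw
    }

Because std::abort never unwinds, there is no risk of colliding with an in-flight exception, and the core dump plus the log line usually points straight at the offending call site.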
_cs.35815 | I have to convert A = B C + D E + from reverse polish to infix notation. I'm a bit confused because of the equals sign. Is that an operator too? This is my answer: A = (B+C) x (D+E). Is this correct or would this be written another way? | Reverse Polish to infix | computer architecture | null
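One consistent reading, sketched in Python: treat = as just another (lowest-precedence) binary operator and convert with a stack. The token list below is a guess at the intended input, because the question's expression appears to have lost its multiplication sign.

    def rpn_to_infix(tokens):
        """Convert RPN tokens to a fully parenthesized infix string."""
        stack = []
        for tok in tokens:
            if tok in {"+", "-", "x", "="}:      # '=' handled like any binary op
                right = stack.pop()
                left = stack.pop()
                stack.append("(" + left + " " + tok + " " + right + ")")
            else:
                stack.append(tok)
        return stack.pop()

    # Hypothetical reading of the question's input: A B C + D E + x =
    print(rpn_to_infix(["A", "B", "C", "+", "D", "E", "+", "x", "="]))
    # -> (A = ((B + C) x (D + E)))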
_unix.293887 | Duplicity by default asks for passwords to GnuPG keys on startup and caches them in memory throughout the process lifetime. Is there a way to have it use /usr/bin/pinentry instead, so I know I'm not passing the passphrase through Duplicity? I'm using GnuPG hardware smart cards, so generally when a sign or decrypt operation is requested, I usually get a pinentry dialogue that pops up. I can configure gpg-agent to cache my passphrases for a set amount of time. Is there a way to have Duplicity not know my GnuPG keys and use the GnuPG agent and pinentry to bypass inputting my passphrase into Duplicity? | Configure Duplicity to use pinentry? | gpg;gpg agent;duplicity;pinentry | Duplicity does not cache gpg passphrases by default (you can give them as env vars though). All prompts you see are from the gpg binary run underneath. Hence, when you configure your gpg into the desired state, duplicity will use it as configured and you are set. For using gpg-agent, read what the parameter --use-agent does on the manpage: http://duplicity.nongnu.org/duplicity.1.html
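For illustration, a hypothetical invocation with agent support enabled; the key id and paths are invented, and --use-agent simply delegates all passphrase prompting to gpg-agent/pinentry as the manpage describes.

    # passphrase handling goes through gpg-agent/pinentry, not duplicity
    duplicity --use-agent --encrypt-key 0xDEADBEEF \
        /home/user sftp://backup.example.com//backups/home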
_cs.6567 | Insertion-Sort (A) [where A is an array of numbers to be sorted]
(1) for j = 2 to A.length
(2)     key = A[j]
(3)     i = j - 1
(4)     while i > 0 and A[i] > key
(5)         A[i+1] = A[i]
(6)         i = i - 1
(7)     A[i + 1] = key
CLRS proves the correctness of the above algorithm by using a loop invariant: Loop Invariant: At the start of each iteration of the for loop of lines 1-8, the subarray A[1... j - 1] consists of the elements originally in A[1... j - 1] but in sorted order. We use loop invariants to help us understand why an algorithm is correct. We must show three things about a loop invariant: Initialization: It is true prior to the first iteration of the loop. Maintenance: If it is true before an iteration of the loop, it remains true before the next iteration. Termination: When the loop terminates, the invariant gives us a useful property that helps show that the algorithm is correct. In the explanation of the maintenance aspect of the loop invariant, the following is mentioned: Maintenance: A more formal treatment of this property would require us to state and show a loop invariant for the while loop of lines 5-7. At this point, however, we prefer not to get bogged down in such formalism, and so we rely on our informal analysis to show that the second property holds for the outer loop. Why would a formal treatment require a loop invariant for the while loop? Having one invariant for the outer for loop is sufficient to prove the correctness of the algorithm, so why would a formal treatment require another loop invariant? | Loop invariants? | algorithms;algorithm analysis;correctness proof | null
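For what it's worth, one common way to state the missing inner invariant (an illustration, not CLRS's own wording), written in LaTeX:

    % At the start of each iteration of the while loop: the concatenation
    % A[1..i] || A[i+2..j] equals the sorted subarray that A[1..j-1] held
    % on entry to the for-iteration, and every shifted element exceeds key:
    A[1..i] \,\|\, A[i{+}2..j] = \mathrm{sorted\ original\ } A[1..j{-}1]
    \quad\wedge\quad \forall k \in \{i{+}2,\dots,j\}:\ A[k] > key .
    % On termination (i = 0 or A[i] <= key), the assignment A[i+1] = key
    % re-establishes the outer invariant for j+1 -- which is exactly the
    % step the informal argument glosses over.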
_softwareengineering.70992 | There are situations where a name passed as a parameter will be cast to a new type, but the name of the passed object should remain similar. For class attributes we can use the this operator, but what about local variables in functions? What coding convention is widely used? For example,
void MyFunc(BaseClass myPara){ DerivedClass _mypara = (DerivedClass)myPara;}
or, on the contrary,
void MyFunc(BaseClass _myPara){ DerivedClass mypara = (DerivedClass)_myPara;}
or any other convention? | What naming Convention to Use for C# Function Parameters | c#;naming | Prefixing either parameters or local variables with an underscore is not very idiomatic in C#; it is not very easy to read and not often used (although it is legal, so you are free to do so if you wish). The best name for the parameter and the variable is a descriptive name. You need to think about why you are changing the type - what is the reason for the cast? Then you should be able to come up with 2 different names. E.g. if you passed a person and converted it to a customer, then you could use person and/or customer in the variable names perhaps. If you really can't think of 2 different names then I would use as in the name (there was a question on this site a few days ago about this). E.g. you would use myParaAsDerived for the local variable. If at all possible I would not use this; I would think hard about the problem you are solving and what meaningful names could be used, but if all else fails this is fairly readable.
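A quick C# sketch of the two namings the answer recommends; Person/Customer are hypothetical types used only for illustration.

    // Preferred: both names describe roles, not spellings.
    void Promote(Person person)
    {
        Customer customer = (Customer)person;
        // ...
    }

    // Fallback when no better role name exists:
    void Promote(BaseClass myPara)
    {
        DerivedClass myParaAsDerived = (DerivedClass)myPara;
        // ...
    }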
_webmaster.104582 | I registered a new domain name at my provider and told them afterwards, after I found out that my personal information was listed, to make it private. So they did that. I just checked who.is and put my domain name in the search box, only to find out that my personal information is still listed. This is something I don't want! How does this work? Do I have to check every whois site to see if the changes were made? | My domain name is marked private, but who.is still lists it with my personal information | domains;whois;privacy | null
_softwareengineering.198836 | I've just tried to do a CodingBat exercise. There are posted solutions to the problem I tried to solve. However, I was stubborn: I ignored them and tried to code it up my own way (basically reinventing my own wheel - a square, badly working wheel). After 4 hours it looks nasty and fails some tests. Now I realize that my logic was totally wrong and wasteful. So the questions: When do you start to give up solving a problem your own way (reinventing the wheel) and start to look around to see other solutions? How long should a developer be 'stubborn' in trying to find his 'unique' or complicated solution? When do you give up and begin to search for another approach? | Give advice from experienced to beginner programmer on reinventing my wheel (solution) | efficiency | I usually follow the methodology below when it comes to scenarios like the one you are facing: Find the root cause of the problem in the solution you are developing; it is not necessarily true that all your code logic is wasted just because the solution is not achieved. If you properly identify the root cause, then you are able to think of new logic that can be plugged in to overcome it; otherwise, at that point you can search online for help on that particular root cause. This way you will narrow down your searching and it will be manageable. The time to struggle for a solution really depends on two factors: deadlines, if any; and if there are no deadlines, then until you get saturated. Usually around one day for a genuine issue is good enough before starting to re-think some other approach. There are some instances where, even after searching a lot on a particular problem, no suitable solution is found; then the resolution may take up to a week, during which you have to spend time thinking about out-of-the-box solutions that nobody else has faced before - for example, solving a memory leak from a Java 3D applet hosted on Tomcat using Web Start. I guess the above points cover answers to your 3 questions to some extent.
_webapps.29854 | So basically, I have many different labels in my Gmail account. I've set up my filters to attach labels such as newsletters and social to these emails as they come in. Now, once these emails are read, I want them to be archived and simply gone from the inbox. Now, this is possible by me using the following filter, selecting all mail, then clicking archive, but I'm wondering if there is an easier way to go about this...
((label:social-facebook OR label:social-twitter OR label:G+ OR label:social-spotify OR label:newsletters OR label:scanned OR label:shopping-ebay OR label:shopping-paypal) AND is:read AND label:inbox)
Thanks in advance :D | Batch processing filters in Gmail | gmail | Gmail's filters are processed immediately after the e-mail arrives in your inbox. You cannot use a filter to reprocess mail that is already sitting in your inbox. What you are looking for is possible with a Google Apps Script:
function processInbox() {
  var threads = GmailApp.getInboxThreads();
  for (var i = 0; i < threads.length; i++) {
    var th = threads[i];
    if (!th.isUnread() && !th.isImportant() && !th.hasStarredMessages()) {
      th.moveToArchive();
    }
    Utilities.sleep(1000);
  }
}
Drive > new script > paste the code > run and authorize > create a trigger every 4-6 hours.
_unix.316554 | In bash, is there any way to read in user input but still allow bash variable expansion?I am trying to request the user enter a path in the middle of a program but since ~ and other variables are not expanded as a part of the read builtin, users have to enter in an absolute path.Example: When a user enters a path into:read -ep input> dirin [[ -d $dirin ]] returns true when a user enters /home/user/bin but not ~/bin or $HOME/bin. | Can Bash Variable Expansion be performed directly on user input? | shell script;wildcards;variable substitution;read | A naive way would be:eval dirin=$dirinWhat that does is evaluate the expansion of dirin=$dirin as shell code.With dirin containing ~/foo, it's actually evaluating:dirin=~/fooIt's easy to see the limitations. With a dirin containing foo bar, that becomes:dirin=foo barSo it's running bar with dirin=foo in its environment (and you'd have other problems with all the shell special characters).Here, you'd need to decide what expansions are allowed (tilde, command substitution, parameter expansion, process substitution, arithmetic expansion, filename expansion...) and either do those substitutions by hand, or use eval but escape every character but those that would allow them which would be virtually impossible other than by implementing a full shell syntax parser unless you limit it to for instance ~foo, $VAR, ${VAR}.Here, I'd use zsh instead of bash that has a dedicated operator for that:vared -cp input> dirinprintf %s\n ${(e)dirin}vared is the variable editor, similar to bash's read -e.(e) is a parameter expansion flag that performs expansions (parameter, command, arithmetic but not tilde) in the content of the parameter.To address tilde expansion, which only takes place at the beginning of the string, we'd do:vared -cp input> dirinif [[ $dirin =~ '^(~[[:alnum:]_.-]*(/|$))(.*)' ]]; then eval dirin=$match[1]\${(e)match[3]}else dirin=${(e)dirin}fiPOSIXly (so bashly as well), to perform tilde and variable (not parameter) expansion, you could write a function like:expand_var() { eval _ev_var=\${$1} _ev_outvar= _ev_v=${_ev_var%%/*} case $_ev_v in (?*[![:alnum:]._-]*) ;; (~*) eval _ev_outvar=$_ev_v; _ev_var=${_ev_var#$_ev_v} esac while :; do case $_ev_var in (*'$'*) _ev_outvar=$_ev_outvar${_ev_var%%$*} _ev_var=${_ev_var#*$} case $_ev_var in ('{'*'}'*) _ev_v=${_ev_var%%\}*} _ev_v=${_ev_v#{} case $_ev_v in | [![:alpha:]_]* | *[![:alnum:]_]*) _ev_outvar=$_ev_outvar\$ ;; (*) eval _ev_outvar=\$_ev_outvar\${$_ev_v}; _ev_var=${_ev_var#*\}};; esac;; ([[:alpha:]_]*) _ev_v=${_ev_var%%[![:alnum:]_]*} eval _ev_outvar=\$_ev_outvar\$$_ev_v _ev_var=${_ev_var#$_ev_v};; (*) _ev_outvar=$_ev_outvar\$ esac;; (*) _ev_outvar=$_ev_outvar$_ev_var break esac done eval $1=\$_ev_outvar}Example:$ var='~mail/$USER'$ expand_var var;$ printf '%s\n' $var/var/mail/stephaneAs an approximation, we could also prepend every character but ~${}-_. 
and alnums with backslash before passing to eval:eval dirin=$( printf '%s\n' $dirin | sed 's/[^[:alnum:]~${}_.-]/\\&/g')(here simplified on the ground that $dirin can't contain newline characters as it comes from read)That would trigger syntax errors if one entered ${foo#bar} for instance but at least that can't do much harm as a simple eval would.Edit: a working solution for bash and other POSIX shells would be to separate the tilde and other expansions like in zsh and use eval with a here-document for the other expansions part like:expand_var() { eval _ev_var=\${$1} _ev_outvar= _ev_v=${_ev_var%%/*} case $_ev_v in (?*[![:alnum:]._-]*) ;; (~*) eval _ev_outvar=$_ev_v; _ev_var=${_ev_var#$_ev_v} esac eval $1=\$_ev_outvar\$(cat << //unlikely//$_ev_var//unlikely//)That would allow tilde, parameter, arithmetic and command expansions like in zsh above. } |
_webmaster.72178 | It's well-known that the SEO value of a website basically depends on how much it has been referenced, linked, and shared. From Googlebot's point of view, is a link at the top of a page more valuable than a link at the bottom? | Does Googlebot determine the value of a link by its position on the page? | seo;links;googlebot;backlinks | Yes - but above all, a link within content is what matters most. Any link in almost any placement on the page will be good for you, assuming that the site is of quality and the link has good anchor text. There are some exceptions to this of course. For example, a link in the footer is not wise. As well, a site-wide link is also not wise. Just make sure you are not doing a Google no-no. But links that are conversational and within content are most valuable regardless of where in the content they lie. Google does weigh a link by placement; however, the effect is minimal. So a link higher in content is best, and a link before others is also best. If you have control over this, then that is good, but if you do not, just make sure it is a good link that does not violate Google's rules and do not worry about the rest.
_cogsci.17565 | The U.S. Transportation Security Administration has red teams who try to sneak dangerous items through airport security. The security personnel fail up to 95% of these tests. Is this high failure rate to be expected because of habituation to visual stimulus? Specifically, a screening agent is expected to find a single weapon amidst thousands of X-rays without a weapon. If this is habituation, are there techniques for reducing its impact in scenarios such as this? | Is habituation responsible for inaccurate airport security screenings? | vision;habituation | The main reason for these failures is mental fatigue and loss of concentration, in conjunction with the weak target signals. Vigilance decrement is defined as deterioration in the ability to remain vigilant for critical signals with time, as indicated by a decline in the rate of the correct detection of signals. Vigilance decrement is most commonly associated with monitoring to detect a weak target signal. Detection performance loss is less likely to occur in cases where the target signal exhibits a high saliency. ... Under most conditions, vigilance decrement becomes significant within the first 15 minutes of attention, but a decline in detection performance can occur more quickly if the task demand conditions are high ... More recent studies indicate that vigilance is hard work, requiring the allocation of significant cognitive resources, and inducing significant levels of stress. Habituation plays very little role in these failures. Early theories of vigilance explained the reduction of electrophysiological activity over time associated with the vigilance decrement as a result of neural habituation. ... More recent ERP studies indicate that when performance declines during a vigilance task, N100 amplitude was not diminished. These results indicate that vigilance is not the result of boredom or a reduction in neurological sensitivity. Training and motivation can decrease failures. Training and practice significantly reduce the vigilance decrement, reduce the false alarm rate, and may improve sensitivity for many sustained attention tasks. Training improvements may also occur due to the reduced mental workload associated with task automaticity. In pilotage and airport security screening experiments, trained or expert subjects exhibit better detection of low-salience targets, a reduction in false alarms, improved sensitivity, and a significantly reduced vigilance decrement. In some cases the vigilance decrement was eliminated or not apparent.
_scicomp.16383 | So far the closest I've found is ViennaCL, which has a Lanczos implementation for eigenvalues. It is not clear that eigenvectors are produced by this library. Does anyone here know whether ViennaCL solves for eigenvectors? And how to get them? Or of any other sparse-matrix eigenvector solver for GPGPU? ETA: From what I have read, Lanczos is only an algorithm for converting from a general matrix to tridiagonal form. The QR algorithm (or another) is needed for extracting eigenvalues/eigenvectors from the tridiagonal matrix. Assuming I can upgrade the ViennaCL library to handle complex matrices, does anyone have a recommendation for a GPU-based QR algorithm implementation? | I am looking for a complex sparse matrix EigenVector solver for GPGPU; preferably CUDA | eigensystem;eigenvalues;gpu;viennacl | null
_vi.7480 | How can I run a command on all lines delimited, say, by \begin{otherlanguage} and \end{otherlanguage}? For example,
1 This line should not be affected,
2 \begin{otherlanguage}
3 but this should;
4 \end{otherlanguage}
5 this should not.
(The cmd could be norm A test.) | Executing a command on delimited lines | regular expression;ex mode | You can use pattern delimiters for this:
:/first/,/second/norm dd
You can use any search pattern around the ,. If you want to use only the inside of the matched patterns, use + and - like so:
:/first/+1,/second/-1 norm dd
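Applied to the question's own LaTeX markers, the same range syntax would look like this (appending the question's example text to each line strictly between the delimiters; the doubled backslash escapes the literal backslash in the search pattern):

    :/\\begin{otherlanguage}/+1,/\\end{otherlanguage}/-1 norm A test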
_unix.150614 | I tried to curl popular websites, e.g. curl google.com or curl google.com:80, and there is always a timeout error. Some troubleshooting steps performed:
- Able to curl localhost with a response
- Able to ping google.com with a response
- Tried changing DNS to Google DNS, but to no avail
- Tried wget, and it does not work for external sites either
Questions: May I know what is the default port that curl is using? Is this related to firewall blocking? As far as I know, port 80 is open for the server I am using. What else can I do to further troubleshoot? | curl timeout troubleshooting | networking;firewall;curl;http | If this is a hosted webserver I would suspect that it's been configured in a classic webserver fashion, meaning that it allows incoming connections on port 80 but, for security, they may have disallowed outgoing connections on port 80. I would guess this is the issue. curl and wget generally work out of the box with no issues. curl chooses a port based on the URI given (http will be 80, https 443, ftp 21, and so on); when no protocol is given, as you are doing, it will use 80. To troubleshoot this, just disable your firewall for a second (or edit the settings if you are worried about it being down for a few seconds). Update: "May I know if there is an easy way to confirm that port 80 (outgoing) is blocked?" I'd say that you've done this already. wget is especially hard to mess up. If it isn't working, I'd say it is a safe bet. An even better way is to look at your firewall setup and confirm this to be the case. "Is there any security concern we need to take note of for enabling port 80 (outgoing)?" Oh yes. It isn't that doing this makes you less secure; it is that, by opening this, if someone does gain the ability to, say, inject a little JavaScript code into your site DB, they could use your site to commit crimes which would be very difficult to trace back to them (because all indications of who the attacker is show you as the culprit). The only thing that could really clear your name are your own logs, which aren't very convincing since you provided them to the court in the first place. "I don't think I have rights to disable the firewall; please advise what settings need to be edited. Is it configuring the firewall to allow port 80 outgoing?" You may be right. I cannot really tell you the answer to this because you are clearly using some sort of hosted solution. iptables is the name of the most popular Linux firewall. You should see it listed in /etc/init.d if you have it installed. If not, you'll need to go to the website for your web host and find out how it should be managed.
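Two quick checks for the blocked-outgoing-port-80 theory the answer proposes; these are generic commands, not host-specific advice, and rule layouts vary by distribution:

    # look for DROP/REJECT rules on outbound web traffic (run as root)
    iptables -L OUTPUT -n -v | grep -E 'dpt:(80|443)'

    # a verbose request shows exactly where the connection stalls
    curl -v --connect-timeout 5 http://example.com/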
_codereview.85186 | This is my version of a linked list in C. The function mergeSort requires a reference to the first and to the last nodes, as well as the indices of the first and the last node, and a comparator. You need to compile this source code using the C99 standard in order to get it working.
#include <stdio.h>
#include <stdlib.h>

typedef struct List {
    int val;
    struct List *next;
} List;

List *new(int value){
    List *list = malloc(sizeof(List));
    list->next = NULL;
    list->val = value;
    return list;
}

void freeList(List *list){
    List *node = list;
    while(node != NULL) {
        List *n = node->next;
        free(node);
        node = n;
    }
}

void print(List *list){
    for(List *node = list; node != NULL; node = node->next) {
        printf("%d ", node->val);
    }
    printf("\n");
}

List *merge(List *part1, List *part2, int (*cmp)(const void *, const void *)){
    List *temp;
    if(cmp(&part1->val, &part2->val) < 0) {
        temp = part1;
        part1 = part1->next;
    } else {
        temp = part2;
        part2 = part2->next;
    }
    List *current = temp;
    while(part1 != NULL && part2 != NULL) {
        if(cmp(&part1->val, &part2->val) < 0) {
            current->next = part1;
            part1 = part1->next;
        } else {
            current->next = part2;
            part2 = part2->next;
        }
        current = current->next;
    }
    while(part1 != NULL) {
        current->next = part1;
        current = current->next;
        part1 = part1->next;
    }
    while(part2 != NULL) {
        current->next = part2;
        current = current->next;
        part2 = part2->next;
    }
    return temp;
}

List *mergeSort(List *start, List *stop, int startIndex, int stopIndex, int (*cmp)(const void *, const void *)){
    if(start != stop) {
        int mid = (startIndex + stopIndex) / 2;
        int i = startIndex;
        List *midNode = NULL;
        for(List *node = start; node != stop; node = node->next) {
            if(i == mid) {
                midNode = node;
            }
            i++;
        }
        List *midNodeNext = midNode->next;
        List *part1 = mergeSort(start, midNode, startIndex, mid, cmp);
        List *part2 = mergeSort(midNodeNext, stop, mid + 1, stopIndex, cmp);
        return merge(part1, part2, cmp);
    }
    start->next = NULL;
    return start;
}

int cmp(const void *a, const void *b){
    return *(int *)a - *(int *)b;
}

List *push(List *list, List *node) {
    List *last;
    for(List *node = list; node != NULL; node = node->next) {
        if(node->next == NULL) {
            last = node;
        }
    }
    last->next = node;
    return node;
}

int main(){
    List *list = new(9);
    push(list, new(4));
    push(list, new(3));
    push(list, new(2));
    push(list, new(1));
    List *stop = push(list, new(8));
    list = mergeSort(list, stop, 0, 5, cmp);
    print(list);
    freeList(list);
    return 0;
} | Sort a linked list with merge sort in C | c;sorting;linked list;mergesort | Merging tails effectively wastes cycles. The nodes are already linked correctly.
if (part1 != NULL) { current->next = part1; }
if (part2 != NULL) { current->next = part2; }
_unix.232953 | Instead of creating common LXC unprivileged containers where all the users are mapped to (unprivileged) subuid/gid of my host user, I'm considering a mapping where my host user itself will be mapped to user 0 (root). They'd be very slim single-app containers.The reason is that in this way I don't need the rootfs directory subtree, which resides in my user's home, to be namespace-chmod to a different user and I can delete it with a plain rm instead of a namespace one.Is this kind of LXC less secure than the common one, and why is it? What could happen? | Is an unprivileged LXC where the host user itself is mapped to 0 less secure of one where one of its subids is mapped to 0, and why? | security;not root user;lxc;namespace | null |
_cs.54567 | About the following algorithm:reach(Vertex s, Vertex t): if s = t return TRUE else for v in Adj(s) do if reach(v,t) return TRUE return FALSEWhy can we say that its runtime on a directed acyclic graph is $O(n!)$?I can see why there are $n!$ different paths in the graph, is it because in each of those $n!$ paths, the loop will run at most $n$ times? | Runtime of a recursive algorithm to find a path in a DAG | algorithms;graph theory;graphs;runtime analysis | If you take each recursive call and trace it back to the original call reach(s,t) then you get some directed path starting at $s$. Hence the running time of the algorithm is $\Theta(N)$, where $N$ is the number of directed path starting at $s$ in which $t$ is not an internal vertex.The number of paths of length $k$ (containing $k$ edges) is at most $(n-1)(n-2)\cdots(n-k)$, since each such path consists of distinct vertices different from $s$ (here we use the DAG property). Since paths have length between $0$ and $n-1$, we get that$$N \leq \sum_{k=0}^{n-1} (n-1)\cdots(n-k) = (n-1)! \sum_{k=0}^{n-1} \frac{1}{k!} \leq e(n-1)!.$$In particular, $N = O(n!)$. |
_unix.365900 | I have the following rsync that syncs only files and not directories using a couple of filter rules:rsync --dry-run --log-file=/var/log/re_rsyncs/reirl.log -f- */ -f+ * -azve ssh [email protected]:/its/pron/fe1/ /mnt/X_Drive/EF/irl/I have tried to modify this to transfer just the .xml files as per below but it still transfers all files:rsync --dry-run --log-file=/var/log/re_rsyncs/reirl.log -f- */ -f+ *.xml -azve ssh [email protected]:/its/pron/fe1/ /mnt/X_Drive/EF/irl/How do I get rsync to only transfer the .xml files now ? | rsync only .xml files | linux;rsync | null |
_webmaster.88103 | I'm running on Nginx server.I try to rewrite my URL using .htacces file:RewriteEngine OnRewriteBase /RewriteRule ^search/(.+)/page=(.+)$ search-results.php?q=$1&page=$2It give me Internal Error 500. Any help? | Did httpd can be rewrite? | htaccess;mod rewrite;httpd | null |
_datascience.20237 | I was going through a solution of the Housing Prices competition on Kaggle (Human Analog's kernel on House Prices: Advanced Regression Techniques) and came across this part:
# Transform the skewed numeric features by taking log(feature + 1).
# This will make the features more normal.
from scipy.stats import skew

skewed = train_df_munged[numeric_features].apply(lambda x: skew(x.dropna().astype(float)))
skewed = skewed[skewed > 0.75]
skewed = skewed.index
train_df_munged[skewed] = np.log1p(train_df_munged[skewed])
test_df_munged[skewed] = np.log1p(test_df_munged[skewed])
I am not sure why a skewed distribution needs to be converted into a normal one. Please, can someone explain in detail: Why is this being done here, and how is it helpful? How is this different from feature scaling? Is this a necessary step for feature engineering? What is likely to happen if I skip this step? | Why do we convert skewed data into a normal distribution | regression;feature extraction;feature engineering;kaggle;feature scaling | You might want to interpret your coefficients. That is, to be able to say things like "if I increase my variable $X_1$ by 1, then, on average and all else being equal, $Y$ should increase by $\beta_1$". For your coefficients to be interpretable, linear regression assumes a bunch of things. One of these things is no multicollinearity. That is, your $X$ variables should not be correlated against each other. Another is homoscedasticity. The errors your model commits should have the same variance, i.e. you should ensure the linear regression does not make small errors for low values of $X$ and big errors for higher values of $X$. In other words, the difference between what you predict $\hat Y$ and the true values $Y$ should be constant. You can ensure that by making sure that $Y$ follows a Gaussian distribution. (The proof is highly mathematical.) Depending on your data, you may be able to make it Gaussian. Typical transformations are taking the inverse, the logarithm or square roots. Many others exist of course; it all depends on your data. You have to look at your data, and then do a histogram or run a normality test, such as the Shapiro-Wilk test. These are all techniques to build an unbiased estimator. I don't think it has anything to do with convergence as others have said (sometimes you may also want to normalize your data, but that is a different topic). Following the linear regression assumptions is important if you want to either interpret the coefficients or use statistical tests in your model. Otherwise, forget about it. Applying the logarithm or normalizing your data is also important because linear regression optimization algorithms typically minimize $\|\hat y - y\|^2$, so if you have some big $y$ outliers, your estimator is going to be VERY concerned about minimizing those, since it is concerned about the squared error, not the absolute error. Normalizing your data is important in those cases, and this is why scikit-learn has a normalize option in the LinearRegression constructor.
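A tiny self-contained Python sketch of the effect being described; the data is synthetic, so the exact numbers are illustrative only.

    import numpy as np
    from scipy.stats import skew

    rng = np.random.RandomState(0)
    x = rng.lognormal(mean=0.0, sigma=1.0, size=10000)   # strongly right-skewed

    print("skew before:", skew(x))             # large positive value
    print("skew after :", skew(np.log1p(x)))   # much closer to 0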
_softwareengineering.201021 | I'm reading Pro PHP and jQuery and want to rebuild the example. The author is creating a database connection class:
class DB_Connect {
    protected $db;
    protected function __construct($db = NULL)
    {
        if (is_object($db)) {
            $this->db = $db;
        } else {
            // Constants are defined in /sys/config/db-cred.inc.php
            $dsn = "mysql:host=" . DB_HOST . ";dbname=" . DB_NAME;
            try {
                $this->db = new PDO($dsn, DB_USER, DB_PASS);
            } catch (Exception $e) {
                die ($e->getMessage());
            }
        }
    }
}
This class is then extended by the application's main class, in order to get access to the database object. Why is this done that way? Couldn't we rather create an instance of DB_Connect in the main class? What are the benefits of that approach? | Extend the class or create an instance? | php;object oriented;project structure | What you see there is a classic rookie mistake: establishing an is-a relationship (creating a subclass) between two classes when the actual relationship is has-a (instantiating a class as a member). It's unfortunate that this mistake made it into print. If the as-stated purpose of Main holds true, its purpose is to be a central module for an application. It is not, in and of itself, something that establishes a database connection, and you wouldn't instantiate a Main to do that. The easy test for this is to say the two alternatives aloud and see which rings true. For example: A Car is a Wheel (subclassing) doesn't make sense, because cars aren't wheels. If you had to model four- and three-wheeled cars, you'd draw a complete blank on how to do that with subclassing. A Car has a Wheel (instantiation) works because cars are composed of (among other things) one or more wheels. Envisioning a FourWheeledCar and a ThreeWheeledCar containing multiple instances of Wheel would be easy: one class has four instances of Wheel and the other has three.
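A minimal PHP sketch of the has-a version the answer advocates, reusing the question's DB_Connect; note that its constructor would have to become public (the book makes it protected precisely because it expects subclassing), and the Main class name is hypothetical.

    <?php
    class Main
    {
        private $db;

        public function __construct(DB_Connect $db)
        {
            $this->db = $db;   // Main *has a* connection; it *is not* one
        }
    }

    $main = new Main(new DB_Connect());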
_unix.343762 | I use dd extensively as a means of software configuration control. The images are typically deployed on flash disk to update devices. I find I am often making small incremental updates to the image files and then having to re-copy the entire image over to the block device. This is fairly time-consuming, as images are typically 8GB in size. To compound the issue, the images (once assembled) are not in an easily mountable format. In other words, making the change directly on the block device is not possible. I am trying to determine if there is a method to compare an image file to a block device and only update the blocks which require an update. I suspect this would be much faster than writing the entire disk for what probably equates to a 10kb delta in the image file. | Update block device with image while writing only the Delta, is it possible? | linux;dd;block device | The following is a fast hack for a small C program able to compare two files (file1, file2) blockwise and, if the blocks are different, copy the corresponding block from file1 to file2. It works for files and block devices as well. Do with it what you want, but at your own risk!
/*
Small program to blockwise compare two files and write different
blocks from file1 to file2.
Arguments: file1, file2, blocksize in bytes
If blocksize is not given, it is set to 512 (minimum)
No error checking, no intensive tests run - use at your own risk!
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>

int main(argc, argv)
int argc;
char *argv[];
{
    char *fnamein;  /* Input file name */
    char *fnameout; /* Output file name */
    char *bufin;    /* Input buffer */
    char *bufout;   /* Output buffer */
    int bufsize;    /* Buffer size (blocksize) */
    int fdin;       /* Input file descriptor */
    int fdout;      /* Output file descriptor */
    int cnt;        /* Current block # */

    /* Argument processing */
    if (argc < 3 || argc > 4) {
        fprintf(stderr, "Usage: %s infile outfile [bufsize]\n", argv[0]);
        exit(1);
    }
    fnamein = argv[1];
    fnameout = argv[2];
    if (argc == 4) {
        bufsize = atoi(argv[3]);
        if (bufsize < 512) {
            fprintf(stderr, "Error: Illegal value for [bufsize]: %s\n", argv[3]);
            exit(1);
        }
    } else {
        bufsize = 512;
    }
    fprintf(stderr, "Copying differing blocks from '%s' to '%s', blocksize is %i\n", fnamein, fnameout, bufsize);
    if (! ((bufin = malloc(bufsize)) && (bufout = malloc(bufsize)))) {
        fprintf(stderr, "Error: Can't allocate buffers: %i\n", bufsize);
        exit(1);
    }
    fdin = open(fnamein, O_RDONLY);
    if (fdin < 0) {
        fprintf(stderr, "Error: Can't open input file: %s\n", fnamein);
        exit(1);
    }
    fdout = open(fnameout, O_RDWR | O_SYNC);
    if (fdout < 0) {
        fprintf(stderr, "Error: Can't open output file: %s\n", fnameout);
        exit(1);
    }
    cnt = 0;
    while (read(fdin, bufin, bufsize) == bufsize) {
        if (read(fdout, bufout, bufsize) == bufsize) {
            if (memcmp(bufin, bufout, bufsize) != 0) {
                fprintf(stderr, "Differing blocks at block # %i; writing block to %s\n", cnt, fnameout);
                if (lseek(fdout, -bufsize, SEEK_CUR) > -1) {
                    if (write(fdout, bufin, bufsize) != bufsize) {
                        fprintf(stderr, "Error: Unable to write to output file %s block # %i\n", fnameout, cnt);
                        exit(1);
                    }
                } else {
                    fprintf(stderr, "Error: Unable to seek to output file %s block # %i\n", fnameout, cnt);
                    exit(1);
                }
            }
        } else {
            fprintf(stderr, "Error: Unable to read from output file %s block # %i\n", fnameout, cnt);
            exit(1);
        }
        cnt++;
    }
    exit(0);
}
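Hypothetical usage of the answer's program, following its stated argument order (file1 is the source image, file2 the target block device; the file names are invented):

    cc -O2 -o blockcmp blockcmp.c
    # caution: this writes to the block device given as the second argument
    ./blockcmp new-image.img /dev/sdX 4096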
_cs.71577 | I am working on a project on recommendation systems, and would like to know about specific areas/research papers on which some new work can be performed, but not something too time/coding intensive. It's for a project due in 2 months. | What are useful/new areas to work on for recommendation systems? | recommendation systems;filtering problem | null
_codereview.149382 | Requirements: Must convert a double to a string, without losing any digits. Cannot use scientific notation. In C#, no format string exists that you can pass into Double.ToString(format) to accomplish this, because ToString will use only 15 digits, even though a double can have 17. Although "R" and "G17" can use 17 digits, they may or may not use scientific notation. Please correct me if I am wrong. Therefore, I wrote a function to do this. I'm posting it here as an optimization question. If you see flaws, feel free to point them out. I'm confident the code works, but there's plenty of room for improvement.
[Fact]
public void NumberFormats()
{
    double d1 = 123456789012171.23;
    double d2 = 0.0000000000000001;
    double d3 = 10000000000000000;
    string s1 = DoubleToString(d1);
    string s2 = DoubleToString(d2);
    string s3 = DoubleToString(d3);
    Assert.Equal("123456789012171.23", s1);
    Assert.Equal("0.0000000000000001", s2);
    Assert.Equal("10000000000000000", s3);
}

private string DoubleToString(double d)
{
    string R = d.ToString("R", CultureInfo.InvariantCulture).Replace(",", "");
    int i = R.IndexOf('E');
    if (i < 0) return R;

    string G17 = d.ToString("G17", CultureInfo.InvariantCulture);
    if (!G17.Contains('E'))
        return G17.Replace(",", "");

    // manual parsing
    string beforeTheE = R.Substring(0, i);
    int E = Convert.ToInt32(R.Substring(i + 1));
    i = beforeTheE.IndexOf('.');
    if (i < 0)
        i = beforeTheE.Length;
    else
        beforeTheE = beforeTheE.Replace(".", "");
    i += E;
    while (i < 1)
    {
        beforeTheE = "0" + beforeTheE;
        i++;
    }
    while (i > beforeTheE.Length)
    {
        beforeTheE += "0";
    }
    if (i == beforeTheE.Length)
        return beforeTheE;
    return String.Format("{0}.{1}", beforeTheE.Substring(0, i), beforeTheE.Substring(i));
} | Converting double to string without scientific notation and without losing digits | c#;.net;formatting;floating point | null
_softwareengineering.216111 | I have an idea how to implement subarray reverse with O(1), not including precalculation such as reading the input. I will have many reverse operations, and I can't use the trivial solution of O(N). Edit: To be more clear, I want to build a data structure behind the array with an access layer that knows about reversing requests and inverts the indexing logic as necessary when someone wants to iterate over the array. Edit 2: The data structure will only be used for iterations. I've been reading this, this, and even this question, but they aren't helping. There are 3 cases that need to be taken care of: (1) a regular reverse operation; (2) a reverse that includes an already-reversed area; (3) an intersection between a reverse and part of another reversed area in the array. Here is my implementation for the first two parts; I will need your help with the last one. This is the rule class:
class Rule {
    public int startingIndex;
    public int weight;
}
It is used in my basic data structure City:
public class City {
    Rule rule;
    private static AtomicInteger _counter = new AtomicInteger(-1);
    public final int id = _counter.incrementAndGet();

    @Override
    public String toString() {
        return "" + id;
    }
}
This is the main class:
public class CitiesList implements Iterable<City>, Iterator<City> {
    private int position;
    private int direction = 1;
    private ArrayList<City> cities;
    private ArrayDeque<City> citiesQeque = new ArrayDeque<>();
    private LinkedList<Rule> rulesQeque = new LinkedList<>();

    public void init(ArrayList<City> cities) {
        this.cities = cities;
    }

    public void swap(int index1, int index2) {
        Rule rule = new Rule();
        rule.weight = Math.abs(index2 - index1);
        cities.get(index1).rule = rule;
        cities.get(index2 + 1).rule = rule;
    }

    @Override
    public void remove() {
        throw new IllegalStateException("Not implemented");
    }

    @Override
    public City next() {
        City city = cities.get(position);
        if (citiesQeque.peek() == city) {
            citiesQeque.pop();
            changeDirection();
            position += (city.rule.weight + 1) * direction;
            city = cities.get(position);
        }
        if (city.rule != null) {
            if (city.rule != rulesQeque.peekLast()) {
                rulesQeque.add(city.rule);
                position += city.rule.weight * direction;
                changeDirection();
                citiesQeque.push(city);
            } else {
                rulesQeque.removeLast();
                position += direction;
            }
        } else {
            position += direction;
        }
        return city;
    }

    private void changeDirection() {
        direction *= -1;
    }

    @Override
    public boolean hasNext() {
        return position < cities.size();
    }

    @Override
    public Iterator<City> iterator() {
        position = 0;
        return this;
    }
}
And here is a sample program:
public static void main(String[] args) {
    ArrayList<City> list = new ArrayList<>();
    for (int i = 0; i < 20; i++) {
        list.add(new City());
    }
    CitiesList citiesList = new CitiesList();
    citiesList.init(list);
    for (City city : citiesList) {
        System.out.print(city + " ");
    }
    System.out.println("\n******************");
    citiesList.swap(4, 8);
    for (City city : citiesList) {
        System.out.print(city + " ");
    }
    System.out.println("\n******************");
    citiesList.swap(2, 15);
    for (City city : citiesList) {
        System.out.print(city + " ");
    }
}
How do I handle reverse intersections?
| Reverse subarray of an array with O(1) | java;algorithms;sorting;array | You could use a xor linked list variant (using indexes into an array for the pointer). Then reversing is easy:
reverse(int city1prev, int city1index, int city2prev, int city2index){
    int city1next = array[city1index].ptr^city1prev;
    int city2next = array[city2index].ptr^city2prev;
    array[city1index].ptr = city2next^city1next;
    array[city2index].ptr = city2prev^city1prev;
    array[city1prev].ptr ^= city1index^city2index;
    array[city2next].ptr ^= city1index^city2index;
}
The downside is (as with all linked list implementations) that indexing into it is O(n). This will reverse the subarray due to how the xor link works: city2 will be where city1 used to be and city2prev will be after it, and so on. So the new sequence becomes: city1prev, city2, city2prev, ..., city1next, city1, city2next. The key point is that there is no difference between traversing a xor linked list forward or backward (you keep an index to the current and the previous items and you get the next by doing array[current].ptr^previous).
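A short Java sketch of the traversal rule the answer describes (array[current].ptr ^ previous); the XorNode type and the use of index 0 as a reserved sentinel are hypothetical choices made only for illustration.

    final class XorNode {
        int ptr;        // xor of the indexes of the two neighbouring nodes
        // ...payload fields...
    }

    // Walk the list from `head`, whose virtual predecessor is the sentinel 0.
    static void traverse(XorNode[] array, int head) {
        int prev = 0;
        int cur = head;
        while (cur != 0) {                       // 0 terminates the list
            System.out.print(cur + " ");
            int next = array[cur].ptr ^ prev;    // the answer's core trick
            prev = cur;
            cur = next;
        }
    }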
_codereview.113343 | I have been using Microsoft.Unity as my container and have decided that the approach for a lazy implementation causes too much rework. Each time you decide to swap out to use a lazy, you have to go add .Value or a .Resolve() after your variable (which could be called hundreds of times). Suggested lazy implementation by Microsoft: Deferring the Resolution of Objects. So I decided to start from scratch. Firstly I needed an ILazy interface (I did add the Value property so you can get the actual value if it is ever needed):
public interface ILazy<out TInterface> where TInterface : class
{
    TInterface Value { get; }
}
We then need an interface and a type that is both ILazy<T> and of the interface to be implemented.
public interface IContract
{
    //methods and properties to be implemented
    void SomeFunction();
}

public interface ILazyContract : ILazy<IContract>, IContract
{
}

public class ContractImplementation : IContract
{
    public void SomeFunction()
    {
        Console.WriteLine("Doing something");
    }
}
I now need a factory which creates the implementation of the ILazyContract at runtime. This is a long class and has a bit of IL generation, so it might be difficult to read.
public class LazyTypeFactory
{
    //used to stop the same class being created twice
    private readonly object _lock = new object();
    private readonly Dictionary<Type, Type> _lazyTypes = new Dictionary<Type, Type>();
    private readonly ModuleBuilder _moduleBuilder;

    public LazyTypeFactory(string assembly, string module)
    {
        var an = new AssemblyName(assembly);
        var assemblyBuilder = AppDomain.CurrentDomain.DefineDynamicAssembly(an, AssemblyBuilderAccess.Run);
        _moduleBuilder = assemblyBuilder.DefineDynamicModule(module);
    }

    public Type CreateOrGetLazyType<TLazy, TInterface>(string name = null)
        where TLazy : ILazy<TInterface>, TInterface
        where TInterface : class
    {
        Type result;
        var @interface = typeof (TInterface);
        var @container = typeof (IUnityContainer);
        var @lazy = typeof (TLazy);
        var @func = typeof (Func<IUnityContainer, TInterface>);
        name = name ?? @lazy.Name + "Impl";
        name = @lazy.Namespace + "."
 + name;
        if (!@interface.IsInterface)
        {
            throw new Exception("Expected TInterface to be a type of interface");
        }
        lock (_lock)
        {
            if (_lazyTypes.ContainsKey(@lazy))
            {
                result = _lazyTypes[@lazy];
            }
            else
            {
                var typeBuilder = _moduleBuilder.DefineType(name, TypeAttributes.Public | TypeAttributes.Class, null);
                typeBuilder.AddInterfaceImplementation(@lazy);
                //private field for container
                var c = typeBuilder.DefineField("_c", @container, FieldAttributes.Public);
                //private field for func
                var f = typeBuilder.DefineField("_f", @func, FieldAttributes.Public);
                //private field for func result
                var v = typeBuilder.DefineField("_v", @interface, FieldAttributes.Public);
                //constructor with func
                CreateConstructor(typeBuilder, @container, c, @func, f);
                //private property to get func result
                var propertyBuilder = CreateValueProperty(typeBuilder, @interface, v, @container, c, @func, f);
                //interface methods: call prop then invoke method
                foreach (var iMethod in @interface.GetMethods())
                {
                    var mb = CreateOverride(typeBuilder, propertyBuilder, iMethod);
                    typeBuilder.DefineMethodOverride(mb, iMethod);
                }
                foreach (var iProperty in @interface.GetProperties())
                {
                    var pb = typeBuilder.DefineProperty(iProperty.Name, PropertyAttributes.None, iProperty.PropertyType, Type.EmptyTypes);
                    if (iProperty.GetMethod != null)
                    {
                        var iMethod = iProperty.GetMethod;
                        var mb = CreateOverride(typeBuilder, propertyBuilder, iMethod);
                        pb.SetGetMethod(mb);
                    }
                    if (iProperty.SetMethod != null)
                    {
                        var iMethod = iProperty.SetMethod;
                        var mb = CreateOverride(typeBuilder, propertyBuilder, iMethod);
                        pb.SetSetMethod(mb);
                    }
                }
                _lazyTypes[@lazy] = result = typeBuilder.CreateType();
            }
        }
        return result;
    }

    private static void CreateConstructor(TypeBuilder typeBuilder, Type @container, FieldBuilder c, Type @func, FieldBuilder f)
    {
        var constructor = typeBuilder.DefineConstructor(MethodAttributes.Public, CallingConventions.Standard, new[] { @container, @func });
        constructor.DefineParameter(1, ParameterAttributes.None, "container");
        constructor.DefineParameter(2, ParameterAttributes.None, "func");
        var cIl = constructor.GetILGenerator();
        cIl.Emit(OpCodes.Ldarg_0);
        cIl.Emit(OpCodes.Ldarg_1);
        cIl.Emit(OpCodes.Stfld, c);
        cIl.Emit(OpCodes.Ldarg_0);
        cIl.Emit(OpCodes.Ldarg_2);
        cIl.Emit(OpCodes.Stfld, f);
        cIl.Emit(OpCodes.Ret);
    }

    private static PropertyBuilder CreateValueProperty(TypeBuilder typeBuilder, Type @interface, FieldBuilder v, Type @container, FieldBuilder c, Type @func, FieldBuilder f)
    {
        var propertyBuilder = typeBuilder.DefineProperty("Value", PropertyAttributes.None, @interface, Type.EmptyTypes);
        var get = typeBuilder.DefineMethod("get_Value", MethodAttributes.Public | MethodAttributes.Virtual, @interface, new Type[0]);
        var getIl = get.GetILGenerator();
        var skip = getIl.DefineLabel();
        getIl.Emit(OpCodes.Ldarg_0);
        getIl.Emit(OpCodes.Ldfld, v);
        getIl.Emit(OpCodes.Ldnull);
        getIl.Emit(OpCodes.Bne_Un, skip);
        //call _f and set to _v
        getIl.Emit(OpCodes.Ldarg_0);
        getIl.Emit(OpCodes.Ldarg_0);
        getIl.Emit(OpCodes.Ldfld, f);
        getIl.Emit(OpCodes.Ldarg_0);
        getIl.Emit(OpCodes.Ldfld, c);
        getIl.Emit(OpCodes.Call, @func.GetMethod("Invoke", new[] { @container }));
        getIl.Emit(OpCodes.Stfld, v);
        getIl.MarkLabel(skip);
        getIl.Emit(OpCodes.Ldarg_0);
        getIl.Emit(OpCodes.Ldfld, v);
        getIl.Emit(OpCodes.Ret);
        propertyBuilder.SetGetMethod(get);
        return propertyBuilder;
    }

    private static MethodBuilder CreateOverride(TypeBuilder typeBuilder, PropertyBuilder propertyBuilder, MethodInfo iMethod)
    {
        var mb = typeBuilder.DefineMethod(iMethod.Name, MethodAttributes.Public | MethodAttributes.Virtual, iMethod.ReturnType,
            iMethod.GetParameters().Select(x => x.ParameterType).ToArray());
        var mIL = mb.GetILGenerator();
        mIL.Emit(OpCodes.Ldarg_0);
        mIL.Emit(OpCodes.Call, propertyBuilder.GetMethod);
        for (var i = 0; i < iMethod.GetParameters().Length; i++)
        {
            mIL.Emit(OpCodes.Ldarg, i + 1);
        }
        mIL.Emit(OpCodes.Call, iMethod);
        mIL.Emit(OpCodes.Ret);
        return mb;
    }
}
To create a Type of ILazyContract at runtime you would do something like this:
var factory = new LazyTypeFactory("Assembly.Lazy", "Module.Lazy");
var type = factory.CreateOrGetLazyType<ILazyContract, IContract>();
The implementation of this would look something like:
public class ILazyContractImpl : ILazyContract
{
    private readonly IUnityContainer _container;
    private readonly Func<IUnityContainer, IContract> _func;
    private IContract _value;

    public ILazyContractImpl(IUnityContainer container, Func<IUnityContainer, IContract> func)
    {
        _container = container;
        _func = func;
    }

    public IContract Value
    {
        get
        {
            if (_value == null)
            {
                _value = _func(_container);
            }
            return _value;
        }
    }

    public void SomeFunction()
    {
        Value.SomeFunction();
    }
}
As for the final part of the puzzle, you now have to register this in the Unity container. For this we also need to either register a Func<IUnityContainer, IContract> or add it as a parameter in an InjectionConstructor, which is what I have done.
IUnityContainer container = new UnityContainer();
var factory = new LazyTypeFactory("Assembly.Lazy", "Module.Lazy");
var type = factory.CreateOrGetLazyType<ILazyContract, IContract>();
container.RegisterType<IContract, ContractImplementation>();
var resolve = new Func<IUnityContainer, IContract>(c => c.Resolve<IContract>());
container.RegisterType(typeof(ILazyContract), type, new InjectionConstructor(typeof(IUnityContainer), resolve));
var lazy = container.Resolve<ILazyContract>();
lazy.SomeFunction();
So there is my take on lazy - which wasn't very lazy :(. Any feedback would be greatly appreciated; also, if there are any improvements that can be made, please let me know. As requested by Heslacher, this is a real implementation where I wanted to cache the UserSettings because they will not change over the life span of a request. Here you can see that the constructor will make a call to the db, which could take some time. So instead I would like to only do this when the class is actually used, not when it is created.
public interface IUserConfigurationService
{
    UserSetting[] GetSettings();
    UserSetting[] GetSettingsForProduct(Product product);
    UserSetting[] GetSettingsOfType(SettingType type);
}

public class UserConfigurationService : IUserConfigurationService
{
    private readonly Entities _entities;
    private readonly User _currentUser;
    //a cache so that we do not have to make the same call many times
    private readonly UserSetting[] _userSettings;

    public UserConfigurationService(Entities entities, User currentUser)
    {
        _entities = entities;
        _currentUser = currentUser;
        _userSettings = _entities.UserSettings.Where(x => x.User == currentUser).ToArray();
    }

    public UserSetting[] GetSettings()
    {
        return _userSettings;
    }

    public UserSetting[] GetSettingsForProduct(Product product)
    {
        return _userSettings.Where(x => x.Product == product).ToArray();
    }

    public UserSetting[] GetSettingsOfType(SettingType type)
    {
        return _userSettings.Where(x => x.SettingType == type).ToArray();
    }
} | Unity Lazy Resolve Implementation | c#;dependency injection;lazy;unity | null
_webmaster.17154 | Possible Duplicate: What is duplicate content and how can I avoid being penalized for it on my site? The same page, or rather the same content on a page, is accessible from two very similar URLs, namely:
http://www.example.com/path/to/page.php
http://www.example.com/path/to/page.php?catIndex=1
The URL param in this case affects all the navigation links on the page. Basically, the same page is accessible via two separate navigation routes. To the end user, who doesn't look at the URL, they could be perceived as two separate pages in different parts of the site, although the content is identical. I have a rel=canonical link element linking to the first URL (without the URL param). Could a search engine perceive this as duplicate content? I think my eyes have gone square, but I was considering adding a robots noindex meta tag to the page when the catIndex URL param exists. But it is really the same URL and I do want the content indexed once, so I'm now thinking this would be foolish?! | Same page in different locations on same site - duplicate content? | search engines;url;duplicate content;canonical url | If you're using canonical URLs, and you are, you're fine. Canonical URLs were created for the very scenario you are experiencing. Don't change or worry about a thing. :)
_softwareengineering.317637 | After I installed Wappalyzer, an extension which displays the technology a site is using, I checked many sites. On most big projects, like YouTube, GitHub, Stack Overflow, etc., Wappalyzer displays only some JS frameworks and commercial things. But when I checked sites which I built on Django, Wappalyzer displayed all my technologies in the backend and frontend. Maybe the question is too broad, but I saw this pattern and thought I was doing something wrong and that some common explanation lies on the surface. | Is this unprofessional when Wappalyzer sees all my technology? | python;coding style;code quality;django | Wappalyzer will not be able to see the backend code running on a remote server. If you are running your Django site locally, then it will be able to inspect and report on everything. Or to put it another way: you do not have access to YouTube's backend server, so you cannot tell anything about it or the technology it uses. You do have full access to your own, local server, so you can tell everything about it.
_codereview.91490 | Last night I was working on a project that I've embarked upon as a learning exercise. I decided to add a pagination feature to my blog system. I made an attempt at it and got close... but I decided to undo my changes and I searched online for other 'better' solutions. I found this example which uses the PagedList.Mvc NuGet package and this example from asp.net which uses the same package. Long story short, I decided that before I install another package I would make one more attempt at my own implementation, and I was successful... so, on to my question. Have I missed something important in my implementation that should make me want to reconsider my approach? Here's the relevant Action in my HomeController.cs:
public ActionResult Index(int num = 0)
{
    var postsPerPage = 3;
    ViewBag.startNum = num;
    using (UnitOfWork uwork = new UnitOfWork())
    {
        IEnumerable<Post> posts = uwork.PostRepository.GetAll().ToList();
        int totalPosts = posts.Count();
        ViewBag.pages = totalPosts / postsPerPage;
        int skip = num * postsPerPage;
        return View(posts.Skip(skip).Take(postsPerPage));
    }
}
and this is the relevant code from my Index.cshtml:
<ul class="pagination">
    <li>@Html.ActionLink("Home", "Index", "Home", new { num = 0 }, null)</li>
    @for (var i = 1; i < ViewBag.pages + 1; i++)
    {
        var cur = i.ToString();
        var name = i + 1;
        <li>@Html.ActionLink(name.ToString(), "Index", "Home", new { num = cur }, null)</li>
        if (i >= 10) { break; }
    }
</ul>
Note: I know there's going to be a bit more logic required in order to account for the remainder of pages after they are divided. But that's not my biggest concern as of yet. | Pagination implementation for a blog system | c#;asp.net mvc;pagination | int num: what does num represent exactly? Number of pages? Number of posts per page? Current page number? Properly naming the variable would help a lot in clarity here. var postsPerPage = 3: turn it into a parameter with default value 3. That way you can always decide later that the user can change this, without much trouble. Instead of using 0-based pages and then adding 1 to the presentation, I would consider using the reverse: 1-based pages and subtracting 1 when determining the pages to display. In essence this would start your loop at i = 2, name becomes unnecessary, and in your controller you use int skip = (num - 1) * postsPerPage instead. ViewBag.pages (note the incorrect naming convention) indicates to me a collection of pages. In your case, however, a more appropriate name would be AmountOfPages. I feel like there's room for an optimization here: what if you have 100,000 pages? Or even just 100? You will load all your items each time someone visits the frontpage, and you're only doing this to show the user how many pages of items there are.
In fact you're not even doing that: you won't show them past 10 pages anyway! I would argue that maybe you should just use two queries: var totalPosts = uwork.PostRepository.GetAll().Count(), which will just perform a select count(*) from posts at the database side, and another query that gives you your pages: var pages = uwork.PostRepository.GetAll().Skip(skip).Take(postsPerPage). Another way to keep performance in mind is to use uwork.PostRepository.GetAll().Take(postsPerPage * 10).ToArray(); This will give you: the amount of posts (the exact amount being pages.Length), and the concrete posts (being pages.Skip(skip).Take(postsPerPage)). The benefit of this is that it will only call the database once (network latency is one of your demons), with the downside being that it retrieves more items than you're interested in. Instead of looping to ViewBag.pages + 1 and possibly breaking out at i >= 10, I would just loop until Math.Min(ViewBag.Pages + 1, 10).
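Pulling the answer's suggestions together, a hypothetical revision of the action in C# (1-based page parameter, two queries, clearer names; the ceiling division also covers the remainder case the question deferred):

    public ActionResult Index(int page = 1, int postsPerPage = 3)
    {
        using (var uwork = new UnitOfWork())
        {
            // Query 1: a bare COUNT on the database side.
            int totalPosts = uwork.PostRepository.GetAll().Count();
            ViewBag.AmountOfPages = (totalPosts + postsPerPage - 1) / postsPerPage;

            // Query 2: only the rows for this page.
            var posts = uwork.PostRepository.GetAll()
                             .Skip((page - 1) * postsPerPage)
                             .Take(postsPerPage)
                             .ToList();
            return View(posts);
        }
    }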
_webmaster.76606 | SEO plugins like Yoast for WordPress, and various YouTube videos, recommend using bold text for your keywords to improve search engine rankings. Does bolding your text actually work for SEO? What about bold anchor tags? | Does bold text really improve your search engine rankings? | seo;ranking | Be careful! The cited link (http://moz.com/search-ranking-factors/survey) is a list of reported beliefs from SEOs. It often does not reflect reality, but rather how far afield our so-called SEO experts are while living in an echo-chamber of their own making. This yearly survey is evidence of the echo, which is growing quite weary. It is no wonder the average webmaster is confused. For a period, Google did give weight to italic and bold terms, thinking that these were valuable clues, but since that too could be gamed, and bold and italic terms were used in an effort to spam search engines and to gain a disproportionate advantage, that practice ended many, many years ago. Bold and italics DO NOT affect SEO except for secondary effects. The mark-up is relatively ignored and the text is weighted exactly the same as all the rest. I say relatively because I suspect that the parser/filter rule likely still exists where these terms are indexed, but that the weight is 0. Having said that, bold and italic terms can have a secondary effect of increasing engagement, thus increasing the time-on-page and time-on-site metrics and possibly pages visited, and a site may see a modest increase in conversion rates as a result. However, from a marketing stand-point, this is a terrible thing to do. Another secondary effect that may exist is that keyword matches may be made against bold and italic text, much like the description meta-tag is matched to search queries, though I suspect that would be less likely. Even though there is no weight assigned to these terms, matches can be made given the right circumstance, meaning that the pool of matches does not rise to an expected level and some lower criterion is used to make the match. You have to remember that most SEOs are parroting what they have read or heard, which changes daily. So what was true 5-6 years ago still echoes about as fact when in reality Google made the change a long time ago. As a hint for future questions such as this, ask yourself if the purported factor can be manipulated. In this case, the answer would be Yes: bold and italic terms can be manipulated. If you find yourself answering yes, then the answer to the question of whether the purported whatever is an SEO factor is clearly No. The only exceptions that remain are links, title tags, header tags, image alt text, and description meta-tags. There may be a few more minor cases that have not come to mind. That is it. It truly is that simple, folks. Ref: https://plus.google.com/+MarkTraphagen/posts/Ex7p2rDx2Do You will want to read this; AJ Kohn essentially echoes what I just said, but shorter.
_unix.9062 | I want to find the default handling application in my C program. Is there a C API with the same functionality as xdg-mime query default <mime-type> on Linux? | Is there a glibc API that can find the default handling application for a MIME type on Linux? | c;freedesktop;mime types | glibc doesn't know anything about MIME types; the relevant API functions live at the level of desktop environment APIs, and the freedesktop.org specifications recognize that harmonizing them is an impossible task, so they only specify the shell-level interface. You either use that via popen() or code for a particular desktop environment.
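As an illustration of the popen() route the answer mentions, here is a minimal C sketch; the MIME type is hard-coded as an example and error handling is kept to the bare minimum.

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Ask the shell-level tool, as suggested above; there is no glibc call for this. */
    FILE *p = popen("xdg-mime query default text/html", "r");
    if (!p)
        return 1;

    char desktop_entry[256] = "";
    if (fgets(desktop_entry, sizeof desktop_entry, p))
        desktop_entry[strcspn(desktop_entry, "\n")] = '\0'; /* strip the trailing newline */
    pclose(p);

    printf("default handler: %s\n", desktop_entry); /* e.g. firefox.desktop */
    return 0;
}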
_unix.370182 | I am installing RHEL on a few systems and found an interesting option for LVMs:

volgroup (optional) - Creates a Logical Volume Management (LVM) group.
--reserved-space= - Specify an amount of space to leave unused in a volume group, in MiB. Applicable only to newly created volume groups.
--reserved-percent= - Specify a percentage of total volume group space to leave unused. Applicable only to newly created volume groups.

Now I'm wondering about the utility of --reserved-percent. I understand the premise: many times I've encountered disks where a single partition is full and another has tons of space. I'm wondering if there's a way to dynamically allocate the reserved space to volumes as they fill? | Can an LVM Dynamically Allocate Reserved Space to its Volumes? | rhel;lvm | null
_unix.379307 | I'm running Oracle Linux on VirtualBox, using the Gnome desktop environment. After I installed the guest tools to use a shared folder (and some kernel/header-related packages mandated by the installer), the graphical desktop stopped working.

When I try to log in, only a black screen appears with the cursor, and after a minute I'm taken back to the login screen. After some research I got the impression that something didn't install cleanly and Gnome got broken.

How do I fix this? I thought about uninstalling those tools in text mode (Ctrl+Alt+F2), but for some reason this terminal doesn't even have yum installed. And the thing is, I don't remember the names of those kernel/header packages I installed before the guest tools. I'm not even sure that is the cause of the problem. | Oracle Linux server, black screen after login, and back to login screen | linux;gnome;virtualbox;oracle linux;oracle | null
_codereview.42997 | The below code is for printing level-by-level in a binary tree:

// level order printing
public static void levelOrderPrint(Node root) {
    Queue<Node> que = new LinkedList<Node>();
    Node mark = new Node(0);
    if (root != null) {
        que.add(root);
        que.add(mark);
    }
    while (!que.isEmpty()) {
        Node temp = que.poll();
        if (temp != mark)
            System.out.print(temp.key);
        if (temp == mark) {
            if (que.peek() == mark || que.isEmpty()) {
                return;
            }
            que.add(mark);
            System.out.println();
        }
        if (temp.left != null) {
            que.add(temp.left);
        }
        if (temp.right != null) {
            que.add(temp.right);
        }
    }
}

I would like to know if there are any bugs or possible optimizations. | Printing the values on each level of a Binary Tree | java;algorithm;tree | null
_cs.7488 | A trivially decidable problem is one in which the problem is a known property of the language/grammar. So shouldn't "the intersection of two regular languages is regular" be trivially decidable? But it is given as not trivially decidable. | Is the intersection of two regular languages regular? | regular languages;undecidability;decision problem | Summarizing the discussion, the intersection of two regular languages is always regular, and so trivially decidable.
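For completeness (the original answer states the fact without proof), the standard product construction shows why the intersection is regular:

Given DFAs $A_i=(Q_i,\Sigma,\delta_i,q_i,F_i)$ for $i=1,2$, define
$$A=\bigl(Q_1\times Q_2,\ \Sigma,\ \delta,\ (q_1,q_2),\ F_1\times F_2\bigr),\qquad
\delta\bigl((p,q),a\bigr)=\bigl(\delta_1(p,a),\,\delta_2(q,a)\bigr).$$
Then $L(A)=L(A_1)\cap L(A_2)$, so the intersection is regular, and properties such as membership and emptiness remain decidable for it.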
_softwareengineering.264779 | I need to create a database in which several tables have images. For example, users have profile pictures and uploaded ones, and products have many pictures as well. Is it better to have one MySQL table "images", or to add image fields to every table that contains them?

Also, how (syntax questions don't belong here, but it would be nice) would you add many images into a MySQL data column? I've read about the advantages of storing file paths vs. the actual files in the database, but what about a long string with many file paths vs. many rows? For example:

INSERT INTO images (userid, image1, image2, image3) VALUES (autoincrementedPK, 'pathtofile1', 'pathtofile2', 'pathtofile3')

or

INSERT INTO images (userid, images) VALUES (autoincrementedPK, 'pathtofile1, pathtofile2, pathtofileN')

Which option is better and why? Is option two viable? And if so, how would you implement it? | Creating a separate table for images or adding image fields to many tables? | database;database design;mysql;image;blob | First, about storing file paths:

- Having several image path columns (imagepath1, imagepath2 ... imagepathN) violates 1NF.
- Storing several comma-separated image paths in the same column also violates 1NF.

When you violate the simplest of normal forms, you will have a lot of headaches in the future. The correct thing to do is to create a separate table for images. That table would have a foreign key to the user table (userid). You then insert into that table as many image paths as you want for any given user.

Regarding storing actual images as BLOBs: the same 1NF rules apply, and the same separate table for images applies. In this case, having separate tables for BLOBs is good because the DBA can put the image tables in a separate tablespace, optimizing the disk I/O.

Even in the case where users are allowed only one image, denormalization by creating a separate table with a one-to-one relationship is recommended.

Note: I can't give you syntax because I'm not familiar with MySQL datatypes.
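Since the answer stops short of syntax, here is a minimal sketch of the normalized table it describes; the table and column names are illustrative assumptions, not part of the original answer.

CREATE TABLE images (
    imageid  INT UNSIGNED NOT NULL AUTO_INCREMENT,
    userid   INT UNSIGNED NOT NULL,        -- owner of the image
    path     VARCHAR(255) NOT NULL,        -- one file path per row (1NF)
    PRIMARY KEY (imageid),
    FOREIGN KEY (userid) REFERENCES users (userid)
);

-- One row per image, as many rows per user as needed:
INSERT INTO images (userid, path) VALUES (42, 'pathtofile1'), (42, 'pathtofile2');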
_unix.237072 | Recently, I installed Kali Linux 2.0 as a third OS on my Dell Latitude E7240.I used Unetbootin to make a bootable USB of Kali Linux. When I booted from it, it gave me options I wasn't used to. I chose Default the first time, and I was met with a black screen, so I manually shut my computer off, rebooted, and then chose Live Encrypted USB Persistence.After a while of outputting stuff while booting which I did not understand at all, I finally got into a live session of Kali Linux 2.0. From there, I searched in the applications and found Install Kali, which I clicked.I was given a graphical interface which removed the dash and bar at the top, and only showed me the installer, which was partially cut off. So, for I believe two things (one of which was configuring the network), I could not see any options, and blindly hit enter. However, I'm fairly certain nothing went wrong here, as the installation carried through smoothly and asked me mostly what I would expect to be asked while installing. However, since I already have Ubuntu, the first time, I selected no for installing Grub, then it said I had to make my OS bootable and so I had to install something somewhere, so I just chose my hard drive, /dev/sda, rather than entering the device manually, which I don't have any experience with. I finished the installation successfully, then rebooted.My Ubuntu Grub loaded, but I didn't see Kali Linux. I tried following tutorials to add it to Grub, but had no luck. So, I reinstalled Kali, this time choosing to install Grub. Then I rebooted, and the Kali Grub showed up. However, now I get warnings when booting into Kali (Using Kali Linux) and when booting into Ubuntu. This post is about booting into Ubuntu. When I boot into Ubuntu, I get the warnings shown in the following image:EDIT2: New Image with more of the warnings (the warnings that flash for less than a second).I don't think I have experienced any problems yet. However, just to be safe, I would like to know what this means. So what does that mean? If I am missing information, please tell me, and I will add it. EDIT1: This all began AFTER I installed Kali Linux. | Warning while Booting into Ubuntu | ubuntu;boot;kali linux;dual boot;boot loader | null |
_reverseengineering.15420 | I using Windows, and I was wondering what is the best anti anti debug plugin that exist,I tried to use hidedebug by Bob -> Team PEiD, but unfortunately it catch only the regular ways, I know that some of you will send me to the documents of all functions, but I'm looking for something that will make my life easier.P.S:I'm trying to debug just for fun and not for work :)Thanks ahead. | Immunity debugger anti anti debug | disassembly;windows;anti debugging;immunity debugger | null |
_webapps.13047 | I'm running Acunetix to test my ASP.NET page and when it gets to the SQL injection tests it says nothing is wrong. But some data is still being added to my database.Is there any way to see what queries Acunetix is executing when it runs tests? That would give me a better idea of what's going on and why I'm getting data inserted.I have tried with Fiddler to see HTTP traffic but without success. I also tried WireShark sniffer and I could see the packets but, the protocol I'm using is HTTPS so that didn't bring so much information. | See the actual queries and actions Acunetix is testing with | testing | null |
_softwareengineering.300847 | Typically in a C# program, the entry point is in Program.cs. However, if you are creating a rather generic class library, with no Main() method, such asTCPClient.dllwhat is the appropriate base class name? (cannot be TCPClient as that is the namespace). | Class library naming conventions | c#;naming | There isn't one.The entrypoint is for your executable. There is one.Your library has as much business declaring an entrypoint as do all the other files in your program, be they dynamically or statically linked; i.e. none.Name the classes it declares however you like. |
_unix.137037 | I'm creating a new user with the following details:useradd -d /var/www/html/testuser.com -G users testuserI proceed by creating a new password for the user and attempt to login via SSH, I land on /var/www/html/testuser.com but I can still go back in the directories. I want to restrict user permissions so that they can only manage the content inside the testuser.com and sibblings (so they can do all normal operations), I do not want the user to be able to navigate back in the directories and effectively have root access. | Restrict user access in linux | users;account restrictions | null |
_cs.37818 | $A=${$ M$:$M$ $is$ $a$ $Turing$ $Machine$ }What can be said about $A$ ? Specifically, is $A$ decidable,regular,CFL,CSL?I would say $A$ is decidable since we can write an algorithm to check whether a string is a valid encoding of a Turing machine .But, is $A$ Regular[or CFL or CSL] ? Edit : Someone argued that he could make an encoding where all the possible strings(What would be the alphabet here?-same as the encoding I suppose) are valid encoding of a TM(since there is a one-to-one correspondence between two countable infinite sets), hence making $A$ regular . | Language consisting of all Turing machine encodings | formal languages;computability;turing machines | The complexity of $A$ depends on the encoding used for Turing machines. It is easy to come up with an encoding in which every string encodes some Turing machine (there are lots of ways). In contrast, it is easy to come up with artificially hard encodings, say the $k$th Turing machine being encoded by $1^kb$, where $b=1$ iff the $k$th Turing machine halts on the empty input; under this encoding, $A$ is not decidable. Nevertheless, it seems intuitively clear that any reasonable encoding makes $A$ decidable, though it's hard to say anything more without knowing the exact encoding. |
_unix.353660 | Here is my problem (windows 7 and Linux). They are supposed to be separate but seems they are not. $ df -h --totalFilesystem Size Used Avail Use% Mounted onudev 7.8G 0 7.8G 0% /devtmpfs 1.6G 9.4M 1.6G 1% /run/dev/sdb6 48G 7.0G 39G 16% /tmpfs 7.9G 464K 7.9G 1% /dev/shmtmpfs 5.0M 4.0K 5.0M 1% /run/locktmpfs 7.9G 0 7.9G 0% /sys/fs/cgroup/dev/sdb1 453M 165M 261M 39% /boot/dev/sdb7 48G 7.7G 38G 17% /hometmpfs 1.6G 104K 1.6G 1% /run/user/1000/dev/sda1 120G 113G 6.6G 95% /media/ken/5A50B44C50B4309Dtotal 243G 128G 110G 54% -/dev/sda1 is in the windows 7 folder somehow. I want to change this to sdb drive which has space. Maybe a cp copy of /dev/sda1 to a new /dev/sdbx?I'm not even sure why this shows /dev/sdb1 and /dev/sda1.Maybe Linux is not using /dev/sda1? How can I check?Any ideas on what I can do? | change /dev/sda1 (95% full) to sdb drive | mount;partition;udev;partition table | null |
_unix.197751 | I set up to run notify-send every minute, $ crontab -l1 * * * * /usr/bin/notify-send -t 0 helloWhy does it not work? Do I need to restart OS after editing the crontab file?Does the following mean that cron is running?$ ps aux | grep -i cronroot 1038 0.0 0.0 23660 2420 ? Ss Apr20 0:00 cronCan I specify a more frequent schedule, such as 30 seconds? Can thetime be specified as decimals?0.5 * * * * /usr/bin/notify-send -t 0 hello | My cron job doesn't run | cron | null |
_codereview.23420 | So I create 3D tree like structures using L-systems. Basically this means I generate a string like this: FFFFFFFF-[[FFFF-[[FF-[[F-[[X]+X]+F[+FX]-X]+F]]]]]This string is then interpreted by a turtle performing certain actions for each character in the string. The problem is how to map characters to turtle actions in an elegant way in C++.Right now I use a enum to specify the actions and performAction calls methods based on the action. Is there a better way of doing it?The turtle code:enum TurtleAction { MoveForward, TurnLeft, TurnRight, PitchDown, PitchUp, RollLeft, RollRight, TurnAround, PushState, PopState};struct TurtleState { Vec3f pos; Vec3f H; Vec3f L; Vec3f U; double width;};class Turtle { TurtleState m_currentState; stack<TurtleState> m_states; void(*m_drawFunc)(Vec3f,double); ///< Drawing callbackpublic: double alpha; ///< Turning angle Turtle(TurtleState startState, void(*drawFunc)(Vec3f,double)); void performAction(TurtleAction action);private: void move(double dist); void turn(double deg); void pitch(double deg); void roll(double deg); void push(); void pop(); void rotate(const Mat44f & R);};voidTurtle::performAction(TurtleAction action){ switch (action) { case MoveForward: move(1.0f); break; case TurnLeft: turn(-alpha); case TurnRight: turn(+alpha); case PitchDown: pitch(-alpha); case PitchUp: pitch(+alpha); case RollLeft: roll(-alpha); case RollRight: roll(+alpha); case TurnAround: turn(180.0f); case PushState: push(); case PopState: pop(); default: break; }}The string parsing:class LSystem { map<char,TurtleAction> m_actionRules; TurtleState m_start;public: void draw(string & str,void(*drawFunc)(Vec3f,double)); void computeString(string & axiom, int generations); LSystem(); ~LSystem();};voidLSystem::draw(string &str, void (*drawFunc)(Vec3f, double)){ Turtle turtle(m_start,drawFunc); turtle.alpha = 25.0f; for (int s = 0; s < str.size(); s++) { char c = str[s]; map<char,TurtleAction>::iterator it = m_actionRules.find(c); if (it != m_actionRules.end()) { turtle.performAction(it->second); } }} | Is there a better way to translate a string into methods? | c++ | The design flaw would have been the use of this switch case with 11 entries. That's already a lot. By the way I warn you, you have forgotten 9 break out of 11 cases.However, in your case, each case relates to a single action. I mean, there is not a lot new to understand while reading: for each case you define a single behaviour action, inside a single word function call. (You don't put multi line, multi code, with multi conditions behaviour that is complex inside your switch case... : this is the thing to avoid and you have avoided it)So honestly speaking it's totally acceptable.Only for your culture I am mentionning some ways to get rid of switch case. I repeat myself, you shouldn't apply them in your case right now.However, the first thing you may notice in such design, maybe there are common behaviours that are dupplicated between all your function call inside the switch case.Imagine, if in roll() and turn() you would first have toTurn head to the right to see if nothing tries to cross my pathTurn head to the left to see if nothing is moving straight right on meand then only to roll() or to turn()Then you could start thinking about using the Strategy pattern instead of simple function calls. 
This way, the strategy object which contain the action to do, can share common behaviour in a mother class.If you started to have really many Strategies, that should be tune and tweaked precisely, then you would think about implementing the Interpreter Pattern so as to map between the precise language in your entry and the Strategies to apply.And if you start to have a big Interpreter object and you think you need a grammar to express all the possibilities, then no doubt you should think about a Domain Specific Language. ( with you how parser etc... ).In your case the switch case is entirely sufficient ;-) |
_softwareengineering.271083 | I'm writing the installation code of my modular web application and I stumbled on writing the update code for the plugins.The problem I am having is: how do you know if the plugin is already installed? Developers can give their plugin a very basic name like: News. There will probably be more plugins with that name and now the system think it's the same plugin and will overwrite it.What can I provide to the plugin to make it unique when installing and knowing it's an update when a user provides a new installation package?The idea is that there is only an installation service, not an install and update.The system should be able to know if it's a new installation or a update. | How to separate different plugins with the same name | plugin architecture | You've correctly noticed that a package has a name, an author, and a version. These three uniquely identify a package. If you need to map this to a file system, you could choose a structure such as./plugins/author-id/package-id/version/This allows two authors to offer different packages under the same name, and multiple versions of one package to be installed at once. If no support for multiple versions is needed, you can drop the version level.The question is how to assign author IDs. Java uses the nice trick of simply using domain names, since they should be unique anyway. This works well if every developer can be expected to own a domain (this would seem to be the case in web application development). However, Java reverses the order of subdomains so that the hierarchically most important information is at the front of the name. The domain dev.example.com is mapped to the prefix com.example.dev.It is useful to have a file in each package that describes metadata. This file could declare information about dependencies, a summary of what this package does, an a version number.With this structure, installing or upgrading a package would work like this:The new package is unpacked into a temp directory, and the metadata is read.The directory of installed modules is searched for a package of same name from the same author. If no match is found, this package is installed.If a match was found and the version number is higher, then the old package is removed and the new package installed.Before installation, the presence of dependencies is verified.There are tons of package managers which you can steal ideas from. Take a look at CPAN (Perl), pip (Python), gem (Ruby), npm (Node.js). Each of these have some strengths and some weaknesses. |
_cstheory.20843 | Suppose there is a graph $G=(V,E)$. I want to test if $V$ can be partitioned into two disjoint sets $V_1$ and $V_2$ such that the subgraphs induced by $V_1$ and $V_2$ are unit interval graphs.I know about the NP-completeness of determining interval numbers but the above problem is different.Now, in the literature I found this work by A. Gyrfs and D. West on multitrack interval graphs but I'm not sure if it is relevant to above problem.Any citation to existing literature on the above or similar problem would be helpful. Also please let me know if there is a formal name for the above problem. | Partition into interval graphs | graph theory;partition problem;interval graphs | I think, your problem is NP-complete. It is a special case of a theorem by Farrugia, stating that it is NP-hard to test if the vertex set a graph can be partitioned into two subsets $V_1,$ and $V_2$ such that $G(V_1)$ belongs to the graph class $\mathcal{P}$ and $G(V_2)$ belongs to the graph class $\mathcal{Q}$, provided $\mathcal{P}$ and $\mathcal{Q}$ are closed under taking vertex-disjoint unions and talking induced subgraphs, and at least one of $\mathcal{P}$ and $\mathcal{Q}$ is non-trivial (meaning not all graphs in the class are edgeless). |
_unix.352781 | How are these process concepts related together: background, zombie, daemon, and "without controlling terminal"?

I feel that they are somehow close, especially through the concept of the controlling terminal, but there is still not enough info for me to tell a story, as if you needed to explain something to a child reading an article about Linux, without lying too much.

UPDATE #1: For example (I don't know if these are true):

- background -- zombie: a foreground process cannot become a zombie, because a zombie is a background process that was left without a parent
- daemon -- without ctty: all daemons run without a ctty, but not all processes without a ctty are daemons
- background -- daemon: a background process can be retrieved to run interactively again, a daemon cannot
- zombie -- without ctty: a zombie is indifferent to whether there is a ctty attached to it or not
- background -- without ctty: processes are sent to the background while they have a ctty, and become daemons or die if the ctty is taken from them

| Background, zombie, daemon and without ctty - are these concepts connected? | background process;daemon;job control;zombie process;controlling terminal | In brief, plus links.

zombie

A process that has exited/terminated, but whose parent has not yet acknowledged the termination (using the wait() system calls). Dead processes are kept in the process table so that their parent can be informed of the child process exiting, and of its exit status. Usually a program forking children will also read their exit status as they exit, so you'll see zombies only if the parent is stopped or buggy.

See:
- Can a zombie have orphans? Will the orphan children be disturbed by reaping the zombie?
- How does Linux handle zombie processes?
- Linux man page waitpid(2)

controlling terminal, session, foreground, background

These are related to job control in the context of a shell running on a terminal. A user logs in, a session is started, tied to a terminal (the controlling terminal), and a shell is started. The shell then runs processes and sends them to the foreground and background as the user wishes (using & when starting the process, stopping it with ^Z, using fg and bg). Processes in the background are stopped if reading or writing from the terminal; processes in the foreground receive the interrupt signal if ^C is hit on the terminal. (It's the kernel's terminal driver that handles those signals; the shell controls which process (group) is sent to the foreground or background.)

See:
- Difference between nohup, disown and &
- Bash reference manual: Job Control Basics

daemon

A process running as a daemon is usually something that shouldn't be tied to any particular terminal (or a login session, or a shell). It shouldn't have a controlling terminal, so that it won't receive signals if the terminal closes, and one usually doesn't want it to do I/O on a terminal either. Starting a daemon from the command line requires breaking all ties to the terminal, i.e. starting a new session (in the job control sense, above) to get rid of the controlling terminal, and closing the file handles to the terminal. Of course, something started from init, systemd or similar outside a login session wouldn't have these ties to begin with. Since a daemon doesn't have a controlling terminal, it's not subject to job control, and being in the foreground or background in the job control sense doesn't apply.
Also, daemons usually re-parent to init, which cleans them up as they exit, so you don't usually see them as zombies.

See:
- What's the difference between running a program as a daemon and forking it into background with '&'?
- Linux man page daemon(7)
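To illustrate the "breaking all ties to the terminal" step described above, here is the classic minimal daemonize sketch in C. Error handling is omitted, and a real daemon would typically also chdir("/"), reset the umask, and handle logging.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

static void daemonize(void)
{
    if (fork() != 0)            /* parent returns to the shell */
        exit(0);
    setsid();                   /* new session: drops the controlling terminal */
    if (fork() != 0)            /* second fork: can never re-acquire a ctty */
        exit(0);

    /* detach stdin/stdout/stderr from the terminal */
    int fd = open("/dev/null", O_RDWR);
    dup2(fd, STDIN_FILENO);
    dup2(fd, STDOUT_FILENO);
    dup2(fd, STDERR_FILENO);
    if (fd > STDERR_FILENO)
        close(fd);
}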
_codereview.2362 | With some advice from SO, I developed this system, which I think is quite strong against bots automatically posting comments.

index.php

<html>
<head>
<script type="text/javascript" src="jquery.js"></script>
<script>
function main(){
    var str=$("#key").load("getToken.php", function (responseText) {
        $("#key").val(responseText);
    });
    setTimeout("main()", 100000);
}
</script>
</head>
<body onload='main()'>
    <form name="f" action="poster.php" method="post">
        <input type="text" name="text"/><br>
        <input type="text" name="key" id="key" value=""/><br>
        <input type="submit">
    </form>
</body>
</html>

getToken.php

<?php
    $key=date("Y-m-d H:i:s");
    $hash=sha1($key.'mySecretKey');
    echo $key.'#'.$hash;
?>

poster.php

<?php
if (!isset($_POST['key'])) exit;
$parts = explode('#',$_POST['key'],2);
$key = $parts[0];
$hash = $parts[1];
$date1 = $key;
$date2 = date("Y-m-d H:i:s");
$diff = abs(strtotime($date2) - strtotime($date1));
$years = floor($diff / (365*60*60*24));
$months = floor(($diff - $years * 365*60*60*24) / (30*60*60*24));
$days = floor(($diff - $years * 365*60*60*24 - $months*30*60*60*24)/ (60*60*24));
$hours = floor(($diff - $years * 365*60*60*24 - $months*30*60*60*24 - $days*60*60*24)/ (60*60));
$minuts = floor(($diff - $years * 365*60*60*24 - $months*30*60*60*24 - $days*60*60*24 - $hours*60*60)/ 60);
$seconds = floor(($diff - $years * 365*60*60*24 - $months*30*60*60*24 - $days*60*60*24 - $hours*60*60 - $minuts*60));
if ($seconds < 5)
    echo $seconds.' Too fast, must be a naughty bot <br>';
else if ($seconds>5 && $seconds < 600)
    echo $seconds.' In time <br>';
else
    echo $seconds.' time out <br>';
if ($hash == (sha1($key.'sou')))
    echo $_POST['text'];
else
    echo 'You are a bot !';
?>

| Anti-Bot comment system | php;security;php5;ai |
- 365*60*60*24 should be a constant.
- You should really improve your variable naming. $date2 = date("Y-m-d H:i:s"); "date2"? Does that say anything? currentTime is more like it. Always describe what variables contain, not what they are.
- Isn't $diff already the number of seconds?
- Most of the date/time checking could be rewritten to: $seconds = time() - strtotime($key);
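Following the reviewer's last point, poster.php's time check could shrink to something like the sketch below. Note also that getToken.php and poster.php must share one secret; in the code as posted they differ ('mySecretKey' vs. 'sou'), so the hash check as written can never succeed. The thresholds follow the original code.

<?php
// Sketch: simplified version of the original time-window and hash check.
if (!isset($_POST['key'])) exit;
list($key, $hash) = explode('#', $_POST['key'], 2);

$seconds = time() - strtotime($key);   // age of the token in seconds

if ($seconds < 5) {
    echo 'Too fast, must be a naughty bot';
} elseif ($seconds > 600) {
    echo 'Time out';
} elseif ($hash === sha1($key . 'mySecretKey')) {  // same secret as getToken.php
    echo htmlspecialchars($_POST['text']);         // escape output while we're at it
} else {
    echo 'You are a bot!';
}
?>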
_softwareengineering.191840 | I have seen some sites which say, for example, that multiple votes/views from the same computer will be neglected/penalized, etc. For example, liking a Facebook page or a YouTube video from the same computer (different accounts) will not increase its worth (to my knowledge).

How do these sites identify bogus votes? I just need a direction. | How web-servers identify a client | web development;web applications | Without requiring user accounts (I'm assuming that your use case precludes them), there's no foolproof method to identify end users. But some common methods include:

Cookies

Set a cookie on the client machine to identify the user.
Pros: Definitely unique per session
Cons: Easy to remove or prevent (even for people with limited technical knowledge), only applies to one browser, easy for a script to bypass

IP address

Use the user's IP address to identify the user.
Pros: Changes infrequently, works across browsers, requires technical knowledge to spoof or to use a proxy server
Cons: Not necessarily unique: one IP address can easily apply to entire homes/buildings/offices behind a network, and for some ISPs end-user IP addresses might be reassigned

Browser fingerprinting

Use all browser data passed to the server (not just the user agent, but also OS, screen resolution, and a number of other things) to generate a unique browser fingerprint.
Pros: High probability of being unique, opaque to the end user (changing IP addresses or clearing cookies won't bypass it)
Cons: Probably too unique -- any change in browser condition (or browser) will change the fingerprint (though you can choose only a subset of the supplied browser information to balance fingerprint volatility and uniqueness); can be bypassed by scripts

You can, of course, also use multiple methods. Which method you use really depends on your specific use case. For many cases, just a simple cookie will do. If you want some defense against scripts or other methods of bypassing, add an IP address rate limit. You can also attempt to gather as much information as possible and use some sort of heuristic algorithm to try to find people that are bypassing the uniqueness checks you have in place.
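As a toy illustration of the fingerprinting idea (a sketch only; real fingerprinting libraries combine far more signals), a client-side hash of a few browser properties might look like this. The particular signals chosen here are an arbitrary assumption.

// Toy browser fingerprint: hash a few navigator/screen signals with FNV-1a.
function fingerprint(): string {
  const signals = [
    navigator.userAgent,
    navigator.language,
    `${screen.width}x${screen.height}x${screen.colorDepth}`,
    Intl.DateTimeFormat().resolvedOptions().timeZone,
  ].join("|");

  let h = 0x811c9dc5;                      // FNV-1a 32-bit offset basis
  for (let i = 0; i < signals.length; i++) {
    h ^= signals.charCodeAt(i);
    h = Math.imul(h, 0x01000193);          // FNV prime, 32-bit multiply
  }
  return (h >>> 0).toString(16);           // unsigned hex digest
}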
_unix.246122 | I am working on a small homework assignment that asks me to write a script that asks for one of four words, and the user must type it exactly as it is echoed.

My issue is that my output never gives me the echo for false statements. Everything I put in my if is true, and gives me the first "approved" echo statement. Is there an issue with the setting of variables, or the brackets and parentheses? I've been trying to put them in all sorts of different variations throughout the if portion, but can't seem to get it to kick out the false output. I also use ShellCheck as my syntax-check source, and it shows the script as being functional. Any help would be appreciated; here is what I have written.

#!/bin/bash
varname1=even
varname2=odd
varname3=zero
varname4=negative
# Ask the user for one of four select words
echo "Type one of the following words:"
echo "even, odd, zero, negative"
read varword
if [[ ($varword -eq $varname1 ) || ($varword -eq $varname2 ) || ($varword -eq $varname3 ) || ($varword -eq $varname4 ) ]]
then
    echo "The approved word you have selected is $varword ."
else
    echo "The unapproved word you have selected is $varword . Please try again."
fi

| Script that asks for four words, then tells the user the word they chose. Output error? | bash;shell script | Use = for string comparisons, not -eq:

if [[ ($varword = $varname1 ) || ($varword = $varname2 ) || ($varword = $varname3 ) || ($varword = $varname4 ) ]]

Alternatively, use a regexp:

if [[ $varword =~ ^(even|odd|zero|negative)$ ]] ; then
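For completeness (not part of the original answer), a case statement is another idiomatic way to express the same test in bash:

case $varword in
    even|odd|zero|negative)
        echo "The approved word you have selected is $varword." ;;
    *)
        echo "The unapproved word you have selected is $varword. Please try again." ;;
esac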