How to make a shape change color when I click on it in a Connect Four board?
Question: I'm writing a Connect Four game in Python using Tkinter. I'm now making a
board. I want circles to change color when I click them.
No matter where I click on the board, only the circle in the last column of the last row changes color.
How can I make it so that whenever I click a specific circle, only that circle
changes color?
from tkinter import *
import random

def conx_create_window():
    mw = Tk()
    mw.title("Connect Four Game")
    mw.geometry("650x600")
    mw.configure(bg="#3C3C3C", padx=50, pady=50)
    return mw

def main():
    m_window = conx_create_window()
    return m_window

m_window = main()
mframe = Frame(m_window, bg="#3C3C3C", padx=50, pady=150)
mframe.pack()
newframe = Frame(m_window, bg="#3C3C3C", padx=50, pady=50)
board = {}
buttons = {}
frames = {}
gameBoard = Frame(m_window)
#----------------------------------
def newgame_click():
    print("New game")
    mframe.pack_forget()
    boardOption()

def boardOption():
    newframe.pack()

def board7x6():
    gameBoard.pack()
    newframe.pack_forget()
    print("7x6 Board Size")
    for row in range(6):
        board[row] = {}
        frames[row] = Frame(gameBoard)
        frames[row].pack()
        for col in range(7):
            board[row][col] = 0
            frame = Frame(frames[row])
            frame.pack(side = LEFT)
            c = Canvas(frame, bg="#666", width=50, height=50)
            c.pack()
            r = c.create_rectangle((0, 0, 50, 50), fill="#3C3C3C")
            circle = c.create_oval(3, 3, 49, 49, fill="#3D3D3D")
            c.tag_bind(r, "<Button-1>", lambda event: print('works'))
            c.tag_bind(circle, "<Button-1>", lambda event: c.itemconfig(circle, fill="green"))
            print(" ", board[row][col], " ", end="")
        print()

def board8x7():
    gameBoard.pack()
    newframe.pack_forget()
    print("8x7 Board Size")
    for row in range(7): # 7 rows
        board[row] = {}
        buttons[row] = {}
        frames[row] = Frame(gameBoard)
        frames[row].pack()
        for col in range(8): # 8 columns
            board[row][col] = 0
            buttons[row][col] = Button(frames[row], text="", width=8, height=4, bg="#1EC811", bd=0, highlightthickness=0)
            print(" ", board[row][col], " ", end="")
            buttons[row][col].pack(side=LEFT)
        print()

board7x6_btn = Button(newframe, text="7X6", bg="#64E545", command=board7x6, bd=0, highlightthickness=0)
board8x7_btn = Button(newframe, text="8X7", bg="#64E545", command=board8x7, bd=0, highlightthickness=0)
board7x6_btn.grid(row=0, column=0, padx=20, pady=10, ipadx=20, ipady=20)
board8x7_btn.grid(row=0, column=1, padx=20, pady=10, ipadx=20, ipady=20)
newgame_btn = Button(mframe, text="NEW GAME", bg="#64E545", command=newgame_click, bd=0, highlightthickness=0)
load_btn = Button(mframe, text="LOAD", bg="#64E545", padx=25, bd=0, highlightthickness=0)
ins_btn = Button(mframe, text="INSTRUCTIONS", bg="#64E545", bd=0, highlightthickness=0)
exit_btn = Button(mframe, text="EXIT", bg="#64E545", padx=10, bd=0, highlightthickness=0)#, command = exit_click)
newgame_btn.grid(row=0, column=0, padx=10, pady=10, ipadx=10, ipady=20)
load_btn.grid(row=0, column=1, padx=10, pady=10, ipady=20)
ins_btn.grid(row=1, column=0, padx=10, pady=10, ipady=20)
exit_btn.grid(row=1, column=1, padx=10, pady=10, ipadx=20, ipady=20)
#----------------------------------
m_window.mainloop()
Answer: The problem is the `lambda` construction: `c` is not evaluated when the lambda
is defined, so at execution time every handler refers to the same (last) canvas:
c.tag_bind(circle, '<Button-1>', lambda event: c.itemconfig(circle, fill = "green"))
Use a default argument instead:
c.tag_bind(circle, '<Button-1>', lambda event, c=c: c.itemconfig(circle, fill = "green"))
`c` is now a default argument, so each lambda captures its own canvas (you can bind
`circle=circle` the same way to be safe). See
[here](http://stackoverflow.com/a/2731158/166605) for a far better explanation
than mine.
|
Weird Python Selenium Button Click Behaviour
Question: The part I'm trying to click:
<ul class="btns right">
<li><a href="javascript:void(0)" onclick="hr_expand_event_tab_all("")" class="expand-all" id="btn_expand_all_10580503">View All Cards</a></li>
</ul>
Pretty straightforward I thought. But I seem to be missing something.
**Question is now updated a little further down the page. The xpath isn't the
problem I've tried with corrected xpath and it's the same as using the class
name. CSS was hiding several versions of the button but a common.exception is
being thrown on the ones it does actually find with xpath or the class name.**
I've checked the page is loaded properly and the element is there. I have a
check to wait until the full page is loaded and it screenshots to be sure.
loadbutton = Driver.find_element_by_xpath("//a[@class='expand-all']")
Gives:
<class 'selenium.common.exceptions.ElementNotVisibleException'>
So I tried to find an onclick with the anchor:
loadbutton = Driver.find_element_by_xpath("//li[contains(@onclick, 'View All Cards')]")
With the same outcome. I've tried a bit of regex to catch the id variations as
well but I'm not sure where I'm going wrong here. There's an onclick and it is
loaded but I can't seem to find it.
I'd appreciate anyone who can show me what I'm doing wrong on this one.
/Update:
Turns out there's multiple versions of the button some are visible and others
are not.
I looped:
loadbutton = Driver.find_elements_by_xpath("//a[@class='expand-all']")
for button in loadbutton:
    print "button found"
It turned up multiple results. The earlier ones are hidden but the ones at the
end are certainly showing on my browser and the screenshot. So I expected the
early ones to fail and added a .click() with a try: except: and they all
failed still. Didn't expect that.
Further update:
So I ran this:
loadbutton = Driver.find_elements_by_xpath("//a[@class='expand-all']")
for button in loadbutton:
    print "button found"
    try:
        button.click()
    except:
        e = sys.exc_info()[0]
        print e
The first couple gave me this:
<class 'selenium.common.exceptions.ElementNotVisibleException'>
OK expected the CSS is hiding it. The last two which are displaying gave this:
<class 'selenium.common.exceptions.WebDriverException'>
So it can see them. It won't click them. "Common exception" doesn't seem
overly helpful.
Answer: Try this xpath (shown in a code block because the SO site removed my *):
`//*[contains(concat(' ', @class, ' '), ' btns right ')]//*[contains(concat(' ', @class, ' '), ' expand-all ') and contains(text(), 'View All Cards')]`
Provide some wait for the element to be clickable (an explicit wait is shown below).
I have only used this in Java, but I referred to the Python docs
[here](http://selenium-python.readthedocs.io/waits.html); they may help!
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
wait = WebDriverWait(driver, 10)
button = wait.until(EC.element_to_be_clickable((By.XPATH, "//*[contains(concat(' ', @class, ' '), ' btns right ')]//*[contains(concat(' ', @class, ' '), ' expand-all ') and contains(text(), 'View All Cards')]")))
button.click()
Even if the above fails, try this, taken from these links
[link1](http://stackoverflow.com/questions/7794087/running-javascript-in-selenium-using-python) and
[link2](http://stackoverflow.com/questions/27927964/selenium-element-not-visible-exception):
driver.execute_script("document.getElementsByClassName('expand-all')[0].click();")
This injects an artificial click on the desired element; remove (comment out) the other click code.
Maybe your app falls under the situation in the link2 question :)
|
Issue with importing apps in Python / Django
Question: I am having an issue while importing 'registration' app into any of my python
scripts (PyCharm showing an error - "Unresolved reference 'registration'").
`Django-registration-redux` has been installed and loads fine e.g. from
`urls.py`. I also now have an issue with my own apps, which I never had before. In
the following `urls.py`, `user_profile` is also not recognized. How do you go a
folder level up rather than down? I know that if you are going into folders you
include a dot for every folder, **but how do you go back?** This is probably not
relevant for this case, but in general. This is my project layout:
|--'project'
| |--'Lib'
| | |--'site-packages'
| | | |--'registration'
| |--'src'
| | |--'proj'
| | | |-- settings.py
| | | |-- urls.py
| | |--'user_profile'
| | | |-- forms.py
**urls.py**
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
import user_profile.views as views
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', views.home),
    url(r'^accounts/', include('registration.backends.default.urls')),
]
**forms.py**
from registration.forms import RegistrationFormUniqueEmail
from django import forms
class UserProfileRegistrationForm(RegistrationFormUniqueEmail):
    field = forms.CharField()
**settings.py**
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites', # manually added
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # third party apps
    'crispy_forms',
    'debug_toolbar',
    # my apps
    'user_profile',
    'registration',
]
Answer: You don't "go back", because you're not "in" anywhere to go back from.
Python uses the PYTHONPATH environment variable to determine where to import
things from. A virtualenv will automatically put its root and the site-
packages directory into that variable. If your virtualenv is the "project"
directory, you probably need to do `from src.user_profile import views`.
|
Cannot seem to render XML or clean characters
Question: I'm trying to render my `sitemap.xml` using `flask`, however I cannot seem to
clean my input such that the rendering can work. The error is the following:
error on line 23325 at column 83: PCDATA invalid Char value 11
The code I'm using is:
url = prepend + "/explore/"+str(result['id'])+"/"+result['title'].encode('utf-8', 'xmlcharrefreplace').decode('utf-8')
I've also tried:
url = prepend + "/explore/"+str(result['id'])+"/"+result['title'].encode('ascii', 'xmlcharrefreplace').decode('ascii')
Also removing what is Char 11, doesn't seem to work either.
result["title"] = result["title"].replace('', '')
Is there a smarter way that I can clean any spurious characters or problem
solve more accurately?
I've tried to flag the problem. It looks like a linefeed or something along
those lines. I've tried before and after the URL is built.
XX XXX XXX XXX
Step by step guide
but, I cannot find anything in the python arsenal to remove the linefeed. I'm
trying, `replace('\r', '')`, `strip()` etc etc, but still this persists.
Answer: “Char value 11” (0xB in hex) refers to the vertical tab `\v`, **not** the
carriage return `\r`, **nor** the line feed `\n`.
The vertical tab is [not a valid character in an XML
document](https://www.w3.org/TR/xml/#charsets). It’s so invalid that you can’t
even represent it as a character reference like `&#11;`. You have to remove
it from the string yourself:
result["title"] = result["title"].replace('\x0b', '')
While you’re at it, you may want to remove other invalid characters as well,
as they have a tendency to pop up inscrutably in user-supplied data and bite
you in production. I’m not aware of common library functions to do this, so I
came up with my own function that makes both XML and HTML5 more or less happy:
import re

def printable(s):
    # Based on `XML 1.0 section 2.2 <https://www.w3.org/TR/xml/#charsets>`_,
    # with the addition of U+0085,
    # which the W3C (Nu) validator also marked as a "forbidden code point".
    # Even with this code, the validator still complains about
    # "Text run is not in Unicode Normalization Form C"
    # and "Document uses the Unicode Private Use Area(s)".
    return re.sub(
        pattern=(u'[\u0000-\u0008\u000B\u000C\u000E-\u001F'
                 u'\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF]'),
        repl=u'\N{REPLACEMENT CHARACTER}',
        string=s
    )
As for `xmlcharrefreplace`, that’s not what you’re looking for at all. It’s
not an escaping mechanism for XML-invalid characters. It’s a way to preserve
characters that are outside of the chosen encoding. It would make sense in a
situation like this:
>>> print(u'Liberté, égalité, fraternité!'.encode('ascii', 'xmlcharrefreplace'))
Libert&#233;, &#233;galit&#233;, fraternit&#233;!
|
USB mapping with python
Question: While reading some CTF write-ups I came across this script
#!/usr/bin/env python
import struct
import Image
import dpkt

INIT_X, INIT_Y = 100, 400

def print_map(pcap, device):
    picture = Image.new("RGB", (1200, 500), "white")
    pixels = picture.load()
    x, y = INIT_X, INIT_Y
    for ts, buf in pcap:
        device_id, = struct.unpack("b", buf[0x0B])
        if device_id != device:
            continue
        data = struct.unpack("bbbb", buf[-4:])
        status = data[0]
        x = x + data[1]
        y = y + data[2]
        if (status == 1):
            for i in range(-5, 5):
                for j in range(-5, 5):
                    pixels[x + i , y + j] = (0, 0, 0, 0)
        else:
            pixels[x, y] = (255, 0, 0, 0)
    picture.save("riverside-map.png", "PNG")

if __name__ == "__main__":
    f = open("usb.pcap", "rb")
    pcap = dpkt.pcap.Reader(f)
    print_map(pcap, 5)
    f.close()
And when I run it on my usb.pcap I get this error:
Traceback (most recent call last):
File "test.py", line 39, in <module>
print_map(pcap, n)
File "test.py", line 31, in print_map
pixels[x, y] = (255, 0, 0, 0)
IndexError: image index out of range
Why it is happening?
Answer: Depending on the dataset in your usb.pcap file, you may need to adjust the
INIT_X and INIT_Y variables. The problem is that struct.unpack returns a
signed value, so if the data is over 127 then it appears negative and you are
exceeding the array boundaries. If the data is really always positive, you can
test for that and force it to a positive value. Something like:
data = [item + 256 if item < 0 else item for item in data]
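If the coordinates can still drift off the canvas after that, a minimal sketch of
clamping them (using the 1200x500 `picture` created in the script above; the margin
of 5 matches the 10x10 dot that gets drawn) would be:
w, h = picture.size
x = min(max(x + data[1], 5), w - 5)  # keep the dot's pixels inside the image
y = min(max(y + data[2], 5), h - 5)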
|
Why aren't my sets containing more than one element?- python 2.7
Question: Ok, so I wrote some code and I want to compare two sets. However, the length
will only return either 0 or 1, depending on whether I'm using two images or
the same image. This is because my sets are being formed as only 1 element
sets instead of mixing the numbers apart. For example, the sets read as [(a,
b, c)] instead of [('a', 'b', 'c')].
Here's my code
import cv2
import numpy as np
import time
N=0
colour=[]
colourfile=open('Green from RGB.txt', 'r')
for line in colourfile.readlines():
colour.append([line])
colour_set=sorted(set(map(tuple, colour)))
def OneNumber(im): #Converts the pixels rgb to a single number.
temp_im=im.astype('int32')
r,g,b = temp_im[:,:,0], temp_im[:,:,1], temp_im[:,:,2]
combo=r*1000000+g*1000+b
return combo
while True:
cam = cv2.VideoCapture(0)
start=time.time()
while(cam.isOpened()): #Opens camera
ret, im = cam.read() #Takes screenshot
#im=cv2.imread('filename.type')
im=cv2.resize(im,(325,240)) #Resize to make it faster
im= im.reshape(1,-1,3)
im=OneNumber(im) #Converts the pixels rgb to a singe number
im_list=im.tolist() #Makes it into a list
im_set=set(im_list[0]) #Makes set
ColourCount= set(colour_set) & set(colour_set) #or set(im_set) for using/ comparing camera
print len(ColourCount)
Also the text file I'm opening is written as:
126255104, 8192000, 249255254, 131078, 84181000, 213254156,
In a single, great big line.
So basically, how do I divide the numbers into different elements in the sets,
im_set and colour_set?
Thanks
Answer: You have a few bugs in your code. It looks like you are reading all the colors
into a single string. You need to split the string instead if you want a set
of colors:
for line in colourfile.readlines():
    temp_line = [x.strip() for x in line.split(',')]  ## create a temporary list, splitting on commas and removing extra whitespace
    colour.extend(temp_line)  ## don't put brackets around `line`, that adds another "layer" of lists to the list
    ## also don't `append` a list with a list, use `extend()` instead
#colour_set=sorted(set(map(tuple, colour)))  ## I think you're trying to convert a string to a 3-tuple of rgb color values. This is not how to do that
You have a serious problem with your rgb color representations: what is
`131078`? Is it (13, 10, 78), or (131, 0, 78), or (1, 31, 78)? You need to
change how those color strings are written to a file, because your format is
ambiguous. To keep it simple, why not write it to a file like this:
13 10 78
255 255 0
If you insist on encoding the rgb triples as a single string, then you _HAVE_
to zero-pad all the values:
## for example
my_rgb = (13,10,78)
my_rgb_string = "%03d%03d%03d" % my_rgb  ## zero-pad to exactly 3 digit width
print(my_rgb_string)
>> 013010078
Another problem: you are intersecting a set with itself, instead of
intersecting two different sets:
ColourCount= set(colour_set) & set(colour_set) #or set(im_set) for using/ comparing camera
should be something like:
ColourCount= set(colour_set) | im_set #or set(im_set) for using/ comparing camera
if you want to create a union of all the different colors in the image.
If you still have problems after fixing these issues, I would recommend you
post a new question with the updated code.
|
Python or Powershell - Import Folder Name & Text file Content in Folder into Excel
Question: I have been looking at some python modules and powershell capabilities to try
and import some data a database recently kicked out in the form of folders and
text files.
File Structure:
Top Level Folder > FOLDER (Device hostname) > Text File (also contains the
hostname of device) (with data I need in a single cell in Excel)
The end result I am trying to accomplish is have the first cell be the FOLDER
(device name) and the second column contain the text of the text file within
that folder.
I found some python modules but they all focus on pulling directly from a text
doc...I want to have the script or powershell function iterate through each
folder and pull both the folder name and text out.
Answer: This is definitely do-able in PowerShell. If I understand your question
correctly, you're going to want to use `Get-ChildItem` and `Get-Content`, then
`-Recurse` if necessary. As far as the export goes, you're going to want to use
`Out-File`, which can be a hassle when exporting directly to xlsx. If you had some
code to work with I could help better, but until then this should get you
started in the right direction. I would read up on the `Get` commands because
PowerShell is very simple to write but powerful.
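Since the question allows Python as well, here is a minimal sketch (Python 3) that
walks the top-level folder and writes one row per device folder to a CSV file, which
Excel opens directly; the paths and the output filename are placeholders:
import csv
import os

top = r"C:\path\to\TopLevelFolder"  # placeholder for the top-level folder

with open("devices.csv", "w", newline="") as out:
    writer = csv.writer(out)
    writer.writerow(["Device", "Contents"])
    for name in sorted(os.listdir(top)):
        folder = os.path.join(top, name)
        if not os.path.isdir(folder):
            continue
        texts = []
        for fname in os.listdir(folder):
            if fname.lower().endswith(".txt"):
                with open(os.path.join(folder, fname)) as f:
                    texts.append(f.read())
        # first column: folder (device) name, second column: text file contents
        writer.writerow([name, "\n".join(texts)])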
|
Send bytes in Python with serial
Question: I'm trying to send 10 bits through the Raspberry Pi USB port by splitting them
into two bytes and manipulating the first one to identify it, with this code:
import serial
ser = serial.Serial(
    port='/dev/ttyACM0',
    baudrate = 38400,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    timeout=1
)
while 1:
    val=input()
    a= bytes((val>>8) | 0xA0)
    a1=bytes(val & 0x00ff)
    ser.write(a)
    ser.write(a1)
but it seems to send the ASCII code instead of the binary data and I can't find a
way to solve the problem! Can anybody help me please? Thanks a lot!
Answer: I've now solved the problem, the right code is:
val=input()
a= chr((val>>8) | 0xA0)
a1=chr(val & 0x00ff)
ser.write(a)
ser.write(a1)
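Note that this works on Python 2, where `input()` evaluates to an int and
`ser.write()` accepts a `str`. On Python 3, a sketch of the same idea would build a
`bytes` object instead:
val = int(input())
ser.write(bytes([(val >> 8) | 0xA0, val & 0xFF]))  # high byte tagged with 0xA0, then low byte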
|
Why can't I test Django REST Frameworks AuthToken?
Question: I'm trying to test the Django REST frameworks AuthToken with an APITestCase,
but I always get status code 400 and the message:
> b'{"non_field_errors":["Unable to log in with provided credentials."]}'
Weirdly enough when I type in the exact same commands via `python manage.py
shell` I get status code 200 and the auth token back.
What am I missing here?
Here's my code:
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
class TestAuthToken(APITestCase):
    def test_login_admin(self):
        client = APIClient()
        response = client.post('/api-token-auth/', {'username':'admin','password':'password123'})
        self.assertEqual(response.status_code, 200)
**UPDATE** Forgot to create a user for the test database... Here's the new
code:
from django.contrib.auth.models import User
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
class TestAuthToken(APITestCase):
    def setUp(self):
        self.admin = User.objects.create_user('admin', '[email protected]', 'password123')
        self.admin.save()
        self.admin.is_staff = True
        self.admin.save()

    def test_login_admin(self):
        client = APIClient()
        response = client.post('/api-token-auth/', {'username':'admin','password':'password123'})
        self.assertEqual(response.status_code, 200)
Answer: Are you creating the user beforehand in the setUp of the test case? Django
creates a testing db when you run the tests, and it is emptied for every test class
(unless you indicate that you want the data kept in the db between tests), so if
your code checks the db to see whether the credentials are OK, it will find that no
user with that username and password exists.
|
What does "'Float' object cannot be interpreted as an integer" mean and how can i help it - Python error
Question: I was trying to follow simple instructions on how to build a game in python
but for some reason it is not working and whenever I type this code in:
import pygame
from pygame.locals import *

pygame.init()
width, height = 640, 480
screen=pygame.display.set_mode((width, height))
keys = [False, False, False, False]
playerpos=[100,100]
player = pygame.image.load("resources/images/dude.png")
grass = pygame.image.load("resources/images/grass.png")
castle = pygame.image.load("resources/images/castle.png")

while 1:
    screen.fill(0)
    #Its this bit 'for' instruction that comes up with the error
    for x in range(width/grass.get_width()+1):
        for y in range(height/grass.get_height()+1):
            screen.blit(grass,(x*100,y*100))
    screen.blit(castle,(0,30))
    screen.blit(castle,(0,135))
    screen.blit(castle,(0,240))
    screen.blit(castle,(0,345))
    screen.blit(player,playerpos)
    pygame.display.flip()
    for event in pygame.event.get():
        if event.type==pygame.QUIT:
            pygame.quit()
            exit(0)
It says: File "/Pygame/game.py", line 18, in for x in
range(width/grass.get_width()+1) TypeError: 'float' object cannot be
interpreted as an integer
the website that i'm following is:
<https://www.raywenderlich.com/24252/beginning-game-programming-for-teens-
with-python>
If someone could tell me what I'm doing wrong it would be much appreciated, if
not just explaining what the error means might help. Thanks so much
Answer: This is happening because `width/grass.get_width()` returns a float rather
than an int. The tutorial you're using specifically says to use Python 2.7, so
you may run into other issues like this if you continue it with Python 3, but
to fix this specific issue you can replace `/` with `//` to use integer
division.
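For example, the offending loops from the question become:
for x in range(width // grass.get_width() + 1):
    for y in range(height // grass.get_height() + 1):
        screen.blit(grass, (x*100, y*100))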
|
formatting a run within a loop and inserting in a table Python docx
Question: I am trying to format text using python docx. The text is a string pulled out
by a loop. How can I name the object a run and then apply a font to the run?
This is my code:
xcount = 0
while xcount < xnum:
    xdata = datastring[xcount]
    obj1 = xdata[0]
    run = obj1
    font = run.font
    from docx.shared import Pt
    font.name = 'Times New Roman'
    row1.cells[0].add_paragraph(obj1)
    xcount += 1
I get the error:
AttributeError: 'str' object has no attribute 'font'
Answer: `xdata[0]` is a string (doesn't have a `.font` attribute). You'll need to
create a `Document()`, add a paragraph, and add a run to that. E.g.:
from docx import Document
from docx.shared import Inches
document = Document()
document.add_heading('Document Title', 0)
p = document.add_paragraph('A plain paragraph having some ')
p.add_run('bold').bold = True
p.add_run(' and some ')
p.add_run('italic.').italic = True
(copied directly from the docs: <http://python-docx.readthedocs.io/en/latest/>)
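Applied to the loop in the question, a sketch (assuming `row1` is a python-docx table
row and `datastring` holds the strings, as in the question) could look like:
from docx.shared import Pt

for xdata in datastring:
    paragraph = row1.cells[0].add_paragraph()
    run = paragraph.add_run(xdata[0])   # add_run() returns a Run object with a .font
    run.font.name = 'Times New Roman'
    run.font.size = Pt(12)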
|
How do I perform a collision check?
Question: I have no clue what to do here, I want to make it so that a turtle in Python
will shoot a bullet and if it collides with the other turtle, the score goes
up. Except if I run this code, the game will crash when I shoot the bullet. So
far, I only tried it for one of the eight move commands.
#imports turtles
import turtle
import random
#Sets screen, turtle program, and colour group
turtle.setup(1000, 800)
screen = turtle.Screen()
wn = turtle.Screen()
wn.bgcolor("springgreen")
amir = turtle.Turtle()
amir.shape("arrow")
amir.shapesize(1)
amir.speed(10)
amir.pensize (2)
amir.color("blue")
amir.setheading(90)
amir.penup()
#bullet 1
jonafat = turtle.Turtle()
jonafat.shape("circle")
jonafat.shapesize(0.5)
jonafat.speed(2)
jonafat.pensize (1)
jonafat.color("black")
jonafat.penup()
jonafat.ht()
hossein = turtle.Turtle()
hossein.shape("arrow")
hossein.shapesize(1)
hossein.speed(10)
hossein.pensize (2)
hossein.color("gold")
hossein.setheading(90)
hossein.penup()
#bullet
jonafat2 = turtle.Turtle()
jonafat2.shape("circle")
jonafat2.shapesize(0.5)
jonafat2.speed(2)
jonafat2.pensize (1)
jonafat2.color("black")
jonafat2.penup()
jonafat2.ht()
#scoreboard
TT = turtle.Turtle()
TT.ht()
TT.penup()
TT.goto(-500,200)
TT.color("black")
TT.write("0", move = False, align = "center", font = ("Arial", 20, "normal"))
#second scoreboard
TT = turtle.Turtle()
TT.ht()
TT.penup()
TT.goto(-500,200)
TT.color("black")
TT.write("0", move = False, align = "center", font = ("Arial", 20, "normal"))
x = 0
y = 0
amirs = 2
hosseins = 2
auto = 15
vanguard = 15
trump = 0
time = 1
score = 0
panda = 295
def up():
global amir
global x
global amirs
global hosseins
amir.seth(90)
n = 1
for i in range(0, n):
amir.sety(amir.ycor()+10)
n + 1
def down():
global amir
global x
global amirs
global hosseins
amir.seth(270)
n = 1
for i in range(0, n):
amir.sety(amir.ycor()-10)
n + 1
def left():
global amir
global x
global amirs
global hosseins
amir.seth(180)
n = 1
for i in range(0, n):
amir.setx(amir.xcor()-10)
n + 1
def right():
global amir
global x
global amirs
global hosseins
amir.seth(0)
n = 1
for i in range(0, n):
amir.setx(amir.xcor()+10)
n + 1
def up2():
global hossein
global y
global hosseins
hossein.seth(90)
n = 1
for i in range(0, n):
hossein.sety(hossein.ycor()+10)
n + 1
def down2():
global hossein
global y
global hosseins
hossein.seth(270)
n = 1
for i in range(0, n):
hossein.sety(hossein.ycor()-10)
n + 1
def left2():
global hossein
global y
global hosseins
hossein.seth(180)
n = 1
for i in range(0, n):
hossein.setx(hossein.xcor()-10)
n + 1
def right2():
global hossein
global y
global hosseins
hossein.seth(0)
n = 1
for i in range(0, n):
hossein.setx(hossein.xcor()+10)
n + 1
def collisionCheck(jonafat, hossein):
crash = True
jonafat1X = jonafat.xcor()
jonafat1Y = jonafat.ycor()
hossein2X = hossein.xcor()
hossein2Y = hossein.ycor()
jonafatPos = (int(jonafat1X), int(jonafat1Y))
hosseinPos = (int(hossein2X), int(hossein2Y))
if jonafatPos != hosseinPos:
crash = False
if jonafatPos == hosseinPos:
crash = True
return crash
def clock():
global time
time = time-1
def move():
global auto
global vanguard
global score
global time
wn.ontimer(clock,1000)
angle = amir.heading()
jonafat.ht()
jonafat.speed(10)
jonafat.setpos(amir.xcor(), amir.ycor())
if angle == 0:
trump = 1.
time = time-1
jonafat.showturtle()
jonafat.speed(2)
n = 1
for i in range(0, n):
jonafat.goto(amir.xcor()+300, amir.ycor())
n + 1
infLoop = 1
while infLoop == 1:
if collisionCheck(jonafat, hossein) == True:
infLoop = 0
break
score = score + 1
TT.clear()
TT.write(score, move = False, align = "center", font = ("Arial", 20, "normal"))
if time == 0:
break
infLoop = 0
if angle == 90:
jonafat.showturtle()
jonafat.speed(2)
n = 1
for i in range(0, n):
jonafat.goto(amir.xcor(),amir.ycor()+300)
n + 1
if angle == 180:
jonafat.showturtle()
jonafat.speed(2)
n = 1
for i in range(0, n):
jonafat.goto(amir.xcor()-300, amir.ycor())
n + 1
if angle == 270:
jonafat.showturtle()
jonafat.speed(2)
n = 1
for i in range(0, n):
jonafat.goto(amir.xcor(), amir.ycor()-300)
n + 1
def move2():
angle2 = hossein.heading()
jonafat2.ht()
jonafat2.speed(10)
jonafat2.setpos(hossein.xcor(), hossein.ycor())
if angle2 == 0:
jonafat2.showturtle()
jonafat2.speed(2)
n = 1
for i in range(0, n):
jonafat2.goto(hossein.xcor()+300, hossein.ycor())
n + 1
if angle2 == 90:
jonafat2.showturtle()
jonafat2.speed(2)
n = 1
for i in range(0, n):
jonafat2.goto(hossein.xcor(), hossein.ycor()+300)
n + 1
if angle2 == 180:
jonafat2.showturtle()
jonafat2.speed(2)
n = 1
for i in range(0, n):
jonafat2.goto(hossein.xcor()-300, hossein.ycor())
n + 1
if angle2 == 270:
jonafat2.showturtle()
jonafat2.speed(2)
n = 1
for i in range(0, n):
jonafat2.goto(hossein.xcor(), hossein.ycor()-300)
n + 1
wn.onkeypress(up, "w")
wn.onkeypress(down, "s")
wn.onkeypress(left, "a")
wn.onkeypress(right, "d")
wn.onkeypress(up2, "Up")
wn.onkeypress(down2, "Down")
wn.onkeypress(left2, "Left")
wn.onkeypress(right2, "Right")
wn.onkeypress(move2, "Shift_R")
wn.onkeypress(move, "space")
wn.listen()
This is the part I have trouble with
def collisionCheck(jonafat, hossein):
crash = True
jonafat1X = jonafat.xcor()
jonafat1Y = jonafat.ycor()
hossein2X = hossein.xcor()
hossein2Y = hossein.ycor()
jonafatPos = (int(jonafat1X), int(jonafat1Y))
hosseinPos = (int(hossein2X), int(hossein2Y))
if jonafatPos != hosseinPos:
crash = False
if jonafatPos == hosseinPos:
crash = True
return crash
def clock():
global time
time = time-1
def move():
global auto
global vanguard
global score
global time
wn.ontimer(clock,1000)
angle = amir.heading()
jonafat.ht()
jonafat.speed(10)
jonafat.setpos(amir.xcor(), amir.ycor())
if angle == 0:
trump = 1.
time = time-1
jonafat.showturtle()
jonafat.speed(2)
n = 1
for i in range(0, n):
jonafat.goto(amir.xcor()+300, amir.ycor())
n + 1
infLoop = 1
while infLoop == 1:
if collisionCheck(jonafat, hossein) == True:
infLoop = 0
break
score = score + 1
TT.clear()
TT.write(score, move = False, align = "center", font = ("Arial", 20, "normal"))
if time == 0:
break
infLoop = 0
Answer: It appears you're only checking whether their positions are exactly the same,
but two objects can pass close enough to each other to visually collide without
being at the exact same position.
You'll want to take their dimensions into account to properly check for
collisions. The easiest way is to take the distance between their positions and
check whether it's smaller than a certain value, although better options exist.
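A minimal sketch of that distance-based check, using turtle's built-in `distance()`
method (the 20-pixel threshold is only a guess that you would tune to your shape
sizes):
def collisionCheck(jonafat, hossein, threshold=20):
    # collide when the two turtles' centres are closer than the threshold
    return jonafat.distance(hossein) < threshold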
|
Python 3.5 for Windows missing tkinter
Question: I have Python 3.5.1 for Windows (official python.org installer) installed and
it seems to be missing tkinter. For example:
C:\Users\kostya>python
Python 3.5.1 (v3.5.1:37a07cee5969, Dec 6 2015, 01:38:48) [MSC v.1900 32 bit (In
tel)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> from tkinter import *
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: No module named 'tkinter'
Does anyone have an idea about were I can download the package?
Answer: The installer has an option "[X] install tkinter, IDLE, turtle". It should be
on by default, but you might have clicked it off. If you run the installer
again, you should see an option to repair. That lets you check whether the box is
ticked: make sure it is, then choose 'repair'.
|
Python error: PyThreadState_Get: no current thread after compiling an extension
Question: I am testing [TensorBox](https://github.com/Russell91/TensorBox), which is
based on Google's Tensorflow for some task. There is a small piece of code
that needs to be compiled and I compile it. When running `otool -L
stitch_wrapper.so` this is the output:
stitch_wrapper.so (compatibility version 0.0.0, current version 0.0.0)
libpython2.7.dylib (compatibility version 2.7.0, current version 2.7.0)
/usr/lib/libc++.1.dylib (compatibility version 1.0.0, current version 120.1.0)
/usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1225.1.1)
Whenever I do in python `import stitch_wrapper`, I get the error:
Fatal Python error: PyThreadState_Get: no current thread
Abort trap: 6
I read in a comment in [this other
question](http://stackoverflow.com/questions/15678153/homebrew-python-on-mac-os-x-10-8-fatal-python-error-pythreadstate-get-no-cu) that this happens
whenever a module tries to use a python library that is different than the one
the interpreter uses, that is, when you mix two different pythons. I have two
different pythons in the machine, and the one I am using when running the code
is the `anaconda` python that I installed in my `home` directory. Pardon my
question if I am confused, but how can I compile properly for my `anaconda`
python? I tried importing within my other python and it works well. But I need
it working for my anaconda python.
Answer: As you say, this happens because you have compiled the extension for a
different Python version.
To make it work within Anaconda, you must see if they provide Python
libraries, header files and so on. Then you must recompile everything using
the right flags. If Anaconda provides `python-config`, you can get the flags
from there.
|
Django Framework- page not found
Question: Why am I getting a 404 in this?
This is my mysite/urls.py file:
from django.conf.urls import url, include
from django.contrib import admin

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$/', include('personal.urls')),
]
And this is my mysite/personal/urls.py file
from django.conf.urls import url
from . import views
urlpatterns =[url(r'^$',views.index, name='index')]
The mysite/personal/views.py
from django.shortcuts import render

def index(request):
    return render(request, 'personal/home.html')
After that I have created the templates folder, e.g.
templates/personal/header.html. The file looks like this:
<body class="body" style="background-color:#f6f6f6">
<div>
{% block content %}
{% endblock %}
</div>
</body>
Now about the home.html in the same folder
{% extends "personal/header.html" %}
{% block content %}
<p>Hey! Welcome to my website! Well, I wasn't expecting guests. Um, my name is HVS. I am a programmer.</p>
{% endblock %}
I have also installed the app in the settings.py file in the mysite subdirectory,
but still, when I run
">python manage.py runserver"
it runs OK but the page tells me the URL was not found (404)!
This is my browser output:
[](http://i.stack.imgur.com/P7HIK.png)
I am using Windows 8, python 3 & django 1.9
Answer: According to django
[docs](https://docs.djangoproject.com/en/1.9/intro/tutorial01/):
> The include() function allows referencing other URLconfs. Note that the
> regular expressions for the include() function doesn’t have a $ (end-of-
> string match character) but rather a trailing slash
So you should change it to this:
url(r'^/', include('personal.urls'))
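With that change (and leaving everything else as shown in the question),
mysite/urls.py would read:
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^', include('personal.urls')),
]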
|
Python syntax error running freshly installed `meld`
Question: I'm installing meld [as described here](http://linuxpitstop.com/install-meld-on-ubuntu-and-mint-linux/):
sudo yum install intltool itstool gir1.2-gtksource-3.0 libxml2-utils
However, when I try to run `meld` this error appears:
File "/usr/bin/meld", line 47
print _("Meld requires %s or higher.") % modver
^
And indeed `/usr/bin/meld` has this code:
def missing_reqs(mod, ver):
    modver = mod + " " + ".".join(map(str, ver))
    print _("Meld requires %s or higher.") % modver
    sys.exit(1)
I'm on CentOS 6.7, Python version 3.3.5.
Could you advise on what I'm doing wrong here?
* * *
EDIT:
Here's the command line, verbatim:
$ meld
File "/usr/bin/meld", line 47
print _("Meld requires %s or higher.") % modver
^
SyntaxError: invalid syntax
Here is a portion of `meld` script:
import sys
if "--pychecker" in sys.argv:
sys.argv.remove("--pychecker")
import os
os.environ['PYCHECKER'] = "--no-argsused --no-classattr --stdlib"
#'--blacklist=gettext,locale,pygtk,gtk,gtk.keysyms,popen2,random,difflib,filecmp,tempfile'
import pychecker.checker
#
# i18n support
#
sys.path[0:0] = [ "/usr/share/meld"
]
import paths
import gettext
_ = gettext.gettext
gettext.bindtextdomain("meld", paths.locale_dir())
gettext.textdomain("meld")
# Check requirements: Python 2.4, pygtk 2.8
pyver = (2,4)
pygtkver = (2,8,0)
def missing_reqs(mod, ver):
modver = mod + " " + ".".join(map(str, ver))
print _("Meld requires %s or higher.") % modver
sys.exit(1)
if sys.version_info[:2] < pyver:
missing_reqs("Python", pyver)
Answer: `print` is a statement in `python2` just like your script has:
print _("Meld requires %s or higher.") % modver
But you are interpreting the script using `python3` which does not have
`print` statement rather has `print()` function.
You can try to replace every `print` statement with a `print()` call in the hope
that nothing else breaks, but that is not a good solution anyway.
Better just install `python2`:
sudo yum install python2
and use `python2` as the interpreter.
|
Importing OpenCV with python 2.7 in Virtualenv and PyCharm
Question: I am struggling with installing opencv for python 2.7.11 on OSX for almost
three days now.
After some failures, I accomplished a first success by following the
instructions [here](http://www.pyimagesearch.com/2015/06/15/install-opencv-3-0-and-python-2-7-on-osx/). So my basic setup is python 2.7.11 running
from `~/.virtualenvs/cv_env/bin/python` and I have a `cv2.so` located in
`~/.virtualenvs/cv/lib/python2.7/site-packages/`.
So far so good. Using `source ~/.virtualenvs/cv_env/bin/activate`, I can
activate the virtualenv and than use `import cv2`. For some reasons, this does
not work always. From time to time, I have to deactivate first and than
reactivate (any guesses?).
Next, I wanted to use opencv in PyCharm. Under "preferences - Project
interpreter", I selected the virtualenv interpreter and this is also working.
I can import other moduals like numpy and pandas (previously installed into
the vortualenv using `pip`). But for some reasons, I am not able to import
opencv (`import cv2`). It always gives me
`ImportError: No module named cv2`
So my question is, why I am able to import opencv in terminal (at least
sometimes) but not in PyCharm. Any ideas are welcomed.
Best, Fabian
Answer: Your `cv2.so` is located in a different virtualenv than the one you are
activating: it sits under `cv`'s site-packages while you activate and run `cv_env`.
|
What is the correct way to report an error in a Python unittest in the setUp method?
Question: I've read some conflicting advice on the use of `assert` in the `setUp` method
of a Python unit test. I can't see the harm in failing a test if a
precondition that test relies on fails.
For example:
import unittest

class MyProcessor():
    """
    This is the class under test
    """
    def __init__(self):
        pass

    def ProcessData(self, content):
        return ['some','processed','data','from','content'] # Imagine this could actually pass

class Test_test2(unittest.TestCase):
    def LoadContentFromTestFile(self):
        return None # Imagine this is actually doing something that could pass.

    def setUp(self):
        self.content = self.LoadContentFromTestFile()
        self.assertIsNotNone(self.content, "Failed to load test data")
        self.processor = MyProcessor()

    def test_ProcessData(self):
        results = self.processor.ProcessData(self.content)
        self.assertGreater(results, 0, "No results returned")

if __name__ == '__main__':
    unittest.main()
This seems like a reasonable thing to do to me i.e. make sure the test is able
to run. When this fails because of the setup condition we get:
F
======================================================================
FAIL: test_ProcessData (__main__.Test_test2)
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Projects\Experiments\test2.py", line 21, in setUp
self.assertIsNotNone(self.content, "Failed to load test data")
AssertionError: unexpectedly None : Failed to load test data
----------------------------------------------------------------------
Ran 1 test in 0.000s
FAILED (failures=1)
Answer: The purpose of `setUp` is to reduce [Boilerplate
code](https://en.wikipedia.org/wiki/Boilerplate_code) which creates between
the tests in the test class during the Arrange phase.
In the Arrange phase you: setup everything needed for the running the tested
code. This includes any initialization of dependencies, mocks and data needed
for the test to run.
Based on the above paragraphs you should not assert anything in your `setUp`
method.
So, as mentioned earlier: **if you can't create the test precondition then your
test is broken.** To avoid situations like this, Roy Osherove wrote a great
book called [The Art Of Unit
Testing](http://rads.stackoverflow.com/amzn/click/1617290890) (for full
disclosure, Lior Friedman (he was Roy's boss) is a friend of mine and I worked
closely with them for more than 2 years, so I am a little bit biased...)
Basically there are only a few reasons to have an interaction with external
resources during the Arrange phase (or with things which may cause an exception);
most of them (if not all) relate to integration tests.
Back to your example: there is a pattern for structuring the tests when you need
to load an external resource (for all/most of them). Just a side note: before you
decide to apply this pattern, make sure that you can't keep this content as a
static resource in your UT's class; if other test classes need to use this
resource, extract it into a module.
The following pattern decreases the possibility of failure, since you make fewer
calls to the external resource:
class TestClass(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        # since external resources such as other servers can provide bad content,
        # you can verify here that the content is valid and prevent the tests
        # from running; however, in most cases you shouldn't.
        cls.externalResourceContent = loadContentFromExternalResource()

    def setUp(self):
        self.content = self.copyContentForTest()
Pros:
1. fewer chances of failure
2. prevents inconsistent behaviour (e.g. 1. someone has edited the external resource; 2. the external resource failed to load in some of your tests)
3. faster execution
Cons:
1. the code is more complex
|
how to solve import error no module name cv in python?
Question: I am running the Python code but every time it shows
line 13, in import cv2.cv as cv2
ImportError: No module named cv
How do I solve this problem? I am using an rPi 2 with opencv-3.1.0 and Python 2.7.
Answer: If you have compiled opencv from source, link `cv2.so` from the build directory
into your Python system directory (generally it is
`/usr/local/lib/python2.7/dist-packages/`).
If that doesn't help, set up a `venv` in your project directory and link `cv2.so`
and `cv.py` from the Python system directory into your project directory.
|
Python 3 urllib Vs requests performance
Question: I'm using python 3.5 and I'm checking the performance of urllib module Vs
requests module. I wrote two clients in python the first one is using the
urllib module and the second one is using the request module. they both
generate a binary data, which I send to a server which is based on flask and
from the flask server I also return binary data to the client. I found that
sending the data from the client to the server took the same time for both
modules (urllib, requests), but returning the data from the server to the
client was more than twice as fast with urllib compared to requests.
I'm working on localhost.
**My question is why? What am I doing wrong with the requests module that
makes it slower?**
**this is the server code :**
from flask import Flask, request
app = Flask(__name__)
from timeit import default_timer as timer
import os

@app.route('/onStringSend', methods=['GET', 'POST'])
def onStringSend():
    return data

if __name__ == '__main__':
    data_size = int(1e7)
    data = os.urandom(data_size)
    app.run(host="0.0.0.0", port=8080)
**this is the client code based on urllib :**
import urllib.request as urllib2
import urllib.parse
from timeit import default_timer as timer
import os
data_size = int(1e7)
num_of_runs = 20
url = 'http://127.0.0.1:8080/onStringSend'
def send_binary_data():
data = os.urandom(data_size)
headers = {'User-Agent': 'Mozilla/5.0 (compatible; Chrome/22.0.1229.94; Windows NT)', 'Content-Length': '%d' % len(data), 'Content-Type': 'application/octet-stream'}
req = urllib2.Request(url, data, headers)
round_trip_time_msec = [0] * num_of_runs
for i in range(0,num_of_runs):
t1 = timer()
resp = urllib.request.urlopen(req)
response_data = resp.read()
t2 = timer()
round_trip_time_msec[i] = (t2 - t1) * 1000
t_max = max(round_trip_time_msec)
t_min = min(round_trip_time_msec)
t_average = sum(round_trip_time_msec)/len(round_trip_time_msec)
print('max round trip time [msec]: ', t_max)
print('min round trip time [msec]: ', t_min)
print('average round trip time [msec]: ', t_average)
send_binary_data()
**this is the client code based on requests :**
import requests
import os
from timeit import default_timer as timer
url = 'http://127.0.0.1:8080/onStringSend'
data_size = int(1e7)
num_of_runs = 20
def send_binary_data():
data = os.urandom(data_size)
s = requests.Session()
s.headers['User-Agent'] = 'Mozilla/5.0 (compatible; Chrome/22.0.1229.94;Windows NT)'
s.headers['Content-Type'] = 'application/octet-stream'
s.headers['Content-Length'] = '%d' % len(data)
round_trip_time_msec = [0] * num_of_runs
for i in range(0,num_of_runs):
t1 = timer()
response_data = s.post(url=url, data=data, stream=False, verify=False)
t2 = timer()
round_trip_time_msec[i] = (t2 - t1) * 1000
t_max = max(round_trip_time_msec)
t_min = min(round_trip_time_msec)
t_average = sum(round_trip_time_msec)/len(round_trip_time_msec)
print('max round trip time [msec]: ', t_max)
print('min round trip time [msec]: ', t_min)
print('average round trip time [msec]: ', t_average)
send_binary_data()
thanks very much
Answer: First of all, to reproduce the problem, I had to add the following line to
your `onStringSend` function:
request.get_data()
Otherwise, I was getting “connection reset by peer” errors because the
server’s receive buffer kept filling up.
Now, the immediate reason for this problem is that `Response.content` (which
is called implicitly when `stream=False`) [iterates over the response data in
chunks of 10240
bytes](https://github.com/kennethreitz/requests/blob/87704105af65b382b86f168f6a54192eab91faf2/requests/models.py#L741):
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
Therefore, the easiest way to solve the problem is to use `stream=True`, thus
telling Requests that you will be reading the data at your own pace:
response_data = s.post(url=url, data=data, stream=True, verify=False).raw.read()
With this change, the performance of the Requests version becomes more or less
the same as that of the urllib version.
Please also see the “[Raw Response
Content](http://requests.readthedocs.io/en/master/user/quickstart/#raw-response-content)” section in the Requests docs for useful advice.
Now, the interesting question remains: why is `Response.content` iterating in
such small chunks? After [talking to Cory
Benfield](https://botbot.me/freenode/python-requests/2016-05-11/?msg=65874287&page=1), a core developer of Requests, it
looks like there may be no particular reason. I filed [issue
#3186](https://github.com/kennethreitz/requests/issues/3186) in Requests to
look further into this.
|
Adding validation to Django User form
Question: I'd like to customize the user sign-up form in Django/Mezzanine to allow only
certain email addresses, so I tried to monkey-patch as follows:
# Monkey-patch Mezzanine's user email address check to allow only
# email addresses at @example.com.
from django.forms import ValidationError
from django.utils.translation import ugettext
from mezzanine.accounts.forms import ProfileForm
from copy import deepcopy
original_clean_email = deepcopy(ProfileForm.clean_email)
def clean_email(self):
    email = self.cleaned_data.get("email")
    if not email.endswith('@example.com'):
        raise ValidationError(
            ugettext("Please enter a valid example.com email address"))
    return original_clean_email(self)

ProfileForm.clean_email = clean_email
This code is added at the top of one of my `models.py`.
However, when I runserver I get the dreaded
django.core.exceptions.AppRegistryNotReady: Models aren't loaded yet.
If I add
import django
django.setup()
then `python manage.py runserver` just hangs until I `^C`.
What should I be doing to add this functionality?
Answer: Create a file `myapp/apps.py` for one of your apps (I've use `myapp` here),
and define an app config class that does the monkeypatching in the `ready()`
method.
from django.apps import AppConfig

class MyAppConfig(AppConfig):
    name = 'myapp'

    def ready(self):
        # do the imports and define clean_email here
        ProfileForm.clean_email = clean_email
Then use `'myapp.apps.MyAppConfig'` instead of `'myapp'` in your
`INSTALLED_APPS` setting.
INSTALLED_APPS = [
    ...
    'myapp.apps.MyAppConfig',
    ...
]
You might need to put Mezzanine above the app config for it to work.
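Filling in the `ready()` method with the code from the question, a sketch might look
like this (the imports are kept inside `ready()` so nothing touches the forms module
before the app registry is ready; the parameter is named `form` rather than `self`
only to avoid confusion with the AppConfig instance):
from django.apps import AppConfig

class MyAppConfig(AppConfig):
    name = 'myapp'

    def ready(self):
        from django.forms import ValidationError
        from django.utils.translation import ugettext
        from mezzanine.accounts.forms import ProfileForm

        original_clean_email = ProfileForm.clean_email

        def clean_email(form):
            email = form.cleaned_data.get("email")
            if not email.endswith('@example.com'):
                raise ValidationError(
                    ugettext("Please enter a valid example.com email address"))
            return original_clean_email(form)

        ProfileForm.clean_email = clean_email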
|
What doesn't work in this number matching regular expression?
Question: I am running Python 2.7 and I am currently having some issue with [RE
library](https://docs.python.org/2/library/re.html). As a simple example, when
trying numbers on a human written document, I wrote this regular expression :
import re
number = r'^[(]?([-]?([ ]?\d[ ]?)+)|([-]?([ ]?\d[ ]?)+[.,]([ ]?\d[ ]?)*)[)]?$'
This is supposed to match numbers with comas, dots and blanks because humans
don't always format it well, and with or without `-`, `(`and `)`.
Even if they are not optimized, I expected the following expressions to work,
but they obviously don't as we can see on the screenshot.
ex1 = r'^[(]?([-]?([ ]?\d[ ]?)+)|([-]?([ ]?\d[ ]?)+[.,]([ ]?\d[ ]?)*)[)]?$'
ex2 = r'^(\()?(-)?( ?\d ?)*[.,]?( ?\d ?)*(\))?$'
ex3 = r'^[(]?(-)?( ?\d ?)*[.,]?( ?\d ?)*[)]?$'
[> Screenshot to output <](http://i.stack.imgur.com/GtSmn.png)
Input :
import re
print "1st example :"
number = r'^[(]?([-]?([ ]?\d[ ]?)+)|([-]?([ ]?\d[ ]?)+[.,]([ ]?\d[ ]?)*)[)]?$'
print re.match(number, "16 juillet 1993")
print "\n2nd example :"
number = r'^(\()?(-)?( ?\d ?)*[.,]?( ?\d ?)*(\))?$'
print re.match(number, "16 juillet 1993")
print re.match(number, "161993")
print re.match(number, "-1619,93")
print re.match(number, "-( 9 9 . 3 )")
print "\n3rd example :"
number = r'^[(]?(-)?( ?\d ?)*[.,]?( ?\d ?)*[)]?$'
print re.match(number, "-( 9 9 . 3 )")
Ouput :
1st example :
<_sre.SRE_Match object at 0x103da5480>
2nd example :
None
<_sre.SRE_Match object at 0x103da5480>
<_sre.SRE_Match object at 0x103da5480>
None
3rd example :
None
Answer: Try: `number=r'^-?\(?( ?\d ?)+[.,]?( ?\d ?)*\)?$'`; here, `-` is outside of
`( )`. If you want to match `-` inside of `( )`, please use:
`number=r'^\(?-?( ?\d ?)+[.,]?( ?\d ?)*\)?$'`
If you want to match parentheses `( )` in the string, you should escape them with `\`.
|
How to read from Process.getInputStream asynchronously during the invoked process excution
Question: I have a Java program which needs to start a Python process and get a
response from it.
What I want is to get the response immediately when Python prints something. I'm
not asking how to keep the current Java thread from blocking, but how to read from
the inputStream asynchronously.
I use java 7's
[Process](http://docs.oracle.com/javase/7/docs/api/java/lang/Process.html)
class to start a python process.
My relating java code:
List<String> args = java.util.Arrays.asList("python", "test.py");
ProcessBuilder pb = new ProcessBuilder(args);
pb.directory(new File("/dir/to/python/script"));
Process p = processBuilder.start();
BufferedReader in = new BufferedReader(new InputStreamReader(p.getInputStream()));
String ret1 = in.readLine();
System.out.println("ret1 is: " + ret1);
String ret2 = in.readLine();
System.out.println("ret2 is: " + ret2);
My python script test.py is :
import time

if __name__ == '__main__':
    print("start signal")
    time.sleep(10)
    print("end signal")
I run the java code, in the console, nothing printed, but after 10 seconds,
"start signal" and "end signal" both appeared at the same time.
It seems that readLine() blocks until the whole Python process returns and
terminates. Can't this method read the response while Python is still running?
What I want is for the Java code to block when it reaches readLine(), and when
Python prints something, Java reads it through readLine() and then goes on
executing until it meets the second readLine(), waiting for Python to print again.
How can I achieve that?
_______edit________
I also tried using a thread to read from the inputStream. I think the only purpose
of using a thread is to keep readLine() from blocking the current thread. But I
don't care about that; I want readLine() to read the Python output immediately, not
only once Python has terminated.
I googled a lot and I noticed that somebody mentioned that this:
Maybe the output is being buffered because the command knows
it is writing to a pipe rather than a terminal?
but I can't get more information about that.
Can Somebody explain this? thank you for reading so long!
Answer: OK, I found that the problem is not in the Java code but in the Python code.
I need to call
sys.stdout.flush()
after every print statement. Then the BufferedReader in Java can read every
single print immediately.
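With that change, the test script from the question becomes (a sketch):
import sys
import time

if __name__ == '__main__':
    print("start signal")
    sys.stdout.flush()   # push the line through the pipe immediately
    time.sleep(10)
    print("end signal")
    sys.stdout.flush()
Running the script with `python -u` (unbuffered output) has a similar effect without
touching the code.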
|
How to use pyInstaller to completely pack all the necessary Library?
Question: I have already used pyinstaller to create a standalone application of my
Python application:
pyinstaller --windowed app.py
and it actually runs on my computer and works just as intended, but when I tried it
on my friend's computer, it didn't work. It runs but somehow it can't process the
text.
Here are the libraries used:
import tkinter as Tk
import tkinter.ttk as ttk
import tkinter.scrolledtext as ScrollTxt
from tkinter import END,E,W,filedialog,messagebox
from nltk.tokenize import sent_tokenize, RegexpTokenizer, word_tokenize
import math
import fileinput
from textblob import TextBlob as tb
from nltk.tag import pos_tag, map_tag
from nltk.corpus import stopwords
if you want to see the result file:
<https://www.dropbox.com/s/mfdnaaoik7w0r23/TextSummaryV73.rar?dl=0>
Does anybody know what's wrong or what's missing?
I think it's either nltk or textblob; can anyone help with how to add these files
into the package?
**EDIT:** I have added nltk and textblob into the Python application's directory
using spec files. Now the problem is: how do I make the program know that these two
imports are already inside the directory?
Answer: I believe the command you are looking for is --onefile. This packages all
required packages into the executable. [This
guide](https://mborgerson.com/creating-an-executable-from-a-python-script)
should help you.
pyinstaller --onefile --windowed app.py
If you have any external files that are required by the script this makes the
process a bit more difficult as you will need to change your references to
their location. [This
answer](http://stackoverflow.com/questions/7674790/bundling-data-files-with-pyinstaller-onefile)
or [this
one](http://stackoverflow.com/questions/19669640/bundling-data-files-with-pyinstaller-2-1-and-meipass-error-onefile)
can help with that.
|
scraping pagination web with beautifulsoap python
Question: I just started learning Python. I'm trying to scrape all phone numbers from
a paginated web site, but my code never goes to the next page and only loops over
the same page. I need advice here.
from bs4 import BeautifulSoup
import requests

for i in range(5000):
    url = "http://www.mobil123.com/mobil?type=used&page_number=1".format(i)
    r = requests.get(url)
    soup = BeautifulSoup(r.content)
    for record in soup.findAll('div', {"class": "card-contact-wrap"}):
        for data in soup.findAll('div', {"data-get-content": "#whatsapp"}):
            print(record.find('li').text)
            print(data.text)
Answer: You missed the string formatter placeholder. Change the url = "...." line to:
|
Python decorator with arguments
Question: I have a class with a lot of very similar properties:
class myClass(object):
def compute_foo(self):
return 3
def compute_bar(self):
return 4
@property
def foo(self):
try:
return self._foo
except AttributeError:
self._foo = self.compute_foo()
return self._foo
@property
def bar(self):
try:
return self._bar
except AttributeError:
self._bar = self.compute_bar()
return self._bar
...
So thought I would write a decorator to do the property definition work.
class myDecorator(property):
def __init__(self, func, prop_name):
self.func = func
self.prop_name = prop_name
self.internal_prop_name = '_' + prop_name
def fget(self, obj):
try:
return obj.__getattribute__(self.internal_prop_name)
except AttributeError:
obj.__setattr__(self.internal_prop_name, self.func(obj))
return obj.__getattribute__(self.internal_prop_name)
def __get__(self, obj, objtype=None):
if obj is None:
return self
if self.func is None:
raise AttributeError("unreadable attribute")
return self.fget(obj)
class myClass(object):
def compute_foo(self):
return 3
foo = myDecorator(compute_foo, 'foo')
def compute_bar(self):
return 4
bar = myDecorator(compute_bar, 'bar')
This works well, but when I want to use the `@myDecorator('foo')` syntax, it
gets more complicated and cannot figure what the `__call__` method should
return and how to attach the property to its class.
For the moment I have :
class myDecorator(object):
def __init__(self, prop_name):
self.prop_name = prop_name
self.internal_prop_name = '_' + prop_name
def __call__(self, func):
self.func = func
return #???
def fget(self, obj):
try:
return obj.__getattribute__(self.internal_prop_name)
except AttributeError:
obj.__setattr__(self.internal_prop_name, self.func(obj))
return obj.__getattribute__(self.internal_prop_name)
def __get__(self, obj, objtype=None):
if obj is None:
return self
if self.func is None:
raise AttributeError("unreadable attribute")
return self.fget(obj)
class myClass(object):
@myDecorator('foo')
def compute_foo(self):
return 3
c = myClass()
print(c.foo)
and it returns: `AttributeError: 'myClass' object has no attribute 'foo'`
Answer: You could always use the wraps trick to pass arguments to your decorator as
follows:
from functools import wraps
class myDecorator(property):
def __init__(self, prop_name):
self.prop_name = prop_name
def __call__(self, wrappedCall):
@wraps(wrappedCall)
def wrapCall(*args, **kwargs):
klass = args[0]
result = wrappedCall(*args, **kwargs)
setattr(klass, self.prop_name, result)
return wrapCall
class myClass(object):
@myDecorator('foo')
def compute_foo(self):
return 3
c = myClass()
c.compute_foo()
print c.foo
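As a side note, if the real goal is just lazily cached properties, an argument-free descriptor sidesteps the name-binding problem entirely, because the cached value is derived from the decorated function's own name. This is only a sketch (`lazy_property` is an illustrative name, not a standard one):
class lazy_property(object):
    """Compute the decorated method once per instance and cache the result."""
    def __init__(self, func):
        self.func = func
        self.attr_name = '_' + func.__name__
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        try:
            return getattr(obj, self.attr_name)
        except AttributeError:
            value = self.func(obj)
            setattr(obj, self.attr_name, value)
            return value
class myClass(object):
    @lazy_property
    def foo(self):
        return 3  # the old compute_foo body goes here
c = myClass()
print(c.foo)  # 3, computed once and then cached on the instance as _foo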
|
Why does '20000 is 20000' result in True?
Question: `is` in Python tests if 2 references point to the same object. Numbers between
-5 and 256 are cached internally so:
a = 10
b = 10
a is b # Results in True
How does this explain something like:
20000 is 20000 # Results in True
Both numbers are above 256. Should not the 2 integers be 2 distinct objects?
Answer: The Python interpreter sees you are re-using an immutable object, so it doesn't
bother to create two:
>>> import dis
>>> dis.dis(compile('20000 is 20000', '', 'exec'))
1 0 LOAD_CONST 0 (20000)
3 LOAD_CONST 0 (20000)
6 COMPARE_OP 8 (is)
9 POP_TOP
10 LOAD_CONST 1 (None)
13 RETURN_VALUE
Note the two `LOAD_CONST` opcodes, they both load the constant at index 0:
>>> compile('20000 is 20000', '', 'exec').co_consts
(20000, None)
In the interactive interpreter Python is limited to having to compile each
(simple or compound) statement you enter separately, so it can't reuse these
constants across different statements.
But within a function object it certainly would only create one such integer
object, even if you used the same int literal more than once. The same applies
to any code run at the _module level_ (so outside of functions or class
definitions); those all end up in the same code object constants too.
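For example, compiling both literals inside one function shows a single constant object being reused (a quick, CPython-specific check):
def f():
    x = 20000
    y = 20000
    return x is y
print(f())                   # True: both names load the same constant object
print(f.__code__.co_consts)  # the literal 20000 is stored only once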
|
Python Can't Run or Reinstall Pip
Question: I have a machine that has Python 2.7 and Python 3.4 installed. Normally to
install packages under Python 3.4, I would run `pip3 install [PACKAGE]`.
But now when I run `pip3` I get
Traceback (most recent call last):
File "/volume1/@appstore/python3/bin/pip3", line 7, in <module>
from pip import main
File "/usr/local/python3/lib/python3.4/site-packages/pip/__init__.py", line 16, in <module>
from pip.vcs import git, mercurial, subversion, bazaar # noqa
File "/usr/local/python3/lib/python3.4/site-packages/pip/vcs/subversion.py", line 9, in <module>
from pip.index import Link
File "/usr/local/python3/lib/python3.4/site-packages/pip/index.py", line 30, in <module>
from pip.wheel import Wheel, wheel_ext
File "/usr/local/python3/lib/python3.4/site-packages/pip/wheel.py", line 32, in <module>
from pip import pep425tags
File "/usr/local/python3/lib/python3.4/site-packages/pip/pep425tags.py", line 335, in <module>
supported_tags = get_supported()
File "/usr/local/python3/lib/python3.4/site-packages/pip/pep425tags.py", line 307, in get_supported
elif is_manylinux1_compatible():
File "/usr/local/python3/lib/python3.4/site-packages/pip/pep425tags.py", line 163, in is_manylinux1_compatible
return have_compatible_glibc(2, 5)
File "/usr/local/python3/lib/python3.4/site-packages/pip/pep425tags.py", line 187, in have_compatible_glibc
version = [int(piece) for piece in version_str.split(".")]
File "/usr/local/python3/lib/python3.4/site-packages/pip/pep425tags.py", line 187, in <listcomp>
version = [int(piece) for piece in version_str.split(".")]
ValueError: invalid literal for int() with base 10: '20-2014'
Even if I download `get-pip.py` and run `python3 get-pip.py`, I get the same
error.
What could the problem be?
Edit: Apparently this is a problem that persists on Synology installs
once you have installed DSM 6.0.
Answer: Found solution courtesy of @Tadhg in the comments above.
[**Here**](https://reviewable.io/reviews/pypa/pip/3590) is a link to the
necessary changes to `pep425tags.py`.
But here are the changes needed for all of you who are on this SO page.
Add the following function:
# Separated out from have_compatible_glibc for easier unit testing
def check_glibc_version(version_str, needed_major, needed_minor):
# Parse string and check against requested version.
#
# We use a regexp instead of str.split because we want to discard any
# random junk that might come after the minor version -- this might happen
# in patched/forked versions of glibc (e.g. Linaro's version of glibc
# uses version strings like "2.20-2014.11"). See gh-3588.
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn("Expected glibc version with 2 components major.minor,"
" got: %s" % version_str, RuntimeWarning)
return False
return (int(m.group("major")) == needed_major and
int(m.group("minor")) >= needed_minor)
and replace
# Parse string and check against requested version.
version = [int(piece) for piece in version_str.split(".")]
if len(version) < 2:
warnings.warn("Expected glibc version with 2 components major.minor,"
" got: %s" % version_str, RuntimeWarning)
return False
return version[0] == major and version[1] >= minimum_minor
with `return check_glibc_version(version_str, major, minimum_minor)`
|
Trying to embed tkinter checkboxes in a text box, within a toplevel
Question: I am working on a simple database that tracks bicycle inventory. Deep within
my program I am hitting a snag that has stumped me all day. I am trying to
create a list of checkboxes in a text box (to make use of the scrollbar), all
within a toplevel popup.
I can't upload my entire code, it's just too much, but here is the snippet
dumbed down to get the thing working:
class Delete_bike:
def __init__(self, master):
self.master = master
top = self.top = tk.Toplevel(self.master)
text_frame = tk.Frame(self.top)
text_frame.grid(row = 0, padx = 10, pady = 10)
scb = tk.Scrollbar(text_frame)
scb.grid(row = 0, column = 1, sticky = 'ns')
d_tb = tk.Text(text_frame, height = 8, width = 40, yscrollcommand = scb.set)
d_tb.configure(font = ('Times', 10, 'bold'))
d_tb.grid(row = 0, column = 0)
scb.config(command = d_tb.yview)
test_d = {}
for i in range(10):
test_d[i] = 0
for i in test_d:
test_d[i] = tk.IntVar()
cb = tk.Checkbutton(text = i, variable = test_d[i])
d_tb.window_create(tk.END, window = cb)
d_tb.insert(tk.END, '\n')
The true version makes use of drawing from a shelved dictionary to populate
the extensive list of bikes.
When I run this, I get the following exception, which I do not understand at
all:
Exception in Tkinter callback
Traceback (most recent call last):
File "C:\Python34\lib\tkinter\__init__.py", line 1538, in __call__
return self.func(*args)
File "C:\Users\Gregory\Desktop\Bike Roster v2.0.pyw", line 115, in delete_bike
x = Delete_bike(self.master)
File "C:\Users\Gregory\Desktop\Bike Roster v2.0.pyw", line 239, in __init__
d_tb.window_create(tk.END, window = cb)
File "C:\Python34\lib\tkinter\__init__.py", line 3296, in window_create
+ self._options(cnf, kw))
_tkinter.TclError: can't embed .52341336 in .52340888.52341000.52341112
So next, I copied the snippet to a stand-alone program, shown below, which
works perfectly fine. So can I not achieve my desired result in a toplevel?
Granted I am new at all this and have no formal training or instruction on
programming, but that seems to be the only difference I can see.
import tkinter as tk
from tkinter import ttk
import tkinter.scrolledtext as tkst
class Delete_bike:
def __init__(self, master):
self.master = master
# top = self.top = tk.Toplevel(self.master)
text_frame = tk.Frame(self.master)
text_frame.grid(row = 0, padx = 10, pady = 10)
scb = tk.Scrollbar(text_frame)
scb.grid(row = 0, column = 1, sticky = 'ns')
d_tb = tk.Text(text_frame, height = 8, width = 40, yscrollcommand = scb.set)
d_tb.configure(font = ('Times', 10, 'bold'))
d_tb.grid(row = 0, column = 0)
scb.config(command = d_tb.yview)
test_d = {}
for i in range(10):
test_d[i] = 0
for i in test_d:
test_d[i] = tk.IntVar()
cb = tk.Checkbutton(text = i, variable = test_d[i])
d_tb.window_create(tk.END, window = cb)
d_tb.insert(tk.END, '\n')
root = tk.Tk()
app = Delete_bike(root)
root.mainloop()
If I remove the hash-tag to activate the toplevel line of code and place the
frame inside the toplevel, it generates the same error message. Left like
this, it works.
And a second quick question, if I am doing something wrong here, and this can
be achieved within a toplevel, can the scrolledtext module be used in lieu of
the text box and scrollbar combination?
Thanks as always!
Answer: You aren't specifying the parent for the checkbutton, so it defaults to the
root window. You can't create a widget whose parent is in the root window and
then try to display it in a toplevel.
Make the checkbutton a child of the text widget.
cb = tk.Checkbutton(d_tb, ...)
Yes, you can use scrolledtext instead of a text and a scrollbar, but I don't
see any advantage in doing so.
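For completeness, a minimal sketch of the fixed loop, optionally using `tkinter.scrolledtext` in place of the separate Text and Scrollbar (it reuses the `tk` and `text_frame` names from the question):
import tkinter.scrolledtext as tkst
d_tb = tkst.ScrolledText(text_frame, height=8, width=40,
                         font=('Times', 10, 'bold'))
d_tb.grid(row=0, column=0)
test_d = {}
for i in range(10):
    test_d[i] = tk.IntVar()
    # The checkbutton is a child of the text widget, so embedding it works
    # even when everything lives inside a Toplevel.
    cb = tk.Checkbutton(d_tb, text=i, variable=test_d[i])
    d_tb.window_create(tk.END, window=cb)
    d_tb.insert(tk.END, '\n')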
|
NLTK API to Stanford POSTagger works fine for ipython in terminal but not working in Anaconda with spyder
Question: I have downloaded stanford postagger and parser following the instructions
written for below question:
[Stanford Parser and
NLTK](http://stackoverflow.com/questions/13883277/stanford-parser-and-
nltk/34112695#34112695)
But when I execute the commands at bottom, it worked perfectly fine for
ipython in terminal (Mac OS) but showed error in Spyder(Anaconda) (NLTK was
unable to find stanford-postagger.jar!) Since I have set `CLASSPATH` in
terminal, I am not sure what went wrong. When I checked
import os
print os.environ.get('CLASSPATH')
It returned `None` in Spyder but correct path in terminal. I have also
restarted the program and set directory to `$HOME`. Is there anything I might
be missing here?
from nltk.tag.stanford import StanfordPOSTagger
st = StanfordPOSTagger('english-bidirectional-distsim.tagger')
st.tag('What is the airspeed of an unladen swallow ?'.split())
Answer: The problem has nothing to do with python or the nltk; it's a consequence of
how OS X starts GUI applications. Basically, the `CLASSPATH` environment
variable is set in your `.profile` or its kin, but this file is only executed
when you are starting Terminal; GUI applications inherit their environment
from your login process, which doesn't know `CLASSPATH`.
There are numerous SO questions about how to deal with this; see
[here](http://superuser.com/q/476752/127080) or
[here](http://stackoverflow.com/questions/135688/setting-environment-
variables-in-os-x). But in your case, there are also a couple of work-arounds
that ought to work:
1. Start Spyder from the Terminal command line, not via the Launchpad (just type `spyder &`). Or
2. Your python program can also set its own environment (which will be inherited by child processes) prior to launching the Stanford parser, like this:
os.putenv("CLASSPATH", "/path/to/the/parser")
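Setting `os.environ` directly does the same thing while also keeping the value visible inside the running interpreter (the path below is just a placeholder):
import os
os.environ["CLASSPATH"] = "/path/to/the/parser"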
|
Python - How to use regex to find a text?
Question: Please bear with me, I'm new to Python. I have a text, and I want to get the
value after `^s` until the next `^`; so, for example, given `^s100^`, the
value is `100`. This is what I've tried so far:
#!/usr/bin/python
import re
text="^request^ #13#10#13#10^s100^GET http://facebook.com #13#10Host: http://facebook.com #13#10X-Online-Host: http://facebook.com #13#10X-Forward-Host: http://facebook.com #13#10Connection: Keep-Alive#13#10#13#10"
if re.split(r'\^s',text):
print "found it"
The problem is that it always returns `found it`, even if I change the regex to
`re.split(r'\^bla',text)`; basically with any text it will always return
`found it`. Please help me fix it.
Answer: What you probably want is
[`re.search`](https://docs.python.org/2/howto/regex.html):
import re
text="^request^ #13#10#13#10^s100^GET http://facebook.com #13#10Host: http://facebook.com #13#10X-Online-Host: http://facebook.com #13#10X-Forward-Host: http://facebook.com #13#10Connection: Keep-Alive#13#10#13#10"
m = re.search(r'\^s(.*)\^',text)
print m.group(1) # 100
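If the text could ever contain another `^` after the value, a non-greedy group keeps the match anchored to the first closing caret (a minor variation on the same idea):
m = re.search(r'\^s(.*?)\^', text)
print m.group(1) # still 100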
|
Django 1.9 django.core.exceptions.AppRegistryNotReady: Apps aren't loaded yet
Question: Trying to use django-shopify-sync in a Django 1.9 project. When loading the
config for the app it gives me the following error, likely because it's trying
to load some models in the config?
Tried moving the the two imports that eventually import models into the
ready() function below, but still getting the same error. Culpirt lines 2 and
3 in the following file <https://github.com/andresdouglas/django-shopify-
sync/blob/master/shopify_sync/apps.py>
The error is:
$ python manage.py runserver
Unhandled exception in thread started by <function wrapper at 0x10753e500>
Traceback (most recent call last):
File "/Users/andres/.virtualenvs/[...]/lib/python2.7/site-packages/django/utils/autoreload.py", line 226, in wrapper
fn(*args, **kwargs)
File "/Users/andres/.virtualenvs/[...]/lib/python2.7/site-packages/django/core/management/commands/runserver.py", line 109, in inner_run
autoreload.raise_last_exception()
File "/Users/andres/.virtualenvs/[...]/lib/python2.7/site-packages/django/utils/autoreload.py", line 249, in raise_last_exception
six.reraise(*_exception)
File "/Users/andres/.virtualenvs/[...]/lib/python2.7/site-packages/django/utils/autoreload.py", line 226, in wrapper
fn(*args, **kwargs)
File "/Users/andres/.virtualenvs/[...]/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/Users/andres/.virtualenvs/[...]/lib/python2.7/site-packages/django/apps/registry.py", line 85, in populate
app_config = AppConfig.create(entry)
File "/Users/andres/.virtualenvs/[...]/lib/python2.7/site-packages/django/apps/config.py", line 116, in create
mod = import_module(mod_path)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/Users/andres/.virtualenvs/[...]/src/shopify-sync/shopify_sync/apps.py", line 2, in <module>
from shopify_sync.handlers import webhook_received_handler
File "/Users/andres/.virtualenvs/[...]/src/shopify-sync/shopify_sync/handlers.py", line 3, in <module>
from .models import (CustomCollection, Customer, Order, Product, Shop,
File "/Users/andres/.virtualenvs/[...]/src/shopify-sync/shopify_sync/models/__init__.py", line 3, in <module>
from .address import Address
File "/Users/andres/.virtualenvs/[...]/src/shopify-sync/shopify_sync/models/address.py", line 6, in <module>
from .base import ShopifyResourceModel
File "/Users/andres/.virtualenvs/[...]/src/shopify-sync/shopify_sync/models/base.py", line 144, in <module>
class ShopifyResourceModel(models.Model):
File "/Users/andres/.virtualenvs/[...]/lib/python2.7/site-packages/django/db/models/base.py", line 94, in __new__
app_config = apps.get_containing_app_config(module)
File "/Users/andres/.virtualenvs/[...]/lib/python2.7/site-packages/django/apps/registry.py", line 239, in get_containing_app_config
self.check_apps_ready()
File "/Users/andres/.virtualenvs/[...]/lib/python2.7/site-packages/django/apps/registry.py", line 124, in check_apps_ready
raise AppRegistryNotReady("Apps aren't loaded yet.")
django.core.exceptions.AppRegistryNotReady: Apps aren't loaded yet.
**UPDATE** : If I move the following lines (model imports)
<https://github.com/andresdouglas/django-shopify-
sync/blob/master/shopify_sync/handlers.py#L3-L4> to inside `get_topic_models`
it seems to fix the error. But that's kind of dirty, can anyone come up with a
better solution?
Answer: ~~It looks like you may have an ordering issue. Make sure your application is
in the`INSTALLED_APPS` tuple after `django-shopify-sync`. You can find a few
more details in the [Application registry
documentation](https://docs.djangoproject.com/en/1.9/ref/applications/#application-
registry).~~
As unsatisfying as an inline import is, you may be stuck with it. I'd suggest
moving
from shopify_sync.handlers import webhook_received_handler
from shopify_webhook.signals import webhook_received
into the `ready` method in `apps.py`. This will delay the import until the
models are ready.
The change I tried is:
diff --git a/shopify_sync/apps.py b/shopify_sync/apps.py
index 663b43b..0bc1fcc 100644
--- a/shopify_sync/apps.py
+++ b/shopify_sync/apps.py
@@ -1,7 +1,5 @@
from django.apps import AppConfig
-from shopify_sync.handlers import webhook_received_handler
-from shopify_webhook.signals import webhook_received
-
+import importlib
class ShopifySyncConfig(AppConfig):
"""
@@ -16,5 +14,9 @@ class ShopifySyncConfig(AppConfig):
The ready() method is called after Django setup.
"""
+ signals_webhook_received = importlib.import_module('.signals', package='shopify_webhook')
+ handlers_webhook_received_handler = importlib.import_module('.handlers', package='shopify_sync')
+
# Connect shopify_webhook's webhook_received signal to our synchronisation handler.
- webhook_received.connect(webhook_received_handler, dispatch_uid = 'shopify_sync_webhook_received_handler')
+ signals_webhook_received.webhook_received.connect(handlers_webhook_received_handler.webhook_received_handler, dispatch_uid = 'shopify_sync_webhook_received_handler')
+
|
Python 2.7 problems with sub-classing dict and mirroring to json
Question: So I am trying to create a class that behaves like a dict but also copies
itself to a .json file whenever a change is made to the dict. I've got it
working for the most part; but where I have trouble is when I append something
to a list inside the dict; it updates the dict but not the .json file
associated with the dict.
I am sorry for the lengthy code block, I tried to condense as much as possible
but it still turned out fairly lengthy.
import json
import os.path
class JDict(dict):
def __init__(self, filepath, *args, **kwargs):
if str(filepath).split('.')[-1] == 'json':
self.filepath = str(filepath)
else:
self.filepath = str('{}.json'.format(filepath))
if os.path.isfile(self.filepath):
super(JDict, self).__init__(self.read())
else:
super(JDict, self).__init__(*args, **kwargs)
self.write()
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self.write()
def write(self):
with open(self.filepath, 'w') as outfile:
json.dump(self, outfile, sort_keys = True, indent = 4,
ensure_ascii=False)
def read(self):
with open(self.filepath, 'r') as infile:
jsonData = json.load(infile)
self = jsonData
return self
def parseJson(filepath):
with open(filepath, 'r') as infile:
jsonData = json.load(infile)
return jsonData
test = JDict("test.json", {
"TestList": [
"element1"
]
})
test["TestList"].append("element2")
try:
if test["TestList"][1] == parseJson("test.json")["TestList"][1]:
print 'Success'
except IndexError:
print 'Failure'
Answer: The reason I was having trouble is that `__setitem__` isn't called on the
dict (or even on a list, for that matter) when you append an element to a
member list.
So, if anyone else is having this issue: I ended up sub-classing both the list
and dict datatypes and making them helper classes for a new class called QJson.
All lists and dicts under the helper class QJson get converted to JLists and
JDicts respectively, and QJson is itself a dict.
The code is REALLY long and monolithic at this point, so here is a link to my
[github](https://github.com/deeredman1991/QJson/blob/master/QJson.py). Enjoy.
:)
|
R with function in python
Question: In `R`, I can use `with(obj, a + b + c + d)` instead of `obj$a + obj$b + obj$c
+ obj$d`, where `obj` can be a `list` or `data.frame`.
Is there any similar function for `dict`, `pandas.Series`, `pandas.DataFrame`
in python?
Answer: Not really. R and Python have pretty different philosophies when it comes to
this kind of thing--in R it's possible to write a function which parses the
entire syntax of its arguments before they are evaluated, whereas in Python
it's not. So in Python, this is impossible:
df = pd.DataFrame({'a':[1,2],'b':[3,4],'c':[5,6],'d':[7,8]})
with(df, a + b + c)
However, this works:
sum(map(df.get, ('a','b','c'))) # gives Series([9,12])
If you wanted to apply other chained operations, you could implement support
for something like this:
def chain(op, df, name, *names):
res = df[name]
while names:
res = op(res, df[names[0]])
names = names[1:]
return res
Then you can do this:
from operator import div
chain(div, df, 'a', 'b', 'c')
|
Get environment variable address with Python
Question: For a university project I have to perform a buffer overflow on some programs
given by the professor.
I want to set up my shellcode environment variable with Python, and I do that
with:
import os
os.environ("EGG") = "..."
os.system('bash')
So now Python spawns a child bash process. But how can I print the address
of EGG? I have done this with C:
#include <stdlib.h>
#include <stdio.h>
int main(){
printf("%x", getenv("EGG"));
return 0;
}
but I want to do it with Python. Anyone can help?
Answer: You can use id() (<https://docs.python.org/2/library/functions.html#id>):
id(os.environ.get('EGG'))
in hex:
hex(id(os.environ.get('EGG')))
|
Scrapy error with spider error processing
Question: I am new to Python and Scrapy and I have following spider:
# scrapy_error.py
import scrapy
from scrapy import Request
class TextScrapper(scrapy.Spider):
name = "tripadvisor"
start_urls = [
"https://www.tripadvisor.com/Hotel_Review-g312741-d306930-Reviews-Holiday_Inn_Express_Puerto_Madero-Buenos_Aires_Capital_Federal_District.html"
]
def parse(self, response):
full_review_page_links = response.xpath('//div[@class="quote isNew"]/a/@href').extract()
res = [detail_link for detail_link in full_review_page_links]
if res:
yield scrapy.Request("https://www.tripadvisor.com/" + res[0])
Every time I run this spider with
> scrapy runspider scrapy_error.py
I get following error:
2016-05-11 15:00:50 [scrapy] INFO: Spider opened
2016-05-11 15:00:50 [scrapy] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2016-05-11 15:00:50 [scrapy] DEBUG: Telnet console listening on 127.0.0.1:6023
2016-05-11 15:00:52 [scrapy] DEBUG: Redirecting (301) to <GET https://www.tripadvisor.com/Hotel_Review-g312741-d306930-Reviews-Holiday_Inn_Express_Pue
rto_Madero-Buenos_Aires_Capital_Federal_District.html> from <GET https://www.tripadvisor.com/Holiday+Inn+Express+PUERTO+MADERO>
2016-05-11 15:00:54 [scrapy] DEBUG: Crawled (200) <GET https://www.tripadvisor.com/Hotel_Review-g312741-d306930-Reviews-Holiday_Inn_Express_Puerto_Mad
ero-Buenos_Aires_Capital_Federal_District.html> (referer: None)
review_370284375
New Item is Added To The Data Collection
2016-05-11 15:00:54 [scrapy] ERROR: Spider error processing <GET https://www.tripadvisor.com/Hotel_Review-g312741-d306930-Reviews-Holiday_Inn_Express_
Puerto_Madero-Buenos_Aires_Capital_Federal_District.html> (referer: None)
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\scrapy-1.1.0rc1-py2.7.egg\scrapy\utils\defer.py", line 102, in iter_errback
yield next(it)
GeneratorExit
Exception RuntimeError: 'generator ignored GeneratorExit' in <generator object iter_errback at 0x040ECB48> ignored
Unhandled error in Deferred:
2016-05-11 15:00:54 [twisted] CRITICAL: Unhandled error in Deferred:
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\twisted\internet\base.py", line 1194, in run
self.mainLoop()
File "C:\Python27\lib\site-packages\twisted\internet\base.py", line 1203, in mainLoop
self.runUntilCurrent()
File "C:\Python27\lib\site-packages\twisted\internet\base.py", line 825, in runUntilCurrent
call.func(*call.args, **call.kw)
File "C:\Python27\lib\site-packages\twisted\internet\task.py", line 671, in _tick
taskObj._oneWorkUnit()
--- <exception caught here> ---
File "C:\Python27\lib\site-packages\twisted\internet\task.py", line 517, in _oneWorkUnit
result = next(self._iterator)
File "C:\Python27\lib\site-packages\scrapy-1.1.0rc1-py2.7.egg\scrapy\utils\defer.py", line 63, in <genexpr>
work = (callable(elem, *args, **named) for elem in iterable)
File "C:\Python27\lib\site-packages\scrapy-1.1.0rc1-py2.7.egg\scrapy\core\scraper.py", line 183, in _process_spidermw_output
self.crawler.engine.crawl(request=output, spider=spider)
File "C:\Python27\lib\site-packages\scrapy-1.1.0rc1-py2.7.egg\scrapy\core\engine.py", line 198, in crawl
self.schedule(request, spider)
File "C:\Python27\lib\site-packages\scrapy-1.1.0rc1-py2.7.egg\scrapy\core\engine.py", line 204, in schedule
if not self.slot.scheduler.enqueue_request(request):
File "C:\Python27\lib\site-packages\scrapy-1.1.0rc1-py2.7.egg\scrapy\core\scheduler.py", line 51, in enqueue_request
if not request.dont_filter and self.df.request_seen(request):
File "C:\Python27\lib\site-packages\scrapy-1.1.0rc1-py2.7.egg\scrapy\dupefilters.py", line 48, in request_seen
fp = self.request_fingerprint(request)
File "C:\Python27\lib\site-packages\scrapy-1.1.0rc1-py2.7.egg\scrapy\dupefilters.py", line 56, in request_fingerprint
return request_fingerprint(request)
File "C:\Python27\lib\site-packages\scrapy-1.1.0rc1-py2.7.egg\scrapy\utils\request.py", line 53, in request_fingerprint
fp.update(to_bytes(canonicalize_url(request.url)))
File "C:\Python27\lib\site-packages\scrapy-1.1.0rc1-py2.7.egg\scrapy\utils\url.py", line 67, in canonicalize_url
path = safe_url_string(_unquotepath(path)) or '/'
File "C:\Python27\lib\site-packages\w3lib\url.py", line 97, in safe_url_string
to_native_str(parts.netloc.encode('idna')),
File "C:\Python27\lib\encodings\idna.py", line 164, in encode
result.append(ToASCII(label))
File "C:\Python27\lib\encodings\idna.py", line 73, in ToASCII
raise UnicodeError("label empty or too long")
exceptions.UnicodeError: label empty or too long
2016-05-11 15:00:54 [twisted] CRITICAL:
2016-05-11 15:00:54 [scrapy] INFO: Closing spider (finished)
None
None
2016-05-11 15:00:54 [scrapy] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 1014,
'downloader/request_count': 2,
'downloader/request_method_count/GET': 2,
'downloader/response_bytes': 104006,
'downloader/response_count': 2,
'downloader/response_status_count/200': 1,
'downloader/response_status_count/301': 1,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2016, 5, 11, 9, 30, 54, 488000),
'log_count/CRITICAL': 2,
'log_count/DEBUG': 3,
'log_count/ERROR': 1,
'log_count/INFO': 7,
'request_depth_max': 1,
'response_received_count': 1,
'scheduler/dequeued': 2,
'scheduler/dequeued/memory': 2,
'scheduler/enqueued': 2,
'scheduler/enqueued/memory': 2,
'spider_exceptions/GeneratorExit': 1,
'start_time': datetime.datetime(2016, 5, 11, 9, 30, 50, 817000)}
2016-05-11 15:00:54 [scrapy] INFO: Spider closed (finished)
I'm using Scrapy 1.1.0rc1. I have tried a few things like re-installing Python
and Scrapy, but nothing helped.
Answer: This looks like a bug in Scrapy that was present up to version 1.1.0rc3.
Version 1.1.0rc4 works fine; install this specific version with the following
command:
> pip install scrapy==1.1.0rc4
|
Merging 2 Lists In Multiple Ways - Python
Question: I've been experimenting with a number of techniques, but I'm sure there's a
smoother way to get this done.
Suppose I have two lists with the same amount of items in them (4 each):
a = ['a', 'b', 'c', 'd']
b = [1, 2, 3, 4]
I'd like to merge these lists in all possible ways while retaining order.
Example outputs:
a, b, c, d, 1, 2, 3, 4
1, 2, 3, 4, a, b, c, d
a, b, 1, 2, c, 3, 4, d
The point is that each of the lists must retain its order, so an item cannot
precede another item in the output given its position in the original list.
So, for example, the output cannot be:
a, b, **d**, c, 1... > d precedes c whereas c is before d in the original list
1, **4**, a, b, 3.... > 4 precedes 3 whereas 3 is before 4 in the original list
I guess the idea is to merge the second list into the first list in all
possible ways. A fully worked example is this:
a = [a, b]
b = [1, 2]
desired output:
ab12
a1b2
a12b
1ab2
1a2b
12ab
How do I go about doing this? Does `itertools` have a capability to do this in
some way? Or is there another way to get this done? Please help!
Answer: In the 2x4 case, you want to take all 8 elements without disrupting the
ordering within each quad. These examples:
a, b, c, d, 1, 2, 3, 4
1, 2, 3, 4, a, b, c, d
a, b, 1, 2, c, 3, 4, d
Can be transformed into sequences of "instructions" which are the list to take
from, 0 or 1:
0 0 0 0 1 1 1 1
1 1 1 1 0 0 0 0
0 0 1 1 0 1 1 0
Once you have realized this, you may notice that the sequences we need to
generate are all the permutations of four zeros and four ones. Having made
this leap, we can use `itertools`:
itertools.permutations([0,0,0,0,1,1,1,1])
For the 2x4 case, this gives 40320 results, but only 70 unique ones (because
`itertools.permutations` treats repeated elements as distinct, so reorderings
of identical values are generated separately). You can get the unique permutations from an answer here:
<http://stackoverflow.com/a/6285330/4323> or just use `set()`.
* * *
Putting that all together, here is a complete solution:
import itertools
def combos(*seqs):
counts = map(len, seqs)
base = []
for ii, count in enumerate(counts):
base.extend([ii]*count)
for take in set(itertools.permutations(base)):
result = []
where = [0] * len(seqs)
for elem in take:
result.append(seqs[elem][where[elem]])
where[elem] += 1
yield result
You can test it this way (gives 70 results):
a = ['a', 'b', 'c', 'd']
b = [1, 2, 3, 4]
for res in combos(a, b):
print res
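As an aside, the same interleavings can be generated without filtering duplicates by choosing which output positions come from the first list; a sketch of that variant (`combos_via_positions` is an illustrative name):
import itertools
def combos_via_positions(a, b):
    # Choosing which len(a) of the len(a) + len(b) output slots hold elements
    # of `a` fixes the whole merge, so there are exactly C(len(a)+len(b), len(a))
    # results and no duplicates to discard.
    n = len(a) + len(b)
    for a_slots in itertools.combinations(range(n), len(a)):
        ia, ib = iter(a), iter(b)
        yield [next(ia) if pos in a_slots else next(ib) for pos in range(n)]
For the 2x4 example above this yields the same 70 unique merges.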
|
ImportError: cannot import name COMError in python
Question: I am trying to convert a docx file to pdf with the following code:
import sys
import os
import comtypes.client
wdFormatPDF = 17
in_file = os.path.abspath(sys.argv[1])
out_file = os.path.abspath(sys.argv[2])
word = comtypes.client.CreateObject('Word.Application')
doc = word.Documents.Open(in_file)
doc.SaveAs(out_file, FileFormat=wdFormatPDF)
doc.Close()
word.Quit()
It is throwing an error
ImportError: cannot import name COMError
I have installed the comtypes package.
I am very new to Python and cannot figure out how to resolve this problem.
[Edit]
Stacktrace
Traceback (most recent call last):
File "converttopdf.py", line 3, in <module>
import comtypes.client
File "/usr/local/lib/python2.7/dist-packages/comtypes-1.1.2-py2.7.egg/comtypes/__init__.py", line 23, in <module>
from _ctypes import COMError
ImportError: cannot import name COMError
Answer: Unfortunately COMTypes is designed for Windows, not Linux.
> comtypes allows to define, call, and implement custom and dispatch-based COM
> interfaces in pure Python. It works on Windows, 64-bit Windows, and Windows
> CE.
[Source](https://pypi.python.org/pypi/comtypes)
You'll need to find another way to do your conversion, likely through another
library.
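On Linux, one commonly used route (assuming LibreOffice is installed) is to shell out to its headless converter instead of driving Word over COM; a minimal sketch:
import subprocess
def docx_to_pdf(in_file, out_dir):
    # LibreOffice writes <basename>.pdf into out_dir.
    subprocess.check_call([
        "libreoffice", "--headless", "--convert-to", "pdf",
        "--outdir", out_dir, in_file,
    ])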
|
Need help repositioning and resizing widgets with Python Tkinter grid
Question: Hi guys I was wondering if I could get some help on how to resize widgets with
.grid on tkinter. I have no clue and I've tried weight= but it doesn't do
anything.
I also need help moving the two combo boxes in my code to certain areas.
Normally the widgets are layered depending on where they are in the code but
it doesn't seem to be the case with the combo boxes. I want the first combo
box to be after the first input box and then the second box after the label
"To"
Here is the code:
from Tkinter import *
root = Tk()
root.title("Currency Converter")
root.geometry("365x436")
root.columnconfigure(0, weight=1)
app = Frame(root)
app.grid()
QuoteCurrencyConverter = Label(app, font="calibri 24 bold underline", text = "Currency Converter")
QuoteCurrencyConverter.grid()
InputAmount = Entry(app)
InputAmount.grid()
ComboBoxVariable = StringVar(root)
ComboBoxVariable.set("AUD")
ComboBox1 = OptionMenu(root, ComboBoxVariable, "AUD", "USD", "YEN", "CAD", "EUR", "GBP")
ComboBox1.grid()
QuoteTo = Label(app, font="calibri 24", text = "To")
QuoteTo.grid()
ComboBoxVariable1 = StringVar(root)
ComboBoxVariable1.set("AUD")
ComboBox2 = OptionMenu(root, ComboBoxVariable1, "AUD", "USD", "YEN", "CAD", "EUR", "GBP")
ComboBox2.grid()
ConvertButton = Button(app, font="calibri 13 bold", text = "Convert")
ConvertButton.grid()
DisplayAmount = Entry(app) #CURRENTLY UNSURE#
DisplayAmount.grid()
ResetButton = Button(app, font="calibri 13 bold", text = "Reset")
ResetButton.grid()
root.mainloop()
Answer: The grid geometry manager arranges widgets in rows and columns. What you said
about widgets being layered according to where they appear in the code is not
how `grid` works. The `grid` function has two optional parameters, `row` and
`column`; both must be non-negative integers, and together they address a cell
much like the coordinates in a Microsoft Excel sheet. If you want a widget to
stretch across several cells, use the `rowspan` and `columnspan` attributes.
Example:
`frame.grid(row=3, column=4, rowspan=4, columnspan=2)`
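Applied to the code in the question, one possible arrangement would look like this (a sketch: the row/column numbers are just an example, and the OptionMenus should be created with `app` as their parent so that every widget shares the same grid):
InputAmount.grid(row=1, column=1)
ComboBox1.grid(row=1, column=2)   # first combo box right after the input box
QuoteTo.grid(row=2, column=0)
ComboBox2.grid(row=2, column=1)   # second combo box after the "To" label
app.columnconfigure(1, weight=1)  # let column 1 absorb any extra horizontal space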
|
Using Google endpoints in different modules of the same app
Question: I'm quite new to development with Google App engine and other Google services
of the Cloud platform and I'd like to create an app with different modules (so
they can have their own lifecycle) which use endpoints.
I'm struggling with API paths because I don't know how to route requests to
the right module.
My directory tree is like that:
/myApp
/module1
__init__.py
main.py
/module2
__init__.py
main.py
module1.yaml
module2.yaml
dispatch.yaml
module1.yaml
application: myapp
runtime: python27
threadsafe: true
module: module1
version: 0
api_version: 1
handlers:
# The endpoints handler must be mapped to /_ah/spi.
# Apps send requests to /_ah/api, but the endpoints service handles mapping
# those requests to /_ah/spi.
- url: /_ah/spi/.*
script: module1.main.api
libraries:
- name: pycrypto
version: 2.6
- name: endpoints
version: 1.0
module2.yaml
application: myapp
runtime: python27
threadsafe: true
module: module2
version: 0
api_version: 1
handlers:
# The endpoints handler must be mapped to /_ah/spi.
# Apps send requests to /_ah/api, but the endpoints service handles mapping
# those requests to /_ah/spi.
- url: /_ah/spi/.*
script: module2.main.api
libraries:
- name: pycrypto
version: 2.6
- name: endpoints
version: 1.0
dispatch.yaml
dispatch:
- url: "*/_ah/spi/*"
module: module1
- url: "*/_ah/spi/.*"
module: module2
So I'd like my endpoints to be called with the name of the corresponding
module somewhere ('_ah/api/module1' or 'module1/_ah/api'). I don't know what
to put in the different .yaml files. I don't even know if what I'm doing is
right, or possible.
Thanks for your answers.
Answer: You can host different endpoints on different modules (now called
**services**); the way to correctly address them is as follows:
`https://<service-name>-dot-<your-project-id>.appspot.com/_ah/api`
Now, let's say you have—as per your description—**module1** and **module2** ,
each one hosting different endpoints. You will call **module1** APIs by
hitting:
`https://module1-dot-<your-project-id>.appspot.com/_ah/api`
And in a similar fashion, **module2** APIs:
`https://module2-dot-<your-project-id>.appspot.com/_ah/api`
If you want to dig deeper into how this URL schema works (including versions,
which are another important part of the equation here), go read [Addressing
microservices](https://cloud.google.com/appengine/docs/python/designing-
microservice-api#addressing_microservices) and the immediately following
section [Using API
versions](https://cloud.google.com/appengine/docs/python/designing-
microservice-api#using_api_versions)
|
ServiceNow - How to use SOAP to download reports
Question: I need to automate the download of reports from `serviceNow`.
I've been able to automate it using `python`, `selenium` and `win32com` by
the following method.
`https://test.service-
now.com/sys_report_template.do?CSV&jvar_report_id=92a....7aa`
I use `selenium` to access `serviceNow` as well as to modify `firefox`'s
default download option so it dumps the file to a folder on the `windows` machine.
However, since all of this may be ported to a `linux` server, we would like
to port it to `SOAP` or `CURL`.
I came across `serviceNow` libraries for `python`
[here](https://github.com/locaweb/python-servicenow).
I tried it out, and the following code works if I set the login, password and
instance name as listed at the site, using the following from `ServiceNow.py`:
class Change(Base):
__table__ = 'change_request.do'
and the following within the client-side script, as listed on the
[site](https://github.com/locaweb/python-servicenow):
# Fetch changes updated on the last 5 minutes
changes = chg.last_updated(minutes=5)
#print changes client side script.
for eachline in changes:
print eachline
However, when I replace the URL with `sys_report_template.do`, I get this error:
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\SOAPpy\Parser.py", line 1080, in _parseSOAP
parser.parse(inpsrc)
File "C:\Python27\Lib\xml\sax\expatreader.py", line 107, in parse
xmlreader.IncrementalParser.parse(self, source)
File "C:\Python27\Lib\xml\sax\xmlreader.py", line 125, in parse
self.close()
File "C:\Python27\Lib\xml\sax\expatreader.py", line 220, in close
self.feed("", isFinal = 1)
File "C:\Python27\Lib\xml\sax\expatreader.py", line 214, in feed
self._err_handler.fatalError(exc)
File "C:\Python27\Lib\xml\sax\handler.py", line 38, in fatalError
raise exception
SAXParseException: <unknown>:1:0: no element found
Here is the relevant code:
from servicenow import ServiceNow
from servicenow import Connection
from servicenow.drivers import SOAP
# For SOAP connection
conn = SOAP.Auth(username='abc', password='def', instance='test')
rpt = ServiceNow.Base(conn)
rpt.__table__ = "sys_report_template.do?CSV"
#jvar_report_id replaced with .... to protect confidentiality
report = rpt.fetch_one({'jvar_report_id': '92a6760a......aas'})
for eachline in report:
print eachline
So, my question is: what can be done to make this work? I looked on the web for
resources and help, but didn't find any.
Any help is appreciated.
Answer: After much research I was able to use the following method to get the report in
`csv` format from `servicenow`. I thought I would post it here in case anyone
else runs into a similar issue.
import requests
import json
# Set the request parameters
url= 'https://myinstance.service-now.com/sys_report_template.do?CSV&jvar_report_id=929xxxxxxxxxxxxxxxxxxxx0c755'
user = 'my_username'
pwd = 'my_password'
# Set proper headers
headers = {"Accept":"application/json"}
# Do the HTTP request
response = requests.get(url, auth=(user, pwd), headers=headers )
response.raise_for_status()
print response.text
`response.text` now has the report in `csv` format.
Next, I need to figure out how to parse the `response` object to extract the
`csv` data in the correct format.
Once done, I will post it here, but for now this answers my question.
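A minimal sketch of that parsing step, assuming the report body is plain comma-separated text as above:
import csv
# csv.reader accepts any iterable of lines, so the response body can be
# split and parsed directly.
rows = list(csv.reader(response.text.splitlines()))
header, data_rows = rows[0], rows[1:]
print header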
|
Why do Python's pdb and code modules interact differently with Theano
Question: I've been trying to debug some Theano code, for which I used Keras as a front
end. What I've been doing is inserting either:
import pdb
pdb.set_trace()
or
import code
code.interact(local=locals(), banner='==>")
at the point where I want to begin interacting with my code to see what's
going on and interactively test solutions. But, when I use pdb to interact
with my code, I see this odd behaviour:
--Return--
> /home/me/Projects/keras_expts2/cifar10_cnn_ecoc2a.py(174)<module>()->None
-> pdb.set_trace()
(Pdb) import theano.tensor as T
(Pdb) a=T.fvector()
(Pdb) type(a)
*** NameError: name 'a' is not defined
(Pdb) T
<module 'theano.tensor' from '/home/smgutstein/Downloads/Theano/theano/tensor/__init__.pyc'>
(Pdb) T.fvector
TensorType(float32, vector)
So, pdb knows what T and fvector are, it just won't create a Theano fvector
variable.
However, using code, I see this (better) behaviour:
==>
>>> import theano.tensor as T
>>> a = T.fvector()
>>> type(a)
<class 'theano.tensor.var.TensorVariable'>
>>> T.fvector
TensorType(float32, vector)
>>>
Why is there this difference?
Answer: Because `a` is a `pdb` command (short for `alias`), `a=T.fvector()` gets
parsed as an attempt to run that command rather than as a Python statement.
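A common workaround is to prefix the statement with `!`, which forces pdb to execute the line as Python code instead of interpreting it as a debugger command:
(Pdb) !a = T.fvector()
(Pdb) type(a)
<class 'theano.tensor.var.TensorVariable'>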
|
how install OCR tesseract on opencv 3.1.0 on linux ubuntu 14.10?
Question: I would like to install opencv_contrib correctly for text recognition. Do you
have an idea of the right process?
Before installing OpenCV 3.1.0 and opencv_contrib, I installed leptonica-1.73,
protobuf, caffe, and then Tesseract. After that I started to install OpenCV 3.1.0
and opencv_contrib, but I can't use the text module.
Process: **_1\. Install leptonica:_**
$ sudo apt-get install autoconf automake libtool
$ sudo apt-get install libpng12-dev
$ sudo apt-get install libjpeg62-dev
$ sudo apt-get install libtiff4-dev
$ sudo apt-get install zlib1g-dev
$ sudo apt-get install libicu-dev
$ sudo apt-get install libpango1.0-dev
$ sudo apt-get install libcairo2-dev
$ wget http://www.leptonica.org/source/leptonica-1.73.tar.gz
$ tar -zxvf leptonica-1.73.tar.gz
$ cd leptonica-1.73
$ ./autogen.sh
$ ./configure
$ make
$ sudo checkinstall
$ sudo ldconfig
**2\. Install protobuf**
like "https:"//launchpad."net/ubuntu/trusty/"+package/protobuf-compiler"
**_3\. Install Caffe_**
like "http:"//caffe."berkeleyvision."org"/install_apt"."html
**_4\. Install Opencv 3.1.0 with Opencv_extra_module_path_**
$ cd ~/opencv
$ mkdir build
$ cd build
$ cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D INSTALL_C_EXAMPLES=OFF \
-D INSTALL_PYTHON_EXAMPLES=ON \
-D OPENCV_EXTRA_MODULES_PATH=~/opencv_contrib/modules \
-D BUILD_EXAMPLES=ON ..
Warning message:
    joboxlearning@joboxlearning-VirtualBox:~/OpenCV/workspace/text_recognition$ ./Text_recognition 2856985-origpic-28a761.jpg
    ./Text_recognition
    Demo program of the Extremal Region Filter algorithm described in Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
    init done
    opengl support available
    OpenCV Error: Bad argument (Default classifier file not found!) in ERClassifierNM1, file /home/joboxlearning/OpenCV/opencv_contrib/modules/text/src/erfilter.cpp, line 1035
    terminate called after throwing an instance of 'cv::Exception'
      what(): /home/joboxlearning/OpenCV/opencv_contrib/modules/text/src/erfilter.cpp:1035: error: (-5) Default classifier file not found! in function ERClassifierNM1
    Abandon (core dumped)
Answer: Check this Dockerfile
(<https://gist.github.com/joost/52d59321abe31884ab818b806c69426b>) it shows
all steps to install OpenCV 3.1 with Tesseract and Python bindings. You should
be able to use all features like OCR and ERFilter.
To test them open python:
>>> import cv2
>>> cv2.__version__
'3.1.0'
>>> cv2.text # should return the module
|
How to recode SYSTEM missing values from nan to empty space while saving SPSS system (sav) files from pandas dataframe?
Question: I use the savReaderWriter module to save an SPSS file from a pandas dataframe
with the following code:
import savReaderWriter as srw
savFileName = 'Outfile name.sav'
records = map(list, df.values)
varNames = list(df.columns)
varTypes = {}
for n, values in enumerate(records[0]):
varName = varNames[n]
if df.dtypes[n] == 'float64':
varTypes[varName] = 0
else:
varTypes[varName] = 255
with srw.SavWriter(savFileName, varNames, varTypes, ioUtf8=True) as writer:
writer.writerows(records)
The problem is that empty string variables in the SPSS file end up with "nan" values.
In the documentation the default option for SavWriter is `missingValues=None`,
but changing the `None` to `''` or any other string doesn't do the job. What
would be the solution to get an empty string instead of nan?
Thank you very much in advance.
Answer: I guess if you want to represent "nan" values as empty strings, the best way is
to replace them in the source df:
df = df.fillna('')
and save after that.
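In the code from the question, that means cleaning the frame right before the records are built (a small sketch):
df = df.fillna('')              # replace NaN with empty strings first
records = map(list, df.values)
varNames = list(df.columns)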
P.S. But please take note of the approach which SPSS uses to work with missing
data. These settings are in a file's header.
|
Internal Error returned from Softlayer DNSManager API
Question: We are using the Python 2.7 and the Python Softlayer 3.0.1 package and calling
the `get_records` method on the `DNSManager` class. This is currently
returning an `Internal Server` error:
2016-05-11T11:18:04.117406199Z Traceback (most recent call last):
2016-05-11T11:18:04.117715505Z File "/opt/**/**/***.py", line 745, in <module>
2016-05-11T11:18:04.117927757Z httpDnsRecords = dnsManager.get_records(httpDomainRecordId, data=dataspace, type="cname")
2016-05-11T11:18:04.118072183Z File "/usr/local/lib/python2.7/dist-packages/SoftLayer/managers/dns.py", line 152, in get_records
2016-05-11T11:18:04.118152705Z filter=_filter.to_dict(),
2016-05-11T11:18:04.118302389Z File "/usr/local/lib/python2.7/dist-packages/SoftLayer/API.py", line 347, in call_handler
2016-05-11T11:18:04.118398852Z return self(name, *args, **kwargs)
2016-05-11T11:18:04.118512777Z File "/usr/local/lib/python2.7/dist-packages/SoftLayer/API.py", line 316, in call
2016-05-11T11:18:04.118632422Z return self.client.call(self.name, name, *args, **kwargs)
2016-05-11T11:18:04.118814604Z File "/usr/local/lib/python2.7/dist-packages/SoftLayer/API.py", line 176, in call
2016-05-11T11:18:04.118907953Z timeout=self.timeout)
2016-05-11T11:18:04.118995360Z File "/usr/local/lib/python2.7/dist-packages/SoftLayer/transports.py", line 64, in make_xml_rpc_api_call
2016-05-11T11:18:04.119096993Z e.faultCode, e.faultString)
2016-05-11T11:18:04.119547899Z SoftLayer.exceptions.SoftLayerAPIError: SoftLayerAPIError(SOAP-ENV:Server): Internal Error
The httpDomainRecordId is the Id for the domain obtained from softlayer and
dataspace is the string 'uk'.
Does anyone know why this would be returning an `Internal Error` from the
server?
Answer: Likely the error is due to the response containing a large amount of data; this
error is documented [here](http://sldn.softlayer.com/blog/phil/How-Solve-Error-fetching-http-headers), so you can try:
1.- Increase the timeout in the client.
2.- Add more filters in your request to limit the result; currently you are
using data and type, try adding host or ttl.
3.- You can try using limits, but the manager does not provide that option, so
you need to use API calls, e.g.
import SoftLayer
client = SoftLayer.Client()
zoneId = 12345
objectMask = "id,expire,domainId,host,minimum,refresh,retry, mxPriority,ttl,type,data,responsiblePerson"
result = client['Dns_Domain'].getResourceRecords(id=zoneId, mask=objectMask, limit=200, offset=0)
print (result)
|
Python - IndexError on a program that takes data from an URL
Question: When I try to execute the program, I keep getting an IndexError: list index
out of range. Here is my code:
''' This program accesses the Bloomberg US Stock information page.
It uses BeautifulSoup to parse the html and then finds the elements with the top 20 stocks.
It finds the the stock name, value, net change, and percent change.
'''
import urllib
from urllib import request
from bs4 import BeautifulSoup
# get the bloomberg stock page
bloomberg_url = "http://www.bloomberg.com/markets/stocks/world-indexes/americas"
try:
response = request.urlopen(bloomberg_url)
except urllib.error.URLError as e:
if hasattr(e, 'reason'):
print('We failed to reach a server.')
print('Reason: ', e.reason)
elif hasattr(e, 'code'):
print('The server couldn\'t fulfill the request.')
print('Error code: ', e.code)
else:
# the url request was successful
html = response.read().decode('utf8')
# use the BeautifulSoup parser to create the beautiful soup object with html structure
bloomsoup = BeautifulSoup(html, 'html.parser')
pagetitle = bloomsoup.title.get_text()
# the 20 stocks are stored in the first 10 "oddrow" tr tags
# and the 10 "evenrow" tr tags
oddrows = bloomsoup.find_all("tr",class_="oddrow")
evenrows = bloomsoup.find_all("tr",class_="evenrow")
# alternate odd and even rows to put all 20 rows together
allrows=[]
for i in range(12):
allrows.append(oddrows[i])
allrows.append(evenrows[i])
allrows.append(oddrows[12])
# iterate over the BeautifulSoup tr tag objects and get the team items into a dictionary
stocklist = [ ]
for item in allrows:
stockdict = { }
stockdict['stockname'] = item.find_all('a')[1].get_text()
stockdict['value'] = item.find("td",class_="pr-rank").get_text()
stockdict['net'] = item.find('span',class_="pr-net").get_text()
stockdict['%'] = item.find('td',align="center").get_text()
stocklist.append(stockdict)
# print the title of the page
print(pagetitle, '\n')
# print out all the teams
for stock in stocklist:
print('Name:', stock['stockname'], 'Value:', stock['value'], 'Net Change:', stock['net'],\
'Percent Change:', stock['%'])
Answer: `oddrows` and `evenrows` only have 10 elements according to your comment.
> the 20 stocks are stored in the first 10 "oddrow" tr tags and the 10
> "evenrow" tr tags
But you loop 12 times instead of 10: `for i in range(12):`
Change 12 to 10 and it should work.
_Side note:_ I don't suggest hardcoding that value.
You could replace
allrows=[]
for i in range(12):
allrows.append(oddrows[i])
allrows.append(evenrows[i])
with
allrows=[]
for x,y in zip(oddrows,evenrows):
allrows.append(x)
allrows.append(y)
|
Tkinter, retrieving value from an entry
Question: So I am making a simple GUI program in Python using Tkinter. The problem is
that when I try to retrieve the value from my Entry using a button, it comes
back with an error:
print(ex1.get())
AttributeError: 'NoneType' object has no attribute 'get'
Can someone please help me avoid this?
This is my code:
root = Tk()
root.minsize(300,300)
root.maxsize(300,300)
n = ttk.Notebook()
f1 = Frame(n,height=280,width=280)
f1.propagate(0)
lx1 = Label(f1,text="x",width=5).grid(row = 1,column=0)
ex1 = Entry(f1,width = 10).grid(row = 1,column = 1)
ly1 = Label(f1,text="y",width=5).grid(row=3,column=0)
ey1 = Entry(f1,width = 10).grid(row=3,column = 1)
def value():
print(ex1.get())
Bcreate = Button(f1,text="CREATE",command=value).grid(row = 10,column = 5)
n.add(f1,text="add point")
f2 = Frame(n)
n.add(f2,text="draw line")
n.pack()
Thank you for your help.
Answer: Please read how to post a proper [MCVE](https://stackoverflow.com/help/mcve).
Here is one for your problem:
from tkinter import Tk, Entry
root = Tk()
ent = Entry(root).pack()
print(ent)
print(ent.get())
This prints
None
Traceback (most recent call last):
File "F:\Python\mypy\tem.py", line 6, in <module>
print(ent.get())
AttributeError: 'NoneType' object has no attribute 'get'
At this point, it should be obvious that `.pack()` returns `None`. So instead,
keep a reference to the widget returned by `Entry` and call `pack` on it separately.
from tkinter import Tk, Entry
root = Tk()
ent = Entry(root)
ent.pack()
print(ent)
print(ent.get())
to get the widget name, followed by an empty string because nothing has been
typed into the entry yet:
.2425099144832
|
Reading data from files in python with intelligent format
Question: I have this data table:
Ciudad means City, Fase means Phase and Tarea means Task.
The data table is saved into a file with this format:
Giron 20 15,18 40 50 60,77 37 45
Floridablanca 17 13,17 35 43 55,67 39 46
Bogota 15 12,17 35 43 55,67 39 46
Cali 14 12,10 30 40 32,59 67 33
The numbers means millions (20 Million, 18 million, etc.)
**Each city is a line. Phases are delimited by "," and tasks are delimited by
space**
I need to read this file from Python and be able to work with the tasks and
phases of every single city: calculate the most expensive task in a city, the
most expensive phase, etc.
The problem is that I don't really know how to read and store the data in order
to start those calculations.
I have been trying 1d and 2d arrays with NumPy (loadtxt,
genfromtxt), but the resulting data is not so clear and I can't figure out how to
work with it.
Answer:
import re
line = 'Santa Rosa de Cabal 20 15,18 40 50 60,77 37 45'
city, phase1, phase2, phase3 = re.match(
'(.+) (\d+ \d+),(\d+ \d+ \d+ \d+),(\d+ \d+ \d+)', line).groups()
def tasks(phase_string):
return [int(task) for task in phase_string.split()]
print(city)
for phase in phase1, phase2, phase3:
print(tasks(phase))
Output:
Santa Rosa de Cabal
[20, 15]
[18, 40, 50, 60]
[77, 37, 45]
The main thing here is regular expressions. Read about them.
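To extend this to the whole file and to the calculations mentioned in the question (most expensive task and most expensive phase per city), a sketch along the same lines; the file name 'cities.txt' is just an assumption:
import re
pattern = re.compile(r'(.+) (\d+ \d+),(\d+ \d+ \d+ \d+),(\d+ \d+ \d+)')
with open('cities.txt') as f:
    for line in f:
        m = pattern.match(line.strip())
        if not m:
            continue
        city = m.group(1)
        phases = [[int(task) for task in group.split()] for group in m.groups()[1:]]
        dearest_task = max(max(phase) for phase in phases)
        dearest_phase = max(range(len(phases)), key=lambda i: sum(phases[i]))
        print(city, dearest_task, dearest_phase + 1)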
|
What is the proper way to read a text file and display in HTML via Flask/Python?
Question: Specifically, let's talk about large text/log files that we want to display in
HTML. I've seen various methods, but I'm concerned about scalability.
What I would like to do is take a log file with space-separated data and then
put it into a table displayed in HTML.
Example from the text file:
2016-05-10 21:13:56.030616 Button_Pressed
2016-05-10 21:14:44.534093 Door_Opened
2016-05-10 21:14:46.850801 Button_Pressed
2016-05-10 21:15:04.383880 Door_Closed
I'd like to take each line and convert this to a table with headings "Date,
Time, Event".
The easy way would be to use some kind of list, where we read each line into
the list, then pass the list to the HTML on a return.
Example Python code excerpt from a Flask web app (app.py):
@app.route('/history')
def history():
with open('events.log') as event_log:
event_lines = event_log.readlines()
door_history = []
events = len(event_lines)
for x in range(events):
door_history.append(event_lines[x].split(" "))
return render_template('door-log.html', events=events, door_history=door_history)
Example HTML/Jinja code excerpt from the Flask web app (door-log.html)
<table>
<tr>
<th>Date</th>
<th>Time</th>
<th>Event</th>
</tr>
{% for row in range(events) %}
<tr>
{% for column in range(3) %}
<td> {{ door_history[row][column] }} </td>
{% endfor %}
</tr>
{% endfor %}
</table>
However, is this method a good use of memory? Would it
scale if many people accessed the same page? Is this the proper way
to do this kind of thing?
Answer: I would not say that your way is exactly the proper way of doing this. However
for a small application it will work and be faster than most other solutions.
If your log file grows over time you would not want to show it in its
entirety. Instead you might want to implement only showing the latest 10 or so
events, offering a second page with more events (pagination). In this case you
could probably still work with an adapted version of the above solution.
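A minimal sketch of that "latest N events" variant, reusing the view from the question (the per_page value is just an example):
@app.route('/history')
def history():
    per_page = 10
    with open('events.log') as event_log:
        event_lines = event_log.readlines()
    # keep only the most recent entries
    door_history = [line.split(" ") for line in event_lines[-per_page:]]
    return render_template('door-log.html',
                           events=len(door_history),
                           door_history=door_history)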
If however you either have lots of page views or want advanced features (such
as showing the last week of events, showing just door-opening events, or
showing all door-opening events that happened in the first calendar week),
then you would want to make your data more accessible by importing it
into some kind of database.
You might use SQLite as the database, though if you really have a lot of users
something like PostgreSQL would be the better option. You would not import the
data in your views; instead you would have a dedicated task running that
updates your database every now and again. In the simplest case this might be a
cron job that runs every minute. You might also use a more advanced
solution like Celery. In either case you might end up with slightly outdated
data in your database and therefore on your website; there isn't really a way
around this.
For information on how to integrate a database into your application see the
[official flask documentation](http://flask.pocoo.org/docs/0.10/tutorial/).
For more information on celery (for regular database updates) take a look at
the [flask documentation on the
topic](http://flask.pocoo.org/docs/0.10/patterns/celery/). The slightly more
hacky way to do this would be cron jobs, as explained
[here](https://help.ubuntu.com/community/CronHowto).
All that being said: if this is just a simple home project and you are not in
it for the learning experience, your current solution will 100% suffice. If you
are interested in learning more or want lots of people to use it, the
database-backed approach above is the way to go.
|
labeled intervals in matplotlib
Question: I'm referring to the question on [Plotting labeled intervals in
matplotlib/gnuplot](http://stackoverflow.com/questions/7684475/plotting-labeled-intervals-in-matplotlib-gnuplot/37159537#37159537);
the problem with the solution given there is that it doesn't work with only
one line of data in the file. This is the code I'm trying:
#!/usr/bin/env python
#
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter, MinuteLocator, SecondLocator
import numpy as np
from StringIO import StringIO
import datetime as dt
a=StringIO("""MMEX 2016-01-29T12:38:22 2016-01-29T12:39:03 SUCCESS
""")
#Converts str into a datetime object.
conv = lambda s: dt.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
#Use numpy to read the data in.
data = np.genfromtxt(a, converters={1: conv, 2: conv},
names=['caption', 'start', 'stop', 'state'], dtype=None)
cap, start, stop = data['caption'], data['start'], data['stop']
#Check the status, because we paint all lines with the same color
#together
is_ok = (data['state'] == 'SUCCESS')
not_ok = np.logical_not(is_ok)
#Get unique captions and there indices and the inverse mapping
captions, unique_idx, caption_inv = np.unique(cap, 1, 1)
#Build y values from the number of unique captions.
y = (caption_inv + 1) / float(len(captions) + 1)
#Plot function
def timelines(y, xstart, xstop, color='b'):
"""Plot timelines at y from xstart to xstop with given color."""
plt.hlines(y, xstart, xstop, color, lw=4)
plt.vlines(xstart, y+0.005, y-0.005, color, lw=2)
plt.vlines(xstop, y+0.005, y-0.005, color, lw=2)
#Plot ok tl black
timelines(y[is_ok], start[is_ok], stop[is_ok], 'k')
#Plot fail tl red
timelines(y[not_ok], start[not_ok], stop[not_ok], 'r')
#Setup the plot
ax = plt.gca()
ax.xaxis_date()
myFmt = DateFormatter('%Y-%m-%dT%H:%M:%S')
ax.xaxis.set_major_formatter(myFmt)
ax.xaxis.set_major_locator(SecondLocator(interval=3600)) # used to be SecondLocator(0, interval=20)
#To adjust the xlimits a timedelta is needed.
delta = (stop.max() - start.min())/10
plt.yticks(y[unique_idx], captions)
plt.ylim(0,1)
plt.xlim(start.min()-delta, stop.max()+delta)
plt.xlabel('Time')
plt.xticks(rotation=70)
plt.show(block=True)
When I try this code, I get the following error:
Traceback (most recent call last):
File "./testPlot.py", line 49, in <module>
timelines(y[is_ok], start[is_ok], stop[is_ok], 'k')
ValueError: boolean index array should have 1 dimension
Also, when I try to add a dummy line to the data, let's say "MMEX
2016-01-01T00:00:00 2016-01-01T00:00:00 SUCCESS", the plot works but doesn't
look good.
Any suggestions? I tried to put this on the same post when I found
the solution, but I don't have enough reputation...
Thanks in advance
Answer: The issue is that when you only read 1 item with `np.genfromtxt`, it is
producing scalars (0-dimensions). We need them to be at least 1D.
You can add these lines just above where you define your `timelines` function,
and then everything works ok.
This makes use of the `numpy` function
[`np.atleast_1d()`](http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.atleast_1d.html),
to turn the scalars into 1D `numpy` arrays.
#Check the dimensions are at least 1D (for 1-item data input)
if start.ndim < 1:
start = np.atleast_1d(start)
    if stop.ndim < 1:
stop = np.atleast_1d(stop)
if is_ok.ndim < 1:
is_ok = np.atleast_1d(is_ok)
if not_ok.ndim < 1:
        not_ok = np.atleast_1d(not_ok)
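Since `np.atleast_1d` leaves arrays that are already 1D untouched and accepts several arrays at once (returning them as a list), the four checks can also be collapsed into a single line:
    start, stop, is_ok, not_ok = np.atleast_1d(start, stop, is_ok, not_ok)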
The output:
[](http://i.stack.imgur.com/5JY1t.png)
|
TypeError: POST data should be bytes or an iterable of bytes. It cannot be of type str
Question: My Code.
#!/usr/bin/env python
#coding: utf-8
userid="[email protected]"
passwd="********"
import sys, re, cgi, urllib, urllib.request, urllib.error, http.cookiejar, xml.dom.minidom, time, urllib.parse
import simplejson as json
def getToken():
html = urllib.request.urlopen("http://www.nicovideo.jp/my/mylist").read()
for line in html.splitlines():
mo = re.match(r'^\s*NicoAPI\.token = "(?P<token>[\d\w-]+)";\s*',line)
if mo:
token = mo.group('token')
break
assert token
return token
def mylist_create(name):
cmdurl = "http://www.nicovideo.jp/api/mylistgroup/add"
q = {}
q['name'] = name.encode("utf-8")
q['description'] = ""
q['public'] = 0
q['default_sort'] = 0
q['icon_id'] = 0
q['token'] = token
cmdurl += "?" + urllib.parse.urlencode(q).encode("utf-8")
j = json.load( urllib.request.urlopen(cmdurl), encoding='utf-8')
return j['id']
def addvideo_tomylist(mid,smids):
for smid in smids:
cmdurl = "http://www.nicovideo.jp/api/mylist/add"
q = {}
q['group_id'] = mid
q['item_type'] = 0
q['item_id'] = smid
q['description'] = u""
q['token'] = token
cmdurl += "?" + urllib.parse.urlencode(q).encode("utf-8")
j = json.load( urllib.request.urlopen(cmdurl), encoding='utf-8')
time.sleep(0.5)
#Login
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(http.cookiejar.CookieJar()))
urllib.request.install_opener(opener)
urllib.request.urlopen("https://secure.nicovideo.jp/secure/login",
urllib.parse.urlencode( {"mail":userid, "password":passwd}) ).encode("utf-8")
#GetToken
token = getToken()
#MakeMylist&AddMylist
mid = mylist_create(u"Testlist")
addvideo_tomylist(mid, ["sm9","sm1097445", "sm1715919" ] )
MyError.
Traceback (most recent call last):
File "Nico3.py", line 48, in <module>
urllib.parse.urlencode( {"mail":userid, "password":passwd}) ).encode("utf-8")
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/urllib/request.py", line 162, in urlopen
return opener.open(url, data, timeout)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/urllib/request.py", line 463, in open
req = meth(req)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/urllib/request.py", line 1170, in do_request_
raise TypeError(msg)
TypeError: POST data should be bytes or an iterable of bytes. It cannot be of type str.
I've tried encode but it did not help. I'm a Japanese academic student and
couldn't resolve this with my own knowledge. I am aware of this similar question,
TypeError: POST data should be bytes or an iterable of bytes. It cannot be
str, but am too new for the answer to be much help.
Answer: Your paren is in the wrong place, so you are not actually encoding the POST data:
    .urlencode({"mail":userid, "password":passwd}).encode("utf-8")) # <- move inside
|
Find course name using nltk and python
Question: I am new to NLTK. I have a file with unstructured text. From the text I want
to extract the phrases which describe the course name or education.
ex: TEXT 1: I am an Mechanical Engineer TEXT 2: I have completed my graduation
in Computer Science Engineering
The words I want to extract are only "Mechanical Engineer" and "Computer
Science Engineering".
How can this be done? How do I build regular expressions to match them, if
possible?
Answer: If all you want to extract is "Mechanical Engineer" and "Computer Science
Engineering", then just import
[`re`](https://docs.python.org/2/library/re.html) (not `NLTK`) and search for
`"[mM]echanical [eE]ngineer"` and `"[cC]omputer [sS]cience [eE]ngineering"`.
If, on the other hand, you want to do this for every major your university
offers, give [WordNet](http://www.nltk.org/howto/wordnet.html) a look. It has
support for tokenizing/lemmatizing ngrams (ideas that span more than one
word).
|
Using findall function in module re for python
Question: I wrote code like this:
>>> import re
>>> url='<a href="C:\python34\koala.jpg">jpg</a><font size="10">'
>>> print(re.findall('href="(.*?)"><',url))
I expected this result:
C:\python34\koala.jpg">jpg</a
But I can see only this result :
[]
Why is this happening?
I don't know why I get this result in the console.
Please help me.
I am using python 3.4 and windows8.1.
Answer: Your pattern requires the literal sequence `"><` right after the captured group, and that sequence never occurs in `url`, so `findall` returns an empty list.
Are you sure you want the `>jpg</a` part too? If yes then you can use this:
>>> re.findall('href="([^"]*">[^<]*</a)',url)
['C:\\python34\\koala.jpg">jpg</a']
If you need only the `href` attributes value then you can use:
>>> re.findall('href="([^"]*")',url)
['C:\\python34\\koala.jpg"']
|
Genia Tagger cannot find file in Python2.7 (using Windows)
Question: (The OS on my computer is Windows8) I tried to install and run geniatagger by
following these steps:
In cmd:
python setup.py install
In Python shell:
    from geniatagger import GeniaTagger
tagger = GeniaTagger('C:\Users\wen\Downloads\geniatagger\geniatagger')
error messages:
Traceback (most recent call last):
File "<pyshell#20>", line 1, in <module>
tagger = GeniaTagger('C:\Users\wen\Downloads\geniatagger\geniatagger')
File "build\bdist.win-amd64\egg\geniatagger.py", line 21, in __init__
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
File "D:\Program Files\Python27\lib\subprocess.py", line 710, in __init__
errread, errwrite)
File "D:\Program Files\Python27\lib\subprocess.py", line 958, in _execute_child
startupinfo)
WindowsError: [Error 2] The system cannot find the file specified
I have looked for many solutions but the error still can't be fixed. How can I
fix it?
Answer: Notice that the error says `D:` instead of `C:`. Something in your shell setup
seems to be defaulting to the D drive. Is this script is being run from the C
drive or D? If it's the C drive, you could try using relative pathing instead
of absolute. If it's running from the D drive, can you run it from C? If it
must run from D, you'll have to dig into the genia tagger docs to figure out
how to do some custom configuration.
|
How to configure PyCharm to develop LibreOffice Python macros?
Question: I've installed Python 3.5.1 as default in Win7(x64) for all my projects in
Python.
I use PyCharm 5.0.5 community edition for develop Python scripts and its
default settings has "Default Project Interpreter" as `"3.5.1
(C:\Python35\python.exe)"`
At my work we are migrating from MS Office 2007/2010 to LibreOffice-5. I wrote
some macros in VBA, even though I'm not a VB enthusiast. Basic lacks good
data structures, such as lists (I love list comprehensions), dictionaries,
sets and tuples. So, I want to rewrite the VBA macros as LibreOffice-5 Python
script macros.
The LibreOffice-5 installation has its own embedded Python at `"C:\Program Files
(x86)\LibreOffice 5\program"`, version 3.3.5. Python scripts in the
LibreOffice-5 installation are located at:
* Libre Office Macros; `"C:\Program Files (x86)\LibreOffice 5\share\Scripts\python"`
* My Macros; `"C:\Users\trimax\AppData\Roaming\LibreOffice\4\user\Scripts\python"`
The question is simple:
I need to configure PyCharm to develop the Python scripts for LibreOffice
macros with the embedded Python version. I don't know if I need to set up a
virtual environment or if I can just set the Project Interpreter.
By the way, is there any method to insert macros into the document, so they are shared
with the document, like the VBA project modules?
Answer: From the [PyCharm
documentation](https://www.jetbrains.com/help/pycharm/2016.1/quick-start-
guide.html?origin=old_help#init), it sounds like you could use a virtual
environment to target LibreOffice (likely Python 3) and OpenOffice (likely
Python 2) in two different projects. Otherwise it looks like a local
interpreter is enough.
To test PyCharm, I did the following:
1. Download PyCharm and create a new project.
2. It asks which interpreter to use. Click on the gear icon and specify `Add Local`. Browse to `C:\Program Files (x86)\LibreOffice 5\program\python.exe`.
3. Create a new python file.
Then add this code:
import uno
from com.sun.star.awt import Point
p = Point(2,3)
print(p.X)
points = uno.Any("[]com.sun.star.awt.Point", (p,))
print(repr(points))
It underlined the `com` import statement, although it's not actually an error.
PyCharm did recognize the other statements such as `uno.Any`.
To run, go to `Run -> Run`. It ran successfully and printed results as
expected.
Instead of an IDE, I typically just use a text editor. From what I have seen,
a lot of the IDE tools (syntax highlighting, auto completion, debugging) do
not work very well with UNO anyway. It is better with Java, but that is a
different topic.
> By the way, are there any method to insert macros in the document, to share
> it with the document [...]?
To embed Python code into a document, unzip the .odt file and follow the
instructions
[here](https://wiki.openoffice.org/wiki/Python_as_a_macro_language#Embedded_within_an_OpenOffice.org.27s_document).
|
concatenating two multidimensional arrays in numpy
Question: I have two arrays `A` and `B`,
>> np.shape(A)
>> (7, 6, 2)
>> np.shape(B)
>> (6,2)
Now, I want to concatenate the two arrays such that `A` is extended to
`(8,6,2)` with `A[8] = B`
I tried `np.concatenate()`
>> np.concatenate((A,B),axis = 0)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-40-d614e94cfc50> in <module>()
----> 1 np.concatenate((A,B),axis = 0)
ValueError: all the input arrays must have same number of dimensions
and `np.vstack()`
>> np.vstack((A,B))
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-41-7c091695f277> in <module>()
----> 1 np.vstack((A,B))
//anaconda/lib/python2.7/site-packages/numpy/core/shape_base.pyc in vstack(tup)
228
229 """
--> 230 return _nx.concatenate([atleast_2d(_m) for _m in tup], 0)
231
232 def hstack(tup):
ValueError: all the input arrays must have same number of dimensions
Answer: Likely the simplest way is to use numpy
[newaxis](http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#numpy.newaxis)
like this:
import numpy as np
A = np.zeros((7, 6, 2))
B = np.zeros((6,2))
C = np.concatenate((A,B[np.newaxis,:,:]),axis=0)
print(A.shape,B.shape,C.shape)
, which results in this:
(7, 6, 2) (6, 2) (8, 6, 2)
As @sascha mentioned you can use
[vstack](http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.vstack.html)
(also see
[hstack](http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.hstack.html#numpy.hstack),
[dstack](http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.dstack.html#numpy.dstack))
to perform direct concatenation operations with an implicit axis (`axis=0`,
`axis=1`, `axis=2` respectively):
D = np.vstack((A,B[np.newaxis,:,:]))
print(D.shape)
, result:
(8, 6, 2)
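If you prefer an explicit function call over slicing, `np.expand_dims(B, axis=0)` builds the same leading axis as `B[np.newaxis,:,:]`:
    E = np.concatenate((A, np.expand_dims(B, axis=0)), axis=0)
    print(E.shape)  # (8, 6, 2)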
|
nltk sentence tokenizer gives AttributeError
Question: I am very new to python and NLTK. One issue is baffling me:
When I do
tokenized = custom_sent_tokenizer.tokenize("some long text")
It gives me a perfect result. But when I change this hard-coded string to a
variable containing a huge text, it gives me the error mentioned in the subject, viz:
tokenized = custom_sent_tokenizer.tokenize(text)
...
AttributeError: 'list' object has no attribute 'abbrev_types'
Below is my full code:
from __future__ import division
import urllib.request
import csv
import nltk
from string import punctuation
from nltk.corpus import stopwords
from nltk.tokenize import PunktSentenceTokenizer
comments = open("CNPS_Comments.txt").read()
comments_list = comments.split('\n')
custom_sent_tokenizer = PunktSentenceTokenizer(comments_list[:300])
##tokenized = custom_sent_tokenizer.tokenize("some long text")
text=""
for comment in comments_list:
text += comment
tokenized = custom_sent_tokenizer.tokenize(text)
def process_content():
try:
for i in tokenized[:5]:
words = nltk.word_tokenize(i)
tagged = nltk.pos_tag(words)
print(tagged)
except Exception as e:
print(str(e))
process_content()
I started with python today and there could be many things I am not doing
effectively here.
Answer: The line that's giving you trouble is correct: That's how you're supposed to
use the sentence tokenizer, with a single string as its argument. You're
getting an error because you have created a monster :-)
The Punkt sentence tokenizer is based on an unsupervised algorithm: You give
it a long text, and it figures out where the sentence boundaries must lie. But
you have trained your tokenizer with a _list_ of sentences (the first 300
elements in `comments_list`), which is incorrect. Somehow the tokenizer
doesn't notice, and gives you something that errors out when you try to use it
properly.
To fix the problem, train your tokenizer with a single string. You can best
join a list of strings into one like this:
tokenizer = PunktSentenceTokenizer(" ".join(comments_list[:300]))
PS. You must be wrong about it working successfully when you tokenized a
literal string. Certainly there were other differences between the code that
worked, and the code in your question.
|
Signing files/file objects using python and pyopenssl
Question: I have the following code which works perfectly for signing strings. However,
I now need to programmatically sign a file and get a signature for it, in the same
way as I would using OpenSSL on the command line,
e.g. openssl dgst -sha1 -sign key.pem -out sig1 file.tar
import OpenSSL
from OpenSSL import crypto
import base64
key_file = open("key.pem", "r")
key = key_file.read()
key_file.close()
password = "password"
if key.startswith('-----BEGIN '):
pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key, password)
else:
pkey = crypto.load_pkcs12(key, password).get_privatekey()
print pkey
data = "data"
sign = OpenSSL.crypto.sign(pkey, data, "sha256")
print sign
data_base64 = base64.b64encode(sign)
print data_base64
If I open a file and try to sign it:
with open('file.tar', 'r') as the_file:
sign = OpenSSL.crypto.sign(pkey, the_file, "sha256")
the_file.write(sign)
the_file.close()
OpenSSL throws an error:
sign = OpenSSL.crypto.sign(pkey, the_file, "sha256")
TypeError: must be string or read-only buffer, not file
How can I sign the file object?
Answer: The error states that you are passing an instance of file, when a string or
read-only buffer was expected. Try replacing the_file with the_file.read().
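A minimal sketch of that change (opening the file in binary mode so the exact bytes of the archive are what gets signed):
    with open('file.tar', 'rb') as the_file:
        data = the_file.read()  # the whole archive as a byte string
    sign = OpenSSL.crypto.sign(pkey, data, "sha256")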
Side note: if you are attempting to encrypt and/or sign files, take a look at
[Cryptographic Message
Syntax](https://en.wikipedia.org/wiki/Cryptographic_Message_Syntax) (CMS)
which is supported by
[ctypescrypto](https://pypi.python.org/pypi/ctypescrypto). [This
article](http://www.cryptosys.net/pki/manpki/pki_signeddata.html) will
introduce the SignedData content type, which I think is what you are really
after.
|
Python client can not communicate with ruby server. I get [Errno 10061] No connection could be made because the target machine actively refused it
Question: I have the intention of running machine learning algorithms written in Python
on data in a database of a Ruby on Rails app. After some research I have
discovered sockets and therefore created a Ruby server and Python client. I am
running them both on two different command prompt terminals.
Here is the Ruby server code:
require "socket"
server = TCPServer.open(2000)
loop {
client = server.accept
client.puts(Time.now.ctime)
client.puts "Closing the connection. Bye!"
client.close
}
Here is the Python client code:
import socket
s = socket.socket()
host = "localhost"
port = 2000
s.connect((host , port))
I do not understand where the problem is. Kindly assist.
Answer: Given the insightful answers to my question above, the Ruby server and Python
client code should be as below.
For the Ruby server:
require "socket" # Get sockets from stdlib
server = TCPServer.open("127.0.0.1" , 2000) # Socket to listen on port 2000
loop { # Server runs forever
client = server.accept # Wait for a client to connect
client.puts(Time.now.ctime) # Send the time to the client
client.puts "Closing the connection. Bye!"
client.close # Disconnect from the client
}
For the Python client:
import socket # Import socket module
s = socket.socket() # Create a socket object
host = "127.0.0.1"
port = 2000 # Reserve a port for your service.
s.connect((host , port))
print s.recv(1024)
s.close() # Close the socket when done
The open() method of the TCPServer class in Ruby takes two parameters: the
first is the host name and the second the port, i.e.
TCPServer.open(hostname , port)
|
'TimeGenerator' object has no attribute 'iso' when self.iso is defined. Shorter version of GUI does not print
Question: this is my program so far.
from tkinter import *
import math
class TimeGenerator:
def __init__(self,master):
frame = Frame(master)
frame.grid()
label_iso = Label(root, text="Isotope A, Element")
label_vol = Label(root, text="Voltage")
label_range = Label(root, text="Charge Range")
entry_iso = Entry(root)
entry_vol = Entry(root)
entry_range = Entry(root)
label_iso.grid(row=0, sticky=E)
label_vol.grid(row=1, sticky=E)
label_range.grid(row=2, sticky=E)
entry_iso.grid(row=0, column=1)
entry_vol.grid(row=1, column=1)
entry_range.grid(row=2,column=1)
button = Button(root, text='Time Range')
button.grid(row=3, columnspan=2)
frame.bind(button,self.calculateTime())
self.iso = entry_iso.get()
self.vol = entry_vol.get()
self.r = entry_range.get()
def calculateTime(self):
x = 5
self.iso.replace(" ", "")
list = []
for e in self.iso.split(","):
list.append(e)
f = open("/Users/LazyLinh/PycharmProjects/mass.mas12.txt", "r")
i = 0
while (i < 40):
header = f.readline()
i += 1
for line in f:
line = line.strip()
columns = line.split()
if (list[0] == columns[5]):
if (list[1] == columns[6]):
self.mass = float(columns[13]) + float(columns[14])
self.r.replace(" ", "")
tup = tuple(int(x) for x in self.r.split(","))
list = []
for q in range(tup[0], tup[1] + 1):
y = x * math.sqrt(self.mass / (2 * q * float(self.vol)))
list.append(y)
i = tup[0]
for time in list:
print(i, ':', time)
i = i + 1
root = Tk()
b = TimeGenerator(root)
root.mainloop()
However, I got an error message saying iso attribute doesn't exist. Meanwhile,
the shorter version of the code (just to test things out) below:
from tkinter import *
class TimeGenerator:
def __init__(self, master):
frame = Frame(master)
frame.grid()
label_iso = Label(root, text="Isotope A, Element")
label_vol = Label(root, text="Voltage")
label_range = Label(root, text="Charge Range")
entry_iso = Entry(root)
entry_vol = Entry(root)
entry_range = Entry(root)
label_iso.grid(row=0, sticky=E)
label_vol.grid(row=1, sticky=E)
label_range.grid(row=2, sticky=E)
entry_iso.grid(row=0, column=1)
entry_vol.grid(row=1, column=1)
entry_range.grid(row=2, column=1)
self.iso = entry_iso.get()
self.vol = entry_vol.get()
self.r = entry_range.get()
button = Button(root, text='Time Range')
button.grid(row=3, columnspan=2)
frame.bind(button, self.calculateTime())
def calculateTime(self):
self.iso.replace(" ", "")
list = []
for e in self.iso.split(","):
list.append(e)
f = open("/Users/LazyLinh/PycharmProjects/mass.mas12.txt", "r")
i = 0
while i < 40:
header = f.readline()
i += 1
for line in f:
line = line.strip()
columns = line.split()
if (list[0] == columns[5]):
if (list[1] == columns[6]):
self.mass = float(columns[13]) + float(columns[14])
self.r.replace(" ", "")
self.r.replace("(", "")
self.r.replace(")", "")
print(self.r)
root = Tk()
b = TimeGenerator(root)
root.mainloop()
There are no 'no attribute' errors, meaning self.r does create the attribute
'r'. But still, nothing gets printed to the console, and I can't see why. Can
you please help me out?
P/S: I just started Python a couple of days ago, so even if there are some very
obvious mistakes, they might not be obvious to me, so please be kind :)
Answer: This line is wrong:
frame.bind(button,self.calculateTime())
Instead, try:
frame.bind(button,self.calculateTime)
In the first instance, you invoke `calculateTime` and pass the resulting value
to `bind`. In the second instance, you pass a reference to the function itself
to `bind`.
|
Compare columns from different excel files and add a column at the beginning of each with the output
Question: I want to start this by saying that I'm not an Excel expert so I kindly need
some help.
Let's assume that I have 3 excel files: `main.xlsx`, `1.xlsx` and `2.xlsx`. In
all of them I have a column named `Serial Numbers`. I have to:
* look up all _serial numbers_ in `1.xlsx` and `2.xlsx` and verify whether they are in `main.xlsx`.
If a serial number is found:
* on the last column of `main.xlsx`, on the same row as the _serial number_ that was found, write `OK + name_of_the_file_in which_it_was_found`. Else, write `NOK`. At the same time, write `ok` or `nok` in the last column of `1.xlsx` and `2.xlsx` depending on whether the _serial number_ was found or not.
_Mention_ : `serial number` can be on different columns on `1.xlsx` and
`2.xlsx`
### Example:
**main.xlsx**
name date serial number phone status
a b abcd c <-- ok,2.xlsx
b c 1234 d <-- ok,1.xlsx
c d 3456 e <-- ok,1.xlsx
d e 4567 f <-- NOK
e f g <-- skip,don't write anything to status column
**1.xlsx**
name date serial number phone status
a b 1234 c <-- OK (because is find in main)
b c lala d <-- NOK (because not find in main)
c d 3456 e <-- OK (because find main)
d e jjjj f <-- NOK (because not find in main)
e f g <-- skip,don't write anything to status column
**2.xlsx**
name date serial number phone status
a b c <-- skip,don't write anything to status column
b c abcd d <-- OK (because find main)
c d 4533 e <-- NOK (because not find in main)
d e jjjj f <-- NOK (because not find in main)
e f g <-- skip,don't write anything to status column
Now, I tried doing this in Python, but apparently I couldn't figure out how to
write to the status column (I tried using `dataFrames`) on the same line where
the `serial number` is found. Any help would be much appreciated (or at least
some guidance).
My problem it's not finding the duplicates, but rather keeping track of the
rows (to write the status on the correct `serial number`) and writing to the
excel at the specified column (`status` column).
**My try:**
import pandas as pd
get_main = pd.ExcelFile('main.xlsx')
get_1 = pd.ExcelFile('1.xlsx')
get_2 = pd.ExcelFile('2.xlsx')
sheet1_from_main = get_main.parse(0)
sheet1_from_1 = get_1.parse(0)
sheet1_from_2 = get_2.parse(0)
column_from_main = sheet1_from_main.iloc[:, 2].real
column_from_main_py = []
for x in column_from_main:
column_from_main_py.append(x)
column_from_1 = sheet1_from_1.iloc[:, 2].real
column_from_1_py = []
for y in column_from_1:
column_from_1_py.append(y)
column_from_2 = sheet1_from_2.iloc[:, 2].real
column_2_py = []
for z in column_from_2:
column_2_py.append(z)
* * *
Suggested edit:
import pandas as pd
get_main = pd.read_excel('main.xls', sheetname=0)
get_1 = pd.read_excel('1.xls', sheetname=0)
get_2 = pd.read_excel('2.xls', sheetname=0)
column_from_main = get_main.ix[:, 'Serial No.'].real
column_from_main_py = column_from_main.tolist()
column_from_1 = get_1.ix[:, 'SERIAL NUMBER'].real
column_from_1_py = column_from_1.tolist()
column_from_2 = get_2.ix[:, 'S/N'].real
column_from_2_py = column_from_2.tolist()
# Tried to put example data at specific column
df = pd.DataFrame({'Data': [10, 20, 30, 20, 15, 30, 45]})
writer = pd.ExcelWriter('first.xlsx', engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1')
workbook = writer.book
worksheet = writer.sheets['Sheet1']
worksheet.set_column('M:M', None, None)
writer.save()
Answer: First off you can skip using excelfile and parse by using
`pd.read_excel(filename, sheetname=0)`.
As far as you columns go, try accessing the columns by name, not by index. And
instead of using a for loop to create a list, use the tolist method. So
instead of `column_from_main = sheet1_from_main.iloc[:, 2].real` you could
say:
column_from_main = get_main.ix[:, 'serial number'].real
column_from_main_py = column_from_main.tolist()
Do the same for your other files as well. This will remove any issues with the
serial number column being indexed differently and will operate faster.
As to your comment about not being able to write to 'Status' properly, can you
show your code that you tried? I'd be more than happy to help, but it's nice
to see what you've done to this point.
For checking the values in main against the other two files you will want to
iterate over the lists you created and check if each value in the main list is
in either of the other lists. Within that loop you can then assign the value
of status based on whether the serial number in main is present in one, none,
or both:
get_main['status'] = ''
get_1['status'] = ''
get_2['status'] = ''
    for num in column_from_main_py:
        if num not in column_from_1_py and num not in column_from_2_py:
            get_main.loc[get_main['serial number'] == num, 'status'] = 'NOK'
        elif num in column_from_1_py and num not in column_from_2_py:
            get_main.loc[get_main['serial number'] == num, 'status'] = 'OK,1.xlsx'
            get_1.loc[get_1['serial number'] == num, 'status'] = 'OK'
        elif num not in column_from_1_py and num in column_from_2_py:
            get_main.loc[get_main['serial number'] == num, 'status'] = 'OK,2.xlsx'
            get_2.loc[get_2['serial number'] == num, 'status'] = 'OK'
The lines get_main.loc are where you are setting the OK or NOK value to the
status column. Essentially it finds the index where some condition is true and
then lets you change the value of a specific column at that index. Once you
have gone through the main list then you can look through the lists for 1 and
2 to find serial numbers that aren't in main. Similarly:
for num in column_from_1_py:
if num not in column_from_main_py:
get_1.loc[get_1['serial number'] == num, 'status'] = 'NOK'
for num in column_from_2_py:
if num not in column_from_main_py:
get_2.loc[get_2['serial number'] == num, 'status'] = 'NOK'
That will set you NOK values and you should be good to go ahead and export the
dataframes to excel (or csv, hdf, sql, etc...) and that should do it.
There are lots of ways you can index and select data in pandas depending on
what you want to do. I recommend reading the [Indexing and Selecting
Data](http://pandas.pydata.org/pandas-
docs/version/0.17.0/indexing.html#indexing-and-selecting-data) page in the
docs as it has been a great reference for me.
|
How to send files discovered by glob to (default) printer
Question: I am trying to write a program that will take all the files with a specified
extension in a directory and print them consecutively. It would be great if
they could be printed more than once each time but one step at a time for now.
I plan to keep the .py file in the directory that I will be running the glob
on to avoid directory placeholder headaches...
I have tried the following:
import os
import glob
os.startfile((glob.glob('*.docx*')), "print")
This gives me the following message:
Traceback (most recent call last):
File "C:\Users\cmobley\Desktop\HI\print all in folder.py", line 11, in <module>
os.startfile((glob.glob('*.docx*')), "print")
TypeError: Can't convert 'list' object to str implicitly
I understand there is some sort of missing link here I just can't put my
finger on it. I have also tried other approaches with no success. I have
successfully printed individual documents before using the os.startfile with
the print argument. I am very much so a beginner so please go easy on me! I am
using python 3.5 so my glob module is updated.
Answer: glob.glob returns a list of strings, you can't os.startfile a list, you have
to do it for each string in the list. That explains `TypeError: Can't convert
'list' object to str implicitly`
import glob
import os
for filename in glob.glob('*.docx*'):
os.startfile(filename, "print")
Multiple prints:
import glob
import os
numPrints = 10
for filename in glob.glob('*.txt*'):
for i in range(numPrints):
print("Printing file", filename, "copy", i+1)
os.startfile(filename, "print")
|
Python Link Scraper
Question:
focus_Search = raw_input("Focus Search ")
url = "https://www.google.com/search?q="
res = requests.get(url + focus_Search)
print("You Just Searched")
res_String = res.text
#Now I must get ALL the sections of code that start with "<a href" and end with "/a>"
I'm trying to scrape all the links from a Google search results page. I could
extract each link one at a time, but I'm sure there's a better way to do it.
Answer: This creates a list of all links in the search page with some of your code,
without getting into BeautifulSoup
import requests
import lxml.html
focus_Search = input("Focus Search ")
url = "https://www.google.com/search?q="
#focus_Search
res = requests.get(url + focus_Search).content
# res
dom = lxml.html.fromstring(res)
links = [x for x in dom.xpath('//a/@href')] # Borrows from cheekybastard in link below
# http://stackoverflow.com/questions/1080411/retrieve-links-from-web-page-using-python-and-beautifulsoup
links
|
Python modules not installing
Question: I had some code that was running just fine, and over the course of the day, I
broke something, and now I cannot install any python modules. Specifically, I
need numpy, matplotlib, and pillow. I cannot install any of them.
But the weird part is that they both appear to install just fine:
$ sudo pip install numpy
Collecting numpy
Downloading numpy-1.11.0-cp27-cp27mu-manylinux1_x86_64.whl (15.3MB)
100% |████████████████████████████████| 15.3MB 94kB/s
Installing collected packages: numpy
Successfully installed numpy-1.11.0
Or when I try:
$ sudo apt-get install python-numpy
Reading package lists... Done
Building dependency tree
Reading state information... Done
Suggested packages:
python-nose python-numpy-dbg python-numpy-doc
The following NEW packages will be installed:
python-numpy
0 upgraded, 1 newly installed, 0 to remove and 20 not upgraded.
Need to get 0 B/1,763 kB of archives.
After this operation, 9,598 kB of additional disk space will be used.
Selecting previously unselected package python-numpy.
(Reading database ... 221259 files and directories currently installed.)
Preparing to unpack .../python-numpy_1%3a1.11.0-1ubuntu1_amd64.deb ...
Unpacking python-numpy (1:1.11.0-1ubuntu1) ...
Processing triggers for man-db (2.7.5-1) ...
Setting up python-numpy (1:1.11.0-1ubuntu1) ...
I am using python 2.7, and I am on Ubuntu 16.04.
$ python
Python 2.7.5 (default, May 12 2016, 13:11:58)
[GCC 5.3.1 20160413] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import numpy
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: No module named numpy
>>>
It does this for any module I try and install. Any help would be greatly
appreciated.
Answer: This is a result of the default `python` and the default `pip` not matching
up. To ensure that packages are being installed to the correct location, use:
`python -m pip install $PACKAGE`
This results in running the `pip` assigned to the desired `python` rather than
the first one found in your path.
|
FB messenger API Too Many Responses
Question: I'm currently working on getting a chatbot app to work with the new FB
messenger API. I'm using Python's Flask to run a backend app, which is being
hosted on heroku. Right now, I'm having a bit of trouble getting my server to
send responses in a normal fashion. Here is the code I have written to handle
POST requests to the app:
import json
import os
import requests
from flask import Flask, request
app = Flask(__name__)
FB_MESSAGES_ENDPOINT = "https://graph.facebook.com/v2.6/me/messages"
FB_TOKEN = "OMITTED"
count = 0
@app.route('/', methods=["POST"])
def chatbot_response():
global count
req_data = request.data
data = json.loads(req_data)
req_args = request.args
print "Data: ", data
sender_id = data["entry"][0]["messaging"][0]["sender"]["id"]
send_back_to_fb = {
"recipient": {"id": sender_id}, "message": { "text": "sending it back"+str(count)}
}
count += 1
print "Send back to fb: ", send_back_to_fb
params_input = {"access_token": FB_TOKEN, "recipient": sender_id}
headers = {'content-type': 'application/json'}
# the big change: use another library to send an HTTP request back to FB
fb_response = requests.post(FB_MESSAGES_ENDPOINT, headers=headers, params=params_input, data=json.dumps(send_back_to_fb))
print "response status code: ", fb_response.status_code, " ", fb_response.text
# handle the response to the subrequest you made
if not fb_response.ok:
# log some useful info for yourself, for debugging
print 'jeepers. %s: %s' % (fb_response.status_code, fb_response.text)
print "whoa there buddy"
# always return 200 to Facebook's original POST request so they know you
# handled their request
return "Ok", 200
if __name__ == '__main__':
app.run(host="0.0.0.0")
Right now when I message my app, I get a continuous stream of responses of
the form:
sending it back0
sending it back1
sending it back0
sending it back2
sending it back1
sending it back3
sending it back4
sending it back5
sending it back2
sending it back6
sending it back7
sending it back8
sending it back9
sending it back3
sending it back4
sending it back10
sending it back11
sending it back12
sending it back5
sending it back6
sending it back7
sending it back8
sending it back13
Why does my app keep sending responses to the user who messages it, when it
should only respond once? I figure this is because FB keeps sending POST
requests, but I'm not quite sure I follow why POST requests continue to be
sent by FB to my server if I only message the app once.
This is part of the heroku logs:
Data: {u'object': u'page', u'entry': [{u'time': 1463097986863, u'id': 267701720229635, u'messaging': [{u'sender': {u'id': 1022501574495987}, u'recipient': {u'id': 267701720229635}, u'message': {u'seq': 334, u'mid': u'mid.1463097986829:5267967865d8ca4230', u'text': u'alright break'}, u'timestamp': 1463097986837}]}]}
2016-05-13T00:06:27.342096+00:00 app[web.1]: Send back to fb: {'recipient': {'id': 1022501574495987}, 'message': {'text': 'sending it back0'}}
2016-05-13T00:06:28.034903+00:00 app[web.1]: response status code: 200 {"recipient_id":"1022501574495987","message_id":"mid.1463097987637:2dec6b0062f98e1832"}
2016-05-13T00:06:28.034916+00:00 app[web.1]: whoa there buddy
2016-05-13T00:06:28.087649+00:00 heroku[router]: at=info method=POST path="/" host=gentle-plateau-81579.herokuapp.com request_id=09b6fdf9-9d4a-4620-b522-f91682e20469 fwd="31.13.110.121" dyno=web.1 connect=1ms service=703ms status=200 bytes=161
2016-05-13T00:06:28.713916+00:00 app[web.1]: Data: {u'object': u'page', u'entry': [{u'time': 1463097988125, u'id': 267701720229635, u'messaging': [{u'sender': {u'id': 1022501574495987}, u'recipient': {u'id': 267701720229635}, u'delivery': {u'watermark': 1463097987675, u'seq': 336, u'mids': [u'mid.1463097987637:2dec6b0062f98e1832']}}]}]}
2016-05-13T00:06:28.714027+00:00 app[web.1]: Send back to fb: {'recipient': {'id': 1022501574495987}, 'message': {'text': 'sending it back1'}}
2016-05-13T00:06:29.321337+00:00 heroku[router]: at=info method=POST path="/" host=gentle-plateau-81579.herokuapp.com request_id=bebdf9ab-4bc5-416c-b7f0-1f5efd0b5351 fwd="31.13.102.98" dyno=web.1 connect=1ms service=608ms status=200 bytes=161
As I'm a bit of webdev newbie, any help would be greatly appreciated!
Answer: When you set up your app in the FB developer console, you had various event
types you could subscribe to when defining the Messenger webhook.
It looks like, from your logging, the 2nd event received from FB is likely the
message delivery event, and your code currently isn't differentiating
between these different event types, hence the stream of responses.
For initial testing you could try subscribing to just the messages event type
and then add in the others as required; postback is probably the one you
will use the most in a structured chat bot design.
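A minimal guard in the webhook handler illustrates the idea (a sketch based on the payload structure visible in the logs above, where delivery callbacks carry a `delivery` key instead of `message`):
    messaging_event = data["entry"][0]["messaging"][0]
    if "message" not in messaging_event:
        # delivery/read/postback callbacks also hit this endpoint; acknowledge them without replying
        return "Ok", 200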
Hope this helps - cam
|
Seaborn boxplot box assign custom edge colors from Python list
Question: I am trying to change the appearance of boxes in Seaborn's boxplot. I would
like all boxes to be transparent and for the box borders to be specified from
a list. Here is the code I am working with:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
df = pd.DataFrame(np.random.rand(10,4),columns=list('ABCD'))
df['E'] = [1,2,3,1,1,4,3,2,3,1]
sns.boxplot(x=df['E'],y=df['C'])
# Plotting the legend outside the plot (above)
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
handles, labels = ax.get_legend_handles_labels()
leg = plt.legend(handles[0:2], labels[0:2],
loc='upper center', bbox_to_anchor=(0.5, 1.10), ncol=2)
plt.show()
This [post](http://stackoverflow.com/questions/36305695/assign-a-color-to-a-
specific-box-in-seaborn-boxplot) shows how to change the color and box
edgecolor of one single box. However, I would like to assign box edgecolors
based on a list like this `box_line_col = ['r','g',b','purple']`. The above
code produces 4 boxes in the plot - I would like to assign custom box edge
colors starting from the first (leftmost) box and continuing through to the
last (rightmost) box.
Is it possible to to specify the box edge colors from a list, while keeping
the boxes themselves transparent (facecolor = white)?
Answer: Looping through the boxes and setting their colors should work. At the end of
your code, just before `plt.show()` add:
    box_line_col = ['r','g','b','purple']
    for i,box_col in enumerate(box_line_col):
        mybox = ax.artists[i]  # ax is the Axes created in the question; seaborn drew the boxes onto it
        mybox.set_edgecolor(box_col)
        mybox.set_facecolor('white')  # or 'none' for fully transparent boxes
        # If you want the whiskers etc to match, each box has 6 associated Line2D objects (whiskers, caps, median, fliers)
        # Loop over them here, and use the same colour as above
        for j in range(i*6,i*6+6):
            line = ax.lines[j]
            line.set_color(box_col)
            line.set_mfc(box_col)
            line.set_mec(box_col)
    plt.show()
The first part is based on the post you referenced and and the whisker-
coloring directions came from [this
post](http://stackoverflow.com/a/36893152/721432 "whisker colors").
|
Sending email via gmail & python
Question: What is the recommended way of sending an email using gmail and python? There
are a lot of SO threads, but most of them are old and it seems smtp with
username & password is not working any more or the user has to downgrade the
security of their gmail (for example see
[here](http://stackoverflow.com/a/29354018/3769451)). Is OAuth the recommended
way? Or maybe there are some python libraries already there that work with
gmail (possibly with OAuth)?
It would be great if one can suggest/point to a working sample code. Also I
would like not to downgrade gmail security settings if at all possible.
Answer: With gmail API and OAuth there is no need to save the username and password in
the script. The first time the script opens a browser to authorize the script
and will store credentials locally (it will not store username and password).
So consequent runs won't need the browser and can send emails straight. With
this method you will not get errors like SMTPException below and there is no
need to allow Access for less secure apps:
raise SMTPException("SMTP AUTH extension not supported by server.")
smtplib.SMTPException: SMTP AUTH extension not supported by server.
**Here are the steps to send email using gmail API:**
Step 1: Enable the Gmail API for your project (use the wizard linked below):
[](http://i.stack.imgur.com/ICIXt.png)
(Wizard link [here](https://console.developers.google.com/start/api?id=gmail),
More info [here](https://developers.google.com/gmail/api/quickstart/python))
Step 2: Install the Google Client Library
pip install --upgrade google-api-python-client
Step 3: Use the following script to send email(just change the variables in
main function)
import httplib2
import os
import oauth2client
from oauth2client import client, tools
import base64
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from apiclient import errors, discovery
SCOPES = 'https://www.googleapis.com/auth/gmail.send'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Gmail API Python Send Email'
def get_credentials():
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gmail-python-email-send.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
credentials = tools.run_flow(flow, store)
print 'Storing credentials to ' + credential_path
return credentials
def SendMessage(sender, to, subject, msgHtml, msgPlain):
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
message1 = CreateMessage(sender, to, subject, msgHtml, msgPlain)
SendMessageInternal(service, "me", message1)
def SendMessageInternal(service, user_id, message):
try:
message = (service.users().messages().send(userId=user_id, body=message).execute())
print 'Message Id: %s' % message['id']
return message
except errors.HttpError, error:
print 'An error occurred: %s' % error
def CreateMessage(sender, to, subject, msgHtml, msgPlain):
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = to
msg.attach(MIMEText(msgPlain, 'plain'))
msg.attach(MIMEText(msgHtml, 'html'))
return {'raw': base64.urlsafe_b64encode(msg.as_string())}
def main():
to = "[email protected]"
sender = "[email protected]"
subject = "subject"
msgHtml = "Hi<br/>Html Email"
msgPlain = "Hi\nPlain Email"
SendMessage(sender, to, subject, msgHtml, msgPlain)
if __name__ == '__main__':
main()
|
Change caller's current working directory in Python
Question: I'm writing a simple script which ideally will help me conveniently change
directories around my system.
The details of the implementation don't matter, but let's say ideally I will
place this script in `/usr/bin` and call it with an argument denoting where I
want to go to on the system: `goto project1`
I would expect that when the script exits, my terminal's current working would
have changed to that of Project 1.
In order to accomplish this, I tried:
os.chdir('/')
subprocess.call('cd /', shell=True)
Neither of which work. The first changes the working directory in Python and
the second spawns a shell at `/`.
Then I realized how naive I was being. When a program is run, the terminal is
just forking a process, while reading from stdout, which my program is writing
to. Whatever it does, it wouldn't affect the state of terminal.
But then I thought "I've been using `cd` for years, surely someone wrote code
for that", thinking there might be something to go off of (system call or
something?).
But `cd` is not even
[coreutils](http://git.savannah.gnu.org/gitweb/?p=coreutils.git;a=tree;f=src;h=7c41b3fb7c90ccb1f66a7fb4bcdae16fcbd8fae5;hb=HEAD).
Instead, the source of `cd` is this:
builtin `echo ${0##*/} | tr \[:upper:] \[:lower:]` ${1+"$@"}
So, a couple of questions come to mind:
* What's actually going on behind the scenes when a user calls `cd`? (Meaning, how is the terminal and the system actually interacting?)
* Is it possible to have something like a Python script alter the terminal location?
Thanks so much for your help!
Answer: You could do it as a pair of scripts:
**directory.py**
#!/usr/bin/python
import sys
directory = sys.argv[1]
# do something interesting to manipulate directory...
print directory + "tmp"
**directory.csh**
#!/bin/csh -f
cd `python directory.py $1`
**Result:**
> pwd
/Users/cdl
> source directory.csh "/"
> pwd
/tmp
>
Substitute your favorite shell and variant of Python as desired. Turn on
execute for the Python script to simplify further.
Clearly the shell is changing the directory but Python can do all the clever
logic you want to figure out where to send the shell.
|
Get json payload then strip the html with python
Question: I have a json values that I need stripped of all html tags.
**After using the following function:**
def payloaded():
from urllib.request import urlopen
with urlopen("www.example.com/payload.json") as r:
data = json.loads(r.read().decode(r.headers.get_content_charset("utf-8")))
text = (data["body"]["und"][0]["value"])
return(text)
**This is the returned (text):**
<div class="blah">'<p>This is the text.</p>\r\n'
**This is the original (text):**
<div class="blah"><p>This is the text.</p>
Note: I need all HTML tags stripped, and there are no real guidelines about which
tags I will be getting.
**This is what I want the (text) to be:**
This is the text.
**This is the post function I am using:**
def add_node_basic(text)
url = "www.example.com"
headers = {"content-type": "application/json"}
payload = {
"auth_token": x,
"docs":
{
"id": y,
"fields": [
{"name": "body", "value": text, "type": "text"},
]}
}
r = requests.post(url, data=json.dumps(payload), headers=headers)
Any suggestions on how to achieve this is much appreciated!
Answer: You can try _slicing_ the string along with `find` method, like this:
>>> print text[text.find('<p>'):text.find('</p>')].strip('<p>')
This is the text.
If you are trying to extract text only from the HTML source, then you can use
[HTMLParser](https://docs.python.org/2/library/htmlparser.html) library in
Python. Example:
from HTMLParser import HTMLParser
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
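Note that the question's code uses `urllib.request` (Python 3); the same stripper works there with two small adjustments: the import becomes `from html.parser import HTMLParser`, and `MLStripper.__init__` should call `super().__init__()` before `self.reset()`.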
|
Opencv - Grayscale mode Vs gray color convertion
Question: I am working in opencv(2.4.11) python(2.7) and was playing around with gray
images. I found an unusual behavior when loading image in gray scale mode and
converting image from BGR to GRAY. Following is my experimental code:
import cv2
path = 'some/path/to/color/image.jpg'
# Load color image (BGR) and convert to gray
img = cv2.imread(path)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Load in grayscale mode
img_gray_mode = cv2.imread(path, 0)
# diff = img_gray_mode - img_gray
diff = cv2.bitwise_xor(img_gray,img_gray_mode)
cv2.imshow('diff', diff)
cv2.waitKey()
When I viewed the difference image, I could see leftover pixels instead of a
jet-black image. Can you suggest any reason? What is the correct way of
working with gray images?
_P.S. When I use both the images in SIFT, keypoints are different which may
lead to different outcome specially when working with bad quality images._
Answer: **Note: This is not a duplicate** , because the OP is aware that the image
from `cv2.imread` is in BGR format (unlike the suggested duplicate question
that assumed it was RGB hence the provided answers only address that issue)
To illustrate, I've opened up this same color JPEG image:
[](http://i.stack.imgur.com/mzjlI.png)
once using the conversion
img = cv2.imread(path)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
and another by loading it in gray scale mode
    img_gray_mode = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
Like you've documented, the diff between the two images is not perfectly 0, I
can see diff pixels in towards the left and the bottom
[](http://i.stack.imgur.com/d1uh1.png)
I've summed up the diff too to see
import numpy as np
np.sum(diff)
# I got 6143, on a 494 x 750 image
**I tried all `cv2.imread()` modes**
Among all the `IMREAD_` modes for `cv2.imread()`, only `IMREAD_COLOR` and
`IMREAD_ANYCOLOR` can be converted using `COLOR_BGR2GRAY`, and both of them
gave me the same diff against the image opened in `IMREAD_GRAYSCALE`
The difference doesn't seem that big. My guess is it comes from the differences
in the numeric calculations in the two methods (loading grayscale vs
conversion to grayscale).
Naturally what you want to avoid is fine tuning your code on a particular
version of the image just to find out it was suboptimal for images coming from
a different source.
**In brief, let's not mix the versions and types in the processing pipeline.**
So I'd keep the image sources homogenous, e.g. if you have capturing the image
from a video camera in BGR, then I'd use BGR as the source, and do the BGR to
grayscale conversion `cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)`
Vice versa if my ultimate source is grayscale then I'd open the files and the
video capture in gray scale `cv2.imread(path, cv2.IMREAD_GRAYSCALE)`
|
Correlation matrix with huge dataset - Python
Question: In python, when I have a dataset X, whose rows are the different elements of
the sample and the columns are the different features of the sample, I usually
calculate the correlation matrix as follows (assuming zero mean):
import numpy as np
np.transpose(X).dot(X)/row
Now I have a dataset X that has 10 million rows and 1 million columns, and if
I try to calculate that correlation matrix, the computer hangs. The
alternative of performing a for-loop seems impracticable to me, because it is
very slow even with smaller datasets.
How should I manage such an amount of data?
Answer: Maybe you could use a generator? I don't know if it would be acceptably fast
but it would be better memory-wise so that the computer won't block. Something
like this <https://gist.github.com/thatdatabaseguy/2577076>
Paraphrasing the example:
import numpy as np
def my_gen(some_tuple):
for each in some_tuple:
yield each
    X = np.fromiter(my_gen(database.get_data()), dtype=float) # get_data() would return a cursor; fromiter needs an explicit dtype
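If the problem is that all of X cannot be held in memory at once, another option is to stream the rows in chunks and accumulate X^T X incrementally (a sketch; `iter_row_chunks()` is a hypothetical helper yielding blocks of rows, zero-mean data is assumed as in the question, and the resulting n_features x n_features matrix must itself fit in memory):
    import numpy as np
    n_features = 1000  # the number of columns of X
    C = np.zeros((n_features, n_features))
    rows = 0
    for chunk in iter_row_chunks():  # each chunk is a (chunk_rows, n_features) array
        C += chunk.T.dot(chunk)
        rows += chunk.shape[0]
    C /= rows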
**Edit** : This is in the case you cannot find another way of reducing/
transforming/ splitting your data. Either way it is going to be difficult to
handle on a single computer. Good luck!
|
Python ldap3 LDAPSocketOpenError unable to send message, socket is not open
Question: I am coding in Python, ldap3 trying to create a user in OpenLDAP on CentOS. My
local development machine is running Windows 8 64bit. Below is my code.
from ldap3 import Server, Connection, ALL
# define the server
s = Server('ldap://ldap.abcd.com:389', get_info=ALL)
# define the connection
c = Connection(s, user='cn=Manager,dc=mydomain,dc=com', password='Super1')
# perform the Add operation
c.add('uid=user3,ou=People,dc=abcd,dc=com',['person','organizationalPerson','inetOrgPerson', 'posixGroup', 'top','shadowAccount'], {'givenName': 'user3firstname','sn': 'user3lastname', 'uidNumber' : 520,'gidNumber' : 521,'uid': 'user3','cn': 'user3user3lastname'})
# close the connection
c.unbind()
The Server and Connection classes seem to be working OK; I guess so because if I run only
those two statements, I don't get the error below.
LDAPSocketOpenError at /adminservice/users/
unable to send message, socket is not open
Request Method: GET
Request URL: http://127.0.0.1:8000/adminservice/users/
Django Version: 1.8.4
Exception Type: LDAPSocketOpenError
Exception Value:
unable to send message, socket is not open
Exception Location: C:\Python35\lib\site-packages\ldap3\strategy\base.py in send, line 299
Python Executable: C:\Python35\python.exe
Python Version: 3.5.1
Python Path:
['D:\\sourcecode\\idp',
'D:\\sourcecode\\utils-drf-public-api',
'C:\\Python26',
'C:\\Python35\\python35.zip',
'C:\\Python35\\DLLs',
'C:\\Python35\\lib',
'C:\\Python35',
'C:\\Python35\\lib\\site-packages']
Server time: Fri, 13 May 2016 17:02:08 +0700
[](http://i.stack.imgur.com/sksQc.png)
Answer: You must bind the connection before adding the user. Try c.bind() before the
add statement.
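In terms of the question's code that means (only the `c.bind()` line is new):
    c = Connection(s, user='cn=Manager,dc=mydomain,dc=com', password='Super1')
    c.bind()  # open the socket and authenticate before sending any operation
    # ... then the c.add(...) call from the question, unchanged
Alternatively, `Connection` accepts `auto_bind=True`, which binds immediately when the connection object is created.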
|
Python: Data analysis using FFT
Question: I have a [data](https://drive.google.com/open?id=0B7um2WC4G6FFazF1eXJHRVNqVFU)
which looks like this:
YYYY-MO-DD HH-MI-SS_SSS, ATMOSPHERIC PRESSURE (hPa) mean, ATMOSPHERIC PRESSURE (hPa) std
2016-04-20 00:00:00,1006.0515000000001,0.029159119281803602
2016-04-20 00:01:00,1006.039666666667,0.03565211699642609
2016-04-20 00:02:00,1006.0148333333334,0.036891580347842706
2016-04-20 00:03:00,1006.0058333333335,0.03351152934243721
2016-04-20 00:04:00,1005.9714999999999,0.03155973620213212
2016-04-20 00:05:00,1005.955666666667,0.027207094455343653
.............
I'm interested in the Pressure mean which is sampled every minute. My goal is
to look for periodic frequencies inside the data.
I've tried the following:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft
df3 = pd.read_csv('Pressure - Dates by Minute.csv', sep=",", skiprows=0)
Pressure = df3['ATMOSPHERIC PRESSURE (hPa) mean']
frate = 1/60
Pfft = np.fft.fft(Pressure[0])
freqs = fft.fftfreq(len(Pfft), 1/frate)
But I'm getting "tuple index out of range" errors.
Any ideas on how to analyze the fft and plot the matching frequencies against
the raw data?
The raw data looks like this:
[](http://i.stack.imgur.com/u9r0c.png)
Thanks!
Answer: You are retrieving only the first element of `Pressure`, but you should do the
Fourier analysis on all samples. If you replace
Pfft = np.fft.fft(Pressure[0])
with
Pfft = np.fft.fft(Pressure)
it works:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df3 = pd.read_csv('Pressure - Dates by Minute.csv', sep=",", skiprows=0)
Pressure = df3['ATMOSPHERIC PRESSURE (hPa) mean']
frate = 1. / 60
Pfft = np.fft.fft(Pressure)
Pfft[0] = 0 # Set huge DC component to zero, equates to Pressure = Pressure - numpy.mean(Pressure)
freqs = np.fft.fftfreq(len(Pfft), 1. / frate)
    plt.plot(freqs, np.abs(Pfft))  # plot the magnitude of the (complex) spectrum
plt.show()
|
Iterating over range(0,len(temp),-1) doesn't work
Question: I have the following code which does not enter the loop and I can't understand
why.
python file.py 1111100000000
Code:
import argparse
p = argparse.ArgumentParser()
p.add_argument("bits", help = "array_of_bits")
args = p.parse_args()
bits = args.bits
temp = []
for i in bits:
temp.append([int(i)])
print (temp)
fin = []
j = 0
for i in range(0,len(temp),(-1)):
if ( (temp[i] == 0) ):
fin.extend(temp[j].append(temp[i]))
if ( len(temp[i]) != 1 ):
fin.extend(temp[j].append(temp[i]))
j = j + 1
print (fin)
What I wanted to get is:
[[1], [1], [1], [1], [1], [0], [0], [0], [0], [0], [0], [0], [0]]
[[10010], [10010], [100]
But instead I get this:
[[1], [1], [1], [1], [1], [0], [0], [0], [0], [0], [0], [0], [0]]
[[]]
Answer: The problem is the `range` you're iterating over in your `for` loop:
for i in range(0,len(temp),(-1)):
`range(0,len(temp),(-1))` is an empty range, therefore the loop gets never
executed.
Your range would start at 0 and is supposed to produce items smaller than `len(temp)`, but
in steps of `-1`, i.e. counting downwards from 0. This way it can never
reach its target value, because `len(temp)` is always positive.
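To iterate over the indices from the last element down to the first, you would instead write, for example:
    for i in range(len(temp) - 1, -1, -1):
        ...
or equivalently loop over `reversed(range(len(temp)))`.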
|
How to generate random sequence of numbers in python?
Question: How do I generate a random sequence of 1, 2, 3 such that 80 % of the numbers
will be 1, 15 % will be 2 and 5 % will be 3?
Answer: Use `random` to get a random number in [0,1) and map your outputs to that
interval.
from random import random
result = []
    # Produce 100 results (for instance)
for i in range(100):
res = random()
if res < 0.8:
result.append(1)
elif res < 0.95:
result.append(2)
else:
result.append(3)
    print(result)
This is a trivial solution. You may want to write a more elegant one that
allows you to specify the probabilities for each number in a dedicated
structure (list, dict, ...) rather than in an if/else statement.
But then you might be better off using a dedicated library, as suggested in
[this answer](http://stackoverflow.com/a/4266645/4653485). Here's an example
with scipy's
[`stats.rv_discrete`](http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.html)
    from scipy import stats
    import numpy as np

    xk = np.arange(1, 4)
    pk = (0.8, 0.15, 0.05)
    custm = stats.rv_discrete(name='custm', values=(xk, pk))
    # Produce 100 results (for instance)
    print(custm.rvs(size=100))
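Another option, not mentioned above but worth noting as an addition of mine, is `numpy.random.choice`, which accepts a probability vector directly:

    import numpy as np

    print(np.random.choice([1, 2, 3], size=100, p=[0.8, 0.15, 0.05]))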
|
No module named 'json' after installing simplejson
Question: I am working in Ubuntu 14.04 and I have multiple versions of Python on my
machine (they include python2.7 and python3.4). Few days back, I installed
`simplejson` on my system. I don't remember how I did that but I guess it must
be similar to `pip install simplejson`. However, now a strange problem has
started appearing when I try installing any python package. For example, just
now I tried installing `Tkinter` using `sudo pip3.4 install Tkinter` and it
throws the following error:
Traceback (most recent call last):
File "/usr/local/bin/pip3.4", line 9, in <module>
load_entry_point('pip==1.5.4', 'console_scripts', 'pip3.4')()
File "/usr/lib/python3/dist-packages/pkg_resources.py", line 351, in load_entry_point
return get_distribution(dist).load_entry_point(group, name)
File "/usr/lib/python3/dist-packages/pkg_resources.py", line 2363, in load_entry_point
return ep.load()
File "/usr/lib/python3/dist-packages/pkg_resources.py", line 2088, in load
entry = __import__(self.module_name, globals(),globals(), ['__name__'])
File "/usr/lib/python3/dist-packages/pip/__init__.py", line 61, in <module>
from pip.vcs import git, mercurial, subversion, bazaar # noqa
File "/usr/lib/python3/dist-packages/pip/vcs/subversion.py", line 4, in <module>
from pip.index import Link
File "/usr/lib/python3/dist-packages/pip/index.py", line 15, in <module>
from pip.wheel import Wheel, wheel_ext
File "/usr/lib/python3/dist-packages/pip/wheel.py", line 25, in <module>
from distlib.scripts import ScriptMaker
File "/usr/share/python-wheels/distlib-0.1.8-py2.py3-none-any.whl/distlib/scripts.py", line 15, in <module>
File "/usr/share/python-wheels/distlib-0.1.8-py2.py3-none-any.whl/distlib/resources.py", line 20, in <module>
File "/usr/share/python-wheels/distlib-0.1.8-py2.py3-none-any.whl/distlib/util.py", line 11, in <module>
ImportError: No module named 'json'
Sometimes I can fix this if the error tells me that in one of the files I
have:
import json
which I simply convert to
import simplejson as json
I tried uninstalling simplejson:
sudo pip uninstall simplejson
but it gives me the same error: json not found.
Can anybody please help me fix this so that I would happily be able to install
python packages? Thanks in advance.
Answer: Note: I do not have a definitive answer but will offer a series of steps you
can try:
The first thing is to see if you can import json from the usual Python
interpreter:
import json
print(json.__file__) #this would be important to know if it works
If that does work (as well as commenting what `json.__file__` is) you would
then want to try to [use pip from the
interpreter.](http://stackoverflow.com/questions/12332975/installing-python-
module-within-code)
## If you can't import json normally:
This is not surprising; I did not expect pip to be looking in a non-standard
place for modules. You will want to figure out where the json package _should_
be located on your computer. You can do this by importing another module from
the standard library and looking at its `__file__`:
>>> import fractions
>>> fractions.__file__
'/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/fractions.py'
This will obviously be different for you but I'd expect there to be a `json`
folder in the same folder as `fractions.py`
## if you can't import `fractions` or `queue` or `datetime` etc.
If you can't import anything from the standard library you will probably want
to just reinstall python.
## If the json folder is there and contains an `__init__.py`
Use the rename function of your file browser to make sure there are no weird
special characters, but other than that I'm not sure; if you can import
`fractions.py` but not a package from the same folder, that would imply there
is something very wrong with the import mechanics of your Python version.
## If the `json` folder is not with the rest of the standard library
It is possible that your Python distribution has a different structure than
I'd expect; it at least can't hurt to take a look for it.
You can search for the json folder amongst your various Python files [using
the `find` command](http://superuser.com/questions/327762/how-to-find-a-
directory-on-linux); I'm not really sure how it works, but it is just another
thing to try (a small Python alternative is sketched after this paragraph). If
you do find it with the `__init__.py`, `encoder.py`, `decoder.py`,
`scanner.py`, and `tool.py` (at least those are the ones in my version), you'll
probably want to figure out how it got there, but maybe just move it to the
same folder as the rest of the standard library.
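If it helps, here is a small sketch of my own (not part of the original answer) that checks every entry on `sys.path` for a `json` package:

    import os
    import sys

    for p in sys.path:
        candidate = os.path.join(p, 'json', '__init__.py')
        if os.path.exists(candidate):
            print('found json package at', os.path.dirname(candidate))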
## If you can't find the `json` package or you find it and it is corrupted
Well then you will need to replace it! Don't worry, this isn't too hard, just
grab a [source release of python from the
site](https://www.python.org/downloads/source/) and extract the `json` package
from it, once it is uncompressed the `json` folder should be in the `Lib`
folder. Simply copy/move it to the rest of the standard library and you should
be good to go!
* * *
I hope this helps you debug what is going on. This covers all the scenarios I
could imagine happening, and I would be interested in which one fixed your
issue (or what you were able to figure out, so I can come up with more options).
|
output multiple files based on column value python pandas
Question: I have a sample pandas DataFrame:
import pandas as pd
df = {'ID': [73, 68,1,94,42,22, 28,70,47, 46,17, 19, 56, 33 ],
'CloneID': [1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4 ],
'VGene': ['64D', '64D', '64D', 61, 61, 61, 311, 311, 311, 311, 311, 311, 311, 311]}
df = pd.DataFrame(df)
it looks like this:
df
Out[7]:
CloneID ID VGene
0 1 73 64D
1 1 68 64D
2 1 1 64D
3 1 94 61
4 1 42 61
5 2 22 61
6 2 28 311
7 3 70 311
8 3 47 311
9 3 46 311
10 4 17 311
11 4 19 311
12 4 56 311
13 4 33 311
I want to write a simple script to output each CloneID to a different output
file, so in this case there would be 4 different files. The first file would
be named 'CloneID1.txt' and it would look like this:
CloneID ID VGene
1 73 64D
1 68 64D
1 1 64D
1 94 61
1 42 61
second file would be named 'CloneID2.txt':
CloneID ID VGene
2 22 61
2 28 311
third file would be named 'CloneID3.txt':
CloneID ID VGene
3 70 311
3 47 311
3 46 311
and last file would be 'CloneID4.txt':
CloneID ID VGene
4 17 311
4 19 311
4 56 311
4 33 311
The code I found online was:
import pandas as pd
data = pd.read_excel('data.xlsx')
for group_name, data in data.groupby('CloneID'):
with open('results.csv', 'a') as f:
data.to_csv(f)
but it outputs everything to one file instead of multiple files.
Answer: You can do something like the following:
In [19]:
gp = df.groupby('CloneID')
for g in gp.groups:
print('CloneID' + str(g) + '.txt')
print(gp.get_group(g).to_csv())
CloneID1.txt
,CloneID,ID,VGene
0,1,73,64D
1,1,68,64D
2,1,1,64D
3,1,94,61
4,1,42,61
CloneID2.txt
,CloneID,ID,VGene
5,2,22,61
6,2,28,311
CloneID3.txt
,CloneID,ID,VGene
7,3,70,311
8,3,47,311
9,3,46,311
CloneID4.txt
,CloneID,ID,VGene
10,4,17,311
11,4,19,311
12,4,56,311
13,4,33,311
So here we iterate over the groups in `for g in gp.groups:` and we use this to
create the result file path name and call `to_csv` on the group so the
following should work for you:
gp = df.groupby('CloneID')
for g in gp.groups:
path = 'CloneID' + str(g) + '.txt'
gp.get_group(g).to_csv(path)
Actually the following would be even simpler:
gp = df.groupby('CloneID')
gp.apply(lambda x: x.to_csv('CloneID' + str(x.name) + '.txt'))
|
How can I specify a python version using setuptools?
Question: Is there a way to specify a python version to be used with a python package
defined in setup.py?
My setup.py currently looks like this:
from distutils.core import setup
setup(
name = 'macroetym',
packages = ['macroetym'], # this must be the same as the name above
version = '0.1',
description = 'A tool for macro-etymological textual analysis.',
author = 'Jonathan Reeve',
author_email = '[email protected]',
url = 'https://github.com/JonathanReeve/macro-etym',
download_url = 'https://github.com/JonathanReeve/macro-etym/tarball/0.1', # FIXME: make a git tag and confirm that this link works
install_requires = ['Click', 'nltk', 'pycountry', 'pandas',
'matplotlib'],
include_package_data = True,
package_data = {'macroetym': ['etymwm-smaller.tsv']},
keywords = ['nlp', 'text-analysis', 'etymology'],
classifiers = [],
entry_points='''
[console_scripts]
macroetym = macroetym.main:cli
''',
)
It's a command-line program. My script runs using Python 3, but a lot of
operating systems still have Python 2 as the default. How can I specify a
python version to be used here? I can't seem to find anything in [the
docs](https://pythonhosted.org/setuptools/setuptools.html#declaring-
dependencies), but maybe I'm not looking in the right place?
Answer: You can't force the usage of a Python 3 interpreter but you can raise an error
or warning using `sys.version` or `platform.python_version()`
import sys
print(sys.version)
print(sys.version_info)
print(sys.version_info.major) # Returns 3 for Python 3
Or:
import platform
print(platform.python_version())
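For example, a minimal sketch of such a guard placed at the top of `setup.py` (the message text is my own illustration):

    import sys

    if sys.version_info.major < 3:
        sys.exit('Sorry, this package requires Python 3.')

Recent versions of setuptools also accept a `python_requires` argument to `setup()` (e.g. `python_requires='>=3'`), which lets pip refuse to install the package on an unsupported interpreter, so it is worth checking whether your toolchain supports it.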
|
Run heredoc formated script on background?
Question: I would like to know if there is any way to run a script in heredoc format
in the background.
#!/bin/bash
python <<EOF
import time
time.sleep(2)
print "AAA"
EOF # placing "&" here doesn't work
echo "Hello"
I know that I can write it to a file and then run it like `python foo.py &`
but that's not the point of the question.
Answer: Just stick your `&` at the top, which is totally not intuitive, but... neither
are heredocs.
#!/bin/bash
python <<EOF &
import time
time.sleep(2)
print "AAA"
EOF
echo "Hello"
You can also redirect up there too:
#!/bin/bash
python <<EOF > /some/file &
import time
time.sleep(2)
print "AAA"
EOF
echo "Hello"
|
how to change relative import search path
Question: I'm trying to create an auto_import function which is part of a library. The
purpose of this is to avoid listing `from .x import y` many times in `__init__`
files, and instead only do something like `import lib; lib.auto_import(__file__)`;
this would search for Python files in the folder where the `__init__` is present
and would import everything via an exec statement (i.e. `exec('from .x import
abc')`).
My problem is that, somehow, the 'from' statement always tries to import .x
from the lib directory, even if I change the cwd to the directory where the actual
`__init__` file is placed... How should I solve this? How should I change the
search dir for the `from .` statement?
Structure:
$ ls -R
.:
app.py lib x
./lib:
__init__.py auto_import.py
./x:
__init__.py y
./x/y:
__init__.py y.py
e.g. `./x/y/__init__.py` contains `import lib; lib.auto_import(__file__)`.
auto_import checks for files in the dir of `__file__` and imports them with
`exec('from .{} import *')` (but this `from .` always refers to the lib folder
and not to the dir of `__file__`, and that is my question: how do I change this
to the dir of `__file__`?). Of course the whole thing is imported in app.py like:
import x
print(x.y)
Thanks
EDIT1: final auto_import (globals() / gns cannot be avoided )
import os, sys, inspect
def auto_import(gns):
current_frame = inspect.currentframe()
caller_frame = inspect.getouterframes(current_frame)[1]
src_file = caller_frame[1]
for item in os.listdir(os.path.dirname(src_file)):
item = item.split('.py')[0]
if item in ['__init__', '__pycache__']:
continue
gns.update(__import__(item, gns, locals(), ['*'], 1).__dict__)
Answer: The problem of your approach is that `auto_import` is defined in
`lib/auto_import.py` so the context for `exec('from .x import *')` is always
`lib/`. Even though you manage to fix the path problem,
`lib.auto_import(__file__)` will not import anything to the namespace of
`lib.x.y`, because the function locates in another module.
## Use the built-in function `__import__`
Here is the auto_import script:
_myimporter.py_
# myimporter.py
def __import_siblings__(gns, lns={}):
for name in find_sibling_names(gns['__file__']):
gns.update((k,v) for k,v in __import__(name, gns,lns).__dict__.items() if not k.startswith('_'))
import re,os
def find_sibling_names(filename):
pyfp = re.compile(r'([a-zA-Z]\w*)\.py$')
files = (pyfp.match(f) for f in os.listdir(os.path.dirname(filename)))
return set(f.group(1) for f in files if f)
Inside your `lib/x/y/__init__.py`
#lib/x/y/__init__.py
from myimporter import __import_siblings__
__import_siblings__(globals())
Let's say you have a dummy module that need to be imported to `y`:
#lib/x/y/dummy.py
def hello():
print 'hello'
Test it:
import x.y
x.y.hello()
Please be aware that `from lib import *` is usually a [bad
habit](http://stackoverflow.com/questions/2360724/what-exactly-does-import-
import) because of namespace pollution. Use it with caution.
Refs: [1](http://stackoverflow.com/q/4874342/6238076)
[2](http://stackoverflow.com/q/2724260/6238076)
|
Python - Get percentage of minority class in a list
Question: Suppose I have a list, how can I write an elegant one-liner to calculate the
percentage of the minority class in a list?
For example, for `list1 = [1,1,1,-1,-1]`, the minority class is `-1`. The
percentage of `-1` in the list will be `2/5=0.4`
For another list `list2 = [1,-1,-1,-1,-1]`, the minority class is `1`. The
percentage of `1` in the list will be `1/5=0.2`
Answer: You can use a `Counter`:
from collections import Counter
nums = [1,-1,-1,-1,-1]
# if using Python 2 use float(len(nums))
print(Counter(nums).most_common()[-1][-1] / len(nums))
>> 0.2
`Counter.most_common()` returns a list of tuples of the form `(element,
count)` ordered from most to least common, so `most_common()[-1][-1]` returns
the element that is the least common.
If there are several minorities, one of them will be chosen arbitrarily for
the calculation. For example, using my code with `nums = [3, 3, -1, 1, 2]`
will also return `0.2`, using either `-1`, `1` or `2` for the calculation.
|
Python: Catching an exception works outside of a function but not inside a function
Question: I have a strange problem which I can't solve myself.
If I execute `outside_func.py` in two separate terminals, the second execution
catches the BlockingIOError exception and the message is printed:
**outside_func.py**
import fcntl
import time
# Raise BlockingIOError if same script is already running.
try:
lockfile = open('lockfile', 'w')
fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
except BlockingIOError:
print('Script already running.')
time.sleep(20)
If I do the same with `inside_func.py` nothing is caught and no message is
printed:
**inside_func.py**
import fcntl
import time
# Raise BlockingIOError if same script is already running.
def script_already_running():
try:
lockfile = open('lockfile', 'w')
fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
except BlockingIOError:
print('Script already running.')
script_already_running()
time.sleep(20)
Any ideas?
Answer: The file is closed when you leave the function, so the two snippets are not the
same. In the snippet where the try is _outside of a function_, there is still a
reference to the file object in scope during the sleep call, so the lock is
still held and a second attempt to lock the _lockfile_ rightfully errors. If
you change the function by moving the sleep inside it, you will see the error
raised, as now you have comparable code:
import fcntl
import time
# Raise BlockingIOError if same script is already running.
def script_already_running():
try:
lockfile = open('lockfile', 'w')
fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
except BlockingIOError:
print('except')
        time.sleep(20)
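Conversely, if you want to keep the lock for the lifetime of the script while still using a function, a minimal sketch (my own variation, not from the original answer) is to return the open file object so a reference stays alive in the caller:

    import fcntl
    import time

    def acquire_lock(path='lockfile'):
        lockfile = open(path, 'w')
        try:
            fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except BlockingIOError:
            print('Script already running.')
            return None
        return lockfile   # keep a reference so the file (and the lock) stays open

    lock = acquire_lock()
    if lock is not None:
        time.sleep(20)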
|
Apply several functions to a grouped by dataframe in python
Question: I have a dataset with some columns which I am using for grouping the data.
There are two more columns; one has dtype object and the other is numerical. I
want to find the number of unique values per group for each of these columns,
and also the most common value.
# Typo in code next line removed
df = pd.DataFrame({'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar','bar','bar',], 'C_object':['str1', 'str2', 'str2', 'str2','str1', 'str1', 'str1', 'str2'], 'D_num': [10, 2, 2, 2, 10, 10, 10, 2]})
d = df.groupby('A')
g = d['C_object', 'D_num'].transform(unique)
Expected Output [](http://i.stack.imgur.com/NXW77.jpg)
This doesn't work.
Answer: Try this:
import pandas as pd
df = pd.DataFrame({'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar','bar','bar',], 'C_object':['str1', 'str2', 'str2', 'str2','str1', 'str1', 'str1', 'str2'], 'D_num': [10, 2, 2, 2, 10, 10, 10, 2]})
df2=pd.DataFrame({'C_object_len_unique': df.groupby('A')['C_object'].apply(lambda x: len(x.unique())), \
'C_object_most_common': df.groupby('A')['C_object'].agg(lambda x:x.value_counts().index[0]), \
'D_num_len_unique' : df.groupby('A')['D_num'].apply(lambda x: len(x.unique())), \
'D_num_most_common': df.groupby('A')['D_num'].agg(lambda x:x.value_counts().index[0]) \
}).reset_index()
print df2
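A hedged alternative sketch using a single `groupby`/`agg` call with named helper functions (the helper names are my own choices, not from the original answer):

    import pandas as pd

    def n_unique(x):
        return x.nunique()

    def most_common(x):
        return x.value_counts().index[0]

    df2 = df.groupby('A').agg({'C_object': [n_unique, most_common],
                               'D_num': [n_unique, most_common]}).reset_index()
    print(df2)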
|
python 2.7: List file in folder and subfolders and write them to file error
Question: I wrote this short code in Python 2.7 and it should list all files and
subfolders into a string. Then it multiplies their ASCII values by `multi` and
concatenates them to `long_string`. `long_string` should be written to a file,
but it isn't happening and I don't know why. Can somebody help me? There is no
error message; the file is just not created.
`long_string` is 2 million characters long.
The code:
import os
from random import randint
username = ""
for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
if os.environ.get(name):
username = os.environ.get(name)
long_string = ""
n = 10
multi = 12349790
filelist = os.popen("dir C:\users\%s\desktop /s /b"%(username)).read()
for c in filelist:
number = str(ord(c)*multi)
while len(number) < n:
number = "0"+number
long_string = long_string+number
with open("filelist.txt", "w") as outf:
outf.write(long_string)
a = [long_string[i:i+n] for i in range(0, len(long_string), n)]
long_string2 = ""
for e in a:
long_string2 = long_string2+chr(int(e)/multi)
lista_files = long_string2.split("\n")
print lista_files[:5]
Answer: The code is working fine; that is why it wasn't showing an error. You could not
see your file because it was written somewhere else.
Just write the full path when opening `filelist.txt` and it should work as you
want.
Your code should look like this in the end:
import os
from random import randint
username = ""
for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
if os.environ.get(name):
username = os.environ.get(name)
long_string = ""
n = 10
multi = 12349790
filelist = os.popen("dir C:\users\%s\desktop\ /s /b"%(username)).read()
for c in filelist:
number = str(ord(c)*multi)
while len(number) < n:
number = "0"+number
long_string = long_string+number
with open("C:/users/"+username+"/desktop/filelist.txt", "w") as outf:
outf.write(long_string)
a = [long_string[i:i+n] for i in range(0, len(long_string), n)]
long_string2 = ""
for e in a:
long_string2 = long_string2+chr(int(e)/multi)
lista_files = long_string2.split("\n")
print lista_files[:5]
|
Jinja2 install issue
Question: I am getting the following error message when attempting `{% set foo = 42 %}`
in my html template. The template works fine when the `set` is not used.
TemplateSyntaxError: Invalid block tag: 'set', expected 'endblock' or 'endblock content'
Below I have added the full session log as suggested in comments.
*** Running dev_appserver with the following flags:
--skip_sdk_update_check=yes --port=15093 --admin_port=8018
Python command: /usr/bin/python2.7
INFO 2016-05-14 18:36:26,513 devappserver2.py:762] Skipping SDK update check.
INFO 2016-05-14 18:36:26,696 api_server.py:204] Starting API server at: http://localhost:56265
INFO 2016-05-14 18:36:26,701 dispatcher.py:197] Starting module "default" running at: http://localhost:15093
INFO 2016-05-14 18:36:26,704 admin_server.py:118] Starting admin server at: http://localhost:8018
ERROR 2016-05-14 18:37:48,778 webapp2.py:1528] Invalid block tag: 'set', expected 'endblock' or 'endblock content'
Traceback (most recent call last):
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/lib/webapp2-2.3/webapp2.py", line 1511, in __call__
rv = self.handle_exception(request, response, e)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/lib/webapp2-2.3/webapp2.py", line 1505, in __call__
rv = self.router.dispatch(request, response)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/lib/webapp2-2.3/webapp2.py", line 1253, in default_dispatcher
return route.handler_adapter(request, response)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/lib/webapp2-2.3/webapp2.py", line 1077, in __call__
return handler.dispatch()
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/lib/webapp2-2.3/webapp2.py", line 547, in dispatch
return self.handle_exception(e, self.app.debug)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/lib/webapp2-2.3/webapp2.py", line 545, in dispatch
return method(*args, **kwargs)
File "/Users/brian/googleapps/lastturn/views.py", line 187, in get
self.response.out.write(template.render(path, template_values))
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/ext/webapp/template.py", line 91, in render
t = _load_internal_django(template_path, debug)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/ext/webapp/template.py", line 165, in _load_internal_django
template = django.template.loader.get_template(file_name)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/loader.py", line 160, in get_template
template = get_template_from_string(template, origin, template_name)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/loader.py", line 168, in get_template_from_string
return Template(source, origin, name)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/__init__.py", line 158, in __init__
self.nodelist = compile_string(template_string, origin)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/__init__.py", line 186, in compile_string
return parser.parse()
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/__init__.py", line 281, in parse
compiled_result = compile_func(self, token)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/loader_tags.py", line 195, in do_extends
nodelist = parser.parse()
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/__init__.py", line 281, in parse
compiled_result = compile_func(self, token)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/loader_tags.py", line 173, in do_block
nodelist = parser.parse(('endblock', 'endblock %s' % block_name))
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/__init__.py", line 279, in parse
self.invalid_block_tag(token, command, parse_until)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/__init__.py", line 332, in invalid_block_tag
raise self.error(token, "Invalid block tag: '%s', expected %s" % (command, get_text_list(["'%s'" % p for p in parse_until])))
TemplateSyntaxError: Invalid block tag: 'set', expected 'endblock' or 'endblock content'
ERROR 2016-05-14 18:37:48,784 wsgi.py:279]
Traceback (most recent call last):
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/runtime/wsgi.py", line 267, in Handle
result = handler(dict(self._environ), self._StartResponse)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/lib/webapp2-2.3/webapp2.py", line 1519, in __call__
response = self._internal_error(e)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/lib/webapp2-2.3/webapp2.py", line 1511, in __call__
rv = self.handle_exception(request, response, e)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/lib/webapp2-2.3/webapp2.py", line 1505, in __call__
rv = self.router.dispatch(request, response)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/lib/webapp2-2.3/webapp2.py", line 1253, in default_dispatcher
return route.handler_adapter(request, response)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/lib/webapp2-2.3/webapp2.py", line 1077, in __call__
return handler.dispatch()
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/lib/webapp2-2.3/webapp2.py", line 547, in dispatch
return self.handle_exception(e, self.app.debug)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/lib/webapp2-2.3/webapp2.py", line 545, in dispatch
return method(*args, **kwargs)
File "/Users/brian/googleapps/lastturn/views.py", line 187, in get
self.response.out.write(template.render(path, template_values))
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/ext/webapp/template.py", line 91, in render
t = _load_internal_django(template_path, debug)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/ext/webapp/template.py", line 165, in _load_internal_django
template = django.template.loader.get_template(file_name)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/loader.py", line 160, in get_template
template = get_template_from_string(template, origin, template_name)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/loader.py", line 168, in get_template_from_string
return Template(source, origin, name)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/__init__.py", line 158, in __init__
self.nodelist = compile_string(template_string, origin)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/__init__.py", line 186, in compile_string
return parser.parse()
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/__init__.py", line 281, in parse
compiled_result = compile_func(self, token)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/loader_tags.py", line 195, in do_extends
nodelist = parser.parse()
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/__init__.py", line 281, in parse
compiled_result = compile_func(self, token)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/loader_tags.py", line 173, in do_block
nodelist = parser.parse(('endblock', 'endblock %s' % block_name))
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/__init__.py", line 279, in parse
self.invalid_block_tag(token, command, parse_until)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/_internal/django/template/__init__.py", line 332, in invalid_block_tag
raise self.error(token, "Invalid block tag: '%s', expected %s" % (command, get_text_list(["'%s'" % p for p in parse_until])))
TemplateSyntaxError: Invalid block tag: 'set', expected 'endblock' or 'endblock content'
INFO 2016-05-14 18:37:48,792 module.py:812] default: "GET / HTTP/1.1" 500 -
Below I have added the revisions I made to my `views.py`. The commented out
lines indicate the original jinja2 environment statements. These revisions are
in response to suggestions made in comments. But I still get the same error
message which seems to suggest jinja2 is not being used.
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
#jinja_environment = \
#jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATE_DIR))
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class BaseHandler(webapp2.RequestHandler):
@webapp2.cached_property
def jinja2(self):
return jinja2.get_jinja2(app=self.app)
def render_template(
self,
filename,
template_values,
**template_args
):
#template = jinja_environment.get_template(filename)
template = JINJA_ENVIRONMENT.get_template(filename)
self.response.out.write(template.render(template_values))
Below is the base.html template.
<html>
<head>
<link rel="stylesheet" href="/static/css/reset.css">
<link rel="stylesheet" href="/static/css/style.css">
</head>
<body>
<div class="wrapper">
{% block content %}
{% endblock content %}
</div>
</body>
</html>
Below is the "heart" of the actual template. By "heart", I mean I have left
out the guts because this is just a test.
{% extends "base.html" %}
{% block content %}
<center>
<h1>Create or edit your own Blog</h1>
</center>
{% set foo = 42 %}
{{ foo }}
{% endblock content %}
[This answer suggests to me that jinja2 is not installed
(properly?).](http://stackoverflow.com/questions/31799710/)
Is there some other part of installation that I have to change?
[This answer shows more input about **ipython**
installation,](http://stackoverflow.com/questions/27843950/) but I am using
**google-app-engine** which may be different. The listing below confirms that
Jinja2 2.8 is installed.
server:~ brian$ pip freeze
Warning: cannot find svn location for setuptools===0.6c12dev-r85381
all==0.0.6
altgraph==0.7.1
atari-py==0.0.17
bdist-mpkg==0.4.4
bonjour-py==0.3
caller-module==0.0.9
Django==1.3.1
include-server===3.1-toolwhip.1
Jinja2==2.8
macholib==1.3
MarkupSafe==0.23
mod-python==3.3.1
modulegraph==0.8.1
numpy==1.11.0
PIL==1.1.7
Pillow==3.2.0
public==0.0.0
py2app==0.5.3
pyglet==1.2.4
pyobjc-core==2.3.2a0
pyobjc-framework-AddressBook==2.3.2a0
pyOpenSSL==0.12
PyRSS2Gen==1.0.0
python-dateutil==1.5
PyYAML==3.11
render==1.0.0
requests==2.9.1
six==1.10.0
Twisted==11.0.0
xattr==0.6.1
zope.interface==3.5.1
You are using pip version 8.1.1, however version 8.1.2 is available.
server:~ brian$
Answer: After much help via comments to my original question, and too much stumbling, I
have finally answered my question. I was confused by both webapp2 and Jinja2
having a template.render(), but they are different.
template.render() from webapp requires 2 parameters -- the template path and
the template values -- and my current code uses this format. However, I want to
use Jinja2, where template.render() requires only 1 parameter -- the template
values.
According to the [google docs for using Jinja in the guestbook
app](https://cloud.google.com/appengine/docs/python/gettingstartedpython27/generating-
dynamic-content-templates) the following format was shown.
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
But it turns out that does not work well with my chosen BaseHandler class,
which puts templates in their own subdirectory, "templates". Instead I used
the following code, where TEMPLATE_DIR replaces `__file__` in the "loader="
line.
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(TEMPLATE_DIR ),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
Btw, I no longer import the webapp `template`, and I use the following style
in my `get()` handlers.
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.out.write(template.render( template_values))
So, although the question asks about a "Jinja install issue", it was not exactly the
install that was problematic; rather, I had difficulty integrating Jinja with
my existing directory structure.
|
Python/Flask web development
Question: I am using Flask web development to create a mini web app with Python 2.7.
import settings
from flask import Flask, render_template
app = Flask(__name__)
app.config.from_object(settings)
@app.route("/")
def hello():
return "Hello World!"
@app.route("/login")
def login_template():
return render_template("login.html")
if __name__=="__main__":
app.run()
The `@app.route("/")` is working perfectly fine but i got an error in
`@app.route("/login")` because im tryng to `render_template("login.html")`.
The file is in a directory `/templates/login.html` in the same working
directory
I got this error:
<https://i.gyazo.com/761c169e0d55de45e3dd6c7af346c48c.png>
<https://i.gyazo.com/571c079b44c6216612c16798d57d200a.png>
Answer: The `url_for` usage includes a provider parameter, but your code
doesn't handle that.
Please change it accordingly:
@app.route('/login/')
@app.route('/login/<provider>')
def login_template(provider=None):
# do something
|
sknn multi layer perceptron classifier
Question: I am using the following neural net classifier in python
    from sknn import mlp
nn = mlp.Classifier(
layers=[
mlp.Layer("Tanh", units=n_feat/8),
mlp.Layer("Sigmoid", units=n_feat/16),
mlp.Layer("Softmax", units=n_targets)],
n_iter=50,
n_stable=10,
batch_size=25,
learning_rate=0.002,
learning_rule="momentum",
valid_size=0.1,
verbose=1)
which is working just fine. My question is: how do I proceed if I require, for
example, 100, 200 or 500 hidden layers? Do I have to specify each layer here
manually, or does someone have a better idea in Python for an MLP?
Answer: You could create some loop-based mechanism to build the list of layers, I
suppose (see the sketch further below), but there's a bigger issue here. A standard MLP with hundreds of
layers is likely to be _extremely_ expensive to train - both in terms of
computational speed as well as memory usage. MLPs typically only have one or
two hidden layers, or occasionally a few more. But for problems that can truly
benefit from more hidden layers, it becomes important to incorporate some of
the lessons learned in the field of [deep
learning](https://en.wikipedia.org/wiki/Deep_learning). For example, for
object classification on images, using all fully-connected layers is
incredibly inefficient, because you're interested in identifying spatially-
local patterns, and therefore interactions between spatially-distant pixels or
regions is largely noise. (This is a perfect case for using a deep
convolutional neural net.)
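That said, a minimal sketch of the loop-based construction mentioned above (the layer count, width and activations are illustrative assumptions, not recommendations; `n_targets` is taken from the question's own code):

    from sknn import mlp

    n_hidden = 5        # hypothetical number of hidden layers
    width = 64          # hypothetical units per hidden layer

    layers = [mlp.Layer("Tanh", units=width) for _ in range(n_hidden)]
    layers.append(mlp.Layer("Softmax", units=n_targets))

    nn = mlp.Classifier(layers=layers, n_iter=50)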
Although some _very_ deep networks have been created, it's worth pointing out
that even Google's very powerful Inception-v3 model is only 42-layers deep.
Anyway, if you're interested in building deep models, I'd recommend reading
this [Deep Learning book](http://www.deeplearningbook.org/). From what I've
read of it, it seems to be a very good introduction. Hope that helps!
|
Python: removing list from inside list results in: IndexError: list index out of range
Question: I'm a mere beginner at python, so bear with me. I seem to be having some
trouble when removing one of the lists from inside another list. The code
works perfectly fine if there is only one list inside the list, but if there
are two lists inside the list the program crashes when deleting one of them.
import pygame
import math
pygame.init()
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
GREEN = ( 0, 255, 0)
RED = ( 255, 0, 0)
BLUE = ( 0, 0, 255)
PI = math.pi
size = (700, 500)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Arcade game!")
background_image = pygame.image.load("bg_one.jpg").convert()
player_image = pygame.image.load("player.png").convert()
player_image.set_colorkey(BLACK)
click_sound = pygame.mixer.Sound("laser5.ogg")
done = False
clock = pygame.time.Clock()
bullets = []
def create_bullet(mpos):
bullets.insert(len(bullets), [mpos[0]+50, mpos[1]])
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.MOUSEBUTTONDOWN:
click_sound.play()
create_bullet(pygame.mouse.get_pos())
screen.fill(BLACK)
# Hide mouse, etc.
pygame.mouse.set_visible(False)
# Game Logic
player_position = pygame.mouse.get_pos()
x = player_position[0]
y = player_position[1]
if not len(bullets) <= 0:
for i in range(len(bullets)):
if bullets[i][1] < 0:
print (bullets)
bullets.remove(bullets[i])
print (bullets)
else:
bullets[i][1] -= 5
# Drawing code goes here
screen.blit (background_image, [0, 0])
for i in range(len(bullets)):
pygame.draw.ellipse(screen, GREEN, [bullets[i][0], bullets[i][1], 4, 4])
screen.blit (player_image, [x, y])
pygame.display.flip()
clock.tick(60)
print(bullets)
pygame.quit()
EDIT: Forgot to include the error. Here it is:
Traceback (most recent call last):
File "main.py", line 52, in <module>
bullets.remove(bullets.index(i))
ValueError: 0 is not in list
Answer: I see your problem. Here,
for i in range(len(bullets)):
if bullets[i][1] < 0:
print (bullets)
bullets.remove(bullets[i])
print (bullets)
Let's say you have an array ["a", "b"]. I want to cycle through this array and
remove both elements. "a" is at index 0, and "b" is at index 1. Now, I remove
"a" from the array, with
array.remove(array[0])
Now, my array just contains ["b"]. However, now "b" is at index zero. But now
you're attempting to access element 1 of the array, which no longer exists.
**Your problem is that you're trying to cycle through every element in the
array, but you're also deleting elements from the array while you do it. This
means that you're trying to index into an array that is now shorter than what
you originally thought, and also all the indexes are changing.**
Try this code instead:
bulletsToRemove = []
if not len(bullets) <= 0:
for i in range(len(bullets)):
if bullets[i][1] < 0:
bulletsToRemove.append(bullets[i])
else:
bullets[i][1] -= 5
for bullet in bulletsToRemove:
bullets.remove(bullet)
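An alternative sketch (my own variation): rebuild the list in one pass so you never mutate it while iterating over it:

    surviving = []
    for bullet in bullets:
        if bullet[1] >= 0:
            bullet[1] -= 5            # move the bullet up the screen
            surviving.append(bullet)  # keep only on-screen bullets
    bullets = surviving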
|
Django-Cities-Light, Changing fields to ForeignKey in postgresql raises error
Question: My goal is to use the Django-Cities-Light and link my models with foreignkey
to City and Country models from django-cities-light.
When I run python3 manage.py migrate I get the following error:
Operations to perform:
Synchronize unmigrated apps: gis, crispy_forms, geoposition, messages, staticfiles
Apply all migrations: amenities, sites, images, venues, sessions, contenttypes, admin, auth, newsletter, registration, easy_thumbnails, cities_light
Synchronizing apps without migrations:
Creating tables...
Running deferred SQL...
Installing custom SQL...
Running migrations:
Rendering model states... DONE
Applying venues.0012_auto_20160514_2024...Traceback (most recent call last):
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/backends/utils.py", line 64, in execute
return self.cursor.execute(sql, params)
psycopg2.ProgrammingError: column "venue_city_id" cannot be cast automatically to type integer
HINT: Specify a USING expression to perform the conversion.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/core/management/__init__.py", line 346, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/core/management/base.py", line 394, in run_from_argv
self.execute(*args, **cmd_options)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/core/management/base.py", line 445, in execute
output = self.handle(*args, **options)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/core/management/commands/migrate.py", line 222, in handle
executor.migrate(targets, plan, fake=fake, fake_initial=fake_initial)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/migrations/executor.py", line 110, in migrate
self.apply_migration(states[migration], migration, fake=fake, fake_initial=fake_initial)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/migrations/executor.py", line 148, in apply_migration
state = migration.apply(state, schema_editor)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/migrations/migration.py", line 115, in apply
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/migrations/operations/fields.py", line 201, in database_forwards
schema_editor.alter_field(from_model, from_field, to_field)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/backends/base/schema.py", line 484, in alter_field
old_db_params, new_db_params, strict)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/backends/base/schema.py", line 636, in _alter_field
params,
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/backends/base/schema.py", line 111, in execute
cursor.execute(sql, params)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/backends/utils.py", line 79, in execute
return super(CursorDebugWrapper, self).execute(sql, params)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/backends/utils.py", line 64, in execute
return self.cursor.execute(sql, params)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/utils.py", line 98, in __exit__
six.reraise(dj_exc_type, dj_exc_value, traceback)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/utils/six.py", line 685, in reraise
raise value.with_traceback(tb)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/backends/utils.py", line 64, in execute
return self.cursor.execute(sql, params)
django.db.utils.ProgrammingError: column "venue_city_id" cannot be cast automatically to type integer
HINT: Specify a USING expression to perform the conversion.
when i run python3 manage.py sqlmigrate venues 0012 i get the following
traceback:
Traceback (most recent call last):
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/apps/registry.py", line 148, in get_app_config
return self.app_configs[app_label]
KeyError: 'cities_light'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/migrations/state.py", line 238, in __init__
model = self.get_model(lookup_model[0], lookup_model[1])
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/apps/registry.py", line 202, in get_model
return self.get_app_config(app_label).get_model(model_name.lower())
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/apps/registry.py", line 150, in get_app_config
raise LookupError("No installed app with label '%s'." % app_label)
LookupError: No installed app with label 'cities_light'.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/core/management/__init__.py", line 346, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/core/management/base.py", line 394, in run_from_argv
self.execute(*args, **cmd_options)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/core/management/commands/sqlmigrate.py", line 31, in execute
return super(Command, self).execute(*args, **options)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/core/management/base.py", line 445, in execute
output = self.handle(*args, **options)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/core/management/commands/sqlmigrate.py", line 57, in handle
sql_statements = executor.collect_sql(plan)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/migrations/executor.py", line 127, in collect_sql
state = migration.apply(state, schema_editor, collect_sql=True)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/migrations/migration.py", line 115, in apply
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/migrations/operations/fields.py", line 186, in database_forwards
to_model = to_state.apps.get_model(app_label, self.model_name)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/utils/functional.py", line 59, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/migrations/state.py", line 166, in apps
return StateApps(self.real_apps, self.models)
File "/Users/iam-tony/.envs/venuepark/lib/python3.4/site-packages/django/db/migrations/state.py", line 248, in __init__
raise ValueError(msg.format(field=operations[0][1], model=lookup_model))
ValueError: Lookup failed for model referenced by field venues.Venue.venue_city: cities_light.City
This is the 0012 migration file:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('venues', '0011_venue_map_activation'),
]
operations = [
migrations.AlterField(
model_name='venue',
name='venue_city',
field=models.ForeignKey(to='cities_light.City', blank=True),
),
]
My models are as follows:
class Venue(models.Model):
venue_owner = models.ForeignKey(User, null=True)
venue_name = models.CharField(max_length=100)
venue_address = models.CharField(max_length=250)
venue_city = models.CharField(max_length=250)
venue_zipcode = models.CharField(max_length=20, blank=True)
venue_seated_capacity = models.PositiveIntegerField()
venue_standing_capacity = models.PositiveIntegerField()
venue_type = models.CharField(max_length=20, choices=VENUE_TYPES)
venue_sqf = models.PositiveIntegerField(default=0)
venue_description = models.TextField(blank=False, null=True)
featurete = models.PositiveIntegerField(default=0)
carousel = models.PositiveIntegerField(default=0)
gallery = models.PositiveIntegerField(default=0)
#map_activation is for activating the map for the venue
map_activation = models.BooleanField(default=False)
position = GeopositionField(blank=True)
My settings are as follows:
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#GEOdjango
'django.contrib.gis',
#Django cities Light
'cities_light',
#Custom Apps
'venues',
'images',
'amenities',
#Third party apps
'registration',
'crispy_forms',
'easy_thumbnails',
'geoposition',
)
CITIES_LIGHT_TRANSLATION_LANGUAGES = ['nl', 'en']
CITIES_LIGHT_INCLUDE_COUNTRIES = ['NL']
Changing venue_city from a CharField to a ForeignKey caused this error. I
tried changing the model back, but I still get the same error.
How can I fix this error and create the connections to the django-cities-light
models?
Answer: Assuming that your venue_city column at the moment contains integer values
that correspond to primary keys in the cities model, you can still carry out
the migration by editing the 0012_auto_20160514_2024 migration file. First do
./manage.py sqlmigrate venues 0012
Watch closely for the bit of generated SQL that corresponds to the venue_city
column. Now you need to edit the migration file and replace the default
generated operation with a migrations.RunSQL. The query that goes into it is
essentially the query that you found with sqlmigrate; just add `USING
venue_city::integer` to the end.
**Update** : Based on your updated question, you would replace this section
migrations.AlterField(
model_name='venue',
name='venue_city',
field=models.ForeignKey(to='cities_light.City', blank=True),
),
with custom SQL that modifies the column.
**Update 2** : Since you are unable to run `sqlmigrate` I will try to show you
what that SQL should be.
migrations.RunSQL(''' ALTER TABLE venues_venue ALTER venue_city TYPE integer USING venue_city::integer '''),
migrations.RunSQL(''' ALTER TABLE venues_venue ALTER venue_city DROP NOT NULL '''),
    migrations.RunSQL(''' ALTER TABLE venues_venue RENAME COLUMN venue_city TO venue_city_id '''),
migrations.RunSQL(''' ALTER TABLE venues_venue ADD CONSTRAINT venues_venus_somefk FOREIGN KEY (venue_city_id) REFERENCES cities_light (id) DEFERRABLE INITIALLY DEFERRED'''),
With the full query essentially being what sqlmigrate shows it to be.
|
makedirs() gives AttributeError: 'int' object has no attribute 'rfind'
Question: I'm trying to create a program that saves a backup by creating a directory for
a zip file. This is an exercise from _A Byte of Python_ (I'm going to give the
full example so that you can see where the author is going). The example code is:
#! /usr/bin/env python3
import os
import time
# 1. The files and directories to be backed up are specified in a list.
source = ['~/Desktop/python']
# 2. The backup must be stored in a main backup directory
target_dir = '~/Dropbox/Backup/' # Remember to change this to what you'll be using
# 3. The files are backed up into a zip file.
# 4. the name of the zip archive is the current date and time
target = target_dir + os.sep + time.strftime('%Y%m%d%H%M%S') +'.zip'
now = time.strftime('%H%M%S')
# Create the subdirectory if it isn't already there.
if not os.path.exists(today):
os.mkdir(today) # make directory
print('Successfully created directory', today)
# The name of the zip file
target = today + os.sep + now + '.zip'
# 5. We use the zip command to put the files in a zip archive
zip_command = "zip -qr {0} {1}".format(target, ' '.join(source))
print(zip_command)
# Run the backup
if os.system(zip_command) == 0:
print('Successful backup to', target)
else:
print('Backup FAILED')
This pulls up the error:
Traceback (most recent call last):
File "backup_ver2.py", line 23, in <module>
os.mkdir(today) # make directory
TypeError: mkdir: illegal type for path parameter
My solution:
import os
import time
today = 14052016 # I set today as a string to solve a previous issue.
.....
# Create the subdirectory if it isn't already there.
if not os.path.exists(today):
os.makedirs(today, exist_ok=True) # make directory
print('Successfully created directory', today)
Which gives the error:
Traceback (most recent call last):
File "backup_ver2a.py", line 23, in <module>
os.makedirs(today, exist_ok=True) # make directory
File "/usr/lib/python3.4/os.py", line 222, in makedirs
head, tail = path.split(name)
File "/usr/lib/python3.4/posixpath.py", line 103, in split
i = p.rfind(sep) + 1
AttributeError: 'int' object has no attribute 'rfind'
This traceback has referenced lines in the module so now I know I'm in
trouble. Is it possible that the variable "today" is still at the heart of
both of these errors? Is there a better way to define today so as not to pull
so many errors or is there a better way to check and create a subdirectory? If
you guys notice more errors in his example, please don't correct them. I'm
sure I'll find them soon. :) Thanks for any help.
Notes: I'm running ubuntu 14.04 LTS and use python 3
Answer: Agreeing with @gdlmx, both errors result from your variable "today", which
is an int and not a string. Thus, you simply need to change that variable from
an int to a string by putting it in quotes, like the following line of code:
today = "14052016"
Once this is done, the errors you're getting should fade away.
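If you would rather keep `today` tied to the actual date, as the book eventually does, here is a minimal sketch (the backup directory path is an illustrative assumption):

    import os
    import time

    target_dir = os.path.expanduser('~/Dropbox/Backup')        # hypothetical backup root
    today = os.path.join(target_dir, time.strftime('%Y%m%d'))  # e.g. .../20160514
    if not os.path.exists(today):
        os.makedirs(today)
        print('Successfully created directory', today)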
|
How do I best adjust a timestamp with a timezone in python?
Question: I need to operate on dates within my pandas dataframe but my code has a bug.
Specifically I am importing a column of timestamps from a csv file.
x['Created at']
0 2016-05-13 13:28:41 -0400
1 2016-05-13 05:11:18 -0400
3 2016-05-12 18:06:42 -0400
4 2016-05-12 16:06:24 -0400
5 2016-05-12 13:58:01 -0400
6 2016-05-12 03:30:27 -0400
I am then changing this data into a datetime. I am doing this via
`pandas.to_datetime(df['date'])` but when I do this, the time is getting
shifted by 4 hours.
x.Createdat
0 2016-05-13 17:28:41
1 2016-05-13 09:11:18
3 2016-05-12 22:06:42
4 2016-05-12 20:06:24
5 2016-05-12 17:58:01
6 2016-05-12 07:30:27
I am assuming this is because of the `-0400` at the end of the timestamp but I
can not figure out the best way to resolve this issue so I can aggregate this
data in my own timezone.
Answer: If the -0400 offset is information that you do not need or want, then simply change
your use of `pandas.to_datetime(df['date'])` to
`pandas.to_datetime(df['date'].apply(lambda x: x[:-6]))`, which will drop the
-0400 suffix from the string. Not the best and most robust approach, but it will work.
If you want to use the -0400 but you want to convert it to a different
timezone, check out `tz_localize` as described in this answer: [convert gmt to
local timezone in pandas](http://stackoverflow.com/questions/34584500/convert-
gmt-to-local-timezone-in-pandas)
Another tool that should help is using `pytz`: [pytz - Converting UTC and
timezone to local time](http://stackoverflow.com/questions/25264811/pytz-
converting-utc-and-timezone-to-local-time)
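For example, a hedged sketch of timezone-aware parsing followed by conversion (the column name and the target zone `US/Eastern` are only assumptions; substitute your own):
    import pandas as pd

    # Parse the strings including the -0400 offset, normalising to UTC ...
    x['Created at'] = pd.to_datetime(x['Created at'], utc=True)
    # ... then convert to the timezone you want to aggregate in.
    x['Created at'] = x['Created at'].dt.tz_convert('US/Eastern')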
|
Python - Module Not Found
Question: I am a beginner with Python. Before I start, here's my Python folder structure
-project
----src
------model
--------order.py
----hello-world.py
Under `src` I have a folder named `model` which has a Python file called
`order.py`, whose contents follow:
class SellOrder(object):
def __init__(self,genericName,brandName):
self.genericName = genericName
self.brandName = brandName
Next my `hello-world.py` is inside the `src` folder, one level above
`order.py`:
import model.order.SellOrder
order = SellOrder("Test","Test")
print order.brandName
Whenever I run `python hello-world.py` it results in the error
Traceback (most recent call last):
File "hello-world.py", line 1, in <module>
import model.order.SellOrder
ImportError: No module named model.order.SellOrder
Is there anything I missed?
Answer: All modules in Python have to have a certain directory structure. [You can
find details here.](https://docs.python.org/2/tutorial/modules.html#packages)
Create an empty file called `__init__.py` under the `model` directory, such
that your directory structure would look something like that:
.
└── project
└── src
├── hello-world.py
└── model
├── __init__.py
                └── order.py
Also in your `hello-world.py` file change the import statement to the
following:
from model.order import SellOrder
That should fix it :)
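With the `__init__.py` in place and the import corrected, a minimal `hello-world.py` could look like this (run from inside `src`):
    from model.order import SellOrder

    order = SellOrder("Test", "Test")
    print order.brandName   # prints "Test"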
P.S.: If you are placing your `model` directory in some other location (not in
the same directory branch), you will have to modify the python path using
`sys.path`.
|
/usr/local/bin/python3: bad interpreter: No such file or directory for ubuntu 14.04
Question: Hi, my Python installation is in a different directory, and I am using a Docker
image (built on a Mac) whose shell script refers to the shebang line
`/usr/local/bin/python3`, which does not exist on my system.
My Python installation path:
Python 3.4.3 (default, Oct 14 2015, 20:28:29)
[GCC 4.8.4] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import sys
>>> sys.path
['', '/home/myuser/project', '/usr/lib/python3.4', '/usr/lib/python3.4/plat-x86_64-linux-gnu', '/usr/lib/python3.4/lib-dynload', '/usr/local/lib/python3.4/dist-packages', '/usr/lib/python3/dist-packages']
>>>
So, without changing the shebang line, is there a way I can redirect or link to
my installation of python3 to get past this error?
Is it recommended to install python3 in the given path?
Please advise.
Answer: If you can't modify the shebang of the file but you have access to the
Dockerfile that creates your Docker image, you can add a directive that
creates the symbolic link: `ln -s /usr/bin/python3 /usr/local/bin/`. If you don't
have access to the Dockerfile, you can run the same command from within
the running Docker instance instead. That should solve your issue without having to
modify the file.
<https://docs.docker.com/engine/reference/builder/#cmd>
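For reference, a hedged Dockerfile sketch of that approach (this assumes `python3` really lives at `/usr/bin/python3` inside the image, as it does on a stock Ubuntu 14.04):
    # Dockerfile fragment: provide the interpreter path the shebang expects
    RUN ln -s /usr/bin/python3 /usr/local/bin/python3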
|
Difficult Dataframe Reshape in Python Pandas
Question: If I have a dataframe that looks like this:
DATE1 DATE2 DATE3 AMOUNT1 AMOUNT2 AMOUNT3
1 1/1/15 5/22/14 7/12/13 5 6 3
.. .. .. .. .. .. ..
and I want to get it in the form:
DATE AMOUNT
1 1/1/15 5
2 5/22/14 6
3 7/12/13 3
.. .. ..
What is the most efficient code to do this? From what I can tell, melting or
grouping won't work because of the difference in column names (DATE1, DATE2,
etc). Is the best thing to subset the "1" columns, "2" columns, and "3"
columns into smaller dataframes, rename the columns, and concat? Or is there a
better way to do it that I'm missing?
Thanks.
Answer: You could use `pd.lreshape`:
import pandas as pd
df = pd.DataFrame([['1/1/15', '5/22/14', '7/12/13', 5, 6, 3]],
columns=['DATE1', 'DATE2', 'DATE3', 'AMOUNT1', 'AMOUNT2', 'AMOUNT3'])
result = pd.lreshape(df, {'AMOUNT': ['AMOUNT1', 'AMOUNT2', 'AMOUNT3'],
'DATE': ['DATE1', 'DATE2', 'DATE3']})
print(result)
yields
DATE AMOUNT
0 1/1/15 5
1 5/22/14 6
2 7/12/13 3
The second argument to `pd.lreshape` is a dict of key/value pairs. Each key is
the name of a desired column, and each value is a list of columns from `df`
which you wish to coalesce into one column.
See the docstring, `help(pd.lreshape)`, for a little more on `pd.lreshape`.
* * *
Alternatively, you could use `pd.melt` to coalesce all the columns into one
column, and use `str.extract` to separate the text-part from the numeric-part
of the column names. Then use `pivot` to obtain the desired result:
result = pd.melt(df)
result[['variable', 'num']] = result['variable'].str.extract('(\D+)(\d+)', expand=True)
result = result.pivot(index='num', columns='variable', values='value')
print(result)
yields
variable AMOUNT DATE
num
1 5 1/1/15
2 6 5/22/14
3 3 7/12/13
|
Python: CARD GAME BOT - Remastering (How do you make a line continue regardless of where it is?)
Question: I'm remastering the bot by myself and I'm stuck! This is the code which prompts
the user to select how many cards they get, from the options of
> `7, 9, 11, and 15`
def Cards():
print("Card Amounts")
print("\nChoices")
print(7)
print(9)
print(11)
print(15)
PlayerInput3()
def PlayerInput3():
global PlayersCards
PlayerInput = int(raw_input())
if(PlayerInput == 7):
PlayersCards == range(1,7)
print("Lets get started")
Game()
But when they choose how many cards they want, it doesn't stay in effect after
the function is over. I want the player's card range to carry over into a
different function. Here:
def Game():
global roundNumber, MyDeck, PlayersCards
import random
Select = PlayersCards
roundNumber = roundNumber + 1
print("Round %d!") % (roundNumber)
if(roundNumber == 1) or (roundNumber < 15):
PlayersCards = random.randint(1, 50)
MyDeck.append(PlayersCards)
print("Here are your cards")
print(MyDeck)
print("Select a card")
But it won't continue on past the
> `def Cards():`
How can I make it so that `PlayersCards == range(1,7)` continues on
regardless of which function it is in?
Answer: Two things to note first: `PlayersCards == range(1,7)` is a comparison, not an
assignment (use `=`), and any function that assigns the shared variable needs a
`global PlayersCards` declaration. With that in mind, I think this code works as you require:
def instructions():
print("You will be playing with an ai and whoever lays down the highest number wins that round.")
print("The points you get are determined by how much higher your card was from your opponents card.")
print("The person with the most points wins!")
def getUserInput():
global roundNumber, My_Deck, PlayerPoints, AIPoints
My_Deck = []
roundNumber = 0
AIPoints = 0
PlayerPoints = 0
print ("\nDo you want to play?: ")
print("\nChoices")
print("1. Yes")
print("2. No\n")
Choice = input()
if(Choice == 'Yes') or (Choice == 'yes'):
print("\nOkay, lets get started!")
startGame()
elif(Choice in ['No', 'no']):
print("Okay, bye!")
quit()
else:
print("That is not a Choice!")
print("Choose 'Yes' or 'No'")
getUserInput()
def startGame():
global roundNumber, My_Deck, PlayerPoints, AIPoints
print("\nAIPoints = %d PlayerPoints = %d" % (AIPoints, PlayerPoints))
roundNumber = roundNumber + 1
print("\nRound %d!" % (roundNumber))
cardChoosen = None
import random
if(roundNumber == 1):
print("\nHere are your 9 cards.\n")
for Cards in range(9):
Cards = random.randint(1, 100)
My_Deck.append(Cards)
while True:
print("Select one of your cards: "),
print(My_Deck)
Select = int(input())
try:
if (Select in My_Deck):
My_Deck.remove(Select)
print("You choose", Select)
print("Your deck now is:")
print(My_Deck)
cardChoosen = Select
break
else:
print("You don't have that card in your deck!")
except ValueError as e:
print(e)
elif(roundNumber == 10):
if(PlayerPoints > AIPoints):
print("\nCongratulations you won with a score of %d compared to the AI's %d" % (PlayerPoints, AIPoints))
getUserInput()
elif(PlayerPoints < AIPoints):
print("\nUnfortunately you lost with a score of %d compared to the AI's %d" % (PlayerPoints, AIPoints))
getUserInput()
else:
                print("\nWow this is basically impossible, you tied with the AI, with you both having a score of %d and %d... " % (PlayerPoints, AIPoints))
getUserInput()
else:
print("\nHere are your %d cards.\n" % (9 - roundNumber + 1))
while True:
print("Select one of your cards: "),
print(My_Deck)
Select = int(input())
try:
if (Select in My_Deck):
My_Deck.remove(Select)
print("You choose", Select)
print("Your deck now is:")
print(My_Deck)
cardChoosen = Select
break
else:
print("You don't have that card in your deck!")
except ValueError as e:
print(e)
AINumber = random.randint(1, 100)
if(cardChoosen > AINumber):
print("\nYou won! Your number %d was higher than the AI's number %d" % (cardChoosen, AINumber))
print("\nYou scored %d points" % (cardChoosen - AINumber))
PlayerPoints = PlayerPoints + (cardChoosen - AINumber)
startGame()
elif(cardChoosen < AINumber):
print("\nYou Lost! Your number %d was lower than the AI's number %d" % (cardChoosen, AINumber))
print("\nAI scored %d points" % (AINumber - cardChoosen))
AIPoints = AIPoints + (AINumber - cardChoosen)
startGame()
else:
print("\nYou tied with the AI! Your number %d was the same as the AI's number %d" % (cardChoosen, AINumber))
print("\nNobody scored points!")
startGame()
My_Deck = []
roundNumber = 0
AIPoints = 0
PlayerPoints = 0
instructions()
getUserInput()
|
python2.7 print values in list
Question: I have a question about printing values in a list.
import time
strings = time.strftime("%Y,%m,%d")
t = strings.split(',')
date = [int(x) for x in t]
print date
then result is
[2016,5,15]
But I want to print the values in `date` like this:
20160515
How can I fix it?
Answer: What's wrong with doing it like this:
>>> strings = time.strftime("%Y%m%d")
>>> strings
'20160515'
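If you already have the list of integers and still want the joined form, a small sketch (zero-padding the month and day so that e.g. May prints as `05`):
    date = [2016, 5, 15]
    print '{:04d}{:02d}{:02d}'.format(*date)   # 20160515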
|
Parsing a file that looks like JSON to a JSON
Question: I'm trying to figure out what is the best way to go about this problem: I'm
reading text lines from a certain buffer that eventually creates a certain log
that looks something like this:
Some_Information: here there's some information about date and hour
Additional information: log summary #1234:
details {
name: "John Doe"
address: "myAdress"
phone: 01234567
}
information {
age: 30
height: 1.70
weight: 70
}
I would like to get all the fields in this log into a dictionary which I can
later turn into a JSON file. The different sections in the log are not
important, so for example if myDictionary is a dictionary variable in Python I
would like:
> myDictionary['age']
to show me `30`, and the same for all other fields.
Speed is very important here, which is why I would like to go through every
line only once and get it into a dictionary.
My way of doing this would be: for each line that contains a ":" colon, I
would split the string and store the key and the value in the dictionary. Is
there a better way to do it? Is there a Python module that would be
sufficient?
If more information is needed please let me know.
Edit: So I've tried something that looks to work best so far; I am
currently reading from a file to simulate reading from the buffer.
My code:
import json
import shlex
newDict = dict()
with open('log.txt') as f:
for line in f:
try:
line = line.replace(" ", "")
stringSplit = line.split(':')
key = stringSplit[0]
value = stringSplit[1]
value = shlex.split(value)
newDict[key] = value[0]
except:
continue
with open('result.json', 'w') as fp:
json.dump(newDict, fp)
Resulting in the following .json:
{"name": "JohnDoe", "weight": "70", "Additionalinformation": "logsummary#1234",
"height": "1.70", "phone": "01234567", "address": "myAdress", "age": "30"}
Answer: You haven't described exactly what the desired output should be from the
sample input, so it's not completely clear what you want done. So I guessed
and the following only extracts data values from lines following one that
contains a `'{'` until one with a `'}'` in it is encountered, while ignoring
others.
It uses the `re` module to isolate the two parts of each dictionary item
definition found on the line, and then uses the `ast` module to convert the
value portion of that into a valid Python literal (i.e. string, number, tuple,
list, dict, bool, and `None`).
import ast
import json
import re
pat = re.compile(r"""(?P<key>\w+)\s*:\s*(?P<value>.+)$""")
data_dict = {}
with open('log.txt', 'rU') as f:
braces = 0
for line in (line.strip() for line in f):
if braces > 0:
match = pat.search(line)
if match and len(match.groups()) == 2:
key = match.group('key')
value = ast.literal_eval(match.group('value'))
data_dict[key] = value
elif '{' in line:
braces += 1
elif '}' in line:
braces -= 1
else:
pass # ignore line
print(json.dumps(data_dict, indent=4))
Output from your example input:
{
"name": "John Doe",
"weight": 70,
"age": 30,
"height": 1.7,
"phone": 342391,
"address": "myAdress"
}
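(Note that the leading-zero phone number `01234567` is apparently evaluated as an octal literal by `ast.literal_eval` under Python 2, which is why it shows up as `342391`.) With the parsed dictionary you can then access individual fields directly, as the question asked:
    print(data_dict['age'])    # 30
    print(data_dict['name'])   # John Doe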
|
How to open a new window by clicking a button using Tkinter in python?
Question: I want to make a GUI application in Python, and for that I was writing this type
of code. I have already tried many approaches but I was not able to get it to
meet the requirement.
Answer: What's stopping you from doing it? Please refer to the original post
[here](http://stackoverflow.com/questions/27639298/python-tkinter-open-a-new-window-with-a-button-prompt).
But here is the basic code:
import Tkinter as tk
def create_window():
window = tk.Toplevel(root)
root = tk.Tk()
b = tk.Button(root, text="Create new window", command=create_window)
b.pack()
root.mainloop()
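If you also want something inside the new window, a minimal sketch of the same approach (note that on Python 3 the module is named `tkinter`, lowercase):
    import Tkinter as tk   # on Python 3: import tkinter as tk

    def create_window():
        window = tk.Toplevel(root)
        tk.Label(window, text="This is a new window").pack()

    root = tk.Tk()
    tk.Button(root, text="Create new window", command=create_window).pack()
    root.mainloop()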
|
Python, using variables from __init__ in a parent dir
Question: I have a website I've been writing using Flask, although for this question I
do not think that's relevant. This is the folder structure I'm working with.
Rhea is the name of the project and the parent directory.
Rhea
|- Scripts
|-script1.py
|- Static
|- Templates
|- __init__
My problem is I declare variables inside my init that I need to use inside
script1.py. How do I import these into script1.py?
For reference this is my init file
import os
from flask import Flask
from flask_appconfig import AppConfig
from flask_bootstrap import Bootstrap
from flask.ext.sqlachemy import SQLAlchemy
from .frontend import frontend
from .nav import nav
from .models import User
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'db', 'userdb.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db')
WTF_CSRF_ENABLED = True
BOOTSTRAP_SERVE_LOCAL = True
SECRET_KEY = --SNIP--
app = Flask(__name__)
app.config.from_object(__name__)
AppConfig(app)
Bootstrap(app)
db = SQLAlchemy(app)
app.register_blueprint(frontend)
nav.init_app(app)
app.run(host='0.0.0.0', port=6060, debug=False);
return app
The variables I need are db, SQLALCHEMY_DATABASE_URI, and
SQLALCHEMY_MIGRATE_REPO.
Thanks for any help.
Answer: _A_ solution is to append the `Rhea` package's parent directory to `sys.path`,
but it's ugly.
    # script1.py
import os
import sys
rhea_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(rhea_dir)
import Rhea
print Rhea.SQLALCHEMY_DATABASE_URI
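If the `Rhea` package is importable this way, you can also import just the names you need. Note that importing the package runs everything in `__init__`, including `app.run()`, so in practice you would move that call behind an `if __name__ == '__main__':` guard or into a separate run script. A hedged sketch:
    # script1.py -- assumes Rhea's parent directory is on sys.path and that
    # app.run() has been moved out of __init__ (otherwise the import blocks)
    from Rhea import db, SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO

    print SQLALCHEMY_DATABASE_URI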
|
Animated Splash in wxPython
Question: I am struggling with wxPython and an animated (GIF) splash screen during a
long-running function. So far I have:
class Splash(wx.SplashScreen):
def __init__(self, parent=None, id=-1):
image = "spinner.gif"
aBitmap = wx.Image(name =image).ConvertToBitmap()
splashStyle = wx.SPLASH_CENTRE_ON_PARENT
splashDuration = 0 # milliseconds
wx.SplashScreen.__init__(self, aBitmap, splashStyle,
splashDuration, parent)
gif = wx.animate.GIFAnimationCtrl(self, id, image,)
self.Show()
self.gif = gif
def Run(self,):
self.gif.Play()
I would like to do something like:
splash = Splash()
splash.Run()
result = very_time_consuming_function()
splash.Close()
...
use the result
Any input will be appreciated
Answer: You should perform the time-consuming work on another thread, otherwise the GUI
will block and not respond.
* Have a worker thread perform the time consuming task.
* Upon completing the task, inform the GUI thread so that the splash is destroyed.
Here is a snippet:
import wx
import wx.animate
from threading import Thread
import time
def wrap_very_time_consuming_function():
print "sleeping"
time.sleep(5) # very time consuming function
print "waking up"
wx.CallAfter(splash.gif.Stop)
return 0
app = wx.App()
splash = Splash()
splash.Run()
Thread(target=wrap_very_time_consuming_function).start()
app.MainLoop()
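To also dismiss the splash (not just stop the animation) once the work finishes, you can schedule the close on the GUI thread in the same way. A hedged sketch, assuming the `Splash` class and `very_time_consuming_function` from the question:
    def wrap_very_time_consuming_function():
        result = very_time_consuming_function()  # runs on the worker thread
        wx.CallAfter(splash.gif.Stop)            # stop the GIF on the GUI thread
        wx.CallAfter(splash.Close)               # then dismiss the splash window
        return result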
|