content
stringlengths
85
101k
title
stringlengths
0
150
question
stringlengths
15
48k
answers
list
answers_scores
list
non_answers
list
non_answers_scores
list
tags
list
name
stringlengths
35
137
Q: Why won't this python script work on wild cards in filenames in Win XP? What should I change to make in work on Windows-XP keeping in mind we are working on Python 2-4 ? """ Author: Matt Weber Date: 03/04/07 Renames files based on the input options. """ import os import sys from optparse import OptionParser def RenameFile(options, filepath): """ Renames a file with the given options """ # split the pathname and filename pathname = os.path.dirname(filepath) filename = os.path.basename(filepath) # trim characters from the front if options.trimfront: filename = filename[options.trimfront:] # trim characters from the back if options.trimback: filename = filename[:len(filename)-options.trimback] # replace values if any if options.replace: for vals in options.replace: filename = filename.replace(vals[0], vals[1]) # convert to lowercase if flag set if options.lowercase: filename = filename.lower() # create the new pathname and rename the file new_filepath = os.path.join(pathname, filename) try: # check for verbose output if options.verbose: print "%s -> %s" % (filepath, new_filepath) os.rename(filepath, new_filepath) except OSError, ex: print >>sys.stderr, "Error renaming '%s': %s" % (filepath, ex.strerror) if __name__ == "__main__": """ Parses command line and renames the files passed in """ # create the options we want to parse usage = "usage: %prog [options] file1 ... 
fileN" optParser = OptionParser(usage=usage) optParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Use verbose output") optParser.add_option("-l", "--lowercase", action="store_true", dest="lowercase", default=False, help="Convert the filename to lowercase") optParser.add_option("-f", "--trim-front", type="int", dest="trimfront", metavar="NUM", help="Trims NUM of characters from the front of the filename") optParser.add_option("-b", "--trim-back", type="int", dest="trimback", metavar="NUM", help="Trims NUM of characters from the back of the filename") optParser.add_option("-r", "--replace", action="append", type="string", nargs=2, dest="replace", help="Replaces OLDVAL with NEWVAL in the filename", metavar="OLDVAL NEWVAL") (options, args) = optParser.parse_args() # check that they passed in atleast one file to rename if len(args) < 1: optParser.error("Files to rename not specified") # loop though the files and rename them for filename in args: RenameFile(options, filename) # exit successful sys.exit(0) A: Programming languages generally don't support shell globbing without you telling it to. You're going to want to use the Globbing module to get that functionality. A: I think smth like this must work for you import glob#on the top of script #..... for filename in args: for match in glob.iglob(filename):#we use 'glob' module for support wildcards RenameFile(options, match)
Why won't this python script work on wild cards in filenames in Win XP?
What should I change to make in work on Windows-XP keeping in mind we are working on Python 2-4 ? """ Author: Matt Weber Date: 03/04/07 Renames files based on the input options. """ import os import sys from optparse import OptionParser def RenameFile(options, filepath): """ Renames a file with the given options """ # split the pathname and filename pathname = os.path.dirname(filepath) filename = os.path.basename(filepath) # trim characters from the front if options.trimfront: filename = filename[options.trimfront:] # trim characters from the back if options.trimback: filename = filename[:len(filename)-options.trimback] # replace values if any if options.replace: for vals in options.replace: filename = filename.replace(vals[0], vals[1]) # convert to lowercase if flag set if options.lowercase: filename = filename.lower() # create the new pathname and rename the file new_filepath = os.path.join(pathname, filename) try: # check for verbose output if options.verbose: print "%s -> %s" % (filepath, new_filepath) os.rename(filepath, new_filepath) except OSError, ex: print >>sys.stderr, "Error renaming '%s': %s" % (filepath, ex.strerror) if __name__ == "__main__": """ Parses command line and renames the files passed in """ # create the options we want to parse usage = "usage: %prog [options] file1 ... 
fileN" optParser = OptionParser(usage=usage) optParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Use verbose output") optParser.add_option("-l", "--lowercase", action="store_true", dest="lowercase", default=False, help="Convert the filename to lowercase") optParser.add_option("-f", "--trim-front", type="int", dest="trimfront", metavar="NUM", help="Trims NUM of characters from the front of the filename") optParser.add_option("-b", "--trim-back", type="int", dest="trimback", metavar="NUM", help="Trims NUM of characters from the back of the filename") optParser.add_option("-r", "--replace", action="append", type="string", nargs=2, dest="replace", help="Replaces OLDVAL with NEWVAL in the filename", metavar="OLDVAL NEWVAL") (options, args) = optParser.parse_args() # check that they passed in atleast one file to rename if len(args) < 1: optParser.error("Files to rename not specified") # loop though the files and rename them for filename in args: RenameFile(options, filename) # exit successful sys.exit(0)
[ "Programming languages generally don't support shell globbing without you telling it to. You're going to want to use the Globbing module to get that functionality.\n", "I think smth like this must work for you\nimport glob#on the top of script\n#.....\nfor filename in args:\n for match in glob.iglob(filename)...
[ 2, 2 ]
[]
[]
[ "python", "python_2.4", "wildcard" ]
stackoverflow_0004068383_python_python_2.4_wildcard.txt
Q: Python: NameError: global name 'foobar' is not defined I have written the following class: class myClass(object): def __init__(self): pass def foo(self, arg1, arg2): pp = foobar(self, arg1, arg2) if pp: return 42 else return -666 def foobar(self, arg1, arg2): if arg1 == arg2: return 42 else: return None The logic is nonsensical - ignore it. What I am trying to so is to call an instance method from another instance method - and I am getting a NameError. I originally thought that this was due to foo() calling foobar() before it had been defined - but switching the order of the function definitions in the script made no difference. Does anyone what's causing this error, and how to fix it? A: Python doesn't scope code to the local class automatically; you need to tell it to. pp = self.foobar(arg1, arg2) http://docs.python.org/tutorial/classes.html
Python: NameError: global name 'foobar' is not defined
I have written the following class: class myClass(object): def __init__(self): pass def foo(self, arg1, arg2): pp = foobar(self, arg1, arg2) if pp: return 42 else return -666 def foobar(self, arg1, arg2): if arg1 == arg2: return 42 else: return None The logic is nonsensical - ignore it. What I am trying to so is to call an instance method from another instance method - and I am getting a NameError. I originally thought that this was due to foo() calling foobar() before it had been defined - but switching the order of the function definitions in the script made no difference. Does anyone what's causing this error, and how to fix it?
[ "Python doesn't scope code to the local class automatically; you need to tell it to.\npp = self.foobar(arg1, arg2)\n\nhttp://docs.python.org/tutorial/classes.html\n" ]
[ 52 ]
[]
[]
[ "nameerror", "namespaces", "python" ]
stackoverflow_0004068785_nameerror_namespaces_python.txt
Q: Making specific frequency (ranges) louder I want to make certain frequencies in a sequence of audio data louder. I have already analyzed the data using FFT and have gotten a value for each audio frequency in the data. I just have no idea how I can use the frequencies to manipulate the sound data itself. From what I understand so far, data is encoded in such a way that the difference between every two consecutive readings determines the audio amplitude at that time instant. So making the audio louder at that time instant would involve making the difference between the two consecutive readings greater. But how do I know which time instants are involved with which frequency? I don't know when the frequency starts appearing. (I am using Python, specifically PyAudio for getting the audio data and Num/SciPy for the FFT, though this probably shouldn't be relevant.) A: You are looking for a graphic equalizer. Some quick Googling turned up rbeq, which seems to be a plugin for Rhythmbox written in Python. I haven't looked through the code to see if the actual EQ part is written in Python or is just controlling something in the host, but I recommend looking through their source.
Making specific frequency (ranges) louder
I want to make certain frequencies in a sequence of audio data louder. I have already analyzed the data using FFT and have gotten a value for each audio frequency in the data. I just have no idea how I can use the frequencies to manipulate the sound data itself. From what I understand so far, data is encoded in such a way that the difference between every two consecutive readings determines the audio amplitude at that time instant. So making the audio louder at that time instant would involve making the difference between the two consecutive readings greater. But how do I know which time instants are involved with which frequency? I don't know when the frequency starts appearing. (I am using Python, specifically PyAudio for getting the audio data and Num/SciPy for the FFT, though this probably shouldn't be relevant.)
[ "You are looking for a graphic equalizer. Some quick Googling turned up rbeq, which seems to be a plugin for Rhythmbox written in Python. I haven't looked through the code to see if the actual EQ part is written in Python or is just controlling something in the host, but I recommend looking through their source.\...
[ 1 ]
[]
[]
[ "audio", "frequency", "python" ]
stackoverflow_0004069157_audio_frequency_python.txt
Q: Backport of builtin function bin() for python 2.4 I wrote a program that uses builtin function bin(), but this function is new in Python version 2.6 and I would like to run this application also in Python versions 2.4 and 2.5. Is there some backport of bin() for 2.4? A: You can try this version (credit goes to the original author): def bin(x): """ bin(number) -> string Stringifies an int or long in base 2. """ if x < 0: return '-' + bin(-x) out = [] if x == 0: out.append('0') while x > 0: out.append('01'[x & 1]) x >>= 1 pass try: return '0b' + ''.join(reversed(out)) except NameError, ne2: out.reverse() return '0b' + ''.join(out)
Backport of builtin function bin() for python 2.4
I wrote a program that uses builtin function bin(), but this function is new in Python version 2.6 and I would like to run this application also in Python versions 2.4 and 2.5. Is there some backport of bin() for 2.4?
[ "You can try this version (credit goes to the original author):\ndef bin(x):\n \"\"\"\n bin(number) -> string\n\n Stringifies an int or long in base 2.\n \"\"\"\n if x < 0: \n return '-' + bin(-x)\n out = []\n if x == 0: \n out.append('0')\n while x > 0:\n out.append('01...
[ 6 ]
[]
[]
[ "backport", "binary", "built_in", "python" ]
stackoverflow_0004069392_backport_binary_built_in_python.txt
Q: How does Python evaluate this expression? How does Python evaluate the following expression? anim1 gets executed after anim2. How does a simple + operator that? anim1 = Animation(duration=1, center=(100,100) type='delta') anim2 = Animation(duration=1, rotation=45 type='delta') anim = anim1 + anim2 A: This will call anim1.__add__(anim2). In order to understand what is happening under the hood you have to inspect the definition of __add__ method from Animation class. A: In Python, you can redefine the behavior of the mathematical operators. If I understood your question, Animation probably redefines the "+" operator using the __add__ method. More info: Official Documentation A: Check out the dis module. It has a function dis that will take a function/module/class and show you the byte code.
How does Python evaluate this expression?
How does Python evaluate the following expression? anim1 gets executed after anim2. How does a simple + operator that? anim1 = Animation(duration=1, center=(100,100) type='delta') anim2 = Animation(duration=1, rotation=45 type='delta') anim = anim1 + anim2
[ "This will call anim1.__add__(anim2).\nIn order to understand what is happening under the hood you have to inspect the definition of __add__ method from Animation class.\n", "In Python, you can redefine the behavior of the mathematical operators. If I understood your question, Animation probably redefines the \"+...
[ 8, 3, 0 ]
[]
[]
[ "python", "syntax" ]
stackoverflow_0004069366_python_syntax.txt
Q: Python: problems with csvwriter I am attempting to write data (mostly dates, booleans and float data types) into a CSV file format. Here is a snippet of my code: # Write data to file with open(OUTPUT_DIR + output_filename,'w') as outputfile: wrtr = csv.writer(outputfile, delimiter=',', quotechar='"') for x, y in datarows.items(): a,b,c,d,e,f,g = (somedate.strft('%Y-%m-%d'),0,6058.7,False,1913736200,0,False) rowstr = "{0},{1},{2},{3},{4},{5},{6}".format(a,b,c,d,e,f,g) wrtr.writerow(rowstr) outputfile.close() File contents look like this: 2,0,0,7,-,10,-,03,",",0,",",6,0,5,8,.,7,",",F,a,l,s,e,",",1,9,1,3,7,3,6,2,0,0,",",0,",",F,a,l,s,e I am currently using the raw file object to write to file - but I would prefer to use the csvwrite - since that is what its supposed to be used for A: Try wrtr.writerow([a,b,c,d,e,f,g]) instead. writerow() parameter must be a list of values, not a text line. The idea of csv.writer is that it will format row values according to CSV rules. The result you have is due to the fact that writerow() interpreted your rowstr string as a list of characters. A: look at this example maybe it can help you: import datetime with open('file.csv','w') as outputfile: wrtr = csv.writer(outputfile, delimiter=',', quotechar='"') a = (datetime.datetime.now().strftime('%Y-%m-%d'),0,6058.7,False,1913736200,0,False) wrtr.writerow(a) # pass an iterable here # outputfile.close() you don't have to call close() because you use with file.csv 2010-11-01,0,6058.7,False,1913736200,0,False hope this can help you
Python: problems with csvwriter
I am attempting to write data (mostly dates, booleans and float data types) into a CSV file format. Here is a snippet of my code: # Write data to file with open(OUTPUT_DIR + output_filename,'w') as outputfile: wrtr = csv.writer(outputfile, delimiter=',', quotechar='"') for x, y in datarows.items(): a,b,c,d,e,f,g = (somedate.strft('%Y-%m-%d'),0,6058.7,False,1913736200,0,False) rowstr = "{0},{1},{2},{3},{4},{5},{6}".format(a,b,c,d,e,f,g) wrtr.writerow(rowstr) outputfile.close() File contents look like this: 2,0,0,7,-,10,-,03,",",0,",",6,0,5,8,.,7,",",F,a,l,s,e,",",1,9,1,3,7,3,6,2,0,0,",",0,",",F,a,l,s,e I am currently using the raw file object to write to file - but I would prefer to use the csvwrite - since that is what its supposed to be used for
[ "Try wrtr.writerow([a,b,c,d,e,f,g]) instead.\nwriterow() parameter must be a list of values, not a text line. The idea of csv.writer is that it will format row values according to CSV rules.\nThe result you have is due to the fact that writerow() interpreted your rowstr string as a list of characters.\n", "look a...
[ 5, 3 ]
[]
[]
[ "csv", "python" ]
stackoverflow_0004069418_csv_python.txt
Q: How to add hooks in twisted.web (or twisted.web2)? How can I add a hook before and after processing a request on twisted.web (twisted.web2 is fine too)? The equivalent of webpy's: app = web.application(urls, globals()) app.add_processor(web.loadhook(my_attach_callback)) app.add_processor(web.unloadhook(my_detach_callback)) Thanks! A: One approach Twisted Web allows is to insert an extra resource into the resource hierarchy the only purpose of which is to run your custom hooks, rather than to actually handle a segment of the request URL as resources typically do. You can find an implementation of this approach in twisted/web/_auth/wrapper.py which implements the HTTPAuthSessionWrapper resource (exposed publicly in twisted.web.guard). Note the first line of getChildWithDefault which ensures the resource doesn't consume one of the request segments. This allows it to sit in the resource hierarchy, modify behavior, but not otherwise change the way URLs are dispatched.
How to add hooks in twisted.web (or twisted.web2)?
How can I add a hook before and after processing a request on twisted.web (twisted.web2 is fine too)? The equivalent of webpy's: app = web.application(urls, globals()) app.add_processor(web.loadhook(my_attach_callback)) app.add_processor(web.unloadhook(my_detach_callback)) Thanks!
[ "One approach Twisted Web allows is to insert an extra resource into the resource hierarchy the only purpose of which is to run your custom hooks, rather than to actually handle a segment of the request URL as resources typically do.\nYou can find an implementation of this approach in twisted/web/_auth/wrapper.py w...
[ 1 ]
[]
[]
[ "callback", "hook", "python", "twisted" ]
stackoverflow_0001058300_callback_hook_python_twisted.txt
Q: Python. Replacing text between html tags I want to write a function that highlights some text. It takes a HTML string as input and returns HTML string with additional html tags. Example: Input string (need to highlight the word "text"): <div> <a href="..." title="text to highlight">Some text to highlight</a> <a href="..." title="text to highlight">Some other text to highlight</a> </div> Output string: <div> <a href="..." title="text to highlight">Some <b class="highlight">text</b> to highlight</a> <a href="..." title="text to highlight">Some other <b class="highlight">text</b> to highlight</a> </div> I have found a regexp that matches text only between html tags, but I can't figure out how to surround some part of it with additional tags highlight_str = u'text' p = re.compile(r"[^<>]+(?=[<])") iterator = p.finditer(search_str) for match in iterator: # code for replacement here ??? Is there any other ideas to do it? A: Look at Beautiful Soup.
Python. Replacing text between html tags
I want to write a function that highlights some text. It takes a HTML string as input and returns HTML string with additional html tags. Example: Input string (need to highlight the word "text"): <div> <a href="..." title="text to highlight">Some text to highlight</a> <a href="..." title="text to highlight">Some other text to highlight</a> </div> Output string: <div> <a href="..." title="text to highlight">Some <b class="highlight">text</b> to highlight</a> <a href="..." title="text to highlight">Some other <b class="highlight">text</b> to highlight</a> </div> I have found a regexp that matches text only between html tags, but I can't figure out how to surround some part of it with additional tags highlight_str = u'text' p = re.compile(r"[^<>]+(?=[<])") iterator = p.finditer(search_str) for match in iterator: # code for replacement here ??? Is there any other ideas to do it?
[ "Look at Beautiful Soup. \n" ]
[ 4 ]
[]
[]
[ "html", "python", "regex", "replace", "tags" ]
stackoverflow_0004069453_html_python_regex_replace_tags.txt
Q: Python and psycopg detect network error When connected to a postgresql database using psycopg and I pull the network cable I get no errors. How can I detect this in code to notify the user? A: You will definitely get an error the next time you try and execute a query, so I wouldn't worry if you can't alert the user at the exact instance they lose there network connection. A: psycopg can't detect what happens with the network. For example, if you unplug your ethernet cable, replug it and execute a query everything will work OK. You should definitely get an exception when psycopg tries to send some SQL to the backend and there is no network connection but depending on the exact netwokr problem it can take some time. In the worst case you'll have to wait for a TCP timeout on the connection (several tens of seconds).
Python and psycopg detect network error
When connected to a postgresql database using psycopg and I pull the network cable I get no errors. How can I detect this in code to notify the user?
[ "You will definitely get an error the next time you try and execute a query, so I wouldn't worry if you can't alert the user at the exact instance they lose there network connection.\n", "psycopg can't detect what happens with the network. For example, if you unplug your ethernet cable, replug it and execute a qu...
[ 0, 0 ]
[]
[]
[ "postgresql", "psycopg", "python" ]
stackoverflow_0004061635_postgresql_psycopg_python.txt
Q: How can I build web file uploader that will allow multiple file uploads at once? When you upload files to attach to a gmail message it lets you select multiple files from the browser file selector to attach at once. How could I build the same effect with a Django/Python/jquery site? I want to let the user upload multiple files at once (if you hold down ctrl gmail file selector can select multiple files.) A: I recommend you using a jQuery plugin called Uploadify A: This is only supported in the latest browsers (read: not IE), but to enable the ability to select multiple files from a single file input field is done by adding multiple="multiple" to the input element, like so: <input type="file" multiple="multiple" /> To gracefully degrade this, you can either just not allow multiple file uploads at once for unsupported browsers, or sniff if that capability is available using a utility like modernizr, and adjust accordingly. There are a few different jQuery plugins for mutiple file uploads without the multiple="multiple" features, and from what I can tell they all mostly revolve around the idea of dynamically adding new file fields as the old ones are used, with some CSS wizardry to display the existing pending files nicely.
How can I build web file uploader that will allow multiple file uploads at once?
When you upload files to attach to a gmail message it lets you select multiple files from the browser file selector to attach at once. How could I build the same effect with a Django/Python/jquery site? I want to let the user upload multiple files at once (if you hold down ctrl gmail file selector can select multiple files.)
[ "I recommend you using a jQuery plugin called Uploadify\n", "This is only supported in the latest browsers (read: not IE), but to enable the ability to select multiple files from a single file input field is done by adding multiple=\"multiple\" to the input element, like so:\n<input type=\"file\" multiple=\"multi...
[ 2, 1 ]
[]
[]
[ "browser", "django", "file_upload", "jquery", "python" ]
stackoverflow_0004070127_browser_django_file_upload_jquery_python.txt
Q: Fetching nested child records in django framework This project is being developed in python and django. As per my requirement I want to querying all the products from the categories upto two to three level up... My entity structure is as follows. Category: - Name - ParentCategory Product: - ID - Name - Category Here are sample records which I want to query. Category: - Name: Apparel | Parent:None - Name: Shirt | Parent: Apparel - Name: TShirts | Parent: Apparel - Name: MaleTShirts | Parent:TShirts - Name: FemaleTShirts | Parent: TShirts - Name:Electornics | Parent:None Product: - ID:1 | Name:ABC | Category:MaleTShirt - ID:2 | Name:XYZ | Category:FemaleTShirt - ID:3 | Name:JKL | Category:Shirt Problem is, user should access these products from any level in category. For example, When user selects category Apparel, product ABC, XYZ, and JKL should appear in the resultset. When user selects category TShirts, product ABC and XYZ should appear in the resultset. When user selects category MaleTShirts, only ABC should appear in the resultset. When user selects category FemaleTShirts, only XYZ should appear in the resultset. Any idea how the model classes should be structured, and how should we query so that desired results can be achieved. Any suggestions would be helpful. It would be good if code is also provided. A: What about thinking an implementation of 2-interval graphs ? A: You can use something like django-mptt to store hierarchical data(your Categories model) in a database. It provides additional methods to retrieve all descendants of certain element. And using Product.objects.filter(category__in=...) you can then retrieve all products, related to selected category's descendants. A: i think your model look just fine with the fields that you have there; and for the query you can do (it's not tested): list_categories = [] head_category = Category.objects.filter(parent_category=request.GET['category']) while head_category: # Transform the Queryset to a list. 
head_category = [category.category for category in head_category] # Put all the new categories in the list for category in head_category: list_categories.append(category) # Get child categories of the current categories. child_category = Category.objects.filter(parent_category__in=head_category) head_category = child_category # Get all product from those category list. Product.objects.filter(category__parent_category__in=list_categories) A: For hierarchical data, you probably want to start using django-treebeard.
Fetching nested child records in django framework
This project is being developed in python and django. As per my requirement I want to querying all the products from the categories upto two to three level up... My entity structure is as follows. Category: - Name - ParentCategory Product: - ID - Name - Category Here are sample records which I want to query. Category: - Name: Apparel | Parent:None - Name: Shirt | Parent: Apparel - Name: TShirts | Parent: Apparel - Name: MaleTShirts | Parent:TShirts - Name: FemaleTShirts | Parent: TShirts - Name:Electornics | Parent:None Product: - ID:1 | Name:ABC | Category:MaleTShirt - ID:2 | Name:XYZ | Category:FemaleTShirt - ID:3 | Name:JKL | Category:Shirt Problem is, user should access these products from any level in category. For example, When user selects category Apparel, product ABC, XYZ, and JKL should appear in the resultset. When user selects category TShirts, product ABC and XYZ should appear in the resultset. When user selects category MaleTShirts, only ABC should appear in the resultset. When user selects category FemaleTShirts, only XYZ should appear in the resultset. Any idea how the model classes should be structured, and how should we query so that desired results can be achieved. Any suggestions would be helpful. It would be good if code is also provided.
[ "What about thinking an implementation of 2-interval graphs ?\n", "You can use something like django-mptt to store hierarchical data(your Categories model) in a database. It provides additional methods to retrieve all descendants of certain element. \nAnd using \nProduct.objects.filter(category__in=...)\nyou can ...
[ 1, 1, 1, 0 ]
[]
[]
[ "django", "django_models", "python" ]
stackoverflow_0004070082_django_django_models_python.txt
Q: 'Unloading' a Python module and 'unbinding' associated APIs I have a program which embeds Python (via Boost.Python), and provides an 'extension' framework. I wish to unify these two parts by allowing extensions to load a set of Python bindings for their API, however I'm confused as to how I would I 'unregister' these APIs when the plugin is unloaded. Is this even possible with Python and Boost.Python? I know it's not possible with Lua and LuaBind, as the documentation indicates it's impossible to 'unbind' an API. Sorry for how poorly this question is written, it's 2:30 AM here and I've been at it a while. ;) A: There is a related discussion about unloading/reloading python modules. Check out the 3rd answer (here), the one that starts off with: To cover my shame, here is a Python list thread that discusses how to delete a module. The summary: it can be especially difficult if the module is not pure python.
'Unloading' a Python module and 'unbinding' associated APIs
I have a program which embeds Python (via Boost.Python), and provides an 'extension' framework. I wish to unify these two parts by allowing extensions to load a set of Python bindings for their API, however I'm confused as to how I would I 'unregister' these APIs when the plugin is unloaded. Is this even possible with Python and Boost.Python? I know it's not possible with Lua and LuaBind, as the documentation indicates it's impossible to 'unbind' an API. Sorry for how poorly this question is written, it's 2:30 AM here and I've been at it a while. ;)
[ "There is a related discussion about unloading/reloading python modules. Check out the 3rd answer (here), the one that starts off with:\n\nTo cover my shame, here is a Python list thread that discusses how to delete a module. The summary: it can be especially difficult if the module is not pure python.\n\n" ]
[ 3 ]
[]
[]
[ "boost", "boost_python", "c++", "python" ]
stackoverflow_0004070424_boost_boost_python_c++_python.txt
Q: extract a methods block from a module/class I would like to extract a block of statments (function) from souce code if some condition is met. e.g in the following code snippet, i would like to extract the function method2() and all its statements to another file for analyis class MyClass: def __init__(self): pass def method1(self): pass def method2(self): statement1 statement2 ... def method2(self): print 'xyz' any ideas on how to do this? A: use inspect.getsource import inspect class MyClass: def __init__(self): pass def method1(self): pass def method2(self): a = 1 def method2(self): print 'xyz' print inspect.getsource(MyClass.method2)
extract a methods block from a module/class
I would like to extract a block of statments (function) from souce code if some condition is met. e.g in the following code snippet, i would like to extract the function method2() and all its statements to another file for analyis class MyClass: def __init__(self): pass def method1(self): pass def method2(self): statement1 statement2 ... def method2(self): print 'xyz' any ideas on how to do this?
[ "use inspect.getsource\nimport inspect\n\nclass MyClass:\n def __init__(self):\n pass\n def method1(self):\n pass\n def method2(self):\n a = 1\n def method2(self):\n print 'xyz'\n\nprint inspect.getsource(MyClass.method2)\n\n" ]
[ 4 ]
[]
[]
[ "python" ]
stackoverflow_0004070774_python.txt
Q: South django.db.utils.IntegrityError: django_content_type.name may not be NULL while running unit tests I'm getting this error django.db.utils.IntegrityError: django_content_type.name may not be NULL while running tests via nosetest. I've done everything as told in south tutorial - initial migration, fake migration etc. Running site normally via runserver command works like charm, but while using test command - above error. Also, In my development enviroment I'm using sqlite database and I'm using django-nose as test runner (at the very end of INSTALLED_APPS. Any clues? A: Ok - I've managed to solve the problem on my own. Seems to me that there is a problem integrating south, nose and in-memory db which is created by the django test command. All I had to do is to define TEST_NAME in my developement settings - like this: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'database.sqlite', 'TEST_NAME': 'test_database.sqlite', } } That's it :)
South django.db.utils.IntegrityError: django_content_type.name may not be NULL while running unit tests
I'm getting this error django.db.utils.IntegrityError: django_content_type.name may not be NULL while running tests via nosetest. I've done everything as told in south tutorial - initial migration, fake migration etc. Running site normally via runserver command works like charm, but while using test command - above error. Also, In my development enviroment I'm using sqlite database and I'm using django-nose as test runner (at the very end of INSTALLED_APPS. Any clues?
[ "Ok - I've managed to solve the problem on my own. \nSeems to me that there is a problem integrating south, nose and in-memory db which is created by the django test command.\nAll I had to do is to define TEST_NAME in my developement settings - like this:\nDATABASES = {\n 'default': {\n 'ENGINE': 'django....
[ 0 ]
[]
[]
[ "django", "django_south", "nose", "python", "unit_testing" ]
stackoverflow_0004065806_django_django_south_nose_python_unit_testing.txt
Q: Objects seems to be deleted if not assigned to object variable in PySide I'm trying to use QStandardItemModel to represent a hierarchy of data, but when I'm adding QStandardItems to the model, I have to assign them in object member variables, or the objects seems to be deleted. For example self.tree_model = QStandardItemModel() self.tree_model.setHorizontalHeaderLabels(['Category']) self.out_insertions = QStandardItem("Insertions") self.tree_model.invisibleRootItem().appendRow(self.out_insertions) Works as expected (an "Insertion" row is inserted under the column "Category"). But if I remove the self.out_insertion assignment, like: self.tree_model = QStandardItemModel() self.tree_model.setHorizontalHeaderLabels(['Category']) self.tree_model.invisibleRootItem().appendRow(QStandardItem("Insertions")) It doesn't work (an empty row is shown). I'm using Qt 4.6.3 and PySide 0.4.1. Can someone explain me why this happens? Thanks in advance ~Aki A: Your object get garbage collected since no more (Python) references to it exist. This behavior is described in the 'things to be aware of' in the PyQt documentation. Most of these problems (in PyQt land) can be avoided by correct parenting (which makes Qt take ownership instead of PyQt).
Objects seems to be deleted if not assigned to object variable in PySide
I'm trying to use QStandardItemModel to represent a hierarchy of data, but when I'm adding QStandardItems to the model, I have to assign them in object member variables, or the objects seems to be deleted. For example self.tree_model = QStandardItemModel() self.tree_model.setHorizontalHeaderLabels(['Category']) self.out_insertions = QStandardItem("Insertions") self.tree_model.invisibleRootItem().appendRow(self.out_insertions) Works as expected (an "Insertion" row is inserted under the column "Category"). But if I remove the self.out_insertion assignment, like: self.tree_model = QStandardItemModel() self.tree_model.setHorizontalHeaderLabels(['Category']) self.tree_model.invisibleRootItem().appendRow(QStandardItem("Insertions")) It doesn't work (an empty row is shown). I'm using Qt 4.6.3 and PySide 0.4.1. Can someone explain me why this happens? Thanks in advance ~Aki
[ "Your object get garbage collected since no more (Python) references to it exist.\nThis behavior is described in the 'things to be aware of' in the PyQt documentation.\nMost of these problems (in PyQt land) can be avoided by correct parenting (which makes Qt take ownership instead of PyQt).\n" ]
[ 4 ]
[]
[]
[ "pyside", "python", "qt4" ]
stackoverflow_0004071740_pyside_python_qt4.txt
Q: How to compare an item in a queue to an item in a set? REDIT: Was trying to avoid just placing the entire block of code on the forum and saying fix it for me, but here it is, to simply the process of determining the error: #! /usr/bin/python2.6 import threading import Queue import sys import urllib import urllib2 from urlparse import urlparse from lxml.html import parse, tostring, fromstring THREAD_NUMBER = 1 class Crawler(threading.Thread): def __init__(self, queue, mal_urls, max_depth): self.queue = queue self.mal_list = mal_urls self.crawled_links = [] self.max_depth = max_depth self.count = 0 threading.Thread.__init__(self) def run(self): while True: if self.count <= self.max_depth: self.crawled = set(self.crawled_links) url = self.queue.get() if url not in self.mal_list: self.count += 1 self.crawl(url) else: #self.queue.task_done() print("Malicious Link Found: {0}".format(url)) continue else: self.queue.task_done() break print("\nFinished Crawling! Reached Max Depth!") sys.exit(2) def crawl(self, tgt): try: url = urlparse(tgt) self.crawled_links.append(tgt) print("\nCrawling {0}".format(tgt)) request = urllib2.Request(tgt) request.add_header("User-Agent", "Mozilla/5,0") opener = urllib2.build_opener() data = opener.open(request) except: # TODO: write explicit exceptions the URLError, ValueERROR ... 
return doc = parse(data).getroot() for tag in doc.xpath("//a[@href]"): old = tag.get('href') fixed = urllib.unquote(old) self.queue_links(fixed, url) def queue_links(self, link, url): if link.startswith('/'): link = "http://" + url.netloc + link elif link.startswith("#"): return elif not link.startswith("http"): link = "http://" + url.netloc + "/" + link if link not in self.crawled_links: self.queue.put(link) self.queue.task_done() else: return def make_mal_list(): """Open various malware and phishing related blacklists and create a list of URLS from which to compare to the crawled links """ hosts1 = "hosts.txt" hosts2 = "MH-sitelist.txt" hosts3 = "urls.txt" mal_list = [] with open(hosts1) as first: for line1 in first: link = "http://" + line1.strip() mal_list.append(link) with open(hosts2) as second: for line2 in second: link = "http://" + line2.strip() mal_list.append(link) with open(hosts3) as third: for line3 in third: link = "http://" + line3.strip() mal_list.append(link) return mal_list def main(): x = int(sys.argv[2]) queue = Queue.Queue() mal_urls = set(make_mal_list()) for i in xrange(THREAD_NUMBER): cr = Crawler(queue, mal_urls, x) cr.start() queue.put(sys.argv[1]) queue.join() if __name__ == '__main__': main() So what I've got going on here is a web spider, which first creates a set made of the lines of several text files which contain 'malicious links'. Then starts a thread, passing both the set of bad links, and sys.argv[1]. The started thread, then calls teh crawl function which retrieves an lxml.html parse from sys.argv[1], and then after parsing all the links out of that initial page, places them in the queue. The loop continues, with each link placed in the queue being removed with self.queue.get(). The corresponding link is then SUPPOSED to be compared against the set of bad links. If the link in found to be bad, the loop is supposed to output it to the screen and then continue on to the next link, UNLESS it has already crawled that link. 
If it is not bad, crawl it, parse it, place its links into the queue, etc, incrementing a counter each time a link is crawled, and continuing until the counter reaches a limit determined by the value passed as sys.argv[2]. The problem is that, items it should be triggering the if/else statement for 'if url not in mal_list' are not, and links that have been placed in the 'crawled_already' list, are being crawled a 2nd, 3rd, and forth time anyhow. A: I don't understand one detail of this code: the queue is marked as task_done if there is any new link found in self.queue_links, but not as a matter of course in self.crawl. I'd have thought that this code would make more sense: def crawl(self, tgt): try: url = urlparse(tgt) self.crawled_links.append(tgt) print("\nCrawling {0}".format(tgt)) request = urllib2.Request(tgt) request.add_header("User-Agent", "Mozilla/5,0") opener = urllib2.build_opener() data = opener.open(request) doc = parse(data).getroot() for tag in doc.xpath("//a[@href]"): old = tag.get('href') fixed = urllib.unquote(old) self.queue_links(fixed, url) self.queue.task_done() except: # TODO: write explicit exceptions the URLError, ValueERROR ... pass def queue_links(self, link, url): if not link.startswith("#"): if link.startswith('/'): link = "http://" + url.netloc + link elif not link.startswith("http"): link = "http://" + url.netloc + "/" + link if link not in self.crawled_links: self.queue.put(link) I can't say, though, that I have a complete answer to your question. Later: the docs for Queue.task_done suggest that task_done should be 1:1 with Queue.get calls: Queue.task_done()¶ Indicate that a formerly enqueued task is complete. Used by queue consumer threads. For each get() used to fetch a task, a subsequent call to task_done() tells the queue that the processing on the task is complete. 
If a join() is currently blocking, it will resume when all items have been processed (meaning that a task_done() call was received for every item that had been put() into the queue). Raises a ValueError if called more times than there were items placed in the queue. Were you getting [uncaught] ValueError exceptions? It looks like this might be so.
How to compare an item in a queue to an item in a set?
REDIT: Was trying to avoid just placing the entire block of code on the forum and saying fix it for me, but here it is, to simply the process of determining the error: #! /usr/bin/python2.6 import threading import Queue import sys import urllib import urllib2 from urlparse import urlparse from lxml.html import parse, tostring, fromstring THREAD_NUMBER = 1 class Crawler(threading.Thread): def __init__(self, queue, mal_urls, max_depth): self.queue = queue self.mal_list = mal_urls self.crawled_links = [] self.max_depth = max_depth self.count = 0 threading.Thread.__init__(self) def run(self): while True: if self.count <= self.max_depth: self.crawled = set(self.crawled_links) url = self.queue.get() if url not in self.mal_list: self.count += 1 self.crawl(url) else: #self.queue.task_done() print("Malicious Link Found: {0}".format(url)) continue else: self.queue.task_done() break print("\nFinished Crawling! Reached Max Depth!") sys.exit(2) def crawl(self, tgt): try: url = urlparse(tgt) self.crawled_links.append(tgt) print("\nCrawling {0}".format(tgt)) request = urllib2.Request(tgt) request.add_header("User-Agent", "Mozilla/5,0") opener = urllib2.build_opener() data = opener.open(request) except: # TODO: write explicit exceptions the URLError, ValueERROR ... 
return doc = parse(data).getroot() for tag in doc.xpath("//a[@href]"): old = tag.get('href') fixed = urllib.unquote(old) self.queue_links(fixed, url) def queue_links(self, link, url): if link.startswith('/'): link = "http://" + url.netloc + link elif link.startswith("#"): return elif not link.startswith("http"): link = "http://" + url.netloc + "/" + link if link not in self.crawled_links: self.queue.put(link) self.queue.task_done() else: return def make_mal_list(): """Open various malware and phishing related blacklists and create a list of URLS from which to compare to the crawled links """ hosts1 = "hosts.txt" hosts2 = "MH-sitelist.txt" hosts3 = "urls.txt" mal_list = [] with open(hosts1) as first: for line1 in first: link = "http://" + line1.strip() mal_list.append(link) with open(hosts2) as second: for line2 in second: link = "http://" + line2.strip() mal_list.append(link) with open(hosts3) as third: for line3 in third: link = "http://" + line3.strip() mal_list.append(link) return mal_list def main(): x = int(sys.argv[2]) queue = Queue.Queue() mal_urls = set(make_mal_list()) for i in xrange(THREAD_NUMBER): cr = Crawler(queue, mal_urls, x) cr.start() queue.put(sys.argv[1]) queue.join() if __name__ == '__main__': main() So what I've got going on here is a web spider, which first creates a set made of the lines of several text files which contain 'malicious links'. Then starts a thread, passing both the set of bad links, and sys.argv[1]. The started thread, then calls teh crawl function which retrieves an lxml.html parse from sys.argv[1], and then after parsing all the links out of that initial page, places them in the queue. The loop continues, with each link placed in the queue being removed with self.queue.get(). The corresponding link is then SUPPOSED to be compared against the set of bad links. If the link in found to be bad, the loop is supposed to output it to the screen and then continue on to the next link, UNLESS it has already crawled that link. 
If it is not bad, crawl it, parse it, place its links into the queue, etc, incrementing a counter each time a link is crawled, and continuing until the counter reaches a limit determined by the value passed as sys.argv[2]. The problem is that, items it should be triggering the if/else statement for 'if url not in mal_list' are not, and links that have been placed in the 'crawled_already' list, are being crawled a 2nd, 3rd, and forth time anyhow.
[ "I don't understand one detail of this code: the queue is marked as task_done if there is any new link found in self.queue_links, but not as a matter of course in self.crawl. I'd have thought that this code would make more sense:\ndef crawl(self, tgt):\n try:\n url = urlparse(tgt)\n self.crawled_li...
[ 0 ]
[]
[]
[ "python", "queue", "set" ]
stackoverflow_0004070650_python_queue_set.txt
Q: Infinite yield problem Here is my simple code class Fibonacci: @staticmethod def series(): fprev = 1 fnext = 1 yield fnext while True: yield fnext fprev,fnext = fnext,fprev+fnext under10 = (i for i in Fibonacci.series() if i<10) for i in under10 : print i It's absolutely obvious, but...WHY interpreter is executing block while True: yield fnext fprev,fnext = fnext,fprev+fnext Forever? I specified in generator,that I want only elements<10 under10 = (i for i in Fibonacci.series() if i<10) IMHO, it's a little bit misunderstanding Any way to prevent infinite execution without re-writing "series"? A: How should the interpreter know that all future numbers will be < 10? It would have to either know (somehow) that it’s churning out the Fibonacci series, or it would have to inspect the whole series. It can’t do the first, so it does the second. You can fix this by using itertools.takewhile: import itertools under10 = itertools.takewhile(lambda n: n < 10, Fibonacci.series()) A: under10 = (i for i in Fibonacci.series() if i<10) Will keep going, it just won't yield values greater than 10. There's nothing instructing the for loop to stop. You would probably have better luck doing something like: for i in Fibonacci.series(): if i > 10: break #do your appends and such here EDIT: I like Konrad's itertools example much more, I always forget about itertools A: The infinite loop isn't a result of the while True: in the Fibonacci.series() method. It's caused by the under10 = (i for i in Fibonacci.series() if i<10) generator which just keeps going since it doesn't realize the values yielded will never get smaller. 
Here's [another] way to fix it and generalize it at the same time -- without re-writing series() -- using the itertools.takewhile() iterator: import itertools fibos_under = lambda N: itertools.takewhile(lambda f: f < N, Fibonacci.series()) for i in fibos_under(10): print i BTW: You can simplify the Fibonacci.series() method slightly by changing it to this which yields the same values: class Fibonacci: @staticmethod def series(): fprev,fnext = 0,1 while True: yield fnext fprev,fnext = fnext,fprev+fnext
Infinite yield problem
Here is my simple code class Fibonacci: @staticmethod def series(): fprev = 1 fnext = 1 yield fnext while True: yield fnext fprev,fnext = fnext,fprev+fnext under10 = (i for i in Fibonacci.series() if i<10) for i in under10 : print i It's absolutely obvious, but...WHY interpreter is executing block while True: yield fnext fprev,fnext = fnext,fprev+fnext Forever? I specified in generator,that I want only elements<10 under10 = (i for i in Fibonacci.series() if i<10) IMHO, it's a little bit misunderstanding Any way to prevent infinite execution without re-writing "series"?
[ "How should the interpreter know that all future numbers will be < 10? It would have to either know (somehow) that it’s churning out the Fibonacci series, or it would have to inspect the whole series.\nIt can’t do the first, so it does the second.\nYou can fix this by using itertools.takewhile:\nimport itertools\n\...
[ 16, 1, 0 ]
[]
[]
[ "generator", "python" ]
stackoverflow_0004071207_generator_python.txt
Q: SqlAlchemy Select Relation Type I have a simple One-to-Many relation mapped with SqlAlchemy: Base = declarative_base() class Type(Base): __tablename__ = "entity_types" type = Column(String(100), primary_key=True) description = Column(String(300)) class Entity(Base): __tablename__ = "entities" id = Column(Integer, primary_key=True) type_id = Column('type', String(100), ForeignKey(Types.type), nullable=False) type = relation(Type, backref='entities') value = Column(Text, nullable=False) I want to query all types ever used in an entity. In pure SQL I would accomplish this by: SELECT entity_types.* FROM entities JOIN entity_types ON entities.type == entity_types.type GROUP BY entity_types.type How do I solve this using SqlAlchemy's ORM-Engine? I've tried these queries, but they all don't return what I want: session.query(Action.type).group_by(Action.type).all() session.query(Type).select_from(Action).group_by(Type).all() I've also tried using options(joinedload('type')), but I found out, this is only used to force eager loading and to bypass lazy-loading. ADDITION: I've just added the backref in the relation of Entity. I think the problem is solvable by querying count(Type.entities) > 0, but I cannot figure out how to exactly form a valid ORM query. A: I've just figured it out: session.query(ActionType).filter(ActionType.actions.any()).all() The any() does the trick.
SqlAlchemy Select Relation Type
I have a simple One-to-Many relation mapped with SqlAlchemy: Base = declarative_base() class Type(Base): __tablename__ = "entity_types" type = Column(String(100), primary_key=True) description = Column(String(300)) class Entity(Base): __tablename__ = "entities" id = Column(Integer, primary_key=True) type_id = Column('type', String(100), ForeignKey(Types.type), nullable=False) type = relation(Type, backref='entities') value = Column(Text, nullable=False) I want to query all types ever used in an entity. In pure SQL I would accomplish this by: SELECT entity_types.* FROM entities JOIN entity_types ON entities.type == entity_types.type GROUP BY entity_types.type How do I solve this using SqlAlchemy's ORM-Engine? I've tried these queries, but they all don't return what I want: session.query(Action.type).group_by(Action.type).all() session.query(Type).select_from(Action).group_by(Type).all() I've also tried using options(joinedload('type')), but I found out, this is only used to force eager loading and to bypass lazy-loading. ADDITION: I've just added the backref in the relation of Entity. I think the problem is solvable by querying count(Type.entities) > 0, but I cannot figure out how to exactly form a valid ORM query.
[ "I've just figured it out:\nsession.query(ActionType).filter(ActionType.actions.any()).all()\n\nThe any() does the trick.\n" ]
[ 1 ]
[]
[]
[ "python", "sql", "sqlalchemy" ]
stackoverflow_0004071455_python_sql_sqlalchemy.txt
Q: Python - datetime of a specific timezone I am having the hardest time trying to get the current time in EDT timezone. print datetime.time(datetime.now()).strftime("%H%M%S") datetime.now([tz]) has an optional tz argument, but it has to be of type datetime.tzinfo... I have not ben able to figure out how to define a tzinfo object for Eastern timezone... Seems like it should be pretty simple, but I cant figure it out without importing an additional library. A: I am not very conversent about the EDT time zone but this example should serve your purpose. import datetime datetime.datetime.now must be passed the time zone info which should be of type datetime.tzinfo. Here is a class that implements that with some of the required functions. I am providing no day light saving details here as this is an example. class EST(datetime.tzinfo): def utcoffset(self, dt): return datetime.timedelta(hours=-5) def dst(self, dt): return datetime.timedelta(0) Now you could use this to get the info with time zone correctness: print datetime.datetime.now(EST()) Output: 2010-11-01 13:44:20.231259-05:00 A: The tzinfo class only defines an interface, you will need to implement it yourself (see the documentation for an example) or use a third-party module which implements it, like pytz. Edit: Sorry, I missed that you don't want to import another library.
Python - datetime of a specific timezone
I am having the hardest time trying to get the current time in EDT timezone. print datetime.time(datetime.now()).strftime("%H%M%S") datetime.now([tz]) has an optional tz argument, but it has to be of type datetime.tzinfo... I have not ben able to figure out how to define a tzinfo object for Eastern timezone... Seems like it should be pretty simple, but I cant figure it out without importing an additional library.
[ "I am not very conversent about the EDT time zone but this example should serve your purpose.\nimport datetime\n\ndatetime.datetime.now must be passed the time zone info which should be of type datetime.tzinfo. Here is a class that implements that with some of the required functions. I am providing no day light sav...
[ 20, 8 ]
[]
[]
[ "datetime", "python", "timezone" ]
stackoverflow_0004071924_datetime_python_timezone.txt
Q: does anybody know a simple base for a plug in python application? I'm discovering python, and I want to create a plug in application, I guess this is simple using python, but it will be nice create a generic start point application. To be more specific, it could be a file.py that reads a XML or INI file to get the plug in's directory path, then load all .py files as plug in Any Ideas? A: Since you are just starting out, I think it would be best to do all the work yourself so you really understand what is happening. It's not rocket science, and actually makes a fun learning experience IMO. Start by forcing a hard-coded path to plugins. For example, ~/.myapp/plugins. In there, assume each .py file is a plugin. Require that each file in that directory implement a simple interface such as a known command you can call to create an instance of that plugin. For example, a plugin might look like: # MyPlugin.py from myapp.plugin import Plugin # a base class you define def create(): return MyPlugin() class MyPlugin(Plugin): ... With that, you would load it with something like this: import imp, os.path filename=os.path.split(pathname)[-1] modulename = os.path.splitext(filename)[0] try: module = imp.load_source(modulename, pathname) plugin = module.create() except ImportError, e: print "Error importing plugin '%s': %s" % (filename, str(e)) You now have an instance of your plugin class running, and a handle to it in the local variable plugin. See? You don't need a fancy plugin framework to get started. This isn't the only way to do it, and it's probably not even the best way. But once you get something like this working you can hammer out the details for what works best for you and your app. A: I guess this depends on your level of "simple", but trac has a nice plug-in framework. http://trac.edgewall.org/wiki/TracDev/ComponentArchitecture
does anybody know a simple base for a plug in python application?
I'm discovering python, and I want to create a plug in application, I guess this is simple using python, but it will be nice create a generic start point application. To be more specific, it could be a file.py that reads a XML or INI file to get the plug in's directory path, then load all .py files as plug in Any Ideas?
[ "Since you are just starting out, I think it would be best to do all the work yourself so you really understand what is happening. It's not rocket science, and actually makes a fun learning experience IMO. \nStart by forcing a hard-coded path to plugins. For example, ~/.myapp/plugins. In there, assume each .py file...
[ 2, 0 ]
[]
[]
[ "plugins", "python" ]
stackoverflow_0004033698_plugins_python.txt
Q: Django Paginate CPU Time scaling with number of selected objects not displayed objects I have a simple database with about 3900 entries, and am using a generic view (django.views.generic.list_detail.object_list) with its django-pagination (through paginate_by) to browse the data in the database, but some queries are very slow. The weird thing is that despite only showing 50 objects per page the rendering time scales roughly linearly with how many objects are selected (and I do not do any sorting of objects). E.g., if I do a query with ~3900, ~1800, ~900, ~54 selected objects it respectively takes ~8500 ms, ~4000 ms, ~2500 ms, ~800 ms of CPU time (using django-debug-toolbar) while the SQL only took ~50 ms, ~40 ms, ~35 ms, ~30 ms, again while all pages had exactly 50 objects. I have minimized the number of SQL queries using select_related as suggested in the django optimization page. Using profiling middleware the vast majority of the time on long queries is spent doing db stuff: 735924 function calls (702255 primitive calls) in 11.950 CPU seconds Ordered by: internal time, call count ncalls tottime percall cumtime percall filename:lineno(function) 35546/3976 4.118 0.000 9.585 0.002 /usr/local/lib/python2.6/dist-packages/django/db/models/query.py:1120(get_cached_row) 30174 3.589 0.000 3.991 0.000 /usr/local/lib/python2.6/dist-packages/django/db/models/base.py:250(__init__) ---- By file ---- tottime 47.0% 3.669 /usr/local/lib/python2.6/dist-packages/django/db/models/base.py 7.7% 0.601 /usr/local/lib/python2.6/dist-packages/django/db/models/options.py 6.8% 0.531 /usr/local/lib/python2.6/dist-packages/django/db/models/query_utils.py 6.6% 0.519 /usr/local/lib/python2.6/dist-packages/django/db/backends/sqlite3/base.py 6.4% 0.496 /usr/local/lib/python2.6/dist-packages/django/db/models/sql/compiler.py 5.0% 0.387 /usr/local/lib/python2.6/dist-packages/django/db/models/fields/__init__.py 3.1% 0.244 /usr/local/lib/python2.6/dist-packages/django/db/backends/util.py 2.9% 
0.225 /usr/local/lib/python2.6/dist-packages/django/db/backends/__init__.py 2.7% 0.213 /usr/local/lib/python2.6/dist-packages/django/db/models/query.py 2.2% 0.171 /usr/local/lib/python2.6/dist-packages/django/dispatch/dispatcher.py 1.7% 0.136 /usr/local/lib/python2.6/dist-packages/django/template/__init__.py 1.7% 0.131 /usr/local/lib/python2.6/dist-packages/django/utils/datastructures.py 1.1% 0.088 /usr/lib/python2.6/posixpath.py 0.8% 0.066 /usr/local/lib/python2.6/dist-packages/django/db/utils.py ... ---- By group --- tottime 89.5% 6.988 /usr/local/lib/python2.6/dist-packages/django/db 3.6% 0.279 /usr/local/lib/python2.6/dist-packages/django/utils ... I can understand why the SQL query could scale with the number of selected entries. However, I don't see why the rest of the CPU time should be in anyway affected. This is very counterintuitive and I was wondering if there's any debugging/profiling tips someone could help me with. Using django-1.2.3 with sqlite, python2.6, apache2-prefork (though switching to mpm-worker didn't significantly change things). Any tips/tricks would be greatly appreciated. Memory usage doesn't seem to be a factor (machine has 2Gb RAM and free says only using 300Mb in use (additionally 600Mb of cache)) either and the database is on the same server as the machine. Found my mistake. I found my mistake. I checked the length of the original queryset to see if it was length 1 (and then went to object_detail if so). This resulted in evaluating the full queryset (which still only took 5ms according to django-debug-toolbar), but slowed everything down significantly. Basically had something stupid like: if len(queryset) == 1: return HttpResponseRedirect( fwd to object_detail url ...) return object_list(request, queryset=queryset, paginate_by= ...) which evaluated the full query; not the paginated query. A: When django does pagination it will use standard QuerySet slicing to get the results, this means it will use LIMIT and OFFSET. 
You can view the SQL the ORM generates by calling str() on the .query attribute of the QuerySet: print MyModel.objects.all().query print MyModel.objects.all()[50:100].query You can then ask sqlite to EXPLAIN the query and see what the database is trying to do. I'm guessing you are sorting on some field that does not have an index. EXPLAIN QUERY PLAN will tell you what indices would have been used, according to the sqlite documentation at http://www.sqlite.org/lang_explain.html
Django Paginate CPU Time scaling with number of selected objects not displayed objects
I have a simple database with about 3900 entries, and am using a generic view (django.views.generic.list_detail.object_list) with its django-pagination (through paginate_by) to browse the data in the database, but some queries are very slow. The weird thing is that despite only showing 50 objects per page the rendering time scales roughly linearly with how many objects are selected (and I do not do any sorting of objects). E.g., if I do a query with ~3900, ~1800, ~900, ~54 selected objects it respectively takes ~8500 ms, ~4000 ms, ~2500 ms, ~800 ms of CPU time (using django-debug-toolbar) while the SQL only took ~50 ms, ~40 ms, ~35 ms, ~30 ms, again while all pages had exactly 50 objects. I have minimized the number of SQL queries using select_related as suggested in the django optimization page. Using profiling middleware the vast majority of the time on long queries is spent doing db stuff: 735924 function calls (702255 primitive calls) in 11.950 CPU seconds Ordered by: internal time, call count ncalls tottime percall cumtime percall filename:lineno(function) 35546/3976 4.118 0.000 9.585 0.002 /usr/local/lib/python2.6/dist-packages/django/db/models/query.py:1120(get_cached_row) 30174 3.589 0.000 3.991 0.000 /usr/local/lib/python2.6/dist-packages/django/db/models/base.py:250(__init__) ---- By file ---- tottime 47.0% 3.669 /usr/local/lib/python2.6/dist-packages/django/db/models/base.py 7.7% 0.601 /usr/local/lib/python2.6/dist-packages/django/db/models/options.py 6.8% 0.531 /usr/local/lib/python2.6/dist-packages/django/db/models/query_utils.py 6.6% 0.519 /usr/local/lib/python2.6/dist-packages/django/db/backends/sqlite3/base.py 6.4% 0.496 /usr/local/lib/python2.6/dist-packages/django/db/models/sql/compiler.py 5.0% 0.387 /usr/local/lib/python2.6/dist-packages/django/db/models/fields/__init__.py 3.1% 0.244 /usr/local/lib/python2.6/dist-packages/django/db/backends/util.py 2.9% 0.225 /usr/local/lib/python2.6/dist-packages/django/db/backends/__init__.py 2.7% 0.213 
/usr/local/lib/python2.6/dist-packages/django/db/models/query.py 2.2% 0.171 /usr/local/lib/python2.6/dist-packages/django/dispatch/dispatcher.py 1.7% 0.136 /usr/local/lib/python2.6/dist-packages/django/template/__init__.py 1.7% 0.131 /usr/local/lib/python2.6/dist-packages/django/utils/datastructures.py 1.1% 0.088 /usr/lib/python2.6/posixpath.py 0.8% 0.066 /usr/local/lib/python2.6/dist-packages/django/db/utils.py ... ---- By group --- tottime 89.5% 6.988 /usr/local/lib/python2.6/dist-packages/django/db 3.6% 0.279 /usr/local/lib/python2.6/dist-packages/django/utils ... I can understand why the SQL query could scale with the number of selected entries. However, I don't see why the rest of the CPU time should be in anyway affected. This is very counterintuitive and I was wondering if there's any debugging/profiling tips someone could help me with. Using django-1.2.3 with sqlite, python2.6, apache2-prefork (though switching to mpm-worker didn't significantly change things). Any tips/tricks would be greatly appreciated. Memory usage doesn't seem to be a factor (machine has 2Gb RAM and free says only using 300Mb in use (additionally 600Mb of cache)) either and the database is on the same server as the machine. Found my mistake. I found my mistake. I checked the length of the original queryset to see if it was length 1 (and then went to object_detail if so). This resulted in evaluating the full queryset (which still only took 5ms according to django-debug-toolbar), but slowed everything down significantly. Basically had something stupid like: if len(queryset) == 1: return HttpResponseRedirect( fwd to object_detail url ...) return object_list(request, queryset=queryset, paginate_by= ...) which evaluated the full query; not the paginated query.
[ "When django does pagination it will use standard QuerySet slicing to get the results, this means it will use LIMIT and OFFSET. \nYou can view the SQL the ORM generates by calling str() on the .query attribute of the QuerySet:\n print MyModel.objects.all().query\n print MyModel.objects.all()[50:100].query\n\n...
[ 3 ]
[]
[]
[ "django", "django_pagination", "python" ]
stackoverflow_0004072063_django_django_pagination_python.txt
Q: how to send a tuple value as an arg to a function started as a thread? I have a class function that I want to start up as a thread. The function takes as its argument a tuple value. The function works fine but my initial setup throws a TypeError. Here's some sample code: import threading class Test: def __init__(self): t = threading.Thread(target=self.msg, args=(2,1)) t.start() print "started thread" # msg takes a tuple as its arg (e.g. tupleval = (0,1)) def msg(self,tupleval): if(tupleval[0] > 1): print "yes" else: print "no" test = Test() test.msg((2,2)) test.msg((0,0)) and then the output is as follows: started thread yes no Exception in thread Thread-1: Traceback (most recent call last): File "/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/threading.py", line 532, in __bootstrap_inner self.run() File "/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/threading.py", line 484, in run self.__target(*self.__args, **self.__kwargs) TypeError: msg() takes exactly 2 arguments (3 given) It appears to work for the two explicit calls at the end, but the initial setup call throws the TypeError. I've tried packing values into a tuple in all sorts of ways but can't get rid of the error. Ideas? A: args takes a tuple of arguments to pass to the function. When you say args=(2,1) you're not telling it to call msg with a single argument (2,1); you're telling it to call it with two arguments, 2 and 1. You want args=((2,1),). A: This is going to look really ugly, but I believe it should be args=((2,1),) (or args=[(2,1)] might look slightly nicer). args is supposed to be a tuple of all the arguments to the function, so to pass a tuple, you need a tuple of a tuple. In addition, Python requires you to add the extra comma for a tuple with one element to differentiate it from just wrapping an expression with parentheses.
how to send a tuple value as an arg to a function started as a thread?
I have a class function that I want to start up as a thread. The function takes as its argument a tuple value. The function works fine but my initial setup throws a TypeError. Here's some sample code: import threading class Test: def __init__(self): t = threading.Thread(target=self.msg, args=(2,1)) t.start() print "started thread" # msg takes a tuple as its arg (e.g. tupleval = (0,1)) def msg(self,tupleval): if(tupleval[0] > 1): print "yes" else: print "no" test = Test() test.msg((2,2)) test.msg((0,0)) and then the output is as follows: started thread yes no Exception in thread Thread-1: Traceback (most recent call last): File "/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/threading.py", line 532, in __bootstrap_inner self.run() File "/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/threading.py", line 484, in run self.__target(*self.__args, **self.__kwargs) TypeError: msg() takes exactly 2 arguments (3 given) It appears to work for the two explicit calls at the end, but the initial setup call throws the TypeError. I've tried packing values into a tuple in all sorts of ways but can't get rid of the error. Ideas?
[ "args takes a tuple of arguments to pass to the function. When you say args=(2,1) you're not telling it to call msg with a single argument (2,1); you're telling it to call it with two arguments, 2 and 1.\nYou want args=((2,1),).\n", "This is going to look really ugly, but I believe it should be args=((2,1),) (or...
[ 6, 1 ]
[]
[]
[ "arguments", "multithreading", "python" ]
stackoverflow_0004072397_arguments_multithreading_python.txt
Q: Cutting down a list based on events being in between two datetimes I have a list of object that, among other bits of data, include a date, and I need to create a list of all the objects where that date falls anytime last month, i.e. midnight on the 1st of last month < target data < midnight on the 1st of this month. I also need the number of total objects that meet this criteria. Right now, I'm going about it in a series of while loops, but I feel like there has to be a better way, particularly since my script hangs: post = 0 #the current post we're analyzing posts = 0 #the total number of posts in the month we actually care about lastmonthposts = [] #I think i can just get rid of this blog = pyblog.WordPress() date = blog.get_recent_posts(1 + posts)[0]['dateCreated'] while (date > startthismonth): print "So far, there have been " + str(posts) + " posts this month we've counted." post = post + 1 date = blog.get_recent_posts(1 + post)[0]['dateCreated'] while (date > startlastmonth): print "So far, there have been " + str(posts) + " posts last month we've counted, which is " + str(date.timetuple().tm_mon) + "." 
posts = posts + 1 post = post + 1 date = blog.get_recent_posts(1 + post)[0]['dateCreated'] lastmonthposts.append('blog') for blogpost in lastmonthposts: postnumber = blogpost['postid'] comments = comments + int(blog.get_comment_count(postnumber)['approved']) A: Instead of get_recent_posts() I would use get_page_list(): from datetime import datetime, timedelta this_month_start = datetime.now().date().replace(day=1) prev_month_start = (this_month_start - timedelta(days=1)).replace(day=1) pages = blog.get_page_list() last_month_pages = [ p for p in pages if prev_month_start <= p['dateCreated'] < this_month_start] last_month_approved_comment_count = sum( blog.get_comment_count(page['page_id'])['approved'] for page in last_month_pages) print "number of last month's pages:", len(last_month_pages) print "number of approved comments for last month's pages:", print last_month_approved_comment_count
Cutting down a list based on events being in between two datetimes
I have a list of object that, among other bits of data, include a date, and I need to create a list of all the objects where that date falls anytime last month, i.e. midnight on the 1st of last month < target data < midnight on the 1st of this month. I also need the number of total objects that meet this criteria. Right now, I'm going about it in a series of while loops, but I feel like there has to be a better way, particularly since my script hangs: post = 0 #the current post we're analyzing posts = 0 #the total number of posts in the month we actually care about lastmonthposts = [] #I think i can just get rid of this blog = pyblog.WordPress() date = blog.get_recent_posts(1 + posts)[0]['dateCreated'] while (date > startthismonth): print "So far, there have been " + str(posts) + " posts this month we've counted." post = post + 1 date = blog.get_recent_posts(1 + post)[0]['dateCreated'] while (date > startlastmonth): print "So far, there have been " + str(posts) + " posts last month we've counted, which is " + str(date.timetuple().tm_mon) + "." posts = posts + 1 post = post + 1 date = blog.get_recent_posts(1 + post)[0]['dateCreated'] lastmonthposts.append('blog') for blogpost in lastmonthposts: postnumber = blogpost['postid'] comments = comments + int(blog.get_comment_count(postnumber)['approved'])
[ "Instead of get_recent_posts() I would use get_page_list():\nfrom datetime import datetime, timedelta\n\nthis_month_start = datetime.now().date().replace(day=1)\nprev_month_start = (this_month_start - timedelta(days=1)).replace(day=1)\n\npages = blog.get_page_list()\nlast_month_pages = [\n p for p in pages\n if p...
[ 2 ]
[]
[]
[ "datetime", "python", "python_datetime" ]
stackoverflow_0004072561_datetime_python_python_datetime.txt
Q: How to wrap a proper generator function around a SAX Parser I've got 35.5Mb .XLSM file. When the actual usable content is expanded, it swamps DOM parsers like element tree exhausting memory after a long, long running time. When using a SAX parser, however, the ContentHandler seems to be constrained to accumulate rows in a temporary file. Which is a little irritating because the parser and the main application could have a simple co-routine relationship where each row parsed by SAX could be yielded to the application. It doesn't look like the following is possible. def gen_rows_from_xlsx( someFile ): myHandler= HandlerForXLSX() p= xml.sax.makeParser() p.setContentHandler( myHandler, some_kind_of_buffer ) for row in some_kind_of_buffer.rows(): p.parse() # Just enough to get to the ContentHandler's "buffer.put()" yield row Periodically, the HandlerForXLSX would invoke some_kind_of_buffer.put( row ) to put a row into the buffer. This single row should be yielded through some_kind_of_buffer.rows(). A simple coroutine relationship between a SAX parser and gen_rows_from_xslx() would be ideal. Have I overlooked some generator-function magic that will allow me to package SAX as a coroutine of some kind? Is the only alternative to create a SAX parsing thread and use a Queue to get the rows built by the parser? Or is it simpler to bite the bullet and create a temporary file in the SAX parser and then yield those objects through the generator? Related: Lazy SAX XML parser with stop/resume. A: """I've got 35.5Mb .XLSM file. When the actual usable content is expanded, it swamps DOM parsers like element tree exhausting memory after a long, long running time.""" I don't understand this. Things you should be using: import xml.etree.cElementTree as ET ET.iterparse(sourcefile) # sourcefile being a cStringIO.StringIO instance holding your worksheet XML document element.clear() # leave only scorched earth behind you This article shows how to use iterparse and clear. 
Example: Loading an XLSX (100Mb, most of which is two worksheets each with about 16K rows and about 200 cols) into the xlrd object model: Elapsed time about 4 minutes [beat-up old laptop [2 GHz single-core] running Windows XP and Python 2.7]. Incremental memory usage maxes out at about 300Mb of memory, most of which is the output, not the element tree. A: Seems like you could use the IncrementalParser interface for this? Something like: def gen_rows_from_xlsx(someFile): buf = collections.deque() myHandler = HandlerForXLSX(buf) p = xml.sax.make_parser() p.setContentHandler(myHandler) with open(someFile) as f: while True: d = f.read(BLOCKSIZE) if not d: break p.feed(d) while buf: yield buf.popleft() p.close() To do this with parse, you would have to yield across multiple stack frames, something which Python simply does not support.
How to wrap a proper generator function around a SAX Parser
I've got 35.5Mb .XLSM file. When the actual usable content is expanded, it swamps DOM parsers like element tree exhausting memory after a long, long running time. When using a SAX parser, however, the ContentHandler seems to be constrained to accumulate rows in a temporary file. Which is a little irritating because the parser and the main application could have a simple co-routine relationship where each row parsed by SAX could be yielded to the application. It doesn't look like the following is possible. def gen_rows_from_xlsx( someFile ): myHandler= HandlerForXLSX() p= xml.sax.makeParser() p.setContentHandler( myHandler, some_kind_of_buffer ) for row in some_kind_of_buffer.rows(): p.parse() # Just enough to get to the ContentHandler's "buffer.put()" yield row Periodically, the HandlerForXLSX would invoke some_kind_of_buffer.put( row ) to put a row into the buffer. This single row should be yielded through some_kind_of_buffer.rows(). A simple coroutine relationship between a SAX parser and gen_rows_from_xslx() would be ideal. Have I overlooked some generator-function magic that will allow me to package SAX as a coroutine of some kind? Is the only alternative to create a SAX parsing thread and use a Queue to get the rows built by the parser? Or is it simpler to bite the bullet and create a temporary file in the SAX parser and then yield those objects through the generator? Related: Lazy SAX XML parser with stop/resume.
[ "\"\"\"I've got 35.5Mb .XLSM file. When the actual usable content is expanded, it swamps DOM parsers like element tree exhausting memory after a long, long running time.\"\"\"\nI don't understand this. Things you should be using:\nimport xml.etree.cElementTree as ET\n\nET.iterparse(sourcefile) # sourcefile being a ...
[ 5, 1 ]
[]
[]
[ "python", "sax" ]
stackoverflow_0004071856_python_sax.txt
Q: Add more sample points to data Given some data of shape 20x45, where each row is a separate data set, say 20 different sine curves with 45 data points each, how would I go about getting the same data, but with shape 20x100? In other words, I have some data A of shape 20x45, and some data B of length 20x100, and I would like to have A be of shape 20x100 so I can compare them better. This is for Python and Numpy/Scipy. I assume it can be done with splines, so I am looking for a simple example, maybe just 2x10 to 2x20 or something, where each row is just a line, to demonstrate the solution. Thanks! A: Ubuntu beat me to it while I was typing this example, but his example just uses linear interpolation, which can be more easily done with numpy.interpolate... (The difference is only a keyword argument in scipy.interpolate.interp1d, however). I figured I'd include my example, as it shows using scipy.interpolate.interp1d with a cubic spline... import numpy as np import scipy as sp import scipy.interpolate import matplotlib.pyplot as plt # Generate some random data y = (np.random.random(10) - 0.5).cumsum() x = np.arange(y.size) # Interpolate the data using a cubic spline to "new_length" samples new_length = 50 new_x = np.linspace(x.min(), x.max(), new_length) new_y = sp.interpolate.interp1d(x, y, kind='cubic')(new_x) # Plot the results plt.figure() plt.subplot(2,1,1) plt.plot(x, y, 'bo-') plt.title('Using 1D Cubic Spline Interpolation') plt.subplot(2,1,2) plt.plot(new_x, new_y, 'ro-') plt.show() A: One way would be to use scipy.interpolate.interp1d: import scipy as sp import scipy.interpolate import numpy as np x=np.linspace(0,2*np.pi,45) y=np.zeros((2,45)) y[0,:]=sp.sin(x) y[1,:]=sp.sin(2*x) f=sp.interpolate.interp1d(x,y) y2=f(np.linspace(0,2*np.pi,100)) If your data is fairly dense, it may not be necessary to use higher order interpolation. 
A: If your application is not sensitive to precision or you just want a quick overview, you could just fill the unknown data points with averages from neighbouring known data points (in other words, do naive linear interpolation).
Add more sample points to data
Given some data of shape 20x45, where each row is a separate data set, say 20 different sine curves with 45 data points each, how would I go about getting the same data, but with shape 20x100? In other words, I have some data A of shape 20x45, and some data B of length 20x100, and I would like to have A be of shape 20x100 so I can compare them better. This is for Python and Numpy/Scipy. I assume it can be done with splines, so I am looking for a simple example, maybe just 2x10 to 2x20 or something, where each row is just a line, to demonstrate the solution. Thanks!
[ "Ubuntu beat me to it while I was typing this example, but his example just uses linear interpolation, which can be more easily done with numpy.interpolate... (The difference is only a keyword argument in scipy.interpolate.interp1d, however).\nI figured I'd include my example, as it shows using scipy.interpolate.i...
[ 11, 1, 0 ]
[]
[]
[ "numpy", "python", "scipy" ]
stackoverflow_0004072844_numpy_python_scipy.txt
Q: how to create a Storm table with no primary key? I'm trying to use Storm to create an ORM to an existing MySQL db. I'm trying to create a table Class for one of the tables but I'm getting this error: storm.exceptions.ClassInfoError: <class 'statsstorm.Aggframe'> has no primary key information This table has no primary key, or any combination of columns that produce a unique row. It functions more like a log. How do I create a Storm table class with no primary key? class Aggframe(Storm): """ Storm-based interface to the stats.aggframe table.""" __storm_table__ = 'aggframe' user = Unicode() dept = Unicode() frame_avg = Float() A: I don't recommend that you create a table without a PK. If anything, add an IDENTITY column in that table and use as PK. A: You can create a compound key: https://storm.canonical.com/Manual#Defining_compound_keys
how to create a Storm table with no primary key?
I'm trying to use Storm to create an ORM to an existing MySQL db. I'm trying to create a table Class for one of the tables but I'm getting this error: storm.exceptions.ClassInfoError: <class 'statsstorm.Aggframe'> has no primary key information This table has no primary key, or any combination of columns that produce a unique row. It functions more like a log. How do I create a Storm table class with no primary key? class Aggframe(Storm): """ Storm-based interface to the stats.aggframe table.""" __storm_table__ = 'aggframe' user = Unicode() dept = Unicode() frame_avg = Float()
[ "I don't recommend that you create a table without a PK.\nIf anything, add an IDENTITY column in that table and use as PK.\n", "You can create a compound key:\nhttps://storm.canonical.com/Manual#Defining_compound_keys\n" ]
[ 1, 1 ]
[]
[]
[ "database", "mysql", "orm", "python", "storm_orm" ]
stackoverflow_0003703552_database_mysql_orm_python_storm_orm.txt
Q: Inheriting assigned properties from a mix-in I would like to use a mix-in class to add properties to a model. from google.appengine.ext import db class Taggable(object): tag_list = db.StringListProperty() def attach_tag(self, tag): self.tag_list.append(tag) self.put() def remove_tag(self, tag): self.tag_list.pop(self.tag_list.index(tag)) self.put() class Post(db.Model, Taggable): title = db.TextProperty() This is just an example, no need to chew my ear off about bad practices or something. Currently, I have something similar to this, except I have to put tag_list = db.StringListProperty() outside of the mix-in (errors otherwise), this is messy code, and I would like to avoid that. Simply, how can I assign properties (such as tag_list) to a model from within a mix-in? A: Should not Taggable be based on db.Model instead of object?
Inheriting assigned properties from a mix-in
I would like to use a mix-in class to add properties to a model. from google.appengine.ext import db class Taggable(object): tag_list = db.StringListProperty() def attach_tag(self, tag): self.tag_list.append(tag) self.put() def remove_tag(self, tag): self.tag_list.pop(self.tag_list.index(tag)) self.put() class Post(db.Model, Taggable): title = db.TextProperty() This is just an example, no need to chew my ear off about bad practices or something. Currently, I have something similar to this, except I have to put tag_list = db.StringListProperty() outside of the mix-in (errors otherwise), this is messy code, and I would like to avoid that. Simply, how can I assign properties (such as tag_list) to a model from within a mix-in?
[ "Should not Taggable be based on db.Model instead of object?\n" ]
[ 2 ]
[]
[]
[ "google_app_engine", "google_cloud_datastore", "python" ]
stackoverflow_0004073627_google_app_engine_google_cloud_datastore_python.txt
Q: How do I design and implement a programming language? This question is related to This question on Aardvark This question on here The past couple of years I've been thinking about things I like and don't like about languages I use. I always wanted to write my own language, but never did so. I also own both the Lego RCX and NXT, but most of the time I never actually make my robots do anything because of their restrictive visual programming environments. I think I will design my programming language for the NXT because there are already tons of general purpose languages and the NXT gives me a concrete set of problems and goals and hopefully a nice sandbox to play with. Now what? Where do I start? What do I need to know? If possible, I'd write the compiler in Python or Clojure. There is an SDK for the NXT, but also an Assembly language. What would be the best/easiest route? The Lego NXT has a small screen, USB and Bluetooth, it has 4 sensor ports both digital and analogue, 3 output ports and 2 ARM processors, one main processor and one co-processor. http://mindstormsnxt.blogspot.com/2006/08/whats-inside-nxt-brick.html Programming the NXT is going to all about handling data and events, so some sort of monoiconic dataflow/reactive style seem appropriate. It should also handle parallel tasks well, so I'm thinking functional. I'm currently thinking of stack based as well. In my head I'm already trying to unify these concepts and think of sample code. I'm thinking of a tree rather than a stack, where functional branches can run in parallel. 
An example: # implicit main stack 5 5 + # 10 # quoted branch or list [1 -] # 10 [1 -] # eval list and recur until false loop # [9 8 7 6 5 4 3 2 1 0] # define stack as a function [1 = [1 8 motor] [1 0 motor] if] fn # [9 8 7 6 5 4 3 2 1 0] <function> # define function as a symbol "handle-press" def # [9 8 7 6 5 4 3 2 1 0] # reactively loop over infinite lazy stack returned by sensor # in a parallel branch |4 sensor handle-press for| # [9 8 7 6 5 4 3 2 1 0] [8 nil nil nil 8 ...] There are obviously still gaping holes in the reasoning behind this, but I'm posting this rough sketch anyway to spark some helpful answers and discussion. A: Now what? Where do I start? What do I need to know? Start by learning more programming languages. After learning several languages, buy a book on compilers. There are many. Google will help. It doesn't matter which one you buy. You'll need several. It's okay to read many books. Once you've learned languages and read up on compilers, do the following. Build the run-time libraries you need. Implement them in some suitable language like C or Python or whatever. Once you have run-time libraries which really work. Really totally work. Totally. You can think about syntax and lexical scanning and compiling. Those are hard problems, but not half as hard as getting your run-time libraries to work. Fooling around with syntax (i.e., a Domain Specific Language) is an attractive nuisance. Many people have "improved" syntax but no usable run-time libraries. So their "language" is incomplete because it doesn't do anything. Get your language to do something first. A: Don't afraid to write a compiler, which compiles to an existing language, and not to object code. For example, Lightweight C++ is a C++ -> C compiler is based on this idea (altough, C++ does the same job somewhere): http://linux.wareseeker.com/Programming/lightweight-c-1.3.2.zip/331414 If you have a small-but-smart idea on how to improve programming, it's a quick win way. 
There's a similar situation with search engines. If I say, that I can do better than Google, maybe I can do it with a Google mashup, which reorganizes Google's result set, and I don't need to buy 343 Zigabytes of storage to set up a second Google just for changing the number of results from 10 to 15. (Unfortunatelly, it does not works if I have different ranking or crawling ideas.) Maybe, Twitter is a better example. Write your own Twitter by using Twitter API. (Of course, only if your idea fits into the Twitter's base model.) We're now working on a dataflow engine (see Wikipedia: flow-based programming, dataflow programming). We've developed a very lite new language, which has 3 instruction types (component creation, parameter setting, message declaration), and 2 block types (component declaration and implementation). It's compiled to C++ code, so the compiler is simple, and result is optimal fast. Also, there're several cases, when our language script is generated from configurations, or, more elegant, it supports metaprogramming. We should break off the 1-step (source->executable) and 0-step (the source script is the executable) complilation languages; 3-4 level is easy yet to overview, and - if we do it right - it can make the developement more effective. A: The easiest route is using a concatenative programming language, like Forth, Factor, or your own design of one. The Forth interpreter is very easy to implement and does not need to take up more than a few KB; important for the Lego device. You need to understand how the Forth interpreter works. This is covered, for instance, in chapter 9 of Starting Forth. A: Read fun books about language design! the author of Clojure recommended following the book "lisp in small Pieces" by Christian Queinnec. The Clojure Reading list covers many books that incluenced the design of the Clojure language.
How do I design and implement a programming language?
This question is related to This question on Aardvark This question on here The past couple of years I've been thinking about things I like and don't like about languages I use. I always wanted to write my own language, but never did so. I also own both the Lego RCX and NXT, but most of the time I never actually make my robots do anything because of their restrictive visual programming environments. I think I will design my programming language for the NXT because there are already tons of general purpose languages and the NXT gives me a concrete set of problems and goals and hopefully a nice sandbox to play with. Now what? Where do I start? What do I need to know? If possible, I'd write the compiler in Python or Clojure. There is an SDK for the NXT, but also an Assembly language. What would be the best/easiest route? The Lego NXT has a small screen, USB and Bluetooth, it has 4 sensor ports both digital and analogue, 3 output ports and 2 ARM processors, one main processor and one co-processor. http://mindstormsnxt.blogspot.com/2006/08/whats-inside-nxt-brick.html Programming the NXT is going to all about handling data and events, so some sort of monoiconic dataflow/reactive style seem appropriate. It should also handle parallel tasks well, so I'm thinking functional. I'm currently thinking of stack based as well. In my head I'm already trying to unify these concepts and think of sample code. I'm thinking of a tree rather than a stack, where functional branches can run in parallel. An example: # implicit main stack 5 5 + # 10 # quoted branch or list [1 -] # 10 [1 -] # eval list and recur until false loop # [9 8 7 6 5 4 3 2 1 0] # define stack as a function [1 = [1 8 motor] [1 0 motor] if] fn # [9 8 7 6 5 4 3 2 1 0] <function> # define function as a symbol "handle-press" def # [9 8 7 6 5 4 3 2 1 0] # reactively loop over infinite lazy stack returned by sensor # in a parallel branch |4 sensor handle-press for| # [9 8 7 6 5 4 3 2 1 0] [8 nil nil nil 8 ...] 
There are obviously still gaping holes in the reasoning behind this, but I'm posting this rough sketch anyway to spark some helpful answers and discussion.
[ "\nNow what? Where do I start? What do I need to know?\n\nStart by learning more programming languages.\nAfter learning several languages, buy a book on compilers. There are many. Google will help. It doesn't matter which one you buy. You'll need several. It's okay to read many books.\nOnce you've learned lang...
[ 23, 5, 4, 4 ]
[]
[]
[ "clojure", "forth", "nxt", "programming_languages", "python" ]
stackoverflow_0004014267_clojure_forth_nxt_programming_languages_python.txt
Q: Having trouble parsing a txt file into a list full of zip codes in my zipcode lookup program Hello everyone thanks for looking into my problem. What I am trying to do is write a "Structured" program in python that takes txt from a file and parses it into lists. Then after closing the file, I need to reference the user input (zipcode) in those lists and then print out the city and state according to the zipcode that they entered. My instructor is having us use structure by making several functions. I know there are probably lots of more efficient ways of doing this, but I must keep the structure thats in place. EDIT Here is my code(Current): #----------------------------------------------------------------------- # VARIABLE DEFINITIONS eof = False zipRecord = "" zipFile = "" zipCode = [] city = [] state = [] parsedList = [] #----------------------------------------------------------------------- # CONSTANT DEFINITIONS USERPROMPT = "\nEnter a zip code to find (Press Enter key alone to stop): " #----------------------------------------------------------------------- # FUNCTION DEFINITIONS def startUp(): global zipFile print "zipcode lookup program".upper() zipFile = open("zipcodes.txt","r") loadList() def loadList(): while readRecord(): pass processRecords() def readRecord(): global eof, zipList, zipCode, city, state, parsedList zipRecord = zipFile.readline() if zipRecord == "": eof = True else: parsedList = zipRecord.split(",") zipCode.append(parsedList[0]) city.append(parsedList[1]) state.append(parsedList[2]) eof = False return not eof def processRecords(): userInput = raw_input(USERPROMPT) if userInput: print userInput print zipCode if userInput in zipCode: index_ = zipcode.index(userInput) print "The city is %s and the state is %s " % \ (city[index_], state[index_]) else: print "\nThe zip code does not exist." 
else: print "Please enter a data" def closeUp(): zipFile.close() #----------------------------------------------------------------------- # PROGRAM'S MAIN LOGIC startUp() closeUp() raw_input("\nRun complete. Press the Enter key to exit.") Here is a sample from the zipcode txt file: 00501,HOLTSVILLE,NY I am definitely stuck at this point and would appreciate your help in this matter. EDIT Thanks for all the help everyone. I really do appreciate it. :) A: why you fill the lists zipcode, city , state like that, i mean in each user entry we get the next line from the file i think that you should do : def loadList(): # Fill all the list first , make the readRecord() return eof (True or False). while readRecord(): pass # than process data (check for zip code) this will run it only one time # but you can put it in a loop to repeat the action. processRecords() about your problem : def processRecords(): userInput = raw_input(USERPROMPT) # Check if a user has entered a text or not if userInput: # check the index from zipcode if userInput in zipcode: # the index of the zipcode in the zipcode list is the same # to get related cities and states. index_ = zipcode.index(userInput) print "The city is %s and the state is %s " % \ (city[index_], state[index_]) else: print "\nThe zip code does not exist." else: print "Please enter a data" A: one of the beauties of Python is that it's interactive. if you take processRecords() out of loadList(), and then at the bottom of your program put: if __name__ == '__main__': processRecords() Then, from the command prompt, type "python". You'll get the Python shell prompt, ">>>". There you type: from zipcodes import * # this assumes your program is zipcodes.py dir() # shows you what's defined print zipCode # shows you what's in zipCode that ought to help debugging. A: Strings don't have an append method like lists do. What I think you're trying to do is append the strings zipCode, city, and state to parsedList. 
This is the code you'd use to do that: parsedList.append(zipCode) parsedList.append(city) parsedList.append(state) Or, even more compactly: parsedList = [zipCode, city, state] Let me know if you get another error message and I can offer more suggestions.
Having trouble parsing a txt file into a list full of zip codes in my zipcode lookup program
Hello everyone thanks for looking into my problem. What I am trying to do is write a "Structured" program in python that takes txt from a file and parses it into lists. Then after closing the file, I need to reference the user input (zipcode) in those lists and then print out the city and state according to the zipcode that they entered. My instructor is having us use structure by making several functions. I know there are probably lots of more efficient ways of doing this, but I must keep the structure thats in place. EDIT Here is my code(Current): #----------------------------------------------------------------------- # VARIABLE DEFINITIONS eof = False zipRecord = "" zipFile = "" zipCode = [] city = [] state = [] parsedList = [] #----------------------------------------------------------------------- # CONSTANT DEFINITIONS USERPROMPT = "\nEnter a zip code to find (Press Enter key alone to stop): " #----------------------------------------------------------------------- # FUNCTION DEFINITIONS def startUp(): global zipFile print "zipcode lookup program".upper() zipFile = open("zipcodes.txt","r") loadList() def loadList(): while readRecord(): pass processRecords() def readRecord(): global eof, zipList, zipCode, city, state, parsedList zipRecord = zipFile.readline() if zipRecord == "": eof = True else: parsedList = zipRecord.split(",") zipCode.append(parsedList[0]) city.append(parsedList[1]) state.append(parsedList[2]) eof = False return not eof def processRecords(): userInput = raw_input(USERPROMPT) if userInput: print userInput print zipCode if userInput in zipCode: index_ = zipcode.index(userInput) print "The city is %s and the state is %s " % \ (city[index_], state[index_]) else: print "\nThe zip code does not exist." else: print "Please enter a data" def closeUp(): zipFile.close() #----------------------------------------------------------------------- # PROGRAM'S MAIN LOGIC startUp() closeUp() raw_input("\nRun complete. 
Press the Enter key to exit.") Here is a sample from the zipcode txt file: 00501,HOLTSVILLE,NY I am definitely stuck at this point and would appreciate your help in this matter. EDIT Thanks for all the help everyone. I really do appreciate it. :)
[ "why you fill the lists zipcode, city , state like that, i mean in each user entry we get the next line from the file \ni think that you should do :\ndef loadList():\n # Fill all the list first , make the readRecord() return eof (True or False).\n while readRecord():\n pass\n\n # than process data ...
[ 2, 1, 0 ]
[]
[]
[ "python" ]
stackoverflow_0004073694_python.txt
Q: Better way to write this if statement? I have this tkinter GUI, and I need to get the values from the entries and compare. self.hystInt.get() is the way to access the string in the string variable in the Entry. *I have to write this for every variable so it ends up looking really ugly. if (self.hystInt.get().isdigit() and int(self.hystInt.get()) >= 200 and int(self.hystInt.get()) <= 500): A: def validate(num): try: return 200 <= int(num) <= 500 except ValueError: return False Simple is good! A: At the very least you can use Python's unusual comparison syntax like this: if (self.hystInt.get().isdigit() and (200 <= int(self.hystInt.get()) <= 500)): A: Do this. try: hystInt= int(self.hystInt.get()) if 200 <= hystInt <= 500: Valid. else: Out of bounds. except ValueError, e: Not even a number. A: How about a temporary variable? I think the real problem (both in in readability and (very!) marginally in performance) is that you're calling the get() method three times. histint = self.hystInt.get() if (histint.isdigit() and (200 <= int(histint) <= 500)) A: To reduce the tedious coding you could do something along these lines: valid_hystInt = lambda self, low, high: ( self.hystInt.get().isdigit() and (low <= int(self.hystInt.get()) <= high) ) class Class: hystInt = HystInt() # or whatever def some_method(self): if valid_hystInt(self, 200, 500): pass # use it or possibly the even more general: valid_int_field = lambda field, low, high: ( field.get().isdigit() and (low <= int(field.get()) <= high) ) class Class: hystInt = HystInt() # or whatever def some_method(self): if valid_int_field(self.hystInt, 200, 500): pass # use it
Better way to write this if statement?
I have this tkinter GUI, and I need to get the values from the entries and compare. self.hystInt.get() is the way to access the string in the string variable in the Entry. *I have to write this for every variable so it ends up looking really ugly. if (self.hystInt.get().isdigit() and int(self.hystInt.get()) >= 200 and int(self.hystInt.get()) <= 500):
[ "def validate(num):\n try:\n return 200 <= int(num) <= 500\n except ValueError:\n return False\n\nSimple is good!\n", "At the very least you can use Python's unusual comparison syntax like this:\nif (self.hystInt.get().isdigit() and (200 <= int(self.hystInt.get()) <= 500)):\n\n", "Do this.\n...
[ 10, 1, 1, 1, 0 ]
[]
[]
[ "comparison", "python" ]
stackoverflow_0004072871_comparison_python.txt
Q: Advanced SQL query with sub queries, group by, count and sum functions in to SQLalchemy I have written the following query. select distinct(table3.*), (select count(*) from table2 where table2.cus_id = table3.id) as count, (select sum(amount) from table2 where table2.cus_id = table3.id) as total from table2, table1, table3 where table3.id = table2.cus_id and table2.own_id = table1.own_id; It finds the sum of a column and the number of rows that produce the sum as well as some associated data from another table. (Feel free to optimise if you think it can be improved) I need to convert this in to SQLAlchemy but have no idea where to start. I'd appreciate any advice. A: Here's my re-write of your query: SELECT t3.*, x.count, x.amount FROM TABLE3 t3 JOIN (SELECT t2.cus_id COUNT(*) AS count, SUM(t2.amount) AS total FROM TABLE2 t2 WHERE EXISTS(SELECT NULL FROM TABLE1 t1 WHERE t1.own_id = t2.own_id) GROUP BY t2.cus_id) x ON x.cus_id = t3.id Can't help you with the SQLAlchemy part, sorry.
Advanced SQL query with sub queries, group by, count and sum functions in to SQLalchemy
I have written the following query. select distinct(table3.*), (select count(*) from table2 where table2.cus_id = table3.id) as count, (select sum(amount) from table2 where table2.cus_id = table3.id) as total from table2, table1, table3 where table3.id = table2.cus_id and table2.own_id = table1.own_id; It finds the sum of a column and the number of rows that produce the sum as well as some associated data from another table. (Feel free to optimise if you think it can be improved) I need to convert this in to SQLAlchemy but have no idea where to start. I'd appreciate any advice.
[ "Here's my re-write of your query:\nSELECT t3.*,\n x.count,\n x.amount\n FROM TABLE3 t3\n JOIN (SELECT t2.cus_id\n COUNT(*) AS count,\n SUM(t2.amount) AS total\n FROM TABLE2 t2\n WHERE EXISTS(SELECT NULL\n FROM TABLE1 t1\n ...
[ 3 ]
[]
[]
[ "postgresql", "python", "sql", "sqlalchemy" ]
stackoverflow_0004073918_postgresql_python_sql_sqlalchemy.txt
Q: Access from external to python development server I can't access externally to python development server, I have a very small django project running on my machine, and now I want to enable computers in the same LAN have access to it, but it can't do. There is no firewall running on my machine. Is there a way around this? A: How are you running the server? Have you tried something like this? manage.py runserver 0.0.0.0:8080 From the documentation: Note that the default IP address, 127.0.0.1, is not accessible from other machines on your network. To make your development server viewable to other machines on the network, use its own IP address (e.g. 192.168.2.1) or 0.0.0.0. 0.0.0.0 means: bind to all IP addresses this computer supports. So, as TheSingularity says, you'll then be able to access your Django app by entering the private IP address usually beginning with 192.168.*; which is not accessible from the Internet. A: run your django app like this: ./manage.py runserver 0.0.0.0:8800 you can access now your project from other machine like this: http://<ip_address_machine_where_project>:8800
Access from external to python development server
I can't access the Python development server externally. I have a very small Django project running on my machine, and now I want to enable computers on the same LAN to access it, but they can't. There is no firewall running on my machine. Is there a way around this?
[ "How are you running the server?\nHave you tried something like this? \nmanage.py runserver 0.0.0.0:8080\n\nFrom the documentation:\n\nNote that the default IP address, 127.0.0.1, is not accessible from other machines on your network. To make your development server viewable to other machines on the network, use i...
[ 21, 6 ]
[]
[]
[ "django", "python" ]
stackoverflow_0004073977_django_python.txt
Q: Writing a parallel programming framework, what have I missed? Clarification: As per some of the comments, I should clarify that this is intended as a simple framework to allow execution of programs that are naturally parallel (so-called embarrassingly parallel programs). It isn't, and never will be, a solution for tasks which require communication or synchronisation between processes. I've been looking for a simple process-based parallel programming environment in Python that can execute a function on multiple CPUs on a cluster, with the major criterion being that it needs to be able to execute unmodified Python code. The closest I found was Parallel Python, but pp does some pretty funky things, which can cause the code to not be executed in the correct context (with the appropriate modules imported etc). I finally got tired of searching, so I decided to write my own. What I came up with is actually quite simple. The problem is, I'm not sure if what I've come up with is simple because I've failed to think of a lot of things. Here's what my program does: I have a job server which hands out jobs to nodes in the cluster. The jobs are handed out to servers listening on nodes by passing a dictionary that looks like this: { 'moduleName':'some_module', 'funcName':'someFunction', 'localVars': {'someVar':someVal,...}, 'globalVars':{'someOtherVar':someOtherVal,...}, 'modulePath':'/a/path/to/a/directory', 'customPathHasPriority':aBoolean, 'args':(arg1,arg2,...), 'kwargs':{'kw1':val1, 'kw2':val2,...} } moduleName and funcName are mandatory, and the others are optional. A node server takes this dictionary and does: sys.path.append(modulePath) globals()[moduleName]=__import__(moduleName, localVars, globalVars) returnVal = globals()[moduleName].__dict__[funcName](*args, **kwargs) On getting the return value, the server then sends it back to the job server which puts it into a thread-safe queue. When the last job returns, the job server writes the output to a file and quits. 
I'm sure there are niggles that need to be worked out, but is there anything obvious wrong with this approach? On first glance, it seems robust, requiring only that the nodes have access to the filesystem(s) containing the .py file and the dependencies. Using __import__ has the advantage that the code in the module is automatically run, and so the function should execute in the correct context. Any suggestions or criticism would be greatly appreciated. EDIT: I should mention that I've got the code-execution bit working, but the server and job server have yet to be written. A: I have actually written something that probably satisfies your needs: jug. If it does not solve your problems, I promise you I'll fix any bugs you find. The architecture is slightly different: workers all run the same code, but they effectively generate a similar dictionary and ask the central backend "has this been run?". If not, they run it (there is a locking mechanism too). The backend can simply be the filesystem if you are on an NFS system. A: I myself have been tinkering with batch image manipulation across my computers, and my biggest problem was the fact that some things don't easily or natively pickle and transmit across the network. for example: pygame's surfaces don't pickle. these I have to convert to strings by saving them in StringIO objects and then dumping it across the network. If the data you are transmitting (eg your arguments) can be transmitted without fear, you should not have that many problems with network data. Another thing comes to mind: what do you plan to do if a computer suddenly "disappears" while doing a task? while returning the data? do you have a plan for re-sending tasks?
Writing a parallel programming framework, what have I missed?
Clarification: As per some of the comments, I should clarify that this is intended as a simple framework to allow execution of programs that are naturally parallel (so-called embarrassingly parallel programs). It isn't, and never will be, a solution for tasks which require communication or synchronisation between processes. I've been looking for a simple process-based parallel programming environment in Python that can execute a function on multiple CPUs on a cluster, with the major criterion being that it needs to be able to execute unmodified Python code. The closest I found was Parallel Python, but pp does some pretty funky things, which can cause the code to not be executed in the correct context (with the appropriate modules imported etc). I finally got tired of searching, so I decided to write my own. What I came up with is actually quite simple. The problem is, I'm not sure if what I've come up with is simple because I've failed to think of a lot of things. Here's what my program does: I have a job server which hands out jobs to nodes in the cluster. The jobs are handed out to servers listening on nodes by passing a dictionary that looks like this: { 'moduleName':'some_module', 'funcName':'someFunction', 'localVars': {'someVar':someVal,...}, 'globalVars':{'someOtherVar':someOtherVal,...}, 'modulePath':'/a/path/to/a/directory', 'customPathHasPriority':aBoolean, 'args':(arg1,arg2,...), 'kwargs':{'kw1':val1, 'kw2':val2,...} } moduleName and funcName are mandatory, and the others are optional. A node server takes this dictionary and does: sys.path.append(modulePath) globals()[moduleName]=__import__(moduleName, localVars, globalVars) returnVal = globals()[moduleName].__dict__[funcName](*args, **kwargs) On getting the return value, the server then sends it back to the job server which puts it into a thread-safe queue. When the last job returns, the job server writes the output to a file and quits. 
I'm sure there are niggles that need to be worked out, but is there anything obvious wrong with this approach? On first glance, it seems robust, requiring only that the nodes have access to the filesystem(s) containing the .py file and the dependencies. Using __import__ has the advantage that the code in the module is automatically run, and so the function should execute in the correct context. Any suggestions or criticism would be greatly appreciated. EDIT: I should mention that I've got the code-execution bit working, but the server and job server have yet to be written.
[ "I have actually written something that probably satisfies your needs: jug. If it does not solve your problems, I promise you I'll fix any bugs you find.\nThe architecture is slightly different: workers all run the same code, but they effectively generate a similar dictionary and ask the central backend \"has this ...
[ 8, 5 ]
[]
[]
[ "parallel_processing", "python", "python_2.x" ]
stackoverflow_0004073695_parallel_processing_python_python_2.x.txt
Q: Django - Access ForeignKey value without hitting database I have a django model like so: class Profile_Tag(models.Model): profile = models.ForeignKey(Profile) tag = models.ForeignKey(Tag) and a view like so: pts = Profile_Tag.objects.all() for pt in pts: print pt.profile.id is there any way to access the profile foreignKey without hitting the database each time? I don't want to query the profile table. I just want to grab the ids from the Profile_Tag table. A: You can do something like this: pt_ids = Profile_Tag.objects.values_list('profile', flat=True) This will return you list of IDs. For model instance, there's another way: pts = Profile_Tag.objects.all() for pt in pts: print pt.profile_id
Django - Access ForeignKey value without hitting database
I have a django model like so: class Profile_Tag(models.Model): profile = models.ForeignKey(Profile) tag = models.ForeignKey(Tag) and a view like so: pts = Profile_Tag.objects.all() for pt in pts: print pt.profile.id is there any way to access the profile foreignKey without hitting the database each time? I don't want to query the profile table. I just want to grab the ids from the Profile_Tag table.
[ "You can do something like this:\npt_ids = Profile_Tag.objects.values_list('profile', flat=True)\n\nThis will return you list of IDs. For model instance, there's another way:\npts = Profile_Tag.objects.all()\nfor pt in pts:\n print pt.profile_id\n\n" ]
[ 20 ]
[]
[]
[ "django", "foreign_key_relationship", "python" ]
stackoverflow_0004074038_django_foreign_key_relationship_python.txt
Q: Slice an audio file into ten second segments Recommendations welcome on how to slice a .wav1 file into time-delimited segments using a Python library. 1 The actual file type isn't really that material, I'm sure I'll be able to convert between different types if needed. A: I would use the wave module to open the file, read the headers, figure out how many frames are in 10 seconds, then read that many frames. Write those frames out to files with the same header info (except length) until done. A: I would suggest looking at the data structure for a given file, and 'cutting' the data at an appropriate point along the line so no frames are chopped off early. This would mean looking at the frequency of the recording and the bit rate, and using that to get the size (in bits) of each frame. Then you can take segments of audio without cutting individual frame data. Have a look at this SO posting. It suggests treating your audio as a binary read string. As it's a string you can basically copy, cut and move the string as you want to a new output file. Check this one out: http://docs.python.org/library/binascii.html Also worth looking at: https://ccrma.stanford.edu/courses/422/projects/WaveFormat/ Either that or just keep it as binary and use byte arrays. Need to think about the header file and what happens to that, although each format is different. MP3 is easy to keep the header as it is interleaved amongst the data: http://en.wikipedia.org/wiki/Mp3#File_structure Ok, a bunch of stuff. FINALLY: One you'll no doubt have seen already: http://sourceforge.net/projects/audiotools/ Updated.... Use the bits_per_sample() method in the audio tools link from sourceforge.net --Returns the number of bits-per-sample in this audio file as a positive integer. Then divide your audio into a byte array using that info and some of the info from above. You can then at least reconstruct accurately some RAW audio data. You can take the length of the file in bits and divide it by 16. 
You can then use a method to divide the array according to time in milliseconds. It sounds complicated but it's really rudimentary maths.
Slice an audio file into ten-second segments
Recommendations welcome on how to slice a .wav1 file into time-delimited segments using a Python library. 1 The actual file type isn't really that material, I'm sure I'll be able to convert between different types if needed.
[ "I would use the wave module to open the file, read the headers, figure out how many frames are in 10 seconds, then read that many frames. Write those frames out to files with the same header info (except length) until done.\n", "I would suggest looking at the data structure for a given file, and 'cutting' the da...
[ 3, 2 ]
[]
[]
[ "audio", "python" ]
stackoverflow_0004066009_audio_python.txt
Q: ideal thread structure question (involves multiple thread communication) I'm writing an application that listens for sound events (using messages passed in with Open Sound Control), and then based on those events pauses or resumes program execution. My structure works most of the time but always bombs out in the main loop, so I'm guessing it's a thread issue. Here's a generic, simplified version of what I'm talking about: import time, threading class Loop(): aborted = False def __init__(self): message = threading.Thread(target=self.message, args=((0),)) message.start() loop = threading.Thread(target=self.loop) loop.start() def message(self,val): if val > 1: if not self.aborted: self.aborted = True # do some socket communication else: self.aborted = False # do some socket communication def loop(self): cnt = 0 while True: print cnt if self.aborted: while self.aborted: print "waiting" time.sleep(.1); cnt += 1 class FakeListener(): def __init__(self,loop): self.loop = loop listener = threading.Thread(target=self.listener) listener.start() def listener(self): while True: loop.message(2) time.sleep(1) if __name__ == '__main__': loop = Loop() #fake listener standing in for the real OSC event listener listener = FakeListener(loop) Of course, this simple code seems to work great, so it's clearly not fully illustrating my real code, but you get the idea. What isn't included here is also the fact that on each loop pause and resume (by setting aborted=True/False) results in some socket communication which also involves threads. What always happens in my code is that the main loop doesn't always pickup where it left off after a sound event. It will work for a number of events but then eventually it just doesn't answer. Any suggestions for how to structure this kind of communication amongst threads? UPDATE: ok, i think i've got it. here's a modification that seems to work. there's a listener thread that periodically puts a value into a Queue object. 
there's a checker thread that keeps checking the queue looking for the value, and once it sees it sets a boolean to its opposite state. that boolean value controls whether the loop thread continues or waits. i'm not entirely sure what the q.task_done() function is doing here, though. import time, threading import Queue q = Queue.Queue(maxsize = 0) class Loop(): aborted = False def __init__(self): checker = threading.Thread(target=self.checker) checker.setDaemon(True) checker.start() loop = threading.Thread(target=self.loop) loop.start() def checker(self): while True: if q.get() == 2: q.task_done() if not self.aborted: self.aborted = True else: self.aborted = False def loop(self): cnt = 0 while cnt < 40: if self.aborted: while self.aborted: print "waiting" time.sleep(.1) print cnt cnt += 1 time.sleep(.1) class fakeListener(): def __init__(self): listener = threading.Thread(target=self.listener) listener.setDaemon(True) listener.start() def listener(self): while True: q.put(2) time.sleep(1) if __name__ == '__main__': #fake listener standing in for the real OSC event listener listener = fakeListener() loop = Loop() A: Umm.. I don't completely understand your question but i'll do my best to explain what I think you need to fix your problems. 1) The thread of your Loop.loop function should be set as a daemon thread so that it exits with your main thread (so you don't have to kill the python process every time you want to shut down your program). To do this just put loop.setDaemon(True) before you call the thread's "start" function. 2)The most simple and fail-proof way to communicate between threads is with a Queue. On thread will put an item in that Queue and another thread will take an item out, do something with the item and then terminate (or get another job) In python a Queue can be anything from a global list to python's built-in Queue object. I recommend the python Queue because it is thread safe and easy to use.
ideal thread structure question (involves multiple thread communication)
I'm writing an application that listens for sound events (using messages passed in with Open Sound Control), and then based on those events pauses or resumes program execution. My structure works most of the time but always bombs out in the main loop, so I'm guessing it's a thread issue. Here's a generic, simplified version of what I'm talking about: import time, threading class Loop(): aborted = False def __init__(self): message = threading.Thread(target=self.message, args=((0),)) message.start() loop = threading.Thread(target=self.loop) loop.start() def message(self,val): if val > 1: if not self.aborted: self.aborted = True # do some socket communication else: self.aborted = False # do some socket communication def loop(self): cnt = 0 while True: print cnt if self.aborted: while self.aborted: print "waiting" time.sleep(.1); cnt += 1 class FakeListener(): def __init__(self,loop): self.loop = loop listener = threading.Thread(target=self.listener) listener.start() def listener(self): while True: loop.message(2) time.sleep(1) if __name__ == '__main__': loop = Loop() #fake listener standing in for the real OSC event listener listener = FakeListener(loop) Of course, this simple code seems to work great, so it's clearly not fully illustrating my real code, but you get the idea. What isn't included here is also the fact that on each loop pause and resume (by setting aborted=True/False) results in some socket communication which also involves threads. What always happens in my code is that the main loop doesn't always pickup where it left off after a sound event. It will work for a number of events but then eventually it just doesn't answer. Any suggestions for how to structure this kind of communication amongst threads? UPDATE: ok, i think i've got it. here's a modification that seems to work. there's a listener thread that periodically puts a value into a Queue object. 
there's a checker thread that keeps checking the queue looking for the value, and once it sees it sets a boolean to its opposite state. that boolean value controls whether the loop thread continues or waits. i'm not entirely sure what the q.task_done() function is doing here, though. import time, threading import Queue q = Queue.Queue(maxsize = 0) class Loop(): aborted = False def __init__(self): checker = threading.Thread(target=self.checker) checker.setDaemon(True) checker.start() loop = threading.Thread(target=self.loop) loop.start() def checker(self): while True: if q.get() == 2: q.task_done() if not self.aborted: self.aborted = True else: self.aborted = False def loop(self): cnt = 0 while cnt < 40: if self.aborted: while self.aborted: print "waiting" time.sleep(.1) print cnt cnt += 1 time.sleep(.1) class fakeListener(): def __init__(self): listener = threading.Thread(target=self.listener) listener.setDaemon(True) listener.start() def listener(self): while True: q.put(2) time.sleep(1) if __name__ == '__main__': #fake listener standing in for the real OSC event listener listener = fakeListener() loop = Loop()
[ "Umm.. I don't completely understand your question but i'll do my best to explain what I think you need to fix your problems.\n1) The thread of your Loop.loop function should be set as a daemon thread so that it exits with your main thread (so you don't have to kill the python process every time you want to shut do...
[ 4 ]
[]
[]
[ "multithreading", "python", "structure" ]
stackoverflow_0004072816_multithreading_python_structure.txt
Q: Is there a way to add marked text into a variable in pyqt? I've just had my first course in programming at the university and for the following three months I have no additional programming classes so I've decided to do a small project during this "break". What I'm trying to do is a edit-program for a smaller Wiki I used to work on. It's suppose to make it easier for the users to use things like templates, and also have a wizard to help the user make basic pages. I talked to some older students and they recommended pyqt for the GUI of the software. Now to the problem, and I feel like this is a really dirty hack: My solution right now is to use the built in copy and paste commands, the problem is that right now if I just click the button for bold, without marking text, I get: '''text currently in clipboard''' and I just want it to add ''' '''. Here's the (important) code in question, I obviously call addBold when the button/hotkey is pushed. self.textEdit = QtGui.QTextEdit() def.addBold(self): self.textEdit.copy() self.textEdit.insertPlainText("\'\'\'") self.textEdit.paste() self.textEdit.insertPlainText("\'\'\'") What I'd rather have is code that looks something like: x=markedText if not x: self.textEdit.insertPlainText("\'\'\' \'\'\'") else: self.textEdit.insertPlainText("\'\'\'"+x+"\'\'\'") x = None So does anyone know how I can assign the marked text to x? Or is there yet another solution that is better? 
A: from PyQt4.QtGui import * from PyQt4.QtCore import SIGNAL class Widget(QWidget): def __init__(self, parent=None): QWidget.__init__(self) self.textedit = QTextEdit() self.clip = QApplication.clipboard() self.button = QPushButton("Bold") self.connect(self.button, SIGNAL("clicked()"), self.addBold) layout = QVBoxLayout() layout.addWidget(self.textedit) layout.addWidget(self.button) self.setLayout(layout) def addBold(self): self.clip.clear() self.textedit.copy() currentText = self.clip.text() self.textedit.insertPlainText("'''%s'''" % currentText) app = QApplication([]) widget = Widget() widget.show() app.exec_() Sadly I could not find a way without manipulating the clipboard. Hope this helps.
Is there a way to add marked text into a variable in pyqt?
I've just had my first course in programming at the university and for the following three months I have no additional programming classes so I've decided to do a small project during this "break". What I'm trying to do is a edit-program for a smaller Wiki I used to work on. It's suppose to make it easier for the users to use things like templates, and also have a wizard to help the user make basic pages. I talked to some older students and they recommended pyqt for the GUI of the software. Now to the problem, and I feel like this is a really dirty hack: My solution right now is to use the built in copy and paste commands, the problem is that right now if I just click the button for bold, without marking text, I get: '''text currently in clipboard''' and I just want it to add ''' '''. Here's the (important) code in question, I obviously call addBold when the button/hotkey is pushed. self.textEdit = QtGui.QTextEdit() def.addBold(self): self.textEdit.copy() self.textEdit.insertPlainText("\'\'\'") self.textEdit.paste() self.textEdit.insertPlainText("\'\'\'") What I'd rather have is code that looks something like: x=markedText if not x: self.textEdit.insertPlainText("\'\'\' \'\'\'") else: self.textEdit.insertPlainText("\'\'\'"+x+"\'\'\'") x = None So does anyone know how I can assign the marked text to x? Or is there yet another solution that is better?
[ "from PyQt4.QtGui import *\nfrom PyQt4.QtCore import SIGNAL\n\nclass Widget(QWidget): \n def __init__(self, parent=None): \n QWidget.__init__(self) \n\n self.textedit = QTextEdit()\n self.clip = QApplication.clipboard()\n self.button = QPushButton(\"Bold\") \n self.conn...
[ 1 ]
[]
[]
[ "pyqt", "python" ]
stackoverflow_0004065902_pyqt_python.txt
Q: Relay/Send through NAT in Python I'm looking for a solution to exchange data (protocols build on TCP) between NAT separated endpoints - more or less directly. I can setup a relay service for example - which is what I have in mind. However I want to route traffic without losing too much performance. or I can build a VPN with a central VPN server Are there optimised libraries in Python to allow me forwarding and building a relay server - for example. Or is there something more elegant that doesn't even require a node in the middle? :) A: I'm not sure about Python implementations, but you may want to look at Samy Kamkar's example implementations of mechanisms for relay-free NAT traversal when both endpoints are NATed. (and the papers he references) pwnat chownat NAT Pinning
Relay/Send through NAT in Python
I'm looking for a solution to exchange data (protocols built on TCP) between NAT-separated endpoints - more or less directly. I can set up a relay service for example - which is what I have in mind. However I want to route traffic without losing too much performance. Or I can build a VPN with a central VPN server. Are there optimised libraries in Python to allow me forwarding and building a relay server - for example. Or is there something more elegant that doesn't even require a node in the middle? :)
[ "I'm not sure about Python implementations, but you may want to look at Samy Kamkar's example implementations of mechanisms for relay-free NAT traversal when both endpoints are NATed. (and the papers he references)\n\npwnat\nchownat\nNAT Pinning\n\n" ]
[ 4 ]
[]
[]
[ "networking", "python", "routing" ]
stackoverflow_0004067971_networking_python_routing.txt
Q: Getting the next specified element in lxml? I'm struggling trying to parse this html. There are h2s acting as titles for paragraphs. Both are in separate divs, and separated by more divs. The paragraph isn't a child of the h2. I'd like to group them together, but I can't figure how out. I thought find would do it but it didn't work: html = doc.cssselect('h2.title') for para in html: content = para.find('div.content') A: lxml.html.HtmlElement.find only takes a tag name or a path - it doesn't accept full CSS selectors. A better idea would just be to use doc.cssselect with advanced selectors. lxml.html will convert them to XPath selectors for you. Perhaps: for elem in doc.cssselect('h2.title div.content'): elem.text_content()
Getting the next specified element in lxml?
I'm struggling trying to parse this HTML. There are h2s acting as titles for paragraphs. Both are in separate divs, and separated by more divs. The paragraph isn't a child of the h2. I'd like to group them together, but I can't figure out how. I thought find would do it but it didn't work: html = doc.cssselect('h2.title') for para in html: content = para.find('div.content')
[ "lxml.html.HtmlElement.find only takes a tag name or a path - it doesn't accept full CSS selectors.\nA better idea would just be to use doc.cssselect with advanced selectors. lxml.html will convert them to XPath selectors for you.\nPerhaps:\nfor elem in doc.cssselect('h2.title div.content'):\n elem.text_content...
[ 2 ]
[]
[]
[ "html", "lxml", "python" ]
stackoverflow_0004075335_html_lxml_python.txt
Q: Django: How to get field value dynamically using custom template tags? I'm writing a generic template tag that could return a model's field value dynamically based on user inputs in template files. The idea follows which mentioned in the book "Practical Django Project 2nd Edition", but the book version is getting a list of objects where I want to get only a object's value. I want to get the site settings (Title, Tagline etc.) and pass in the template dynamically so that I don't have to write the code again to get each field's value. Here is what I have done so far. Define an admin.py for the model (not included here because it's not important) Defined a model: from django.db import models from django.contrib.sites.models import Site class Naming(models.Model): title = models.CharField(max_length=250) site_id = models.ForeignKey(Site) tagline = models.CharField(max_length=250) description = models.TextField(blank=True) def __unicode__(self): return self.title Defined a template tag file, the commented line is where I get stuck from django.db.models import get_model from django.contrib.sites.models import Site from django import template def do_site_att(parser, token): bits = token.split_contents() if len(bits) != 5: raise template.TemplateSyntaxError("'get_site_att' tag takes exactly four arguments") model_args = bits[1].split('.') if len(model_args) != 2: raise template.TemplateSyntaxError("First argument to 'get_site_att' must be an 'application name'.'model name' string.") model = get_model(*model_args) if model is None: raise template.TemplateSyntaxError("'get_site_att' tag got an invalid model: %s." 
% bits[1]) return ContentNode(model, bits[2], bits[4]) class ContentNode(template.Node): def __init__(self, model, field, varname): self.model = model self.field = field self.varname = varname def render(self, context): current_site = Site.objects.get_current() try: var = self.model.objects.get(site_id=current_site.id) context[self.varname] = var.title #I get stuck here because it not accepts input like var.field (I have extract the field value above) except: context[self.varname] = "Value not found" return '' register = template.Library() register.tag('get_site_att', do_site_att) The template query in base.html: {% load general_tags %} {% get_site_att general.Naming title as title %} <h1 id="branding">{{title}}</h1> {% get_site_att general.Naming tagline as tagline %} <h2 id="tagline">{{tagline}}</h2> I have tried all the possible ways I can think of, but just can't get it works. Any help is really appreciated. Thanks. A: I found the solution for this: on the commented lines, use this code: var = self.model.objects.get(site_id__exact=current_site.id) context[self.varname] = var.__dict__[self.field]#this will get the field's value dynamically, which is what I was looking for A: The normal way in Python to get an attribute whose name you have in another variable is to use getattr. context[self.varname] = getattr(var, self.field)
Django: How to get field value dynamically using custom template tags?
I'm writing a generic template tag that could return a model's field value dynamically based on user inputs in template files. The idea follows which mentioned in the book "Practical Django Project 2nd Edition", but the book version is getting a list of objects where I want to get only a object's value. I want to get the site settings (Title, Tagline etc.) and pass in the template dynamically so that I don't have to write the code again to get each field's value. Here is what I have done so far. Define an admin.py for the model (not included here because it's not important) Defined a model: from django.db import models from django.contrib.sites.models import Site class Naming(models.Model): title = models.CharField(max_length=250) site_id = models.ForeignKey(Site) tagline = models.CharField(max_length=250) description = models.TextField(blank=True) def __unicode__(self): return self.title Defined a template tag file, the commented line is where I get stuck from django.db.models import get_model from django.contrib.sites.models import Site from django import template def do_site_att(parser, token): bits = token.split_contents() if len(bits) != 5: raise template.TemplateSyntaxError("'get_site_att' tag takes exactly four arguments") model_args = bits[1].split('.') if len(model_args) != 2: raise template.TemplateSyntaxError("First argument to 'get_site_att' must be an 'application name'.'model name' string.") model = get_model(*model_args) if model is None: raise template.TemplateSyntaxError("'get_site_att' tag got an invalid model: %s." 
% bits[1]) return ContentNode(model, bits[2], bits[4]) class ContentNode(template.Node): def __init__(self, model, field, varname): self.model = model self.field = field self.varname = varname def render(self, context): current_site = Site.objects.get_current() try: var = self.model.objects.get(site_id=current_site.id) context[self.varname] = var.title #I get stuck here because it not accepts input like var.field (I have extract the field value above) except: context[self.varname] = "Value not found" return '' register = template.Library() register.tag('get_site_att', do_site_att) The template query in base.html: {% load general_tags %} {% get_site_att general.Naming title as title %} <h1 id="branding">{{title}}</h1> {% get_site_att general.Naming tagline as tagline %} <h2 id="tagline">{{tagline}}</h2> I have tried all the possible ways I can think of, but just can't get it works. Any help is really appreciated. Thanks.
[ "I found the solution for this:\non the commented lines, use this code:\nvar = self.model.objects.get(site_id__exact=current_site.id)\ncontext[self.varname] = var.__dict__[self.field]#this will get the field's value dynamically, which is what I was looking for\n\n", "The normal way in Python to get an attribute w...
[ 4, 4 ]
[]
[]
[ "django", "dynamic", "model", "python", "templatetags" ]
stackoverflow_0004074927_django_dynamic_model_python_templatetags.txt
Q: Python Mechanize with Dynamic Dropdown Selection I'm using mechanize to fill forms and I run into a problem with dynamically-filled dropdown lists that are dependent on a previous selection. In mechanize, I do something like this to select the category: import mechanize br = mechanize.Browser() """Select the form and set up the browser""" br["state"] = ["California"] br["city"] = ["San Francisco"] # this is where the error is br.submit() I cannot choose the city as "San Francisco" until I have chosen the state as "California," because the city dropdown list is dynamically populated after choosing "California." How can I submit the city with Python and mechanize? A: mechanize doesn't support JavaScript. Instead, you should use urllib2 to send the desired values. import urllib2 import urllib values = dict(state="CA", city="SF") # examine form for actual vars try: req = urllib2.Request("http://example.com/post.php", urllib.urlencode(values)) response_page = urllib2.urlopen(req).read() except urllib2.HTTPError, details: pass #do something with the error here...
Python Mechanize with Dynamic Dropdown Selection
I'm using mechanize to fill forms and I run into a problem with dynamically-filled dropdown lists that are dependent on a previous selection. In mechanize, I do something like this to select the category: import mechanize br = mechanize.Browser() """Select the form and set up the browser""" br["state"] = ["California"] br["city"] = ["San Francisco"] # this is where the error is br.submit() I cannot choose the city as "San Francisco" until I have chosen the state as "California," because the city dropdown list is dynamically populated after choosing "California." How can I submit the city with Python and mechanize?
[ "mechanize doesn't support JavaScript. Instead, you should use urllib2 to send the desired values.\nimport urllib2\nimport urllib\n\nvalues = dict(state=\"CA\", city=\"SF\") # examine form for actual vars\ntry:\n req = urllib2.Request(\"http://example.com/post.php\",\n urllib.urlencode(v...
[ 1 ]
[]
[]
[ "drop_down_menu", "mechanize", "python", "webforms" ]
stackoverflow_0004075666_drop_down_menu_mechanize_python_webforms.txt
Q: Convert shoutcast stream to playable samples in Python? I have a shoutcat radio station and now want to build a player for it. I know how to "get" thet stream from the server, thanks a lot to bobince , but I am not sure how to convert that stream into playable samples. How is it done? A: Shoutcast streams are typically (but not always) MP3. To get playable samples, you have to decode the stream's MP3 data. Have you seen the resource at http://codeboje.de/playing-mp3-stream-python/? Looks like a simple solution, but requires an awful lot of libraries. A: There are quite a few possibilities for MP3 decoding under Python. PyMedia is one I've had some success with in the past (but for which development seems to have stopped). It's not just an MP3 decoder though, but a playback interface with support for many audio and video formats via ffmpeg. There's also pyffmpeg which seems to have come back to life recently (haven't tried it yet). Then there's PyGame can also play MP3, though this is a pretty small part of what it does. pymad is more lightweight possibility, being a direct interface to the libmad decoder library. And then there's always the possibility of handing the task off to an external multimedia library such as DirectShow, or GStreamer (via gst-python)... A: Well, from what I can read on python, try this page. If that doesn't work, try the PythonInMusic article on the python wiki.
Convert shoutcast stream to playable samples in Python?
I have a shoutcat radio station and now want to build a player for it. I know how to "get" thet stream from the server, thanks a lot to bobince , but I am not sure how to convert that stream into playable samples. How is it done?
[ "Shoutcast streams are typically (but not always) MP3. To get playable samples, you have to decode the stream's MP3 data.\nHave you seen the resource at http://codeboje.de/playing-mp3-stream-python/? Looks like a simple solution, but requires an awful lot of libraries.\n", "There are quite a few possibilities f...
[ 2, 1, 0 ]
[]
[]
[ "audio_player", "python", "sample", "shoutcast", "stream" ]
stackoverflow_0002881230_audio_player_python_sample_shoutcast_stream.txt
Q: Phylo BioPython building trees I trying to build a tree with BioPython, Phylo module. What I've done so far is this image: each name has a four digit number followed by - and a number: this number refer to the number of times that sequence is represented. That means 1578 - 22, that node should represent 22sequences. the file with the sequences aligned: file the file with the distance to build a tree: file So now I known how to change each size of the node. Each node has a different size, this is easy doing an array of the different values: fh = open(MEDIA_ROOT + "groupsnp.txt") list_size = {} for line in fh: if '>' in line: labels = line.split('>') label = labels[-1] label = label.split() num = line.split('-') size = num[-1] size = size.split() for lab in label: for number in size: list_size[lab] = int(number) a = array(list_size.values()) But the array is arbitrary, I would like to put the correct node size into the right node, I tried this: for elem in list_size.keys(): if labels == elem: Phylo.draw_graphviz(tree_xml, prog="neato", node_size=a) but nothing appears when I use the if statement. Anyway of doing this? I would really appreciate! Thanks everybody A: I finally got this working. The basic premise is that you're going to use the labels/nodelist to build your node_sizes. This way they correlate properly. I'm sure I'm missing some important options to make the tree look 100% but it appears the node sizes are showing up properly. 
#basically a stripped down rewrite of Phylo.draw_graphviz import networkx, pylab from Bio import Phylo #taken from draw_graphviz def get_label_mapping(G, selection): for node in G.nodes(): if (selection is None) or (node in selection): try: label = str(node) if label not in (None, node.__class__.__name__): yield (node, label) except (LookupError, AttributeError, ValueError): pass kwargs={} tree = Phylo.read('tree.dnd', 'newick') G = Phylo.to_networkx(tree) Gi = networkx.convert_node_labels_to_integers(G, discard_old_labels=False) node_sizes = [] labels = dict(get_label_mapping(G, None)) kwargs['nodelist'] = labels.keys() #create our node sizes based on our labels because the labels are used for the node_list #this way they should be correct for label in labels.keys(): if str(label) != "Clade": num = label.name.split('-') #the times 50 is just a guess on what would look best size = int(num[-1]) * 50 node_sizes.append(size) kwargs['node_size'] = node_sizes posi = networkx.pygraphviz_layout(Gi, 'neato', args='') posn = dict((n, posi[Gi.node_labels[n]]) for n in G) networkx.draw(G, posn, labels=labels, node_color='#c0deff', **kwargs) pylab.show() Resulting Tree
Phylo BioPython building trees
I trying to build a tree with BioPython, Phylo module. What I've done so far is this image: each name has a four digit number followed by - and a number: this number refer to the number of times that sequence is represented. That means 1578 - 22, that node should represent 22sequences. the file with the sequences aligned: file the file with the distance to build a tree: file So now I known how to change each size of the node. Each node has a different size, this is easy doing an array of the different values: fh = open(MEDIA_ROOT + "groupsnp.txt") list_size = {} for line in fh: if '>' in line: labels = line.split('>') label = labels[-1] label = label.split() num = line.split('-') size = num[-1] size = size.split() for lab in label: for number in size: list_size[lab] = int(number) a = array(list_size.values()) But the array is arbitrary, I would like to put the correct node size into the right node, I tried this: for elem in list_size.keys(): if labels == elem: Phylo.draw_graphviz(tree_xml, prog="neato", node_size=a) but nothing appears when I use the if statement. Anyway of doing this? I would really appreciate! Thanks everybody
[ "I finally got this working. The basic premise is that you're going to use the labels/nodelist to build your node_sizes. This way they correlate properly. I'm sure I'm missing some important options to make the tree look 100% but it appears the node sizes are showing up properly.\n#basically a stripped down rewrite...
[ 8 ]
[]
[]
[ "biopython", "graphviz", "numpy", "python" ]
stackoverflow_0004051414_biopython_graphviz_numpy_python.txt
Q: Django + alwaysdata.com Noob Question I'm learning Django and working on sample sites.. I registered at alwaysdata but am unable to view the site after I go 'manage.py runserver' in the SSH (this is after I've created the project and navigated to the appropriate directory, of course). I appreciate any help. Thanks A: Have you taken a look at the wiki entry regarding the django dev server? Google translate seems to indicate that you need to request some ports open first, and that once you've got them assigned you can pass in one of those port numbers to runserver to run it on that port. If you need the translated-to-English version, here's a link A: I am also an alwaysdata customer. Daniel DiPaolo gave you the right links to get it working on ssh with the dev server. The google translation seems correct to me. You need to request a port range in order to use the dev server on ssh. But this is intended only for debugging purpose and should run for a short while. Here is how to deploy with fastCGI which is the regular way to deploy a Django site on alwaysdata. http://wiki.alwaysdata.com/wiki/D%C3%A9ployer_une_application_Django. Google give a decent translation AlwaysData is running a forum at http://forum.alwaysdata.com/ mostly in French but questions in English are welcomed. A: The devserver included with django is for testing purposes, only on your local machine and should not be used on a web host. From the docs: DO NOT USE THIS SERVER IN A PRODUCTION SETTING. It has not gone through security audits or performance tests. (And that's how it's gonna stay. We're in the business of making Web frameworks, not Web servers, so improving this server to be able to handle a production environment is outside the scope of Django.) If i have somehow misinterpreted your question, i apologise. A: When you enter manage.py runserver you're running the development web server on the loopback interface (127.0.0.1). 
You could test this out by running wget 127.0.0.1 on the same server that the development web server is running. If you want it to be on the internet so you could access it from outside that server, you'd have to specify your public ip. For example, to run the web development server on ip 1.1.1.1 and port 8080 (personally recommend using a non-standard port): manage.py runserver 1.1.1.1:8080 To find out your public ip, try running ifconfig on SSH. Also, you might have to check out the firewall settings with your ISP/server provider.
Django + alwaysdata.com Noob Question
I'm learning Django and working on sample sites.. I registered at alwaysdata but am unable to view the site after I go 'manage.py runserver' in the SSH (this is after I've created the project and navigated to the appropriate directory, of course). I appreciate any help. Thanks
[ "Have you taken a look at the wiki entry regarding the django dev server? Google translate seems to indicate that you need to request some ports open first, and that once you've got them assigned you can pass in one of those port numbers to runserver to run it on that port.\nIf you need the translated-to-English v...
[ 3, 3, 1, 1 ]
[]
[]
[ "django", "python" ]
stackoverflow_0004073352_django_python.txt
Q: s-t cut algorithm in Python I'm looking for an implementation of s-t cut algorithm for flow network (directed graph) in Python. Is there vertex cut version of the algorithm? A: igraph has it: >>> from igraph import Graph >>> from random import randint >>> g = Graph.GRG(100, 0.2) # generate a geometric random graph >>> g.es["capacity"] = [randint(0, 1000) for i in xrange(g.ecount())] >>> cut = g.maxflow(0, 99, "capacity") cut.membership then gives you the membership of each vertex (0-1 vector), cut[0] gives you the vertices on one side of the cut, cut[1] gives the other, cut.value gives the value of the cut.
s-t cut algorithm in Python
I'm looking for an implementation of s-t cut algorithm for flow network (directed graph) in Python. Is there vertex cut version of the algorithm?
[ "igraph has it:\n>>> from igraph import Graph\n>>> from random import randint\n>>> g = Graph.GRG(100, 0.2) # generate a geometric random graph\n>>> g.es[\"capacity\"] = [randint(0, 1000) for i in xrange(g.ecount())]\n>>> cut = g.maxflow(0, 99, \"capacity\")\n\ncut.membership then gives you the membership of ...
[ 1 ]
[]
[]
[ "algorithm", "graph", "python" ]
stackoverflow_0004076245_algorithm_graph_python.txt
Q: What is the best way to store change history of website articles? Our client wants us to implement change history for website articles. What is the best way to do it? A: Without knowing too much about your particular application stack (framework, language), you have at least two basic approaches: Versioned Records - Instead of storing articles as a single row in a database (or single file in a filesystem) and updating with changes, simply create a new record with a pointer to its previous versions. If you're on Rails, you might want to check out this Railscast about Model Versioning Sequential diffs - Another approach is to keep a current copy of the document, but maintain a history of edits made by users. If it's a simple text document, these edits can be represented by a diff patch. Wikipedia "diff" for more information. If you still need help, tell me more about your setup / requirements, and we can figure it out from there. A: I presume you're using a CMS. If not, use one. WordPress is a good start. If you're developing from scratch, the usual method is to have two tables: one for page information (so title, menu position etc.) and then a page_content table, which has columns for page_id, content, and timestamp. As you save a page, instead of updating a database table you instead write a new record to the page_content table with the page's ID and the time of the save. That way, when displaying pages on your front-end you just select the latest record for that particular page ID, but you also have a history of that page by querying for all records by page_id, sorted by timestamp.
What is the best way to store change history of website articles?
Our client wants us to implement change history for website articles. What is the best way to do it?
[ "Without knowing too much about your particular application stack (framework, language), you have at least two basic approaches:\n\nVersioned Records - Instead of storing articles as a single row in a database (or single file in a filesystem) and updating with changes, simply create a new record with a pointer to i...
[ 1, 0 ]
[ "There is a wide variety of ways to do this as you alluded by tagging php, .net, python, and ruby. You missed a few off the top of my head perl and jsp. Each of these have their plusses and minuses and is really a question of what best suits your needs.\nPHP is probably the fastest reward for time spent.\nRuby, i...
[ -1 ]
[ ".net", "php", "python", "ruby" ]
stackoverflow_0004075309_.net_php_python_ruby.txt
Q: eliminate trailing spaces in url If a url has trailing spaces in it.how to find them and eliminate the trailing space .. http://www.youtube.com/watch?v=cs3ROFNxa5M Thanks A: myString.strip()
eliminate trailing spaces in url
If a url has trailing spaces in it.how to find them and eliminate the trailing space .. http://www.youtube.com/watch?v=cs3ROFNxa5M Thanks
[ "myString.strip() \n" ]
[ 3 ]
[]
[]
[ "python" ]
stackoverflow_0004076776_python.txt
Q: How to connect pyqtSignal between classes in PyQT How to connect pyqtSignal between two different objects (classes) PROPERLY? I mean best practice. Look what I have done to achieve the goal: The Thermometer class is notified when Pot increases its temperature: from PyQt4 import QtCore class Pot(QtCore.QObject): temperatureRaisedSignal = QtCore.pyqtSignal() def __init__(self, parent=None): super(Pot, self).__init__(parent) self.temperature = 1 def Boil(self): self.temperature += 1 self.temperatureRaisedSignal.emit() def RegisterSignal(self, obj): self.temperatureRaisedSignal.connect(obj) class Thermometer(): def __init__(self, pot): self.pot = pot self.pot.RegisterSignal(self.temperatureWarning) def StartMeasure(self): self.pot.Boil() def temperatureWarning(self): print("Too high temperature!") if __name__ == '__main__': pot = Pot() th = Thermometer(pot) th.StartMeasure() Or is there any easier / better way to do it? I also insist (if possible) on using "new" style PyQt signals. A: from PyQt4 import QtCore class Pot(QtCore.QObject): temperatureRaisedSignal = QtCore.pyqtSignal() def __init__(self, parent=None): QtCore.QObject.__init__(self) self.temperature = 1 def Boil(self): self.temperatureRaisedSignal.emit() self.temperature += 1 class Thermometer(): def __init__(self, pot): self.pot = pot self.pot.temperatureRaisedSignal.connect(self.temperatureWarning) def StartMeasure(self): self.pot.Boil() def temperatureWarning(self): print("Too high temperature!") if __name__ == '__main__': pot = Pot() th = Thermometer(pot) th.StartMeasure() This is how I would've done it according to the docs: http://www.riverbankcomputing.com/static/Docs/PyQt4/html/new_style_signals_slots.html
How to connect pyqtSignal between classes in PyQT
How to connect pyqtSignal between two different objects (classes) PROPERLY? I mean best practice. Look what I have done to achieve the goal: The Thermometer class is notified when Pot increases its temperature: from PyQt4 import QtCore class Pot(QtCore.QObject): temperatureRaisedSignal = QtCore.pyqtSignal() def __init__(self, parent=None): super(Pot, self).__init__(parent) self.temperature = 1 def Boil(self): self.temperature += 1 self.temperatureRaisedSignal.emit() def RegisterSignal(self, obj): self.temperatureRaisedSignal.connect(obj) class Thermometer(): def __init__(self, pot): self.pot = pot self.pot.RegisterSignal(self.temperatureWarning) def StartMeasure(self): self.pot.Boil() def temperatureWarning(self): print("Too high temperature!") if __name__ == '__main__': pot = Pot() th = Thermometer(pot) th.StartMeasure() Or is there any easier / better way to do it? I also insist (if possible) on using "new" style PyQt signals.
[ "from PyQt4 import QtCore\n\nclass Pot(QtCore.QObject):\n\n temperatureRaisedSignal = QtCore.pyqtSignal()\n\n def __init__(self, parent=None):\n QtCore.QObject.__init__(self)\n self.temperature = 1\n\n def Boil(self):\n self.temperatureRaisedSignal.emit()\n self.temperature += 1...
[ 23 ]
[]
[]
[ "pyqt", "pyqt4", "python" ]
stackoverflow_0003891465_pyqt_pyqt4_python.txt
Q: Printing a formatted numerical range strings from a list of numbers in python I wrote this class to compress and expand number lists to sequence strings, including step values when the the step value is greater than 1. The code still feels clunky. Are there libraries that can do something like this? Possibly simpler code? import re class Foo( object ): def __init__( self, num_list ): self.num_list = sorted( list( set( [ int(n) for n in num_list ] ) ) ) # end def __init__ def gen_seq_data( self ): self.seq_data = list() index_offset = None backward_step_value = None forward_step_value = None sub_list = list() sub_list_step_value = None for index, num in enumerate( self.num_list ): if index - 1 < 0: backward_step_value = None # end if else: backward_step_value = num - self.num_list[ index - 1 ] # end else try: forward_step_value = self.num_list[ index + 1 ] - num # end try except IndexError: forward_step_value = None # end except if backward_step_value is None: sub_list.append( num ) # end if elif backward_step_value == forward_step_value: sub_list.append( num ) if forward_step_value is None: self.seq_data.append( ( sub_list_step_value, sub_list ) ) # end if # end if elif backward_step_value == sub_list_step_value: sub_list.append( num ) if sub_list: self.seq_data.append( ( sub_list_step_value, sub_list ) ) # end if sub_list = list() # end elif else: if sub_list: self.seq_data.append( ( sub_list_step_value, sub_list ) ) # end if sub_list = [ num ] if forward_step_value is None: self.seq_data.append( ( sub_list_step_value, sub_list ) ) # end if # end else try: sub_list_step_value = sub_list[ -1 ] - sub_list[ -2 ] # end try except IndexError: sub_list_step_value = None # end except # end for # end def gen_seq_object def format_elements( self ): format_elements = list() for step, num_list in self.seq_data: if step is None: format_elements.append( '%s' % ( num_list[ 0 ] ) ) # end if elif step == 1: format_elements.append( '%s-%s' % ( num_list[ 0 ], num_list[ -1 ] ) ) 
# end elif else: format_elements.append( '%s-%sx%s' % ( num_list[ 0 ], num_list[ -1 ], step ) ) # end else # end for return format_elements # end def format_range def format_range( self ): return ','.join( self.format_elements() ) # end def format_range def expand_range( self ): num_list = list() for r_token in self.format_range().split( ',' ): if r_token.isdigit(): num_list.append( int( r_token ) ) # end if elif '-' in r_token: if 'x' in r_token: start, end, step = re.split( r'[-|x]', r_token ) num_list.extend( range( int( start ), int( end ) + 1, int( step ) ) ) # end if else: start, end = r_token.split( '-' ) num_list.extend( range( int( start ), int( end ) + 1 ) ) # end else # end elif # end for return num_list # end def expand_range # end class Foo Input/output: data = [ 1, 4, 5, 6, 10, 15, 16, 17, 18, 20, 22, 24, 26, 27, 28, 30, 35, 40, 45, 50, 56, 63, 66, 69, 72 ] foo = Foo( data ) foo.gen_seq_data() print data print foo.format_range() 1,4-6,10,15-18,20-26x2,27,28,30-50x5,56,63-72x3 print foo.expand_range() [1, 4, 5, 6, 10, 15, 16, 17, 18, 20, 22, 24, 26, 27, 28, 30, 35, 40, 45, 50, 56, 63, 66, 69, 72] A: One. Remove all #END comments. They are monstrously useless. Your indentation speaks for itself. Use it. Two. Don't make this a class. It isn't a distinct object with distinct responsibilities. It's just an algorithm. Made up of functions. At best it's a class with all static methods. Three. Never do this for index, num in enumerate( self.num_list ): if index - 1 < 0: backward_step_value = None # end if else: backward_step_value = num - self.num_list[ index - 1 ] # end else If the first element is special, then treat it separately. backward_step_value = self.num_list[0] for num in self.num_list[1:]: You rarely need the index for something like this. Indeed, the only reason for having the index appears to be to treat the first element specially. Finally, this is a "reduction". 
Use a generator function def reduce_list( some_list ): v= min(some_list) low, high = v, v for v in sorted(some_list)[1:]: if v == high+1: high= high+1 else: yield low, high yield low, high That might yield your list of contiguous ranges. You can then format those. format_elements( reduce_list( some_list ) ) A: The following solution handles non-contiguous ranges, and also preserves the behavior of ignoring ranges of length 2. def reduce_list(seq): l = sorted(set(seq)) low = high = l[0] step = None for v in l[1:]: if step is None or v - high == step: # Extend the current range. step = v - high high = v elif high - low == step: # The current range only has two values. Yield the # first value, and start a new range comprising the # second value and the current value. yield low, low, None step = v - high low = high high = v else: # Yield the current range, and start a new one. yield low, high, step low = high = v step = None if high - low == step: # The final range has only two values. Yield them # individually. yield low, low, None step = None low = high yield low, high, step def format_element(low, high, step): if step is None: assert low == high return "%s" % (low,) elif step == 1: return "%s-%s" % (low, high) else: return "%s-%sx%s" % (low, high, step) def format_list(seq): return ','.join(format_element(*e) for e in seq) Here's some test code: def test( *args ): print args, "==", format_list(reduce_list(args)) test(1) test(1, 2) test(1, 2, 3) test(0, 10) test(0, 10, 20) test(0, 10, 11, 12, 14, 16) test(0, 2, 4, 8, 16, 32, 64) test(0, 1, 3, 4, 6, 7, 9, 10) test(0, 1, 3, 6, 10, 15, 21, 28) which outputs: (1,) == 1 (1, 2) == 1,2 (1, 2, 3) == 1-3 (0, 10) == 0,10 (0, 10, 20) == 0-20x10 (0, 10, 11, 12, 14, 16) == 0,10-12,14,16 (0, 2, 4, 8, 16, 32, 64) == 0-4x2,8,16,32,64 (0, 1, 3, 4, 6, 7, 9, 10) == 0,1,3,4,6,7,9,10 (0, 1, 3, 6, 10, 15, 21, 28) == 0,1,3,6,10,15,21,28
Printing a formatted numerical range strings from a list of numbers in python
I wrote this class to compress and expand number lists to sequence strings, including step values when the the step value is greater than 1. The code still feels clunky. Are there libraries that can do something like this? Possibly simpler code? import re class Foo( object ): def __init__( self, num_list ): self.num_list = sorted( list( set( [ int(n) for n in num_list ] ) ) ) # end def __init__ def gen_seq_data( self ): self.seq_data = list() index_offset = None backward_step_value = None forward_step_value = None sub_list = list() sub_list_step_value = None for index, num in enumerate( self.num_list ): if index - 1 < 0: backward_step_value = None # end if else: backward_step_value = num - self.num_list[ index - 1 ] # end else try: forward_step_value = self.num_list[ index + 1 ] - num # end try except IndexError: forward_step_value = None # end except if backward_step_value is None: sub_list.append( num ) # end if elif backward_step_value == forward_step_value: sub_list.append( num ) if forward_step_value is None: self.seq_data.append( ( sub_list_step_value, sub_list ) ) # end if # end if elif backward_step_value == sub_list_step_value: sub_list.append( num ) if sub_list: self.seq_data.append( ( sub_list_step_value, sub_list ) ) # end if sub_list = list() # end elif else: if sub_list: self.seq_data.append( ( sub_list_step_value, sub_list ) ) # end if sub_list = [ num ] if forward_step_value is None: self.seq_data.append( ( sub_list_step_value, sub_list ) ) # end if # end else try: sub_list_step_value = sub_list[ -1 ] - sub_list[ -2 ] # end try except IndexError: sub_list_step_value = None # end except # end for # end def gen_seq_object def format_elements( self ): format_elements = list() for step, num_list in self.seq_data: if step is None: format_elements.append( '%s' % ( num_list[ 0 ] ) ) # end if elif step == 1: format_elements.append( '%s-%s' % ( num_list[ 0 ], num_list[ -1 ] ) ) # end elif else: format_elements.append( '%s-%sx%s' % ( num_list[ 0 ], num_list[ 
-1 ], step ) ) # end else # end for return format_elements # end def format_range def format_range( self ): return ','.join( self.format_elements() ) # end def format_range def expand_range( self ): num_list = list() for r_token in self.format_range().split( ',' ): if r_token.isdigit(): num_list.append( int( r_token ) ) # end if elif '-' in r_token: if 'x' in r_token: start, end, step = re.split( r'[-|x]', r_token ) num_list.extend( range( int( start ), int( end ) + 1, int( step ) ) ) # end if else: start, end = r_token.split( '-' ) num_list.extend( range( int( start ), int( end ) + 1 ) ) # end else # end elif # end for return num_list # end def expand_range # end class Foo Input/output: data = [ 1, 4, 5, 6, 10, 15, 16, 17, 18, 20, 22, 24, 26, 27, 28, 30, 35, 40, 45, 50, 56, 63, 66, 69, 72 ] foo = Foo( data ) foo.gen_seq_data() print data print foo.format_range() 1,4-6,10,15-18,20-26x2,27,28,30-50x5,56,63-72x3 print foo.expand_range() [1, 4, 5, 6, 10, 15, 16, 17, 18, 20, 22, 24, 26, 27, 28, 30, 35, 40, 45, 50, 56, 63, 66, 69, 72]
[ "One. Remove all #END comments. They are monstrously useless. Your indentation speaks for itself. Use it. \nTwo. Don't make this a class. It isn't a distinct object with distinct responsibilities. It's just an algorithm. Made up of functions. At best it's a class with all static methods.\nThree. Never do ...
[ 2, 1 ]
[]
[]
[ "python" ]
stackoverflow_0004074373_python.txt
Q: a good solution to set up a rdf triplestore in python? i'm looking for something as easy and tidy as the ARC2 class for PHP to use in python. so far i've taken a look at rdflib, but the lack of support for SPARQL and inaccurate docs are putting me off! is there another python library which will enable me to set up and use a triple store quickly and easily, or am i stuck with using a traditional database for the time being? thank for any advice :) A: Yes, I agree RDFLib is not great as a SPARQL engine and it doesn't scale much. An option that always works for me is 4store as a triple store with the Python client 4store-client. In the 4store IRC channel and Google Group you'll find helpful support. A: There is a new python SQLAlchemy and RDFLib module for the Virtuoso RDF Store that is worth considering also ...
a good solution to set up a rdf triplestore in python?
i'm looking for something as easy and tidy as the ARC2 class for PHP to use in python. so far i've taken a look at rdflib, but the lack of support for SPARQL and inaccurate docs are putting me off! is there another python library which will enable me to set up and use a triple store quickly and easily, or am i stuck with using a traditional database for the time being? thank for any advice :)
[ "Yes, I agree RDFLib is not great as a SPARQL engine and it doesn't scale much.\nAn option that always works for me is 4store as a triple store with the Python client 4store-client. In the 4store IRC channel and Google Group you'll find helpful support.\n", "There is a new python SQLAlchemy and RDFLib module for ...
[ 4, 2 ]
[]
[]
[ "python", "rdf", "sparql" ]
stackoverflow_0004073002_python_rdf_sparql.txt
Q: How implement .NET server and Python client? I've got a problem. I Have a tool(threads manager) that receives some data and do some computation. I need to write Python client to send data to that tool...I thing I should use .NET Remoting, but how to do that? pls share some links where I can read or post some code...I can't google info about that... P.S Python 2.7, NOT IronPython A: .Net Remoting is designed for when you have .net at both ends so will be very hard to use from the Python end. (Event the XML encoding of .net remoting is not easy to use from other platforms) I am assuming that Python has support for soap, if so I would look at using WCF at the .net end running over the “basic profile”. JSON is another option, there are lots of open source projects that add JSON support to .net, I assume that Python also has JSON surport. A: As for JSON, on .NET side you could try JsonFx. Then call it from python using any json-rpc client. A: I would use XML-RPC for communication. It is very simple to implement (a couple lines of code in Python, not sure about .NET but it shouldn't be difficult there either) and should be enough in your scenario.
How implement .NET server and Python client?
I've got a problem. I Have a tool(threads manager) that receives some data and do some computation. I need to write Python client to send data to that tool...I thing I should use .NET Remoting, but how to do that? pls share some links where I can read or post some code...I can't google info about that... P.S Python 2.7, NOT IronPython
[ ".Net Remoting is designed for when you have .net at both ends so will be very hard to use from the Python end. (Event the XML encoding of .net remoting is not easy to use from other platforms)\nI am assuming that Python has support for soap, if so I would look at using WCF at the .net end running over the “basic ...
[ 1, 1, 0 ]
[]
[]
[ "c#", "python", "remoting" ]
stackoverflow_0004076114_c#_python_remoting.txt
Q: Hash+mapping or index+mapping to condense use of strings I have ~200K named properties and ~25K files. I extract whether the properties hold or not for each file using Python as a set of properties that hold, one set for each file. To do this extraction I might run hundreds of individual python extraction scripts on a compute farm, in parallel. Each leaving behind some representation of the set extraction from each of the files. Further processing involves reading these 20K sets and working on the accumulated data. to generate a report on this set of files/properties. One issue I have is that if I store the extracted set as text then the long property name strings and file name strings will get repeated wasting disk space and increasing parse time. I was thinking of creating a central index of the sorted property names and just saving the index - same for the file names, as probably .py files to import. An alternative to using an index into the sorted list of names would be to use the str.hash() value as the index which would mean probably faster processing, but I am worried about the possibility of two unequal strings ending up with the same hash() value. Could this happen? I would be using the same Python executable and OS version on all machines. A: Do you know the properties in advance? If you do than you might want to consider Perfect hashing (i.e. you can distribute the settings of the hash instead of the full list of properties/files). A very crude (but possibly working way) would be having a few different hash functions (h1, h2...); start e.g. with the str.hash() and compute the hashes. If there are collisions, try using a tuple (h1(property), h2(property)) as a hash. If there are still collisions, use (h1(property), h2(property), h3(property)) - etc., until there are no collisions. The h_x functions can be actually somewhat configurable function and the recomended way would be to try some random hash functions. 
However it seems to me that it might be overkill, just distributing the list of files/properties might be a lot easier... A: Hashes can collide. You will have to consider that.
Hash+mapping or index+mapping to condense use of strings
I have ~200K named properties and ~25K files. I extract whether the properties hold or not for each file using Python as a set of properties that hold, one set for each file. To do this extraction I might run hundreds of individual python extraction scripts on a compute farm, in parallel. Each leaving behind some representation of the set extraction from each of the files. Further processing involves reading these 20K sets and working on the accumulated data. to generate a report on this set of files/properties. One issue I have is that if I store the extracted set as text then the long property name strings and file name strings will get repeated wasting disk space and increasing parse time. I was thinking of creating a central index of the sorted property names and just saving the index - same for the file names, as probably .py files to import. An alternative to using an index into the sorted list of names would be to use the str.hash() value as the index which would mean probably faster processing, but I am worried about the possibility of two unequal strings ending up with the same hash() value. Could this happen? I would be using the same Python executable and OS version on all machines.
[ "Do you know the properties in advance? If you do than you might want to consider Perfect hashing (i.e. you can distribute the settings of the hash instead of the full list of properties/files). \nA very crude (but possibly working way) would be having a few different hash functions (h1, h2...); start e.g. with the...
[ 2, 0 ]
[]
[]
[ "data_structures", "hash", "python", "set" ]
stackoverflow_0004076822_data_structures_hash_python_set.txt
Q: Python Listbox selection cleared whenever something is highlighted when it is working with Text or Entry widget I found a problem with Tkinter.Listbox when it is working with Entry or Text widgets. Whenever something is highlighted in the Entry or Text widget, it clears the listbox selection. I am using Python 2.6.5. The following is the testing scripts. from Tkinter import * root = Tk() List = ['It is a listbox: item1','item2','item3','item4','item5'] app = Listbox(root, width = 50, height = 6, selectmode = 'multiple') for item in List: app.insert(END, item) app.select_set(0,END) app.pack() entrvar = StringVar() entry = Entry(root,width = 50, textvariable = entrvar) entry.pack() entrvar.set('it is an entry widget') texter = Text(root,width = 38,height = 5) texter.pack() texter.insert(END,'it is a text widget\nit is a text widget\nit is a text widget\n') label = Label(root,width = 50,height = 5, text = 'it is a label widget') label.pack() app.mainloop() A: You need to set the exportselection attribute of the listbox to False
Python Listbox selection cleared whenever something is highlighted when it is working with Text or Entry widget
I found a problem with Tkinter.Listbox when it is working with Entry or Text widgets. Whenever something is highlighted in the Entry or Text widget, it clears the listbox selection. I am using Python 2.6.5. The following is the testing scripts. from Tkinter import * root = Tk() List = ['It is a listbox: item1','item2','item3','item4','item5'] app = Listbox(root, width = 50, height = 6, selectmode = 'multiple') for item in List: app.insert(END, item) app.select_set(0,END) app.pack() entrvar = StringVar() entry = Entry(root,width = 50, textvariable = entrvar) entry.pack() entrvar.set('it is an entry widget') texter = Text(root,width = 38,height = 5) texter.pack() texter.insert(END,'it is a text widget\nit is a text widget\nit is a text widget\n') label = Label(root,width = 50,height = 5, text = 'it is a label widget') label.pack() app.mainloop()
[ "You need to set the exportselection attribute of the listbox to False\n" ]
[ 0 ]
[]
[]
[ "listbox", "python", "text", "tkinter", "tkinter_entry" ]
stackoverflow_0004076926_listbox_python_text_tkinter_tkinter_entry.txt
Q: Python Tkinter Embed Matplotlib in GUI I'm trying to embed a plot in my Tkinter GUI coded in Python. I believe the code below succeeds in simply putting a graph into a canvas, but I don't have any control of the canvas location within the GUI grid. I want to be able to have a subsection of my GUI be the plot...not the entirety of it. How can I position this canvas widget? #!/usr/apps/Python/bin/python import matplotlib, sys matplotlib.use('TkAgg') from numpy import arange, sin, pi from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg from matplotlib.figure import Figure from Tkinter import * master = Tk() master.title("Hello World!") #------------------------------------------------------------------------------- f = Figure(figsize=(5,4), dpi=100) a = f.add_subplot(111) t = arange(0.0,3.0,0.01) s = sin(2*pi*t) a.plot(t,s) dataPlot = FigureCanvasTkAgg(f, master=master) dataPlot.show() dataPlot.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1) #------------------------------------------------------------------------------- master.mainloop() A: You don't have any other widgets so it's hard to know where you want other widgets. Here's what I can tell you though: by doing dataPlot.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1) you are asking Tkinter to fill the screen with the plot. This, because you ask it to fill in all directions (fill=BOTH) and expand to fill any extra space (expand=1). However, you can still add other widgets. pack works by putting widgets on one side of a container. Your container, master, always has four sides. So, for example, if you wanted to create a toolbar you would do something like: toolbar = tk.Frame(master) button = tk.Button(toolbar, text="Push me") button.pack(side="left") # left side of parent, the toolbar frame toolbar.pack(side=TOP, fill="x") # top of parent, the master window Notice that if you put this code after the code where you pack the plot, the toolbar shows up on the bottom! 
That's because TOP, BOTTOM, etc refer to space left over by any other widgets that have already been packed. The plot takes up the top, the space left over is at the bottom. So when you specify TOP again it means "at the top of the area below whatever is already at the top". So, you have some choices. The best choice is to make your widgets in the order you wish them to appear. If you pack the toolbar at the top before you pack the plot, it will be the toolbar that shows up at the very top. Further, you can place the plot at the bottom rather than the top and that will solve the problem, too. By the way, I typically create my widgets in one block, then lay them all out in a separate block. I find it makes the code easier to maintain. Another choice which may fit your mental model better is to grid instead of pack. With grid you can choose the row(s) and column(s) that the widget occupies. This makes it easy to lay things out in a grid, but at the expense of having to use a little more code. For example, to put the toolbar at the top and the plot down below you might do: toolbar.grid(row=1, column=1, sticky="ew") dataPlot.get_tk_widget().grid(row=1, column=1, sticky="nsew") master.grid_rowconfigure(0, weight=0) master.grid_rowconfigure(1, weight=1) master.grid_columnconfigure(0, weight=1) Notice that rows and columns start at zero. Also, "weight" refers to how much this widget expands relative to other widgets. With two rows of equal weight, they will expand equally when the window is resized. A weight of zero means no expansion. A weight of 2 for one row, and 1 for another means that the former will expand twice as much as the latter. For more information see this page on grid, and this page on pack.
Python Tkinter Embed Matplotlib in GUI
I'm trying to embed a plot in my Tkinter GUI coded in Python. I believe the code below succeeds in simply putting a graph into a canvas, but I don't have any control of the canvas location within the GUI grid. I want to be able to have a subsection of my GUI be the plot...not the entirety of it. How can I position this canvas widget? #!/usr/apps/Python/bin/python import matplotlib, sys matplotlib.use('TkAgg') from numpy import arange, sin, pi from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg from matplotlib.figure import Figure from Tkinter import * master = Tk() master.title("Hello World!") #------------------------------------------------------------------------------- f = Figure(figsize=(5,4), dpi=100) a = f.add_subplot(111) t = arange(0.0,3.0,0.01) s = sin(2*pi*t) a.plot(t,s) dataPlot = FigureCanvasTkAgg(f, master=master) dataPlot.show() dataPlot.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1) #------------------------------------------------------------------------------- master.mainloop()
[ "You don't have any other widgets so it's hard to know where you want other widgets. Here's what I can tell you though: by doing dataPlot.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1) you are asking Tkinter to fill the screen with the plot. This, because you ask it to fill in all directions (fill=BOTH) and ex...
[ 23 ]
[]
[]
[ "grid", "matplotlib", "python", "tkinter", "tkinter_canvas" ]
stackoverflow_0004073660_grid_matplotlib_python_tkinter_tkinter_canvas.txt
Q: Define todo keyword in Python and Sublime Text Can I define #@TODO or something like that as a keyword to be highlighted in Python? Is this specific to the text editor or to the language/platform? A: This is very specific to the editor. Language/Platform has nothing to do with highlighting. A: it s editor feature, eclipse has highlighter, in emacs you can define and ... also you can use bash script to find all todo in your project grep "# TODO" ./ -Rn
Define todo keyword in Python and Sublime Text
Can I define #@TODO or something like that as a keyword to be highlighted in Python? Is this specific to the text editor or to the language/platform?
[ "This is very specific to the editor.\nLanguage/Platform has nothing to do with highlighting.\n", "it s editor feature, eclipse has highlighter, in emacs you can define and ...\nalso you can use bash script to find all todo in your project\ngrep \"# TODO\" ./ -Rn\n\n" ]
[ 0, 0 ]
[]
[]
[ "keyword", "python", "sublimetext", "syntax_highlighting", "todo" ]
stackoverflow_0004076695_keyword_python_sublimetext_syntax_highlighting_todo.txt
Q: regression test dealing with hard coded path I need to extend a python code which has plenty of hard coded path In order not to mess everything, I want to create unit-tests for the code before my modifications: it will serve as non-regression tests with my new code (that will not have hard-coded paths) But because of hard coded system path, I shall run my test inside a chroot tree (I don't want to pollute my system dir) My problem is that I want to set up the chroot only for test, and this can be done with os.chroot only with root privileges (and I don't want to run the test scripts as root) In fact, I just need a fake tree diretory so that when the code that open('/etc/resolv.conf) retrieves a fake resolv.conf and not my system one I obviously don't want to replace myself the hard coded path in the code because it would not be real regression test Do you have any idea how to achieve this? Thanks Note that all the path accessed are readable with a user accout A: In your test set-up you could override the built-in open in the module that you are testing with your own version that reads from your "fake" directory structure: import mymod import os.path def chroot_open(filename,*args): filename = os.path.join("/home/you/fakeroot",filename) return open(filename,*args) mymod.open = chroot_open A: You could use a helper application that is setuid root to run the chroot; that would avoid the need to run the tests as root. Of course, that would probably still open up a local root exploit, so should only be done with appropriate precautions (e.g. in a VM image). At any rate, any solution with chroot is inherently platform-dependent, so it's rather awkward. I actually like the idea of Dave Webb (override open) better, I must admit...
regression test dealing with hard coded path
I need to extend a python code which has plenty of hard coded path In order not to mess everything, I want to create unit-tests for the code before my modifications: it will serve as non-regression tests with my new code (that will not have hard-coded paths) But because of hard coded system path, I shall run my test inside a chroot tree (I don't want to pollute my system dir) My problem is that I want to set up the chroot only for test, and this can be done with os.chroot only with root privileges (and I don't want to run the test scripts as root) In fact, I just need a fake tree diretory so that when the code that open('/etc/resolv.conf) retrieves a fake resolv.conf and not my system one I obviously don't want to replace myself the hard coded path in the code because it would not be real regression test Do you have any idea how to achieve this? Thanks Note that all the path accessed are readable with a user accout
[ "In your test set-up you could override the built-in open in the module that you are testing with your own version that reads from your \"fake\" directory structure:\nimport mymod\nimport os.path\n\ndef chroot_open(filename,*args):\n filename = os.path.join(\"/home/you/fakeroot\",filename)\n return open(filen...
[ 5, 0 ]
[]
[]
[ "linux", "python", "regression", "unit_testing" ]
stackoverflow_0004077338_linux_python_regression_unit_testing.txt
Q: Python logger to dictionary I am new in python...i am trying to read a python log file and make a dictionary. How will it be done with logger? A: read a python log file and make a dictionary. How will it be done with logger? It won't. logging writes logs. file reads logs. What are you asking? First, search for [Python] log parsing: https://stackoverflow.com/search?q=%5Bpython%5D+log+parsing Second, please post some sample code. A: As other commenter said, you don't want to use logging to read the file, instead use file. Here's an example of writing a log file, then reading it back. #!/usr/bin/env python # logger.py -- will write "time:debug:A:1" "time:debug:B:2" "time:debug:A:3" etc. log entries to a file import logging, random logging.basicConfig(filename='logfile.log',level=logging.DEBUG) for i in range(1,100): logging.debug("%s:%d" % (random.choice(["a", "b"]), i)) # logfile.log now contains -- # 100.1:debug:A:1 # 100.5:debug:B:2 # 100.8:debug:B:3 # 101.3:debug:A:4 # .... # 130.3:debug:B:100 #!/usr/bin/env/python # reader.py -- will read aformentioned log files and sum up the keys handle = file.open('logfile.log', 'r') sums = {} for line in handle.readlines(): time, debug, key, value = line.split(':') if not key in sums: sums[key] = 0 sums[key] += value print sums # will output -- # "{'a': 50, 'b': 50}"
Python logger to dictionary
I am new in python...i am trying to read a python log file and make a dictionary. How will it be done with logger?
[ "\nread a python log file and make a dictionary. How will it be done with logger?\n\nIt won't. \nlogging writes logs.\nfile reads logs.\nWhat are you asking? \nFirst, search for [Python] log parsing: https://stackoverflow.com/search?q=%5Bpython%5D+log+parsing\nSecond, please post some sample code.\n", "As other...
[ 1, 1 ]
[]
[]
[ "python" ]
stackoverflow_0004076416_python.txt
Q: Memory leak in Python Twisted: where is it? I have a Twisted server under load. When the server is under load, memory usage increases, and it is never reclaimed (even when there are no more clients). Next time it goes into high load, memory usage increases again. Here's a snapshot of the situation at that point: RSS memory is 400 MB (should be 200MB with usual max number of clients). gc.garbage is empty, so there are no uncollectable objects. Using objgraph.py shows no obvious candidates for leaks (no notable difference between a normal, healthy process and a leaking process). Using pympler shows a few tens of MB (only) used by Python objects (mostly dict, list, str and other native containers). Valgrind with leak-check=full enabled doesn't show any major leaks (only couple of MBs 'definitively lost') - so C extensions are not the culprit. The total memory also doesn't add up with the 400MB+ shown by top: ==23072== HEAP SUMMARY: ==23072== in use at exit: 65,650,760 bytes in 463,153 blocks ==23072== total heap usage: 124,269,475 allocs, 123,806,322 frees, 32,660,215,602 bytes allocated The only explanation I can find is that some objects are not tracked by the garbage collector, so that they are not shown by objgraph and pympler, yet use an enormous amount of RAM. What other tools or solutions do I have? Would compiling the Python interpreter in debug mode help, by using sys.getobjects? A: If the code is only leaking under load (did you verify this?), I'd have a look at all spots where messages are buffered. Does the memory usage of the process itself increase? Or does the memory use of the system increase? If it's the latter case, your server might simply be too slow to keep up with the incoming messages and the OS buffer fill up..
Memory leak in Python Twisted: where is it?
I have a Twisted server under load. When the server is under load, memory usage increases, and it is never reclaimed (even when there are no more clients). Next time it goes into high load, memory usage increases again. Here's a snapshot of the situation at that point: RSS memory is 400 MB (should be 200MB with usual max number of clients). gc.garbage is empty, so there are no uncollectable objects. Using objgraph.py shows no obvious candidates for leaks (no notable difference between a normal, healthy process and a leaking process). Using pympler shows a few tens of MB (only) used by Python objects (mostly dict, list, str and other native containers). Valgrind with leak-check=full enabled doesn't show any major leaks (only couple of MBs 'definitively lost') - so C extensions are not the culprit. The total memory also doesn't add up with the 400MB+ shown by top: ==23072== HEAP SUMMARY: ==23072== in use at exit: 65,650,760 bytes in 463,153 blocks ==23072== total heap usage: 124,269,475 allocs, 123,806,322 frees, 32,660,215,602 bytes allocated The only explanation I can find is that some objects are not tracked by the garbage collector, so that they are not shown by objgraph and pympler, yet use an enormous amount of RAM. What other tools or solutions do I have? Would compiling the Python interpreter in debug mode help, by using sys.getobjects?
[ "If the code is only leaking under load (did you verify this?), I'd have a look at all spots where messages are buffered. Does the memory usage of the process itself increase? Or does the memory use of the system increase? If it's the latter case, your server might simply be too slow to keep up with the incoming me...
[ 2 ]
[]
[]
[ "garbage_collection", "memory_leaks", "memory_management", "python", "twisted" ]
stackoverflow_0004078084_garbage_collection_memory_leaks_memory_management_python_twisted.txt
Q: Why is subtraction faster than addition in Python? I was optimising some Python code, and tried the following experiment: import time start = time.clock() x = 0 for i in range(10000000): x += 1 end = time.clock() print '+=',end-start start = time.clock() x = 0 for i in range(10000000): x -= -1 end = time.clock() print '-=',end-start The second loop is reliably faster, anywhere from a whisker to 10%, depending on the system I run it on. I've tried varying the order of the loops, number of executions etc, and it still seems to work. Stranger, for i in range(10000000, 0, -1): (ie running the loop backwards) is faster than for i in range(10000000): even when loop contents are identical. What gives, and is there a more general programming lesson here? A: I can reproduce this on my Q6600 (Python 2.6.2); increasing the range to 100000000: ('+=', 11.370000000000001) ('-=', 10.769999999999998) First, some observations: This is 5% for a trivial operation. That's significant. The speed of the native addition and subtraction opcodes is irrelevant. It's in the noise floor, completely dwarfed by the bytecode evaluation. That's talking about one or two native instructions around thousands. The bytecode generates exactly the same number of instructions; the only difference is INPLACE_ADD vs. INPLACE_SUBTRACT and +1 vs -1. Looking at the Python source, I can make a guess. This is handled in ceval.c, in PyEval_EvalFrameEx. INPLACE_ADD has a significant extra block of code, to handle string concatenation. That block doesn't exist in INPLACE_SUBTRACT, since you can't subtract strings. That means INPLACE_ADD contains more native code. Depending (heavily!) on how the code is being generated by the compiler, this extra code may be inline with the rest of the INPLACE_ADD code, which means additions can hit the instruction cache harder than subtraction. This could be causing extra L2 cache hits, which could cause a significant performance difference. 
This is heavily dependent on the system you're on (different processors have different amounts of cache and cache architectures), the compiler in use, including the particular version and compilation options (different compilers will decide differently which bits of code are on the critical path, which determines how assembly code is lumped together), and so on. Also, the difference is reversed in Python 3.0.1 (+: 15.66, -: 16.71); no doubt this critical function has changed a lot. A: $ python -m timeit -s "x=0" "x+=1" 10000000 loops, best of 3: 0.151 usec per loop $ python -m timeit -s "x=0" "x-=-1" 10000000 loops, best of 3: 0.154 usec per loop Looks like you've some measurement bias A: I think the "general programming lesson" is that it is really hard to predict, solely by looking at the source code, which sequence of statements will be the fastest. Programmers at all levels frequently get caught up by this sort of "intuitive" optimisation. What you think you know may not necessarily be true. There is simply no substitute for actually measuring your program performance. Kudos for doing so; answering why undoubtedly requires delving deep into the implementation of Python, in this case. With byte-compiled languages such as Java, Python, and .NET, it is not even sufficient to measure performance on just one machine. Differences between VM versions, native code translation implementations, CPU-specific optimisations, and so on will make this sort of question ever more tricky to answer. A: "The second loop is reliably faster ..." That's your explanation right there. Re-order your script so the subtraction test is timed first, then the addition, and suddenly addition becomes the faster operation again: -= 3.05 += 2.84 Obviously something happens to the second half of the script that makes it faster. 
My guess is that the first call to range() is slower because python needs to allocate enough memory for such a long list, but it is able to re-use that memory for the second call to range(): import time start = time.clock() x = range(10000000) end = time.clock() del x print 'first range()',end-start start = time.clock() x = range(10000000) end = time.clock() print 'second range()',end-start A few runs of this script show that the extra time needed for the first range() accounts for nearly all of the time difference between '+=' and '-=' seen above: first range() 0.4 second range() 0.23 A: It's always a good idea when asking a question to say what platform and what version of Python you are using. Sometimes it does't matter. This is NOT one of those times: time.clock() is appropriate only on Windows. Throw away your own measuring code and use -m timeit as demonstrated in pixelbeat's answer. Python 2.X's range() builds a list. If you are using Python 2.x, replace range with xrange and see what happens. Python 3.X's int is Python2.X's long. A: Is there a more general programming lesson here? The more general programming lesson here is that intuition is a poor guide when predicting run-time performance of computer code. One can reason about algorithmic complexity, hypothesise about compiler optimisations, estimate cache performance and so on. However, since these things can interact in non-trivial ways, the only way to be sure about how fast a particular piece of code is going to be is to benchmark it in the target environment (as you have rightfully done.) A: With Python 2.5 the biggest problem here is using range, which will allocate a list that big to iterate over it. When using xrange, whichever is done second is a tiny bit faster for me. (Not sure if range has become a generator in Python 3.) A: Your experiment is faulty. The way this experiment should be designed is to write 2 different programs - 1 for addition, 1 for subtraction. 
They should be exactly the same and run under the same conditions with the data being put to file. Then you need to average the runs (at least several thousand), but you'd need a statistician to tell you an appropriate number. If you wanted to analyze different methods of addition, subtraction, and looping, again each of those should be a separate program. Experimental error might arise from heat of processor and other activity going on the cpu, so i'd execute the runs in a variety of patterns... A: That would be remarkable, so I have thoroughly evaluated your code and also setup the expiriment as I would find it more correct (all declarations and function calls outside the loop). Both versions I have run five times. Running your code validated your claims: -= takes constantly less time; 3.6% on average Running my code, though, contradicts the outcome of your experiment: += takes on average (not always) 0.5% less time. To show all results I have put plots online: Your evaluation: http://bayimg.com/kadAeaAcN My evaluation: http://bayimg.com/KadaAaAcN So, I conclude that your experiment has a bias, and it is significant. Finally here is my code: import time addtimes = [0.] * 100 subtracttimes = [0.] * 100 range100 = range(100) range10000000 = range(10000000) j = 0 i = 0 x = 0 start = 0. for j in range100: start = time.clock() x = 0 for i in range10000000: x += 1 addtimes[j] = time.clock() - start for j in range100: start = time.clock() x = 0 for i in range10000000: x -= -1 subtracttimes[j] = time.clock() - start print '+=', sum(addtimes) print '-=', sum(subtracttimes)
Why is subtraction faster than addition in Python?
I was optimising some Python code, and tried the following experiment: import time start = time.clock() x = 0 for i in range(10000000): x += 1 end = time.clock() print '+=',end-start start = time.clock() x = 0 for i in range(10000000): x -= -1 end = time.clock() print '-=',end-start The second loop is reliably faster, anywhere from a whisker to 10%, depending on the system I run it on. I've tried varying the order of the loops, number of executions etc, and it still seems to work. Stranger, for i in range(10000000, 0, -1): (ie running the loop backwards) is faster than for i in range(10000000): even when loop contents are identical. What gives, and is there a more general programming lesson here?
[ "I can reproduce this on my Q6600 (Python 2.6.2); increasing the range to 100000000:\n('+=', 11.370000000000001)\n('-=', 10.769999999999998)\n\nFirst, some observations:\n\nThis is 5% for a trivial operation. That's significant.\nThe speed of the native addition and subtraction opcodes is irrelevant. It's in the ...
[ 86, 13, 7, 5, 4, 2, 0, 0, 0 ]
[ "The running loop backwards is faster because the computer has an easier time comparing if a number is equal to 0.\n" ]
[ -1 ]
[ "addition", "performance", "python", "subtraction" ]
stackoverflow_0001396564_addition_performance_python_subtraction.txt
Q: Python: print unicode char escaped I have tried to convert an ascii string to an escaped pseudo unicode escaped string using python, but failed so far. What I want to do: Convert ASCII 'a' to ASCII String "<U0061>" I can convert "a" with unicode('a'), but can not safe the numerical value of a in an ascii string. How can I do that? A: You can use ord() to convert a character to its character value (str) or code point (unicode). You can then use the appropriate string formatting to convert it into a text representation. 'U+%04X' % (ord(u'A'),) A: Here goes a minimalist sample that allows you to use Ignacio's solution with Python's built-in coding/decoding engine. Check http://docs.python.org/library/codecs.html if you need something more consistent (with proper error handling, etc...) import codecs def encode(text, error="strict"): return ("".join("<U%04x>" % ord(char) for char in text), len(text)) def search(name): if name == "unicode_ltgt": info = codecs.CodecInfo(encode, None, None, None) info.name = "unicode_ltgt" info.encode = encode return info return None codecs.register(search) if __name__ == "__main__": a = u"maçã" print a.encode("unicode_ltgt") (just by importing this as a module, the codec "unicode_ltgt" will be installed and be available to any ".encode" call, like in the given example )
Python: print unicode char escaped
I have tried to convert an ascii string to an escaped pseudo unicode escaped string using python, but failed so far. What I want to do: Convert ASCII 'a' to ASCII String "<U0061>" I can convert "a" with unicode('a'), but can not safe the numerical value of a in an ascii string. How can I do that?
[ "You can use ord() to convert a character to its character value (str) or code point (unicode). You can then use the appropriate string formatting to convert it into a text representation.\n'U+%04X' % (ord(u'A'),)\n\n", "Here goes a minimalist sample that allows you to use Ignacio's solution with Python's built-i...
[ 7, 1 ]
[]
[]
[ "python", "unicode" ]
stackoverflow_0004076194_python_unicode.txt
Q: How to calculate efficient local average of each coordinate of large matrix (average filter) in python I have large matrix, 4000x4000 I need to calculate local average of 11x11 window for each x,y of this matrix Generally it must be something like for x in range(4000) for y in range(4000) b[x,y]=mean(a[x-5:x+5,y-5:y+5] But this will run a lot of time Is it some more efficient way to do this? Thanks. A: You essentially want a two-dimensional convolution. Scipy can do that for you: http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.convolve2d.html In fact there's a similar answer right here on SO: 2d convolution using python and numpy A: You can use a dynamic programming technique to make it many times faster. Preprocess the matrix by starting the top left corner and moving left to right, then top to bottom, setting each cell to be the sum of its value and the one above (if it exists) and the one to the left (if it exists). When you get to the end, the bottom right value should be the total sum of the whole matrix. for x in xrange(4000): for y in xrange(4000): c[x,y] = a[x,y] if x > 0: c[x,y] += c[x-1,y] if y > 0: c[x,y] += c[x,y-1] and now you can get the sum of any rectangular region by subtract the top left corner from the top right: eg. in this case, the sum of the 11x11 region will be c[x+5,y+5]-c[x-5,y-5] Then you can just divide by the size of the window to get the local average: b[x,y] = (c[x+5+,y+5]-c[x-5,y-5])/121 Now instead of iterating over 121 spots for each element in the matrix, you only have to make 2 passes over the matrix with no iterations for each element.
How to calculate efficient local average of each coordinate of large matrix (average filter) in python
I have large matrix, 4000x4000 I need to calculate local average of 11x11 window for each x,y of this matrix Generally it must be something like for x in range(4000) for y in range(4000) b[x,y]=mean(a[x-5:x+5,y-5:y+5] But this will run a lot of time Is it some more efficient way to do this? Thanks.
[ "You essentially want a two-dimensional convolution. Scipy can do that for you: http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.convolve2d.html\nIn fact there's a similar answer right here on SO:\n2d convolution using python and numpy\n", "You can use a dynamic programming technique to make it ma...
[ 7, 4 ]
[]
[]
[ "arrays", "average", "local", "matrix", "python" ]
stackoverflow_0004078811_arrays_average_local_matrix_python.txt
Q: What do I put in the visit dict passed to my user-defined __deepcopy__ function? I need to define my own deepcopy function for one of my classes. The documentation says that the function __deepcopy__() is passed a memo dictionary for tracking which objects have already been copied, to avoid getting caught in coping a recursive object. However it doesn't say what to put in the dictionary. Do I put objects in, or object IDs? How do you use it? I can't find any site that explains it, except this book that says that it need not be used, except to pass it to other invocations of __deepcopy__. Thanks A: To be consistent with the built-in copy.deepcopy behavior, you should use the id of the object as a key. More Details: While you can often get away with using a different key scheme (such as the object itself, if it's hashable), as long as you're consistent, you can still run into trouble. Frequently __deepcopy__ is implemented by calling copy.deepcopy on some contained attributes. If you add items directly to memo, you have to be certain the keys won't collide with items added by copy.deepcopy, which adds keys generated by id, which are integers. So if you use a different key scheme, such as using objects directly as keys, you can't ever add any integer objects. Bottom Line: It's much easier to just use id and not have to worry about the exceptions above. A: To answer my own question, with thanks to @Jon-Eric: What I understand from the example shown at http://www.doughellmann.com/PyMOTW/copy/, is that you use self as the key, and the copied object as the value. Then, when it is attempted to copy this object(self) again, one can check the memo dictionary to see whether it has already been copied, and return the reference to the copy, memo[self].
What do I put in the visit dict passed to my user-defined __deepcopy__ function?
I need to define my own deepcopy function for one of my classes. The documentation says that the function __deepcopy__() is passed a memo dictionary for tracking which objects have already been copied, to avoid getting caught in coping a recursive object. However it doesn't say what to put in the dictionary. Do I put objects in, or object IDs? How do you use it? I can't find any site that explains it, except this book that says that it need not be used, except to pass it to other invocations of __deepcopy__. Thanks
[ "To be consistent with the built-in copy.deepcopy behavior, you should use the id of the object as a key.\nMore Details:\nWhile you can often get away with using a different key scheme (such as the object itself, if it's hashable), as long as you're consistent, you can still run into trouble. Frequently __deepcopy_...
[ 1, 0 ]
[]
[]
[ "deep_copy", "python" ]
stackoverflow_0004078725_deep_copy_python.txt
Q: Printing hex string print 'Payload: ', struct.unpack('%dB'%len(payload), payload) print '%08x (%d bits) DATA: ' % (identifier, width), repr(payload) These two code statements generate the following results: Payload: (125, 255, 255, 125, 255, 255, 125, 255) 18feef00 (29 bits) DATA: '}\xff\xff}\xff\xff}\xff' I'd like to have ONE final string that has the correct hex data that looks like this: 7dffff7dffff7dff A: >>> tup = (125, 255, 255, 125, 255, 255, 125, 255) >>> "".join('%02x' % i for i in tup) '7dffff7dffff7dff' So, in your case, you can use struct.unpack to construct the tuple and then use the "".join() to construct the string. It's unclear what you are starting with, but if it's the string '}\xff\xff}\xff\xff}\xff' then this works without the tuple: >>> s = '}\xff\xff}\xff\xff}\xff' >>> "".join('%02x' % ord(c) for c in s) '7dffff7dffff7dff'
Printing hex string
print 'Payload: ', struct.unpack('%dB'%len(payload), payload) print '%08x (%d bits) DATA: ' % (identifier, width), repr(payload) These two code statements generate the following results: Payload: (125, 255, 255, 125, 255, 255, 125, 255) 18feef00 (29 bits) DATA: '}\xff\xff}\xff\xff}\xff' I'd like to have ONE final string that has the correct hex data that looks like this: 7dffff7dffff7dff
[ ">>> tup = (125, 255, 255, 125, 255, 255, 125, 255)\n>>> \"\".join('%02x' % i for i in tup)\n'7dffff7dffff7dff'\n\nSo, in your case, you can use struct.unpack to construct the tuple and then use the \"\".join() to construct the string.\nIt's unclear what you are starting with, but if it's the string '}\\xff\\xff}\\...
[ 2 ]
[]
[]
[ "hex", "python", "string_formatting" ]
stackoverflow_0004079277_hex_python_string_formatting.txt
Q: How do I find which version of Python to use with Mercurial? I read here that On Windows, your Python version must match the version used to compile Mercurial. Otherwise, you'll get "Invalid Magic Number" errors when trying to run the CGI. The pre-compiled Windows binaries for Mercurial 1.0.x, 1.1.x, 1.2.x and 1.3.x were compiled with Python 2.5. I looked online but I dont see a lookup table for this information. Where can I find it? A: If you're on Windows you want to install TortoiseHG. It provides the full Mercurial command line, a GUI with Windows shell integration, and the right version of Python all compiled in together. A: The various HG installation packages have the required Python version in their name. You can also look it up in the release notes for the package.
How do I find which version of Python to use with Mercurial?
I read here that On Windows, your Python version must match the version used to compile Mercurial. Otherwise, you'll get "Invalid Magic Number" errors when trying to run the CGI. The pre-compiled Windows binaries for Mercurial 1.0.x, 1.1.x, 1.2.x and 1.3.x were compiled with Python 2.5. I looked online but I dont see a lookup table for this information. Where can I find it?
[ "If you're on Windows you want to install TortoiseHG. It provides the full Mercurial command line, a GUI with Windows shell integration, and the right version of Python all compiled in together.\n", "The various HG installation packages have the required Python version in their name. You can also look it up in th...
[ 4, 1 ]
[]
[]
[ "mercurial", "python", "version", "windows" ]
stackoverflow_0004079377_mercurial_python_version_windows.txt
Q: Access local variable in function from module I have a function in my main program that attempts to retrieve information from an imported module, which is a different script I wrote. This module spits out a variable which I access from the main program by making it a global. However, since I'm threading the function that requests the information, the global variable gets polluted by adding the information from separate requests into one var. What I'm looking for is a way to access a local variable in a function in a module. Main program: import module def threaded_function(): module.function(var1, var2) print module.output Module: def function(var1, var2): global output output = [] DoThingsWithVars(var1, var2) output.append(result) Since the threading causes it to get accessed multiple times I figured I'd not use global variables and get the local variables for each request. Any idea how I can get at those? A: Well, the only cases when func locals makes sense are closures and generators. You can access them via __closure__ attribute, but you would have to spawn closure for each separate thread, and it would be easier to just pass thread-local list to function instead. Other approach, which is actually used sometimes, is to have thread-local globals. Read http://docs.python.org/library/threading.html#threading.local for details. In your case it would work like this: locs = threading.local() def function(var1, var2): global locs if not locs.output: locs.output = [] DoThingsWithVars(var1, var2) locs.output.append(result)
Access local variable in function from module
I have a function in my main program that attempts to retrieve information from an imported module, which is a different script I wrote. This module spits out a variable which I access from the main program by making it a global. However, since I'm threading the function that requests the information, the global variable gets polluted by adding the information from separate requests into one var. What I'm looking for is a way to access a local variable in a function in a module. Main program: import module def threaded_function(): module.function(var1, var2) print module.output Module: def function(var1, var2): global output output = [] DoThingsWithVars(var1, var2) output.append(result) Since the threading causes it to get accessed multiple times I figured I'd not use global variables and get the local variables for each request. Any idea how I can get at those?
[ "Well, the only cases when func locals makes sense are closures and generators. You can access them via __closure__ attribute, but you would have to spawn closure for each separate thread, and it would be easier to just pass thread-local list to function instead.\nOther approach, which is actually used sometimes, i...
[ 2 ]
[]
[]
[ "python", "python_multithreading" ]
stackoverflow_0004076799_python_python_multithreading.txt
Q: How to get absolute url in Pylons? How to get absolute url in Pylons ? A: To generate a fully qualified URL with Routes, use qualified=True keyword in url() call. Example: print url("blog", id=123, qualified=True) # depending on routing configuration, # would print something like "http://somehost/blog/123" If your web application is running behind load balancer or reverse proxy, you might get into issues where generated URLs point to backend appservers not the frontend proxy / load balancer. You can use host argument to correct for that: print url("blog", id=123, qualified=True, host="example.com") # ==> "http://example.com/blog/123" Refer to Routes manual for more options and tweaks.
How to get absolute url in Pylons?
How to get absolute url in Pylons ?
[ "To generate a fully qualified URL with Routes, use qualified=True keyword in url() call.\nExample:\nprint url(\"blog\", id=123, qualified=True) \n# depending on routing configuration,\n# would print something like \"http://somehost/blog/123\"\n\nIf your web application is running behind load balancer or reverse p...
[ 8 ]
[]
[]
[ "pylons", "python", "routes" ]
stackoverflow_0004071837_pylons_python_routes.txt
Q: Problem with multiprocessing python 2.6 I'm trying to build a simple program that will fire off a load of processes, and if the main process is killed, the sub processes will die. My code looks like this: import time def test_proc(name, conn): x = 0 while True: print x x += 1 conn.poll() from multiprocessing import Process, Pipe proc_name= ['a', 'b', 'c'] procs = [] for p in proc_name: parent_conn, child_conn = Pipe() p = Process(target=test_proc, args=(p, child_conn)) procs.append(p) p.start() while True: print [(p.is_alive(), 'Pid %s' %(p.pid)) for p in procs] time.sleep(1) It works, but if I remove the print x on line 5 it doesn't. The processes will continue to run, why? Also, I'd love to know if this is the right way of doing what I'm trying to achieve. A: This works fine for me in Ubuntu: >>> from time import sleep >>> from multiprocessing import Process, Pipe >>> >>> def test_proc(name, conn): ... x = 0 ... while True: ... #print x ... x += 1 ... conn.poll() ... >>> def main(): ... proc_name= ['a', 'b', 'c'] ... procs = [Process(target=test_proc, args=Pipe()) for p in proc_name] ... for p in procs: ... p.start() ... while True: ... print [(p.is_alive(), 'Pid %s' %(p.pid)) for p in procs] ... sleep(1) ... >>> main() [(True, 'Pid 423'), (True, 'Pid 424'), (True, 'Pid 425')] [(True, 'Pid 423'), (True, 'Pid 424'), (True, 'Pid 425')] [(True, 'Pid 423'), (True, 'Pid 424'), (True, 'Pid 425')] [(True, 'Pid 423'), (True, 'Pid 424'), (True, 'Pid 425')] ... Are you using Windows, maybe? There are programming guidelines that relate to using multiprocessing with Windows. In particular, you need to provide an entry point by using if __name__ == '__main__':. Later: actually, there is something I don't get. In your original code, you expected to kill the parent of the threads and have the threads keep running. How were you killing the parent -- main() in my code? And if the threads were performing no I/O, how did you know that the threads were still alive? 
And later still: When I run the threads, I get this: >>> main() [(True, 'Pid 940'), (True, 'Pid 941'), (True, 'Pid 942')] [(True, 'Pid 940'), (True, 'Pid 941'), (True, 'Pid 942')] [(True, 'Pid 940'), (True, 'Pid 941'), (True, 'Pid 942')] [(True, 'Pid 940'), (True, 'Pid 941'), (True, 'Pid 942')] [(True, 'Pid 940'), (True, 'Pid 941'), (True, 'Pid 942')] and this: PID TTY TIME CMD 911 pts/6 00:00:00 python 940 pts/6 00:00:29 python 941 pts/6 00:00:29 python 942 pts/6 00:00:37 python 944 pts/5 00:00:00 ps And when I kill the main thread in python (Ctrl-C), I get this: PID TTY TIME CMD 911 pts/6 00:00:00 python 940 pts/6 00:00:42 python <defunct> 941 pts/6 00:00:50 python <defunct> 942 pts/6 00:00:51 python <defunct> 946 pts/5 00:00:00 ps Is this unexpected or undesirable?
Problem with multiprocessing python 2.6
I'm trying to build a simple program that will fire off a load of processes, and if the main process is killed, the sub processes will die. My code looks like this: import time def test_proc(name, conn): x = 0 while True: print x x += 1 conn.poll() from multiprocessing import Process, Pipe proc_name= ['a', 'b', 'c'] procs = [] for p in proc_name: parent_conn, child_conn = Pipe() p = Process(target=test_proc, args=(p, child_conn)) procs.append(p) p.start() while True: print [(p.is_alive(), 'Pid %s' %(p.pid)) for p in procs] time.sleep(1) It works, but if I remove the print x on line 5 it doesn't. The processes will continue to run, why? Also, I'd love to know if this is the right way of doing what I'm trying to achieve.
[ "This works fine for me in Ubuntu:\n>>> from time import sleep\n>>> from multiprocessing import Process, Pipe\n>>> \n>>> def test_proc(name, conn):\n... x = 0\n... while True:\n... #print x\n... x += 1\n... conn.poll()\n... \n>>> def main():\n... proc_name= ['a', 'b',...
[ 1 ]
[]
[]
[ "multiprocessing", "python" ]
stackoverflow_0004078925_multiprocessing_python.txt
Q: wxPython - implementing a scrollable view on a page similar to Visio or MS Word's print layout view I want to be able to embedd a panel (with size A4, A5, A6 custom etc) in a scrollable a page with a drop-shaddow, similar to how visio or ms word displays. I'm a bit of a beginner to python - been using it for 6 months full-time now. Ideas of how to do this or links to examples / tutorial would be welcome. Thx DM A: This is the closest I've got. No drop shaddow but at least a scrollable panel that almost looks like a piece of paper. import wx from wx.lib.scrolledpanel import ScrolledPanel app = wx.PySimpleApp() frame = wx.Frame(None, id=wx.ID_ANY, name="Just one child", size=(100,100)) scrollablePanel = ScrolledPanel(parent=frame, id=wx.ID_ANY, name="scrolledPanel", style=wx.ALWAYS_SHOW_SB) scrollablePanel.SetupScrolling() scrollablePanel.SetBackgroundColour(wx.Colour(128,128,128)) innerPanel = wx.Panel(parent=scrollablePanel, id=wx.ID_ANY, name="innerPanel", size=(250,100), style=wx.SIMPLE_BORDER) innerPanel.SetBackgroundColour(wx.Colour(255,255,255)) vSizer = wx.BoxSizer(wx.VERTICAL) vSizer.Add(innerPanel, proportion=0, flag=wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, border=8) hSizer = wx.BoxSizer(wx.HORIZONTAL) hSizer.Add(vSizer, proportion=1, flag=wx.ALIGN_CENTER_VERTICAL) scrollablePanel.SetSizer(hSizer) frame.Show() app.MainLoop() Any ideas how to do the drop shaddow would be appreciated. Thx -- DM
wxPython - implementing a scrollable view on a page similar to Visio or MS Word's print layout view
I want to be able to embedd a panel (with size A4, A5, A6 custom etc) in a scrollable a page with a drop-shaddow, similar to how visio or ms word displays. I'm a bit of a beginner to python - been using it for 6 months full-time now. Ideas of how to do this or links to examples / tutorial would be welcome. Thx DM
[ "This is the closest I've got. No drop shaddow but at least a scrollable panel that almost looks like a piece of paper.\nimport wx\nfrom wx.lib.scrolledpanel import ScrolledPanel\n\napp = wx.PySimpleApp()\nframe = wx.Frame(None, id=wx.ID_ANY, name=\"Just one child\", size=(100,100))\nscrollablePanel = ScrolledPanel...
[ 0 ]
[]
[]
[ "graphics", "python", "wxpython" ]
stackoverflow_0004076596_graphics_python_wxpython.txt
Q: Unpack string with hexadecimals I have a string that contains a float value in hexadecimal characters like this: "\\64\\2e\\9b\\38" I want to extract the float, but in order to do that I have to make Python see the string as 4 hex characters, instead of 16 regular characters. First I tried replacing the forward slashes, but I got an error: >>>> hexstring.replace("\\", "\x") ValueError: invalid \x escape I've discovered struct.unpack("f", "\x64\x2e\x9b\x38") does exactly what I want, but how do I convert the string? A: Whenever I see a (malformed) string, such as one composed of this list of characters: ['\\', '\\', '6', '4', '\\', '\\', '2', 'e', '\\', '\\', '9', 'b', '\\', '\\', '3', '8'] when what was intended was this list of characters ['\x64', '\x2e', '\x9b', '\x38'] I reach for the decode('string_escape') method. But to use it, we need to replace the two characters r'\\' with r'\x'. You can use the replace(...) method for that. In [37]: hexstring=r'\\64\\2e\\9b\\38' In [38]: struct.unpack('f',(hexstring.replace(r'\\',r'\x').decode('string_escape'))) Out[38]: (7.3996168794110417e-05,) In [39]: struct.unpack("f", "\x64\x2e\x9b\x38") Out[39]: (7.3996168794110417e-05,) PS. This use of the decode method works in Python2 but will not work in Python3. In Python3 codecs.decode is meant strictly for converting byte objects to string objects (err, what Python2 calls unicode objects), whereas in the example above, decode is actually converting a string object to a string object. Most decoding codecs in Python2 do convert string objects to unicode objects, but a few like 'string_escape' do not. In general they have been moved to other modules, or called in some other way. In Python3, the equivalent of hexstring.decode('string_encode') is codecs.escape_decode(hexstring)[0]. 
Edit: Another way, similar in spirit to jsbueno's answer, is to use binascii.unhexlify: In [76]: import binascii In [81]: hexstring=r"\\64\\2e\\9b\\38" In [82]: hexstring.replace('\\','') Out[82]: '642e9b38' In [83]: binascii.unhexlify(hexstring.replace('\\','')) Out[83]: 'd.\x9b8' These timeit results suggest binascii.unhexlify is the fastest: In [84]: %timeit binascii.unhexlify(hexstring.replace('\\','')) 1000000 loops, best of 3: 1.42 us per loop In [85]: %timeit hexstring.replace('\\','').decode('hex_codec') 100000 loops, best of 3: 2.94 us per loop In [86]: %timeit hexstring.replace(r'\\',r'\x').decode('string_escape') 100000 loops, best of 3: 2.13 us per loop Edit, per the comments: This answer contains raw strings. The Department of Public Health advises that eating raw or undercooked strings poses a health risk to everyone, but especially to the elderly, young children under age 4, pregnant women and other highly susceptible individuals with compromised immune systems. Thorough cooking of raw strings reduces the risk of illness. A: A shorter way to go ehre, is to just get rid of the "\" characters, and make python see each two hex-digits as a byte, using the "hex_codec": struct.unpack("f", "\\64\\2e\\9b\\38".replace("\\", "\").decode("hex_codec"))
Unpack string with hexadecimals
I have a string that contains a float value in hexadecimal characters like this: "\\64\\2e\\9b\\38" I want to extract the float, but in order to do that I have to make Python see the string as 4 hex characters, instead of 16 regular characters. First I tried replacing the forward slashes, but I got an error: >>>> hexstring.replace("\\", "\x") ValueError: invalid \x escape I've discovered struct.unpack("f", "\x64\x2e\x9b\x38") does exactly what I want, but how do I convert the string?
[ "Whenever I see a (malformed) string, such as one composed of this list of characters:\n['\\\\', '\\\\', '6', '4', '\\\\', '\\\\', '2', 'e', '\\\\', '\\\\', '9', 'b', '\\\\', '\\\\', '3', '8']\n\nwhen what was intended was this list of characters\n['\\x64', '\\x2e', '\\x9b', '\\x38']\n\nI reach for the decode('stri...
[ 6, 0 ]
[]
[]
[ "python", "struct" ]
stackoverflow_0004078929_python_struct.txt
Q: python thread exception errors when exit the whole program HI, guys, I am developing a GUI with python 2.4.3 and wxpython. Everything works fine except when I exit main program(close the main window of GUI). The wierd thing is that sometimes there is such error, sometimes there is no error at all. Although I found the same error reports from the python mailing list(the link is http://bugs.python.org/issue1722344, I am not sure if my case is the same as this one). I do not know how it is finally solved and what I should do to overcome this problem. The error message from the console is as follows. Exception in thread Thread-1 (most likely raised during interpreter shutdown): Traceback (most recent call last): File "/usr/lib/python2.4/threading.py", line 442, in __bootstrap File "/opt/company/workspace/username/application/src/mainwidget.py", line 1066, in run File "/usr/lib/python2.4/Queue.py", line 89, in put File "/usr/lib/python2.4/threading.py", line 237, in notify exceptions.TypeError: exceptions must be classes, instances, or strings (deprecated), not NoneType Unhandled exception in thread started by Error in sys.excepthook: Original exception was: The following is part of my code(the thread related code is complete, I extract the main operations for the rest). when I use the GUI to launch an external subprocess, at the same time, a wx.TextCtrl object is created. 
This wx.TextCtrl object is used to give input and print output of the external subprocess class BashProcessThread(threading.Thread): def __init__(self, readlineFunc): threading.Thread.__init__(self) self.readlineFunc = readlineFunc self.lines = [] self.outputQueue = Queue.Queue() self.setDaemon(True) def run(self): while True: line = self.readlineFunc() self.outputQueue.put(line) if (line==""): break return ''.join(self.lines) def getOutput(self): """ called from other thread """ while True: try: line = self.outputQueue.get_nowait() lines.append(line) except Queue.Empty: break return ''.join(self.lines) class ExternalProcWindow(wx.Window): def __init__(self, parent, externapp): wx.Window.__init__(self, parent, -1, pos=wx.DefaultPosition, size = wx.Size(1200, 120)) self.externapp=externapp self.prompt = externapp.name.lower() + '>>' self.textctrl = wx.TextCtrl(self, -1, '', size= wx.Size(1200, 120), style=wx.TE_PROCESS_ENTER|wx.TE_MULTILINE) self.default_txt = self.textctrl.GetDefaultStyle() self.textctrl.AppendText(self.prompt) self.outputThread = BashProcessThread(self.externapp.sub_process.stdout.readline) self.outputThread.start() self.textctrl.SetFocus() self.__bind_events() self.Fit() def __bind_events(self): self.Bind(wx.EVT_TEXT_ENTER, self.__enter) def __enter(self, e): nl=self.textctrl.GetNumberOfLines() ln = self.textctrl.GetLineText(nl-1) ln = ln[len(self.prompt):] self.externapp.sub_process.stdin.write(ln+"\n") time.sleep(.3) self.textctrl.AppendText(self.outputThread.getOutput()) class ExternApp: def launch(self): self.sub_process = subprocess.Popen(launchcmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) A: The problem is caused by the use of threading.Thread.setDaemon. Threads set daemonic don't prevent the Python intepreter from exiting, but they still keep running. Because Python cleans up the environment before the process is terminated, the threads can run into trouble when stuff is removed from under them. 
That raises an exception, which the thread class tries to print for your convenience -- but that, then, too fails because the process is exiting. You could try to silence the exception, but that's tricky (and if the thread does anything substantial, it might hide a real problem. Not the case here, though.) Or you could ask the thread to stop before exiting, and not set the thread daemonic. Or you can simply avoid using threads altogether. I do not remember if wxPython has a convenient mechanism for getting a process's output or even of doing asynchronous I/O, but many GUI toolkits do. And there's always Twisted, which does it all for you.
python thread exception errors when exit the whole program
HI, guys, I am developing a GUI with python 2.4.3 and wxpython. Everything works fine except when I exit main program(close the main window of GUI). The wierd thing is that sometimes there is such error, sometimes there is no error at all. Although I found the same error reports from the python mailing list(the link is http://bugs.python.org/issue1722344, I am not sure if my case is the same as this one). I do not know how it is finally solved and what I should do to overcome this problem. The error message from the console is as follows. Exception in thread Thread-1 (most likely raised during interpreter shutdown): Traceback (most recent call last): File "/usr/lib/python2.4/threading.py", line 442, in __bootstrap File "/opt/company/workspace/username/application/src/mainwidget.py", line 1066, in run File "/usr/lib/python2.4/Queue.py", line 89, in put File "/usr/lib/python2.4/threading.py", line 237, in notify exceptions.TypeError: exceptions must be classes, instances, or strings (deprecated), not NoneType Unhandled exception in thread started by Error in sys.excepthook: Original exception was: The following is part of my code(the thread related code is complete, I extract the main operations for the rest). when I use the GUI to launch an external subprocess, at the same time, a wx.TextCtrl object is created. 
This wx.TextCtrl object is used to give input and print output of the external subprocess class BashProcessThread(threading.Thread): def __init__(self, readlineFunc): threading.Thread.__init__(self) self.readlineFunc = readlineFunc self.lines = [] self.outputQueue = Queue.Queue() self.setDaemon(True) def run(self): while True: line = self.readlineFunc() self.outputQueue.put(line) if (line==""): break return ''.join(self.lines) def getOutput(self): """ called from other thread """ while True: try: line = self.outputQueue.get_nowait() lines.append(line) except Queue.Empty: break return ''.join(self.lines) class ExternalProcWindow(wx.Window): def __init__(self, parent, externapp): wx.Window.__init__(self, parent, -1, pos=wx.DefaultPosition, size = wx.Size(1200, 120)) self.externapp=externapp self.prompt = externapp.name.lower() + '>>' self.textctrl = wx.TextCtrl(self, -1, '', size= wx.Size(1200, 120), style=wx.TE_PROCESS_ENTER|wx.TE_MULTILINE) self.default_txt = self.textctrl.GetDefaultStyle() self.textctrl.AppendText(self.prompt) self.outputThread = BashProcessThread(self.externapp.sub_process.stdout.readline) self.outputThread.start() self.textctrl.SetFocus() self.__bind_events() self.Fit() def __bind_events(self): self.Bind(wx.EVT_TEXT_ENTER, self.__enter) def __enter(self, e): nl=self.textctrl.GetNumberOfLines() ln = self.textctrl.GetLineText(nl-1) ln = ln[len(self.prompt):] self.externapp.sub_process.stdin.write(ln+"\n") time.sleep(.3) self.textctrl.AppendText(self.outputThread.getOutput()) class ExternApp: def launch(self): self.sub_process = subprocess.Popen(launchcmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
[ "The problem is caused by the use of threading.Thread.setDaemon. Threads set daemonic don't prevent the Python intepreter from exiting, but they still keep running. Because Python cleans up the environment before the process is terminated, the threads can run into trouble when stuff is removed from under them. That...
[ 5 ]
[]
[]
[ "exception", "multithreading", "python", "wxpython" ]
stackoverflow_0004079810_exception_multithreading_python_wxpython.txt
Q: Generating Python code from XML tree What is the best way in Python to read XML from a file, build a tree, do some rewriting in the tree and generate Python code? (I'm slightly confused as in Python there seem to be multiple options such as expat, ElementTree, XSLT, ...). A: You should check out lxml. It is easy to use and does what you want in a few steps. If you want to stick to the stdlib, then you check out ElementTree (Python 2.5+) . The type of XML processing you want depends on your needs and for high performance XML parsing, read this. EDIT: My answer is for XML parsing with Python and does not answer your last question: "generate Python code", cause that makes no sense :) A: I use Beautiful Soup. Its relatively easy to use and powerful. Check it out here
Generating Python code from XML tree
What is the best way in Python to read XML from a file, build a tree, do some rewriting in the tree and generate Python code? (I'm slightly confused as in Python there seem to be multiple options such as expat, ElementTree, XSLT, ...).
[ "You should check out lxml. It is easy to use and does what you want in a few steps. If you want to stick to the stdlib, then you check out ElementTree (Python 2.5+) . The type of XML processing you want depends on your needs and for high performance XML parsing, read this.\nEDIT: My answer is for XML parsing with ...
[ 3, 0 ]
[]
[]
[ "python", "xml" ]
stackoverflow_0004079829_python_xml.txt
Q: the smart way to recombine these two lists in python i have the following lists in python which i want to recombine into a dictionary/list: names = ['banana','grapefruit','apple'] colors = ['yellow','pink','green'] to fruits = [ {'name':'banana','color':'yellow'}, {'name':'grapefruit','color':'pink'}, {'name':'apple','color':'green'} ] what's the best way to do it? i'm currently tying my brain up in knots trying to write loops. please help! :) A: fruits = [{"name": name, "color": color} for name, color in zip(names, colors)] A: [ {'name': name, 'color': color} for name, color in zip(names, colors) ] The usual terms and conditions apply: In Python 2, itertools.izip is advisable for potentially large lists If the actual data has many lists/attributes, this can be generalized to be more elegant on the large scale Warranty void if seal is broken ...
the smart way to recombine these two lists in python
i have the following lists in python which i want to recombine into a dictionary/list: names = ['banana','grapefruit','apple'] colors = ['yellow','pink','green'] to fruits = [ {'name':'banana','color':'yellow'}, {'name':'grapefruit','color':'pink'}, {'name':'apple','color':'green'} ] what's the best way to do it? i'm currently tying my brain up in knots trying to write loops. please help! :)
[ "fruits = [{\"name\": name, \"color\": color} for name, color in zip(names, colors)]\n\n", "[ {'name': name, 'color': color} for name, color in zip(names, colors) ]\n\nThe usual terms and conditions apply:\n\nIn Python 2, itertools.izip is advisable for potentially large lists\nIf the actual data has many lists/a...
[ 11, 9 ]
[]
[]
[ "dictionary", "list", "python" ]
stackoverflow_0004079940_dictionary_list_python.txt
Q: django query eliminate duplicates In the following query how to eliminate the duplicates, d_query = Profile.objects.filter(company="12") search_string ="Tom" if search_string != "": d_query = d_query.filter(Q(profiles__name__icontains=search_string) | Q(first_name__icontains=search_string)| Q(last_name__icontains=search_string)) A: Assuming you mean you want to avoid getting back the same record more than once, you can just add .distinct() to your queryset before evaluating it A: For the record - .distinct() has some caveeats described in its documentation: http://docs.djangoproject.com/en/dev/ref/models/querysets/#distinct
django query eliminate duplicates
In the following query how to eliminate the duplicates, d_query = Profile.objects.filter(company="12") search_string ="Tom" if search_string != "": d_query = d_query.filter(Q(profiles__name__icontains=search_string) | Q(first_name__icontains=search_string)| Q(last_name__icontains=search_string))
[ "Assuming you mean you want to avoid getting back the same record more than once, you can just add .distinct() to your queryset before evaluating it\n", "For the record - .distinct() has some caveats described in its documentation: http://docs.djangoproject.com/en/dev/ref/models/querysets/#distinct\n" ]
[ 3, 0 ]
[]
[]
[ "django", "django_models", "django_templates", "django_views", "python" ]
stackoverflow_0004076285_django_django_models_django_templates_django_views_python.txt
Q: Issue in string matching in Python I am trying to read from a file and match for a certain combination of strings. PFB my program: def negative_verbs_features(filename): # Open and read the file content file = open (filename, "r") text = file.read() file.close() # Create a list of negative verbs from the MPQA lexicon file_negative_mpqa = open("../data/PolarLexicons/negative_mpqa.txt", "r") negative_verbs = [] for line in file_negative_mpqa: #print line, pos, word = line.split(",") #print line.split(",") if pos == "verb": negative_verbs.append(word) return negative_verbs if __name__ == "__main__": print negative_verbs_features("../data/test.txt") The file negative_mpqa.txt consists of word, part-of-speech tag pairs separated by a comma(,). Here's a snippet of the file: abandoned,adj abandonment,noun abandon,verb abasement,anypos abase,verb abash,verb abate,verb abdicate,verb aberration,adj aberration,noun I would like create a list of all words in the file which has verb as it's part-of-speech. However, when I run my program and the list returned (negative_verbs) is always empty. The if loop wasn't executing. I tried printing word,pos pair by uncommenting the line print line.split(",") PFB a snippet of the ouput. ['wrongful', 'adj\r\n'] ['wrongly', 'anypos\r\n'] ['wrought', 'adj\r\n'] ['wrought', 'noun\r\n'] ['yawn', 'noun\r\n'] ['yawn', 'verb\r\n'] ['yelp', 'verb\r\n'] ['zealot', 'noun\r\n'] ['zealous', 'adj\r\n'] ['zealously', 'anypos\r\n'] I understand my file may have some special characters like newline and return feed at the end of every line. I just want to ignore them and build my list. Kindly let me know how to proceed. PS: I am newbie in Python. A: You said the file has lines like this: abandoned,adj so those are word, pos pairs. But you wrote pos, word = line.split(",") which means that pos == 'abandoned' and word == 'adj' ... 
I think it's clear why the list will be empty now :-) A: Replace the line pos, word = line.split(",") by word, pos = line.rstrip().split(",") rstrip() removes the white characters (spaces, new lines, carriage return...) at the right of your string. Note that lstrip() and even strip() also exist. You also switched word and pos! You could also use rstrip() on your word variable instead, when you append it to your list.
Issue in string matching in Python
I am trying to read from a file and match for a certain combination of strings. PFB my program: def negative_verbs_features(filename): # Open and read the file content file = open (filename, "r") text = file.read() file.close() # Create a list of negative verbs from the MPQA lexicon file_negative_mpqa = open("../data/PolarLexicons/negative_mpqa.txt", "r") negative_verbs = [] for line in file_negative_mpqa: #print line, pos, word = line.split(",") #print line.split(",") if pos == "verb": negative_verbs.append(word) return negative_verbs if __name__ == "__main__": print negative_verbs_features("../data/test.txt") The file negative_mpqa.txt consists of word, part-of-speech tag pairs separated by a comma(,). Here's a snippet of the file: abandoned,adj abandonment,noun abandon,verb abasement,anypos abase,verb abash,verb abate,verb abdicate,verb aberration,adj aberration,noun I would like create a list of all words in the file which has verb as it's part-of-speech. However, when I run my program and the list returned (negative_verbs) is always empty. The if loop wasn't executing. I tried printing word,pos pair by uncommenting the line print line.split(",") PFB a snippet of the ouput. ['wrongful', 'adj\r\n'] ['wrongly', 'anypos\r\n'] ['wrought', 'adj\r\n'] ['wrought', 'noun\r\n'] ['yawn', 'noun\r\n'] ['yawn', 'verb\r\n'] ['yelp', 'verb\r\n'] ['zealot', 'noun\r\n'] ['zealous', 'adj\r\n'] ['zealously', 'anypos\r\n'] I understand my file may have some special characters like newline and return feed at the end of every line. I just want to ignore them and build my list. Kindly let me know how to proceed. PS: I am newbie in Python.
[ "You said the file has lines like this: abandoned,adj so those are word, pos pairs. But you wrote pos, word = line.split(\",\") which means that pos == 'abandoned' and word == 'adj' ... I think it's clear why the list will be empty now :-)\n", "Replace the line pos, word = line.split(\",\") by \nword, pos = line....
[ 2, 1 ]
[]
[]
[ "file_io", "python", "special_characters", "string" ]
stackoverflow_0004080123_file_io_python_special_characters_string.txt
Q: User specified date time I need to parse a date/time string from user input, and convert to UTC based on timzeone info not available in the string for datetime.strptime() (any suggestions?). Is there a straightforward way of doing this? Ideally, on google app engine i'd like to grab local time with tzinfo from the browser if possible also. timezone_string = "GMT-0800" fields = ("eventstartmonth","eventstartday", "eventstartyear", "eventstarttimehour", "eventstarttimeampm") date_string = '_'.join(map(lambda x: self.request.get(x), fields)) # date_string = "01_11_2000_1:35_PM" dt = datetime.datetime.strptime(date_string, "%m_%d_%Y_%I:%M_%p") # how to convert dt into a tz-aware datetime, and then to UTC A: While searching for similar information I came across a demo app engine app (with source included) that demonstrates how to convert timezones in a way that's similar to what you've requested. Unfortunately, though, you'll need to create custom tzinfo classes (explanation/code in the demo app linked above) for each timezone you'll be converting. If you need to be able to handle any timezone and/or want to take the easy route, I'd recommend using the pytz module. However, keep in mind, pytz is a rather bulky module that you'd have to upload to your GAE instance.
User specified date time
I need to parse a date/time string from user input, and convert to UTC based on timezone info not available in the string for datetime.strptime() (any suggestions?). Is there a straightforward way of doing this? Ideally, on google app engine i'd like to grab local time with tzinfo from the browser if possible also. timezone_string = "GMT-0800" fields = ("eventstartmonth","eventstartday", "eventstartyear", "eventstarttimehour", "eventstarttimeampm") date_string = '_'.join(map(lambda x: self.request.get(x), fields)) # date_string = "01_11_2000_1:35_PM" dt = datetime.datetime.strptime(date_string, "%m_%d_%Y_%I:%M_%p") # how to convert dt into a tz-aware datetime, and then to UTC
[ "While searching for similar information I came across a demo app engine app (with source included) that demonstrates how to convert timezones in a way that's similar to what you've requested. Unfortunately, though, you'll need to create custom tzinfo classes (explanation/code in the demo app linked above) for each...
[ 0 ]
[]
[]
[ "google_app_engine", "python", "python_datetime" ]
stackoverflow_0003692869_google_app_engine_python_python_datetime.txt
Q: Perl or Python: Convert date from dd/mm/yyyy to yyyy-mm-dd I have lots of dates in a column in a CSV file that I need to convert from dd/mm/yyyy to yyyy-mm-dd format. For example 17/01/2010 should be converted to 2010-01-17. How can I do this in Perl or Python? A: If you are guaranteed to have well-formed data consisting of nothing else but a singleton date in the DD-MM-YYYY format, then this works: # FIRST METHOD my $ndate = join("-" => reverse split(m[/], $date)); That works on a $date holding "07/04/1776" but fails on "this 17/01/2010 and that 01/17/2010 there". Instead, use: # SECOND METHOD ($ndate = $date) =~ s{ \b ( \d \d ) / ( \d \d ) / ( \d {4} ) \b }{$3-$2-$1}gx; If you prefer a more "grammatical" regex, so that it’s easier to maintain and update, you can instead use this: # THIRD METHOD ($ndate = $date) =~ s{ (?&break) (?<DAY> (?&day) ) (?&slash) (?<MONTH> (?&month) ) (?&slash) (?<YEAR> (?&year) ) (?&break) (?(DEFINE) (?<break> \b ) (?<slash> / ) (?<year> \d {4} ) (?<month> \d {2} ) (?<day> \d {2} ) ) }{ join "-" => @+{qw<YEAR MONTH DAY>} }gxe; Finally, if you have Unicode data, you might want to be a bit more careful. 
# FOURTH METHOD ($ndate = $date) =~ s{ (?&break_before) (?<DAY> (?&day) ) (?&slash) (?<MONTH> (?&month) ) (?&slash) (?<YEAR> (?&year) ) (?&break_after) (?(DEFINE) (?<slash> / ) (?<start> \A ) (?<finish> \z ) # don't really want to use \D or [^0-9] here: (?<break_before> (?<= [\pC\pP\pS\p{Space}] ) | (?<= \A ) ) (?<break_after> (?= [\pC\pP\pS\p{Space}] | \z ) ) (?<digit> \d ) (?<year> (?&digit) {4} ) (?<month> (?&digit) {2} ) (?<day> (?&digit) {2} ) ) }{ join "-" => @+{qw<YEAR MONTH DAY>} }gxe; You can see how each of these four approaches performs when confronted with sample input strings like these: my $sample = q(17/01/2010); my @strings = ( $sample, # trivial case # multiple case "this $sample and that $sample there", # multiple case with non-ASCII BMP code points # U+201C and U+201D are LEFT and RIGHT DOUBLE QUOTATION MARK "from \x{201c}$sample\x{201d} through\xA0$sample", # multiple case with non-ASCII code points # from both the BMP and the SMP # code point U+02013 is EN DASH, props \pP \p{Pd} # code point U+10179 is GREEK YEAR SIGN, props \pS \p{So} # code point U+110BD is KAITHI NUMBER SIGN, props \pC \p{Cf} "\x{10179}$sample\x{2013}\x{110BD}$sample", ); Now letting $date be a foreach iterator through that array, we get this output: Original is: 17/01/2010 First method: 2010-01-17 Second method: 2010-01-17 Third method: 2010-01-17 Fourth method: 2010-01-17 Original is: this 17/01/2010 and that 17/01/2010 there First method: 2010 there-01-2010 and that 17-01-this 17 Second method: this 2010-01-17 and that 2010-01-17 there Third method: this 2010-01-17 and that 2010-01-17 there Fourth method: this 2010-01-17 and that 2010-01-17 there Original is: from “17/01/2010” through 17/01/2010 First method: 2010-01-2010” through 17-01-from “17 Second method: from “2010-01-17” through 2010-01-17 Third method: from “2010-01-17” through 2010-01-17 Fourth method: from “2010-01-17” through 2010-01-17 Original is: 17/01/2010–17/01/2010 First method: 2010-01-2010–17-01-17 
Second method: 2010-01-17–2010-01-17 Third method: 2010-01-17–2010-01-17 Fourth method: 2010-01-17–2010-01-17 Now let’s suppose that you actually do want to match non-ASCII digits. For example: U+660 ARABIC-INDIC DIGIT ZERO U+661 ARABIC-INDIC DIGIT ONE U+662 ARABIC-INDIC DIGIT TWO U+663 ARABIC-INDIC DIGIT THREE U+664 ARABIC-INDIC DIGIT FOUR U+665 ARABIC-INDIC DIGIT FIVE U+666 ARABIC-INDIC DIGIT SIX U+667 ARABIC-INDIC DIGIT SEVEN U+668 ARABIC-INDIC DIGIT EIGHT U+669 ARABIC-INDIC DIGIT NINE or even U+1D7F6 MATHEMATICAL MONOSPACE DIGIT ZERO U+1D7F7 MATHEMATICAL MONOSPACE DIGIT ONE U+1D7F8 MATHEMATICAL MONOSPACE DIGIT TWO U+1D7F9 MATHEMATICAL MONOSPACE DIGIT THREE U+1D7FA MATHEMATICAL MONOSPACE DIGIT FOUR U+1D7FB MATHEMATICAL MONOSPACE DIGIT FIVE U+1D7FC MATHEMATICAL MONOSPACE DIGIT SIX U+1D7FD MATHEMATICAL MONOSPACE DIGIT SEVEN U+1D7FE MATHEMATICAL MONOSPACE DIGIT EIGHT U+1D7FF MATHEMATICAL MONOSPACE DIGIT NINE So imagine you have a date in mathematical monospace digits, like this: $date = "\x{1D7F7}\x{1D7FD}/\x{1D7F7}\x{1D7F6}/\x{1D7F8}\x{1D7F6}\x{1D7F7}\x{1D7F6}"; The Perl code will work just fine on that: Original is: // First method: -- Second method: -- Third method: -- Fourth method: -- I think you’ll find that Python has a pretty brain‐damaged Unicode model whose lack of support for abstract characters and strings irrespective of content makes it ridiculously difficult to write things like this. It’s also tough to write legible regular expressions in Python where you decouple the declaration of the subexpressions from their execution, since (?(DEFINE)...) blocks are not supported there. Heck, Python doesn’t even support Unicode properties. It’s just not suitable for Unicode regex work because of this. But hey, if you think that’s bad in Python compared to Perl (and it certainly is), just try any other language. I haven’t found one that isn’t still worse for this sort of work. 
As you see, you run into real problems when you ask for regex solutions from multiple languages. First of all, the solutions are difficult to compare because of the different regex flavors. But also because no other language can compare with Perl for power, expressivity, and maintainability in its regular expressions. This may become even more obvious once arbitrary Unicode enters the picture. So if you just wanted Python, you should have asked for only that. Otherwise it’s a terribly unfair contest that Python will nearly always lose; it’s just too messy to get things like this correct in Python, let alone both correct and clean. That’s asking more of it than it can produce. In contrast, Perl’s regexes excel at both those. A: >>> from datetime import datetime >>> datetime.strptime('02/11/2010', '%d/%m/%Y').strftime('%Y-%m-%d') '2010-11-02' or more hackish way (that doesn't check for validity of values): >>> '-'.join('02/11/2010'.split('/')[::-1]) '2010-11-02' >>> '-'.join(reversed('02/11/2010'.split('/'))) '2010-11-02' A: Use Time::Piece (in core since 5.9.5), very similar to the Python solution accepted, as it provides the strptime and strftime functions: use Time::Piece; my $dt_str = Time::Piece->strptime('13/10/1979', '%d/%m/%Y')->strftime('%Y-%m-%d'); or $ perl -MTime::Piece print Time::Piece->strptime('13/10/1979', '%d/%m/%Y')->strftime('%Y-%m-%d'); 1979-10-13 $ A: Go with Perl: the datetime Python package is just broken. 
You could just do it with regexes to swap the date parts around, eg echo "17/01/2010" | perl -pe 's{(\d+)/(\d+)/(\d+)}{$3-$2-$1}g' If you do need to parse these dates (eg to compute their day of week or other calendar-type operations), look into DateTimeX::Easy (you can install it with apt-get under Ubuntu): perl -MDateTimeX::Easy -e 'print DateTimeX::Easy->parse("17/01/2010")->ymd("-")' A: Perl : while (<>) { s/(^|[^\d])(\d\d)\/(\d\d)\/(\d{4})($|[^\d])/$4-$3-$2/g; print $_; } Then you just have to run: perl MyScript.pl < oldfile.txt > newfile.txt A: Perl: my $date =~ s/(\d+)\/(\d+)\/(\d+)/$3-$2-$1/; A: In Perl you can do: use strict; while(<>) { chomp; my($d,$m,$y) = split/\//; my $newDate = $y.'-'.$m.'-'.$d; }
Perl or Python: Convert date from dd/mm/yyyy to yyyy-mm-dd
I have lots of dates in a column in a CSV file that I need to convert from dd/mm/yyyy to yyyy-mm-dd format. For example 17/01/2010 should be converted to 2010-01-17. How can I do this in Perl or Python?
[ "If you are guaranteed to have well-formed data consisting of nothing else but a singleton date in the DD-MM-YYYY format, then this works:\n# FIRST METHOD\nmy $ndate = join(\"-\" => reverse split(m[/], $date));\n\nThat works on a $date holding \"07/04/1776\" but fails on \"this 17/01/2010 and that 01/17/2010 there\...
[ 30, 20, 11, 6, 5, 1, 0 ]
[ "In glorious perl-oneliner form:\necho 17/01/2010 | perl -p -e \"chomp; join('-', reverse split /\\//);\"\n\nBut seriously I would do it like this:\n#!/usr/bin/env perl\nwhile (<>) {\n chomp;\n print join('-', reverse split /\\//), \"\\n\";\n}\n\nWhich will work on a pipe, converting and printing one date pe...
[ -2 ]
[ "date", "perl", "python", "text_processing" ]
stackoverflow_0004077896_date_perl_python_text_processing.txt
Q: Python: os.stat().st_size gives different value than du I'm creating a utility that will walk through directories and get the sizes of child directories and files for all directories and store the value. However, the sizes aren't computed correctly. Here's my class, which automatically recurses through all sub-directories: class directory: ''' Class that automatically traverses directories and builds a tree with size info ''' def __init__(self, path, parent=None): if path[-1] != '/': # Add trailing / self.path = path + '/' else: self.path = path self.size = 4096 self.parent = parent self.children = [] self.errors = [] for i in os.listdir(self.path): try: self.size += os.lstat(self.path + i).st_size if os.path.isdir(self.path + i) and not os.path.islink(self.path + i): a = directory(self.path + i, self) self.size += a.size self.children.append(a) except OSError: self.errors.append(path + i) I have a directory of videos that I'm testing this program with: >>> a = directory('/var/media/television/The Wire') >>> a.size 45289964053 However, when I try the same with du, I get $ du -sx /var/media/television/The\ Wire 44228824 The directories don't contain any links or anything special. Could someone explain why os.stat() is giving weird size readings? Platform: Linux (Fedora 13) Python 2.7 A: Consider this file foo -rw-rw-r-- 1 unutbu unutbu 25334 2010-10-31 12:55 foo It consists of 25334 bytes. tune2fs tells me foo resides on a filesystem with block size 4096 bytes: % sudo tune2fs -l /dev/mapper/vg1-OS1 ... Block size: 4096 ... Thus, the smallest file on the filesystem will occupy 4096 bytes, even if its contents consist of just 1 byte. As the file grows larger, space is allocated in 4096-byte blocks. du reports % du -B1 foo 28672 foo Note that 28672/4096 = 7. This is saying that foo occupys 7 4096-byte blocks on the filesystem. This is the smallest number of blocks needed to hold 25334 bytes. 
% du foo 28 foo This version of du is just reporting 28672/1024 rounded down. A: du gives the size on disk by default, versus the actual file size as given in st_size. $ du test.txt 8 test.txt $ du -b test.txt 6095 test.txt >>> os.stat('test.txt').st_size 6095 A: On linux (I am using CentOS), 'du -b' will return in bytes and will activate --apparent-size thus returning the size of the file rather than the amount of disk space it is using. Try that and see if that agrees with what Python os.stat says. A: I would write this code as: import os, os.path def size_dir(d): file_walker = ( os.path.join(root, f) for root, _, files in os.walk(d) for f in files ) return sum(os.path.getsize(f) for f in file_walker) If you want to count directories as 4k, then do something like this: import os, os.path def size_dir(d): file_walker = ( os.path.join(root, f) for root, _, files in os.walk(d) for f in files ) dir_walker = ( 4096 for root, dirs, _ in os.walk(d) for d in dirs ) return 4096 + sum(os.path.getsize(f) for f in file_walker) + sum(size for size in dir_walker)
Python: os.stat().st_size gives different value than du
I'm creating a utility that will walk through directories and get the sizes of child directories and files for all directories and store the value. However, the sizes aren't computed correctly. Here's my class, which automatically recurses through all sub-directories: class directory: ''' Class that automatically traverses directories and builds a tree with size info ''' def __init__(self, path, parent=None): if path[-1] != '/': # Add trailing / self.path = path + '/' else: self.path = path self.size = 4096 self.parent = parent self.children = [] self.errors = [] for i in os.listdir(self.path): try: self.size += os.lstat(self.path + i).st_size if os.path.isdir(self.path + i) and not os.path.islink(self.path + i): a = directory(self.path + i, self) self.size += a.size self.children.append(a) except OSError: self.errors.append(path + i) I have a directory of videos that I'm testing this program with: >>> a = directory('/var/media/television/The Wire') >>> a.size 45289964053 However, when I try the same with du, I get $ du -sx /var/media/television/The\ Wire 44228824 The directories don't contain any links or anything special. Could someone explain why os.stat() is giving weird size readings? Platform: Linux (Fedora 13) Python 2.7
[ "Consider this file foo\n-rw-rw-r-- 1 unutbu unutbu 25334 2010-10-31 12:55 foo\n\nIt consists of 25334 bytes.\ntune2fs tells me foo resides on a filesystem with block size 4096 bytes:\n% sudo tune2fs -l /dev/mapper/vg1-OS1\n...\nBlock size: 4096\n...\n\nThus, the smallest file on the filesystem will o...
[ 9, 1, 0, 0 ]
[]
[]
[ "linux", "python" ]
stackoverflow_0004080254_linux_python.txt
Q: How to place a widget in a Canvas widget in Tkinter? I want basically to be able to use a Canvas as a meta container for other widgets. I want to divide my GUI into a left, center and middle section. Within each section I would like to be able to place widgets like: Checkbutton, Button, Label, etc. How to place widgets in a Canvas widget? A: Your choices depend on what you're really trying to accomplish. Why is using a canvas preferable to using a frame? You can easily add widgets to a canvas just like you do any other container, using pack or grid or place. when you do this, the items will not scroll when you scroll the canvas because they aren't actually part of the canvas. The other choice is to create window objects on the canvas. You do this with the create_window method of the canvas. The advantage is, this window becomes part of the canvas and will scroll along with any other objects on the canvas. The downside is, your only option is absolute placement and you have to explicitly control the size of the widgets.
How to place a widget in a Canvas widget in Tkinter?
I want basically to be able to use a Canvas as a meta container for other widgets. I want to divide my GUI into a left, center and middle section. Within each section I would like to be able to place widgets like: Checkbutton, Button, Label, etc. How to place widgets in a Canvas widget?
[ "Your choices depend on what you're really trying to accomplish. Why is using a canvas preferable to using a frame?\nYou can easily add widgets to a canvas just like you do any other container, using pack or grid or place. when you do this, the items will not scroll when you scroll the canvas because they aren't ac...
[ 6 ]
[]
[]
[ "python", "python_3.x", "tkinter_canvas", "widget" ]
stackoverflow_0004080413_python_python_3.x_tkinter_canvas_widget.txt
Q: importing other python file functions not working I have two script one does the work and the other has a function that gives correct values to variables. Anyway I get the following error when calling a function in script(2) using a parameter from script(1) SCRIPT 1 (PROBLEM WITH LAST LINE COUNT_help.months() : while True: day = raw_input("Please Enter The Day: ") month = raw_input("Please Enter The Month: ") year = raw_input("Please Enter The Year: ") if day.isdigit(): if day > 0: correctcheck = "1" else: print "You Did Not Enter A Valid Day" errormsg = "1" break else: print "You Did Not Enter A Valid Day" errormsg = "1" break if month.isdigit(): if month > 0: correctcheck = "2" else: print "You Did Not Enter A Valid Month" errormsg = "1" break else: print "You Did Not Enter A Valid Month" errormsg = "1" break if year.isdigit(): if year > 0: correctcheck = "3" else: print "You Did Not Enter A Valid Year" errormsg = "1" break else: print "You Did Not Enter A Valid Year" errormsg = "1" break if correctcheck == "3": COUNT_help.months(months = month) print month print months SCRIPT 2 : jan = 1 feb = 32 mar = 60 apr = 91 may = 121 jun = 152 jul = 182 aug = 213 sep = 244 obr = 274 nov = 305 dec = 335 def months(months = ""): if months == "1": months = jan return months if months == "2": months = feb return months if months == "3": months = mar return months if months == "4": months = apr return months if months == "5": months = may return months if months == "6": months = jun return months if months == "7": months = jul return months if months == "8": months = aug return months if months == "9": months = sep return months if months == "10": months = obr return months if months == "11": months = nov return months if months == "12": months = dec return months A: You need to assign the result of COUNT_help.months(months = month) somewhere. Also look into the datetime library. if correctcheck == "3": months = COUNT_help.months(months = month) print month print months
importing other python file functions not working
I have two script one does the work and the other has a function that gives correct values to variables. Anyway I get the following error when calling a function in script(2) using a parameter from script(1) SCRIPT 1 (PROBLEM WITH LAST LINE COUNT_help.months() : while True: day = raw_input("Please Enter The Day: ") month = raw_input("Please Enter The Month: ") year = raw_input("Please Enter The Year: ") if day.isdigit(): if day > 0: correctcheck = "1" else: print "You Did Not Enter A Valid Day" errormsg = "1" break else: print "You Did Not Enter A Valid Day" errormsg = "1" break if month.isdigit(): if month > 0: correctcheck = "2" else: print "You Did Not Enter A Valid Month" errormsg = "1" break else: print "You Did Not Enter A Valid Month" errormsg = "1" break if year.isdigit(): if year > 0: correctcheck = "3" else: print "You Did Not Enter A Valid Year" errormsg = "1" break else: print "You Did Not Enter A Valid Year" errormsg = "1" break if correctcheck == "3": COUNT_help.months(months = month) print month print months SCRIPT 2 : jan = 1 feb = 32 mar = 60 apr = 91 may = 121 jun = 152 jul = 182 aug = 213 sep = 244 obr = 274 nov = 305 dec = 335 def months(months = ""): if months == "1": months = jan return months if months == "2": months = feb return months if months == "3": months = mar return months if months == "4": months = apr return months if months == "5": months = may return months if months == "6": months = jun return months if months == "7": months = jul return months if months == "8": months = aug return months if months == "9": months = sep return months if months == "10": months = obr return months if months == "11": months = nov return months if months == "12": months = dec return months
[ "You need to assign the result of COUNT_help.months(months = month) somewhere. Also look into the datetime library.\nif correctcheck == \"3\": \n months = COUNT_help.months(months = month) \n print month \n print months \n\n" ]
[ 1 ]
[]
[]
[ "function", "python" ]
stackoverflow_0004080496_function_python.txt
Q: syntax error with glfloat python? I am getting this error (note the code is from openGL red book: GLfloat mat_specular[] = { 0.8, 0.8, 0.8, 1.0 }; ^ SyntaxError: invalid syntax for the following code, i know i need to import something use GLfloat, can you any one tell me what do i need to import for doing this in python. GLfloat mat_specular[] = { 0.8, 0.8, 0.8, 1.0 }; GLfloat mat_shininess[] = { 32.0 }; GLfloat light_position[] = { 1.0, 1.0, 1.0, 0.0 }; glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular); glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess); glLightfv(GL_LIGHT0, GL_POSITION, light_position); glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); glDepthFunc(GL_LEQUAL); glEnable(GL_DEPTH_TEST) A: Basic Python syntax issues here. First, in Python, you don't declare the variable type (GLfloat). Second (related to the first), you don't need the square braces after the variable name ([]) to denote that it is an array. And third, to use a list in Python (similar to an array), you need to wrap the contents in square brackets ([]), not curly brackets ({}). Taking all that into account, it should look like: mat_specular = [0.8, 0.8, 0.8, 1] All that being said, even if you do manage to convert the code to Python, you need to first find libraries that you can use from Python. Using functions with the same names doesn't magically make OpenGL work.
syntax error with glfloat python?
I am getting this error (note the code is from openGL red book: GLfloat mat_specular[] = { 0.8, 0.8, 0.8, 1.0 }; ^ SyntaxError: invalid syntax for the following code, i know i need to import something use GLfloat, can you any one tell me what do i need to import for doing this in python. GLfloat mat_specular[] = { 0.8, 0.8, 0.8, 1.0 }; GLfloat mat_shininess[] = { 32.0 }; GLfloat light_position[] = { 1.0, 1.0, 1.0, 0.0 }; glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular); glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess); glLightfv(GL_LIGHT0, GL_POSITION, light_position); glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); glDepthFunc(GL_LEQUAL); glEnable(GL_DEPTH_TEST)
[ "Basic Python syntax issues here. First, in Python, you don't declare the variable type (GLfloat). Second (related to the first), you don't need the square braces after the variable name ([]) to denote that it is an array. And third, to use a list in Python (similar to an array), you need to wrap the contents in sq...
[ 2 ]
[]
[]
[ "opengl", "python" ]
stackoverflow_0004080572_opengl_python.txt
Q: Can you change the way numpy prints arrays? I have a 3d, 3x3x3 array of integers. Numpy will print these as a block of the first 3x3, then below it the 2nd 3x3, then below that the 3rd 3x3. If I wanted to print these 3 3x3 blocks BESIDE each other, rather than underneath each other, how would I tell numpy to print differently? A: class MyArray(numpy.array): def __str__(self): print [[[d for d in c] for c in b] for b in a] This essentially subclasses numpy.array and just changes the __str__ function (which is called when getting the string representation of an object) which converts it into a regular array and prints that. A: import numpy as np arr=np.random.random((3,3,3)) print(arr) # [[[ 0.05733376 0.00646892 0.96180769] # [ 0.11560363 0.56058966 0.83942817] # [ 0.5520361 0.17355794 0.87699437]] # [[ 0.90999361 0.03036473 0.5064459 ] # [ 0.76169531 0.48234618 0.56884999] # [ 0.93220906 0.9460365 0.65307273]] # [[ 0.04400683 0.58783221 0.74281147] # [ 0.69999475 0.14870245 0.32175415] # [ 0.20044376 0.11985585 0.69949965]]] for rows in zip(*arr): print('\t'.join(map(str,rows))) # [ 0.05733376 0.00646892 0.96180769] [ 0.90999361 0.03036473 0.5064459 ] [ 0.04400683 0.58783221 0.74281147] # [ 0.11560363 0.56058966 0.83942817] [ 0.76169531 0.48234618 0.56884999] [ 0.69999475 0.14870245 0.32175415] # [ 0.5520361 0.17355794 0.87699437] [ 0.93220906 0.9460365 0.65307273] [ 0.20044376 0.11985585 0.69949965] For convenience, you could wrap it in a function: def format_arr(arr): result=[] for x in zip(*arr): result.append('\t'.join(map(str,x))) return '\n'.join(result) print(format_arr(arr))
Can you change the way numpy prints arrays?
I have a 3d, 3x3x3 array of integers. Numpy will print these as a block of the first 3x3, then below it the 2nd 3x3, then below that the 3rd 3x3. If I wanted to print these 3 3x3 blocks BESIDE each other, rather than underneath each other, how would I tell numpy to print differently?
[ "class MyArray(numpy.array):\n def __str__(self):\n print [[[d for d in c] for c in b] for b in a]\n\nThis essentially subclasses numpy.array and just changes the __str__ function (which is called when getting the string representation of an object) which converts it into a regular array and prints that.\...
[ 3, 3 ]
[]
[]
[ "numpy", "python" ]
stackoverflow_0004080622_numpy_python.txt
Q: Python: extracting a list from an array of dictionaries with arrays in them This question is a bit of a convoluted brain-twister, I'm afraid. I'm writing a function test on an api, that when I query it, returns a bunch of json with embedded lists. Here is a significant fragment of what that looks like (with all the data anonymized for this question): [{u'account': {u'account_name': u'Autotest Account', u'account_uid': u'000000000'}, u'address': {u'city': u'AutoTest City', u'country': u'United States', u'postal_code': u'10019', u'province': None, u'state': u'IL', u'street': [u'12 Auto Road']}, u'children': [{u'institution_name': u'Autotest Bottom Institution 1', u'institution_type': 1, u'institution_uid': u'111111111'}, {u'institution_name': u'Autotest Bottom Institution 2', u'institution_type': 1, u'institution_uid': u'222222222'}, {u'institution_name': u'Autotest Bottom Institution 3', u'institution_type': 1, u'institution_uid': u'333333333'}, {u'institution_name': u'Autotest Bottom Institution 4', u'institution_type': 1, u'institution_uid': u'444444444'}, {u'institution_name': u'Autotest Bottom Institution 5', u'institution_type': 1, u'institution_uid': u'555555555'}, {u'institution_name': u'Autotest Bottom Institution 6', u'institution_type': 1, u'institution_uid': u'666666666'}, {u'institution_name': u'Autotest Bottom Institution 7', u'institution_type': 1, u'institution_uid': u'777777777'}, {u'institution_name': u'Autotest Bottom Institution 8', u'institution_type': 1, u'institution_uid': u'888888888'}], u'institution_name': u'Autotest Middle Institution 1', u'institution_type': 2, u'institution_uid': u'000000001', u'parent': {u'institution_name': u'Autotest Top Institution', u'institution_type': 3, u'institution_uid': u'000000099'}, u'school_year': 2011}, {u'account': {u'account_name': u'Autotest Account', u'account_uid': u'000000000'}, u'address': {u'city': u'Bacon City', u'country': u'United States', u'postal_code': u'10018', u'province': None, u'state': 
u'IL', u'street': [u'15 Wonder Road']}, u'children': [], u'institution_name': u'Autotest Bottom Institution 1', u'institution_type': 1, u'institution_uid': u'111111111', u'parent': {u'institution_name': u'Autotest Middle Institution 1', u'institution_type': 2, u'institution_uid': u'000000001'}, u'school_year': 2011}] What I'm trying to accomplish, is to extract all of the "Bottom Institution" names from the JSON, and put them into a list that I can then compare to a list that's already in my test fixture data. It should look something like this: ['Autotest Bottom Institution 1','Autotest Bottom Institution 2','Autotest Bottom Institution 3','Autotest Bottom Institution 4','Autotest Bottom Institution 5','Autotest Bottom Institution 6','Autotest Bottom Institution 7','Autotest Bottom Institution 8'] I'm able to extract them one-at-a-time or via iteration, after loading the data into "inst_array", like this: >>> print inst_array[0]['children'][0]['institution_name'] Autotest Bottom Institution 1 >>> print inst_array[0]['children'][1]['institution_name'] Autotest Bottom Institution 2 >>> print inst_array[0]['children'][2]['institution_name'] Autotest Bottom Institution 3 But here's the kicker: I want to be able to do this without iteration (or, with as little iteration as possible), and because of the umpteen layers of nesting in this, It's got me puzzled. Any thoughts? A: Shouldn't something like this work? names = [child['institution_name'] for child in inst_array[0]['children']] A: I guess a list comprehension is still a form of iteration, but at least concise: your_list = [elem['institution_name'] for elem in inst_array[0]['children']] A: Not sure about what you mean by 'without iteration'. Whatever you do here has to involve iterating through the lists, by definition. 
Anyway, here's an attempt: institutions = set() for account in data: for child in account['children']: institutions.add(child['institution_name']) A: I'm not sure what do you mean exactly with "without iteration", but here is something with implied iteration that could work: [x['institution_name'] for x in inst_array[0]['children'] if x['institution_name']] A: Something like: [ x['institution_name'] for x in inst_array[0]['children'] ]
Python: extracting a list from an array of dictionaries with arrays in them
This question is a bit of a convoluted brain-twister, I'm afraid. I'm writing a function test on an api, that when I query it, returns a bunch of json with embedded lists. Here is a significant fragment of what that looks like (with all the data anonymized for this question): [{u'account': {u'account_name': u'Autotest Account', u'account_uid': u'000000000'}, u'address': {u'city': u'AutoTest City', u'country': u'United States', u'postal_code': u'10019', u'province': None, u'state': u'IL', u'street': [u'12 Auto Road']}, u'children': [{u'institution_name': u'Autotest Bottom Institution 1', u'institution_type': 1, u'institution_uid': u'111111111'}, {u'institution_name': u'Autotest Bottom Institution 2', u'institution_type': 1, u'institution_uid': u'222222222'}, {u'institution_name': u'Autotest Bottom Institution 3', u'institution_type': 1, u'institution_uid': u'333333333'}, {u'institution_name': u'Autotest Bottom Institution 4', u'institution_type': 1, u'institution_uid': u'444444444'}, {u'institution_name': u'Autotest Bottom Institution 5', u'institution_type': 1, u'institution_uid': u'555555555'}, {u'institution_name': u'Autotest Bottom Institution 6', u'institution_type': 1, u'institution_uid': u'666666666'}, {u'institution_name': u'Autotest Bottom Institution 7', u'institution_type': 1, u'institution_uid': u'777777777'}, {u'institution_name': u'Autotest Bottom Institution 8', u'institution_type': 1, u'institution_uid': u'888888888'}], u'institution_name': u'Autotest Middle Institution 1', u'institution_type': 2, u'institution_uid': u'000000001', u'parent': {u'institution_name': u'Autotest Top Institution', u'institution_type': 3, u'institution_uid': u'000000099'}, u'school_year': 2011}, {u'account': {u'account_name': u'Autotest Account', u'account_uid': u'000000000'}, u'address': {u'city': u'Bacon City', u'country': u'United States', u'postal_code': u'10018', u'province': None, u'state': u'IL', u'street': [u'15 Wonder Road']}, u'children': [], u'institution_name': 
u'Autotest Bottom Institution 1', u'institution_type': 1, u'institution_uid': u'111111111', u'parent': {u'institution_name': u'Autotest Middle Institution 1', u'institution_type': 2, u'institution_uid': u'000000001'}, u'school_year': 2011}] What I'm trying to accomplish, is to extract all of the "Bottom Institution" names from the JSON, and put them into a list that I can then compare to a list that's already in my test fixture data. It should look something like this: ['Autotest Bottom Institution 1','Autotest Bottom Institution 2','Autotest Bottom Institution 3','Autotest Bottom Institution 4','Autotest Bottom Institution 5','Autotest Bottom Institution 6','Autotest Bottom Institution 7','Autotest Bottom Institution 8'] I'm able to extract them one-at-a-time or via iteration, after loading the data into "inst_array", like this: >>> print inst_array[0]['children'][0]['institution_name'] Autotest Bottom Institution 1 >>> print inst_array[0]['children'][1]['institution_name'] Autotest Bottom Institution 2 >>> print inst_array[0]['children'][2]['institution_name'] Autotest Bottom Institution 3 But here's the kicker: I want to be able to do this without iteration (or, with as little iteration as possible), and because of the umpteen layers of nesting in this, It's got me puzzled. Any thoughts?
[ "Shouldn't something like this work?\nnames = [child['institution_name'] for child in inst_array[0]['children']]\n\n", "I guess a list comprehension is still a form of iteration, but at least concise:\nyour_list = [elem['institution_name'] for elem in inst_array[0]['children']]\n\n", "Not sure about what you me...
[ 5, 1, 0, 0, 0 ]
[]
[]
[ "arrays", "dictionary", "nested", "python" ]
stackoverflow_0004080304_arrays_dictionary_nested_python.txt
Q: How can I add a command to the Python interactive shell? I'm trying to save myself just a few keystrokes for a command I type fairly regularly in Python. In my python startup script, I define a function called load which is similar to import, but adds some functionality. It takes a single string: def load(s): # Do some stuff return something In order to call this function I have to type >>> load('something') I would rather be able to simply type: >>> load something I am running Python with readline support, so I know there exists some programmability there, but I don't know if this sort of thing is possible using it. I attempted to get around this by using the InteractivConsole and creating an instance of it in my startup file, like so: import code, re, traceback class LoadingInteractiveConsole(code.InteractiveConsole): def raw_input(self, prompt = ""): s = raw_input(prompt) match = re.match('^load\s+(.+)', s) if match: module = match.group(1) try: load(module) print "Loaded " + module except ImportError: traceback.print_exc() return '' else: return s console = LoadingInteractiveConsole() console.interact("") This works with the caveat that I have to hit Ctrl-D twice to exit the python interpreter: once to get out of my custom console, once to get out of the real one. Is there a way to do this without writing a custom C program and embedding the interpreter into it? Edit Out of channel, I had the suggestion of appending this to the end of my startup file: import sys sys.exit() It works well enough, but I'm still interested in alternative solutions. A: You could try ipython - which gives a python shell which does allow many things including automatic parentheses which gives you the function call as you requested. A: I think you want the cmd module. See a tutorial here: http://wiki.python.org/moin/CmdModule A: Hate to answer my own question, but there hasn't been an answer that works for all the versions of Python I use. 
Aside from the solution I posted in my question edit (which is what I'm now using), here's another: Edit .bashrc to contain the following lines: alias python3='python3 ~/py/shellreplace.py' alias python='python ~/py/shellreplace.py' alias python27='python27 ~/py/shellreplace.py' Then simply move all of the LoadingInteractiveConsole code into the file ~/py/shellreplace.py Once the script finishes executing, python will cease executing, and the improved interactive session will be seamless.
How can I add a command to the Python interactive shell?
I'm trying to save myself just a few keystrokes for a command I type fairly regularly in Python. In my python startup script, I define a function called load which is similar to import, but adds some functionality. It takes a single string: def load(s): # Do some stuff return something In order to call this function I have to type >>> load('something') I would rather be able to simply type: >>> load something I am running Python with readline support, so I know there exists some programmability there, but I don't know if this sort of thing is possible using it. I attempted to get around this by using the InteractivConsole and creating an instance of it in my startup file, like so: import code, re, traceback class LoadingInteractiveConsole(code.InteractiveConsole): def raw_input(self, prompt = ""): s = raw_input(prompt) match = re.match('^load\s+(.+)', s) if match: module = match.group(1) try: load(module) print "Loaded " + module except ImportError: traceback.print_exc() return '' else: return s console = LoadingInteractiveConsole() console.interact("") This works with the caveat that I have to hit Ctrl-D twice to exit the python interpreter: once to get out of my custom console, once to get out of the real one. Is there a way to do this without writing a custom C program and embedding the interpreter into it? Edit Out of channel, I had the suggestion of appending this to the end of my startup file: import sys sys.exit() It works well enough, but I'm still interested in alternative solutions.
[ "You could try ipython - which gives a python shell which does allow many things including automatic parentheses which gives you the function call as you requested.\n", "I think you want the cmd module.\nSee a tutorial here:\nhttp://wiki.python.org/moin/CmdModule\n", "Hate to answer my own question, but there h...
[ 7, 0, 0 ]
[]
[]
[ "cpython", "interactive", "python", "readline" ]
stackoverflow_0004017204_cpython_interactive_python_readline.txt
Q: Python and subprocess input piping I have a small script that launches and, every half hour, feeds a command to a java program (game server manager) as if the user was typing it. However, after reading documentation and experimenting, I can't figure out how I can get two things: 1) A version which allows the user to type commands into the terminal windoe and they will be sent to the server manager input just as the "save-all" command is. 2) A version which remains running, but sends any new input to the system itself, removing the need for a second terminal window. This one is actually half-happening right now as when something is typed, there is no visual feedback, but once the program is ended, it's clear the terminal has received the input. For example, a list of directory contents will be there if "dir" was typed while the program was running. This one is more for understanding than practicality. Thanks for the help. Here's the script: from time import sleep import sys,os import subprocess # Launches the server with specified parameters, waits however # long is specified in saveInterval, then saves the map. # Edit the value after "saveInterval =" to desired number of minutes. # Default is 30 saveInterval = 30 # Start the server. Substitute the launch command with whatever you please. p = subprocess.Popen('java -Xmx1024M -Xms1024M -jar minecraft_server.jar', shell=False, stdin=subprocess.PIPE); while(True): sleep(saveInterval*60) # Comment out these two lines if you want the save to happen silently. p.stdin.write("say Backing up map...\n") p.stdin.flush() # Stop all other saves to prevent corruption. p.stdin.write("save-off\n") p.stdin.flush() sleep(1) # Perform save p.stdin.write("save-all\n") p.stdin.flush() sleep(10) # Allow other saves again. p.stdin.write("save-on\n") p.stdin.flush() A: Replace your sleep() with a call to select((sys.stdin, ), (), (), saveInterval*60) -- that will have the same timeout but listens on stdin for user commands. 
When select says you have input, read a line from sys.stdin and feed it to your process. When select indicates a timeout, perform the "save" command that you're doing now. A: It won't completely solve your problem, but you might find python's cmd module useful. It's a way of easily implementing an extensible command line loop (often called a REPL). A: You can run the program using screen, then you can send the input to the specific screen session instead of to the program directly (if you are in Windows just install cygwin).
Python and subprocess input piping
I have a small script that launches and, every half hour, feeds a command to a java program (game server manager) as if the user was typing it. However, after reading documentation and experimenting, I can't figure out how I can get two things: 1) A version which allows the user to type commands into the terminal windoe and they will be sent to the server manager input just as the "save-all" command is. 2) A version which remains running, but sends any new input to the system itself, removing the need for a second terminal window. This one is actually half-happening right now as when something is typed, there is no visual feedback, but once the program is ended, it's clear the terminal has received the input. For example, a list of directory contents will be there if "dir" was typed while the program was running. This one is more for understanding than practicality. Thanks for the help. Here's the script: from time import sleep import sys,os import subprocess # Launches the server with specified parameters, waits however # long is specified in saveInterval, then saves the map. # Edit the value after "saveInterval =" to desired number of minutes. # Default is 30 saveInterval = 30 # Start the server. Substitute the launch command with whatever you please. p = subprocess.Popen('java -Xmx1024M -Xms1024M -jar minecraft_server.jar', shell=False, stdin=subprocess.PIPE); while(True): sleep(saveInterval*60) # Comment out these two lines if you want the save to happen silently. p.stdin.write("say Backing up map...\n") p.stdin.flush() # Stop all other saves to prevent corruption. p.stdin.write("save-off\n") p.stdin.flush() sleep(1) # Perform save p.stdin.write("save-all\n") p.stdin.flush() sleep(10) # Allow other saves again. p.stdin.write("save-on\n") p.stdin.flush()
[ "Replace your sleep() with a call to select((sys.stdin, ), (), (), saveInterval*60) -- that will have the same timeout but listens on stdin for user commands. When select says you have input, read a line from sys.stdin and feed it to your process. When select indicates a timeout, perform the \"save\" command that y...
[ 2, 1, 1 ]
[]
[]
[ "pipe", "popen", "python", "subprocess" ]
stackoverflow_0004080906_pipe_popen_python_subprocess.txt
Q: Reverse an explicit assignment of _ in python? In python, if I create a local variable _ (underscore) and assign it to something: _ = 3 Is it possible to reverse that assignment to regain usage of the underscore as the last returned output in the REPL? A: Yes. Use the del statement. $ python >>> _ = 3 >>> _ 3 >>> 5 5 >>> _ 3 >>> del _ >>> 6 6 >>> _ 6
Reverse an explicit assignment of _ in python?
In python, if I create a local variable _ (underscore) and assign it to something: _ = 3 Is it possible to reverse that assignment to regain usage of the underscore as the last returned output in the REPL?
[ "Yes. Use the del statement.\n$ python\n>>> _ = 3\n>>> _\n3\n>>> 5\n5\n>>> _\n3\n>>> del _\n>>> 6\n6\n>>> _\n6\n\n" ]
[ 7 ]
[]
[]
[ "python" ]
stackoverflow_0004081308_python.txt
Q: Finding the best "deal" in a "group buy", given a table of values Using PHP or Python, but I'm sure the basic functions are agnostic. I am unsure what the proper term, mathematical theory, or algorithm is, otherwise I'm sure Google would have fixed this for me in minutes. I have a data set similar to the following: cost | qty | ppl | store ------------------------ 30| 500| 10| 1 40| 600| 12| 2 35| 500| 14| 3 50| 700| 10| 1 30| 700| 12| 1 40| 250| 14| 2 What I'm trying to do is find the "optimal" row, based on these qualifiers: cost: lower is better. qty: higher is better. ppl: lower is better. store: Doesn't matter in this case, but used later to find "best" depending on 'store'. Essentially, I'm trying to find the best particular "deal" in a "group buy"-like situation, where the fewest amount of people are required to get the best "value" (quantity -vs- cost). To my eye, it appears that the best-overall would be Row #5, because of the jump in quantity. If there is a name for this, and a good (Wikipedia?) article on the subject, I'd be happy to finish this myself. Thanks for your time! A: Compute qty / (cost * ppl) and sort the list by that number. This number will be higher for higher qty and lower cost and ppl. You might want to use something like this (python): def cmp(a, b): return (a["qty"] / (a["cost"] * a["ppl"])) - (b["qty"] / (b["cost"] * b["ppl"])) list = sorted(list, cmp) Explanation: think what happens if qty is getting bigger when cost * ppl are constant. The ratio will increase, because a/x > b>x if a > b. Now with the other two values it's the other way around; if x/a > x/b, then a < b, so the ratio would actually get decreased when cost or ppl increases (think what happens if you split 100$ to two people vs three people; if you split it to two, each will get 100/2 = 50$. If you split it to three, each will get 100/3 ~= 33$, which is less). 
(Sorry if i'm not making it clear enough; i'm tired) A: You are looking at linear programming in general, and the simplex algorithm in particular. A: You need to decide what makes it optimal, define a function that depends on cost, qty, ppl and maximize/minimize it. Then its just optimization. I'm assuming cost is cost per item, qty is amount available in store, and ppl are minimum number of people for group buy to kick in. Then minimizing cost*ppl/qty is equivalent to minimizing the minimum total amount spent before it kicks in, divided by the number of people who could participate in the group buy if it becomes available. But you still have to think does this make sense. You may say that if the cost doubles, but qty available also doubles that this group-buy is worse than a smaller one. Maybe if cost doubles, qty would have to increase by 4, or # of ppl necessary decrease by 4. Then you may want a function like cost^2 ppl/qty. E.g., something in general like cost^m ppl^n/qty^p should work; you just adjust the m, n, p (all positive numbers) depending on the weights you think appropriate.
Finding the best "deal" in a "group buy", given a table of values
Using PHP or Python, but I'm sure the basic functions are agnostic. I am unsure what the proper term, mathematical theory, or algorithm is, otherwise I'm sure Google would have fixed this for me in minutes. I have a data set similar to the following: cost | qty | ppl | store ------------------------ 30| 500| 10| 1 40| 600| 12| 2 35| 500| 14| 3 50| 700| 10| 1 30| 700| 12| 1 40| 250| 14| 2 What I'm trying to do is find the "optimal" row, based on these qualifiers: cost: lower is better. qty: higher is better. ppl: lower is better. store: Doesn't matter in this case, but used later to find "best" depending on 'store'. Essentially, I'm trying to find the best particular "deal" in a "group buy"-like situation, where the fewest amount of people are required to get the best "value" (quantity -vs- cost). To my eye, it appears that the best-overall would be Row #5, because of the jump in quantity. If there is a name for this, and a good (Wikipedia?) article on the subject, I'd be happy to finish this myself. Thanks for your time!
[ "Compute qty / (cost * ppl) and sort the list by that number. This number will be higher for higher qty and lower cost and ppl.\nYou might want to use something like this (python):\ndef cmp(a, b):\n return (a[\"qty\"] / (a[\"cost\"] * a[\"ppl\"])) - (b[\"qty\"] / (b[\"cost\"] * b[\"ppl\"]))\n\nlist = sorted(list...
[ 5, 1, 0 ]
[]
[]
[ "math", "php", "python" ]
stackoverflow_0004081720_math_php_python.txt
Q: Using the --quiet tag when extending Mercurial I'm writing a Mercurial extension in Python and need to call the "Pull" command using the Mercurial API, but I want to suppress its output using the --quiet flag. In Hg terms, I want to execute the following code, but from within my extension: hg pull --quiet Given the Mercurial API documentation, I thought it would be as simple as: commands.pull(ui, repo, quiet=True) Unfortunately, although this doesn't generate errors and will successfully execute the "Pull" command, the --quiet flag doesn't seem to be getting through as I still see the standard output. All the examples only show passing non-global flags, so I'm a bit worried that this isn't possible. What am I doing wrong? How can I pass the --quiet flag? A: Global options are affected through the ui object. It allows you to control many of the things you would normally set in your (or the repository's) hgrc. In this case, you want to set the quiet option in the ui section to True. ui.setconfig('ui', 'quiet', True) commands.pull(ui, repo)
Using the --quiet tag when extending Mercurial
I'm writing a Mercurial extension in Python and need to call the "Pull" command using the Mercurial API, but I want to suppress its output using the --quiet flag. In Hg terms, I want to execute the following code, but from within my extension: hg pull --quiet Given the Mercurial API documentation, I thought it would be as simple as: commands.pull(ui, repo, quiet=True) Unfortunately, although this doesn't generate errors and will successfully execute the "Pull" command, the --quiet flag doesn't seem to be getting through as I still see the standard output. All the examples only show passing non-global flags, so I'm a bit worried that this isn't possible. What am I doing wrong? How can I pass the --quiet flag?
[ "Global options are affected through the ui object. It allows you to control many of the things you would normally set in your (or the repository's) hgrc. In this case, you want to set the quiet option in the ui section to True.\nui.setconfig('ui', 'quiet', True)\ncommands.pull(ui, repo)\n\n" ]
[ 8 ]
[]
[]
[ "mercurial", "mercurial_extension", "python" ]
stackoverflow_0004081717_mercurial_mercurial_extension_python.txt
Q: why does python inspect.isclass think an instance is a class? Given the following module: class Dummy(dict): def __init__(self, data): for key, value in data.iteritems(): self.__setattr__(key, value) def __getattr__(self, attr): return self.get(attr, None) __setattr__=dict.__setitem__ __delattr__=dict.__delitem__ foo=Dummy({"one":1, "two":2}) why does foo show up in the output of inspect.getmembers(..., predicate=inspect.isclass)? $ python2.5 Python 2.5.2 (r252:60911, Aug 28 2008, 13:13:37) [GCC 4.1.2 20071124 (Red Hat 4.1.2-42)] on linux2 Type "help", "copyright", "credits" or "license" for more information. >>> import junk >>> import inspect >>> inspect.getmembers(junk, predicate=inspect.isclass) [('Dummy', <class 'junk.Dummy'>), ('foo', {'two': 2, 'one': 1})] >>> inspect.isclass(junk.foo) True I expected that inspect would only return Dummy since that is the only class definition in the module. Apparently, though, junk.foo is a class in the eyes of the inspect module. Why is that? A: Prior to Python v2.7, inspect.isclass naively assumed anything with a __bases__ attribute must be a class. Dummy's __getattr__ makes Dummy instances appear to have every attribute (with a value of None). Therefore, to inspect.isclass, foo appears to be a class. Note: __getattr__ should raiseAttributeError when asked for an attribute it does not know about. (This is very different than returning None.) A: First if all great answer Jon-Eric i just wanted to add some stuff: if you do in ipython (what a great tool): %psource inspect.isclass you will get: return isinstance(object, types.ClassType) or hasattr(object, '__bases__') which what Jon-Eric said. but i guess that you use python < 2.6 and this bug was already fixed, this is the code of inspect.isclass() in python2.7: return isinstance(object, (type, types.ClassType))
why does python inspect.isclass think an instance is a class?
Given the following module: class Dummy(dict): def __init__(self, data): for key, value in data.iteritems(): self.__setattr__(key, value) def __getattr__(self, attr): return self.get(attr, None) __setattr__=dict.__setitem__ __delattr__=dict.__delitem__ foo=Dummy({"one":1, "two":2}) why does foo show up in the output of inspect.getmembers(..., predicate=inspect.isclass)? $ python2.5 Python 2.5.2 (r252:60911, Aug 28 2008, 13:13:37) [GCC 4.1.2 20071124 (Red Hat 4.1.2-42)] on linux2 Type "help", "copyright", "credits" or "license" for more information. >>> import junk >>> import inspect >>> inspect.getmembers(junk, predicate=inspect.isclass) [('Dummy', <class 'junk.Dummy'>), ('foo', {'two': 2, 'one': 1})] >>> inspect.isclass(junk.foo) True I expected that inspect would only return Dummy since that is the only class definition in the module. Apparently, though, junk.foo is a class in the eyes of the inspect module. Why is that?
[ "Prior to Python v2.7, inspect.isclass naively assumed anything with a __bases__ attribute must be a class.\nDummy's __getattr__ makes Dummy instances appear to have every attribute (with a value of None).\nTherefore, to inspect.isclass, foo appears to be a class.\nNote: __getattr__ should raiseAttributeError when ...
[ 11, 6 ]
[]
[]
[ "introspection", "python" ]
stackoverflow_0004081819_introspection_python.txt
Q: Using python for commercial web application development We (there are 2 of us) are new to web application development and are trying to develop a web based application product targeted at medium sized businesses. The following is a basic flow of the work flow of the application: Collect data through file upload process (100K to 5 million records) Collect control data from users through forms Perform statistical analysis on the input data based on business rules Generate reports and visualization graphs Process (Edit, Encrypt, Redact etc…) the input file and generate output file. The application has to support 50 to 100 users concurrently. The application will be an intranet application and will not be available over the web. We have decided to use PostgreSQL as our database. We did some research on the web to find a suitable language to build our application. We are thinking about using Python as we have got some Python experience already. However we couldn’t decide on the python web frameworks. Are there any serious drawbacks in using a 3rd party web framework for a project like ours? We welcome suggestions about other languages/framework that would best suit this scenario. This is our first post and if you need more details, do let us know. A: To answer your first question, yes you absolutely should use a 3rd party web framework. There's no use in reinventing the wheel. The two Python web frameworks that I am familiar with are Pylons and Django. This question covers the pros and cons for using either. Based on your description I would recommend Django. A: Django is awesome. It is really easy to extend and is very well documented. I highly recommend you check it out. http://www.djangoproject.com/
Using python for commercial web application development
We (there are 2 of us) are new to web application development and are trying to develop a web based application product targeted at medium sized businesses. The following is a basic flow of the work flow of the application: Collect data through file upload process (100K to 5 million records) Collect control data from users through forms Perform statistical analysis on the input data based on business rules Generate reports and visualization graphs Process (Edit, Encrypt, Redact etc…) the input file and generate output file. The application has to support 50 to 100 users concurrently. The application will be an intranet application and will not be available over the web. We have decided to use PostgreSQL as our database. We did some research on the web to find a suitable language to build our application. We are thinking about using Python as we have got some Python experience already. However we couldn’t decide on the python web frameworks. Are there any serious drawbacks in using a 3rd party web framework for a project like ours? We welcome suggestions about other languages/framework that would best suit this scenario. This is our first post and if you need more details, do let us know.
[ "To answer your first question, yes you absolutely should use a 3rd party web framework. There's no use in reinventing the wheel.\nThe two Python web frameworks that I am familiar with are Pylons and Django. This question covers the pros and cons for using either. Based on your description I would recommend Django....
[ 3, 1 ]
[]
[]
[ "python", "web_applications", "web_frameworks" ]
stackoverflow_0004082109_python_web_applications_web_frameworks.txt
Q: Please help extract text from HTML tags using Python Regex I have the following HTML text : Country/<i>List it here</i><br><font color="#ff00ff">Dubai</font><br><br> How do I extract 'Dubai' out of the above HTML? I have few hundreds such lines, kind of urgent, so not researching BeautifulSoup or XML parser implementation. Thanks much! A: Since you just want something quick and dirty, you could use: re.match(r'.*>([^<>]*)</font>.*', s).group(1) This just grabs all of the non angle-brackety things before a closing font tag. Again, not suitable for "real" parsing.
Please help extract text from HTML tags using Python Regex
I have the following HTML text: Country/<i>List it here</i><br><font color="#ff00ff">Dubai</font><br><br> How do I extract 'Dubai' out of the above HTML? I have a few hundred such lines, kind of urgent, so not researching BeautifulSoup or XML parser implementation. Thanks much!
[ "Since you just want something quick and dirty, you could use:\nre.match(r'.*>([^<>]*)</font>.*', s).group(1)\n\nThis just grabs all of the non angle-brackety things before a closing font tag. Again, not suitable for \"real\" parsing.\n" ]
[ 2 ]
[]
[]
[ "parsing", "python", "regex" ]
stackoverflow_0004082205_parsing_python_regex.txt
Q: How should I share code ("modules") between my templates? If I had two controllers in my Pylons website that server up two different template files, what would be the best way to show the same piece of HTML on each template? For example, lets say I had a blog. The front page would show the list of recent entries and each entry would have a 'permanent link' linking to a page showing just that entry. On each of those pages, I want to show 'latest entries' - A list of the 5 most recent blog posts. The template files different. The controllers are different. How do I show the 'latest posts module' ? Should I just have something like: from blog.model import posts class BlogController(BaseController): def index(self): c.latestPosts = posts.get_latest() return render('home.html') class OtherController(BaseController): def index(self): c.latestPosts = posts.get_latest() return render('otherpage.html') c.latestPosts would then be a list of links that the template renders. The problem I see with this is, I have to render the HTML for this on two separate template files. If I want to change the HTML, it means changing it in two places... I'm trying to come up with a neat way to do this but I'm running out of ideas. How would you do it? A: Being able to share common HTML fragments like headers, footers, "logged in" area of page, sidebars etc. is a very common requirement. Template engines usually provide means for this. If you're using Mako, here are the two main mechanisms you can use: Includes Check out the <%include> tag. In your page template you specify where various reusable bits would be placed. You kind of build the page from ground up, assemble it from the reusable components you have. Example from Mako documentation: <%include file="header.html"/> hello world <%include file="footer.html"/> Inheritance Check out the <%inherit> tag. This works similarly to inheritance in programming languages like Python. In a base template you set up a skeleton of the page. 
In page templates you customize and extend some parts of the base template. Quick example, base.mako: <html> <head></head> <body> ${self.header()} ${self.body()} </body> </html> <%def name="header()"> This is the common header all pages will get unless they override this. </%def> And somepage.mako: <%inherit file="/base.mako"/> This content will go into body of base. Template engines usually have many nifty features, and I encourage you to get to know them well! A: Although Pēteris' answer is good, you may also be looking for Mako's <%namespace> functionality, which closely parallels the "import" statement in raw Python. However, <%inherit> and <%include> are also things that you should find yourself using regularly.
How should I share code ("modules") between my templates?
If I had two controllers in my Pylons website that serve up two different template files, what would be the best way to show the same piece of HTML on each template? For example, let's say I had a blog. The front page would show the list of recent entries and each entry would have a 'permanent link' linking to a page showing just that entry. On each of those pages, I want to show 'latest entries' - A list of the 5 most recent blog posts. The template files are different. The controllers are different. How do I show the 'latest posts module' ? Should I just have something like: from blog.model import posts class BlogController(BaseController): def index(self): c.latestPosts = posts.get_latest() return render('home.html') class OtherController(BaseController): def index(self): c.latestPosts = posts.get_latest() return render('otherpage.html') c.latestPosts would then be a list of links that the template renders. The problem I see with this is, I have to render the HTML for this on two separate template files. If I want to change the HTML, it means changing it in two places... I'm trying to come up with a neat way to do this but I'm running out of ideas. How would you do it?
[ "Being able to share common HTML fragments like headers, footers, \"logged in\" area of page, sidebars etc. is a very common requirement. Template engines usually provide means for this. \nIf you're using Mako, here are the two main mechanisms you can use:\nIncludes\nCheck out the <%include> tag. In your page templ...
[ 2, 0 ]
[]
[]
[ "module", "pylons", "python", "templates" ]
stackoverflow_0004075183_module_pylons_python_templates.txt
Q: Google Search with Python From this question I've learned how to google-search using Python 3. But in the example given there you can retrieve only the first 4 results. I need to retrieve information about the first 25 results (at least). For each result I want to be able to get its: title url visible url (the visible url of http://en.wikipedia.org/wiki/Information_retrieval is http://en.wikipedia.org/) site description How can I do it? EDIT: I'm using Python 3.1 A: This thread should tell you what you need to know. In short, add &rsz=large to the URI to get eight results at a time, and &start=8 (or 16, 24, etc.) gets you results starting with the indicated number. In no case can you get more than 64, and some searches allow only 8. A: Use the JSON/Atom Custom Search API and if necessary call it multiple times by using the start parameter to receive all results you are interested in.
Google Search with Python
From this question I've learned how to google-search using Python 3. But in the example given there you can retrieve only the first 4 results. I need to retrieve information about the first 25 results (at least). For each result I want to be able to get its: title url visible url (the visible url of http://en.wikipedia.org/wiki/Information_retrieval is http://en.wikipedia.org/) site description How can I do it? EDIT: I'm using Python 3.1
[ "This thread should tell you what you need to know. In short, add &rsz=large to the URI to get eight results at a time, and &start=8 (or 16, 24, etc.) gets you results starting with the indicated number. In no case can you get more than 64, and some searches allow only 8.\n", "Use the JSON/Atom Custom Search API ...
[ 2, 1 ]
[]
[]
[ "python", "search" ]
stackoverflow_0004082714_python_search.txt
Q: Scaling disqus using Django, horizontal and vertical partitioning helper methods, please explain http://www.slideshare.net/zeeg/djangocon-2010-scaling-disqus Vertical partitioning helper: class ApplicationRouter(object) def db_for_read(self, model, **hints): instance = hints.get('instance') if not instance: return None app_label = instance._meta.app_label return get_application_alias(app_label) Can someone explain what this is doing? Now this is for horizontal partitioning: class ForumPartitionRouter(object): def db_for_read(self, model, **hints): instance = hints.get('instance') if not instance: return None forum_id = getattr(instance, 'forum_id', None) if not forum_id: return None return get_forum_alias(forum_id) I sort of understand what these are doing, but not sure what some of the lines are doing like: instance = hints.get('instance') A: He's discussing how Disqus leverages the database routing technique used in Django 1.2 and above, which allows you to have connections to multiple databases alive at the same time. Unlike earlier versions of Django, Django 1.2 and above use a dictionary of dictionaries, with the key of the initial dictionary being a token to entitle the database, and the keys of the inner dictionary mapping to the familiar Django settings in older versions. The ForumPartitionRouter is the more obvious: different forums are stored in different databases (this is interesting, because it's clear that they have some awesome management tools that allow them to define these partitions to their django servers when the servers are started, treating each Django instance as a transient and ephemeral object of their system) and the server uses the forum_id to go and find the right database from which to get the related members of the forum (in this case, the posts). db_for_read() returns the token which points to a database. Many forums have many posts, but each post has one forum. 
Since forums don't have interrelations, you can store each forum and its posts in one database, wholly independent of other forums and their posts. If you look at the example on page 23, it's clear from his example: forum.post_set.all() ... that what's happening is the forum's object_id is being used as a lookup in a table somewhere that related forum IDs to databases (not tables, but databases) that may be anywhere else on the network. It seems to me, therefore, that the ApplicationRouter is doing something similar with relationships. Look at the text: "Vertical Partitioning involves creating tables with fewer columns, and using additional tables to store the remaining columns." That's what ApplicationRouter does: when you're using an application, and you go to access a related object in a diffent table, ApplicationRouter looks at the request and the instance to be filled in, and using a lookup table similar to the one for ForumPartitionRouter returns the key to the database in which that related instance's details can be found. You can look up multi-dbs here: http://docs.djangoproject.com/en/dev/topics/db/multi-db/
Scaling disqus using Django, horizontal and vertical partitioning helper methods, please explain
http://www.slideshare.net/zeeg/djangocon-2010-scaling-disqus Vertical partitioning helper: class ApplicationRouter(object) def db_for_read(self, model, **hints): instance = hints.get('instance') if not instance: return None app_label = instance._meta.app_label return get_application_alias(app_label) Can someone explain what this is doing? Now this is for horizontal partitioning: class ForumPartitionRouter(object): def db_for_read(self, model, **hints): instance = hints.get('instance') if not instance: return None forum_id = getattr(instance, 'forum_id', None) if not forum_id: return None return get_forum_alias(forum_id) I sort of understand what these are doing, but not sure what some of the lines are doing like: instance = hints.get('instance')
[ "He's discussing how Disqus leverages the database routing technique used in Django 1.2 and above, which allows you to have connections to multiple databases alive at the same time.\nUnlike earlier versions of Django, Django 1.2 and above use a dictionary of dictionaries, with the key of the initial dictionary bein...
[ 2 ]
[]
[]
[ "django", "python" ]
stackoverflow_0004055213_django_python.txt
Q: Using Python 2.X's locale module to format numbers and currency The task is to format numbers, currency amounts and dates as unicode strings in a locale-aware manner. First naive attempt with numbers gave hope: Python 2.7 (r27:82525, Jul 4 2010, 09:01:59) [MSC v.1500 32 bit (Intel)] on win32 Type "help", "copyright", "credits" or "license" for more information. >>> import locale >>> locale.setlocale(locale.LC_ALL, '') 'English_Australia.1252' >>> locale.format("%d", 12345678, grouping=True) '12,345,678' >>> locale.format(u"%d", 12345678, grouping=True) u'12,345,678' >>> Now try French: >>> locale.setlocale(locale.LC_ALL, 'French_France') 'French_France.1252' >>> locale.format("%d", 12345678, grouping=True) '12\xa0345\xa0678' >>> locale.format(u"%d", 12345678, grouping=True) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "C:\python27\lib\locale.py", line 190, in format return _format(percent, value, grouping, monetary, *additional) File "C:\python27\lib\locale.py", line 211, in _format formatted, seps = _group(formatted, monetary=monetary) File "C:\python27\lib\locale.py", line 160, in _group left_spaces + thousands_sep.join(groups) + right_spaces, UnicodeDecodeError: 'ascii' codec can't decode byte 0xa0 in position 0: ordinal not in range(128) What is happening here? >>> locale.localeconv() # output edited for brevity {'thousands_sep': '\xa0', 'mon_thousands_sep': '\xa0', 'currency_symbol': '\x80'} Wah! Looks a little legacyish. A work-around suggests itself: >>> locale.format("%d", 12345678, grouping=True).decode(locale.getpreferredencoding()) u'12\xa0345\xa0678' >>> UPDATE 1 locale.getpreferredencoding() is NOT the way to go; use locale.getlocale()[1] instead: Python 2.7 (r27:82525, Jul 4 2010, 09:01:59) [MSC v.1500 32 bit (Intel)] on win32 Type "help", "copyright", "credits" or "license" for more information. 
>>> import locale >>> locale.getpreferredencoding(), locale.getlocale() ('cp1252', (None, None)) >>> locale.setlocale(locale.LC_ALL, '') 'English_Australia.1252' >>> locale.getpreferredencoding(), locale.getlocale() ('cp1252', ('English_Australia', '1252')) >>> locale.setlocale(locale.LC_ALL, 'russian_russia') 'Russian_Russia.1251' >>> locale.getpreferredencoding(), locale.getlocale() ('cp1252', ('Russian_Russia', '1251')) #### Whoops! #### >>> UPDATE 2 There are very similar problems with the strftime() family and with str.format() >>> locale.setlocale(locale.LC_ALL, 'french_france') 'French_France.1252' >>> format(12345678, 'n') '12\xa0345\xa0678' >>> format(12345678, u'n') # type triggers cast to unicode somehow Traceback (most recent call last): File "<stdin>", line 1, in <module> UnicodeDecodeError: 'ascii' codec can't decode byte 0xa0 in position 2: ordinal not in range(128) >>> import datetime;datetime.date(1999,12,31).strftime(u'%B') # type is ignored 'd\xe9cembre' >>> In all cases, the workaround is to use only str objects when calling these methods, get a str result, and decode it using the encoding obtained by locale.getlocale()[1] Other problems: (1) It's a considerable nuisance when testing/exploring that the Windows locale names are not only different from POSIX ("fr_FR") but also verbose, and not fully documented. For example, evidently the grouping in India is not "every 3 digits" ... I can't find the locale to use to explore this; attempts like "Hindi" and "Hindi_India" don't work. (2) Some of the localeconv() data is just plain wrong. E.g. for Korean the currency symbol is given as '\\' i.e. a single backslash. I'm aware that some 7-bit legacy charsets are not ASCII-compatible and that chr(92) was sometimes used for the local currency symbol, so I expected '\\'.decode('949') to produce a won symbol, not just u'\\' I'm aware of modules such as babel but I don't particularly want to impose a big external dependency like that. 
Can I get correctness and convenience at the same time? Is there something about the locale module that I've missed? A: The thing about the locale module you seem to have missed is that it exposes your operating system vendor's (really: C library vendor's) notion of locales. So on Windows, you will have to use Windows locale names, use your OS vendor's documentation to find out what supported names are. Googling for "windows locale name" quickly brings up this list. That locale.format doesn't really support Unicode is a 2.x limitation; try Python 3.1. Edit: as for the Won sign, I think the story is this: Microsoft has allocated the Won sign to the same code position as the backslash in MS-DOS (likewise for the Yen sign in Japanese versions). As a consequence, the file separator character was the Won sign, and rendered as such. As they moved to Windows, and later to Unicode, they had to keep supporting this, but they also had to preserve the property that the file separator is the backslash (in particular in the Unicode API). They resolved this conflict so that The character \x5c really is the backslash, and not the Won sign In a terminal application, the backslash is rendered as the won sign; that's only a font issue. the currency symbol is reported as \x5c: so the currency symbol really is the backslash.
Using Python 2.X's locale module to format numbers and currency
The task is to format numbers, currency amounts and dates as unicode strings in a locale-aware manner. First naive attempt with numbers gave hope: Python 2.7 (r27:82525, Jul 4 2010, 09:01:59) [MSC v.1500 32 bit (Intel)] on win32 Type "help", "copyright", "credits" or "license" for more information. >>> import locale >>> locale.setlocale(locale.LC_ALL, '') 'English_Australia.1252' >>> locale.format("%d", 12345678, grouping=True) '12,345,678' >>> locale.format(u"%d", 12345678, grouping=True) u'12,345,678' >>> Now try French: >>> locale.setlocale(locale.LC_ALL, 'French_France') 'French_France.1252' >>> locale.format("%d", 12345678, grouping=True) '12\xa0345\xa0678' >>> locale.format(u"%d", 12345678, grouping=True) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "C:\python27\lib\locale.py", line 190, in format return _format(percent, value, grouping, monetary, *additional) File "C:\python27\lib\locale.py", line 211, in _format formatted, seps = _group(formatted, monetary=monetary) File "C:\python27\lib\locale.py", line 160, in _group left_spaces + thousands_sep.join(groups) + right_spaces, UnicodeDecodeError: 'ascii' codec can't decode byte 0xa0 in position 0: ordinal not in range(128) What is happening here? >>> locale.localeconv() # output edited for brevity {'thousands_sep': '\xa0', 'mon_thousands_sep': '\xa0', 'currency_symbol': '\x80'} Wah! Looks a little legacyish. A work-around suggests itself: >>> locale.format("%d", 12345678, grouping=True).decode(locale.getpreferredencoding()) u'12\xa0345\xa0678' >>> UPDATE 1 locale.getpreferredencoding() is NOT the way to go; use locale.getlocale()[1] instead: Python 2.7 (r27:82525, Jul 4 2010, 09:01:59) [MSC v.1500 32 bit (Intel)] on win32 Type "help", "copyright", "credits" or "license" for more information. 
>>> import locale >>> locale.getpreferredencoding(), locale.getlocale() ('cp1252', (None, None)) >>> locale.setlocale(locale.LC_ALL, '') 'English_Australia.1252' >>> locale.getpreferredencoding(), locale.getlocale() ('cp1252', ('English_Australia', '1252')) >>> locale.setlocale(locale.LC_ALL, 'russian_russia') 'Russian_Russia.1251' >>> locale.getpreferredencoding(), locale.getlocale() ('cp1252', ('Russian_Russia', '1251')) #### Whoops! #### >>> UPDATE 2 There are very similar problems with the strftime() family and with str.format() >>> locale.setlocale(locale.LC_ALL, 'french_france') 'French_France.1252' >>> format(12345678, 'n') '12\xa0345\xa0678' >>> format(12345678, u'n') # type triggers cast to unicode somehow Traceback (most recent call last): File "<stdin>", line 1, in <module> UnicodeDecodeError: 'ascii' codec can't decode byte 0xa0 in position 2: ordinal not in range(128) >>> import datetime;datetime.date(1999,12,31).strftime(u'%B') # type is ignored 'd\xe9cembre' >>> In all cases, the workaround is to use only str objects when calling these methods, get a str result, and decode it using the encoding obtained by locale.getlocale()[1] Other problems: (1) It's a considerable nuisance when testing/exploring that the Windows locale names are not only different from POSIX ("fr_FR") but also verbose, and not fully documented. For example, evidently the grouping in India is not "every 3 digits" ... I can't find the locale to use to explore this; attempts like "Hindi" and "Hindi_India" don't work. (2) Some of the localeconv() data is just plain wrong. E.g. for Korean the currency symbol is given as '\\' i.e. a single backslash. I'm aware that some 7-bit legacy charsets are not ASCII-compatible and that chr(92) was sometimes used for the local currency symbol, so I expected '\\'.decode('949') to produce a won symbol, not just u'\\' I'm aware of modules such as babel but I don't particularly want to impose a big external dependency like that. 
Can I get correctness and convenience at the same time? Is there something about the locale module that I've missed?
[ "The thing about the locale module you seem to have missed is that it exposes your operating system vendor's (really: C library vendor's) notion of locales. So on Windows, you will have to use Windows locale names, use your OS vendor's documentation to find out what supported names are. Googling for \"windows local...
[ 3 ]
[]
[]
[ "internationalization", "locale", "python" ]
stackoverflow_0004082645_internationalization_locale_python.txt
Q: Hide information in a PDF file in Python In Python, I have files generated by ReportLab. Now, i need to extract some pages from that PDF and hide confidential information. I can create a PDF file with blacked-out spots and use pyPdf to mergePage, but people can still select and copy-paste the information under the blacked-out spots. Is there a way to make those spots completely confidential? Per example, I need to hide addresses on the pages, how would i do it? Thanks, A: Basically you'll have to remove the corresponding text drawing commands in the PDF's page content stream. It's much easier to generate the pages twice, once with the confidential information, once without them. It might be possible (I don't know ReportLab enough) to specially craft the PDF in a way that the confidential information is easier accessible (e.g. as separate XObjects) for deletion. Still you'd have to do pretty low-level operations on the PDF -- which I would advise against. A: (Sorry, I was not able to log on when I posted the question...) Unfortunately, the document cannot be regenerated at will (context sensitive), and those PDF files (about 35) are 3000+ pages. I was thinking about using pdf2ps and pdf2ps back, but there is a lot of quality. pdf2ps -dLanguageLevel=3 input.pdf - | ps2pdf14 - output.pdf And if i use "pdftops" instead, the text is still selectable. If there is a way to make it non-selectable like with "pdf2ps" but with better quality, it will do too.
Hide information in a PDF file in Python
In Python, I have files generated by ReportLab. Now, I need to extract some pages from that PDF and hide confidential information. I can create a PDF file with blacked-out spots and use pyPdf to mergePage, but people can still select and copy-paste the information under the blacked-out spots. Is there a way to make those spots completely confidential? For example, I need to hide addresses on the pages, how would I do it? Thanks,
[ "Basically you'll have to remove the corresponding text drawing commands in the PDF's page content stream. It's much easier to generate the pages twice, once with the confidential information, once without them.\nIt might be possible (I don't know ReportLab enough) to specially craft the PDF in a way that the confi...
[ 1, 0 ]
[]
[]
[ "pypdf", "python", "reportlab" ]
stackoverflow_0004082725_pypdf_python_reportlab.txt
Q: Python/Django: Getting random articles from huge table I have a huge table in my database (MySQL)with millions of data. I need to populate 10 random data and show it on the UI. What would be a good approach considering performance? I was thinking about creating MySQL View to populate 10 random rows and read it from UI. Or is there any other efficient way to handle this situation? A: This may be expensive and slow, but: MyModel.objects.order_by('?')[:10] The main advantages being clarity and that it is not raw SQL. A: First of all (it's my proper opinion) i'm against using raw SQL when we are already working with a hight level framework like Django unless we don't find what we are looking for in the framework (Django) , so i will rather use Django for this matter: remark this approach it's only working if you have set auto increment to you PK and of course if you data is consistent (you don't remove record from the table so that you can be sure that all ids are auto incremented) import random # Getting the number of rows in the table it's equivalent to do SELECT COUNT(*). count_record = Table.objects.count() # Choose 10 (at most) number from the list of all ids. random_pks = random.sample(range(1, count_record) , min(count_record, 10)) random_list = Table.objects.filter(pk__in=random_pks) if the condition sited before are not satisfied i think you can do it with an raw SQL query like this: query = """SELECT * FROM table ORDER BY RAND() LIMIT 10""") table.objects.raw(query) about performance i think you have to timeit, Hope this will help.
Python/Django: Getting random articles from huge table
I have a huge table in my database (MySQL) with millions of records. I need to populate 10 random records and show them on the UI. What would be a good approach considering performance? I was thinking about creating MySQL View to populate 10 random rows and read it from UI. Or is there any other efficient way to handle this situation?
[ "This may be expensive and slow, but:\nMyModel.objects.order_by('?')[:10]\n\nThe main advantages being clarity and that it is not raw SQL.\n", "First of all (it's my proper opinion) i'm against using raw SQL when we are already working with a hight level framework like Django unless we don't find what we are look...
[ 3, 2 ]
[]
[]
[ "django", "mysql", "python" ]
stackoverflow_0004083040_django_mysql_python.txt
Q: List comprehension and len() vs. simple for loop I'm supposed to take a list of words and count all words in it which are 2 or more characters long and where the first and last character are equal. I came up with two possible solutions: result = 0 for word in words: if len(word) >= 2 and word[0] == word[-1]: result += 1 return result vs. return len([word for word in words if len(word) >= 2 and word[0] == word[-1]]) Which one would be the preferred solution? Or are there even better ones? A: In your second example a generator expression would be better than list-comp if your list is large. sum(1 for word in words if len(word) >= 2 and word[0] == word[-1]) A: The first one would definitely be the preferred solution in Python. Don't forget your Zen of Python: The Zen of Python, by Tim Peters Beautiful is better than ugly. Explicit is better than implicit. Simple is better than complex. Complex is better than complicated. Flat is better than nested. Sparse is better than dense. Readability counts. Special cases aren't special enough to break the rules. Although practicality beats purity. Errors should never pass silently. Unless explicitly silenced. In the face of ambiguity, refuse the temptation to guess. There should be one-- and preferably only one --obvious way to do it. Although that way may not be obvious at first unless you're Dutch. Now is better than never. Although never is often better than right now. If the implementation is hard to explain, it's a bad idea. If the implementation is easy to explain, it may be a good idea. Namespaces are one honking great idea -- let's do more of those! Other than that your solutions are good. A: I personally find the explicit loop more readable, but it's much a matter of taste (some prefer shorter code generally, especially when they have to write it). 
Either version can be further shortened/improved: result = 0 for word in words: result += int(len(word) >= 2 and word[0] == word[-1]) return result The int() conversions is strictly speaking unnecessary, since True is a kind of 1, but it may be better for readability. The same approach can apply to the comprehension: return sum(len(word) >= 2 and word[0] == word[-1] for word in words) If you want to use len(), I'd point the reader to the fact that the values don't really matter: len(1 for word in words if len(word) >= 2 and word[0] == word[-1]) A: Both are pretty good. There are small differences: List comprehension returns another list which you are passing to len. The first solution avoids creation of another list. A: Some other variants you might want to consider: First, you can break the filter condition into a function. This condition is fine either way, but if it becomes any more complex I'd definitely do this: def check(word): return len(word) >= 2 and word[0] == word[-1] sum(1 for word in words if check(word)) Next, if generating a list (as in the original list comprehension) is acceptable, then you can do this: len(filter(check, words)) There's itertools.ifilter, but if you use that you need to use the sum expression again, so it doesn't end up any clearer. The sum trick comes up so often that I'm surprised there isn't a standard library call to count the number of items in an iterator (if there is, I havn't found it). Alternatively, it'd make sense if len would consume and count the number of entries in an iterator if it has no __len__, but it doesn't.
List comprehension and len() vs. simple for loop
I'm supposed to take a list of words and count all words in it which are 2 or more characters long and where the first and last character are equal. I came up with two possible solutions: result = 0 for word in words: if len(word) >= 2 and word[0] == word[-1]: result += 1 return result vs. return len([word for word in words if len(word) >= 2 and word[0] == word[-1]]) Which one would be the preferred solution? Or are there even better ones?
[ "In your second example a generator expression would be better than list-comp if your list is large.\nsum(1 for word in words if len(word) >= 2 and word[0] == word[-1])\n\n", "The first one would definitely be the preferred solution in Python.\nDon't forget your Zen of Python:\n\nThe Zen of Python, by Tim Peters ...
[ 17, 3, 2, 1, 1 ]
[]
[]
[ "list_comprehension", "python" ]
stackoverflow_0004083098_list_comprehension_python.txt
Q: How can I improve the quality of this python remote service code? I wrote several small Python scripts to start clusters inside cloud infrastructures with distributed filesystems. Now, I integrated the code into a single command-line application but the code quality is very bad. http://code.google.com/p/diffuser/source/browse/trunk/diffuser.py The application needs to send lots of commands via SSH with paramiko. Each command that is send via SSH needs three lines of code: stdin, stdout, stderr = client.exec_command("<command>") print stdout.readlines() print stderr.readlines() How can I improve the code quality? A: Commands are executed in a shell, so you can use regular shell syntax to combine them together. As a first step, I'd issue a batch of commands in a single exec_command: stdin, stdout, stderr = client.exec_command( "sudo hostname;" "sudo apt-get update;" "sudo apt-get -y install nfs-common nfs-kernel-server;" "echo y | sudo mkfs.ext3 /dev/sdc;" "sudo mkdir /mnt/export;" "sudo mount /dev/sdc /mnt/export/;" "sudo chmod o+wx /etc/exports;") print stdout.readlines() print stderr.readlines() In addition, I consider it unnecessary to start a fresh sudo for each of them. So I'd rather write stdin, stdout, stderr = client.exec_command( "sudo /bin/sh -c '" "hostname;" "apt-get update;" "apt-get -y install nfs-common nfs-kernel-server;" "echo y | mkfs.ext3 /dev/sdc;" "mkdir /mnt/export;" "mount /dev/sdc /mnt/export/;" "chmod o+wx /etc/exports;" "'") print stdout.readlines() print stderr.readlines() A: The first thing I would do is improve the readability of what is going on by adding some functions, as it stands now it's one big script with too many levels of indentation and redirection, which makes it difficult to read. To begin with, now that you have a command-line application, you should first ensure that this is how it's being run. You should also make filesystem selection and anything else that needs to be chosen at runtime a commandline switch. 
If the user enters something incorrect, print usage and exit. Maybe something more like this: if __name__ == '__main__': filesystem, other_thing = parse_args(sys.argv) config = read_config() if filesystem in valid_filesystems and valid_thing(other_thing): start_client(config, filesystem) start_server(whatever) else: print_usage() sys.exit(0) and then add your top level control flow to start_client / start server. You may also want to create a GenericClient class and a GenericServer class, which you inherit from and modify depending on the filesystem selected. In general, I'd read up a bit on Object Orientation in python. I've found one guide here but there may be some better ones that others can suggest.
How can I improve the quality of this python remote service code?
I wrote several small Python scripts to start clusters inside cloud infrastructures with distributed filesystems. Now, I integrated the code into a single command-line application but the code quality is very bad. http://code.google.com/p/diffuser/source/browse/trunk/diffuser.py The application needs to send lots of commands via SSH with paramiko. Each command that is sent via SSH needs three lines of code: stdin, stdout, stderr = client.exec_command("<command>") print stdout.readlines() print stderr.readlines() How can I improve the code quality?
[ "Commands are executed in a shell, so you can use regular shell syntax to combine them together. As a first step, I'd issue a batch of commands in a single exec_command:\n stdin, stdout, stderr = client.exec_command(\n \"sudo hostname;\"\n \"sudo apt-get update;\"\n \"sudo apt-get -y ...
[ 2, 1 ]
[]
[]
[ "paramiko", "python" ]
stackoverflow_0004082699_paramiko_python.txt
Q: Python Twisted with callInThread I stripped down my app but this should give you an example of what i'm doing def run_app(f): p = Popen(['/usr/bin/app'],stdout=PIPE) while True: o = p.stdout.readline() if o == '' and p.poll() != None: break reactor.callFromThread(f, o) class Echo(Protocol): def connectionMade(self): reactor.callInThread(run_app, self.appDataReceived) def dataReceived(self, data): data = data.strip() if data == "getmore": print "getmore" def appDataReceived(self, u): print u def main(): factory = Factory() factory.protocol = Echo reactor.listenTCP(3646, factory) reactor.run() if __name__ == "__main__": main() I have an app which I want to connect in and run a app that continually spits out data to stdout. Right now my app works but the issue is when the client exits the socket connection the /usr/bin/app still continues to run. The more socket connections made the more this app is still running. Is there anyway from the Echo Procool to kill the run_app() function? A: Don't use threads and Popen. Use Twisted's process support. Also, your Echo protocol needs framing or there's no guarantee that it will ever receive the "getmore" string. A: There are few suggestions that I can make and hopefully it will resolve your issues. Don't use reactor.callFromThread, instead use deferToThread from twisted.internet.threads import deferToThread deferredObj = threads.deferToThread(run_app, self.appDataReceived) Just like you start the thread when the connection is made. You need to act when connection is lost. Example code: class Echo(Protocol): def connectionLost(self, reason): print reason # which is crude, there should be a more elegant answer reactor.stop() Agreed that deferToThread is optimized for short-running tasks. In fact, it is better to re do your code so that you could call the thread to just run the process and return the result.
Python Twisted with callInThread
I stripped down my app but this should give you an example of what i'm doing def run_app(f): p = Popen(['/usr/bin/app'],stdout=PIPE) while True: o = p.stdout.readline() if o == '' and p.poll() != None: break reactor.callFromThread(f, o) class Echo(Protocol): def connectionMade(self): reactor.callInThread(run_app, self.appDataReceived) def dataReceived(self, data): data = data.strip() if data == "getmore": print "getmore" def appDataReceived(self, u): print u def main(): factory = Factory() factory.protocol = Echo reactor.listenTCP(3646, factory) reactor.run() if __name__ == "__main__": main() I have an app which I want to connect in and run an app that continually spits out data to stdout. Right now my app works but the issue is when the client exits the socket connection the /usr/bin/app still continues to run. The more socket connections made the more this app is still running. Is there any way from the Echo Protocol to kill the run_app() function?
[ "Don't use threads and Popen. Use Twisted's process support. Also, your Echo protocol needs framing or there's no guarantee that it will ever receive the \"getmore\" string.\n", "There are few suggestions that I can make and hopefully it will resolve your issues.\nDon't use reactor.callFromThread, instead use d...
[ 3, 1 ]
[]
[]
[ "python", "twisted" ]
stackoverflow_0004081578_python_twisted.txt
Q: Proper location to set up MySQLdb connection in Pylons: app_globals? Thread safety? I want to use MySQLdb in Pylons, but can't figure out where to actually connect. It would seem that making the connection in app_globals.py would be convenient: class Globals(object): def __init__(self): self.db = MySQLdb.connect() Then controllers can access the db via the globals. However, this seems to lead to problems with 'MySQL has gone away' errors, and crashes with concurrent requests, so I'm thinking that this is bad because of thread safety. What is the best way to do this? Should each controller open and then close a MySQL connection? Thanks! A: Summarizing the comments on your question: you should probably use SQLAlchemy's non-ORM features such as the SQL Expression Language and the engine API, which each let you still get some of the benefits of SQLAlchemy without needing to go all the way up to ORM-ness.
Proper location to set up MySQLdb connection in Pylons: app_globals? Thread safety?
I want to use MySQLdb in Pylons, but can't figure out where to actually connect. It would seem that making the connection in app_globals.py would be convenient: class Globals(object): def __init__(self): self.db = MySQLdb.connect() Then controllers can access the db via the globals. However, this seems to lead to problems with 'MySQL has gone away' errors, and crashes with concurrent requests, so I'm thinking that this is bad because of thread safety. What is the best way to do this? Should each controller open and then close a MySQL connection? Thanks!
[ "Summarizing the comments on your question: you should probably use SQLAlchemy's non-ORM features such as the SQL Expression Language and the engine API, which each let you still get some of the benefits of SQLAlchemy without needing to go all the way up to ORM-ness.\n" ]
[ 1 ]
[]
[]
[ "mysql", "pylons", "python" ]
stackoverflow_0004040890_mysql_pylons_python.txt