title
stringlengths
10
172
question_id
int64
469
40.1M
question_body
stringlengths
22
48.2k
question_score
int64
-44
5.52k
question_date
stringlengths
20
20
answer_id
int64
497
40.1M
answer_body
stringlengths
18
33.9k
answer_score
int64
-38
8.38k
answer_date
stringlengths
20
20
tags
list
Crazy but minor issue in grid-based game
39,030,278
<p>This has been really bugging me. I'm creating a game where a hero moves on a grid and part of it involves a collision check. The collision check stops the hero from moving in a direction if something already occupies the grid there.</p> <p>It works for all directions except down. I can't see why?! It's the same structure for up, down, left and right. They all work, except down. I don't see it, can someone help?</p> <p>Here is the code:</p> <pre><code>import random as random import pygame as pygame pygame.init() clock = pygame.time.Clock() Screen = pygame.display.set_mode([650, 650]) Done = False MapSize = 25 TileWidth = 20 TileHeight = 20 TileMargin = 4 BLACK = (0, 0, 0) WHITE = (255, 255, 255) GREEN = (0, 255, 0) RED = (255, 0, 0) BLUE = (0, 0, 255) class MapTile(object): def __init__(self, Name, Column, Row): self.Name = Name self.Column = Column self.Row = Row class Character(object): def __init__(self, Name, HP, Column, Row): self.Name = Name self.HP = HP self.Column = Column self.Row = Row def Move(self, Direction): if Direction == "UP": if self.Row &gt; 0: if self.CollisionCheck("UP") == False: self.Row -= 1 if Direction == "LEFT": if self.Column &gt; 0: if self.CollisionCheck("LEFT") == False: self.Column -= 1 if Direction == "RIGHT": if self.Column &lt; MapSize-1: if self.CollisionCheck("RIGHT") == False: self.Column += 1 if Direction == "DOWN": if self.Row &lt; MapSize-1: if self.CollisionCheck("DOWN") == False: self.Row += 1 Map.update() def CollisionCheck(self, Direction): if Direction == "UP": if len(Map.Grid[self.Column][(self.Row)-1]) &gt; 1: return True if Direction == "LEFT": if len(Map.Grid[self.Column-1][(self.Row)]) &gt; 1: return True if Direction == "RIGHT": if len(Map.Grid[self.Column+1][(self.Row)]) &gt; 1: return True if Direction == "DOWN": if len(Map.Grid[self.Column][self.Row+1]) &gt; 1: return True else: return False def Location(self): print("Coordinates: " + str(self.Column) + ", " + str(self.Row)) class Map(object): global MapSize 
Grid = [] for Row in range(MapSize): # Creating grid Grid.append([]) for Column in range(MapSize): Grid[Row].append([]) for Row in range(MapSize): #Filling grid with grass for Column in range(MapSize): TempTile = MapTile("Grass", Column, Row) Grid[Column][Row].append(TempTile) for Row in range(MapSize): #Rocks for Column in range(MapSize): TempTile = MapTile("Rock", Column, Row) if Row == 1: Grid[Column][Row].append(TempTile) for i in range(10): #Random trees RandomRow = random.randint(0, MapSize - 1) RandomColumn = random.randint(0, MapSize - 1) TempTile = MapTile("Tree", RandomColumn, RandomRow) Grid[RandomColumn][RandomRow].append(TempTile) RandomRow = random.randint(0, MapSize - 1) RandomColumn = random.randint(0, MapSize - 1) Hero = Character("Hero", 10, RandomColumn, RandomRow) def update(self): for Column in range(MapSize): for Row in range(MapSize): for i in range(len(Map.Grid[Column][Row])): if Map.Grid[Column][Row][i].Column != Column: Map.Grid[Column][Row].remove(Map.Grid[Column][Row][i]) elif Map.Grid[Column][Row][i].Name == "Hero": Map.Grid[Column][Row].remove(Map.Grid[Column][Row][i]) Map.Grid[int(Map.Hero.Column)][int(Map.Hero.Row)].append(Map.Hero) Map = Map() while not Done: for event in pygame.event.get(): if event.type == pygame.QUIT: Done = True elif event.type == pygame.MOUSEBUTTONDOWN: Pos = pygame.mouse.get_pos() Column = Pos[0] // (TileWidth + TileMargin) Row = Pos[1] // (TileHeight + TileMargin) print(str(Row) + ", " + str(Column)) for i in range(len(Map.Grid[Column][Row])): print(str(Map.Grid[Column][Row][i].Name)) elif event.type == pygame.KEYDOWN: if event.key == pygame.K_LEFT: Map.Hero.Move("LEFT") if event.key == pygame.K_RIGHT: Map.Hero.Move("RIGHT") if event.key == pygame.K_UP: Map.Hero.Move("UP") if event.key == pygame.K_DOWN: Map.Hero.Move("DOWN") Screen.fill(BLACK) for Row in range(MapSize): # Drawing grid for Column in range(MapSize): for i in range(0, len(Map.Grid[Column][Row])): Color = WHITE if len(Map.Grid[Column][Row]) == 2: 
Color = RED if Map.Grid[Column][Row][i].Name == "Hero": Color = GREEN pygame.draw.rect(Screen, Color, [(TileMargin + TileWidth) * Column + TileMargin, (TileMargin + TileHeight) * Row + TileMargin, TileWidth, TileHeight]) clock.tick(60) pygame.display.flip() Map.update() pygame.quit() </code></pre>
-1
2016-08-19T02:34:36Z
39,049,865
<p>The problem in the <code>CollisionCheck()</code> method for the <code>if Direction == "DOWN":</code> case is that whenever the <code>len(Map.Grid[self.Column][self.Row+1])</code> is less than 2, the method returns <code>None</code> <em>not</em> the <code>False</code> value <code>Move()</code> explicitly checks for, so the <code>self.Row += 1</code> statement is never executed.</p> <p>This is because the <code>else:</code>, <code>return False</code> at the end of the method is only executed when a corresponding length test failed <strong><em>or</em></strong> <code>Direction</code> wasn't equal to any of the direction string values.</p> <p>The simplest fix is to change all but the first <code>if</code> into <code>elif</code>'s and remove the <code>else:</code> clause at the end:</p> <pre><code> def CollisionCheck(self, Direction): if Direction == "UP": if len(Map.Grid[self.Column][(self.Row)-1]) &gt; 1: return True elif Direction == "LEFT": if len(Map.Grid[self.Column-1][(self.Row)]) &gt; 1: return True elif Direction == "RIGHT": if len(Map.Grid[self.Column+1][(self.Row)]) &gt; 1: return True elif Direction == "DOWN": if len(Map.Grid[self.Column][self.Row+1]) &gt; 1: return True return False </code></pre> <p>Alternatively, it could be written a little more succinctly like this:</p> <pre><code> def CollisionCheck(self, Direction): column, row = self.Column, self.Row if Direction == "UP": objects = Map.Grid[column][row-1] elif Direction == "LEFT": objects = Map.Grid[column-1][row] elif Direction == "RIGHT": objects = Map.Grid[column+1][row] elif Direction == "DOWN": objects = Map.Grid[column][row+1] else: raise ValueError("Unknown direction in CollisionCheck") return len(objects) &gt; 1 # adjacent position has more than just grass? 
</code></pre> <p>BTW, you don't need the <code>global MapSize</code> in the <code>class Map</code> definition nor that call to <code>Map.update()</code> at the end of the <code>while not Done:</code> loop (because <code>Move()</code> does it).</p>
2
2016-08-20T02:20:55Z
[ "python", "python-3.x", "oop", "pygame" ]
How to delete the last item of a collection in mongodb
39,030,332
<p>I made a program with python and mongodb to do some diaries. Like this</p> <p><a href="http://i.stack.imgur.com/8othU.png" rel="nofollow"><img src="http://i.stack.imgur.com/8othU.png" alt="slice diary"></a></p> <p>Sometimes I want to delete the last sentence, just by typing "delete!" But I dont know how to delete in a samrt way. I dont want to use "skip". Is there a good way to do it? </p>
0
2016-08-19T02:42:58Z
39,030,354
<p>Be it first or last item, <code>MongoDB</code> maintains unique <code>_id</code> key for each record and thus you can just pass that id field in your delete query either using <code>deleteOne()</code> or <code>deleteMany()</code>. Since only one record to delete you need to use <code>deleteOne()</code> like</p> <pre><code>db.collection_name.deleteOne({"_id": "1234"}) // replace 1234 with actual id </code></pre>
0
2016-08-19T02:46:08Z
[ "python", "mongodb", "collections" ]
NumPy array element not getting updated
39,030,366
<p>I have a NumPy array as follows:</p> <pre><code>supp = np.array([['A', '5', '0'], ['B', '3', '0'], ['C', '4', '0'], ['D', '1', '0'], ['E', '2', '0']]) </code></pre> <p>Now, I want to update the row[2] as row[1]/6. I'm using..</p> <p><code>for row in supp: row[2] = row[1].astype(int) / 6</code></p> <p>But row[2] seems to remain unaffected..</p> <pre><code>&gt;&gt;&gt; supp array([['A', '5', '0'], ['B', '3', '0'], ['C', '4', '0'], ['D', '1', '0'], ['E', '2', '0']], dtype='&lt;U1') </code></pre> <p>I'm using Python 3.5.2 and NumPy 1.11.1.</p> <p>Any help is appreciated. Thanks in advance</p>
2
2016-08-19T02:47:06Z
39,030,429
<p>The problem is that an <code>np.array</code> has only one type which is automatically assumed to be strings <code>supp.dtype == '|S1'</code> since your input contains only strings of length <code>1</code>. So numpy will automatically convert your updated inputs to strings of length <code>1</code>, <code>'0'</code>s in your case. Force it to be of generic type <code>object</code> and then it will be able to have both strings and ints or floats or anything else:</p> <pre><code>supp = np.array([['A', '5', '0'], ['B', '3', '0'], ['C', '4', '0'], ['D', '1', '0'], ['E', '2', '0']]) supp = supp.astype(object) for row in supp: row[2] = int(row[1]) / 6 </code></pre> <p>result:</p> <pre><code>[['A' '5' 0.8333333333333334] ['B' '3' 0.5] ['C' '4' 0.6666666666666666] ['D' '1' 0.16666666666666666] ['E' '2' 0.3333333333333333]] </code></pre> <p>alternatively you can also use the <code>dtype</code> <code>'|Sn'</code> with larger value of <code>n</code>:</p> <pre><code>supp = np.array([['A', '5', '0'], ['B', '3', '0'], ['C', '4', '0'], ['D', '1', '0'], ['E', '2', '0']]) supp = supp.astype('|S5') for row in supp: row[2] = int(row[1]) / 6 </code></pre> <p>result: </p> <pre><code>[['A' '5' '0.833'] ['B' '3' '0.5'] ['C' '4' '0.666'] ['D' '1' '0.166'] ['E' '2' '0.333']] </code></pre> <p>and in this case you are still having only strings if that is what you want.</p>
4
2016-08-19T02:55:33Z
[ "python", "arrays", "python-3.x", "numpy" ]
Cython and numpy
39,030,546
<p>Try to run Example 7-11 of <strong>High Performance Python</strong></p> <p><strong>cython_np.pyx</strong></p> <pre><code>#cython_np.pyx import numpy as np cimport numpy as np def calculate_z(int maxiter, double complex[:] zs, double complex[:] cs): cdef unsigned int i, n cdef double complex z, c cdef int[:] output = np.empty(len(zs), dtype = np.int32) for i in range(len(zs)): n = 0 z = zs[i] c = cs[i] while n &lt; maxiter and (z.real * z.real + z.imag * z.imag) &lt; 4: z = z * z + c n += 1 output[i] = n return output </code></pre> <p><strong>setup.py</strong></p> <pre><code>from distutils.core import setup from distutils.extension import Extension from Cython.Distutils import build_ext setup( cmdclass = {'build_ext':build_ext}, ext_modules = [Extension("calculate", ["cythonfn.pyx"])] ) </code></pre> <p>In the terminal , ubuntu 16.04</p> <pre><code>python3 setup.py build_ext --inplace </code></pre> <p>get some warning</p> <pre><code>running build_ext cythoning cythonfn.pyx to cythonfn.c building 'calculate' extension x86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -I/usr/include/python3.5m -c cythonfn.c -o build/temp.linux-x86_64-3.5/cythonfn.o In file included from /usr/include/python3.5m/numpy/ndarraytypes.h:1777:0, from /usr/include/python3.5m/numpy/ndarrayobject.h:18, from /usr/include/python3.5m/numpy/arrayobject.h:4, from cythonfn.c:274: /usr/include/python3.5m/numpy/npy_1_7_deprecated_api.h:15:2: warning: #warning "Using deprecated NumPy API, disable it by " "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" [-Wcpp] #warning "Using deprecated NumPy API, disable it by " \ ^ In file included from /usr/include/python3.5m/numpy/ndarrayobject.h:27:0, from /usr/include/python3.5m/numpy/arrayobject.h:4, from cythonfn.c:274: /usr/include/python3.5m/numpy/__multiarray_api.h:1448:1: warning: ‘_import_array’ defined but not used 
[-Wunused-function] _import_array(void) ^ x86_64-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 build/temp.linux-x86_64-3.5/cythonfn.o -o MY_DIR/calculate.cpython-35m-x86_64-linux-gnu.so </code></pre> <p>when I try to run use function <strong>calculate.calculate.z</strong> in Ipython, it says </p> <pre><code>TypeError: a bytes-like object is required, not 'list' </code></pre> <p><a href="http://i.stack.imgur.com/UQfFM.png" rel="nofollow">detail of using calculate.z</a></p> <p>Any idea about the warning?</p>
-1
2016-08-19T03:12:11Z
39,030,954
<p>I think, based on examples in <a href="http://docs.cython.org/en/latest/src/userguide/memoryviews.html" rel="nofollow">http://docs.cython.org/en/latest/src/userguide/memoryviews.html</a></p> <pre><code>double complex[:] zs </code></pre> <p>defines a 1d memoryview with <code>double complex</code> dtype. This is similar to a <code>numpy</code> array with that dtype.</p> <p>Your code passes a list to this function. Lists are Python objects, and can't be accessed as C or Cython native arrays.</p> <p>Does your source give any information on how to call this function? I don't have access to that book.</p>
0
2016-08-19T04:08:21Z
[ "python", "numpy", "cython" ]
How can I have my script run certain days / months
39,030,624
<p>I have just created a python script that automatically renews my pass. Is it possible to make the script run like a app or a background program to check if it is a certain day of a month and to run it?</p>
-2
2016-08-19T03:23:09Z
39,030,743
<p>That's just scheduled task.</p> <p>In your task call python.exe, pass your script as an argument and that's it.</p>
0
2016-08-19T03:39:08Z
[ "python", "selenium" ]
CUDA histogram2d not working
39,030,738
<p>Due to a seeming lack of a decent 2D histogram for CUDA (that I can find... pointers welcome), I'm trying to implement it myself with pyCUDA.</p> <p>Here's what the histogram should look like (using Numpy):</p> <p><a href="http://i.stack.imgur.com/ThdCb.png" rel="nofollow"><img src="http://i.stack.imgur.com/ThdCb.png" alt="Numpy Histogram"></a></p> <p>Here's what I've got so far:</p> <pre><code>code = ''' __global__ void histogram2d(const float *in_x, const float *in_y, const float *in_w, float *out) {{ int start = blockIdx.x * blockDim.x + threadIdx.x; float *block_out = &amp;out[{xres} * {yres} * {num_chans} * blockIdx.x]; for(int i = 0; i &lt; {length}; i++) {{ float x = in_x[start + i]; float y = in_y[start + i]; int w_idx = (start + i) * {num_chans}; int xbin = (int) (((x - {xmin}) / {xptp}) * {xres}); int ybin = (int) (((y - {ymin}) / {yptp}) * {yres}); if (0 &lt;= xbin &amp;&amp; xbin &lt; {xres} &amp;&amp; 0 &lt;= ybin &amp;&amp; ybin &lt; {yres}) {{ for(int c = 0; c &lt; {num_chans}; c++) {{ atomicAdd(&amp;block_out[(ybin * {xres} + xbin) * {num_chans} + c], in_w[w_idx + c]); }} }} }} }} '''.format(**args) ------ __global__ void histogram2d(const float *in_x, const float *in_y, const float *in_w, float *out) { int start = blockIdx.x * blockDim.x + threadIdx.x; float *block_out = &amp;out[50 * 50 * 4 * blockIdx.x]; for(int i = 0; i &lt; 100; i++) { float x = in_x[start + i]; float y = in_y[start + i]; int w_idx = (start + i) * 4; int xbin = (int) (((x - -10.0) / 20.0) * 50); int ybin = (int) (((y - -10.0) / 20.0) * 50); if (0 &lt;= xbin &amp;&amp; xbin &lt; 50 &amp;&amp; 0 &lt;= ybin &amp;&amp; ybin &lt; 50) { for(int c = 0; c &lt; 4; c++) { atomicAdd(&amp;block_out[(ybin * 50 + xbin) * 4 + c], in_w[w_idx + c]); } } } } </code></pre> <p><a href="http://i.stack.imgur.com/DMY29.png" rel="nofollow"><img src="http://i.stack.imgur.com/DMY29.png" alt="CUDA histogram"></a></p> <p>There seems to be a problem with the indexing, but I haven't done much pure CUDA 
before, so I can't tell what it is. Here's what I think the equivalent python would be:</p> <pre><code>def slow_hist(in_x, in_y, in_w, out, blockx, blockdimx, threadx): start = blockx * blockdimx + threadx block_out_addr = args['xres'] * args['yres'], args['num_chans'] * blockx for i in range(args['length']): x = in_x[start + i] y = in_y[start + i] w_idx = (start + i) * args['num_chans'] xbin = int(((x - args['xmin']) / args['xptp']) * args['xres']) ybin = int(((y - args['ymin']) / args['yptp']) * args['yres']) if 0 &lt;= xbin &lt; args['xres'] and 0 &lt;= ybin &lt; args['yres']: for c in range(args['num_chans']): out[(ybin * args['xres'] + xbin) * args['num_chans'] + c] += in_w[w_idx + c] </code></pre> <p><a href="http://i.stack.imgur.com/OLsuX.png" rel="nofollow"><img src="http://i.stack.imgur.com/OLsuX.png" alt="Pure-python histogram"></a></p> <p>All the code is viewable, including these images, <a href="https://github.com/scnerd/flames/blob/master/flames.ipynb" rel="nofollow">at the Github page of this notebook</a> (this cell is at the bottom).</p> <p>What am I doing wrong in this CUDA code? I've tried lots of little tweaks (striding the atomicAdd address by 1, 4, 8, 16, transposing outputs, etc.), but it seems like I'm missing something subtle, probably in how the pointer arithmetic is working. Any help would be appreciated.</p>
-1
2016-08-19T03:38:14Z
39,040,518
<p>The array being allocated for the output array for the CUDA section used Numpy's default float64 instead of float32, so memory was twice as large as expected. Here's the new histogram output:</p> <p><a href="http://i.stack.imgur.com/whUpm.png" rel="nofollow"><img src="http://i.stack.imgur.com/whUpm.png" alt="New CUDA histogram"></a></p> <p>I'd still greatly appreciate comments or answers that help explain why these histograms are all so different from each other.</p>
0
2016-08-19T13:39:35Z
[ "python", "numpy", "cuda", "pycuda", "histogram2d" ]
Accessing dataframes in different notebooks in google data lab
39,030,914
<p>I am using google cloud data lab, and I have several data frames in different notebooks. Is there any way to access all these data frames in one notebook? I use SQL and python for writing the codes.</p> <p>The data frames are the base tables for user engagement in an android app. These base tables are user data, experience data, and session data.</p> <p>Now, if I want to generate an analytics table that uses all these base tables( in different notebooks), then is there a way to use these tables in a different notebook</p>
1
2016-08-19T04:03:08Z
39,039,925
<p>To access functions/variables from other notebooks in Google Cloud Datalab, simply run the other notebooks from your current notebook using the line magic command <code>%run</code></p> <p>For example:</p> <ol> <li>Create a notebook called <code>'notebook_a.ipynb'</code></li> <li>In the notebook, enter the following in an empty cell: <code>variable_a = 5</code></li> <li>Save the notebook</li> <li>Open a different notebook in the same directory.</li> <li>Execute the following line magic command in a cell from the notebook in step 4.<code>%run 'notebook_a.ipynb'</code></li> <li>You should be able to see the value of <code>variable_a</code> which exists in another notebook.</li> </ol> <p>Adjust the path accordingly if the notebook is not the same directory with <code>%run &lt;relative path to notebook&gt;</code></p> <p>I posted a similar answer in a <a href="http://stackoverflow.com/questions/36814849/how-to-execute-a-python-notebook-inside-another-one-at-google-cloud-datalab/36818415#36818415">related Stack Overflow post</a>.</p> <p>Note: This does not work if you are running <a href="https://cloud.google.com/datalab/docs/quickstarts/quickstart-gce" rel="nofollow">Datalab on Google Cloud Platform</a>.</p>
1
2016-08-19T13:09:55Z
[ "python", "google-cloud-datalab" ]
Unpack a string into an expanded string
39,031,019
<p>I am given a string in the following format: <code>"a{1;4:6}"</code> and <code>"a{1;2}b{2:4}"</code> where the <code>;</code> represents two different numbers, and a <code>:</code> represents a sequence of numbers. There can be any number of combinations of semicolons and colons within the brace.</p> <p>I want to expand it such that these are the results of expanding the two examples above: </p> <ul> <li><code>"a{1;4:6}"</code> = "a1a4a5a6"</li> <li><code>"a{1;2}b{2:4}" = "a1b2b3b4a2b2b3b4"</code></li> </ul> <p>I've never had to deal with something like this before, since I am usually given strings in some sort of ready-made format which is easily parsable. In this case I have to parse the string manually. </p> <p>My attempt is to split the string manually, over and over again, until you hit a case where there is either a colon or a semicolon, then start building the string from there. This is horribly inefficient, and I would appreciate any thoughts on this approach. Here is essentially what the code looks like (I omitted a lot of it, just to get the point across more quickly):</p> <pre><code>&gt;&gt;&gt; s = "a{1;4:6}" &gt;&gt;&gt; splitted = s.split("}") &gt;&gt;&gt; splitted ['a{1;4:6', ''] &gt;&gt;&gt; splitted2 = [s.split("{") for s in splitted] &gt;&gt;&gt; splitted2 [['a', '1;4:6'], ['']] &gt;&gt;&gt; splitted3 = [s.split(";") for s in splitted2[0]] &gt;&gt;&gt; splitted3 [['a'], ['1', '4:6']] # ... etc, then build up the strings manually once the ranges are figured out. </code></pre> <p>The thinking behind splitting at the close brace at first is that it is guaranteed that a new identifier, with an associated range comes up after it. Where am I going wrong? My approach works for simple strings such as the first example, but it doesn't for the second example. Furthermore it is inefficient. I would be thankful for any input on this problem.</p>
1
2016-08-19T04:15:44Z
39,031,504
<pre><code>import re def expand(compressed): # 'b{2:4}' -&gt; 'b{2;3;4}' i.e. reduce the problem to just one syntax normalized = re.sub(r'(\d+):(\d+)', lambda m: ';'.join(map(str, range(int(m.group(1)), int(m.group(2)) + 1))), compressed) # 'a{1;2}b{2;3;4}' -&gt; ['a{1;2}', 'b{2;3;4}'] elements = re.findall(r'[a-z]\{[\d;]+\}', normalized) tokens = [] # ['a{1;2}', 'b{2;3;4}'] -&gt; [['a1', 'a2'], ['b2', 'b3', 'b4']] for element in elements: match = re.match(r'([a-z])\{([\d;]+)\}', element) alphanumerics = [] # match result already guaranteed by re.findall() for number in match.group(2).split(';'): alphanumerics.append(match.group(1) + number) tokens.append(alphanumerics) # [['a1', 'a2'], ['b2', 'b3', 'b4']] -&gt; 'a1b2b3b4a2b2b3b4' def pack_tokens(tokens): current, *rest = tokens if not rest: return ''.join(current) # base case return ''.join(token + pack_tokens(rest) for token in current) return pack_tokens(tokens) strings = ['a{1;4:6}', 'a{1;2}b{2:4}', 'a{1;2}b{2:4}c{3;6}'] for string in strings: print(string, '-&gt;', expand(string)) </code></pre> <p><strong>OUTPUT</strong></p> <pre><code>a{1;4:6} -&gt; a1a4a5a6 a{1;2}b{2:4} -&gt; a1b2b3b4a2b2b3b4 a{1;2}b{2:4}c{3;6} -&gt; a1b2c3c6b3c3c6b4c3c6a2b2c3c6b3c3c6b4c3c6 </code></pre>
4
2016-08-19T05:09:23Z
[ "python" ]
Unpack a string into an expanded string
39,031,019
<p>I am given a string in the following format: <code>"a{1;4:6}"</code> and <code>"a{1;2}b{2:4}"</code> where the <code>;</code> represents two different numbers, and a <code>:</code> represents a sequence of numbers. There can be any number of combinations of semicolons and colons within the brace.</p> <p>I want to expand it such that these are the results of expanding the two examples above: </p> <ul> <li><code>"a{1;4:6}"</code> = "a1a4a5a6"</li> <li><code>"a{1;2}b{2:4}" = "a1b2b3b4a2b2b3b4"</code></li> </ul> <p>I've never had to deal with something like this before, since I am usually given strings in some sort of ready-made format which is easily parsable. In this case I have to parse the string manually. </p> <p>My attempt is to split the string manually, over and over again, until you hit a case where there is either a colon or a semicolon, then start building the string from there. This is horribly inefficient, and I would appreciate any thoughts on this approach. Here is essentially what the code looks like (I omitted a lot of it, just to get the point across more quickly):</p> <pre><code>&gt;&gt;&gt; s = "a{1;4:6}" &gt;&gt;&gt; splitted = s.split("}") &gt;&gt;&gt; splitted ['a{1;4:6', ''] &gt;&gt;&gt; splitted2 = [s.split("{") for s in splitted] &gt;&gt;&gt; splitted2 [['a', '1;4:6'], ['']] &gt;&gt;&gt; splitted3 = [s.split(";") for s in splitted2[0]] &gt;&gt;&gt; splitted3 [['a'], ['1', '4:6']] # ... etc, then build up the strings manually once the ranges are figured out. </code></pre> <p>The thinking behind splitting at the close brace at first is that it is guaranteed that a new identifier, with an associated range comes up after it. Where am I going wrong? My approach works for simple strings such as the first example, but it doesn't for the second example. Furthermore it is inefficient. I would be thankful for any input on this problem.</p>
1
2016-08-19T04:15:44Z
39,032,210
<p>I tried pyparsing for that and IMHO it produced a pretty readable code (took pack_tokens from the previous answer).</p> <pre><code>from pyparsing import nums, Literal, Word, oneOf, Optional, OneOrMore, Group, delimitedList from string import ascii_lowercase as letters # transform a '123' to 123 number = Word(nums).setParseAction(lambda s, l, t: int(t[0])) # parses 234:543 ranges range_ = number + Literal(':').suppress() + number # transforms the range x:y to a list [x, x+1, ..., y] range_.setParseAction(lambda s, l, t: list(range(t[0], t[1]+1))) # parse the comma delimited list of ranges or individual numbers range_list = delimitedList(range_|number,",") # and pack them in a tuple range_list.setParseAction(lambda s, l, t: tuple(t)) # parses 'a{2,3,4:5}' group group = Word(letters, max=1) + Literal('{').suppress() + range_list + Literal('}').suppress() # transform the group parsed as ['a', [2, 4, 5]] to ['a2', 'a4' ...] group.setParseAction(lambda s, l, t: tuple("%s%d" % (t[0],num) for num in t[1])) # the full expression is just those group one after another expression = OneOrMore(group) def pack_tokens(s, l, tokens): current, *rest = tokens if not rest: return ''.join(current) # base case return ''.join(token + pack_tokens(s, l, rest) for token in current) expression.setParseAction(pack_tokens) parsed = expression.parseString('a{1,2,3}')[0] print(parsed) parsed = expression.parseString('a{1,3:7}b{1:5}')[0] print(parsed) </code></pre>
7
2016-08-19T06:12:41Z
[ "python" ]
Unpack a string into an expanded string
39,031,019
<p>I am given a string in the following format: <code>"a{1;4:6}"</code> and <code>"a{1;2}b{2:4}"</code> where the <code>;</code> represents two different numbers, and a <code>:</code> represents a sequence of numbers. There can be any number of combinations of semicolons and colons within the brace.</p> <p>I want to expand it such that these are the results of expanding the two examples above: </p> <ul> <li><code>"a{1;4:6}"</code> = "a1a4a5a6"</li> <li><code>"a{1;2}b{2:4}" = "a1b2b3b4a2b2b3b4"</code></li> </ul> <p>I've never had to deal with something like this before, since I am usually given strings in some sort of ready-made format which is easily parsable. In this case I have to parse the string manually. </p> <p>My attempt is to split the string manually, over and over again, until you hit a case where there is either a colon or a semicolon, then start building the string from there. This is horribly inefficient, and I would appreciate any thoughts on this approach. Here is essentially what the code looks like (I omitted a lot of it, just to get the point across more quickly):</p> <pre><code>&gt;&gt;&gt; s = "a{1;4:6}" &gt;&gt;&gt; splitted = s.split("}") &gt;&gt;&gt; splitted ['a{1;4:6', ''] &gt;&gt;&gt; splitted2 = [s.split("{") for s in splitted] &gt;&gt;&gt; splitted2 [['a', '1;4:6'], ['']] &gt;&gt;&gt; splitted3 = [s.split(";") for s in splitted2[0]] &gt;&gt;&gt; splitted3 [['a'], ['1', '4:6']] # ... etc, then build up the strings manually once the ranges are figured out. </code></pre> <p>The thinking behind splitting at the close brace at first is that it is guaranteed that a new identifier, with an associated range comes up after it. Where am I going wrong? My approach works for simple strings such as the first example, but it doesn't for the second example. Furthermore it is inefficient. I would be thankful for any input on this problem.</p>
1
2016-08-19T04:15:44Z
39,035,866
<p>Just to demonstrate a technique for doing this using <code>eval</code> (as @ialcuaz asked in the comments). Again I wouldn't recommend doing it this way, the other answers are more appropriate. This technique can be useful when the structure is more complex (i.e. recursive with brackets and so on) when you don't want a full blown parser.</p> <pre><code>import re import functools class Group(object): def __init__(self, prefix, items): self.groups = [[prefix + str(x) for x in items]] def __add__(self, other): self.groups.extend(other.groups) return self def __repr__(self): return self.pack_tokens(self.groups) # adapted for Python 2.7 from @cdlane's code def pack_tokens(self, tokens): current = tokens[:1][0] rest = tokens[1:] if not rest: return ''.join(current) return ''.join(token + self.pack_tokens(rest) for token in current) def createGroup(str, *items): return Group(str, items) def expand(compressed): # Replace a{...}b{...} with a{...} + b{...} as we will overload the '+' operator to help during the evaluation expr = re.sub(r'(\}\w+\{)', lambda m: '} + ' + m.group(1)[1:-1] + '{', compressed) # Expand : range to explicit list of items (from @cdlane's answer) expr = re.sub(r'(\d+):(\d+)', lambda m: ';'.join(map(str, range(int(m.group(1)), int(m.group(2)) + 1))), expr) # Convert a{x;y;..} to a(x,y, ...) so that it evaluates as a function expr = expr.replace('{', '(').replace('}', ')').replace(";", ",") # Extract the group prefixes ('a', 'b', ...) groupPrefixes = re.findall(ur'(\w+)\([\d,]+\)', expr) # Build a namespace mapping functions 'a', 'b', ... 
to createGroup() capturing the groupName prefix in the closure ns = {prefix: functools.partial(createGroup, prefix) for prefix in groupPrefixes} # Evaluate the expression using the namespace return eval(expr, ns) tests = ['a{1;4:6}', 'a{1;2}b{2:4}', 'a{1;2}b{2:4}c{3;6}'] for test in tests: print(test, '-&gt;', expand(test)) </code></pre> <p>Produces:</p> <pre><code>('a{1;4:6}', '-&gt;', a1a4a5a6) ('a{1;2}b{2:4}', '-&gt;', a1b2b3b4a2b2b3b4) ('a{1;2}b{2:4}c{3;6}', '-&gt;', a1b2c3c6b3c3c6b4c3c6a2b2c3c6b3c3c6b4c3c6) </code></pre>
2
2016-08-19T09:43:52Z
[ "python" ]
define a function like numpy transpose and inverse
39,031,127
<p>i would define a function like calculating of inverse <code>np.matrix.I</code>:</p> <pre><code>class matrix: def __init__(self, data): self.data = data self.I = 1.0/data def inv(self): return 1.0/self.data if __name__ == '__main__': m = matrix(10) m.I </code></pre> <p>it is possible to get calculated value of <code>m.I</code> without using <code>m.inv()</code> ?</p> <pre><code>In [3]: m.data = 20 In [4]: m.inv() Out[4]: 0.05 In [5]: m.I Out[5]: 0.1 </code></pre>
-1
2016-08-19T04:28:12Z
39,031,323
<p>You could do this with a <code>property</code>:</p> <pre><code>class matrix: def __init__(self, data): self.data = data @property def I(self): return 1.0/self.data if __name__ == '__main__': m = matrix(10) print m.I # 0.1 m.data = 20 print m.I # 0.05 </code></pre> <p>(As an aside, I called this class <code>matrix</code> as you did, but I'd prefer a different name since the inverse of a matrix is not <code>1./data</code>.)</p>
2
2016-08-19T04:49:27Z
[ "python", "numpy" ]
Error running socket.io and express-nodejs setup
39,031,156
<p>I was working on nodejs executing python scripts using spawn and socket.io methods. I am getting output on the console. but I am not able to display it on the browser. It is showing error.I have pasted the error below. Can any one please help me in solving this problem. I have got this example from <a href="http://stackoverflow.com/questions/25607799/node-jss-python-child-script-outputting-on-finish-not-real-time">this stackoverflow</a> Here i am pasting my code: <strong>sample.py</strong></p> <pre><code>import random, time for x in range(10): print(str(random.randint(23,28))+" C") time.sleep(random.uniform(0.4,5)) </code></pre> <p><strong>index.js</strong></p> <pre><code>var express = require("express"); var path = require('path'); var bodyParser = require('body-parser'); var fs = require('fs'); var spawn = require('child_process').spawn; var http = require('http').Server(app); var io = require('socket.io')(http); var app = express(); app.get('/', function(req, res){ res.sendFile(__dirname + '/index.html'); }); app.post('/showtemp',function(req,res){ var pathtoScript = spawn('python', ["../External_scripts/sample.py"]); pathtoScript.stdout.on('data', function (output) { var val = String(output); console.log(val); io.sockets.emit('response', { data: val}); }); }) var server = app.listen(8082,'0.0.0.0', function () { var port = server.address().port console.log("App is listening at %s", port) }); </code></pre> <p>And <strong>index.html</strong> page</p> <pre><code>&lt;!doctype html&gt; &lt;html&gt; &lt;head&gt; &lt;title&gt;Live temperature&lt;/title&gt; &lt;link rel="stylesheet" type="text/css" href="styles.css"&gt; &lt;/head&gt; &lt;body&gt; &lt;div id="liveTemp"&gt;Loading...&lt;/div&gt; &lt;script src="http://code.jquery.com/jquery-1.11.1.js"&gt;&lt;/script&gt; &lt;script src="https://cdn.socket.io/socket.io-1.4.5.js"&gt;&lt;/script&gt; &lt;script&gt; var socket = io(); socket.on('response',function(msg){ console.log("msg"); $('#liveTemp').html(msg.data); }) 
}); &lt;/script&gt; &lt;/body&gt; &lt;/html&gt; </code></pre> <p>I am getting this error </p> <pre><code>GET http://localhost:8082/socket.io/?EIO=3&amp;transport=polling&amp;t=LQSVrTN 404 (Not Found) GET http://localhost:8082/socket.io/?EIO=3&amp;transport=polling&amp;t=LQSVrTN 404 (Not Found) </code></pre>
0
2016-08-19T04:31:30Z
39,031,212
<p>You haven't started a web server anywhere or hooked socket.io to it. There are several different ways to do this, but here's one that works:</p> <pre><code>var express = require('express'); var app = express(); var server = require('http').Server(app); var io = require('socket.io')(server); server.listen(80); </code></pre> <p>Note: You must see a <code>server.listen()</code> (or equivalent) somewhere. <strong>And, you need to pass the server to socket.io so it can hook into it.</strong></p> <p>You can also do this where you don't directly need to load the <code>http</code> module yourself:</p> <pre><code>var express = require('express'); var app = express(); var server = app.listen(80); var io = require('socket.io')(server); </code></pre> <p>socket.io documentation for several different options <a href="http://socket.io/docs/" rel="nofollow">here</a>.</p>
2
2016-08-19T04:37:38Z
[ "javascript", "python", "node.js", "express", "socket.io" ]
Import single OBJ files into Maya, moving and rotating it with Python/MEL
39,031,168
<p>Hi I have this code to import OBJ files into maya with Python</p> <p>**</p> <p><div class="snippet" data-lang="js" data-hide="false" data-console="true" data-babel="false"> <div class="snippet-code"> <pre class="snippet-code-js lang-js prettyprint-override"><code>import maya.cmds as cmds pathOfFiles = "/Path/to/the/files/folder/" fileType = "obj" files = cmds.getFileList(folder=pathOfFiles, filespec='*.%s' % fileType) if len(files) == 0: cmds.warning("No files found") else: for f in files: cmds.file(pathOfFiles + f, i=True)</code></pre> </div> </div> </p> <p>**</p> <p>It imports all the obj files which are into that folder.</p> <p>However, what I need is:</p> <ol> <li>Import an individual OBJ file at once</li> <li>Move and rotate the imported file</li> <li>Apply a Material already created in Maya</li> <li>Render</li> <li>Delete the file 6 Repeat the process with the next file</li> </ol> <p>Is it possible to do it with Python or MEL</p>
0
2016-08-19T04:32:41Z
39,060,775
<p>This looks like a fun challenge, so here's my attempt at answering it:</p> <pre><code>import maya.cmds as cmds import glob #1. Import an individual OBJ file at once def importFile(i): cmds.file(i, i=True, groupReference=True, groupName="myobj") #2. Move and rotate the imported file def moveFile(): cmds.select("myobj") # Add the X,Y,Z cordinates to change scale, translate and rotate below cmds.scale(1,1,1) cmds.move(0,0,0) cmds.rotate(0,90,0) #3. Apply a Material already created in Maya def materialFile(): cmds.select("myobj") myMaterial = "lambert2" + "SG" #replace lambert2 with your material cmds.sets(forceElement=myMaterial) #4. Render def renderFile(i): cmds.setAttr("defaultRenderGlobals.imageFilePrefix", i, type="string") cmds.render(batch=True) #5. Delete the imported file def deleteFile(): cmds.select("myobj") cmds.delete() # Add the path to your obj files. Make sure to leave the /*.obj at the end myglob = glob.glob("/Users/OSX/Desktop/objs/*.obj") for i in myglob: importFile(i) moveFile() materialFile() renderFile(i) deleteFile() </code></pre> <p>Because you have a list of individual things you need the script to do I've divided up each requirement on your list into its own function. This should make the script more modular and hopefully easy to edit and reuse.</p> <p>Python works much better for this kind of task because MEL doesn't have functions, instead it has procedures which act like functions but don't work as well from what I've experienced.</p>
0
2016-08-21T04:11:59Z
[ "python", "import", "rendering", "maya" ]
what does "by = lambda x: lambda y: getattr(y, x)" mean?
39,031,255
<p>There is a dataFrame named "subset" and the codes are as follows. pd is the nickname of pandas. I can't figure out the meaning of <code>by = lambda x: lambda y: getattr(y, x)</code>.</p> <pre><code>pivot = pd.pivot_table(subset, values='count', rows=['date'], cols=['sample'], fill_value=0) by = lambda x: lambda y: getattr(y, x) grouped = pivot.groupby([by('year'),by('month')]).sum() </code></pre>
2
2016-08-19T04:42:01Z
39,031,305
<p><code>by = lambda x: lambda y: getattr(y, x)</code> is equivalent to the following:</p> <pre><code>def by(x): def getter(y): return getattr(y, x) return getter </code></pre> <p><code>getattr(a, b)</code> gets an attribute with the name <code>b</code> from an object named <code>a</code>.</p> <p>So <code>by('bar')</code> returns a function that returns the attribute <code>'bar'</code> from an object.</p> <p><code>by('bar')(foo)</code> means <code>getattr(foo, 'bar')</code> which is roughly <code>foo.bar</code>.</p> <p>If that doesn't help, let us know which part you're still having trouble with.</p>
2
2016-08-19T04:47:58Z
[ "python", "pandas", "lambda" ]
Unity - How to reduce time spent in Update and GC
39,031,315
<p>I have script in Unity that is exchanging data with another Python app. It has a while loop that listens for UDP messages as a background thread. Also the script is asking for new data every frame via the Update function.</p> <p>After I receive a message, the script parses it as a string and it needs to split the string by tabs in order to retrieve all the values. Currently, the string contains eyetracker and joystick data that Unity needs as player inputs.</p> <p>UDPController.cs</p> <pre><code>private void init() { // define address to send data to pythonEndPoint = new IPEndPoint(IPAddress.Parse(IP), pythonPort); unityEndPoint = new IPEndPoint (IPAddress.Parse (IP), unityPort); pythonSock = new Socket(AddressFamily.InterNetwork, SocketType.Dgram, ProtocolType.Udp); //define client to receive data at client = new UdpClient(unityPort); client.Client.ReceiveTimeout = 1; client.Client.SendTimeout = 1; // start background thread to receive information receiveThread = new Thread(new ThreadStart(ReceiveData)); receiveThread.IsBackground = true; receiveThread.Start(); } void Update(){ if (Calibration.calibrationFinished &amp;&amp; startRequestNewFrame) { RequestData(); } } private void RequestData() { // Sends this to the UDP server written in Python SendString("NEWFRAME"); } // receive thread which listens for messages from Python UDP Server private void ReceiveData() { while (true) { try { if(client.Available &gt; 0) { double unixRecvTimeStamp = DataManager.ConvertToUnixTimestamp(DateTime.Now); byte[] data = client.Receive(ref pythonEndPoint); string rawtext = Encoding.UTF8.GetString(data); string[] msgs = rawtext.Split('\t'); string msgType = msgs[0]; double pythonSentTimeStamp = double.Parse(msgs[msgs.Length-1].Split(' ')[1]); DataManager.UdpRecvBuffer += '"' + rawtext + '"' + "\t" + pythonSentTimeStamp + "\t" + unixRecvTimeStamp + "\t" + DataManager.ConvertToUnixTimestamp(DateTime.Now) + "\n"; if (String.Equals(msgType, "FRAMEDATA")) { DataManager.gazeAdcsPos = 
new Vector2(float.Parse(msgs[1].Split(' ')[1]), float.Parse(msgs[2].Split(' ')[1])); float GazeTimeStamp = float.Parse(msgs[3].Split(' ')[1]); DataManager.rawJoy = new Vector2(float.Parse(msgs[4].Split(' ')[1]), 255 - float.Parse(msgs[5].Split(' ')[1])); float joyC = float.Parse(msgs[6].Split(' ')[1]); float ArduinoTimeStamp = float.Parse(msgs[7].Split(' ')[1]); } } } catch (Exception err) { print(err.ToString()); } } } </code></pre> <p>So according to the Unity Profiler, it seems like there is a huge amount of time spent in Behaviour Update, especially inside UDPController.Update() and GC.Collect. My initial hypothesis is that perhaps I'm creating too many strings and arrays overtime and the garbage collector kicks in quite often to remove the unused memory space. </p> <p><a href="http://i.stack.imgur.com/x69EM.png" rel="nofollow"><img src="http://i.stack.imgur.com/x69EM.png" alt="Unity Profiler - Showing high CPU Usage and Time in Behaviour Update"></a></p> <p>So my question is, is my hypothesis right? If so, how I can rewrite this code to increase my performance and reduce the drop in FPS and perceived lag. If not, where is the issue at because currently the game starts to lag right about 10 minutes in. </p> <p>Moreover, is there a better way or format for data transferring? It seems like I can be using objects like JSON, Google Protocol Buffer or msgpack or would that be an overkill?</p>
0
2016-08-19T04:48:56Z
39,035,947
<ol> <li><p>I can see a lot of local variables in your while loop (along with arrays). Local variables cause Garbage collector to run. You should declare all the variables outside of the method.</p></li> <li><p>Moreover, avoid string operations in <code>while/update()</code> as <a href="http://stackoverflow.com/a/4274202/4366237">strings are immutable</a>. Thus your code create a new copy to store the result after every concatenation. Use <a href="https://msdn.microsoft.com/en-us/library/system.text.stringbuilder(v=vs.110).aspx" rel="nofollow">StringBuilder</a> in these situations to avoid GC.</p></li> </ol> <p><a href="https://docs.unity3d.com/Manual/MobileOptimizationPracticalScriptingOptimizations.html" rel="nofollow">Read More</a></p>
0
2016-08-19T09:48:29Z
[ "c#", "python", "multithreading", "unity3d", "garbage-collection" ]
windows python stdout and stderr to two different files
39,031,332
<p>I would like to output the stdout and stderr to two different log files. i have tried this code but it only outputs the error to error log , but output is not redirected to runtime.log file. </p> <p>the code is being run on windows and mostly robocopy is done in the code. </p> <pre><code>saveerr = sys.stderr fsock = open('error.log', 'a') sys.stderr = fsock saveout = sys.stdout fsock1 = open('runtime.log', 'a') sys.stdout = fsock1 </code></pre> <p>the sys.stdout area is not working. please let me know any correction in this code. </p> <p>here is my entire code </p> <pre><code>import sys saveerr = sys.stderr fsock = open('error.log', 'a') sys.stderr = fsock saveout = sys.stdout fsock1 = open('runtime.log', 'a') sys.stdout = fsock1 ##For site AUCB-NET-01 from source folder AUDC-RSTOR-01 E:\Canberra exit_code1 = subprocess.call('robocopy \\\\aucb-net-01\\d$ \\\\nasaudc01\\remote_site_sync\\aucb-net-01 /E /MIR /W:2 /R:1', shell=True) print ("exitcoode1=", exit_code1) </code></pre> <p>thanks to everyone for reading my post. </p>
0
2016-08-19T04:49:59Z
39,031,459
<p>As mentioned in my comment your code should divert <strong>your</strong> stdout to the file. To get robocopy's stdout to go there too just echo each line to your stdout as shown in this link</p> <p><a href="http://stackoverflow.com/a/28319191/6550457">http://stackoverflow.com/a/28319191/6550457</a></p> <hr> <pre><code>from subprocess import Popen, PIPE import sys saveerr = sys.stderr fsock = open('error.log', 'a') sys.stderr = fsock saveout = sys.stdout fsock1 = open('runtime.log', 'a') sys.stdout = fsock1 cmd = 'robocopy \\\\aucb-net-01\\d$ \\\\nasaudc01\\remote_site_sync\\aucb-net-01 /E /MIR /W:2 /R:1' p = Popen(cmd, stdout=sys.stdout, stderror=sys.stderr, bufsize=1, universal_newlines=True) exit_code1 = p.wait() </code></pre> <p>see @eryksuns comments about robo copy and it's exit codes. <a href="http://ss64.com/nt/robocopy-exit.html" rel="nofollow">http://ss64.com/nt/robocopy-exit.html</a></p>
3
2016-08-19T05:05:34Z
[ "python", "windows", "cmd" ]
Spotify - get playlist track names
39,031,363
<p>on <code>search endpoint</code>, one can retrieve <code>playlist</code> data, for instance:</p> <pre><code>def search_playlist(): results = sp.search(q='doom metal', type='playlist') </code></pre> <p>if add this to the code:</p> <pre><code> items = results['playlists']['items'][0]['tracks'] print (items) </code></pre> <p>I get:</p> <p><code>{u'total': 349, u'href': u'https://api.spotify.com/v1/users/handiofiblood/playlists/71CdtOFANPpdboCh6e8lHr/tracks'}</code></p> <p>but how do I access <strong>track</strong> <strong>NAMES</strong>, given the fact that the <code>id</code> at this <code>endpoint</code> stands for the <code>playlist</code> and not the <code>tracks</code> themselves?</p> <p>many thanks on advance.</p>
0
2016-08-19T04:54:33Z
39,044,777
<p><a href="https://github.com/plamere/spotipy/blob/master/examples/user_playlists_contents.py" rel="nofollow">https://github.com/plamere/spotipy/blob/master/examples/user_playlists_contents.py</a></p> <pre><code>def show_tracks(results): for i, item in enumerate(tracks['items']): track = item['track'] print(" %d %32.32s %s" % (i, track['artists'][0]['name'], track['name'])) </code></pre> <p>and</p> <pre><code> playlists = sp.user_playlists(username) for playlist in playlists['items']: if playlist['owner']['id'] == username: print() print(playlist['name']) print(' total tracks', playlist['tracks']['total']) results = sp.user_playlist(username, playlist['id'], fields="tracks,next") tracks = results['tracks'] show_tracks(tracks) while tracks['next']: tracks = sp.next(tracks) show_tracks(tracks) </code></pre>
0
2016-08-19T17:32:57Z
[ "python", "spotify", "spotipy" ]
Selenium and Javascript
39,031,535
<p>I'm trying to learn how to use selenium. I'm trying to work on creating script to like instagram photos; however, i'm running into a problem where xpath won't detect the image i want to click. I think this is probably due to the fact it's a javascript button.</p> <p>This is a picture of the element i am inspecting. There's multiple pictures on the site and i am given the line </p> <pre><code>&lt;a class="thumb-shadow" href="javascript:void(0);"&gt;&lt;/a&gt; </code></pre> <p> <a href="https://gyazo.com/558df373e6ac426f098759665fd8f918" rel="nofollow">https://gyazo.com/558df373e6ac426f098759665fd8f918</a></p> <p>I've tried clicking the xpath of image wrapper, but it doesn't work either. How can i click the javascript item? Are there any resources you can point me to?</p> <p>Thanks</p>
0
2016-08-19T05:12:13Z
39,032,673
<p>Try <code>driver.find_element_by_xpath("//a[@class='thumb-shadow']/img").c‌​lick()</code></p>
0
2016-08-19T06:45:30Z
[ "python", "selenium" ]
I am thinking Web Crawler but how to start?
39,031,548
<p>I am working at a company that deals with phish and fake Facebook accounts. I want to show my dedication to the "mission". We are unable to passively monitor facebook pages for when they are removed. I am thinking a web crawler but I am curious on how to design one that constant checks a specific link to see if the Facebook page is still active or not? I hope this made sense? </p>
-1
2016-08-19T05:14:35Z
39,031,836
<p>Yes! You can use crawling. However, if you want it to be as fast as possible, crawling may not be the best way to do it. If you're interested this is how I'd do it using HTTPConnection. also, unfortunately, the link has to be <em>completely</em> broken.</p> <p>If you need more information then you will most likely have to use an API or web crawler to check if the link is broken(Thus meaning it has to link to nowhere),</p> <pre><code>from http.client import HTTPConnection # Importing HTTPConnection from http.client. conn = HTTPConnection('www.google.com') # Connecting to 'google.com' conn.request('HEAD', '/index.html') # Request data. res = conn.getresponse() # Now we get the data sent back. print(res.status, res.reason) # Finally print it. </code></pre> <p>If it returns '302 Found' then it should be an active web page, I hope this helps! Please tell me if this isn't what you wanted. :)</p> <p>Thanks,</p> <p>~Coolq</p>
1
2016-08-19T05:42:57Z
[ "python", "facebook" ]
I am thinking Web Crawler but how to start?
39,031,548
<p>I am working at a company that deals with phish and fake Facebook accounts. I want to show my dedication to the "mission". We are unable to passively monitor facebook pages for when they are removed. I am thinking a web crawler but I am curious on how to design one that constant checks a specific link to see if the Facebook page is still active or not? I hope this made sense? </p>
-1
2016-08-19T05:14:35Z
39,032,196
<p>You can send a http request to tell the account is active or not by it's response status, Python has some standard library, you may have a look at <a href="https://docs.python.org/3/library/internet.html" rel="nofollow">Internet Protocols and Support</a>. Personally, I will recommend use requests:</p> <pre><code>import requests response = requests.get("http://facebook.com/account") if response.status_code == 302: # or 404 # the page is missing </code></pre> <p>If you really care about speed or performance, you should use multiprocessing or asynchronous i/o (like gevent) in Python.</p> <p>If you are focus on crawl,you may have a look at <a href="http://doc.scrapy.org/en/latest/intro/overview.html" rel="nofollow">Scrapy</a></p> <blockquote> <p>Here you notice one of the main advantages about Scrapy: requests are scheduled and processed asynchronously. This means that Scrapy doesn’t need to wait for a request to be finished and processed, it can send another request or do other things in the meantime. This also means that other requests can keep going even if some request fails or an error happens while handling it.</p> </blockquote>
0
2016-08-19T06:11:53Z
[ "python", "facebook" ]
I am thinking Web Crawler but how to start?
39,031,548
<p>I am working at a company that deals with phish and fake Facebook accounts. I want to show my dedication to the "mission". We are unable to passively monitor facebook pages for when they are removed. I am thinking a web crawler but I am curious on how to design one that constant checks a specific link to see if the Facebook page is still active or not? I hope this made sense? </p>
-1
2016-08-19T05:14:35Z
39,032,243
<p><a href="https://www.quora.com/How-can-I-build-a-web-crawler-from-scratch/answer/Raghavendran-Balu" rel="nofollow">https://www.quora.com/How-can-I-build-a-web-crawler-from-scratch/answer/Raghavendran-Balu</a> One of the best articles I have read about Crawlers.</p> <p>A web crawler might sound like a simple fetch-parse-append system, but watch out! you may over look the complexity. I might deviate from the question intent by focussing more on architecture than implementation specifics.I believe it is necessary because, to build a web scale crawler, the architecture of the crawler is more important than the choice of language/ framework.</p> <p><strong>Architecture:</strong></p> <p>A bare minimum crawler needs at least these components:</p> <ul> <li><p>HTTP Fetcher : To retrieve web page from the server.</p></li> <li><p>Extractor: Minimal support to extract URL from page like anchor links.</p></li> <li><p>Duplicate Eliminator: To make sure same content is not extracted twice unintentionally. Consider it as a set based data structure.</p></li> <li><p>URL Frontier: To prioritize URL that has to fetched and parsed. 
Consider it as a priority queue</p></li> <li><p>Datastore: To store retrieve pages and URL and other meta data.</p></li> </ul> <p><strong>A good starting point to learn about architecture is:</strong></p> <ol> <li><a href="http://dl.acm.org/citation.cfm?id=1734789" rel="nofollow">Web Crawling</a> </li> <li><a href="http://link.springer.com/chapter/10.1007/978-3-662-10874-1_7" rel="nofollow">Crawling the Web</a></li> <li><a href="http://dl.acm.org/citation.cfm?id=598684.598733" rel="nofollow">Mercator: A scalable, extensible Web crawler</a></li> <li><a href="http://dl.acm.org/citation.cfm?id=1045969" rel="nofollow">UbiCrawler: a scalable fully distributed web crawler</a></li> <li><a href="http://dl.acm.org/citation.cfm?id=1541823" rel="nofollow">IRLbot: Scaling to 6 billion pages and beyond</a></li> <li>(single-sever crawler) and <a href="http://dl.acm.org/citation.cfm?id=2127045.2127065" rel="nofollow">MultiCrawler: a pipelined architecture</a></li> </ol> <p><strong>Programming Language</strong>: Any high level language with good network library that you are comfortable with is fine. I personally prefer Python/Java. As your crawler project might grow in terms of code size it will be hard to manage if you develop in a design-restricted programming language. While it is possible to build a crawler using just unix commands and shell script, you might not want to do so for obvious reasons.</p> <p><strong>Framework/Libraries</strong>: Many frameworks are already suggested in other answers. 
I shall summarise here:</p> <ol> <li>Apache Nutch and Heritrix (Java): Mature, Large scale, configurable</li> <li><p>Scrapy (Python): Technically a scraper but can be used to build a crawler.</p> <p>You can also visit <a href="https://github.com/scrapinghub/distributed-frontera" rel="nofollow">https://github.com/scrapinghub/distributed-frontera</a> - URL frontier and data storage for Scrapy, allowing you to run large scale crawls.</p></li> <li><a href="https://www.node.io/" rel="nofollow">node.io</a> (Javascript): Scraper. Nascent, but worth considering, if you are ready to live with javascript.</li> </ol> <p>For Python: Refer <a href="http://www-rohan.sdsu.edu/~gawron/python_for_ss/course_core/book_draft/web/web_intro.html" rel="nofollow">Introduction to web-crawling in Python</a></p> <p>Code in Python: <a href="https://www.quora.com/How-can-I-build-a-web-crawler-from-scratch/answer/Rishi-Giri-1" rel="nofollow">https://www.quora.com/How-can-I-build-a-web-crawler-from-scratch/answer/Rishi-Giri-1</a></p> <p><strong>Suggestions for scalable distributed crawling</strong>:</p> <ol> <li>It is better to go for a asynchronous model, given the nature of the problem.</li> <li>Choose a distributed data base for data storage ex. Hbase.</li> <li>A distributed data structure like redis is also worth considering for URL frontier and duplicate detector.</li> </ol> <p>For more Information visit: <a href="https://www.quora.com/How-can-I-build-a-web-crawler-from-scratch" rel="nofollow">https://www.quora.com/How-can-I-build-a-web-crawler-from-scratch</a> </p> <p><strong>References</strong>:</p> <ol> <li><p>Olston, C., &amp; Najork, M. (2010). Web crawling. Foundations and Trends in Information Retrieval, 4(3), 175-246.</p></li> <li><p>Pant, G., Srinivasan, P., &amp; Menczer, F. (2004). Crawling the web. In Web Dynamics (pp. 153-177). Springer Berlin Heidelberg.</p></li> <li><p>Heydon, A., &amp; Najork, M. (1999). 
Mercator: A scalable, extensible web crawler.World Wide Web, 2(4), 219-229.</p></li> <li><p>Boldi, P., Codenotti, B., Santini, M., &amp; Vigna, S. (2004). Ubicrawler: A scalable fully distributed web crawler. Software: Practice and Experience, 34(8), 711-726.</p></li> <li><p>Lee, H. T., Leonard, D., Wang, X., &amp; Loguinov, D. (2009). IRLbot: scaling to 6 billion pages and beyond. ACM Transactions on the Web (TWEB), 3(3), 8.</p></li> <li><p>Harth, A., Umbrich, J., &amp; Decker, S. (2006). Multicrawler: A pipelined architecture for crawling and indexing semantic web data. In The Semantic Web-ISWC 2006 (pp. 258-271). Springer Berlin Heidelberg.</p></li> </ol>
0
2016-08-19T06:15:19Z
[ "python", "facebook" ]
How do I change the parsing order of the rules?
39,031,596
<p><a href="https://github.com/EricsonWillians/Lilith" rel="nofollow">I'm developing a programming language</a>, and I'm having trouble dealing with condition statements. Here's the code in my language:</p> <pre><code>x = 4 -&gt; ? 2 &gt; 5 &lt;? x = 7 -&gt; ?&gt; -&gt; [o] &lt;- x -&gt; </code></pre> <p>Here's the specific part of the grammar that defines condition statements:</p> <pre><code>post_condition_evaluation_block : post_condition_evaluation_block_opening_operator compound_statement post_condition_evaluation_block_closing_operator condition_statement : condition_specification_operator expression post_condition_evaluation_block | condition_specification_operator expression post_condition_evaluation_block condition_extension </code></pre> <p>There's nothing actually wrong with the grammar, the code runs normally. The problem is that the expression <code>2 &gt; 5</code> gets evaluated <strong>after</strong> the following statement <code>x = 7</code>, so what gets printed is 7 instead of 4 (Which is wrong, since the expression evaluates to false). I'm dealing with this problem counting the condition blocks:</p> <pre><code>condition_blocks = {0: True} current_condition_block = 0 </code></pre> <p>And then when it comes to the condition statement:</p> <pre><code>def p_condition_statement(p): """condition_statement : condition_specification_operator expression post_condition_evaluation_block | condition_specification_operator expression post_condition_evaluation_block condition_extension""" global current_condition_block current_condition_block += 1 condition_blocks[current_condition_block] = p[2] print(condition_blocks) </code></pre> <p>It adds the value of False (p<a href="http://www.dabeaz.com/ply/ply.html" rel="nofollow">2</a>) of the expression to the corresponding block index in the dictionary. 
The problem is that when I get to do the assignment:</p> <pre><code>def p_assignment(p): """assignment : identifier assignment_operator expression""" if len(p) == 4 and condition_blocks[current_condition_block]: if p[2] == '=': identifiers[p[1]] = parse_object(p[3]) elif p[2] == "+=": identifiers[p[1]] += parse_object(p[3]) elif p[2] == "-=": identifiers[p[1]] -= parse_object(p[3]) elif p[2] == "*=": identifiers[p[1]] *= parse_object(p[3]) elif p[2] == "/=": identifiers[p[1]] /= parse_object(p[3]) p[0] = (p[1], p[2], p[3]) </code></pre> <p>The block that gets evaluated is the default "out-of-block-scope" one. The assignment rule gets parsed / processed <strong>before</strong> the expression, which makes no sense in my head, since the whole code should be processed from the beginning to the end.</p> <p>I'm obviously no expert in parsing / YACC, it's my first attempt and I find absolutely no hint of what to do <a href="http://www.dabeaz.com/ply/ply.html" rel="nofollow">in the docs</a>. I don't know how to stop the parser, to skip the parser, to change the parsing order... Perhaps the problem is in my grammar, but I can't see how to change the parsing order.</p>
4
2016-08-19T05:18:38Z
39,032,555
<p>When a production is <em>reduced</em> -- that is, its semantic action is executed -- all of the non-terminals referenced have already been reduced. In other words, children are always reduced before their parents, which is why yacc is called a bottom-up parser.</p> <p>The only practical way to achieve more flexibility is to build an AST during the parse, and then evaluate (or whatever) the complete expression afterwards by doing a treewalk over the AST. You are then free to walk the complete tree in whatever order(s) seem appropriate.</p>
1
2016-08-19T06:37:29Z
[ "python", "parsing", "yacc", "ply" ]
Is there a better way to work with result from JSON and check whether it's blank or not
39,031,617
<p>I am trying to use the PostCodeAPI (<a href="http://postcodeapi.com.au/" rel="nofollow">http://postcodeapi.com.au/</a>) to get Australian postal information based on postcode. </p> <p>Below would work successfully if a correct postcode is entered in the url (such as 2601.json). </p> <p>I have used try-except to catch HTTPError, and this is working successfully too. </p> <p>However, I am just wondering if there's a better way to determine whether <strong>data</strong> has value or not, than using a if-statement in my example.</p> <pre><code>import urllib2 import json data = [] url = 'http://v0.postcodeapi.com.au/suburbs/26xx.json' header = {'User-Agent' : 'ubuntu Browser'} req = urllib2.Request(url, headers=header) opener = urllib2.build_opener() try: data = json.loads(opener.open(req).read()) except urllib2.HTTPError, e: print "Request doesn't generate anything." print data if data == []: print "no data" else: print data[0]['name'] </code></pre> <p>Following @mgilson's suggestions, here is the updated version, with changes around (1) date = None and (2) if data</p> <pre><code>import urllib2 import json data = None url = 'http://v0.postcodeapi.com.au/suburbs/26xx.json' header = {'User-Agent' : 'ubuntu Browser'} req = urllib2.Request(url, headers=header) opener = urllib2.build_opener() try: data = json.loads(opener.open(req).read()) except urllib2.HTTPError, e: print "Request doesn't generate anything." print data if data: print data[0]['name'] else: print "no data" </code></pre>
0
2016-08-19T05:21:07Z
39,031,985
<p>For me if is also required .Still urllib2 is old requests is much simpler for rest api's and web requests.</p> <blockquote> <p>class NoDataException(Exception):<br></p> <blockquote> <p>print("No Data Received ")</p> </blockquote> <p>import requests</p> <p>url='<a href="http://v0.postcodeapi.com.au/suburbs/26xx.json" rel="nofollow">http://v0.postcodeapi.com.au/suburbs/26xx.json</a>'<br/> header = {'User-Agent' : 'ubuntu Browser'}<br/></p> <p>try:</p> <blockquote> <p>r=requests.get(url,header)<br/> if(r.json() is null):<br/> raise NoDataException<br/> # raise ReadTimeout(inbuilt exception for class not sure if works correctly)<br/> print(r.json())<br/> except NoDataException:<br/> pass;<br/><br></p> </blockquote> <p>except Exception as e:<br/></p> <blockquote> <p>print("Error Occurred ")<br/></p> </blockquote> </blockquote>
0
2016-08-19T05:54:32Z
[ "python", "json" ]
Is there a better way to work with result from JSON and check whether it's blank or not
39,031,617
<p>I am trying to use the PostCodeAPI (<a href="http://postcodeapi.com.au/" rel="nofollow">http://postcodeapi.com.au/</a>) to get Australian postal information based on postcode. </p> <p>Below would work successfully if a correct postcode is entered in the url (such as 2601.json). </p> <p>I have used try-except to catch HTTPError, and this is working successfully too. </p> <p>However, I am just wondering if there's a better way to determine whether <strong>data</strong> has value or not, than using a if-statement in my example.</p> <pre><code>import urllib2 import json data = [] url = 'http://v0.postcodeapi.com.au/suburbs/26xx.json' header = {'User-Agent' : 'ubuntu Browser'} req = urllib2.Request(url, headers=header) opener = urllib2.build_opener() try: data = json.loads(opener.open(req).read()) except urllib2.HTTPError, e: print "Request doesn't generate anything." print data if data == []: print "no data" else: print data[0]['name'] </code></pre> <p>Following @mgilson's suggestions, here is the updated version, with changes around (1) date = None and (2) if data</p> <pre><code>import urllib2 import json data = None url = 'http://v0.postcodeapi.com.au/suburbs/26xx.json' header = {'User-Agent' : 'ubuntu Browser'} req = urllib2.Request(url, headers=header) opener = urllib2.build_opener() try: data = json.loads(opener.open(req).read()) except urllib2.HTTPError, e: print "Request doesn't generate anything." print data if data: print data[0]['name'] else: print "no data" </code></pre>
0
2016-08-19T05:21:07Z
39,032,062
<p>Unless you absolutely need to use <code>urllib2</code>, I'd suggest using the <a href="http://docs.python-requests.org/en/master/%20requests" rel="nofollow">requests</a> library. It's a more user-friendly HTTP client and does not raise an HTTPError when encountering an HTTP 404. If a 404 is encountered, you shouldn't even bother trying to parse the body as JSON.</p> <pre><code>import json, requests url = 'http://v0.postcodeapi.com.au/suburbs/22xx.json' headers = {'Accept': 'application/json'} response = requests.get(url, headers=headers) data = [] # set a default value, if you want if response.status_code == 200: data = json.loads(response.text) print data[0]['name'] else: print "no data" </code></pre>
0
2016-08-19T06:01:47Z
[ "python", "json" ]
Is there a better way to work with result from JSON and check whether it's blank or not
39,031,617
<p>I am trying to use the PostCodeAPI (<a href="http://postcodeapi.com.au/" rel="nofollow">http://postcodeapi.com.au/</a>) to get Australian postal information based on postcode. </p> <p>Below would work successfully if a correct postcode is entered in the url (such as 2601.json). </p> <p>I have used try-except to catch HTTPError, and this is working successfully too. </p> <p>However, I am just wondering if there's a better way to determine whether <strong>data</strong> has value or not, than using a if-statement in my example.</p> <pre><code>import urllib2 import json data = [] url = 'http://v0.postcodeapi.com.au/suburbs/26xx.json' header = {'User-Agent' : 'ubuntu Browser'} req = urllib2.Request(url, headers=header) opener = urllib2.build_opener() try: data = json.loads(opener.open(req).read()) except urllib2.HTTPError, e: print "Request doesn't generate anything." print data if data == []: print "no data" else: print data[0]['name'] </code></pre> <p>Following @mgilson's suggestions, here is the updated version, with changes around (1) date = None and (2) if data</p> <pre><code>import urllib2 import json data = None url = 'http://v0.postcodeapi.com.au/suburbs/26xx.json' header = {'User-Agent' : 'ubuntu Browser'} req = urllib2.Request(url, headers=header) opener = urllib2.build_opener() try: data = json.loads(opener.open(req).read()) except urllib2.HTTPError, e: print "Request doesn't generate anything." print data if data: print data[0]['name'] else: print "no data" </code></pre>
0
2016-08-19T05:21:07Z
39,032,431
<p>The simplest way I can think of, is this:</p> <pre><code>import requests import json.decoder.JSONDecodeError try: data = requests.get(url, headers=headers).json() except json.decode.DecodeError: print("No data") .. # rest of your code </code></pre> <p>This is similar to your code's <code>try/except</code> statement, except that I feel it's a bit more explicit.</p> <p>Also, <code>try/except</code> should be preferred if the majority of calls of the <code>try</code> statement succeeds. </p>
0
2016-08-19T06:28:28Z
[ "python", "json" ]
Detecting 'Fold' of a webpage without loading it in a browser
39,031,809
<p>Is it possible to detect <code>Fold</code> of a webpage without actually loading it in Web Browser ?</p> <p>Say I have got 5 URLs, and I wish to know the fold position for these 5 URLs without loading them. More like a batch operation. Can this be done using <code>Java/Javascript/Python ?</code></p>
0
2016-08-19T05:40:05Z
39,032,500
<p>A 'Fold' is more or less a design principle from back in the days, when responsive wasn't a thing yet. Nowadays I wouldn't care too much about the 'fold', but ask yourself the question: 'why bother about the fold?'.</p> <p>A great article you (or your designer) should read: <a href="http://iampaddy.com/lifebelow600/" rel="nofollow">http://iampaddy.com/lifebelow600/</a></p> <p><em>Having said that...</em></p> <p>The 'fold' is relative to the browsers window height. On a mobile device, the fold can be as small is 320px, whereas full screen on a 27" screen the fold can be somewhere around 1400px.</p> <p>If your question is: 'How can I detect the Y-position of a particular element on a given resolution without loading it in a browser?', you should reconsider the term 'browser'. The browser is the part that renders your HTML/CSS to some visual eye candy. Most browsers you now render direct to the screen (Chrome, Safari, Firefox, etc).</p> <p>However... there is also such a thing as a 'headless' browser. These are browsers that don't render to the screen, but do provide functionality to interact with the page (as in: take screenshots and stuff). In that case I would suggest you take a look at <a href="http://phantomjs.org/" rel="nofollow">Phantom JS</a> or <a href="http://casperjs.org/" rel="nofollow">Casper JS</a>. Those are great packages that allow you to load one or more pages by script and take a screenshot of it, or you can read out the top offset of a DOM element by plain and simple JavaScript.</p> <p>Phantom JS example:</p> <pre><code>var page = require('webpage').create(); var url = 'http://phantomjs.org/'; page.open(url, function (status) { //Page is loaded! // Take screenshot: page.render('screenshot.png'); // Get offsetTop of the footer in the page: var offset = page.evaluate(function() { return document.querySelector('.footer').offsetTop; }); console.log('Footer offset = ' + offset); phantom.exit(); }); </code></pre> <p>Hope this helps you somewhat.</p>
0
2016-08-19T06:33:45Z
[ "javascript", "java", "python", "html" ]
I can't decode encoded text using python
39,032,019
<pre><code>from base64 import b64decode uio = input("Please enter the text you want to decode...") pu = b64decode(uio.decode()) print("Decode text : ") print(pu) </code></pre> <p>It tells me this : -</p> <pre><code>pu = b64decode(uio.decode()) AttributeError: 'str' object has no attribute 'decode' </code></pre> <p>Any help is appreciated...</p>
-2
2016-08-19T05:57:38Z
39,032,057
<p>You're trying to <code>decode()</code> a string object. That does not work. simply remove that and it should not raise an error anymore, like so:</p> <pre><code>pu = b64decode(uio) </code></pre> <p>If you're not certain there's always a string object do this:</p> <pre><code>try: pu = b64decode(uio.decode('utf-8')) except AttributeError: pu = b64decode(uio) </code></pre> <p>If you're keen on performance, note that <code>try/except</code> statements are preferred if the statement in <code>try</code> more often than not succeeds. If it doesn't, either replace it using an <code>if/else</code> statement, or simply swap the statements in <code>try/except</code> - but make sure it's catching a specific exception.</p> <p>Also note that in python3, <code>b64decode()</code> takes a <code>byte-like</code> object, instead of a string. Hence, the above code would have to be this instead:</p> <pre><code>try: pu = b64decode(uio.encode('utf-8')) # Note the use of encode() except AttributeError: pu = b64decode(uio) </code></pre>
1
2016-08-19T06:01:31Z
[ "python", "decode" ]
I can't decode encoded text using python
39,032,019
<pre><code>from base64 import b64decode uio = input("Please enter the text you want to decode...") pu = b64decode(uio.decode()) print("Decode text : ") print(pu) </code></pre> <p>It tells me this : -</p> <pre><code>pu = b64decode(uio.decode()) AttributeError: 'str' object has no attribute 'decode' </code></pre> <p>Any help is appreciated...</p>
-2
2016-08-19T05:57:38Z
39,032,096
<p>In Python 3, all strings are unicode. Thus, decode is not necessary. (Also, you are supposed to specify an encoding anyways :) ). Example:</p> <pre><code>pu = b64decode(uio.decode("utf-8")) </code></pre>
-1
2016-08-19T06:04:31Z
[ "python", "decode" ]
Generating XML file with proper indentation
39,032,046
<p>I am trying to generate the XML file in python but its not getting indented the out put is coming in straight line.</p> <pre><code>from xml.etree.ElementTree import Element, SubElement, Comment, tostring name = str(request.POST.get('name')) top = Element('scenario') environment = SubElement(top, 'environment') cluster = SubElement(top, 'cluster') cluster.text=name </code></pre> <p>I tried to use pretty parser but its giving me an error as: 'Element' object has no attribute 'read'</p> <pre><code>import xml.dom.minidom xml_p = xml.dom.minidom.parse(top) pretty_xml = xml_p.toprettyxml() </code></pre> <p>Is the input given to parser is proper format ? if this is wrong method please suggest another way to indent.</p>
1
2016-08-19T06:00:19Z
39,032,366
<p>You cannot directly parse <code>top</code> which is an <code>Element()</code>, you need to make that into a string (which is why you should import <code>tostring</code> that you are currently not using), and use <code>xml.dom.minidom.parseString()</code> on the result:</p> <pre><code>import xml.dom.minidom xml_p = xml.dom.minidom.parseString(tostring(top)) pretty_xml = xml_p.toprettyxml() print(pretty_xml) </code></pre> <p>that gives:</p> <pre><code>&lt;?xml version="1.0" ?&gt; &lt;scenario&gt; &lt;environment/&gt; &lt;cluster&gt;xyz&lt;/cluster&gt; &lt;/scenario&gt; </code></pre>
1
2016-08-19T06:24:20Z
[ "python", "xml", "xml-parsing", "xml.etree" ]
How to get cut a few value in to different col in python dataframe?
39,032,119
<p>I have a data frame like following:</p> <pre><code> pop state year value1 value2 value3 0 1.8 Ohio 2000001 3 3 1 1 1.9 Ohio 2001001 3 3 1 2 3.9 Nevada 2002 3 3 1 3 2.9 Nevada 2001003 3 3 1 4 2.0 Nevada 2002004 3 3 1 </code></pre> <p>I want to cut the df['year'] to three segment in value1, value2,value3. If the value is not long enough, I can fill with 0. So I want to get the data frame as following:</p> <pre><code> pop state year value1 value2 value3 0 1.8 Ohio 2000001 200 000 1 1 1.9 Ohio 2001001 200 100 1 2 3.9 Nevada 2002 200 2 0 3 2.9 Nevada 2001003 200 100 3 4 2.0 Nevada 2002004 200 200 4 </code></pre> <p>Moreover if the df['value1'],df['value2'],df['value3'] have different values. How can I do in python pandas?</p>
2
2016-08-19T06:06:12Z
39,034,575
<p>You can cast <code>year</code> to take string values followed by <a href="http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.str.extract.html" rel="nofollow"><code>str.extract</code></a> to extract the specific slices corresponding to the values in the 3 columns.</p> <p>You could then replace the empty strings with <code>0</code> to get back the desired result.</p> <pre><code>In [3]: df['year'] = df['year'].astype(str) In [4]: df.ix[:,3:6] = df['year'].str.extract('(?P&lt;value1&gt;\d{0,3})(?P&lt;value2&gt;\d{0,3})(?P&lt;value3&gt;\d{0,1})') In [5]: df.replace('', 0, inplace=True) In [6]: df Out[6]: pop state year value1 value2 value3 0 1.8 Ohio 2000001 200 000 1 1 1.9 Ohio 2001001 200 100 1 2 3.9 Nevada 2002 200 2 0 3 2.9 Nevada 2001003 200 100 3 4 2.0 Nevada 2002004 200 200 4 </code></pre>
2
2016-08-19T08:35:36Z
[ "python", "pandas", "dataframe" ]
Converting Python List to Numpy Array InPlace
39,032,200
<p>I have a python list that's huge (16 GB) and I want to convert it to numpy array, inplace. I can't afford this statement</p> <pre><code>huge_array = np.array(huge_list).astype(np.float16) </code></pre> <p>I'm looking for some efficient ways to transform this <code>huge_list</code> into <code>numpy array</code> without making a copy of it.</p> <p>Can anyone suggest an efficient method to do this? That might involve saving the list to disk first and then loading it as numpy array, I'm ok with that.</p> <p>I'll highly appreciate any help.</p> <p>EDIT 1 : <code>huge_list</code> is an in memory python list that's created on runtime so it's already taking 16GB. I need to convert it to <code>numpy float16</code> array.</p>
2
2016-08-19T06:12:04Z
39,033,240
<p>As I previously mentioned, the easiest would be to just dump the array to a file and then load that file as a numpy array.</p> <p>First we need the size of the huge list: </p> <pre><code>huge_list_size = len(huge_list) </code></pre> <p>Next we dump it to disk</p> <pre><code>dumpfile = open('huge_array.txt', 'w') for item in huge_list: dumpfile.write(str(item)+"\n") dumpfile.close() </code></pre> <p>Ensure we clear the memory if this all happens in the same environment</p> <pre><code>del huge_list </code></pre> <p>Next we define a simple read generator</p> <pre><code>def read_file_generator(filename): with open(filename) as infile: for i, line in enumerate(infile): yield [i, line] </code></pre> <p>And then we create a numpy array of zeros, which we fill with the generator we just created</p> <pre><code>huge_array = np.zeros(huge_list_size, dtype='float16') for i, item in read_file_generator('huge_array.txt'): huge_array[i] = item </code></pre> <p>My previous answer was incorrect. I suggested the following to be a solution, which it is not as commented by <a href="http://stackoverflow.com/users/901925/hpaulj">hpaulj</a></p> <blockquote> <p>You can do this in a multiple ways, the easiest would be to just dump the array to a file and then load that file as a numpy array:</p> <pre><code>dumpfile = open('huge_array.txt', 'w') for item in huge_array: print&gt;&gt;dumpfile, item </code></pre> <p>Then load it as a numpy array</p> <pre><code>huge_array = numpy.loadtxt('huge_array.txt') </code></pre> <p>If you want to perform further computations on this data you can also use the joblib library for memmapping, which is extremely usefull in handling large numpy array cmputations. Available at <a href="https://pypi.python.org/pypi/joblib" rel="nofollow">https://pypi.python.org/pypi/joblib</a></p> </blockquote>
2
2016-08-19T07:19:35Z
[ "python", "arrays", "list", "numpy", "in-place" ]
Converting Python List to Numpy Array InPlace
39,032,200
<p>I have a python list that's huge (16 GB) and I want to convert it to numpy array, inplace. I can't afford this statement</p> <pre><code>huge_array = np.array(huge_list).astype(np.float16) </code></pre> <p>I'm looking for some efficient ways to transform this <code>huge_list</code> into <code>numpy array</code> without making a copy of it.</p> <p>Can anyone suggest an efficient method to do this? That might involve saving the list to disk first and then loading it as numpy array, I'm ok with that.</p> <p>I'll highly appreciate any help.</p> <p>EDIT 1 : <code>huge_list</code> is an in memory python list that's created on runtime so it's already taking 16GB. I need to convert it to <code>numpy float16</code> array.</p>
2
2016-08-19T06:12:04Z
39,034,913
<p><code>np.array(huge_list, dtype=np.float16)</code> will be faster, since it only copies the list once and not twice</p> <hr> <p>You probably don't need to worry about making this copy, because the copy is a lot smaller than the original:</p> <pre><code>&gt;&gt;&gt; x = [float(i) for i in range(10000)] &gt;&gt;&gt; sys.getsizeof(x) 83112 &gt;&gt;&gt; y = np.array(x, dtype=np.float16) &gt;&gt;&gt; sys.getsizeof(y) 20096 </code></pre> <p>But that's not even the worst of it - with the python list, each number in the list is taking up memory of its own:</p> <pre><code>&gt;&gt;&gt; sum(sys.getsizeof(i) for i in x) 240000 </code></pre> <p>So the numpy array is ~15x smaller!</p>
2
2016-08-19T08:55:13Z
[ "python", "arrays", "list", "numpy", "in-place" ]
Converting Python List to Numpy Array InPlace
39,032,200
<p>I have a python list that's huge (16 GB) and I want to convert it to numpy array, inplace. I can't afford this statement</p> <pre><code>huge_array = np.array(huge_list).astype(np.float16) </code></pre> <p>I'm looking for some efficient ways to transform this <code>huge_list</code> into <code>numpy array</code> without making a copy of it.</p> <p>Can anyone suggest an efficient method to do this? That might involve saving the list to disk first and then loading it as numpy array, I'm ok with that.</p> <p>I'll highly appreciate any help.</p> <p>EDIT 1 : <code>huge_list</code> is an in memory python list that's created on runtime so it's already taking 16GB. I need to convert it to <code>numpy float16</code> array.</p>
2
2016-08-19T06:12:04Z
39,072,596
<p>You can use <code>numpy</code>'s <code>save</code> and <code>load</code> functions:</p> <p>You can use a normal python list as an argument to <code>np.save</code> and <code>np.load</code> will load directly into a numpy array.</p> <p>Example: </p> <pre><code>from tempfile import TemporaryFile outfile = TemporaryFile() x = [1, 2, 3] np.save(outfile, x) outfile.seek(0) np.load(outfile) </code></pre>
0
2016-08-22T06:28:30Z
[ "python", "arrays", "list", "numpy", "in-place" ]
Merge two dataframes with python
39,032,204
<p>I have two dataframes :dfDepas and df7 ; </p> <pre><code>dfDepas.info() &lt;class 'pandas.core.frame.DataFrame'&gt; Int64Index: 7 entries, 0 to 6 Data columns (total 4 columns): day_of_week 7 non-null object P_ACT_KW 7 non-null float64 P_SOUSCR 7 non-null float64 depassement 7 non-null float64 dtypes: float64(3), object(1) memory usage: 280.0+ bytes df7.info() &lt;class 'pandas.core.frame.DataFrame'&gt; Index: 7 entries, Fri to Thurs Data columns (total 6 columns): ACT_TIME_AERATEUR_1_F1 7 non-null float64 ACT_TIME_AERATEUR_1_F3 7 non-null float64 ACT_TIME_AERATEUR_1_F5 7 non-null float64 ACT_TIME_AERATEUR_1_F6 7 non-null float64 ACT_TIME_AERATEUR_1_F7 7 non-null float64 ACT_TIME_AERATEUR_1_F8 7 non-null float64 dtypes: float64(6) memory usage: 392.0+ bytes </code></pre> <p>I try to merge these two dataframes according ['day_of_week'] which is the index in dfDepas dataframe. I don't know how can I use this : <code>merged_df = pd.merge(dfDepas, df7, how='inner',on=['day_of_week'])</code> </p> <p>Any idea to help me please? Thank you</p> <p>Kind regards</p> <p><strong>EDIT</strong></p> <pre><code>dfDepas day_of_week P_ACT_KW P_SOUSCR depassement Fri 157.258929 427.142857 0.0 Mon 157.788110 426.875000 0.0 Sat 166.989236 426.875000 0.0 Sun 149.676215 426.875000 0.0 Thurs 157.339286 427.142857 0.0 Tues 151.122913 427.016021 0.0 Weds 159.569444 427.142857 0.0 df7 ACT_TIME_AERATEUR_1_F1 ACT_TIME_AERATEUR_1_F3 ACT_TIME_AERATEUR_1_F5 ACT_TIME_AERATEUR_1_F6 ACT_TIME_AERATEUR_1_F7 ACT_TIME_AERATEUR_1_F8 Fri 0.326258 0.330253 0.791144 0.654682 3.204544 1.008550 Sat -0.201327 -0.228196 0.044616 0.184003 -0.579214 0.292886 Sun 5.068735 5.250199 5.407271 5.546657 7.823564 5.786713 Mon -0.587129 -0.559986 -0.294890 -0.155503 2.013379 -0.131496 Tues-1.244922 -1.510025 -0.788717 -1.098790 -0.996845 -0.718881 Weds-3.264598 -3.391776 -3.188409 -3.041306 -4.846189 -4.668533 Thurs -0.178179 0.011002 -1.907544 -2.084516 -6.119337 </code></pre>
3
2016-08-19T06:12:18Z
39,032,246
<p>You can use <a href="http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.reset_index.html" rel="nofollow"><code>reset_index</code></a> and rename column <code>0</code> to <code>day_of_week</code> for matching:</p> <pre><code>merged_df = pd.merge(dfDepas, df7.reset_index().rename(columns={0:'day_of_week'}), on=['day_of_week']) </code></pre> <p>Thank you <a href="http://stackoverflow.com/questions/39032204/merge-two-dataframes-with-python/39032246#comment65414378_39032246"><code>Quickbeam2k1</code></a> for another solution:</p> <pre><code>merged_df = pd.merge(dfDepas.set_index('day_of_week'), df7, right_index=True, left_index =True) </code></pre>
3
2016-08-19T06:15:23Z
[ "python", "pandas", "merge" ]
How do I use a trained Matlab Neural Network to Python for detection purpose
39,032,261
<p>I have trained a Neural Network using a Matlab Neural Network Toolbox, particularly using the command nntool. The detection is basically for traffic sign and I have used a database of 90 traffic images(no-entry,no right and stop signs), each of 30 images of size 8*8 pixels of which no-entry signs are taken positive. My input is 64*90 and target as 1*90. Now, I need to use this neural network in Python for real-time recognition. What parameters do I need? I am completely new to neural networking.<a href="http://i.stack.imgur.com/Rkjnt.png" rel="nofollow">Here's the link to an image</a> <a href="http://i.stack.imgur.com/9QamL.png" rel="nofollow">Here's my link to the weights</a></p>
0
2016-08-19T06:17:03Z
39,035,758
<p>First you need to take the weights of your neural network. An example:</p> <pre><code>[x,t] = simplefit_dataset; net = feedforwardnet(20); net = train(net,x,t); wb = getwb(net) </code></pre> <p>Then I also suggest that you read on <a href="http://neuralnetworksanddeeplearning.com/chap1.html" rel="nofollow">ANN structure</a>, this will help you understand how the output of a neural network is calculated, given the weights. Then you can adapt it to your own needs and using that calculate an output in Python.</p>
1
2016-08-19T09:36:12Z
[ "python", "matlab", "image-processing", "neural-network" ]
Finding the corresponding value in python
39,032,311
<p>I want to find the name of the car which has maximum mpg. I want to print 'Toyota' which has maximum mpg. I want to do this in a Pythonic way. I don't like to use pandas.</p> <p>Here is my code:</p> <pre><code>dataset=[] f= open('auto-mpg-data.csv') csv_f=csv.reader(f) for row in csv_f: dataset.append(row) #reading column mpg=[] for row in dataset: mpg.append(float(row[0])) a=max(mpg) for a in dataset: print(carname) </code></pre> <p>This is my data:</p> <p><img src="http://i.stack.imgur.com/cLpp7.jpg" alt="This is my data"></p>
-1
2016-08-19T06:20:29Z
39,032,410
<p>You mentioned that you don't like <code>pandas</code>, but, for completeness sake, here is how you could have used <a href="http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html" rel="nofollow"><code>pandas.read_csv()</code></a> to read the CSV file into a <em>dataframe</em> (which is quite <em>convenient when dealing with tabular data</em>) and then get the <code>carname</code> value for the maximum <code>mpg</code> value:</p> <pre><code>import pandas as pd df = pd.read_csv('cars.csv', delim_whitespace=True) print(df.loc[df['mpg'].idxmax()]['carname']) </code></pre> <p>Prints <code>'Toyota'</code> for the provided sample CSV.</p>
0
2016-08-19T06:27:26Z
[ "python", "python-3.x", "csv" ]
Finding the corresponding value in python
39,032,311
<p>I want to find the name of the car which has maximum mpg. I want to print 'Toyota' which has maximum mpg. I want to do this in a Pythonic way. I don't like to use pandas.</p> <p>Here is my code:</p> <pre><code>dataset=[] f= open('auto-mpg-data.csv') csv_f=csv.reader(f) for row in csv_f: dataset.append(row) #reading column mpg=[] for row in dataset: mpg.append(float(row[0])) a=max(mpg) for a in dataset: print(carname) </code></pre> <p>This is my data:</p> <p><img src="http://i.stack.imgur.com/cLpp7.jpg" alt="This is my data"></p>
-1
2016-08-19T06:20:29Z
39,033,002
<p>Here are a couple of ways to improve your code:</p> <ol> <li>When you are working with files, it's always best to <a href="https://docs.python.org/3/tutorial/inputoutput.html#methods-of-file-objects" rel="nofollow"><code>close()</code></a> your file after working with it, or wrap your snippet of code in a <a href="https://docs.python.org/3/reference/compound_stmts.html#the-with-statement" rel="nofollow"><code>with</code></a> block. This closes your file automatically.</li> <li>You are iterating multiple times through the lines in your file, which isn't necessary. There are much more performant approaches to solve your problem.</li> </ol> <p>This code worked for me:</p> <pre><code>import csv with open('auto-mpg-data.csv','r') as f: csv_f = list(csv.reader(f)) best_mpg = 0 best_row = 0 for i,j in enumerate(csv_f): if i == 0: continue best_mpg = max(best_mpg, float(j[0])) if best_mpg == float(j[0]): best_row = i print (csv_f[best_row][3]) # Output: # 'Toyota' </code></pre>
0
2016-08-19T07:05:59Z
[ "python", "python-3.x", "csv" ]
Finding the corresponding value in python
39,032,311
<p>I want to find the name of the car which has maximum mpg. I want to print 'Toyota' which has maximum mpg. I want to do this in a Pythonic way. I don't like to use pandas.</p> <p>Here is my code:</p> <pre><code>dataset=[] f= open('auto-mpg-data.csv') csv_f=csv.reader(f) for row in csv_f: dataset.append(row) #reading column mpg=[] for row in dataset: mpg.append(float(row[0])) a=max(mpg) for a in dataset: print(carname) </code></pre> <p>This is my data:</p> <p><img src="http://i.stack.imgur.com/cLpp7.jpg" alt="This is my data"></p>
-1
2016-08-19T06:20:29Z
39,033,415
<p>First, every object that supports iteration, can be converted directly to a list using the <code>list</code> function. Hence instead of</p> <pre><code>for row in csv_f: dataset.append(row) </code></pre> <p>you can do:</p> <pre><code>dataset = list(csv_f) </code></pre> <p>Next, since <code>dataset</code> is a list of rows (each row is a list), you can use Python's <code>max</code> function to find the maximum row provided that the key to check against is the float value of the first number on each row, like so:</p> <pre><code>max_row = max(dataset, key=lambda row: float(row[0])) </code></pre> <p>max_row holds the row with maximum mpg</p>
0
2016-08-19T07:29:31Z
[ "python", "python-3.x", "csv" ]
Finding the corresponding value in python
39,032,311
<p>I want to find the name of the car which has maximum mpg. I want to print 'Toyota' which has maximum mpg. I want to do this in a Pythonic way. I don't like to use pandas.</p> <p>Here is my code:</p> <pre><code>dataset=[] f= open('auto-mpg-data.csv') csv_f=csv.reader(f) for row in csv_f: dataset.append(row) #reading column mpg=[] for row in dataset: mpg.append(float(row[0])) a=max(mpg) for a in dataset: print(carname) </code></pre> <p>This is my data:</p> <p><img src="http://i.stack.imgur.com/cLpp7.jpg" alt="This is my data"></p>
-1
2016-08-19T06:20:29Z
39,033,651
<p>The simplest way:</p> <pre><code>with open('auto-mpg-data.csv') as fo: reader = csv.reader(fo) next(reader) # skip the header biggest_row = max(reader, key=lambda row: float(row[0])) print(biggest_row[3]) # or whatever the index is </code></pre> <p>Note that if your csv contains incorrect data then this will fail so in order to make it fault-tolerant you would have to write a manual loop over <code>reader</code> instead of <code>max</code> and validate each <code>row</code> inside.</p> <p>Also if you've already loaded the file then you can use <code>next</code> and <code>max</code> on lists as follows:</p> <pre><code>reader = iter(dataset) next(reader) # skip the header biggest_row = max(reader, key=lambda row: float(row[0])) </code></pre>
0
2016-08-19T07:43:20Z
[ "python", "python-3.x", "csv" ]
Finding the corresponding value in python
39,032,311
<p>I want to find the name of the car which has maximum mpg. I want to print 'Toyota' which has maximum mpg. I want to do this in a Pythonic way. I don't like to use pandas.</p> <p>Here is my code:</p> <pre><code>dataset=[] f= open('auto-mpg-data.csv') csv_f=csv.reader(f) for row in csv_f: dataset.append(row) #reading column mpg=[] for row in dataset: mpg.append(float(row[0])) a=max(mpg) for a in dataset: print(carname) </code></pre> <p>This is my data:</p> <p><img src="http://i.stack.imgur.com/cLpp7.jpg" alt="This is my data"></p>
-1
2016-08-19T06:20:29Z
39,034,936
<p>Using for loop iterator...</p> <pre><code>&gt;&gt;&gt; mpg = [12,34,40.5,6] &gt;&gt;&gt; idx,maxMpg = 0,0 &gt;&gt;&gt; for n,v in enumerate(mpg): ... if v&gt;maxMpg: idx,maxMpg = n,v ... &gt;&gt;&gt; idx 2 &gt;&gt;&gt; maxMpg 40.5 &gt;&gt;&gt; carnames = ['ford','bmw','toyota','bugatti'] &gt;&gt;&gt; carnames[idx] 'toyota' &gt;&gt;&gt; </code></pre> <p>Using list comprehensions:...</p> <pre><code>&gt;&gt;&gt; maxMpg = max(mpg) &gt;&gt;&gt; maxMpgId = [maxMpg == m for m in mpg] &gt;&gt;&gt; maxMpgId [False, False, True, False] &gt;&gt;&gt; carname = [carnames[n] for n,m in enumerate(mpg) if maxMpg == m] &gt;&gt;&gt; carname ['toyota'] </code></pre> <p>Nasty one liner...</p> <pre><code>carname = [carnames[n] for n,m in enumerate(mpg) if max(mpg) == m] </code></pre>
0
2016-08-19T08:56:10Z
[ "python", "python-3.x", "csv" ]
Python High Pass Filter
39,032,325
<p>I implemented an high pass filter in python using this code:</p> <pre><code>from scipy.signal import butter, filtfilt import numpy as np def butter_highpass(cutoff, fs, order=5): nyq = 0.5 * fs normal_cutoff = cutoff / nyq b, a = butter(order, normal_cutoff, btype='high', analog=False) return b, a def butter_highpass_filter(data, cutoff, fs, order=5): b, a = butter_highpass(cutoff, fs, order=order) y = filtfilt(b, a, data) return y rawdata = np.loadtxt('sampleSignal.txt', skiprows=0) signal = rawdata fs = 100000.0 cutoff = 100 order = 6 conditioned_signal = butter_highpass_filter(signal, cutoff, fs, order) </code></pre> <p>I am applying this filter on a 100 kHz voltage signal and it works fine for cutoff frequencies >= 60 Hz. But it doesn't work below. I would like to cut off all the frequencies below 10 Hz. Any hints where my mistake is? What I observed is the lower the order of the filter the lower the cutoff frequency could be. </p> <p><a href="https://www.dropbox.com/home/Turbulence%20Spectrum" rel="nofollow">The sample Signal can be found here.</a></p>
1
2016-08-19T06:21:30Z
39,032,946
<p>I hope this can help you:</p> <pre><code>import numpy as np import pandas as pd from scipy import signal import matplotlib.pyplot as plt def sine_generator(fs, sinefreq, duration): T = duration nsamples = fs * T w = 2. * np.pi * sinefreq t_sine = np.linspace(0, T, nsamples, endpoint=False) y_sine = np.sin(w * t_sine) result = pd.DataFrame({ 'data' : y_sine} ,index=t_sine) return result def butter_highpass(cutoff, fs, order=5): nyq = 0.5 * fs normal_cutoff = cutoff / nyq b, a = signal.butter(order, normal_cutoff, btype='high', analog=False) return b, a def butter_highpass_filter(data, cutoff, fs, order=5): b, a = butter_highpass(cutoff, fs, order=order) y = signal.filtfilt(b, a, data) return y fps = 30 sine_fq = 10 #Hz duration = 10 #seconds sine_5Hz = sine_generator(fps,sine_fq,duration) sine_fq = 1 #Hz duration = 10 #seconds sine_1Hz = sine_generator(fps,sine_fq,duration) sine = sine_5Hz + sine_1Hz filtered_sine = butter_highpass_filter(sine.data,10,fps) plt.figure(figsize=(20,10)) plt.subplot(211) plt.plot(range(len(sine)),sine) plt.title('generated signal') plt.subplot(212) plt.plot(range(len(filtered_sine)),filtered_sine) plt.title('filtered signal') plt.show() </code></pre>
1
2016-08-19T07:02:50Z
[ "python", "signal-processing" ]
Python High Pass Filter
39,032,325
<p>I implemented an high pass filter in python using this code:</p> <pre><code>from scipy.signal import butter, filtfilt import numpy as np def butter_highpass(cutoff, fs, order=5): nyq = 0.5 * fs normal_cutoff = cutoff / nyq b, a = butter(order, normal_cutoff, btype='high', analog=False) return b, a def butter_highpass_filter(data, cutoff, fs, order=5): b, a = butter_highpass(cutoff, fs, order=order) y = filtfilt(b, a, data) return y rawdata = np.loadtxt('sampleSignal.txt', skiprows=0) signal = rawdata fs = 100000.0 cutoff = 100 order = 6 conditioned_signal = butter_highpass_filter(signal, cutoff, fs, order) </code></pre> <p>I am applying this filter on a 100 kHz voltage signal and it works fine for cutoff frequencies >= 60 Hz. But it doesn't work below. I would like to cut off all the frequencies below 10 Hz. Any hints where my mistake is? What I observed is the lower the order of the filter the lower the cutoff frequency could be. </p> <p><a href="https://www.dropbox.com/home/Turbulence%20Spectrum" rel="nofollow">The sample Signal can be found here.</a></p>
1
2016-08-19T06:21:30Z
39,083,588
<p>Since my reputation is low I am unable to comment on your question - "What is the relationship between cutoff and filter order?" This is not an answer to your original question.</p> <p>For an FIR filter, for a given cutoff frequency, the slope of the impulse response plot (|H(f)| vs f) is steeper for a higher order filter. So, to achieve higher attenuation for the undesired frequency range, you increase the filter order. But what happens when the filter order is so high that the impulse response is an ideal box function? You will see an inter-symbol interference (ISI in digital communications) like effect. The intensity of this effect increases when the ratio of cutoff frequency to the sampling frequency gets smaller (think of the relation between the width of box function in frequency domain and the width of the main lobe of the sinc function). </p> <p>I first observed this when I tried to implement a very narrow band low-pass IIR filter on a TI DSP microcontroller. The TI library, implemented the filter as a cascaded bi-quad structure to handle the well known truncation effects. This still did not solve the problem because the problem is not due to truncation alone. The way I solved this problem was to use an anti-aliasing filter followed by down-sampling the input signal, followed my desired low-pass IIR filter. </p> <p>I understand that you are implementing a HPF, which is an LPF translated in frequency domain. Hope this answers some of your questions. Let me know if down-sampling works for you.</p>
0
2016-08-22T15:37:56Z
[ "python", "signal-processing" ]
Flashing multiple images in python
39,032,376
<p>I am new to python and I am working on flashing (displaying) like 516 images in a go. There have been many questions like this before but none helped me. Here is the code </p> <pre><code>import cv2 import matplotlib import matplotlib.pyplot as plt import matplotlib.image as mpimg for i in range(1,516,1): #a=cv2.imread('test01001.tif') abb=cv2.imread('This PC\G:\TRAINING\1\test0100%d.tif'%(i)) cv2.imshow('test0100%d.tif'%(i),abb) plt.show() </code></pre> <p>The error that is shows is</p> <pre><code>Using matplotlib backend: Qt4Agg Populating the interactive namespace from numpy and matplotlib --------------------------------------------------------------------------- error Traceback (most recent call last) &lt;ipython-input-10-709418c86b86&gt; in &lt;module&gt;() 7 #a=cv2.imread('test01001.tif') 8 abb=cv2.imread('This PC\G:\TRAINING\1\test0100%d.png'%(i)) ----&gt; 9 cv2.imshow('test0100%d.tif'%(i),abb) 10 11 plt.show() error: ..\..\..\..\opencv\modules\highgui\src\window.cpp:266: error: (-215) size.width&gt;0 &amp;&amp; size.height&gt;0 in function cv::imshow </code></pre> <p>Please help me. I have been trying to rectify this error from many days but nothing helped me. 
I would appreciate if code is also posted in the answer Thanks a lot</p> <p>Debugged code</p> <pre><code>Using matplotlib backend: Qt4Agg Populating the interactive namespace from numpy and matplotlib &gt; &lt;ipython-input-2-d89d3a2017ad&gt;(11)&lt;module&gt;() -&gt; cv2.imshow('test0100%d.tif'%(i),abb) (Pdb) n error: '..\\..\\..\\..\\opencv\\modules\\highgui\\src\\window.cpp:266: error: (-215) size.width&gt;0 &amp;&amp; size.height&gt;0 in function cv::imshow\n' &gt; &lt;ipython-input-2-d89d3a2017ad&gt;(11)&lt;module&gt;() -&gt; cv2.imshow('test0100%d.tif'%(i),abb) (Pdb) n --Return-- &gt; &lt;ipython-input-2-d89d3a2017ad&gt;(11)&lt;module&gt;()-&gt;None -&gt; cv2.imshow('test0100%d.tif'%(i),abb) (Pdb) n error: '..\\..\\..\\..\\opencv\\modules\\highgui\\src\\window.cpp:266: error: (-215) size.width&gt;0 &amp;&amp; size.height&gt;0 in function cv::imshow\n' &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3066)run_code() -&gt; exec(code_obj, self.user_global_ns, self.user_ns) (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3069)run_code() -&gt; sys.excepthook = old_excepthook (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3070)run_code() -&gt; except SystemExit as e: (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3075)run_code() -&gt; except self.custom_exceptions: (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3080)run_code() -&gt; except: (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3081)run_code() -&gt; if result is not None: (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3082)run_code() -&gt; result.error_in_exec = sys.exc_info()[1] (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3083)run_code() -&gt; self.showtraceback() </code></pre>
-1
2016-08-19T06:24:57Z
39,032,994
<p>Your file path to <code>imread</code> is very likely invalid, so you do not get a proper image in <code>abb</code>, which then of course cannot be displayed, so you get the error about the image size.</p>
0
2016-08-19T07:05:32Z
[ "python", "opencv", "matplotlib" ]
Flashing multiple images in python
39,032,376
<p>I am new to python and I am working on flashing (displaying) like 516 images in a go. There have been many questions like this before but none helped me. Here is the code </p> <pre><code>import cv2 import matplotlib import matplotlib.pyplot as plt import matplotlib.image as mpimg for i in range(1,516,1): #a=cv2.imread('test01001.tif') abb=cv2.imread('This PC\G:\TRAINING\1\test0100%d.tif'%(i)) cv2.imshow('test0100%d.tif'%(i),abb) plt.show() </code></pre> <p>The error that is shows is</p> <pre><code>Using matplotlib backend: Qt4Agg Populating the interactive namespace from numpy and matplotlib --------------------------------------------------------------------------- error Traceback (most recent call last) &lt;ipython-input-10-709418c86b86&gt; in &lt;module&gt;() 7 #a=cv2.imread('test01001.tif') 8 abb=cv2.imread('This PC\G:\TRAINING\1\test0100%d.png'%(i)) ----&gt; 9 cv2.imshow('test0100%d.tif'%(i),abb) 10 11 plt.show() error: ..\..\..\..\opencv\modules\highgui\src\window.cpp:266: error: (-215) size.width&gt;0 &amp;&amp; size.height&gt;0 in function cv::imshow </code></pre> <p>Please help me. I have been trying to rectify this error from many days but nothing helped me. 
I would appreciate if code is also posted in the answer Thanks a lot</p> <p>Debugged code</p> <pre><code>Using matplotlib backend: Qt4Agg Populating the interactive namespace from numpy and matplotlib &gt; &lt;ipython-input-2-d89d3a2017ad&gt;(11)&lt;module&gt;() -&gt; cv2.imshow('test0100%d.tif'%(i),abb) (Pdb) n error: '..\\..\\..\\..\\opencv\\modules\\highgui\\src\\window.cpp:266: error: (-215) size.width&gt;0 &amp;&amp; size.height&gt;0 in function cv::imshow\n' &gt; &lt;ipython-input-2-d89d3a2017ad&gt;(11)&lt;module&gt;() -&gt; cv2.imshow('test0100%d.tif'%(i),abb) (Pdb) n --Return-- &gt; &lt;ipython-input-2-d89d3a2017ad&gt;(11)&lt;module&gt;()-&gt;None -&gt; cv2.imshow('test0100%d.tif'%(i),abb) (Pdb) n error: '..\\..\\..\\..\\opencv\\modules\\highgui\\src\\window.cpp:266: error: (-215) size.width&gt;0 &amp;&amp; size.height&gt;0 in function cv::imshow\n' &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3066)run_code() -&gt; exec(code_obj, self.user_global_ns, self.user_ns) (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3069)run_code() -&gt; sys.excepthook = old_excepthook (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3070)run_code() -&gt; except SystemExit as e: (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3075)run_code() -&gt; except self.custom_exceptions: (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3080)run_code() -&gt; except: (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3081)run_code() -&gt; if result is not None: (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3082)run_code() -&gt; result.error_in_exec = sys.exc_info()[1] (Pdb) n &gt; c:\anaconda2\lib\site-packages\ipython\core\interactiveshell.py(3083)run_code() -&gt; self.showtraceback() </code></pre>
-1
2016-08-19T06:24:57Z
39,035,688
<p>You can use python os module to navigate through directory , and you do not need plotting to flash image .</p> <pre><code>import cv2 import os # import os module path = r"G:\TRAINING\1" img_names = os.listdir(path) # return all image names in given path for name in img_names: im = cv2.imread( os.path.join(path,name) ) cv2.imshow('image' ,im) cv2.waitKey(300) # no of millisecond to wait for next image cv2.destroyAllWindows() </code></pre>
0
2016-08-19T09:32:39Z
[ "python", "opencv", "matplotlib" ]
Encoding issue with ASCII-safe file with codec header, depending on line count
39,032,416
<p>Here is magical bug of Python 3.5.2 on Windows that killed my day. File below fails on this system:</p> <blockquote> <pre><code>C:\Python35\python.exe encoding-problem-cp1252.py File "encoding-problem-cp1252.py", line 2 SyntaxError: encoding problem: cp1252 </code></pre> </blockquote> <p>Contains almost nothing - apart from the <code>coding</code> header there are a bunch of empty lines, but <strong>when any line is removed</strong>, even an empty one, it works again. I thought that it is a local problem, so I setup <a href="https://ci.appveyor.com/project/techtonik/testbin/build/1.0.2" rel="nofollow">job on AppVeyor</a> that showed the same behavior. </p> <p>What's going on with Python?</p> <p>There is a <a href="https://github.com/techtonik/testbin/commit/fbb8aec3650b45f690c4febfd621fe5d6892b14a#diff-9583069b05ef628abe1dda1ae5962a43" rel="nofollow">binary accurate version</a> of the file below:</p> <pre><code>#!/usr/bin/env python # -*- coding: cp1252 -*- """ There is nothing in this file, except that it is more than 50 lines long. Running it with Python 3.5.2 on Windows gives the following error: &gt;python encoding-problem-cp1252.py File "encoding-problem-cp1252.py", line 2 SyntaxError: encoding problem: cp1252 &gt;python Python 3.5.2 (v3.5.2:4def2a2901a5, Jun 25 2016, 22:01:18) [MSC v.1900 32 bit (Intel)] on win32 Type "help", "copyright", "credits" or "license" for more information. If you remove any lines from this file, it will execute successfully. """ def restore(dump): """ """ return def main(): print('ok') if __name__ == '__main__': main() </code></pre>
3
2016-08-19T06:27:40Z
39,033,975
<p>This looks like a regression caused by <a href="http://bugs.python.org/issue20731" rel="nofollow">issue #20731</a>. It looks like the position calculation is assuming there will always be CRLF line endings while your file has only got LF characters, leading to an incorrect offset <a href="https://hg.python.org/cpython/file/v3.5.2/Parser/tokenizer.c#l510" rel="nofollow">being calculated here</a>:</p> <pre class="lang-c prettyprint-override"><code>fd = fileno(tok-&gt;fp); /* Due to buffering the file offset for fd can be different from the file * position of tok-&gt;fp. If tok-&gt;fp was opened in text mode on Windows, * its file position counts CRLF as one char and can't be directly mapped * to the file offset for fd. Instead we step back one byte and read to * the end of line.*/ pos = ftell(tok-&gt;fp); if (pos == -1 || lseek(fd, (off_t)(pos &gt; 0 ? pos - 1 : pos), SEEK_SET) == (off_t)-1) { PyErr_SetFromErrnoWithFilename(PyExc_OSError, NULL); goto cleanup; } </code></pre> <p>The problem disappears when you convert your file to use Windows (CRLF) line endings, but I can understand that for cross-platform scripts that's not a practical solution.</p> <p>I've filed <a href="http://bugs.python.org/issue27797" rel="nofollow">issue #27797</a>; this should be fixed in Python itself.</p>
3
2016-08-19T08:01:23Z
[ "python", "windows", "python-3.x", "encoding", "python-3.5" ]
Update Cassandra Json Value with python Script
39,032,691
<p>Hi I am quite new to python. I do have following column in my Cassandra table. I need to update a certain value(set displayable to <strong>true</strong>) in it. How do i do this via python script?</p> <pre><code>{ "rowId": "SYSTEM", "status": null, "startDate": null, "endDate": null, "creationDate": 1457051550494, "overview": null, "displayable": false, "checklistType": "NUMERIC" } </code></pre> <p>I tried following which doesn't work. (<strong>indentation error</strong> expected an indented block)</p> <pre><code>session.execute('UPDATE CourseAssignment SET jsonObject["displayable"] = fromJson("true") WHERE "rowId" = \'%s\'' % jsonObject["SYSTEM"] ) </code></pre> <p><a href="http://i.stack.imgur.com/aIcq3.png" rel="nofollow"><img src="http://i.stack.imgur.com/aIcq3.png" alt="CourseAssignment table"></a></p> <p><a href="http://i.stack.imgur.com/j12I1.png" rel="nofollow"><img src="http://i.stack.imgur.com/j12I1.png" alt="CourseAssignment table"></a></p>
0
2016-08-19T06:46:29Z
39,033,801
<p>Based on the screenshot you shared, it looks like the <code>value</code> field is simply a <code>text</code> field (if that's not the case you must share the schema of the table), so you'll have to update the complete value stored in that field.</p>
0
2016-08-19T07:51:43Z
[ "python", "cassandra" ]
Formatting Lists into columns of a table output (python 3)
39,032,720
<p>I have data that is collected in a loop and stored under separate lists that hold only the same datatypes (e.g. only strings, only floats) as shown below:</p> <pre><code>names = ['bar', 'chocolate', 'chips'] weights = [0.05, 0.1, 0.25] costs = [2.0, 5.0, 3.0] unit_costs = [40.0, 50.0, 12.0] </code></pre> <p>I have treated these lists as "columns" of a table and wish to print them out as a formatted table that should look something like this:</p> <pre><code>Names | Weights | Costs | Unit_Costs ----------|---------|-------|------------ bar | 0.05 | 2.0 | 40.0 chocolate | 0.1 | 5.0 | 50.0 chips | 0.25 | 3.0 | 12.0 </code></pre> <p>I only know how to print out data from lists horizontally across table rows, I have looked online (and on this site) for some help regarding this issue, however I only managed to find help for getting it to work in python 2.7 and not 3.5.1 which is what I am using.<br> my question is:<br> how do I get entries from the above 4 lists to print out into a table as shown above. </p> <p>Each item index from the lists above is associated (i.e. entry[0] from the 4 lists is associated with the same item; bar, 0.05, 2.0, 40.0). </p>
3
2016-08-19T06:47:57Z
39,032,993
<p>Here is a small implementation that does what you want in basic python (no special modules). </p> <pre><code> names = ['bar', 'chocolate', 'chips'] weights = [0.05, 0.1, 0.25] costs = [2.0, 5.0, 3.0] unit_costs = [40.0, 50.0, 12.0] titles = ['names', 'weights', 'costs', 'unit_costs'] data = [titles] + list(zip(names, weights, costs, unit_costs)) for i, d in enumerate(data): line = '|'.join(str(x).ljust(12) for x in d) print(line) if i == 0: print('-' * len(line)) </code></pre> <p>Output:</p> <pre><code> names |weights |costs |unit_costs --------------------------------------------------- bar |0.05 |2.0 |40.0 chocolate |0.1 |5.0 |50.0 chips |0.25 |3.0 |12.0 </code></pre>
0
2016-08-19T07:05:25Z
[ "python", "list", "formatting", "columnsorting" ]
Formatting Lists into columns of a table output (python 3)
39,032,720
<p>I have data that is collected in a loop and stored under separate lists that hold only the same datatypes (e.g. only strings, only floats) as shown below:</p> <pre><code>names = ['bar', 'chocolate', 'chips'] weights = [0.05, 0.1, 0.25] costs = [2.0, 5.0, 3.0] unit_costs = [40.0, 50.0, 12.0] </code></pre> <p>I have treated these lists as "columns" of a table and wish to print them out as a formatted table that should look something like this:</p> <pre><code>Names | Weights | Costs | Unit_Costs ----------|---------|-------|------------ bar | 0.05 | 2.0 | 40.0 chocolate | 0.1 | 5.0 | 50.0 chips | 0.25 | 3.0 | 12.0 </code></pre> <p>I only know how to print out data from lists horizontally across table rows, I have looked online (and on this site) for some help regarding this issue, however I only managed to find help for getting it to work in python 2.7 and not 3.5.1 which is what I am using.<br> my question is:<br> how do I get entries from the above 4 lists to print out into a table as shown above. </p> <p>Each item index from the lists above is associated (i.e. entry[0] from the 4 lists is associated with the same item; bar, 0.05, 2.0, 40.0). </p>
3
2016-08-19T06:47:57Z
39,033,135
<p>After visiting docs.python.org/3/library/functions.html#zip (link provided by cdarke) </p> <p>I managed to find the solution I needed: </p> <p>using the zip method I created a new summary list of the associated data:</p> <pre><code># sort into rows of associated data and convert to list rows = zip(names, weights, costs, unit_costs) summary = list(rows) </code></pre> <p>Once I had the new summary list, I proceeded to sort and print out the table to the user (however, I will deal with the formatting later):</p> <pre><code># Sort Alphabetically and print summary.sort() print() print("*** Results shown below (alphabetically) ***") print("Name\t\tWeight\tCost\tUnit Cost") for item in summary: print("") for data in item: print(data, "\t", end='') </code></pre> <p>output is as follows:</p> <pre><code>*** Results shown below (alphabetically) *** Name Weight Cost Unit Cost bar 0.05 2.0 40.0 chips 0.25 3.0 12.0 chocolate 0.1 5.0 50.0 </code></pre> <p>Thanks to cdarke for the help :)</p>
0
2016-08-19T07:13:47Z
[ "python", "list", "formatting", "columnsorting" ]
Formatting Lists into columns of a table output (python 3)
39,032,720
<p>I have data that is collected in a loop and stored under separate lists that hold only the same datatypes (e.g. only strings, only floats) as shown below:</p> <pre><code>names = ['bar', 'chocolate', 'chips'] weights = [0.05, 0.1, 0.25] costs = [2.0, 5.0, 3.0] unit_costs = [40.0, 50.0, 12.0] </code></pre> <p>I have treated these lists as "columns" of a table and wish to print them out as a formatted table that should look something like this:</p> <pre><code>Names | Weights | Costs | Unit_Costs ----------|---------|-------|------------ bar | 0.05 | 2.0 | 40.0 chocolate | 0.1 | 5.0 | 50.0 chips | 0.25 | 3.0 | 12.0 </code></pre> <p>I only know how to print out data from lists horizontally across table rows, I have looked online (and on this site) for some help regarding this issue, however I only managed to find help for getting it to work in python 2.7 and not 3.5.1 which is what I am using.<br> my question is:<br> how do I get entries from the above 4 lists to print out into a table as shown above. </p> <p>Each item index from the lists above is associated (i.e. entry[0] from the 4 lists is associated with the same item; bar, 0.05, 2.0, 40.0). </p>
3
2016-08-19T06:47:57Z
39,033,695
<p>Some interesting table draw with <code>texttable</code>.</p> <pre><code>import texttable as tt tab = tt.Texttable() headings = ['Names','Weights','Costs','Unit_Costs'] tab.header(headings) names = ['bar', 'chocolate', 'chips'] weights = [0.05, 0.1, 0.25] costs = [2.0, 5.0, 3.0] unit_costs = [40.0, 50.0, 12.0] for row in zip(names,weights,costs,unit_costs): tab.add_row(row) s = tab.draw() print (s) </code></pre> <p><strong>Result</strong></p> <pre><code>+-----------+---------+-------+------------+ | Names | Weights | Costs | Unit_Costs | +===========+=========+=======+============+ | bar | 0.050 | 2 | 40 | +-----------+---------+-------+------------+ | chocolate | 0.100 | 5 | 50 | +-----------+---------+-------+------------+ | chips | 0.250 | 3 | 12 | +-----------+---------+-------+------------+ </code></pre> <p>You can install <code>texttable</code> with using this command <code>pip install texttable</code>.</p>
1
2016-08-19T07:46:06Z
[ "python", "list", "formatting", "columnsorting" ]
Pyramid debug toolbar serving static content over HTTP instead of HTTPS
39,033,106
<p>On our test servers, we're using the <a href="http://docs.pylonsproject.org/projects/pyramid_debugtoolbar/en/latest/" rel="nofollow">Pyramid debug toolbar</a>, however, it generates <code>http://</code> links to static content (like its CSS and JavaScript files), while the rest of the content is served over HTTPS. This causes mixed content warnings, and it breaks all functionality. Is there a way to force it to generate HTTPS links?</p> <p>I know it's possible to enable mixed content in Chrome, and this works, but it's not a feasible solution for the entire QA team.</p>
2
2016-08-19T07:12:53Z
39,034,219
<p>There might be better/simpler ways to achieve this, but one thing you can do to achieve this add the <code>_scheme='https'</code> parameter to each call to <code>request.static_url()</code>.</p> <p>For that you can of course edit <code>pyramid/url.py</code>, but you can also do this in your projects' <code>__init__.py</code>:</p> <pre><code>from pyramid.url import URLMethodsMixin URLMethodsMixin.static_url_org = URLMethodsMixin.static_url # backup of original def https_static_url(self, *args, **kw): kw['_scheme'] = 'https' # add parameter forcing https return URLMethodsMixin.static_url_org(self, *args, **kw) # call backup URLMethodsMixin.static_url = https_static_url # replace original with backup </code></pre> <p>Parameters for <code>static_url</code> works like <a href="http://docs.pylonsproject.org/projects/pyramid/en/latest/api/request.html#pyramid.request.Request.route_url" rel="nofollow"><code>route_url</code></a>. From the documentation:</p> <blockquote> <p>Note that if _scheme is passed as https, and _port is not passed, the _port value is assumed to have been passed as 443. Likewise, if _scheme is passed as http and _port is not passed, the _port value is assumed to have been passed as 80. To avoid this behavior, always explicitly pass _port whenever you pass _scheme. Setting '_scheme' automatically forces port 443</p> </blockquote>
2
2016-08-19T08:15:26Z
[ "python", "python-3.x", "pyramid", "pylons", "pyramid-debug-toolbar" ]
Pyramid debug toolbar serving static content over HTTP instead of HTTPS
39,033,106
<p>On our test servers, we're using the <a href="http://docs.pylonsproject.org/projects/pyramid_debugtoolbar/en/latest/" rel="nofollow">Pyramid debug toolbar</a>, however, it generates <code>http://</code> links to static content (like its CSS and JavaScript files), while the rest of the content is served over HTTPS. This causes mixed content warnings, and it breaks all functionality. Is there a way to force it to generate HTTPS links?</p> <p>I know it's possible to enable mixed content in Chrome, and this works, but it's not a feasible solution for the entire QA team.</p>
2
2016-08-19T07:12:53Z
39,072,118
<p>Usually you signal your web server to use HTTPS instead of HTTP by passing through <code>X-Forwarded-Proto</code> HTTP header.</p> <p>Example from Nginx:</p> <pre><code> proxy_set_header X-Forwarded-Proto $scheme; </code></pre> <p>However, this is not standard and may depend on your web server configuration. Here is full example for Nginx + uWSGI:</p> <pre><code> proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Host $server_name; proxy_set_header X-Forwarded-Proto $scheme; uwsgi_pass 127.0.0.1:8001; uwsgi_param UWSGI_SCHEME https; uwsgi_pass_header X_FORWARDED_PROTO; uwsgi_pass_header X_REAL_IP; </code></pre> <p><a href="https://github.com/Pylons/webob/blob/master/webob/request.py#L409" rel="nofollow">See how WebOb (underlying Request for Pyramid) reconstructs URL from given HTTP headers</a>.</p>
0
2016-08-22T05:49:15Z
[ "python", "python-3.x", "pyramid", "pylons", "pyramid-debug-toolbar" ]
Cython's prange not improving performance
39,033,170
<p>I'm trying to improve the performance of some metric computations with Cython's <code>prange</code>. Here are my codes:</p> <pre><code>def shausdorff(float64_t[:,::1] XA not None, float64_t[:,:,::1] XB not None): cdef: Py_ssize_t i Py_ssize_t n = XB.shape[2] float64_t[::1] hdist = np.zeros(n) #arrangement to fix contiguity XB = np.asanyarray([np.ascontiguousarray(XB[:,:,i]) for i in range(n)]) for i in range(n): hdist[i] = _hausdorff(XA, XB[i]) return hdist def phausdorff(float64_t[:,::1] XA not None, float64_t[:,:,::1] XB not None): cdef: Py_ssize_t i Py_ssize_t n = XB.shape[2] float64_t[::1] hdist = np.zeros(n) #arrangement to fix contiguity (EDITED) cdef float64_t[:,:,::1] XC = np.asanyarray([np.ascontiguousarray(XB[:,:,i]) for i in range(n)]) with nogil, parallel(num_threads=4): for i in prange(n, schedule='static', chunksize=1): hdist[i] = _hausdorff(XA, XC[i]) return hdist </code></pre> <p>Basically, in each iteration the hausdorff metric is computed between <code>XA</code> and each <code>XB[i]</code>. Here is the signature of the <code>_hausdorff</code> function:</p> <pre><code>cdef inline float64_t _hausdorff(float64_t[:,::1] XA, float64_t[:,::1] XB) nogil: ... </code></pre> <p>my problem is that both the sequential <code>shausdorff</code> and the parallel <code>phausdorff</code> have the same timings. 
Furthermore, it seems that <code>phausdorff</code> is not creating any thread at all.</p> <p>So my question is what is wrong with my code, and how can I fix it to get threading working.</p> <p>Here is my <code>setup.py</code>:</p> <pre><code>from distutils.core import setup from distutils.extension import Extension from Cython.Build import cythonize from Cython.Distutils import build_ext ext_modules=[ Extension("custom_metric", ["custom_metric.pyx"], libraries=["m"], extra_compile_args = ["-O3", "-ffast-math", "-march=native", "-fopenmp" ], extra_link_args=['-fopenmp'] ) ] setup( name = "custom_metric", cmdclass = {"build_ext": build_ext}, ext_modules = ext_modules ) </code></pre> <p><strong>EDIT 1:</strong> Here is a link to the html generated by <code>cython -a</code>: <a href="http://csrg.cl/~mavillan/custom_metric.html" rel="nofollow">custom_metric.html</a></p> <p><strong>EDIT 2:</strong> Here is an example on how to call the corresponding functions (you need to compile <a href="http://csrg.cl/~mavillan/custom_metric.pyx" rel="nofollow">the Cython file</a> first)</p> <pre><code>import custom_metric as cm import numpy as np XA = np.random.random((9000, 210)) XB = np.random.random((1000, 210, 9)) #timing 'parallel' version %timeit cm.phausdorff(XA, XB) #timing sequential version %timeit cm.shausdorff(XA, XB) </code></pre>
8
2016-08-19T07:15:34Z
39,178,995
<p>I think this the parallelization is working, but the extra overhead of the parallelization is eating up the time it would have saved. If I try with different sized arrays then I do begin to see a speed up in the parallel version</p> <pre><code>XA = np.random.random((900, 2100)) XB = np.random.random((100, 2100, 90)) </code></pre> <p>Here the parallel version takes ~2/3 of the time of the serial version for me, which certainly isn't the 1/4 you'd expect, but does at least show some benefit.</p> <hr> <p>One improvement I can offer is to replace the code that fixes contiguity:</p> <pre><code>XB = np.asanyarray([np.ascontiguousarray(XB[:,:,i]) for i in range(n)]) </code></pre> <p>with</p> <pre><code>XB = np.ascontiguousarray(np.transpose(XB,[2,0,1])) </code></pre> <p>This speeds up both the parallel and non-parallel functions fairly significantly (a factor of 2 with the arrays you originally gave). It does make it slightly more obvious that you're being slowed down by overhead in the <code>prange</code> - the serial version is actually faster for the arrays in your example.</p>
3
2016-08-27T08:19:20Z
[ "python", "numpy", "openmp", "cython", "gil" ]
Pygame - Pygame keeps drawing more sprites after moving them
39,033,505
<p><a href="http://i.stack.imgur.com/Y6qT0.png" rel="nofollow">no idea what's wrong</a></p> <p>won't let me post the full code, searched this website and google, but found no answers...</p> <pre><code>DISPLAYSURF=pygame.display.set_mode((width,height),0,32) sprite=pygame.image.load('generic_legionnary.png' def move(direction, sprite, spritex, spritey): if direction: if direction == K_UP: spritey-=5 sprite=pygame.image.load('generic_legionnary.png') elif direction == K_DOWN: spritey+=5 sprite=pygame.image.load('generic_legionnary.png') if direction == K_LEFT: spritex-=5 sprite=pygame.image.load('generic_legionnary.png') elif direction == K_RIGHT: spritex+=5 sprite=pygame.image.load('generic_legionnary.png') return sprite, spritex, spritey while True: DISPLAYSURF.blit(sprite,(spritex,spritey)) </code></pre> <p>I don't know what's wrong, it keeps putting new images when moving, how can I make sure that it's only one drawing that's moving? thanks!</p> <p>Edit: solved it</p> <pre><code>screen.fill((255,255,255)) </code></pre> <p>sets the background color and the tracing thing disappears</p>
0
2016-08-19T07:34:31Z
39,034,694
<p>Do you clear your screen at the start of each loop ?</p> <p>Why are you loading new sprites each time you move ?</p> <p>Who need only the one loaded before the function, not the others.</p> <p>If you don't clear your screen in each loop, you get this trace. (or your trace might be the old sprites that you have overwritten, but normally the garbage collector should get rid of them.)</p>
0
2016-08-19T08:42:06Z
[ "python", "pygame" ]
How to remove duplicates of huge lists of objects in Python
39,033,565
<p>I have gigantic lists of objects with many duplicates (I'm talking thousands of lists with thousands of objects each, taking up to about 10million individual objects (already without duplicates).</p> <p>I need to go through them and remove all the duplicates inside each list (no need to compare between lists, only inside each one).</p> <p>I can, of course, go through the lists and compare with any dedupe algorithm that has been posted a lot of times, but I would guess this would take me forever.</p> <p>I thought I could create an object with a crafted <code>__hash__</code> method and use a <code>list(set(obj))</code> to remove them but first: I don't know if this would work, second: I would still have to loop the lists to convert the elements to the new object.</p> <p>I know Python is not the best solution for what I am trying to achieve, but in this case, it will have to be done in Python. I wonder what would be the best way to achieve this with the best performance possible.</p> <p><strong>Edit</strong>: for clarification: I have about 2k lists of objects, with about 5k objects inside each one (rough estimate). The duplicated objects are copies, and not references to the same memory location. The lists (dicts) are basically converted JSON arrays</p> <hr> <p><strong>Edit 2</strong>: I'm sorry for not being clear, I will rephrase.</p> <p>This is for a django data migration, although my question only applies to the data 'formatting' and not the framework itself or database insertion. I inserted a whole bunch of data as JSON to a table for later analysis. Now I need to normalize it and save it correctly. I created new tables and need to migrate the data.</p> <p>So when I retrieve the data from the db I have about 2000 JSON arrays. Applying <code>json.loads(arr)</code> (by the documentation) I get 2000 lists of objects (dicts). 
Each dict has only strings, numbers and booleans as values to each key, no nested objects/arrays, so something like this:</p> <pre><code>[ { a: 'aa', b: 2, c: False, date: &lt;date_as_long&gt; // ex: 1471688210 }, { a: 'bb', b: 4, c: True, date: &lt;date_as_long&gt; // ex: 1471688210 } ] </code></pre> <p>What I need is to run through every list and remove duplicates. Something is considered duplicate if all the fields except the date match (this wasn't in the original question, as I had not predicted it) inside a list. If they match across different lists, they are not considered duplicates.</p> <p>After better analysis of the contents, I found out I have close to 2 million individual records (not 10 million as said previously). The performance problems I face are because each dict needs to suffer some sort of data formatting (converting dates, for example) and 'wrap' it in the model object for database insertion: <code>ModelName(a='aaa', b=2, c=True, date=1471688210)</code>.</p> <p>The insertion on the database itself is done by <code>bulk_create</code>.</p> <p><strong>NOTE</strong>: I'm sorry for the lack of clarification on the original question. The more I dug into this the more I learned about what had to be done and how to handle the data.</p> <hr> <p>I accepted @tuergeist 's answer because it pointed to what I needed even with bad details on my part.</p> <p>Given dicts cannot be hashed, thus I can't add them to a set(), my solution was to create a <code>set()</code> of tuples for the duplicated data, and verify the duplicates with it. 
This prevented an extra iteration if the duplicates where in a list.</p> <p>So it was something like this:</p> <pre><code>data = [lots of lists of dicts] formatted_data = [] duplicates = set() for my_list in data: for element in my_list: a = element['a'] b = convert_whatever(element['b']) c = element['c'] d = (a, b, c) # Notice how only the elements that count for checking if it's a duplicate are here (not the date) if d not in duplicates: duplicates.add(d) normalized_data = { a: a, b: b, c: c, date: element['date'] } formatted_data.append(MyModel(**normalized_data) duplicates.clear() </code></pre> <p>After this, for better memory management, I used generators:</p> <pre><code>data = [lots of lists of dicts] formatted_data = [] duplicates = set() def format_element(el): a = el['a'] b = convert_whatever(el['b']) c = el['c'] d = (a, b, c) if d not in duplicates: duplicates.add(d) normalized_data = { 'a': a, 'b': b, 'c': c, 'date': el['date'] } formatted_data.append(MyModel(**normalized_data)) def iter_list(l): [format_element(x) for x in l] duplicates.clear() [iter_list(my_list) for my_list in data] </code></pre> <p>Working code here: <a href="http://codepad.org/frHJQaLu" rel="nofollow">http://codepad.org/frHJQaLu</a></p> <p><strong>NOTE</strong>: My finished code is a little different (and in a functional style) than this one. This serves only as an example of how I solved the problem.</p> <hr> <p><strong>Edit 3</strong>: For the database insertion I used bulk_create. In the end it took 1 minute to format everything correctly (1.5 million unique entries, 225k duplicates) and 2 minutes to insert everything to the database.</p> <p>Thank you all!</p>
1
2016-08-19T07:38:22Z
39,033,758
<p>I'd suggest to have a sorted list (if possible), so you can be more precise when you want compare items (like a dictionnary I mean). A hash (or not) list can fulfill that thing.</p> <p>If you have the ability to manage the "add and delete" from your lists, it's better ! Sort the new items each time you add/delete. (IMO good if you have hash list, forgot if you have linked list).</p> <p>Complexity will of course depends on your structure (fifo/filo list, linked list, hash...) </p>
1
2016-08-19T07:49:07Z
[ "python", "python-3.x" ]
How to remove duplicates of huge lists of objects in Python
39,033,565
<p>I have gigantic lists of objects with many duplicates (I'm talking thousands of lists with thousands of objects each, taking up to about 10million individual objects (already without duplicates).</p> <p>I need to go through them and remove all the duplicates inside each list (no need to compare between lists, only inside each one).</p> <p>I can, of course, go through the lists and compare with any dedupe algorithm that has been posted a lot of times, but I would guess this would take me forever.</p> <p>I thought I could create an object with a crafted <code>__hash__</code> method and use a <code>list(set(obj))</code> to remove them but first: I don't know if this would work, second: I would still have to loop the lists to convert the elements to the new object.</p> <p>I know Python is not the best solution for what I am trying to achieve, but in this case, it will have to be done in Python. I wonder what would be the best way to achieve this with the best performance possible.</p> <p><strong>Edit</strong>: for clarification: I have about 2k lists of objects, with about 5k objects inside each one (rough estimate). The duplicated objects are copies, and not references to the same memory location. The lists (dicts) are basically converted JSON arrays</p> <hr> <p><strong>Edit 2</strong>: I'm sorry for not being clear, I will rephrase.</p> <p>This is for a django data migration, although my question only applies to the data 'formatting' and not the framework itself or database insertion. I inserted a whole bunch of data as JSON to a table for later analysis. Now I need to normalize it and save it correctly. I created new tables and need to migrate the data.</p> <p>So when I retrieve the data from the db I have about 2000 JSON arrays. Applying <code>json.loads(arr)</code> (by the documentation) I get 2000 lists of objects (dicts). 
Each dict has only strings, numbers and booleans as values to each key, no nested objects/arrays, so something like this:</p> <pre><code>[ { a: 'aa', b: 2, c: False, date: &lt;date_as_long&gt; // ex: 1471688210 }, { a: 'bb', b: 4, c: True, date: &lt;date_as_long&gt; // ex: 1471688210 } ] </code></pre> <p>What I need is to run through every list and remove duplicates. Something is considered duplicate if all the fields except the date match (this wasn't in the original question, as I had not predicted it) inside a list. If they match across different lists, they are not considered duplicates.</p> <p>After better analysis of the contents, I found out I have close to 2 million individual records (not 10 million as said previously). The performance problems I face are because each dict needs to suffer some sort of data formatting (converting dates, for example) and 'wrap' it in the model object for database insertion: <code>ModelName(a='aaa', b=2, c=True, date=1471688210)</code>.</p> <p>The insertion on the database itself is done by <code>bulk_create</code>.</p> <p><strong>NOTE</strong>: I'm sorry for the lack of clarification on the original question. The more I dug into this the more I learned about what had to be done and how to handle the data.</p> <hr> <p>I accepted @tuergeist 's answer because it pointed to what I needed even with bad details on my part.</p> <p>Given dicts cannot be hashed, thus I can't add them to a set(), my solution was to create a <code>set()</code> of tuples for the duplicated data, and verify the duplicates with it. 
This prevented an extra iteration if the duplicates where in a list.</p> <p>So it was something like this:</p> <pre><code>data = [lots of lists of dicts] formatted_data = [] duplicates = set() for my_list in data: for element in my_list: a = element['a'] b = convert_whatever(element['b']) c = element['c'] d = (a, b, c) # Notice how only the elements that count for checking if it's a duplicate are here (not the date) if d not in duplicates: duplicates.add(d) normalized_data = { a: a, b: b, c: c, date: element['date'] } formatted_data.append(MyModel(**normalized_data) duplicates.clear() </code></pre> <p>After this, for better memory management, I used generators:</p> <pre><code>data = [lots of lists of dicts] formatted_data = [] duplicates = set() def format_element(el): a = el['a'] b = convert_whatever(el['b']) c = el['c'] d = (a, b, c) if d not in duplicates: duplicates.add(d) normalized_data = { 'a': a, 'b': b, 'c': c, 'date': el['date'] } formatted_data.append(MyModel(**normalized_data)) def iter_list(l): [format_element(x) for x in l] duplicates.clear() [iter_list(my_list) for my_list in data] </code></pre> <p>Working code here: <a href="http://codepad.org/frHJQaLu" rel="nofollow">http://codepad.org/frHJQaLu</a></p> <p><strong>NOTE</strong>: My finished code is a little different (and in a functional style) than this one. This serves only as an example of how I solved the problem.</p> <hr> <p><strong>Edit 3</strong>: For the database insertion I used bulk_create. In the end it took 1 minute to format everything correctly (1.5 million unique entries, 225k duplicates) and 2 minutes to insert everything to the database.</p> <p>Thank you all!</p>
1
2016-08-19T07:38:22Z
39,034,026
<p>A fast, not order preserving solution for (hashable items) is</p> <pre><code>def unify(seq): # Not order preserving return list(set(seq)) </code></pre> <p><strong>Complete Edit</strong></p> <p>I assume, that you have <code>dicts</code> inside a <code>list</code>. And you have many lists. The solution to remove duplicates from a single list is:</p> <pre><code>def remove_dupes(mylist): newlist = [mylist[0]] for e in mylist: if e not in newlist: newlist.append(e) return newlist </code></pre> <p>A list here contains the following dicts. (But all random)</p> <pre><code>{"firstName":"John", "lastName":"Doe"}, {"firstName":"Anna", "lastName":"Smith"}, {"firstName":"Peter","lastName":"Jones"} </code></pre> <p>Running this, it took 8s for 2000 dicts on my MacBook (2,4GHz, i5)</p> <p>Complete code: <a href="http://pastebin.com/NSKuuxUe" rel="nofollow">http://pastebin.com/NSKuuxUe</a></p>
0
2016-08-19T08:04:24Z
[ "python", "python-3.x" ]
Bs4 select_one vs find
39,033,612
<p>I was wondering what is the difference between performing <code>bs.find('div')</code> and <code>bs.select_one('div')</code>. Same goes for <code>find_all</code> and <code>select</code>. </p> <p>Is there any difference performance wise, or if any is better to use over the other in specific cases.</p>
2
2016-08-19T07:40:56Z
39,033,728
<p><code>select()</code> and <code>select_one()</code> give you a different way navigating through an HTML tree using the <a href="https://www.crummy.com/software/BeautifulSoup/bs4/doc/#css-selectors" rel="nofollow">CSS selectors</a> which has rich and convenient syntax. Though, the CSS selector syntax support in <code>BeautifulSoup</code> is <em>limited</em> but covers most common cases. </p> <p>Performance-wise, it really depends on an HTML tree to parse and on which element, how deep is it and what selector is used to locate it. Plus, what <code>find()</code> + <code>find_all()</code> alternative there is to compare the <code>select()</code> with, is also important. In a simple case like <code>bs.find('div')</code> vs <code>bs.select_one('div')</code>, I'd say that, generally, <code>find()</code> should perform faster simply because <a href="http://bazaar.launchpad.net/~leonardr/beautifulsoup/bs4/view/head:/bs4/element.py#L1331" rel="nofollow">there is a lot going on to support CSS selector syntax under-the-hood</a>.</p>
2
2016-08-19T07:47:28Z
[ "python", "beautifulsoup", "html-parsing", "bs4" ]
Bs4 select_one vs find
39,033,612
<p>I was wondering what is the difference between performing <code>bs.find('div')</code> and <code>bs.select_one('div')</code>. Same goes for <code>find_all</code> and <code>select</code>. </p> <p>Is there any difference performance wise, or if any is better to use over the other in specific cases.</p>
2
2016-08-19T07:40:56Z
39,036,044
<p>select_one is normally much faster than find:</p> <pre><code>In [13]: req = requests.get("https://httpbin.org/") In [14]: soup = BeautifulSoup(req.content, "html.parser") In [15]: soup.select_one("#DESCRIPTION") Out[15]: &lt;h2 id="DESCRIPTION"&gt;DESCRIPTION&lt;/h2&gt; In [16]: soup.find("h2", id="DESCRIPTION") Out[16]: &lt;h2 id="DESCRIPTION"&gt;DESCRIPTION&lt;/h2&gt; In [17]: timeit soup.find("h2", id="DESCRIPTION") 100 loops, best of 3: 5.27 ms per loop In [18]: timeit soup.select_one("#DESCRIPTION") 1000 loops, best of 3: 649 µs per loop In [19]: timeit soup.select_one("div") 10000 loops, best of 3: 61 µs per loop In [20]: timeit soup.find("div") 1000 loops, best of 3: 446 µs per loop </code></pre> <p><em>find</em> basically is just the same as using <em>find_all</em> setting the limit to 1, then checking if the list returned is empty or not, indexing, if it is not empty or returning None if it is.</p> <pre><code>def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Return only the first child of this Tag matching the given criteria.""" r = None l = self.find_all(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r </code></pre> <p><em>select_one</em> does something similar using <em>select</em>:</p> <pre><code>def select_one(self, selector): """Perform a CSS selection operation on the current element.""" value = self.select(selector, limit=1) if value: return value[0] return None </code></pre> <p>The cost is much lower with the select without all the keyword args to process.</p> <p><a href="http://stackoverflow.com/questions/38028384/beautifulsoup-is-there-a-difference-between-find-and-select-python-3-x/38033910#38033910">Beautifulsoup : Is there a difference between .find() and .select() - python 3.xx</a> covers a bit more on the differences.</p>
1
2016-08-19T09:54:03Z
[ "python", "beautifulsoup", "html-parsing", "bs4" ]
How can I view python library functions in VScode
39,033,652
<p>I can use <kbd>Ctrl</kbd>+left-click in the name of function to view library functions in PyCharm, and I want to do the same in VScode; what should I do?</p> <p>I may not be very clear, so I recorded a gif.</p> <p><a href="http://i.stack.imgur.com/xBAUK.gif" rel="nofollow"><img src="http://i.stack.imgur.com/xBAUK.gif" alt="demo"></a></p>
0
2016-08-19T07:43:26Z
39,037,438
<p>@cnkl, what you're after is the 'go to definition' feature. You can go to the definition of a symbol by pressing F12. If you press Ctrl and hover over a symbol, a preview of the declaration will appear</p> <p>You can find more details here: <a href="https://code.visualstudio.com/docs/editor/editingevolved#_go-to-definition" rel="nofollow">https://code.visualstudio.com/docs/editor/editingevolved#_go-to-definition</a></p>
0
2016-08-19T11:03:38Z
[ "python", "vscode" ]
Are Jupyter notebook executors distributed dynamically in Apache Spark?
39,033,661
<p>I got a question in order to better understand a big data concept within Apache Hadoop Spark. Not sure if it's off-topic in this forum, but let me know.</p> <p>Imagine a Apache Hadoop cluster with 8 servers managed by the Yarn resource manager. I uploaded a file into HDFS (file system) that is configured with 64MB blocksize and a replication count of 3. That file is then split into blocks of 64MB. Now let's imagine the blocks are distributed by HDFS onto node 1, 2 and 3. </p> <p>But now I'm coding some Python code with a Jupyter notebook. Therefore the notebook is started with this command:</p> <blockquote> <p>PYSPARK_DRIVER_PYTHON=jupyter PYSPARK_DRIVER_PYTHON_OPTS="notebook" pyspark --master yarn-client --num-executors 3 --executor-cores 4 --executor-memory 16G</p> </blockquote> <p>Within the notebook I'm loading the file from HDFS to do some analytics. When I executed my code, I can see in the YARN Web-UI that I got 3 executors and how the jobs are submitted (distributed) to the executors. </p> <p>The interesting part is, that my executors are fixed to specific computing nodes right after the start command (see above). For instance node 6, 7 and 8. </p> <p>My questions are:</p> <ol> <li>Is my assumption correct, that the executor nodes are fixed to computing nodes and the HDFS blocks will be transferred to the executors once I'm accessing (loading) the file from HDFS?</li> <li>Or, are the executors dynamically assigned and started at the nodes where the data is (node 1, 2 and 3). In this case my observation in the YARN web-ui must be wrong.</li> </ol> <p>I'm really interested in understanding this better.</p> <p><a href="http://i.stack.imgur.com/I3iZL.png" rel="nofollow"><img src="http://i.stack.imgur.com/I3iZL.png" alt="cluster setup"></a></p>
1
2016-08-19T07:44:05Z
39,034,110
<blockquote> <p>Are Jupyter notebook executors distributed dynamically in Apache Spark</p> </blockquote> <p>For the sake of clarity, let's distinguish</p> <ul> <li><p>Jupyter notebooks and their associated kernels - a kernel is the Python process behind a notebook's UI. A kernel executes whatever code you type and submit in your notebook. Kernels are managed by Jupyter, not by Spark.</p></li> <li><p>Spark executors - these are the compute resources allocated on the YARN cluster to execute spark jobs</p></li> <li><p>HDFS data nodes - these are where your data resides. Data nodes may or may not be the same as executor nodes.</p></li> </ul> <blockquote> <p>Is my assumption correct, that the executor nodes are fixed to computing nodes and the HDFS blocks will be transferred to the executors once I'm accessing (loading) the file from HDFS</p> </blockquote> <p>Yes and no - yes, Spark takes <a href="http://spark.apache.org/docs/latest/tuning.html#data-locality" rel="nofollow">data locality</a> into account when scheulding jobs. No, there is no guarantee. As per <a href="http://spark.apache.org/docs/latest/tuning.html#data-locality" rel="nofollow">Spark documentation</a>:</p> <p><em>(...) there are two options: a) wait until a busy CPU frees up to start a task on data on the same server, or b) immediately start a new task in a farther away place that requires moving data there. What Spark typically does is wait a bit in the hopes that a busy CPU frees up. Once that timeout expires, it starts moving the data from far away to the free CPU. (...)</em> </p> <blockquote> <p>Or, are the executors dynamically assigned and started at the nodes where the data is (node 1, 2 and 3). </p> </blockquote> <p>This depends on the configuration. In general executors are allocated to a spark application (i.e. a SparkContext) dynamically, and deallocated when no longer used. 
However, executors are kept alive for some time, as per the <a href="http://spark.apache.org/docs/latest/job-scheduling.html#remove-policy" rel="nofollow">Job scheduling documentation</a>:</p> <p><em>(...)A Spark application removes an executor when it has been idle for more than spark.dynamicAllocation.executorIdleTimeout seconds.(...)</em></p> <p>To get more control on what runs where, you may use <a href="http://spark.apache.org/docs/latest/job-scheduling.html#fair-scheduler-pools" rel="nofollow">Scheduler Pools</a>.</p>
3
2016-08-19T08:08:48Z
[ "python", "apache-spark", "pyspark", "jupyter", "jupyter-notebook" ]
Plot confusion matrix sklearn with multiple labels
39,033,880
<p>I am plotting a confusion matrix for a multiple labelled data, where labels look like: </p> <blockquote> <p>label1: 1, 0, 0, 0</p> <p>label2: 0, 1, 0, 0</p> <p>label3: 0, 0, 1, 0</p> <p>label4: 0, 0, 0, 1</p> </blockquote> <p>I am able to classify successfully using the below code. <strong>I only need some help to plot confusion matrix.</strong></p> <pre><code> for i in range(4): y_train= y[:,i] print('Train subject %d, class %s' % (subject, cols[i])) lr.fit(X_train[::sample,:],y_train[::sample]) pred[:,i] = lr.predict_proba(X_test)[:,1] </code></pre> <p>I used the following code to print confusion matrix, but it always return a 2X2 matrix </p> <pre><code>prediction = lr.predict(X_train) print(confusion_matrix(y_train, prediction)) </code></pre>
2
2016-08-19T07:56:40Z
39,034,386
<p>I see this is still an open issue in <code>sklearn</code>'s repository:</p> <p><a href="https://github.com/scikit-learn/scikit-learn/issues/3452" rel="nofollow">https://github.com/scikit-learn/scikit-learn/issues/3452</a></p> <p>However there have been some attempts at implementing it. From the same #3452 thread issue:</p> <p><a href="https://github.com/Magellanea/scikit-learn/commit/514287c1d5dad2f0ab4918dc4da5cf7053fe6734#diff-b04acd877dd793f28ae7be13a999ed88R187" rel="nofollow">https://github.com/Magellanea/scikit-learn/commit/514287c1d5dad2f0ab4918dc4da5cf7053fe6734#diff-b04acd877dd793f28ae7be13a999ed88R187</a></p> <p>You can check the code proposed in the function and see if that fits your needs.</p>
0
2016-08-19T08:24:25Z
[ "python", "machine-learning", "scikit-learn", "confusion-matrix" ]
Minimize delay between two sounds played using winsound.PlaySound
39,033,922
<pre><code>winsound.PlaySound('1.wav', winsound.SND_FILENAME) time.sleep(0.15) winsound.PlaySound('1.wav', winsound.SND_FILENAME) </code></pre> <p><code>1.wav</code> is a sound file of length 01s</p> <p>There is delay of more then a second between the two <code>winsound.PlaySound</code> calls, even if <code>time.sleep</code> is commented out. But if the parameter for <code>time.sleep</code> is increase for more then <code>1s</code> then my code runs as it should. </p> <p>I need to bring the delay down to a <code>0.15s</code>.</p> <p>Thanks in advance.</p>
1
2016-08-19T07:58:42Z
39,044,558
<p>The winsound module seems to be unloved. It hasn't adapted to the Python 3 distinction between bytes and strings (<a href="http://bugs.python.org/issue11620" rel="nofollow">http://bugs.python.org/issue11620</a>), so it can't play a .wav file that is stored in memory. </p> <p>You should probably move to a different audio module such as <code>pyaudio</code> <a href="https://people.csail.mit.edu/hubert/pyaudio/" rel="nofollow">https://people.csail.mit.edu/hubert/pyaudio/</a> </p> <p>Since the file is a short one. You should read the whole file into memory. If there is still a gap, you can join the file to itself with 0.15 s of silence in the middle and then play (that single file). Audio modules <code>wave</code>, <code>pydub</code>, <code>audioop</code> or <code>audiolab</code> can do the joining. <a href="http://stackoverflow.com/questions/2890703/how-to-join-two-wav-files-using-python">How to join two wav files using python?</a></p>
1
2016-08-19T17:15:42Z
[ "python" ]
Python : Argument based Singleton
39,033,946
<p>I'm following this <a href="http://stackoverflow.com/a/6798042/820410">link</a> and trying to make a singleton class. But, taking arguments (passed while initiating a class) into account so that the same object is returned if the arguments are same.</p> <p>So, instead of storing class name/class reference as a <code>dict</code> key, I want to store passed arguments as keys in <code>dict</code>. But, there could be unhashable arguments also (like <code>dict</code>, <code>set</code> itself).</p> <p>What is the best way to store class arguments and class objects mapping? So that I can return an object corresponding to the arguments.</p> <p>Thanks anyways.</p> <hr> <p><strong>EDIT-1</strong> : A little more explanation. Let's say there is class as follows</p> <pre><code>class A: __metaclass__ == Singleton def __init__(arg1, arg2): pass </code></pre> <p>Now, <code>A(1,2)</code> should always return the same object. But, it should be different from <code>A(3,4)</code></p> <p>I think, the arguments very much define the functioning of a class. Let's say if the class is to make <code>redis</code> connections. I might want to create 2 singletons objects with diff <code>redis</code> hosts as parameters, but the underlying class/code could be common. </p>
2
2016-08-19T07:59:42Z
39,038,020
<p>As theheadofabroom and me already mentioned in the comments, there are some odds when relying on non-hashable values for instance caching or memoization. Therefore, if you still want to do exactly that, the following example does not hide the memoization in the <code>__new__</code> or <code>__init__</code> method. (A self-memoizing class would be hazardous because the memoization criterion can be fooled by code that you don't control).</p> <p>Instead, I provide the function <code>memoize</code> which returns a memoizing factory function for a class. Since there is no generic way to tell from non-hashable arguments, if they will result in an instance that is equivalent to an already existing isntance, the memoization semantics have to be provided explicitly. This is achieved by passing the <code>keyfunc</code> function to <code>memoize</code>. <code>keyfunc</code> takes the same arguments as the class' <code>__init__</code> method and returns a hashable key, whose equality relation (<code>__eq__</code>) determines memoization.</p> <p>The proper use of the memoization is in the responsibility of the using code (providing a sensible <code>keyfunc</code> and using the factory), since the class to be memoized is not modified and can still be instantiated normally.</p> <pre><code>def memoize(cls, keyfunc): memoized_instances = {} def factory(*args, **kwargs): key = keyfunc(*args, **kwargs) if key in memoized_instances: return memoized_instances[key] instance = cls(*args, **kwargs) memoized_instances[key] = instance return instance return factory class MemoTest1(object): def __init__(self, value): self.value = value factory1 = memoize(MemoTest1, lambda value : value) class MemoTest2(MemoTest1): def __init__(self, value, foo): MemoTest1.__init__(self, value) self.foo = foo factory2 = memoize(MemoTest2, lambda value, foo : (value, frozenset(foo))) m11 = factory1('test') m12 = factory1('test') assert m11 is m12 m21 = factory2('test', [1, 2]) lst = [1, 2] m22 = 
factory2('test', lst) lst.append(3) m23 = factory2('test', lst) assert m21 is m22 assert m21 is not m23 </code></pre> <p>I only included <code>MemoTest2</code> as a sublclass of <code>MemoTest1</code> to show that there is no magic involved in using regular class inheritance.</p>
0
2016-08-19T11:33:02Z
[ "python", "dictionary", "arguments", "singleton", "hashable" ]
Python: Date Conversion Error
39,033,982
<p>I'm trying to convert a string into a date format, to be later stored into an SQLite database. Below is the code line at which I'm getting an error.</p> <pre><code>date_object = datetime.strptime(date, '%b %d, %Y %H:%M %Z') </code></pre> <p>And this is the error:</p> <pre><code>File "00Basic.py", line 20, in spider date_object = datetime.strptime(date, '%b %d, %Y %H:%M %Z') File "C:\Python27\lib\_strptime.py", line 332, in _strptime (data_string, format)) ValueError: time data 'Aug 19, 2016 08:13 IST' does not match format '%b %d, %Y %H %M %Z' </code></pre> <p>Question 1: How do I resolve this error?</p> <p>Question 2: Is this the right approach for preparing to store the date in SQLite later?</p> <p>Please Note: Very new to programming.</p>
0
2016-08-19T08:01:39Z
39,034,769
<p>The problem is located in the <code>%Z</code> (Time zone) part of the format. As the <a href="https://docs.python.org/2/library/datetime.html" rel="nofollow">documentation</a> explains</p> <pre><code>%Z Time zone name (empty string if the object is naive). (empty), UTC, EST, CST </code></pre> <p>It looks like only UTC, EST and CST are valid. (Or it just doesn't recognize <strong>IST</strong>.)</p> <p>In order to fix this, you could use the <code>%z</code> parameter that accepts any UTC offset, like so:</p> <pre><code>struct_time = time.strptime("Aug 19, 2016 08:13 +0530", '%b %d, %Y %H:%M %z') </code></pre> <p>Update: Although this works fine in Python 3.2+, it raises an exception when it's run with Python 2.</p>
0
2016-08-19T08:47:16Z
[ "python", "sqlite", "date-conversion" ]
Python: Date Conversion Error
39,033,982
<p>I'm trying to convert a string into a date format, to be later stored into an SQLite database. Below is the code line at which I'm getting an error.</p> <pre><code>date_object = datetime.strptime(date, '%b %d, %Y %H:%M %Z') </code></pre> <p>And this is the error:</p> <pre><code>File "00Basic.py", line 20, in spider date_object = datetime.strptime(date, '%b %d, %Y %H:%M %Z') File "C:\Python27\lib\_strptime.py", line 332, in _strptime (data_string, format)) ValueError: time data 'Aug 19, 2016 08:13 IST' does not match format '%b %d, %Y %H %M %Z' </code></pre> <p>Question 1: How do I resolve this error?</p> <p>Question 2: Is this the right approach for preparing to store the date in SQLite later?</p> <p>Please Note: Very new to programming.</p>
0
2016-08-19T08:01:39Z
39,035,752
<p>You could use <a href="https://pypi.python.org/pypi/pytz?" rel="nofollow"><code>pytz</code></a> for the timezone conversion as shown:</p> <pre><code>from datetime import datetime from pytz import timezone s = "Aug 19, 2016 08:13 IST".replace('IST', '') print(timezone('Asia/Calcutta').localize(datetime.strptime(s.rstrip(), '%b %d, %Y %H:%M'))) #2016-08-19 08:13:00+05:30 #&lt;class 'datetime.datetime'&gt; </code></pre> <p>I would suggest you to use <a href="https://pypi.python.org/pypi/python-dateutil/2.4.1" rel="nofollow"><code>dateutil</code></a> incase you are handling multiple timezones of string.</p>
0
2016-08-19T09:35:51Z
[ "python", "sqlite", "date-conversion" ]
Python BeautifulSoup - Scraping Google Finance historical data
39,033,999
<p>I was trying to scrap Google Finance historical data. I was need of to total number of rows, which is located along with the pagination. The following is the div tag which is responsible for displaying the total number of rows:</p> <pre><code>&lt;div class="tpsd"&gt;1 - 30 of 1634 rows&lt;/div&gt; </code></pre> <p>I tried using the following code to get the data, but its returning an empty list:</p> <pre><code>soup.find_all('div', 'tpsd') </code></pre> <p>I tried getting the entire table but even then I was not successful, when I checked the page source I was able to find the value inside a JavaScript function. When I Googled how to get values from script tag, it was mentioned to used regex. So, I tried using regex and the following is my code:</p> <pre><code>import requests import re from bs4 import BeautifulSoup r = requests.get('https://www.google.com/finance/historical?cid=13564339&amp;startdate=Jan+01%2C+2010&amp;enddate=Aug+18%2C+2016&amp;num=30&amp;ei=ilC1V6HlPIasuASP9Y7gAQ') soup = BeautifulSoup(r.content,'lxml') var = soup.find_all("script")[8].string a = re.compile('google.finance.applyPagination\((.*)\'http', re.DOTALL) b = a.search(var) num = b.group(1) print(num.replace(',','').split('\n')[3]) </code></pre> <p>I am able to get the values which I want, but my doubt is whether the above code which I used to get the values is correct, or is there any other way better way. Kindly help.</p>
1
2016-08-19T08:02:46Z
39,034,299
<p>You can just use the python module: <a href="https://pypi.python.org/pypi/googlefinance" rel="nofollow">https://pypi.python.org/pypi/googlefinance</a></p> <p>The api is simple:</p> <pre><code>#The google finance API that we need. from googlefinance import getQuotes #The json handeler, since the API returns a JSON. import json intelJSON = (getQuotes('INTC')) intelDump = json.dumps(intelJSON, indent=2) intelInfo = json.loads(intelDump) intelPrice = intelInfo[0]['LastTradePrice'] intelTime = intelInfo[0]['LastTradeDateTimeLong'] print ("As of " + intelTime + ", Intel stock is trading at: " + intelPrice) </code></pre>
0
2016-08-19T08:19:43Z
[ "javascript", "python", "beautifulsoup" ]
Python BeautifulSoup - Scraping Google Finance historical data
39,033,999
<p>I was trying to scrap Google Finance historical data. I was need of to total number of rows, which is located along with the pagination. The following is the div tag which is responsible for displaying the total number of rows:</p> <pre><code>&lt;div class="tpsd"&gt;1 - 30 of 1634 rows&lt;/div&gt; </code></pre> <p>I tried using the following code to get the data, but its returning an empty list:</p> <pre><code>soup.find_all('div', 'tpsd') </code></pre> <p>I tried getting the entire table but even then I was not successful, when I checked the page source I was able to find the value inside a JavaScript function. When I Googled how to get values from script tag, it was mentioned to used regex. So, I tried using regex and the following is my code:</p> <pre><code>import requests import re from bs4 import BeautifulSoup r = requests.get('https://www.google.com/finance/historical?cid=13564339&amp;startdate=Jan+01%2C+2010&amp;enddate=Aug+18%2C+2016&amp;num=30&amp;ei=ilC1V6HlPIasuASP9Y7gAQ') soup = BeautifulSoup(r.content,'lxml') var = soup.find_all("script")[8].string a = re.compile('google.finance.applyPagination\((.*)\'http', re.DOTALL) b = a.search(var) num = b.group(1) print(num.replace(',','').split('\n')[3]) </code></pre> <p>I am able to get the values which I want, but my doubt is whether the above code which I used to get the values is correct, or is there any other way better way. Kindly help.</p>
1
2016-08-19T08:02:46Z
39,037,402
<p>You can easily pass an offset i.e <em>start=..</em> to the url getting 30 rows at a time which is exactly what is happening with the pagination logic:</p> <pre><code>from bs4 import BeautifulSoup import requests url = "https://www.google.com/finance/historical?cid=13564339&amp;startdate=Jan+01%2C+2010&amp;" \ "enddate=Aug+18%2C+2016&amp;num=30&amp;ei=ilC1V6HlPIasuASP9Y7gAQ&amp;start={}" with requests.session() as s: start = 0 req = s.get(url.format(start)) soup = BeautifulSoup(req.content, "lxml") table = soup.select_one("table.gf-table.historical_price") all_rows = table.find_all("tr") while True: start += 30 soup = BeautifulSoup(s.get(url.format(start)).content, "lxml") table = soup.select_one("table.gf-table.historical_price") if not table: break all_rows.extend(table.find_all("tr")) </code></pre> <p>You can also get the total rows using the script tag and use that with range:</p> <pre><code>with requests.session() as s: req = s.get(url.format(0)) soup = BeautifulSoup(req.content, "lxml") table = soup.select_one("table.gf-table.historical_price") scr = soup.find("script", text=re.compile('google.finance.applyPagination')) total = int(scr.text.split(",", 3)[2]) all_rows = table.find_all("tr") for start in range(30, total+1, 30): soup = BeautifulSoup(s.get(url.format(start)).content, "lxml") table = soup.select_one("table.gf-table.historical_price") all_rows.extend(table.find_all("tr")) print(len(all_rows)) </code></pre> <p>The <code>num=30</code> is the amount of rows per page, to make less requests you can set it to 200 which seems to be the max and work your step/offset from that.</p> <pre><code>url = "https://www.google.com/finance/historical?cid=13564339&amp;startdate=Jan+01%2C+2010&amp;" \ "enddate=Aug+18%2C+2016&amp;num=200&amp;ei=ilC1V6HlPIasuASP9Y7gAQ&amp;start={}" with requests.session() as s: req = s.get(url.format(0)) soup = BeautifulSoup(req.content, "lxml") table = soup.select_one("table.gf-table.historical_price") scr = soup.find("script", 
text=re.compile('google.finance.applyPagination')) total = int(scr.text.split(",", 3)[2]) all_rows = table.find_all("tr") for start in range(200, total+1, 200): soup = BeautifulSoup(s.get(url.format(start)).content, "lxml") print(url.format(start) table = soup.select_one("table.gf-table.historical_price") all_rows.extend(table.find_all("tr")) </code></pre> <p>If we run the code, you will see we get 1643 rows:</p> <pre><code>In [7]: with requests.session() as s: ...: req = s.get(url.format(0)) ...: soup = BeautifulSoup(req.content, "lxml") ...: table = soup.select_one("table.gf-table.historical_price") ...: scr = soup.find("script", text=re.compile('google.finance.applyPagination')) ...: total = int(scr.text.split(",", 3)[2]) ...: all_rows = table.find_all("tr") ...: for start in range(200, total+1, 200): ...: soup = BeautifulSoup(s.get(url.format(start)).content, "lxml") ...: table = soup.select_one("table.gf-table.historical_price") ...: all_rows.extend(table.find_all("tr")) ...: print(len(all_rows)) ...: 1643 In [8]: </code></pre>
1
2016-08-19T11:01:01Z
[ "javascript", "python", "beautifulsoup" ]
for in a parameter function python - zip dynamic
39,034,136
<p>I have dynamic <code>zip()</code> function call:</p> <pre><code>zip(id, value[0], value[1], value[2], value[3], value[4]) </code></pre> <p>Value has a dynamic length: it could contain 3 or 4 or 7 elements, etc. Is there a way I can make the <code>zip()</code> function dynamic and work with a variable number of elements from <code>value</code>?</p> <p>e.g. <strong>pseudo code</strong>:</p> <pre><code>zip(id, for i in range(0,len(value)): value[i]) </code></pre>
1
2016-08-19T08:10:38Z
39,034,229
<p>Use the <code>*args</code> <em>call</em> syntax:</p> <pre><code>zip(id, *value) </code></pre> <p>Prepending <code>value</code> with <code>*</code> tells Python to apply each entry in <code>value</code> as a separate argument to <code>zip()</code>.</p>
3
2016-08-19T08:16:08Z
[ "python", "python-2.7", "function" ]
Dynamically add columns to Existing BigQuery table
39,034,249
<p><strong>Background</strong></p> <p>I am loading files from local machine to BigQuery.Each file has variable number of fields.So,i am using <strong>'autodetect=true'</strong> while running load job.</p> <p><strong>Issue</strong> is,when load job is run for first time and if the destination table doesn't exsist,Bigquery creates the table ,by infering the fields present in our file and that becomes New table's schema.</p> <p>Now,when i run load job with a different file,which contains some extra (Eg:"Middile Name":"xyz")fields ,<strong>bigQuery throws error</strong> saying "field doesn't exsist in table")</p> <p>From this post::<a href="http://stackoverflow.com/questions/36295488/bigquery-add-new-column-to-existing-tables-using-python-bq-api">BigQuery : add new column to existing tables using python BQ API</a>,i learnt that columns can be added dynamically.However what i don't understand is,</p> <p><strong>Query</strong></p> <p><strong>How will my program come to know,that the file being uploaded ,contains extra fields and schema mismatch will occur.(Not a problem ,if table doesn't exsist bcoz. new table will be created).</strong></p> <p>If my program can somehow infer the extra fields present in file being uploaded,i could add those columns to the exsisting table and then run the load job.</p> <p>I am using python BQ API.</p> <p>Any thoughts on how to automate this process ,would be helpful.</p>
0
2016-08-19T08:17:12Z
39,035,979
<p>A naive solution would be:</p> <p>1. Get the target table schema using </p> <p>service.tables().get(projectId=projectId, datasetId=datasetId, tableId=tableId)</p> <p>2. Generate the schema of your data in the file.</p> <p>3. Compare the schemas (a kind of "diff") and then add those columns to the target table which are extra in your data schema.</p> <p>Any better ideas or approaches would be highly appreciated!</p>
0
2016-08-19T09:50:09Z
[ "python", "google-bigquery", "google-cloud-platform" ]
Dynamically add columns to Existing BigQuery table
39,034,249
<p><strong>Background</strong></p> <p>I am loading files from local machine to BigQuery.Each file has variable number of fields.So,i am using <strong>'autodetect=true'</strong> while running load job.</p> <p><strong>Issue</strong> is,when load job is run for first time and if the destination table doesn't exsist,Bigquery creates the table ,by infering the fields present in our file and that becomes New table's schema.</p> <p>Now,when i run load job with a different file,which contains some extra (Eg:"Middile Name":"xyz")fields ,<strong>bigQuery throws error</strong> saying "field doesn't exsist in table")</p> <p>From this post::<a href="http://stackoverflow.com/questions/36295488/bigquery-add-new-column-to-existing-tables-using-python-bq-api">BigQuery : add new column to existing tables using python BQ API</a>,i learnt that columns can be added dynamically.However what i don't understand is,</p> <p><strong>Query</strong></p> <p><strong>How will my program come to know,that the file being uploaded ,contains extra fields and schema mismatch will occur.(Not a problem ,if table doesn't exsist bcoz. new table will be created).</strong></p> <p>If my program can somehow infer the extra fields present in file being uploaded,i could add those columns to the exsisting table and then run the load job.</p> <p>I am using python BQ API.</p> <p>Any thoughts on how to automate this process ,would be helpful.</p>
0
2016-08-19T08:17:12Z
39,700,879
<p>You should check schema update options. There is an option named "ALLOW_FIELD_ADDITION" that will help you.</p>
1
2016-09-26T10:50:37Z
[ "python", "google-bigquery", "google-cloud-platform" ]
cloudsql databases with django on 'new' flexible google app engine
39,034,250
<p>I'm building a django (1.9) app using cloudsql and a 'new' flexible environment. I have this same error: <a href="http://stackoverflow.com/questions/20843817/django-on-google-appengine-with-cloudsql-how-to-connect-database-error-2002-c">Django on Google AppEngine with CloudSQL: How to connect database (Error 2002, Can&#39;t connect to local MySQL server..)</a></p> <p>However, in that thread, they refer to docs which are older and explain to put the database key ,<code>HOST</code>, in the format:</p> <pre><code>DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'HOST': '/cloudsql/myapp-test01:myapp-db-test01', 'NAME': 'test01', 'USER': 'test01', } </code></pre> <p>Whereas in the <a href="https://cloud.google.com/python/django/flexible-environment" rel="nofollow">documentation for running django on the 'flexible' environment</a>, it is explained to open your database to the world (0.0.0.0/0) and use the format:</p> <pre><code>DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': '&lt;your-database-name&gt;', 'USER': '&lt;your-database-user&gt;', 'PASSWORD': '&lt;your-database-password&gt;', 'HOST': '&lt;your-database-host&gt;', 'PORT': '3306', } } </code></pre> <p>Where is presumably the IP address given by CloudSQL.</p> <p>Please see the docs here: <a href="https://cloud.google.com/python/django/flexible-environment" rel="nofollow">https://cloud.google.com/python/django/flexible-environment</a></p> <p>Note the 'Flexible Environment' which seems to be distinctly different (but I don't know why).</p> <p>I'm writing, because I when I change from IP address format to <code>/cloudsq/...</code> format I get the error </p> <blockquote> <p>OperationalError: (2002, "Can't connect to local MySQL server through socket '/cloudsql/desgn-test-01:db-test-01' (2)") </p> </blockquote> <p>Any advice is <em>highly</em> appreciated. 
It seems impossible that google really expects us to leave databases open to <code>0.0.0.0</code>, and in the docs they even say it is only for testing but provide no further information.</p> <p>I should also mention, I'm using two databases in my django config and the 'default' one is a sqlite db (this part of the site is working fine). I get the error when I try to connect to my app that is using the cloud sql parameter. This ONLY happens when deployed, locally everything works perfect.</p>
0
2016-08-19T08:17:14Z
39,045,983
<p>I've made a PR for the documentation on github, but the key changes that were required in order for me to change my <code>DEBUG</code> flag from <code>True</code> to <code>False</code> are described below. </p> <p>Be aware, that it seems once you change the <code>DEBUG</code> flag, GAE requires that your Cloud SQL database uses the <code>/cloudsql/</code> socket connection.</p> <p>Also, I missed the very critical piece of information regarding the <code>ALLOWED_HOSTS</code>, which should include <code>.appspot.com</code></p> <h3>Production</h3> <p>Once you are ready to serve your content in production, there are several changes required for the configuration. Most notable changes are: </p> <ul> <li>Add ".appspot.com" to your <code>ALLOWED_HOSTS</code></li> <li>Change the <code>DEBUG</code> variable to <code>False</code> in your settings.py file.</li> <li>If you are using a Cloud SQL database instance, in order to change from <code>DEBUG = True</code> to <code>DEBUG = False</code> you will need to properly configure the database. See instructions <a href="https://cloud.google.com/sql/docs/app-engine-connect#gaev2-csqlv2" rel="nofollow">here</a> and be sure to change your <code>app.yaml</code> file as well as the <code>HOST</code> key in your <code>DATABASES</code> object.</li> </ul>
0
2016-08-19T18:53:38Z
[ "python", "django", "google-app-engine", "google-cloud-platform", "google-cloud-sql" ]
how to remove blank lines from a csv file created using python
39,034,289
<p>We are creating a subfile from a big csv file. The subfile contains only those rows which have their first column's value as D1:</p> <pre><code>import csv with open('input.csv', 'rb') as csvfile: reader = csv.reader(csvfile, delimiter='|', quotechar='|') writer1 = csv.writer(open('output.csv', 'w'), delimiter = ' ') for row in reader: a = row[0] row = '|'.join(row) if (a=='D1'): writer1.writerow(row) </code></pre> <p>This code gives 2 issues:</p> <ol> <li>A Blank line comes after every row in new csv file</li> <li>Every word has extra spaces between it's letters. So, "Hello" becomes <code>"H e l l o"</code>.</li> </ol>
1
2016-08-19T08:19:13Z
39,034,390
<p>From the Python csv docs:</p> <blockquote> <pre><code>import csv with open('eggs.csv', 'w', newline='') as csvfile: spamwriter = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL) spamwriter.writerow(['Spam'] * 5 + ['Baked Beans']) spamwriter.writerow(['Spam', 'Lovely Spam', 'Wonderful Spam']) </code></pre> </blockquote> <p>So it seems that you have to use lists <code>['Spam']</code> with <code>spamwriter.writerow</code>... If it's not a list, the <code>writerow</code> method creates its own list from the iterable input, like this:</p> <pre><code>&gt;&gt;&gt; a = 'ala ma kota' &gt;&gt;&gt; list(a) ['a', 'l', 'a', ' ', 'm', 'a', ' ', 'k', 'o', 't', 'a'] </code></pre>
0
2016-08-19T08:24:38Z
[ "python", "csv" ]
how to remove blank lines from a csv file created using python
39,034,289
<p>We are creating a subfile from a big csv file. The subfile contains only those rows which have their first column's value as D1:</p> <pre><code>import csv with open('input.csv', 'rb') as csvfile: reader = csv.reader(csvfile, delimiter='|', quotechar='|') writer1 = csv.writer(open('output.csv', 'w'), delimiter = ' ') for row in reader: a = row[0] row = '|'.join(row) if (a=='D1'): writer1.writerow(row) </code></pre> <p>This code gives 2 issues:</p> <ol> <li>A Blank line comes after every row in new csv file</li> <li>Every word has extra spaces between it's letters. So, "Hello" becomes <code>"H e l l o"</code>.</li> </ol>
1
2016-08-19T08:19:13Z
39,416,946
<p>This code runs fine:</p> <pre><code>import csv with open('input.csv', 'rb') as csvfile: reader = csv.reader(csvfile, delimiter='|', quotechar='|') writer1 = csv.writer(open('output.csv', 'wb'), delimiter = '|') for row in reader: a = row[0] if (a=='D1'): writer1.writerow(row) </code></pre> <p>Thanks to Pedru</p>
0
2016-09-09T17:35:28Z
[ "python", "csv" ]
How to modify cells in a pandas DataFrame?
39,034,351
<p>I need to change individual elements in a DataFrame. I tried doing something like this, but it doesn't work:</p> <pre><code>for index, row in df.iterrows(): if df.at[row, index] == 'something': df.at[row, index] = df.at[row, index] + 'add a string' else: df.at[row, index] = df.at[row, index] + 'add a value' </code></pre> <p>How can I do that?</p>
2
2016-08-19T08:22:35Z
39,034,468
<p>If need modify all columns in <code>DataFrame</code> use <a href="http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.where.html" rel="nofollow"><code>numpy.where</code></a> with <a href="http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html" rel="nofollow"><code>DataFrame</code></a> constructor, because <code>where</code> return <code>numpy array</code>:</p> <pre><code>df = pd.DataFrame(np.where(df == 'something', df + 'add a string', df + 'add a value'), index=df.index, columns=df.columns) </code></pre> <p>If only one column <code>col</code>:</p> <pre><code>df['col'] = np.where(df['col'] == 'something', df['col'] + 'add a string', df['col'] + 'add a value') </code></pre> <p>Sample:</p> <pre><code>df = pd.DataFrame({'col': ['a', 'b', 'a'], 'col1': ['a', 'b', 'b']}) print (df) col col1 0 a a 1 b b 2 a b df = pd.DataFrame(np.where(df == 'a', df + 'add a string', df + 'add a value'), index=df.index, columns=df.columns) print (df) col col1 0 aadd a string aadd a string 1 badd a value badd a value 2 aadd a string badd a value </code></pre> <hr> <pre><code>df['col'] = np.where(df['col'] == 'a', df['col'] + 'add a string', df['col'] + 'add a value') print (df) col col1 0 aadd a string a 1 badd a value b 2 aadd a string b </code></pre>
1
2016-08-19T08:29:08Z
[ "python", "string", "pandas", "dataframe", "condition" ]
How to modify cells in a pandas DataFrame?
39,034,351
<p>I need to change individual elements in a DataFrame. I tried doing something like this, but it doesn't work:</p> <pre><code>for index, row in df.iterrows(): if df.at[row, index] == 'something': df.at[row, index] = df.at[row, index] + 'add a string' else: df.at[row, index] = df.at[row, index] + 'add a value' </code></pre> <p>How can I do that?</p>
2
2016-08-19T08:22:35Z
39,037,148
<p>You can use <code>.ix</code> and apply a function like this:</p> <pre><code>import pandas as pd D = pd.DataFrame({'A': ['a', 'b', 3,7,'b','a'], 'B': ['a', 'b', 3,7,'b','a']}) D.ix[D.index%2 == 0,'A'] = D.ix[D.index%2 == 0,'A'].apply(lambda s: s+'x' if isinstance(s,str) else s+1) D.ix[D.index[2:5],'B'] = D.ix[D.index[2:5],'B'].apply(lambda s: s+'y' if isinstance(s,str) else s-1) </code></pre> <p>First example appends x to each string or alternatively adds 1 to each non-string on column A for every even index.</p> <p>The second example appends y to each string or alternatively subtracts 1 from each non-string on column B for the indices 2,3,4.</p> <p>Original Frame:</p> <pre><code> A B 0 a a 1 b b 2 3 3 3 7 7 4 b b 5 a a </code></pre> <p>Modified Frame:</p> <pre><code> A B 0 ax a 1 b b 2 4 2 3 7 6 4 bx by 5 a a </code></pre>
1
2016-08-19T10:48:38Z
[ "python", "string", "pandas", "dataframe", "condition" ]
Risk to overwrite tables via Django Models migrations?
39,034,499
<p>I'm currently trying to update my Django models to incorporate some new functionality but after runnning "makemigrations", the console output makes me worried I'm going to overwrite other tables in the database. </p> <p>Essentially, in my models.py, I have 8 models. One is entirely new, one is merely modified. I want to migrate these changes to the database so I run "makemigrations". A migration's file is created (I note there are no others whatsoever - presumably the colleague who created these originally has deleted them for whatever reason). The console output is:</p> <pre><code>- Create model ModelNew - Create model ModelDontTouch - Create model ModelDontTouch - Create model ModelDontTouch - Create model ModelDontTouch - Create model ModelDontTouch - Create model ModelDontTouch - Create model ModelUpdated </code></pre> <p>Why does it say <em>create</em> model? Is it because, as far as Django knows, this is the first migration ever performed? Or does it plan on overwriting all those other tables (which would kill our app completely and result in a very terrible day)? </p> <p>I also notice some models have specified</p> <pre><code>db_table = 'some_table' db_tablespace = 'sometable' </code></pre> <p>others, just, db_tablespace = 'sometable'</p> <p>others, nothing at all. Anyone have any thoughts on this?</p>
0
2016-08-19T08:30:51Z
39,034,579
<p>Django doesn't interrogate the database itself when making migrations; it builds up a graph based on previous migrations and the current state of the models. So if there are no previous migrations Django will create them from scratch. This won't overwrite your tables, but it won't actually work either, as it will attempt to create them and the database will refuse.</p> <p>One option would be to temporarily revert your changes and run makemigrations again to get back to the right starting point, then run that migration with <code>--fake-initial</code> to mark it as applied without actually doing it, then reapply your changes and run makemigrations again.</p>
2
2016-08-19T08:35:47Z
[ "python", "django", "python-2.7", "django-models", "django-migrations" ]
Python: "unexpected end of regular expression" during re.compile, empty brackets
39,034,603
<p>To summarize i have <code>re.compile</code> statement like so:</p> <pre><code>markers = ['x'] # some list re.compile(r" *[{}].*(?=\n|$)".format('\\'.join([''] + markers))) </code></pre> <p>For most cases it works fine <strong>unless <code>markers</code> is empty</strong> and RegEx pattern looks like so:</p> <pre><code>pattern = ' *[].*(?=\\n|$)' </code></pre> <p>Why does it have problem with <strong>empty character set</strong>? What is the workaround to make it work for empty <code>markers</code> list?</p> <h2>SOLUTION</h2> <p>Credits for: <a href="http://stackoverflow.com/users/100297/martijn-pieters">Martijn Pieters</a>, <a href="http://stackoverflow.com/users/3832970/wiktor-stribi%C5%BCew">Wiktor Stribiżew</a> and <a href="http://stackoverflow.com/users/240443/amadan">Amadan</a>.</p> <p>To summarize:</p> <ul> <li><strong>empty character set</strong> doesn't exist in RegEx. <code>[]</code> is parsed like <code>[a</code> so interpreter expects closing <code>]</code> and that causes error,</li> <li>checking for empty <code>markers</code> must be done before compiling this pattern, to avoid invalid empty brackets <code>[]</code>,</li> <li><code>.*(?=\n|$)</code> has redundant <code>(?=\n|$)</code> and can be simplified to <code>.*</code>,</li> <li>to escape special characters efficiently inside brackets <code>[]</code> it it's better to use <code>re.escape()</code>.</li> </ul> <p>Adding thigs up the solution for my problem is:</p> <pre><code>if markers: re.compile(r" *[{}].*".format(re.escape(''.join(markers)))) else: # something </code></pre>
0
2016-08-19T08:37:34Z
39,035,409
<p>You may check if the markers list is not empty <em>at the very beginning</em>, then, only escape the characters that must be escaped in the character class: <code>^</code>, <code>\</code>, <code>]</code>, <code>[</code>, <code>-</code>.</p> <p>Note that if the markers list is empty, the pattern becomes <code> *.*</code>, basically accepting <em>any line</em>. You can match it with <code>"^.*$"</code>.</p> <p>Here is my suggestion:</p> <pre><code>import re markers = ['x', ']', '[', '-', '^', '\\'] # some list global p #markers = [] # some list if markers: escaped = [re.sub(r"[][^\\-]", r"\\\g&lt;0&gt;", x) for x in markers] pat = r" *[{}].*".format("".join(escaped)) p = re.compile(pat) else: p = re.compile("^.*$") print(p.pattern) </code></pre> <p>See the <a href="https://ideone.com/JxNBa0" rel="nofollow">Python demo</a></p> <p>Also, the <code>.*(?=\n|$)</code> can be actually reduced to <code>.*</code> since <code>.</code> matches any character but a newline (it also can match a CR symbol) and <code>.*</code> will always match all chars up to the <code>\n</code> or end of string.</p>
1
2016-08-19T09:18:48Z
[ "python", "regex", "python-2.7" ]
Which exception should I throw if a module is not the correct version?
39,034,635
<p>This may have been asked before or I may be overly pedantic, but my own searches have come up empty.</p> <p>Looking through the <a href="https://docs.python.org/2/library/exceptions.html" rel="nofollow">Python 2.x exceptions page</a>, I'm not sure which one I should <code>raise</code> if my script determines that the <code>__version__</code> of a module that's been imported, e.g. <code>cv2</code>, is not the correct version. For example, a script I'm working on requires OpenCV version 3; what's the best exception to <code>raise</code> in the following block if it determines that the version != 3?</p> <pre><code>import cv2 if not cv2.__version__.startswith('3'): raise ValueError('OpenCV _3_ required') </code></pre>
0
2016-08-19T08:39:07Z
39,034,724
<p>You can create you own custom exception if the existing ones don't suffice.</p> <pre><code>class VersionError(Exception): def __init__(self, msg): Exception.__init__(self,msg) </code></pre>
2
2016-08-19T08:44:02Z
[ "python", "exception", "version" ]
Which exception should I throw if a module is not the correct version?
39,034,635
<p>This may have been asked before or I may be overly pedantic, but my own searches have come up empty.</p> <p>Looking through the <a href="https://docs.python.org/2/library/exceptions.html" rel="nofollow">Python 2.x exceptions page</a>, I'm not sure which one I should <code>raise</code> if my script determines that the <code>__version__</code> of a module that's been imported, e.g. <code>cv2</code>, is not the correct version. For example, a script I'm working on requires OpenCV version 3; what's the best exception to <code>raise</code> in the following block if it determines that the version != 3?</p> <pre><code>import cv2 if not cv2.__version__.startswith('3'): raise ValueError('OpenCV _3_ required') </code></pre>
0
2016-08-19T08:39:07Z
39,034,756
<p>You've got a lot of options depending on what you want to do with this exception... Generally, I'd expect the install scripts to handle setting up the appropriate versions of dependencies so I might think of this as a simple runtime assertion -- Therefore <code>AssertionError</code> may be appropriate.</p> <p>This one is really nice -- You don't need an <code>if</code> statement, just an <code>assert</code>:</p> <pre><code>assert cv2.__version__.startswith('3'), 'OpenCV _3_ required' </code></pre> <p>My next bet would be to use <code>RuntimeError</code> as that is really meant to be a general exception that happens at runtime (and isn't usually meant to be caught)... It's a pretty general "Oh snap, something bad happened that we cannot recover from. Lets just spit out an error to let the user know what happened".</p>
2
2016-08-19T08:46:46Z
[ "python", "exception", "version" ]
Changing Odoo 9 "login title"
39,034,681
<p>I was following this post <a href="http://stackoverflow.com/questions/26974431/odoo-8-how-to-change-page-title/29182681#29182681">Odoo 8 - how to change page title?</a> on how to change Odoo 9 login title . I believe I followed the steps and also restarted the server, but the title didn't change. Any suggestions?</p> <p>Here are steps I followed:</p> <ol> <li><p>I created a new folder/module called brin in addons folder</p></li> <li><p>Created a new xml file(with new title) in that folder that looks like this</p></li> </ol> <p><a href="http://i.stack.imgur.com/f18P8.png" rel="nofollow"><img src="http://i.stack.imgur.com/f18P8.png" alt="brin.xml"></a></p> <ol start="3"> <li>Created an <strong>openerp</strong>.py file in that folder and declared the xml file that looks like this:</li> </ol> <p><a href="http://i.stack.imgur.com/NYtoM.png" rel="nofollow"><img src="http://i.stack.imgur.com/NYtoM.png" alt="__openerp__.py file"></a></p>
-2
2016-08-19T08:41:07Z
39,038,555
<p>So I figured out a way to do it.</p> <p>1. Click the drop-down menu on Administrator (in the upper right corner).</p> <p>2. Click the "About" link and activate developer mode.</p> <p>3. Now go to Settings, User Interface, Views. Type "layout" in the search bar. A set of layouts is brought up. Click on Web layout. Click edit. In the XML, change the title to a custom title.</p> <p>4. Voilà!</p>
0
2016-08-19T11:58:15Z
[ "python", "odoo-9" ]
Unexpected behaviour when grouping outliers in pandas [Python]
39,034,691
<p>My dataframe is in this format </p> <pre><code>df Count DateTime 2015-01-16 10 2015-01-17 28 2015-01-18 26 2015-01-19 10 2015-01-20 24 2015-01-21 25 </code></pre> <p>Im experimenting with this function to eliminate outliers using groupby</p> <pre><code>def replaceit(group): mean, std = group.mean(), group.std() outliers = (group - mean).abs() &gt; 3*std group[outliers] = mean # or "group[~outliers].mean()" return group </code></pre> <p>Creating a copy of that dataframe as I want to use it elsewhere:</p> <pre><code>df2 = df </code></pre> <p>Lets see the output of df2</p> <pre><code>df2 Count DateTime 2015-01-16 10 2015-01-17 28 2015-01-18 26 2015-01-19 10 2015-01-20 24 2015-01-21 25 </code></pre> <p>lets use the function</p> <pre><code>df2 = replaceit(df2) df2 DateTime 2015-01-16 10.000000 2015-01-17 28.000000 2015-01-18 26.000000 2015-01-19 10.000000 2015-01-20 24.000000 2015-01-21 25.000000 </code></pre> <p>BUT now lets see the output of df:</p> <pre><code>df Count DateTime 2015-01-16 10.000000 2015-01-17 28.000000 2015-01-18 26.000000 2015-01-19 10.000000 2015-01-20 24.000000 2015-01-21 25.000000 </code></pre> <p>My question is, why is this happening? How can I solve this issue?</p>
1
2016-08-19T08:42:03Z
39,034,744
<p>Problem is if use <code>df2 = df</code> it is reference to the initial DataFrame. Thus, changing <code>df2</code> will change the initial DataFrame <code>df</code>.</p> <p>You need <a href="http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.copy.html" rel="nofollow"><code>copy</code></a>:</p> <pre><code>df2 = df.copy() </code></pre>
2
2016-08-19T08:45:44Z
[ "python", "pandas", "dataframe" ]
Create a python class with a uniqure feature
39,034,712
<p>I have a bunch of interdependent variables that I want to save in a list of class elements. Now I want it to be sorted according to one of the class features. At the moment, what I am doing is this:</p> <pre><code> class myclass(object): lst_of_A=[] def __init__(self, Attr_A, Attr_B, Attr_C): self.A=Attr_A self.B=[Attr_B] self.C=[Attr_C] self.lst_of_A.append(Attr_A) def append_to_existing_entry(self, Attr_B, Attr_C): self.B.append(Attr_B) self.C.append(Attr_C) </code></pre> <p>Now, I use try and except in a for loop to generate the list accordingly:</p> <pre><code> my_lst_of_classes=[] for el in read_in_data: try: ind=my_lst_of_classes[0].lst_of_A.index(el[0]) my_lst_of_classes[index].append_to_existing_entry(el[1],el[2]) except: my_lst_of_classes.append(myclass(el[0],el[1],el[2])) </code></pre> <p>This works, but seems very clumsy. Is there any better way of doing this? </p>
0
2016-08-19T08:43:13Z
39,036,009
<p>First point, you want to use a <code>dict</code> (or <code>OrderedDict</code> if insertion order matters) instead of a <code>list</code> - this makes for more readable and much faster code. </p> <p>Second point, you want to encapsulate all the handling of this collection (specially the "create or extend" part) so the client code doesn't have to care about it. <code>classmethod</code>s (methods that take the class - not the instance - as first argument) are your friend here.</p> <p>Here's a simple example that should get you started:</p> <pre><code>from collections import OrderedDict class MyObj(object): _index = OrderedDict() @classmethod def insert(cls, a, b, c): if a in cls._index: cls._index[a].extend(b, c) else: cls._index[a] = cls(a, b, c) @classmethod def list_instances(cls): return cls._index.values() @classmethod def get_instances(cls, key): return cls._index.get(key, []) def __init__(self, a, b, c): self.a = a self.b = [b] self.c = [c] def extend(self, b, c): self.b.append(b) self.c.append(c) def __str__(self): return "({a}, {b}, {c})".format(**self.__dict__) def __repr__(self): return "&lt;{}({})&gt;".format(type(self).__name__, self) sources = [ (2, 'B0', 'C0'), (3, 'B1', 'C1'), (5, 'B2', 'C2'), (2, 'B3', 'C3'), (4, 'B4', 'C4'), (4, 'B5', 'C5'), (2, 'B6', 'C6'), (3, 'B7', 'C7'), (5, 'B8', 'C8'), (2, 'B9', 'C9'), (4, 'B10', 'C10'), (2, 'B11', 'C11'), (2, 'B12', 'C12'), (4, 'B13', 'C13'), (2, 'B14', 'C14'), (4, 'B15', 'C15'), (4, 'B16', 'C16'), (3, 'B17', 'C17'), (4, 'B18', 'C18'), (1, 'B19', 'C19'), (3, 'B20', 'C20'), (4, 'B21', 'C21'), (5, 'B22', 'C22'), (1, 'B23', 'C23'), (3, 'B24', 'C24'), (4, 'B25', 'C25'), (3, 'B26', 'C26'), (2, 'B27', 'C27'), (4, 'B28', 'C28'), (5, 'B29', 'C29') ] for data in sources: MyObj.insert(*data) print MyObj.list_instances() print MyObj.get_instances(5) </code></pre>
0
2016-08-19T09:51:33Z
[ "python", "list", "class" ]
add exceptions to python function
39,034,718
<p>I have a function that takes an integer as input and returns the number as words, it works as I want with the exception of <code>30,40,50,60,70,80</code> and <code>90</code>, it returns for <code>90 = ninty zero</code>, because the function works by splitting the number, how can I remove the zero being added for the above numbers. my code is as follows</p> <pre><code>d = {0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten', 11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'ninteen', 20: 'twenty', 30: 'thirty', 40: 'fourth', 50: 'fifty', 60: 'sixty', 70: 'seventy', 80: 'eighty', 90: 'ninty'} def wordify(n): k = 1000 m = k * 1000 if n &lt; 20: return d[n] if n &lt; 100: if n % 100 == 0: return d[n] else: return d[n // 10 * 10] + ' ' + d[n % 10] if n &lt; k: if n % 100 == 0: return d[n // 100] + ' hundred' else: return d[n // 100] + ' hundred ' + wordify(n % 100) if n &lt; m: if n % k == 0: return wordify(n // k) + ' thousand' else: return wordify(n // k) + ' thousand, ' + wordify(n % k) print wordify(90) </code></pre>
0
2016-08-19T08:43:39Z
39,034,802
<p>Just perform an extra pass to correct problems. Sometimes simpler than trying to figure out how to avoid them.</p> <pre><code>print wordify(90).replace(" zero","") </code></pre> <p>or: rename your wordify function into <code>internal_wordify</code>, then define <code>wordify</code> like that:</p> <pre><code>def wordify(n): return internal_wordify(n).replace(" zero","") </code></pre>
0
2016-08-19T08:48:55Z
[ "python", "python-2.7", "function" ]
add exceptions to python function
39,034,718
<p>I have a function that takes an integer as input and returns the number as words, it works as I want with the exception of <code>30,40,50,60,70,80</code> and <code>90</code>, it returns for <code>90 = ninty zero</code>, because the function works by splitting the number, how can I remove the zero being added for the above numbers. my code is as follows</p> <pre><code>d = {0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten', 11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'ninteen', 20: 'twenty', 30: 'thirty', 40: 'fourth', 50: 'fifty', 60: 'sixty', 70: 'seventy', 80: 'eighty', 90: 'ninty'} def wordify(n): k = 1000 m = k * 1000 if n &lt; 20: return d[n] if n &lt; 100: if n % 100 == 0: return d[n] else: return d[n // 10 * 10] + ' ' + d[n % 10] if n &lt; k: if n % 100 == 0: return d[n // 100] + ' hundred' else: return d[n // 100] + ' hundred ' + wordify(n % 100) if n &lt; m: if n % k == 0: return wordify(n // k) + ' thousand' else: return wordify(n // k) + ' thousand, ' + wordify(n % k) print wordify(90) </code></pre>
0
2016-08-19T08:43:39Z
39,034,823
<p>Changing the following line:</p> <pre><code>if n % 100 == 0: return d[n] </code></pre> <p>to</p> <pre><code>if n % 10 == 0: return d[n] </code></pre> <p>solves this issue. Since it will then always use the single word and not add the zero behind it whenever it's a multiple of 10.</p>
2
2016-08-19T08:50:02Z
[ "python", "python-2.7", "function" ]
Heatmap for nonuniformly spaced data
39,034,797
<p>I want to create a heatmap using matplotlib like the one depicted below. The data is not uniformly spaced as you can see from the axes ticks. So suppose we have</p> <pre><code>x = [1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 7] y = [.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5] vals = np.random.rand(len(x), len(y)) </code></pre> <p>How is such an interpolated plot created in matplotlib?</p> <p><a href="http://i.stack.imgur.com/yXh23.png" rel="nofollow"><img src="http://i.stack.imgur.com/yXh23.png" alt="enter image description here"></a></p>
-1
2016-08-19T08:48:41Z
39,035,053
<p>you should interpolate missing data, I used in one of my project following:</p> <pre><code>#create regular grid xi, yi = np.linspace(x.min(), x.max(), 100), np.linspace(y.min(), y.max(), 100) xi, yi = np.meshgrid(xi, yi) #interpolate missing data rbf = scipy.interpolate.Rbf(x, y, z, function='linear') zi = rbf(xi, yi) </code></pre>
0
2016-08-19T09:02:24Z
[ "python", "matplotlib", "plot", "heatmap" ]
Django signals for models value change condition?
39,034,798
<p>I have a very simple models in my models.py like this :</p> <pre><code>class Music(models.Model): title= models.CharField( max_length=100 ) description = models.TextField( null=True, blank=True ) def __unicode__(self): return self.name class Album(models.Model): musician= models.CharField( related_name='musician', max_length=100 ) music = models.ForeignKey( Music, related_name='music' ) def __unicode__(self): return self.user.username </code></pre> <p>In this case, i made a very simple logic in my signals.py to check if the new field (new music) has just added to album.</p> <pre><code>Album(musician="someone", music=Music.objects.get(title="something")) #pk=1 Album.save() </code></pre> <p>like this:</p> <pre><code>@receiver(post_save, sender=Album) def add_new_album(sender, instance, **kwargs): if kwargs.get('created'): print "new album recently created" </code></pre> <p>In other condition, i want to make a signals that only be responded if the existing fields is edited to the new one :</p> <pre><code>edit_album = Album.objects.get(pk=1) edit_album.music = Music.objects.get(title="something_else") edit_album.save() </code></pre> <p>The using of post_save receiver make add_new_album() function is triggered every time i edit the existing field.. So my question is, what is the logic that should be implemented in signals.py that only respond the editing fields condition ?</p>
0
2016-08-19T08:48:48Z
39,036,405
<p>So I you want to trigger your signal only when certain field is changed right? You can use a package called <a href="https://github.com/grantmcconnaughey/django-field-history" rel="nofollow">Django Field history</a></p> <p>Something like this:</p> <pre><code>@receiver(post_save, sender=&lt;sender&gt;, dispatch_uid=&lt;string&gt;) def method(sender, instance, **kwargs): last_field_change = FieldHistory.objects.filter(field_name=&lt;your_field_which_you_want_to_check_for_change&gt;, object_id=instance.id).last() if last_field_change: #do your thing </code></pre>
0
2016-08-19T10:12:01Z
[ "python", "django", "signals" ]
How to parallelize task in Flask?
39,034,849
<p>I'm sending a XHR request to my Flask server in order to do several pings on a network</p> <h2>Resource</h2> <pre><code>def get(self, site_id): … for printer in printers: hostname = printer['hostname'] response[site_id][hostname] = network_utils.ping(hostname) return response </code></pre> <h3>Ping</h3> <p>Below <code>shell.execute</code> I'm using <a href="https://docs.python.org/3.5/library/subprocess.html#subprocess.check_output" rel="nofollow"><code>subprocess.check_output</code></a> to run a native <code>ping</code>:</p> <pre><code>def ping(hostname): command = ['ping', '-c', '1', '-W', '1', '-q', hostname] response = shell.execute(command) return output_parser.ping(response['results']) </code></pre> <h3>Output</h3> <pre><code>{ "test-site": { "avg": 0.093, "max": 0.093, "mdev": 0.0, "min": 0.093, "1.1.1.1": { "avg": null, "max": null, "mdev": null, "min": null}, "1.2.3.4": { "avg": null, "max": null, "mdev": null, "min": null}, "127.0.0.1": { "avg": 0.061, "max": 0.061, "mdev": 0.0, "min": 0.061} } } </code></pre> <h3>Questions</h3> <p>The pings are run sequentially making the request super slow (tens seconds, how can I speed up thing? </p>
1
2016-08-19T08:51:12Z
39,037,917
<p>Make the subprocess asynchronous via <a href="http://www.gevent.org/" rel="nofollow">gevent</a>, for example.</p> <pre><code>from gevent import subprocess import gevent def ping(hostname): command = ['ping', '-c', '1', '-W', '1', '-q', hostname] return subprocess.Popen(command, stdout=subprocess.PIPE) def get(self, site_id): … # Start all the pings in parallel, asynchronously # Use dict to reference host: ping subprocess # as results will come in at different times pings = {printer['hostname']: ping(printer['hostname']) for printer in printers} # Wait for all of them to complete gevent.wait(pings.values()) for hostname in pings: response[site_id][hostname] = output_parser.ping(pings[hostname].stdout.read()) return response </code></pre>
1
2016-08-19T11:27:27Z
[ "python", "flask", "parallel-processing", "ping", "flask-restful" ]
How to parallelize task in Flask?
39,034,849
<p>I'm sending a XHR request to my Flask server in order to do several pings on a network</p> <h2>Resource</h2> <pre><code>def get(self, site_id): … for printer in printers: hostname = printer['hostname'] response[site_id][hostname] = network_utils.ping(hostname) return response </code></pre> <h3>Ping</h3> <p>Below <code>shell.execute</code> I'm using <a href="https://docs.python.org/3.5/library/subprocess.html#subprocess.check_output" rel="nofollow"><code>subprocess.check_output</code></a> to run a native <code>ping</code>:</p> <pre><code>def ping(hostname): command = ['ping', '-c', '1', '-W', '1', '-q', hostname] response = shell.execute(command) return output_parser.ping(response['results']) </code></pre> <h3>Output</h3> <pre><code>{ "test-site": { "avg": 0.093, "max": 0.093, "mdev": 0.0, "min": 0.093, "1.1.1.1": { "avg": null, "max": null, "mdev": null, "min": null}, "1.2.3.4": { "avg": null, "max": null, "mdev": null, "min": null}, "127.0.0.1": { "avg": 0.061, "max": 0.061, "mdev": 0.0, "min": 0.061} } } </code></pre> <h3>Questions</h3> <p>The pings are run sequentially making the request super slow (tens seconds, how can I speed up thing? </p>
1
2016-08-19T08:51:12Z
39,037,929
<p>Sounds like the best choice is threading because your issue is <strong>I/O bound</strong>. I'm using <a href="https://docs.python.org/2/library/threading.html#semaphore-objects" rel="nofollow">Semaphore</a> to limit to 5 threads.</p> <p>I'm sending the response dict to the ping dict are thread safe, but you should read <a href="http://stackoverflow.com/questions/1312331/using-a-global-dictionary-with-threads-in-python">this</a> if you think about something more complicated.</p> <pre><code>def get(self, site_id): … semaphore = threading.Semaphore(5) threads = [] for printer in printers: hostname = printer['hostname'] threads.append(threading.Thread(target=network_utils.ping, args=(semaphore, response, site_id, hostname))) # Start and wait to all threads to finish map(lambda t: t.start(), threads) map(lambda t: t.join(), threads) return response def ping(semaphore, response, site_id, hostname): semaphore.acquire() command = ['ping', '-c', '1', '-W', '1', '-q', hostname] response = shell.execute(command) ping_data = output_parser.ping(response['results']) response[site_id][hostname] = ping_data semaphore.release() </code></pre>
2
2016-08-19T11:27:49Z
[ "python", "flask", "parallel-processing", "ping", "flask-restful" ]
How to parallelize task in Flask?
39,034,849
<p>I'm sending a XHR request to my Flask server in order to do several pings on a network</p> <h2>Resource</h2> <pre><code>def get(self, site_id): … for printer in printers: hostname = printer['hostname'] response[site_id][hostname] = network_utils.ping(hostname) return response </code></pre> <h3>Ping</h3> <p>Below <code>shell.execute</code> I'm using <a href="https://docs.python.org/3.5/library/subprocess.html#subprocess.check_output" rel="nofollow"><code>subprocess.check_output</code></a> to run a native <code>ping</code>:</p> <pre><code>def ping(hostname): command = ['ping', '-c', '1', '-W', '1', '-q', hostname] response = shell.execute(command) return output_parser.ping(response['results']) </code></pre> <h3>Output</h3> <pre><code>{ "test-site": { "avg": 0.093, "max": 0.093, "mdev": 0.0, "min": 0.093, "1.1.1.1": { "avg": null, "max": null, "mdev": null, "min": null}, "1.2.3.4": { "avg": null, "max": null, "mdev": null, "min": null}, "127.0.0.1": { "avg": 0.061, "max": 0.061, "mdev": 0.0, "min": 0.061} } } </code></pre> <h3>Questions</h3> <p>The pings are run sequentially making the request super slow (tens seconds, how can I speed up thing? </p>
1
2016-08-19T08:51:12Z
39,052,548
<p>Upvote <a href="http://stackoverflow.com/a/39037929/802365">Or Duan answer</a> as mine is based on his answer:</p> <h3>Resource</h3> <pre><code>class Ping(Resource): def get(self, site_id): site_hostname = mast_utils.list_sites(site_id)['results'][0]['hostname'] printers = mast_utils.list_printers(site_id)['results']['channels'] response = network_utils.parellelize(network_utils.ping, site_hostname, printers) return response api.add_resource(Ping, '/ping/&lt;string:site_id&gt;/') </code></pre> <h3><em>network_utils.py</em></h3> <pre><code>def ping(hostname): command = ['ping', '-q', hostname, '-w', '1', '-W', '1', '-i', '0.2' ] response = shell.execute(command) return output_parser.ping(response['results']) def collect(task, response, **kwargs): hostname = kwargs['hostname'] response[hostname] = task(**kwargs) def parellelize(task, site_id, printers, **kwargs): response = {} kw = kwargs.copy() kw.update({'hostname': site_id}) collect(task, response, **kw) printers_response = {} threads = [] for printer in printers: hostname = printer['hostname'] kw = kwargs.copy() kw.update({'hostname': hostname}) threads.append( threading.Thread( target=collect, args=(task, printers_response), kwargs=kw ) ) for thread in threads: thread.start() thread.join() response[site_id].update(printers_response) return response </code></pre> <h3><em>test_network_utils.py</em></h3> <pre><code>class NetwrokUtilsTestCase(unittest.TestCase): def test_ping_is_null_when_host_unreachable(self): hostname = 'unreachable' response = network_utils.ping(hostname) self.assertDictEqual(response, { 'avg': None, 'max': None, 'mdev': None, 'min': None }) def test_ping_reply_time_when_reachable(self): hostname = '127.0.0.1' response = network_utils.ping(hostname) self.assertGreater(response['avg'], 0) def test_ping_with_only_a_site(self): site_hostname = 'localhost' printers = [] response = {} response = network_utils.parellelize(network_utils.ping, site_hostname, printers) 
self.assertGreater(response[site_hostname]['avg'], 0) def test_ping_with_printers(self): site_hostname = 'localhost' printers = [ {'hostname': '127.0.0.1', 'port': 22}, {'hostname': '0.0.0.0', 'port': 22}, ] response = network_utils.parellelize(network_utils.ping, site_hostname, printers) self.assertGreater(response[site_hostname]['avg'], 0) self.assertGreater(response[site_hostname]['127.0.0.1']['avg'], 0) </code></pre>
0
2016-08-20T09:30:00Z
[ "python", "flask", "parallel-processing", "ping", "flask-restful" ]
Python script not getting executed when put on crontab
39,034,905
<p>I have written a python script which is reading some logs and then writing the required info into a csv file.Its working fine if I execute it manually by python <code>myscript.py</code> .However when I m calling it through cron its not working. I have tried:</p> <pre><code>* * * * * python /path/to/myscript.py * * * * * /path/to/myscript.py * * * * * /usr/bin/python /path/to/myscript.py </code></pre> <p>After some failed attempts I put python <code>myscript.py</code> in a shell script and ran it manually,it worked but again in cron it did not.</p>
-3
2016-08-19T08:54:22Z
39,035,132
<p>Default shell for cron is not <code>/bin/bash</code>, so you need to give the full path to Python executable.<br> Check the output of <code>which python</code>. On my machine it's <code>/usr/bin/python</code>.<br> Assuming this, your cron job entry should be:</p> <pre><code>* * * * * /usr/bin/python /path/to/myscript.py </code></pre> <p>Sometimes you want to use virtualenv or set some environment variables or change working directory before you run the script.<br> In that case it's handy to create a wrapper script in bash: </p> <pre><code>#!/bin/bash # Change working directory cd /path/to # Activate Python virtual environment source bin/activate # Set some environment variable export LOG_FILE_PATH="/var/log/whatever.log" # Finally run your python script python myscript.py </code></pre> <p>Given that you save the above script to <code>/path/to/myscript_wrapper.sh</code>, your cron job entry will look like:</p> <pre><code>* * * * * /bin/bash /path/to/myscript_wrapper.sh </code></pre>
0
2016-08-19T09:06:40Z
[ "python", "shell", "cron-task" ]
Python script not getting executed when put on crontab
39,034,905
<p>I have written a python script which is reading some logs and then writing the required info into a csv file.Its working fine if I execute it manually by python <code>myscript.py</code> .However when I m calling it through cron its not working. I have tried:</p> <pre><code>* * * * * python /path/to/myscript.py * * * * * /path/to/myscript.py * * * * * /usr/bin/python /path/to/myscript.py </code></pre> <p>After some failed attempts I put python <code>myscript.py</code> in a shell script and ran it manually,it worked but again in cron it did not.</p>
-3
2016-08-19T08:54:22Z
39,035,239
<p>Make sure that you do not use any kind of relative paths inside your python script. This can cause your script to exit with an error. </p> <p>So make sure that within your script you do not use <code>../to/something</code>, but instead <code>/path/to/something</code>.</p> <p>ie</p> <pre><code>with open('../file.txt', 'r') as f: </code></pre> <p>should become</p> <pre><code>with open('/full/path/to/file.txt', 'r') as f: </code></pre>
0
2016-08-19T09:11:16Z
[ "python", "shell", "cron-task" ]
How to print out elements of a class
39,034,964
<p>I just got started with python, now I need to print out the names of elements/objects/instances of a class, this is my code: </p> <pre><code>class Delicious : sweet = 0 sour = 0 apple = Delicious() kiwi = Delicious() litchi = Delicious() bracket = [apple, kiwi, lichi] print(bracket) </code></pre> <p>I ran it in the terminal, it prints out: [&lt;<strong>main</strong>.Delicious object at 0x1019d9940>, &lt;<strong>main</strong>.Delicious object at 0x1019d9978>, &lt;<strong>main</strong>.Delicious object at 0x1019d99b0>] how do I make it print <code>apple</code> <code>kiwi</code> <code>litchi</code>? </p> <p>The other answers are too hard for me, can somebody answer it simply? </p>
-1
2016-08-19T08:57:56Z
39,035,216
<p>The way to get this to work is by grabbing the instances name from <code>globals()</code> and returning it from <code>__str__</code>:</p> <pre><code>class Delicious : sweet = 0 sour = 0 def __str__(self): for k, v in globals().items(): if v == self: return k def __repr__(self): return str(self) </code></pre> <p>Now printing will return the name as you defined it:</p> <pre><code>apple = Delicious() print(apple) apple </code></pre> <p>Printing a <code>list</code> that contains your objects will call their <code>__repr__</code>, so you can either also define <code>__repr__</code> to return <code>__str__</code>s result, or call <code>print(*bracket)</code> to unpack the list in the print call which uses their <code>__str__</code> function:</p> <pre><code>print(bracket) [apple, kiwi, lichi] print(*bracket) apple, kiwi, lichi </code></pre> <p>But this make 0 practical sense, in Python names are just labels assigned to objects, by using <code>print</code> and printing the name you get no information about the underlying object.</p>
2
2016-08-19T09:10:06Z
[ "python", "python-3.x" ]
Convert a list of Gene Symbols to UniProt accession numbers using Python
39,035,103
<p>I have a list of gene symbols which represent the intersection of two high throughput data sets. I'm interested in doing some sort of GO annotation and clustering, but in order to do this I need to convert these gene symbols into UniProt accession numbers. My question is, what is the best way to do this using Python? </p> <p>For example, the gene for 'Transforming growth factor beta-1' is called 'TGFB1' and its accession number is 'P01137'. I'm looking for a function/class/module/package that will enable me to input TGFB1 as argument and give me P01137 back. Could somebody give me some directions to look at? Thanks</p>
0
2016-08-19T09:05:12Z
39,299,849
<p>Get some mapping from gene name to PDB ID, like this JSON: <a href="http://www.rcsb.org/pdb/browse/homo_sapiens_download.jsp?rows=100000&amp;page=1&amp;sidx=id&amp;sord=desc" rel="nofollow">http://www.rcsb.org/pdb/browse/homo_sapiens_download.jsp?rows=100000&amp;page=1&amp;sidx=id&amp;sord=desc</a> saving it for example as "mapping.json".</p> <p>Then use that data to get the mapping:</p> <pre><code>import json with open("mapping.json") as mapping: map_dict = json.load(mapping) data = map_dict["rows"] def get_uniprot(gene_id): for row in map_dict["rows"]: if row["cell"][1] == gene_id: return row["cell"][4] print(get_uniprot("TGFB1")) </code></pre>
1
2016-09-02T19:55:30Z
[ "python", "converter", "biopython" ]
Run Python program as service on Ubuntu 16.04 inside a virtual environment
39,035,233
<p>I'm trying to get a Flask + SocketIO app running as a service on Ubuntu 16.04, inside a virtual environment. My server is restarted every day at 3 am (outside of my control), so I need it to automatically launch on startup.</p> <p>Running the script by itself works fine:</p> <pre><code>$ python main.py (29539) wsgi starting up on http://127.0.0.1:8081 </code></pre> <p>I can tell that it's working because it's serving pages (through an nginx server set up by following <a href="http://stackoverflow.com/a/23431713/1814949">this Stack Overflow answer</a>, though I don't think that's relevant.)</p> <p>Here's my <code>/etc/systemd/system/opendc.service</code>:</p> <pre><code>[Unit] Description=OpenDC flask + socketio service [Service] Environment=PYTHON_HOME=/var/www/opendc.ewi.tudelft.nl/web-server/venv Environment=PATH=$VIRTUAL_ENV/bin:$PATH ExecStart=/var/www/opendc.ewi.tudelft.nl/web-server main.py Restart=always [Install] WantedBy=multi-user.target </code></pre> <p>So when I try to get that going using:</p> <pre><code>$ sudo systemctl daemon-reload $ sudo systemctl restart opendc </code></pre> <p>It doesn't serve pages anymore. The status shows:</p> <pre><code>$ sudo systemctl status opendc * opendc.service - OpenDC flask + socketio service Loaded: loaded (/etc/systemd/system/opendc.service; enabled; vendor preset: enabled) Active: inactive (dead) (Result: exit-code) since Fri 2016-08-19 10:48:31 CEST; 15min ago Process: 29533 ExecStart=/var/www/opendc.ewi.tudelft.nl/web-server main.py (code=exited, status=203/EXEC) Main PID: 29533 (code=exited, status=203/EXEC) Aug 19 10:48:31 opendc.ewi.tudelft.nl systemd[1]: opendc.service: Service hold-off time over, scheduling restart. Aug 19 10:48:31 opendc.ewi.tudelft.nl systemd[1]: Stopped OpenDC flask + socketio service. Aug 19 10:48:31 opendc.ewi.tudelft.nl systemd[1]: opendc.service: Start request repeated too quickly. Aug 19 10:48:31 opendc.ewi.tudelft.nl systemd[1]: Failed to start OpenDC flask + socketio service. 
</code></pre> <p>I've looked up <code>(code=exited, status=203/EXEC)</code> and done some troubleshooting with what I found:</p> <p>I checked that <code>main.py</code> is executable:</p> <pre><code>$ ls -l main.py -rwxr-xr-x 1 leon leon 2007 Aug 19 10:46 main.py </code></pre> <p>And that <code>main.py</code> has this first line to point to Python in the virtual environment:</p> <pre><code>#!/var/www/opendc.ewi.tudelft.nl/web-server/venv/bin/python </code></pre> <p>So what's the problem here?</p>
0
2016-08-19T09:11:06Z
39,671,027
<p>I believe that you mistype PYTHON_HOME and than PATH=$VIRTUAL_ENV/bin:$PATH</p> <p>you should use PATH=$PYTHON_HOME/bin:$PATH</p>
0
2016-09-23T23:43:10Z
[ "python", "ubuntu", "flask", "virtualenv", "daemon" ]
How can I get the number of groups to vary depending on the number of lines?
39,035,234
<p>I have this regex: <code>^:([^:]+):([^:]*)</code> which works as in <a href="https://regex101.com/r/qM5nR0/2" rel="nofollow">this regex101 link</a>.</p> <p>Now, in Python, I have this:</p> <pre><code>def get_data(): data = read_mt_file() match_fields = re.compile('^:([^:]+):([^:]*)', re.MULTILINE) fields = re.findall(match_fields, data) return fields </code></pre> <p>Which, for a file containing the data from regex101, returns:</p> <pre><code>[('1', 'text\ntext\n\n'), ('20', 'text\n\n'), ('21', 'text\ntext\ntext\n\n'), ('22', ' \n\n'), ('25', 'aa\naa\naaaaa')] </code></pre> <p>Now, this is ok, but I want to change the regex, so that I can get the number of groups to vary depending on the number of lines. Meaning:</p> <ul> <li>for the first line, now, I get two groups: <ol> <li><code>1</code></li> <li><code>text\ntext\n\n</code></li> </ol></li> </ul> <p>I'd like to get instead:</p> <ol> <li><code>1</code></li> <li>((<code>text\n</code>), (<code>text\n\n</code>)) &lt;-- those should be somehow in the same group but separated, each in his own <em>subgroup</em>. Somehow I need to know they both belong to <code>1</code> field, but are sepparate lines.</li> </ol> <p>So, In python, the desired result for that file would be:</p> <pre><code>[('1', '(text\n), (text\n\n)'), ('20', 'text\n\n'), ('21', '(text\n), (text\n), (text\n\n)'), ('22', ' \n\n'), ('25', '(aa\n), (aa\n), (aaaaa)')] </code></pre> <p>Is this possible with regex? Could this be achieved with some nice string manipulation instead ?</p>
0
2016-08-19T09:11:06Z
39,035,667
<p>To do what you want, you'd need another regex. This is as <code>re.match</code> only matches the last item it matches:</p> <pre><code>&gt;&gt;&gt; re.match(r'(\d)+', '12345').groups() ('5',) </code></pre> <p>Instead of using one regex you'll need to use two. The one that you are using at the moment, and then one to match all the 'sub-groups', using say <code>re.findall</code>. You can get these sub-groups by simply matching anything that isn't a <code>\n</code> and then any amount of <code>\n</code>.</p> <p>So you could use a regex such as <code>[^\n]+\n*</code>:</p> <pre><code>&gt;&gt;&gt; re.findall(r'[^\n]+\n*', 'text\ntext') ['text\n', 'text'] &gt;&gt;&gt; re.findall(r'[^\n]+\n*', 'text\ntext\n\n') ['text\n', 'text\n\n'] &gt;&gt;&gt; re.findall(r'[^\n]+\n*', '') [] </code></pre>
1
2016-08-19T09:30:49Z
[ "python", "regex", "python-3.x", "split" ]
How can I get the number of groups to vary depending on the number of lines?
39,035,234
<p>I have this regex: <code>^:([^:]+):([^:]*)</code> which works as in <a href="https://regex101.com/r/qM5nR0/2" rel="nofollow">this regex101 link</a>.</p> <p>Now, in Python, I have this:</p> <pre><code>def get_data(): data = read_mt_file() match_fields = re.compile('^:([^:]+):([^:]*)', re.MULTILINE) fields = re.findall(match_fields, data) return fields </code></pre> <p>Which, for a file containing the data from regex101, returns:</p> <pre><code>[('1', 'text\ntext\n\n'), ('20', 'text\n\n'), ('21', 'text\ntext\ntext\n\n'), ('22', ' \n\n'), ('25', 'aa\naa\naaaaa')] </code></pre> <p>Now, this is ok, but I want to change the regex, so that I can get the number of groups to vary depending on the number of lines. Meaning:</p> <ul> <li>for the first line, now, I get two groups: <ol> <li><code>1</code></li> <li><code>text\ntext\n\n</code></li> </ol></li> </ul> <p>I'd like to get instead:</p> <ol> <li><code>1</code></li> <li>((<code>text\n</code>), (<code>text\n\n</code>)) &lt;-- those should be somehow in the same group but separated, each in his own <em>subgroup</em>. Somehow I need to know they both belong to <code>1</code> field, but are sepparate lines.</li> </ol> <p>So, In python, the desired result for that file would be:</p> <pre><code>[('1', '(text\n), (text\n\n)'), ('20', 'text\n\n'), ('21', '(text\n), (text\n), (text\n\n)'), ('22', ' \n\n'), ('25', '(aa\n), (aa\n), (aaaaa)')] </code></pre> <p>Is this possible with regex? Could this be achieved with some nice string manipulation instead ?</p>
0
2016-08-19T09:11:06Z
39,043,680
<p>You may use a simple trick: after getting the matches with your regex, run a <code>.+\n*</code> regex over the Group 2 value:</p> <pre><code>import re p = re.compile(r'^:([^:]+):([^:]+)', re.MULTILINE) s = ":1:text\ntext\n\n:20:text\n\n:21:text\ntext\ntext\n\n:22: \n\n:25:aa\naa\naaaaa" print([[x.group(1)] + re.findall(r".+\n*", x.group(2)) for x in p.finditer(s)]) </code></pre> <p>Here, </p> <ul> <li><code>p.finditer(s)</code> finds all matches in the string using your regex</li> <li><code>[x.group(1)]</code> - a list created from the first group contents</li> <li><code>re.findall(r".+\n*", x.group(2))</code> - fetches individual lines from Group 2 contents (with trailing newlines, 0 or more)</li> <li><code>[] + re.findall</code> - combining the lists into 1.</li> </ul> <p>Result is </p> <p><code>[['1', 'text\n', 'text\n\n'], ['20', 'text\n\n'], ['21', 'text\n', 'text\n', 'text\n\n'], ['22', ' \n\n'], ['25', 'aa\n', 'aa\n', 'aaaaa']]</code></p> <p>Another approach: match all the substrings with your pattern and then use a <code>re.sub</code> to add <code>), (</code> between the lines ending with optional newlines:</p> <pre><code>[(x, "({})".format(re.sub(r".+(?!\n*$)\n+", r"\g&lt;0&gt;), (", y))) for x, y in p.findall(s)] </code></pre> <p>Result:</p> <p><code>[('1', '(text\n), (text\n\n)'), ('20', '(text\n\n)'), ('21', '(text\n), (text\n), (text\n\n)'), ('22', '( \n\n)'), ('25', '(aa\n), (aa\n), (aaaaa)')]</code></p> <p>See the <a href="https://ideone.com/DPP9mq" rel="nofollow">Python 3 demo</a></p> <p>Here:</p> <ul> <li><code>p.findall(s)</code> - grabs all the matches in the form of a list of tuples containing your capture group contents using your regex</li> <li><code>(x, "({})".format(re.sub(r".+(?!\n*$)\n+", r"\g&lt;0&gt;), (", y)))</code> - creates a tuple from Group 1 contents and Group 2 contents that are a bit modified with the <code>re.sub</code> the way described below</li> <li><code>.+(?!\n*$)\n+</code> - pattern that matches 1+ characters other 
than newline and then 1+ newline symbols if they are not at the end of the string. If they are at the end of the string, there will be no replacement made (to avoid <code>, ()</code> at the end). The <code>\g&lt;0&gt;</code> in the replacement string is re-inserting the whole match back into the resulting string and appends <code>), (</code> to it.</li> </ul>
1
2016-08-19T16:20:37Z
[ "python", "regex", "python-3.x", "split" ]
Why do every virtual environment use(point) same python version in pyenv?
39,035,288
<p>I'm using <code>pyenv</code>, <code>virtualenv</code>, 'autoev` for setting my virtual environment.</p> <p>I install <code>Python3.5.1</code>, <code>Python2.7.9</code> and create several project virtual environments.</p> <p>But when I activate each environment and check python version by <code>which python</code>: they point same <code>python</code> execution files : </p> <ol> <li><code>Python 3.5.1 env</code></li> </ol> <p>(chacha_dabang) Chois@Chois-MacPro $ which python <br> /Users/Chois/.pyenv/shims/python</p> <p><br></p> <p>(chacha_dabang) Chois@Chois-MacPro $ which pip <br> /Users/Chois/.pyenv/shims/pip</p> <p><br></p> <ol start="2"> <li><p>Python 2.7.9 env</p> <p>(pycon2016) Chois@Chois-MacPro $ (master)which python<br> /Users/Chois/.pyenv/shims/python<br> (pycon2016) Chois@Chois-MacPro $ (master)which pip<br> /Users/Chois/.pyenv/shims/pip<br></p></li> </ol> <p>And I change my directory to : /Users/Chois/.pyenv/shims </p> <pre><code>Chois@Chois-MacPro pyconapac-2016 $ (master)cd ~/.pyenv/shims/ Chois@Chois-MacPro shims $ls 2to3 easy_install-2.7 iptest3 nosetests-3.4 pip2.7 python3 rst2xml.py 2to3-3.5 easy_install-3.5 ipython painter.py pip3 python3-config rstpep2html.py __pycache__ enhancer.py ipython3 painter.pyc pip3.5 python3.5 smtpd.py activate enhancer.pyc jsonschema pep8 player.py python3.5-config sphinx-apidoc activate.csh explode.py jupyter pilconvert.py player.pyc python3.5m sphinx-autogen activate.fish explode.pyc jupyter-kernelspec pilconvert.pyc pybabel python3.5m-config sphinx-build activate_this.py f2py3.5 jupyter-migrate pildriver.py pydoc pyvenv sphinx-quickstart coverage gifmaker.py jupyter-nbconvert pildriver.pyc pydoc3 pyvenv-3.5 sqlformat coverage-3.5 gifmaker.pyc jupyter-nbextension pilfile.py pydoc3.5 rst2html.py thresholder.py coverage3 idle jupyter-notebook pilfile.pyc pygmentize rst2latex.py thresholder.pyc createfontdatachunk.py idle3 jupyter-qtconsole pilfont.py python rst2man.py viewer.py createfontdatachunk.pyc idle3.5 
jupyter-serverextension pilfont.pyc python-config rst2odt.py viewer.pyc django-admin ipcluster jupyter-troubleshoot pilprint.py python2 rst2odt_prepstyles.py virtualenv django-admin.py ipcontroller jupyter-trust pilprint.pyc python2-config rst2pseudoxml.py waitress-serve django-admin.pyc ipengine jwt pip python2.7 rst2s5.py wheel easy_install iptest nosetests pip2 python2.7-config rst2xetex.py </code></pre> <p>They have both python2 and python3...</p> <p>I think that when I set certain project's <code>virtualenv</code>, it is supposed to on its own virtualenv's <code>python</code> like this :</p> <pre><code>Chois@Chois-MacPro bin $pwd /Users/Chois/.pyenv/versions/chacha_dabang/bin Chois@Chois-MacPro bin $ls __pycache__ easy_install iptest3 jupyter-qtconsole pildriver.py python rst2xml.py activate easy_install-3.5 ipython jupyter-serverextension pilfile.py python3 rstpep2html.py activate.csh enhancer.py ipython3 jupyter-troubleshoot pilfont.py rst2html.py sphinx-apidoc activate.fish explode.py jsonschema jupyter-trust pilprint.py rst2latex.py sphinx-autogen coverage f2py3.5 jupyter jwt pip rst2man.py sphinx-build coverage-3.5 gifmaker.py jupyter-kernelspec nosetests pip3 rst2odt.py sphinx-quickstart coverage3 ipcluster jupyter-migrate nosetests-3.4 pip3.5 rst2odt_prepstyles.py sqlformat createfontdatachunk.py ipcontroller jupyter-nbconvert painter.py player.py rst2pseudoxml.py thresholder.py django-admin ipengine jupyter-nbextension pep8 pybabel rst2s5.py viewer.py django-admin.py iptest jupyter-notebook pilconvert.py pygmentize rst2xetex.py waitress-serve </code></pre> <p>So, When I type <code>which python</code> in my <code>chacha_dabang</code> virtualenv, it should be point to : <code>/Users/Chois/.pyenv/versions/chacha_dabang/bin/python</code>.</p> <p>How can I fix it?</p>
2
2016-08-19T09:13:23Z
39,035,384
<p>To change the <code>PYTHONPATH</code> used in a <code>virtualenv</code>, you will have to add this line to your virtualenv's <code>bin/activate</code> file:</p> <p>`export PYTHONPATH="/correct_path/"</p> <p><br> So in your case it should be <code>export PYTHONPATH="/Users/Chois/.pyenv/versions/chacha_dabang/bin/python"</code></p>
0
2016-08-19T09:17:41Z
[ "python", "virtualenv", "pyenv" ]
Why do every virtual environment use(point) same python version in pyenv?
39,035,288
<p>I'm using <code>pyenv</code>, <code>virtualenv</code>, 'autoev` for setting my virtual environment.</p> <p>I install <code>Python3.5.1</code>, <code>Python2.7.9</code> and create several project virtual environments.</p> <p>But when I activate each environment and check python version by <code>which python</code>: they point same <code>python</code> execution files : </p> <ol> <li><code>Python 3.5.1 env</code></li> </ol> <p>(chacha_dabang) Chois@Chois-MacPro $ which python <br> /Users/Chois/.pyenv/shims/python</p> <p><br></p> <p>(chacha_dabang) Chois@Chois-MacPro $ which pip <br> /Users/Chois/.pyenv/shims/pip</p> <p><br></p> <ol start="2"> <li><p>Python 2.7.9 env</p> <p>(pycon2016) Chois@Chois-MacPro $ (master)which python<br> /Users/Chois/.pyenv/shims/python<br> (pycon2016) Chois@Chois-MacPro $ (master)which pip<br> /Users/Chois/.pyenv/shims/pip<br></p></li> </ol> <p>And I change my directory to : /Users/Chois/.pyenv/shims </p> <pre><code>Chois@Chois-MacPro pyconapac-2016 $ (master)cd ~/.pyenv/shims/ Chois@Chois-MacPro shims $ls 2to3 easy_install-2.7 iptest3 nosetests-3.4 pip2.7 python3 rst2xml.py 2to3-3.5 easy_install-3.5 ipython painter.py pip3 python3-config rstpep2html.py __pycache__ enhancer.py ipython3 painter.pyc pip3.5 python3.5 smtpd.py activate enhancer.pyc jsonschema pep8 player.py python3.5-config sphinx-apidoc activate.csh explode.py jupyter pilconvert.py player.pyc python3.5m sphinx-autogen activate.fish explode.pyc jupyter-kernelspec pilconvert.pyc pybabel python3.5m-config sphinx-build activate_this.py f2py3.5 jupyter-migrate pildriver.py pydoc pyvenv sphinx-quickstart coverage gifmaker.py jupyter-nbconvert pildriver.pyc pydoc3 pyvenv-3.5 sqlformat coverage-3.5 gifmaker.pyc jupyter-nbextension pilfile.py pydoc3.5 rst2html.py thresholder.py coverage3 idle jupyter-notebook pilfile.pyc pygmentize rst2latex.py thresholder.pyc createfontdatachunk.py idle3 jupyter-qtconsole pilfont.py python rst2man.py viewer.py createfontdatachunk.pyc idle3.5 
jupyter-serverextension pilfont.pyc python-config rst2odt.py viewer.pyc django-admin ipcluster jupyter-troubleshoot pilprint.py python2 rst2odt_prepstyles.py virtualenv django-admin.py ipcontroller jupyter-trust pilprint.pyc python2-config rst2pseudoxml.py waitress-serve django-admin.pyc ipengine jwt pip python2.7 rst2s5.py wheel easy_install iptest nosetests pip2 python2.7-config rst2xetex.py </code></pre> <p>They have both python2 and python3...</p> <p>I think that when I set certain project's <code>virtualenv</code>, it is supposed to on its own virtualenv's <code>python</code> like this :</p> <pre><code>Chois@Chois-MacPro bin $pwd /Users/Chois/.pyenv/versions/chacha_dabang/bin Chois@Chois-MacPro bin $ls __pycache__ easy_install iptest3 jupyter-qtconsole pildriver.py python rst2xml.py activate easy_install-3.5 ipython jupyter-serverextension pilfile.py python3 rstpep2html.py activate.csh enhancer.py ipython3 jupyter-troubleshoot pilfont.py rst2html.py sphinx-apidoc activate.fish explode.py jsonschema jupyter-trust pilprint.py rst2latex.py sphinx-autogen coverage f2py3.5 jupyter jwt pip rst2man.py sphinx-build coverage-3.5 gifmaker.py jupyter-kernelspec nosetests pip3 rst2odt.py sphinx-quickstart coverage3 ipcluster jupyter-migrate nosetests-3.4 pip3.5 rst2odt_prepstyles.py sqlformat createfontdatachunk.py ipcontroller jupyter-nbconvert painter.py player.py rst2pseudoxml.py thresholder.py django-admin ipengine jupyter-nbextension pep8 pybabel rst2s5.py viewer.py django-admin.py iptest jupyter-notebook pilconvert.py pygmentize rst2xetex.py waitress-serve </code></pre> <p>So, When I type <code>which python</code> in my <code>chacha_dabang</code> virtualenv, it should be point to : <code>/Users/Chois/.pyenv/versions/chacha_dabang/bin/python</code>.</p> <p>How can I fix it?</p>
2
2016-08-19T09:13:23Z
39,061,621
<p>No problem</p> <p>When you try to run Python, it first looks for a <code>.python-version</code> in the current directory to decide which version of python to run. If it doesn’t find this file, then it looks for the user-level file <code>~/.pyenv/version</code>.</p> <p>You can use <code>python --version</code> instead of <code>which python</code> .</p>
1
2016-08-21T06:57:13Z
[ "python", "virtualenv", "pyenv" ]
fast way to loop over whole python dictionary
39,035,291
<p>I have non-ordered data that sometimes I want to analyse by looking at all the entries and some other time I want to pick just one entry.</p> <p><code> p1 x1 x2 x3 x4 p2 x1 x2 x3 x4 p33 x1 x2 x3 x4 p3 x1 x2 x3 x4 p4 x1 x2 x3 x4 </code></p> <p><code>Dictionary</code> seems a nice format to store the data, as it is not sorted, and if I want to get <code>p33</code>, which might be anywhere in the table I can do that by <code>dict["p33"]</code>. This lookup will take some time, but I suppose is faster than looping on the whole data to find the line that I want (at least this is the advantage I have been advertised <code>dict</code> should buy me).</p> <p>If I want to look at the whole data, e.g. counting how many times x3 is zero, I should loop on all the lines and doing it by a for loop of the type <code>for item in dict.keys():</code> is too slow. I have the impression that getting the keys and then doing <code>dict[item]</code> make a lot of useless lookup, because for each item it has to find it in the dictionary, whereas for my goal would be good enough to read serially "as if it were a list".</p> <p>So I was wondering if there is a faster way to loop on all the entries of the dictionary. </p> <p>Thanks</p>
0
2016-08-19T09:13:33Z
39,036,018
<p>If its possible use numpy/pandas...</p> <p>For me Python is only for High Level Programming and Low Level is C++... So if possible use existing c++ functions which are in numpy pandas or other libs..</p> <p>Check it out...</p> <pre><code>&gt;&gt;&gt; import numpy as np, pandas as pd &gt;&gt;&gt; p1 = np.arange(10) &gt;&gt;&gt; dct = dict( ... p1 = np.arange(10), ... p2 = np.ones(10), ... p3 = np.zeros(10), ... p33 = np.ones(10)*10, ... p4 = np.linspace(0,1,10)) &gt;&gt;&gt; &gt;&gt;&gt; dct {'p2': array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]), 'p33': array([ 10., 10., 10., 10., 10., 10., 10., 10., 10., 10.]), 'p1': array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), 'p4': array([ 0. , 0.11111111, 0.22222222, 0.33333333, 0.44444444, 0.55555556, 0.66666667, 0.77777778, 0.88888889, 1. ]), 'p3': array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])} &gt;&gt;&gt; from pprint import pprint as pr &gt;&gt;&gt; pr(dct) {'p1': array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), 'p2': array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]), 'p3': array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]), 'p33': array([ 10., 10., 10., 10., 10., 10., 10., 10., 10., 10.]), 'p4': array([ 0. , 0.11111111, 0.22222222, 0.33333333, 0.44444444, 0.55555556, 0.66666667, 0.77777778, 0.88888889, 1. 
])} &gt;&gt;&gt; df = pd.DataFrame(dct) &gt;&gt;&gt; df p1 p2 p3 p33 p4 0 0 1.0 0.0 10.0 0.000000 1 1 1.0 0.0 10.0 0.111111 2 2 1.0 0.0 10.0 0.222222 3 3 1.0 0.0 10.0 0.333333 4 4 1.0 0.0 10.0 0.444444 5 5 1.0 0.0 10.0 0.555556 6 6 1.0 0.0 10.0 0.666667 7 7 1.0 0.0 10.0 0.777778 8 8 1.0 0.0 10.0 0.888889 9 9 1.0 0.0 10.0 1.000000 &gt;&gt;&gt; df.T 0 1 2 3 4 5 6 \ p1 0.0 1.000000 2.000000 3.000000 4.000000 5.000000 6.000000 p2 1.0 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 p3 0.0 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 p33 10.0 10.000000 10.000000 10.000000 10.000000 10.000000 10.000000 p4 0.0 0.111111 0.222222 0.333333 0.444444 0.555556 0.666667 7 8 9 p1 7.000000 8.000000 9.0 p2 1.000000 1.000000 1.0 p3 0.000000 0.000000 0.0 p33 10.000000 10.000000 10.0 p4 0.777778 0.888889 1.0 &gt;&gt;&gt; df = df.T &gt;&gt;&gt; df.columns = ['x%d'%(n+1) for n in df.columns.values] &gt;&gt;&gt; df x1 x2 x3 x4 x5 x6 x7 \ p1 0.0 1.000000 2.000000 3.000000 4.000000 5.000000 6.000000 p2 1.0 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 p3 0.0 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 p33 10.0 10.000000 10.000000 10.000000 10.000000 10.000000 10.000000 p4 0.0 0.111111 0.222222 0.333333 0.444444 0.555556 0.666667 x8 x9 x10 p1 7.000000 8.000000 9.0 p2 1.000000 1.000000 1.0 p3 0.000000 0.000000 0.0 p33 10.000000 10.000000 10.0 p4 0.777778 0.888889 1.0 &gt;&gt;&gt; df.x3 p1 2.000000 p2 1.000000 p3 0.000000 p33 10.000000 p4 0.222222 Name: x3, dtype: float64 &gt;&gt;&gt; df.x3 == 0 p1 False p2 False p3 True p33 False p4 False Name: x3, dtype: bool &gt;&gt;&gt; np.sum(df.x3 == 0) 1 &gt;&gt;&gt; </code></pre>
0
2016-08-19T09:52:12Z
[ "python", "performance", "dictionary", "bigdata" ]
JoinableQueue join() method blocking main thread even after task_done()
39,035,329
<p>In below code, if I put <code>daemon = True</code> , consumer will quit before reading all queue entries. If consumer is non-daemon, Main thread is always blocked even after the <code>task_done()</code> for all the entries.</p> <pre><code>from multiprocessing import Process, JoinableQueue import time def consumer(queue): while True: final = queue.get() print (final) queue.task_done() def producer1(queue): for i in "QWERTYUIOPASDFGHJKLZXCVBNM": queue.put(i) if __name__ == "__main__": queue = JoinableQueue(maxsize=100) p1 = Process(target=consumer, args=((queue),)) p2 = Process(target=producer1, args=((queue),)) #p1.daemon = True p1.start() p2.start() print(p1.is_alive()) print (p2.is_alive()) for i in range(1, 10): queue.put(i) time.sleep(0.01) queue.join() </code></pre>
0
2016-08-19T09:14:59Z
39,060,824
<p>Let's see what—I believe—is happening here:</p> <ol> <li>both processes are being started.</li> <li>the <code>consumer</code> process starts its loop and blocks until a value is received from the queue.</li> <li>the <code>producer1</code> process feeds the queue 26 times with a letter while the main process feeds the queue 9 times with a number. The order in which letters or numbers are being fed is not guaranteed—a number could very well show up before a letter.</li> <li>when both the <code>producer1</code> and the main processes are done with feeding their data, the queue is being joined. No problem here, the queue can be joined since all the buffered data has been consumed and <code>task_done()</code> has been called after each read.</li> <li>the <code>consumer</code> process is still running but is blocked until more data to consume show up.</li> </ol> <p>Looking at your code, I believe that you are confusing the concept of joining processes with the one of joining queues. What you most likely want here is to join processes, you probably don't need a joinable queue at all.</p> <pre><code>#!/usr/bin/env python3 from multiprocessing import Process, Queue import time def consumer(queue): for final in iter(queue.get, 'STOP'): print(final) def producer1(queue): for i in "QWERTYUIOPASDFGHJKLZXCVBNM": queue.put(i) if __name__ == "__main__": queue = Queue(maxsize=100) p1 = Process(target=consumer, args=((queue),)) p2 = Process(target=producer1, args=((queue),)) p1.start() p2.start() print(p1.is_alive()) print(p2.is_alive()) for i in range(1, 10): queue.put(i) time.sleep(0.01) queue.put('STOP') p1.join() p2.join() </code></pre> <p>Also your <code>producer1</code> exits on its own after feeding all the letters but you need a way to tell your <code>consumer</code> process to exit when there won't be any more data for it to process. 
You can do this by sending a sentinel, here I chose the string <code>'STOP'</code> but it can be anything.</p> <p>In fact, this code is not great since the <code>'STOP'</code> sentinel could be received before some letters, thus both causing some letters to not be processed but also a deadlock because the processes are trying to join even though the queue still contains some data. But this is a different problem.</p>
0
2016-08-21T04:24:31Z
[ "python", "multiprocessing" ]
python - Implementing Sobel operators with python without opencv
39,035,510
<p>Given a greyscale 8 bit image (2D array with values from 0 - 255 for pixel intensity), I want to implement the Sobel operators (mask) on an image. The Sobel function below basically loops around a given pixel,applies the following weight to the pixels: <a href="http://i.stack.imgur.com/1N67K.png" rel="nofollow"><img src="http://i.stack.imgur.com/1N67K.png" alt="enter image description here"></a> </p> <p><a href="http://i.stack.imgur.com/Ut0Aq.png" rel="nofollow"><img src="http://i.stack.imgur.com/Ut0Aq.png" alt="enter image description here"></a></p> <p>And then aplies the given formula:</p> <p><a href="http://i.stack.imgur.com/aBBUL.png" rel="nofollow"><img src="http://i.stack.imgur.com/aBBUL.png" alt="enter image description here"></a></p> <p>Im trying to implement the formulas from this link: <a href="http://homepages.inf.ed.ac.uk/rbf/HIPR2/sobel.htm" rel="nofollow">http://homepages.inf.ed.ac.uk/rbf/HIPR2/sobel.htm</a></p> <pre><code>import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg import Image def Sobel(arr,rstart, cstart,masksize, divisor): sum = 0; x = 0 y = 0 for i in range(rstart, rstart+masksize, 1): x = 0 for j in range(cstart, cstart+masksize, 1): if x == 0 and y == 0: p1 = arr[i][j] if x == 0 and y == 1: p2 = arr[i][j] if x == 0 and y == 2: p3 = arr[i][j] if x == 1 and y == 0: p4 = arr[i][j] if x == 1 and y == 1: p5 = arr[i][j] if x == 1 and y == 2: p6 = arr[i][j] if x == 2 and y == 0: p7 = arr[i][j] if x == 2 and y == 1: p8 = arr[i][j] if x == 2 and y == 2: p9 = arr[i][j] x +=1 y +=1 return np.abs((p1 + 2*p2 + p3) - (p7 + 2*p8+p9)) + np.abs((p3 + 2*p6 + p9) - (p1 + 2*p4 +p7)) def padwithzeros(vector, pad_width, iaxis, kwargs): vector[:pad_width[0]] = 0 vector[-pad_width[1]:] = 0 return vector im = Image.open('charlie.jpg') im.show() img = np.asarray(im) img.flags.writeable = True p = 1 k = 2 m = img.shape[0] n = img.shape[1] masksize = 3 img = np.lib.pad(img, p, padwithzeros) #this function padds image with zeros 
to cater for pixels on the border. x = 0 y = 0 for row in img: y = 0 for col in row: if not (x &lt; p or y &lt; p or y &gt; (n-k) or x &gt; (m-k)): img[x][y] = Sobel(img, x-p,y-p,masksize,masksize*masksize) y = y + 1 x = x + 1 img2 = Image.fromarray(img) img2.show() </code></pre> <p>Given this greyscale 8 bit image</p> <p><a href="http://i.stack.imgur.com/8zINU.gif" rel="nofollow"><img src="http://i.stack.imgur.com/8zINU.gif" alt="enter image description here"></a></p> <p>I get this when applying the function:</p> <p><a href="http://i.stack.imgur.com/MPM6y.png" rel="nofollow"><img src="http://i.stack.imgur.com/MPM6y.png" alt="enter image description here"></a></p> <p>but should get this:</p> <p><a href="http://i.stack.imgur.com/ECAIK.gif" rel="nofollow"><img src="http://i.stack.imgur.com/ECAIK.gif" alt="enter image description here"></a></p> <p>I have implemented other gaussian filters with python, I'm not sure where I'm going wrong here?</p>
2
2016-08-19T09:24:08Z
39,037,728
<p>If using NumPy ans SciPy is not a problem, then a simple solution is to use the SciPy's <a href="http://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.signal.convolve2d.html" rel="nofollow"><code>convolve2d()</code></a>.</p> <pre><code>import numpy as np from scipy.signal import convolve2d from scipy.ndimage import imread # Load sample data with np.DataSource().open("http://i.stack.imgur.com/8zINU.gif", "rb") as f: img = imread(f, mode="I") # Prepare the kernels a1 = np.matrix([1, 2, 1]) a2 = np.matrix([-1, 0, 1]) Kx = a1.T * a2 Ky = a2.T * a1 # Apply the Sobel operator Gx = convolve2d(img, Kx, "same", "symm") Gy = convolve2d(img, Ky, "same", "symm") G = np.sqrt(Gx**2 + Gy**2) # or using the absolute values G = np.abs(Gx) + np.abs(Gy) </code></pre>
0
2016-08-19T11:17:57Z
[ "python", "image", "vision", "sobel" ]