Columns: qid (int64), question (string), date (string), metadata (list), response_j (string), response_k (string)
73064635
I was trying to capture a video in kivy/android using camera4kivy, but it seems that this function won't work. I tried `capture_video` with location, subdir and filename (`**kwargs`), but still nothing happened.

```
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.image import Image
from camera4kivy.preview import Preview


class CamApp(App):
    def build(self):
        self.cam = Preview()
        self.cam.connect_camera(enable_analyze_pixels=True)
        self.cam.select_camera('1')
        box1 = BoxLayout()
        box1.add_widget(self.cam)
        try:
            self.cam.capture_video(location='shared', subdir='myapp', name='myvid')
        except Exception as e:
            print(e)
        return box1

    def on_stop(self):
        self.cam.disconnect_camera()
        return super().on_stop()


if __name__ == '__main__':
    CamApp().run()
```

```
07-21 16:17:14.405 28320 29758 I python : JVM exception occurred: Attempt to invoke virtual method 'void androidx.camera.core.VideoCapture.startRecording(androidx.camera.core.VideoCapture$OutputFileOptions, java.util.concurrent.Executor, androidx.camera.core.VideoCapture$OnVideoSavedCallback)' on a null object reference java.lang.NullPointerException
07-21 16:17:14.406 28320 28320 I python : Traceback (most recent call last):
07-21 16:17:14.406 28320 28320 I python :   File "/home/testapp/.buildozer/android/platform/build-arm64-v8a/build/python-installs/test/arm64-v8a/android/runnable.py", line 38, in run
07-21 16:17:14.407 28320 28320 I python :   File "/home/testapp/.buildozer/android/platform/build-arm64-v8a/build/python-installs/test/arm64-v8a/camera4kivy/preview_camerax.py", line 289, in do_select_camera
07-21 16:17:14.407 28320 28320 I python :   File "jnius/jnius_export_class.pxi", line 857, in jnius.jnius.JavaMethod.__call__
07-21 16:17:14.407 28320 28320 I python :   File "jnius/jnius_export_class.pxi", line 954, in jnius.jnius.JavaMethod.call_method
07-21 16:17:14.407 28320 28320 I python :   File "jnius/jnius_utils.pxi", line 91, in jnius.jnius.check_exception
07-21 16:17:14.407 28320 28320 I python : jnius.jnius.JavaException: JVM exception occurred: Attempt to invoke virtual method 'void androidx.camera.lifecycle.ProcessCameraProvider.unbindAll()' on a null object reference java.lang.NullPointerException
07-21 16:17:14.408 28320 29758 I python : [WARNING] [Base        ] Unknown provider
07-21 16:17:14.408 28320 29758 I python : [INFO   ] [Base        ] Start application main loop
07-21 16:17:14.411 28320 29758 I python : [INFO   ] [Base        ] Leaving application in progress...
07-21 16:17:14.412 28320 29758 I python : Traceback (most recent call last):
07-21 16:17:14.412 28320 29758 I python :   File "/home/testapp/.buildozer/android/app/main.py", line 31, in <module>
07-21 16:17:14.412 28320 29758 I python :   File "/home/testapp/.buildozer/android/platform/build-arm64-v8a/build/python-installs/test/arm64-v8a/kivy/app.py", line 955, in run
07-21 16:17:14.412 28320 29758 I python :   File "/home/testapp/.buildozer/android/platform/build-arm64-v8a/build/python-installs/test/arm64-v8a/kivy/base.py", line 574, in runTouchApp
07-21 16:17:14.413 28320 29758 I python :   File "/home/testapp/.buildozer/android/platform/build-arm64-v8a/build/python-installs/test/arm64-v8a/kivy/base.py", line 339, in mainloop
07-21 16:17:14.413 28320 29758 I python :   File "/home/testapp/.buildozer/android/platform/build-arm64-v8a/build/python-installs/test/arm64-v8a/kivy/base.py", line 391, in idle
07-21 16:17:14.413 28320 29758 I python :   File "/home/testapp/.buildozer/android/platform/build-arm64-v8a/build/python-installs/test/arm64-v8a/kivy/clock.py", line 783, in tick_draw
07-21 16:17:14.414 28320 29758 I python :   File "kivy/_clock.pyx", line 662, in kivy._clock.CyClockBase._process_events_before_frame
07-21 16:17:14.414 28320 29758 I python :   File "kivy/_clock.pyx", line 708, in kivy._clock.CyClockBase._process_events_before_frame
07-21 16:17:14.414 28320 29758 I python :   File "kivy/_clock.pyx", line 704, in kivy._clock.CyClockBase._process_events_before_frame
07-21 16:17:14.414 28320 29758 I python :   File "kivy/_clock.pyx", line 218, in kivy._clock.ClockEvent.tick
07-21 16:17:14.414 28320 29758 I python :   File "/home/testapp/.buildozer/android/platform/build-arm64-v8a/build/python-installs/test/arm64-v8a/kivy/uix/anchorlayout.py", line 122, in do_layout
07-21 16:17:14.415 28320 29758 I python :   File "kivy/properties.pyx", line 520, in kivy.properties.Property.__set__
07-21 16:17:14.415 28320 29758 I python :   File "kivy/properties.pyx", line 1478, in kivy.properties.ReferenceListProperty.set
07-21 16:17:14.415 28320 29758 I python :   File "kivy/properties.pyx", line 606, in kivy.properties.Property._dispatch
07-21 16:17:14.415 28320 29758 I python :   File "kivy/_event.pyx", line 1307, in kivy._event.EventObservers.dispatch
07-21 16:17:14.416 28320 29758 I python :   File "kivy/_event.pyx", line 1213, in kivy._event.EventObservers._dispatch
07-21 16:17:14.416 28320 29758 I python :   File "/home/testapp/.buildozer/android/platform/build-arm64-v8a/build/python-installs/test/arm64-v8a/camera4kivy/preview_camerax.py", line 159, in on_size
07-21 16:17:14.416 28320 29758 I python :   File "/home/testapp/.buildozer/android/platform/build-arm64-v8a/build/python-installs/test/arm64-v8a/camera4kivy/preview_camerax.py", line 217, in stop_capture_video
07-21 16:17:14.416 28320 29758 I python :   File "jnius/jnius_export_class.pxi", line 857, in jnius.jnius.JavaMethod.__call__
07-21 16:17:14.417 28320 29758 I python :   File "jnius/jnius_export_class.pxi", line 954, in jnius.jnius.JavaMethod.call_method
07-21 16:17:14.417 28320 29758 I python :   File "jnius/jnius_utils.pxi", line 91, in jnius.jnius.check_exception
07-21 16:17:14.417 28320 29758 I python : jnius.jnius.JavaException: JVM exception occurred: Attempt to invoke virtual method 'void androidx.camera.core.VideoCapture.stopRecording()' on a null object reference java.lang.NullPointerException
07-21 16:17:14.417 28320 29758 I python : Python for android ended.
07-21 16:17:14.540 28320 29758 F com.moria.test: mutex.cc:340] destroying mutex with owner or contenders. Owner:29737
07-21 16:17:14.541 28320 29737 F com.moria.test: debugger_interface.cc:356] Check failed: removed_it == removed_entries.end()
```
2022/07/21
[ "https://Stackoverflow.com/questions/73064635", "https://Stackoverflow.com", "https://Stackoverflow.com/users/19499522/" ]
You can use the `Map` collection:

```
new Map(fooArr.map(i => [i.name, i.surname]));
```

As [MDN says about the `Map` collection](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Map):

> The Map object holds key-value pairs and remembers the original insertion order of the keys. Any value (both objects and primitive values) may be used as either a key or a value.

An example:

```js
let fooArr = [
  { name: 'name 1', surname: 'surname 1' },
  { name: 'name 2', surname: 'surname 2' }
];

let result = new Map(fooArr.map(i => [i.name, i.surname]));
console.log(JSON.stringify([...result]));
```

As an alternative, you can use `Set`, or just create a plain object, since objects hold key-value pairs too. An example:

```js
let fooArr = [
  { name: 'foo', surname: 'bar' },
  { name: 'hello', surname: 'world' }
];

let object = fooArr.reduce(
  (obj, item) => Object.assign(obj, { [item.name]: item.surname }), {});

console.log(object)
```
When dealing with objects you can use `Object.keys()`, `Object.values()` and `Object.entries()`, which do a similar thing to their similarly named Python counterparts (`keys()`, `values()`, `items()`).

```
const obj = { person: 'John' }
console.log(Object.entries(obj)) // [['person', 'John']]
```

Some differences from Python...

* Unlike Python, those functions return Arrays.
* (Python returns an iterable, which is not a list, and you may need to cast it to `list()` for indexed access.)

Arrays are iterables (as lists are in Python), so you can write:

```js
// loop-over-iterable syntax, similar to Python's "for ... in"
for (const [k, v] of Object.entries(obj)) {

}
```

Extra info on objects:

* In JS almost everything is an Object, so there is no difference between writing `obj.property` and `obj['property']`. In Python, class instances and dictionaries require slightly different handling.
* JS Object keys can only be strings. If you pass a non-string as an object key, it will be cast to a string.
* JS also has its `Map` and `Set` collections. Those classes have iterators of their own (instead of using `Object.X`, they have their own `.entries()` etc.). You usually won't need `Map` and `Set`, as JS Objects already cover most needs.
53900909
I'm new to Python. Why is this code not printing the top 50 films?

```
#!/usr/bin/python3
import requests
from bs4 import BeautifulSoup
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module='bs4')

# website
url = "https://www.imdb.com/search/title?release_date="
year = input("Enter you're fav year for movie display: ")
output = url + year

# extracting the info from website
soup = BeautifulSoup(output, "lxml")

# Display the top 50 films
i = 1
movieList = soup.find_all('div', attrs={'class': 'lister-item mode-advanced'})
for x in movieList:
    div = x.find('div', attrs={'class': 'lister-item-content'})
    print(str(i) + '.')
    header = x.findChild('h', attrs={'class': 'lister-item-header'})
    print('Movie: ' + str(header[0].findChild('a'))[0].contents[0].encode('utf-8').decode('ascii', 'ignore'))
    # and can someone tell me what this is... because I'm following some
    # guide and I didn't understand this line.
    i += 1
```

My current output is empty; I can't see anything on the terminal:

```
0/50 [00:00<?, ?it/s]1.
Traceback (most recent call last):
  File "movie_recom.py", line 26, in <module>
    print('Movie: ' + str((header[0].findChild('a'))
TypeError: 'NoneType' object is not subscriptable
```

I need this output:

```
Most Popular Feature Films Released 2018-01-01:
1. Movie: Avengers: Infinity War
2. Movie: Venom
3. Movie: A Quiet Place
4. Movie: Black Panther
5. Movie: I Feel Pretty
6. Movie: Deadpool 2
7. Movie: Ready Player One
8. Movie: Super Troopers 2
9. Movie: Rampage
10. Movie: Den of Thieves
```

and so on until 50. Thanks in advance.
2018/12/23
[ "https://Stackoverflow.com/questions/53900909", "https://Stackoverflow.com", "https://Stackoverflow.com/users/10222187/" ]
You haven't actually issued a request yet; fetch the page first, then parse the response content. This should get the full list:

```
r = requests.get(output)
soup = BeautifulSoup(r.text, "lxml")

# Display the top 50 films
movieList = soup.find_all('div', attrs={'class': 'lister-item mode-advanced'})
for n, x in enumerate(movieList, 1):
    div = x.find('div', attrs={'class': 'lister-item-content'})
    print(str(n) + '.', div.find('a', href=True).text)
```

will return:

```
1. Aquaman
2. Mowgli: Legend of the Jungle
3. Spider-Man: Into the Spider-Verse
...
50. The Rookie
```
Thanks guys for the help, but I already solved it:

```
from tqdm import tqdm  # import needed for the progress bar used below

i = 1
movieList = soup.find_all('div', attrs={'class': 'lister-item mode-advanced'})
for x in tqdm(movieList):
    div = x.find('div', attrs={'class': 'lister-item-content'})
    # print(str(i) + '.')
    header = x.findChild('h3', attrs={'class': 'lister-item-header'})
    print(str(i) + '.' + header.findChild('a').text)
    i += 1
```
3413144
I am using Selenium RC to do some tests now, and the driver I use is Python. But now I face a problem: every time Selenium RC runs and opens a URL, it opens two windows, one for logging and the other for showing HTML content. But I can't close them all from the script. Here is my script:

```
#!/usr/bin/env python
#-*-coding:utf-8-*-
from selenium import selenium

def main():
    sel = selenium('localhost', 4444, '*firefox', 'http://www.sina.com.cn/')
    sel.start()
    try:
        sel.open('http://www.sina.com.cn/')
    except Exception, e:
        print e
    else:
        print sel.get_title()
    sel.close()
    sel.stop()

if __name__ == '__main__':
    main()
```

It is very easy to understand. What I really want is to close all windows that Selenium opens. I've tried close() and stop(), but neither works.
2010/08/05
[ "https://Stackoverflow.com/questions/3413144", "https://Stackoverflow.com", "https://Stackoverflow.com/users/411728/" ]
I've fixed this problem. It happened because I had installed firefox-bin, not firefox. Now that I've removed firefox-bin and installed firefox, it works. stop() will close all windows that Selenium opened. Thank you for the reminder, [AutomatedTester](https://stackoverflow.com/users/108827/automatedtester)
I might suggest issuing a system command from Python to close the Firefox windows. Bussiere
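For illustration only, such a system command could be issued with `subprocess`; the process names here are assumptions and differ per platform and Firefox build:

```python
import subprocess
import sys

# Hypothetical sketch: force-close every Firefox window via an OS command.
if sys.platform.startswith('win'):
    subprocess.call(['taskkill', '/IM', 'firefox.exe', '/F'])
else:
    subprocess.call(['pkill', '-f', 'firefox'])
```

This is a blunt instrument compared to Selenium's own stop(), since it kills the browser process outright.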
3413144
I am using Selenium RC to do some tests now, and the driver I use is Python. But now I face a problem: every time Selenium RC runs and opens a URL, it opens two windows, one for logging and the other for showing HTML content. But I can't close them all from the script. Here is my script:

```
#!/usr/bin/env python
#-*-coding:utf-8-*-
from selenium import selenium

def main():
    sel = selenium('localhost', 4444, '*firefox', 'http://www.sina.com.cn/')
    sel.start()
    try:
        sel.open('http://www.sina.com.cn/')
    except Exception, e:
        print e
    else:
        print sel.get_title()
    sel.close()
    sel.stop()

if __name__ == '__main__':
    main()
```

It is very easy to understand. What I really want is to close all windows that Selenium opens. I've tried close() and stop(), but neither works.
2010/08/05
[ "https://Stackoverflow.com/questions/3413144", "https://Stackoverflow.com", "https://Stackoverflow.com/users/411728/" ]
I had a similar case where my program opened many windows while scraping a webpage. Here is some sample code:

```
#!/usr/bin/python
import time
import webbrowser
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException

driver = webdriver.Firefox()
print "Browser fired-up!"
driver.get("https://www.something.com/")
driver.implicitly_wait(5)

while True:
    try:
        playlink = driver.find_element_by_xpath("/html/body/div[2]/div[1]/div/a")
        playlink.click()
        time.sleep(3)
    except NoSuchElementException:
        print "playlink Element not found "
    else:
        backbutton = driver.find_element_by_id("back-to-bing-text")
        backbutton.click()

    try:
        quizlink = driver.find_element_by_xpath("/html/body/div[2]/div[1]/div[1]/ul/li[1]/a/span/span[1]")
        quizlink.click()
    except NoSuchElementException:
        print "quiz1 Element not found "
    else:
        print "quiz1 clicked"

driver.quit()
```

"driver.close()" bugged me for a week, as I believed it would close all the windows. "driver.quit()" is what terminates all the processes and closes all the windows.
I might suggest issuing a system command from Python to close the Firefox windows. Bussiere
3413144
I am using Selenium RC to do some tests now, and the driver I use is Python. But now I face a problem: every time Selenium RC runs and opens a URL, it opens two windows, one for logging and the other for showing HTML content. But I can't close them all from the script. Here is my script:

```
#!/usr/bin/env python
#-*-coding:utf-8-*-
from selenium import selenium

def main():
    sel = selenium('localhost', 4444, '*firefox', 'http://www.sina.com.cn/')
    sel.start()
    try:
        sel.open('http://www.sina.com.cn/')
    except Exception, e:
        print e
    else:
        print sel.get_title()
    sel.close()
    sel.stop()

if __name__ == '__main__':
    main()
```

It is very easy to understand. What I really want is to close all windows that Selenium opens. I've tried close() and stop(), but neither works.
2010/08/05
[ "https://Stackoverflow.com/questions/3413144", "https://Stackoverflow.com", "https://Stackoverflow.com/users/411728/" ]
I had a similar case where my program opened many windows while scraping a webpage. Here is some sample code:

```
#!/usr/bin/python
import time
import webbrowser
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException

driver = webdriver.Firefox()
print "Browser fired-up!"
driver.get("https://www.something.com/")
driver.implicitly_wait(5)

while True:
    try:
        playlink = driver.find_element_by_xpath("/html/body/div[2]/div[1]/div/a")
        playlink.click()
        time.sleep(3)
    except NoSuchElementException:
        print "playlink Element not found "
    else:
        backbutton = driver.find_element_by_id("back-to-bing-text")
        backbutton.click()

    try:
        quizlink = driver.find_element_by_xpath("/html/body/div[2]/div[1]/div[1]/ul/li[1]/a/span/span[1]")
        quizlink.click()
    except NoSuchElementException:
        print "quiz1 Element not found "
    else:
        print "quiz1 clicked"

driver.quit()
```

"driver.close()" bugged me for a week, as I believed it would close all the windows. "driver.quit()" is what terminates all the processes and closes all the windows.
I've fixed this problem. It happened because I had installed firefox-bin, not firefox. Now that I've removed firefox-bin and installed firefox, it works. stop() will close all windows that Selenium opened. Thank you for the reminder, [AutomatedTester](https://stackoverflow.com/users/108827/automatedtester)
11479955
I started learning Python today from the tutorial on the official site. When reading about **filter(function, sequence)** I thought of making a function that returns whether a number is prime, to use it with filter.

```
notDividedBy = [2,3,4,5,6,7,8,9]

def prime(num):
    """True if num is prime, false otherwise"""
    copy = notDividedBy[:]
    check = True
    if num in copy:
        copy.remove(num)
    for x in copy:
        if num % x == 0:
            check = False
            break
    return check
```

The above code works in the shell. My question is: since I feel that, although it is a solution, it is not the most elegant one, can anyone transform this code into something more Python-like (better structure? fewer lines?)? I believe it would help me better understand the basics of the language. The thing is, don't use any imports or anything, just simple stuff.
2012/07/14
[ "https://Stackoverflow.com/questions/11479955", "https://Stackoverflow.com", "https://Stackoverflow.com/users/596298/" ]
Creating many, many copies of lists is not a particularly efficient way of doing things. Instead, use the `xrange()` (Python 2.x) or `range()` (Python 3) iterator. Here's one (naive) way you could implement a primality test:

```
from math import sqrt

def isPrime(n):
    if n < 2:
        return False
    if n == 2:
        return True
    if not n % 2:
        return False  # test if n is even

    # We've already removed all the even numbers, so no need to test for 2.
    # We only need to test up to sqrt(n), because any composite number can be
    # factored into 2 values, at least one of which is < sqrt(n).
    for i in xrange(3, int(sqrt(n)) + 1, 2):
        if not n % i:
            return False
    return True
```
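To tie this back to the question's original goal of using filter, a quick sketch in the same Python 2 style as the answer:

```python
# Keep only the primes from a sequence, as the question intended
# (Python 2: filter() returns a list here).
print filter(isPrime, range(2, 30))
# -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```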
One thing off the bat: if you are going to implement prime testing in this fashion, there's no reason to use an auxiliary array

```
def prime(num):
    """True if num is prime, false otherwise"""
    check = True
    # if num in copy:
    #     copy.remove(num)
    for x in range(2, num):  # was range(2, x-1), which used x before assignment
        if num % x == 0:
            check = False
            break
    return check
```
11479955
I started learning Python today from the tutorial on the official site. When reading about **filter(function, sequence)** I thought of making a function that returns whether a number is prime, to use it with filter.

```
notDividedBy = [2,3,4,5,6,7,8,9]

def prime(num):
    """True if num is prime, false otherwise"""
    copy = notDividedBy[:]
    check = True
    if num in copy:
        copy.remove(num)
    for x in copy:
        if num % x == 0:
            check = False
            break
    return check
```

The above code works in the shell. My question is: since I feel that, although it is a solution, it is not the most elegant one, can anyone transform this code into something more Python-like (better structure? fewer lines?)? I believe it would help me better understand the basics of the language. The thing is, don't use any imports or anything, just simple stuff.
2012/07/14
[ "https://Stackoverflow.com/questions/11479955", "https://Stackoverflow.com", "https://Stackoverflow.com/users/596298/" ]
Creating many, many copies of lists is not a particularly efficient way of doing things. Instead, use the `xrange()` (Python 2.x) or `range()` (Python 3) iterator. Here's one (naive) way you could implement a primality test:

```
from math import sqrt

def isPrime(n):
    if n < 2:
        return False
    if n == 2:
        return True
    if not n % 2:
        return False  # test if n is even

    # We've already removed all the even numbers, so no need to test for 2.
    # We only need to test up to sqrt(n), because any composite number can be
    # factored into 2 values, at least one of which is < sqrt(n).
    for i in xrange(3, int(sqrt(n)) + 1, 2):
        if not n % i:
            return False
    return True
```
Here's a 2-liner using filter():

```
def prime(num):
    """True if num is prime, false otherwise"""
    if num < 2:
        return False
    return len(filter(lambda x: num % x == 0, range(2, num))) == 0
```

(Note this relies on Python 2's `filter()`, which returns a list.)
11479955
I started learning Python today from the tutorial on the official site. When reading about **filter(function, sequence)** I thought of making a function that returns whether a number is prime, to use it with filter.

```
notDividedBy = [2,3,4,5,6,7,8,9]

def prime(num):
    """True if num is prime, false otherwise"""
    copy = notDividedBy[:]
    check = True
    if num in copy:
        copy.remove(num)
    for x in copy:
        if num % x == 0:
            check = False
            break
    return check
```

The above code works in the shell. My question is: since I feel that, although it is a solution, it is not the most elegant one, can anyone transform this code into something more Python-like (better structure? fewer lines?)? I believe it would help me better understand the basics of the language. The thing is, don't use any imports or anything, just simple stuff.
2012/07/14
[ "https://Stackoverflow.com/questions/11479955", "https://Stackoverflow.com", "https://Stackoverflow.com/users/596298/" ]
How about this one:

```
def is_prime(num):
    return not any(num % i == 0 for i in xrange(2, num/2 + 1))

for i in xrange(10):
    print i, is_prime(i)
```

**Explanation**

Start with:

```
(num % i == 0 for i in xrange(2, num/2 + 1))
```

This is a generator expression. I could have made it a list comprehension:

```
[num % i == 0 for i in xrange(2, num/2 + 1)]
```

The list comprehension is equivalent to:

```
ll = []
for i in xrange(2, num/2 + 1):
    ll.append(num % i == 0)
```

The difference between the generator and the list comprehension is that the generator only gives up its elements as you iterate over it, whereas the list comprehension calculates all the values up front.

Anyway, from the above code you can see that the expression generates a sequence of Trues and Falses: True if the number can be divided by i, and False otherwise. If we generate a sequence of all-False values, we know we have a prime.

The next trick is the `any` built-in function. It searches through an iterable and checks whether any of the values is True. As soon as it hits a `True`, it returns True. If it gets to the end of the iterable, it returns `False`. So, if the entire sequence is False (a prime number), then `any` returns `False`; otherwise it returns `True`. This would be perfect for a `not_prime` function, but our function is `is_prime`, so we just need to invert that result using the `not` operator.

The benefit of using the generator expression is that it is nice and concise, but also that it allows `any` to return before checking every value, which means that as soon as it finds a number that divides `num`, it returns instead of generating all `num/2` numbers.

Anyway, I hope this explanation is helpful. If not, feel free to leave a comment and I'll try to explain better.
One thing off the bat: if you are going to implement prime testing in this fashion, there's no reason to use an auxiliary array

```
def prime(num):
    """True if num is prime, false otherwise"""
    check = True
    # if num in copy:
    #     copy.remove(num)
    for x in range(2, num):  # was range(2, x-1), which used x before assignment
        if num % x == 0:
            check = False
            break
    return check
```
11479955
I started learning Python today from the tutorial on the official site. When reading about **filter(function, sequence)** I thought of making a function that returns whether a number is prime, to use it with filter.

```
notDividedBy = [2,3,4,5,6,7,8,9]

def prime(num):
    """True if num is prime, false otherwise"""
    copy = notDividedBy[:]
    check = True
    if num in copy:
        copy.remove(num)
    for x in copy:
        if num % x == 0:
            check = False
            break
    return check
```

The above code works in the shell. My question is: since I feel that, although it is a solution, it is not the most elegant one, can anyone transform this code into something more Python-like (better structure? fewer lines?)? I believe it would help me better understand the basics of the language. The thing is, don't use any imports or anything, just simple stuff.
2012/07/14
[ "https://Stackoverflow.com/questions/11479955", "https://Stackoverflow.com", "https://Stackoverflow.com/users/596298/" ]
One thing off the bat: if you are going to implement prime testing in this fashion, there's no reason to use an auxiliary array

```
def prime(num):
    """True if num is prime, false otherwise"""
    check = True
    # if num in copy:
    #     copy.remove(num)
    for x in range(2, num):  # was range(2, x-1), which used x before assignment
        if num % x == 0:
            check = False
            break
    return check
```
Here's a 2-liner using filter():

```
def prime(num):
    """True if num is prime, false otherwise"""
    if num < 2:
        return False
    return len(filter(lambda x: num % x == 0, range(2, num))) == 0
```

(Note this relies on Python 2's `filter()`, which returns a list.)
11479955
I started learning Python today from the tutorial on the official site. When reading about **filter(function, sequence)** I thought of making a function that returns whether a number is prime, to use it with filter.

```
notDividedBy = [2,3,4,5,6,7,8,9]

def prime(num):
    """True if num is prime, false otherwise"""
    copy = notDividedBy[:]
    check = True
    if num in copy:
        copy.remove(num)
    for x in copy:
        if num % x == 0:
            check = False
            break
    return check
```

The above code works in the shell. My question is: since I feel that, although it is a solution, it is not the most elegant one, can anyone transform this code into something more Python-like (better structure? fewer lines?)? I believe it would help me better understand the basics of the language. The thing is, don't use any imports or anything, just simple stuff.
2012/07/14
[ "https://Stackoverflow.com/questions/11479955", "https://Stackoverflow.com", "https://Stackoverflow.com/users/596298/" ]
How about this one:

```
def is_prime(num):
    return not any(num % i == 0 for i in xrange(2, num/2 + 1))

for i in xrange(10):
    print i, is_prime(i)
```

**Explanation**

Start with:

```
(num % i == 0 for i in xrange(2, num/2 + 1))
```

This is a generator expression. I could have made it a list comprehension:

```
[num % i == 0 for i in xrange(2, num/2 + 1)]
```

The list comprehension is equivalent to:

```
ll = []
for i in xrange(2, num/2 + 1):
    ll.append(num % i == 0)
```

The difference between the generator and the list comprehension is that the generator only gives up its elements as you iterate over it, whereas the list comprehension calculates all the values up front.

Anyway, from the above code you can see that the expression generates a sequence of Trues and Falses: True if the number can be divided by i, and False otherwise. If we generate a sequence of all-False values, we know we have a prime.

The next trick is the `any` built-in function. It searches through an iterable and checks whether any of the values is True. As soon as it hits a `True`, it returns True. If it gets to the end of the iterable, it returns `False`. So, if the entire sequence is False (a prime number), then `any` returns `False`; otherwise it returns `True`. This would be perfect for a `not_prime` function, but our function is `is_prime`, so we just need to invert that result using the `not` operator.

The benefit of using the generator expression is that it is nice and concise, but also that it allows `any` to return before checking every value, which means that as soon as it finds a number that divides `num`, it returns instead of generating all `num/2` numbers.

Anyway, I hope this explanation is helpful. If not, feel free to leave a comment and I'll try to explain better.
Here's a 2-liner using filter():

```
def prime(num):
    """True if num is prime, false otherwise"""
    if num < 2:
        return False
    return len(filter(lambda x: num % x == 0, range(2, num))) == 0
```

(Note this relies on Python 2's `filter()`, which returns a list.)
25204021
I'm using Continuum's Anaconda Spyder for Python. All of a sudden it's giving me this error, although it's supposed to be free:

```
Vendor: Continuum Analytics, Inc.
Package: mkl
Message: trial mode EXPIRED 14 days ago

    You cannot run mkl without a license any longer.
    A license can be purchased it at: http://continuum.io

    We are sorry for any inconveniences.

    SHUTTING DOWN PYTHON INTERPRETER
```

Any idea what the problem might be? I'm using it on Windows 64-bit. I've been using it fine for months. I just got back to it now after some weeks, and now it's saying it's expired, but there never was a license as far as I can remember.
2014/08/08
[ "https://Stackoverflow.com/questions/25204021", "https://Stackoverflow.com", "https://Stackoverflow.com/users/961627/" ]
There is a free trial that starts when you `conda install mkl`. If you want to remove it, use `conda remove --features mkl`.
The MKL optimizations are not free: <https://store.continuum.io/cshop/mkl-optimizations/>. There is a trial period, but after that it costs you money.

It is interesting that you used it for a while. Maybe it's an issue with license checking, or there was no mechanism to actually check for a license. When you install the package with `conda install mkl`, there is no mention of a license. Maybe something was introduced with a recent version of Anaconda?
25204021
I'm using Continuum's Anaconda Spyder for Python. All of a sudden it's giving me this error, although it's supposed to be free:

```
Vendor: Continuum Analytics, Inc.
Package: mkl
Message: trial mode EXPIRED 14 days ago

    You cannot run mkl without a license any longer.
    A license can be purchased it at: http://continuum.io

    We are sorry for any inconveniences.

    SHUTTING DOWN PYTHON INTERPRETER
```

Any idea what the problem might be? I'm using it on Windows 64-bit. I've been using it fine for months. I just got back to it now after some weeks, and now it's saying it's expired, but there never was a license as far as I can remember.
2014/08/08
[ "https://Stackoverflow.com/questions/25204021", "https://Stackoverflow.com", "https://Stackoverflow.com/users/961627/" ]
Update as of 5 Feb 2016: Anaconda now includes an `mkl` package by default, which is in a trial mode unless you get a license. (There is also a free license for personal use.) To get a license, go to <http://docs.continuum.io/mkl-optimizations/index> and follow the link under "Add Ons".

For details on this change (and instructions on how to remove `mkl` on non-Windows machines), see: <https://www.continuum.io/blog/developer-blog/anaconda-25-release-now-mkl-optimizations>

For Windows you can't remove `mkl` at this time. You can only get a (free) license. (See <https://github.com/conda/conda/issues/2032#issuecomment-182400140>)
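If memory serves, the removal route described in that blog post for macOS/Linux boils down to switching to the `nomkl` builds; treat the exact package list here as an assumption rather than a quote from the post:

```
conda install nomkl numpy scipy scikit-learn numexpr
conda remove mkl mkl-service
```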
The MKL optimizations are not free: <https://store.continuum.io/cshop/mkl-optimizations/>. There is a trial period, but after that it costs you money.

It is interesting that you used it for a while. Maybe it's an issue with license checking, or there was no mechanism to actually check for a license. When you install the package with `conda install mkl`, there is no mention of a license. Maybe something was introduced with a recent version of Anaconda?
25204021
I'm using Continuum's Anaconda Spyder for Python. All of a sudden it's giving me this error, although it's supposed to be free:

```
Vendor: Continuum Analytics, Inc.
Package: mkl
Message: trial mode EXPIRED 14 days ago

    You cannot run mkl without a license any longer.
    A license can be purchased it at: http://continuum.io

    We are sorry for any inconveniences.

    SHUTTING DOWN PYTHON INTERPRETER
```

Any idea what the problem might be? I'm using it on Windows 64-bit. I've been using it fine for months. I just got back to it now after some weeks, and now it's saying it's expired, but there never was a license as far as I can remember.
2014/08/08
[ "https://Stackoverflow.com/questions/25204021", "https://Stackoverflow.com", "https://Stackoverflow.com/users/961627/" ]
There is a free trial that starts when you `conda install mkl`. If you want to remove it, use `conda remove --features mkl`.
Update as of 5 Feb 2016: Anaconda now includes an `mkl` package by default, which is in a trial mode unless you get a license. (There is also a free license for personal use.) To get a license, go to <http://docs.continuum.io/mkl-optimizations/index> and follow the link under "Add Ons".

For details on this change (and instructions on how to remove `mkl` on non-Windows machines), see: <https://www.continuum.io/blog/developer-blog/anaconda-25-release-now-mkl-optimizations>

For Windows you can't remove `mkl` at this time. You can only get a (free) license. (See <https://github.com/conda/conda/issues/2032#issuecomment-182400140>)
26152787
I'm just starting to learn Python and I need to solve this problem, but I'm stuck. We've been given a function (lSegInt) to find the intersections of lines. What I need to do is format the data properly in order to pass it through this function to find how many times two polylines intersect. Here's the data:

```
pt1 = (1,1)
pt2 = (5,1)
pt3 = (5,5)
pt4 = (1,5)
pt5 = (2,2)
pt6 = (2,3)
pt7 = (4,6)
pt8 = (6,3)
pt9 = (3,1)
pt10 = (1,4)
pt11 = (3,6)
pt12 = (4,3)
pt13 = (7,4)

l5 = [[pt1, pt5, pt6, pt7, pt8, pt9]]
l6 = [[pt10, pt11, pt12, pt13]]
```

Here's my code:

```
def split(a):
    lines = []
    for i in range(len(a[0]) - 1):
        line = []
        for j in (i, i+1):
            line.append(a[0][j])
        lines.append(line)
    return lines

sl5 = split(l5)
sl6 = split(l6) + split(l6)
```

This is where I'm stuck. I need to find out how many times the polylines intersect. I wanted to use a zipped for loop with sl5 and sl6, but it wouldn't check every line of one list against every line of the other, and the lists are different lengths.

```
while i < len(sl5):
    for x, in a,:
        z = 1
        fresults.append(lSegInt(x[0], x[1], sl6[0][0], sl6[1][0]))
        fresults.append(lSegInt(x[0], x[1], sl6[1][0], sl6[1][1]))
        fresults.append(lSegInt(x[0], x[1], sl6[2][0], sl6[2][1]))
    i = i + 1
print fresults
```

Function:

```
def lSegInt(s1, s2, t1, t2):
    '''Function to check the intersection of two line segments. Returns
    None if no intersection, or a coordinate indicating the intersection.

    An implementation from the NCGIA core curriculum. s1 and s2 are points
    (e.g.: 2-item tuples) marking the beginning and end of segment s. t1
    and t2 are points marking the beginning and end of segment t. Each
    point has an x and y coordinate: (1, 3). Variables are named following
    linear formula: y = a + bx.'''

    if s1[0] != s2[0]:  # if s is not vertical
        b1 = (s2[1] - s1[1]) / float(s2[0] - s1[0])
        if t1[0] != t2[0]:  # if t is not vertical
            b2 = (t2[1] - t1[1]) / float(t2[0] - t1[0])
            a1 = s1[1] - (b1 * s1[0])
            a2 = t1[1] - (b2 * t1[0])
            if b1 == b2:  # if lines are parallel (slopes match)
                return(None)
            xi = -(a1-a2)/float(b1-b2)  # solve for intersection point
            yi = a1 + (b1 * xi)
        else:
            xi = t1[0]
            a1 = s1[1] - (b1 * s1[0])
            yi = a1 + (b1 * xi)
    else:
        xi = s1[0]
        if t1[0] != t2[0]:  # if t is not vertical
            b2 = (t2[1] - t1[1]) / float(t2[0] - t1[0])
            a2 = t1[1] - (b2 * t1[0])
            yi = a2 + (b2 * xi)
        else:
            return(None)

    # Here is the actual intersection test!
    if (s1[0]-xi)*(xi-s2[0]) >= 0 and \
       (s1[1]-yi)*(yi-s2[1]) >= 0 and \
       (t1[0]-xi)*(xi-t2[0]) >= 0 and \
       (t1[1]-yi)*(yi-t2[1]) >= 0:
        return((float(xi), float(yi)))  # Return the intersection point.
    else:
        return(None)
```

Any help on this is greatly appreciated. Sorry for the wall of text.
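For what it's worth, a hedged sketch of the all-pairs check the question seems to be after; it calls `split()` directly, since the question's `sl6 = split(l6) + split(l6)` concatenates the segment list with itself and would double-count:

```python
# Count intersections by testing every segment of one polyline
# against every segment of the other.
count = 0
for s in split(l5):          # segments of the first polyline
    for t in split(l6):      # segments of the second polyline
        if lSegInt(s[0], s[1], t[0], t[1]) is not None:
            count += 1
print count                  # Python 2, to match the question's code
```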
2014/10/02
[ "https://Stackoverflow.com/questions/26152787", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4099447/" ]
There are a couple of flaws in your CSS code, but the biggest one causing the display issue is:

```
.box2 {
    float: middle;
}
```

There is no `float: middle;` property. You need to either set them all to `float: left;` (or `float: right;`) or use an entirely different approach (like `display: table-cell;`).
<http://jsfiddle.net/rishabh66/kLfb2wet/> . Add `float: left;` to all the divs to horizontally align the boxes inside the parent div.
26152787
I'm just starting to learn Python and I need to solve this problem, but I'm stuck. We've been given a function (lSegInt) to find the intersections of lines. What I need to do is format the data properly in order to pass it through this function to find how many times two polylines intersect. Here's the data:

```
pt1 = (1,1)
pt2 = (5,1)
pt3 = (5,5)
pt4 = (1,5)
pt5 = (2,2)
pt6 = (2,3)
pt7 = (4,6)
pt8 = (6,3)
pt9 = (3,1)
pt10 = (1,4)
pt11 = (3,6)
pt12 = (4,3)
pt13 = (7,4)

l5 = [[pt1, pt5, pt6, pt7, pt8, pt9]]
l6 = [[pt10, pt11, pt12, pt13]]
```

Here's my code:

```
def split(a):
    lines = []
    for i in range(len(a[0]) - 1):
        line = []
        for j in (i, i+1):
            line.append(a[0][j])
        lines.append(line)
    return lines

sl5 = split(l5)
sl6 = split(l6) + split(l6)
```

This is where I'm stuck. I need to find out how many times the polylines intersect. I wanted to use a zipped for loop with sl5 and sl6, but it wouldn't check every line of one list against every line of the other, and the lists are different lengths.

```
while i < len(sl5):
    for x, in a,:
        z = 1
        fresults.append(lSegInt(x[0], x[1], sl6[0][0], sl6[1][0]))
        fresults.append(lSegInt(x[0], x[1], sl6[1][0], sl6[1][1]))
        fresults.append(lSegInt(x[0], x[1], sl6[2][0], sl6[2][1]))
    i = i + 1
print fresults
```

Function:

```
def lSegInt(s1, s2, t1, t2):
    '''Function to check the intersection of two line segments. Returns
    None if no intersection, or a coordinate indicating the intersection.

    An implementation from the NCGIA core curriculum. s1 and s2 are points
    (e.g.: 2-item tuples) marking the beginning and end of segment s. t1
    and t2 are points marking the beginning and end of segment t. Each
    point has an x and y coordinate: (1, 3). Variables are named following
    linear formula: y = a + bx.'''

    if s1[0] != s2[0]:  # if s is not vertical
        b1 = (s2[1] - s1[1]) / float(s2[0] - s1[0])
        if t1[0] != t2[0]:  # if t is not vertical
            b2 = (t2[1] - t1[1]) / float(t2[0] - t1[0])
            a1 = s1[1] - (b1 * s1[0])
            a2 = t1[1] - (b2 * t1[0])
            if b1 == b2:  # if lines are parallel (slopes match)
                return(None)
            xi = -(a1-a2)/float(b1-b2)  # solve for intersection point
            yi = a1 + (b1 * xi)
        else:
            xi = t1[0]
            a1 = s1[1] - (b1 * s1[0])
            yi = a1 + (b1 * xi)
    else:
        xi = s1[0]
        if t1[0] != t2[0]:  # if t is not vertical
            b2 = (t2[1] - t1[1]) / float(t2[0] - t1[0])
            a2 = t1[1] - (b2 * t1[0])
            yi = a2 + (b2 * xi)
        else:
            return(None)

    # Here is the actual intersection test!
    if (s1[0]-xi)*(xi-s2[0]) >= 0 and \
       (s1[1]-yi)*(yi-s2[1]) >= 0 and \
       (t1[0]-xi)*(xi-t2[0]) >= 0 and \
       (t1[1]-yi)*(yi-t2[1]) >= 0:
        return((float(xi), float(yi)))  # Return the intersection point.
    else:
        return(None)
```

Any help on this is greatly appreciated. Sorry for the wall of text.
2014/10/02
[ "https://Stackoverflow.com/questions/26152787", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4099447/" ]
Hey, I simplified your code, using a single class for the three divs and displaying them `inline-block`; please see below:

```css
.parentbox {
    width: 1120px;
    padding: 40px 40px 40px 40px;
    align: center;
    height: auto;
}

.box {
    display: inline-block;
    /* vertical-align: top;  you might need this property */
    width: 23%;
    height: 400px;
    background-color: #f1f1f1;
    margin: 5%;
}
```

```html
<div class="parentbox">
  <div align=center>
    <div class="box">hello</div>
    <div class="box">hi</div>
    <div class="box">hgi</div>
  </div>
</div>
```
<http://jsfiddle.net/rishabh66/kLfb2wet/> . Add `float: left;` to all the divs to horizontally align the boxes inside the parent div.
40521707
Whenever I try to perform normalization on the array obtained from the CSV file, my code won't work (I haven't provided the custom file here). I am getting an error message on:

```
x = np.myarray
```

**AttributeError: 'module' object has no attribute 'myarray'**

As I am new to Python, can anyone please help me figure out how to get a normalized matrix when we read the matrix from a CSV file?

```
import numpy as np
import csv

with open('csvk.csv', 'rb') as f:
    reader = csv.reader(f)
    data_as_list = list(reader)
    print data_as_list

myarray = np.asarray(data_as_list)
print myarray

x = np.myarray
x_normed = x / x.max(axis=0)
print x_normed
```
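The error, for what it's worth, comes from `np.myarray` looking up an attribute named `myarray` on the NumPy module, which doesn't exist; `myarray` is just a local variable. A minimal sketch of the presumable intent, with hypothetical stand-in data instead of the CSV:

```python
import numpy as np

data_as_list = [['1', '2'], ['3', '4']]          # stand-in for rows read from the CSV
myarray = np.asarray(data_as_list, dtype=float)  # cast strings to numbers before dividing

x_normed = myarray / myarray.max(axis=0)         # column-wise normalization
print(x_normed)
```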
2016/11/10
[ "https://Stackoverflow.com/questions/40521707", "https://Stackoverflow.com", "https://Stackoverflow.com/users/6302830/" ]
The answer is simple: that is the maximum the field can hold.

> MySQL retrieves and displays TIME values in 'HH:MM:SS' format (or 'HHH:MM:SS' format for large hours values). TIME values may range from '-838:59:59' to '838:59:59'. The hours part may be so large because the TIME type can be used not only to represent a time of day (which must be less than 24 hours), but also elapsed time or a time interval between two events (which may be much greater than 24 hours, or even negative).

You would probably be better off just using an int field (where the value is stored as the difference in seconds from the starting time). It's common practice to have a field that stores seconds elapsed since the epoch rather than a datetime field anyway. Otherwise you would need to switch to a [datetime](http://dev.mysql.com/doc/refman/5.7/en/datetime.html) field.

<http://dev.mysql.com/doc/refman/5.7/en/time.html>
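As a hedged illustration of the int-field idea (the table and column names here are made up):

```python
from datetime import datetime

start = datetime(2016, 11, 1, 8, 0, 0)
end = datetime(2016, 11, 3, 9, 30, 0)

# An int column holds any span, unlike TIME's -838:59:59..838:59:59 range.
elapsed_seconds = int((end - start).total_seconds())
print(elapsed_seconds)  # 178200

# e.g. INSERT INTO events (elapsed_seconds) VALUES (%s)  -- hypothetical schema
```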
You are using **strtotime()**.

strtotime() parses English textual datetimes into Unix timestamps. E.g.:

```
echo(strtotime("3 October 2005"));  // outputs 1128312000
```

**HERE, TRY THIS**

```
$epoch_time_out_user = strtotime($_POST['timeout']) - (300*60);
$dt = new DateTime("@$epoch_time_out_user");
$time_out_user = $dt->format('H:i:s');
// now $time_out_user holds the time
// you can use $time_out_user in the insert query
// do the same for $time_in_user
// FYI: the value (300*60) depends on the timezone
```
70640586
I have a shared library `libcustom.so` in a non-standard folder, and a Python package where I use `ctypes.cdll.LoadLibrary("libcustom.so")`. How can I set the `libcustom.so` path at build time (something similar to rpath)?

```
env LD_LIBRARY_PATH=/path/to/custom/lib python3 -c "import mypackage"
```

works fine, but I don't want to use a global `LD_LIBRARY_PATH`, and I don't want to set the library path at run time.

```
python3 -c "import mypackage"
```

results in:

```
OSError: libcustom.so: cannot open shared object file: No such file or directory
```
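A common workaround, sketched here under the assumption that the library can be shipped inside the package, is to resolve an absolute path relative to the package at import time, so no `LD_LIBRARY_PATH` is needed:

```python
import ctypes
import os

# Hypothetical layout: mypackage/lib/libcustom.so next to this module.
_here = os.path.dirname(os.path.abspath(__file__))
_libcustom = ctypes.cdll.LoadLibrary(os.path.join(_here, "lib", "libcustom.so"))
```

This sidesteps rpath entirely; a true rpath has to be baked into whatever ELF object does the linking, which plain `ctypes` does not do.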
2022/01/09
[ "https://Stackoverflow.com/questions/70640586", "https://Stackoverflow.com", "https://Stackoverflow.com/users/5940776/" ]
This might be somewhat related to [this question](https://stackoverflow.com/questions/32998502/python-importerror-no-module-named-crypto-publickey-rsa).

/e: OK, since you are using [this firebase package](https://pypi.org/project/firebase/), I can hopefully help you out.

First of all, it's the package's fault that it isn't running: while it depends on many external packages, it declares none of them. This is what I had to do in a clean virtual environment just to be able to do `from firebase import Firebase`:

```
pip install sseclient python_jwt gcloud pycryptodome requests-toolbelt
```

[Here](https://pastebin.com/yksdudUc) is the requirements.txt I ended up with in the clean environment. Notice that this was only for importing a single class from the package. There still might be other dependencies hidden somewhere, waiting to throw an exception.

I encourage you to give feedback to the developer about this issue. Even better, fix it yourself and open a Pull Request; you might help others with the same issue.
I found a workaround for this: I simply used another module to read from the Firebase DB. Instead of `firebase` I used `firebase_admin`, as mentioned in the [firebase documentation](https://firebase.google.com/docs/database/admin/start#python). `firebase_admin` doesn't use Crypto, so there's no longer a problem from that point of view. However, I had to change a little how I retrieve and write data.
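For anyone landing here, a minimal `firebase_admin` sketch of that kind of read/write; the credential file path, database URL and node name are placeholders, not taken from the answer:

```python
import firebase_admin
from firebase_admin import credentials, db

cred = credentials.Certificate("serviceAccount.json")  # hypothetical key file
firebase_admin.initialize_app(cred, {
    "databaseURL": "https://your-project-id.firebaseio.com"  # placeholder URL
})

ref = db.reference("weather")   # hypothetical node
ref.set({"temp_now": 8.91})     # write
print(ref.get())                # read
```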
66355390
I'm trying to make a Flask pipeline which receives data from a Python file and sends the data to React, which displays it. I am currently stuck trying to receive the data in Flask after sending it via POST to the URL `localhost:5000/weather-data`.

The data is being posted with this code:

```
dummy_data = {'data': str(msg.payload.decode('iso-8859-1')),
              'timestamp': datetime.datetime.now().isoformat()}
response = requests.post(url, data=dummy_data)
print(response.text)
```

The print result is:

```
{"data": "{\"region\": \"Jokkmokk\", \"temp_now\": 8.91, \"weather_now\": \"bewölkt\", \"humidity\": 50, \"wind\": 24}", "timestamp": "2021-02-24T17:23:15.347058"}
```

which is all right. But then I try to receive the data and return it on the Flask side with this code:

```
from flask import Flask, request

app = Flask(__name__)

@app.route('/')
def test():
    return 'HelloWorld'

@app.route('/weather-data', methods=['POST', 'GET'])
def weather_data():
    try:
        data = request.form.to_dict()
        print(data)
        return data
    except Exception as e:
        print(e)

if __name__ == '__main__':
    app.run(host='127.0.0.1', debug=True, port=5000)
```

This runs normally, and my print(data) gives back the exact same dictionary, but if I take a look at `localhost:5000/weather-data` I only see empty curly braces `{}`.

As a test, I tried to return the data without receiving it first, with this code:

```
@app.route('/weather-data', methods=['POST', 'GET'])
def weather_data():
    return {"data": "{\"region\": \"Fishermans City\", \"temp_now\": 6.87, \"weather_now\": \"st\\u00fcrmisch\", \"humidity\": 52, \"wind\": 58}",
            "timestamp": "2021-02-23T18:32:49.120861"}
```

Like this it worked perfectly and showed the data on the website.

**Edit:** This may be a stupid question for some of you, but since I am kind of new to this, I wanted to ask: is it possible that the data is on the page, but when I reload the page it gets overwritten with empty curly braces? If yes, is there a way to keep the data on the page until I make another POST with new data?
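For what it's worth, the reload behavior has a likely explanation: each GET re-runs `request.form.to_dict()` on a request that carries no form body, which yields `{}`. A hedged sketch of keeping the last POSTed payload around until the next POST (in-memory only, lost on restart):

```python
from flask import Flask, request, jsonify

app = Flask(__name__)
last_data = {}  # holds the most recent POST body

@app.route('/weather-data', methods=['POST', 'GET'])
def weather_data():
    global last_data
    if request.method == 'POST':
        last_data = request.form.to_dict()  # overwrite only on POST
    return jsonify(last_data)               # GET re-serves the stored payload
```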
2021/02/24
[ "https://Stackoverflow.com/questions/66355390", "https://Stackoverflow.com", "https://Stackoverflow.com/users/15275815/" ]
If the files requested are big, consider using spawn instead of exec.

```
const http = require('http');
const exec = require('child_process').exec;

const DOWNLOAD_DIR = './downloads/';

const generate_width_and_height = function() {
    const random = Math.floor((Math.random() * 100) + 200);
    console.log(random);
    return random;
}

const create_file_url = function() {
    return "http://placekitten.com/" + generate_width_and_height() + "/" + generate_width_and_height();
}

// Array(128) gives the 128 elements the variable name promises.
const oneHundredTwentyEightElementsArray = Array.from(Array(128), (_, x) => x);

const oneHundredTwentyEightUrlsArray = oneHundredTwentyEightElementsArray.map(_ => create_file_url());

const download_file_wget = function(file_url, file_number) {
    // compose the file name
    const file_name = "file_" + file_number;
    // compose the wget command
    const wget = 'wget -P ' + DOWNLOAD_DIR + ' ' + file_url;
    // execute wget using child_process' exec function
    const child = exec(wget, function(err, stdout, stderr) {
        if (err) throw err;
        else console.log(file_name + ' downloaded to ' + DOWNLOAD_DIR);
    });
};

for (let index = 0; index < oneHundredTwentyEightUrlsArray.length; index++) {
    const url = oneHundredTwentyEightUrlsArray[index];
    download_file_wget(url, index);
}
```
You can use [JavaScript Promises](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise) to download multiple files with Node and wget.

First wrap your inner code in a promise:

```js
const downloadFile = (url) => {
  return new Promise((resolve) => {
    console.log(`wget ${url} --no-check-certificate`)
    exec(`wget ${url} --no-check-certificate`, function(err, stdout, stderr) {
      if (err) {
        console.log('ERR', err, url)
      } else {
        console.log('SUCCESS ' + url);
        resolve(1)
      }
    });
  })
}
```

Then use Promise.all to process all the downloads asynchronously:

```
const files = [
  'http://placekitten.com/10/10',
  'http://placekitten.com/10/10'
  // etc
];

(async () => {
  await Promise.all(files.map(url => downloadFile(url)))
})()
```
2399812
Is there a way to create a 'kiosk mode' in wxPython under Windows (98 through 7) where the application prevents you from breaking out of it using the Windows keys, Alt-Tab, Alt-F4, and Ctrl+Alt+Delete?
2010/03/08
[ "https://Stackoverflow.com/questions/2399812", "https://Stackoverflow.com", "https://Stackoverflow.com/users/204535/" ]
If an application could do that, it would make a great denial-of-service attack on the machine.

In particular, Ctrl+Alt+Delete is the [Secure Attention Sequence](http://technet.microsoft.com/en-us/library/cc780332(WS.10).aspx). Microsoft goes to great lengths to ensure that when users hit those keys, they switch to a secure desktop, so they can be confident that the logon box is the *real* Windows logon and not a counterfeit.

What you need to look at isn't functions that your application can call, but system administration options that allow an administrator to configure a machine for limited use. These exist, but it's more a question for Super User than for Stack Overflow.

This should get you started: <http://msdn.microsoft.com/en-us/library/aa372139(VS.85).aspx>
This cannot be done with wxPython alone. You need a low-level keyboard hook, written in C/C++ or with the equivalent ctypes calls, for the Windows keys, Alt-Tab and Alt-F4. As for Ctrl-Alt-Del, I don't think it can be blocked on Windows XP and above.
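As an illustration of what the equivalent ctypes calls might look like, here is a minimal, hedged sketch of a WH_KEYBOARD_LL hook that swallows the Windows keys, Alt-Tab and Alt-F4 (Windows only; it cannot, by design, intercept Ctrl+Alt+Del):

```python
import ctypes
from ctypes import wintypes

user32 = ctypes.windll.user32
kernel32 = ctypes.windll.kernel32

WH_KEYBOARD_LL = 13
HC_ACTION = 0
LLKHF_ALTDOWN = 0x20
VK_LWIN, VK_RWIN, VK_TAB, VK_F4 = 0x5B, 0x5C, 0x09, 0x73

class KBDLLHOOKSTRUCT(ctypes.Structure):
    _fields_ = [("vkCode", wintypes.DWORD),
                ("scanCode", wintypes.DWORD),
                ("flags", wintypes.DWORD),
                ("time", wintypes.DWORD),
                ("dwExtraInfo", ctypes.c_void_p)]

HOOKPROC = ctypes.CFUNCTYPE(ctypes.c_long, ctypes.c_int,
                            wintypes.WPARAM, ctypes.POINTER(KBDLLHOOKSTRUCT))

def handler(nCode, wParam, lParam):
    if nCode == HC_ACTION:
        kb = lParam.contents
        alt_down = kb.flags & LLKHF_ALTDOWN
        # Returning nonzero swallows the keystroke.
        if kb.vkCode in (VK_LWIN, VK_RWIN) or (alt_down and kb.vkCode in (VK_TAB, VK_F4)):
            return 1
    return user32.CallNextHookEx(None, nCode, wParam, lParam)

hook_proc = HOOKPROC(handler)  # keep a reference so it isn't garbage collected
hook = user32.SetWindowsHookExA(WH_KEYBOARD_LL, hook_proc,
                                kernel32.GetModuleHandleW(None), 0)

# A message pump is required for a low-level hook to receive events.
msg = wintypes.MSG()
while user32.GetMessageW(ctypes.byref(msg), None, 0, 0):
    user32.TranslateMessage(ctypes.byref(msg))
    user32.DispatchMessageW(ctypes.byref(msg))
```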
28911296
I want to convert a namedtuple to a dict with Python. I have:

```
CommentInfo(stt=1, gid=12, uid=222)
```

Now I want:

```
{"stt": 1, "gid": 12, "uid": 222}
```

Please help me! Thanks very much!
2015/03/07
[ "https://Stackoverflow.com/questions/28911296", "https://Stackoverflow.com", "https://Stackoverflow.com/users/3214584/" ]
You need to use the `_asdict()` method to convert a named tuple into a dictionary.

**Example:**

```
>>> from collections import namedtuple
>>> CommentInfo = namedtuple('CommentInfo', ["stt", "gid", "uid"])
>>> x = CommentInfo(stt=1, gid=12, uid=222)
>>> x._asdict()
OrderedDict([('stt', 1), ('gid', 12), ('uid', 222)])
```
Namedtuples have a `._asdict()` method to convert them to an OrderedDict, so if you have an instance in a variable `comment` you can use `comment._asdict()`.
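Since the question shows a plain `{...}` dict as the desired output, a quick sketch of going one step further than `_asdict()`:

```python
from collections import namedtuple

CommentInfo = namedtuple('CommentInfo', ['stt', 'gid', 'uid'])
comment = CommentInfo(stt=1, gid=12, uid=222)

print(dict(comment._asdict()))  # {'stt': 1, 'gid': 12, 'uid': 222}
```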
48842401
I'm using Python 3.6 and Selenium 3.8.1, with the Chrome browser, to simulate users entering an order. The app we use has a particularly frustrating implementation for automation: a loading modal pops up whenever a filter for a product is loading, but it does not truly cover the elements underneath it. Additionally, load time fluctuates wildly, but with an upper bound. If I don't use excessive sleep statements, Selenium either starts clicking wildly before the correct objects are loaded, or clicks the element but, of course, hits the loading modal. (Fun side note: the loading modal only fills the screen view, so Selenium is also able to interact with items below the fold. :P)

To get around this:

```
def kill_evil_loading_modal(self):
    # pause for a second to make sure the loader has a chance to pop
    time.sleep(1)

    # pulling locator type and value from another file: ("id", "locator_id")
    loading_modal = ProductsLocators.loading_modal_selector

    # call a function that returns true/false for whether the object exists
    check_for_evil = self.is_element_exist(*loading_modal)

    while check_for_evil == True:
        check_for_evil = self.is_element_exist(*loading_modal)
```

This works great! Where I had a ton of evil time.sleep(x) statements to avoid the loading modal, I'm now catching it and waiting until it's gone to move forward. If I only had to deal with that two or three times, I would move on. Sadly, this loading modal hits after *every click* ... so this is what my main script looks like now:

```
new_quote02_obj.edit_quote_job(**data)
new_quote03_obj.kill_evil_loading_modal()
new_quote03_obj.click_product_dropdown()
new_quote03_obj.kill_evil_loading_modal()
new_quote03_obj.click_product_dropdown_link()
new_quote03_obj.kill_evil_loading_modal()
new_quote03_obj.select_category_dropdown(scenario_data['category_name'])
new_quote03_obj.kill_evil_loading_modal()
new_quote03_obj.select_market_dropdown(scenario_data['local_sales_market'])
new_quote03_obj.kill_evil_loading_modal()
new_quote03_obj.add_products_job(scenario_data['product_list_original'])
new_quote03_obj.kill_evil_loading_modal()
new_quote03_obj.click_done_btn()
new_quote03_obj.kill_evil_loading_modal()
new_quote03_obj.next_btn_page()
```

How can I refactor to stay DRY?
2018/02/17
[ "https://Stackoverflow.com/questions/48842401", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2252481/" ]
If you want to wait until the modal disappears and avoid using `time.sleep()`, you can try an [ExplicitWait](http://selenium-python.readthedocs.io/waits.html#explicit-waits):

```
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait as wait

wait(driver, 10).until_not(EC.visibility_of_element_located(("id", "locator_id")))
```

or

```
wait(driver, 10).until(EC.invisibility_of_element_located(("id", "locator_id")))
```

This should allow you to wait up to 10 seconds (increase the delay if needed) until the element with the specified selector (`"id", "locator_id"`) becomes invisible.

If a modal appears after each click, you can implement your own click method, like

```
def click_n_wait(by, value, timeout=10):
    wait(driver, timeout).until(EC.element_to_be_clickable((by, value))).click()
    wait(driver, timeout).until(EC.invisibility_of_element_located(("id", "locator_id")))
```

and use it as

```
click_n_wait("id", "button_id")
```
As you mentioned in your question, *a loading modal will pop up whenever a filter for a product is loading*. Irrespective of whether the loader *covers the elements underneath it*, you can simply `wait` for the next intended element you want to interact with. Following this approach you can completely get rid of the `kill_evil_loading_modal()` function, which looks to me like overhead.

As a replacement for the `kill_evil_loading_modal()` function, invoke the [**WebDriverWait()**](https://seleniumhq.github.io/selenium/docs/api/py/webdriver_support/selenium.webdriver.support.wait.html#module-selenium.webdriver.support.wait) method along with the proper [**expected\_conditions**](https://seleniumhq.github.io/selenium/docs/api/py/webdriver_support/selenium.webdriver.support.expected_conditions.html#module-selenium.webdriver.support.expected_conditions) as required, as follows:

```
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# other code
WebDriverWait(driver, 2).until(EC.element_to_be_clickable((By.XPATH, "xpath_of_element_A"))).click()
WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, "xpath_of_element_B"))).click()
WebDriverWait(driver, 3).until(EC.element_to_be_clickable((By.XPATH, "xpath_of_element_C"))).click()
```
67499322
I am trying to control my browser using Python. What I need is to give commands in the terminal that act on the browser, such as opening it, searching for something (like scrolling the browser), and closing it. Currently I have opening and closing the browser working.
2021/05/12
[ "https://Stackoverflow.com/questions/67499322", "https://Stackoverflow.com", "https://Stackoverflow.com/users/15904381/" ]
A conflict might occur when both of you change the same file in the same branch (or when pulling a different branch into your local branch). In such cases git can sometimes merge the changes automatically when you try to pull your friend's commit. But if the changes are mostly on the same or nearby lines, the automatic merge will fail and you need to merge the changes manually and then commit.

You can use `git mergetool` to resolve any merge conflict (after defining a mergetool of your choice).
It depends on whether you are working on the same git branch or not. Even if you are working on the same branch, you won't get conflicts if you modify different files. You will only get conflicts if you both change the same file.
48028274
This is code to find a product: print the product of all the numbers in this array, modulo 10^9+7.

```
n = int(input())
answer = 1
b = 10**9
array_1 = []
for i in range(n):
    array_1.append(int(input()))
for j in range(n):
    answer = (answer * array_1[j]) % (b + 7)
print(answer)
```

This is my code in Python 3, working properly in a Jupyter notebook, but on Python 3.5.2 it shows an error for input 5 and then 1 2 3 4 5:

```
Execution failed.
ValueError: invalid literal for int() with base 10 : '1 2 3 4 5'
Stack Trace:
Traceback (most recent call last):
  File "/hackerearth/PYTHON3_46/s_ad.py3", line 16, in <module>
    array_1.append(int(input()))
ValueError: invalid literal for int() with base 10: '1 2 3 4 5'
```

Please, someone help me solve this error, as I'm a newbie in Python.
2017/12/29
[ "https://Stackoverflow.com/questions/48028274", "https://Stackoverflow.com", "https://Stackoverflow.com/users/7756559/" ]
> > array\_1.append(int(input())) by using this i m trying get an array of int by > taking value one by one from user input – > > > But it looks like you are entering the numbers one after the other as a single string with each number separated by a space. In that case, you should use split to get the individual numbers: ``` array_of_ints = [int(num) for num in input().split()] array_1 += array_of_ints ``` If you are trying to input the numbers one by one, your code seems correct. You just need to make sure you actually do enter the numbers one by one. i.e. 1 then press Enter, 2 then press Enter, and so on...
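A small sketch that tolerates both input styles (the names mirror the question; this is just an illustration, not a required input format):

```
n = int(input())          # how many numbers are expected
array_1 = []
while len(array_1) < n:
    # works whether the user types one number per line or several on one line
    array_1 += [int(num) for num in input().split()]
```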
I'm not completely sure what you are trying to achieve here, but just based on looking at it, your code will fail as soon as one input line contains more than one number, such as `1 2`, because `int()` cannot parse a string containing spaces. If you are running this from a terminal, there should be a new line between each input, i.e.

```
./your_program.py
4
4
3
2
1
```
34,207,898
I am having an issue selecting data from a pandas DataFrame with between\_time. When the start and end of the query span two different days, the result is empty. I am using pandas 0.17.1 (python 2.7). I have the following data frame:

```
mydf = pd.DataFrame.from_dict({'azi': {Timestamp('2015-05-12 00:00:14.348000'): 109.801,
  Timestamp('2015-05-12 00:00:36.125000'): 109.994,
  Timestamp('2015-05-12 00:00:57.599000'): 109.60299999999999,
  Timestamp('2015-05-12 00:01:14.576000'): 100.2},
 'ele': {Timestamp('2015-05-12 00:00:14.348000'): 180.001,
  Timestamp('2015-05-12 00:00:36.125000'): 179.999,
  Timestamp('2015-05-12 00:00:57.599000'): 179.999,
  Timestamp('2015-05-12 00:01:14.576000'): 180.001}})
```

Which results in:

```
                             azi      ele
2015-05-12 00:00:14.348  109.801  180.001
2015-05-12 00:00:36.125  109.994  179.999
2015-05-12 00:00:57.599  109.603  179.999
2015-05-12 00:01:14.576  100.200  180.001
```

The following query **fails**:

```
mydf['azi'].between_time(datetime(2015, 5, 11, 23, 59, 59, 850000), datetime(2015, 5, 12, 0, 1, 59, 850000))
```

resulting in:

```
Series([], Name: azi, dtype: float64)
```

However the following query **works**:

```
mydf['azi'].between_time(datetime(2015, 5, 11, 0, 0, 0, 0), datetime(2015, 5, 12, 0, 1, 59, 850000))
```

with the right answer:

```
2015-05-12 00:00:14.348    109.801
2015-05-12 00:00:36.125    109.994
2015-05-12 00:00:57.599    109.603
2015-05-12 00:01:14.576    100.200
Name: azi, dtype: float64
```

**Questions**: 1. Am I missing something in the functionality of the function, or is this a real bug? 2. Is there a workaround for this? The background is that I really need to process data in 1-minute chunks whose limits do not always coincide with 00:00:00
2015/12/10
[ "https://Stackoverflow.com/questions/34207898", "https://Stackoverflow.com", "https://Stackoverflow.com/users/5665206/" ]
From the [documentation](http://search.cpan.org/~peco/Email-Send-SMTP-Gmail-0.1.1/lib/Email/Send/SMTP/Gmail.pm): put commas between the email addresses. > > send(-to=>'', [-subject=>'', -cc=>'', -bcc=>'', -replyto=>'', -body=>'', -attachments=>'']) > > It composes and sends the email in one shot > > > to, cc, bcc: **comma separated email addresses** > > attachments: comma separated files with full path > > > ``` $mail->send(-to=>'a@gmail.com,b@gmail.com,c@gmail.com,...' ```
Simply add all recipients as a comma separated list: ``` (-to=>'pqr@gmail.com,rec2@gmail.com' ... ```
66,604,878
I am new to CI/CD and GitLab. I have a CI/CD script to test, build and deploy, and I use 2 branches and 2 EC2 instances. My goal is to have a light, non-redundant script that builds and deploys my changes depending on the branch. Currently my script looks like this. After reading the GitLab docs I saw many conditional keywords like `rules`, but I'm really lost on how to use conditionals in my script to optimise it. Is there a way to use a condition to run certain scripts depending on which branch a merge comes from? Thanks in advance!

```
#image: alpine
image: "python:3.7"

before_script:
  - python --version

stages:
  - test
  - build_staging
  - build_prod
  - deploy_staging
  - deploy_prod

test:
  stage: test
  script:
    - pip install -r requirements.txt
    - pytest Flask_server/test_app.py
  only:
    refs:
      - develop

build_staging:
  stage: build_staging
  image: node
  before_script:
    - npm install -g npm
    - hash -d npm
    - nodejs -v
    - npm -v
  script:
    - cd client
    - npm install
    - npm update
    - npm run build:staging
  artifacts:
    paths:
      - client/dist/
    expire_in: 30 minutes
  only:
    refs:
      - develop

build_prod:
  stage: build_prod
  image: node
  before_script:
    - npm install -g npm
    - hash -d npm
    - nodejs -v
    - npm -v
  script:
    - cd client
    - npm install
    - npm update
    - npm run build
  artifacts:
    paths:
      - client/dist/
    expire_in: 30 minutes
  only:
    refs:
      - master

deploy_staging:
  stage: deploy_staging
  image: registry.gitlab.com/gitlab-org/cloud-deploy/aws-base:latest # gitlab image for aws cli commands
  before_script:
    - apt-get update
    # - apt-get -y install python3-pip
    # - apt-get --assume-yes install awscli
    - apt-get --assume-yes install -y shellcheck
  script:
    - shellcheck .ci/deploy_aws_STAGING.sh
    - chmod +x .ci/deploy_aws_STAGING.sh
    - .ci/deploy_aws_STAGING.sh
    - aws s3 cp client/dist/ s3://......./ --recursive
  only:
    refs:
      - develop

deploy_prod:
  stage: deploy_prod
  image: registry.gitlab.com/gitlab-org/cloud-deploy/aws-base:latest # gitlab image for aws cli commands
  before_script:
    - apt-get update
    # - apt-get -y install python3-pip
    # - apt-get --assume-yes install awscli
    - apt-get --assume-yes install -y shellcheck
  script:
    - shellcheck .ci/deploy_aws_PROD.sh
    - chmod +x .ci/deploy_aws_PROD.sh
    - .ci/deploy_aws_PROD.sh
    - aws s3 cp client/dist/ s3://........../ --recursive
  only:
    refs:
      - master
```
2021/03/12
[ "https://Stackoverflow.com/questions/66604878", "https://Stackoverflow.com", "https://Stackoverflow.com/users/15306690/" ]
The first is creating an anonymous subclass of `MyRunnable`. The second is creating an anonymous subclass of `Thread`, which requires that `MyRunnable` is instantiable; and `MyRunnable` wouldn't actually then be used at all, because it's not invoked in the `run()` method you're defining in the `Thread` subclass. There is no reason to subclass `Thread`, and presumably you want some special behavior from your `MyRunnable` base class (although the only thing that would provide special behavior that would actually be run is the constructor). Use the first way.
You can also use a lambda expression to start a thread. ``` Thread myRunnableThread3 = new Thread(()-> { System.out.println(Thread.currentThread().getName()); System.out.println("myRunnableThread3!");},"MyThread"); myRunnableThread3.start(); ``` Prints ``` MyThread myRunnableThread3! ```
20,115,972
I tried to use Ambari to manage the installation and maintenance of the Hadoop cluster. After I started the Ambari server, I used the web page to set up the Hadoop cluster. But at the 3rd step -- confirm hosts -- an error showed up. I checked the log at /var/log/ambari-server and found:

> INFO:root:BootStrapping hosts ['qiao'] using /usr/lib/python2.6/site-packages/ambari_server cluster primary OS: redhat6 with user 'root' sshKey File /var/run/ambari-server/bootstrap/1/sshKey password File null using tmp dir /var/run/ambari-server/bootstrap/1 ambari: master; server_port: 8080; ambari version: 1.4.1.25
>
> INFO:root:Executing parallel bootstrap
>
> ERROR:root:ERROR: Bootstrap of host qiao fails because previous action finished with non-zero exit code (1)
>
> INFO:root:Finished parallel bootstrap
2013/11/21
[ "https://Stackoverflow.com/questions/20115972", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2951132/" ]
Do you provide the ssh rsa private key, or paste it? Also, from the machine you are installing from, make sure you can ssh to all hosts without typing any password. If you still get the same error, try `ambari-server reset` and then `ambari-server setup`.
Please restart the Ambari server with **ambari-server restart** and then try accessing Ambari again. It should work.
20,115,972
I tried to use Ambari to manage the installation and maintenance of the Hadoop cluster. After I started the Ambari server, I used the web page to set up the Hadoop cluster. But at the 3rd step -- confirm hosts -- an error showed up. I checked the log at /var/log/ambari-server and found:

> INFO:root:BootStrapping hosts ['qiao'] using /usr/lib/python2.6/site-packages/ambari_server cluster primary OS: redhat6 with user 'root' sshKey File /var/run/ambari-server/bootstrap/1/sshKey password File null using tmp dir /var/run/ambari-server/bootstrap/1 ambari: master; server_port: 8080; ambari version: 1.4.1.25
>
> INFO:root:Executing parallel bootstrap
>
> ERROR:root:ERROR: Bootstrap of host qiao fails because previous action finished with non-zero exit code (1)
>
> INFO:root:Finished parallel bootstrap
2013/11/21
[ "https://Stackoverflow.com/questions/20115972", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2951132/" ]
Do you provide the ssh rsa private key, or paste it? Also, from the machine you are installing from, make sure you can ssh to all hosts without typing any password. If you still get the same error, try `ambari-server reset` and then `ambari-server setup`.
Make sure you can ssh to every single host on the list, including all master hosts. To do this, ensure that the Ambari host's .ssh/id\_rsa.pub entry is included in every host's .ssh/authorized\_keys file. Then ssh from Ambari's host to every single server - and check if it is asking for your password. You can use a tutorial like <http://www.tecmint.com/ssh-passwordless-login-using-ssh-keygen-in-5-easy-steps/> to check if everything has been done properly. You need to do the same on the Ambari host itself, if you added it to the hosts list.
67,219,194
So recently I've been doing a project where, as an optimisation, I want to use numpy arrays instead of the built-in Python lists. It would be a 2D array with a fixed length on both axes. I also want to maximise cache use so that the code is as fast as it can be. However, when playing with the id(var) function I got unexpected results. Code:

```
a = numpy.ascontiguousarray([1,2,3,4,5,6,7,8,9], dtype=numpy.int32)
for var in a:
    print(hex(id(var)))
```

returned:

```
0x1aaba10d8f0
0x1aaba1f33d0
0x1aaba10d8f0
0x1aaba1f33d0
0x1aaba10d8f0
0x1aaba1f33d0
0x1aaba10d8f0
0x1aaba1f33d0
0x1aaba10d8f0
```

which to me is super weird, because it would mean two variables are located in the same memory block (is that even a thing?). Anyway - am I not understanding it correctly? As a side question - can the original task of building a 2D array be achieved with a less expensive method? Numpy arrays come with many functions I do not need. I only need two things: 1. to be able to reverse it, normally done with the [::-1] syntax 2. to check if one == other efficiently Thanks in advance for all the help :-)
2021/04/22
[ "https://Stackoverflow.com/questions/67219194", "https://Stackoverflow.com", "https://Stackoverflow.com/users/13757444/" ]
`id(var)` does not work the way you think it does. Indeed, `id(var)` returns a unique ID for the specified object `var`, but `var` is not a cell of `a`. **`var` is a Python object referencing a cell of `a`**. Note that `a` does not contain such objects, as it would be too inefficient (and the data would not be contiguous as requested). The reason why you see duplicated IDs is that the previous `var` object has been recycled.
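A quick sketch you can run to see the recycling effect yourself:

```
import numpy

a = numpy.ascontiguousarray([1, 2, 3], dtype=numpy.int32)

x, y = a[0], a[0]   # two new wrapper objects for the same cell
print(x is y)       # False: distinct objects while both are alive

# When a wrapper dies immediately, its memory (and hence its id) is often
# reused for the next wrapper - which is the effect seen in the question:
print(id(a[0]), id(a[1]))   # frequently prints the same address twice
```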
The kinds of arrays that you really want are unclear, and so is the purpose. But the talk of contiguous (or continuous) memory and caching suggests that you aren't clear about how Python works. First, Python is object oriented, all the way down. Integers, strings, lists are all objects of some class, with associated methods and attributes. For builtin classes we have little say about the storage. Let's make a small list:

```
In [89]: alist = [1,2,3,1000,1001,1000,'foobar']
In [90]: alist
Out[90]: [1, 2, 3, 1000, 1001, 1000, 'foobar']
```

A list has a data buffer that stores references (pointers if you will) to objects elsewhere in memory. The `id` may give some idea of where, but it shouldn't be understood as a 'pointer' in the `c` language sense. For this list:

```
In [91]: [id(i) for i in alist]
Out[91]:
[9784896,
 9784928,
 9784960,
 140300786887792,
 140300786888080,
 140300786887792,
 140300786115632]
```

1, 2, 3 have small id values because Python has initialized small integers (up to 256) at the start. So all uses will have that unique id.

```
In [92]: id(2)
Out[92]: 9784928
```

Within the list creation `1000` appears to be unique, but not so outside of that context.

```
In [93]: id(1001)
Out[93]: 140300786888592
```

Looks like the string is cached as well - but that's just the interpreter's choice, and we shouldn't count on it.

```
In [94]: id('foobar')
Out[94]: 140300786115632
```

The reversed list is a new list, with its own pointer array. But the references are the same:

```
In [95]: rlist = alist[::-1]
In [96]: rlist
Out[96]: ['foobar', 1000, 1001, 1000, 3, 2, 1]
In [97]: rlist[5],id(rlist[5])
Out[97]: (2, 9784928)
```

Indexing actions like `[::-1]` should just depend on the number of items in the list. They don't depend on where the values actually point. The same goes for other copies. Even appending to the list is relatively time independent (it maintains growth space in the data buffer). Actually working with the objects in the list may depend on where they are stored in memory, but we have little say about that. A "2d" list is actually a list with list elements; nested lists. The sublists are stored elsewhere in memory, just like strings and numbers. In that sense the nested lists are not contiguous. So what about arrays?

```
In [101]: x = np.arange(12)
In [102]: x
Out[102]: array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
In [104]: x.__array_interface__
Out[104]:
{'data': (57148880, False),
 'strides': None,    # default (8,)
 'descr': [('', '<i8')],
 'typestr': '<i8',
 'shape': (12,),
 'version': 3}
In [105]: x.nbytes    # 12*8 bytes
Out[105]: 96
```

`x` is an `ndarray` object, with attributes like `shape`, `strides` and `dtype`, and a data buffer. In this case it is a `c` array 96 bytes long, at 57148880. We can't use that number directly, but I find it useful when comparing this `__array_interface__` dict across arrays. A `view` in particular will have the same, or a related, value.

```
In [106]: x.reshape(3,4)
Out[106]:
array([[ 0,  1,  2,  3],
       [ 4,  5,  6,  7],
       [ 8,  9, 10, 11]])
In [107]: x.reshape(3,4).__array_interface__['data']
Out[107]: (57148880, False)
In [108]: x.reshape(3,4)[1,:].__array_interface__['data']
Out[108]: (57148912, False)    # 32 bytes later
```

The array data buffer has actual values, not references. Here with `int` dtype, each 8 bytes is interpreted as an 'int64' value. Your `id` iteration effectively asks for a list, `[x[i] for i in range(n)]`. An element of an array has to be "unboxed", and is a new object of type `np.int64`. While not an array, it does have a lot of properties in common with a 1-element array.
``` In [110]: x[4].__array_interface__ Out[110]: {'data': (57106480, False), ... 'shape': (),....} ``` That `data` value is unrelated to `x`'s. As long as you use `numpy` methods on existing arrays, speeds are good, often 10x better than equivalent list methods. But if you start with a list, it takes time to make an array. And treating the array like list is slow. And the reverse of `x`? ``` In [111]: x[::-1].__array_interface__ Out[111]: {'data': (57148968, False), 'strides': (-8,), 'descr': [('', '<i8')], 'typestr': '<i8', 'shape': (12,), 'version': 3} ``` It's a new array, but with a different `strides` (-8,), and `data` points to the end of the buffer, `880+96-8`.
3,312,436
Running GNU Emacs 22.2.1 on Ubuntu 9.04. When editing Python code in Emacs, if a docstring contains an apostrophe, Emacs highlights all following code as a comment, until another apostrophe is used. Really annoying! In other words, if I have a docstring like this:

```
''' This docstring has an apostrophe ' '''
```

Then all following code is highlighted as a comment, and comments are highlighted as code. I can escape the apostrophe in the docstring to avoid this, like this:

```
''' This docstring has an escaped apostrophe \' '''
```

Then the highlighting is fine, but it looks funny and unnecessary to other devs on my team, and I get made fun of for using Emacs since "it can't handle apostrophes". ;) So, does anyone know how to make Emacs behave better in this regard? Thanks, Josh
2010/07/22
[ "https://Stackoverflow.com/questions/3312436", "https://Stackoverflow.com", "https://Stackoverflow.com/users/316963/" ]
This appears to work correctly in GNU Emacs 23.2.1. If it's not practical to upgrade, you might be able to copy `python.el` out of the Emacs 23 source code, or perhaps just the relevant pieces of it (python-quote-syntax, python-font-lock-syntactic-keywords, and the code that uses the latter, I think - I'm not much of an Elisp hacker). Unfortunately savannah.gnu.org's bzr browser isn't working just now so I can't point you directly at the code, you'll have to download it. See <http://www.gnu.org/software/emacs/>
It may be an Emacs bug, but it could also be on purpose. If you insert doctests in your docstrings, as I often do to explain an API, you could even wish to have full Python syntax highlighting inside docstrings. But it's probably a bug (the Emacs syntax highlighter likely only cares about single and double quotes, and ignores triple-single and triple-double quotes). If so, you should use triple double quotes instead of triple single quotes as in your example (as far as I know, most users use triple double quotes for docstrings), and you won't have the problem.
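For instance, a docstring written with triple double quotes needs no escaping at all:

```
def example():
    """This docstring has an apostrophe ' and needs no escape."""
    return None
```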
48,080,359
I am new to python. I want to find the max value from col2 with respect to the values 'men', 'women' and 'people' in col1 of the list. Like, `['men', 12, '1946-Truman.txt']`, `['women', 7, '1946-Truman.txt']` and `['people', 49, '1946-Truman.txt']` contain the max values of col2 for men, women and people. One possible solution is to convert this list of tuples to three separate arrays for men, women and people and then find the max value in each of the arrays. But I want a better solution. **Data:**

```
[['men', 2, '1945-Truman.txt']
 ['women', 2, '1945-Truman.txt']
 ['people', 10, '1945-Truman.txt']
 ['men', 12, '1946-Truman.txt']
 ['women', 7, '1946-Truman.txt']
 ['people', 49, '1946-Truman.txt']
 ['men', 7, '1947-Truman.txt']
 ['women', 2, '1947-Truman.txt']
 ['people', 12, '1947-Truman.txt']
 ['men', 4, '1948-Truman.txt']
 ['women', 1, '1948-Truman.txt']
 ['people', 22, '1948-Truman.txt']
 ['men', 2, '1949-Truman.txt']
 ['women', 1, '1949-Truman.txt']
 ['people', 15, '1949-Truman.txt']
 ['men', 6, '1950-Truman.txt']
 ['women', 2, '1950-Truman.txt']
 ['people', 15, '1950-Truman.txt']
 ['men', 8, '1951-Truman.txt']
 ['women', 2, '1951-Truman.txt']
 ['people', 9, '1951-Truman.txt']
 ['men', 3, '1953-Eisenhower.txt']
 ['women', 0, '1953-Eisenhower.txt']
 ['people', 17, '1953-Eisenhower.txt']]
```

Thanks in advance.
2018/01/03
[ "https://Stackoverflow.com/questions/48080359", "https://Stackoverflow.com", "https://Stackoverflow.com/users/6181928/" ]
You may use the `pandas` package. Start by defining the data frame:

```
import pandas as pd
df = pd.DataFrame([['men', 2, '1945-Truman.txt'],
                   ['women', 2, '1945-Truman.txt'],
                   ['people', 10, '1945-Truman.txt'],
                   ['men', 12, '1946-Truman.txt'],
                   ['women', 7, '1946-Truman.txt'],
                   ['people', 49, '1946-Truman.txt'],
                   ['men', 7, '1947-Truman.txt'],
                   ['women', 2, '1947-Truman.txt'],
                   ['people', 12, '1947-Truman.txt'],
                   ['men', 4, '1948-Truman.txt'],
                   ['women', 1, '1948-Truman.txt'],
                   ['people', 22, '1948-Truman.txt'],
                   ['men', 2, '1949-Truman.txt'],
                   ['women', 1, '1949-Truman.txt'],
                   ['people', 15, '1949-Truman.txt'],
                   ['men', 6, '1950-Truman.txt'],
                   ['women', 2, '1950-Truman.txt'],
                   ['people', 15, '1950-Truman.txt'],
                   ['men', 8, '1951-Truman.txt'],
                   ['women', 2, '1951-Truman.txt'],
                   ['people', 9, '1951-Truman.txt'],
                   ['men', 3, '1953-Eisenhower.txt'],
                   ['women', 0, '1953-Eisenhower.txt'],
                   ['people', 17, '1953-Eisenhower.txt']])
```

Then

```
df.groupby([0], sort=False)[1].max()
```

returns

```
0
men       12
women      7
people    49
Name: 1, dtype: int64
```

Is that what you want?
You can use `itertools.groupby`: ``` import itertools new_data = [(a, list(b)) for a, b in itertools.groupby(sorted(data, key=lambda x:x[0]), key=lambda x:x[0])] new_final_data = [max(b, key=lambda x:x[1]) for a, b in new_data] ``` Output: ``` [['men', 12, '1946-Truman.txt'], ['people', 49, '1946-Truman.txt'], ['women', 7, '1946-Truman.txt']] ``` Or, a dictionary with each key the type of individual: ``` new_final_data = {a:max(b, key=lambda x:x[1]) for a, b in new_data} ``` Output: ``` {'women': ['women', 7, '1946-Truman.txt'], 'men': ['men', 12, '1946-Truman.txt'], 'people': ['people', 49, '1946-Truman.txt']} ```
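Note that `itertools.groupby` only groups consecutive items, which is why the snippet above sorts first. If you'd rather avoid the sort, a single pass with a plain dictionary gives the same result; this is a sketch assuming `data` is the list of lists from the question:

```
best = {}
for row in data:
    key, value = row[0], row[1]
    # keep the row with the largest col2 value seen so far for this key
    if key not in best or value > best[key][1]:
        best[key] = row

print(best)
# {'men': ['men', 12, '1946-Truman.txt'],
#  'women': ['women', 7, '1946-Truman.txt'],
#  'people': ['people', 49, '1946-Truman.txt']}
```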
48,080,359
I am new to python. I want to find the max value from col2 with respect to the values 'men', 'women' and 'people' in col1 of the list. Like, `['men', 12, '1946-Truman.txt']`, `['women', 7, '1946-Truman.txt']` and `['people', 49, '1946-Truman.txt']` contain the max values of col2 for men, women and people. One possible solution is to convert this list of tuples to three separate arrays for men, women and people and then find the max value in each of the arrays. But I want a better solution. **Data:**

```
[['men', 2, '1945-Truman.txt']
 ['women', 2, '1945-Truman.txt']
 ['people', 10, '1945-Truman.txt']
 ['men', 12, '1946-Truman.txt']
 ['women', 7, '1946-Truman.txt']
 ['people', 49, '1946-Truman.txt']
 ['men', 7, '1947-Truman.txt']
 ['women', 2, '1947-Truman.txt']
 ['people', 12, '1947-Truman.txt']
 ['men', 4, '1948-Truman.txt']
 ['women', 1, '1948-Truman.txt']
 ['people', 22, '1948-Truman.txt']
 ['men', 2, '1949-Truman.txt']
 ['women', 1, '1949-Truman.txt']
 ['people', 15, '1949-Truman.txt']
 ['men', 6, '1950-Truman.txt']
 ['women', 2, '1950-Truman.txt']
 ['people', 15, '1950-Truman.txt']
 ['men', 8, '1951-Truman.txt']
 ['women', 2, '1951-Truman.txt']
 ['people', 9, '1951-Truman.txt']
 ['men', 3, '1953-Eisenhower.txt']
 ['women', 0, '1953-Eisenhower.txt']
 ['people', 17, '1953-Eisenhower.txt']]
```

Thanks in advance.
2018/01/03
[ "https://Stackoverflow.com/questions/48080359", "https://Stackoverflow.com", "https://Stackoverflow.com/users/6181928/" ]
`pandas` is a good one, but you can just use `max` and `lambda`: ``` men = max(data, key=lambda x: x[1] if x[0] == 'men' else 0) women = max(data, key=lambda x: x[1] if x[0] == 'women' else 0) people = max(data, key=lambda x: x[1] if x[0] == 'people' else 0) ```
You can use `itertools.groupby`: ``` import itertools new_data = [(a, list(b)) for a, b in itertools.groupby(sorted(data, key=lambda x:x[0]), key=lambda x:x[0])] new_final_data = [max(b, key=lambda x:x[1]) for a, b in new_data] ``` Output: ``` [['men', 12, '1946-Truman.txt'], ['people', 49, '1946-Truman.txt'], ['women', 7, '1946-Truman.txt']] ``` Or, a dictionary with each key the type of individual: ``` new_final_data = {a:max(b, key=lambda x:x[1]) for a, b in new_data} ``` Output: ``` {'women': ['women', 7, '1946-Truman.txt'], 'men': ['men', 12, '1946-Truman.txt'], 'people': ['people', 49, '1946-Truman.txt']} ```
48,080,359
I am new to python. I want to find the max value from col2 with respect to the values 'men', 'women' and 'people' in col1 of the list. Like, `['men', 12, '1946-Truman.txt']`, `['women', 7, '1946-Truman.txt']` and `['people', 49, '1946-Truman.txt']` contain the max values of col2 for men, women and people. One possible solution is to convert this list of tuples to three separate arrays for men, women and people and then find the max value in each of the arrays. But I want a better solution. **Data:**

```
[['men', 2, '1945-Truman.txt']
 ['women', 2, '1945-Truman.txt']
 ['people', 10, '1945-Truman.txt']
 ['men', 12, '1946-Truman.txt']
 ['women', 7, '1946-Truman.txt']
 ['people', 49, '1946-Truman.txt']
 ['men', 7, '1947-Truman.txt']
 ['women', 2, '1947-Truman.txt']
 ['people', 12, '1947-Truman.txt']
 ['men', 4, '1948-Truman.txt']
 ['women', 1, '1948-Truman.txt']
 ['people', 22, '1948-Truman.txt']
 ['men', 2, '1949-Truman.txt']
 ['women', 1, '1949-Truman.txt']
 ['people', 15, '1949-Truman.txt']
 ['men', 6, '1950-Truman.txt']
 ['women', 2, '1950-Truman.txt']
 ['people', 15, '1950-Truman.txt']
 ['men', 8, '1951-Truman.txt']
 ['women', 2, '1951-Truman.txt']
 ['people', 9, '1951-Truman.txt']
 ['men', 3, '1953-Eisenhower.txt']
 ['women', 0, '1953-Eisenhower.txt']
 ['people', 17, '1953-Eisenhower.txt']]
```

Thanks in advance.
2018/01/03
[ "https://Stackoverflow.com/questions/48080359", "https://Stackoverflow.com", "https://Stackoverflow.com/users/6181928/" ]
`pandas` is a good one, but you can just use `max` and `lambda`: ``` men = max(data, key=lambda x: x[1] if x[0] == 'men' else 0) women = max(data, key=lambda x: x[1] if x[0] == 'women' else 0) people = max(data, key=lambda x: x[1] if x[0] == 'people' else 0) ```
You can use `pandas`; I suppose **data** is a list of lists:

```
import pandas as pd
df = pd.DataFrame(data)

df.loc[df.groupby([0])[1].idxmax()]

        0   1                2
3     men  12  1946-Truman.txt
5  people  49  1946-Truman.txt
4   women   7  1946-Truman.txt
```

For a result in the same format:

```
df.loc[df.groupby([0])[1].idxmax()].values.tolist()

[['men', 12, '1946-Truman.txt'],
 ['people', 49, '1946-Truman.txt'],
 ['women', 7, '1946-Truman.txt']]
```
48,080,359
I am new to python. I want to find the max value from col2 with respect to the values 'men', 'women' and 'people' in col1 of the list. Like, `['men', 12, '1946-Truman.txt']`, `['women', 7, '1946-Truman.txt']` and `['people', 49, '1946-Truman.txt']` contain the max values of col2 for men, women and people. One possible solution is to convert this list of tuples to three separate arrays for men, women and people and then find the max value in each of the arrays. But I want a better solution. **Data:**

```
[['men', 2, '1945-Truman.txt']
 ['women', 2, '1945-Truman.txt']
 ['people', 10, '1945-Truman.txt']
 ['men', 12, '1946-Truman.txt']
 ['women', 7, '1946-Truman.txt']
 ['people', 49, '1946-Truman.txt']
 ['men', 7, '1947-Truman.txt']
 ['women', 2, '1947-Truman.txt']
 ['people', 12, '1947-Truman.txt']
 ['men', 4, '1948-Truman.txt']
 ['women', 1, '1948-Truman.txt']
 ['people', 22, '1948-Truman.txt']
 ['men', 2, '1949-Truman.txt']
 ['women', 1, '1949-Truman.txt']
 ['people', 15, '1949-Truman.txt']
 ['men', 6, '1950-Truman.txt']
 ['women', 2, '1950-Truman.txt']
 ['people', 15, '1950-Truman.txt']
 ['men', 8, '1951-Truman.txt']
 ['women', 2, '1951-Truman.txt']
 ['people', 9, '1951-Truman.txt']
 ['men', 3, '1953-Eisenhower.txt']
 ['women', 0, '1953-Eisenhower.txt']
 ['people', 17, '1953-Eisenhower.txt']]
```

Thanks in advance.
2018/01/03
[ "https://Stackoverflow.com/questions/48080359", "https://Stackoverflow.com", "https://Stackoverflow.com/users/6181928/" ]
`pandas` is a good one, but you can just use `max` and `lambda`: ``` men = max(data, key=lambda x: x[1] if x[0] == 'men' else 0) women = max(data, key=lambda x: x[1] if x[0] == 'women' else 0) people = max(data, key=lambda x: x[1] if x[0] == 'people' else 0) ```
If you are using a list of lists such as:

```
lst = [['men', 2123, '1945-Truman.txt'],
       ['women', 2, '1945-Truman.txt'],
       ['people', 10, '1945-Truman.txt'],
       ['men', 12, '1946-Truman.txt'],
       ['women', 7, '1946-Truman.txt'],
       ['people', 49, '1946-Truman.txt'],
       ['men', 7, '1947-Truman.txt'],
       ['women', 2, '1947-Truman.txt']]
```

then you can use the following code:

```
max_men = 0
max_women = 0
max_people = 0

for item in lst:
    if item[0] == "men" and item[1] > max_men:
        max_men = item[1]
    elif item[0] == "women" and item[1] > max_women:
        max_women = item[1]
    elif item[0] == "people" and item[1] > max_people:
        max_people = item[1]

print(max_men)
print(max_women)
print(max_people)
```

This will go through each list within the big list called `lst` and find the max value for men, women, and people.
48,080,359
I am new to python. I want to find the max value from col2 with respect to the values 'men', 'women' and 'people' in col1 of the list. Like, `['men', 12, '1946-Truman.txt']`, `['women', 7, '1946-Truman.txt']` and `['people', 49, '1946-Truman.txt']` contain the max values of col2 for men, women and people. One possible solution is to convert this list of tuples to three separate arrays for men, women and people and then find the max value in each of the arrays. But I want a better solution. **Data:**

```
[['men', 2, '1945-Truman.txt']
 ['women', 2, '1945-Truman.txt']
 ['people', 10, '1945-Truman.txt']
 ['men', 12, '1946-Truman.txt']
 ['women', 7, '1946-Truman.txt']
 ['people', 49, '1946-Truman.txt']
 ['men', 7, '1947-Truman.txt']
 ['women', 2, '1947-Truman.txt']
 ['people', 12, '1947-Truman.txt']
 ['men', 4, '1948-Truman.txt']
 ['women', 1, '1948-Truman.txt']
 ['people', 22, '1948-Truman.txt']
 ['men', 2, '1949-Truman.txt']
 ['women', 1, '1949-Truman.txt']
 ['people', 15, '1949-Truman.txt']
 ['men', 6, '1950-Truman.txt']
 ['women', 2, '1950-Truman.txt']
 ['people', 15, '1950-Truman.txt']
 ['men', 8, '1951-Truman.txt']
 ['women', 2, '1951-Truman.txt']
 ['people', 9, '1951-Truman.txt']
 ['men', 3, '1953-Eisenhower.txt']
 ['women', 0, '1953-Eisenhower.txt']
 ['people', 17, '1953-Eisenhower.txt']]
```

Thanks in advance.
2018/01/03
[ "https://Stackoverflow.com/questions/48080359", "https://Stackoverflow.com", "https://Stackoverflow.com/users/6181928/" ]
You may use the `pandas` package. Start by defining the data frame:

```
import pandas as pd
df = pd.DataFrame([['men', 2, '1945-Truman.txt'],
                   ['women', 2, '1945-Truman.txt'],
                   ['people', 10, '1945-Truman.txt'],
                   ['men', 12, '1946-Truman.txt'],
                   ['women', 7, '1946-Truman.txt'],
                   ['people', 49, '1946-Truman.txt'],
                   ['men', 7, '1947-Truman.txt'],
                   ['women', 2, '1947-Truman.txt'],
                   ['people', 12, '1947-Truman.txt'],
                   ['men', 4, '1948-Truman.txt'],
                   ['women', 1, '1948-Truman.txt'],
                   ['people', 22, '1948-Truman.txt'],
                   ['men', 2, '1949-Truman.txt'],
                   ['women', 1, '1949-Truman.txt'],
                   ['people', 15, '1949-Truman.txt'],
                   ['men', 6, '1950-Truman.txt'],
                   ['women', 2, '1950-Truman.txt'],
                   ['people', 15, '1950-Truman.txt'],
                   ['men', 8, '1951-Truman.txt'],
                   ['women', 2, '1951-Truman.txt'],
                   ['people', 9, '1951-Truman.txt'],
                   ['men', 3, '1953-Eisenhower.txt'],
                   ['women', 0, '1953-Eisenhower.txt'],
                   ['people', 17, '1953-Eisenhower.txt']])
```

Then

```
df.groupby([0], sort=False)[1].max()
```

returns

```
0
men       12
women      7
people    49
Name: 1, dtype: int64
```

Is that what you want?
You could create a set out of the first column and find the maximum value afterwards: ``` data = [ ['men', 2, '1945-Truman.txt'], ['women', 2, '1945-Truman.txt'], ... ] keys = set([col[0] for col in data]) for k in keys: print (k, max([col[1] for col in data if col[0] == k])) ``` Returns: ``` women 7 people 49 men 12 ```
48,080,359
I am new to python. I want to find the max value from col2 with respect to the values 'men', 'women' and 'people' in col1 of the list. Like, `['men', 12, '1946-Truman.txt']`, `['women', 7, '1946-Truman.txt']` and `['people', 49, '1946-Truman.txt']` contain the max values of col2 for men, women and people. One possible solution is to convert this list of tuples to three separate arrays for men, women and people and then find the max value in each of the arrays. But I want a better solution. **Data:**

```
[['men', 2, '1945-Truman.txt']
 ['women', 2, '1945-Truman.txt']
 ['people', 10, '1945-Truman.txt']
 ['men', 12, '1946-Truman.txt']
 ['women', 7, '1946-Truman.txt']
 ['people', 49, '1946-Truman.txt']
 ['men', 7, '1947-Truman.txt']
 ['women', 2, '1947-Truman.txt']
 ['people', 12, '1947-Truman.txt']
 ['men', 4, '1948-Truman.txt']
 ['women', 1, '1948-Truman.txt']
 ['people', 22, '1948-Truman.txt']
 ['men', 2, '1949-Truman.txt']
 ['women', 1, '1949-Truman.txt']
 ['people', 15, '1949-Truman.txt']
 ['men', 6, '1950-Truman.txt']
 ['women', 2, '1950-Truman.txt']
 ['people', 15, '1950-Truman.txt']
 ['men', 8, '1951-Truman.txt']
 ['women', 2, '1951-Truman.txt']
 ['people', 9, '1951-Truman.txt']
 ['men', 3, '1953-Eisenhower.txt']
 ['women', 0, '1953-Eisenhower.txt']
 ['people', 17, '1953-Eisenhower.txt']]
```

Thanks in advance.
2018/01/03
[ "https://Stackoverflow.com/questions/48080359", "https://Stackoverflow.com", "https://Stackoverflow.com/users/6181928/" ]
`pandas` is a good one, but you can just use `max` and `lambda`: ``` men = max(data, key=lambda x: x[1] if x[0] == 'men' else 0) women = max(data, key=lambda x: x[1] if x[0] == 'women' else 0) people = max(data, key=lambda x: x[1] if x[0] == 'people' else 0) ```
You may use the `pandas` package. Start by defining the data frame:

```
import pandas as pd
df = pd.DataFrame([['men', 2, '1945-Truman.txt'],
                   ['women', 2, '1945-Truman.txt'],
                   ['people', 10, '1945-Truman.txt'],
                   ['men', 12, '1946-Truman.txt'],
                   ['women', 7, '1946-Truman.txt'],
                   ['people', 49, '1946-Truman.txt'],
                   ['men', 7, '1947-Truman.txt'],
                   ['women', 2, '1947-Truman.txt'],
                   ['people', 12, '1947-Truman.txt'],
                   ['men', 4, '1948-Truman.txt'],
                   ['women', 1, '1948-Truman.txt'],
                   ['people', 22, '1948-Truman.txt'],
                   ['men', 2, '1949-Truman.txt'],
                   ['women', 1, '1949-Truman.txt'],
                   ['people', 15, '1949-Truman.txt'],
                   ['men', 6, '1950-Truman.txt'],
                   ['women', 2, '1950-Truman.txt'],
                   ['people', 15, '1950-Truman.txt'],
                   ['men', 8, '1951-Truman.txt'],
                   ['women', 2, '1951-Truman.txt'],
                   ['people', 9, '1951-Truman.txt'],
                   ['men', 3, '1953-Eisenhower.txt'],
                   ['women', 0, '1953-Eisenhower.txt'],
                   ['people', 17, '1953-Eisenhower.txt']])
```

Then

```
df.groupby([0], sort=False)[1].max()
```

returns

```
0
men       12
women      7
people    49
Name: 1, dtype: int64
```

Is that what you want?
48,080,359
I am new to python. I want to find the max value from col2 with respect to the values 'men', 'women' and 'people' in col1 of the list. Like, `['men', 12, '1946-Truman.txt']`, `['women', 7, '1946-Truman.txt']` and `['people', 49, '1946-Truman.txt']` contain the max values of col2 for men, women and people. One possible solution is to convert this list of tuples to three separate arrays for men, women and people and then find the max value in each of the arrays. But I want a better solution. **Data:**

```
[['men', 2, '1945-Truman.txt']
 ['women', 2, '1945-Truman.txt']
 ['people', 10, '1945-Truman.txt']
 ['men', 12, '1946-Truman.txt']
 ['women', 7, '1946-Truman.txt']
 ['people', 49, '1946-Truman.txt']
 ['men', 7, '1947-Truman.txt']
 ['women', 2, '1947-Truman.txt']
 ['people', 12, '1947-Truman.txt']
 ['men', 4, '1948-Truman.txt']
 ['women', 1, '1948-Truman.txt']
 ['people', 22, '1948-Truman.txt']
 ['men', 2, '1949-Truman.txt']
 ['women', 1, '1949-Truman.txt']
 ['people', 15, '1949-Truman.txt']
 ['men', 6, '1950-Truman.txt']
 ['women', 2, '1950-Truman.txt']
 ['people', 15, '1950-Truman.txt']
 ['men', 8, '1951-Truman.txt']
 ['women', 2, '1951-Truman.txt']
 ['people', 9, '1951-Truman.txt']
 ['men', 3, '1953-Eisenhower.txt']
 ['women', 0, '1953-Eisenhower.txt']
 ['people', 17, '1953-Eisenhower.txt']]
```

Thanks in advance.
2018/01/03
[ "https://Stackoverflow.com/questions/48080359", "https://Stackoverflow.com", "https://Stackoverflow.com/users/6181928/" ]
`pandas` is a good one, but you can just use `max` and `lambda`: ``` men = max(data, key=lambda x: x[1] if x[0] == 'men' else 0) women = max(data, key=lambda x: x[1] if x[0] == 'women' else 0) people = max(data, key=lambda x: x[1] if x[0] == 'people' else 0) ```
You could create a set out of the first column and find the maximum value afterwards: ``` data = [ ['men', 2, '1945-Truman.txt'], ['women', 2, '1945-Truman.txt'], ... ] keys = set([col[0] for col in data]) for k in keys: print (k, max([col[1] for col in data if col[0] == k])) ``` Returns: ``` women 7 people 49 men 12 ```
48,080,359
I am new to python. I want to find the max value from col2 with respect to the values 'men', 'women' and 'people' in col1 of the list. Like, `['men', 12, '1946-Truman.txt']`, `['women', 7, '1946-Truman.txt']` and `['people', 49, '1946-Truman.txt']` contain the max values of col2 for men, women and people. One possible solution is to convert this list of tuples to three separate arrays for men, women and people and then find the max value in each of the arrays. But I want a better solution. **Data:**

```
[['men', 2, '1945-Truman.txt']
 ['women', 2, '1945-Truman.txt']
 ['people', 10, '1945-Truman.txt']
 ['men', 12, '1946-Truman.txt']
 ['women', 7, '1946-Truman.txt']
 ['people', 49, '1946-Truman.txt']
 ['men', 7, '1947-Truman.txt']
 ['women', 2, '1947-Truman.txt']
 ['people', 12, '1947-Truman.txt']
 ['men', 4, '1948-Truman.txt']
 ['women', 1, '1948-Truman.txt']
 ['people', 22, '1948-Truman.txt']
 ['men', 2, '1949-Truman.txt']
 ['women', 1, '1949-Truman.txt']
 ['people', 15, '1949-Truman.txt']
 ['men', 6, '1950-Truman.txt']
 ['women', 2, '1950-Truman.txt']
 ['people', 15, '1950-Truman.txt']
 ['men', 8, '1951-Truman.txt']
 ['women', 2, '1951-Truman.txt']
 ['people', 9, '1951-Truman.txt']
 ['men', 3, '1953-Eisenhower.txt']
 ['women', 0, '1953-Eisenhower.txt']
 ['people', 17, '1953-Eisenhower.txt']]
```

Thanks in advance.
2018/01/03
[ "https://Stackoverflow.com/questions/48080359", "https://Stackoverflow.com", "https://Stackoverflow.com/users/6181928/" ]
`pandas` is a good one, but you can just use `max` and `lambda`: ``` men = max(data, key=lambda x: x[1] if x[0] == 'men' else 0) women = max(data, key=lambda x: x[1] if x[0] == 'women' else 0) people = max(data, key=lambda x: x[1] if x[0] == 'people' else 0) ```
```
import operator

men = [t for t in yourlist if t[0] == 'men']
women = [t for t in yourlist if t[0] == 'women']
people = [t for t in yourlist if t[0] == 'people']

sorted(men, key=operator.itemgetter(1), reverse=True)[0][1]
sorted(women, key=operator.itemgetter(1), reverse=True)[0][1]
sorted(people, key=operator.itemgetter(1), reverse=True)[0][1]
```
48,080,359
I am new to python. I want to find the max value from col2 with respect to the values 'men', 'women' and 'people' in col1 of the list. Like, `['men', 12, '1946-Truman.txt']`, `['women', 7, '1946-Truman.txt']` and `['people', 49, '1946-Truman.txt']` contain the max values of col2 for men, women and people. One possible solution is to convert this list of tuples to three separate arrays for men, women and people and then find the max value in each of the arrays. But I want a better solution. **Data:**

```
[['men', 2, '1945-Truman.txt']
 ['women', 2, '1945-Truman.txt']
 ['people', 10, '1945-Truman.txt']
 ['men', 12, '1946-Truman.txt']
 ['women', 7, '1946-Truman.txt']
 ['people', 49, '1946-Truman.txt']
 ['men', 7, '1947-Truman.txt']
 ['women', 2, '1947-Truman.txt']
 ['people', 12, '1947-Truman.txt']
 ['men', 4, '1948-Truman.txt']
 ['women', 1, '1948-Truman.txt']
 ['people', 22, '1948-Truman.txt']
 ['men', 2, '1949-Truman.txt']
 ['women', 1, '1949-Truman.txt']
 ['people', 15, '1949-Truman.txt']
 ['men', 6, '1950-Truman.txt']
 ['women', 2, '1950-Truman.txt']
 ['people', 15, '1950-Truman.txt']
 ['men', 8, '1951-Truman.txt']
 ['women', 2, '1951-Truman.txt']
 ['people', 9, '1951-Truman.txt']
 ['men', 3, '1953-Eisenhower.txt']
 ['women', 0, '1953-Eisenhower.txt']
 ['people', 17, '1953-Eisenhower.txt']]
```

Thanks in advance.
2018/01/03
[ "https://Stackoverflow.com/questions/48080359", "https://Stackoverflow.com", "https://Stackoverflow.com/users/6181928/" ]
You may use the `pandas` package. Start by defining the data frame:

```
import pandas as pd
df = pd.DataFrame([['men', 2, '1945-Truman.txt'],
                   ['women', 2, '1945-Truman.txt'],
                   ['people', 10, '1945-Truman.txt'],
                   ['men', 12, '1946-Truman.txt'],
                   ['women', 7, '1946-Truman.txt'],
                   ['people', 49, '1946-Truman.txt'],
                   ['men', 7, '1947-Truman.txt'],
                   ['women', 2, '1947-Truman.txt'],
                   ['people', 12, '1947-Truman.txt'],
                   ['men', 4, '1948-Truman.txt'],
                   ['women', 1, '1948-Truman.txt'],
                   ['people', 22, '1948-Truman.txt'],
                   ['men', 2, '1949-Truman.txt'],
                   ['women', 1, '1949-Truman.txt'],
                   ['people', 15, '1949-Truman.txt'],
                   ['men', 6, '1950-Truman.txt'],
                   ['women', 2, '1950-Truman.txt'],
                   ['people', 15, '1950-Truman.txt'],
                   ['men', 8, '1951-Truman.txt'],
                   ['women', 2, '1951-Truman.txt'],
                   ['people', 9, '1951-Truman.txt'],
                   ['men', 3, '1953-Eisenhower.txt'],
                   ['women', 0, '1953-Eisenhower.txt'],
                   ['people', 17, '1953-Eisenhower.txt']])
```

Then

```
df.groupby([0], sort=False)[1].max()
```

returns

```
0
men       12
women      7
people    49
Name: 1, dtype: int64
```

Is that what you want?
If you are using a list of lists such as:

```
lst = [['men', 2123, '1945-Truman.txt'],
       ['women', 2, '1945-Truman.txt'],
       ['people', 10, '1945-Truman.txt'],
       ['men', 12, '1946-Truman.txt'],
       ['women', 7, '1946-Truman.txt'],
       ['people', 49, '1946-Truman.txt'],
       ['men', 7, '1947-Truman.txt'],
       ['women', 2, '1947-Truman.txt']]
```

then you can use the following code:

```
max_men = 0
max_women = 0
max_people = 0

for item in lst:
    if item[0] == "men" and item[1] > max_men:
        max_men = item[1]
    elif item[0] == "women" and item[1] > max_women:
        max_women = item[1]
    elif item[0] == "people" and item[1] > max_people:
        max_people = item[1]

print(max_men)
print(max_women)
print(max_people)
```

This will go through each list within the big list called `lst` and find the max value for men, women, and people.
48,080,359
I am new to python. I want to find the max value from col2 with respect to the values 'men', 'women' and 'people' in col1 of the list. Like, `['men', 12, '1946-Truman.txt']`, `['women', 7, '1946-Truman.txt']` and `['people', 49, '1946-Truman.txt']` contain the max values of col2 for men, women and people. One possible solution is to convert this list of tuples to three separate arrays for men, women and people and then find the max value in each of the arrays. But I want a better solution. **Data:**

```
[['men', 2, '1945-Truman.txt']
 ['women', 2, '1945-Truman.txt']
 ['people', 10, '1945-Truman.txt']
 ['men', 12, '1946-Truman.txt']
 ['women', 7, '1946-Truman.txt']
 ['people', 49, '1946-Truman.txt']
 ['men', 7, '1947-Truman.txt']
 ['women', 2, '1947-Truman.txt']
 ['people', 12, '1947-Truman.txt']
 ['men', 4, '1948-Truman.txt']
 ['women', 1, '1948-Truman.txt']
 ['people', 22, '1948-Truman.txt']
 ['men', 2, '1949-Truman.txt']
 ['women', 1, '1949-Truman.txt']
 ['people', 15, '1949-Truman.txt']
 ['men', 6, '1950-Truman.txt']
 ['women', 2, '1950-Truman.txt']
 ['people', 15, '1950-Truman.txt']
 ['men', 8, '1951-Truman.txt']
 ['women', 2, '1951-Truman.txt']
 ['people', 9, '1951-Truman.txt']
 ['men', 3, '1953-Eisenhower.txt']
 ['women', 0, '1953-Eisenhower.txt']
 ['people', 17, '1953-Eisenhower.txt']]
```

Thanks in advance.
2018/01/03
[ "https://Stackoverflow.com/questions/48080359", "https://Stackoverflow.com", "https://Stackoverflow.com/users/6181928/" ]
You may use the `pandas` package. Start by defining the data frame:

```
import pandas as pd
df = pd.DataFrame([['men', 2, '1945-Truman.txt'],
                   ['women', 2, '1945-Truman.txt'],
                   ['people', 10, '1945-Truman.txt'],
                   ['men', 12, '1946-Truman.txt'],
                   ['women', 7, '1946-Truman.txt'],
                   ['people', 49, '1946-Truman.txt'],
                   ['men', 7, '1947-Truman.txt'],
                   ['women', 2, '1947-Truman.txt'],
                   ['people', 12, '1947-Truman.txt'],
                   ['men', 4, '1948-Truman.txt'],
                   ['women', 1, '1948-Truman.txt'],
                   ['people', 22, '1948-Truman.txt'],
                   ['men', 2, '1949-Truman.txt'],
                   ['women', 1, '1949-Truman.txt'],
                   ['people', 15, '1949-Truman.txt'],
                   ['men', 6, '1950-Truman.txt'],
                   ['women', 2, '1950-Truman.txt'],
                   ['people', 15, '1950-Truman.txt'],
                   ['men', 8, '1951-Truman.txt'],
                   ['women', 2, '1951-Truman.txt'],
                   ['people', 9, '1951-Truman.txt'],
                   ['men', 3, '1953-Eisenhower.txt'],
                   ['women', 0, '1953-Eisenhower.txt'],
                   ['people', 17, '1953-Eisenhower.txt']])
```

Then

```
df.groupby([0], sort=False)[1].max()
```

returns

```
0
men       12
women      7
people    49
Name: 1, dtype: int64
```

Is that what you want?
```
import operator

men = [t for t in yourlist if t[0] == 'men']
women = [t for t in yourlist if t[0] == 'women']
people = [t for t in yourlist if t[0] == 'people']

sorted(men, key=operator.itemgetter(1), reverse=True)[0][1]
sorted(women, key=operator.itemgetter(1), reverse=True)[0][1]
sorted(people, key=operator.itemgetter(1), reverse=True)[0][1]
```
42,776,454
I have multiple series of "start" and "stop" times in a set of data, and would like to see if a particular set of dates/times does or does not fall between a given set of "start/stop" times. I'm using pandas in python, and I've tried having the data as dataframes or as timeseries- haven't gotten either to work. I've been using this bit of code: ``` print (start1 < test1[0:LenS] < stop1).any() ``` (note that the "test1[0:LenS]" is just to make sure test1 is the same length as start1 and stop1) and I get this error: ``` ValueError: The truth value of a Series is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all(). ``` Since I'm trying to use .any(), I'm pretty confused. Thanks for your help!
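As an aside on the ValueError in the traceback: `start1 < test1[0:LenS] < stop1` is a chained comparison, which Python evaluates as `(start1 < s) and (s < stop1)`, and the implicit `and` on a Series is exactly what raises the ambiguity error. A sketch of the element-wise form, assuming the three Series are aligned:

```
# element-wise version of start1 < test1[0:LenS] < stop1
mask = (start1 < test1[0:LenS]) & (test1[0:LenS] < stop1)
print(mask.any())
```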
2017/03/14
[ "https://Stackoverflow.com/questions/42776454", "https://Stackoverflow.com", "https://Stackoverflow.com/users/7697187/" ]
Put `saveFileDialog1.ShowDialog();` inside some button event handler which lets the user save the document. Double-click on the `SaveFileDialog` icon in your Visual Studio designer window to add the FileOk event handler, and within the event handler put your code like this:

```
private void saveFileDialog1_FileOk(object sender, CancelEventArgs e)
{
    var doc = DocX.Create(saveFileDialog1.FileName);
    doc.InsertParagraph("This is my first paragraph");
    doc.Save();
}
```

Hope it helps!
To do this: ``` private void btn_approve_Click(object sender, EventArgs e) { saveFileDialog1.Title = "Save As"; saveFileDialog1.Filter = "DocX|*.docx"; if (saveFileDialog1.ShowDialog() == DialogResult.OK) { var doc = DocX.Create(saveFileDialog1.FileName); doc.InsertParagraph("This is my first paragraph"); doc.Save(); } } ```
41,467,654
I have a python script on a machine. I can run it from both an **ssh connection** and the **console** of the machine. Because the script changes some IP config stuff, I want to disconnect the ssh session before changing the IP - that way the ssh session won't hang and will be closed properly before the IP changes. **So** - is there a way in Python to check if the script is run from ssh, and if so to close the ssh session?
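A minimal sketch of one common heuristic, relying on the environment variables that sshd sets for its sessions (their absence suggests a local console, though it is not a guarantee):

```
import os

def running_over_ssh():
    # sshd exports these for the sessions it spawns;
    # a local console normally has none of them
    return any(v in os.environ for v in ("SSH_CONNECTION", "SSH_CLIENT", "SSH_TTY"))

if running_over_ssh():
    print("Started from an ssh session")
```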
2017/01/04
[ "https://Stackoverflow.com/questions/41467654", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1662033/" ]
You can add header info as a dict in the 4th argument. As far as I know, it is not possible to embed it in the BODY.

```
import http.client

BODY = "***filecontents***"
conn = http.client.HTTPConnection("127.0.0.1", 5000)
conn.connect()
conn.request("PUT", "/file", BODY,
             {"someheadername": "someheadervalues",
              "someotherheadername": "someotherheadervalues"})
```
The command:

```
conn.request("PUT", "/file", BODY)
```

is also overloaded as below, so it's pretty straightforward :)

```
conn.request("PUT", "url", payload, headers)
```
57,717,100
By "comparable", I mean "able to mutually perform the comparison operations `>`, `<`, `>=`, `<=`, `==`, and `!=` without raising a `TypeError`". There are a number of different classes for which this property does hold: ```py 1 < 2.5 # int and float 2 < decimal.Decimal(4) # int and Decimal "alice" < "bob" # str and str (1, 2) < (3, 4) # tuple and tuple ``` and for which it doesn't: ```py 1 < "2" # int and str 1.5 < "2.5" # float and str ``` even when it seems like it really ought to: ```py datetime.date(2018, 9, 25) < datetime.datetime(2019, 1, 31) # date and datetime [1, 2] < (3, 4) # list and tuple ``` [As demonstrated in this similar question](https://stackoverflow.com/questions/29457135/in-python-how-to-know-whether-objects-can-be-compared), you can obviously check this for two unknown-typed objects `a` and `b` by using the traditional python approach of "ask forgiveness, not permission" and using a `try`/`except` block: ```py try: a < b # do something except TypeError: # do something else ``` but [catching exceptions is expensive](https://stackoverflow.com/questions/2522005/cost-of-exception-handlers-in-python), and I expect the second branch to be taken sufficiently frequently for that to matter, so I'd like to catch this in an `if`/`else` statement instead. How would I do that?
2019/08/29
[ "https://Stackoverflow.com/questions/57717100", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2648811/" ]
Since it is impossible to know beforehand whether a comparison operation can be performed on two specific types of operands until you actually perform such an operation, the closest thing you can do to achieving the desired behavior of avoiding having to catch a `TypeError` is to cache the known combinations of the operator and the types of the left and right operands that have already caused a `TypeError` before. You can do this by creating a class with such a cache and wrapper methods that do such a validation before proceeding with the comparisons:

```
from operator import gt, lt, ge, le

def validate_operation(op):
    def wrapper(cls, a, b):
        # the signature can also be just (type(a), type(b)) if you don't care about op
        signature = op, type(a), type(b)
        if signature not in cls.incomparables:
            try:
                return op(a, b)
            except TypeError:
                cls.incomparables.add(signature)
        else:
            print('Exception avoided for {}'.format(signature))  # for debug only
    return wrapper

class compare:
    incomparables = set()

for op in gt, lt, ge, le:
    setattr(compare, op.__name__, classmethod(validate_operation(op)))
```

so that:

```
import datetime
print(compare.gt(1, 2.0))
print(compare.gt(1, "a"))
print(compare.gt(2, 'b'))
print(compare.lt(datetime.date(2018, 9, 25), datetime.datetime(2019, 1, 31)))
print(compare.lt(datetime.date(2019, 9, 25), datetime.datetime(2020, 1, 31)))
```

would output:

```
False
None
Exception avoided for (<built-in function gt>, <class 'int'>, <class 'str'>)
None
None
Exception avoided for (<built-in function lt>, <class 'datetime.date'>, <class 'datetime.datetime'>)
None
```

and so that you can use an `if` statement instead of an exception handler to validate a comparison:

```
result = compare.gt(obj1, obj2)
if result is None:
    ...  # handle the fact that we cannot perform the > operation on obj1 and obj2
elif result:
    ...  # obj1 is greater than obj2
else:
    ...  # obj1 is not greater than obj2
```

And here are some timing statistics:

```
from timeit import timeit

print(timeit('''try:
    1 > 1
except TypeError:
    pass''', globals=globals()))
print(timeit('''try:
    1 > "a"
except TypeError:
    pass''', globals=globals()))
print(timeit('compare.gt(1, "a")', globals=globals()))
```

This outputs, on my machine:

```
0.047088712933431365
0.7171912713398885
0.46406612257995117
```

As you can see, the cached comparison validation does save you around 1/3 of the time when the comparison throws an exception, but it is around 10 times slower when it doesn't, so this caching mechanism makes sense only if you anticipate that the vast majority of your comparisons are going to throw an exception.
What you could do is use `isinstance` before the comparison, and deal with the exceptional case yourself:

```
from datetime import datetime

if isinstance(date_1, datetime) != isinstance(date_2, datetime):
    # deal with the mismatch instead of comparing
    ...
```
53,581,563
Currently, I'm trying to make a game, and in the game I would like it so that if the character is on top of an object, it picks it up. This is what I have so far:

```
import turtle
import time

default = turtle.clone()
scar = turtle.clone()

def pickupScar():
    if default.distance(-7,48) > 5.0:
        default.changeshape('defaultscar.gif')

wn = turtle.Screen()
wn.setup(500,500)
wn.bgpic('TrumpTowers.gif')
wn.register_shape('default.gif')
wn.register_shape('scar.gif')
wn.register_shape('defaultscar.gif')
turtle.hideturtle()
default.shape('default.gif')
scar.shape('scar.gif')
default.pu()
default.left(90)
default.bk(35)
scar.pu()
scar.left(90)
scar.fd(45)
scar.speed(-1)
default.ondrag(default.goto)
```

Does anybody know how I would go about writing the `pickupScar` function? I'm new to Python & turtle. If you recognize what my game is about, please don't judge me; it's for a school project and I couldn't think of any game ideas.
2018/12/02
[ "https://Stackoverflow.com/questions/53581563", "https://Stackoverflow.com", "https://Stackoverflow.com/users/10735185/" ]
Since I don't have your images, nor recognize what your game is about, below is an example of the functionality you describe. On the screen is a black circle and pink square. You can drag the circle and if you drag it onto the square, it will sprout a head and legs becoming a turtle. Dragging off the square, it reverts to being a circle: ``` from turtle import Screen, Turtle def drag(x, y): default.ondrag(None) # disable handler inside handler default.goto(x, y) if default.distance(scar) < 40: default.shape('turtle') elif default.shape() == 'turtle': default.shape('circle') default.ondrag(drag) wn = Screen() wn.setup(500, 500) scar = Turtle('square', visible=False) scar.shapesize(4) scar.color('pink') scar.penup() scar.left(90) scar.forward(50) scar.showturtle() default = Turtle('circle', visible=False) default.shapesize(2) default.speed('fastest') default.penup() default.left(90) default.backward(50) default.showturtle() default.ondrag(drag) wn.mainloop() ```
I don't know turtle graphics, but in the real world, to determine the distance between two points (on a 2D surface) we use the **Pythagorean theorem**. If one object is at `(x1, y1)` and another at `(x2, y2)`, the distance is

```
import math
dist = math.sqrt((x1 - x2)**2 + (y1 - y2)**2)
```

So, if `dist <= R`, the turtle (or whatever) is within a radius R of the desired point.
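In turtle graphics this formula is already wrapped up for you: a turtle's `distance()` method accepts either coordinates or another turtle. A sketch of the pickup check under that assumption, reusing the `default` and `scar` turtles from the question (the radius is an assumed threshold):

```
PICKUP_RADIUS = 5.0  # assumed threshold, tune to taste

def pickup_scar():
    # distance() returns the straight-line distance to the other turtle
    if default.distance(scar) <= PICKUP_RADIUS:
        default.shape('defaultscar.gif')
```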
29,871,209
I have compressed a file using python-snappy and put it in my hdfs store. I am now trying to read it in like so, but I get the following traceback. I can't find an example of how to read the file in so I can process it. I can read the text file (uncompressed) version fine. Should I be using sc.sequenceFile? Thanks!

I first compressed the file (with python-snappy) and pushed it to hdfs:

```
python -m snappy -c gene_regions.vcf gene_regions.vcf.snappy
hdfs dfs -put gene_regions.vcf.snappy /
```

I then added the following to spark-env.sh:

```
export SPARK_EXECUTOR_MEMORY=16G
export HADOOP_HOME=/usr/local/hadoop
export JAVA_LIBRARY_PATH=$JAVA_LIBRARY_PATH:$HADOOP_HOME/lib/native
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HADOOP_HOME/lib/native
export SPARK_LIBRARY_PATH=$SPARK_LIBRARY_PATH:$HADOOP_HOME/lib/native
export SPARK_CLASSPATH=$SPARK_CLASSPATH:$HADOOP_HOME/lib/lib/snappy-java-1.1.1.8-SNAPSHOT.jar
```

I then launch my spark master and slave, and finally my ipython notebook, where I execute the code below:

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf.snappy")
a_file.first()
```

which fails with:

```
ValueError                                Traceback (most recent call last)
in ()
----> 1 a_file.first()

/home/user/Software/spark-1.3.0-bin-hadoop2.4/python/pyspark/rdd.pyc in first(self)
   1244         if rs:
   1245             return rs[0]
-> 1246         raise ValueError("RDD is empty")
   1247
   1248     def isEmpty(self):

ValueError: RDD is empty
```

Working code for the (uncompressed) text file:

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf")
a_file.first()
```

output: `u'##fileformat=VCFv4.1'`
2015/04/25
[ "https://Stackoverflow.com/questions/29871209", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4833015/" ]
The issue here is that python-snappy is not compatible with Hadoop's snappy codec, which is what Spark will use to read the data when it sees a ".snappy" suffix. They are based on the same underlying algorithm, but they aren't compatible: you can't compress with one and decompress with the other. You can make this work either by writing your data out in the first place to snappy using Spark or Hadoop, or by having Spark read your data as binary blobs and then manually invoking the python-snappy decompression yourself (see binaryFiles here <http://spark.apache.org/docs/latest/api/python/pyspark.html>). The binary blob approach is a bit more brittle because it needs to fit the entire file in memory for each input file. But if your data is small enough, that will work.
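A minimal sketch of that binary-blob route, assuming each file fits in memory and was written in python-snappy's framing (stream) format, which its command-line tool uses:

```
import io
import snappy

def decompress_lines(kv):
    path, blob = kv
    out = io.BytesIO()
    # the python-snappy CLI writes the snappy framing (stream) format,
    # so use the stream decompressor rather than raw snappy.decompress
    snappy.stream_decompress(io.BytesIO(blob), out)
    return out.getvalue().decode('utf-8').splitlines()

lines = sc.binaryFiles("hdfs://master:54310/gene_regions.vcf.snappy").flatMap(decompress_lines)
print(lines.first())
```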
Alright, I found a solution! Build this: <https://github.com/liancheng/snappy-utils>. On Ubuntu 14.10 I had to install gcc-4.4 to get it to build; I commented on the error I was seeing here: <https://code.google.com/p/hadoop-snappy/issues/detail?id=9>. I can now compress text files using snappy at the command line like so:

```
snappy -c gene_regions.vcf -o gene_regions.vcf.snappy
```

dump it into hdfs:

```
hdfs dfs -put gene_regions.vcf.snappy
```

and then load it in pyspark!

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf.snappy")
a_file.first()
```

Voila! The header of the vcf...

```
u'##fileformat=VCFv4.1'
```
29,871,209
I have compressed a file using python-snappy and put it in my hdfs store. I am now trying to read it in like so, but I get the following traceback. I can't find an example of how to read the file in so I can process it. I can read the text file (uncompressed) version fine. Should I be using sc.sequenceFile? Thanks!

I first compressed the file (with python-snappy) and pushed it to hdfs:

```
python -m snappy -c gene_regions.vcf gene_regions.vcf.snappy
hdfs dfs -put gene_regions.vcf.snappy /
```

I then added the following to spark-env.sh:

```
export SPARK_EXECUTOR_MEMORY=16G
export HADOOP_HOME=/usr/local/hadoop
export JAVA_LIBRARY_PATH=$JAVA_LIBRARY_PATH:$HADOOP_HOME/lib/native
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HADOOP_HOME/lib/native
export SPARK_LIBRARY_PATH=$SPARK_LIBRARY_PATH:$HADOOP_HOME/lib/native
export SPARK_CLASSPATH=$SPARK_CLASSPATH:$HADOOP_HOME/lib/lib/snappy-java-1.1.1.8-SNAPSHOT.jar
```

I then launch my spark master and slave, and finally my ipython notebook, where I execute the code below:

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf.snappy")
a_file.first()
```

which fails with:

```
ValueError                                Traceback (most recent call last)
in ()
----> 1 a_file.first()

/home/user/Software/spark-1.3.0-bin-hadoop2.4/python/pyspark/rdd.pyc in first(self)
   1244         if rs:
   1245             return rs[0]
-> 1246         raise ValueError("RDD is empty")
   1247
   1248     def isEmpty(self):

ValueError: RDD is empty
```

Working code for the (uncompressed) text file:

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf")
a_file.first()
```

output: `u'##fileformat=VCFv4.1'`
2015/04/25
[ "https://Stackoverflow.com/questions/29871209", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4833015/" ]
The issue here is that python-snappy is not compatible with Hadoop's snappy codec, which is what Spark will use to read the data when it sees a ".snappy" suffix. They are based on the same underlying algorithm, but they aren't compatible: you can't compress with one and decompress with the other.

You can make this work either by writing your data out to snappy using Spark or Hadoop in the first place, or by having Spark read your data as binary blobs and then manually invoking the python-snappy decompression yourself (see `binaryFiles` here: <http://spark.apache.org/docs/latest/api/python/pyspark.html>). The binary blob approach is a bit more brittle, because it needs to fit the entire file in memory for each input file. But if your data is small enough, that will work.
Not sure exactly which `snappy` codec my files have, but `spark.read.text` worked without incident for me.
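For reference, a minimal sketch of that newer DataFrame-based read; this assumes a `SparkSession` named `spark` (Spark 2.x+), and the path is borrowed from the question:

```python
df = spark.read.text("hdfs://master:54310/gene_regions.vcf.snappy")
df.first()  # e.g. Row(value=u'##fileformat=VCFv4.1') if the codec matches
```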
29,871,209
I have compressed a file using python-snappy and put it in my HDFS store. I am now trying to read it in like so, but I get the following traceback. I can't find an example of how to read the file in so I can process it. I can read the text file (uncompressed) version fine. Should I be using sc.sequenceFile? Thanks!

I first compressed the file and pushed it to HDFS:

```
python -m snappy -c gene_regions.vcf gene_regions.vcf.snappy
hdfs dfs -put gene_regions.vcf.snappy /
```

I then added the following to spark-env.sh:

```
export SPARK_EXECUTOR_MEMORY=16G
export HADOOP_HOME=/usr/local/hadoop
export JAVA_LIBRARY_PATH=$JAVA_LIBRARY_PATH:$HADOOP_HOME/lib/native
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HADOOP_HOME/lib/native
export SPARK_LIBRARY_PATH=$SPARK_LIBRARY_PATH:$HADOOP_HOME/lib/native
export SPARK_CLASSPATH=$SPARK_CLASSPATH:$HADOOP_HOME/lib/lib/snappy-java-1.1.1.8-SNAPSHOT.jar
```

I then launch my Spark master and slave and finally my IPython notebook, where I am executing the code below:

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf.snappy")
a_file.first()
```

---

```
ValueError                                Traceback (most recent call last)
in ()
----> 1 a_file.first()

/home/user/Software/spark-1.3.0-bin-hadoop2.4/python/pyspark/rdd.pyc in first(self)
   1244         if rs:
   1245             return rs[0]
-> 1246         raise ValueError("RDD is empty")
   1247
   1248     def isEmpty(self):

ValueError: RDD is empty
```

Working code (uncompressed text file):

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf")
a_file.first()
```

output: `u'##fileformat=VCFv4.1'`
2015/04/25
[ "https://Stackoverflow.com/questions/29871209", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4833015/" ]
The issue here is that python-snappy is not compatible with Hadoop's snappy codec, which is what Spark will use to read the data when it sees a ".snappy" suffix. They are based on the same underlying algorithm, but they aren't compatible: you can't compress with one and decompress with the other.

You can make this work either by writing your data out to snappy using Spark or Hadoop in the first place, or by having Spark read your data as binary blobs and then manually invoking the python-snappy decompression yourself (see `binaryFiles` here: <http://spark.apache.org/docs/latest/api/python/pyspark.html>). The binary blob approach is a bit more brittle, because it needs to fit the entire file in memory for each input file. But if your data is small enough, that will work.
The accepted answer is now outdated. You can use python-snappy to write hadoop-snappy-compatible output, but the documentation is virtually absent. Example:

```
import json
import snappy

with open('test.json.snappy', 'wb') as out_file:
    data = json.dumps({'test': 'somevalue', 'test2': 'somevalue2'}).encode('utf-8')
    compressor = snappy.hadoop_snappy.StreamCompressor()
    compressed = compressor.compress(data)
    out_file.write(compressed)
```

You can also use the command line, where the option is a bit more straightforward, using the `-t hadoop_snappy` flag. Example:

```
echo "{'test':'somevalue','test2':'somevalue2'}" | python -m snappy -t hadoop_snappy -c - test.json.snappy
```
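Conversely, a sketch of reading such a file back in plain Python with the matching decompressor; this assumes `snappy.hadoop_snappy.StreamDecompressor` is available in your python-snappy version (it ships alongside the StreamCompressor used above):

```python
import snappy

with open('test.json.snappy', 'rb') as in_file:
    # Must be the hadoop_snappy decompressor, since the file was
    # written in the hadoop-snappy framing rather than raw snappy.
    decompressor = snappy.hadoop_snappy.StreamDecompressor()
    data = decompressor.decompress(in_file.read())

print(data.decode('utf-8'))
```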
29,871,209
I have compressed a file using python-snappy and put it in my HDFS store. I am now trying to read it in like so, but I get the following traceback. I can't find an example of how to read the file in so I can process it. I can read the text file (uncompressed) version fine. Should I be using sc.sequenceFile? Thanks!

I first compressed the file and pushed it to HDFS:

```
python -m snappy -c gene_regions.vcf gene_regions.vcf.snappy
hdfs dfs -put gene_regions.vcf.snappy /
```

I then added the following to spark-env.sh:

```
export SPARK_EXECUTOR_MEMORY=16G
export HADOOP_HOME=/usr/local/hadoop
export JAVA_LIBRARY_PATH=$JAVA_LIBRARY_PATH:$HADOOP_HOME/lib/native
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HADOOP_HOME/lib/native
export SPARK_LIBRARY_PATH=$SPARK_LIBRARY_PATH:$HADOOP_HOME/lib/native
export SPARK_CLASSPATH=$SPARK_CLASSPATH:$HADOOP_HOME/lib/lib/snappy-java-1.1.1.8-SNAPSHOT.jar
```

I then launch my Spark master and slave and finally my IPython notebook, where I am executing the code below:

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf.snappy")
a_file.first()
```

---

```
ValueError                                Traceback (most recent call last)
in ()
----> 1 a_file.first()

/home/user/Software/spark-1.3.0-bin-hadoop2.4/python/pyspark/rdd.pyc in first(self)
   1244         if rs:
   1245             return rs[0]
-> 1246         raise ValueError("RDD is empty")
   1247
   1248     def isEmpty(self):

ValueError: RDD is empty
```

Working code (uncompressed text file):

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf")
a_file.first()
```

output: `u'##fileformat=VCFv4.1'`
2015/04/25
[ "https://Stackoverflow.com/questions/29871209", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4833015/" ]
Not sure exactly which `snappy` codec my files have, but `spark.read.text` worked without incident for me.
Alright, I found a solution! Build this: <https://github.com/liancheng/snappy-utils>. On Ubuntu 14.10 I had to install gcc-4.4 to get it to build; I commented on the error I was seeing here: <https://code.google.com/p/hadoop-snappy/issues/detail?id=9>.

I can now compress the text files using snappy at the command line like so:

```
snappy -c gene_regions.vcf -o gene_regions.vcf.snappy
```

dump it into hdfs:

```
hdfs dfs -put gene_regions.vcf.snappy
```

and then load it in pyspark!

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf.snappy")
a_file.first()
```

Voila! The header of the vcf:

```
u'##fileformat=VCFv4.1'
```
29,871,209
I have compressed a file using python-snappy and put it in my HDFS store. I am now trying to read it in like so, but I get the following traceback. I can't find an example of how to read the file in so I can process it. I can read the text file (uncompressed) version fine. Should I be using sc.sequenceFile? Thanks!

I first compressed the file and pushed it to HDFS:

```
python -m snappy -c gene_regions.vcf gene_regions.vcf.snappy
hdfs dfs -put gene_regions.vcf.snappy /
```

I then added the following to spark-env.sh:

```
export SPARK_EXECUTOR_MEMORY=16G
export HADOOP_HOME=/usr/local/hadoop
export JAVA_LIBRARY_PATH=$JAVA_LIBRARY_PATH:$HADOOP_HOME/lib/native
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HADOOP_HOME/lib/native
export SPARK_LIBRARY_PATH=$SPARK_LIBRARY_PATH:$HADOOP_HOME/lib/native
export SPARK_CLASSPATH=$SPARK_CLASSPATH:$HADOOP_HOME/lib/lib/snappy-java-1.1.1.8-SNAPSHOT.jar
```

I then launch my Spark master and slave and finally my IPython notebook, where I am executing the code below:

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf.snappy")
a_file.first()
```

---

```
ValueError                                Traceback (most recent call last)
in ()
----> 1 a_file.first()

/home/user/Software/spark-1.3.0-bin-hadoop2.4/python/pyspark/rdd.pyc in first(self)
   1244         if rs:
   1245             return rs[0]
-> 1246         raise ValueError("RDD is empty")
   1247
   1248     def isEmpty(self):

ValueError: RDD is empty
```

Working code (uncompressed text file):

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf")
a_file.first()
```

output: `u'##fileformat=VCFv4.1'`
2015/04/25
[ "https://Stackoverflow.com/questions/29871209", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4833015/" ]
The accepted answer is now outdated. You can use python-snappy to write hadoop-snappy-compatible output, but the documentation is virtually absent. Example:

```
import json
import snappy

with open('test.json.snappy', 'wb') as out_file:
    data = json.dumps({'test': 'somevalue', 'test2': 'somevalue2'}).encode('utf-8')
    compressor = snappy.hadoop_snappy.StreamCompressor()
    compressed = compressor.compress(data)
    out_file.write(compressed)
```

You can also use the command line, where the option is a bit more straightforward, using the `-t hadoop_snappy` flag. Example:

```
echo "{'test':'somevalue','test2':'somevalue2'}" | python -m snappy -t hadoop_snappy -c - test.json.snappy
```
Alright, I found a solution! Build this: <https://github.com/liancheng/snappy-utils>. On Ubuntu 14.10 I had to install gcc-4.4 to get it to build; I commented on the error I was seeing here: <https://code.google.com/p/hadoop-snappy/issues/detail?id=9>.

I can now compress the text files using snappy at the command line like so:

```
snappy -c gene_regions.vcf -o gene_regions.vcf.snappy
```

dump it into hdfs:

```
hdfs dfs -put gene_regions.vcf.snappy
```

and then load it in pyspark!

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf.snappy")
a_file.first()
```

Voila! The header of the vcf:

```
u'##fileformat=VCFv4.1'
```
29,871,209
I have compressed a file using python-snappy and put it in my HDFS store. I am now trying to read it in like so, but I get the following traceback. I can't find an example of how to read the file in so I can process it. I can read the text file (uncompressed) version fine. Should I be using sc.sequenceFile? Thanks!

I first compressed the file and pushed it to HDFS:

```
python -m snappy -c gene_regions.vcf gene_regions.vcf.snappy
hdfs dfs -put gene_regions.vcf.snappy /
```

I then added the following to spark-env.sh:

```
export SPARK_EXECUTOR_MEMORY=16G
export HADOOP_HOME=/usr/local/hadoop
export JAVA_LIBRARY_PATH=$JAVA_LIBRARY_PATH:$HADOOP_HOME/lib/native
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HADOOP_HOME/lib/native
export SPARK_LIBRARY_PATH=$SPARK_LIBRARY_PATH:$HADOOP_HOME/lib/native
export SPARK_CLASSPATH=$SPARK_CLASSPATH:$HADOOP_HOME/lib/lib/snappy-java-1.1.1.8-SNAPSHOT.jar
```

I then launch my Spark master and slave and finally my IPython notebook, where I am executing the code below:

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf.snappy")
a_file.first()
```

---

```
ValueError                                Traceback (most recent call last)
in ()
----> 1 a_file.first()

/home/user/Software/spark-1.3.0-bin-hadoop2.4/python/pyspark/rdd.pyc in first(self)
   1244         if rs:
   1245             return rs[0]
-> 1246         raise ValueError("RDD is empty")
   1247
   1248     def isEmpty(self):

ValueError: RDD is empty
```

Working code (uncompressed text file):

```
a_file = sc.textFile("hdfs://master:54310/gene_regions.vcf")
a_file.first()
```

output: `u'##fileformat=VCFv4.1'`
2015/04/25
[ "https://Stackoverflow.com/questions/29871209", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4833015/" ]
The accepted answer is now outdated. You can use python-snappy to write hadoop-snappy-compatible output, but the documentation is virtually absent. Example:

```
import json
import snappy

with open('test.json.snappy', 'wb') as out_file:
    data = json.dumps({'test': 'somevalue', 'test2': 'somevalue2'}).encode('utf-8')
    compressor = snappy.hadoop_snappy.StreamCompressor()
    compressed = compressor.compress(data)
    out_file.write(compressed)
```

You can also use the command line, where the option is a bit more straightforward, using the `-t hadoop_snappy` flag. Example:

```
echo "{'test':'somevalue','test2':'somevalue2'}" | python -m snappy -t hadoop_snappy -c - test.json.snappy
```
Not sure exactly which `snappy` codec my files have, but `spark.read.text` worked without incident for me.
56,436,777
Referencing this question: [What's the canonical way to check for type in Python?](https://stackoverflow.com/questions/152580/whats-the-canonical-way-to-check-for-type-in-python)

It is said that the best way to check for inputs is to not check them - that is, to let try/except blocks take care of bad inputs. My question is that if I want to design a function that handles multiple input types, my intuition is to do something like this:

```py
def my_function(self, input):
    if isinstance(input, type):
        ...do this
    elif isinstance(input, type2):
        ...do that
    else:
        print("can only handle type and type2")
        raise TypeError
```

but this is un-pythonic. How should I structure it?

In my specific use-case, I want to make a function that can handle a `list` or a `pandas DataFrame`, but from a function-design POV, how should I design that in a try/except paradigm? It kind of feels "ugly," but I also haven't seen code that does this directly in Python yet.
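(For context, a minimal sketch of what the try/except (EAFP) structure could look like for the list-or-DataFrame case; the function name and body are purely illustrative, not a canonical pattern.)

```py
import pandas as pd

def to_records(data):
    # DataFrame branch: plain lists have no .to_dict(), so they raise
    # AttributeError and fall through to the sequence branch below.
    try:
        return data.to_dict(orient="records")
    except AttributeError:
        return list(data)

# to_records(pd.DataFrame({"a": [1, 2]})) -> [{'a': 1}, {'a': 2}]
# to_records([1, 2])                      -> [1, 2]
```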
2019/06/04
[ "https://Stackoverflow.com/questions/56436777", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4992644/" ]
There you go:

```
For Each row As DataGridViewRow In DataGridView1.Rows
    For Each nextrow As DataGridViewRow In DataGridView1.Rows
        If row.Index <> nextrow.Index Then
            If row.Cells(0).Value = nextrow.Cells(0).Value Then
                MsgBox("Duplicate on col 0, index = " & row.Index.ToString)
            End If
            If row.Cells(2).Value = nextrow.Cells(2).Value Then
                MsgBox("Duplicate on col 2, index = " & row.Index.ToString)
            End If
            If row.Cells(3).Value = nextrow.Cells(3).Value Then
                MsgBox("Duplicate on col 3, index = " & row.Index.ToString)
            End If
            If row.Cells(8).Value = nextrow.Cells(8).Value Then
                MsgBox("Duplicate on col 8, index = " & row.Index.ToString)
            End If
        End If
    Next
Next
```

This will also check all the columns for duplicates, not only the row under the current one like in your example. You were incrementing i and incrementing ii, so you were always checking only 2 rows at the same time instead of comparing 1 row to all the others.
```
' Note: removing rows from a collection while iterating it with
' For Each is fragile; in practice, loop over indices in reverse.
For Each row As DataGridViewRow In dtg3.Rows
    For Each nextrow As DataGridViewRow In dtg3.Rows
        If row.Index <> nextrow.Index Then
            If row.Cells(0).Value = nextrow.Cells(0).Value AndAlso
               row.Cells(2).Value = nextrow.Cells(2).Value AndAlso
               row.Cells(3).Value = nextrow.Cells(3).Value AndAlso
               row.Cells(8).Value = nextrow.Cells(8).Value Then
                dtg3.Rows.Remove(row)
                dtg3.Rows.Remove(nextrow)
            End If
        End If
    Next
Next
```
9,014,991
So let's say I have this Python code:

```
def loopForEachFileInDirectory(self, conn):
    for filename in os.listdir(uploadedFilesDirectory):
        try:
            self.insertNewEntryForStagingFile(conn, filename)
            self.copyFilesToStagingDirectory(filename)
        except:
            ???

def copyFilesToStagingDirectory(self, filename):
    logging.info("copying %s to youtube_ready", filename)
    try:
        shutil.copy(uploadedFilesDirectory + filename, stagingDirectory)
        logging.info("move successful")
    except shutil.Error, e:
        logging.warn("move failed for reasons \n\t%d:%s", e.args[0], e.args[1])
        raise ???
```

Now, the "loopForEachFileInDirectory" method is going to have a few more methods in it - I'm doing a bit of clean coding (cheers, Robert Martin) here. What I'd like is to bubble up any exceptions from the sub-methods, and if anything happens in the main loop, bail on that loop and continue.

The question is: what is the rule for bubbling up a generic exception? Do I just raise on its own? And if so, how do I generically throw an exception, and how do I catch and log the details of a generic exception?
2012/01/26
[ "https://Stackoverflow.com/questions/9014991", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1061426/" ]
> what is the rule for bubbling up a generic exception? Do I just raise on its own?

Yes. The short answer is to just use `raise`.

> And if so, how do I generically throw an exception...

The above answer the other submitter posted is correct, but it doesn't provide much in the way of context. `Exception` is the base exception class. `except Exception` works across all types of `Exception` because all Python exceptions inherit from this class. `except` statements can specify an argument which points to the exception object. I don't believe that specifying it is strictly necessary in this context. In fact, it's likely sufficient to bubble up an exception with the default:

```
except:
    raise
```

without any real need to specify an exception type or the variable `e` referencing the particular exception object.

> ..and how do I catch and log the details of a generic exception?

`logging.exception` is a good way to go. Try it like so:

```
try:
    <something that raises an error>
except Exception, e:
    logging.exception(e)
```
```
#!/usr/bin/python
try:
    print 'Youll see me first.'
    try:
        1[90]
    except Exception, e:
        print "************ UTOH!", str(e)
        raise e
except Exception, e:
    print ">>>>>>>>>>>> I concur, THE JIG IS UP!", str(e)
    raise e
```
9,014,991
So let's say I have this Python code:

```
def loopForEachFileInDirectory(self, conn):
    for filename in os.listdir(uploadedFilesDirectory):
        try:
            self.insertNewEntryForStagingFile(conn, filename)
            self.copyFilesToStagingDirectory(filename)
        except:
            ???

def copyFilesToStagingDirectory(self, filename):
    logging.info("copying %s to youtube_ready", filename)
    try:
        shutil.copy(uploadedFilesDirectory + filename, stagingDirectory)
        logging.info("move successful")
    except shutil.Error, e:
        logging.warn("move failed for reasons \n\t%d:%s", e.args[0], e.args[1])
        raise ???
```

Now, the "loopForEachFileInDirectory" method is going to have a few more methods in it - I'm doing a bit of clean coding (cheers, Robert Martin) here. What I'd like is to bubble up any exceptions from the sub-methods, and if anything happens in the main loop, bail on that loop and continue.

The question is: what is the rule for bubbling up a generic exception? Do I just raise on its own? And if so, how do I generically throw an exception, and how do I catch and log the details of a generic exception?
2012/01/26
[ "https://Stackoverflow.com/questions/9014991", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1061426/" ]
A short Python 3 extension to what other posters have written.

In most cases, the bare `raise` is enough. It re-raises the original exception, which is what you want to do in most cases. But there are cases when you want to raise a new exception instead - perhaps to add more info to the original one (say, the id of the culprit object being processed) or to wrap multiple exception types into one for the convenience of the caller. In such cases the `raise ... from ...` (Python 3 only, I'm afraid) construct is useful, as in:

```
try:
    self.file = open(filename)
except IOError as exc:
    raise DatabaseError('failed to open') from exc
```

The benefit of the `from` clause is that it preserves the stack trace of the original exception, which would be lost if you just raised a brand new exception.

Proper re-packing of exceptions in Python 2 is harder; there's a good blog post by Ian Bicking covering the topic here: <http://blog.ianbicking.org/2007/09/12/re-raising-exceptions/>
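(For completeness, a sketch of the Python 2 idiom that the linked post discusses: the three-argument raise, which swaps in a new exception while keeping the original traceback. `DatabaseError` and `filename` are the same illustrative names as above.)

```python
import sys

try:
    self.file = open(filename)
except IOError:
    # sys.exc_info()[2] is the traceback of the exception being handled;
    # passing it as the third raise argument preserves the stack trace.
    exc_tb = sys.exc_info()[2]
    raise DatabaseError, DatabaseError('failed to open'), exc_tb
```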
```
#!/usr/bin/python
try:
    print 'Youll see me first.'
    try:
        1[90]
    except Exception, e:
        print "************ UTOH!", str(e)
        raise e
except Exception, e:
    print ">>>>>>>>>>>> I concur, THE JIG IS UP!", str(e)
    raise e
```
9,014,991
So let's say I have this Python code:

```
def loopForEachFileInDirectory(self, conn):
    for filename in os.listdir(uploadedFilesDirectory):
        try:
            self.insertNewEntryForStagingFile(conn, filename)
            self.copyFilesToStagingDirectory(filename)
        except:
            ???

def copyFilesToStagingDirectory(self, filename):
    logging.info("copying %s to youtube_ready", filename)
    try:
        shutil.copy(uploadedFilesDirectory + filename, stagingDirectory)
        logging.info("move successful")
    except shutil.Error, e:
        logging.warn("move failed for reasons \n\t%d:%s", e.args[0], e.args[1])
        raise ???
```

Now, the "loopForEachFileInDirectory" method is going to have a few more methods in it - I'm doing a bit of clean coding (cheers, Robert Martin) here. What I'd like is to bubble up any exceptions from the sub-methods, and if anything happens in the main loop, bail on that loop and continue.

The question is: what is the rule for bubbling up a generic exception? Do I just raise on its own? And if so, how do I generically throw an exception, and how do I catch and log the details of a generic exception?
2012/01/26
[ "https://Stackoverflow.com/questions/9014991", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1061426/" ]
> what is the rule for bubbling up a generic exception? Do I just raise on its own?

Yes. The short answer is to just use `raise`.

> And if so, how do I generically throw an exception...

The above answer the other submitter posted is correct, but it doesn't provide much in the way of context. `Exception` is the base exception class. `except Exception` works across all types of `Exception` because all Python exceptions inherit from this class. `except` statements can specify an argument which points to the exception object. I don't believe that specifying it is strictly necessary in this context. In fact, it's likely sufficient to bubble up an exception with the default:

```
except:
    raise
```

without any real need to specify an exception type or the variable `e` referencing the particular exception object.

> ..and how do I catch and log the details of a generic exception?

`logging.exception` is a good way to go. Try it like so:

```
try:
    <something that raises an error>
except Exception, e:
    logging.exception(e)
```
A short Python 3 extension to what other posters have written.

In most cases, the bare `raise` is enough. It re-raises the original exception, which is what you want to do in most cases. But there are cases when you want to raise a new exception instead - perhaps to add more info to the original one (say, the id of the culprit object being processed) or to wrap multiple exception types into one for the convenience of the caller. In such cases the `raise ... from ...` (Python 3 only, I'm afraid) construct is useful, as in:

```
try:
    self.file = open(filename)
except IOError as exc:
    raise DatabaseError('failed to open') from exc
```

The benefit of the `from` clause is that it preserves the stack trace of the original exception, which would be lost if you just raised a brand new exception.

Proper re-packing of exceptions in Python 2 is harder; there's a good blog post by Ian Bicking covering the topic here: <http://blog.ianbicking.org/2007/09/12/re-raising-exceptions/>
21,819,649
What's the difference between a namespace Python package (no `__init__.py`) and a regular Python package (has an `__init__.py`), especially when `__init__.py` is empty for a regular package? I am curious because recently I've been forgetting to make `__init__.py` in packages I make, and I never noticed any problems. In fact, they seem to behave identically to regular packages.

Edit: Namespace packages are only supported from Python 3.3 ([see PEP 420](http://legacy.python.org/dev/peps/pep-0420/)), so naturally, this question only applies to Python 3.
2014/02/17
[ "https://Stackoverflow.com/questions/21819649", "https://Stackoverflow.com", "https://Stackoverflow.com/users/469721/" ]
Namespace packages
------------------

As of [Python 3.3](https://docs.python.org/3/whatsnew/3.3.html#pep-420-implicit-namespace-packages), we get namespace packages. These are a special kind of package that allows you to unify two packages with the same name at different points on your Python path. For example, consider path1 and path2 as separate entries on your Python path:

```
path1
+--namespace
   +--module1.py
   +--module2.py
path2
+--namespace
   +--module3.py
   +--module4.py
```

with this arrangement you should be able to do the following:

```
from namespace import module1, module3
```

thus you get the unification of two packages with the same name in a single namespace. If either one of them gains an `__init__.py`, that becomes **the** package - and you no longer get the unification, as the other directory is ignored. If both of them have an `__init__.py`, the first one in the PYTHONPATH (`sys.path`) is the one used.

`__init__.py` used to be required to make a directory a package
---------------------------------------------------------------

Namespace packages are packages without the `__init__.py`. For an example of a simple package, if you have a directory:

```
root
+--package
   +--file1.py
   +--file2.py
   ...
```

While you could run these files independently in the `package` directory, e.g. with `python2 file1.py`, under Python 2 you wouldn't be able to import the files as modules from the root directory, e.g.

```
import package.file1
```

would fail, and in order for it to work, you at least need this:

```
package
+--__init__.py
+--file1.py
+--file2.py
...
```

`__init__.py` initializes the package, so you can have code in the `__init__.py` that is run when the module is first imported:

```
run_initial_import_setup()
```

provide an `__all__` list of names to be imported,

```
__all__ = ['star_import', 'only', 'these', 'names']
```

if the package is imported with the following:

```
from module import *
```

or you can leave the `__init__.py` completely empty if you only want to be able to import the remaining .py files in the directory.

### Namespaces with `__init__.py` using pkgutil:

You could originally use [pkgutil](http://docs.python.org/2/library/pkgutil.html), available since Python 2.3, to accomplish adding namespaces, by adding the following into each separate package's `__init__.py`:

```
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
```

Setuptools uses a similar method; again, all `__init__.py` files should contain the following (with no other code):

```
import pkg_resources
pkg_resources.declare_namespace(__name__)
```

Namespaces were more thoroughly addressed in [PEP 420](http://www.python.org/dev/peps/pep-0420/).

See also more discussion on setuptools and namespaces here: <http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages>
1. Having `__init__.py` makes it so you can import that package elsewhere.
2. Also, the `__init__.py` file can contain code you want executed each time the module is loaded.
21,819,649
What's the difference between a namespace Python package (no `__init__.py`) and a regular Python package (has an `__init__.py`), especially when `__init__.py` is empty for a regular package? I am curious because recently I've been forgetting to make `__init__.py` in packages I make, and I never noticed any problems. In fact, they seem to behave identically to regular packages.

Edit: Namespace packages are only supported from Python 3.3 ([see PEP 420](http://legacy.python.org/dev/peps/pep-0420/)), so naturally, this question only applies to Python 3.
2014/02/17
[ "https://Stackoverflow.com/questions/21819649", "https://Stackoverflow.com", "https://Stackoverflow.com/users/469721/" ]
Reading the [link](http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages) from Aaron, and [PEP 420](http://www.python.org/dev/peps/pep-0420/), it appears that the fundamental difference between a namespace package and a regular package, besides the obvious difference that a regular package may contain various initialization code in `__init__.py`, is that a namespace package is a virtual package whose contents can be distributed in various places along Python's lookup path.

For example, given

```
a/foo/bar.py
b/foo/baz.py
```

if both `b` and `a` are on Python's path, you can import `foo.bar` and `foo.baz` freely.

Of course, this raises the question of whether, all other things being equal and `__init__.py` not being needed, it is better to make a regular package or a namespace package - but that is a little off-topic.
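(A quick way to see that in action; this assumes the `a/` and `b/` directories above sit in the current working directory and neither `foo/` contains an `__init__.py`.)

```python
import sys
sys.path.extend(['a', 'b'])   # put both roots on the lookup path

import foo.bar
import foo.baz
print(foo.__path__)   # a namespace path spanning a/foo and b/foo
```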
1. Having `__init__.py` makes it so you can import that package elsewhere.
2. Also, the `__init__.py` file can contain code you want executed each time the module is loaded.
21,819,649
What's the difference between a namespace Python package (no `__init__.py`) and a regular Python package (has an `__init__.py`), especially when `__init__.py` is empty for a regular package? I am curious because recently I've been forgetting to make `__init__.py` in packages I make, and I never noticed any problems. In fact, they seem to behave identically to regular packages.

Edit: Namespace packages are only supported from Python 3.3 ([see PEP 420](http://legacy.python.org/dev/peps/pep-0420/)), so naturally, this question only applies to Python 3.
2014/02/17
[ "https://Stackoverflow.com/questions/21819649", "https://Stackoverflow.com", "https://Stackoverflow.com/users/469721/" ]
Namespace packages
------------------

As of [Python 3.3](https://docs.python.org/3/whatsnew/3.3.html#pep-420-implicit-namespace-packages), we get namespace packages. These are a special kind of package that allows you to unify two packages with the same name at different points on your Python path. For example, consider path1 and path2 as separate entries on your Python path:

```
path1
+--namespace
   +--module1.py
   +--module2.py
path2
+--namespace
   +--module3.py
   +--module4.py
```

with this arrangement you should be able to do the following:

```
from namespace import module1, module3
```

thus you get the unification of two packages with the same name in a single namespace. If either one of them gains an `__init__.py`, that becomes **the** package - and you no longer get the unification, as the other directory is ignored. If both of them have an `__init__.py`, the first one in the PYTHONPATH (`sys.path`) is the one used.

`__init__.py` used to be required to make a directory a package
---------------------------------------------------------------

Namespace packages are packages without the `__init__.py`. For an example of a simple package, if you have a directory:

```
root
+--package
   +--file1.py
   +--file2.py
   ...
```

While you could run these files independently in the `package` directory, e.g. with `python2 file1.py`, under Python 2 you wouldn't be able to import the files as modules from the root directory, e.g.

```
import package.file1
```

would fail, and in order for it to work, you at least need this:

```
package
+--__init__.py
+--file1.py
+--file2.py
...
```

`__init__.py` initializes the package, so you can have code in the `__init__.py` that is run when the module is first imported:

```
run_initial_import_setup()
```

provide an `__all__` list of names to be imported,

```
__all__ = ['star_import', 'only', 'these', 'names']
```

if the package is imported with the following:

```
from module import *
```

or you can leave the `__init__.py` completely empty if you only want to be able to import the remaining .py files in the directory.

### Namespaces with `__init__.py` using pkgutil:

You could originally use [pkgutil](http://docs.python.org/2/library/pkgutil.html), available since Python 2.3, to accomplish adding namespaces, by adding the following into each separate package's `__init__.py`:

```
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
```

Setuptools uses a similar method; again, all `__init__.py` files should contain the following (with no other code):

```
import pkg_resources
pkg_resources.declare_namespace(__name__)
```

Namespaces were more thoroughly addressed in [PEP 420](http://www.python.org/dev/peps/pep-0420/).

See also more discussion on setuptools and namespaces here: <http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages>
Reading the [link](http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages) from Aaron, and [PEP 420](http://www.python.org/dev/peps/pep-0420/), it appears that the fundamental difference between a namespace package and a regular package, besides the obvious difference that a regular package may contain various initialization code in `__init__.py`, is that a namespace package is a virtual package whose contents can be distributed in various places along Python's lookup path.

For example, given

```
a/foo/bar.py
b/foo/baz.py
```

if both `b` and `a` are on Python's path, you can import `foo.bar` and `foo.baz` freely.

Of course, this raises the question of whether, all other things being equal and `__init__.py` not being needed, it is better to make a regular package or a namespace package - but that is a little off-topic.
21,272,497
I'm trying to see if this is the most efficient way to write a bubble sort in Python, or if there are better ways. Some people tell me to use two loops; what are the benefits of doing it like that vs. the below?

```
def sort_bubble(blist):
    n = 0
    while n < len(blist) - 1:
        if blist[n] > blist[n + 1]:
            n1 = blist[n]
            n2 = blist[n + 1]
            blist[n] = n2
            blist[n + 1] = n1
            n = 0
        else:
            n = n + 1
    print blist
```
2014/01/22
[ "https://Stackoverflow.com/questions/21272497", "https://Stackoverflow.com", "https://Stackoverflow.com/users/3221614/" ]
Your algorithm is technically a bubble sort, in that it does exactly the swaps that it should. However, it's a *very inefficient* bubble sort, in that it does a *lot* more compares than are necessary.

How can you *know* that? It's pretty easy to instrument your code to count the number of compares and swaps. And meanwhile, [Wikipedia](http://en.wikipedia.org/wiki/Bubble_sort) gives implementations of a simple bubble sort, and one with the skip-sorted-tail optimization, in a pseudocode language that's pretty easy to port to Python and similarly instrument. I'll show the code at the bottom.

For a perfect bubble sort, given a random list of length 100, you should expect a bit under 10000 compares (100 \* 100), and a bit under 2500 swaps. And the Wikipedia implementation does exactly that. The "skip-sorted-tail" version should have just over half as many compares, and it does. Yours, however, has 10x as many compares as it should.

The reason your code is inefficient is that it starts over at the beginning over and over, instead of starting where it swapped whenever possible. This causes an extra factor of `O(sqrt(N))`.

Meanwhile, almost any sort algorithm is better than bubble sort for almost any input, so even an efficient bubble sort is not an efficient sort.

---

I've made one minor change to your code: replacing the four-line swap with a more idiomatic single-line swap. Otherwise, nothing is changed but adding the `cmpcount` and `swapcount` variables, and returning the result instead of printing it.

```
def bogo_bubble(blist):
    cmpcount, swapcount = 0, 0
    n = 0
    while n < len(blist) - 1:
        cmpcount += 1
        if blist[n] > blist[n + 1]:
            swapcount += 1
            blist[n], blist[n+1] = blist[n+1], blist[n]
            n = 0
        else:
            n = n + 1
    return blist, cmpcount, swapcount
```

This is the [Pseudocode implementation](http://en.wikipedia.org/wiki/Bubble_sort#Pseudocode_implementation) from Wikipedia, translated to Python. I had to replace the `repeat… until` with a `while True… if not …: break`, but everything else is trivial.

```
def wp1_bubble(blist):
    cmpcount, swapcount = 0, 0
    while True:
        swapped = False
        for i in range(1, len(blist)):
            cmpcount += 1
            if blist[i-1] > blist[i]:
                swapcount += 1
                blist[i-1], blist[i] = blist[i], blist[i-1]
                swapped = True
        if not swapped:
            break
    return blist, cmpcount, swapcount
```

This is the [Optimizing bubble sort](http://en.wikipedia.org/wiki/Bubble_sort#Optimizing_bubble_sort), which does the simple version of the skip-sorted-tail optimization, but not the more elaborate version (which comes right after it).

```
def wp2_bubble(blist):
    cmpcount, swapcount = 0, 0
    n = len(blist)
    while True:
        swapped = False
        for i in range(1, n):
            cmpcount += 1
            if blist[i-1] > blist[i]:
                swapcount += 1
                blist[i-1], blist[i] = blist[i], blist[i-1]
                swapped = True
        n -= 1
        if not swapped:
            break
    return blist, cmpcount, swapcount

import random
alist = [random.randrange(100) for _ in range(100)]

bb, cb, sb = bogo_bubble(alist[:])
b1, c1, s1 = wp1_bubble(alist[:])
b2, c2, s2 = wp2_bubble(alist[:])
assert bb == b1 == b2

print('bogo_bubble: {} cmp, {} swap'.format(cb, sb))
print('wp1_bubble : {} cmp, {} swap'.format(c1, s1))
print('wp2_bubble : {} cmp, {} swap'.format(c2, s2))
```

Typical output:

```
bogo_bubble: 100619 cmp, 2250 swap
wp1_bubble : 8811 cmp, 2250 swap
wp2_bubble : 4895 cmp, 2250 swap
```
This is how I would do it if I was forced to use bubble sort; you should probably always just use the default sort() function in Python, it's very fast.

```
def BubbleSort(A):
    end = len(A) - 1
    swapped = True
    while swapped:
        swapped = False
        for i in range(0, end):
            if A[i] > A[i+1]:
                A[i], A[i+1] = A[i+1], A[i]
                swapped = True
        end -= 1
```

It's basically regular bubble sort, but instead of traversing the entire list every time, it only traverses up to the last value that still needs checking; by definition, any value past that is already in place.

Also, you do not need to use temp values in Python to swap; the pythonic way to do this is:

```
a, b = b, a
```
21,272,497
I'm trying to see if this is the most efficient way to write a bubble sort in Python, or if there are better ways. Some people tell me to use two loops; what are the benefits of doing it like that vs. the below?

```
def sort_bubble(blist):
    n = 0
    while n < len(blist) - 1:
        if blist[n] > blist[n + 1]:
            n1 = blist[n]
            n2 = blist[n + 1]
            blist[n] = n2
            blist[n + 1] = n1
            n = 0
        else:
            n = n + 1
    print blist
```
2014/01/22
[ "https://Stackoverflow.com/questions/21272497", "https://Stackoverflow.com", "https://Stackoverflow.com/users/3221614/" ]
This is how I would do it if I was forced to use bubble sort; you should probably always just use the default sort() function in Python, it's very fast.

```
def BubbleSort(A):
    end = len(A) - 1
    swapped = True
    while swapped:
        swapped = False
        for i in range(0, end):
            if A[i] > A[i+1]:
                A[i], A[i+1] = A[i+1], A[i]
                swapped = True
        end -= 1
```

It's basically regular bubble sort, but instead of traversing the entire list every time, it only traverses up to the last value that still needs checking; by definition, any value past that is already in place.

Also, you do not need to use temp values in Python to swap; the pythonic way to do this is:

```
a, b = b, a
```
You could test it out yourself. Other things remaining the same, just counting the number of iterations will give you an idea of what is faster. Here is what I wrote:

```
def sort_bubble(blist):
    ops = 0
    n = 0
    while n < len(blist) - 1:
        if blist[n] > blist[n + 1]:
            n1 = blist[n]
            n2 = blist[n + 1]
            blist[n] = n2
            blist[n + 1] = n1
            n = 0
        else:
            n = n + 1
        ops += 1
    print ops
    print blist

def bubbleSort(list):
    ops = 0
    for i in range(len(list)):
        for j in range(i):
            if list[i] < list[j]:
                list[i], list[j] = list[j], list[i]
            ops += 1
    print ops
    return list

sort_bubble([6, 5, 3, 1, 8, 7, 2, 4])
print bubbleSort([6, 5, 3, 1, 8, 7, 2, 4])
```
21,272,497
I'm trying to see if this is the most efficient way to write a bubble sort in Python, or if there are better ways. Some people tell me to use two loops; what are the benefits of doing it like that vs. the below?

```
def sort_bubble(blist):
    n = 0
    while n < len(blist) - 1:
        if blist[n] > blist[n + 1]:
            n1 = blist[n]
            n2 = blist[n + 1]
            blist[n] = n2
            blist[n + 1] = n1
            n = 0
        else:
            n = n + 1
    print blist
```
2014/01/22
[ "https://Stackoverflow.com/questions/21272497", "https://Stackoverflow.com", "https://Stackoverflow.com/users/3221614/" ]
Your algorithm is technically a bubble sort, in that it does exactly the swaps that it should. However, it's a *very inefficient* bubble sort, in that it does a *lot* more compares than are necessary.

How can you *know* that? It's pretty easy to instrument your code to count the number of compares and swaps. And meanwhile, [Wikipedia](http://en.wikipedia.org/wiki/Bubble_sort) gives implementations of a simple bubble sort, and one with the skip-sorted-tail optimization, in a pseudocode language that's pretty easy to port to Python and similarly instrument. I'll show the code at the bottom.

For a perfect bubble sort, given a random list of length 100, you should expect a bit under 10000 compares (100 \* 100), and a bit under 2500 swaps. And the Wikipedia implementation does exactly that. The "skip-sorted-tail" version should have just over half as many compares, and it does. Yours, however, has 10x as many compares as it should.

The reason your code is inefficient is that it starts over at the beginning over and over, instead of starting where it swapped whenever possible. This causes an extra factor of `O(sqrt(N))`.

Meanwhile, almost any sort algorithm is better than bubble sort for almost any input, so even an efficient bubble sort is not an efficient sort.

---

I've made one minor change to your code: replacing the four-line swap with a more idiomatic single-line swap. Otherwise, nothing is changed but adding the `cmpcount` and `swapcount` variables, and returning the result instead of printing it.

```
def bogo_bubble(blist):
    cmpcount, swapcount = 0, 0
    n = 0
    while n < len(blist) - 1:
        cmpcount += 1
        if blist[n] > blist[n + 1]:
            swapcount += 1
            blist[n], blist[n+1] = blist[n+1], blist[n]
            n = 0
        else:
            n = n + 1
    return blist, cmpcount, swapcount
```

This is the [Pseudocode implementation](http://en.wikipedia.org/wiki/Bubble_sort#Pseudocode_implementation) from Wikipedia, translated to Python. I had to replace the `repeat… until` with a `while True… if not …: break`, but everything else is trivial.

```
def wp1_bubble(blist):
    cmpcount, swapcount = 0, 0
    while True:
        swapped = False
        for i in range(1, len(blist)):
            cmpcount += 1
            if blist[i-1] > blist[i]:
                swapcount += 1
                blist[i-1], blist[i] = blist[i], blist[i-1]
                swapped = True
        if not swapped:
            break
    return blist, cmpcount, swapcount
```

This is the [Optimizing bubble sort](http://en.wikipedia.org/wiki/Bubble_sort#Optimizing_bubble_sort), which does the simple version of the skip-sorted-tail optimization, but not the more elaborate version (which comes right after it).

```
def wp2_bubble(blist):
    cmpcount, swapcount = 0, 0
    n = len(blist)
    while True:
        swapped = False
        for i in range(1, n):
            cmpcount += 1
            if blist[i-1] > blist[i]:
                swapcount += 1
                blist[i-1], blist[i] = blist[i], blist[i-1]
                swapped = True
        n -= 1
        if not swapped:
            break
    return blist, cmpcount, swapcount

import random
alist = [random.randrange(100) for _ in range(100)]

bb, cb, sb = bogo_bubble(alist[:])
b1, c1, s1 = wp1_bubble(alist[:])
b2, c2, s2 = wp2_bubble(alist[:])
assert bb == b1 == b2

print('bogo_bubble: {} cmp, {} swap'.format(cb, sb))
print('wp1_bubble : {} cmp, {} swap'.format(c1, s1))
print('wp2_bubble : {} cmp, {} swap'.format(c2, s2))
```

Typical output:

```
bogo_bubble: 100619 cmp, 2250 swap
wp1_bubble : 8811 cmp, 2250 swap
wp2_bubble : 4895 cmp, 2250 swap
```
You could test it out yourself. Other things remaining the same, just counting the number of iterations will give you an idea of what is faster. Here is what I wrote:

```
def sort_bubble(blist):
    ops = 0
    n = 0
    while n < len(blist) - 1:
        if blist[n] > blist[n + 1]:
            n1 = blist[n]
            n2 = blist[n + 1]
            blist[n] = n2
            blist[n + 1] = n1
            n = 0
        else:
            n = n + 1
        ops += 1
    print ops
    print blist

def bubbleSort(list):
    ops = 0
    for i in range(len(list)):
        for j in range(i):
            if list[i] < list[j]:
                list[i], list[j] = list[j], list[i]
            ops += 1
    print ops
    return list

sort_bubble([6, 5, 3, 1, 8, 7, 2, 4])
print bubbleSort([6, 5, 3, 1, 8, 7, 2, 4])
```
48,689,158
I want to send commands to the Linux terminal to run Python scripts. I have a list of Python files that I want to run one after the other, reading the list sequentially: once the first file is finished, the second one should run, and so on.
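(For reference, a minimal sketch of one way to do this with the standard library; the file names are illustrative. `subprocess.call` blocks until each script exits, which gives the one-after-the-other behaviour asked for.)

```python
import subprocess

scripts = ["first.py", "second.py", "third.py"]  # hypothetical list of files

for script in scripts:
    # Blocks until this script finishes, then the loop moves on.
    subprocess.call(["python", script])
```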
2018/02/08
[ "https://Stackoverflow.com/questions/48689158", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4782295/" ]
I would suggest gsl-like syntactic sugar to mark that it is not a pointer you manage. Something like:

```
template<class T>
using observer = T;

observer<library_managed_object *> foo = nullptr;
```

You can also use, as suggested elsewhere, the `observer_ptr`.

And one final word: in the world of C++11 and so forth, using raw pointers is perfectly reasonable when you want to pass something like a nullable, non-owning reference. If smart pointers show ownership, then the lack of one shows lack of ownership. All in all, if you do not have legacy code with manually managed memory, don't be afraid to use raw pointers. As Herb Sutter says:

> Pass by \* or & to accept a widget independently of how the caller is managing its lifetime. Most of the time, we don’t want to commit to a lifetime policy in the parameter type, such as requiring the object be held by a specific smart pointer, because this is usually needlessly restrictive. As usual, use a \* if you need to express null (no widget), otherwise prefer to use a &; and if the object is input-only, write const widget\* or const widget&.

<https://herbsutter.com/2013/06/05/gotw-91-solution-smart-pointer-parameters/>
You can try [gsl::owner](https://github.com/Microsoft/GSL/blob/master/include/gsl/pointers), defined in the GSL project. It's not a type but more of a tag to define ownership. The [CPP core guidelines](https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#Ri-raw) define the use case of `gsl::owner`:

> mark owning pointers using owner from the guideline support library:

```
owner<X*> compute(args)    // It is now clear that ownership is transferred
{
    owner<X*> res = new X{};
    // ...
    return res;
}
```

> This tells analysis tools that res is an owner. That is, its value must be deleted or transferred to another owner, as is done here by the return.
>
> owner is used similarly in the implementation of resource handles.

Another alternative is [observer\_ptr](http://en.cppreference.com/w/cpp/experimental/observer_ptr).
39,185,797
In Node.js, when I want to quickly check the value of something, rather than busting out the debugger and stepping through, I quickly add a console.log(foo) and get a beautiful:

```
{ lemmons: "pie",
  number: 9,
  fetch: function(){..},
  elements: { fire: 99.9 } }
```

Very clear! In Python I get this:

```
class LinkedList:
    head = None
    tail = None
    lemmons = 99
```

`<__main__.LinkedList instance at 0x105989f80>`

or with `vars()`, `{}`, or with `dir()`,

`['_LinkedList__Node', '__doc__', '__module__', 'append', 'get_tail', 'head', 'lemmons', 'remove', 'tail']`

Yuck! Look at all that nonsense - I thought Python was supposed to be fast, beautiful and clean? Is this really how people do it? Do they implement custom **str** and custom **repr** for everything? Because that seems kind of crazy too..
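(For the record, a minimal sketch of the usual standard-library route: pprint plus vars(). Note that vars() only sees instance attributes, which is exactly why the class-attribute version above returns {}; the rewritten class below is illustrative.)

```python
from pprint import pprint

class LinkedList:
    def __init__(self):
        self.head = None     # instance attributes, so vars() can see them
        self.tail = None
        self.lemmons = 99

ll = LinkedList()
pprint(vars(ll))   # {'head': None, 'lemmons': 99, 'tail': None}
```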
2016/08/27
[ "https://Stackoverflow.com/questions/39185797", "https://Stackoverflow.com", "https://Stackoverflow.com/users/5947872/" ]
Actually, there is a way to stop Java GC. Just use the Epsilon GC algorithm that was introduced as an experimental feature in Java 11. Just add the following two arguments to your JVM's startup script:

```
-XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC
```

All or Nothing
--------------

Now just keep in mind that this Java GC algorithm does no GC at all. So if you do any object allocation in your code, eventually you'll hit an `OutOfMemoryError` and your app will crash. But if your JVM is short-lived, and you don't think that's an issue, give Epsilon GC a try. Just remember it's all or nothing.

You can't [force Java GC](https://youtu.be/onjlJBDdeTk) and you can't [stop Java GC](https://youtu.be/aTMZGs0ZGPE) from happening if you use any of the other garbage collectors. The collector is non-deterministic, so control by programmers or admins just isn't possible out of the box.
By default the JVM runs the GC only when needed. This means you can't turn the GC off, or your program will fail. The simplest way to avoid the JVM stopping is:

* use a very small eden size, so that when it stops, the pause is less than some acceptable time;
* or make the eden size very large and delay the GC until it hardly matters, e.g. you can reduce your garbage rate and run for 24 hours or longer between minor GCs.
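(For illustration, the young generation, which contains eden, can be sized with the standard HotSpot `-Xmn` flag; the sizes and class name below are made up.)

```
# tiny young generation: frequent but very short minor GC pauses
java -Xmn16m -Xms4g -Xmx4g MyApp

# huge young generation: at a low allocation rate, minor GCs become rare
java -Xmn3g -Xms4g -Xmx4g MyApp
```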
14,817,210
I have quite a simple question here. In Tkinter (Python), I was wondering how to use a button to go to different pages of my application, e.g. a register page and a login page. I am aware that a GUI does not have 'pages' like websites do; I've seen a few different ways, but what is the best way to make links to different pages?
2013/02/11
[ "https://Stackoverflow.com/questions/14817210", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2061989/" ]
Make each page a frame. Then, all your buttons need to do is hide whatever is visible, then make the desired frame visible. A simple method to do this is to stack the frames on top of each other (this is one time when `place` makes sense) and then `lift()` the frame you want to be visible. This technique works best when all pages are the same size; in fact, it requires that you explicitly set the size of the containing frame.

The following is a contrived example. This isn't the only way to solve the problem, just proof that it's not a particularly hard problem to solve:

```
import Tkinter as tk

class Page(tk.Frame):
    def __init__(self, *args, **kwargs):
        tk.Frame.__init__(self, *args, **kwargs)
    def show(self):
        self.lift()

class Page1(Page):
    def __init__(self, *args, **kwargs):
        Page.__init__(self, *args, **kwargs)
        label = tk.Label(self, text="This is page 1")
        label.pack(side="top", fill="both", expand=True)

class Page2(Page):
    def __init__(self, *args, **kwargs):
        Page.__init__(self, *args, **kwargs)
        label = tk.Label(self, text="This is page 2")
        label.pack(side="top", fill="both", expand=True)

class Page3(Page):
    def __init__(self, *args, **kwargs):
        Page.__init__(self, *args, **kwargs)
        label = tk.Label(self, text="This is page 3")
        label.pack(side="top", fill="both", expand=True)

class MainView(tk.Frame):
    def __init__(self, *args, **kwargs):
        tk.Frame.__init__(self, *args, **kwargs)
        p1 = Page1(self)
        p2 = Page2(self)
        p3 = Page3(self)

        buttonframe = tk.Frame(self)
        container = tk.Frame(self)
        buttonframe.pack(side="top", fill="x", expand=False)
        container.pack(side="top", fill="both", expand=True)

        p1.place(in_=container, x=0, y=0, relwidth=1, relheight=1)
        p2.place(in_=container, x=0, y=0, relwidth=1, relheight=1)
        p3.place(in_=container, x=0, y=0, relwidth=1, relheight=1)

        b1 = tk.Button(buttonframe, text="Page 1", command=p1.show)
        b2 = tk.Button(buttonframe, text="Page 2", command=p2.show)
        b3 = tk.Button(buttonframe, text="Page 3", command=p3.show)

        b1.pack(side="left")
        b2.pack(side="left")
        b3.pack(side="left")

        p1.show()

if __name__ == "__main__":
    root = tk.Tk()
    main = MainView(root)
    main.pack(side="top", fill="both", expand=True)
    root.wm_geometry("400x400")
    root.mainloop()
```
Could you do something like this?

```
import tkinter

def page1():
    page2text.pack_forget()
    page1text.pack()

def page2():
    page1text.pack_forget()
    page2text.pack()

window = tkinter.Tk()
page1btn = tkinter.Button(window, text="Page 1", command=page1)
page2btn = tkinter.Button(window, text="Page 2", command=page2)
page1text = tkinter.Label(window, text="This is page 1")
page2text = tkinter.Label(window, text="This is page 2")
page1btn.pack()
page2btn.pack()
page1text.pack()
window.mainloop()  # enter the Tk event loop so the window stays up
```

It seems a lot simpler to me.
14,817,210
I have quite a simple question here. In Tkinter (Python), I was wondering how to use a button to go to different pages of my application, e.g. a register page and a login page. I am aware that a GUI does not have 'pages' like websites do; I've seen a few different ways, but what is the best way to make links to different pages?
2013/02/11
[ "https://Stackoverflow.com/questions/14817210", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2061989/" ]
Make each page a frame. Then, all your buttons need to do is hide whatever is visible, then make the desired frame visible. A simple method to do this is to stack the frames on top of each other (this is one time when `place` makes sense) and then `lift()` the frame you want to be visible. This technique works best when all pages are the same size; in fact, it requires that you explicitly set the size of the containing frame.

The following is a contrived example. This isn't the only way to solve the problem, just proof that it's not a particularly hard problem to solve:

```
import Tkinter as tk

class Page(tk.Frame):
    def __init__(self, *args, **kwargs):
        tk.Frame.__init__(self, *args, **kwargs)
    def show(self):
        self.lift()

class Page1(Page):
    def __init__(self, *args, **kwargs):
        Page.__init__(self, *args, **kwargs)
        label = tk.Label(self, text="This is page 1")
        label.pack(side="top", fill="both", expand=True)

class Page2(Page):
    def __init__(self, *args, **kwargs):
        Page.__init__(self, *args, **kwargs)
        label = tk.Label(self, text="This is page 2")
        label.pack(side="top", fill="both", expand=True)

class Page3(Page):
    def __init__(self, *args, **kwargs):
        Page.__init__(self, *args, **kwargs)
        label = tk.Label(self, text="This is page 3")
        label.pack(side="top", fill="both", expand=True)

class MainView(tk.Frame):
    def __init__(self, *args, **kwargs):
        tk.Frame.__init__(self, *args, **kwargs)
        p1 = Page1(self)
        p2 = Page2(self)
        p3 = Page3(self)

        buttonframe = tk.Frame(self)
        container = tk.Frame(self)
        buttonframe.pack(side="top", fill="x", expand=False)
        container.pack(side="top", fill="both", expand=True)

        p1.place(in_=container, x=0, y=0, relwidth=1, relheight=1)
        p2.place(in_=container, x=0, y=0, relwidth=1, relheight=1)
        p3.place(in_=container, x=0, y=0, relwidth=1, relheight=1)

        b1 = tk.Button(buttonframe, text="Page 1", command=p1.show)
        b2 = tk.Button(buttonframe, text="Page 2", command=p2.show)
        b3 = tk.Button(buttonframe, text="Page 3", command=p3.show)

        b1.pack(side="left")
        b2.pack(side="left")
        b3.pack(side="left")

        p1.show()

if __name__ == "__main__":
    root = tk.Tk()
    main = MainView(root)
    main.pack(side="top", fill="both", expand=True)
    root.wm_geometry("400x400")
    root.mainloop()
```
```
import tkinter as tk

root = tk.Tk()
root.geometry("360x360")

frame = tk.Frame(root, bg='lightblue')
frame.place(relx=0.2, rely=0.2, relheight=0.6, relwidth=0.6)

def page1():
    label = tk.Label(frame, text='this is the page1')
    label.place(relx=0.3, rely=0.4)

def page2():
    label = tk.Label(frame, text='this is the page2')
    label.place(relx=0.3, rely=0.4)

def page3():
    label = tk.Label(frame, text='this is the page3')
    label.place(relx=0.3, rely=0.4)

bt = tk.Button(root, text='page1', command=page1)
bt.grid(column=0, row=0)
bt1 = tk.Button(root, text='page2', command=page2)
bt1.grid(row=0, column=1)
bt2 = tk.Button(root, text='page3', command=page3)
bt2.grid(row=0, column=2)

root.mainloop()
```
14,817,210
I have quite a simple question here. In Tkinter (Python), I was wondering how to use a button to go to different pages of my application, e.g. a register page and a login page. I am aware that a GUI does not have 'pages' like websites do; I've seen a few different ways, but what is the best way to make links to different pages?
2013/02/11
[ "https://Stackoverflow.com/questions/14817210", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2061989/" ]
Could you do something like this?

```
import tkinter

def page1():
    page2text.pack_forget()
    page1text.pack()

def page2():
    page1text.pack_forget()
    page2text.pack()

window = tkinter.Tk()
page1btn = tkinter.Button(window, text="Page 1", command=page1)
page2btn = tkinter.Button(window, text="Page 2", command=page2)
page1text = tkinter.Label(window, text="This is page 1")
page2text = tkinter.Label(window, text="This is page 2")
page1btn.pack()
page2btn.pack()
page1text.pack()
window.mainloop()  # enter the Tk event loop so the window stays up
```

It seems a lot simpler to me.
```
import tkinter as tk

root = tk.Tk()
root.geometry("360x360")

frame = tk.Frame(root, bg='lightblue')
frame.place(relx=0.2, rely=0.2, relheight=0.6, relwidth=0.6)

def page1():
    label = tk.Label(frame, text='this is the page1')
    label.place(relx=0.3, rely=0.4)

def page2():
    label = tk.Label(frame, text='this is the page2')
    label.place(relx=0.3, rely=0.4)

def page3():
    label = tk.Label(frame, text='this is the page3')
    label.place(relx=0.3, rely=0.4)

bt = tk.Button(root, text='page1', command=page1)
bt.grid(column=0, row=0)
bt1 = tk.Button(root, text='page2', command=page2)
bt1.grid(row=0, column=1)
bt2 = tk.Button(root, text='page3', command=page3)
bt2.grid(row=0, column=2)

root.mainloop()
```
56,642,128
I have a data set with columns titled product name, brand, rating (1:5), review text, and review helpfulness. What I need is to propose a recommendation algorithm using reviews. I have to use Python for the coding here; the data set is in .csv format. To identify the nature of the data set, I need to use k-means on it. How do I use k-means on this data set?

Thus far I did the following:

1. data pre-processing,
2. review text data cleaning,
3. sentiment analysis,
4. giving a sentiment score from 1 to 5 according to the sentiment value (given by the sentiment analysis), and tagging reviews as very negative, negative, neutral, positive, or very positive.

After these procedures I have these columns in my data set: product name, brand, rating (1:5), review text, review helpfulness, sentiment value, sentiment tag.

This is the link to the data set: <https://drive.google.com/file/d/1YhCJNvV2BQk0T7PbPoR746DCL6tYmH7l/view?usp=sharing>

I tried to get k-means using the following code. It ran without error, but I don't know whether this is something useful, or whether there are other ways to use k-means on this data set to get more useful outputs. To learn more about the data, how should I use k-means on this data set?

```
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt

df.info()
X = np.array(df.drop(['sentiment_value'], 1).astype(float))
y = np.array(df['rating'])

kmeans = KMeans(n_clusters=2)
kmeans.fit(X)
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
       n_clusters=2, n_init=10, n_jobs=1, precompute_distances='auto',
       random_state=None, tol=0.0001, verbose=0)

plt.show()
```
2019/06/18
[ "https://Stackoverflow.com/questions/56642128", "https://Stackoverflow.com", "https://Stackoverflow.com/users/9725182/" ]
You did not plot anything. So nothing shows up.
Unless you are more specific about what you are trying to achieve, we won't be able to help. Figure out what exactly you want to predict. Do you just want to cluster products according to their sentiment score (which isn't especially promising), or do you want to predict actual product preferences on a new dataset? If you want to build a recommendation system, the only possibility (considering your dataset) would be to identify similar products according to the rating/sentiment. Is that what you want?
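If the aim is to let k-means say something about the reviews themselves, a more informative input than the raw numeric columns is a TF-IDF matrix of the review text; a hedged sketch (the file path and column name are assumptions based on the question):

```
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans

df = pd.read_csv("reviews.csv")            # placeholder path
texts = df["review text"].fillna("")       # column name taken from the question

vec = TfidfVectorizer(max_features=5000, stop_words="english")
X = vec.fit_transform(texts)               # sparse TF-IDF matrix; KMeans accepts it directly

km = KMeans(n_clusters=5, n_init=10, random_state=0).fit(X)
df["cluster"] = km.labels_

# Inspect what each cluster talks about via its highest-weight terms
terms = vec.get_feature_names_out()
for i, centroid in enumerate(km.cluster_centers_):
    top = centroid.argsort()[-8:][::-1]
    print(i, [terms[t] for t in top])
```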
69,557,664
I have a custom python logger

```
# logger.py
import logging

#logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger(__name__)

c_handler = logging.StreamHandler()
c_handler.setLevel(logging.DEBUG)
c_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
c_handler.setFormatter(c_format)
logger.addHandler(c_handler)
```

I have set the level to DEBUG, but only WARNINGs (and above) are shown:

```
from ..logger import logger
...
logger.debug('this is a debug log message')
logger.warning('too hot to handle')
...
```

> my\_module.logger:too hot to handle

If I uncomment the line

```
logging.basicConfig(level=logging.DEBUG)
```

then I get the DEBUG level, but two copies of each message:

> my\_module.logger - DEBUG - this is a debug log message
>
> DEBUG:my\_module.logger:this is a debug log message
>
> my\_module.logger - WARNING - too hot to handle
>
> WARNING:my\_module.logger:too hot to handle

I am not importing *logging* at any other point in the package. How should I configure the logger?
2021/10/13
[ "https://Stackoverflow.com/questions/69557664", "https://Stackoverflow.com", "https://Stackoverflow.com/users/3070181/" ]
TL;DR Use `logger.setLevel(logging.DEBUG)`

---

According to the [Python documentation](https://docs.python.org/3/library/logging.html#logging.Logger.setLevel), a handler processes messages with a level equal to or higher than the one the handler is set to (via `.setLevel()`). But also note, emphasis mine:

> When a logger is created, the level is set to `NOTSET` (which causes all messages to be processed when the logger is the root logger, **or delegation to the parent when the logger is a non-root logger**). Note that the root logger is created with level `WARNING`.

So without `logging.basicConfig`, there's no "root logger" at program startup, and your first `getLogger()` creates a stub root logger with default level WARNING, and your logger with level NOTSET (which falls back to that of the root logger). As a result, your `logger.debug` message is thrown away before it gets handled.

With `logging.basicConfig`, you explicitly create a root logger with the given level *and [**a StreamHandler** with default Formatter](https://docs.python.org/3/library/logging.html#logging.basicConfig)*. Your new `getLogger()` is attached to the root logger, and any log record is [propagated](https://docs.python.org/3/library/logging.html#logging.Logger.propagate) to the root logger - thus printing twice, with a different formatter (the default one indeed). The stub root logger created by the first call to `getLogger()` has no handler attached, so any propagated record is not printed out.

If you want to have full control over your logging facility, it's better to give your logger an explicit level than to rely on `basicConfig`, which creates a root logger that you may not want:

```
logger.setLevel(logging.DEBUG)
```
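Applied to the logger module from the question, the fix is a single extra line on the logger itself; a minimal sketch:

```
# logger.py
import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)   # the logger must pass DEBUG records to its handlers

c_handler = logging.StreamHandler()
c_handler.setLevel(logging.DEBUG)
c_handler.setFormatter(logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
logger.addHandler(c_handler)

logger.debug('this is a debug log message')   # now printed exactly once
```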
Having read the [docs](https://docs.python.org/3/library/logging.html#logging.Logger.setLevel) again, I realise that *propagate* is the attribute that I need to use to turn off the ancestor *logging* output. So my logger becomes:

```
# logger.py
import logging

logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger(__name__)
logger.propagate = False  # must be set after getLogger(), not before

c_handler = logging.StreamHandler()
c_handler.setLevel(logging.DEBUG)
c_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
c_handler.setFormatter(c_format)
logger.addHandler(c_handler)
```

And I get just one log message, and the debug level is used.
65,343,093
I am working on a pipeline where the majority of code is within a python script that I call in the pipeline. In the script I would like to use the predefined variable System.AccessToken to make a call to the DevOps API that sets the status of a pull request. However, when I try to get the token using `os.environ['System.AccessToken']` I get a key error. Oddly though, it seems that System.AccessToken is set, because in the yaml file for the pipeline I am able to access the API like: ``` curl -u ":$(System.AccessToken)" URL ``` and get back a valid response. Is there something additional I need to do in Python to access this variable?
2020/12/17
[ "https://Stackoverflow.com/questions/65343093", "https://Stackoverflow.com", "https://Stackoverflow.com/users/11090784/" ]
After reviewing the page that Mani posted I found the answer. For most variables, something like System.AccessToken would have a corresponding SYSTEM\_ACCESSTOKEN. However, with a secret variable this is not the case. I was able to make it accessible to my python script by adding:

```
env:
  SYSTEM_ACCESSTOKEN: $(System.AccessToken)
```

to where the Python script is called in the pipeline's yaml file. See <https://learn.microsoft.com/en-us/azure/devops/pipelines/process/variables?view=azure-devops&tabs=yaml%2Cbatch#secret-variables> for more details.
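With that `env:` mapping in place, the token is readable from Python like any other environment variable; a sketch (the organization/project in the URL are placeholders):

```
import os
import requests  # assumed to be available on the build agent

token = os.environ["SYSTEM_ACCESSTOKEN"]  # populated by the env: mapping above
response = requests.get(
    "https://dev.azure.com/my-org/my-project/_apis/git/pullrequests?api-version=6.0",
    headers={"Authorization": "Bearer " + token},
)
print(response.status_code)
```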
It can work with this documentation: <https://learn.microsoft.com/de-de/azure/developer/python/azure-sdk-authenticate?tabs=cmd> (just switch the page language to "read in English"). There must be a vault and an existing secret, a.k.a. a SAS token. And I have to say, your code above is curl, not Python.

---

```
import os
from azure.identity import DefaultAzureCredential
from azure.keyvault.secrets import SecretClient

# Acquire the resource URL
vault_url = os.environ["KEY_VAULT_URL"]

# Acquire a credential object
credential = DefaultAzureCredential()

# Acquire a client object
secret_client = SecretClient(vault_url=vault_url, credential=credential)

# Attempt to perform an operation
retrieved_secret = secret_client.get_secret("secret-name-01")
```

Change the fields to your vault and secret name, save this as test.py, and run it.

---

If you need the token outside, note that each environment has its own namespace. So either add it to the local context with export ..., or follow the Unix policy that "everything is a file" and write it to a file. Good practice here is to use ansible-vault or something similar: store it encrypted, and read it from the file when you need it.
65,343,093
I am working on a pipeline where the majority of code is within a python script that I call in the pipeline. In the script I would like to use the predefined variable System.AccessToken to make a call to the DevOps API that sets the status of a pull request. However, when I try to get the token using `os.environ['System.AccessToken']` I get a key error. Oddly though, it seems that System.AccessToken is set, because in the yaml file for the pipeline I am able to access the API like: ``` curl -u ":$(System.AccessToken)" URL ``` and get back a valid response. Is there something additional I need to do in Python to access this variable?
2020/12/17
[ "https://Stackoverflow.com/questions/65343093", "https://Stackoverflow.com", "https://Stackoverflow.com/users/11090784/" ]
After reviewing the page that Mani posted I found the answer. For most variables, something like System.AccessToken would have a corresponding SYSTEM\_ACCESSTOKEN. However, with a secret variable this is not the case. I was able to make it accessible to my python script by adding:

```
env:
  SYSTEM_ACCESSTOKEN: $(System.AccessToken)
```

to where the Python script is called in the pipeline's yaml file. See <https://learn.microsoft.com/en-us/azure/devops/pipelines/process/variables?view=azure-devops&tabs=yaml%2Cbatch#secret-variables> for more details.
Can you use `os.environ['SYSTEM_ACCESSTOKEN']`? As mentioned in <https://learn.microsoft.com/en-us/azure/devops/pipelines/process/variables?view=azure-devops&tabs=yaml%2Cbatch#environment-variables>, the **case/format of the environment variables** is different.
6,600,039
I'm trying to figure out if there is a quick way to test my django view functions from either the python or django shell. How would I go about instantiating and passing in a faux HttpRequest object?
2011/07/06
[ "https://Stackoverflow.com/questions/6600039", "https://Stackoverflow.com", "https://Stackoverflow.com/users/234723/" ]
If you're using Django 1.3, take a look at the included [RequestFactory](https://docs.djangoproject.com/en/1.3/topics/testing/#the-request-factory).
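A minimal sketch of the RequestFactory in use (the view import is an assumption for illustration):

```
from django.test.client import RequestFactory
from myapp.views import my_view  # hypothetical view function

factory = RequestFactory()
request = factory.get('/some/url/')  # builds a faux HttpRequest
response = my_view(request)          # call the view function directly
print(response.status_code)
```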
Sounds like you want the django test client <https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client>
6,600,039
I'm trying to figure out if there is a quick way to test my django view functions from either the python or django shell. How would I go about instantiating and passing in a faux HttpRequest object?
2011/07/06
[ "https://Stackoverflow.com/questions/6600039", "https://Stackoverflow.com", "https://Stackoverflow.com/users/234723/" ]
The `django.test.client` would be the way to go. From the [django docs](https://docs.djangoproject.com/en/1.1/topics/testing/#default-test-client):

```
from django.test.client import Client
c = Client()
response = c.post('/login/', {'username': 'john', 'password': 'smith'})
response.status_code
```
Sounds like you want the django test client <https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client>
6,600,039
I'm trying to figure out if there is a quick way to test my django view functions from either the python or django shell. How would I go about instantiating and passing in a faux HttpRequest object?
2011/07/06
[ "https://Stackoverflow.com/questions/6600039", "https://Stackoverflow.com", "https://Stackoverflow.com/users/234723/" ]
You should check out django.test.client.Client or django.test.client.RequestFactory which are documented in django's unit testing facility: <https://docs.djangoproject.com/en/1.3/topics/testing/> I also suggest using template responses in your views since they let you inspect the context used to render the template: <http://docs.djangoproject.com/en/1.3/ref/template-response/>
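A sketch of why that second suggestion helps: with the test client, the context used to render a template is available on the response (the URL and context key are assumptions):

```
from django.test import Client

c = Client()
response = c.get('/register/')    # hypothetical URL
print(response.status_code)
print(response.context['form'])   # inspect the template context
```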
Sounds like you want the django test client <https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client>
6,600,039
I'm trying to figure out if there is a quick way to test my django view functions from either the python or django shell. How would I go about instantiating and passing in a faux HttpRequest object?
2011/07/06
[ "https://Stackoverflow.com/questions/6600039", "https://Stackoverflow.com", "https://Stackoverflow.com/users/234723/" ]
The `django.test.client` would be the way to go. From the [django docs](https://docs.djangoproject.com/en/1.1/topics/testing/#default-test-client):

```
from django.test.client import Client
c = Client()
response = c.post('/login/', {'username': 'john', 'password': 'smith'})
response.status_code
```
If you're using Django 1.3, take a look at the included [RequestFactory](https://docs.djangoproject.com/en/1.3/topics/testing/#the-request-factory).
6,600,039
I'm trying to figure out if there is a quick way to test my django view functions from either the python or django shell. How would I go about instantiating and passing in a faux HttpRequest object?
2011/07/06
[ "https://Stackoverflow.com/questions/6600039", "https://Stackoverflow.com", "https://Stackoverflow.com/users/234723/" ]
If you're using Django 1.3, take a look at the included [RequestFactory](https://docs.djangoproject.com/en/1.3/topics/testing/#the-request-factory).
You should check out django.test.client.Client or django.test.client.RequestFactory which are documented in django's unit testing facility: <https://docs.djangoproject.com/en/1.3/topics/testing/> I also suggest using template responses in your views since they let you inspect the context used to render the template: <http://docs.djangoproject.com/en/1.3/ref/template-response/>
6,600,039
I'm trying to figure out if there is a quick way to test my django view functions from either the python or django shell. How would I go about instantiating and passing in a faux HttpRequest object?
2011/07/06
[ "https://Stackoverflow.com/questions/6600039", "https://Stackoverflow.com", "https://Stackoverflow.com/users/234723/" ]
The `django.test.client` would be the way to go. From the [django docs](https://docs.djangoproject.com/en/1.1/topics/testing/#default-test-client):

```
from django.test.client import Client
c = Client()
response = c.post('/login/', {'username': 'john', 'password': 'smith'})
response.status_code
```
You should check out django.test.client.Client or django.test.client.RequestFactory which are documented in django's unit testing facility: <https://docs.djangoproject.com/en/1.3/topics/testing/> I also suggest using template responses in your views since they let you inspect the context used to render the template: <http://docs.djangoproject.com/en/1.3/ref/template-response/>
19,130,113
I've got a database full of BlobKeys that were previously uploaded through the standard Google App Engine [create\_upload\_url()](https://developers.google.com/appengine/docs/python/blobstore/functions#create_upload_url) process, and each of the uploads went to the same Google Cloud Storage bucket by setting the `gs_bucket_name` argument. What I'd like to do is be able to decode the existing blobkeys so I can get their Google Cloud Storage filenames. I understand that I ***could*** have been using the [gs\_object\_name](https://developers.google.com/appengine/docs/python/blobstore/fileinfoclass#FileInfo_gs_object_name) property from the [FileInfo class](https://developers.google.com/appengine/docs/python/blobstore/fileinfoclass), except:

> You must save the gs\_object\_name yourself in your upload handler or this data will be lost. (The other metadata for the object in GCS is stored in GCS automatically, so you don't need to save that in your upload handler.)

Meaning the `gs_object_name` property is only available in the upload handler, and if I haven't been saving it at that time then it's lost. Also, [create\_gs\_key()](https://developers.google.com/appengine/docs/python/blobstore/functions#create_gs_key) doesn't do the trick, because it instead takes a Google Storage filename and creates a blobkey. So, how can I take a blobkey that was previously uploaded to a Google Cloud Storage bucket through App Engine, and get its Google Cloud Storage filename? (python)
2013/10/02
[ "https://Stackoverflow.com/questions/19130113", "https://Stackoverflow.com", "https://Stackoverflow.com/users/361897/" ]
You can get the Cloud Storage filename only in the upload handler (fileInfo.gs\_object\_name) and store it in your database. After that it is lost, and it seems not to be preserved in BlobInfo or other metadata structures.

> Google says: Unlike BlobInfo metadata, FileInfo metadata is not persisted to datastore. (There is no blob key either, but you can create one later if needed by calling create\_gs\_key.) You must save the gs\_object\_name yourself in your upload handler or this data will be lost.

<https://developers.google.com/appengine/docs/python/blobstore/fileinfoclass>

Update: I was able to decode an SDK BlobKey in the Blobstore viewer: "encoded\_gs\_file:base64-encoded-filename-here". However, the real thing is not base64 encoded. create\_gs\_key(filename, rpc=None) ... Google says: "Returns an encrypted blob key as a string." Does anyone have a guess why this is encrypted?
From the statement in the docs, it looks like the generated GCS filenames are lost. You'll have to use gsutil to manually browse your bucket. <https://developers.google.com/storage/docs/gsutil/commands/ls>
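If browsing from Python is preferable to gsutil, the Cloud Storage client library can list the bucket contents; a sketch (the bucket name is a placeholder, and it assumes `pip install google-cloud-storage` plus working credentials):

```
from google.cloud import storage

client = storage.Client()
for blob in client.list_blobs("your-bucket-name"):  # placeholder bucket name
    print(blob.name)
```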
19,130,113
I've got a database full of BlobKeys that were previously uploaded through the standard Google App Engine [create\_upload\_url()](https://developers.google.com/appengine/docs/python/blobstore/functions#create_upload_url) process, and each of the uploads went to the same Google Cloud Storage bucket by setting the `gs_bucket_name` argument. What I'd like to do is be able to decode the existing blobkeys so I can get their Google Cloud Storage filenames. I understand that I ***could*** have been using the [gs\_object\_name](https://developers.google.com/appengine/docs/python/blobstore/fileinfoclass#FileInfo_gs_object_name) property from the [FileInfo class](https://developers.google.com/appengine/docs/python/blobstore/fileinfoclass), except:

> You must save the gs\_object\_name yourself in your upload handler or this data will be lost. (The other metadata for the object in GCS is stored in GCS automatically, so you don't need to save that in your upload handler.)

Meaning the `gs_object_name` property is only available in the upload handler, and if I haven't been saving it at that time then it's lost. Also, [create\_gs\_key()](https://developers.google.com/appengine/docs/python/blobstore/functions#create_gs_key) doesn't do the trick, because it instead takes a Google Storage filename and creates a blobkey. So, how can I take a blobkey that was previously uploaded to a Google Cloud Storage bucket through App Engine, and get its Google Cloud Storage filename? (python)
2013/10/02
[ "https://Stackoverflow.com/questions/19130113", "https://Stackoverflow.com", "https://Stackoverflow.com/users/361897/" ]
From the statement in the docs, it looks like the generated GCS filenames are lost. You'll have to use gsutil to manually browse your bucket. <https://developers.google.com/storage/docs/gsutil/commands/ls>
If you have blobKeys you can use: ImagesServiceFactory.[makeImageFromBlob](https://developers.google.com/appengine/docs/java/javadoc/com/google/appengine/api/images/IImagesServiceFactory.html#makeImageFromBlob)
19,130,113
I've got a database full of BlobKeys that were previously uploaded through the standard Google App Engine [create\_upload\_url()](https://developers.google.com/appengine/docs/python/blobstore/functions#create_upload_url) process, and each of the uploads went to the same Google Cloud Storage bucket by setting the `gs_bucket_name` argument. What I'd like to do is be able to decode the existing blobkeys so I can get their Google Cloud Storage filenames. I understand that I ***could*** have been using the [gs\_object\_name](https://developers.google.com/appengine/docs/python/blobstore/fileinfoclass#FileInfo_gs_object_name) property from the [FileInfo class](https://developers.google.com/appengine/docs/python/blobstore/fileinfoclass), except:

> You must save the gs\_object\_name yourself in your upload handler or this data will be lost. (The other metadata for the object in GCS is stored in GCS automatically, so you don't need to save that in your upload handler.)

Meaning the `gs_object_name` property is only available in the upload handler, and if I haven't been saving it at that time then it's lost. Also, [create\_gs\_key()](https://developers.google.com/appengine/docs/python/blobstore/functions#create_gs_key) doesn't do the trick, because it instead takes a Google Storage filename and creates a blobkey. So, how can I take a blobkey that was previously uploaded to a Google Cloud Storage bucket through App Engine, and get its Google Cloud Storage filename? (python)
2013/10/02
[ "https://Stackoverflow.com/questions/19130113", "https://Stackoverflow.com", "https://Stackoverflow.com/users/361897/" ]
You can get the Cloud Storage filename only in the upload handler (fileInfo.gs\_object\_name) and store it in your database. After that it is lost, and it seems not to be preserved in BlobInfo or other metadata structures.

> Google says: Unlike BlobInfo metadata, FileInfo metadata is not persisted to datastore. (There is no blob key either, but you can create one later if needed by calling create\_gs\_key.) You must save the gs\_object\_name yourself in your upload handler or this data will be lost.

<https://developers.google.com/appengine/docs/python/blobstore/fileinfoclass>

Update: I was able to decode an SDK BlobKey in the Blobstore viewer: "encoded\_gs\_file:base64-encoded-filename-here". However, the real thing is not base64 encoded. create\_gs\_key(filename, rpc=None) ... Google says: "Returns an encrypted blob key as a string." Does anyone have a guess why this is encrypted?
If you have blobKeys you can use: ImagesServiceFactory.[makeImageFromBlob](https://developers.google.com/appengine/docs/java/javadoc/com/google/appengine/api/images/IImagesServiceFactory.html#makeImageFromBlob)
66,921,090
I am trying to create a SparkContext in a jupyter notebook, but I am getting the following error:

**Py4JError: org.apache.spark.api.python.PythonUtils.getPythonAuthSocketTimeout does not exist in the JVM**

Here is my code:

```
from pyspark import SparkContext, SparkConf
conf = SparkConf().setMaster("local").setAppName("Groceries")
sc = SparkContext(conf = conf)
```

```
Py4JError                                 Traceback (most recent call last)
<ipython-input-20-5058f350f58a> in <module>
      1 conf = SparkConf().setMaster("local").setAppName("My App")
----> 2 sc = SparkContext(conf = conf)

~/Documents/python38env/lib/python3.8/site-packages/pyspark/context.py in __init__(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer, conf, gateway, jsc, profiler_cls)
    144         SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
    145         try:
--> 146             self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
    147                           conf, jsc, profiler_cls)
    148         except:

~/Documents/python38env/lib/python3.8/site-packages/pyspark/context.py in _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer, conf, jsc, profiler_cls)
    224         self._encryption_enabled = self._jvm.PythonUtils.isEncryptionEnabled(self._jsc)
    225         os.environ["SPARK_AUTH_SOCKET_TIMEOUT"] = \
--> 226             str(self._jvm.PythonUtils.getPythonAuthSocketTimeout(self._jsc))
    227         os.environ["SPARK_BUFFER_SIZE"] = \
    228             str(self._jvm.PythonUtils.getSparkBufferSize(self._jsc))

~/Documents/python38env/lib/python3.8/site-packages/py4j/java_gateway.py in __getattr__(self, name)
   1528                 answer, self._gateway_client, self._fqn, name)
   1529         else:
-> 1530             raise Py4JError(
   1531                 "{0}.{1} does not exist in the JVM".format(self._fqn, name))
   1532

Py4JError: org.apache.spark.api.python.PythonUtils.getPythonAuthSocketTimeout does not exist in the JVM
```
2021/04/02
[ "https://Stackoverflow.com/questions/66921090", "https://Stackoverflow.com", "https://Stackoverflow.com/users/7527164/" ]
Python's pyspark version and the Spark cluster version are inconsistent, which produces this error. Uninstall the current pyspark, then install the pyspark version that matches the Spark cluster. My Spark version is 3.0.2, so I run the following:

```
pip3 uninstall pyspark
pip3 install pyspark==3.0.2
```
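A quick sanity check before reinstalling is to print both versions and compare; a minimal sketch:

```
import pyspark

# Compare this with the cluster version reported by `spark-submit --version`;
# the Py4JError above shows up when the two disagree.
print(pyspark.__version__)
```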
I have had the same error today and resolved it with the below code: Execute this in a separate cell before you have your spark session builder

```
from pyspark import SparkContext, SQLContext, SparkConf, StorageLevel
from pyspark.sql import SparkSession
from pyspark.conf import SparkConf

SparkSession.builder.config(conf=SparkConf())
```
66,921,090
I am trying to create a SparkContext in a jupyter notebook, but I am getting the following error:

**Py4JError: org.apache.spark.api.python.PythonUtils.getPythonAuthSocketTimeout does not exist in the JVM**

Here is my code:

```
from pyspark import SparkContext, SparkConf
conf = SparkConf().setMaster("local").setAppName("Groceries")
sc = SparkContext(conf = conf)
```

```
Py4JError                                 Traceback (most recent call last)
<ipython-input-20-5058f350f58a> in <module>
      1 conf = SparkConf().setMaster("local").setAppName("My App")
----> 2 sc = SparkContext(conf = conf)

~/Documents/python38env/lib/python3.8/site-packages/pyspark/context.py in __init__(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer, conf, gateway, jsc, profiler_cls)
    144         SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
    145         try:
--> 146             self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
    147                           conf, jsc, profiler_cls)
    148         except:

~/Documents/python38env/lib/python3.8/site-packages/pyspark/context.py in _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer, conf, jsc, profiler_cls)
    224         self._encryption_enabled = self._jvm.PythonUtils.isEncryptionEnabled(self._jsc)
    225         os.environ["SPARK_AUTH_SOCKET_TIMEOUT"] = \
--> 226             str(self._jvm.PythonUtils.getPythonAuthSocketTimeout(self._jsc))
    227         os.environ["SPARK_BUFFER_SIZE"] = \
    228             str(self._jvm.PythonUtils.getSparkBufferSize(self._jsc))

~/Documents/python38env/lib/python3.8/site-packages/py4j/java_gateway.py in __getattr__(self, name)
   1528                 answer, self._gateway_client, self._fqn, name)
   1529         else:
-> 1530             raise Py4JError(
   1531                 "{0}.{1} does not exist in the JVM".format(self._fqn, name))
   1532

Py4JError: org.apache.spark.api.python.PythonUtils.getPythonAuthSocketTimeout does not exist in the JVM
```
2021/04/02
[ "https://Stackoverflow.com/questions/66921090", "https://Stackoverflow.com", "https://Stackoverflow.com/users/7527164/" ]
We need to uninstall the default/existing/latest version of PySpark from PyCharm/Jupyter Notebook or any tool that we use. Then check the version of Spark that we have installed in PyCharm / Jupyter Notebook / CMD, using the command `spark-submit --version` (in CMD/Terminal). Then install the PySpark version that matches the Spark version you have. *For example, I have Spark 3.0.3, so I have installed PySpark 3.0.3.* In CMD/PyCharm Terminal:

```
pip install pyspark==3.0.3
```

Or check [this](https://www.jetbrains.com/help/pycharm/installing-uninstalling-and-upgrading-packages.html#interpreter-settings) if you are a PyCharm user.
I have had the same error today and resolved it with the below code: Execute this in a separate cell before you have your spark session builder

```
from pyspark import SparkContext, SQLContext, SparkConf, StorageLevel
from pyspark.sql import SparkSession
from pyspark.conf import SparkConf

SparkSession.builder.config(conf=SparkConf())
```
66,921,090
I am trying to create a SparkContext in a jupyter notebook, but I am getting the following error:

**Py4JError: org.apache.spark.api.python.PythonUtils.getPythonAuthSocketTimeout does not exist in the JVM**

Here is my code:

```
from pyspark import SparkContext, SparkConf
conf = SparkConf().setMaster("local").setAppName("Groceries")
sc = SparkContext(conf = conf)
```

```
Py4JError                                 Traceback (most recent call last)
<ipython-input-20-5058f350f58a> in <module>
      1 conf = SparkConf().setMaster("local").setAppName("My App")
----> 2 sc = SparkContext(conf = conf)

~/Documents/python38env/lib/python3.8/site-packages/pyspark/context.py in __init__(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer, conf, gateway, jsc, profiler_cls)
    144         SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
    145         try:
--> 146             self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
    147                           conf, jsc, profiler_cls)
    148         except:

~/Documents/python38env/lib/python3.8/site-packages/pyspark/context.py in _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer, conf, jsc, profiler_cls)
    224         self._encryption_enabled = self._jvm.PythonUtils.isEncryptionEnabled(self._jsc)
    225         os.environ["SPARK_AUTH_SOCKET_TIMEOUT"] = \
--> 226             str(self._jvm.PythonUtils.getPythonAuthSocketTimeout(self._jsc))
    227         os.environ["SPARK_BUFFER_SIZE"] = \
    228             str(self._jvm.PythonUtils.getSparkBufferSize(self._jsc))

~/Documents/python38env/lib/python3.8/site-packages/py4j/java_gateway.py in __getattr__(self, name)
   1528                 answer, self._gateway_client, self._fqn, name)
   1529         else:
-> 1530             raise Py4JError(
   1531                 "{0}.{1} does not exist in the JVM".format(self._fqn, name))
   1532

Py4JError: org.apache.spark.api.python.PythonUtils.getPythonAuthSocketTimeout does not exist in the JVM
```
2021/04/02
[ "https://Stackoverflow.com/questions/66921090", "https://Stackoverflow.com", "https://Stackoverflow.com/users/7527164/" ]
Python's pyspark version and the Spark cluster version are inconsistent, which produces this error. Uninstall the current pyspark, then install the pyspark version that matches the Spark cluster. My Spark version is 3.0.2, so I run the following:

```
pip3 uninstall pyspark
pip3 install pyspark==3.0.2
```
We need to uninstall the default/existing/latest version of PySpark from PyCharm/Jupyter Notebook or any tool that we use. Then check the version of Spark that we have installed in PyCharm / Jupyter Notebook / CMD, using the command `spark-submit --version` (in CMD/Terminal). Then install the PySpark version that matches the Spark version you have. *For example, I have Spark 3.0.3, so I have installed PySpark 3.0.3.* In CMD/PyCharm Terminal:

```
pip install pyspark==3.0.3
```

Or check [this](https://www.jetbrains.com/help/pycharm/installing-uninstalling-and-upgrading-packages.html#interpreter-settings) if you are a PyCharm user.
4,787,291
I'm writing an application. No fancy GUIs or anything, just a plain old console application. This application, let's call it App, needs to be able to load plugins on startup. So, naturally, I created a class for the plugins to inherit from:

```
class PluginBase(object):
    def on_load(self):
        pass

    def on_unload(self):
        pass

    def do_work(self, data):
        pass
```

The idea being that on startup, App would walk through the current dir, including subdirs, searching for modules containing classes that themselves are subclasses of `PluginBase`. More code:

```
class PluginLoader(object):
    def __init__(self, path, cls):
        """ path=path to search (unused atm), cls=baseclass """
        self.path = path

    def search(self):
        for root, dirs, files in os.walk('.'):
            candidates = [fname for fname in files if fname.endswith('.py')
                          and not fname.startswith('__')]
            ## this only works if the modules happen to be in the current working dir
            ## that is not important now, i'll fix that later
            if candidates:
                basename = os.path.split(os.getcwd())[1]
                for c in candidates:
                    modname = os.path.splitext(c)[0]
                    modname = '{0}.{1}'.format(basename, modname)
                    __import__(modname)
                    module = sys.modules[modname]
```

After that last line in `search` I'd like to somehow a) find all classes in the newly loaded module, b) check if one or more of those classes are subclasses of `PluginBase` and c) (if b) instantiate that/those classes and add them to App's list of loaded modules. I've tried various combinations of `issubclass` and others, followed by a period of intense `dir`-ing and about an hour of panicked googling. I did find a similar approach to mine [here](http://www.luckydonkey.com/2008/01/02/python-style-plugins-made-easy/) and I tried just copy-pasting that, but got an error saying that Python doesn't support imports by filename, at which point I kind of lost my concentration, and as a result of that, this post was written. I'm at my wits' end here; all help appreciated.
2011/01/24
[ "https://Stackoverflow.com/questions/4787291", "https://Stackoverflow.com", "https://Stackoverflow.com/users/350784/" ]
You would make this a lot easier if you forced some constraints on the plugin writer, for example that all plugins must be packages that contain a `load_plugin(app, config)` function that returns a Plugin instance. Then all you have to do is try to import these packages and run the function.
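A sketch of that convention (the package names and the `load_plugin` entry point are illustrative, not from the question):

```
import importlib

PLUGIN_PACKAGES = ['plugins.foo', 'plugins.bar']  # discovered or configured elsewhere

def load_all(app, config):
    loaded = []
    for name in PLUGIN_PACKAGES:
        try:
            module = importlib.import_module(name)
        except ImportError:
            continue
        factory = getattr(module, 'load_plugin', None)  # the required entry point
        if factory is not None:
            loaded.append(factory(app, config))
    return loaded
```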
Could you use execfile() instead of import with a specified namespace dict, then iterate over that namespace with issubclass, etc?
4,787,291
I'm writing an application. No fancy GUIs or anything, just a plain old console application. This application, let's call it App, needs to be able to load plugins on startup. So, naturally, I created a class for the plugins to inherit from:

```
class PluginBase(object):
    def on_load(self):
        pass

    def on_unload(self):
        pass

    def do_work(self, data):
        pass
```

The idea being that on startup, App would walk through the current dir, including subdirs, searching for modules containing classes that themselves are subclasses of `PluginBase`. More code:

```
class PluginLoader(object):
    def __init__(self, path, cls):
        """ path=path to search (unused atm), cls=baseclass """
        self.path = path

    def search(self):
        for root, dirs, files in os.walk('.'):
            candidates = [fname for fname in files if fname.endswith('.py')
                          and not fname.startswith('__')]
            ## this only works if the modules happen to be in the current working dir
            ## that is not important now, i'll fix that later
            if candidates:
                basename = os.path.split(os.getcwd())[1]
                for c in candidates:
                    modname = os.path.splitext(c)[0]
                    modname = '{0}.{1}'.format(basename, modname)
                    __import__(modname)
                    module = sys.modules[modname]
```

After that last line in `search` I'd like to somehow a) find all classes in the newly loaded module, b) check if one or more of those classes are subclasses of `PluginBase` and c) (if b) instantiate that/those classes and add them to App's list of loaded modules. I've tried various combinations of `issubclass` and others, followed by a period of intense `dir`-ing and about an hour of panicked googling. I did find a similar approach to mine [here](http://www.luckydonkey.com/2008/01/02/python-style-plugins-made-easy/) and I tried just copy-pasting that, but got an error saying that Python doesn't support imports by filename, at which point I kind of lost my concentration, and as a result of that, this post was written. I'm at my wits' end here; all help appreciated.
2011/01/24
[ "https://Stackoverflow.com/questions/4787291", "https://Stackoverflow.com", "https://Stackoverflow.com/users/350784/" ]
You might do something like this:

```
for c in candidates:
    modname = os.path.splitext(c)[0]
    try:
        module = __import__(modname)   #<-- You can get the module this way
    except (ImportError, NotImplementedError):
        continue
    for cls in dir(module):            #<-- Loop over all objects in the module's namespace
        cls = getattr(module, cls)
        if (inspect.isclass(cls)                      # Make sure it is a class
                and inspect.getmodule(cls) == module  # Make sure it was defined in module, not just imported
                and issubclass(cls, base)):           # Make sure it is a subclass of base
            # print('found in {f}: {c}'.format(f=module.__name__, c=cls))
            classList.append(cls)
```

To test the above, I had to modify your code a bit; below is the full script.

```
import sys
import inspect
import os

class PluginBase(object):
    pass

def search(base):
    for root, dirs, files in os.walk('.'):
        candidates = [fname for fname in files if fname.endswith('.py')
                      and not fname.startswith('__')]
        classList = []
        if candidates:
            for c in candidates:
                modname = os.path.splitext(c)[0]
                try:
                    module = __import__(modname)
                except (ImportError, NotImplementedError):
                    continue
                for cls in dir(module):
                    cls = getattr(module, cls)
                    if (inspect.isclass(cls)
                            and inspect.getmodule(cls) == module
                            and issubclass(cls, base)):
                        # print('found in {f}: {c}'.format(f=module.__name__, c=cls))
                        classList.append(cls)
        print(classList)

search(PluginBase)
```
Could you use execfile() instead of import with a specified namespace dict, then iterate over that namespace with issubclass, etc?
4,787,291
I'm writing an application. No fancy GUIs or anything, just a plain old console application. This application, let's call it App, needs to be able to load plugins on startup. So, naturally, I created a class for the plugins to inherit from:

```
class PluginBase(object):
    def on_load(self):
        pass

    def on_unload(self):
        pass

    def do_work(self, data):
        pass
```

The idea being that on startup, App would walk through the current dir, including subdirs, searching for modules containing classes that themselves are subclasses of `PluginBase`. More code:

```
class PluginLoader(object):
    def __init__(self, path, cls):
        """ path=path to search (unused atm), cls=baseclass """
        self.path = path

    def search(self):
        for root, dirs, files in os.walk('.'):
            candidates = [fname for fname in files if fname.endswith('.py')
                          and not fname.startswith('__')]
            ## this only works if the modules happen to be in the current working dir
            ## that is not important now, i'll fix that later
            if candidates:
                basename = os.path.split(os.getcwd())[1]
                for c in candidates:
                    modname = os.path.splitext(c)[0]
                    modname = '{0}.{1}'.format(basename, modname)
                    __import__(modname)
                    module = sys.modules[modname]
```

After that last line in `search` I'd like to somehow a) find all classes in the newly loaded module, b) check if one or more of those classes are subclasses of `PluginBase` and c) (if b) instantiate that/those classes and add them to App's list of loaded modules. I've tried various combinations of `issubclass` and others, followed by a period of intense `dir`-ing and about an hour of panicked googling. I did find a similar approach to mine [here](http://www.luckydonkey.com/2008/01/02/python-style-plugins-made-easy/) and I tried just copy-pasting that, but got an error saying that Python doesn't support imports by filename, at which point I kind of lost my concentration, and as a result of that, this post was written. I'm at my wits' end here; all help appreciated.
2011/01/24
[ "https://Stackoverflow.com/questions/4787291", "https://Stackoverflow.com", "https://Stackoverflow.com/users/350784/" ]
Here is a meta-classier way to register the plugins: Define `PluginBase` to be of type `PluginType`. `PluginType` automatically registers any instance (class) in the `plugins` set.

**plugin.py:**

```
plugins = set()

class PluginType(type):
    def __init__(cls, name, bases, attrs):
        super(PluginType, cls).__init__(name, bases, attrs)
        # print(cls, name, cls.__module__)
        plugins.add(cls)

class PluginBase(object):
    __metaclass__ = PluginType
    pass
```

This is the part that the user writes. Notice that there is nothing special here.

**pluginDir/myplugin.py:**

```
import plugin

class Foo(plugin.PluginBase):
    pass
```

Here is what the search function might look like:

**test.py:**

```
import plugin
import os
import imp

def search(plugindir):
    for root, dirs, files in os.walk(plugindir):
        for fname in files:
            modname = os.path.splitext(fname)[0]
            try:
                module = imp.load_source(modname, os.path.join(root, fname))
            except Exception:
                continue

search('pluginDir')
print(plugin.plugins)
```

Running test.py yields

```
set([<class 'myplugin.Foo'>])
```
Could you use execfile() instead of import with a specified namespace dict, then iterate over that namespace with issubclass, etc?
4,787,291
I'm writing an application. No fancy GUIs or anything, just a plain old console application. This application, let's call it App, needs to be able to load plugins on startup. So, naturally, I created a class for the plugins to inherit from:

```
class PluginBase(object):
    def on_load(self):
        pass

    def on_unload(self):
        pass

    def do_work(self, data):
        pass
```

The idea being that on startup, App would walk through the current dir, including subdirs, searching for modules containing classes that themselves are subclasses of `PluginBase`. More code:

```
class PluginLoader(object):
    def __init__(self, path, cls):
        """ path=path to search (unused atm), cls=baseclass """
        self.path = path

    def search(self):
        for root, dirs, files in os.walk('.'):
            candidates = [fname for fname in files if fname.endswith('.py')
                          and not fname.startswith('__')]
            ## this only works if the modules happen to be in the current working dir
            ## that is not important now, i'll fix that later
            if candidates:
                basename = os.path.split(os.getcwd())[1]
                for c in candidates:
                    modname = os.path.splitext(c)[0]
                    modname = '{0}.{1}'.format(basename, modname)
                    __import__(modname)
                    module = sys.modules[modname]
```

After that last line in `search` I'd like to somehow a) find all classes in the newly loaded module, b) check if one or more of those classes are subclasses of `PluginBase` and c) (if b) instantiate that/those classes and add them to App's list of loaded modules. I've tried various combinations of `issubclass` and others, followed by a period of intense `dir`-ing and about an hour of panicked googling. I did find a similar approach to mine [here](http://www.luckydonkey.com/2008/01/02/python-style-plugins-made-easy/) and I tried just copy-pasting that, but got an error saying that Python doesn't support imports by filename, at which point I kind of lost my concentration, and as a result of that, this post was written. I'm at my wits' end here; all help appreciated.
2011/01/24
[ "https://Stackoverflow.com/questions/4787291", "https://Stackoverflow.com", "https://Stackoverflow.com/users/350784/" ]
You might do something like this:

```
for c in candidates:
    modname = os.path.splitext(c)[0]
    try:
        module = __import__(modname)   #<-- You can get the module this way
    except (ImportError, NotImplementedError):
        continue
    for cls in dir(module):            #<-- Loop over all objects in the module's namespace
        cls = getattr(module, cls)
        if (inspect.isclass(cls)                      # Make sure it is a class
                and inspect.getmodule(cls) == module  # Make sure it was defined in module, not just imported
                and issubclass(cls, base)):           # Make sure it is a subclass of base
            # print('found in {f}: {c}'.format(f=module.__name__, c=cls))
            classList.append(cls)
```

To test the above, I had to modify your code a bit; below is the full script.

```
import sys
import inspect
import os

class PluginBase(object):
    pass

def search(base):
    for root, dirs, files in os.walk('.'):
        candidates = [fname for fname in files if fname.endswith('.py')
                      and not fname.startswith('__')]
        classList = []
        if candidates:
            for c in candidates:
                modname = os.path.splitext(c)[0]
                try:
                    module = __import__(modname)
                except (ImportError, NotImplementedError):
                    continue
                for cls in dir(module):
                    cls = getattr(module, cls)
                    if (inspect.isclass(cls)
                            and inspect.getmodule(cls) == module
                            and issubclass(cls, base)):
                        # print('found in {f}: {c}'.format(f=module.__name__, c=cls))
                        classList.append(cls)
        print(classList)

search(PluginBase)
```
You would make this a lot easier if you forced some constraints on the plugin writer, for example that all plugins must be packages that contain a `load_plugin(app, config)` function that returns a Plugin instance. Then all you have to do is try to import these packages and run the function.
4,787,291
I'm writing an application. No fancy GUIs or anything, just a plain old console application. This application, let's call it App, needs to be able to load plugins on startup. So, naturally, I created a class for the plugins to inherit from:

```
class PluginBase(object):
    def on_load(self):
        pass

    def on_unload(self):
        pass

    def do_work(self, data):
        pass
```

The idea being that on startup, App would walk through the current dir, including subdirs, searching for modules containing classes that themselves are subclasses of `PluginBase`. More code:

```
class PluginLoader(object):
    def __init__(self, path, cls):
        """ path=path to search (unused atm), cls=baseclass """
        self.path = path

    def search(self):
        for root, dirs, files in os.walk('.'):
            candidates = [fname for fname in files if fname.endswith('.py')
                          and not fname.startswith('__')]
            ## this only works if the modules happen to be in the current working dir
            ## that is not important now, i'll fix that later
            if candidates:
                basename = os.path.split(os.getcwd())[1]
                for c in candidates:
                    modname = os.path.splitext(c)[0]
                    modname = '{0}.{1}'.format(basename, modname)
                    __import__(modname)
                    module = sys.modules[modname]
```

After that last line in `search` I'd like to somehow a) find all classes in the newly loaded module, b) check if one or more of those classes are subclasses of `PluginBase` and c) (if b) instantiate that/those classes and add them to App's list of loaded modules. I've tried various combinations of `issubclass` and others, followed by a period of intense `dir`-ing and about an hour of panicked googling. I did find a similar approach to mine [here](http://www.luckydonkey.com/2008/01/02/python-style-plugins-made-easy/) and I tried just copy-pasting that, but got an error saying that Python doesn't support imports by filename, at which point I kind of lost my concentration, and as a result of that, this post was written. I'm at my wits' end here; all help appreciated.
2011/01/24
[ "https://Stackoverflow.com/questions/4787291", "https://Stackoverflow.com", "https://Stackoverflow.com/users/350784/" ]
You might do something like this:

```
for c in candidates:
    modname = os.path.splitext(c)[0]
    try:
        module = __import__(modname)   #<-- You can get the module this way
    except (ImportError, NotImplementedError):
        continue
    for cls in dir(module):            #<-- Loop over all objects in the module's namespace
        cls = getattr(module, cls)
        if (inspect.isclass(cls)                      # Make sure it is a class
                and inspect.getmodule(cls) == module  # Make sure it was defined in module, not just imported
                and issubclass(cls, base)):           # Make sure it is a subclass of base
            # print('found in {f}: {c}'.format(f=module.__name__, c=cls))
            classList.append(cls)
```

To test the above, I had to modify your code a bit; below is the full script.

```
import sys
import inspect
import os

class PluginBase(object):
    pass

def search(base):
    for root, dirs, files in os.walk('.'):
        candidates = [fname for fname in files if fname.endswith('.py')
                      and not fname.startswith('__')]
        classList = []
        if candidates:
            for c in candidates:
                modname = os.path.splitext(c)[0]
                try:
                    module = __import__(modname)
                except (ImportError, NotImplementedError):
                    continue
                for cls in dir(module):
                    cls = getattr(module, cls)
                    if (inspect.isclass(cls)
                            and inspect.getmodule(cls) == module
                            and issubclass(cls, base)):
                        # print('found in {f}: {c}'.format(f=module.__name__, c=cls))
                        classList.append(cls)
        print(classList)

search(PluginBase)
```
Here is a meta-classier way to register the plugins: Define `PluginBase` to be of type `PluginType`. `PluginType` automatically registers any instance (class) in the `plugins` set.

**plugin.py:**

```
plugins = set()

class PluginType(type):
    def __init__(cls, name, bases, attrs):
        super(PluginType, cls).__init__(name, bases, attrs)
        # print(cls, name, cls.__module__)
        plugins.add(cls)

class PluginBase(object):
    __metaclass__ = PluginType
    pass
```

This is the part that the user writes. Notice that there is nothing special here.

**pluginDir/myplugin.py:**

```
import plugin

class Foo(plugin.PluginBase):
    pass
```

Here is what the search function might look like:

**test.py:**

```
import plugin
import os
import imp

def search(plugindir):
    for root, dirs, files in os.walk(plugindir):
        for fname in files:
            modname = os.path.splitext(fname)[0]
            try:
                module = imp.load_source(modname, os.path.join(root, fname))
            except Exception:
                continue

search('pluginDir')
print(plugin.plugins)
```

Running test.py yields

```
set([<class 'myplugin.Foo'>])
```
29,463,921
A frog wants to cross a river. There are 3 stones in the river she can jump to. She wants to choose, among all possible paths, the one that leads to the smallest longest jump. I.e., each of the possible paths will have one jump that is the longest, and she needs to find the path where this longest jump is smallest. The 2 shores are 10 apart and are parallel to the y axis. Each stone position is given by a list x=[x1,x2,x3] of the x positions and y=[y1,y2,y3] of the y positions. Return both the longest jump in this path (rounded to the closest integer) and the path itself, as a list of indices in the lists x and y of the stones in the path. Here is my Python code to find the longest jump. How would I track the path itself? And my code looks clumsy with 3 nested loops; is there a better/more elegant way to write this?

```
def longestJump(x, y):
    best = 10
    for i in range(0, 3):
        for j in range(0, 3):
            for k in range(0, 3):
                # first jump from shore to a stone
                dist = x[i]
                # second jump between stones
                dist = max(dist, round(math.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2)))
                # third jump between stones
                dist = max(dist, round(math.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2)))
                dist = max(dist, round(math.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2)))
                # last jump from a stone to the opposite shore
                dist = max(dist, 10 - x[j])
                best = min(dist, best)
    return best
```
2015/04/06
[ "https://Stackoverflow.com/questions/29463921", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4542063/" ]
I would use `Array()` to normalize the input, and then there is only one case left:

```
work.map! do |w|
  element = Array(w).first
  console.button_map[element] || element
end
```
I settled on this, not sure if it can be cleaner:

```
work.map! do |w|
  if w.is_a? Array
    w.tap { |x| x[0] = console.button_map[x[0]] || x[0] }
  else
    console.button_map[w] || w
  end
end
```
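For the frog question itself, one way to both track the path and drop the nested loops is to enumerate stone orderings with `itertools.permutations`; a sketch assuming, as in the question, that the shores sit at x=0 and x=10 (requires Python 3.8 for `math.dist`):

```
import math
from itertools import permutations

def best_path(x, y):
    stones = list(zip(x, y))
    best_jump, best_ids = 10.0, []                 # worst case: jump straight across
    for r in range(len(stones) + 1):               # paths using 0..3 stones
        for order in permutations(range(len(stones)), r):
            pts = [stones[i] for i in order]
            if not pts:
                continue                           # the no-stone path is the initial best
            jumps = [pts[0][0]]                    # shore -> first stone
            jumps += [math.dist(a, b) for a, b in zip(pts, pts[1:])]
            jumps.append(10 - pts[-1][0])          # last stone -> far shore
            longest = max(jumps)
            if longest < best_jump:
                best_jump, best_ids = longest, list(order)
    return round(best_jump), best_ids
```

With three stones there are only 16 candidate orderings, so brute force is perfectly adequate here.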
29,463,921
A frog wants to cross a river. There are 3 stones in the river she can jump to. She wants to choose, among all possible paths, the one that leads to the smallest longest jump. I.e., each of the possible paths will have one jump that is the longest, and she needs to find the path where this longest jump is smallest. The 2 shores are 10 apart and are parallel to the y axis. Each stone position is given by a list x=[x1,x2,x3] of the x positions and y=[y1,y2,y3] of the y positions. Return both the longest jump in this path (rounded to the closest integer) and the path itself, as a list of indices in the lists x and y of the stones in the path. Here is my Python code to find the longest jump. How would I track the path itself? And my code looks clumsy with 3 nested loops; is there a better/more elegant way to write this?

```
def longestJump(x, y):
    best = 10
    for i in range(0, 3):
        for j in range(0, 3):
            for k in range(0, 3):
                # first jump from shore to a stone
                dist = x[i]
                # second jump between stones
                dist = max(dist, round(math.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2)))
                # third jump between stones
                dist = max(dist, round(math.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2)))
                dist = max(dist, round(math.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2)))
                # last jump from a stone to the opposite shore
                dist = max(dist, 10 - x[j])
                best = min(dist, best)
    return best
```
2015/04/06
[ "https://Stackoverflow.com/questions/29463921", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4542063/" ]
I settled on this, not sure if it can be cleaner:

```
work.map! do |w|
  if w.is_a? Array
    w.tap { |x| x[0] = console.button_map[x[0]] || x[0] }
  else
    console.button_map[w] || w
  end
end
```
```
work = [[3, 1], 4, [3, 3, 4], 4, :sync, 1, 2, [5]]

def my_method(n)          # defined before it is called in map! below
  case n
  when Fixnum then n+4
  else n.to_s
  end
end

work.map! do |w|
  val = my_method [*w].first
  case w
  when Array then [val, *w[1..-1]]
  else val
  end
end

work
  #=> [[7, 1], 8, [7, 3, 4], 8, "sync", 5, 6, [9]]
```

Note:

```
[*[1,2]] #=> [1,2]
[*3]     #=> [3]
```
29,463,921
A frog wants to cross a river. There are 3 stones in the river she can jump to. She wants to choose, among all possible paths, the one that leads to the smallest longest jump. I.e., each of the possible paths will have one jump that is the longest, and she needs to find the path where this longest jump is smallest. The 2 shores are 10 apart and are parallel to the y axis. Each stone position is given by a list x=[x1,x2,x3] of the x positions and y=[y1,y2,y3] of the y positions. Return both the longest jump in this path (rounded to the closest integer) and the path itself, as a list of indices in the lists x and y of the stones in the path. Here is my Python code to find the longest jump. How would I track the path itself? And my code looks clumsy with 3 nested loops; is there a better/more elegant way to write this?

```
def longestJump(x, y):
    best = 10
    for i in range(0, 3):
        for j in range(0, 3):
            for k in range(0, 3):
                # first jump from shore to a stone
                dist = x[i]
                # second jump between stones
                dist = max(dist, round(math.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2)))
                # third jump between stones
                dist = max(dist, round(math.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2)))
                dist = max(dist, round(math.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2)))
                # last jump from a stone to the opposite shore
                dist = max(dist, 10 - x[j])
                best = min(dist, best)
    return best
```
2015/04/06
[ "https://Stackoverflow.com/questions/29463921", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4542063/" ]
I would use `Array()` to normalize the input, and then there is only one case left:

```
work.map! do |w|
  element = Array(w).first
  console.button_map[element] || element
end
```
```
work = [[3, 1], 4, [3, 3, 4], 4, :sync, 1, 2, [5]]

def my_method(n)          # defined before it is called in map! below
  case n
  when Fixnum then n+4
  else n.to_s
  end
end

work.map! do |w|
  val = my_method [*w].first
  case w
  when Array then [val, *w[1..-1]]
  else val
  end
end

work
  #=> [[7, 1], 8, [7, 3, 4], 8, "sync", 5, 6, [9]]
```

Note:

```
[*[1,2]] #=> [1,2]
[*3]     #=> [3]
```
49,582,981
I have a flask app in a docker container that writes to a local copy of an SQLite db. What I want to do is move the db out of the container and have it reside on my host. How do I set up docker so that the python code runs in the container but reads and writes the SQLite db on the host?
2018/03/31
[ "https://Stackoverflow.com/questions/49582981", "https://Stackoverflow.com", "https://Stackoverflow.com/users/9577029/" ]
Use bind-mount to share host file to container. If you have the SQLite DB file as `app.db`, you can run your container with the `-v` flag (or the `--mount` flag): ``` docker run -v /absolute/path/to/app.db:/flask/app/app.db <IMAGE> ``` Docs: <https://docs.docker.com/storage/bind-mounts/>
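On the Python side nothing special is required; the app just opens the container-side path that the bind mount maps to. A sketch, with paths matching the `docker run` example above:

```
import sqlite3

# /flask/app/app.db inside the container is backed by the file on the host
conn = sqlite3.connect("/flask/app/app.db")
conn.execute("CREATE TABLE IF NOT EXISTS visits (ts TEXT)")
conn.commit()
conn.close()
```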
You have to either

* set up ownership privileges on your host directory to match the `uid`:`gid` of the user in the container, or
* change the `uid`:`gid` of the user in the container to numerically match the `uid`:`gid` of the host user who owns the directory with the sqlite db file.

Great answers for both approaches are described [here](https://stackoverflow.com/questions/29245216/write-in-shared-volumes-docker/29251160#29251160)
52,710,878
I created a conda environment and installed pytorch and fastai (Mac OS Mojave) as below:

```
conda create -n fai_course python=3.7
source activate fai_course
conda install -c pytorch pytorch-nightly-cpu
conda install -c fastai torchvision-nightly-cpu
jupyter notebook
```

When I import a package from the jupyter notebook, I get an OSError as shown below:

```
from fastai.imports import *
```

```
---------------------------------------------------------------------------
FileNotFoundError                         Traceback (most recent call last)
/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/matplotlib/font_manager.py in <module>
   1352     try:
-> 1353         fontManager = json_load(_fmcache)
   1354         if (not hasattr(fontManager, '_version') or

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/matplotlib/font_manager.py in json_load(filename)
    887     """
--> 888     with open(filename, 'r') as fh:
    889         return json.load(fh, object_hook=_json_decode)

FileNotFoundError: [Errno 2] No such file or directory: '/Users/user/.matplotlib/fontlist-v300.json'

During handling of the above exception, another exception occurred:

OSError                                   Traceback (most recent call last)
<ipython-input-5-9f9378ae0f2a> in <module>
----> 1 from fastai.imports import *

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/fastai/__init__.py in <module>
----> 1 from .basic_train import *
      2 from .callback import *
      3 from .callbacks import *
      4 from .core import *
      5 from .data import *

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/fastai/basic_train.py in <module>
      1 "Provides basic training and validation with `Learner`"
----> 2 from .torch_core import *
      3 from .data import *
      4 from .callback import *
      5

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/fastai/torch_core.py in <module>
      1 "Utility functions to help deal with tensors"
----> 2 from .imports.torch import *
      3 from .core import *
      4
      5 AffineMatrix = Tensor

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/fastai/imports/__init__.py in <module>
----> 1 from .core import *
      2 from .torch import *

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/fastai/imports/core.py in <module>
      1 import csv, gc, gzip, os, pickle, shutil, sys, warnings
----> 2 import math, matplotlib.pyplot as plt, numpy as np, pandas as pd, random
      3 import scipy.stats, scipy.special
      4 import abc, collections, hashlib, itertools, json, operator
      5 import mimetypes, inspect, typing, functools

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/matplotlib/pyplot.py in <module>
     30 from cycler import cycler
     31 import matplotlib
---> 32 import matplotlib.colorbar
     33 import matplotlib.image
     34 from matplotlib import rcsetup, style

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/matplotlib/colorbar.py in <module>
     30 import matplotlib.collections as collections
     31 import matplotlib.colors as colors
---> 32 import matplotlib.contour as contour
     33 import matplotlib.cm as cm
     34 import matplotlib.gridspec as gridspec

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/matplotlib/contour.py in <module>
     16 import matplotlib.colors as mcolors
     17 import matplotlib.collections as mcoll
---> 18 import matplotlib.font_manager as font_manager
     19 import matplotlib.text as text
     20 import matplotlib.cbook as cbook

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/matplotlib/font_manager.py in <module>
   1361             raise
   1362         except Exception:
-> 1363             _rebuild()
   1364     else:
   1365         _rebuild()

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/matplotlib/font_manager.py in _rebuild()
   1342     global fontManager
   1343
-> 1344     fontManager = FontManager()
   1345
   1346     if _fmcache:

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/matplotlib/font_manager.py in __init__(self, size, weight)
    976         self.defaultFont = {}
    977
--> 978         ttffiles = findSystemFonts(paths) + findSystemFonts()
    979         self.defaultFont['ttf'] = next(
    980             (fname for fname in ttffiles

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/matplotlib/font_manager.py in findSystemFonts(fontpaths, fontext)
    268         # check for OS X & load its fonts if present
    269         if sys.platform == 'darwin':
--> 270             fontfiles.update(OSXInstalledFonts(fontext=fontext))
    271
    272     elif isinstance(fontpaths, str):

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/matplotlib/font_manager.py in OSXInstalledFonts(directories, fontext)
    216         directories = OSXFontDirectories
    217     return [path
--> 218             for directory in directories
    219             for ext in get_fontext_synonyms(fontext)
    220             for path in list_fonts(directory, ext)]

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/matplotlib/font_manager.py in <listcomp>(.0)
    218             for directory in directories
    219             for ext in get_fontext_synonyms(fontext)
--> 220             for path in list_fonts(directory, ext)]
    221
    222

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/matplotlib/font_manager.py in list_fonts(directory, extensions)
    155     extensions = ["." + ext for ext in extensions]
    156     return [str(path)
--> 157             for path in filter(Path.is_file, Path(directory).glob("**/*.*"))
    158             if path.suffix in extensions]
    159

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/matplotlib/font_manager.py in <listcomp>(.0)
    154     """
    155     extensions = ["." + ext for ext in extensions]
--> 156     return [str(path)
    157             for path in filter(Path.is_file, Path(directory).glob("**/*.*"))
    158             if path.suffix in extensions]

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/pathlib.py in glob(self, pattern)
   1080             raise NotImplementedError("Non-relative patterns are unsupported")
   1081         selector = _make_selector(tuple(pattern_parts))
-> 1082         for p in selector.select_from(self):
   1083             yield p
   1084

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/pathlib.py in _select_from(self, parent_path, is_dir, exists, scandir)
    541         try:
    542             successor_select = self.successor._select_from
--> 543             for starting_point in self._iterate_directories(parent_path, is_dir, scandir):
    544                 for p in successor_select(starting_point, is_dir, exists, scandir):
    545                     if p not in yielded:

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/pathlib.py in _iterate_directories(self, parent_path, is_dir, scandir)
    531                 if entry.is_dir() and not entry.is_symlink():
    532                     path = parent_path._make_child_relpath(entry.name)
--> 533                     for p in self._iterate_directories(path, is_dir, scandir):
    534                         yield p
    535         except PermissionError:

[this _iterate_directories frame repeats six more times as the glob recurses]

/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/pathlib.py in _iterate_directories(self, parent_path, is_dir, scandir)
    529             entries = list(scandir(parent_path))
    530             for entry in entries:
--> 531                 if entry.is_dir() and not entry.is_symlink():
    532                     path =
```
parent_path._make_child_relpath(entry.name) 533 for p in self._iterate_directories(path, is_dir, scandir): OSError: [Errno 62] Too many levels of symbolic links: '.Trash/NETGEARGenie.app/Contents/Frameworks/QtPrintSupport.framework/Versions/5/5' ``` Can you please let me know what I am missing? Thanks
2018/10/08
[ "https://Stackoverflow.com/questions/52710878", "https://Stackoverflow.com", "https://Stackoverflow.com/users/3899975/" ]
The comments say you "should" do a print, but nothing says you cannot print anything else after the print. Nothing even *forces* you to do a print, otherwise it would be a *shall*. --- To be honest, reading questions about homework like this one makes me unhappy. To me, the whole thing is useless, ugly, and does not teach anything valuable for future real-world software design and coding. The one thing it teaches is: given poor specifications, do poor work to please the client. *Well ... maybe that is a good lesson to learn?* Since you did not give any code showing what you tried, nor any hint about what you understood, I took the liberty of proposing some kind of solution that does the required prints. So here are four implementations. I tried to do this in a rough way. Maybe others would find other weird ways to do it. ``` import java.util.HashMap; import java.util.Map; public class Main { private static final String REQUEST_MUST_NOT_BE_EMPTY = "request must not be empty"; private static final String REQUEST_IS_REQUIRED = "request is required"; private static final String REQUEST = "request"; private static final String EMPTY_STRING = ""; private static final Map<String, Object> requestMessageMap = new HashMap<String, Object>(); static { // requestMessageMap.put(REQUEST, Integer.valueOf(7)); // requestMessageMap.put(REQUEST, Integer.valueOf(REQUEST.length())); requestMessageMap.put(EMPTY_STRING, REQUEST_MUST_NOT_BE_EMPTY); requestMessageMap.put(null, REQUEST_IS_REQUIRED); } public static void main(String[] args) { // Should print 7 System.out.println(stringLength("request")); // this line cannot be changed // Should print "request must not be empty" System.out.println(stringLength("")); // this line cannot be changed // Should print "request is required" System.out.println(stringLength(null)); // this line cannot be changed } public static Integer stringLength(String request) // this line cannot be changed { return sillyMethod4(request); } private static Integer sillyMethod1(String request) { Integer returnValue = -1; if (request == null) { // do exactly what the specification required // (completely pointless) System.err.println(REQUEST_IS_REQUIRED); } else if (request.equals(EMPTY_STRING)) { // do exactly what the specification required // (completely pointless) System.err.println(REQUEST_MUST_NOT_BE_EMPTY); } else if (request.equals(REQUEST)) { // do exactly what the specification required // (completely pointless) returnValue = 7; } else { // my best guess about what we should really do returnValue = request.length(); } return returnValue; } private static Integer lessSillyMethod2(String request) { Integer returnValue = -1; if (request == null) { // do exactly what the specification required // (completely pointless) System.err.println(REQUEST_IS_REQUIRED); } else if (request.equals(EMPTY_STRING)) { // do exactly what the specification required // (completely pointless) System.err.println(REQUEST_MUST_NOT_BE_EMPTY); } else { // my best guess about what we should really do returnValue = request.length(); } return returnValue; } private static Integer sillyMethod3(String request) { Integer returnValue = -1; if (request == null) { // do exactly what the specification required // (completely pointless) System.err.println("request is required: "); } else { switch (request) { case EMPTY_STRING: // do exactly what the specification required // (completely pointless) System.err.println("request must not be empty: "); break; case REQUEST: // do exactly what the specification required // (completely pointless) returnValue = 7; // break; default: // my best guess about what we should really do returnValue = request.length(); break; } } return returnValue; } private static Integer sillyMethod4(String request) { Integer returnValue = -1; if (requestMessageMap.containsKey(request)) { System.err.println(requestMessageMap.get(request)); } else { returnValue = request.length(); } return returnValue; } } ``` Output: ``` 7 request must not be empty request is required -1 -1 ```
The method stringLength() should return "7" when you pass it the string "request", a different value when you pass it an empty string, and another different value when you pass it null. Maybe you should take a look at control structures, especially selection statements. Also, your stringLength method would need to return values of type String.
32,042,679
I saw a [twitter post](https://twitter.com/kssreeram/status/627477751797121024) pointing out that -12/10 = -2 in Python. What causes this? I thought the answer should be -1 (truncating toward zero). Why does Python "literally" round down like this? ``` >>> -12/10 -2 >>> 12/10 1 >>> -1*12/10 -2 >>> 12/10 * -1 -1 ```
2015/08/17
[ "https://Stackoverflow.com/questions/32042679", "https://Stackoverflow.com", "https://Stackoverflow.com/users/3474956/" ]
This is due to [int](https://docs.python.org/2/library/functions.html#int) division rounding down (a.k.a. [floor division](http://python-history.blogspot.com.br/2010/08/why-pythons-integer-division-floors.html)): ``` >>> -12/10 -2 >>> -12.0/10 -1.2 >>> 12/10 1 >>> 12.0/10 1.2 ```
This is known as floor division (a.k.a. integer division). In Python 2, this is the default behavior of `/` for integers like `-12/10`. In Python 3, the default behavior is floating point (true) division. To enable the Python 3 behavior in Python 2, use the following import statement: ``` from __future__ import division ``` To use floor division in Python 3, or in Python 2 with that import in effect, use `//`. More information can be found in the [Python documentation](https://docs.python.org/2/whatsnew/2.2.html#pep-238-changing-the-division-operator), "PEP 238: Changing the Division Operator".
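As a quick illustration of the two operators (a minimal sketch; the first line only changes behavior on Python 2):

```python
from __future__ import division  # no effect on Python 3; enables true division on Python 2

print(-12 / 10)   # -1.2  (true division)
print(-12 // 10)  # -2    (floor division: rounds toward negative infinity)
print(12 // 10)   # 1
```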
27,102,518
I need to optimize this regular expression. ``` ^(.+?)\|[\w\d]+?\s+?(\d\d\/\d\d\/\d\d\d\d\s+?\d\d:\d\d:\d\d\.\d\d\d)[\s\d]+?\s+?(\d+?)\s+?\d+?\s+?(\d+?)$ ``` The input is something like this: ``` -tpf0q16|856B 11/20/2014 00:00:00.015 0 0 0 0 0 689 14 689 703 702 701 700 ``` I've already replaced all greedy matches with lazy matches, but that didn't help. I've used DOTALL, but it didn't help either. I use Python and its PCRE-style `re` module; I know about re2 but I can't use it :(
2014/11/24
[ "https://Stackoverflow.com/questions/27102518", "https://Stackoverflow.com", "https://Stackoverflow.com/users/42371/" ]
The first step is to get rid of the unneeded reluctant (a.k.a. "lazy") quantifiers. According to RegexBuddy, your regex: ``` ^(.+?)\|[\w\d]+?\s+?(\d\d\/\d\d\/\d\d\d\d\s+?\d\d:\d\d:\d\d\.\d\d\d)[\s\d]+?\s+?(\d+?)\s+?\d+?\s+?(\d+?)$ ``` ...takes 6425 steps to match your sample string. This one: ``` ^(.+?)\|[\w\d]+\s+(\d\d\/\d\d\/\d\d\d\d\s+\d\d:\d\d:\d\d\.\d\d\d)[\s\d]+\s+(\d+)\s+\d+\s+(\d+)$ ``` ...takes 716 steps. Reluctant quantifiers reduce backtracking by doing more work up front. Your regex wasn't prone to excessive backtracking, so the reluctant quantifiers were *adding* quite a lot to the workload. This version brings it down to 237 steps: ``` ^([^|]+)\|\w+\s+(\d\d/\d\d/\d\d\d\d\s+\d\d:\d\d:\d\d\.\d\d\d)(?:\s+\d+)+\s+(\d+)\s+\d+\s+(\d+)$ ``` It also removes some noise, like the backslash before `/`; and `[\w\d]`, which is exactly the same as `\w`.
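If the pattern is applied line by line over a large log, precompiling it once also avoids repeated compilation overhead. A minimal sketch using the rewritten pattern on the sample line from the question:

```python
import re

# Compile once, reuse for every line.
pattern = re.compile(
    r'^([^|]+)\|\w+\s+'
    r'(\d\d/\d\d/\d\d\d\d\s+\d\d:\d\d:\d\d\.\d\d\d)'
    r'(?:\s+\d+)+\s+(\d+)\s+\d+\s+(\d+)$'
)

line = "-tpf0q16|856B  11/20/2014 00:00:00.015  0 0 0 0 0 689 14 689 703 702 701 700"
m = pattern.match(line)
if m:
    print(m.groups())
    # ('-tpf0q16', '11/20/2014 00:00:00.015', '702', '700')
```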
A bit more optimized. ``` >>> import re >>> s = "-tpf0q16|856B 11/20/2014 00:00:00.015 0 0 0 0 0 689 14 689 703 702 701 700" >>> re.findall(r'(?m)^([^|]+)\|[\w\d]+?\s+?(\d{2}\/\d{2}\/\d{4}\s+\d{2}:\d{2}:\d{2}\.\d{3})[\s\d]+?(\d+)\s+\d+\s+(\d+?)$', s) [('-tpf0q16', '11/20/2014 00:00:00.015', '702', '700')] ``` [DEMO](http://regex101.com/r/zU7dA5/11)
24,995,438
I can run iPython, but when I try to initiate a notebook I get the following error: ``` ~ ipython notebook Traceback (most recent call last): File "/usr/local/bin/ipython", line 8, in <module> load_entry_point('ipython==2.1.0', 'console_scripts', 'ipython')() File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/__init__.py", line 120, in start_ipython return launch_new_instance(argv=argv, **kwargs) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 563, in launch_instance app.initialize(argv) File "<string>", line 2, in initialize File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 92, in catch_config_error return method(app, *args, **kwargs) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/terminal/ipapp.py", line 321, in initialize super(TerminalIPythonApp, self).initialize(argv) File "<string>", line 2, in initialize File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 92, in catch_config_error return method(app, *args, **kwargs) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/core/application.py", line 381, in initialize self.parse_command_line(argv) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/terminal/ipapp.py", line 316, in parse_command_line return super(TerminalIPythonApp, self).parse_command_line(argv) File "<string>", line 2, in parse_command_line File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 92, in catch_config_error return method(app, *args, **kwargs) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 475, in parse_command_line return self.initialize_subcommand(subc, subargv) File "<string>", line 2, in initialize_subcommand File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 92, in catch_config_error return method(app, *args, **kwargs) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 406, in initialize_subcommand subapp = import_item(subapp) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/utils/importstring.py", line 42, in import_item module = __import__(package, fromlist=[obj]) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/html/notebookapp.py", line 39, in <module> check_for_zmq('2.1.11', 'IPython.html') File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/utils/zmqrelated.py", line 37, in check_for_zmq raise ImportError("%s requires pyzmq >= %s"%(required_by, minimum_version)) ImportError: IPython.html requires pyzmq >= 2.1.11 ``` But as far as I can see, I already have the pyzmq package installed. ``` ~ pip install pyzmq Requirement already satisfied (use --upgrade to upgrade): pyzmq in /Library/Python/2.7/site-packages/pyzmq-14.3.1-py2.7-macosx-10.6-intel.egg Cleaning up... ```
2014/07/28
[ "https://Stackoverflow.com/questions/24995438", "https://Stackoverflow.com", "https://Stackoverflow.com/users/54564/" ]
Arg. The *ipython* install is a little idiosyncratic. Here's what I had to do to resolve this: ``` $ pip uninstall ipython $ pip install "ipython[all]" ``` The issue is that notebooks have their own set of dependencies, which aren't installed with `pip install ipython`. However, having installed *ipython*, pip doesn't see the need to add anything if you then try the `[all]` form. As mentioned in comments for some shells (e.g. zsh) it's necessary to escape or quote the square brackets (`pip install ipython\[all\]` would also work).
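To confirm which pyzmq the notebook machinery actually sees, you can check it from the same interpreter that runs IPython (a minimal sketch):

```python
# Run with the same interpreter that launches IPython,
# e.g. whatever `which ipython` points at.
import zmq
print(zmq.pyzmq_version())  # version of the pyzmq bindings
print(zmq.zmq_version())    # version of the underlying libzmq
```

If this import fails or reports an old version, pyzmq is installed for a different interpreter than the one running `ipython`.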
For me (Ubuntu 14.04.2), installing via the Synaptic package manager worked: the package is called python3-zmq, and installing it also pulls in libzmq3. After that, check whether pyzmq is correctly installed: ``` pip list ``` Then I installed ipython: ``` pip install "ipython[all]" ```
24,995,438
I can run iPython, but when I try to initiate a notebook I get the following error: ``` ~ ipython notebook Traceback (most recent call last): File "/usr/local/bin/ipython", line 8, in <module> load_entry_point('ipython==2.1.0', 'console_scripts', 'ipython')() File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/__init__.py", line 120, in start_ipython return launch_new_instance(argv=argv, **kwargs) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 563, in launch_instance app.initialize(argv) File "<string>", line 2, in initialize File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 92, in catch_config_error return method(app, *args, **kwargs) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/terminal/ipapp.py", line 321, in initialize super(TerminalIPythonApp, self).initialize(argv) File "<string>", line 2, in initialize File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 92, in catch_config_error return method(app, *args, **kwargs) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/core/application.py", line 381, in initialize self.parse_command_line(argv) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/terminal/ipapp.py", line 316, in parse_command_line return super(TerminalIPythonApp, self).parse_command_line(argv) File "<string>", line 2, in parse_command_line File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 92, in catch_config_error return method(app, *args, **kwargs) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 475, in parse_command_line return self.initialize_subcommand(subc, subargv) File "<string>", line 2, in initialize_subcommand File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 92, in catch_config_error return method(app, *args, **kwargs) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/config/application.py", line 406, in initialize_subcommand subapp = import_item(subapp) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/utils/importstring.py", line 42, in import_item module = __import__(package, fromlist=[obj]) File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/html/notebookapp.py", line 39, in <module> check_for_zmq('2.1.11', 'IPython.html') File "/Library/Python/2.7/site-packages/ipython-2.1.0-py2.7.egg/IPython/utils/zmqrelated.py", line 37, in check_for_zmq raise ImportError("%s requires pyzmq >= %s"%(required_by, minimum_version)) ImportError: IPython.html requires pyzmq >= 2.1.11 ``` But as far as I can see, I already have the pyzmq package installed. ``` ~ pip install pyzmq Requirement already satisfied (use --upgrade to upgrade): pyzmq in /Library/Python/2.7/site-packages/pyzmq-14.3.1-py2.7-macosx-10.6-intel.egg Cleaning up... ```
2014/07/28
[ "https://Stackoverflow.com/questions/24995438", "https://Stackoverflow.com", "https://Stackoverflow.com/users/54564/" ]
Arg. The *ipython* install is a little idiosyncratic. Here's what I had to do to resolve this: ``` $ pip uninstall ipython $ pip install "ipython[all]" ``` The issue is that notebooks have their own set of dependencies, which aren't installed with `pip install ipython`. However, having installed *ipython*, pip doesn't see the need to add anything if you then try the `[all]` form. As mentioned in comments for some shells (e.g. zsh) it's necessary to escape or quote the square brackets (`pip install ipython\[all\]` would also work).
Using ``` ipython2.7 notebook ``` instead of ``` ipython notebook ``` did the trick for me.
48,452,294
I have a python script that accepts a `-f` flag, and appends multiple uses of the flag. For example, if I run `python myscript -f file1.txt -f file2.txt`, I would have a list of files, `files=['file1.txt', 'file2.txt']`. This works great, but I'm wondering how I can automatically use the results of a find command to append as many `-f` flags as there are files. I've tried: ``` find ./ -iname '*.txt' -print0 | xargs python myscript.py -f ``` But it only grabs the first file (only the first name ends up paired with `-f`).
2018/01/25
[ "https://Stackoverflow.com/questions/48452294", "https://Stackoverflow.com", "https://Stackoverflow.com/users/4509191/" ]
With the caveat that this will fail if there are more files than will fit on a single command line (whereas `xargs` would run `myscript.py` multiple times, each with a subset of the full list of arguments): ``` #!/usr/bin/env bash args=( ) while IFS= read -r -d '' name; do args+=( -f "$name" ) done < <(find . -iname '*.txt' -print0) python myscript.py "${args[@]}" ``` If you want to do this **safely** in a way that tolerates an arbitrary number of filenames, you're better off using a long-form option -- such as `--file` rather than `-f` -- with the `=` separator allowing the individual name to be passed as part of the same argv entry, thus preventing `xargs` from splitting a filename apart from the sigil that precedes it: ``` #!/usr/bin/env bash # This requires -printf, a GNU find extension find . -iname '*.txt' -printf '--file=%p\0' | xargs -0 python myscript.py ``` ...or, more portably (running on MacOS, albeit still requiring a shell -- such as bash -- that can handle NUL-delimited reads): ``` #!/usr/bin/env bash # requires find -print0 and xargs -0; these extensions are available on BSD as well as GNU find . -iname '*.txt' -print0 | while IFS= read -r -d '' f; do printf '--file=%s\0' "$f"; done | xargs -0 python myscript.py ```
Your title seems to imply that you can modify the script. In that case, use the `nargs` (number of args) option to allow more arguments for the `-f` flag: ``` parser = argparse.ArgumentParser() parser.add_argument('--files', '-f', nargs='+') args = parser.parse_args() print(args.files) ``` Then you can use your find command easily: ``` 15:44 $ find . -depth 1 | xargs python args.py -f ['./args.py', './for_clint', './install.sh', './sys_user.json'] ``` Otherwise, if you can't modify the script, see @CharlesDuffy's answer.
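If the script must keep accepting repeated `-f` flags (the behavior described in the question) rather than switching to `nargs`, that corresponds to argparse's `append` action; a minimal sketch, assuming the question's script uses argparse:

```python
import argparse

parser = argparse.ArgumentParser()
# each occurrence of -f appends one filename to args.files
parser.add_argument('-f', '--file', dest='files', action='append', default=[])
args = parser.parse_args()
print(args.files)
```

With this declaration, `python myscript.py -f file1.txt -f file2.txt` yields `['file1.txt', 'file2.txt']`, which is why the `find` output has to be rewritten into `-f name` (or `--file=name`) pairs before handing it to `xargs`.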
22,597,089
There are a lot of questions about installing matplotlib on mac, but as far as I can tell I've installed it correctly using pip and it's just not working. When I try and run a script with matplotlib.pyplot.plot(x, y) nothing happens. No error, no nothing. ``` import matplotlib.pyplot x = [1,2,3,4] y = [4,3,2,1] matplotlib.pyplot.plot(x, y) ``` When I run this in the terminal in a file called pyplot.py I get this: ``` pgcudahy$ python pyplot.py pgcudahy$ ``` No errors, but no plot either. In an interactive python shell I get this: ``` >>> import matplotlib >>> print matplotlib.__version__ 1.1.1 >>> print matplotlib.__file__ /System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/matplotlib/__init__.pyc ``` Which leads me to believe it's installed correctly. Any ideas?
2014/03/23
[ "https://Stackoverflow.com/questions/22597089", "https://Stackoverflow.com", "https://Stackoverflow.com/users/2930596/" ]
You need to call the `show` function. ``` import matplotlib.pyplot as plt x = [1,2,3,4] y = [4,3,2,1] plt.plot(x, y) plt.show() ```
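If the script has to run without a display (for example over ssh), saving the figure instead of showing it may be more convenient; a minimal sketch using the non-interactive Agg backend:

```python
import matplotlib
matplotlib.use('Agg')  # select a non-GUI backend before importing pyplot
import matplotlib.pyplot as plt

plt.plot([1, 2, 3, 4], [4, 3, 2, 1])
plt.savefig('pyplot.png')  # writes the figure to a file instead of opening a window
```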
It's likely that the plot is hidden behind the editor window or the spyder window on the screen. Instead of changing matplotlib settings, just learn the trackpad gestures of the Mac; "app exposé" is the one you need to make your plots visible (see System Preferences, Trackpad). Then click on the figure to raise it to the front.
14,938,541
I use matplotlib to plot a scatter chart: ![enter image description here](https://i.stack.imgur.com/Nllqp.png) And I label the bubbles using a transparent box, according to the tip at [How to annotate point on a scatter automatically placed arrow](https://stackoverflow.com/q/9074996/7758804) Here is the code: ``` if show_annote: for i in range(len(x)): annote_text = annotes[i][0][0] # STK_ID ax.annotate(annote_text, xy=(x[i], y[i]), xytext=(-10,3), textcoords='offset points', ha='center', va='bottom', bbox=dict(boxstyle='round,pad=0.2', fc='yellow', alpha=0.2), fontproperties=ANNOTE_FONT) ``` and the resulting plot: ![enter image description here](https://i.stack.imgur.com/v5FET.png) But there is still room for improvement to reduce overlap (for instance, the label box offset is fixed at (-10,3)). Are there algorithms that can: 1. dynamically change the offset of the label box according to the crowdedness of its neighbourhood 2. dynamically place the label box remotely and add an arrow line between bubble and label box 3. somewhat change the label orientation 4. treat a label\_box overlapping a bubble as better than a label\_box overlapping another label\_box? I just want to make the chart easy for human eyes to comprehend, so some overlap is OK; it's not as rigid a constraint as <http://en.wikipedia.org/wiki/Automatic_label_placement> suggests. And the bubble quantity within the chart is less than 150 most of the time. I find the so-called `Force-based label placement` <http://bl.ocks.org/MoritzStefaner/1377729> quite interesting. I don't know if there is any python code/package available that implements the algorithm. I am not an academic guy and am not looking for an optimum solution; my python code needs to label many, many charts, so speed/memory is within the scope of consideration. I am looking for a quick and effective solution. Any help (code, algorithm, tips, thoughts) on this subject? Thanks.
2013/02/18
[ "https://Stackoverflow.com/questions/14938541", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1072888/" ]
It is a little rough around the edges (I can't quite figure out how to scale the relative strengths of the spring network vs the repulsive force, and the bounding box is a bit screwed up), but this is a decent start: ``` import networkx as nx import numpy as np import matplotlib.pyplot as plt N = 15 scatter_data = np.random.rand(3, N) G = nx.Graph() data_nodes = [] init_pos = {} for j, b in enumerate(scatter_data.T): x, y, _ = b data_str = 'data_{0}'.format(j) ano_str = 'ano_{0}'.format(j) G.add_node(data_str) G.add_node(ano_str) G.add_edge(data_str, ano_str) data_nodes.append(data_str) init_pos[data_str] = (x, y) init_pos[ano_str] = (x, y) pos = nx.spring_layout(G, pos=init_pos, fixed=data_nodes) ax = plt.gca() ax.scatter(scatter_data[0], scatter_data[1], c=scatter_data[2], s=scatter_data[2]*150) for j in range(N): data_str = 'data_{0}'.format(j) ano_str = 'ano_{0}'.format(j) ax.annotate(ano_str, xy=pos[data_str], xycoords='data', xytext=pos[ano_str], textcoords='data', arrowprops=dict(arrowstyle="->", connectionstyle="arc3")) all_pos = np.vstack(list(pos.values())) mins = np.min(all_pos, 0) maxs = np.max(all_pos, 0) ax.set_xlim([mins[0], maxs[0]]) ax.set_ylim([mins[1], maxs[1]]) plt.draw() ``` ![sample image](https://i.stack.imgur.com/xhM4q.png) How well it works depends a bit on how your data is clustered.
We can use plotly for this, but it can't place every label without overlap when there is a lot of data; instead we can zoom in and out. ``` import plotly.express as px df = px.data.gapminder().query("year==2007 and continent=='Americas'") fig = px.scatter(df, x="gdpPercap", y="lifeExp", text="country", log_x=True, size_max=100, color="lifeExp", title="Life Expectancy") fig.update_traces(textposition='top center') fig.show() ``` Output: [![enter image description here](https://i.stack.imgur.com/Ei4n6.gif)](https://i.stack.imgur.com/Ei4n6.gif)
14,938,541
I use matplotlib to plot a scatter chart: ![enter image description here](https://i.stack.imgur.com/Nllqp.png) And I label the bubbles using a transparent box, according to the tip at [How to annotate point on a scatter automatically placed arrow](https://stackoverflow.com/q/9074996/7758804) Here is the code: ``` if show_annote: for i in range(len(x)): annote_text = annotes[i][0][0] # STK_ID ax.annotate(annote_text, xy=(x[i], y[i]), xytext=(-10,3), textcoords='offset points', ha='center', va='bottom', bbox=dict(boxstyle='round,pad=0.2', fc='yellow', alpha=0.2), fontproperties=ANNOTE_FONT) ``` and the resulting plot: ![enter image description here](https://i.stack.imgur.com/v5FET.png) But there is still room for improvement to reduce overlap (for instance, the label box offset is fixed at (-10,3)). Are there algorithms that can: 1. dynamically change the offset of the label box according to the crowdedness of its neighbourhood 2. dynamically place the label box remotely and add an arrow line between bubble and label box 3. somewhat change the label orientation 4. treat a label\_box overlapping a bubble as better than a label\_box overlapping another label\_box? I just want to make the chart easy for human eyes to comprehend, so some overlap is OK; it's not as rigid a constraint as <http://en.wikipedia.org/wiki/Automatic_label_placement> suggests. And the bubble quantity within the chart is less than 150 most of the time. I find the so-called `Force-based label placement` <http://bl.ocks.org/MoritzStefaner/1377729> quite interesting. I don't know if there is any python code/package available that implements the algorithm. I am not an academic guy and am not looking for an optimum solution; my python code needs to label many, many charts, so speed/memory is within the scope of consideration. I am looking for a quick and effective solution. Any help (code, algorithm, tips, thoughts) on this subject? Thanks.
2013/02/18
[ "https://Stackoverflow.com/questions/14938541", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1072888/" ]
It is a little rough around the edges (I can't quite figure out how to scale the relative strengths of the spring network vs the repulsive force, and the bounding box is a bit screwed up), but this is a decent start: ``` import networkx as nx import numpy as np import matplotlib.pyplot as plt N = 15 scatter_data = np.random.rand(3, N) G = nx.Graph() data_nodes = [] init_pos = {} for j, b in enumerate(scatter_data.T): x, y, _ = b data_str = 'data_{0}'.format(j) ano_str = 'ano_{0}'.format(j) G.add_node(data_str) G.add_node(ano_str) G.add_edge(data_str, ano_str) data_nodes.append(data_str) init_pos[data_str] = (x, y) init_pos[ano_str] = (x, y) pos = nx.spring_layout(G, pos=init_pos, fixed=data_nodes) ax = plt.gca() ax.scatter(scatter_data[0], scatter_data[1], c=scatter_data[2], s=scatter_data[2]*150) for j in range(N): data_str = 'data_{0}'.format(j) ano_str = 'ano_{0}'.format(j) ax.annotate(ano_str, xy=pos[data_str], xycoords='data', xytext=pos[ano_str], textcoords='data', arrowprops=dict(arrowstyle="->", connectionstyle="arc3")) all_pos = np.vstack(list(pos.values())) mins = np.min(all_pos, 0) maxs = np.max(all_pos, 0) ax.set_xlim([mins[0], maxs[0]]) ax.set_ylim([mins[1], maxs[1]]) plt.draw() ``` ![sample image](https://i.stack.imgur.com/xhM4q.png) How well it works depends a bit on how your data is clustered.
Just created another quick solution that is also very fast: [textalloc](https://github.com/ckjellson/textalloc) In this case you could do something like this: ``` import textalloc as ta import numpy as np import matplotlib.pyplot as plt np.random.seed(2022) N = 30 scatter_data = np.random.rand(N, 3)*10 fig, ax = plt.subplots() ax.scatter(scatter_data[:, 0], scatter_data[:, 1], c=scatter_data[:, 2], s=scatter_data[:, 2] * 50, zorder=10, alpha=0.5) text_list = ['ano-{}'.format(i) for i in range(N)] ta.allocate_text(fig, ax, scatter_data[:, 0], scatter_data[:, 1], text_list, x_scatter=scatter_data[:, 0], y_scatter=scatter_data[:, 1], max_distance=0.2, min_distance=0.04, margin=0.039, linewidth=0.5, nbr_candidates=400) plt.show() ``` [![scatterplot](https://i.stack.imgur.com/jKY77.png)](https://i.stack.imgur.com/jKY77.png)
14,938,541
I use matplotlib to plot a scatter chart: ![enter image description here](https://i.stack.imgur.com/Nllqp.png) And I label the bubbles using a transparent box, according to the tip at [How to annotate point on a scatter automatically placed arrow](https://stackoverflow.com/q/9074996/7758804) Here is the code: ``` if show_annote: for i in range(len(x)): annote_text = annotes[i][0][0] # STK_ID ax.annotate(annote_text, xy=(x[i], y[i]), xytext=(-10,3), textcoords='offset points', ha='center', va='bottom', bbox=dict(boxstyle='round,pad=0.2', fc='yellow', alpha=0.2), fontproperties=ANNOTE_FONT) ``` and the resulting plot: ![enter image description here](https://i.stack.imgur.com/v5FET.png) But there is still room for improvement to reduce overlap (for instance, the label box offset is fixed at (-10,3)). Are there algorithms that can: 1. dynamically change the offset of the label box according to the crowdedness of its neighbourhood 2. dynamically place the label box remotely and add an arrow line between bubble and label box 3. somewhat change the label orientation 4. treat a label\_box overlapping a bubble as better than a label\_box overlapping another label\_box? I just want to make the chart easy for human eyes to comprehend, so some overlap is OK; it's not as rigid a constraint as <http://en.wikipedia.org/wiki/Automatic_label_placement> suggests. And the bubble quantity within the chart is less than 150 most of the time. I find the so-called `Force-based label placement` <http://bl.ocks.org/MoritzStefaner/1377729> quite interesting. I don't know if there is any python code/package available that implements the algorithm. I am not an academic guy and am not looking for an optimum solution; my python code needs to label many, many charts, so speed/memory is within the scope of consideration. I am looking for a quick and effective solution. Any help (code, algorithm, tips, thoughts) on this subject? Thanks.
2013/02/18
[ "https://Stackoverflow.com/questions/14938541", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1072888/" ]
The following builds on [tcaswell's answer](https://stackoverflow.com/a/15859652/190597). Networkx layout methods such as `nx.spring_layout` rescale the positions so that they all fit in a unit square (by default). Even the positions of the fixed `data_nodes` are rescaled. So, to apply the `pos` to the original `scatter_data`, an unshifting and unscaling must be performed. Note also that `nx.spring_layout` has a `k` parameter which controls the optimal distance between nodes. As `k` increases, so does the distance of the annotations from the data points. ``` import numpy as np import matplotlib.pyplot as plt import networkx as nx np.random.seed(2016) N = 20 scatter_data = np.random.rand(N, 3)*10 def repel_labels(ax, x, y, labels, k=0.01): G = nx.DiGraph() data_nodes = [] init_pos = {} for xi, yi, label in zip(x, y, labels): data_str = 'data_{0}'.format(label) G.add_node(data_str) G.add_node(label) G.add_edge(label, data_str) data_nodes.append(data_str) init_pos[data_str] = (xi, yi) init_pos[label] = (xi, yi) pos = nx.spring_layout(G, pos=init_pos, fixed=data_nodes, k=k) # undo spring_layout's rescaling pos_after = np.vstack([pos[d] for d in data_nodes]) pos_before = np.vstack([init_pos[d] for d in data_nodes]) scale, shift_x = np.polyfit(pos_after[:,0], pos_before[:,0], 1) scale, shift_y = np.polyfit(pos_after[:,1], pos_before[:,1], 1) shift = np.array([shift_x, shift_y]) for key, val in pos.items(): pos[key] = (val*scale) + shift for label, data_str in G.edges(): ax.annotate(label, xy=pos[data_str], xycoords='data', xytext=pos[label], textcoords='data', arrowprops=dict(arrowstyle="->", shrinkA=0, shrinkB=0, connectionstyle="arc3", color='red'), ) # expand limits all_pos = np.vstack(list(pos.values())) x_span, y_span = np.ptp(all_pos, axis=0) mins = np.min(all_pos-x_span*0.15, 0) maxs = np.max(all_pos+y_span*0.15, 0) ax.set_xlim([mins[0], maxs[0]]) ax.set_ylim([mins[1], maxs[1]]) fig, ax = plt.subplots() ax.scatter(scatter_data[:, 0], scatter_data[:, 1], c=scatter_data[:, 2], s=scatter_data[:, 2] * 150) labels = ['ano_{}'.format(i) for i in range(N)] repel_labels(ax, scatter_data[:, 0], scatter_data[:, 1], labels, k=0.008) plt.show() ``` with `k=0.011` yields [![enter image description here](https://i.stack.imgur.com/Za4XZ.png)](https://i.stack.imgur.com/Za4XZ.png) and with `k=0.008` yields [![enter image description here](https://i.stack.imgur.com/AcTUI.png)](https://i.stack.imgur.com/AcTUI.png)
We can use plotly for this, but it can't place every label without overlap when there is a lot of data; instead we can zoom in and out. ``` import plotly.express as px df = px.data.gapminder().query("year==2007 and continent=='Americas'") fig = px.scatter(df, x="gdpPercap", y="lifeExp", text="country", log_x=True, size_max=100, color="lifeExp", title="Life Expectancy") fig.update_traces(textposition='top center') fig.show() ``` Output: [![enter image description here](https://i.stack.imgur.com/Ei4n6.gif)](https://i.stack.imgur.com/Ei4n6.gif)
14,938,541
I use matplotlib to plot a scatter chart: ![enter image description here](https://i.stack.imgur.com/Nllqp.png) And I label the bubbles using a transparent box, according to the tip at [How to annotate point on a scatter automatically placed arrow](https://stackoverflow.com/q/9074996/7758804) Here is the code: ``` if show_annote: for i in range(len(x)): annote_text = annotes[i][0][0] # STK_ID ax.annotate(annote_text, xy=(x[i], y[i]), xytext=(-10,3), textcoords='offset points', ha='center', va='bottom', bbox=dict(boxstyle='round,pad=0.2', fc='yellow', alpha=0.2), fontproperties=ANNOTE_FONT) ``` and the resulting plot: ![enter image description here](https://i.stack.imgur.com/v5FET.png) But there is still room for improvement to reduce overlap (for instance, the label box offset is fixed at (-10,3)). Are there algorithms that can: 1. dynamically change the offset of the label box according to the crowdedness of its neighbourhood 2. dynamically place the label box remotely and add an arrow line between bubble and label box 3. somewhat change the label orientation 4. treat a label\_box overlapping a bubble as better than a label\_box overlapping another label\_box? I just want to make the chart easy for human eyes to comprehend, so some overlap is OK; it's not as rigid a constraint as <http://en.wikipedia.org/wiki/Automatic_label_placement> suggests. And the bubble quantity within the chart is less than 150 most of the time. I find the so-called `Force-based label placement` <http://bl.ocks.org/MoritzStefaner/1377729> quite interesting. I don't know if there is any python code/package available that implements the algorithm. I am not an academic guy and am not looking for an optimum solution; my python code needs to label many, many charts, so speed/memory is within the scope of consideration. I am looking for a quick and effective solution. Any help (code, algorithm, tips, thoughts) on this subject? Thanks.
2013/02/18
[ "https://Stackoverflow.com/questions/14938541", "https://Stackoverflow.com", "https://Stackoverflow.com/users/1072888/" ]
The following builds on [tcaswell's answer](https://stackoverflow.com/a/15859652/190597). Networkx layout methods such as `nx.spring_layout` rescale the positions so that they all fit in a unit square (by default). Even the positions of the fixed `data_nodes` are rescaled. So, to apply the `pos` to the original `scatter_data`, an unshifting and unscaling must be performed. Note also that `nx.spring_layout` has a `k` parameter which controls the optimal distance between nodes. As `k` increases, so does the distance of the annotations from the data points. ``` import numpy as np import matplotlib.pyplot as plt import networkx as nx np.random.seed(2016) N = 20 scatter_data = np.random.rand(N, 3)*10 def repel_labels(ax, x, y, labels, k=0.01): G = nx.DiGraph() data_nodes = [] init_pos = {} for xi, yi, label in zip(x, y, labels): data_str = 'data_{0}'.format(label) G.add_node(data_str) G.add_node(label) G.add_edge(label, data_str) data_nodes.append(data_str) init_pos[data_str] = (xi, yi) init_pos[label] = (xi, yi) pos = nx.spring_layout(G, pos=init_pos, fixed=data_nodes, k=k) # undo spring_layout's rescaling pos_after = np.vstack([pos[d] for d in data_nodes]) pos_before = np.vstack([init_pos[d] for d in data_nodes]) scale, shift_x = np.polyfit(pos_after[:,0], pos_before[:,0], 1) scale, shift_y = np.polyfit(pos_after[:,1], pos_before[:,1], 1) shift = np.array([shift_x, shift_y]) for key, val in pos.items(): pos[key] = (val*scale) + shift for label, data_str in G.edges(): ax.annotate(label, xy=pos[data_str], xycoords='data', xytext=pos[label], textcoords='data', arrowprops=dict(arrowstyle="->", shrinkA=0, shrinkB=0, connectionstyle="arc3", color='red'), ) # expand limits all_pos = np.vstack(list(pos.values())) x_span, y_span = np.ptp(all_pos, axis=0) mins = np.min(all_pos-x_span*0.15, 0) maxs = np.max(all_pos+y_span*0.15, 0) ax.set_xlim([mins[0], maxs[0]]) ax.set_ylim([mins[1], maxs[1]]) fig, ax = plt.subplots() ax.scatter(scatter_data[:, 0], scatter_data[:, 1], c=scatter_data[:, 2], s=scatter_data[:, 2] * 150) labels = ['ano_{}'.format(i) for i in range(N)] repel_labels(ax, scatter_data[:, 0], scatter_data[:, 1], labels, k=0.008) plt.show() ``` with `k=0.011` yields [![enter image description here](https://i.stack.imgur.com/Za4XZ.png)](https://i.stack.imgur.com/Za4XZ.png) and with `k=0.008` yields [![enter image description here](https://i.stack.imgur.com/AcTUI.png)](https://i.stack.imgur.com/AcTUI.png)
Just created another quick solution that is also very fast: [textalloc](https://github.com/ckjellson/textalloc) In this case you could do something like this: ``` import textalloc as ta import numpy as np import matplotlib.pyplot as plt np.random.seed(2022) N = 30 scatter_data = np.random.rand(N, 3)*10 fig, ax = plt.subplots() ax.scatter(scatter_data[:, 0], scatter_data[:, 1], c=scatter_data[:, 2], s=scatter_data[:, 2] * 50, zorder=10, alpha=0.5) text_list = ['ano-{}'.format(i) for i in range(N)] ta.allocate_text(fig, ax, scatter_data[:, 0], scatter_data[:, 1], text_list, x_scatter=scatter_data[:, 0], y_scatter=scatter_data[:, 1], max_distance=0.2, min_distance=0.04, margin=0.039, linewidth=0.5, nbr_candidates=400) plt.show() ``` [![scatterplot](https://i.stack.imgur.com/jKY77.png)](https://i.stack.imgur.com/jKY77.png)