_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q40700
Daemon._write_pidfile
train
def _write_pidfile(self): """Create, write to, and lock the PID file.""" flags = os.O_CREAT | os.O_RDWR try: # Some systems don't have os.O_EXLOCK flags = flags | os.O_EXLOCK except AttributeError: pass self._pid_fd = os.open(self.pidfile, flags, 0o666 & ~self.umask) os.write(self._pid_fd, str(os.getpid()).encode('utf-8'))
python
{ "resource": "" }
q40701
Daemon._close_pidfile
train
def _close_pidfile(self): """Closes and removes the PID file.""" if self._pid_fd is not None: os.close(self._pid_fd) try: os.remove(self.pidfile) except OSError as ex: if ex.errno != errno.ENOENT: raise
python
{ "resource": "" }
q40702
Daemon._prevent_core_dump
train
def _prevent_core_dump(cls): """Prevent the process from generating a core dump.""" try: # Try to get the current limit resource.getrlimit(resource.RLIMIT_CORE) except ValueError: # System doesn't support the RLIMIT_CORE resource limit return else: # Set the soft and hard limits for core dump size to zero resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
python
{ "resource": "" }
q40703
Daemon._setup_environment
train
def _setup_environment(self):
    """Prepare the process environment prior to daemonizing.

    Chroots (if configured), disables core dumps, switches the working
    directory, prepares the PID file directory, applies the umask, and
    finally drops privileges to the configured UID/GID.  Every failure
    is wrapped in a DaemonError.
    """
    # Remember where we started so reload() can re-exec from the same CWD
    self._orig_workdir = os.getcwd()
    if self.chrootdir is not None:
        try:
            # chdir into the jail first, then pivot the root
            os.chdir(self.chrootdir)
            os.chroot(self.chrootdir)
        except Exception as err:
            raise DaemonError('Unable to change root directory '
                              '({error})'.format(error=str(err)))
    # Core dumps could leak secrets; turn them off early
    self._prevent_core_dump()
    try:
        os.chdir(self.workdir)
    except Exception as err:
        raise DaemonError('Unable to change working directory '
                          '({error})'.format(error=str(err)))
    # Make sure the directory that will hold the PID file exists
    self._setup_piddir()
    try:
        os.umask(self.umask)
    except Exception as err:
        raise DaemonError('Unable to change file creation mask '
                          '({error})'.format(error=str(err)))
    try:
        # Group before user: after setuid we may lack permission to setgid
        os.setgid(self.gid)
        os.setuid(self.uid)
    except Exception as err:
        raise DaemonError('Unable to setuid or setgid '
                          '({error})'.format(error=str(err)))
python
{ "resource": "" }
q40704
Daemon._reset_file_descriptors
train
def _reset_file_descriptors(self):
    """Close open file descriptors and point the std streams at /dev/null."""
    if self.close_open_files:
        # Use the hard NOFILE limit as the upper bound of FDs to close
        max_fds = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
        if max_fds == resource.RLIM_INFINITY:
            # "Unlimited" is useless as a loop bound; pick a sane cap
            max_fds = 2048
    else:
        # Only STDIN/STDOUT/STDERR need resetting
        max_fds = 3
    for fd in range(max_fds):
        try:
            os.close(fd)
        except OSError:
            # FD wasn't open; fine
            pass
    # Re-open FDs 0-2 on /dev/null so later writes don't explode
    null_fd = os.open(os.devnull, os.O_RDWR)
    for std_fd in (0, 1, 2):
        os.dup2(null_fd, std_fd)
python
{ "resource": "" }
q40705
Daemon._is_socket
train
def _is_socket(cls, stream): """Check if the given stream is a socket.""" try: fd = stream.fileno() except ValueError: # If it has no file descriptor, it's not a socket return False sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW) try: # This will raise a socket.error if it's not a socket sock.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE) except socket.error as ex: if ex.args[0] != errno.ENOTSOCK: # It must be a socket return True else: # If an exception wasn't raised, it's a socket return True
python
{ "resource": "" }
q40706
Daemon._pid_is_alive
train
def _pid_is_alive(cls, pid, timeout):
    """Return True if *pid* is still running after up to *timeout* seconds.

    :param pid: the process ID to check
    :param timeout: how long to wait for the process to exit
    """
    try:
        process = psutil.Process(pid)
    except psutil.NoSuchProcess:
        # Already gone
        return False
    try:
        process.wait(timeout=timeout)
    except psutil.TimeoutExpired:
        # Still alive when the wait timed out
        return True
    # wait() returned: the process has exited
    return False
python
{ "resource": "" }
q40707
Daemon._is_detach_necessary
train
def _is_detach_necessary(cls): """Check if detaching the process is even necessary.""" if os.getppid() == 1: # Process was started by init return False if cls._is_socket(sys.stdin): # If STDIN is a socket, the daemon was started by a super-server return False return True
python
{ "resource": "" }
q40708
Daemon._detach_process
train
def _detach_process(self):
    """Detach from the controlling terminal via the double-fork dance.

    The first parent waits on the intermediate child, which in turn
    watches the daemon for one second, so the shell only gets its
    prompt back after we know whether the daemon survived startup.
    """
    # First fork: return control to the shell
    pid = os.fork()
    if pid > 0:
        # Wait for the intermediate child, which verifies the daemon
        # is actually running before it exits
        os.waitpid(pid, 0)
        sys.exit(0)
    # Become process-group and session leader
    os.setsid()
    # Second fork: the session leader exits, so the daemon can never
    # reacquire a controlling terminal
    pid = os.fork()
    if pid > 0:
        time.sleep(1)
        # After one second, check whether the daemon died immediately
        status = os.waitpid(pid, os.WNOHANG)
        if status[0] == pid:
            # BUG FIX: the exit code is encoded in the high byte of the
            # wait status; the original's ``status[1] % 255`` garbled
            # it.  Use the portable decoder.
            exitcode = os.WEXITSTATUS(status[1])
            self._emit_failed()
            self._emit_error('Child exited immediately with exit '
                             'code {code}'.format(code=exitcode))
            sys.exit(exitcode)
        else:
            self._emit_ok()
            sys.exit(0)
    self._reset_file_descriptors()
python
{ "resource": "" }
q40709
Daemon._orphan_this_process
train
def _orphan_this_process(cls, wait_for_parent=False):
    """Orphan the calling process: fork, exit the parent, keep the child.

    :param wait_for_parent: when True, verify (waiting up to one
        second) that the parent really exited, raising DaemonError if
        it did not.
    """
    # Our current PID becomes the PPID of the child we keep
    ppid = os.getpid()
    if os.fork() > 0:
        # Parent exits so the child is re-parented to init
        sys.exit(0)
    if wait_for_parent and cls._pid_is_alive(ppid, timeout=1):
        raise DaemonError(
            'Parent did not exit while trying to orphan process')
python
{ "resource": "" }
q40710
Daemon._fork_and_supervise_child
train
def _fork_and_supervise_child(cls):
    """Fork a worker child, then babysit the process group until empty.

    The child double-orphans itself so the supervisor never has to
    reap it; the supervisor then polls the process group (via psutil)
    and exits once no non-ancestor processes remain.
    """
    pid = os.fork()
    if pid == 0:
        # Child: orphan immediately so the grandchild can never become
        # a zombie of this supervisor
        cls._orphan_this_process()
        return
    # Reap the first child so it doesn't linger as a zombie
    os.waitpid(pid, 0)
    # PIDs to ignore when scanning the group: PID 0, ourselves, and
    # every ancestor that is still in this process group
    pgid = os.getpgrp()
    skip_pids = set([0, os.getpid()])
    proc = psutil.Process()
    while os.getpgid(proc.pid) == pgid:
        skip_pids.add(proc.pid)
        proc = psutil.Process(proc.ppid())
    while True:
        try:
            # Collect all other live members of our process group
            members = []
            for proc in psutil.process_iter():
                try:
                    if (os.getpgid(proc.pid) == pgid and
                            proc.pid not in skip_pids):
                        members.append(proc)
                except (psutil.NoSuchProcess, OSError):
                    continue
            if members:
                psutil.wait_procs(members, timeout=1)
            else:
                # Group is empty: nothing left to supervise
                cls._emit_message(
                    'All children are gone. Parent is exiting...\n')
                sys.exit(0)
        except KeyboardInterrupt:
            # Swallow Ctrl-C; keep waiting for the children to finish
            cls._emit_message('\n')
            continue
python
{ "resource": "" }
q40711
Daemon._shutdown
train
def _shutdown(self, message=None, code=0): """Shutdown and cleanup everything.""" if self._shutdown_complete: # Make sure we don't accidentally re-run the all cleanup sys.exit(code) if self.shutdown_callback is not None: # Call the shutdown callback with a message suitable for # logging and the exit code self.shutdown_callback(message, code) if self.pidfile is not None: self._close_pidfile() self._shutdown_complete = True sys.exit(code)
python
{ "resource": "" }
q40712
Daemon._handle_terminate
train
def _handle_terminate(self, signal_number, _): """Handle a signal to terminate.""" signal_names = { signal.SIGINT: 'SIGINT', signal.SIGQUIT: 'SIGQUIT', signal.SIGTERM: 'SIGTERM', } message = 'Terminated by {name} ({number})'.format( name=signal_names[signal_number], number=signal_number) self._shutdown(message, code=128+signal_number)
python
{ "resource": "" }
q40713
Daemon._run
train
def _run(self): """Run the worker function with some custom exception handling.""" try: # Run the worker self.worker() except SystemExit as ex: # sys.exit() was called if isinstance(ex.code, int): if ex.code is not None and ex.code != 0: # A custom exit code was specified self._shutdown( 'Exiting with non-zero exit code {exitcode}'.format( exitcode=ex.code), ex.code) else: # A message was passed to sys.exit() self._shutdown( 'Exiting with message: {msg}'.format(msg=ex.code), 1) except Exception as ex: if self.detach: self._shutdown('Dying due to unhandled {cls}: {msg}'.format( cls=ex.__class__.__name__, msg=str(ex)), 127) else: # We're not detached so just raise the exception raise self._shutdown('Shutting down normally')
python
{ "resource": "" }
q40714
Daemon.status
train
def status(self):
    """Print a one-line status report for the daemon.

    CPU and memory usage are aggregated across the daemon's whole
    process group.  Exits with status 1 when the daemon isn't running.

    :raises DaemonError: if no PID file is configured.
    """
    if self.pidfile is None:
        raise DaemonError('Cannot get status of daemon without PID file')
    pid = self._read_pidfile()
    if pid is None:
        self._emit_message(
            '{prog} -- not running\n'.format(prog=self.prog))
        sys.exit(1)
    proc = psutil.Process(pid)
    data = {
        'prog': self.prog,
        'pid': pid,
        'status': proc.status(),
        'uptime': '0m',
        'cpu': 0.0,
        'memory': 0.0,
    }
    # Sum usage over every member of the process group, not just the
    # main PID
    pgid = os.getpgid(pid)
    for member in psutil.process_iter():
        try:
            if os.getpgid(member.pid) == pgid and member.pid != 0:
                data['cpu'] += member.cpu_percent(interval=0.1)
                data['memory'] += member.memory_percent()
        except (psutil.Error, OSError):
            continue
    # Render the uptime like "2d 3h 15m": human-readable, still parsable
    try:
        uptime_mins = int(round((time.time() - proc.create_time()) / 60))
        uptime_hours, uptime_mins = divmod(uptime_mins, 60)
        data['uptime'] = str(uptime_mins) + 'm'
        if uptime_hours:
            uptime_days, uptime_hours = divmod(uptime_hours, 24)
            data['uptime'] = str(uptime_hours) + 'h ' + data['uptime']
            if uptime_days:
                data['uptime'] = str(uptime_days) + 'd ' + data['uptime']
    except psutil.Error:
        pass
    template = ('{prog} -- pid: {pid}, status: {status}, '
                'uptime: {uptime}, %cpu: {cpu:.1f}, %mem: {memory:.1f}\n')
    self._emit_message(template.format(**data))
python
{ "resource": "" }
q40715
Daemon.get_action
train
def get_action(self, action):
    """Resolve an action name (e.g. ``"restart"``) to a bound callable.

    Only callables explicitly tagged with ``__daemonocle_exposed__``
    may be returned.

    :raises DaemonError: if the action is unknown or not exposed.
    """
    func_name = action.replace('-', '_')
    if not hasattr(self, func_name):
        # No attribute by that name at all
        raise DaemonError(
            'Invalid action "{action}"'.format(action=action))
    func = getattr(self, func_name)
    exposed = getattr(func, '__daemonocle_exposed__', False) is True
    if not hasattr(func, '__call__') or not exposed:
        # Exists, but is not a callable that was marked as exposed
        raise DaemonError(
            'Invalid action "{action}"'.format(action=action))
    return func
python
{ "resource": "" }
q40716
Daemon.reload
train
def reload(self):
    """Re-exec the daemon in place.

    May only be called from inside the daemon process itself (checked
    against the PID file).

    :raises DaemonError: when called from any other process.
    """
    pid = self._read_pidfile()
    if pid is None or pid != os.getpid():
        raise DaemonError(
            'Daemon.reload() should only be called by the daemon process '
            'itself')
    # Flag the replacement process as a reload via its environment
    new_environ = os.environ.copy()
    new_environ['DAEMONOCLE_RELOAD'] = 'true'
    # Re-run the same interpreter and argv from the original workdir
    subprocess.call([sys.executable] + sys.argv,
                    cwd=self._orig_workdir, env=new_environ)
    self._shutdown('Shutting down for reload')
python
{ "resource": "" }
q40717
_get_windows
train
def _get_windows(peak_list): """ Given a list of peaks, bin them into windows. """ win_list = [] for t0, t1, hints in peak_list: p_w = (t0, t1) for w in win_list: if p_w[0] <= w[0][1] and p_w[1] >= w[0][0]: w[0] = (min(p_w[0], w[0][0]), max(p_w[1], w[0][1])) w[1].append((t0, t1, hints)) break else: win_list.append([p_w, [(t0, t1, hints)]]) return win_list
python
{ "resource": "" }
q40718
DKCloudAPI.list_order
train
def list_order(self, kitchen, save_to_file=None):
    """List the orders for a kitchen.

    :param kitchen: kitchen name; must be a string.
    :param save_to_file: optional path; when given, the raw response
        dict is pickled there for later inspection.
    :return: DKReturnCode carrying the order-status payload on success.
    """
    rc = DKReturnCode()
    if kitchen is None or isinstance(kitchen, basestring) is False:
        rc.set(rc.DK_FAIL, 'issue with kitchen parameter')
        return rc
    url = '%s/v2/order/status/%s' % (self.get_url_for_direct_rest_call(), kitchen)
    try:
        response = requests.get(url, headers=self._get_common_headers())
        rdict = self._get_json(response)
    # FIX: use the `as` except syntax (valid on Python 2.6+ and 3.x)
    # instead of the Python-2-only comma form.
    except (RequestException, ValueError, TypeError) as c:
        # BUG FIX: the message previously said "get_recipe" -- a
        # copy-paste from another method -- which misled debugging.
        s = "list_order: exception: %s" % str(c)
        rc.set(rc.DK_FAIL, s)
        return rc
    if not DKCloudAPI._valid_response(response):
        arc = DKAPIReturnCode(rdict)
        rc.set(rc.DK_FAIL, arc.get_message())
    else:
        if save_to_file is not None:
            import pickle
            pickle.dump(rdict, open(save_to_file, "wb"))
        rc.set(rc.DK_SUCCESS, None, rdict)
    return rc
python
{ "resource": "" }
q40719
_date_trunc
train
def _date_trunc(value, timeframe):
    """Floor a datetime down to the start of *timeframe*.

    e.g. truncating ``2014-08-13 05:00:00`` to MONTH yields
    ``2014-08-01 00:00:00``.  String input is parsed and the result is
    returned as an ISO string; any other input is treated as a Kronos
    timestamp and a Kronos timestamp is returned.
    """
    if isinstance(value, types.StringTypes):
        value = parse(value)
        return_as_str = True
    else:
        value = kronos_time_to_datetime(value)
        return_as_str = False
    # Dispatch table: one flooring function per supported unit
    truncate = {
        DateTrunc.Unit.SECOND:
            (lambda dt: dt - timedelta(microseconds=dt.microsecond)),
        DateTrunc.Unit.MINUTE:
            (lambda dt: dt - timedelta(seconds=dt.second,
                                       microseconds=dt.microsecond)),
        DateTrunc.Unit.HOUR:
            (lambda dt: dt - timedelta(minutes=dt.minute,
                                       seconds=dt.second,
                                       microseconds=dt.microsecond)),
        DateTrunc.Unit.DAY: lambda dt: dt.date(),
        DateTrunc.Unit.WEEK:
            lambda dt: dt.date() - timedelta(days=dt.weekday()),
        DateTrunc.Unit.MONTH: lambda dt: datetime(dt.year, dt.month, 1),
        DateTrunc.Unit.YEAR: lambda dt: datetime(dt.year, 1, 1),
    }[timeframe]
    value = truncate(value)
    if return_as_str:
        return value.isoformat()
    return datetime_to_kronos_time(value)
python
{ "resource": "" }
q40720
_date_part
train
def _date_part(value, part):
    """Extract one component of a datetime.

    e.g. the WEEK_DAY of ``2014-08-13 05:00:00`` is 2 (Wednesday).
    String input is parsed; anything else is treated as a Kronos
    timestamp.
    """
    if isinstance(value, types.StringTypes):
        value = parse(value)
    else:
        value = kronos_time_to_datetime(value)
    # Dispatch table: one extractor per supported unit
    extract = {
        DatePart.Unit.SECOND: lambda dt: dt.second,
        DatePart.Unit.MINUTE: lambda dt: dt.minute,
        DatePart.Unit.HOUR: lambda dt: dt.hour,
        DatePart.Unit.DAY: lambda dt: dt.day,
        DatePart.Unit.MONTH: lambda dt: dt.month,
        DatePart.Unit.YEAR: lambda dt: dt.year,
        DatePart.Unit.WEEK_DAY: lambda dt: dt.weekday(),
    }[part]
    return extract(value)
python
{ "resource": "" }
q40721
Client.List
train
def List(self, name, initial=None):
    """Return a redis-backed list bound to *name*.

    :param name: key name of the list.
    :keyword initial: optional initial contents of the list.

    See :class:`redish.types.List`.
    """
    return types.List(name, self.api, initial=initial)
python
{ "resource": "" }
q40722
Client.Set
train
def Set(self, name, initial=None):
    """Return a redis-backed set bound to *name*.

    :param name: key name of the set.
    :keyword initial: optional initial members of the set.

    See :class:`redish.types.Set`.
    """
    return types.Set(name, self.api, initial)
python
{ "resource": "" }
q40723
Client.SortedSet
train
def SortedSet(self, name, initial=None):
    """Return a redis-backed sorted set bound to *name*.

    :param name: key name of the sorted set.
    :param initial: optional initial members as an iterable of
        ``(element, score)`` tuples.

    See :class:`redish.types.SortedSet`.
    """
    return types.SortedSet(name, self.api, initial)
python
{ "resource": "" }
q40724
Client.Queue
train
def Queue(self, name, initial=None, maxsize=None):
    """Return a redis-backed FIFO queue bound to *name*.

    :param name: key name of the queue.
    :keyword initial: optional initial items in the queue.
    :keyword maxsize: optional maximum queue size.

    See :class:`redish.types.Queue`.
    """
    return types.Queue(name, self.api, initial=initial, maxsize=maxsize)
python
{ "resource": "" }
q40725
Client.LifoQueue
train
def LifoQueue(self, name, initial=None, maxsize=None):
    """Return a redis-backed LIFO queue bound to *name*.

    :param name: key name of the queue.
    :keyword initial: optional initial items in the queue.
    :keyword maxsize: optional maximum queue size.

    See :class:`redish.types.LifoQueue`.
    """
    return types.LifoQueue(name, self.api, initial=initial, maxsize=maxsize)
python
{ "resource": "" }
q40726
Client.rename
train
def rename(self, old_name, new_name):
    """Rename the key *old_name* to *new_name*.

    :raises KeyError: if *old_name* does not exist.
    """
    try:
        self.api.rename(mkey(old_name), mkey(new_name))
    # FIX: use the `as` except syntax (valid on Python 2.6+ and 3.x)
    # instead of the Python-2-only comma form.
    except ResponseError as exc:
        if "no such key" in exc.args:
            # Translate the redis error into the idiomatic KeyError
            raise KeyError(old_name)
        raise
python
{ "resource": "" }
q40727
csv
train
def csv(file, *args, **kwargs):
    '''
    Open *file* for writing and yield a CSV dict writer for it.

    Parameters
    ----------
    file : Path
        Destination file.
    *args
        Positional args forwarded to ``csv.DictWriter`` (after the
        file object).
    **kwargs
        Keyword args forwarded to ``csv.DictWriter``.

    Examples
    --------
    with write.csv(file, fieldnames=['x', 'y']) as writer:
        writer.writerow({'x': 1, 'y': 2})
    '''
    # newline='' lets the csv module control line endings itself
    with file.open('w', newline='') as handle:
        yield DictWriter(handle, *args, **kwargs)
python
{ "resource": "" }
q40728
State.encrypt
train
def encrypt(self, key):
    """Encrypt this state's secret keys and sign the result.

    Makes the state opaque to the server, which could otherwise use
    its contents to fake proof of storage.  No-op when already
    encrypted.

    :param key: symmetric key used for both AES-CFB and the HMAC
    """
    if (self.encrypted):
        return
    # Fresh IV every time; CFB mode needs no padding
    self.iv = Random.new().read(AES.block_size)
    cipher = AES.new(key, AES.MODE_CFB, self.iv)
    self.f_key = cipher.encrypt(self.f_key)
    self.alpha_key = cipher.encrypt(self.alpha_key)
    self.encrypted = True
    # Sign last so the HMAC covers the encrypted form
    self.hmac = self.get_hmac(key)
python
{ "resource": "" }
q40729
State.decrypt
train
def decrypt(self, key):
    """Verify this state's signature, then decrypt its secret keys.

    Beyond the signature check, this is a no-op when the state is not
    encrypted.

    :param key: symmetric key used for both AES-CFB and the HMAC
    :raises HeartbeatError: if the signature does not match
    """
    # Authenticate before touching the payload
    if (self.get_hmac(key) != self.hmac):
        raise HeartbeatError("Signature invalid on state.")
    if (not self.encrypted):
        return
    cipher = AES.new(key, AES.MODE_CFB, self.iv)
    self.f_key = cipher.decrypt(self.f_key)
    self.alpha_key = cipher.decrypt(self.alpha_key)
    self.encrypted = False
    # Re-sign so the plaintext form remains verifiable
    self.hmac = self.get_hmac(key)
python
{ "resource": "" }
q40730
PySwizzle.gen_challenge
train
def gen_challenge(self, state):
    """Generate a random challenge for the given state.

    Uses a fresh random challenge key, v_max equal to the prime, and
    challenges as many chunks as the file has -- so whole-file
    coverage is probabilistic (chunks are drawn with replacement).

    :param state: the file state; may arrive encrypted from the server
    """
    # The state's keys are needed in the clear
    state.decrypt(self.key)
    return Challenge(state.chunks, self.prime, Random.new().read(32))
python
{ "resource": "" }
q40731
PySwizzle.prove
train
def prove(self, file, chal, tag):
    """Compute a proof of storage from the file, challenge, and tag.

    :param file: file-like object supporting read()/tell()/seek()
    :param chal: the challenge to respond to
    :param tag: the file tag produced at encode time
    """
    chunk_size = self.sectors * self.sectorsize
    # PRFs seeded by the challenge key: chunk selector and weights
    index = KeyedPRF(chal.key, len(tag.sigma))
    v = KeyedPRF(chal.key, chal.v_max)
    proof = Proof()
    proof.mu = [0] * self.sectors
    proof.sigma = 0
    # mu[j] = sum over challenged chunks i of v(i) * sector_{i,j}
    for i in range(0, chal.chunks):
        for j in range(0, self.sectors):
            pos = index.eval(i) * chunk_size + j * self.sectorsize
            file.seek(pos)
            data = file.read(self.sectorsize)
            if (len(data) > 0):
                proof.mu[j] += v.eval(i) * number.bytes_to_long(data)
            if (len(data) != self.sectorsize):
                # Short read: hit EOF, nothing more in this chunk
                break
    for j in range(0, self.sectors):
        proof.mu[j] %= self.prime
    # sigma = sum of v(i) * tag.sigma over the same chunk selection
    for i in range(0, chal.chunks):
        proof.sigma += v.eval(i) * tag.sigma[index.eval(i)]
    proof.sigma %= self.prime
    return proof
python
{ "resource": "" }
q40732
PySwizzle.verify
train
def verify(self, proof, chal, state):
    """Check a server's proof against the challenge and file state.

    :param proof: the proof returned by the server
    :param chal: the challenge that was issued
    :param state: the file state (may be encrypted)
    :return: True if the proof is consistent, False otherwise
    """
    state.decrypt(self.key)
    index = KeyedPRF(chal.key, state.chunks)
    v = KeyedPRF(chal.key, chal.v_max)
    f = KeyedPRF(state.f_key, self.prime)
    alpha = KeyedPRF(state.alpha_key, self.prime)
    # Recompute the expected sigma from the challenge weights, the
    # per-chunk tags, and the aggregated sector sums
    rhs = 0
    for i in range(0, chal.chunks):
        rhs += v.eval(i) * f.eval(index.eval(i))
    for j in range(0, self.sectors):
        rhs += alpha.eval(j) * proof.mu[j]
    rhs %= self.prime
    return proof.sigma == rhs
python
{ "resource": "" }
q40733
KronosClient.get_streams
train
def get_streams(self, namespace=None):
    """Yield the names of streams readable on the Kronos server.

    :param namespace: optional namespace; defaults to the client's own.
    """
    request_dict = {}
    namespace = namespace or self.namespace
    if namespace is not None:
        request_dict['namespace'] = namespace
    response = self._make_request(self._streams_url,
                                  data=request_dict,
                                  stream=True)
    # One stream name per non-empty line of the streamed response
    for line in response.iter_lines():
        if line:
            yield line
python
{ "resource": "" }
q40734
KronosClient.infer_schema
train
def infer_schema(self, stream, namespace=None):
    """Fetch the server-inferred schema for *stream*.

    :param stream: the stream name to inspect.
    :param namespace: optional namespace; defaults to the client's own.
    """
    payload = {'stream': stream,
               'namespace': namespace or self.namespace}
    return self._make_request(self._infer_schema_url, data=payload)
python
{ "resource": "" }
q40735
JonesClient._nodemap_changed
train
def _nodemap_changed(self, data, stat):
    """ZooKeeper watch callback: the host-to-config nodemap changed.

    Re-resolves which config path applies to this host and installs a
    fresh DataWatch on it.

    :raises EnvironmentNotFoundException: if the nodemap znode is gone
    """
    if not stat:
        raise EnvironmentNotFoundException(self.nodemap_path)
    try:
        conf_path = self._deserialize_nodemap(data)[self.hostname]
    except KeyError:
        # Host not in the map: fall back to the service default config
        conf_path = '/services/%s/conf' % self.service
    self.config_watcher = DataWatch(
        self.zk, conf_path, self._config_changed
    )
python
{ "resource": "" }
q40736
JonesClient._config_changed
train
def _config_changed(self, data, stat): """Called when config changes.""" self.config = json.loads(data) if self.cb: self.cb(self.config)
python
{ "resource": "" }
q40737
put_a_hit_out
train
# Downloads the newest enclosure of the feed named `name`, unless its
# filename already appears in the "downloads" database.  Saves into the
# configured download dir (settings['dl']) or ~/Downloads, records the
# download (url/date/feed as JSON), and reports the outcome via growl()
# and stdout.  NOTE(review): this dump flattened the original
# indentation, so the exact nesting of the Database("settings") block
# relative to the url handling cannot be confirmed from here.
# NOTE(review): presumably resolve_name() returns bytes on py3 (hence
# the .decode()) -- confirm against resolve_name's actual return type.
def put_a_hit_out(name): """Download a feed's most recent enclosure that we don't have""" feed = resolve_name(name) if six.PY3: feed = feed.decode() d = feedparser.parse(feed) # logger.info(d) # logger.info(feed) print(d['feed']['title']) if d.entries[0].enclosures: with Database("settings") as s: if 'verbose' in s: print(d.entries[0].enclosures[0]) # print d.feed.updated_parsed # Doesn't work everywhere, may nest in try or # use .headers['last-modified'] url = str(d.entries[0].enclosures[0]['href']) with Database("downloads") as db: if url.split('/')[-1] not in db: with Database("settings") as settings: if 'dl' in settings: dl_dir = settings['dl'] else: dl_dir = os.path.join(os.path.expanduser("~"), "Downloads") requests_get(url, dl_dir) db[url.split('/')[-1]] = json.dumps({'url': url, 'date': time.ctime(), 'feed': feed}) growl("Mission Complete: %s downloaded" % d.feed.title) print("Mission Complete: %s downloaded" % d.feed.title) else: growl("Mission Aborted: %s already downloaded" % d.feed.title) print("Mission Aborted: %s already downloaded" % d.feed.title)
python
{ "resource": "" }
q40738
resolve_name
train
def resolve_name(name):
    """Resolve a user-supplied feed name or alias to its URL.

    Returns None (after printing an error) when nothing matches.
    """
    logger.debug("resolve_name: %s", name)
    with Database("feeds") as feeds, Database("aliases") as aliases:
        if name in aliases.keys():
            # Alias: indirect through the canonical feed name
            return feeds[aliases[name]]
        if name in feeds.keys():
            return feeds[name]
        print("Cannot find feed named: %s" % name)
        return
python
{ "resource": "" }
q40739
growl
train
# Best-effort desktop notification of `text`, dispatching on the host
# OS: pync on macOS, pynotify / notify-send / GObject Notify on Linux,
# the `notify` CLI on Haiku, win10toast on Windows.  Failures fall
# through to the next mechanism or are logged.
# NOTE(review): `logger.exception()` is called with no message argument
# in two places; on Python 3 that raises TypeError -- confirm and fix.
# NOTE(review): original indentation was flattened in this dump; the
# nesting shown on this line is as-extracted.
def growl(text): """send native notifications where supported. Growl is gone.""" if platform.system() == 'Darwin': import pync pync.Notifier.notify(text, title="Hitman") elif platform.system() == 'Linux': notified = False try: logger.debug("Trying to import pynotify") import pynotify pynotify.init("Hitman") n = pynotify.Notification("Hitman Status Report", text) n.set_timeout(pynotify.EXPIRES_DEFAULT) n.show() notified = True except ImportError: logger.debug("Trying notify-send") # print("trying to notify-send") if Popen(['which', 'notify-send'], stdout=PIPE).communicate()[0]: # Do an OSD-Notify # notify-send "Totem" "This is a superfluous notification" os.system("notify-send \"Hitman\" \"%r\" " % str(text)) notified = True if not notified: try: logger.info("notificatons gnome gi???") import gi gi.require_version('Notify', '0.7') from gi.repository import Notify Notify.init("Hitman") # TODO have Icon as third argument. notification = Notify.Notification.new("Hitman", text) notification.show() Notify.uninit() notified = True except ImportError: logger.exception() elif platform.system() == 'Haiku': os.system("notify --type information --app Hitman --title 'Status Report' '%s'" % str(text)) elif platform.system() == 'Windows': try: from win10toast import ToastNotifier toaster = ToastNotifier() toaster.show_toast(text, "Hitman") # gntplib.publish("Hitman", "Status Update", "Hitman", text=text) except Exception: logger.exception()
python
{ "resource": "" }
q40740
add_feed
train
def add_feed(url):
    """Store *url* in the feeds database keyed by its feed title.

    :return: the title the feed was stored under.
    """
    with Database("feeds") as db:
        # The feed's own title becomes its name in the database
        name = str(feedparser.parse(url).feed.title)
        db[name] = url
    return name
python
{ "resource": "" }
q40741
del_alias
train
def del_alias(alias):
    """Remove a feed alias, reporting what was removed.

    Prints the remaining alias table afterwards so the user can verify
    the result.
    """
    with Database("aliases") as mydb:
        try:
            print("removing alias of %s to %s" % (alias, mydb.pop(alias)))
        except KeyError:
            print("No such alias key")
        print("Check alias db:")
        # BUG FIX: on Python 3 printing the bare zip object showed
        # "<zip object at ...>" instead of the pairs; materialize it.
        print(list(zip(list(mydb.keys()), list(mydb.values()))))
python
{ "resource": "" }
q40742
alias_feed
train
def alias_feed(name, alias):
    """Record *alias* as an alternate name for the feed *name*."""
    with Database("aliases") as db:
        if alias in db:
            # Refuse to clobber an existing alias
            print("Something has gone horribly wrong with your aliases! Try deleting the %s entry." % name)
            return
        db[alias] = name
python
{ "resource": "" }
q40743
list_feeds
train
def list_feeds():
    """Print every stored feed with its URL and any aliases."""
    with Database("feeds") as feeds, Database("aliases") as aliases_db:
        for name in feeds:
            url = feeds[name]
            # Collect every alias that points at this feed name
            aliases = [k for k, v in zip(list(aliases_db.keys()),
                                         list(aliases_db.values()))
                       if v == name]
            if aliases:
                print(name, " : %s Aliases: %s" % (url, aliases))
            else:
                print(name, " : %s" % url)
python
{ "resource": "" }
q40744
import_opml
train
def import_opml(url):
    """Import an OPML file locally or from a URL.

    Each RSS outline's feed is added; the outline's ``text`` attribute
    is reported as the added name.
    """
    from bs4 import BeautifulSoup
    try:
        # BUG FIX: the Python 2 builtin ``file()`` no longer exists, so
        # local imports always crashed with NameError on Python 3; use
        # open() instead (and close the handle).
        with open(url) as fh:
            f = fh.read()
    except (IOError, OSError):
        # Not a readable local path: treat it as a remote URL
        f = requests.get(url).text
    soup = BeautifulSoup(f, "xml")
    # NOTE(review): `type="rss" or "pie"` evaluates to just "rss", so
    # "pie" outlines are never matched -- confirm intended behavior.
    links = soup.find_all('outline', type="rss" or "pie")
    # This is very slow, might cache this info on add
    for link in links:
        add_feed(link['xmlUrl'])
        print("Added " + link['text'])
python
{ "resource": "" }
q40745
directory
train
def directory():
    """Return (creating it if needed) hitman's per-OS data directory."""
    home = os.path.expanduser('~')
    system = platform.system()
    if system == 'Darwin':
        hitman_dir = os.path.join(home, 'Library', 'Application Support',
                                  'hitman')
    elif system == 'Windows':
        hitman_dir = os.path.join(os.environ['appdata'], 'hitman')
    else:
        # Linux and any other platform get a dotfile directory
        hitman_dir = os.path.join(home, '.hitman')
    if not os.path.isdir(hitman_dir):
        os.mkdir(hitman_dir)
    return hitman_dir
python
{ "resource": "" }
q40746
add
train
def add(url, force=False):
    """Add an Atom or RSS feed by URL, guessing feed-ness if needed.

    :param url: the feed URL.
    :param force: add even when the URL doesn't look like a feed.
    """
    # BUG FIX: the original tested ``url[1][-4:] == 'atom'`` -- a slice
    # of the *second character* -- which could never match; the intent
    # was clearly the URL's last four characters.
    if url[-3:] == 'xml' or url[-4:] == 'atom':
        print("Added your feed as %s" % str(add_feed(url)))
    elif is_feed(url):
        print("Added your feed as %s" % str(add_feed(url)))
    elif force:
        print("Added your feed as %s" % str(add_feed(url)))
    else:
        print("Hitman doesn't think that url is a feed; if you're sure it is rerun with --force")
python
{ "resource": "" }
q40747
set_settings
train
def set_settings(key, value):
    """Set (or disable) a Hitman internal setting.

    Falsy-looking values ('0', 'false', 'no', 'off', 'False') remove
    the setting entirely.
    """
    with Database("settings") as settings:
        if value in ['0', 'false', 'no', 'off', 'False']:
            # BUG FIX: `del settings[key]` raised KeyError when the
            # setting had never been set; pop() with a default is
            # tolerant (Database supports pop -- see del_alias).
            settings.pop(key, None)
            print("Disabled setting")
        else:
            print(value)
            settings[key] = value
            print("Setting saved")
python
{ "resource": "" }
q40748
get_settings
train
def get_settings(all, key):
    """Print Hitman internal settings; pass all=True for every key."""
    with Database("settings") as s:
        if all:
            # Dump the entire settings table
            for k in list(s.keys()):
                print("{} = {}".format(k, s[k]))
        elif key:
            print("{} = {}".format(key, s[key]))
        else:
            print("Don't know what you want? Try --all")
python
{ "resource": "" }
q40749
Challenge.fromdict
train
def fromdict(dict):
    """Build a Challenge from its dictionary representation.

    (The parameter name shadows the builtin ``dict``; kept as-is for
    interface compatibility.)

    :param dict: mapping with an encoded 'seed' and an 'index' key
    """
    return Challenge(hb_decode(dict['seed']), dict['index'])
python
{ "resource": "" }
q40750
Tag.fromdict
train
def fromdict(dict):
    """Build a Tag from its dictionary representation.

    (The parameter name shadows the builtin ``dict``; kept as-is for
    interface compatibility.)

    :param dict: mapping with 'tree', 'chunksz' and 'filesz' keys
    """
    tree = MerkleTree.fromdict(dict['tree'])
    return Tag(tree, dict['chunksz'], dict['filesz'])
python
{ "resource": "" }
q40751
Merkle.encode
train
def encode(self, file, n=DEFAULT_CHALLENGE_COUNT, seed=None, chunksz=None, filesz=None):
    """Build the challenge Merkle tree for *file*.

    Each leaf is a keyed hash of a (seed-selected) chunk of the file;
    the per-leaf seeds form a deterministic chain derived from the
    root seed, so only the root seed needs to be stored.

    :param file: file-like object supporting read()/seek()/tell()
    :param n: number of challenges (leaves) to generate
    :param seed: root seed; a random one is generated if omitted
    :param chunksz: bytes checked per challenge; defaults to
        check_fraction of the file, else DEFAULT_CHUNK_SIZE
    :param filesz: file size; measured by seeking to EOF if omitted
    :return: ``(tag, state)`` tuple
    """
    if (seed is None):
        seed = os.urandom(DEFAULT_KEY_SIZE)
    if (filesz is None):
        file.seek(0, 2)
        filesz = file.tell()
    if (chunksz is None):
        if (self.check_fraction is not None):
            chunksz = int(self.check_fraction * filesz)
        else:
            chunksz = DEFAULT_CHUNK_SIZE
    tree = MerkleTree()
    state = State(0, seed, n)
    # Walk the seed chain, hashing one chunk per leaf
    leaf_seed = MerkleHelper.get_next_seed(self.key, state.seed)
    for i in range(0, n):
        tree.add_leaf(
            MerkleHelper.get_chunk_hash(file, leaf_seed, filesz, chunksz))
        leaf_seed = MerkleHelper.get_next_seed(self.key, leaf_seed)
    tree.build()
    state.root = tree.get_root()
    # The leaves are secrets; only interior hashes go to the server
    tree.strip_leaves()
    tag = Tag(tree, chunksz, filesz)
    state.sign(self.key)
    return (tag, state)
python
{ "resource": "" }
q40752
Merkle.gen_challenge
train
def gen_challenge(self, state):
    """Produce the next challenge, advancing the state's seed chain.

    Verifies the state's signature first, then increments the seed and
    index and re-signs the state for storage back on the server.

    :param state: the signed state to advance
    :raises HeartbeatError: when all n challenges have been consumed
    """
    state.checksig(self.key)
    if (state.index >= state.n):
        raise HeartbeatError("Out of challenges.")
    state.seed = MerkleHelper.get_next_seed(self.key, state.seed)
    challenge = Challenge(state.seed, state.index)
    state.index += 1
    state.sign(self.key)
    return challenge
python
{ "resource": "" }
q40753
Merkle.prove
train
def prove(self, file, challenge, tag):
    """Return a proof of possession of *file* for the given challenge.

    The proof consists of the keyed hash of the challenged chunk (as a
    Merkle leaf) together with the Merkle branch for that leaf, taken
    from the stored tree in *tag*.  (The file size is not a parameter
    here; it is read from ``tag.filesz``.)

    :param file: file-like object supporting ``read()``, ``seek()`` and
        ``tell()``
    :param challenge: the challenge (seed + leaf index) to answer
    :param tag: file tag provided by the client; supplies the stripped
        Merkle tree plus the chunk and file sizes
    """
    leaf = MerkleLeaf(challenge.index,
                      MerkleHelper.get_chunk_hash(file,
                                                  challenge.seed,
                                                  filesz=tag.filesz,
                                                  chunksz=tag.chunksz))
    return Proof(leaf, tag.tree.get_branch(challenge.index))
python
{ "resource": "" }
q40754
Merkle.verify
train
def verify(self, proof, challenge, state):
    """Return True when *proof* answers *challenge* for the stored root.

    Checks the state signature, confirms the proof targets the
    challenged leaf index, and validates the Merkle branch against
    ``state.root``.

    :param proof: proof returned from the server
    :param challenge: challenge that was issued to the server
    :param state: signed state holding the Merkle root
    """
    state.checksig(self.key)
    index_matches = (proof.leaf.index == challenge.index)
    if not index_matches:
        return False
    return MerkleTree.verify_branch(proof.leaf, proof.branch, state.root)
python
{ "resource": "" }
q40755
MerkleHelper.get_next_seed
train
def get_next_seed(key, seed):
    """Return the successor of *seed*: ``HMAC-SHA256(key, seed)``.

    :param key: HMAC key bytes
    :param seed: current seed to permute
    """
    digest_maker = hmac.new(key, seed, hashlib.sha256)
    return digest_maker.digest()
python
{ "resource": "" }
q40756
MerkleHelper.get_file_hash
train
def get_file_hash(file, seed, bufsz=DEFAULT_BUFFER_SIZE):
    """Return the HMAC-SHA256 of the entire *file*, keyed by *seed*.

    :param file: file-like object supporting ``read()``
    :param seed: HMAC key for this hash
    :param bufsz: read-buffer size in bytes
    """
    digest_maker = hmac.new(seed, None, hashlib.sha256)
    while True:
        block = file.read(bufsz)
        digest_maker.update(block)
        # A short read means we have consumed the whole file.
        if len(block) < bufsz:
            break
    return digest_maker.digest()
python
{ "resource": "" }
q40757
MerkleHelper.get_chunk_hash
train
def get_chunk_hash(file, seed, filesz=None, chunksz=DEFAULT_CHUNK_SIZE, bufsz=DEFAULT_BUFFER_SIZE):
    """Return the keyed hash of one pseudo-randomly positioned file chunk.

    The chunk offset is drawn from a keyed PRF seeded with *seed*, so the
    same seed always selects (and keys) the same chunk.

    :param file: file-like object supporting ``read()``, ``seek()``, ``tell()``
    :param seed: seed used both to pick the chunk position and as HMAC key
    :param filesz: file size; detected by seeking to EOF when omitted
    :param chunksz: size of the chunk to hash
    :param bufsz: read-buffer size
    """
    if (filesz is None):
        file.seek(0, 2)
        filesz = file.tell()
    if (filesz < chunksz):
        # File smaller than one chunk: hash the whole file instead.
        chunksz = filesz
    # The PRF range covers every valid chunk start offset.
    prf = KeyedPRF(seed, filesz - chunksz + 1)
    i = prf.eval(0)
    file.seek(i)
    h = hmac.new(seed, None, hashlib.sha256)
    while (True):
        # Never read past the end of the chunk.
        if (chunksz < bufsz):
            bufsz = chunksz
        buffer = file.read(bufsz)
        h.update(buffer)
        # chunksz now tracks the bytes still to hash; it must not go negative.
        chunksz -= len(buffer)
        assert(chunksz >= 0)
        if (chunksz == 0):
            break
    return h.digest()
python
{ "resource": "" }
q40758
Domain.from_tuple
train
def from_tuple(cls, queries):
    """Create a ``Domain`` given a set of complex query tuples.

    Each item in *queries* is either a query data-set compatible with
    :func:`~domain.Domain.add_query`, or one of the join strings
    (``cls.AND`` / ``cls.OR``) which switches the join type for the
    queries that follow it.

    Example::

        [('subject', 'Test1'), 'OR', ('subject', 'Test2')]
        # equivalent to subject:'Test1' OR subject:'Test2'

    :param queries: iterator of query tuples and/or join strings
    :return: a :class:`Domain` representing the input queries
    """
    domain = cls()
    join_operator = cls.AND
    for item in queries:
        if item in (cls.OR, cls.AND):
            # A bare join string changes how subsequent queries attach.
            join_operator = item
        else:
            domain.add_query(item, join_operator)
    return domain
python
{ "resource": "" }
q40759
Domain.add_query
train
def add_query(self, query, join_with=AND):
    """Join a new query onto the existing query stack.

    :param query: the condition for the query; anything that is not
        already a :class:`DomainCondition` is converted via
        :func:`~.domain.DomainCondition.from_tuple`
    :param join_with: join string inserted when other queries are
        already on the stack
    """
    condition = query
    if not isinstance(condition, DomainCondition):
        condition = DomainCondition.from_tuple(condition)
    if self.query:
        self.query.append(join_with)
    self.query.append(condition)
python
{ "resource": "" }
q40760
DomainCondition.from_tuple
train
def from_tuple(cls, query):
    """Create a condition from a query tuple.

    :param query: tuple/list ``(field_name, field_value[, field_value_to])``;
        ``field_value_to`` only applies to date-range searches
    :return: a :class:`DomainCondition` instance; the concrete subclass
        is chosen from the type of the first value via ``TYPES``
    """
    field, values = query[0], query[1:]
    # Dispatch on the value type.  Previously the code rebound ``cls``
    # itself, shadowing the classmethod argument; a separate name keeps
    # the fallback (unknown type -> the class this was called on) clear.
    try:
        condition_cls = TYPES[type(values[0])]
    except KeyError:
        condition_cls = cls
    return condition_cls(field, *values)
python
{ "resource": "" }
q40761
StorageRouter.load_backends
train
def load_backends(self):
    """Load and instantiate every backend configured in settings.py.

    For each entry under ``settings.storage`` the configured backend
    class is imported by its dotted path and constructed with the
    backend name, the namespaces, and the remaining backend settings;
    the instance is stored in ``self.backends`` keyed by name.
    """
    # NOTE: ``iteritems()`` means this module targets Python 2.
    for name, backend_settings in settings.storage.iteritems():
        # Dotted path like ``package.module.ClassName``.
        backend_path = backend_settings['backend']
        backend_module, backend_cls = backend_path.rsplit('.', 1)
        backend_module = import_module(backend_module)
        # Create an instance of the configured backend.
        backend_constructor = getattr(backend_module, backend_cls)
        self.backends[name] = backend_constructor(name,
                                                  self.namespaces,
                                                  **backend_settings)
python
{ "resource": "" }
q40762
StorageRouter.get_matching_prefix
train
def get_matching_prefix(self, namespace, stream):
    """Return the longest configured prefix that *stream* starts with.

    Prefixes come from the per-namespace configuration (stream.yaml);
    the empty-string default prefix is returned when no longer prefix
    matches.
    """
    validate_stream(stream)
    matches = [prefix
               for prefix in self.prefix_confs[namespace]
               if prefix and stream.startswith(prefix)]
    if not matches:
        return ''
    # Equal-length matching prefixes of the same stream are necessarily
    # identical, so max-by-length is unambiguous.
    return max(matches, key=len)
python
{ "resource": "" }
q40763
StorageRouter.backends_to_mutate
train
def backends_to_mutate(self, namespace, stream):
    """Return every backend enabled for writing *stream*.

    :raises NamespaceMissing: when *namespace* is not configured
    """
    if namespace not in self.namespaces:
        raise NamespaceMissing('`{}` namespace is not configured'
                               .format(namespace))
    prefix = self.get_matching_prefix(namespace, stream)
    return self.prefix_confs[namespace][prefix]
python
{ "resource": "" }
q40764
StorageRouter.backend_to_retrieve
train
def backend_to_retrieve(self, namespace, stream):
    """Return ``(name, backend)`` of the backend enabled for reading *stream*.

    :raises NamespaceMissing: when *namespace* is not configured
    """
    if namespace not in self.namespaces:
        raise NamespaceMissing('`{}` namespace is not configured'
                               .format(namespace))
    stream_prefix = self.get_matching_prefix(namespace, stream)
    read_backend = self.prefix_read_backends[namespace][stream_prefix]
    backend = self.prefix_confs[namespace][stream_prefix][read_backend]
    return (read_backend, backend)
python
{ "resource": "" }
q40765
WebHook.create
train
def create(cls, session, web_hook):
    """Create a web hook for the company.

    Creating a new web hook overwrites the one already configured for
    this company, and the HelpScout API offers no way to detect whether
    one exists beforehand; this limitation cannot be worked around.

    :param session: authenticated ``requests`` session
    :param web_hook: the :class:`helpscout.models.WebHook` to create
    :return: ``True`` if the creation succeeded; errors otherwise
    """
    # The request is issued as a side effect of constructing the
    # paginator; the resulting object itself is not needed.
    cls(
        '/hooks.json',
        data=web_hook.to_api(),
        request_type=RequestPaginator.POST,
        session=session,
    )
    return True
python
{ "resource": "" }
q40766
AttachmentData.raw_data
train
def raw_data(self, value):
    """Set the base64 encoded ``data`` from a raw value or file object.

    Accepts a ``str``, ``bytes``, or a file-like object whose ``read()``
    returns either.  Previously ``bytes`` input (e.g. a file opened in
    binary mode) crashed on ``value.encode``; bytes are now passed to
    base64 directly.  Falsy values leave ``data`` untouched, matching
    the previous behavior.
    """
    if value:
        try:
            value = value.read()
        except AttributeError:
            pass
        if isinstance(value, str):
            # Text input must be encoded before base64.
            value = value.encode('utf-8')
        self.data = base64.b64encode(value).decode('utf-8')
python
{ "resource": "" }
q40767
schedule
train
def schedule():
    """HTTP endpoint for scheduling tasks.

    If an active task with the same code already exists, only the task
    with the shorter interval is left active; the other is deactivated
    (and unscheduled if it was running).  Removed a leftover Python-2
    debug ``print`` statement that leaked scheduler internals to stdout.
    """
    code = request.form['code']
    interval = int(request.form['interval'])
    task_id = binascii.b2a_hex(os.urandom(5))

    new_task = Task(id=task_id)
    new_task.active = True
    new_task.code = code
    new_task.interval = interval

    # TODO(derek): Assert there is only one other_task
    other_task = Task.query.filter_by(code=code, active=True).first()
    if other_task:
        if other_task.interval <= new_task.interval:
            # The existing task wins: store the new one as inactive.
            new_task.active = False
        else:
            other_task.active = False
            other_task.save()
            current_app.scheduler.cancel(other_task.id)

    if new_task.active:
        current_app.scheduler.schedule({
            'id': task_id,
            'code': new_task.code,
            'interval': new_task.interval
        })

    new_task.save()

    return json.dumps({
        'status': 'success',
        'id': task_id,
    })
python
{ "resource": "" }
q40768
cancel
train
def cancel():
    """HTTP endpoint for canceling tasks.

    Deleting an active task promotes the inactive task with the same
    code and the smallest interval, if one exists.
    """
    task_id = request.form['id']
    task = Task.query.get(task_id)
    if not task:
        # Unknown id: nothing to do, but still report success.
        return json.dumps({
            'status': 'success',
            'id': None,
        })

    task.delete()
    if task.active:
        current_app.scheduler.cancel(task_id)
        replacement = (Task.query.filter_by(code=task.code)
                       .order_by('interval').first())
        if replacement:
            replacement.active = True
            replacement.save()
            current_app.scheduler.schedule({
                'id': replacement.id,
                'code': replacement.code,
                'interval': replacement.interval
            })

    return json.dumps({
        'status': 'success',
        'id': task_id,
    })
python
{ "resource": "" }
q40769
OplogReplayer.insert
train
def insert(self, ns, docid, raw, **kw):
    """Replay a single oplog insert into the destination collection.

    Duplicate-key errors are logged and skipped so that replaying an
    oplog over pre-existing data does not abort the whole run.

    Example op::

        {'docid': ObjectId('4e95ae77a20e6164850761cd'),
         'ns': u'mydb.tweets',
         'raw': {u'h': -1469300750073380169L,
                 u'ns': u'mydb.tweets',
                 u'o': {u'_id': ObjectId('4e95ae77a20e6164850761cd'),
                        u'content': u'Lorem ipsum', u'nr': 16},
                 u'op': u'i',
                 u'ts': Timestamp(1318432375, 1)}}
    """
    try:
        self._dest_coll(ns).insert(raw['o'], safe=True)
    except DuplicateKeyError, e:
        logging.warning(e)
python
{ "resource": "" }
q40770
OplogReplayer.update
train
def update(self, ns, docid, raw, **kw):
    """Replay a single oplog update into the destination collection.

    ``raw['o2']`` selects the target document and ``raw['o']`` carries
    the modifier (e.g. ``$set``) to apply.
    """
    selector = raw['o2']
    modifier = raw['o']
    self._dest_coll(ns).update(selector, modifier, safe=True)
python
{ "resource": "" }
q40771
OplogReplayer.delete
train
def delete(self, ns, docid, raw, **kw):
    """Replay a single oplog delete into the destination collection.

    ``raw['o']`` holds the ``_id`` selector of the removed document.
    """
    selector = raw['o']
    collection = self._dest_coll(ns)
    collection.remove(selector, safe=True)
python
{ "resource": "" }
q40772
OplogReplayer.drop_index
train
def drop_index(self, raw):
    """Replay a dropIndexes command.

    Example op::

        { "op" : "c", "ns" : "testdb.$cmd",
          "o" : { "dropIndexes" : "testcoll", "index" : "nuie_1" } }
    """
    # The database name is the namespace component before ``.$cmd``.
    dbname, _sep, _rest = raw['ns'].partition('.')
    collname = raw['o']['dropIndexes']
    index_name = raw['o']['index']
    self.dest[dbname][collname].drop_index(index_name)
python
{ "resource": "" }
q40773
OplogReplayer.command
train
def command(self, ns, raw, **kw):
    """Replay a database command (e.g. a collection drop).

    Operation failures are logged and swallowed so one failing command
    does not stop the replay.

    Example op::

        { "op" : "c", "ns" : "testdb.$cmd", "o" : { "drop" : "fs.files"} }
    """
    try:
        # The database name is the namespace component before ``.$cmd``.
        dbname = raw['ns'].split('.', 1)[0]
        self.dest[dbname].command(raw['o'], check=True)
    except OperationFailure, e:
        logging.warning(e)
python
{ "resource": "" }
q40774
Mailboxes.get_folders
train
def get_folders(cls, session, mailbox_or_id):
    """List the folders of a mailbox.

    :param session: authenticated ``requests`` session
    :param mailbox_or_id: a :class:`helpscout.models.Mailbox` or its id
    :return: a ``RequestPaginator`` yielding ``Folder`` objects
    """
    mailbox_id = mailbox_or_id
    if isinstance(mailbox_id, Mailbox):
        mailbox_id = mailbox_id.id
    return cls(
        '/mailboxes/%d/folders.json' % mailbox_id,
        session=session,
        out_type=Folder,
    )
python
{ "resource": "" }
q40775
Board.json
train
def json(self):
    """Return a JSON-serializable description of this board.

    Removed an unreachable string-literal block after the return that
    referenced a dropped ``pycodes`` API (dead code).

    Format::

        {'id': board_id,
         'title': 'The title of the board',
         'panels': [{
             'title': 'The title of the panel'
             'data_source': {
                 'source_type': PanelSource.TYPE,
                 'refresh_seconds': 600,
                 ...source_specific_details...
             },
             'display': {
                 'display_type': PanelDisplay.TYPE,
                 ...display_specific_details...
             },
         ...]}
    """
    if self.board_data:
        board_dict = json.loads(self.board_data)
        board_dict['id'] = self.id
        del board_dict['__version__']
    else:
        # No stored data yet: present an empty board.
        board_dict = {
            'id': self.id,
            'title': '',
            'panels': []
        }
    return board_dict
python
{ "resource": "" }
q40776
get
train
def get(dataset = None, include_metadata = False, mnemonics = None, **dim_values):
    """Get data from a Knoema dataset as a pandas frame.

    Either *dataset* or *mnemonics* must be provided, but a dimension
    selection (*dim_values*) cannot be combined with *mnemonics*.

    :param dataset: dataset id to read from
    :param include_metadata: whether to attach metadata to the result
    :param mnemonics: mnemonics to read instead of a dataset selection
    :param dim_values: dimension member selection for the dataset
    """
    if not dataset and not mnemonics:
        raise ValueError('Dataset id is not specified')
    if mnemonics and dim_values:
        raise ValueError('The function does not support specifying mnemonics and selection in a single call')
    config = ApiConfig()
    client = ApiClient(config.host, config.app_id, config.app_secret)
    client.check_correct_host()
    # ds stays None in the mnemonics-only case; the reader chain below
    # only dereferences ds.type on the dataset path.
    ds = client.get_dataset(dataset) if dataset else None
    # Reader choice: mnemonics take precedence; otherwise 'Regular'
    # datasets stream, anything else goes through the pivot API.
    reader = MnemonicsDataReader(client, mnemonics) if mnemonics else StreamingDataReader(client, dim_values) if ds.type == 'Regular' else PivotDataReader(client, dim_values)
    reader.include_metadata = include_metadata
    reader.dataset = ds
    return reader.get_pandasframe()
python
{ "resource": "" }
q40777
delete
train
def delete(dataset):
    """Delete a dataset by its id and return a confirmation message."""
    config = ApiConfig()
    api_client = ApiClient(config.host, config.app_id, config.app_secret)
    api_client.check_correct_host()
    api_client.delete(dataset)
    message = 'Dataset {} has been deleted successfully'.format(dataset)
    return message
python
{ "resource": "" }
q40778
verify
train
def verify(dataset, publication_date, source, refernce_url):
    """Mark a dataset as verified.

    :param refernce_url: reference URL (parameter name kept as-is,
        typo included, for backward compatibility with keyword callers)
    """
    config = ApiConfig()
    api_client = ApiClient(config.host, config.app_id, config.app_secret)
    api_client.check_correct_host()
    api_client.verify(dataset, publication_date, source, refernce_url)
python
{ "resource": "" }
q40779
OplogWatcher.start
train
def start(self):
    """Start tailing the replica-set oplog and dispatching operations.

    When no starting timestamp is set, resumes from the newest oplog
    entry (or from the beginning if the oplog is empty).  Reconnects
    and retries on transient errors until ``self.running`` goes False.
    """
    oplog = self.connection.local['oplog.rs']
    if self.ts is None:
        # Latest entry by natural (insertion) order.
        cursor = oplog.find().sort('$natural', -1)
        obj = cursor[0]
        if obj:
            self.ts = obj['ts']
        else:
            # In case no oplogs are present.
            self.ts = None
    if self.ts:
        logging.info('Watching oplogs with timestamp > %s' % self.ts)
    else:
        logging.info('Watching all oplogs')
    while self.running:
        # Rebuilt each pass so that the query resumes past the last
        # timestamp recorded by process_op.
        query = {
            'ts': {'$gt': self.ts}
        }
        try:
            logging.debug('Tailing over %r...' % query)
            cursor = oplog.find(query, tailable=True)
            # OplogReplay flag greatly improves scanning for ts performance.
            cursor.add_option(pymongo.cursor._QUERY_OPTIONS['oplog_replay'])
            while self.running:
                for op in cursor:
                    self.process_op(op['ns'], op)
                time.sleep(self.poll_time)
                if not cursor.alive:
                    break
        except AutoReconnect, e:
            # Transient connectivity issue: back off and retry.
            logging.warning(e)
            time.sleep(self.poll_time)
        except OperationFailure, e:
            logging.exception(e)
            time.sleep(self.poll_time)
python
{ "resource": "" }
q40780
OplogWatcher.process_op
train
def process_op(self, ns, raw): """ Processes a single operation from the oplog. Performs a switch by raw['op']: "i" insert "u" update "d" delete "c" db cmd "db" declares presence of a database "n" no op """ # Compute the document id of the document that will be altered # (in case of insert, update or delete). docid = self.__get_id(raw) op = raw['op'] if op == 'i': self.insert(ns=ns, docid=docid, raw=raw) elif op == 'u': self.update(ns=ns, docid=docid, raw=raw) elif op == 'd': self.delete(ns=ns, docid=docid, raw=raw) elif op == 'c': self.command(ns=ns, raw=raw) elif op == 'db': self.db_declare(ns=ns, raw=raw) elif op == 'n': self.noop() else: logging.error("Unknown op: %r" % op) # Save timestamp of last processed oplog. self.ts = raw['ts']
python
{ "resource": "" }
q40781
multi_way_partitioning
train
def multi_way_partitioning(items, bin_count):
    """Greedily spread weighted items across ``bin_count`` bins.

    Approximate multi-way partitioning: items are handed out heaviest
    first, each to the currently lightest bin, which keeps the spread
    between the heaviest and lightest bin small.

    See `a greedy solution <http://stackoverflow.com/a/6855546/1031434>`_
    and `problem definitions <http://ijcai.org/Proceedings/09/Papers/096.pdf>`_.

    :param items: iterable of ``(item, weight)`` tuples
    :param bin_count: number of bins
    :return: frozenbag of frozenbags, one inner bag per bin
    """
    bins = [_Bin() for _ in range(bin_count)]
    heaviest_first = sorted(items, key=lambda pair: pair[1], reverse=True)
    for item, weight in heaviest_first:
        lightest = min(bins, key=lambda b: b.weights_sum)
        lightest.add(item, weight)
    return frozenbag(frozenbag(b.items) for b in bins)
python
{ "resource": "" }
q40782
_GlobalFigure.set_foregroundcolor
train
def set_foregroundcolor(self, color):
    """Set the foreground color of the current axes.

    Applies *color* to the frame (spines), major ticks and tick labels,
    axis labels, offset texts, title, and — when present — the legend
    title and entries.
    """
    ax = self.ax
    for tl in ax.get_xticklines() + ax.get_yticklines():
        tl.set_color(color)
    for spine in ax.spines:
        ax.spines[spine].set_edgecolor(color)
    for tick in ax.xaxis.get_major_ticks():
        tick.label1.set_color(color)
    for tick in ax.yaxis.get_major_ticks():
        tick.label1.set_color(color)
    ax.axes.xaxis.label.set_color(color)
    ax.axes.yaxis.label.set_color(color)
    ax.axes.xaxis.get_offset_text().set_color(color)
    ax.axes.yaxis.get_offset_text().set_color(color)
    ax.axes.title.set_color(color)
    lh = ax.get_legend()
    # ``is not None`` instead of ``!= None``: identity is the correct
    # (and PEP 8) test for the no-legend case.
    if lh is not None:
        lh.get_title().set_color(color)
        lh.legendPatch.set_edgecolor('none')
        for lab in lh.get_texts():
            lab.set_color(color)
    for tl in ax.get_xticklabels():
        tl.set_color(color)
    for tl in ax.get_yticklabels():
        tl.set_color(color)
    plt.draw()
python
{ "resource": "" }
q40783
_BaseDataPerClass._getPlotData
train
def _getPlotData(self): """ Turns the resultsByClass Dict into a list of bin groups skipping the uncertain group if empty return: (label list, ydata list) :rtype: tuple(list(str), list(float)) """ resultsByClass = self.resultsByClass try: if resultsByClass['Uncertain'] == 0: # remove uncertain tag if present and = 0 resultsByClass.pop('Uncertain', None) except KeyError: pass plotData = list(zip(*resultsByClass.items())) # (labels, ydata) return plotData
python
{ "resource": "" }
q40784
DataPerParameterBin._genKeysBins
train
def _genKeysBins(self):
    """Derive human-readable bin labels from ``self._binlimits``.

    Produces labels like ``'<0'`` (open lower bound), ``'0 to 1'``,
    ``'1'`` (degenerate bin where both edges are equal), ``'1+'``
    (open upper bound) plus a trailing ``'Uncertain'`` bucket, and
    stores them in ``self._allowedKeys`` (normally set in
    ``_classVariables``).
    """
    binlimits = self._binlimits
    allowedKeys = []
    # midbinlimits holds only the finite limits once infinities are
    # stripped from either end.
    midbinlimits = binlimits
    if binlimits[0] == -float('inf'):
        midbinlimits = binlimits[1:]  # remove the bottom limit
        allowedKeys.append('<{0}'.format(midbinlimits[0]))
    if binlimits[-1] == float('inf'):
        midbinlimits = midbinlimits[:-1]
    lastbin = midbinlimits[0]
    for binlimit in midbinlimits[1:]:
        if lastbin == binlimit:
            # Degenerate bin: both edges equal, label with the value.
            allowedKeys.append('{0}'.format(binlimit))
        else:
            allowedKeys.append('{0} to {1}'.format(lastbin, binlimit))
        lastbin = binlimit
    if binlimits[-1] == float('inf'):
        # Open-ended top bin, e.g. '10+'.
        allowedKeys.append('{0}+'.format(binlimits[-2]))
    allowedKeys.append('Uncertain')
    self._allowedKeys = allowedKeys
python
{ "resource": "" }
q40785
GeneralPlotter._set_axis
train
def _set_axis(self, param, unit):
    """Evaluate *param* on every object in ``self.objectList``.

    :param param: attribute/expression string evaluated on each object.
        NOTE(review): this goes through ``eval`` — *param* must never
        come from untrusted input.
    :param unit: unit to rescale each value to, or ``None`` to keep the
        raw (possibly unitless) values
    :return: list of values, with ``nan`` where the hierarchy is broken
    """
    axisValues = []
    for astroObject in self.objectList:
        try:
            value = eval('astroObject.{0}'.format(param))
        except ac.HierarchyError:
            # ie trying to call planet.star and one planet is a lone ranger
            value = np.nan
        if unit is None:
            # no unit to rescale (a aq.unitless quanitity would otherwise
            # fail with ValueError)
            axisValues.append(value)
        else:
            try:
                axisValues.append(value.rescale(unit))
            except AttributeError:
                # either nan or unitless
                axisValues.append(value)
    return axisValues
python
{ "resource": "" }
q40786
DiscoveryMethodByYear.setup_keys
train
def setup_keys(self):
    """Build the discovery-year histogram for the plot.

    Counts planets per discovery year (optionally skipping Solar System
    planets) and records planets with an unknown discovery method in
    ``self.nan_list``.  The old ``discovery_methods`` dict was built but
    never used, so it has been removed.

    :return: dict mapping discovery year -> number of planets
    """
    discovery_years = {}
    nan_list = []
    for planet in self.planet_list:
        if 'Solar System' in planet.params['list'] and self.skip_solar_system_planets:
            continue
        year = planet.discoveryYear
        discovery_years[year] = discovery_years.get(year, 0) + 1
        # Identity check kept from the original: missing methods are
        # stored as the np.nan singleton.
        if planet.discoveryMethod is np.nan:
            nan_list.append(planet)
    self.nan_list = nan_list
    return discovery_years
python
{ "resource": "" }
q40787
plot_confusion_matrix
train
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render confusion matrix *cm* as a colored image with annotations.

    With ``normalize=True`` each row is scaled to sum to 1 before
    plotting; the (possibly normalized) matrix is also printed.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_positions = np.arange(len(classes))
    plt.xticks(tick_positions, classes, rotation=45)
    plt.yticks(tick_positions, classes)

    cell_format = '.2f' if normalize else 'd'
    threshold = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        # Flip the text color on dark cells so it stays readable.
        plt.text(col, row, format(cm[row, col], cell_format),
                 horizontalalignment="center",
                 color="white" if cm[row, col] > threshold else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
python
{ "resource": "" }
q40788
is_zsettable
train
def is_zsettable(s):
    """Quick check that every value in mapping *s* is a real number.

    Accepts int, float and (on Python 2) long values; bools count as
    ints, mirroring the original behavior.  The bare ``long`` reference
    raised NameError on Python 3.
    """
    try:
        num_types = (int, float, long)  # noqa: F821 (Python 2 only)
    except NameError:
        # Python 3: ints are unbounded, ``long`` is gone.
        num_types = (int, float)
    return all(isinstance(v, num_types) for v in s.values())
python
{ "resource": "" }
q40789
List.trim
train
def trim(self, start, stop):
    """Trim the list to the half-open range ``[start, stop)``.

    Redis' LTRIM uses an inclusive end index, so ``stop - 1`` is passed
    to keep Python's exclusive-end slice semantics.
    """
    last_index = stop - 1
    return self.client.ltrim(self.name, start, last_index)
python
{ "resource": "" }
q40790
List.remove
train
def remove(self, value, count=1):
    """Remove occurrences of *value* from the list.

    :keyword count: number of matching values to remove (default one)
    :raises ValueError: when no matching value was removed
    """
    removed = self.client.lrem(self.name, value, num=count)
    if not removed:
        raise ValueError("%s not in list" % value)
    return removed
python
{ "resource": "" }
q40791
Set.remove
train
def remove(self, member):
    """Remove *member* from the set; it must be a member.

    :raises KeyError: when *member* is not in the set
    """
    removed = self.client.srem(self.name, member)
    if not removed:
        raise KeyError(member)
python
{ "resource": "" }
q40792
Set.pop
train
def pop(self):
    """Remove and return an arbitrary set element.

    :raises KeyError: when the set is empty (SPOP returned nothing)
    """
    member = self.client.spop(self.name)
    if member is None:
        raise KeyError()
    return member
python
{ "resource": "" }
q40793
Set.union
train
def union(self, other):
    """Return the union of this set and *other* as a new set.

    Server-side SUNION is used when *other* is also a redis-backed set
    of the same class; otherwise the members are fetched locally and a
    plain ``set`` union is computed.
    """
    if isinstance(other, self.__class__):
        return self.client.sunion([self.name, other.name])
    return self._as_set().union(other)
python
{ "resource": "" }
q40794
Set.update
train
def update(self, other):
    """Update this set with the union of itself and *other*.

    When *other* is another redis-backed set the union is computed
    server-side with SUNIONSTORE; otherwise every member of *other* is
    added individually.
    """
    if isinstance(other, self.__class__):
        return self.client.sunionstore(self.name, [self.name, other.name])
    # A list comprehension instead of ``map`` so the adds actually run
    # on Python 3, where ``map`` is lazy (same result on Python 2).
    return [self.add(member) for member in other]
python
{ "resource": "" }
q40795
Set.intersection
train
def intersection(self, other):
    """Return the intersection of this set and *other* as a new set.

    Uses server-side SINTER for two redis-backed sets of the same
    class, and a plain ``set`` intersection otherwise.
    """
    if isinstance(other, self.__class__):
        return self.client.sinter([self.name, other.name])
    return self._as_set().intersection(other)
python
{ "resource": "" }
q40796
Set.intersection_update
train
def intersection_update(self, other):
    """Replace this set with the intersection of itself and *other* (SINTERSTORE)."""
    keys = [self.name, other.name]
    return self.client.sinterstore(self.name, keys)
python
{ "resource": "" }
q40797
Set.difference_update
train
def difference_update(self, other):
    """Remove every element of *other* from this set (SDIFFSTORE)."""
    keys = [self.name, other.name]
    return self.client.sdiffstore(self.name, keys)
python
{ "resource": "" }
q40798
SortedSet.add
train
def add(self, member, score):
    """Add *member* with *score*, updating the score if already present.

    NOTE(review): this is the old redis-py ``zadd(name, member, score)``
    calling convention; newer clients take a mapping — confirm against
    the pinned redis-py version before upgrading.
    """
    result = self.client.zadd(self.name, member, score)
    return result
python
{ "resource": "" }
q40799
SortedSet.remove
train
def remove(self, member):
    """Remove *member* from the sorted set.

    :raises KeyError: when *member* is not present
    """
    removed = self.client.zrem(self.name, member)
    if not removed:
        raise KeyError(member)
python
{ "resource": "" }