_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q43200
average_over_area
train
def average_over_area(q, x, y):
    """Return the area-average of ``q`` over a rectangle.

    ``q`` is a 2-D array sampled at the locations given by the ``x``
    and ``y`` vectors; the double integral is evaluated with the
    trapezoidal rule and normalized by the rectangle's area.
    """
    x_extent = np.max(x) - np.min(x)
    y_extent = np.max(y) - np.min(y)
    inner = np.trapz(q, y, axis=0)
    total = np.trapz(inner, x)
    return total / (x_extent * y_extent)
python
{ "resource": "" }
q43201
build_plane_arrays
train
def build_plane_arrays(x, y, qlist):
    """Build 2-D array(s) out of data taken in the same plane, for
    contour plotting.

    ``x`` and ``y`` are flat coordinate vectors; each quantity in
    ``qlist`` (a single array or a list of arrays) is rearranged into
    a ``(len(yv), len(xv))`` grid. Returns ``(xv, yv, grids)``, where
    ``grids`` is a list only if ``qlist`` was a list.
    """
    return_list = type(qlist) is list
    if not return_list:
        qlist = [qlist]
    # Unique coordinate vectors: x values along the first row of the
    # plane, y values along the first column.
    xv = x[np.where(y == y[0])[0]]
    yv = y[np.where(x == x[0])[0]]
    planes = [np.zeros((len(yv), len(xv))) for _ in qlist]
    for plane, q in zip(planes, qlist):
        for row, yval in enumerate(yv):
            sel = np.where(y == yval)[0]
            plane[row, :] = q[sel]
    return xv, yv, planes if return_list else planes[0]
python
{ "resource": "" }
q43202
corr_coeff
train
def corr_coeff(x1, x2, t, tau1, tau2):
    """Compute the lagged correlation coefficient for two time series.

    The lag axis runs from ``tau1`` to ``tau2`` in steps of the sample
    spacing of ``t``. Returns ``(tau, rho)``.
    """
    dt = t[1] - t[0]
    tau = np.arange(tau1, tau2 + dt, dt)
    rho = np.zeros(len(tau))
    for n, lag in enumerate(tau):
        shift = np.abs(int(lag / dt))
        if lag >= 0:
            # Positive lag: push x2 forward in time.
            seg1 = x1[shift:-1]
            seg2 = x2[0:-1 - shift]
        else:
            # Negative lag: push x2 back in time.
            seg1 = x1[0:-shift - 1]
            seg2 = x2[shift:-1]
        seg1 = seg1 - seg1.mean()
        seg2 = seg2 - seg2.mean()
        rho[n] = np.mean(seg1 * seg2) / seg1.std() / seg2.std()
    return tau, rho
python
{ "resource": "" }
q43203
autocorr_coeff
train
def autocorr_coeff(x, t, tau1, tau2):
    """Calculate the autocorrelation coefficient of ``x``.

    Simply correlates the series with itself over the lag range
    ``tau1``..``tau2``; see ``corr_coeff`` for details.
    """
    return corr_coeff(x, x, t, tau1, tau2)
python
{ "resource": "" }
q43204
integral_scale
train
def integral_scale(u, t, tau1=0.0, tau2=1.0):
    """Calculate the integral scale of a time series.

    The autocorrelation coefficient is integrated (trapezoidal rule)
    from ``tau1`` up to its first zero crossing.
    """
    tau, rho = autocorr_coeff(u, t, tau1, tau2)
    first_cross = np.where(np.diff(np.sign(rho)))[0][0]
    return np.trapz(rho[:first_cross], tau[:first_cross])
python
{ "resource": "" }
q43205
student_t
train
def student_t(degrees_of_freedom, confidence=0.95):
    """Return the two-sided Student-t statistic for the given degrees of
    freedom and confidence level.

    The first parameter of ``scipy.stats.t.interval`` was renamed from
    ``alpha`` to ``confidence`` in SciPy 1.9 and ``alpha`` was removed
    in SciPy 1.11, so the value is passed positionally here to work on
    both old and new SciPy versions.
    """
    return scipy.stats.t.interval(confidence, df=degrees_of_freedom)[-1]
python
{ "resource": "" }
q43206
calc_uncertainty
train
def calc_uncertainty(quantity, sys_unc, mean=True):
    """Calculate the combined standard uncertainty of a quantity.

    The random component is the NaN-ignoring standard deviation,
    divided by ``sqrt(N)`` when ``mean`` is True (uncertainty of the
    mean); it is combined in quadrature with the systematic
    uncertainty ``sys_unc``.
    """
    sample_count = len(quantity)
    random_unc = np.nanstd(quantity)
    if mean:
        random_unc = random_unc / np.sqrt(sample_count)
    return np.sqrt(random_unc ** 2 + sys_unc ** 2)
python
{ "resource": "" }
q43207
get_numbers
train
def get_numbers(s):
    """Extract all integers from a string and return them in a list,
    padded with 1s so the result always has at least two entries.

    The original was Python-2-only: ``unicode`` does not exist on
    Python 3 and ``map`` returns an iterator there, so the trailing
    list concatenation raised ``TypeError``. ``map`` is now
    materialized with ``list`` and the input is coerced to text with a
    ``u''`` format (valid on both Python 2 and 3.3+).
    """
    result = list(map(int, re.findall(r'[0-9]+', u'%s' % (s,))))
    return result + [1] * (2 - len(result))
python
{ "resource": "" }
q43208
Index.put
train
def put(self, key, value, element):
    """Put an element in the index under a given key-value pair.

    @params key: Index key string
    @params value: Index value string
    @params element: Vertex or Edge element to be indexed
    """
    bucket = self.neoindex[key]
    bucket[value] = element.neoelement
python
{ "resource": "" }
q43209
Index.get
train
def get(self, key, value):
    """Get elements from the index under a given key-value pair.

    @params key: Index key string
    @params value: Index value string
    @returns A generator of Vertex or Edge objects
    """
    for raw in self.neoindex[key][value]:
        if self.indexClass == "vertex":
            yield Vertex(raw)
        elif self.indexClass == "edge":
            yield Edge(raw)
        else:
            raise TypeError(self.indexClass)
python
{ "resource": "" }
q43210
Index.remove
train
def remove(self, key, value, element):
    """Remove an element from the index under a given key-value pair.

    @params key: Index key string
    @params value: Index value string
    @params element: Vertex or Edge element to be removed
    """
    underlying = element.neoelement
    self.neoindex.delete(key, value, underlying)
python
{ "resource": "" }
q43211
Neo4jIndexableGraph.getIndices
train
def getIndices(self):
    """Return a generator over all the existing indexes.

    @returns A generator of Index objects — vertex indexes first,
    then edge indexes
    """
    node_indexes = self.neograph.nodes.indexes
    for name in node_indexes.keys():
        yield Index(name, "vertex", "manual", node_indexes.get(name))
    rel_indexes = self.neograph.relationships.indexes
    for name in rel_indexes.keys():
        yield Index(name, "edge", "manual", rel_indexes.get(name))
python
{ "resource": "" }
q43212
Subversion.get_revision
train
def get_revision(self, location): """ Return the maximum revision for all files under a given location """ # Note: taken from setuptools.command.egg_info revision = 0 for base, dirs, files in os.walk(location): if self.dirname not in dirs: dirs[:] = [] continue # no sense walking uncontrolled subdirs dirs.remove(self.dirname) entries_fn = os.path.join(base, self.dirname, 'entries') if not os.path.exists(entries_fn): ## FIXME: should we warn? continue f = open(entries_fn) data = f.read() f.close() if data.startswith('8') or data.startswith('9') or data.startswith('10'): data = list(map(str.splitlines, data.split('\n\x0c\n'))) del data[0][0] # get rid of the '8' dirurl = data[0][3] revs = [int(d[9]) for d in data if len(d)>9 and d[9]]+[0] if revs: localrev = max(revs) else: localrev = 0 elif data.startswith('<?xml'): dirurl = _svn_xml_url_re.search(data).group(1) # get repository URL revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)]+[0] if revs: localrev = max(revs) else: localrev = 0 else: logger.warn("Unrecognized .svn/entries format; skipping %s", base) dirs[:] = [] continue if base == location: base_url = dirurl+'/' # save the root url elif not dirurl.startswith(base_url): dirs[:] = [] continue # not part of the same svn tree, skip it revision = max(revision, localrev) return revision
python
{ "resource": "" }
q43213
DigitWord.load
train
def load(self, value):
    """Load the value of the DigitWord from a JSON representation of a
    list.

    ``value`` must be a JSON string encoding a list; each digit in the
    decoded list is validated before the word is stored.
    """
    if not isinstance(value, str):
        raise TypeError('Expected JSON string')
    decoded = json.loads(value)
    self._validate_word(value=decoded)
    self.word = decoded
python
{ "resource": "" }
q43214
InstallCommand._build_package_finder
train
def _build_package_finder(self, options, index_urls):
    """
    Create a package finder appropriate to this install command.
    This method is meant to be overridden by subclasses, not
    called directly.
    """
    return PackageFinder(
        find_links=options.find_links,
        index_urls=index_urls,
        use_mirrors=options.use_mirrors,
        mirrors=options.mirrors,
    )
python
{ "resource": "" }
q43215
to_etree
train
def to_etree(source, root_tag=None):
    """
    Convert various representations of an XML structure to a etree
    Element

    Args:
    source -- The source object to be converted - ET.Element\ElementTree,
    dict or string.

    Keyword args:
    root_tag -- A optional parent tag in which to wrap the xml tree
    if no root in dict representation. See dict_to_etree()

    Returns:
    A etree Element matching the source object.

    >>> to_etree("<content/>") #doctest: +ELLIPSIS
    <Element content at 0x...>
    >>> to_etree({'document': {'title': 'foo', 'list': [{'li':1}, {'li':2}]}}) #doctest: +ELLIPSIS
    <Element document at 0x...>
    >>> to_etree(ET.Element('root')) #doctest: +ELLIPSIS
    <Element root at 0x...>
    """
    if hasattr(source, 'get_root'): #XXX:
        # Already a wrapper (e.g. ElementTree) exposing its root.
        return source.get_root()
    elif isinstance(source, type(ET.Element('x'))): #XXX:
        # cElementTree.Element isn't exposed directly
        return source
    elif isinstance(source, basestring):
        # NOTE(review): `basestring` is Python-2-only — this module
        # appears to target Python 2; confirm before porting.
        try:
            return ET.fromstring(source)
        except:
            raise XMLError(source)
    elif hasattr(source, 'keys'):
        # Dict.
        return dict_to_etree(source, root_tag)
    else:
        raise XMLError(source)
python
{ "resource": "" }
q43216
to_raw_xml
train
def to_raw_xml(source):
    """
    Convert various representations of an XML structure to a normal XML
    string.

    Args:
    source -- The source object to be converted - ET.Element, dict or
    string.

    Returns:
    A raw xml string matching the source object.

    >>> to_raw_xml("<content/>")
    '<content/>'
    >>> to_raw_xml({'document': {'title': 'foo', 'list': [{'li':1}, {'li':2}]}})
    '<document><list><li>1</li><li>2</li></list><title>foo</title></document>'
    >>> to_raw_xml(ET.Element('root'))
    '<root/>'
    """
    if isinstance(source, basestring):
        # Already a string: returned untouched (py2 `basestring`).
        return source
    elif hasattr(source, 'getiterator'):
        # Element or ElementTree.
        return ET.tostring(source, encoding="utf-8")
    elif hasattr(source, 'keys'):
        # Dict.
        xml_root = dict_to_etree(source)
        return ET.tostring(xml_root, encoding="utf-8")
    else:
        raise TypeError("Accepted representations of a document are string, dict and etree")
python
{ "resource": "" }
q43217
parse_arguments
train
def parse_arguments(filters, arguments, modern=False):
    """
    Return a dict of parameters.

    Take a list of filters and for each try to get the corresponding
    value in arguments or a default value. Then check that value's type.

    The @modern parameter indicates how the arguments should be
    interpreted. The old way is that you always specify a list and in
    the list you write the names of types as strings. I.e. instead of
    `str` you write `'str'`. The modern way allows you to specify
    arguments by real Python types and entering it as a list means you
    accept and expect it to be a list.

    For example, using the modern way:

        filters = [
            ("param1", "default", [str]),
            ("param2", None, int),
            ("param3", ["list", "of", 4, "values"], [str])
        ]
        arguments = {
            "param1": "value1",
            "unknown": 12345
        }
        =>  {
                "param1": ["value1"],
                "param2": 0,
                "param3": ["list", "of", "4", "values"]
            }

    And an example for the old way:

        filters = [
            ("param1", "default", ["list", "str"]),
            ("param2", None, "int"),
            ("param3", ["list", "of", 4, "values"], ["list", "str"])
        ]
        arguments = {
            "param1": "value1",
            "unknown": 12345
        }
        =>  {
                "param1": ["value1"],
                "param2": 0,
                "param3": ["list", "of", "4", "values"]
            }

    The reason for having the modern and the non-modern way is
    transition of legacy code. One day it will all be the modern way.
    """
    params = DotDict()
    # Each filter is (name,) or (name, default) or (name, default, types).
    for i in filters:
        count = len(i)
        param = None
        if count <= 1:
            param = arguments.get(i[0])
        else:
            param = arguments.get(i[0], i[1])
        # proceed and do the type checking
        if count >= 3:
            types = i[2]
            if modern:
                if isinstance(types, list) and param is not None:
                    # A one-element list of a real type means "a list of
                    # that type"; scalars are wrapped first.
                    assert len(types) == 1
                    if not isinstance(param, list):
                        param = [param]
                    param = [check_type(x, types[0]) for x in param]
                else:
                    param = check_type(param, types)
            else:
                # Legacy mode: types are strings such as "list", "str".
                if not isinstance(types, list):
                    types = [types]
                for t in reversed(types):
                    if t == "list" and not isinstance(param, list):
                        # Coerce a scalar (or empty value) into a list.
                        if param is None or param == '':
                            param = []
                        else:
                            param = [param]
                    elif t == "list" and isinstance(param, list):
                        continue
                    elif isinstance(param, list) and "list" not in types:
                        # A list arrived where none was expected: join it
                        # back into a single space-separated value.
                        param = " ".join(param)
                        param = check_type(param, t)
                    elif isinstance(param, list):
                        param = [check_type(x, t) for x in param]
                    else:
                        param = check_type(param, t)
        params[i[0]] = param
    return params
python
{ "resource": "" }
q43218
check_type
train
def check_type(param, datatype):
    """
    Make sure that param is of type datatype and return it.

    If param is None, return it.
    If param is an instance of datatype, return it.
    If param is not an instance of datatype and is not None, cast it
    as datatype and return it.
    """
    if param is None:
        return param
    # Types exposing a callable `clean` method (custom validators) get
    # first crack at the value.
    if getattr(datatype, 'clean', None) and callable(datatype.clean):
        try:
            return datatype.clean(param)
        except ValueError:
            raise BadArgumentError(param)
    elif isinstance(datatype, str):
        # You've given it something like `'bool'` as a string.
        # This is the legacy way of doing it.
        datatype = {
            'str': str,
            'bool': bool,
            'float': float,
            'date': datetime.date,
            'datetime': datetime.datetime,
            'timedelta': datetime.timedelta,
            'json': 'json',  # exception
            'int': int,
        }[datatype]
    # NOTE(review): `basestring` below is Python-2-only.
    if datatype is str and not isinstance(param, basestring):
        try:
            param = str(param)
        except ValueError:
            param = str()
    elif datatype is int and not isinstance(param, int):
        try:
            param = int(param)
        except ValueError:
            param = int()
    elif datatype is bool and not isinstance(param, bool):
        # Truthy strings only; everything else becomes False.
        param = str(param).lower() in ("true", "t", "1", "y", "yes")
    elif (datatype is datetime.datetime and
          not isinstance(param, datetime.datetime)):
        try:
            param = dtutil.string_to_datetime(param)
        except ValueError:
            param = None
    elif datatype is datetime.date and not isinstance(param, datetime.date):
        try:
            param = dtutil.string_to_datetime(param).date()
        except ValueError:
            param = None
    elif (datatype is datetime.timedelta and
          not isinstance(param, datetime.timedelta)):
        try:
            param = dtutil.strHoursToTimeDelta(param)
        except ValueError:
            param = None
    elif datatype == "json" and isinstance(param, basestring):
        try:
            param = json.loads(param)
        except ValueError:
            # Unparseable JSON is silently dropped to None.
            param = None
    return param
python
{ "resource": "" }
q43219
LocalRefResolver.resolve_local
train
def resolve_local(self, uri, base_uri, ref): """ Resolve a local ``uri``. Does not check the store first. :argument str uri: the URI to resolve :returns: the retrieved document """ # read it from the filesystem file_path = None # make the reference saleskingstyle item_name = None if (uri.startswith(u"file") or uri.startswith(u"File")): if ref.startswith(u"./"): ref = ref.split(u"./")[-1] org_ref = ref if ref.find(u"#properties") != -1: ref = ref.split(u"#properties")[0] if ref.find(u".json") != -1: item_name = ref.split(u".json")[0] # on windwos systesm this needs to happen if base_uri.startswith(u"file://") is True: base_uri = base_uri.split(u"file://")[1] elif base_uri.startswith(u"File://") is True: base_uri = base_uri.split(u"File://")[1] file_path = os.path.join(base_uri, ref) result = None try: schema_file = open(file_path, "r").read() result = json.loads(schema_file.decode("utf-8")) except IOError as e: log.error(u"file not found %s" % e) msg = "Could not find schema file. %s" % file_path raise SalesKingException("SCHEMA_NOT_FOUND", msg) if self.cache_remote: self.store[uri] = result return result
python
{ "resource": "" }
q43220
cached
train
def cached(func):
    """
    A decorator function to cache values.

    It uses the decorated function's arguments as the keys to determine
    if the function has been called previously.

    Bug fix: the original key used ``sorted(args)``, which (a) mapped
    order-sensitive calls like ``f(1, 2)`` and ``f(2, 1)`` to the same
    cache entry, and (b) crashed on unorderable argument types.
    Positional-argument order is now preserved; keyword arguments are
    still sorted so the key is independent of keyword order.
    """
    cache = {}

    @f.wraps(func)
    def wrapper(*args, **kwargs):
        key = func.__name__ + str(args) + str(sorted(kwargs.items()))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]

    return wrapper
python
{ "resource": "" }
q43221
retry_ex
train
def retry_ex(callback, times=3, cap=120000):
    """
    Retry a callback function if an exception is raised.

    :param function callback: The function to call
    :keyword int times: Number of times to retry on initial failure
    :keyword int cap: Maximum wait time in milliseconds
    :returns: The return value of the callback
    :raises Exception: If the callback raises an exception after
        exhausting all retries

    The original bare ``except:`` also caught ``KeyboardInterrupt`` and
    ``SystemExit`` and retried through them; it is narrowed to
    ``except Exception:`` so those propagate immediately.
    """
    for attempt in range(times + 1):
        if attempt > 0:
            # Back off before each retry; retry_wait_time returns ms.
            time.sleep(retry_wait_time(attempt, cap) / 1000.0)
        try:
            return callback()
        except Exception:
            if attempt == times:
                raise
python
{ "resource": "" }
q43222
retry_bool
train
def retry_bool(callback, times=3, cap=120000):
    """
    Retry a callback function while it returns a falsy value.

    :param function callback: The function to call
    :keyword int times: Number of times to retry on initial failure
    :keyword int cap: Maximum wait time in milliseconds
    :returns: The return value of the callback
    :rtype: bool
    """
    attempt = 0
    while True:
        if attempt > 0:
            # Back off before each retry; retry_wait_time returns ms.
            time.sleep(retry_wait_time(attempt, cap) / 1000.0)
        result = callback()
        if result or attempt == times:
            return result
        attempt += 1
python
{ "resource": "" }
q43223
retryable
train
def retryable(retryer=retry_ex, times=3, cap=120000):
    """
    A decorator factory that makes a function retry.

    By default the retry occurs when an exception is thrown; pass a
    different ``retryer`` (e.g. :py:func:`retry_bool`) to change that.
    Note that the decorator must be called even when not given keyword
    arguments.

    :param function retryer: A function to handle retries
    :param int times: Number of times to retry on initial failure
    :param int cap: Maximum wait time in milliseconds

    :Example:

    ::

        @retryable()
        def can_fail():
            ....

        @retryable(retryer=retry_bool, times=10)
        def can_fail_bool():
            ....
    """
    def decorate(func):
        @f.wraps(func)
        def wrapper(*args, **kwargs):
            return retryer(lambda: func(*args, **kwargs), times, cap)
        return wrapper
    return decorate
python
{ "resource": "" }
q43224
ensure_environment
train
def ensure_environment(variables):
    """
    Check os.environ to ensure that a given collection of variables has
    been set.

    :param variables: A collection of environment variable names
    :returns: os.environ
    :raises IncompleteEnvironment: if any variables are not set; the
        exception carries the missing names as its second argument
    """
    missing = [name for name in variables if name not in os.environ]
    if missing:
        message = 'Environment variables not set: {}'.format(
            ', '.join(missing))
        raise IncompleteEnvironment(message, missing)
    return os.environ
python
{ "resource": "" }
q43225
change_column_length
train
def change_column_length(table: Table, column: Column, length: int, engine: Engine) -> None:
    """
    Widen the supplied column to ``length`` if it is currently shorter.

    Issues an ``ALTER TABLE ... ALTER COLUMN`` statement against the
    engine after updating the in-memory column type.
    """
    if column.type.length < length:
        print("Changing length of {} from {} to {}".format(
            column, column.type.length, length))
        column.type.length = length
        engine.execute(
            'ALTER TABLE {table} ALTER COLUMN {column_name} TYPE {column_type}'.format(
                table=table,
                column_name=column.name,
                column_type=column.type.compile(engine.dialect),
            )
        )
python
{ "resource": "" }
q43226
isdir
train
def isdir(path, message):
    """
    Raise an exception if the given directory does not exist.

    :param path: The path to a directory to be tested
    :param message: A custom message to report in the exception
    :raises: FileNotFoundError
    """
    if os.path.isdir(path):
        return
    detail = "{}: {}".format(message, os.strerror(errno.ENOENT))
    raise FileNotFoundError(errno.ENOENT, detail, path)
python
{ "resource": "" }
q43227
_HeraldInputStream.readline
train
def readline(self):
    """Wait for and return a line of input from the Herald client."""
    payload = {"session_id": self._session}
    reply = self._herald.send(
        self._peer, beans.Message(MSG_CLIENT_PROMPT, payload))
    if reply.subject == MSG_SERVER_CLOSE:
        # The client closed its shell
        raise EOFError
    return reply.content
python
{ "resource": "" }
q43228
make_frog_fresco
train
def make_frog_fresco(text, width, padding=8):
    """Format your lovely text into a speech bubble spouted by this
    adorable little frog."""
    stem = r' /'
    frog = r"""
{text}
{stem}
  @..@
 (----)
( >__< )
^^ ~~ ^^"""
    indent = ' ' * (len(stem) - 1)
    bubble = textwrap.fill(text,
                           width=width - padding,
                           initial_indent=indent,
                           subsequent_indent=indent)
    return frog.format(stem=stem, text=bubble)
python
{ "resource": "" }
q43229
mgz_to_nifti
train
def mgz_to_nifti(filename, prefix=None, gzip=True):
    '''Convert ``filename`` to a NIFTI file using ``mri_convert``'''
    setup_freesurfer()
    if prefix is None:
        prefix = nl.prefix(filename) + '.nii'
    if gzip and not prefix.endswith('.gz'):
        prefix += '.gz'
    converter = os.path.join(freesurfer_home, 'bin', 'mri_convert')
    nl.run([converter, filename, prefix], products=prefix)
python
{ "resource": "" }
q43230
guess_home
train
def guess_home():
    '''If ``freesurfer_home`` is not set, try to make an intelligent
    guess at it'''
    global freesurfer_home
    if freesurfer_home is not None:
        return True
    # If freeview is already on the PATH, derive the install root from
    # its (symlink-resolved) location.
    fv = nl.which('freeview')
    if fv:
        freesurfer_home = parpar_dir(os.path.realpath(fv))
        return True
    # Otherwise fall back to a list of well-known install locations.
    for candidate in guess_locations:
        if os.path.exists(candidate):
            freesurfer_home = candidate
            return True
    return False
python
{ "resource": "" }
q43231
setup_freesurfer
train
def setup_freesurfer():
    '''Setup the freesurfer environment variables'''
    # `environ_setup` is a module-level flag checked by recon_all().
    # The original assignment at the end of this function created a
    # *local* variable instead, so the (slow) setup script was re-run
    # on every call; declaring it global makes the flag stick.
    global environ_setup
    guess_home()
    os.environ['FREESURFER_HOME'] = freesurfer_home
    os.environ['SUBJECTS_DIR'] = subjects_dir
    # Run the setup script and collect the resulting environment:
    o = subprocess.check_output(
        ['bash', '-c', 'source %s/SetUpFreeSurfer.sh && env' % freesurfer_home])
    env = [(a.partition('=')[0], a.partition('=')[2])
           for a in o.split('\n') if len(a.strip()) > 0]
    for e in env:
        os.environ[e[0]] = e[1]
    environ_setup = True
python
{ "resource": "" }
q43232
recon_all
train
def recon_all(subj_id, anatomies):
    '''Run the ``recon_all`` script'''
    if not environ_setup:
        setup_freesurfer()
    # Accept a single anatomy filename or a list of them.
    if isinstance(anatomies, basestring):
        anatomies = [anatomies]
    cmd = [os.path.join(freesurfer_home, 'bin', 'recon-all'),
           '-all', '-subjid', subj_id]
    cmd += [['-i', anat] for anat in anatomies]
    nl.run(cmd)
python
{ "resource": "" }
q43233
parse_docstring
train
def parse_docstring(docstring):
    """
    Parse a PEP-257 docstring.

    SHORT -> blank line -> LONG

    Returns ``(short_desc, long_desc)``; both default to ''.
    """
    short_desc = ''
    long_desc = ''
    if docstring:
        cleaned = trim(docstring.lstrip('\n'))
        parts = cleaned.split('\n\n', 1)
        short_desc = parts[0].strip().replace('\n', ' ')
        if len(parts) > 1:
            long_desc = parts[1].strip()
    return short_desc, long_desc
python
{ "resource": "" }
q43234
CLI.load_commands
train
def load_commands(self, obj):
    """
    Load commands defined on an arbitrary object.

    All functions decorated with the :func:`subparse.command` decorator
    attached to the specified object will be loaded. The object may be
    a dictionary, an arbitrary python object, or a dotted path. The
    dotted path may be absolute, or relative to the current package by
    specifying a leading '.' (e.g. ``'.commands'``).
    """
    if isinstance(obj, str):
        if obj[:1] in ('.', ':'):
            # Relative path: resolve against the caller's package.
            package = caller_package()
            if obj in ('.', ':'):
                obj = package.__name__
            else:
                obj = package.__name__ + obj
        obj = pkg_resources.EntryPoint.parse('x=%s' % obj).resolve()
    command.discover_and_call(obj, self.command)
python
{ "resource": "" }
q43235
CLI.load_commands_from_entry_point
train
def load_commands_from_entry_point(self, specifier):
    """
    Load commands defined within a pkg_resources entry point.

    Each entry is a module that is searched (non-recursively) for
    functions decorated with the :func:`subparse.command` decorator.
    """
    for entry in pkg_resources.iter_entry_points(specifier):
        command.discover_and_call(entry.load(), self.command)
python
{ "resource": "" }
q43236
CLI.run
train
def run(self, argv=None):
    """
    Run the command-line application.

    Dispatches to the selected function, or raises ``SystemExit`` with
    the appropriate usage information if argument parsing fails. The
    default ``argv`` is equivalent to ``sys.argv[1:]``.
    """
    if argv is None:  # pragma: no cover
        argv = sys.argv[1:]
    argv = [str(item) for item in argv]
    meta, args = parse_args(self, argv)
    factory = contextmanager(make_generator(self.context_factory))
    with factory(self, args, **meta.context_kwargs) as context:
        main = load_main(meta)
        return main(context, args) or 0
python
{ "resource": "" }
q43237
remove_small_objects
train
def remove_small_objects(image, min_size=50, connectivity=1):
    """Remove small objects from a boolean image.

    Thin wrapper around ``skimage.morphology.remove_small_objects``.

    :param image: boolean numpy array or
        :class:`jicbioimage.core.image.Image`
    :returns: boolean :class:`jicbioimage.core.image.Image`
    """
    return skimage.morphology.remove_small_objects(
        image,
        min_size=min_size,
        connectivity=connectivity)
python
{ "resource": "" }
q43238
invert
train
def invert(image):
    """Return an inverted image of the same dtype.

    Assumes the full range of the input dtype is in use and that no
    negative values are present in the input image.

    :param image: :class:`jicbioimage.core.image.Image`
    :returns: inverted image of the same dtype as the input
    """
    if image.dtype == bool:
        return np.logical_not(image)
    dtype_max = np.iinfo(image.dtype).max
    full_scale = np.ones(image.shape, dtype=image.dtype) * dtype_max
    return full_scale - image
python
{ "resource": "" }
q43239
CompletedProcess.check_returncode
train
def check_returncode(self):
    """Raise CalledProcessError if the exit code is non-zero."""
    if not self.returncode:
        return
    raise CalledProcessError(self.returncode, self.args,
                             self.stdout, self.stderr)
python
{ "resource": "" }
q43240
double_lorgauss
train
def double_lorgauss(x, p):
    """Evaluate a normalized distribution that is a mixture of a
    double-sided Gaussian and a double-sided Lorentzian.

    Parameters
    ----------
    x : float or array-like
        Value(s) at which to evaluate distribution
    p : array-like
        Input parameters: mu (mode of distribution), sig1 (LH Gaussian
        width), sig2 (RH Gaussian width), gam1 (LH Lorentzian width),
        gam2 (RH Lorentzian width), G1 (LH Gaussian "strength"),
        G2 (RH Gaussian "strength").

    Returns
    -------
    values : float or array-like
        Double LorGauss distribution evaluated at input(s). If a single
        value is provided, a single value is returned.

    Notes
    -----
    Fixes a typo in the original (``gab2 = abs(gam2)``) which left
    ``gam2`` un-absolute-valued, so a negative RH Lorentzian width
    produced invalid (even singular) results.
    """
    mu, sig1, sig2, gam1, gam2, G1, G2 = p
    gam1 = float(gam1)
    gam2 = float(gam2)
    G1 = abs(G1)
    G2 = abs(G2)
    sig1 = abs(sig1)
    sig2 = abs(sig2)
    gam1 = abs(gam1)
    gam2 = abs(gam2)  # was "gab2 = abs(gam2)", a no-op typo

    # Lorentzian "strengths" are fixed by the normalization constraint
    # (the four component weights sum to 4).
    L2 = (gam1 / (gam1 + gam2)) * (
        (gam2 * np.pi * G1) / (sig1 * np.sqrt(2 * np.pi))
        - (gam2 * np.pi * G2) / (sig2 * np.sqrt(2 * np.pi))
        + (gam2 / gam1) * (4 - G1 - G2)
    )
    L1 = 4 - G1 - G2 - L2

    # Left-hand and right-hand mixtures of Gaussian + Lorentzian.
    y1 = G1 / (sig1 * np.sqrt(2 * np.pi)) * np.exp(-0.5 * (x - mu) ** 2 / sig1 ** 2) + \
        L1 / (np.pi * gam1) * gam1 ** 2 / ((x - mu) ** 2 + gam1 ** 2)
    y2 = G2 / (sig2 * np.sqrt(2 * np.pi)) * np.exp(-0.5 * (x - mu) ** 2 / sig2 ** 2) + \
        L2 / (np.pi * gam2) * gam2 ** 2 / ((x - mu) ** 2 + gam2 ** 2)

    lo = (x < mu)
    hi = (x >= mu)
    return y1 * lo + y2 * hi
python
{ "resource": "" }
q43241
doublegauss
train
def doublegauss(x, p):
    """Evaluate a normalized two-sided Gaussian distribution.

    Parameters
    ----------
    x : float or array-like
        Value(s) at which to evaluate distribution
    p : array-like
        Parameters of distribution: (mu: mode of distribution,
        sig1: LH width, sig2: RH width)

    Returns
    -------
    value : float or array-like
        Distribution evaluated at input value(s); a scalar in gives a
        scalar out.
    """
    mu, sig1, sig2 = p
    x = np.atleast_1d(x)
    # Common amplitude normalizes the total area to one.
    norm = 1. / (np.sqrt(2 * np.pi) * (sig1 + sig2) / 2.)
    left = norm * np.exp(-(x - mu) ** 2 / (2 * sig1 ** 2))
    right = norm * np.exp(-(x - mu) ** 2 / (2 * sig2 ** 2))
    y = x * 0
    below = np.where(x < mu)
    above = np.where(x >= mu)
    y[below] = left[below]
    y[above] = right[above]
    return y[0] if np.size(x) == 1 else y
python
{ "resource": "" }
q43242
doublegauss_cdf
train
def doublegauss_cdf(x, p):
    """Cumulative distribution function for the two-sided Gaussian.

    Parameters
    ----------
    x : float or array-like
        Input values at which to calculate CDF.
    p : array-like
        Parameters of distribution: (mu: mode of distribution,
        sig1: LH width, sig2: RH width)
    """
    x = np.atleast_1d(x)
    mu, sig1, sig2 = p
    sig1 = np.absolute(sig1)
    sig2 = np.absolute(sig2)
    # Each half carries weight proportional to its width.
    w1 = float(sig1) / (sig1 + sig2)
    w2 = float(sig2) / (sig1 + sig2)
    left = w1 * (1 + erf((x - mu) / np.sqrt(2 * sig1 ** 2)))
    right = w1 + w2 * erf((x - mu) / np.sqrt(2 * sig2 ** 2))
    below = x < mu
    above = x >= mu
    return left * below + right * above
python
{ "resource": "" }
q43243
fit_doublegauss_samples
train
def fit_doublegauss_samples(samples, **kwargs):
    """Fit a two-sided Gaussian to a set of samples.

    Calculates the 0.16, 0.5, and 0.84 quantiles and passes these to
    `fit_doublegauss` for fitting.

    Parameters
    ----------
    samples : array-like
        Samples to which to fit the Gaussian.
    kwargs
        Keyword arguments passed to `fit_doublegauss`.

    Notes
    -----
    Uses floor division (``N // 2``) for the median index: the original
    ``N/2`` produced a float under Python 3 and raised ``TypeError``
    when used as an array index.
    """
    sorted_samples = np.sort(samples)
    N = len(samples)
    med = sorted_samples[N // 2]
    siglo = med - sorted_samples[int(0.16 * N)]
    sighi = sorted_samples[int(0.84 * N)] - med
    return fit_doublegauss(med, siglo, sighi, median=True, **kwargs)
python
{ "resource": "" }
q43244
fit_doublegauss
train
def fit_doublegauss(med,siglo,sighi,interval=0.683,p0=None,median=False,return_distribution=True):
    """Fits a two-sided Gaussian distribution to match a given
    confidence interval.

    The center of the distribution may be either the median or the
    mode.

    Parameters
    ----------
    med : float
        The center of the distribution to which to fit. By default
        this is the mode unless the `median` keyword is set to True.
    siglo : float
        Value at lower quantile (`q1 = 0.5 - interval/2`) to fit.
        Often this is the "lower error bar."
    sighi : float
        Value at upper quantile (`q2 = 0.5 + interval/2`) to fit.
        Often this is the "upper error bar."
    interval : float, optional
        The confidence interval enclosed by the provided error bars.
        Default is 0.683 (1-sigma).
    p0 : array-like, optional
        Initial guess `doublegauss` parameters for the fit
        (`mu, sig1, sig2`).
    median : bool, optional
        Whether to treat the `med` parameter as the median or mode
        (default will be mode).
    return_distribution : bool, optional
        If `True`, the function returns a `DoubleGauss_Distribution`
        object; otherwise just the fitted parameters.
    """
    if median:
        # Fit all three quantiles (lower, median, upper) with mu free.
        q1 = 0.5 - (interval/2)
        q2 = 0.5 + (interval/2)
        targetvals = np.array([med-siglo,med,med+sighi])
        qvals = np.array([q1,0.5,q2])
        def objfn(pars):
            # Residuals between the model CDF and the target quantiles.
            logging.debug('{}'.format(pars))
            logging.debug('{} {}'.format(doublegauss_cdf(targetvals,pars),qvals))
            return doublegauss_cdf(targetvals,pars) - qvals
        if p0 is None:
            p0 = [med,siglo,sighi]
        pfit,success = leastsq(objfn,p0)
    else:
        # Mode fixed at `med`; only the two widths are free parameters.
        q1 = 0.5 - (interval/2)
        q2 = 0.5 + (interval/2)
        targetvals = np.array([med-siglo,med+sighi])
        qvals = np.array([q1,q2])
        def objfn(pars):
            params = (med,pars[0],pars[1])
            return doublegauss_cdf(targetvals,params) - qvals
        if p0 is None:
            p0 = [siglo,sighi]
        pfit,success = leastsq(objfn,p0)
        pfit = (med,pfit[0],pfit[1])
    if return_distribution:
        dist = DoubleGauss_Distribution(*pfit)
        return dist
    else:
        return pfit
python
{ "resource": "" }
q43245
Distribution.pctile
train
def pctile(self, pct, res=1000):
    """Return the desired percentile of the distribution.

    Only works if the distribution is properly normalized. Mimics the
    `ppf` method of `scipy.stats` random variates by gridding the CDF
    at resolution ``res`` and returning the closest grid point — so it
    is not as precise as an analytic ppf.

    Parameters
    ----------
    pct : float
        Percentile between 0 and 1.
    res : int, optional
        The resolution at which to grid the CDF.

    Returns
    -------
    percentile : float
    """
    grid = np.linspace(self.minval, self.maxval, res)
    distance = np.absolute(pct - self.cdf(grid))
    return grid[np.argmin(distance)]
python
{ "resource": "" }
q43246
Distribution.save_hdf
train
def save_hdf(self,filename,path='',res=1000,logspace=False):
    """Saves distribution to an HDF5 file.

    Saves a pandas `DataFrame` object containing tabulated pdf and cdf
    values at a specified resolution. After saving to a particular
    path, a distribution may be regenerated using the
    `Distribution_FromH5` subclass.

    Parameters
    ----------
    filename : string
        File in which to save the distribution. Should end in .h5.
    path : string, optional
        Path in which to save the distribution within the .h5 file. By
        default this is an empty string, which will lead to saving the
        `fns` dataframe at the root level of the file.
    res : int, optional
        Resolution at which to grid the distribution for saving.
    logspace : bool, optional
        Sets whether the tabulated function should be gridded with log
        or linear spacing. Default is logspace=False (linear gridding).
    """
    # Grid the support either logarithmically or linearly.
    if logspace:
        vals = np.logspace(np.log10(self.minval),
                           np.log10(self.maxval),
                           res)
    else:
        vals = np.linspace(self.minval,self.maxval,res)
    # Tabulate the pdf (calling self) and cdf on the grid.
    d = {'vals':vals,
         'pdf':self(vals),
         'cdf':self.cdf(vals)}
    df = pd.DataFrame(d)
    df.to_hdf(filename,path+'/fns')
    # Raw samples, if present, are stored alongside the tables.
    if hasattr(self,'samples'):
        s = pd.Series(self.samples)
        s.to_hdf(filename,path+'/samples')
    # Stash constructor keywords and the concrete type as HDF5 node
    # attributes so the distribution can be reconstructed on load.
    store = pd.HDFStore(filename)
    attrs = store.get_storer('{}/fns'.format(path)).attrs
    attrs.keywords = self.keywords
    attrs.disttype = type(self)
    store.close()
python
{ "resource": "" }
q43247
Distribution.plot
train
def plot(self, minval=None, maxval=None, fig=None, log=False,
         npts=500, **kwargs):
    """Plot the distribution's pdf over ``[minval, maxval]``.

    Parameters
    ----------
    minval, maxval : float, optional
        Plot range; default to ``self.minval``/``self.maxval``.  Required
        explicitly when the distribution's bounds are infinite.
    fig : None or int, optional
        Passed to ``setfig``: ``None`` makes a new figure, a non-zero int
        targets that figure (clearing it), and 0 overplots on the current
        axes.
    log : bool, optional
        Use logarithmic x-spacing of the plotted points.
    npts : int, optional
        Number of points to plot.
    kwargs
        Forwarded to ``plt.plot``.

    Raises
    ------
    ValueError
        If the effective bounds are not both finite.
    """
    if minval is None:
        minval = self.minval
    if maxval is None:
        maxval = self.maxval
    if maxval == np.inf or minval == -np.inf:
        raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)')

    if log:
        xs = np.logspace(np.log10(minval), np.log10(maxval), npts)
    else:
        xs = np.linspace(minval, maxval, npts)

    setfig(fig)
    # Evaluate the pdf once instead of twice (plot + ylim).
    ys = self(xs)
    plt.plot(xs, ys, **kwargs)
    plt.xlabel(self.name)
    # ``ylim(ymin=..., ymax=...)`` keyword names were removed in
    # matplotlib 3.0; positional arguments work on all versions.
    plt.ylim(0, ys.max() * 1.2)
python
{ "resource": "" }
q43248
Distribution.resample
train
def resample(self, N, minval=None, maxval=None, log=False, res=1e4):
    """Return ``N`` random samples drawn according to the distribution.

    Mirrors the ``rvs`` method of ``scipy.stats`` random variates:
    uniform deviates are mapped through the inverse CDF using a
    closest-matching grid.

    Parameters
    ----------
    N : int
        Number of samples to return.
    minval, maxval : float, optional
        Resampling bounds; default to ``self.minval_cdf``/``self.minval``
        (and the max equivalents).
    log : bool, optional
        Grid the CDF logarithmically instead of linearly.
    res : int, optional
        Resolution of the CDF grid.

    Returns
    -------
    values : ndarray
        ``N`` samples.

    Raises
    ------
    ValueError
        If the effective bounds are infinite (the grid-based approach
        requires finite limits).
    """
    N = int(N)
    # np.linspace/np.logspace require an integer sample count; the
    # default 1e4 is a float and raises TypeError on modern numpy.
    res = int(res)
    if minval is None:
        minval = getattr(self, 'minval_cdf', self.minval)
    if maxval is None:
        maxval = getattr(self, 'maxval_cdf', self.maxval)
    if maxval == np.inf or minval == -np.inf:
        raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')

    u = rand.random(size=N)
    if log:
        vals = np.logspace(np.log10(minval), np.log10(maxval), res)
    else:
        vals = np.linspace(minval, maxval, res)

    # The CDF may be flat in places, so keep only unique CDF values.
    ys, yinds = np.unique(self.cdf(vals), return_index=True)
    vals = vals[yinds]
    inds = np.digitize(u, ys)
    # Guard against a deviate falling at/above the last tabulated CDF
    # value, which would index one past the end of ``vals``.
    inds = np.clip(inds, 0, len(vals) - 1)
    return vals[inds]
python
{ "resource": "" }
q43249
Hist_Distribution.resample
train
def resample(self, N):
    """Bootstrap-resample ``N`` values from the stored samples.

    Parameters
    ----------
    N : int
        Number of samples to draw (with replacement).
    """
    picks = rand.randint(len(self.samples), size=N)
    return self.samples[picks]
python
{ "resource": "" }
q43250
Box_Distribution.resample
train
def resample(self, N):
    """Draw ``N`` uniform random samples from ``[minval, maxval)``."""
    span = self.maxval - self.minval
    return self.minval + rand.random(size=N) * span
python
{ "resource": "" }
q43251
DoubleGauss_Distribution.resample
train
def resample(self, N, **kwargs):
    """Draw ``N`` random samples from the double-gaussian distribution.

    A draw comes from the upper half-gaussian (sigma = ``sighi``) with
    probability ``sighi / (sighi + siglo)`` and from the lower
    half-gaussian otherwise.
    """
    lo_draws = self.mu - np.absolute(rand.normal(size=N) * self.siglo)
    hi_draws = self.mu + np.absolute(rand.normal(size=N) * self.sighi)
    p_hi = float(self.sighi) / (self.sighi + self.siglo)
    use_hi = rand.random(size=N) < p_hi
    return np.where(use_hi, hi_draws, lo_draws)
python
{ "resource": "" }
q43252
tzname_in_python2
train
def tzname_in_python2(myfunc):
    """Change unicode output into bytestrings in Python 2.

    The ``tzname()`` API changed in Python 3: it used to return bytes but
    now returns unicode strings.  On Python 2 the wrapped function's
    result is re-encoded to bytes; on Python 3 it is returned unchanged.
    """
    from functools import wraps

    # ``wraps`` preserves the wrapped function's name/docstring, which the
    # original wrapper lost.
    @wraps(myfunc)
    def inner_func(*args, **kwargs):
        if PY3:
            return myfunc(*args, **kwargs)
        else:
            return myfunc(*args, **kwargs).encode()
    return inner_func
python
{ "resource": "" }
q43253
ac3
train
def ac3(space):
    """AC-3 algorithm.

    Reduces the domains of the variables by propagating constraints to
    ensure arc consistency.

    :param Space space: The space to reduce
    """
    # Determine arcs.  This is pessimistic: each constraint is assumed to
    # pairwisely couple every variable it affects.
    arcs = {name: set() for name in space.variables}
    for const in space.constraints:
        for a, b in product(const.vnames, const.vnames):
            if a != b:
                arcs[a].add(b)

    # Enforce node consistency first.
    for vname in space.variables:
        for const in space.constraints:
            _unary(space, const, vname)

    # Assemble the initial work list from every variable pair.
    worklist = set()
    for v1 in space.variables:
        for v2 in space.variables:
            for const in space.constraints:
                if _binary(space, const, v1, v2):
                    for name in arcs[v1]:
                        worklist.add((v1, name))

    # Work through the work list until no more revisions occur.
    while worklist:
        v1, v2 = worklist.pop()
        for const in space.constraints:
            if _binary(space, const, v1, v2):
                for vname in arcs[v1]:
                    worklist.add((v1, vname))
python
{ "resource": "" }
q43254
_unary
train
def _unary(space,const,name): """ Reduce the domain of variable name to be node-consistent with this constraint, i.e. remove those values for the variable that are not consistent with the constraint. returns True if the domain of name was modified """ if not name in const.vnames: return False if space.variables[name].discrete: values = const.domains[name] else: values = const.domains[name] space.domains[name] = space.domains[name].intersection(values) return True
python
{ "resource": "" }
q43255
solve
train
def solve(space, method='backtrack', ordering=None):
    """Generator for all solutions.

    :param str method: the solution method to employ
    :param ordering: an optional parameter ordering
    :type ordering: sequence of parameter names

    Methods:

    :"backtrack": simple chronological backtracking
    :"ac-lookahead": full lookahead
    """
    if ordering is None:
        ordering = list(space.variables.keys())
    if not space.is_discrete():
        raise ValueError("Can not backtrack on non-discrete space")
    if method == 'backtrack':
        yield from _backtrack(space, {}, ordering)
    elif method == 'ac-lookahead':
        yield from _lookahead(space, {}, ordering)
    else:
        raise ValueError("Unknown solution method: %s" % method)
python
{ "resource": "" }
q43256
get_task_parser
train
def get_task_parser(task):
    """Construct an ArgumentParser for the given task.

    Returns a tuple ``(parser, proxy_args)``.  If ``task`` accepts
    varargs only, ``proxy_args`` is True.  If it accepts only positional
    and explicit keyword args, ``proxy_args`` is False.

    Raises ``InvalidTask`` for any other signature.
    """
    # ``inspect.getargspec`` was removed in Python 3.11; use the
    # compatible ``getfullargspec`` instead.
    spec = inspect.getfullargspec(task)
    args, varargs, keywords = spec.args, spec.varargs, spec.varkw
    defaults = spec.defaults or []
    parser = argparse.ArgumentParser(
        prog='ape ' + task.__name__,
        add_help=False,
        description=task.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    posargslen = len(args) - len(defaults)
    if varargs is None and keywords is None:
        # Positional args become required arguments; defaulted args
        # become ``--option`` flags carrying their default.
        for idx, arg in enumerate(args):
            if idx < posargslen:
                parser.add_argument(arg)
            else:
                default = defaults[idx - posargslen]
                parser.add_argument('--' + arg, default=default)
        return parser, False
    elif not args and varargs and not keywords and not defaults:
        return parser, True
    else:
        raise InvalidTask(ERRMSG_UNSUPPORTED_SIG % task.__name__)
python
{ "resource": "" }
q43257
invoke_task
train
def invoke_task(task, args):
    """Parse ``args`` and invoke the task function.

    :param task: task function to invoke
    :param args: arguments to the task (list of str)
    :return: result of task function
    :rtype: object
    """
    parser, proxy_args = get_task_parser(task)
    if proxy_args:
        # Varargs-only task: pass the raw argument list straight through.
        return task(*args)
    namespace = parser.parse_args(args)
    return task(**vars(namespace))
python
{ "resource": "" }
q43258
get_task_module
train
def get_task_module(feature):
    """Return the imported task module of ``feature``.

    First imports the feature itself, raising ``FeatureNotFound`` when
    that fails.  Then tries the ``apetasks`` and ``tasks`` submodules in
    that order (as in the original, a successful ``tasks`` import
    replaces an earlier ``apetasks`` result).

    :param feature: name of feature to get the task module for.
    :raises: FeatureNotFound if the feature module could not be imported.
    :return: imported module containing the ape tasks, or None.
    """
    try:
        importlib.import_module(feature)
    except ImportError:
        raise FeatureNotFound(feature)

    tasks_module = None
    for submodule in ('apetasks', 'tasks'):
        try:
            tasks_module = importlib.import_module('%s.%s' % (feature, submodule))
        except ImportError:
            # Feature has no such submodule; keep whatever we found so far.
            continue
    return tasks_module
python
{ "resource": "" }
q43259
run
train
def run(args, features=None):
    """Run an ape task.

    Composes task modules out of the selected features and calls the task
    with arguments.

    :param args: list comprised of task name followed by arguments
    :param features: list of features to compose before invoking the task
    """
    for feature in (features or []):
        tasks_module = get_task_module(feature)
        if tasks_module:
            tasks.superimpose(tasks_module)

    wants_help = len(args) < 2 or (len(args) == 2 and args[1] == 'help')
    if wants_help:
        tasks.help()
        return

    taskname = args[1]
    try:
        task = tasks.get_task(taskname, include_helpers=False)
    except TaskNotFound:
        print('Task "%s" not found! Use "ape help" to get usage information.' % taskname)
    else:
        invoke_task(task, args[2:])
python
{ "resource": "" }
q43260
main
train
def main():
    """Entry point when used via command line.

    Features are taken from the ``PRODUCT_EQUATION`` environment
    variable; failing that, from the equation file named by
    ``PRODUCT_EQUATION_FILENAME``.  ``APE_PREPEND_FEATURES`` is always
    prepended.  Raises ``ape.EnvironmentIncomplete`` when the resulting
    feature list is empty.
    """
    features = os.environ.get('APE_PREPEND_FEATURES', '').split()
    inline_features = os.environ.get('PRODUCT_EQUATION', '').split()
    if inline_features:
        # Features specified inline take precedence over the file.
        features += inline_features
    else:
        feature_file = os.environ.get('PRODUCT_EQUATION_FILENAME', '')
        if feature_file:
            features += get_features_from_equation_file(feature_file)
        elif not features:
            raise EnvironmentIncomplete(
                'Error running ape:\n'
                'Either the PRODUCT_EQUATION or '
                'PRODUCT_EQUATION_FILENAME environment '
                'variable needs to be set!'
            )
    # Run ape with the selected features.
    run(sys.argv, features=features)
python
{ "resource": "" }
q43261
CollectNewMixin.collect
train
def collect(self):
    """
    Perform the bulk of the work of collectstatic.

    Split off from handle_noargs() to facilitate testing.
    """
    if self.symlink:
        if sys.platform == 'win32':
            raise CommandError("Symlinking is not supported by this "
                               "platform (%s)." % sys.platform)
        if not self.local:
            raise CommandError("Can't symlink to a remote destination.")

    if self.clear:
        self.clear_dir('')

    handler = self._get_handler()
    do_post_process = self.post_process and hasattr(self.storage, 'post_process')
    found_files = SortedDict()
    for finder in finders.get_finders():
        for path, storage in finder.list(self.ignore_patterns):
            # Prefix the relative path if the source storage contains it
            if getattr(storage, 'prefix', None):
                prefixed_path = os.path.join(storage.prefix, path)
            else:
                prefixed_path = path
            if prefixed_path not in found_files:
                found_files[prefixed_path] = (storage, path)
                handler(path, prefixed_path, storage)
                # Progressive mode post-processes each file as it is
                # collected instead of in one batch at the end.
                if self.progressive_post_process and do_post_process:
                    try:
                        self._post_process(
                            {prefixed_path: (storage, path)}, self.dry_run)
                    except ValueError as e:
                        message = ('%s current storage requires all files'
                                   ' to have been collected first. Try '
                                   ' ecstatic.storage.CachedStaticFilesStorage'
                                   % e)
                        raise ValueError(message)

    if not self.progressive_post_process and do_post_process:
        self._post_process(found_files, self.dry_run)

    return {
        'modified': self.copied_files + self.symlinked_files,
        'unmodified': self.unmodified_files,
        'post_processed': self.post_processed_files,
    }
python
{ "resource": "" }
q43262
CollectNewMixin.compare
train
def compare(self, path, prefixed_path, source_storage):
    """
    Returns True if the file should be copied.

    Looks first for a command-level hook named
    ``compare_<comparison_method>``; when absent, builds a comparitor
    that delegates to storage methods named ``<comparison_method>``.
    """
    method = self.comparison_method
    comparitor = getattr(self, 'compare_%s' % method, None)
    if not comparitor:
        comparitor = self._create_comparitor(method)
    return comparitor(path, prefixed_path, source_storage)
python
{ "resource": "" }
q43263
verify_session
train
def verify_session(session, baseurl):
    """Return True when ``session`` is still valid on ``baseurl``.

    Validity is checked by requesting the project-selection page and
    looking for the expected marker string in the response body.
    """
    response = session.post(baseurl + "/select_projet.php")
    return VERIFY_SESSION_STRING in response.content.decode('iso-8859-1')
python
{ "resource": "" }
q43264
get_session
train
def get_session(session, baseurl, config):
    """Obtain a valid authenticated session for ``baseurl``.

    Launches Firefox so the user can complete the login interactively,
    pre-filling the credentials found in ``config``, then copies the
    PHPSESSID cookie into ``session``.  Retries (prompting for
    credentials again) if the resulting session does not verify.
    """
    # Configure Firefox's proxy from HTTP_PROXY when it is set.
    if environ.get("HTTP_PROXY"):
        myProxy = environ.get("HTTP_PROXY")
        proxy = Proxy({
            'proxyType': ProxyType.MANUAL,
            'httpProxy': myProxy,
            'ftpProxy': myProxy,
            'sslProxy': myProxy,
            'noProxy': ''  # set this value as desired
        })
    else:
        proxy = None

    if 'login' in config['DEFAULT']:
        login, password = credentials(config['DEFAULT']['login'])
    else:
        login, password = credentials()

    browser = webdriver.Firefox(proxy=proxy)
    browser.get(baseurl)
    browser.find_element_by_name('login').send_keys(login)
    browser.find_element_by_name('passwd').send_keys(password)
    cookie = {'PHPSESSID': browser.get_cookie('PHPSESSID')['value']}
    # BUG FIX: the cookie was previously installed on a global
    # ``prof_session`` instead of the ``session`` argument.
    session.cookies = requests.utils.cookiejar_from_dict(cookie)
    print("Please log using firefox")
    # Poll until the post-login page (which contains a <select>) appears.
    while True:
        try:
            browser.find_element_by_css_selector("select")
            break
        except Exception:  # element not present yet; keep polling
            sleep(0.5)
    browser.close()
    set_sessid(cookie['PHPSESSID'])
    if not verify_session(session, baseurl):
        print("Cannot get a valid session, retry")
        # Retry with an empty config so credentials are prompted afresh.
        get_session(session, baseurl, {'DEFAULT': {}})
python
{ "resource": "" }
q43265
LM.display
train
def display(self):
    '''Print n-gram and (n-1)-gram counts per class and for the collection.

    Output is identical to the original Python-2 version; the ``print``
    statements and ``keys().sort()`` were replaced with constructs that
    work on both Python 2 and 3.
    '''
    voc_list = []
    for doc_id in sorted(self.term_count_n.keys()):
        ngrams = len(self.term_count_n[doc_id]['ngrams'])
        print('n-Grams (doc %s): %d' % (str(doc_id), ngrams))
        ngrams1 = len(self.term_count_n_1[doc_id]['ngrams'])
        print('(n-1)-Grams (doc %s): %d' % (str(doc_id), ngrams1))
        voc_list.append(ngrams)
    print('Classed Vocabularies: %s' % (voc_list,))
    print('')
    corpus_ngrams = len(self.corpus_count_n['ngrams'])
    print('n-Grams (collection): %d' % (corpus_ngrams))
    corpus_ngrams1 = len(self.corpus_count_n_1['ngrams'])
    print('(n-1)-Grams (collection): %d' % (corpus_ngrams1))
    self.unseen_counts.display()
python
{ "resource": "" }
q43266
LM.get_ngram_counts
train
def get_ngram_counts(self):
    '''Return a dict of n-gram counts.

    ``'classes'`` holds one count per class (in sorted doc-id order) and
    ``'corpus'`` the collection-wide count.
    '''
    ngram_counts = {
        'classes': [],
        'corpus': 0
    }
    # NOTE(review): a leftover debug ``print`` of each per-doc dict was
    # removed; ``keys().sort()`` was Python-2 only and is replaced by
    # ``sorted`` with identical ordering.
    for doc_id in sorted(self.term_count_n.keys()):
        class_ngrams = len(self.term_count_n[doc_id]['ngrams'])
        ngram_counts['classes'].append(class_ngrams)
    ngram_counts['corpus'] = len(self.corpus_count_n['ngrams'])
    return ngram_counts
python
{ "resource": "" }
q43267
LM.lr_padding
train
def lr_padding(self, terms):
    '''Pad ``terms`` with ``n - 1`` copies of ``self.lpad``/``self.rpad``.

    An empty pad marker ('') disables padding on that side.
    '''
    left = [self.lpad] * (self.n - 1) if self.lpad else []
    right = [self.rpad] * (self.n - 1) if self.rpad else []
    return left + terms + right
python
{ "resource": "" }
q43268
PressEnter2ExitGUI.run
train
def run(self):
    """Show an OK/Cancel dialog until the user confirms, then record exit time."""
    root = tkinter.Tk()
    root.withdraw()  # hide the empty main window; only the dialog shows
    answer = None
    while answer is not True:
        answer = tkinter.messagebox.askokcancel(title=self.title,
                                                message=self.pre_message)
    if self.post_message:
        print(self.post_message)
    self.exit_time = time.time()
python
{ "resource": "" }
q43269
download
train
def download(url, file=None):
    """Download ``url`` into ``file``, or return the raw bytes.

    Args:
        url (str): URL of file to download
        file (Union[str, io, None]): filename, a file opened in binary
            write mode, or None to return the bytes instead.

    Returns:
        Union[bytes, None]: the downloaded bytes when ``file`` is None.

    Note: any file object (including a caller-supplied one) is closed on
    return — ``download_extract_tar`` relies on this.
    """
    import shutil
    import urllib.request

    opened = open(file, 'wb') if isinstance(file, str) else file
    try:
        with urllib.request.urlopen(url) as response:
            if opened:
                shutil.copyfileobj(response, opened)
            else:
                return response.read()
    finally:
        if opened:
            opened.close()
python
{ "resource": "" }
q43270
download_extract_tar
train
def download_extract_tar(tar_url, folder, tar_filename=''):
    """Download the tar at ``tar_url`` and extract it into ``folder``.

    Args:
        tar_url (str): URL of tar file to download
        folder (str): parent directory to extract to (created if missing)
        tar_filename (str): where to store the tar; a temp file by default
    """
    try:
        makedirs(folder)
    except OSError:
        # Only ignore the error when the directory already exists.
        if not isdir(folder):
            raise

    if tar_filename:
        data_file = tar_filename
        download(tar_url, data_file)
    else:
        fd, data_file = mkstemp('.tar.gz')
        download(tar_url, os.fdopen(fd, 'wb'))

    with tarfile.open(data_file) as tar:
        tar.extractall(path=folder)
python
{ "resource": "" }
q43271
install_package
train
def install_package(tar_url, folder, md5_url='{tar_url}.md5',
                    on_download=lambda: None, on_complete=lambda: None):
    """Install or update a tar package guarded by an md5 checksum.

    Args:
        tar_url (str): URL of package to download
        folder (str): extraction target; created if it doesn't exist
        md5_url (str): URL of the md5 used to detect updates
        on_download (Callable): called when a new download starts
        on_complete (Callable): called when a new download finishes

    Returns:
        bool: whether the package was updated
    """
    data_file = join(folder, basename(tar_url))
    md5_url = md5_url.format(tar_url=tar_url)
    try:
        remote_md5 = download(md5_url).decode('utf-8').split(' ')[0]
    except (UnicodeDecodeError, URLError):
        raise ValueError('Invalid MD5 url: ' + md5_url)

    # Up to date already: nothing to do.
    if remote_md5 == calc_md5(data_file):
        return False

    on_download()
    # Remove the files from the previously installed version, if any.
    if isfile(data_file):
        try:
            with tarfile.open(data_file) as tar:
                for member in reversed(list(tar)):
                    try:
                        os.remove(join(folder, member.path))
                    except OSError:
                        pass
        except (OSError, EOFError):
            pass

    download_extract_tar(tar_url, folder, data_file)
    on_complete()

    # Verify the freshly downloaded tar matches the advertised md5.
    if remote_md5 != calc_md5(data_file):
        raise ValueError('MD5 url does not match tar: ' + md5_url)
    return True
python
{ "resource": "" }
q43272
_process_cell
train
def _process_cell(i, state, finite=False): """Process 3 cells and return a value from 0 to 7. """ op_1 = state[i - 1] op_2 = state[i] if i == len(state) - 1: if finite: op_3 = state[0] else: op_3 = 0 else: op_3 = state[i + 1] result = 0 for i, val in enumerate([op_3, op_2, op_1]): if val: result += 2**i return result
python
{ "resource": "" }
q43273
_remove_lead_trail_false
train
def _remove_lead_trail_false(bool_list): """Remove leading and trailing false's from a list""" # The internet can be a wonderful place... for i in (0, -1): while bool_list and not bool_list[i]: bool_list.pop(i) return bool_list
python
{ "resource": "" }
q43274
_crop_list_to_size
train
def _crop_list_to_size(l, size): """Make a list a certain size""" for x in range(size - len(l)): l.append(False) for x in range(len(l) - size): l.pop() return l
python
{ "resource": "" }
q43275
JobManagerLocal.submit
train
def submit(self, command_line, name=None, array=None, dependencies=[],
           exec_dir=None, log_dir=None, dry_run=False, stop_on_failure=False,
           **kwargs):
    """Submit a job to be executed locally during a call to ``run``.

    All extra ``kwargs`` are simply ignored.  Returns the unique id of
    the new job, or None for a dry run.
    """
    # Drop duplicate dependencies, keeping a stable order.
    dependencies = sorted(set(dependencies))

    # Add the job to the database.
    self.lock()
    job = add_job(self.session, command_line=command_line, name=name,
                  dependencies=dependencies, array=array, exec_dir=exec_dir,
                  log_dir=log_dir, stop_on_failure=stop_on_failure)
    logger.info("Added job '%s' to the database", job)

    if dry_run:
        print("Would have added the Job", job, "to the database to be executed locally.")
        self.session.delete(job)
        logger.info("Deleted job '%s' from the database due to dry-run option", job)
        job_id = None
    else:
        job_id = job.unique

    self.unlock()
    return job_id
python
{ "resource": "" }
q43276
JobManagerLocal.stop_jobs
train
def stop_jobs(self, job_ids=None):
    """Reset running/queued/waiting local jobs back to 'submitted'."""
    self.lock()
    for job in self.get_jobs(job_ids):
        if job.status in ('executing', 'queued', 'waiting') and job.queue_name == 'local':
            logger.info("Reset job '%s' (%s) in the database", job.name, self._format_log(job.id))
            job.submit()
    self.session.commit()
    self.unlock()
python
{ "resource": "" }
q43277
JobManagerLocal.stop_job
train
def stop_job(self, job_id, array_id=None):
    """Reset the given job (and its array jobs) to 'submitted' if active."""
    active = ('executing', 'queued', 'waiting')
    self.lock()
    job, array_job = self._job_and_array(job_id, array_id)
    if job is not None:
        if job.status in active:
            logger.info("Reset job '%s' (%s) in the database", job.name, self._format_log(job.id))
            job.status = 'submitted'
        if array_job is not None and array_job.status in active:
            logger.debug("Reset array job '%s' in the database", array_job)
            array_job.status = 'submitted'
        if array_job is None:
            # No specific array task requested: reset every active one.
            for array_job in job.array:
                if array_job.status in active:
                    logger.debug("Reset array job '%s' in the database", array_job)
                    array_job.status = 'submitted'
    self.session.commit()
    self.unlock()
python
{ "resource": "" }
q43278
JobManagerLocal._run_parallel_job
train
def _run_parallel_job(self, job_id, array_id=None, no_log=False, nice=None, verbosity=0):
    """Launch the wrapper script for one job (or array task) on this machine.

    Returns the ``subprocess.Popen`` handle, or None when the job has
    vanished or the process could not be started.
    """
    environ = copy.deepcopy(os.environ)
    environ['JOB_ID'] = str(job_id)
    environ['SGE_TASK_ID'] = str(array_id) if array_id else 'undefined'

    # Generate the call to the wrapper script, optionally nice'd.
    command = [self.wrapper_script, '-l%sd' % ("v" * verbosity), self._database, 'run-job']
    if nice is not None:
        command = ['nice', '-n%d' % nice] + command

    job, array_job = self._job_and_array(job_id, array_id)
    if job is None:
        # Rare case: job was deleted before starting.
        return None

    logger.info("Starting execution of Job '%s' (%s)", job.name,
                self._format_log(job_id, array_id, len(job.array)))

    # Choose output streams: the console, or line-buffered log files.
    if no_log or job.log_dir is None:
        out, err = sys.stdout, sys.stderr
    else:
        makedirs_safe(job.log_dir)
        target = array_job if array_job is not None else job
        out = open(target.std_out_file(), 'w', 1)
        err = open(target.std_err_file(), 'w', 1)

    try:
        return subprocess.Popen(command, env=environ, stdout=out, stderr=err, bufsize=1)
    except OSError as e:
        logger.error("Could not execute job '%s' (%s) locally\n- reason:\t%s\n- command line:\t%s\n- directory:\t%s\n- command:\t%s",
                     job.name, self._format_log(job_id, array_id, len(job.array)), e,
                     " ".join(job.get_command_line()),
                     "." if job.exec_dir is None else job.exec_dir,
                     " ".join(command))
        # Mark the job as failed with exit code 117, as before.
        job.finish(117, array_id)
        return None
python
{ "resource": "" }
q43279
html_to_text
train
def html_to_text(html, base_url='', bodywidth=CONFIG_DEFAULT):
    """Convert an HTML message to plain text.

    Patches the HTML2Text handlers so that character/entity references
    inside ``code``/``pre`` blocks are escaped rather than decoded.
    """
    # ``cgi.escape`` was removed in Python 3.8 (and the ``cgi`` module in
    # 3.13); ``html.escape(..., quote=False)`` is the exact equivalent.
    # Imported under an alias because the parameter shadows the module name.
    from html import escape as _escape

    def _patched_handle_charref(c):
        charref = h.charref(c)
        if h.code or h.pre:
            charref = _escape(charref, quote=False)
        h.o(charref, 1)

    def _patched_handle_entityref(c):
        entityref = h.entityref(c)
        if h.code or h.pre:
            # Upstream had this condition inverted; keep the fixed form.
            entityref = _escape(entityref, quote=False)
        h.o(entityref, 1)

    h = HTML2Text(baseurl=base_url,
                  bodywidth=config.BODY_WIDTH if bodywidth is CONFIG_DEFAULT else bodywidth)
    h.handle_entityref = _patched_handle_entityref
    h.handle_charref = _patched_handle_charref
    return h.handle(html).rstrip()
python
{ "resource": "" }
q43280
render_email_template
train
def render_email_template(email_template, base_url, extra_context=None, user=None):
    """Render the email template.

    :type email_template: fluentcms_emailtemplates.models.EmailTemplate
    :type base_url: str
    :type extra_context: dict | None
    :type user: django.contrib.auth.models.User
    :return: The subject, html and text content
    :rtype: fluentcms_emailtemplates.rendering.EmailContent
    """
    # BUG FIX: ``extra_context`` defaults to None but was dereferenced
    # with ``.get()`` unconditionally below; normalize it to a dict.
    if extra_context is None:
        extra_context = {}

    dummy_request = _get_dummy_request(base_url, user)
    context_user = user or extra_context.get('user', None)
    context_data = {
        'request': dummy_request,
        'email_template': email_template,
        'email_format': 'html',
        'user': user,
        # Common replacements
        'first_name': context_user.first_name if context_user else '',
        'last_name': context_user.last_name if context_user else '',
        'full_name': context_user.get_full_name() if context_user else '',
        'email': context_user.email if context_user else '',
        'site': extra_context.get('site', None) or {
            'domain': dummy_request.get_host(),
            'name': dummy_request.get_host(),
        }
    }
    if extra_context:
        context_data.update(extra_context)

    # Make sure the templates and i18n are identical to the emailtemplate
    # language.  This is the same as the current Django language, unless
    # the object was explicitly fetched in a different language.
    with switch_language(email_template):
        # Get the body content
        context_data['body'] = _render_email_placeholder(
            dummy_request, email_template, base_url, context_data)
        context_data['subject'] = subject = replace_fields(
            email_template.subject, context_data, autoescape=False)

        # Merge that with the HTML templates.
        context = RequestContext(dummy_request).flatten()
        context.update(context_data)
        html = render_to_string(email_template.get_html_templates(), context,
                                request=dummy_request)
        html, url_changes = _make_links_absolute(html, base_url)

        # Render the text template with auto escaping disabled.
        context['email_format'] = 'text'
        text = render_to_string(email_template.get_text_templates(), context,
                                request=dummy_request)
        text = _make_text_links_absolute(text, url_changes)

    return EmailContent(subject, text, html)
python
{ "resource": "" }
q43281
_make_links_absolute
train
def _make_links_absolute(html, base_url):
    """Make all ``<a href>`` and ``<img src>`` links absolute.

    Returns the rewritten (safe) HTML plus a list of ``(old, new)`` URL
    pairs for every link that actually changed.
    """
    url_changes = []
    soup = BeautifulSoup(html)

    def _absolutize(tags, attr):
        for tag in tags:
            original = tag[attr]
            absolute = urljoin(base_url, original)
            if original != absolute:
                url_changes.append((original, absolute))
            tag[attr] = absolute

    _absolutize(soup.find_all('a', href=True), 'href')
    _absolutize(soup.find_all('img', src=True), 'src')
    return mark_safe(six.text_type(soup)), url_changes
python
{ "resource": "" }
q43282
string_to_datetime
train
def string_to_datetime(date):
    """Return a timezone-aware ``datetime.datetime`` instance.

    Acceptable inputs include ISO 8601 strings with or without fractional
    seconds / offsets (both 'T' and space separators), bare dates like
    ``2012-01-10``, a legacy ``['date', 'time']`` pair, an existing
    datetime (made aware if naive), or None (passed through).
    """
    if date is None:
        return None
    if isinstance(date, datetime.datetime):
        # Attach UTC when the datetime is naive; otherwise pass through.
        return date if date.tzinfo else date.replace(tzinfo=UTC)
    if isinstance(date, list):
        # Legacy ['YYYY-MM-DD', 'HH:MM:SS'] form.
        date = 'T'.join(date)
    if isinstance(date, basestring):
        if len(date) <= len('2000-01-01'):
            return (datetime.datetime
                    .strptime(date, '%Y-%m-%d')
                    .replace(tzinfo=UTC))
        try:
            parsed = isodate.parse_datetime(date)
        except ValueError:
            # e.g. '2012-01-10 12:13:14Z' becomes '2012-01-10T12:13:14Z'
            parsed = isodate.parse_datetime(
                re.sub(r'(\d)\s(\d)', r'\1T\2', date))
        if not parsed.tzinfo:
            parsed = parsed.replace(tzinfo=UTC)
        return parsed
    raise ValueError("date not a parsable string")
python
{ "resource": "" }
q43283
date_to_string
train
def date_to_string(date):
    """Serialize a date or datetime object to an ISO 8601 string.

    Examples:

        >>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34, tzinfo=UTC))
        '2012-01-03T12:23:34+00:00'
        >>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34))
        '2012-01-03T12:23:34'
        >>> date_to_string(datetime.date(2012, 1, 3))
        '2012-01-03'
    """
    # datetime must be tested first: it is a subclass of date.
    if isinstance(date, datetime.datetime):
        base = date.strftime('%Y-%m-%dT%H:%M:%S')
        offset = date.strftime('%z')
        if offset:
            # strftime yields '+HHMM'; ISO 8601 wants '+HH:MM'.
            return '%s%s:%s' % (base, offset[0:3], offset[3:5])
        return base
    if isinstance(date, datetime.date):
        return date.strftime('%Y-%m-%d')
    raise TypeError('Argument is not a date or datetime. ')
python
{ "resource": "" }
q43284
uuid_to_date
train
def uuid_to_date(uuid, century='20'):
    """Build a date from the last 6 digits of a uuid (YYMMDD).

    Arguments:
        uuid: the unique identifier to parse.
        century: the first 2 digits assumed for the year (default '20').

    Examples:

        >>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201')
        datetime.date(2012, 2, 1)
        >>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201', '18')
        datetime.date(1812, 2, 1)
    """
    year = int(century + uuid[-6:-4])
    month = int(uuid[-4:-2])
    day = int(uuid[-2:])
    return datetime.date(year=year, month=month, day=day)
python
{ "resource": "" }
q43285
hardware_info
train
def hardware_info():
    """
    Returns basic hardware information about the computer.

    Gives the actual number of CPUs in the machine, even when
    hyperthreading is turned on.  Any failure while probing yields an
    empty dict instead of an exception.

    Returns
    -------
    info : dict
        Dictionary containing cpu and memory information.
    """
    try:
        if sys.platform == 'darwin':
            out = _mac_hardware_info()
        elif sys.platform == 'win32':
            out = _win_hardware_info()
        elif sys.platform in ['linux', 'linux2']:
            out = _linux_hardware_info()
        else:
            out = {}
    except Exception:
        # Was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; only genuine probe errors are
        # silenced now.
        return {}
    else:
        return out
python
{ "resource": "" }
q43286
add
train
async def add(client: Client, identity_signed_raw: str) -> ClientResponse:
    """POST an identity raw document to the node.

    :param client: Client to connect to the api
    :param identity_signed_raw: Identity raw document
    :return:
    """
    payload = {'identity': identity_signed_raw}
    return await client.post(MODULE + '/add', payload, rtype=RESPONSE_AIOHTTP)
python
{ "resource": "" }
q43287
certify
train
async def certify(client: Client, certification_signed_raw: str) -> ClientResponse:
    """POST a certification raw document to the node.

    :param client: Client to connect to the api
    :param certification_signed_raw: Certification raw document
    :return:
    """
    payload = {'cert': certification_signed_raw}
    return await client.post(MODULE + '/certify', payload, rtype=RESPONSE_AIOHTTP)
python
{ "resource": "" }
q43288
revoke
train
async def revoke(client: Client, revocation_signed_raw: str) -> ClientResponse:
    """POST a revocation raw document to the node.

    :param client: Client to connect to the api
    :param revocation_signed_raw: Revocation raw document
    :return:
    """
    payload = {'revocation': revocation_signed_raw}
    return await client.post(MODULE + '/revoke', payload, rtype=RESPONSE_AIOHTTP)
python
{ "resource": "" }
q43289
identity_of
train
async def identity_of(client: Client, search: str) -> dict:
    """
    GET Identity data written in the blockchain

    :param client: Client to connect to the api
    :param search: UID or public key
    :return:
    """
    path = MODULE + '/identity-of/' + search
    return await client.get(path, schema=IDENTITY_OF_SCHEMA)
python
{ "resource": "" }
q43290
shell_exec
train
def shell_exec(command, **kwargs):  # from gitapi.py
    """Execute the given command silently, capturing its output.

    Returns a dict with keys ``'out'`` and ``'err'`` (decoded stdout and
    stderr) and ``'code'`` (the process return code).
    """
    child = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE, **kwargs)
    raw_out, raw_err = child.communicate()
    return {
        'out': raw_out.decode("utf-8"),
        'err': raw_err.decode("utf-8"),
        'code': child.returncode,
    }
python
{ "resource": "" }
q43291
run
train
def run(command, **kwargs):
    """Execute the given command while transferring control, till the
    execution is complete.

    :param command: shell-style command string, split with ``shlex``
    :param kwargs: extra keyword arguments forwarded to ``Popen``
    :return: the process return code
    """
    # ``print command`` was Python 2 statement syntax (a SyntaxError on
    # Python 3); call print() instead, which works on both.
    print(command)
    p = Popen(shlex.split(command), **kwargs)
    p.wait()
    return p.returncode
python
{ "resource": "" }
q43292
data
train
def data(tableid, variables=None, stream=False, descending=False, lang=DEFAULT_LANGUAGE):
    """Pulls data from a table and generates rows.

    Variables is a dictionary mapping variable codes to values.

    Streaming:
    Values must be chosen for all variables when streaming

    :param tableid: id of the table to extract from
    :param variables: mapping of variable codes to values; ``None`` means
        no variable selection
    :param stream: use the streamed BULK format instead of CSV
    :param descending: return rows in descending time order
    :param lang: response language
    :return: generator of Data rows
    """
    # Use None as the default instead of a mutable ``dict()`` default,
    # which would be shared across calls.
    if variables is None:
        variables = {}
    # bulk is also in csv format, but the response is streamed
    format = 'BULK' if stream else 'CSV'
    request = Request('data', tableid, format,
                      timeOrder='Descending' if descending else None,
                      valuePresentation='CodeAndValue',
                      lang=lang,
                      **variables)
    return (Data(datum, lang=lang) for datum in request.csv)
python
{ "resource": "" }
q43293
subjects
train
def subjects(subjects=None, recursive=False, include_tables=False, lang=DEFAULT_LANGUAGE):
    """List subjects from the subject hierarchy.

    If subjects is not given, the root subjects will be used.

    Returns a generator.

    :param subjects: iterable of subject ids to start from; ``None`` lists
        the root subjects
    :param recursive: descend into sub-subjects
    :param include_tables: also list the tables below each subject
    :param lang: response language
    """
    # Guard the default: ``*None`` raises TypeError, so unpack an empty
    # tuple when subjects is omitted (= request the root subjects, as the
    # docstring promises).
    request = Request('subjects', *(subjects or ()),
                      recursive=recursive,
                      includeTables=include_tables,
                      lang=lang)
    return (Subject(subject, lang=lang) for subject in request.json)
python
{ "resource": "" }
q43294
tableinfo
train
def tableinfo(tableid, lang=DEFAULT_LANGUAGE):
    """Fetch metadata for statbank table

    Metadata includes information about variables, which can be used when
    extracting data.
    """
    response = Request('tableinfo', tableid, lang=lang)
    return Tableinfo(response.json, lang=lang)
python
{ "resource": "" }
q43295
tables
train
def tables(subjects=None, pastDays=None, include_inactive=False, lang=DEFAULT_LANGUAGE):
    """Find tables placed under given subjects.

    Returns a generator of Table objects.
    """
    # The request is issued eagerly; only the Table wrapping is lazy.
    response = Request('tables',
                       subjects=subjects,
                       pastDays=pastDays,
                       includeInactive=include_inactive,
                       lang=lang)
    return (Table(entry, lang=lang) for entry in response.json)
python
{ "resource": "" }
q43296
API.request_method
train
def request_method(self, method: str, **method_kwargs: Union[str, int]) -> dict:
    """
    Process method request and return json with results

    :param method: str: specifies the method, example: "users.get"
    :param method_kwargs: dict: method parameters, example: "users_id=1",
        "fields='city, contacts'"
    """
    result = self.session.send_method_request(method, method_kwargs)
    self.check_for_errors(method, method_kwargs, result)
    return result
python
{ "resource": "" }
q43297
API.request_get_user
train
def request_get_user(self, user_ids) -> dict:
    """
    Method to get users by ID, do not need authorization
    """
    params = {'user_ids': user_ids}
    result = self.session.send_method_request('users.get', params)
    self.check_for_errors('users.get', params, result)
    return result
python
{ "resource": "" }
q43298
API.request_set_status
train
def request_set_status(self, text: str) -> dict:
    """
    Method to set user status
    """
    params = {'text': text}
    result = self.session.send_method_request('status.set', params)
    self.check_for_errors('status.set', params, result)
    return result
python
{ "resource": "" }
q43299
run_spy
train
def run_spy(group, port, verbose): """ Runs the multicast spy :param group: Multicast group :param port: Multicast port :param verbose: If True, prints more details """ # Create the socket socket, group = multicast.create_multicast_socket(group, port) print("Socket created:", group, "port:", port) # Set the socket as non-blocking socket.setblocking(0) # Prepare stats storage stats = { "total_bytes": 0, "total_count": 0, "sender_bytes": {}, "sender_count": {}, } print("Press Ctrl+C to exit") try: loop_nb = 0 while True: if loop_nb % 50 == 0: loop_nb = 0 print("Reading...") loop_nb += 1 ready = select.select([socket], [], [], .1) if ready[0]: # Socket is ready data, sender = socket.recvfrom(1024) len_data = len(data) # Store stats stats["total_bytes"] += len_data stats["total_count"] += 1 try: stats["sender_bytes"][sender] += len_data stats["sender_count"][sender] += 1 except KeyError: stats["sender_bytes"][sender] = len_data stats["sender_count"][sender] = 1 print("Got", len_data, "bytes from", sender[0], "port", sender[1], "at", datetime.datetime.now()) if verbose: print(hexdump(data)) except KeyboardInterrupt: # Interrupt print("Ctrl+C received: bye !") # Print statistics print("Total number of packets:", stats["total_count"]) print("Total read bytes.......:", stats["total_bytes"]) for sender in stats["sender_count"]: print("\nSender", sender[0], "from port", sender[1]) print("\tTotal packets:", stats["sender_count"][sender]) print("\tTotal bytes..:", stats["sender_bytes"][sender]) return 0
python
{ "resource": "" }