code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def verify_branch(branch_name):
    # type: (str) -> bool
    """Check whether a git branch exists.

    Args:
        branch_name (str): The name of the branch to check.

    Returns:
        bool: **True** if a branch named *branch_name* exists,
        **False** otherwise.
    """
    try:
        shell.run(
            'git rev-parse --verify {}'.format(branch_name),
            never_pretend=True
        )
    except IOError:
        # shell.run signals a failed git command with IOError.
        return False
    return True
Verify if the given branch exists. Args: branch_name (str): The name of the branch to check. Returns: bool: **True** if a branch with name *branch_name* exists, **False** otherwise.
def Rt_display(stock_no):
    """Return a formatted real-time quote line for *stock_no*.

    即時盤用,顯示目前查詢各股的股價資訊。
    (For the real-time session: show current price info for the queried
    stock.)  Returns the falsy real-time payload unchanged when no quote
    is available.
    """
    quote = twsk(stock_no).real
    if not quote:
        # No real-time data: propagate the falsy value to the caller.
        return quote
    fields = {
        'stock_no': stock_no,
        'time': quote['time'],
        'c': quote['c'],
        'range': covstr(quote['range']),
        'value': quote['value'],
        'pp': covstr(quote['pp']),
    }
    return "{%(time)s} %(stock_no)s %(c)s %(range)+.2f(%(pp)+.2f%%) %(value)s" % fields
For real time stock display 即時盤用,顯示目前查詢各股的股價資訊。
def uncompress(pub):
    """Expand a compressed secp256k1 public key to its uncompressed form.

    Input must be a hex string holding a valid compressed public key
    (check with validatepubkey() first and verify len(pub) == 66).
    Returns the uncompressed key as a hex string prefixed with '04'.
    """
    parity = int(pub[:2], 16) - 2          # 0 for even y, 1 for odd y
    x = int(pub[2:], 16)
    # y^2 = x^3 + 7 (mod P); recover y via the (P+1)/4 exponent trick,
    # valid because P % 4 == 3.
    rhs = (pow_mod(x, 3, P) + 7) % P
    y = pow_mod(rhs, (P + 1) // 4, P)
    if y % 2 != parity:
        # Wrong root: take the other square root, P - y.
        y = -y % P
    return '04' + dechex(x, 32) + dechex(y, 32)
Input must be hex string, and a valid compressed public key. Check if it's a valid key first, using the validatepubkey() function below, and then verify that the str len is 66.
def findpeak_multi(x, y, dy, N, Ntolerance, Nfit=None, curve='Lorentz', return_xfit=False, return_stat=False):
    """Find multiple peaks in the dataset given by vectors x and y.

    Points are searched for in the dataset where the N points before and
    after have strictly lower values than them. To get rid of false
    negatives caused by fluctuations, Ntolerance is introduced. It is the
    number of outlier points to be tolerated, i.e. points on the left-hand
    side of the peak where the growing tendency breaks or on the right-hand
    side where the diminishing tendency breaks. Increasing this number,
    however, gives rise to false positives.

    Inputs:
        x, y, dy: vectors defining the data-set. dy can be None.
        N, Ntolerance: the parameters of the peak-finding routines
        Nfit: the number of points on the left and on the right of
            the peak to be used for least squares refinement of the
            peak positions.
        curve: the type of the curve to be fitted to the peaks. Can be
            'Lorentz' or 'Gauss'.
            NOTE(review): `curve` is accepted but not forwarded to
            findpeak_single below — confirm whether it should be.
        return_xfit: if the abscissa used for fitting is to be returned.
        return_stat: if the fitting statistics is to be returned for each
            peak.

    Outputs:
        position, hwhm, baseline, amplitude, (xfit): lists

    Notes:
        Peaks are identified where the curve grows N points before and
        decreases N points after. On noisy curves Ntolerance may improve
        the results, i.e. decreases the 2*N above mentioned criteria.
    """
    if Nfit is None:
        Nfit = N
    # find points where the curve grows for N points before them and
    # decreases for N points after them. To accomplish this, we create
    # an indicator array of the sign of the first derivative.
    sgndiff = np.sign(np.diff(y))
    xdiff = x[:-1]  # associate difference values to the lower 'x' value.
    pix = np.arange(len(x) - 1)  # pixel coordinates
    # create an indicator array as the sum of sgndiff shifted left and
    # right. whenever an element of this is 2*N, it fulfills the criteria
    # above (monotone rise for N points, monotone fall for N points).
    indicator = np.zeros(len(sgndiff) - 2 * N)
    for i in range(2 * N):
        indicator += np.sign(N - i) * sgndiff[i:-2 * N + i]
    # add the last one, since the indexing is different (would be
    # [2*N:0], which is not what we want)
    indicator += -sgndiff[2 * N:]
    # find the positions (indices) of the peak. The strict criteria is
    # relaxed somewhat by using the Ntolerance value. Note the use of
    # 2*Ntolerance, since each outlier point creates two outliers in
    # sgndiff (-1 instead of +1 and vice versa).
    peakpospix = pix[N:-N][indicator >= 2 * N - 2 * Ntolerance]
    ypeak = y[peakpospix]
    # Now refine the found positions by least-squares fitting. But
    # first we have to sort out other non-peaks, i.e. found points
    # which have other found points with higher values in their [-N,N]
    # neighbourhood.
    pos = []; ampl = []; hwhm = []; baseline = []; xfit = []; stat = []
    dy1 = None
    for i in range(len(ypeak)):
        # keep candidate i only if no other candidate within +-N pixels
        # is strictly higher — i.e. only leave maxima.
        if not [j for j in list(range(i + 1, len(ypeak))) + list(range(0, i)) if abs(peakpospix[j] - peakpospix[i]) <= N and ypeak[i] < ypeak[j]]:
            idx = peakpospix[i]
            if dy is not None:
                dy1 = dy[(idx - Nfit):(idx + Nfit + 1)]
            # refine on a window of +-Nfit points around the raw maximum
            xfit_ = x[(idx - Nfit):(idx + Nfit + 1)]
            pos_, hwhm_, baseline_, ampl_, stat_ = findpeak_single(xfit_, y[(idx - Nfit):(idx + Nfit + 1)], dy1, position=x[idx], return_stat=True)
            stat.append(stat_)
            xfit.append(xfit_)
            pos.append(pos_)
            ampl.append(ampl_)
            hwhm.append(hwhm_)
            baseline.append(baseline_)
    results = [pos, hwhm, baseline, ampl]
    if return_xfit:
        results.append(xfit)
    if return_stat:
        results.append(stat)
    return tuple(results)
Find multiple peaks in the dataset given by vectors x and y. Points are searched for in the dataset where the N points before and after have strictly lower values than them. To get rid of false negatives caused by fluctuations, Ntolerance is introduced. It is the number of outlier points to be tolerated, i.e. points on the left-hand side of the peak where the growing tendency breaks or on the right-hand side where the diminishing tendency breaks. Increasing this number, however gives rise to false positives. Inputs: x, y, dy: vectors defining the data-set. dy can be None. N, Ntolerance: the parameters of the peak-finding routines Nfit: the number of points on the left and on the right of the peak to be used for least squares refinement of the peak positions. curve: the type of the curve to be fitted to the peaks. Can be 'Lorentz' or 'Gauss' return_xfit: if the abscissa used for fitting is to be returned. return_stat: if the fitting statistics is to be returned for each peak. Outputs: position, hwhm, baseline, amplitude, (xfit): lists Notes: Peaks are identified where the curve grows N points before and decreases N points after. On noisy curves Ntolerance may improve the results, i.e. decreases the 2*N above mentioned criteria.
def convert_args_to_sets(f):
    """Decorator that coerces every positional argument to a set.

    Each positional argument is passed through ``setify`` before the
    wrapped callable *f* is invoked; keyword arguments are untouched.
    """
    @wraps(f)
    def inner(*args, **kwargs):
        return f(*(setify(a) for a in args), **kwargs)
    return inner
Converts all args to 'set' type via self.setify function.
def replicate_filter(sources, model, cache=None):
    '''Replicate each object in *sources* to the other class *model* and
    return their reflections.

    Objects with no counterpart in the target DB (e.g. not published)
    replicate to ``None`` and are dropped from the result.
    '''
    reflections = (
        replicate_no_merge(obj, model, cache=cache) for obj in sources
    )
    return [reflection for reflection in reflections if reflection is not None]
Replicates the list of objects to other class and returns their reflections
def get_item(track_url, client_id=CLIENT_ID):
    """Fetch metadata for a SoundCloud track or playlist.

    Resolves *track_url* through the API, retrying with the alternate
    client id (``ALT_CLIENT_ID``) on 403s, empty playlists, or transient
    errors.  Returns the parsed JSON item, or ``None`` when the alternate
    client id also fails; exits the process if even the delayed retry
    raises.
    """
    try:
        item_url = url['resolve'].format(track_url)
        r = requests.get(item_url, params={'client_id': client_id})
        logger.debug(r.url)
        if r.status_code == 403:
            # Forbidden with this client id: retry once with the alternate.
            return get_item(track_url, ALT_CLIENT_ID)
        item = r.json()
        no_tracks = item['kind'] == 'playlist' and not item['tracks']
        if no_tracks and client_id != ALT_CLIENT_ID:
            # Playlist came back empty; the alternate id sometimes sees tracks.
            return get_item(track_url, ALT_CLIENT_ID)
    except Exception:
        if client_id == ALT_CLIENT_ID:
            # Already on the fallback id: give up (returns None).
            logger.error('Failed to get item...')
            return
        logger.error('Error resolving url, retrying...')
        time.sleep(5)
        try:
            return get_item(track_url, ALT_CLIENT_ID)
        except Exception as e:
            logger.error('Could not resolve url {0}'.format(track_url))
            logger.exception(e)
            sys.exit(0)
    return item
Fetches metadata for a track or playlist
def translate_symbol(self, in_symbol: str) -> str:
    """Translate the incoming symbol into the locally-used one.

    Falls back to returning *in_symbol* unchanged when no mapping is
    registered for it.
    """
    if not self.symbol_maps:
        # Lazily populate the mapping table from the db on first use.
        self.__load_symbol_maps()
    return self.symbol_maps.get(in_symbol, in_symbol)
translate the incoming symbol into locally-used
def show_delvol_on_destroy(name, kwargs=None, call=None):
    '''
    Show the delete-on-termination setting of the EBS volumes attached to
    an instance (all of them, or those filtered by ``device`` /
    ``volume_id``).

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_delvol_on_destroy mymachine
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_delvol_on_destroy action must be called '
            'with -a or --action.'
        )
    if not kwargs:
        kwargs = {}
    # Optional filters; instance_id falls back to the named node's id.
    instance_id = kwargs.get('instance_id', None)
    device = kwargs.get('device', None)
    volume_id = kwargs.get('volume_id', None)
    if instance_id is None:
        instance_id = _get_node(name)['instanceId']
    params = {'Action': 'DescribeInstances', 'InstanceId.1': instance_id}
    data = aws.query(params,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    blockmap = data[0]['instancesSet']['item']['blockDeviceMapping']
    # AWS returns a bare dict when there is a single mapping; normalise
    # to a list so the loop below handles both shapes.
    if not isinstance(blockmap['item'], list):
        blockmap['item'] = [blockmap['item']]
    items = []
    for idx, item in enumerate(blockmap['item']):
        device_name = item['deviceName']
        # Skip mappings that do not match the requested filters.
        if device is not None and device != device_name:
            continue
        if volume_id is not None and volume_id != item['ebs']['volumeId']:
            continue
        info = {
            'device_name': device_name,
            'volume_id': item['ebs']['volumeId'],
            'deleteOnTermination': item['ebs']['deleteOnTermination']
        }
        items.append(info)
    return items
Do not delete all/specified EBS volumes upon instance termination CLI Example: .. code-block:: bash salt-cloud -a show_delvol_on_destroy mymachine
async def page_view(self, url: str, title: str, user_id: str, user_lang: str='') -> None:
    """Track the view of a page.

    Abstract hook: concrete trackers must override this to record that
    *user_id* viewed the page at *url* (with the given *title* and,
    optionally, *user_lang*).
    """
    raise NotImplementedError
Track the view of a page
def new_registry_ont_id_transaction(self, ont_id: str, pub_key: str or bytes, b58_payer_address: str,
                                    gas_limit: int, gas_price: int) -> Transaction:
    """Build a Transaction that registers an ONT ID.

    :param ont_id: the OntId to register.
    :param pub_key: the public key, either as a hex string or raw bytes.
    :param b58_payer_address: base58 address paying the transaction fee.
    :param gas_limit: gas limit for the transaction.
    :param gas_price: gas price for the transaction.
    :return: an unsigned Transaction invoking ``regIDWithPublicKey``.
    :raises SDKException: when *pub_key* is neither str nor bytes.
    """
    if isinstance(pub_key, bytes):
        raw_pub_key = pub_key
    elif isinstance(pub_key, str):
        raw_pub_key = bytes.fromhex(pub_key)
    else:
        raise SDKException(ErrorCode.param_err('a bytes or str type of public key is required.'))
    payload = dict(ontid=ont_id.encode('utf-8'), ctrl_pk=raw_pub_key)
    return self.__generate_transaction('regIDWithPublicKey', payload, b58_payer_address, gas_limit, gas_price)
This interface is used to generate a Transaction object which is used to register ONT ID. :param ont_id: OntId. :param pub_key: the hexadecimal public key in the form of string. :param b58_payer_address: a base58 encode address which indicate who will pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: a Transaction object which is used to register ONT ID.
def _extra_stats(self):
    """List the extra statistics added to the default stats: ``loglr``
    plus, for every detector in ``self._data``, the optimal snrsq and
    the matched-filter snrsq."""
    stats = ['loglr']
    for det in self._data:
        stats.append('{}_optimal_snrsq'.format(det))
    for det in self._data:
        stats.append('{}_matchedfilter_snrsq'.format(det))
    return stats
Adds ``loglr``, ``optimal_snrsq`` and matched filter snrsq in each detector to the default stats.
def do_alias(self, args: argparse.Namespace) -> None:
    """Manage aliases"""
    handler = getattr(args, 'func', None)
    if handler is None:
        # No sub-command was provided, so show help for 'alias'.
        self.do_help('alias')
    else:
        # Dispatch to the selected sub-command function.
        handler(self, args)
Manage aliases
def task_denotate(self, task, annotation):
    """Remove *annotation* from *task* and return the refreshed task."""
    self._execute(task['uuid'], 'denotate', '--', annotation)
    # Re-fetch the task so the returned object reflects the removal.
    _, refreshed_task = self.get_task(uuid=task[six.u('uuid')])
    return refreshed_task
Removes an annotation from a task.
def upload_progress(request):
    """AJAX view: return the cached upload progress and total length.

    The progress id is read from the ``X-Progress-ID`` query parameter,
    falling back to the ``X-Progress-ID`` request header, and combined
    with the client address to build the cache key.
    """
    # BUG FIX: progress_id was previously assigned only inside the two
    # conditionals, so a request carrying no X-Progress-ID raised
    # UnboundLocalError at the `if progress_id` check.
    progress_id = None
    if 'X-Progress-ID' in request.GET:
        progress_id = request.GET['X-Progress-ID']
    elif 'X-Progress-ID' in request.META:
        progress_id = request.META['X-Progress-ID']
    if progress_id:
        cache_key = "%s_%s" % (request.META['REMOTE_ADDR'], progress_id)
        data = cache.get(cache_key)
        return HttpResponse(simplejson.dumps(data))
    # No progress id supplied: nothing to report (implicit None, as the
    # original did for this path once reachable).
Used by Ajax calls Return the upload progress and total length values
def iterate_forever(func, *args, **kwargs):
    """Iterate over a finite iterator forever.

    Each yielded playlist item is prepared for playback first; whenever
    the iterator is exhausted, ``func`` is called again to produce a
    fresh one and iteration continues.
    """
    iterator = func(*args, **kwargs)
    while True:
        try:
            item = next(iterator)
        except StopIteration:
            # Source exhausted: rebuild it and keep going.
            iterator = func(*args, **kwargs)
        else:
            item.prepare_playback()
            yield item
Iterate over a finite iterator forever When the iterator is exhausted will call the function again to generate a new iterator and keep iterating.
def get_parser(func, parent):
    """Set up and return a sub-parser for *func* on *parent*.

    The command name, help text and argument specifications are read
    from the ``__cmd_name__``, ``__doc__`` and ``__arguments__``
    attributes of *func*.
    """
    sub = parent.add_parser(func.__cmd_name__, help=func.__doc__)
    for arg_names, arg_opts in func.__arguments__:
        sub.add_argument(*arg_names, **arg_opts)
    return sub
Imposta il parser.
def from_dade_matrix(filename, header=False):
    """Loads a numpy array from a Dade matrix instance, e.g.:

    A matrix containing the following (or equivalent in numpy):

        [['RST','chr1~0','chr1~10','chr2~0','chr2~30'],
         ['chr1~0','5', '10', '11', '2'],
         ['chr1~10', '8', '3', '5'],
         ['chr2~0', '3', '5'],
         ['chr2~30', '5']]

    is loaded as the dense symmetric matrix

        [['5', '10', '11', '2'],
         ['10', '8', '3', '5'],
         ['11', '3', '3', '5'],
         ['2', '5', '5', '5']]

    with header ['chr1~0','chr1~10','chr2~0','chr2~30'].

    Header data processing is delegated downstream.
    See https://github.com/scovit/DADE for more details about Dade.
    """
    A = np.genfromtxt(filename, delimiter="\t", dtype=None, filling_values=0)
    # Drop the label row/column; the rest is the (upper-triangular) matrix.
    M, headers = np.array(A[1:, 1:], dtype=np.float64), A[0]
    # Symmetrize: M holds only one triangle, so mirror it and subtract the
    # doubled diagonal.
    matrix = M + M.T - np.diag(np.diag(M))
    # Split each 'chrN~pos' label into (chromosome, position) columns.
    # NOTE(review): str(h)[:-1].strip(...) assumes bytes labels whose repr
    # carries quoting (dtype=None under py3) — confirm against real input.
    parsed_header = list(
        zip(*[str(h)[:-1].strip('"').strip("'").split("~") for h in headers[1:]]))
    if header:
        return matrix, parsed_header
    else:
        return matrix
Loads a numpy array from a Dade matrix instance, e.g.: A matrix containing the following (or equivalent in numpy) [['RST','chr1~0','chr1~10','chr2~0','chr2~30'], ['chr1~0','5', '10', '11', '2'], ['chr1~10', '8', '3', '5'], ['chr2~0', '3', '5'], ['chr2~30', '5']] [['5', '10', '11', '2'], ['10', '8', '3', '5'], [11', '3', '3', '5'], ['2', '5', '5', '5']], [chr1~0','chr1~10','chr2~0','chr2~30'] Header data processing is delegated downstream. See https://github.com/scovit/DADE for more details about Dade.
def download_url(job, url, work_dir='.', name=None, s3_key_path=None, cghub_key_path=None):
    """
    Downloads URL, can pass in file://, http://, s3://, or ftp://,
    gnos://cghub/analysisID, or gnos:///analysisID

    If downloading S3 URLs, the S3AM binary must be on the PATH

    :param toil.job.Job job: Toil job that is calling this function
    :param str url: URL to download from
    :param str work_dir: Directory to download file to
    :param str name: Name of output file, if None, basename of URL is used
    :param str s3_key_path: Path to 32-byte encryption key if url points to S3 file that uses SSE-C
    :param str cghub_key_path: Path to cghub key used to download from CGHub.
    :return: Path to the downloaded file
    :rtype: str
    """
    file_path = os.path.join(work_dir, name) if name else os.path.join(work_dir, os.path.basename(url))
    # Dispatch on how the data must be fetched: CGHub key takes priority,
    # then the URL scheme decides the transport.
    if cghub_key_path:
        _download_with_genetorrent(job, url, file_path, cghub_key_path)
    elif urlparse(url).scheme == 's3':
        _s3am_with_retry(job, num_cores=1, file_path=file_path, s3_url=url, mode='download', s3_key_path=s3_key_path)
    elif urlparse(url).scheme == 'file':
        shutil.copy(urlparse(url).path, file_path)
    else:
        # Generic scheme (http/ftp/...): let curl handle it with retries.
        subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dir', url, '-o', file_path])
    # Sanity check that some transport actually produced the file.
    assert os.path.exists(file_path)
    return file_path
Downloads URL, can pass in file://, http://, s3://, or ftp://, gnos://cghub/analysisID, or gnos:///analysisID If downloading S3 URLs, the S3AM binary must be on the PATH :param toil.job.Job job: Toil job that is calling this function :param str url: URL to download from :param str work_dir: Directory to download file to :param str name: Name of output file, if None, basename of URL is used :param str s3_key_path: Path to 32-byte encryption key if url points to S3 file that uses SSE-C :param str cghub_key_path: Path to cghub key used to download from CGHub. :return: Path to the downloaded file :rtype: str
def genExampleStar(binaryLetter='', heirarchy=True):
    """Generate an example (fake) star for testing.

    If *binaryLetter* is non-empty the star is attached to a generated
    parent binary object; if *heirarchy* is True the star (or its
    binary) is linked into a generated system.
    """
    star_params = StarParameters()
    # Table-driven parameter setup; note the two 'name' entries are both
    # added deliberately (primary name and HD designation).
    for key, value in (
        ('age', '7.6'),
        ('magB', '9.8'),
        ('magH', '7.4'),
        ('magI', '7.6'),
        ('magJ', '7.5'),
        ('magK', '7.3'),
        ('magV', '9.0'),
        ('mass', '0.98'),
        ('metallicity', '0.43'),
        ('name', 'Example Star {0}{1}'.format(ac._ExampleSystemCount, binaryLetter)),
        ('name', 'HD {0}{1}'.format(ac._ExampleSystemCount, binaryLetter)),
        ('radius', '0.95'),
        ('spectraltype', 'G5'),
        ('temperature', '5370'),
    ):
        star_params.addParam(key, value)
    example_star = Star(star_params.params)
    example_star.flags.addFlag('Fake')
    if heirarchy:
        if binaryLetter:
            parent = genExampleBinary()
        else:
            parent = genExampleSystem()
        parent._addChild(example_star)
        example_star.parent = parent
    return example_star
generates example star, if binaryLetter is true creates a parent binary object, if heirarchy is true will create a system and link everything up
def timestamp_to_local_time(timestamp, timezone_name):
    """Convert epoch timestamp to a localized Delorean datetime object.

    Arguments
    ---------
    timestamp : int
        The timestamp to convert.
    timezone_name : datetime.timezone
        The timezone of the desired local time.

    Returns
    -------
    delorean.Delorean
        A localized Delorean datetime object.
    """
    # Interpret the epoch value as UTC first, then shift to the target zone.
    as_utc = Delorean(datetime.utcfromtimestamp(float(timestamp)), timezone='UTC')
    return as_utc.shift(timezone_name)
Convert epoch timestamp to a localized Delorean datetime object. Arguments --------- timestamp : int The timestamp to convert. timezone_name : datetime.timezone The timezone of the desired local time. Returns ------- delorean.Delorean A localized Delorean datetime object.
def update(self, name, rssi):
    """Record the latest advertisement data for this device.

    During a scan, multiple records from the same device can arrive;
    each one refreshes :attr:`name`, :attr:`rssi` and the internal
    last-seen timestamp.
    """
    self.name, self.rssi = name, rssi
    self._age = time.time()
Update the device name and/or RSSI. During an ongoing scan, multiple records from the same device can be received during the scan. Each time that happens this method is called to update the :attr:`name` and/or :attr:`rssi` attributes.
def get_ml_job():
    """get_ml_job

    CLI entry point: fetch an ``MLJob`` by database id.

    Reads connection settings and the job id from command-line flags,
    falling back to environment variables, validates them, queries the
    AntiNex API via ``AIClient.get_job_by_id`` and logs the result.
    Exits non-zero on invalid input or API failure.
    """
    # --- command-line arguments -------------------------------------
    parser = argparse.ArgumentParser(
        description=("Python client get AI Job by ID"))
    parser.add_argument(
        "-u",
        help="username",
        required=False,
        dest="user")
    parser.add_argument(
        "-p",
        help="user password",
        required=False,
        dest="password")
    parser.add_argument(
        "-e",
        help="user email",
        required=False,
        dest="email")
    parser.add_argument(
        "-a",
        help="url endpoint with default http://localhost:8010",
        required=False,
        dest="url")
    parser.add_argument(
        "-i",
        help="User's MLJob.id to look up",
        required=False,
        dest="job_id")
    parser.add_argument(
        "-b",
        help=(
            "optional - path to CA bundle directory for "
            "client encryption over HTTP"),
        required=False,
        dest="ca_dir")
    parser.add_argument(
        "-c",
        help=(
            "optional - path to x509 certificate for "
            "client encryption over HTTP"),
        required=False,
        dest="cert_file")
    parser.add_argument(
        "-k",
        help=(
            "optional - path to x509 key file for "
            "client encryption over HTTP"),
        required=False,
        dest="key_file")
    parser.add_argument(
        "-s",
        help="silent",
        required=False,
        dest="silent",
        action="store_true")
    parser.add_argument(
        "-d",
        help="debug",
        required=False,
        dest="debug",
        action="store_true")
    args = parser.parse_args()
    # --- defaults from environment variables ------------------------
    user = ev(
        "API_USER",
        "user-not-set")
    password = ev(
        "API_PASSWORD",
        "password-not-set")
    email = ev(
        "API_EMAIL",
        "email-not-set")
    url = ev(
        "API_URL",
        "http://localhost:8010")
    job_id = ev(
        "JOB_ID",
        "job_id-not-set")
    ca_dir = os.getenv(
        "API_CA_BUNDLE_DIR",
        None)
    cert_file = os.getenv(
        "API_CERT_FILE",
        None)
    key_file = os.getenv(
        "API_KEY_FILE",
        None)
    verbose = bool(str(ev(
        "API_VERBOSE",
        "true")).lower() == "true")
    debug = bool(str(ev(
        "API_DEBUG",
        "false")).lower() == "true")
    # --- command-line flags override the environment ----------------
    if args.user:
        user = args.user
    if args.password:
        password = args.password
    if args.email:
        email = args.email
    if args.url:
        url = args.url
    if args.job_id:
        job_id = args.job_id
    if args.ca_dir:
        ca_dir = args.ca_dir
    if args.cert_file:
        cert_file = args.cert_file
    if args.key_file:
        key_file = args.key_file
    if args.silent:
        verbose = False
    if args.debug:
        debug = True
    # --- input validation -------------------------------------------
    usage = (
        "Please run with "
        "-u <username> "
        "-p <password> "
        "-a <AntiNex URL http://localhost:8010> "
        "-i <job_id> "
        "-b <optional - path to CA bundle directory> "
        "-c <optional - path to x509 ssl certificate file> "
        "-k <optional - path to x509 ssl key file>")
    valid = True
    if not user or user == "user-not-set":
        log.error("missing user")
        valid = False
    if not password or password == "password-not-set":
        log.error("missing password")
        valid = False
    if not job_id or job_id == "job_id-not-set":
        log.error("missing job_id")
        valid = False
    else:
        try:
            job_id = int(job_id)
        except Exception as e:
            log.error("please use -i <job_id with an integer>")
            valid = False
    if not valid:
        log.error(usage)
        sys.exit(1)
    # --- build the API client and fetch the job ---------------------
    if verbose:
        log.info((
            "creating client user={} url={} job_id={} "
            "ca_dir={} cert_file={} key_file={}").format(
                user,
                url,
                job_id,
                ca_dir,
                cert_file,
                key_file))
    client = AIClient(
        user=user,
        email=email,
        password=password,
        url=url,
        ca_dir=ca_dir,
        cert_file=cert_file,
        key_file=key_file,
        verbose=verbose,
        debug=debug)
    if verbose:
        log.info(("loading request in job_id={}")
                 .format(
                    job_id))
    response = client.get_job_by_id(
        job_id=job_id)
    # --- response handling ------------------------------------------
    if response["status"] == SUCCESS:
        if debug:
            log.info(("got a job response={}")
                     .format(
                        response["data"]))
    elif response["status"] == FAILED:
        log.error(("job failed with error='{}' with response={}")
                  .format(
                    response["error"],
                    response["data"]))
        sys.exit(1)
    elif response["status"] == ERROR:
        if "missing " in response["error"]:
            log.error(("Did not find a job with id={} for user={}")
                      .format(
                        job_id,
                        user))
        else:
            log.error(("job had an error='{}' with response={}")
                      .format(
                        response["error"],
                        response["data"]))
        sys.exit(1)
    elif response["status"] == LOGIN_FAILED:
        log.error(("job reported user was not able to log in "
                   "with an error='{}' with response={}")
                  .format(
                    response["error"],
                    response["data"]))
        sys.exit(1)
    job_data = response["data"]
    if len(job_data) == 0:
        log.error(("Did not find a job with id={} for user={}")
                  .format(
                    job_id,
                    user))
        sys.exit(1)
    job_id = job_data.get("id", None)
    job_status = job_data.get("status", None)
    log.info(("job={}")
             .format(
                ppj(job_data)))
    log.info(("done getting job.id={} status={}")
             .format(
                job_id,
                job_status))
get_ml_job Get an ``MLJob`` by database id.
def _data_dict_to_bokeh_chart_data(self, data):
    """
    Take a dictionary of data, as returned by the
    :py:class:`~.ProjectStats` per_*_data properties, return a 2-tuple of
    data dict and x labels list usable by bokeh.charts.

    :param data: data dict from :py:class:`~.ProjectStats` property
    :type data: dict
    :return: 2-tuple of data dict, x labels list
    :rtype: tuple
    """
    # Union of all series keys across every date (the data is sparse).
    keys = set()
    for per_date in data.values():
        keys.update(per_date)
    # Dates in sorted order become the x-axis labels.
    labels = sorted(data)
    # Fill missing (date, key) cells with 0 so every series has one
    # value per label.
    out_data = {
        k: [data[d].get(k, 0) for d in labels]
        for k in keys
    }
    return out_data, labels
Take a dictionary of data, as returned by the :py:class:`~.ProjectStats` per_*_data properties, return a 2-tuple of data dict and x labels list usable by bokeh.charts. :param data: data dict from :py:class:`~.ProjectStats` property :type data: dict :return: 2-tuple of data dict, x labels list :rtype: tuple
def read(self, len):
    """Refresh the content of the input buffer; the old data are
    considered consumed.

    This routine handles the I18N transcoding to internal UTF-8.

    :param len: maximum amount to read.  NOTE: the parameter shadows the
        ``len`` builtin; kept as-is for API compatibility with libxml2.
    :return: the value returned by libxml2's xmlParserInputBufferRead —
        presumably the number of chars read, negative on error; confirm
        against the libxml2 documentation.
    """
    ret = libxml2mod.xmlParserInputBufferRead(self._o, len)
    return ret
Refresh the content of the input buffer, the old data are considered consumed This routine handle the I18N transcoding to internal UTF-8
def inject_long_nonspeech_fragments(self, pairs, replacement_string):
    """
    Inject nonspeech fragments corresponding to the given intervals
    in this fragment list.

    It is assumed that ``pairs`` are consistent,
    e.g. they are produced by ``fragments_ending_inside_nonspeech_intervals``.

    :param list pairs: list of ``(TimeInterval, int)`` pairs,
                       each identifying a nonspeech interval and
                       the corresponding fragment index ending inside it
    :param string replacement_string: the string to be applied to the
                                      nonspeech intervals; ``None`` or the
                                      REMOVE sentinel yields empty text
    """
    self.log(u"Called inject_long_nonspeech_fragments")
    # set the appropriate fragment text: empty lines when removing,
    # otherwise a single line holding the replacement string
    if replacement_string in [None, gc.PPV_TASK_ADJUST_BOUNDARY_NONSPEECH_REMOVE]:
        self.log(u"  Remove long nonspeech")
        lines = []
    else:
        self.log([u"  Replace long nonspeech with '%s'", replacement_string])
        lines = [replacement_string]
    # first, make room for the nonspeech intervals: shrink the fragment
    # ending inside each interval and the one following it so the
    # interval's span becomes free
    self.log(u"  First pass: making room...")
    for nsi, index in pairs:
        self[index].interval.end = nsi.begin
        self[index + 1].interval.begin = nsi.end
    self.log(u"  First pass: making room... done")
    self.log(u"  Second pass: append nonspeech intervals...")
    # append one NONSPEECH fragment per interval, with sequential ids;
    # sort=False defers ordering to the third pass below
    for i, (nsi, index) in enumerate(pairs, 1):
        identifier = u"n%06d" % i
        self.add(SyncMapFragment(
            text_fragment=TextFragment(
                identifier=identifier,
                language=None,
                lines=lines,
                filtered_lines=lines
            ),
            interval=nsi,
            fragment_type=SyncMapFragment.NONSPEECH
        ), sort=False)
    self.log(u"  Second pass: append nonspeech intervals... done")
    self.log(u"  Third pass: sorting...")
    self.sort()
    self.log(u"  Third pass: sorting... done")
Inject nonspeech fragments corresponding to the given intervals in this fragment list. It is assumed that ``pairs`` are consistent, e.g. they are produced by ``fragments_ending_inside_nonspeech_intervals``. :param list pairs: list of ``(TimeInterval, int)`` pairs, each identifying a nonspeech interval and the corresponding fragment index ending inside it :param string replacement_string: the string to be applied to the nonspeech intervals
def get_filename(self, instance):
    """Get the filename of the stored file.

    Prefers the filename recorded on the field; otherwise builds one
    from the field name plus an extension guessed from the content type.
    """
    stored_name = self.field.getFilename(instance)
    if stored_name:
        return stored_name
    extension = mimetypes.guess_extension(self.get_content_type(instance))
    return self.get_field_name() + extension
Get the filename
def get_position(self, rst_tree, node_id=None):
    """Get the linear position of an element of this DGParentedTree in
    an RSTTree.

    If *node_id* is given, return the position of the subtree with that
    node ID; otherwise, return the position of the root of this
    DGParentedTree in the given *rst_tree*.
    """
    if node_id is None:
        node_id = self.root_id
    # Base case: an EDU's position is its index in the EDU list.
    if node_id in rst_tree.edu_set:
        return rst_tree.edus.index(node_id)
    # Internal node: its position is that of its leftmost descendant EDU.
    child_positions = [
        self.get_position(rst_tree, child_id)
        for child_id in rst_tree.child_dict[node_id]
    ]
    return min(child_positions)
Get the linear position of an element of this DGParentedTree in an RSTTree. If ``node_id`` is given, this will return the position of the subtree with that node ID. Otherwise, the position of the root of this DGParentedTree in the given RSTTree is returned.
def readNullModelFile(nfile):
    """Read the null-model information stored next to *nfile*.

    Expects two companion files to exist:
      * ``<nfile>.p0``   -- null-model parameters (1-D or 2-D)
      * ``<nfile>.nll0`` -- null-model negative log-likelihood

    Returns a dict with keys ``params0_g``, ``params0_n`` and ``NLL0``.
    """
    params_file = nfile + '.p0'
    nll_file = nfile + '.nll0'
    assert os.path.exists(params_file), '%s is missing.' % params_file
    assert os.path.exists(nll_file), '%s is missing.' % nll_file
    params = SP.loadtxt(params_file)
    nll0 = SP.array([float(SP.loadtxt(nll_file))])
    if params.ndim == 1:
        # Single parameter pair: wrap each scalar in a length-1 array.
        return {'params0_g': SP.array([params[0]]),
                'params0_n': SP.array([params[1]]),
                'NLL0': nll0}
    # 2-D layout: first row are the 'g' params, second row the 'n' params.
    return {'params0_g': params[0], 'params0_n': params[1], 'NLL0': nll0}
reading file with null model info nfile File containing null model info
def _check_set_values(instance, dic):
    """
    This function checks if the dict values are correct.

    :instance: the object instance. Used for querying.
    :dic: a dictionary with the following format:
        {'actions': [{'act_row_idx': 0, 'action': 'repeat',
                      'an_result_id': 'rep-1', 'analyst': '',
                      'otherWS': 'current', 'setresultdiscrete': '',
                      'setresulton': 'original', 'setresultvalue': '',
                      'worksheettemplate': ''}],
         'conditions': [{'analysisservice': '52853cf7d5114b5aa8c159afad2f3da1',
                         'and_or': 'no', 'cond_row_idx': 0,
                         'discreteresult': '', 'range0': '11',
                         'range1': '12'}],
         'mother_service_uid': '52853cf7d5114b5aa8c159afad2f3da1',
         'rulenumber': '0',
         'trigger': 'submit'}

    These are the checking rules:
    :range0/range1: string or number. The numeric range within which the
        action will be carried out. Keep it as None or '' if
        discreteresult is going to be used instead.
    :discreteresult: string. If discreteresult is not Null, ranges have
        to be Null.
    :trigger: string. So far there are only two options:
        'submit'/'verify'. They are defined in
        browser/widgets/reflexrulewidget.py/ReflexRuleWidget/getTriggerVoc.
    :analysisservice: the uid of an analysis service.
    :actions: a list of dictionaries with the following format:
        [{'action': '<action_name>', 'act_row_idx': 'X',
          'otherWS': Bool, 'analyst': '<analyst_id>'}, ...]
    :'repetition_max': integer or string representing an integer.
        <action_name> options are found in
        browser/widgets/reflexrulewidget.py/ReflexRuleWidget/getActionVoc
        so far.

    Returns True when every check passes, False otherwise (details are
    logged as warnings).
    """
    uc = getToolByName(instance, 'uid_catalog')
    # rulenumber must be numeric when present
    rulenumber = dic.get('rulenumber', '0')
    if rulenumber and not(isnumber(rulenumber)):
        logger.warn('The range must be a number. Now its value is: '
                    '%s' % (rulenumber))
        return False
    # only 'submit' and 'verify' triggers are supported
    trigger = dic.get('trigger', 'submit')
    if trigger not in ['submit', 'verify']:
        logger.warn('Only available triggers are "verify" or "submit". '
                    '%s has been introduced.' % (trigger))
        return False
    # the mother service UID must resolve to a catalog brain
    mother_service_uid = dic.get('mother_service_uid', '')
    as_brain = uc(UID=mother_service_uid)
    if not as_brain:
        logger.warn(
            'Not correct analysis service with UID. %s' % (mother_service_uid))
        return False
    # Checking the conditions
    conditions = dic.get('conditions', [])
    if not conditions or not _check_conditions(instance, conditions):
        return False
    # Checking the actions
    actions = dic.get('actions', [])
    if not actions or not _check_actions(instance, actions):
        return False
    return True
This function checks if the dict values are correct. :instance: the object instance. Used for querying :dic: is a dictionary with the following format: {'actions': [{'act_row_idx': 0, 'action': 'repeat', 'an_result_id': 'rep-1', 'analyst': '', 'otherWS': 'current', 'setresultdiscrete': '', 'setresulton': 'original', 'setresultvalue': '', 'worksheettemplate': ''}], 'conditions': [{'analysisservice': '52853cf7d5114b5aa8c159afad2f3da1', 'and_or': 'no', 'cond_row_idx': 0, 'discreteresult': '', 'range0': '11', 'range1': '12'}], 'mother_service_uid': '52853cf7d5114b5aa8c159afad2f3da1', 'rulenumber': '0', 'trigger': 'submit'}, These are the checking rules: :range0/range1: string or number. They are the numeric range within the action will be carried on. It is needed to keep it as None or '' if the discreteresult is going to be used instead. :discreteresult: string If discreteresult is not Null, ranges have to be Null. :trigger: string. So far there are only two options: 'submit'/'verify'. They are defined in browser/widgets/reflexrulewidget.py/ReflexRuleWidget/getTriggerVoc. :analysisservice: it is the uid of an analysis service :actions: It is a list of dictionaries with the following format: [{'action':'<action_name>', 'act_row_idx':'X', 'otherWS':Bool, 'analyst': '<analyst_id>'}, {'action':'<action_name>', 'act_row_idx':'X', 'otherWS':Bool, 'analyst': '<analyst_id>'}, ] :'repetition_max': integer or string representing an integer. <action_name> options are found in browser/widgets/reflexrulewidget.py/ReflexRuleWidget/getActionVoc so far.
def to_postdata(self):
    """Serialize this mapping as urlencoded POST body data (bytes)."""
    # Sort by key so the serialized output is deterministic (useful for
    # testing); values go through to_utf8_optional_iterator so sequences
    # survive encoding.
    encoded_pairs = [
        (key.encode('utf-8'), to_utf8_optional_iterator(value))
        for key, value in sorted(self.items())
    ]
    # doseq=True makes urlencode expand sequence values into repeated
    # parameters, e.g. {"k": ["v1", "v2"]} -> 'k=v1&k=v2' instead of a
    # stringified list like k=%5B%27v1%27%2C+%27v2%27%5D.
    body = urlencode(encoded_pairs, True)
    return body.replace('+', '%20').encode('ascii')
Serialize as post data for a POST request.
def rcfile(appname, args=None, strip_dashes=True, module_name=None):
    """ Read environment variables and config files and return them merged with
    predefined list of arguments.

    Arguments:
        appname - application name, used for config files and environment
                  variable names.
        args - arguments from command line (optparse, docopt, etc).
        strip_dashes - strip dashes prefixing key names from args dict.
        module_name - optional module name for config lookup; defaults to
                  appname.

    Returns:
        dict containing the merged variables of environment variables, config
        files and args.

    Environment variables are read if they start with appname in uppercase
    with underscore, for example: TEST_VAR=1

    Config files compatible with ConfigParser are read and the section name
    appname is read, example:

    [appname]
    var=1

    Files are read from: /etc/appname/config, /etc/appfilerc,
    ~/.config/appname/config, ~/.config/appname, ~/.appname/config,
    ~/.appnamerc, .appnamerc, file provided by config variable in args.

    Example usage with docopt:
        args = rcfile(__name__, docopt(__doc__, version=__version__))
    """
    # Bug fix: 'args={}' was a mutable default that the function mutated, and
    # the strip-dashes loop popped/added entries while iterating the live
    # dict view, which raises RuntimeError on Python 3.
    if args is None:
        args = {}
    if strip_dashes:
        # Iterate over a snapshot of the keys so the dict may be safely
        # mutated during the loop.
        for k in list(args.keys()):
            args[k.lstrip('-')] = args.pop(k)

    environ = get_environment(appname)

    if not module_name:
        module_name = appname

    config = get_config(appname, module_name, args.get('config', ''))

    # Precedence (lowest to highest): args < config files < environment.
    return merge(merge(args, config), environ)
Read environment variables and config files and return them merged with predefined list of arguments. Arguments: appname - application name, used for config files and environemnt variable names. args - arguments from command line (optparse, docopt, etc). strip_dashes - strip dashes prefixing key names from args dict. Returns: dict containing the merged variables of environment variables, config files and args. Environment variables are read if they start with appname in uppercase with underscore, for example: TEST_VAR=1 Config files compatible with ConfigParser are read and the section name appname is read, example: [appname] var=1 Files are read from: /etc/appname/config, /etc/appfilerc, ~/.config/appname/config, ~/.config/appname, ~/.appname/config, ~/.appnamerc, .appnamerc, file provided by config variable in args. Example usage with docopt: args = rcfile(__name__, docopt(__doc__, version=__version__))
def coordination_geometry_symmetry_measures_standard(self,
                                                     coordination_geometry,
                                                     algo,
                                                     points_perfect=None,
                                                     optimization=None):
    """
    Returns the symmetry measures for a set of permutations (whose setup
    depends on the coordination geometry) for the coordination geometry
    "coordination_geometry". Standard implementation looking for the
    symmetry measures of each permutation.

    :param coordination_geometry: The coordination geometry to be investigated
    :param algo: Algorithm object supplying the permutations to test
    :param points_perfect: Coordinates of the perfect geometry's points
    :param optimization: Retained for interface compatibility; both the
        optimization == 2 path and the default path previously executed
        byte-identical code, so the duplication has been collapsed.
    :return: The symmetry measures for the given coordination geometry for
        each permutation investigated, plus the permutations, algorithm
        labels and local<->perfect index maps.
    """
    permutations_symmetry_measures = [None] * len(algo.permutations)
    permutations = list()
    algos = list()
    local2perfect_maps = list()
    perfect2local_maps = list()
    for iperm, perm in enumerate(algo.permutations):
        local2perfect_map = {}
        perfect2local_map = {}
        permutations.append(perm)
        # Build the two inverse index maps between local and perfect sites.
        for iperfect, ii in enumerate(perm):
            perfect2local_map[iperfect] = ii
            local2perfect_map[ii] = iperfect
        local2perfect_maps.append(local2perfect_map)
        perfect2local_maps.append(perfect2local_map)

        points_distorted = self.local_geometry.points_wcs_ctwcc(
            permutation=perm)

        sm_info = symmetry_measure(points_distorted=points_distorted,
                                   points_perfect=points_perfect)
        sm_info['translation_vector'] = self.local_geometry.centroid_with_centre

        permutations_symmetry_measures[iperm] = sm_info
        algos.append(str(algo))
    return permutations_symmetry_measures, permutations, algos, local2perfect_maps, perfect2local_maps
Returns the symmetry measures for a set of permutations (whose setup depends on the coordination geometry) for the coordination geometry "coordination_geometry". Standard implementation looking for the symmetry measures of each permutation :param coordination_geometry: The coordination geometry to be investigated :return: The symmetry measures for the given coordination geometry for each permutation investigated
def root_chip(self):
    """The coordinates (x, y) of the chip used to boot the machine."""
    # Lazily query the machine on first access and cache the answer.
    if self._root_chip is None:
        version_info = self.get_software_version(255, 255, 0)
        self._root_chip = version_info.position
    return self._root_chip
The coordinates (x, y) of the chip used to boot the machine.
def command(self, cmd_name, callback, *args):
    """Queue an asynchronous command for execution.

    Args:
        cmd_name (int): The unique code for the command to execute.
        callback (callable): The optional callback to run when the command
            finishes.  The signature should be
            callback(cmd_name, result, exception)
        *args: Any arguments that are passed to the underlying command
            handler
    """
    # Wrap the request and hand it to the worker queue for processing.
    self._commands.put(JLinkCommand(cmd_name, args, callback))
Run an asynchronous command. Args: cmd_name (int): The unique code for the command to execute. callback (callable): The optional callback to run when the command finishes. The signature should be callback(cmd_name, result, exception) *args: Any arguments that are passed to the underlying command handler
def to_json(self):
    """ Serialize the complete Morse complex merge hierarchy to a JSON string.

        @ Out, a string object storing the entire merge hierarchy of all
        maxima.
    """
    # One record per dying extremum in the merge sequence.
    hierarchy = [
        {
            "Persistence": persistence,
            "Dying": dying,
            "Surviving": surviving,
            "Saddle": saddle,
        }
        for dying, (persistence, surviving, saddle)
        in self.merge_sequence.items()
    ]

    # Invert the partition map: produce one label per sample index.
    labels = np.array([None] * len(self.Y))
    for label, members in self.base_partitions.items():
        labels[members] = label

    capsule = {"Hierarchy": hierarchy, "Partitions": labels.tolist()}
    return json.dumps(capsule, separators=(",", ":"))
Writes the complete Morse complex merge hierarchy to a string object. @ Out, a string object storing the entire merge hierarchy of all maxima.
def maybe_convert_platform_interval(values):
    """
    Try to do platform conversion, with special casing for IntervalArray.

    Wrapper around maybe_convert_platform that alters the default return
    dtype in certain cases to be compatible with IntervalArray.  For example,
    empty lists return with integer dtype instead of object dtype, which is
    prohibited for IntervalArray.

    Parameters
    ----------
    values : array-like

    Returns
    -------
    array
    """
    if isinstance(values, (list, tuple)) and not values:
        # GH 19016: empty lists/tuples would normally coerce to object
        # dtype, which IntervalArray forbids; coerce to int64 instead.
        return np.array([], dtype=np.int64)

    if is_categorical_dtype(values):
        values = np.asarray(values)

    return maybe_convert_platform(values)
Try to do platform conversion, with special casing for IntervalArray. Wrapper around maybe_convert_platform that alters the default return dtype in certain cases to be compatible with IntervalArray. For example, empty lists return with integer dtype instead of object dtype, which is prohibited for IntervalArray. Parameters ---------- values : array-like Returns ------- array
def list_merge(list_a, list_b):
    """ Merge two lists without duplicating items.

    Items of ``list_a`` come first (deduplicated, first occurrence wins),
    followed by items of ``list_b`` that are not already present.
    Membership is tested with ``==`` so unhashable items (e.g. lists)
    are supported, at the cost of O(n^2) worst-case time.

    Args:
        list_a: list
        list_b: list
    Returns:
        New list with deduplicated items from list_a and list_b
    """
    # Fixes: removed dead commented-out code, merged the two identical
    # loops, and used the idiomatic 'not in' operator.
    result = []
    for item in list(list_a) + list(list_b):
        if item not in result:
            result.append(item)
    return result
Merge two lists without duplicating items Args: list_a: list list_b: list Returns: New list with deduplicated items from list_a and list_b
def compositions(self):
    """
    :rtype: twilio.rest.video.v1.composition.CompositionList
    """
    # Build the list resource lazily and cache it for later accesses.
    cached = self._compositions
    if cached is None:
        cached = CompositionList(self)
        self._compositions = cached
    return cached
:rtype: twilio.rest.video.v1.composition.CompositionList
def physical_conversion(quantity,pop=False):
    """Decorator to convert a method's output to physical coordinates.

    quantity: string naming the kind of quantity the wrapped method returns
        (e.g. 'position', 'velocity', 'time', ...); selects the conversion
        factor and, when astropy units are enabled, the astropy unit.
        Quantity names containing '_' (like 'position_kpc') are already in
        physical units and always get unit treatment.
    pop: if True, strip the 'use_physical'/'ro'/'vo' kwargs before calling
        the wrapped method.
    """
    def wrapper(method):
        @wraps(method)
        def wrapped(*args,**kwargs):
            # Physical output is requested unless explicitly disabled or a
            # log-scaled result is being returned.
            use_physical= kwargs.get('use_physical',True) and \
                not kwargs.get('log',False)
            # Parse whether ro or vo should be considered to be set, because
            # the return value will have units anyway
            # (like in Orbit methods that return numbers with units, like ra)
            roSet= '_' in quantity # _ in quantity name means always units
            voSet= '_' in quantity # _ in quantity name means always units
            use_physical= use_physical or '_' in quantity # _ in quantity name means always units
            # Resolve ro: explicit kwarg > instance attribute > first list
            # element's attribute (for lists of Potentials).
            ro= kwargs.get('ro',None)
            if ro is None and \
                    (roSet or (hasattr(args[0],'_roSet') and args[0]._roSet)):
                ro= args[0]._ro
            if ro is None and isinstance(args[0],list) \
                    and hasattr(args[0][0],'_roSet') and args[0][0]._roSet:
                # For lists of Potentials
                ro= args[0][0]._ro
            if _APY_LOADED and isinstance(ro,units.Quantity):
                ro= ro.to(units.kpc).value
            # Resolve vo the same way.
            vo= kwargs.get('vo',None)
            if vo is None and \
                    (voSet or (hasattr(args[0],'_voSet') and args[0]._voSet)):
                vo= args[0]._vo
            if vo is None and isinstance(args[0],list) \
                    and hasattr(args[0][0],'_voSet') and args[0][0]._voSet:
                # For lists of Potentials
                vo= args[0][0]._vo
            if _APY_LOADED and isinstance(vo,units.Quantity):
                vo= vo.to(units.km/units.s).value
            # Override Quantity output?
            _apy_units= kwargs.get('quantity',_APY_UNITS)
            #Remove ro and vo kwargs if necessary
            if pop and 'use_physical' in kwargs: kwargs.pop('use_physical')
            if pop and 'ro' in kwargs: kwargs.pop('ro')
            if pop and 'vo' in kwargs: kwargs.pop('vo')
            # Only convert when physical output is wanted AND all required
            # scale factors (ro/vo) for this quantity are available.
            if use_physical and \
                    not (_voNecessary[quantity.lower()] and vo is None) and \
                    not (_roNecessary[quantity.lower()] and ro is None):
                from galpy.orbit import Orbit
                if isinstance(args[0],Orbit):
                    print_physical_warning()
                # Dispatch on the quantity kind: 'fac' is the multiplicative
                # conversion factor, 'u' the astropy unit when enabled.
                if quantity.lower() == 'time':
                    fac= time_in_Gyr(vo,ro)
                    if _apy_units:
                        u= units.Gyr
                elif quantity.lower() == 'position':
                    fac= ro
                    if _apy_units:
                        u= units.kpc
                elif quantity.lower() == 'position_kpc': # already in kpc
                    fac= 1.
                    if _apy_units:
                        u= units.kpc
                elif quantity.lower() == 'velocity':
                    fac= vo
                    if _apy_units:
                        u= units.km/units.s
                elif quantity.lower() == 'velocity2':
                    fac= vo**2.
                    if _apy_units:
                        u= (units.km/units.s)**2
                elif quantity.lower() == 'velocity_kms': # already in km/s
                    fac= 1.
                    if _apy_units:
                        u= units.km/units.s
                elif quantity.lower() == 'frequency':
                    # km/s/kpc output only when explicitly requested and
                    # astropy units are off; otherwise Gyr^-1.
                    if kwargs.get('kmskpc',False) and not _apy_units:
                        fac= freq_in_kmskpc(vo,ro)
                    else:
                        fac= freq_in_Gyr(vo,ro)
                        if _apy_units:
                            u= units.Gyr**-1.
                elif quantity.lower() == 'frequency-kmskpc':
                    fac= freq_in_kmskpc(vo,ro)
                    if _apy_units:
                        u= units.km/units.s/units.kpc
                elif quantity.lower() == 'action':
                    fac= ro*vo
                    if _apy_units:
                        u= units.kpc*units.km/units.s
                elif quantity.lower() == 'energy':
                    fac= vo**2.
                    if _apy_units:
                        u= units.km**2./units.s**2.
                elif quantity.lower() == 'angle': # in rad
                    fac= 1.
                    if _apy_units:
                        u= units.rad
                elif quantity.lower() == 'angle_deg': # already in deg
                    fac= 1.
                    if _apy_units:
                        u= units.deg
                elif quantity.lower() == 'proper-motion_masyr': # already in mas/yr
                    fac= 1.
                    if _apy_units:
                        u= units.mas/units.yr
                elif quantity.lower() == 'force':
                    fac= force_in_kmsMyr(vo,ro)
                    if _apy_units:
                        u= units.km/units.s/units.Myr
                elif quantity.lower() == 'density':
                    fac= dens_in_msolpc3(vo,ro)
                    if _apy_units:
                        u= units.Msun/units.pc**3
                elif quantity.lower() == 'numberdensity':
                    fac= 1/ro**3.
                    if _apy_units:
                        u= 1/units.kpc**3
                elif quantity.lower() == 'velocity2surfacedensity':
                    fac= surfdens_in_msolpc2(vo,ro)*vo**2
                    if _apy_units:
                        u= units.Msun/units.pc**2*(units.km/units.s)**2
                elif quantity.lower() == 'surfacedensity':
                    fac= surfdens_in_msolpc2(vo,ro)
                    if _apy_units:
                        u= units.Msun/units.pc**2
                elif quantity.lower() == 'numbersurfacedensity':
                    fac= 1./ro**2.
                    if _apy_units:
                        u= 1/units.kpc**2
                elif quantity.lower() == 'surfacedensitydistance':
                    fac= surfdens_in_msolpc2(vo,ro)*ro*1000.
                    if _apy_units:
                        u= units.Msun/units.pc
                elif quantity.lower() == 'mass':
                    fac= mass_in_msol(vo,ro)
                    if _apy_units:
                        u= units.Msun
                elif quantity.lower() == 'forcederivative':
                    fac= freq_in_Gyr(vo,ro)**2.
                    if _apy_units:
                        u= units.Gyr**-2.
                elif quantity.lower() == 'phasespacedensity':
                    fac= 1./vo**3./ro**3.
                    if _apy_units:
                        u= 1/(units.km/units.s)**3/units.kpc**3
                elif quantity.lower() == 'phasespacedensity2d':
                    fac= 1./vo**2./ro**2.
                    if _apy_units:
                        u= 1/(units.km/units.s)**2/units.kpc**2
                elif quantity.lower() == 'phasespacedensityvelocity':
                    fac= 1./vo**2./ro**3.
                    if _apy_units:
                        u= 1/(units.km/units.s)**2/units.kpc**3
                elif quantity.lower() == 'phasespacedensityvelocity2':
                    fac= 1./vo/ro**3.
                    if _apy_units:
                        u= 1/(units.km/units.s)/units.kpc**3
                elif quantity.lower() == 'dimensionless':
                    fac= 1.
                    if _apy_units:
                        u= units.dimensionless_unscaled
                # NOTE(review): an unrecognized quantity would leave 'fac'
                # (and 'u') unbound and raise NameError below — presumably
                # all callers use known quantity names; confirm.
                out= method(*args,**kwargs)
                if out is None:
                    return out
                if _apy_units:
                    return units.Quantity(out*fac,unit=u)
                else:
                    return out*fac
            else:
                # No conversion possible/requested: return raw output.
                return method(*args,**kwargs)
        return wrapped
    return wrapper
Decorator to convert to physical coordinates: quantity = [position,velocity,time]
def delete_event_public_discount(self, id, discount_id, **data):
    """
    DELETE /events/:id/public_discounts/:discount_id/
    Deletes a public discount.

    :param id: the event id
    :param discount_id: the public discount id to delete
    """
    # Bug fix: the path template previously used "{0}" for both
    # placeholders, so the event id was substituted twice and
    # discount_id was silently ignored.
    return self.delete(
        "/events/{0}/public_discounts/{1}/".format(id, discount_id),
        data=data)
DELETE /events/:id/public_discounts/:discount_id/ Deletes a public discount.
def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None,
                 port=8773, path='/services/Eucalyptus',
                 is_secure=False, **kwargs):
    """
    Connect to a Eucalyptus service.

    :type host: string
    :param host: the host name or ip address of the Eucalyptus server

    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.ec2.connection.EC2Connection`
    :return: A connection to Eucalyptus server
    """
    # Eucalyptus connectivity is not available in this build; the stub
    # always raises.
    raise BotoClientError('Not Implemented')
Connect to a Eucalyptus service. :type host: string :param host: the host name or ip address of the Eucalyptus server :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.ec2.connection.EC2Connection` :return: A connection to Eucalyptus server
def generateSummary(self, extraLapse = TYPICAL_LAPSE):
    '''Generates a summary of the status of the expected scripts broken
    based on the log. This summary (a list of strings) is returned as well
    as a list with the dates (which can be used to index the log)of the
    most recent attempts at the failed jobs.

    extraLapse: extra tolerance (beyond the expected schedule) before a
        script with an old success is considered FAILED; passed through to
        expectedScripts.check().
    Returns: (body, failedList) — body is the formatted report lines,
        failedList the per-job summary strings for failed jobs.
    '''
    scriptsRun = self.scriptsRun
    body = []
    numberOfFailed = 0
    numberWithWarnings = 0
    failedList = []
    successList = []
    warningsList = []
    # Classify every known script as FAILED / WARNINGS / OK.
    for name, details in sorted(scriptsRun.iteritems()):
        status = None
        daysSinceSuccess = None
        if details["lastSuccess"] and expectedScripts.get(name):
            daysSinceSuccess = expectedScripts.get(name).getDaysSinceLastSuccess(details["lastSuccess"])
            # Last success exists but is older than the allowed schedule.
            if not expectedScripts.check(name, details["lastSuccess"], extraLapse):
                status = "FAILED"
        else:
            # No recorded success, or script not in the expected set.
            status = "FAILED"
        if not status:
            # Status not decided yet: fall back to the logged status flags.
            if details["status"] & RETROSPECT_FAIL:
                status = "FAILED"
            elif details["status"] & RETROSPECT_WARNING:
                status = "WARNINGS"
            elif status != "FAILED":
                # NOTE(review): status is None here, so this condition is
                # always true — looks like dead guard; confirm intent.
                status = "OK"
        if details["lastSuccess"] and daysSinceSuccess:
            lastSuccessDetails = "Last successful run on %s (%0.1f days ago)" % (details["lastSuccess"], daysSinceSuccess)
        else:
            lastSuccessDetails = "No recent successful run."
        if details["lastRun"]:
            lastRunDetails = "Last run on %s (%s)" % (details["lastRun"], status)
        else:
            lastRunDetails = "No recent run (%s)" % status
        if status == "FAILED":
            numberOfFailed += 1
            failedList.append("%s: %s. %s" % (name, lastRunDetails, lastSuccessDetails))
        elif status == "WARNINGS":
            numberWithWarnings += 1
            warningsList.append("%s: %s. %s" % (name, lastRunDetails, lastSuccessDetails))
        else:
            successList.append("%s: %s. %s" % (name, lastRunDetails, lastSuccessDetails))
    # Assemble the report: failed first, then warnings, then successes.
    # NOTE(review): 'body' is re-initialized here; the assignment above is
    # redundant.
    body = []
    if failedList:
        body.append("FAILED JOBS (%d)" % numberOfFailed)
        body.append("****************")
        for j in failedList:
            body.append(j)
        body.append("\n")
    if warningsList:
        body.append("JOBS WITH WARNINGS (%d)" % numberWithWarnings)
        body.append("***********************")
        for j in warningsList:
            body.append(j)
        body.append("\n")
    if successList:
        body.append("SUCCESSFUL JOBS")
        body.append("***************")
        for j in successList:
            body.append(j)
    return body, failedList
Generates a summary of the status of the expected scripts broken based on the log. This summary (a list of strings) is returned as well as a list with the dates (which can be used to index the log)of the most recent attempts at the failed jobs.
def grid_to_eccentric_radii(self, grid):
    """Convert a grid of (y,x) coordinates to an eccentric radius, which is \
    (1.0/axis_ratio) * elliptical radius and is used to define light-profile \
    half-light radii using circular radii.

    If the coordinates have not been transformed to the profile's geometry,
    this is performed automatically.

    Parameters
    ----------
    grid : TransformedGrid(ndarray)
        The (y, x) coordinates in the reference frame of the elliptical
        profile.
    """
    elliptical_radii = self.grid_to_elliptical_radii(grid)
    scaled = np.sqrt(self.axis_ratio) * elliptical_radii
    # Strip any ndarray subclass so a plain ndarray is returned.
    return scaled.view(np.ndarray)
Convert a grid of (y,x) coordinates to an eccentric radius, which is (1.0/axis_ratio) * elliptical radius \ and used to define light profile half-light radii using circular radii. If the coordinates have not been transformed to the profile's geometry, this is performed automatically. Parameters ---------- grid : TransformedGrid(ndarray) The (y, x) coordinates in the reference frame of the elliptical profile.
def matches(self, sexp):
    '''
    Body of a non-terminal is always a :class:`Sequence`.  An s-expr
    matches only when it is a non-empty list whose first element equals
    this non-terminal's name and whose remaining elements match the body
    sequence::

        ['name'] + [sexpr-0, ..., sexpr-n]
    '''
    # Guard clauses: must be a non-empty list starting with our name.
    if not sexp or not isinstance(sexp, list):
        return False
    if self.name != sexp[0]:
        return False
    return self.body.matches(sexp[1:])
Body of a non-terminal is always a :class:`Sequence`. For an s-expr to match, it must be of the form:: ['name'] + [sexpr-0, ..., sexpr-n] where the first list contains a name of the non-terminal, and the second one matches its body sequence.
def setProfile(self, name):
    """
    Assign a PROFILE to this unnamed component.

    Used by vCard, not by vCalendar.
    """
    already_named = self.name or self.useBegin
    if already_named:
        if self.name == name:
            # Idempotent: same profile already assigned.
            return
        raise VObjectError("This component already has a PROFILE or "
                           "uses BEGIN.")
    # Profile names are stored upper-cased.
    self.name = name.upper()
Assign a PROFILE to this unnamed component. Used by vCard, not by vCalendar.
def mex_hat(x, sigma):
    r"""Mexican hat

    This method implements a Mexican hat (or Ricker) wavelet.

    Parameters
    ----------
    x : float
        Input data point
    sigma : float
        Standard deviation (filter scale)

    Returns
    -------
    float Mexican hat filtered data point

    Examples
    --------
    >>> from modopt.signal.filter import mex_hat
    >>> mex_hat(2, 1)
    -0.35213905225713371
    """
    x = check_float(x)
    sigma = check_float(sigma)

    # Squared ratio of the point to the filter scale.
    ratio_sq = (x / sigma) ** 2
    # Normalization constant of the Ricker wavelet.
    norm = 2 * (3 * sigma) ** -0.5 * np.pi ** -0.25

    return norm * (1 - ratio_sq) * np.exp(-0.5 * ratio_sq)
r"""Mexican hat This method implements a Mexican hat (or Ricker) wavelet. Parameters ---------- x : float Input data point sigma : float Standard deviation (filter scale) Returns ------- float Mexican hat filtered data point Examples -------- >>> from modopt.signal.filter import mex_hat >>> mex_hat(2, 1) -0.35213905225713371
def encode(self):
    '''
    Encode and store a SUBACK control packet.
    '''
    var_header = encode16Int(self.msgId)
    payload = bytearray()
    for return_code in self.granted:
        # Bit 7 is set when the second tuple element is exactly True.
        payload.append(return_code[0] | (0x80 if return_code[1] == True else 0x00))
    packet = bytearray(1)
    packet[0] = 0x90  # SUBACK fixed-header type byte
    packet.extend(encodeLength(len(var_header) + len(payload)))
    packet.extend(var_header)
    packet.extend(payload)
    self.encoded = packet
    return str(packet) if PY2 else bytes(packet)
Encode and store a SUBACK control packet.
def _property_create_dict(header, data):
    '''
    Create a property dict
    '''
    # Pair header names with the (length-normalized) data columns.
    merged_values = _merge_last(data, len(header))
    prop = dict(zip(header, merged_values))
    # Replace the raw 'property' key with normalized name/type fields.
    prop_name = _property_normalize_name(prop.pop('property'))
    prop['name'] = prop_name
    prop['type'] = _property_detect_type(prop_name, prop['values'])
    prop['edit'] = from_bool(prop['edit'])
    if 'inherit' in prop:
        prop['inherit'] = from_bool(prop['inherit'])
    return prop
Create a property dict
def getStatus(self):
    """Read the AD7730 status register and decode its flag bits.

    Returns a dict mapping flag name to bool:

    RDY   -- Ready Bit: mirrors the RDY output pin; indicates a result is
             available (see Table XVIII of the datasheet for setting
             events).
    STDY  -- Steady Bit: updated when the filter writes a result; stays
             high during FASTStep-mode initial conversions until the
             second-stage FIR filter has fully settled.
    STBY  -- Standby Bit: whether the part is in Standby Mode (via the
             STANDBY pin or MD2..MD0 = 011 in the Mode Register).
    NOREF -- No Reference Bit: set when the REF IN(+)/REF IN(-) voltage is
             below 0.3 V or either input is open-circuit; conversions then
             read all 1s and calibration-register updates are inhibited.
    """
    raw = self.single_read(self.AD7730_STATUS_REG)
    flags = raw[0]
    return {
        'NOREF': bool(flags & 0x10),
        'STBY': bool(flags & 0x20),
        'STDY': bool(flags & 0x40),
        'RDY': bool(flags & 0x80),
    }
RDY - Ready Bit. This bit provides the status of the RDY flag from the part. The status and function of this bit is the same as the RDY output pin. A number of events set the RDY bit high as indicated in Table XVIII in datasheet STDY - Steady Bit. This bit is updated when the filter writes a result to the Data Register. If the filter is in FASTStep mode (see Filter Register section) and responding to a step input, the STDY bit remains high as the initial conversion results become available. The RDY output and bit are set low on these initial conversions to indicate that a result is available. If the STDY is high, however, it indicates that the result being provided is not from a fully settled second-stage FIR filter. When the FIR filter has fully settled, the STDY bit will go low coincident with RDY. If the part is never placed into its FASTStep mode, the STDY bit will go low at the first Data Register read and it is not cleared by subsequent Data Register reads. A number of events set the STDY bit high as indicated in Table XVIII. STDY is set high along with RDY by all events in the table except a Data Register read. STBY - Standby Bit. This bit indicates whether the AD7730 is in its Standby Mode or normal mode of operation. The part can be placed in its standby mode using the STANDBY input pin or by writing 011 to the MD2 to MD0 bits of the Mode Register. The power-on/reset status of this bit is 0 assuming the STANDBY pin is high. NOREF - No Reference Bit. If the voltage between the REF IN(+) and REF IN(-) pins is below 0.3 V, or either of these inputs is open-circuit, the NOREF bit goes to 1. If NOREF is active on completion of a conversion, the Data Register is loaded with all 1s. If NOREF is active on completion of a calibration, updating of the calibration registers is inhibited.
def exponential_terms(order, variables, data):
    """
    Compute exponential (power) expansions of the given variables.

    Parameters
    ----------
    order: range or list(int)
        A list of exponential terms to include.  For instance, [1, 2]
        indicates that the first and second exponential terms should be
        added.  To retain the original terms, 1 *must* be included in the
        list.
    variables: list(str)
        List of variables for which exponential terms should be computed.
    data: pandas DataFrame object
        Table of values of all observations of all variables.

    Returns
    -------
    variables_exp: list
        A list of variables to include in the final data frame after adding
        the specified exponential terms.
    data_exp: pandas DataFrame object
        Table of values of all observations of all variables, including any
        specified exponential terms.
    """
    names_by_power = OrderedDict()
    frames_by_power = OrderedDict()

    # Power 1 keeps the original variable names/values.
    if 1 in order:
        names_by_power[1] = variables
        frames_by_power[1] = data[variables]

    for power in set(order) - set([1]):
        names_by_power[power] = ['{}_power{}'.format(v, power)
                                 for v in variables]
        frames_by_power[power] = data[variables] ** power

    all_names = reduce((lambda x, y: x + y), names_by_power.values())
    expanded = pd.DataFrame(
        columns=all_names,
        data=np.concatenate([*frames_by_power.values()], axis=1))

    return (all_names, expanded)
Compute exponential expansions. Parameters ---------- order: range or list(int) A list of exponential terms to include. For instance, [1, 2] indicates that the first and second exponential terms should be added. To retain the original terms, 1 *must* be included in the list. variables: list(str) List of variables for which exponential terms should be computed. data: pandas DataFrame object Table of values of all observations of all variables. Returns ------- variables_exp: list A list of variables to include in the final data frame after adding the specified exponential terms. data_exp: pandas DataFrame object Table of values of all observations of all variables, including any specified exponential terms.
def _GetLink(self): """Retrieves the link. Returns: str: full path of the linked file entry. """ if self._link is None: self._link = '' if self.entry_type != definitions.FILE_ENTRY_TYPE_LINK: return self._link cpio_archive_file = self._file_system.GetCPIOArchiveFile() link_data = cpio_archive_file.ReadDataAtOffset( self._cpio_archive_file_entry.data_offset, self._cpio_archive_file_entry.data_size) # TODO: should this be ASCII? self._link = link_data.decode('ascii') return self._link
Retrieves the link. Returns: str: full path of the linked file entry.
def set_code(self, code):
    """Sets widget from code string

    Parameters
    ----------
    code: String
    \tCode representation of widget value

    """
    # Select every entry whose code representation matches.
    for index, (_, style_code) in enumerate(self.styles):
        if style_code == code:
            self.SetSelection(index)
Sets widget from code string Parameters ---------- code: String \tCode representation of widget value
def terminate(self):
    """
    Terminate a running cluster.  (Due to a signal.)

    :return none
    """
    # Shut down client nodes first, then the cluster nodes themselves.
    for member in self.client_nodes:
        member.terminate()
    for member in self.nodes:
        member.terminate()
Terminate a running cluster. (Due to a signal.) :return none
def add_directive(self, key: Optional[str], value: str,
                  lineno: Optional[int] = None, comment: str = '') -> None:
    '''Assignments are items with ':' type
    '''
    if key is not None:
        # A new directive; any comment collected before it is attached.
        self.statements.append([':', key, value, comment])
        self.values = [value]
    else:
        # key is None: continuation of a multi-line directive.
        self.statements[-1][2] += value
        self.values.append(value)
        if self._action is not None:
            self._action_options += value
    if lineno:
        self.lineno = lineno
Assignments are items with ':' type
def _post(self, *args, **kwargs):
    """
    A wrapper for posting things.  It will also json encode your 'data'
    parameter.

    :returns: The response of your post
    :rtype: dict

    :raises: This will raise a
        :class:`NewRelicAPIServerException<newrelic_api.exceptions.NewRelicAPIServerException>`
        if there is an error from New Relic
    """
    # Serialize the payload before handing it to requests.
    if 'data' in kwargs:
        kwargs['data'] = json.dumps(kwargs['data'])

    response = requests.post(*args, **kwargs)
    if response.ok:
        return response.json()
    raise NewRelicAPIServerException('{}: {}'.format(response.status_code, response.text))
A wrapper for posting things. It will also json encode your 'data' parameter :returns: The response of your post :rtype: dict :raises: This will raise a :class:`NewRelicAPIServerException<newrelic_api.exceptions.NewRelicAPIServerException>` if there is an error from New Relic
def find(self, id, columns=None):
    """
    Execute a query for a single record by id

    :param id: The id of the record to retrieve
    :type id: mixed

    :param columns: The columns of the record to retrieve
    :type columns: list

    :return: mixed
    :rtype: mixed
    """
    # Default to all columns when none were requested.
    selected = columns if columns else ['*']
    return self.where('id', '=', id).first(1, selected)
Execute a query for a single record by id :param id: The id of the record to retrieve :type id: mixed :param columns: The columns of the record to retrive :type columns: list :return: mixed :rtype: mixed
def inverse(self):
    """Inverse of this operator.

    The inverse of ``scalar * op`` is given by ``op.inverse * 1/scalar`` if
    ``scalar != 0``.  If ``scalar == 0``, the inverse is not defined and a
    ``ZeroDivisionError`` is raised.

    ``OperatorLeftScalarMult(op, s).inverse ==
    OperatorRightScalarMult(op.inverse, 1/s)``

    Examples
    --------
    >>> space = odl.rn(3)
    >>> operator = odl.IdentityOperator(space)
    >>> left_mul_op = OperatorLeftScalarMult(operator, 3)
    >>> left_mul_op.inverse([3, 3, 3])
    rn(3).element([ 1.,  1.,  1.])
    """
    if self.scalar == 0.0:
        raise ZeroDivisionError('{} not invertible'.format(self))
    reciprocal = 1.0 / self.scalar
    return self.operator.inverse * reciprocal
Inverse of this operator. The inverse of ``scalar * op`` is given by ``op.inverse * 1/scalar`` if ``scalar != 0``. If ``scalar == 0``, the inverse is not defined. ``OperatorLeftScalarMult(op, s).inverse == OperatorRightScalarMult(op.inverse, 1/s)`` Examples -------- >>> space = odl.rn(3) >>> operator = odl.IdentityOperator(space) >>> left_mul_op = OperatorLeftScalarMult(operator, 3) >>> left_mul_op.inverse([3, 3, 3]) rn(3).element([ 1., 1., 1.])
async def start(self, remoteParameters): """ Initiate connectivity checks. :param: remoteParameters: The :class:`RTCIceParameters` associated with the remote :class:`RTCIceTransport`. """ if self.state == 'closed': raise InvalidStateError('RTCIceTransport is closed') # handle the case where start is already in progress if self.__start is not None: return await self.__start.wait() self.__start = asyncio.Event() self.__setState('checking') self._connection.remote_username = remoteParameters.usernameFragment self._connection.remote_password = remoteParameters.password try: await self._connection.connect() except ConnectionError: self.__setState('failed') else: self.__setState('completed') self.__start.set()
Initiate connectivity checks. :param: remoteParameters: The :class:`RTCIceParameters` associated with the remote :class:`RTCIceTransport`.
def _get_all_templates(network_id, template_id):
    """
    Get all the templates for the nodes, links and groups of a network.
    Return these templates as a dictionary, keyed on type (NODE, LINK, GROUP)
    then by ID of the node or link.

    :param network_id: ID of the network whose resource types are fetched.
    :param template_id: optional template ID used to restrict the result to
        a single template; ``None`` means all templates.
    :return: dict keyed on 'NODE' / 'LINK' / 'GROUP' / 'NETWORK', each value
        mapping a resource ID to a list of JSONObject type summaries.
    """
    # Base query: one row per (resource, template type) association,
    # joined to the owning template for its name/id.
    base_qry = db.DBSession.query(
        ResourceType.ref_key.label('ref_key'),
        ResourceType.node_id.label('node_id'),
        ResourceType.link_id.label('link_id'),
        ResourceType.group_id.label('group_id'),
        ResourceType.network_id.label('network_id'),
        Template.name.label('template_name'),
        Template.id.label('template_id'),
        TemplateType.id.label('type_id'),
        TemplateType.layout.label('layout'),
        TemplateType.name.label('type_name'),
    ).filter(TemplateType.id==ResourceType.type_id,
             Template.id==TemplateType.template_id)

    # Restrict to resources that belong to the requested network.
    all_node_type_qry = base_qry.filter(Node.id==ResourceType.node_id,
                                        Node.network_id==network_id)
    all_link_type_qry = base_qry.filter(Link.id==ResourceType.link_id,
                                        Link.network_id==network_id)
    all_group_type_qry = base_qry.filter(ResourceGroup.id==ResourceType.group_id,
                                         ResourceGroup.network_id==network_id)
    network_type_qry = base_qry.filter(ResourceType.network_id==network_id)

    #Filter the group attributes by template
    if template_id is not None:
        all_node_type_qry = all_node_type_qry.filter(Template.id==template_id)
        all_link_type_qry = all_link_type_qry.filter(Template.id==template_id)
        all_group_type_qry = all_group_type_qry.filter(Template.id==template_id)

    x = time.time()
    log.info("Getting all types")
    # Run the four per-kind queries as a single UNION round trip.
    type_qry = all_node_type_qry.union(all_link_type_qry, all_group_type_qry, network_type_qry)
    all_types = db.DBSession.execute(type_qry.statement).fetchall()
    log.info("%s types retrieved in %s", len(all_types), time.time()-x)

    log.info("Attributes retrieved. Processing results...")
    x = time.time()
    node_type_dict = dict()
    link_type_dict = dict()
    group_type_dict = dict()
    network_type_dict = dict()

    # Bucket each row into the dict for its resource kind, keyed by
    # the resource's ID; a resource can have several types.
    for t in all_types:
        templatetype = JSONObject({
            'template_id':t.template_id,
            'id':t.type_id,
            'template_name':t.template_name,
            'layout': t.layout,
            'name': t.type_name,})

        if t.ref_key == 'NODE':
            nodetype = node_type_dict.get(t.node_id, [])
            nodetype.append(templatetype)
            node_type_dict[t.node_id] = nodetype
        elif t.ref_key == 'LINK':
            linktype = link_type_dict.get(t.link_id, [])
            linktype.append(templatetype)
            link_type_dict[t.link_id] = linktype
        elif t.ref_key == 'GROUP':
            grouptype = group_type_dict.get(t.group_id, [])
            grouptype.append(templatetype)
            group_type_dict[t.group_id] = grouptype
        elif t.ref_key == 'NETWORK':
            nettype = network_type_dict.get(t.network_id, [])
            nettype.append(templatetype)
            network_type_dict[t.network_id] = nettype

    all_types = {
        'NODE' : node_type_dict,
        'LINK' : link_type_dict,
        'GROUP': group_type_dict,
        'NETWORK': network_type_dict,
    }

    logging.info("Attributes processed in %s", time.time()-x)

    return all_types
Get all the templates for the nodes, links and groups of a network. Return these templates as a dictionary, keyed on type (NODE, LINK, GROUP) then by ID of the node or link.
def populate_iteration(self, iteration):
    """Parse genotypes from the file and populate the iteration object \
    with relevant marker details.

    :param iteration: ParseLocus object which is returned per iteration
    :return: True indicates current locus is valid.

    StopIteration is thrown if the marker reaches the end of the file or
    the valid genomic region for analysis.
    """
    # Module-level genotype encoding selected elsewhere (Dominant,
    # Additive, Recessive, Genotype or Raw).
    global encoding

    line, info, exp_freq = self.get_next_line()
    # Skip markers whose imputation info score is too low.
    if info > Parser.info_threshold:
        # NOTE(review): assumes the first five fields of `line` are
        # (ignored, rsid, position, major allele, minor allele) — confirm
        # against the file format spec.
        junk, iteration.rsid, iteration.pos, iteration.major_allele, iteration.minor_allele = line[0:5]
        iteration.chr = self.current_chrom
        iteration.pos = int(iteration.pos)
        if DataParser.boundary.TestBoundary(iteration.chr, iteration.pos, iteration.rsid):
            frequencies = []
            idx = 5                 # genotype probability triplets start at field 5
            total_maf = 0.0
            additive = []
            # One (AA, Aa, aa) probability triplet per individual; masked
            # individuals are skipped but still advance the field index.
            for is_ignored in self.ind_mask[:,0]:
                if not is_ignored:
                    AA,Aa,aa = [float(x) for x in line[idx:idx+3]]
                    # Additive dosage = expected count of minor alleles.
                    additive.append(Aa+2*aa)
                    if encoding==Encoding.Dominant:
                        estimate = Aa + aa
                    elif encoding==Encoding.Additive:
                        estimate = additive[-1]
                    elif encoding==Encoding.Recessive:
                        estimate = aa
                    elif encoding==Encoding.Genotype:
                        # Hard call: pick the most probable genotype class.
                        if Aa >= AA and Aa >= aa:
                            estimate = 1
                        elif AA >= Aa and AA >= aa:
                            estimate = 0
                        else:
                            estimate = 2
                    elif encoding==Encoding.Raw:
                        estimate = [AA, Aa, aa]
                    total_maf += numpy.sqrt(aa)
                    frequencies.append(estimate)
                idx += 3
            # Allele bookkeeping based on the mean additive dosage.
            iteration.non_missing_alc = len(additive)*2
            maf = numpy.mean(numpy.array(additive))/2
            iteration.allele_count2 = maf * (len(additive) * 2)
            iteration.effa_freq = maf
            # Fold frequencies > 0.5 so `maf` is always the minor frequency.
            if maf > 0.5:
                iteration.min_allele_count = len(additive)*2 - iteration.allele_count2
                iteration.maj_allele_count = iteration.allele_count2
                maf = 1.0 - maf
            else:
                iteration.min_allele_count = iteration.allele_count2
                iteration.maj_allele_count = len(additive)*2 - iteration.allele_count2
            iteration._maf = maf
            iteration.genotype_data = numpy.array(frequencies)
            # Valid only when the folded MAF is within the configured window.
            return iteration.maf >= DataParser.min_maf and iteration.maf <= DataParser.max_maf
        else:
            return False
    else:
        return False
Parse genotypes from the file and populate the iteration object with relevant marker details. :param iteration: ParseLocus object which is returned per iteration :return: True indicates current locus is valid. StopIteration is thrown if the marker reaches the end of the file or the valid genomic region for analysis.
def strict_deps_for_target(self, target, predicate=None):
    """Get the dependencies of `target` filtered by `predicate`, accounting for 'strict_deps'.

    If 'strict_deps' is on, instead of using the transitive closure of dependencies, targets will
    only be able to see their immediate dependencies declared in the BUILD file. The 'strict_deps'
    setting is obtained from the result of `get_compile_settings()`.

    NB: This includes the current target in the result.

    :param target: the target whose (possibly strict) dependencies are requested.
    :param predicate: optional callable used to filter dependency targets.
    :return: a list of targets (both branches now return a list).
    """
    if self._native_build_settings.get_strict_deps_value_for_target(target):
        strict_deps = target.strict_dependencies(DependencyContext())
        if predicate:
            filtered_deps = list(filter(predicate, strict_deps))
        else:
            filtered_deps = strict_deps
        deps = [target] + filtered_deps
    else:
        deps = self.context.build_graph.transitive_subgraph_of_addresses(
            [target.address], predicate=predicate)

        # Filter out the beginning target depending on whether it matches the predicate.
        # TODO: There should be a cleaner way to do this.
        # BUG FIX: materialize the filter so this branch returns a list like
        # the strict branch does; on Python 3 the bare `filter(...)` object
        # was a single-use iterator, which callers could silently exhaust.
        deps = list(filter(predicate, deps))

    return deps
Get the dependencies of `target` filtered by `predicate`, accounting for 'strict_deps'. If 'strict_deps' is on, instead of using the transitive closure of dependencies, targets will only be able to see their immediate dependencies declared in the BUILD file. The 'strict_deps' setting is obtained from the result of `get_compile_settings()`. NB: This includes the current target in the result.
def ckw02(handle, begtim, endtim, inst, ref, segid, nrec, start, stop,
          quats, avvs, rates):
    """
    Write a type 2 segment to a C-kernel.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw02_c.html

    :param handle: Handle of an open CK file.
    :type handle: int
    :param begtim: The beginning encoded SCLK of the segment.
    :type begtim: float
    :param endtim: The ending encoded SCLK of the segment.
    :type endtim: float
    :param inst: The NAIF instrument ID code.
    :type inst: int
    :param ref: The reference frame of the segment.
    :type ref: str
    :param segid: Segment identifier.
    :type segid: str
    :param nrec: Number of pointing records.
    :type nrec: int
    :param start: Encoded SCLK interval start times.
    :type start: Array of floats
    :param stop: Encoded SCLK interval stop times.
    :type stop: Array of floats
    :param quats: Quaternions representing instrument pointing.
    :type quats: Nx4-Element Array of floats
    :param avvs: Angular velocity vectors.
    :type avvs: Nx3-Element Array of floats
    :param rates: Number of seconds per tick for each interval.
    :type rates: Array of floats
    """
    # Marshal scalar arguments into their ctypes equivalents.
    handle = ctypes.c_int(handle)
    begtim = ctypes.c_double(begtim)
    endtim = ctypes.c_double(endtim)
    inst = ctypes.c_int(inst)
    # Strings are passed to CSPICE as char pointers.
    ref = stypes.stringToCharP(ref)
    segid = stypes.stringToCharP(segid)
    # Array arguments become contiguous C vectors/matrices.
    start = stypes.toDoubleVector(start)
    stop = stypes.toDoubleVector(stop)
    rates = stypes.toDoubleVector(rates)
    quats = stypes.toDoubleMatrix(quats)
    avvs = stypes.toDoubleMatrix(avvs)
    nrec = ctypes.c_int(nrec)
    # Argument order must match the ckw02_c C prototype exactly.
    libspice.ckw02_c(handle, begtim, endtim, inst, ref, segid, nrec, start,
                     stop, quats, avvs, rates)
Write a type 2 segment to a C-kernel. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw02_c.html :param handle: Handle of an open CK file. :type handle: int :param begtim: The beginning encoded SCLK of the segment. :type begtim: float :param endtim: The ending encoded SCLK of the segment. :type endtim: float :param inst: The NAIF instrument ID code. :type inst: int :param ref: The reference frame of the segment. :type ref: str :param segid: Segment identifier. :type segid: str :param nrec: Number of pointing records. :type nrec: int :param start: Encoded SCLK interval start times. :type start: Array of floats :param stop: Encoded SCLK interval stop times. :type stop: Array of floats :param quats: Quaternions representing instrument pointing. :type quats: Nx4-Element Array of floats :param avvs: Angular velocity vectors. :type avvs: Nx3-Element Array of floats :param rates: Number of seconds per tick for each interval. :type rates: Array of floats
def branches(self):
    """
    Returns a data frame of all branches in origin.  The DataFrame will have the columns:

     * repository
     * local
     * branch

    :returns: DataFrame
    """

    df = pd.DataFrame(columns=['repository', 'local', 'branch'])

    if _has_joblib:
        # Inspect repositories in parallel; each worker yields a per-repo
        # DataFrame which is appended to the aggregate result.
        ds = Parallel(n_jobs=-1, backend='threading', verbose=0)(
            delayed(_branches_func)
            (x) for x in self.repos
        )
        for d in ds:
            df = df.append(d)
    else:
        for repo in self.repos:
            try:
                df = df.append(_branches_func(repo))
            except GitCommandError:
                print('Warning! Repo: %s couldn\'t be inspected' % (repo, ))

    # BUG FIX: reset_index() returns a new DataFrame rather than mutating in
    # place; the result was previously discarded, leaving duplicate per-repo
    # indices on the returned frame.  Drop the old index entirely.
    df = df.reset_index(drop=True)

    return df
Returns a data frame of all branches in origin. The DataFrame will have the columns: * repository * local * branch :returns: DataFrame
def flatten_container(self, container):
    """
    Accepts a kubernetes container and pulls out the nested values
    into the top level
    """
    for names in ARG_MAP.values():
        dotted_name = names[TransformationTypes.KUBERNETES.value]['name']
        # Only dotted kubernetes names refer to nested values.
        if not dotted_name or '.' not in dotted_name:
            continue
        value = lookup_nested_dict(container, *dotted_name.split('.'))
        if value:
            container[dotted_name] = value
    return container
Accepts a kubernetes container and pulls out the nested values into the top level
def register(view):  # Type[BananasAPI]
    """
    Register the API view class in the bananas router.

    :param BananasAPI view:
    """
    meta = view.get_admin_meta()
    # The basename is dotted; the URL prefix uses slashes instead.
    route_prefix = meta.basename.replace(".", "/")
    router.register(route_prefix, view, meta.basename)
Register the API view class in the bananas router. :param BananasAPI view:
def init_argparser(self, argparser):
    """
    Other runtimes (or users of ArgumentParser) can pass their
    subparser into here to collect the arguments here for a
    subcommand.

    :param argparser: the ArgumentParser (or subparser) to populate with
        this package manager's options and positional arguments.
    """

    super(PackageManagerRuntime, self).init_argparser(argparser)

    # Ideally, we could use more subparsers for each action (i.e.
    # init and install).  However, this is complicated by the fact
    # that setuptools has its own calling conventions through the
    # setup.py file, and to present a consistent cli to end-users
    # from both calmjs entry point and setuptools using effectively
    # the same codebase will require a bit of creative handling.

    # provide this for the setuptools command class.
    actions = argparser.add_argument_group('action arguments')
    count = 0

    for full, short, desc in self.pkg_manager_options:
        # Build '-x'/'--xxx' flag strings, skipping missing variants.
        args = [
            dash + key
            for dash, key in zip(('-', '--'), (short, full))
            if key
        ]
        # default is singular, but for the argparsed version in our
        # runtime permits multiple packages.
        desc = desc.replace('Python package', 'Python package(s)')
        if not short:
            # Long-only options may map to a driver method named
            # '<binary>_<option>'; if so they become action arguments.
            f = getattr(self.cli_driver, '%s_%s' % (
                self.cli_driver.binary, full), None)
            if callable(f):
                count += 1
                actions.add_argument(
                    *args, help=desc, action=PackageManagerAction,
                    dest=self.action_key, const=(count, f)
                )
                # The first discovered action doubles as the default.
                if self.default_action is None:
                    self.default_action = f
                continue  # pragma: no cover
        # Everything else is a plain boolean flag.
        argparser.add_argument(*args, help=desc, action='store_true')

    argparser.add_argument(
        'package_names', metavar=metavar('package'), nargs='+',
        help="python packages to be used for the generation of '%s'" % (
            self.cli_driver.pkgdef_filename,
        ),
    )
Other runtimes (or users of ArgumentParser) can pass their subparser into here to collect the arguments here for a subcommand.
def _writen(fd, data): """Write all the data to a descriptor.""" while data: n = os.write(fd, data) data = data[n:]
Write all the data to a descriptor.
def view(allowed_methods, exceptions={}):
    """
    Decorates a Django function based view and wraps it's return in the
    :py:func:`jason.response` function.

    The view should return a list or tuple which is unpacked using the
    ``*``-operator into :py:func:`jason.response`. The view can raise a
    :py:class:`jason.Bail` Exception.

    ``allowed_methods`` lists which HTTP methods are allowed, e.g.
    ['GET', 'POST'].

    ``exceptions`` is a dictionary where the keys are ``Exception`` classes
    and values are callables. It defines responses for raised Exceptions
    other than the :py:class:`jason.Bail` Exception. The callable should
    return a tuple or list that can unpacked into :py:func:`jason.response`.

    Example::

        import jason

        @jason.view(allowed_methods=['GET', 'POST'], exceptions={
            WebFault: lambda e: ({}, 400, e.message, )
        })
        def my_view(request):
            return {'numbers': get_numbers()},
    """
    def decorator(f):
        def wrapper(request, *args, **kwargs):
            # Reject disallowed HTTP verbs up front.
            if request.method not in allowed_methods:
                return response({}, 405, 'Method Not Allowed')
            try:
                return response(*f(request, *args, **kwargs))
            except Bail as e:
                # Bail carries its own payload/status/message.
                return response(e.data, e.status, e.message)
            except Exception as e:
                # Mapped exceptions get their registered handler;
                # anything else becomes a generic 500.
                if e.__class__ in exceptions:
                    return response(*exceptions[e.__class__](e))
                return response({}, 500, 'Internal Server Error')
        return wrapper
    return decorator
Decorates a Django function based view and wraps it's return in the :py:func:`jason.response` function. The view should return a list or tuple which is unpacked using the ``*``-operator into :py:func:`jason.response`. The view can raise a :py:class:`jason.Bail` Exception. ``allowed_methods`` lists which HTTP methods are allowed, e.g. ['GET', 'POST']. ``exceptions`` is a dictionary where the keys are ``Exception`` classes and values are callables. It defines responses for raised Exceptions other than the :py:class:`jason.Bail` Exception. The callable should return a tuple or list that can unpacked into :py:func:`jason.response`. Example:: import jason @jason.view(allowed_methods=['GET', 'POST'], exceptions={ WebFault: lambda e: ({}, 400, e.message, ) }) def my_view(request): return {'numbers': get_numbers()},
def rmon_event_entry_event_owner(self, **kwargs):
    """Auto Generated Code
    """
    # Build the NETCONF config payload for the RMON event-owner leaf.
    config = ET.Element("config")
    rmon = ET.SubElement(config, "rmon", xmlns="urn:brocade.com:mgmt:brocade-rmon")
    event_entry = ET.SubElement(rmon, "event-entry")
    # 'event-index' is the list key; 'event-owner' is the leaf being set.
    ET.SubElement(event_entry, "event-index").text = kwargs.pop('event_index')
    ET.SubElement(event_entry, "event-owner").text = kwargs.pop('event_owner')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def parse_auth(rule):
    '''
    Parses the auth/authconfig line

    :param rule: the full kickstart ``auth``/``authconfig`` line.
    :return: dict of parsed option values (cleaned via ``clean_args``).
    '''
    parser = argparse.ArgumentParser()
    rules = shlex.split(rule)
    # Drop the leading 'auth'/'authconfig' command token.
    rules.pop(0)

    # Boolean flags that take no argument.
    noargs = ('back', 'test', 'nostart', 'kickstart', 'probe', 'enablecache',
              'disablecache', 'disablenis', 'enableshadow', 'disableshadow',
              'enablemd5', 'disablemd5', 'enableldap', 'enableldapauth',
              'enableldaptls', 'disableldap', 'disableldapauth',
              'enablekrb5kdcdns', 'disablekrb5kdcdns', 'enablekrb5realmdns',
              'disablekrb5realmdns', 'disablekrb5', 'disablehe-siod',
              'enablesmbauth', 'disablesmbauth', 'enablewinbind',
              'enablewinbindauth', 'disablewinbind', 'disablewinbindauth',
              'enablewinbindusedefaultdomain',
              'disablewinbindusedefaultdomain', 'enablewins', 'disablewins')
    for arg in noargs:
        parser.add_argument('--{0}'.format(arg), dest=arg, action='store_true')

    # Options taking a single value (or repeatable via action='append').
    parser.add_argument('--enablenis', dest='enablenis', action='store')
    parser.add_argument('--hesiodrhs', dest='hesiodrhs', action='store')
    parser.add_argument('--krb5adminserver', dest='krb5adminserver',
                        action='append')
    parser.add_argument('--krb5kdc', dest='krb5kdc', action='append')
    parser.add_argument('--ldapbasedn', dest='ldapbasedn', action='store')
    parser.add_argument('--ldapserver', dest='ldapserver', action='append')
    parser.add_argument('--nisserver', dest='nisserver', action='append')
    parser.add_argument('--passalgo', dest='passalgo', action='store')
    parser.add_argument('--smbidmapgid', dest='smbidmapgid', action='store')
    parser.add_argument('--smbidmapuid', dest='smbidmapuid', action='store')
    parser.add_argument('--smbrealm', dest='smbrealm', action='store')
    parser.add_argument('--smbsecurity', dest='smbsecurity', action='store',
                        choices=['user', 'server', 'domain', 'dns'])
    parser.add_argument('--smbservers', dest='smbservers', action='store')
    parser.add_argument('--smbworkgroup', dest='smbworkgroup', action='store')
    parser.add_argument('--winbindjoin', dest='winbindjoin', action='store')
    parser.add_argument('--winbindseparator', dest='winbindseparator',
                        action='store')
    parser.add_argument('--winbindtemplatehomedir',
                        dest='winbindtemplatehomedir', action='store')
    parser.add_argument('--winbindtemplateprimarygroup',
                        dest='winbindtemplateprimarygroup', action='store')
    parser.add_argument('--winbindtemplateshell', dest='winbindtemplateshell',
                        action='store')

    # krb5realm/hesiodlhs are only mandatory when their enabling flag is
    # present, so the requirement is added conditionally.
    parser.add_argument('--enablekrb5', dest='enablekrb5', action='store_true')
    if '--enablekrb5' in rules:
        parser.add_argument('--krb5realm', dest='krb5realm', action='store',
                            required=True)
    parser.add_argument('--enablehesiod', dest='enablehesiod',
                        action='store_true')
    if '--enablehesiod' in rules:
        parser.add_argument('--hesiodlhs', dest='hesiodlhs', action='store',
                            required=True)

    args = clean_args(vars(parser.parse_args(rules)))
    parser = None
    return args
Parses the auth/authconfig line
def sms(self, client_id, phone_number):
    """Start a passwordless flow by sending the user an SMS message.
    """
    endpoint = 'https://{}/passwordless/start'.format(self.domain)
    payload = {
        'client_id': client_id,
        'connection': 'sms',
        'phone_number': phone_number,
    }
    return self.post(endpoint,
                     data=payload,
                     headers={'Content-Type': 'application/json'})
Start flow sending a SMS message.
def setFieldStats(self, fieldName, fieldStats):
    """
    Set this encoder's value bounds from pre-computed field statistics.

    :param fieldName: name of the field this encoder handles; used as the
        key into ``fieldStats``.
    :param fieldStats: dict mapping field names to stats dicts containing
        at least 'min' and 'max' entries.
    """
    stats = fieldStats[fieldName]
    # If the stats are not fully formed, ignore.
    # FIX: compare against None with `is`, not `==`.
    if stats['min'] is None or stats['max'] is None:
        return

    self.minval = stats['min']
    self.maxval = stats['max']
    # Guard against a degenerate (zero-width) value range.
    if self.minval == self.maxval:
        self.maxval += 1
    self._setEncoderParams()
Set this encoder's min/max value bounds from pre-computed field statistics for the given field name.
def spkacs(targ, et, ref, abcorr, obs):
    """
    Return the state (position and velocity) of a target body
    relative to an observer, optionally corrected for light time
    and stellar aberration, expressed relative to an inertial
    reference frame.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkacs_c.html

    :param targ: Target body.
    :type targ: int
    :param et: Observer epoch.
    :type et: float
    :param ref: Inertial reference frame of output state.
    :type ref: str
    :param abcorr: Aberration correction flag.
    :type abcorr: str
    :param obs: Observer.
    :type obs: int
    :return:
            State of target,
            One way light time between observer and target,
            Derivative of light time with respect to time.
    :rtype: tuple
    """
    # Marshal the input arguments into ctypes equivalents.
    targ = ctypes.c_int(targ)
    et = ctypes.c_double(et)
    ref = stypes.stringToCharP(ref)
    abcorr = stypes.stringToCharP(abcorr)
    obs = ctypes.c_int(obs)
    # Output buffers filled by the C routine.
    starg = stypes.emptyDoubleVector(6)
    lt = ctypes.c_double()
    dlt = ctypes.c_double()
    libspice.spkacs_c(targ, et, ref, abcorr, obs, starg, ctypes.byref(lt),
                      ctypes.byref(dlt))
    # Convert the C state vector back to a Python sequence.
    return stypes.cVectorToPython(starg), lt.value, dlt.value
Return the state (position and velocity) of a target body relative to an observer, optionally corrected for light time and stellar aberration, expressed relative to an inertial reference frame. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkacs_c.html :param targ: Target body. :type targ: int :param et: Observer epoch. :type et: float :param ref: Inertial reference frame of output state. :type ref: str :param abcorr: Aberration correction flag. :type abcorr: str :param obs: Observer. :type obs: int :return: State of target, One way light time between observer and target, Derivative of light time with respect to time. :rtype: tuple
def parseTargetNameAndSpec(target_name_and_spec):
    ''' Parse targetname[@versionspec] and return a tuple
        (target_name_string, version_spec_string).

        targetname[,versionspec] is also supported (this is how target names
        and specifications are stored internally, and was the documented way
        of setting the spec on the commandline)

        Also accepts raw github version specs (Owner/reponame#whatever), as
        the name can be deduced from these.

        Note that the specification split from the name is not validated. If
        there is no specification (just a target name) passed in, then '*'
        will be returned as the specification.
    '''
    # FIX: removed an unused function-local `import re`; nothing in this
    # function uses regular expressions.

    # first check if this is a raw github specification that we can get the
    # target name from:
    name, spec = _getNonRegistryRef(target_name_and_spec)
    if name:
        return name, target_name_and_spec

    # next split at the first @ or , if any
    split_at = '@'
    if target_name_and_spec.find('@') > target_name_and_spec.find(',') and \
       ',' in target_name_and_spec:
        split_at = ','
    name = target_name_and_spec.split(split_at)[0]
    spec = target_name_and_spec[len(name)+1:]

    name = name.strip()

    # if there's no specification, return the explicit any-version
    # specification:
    if not spec:
        spec = '*'

    return name, spec
Parse targetname[@versionspec] and return a tuple (target_name_string, version_spec_string). targetname[,versionspec] is also supported (this is how target names and specifications are stored internally, and was the documented way of setting the spec on the commandline) Also accepts raw github version specs (Owner/reponame#whatever), as the name can be deduced from these. Note that the specification split from the name is not validated. If there is no specification (just a target name) passed in, then '*' will be returned as the specification.
def get_feed_permissions(self, feed_id, include_ids=None, exclude_inherited_permissions=None, identity_descriptor=None):
    """GetFeedPermissions.
    [Preview API] Get the permissions for a feed.
    :param str feed_id: Name or Id of the feed.
    :param bool include_ids: True to include user Ids in the response. Default is false.
    :param bool exclude_inherited_permissions: True to only return explicitly set permissions on the feed. Default is false.
    :param str identity_descriptor: Filter permissions to the provided identity.
    :rtype: [FeedPermission]
    """
    route_values = {}
    if feed_id is not None:
        route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')

    # Only forward the query parameters the caller actually supplied.
    optional_params = [
        ('includeIds', 'include_ids', include_ids, 'bool'),
        ('excludeInheritedPermissions', 'exclude_inherited_permissions',
         exclude_inherited_permissions, 'bool'),
        ('identityDescriptor', 'identity_descriptor', identity_descriptor, 'str'),
    ]
    query_parameters = {}
    for wire_name, local_name, value, type_name in optional_params:
        if value is not None:
            query_parameters[wire_name] = self._serialize.query(local_name, value, type_name)

    response = self._send(http_method='GET',
                          location_id='be8c1476-86a7-44ed-b19d-aec0e9275cd8',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[FeedPermission]', self._unwrap_collection(response))
GetFeedPermissions. [Preview API] Get the permissions for a feed. :param str feed_id: Name or Id of the feed. :param bool include_ids: True to include user Ids in the response. Default is false. :param bool exclude_inherited_permissions: True to only return explicitly set permissions on the feed. Default is false. :param str identity_descriptor: Filter permissions to the provided identity. :rtype: [FeedPermission]
def _compile(self, expression): """ Transform a class exp into an actual regex """ x = self.RE_PYTHON_VAR.sub('(?:\\1,)', expression) x = self.RE_SPACES.sub('', x) return re.compile(x)
Transform a class exp into an actual regex
def download_file_insecure_to_io(url, target_file=None, headers=None):
    """
    Use Python to download the file, even though it cannot authenticate the
    connection.

    :param url: URL to download from.
    :param target_file: writable binary file-like object that receives the
        downloaded bytes.
    :param headers: optional dict of extra HTTP request headers.
    """
    # FIX: the signature previously used a mutable default (``headers={}``);
    # that single dict is shared across all calls and could be mutated.
    if headers is None:
        headers = {}
    src = None
    try:
        req = Request(
            url,
            data=None,
            headers=headers
        )
        src = urlopen(req)
        # Read/write all in one block, so we don't create a corrupt file
        # if the download is interrupted.
        data = src.read()
        target_file.write(data)
    finally:
        if src:
            src.close()
Use Python to download the file, even though it cannot authenticate the connection.
def add_genelist(self, list_id, gene_ids, case_obj=None):
    """Create a new gene list and optionally link to cases."""
    genelist = GeneList(list_id=list_id)
    genelist.gene_ids = gene_ids
    # Link the list to a case when one is provided.
    if case_obj:
        genelist.cases.append(case_obj)

    self.session.add(genelist)
    self.save()
    return genelist
Create a new gene list and optionally link to cases.
def q(line, cell=None, _ns=None):
    """Run q code.

    Options:
        -l (dir|script) - pre-load database or script
        -h host:port    - execute on the given host
        -o var          - send output to a variable named var.
        -i var1,..,varN - input variables
        -1/-2           - redirect stdout/stderr
    """
    # Line-magic mode: no cell body, evaluate the line directly.
    if cell is None:
        return pyq.q(line)
    # Default namespace is the interactive __main__ module's globals.
    if _ns is None:
        _ns = vars(sys.modules['__main__'])
    input = output = None
    preload = []
    outs = {}
    try:
        # Default handle 0i is the local q instance.
        h = pyq.q('0i')
        if line:
            for opt, value in getopt(line.split(), "h:l:o:i:12")[0]:
                if opt == '-l':
                    preload.append(value)
                elif opt == '-h':
                    # Remote execution: open an IPC handle to host:port.
                    h = pyq.K(str(':' + value))
                elif opt == '-o':
                    output = str(value)  # (see #673)
                elif opt == '-i':
                    input = str(value).split(',')
                elif opt in ('-1', '-2'):
                    outs[int(opt[1])] = None
            if outs:
                # Std-stream redirection only works on the local instance.
                if int(h) != 0:
                    raise ValueError("Cannot redirect remote std stream")
                for fd in outs:
                    # Point q's fd at a temp file; unlink immediately so it
                    # is cleaned up when q closes it.
                    tmpfd, tmpfile = mkstemp()
                    try:
                        pyq.q(r'\%d %s' % (fd, tmpfile))
                    finally:
                        os.unlink(tmpfile)
                        os.close(tmpfd)
        r = None
        for script in preload:
            h(pyq.kp(r"\l " + script))
        if input is not None:
            # Wrap each chunk in a q lambda taking the -i variables,
            # passing their Python values as arguments.
            for chunk in logical_lines(cell):
                func = "{[%s]%s}" % (';'.join(input), chunk)
                args = tuple(_ns[i] for i in input)
                if r != Q_NONE:
                    r.show()
                r = h((pyq.kp(func),) + args)
                if outs:
                    _forward_outputs(outs)
        else:
            for chunk in logical_lines(cell):
                if r != Q_NONE:
                    r.show()
                r = h(pyq.kp(chunk))
                if outs:
                    _forward_outputs(outs)
    except pyq.kerr as e:
        # q errors are reported in q's own 'error notation.
        print("'%s" % e)
    else:
        if output is not None:
            if output.startswith('q.'):
                # 'q.'-prefixed targets assign into the q root namespace.
                pyq.q('@[`.;;:;]', output[2:], r)
            else:
                _ns[output] = r
        else:
            if r != Q_NONE:
                return r
Run q code. Options: -l (dir|script) - pre-load database or script -h host:port - execute on the given host -o var - send output to a variable named var. -i var1,..,varN - input variables -1/-2 - redirect stdout/stderr
def sanitize_color_palette(colorpalette):
    """
    Sanitize the given color palette so it can be
    safely used by Colorful.

    Hex RGB string values are converted to RGB channel triplets and
    multi-word color names are collapsed into camelCase.
    """
    sanitized = {}

    def _camel_case(words):
        """Join the given word list into a valid camelCase color name."""
        if len(words) == 1:
            word = words[0]
            return word[:1].lower() + word[1:]
        return words[0].lower() + ''.join(w.capitalize() for w in words[1:])

    for name, color in colorpalette.items():
        if isinstance(color, str):
            # we assume it's a hex RGB value
            color = utils.hex_to_rgb(color)
        sanitized[_camel_case(name.split())] = color

    return sanitized
Sanitize the given color palette so it can be safely used by Colorful. It will convert colors specified in hex RGB to a RGB channel triplet.
def to_JSON(self):
    """Dumps object fields into a JSON formatted string

    :returns: the JSON string
    """
    payload = {
        'name': self._name,
        'coordinates': {'lon': self._lon, 'lat': self._lat},
        'ID': self._ID,
        'country': self._country,
    }
    return json.dumps(payload)
Dumps object fields into a JSON formatted string :returns: the JSON string
def get_command(self, ctx, name):
    """Get command for click."""
    env = ctx.ensure_object(environment.Environment)
    env.load()

    # Do alias lookup (only available for root commands)
    if not self.path:
        name = env.resolve_alias(name)

    new_path = list(self.path) + [name]
    module = env.get_command(*new_path)
    # A module means a command group: wrap it in another loader.
    if isinstance(module, types.ModuleType):
        return CommandLoader(*new_path, help=module.__doc__ or '')
    return module
Get command for click.
def reind_proc(self, inputstring, **kwargs):
    """Add back indentation.

    Rebuilds physical indentation from the compiler's internal
    indent/dedent markers, indenting each line by ``tabideal`` spaces
    per nesting level.
    """
    out = []
    level = 0

    for line in inputstring.splitlines():
        # Separate trailing comment so it survives untouched.
        line, comment = split_comment(line.strip())

        # Leading indent markers adjust the level before the line is emitted.
        indent, line = split_leading_indent(line)
        level += ind_change(indent)

        if line:
            line = " " * self.tabideal * level + line

        # Trailing indent markers adjust the level for subsequent lines.
        line, indent = split_trailing_indent(line)
        level += ind_change(indent)

        line = (line + comment).rstrip()
        out.append(line)

    # All opened indents should have been closed by end of input.
    if level != 0:
        complain(CoconutInternalException("non-zero final indentation level", level))
    return "\n".join(out)
Add back indentation.
def new_child(self):
    """Get a new child :class:`Environment`.

    The child's scopes will be mine, with an additional empty innermost
    one.

    Returns:
        Environment: The child.
    """
    offspring = Environment(self.globals, self.max_things)
    # Share my scope chain, plus a fresh innermost scope for the child.
    offspring.scopes = self.scopes.new_child()
    # Track the same live objects without keeping them alive.
    offspring.things = WeakSet(self.things)
    return offspring
Get a new child :class:`Environment`. The child's scopes will be mine, with an additional empty innermost one. Returns: Environment: The child.
def _wiki_urls_for_shard(shard_id, urls_dir=None):
    """Urls for chunk: dict<str wiki_url, list<str> ref_urls>."""
    base_dir = urls_dir or WIKI_URLS_DIR
    shard_path = os.path.join(base_dir, WIKI_URLS_FILE % shard_id)
    with tf.gfile.GFile(shard_path) as f:
        return json.loads(f.read())
Urls for chunk: dict<str wiki_url, list<str> ref_urls>.
def wrap(self, data, many):
    """Wrap response in envelope."""
    if not many:
        return data
    envelope = {'parts': data}
    # Merge in multipart-upload metadata when present in the context.
    multipart = self.context.get('multipart')
    if multipart:
        envelope.update(MultipartObjectSchema(context={
            'bucket': multipart.bucket}).dump(multipart).data)
    return envelope
Wrap response in envelope.
def _legendre_dtr(x, y, y_err, legendredeg=10):
    '''This calculates the residual and chi-sq values for a Legendre
    function fit.

    Parameters
    ----------

    x : np.array
        Array of the independent variable.

    y : np.array
        Array of the dependent variable.

    y_err : np.array
        Array of errors associated with each `y` value. Used to calculate fit
        weights.

    legendredeg : int
        The degree of the Legendre function to use when fitting.

    Returns
    -------

    tuple
        The tuple returned is of the form: (fit_y, fitchisq, fitredchisq)

    '''
    # Fit the Legendre series; fall back to a flat zero model if the fit
    # fails for any reason.
    try:
        legendre_series = Legendre.fit(x, y, legendredeg)
        fit_y = legendre_series(x)
    except Exception:
        fit_y = npzeros_like(y)

    residual = fit_y - y
    fitchisq = npsum((residual * residual) / (y_err * y_err))

    # Reduced chi-sq uses degrees of freedom = N - (deg + 1) - 1.
    nparams = legendredeg + 1
    fitredchisq = fitchisq / (len(y) - nparams - 1)

    LOGINFO(
        'legendre detrend applied. chisq = %.5f, reduced chisq = %.5f' %
        (fitchisq, fitredchisq)
    )

    return fit_y, fitchisq, fitredchisq
This calculates the residual and chi-sq values for a Legendre function fit. Parameters ---------- x : np.array Array of the independent variable. y : np.array Array of the dependent variable. y_err : np.array Array of errors associated with each `y` value. Used to calculate fit weights. legendredeg : int The degree of the Legendre function to use when fitting. Returns ------- tuple The tuple returned is of the form: (fit_y, fitchisq, fitredchisq)
def get(self, call_sid):
    """
    Constructs a ParticipantContext

    :param call_sid: The Call SID of the resource to fetch

    :returns: twilio.rest.api.v2010.account.conference.participant.ParticipantContext
    :rtype: twilio.rest.api.v2010.account.conference.participant.ParticipantContext
    """
    solution = self._solution
    return ParticipantContext(
        self._version,
        account_sid=solution['account_sid'],
        conference_sid=solution['conference_sid'],
        call_sid=call_sid,
    )
Constructs a ParticipantContext :param call_sid: The Call SID of the resource to fetch :returns: twilio.rest.api.v2010.account.conference.participant.ParticipantContext :rtype: twilio.rest.api.v2010.account.conference.participant.ParticipantContext
def send_build_close(params, response_url):
    '''POST a final "finish" payload to Singularity Hub so the server can
    bring down the build instance.

    params must include: log_file, repo_url, logfile, repo_id,
    container_id, commit, branch, token, and tag.
    '''
    # Package everything to send back to shub.
    payload = {"log": json.dumps(params['log_file']),
               "repo_url": params['repo_url'],
               "logfile": params['logfile'],
               "repo_id": params['repo_id'],
               "container_id": params['container_id']}

    # Sign the request with the build token over the identifying fields.
    signed_body = '%s|%s|%s|%s|%s' % (params['container_id'],
                                      params['commit'],
                                      params['branch'],
                                      params['token'],
                                      params['tag'])

    signature = generate_header_signature(secret=params['token'],
                                          payload=signed_body,
                                          request_type="finish")

    finish = requests.post(response_url,
                           data=payload,
                           headers={'Authorization': signature})

    bot.debug("FINISH POST TO SINGULARITY HUB ---------------------")
    bot.debug(finish.status_code)
    bot.debug(finish.reason)
    return finish
send build close sends a final response (post) to the server to bring down the instance. The following must be included in params: repo_url, logfile, repo_id, secret, log_file, token
def get_node_meta_type(manager, handle_id):
    """
    Returns the meta type of the supplied node as a string.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :return: string
    :raises exceptions.NoMetaLabelFound: when no label is a known meta type
    """
    node = get_node(manager=manager, handle_id=handle_id, legacy=False)
    # First label that is a recognized meta type wins.
    meta_label = next((label for label in node.labels if label in META_TYPES), None)
    if meta_label is None:
        raise exceptions.NoMetaLabelFound(handle_id)
    return meta_label
Returns the meta type of the supplied node as a string. :param manager: Neo4jDBSessionManager :param handle_id: Unique id :return: string
def get_limit_log(self, stat_name, default_action=False):
    """Return True when logging is enabled for the given stat's alert.

    Looks up '<stat_name>_log' first (e.g. network_wlan0_rx_log), then falls
    back to the plugin-wide '<plugin_name>_log' (e.g. network_log). When
    neither limit is configured, `default_action` is returned.
    """
    log_tag = self._limits.get(stat_name + '_log')
    if log_tag is None:
        # Fall back to the plugin-level setting.
        log_tag = self._limits.get(self.plugin_name + '_log')
    if log_tag is None:
        # By default, logs are disabled.
        return default_action
    return log_tag[0].lower() == 'true'
Return the log tag for the alert.
def to_float(self):
    """ Converts to 32-bit float data.

    Returns
    -------
    :obj:`DepthImage`
        depth image with 32 bit float data, in the same frame
    """
    float_data = self.data.astype(np.float32)
    return DepthImage(float_data, frame=self.frame)
Converts to 32-bit data. Returns ------- :obj:`DepthImage` depth image with 32 bit float data
def add_new_enriched_bins_matrixes(region_files, dfs, bin_size):
    """Add enriched bins based on bed files.

    There is no way to find the correspondence between region file and matrix
    file, but it does not matter: region columns and matrixes are paired
    positionally."""

    stripped = _remove_epic_enriched(dfs)

    labels = ["Enriched_" + os.path.basename(path) for path in region_files]
    bins = region_files_to_bins(region_files, labels, bin_size)

    assert len(bins.columns) == len(stripped)

    enriched = OrderedDict()
    # Pair each region column with a matrix positionally; bins missing from
    # either side are filled with 0 via the outer join.
    for column, (name, matrix) in zip(bins, stripped.items()):
        enriched[name] = matrix.join(bins[column], how="outer").fillna(0)

    return enriched
Add enriched bins based on bed files. There is no way to find the correspondence between region file and matrix file, but it does not matter.
def _completed_families(self, reference_name, rightmost_boundary):
    '''returns one or more families whose end < rightmost boundary

    Generator. Pops every right coordinate strictly left of
    `rightmost_boundary` off this reference's in-progress queue, removes
    its families from the coordinate map, and yields each family.
    Families are yielded ordered by their leftmost alignment start;
    within a family, members are sorted by query name for deterministic
    output. `pending_pair_count` is decremented as families are emitted.
    '''
    # NOTE(review): assumes in_progress is kept in ascending coordinate
    # order by the caller — only the head is ever inspected. TODO confirm.
    in_progress = self._right_coords_in_progress[reference_name]
    while len(in_progress):
        right_coord = in_progress[0]
        if right_coord < rightmost_boundary:
            # Coordinate is fully left of the boundary: its families are
            # complete. Remove it from the queue and the coordinate map.
            in_progress.pop(0)
            left_families = self._coordinate_family.pop((reference_name, right_coord), {})
            for family in sorted(left_families.values(), key=lambda x:x[0].left.reference_start):
                # Deterministic member order within each family.
                family.sort(key=lambda x: x.query_name)
                self.pending_pair_count -= len(family)
                yield family
        else:
            # Head coordinate is not yet past the boundary; since the queue
            # is ordered, nothing further can be complete either.
            break
returns one or more families whose end < rightmost boundary
def equal_to(self, key, value):
    """
    Add an equality constraint: the given field must equal the given value.

    :param key: name of the field to constrain
    :param value: required value for the field
    :rtype: Query
    """
    encoded = utils.encode(value)
    self._where[key] = encoded
    return self
增加查询条件,查询字段的值必须为指定值。 :param key: 查询条件的字段名 :param value: 查询条件的值 :rtype: Query
def eval_hessian(self, *args, **kwargs): """ :return: Hessian evaluated at the specified point. """ # Evaluate the hessian model and use the resulting Ans namedtuple as a # dict. From this, take the relevant components. eval_hess_dict = self.hessian_model(*args, **kwargs)._asdict() hess = [[[np.broadcast_to(eval_hess_dict.get(D(var, p1, p2), 0), eval_hess_dict[var].shape) for p2 in self.params] for p1 in self.params] for var in self ] # Use numpy to broadcast these arrays together and then stack them along # the parameter dimension. We do not include the component direction in # this, because the components can have independent shapes. for idx, comp in enumerate(hess): hess[idx] = np.stack(np.broadcast_arrays(*comp)) Ans = variabletuple('Ans', self.keys()) return Ans(*hess)
:return: Hessian evaluated at the specified point.
def device_text_string_request(self):
    """Request the FX Username from the device.

    Only required for devices that support FX Commands. The FX addressee
    responds with an ED 0x0301 FX Username Response message.
    """
    self._send_msg(StandardSend(self._address, COMMAND_FX_USERNAME_0X03_0X01))
Get FX Username. Only required for devices that support FX Commands. FX Addressee responds with an ED 0x0301 FX Username Response message.
def connect(self, ssl=None, timeout=None):
    """ Returns a new :class:`~server.PlexServer` or :class:`~client.PlexClient` object.
        Often times there is more than one address specified for a server or client.
        This function will prioritize local connections before remote and HTTPS before HTTP.
        After trying to connect to all available addresses for this resource and
        assuming at least one connection was successful, the PlexServer object is built and returned.

        Parameters:
            ssl (optional): Set True to only connect to HTTPS connections. Set False to
                only connect to HTTP connections. Set None (default) to connect to any
                HTTP or HTTPS connection.
            timeout (optional): Connection timeout passed to each connection attempt.

        Raises:
            :class:`plexapi.exceptions.NotFound`: When unable to connect to any addresses for this resource.
    """
    # Prefer local connections, and HTTPS URIs over HTTP ones.
    ordered = sorted(self.connections, key=lambda c: c.local, reverse=True)

    def usable(conn):
        # Only check non-local connections unless we own the resource.
        return self.owned or not conn.local

    https_urls = [c.uri for c in ordered if usable(c)]
    http_urls = [c.httpuri for c in ordered if usable(c)]
    cls = PlexServer if 'server' in self.provides else PlexClient
    # Force ssl, no ssl, or any (default).
    if ssl is True:
        candidates = https_urls
    elif ssl is False:
        candidates = http_urls
    else:
        candidates = https_urls + http_urls
    # Try connecting to all known resource connections in parallel, but
    # only return the first server (in order) that provides a response.
    listargs = [[cls, url, self.accessToken, timeout] for url in candidates]
    log.info('Testing %s resource connections..', len(listargs))
    results = utils.threaded(_connect, listargs)
    return _chooseConnection('Resource', self.name, results)
Returns a new :class:`~server.PlexServer` or :class:`~client.PlexClient` object. Often times there is more than one address specified for a server or client. This function will prioritize local connections before remote and HTTPS before HTTP. After trying to connect to all available addresses for this resource and assuming at least one connection was successful, the PlexServer object is built and returned. Parameters: ssl (optional): Set True to only connect to HTTPS connections. Set False to only connect to HTTP connections. Set None (default) to connect to any HTTP or HTTPS connection. Raises: :class:`plexapi.exceptions.NotFound`: When unable to connect to any addresses for this resource.
def get_enough_colours(num_unique_values):
    """
    Generates and returns an array of `num_unique_values` HEX colours.

    :param num_unique_values: int, number of colours to be generated.
    :return: array of str, containing colours in HEX format ('#rrggbb').
    """
    # Reuse the precomputed palette for counts we already know.
    if num_unique_values in NUM2COLOURS:
        return NUM2COLOURS[num_unique_values]

    palette = []
    for i in range(1, num_unique_values + 1):
        # Walk the hue wheel; cycle saturation over three levels for variety.
        hue = i / num_unique_values
        saturation = 0.25 * (1 + (i % 3))
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, .8)
        palette.append('#%02x%02x%02x'
                       % (int(red * 255), int(green * 255), int(blue * 255)))

    # Interleave the hue-ordered colours so adjacent entries differ strongly.
    stride = 5 if num_unique_values < 20 else 10
    reordered = []
    for offset in range(stride):
        reordered.extend(palette[offset::stride])
    return reordered
Generates and returns an array of `num_unique_values` HEX colours. :param num_unique_values: int, number of colours to be generated. :return: array of str, containing colours in HEX format.