code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def error_class_for_http_status(status):
    """Return the appropriate `ResponseError` subclass for the given
    HTTP status code.

    Known statuses map through ``error_classes``; unknown statuses get a
    generic "unexpected" error class chosen by range.
    """
    try:
        return error_classes[status]
    except KeyError:
        def new_status_error(xml_response):
            # Bug fix: use inclusive lower bounds so that 400 and 500
            # themselves (when not in error_classes) are classified as
            # client/server errors rather than falling through to the
            # generic UnexpectedStatusError.
            if 400 <= status < 500:
                return UnexpectedClientError(status, xml_response)
            if 500 <= status < 600:
                return UnexpectedServerError(status, xml_response)
            return UnexpectedStatusError(status, xml_response)
        return new_status_error
2.646111
2.82935
0.935236
def response_doc(self):
    """The XML document received from the service.

    Parsed lazily from ``self.response_xml`` and cached on the instance.
    """
    cached = self.__dict__.get('response_doc')
    if cached is None:
        cached = ElementTree.fromstring(self.response_xml)
        self.__dict__['response_doc'] = cached
    return cached
2.974475
2.719961
1.093573
def transaction_error_code(self):
    """The machine-readable error code for a transaction error.

    Returns ``None`` when the response carries no transaction error.
    """
    error = self.response_doc.find('transaction_error')
    if error is None:
        return None
    code = error.find('error_code')
    return code.text if code is not None else None
3.793884
3.680926
1.030688
def errors(self):
    """A dictionary of error objects, keyed on the name of the request
    field that was invalid.

    Each error value has `field`, `symbol`, and `message` attributes.
    A field with several errors maps to a list of error objects.
    Result is cached on the instance.
    """
    if 'errors' in self.__dict__:
        return self.__dict__['errors']
    suberrors = {}
    for err in self.response_doc.findall('error'):
        field = err.attrib['field']
        sub_err = self.Suberror(field, err.attrib['symbol'], err.text)
        if field in suberrors:
            # Promote a single suberror to a list when a second one
            # arrives for the same field.
            existing = suberrors[field]
            if not isinstance(existing, list):
                suberrors[field] = [existing]
            suberrors[field].append(sub_err)
        else:
            suberrors[field] = sub_err
    self.__dict__['errors'] = suberrors
    return suberrors
2.848018
2.612418
1.090185
def objects_for_push_notification(notification):
    """Decode a push notification with the given body XML.

    Returns a dictionary containing the constituent objects of the push
    notification; the kind of notification is given in the ``"type"``
    member of the returned dictionary.
    """
    root = ElementTree.fromstring(notification)
    objects = {'type': root.tag}
    for child in root:
        objects[child.tag] = Resource.value_for_element(child)
    return objects
4.064402
4.107274
0.989562
def charge(self, charge):
    """Charge (or credit) this account with the given `Adjustment`."""
    return charge.post(urljoin(self._url, '/adjustments'))
18.608103
11.453875
1.624612
def invoice(self, **kwargs):
    """Create an invoice for any outstanding adjustments this account has."""
    url = urljoin(self._url, '/invoices')
    # An Invoice body is only sent when invoice attributes were given.
    if kwargs:
        response = self.http_request(
            url, 'POST', Invoice(**kwargs),
            {'Content-Type': 'application/xml; charset=utf-8'})
    else:
        response = self.http_request(url, 'POST')
    if response.status != 201:
        self.raise_http_error(response)
    body = response.read()
    logging.getLogger('recurly.http.response').debug(body)
    return InvoiceCollection.from_element(ElementTree.fromstring(body))
3.561845
3.611022
0.986382
def subscribe(self, subscription):
    """Create the given `Subscription` for this existing account."""
    return subscription.post(urljoin(self._url, '/subscriptions'))
8.937773
7.880123
1.134217
def update_billing_info(self, billing_info):
    """Change this account's billing information to the given `BillingInfo`."""
    url = urljoin(self._url, '/billing_info')
    response = billing_info.http_request(
        url, 'PUT', billing_info,
        {'Content-Type': 'application/xml; charset=utf-8'})
    if response.status == 201:
        # Newly created billing info: remember its canonical URL.
        billing_info._url = response.getheader('Location')
    elif response.status != 200:
        billing_info.raise_http_error(response)
    response_xml = response.read()
    logging.getLogger('recurly.http.response').debug(response_xml)
    billing_info.update_from_element(ElementTree.fromstring(response_xml))
3.616283
3.607672
1.002387
def create_shipping_address(self, shipping_address):
    """Creates a shipping address on an existing account.

    When creating an account, shipping addresses can instead be embedded
    in the account creation request.
    """
    return shipping_address.post(urljoin(self._url, '/shipping_addresses'))
8.608644
14.407549
0.597509
def preview(self):
    """Preview the purchase of this gift card."""
    if hasattr(self, '_url'):
        # Existing resource: preview against its own URL.
        url = self._url + '/preview'
    else:
        url = urljoin(recurly.base_uri(), self.collection_path + '/preview')
    return self.post(url)
5.676132
6.101329
0.930311
def redeem(self, account_code):
    """Redeem this gift card on the specified account code."""
    redemption_path = '%s/redeem' % (self.redemption_code,)
    if hasattr(self, '_url'):
        url = urljoin(self._url, '/redeem')
    else:
        url = urljoin(recurly.base_uri(),
                      self.collection_path + '/' + redemption_path)
    recipient = _RecipientAccount(account_code=account_code)
    return self.post(url, recipient)
4.937436
5.206844
0.948259
def pdf(cls, uuid):
    """Return a PDF of the invoice identified by the UUID.

    The return value is a raw byte string, which can be written to a file::

        with open('invoice.pdf', 'w') as invoice_file:
            invoice_file.write(recurly.Invoice.pdf(uuid))
    """
    url = urljoin(base_uri(), cls.member_path % (uuid,))
    response = cls.http_request(url, headers={'Accept': 'application/pdf'})
    return response.read()
5.372802
5.850507
0.918348
def enter_offline_payment(self, transaction):
    """Records an offline (external) payment on the invoice.

    Pass in a Transaction object to set the details of the created
    transaction (payment_method, collected_at, amount_in_cents,
    description).

    Returns:
        Transaction: The created transaction.
    """
    transaction.post(urljoin(self._url, '/transactions'))
    return transaction
10.157673
16.562605
0.61329
def update_notes(self, **kwargs):
    """Updates the notes on the subscription without generating a change.

    This endpoint also allows updating custom fields::

        sub.custom_fields[0].value = 'A new value'
        sub.update_notes()
    """
    for key, value in kwargs.items():
        setattr(self, key, value)
    self.put(urljoin(self._url, '/notes'))
4.733962
5.274267
0.897558
def pause(self, remaining_pause_cycles):
    """Pause a subscription for `remaining_pause_cycles` billing cycles."""
    url = urljoin(self._url, '/pause')
    # Build a minimal <subscription> body carrying only the pause count.
    elem = ElementTreeBuilder.Element(self.nodename)
    elem.append(Resource.element_for_value('remaining_pause_cycles',
                                           remaining_pause_cycles))
    response = self.http_request(
        url, 'PUT', ElementTree.tostring(elem, encoding='UTF-8'),
        {'Content-Type': 'application/xml; charset=utf-8'})
    if response.status not in (200, 201, 204):
        self.raise_http_error(response)
    self.update_from_element(ElementTree.fromstring(response.read()))
3.69709
3.74441
0.987363
def create_usage(self, sub_add_on, usage):
    """Record the usage on the given subscription add-on and update the
    usage object with the returned XML."""
    path = '/add_ons/%s/usage' % (sub_add_on.add_on_code,)
    return usage.post(urljoin(self._url, path))
5.64266
5.262106
1.07232
def get_refund_transaction(self):
    """Retrieve the refund transaction for this transaction, immediately
    after refunding.

    After calling `refund()`, call this method to retrieve the new
    transaction representing the refund.
    """
    if not hasattr(self, '_refund_transaction_url'):
        raise ValueError("No refund transaction is available for this transaction")
    _resp, elem = self.element_for_url(self._refund_transaction_url)
    return self.value_for_element(elem)
5.877466
5.680739
1.034631
def refund(self, **kwargs):
    """Refund this transaction.

    Returns the refunded transaction (``self``) on success, or raises a
    `ResponseError` if the refund request fails. After a successful
    refund, use `get_refund_transaction()` to retrieve the new
    transaction representing the refund.
    """
    # Locate the refund link advertised in the transaction's XML.
    try:
        selfnode = self._elem
    except AttributeError:
        raise AttributeError('refund')
    url = method = None
    for anchor in selfnode.findall('a'):
        if anchor.attrib.get('name') != 'refund':
            continue
        url = anchor.attrib['href']
        method = anchor.attrib['method'].upper()
    if url is None or method is None:
        raise AttributeError("refund")  # should do something more specific probably
    actionator = self._make_actionator(
        url, method, extra_handler=self._handle_refund_accepted)
    return actionator(**kwargs)
6.089146
5.865054
1.038208
def get_add_on(self, add_on_code):
    """Return the `AddOn` for this plan with the given add-on code."""
    url = urljoin(self._url, '/add_ons/%s' % (add_on_code,))
    _resp, elem = AddOn.element_for_url(url)
    return AddOn.from_element(elem)
5.146542
5.167409
0.995962
def create_add_on(self, add_on):
    """Make the given `AddOn` available to subscribers on this plan."""
    return add_on.post(urljoin(self._url, '/add_ons'))
6.4156
7.109345
0.902418
def _load_track_estimates(track, estimates_dir, output_dir):
    """Load estimates from disk instead of processing."""
    track_estimate_dir = os.path.join(estimates_dir, track.subset, track.name)
    user_results = {}
    for target_path in glob.glob(track_estimate_dir + '/*.wav'):
        target_name = op.splitext(os.path.basename(target_path))[0]
        try:
            target_audio, _ = sf.read(target_path, always_2d=True)
        except RuntimeError:
            # unreadable wav file: skip this target
            continue
        user_results[target_name] = target_audio
    if user_results:
        eval_mus_track(track, user_results, output_dir=output_dir)
    return None
3.41861
3.408058
1.003096
def eval_dir(
    reference_dir,
    estimates_dir,
    output_dir=None,
    mode='v4',
    win=1.0,
    hop=1.0,
):
    """Compute bss_eval metrics for two given directories, assuming file
    names are identical for both reference sources and estimates.

    Parameters
    ----------
    reference_dir : str
        path to reference sources directory.
    estimates_dir : str
        path to estimates directory.
    output_dir : str
        path to output directory used to save evaluation results.
        NOTE(review): accepted but unused in this function body — confirm
        whether saving was intended here.
    mode : str
        bsseval version number. Defaults to 'v4'.
    win : float
        window size in seconds.
    hop : float
        hop size in seconds.

    Returns
    -------
    scores : EvalStore
        scores object holding the framewise and global evaluation scores.
    """
    reference = []
    estimates = []
    data = EvalStore(win=win, hop=hop)
    global_rate = None
    reference_glob = os.path.join(reference_dir, '*.wav')
    # Load in each reference file in the supplied dir
    for reference_file in glob.glob(reference_glob):
        ref_audio, rate = sf.read(
            reference_file,
            always_2d=True
        )
        # Make sure fs is the same for all files
        assert (global_rate is None or rate == global_rate)
        global_rate = rate
        reference.append(ref_audio)
    if not reference:
        raise ValueError('`reference_dir` contains no wav files')
    estimated_glob = os.path.join(estimates_dir, '*.wav')
    targets = []
    for estimated_file in glob.glob(estimated_glob):
        # target name is taken from the estimate's file name
        targets.append(os.path.basename(estimated_file))
        ref_audio, rate = sf.read(
            estimated_file,
            always_2d=True
        )
        assert (global_rate is None or rate == global_rate)
        global_rate = rate
        estimates.append(ref_audio)
    # win/hop are converted from seconds to samples here
    SDR, ISR, SIR, SAR = evaluate(
        reference,
        estimates,
        win=int(win*global_rate),
        hop=int(hop*global_rate),
        mode=mode
    )
    for i, target in enumerate(targets):
        values = {
            "SDR": SDR[i].tolist(),
            "SIR": SIR[i].tolist(),
            "ISR": ISR[i].tolist(),
            "SAR": SAR[i].tolist()
        }
        data.add_target(
            target_name=target,
            values=values
        )
    return data
2.365653
2.451493
0.964985
def eval_mus_dir(dataset, estimates_dir, output_dir=None, *args, **kwargs):
    """Run musdb.run for the purpose of evaluation of a musdb estimate dir.

    Parameters
    ----------
    dataset : DB(object)
        Musdb Database object.
    estimates_dir : str
        Path to estimates folder.
    output_dir : str
        Output folder where evaluation json files are stored.
    *args
        Variable length argument list for `musdb.run()`.
    **kwargs
        Arbitrary keyword arguments for `musdb.run()`.
    """
    # Build a musdb instance over the estimates folder (same file layout)
    # and collect the names of the tracks it contains.
    est = musdb.DB(root_dir=estimates_dir, is_wav=True)
    tracknames = [t.name for t in est.load_mus_tracks()]
    # Only evaluate the musdb tracks for which estimates exist.
    tracks = dataset.load_mus_tracks(tracknames=tracknames)
    # Wrap the estimate loader so musdb.run can call it per track.
    run_fun = functools.partial(
        _load_track_estimates,
        estimates_dir=estimates_dir,
        output_dir=output_dir)
    dataset.run(run_fun, estimates_dir=None, tracks=tracks, *args, **kwargs)
4.433828
4.352309
1.01873
def eval_mus_track(
    track,
    user_estimates,
    output_dir=None,
    mode='v4',
    win=1.0,
    hop=1.0
):
    """Compute all bss_eval metrics for the musdb track and estimated
    signals, given by a `user_estimates` dict.

    Parameters
    ----------
    track : Track
        musdb track object loaded using musdb.
    user_estimates : Dict
        dictionary containing the user estimates as np.arrays, keyed by
        target name.
    output_dir : str
        path to output directory used to save evaluation results.
        Defaults to `None`, meaning no evaluation files will be saved.
    mode : str
        bsseval version number. Defaults to 'v4'.
    win : float
        window size in seconds.
    hop : float
        hop size in seconds.

    Returns
    -------
    scores : EvalStore
        scores object that holds the framewise and global evaluation
        scores.
    """
    audio_estimates = []
    audio_reference = []
    # make sure to always build the list in the same order
    # therefore track.targets is an OrderedDict
    eval_targets = []  # save the list of target names to be evaluated
    for key, target in list(track.targets.items()):
        try:
            # try to fetch the audio from the user_results of a given key
            user_estimates[key]
        except KeyError:
            # ignore wrong key and continue
            continue
        # append this target name to the list of target to evaluate
        eval_targets.append(key)
    data = EvalStore(win=win, hop=hop)
    # check if vocals and accompaniment is among the targets
    has_acc = all(x in eval_targets for x in ['vocals', 'accompaniment'])
    if has_acc:
        # remove accompaniment from list of targets, because
        # the voc/acc scenario will be evaluated separately
        eval_targets.remove('accompaniment')
    if len(eval_targets) >= 2:
        # compute evaluation of remaining targets
        for target in eval_targets:
            audio_estimates.append(user_estimates[target])
            audio_reference.append(track.targets[target].audio)
        SDR, ISR, SIR, SAR = evaluate(
            audio_reference,
            audio_estimates,
            win=int(win*track.rate),
            hop=int(hop*track.rate),
            mode=mode
        )
        # iterate over all evaluation results except for vocals
        for i, target in enumerate(eval_targets):
            if target == 'vocals' and has_acc:
                # vocals is re-evaluated below in the voc/acc scenario
                continue
            values = {
                "SDR": SDR[i].tolist(),
                "SIR": SIR[i].tolist(),
                "ISR": ISR[i].tolist(),
                "SAR": SAR[i].tolist()
            }
            data.add_target(
                target_name=target,
                values=values
            )
    # add vocal accompaniment targets later
    if has_acc:
        # add vocals and accompaniments as a separate scenario
        eval_targets = ['vocals', 'accompaniment']
        audio_estimates = []
        audio_reference = []
        for target in eval_targets:
            audio_estimates.append(user_estimates[target])
            audio_reference.append(track.targets[target].audio)
        SDR, ISR, SIR, SAR = evaluate(
            audio_reference,
            audio_estimates,
            win=int(win*track.rate),
            hop=int(hop*track.rate),
            mode=mode
        )
        # iterate over all targets
        for i, target in enumerate(eval_targets):
            values = {
                "SDR": SDR[i].tolist(),
                "SIR": SIR[i].tolist(),
                "ISR": ISR[i].tolist(),
                "SAR": SAR[i].tolist()
            }
            data.add_target(
                target_name=target,
                values=values
            )
    if output_dir:
        # validate against the schema
        data.validate()
        try:
            subset_path = op.join(
                output_dir,
                track.subset
            )
            if not op.exists(subset_path):
                os.makedirs(subset_path)
            with open(
                op.join(subset_path, track.name) + '.json', 'w+'
            ) as f:
                f.write(data.json)
        except (IOError):
            # best-effort save: evaluation result is still returned
            pass
    return data
2.598267
2.599672
0.99946
def pad_or_truncate(audio_reference, audio_estimates):
    """Pad or truncate estimates to the duration of the references.

    - If references are longer: zero-pad the estimates at the end.
    - If estimates are longer: truncate estimates to the reference length.

    Parameters
    ----------
    audio_reference : np.ndarray, shape=(nsrc, nsampl, nchan)
        array containing true reference sources
    audio_estimates : np.ndarray, shape=(nsrc, nsampl, nchan)
        array containing estimated sources

    Returns
    -------
    audio_reference, audio_estimates : np.ndarray
        references unchanged; estimates adjusted along axis 1.
    """
    n_ref = audio_reference.shape[1]
    n_est = audio_estimates.shape[1]
    if n_est > n_ref:
        # too long: cut off the tail
        audio_estimates = audio_estimates[:, :n_ref, :]
    elif n_est < n_ref:
        # too short: append zeros at the end
        pad_widths = [(0, 0), (0, n_ref - n_est), (0, 0)]
        audio_estimates = np.pad(audio_estimates, pad_widths, mode='constant')
    return audio_reference, audio_estimates
1.720576
1.792406
0.959925
def evaluate(references, estimates, win=1*44100, hop=1*44100, mode='v4',
             padding=True):
    """BSS_EVAL images evaluation using the metrics module.

    Parameters
    ----------
    references : np.ndarray, shape=(nsrc, nsampl, nchan)
        array containing true reference sources
    estimates : np.ndarray, shape=(nsrc, nsampl, nchan)
        array containing estimated sources
    win : int
        window size in samples, defaults to 44100
    hop : int
        hop size in samples, defaults to 44100 (no overlap)
    mode : str
        BSSEval version, defaults to `v4`
    padding : bool
        pad/truncate estimates to the reference length first

    Returns
    -------
    (SDR, ISR, SIR, SAR) : tuple of np.ndarray, each shape=(nsrc,)
        distortion, spatial, interference and artifact ratios.
    """
    references = np.array(references)
    estimates = np.array(estimates)
    if padding:
        references, estimates = pad_or_truncate(references, estimates)
    sdr, isr, sir, sar, _ = metrics.bss_eval(
        references,
        estimates,
        compute_permutation=False,
        window=win,
        hop=hop,
        framewise_filters=(mode == "v3"),
        bsseval_sources_version=False)
    return sdr, isr, sir, sar
3.907333
3.805153
1.026853
def add_target(self, target_name, values):
    """Add a target's framewise metrics to the scores dictionary.

    Parameters
    ----------
    target_name : str
        name of target to be added to list of targets
    values : Dict
        framewise metric arrays keyed by 'SDR', 'SIR', 'SAR', 'ISR'
        (see `musdb.schema.json`).
    """
    frames = []
    for i in range(len(values['SDR'])):
        frames.append({
            'time': i * self.hop,
            'duration': self.win,
            'metrics': {
                "SDR": self._q(values['SDR'][i]),
                "SIR": self._q(values['SIR'][i]),
                "SAR": self._q(values['SAR'][i]),
                "ISR": self._q(values['ISR'][i]),
            },
        })
    self.scores['targets'].append({'name': target_name, 'frames': frames})
2.925739
2.732082
1.070883
def json(self):
    """Return the scores dictionary serialized as a JSON string.

    Returns
    -------
    json_string : str
        json dump of the scores dictionary.
    """
    # Imported locally because this property/method shadows the module
    # name `json` in its own namespace.
    import json as json_module
    return json_module.dumps(self.scores, indent=2, allow_nan=True)
5.575728
4.436519
1.25678
if np.isinf(number): return np.nan else: return D(D(number).quantize(D(precision)))
def _q(self, number, precision='.00001')
quantiztion of BSSEval values
5.401036
5.370348
1.005714
def bsseval(inargs=None):
    """Generic cli app for bsseval results; expects a reference folder
    and an estimates folder of wav files."""
    parser = argparse.ArgumentParser()
    parser.add_argument('reference_dir', type=str)
    parser.add_argument('estimates_dir', type=str)
    parser.add_argument('-o', help='output_dir')
    parser.add_argument('--win', type=float,
                        help='Window size in seconds', default=1.0)
    parser.add_argument('--hop', type=float,
                        help='Hop size in seconds', default=1.0)
    parser.add_argument('-m', type=str,
                        help='bss_eval version [`v3`, `v4`]', default='v4')
    parser.add_argument('--version', '-v', action='version',
                        version='%%(prog)s %s' % util.__version__)
    args = parser.parse_args(inargs)
    # write results next to the estimates unless -o was given
    output_dir = args.o if args.o else args.estimates_dir
    # evaluate an existing estimate folder with wav files
    data = eval_dir(
        args.reference_dir,
        args.estimates_dir,
        output_dir=output_dir,
        mode=args.m,
        win=args.win,
        hop=args.hop)
    print(data)
2.673149
2.783127
0.960484
def museval(inargs=None):
    """Commandline interface for museval evaluation tools."""
    parser = argparse.ArgumentParser()
    parser.add_argument('estimates_dir', type=str)
    parser.add_argument('-o', help='output_dir')
    parser.add_argument('--cpu', type=int, help='number of cpus', default=4)
    parser.add_argument('-p', help='enable multiprocessing',
                        action='store_true')
    parser.add_argument('--musdb', help='path to musdb', type=str)
    parser.add_argument('--iswav', help='Read musdb wav instead of stems',
                        action='store_true')
    parser.add_argument('--version', '-v', action='version',
                        version='%%(prog)s %s' % util.__version__)
    args = parser.parse_args(inargs)
    mus = musdb.DB(root_dir=args.musdb, is_wav=args.iswav)
    # write results next to the estimates unless -o was given
    output_dir = args.o if args.o else args.estimates_dir
    # evaluate an existing estimate folder with wav files
    eval_mus_dir(
        dataset=mus,                       # instance of musdb
        estimates_dir=args.estimates_dir,  # path to estimate folder
        output_dir=output_dir,             # folder for eval json files
        parallel=args.p,
        cpus=args.cpu)
3.23245
3.229201
1.001006
def next_page(self):
    """Return the next `Page` after this one in the result sequence.

    Raises a `PageError` when the current page is the last page.
    """
    if not hasattr(self, 'next_url'):
        raise PageError("Page %r has no next page" % self)
    return self.page_for_url(self.next_url)
4.297385
4.195575
1.024266
def first_page(self):
    """Return the first `Page` in the result sequence this `Page` is from.

    Raises a `PageError` when the current page is already the first page.
    """
    if not hasattr(self, 'start_url'):
        raise PageError("Page %r is already the first page" % self)
    return self.page_for_url(self.start_url)
5.733559
4.648654
1.23338
def page_for_url(cls, url):
    """Return a new `Page` containing the items at the given endpoint URL."""
    resp, elem = Resource.element_for_url(url)
    return cls.page_for_value(resp, Resource.value_for_element(elem))
8.281392
9.051197
0.91495
def page_for_value(cls, resp, value):
    """Return a new `Page` representing the given resource `value`
    retrieved using the HTTP response `resp`.

    Records pagination ``Link`` headers from `resp` so the returned
    `Page` can serve `next_page()` and `first_page()`.
    """
    page = cls(value)
    links = parse_link_value(resp.getheader('Link'))
    for url, data in links.items():
        rel = data.get('rel')
        if rel == 'start':
            page.start_url = url
        elif rel == 'next':
            page.next_url = url
    return page
3.107795
3.270193
0.95034
def serializable_attributes(self):
    """Attributes to be serialized in a ``POST`` or ``PUT`` request.

    Returns all attributes unless a blacklist is specified.
    """
    if not hasattr(self, 'blacklist_attributes'):
        return self.attributes
    return [attr for attr in self.attributes
            if attr not in self.blacklist_attributes]
4.130535
3.270114
1.263117
def http_request(cls, url, method='GET', body=None, headers=None):
    """Make an HTTP request with the given method to the given URL,
    returning the resulting `http_client.HTTPResponse` instance.

    If the `body` argument is a `Resource` instance, it is serialized to
    XML by calling its `to_element()` method before submitting it.
    Requests are authenticated per the Recurly API specification using
    the ``recurly.API_KEY`` value for the API key.

    Requests and responses are logged at the ``DEBUG`` level to the
    ``recurly.http.request`` and ``recurly.http.response`` loggers
    respectively.
    """
    if recurly.API_KEY is None:
        raise recurly.UnauthorizedError('recurly.API_KEY not set')
    url_parts = urlparse(url)
    if not any(url_parts.netloc.endswith(d) for d in recurly.VALID_DOMAINS):
        # TODO Exception class used for clean backport, change to
        # ConfigurationError
        raise Exception('Only a recurly domain may be called')
    # Non-ASCII credentials would break the Basic auth header below.
    is_non_ascii = lambda s: any(ord(c) >= 128 for c in s)
    if is_non_ascii(recurly.API_KEY) or is_non_ascii(recurly.SUBDOMAIN):
        raise recurly.ConfigurationError()
    urlparts = urlsplit(url)
    connection_options = {}
    if recurly.SOCKET_TIMEOUT_SECONDS:
        connection_options['timeout'] = recurly.SOCKET_TIMEOUT_SECONDS
    # Pick plain HTTP, default HTTPS, or HTTPS with a custom CA bundle.
    if urlparts.scheme != 'https':
        connection = http_client.HTTPConnection(urlparts.netloc, **connection_options)
    elif recurly.CA_CERTS_FILE is None:
        connection = http_client.HTTPSConnection(urlparts.netloc, **connection_options)
    else:
        connection_options['context'] = ssl.create_default_context(cafile=recurly.CA_CERTS_FILE)
        connection = http_client.HTTPSConnection(urlparts.netloc, **connection_options)
    # Copy caller headers so we never mutate the caller's dict.
    headers = {} if headers is None else dict(headers)
    headers.setdefault('Accept', 'application/xml')
    headers.update({
        'User-Agent': recurly.USER_AGENT
    })
    headers['X-Api-Version'] = recurly.api_version()
    headers['Authorization'] = 'Basic %s' % base64.b64encode(six.b('%s:' % recurly.API_KEY)).decode()
    log = logging.getLogger('recurly.http.request')
    if log.isEnabledFor(logging.DEBUG):
        log.debug("%s %s HTTP/1.1", method, url)
        for header, value in six.iteritems(headers):
            if header == 'Authorization':
                # never log credentials
                value = '<redacted>'
            log.debug("%s: %s", header, value)
        log.debug('')
        if method in ('POST', 'PUT') and body is not None:
            if isinstance(body, Resource):
                log.debug(body.as_log_output())
            else:
                log.debug(body)
    if isinstance(body, Resource):
        body = ElementTreeBuilder.tostring(body.to_element(), encoding='UTF-8')
        headers['Content-Type'] = 'application/xml; charset=utf-8'
    if method in ('POST', 'PUT') and body is None:
        headers['Content-Length'] = '0'
    connection.request(method, url, body, headers)
    resp = connection.getresponse()
    resp_headers = cls.headers_as_dict(resp)
    log = logging.getLogger('recurly.http.response')
    if log.isEnabledFor(logging.DEBUG):
        log.debug("HTTP/1.1 %d %s", resp.status, resp.reason)
        log.debug(resp_headers)
        log.debug('')
    recurly.cache_rate_limit_headers(resp_headers)
    return resp
2.558345
2.481093
1.031136
def headers_as_dict(cls, resp):
    """Turns an array of response headers into a dictionary."""
    if six.PY2:
        raw = (header.split(':', 1) for header in resp.msg.headers)
    else:
        raw = resp.msg._headers
    return dict((name, value.strip()) for name, value in raw)
2.958649
2.682137
1.103094
def as_log_output(self):
    """Returns an XML string containing a serialization of this instance
    suitable for logging.

    Attributes named in the instance's `sensitive_attributes` are redacted.
    """
    elem = self.to_element()
    for attrname in self.sensitive_attributes:
        for node in elem.iter(attrname):
            node.text = 'XXXXXXXXXXXXXXXX'
    return ElementTreeBuilder.tostring(elem, encoding='UTF-8')
7.534647
5.202519
1.448269
def get(cls, uuid):
    """Return a `Resource` instance of this class identified by the given
    code or UUID.

    Only `Resource` classes with specified `member_path` attributes can
    be directly requested with this method.
    """
    if not uuid:
        raise ValueError("get must have a value passed as an argument")
    url = recurly.base_uri() + (cls.member_path % (quote(str(uuid)),))
    _resp, elem = cls.element_for_url(url)
    return cls.from_element(elem)
7.793166
7.457844
1.044962
def headers_for_url(cls, url):
    """Return the headers only for the given URL as a dict."""
    resp = cls.http_request(url, method='HEAD')
    if resp.status != 200:
        cls.raise_http_error(resp)
    return Resource.headers_as_dict(resp)
4.236913
4.22195
1.003544
def element_for_url(cls, url):
    """Return the resource at the given URL, as a
    (`http_client.HTTPResponse`, `xml.etree.ElementTree.Element`) tuple
    resulting from a ``GET`` request to that URL.
    """
    response = cls.http_request(url)
    if response.status != 200:
        cls.raise_http_error(response)
    assert response.getheader('Content-Type').startswith('application/xml')
    body = response.read()
    logging.getLogger('recurly.http.response').debug(body)
    return response, ElementTree.fromstring(body)
3.5272
3.499192
1.008004
def value_for_element(cls, elem):
    """Deserialize the given XML `Element` into its representative value.

    Depending on the content of the element, the returned value may be:

    * a string, integer, or boolean value
    * a `datetime.datetime` instance
    * a list of `Resource` instances
    * a single `Resource` instance
    * a `Money` instance
    * ``None``
    """
    log = logging.getLogger('recurly.resource')
    if elem is None:
        log.debug("Converting %r element into None value", elem)
        return
    if elem.attrib.get('nil') is not None:
        log.debug("Converting %r element with nil attribute into None value", elem.tag)
        return
    # Money fields: *_in_cents with no sibling 'currency' attribute.
    if elem.tag.endswith('_in_cents') and 'currency' not in cls.attributes and not cls.inherits_currency:
        log.debug("Converting %r element in class with no matching 'currency' into a Money value", elem.tag)
        return Money.from_element(elem)
    attr_type = elem.attrib.get('type')
    log.debug("Converting %r element with type %r", elem.tag, attr_type)
    if attr_type == 'integer':
        return int(elem.text.strip())
    if attr_type == 'float':
        return float(elem.text.strip())
    if attr_type == 'boolean':
        return elem.text.strip() == 'true'
    if attr_type == 'datetime':
        return iso8601.parse_date(elem.text.strip())
    if attr_type == 'array':
        return [cls._subclass_for_nodename(sub_elem.tag).from_element(sub_elem) for sub_elem in elem]
    # Unknown types may be the names of resource classes.
    if attr_type is not None:
        try:
            value_class = cls._subclass_for_nodename(attr_type)
        except ValueError:
            log.debug("Not converting %r element with type %r to a resource as that matches no known nodename", elem.tag, attr_type)
        else:
            return value_class.from_element(elem)
    # Untyped complex elements should still be resource instances. Guess
    # from the nodename.
    if len(elem):  # has children
        value_class = cls._subclass_for_nodename(elem.tag)
        log.debug("Converting %r tag into a %s", elem.tag, value_class.__name__)
        return value_class.from_element(elem)
    # Plain text leaf: return the stripped text (empty string when absent).
    value = elem.text or ''
    return value.strip()
3.118605
3.017358
1.033555
def element_for_value(cls, attrname, value):
    """Serialize the given value into an XML `Element` with the given tag
    name, returning it.

    The value argument may be:

    * a `Resource` instance
    * a `Money` instance
    * a `datetime.datetime` instance
    * a string, integer, or boolean value
    * ``None``
    * a list or tuple of these values
    """
    if isinstance(value, Resource):
        if attrname in cls._classes_for_nodename:
            # override the child's node name with this attribute name
            return value.to_element(attrname)
        return value.to_element()
    el = ElementTreeBuilder.Element(attrname)
    if value is None:
        el.attrib['nil'] = 'nil'
    elif isinstance(value, bool):
        # NOTE: bool must be checked before int, since bool is an int
        # subclass in Python.
        el.attrib['type'] = 'boolean'
        el.text = 'true' if value else 'false'
    elif isinstance(value, int):
        el.attrib['type'] = 'integer'
        el.text = str(value)
    elif isinstance(value, datetime):
        el.attrib['type'] = 'datetime'
        el.text = value.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif isinstance(value, list) or isinstance(value, tuple):
        for sub_resource in value:
            if hasattr(sub_resource, 'to_element'):
                el.append(sub_resource.to_element())
            else:
                # child tag name is the singular of the attribute name
                el.append(cls.element_for_value(re.sub(r"s$", "", attrname), sub_resource))
    elif isinstance(value, Money):
        value.add_to_element(el)
    else:
        el.text = six.text_type(value)
    return el
2.655339
2.565441
1.035042
def update_from_element(self, elem):
    """Reset this `Resource` instance to represent the values in the
    given XML element."""
    self._elem = elem
    # Drop any cached attribute values so they are re-read from the
    # new element on next access.
    for name in self.attributes:
        try:
            delattr(self, name)
        except AttributeError:
            pass
    href = elem.attrib.get('href')
    if href is not None:
        self._url = href
    return self
4.008337
3.914662
1.023929
def all(cls, **kwargs):
    """Return a `Page` of instances of this `Resource` class from its
    general collection endpoint.

    Only `Resource` classes with specified `collection_path` endpoints
    can be requested with this method. Keyword arguments are passed to
    the API endpoint as query parameters.
    """
    url = recurly.base_uri() + cls.collection_path
    if kwargs:
        url = '?'.join((url, urlencode_params(kwargs)))
    return Page.page_for_url(url)
6.253184
4.989096
1.25337
def count(cls, **kwargs):
    """Return a count of server side resources given filtering arguments
    in kwargs."""
    url = recurly.base_uri() + cls.collection_path
    if kwargs:
        url = '?'.join((url, urlencode_params(kwargs)))
    return Page.count_for_url(url)
6.842403
6.318374
1.082937
def put(self, url):
    """Sends this `Resource` instance to the service with a ``PUT``
    request to the given URL."""
    response = self.http_request(
        url, 'PUT', self, {'Content-Type': 'application/xml; charset=utf-8'})
    if response.status != 200:
        self.raise_http_error(response)
    body = response.read()
    logging.getLogger('recurly.http.response').debug(body)
    # Refresh this instance from the server's representation.
    self.update_from_element(ElementTree.fromstring(body))
3.473496
3.540654
0.981032
def post(self, url, body=None):
    """Send this `Resource` (or `body` if given) with a ``POST`` to `url`.

    Stores the ``Location`` header as this resource's URL and, when the
    service returns a body (200/201), updates this instance from it.
    """
    headers = {'Content-Type': 'application/xml; charset=utf-8'}
    response = self.http_request(url, 'POST', body or self, headers)
    if response.status not in (200, 201, 204):
        self.raise_http_error(response)
    self._url = response.getheader('Location')
    if response.status in (200, 201):
        response_xml = response.read()
        logging.getLogger('recurly.http.response').debug(response_xml)
        self.update_from_element(ElementTree.fromstring(response_xml))
3.117959
3.361292
0.927607
def delete(self):
    """Delete this `Resource` on the service via a ``DELETE`` to its URL."""
    resp = self.http_request(self._url, 'DELETE')
    if resp.status != 204:
        self.raise_http_error(resp)
4.082256
3.846895
1.061182
def raise_http_error(cls, response):
    """Raise the `ResponseError` subclass matching `response`'s HTTP status."""
    body = response.read()
    logging.getLogger('recurly.http.response').debug(body)
    raise recurly.errors.error_class_for_http_status(response.status)(body)
4.599169
4.818821
0.954418
def to_element(self, root_name=None):
    """Serialize this `Resource` instance to an XML element.

    `root_name` overrides the element tag; defaults to `self.nodename`.
    """
    elem = ElementTreeBuilder.Element(root_name or self.nodename)
    for attrname in self.serializable_attributes():
        # Only serialize values explicitly set on the client side: values
        # from a retrieved XML response are read lazily at access time and
        # never enter the instance __dict__.
        if attrname not in self.__dict__:
            continue
        value = self.__dict__[attrname]
        if attrname in self.xml_attribute_attributes:
            elem.attrib[attrname] = six.text_type(value)
        else:
            elem.append(self.element_for_value(attrname, value))
    return elem
4.832385
4.620259
1.045912
def bss_eval_sources(reference_sources, estimated_sources, compute_permutation=True):
    """BSS Eval v3 ``bss_eval_sources``.

    Thin wrapper around ``bss_eval`` with the legacy parameter set; calling
    ``bss_eval`` directly is preferred (the ISR value is discarded here).
    """
    sdr, isr, sir, sar, perm = bss_eval(
        reference_sources,
        estimated_sources,
        window=np.inf,
        hop=np.inf,
        compute_permutation=compute_permutation,
        filters_len=512,
        framewise_filters=True,
        bsseval_sources_version=True,
    )
    return sdr, sir, sar, perm
6.011145
6.53791
0.919429
def bss_eval_sources_framewise(reference_sources, estimated_sources, window=30 * 44100, hop=15 * 44100, compute_permutation=False):
    """BSS Eval v3 ``bss_eval_sources_framewise``.

    Thin wrapper around ``bss_eval`` with the legacy parameter set; calling
    ``bss_eval`` directly is preferred (the ISR value is discarded here).
    """
    sdr, isr, sir, sar, perm = bss_eval(
        reference_sources,
        estimated_sources,
        window=window,
        hop=hop,
        compute_permutation=compute_permutation,
        filters_len=512,
        framewise_filters=True,
        bsseval_sources_version=True,
    )
    return sdr, sir, sar, perm
4.690118
4.776243
0.981968
def bss_eval_images(reference_sources, estimated_sources, compute_permutation=True):
    """BSS Eval v3 ``bss_eval_images``: wrapper to ``bss_eval``."""
    options = dict(
        window=np.inf,
        hop=np.inf,
        compute_permutation=compute_permutation,
        filters_len=512,
        framewise_filters=True,
        bsseval_sources_version=False,
    )
    return bss_eval(reference_sources, estimated_sources, **options)
6.843192
7.713135
0.887213
def bss_eval_images_framewise(reference_sources, estimated_sources, window=30 * 44100, hop=15 * 44100, compute_permutation=False):
    """BSS Eval v3 ``bss_eval_images_framewise``.

    Framewise computation of bss_eval_images; wrapper to ``bss_eval``.
    """
    options = dict(
        window=window,
        hop=hop,
        compute_permutation=compute_permutation,
        filters_len=512,
        framewise_filters=True,
        bsseval_sources_version=False,
    )
    return bss_eval(reference_sources, estimated_sources, **options)
5.177902
5.7766
0.896358
def _bss_decomp_mtifilt(reference_sources, estimated_source, j, C, Cj):
    """Decompose an estimated source image into four components.

    Returns ``(s_true, e_spat, e_interf, e_artif)``: the true source image,
    spatial (filtering) distortion, interference, and artifacts, derived
    from the true source images using multichannel time-invariant filters.

    :param reference_sources: true sources, nsrc X nsampl X nchan
    :param estimated_source: estimate to decompose, nsampl X nchan
    :param j: index of the reference source being estimated
    :param C: projection filters onto all reference sources
    :param Cj: projection filters onto reference source `j` only
    """
    filters_len = Cj.shape[-2]
    # zero pad the reference so it aligns with the filtered projections
    s_true = _zeropad(reference_sources[j], filters_len - 1, axis=0)
    # spatial distortion: projection onto source j minus the true image
    e_spat = _project(reference_sources[j], Cj) - s_true
    # interference: remainder of the projection onto all sources
    e_interf = _project(reference_sources, C) - s_true - e_spat
    # artifacts: whatever is left of the estimate after the above terms
    e_artif = - s_true - e_spat - e_interf
    e_artif[:estimated_source.shape[0], :] += estimated_source
    return (s_true, e_spat, e_interf, e_artif)
4.252978
3.862431
1.101115
# ensures concatenation dimension is the first sig = np.moveaxis(sig, axis, 0) # zero pad out = np.zeros((sig.shape[0] + N,) + sig.shape[1:]) out[:sig.shape[0], ...] = sig # put back axis in place out = np.moveaxis(out, 0, axis) return out
def _zeropad(sig, N, axis=0)
pads with N zeros at the end of the signal, along given axis
3.038168
3.258643
0.932341
G = np.moveaxis(G, (1, 3), (3, 4)) (nsrc, nchan, filters_len) = G.shape[0:3] G = np.reshape( G, (nsrc * nchan * filters_len, nsrc * nchan * filters_len) ) return G
def _reshape_G(G)
From a correlation matrix of size nsrc X nsrc X nchan X nchan X filters_len X filters_len, creates a new one of size nsrc*nchan*filters_len X nsrc*nchan*filters_len
4.028525
2.469026
1.631625
def _compute_reference_correlations(reference_sources, filters_len):
    """Compute inner products between delayed versions of reference_sources.

    :param reference_sources: nsrc X nsampl X nchan true sources
    :param filters_len: maximum delay (filter length) to correlate over
    :returns: ``(G, sf)`` where G is the nsrc X nsrc X nchan X nchan X
        filters_len X filters_len correlation tensor and sf holds the
        zero-padded reference spectra (nsrc X nchan X n_fft)
    """
    # reshape references as nsrc X nchan X nsampl
    (nsrc, nsampl, nchan) = reference_sources.shape
    reference_sources = np.moveaxis(reference_sources, (1), (2))
    # zero padding and FFT of references
    reference_sources = _zeropad(reference_sources, filters_len - 1, axis=2)
    n_fft = int(2**np.ceil(np.log2(nsampl + filters_len - 1.)))
    sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=2)
    # compute intercorrelation between sources; only one triangle of the
    # (source, channel) pairs is computed, the other follows by symmetry
    G = np.zeros((nsrc, nsrc, nchan, nchan, filters_len, filters_len))
    for ((i, c1), (j, c2)) in itertools.combinations_with_replacement(
        itertools.product(
            list(range(nsrc)), list(range(nchan))
        ), 2
    ):
        # cross-spectrum -> time-domain correlation via inverse FFT
        ssf = sf[j, c2] * np.conj(sf[i, c1])
        ssf = np.real(scipy.fftpack.ifft(ssf))
        # Toeplitz matrix of correlations restricted to lags 0..filters_len-1
        ss = toeplitz(
            np.hstack((ssf[0], ssf[-1:-filters_len:-1])),
            r=ssf[:filters_len]
        )
        G[j, i, c2, c1] = ss
        G[i, j, c1, c2] = ss.T
    return G, sf
3.05201
2.842947
1.073537
def _compute_projection_filters(G, sf, estimated_source):
    """Least-squares projection of `estimated_source` on the subspace
    spanned by delayed versions of the reference sources, with delays
    between 0 and filters_len-1.

    :param G: reference correlations (nsrc X nsrc X nchan X nchan X L X L,
        or nchan X nchan X L X L for a single source)
    :param sf: reference spectra from `_compute_reference_correlations`
    :param estimated_source: nsampl X nchan estimate to project
    :returns: distortion filters C, nsrc X nchan X L X nchan (the leading
        source axis is dropped when called with a single source)
    """
    # epsilon used to regularize the normal equations.
    # FIX: the `np.float` alias was removed in NumPy 1.24; use the
    # concrete dtype (same value: double-precision machine epsilon).
    eps = np.finfo(np.float64).eps
    # shapes
    (nsampl, nchan) = estimated_source.shape
    # handles the case where we are calling this with only one source
    # G should be nsrc X nsrc X nchan X nchan X filters_len X filters_len
    # and sf should be nsrc X nchan X filters_len
    if len(G.shape) == 4:
        G = G[None, None, ...]
        sf = sf[None, ...]
    nsrc = G.shape[0]
    filters_len = G.shape[-1]
    # zero pad estimates and put chan in first dimension
    estimated_source = _zeropad(estimated_source.T, filters_len - 1, axis=1)
    # compute its FFT
    n_fft = int(2**np.ceil(np.log2(nsampl + filters_len - 1.)))
    sef = scipy.fftpack.fft(estimated_source, n=n_fft)
    # compute the cross-correlations between sources and estimates
    D = np.zeros((nsrc, nchan, filters_len, nchan))
    for (j, cj, c) in itertools.product(
        list(range(nsrc)), list(range(nchan)), list(range(nchan))
    ):
        ssef = sf[j, cj] * np.conj(sef[c])
        ssef = np.real(scipy.fftpack.ifft(ssef))
        D[j, cj, :, c] = np.hstack((ssef[0], ssef[-1:-filters_len:-1]))
    # reshape matrices to build the filters
    D = D.reshape(nsrc * nchan * filters_len, nchan)
    G = _reshape_G(G)
    # Distortion filters: solve the regularized normal equations, falling
    # back to least squares if the system is singular.
    # FIX: catch the public np.linalg.LinAlgError instead of the private
    # np.linalg.linalg.LinAlgError path (removed in recent NumPy).
    try:
        C = np.linalg.solve(G + eps * np.eye(G.shape[0]), D).reshape(
            nsrc, nchan, filters_len, nchan
        )
    except np.linalg.LinAlgError:
        C = np.linalg.lstsq(G, D)[0].reshape(
            nsrc, nchan, filters_len, nchan
        )
    # if we asked for one single reference source,
    # return just a nchan X filters_len matrix
    if nsrc == 1:
        C = C[0]
    return C
3.38764
3.237191
1.046475
def _project(reference_sources, C):
    """Project images using pre-computed filters `C`.

    :param reference_sources: nsrc X nsampl X nchan (a 2d array is treated
        as a single source)
    :param C: filters, nsrc X nchan X filters_len X nchan
    :returns: projected signal, (nsampl + filters_len - 1) X nchan
    """
    # shapes: ensure that input is 3d (comprising the source index)
    if len(reference_sources.shape) == 2:
        reference_sources = reference_sources[None, ...]
        C = C[None, ...]
    (nsrc, nsampl, nchan) = reference_sources.shape
    filters_len = C.shape[-2]
    # zero pad
    reference_sources = _zeropad(reference_sources, filters_len - 1, axis=1)
    sproj = np.zeros((nchan, nsampl + filters_len - 1))
    # accumulate the filtered contribution of every (source, channel) pair
    # into each output channel
    for (j, cj, c) in itertools.product(
        list(range(nsrc)), list(range(nchan)), list(range(nchan))
    ):
        sproj[c] += fftconvolve(
            C[j, cj, :, c],
            reference_sources[j, :, cj]
        )[:nsampl + filters_len - 1]
    return sproj.T
4.088233
3.34525
1.222101
def _bss_crit(s_true, e_spat, e_interf, e_artif, bsseval_sources_version):
    """Measure separation quality for one source from its decomposition.

    Returns ``(sdr, isr, sir, sar)`` energy ratios in dB; ISR is NaN in the
    ``bsseval_sources`` variant, which folds spatial error into the target.
    """
    def energy(x):
        return np.sum(x ** 2)

    if bsseval_sources_version:
        s_filt = s_true + e_spat
        sdr = _safe_db(energy(s_filt), energy(e_interf + e_artif))
        isr = np.empty(sdr.shape) * np.nan
        sir = _safe_db(energy(s_filt), energy(e_interf))
        sar = _safe_db(energy(s_filt + e_interf), energy(e_artif))
    else:
        sdr = _safe_db(energy(s_true), energy(e_spat + e_interf + e_artif))
        isr = _safe_db(energy(s_true), energy(e_spat))
        sir = _safe_db(energy(s_true + e_spat), energy(e_interf))
        sar = _safe_db(energy(s_true + e_spat + e_interf), energy(e_artif))
    return (sdr, isr, sir, sar)
2.004582
2.024568
0.990128
if den == 0: return np.inf return 10 * np.log10(num / den)
def _safe_db(num, den)
Properly handle the potential +Inf db SIR instead of raising a RuntimeWarning.
3.804892
3.55742
1.069565
def parse_link_value(instr):
    """Parse an HTTP Link header value into ``{url: {param: value}}``.

    Given a link-value (i.e., after separating the header-value on commas),
    return a dictionary whose keys are link URLs and values are dictionaries
    of the parameters for their associated links.

    Internationalised parameters (e.g., ``title*``) are NOT percent-decoded,
    and only the last instance of a given parameter is kept.
    """
    out = {}
    if not instr:
        return out
    for link in [h.strip() for h in link_splitter.findall(instr)]:
        url, params = link.split(">", 1)
        # strip the leading '<' of the '<url>' form
        url = url[1:]
        param_dict = {}
        for param in _splitstring(params, PARAMETER, "\s*;\s*"):
            try:
                a, v = param.split("=", 1)
                param_dict[a.lower()] = _unquotestring(v)
            except ValueError:
                # valueless parameter (bare token): store None
                param_dict[param.lower()] = None
        out[url] = param_dict
    return out
3.509609
3.821765
0.918322
def construct_api_url(input, representation, resolvers=None, get3d=False, tautomers=False, xml=True, **kwargs):
    """Return the URL for the desired API endpoint.

    :param string input: Chemical identifier to resolve
    :param string representation: Desired output representation
    :param list(str) resolvers: (Optional) Ordered list of resolvers to use
    :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
    :param bool tautomers: (Optional) Whether to return all tautomers
    :param bool xml: (Optional) Whether to return full XML response
    :returns: CIR API URL
    :rtype: str
    """
    # File formats are requested as representation=file plus a format param
    if representation in FILE_FORMATS:
        kwargs['format'] = representation
        representation = 'file'
    # The 'tautomers:' prefix asks the service for every tautomer
    if tautomers:
        input = 'tautomers:%s' % input
    url = '/'.join((API_BASE, quote(input), representation))
    if xml:
        url += '/xml'
    if resolvers:
        kwargs['resolver'] = ','.join(resolvers)
    if get3d:
        kwargs['get3d'] = True
    if kwargs:
        url = '%s?%s' % (url, urlencode(kwargs))
    return url
3.597055
3.684432
0.976285
def request(input, representation, resolvers=None, get3d=False, tautomers=False, **kwargs):
    """Make a request to CIR and return the parsed XML root element.

    :param string input: Chemical identifier to resolve
    :param string representation: Desired output representation
    :param list(string) resolvers: (Optional) Ordered list of resolvers to use
    :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
    :param bool tautomers: (Optional) Whether to return all tautomers
    :returns: XML response from CIR
    :rtype: Element
    :raises HTTPError: if CIR returns an error code
    :raises ParseError: if CIR response is uninterpretable
    """
    api_url = construct_api_url(input, representation, resolvers, get3d, tautomers, **kwargs)
    log.debug('Making request: %s', api_url)
    return etree.parse(urlopen(api_url)).getroot()
3.253214
3.401538
0.956395
def query(input, representation, resolvers=None, get3d=False, tautomers=False, **kwargs):
    """Get all results for resolving `input` to `representation`.

    :param string input: Chemical identifier to resolve
    :param string representation: Desired output representation
    :param list(string) resolvers: (Optional) Ordered list of resolvers to use
    :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
    :param bool tautomers: (Optional) Whether to return all tautomers
    :returns: List of resolved results
    :rtype: list(Result)
    :raises HTTPError: if CIR returns an error code
    :raises ParseError: if CIR response is uninterpretable
    """
    tree = request(input, representation, resolvers, get3d, tautomers, **kwargs)
    results = []
    for data in tree.findall('.//data'):
        # each <data> element holds one or more <item> values
        value = [item.text for item in data.findall('item')]
        result = Result(
            input=tree.attrib['string'],
            representation=tree.attrib['representation'],
            resolver=data.attrib['resolver'],
            input_format=data.attrib['string_class'],
            notation=data.attrib['notation'],
            # unwrap single-item lists to a scalar value
            value=value[0] if len(value) == 1 else value
        )
        results.append(result)
    log.debug('Received %s query results', len(results))
    return results
3.445683
3.497003
0.985324
def resolve(input, representation, resolvers=None, get3d=False, **kwargs):
    """Resolve input to the specified output representation.

    :param string input: Chemical identifier to resolve
    :param string representation: Desired output representation
    :param list(string) resolvers: (Optional) Ordered list of resolvers to use
    :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
    :returns: Output representation or None
    :rtype: string or None
    :raises HTTPError: if CIR returns an error code
    :raises ParseError: if CIR response is uninterpretable
    """
    # Take first result from XML query.
    # FIX: pass parameters by keyword -- the previous positional call
    # query(input, representation, resolvers, False, get3d) slotted `get3d`
    # into query()'s `tautomers` parameter and forced get3d=False.
    results = query(input, representation, resolvers=resolvers, get3d=get3d,
                    tautomers=False, **kwargs)
    result = results[0].value if results else None
    return result
5.867907
9.476123
0.619231
def resolve_image(input, resolvers=None, fmt='png', width=300, height=300, frame=False, crop=None, bgcolor=None, atomcolor=None, hcolor=None, bondcolor=None, framecolor=None, symbolfontsize=11, linewidth=2, hsymbol='special', csymbol='special', stereolabels=False, stereowedges=True, header=None, footer=None, **kwargs):
    """Resolve `input` to a 2D image depiction and return the raw image bytes.

    :param string input: Chemical identifier to resolve
    :param list(string) resolvers: (Optional) Ordered list of resolvers to use
    :param string fmt: (Optional) gif or png image format (default png)
    :param int width, height: (Optional) Image size in pixels (default 300x300)
    :param bool frame: (Optional) Whether to show a border frame
    :param int crop: (Optional) Crop image with specified padding
    :param string bgcolor, atomcolor, hcolor, bondcolor, framecolor: colors
    :param int symbolfontsize: (Optional) Atom label font size (default 11)
    :param int linewidth: (Optional) Bond line width (default 2)
    :param string hsymbol, csymbol: (Optional) all, special or none
    :param bool stereolabels: (Optional) Show stereochemistry labels
    :param bool stereowedges: (Optional) Show wedge/dash bonds (default True)
    :param string header, footer: (Optional) Text above/below the structure
    """
    # Aggregate all named arguments (that are not None) into kwargs so they
    # can be forwarded to construct_api_url as querystring parameters.
    args, _, _, values = inspect.getargvalues(inspect.currentframe())
    for arg in args:
        if values[arg] is not None:
            kwargs[arg] = values[arg]
    # Turn off anti-aliasing for transparent background
    if kwargs.get('bgcolor') == 'transparent':
        kwargs['antialiasing'] = False
    # Renamed parameters: map the public names onto the API's names
    if 'stereolabels' in kwargs:
        kwargs['showstereo'] = kwargs.pop('stereolabels')
    if 'fmt' in kwargs:
        kwargs['format'] = kwargs.pop('fmt')
    # Toggle stereo wedges: the API controls wedges and dashes separately
    if 'stereowedges' in kwargs:
        status = kwargs.pop('stereowedges')
        kwargs.update({'wedges': status, 'dashes': status})
    # Constant values
    kwargs.update({'representation': 'image', 'xml': False})
    url = construct_api_url(**kwargs)
    log.debug('Making image request: %s', url)
    response = urlopen(url)
    return response.read()
3.75236
3.838667
0.977516
def download(input, filename, representation, overwrite=False, resolvers=None, get3d=False, **kwargs):
    """Save a CIR response as a file (simple wrapper around resolve()).

    :param string input: Chemical identifier to resolve
    :param string filename: File path to save to
    :param string representation: Desired output representation
    :param bool overwrite: (Optional) Whether to allow overwriting of an existing file
    :param list(string) resolvers: (Optional) Ordered list of resolvers to use
    :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
    :raises HTTPError: if CIR returns an error code
    :raises ParseError: if CIR response is uninterpretable
    :raises IOError: if overwrite is False and file already exists
    """
    result = resolve(input, representation, resolvers, get3d, **kwargs)
    if not result:
        # Nothing resolved: just log and return
        log.debug('No file to download.')
        return
    # Only overwrite an existing file if explicitly instructed to.
    if os.path.isfile(filename) and not overwrite:
        raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." % filename)
    # Ensure file ends with a newline
    if not result.endswith('\n'):
        result += '\n'
    with open(filename, 'w') as f:
        f.write(result)
3.422349
3.841065
0.89099
def image_url(self):
    """URL of a GIF image.

    FIX: use keyword arguments -- the previous positional call passed
    ``self.get3d`` into construct_api_url()'s `tautomers` slot and forced
    ``get3d=False``.
    """
    return construct_api_url(self.input, 'image', self.resolvers,
                             get3d=self.get3d, tautomers=False, xml=False,
                             **self.kwargs)
24.028467
20.324348
1.18225
def twirl_url(self):
    """Url of a TwirlyMol 3D viewer.

    FIX: use keyword arguments -- the previous positional call passed
    ``self.get3d`` into construct_api_url()'s `tautomers` slot and forced
    ``get3d=False``.
    """
    return construct_api_url(self.input, 'twirl', self.resolvers,
                             get3d=self.get3d, tautomers=False, xml=False,
                             **self.kwargs)
20.554327
18.121027
1.13428
def download(self, filename, representation, overwrite=False):
    """Download the resolved structure as a file.

    :param string filename: File path to save to
    :param string representation: Desired output representation
    :param bool overwrite: (Optional) Whether to allow overwriting of an existing file
    """
    # Delegates to the module-level download() with this molecule's settings.
    download(self.input, filename, representation, overwrite=overwrite,
             resolvers=self.resolvers, get3d=self.get3d, **self.kwargs)
14.732671
34.796371
0.423397
def log(self, *msgs):
    """Log a message.

    When a 'logger' callable is configured in ``self.options`` it receives
    the message tuple; otherwise the tuple is printed.
    """
    has_logger = 'logger' in self.options
    if has_logger:
        self.options['logger'](msgs)
    else:
        print(msgs)
5.735334
5.865087
0.977877
def progress(self, loaded, total, msg=''):
    """Notify listeners of a progress change via a 'progress' event."""
    payload = {'loaded': loaded, 'total': total, 'msg': msg}
    self.fire('progress', payload)
4.184289
3.423244
1.222317
def raw(self, tag, raw, metadata):
    """Create a raw response object with a base64-encoded payload."""
    return {
        'type': 'raw',
        'tag': tag,
        'raw': base64.b64encode(raw),
        'metadata': metadata,
    }
2.656036
2.701548
0.983153
def assert_looks_like(first, second, msg=None):
    """Compare two strings with all contiguous whitespace coalesced.

    Raises AssertionError (with `msg` or a default message showing the
    normalized values) when they differ.
    """
    def canon(s):
        return _re.sub("\s+", " ", s.strip())
    a, b = canon(first), canon(second)
    if a != b:
        raise AssertionError(msg or "%r does not look like %r" % (a, b))
2.319927
2.021074
1.147869
def load_feature(fname, language):
    """Load and parse a feature file (path is made absolute first)."""
    return parse_file(os.path.abspath(fname), language)
5.295112
4.132057
1.281471
def run_steps(spec, language="en"):
    """Execute other steps from within a step definition.

    The way this works is a little exotic, but I couldn't think of a better
    way to work around the fact that this has to be a global function and
    therefore cannot know about which step runner to use (other than making
    the step runner global): walk up the call stack to find the
    `StepsRunner` that is currently running and delegate to it.
    """
    fr = inspect.currentframe()
    while fr:
        if "self" in fr.f_locals:
            f_self = fr.f_locals['self']
            if isinstance(f_self, StepsRunner):
                return f_self.run_steps_from_string(spec, language)
        # keep walking toward the outermost frame
        fr = fr.f_back
5.932955
5.811332
1.020929
def run_steps_from_string(self, spec, language_name='en'):
    """Called from within step definitions to run other steps.

    The caller's filename and line number are captured from the stack so
    that the parsed steps report a useful source location.
    """
    caller = inspect.currentframe().f_back
    line = caller.f_lineno - 1
    fname = caller.f_code.co_filename
    steps = parse_steps(spec, fname, line, load_language(language_name))
    for s in steps:
        self.run_step(s)
3.824205
3.663269
1.043932
def words(self, key):
    """Give all the synonyms of a word in the requested language, falling
    back to the default-language mappings when `key` is missing.

    Synonyms are stored as a single '|'-separated string per key.
    """
    # NOTE(review): encode('utf') yields bytes, and bytes.split("|") fails
    # on Python 3 -- this appears to assume Python 2 str semantics; confirm
    # the supported interpreter before changing.
    if self.default_mappings is not None and key not in self.mappings:
        return self.default_mappings[key].encode('utf').split("|")
    else:
        return self.mappings[key].encode('utf').split("|")
4.313921
4.329421
0.99642
def simulate_async_event():
    """Simulate an asynchronous event with a Twisted `Deferred`.

    Sets ``scc.state`` to 'executing', then schedules the deferred to fire
    with 'done' after one second; the callback records the result on
    ``scc.state``.
    """
    scc.state = 'executing'
    def async_event(result):
        # runs when the deferred fires; records the result on scc
        scc.state = result
        return 'some event result'
    deferred = Deferred()
    reactor.callLater(1, deferred.callback, 'done')  # pylint: disable=E1101
    deferred.addCallback(async_event)
    return deferred
6.039701
5.925115
1.019339
def hook_decorator(cb_type):
    """Build a decorator that registers a function as a `cb_type` hook.

    Supports both bare usage (``@hook``) and tag arguments (``@hook('t')``).
    """
    def decorator_wrapper(*tags_or_func):
        if len(tags_or_func) == 1 and callable(tags_or_func[0]):
            # Used without arguments: wrap the function directly.
            return HookImpl(cb_type, tags_or_func[0])
        # Used with tag arguments: return the real decorator.
        def real_decorator(func):
            return HookImpl(cb_type, func, tags_or_func)
        return real_decorator
    return decorator_wrapper
3.202132
3.235271
0.989757
def load_steps_impl(self, registry, path, module_names=None):
    """Load the step implementations at `path` for the given module names.

    If `module_names` is None then the module 'steps' is searched by
    default. Found StepImpl/HookImpl/transform objects are registered on
    `registry`. Raises StepImplLoadException if a module fails to import.
    """
    if not module_names:
        module_names = ['steps']
    path = os.path.abspath(path)
    for module_name in module_names:
        # modules are cached per (path, module_name) pair
        mod = self.modules.get((path, module_name))
        if mod is None:
            #log.debug("Looking for step def module '%s' in %s" % (module_name, path))
            cwd = os.getcwd()
            if cwd not in sys.path:
                sys.path.append(cwd)
            try:
                # module_name may contain a relative sub-path component
                actual_module_name = os.path.basename(module_name)
                complete_path = os.path.join(path, os.path.dirname(module_name))
                info = imp.find_module(actual_module_name, [complete_path])
            except ImportError:
                #log.debug("Did not find step defs module '%s' in %s" % (module_name, path))
                return
            try:
                # Modules have to be loaded with unique names or else problems arise
                mod = imp.load_module("stepdefs_" + str(self.module_counter), *info)
            except:
                # NOTE(review): bare except also traps KeyboardInterrupt etc.
                exc = sys.exc_info()
                raise StepImplLoadException(exc)
            self.module_counter += 1
            self.modules[(path, module_name)] = mod
        # register every recognized implementation object found in the module
        for item_name in dir(mod):
            item = getattr(mod, item_name)
            if isinstance(item, StepImpl):
                registry.add_step(item.step_type, item)
            elif isinstance(item, HookImpl):
                registry.add_hook(item.cb_type, item)
            elif isinstance(item, NamedTransformImpl):
                registry.add_named_transform(item)
            elif isinstance(item, TransformImpl):
                registry.add_transform(item)
2.659466
2.633233
1.009962
def find_step_impl(self, step):
    """Find the implementation of `step` by matching its match string.

    :returns: ``(StepImpl, args)`` where each arg has been run through the
        first matching transform implementation
    :raises UndefinedStepImpl: if no implementation matches
    :raises AmbiguousStepImpl: if more than one implementation matches
    """
    result = None
    for si in self.steps[step.step_type]:
        matches = si.match(step.match)
        if matches:
            # a second match means the step is ambiguous
            if result:
                raise AmbiguousStepImpl(step, result[0], si)
            args = [self._apply_transforms(arg, si) for arg in matches.groups()]
            result = si, args
    if not result:
        raise UndefinedStepImpl(step)
    return result
4.692966
3.323418
1.41209
def run_command(cmd):
    """Run `cmd` in a shell and return (exit_status, stdout_lines, stderr_lines).

    Output lines are decoded as UTF-8 and stripped of surrounding whitespace.
    """
    child = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE,
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out = [s.decode("utf-8").strip() for s in child.stdout]
    err = [s.decode("utf-8").strip() for s in child.stderr]
    # FIX: Popen.wait() already returns the decoded exit code; the previous
    # os.WEXITSTATUS(child.wait()) re-shifted it and reported 0 for any
    # command exiting with a code below 256.
    return child.wait(), out, err
2.279105
2.207638
1.032373
def make_filter_list(filters):
    """Transform filters into a list of table rows, sorted by index.

    Each row is ``(1-based index, name, msun_vega, msun_ab, lambda_eff,
    fullname)`` with magnitudes formatted to 2 decimal places and the
    effective wavelength to 1.
    """
    filter_list = []
    filter_ids = []
    for f in filters:
        filter_ids.append(f.index)
        # wrap bare URLs in the full name with RST link markup
        fullname = URL_P.sub(r'`<\1>`_', f.fullname)
        filter_list.append((str(f.index + 1), f.name, "{0:.2f}".format(f.msun_vega), "{0:.2f}".format(f.msun_ab), "{0:.1f}".format(f.lambda_eff), fullname))
    # sort rows numerically by their (string) index column
    sortf = lambda item: int(item[0])
    filter_list.sort(key=sortf)
    return filter_list
4.167911
4.123836
1.010688
def make_table(data, col_names):
    """Render `data` rows with `col_names` as an RST-formatted table string.

    Column widths fit the widest cell or header. Code for this generator
    comes from http://stackoverflow.com/a/11350643
    """
    n_cols = len(data[0])
    assert n_cols == len(col_names)
    widths = [
        max(len(col_names[i]), max(len(row[i]) for row in data))
        for i in range(n_cols)
    ]
    fmt = ' '.join('{:<%d}' % w for w in widths)
    divider = fmt.format(*('=' * w for w in widths))
    header = fmt.format(*col_names)
    body = '\n'.join(fmt.format(*row) for row in data)
    return '\n'.join((divider, header, divider, body, divider))
2.050797
2.007055
1.021794
def pdf_merge(inputs: [str], output: str, delete: bool = False):
    """Merge multiple Pdf input files in one output file.

    :param inputs: input files
    :param output: output file (overwrite is confirmed interactively)
    :param delete: delete input files after completion if true
    """
    writer = PdfFileWriter()
    if os.path.isfile(output):
        ans = input(
            "The file '%s' already exists. "
            "Overwrite? Yes/Abort [Y/a]: " % output
        ).lower()
        if ans == "a":
            return
    outputfile = open(output, "wb")
    try:
        infiles = []
        # source files must stay open until the writer has been flushed
        for filename in inputs:
            f = open(filename, "rb")
            reader = PdfFileReader(f)
            for page in reader.pages:
                writer.addPage(page)
            infiles.append(f)
        writer.write(outputfile)
    except FileNotFoundError as e:
        print(e.strerror + ": " + e.filename)
    finally:
        outputfile.close()
        for f in infiles:
            f.close()
    if delete:
        for filename in inputs:
            os.remove(filename)
2.574317
2.637814
0.975928
def pdf_rotate(input: str, counter_clockwise: bool = False, pages: [str] = None, output: str = None):
    """Rotate pages of a Pdf file by 90 degrees.

    :param input: pdf file to rotate
    :param counter_clockwise: rotate counter clockwise if true else clockwise
    :param pages: list of page numbers/ranges to rotate; None rotates all
    :param output: output file; None overwrites `input` (after confirmation)
    """
    infile = open(input, "rb")
    reader = PdfFileReader(infile)
    writer = PdfFileWriter()
    # Resolve the page selection once; None means "rotate everything".
    if pages is not None:
        pages = parse_rangearg(pages, len(reader.pages))
    # FIX: keep every page in the document, rotating only the selected
    # ones. Previously only the selected pages were written out, and the
    # membership test compared positions in the filtered list against
    # original page numbers, so selective rotation dropped/skipped pages.
    for i, page in enumerate(reader.pages):
        if pages is None or i in pages:
            if counter_clockwise:
                writer.addPage(page.rotateCounterClockwise(90))
            else:
                writer.addPage(page.rotateClockwise(90))
        else:
            writer.addPage(page)
    # Open the requested output, or a temporary file for in-place editing.
    if output is None:
        outfile = NamedTemporaryFile(delete=False)
    else:
        if not os.path.isfile(output) or overwrite_dlg(output):
            outfile = open(output, "wb")
        else:
            return
    writer.write(outfile)
    infile.close()
    outfile.close()
    # In-place edit: move the temporary file over the original on confirm.
    if output is None:
        if overwrite_dlg(input):
            os.remove(input)
            move(outfile.name, input)
        else:
            os.remove(outfile.name)
2.46266
2.526384
0.974777
def pdf_copy(input: str, output: str, pages: [int], yes_to_all=False):
    """Copy pages from the input file in a new output file.

    :param input: name of the input pdf file
    :param output: name of the output pdf file
    :param pages: list containing the page numbers to copy; None copies all
    :param yes_to_all: skip the overwrite confirmation dialog if true
    """
    if not os.path.isfile(input):
        print("Error. The file '%s' does not exist." % input)
        return
    # refuse to clobber an existing output unless confirmed (or forced)
    if os.path.isfile(output) and not yes_to_all and not overwrite_dlg(output):
        return
    with open(input, "rb") as inputfile:
        reader = PdfFileReader(inputfile)
        outputfile = open(output, "wb")
        writer = PdfFileWriter()
        # default to copying every page
        if pages is None:
            pages = range(len(reader.pages))
        else:
            pages = parse_rangearg(pages, len(reader.pages))
        # copy the selected pages in ascending order
        for pagenr in sorted(pages):
            page = reader.getPage(pagenr)
            writer.addPage(page)
        writer.write(outputfile)
        outputfile.close()
2.404801
2.543652
0.945413
def pdf_split(input: str, output: str, stepsize: int = 1, sequence: [int] = None):
    """Split the input file in multiple output files.

    :param input: name of the input file
    :param output: base name of the output files (a _<n>.pdf suffix is
        appended); defaults to the input name without extension
    :param stepsize: how many pages per file, only if sequence is None
    :param sequence: list with number of pages per file
    """
    output = output or os.path.splitext(input)[0]
    if not os.path.isfile(input):
        print("Error. The file '%s' does not exist." % input)
        return
    with open(input, "rb") as inputfile:
        reader = PdfFileReader(inputfile)
        pagenr = 0
        outputfile = None
        if sequence is None:
            # fixed-size chunks of `stepsize` pages
            for i, page in enumerate(reader.pages):
                if not i % stepsize:
                    # start a new chunk file
                    pagenr += 1
                    outputfile = open(output + "_%i.pdf" % pagenr, "wb")
                    writer = PdfFileWriter()
                writer.addPage(page)
                if not (i + 1) % stepsize:
                    # chunk is full: flush and close it
                    writer.write(outputfile)
                    outputfile.close()
        else:
            # chunk sizes given explicitly by `sequence`
            sequence = map(int, sequence)
            iter_pages = iter(reader.pages)
            for filenr, pagecount in enumerate(sequence):
                with open(
                    output + "_%i.pdf" % (filenr + 1), "wb"
                ) as outputfile:
                    writer = PdfFileWriter()
                    for i in range(pagecount):
                        try:
                            page = next(iter_pages)
                            writer.addPage(page)
                        except StopIteration:
                            # ran out of pages: flush what we have and stop
                            writer.write(outputfile)
                            return
                    writer.write(outputfile)
        # flush a final partial chunk left open by the stepsize branch
        if not outputfile.closed:
            writer.write(outputfile)
            outputfile.close()
2.080993
2.052156
1.014052
def pdf_zip(input1: str, input2: str, output: str, delete: bool = False, revert: bool = False):
    """Zip pages of input1 and input2 in one output file.

    Useful for putting even and odd pages together in one document.

    :param input1: first input file
    :param input2: second input file
    :param output: output file (overwrite is confirmed interactively)
    :param delete: if true the input files will be deleted after zipping
    :param revert: if true the second file's pages are used in reverse order
    """
    if os.path.isfile(output):
        ans = input(
            "The file '%s' already exists. "
            "Overwrite? Yes/Abort [Y/a]: " % output
        ).lower()
        if ans not in ["y", ""]:
            return
    outputfile = open(output, "wb")
    try:
        f1, f2 = open(input1, "rb"), open(input2, "rb")
        r1, r2 = PdfFileReader(f1), PdfFileReader(f2)
        writer = PdfFileWriter()
        pages1 = [page for page in r1.pages]
        pages2 = [page for page in r2.pages]
        # interleave pages pairwise; zip() stops at the shorter document
        if not revert:
            for p1, p2 in zip(pages1, pages2):
                writer.addPage(p1)
                writer.addPage(p2)
        else:
            for p1, p2 in zip(pages1, reversed(pages2)):
                writer.addPage(p1)
                writer.addPage(p2)
        writer.write(outputfile)
        f1.close()
        f2.close()
    except FileNotFoundError as e:
        print(e.strerror + ": " + e.filename)
    finally:
        outputfile.close()
    if delete:
        os.remove(input1)
        os.remove(input2)
1.94979
1.985102
0.982211
def pdf_insert(dest: str, source: str, pages: [str] = None, index: int = None, output: str = None):
    """Insert pages from one file into another.

    :param dest: Destination file
    :param source: Source file
    :param pages: list of page numbers to insert (default: all pages)
    :param index: 1-based position in the destination where pages are
        inserted; None appends them at the end
    :param output: output file; None overwrites `dest` (after confirmation)
    """
    if output is not None and os.path.isfile(output):
        ans = input(
            "The file '%s' already exists. "
            "Overwrite? Yes/Abort [Y/a]: " % output
        ).lower()
        if ans not in ["y", ""]:
            return
    writer = PdfFileWriter()
    # copy the destination's pages first
    destfile = open(dest, "rb")
    destreader = PdfFileReader(destfile)
    for page in destreader.pages:
        writer.addPage(page)
    srcfile = open(source, "rb")
    srcreader = PdfFileReader(srcfile)
    # Convert the 1-based user index to a clamped 0-based position.
    # FIX: only do this when an index was given -- the unconditional
    # limit(index - 1, ...) raised a TypeError for the default index=None,
    # making the documented append-at-end path unreachable.
    if index is not None:
        index = limit(index - 1, 0, len(destreader.pages))
    if pages is None:
        # no page selection: insert (or append) every source page
        for i, page in enumerate(srcreader.pages):
            if index is None:
                writer.addPage(page)
            else:
                writer.insertPage(page, index + i)
    else:
        pages = parse_rangearg(pages, len(srcreader.pages))
        for i, pagenr in enumerate(pages):
            page = srcreader.getPage(pagenr)
            if index is None:
                writer.addPage(page)
            else:
                writer.insertPage(page, index + i)
    if output is None:
        # Overwrite dest: write to a temp file first, then move into place.
        ans = input(
            "Overwrite the file '%s'? Yes/Abort [Y/a]: " % dest
        ).lower()
        if ans in ["y", ""]:
            tempfile = NamedTemporaryFile(delete=False)
            writer.write(tempfile)
            tempfile.close()
            move(tempfile.name, dest)
    else:
        with open(output, "wb") as outfile:
            writer.write(outfile)
    destfile.close()
    srcfile.close()
2.518907
2.464945
1.021892
def pdf_remove(source: str, pages: [str], output: str = None):
    """
    Remove pages from a PDF source file.

    :param source: pdf source file
    :param pages: list of page numbers or range expressions to remove
    :param output: pdf output file; when None, source is overwritten in
        place after confirmation
    """
    if output is not None and os.path.isfile(output):
        if not overwrite_dlg(output):
            return
    writer = PdfFileWriter()
    with open(source, "rb") as srcfile:
        srcreader = PdfFileReader(srcfile)
        # Set for O(1) membership tests while filtering pages.
        removed = set(parse_rangearg(pages, len(srcreader.pages)))
        for pagenr, page in enumerate(srcreader.pages):
            if pagenr not in removed:
                writer.addPage(page)
        # Open the output file, or a temporary file for in-place editing.
        if output is None:
            outfile = NamedTemporaryFile(delete=False)
        else:
            outfile = open(output, "wb")
        try:
            writer.write(outfile)
        finally:
            outfile.close()
    # In-place mode: move the temporary file over the source on confirmation.
    if output is None:
        if overwrite_dlg(source):
            os.remove(source)
            move(outfile.name, source)
        else:
            # BUGFIX: the original called os.remove(outfile) with the closed
            # file OBJECT, raising TypeError; remove the temp file by path.
            os.remove(outfile.name)
3.068802
3.081594
0.995849
def pdf_add(dest: str, source: str, pages: [str], output: str = None):
    """
    Add pages from a source pdf file to an output file.
    If the output file does not exist a new file will be created.

    :param source: source pdf file
    :param dest: destination pdf file
    :param pages: list of page numbers or range expressions; all pages of
        source are added when None
    :param output: output pdf file; when None, dest is overwritten in
        place after confirmation
    """
    # `output` now defaults to None: the body already handles the in-place
    # case, but the original forced callers to pass None explicitly.
    if output is not None and os.path.isfile(output):
        if not overwrite_dlg(output):
            return
    writer = PdfFileWriter()
    temp_name = None
    # Context managers close both inputs on every path; the original left
    # them open when the in-place overwrite was declined.
    with open(dest, "rb") as destfile, open(source, "rb") as srcfile:
        # Copy every page of the destination file first.
        destreader = PdfFileReader(destfile)
        for page in destreader.pages:
            writer.addPage(page)
        # Append the requested pages (or all pages) from the source file.
        srcreader = PdfFileReader(srcfile)
        if pages is None:
            for page in srcreader.pages:
                writer.addPage(page)
        else:
            for pagenr in parse_rangearg(pages, len(srcreader.pages)):
                writer.addPage(srcreader.getPage(pagenr))
        if output is None:
            # Write into a temporary file first, then overwrite dest below.
            if overwrite_dlg(dest):
                tmp = NamedTemporaryFile(delete=False)
                writer.write(tmp)
                tmp.close()
                temp_name = tmp.name
        else:
            with open(output, "wb") as outfile:
                writer.write(outfile)
    # Replace dest only after both input handles are closed.
    if temp_name is not None:
        os.remove(dest)
        move(temp_name, dest)
2.509356
2.473627
1.014444
def extract_version():
    """Extract the version string from the package's ``__init__`` module."""
    with open('pdftools/__init__.py', 'r') as init_file:
        match = _version_re.search(init_file.read())
    # The matched group is a quoted literal; literal_eval strips the quotes.
    return str(ast.literal_eval(match.group(1)))
3.191365
3.192099
0.99977
def add_pypiper_args(parser, groups=("pypiper", ), args=None, required=None,
                     all_args=False):
    """
    Use this to add standardized pypiper arguments to your python pipeline.

    There are two ways to use `add_pypiper_args`: by specifying argument
    groups, or by specifying individual arguments. Specifying argument groups
    will add multiple arguments to your parser; these convenient argument
    groupings make it easy to add arguments to certain types of pipeline. For
    example, to make a looper-compatible pipeline, use
    `groups = ["pypiper", "looper"]`.

    :param argparse.ArgumentParser parser: ArgumentParser object from a
        pipeline
    :param str | Iterable[str] groups: Adds arguments belong to specified
        group of args. Options: pypiper, config, looper, resources, common,
        ngs, all.
    :param str | Iterable[str] args: You may specify a list of specific
        arguments one by one.
    :param Iterable[str] required: Arguments to be flagged as 'required' by
        argparse.
    :param bool all_args: Whether to include all of pypiper's arguments
        defined here.
    :return argparse.ArgumentParser: A new ArgumentParser object, with
        selected pypiper arguments added
    """
    # Resolve the requested selection, then attach it to the parser.
    selected = _determine_args(
        argument_groups=groups,
        arguments=args,
        use_all_args=all_args,
    )
    return _add_args(parser, selected, required)
4.330609
5.598032
0.773595