_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q13400
|
generate_id
|
train
|
def generate_id():
    """ Generate a 64bit base 16 ID for use as a Span or Trace ID """
    global _current_pid

    pid = os.getpid()
    if _current_pid != pid:
        # Re-seed after a fork so child processes don't emit duplicate IDs
        _current_pid = pid
        _rnd.seed(int(1000000 * time.time()) ^ pid)
    # '016x' zero-pads to 16 hex chars in one step (replaces '02x' + zfill(16))
    # and avoids shadowing the builtin `id`
    return format(_rnd.randint(0, 18446744073709551615), '016x')
|
python
|
{
"resource": ""
}
|
q13401
|
package_version
|
train
|
def package_version():
    """
    Determine the version of this package.

    :return: String representing known version; 'unknown' when the
        'instana' distribution is not installed, '' on unexpected errors.
    """
    version = ""
    try:
        version = pkg_resources.get_distribution('instana').version
    except pkg_resources.DistributionNotFound:
        version = 'unknown'
    except Exception:
        # Preserve the historical behavior of the old ``finally: return``:
        # any unexpected failure yields the default empty string instead of
        # silently swallowing the exception inside a finally block.
        pass
    return version
|
python
|
{
"resource": ""
}
|
q13402
|
strip_secrets
|
train
|
def strip_secrets(qp, matcher, kwlist):
    """
    This function will scrub the secrets from a query param string based on the passed in matcher and kwlist.

    blah=1&secret=password&valid=true will result in blah=1&secret=<redacted>&valid=true

    You can even pass in path query combinations:

    /signup?blah=1&secret=password&valid=true will result in /signup?blah=1&secret=<redacted>&valid=true

    :param qp: a string representing the query params in URL form (unencoded)
    :param matcher: the matcher to use
    :param kwlist: the list of keywords to match
    :return: a scrubbed query param string
    """
    path = None
    try:
        if qp is None:
            return ''

        if type(kwlist) is not list:
            logger.debug("strip_secrets: bad keyword list")
            return qp

        # If there are no key=values, then just return
        if '=' not in qp:
            return qp

        if '?' in qp:
            path, query = qp.split('?')
        else:
            query = qp

        params = parse.parse_qsl(query, keep_blank_values=True)
        redacted = ['<redacted>']

        # One predicate per matcher name replaces five copy-pasted loops;
        # each predicate takes (param_key, keyword) and returns truthy on a hit.
        predicates = {
            'equals-ignore-case': lambda k, kw: k.lower() == kw.lower(),
            'equals': lambda k, kw: k == kw,
            'contains-ignore-case': lambda k, kw: kw.lower() in k.lower(),
            'contains': lambda k, kw: kw in k,
            'regex': lambda k, kw: re.match(kw, k),
        }
        predicate = predicates.get(matcher)
        if predicate is None:
            logger.debug("strip_secrets: unknown matcher")
            return qp

        for keyword in kwlist:
            for index, kv in enumerate(params):
                if predicate(kv[0], keyword):
                    params[index] = (kv[0], redacted)

        if sys.version_info < (3, 0):
            result = urllib.urlencode(params, doseq=True)
        else:
            result = parse.urlencode(params, doseq=True)

        query = parse.unquote(result)

        if path:
            query = path + '?' + query

        return query
    except Exception:
        # best-effort scrubber: log and fall through (returns None) rather
        # than break the caller; narrowed from a bare except so
        # KeyboardInterrupt/SystemExit still propagate
        logger.debug("strip_secrets", exc_info=True)
|
python
|
{
"resource": ""
}
|
q13403
|
get_py_source
|
train
|
def get_py_source(file):
    """
    Retrieves and returns the source code for any Python
    files requested by the UI via the host agent

    @param file [String] The fully qualified path to a file
    @return dict with either a "data" key (the source text) or an "error" key
    """
    try:
        if regexp_py.search(file) is None:
            return {"error": "Only Python source files are allowed. (*.py)"}

        # context manager guarantees the file handle is closed
        with open(file, 'r') as pyfile:
            return {"data": pyfile.read()}
    except Exception as e:
        # report any read/permission problem back to the UI rather than raise
        return {"error": str(e)}
|
python
|
{
"resource": ""
}
|
q13404
|
make_middleware
|
train
|
def make_middleware(app=None, *args, **kw):
    """ Given an app, return that app wrapped in iWSGIMiddleware """
    return iWSGIMiddleware(app, *args, **kw)
|
python
|
{
"resource": ""
}
|
q13405
|
eum_snippet
|
train
|
def eum_snippet(trace_id=None, eum_api_key=None, meta=None):
    """
    Return an EUM snippet for use in views, templates and layouts that reports
    client side metrics to Instana that will automagically be linked to the
    current trace.

    @param trace_id [optional] the trace ID to insert into the EUM string
    @param eum_api_key [optional] the EUM API key from your Instana dashboard
    @param meta [optional] optional additional KVs you want reported with the
    EUM metrics
    @return string
    """
    try:
        # a `meta={}` default would be shared across calls (mutable default
        # argument); use the None sentinel instead
        if meta is None:
            meta = {}

        # close the template file deterministically instead of leaking the handle
        with open(os.path.dirname(__file__) + '/eum.js') as eum_file:
            eum_src = Template(eum_file.read())

        # Prepare the standard required IDs
        ids = {}
        ids['meta_kvs'] = ''

        parent_span = tracer.active_span

        if trace_id or parent_span:
            ids['trace_id'] = trace_id or parent_span.trace_id
        else:
            # No trace_id passed in and tracer doesn't show an active span so
            # return nothing, nada & zip.
            return ''

        if eum_api_key:
            ids['eum_api_key'] = eum_api_key
        else:
            ids['eum_api_key'] = global_eum_api_key

        # Process passed in EUM 'meta' key/values
        for key, value in meta.items():
            ids['meta_kvs'] += ("'ineum('meta', '%s', '%s');'" % (key, value))

        return eum_src.substitute(ids)
    except Exception as e:
        logger.debug(e)
        return ''
|
python
|
{
"resource": ""
}
|
q13406
|
InstanaTracer.start_span
|
train
|
def start_span(self,
               operation_name=None,
               child_of=None,
               references=None,
               tags=None,
               start_time=None,
               ignore_active_span=False):
    """Start and return a new span.

    Taken from BasicTracer so we can override generate_id calls to ours.

    :param operation_name: name of the span; also decides whether a full
        or fingerprint backtrace is attached (see bottom of method)
    :param child_of: a Span or SpanContext to use as parent
    :param references: opentracing references; only the first is used
    :param tags: optional dict of tags for the new span
    :param start_time: epoch seconds; defaults to now
    :param ignore_active_span: when True, do not fall back to the
        scope manager's active span as the parent
    :return: a new InstanaSpan
    """
    start_time = time.time() if start_time is None else start_time

    # See if we have a parent_ctx in `references`
    parent_ctx = None
    if child_of is not None:
        # `child_of` may be a bare SpanContext or a Span carrying one
        parent_ctx = (
            child_of if isinstance(child_of, ot.SpanContext)
            else child_of.context)
    elif references is not None and len(references) > 0:
        # TODO only the first reference is currently used
        parent_ctx = references[0].referenced_context

    # retrieve the active SpanContext
    if not ignore_active_span and parent_ctx is None:
        scope = self.scope_manager.active
        if scope is not None:
            parent_ctx = scope.span.context

    # Assemble the child ctx
    gid = generate_id()
    ctx = SpanContext(span_id=gid)
    if parent_ctx is not None:
        if parent_ctx._baggage is not None:
            # copy so mutations on the child don't leak into the parent
            ctx._baggage = parent_ctx._baggage.copy()
        ctx.trace_id = parent_ctx.trace_id
        ctx.sampled = parent_ctx.sampled
    else:
        # root span: its own id doubles as the trace id
        ctx.trace_id = gid
        ctx.sampled = self.sampler.sampled(ctx.trace_id)

    # Tie it all together
    span = InstanaSpan(self,
                       operation_name=operation_name,
                       context=ctx,
                       parent_id=(None if parent_ctx is None else parent_ctx.span_id),
                       tags=tags,
                       start_time=start_time)

    if operation_name in self.recorder.exit_spans:
        self.__add_stack(span)
    elif operation_name in self.recorder.entry_spans:
        # For entry spans, add only a backtrace fingerprint
        self.__add_stack(span, limit=2)

    return span
|
python
|
{
"resource": ""
}
|
q13407
|
InstanaTracer.__add_stack
|
train
|
def __add_stack(self, span, limit=None):
    """Attach the current backtrace to ``span.stack``.

    Each recorded frame is a dict ``{"c": filename, "n": lineno, "m": name}``.
    Instana-internal frames are skipped unless the INSTANA_DEV environment
    variable is set.

    :param span: the span to annotate (its ``stack`` attribute is replaced)
    :param limit: optional maximum number of frames to record
    """
    span.stack = []
    frame_count = 0
    tb = traceback.extract_stack()
    # walk from the innermost (most recent) frame outward
    tb.reverse()

    for frame in tb:
        if limit is not None and frame_count >= limit:
            break

        # Exclude Instana frames unless we're in dev mode
        if "INSTANA_DEV" not in os.environ:
            if re_tracer_frame.search(frame[0]) is not None:
                continue

            if re_with_stan_frame.search(frame[2]) is not None:
                continue

        span.stack.append({
            "c": frame[0],
            "n": frame[1],
            "m": frame[2]
        })
        # NOTE(review): the counter only advances when a limit was given, so
        # unlimited calls collect every remaining non-Instana frame
        if limit is not None:
            frame_count += 1
|
python
|
{
"resource": ""
}
|
q13408
|
hook
|
train
|
def hook(module):
    """ Hook method to install the Instana middleware into Flask """
    dev_mode = "INSTANA_DEV" in os.environ
    if dev_mode:
        # announce the instrumentation loudly in development
        print("==============================================================")
        print("Instana: Running flask hook")
        print("==============================================================")
    wrapt.wrap_function_wrapper('flask', 'Flask.__init__', wrapper)
|
python
|
{
"resource": ""
}
|
q13409
|
error_class_for_http_status
|
train
|
def error_class_for_http_status(status):
    """Return the appropriate `ResponseError` subclass for the given
    HTTP status code."""
    try:
        return error_classes[status]
    except KeyError:
        # No registered class for this exact status: build a factory that
        # picks a generic client/server/other error by status range.
        def new_status_error(xml_response):
            if 400 < status < 500:
                return UnexpectedClientError(status, xml_response)
            if 500 < status < 600:
                return UnexpectedServerError(status, xml_response)
            return UnexpectedStatusError(status, xml_response)
        return new_status_error
|
python
|
{
"resource": ""
}
|
q13410
|
ResponseError.response_doc
|
train
|
def response_doc(self):
    """The XML document received from the service.

    Parsed lazily from ``self.response_xml`` and memoized in the
    instance ``__dict__``.
    """
    cache = self.__dict__
    if 'response_doc' not in cache:
        cache['response_doc'] = ElementTree.fromstring(self.response_xml)
    return cache['response_doc']
|
python
|
{
"resource": ""
}
|
q13411
|
ValidationError.transaction_error_code
|
train
|
def transaction_error_code(self):
    """The machine-readable error code for a transaction error.

    Returns ``None`` when no ``transaction_error``/``error_code``
    elements are present in the response document.
    """
    error = self.response_doc.find('transaction_error')
    if error is None:
        return None
    code = error.find('error_code')
    return code.text if code is not None else None
|
python
|
{
"resource": ""
}
|
q13412
|
ValidationError.errors
|
train
|
def errors(self):
    """A dictionary of error objects, keyed on the name of the
    request field that was invalid.

    Each error value has `field`, `symbol`, and `message`
    attributes describing the particular invalidity of that field.
    A field with several errors maps to a list of such objects.
    """
    # Return the memoized result when available (EAFP caching).
    try:
        return self.__dict__['errors']
    except KeyError:
        pass

    suberrors = dict()
    for err in self.response_doc.findall('error'):
        field = err.attrib['field']
        symbol = err.attrib['symbol']
        message = err.text

        sub_err = self.Suberror(field, symbol, message)

        # If the field already has an error, collect its suberrors in a list
        # (isinstance replaces the fragile `type(...) != list` check)
        if field in suberrors:
            if not isinstance(suberrors[field], list):
                suberrors[field] = [suberrors[field]]
            suberrors[field].append(sub_err)
        else:
            suberrors[field] = sub_err

    self.__dict__['errors'] = suberrors
    return suberrors
|
python
|
{
"resource": ""
}
|
q13413
|
objects_for_push_notification
|
train
|
def objects_for_push_notification(notification):
    """Decode a push notification with the given body XML.

    Returns a dictionary containing the constituent objects of the push
    notification. The kind of push notification is given in the ``"type"``
    member of the returned dictionary.
    """
    root = ElementTree.fromstring(notification)
    decoded = {'type': root.tag}
    # each direct child becomes a deserialized resource keyed by its tag
    for child in root:
        decoded[child.tag] = Resource.value_for_element(child)
    return decoded
|
python
|
{
"resource": ""
}
|
q13414
|
Account.invoice
|
train
|
def invoice(self, **kwargs):
    """Create an invoice for any outstanding adjustments this account has."""
    url = urljoin(self._url, '/invoices')

    if kwargs:
        # serialize the keyword arguments into an Invoice payload
        body = Invoice(**kwargs)
        headers = {'Content-Type': 'application/xml; charset=utf-8'}
        response = self.http_request(url, 'POST', body, headers)
    else:
        response = self.http_request(url, 'POST')

    if response.status != 201:
        self.raise_http_error(response)

    response_xml = response.read()
    logging.getLogger('recurly.http.response').debug(response_xml)
    elem = ElementTree.fromstring(response_xml)
    return InvoiceCollection.from_element(elem)
|
python
|
{
"resource": ""
}
|
q13415
|
Account.subscribe
|
train
|
def subscribe(self, subscription):
    """Create the given `Subscription` for this existing account."""
    return subscription.post(urljoin(self._url, '/subscriptions'))
|
python
|
{
"resource": ""
}
|
q13416
|
Account.update_billing_info
|
train
|
def update_billing_info(self, billing_info):
    """Change this account's billing information to the given `BillingInfo`."""
    url = urljoin(self._url, '/billing_info')
    headers = {'Content-Type': 'application/xml; charset=utf-8'}
    response = billing_info.http_request(url, 'PUT', billing_info, headers)

    if response.status == 201:
        # newly created: remember its canonical location
        billing_info._url = response.getheader('Location')
    elif response.status != 200:
        billing_info.raise_http_error(response)

    response_xml = response.read()
    logging.getLogger('recurly.http.response').debug(response_xml)
    billing_info.update_from_element(ElementTree.fromstring(response_xml))
|
python
|
{
"resource": ""
}
|
q13417
|
Account.create_shipping_address
|
train
|
def create_shipping_address(self, shipping_address):
    """Creates a shipping address on an existing account. If you are
    creating an account, you can embed the shipping addresses with the
    request"""
    return shipping_address.post(urljoin(self._url, '/shipping_addresses'))
|
python
|
{
"resource": ""
}
|
q13418
|
GiftCard.preview
|
train
|
def preview(self):
    """Preview the purchase of this gift card"""
    # persisted cards preview against their own URL; new ones use the
    # collection endpoint
    if hasattr(self, '_url'):
        target = self._url + '/preview'
    else:
        target = urljoin(recurly.base_uri(), self.collection_path + '/preview')
    return self.post(target)
|
python
|
{
"resource": ""
}
|
q13419
|
GiftCard.redeem
|
train
|
def redeem(self, account_code):
    """Redeem this gift card on the specified account code"""
    redemption_path = '%s/redeem' % (self.redemption_code)
    if hasattr(self, '_url'):
        target = urljoin(self._url, '/redeem')
    else:
        target = urljoin(recurly.base_uri(), self.collection_path + '/' + redemption_path)
    # the recipient account is posted as the request body
    return self.post(target, _RecipientAccount(account_code=account_code))
|
python
|
{
"resource": ""
}
|
q13420
|
Invoice.pdf
|
train
|
def pdf(cls, uuid):
    """Return a PDF of the invoice identified by the UUID

    This is a raw string, which can be written to a file with:
    `
    with open('invoice.pdf', 'w') as invoice_file:
        invoice_file.write(recurly.Invoice.pdf(uuid))
    `
    """
    url = urljoin(base_uri(), cls.member_path % (uuid,))
    # the Accept header asks the service for the PDF representation
    response = cls.http_request(url, headers={'Accept': 'application/pdf'})
    return response.read()
|
python
|
{
"resource": ""
}
|
q13421
|
Subscription.pause
|
train
|
def pause(self, remaining_pause_cycles):
    """Pause a subscription"""
    url = urljoin(self._url, '/pause')

    # build the <subscription><remaining_pause_cycles>…</…></…> payload
    root = ElementTreeBuilder.Element(self.nodename)
    root.append(Resource.element_for_value('remaining_pause_cycles',
                                           remaining_pause_cycles))
    body = ElementTree.tostring(root, encoding='UTF-8')

    headers = {'Content-Type': 'application/xml; charset=utf-8'}
    response = self.http_request(url, 'PUT', body, headers)
    if response.status not in (200, 201, 204):
        self.raise_http_error(response)

    self.update_from_element(ElementTree.fromstring(response.read()))
|
python
|
{
"resource": ""
}
|
q13422
|
Subscription.create_usage
|
train
|
def create_usage(self, sub_add_on, usage):
    """Record the usage on the given subscription add on and update the
    usage object with returned xml"""
    endpoint = '/add_ons/%s/usage' % (sub_add_on.add_on_code,)
    return usage.post(urljoin(self._url, endpoint))
|
python
|
{
"resource": ""
}
|
q13423
|
Transaction.get_refund_transaction
|
train
|
def get_refund_transaction(self):
    """Retrieve the refund transaction for this transaction, immediately
    after refunding.

    After calling `refund()` to refund a transaction, call this method to
    retrieve the new transaction representing the refund.
    """
    # the refund URL is only set after a successful refund()
    if not hasattr(self, '_refund_transaction_url'):
        raise ValueError("No refund transaction is available for this transaction")

    resp, elem = self.element_for_url(self._refund_transaction_url)
    return self.value_for_element(elem)
|
python
|
{
"resource": ""
}
|
q13424
|
Transaction.refund
|
train
|
def refund(self, **kwargs):
    """Refund this transaction.
    Calling this method returns the refunded transaction (that is,
    ``self``) if the refund was successful, or raises a `ResponseError` if
    an error occurred requesting the refund. After a successful call to
    `refund()`, to retrieve the new transaction representing the refund,
    use the `get_refund_transaction()` method.

    Raises `AttributeError` when no cached XML element or no refund
    link is available for this transaction.
    """
    # Find the URL and method to refund the transaction.
    try:
        selfnode = self._elem
    except AttributeError:
        # no XML element cached for this instance, so no refund link exists
        raise AttributeError('refund')
    url, method = None, None
    # scan the <a> link elements; if several carry name="refund",
    # the last one wins
    for anchor_elem in selfnode.findall('a'):
        if anchor_elem.attrib.get('name') == 'refund':
            url = anchor_elem.attrib['href']
            method = anchor_elem.attrib['method'].upper()
    if url is None or method is None:
        raise AttributeError("refund")  # should do something more specific probably
    actionator = self._make_actionator(url, method, extra_handler=self._handle_refund_accepted)
    return actionator(**kwargs)
|
python
|
{
"resource": ""
}
|
q13425
|
Plan.get_add_on
|
train
|
def get_add_on(self, add_on_code):
    """Return the `AddOn` for this plan with the given add-on code."""
    add_on_url = urljoin(self._url, '/add_ons/%s' % (add_on_code,))
    _resp, elem = AddOn.element_for_url(add_on_url)
    return AddOn.from_element(elem)
|
python
|
{
"resource": ""
}
|
q13426
|
Plan.create_add_on
|
train
|
def create_add_on(self, add_on):
    """Make the given `AddOn` available to subscribers on this plan."""
    return add_on.post(urljoin(self._url, '/add_ons'))
|
python
|
{
"resource": ""
}
|
q13427
|
_load_track_estimates
|
train
|
def _load_track_estimates(track, estimates_dir, output_dir):
    """load estimates from disk instead of processing"""
    user_results = {}
    track_estimate_dir = os.path.join(estimates_dir, track.subset, track.name)

    for target_path in glob.glob(track_estimate_dir + '/*.wav'):
        # target name is the wav filename without its extension
        name = op.splitext(os.path.basename(target_path))[0]
        try:
            audio, _ = sf.read(target_path, always_2d=True)
        except RuntimeError:
            # unreadable estimate file: skip it silently
            continue
        user_results[name] = audio

    if user_results:
        eval_mus_track(track, user_results, output_dir=output_dir)

    return None
|
python
|
{
"resource": ""
}
|
q13428
|
eval_dir
|
train
|
def eval_dir(
    reference_dir,
    estimates_dir,
    output_dir=None,
    mode='v4',
    win=1.0,
    hop=1.0,
):
    """Compute bss_eval metrics for two given directories assuming file
    names are identical for both, reference source and estimates.

    Parameters
    ----------
    reference_dir : str
        path to reference sources directory.
    estimates_dir : str
        path to estimates directory.
    output_dir : str
        path to output directory used to save evaluation results. Defaults to
        `None`, meaning no evaluation files will be saved.
    mode : str
        bsseval version number. Defaults to 'v4'.
    win : float
        window size in seconds.
    hop : float
        hop size in seconds.

    Returns
    -------
    scores : EvalStore
        scores object that holds the framewise and global evaluation scores.
    """
    reference = []
    estimates = []

    data = EvalStore(win=win, hop=hop)

    global_rate = None
    reference_glob = os.path.join(reference_dir, '*.wav')
    # Load in each reference file in the supplied dir
    for reference_file in glob.glob(reference_glob):
        ref_audio, rate = sf.read(
            reference_file,
            always_2d=True
        )
        # Make sure fs is the same for all files
        assert (global_rate is None or rate == global_rate)
        global_rate = rate
        reference.append(ref_audio)
    if not reference:
        raise ValueError('`reference_dir` contains no wav files')

    # NOTE(review): references and estimates are paired by glob iteration
    # order, so file names must match across the two dirs — confirm callers
    # keep identical file sets
    estimated_glob = os.path.join(estimates_dir, '*.wav')
    targets = []
    for estimated_file in glob.glob(estimated_glob):
        targets.append(os.path.basename(estimated_file))
        ref_audio, rate = sf.read(
            estimated_file,
            always_2d=True
        )
        assert (global_rate is None or rate == global_rate)
        global_rate = rate
        estimates.append(ref_audio)

    # win/hop are converted from seconds to samples
    SDR, ISR, SIR, SAR = evaluate(
        reference,
        estimates,
        win=int(win*global_rate),
        hop=int(hop*global_rate),
        mode=mode
    )
    # one entry per estimated file, keyed by its file name
    for i, target in enumerate(targets):
        values = {
            "SDR": SDR[i].tolist(),
            "SIR": SIR[i].tolist(),
            "ISR": ISR[i].tolist(),
            "SAR": SAR[i].tolist()
        }

        data.add_target(
            target_name=target,
            values=values
        )
    return data
|
python
|
{
"resource": ""
}
|
q13429
|
eval_mus_dir
|
train
|
def eval_mus_dir(
    dataset,
    estimates_dir,
    output_dir=None,
    *args, **kwargs
):
    """Run musdb.run for the purpose of evaluation of musdb estimate dir

    Parameters
    ----------
    dataset : DB(object)
        Musdb Database object.
    estimates_dir : str
        Path to estimates folder.
    output_dir : str
        Output folder where evaluation json files are stored.
    *args
        Variable length argument list for `musdb.run()`.
    **kwargs
        Arbitrary keyword arguments for `musdb.run()`.
    """
    # create a new musdb instance for estimates with the same file structure
    est = musdb.DB(root_dir=estimates_dir, is_wav=True)

    # figure out which track names actually have estimates on disk
    tracknames = [estimate_track.name for estimate_track in est.load_mus_tracks()]

    # load only musdb tracks where we have estimate tracks
    tracks = dataset.load_mus_tracks(tracknames=tracknames)

    # wrap the estimate loader so musdb.run can call it per track
    run_fun = functools.partial(
        _load_track_estimates,
        estimates_dir=estimates_dir,
        output_dir=output_dir
    )

    # evaluate tracks
    dataset.run(run_fun, estimates_dir=None, tracks=tracks, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q13430
|
eval_mus_track
|
train
|
def eval_mus_track(
    track,
    user_estimates,
    output_dir=None,
    mode='v4',
    win=1.0,
    hop=1.0
):
    """Compute all bss_eval metrics for the musdb track and estimated signals,
    given by a `user_estimates` dict.

    Parameters
    ----------
    track : Track
        musdb track object loaded using musdb
    user_estimates : Dict
        dictionary, containing the user estimates as np.arrays.
    output_dir : str
        path to output directory used to save evaluation results. Defaults to
        `None`, meaning no evaluation files will be saved.
    mode : str
        bsseval version number. Defaults to 'v4'.
    win : float
        window size in seconds.
    hop : float
        hop size in seconds.

    Returns
    -------
    scores : EvalStore
        scores object that holds the framewise and global evaluation scores.
    """
    audio_estimates = []
    audio_reference = []

    # make sure to always build the list in the same order
    # therefore track.targets is an OrderedDict
    eval_targets = []  # save the list of target names to be evaluated
    for key, target in list(track.targets.items()):
        try:
            # try to fetch the audio from the user_results of a given key
            user_estimates[key]
        except KeyError:
            # ignore wrong key and continue
            continue

        # append this target name to the list of target to evaluate
        eval_targets.append(key)

    data = EvalStore(win=win, hop=hop)

    # check if vocals and accompaniment is among the targets
    has_acc = all(x in eval_targets for x in ['vocals', 'accompaniment'])
    if has_acc:
        # remove accompaniment from list of targets, because
        # the voc/acc scenario will be evaluated separately
        eval_targets.remove('accompaniment')

    if len(eval_targets) >= 2:
        # compute evaluation of remaining targets
        for target in eval_targets:
            audio_estimates.append(user_estimates[target])
            audio_reference.append(track.targets[target].audio)

        SDR, ISR, SIR, SAR = evaluate(
            audio_reference,
            audio_estimates,
            win=int(win*track.rate),
            hop=int(hop*track.rate),
            mode=mode
        )

        # iterate over all evaluation results except for vocals
        # (index i stays aligned because vocals were included in the
        # evaluate() call above, only their reporting is skipped)
        for i, target in enumerate(eval_targets):
            if target == 'vocals' and has_acc:
                continue

            values = {
                "SDR": SDR[i].tolist(),
                "SIR": SIR[i].tolist(),
                "ISR": ISR[i].tolist(),
                "SAR": SAR[i].tolist()
            }

            data.add_target(
                target_name=target,
                values=values
            )

    # add vocal accompaniment targets later
    if has_acc:
        # add vocals and accompaniments as a separate scenario
        eval_targets = ['vocals', 'accompaniment']

        audio_estimates = []
        audio_reference = []

        for target in eval_targets:
            audio_estimates.append(user_estimates[target])
            audio_reference.append(track.targets[target].audio)

        SDR, ISR, SIR, SAR = evaluate(
            audio_reference,
            audio_estimates,
            win=int(win*track.rate),
            hop=int(hop*track.rate),
            mode=mode
        )

        # iterate over all targets
        for i, target in enumerate(eval_targets):
            values = {
                "SDR": SDR[i].tolist(),
                "SIR": SIR[i].tolist(),
                "ISR": ISR[i].tolist(),
                "SAR": SAR[i].tolist()
            }

            data.add_target(
                target_name=target,
                values=values
            )

    if output_dir:
        # validate against the schema
        data.validate()

        try:
            subset_path = op.join(
                output_dir,
                track.subset
            )

            if not op.exists(subset_path):
                os.makedirs(subset_path)

            # write the per-track scores as <subset>/<track name>.json
            with open(
                op.join(subset_path, track.name) + '.json', 'w+'
            ) as f:
                f.write(data.json)

        except (IOError):
            # saving results is best-effort; scores are still returned
            pass

    return data
|
python
|
{
"resource": ""
}
|
q13431
|
evaluate
|
train
|
def evaluate(
    references,
    estimates,
    win=1*44100,
    hop=1*44100,
    mode='v4',
    padding=True
):
    """BSS_EVAL images evaluation using metrics module

    Parameters
    ----------
    references : np.ndarray, shape=(nsrc, nsampl, nchan)
        array containing true reference sources
    estimates : np.ndarray, shape=(nsrc, nsampl, nchan)
        array containing estimated sources
    win : int, defaults to 44100
        window size in samples
    hop : int
        hop size in samples, defaults to 44100 (no overlap)
    mode : str
        BSSEval version, default to `v4`
    padding : bool
        pad or truncate estimates to match reference length

    Returns
    -------
    SDR : np.ndarray, shape=(nsrc,)
        vector of Signal to Distortion Ratios (SDR)
    ISR : np.ndarray, shape=(nsrc,)
        vector of Source to Spatial Distortion Image (ISR)
    SIR : np.ndarray, shape=(nsrc,)
        vector of Source to Interference Ratios (SIR)
    SAR : np.ndarray, shape=(nsrc,)
        vector of Sources to Artifacts Ratios (SAR)
    """
    references = np.array(references)
    estimates = np.array(estimates)

    if padding:
        references, estimates = pad_or_truncate(references, estimates)

    sdr, isr, sir, sar, _ = metrics.bss_eval(
        references,
        estimates,
        compute_permutation=False,
        window=win,
        hop=hop,
        framewise_filters=(mode == "v3"),
        bsseval_sources_version=False
    )
    return sdr, isr, sir, sar
|
python
|
{
"resource": ""
}
|
q13432
|
EvalStore._q
|
train
|
def _q(self, number, precision='.00001'):
"""quantiztion of BSSEval values"""
if np.isinf(number):
return np.nan
else:
return D(D(number).quantize(D(precision)))
|
python
|
{
"resource": ""
}
|
q13433
|
bsseval
|
train
|
def bsseval(inargs=None):
    """
    Generic cli app for bsseval results. Expects two folder with
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('reference_dir', type=str)
    parser.add_argument('estimates_dir', type=str)
    parser.add_argument('-o', help='output_dir')
    parser.add_argument(
        '--win', type=float, help='Window size in seconds', default=1.0
    )
    parser.add_argument(
        '--hop', type=float, help='Hop size in seconds', default=1.0
    )
    parser.add_argument(
        '-m', type=str, help='bss_eval version [`v3`, `v4`]', default='v4'
    )
    parser.add_argument(
        '--version', '-v',
        action='version',
        version='%%(prog)s %s' % util.__version__
    )
    args = parser.parse_args(inargs)

    # write results next to the estimates unless an output dir was given
    output_dir = args.o if args.o else args.estimates_dir

    # evaluate an existing estimate folder with wav files
    data = eval_dir(
        args.reference_dir,
        args.estimates_dir,
        output_dir=output_dir,
        mode=args.m,
        win=args.win,
        hop=args.hop
    )

    print(data)
|
python
|
{
"resource": ""
}
|
q13434
|
museval
|
train
|
def museval(inargs=None):
    """
    Commandline interface for museval evaluation tools
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('estimates_dir', type=str)
    parser.add_argument('-o', help='output_dir')
    parser.add_argument('--cpu', type=int, help='number of cpus', default=4)
    parser.add_argument(
        '-p', help='enable multiprocessing',
        action='store_true',
    )
    parser.add_argument(
        '--musdb',
        help='path to musdb',
        type=str
    )
    parser.add_argument(
        '--iswav', help='Read musdb wav instead of stems',
        action='store_true',
    )
    parser.add_argument(
        '--version', '-v',
        action='version',
        version='%%(prog)s %s' % util.__version__
    )
    args = parser.parse_args(inargs)

    mus = musdb.DB(root_dir=args.musdb, is_wav=args.iswav)

    # default the output location to the estimates folder
    output_dir = args.o if args.o else args.estimates_dir

    # evaluate an existing estimate folder with wav files
    eval_mus_dir(
        dataset=mus,
        estimates_dir=args.estimates_dir,
        output_dir=output_dir,
        parallel=args.p,
        cpus=args.cpu
    )
|
python
|
{
"resource": ""
}
|
q13435
|
Page.next_page
|
train
|
def next_page(self):
    """Return the next `Page` after this one in the result sequence
    it's from.

    If the current page is the last page in the sequence, calling
    this method raises a `PageError`.
    """
    try:
        next_url = self.next_url
    except AttributeError:
        raise PageError("Page %r has no next page" % self)
    return self.page_for_url(next_url)
|
python
|
{
"resource": ""
}
|
q13436
|
Page.first_page
|
train
|
def first_page(self):
    """Return the first `Page` in the result sequence this `Page`
    instance is from.

    If the current page is already the first page in the sequence,
    calling this method raises a `PageError`.
    """
    try:
        start_url = self.start_url
    except AttributeError:
        raise PageError("Page %r is already the first page" % self)
    return self.page_for_url(start_url)
|
python
|
{
"resource": ""
}
|
q13437
|
Page.page_for_url
|
train
|
def page_for_url(cls, url):
    """Return a new `Page` containing the items at the given
    endpoint URL."""
    resp, elem = Resource.element_for_url(url)
    return cls.page_for_value(resp, Resource.value_for_element(elem))
|
python
|
{
"resource": ""
}
|
q13438
|
Page.page_for_value
|
train
|
def page_for_value(cls, resp, value):
    """Return a new `Page` representing the given resource `value`
    retrieved using the HTTP response `resp`.

    This method records pagination ``Link`` headers present in `resp`, so
    that the returned `Page` can return their resources from its
    `next_page()` and `first_page()` methods.
    """
    page = cls(value)
    links = parse_link_value(resp.getheader('Link'))
    for url, attrs in six.iteritems(links):
        rel = attrs.get('rel')
        if rel == 'start':
            page.start_url = url
        elif rel == 'next':
            page.next_url = url
    return page
|
python
|
{
"resource": ""
}
|
q13439
|
Resource.serializable_attributes
|
train
|
def serializable_attributes(self):
    """ Attributes to be serialized in a ``POST`` or ``PUT`` request.
    Returns all attributes unless a blacklist is specified
    """
    _missing = object()
    blacklist = getattr(self, 'blacklist_attributes', _missing)
    if blacklist is _missing:
        return self.attributes
    return [attr for attr in self.attributes if attr not in blacklist]
|
python
|
{
"resource": ""
}
|
q13440
|
Resource.headers_as_dict
|
train
|
def headers_as_dict(cls, resp):
    """Turns an array of response headers into a dictionary"""
    if six.PY2:
        # Python 2 exposes raw "Name: value" strings
        pairs = [header.split(':', 1) for header in resp.msg.headers]
    else:
        pairs = resp.msg._headers
    return {name: value.strip() for name, value in pairs}
|
python
|
{
"resource": ""
}
|
q13441
|
Resource.as_log_output
|
train
|
def as_log_output(self):
    """Returns an XML string containing a serialization of this
    instance suitable for logging.

    Attributes named in the instance's `sensitive_attributes` are
    redacted.
    """
    doc = self.to_element()
    # blank out every occurrence of each sensitive element
    for name in self.sensitive_attributes:
        for node in doc.iter(name):
            node.text = 'XXXXXXXXXXXXXXXX'
    return ElementTreeBuilder.tostring(doc, encoding='UTF-8')
|
python
|
{
"resource": ""
}
|
q13442
|
Resource.get
|
train
|
def get(cls, uuid):
    """Return a `Resource` instance of this class identified by
    the given code or UUID.

    Only `Resource` classes with specified `member_path` attributes
    can be directly requested with this method.
    """
    if not uuid:
        raise ValueError("get must have a value passed as an argument")
    # URL-escape the identifier before interpolating it into the path
    quoted = quote(str(uuid))
    url = recurly.base_uri() + (cls.member_path % (quoted,))
    _resp, elem = cls.element_for_url(url)
    return cls.from_element(elem)
|
python
|
{
"resource": ""
}
|
q13443
|
Resource.headers_for_url
|
train
|
def headers_for_url(cls, url):
    """Return the headers only for the given URL as a dict"""
    resp = cls.http_request(url, method='HEAD')
    if resp.status != 200:
        cls.raise_http_error(resp)
    return Resource.headers_as_dict(resp)
|
python
|
{
"resource": ""
}
|
q13444
|
Resource.value_for_element
|
train
|
def value_for_element(cls, elem):
    """Deserialize the given XML `Element` into its representative
    value.

    Depending on the content of the element, the returned value may be:

    * a string, integer, or boolean value
    * a `datetime.datetime` instance
    * a list of `Resource` instances
    * a single `Resource` instance
    * a `Money` instance
    * ``None``
    """
    log = logging.getLogger('recurly.resource')
    if elem is None:
        log.debug("Converting %r element into None value", elem)
        return
    if elem.attrib.get('nil') is not None:
        log.debug("Converting %r element with nil attribute into None value", elem.tag)
        return
    # *_in_cents elements become Money unless the resource carries its own currency
    if elem.tag.endswith('_in_cents') and 'currency' not in cls.attributes and not cls.inherits_currency:
        log.debug("Converting %r element in class with no matching 'currency' into a Money value", elem.tag)
        return Money.from_element(elem)
    attr_type = elem.attrib.get('type')
    log.debug("Converting %r element with type %r", elem.tag, attr_type)
    # Primitive types declared via the `type` attribute
    if attr_type == 'integer':
        return int(elem.text.strip())
    if attr_type == 'float':
        return float(elem.text.strip())
    if attr_type == 'boolean':
        return elem.text.strip() == 'true'
    if attr_type == 'datetime':
        return iso8601.parse_date(elem.text.strip())
    if attr_type == 'array':
        return [cls._subclass_for_nodename(sub_elem.tag).from_element(sub_elem) for sub_elem in elem]
    # Unknown types may be the names of resource classes.
    if attr_type is not None:
        try:
            value_class = cls._subclass_for_nodename(attr_type)
        except ValueError:
            log.debug("Not converting %r element with type %r to a resource as that matches no known nodename",
                      elem.tag, attr_type)
        else:
            return value_class.from_element(elem)
    # Untyped complex elements should still be resource instances. Guess from the nodename.
    if len(elem):  # has children
        value_class = cls._subclass_for_nodename(elem.tag)
        log.debug("Converting %r tag into a %s", elem.tag, value_class.__name__)
        return value_class.from_element(elem)
    # Plain text (or empty) element: return the stripped text
    value = elem.text or ''
    return value.strip()
|
python
|
{
"resource": ""
}
|
q13445
|
Resource.element_for_value
|
train
|
def element_for_value(cls, attrname, value):
    """Serialize the given value into an XML `Element` with the
    given tag name, returning it.
    The value argument may be:
    * a `Resource` instance
    * a `Money` instance
    * a `datetime.datetime` instance
    * a string, integer, or boolean value
    * ``None``
    * a list or tuple of these values

    :param attrname: tag name for the produced element
    :param value: value to serialize (see list above)
    :return: the populated XML element
    """
    if isinstance(value, Resource):
        if attrname in cls._classes_for_nodename:
            # override the child's node name with this attribute name
            return value.to_element(attrname)
        return value.to_element()
    el = ElementTreeBuilder.Element(attrname)
    if value is None:
        # Explicit nil marker so the server distinguishes None from "".
        el.attrib['nil'] = 'nil'
    # bool must be tested before int: bool is a subclass of int.
    elif isinstance(value, bool):
        el.attrib['type'] = 'boolean'
        el.text = 'true' if value else 'false'
    elif isinstance(value, int):
        el.attrib['type'] = 'integer'
        el.text = str(value)
    elif isinstance(value, datetime):
        el.attrib['type'] = 'datetime'
        el.text = value.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif isinstance(value, list) or isinstance(value, tuple):
        for sub_resource in value:
            if hasattr(sub_resource, 'to_element'):
                el.append(sub_resource.to_element())
            else:
                # Child tags use the singular form of the plural attribute name.
                el.append(cls.element_for_value(re.sub(r"s$", "", attrname), sub_resource))
    elif isinstance(value, Money):
        value.add_to_element(el)
    else:
        el.text = six.text_type(value)
    return el
|
python
|
{
"resource": ""
}
|
q13446
|
Resource.update_from_element
|
train
|
def update_from_element(self, elem):
    """Repoint this `Resource` at the given XML element.

    Any attribute values cached on the instance are discarded so that
    subsequent attribute access reads from the new element, and the
    element's ``href`` (when present) becomes this resource's URL.
    """
    self._elem = elem
    # Clear locally-cached attribute values; attributes that were never
    # set simply raise AttributeError, which we ignore.
    for name in self.attributes:
        try:
            delattr(self, name)
        except AttributeError:
            pass
    href = elem.attrib.get('href')
    if href is not None:
        self._url = href
    return self
|
python
|
{
"resource": ""
}
|
q13447
|
Resource.all
|
train
|
def all(cls, **kwargs):
    """Return a `Page` of instances of this `Resource` class from
    its general collection endpoint.

    Only `Resource` classes with a specified `collection_path`
    endpoint can be requested with this method. Keyword arguments are
    forwarded to the API endpoint as query-string parameters.
    """
    endpoint = recurly.base_uri() + cls.collection_path
    if kwargs:
        endpoint = '{0}?{1}'.format(endpoint, urlencode_params(kwargs))
    return Page.page_for_url(endpoint)
|
python
|
{
"resource": ""
}
|
q13448
|
Resource.count
|
train
|
def count(cls, **kwargs):
    """Return a count of server-side resources matching the filtering
    arguments given as keyword parameters.
    """
    endpoint = recurly.base_uri() + cls.collection_path
    if kwargs:
        endpoint = '{0}?{1}'.format(endpoint, urlencode_params(kwargs))
    return Page.count_for_url(endpoint)
|
python
|
{
"resource": ""
}
|
q13449
|
Resource.put
|
train
|
def put(self, url):
    """Sends this `Resource` instance to the service with a
    ``PUT`` request to the given URL.

    :param url: endpoint that receives the serialized resource
    :raises: an error class from ``recurly.errors`` (via
        ``raise_http_error``) when the response status is not 200
    """
    response = self.http_request(url, 'PUT', self, {'Content-Type': 'application/xml; charset=utf-8'})
    if response.status != 200:
        self.raise_http_error(response)
    response_xml = response.read()
    logging.getLogger('recurly.http.response').debug(response_xml)
    # Refresh this instance from the server's authoritative representation.
    self.update_from_element(ElementTree.fromstring(response_xml))
|
python
|
{
"resource": ""
}
|
q13450
|
Resource.post
|
train
|
def post(self, url, body=None):
    """Sends this `Resource` instance to the service with a
    ``POST`` request to the given URL. Takes an optional body

    :param url: endpoint to post to
    :param body: optional payload; when falsy this instance is serialized
    :raises: an error class from ``recurly.errors`` (via
        ``raise_http_error``) when the status is not 200, 201, or 204
    """
    response = self.http_request(url, 'POST', body or self, {'Content-Type': 'application/xml; charset=utf-8'})
    if response.status not in (200, 201, 204):
        self.raise_http_error(response)
    # Remember the canonical URL the service assigned to this resource.
    self._url = response.getheader('Location')
    if response.status in (200, 201):
        response_xml = response.read()
        logging.getLogger('recurly.http.response').debug(response_xml)
        # Refresh this instance from the returned representation.
        self.update_from_element(ElementTree.fromstring(response_xml))
|
python
|
{
"resource": ""
}
|
q13451
|
Resource.delete
|
train
|
def delete(self):
    """Submits a deletion request for this `Resource` instance as
    a ``DELETE`` request to its URL.

    :raises: an error class from ``recurly.errors`` (via
        ``raise_http_error``) when the response status is not 204
    """
    response = self.http_request(self._url, 'DELETE')
    if response.status != 204:
        self.raise_http_error(response)
|
python
|
{
"resource": ""
}
|
q13452
|
Resource.raise_http_error
|
train
|
def raise_http_error(cls, response):
    """Raise a `ResponseError` of the appropriate subclass in
    reaction to the given `http_client.HTTPResponse`.

    The body is read (and logged) first because the error class is
    constructed from the XML payload.
    """
    response_xml = response.read()
    logging.getLogger('recurly.http.response').debug(response_xml)
    # Map the HTTP status code to the matching exception class.
    exc_class = recurly.errors.error_class_for_http_status(response.status)
    raise exc_class(response_xml)
|
python
|
{
"resource": ""
}
|
q13453
|
Resource.to_element
|
train
|
def to_element(self, root_name=None):
    """Serialize this `Resource` instance to an XML element.

    :param root_name: optional tag name for the root element; defaults
        to this class's ``nodename``
    :return: the populated XML element
    """
    if not root_name:
        root_name = self.nodename
    elem = ElementTreeBuilder.Element(root_name)
    for attrname in self.serializable_attributes():
        # Only use values that have been loaded into the internal
        # __dict__. For retrieved objects we look into the XML response at
        # access time, so the internal __dict__ contains only the elements
        # that have been set on the client side.
        try:
            value = self.__dict__[attrname]
        except KeyError:
            continue
        if attrname in self.xml_attribute_attributes:
            # Some fields serialize as XML attributes rather than children.
            elem.attrib[attrname] = six.text_type(value)
        else:
            sub_elem = self.element_for_value(attrname, value)
            elem.append(sub_elem)
    return elem
|
python
|
{
"resource": ""
}
|
q13454
|
bss_eval_sources
|
train
|
def bss_eval_sources(reference_sources, estimated_sources,
                     compute_permutation=True):
    """
    BSS Eval v3 bss_eval_sources
    Wrapper to ``bss_eval`` with the right parameters.
    The call to this function is not recommended. See the description for the
    ``bsseval_sources`` parameter of ``bss_eval``.

    :param reference_sources: true source signals
    :param estimated_sources: estimated source signals
    :param compute_permutation: whether to search over source/estimate
        permutations (default True)
    :return: tuple ``(sdr, sir, sar, perm)``; the ISR produced by
        ``bss_eval`` is intentionally discarded by this wrapper
    """
    # window/hop of inf => evaluate over the whole signal (no framing).
    (sdr, isr, sir, sar, perm) = \
        bss_eval(
            reference_sources, estimated_sources,
            window=np.inf, hop=np.inf,
            compute_permutation=compute_permutation, filters_len=512,
            framewise_filters=True,
            bsseval_sources_version=True
        )
    return (sdr, sir, sar, perm)
|
python
|
{
"resource": ""
}
|
q13455
|
bss_eval_sources_framewise
|
train
|
def bss_eval_sources_framewise(reference_sources, estimated_sources,
                               window=30 * 44100, hop=15 * 44100,
                               compute_permutation=False):
    """
    BSS Eval v3 bss_eval_sources_framewise
    Wrapper to ``bss_eval`` with the right parameters.
    The call to this function is not recommended. See the description for the
    ``bsseval_sources`` parameter of ``bss_eval``.

    :param window: frame length in samples (default 30 s at 44.1 kHz)
    :param hop: hop size in samples (default 15 s at 44.1 kHz)
    :return: tuple ``(sdr, sir, sar, perm)``; the ISR from ``bss_eval``
        is discarded by this wrapper
    """
    (sdr, isr, sir, sar, perm) = \
        bss_eval(
            reference_sources, estimated_sources,
            window=window, hop=hop,
            compute_permutation=compute_permutation, filters_len=512,
            framewise_filters=True,
            bsseval_sources_version=True)
    return (sdr, sir, sar, perm)
|
python
|
{
"resource": ""
}
|
q13456
|
bss_eval_images
|
train
|
def bss_eval_images(reference_sources, estimated_sources,
                    compute_permutation=True):
    """
    BSS Eval v3 bss_eval_images
    Wrapper to ``bss_eval`` with the right parameters.

    :return: the full ``bss_eval`` result tuple (sdr, isr, sir, sar, perm)
    """
    # window/hop of inf => whole-signal evaluation (no framing).
    return bss_eval(
        reference_sources, estimated_sources,
        window=np.inf, hop=np.inf,
        compute_permutation=compute_permutation, filters_len=512,
        framewise_filters=True,
        bsseval_sources_version=False)
|
python
|
{
"resource": ""
}
|
q13457
|
bss_eval_images_framewise
|
train
|
def bss_eval_images_framewise(reference_sources, estimated_sources,
                              window=30 * 44100, hop=15 * 44100,
                              compute_permutation=False):
    """
    BSS Eval v3 bss_eval_images_framewise
    Framewise computation of bss_eval_images.
    Wrapper to ``bss_eval`` with the right parameters.

    :param window: frame length in samples (default 30 s at 44.1 kHz)
    :param hop: hop size in samples (default 15 s at 44.1 kHz)
    :return: the full ``bss_eval`` result tuple
    """
    return bss_eval(
        reference_sources, estimated_sources,
        window=window, hop=hop,
        compute_permutation=compute_permutation, filters_len=512,
        framewise_filters=True,
        bsseval_sources_version=False
    )
|
python
|
{
"resource": ""
}
|
q13458
|
_zeropad
|
train
|
def _zeropad(sig, N, axis=0):
"""pads with N zeros at the end of the signal, along given axis"""
# ensures concatenation dimension is the first
sig = np.moveaxis(sig, axis, 0)
# zero pad
out = np.zeros((sig.shape[0] + N,) + sig.shape[1:])
out[:sig.shape[0], ...] = sig
# put back axis in place
out = np.moveaxis(out, 0, axis)
return out
|
python
|
{
"resource": ""
}
|
q13459
|
_compute_projection_filters
|
train
|
def _compute_projection_filters(G, sf, estimated_source):
    """Least-squares projection of estimated source on the subspace spanned by
    delayed versions of reference sources, with delays between 0 and
    filters_len-1

    :param G: Gram-type matrix of the references (see shape comment below)
    :param sf: FFTs of the reference sources
    :param estimated_source: (nsampl, nchan) estimated signal
    :return: projection filters C, shaped (nsrc, nchan, filters_len, nchan),
        or (nchan, filters_len, nchan) when a single source was given
    """
    # epsilon
    eps = np.finfo(np.float).eps
    # shapes
    (nsampl, nchan) = estimated_source.shape
    # handles the case where we are calling this with only one source
    # G should be nsrc X nsrc X nchan X nchan X filters_len X filters_len
    # and sf should be nsrc X nchan X filters_len
    if len(G.shape) == 4:
        G = G[None, None, ...]
        sf = sf[None, ...]
    nsrc = G.shape[0]
    filters_len = G.shape[-1]
    # zero pad estimates and put chan in first dimension
    estimated_source = _zeropad(estimated_source.T, filters_len - 1, axis=1)
    # compute its FFT
    n_fft = int(2**np.ceil(np.log2(nsampl + filters_len - 1.)))
    sef = scipy.fftpack.fft(estimated_source, n=n_fft)
    # compute the cross-correlations between sources and estimates
    D = np.zeros((nsrc, nchan, filters_len, nchan))
    for (j, cj, c) in itertools.product(
        list(range(nsrc)), list(range(nchan)), list(range(nchan))
    ):
        ssef = sf[j, cj] * np.conj(sef[c])
        ssef = np.real(scipy.fftpack.ifft(ssef))
        D[j, cj, :, c] = np.hstack((ssef[0], ssef[-1:-filters_len:-1]))
    # reshape matrices to build the filters
    D = D.reshape(nsrc * nchan * filters_len, nchan)
    G = _reshape_G(G)
    # Distortion filters
    try:
        # eps on the diagonal regularizes the system before solving.
        C = np.linalg.solve(G + eps*np.eye(G.shape[0]), D).reshape(
            nsrc, nchan, filters_len, nchan
        )
    except np.linalg.linalg.LinAlgError:
        # Singular even after regularization: fall back to least squares.
        C = np.linalg.lstsq(G, D)[0].reshape(
            nsrc, nchan, filters_len, nchan
        )
    # if we asked for one single reference source,
    # return just a nchan X filters_len matrix
    if nsrc == 1:
        C = C[0]
    return C
|
python
|
{
"resource": ""
}
|
q13460
|
_project
|
train
|
def _project(reference_sources, C):
    """Project images using pre-computed filters C
    reference_sources are nsrc X nsampl X nchan
    C is nsrc X nchan X filters_len X nchan

    :return: projected signal, shaped (nsampl + filters_len - 1, nchan)
    """
    # shapes: ensure that input is 3d (comprising the source index)
    if len(reference_sources.shape) == 2:
        reference_sources = reference_sources[None, ...]
        C = C[None, ...]
    (nsrc, nsampl, nchan) = reference_sources.shape
    filters_len = C.shape[-2]
    # zero pad
    reference_sources = _zeropad(reference_sources, filters_len - 1, axis=1)
    sproj = np.zeros((nchan, nsampl + filters_len - 1))
    # Accumulate each (source, input-channel) filtered contribution into
    # the corresponding output channel.
    for (j, cj, c) in itertools.product(
        list(range(nsrc)), list(range(nchan)), list(range(nchan))
    ):
        sproj[c] += fftconvolve(
            C[j, cj, :, c],
            reference_sources[j, :, cj]
        )[:nsampl + filters_len - 1]
    # Transpose back to (samples, channels).
    return sproj.T
|
python
|
{
"resource": ""
}
|
q13461
|
_safe_db
|
train
|
def _safe_db(num, den):
"""Properly handle the potential +Inf db SIR instead of raising a
RuntimeWarning.
"""
if den == 0:
return np.inf
return 10 * np.log10(num / den)
|
python
|
{
"resource": ""
}
|
q13462
|
construct_api_url
|
train
|
def construct_api_url(input, representation, resolvers=None, get3d=False, tautomers=False, xml=True, **kwargs):
    """Return the URL for the desired API endpoint.
    :param string input: Chemical identifier to resolve
    :param string representation: Desired output representation
    :param list(str) resolvers: (Optional) Ordered list of resolvers to use
    :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
    :param bool tautomers: (Optional) Whether to return all tautomers
    :param bool xml: (Optional) Whether to return full XML response
    :returns: CIR API URL
    :rtype: str
    """
    # File formats require representation=file and the format in the querystring
    if representation in FILE_FORMATS:
        kwargs['format'] = representation
        representation = 'file'
    # Prepend input with 'tautomers:' to return all tautomers
    if tautomers:
        input = 'tautomers:%s' % input
    # The identifier is URL-quoted; remaining options become query params.
    url = '%s/%s/%s' % (API_BASE, quote(input), representation)
    if xml:
        url += '/xml'
    if resolvers:
        kwargs['resolver'] = ','.join(resolvers)
    if get3d:
        kwargs['get3d'] = True
    if kwargs:
        url += '?%s' % urlencode(kwargs)
    return url
|
python
|
{
"resource": ""
}
|
q13463
|
request
|
train
|
def request(input, representation, resolvers=None, get3d=False, tautomers=False, **kwargs):
    """Make a request to CIR and return the XML response.
    :param string input: Chemical identifier to resolve
    :param string representation: Desired output representation
    :param list(string) resolvers: (Optional) Ordered list of resolvers to use
    :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
    :param bool tautomers: (Optional) Whether to return all tautomers
    :returns: XML response from CIR
    :rtype: Element
    :raises HTTPError: if CIR returns an error code
    :raises ParseError: if CIR response is uninterpretable
    """
    url = construct_api_url(input, representation, resolvers, get3d, tautomers, **kwargs)
    log.debug('Making request: %s', url)
    # Network call: fetch and parse the XML document root.
    response = urlopen(url)
    return etree.parse(response).getroot()
|
python
|
{
"resource": ""
}
|
q13464
|
query
|
train
|
def query(input, representation, resolvers=None, get3d=False, tautomers=False, **kwargs):
    """Get all results for resolving input to the specified output representation.
    :param string input: Chemical identifier to resolve
    :param string representation: Desired output representation
    :param list(string) resolvers: (Optional) Ordered list of resolvers to use
    :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
    :param bool tautomers: (Optional) Whether to return all tautomers
    :returns: List of resolved results
    :rtype: list(Result)
    :raises HTTPError: if CIR returns an error code
    :raises ParseError: if CIR response is uninterpretable
    """
    tree = request(input, representation, resolvers, get3d, tautomers, **kwargs)
    results = []
    # Each <data> node is one resolver's answer; its <item> children are
    # the resolved values.
    for data in tree.findall('.//data'):
        value = [item.text for item in data.findall('item')]
        result = Result(
            input=tree.attrib['string'],
            representation=tree.attrib['representation'],
            resolver=data.attrib['resolver'],
            input_format=data.attrib['string_class'],
            notation=data.attrib['notation'],
            # Collapse single-item lists to a bare value.
            value=value[0] if len(value) == 1 else value
        )
        results.append(result)
    log.debug('Received %s query results', len(results))
    return results
|
python
|
{
"resource": ""
}
|
q13465
|
resolve
|
train
|
def resolve(input, representation, resolvers=None, get3d=False, **kwargs):
    """Resolve input to the specified output representation.
    :param string input: Chemical identifier to resolve
    :param string representation: Desired output representation
    :param list(string) resolvers: (Optional) Ordered list of resolvers to use
    :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
    :returns: Output representation or None
    :rtype: string or None
    :raises HTTPError: if CIR returns an error code
    :raises ParseError: if CIR response is uninterpretable
    """
    # Take first result from XML query.
    # Bug fix: query()'s positional order is (input, representation,
    # resolvers, get3d, tautomers). The previous call passed False into
    # the get3d slot and the caller's get3d into tautomers, silently
    # discarding 3D requests and toggling tautomer expansion instead.
    results = query(input, representation, resolvers, get3d, **kwargs)
    result = results[0].value if results else None
    return result
|
python
|
{
"resource": ""
}
|
q13466
|
resolve_image
|
train
|
def resolve_image(input, resolvers=None, fmt='png', width=300, height=300, frame=False, crop=None, bgcolor=None,
                  atomcolor=None, hcolor=None, bondcolor=None, framecolor=None, symbolfontsize=11, linewidth=2,
                  hsymbol='special', csymbol='special', stereolabels=False, stereowedges=True, header=None, footer=None,
                  **kwargs):
    """Resolve input to a 2D image depiction.
    :param string input: Chemical identifier to resolve
    :param list(string) resolvers: (Optional) Ordered list of resolvers to use
    :param string fmt: (Optional) gif or png image format (default png)
    :param int width: (Optional) Image width in pixels (default 300)
    :param int height: (Optional) Image height in pixels (default 300)
    :param bool frame: (Optional) Whether to show border frame (default False)
    :param int crop: (Optional) Crop image with specified padding
    :param int symbolfontsize: (Optional) Atom label font size (default 11)
    :param int linewidth: (Optional) Bond line width (default 2)
    :param string bgcolor: (Optional) Background color
    :param string atomcolor: (Optional) Atom label color
    :param string hcolor: (Optional) Hydrogen atom label color
    :param string bondcolor: (Optional) Bond color
    :param string framecolor: (Optional) Border frame color
    :param bool hsymbol: (Optional) Hydrogens: all, special or none (default special)
    :param bool csymbol: (Optional) Carbons: all, special or none (default special)
    :param bool stereolabels: (Optional) Whether to show stereochemistry labels (default False)
    :param bool stereowedges: (Optional) Whether to show wedge/dash bonds (default True)
    :param string header: (Optional) Header text above structure
    :param string footer: (Optional) Footer text below structure
    :returns: raw image bytes read from the HTTP response
    """
    # Aggregate all arguments into kwargs
    # (frame introspection collects each named parameter of this function;
    # parameters left at None are omitted from the query string)
    args, _, _, values = inspect.getargvalues(inspect.currentframe())
    for arg in args:
        if values[arg] is not None:
            kwargs[arg] = values[arg]
    # Turn off anti-aliasing for transparent background
    if kwargs.get('bgcolor') == 'transparent':
        kwargs['antialiasing'] = False
    # Renamed parameters
    if 'stereolabels' in kwargs:
        kwargs['showstereo'] = kwargs.pop('stereolabels')
    if 'fmt' in kwargs:
        kwargs['format'] = kwargs.pop('fmt')
    # Toggle stereo wedges
    if 'stereowedges' in kwargs:
        status = kwargs.pop('stereowedges')
        kwargs.update({'wedges': status, 'dashes': status})
    # Constant values
    kwargs.update({'representation': 'image', 'xml': False})
    url = construct_api_url(**kwargs)
    log.debug('Making image request: %s', url)
    # Network call: return the raw image payload.
    response = urlopen(url)
    return response.read()
|
python
|
{
"resource": ""
}
|
q13467
|
download
|
train
|
def download(input, filename, representation, overwrite=False, resolvers=None, get3d=False, **kwargs):
    """Convenience function to save a CIR response as a file.
    This is just a simple wrapper around the resolve function.
    :param string input: Chemical identifier to resolve
    :param string filename: File path to save to
    :param string representation: Desired output representation
    :param bool overwrite: (Optional) Whether to allow overwriting of an existing file
    :param list(string) resolvers: (Optional) Ordered list of resolvers to use
    :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
    :raises HTTPError: if CIR returns an error code
    :raises ParseError: if CIR response is uninterpretable
    :raises IOError: if overwrite is False and file already exists
    """
    result = resolve(input, representation, resolvers, get3d, **kwargs)
    # Just log and return if nothing resolved
    if not result:
        log.debug('No file to download.')
        return
    # Only overwrite an existing file if explicitly instructed to.
    if not overwrite and os.path.isfile(filename):
        raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." % filename)
    # Ensure file ends with a newline
    if not result.endswith('\n'):
        result += '\n'
    with open(filename, 'w') as f:
        f.write(result)
|
python
|
{
"resource": ""
}
|
q13468
|
Molecule.image_url
|
train
|
def image_url(self):
    """URL of a GIF image."""
    # NOTE(review): construct_api_url's positional order is (resolvers,
    # get3d, tautomers, xml); here False fills get3d and self.get3d lands
    # in the tautomers slot — confirm this argument ordering is intended.
    return construct_api_url(self.input, 'image', self.resolvers, False, self.get3d, False, **self.kwargs)
|
python
|
{
"resource": ""
}
|
q13469
|
Molecule.twirl_url
|
train
|
def twirl_url(self):
    """Url of a TwirlyMol 3D viewer."""
    # NOTE(review): same positional ordering as image_url — self.get3d
    # occupies construct_api_url's tautomers slot; confirm intended.
    return construct_api_url(self.input, 'twirl', self.resolvers, False, self.get3d, False, **self.kwargs)
|
python
|
{
"resource": ""
}
|
q13470
|
Molecule.download
|
train
|
def download(self, filename, representation, overwrite=False):
    """Download the resolved structure as a file.
    :param string filename: File path to save to
    :param string representation: Desired output representation
    :param bool overwrite: (Optional) Whether to allow overwriting of an existing file
    """
    # Delegates to the module-level download() using this molecule's
    # stored input, resolvers, get3d flag and extra kwargs.
    download(self.input, filename, representation, overwrite, self.resolvers, self.get3d, **self.kwargs)
|
python
|
{
"resource": ""
}
|
q13471
|
DataSource.progress
|
train
|
def progress(self, loaded, total, msg=''):
    """ Notify listeners of a progress change by firing a 'progress'
    event carrying the amount done, the total, and an optional message. """
    payload = {
        'loaded': loaded,
        'total': total,
        'msg': msg,
    }
    self.fire('progress', payload)
|
python
|
{
"resource": ""
}
|
q13472
|
DataSource.raw
|
train
|
def raw(self, tag, raw, metadata):
    """ Build a 'raw' response object with the payload base64-encoded. """
    encoded = base64.b64encode(raw)
    return {
        'type': 'raw',
        'tag': tag,
        'raw': encoded,
        'metadata': metadata,
    }
|
python
|
{
"resource": ""
}
|
q13473
|
assert_looks_like
|
train
|
def assert_looks_like(first, second, msg=None):
    """ Compare two strings if all contiguous whitespace is coalesced.

    :param first: first string to compare
    :param second: second string to compare
    :param msg: optional custom failure message
    :raises AssertionError: if the normalized strings differ
    """
    # Use raw string literals for the patterns: "\s" in a plain literal
    # is an invalid escape sequence (DeprecationWarning since Python 3.6,
    # slated to become a SyntaxError).
    first = _re.sub(r"\s+", " ", first.strip())
    second = _re.sub(r"\s+", " ", second.strip())
    if first != second:
        raise AssertionError(msg or "%r does not look like %r" % (first, second))
|
python
|
{
"resource": ""
}
|
q13474
|
load_feature
|
train
|
def load_feature(fname, language):
    """ Load and parse a feature file.

    :param fname: path to the feature file; made absolute before parsing
    :param language: language definition passed through to ``parse_file``
    :return: the parsed feature object
    """
    fname = os.path.abspath(fname)
    feat = parse_file(fname, language)
    return feat
|
python
|
{
"resource": ""
}
|
q13475
|
run_steps
|
train
|
def run_steps(spec, language="en"):
    """ Can be called by the user from within a step definition to execute other steps.

    :param spec: text of the steps to run
    :param language: language name used to parse the spec (default "en")
    :return: result of the active runner's ``run_steps_from_string``, or
        None when no active ``StepsRunner`` is found on the stack
    """
    # The way this works is a little exotic, but I couldn't think of a better way to work around
    # the fact that this has to be a global function and therefore cannot know about which step
    # runner to use (other than making step runner global)
    # Find the step runner that is currently running and use it to run the given steps
    fr = inspect.currentframe()
    # Walk up the call stack looking for a frame whose 'self' is a StepsRunner.
    while fr:
        if "self" in fr.f_locals:
            f_self = fr.f_locals['self']
            if isinstance(f_self, StepsRunner):
                return f_self.run_steps_from_string(spec, language)
        fr = fr.f_back
|
python
|
{
"resource": ""
}
|
q13476
|
StepsRunner.run_steps_from_string
|
train
|
def run_steps_from_string(self, spec, language_name='en'):
    """ Called from within step definitions to run other steps.

    :param spec: text of the steps to parse and run
    :param language_name: language used to load the parser (default 'en')
    """
    # Attribute the parsed steps to the caller's file and (0-based) line
    # so error reports point at the step definition that invoked us.
    caller = inspect.currentframe().f_back
    line = caller.f_lineno - 1
    fname = caller.f_code.co_filename
    steps = parse_steps(spec, fname, line, load_language(language_name))
    for s in steps:
        self.run_step(s)
|
python
|
{
"resource": ""
}
|
q13477
|
simulate_async_event
|
train
|
def simulate_async_event():
    """Simulate an asynchronous event.

    Sets the shared state to 'executing', schedules a callback one
    second later on the Twisted reactor, and returns the Deferred so
    later steps wait for it.
    """
    scc.state = 'executing'
    def async_event(result):
        """All other asynchronous events or function calls
        returned from later steps will wait until this
        callback fires."""
        # The reactor fires this with 'done'; record it as the new state.
        scc.state = result
        return 'some event result'
    deferred = Deferred()
    reactor.callLater(1, deferred.callback, 'done') # pylint: disable=E1101
    deferred.addCallback(async_event)
    return deferred
|
python
|
{
"resource": ""
}
|
q13478
|
hook_decorator
|
train
|
def hook_decorator(cb_type):
    """ Decorator to wrap hook definitions in. Registers hook.

    Supports both bare usage (``@before``) and tagged usage
    (``@before('tag1', 'tag2')``).

    :param cb_type: the hook/callback type to register under
    :return: a decorator producing a ``HookImpl``
    """
    def decorator_wrapper(*tags_or_func):
        if len(tags_or_func) == 1 and callable(tags_or_func[0]):
            # No tags were passed to this decorator
            func = tags_or_func[0]
            return HookImpl(cb_type, func)
        else:
            # We got some tags, so we need to produce the real decorator
            tags = tags_or_func
            def d(func):
                return HookImpl(cb_type, func, tags)
            return d
    return decorator_wrapper
|
python
|
{
"resource": ""
}
|
q13479
|
StepImplLoader.load_steps_impl
|
train
|
def load_steps_impl(self, registry, path, module_names=None):
    """
    Load the step implementations at the given path, with the given module names. If
    module_names is None then the module 'steps' is searched by default.

    :param registry: registry receiving the discovered step/hook/transform objects
    :param path: directory to search for step definition modules
    :param module_names: iterable of module names (default: ['steps'])
    :raises StepImplLoadException: wrapping any error raised while importing a module
    """
    if not module_names:
        module_names = ['steps']
    path = os.path.abspath(path)
    for module_name in module_names:
        # Modules are cached per (path, name) so repeated loads are cheap.
        mod = self.modules.get((path, module_name))
        if mod is None:
            #log.debug("Looking for step def module '%s' in %s" % (module_name, path))
            cwd = os.getcwd()
            if cwd not in sys.path:
                sys.path.append(cwd)
            try:
                actual_module_name = os.path.basename(module_name)
                complete_path = os.path.join(path, os.path.dirname(module_name))
                info = imp.find_module(actual_module_name, [complete_path])
            except ImportError:
                #log.debug("Did not find step defs module '%s' in %s" % (module_name, path))
                # NOTE(review): this returns from the whole function, so a
                # missing module also skips any remaining module_names —
                # confirm intended ('continue' would isolate the failure).
                return
            try:
                # Modules have to be loaded with unique names or else problems arise
                mod = imp.load_module("stepdefs_" + str(self.module_counter), *info)
            except:
                # Wrap any load-time failure, preserving the exc_info triple.
                exc = sys.exc_info()
                raise StepImplLoadException(exc)
            self.module_counter += 1
            self.modules[(path, module_name)] = mod
        # Register every recognized implementation object found in the module.
        for item_name in dir(mod):
            item = getattr(mod, item_name)
            if isinstance(item, StepImpl):
                registry.add_step(item.step_type, item)
            elif isinstance(item, HookImpl):
                registry.add_hook(item.cb_type, item)
            elif isinstance(item, NamedTransformImpl):
                registry.add_named_transform(item)
            elif isinstance(item, TransformImpl):
                registry.add_transform(item)
|
python
|
{
"resource": ""
}
|
q13480
|
StepImplRegistry.find_step_impl
|
train
|
def find_step_impl(self, step):
    """
    Find the implementation of the step for the given match string. Returns the StepImpl object
    corresponding to the implementation, and the arguments to the step implementation. If no
    implementation is found, raises UndefinedStepImpl. If more than one implementation is
    found, raises AmbiguousStepImpl.
    Each of the arguments returned will have been transformed by the first matching transform
    implementation.

    :param step: step whose ``step_type`` and ``match`` text drive the lookup
    :return: tuple of (StepImpl, list of transformed arguments)
    """
    result = None
    # Scan every registered implementation; keep scanning after a match
    # so that a second match can be reported as ambiguous.
    for si in self.steps[step.step_type]:
        matches = si.match(step.match)
        if matches:
            if result:
                raise AmbiguousStepImpl(step, result[0], si)
            # Captured regex groups become the step's arguments.
            args = [self._apply_transforms(arg, si) for arg in matches.groups()]
            result = si, args
    if not result:
        raise UndefinedStepImpl(step)
    return result
|
python
|
{
"resource": ""
}
|
q13481
|
run_command
|
train
|
def run_command(cmd):
    """
    Open a child process, and return its exit status and stdout.

    :param str cmd: command line, executed through the shell
    :return tuple(int, list[str], list[str]): exit status, stdout lines,
        and stderr lines (each line UTF-8 decoded and stripped)
    """
    child = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE,
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # Drain the pipes before waiting to avoid deadlocking on full buffers.
    out = [s.decode("utf-8").strip() for s in child.stdout]
    err = [s.decode("utf-8").strip() for s in child.stderr]
    # Bug fix: Popen.wait() already returns the exit code. The previous
    # os.WEXITSTATUS(child.wait()) treated it as a raw waitpid() status,
    # reporting 0 for every exit code below 256 (and failing on Windows,
    # where os.WEXITSTATUS does not exist).
    status = child.wait()
    return status, out, err
|
python
|
{
"resource": ""
}
|
q13482
|
make_filter_list
|
train
|
def make_filter_list(filters):
    """Transform filters into list of table rows.

    Each filter yields one tuple of display strings, and the rows are
    returned sorted by their (1-based) numeric index column.
    """
    rows = []
    ids = []
    for filt in filters:
        ids.append(filt.index)
        # Rewrite bare URLs in the full name into reST link markup.
        full = URL_P.sub(r'`<\1>`_', filt.fullname)
        rows.append((str(filt.index + 1),
                     filt.name,
                     "{0:.2f}".format(filt.msun_vega),
                     "{0:.2f}".format(filt.msun_ab),
                     "{0:.1f}".format(filt.lambda_eff),
                     full))
    rows.sort(key=lambda row: int(row[0]))
    return rows
|
python
|
{
"resource": ""
}
|
q13483
|
extract_version
|
train
|
def extract_version():
    """Extract the version from the package.

    Reads ``pdftools/__init__.py`` (relative to the current working
    directory), matches it against the module-level ``_version_re``
    pattern, and evaluates the captured literal.

    :return str: the package version string
    """
    with open('pdftools/__init__.py', 'r') as f:
        content = f.read()
    version_match = _version_re.search(content)
    # literal_eval unquotes the captured string literal safely.
    version = str(ast.literal_eval(version_match.group(1)))
    return version
|
python
|
{
"resource": ""
}
|
q13484
|
add_pypiper_args
|
train
|
def add_pypiper_args(parser, groups=("pypiper", ), args=None,
                     required=None, all_args=False):
    """
    Use this to add standardized pypiper arguments to your python pipeline.
    There are two ways to use `add_pypiper_args`: by specifying argument groups,
    or by specifying individual arguments. Specifying argument groups will add
    multiple arguments to your parser; these convenient argument groupings
    make it easy to add arguments to certain types of pipeline. For example,
    to make a looper-compatible pipeline, use `groups = ["pypiper", "looper"]`.
    :param argparse.ArgumentParser parser: ArgumentParser object from a pipeline
    :param str | Iterable[str] groups: Adds arguments belong to specified group
        of args. Options: pypiper, config, looper, resources, common, ngs, all.
    :param str | Iterable[str] args: You may specify a list of specific arguments one by one.
    :param Iterable[str] required: Arguments to be flagged as 'required' by argparse.
    :param bool all_args: Whether to include all of pypiper's arguments defined here.
    :return argparse.ArgumentParser: A new ArgumentParser object, with selected
        pypiper arguments added
    """
    # Resolve the requested groups/args into a concrete argument set,
    # then attach them to the parser.
    args_to_add = _determine_args(
        argument_groups=groups, arguments=args, use_all_args=all_args)
    parser = _add_args(parser, args_to_add, required)
    return parser
|
python
|
{
"resource": ""
}
|
q13485
|
build_command
|
train
|
def build_command(chunks):
    """
    Create a command from various parts.

    Each element of `chunks` is either a raw string (used nearly as-is)
    or a two-tuple of (option, argument); pairs whose argument is None
    or empty are dropped, as are None elements. A bare string input is
    returned unchanged.

    :param Iterable[str | (str, str | NoneType)] chunks: command components
    :return str: the single command built from the given components
    :raise ValueError: if no command parts are provided
    """
    if not chunks:
        raise ValueError(
            "No command parts: {} ({})".format(chunks, type(chunks)))
    if isinstance(chunks, str):
        return chunks
    pieces = []
    for part in chunks:
        if part is None:
            continue
        try:
            # Trim just space, not all whitespace.
            # This prevents damage to an option that specifies,
            # say, tab as a delimiter.
            pieces.append(part.strip(" "))
        except AttributeError:
            opt, arg = part
            if arg is None or arg == "":
                continue
            pieces.append("{} {}".format(opt.strip(" "), str(arg).strip(" ")))
    return " ".join(pieces)
|
python
|
{
"resource": ""
}
|
q13486
|
build_sample_paths
|
train
|
def build_sample_paths(sample):
    """
    Ensure existence of folders for a Sample.

    :param looper.models.Sample sample: Sample (or instance supporting get()
        that stores folders paths in a 'paths' key, in which the value is a
        mapping from path name to actual folder path)
    """
    for path_name, path in sample.paths.items():
        print("{}: '{}'".format(path_name, path))
        base, ext = os.path.splitext(path)
        if ext:
            # Bug fix: the placeholder was '[}' — str.format raised
            # ValueError ("Single '}' encountered") on every file-like path.
            print("Skipping file-like: '{}'".format(path))
        elif not os.path.isdir(base):
            os.makedirs(base)
|
python
|
{
"resource": ""
}
|
q13487
|
checkpoint_filename
|
train
|
def checkpoint_filename(checkpoint, pipeline_name=None):
    """
    Translate a checkpoint to a filename.
    This not only adds the checkpoint file extension but also standardizes the
    way in which checkpoint names are mapped to filenames.
    :param str | pypiper.Stage checkpoint: name of a pipeline phase/stage
    :param str pipeline_name: name of pipeline to prepend to the checkpoint
        filename; this differentiates checkpoint files, e.g. within the
        same sample output folder but associated with different pipelines,
        in case of the (somewhat probable) scenario of a stage name
        collision between pipelines the processed the same sample and
        wrote to the same output folder
    :return str | NoneType: standardized checkpoint name for file, plus
        extension; null if the input is a Stage that's designated as a
        non-checkpoint
    """
    # Allow Stage as type for checkpoint parameter's argument without
    # needing to import here the Stage type from stage.py module.
    try:
        # Stage objects carry their own checkpoint name...
        base = checkpoint.checkpoint_name
    except AttributeError:
        # ...plain strings are standardized via the shared translator.
        base = translate_stage_name(checkpoint)
    if pipeline_name:
        base = "{}{}{}".format(
            pipeline_name, PIPELINE_CHECKPOINT_DELIMITER, base)
    return base + CHECKPOINT_EXTENSION
|
python
|
{
"resource": ""
}
|
q13488
|
checkpoint_filepath
|
train
|
def checkpoint_filepath(checkpoint, pm):
    """
    Create filepath for indicated checkpoint.
    :param str | pypiper.Stage checkpoint: Pipeline phase/stage or one's name
    :param pypiper.PipelineManager | pypiper.Pipeline pm: manager of a pipeline
    instance, relevant for output folder path.
    :return str: standardized checkpoint name for file, plus extension
    :raise ValueError: if the checkpoint is given as absolute path that does
    not point within pipeline output folder
    """
    # Handle case in which checkpoint is given not just as a string, but
    # as a checkpoint-like filename. Don't worry about absolute path status
    # of a potential filename input, or whether it's in the pipeline's
    # output folder. That's handled upstream. While this isn't a protected
    # function, there's no real reason to call this from outside the package.
    if isinstance(checkpoint, str):
        if os.path.isabs(checkpoint):
            # An absolute path is returned verbatim, but only when it lies
            # inside the pipeline's output folder; otherwise it's an error.
            if is_in_file_tree(checkpoint, pm.outfolder):
                return checkpoint
            else:
                raise ValueError(
                    "Absolute checkpoint path '{}' is not in pipeline output "
                    "folder '{}'".format(checkpoint, pm.outfolder))
        _, ext = os.path.splitext(checkpoint)
        if ext == CHECKPOINT_EXTENSION:
            # Input already looks like a checkpoint filename; just anchor it
            # in the pipeline's output folder.
            return pipeline_filepath(pm, filename=checkpoint)
    # Allow Pipeline as pm type without importing Pipeline.
    # (A Pipeline exposes its PipelineManager via .manager; a manager passes
    # through unchanged.)
    try:
        pm = pm.manager
    except AttributeError:
        pass
    # We want the checkpoint filename itself to become a suffix, with a
    # delimiter intervening between the pipeline name and the checkpoint
    # name + extension. This is to handle the case in which a single, e.g.,
    # sample's output folder is the destination for output from multiple
    # pipelines, and we thus want to be able to distinguish between
    # checkpoint files from different pipelines for that sample that may
    # well define one or more stages with the same name (e.g., trim_reads,
    # align_reads, etc.)
    chkpt_name = checkpoint_filename(checkpoint, pipeline_name=pm.name)
    return pipeline_filepath(pm, filename=chkpt_name)
|
python
|
{
"resource": ""
}
|
q13489
|
check_shell_redirection
|
train
|
def check_shell_redirection(cmd):
    """
    Determine whether a command appears to contain shell redirection symbol
    outside of curly brackets.

    :param str cmd: Command to investigate.
    :return bool: Whether the command appears to contain shell redirection.
    """
    # Strip every non-greedy {...} span in one pass, then look for '>'.
    # The previous char-splicing loop used end()+1, which also dropped the
    # character immediately AFTER each closing brace -- a '>' directly
    # following a brace group was silently swallowed and the redirection
    # went undetected.
    return ">" in re.sub(r'\{(.*?)\}', '', cmd)
|
python
|
{
"resource": ""
}
|
q13490
|
get_proc_name
|
train
|
def get_proc_name(cmd):
    """
    Get the representative process name from complex command.

    :param str | list[str] cmd: a command to be processed
    :return str: the basename representative command
    """
    # A non-string iterable of tokens is flattened into one command string.
    if not isinstance(cmd, str) and isinstance(cmd, Iterable):
        cmd = " ".join(cmd)
    # First whitespace-delimited token, with any parentheses stripped out.
    head = cmd.split()[0]
    for paren in "()":
        head = head.replace(paren, "")
    return head
|
python
|
{
"resource": ""
}
|
q13491
|
get_first_value
|
train
|
def get_first_value(param, param_pools, on_missing=None, error=True):
    """
    Get the value for a particular parameter from the first pool in the provided
    priority list of parameter pools.

    :param str param: Name of parameter for which to determine/fetch value.
    :param Sequence[Mapping[str, object]] param_pools: Ordered (priority)
        collection of mapping from parameter name to value; this should be
        ordered according to descending priority.
    :param object | function(str) -> object on_missing: default value or
        action to take if the requested parameter is missing from all of the
        pools. If a callable, it should return a value when passed the
        requested parameter as the one and only argument.
    :param bool error: Whether to raise an error if the requested parameter
        is not mapped to a value AND there's no value or strategy provided
        with 'on_missing' with which to handle the case of a request for an
        unmapped parameter.
    :return object: Value to which the requested parameter first mapped in
        the (descending) priority collection of parameter 'pools,' or
        a value explicitly defined or derived with 'on_missing.'
    :raise KeyError: If the requested parameter is unmapped in all of the
        provided pools, and the argument to the 'error' parameter evaluates
        to True.
    """
    # Pools are ordered by descending priority, so the first hit wins.
    for mapping in param_pools:
        try:
            return mapping[param]
        except KeyError:
            continue
    # No pool mapped the parameter; decide between error and fallback.
    if on_missing is None:
        if error:
            raise KeyError("Unmapped parameter: '{}'".format(param))
        return None
    if callable(on_missing):
        # A callable fallback derives the value from the parameter name.
        try:
            return on_missing(param)
        except TypeError:
            raise TypeError(
                "Any callable passed as the action to take when a requested "
                "parameter is missing should accept that parameter and return "
                "a value.")
    # Plain (non-callable) fallback value.
    return on_missing
|
python
|
{
"resource": ""
}
|
q13492
|
is_in_file_tree
|
train
|
def is_in_file_tree(fpath, folder):
    """
    Determine whether a file is in a folder (at any depth).

    :param str fpath: filepath to investigate
    :param folder: path to folder to query
    :return bool: whether the path indicated is in the folder indicated
    """
    file_folder, _ = os.path.split(fpath)
    # Append a trailing separator to BOTH sides and test that the file's
    # folder starts with the queried folder. The previous comparison was
    # inverted (folder.startswith(file_folder)), which reported files in
    # PARENT directories as inside the folder, files in subdirectories as
    # outside it, and let sibling prefixes like '/a/b' vs '/a/bc' collide.
    file_folder = os.path.join(file_folder, "")
    base_folder = os.path.join(folder, "")
    return file_folder.startswith(base_folder)
|
python
|
{
"resource": ""
}
|
q13493
|
is_gzipped_fastq
|
train
|
def is_gzipped_fastq(file_name):
    """
    Determine whether indicated file appears to be a gzipped FASTQ.

    :param str file_name: Name/path of file to check as gzipped FASTQ.
    :return bool: Whether indicated file appears to be in gzipped FASTQ format.
    """
    # str.endswith accepts a tuple of suffixes; the previous os.path.splitext
    # call produced an unused local and is dropped.
    return file_name.endswith((".fastq.gz", ".fq.gz"))
|
python
|
{
"resource": ""
}
|
q13494
|
make_lock_name
|
train
|
def make_lock_name(original_path, path_base_folder):
    """
    Create name for lock file from an absolute path.

    The original path must be absolute, and it should point to a location
    within the location indicated by the base folder path provided. This is
    particularly useful for deleting a sample's output folder path from
    within the path of a target file to generate a lock file corresponding
    to the original target.

    :param str original_path: Full original filepath.
    :param str path_base_folder: Portion of original path to delete
    :return str: Name or perhaps relative (to the base folder path indicated)
        path to lock file
    """
    # Drop the base-folder portion, then flatten remaining separators so the
    # result is a single filename component.
    _lockname = lambda p: p.replace(path_base_folder, "").replace(os.sep, "__")
    if isinstance(original_path, str):
        return _lockname(original_path)
    if isinstance(original_path, Sequence):
        return list(map(_lockname, original_path))
    raise TypeError("Neither string nor other sequence type: {} ({})".
                    format(original_path, type(original_path)))
|
python
|
{
"resource": ""
}
|
q13495
|
is_multi_target
|
train
|
def is_multi_target(target):
    """
    Determine if pipeline manager's run target is multiple.

    :param None or str or Sequence of str target: 0, 1, or multiple targets
    :return bool: Whether there are multiple targets
    :raise TypeError: if the argument is neither None nor string nor Sequence
    """
    # Null or a single name can never be multiple.
    if target is None:
        return False
    if isinstance(target, str):
        return False
    # Any other sequence is "multiple" iff it holds more than one element.
    if isinstance(target, Sequence):
        return len(target) > 1
    raise TypeError("Could not interpret argument as a target: {} ({})".
                    format(target, type(target)))
|
python
|
{
"resource": ""
}
|
q13496
|
parse_cores
|
train
|
def parse_cores(cores, pm, default):
    """
    Framework to finalize number of cores for an operation.

    Some calls to a function may directly provide a desired number of cores,
    others may not. Similarly, some pipeline managers may define a cores count
    while others will not. This utility provides a single via which the
    count of cores to use for an operation may be determined: an explicit
    count wins; otherwise the pipeline manager's 'cores' attribute is used;
    finally the required default is the fallback. (This function is intended
    to be partially applied, then reused within a module, class, etc. to
    standardize the way in which this value is determined within a scope.)

    :param int | str cores: direct specification of cores count
    :param pypiper.PipelineManager pm: pipeline manager perhaps defining cores
    :param int | str default: default number of cores, used if a value isn't
        directly given and the pipeline manager doesn't define core count.
    :return int: number of cores
    """
    # Explicit argument takes precedence; fall back to manager, then default.
    if not cores:
        cores = getattr(pm, "cores", default)
    return int(cores)
|
python
|
{
"resource": ""
}
|
q13497
|
parse_stage_name
|
train
|
def parse_stage_name(stage):
    """
    Determine the name of a stage.

    The stage may be provided already as a name, as a Stage object, or as a
    callable with __name__ (e.g., function).

    :param str | pypiper.Stage | function stage: Object representing a stage,
        from which to obtain name.
    :return str: Name of putative pipeline Stage.
    :raise TypeError: if no name can be determined from the argument.
    """
    if isinstance(stage, str):
        return stage
    # Prefer an explicit 'name' attribute (Stage), then fall back to the
    # callable's __name__ (plain function).
    for attr in ("name", "__name__"):
        try:
            return getattr(stage, attr)
        except AttributeError:
            continue
    raise TypeError("Unsupported stage type: {}".format(type(stage)))
|
python
|
{
"resource": ""
}
|
q13498
|
pipeline_filepath
|
train
|
def pipeline_filepath(pm, filename=None, suffix=None):
    """
    Derive path to file for managed pipeline.

    :param pypiper.PipelineManager | pypiper.Pipeline pm: Manager of a
        particular pipeline instance.
    :param str filename: Name of file for which to create full path based
        on pipeline's output folder.
    :param str suffix: Suffix for the file; this can be added to the filename
        if provided or added to the pipeline name if there's no filename.
    :raises TypeError: If neither filename nor suffix is provided, raise a
        TypeError, as in that case there's no substance from which to create
        a filepath.
    :return str: Path to file within managed pipeline's output folder, with
        filename as given or determined by the pipeline name, and suffix
        appended if given.
    """
    if filename is None and suffix is None:
        raise TypeError("Provide filename and/or suffix to create "
                        "path to a pipeline file.")
    # Missing filename falls back to pipeline name; missing suffix is empty.
    full_name = (filename or pm.name) + (suffix or "")
    if os.path.isabs(full_name):
        return full_name
    # Pipeline and PipelineManager define the same outfolder (a Pipeline just
    # references its manager's), so either type works here.
    return os.path.join(pm.outfolder, full_name)
|
python
|
{
"resource": ""
}
|
q13499
|
NGSTk.bam_to_fastq_bedtools
|
train
|
def bam_to_fastq_bedtools(self, bam_file, out_fastq_pre, paired_end):
    """
    Converts bam to fastq; A version using bedtools.

    :param str bam_file: path to input BAM file
    :param str out_fastq_pre: output prefix; "_R1.fastq" (and "_R2.fastq"
        for paired-end) is appended to form the output path(s)
    :param bool paired_end: whether to also emit a second-mate FASTQ
    :return (str, str, str | NoneType): the bedtools command, the R1 fastq
        path, and the R2 fastq path (None for single-end)
    """
    self.make_sure_path_exists(os.path.dirname(out_fastq_pre))
    fq1 = out_fastq_pre + "_R1.fastq"
    fq2 = None
    # fq1 already carries the ".fastq" extension; the previous version
    # appended a second ".fastq" in the command, so bedtools wrote
    # "*_R1.fastq.fastq" while the returned fq1 path pointed elsewhere.
    cmd = self.tools.bedtools + " bamtofastq -i " + bam_file + " -fq " + fq1
    if paired_end:
        fq2 = out_fastq_pre + "_R2.fastq"
        cmd += " -fq2 " + fq2
    return cmd, fq1, fq2
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.