_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def main():
    """Main entry-point for oz's cli.

    Initializes oz, dispatches to the action selected on the command line
    via optfn, and converts the action's return value into a process exit
    status.  Raises if an action returns something other than an int,
    None, or optfn's error sentinel.
    """
    # Hack to make user code available for import
    sys.path.append(".")
    # Run the specified action
    oz.initialize()
    retr = optfn.run(list(oz._actions.values()))
    if retr == optfn.ERROR_RETURN_CODE:
        sys.exit(-1)
    elif retr is None:
        # Action finished without an explicit status: report success.
        # (was `retr == None`; identity comparison is the correct idiom)
        sys.exit(0)
    elif isinstance(retr, int):
        sys.exit(retr)
    else:
        raise Exception("Unexpected return value from action: %s" % retr)
def build_tree_from_distance_matrix(matrix, best_tree=False, params=None,
                                    working_dir='/tmp'):
    """Returns a tree from a distance matrix.

    matrix: a square Dict2D object (cogent.util.dict2d)
    best_tree: if True (default:False), uses a slower but more accurate
    algorithm to build the tree.
    params: dict of parameters to pass in to the Clearcut app controller.

    The result will be an cogent.core.tree.PhyloNode object, or None if tree
    fails.
    """
    # BUGFIX: the old signature used a mutable default (params={}) and
    # mutated it below, leaking '--out' across calls and into caller
    # dicts.  Copy instead; passing None (the new default) or a dict is
    # backward compatible.
    params = dict(params) if params else {}
    params['--out'] = get_tmp_filename(working_dir)
    # Create instance of app controller, enable tree, disable alignment
    app = Clearcut(InputHandler='_input_as_multiline_string', params=params,
                   WorkingDir=working_dir, SuppressStdout=True,
                   SuppressStderr=True)
    # Turn off input as alignment
    app.Parameters['-a'].off()
    # Input is a distance matrix
    app.Parameters['-d'].on()
    if best_tree:
        app.Parameters['-N'].on()
    # Turn the dict2d object into the expected input format
    matrix_input, int_keys = _matrix_input_from_dict2d(matrix)
    # Collect result
    result = app(matrix_input)
    # Build tree
    tree = DndParser(result['Tree'].read(), constructor=PhyloNode)
    # reassign to original names (clearcut saw short aliases)
    for node in tree.tips():
        node.Name = int_keys[node.Name]
    # Clean up
    result.cleanUp()
    del(app, result, params)
    return tree
def _matrix_input_from_dict2d(matrix):
    """Build clearcut input (a phylip distance matrix) from a Dict2D.

    clearcut truncates taxon names to 10 characters, so each name is
    replaced with a short 'env_<i>' alias before formatting; the returned
    mapping lets callers restore the original names afterwards.

    Returns a (phylip_matrix_string, alias_to_name_dict) tuple.
    """
    # alias -> original name, assigned in deterministic (sorted) order
    int_keys = dict(('env_' + str(idx), name)
                    for idx, name in enumerate(sorted(matrix.keys())))
    # original name -> alias (inverse of int_keys)
    int_map = dict((name, alias) for alias, name in int_keys.items())
    # Re-key every distance entry with the short aliases.
    renamed = [(int_map[row], int_map[col], matrix[row][col])
               for row in matrix for col in matrix[row]]
    int_map_dists = Dict2D(renamed)
    # Row order fed to phylipMatrix follows the sorted alias names.
    # The following only works for a square matrix.
    names = sorted(int_map_dists.keys())
    rows = [[str(int_map_dists[r][c]) for c in names] for r in names]
    input_matrix = phylipMatrix(rows, names)
    # clearcut fails without a trailing newline!
    return input_matrix + '\n', int_keys
q39303 | ManifestDownloader._close | train | def _close(self):
'''
Closes aiohttp session and all open file descriptors
'''
if hasattr(self, 'aiohttp'):
if not self.aiohttp.closed:
self.aiohttp.close()
if hasattr(self, 'file_descriptors'):
for fd in self.file_descriptors.values():
if not fd.closed:
fd.close() | python | {
"resource": ""
} |
q39304 | _unbytes | train | def _unbytes(bytestr):
"""
Returns a bytestring from the human-friendly string returned by `_bytes`.
>>> _unbytes('123456')
'\x12\x34\x56'
"""
return ''.join(chr(int(bytestr[k:k + 2], 16))
for k in range(0, len(bytestr), 2)) | python | {
"resource": ""
} |
q39305 | _blocking | train | def _blocking(lock, state_dict, event, timeout=None):
"""
A contextmanager that clears `state_dict` and `event`, yields, and waits
for the event to be set. Clearing an yielding are done within `lock`.
Used for blocking request/response semantics on the request side, as in:
with _blocking(lock, state, event):
send_request()
The response side would then do something like:
with lock:
state['data'] = '...'
event.set()
"""
with lock:
state_dict.clear()
event.clear()
yield
event.wait(timeout) | python | {
"resource": ""
} |
def register(self, event, fn):
    """
    Arrange for `fn` to run whenever a message of type `event` arrives.
    Returns `fn` unchanged so this can double as a decorator.
    """
    handlers = self._callbacks.setdefault(event, [])
    handlers.append(fn)
    return fn
def put(self, event, *args, **kwargs):
    """
    Queue a callback for `event`; `args` and `kwargs` will be handed to
    every handler registered for that event.
    """
    item = (event, args, kwargs)
    self._queue.put(item)
def _on_gateway(self, header, payload, rest, addr):
    """
    Records a discovered gateway so we can connect to it later; signals
    the discovery event once recorded.
    """
    # Only UDP gateway announcements are usable here.
    if payload.get('service') != SERVICE_UDP:
        return
    host = addr[0]
    self.gateway = Gateway(host, payload['port'], header.gateway)
    self.gateway_found_event.set()
def _on_light_state(self, header, payload, rest, addr):
    """
    Records the light state of bulbs, and forwards to a high-level callback
    with human-friendlier arguments.
    """
    with self.lock:
        # Labels arrive as fixed-width, NUL-padded strings on the wire.
        label = payload['label'].strip('\x00')
        self.bulbs[header.mac] = bulb = Bulb(label, header.mac)
        # Signal once we've heard from the expected number of bulbs.
        if len(self.bulbs) >= self.num_bulbs:
            self.bulbs_found_event.set()
        self.light_state[header.mac] = payload
        if len(self.light_state) >= self.num_bulbs:
            self.light_state_event.set()
    # Forward with raw 16-bit protocol fields rescaled to friendly units:
    # hue in degrees [0, 360), saturation/brightness in [0, 1].
    self.callbacks.put(EVENT_LIGHT_STATE, bulb,
        raw=payload,
        hue=(payload['hue'] / float(0xffff) * 360) % 360.0,
        saturation=payload['sat'] / float(0xffff),
        brightness=payload['bright'] / float(0xffff),
        kelvin=payload['kelvin'],
        is_on=bool(payload['power']))
def send(self, packet_type, bulb, packet_fmt, *packet_args):
    """
    Builds a protocol packet addressed to one or more bulbs and queues
    it on the sender.
    """
    data = build_packet(packet_type, self.gateway.mac, bulb,
                        packet_fmt, *packet_args)
    # Log the outgoing bytes before queueing them for transmission.
    self.logger('>> %s', _bytes(data))
    self.sender.put(data)
def set_power_state(self, is_on, bulb=ALL_BULBS, timeout=None):
    """
    Sets the power state of one or more bulbs, then requests fresh light
    state so the reply can be observed. Returns the power-state dict.
    """
    payload = '\x00\x01' if is_on else '\x00\x00'
    with _blocking(self.lock, self.power_state, self.light_state_event,
                   timeout):
        self.send(REQ_SET_POWER_STATE, bulb, '2s', payload)
        self.send(REQ_GET_LIGHT_STATE, ALL_BULBS, '')
    return self.power_state
def set_light_state(self, hue, saturation, brightness, kelvin,
                    bulb=ALL_BULBS, timeout=None):
    """
    Sets the light state of one or more bulbs.

    Hue is a float from 0 to 360, saturation and brightness are floats
    from 0 to 1, and kelvin is an integer.
    """
    # Rescale the human-friendly ranges onto the protocol's 16-bit fields.
    def to_u16(value):
        return int(value) & 0xffff
    raw_hue = to_u16((hue % 360) / 360.0 * 0xffff)
    raw_sat = to_u16(saturation * 0xffff)
    raw_bright = to_u16(brightness * 0xffff)
    return self.set_light_state_raw(raw_hue, raw_sat, raw_bright, kelvin,
                                    bulb, timeout)
def on_packet(self, packet_type):
    """
    Decorator factory: the decorated function is registered as a handler
    for packets of the given type.
    """
    def decorator(fn):
        return self.callbacks.register(packet_type, fn)
    return decorator
def connect(self, attempts=20, delay=0.5):
    """
    Connects to a gateway, blocking until a connection is made and bulbs
    are found.

    Step 1: send a gateway discovery packet to the broadcast address, wait
    until we've received some info about the gateway.

    Step 2: connect to a discovered gateway, wait until the connection has
    been completed.

    Step 3: ask for info about bulbs, wait until we've found the number of
    bulbs we expect.

    Raises a ConnectException if any of the steps fail.

    :param attempts: how many discovery/light-state retries to attempt.
    :param delay: seconds between retries (steps 1 and 3).
    """
    # Broadcast discovery packets until we find a gateway.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    with closing(sock):
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        discover_packet = build_packet(REQ_GATEWAY,
            ALL_BULBS, ALL_BULBS, '',
            protocol=DISCOVERY_PROTOCOL)
        # _retry re-yields until gateway_found_event fires (set by
        # _on_gateway) or `attempts` runs out; `ok` carries the outcome.
        for _, ok in _retry(self.gateway_found_event, attempts, delay):
            sock.sendto(discover_packet, BROADCAST_ADDRESS)
        if not ok:
            raise ConnectException('discovery failed')
    self.callbacks.put(EVENT_DISCOVERED)
    # Tell the sender to connect to the gateway until it does.
    for _, ok in _retry(self.sender.is_connected, 1, 3):
        self.sender.put(self.gateway)
    if not ok:
        raise ConnectException('connection failed')
    self.callbacks.put(EVENT_CONNECTED)
    # Send light state packets to the gateway until we find bulbs.
    for _, ok in _retry(self.bulbs_found_event, attempts, delay):
        self.send(REQ_GET_LIGHT_STATE, ALL_BULBS, '')
    if not ok:
        raise ConnectException('only found %d of %d bulbs' % (
            len(self.bulbs), self.num_bulbs))
    self.callbacks.put(EVENT_BULBS_FOUND)
def run(self):
    """
    A context manager starting up threads to send and receive data from a
    gateway and handle callbacks. Yields when a connection has been made,
    and cleans up connections and threads when it's done.
    """
    # NOTE(review): this is a generator used as a context manager --
    # presumably decorated with @contextlib.contextmanager; confirm at
    # the definition site (decorator not visible in this chunk).
    listener_thr = _spawn(self.receiver.run)
    callback_thr = _spawn(self.callbacks.run)
    sender_thr = _spawn(self.sender.run)
    logger_thr = _spawn(self.logger.run)
    self.connect()
    try:
        yield
    finally:
        # Shutdown ordering matters: stop the world, drain the listener,
        # then signal and join the remaining worker threads.
        self.stop()
        # Wait for the listener to finish.
        listener_thr.join()
        self.callbacks.put('shutdown')
        # Tell the other threads to finish, and wait for them.
        for obj in [self.callbacks, self.sender, self.logger]:
            obj.stop()
        for thr in [callback_thr, sender_thr, logger_thr]:
            thr.join()
async def run_multiconvert(self, url_string, to_type):
    '''
    Enqueues, in succession, every conversion step needed to take the
    given URL to `to_type`, storing the result in the cache.
    '''
    # tasks.multiconvert drives the pipeline and calls us back once per
    # step; each callback becomes a queued CONVERT task.
    async def schedule_step(*step_args):
        await self.enqueue(Task.CONVERT, step_args)

    await tasks.multiconvert(url_string, to_type, schedule_step)
def mothur_classify_file(
        query_file, ref_fp, tax_fp, cutoff=None, iters=None, ksize=None,
        output_fp=None, tmp_dir=None):
    """Classify a set of sequences using Mothur's naive bayes method

    Dashes are used in Mothur to provide multiple filenames.  A
    filepath with a dash typically breaks an otherwise valid command
    in Mothur.  This wrapper script makes a copy of both files, ref_fp
    and tax_fp, to ensure that the path has no dashes.

    For convenience, we also ensure that each taxon list in the
    id-to-taxonomy file ends with a semicolon.

    Returns a dict of {query_id: (taxa_list, confidence)}, or None if
    output_fp is given (results are written to that file instead).
    """
    if tmp_dir is None:
        tmp_dir = gettempdir()
    # Copy the reference sequences, collecting their IDs so the taxonomy
    # file can be filtered to matching records only.
    # BUGFIX: the input files were previously opened and never closed;
    # use context managers so the handles are released deterministically.
    ref_seq_ids = set()
    tmp_ref_file = NamedTemporaryFile(dir=tmp_dir, suffix=".ref.fa")
    with open(ref_fp) as user_ref_file:
        for seq_id, seq in parse_fasta(user_ref_file):
            id_token = seq_id.split()[0]
            ref_seq_ids.add(id_token)
            tmp_ref_file.write(">%s\n%s\n" % (seq_id, seq))
    tmp_ref_file.seek(0)
    tmp_tax_file = NamedTemporaryFile(dir=tmp_dir, suffix=".tax.txt")
    with open(tax_fp) as user_tax_file:
        for line in user_tax_file:
            line = line.rstrip()
            if not line:
                continue
            # MOTHUR is particular that each assignment end with a semicolon.
            if not line.endswith(";"):
                line = line + ";"
            id_token, _, _ = line.partition("\t")
            if id_token in ref_seq_ids:
                tmp_tax_file.write(line)
                tmp_tax_file.write("\n")
    tmp_tax_file.seek(0)
    params = {"reference": tmp_ref_file.name, "taxonomy": tmp_tax_file.name}
    if cutoff is not None:
        params["cutoff"] = cutoff
    if ksize is not None:
        params["ksize"] = ksize
    if iters is not None:
        params["iters"] = iters
    # Create a temporary working directory to accommodate mothur's output
    # files, which are generated automatically based on the input file.
    work_dir = mkdtemp(dir=tmp_dir)
    app = MothurClassifySeqs(
        params, InputHandler='_input_as_lines', WorkingDir=work_dir,
        TmpDir=tmp_dir)
    result = app(query_file)
    # Force evaluation so we can safely clean up files
    assignments = list(parse_mothur_assignments(result['assignments']))
    result.cleanUp()
    rmtree(work_dir)
    if output_fp is not None:
        with open(output_fp, "w") as f:
            for query_id, taxa, conf in assignments:
                taxa_str = ";".join(taxa)
                f.write("%s\t%s\t%.2f\n" % (query_id, taxa_str, conf))
        return None
    return dict((a, (b, c)) for a, b, c in assignments)
q39318 | Mothur._derive_log_path | train | def _derive_log_path(self):
"""Guess logfile path produced by Mothur
This method checks the working directory for log files
generated by Mothur. It will raise an ApplicationError if no
log file can be found.
Mothur generates log files named in a nondeterministic way,
using the current time. We return the log file with the most
recent time, although this may lead to incorrect log file
detection if you are running many instances of mothur
simultaneously.
"""
filenames = listdir(self.WorkingDir)
lognames = [
x for x in filenames if re.match(
"^mothur\.\d+\.logfile$",
x)]
if not lognames:
raise ApplicationError(
'No log file detected in directory %s. Contents: \n\t%s' % (
input_dir, '\n\t'.join(possible_logfiles)))
most_recent_logname = sorted(lognames, reverse=True)[0]
return path.join(self.WorkingDir, most_recent_logname) | python | {
"resource": ""
} |
q39319 | Mothur._derive_unique_path | train | def _derive_unique_path(self):
"""Guess unique sequences path produced by Mothur"""
base, ext = path.splitext(self._input_filename)
return '%s.unique%s' % (base, ext) | python | {
"resource": ""
} |
q39320 | Mothur.__get_method_abbrev | train | def __get_method_abbrev(self):
"""Abbreviated form of clustering method parameter.
Used to guess output filenames for MOTHUR.
"""
abbrevs = {
'furthest': 'fn',
'nearest': 'nn',
'average': 'an',
}
if self.Parameters['method'].isOn():
method = self.Parameters['method'].Value
else:
method = self.Parameters['method'].Default
return abbrevs[method] | python | {
"resource": ""
} |
q39321 | Mothur._derive_list_path | train | def _derive_list_path(self):
"""Guess otu list file path produced by Mothur"""
base, ext = path.splitext(self._input_filename)
return '%s.unique.%s.list' % (base, self.__get_method_abbrev()) | python | {
"resource": ""
} |
q39322 | Mothur._derive_rank_abundance_path | train | def _derive_rank_abundance_path(self):
"""Guess rank abundance file path produced by Mothur"""
base, ext = path.splitext(self._input_filename)
return '%s.unique.%s.rabund' % (base, self.__get_method_abbrev()) | python | {
"resource": ""
} |
q39323 | Mothur._derive_species_abundance_path | train | def _derive_species_abundance_path(self):
"""Guess species abundance file path produced by Mothur"""
base, ext = path.splitext(self._input_filename)
return '%s.unique.%s.sabund' % (base, self.__get_method_abbrev()) | python | {
"resource": ""
} |
def getTmpFilename(self, tmp_dir=None, prefix='tmp', suffix='.txt'):
    """Returns a temporary filename

    Similar interface to tempfile.mktmp()

    :param tmp_dir: directory in which to place the temp file.
    :param prefix: filename prefix.
    :param suffix: filename suffix (extension).
    """
    # Override to change default constructor to str().  FilePath
    # objects muck up the Mothur script, so force plain strings here.
    return super(Mothur, self).getTmpFilename(
        tmp_dir=tmp_dir, prefix=prefix, suffix=suffix,
        result_constructor=str)
q39325 | Mothur._input_as_multiline_string | train | def _input_as_multiline_string(self, data):
"""Write multiline string to temp file, return filename
data: a multiline string to be written to a file.
"""
self._input_filename = self.getTmpFilename(
self.WorkingDir, suffix='.fasta')
with open(self._input_filename, 'w') as f:
f.write(data)
return self._input_filename | python | {
"resource": ""
} |
q39326 | Mothur._input_as_lines | train | def _input_as_lines(self, data):
"""Write sequence of lines to temp file, return filename
data: a sequence to be written to a file, each element of the
sequence will compose a line in the file
* Note: '\n' will be stripped off the end of each sequence
element before writing to a file in order to avoid
multiple new lines accidentally be written to a file
"""
self._input_filename = self.getTmpFilename(
self.WorkingDir, suffix='.fasta')
with open(self._input_filename, 'w') as f:
# Use lazy iteration instead of list comprehension to
# prevent reading entire file into memory
for line in data:
f.write(str(line).strip('\n'))
f.write('\n')
return self._input_filename | python | {
"resource": ""
} |
q39327 | Mothur._input_as_path | train | def _input_as_path(self, data):
"""Copys the provided file to WorkingDir and returns the new filename
data: path or filename
"""
self._input_filename = self.getTmpFilename(
self.WorkingDir, suffix='.fasta')
copyfile(data, self._input_filename)
return self._input_filename | python | {
"resource": ""
} |
q39328 | Mothur._set_WorkingDir | train | def _set_WorkingDir(self, path):
"""Sets the working directory
"""
self._curr_working_dir = path
try:
mkdir(self.WorkingDir)
except OSError:
# Directory already exists
pass | python | {
"resource": ""
} |
q39329 | MothurClassifySeqs._format_function_arguments | train | def _format_function_arguments(self, opts):
"""Format a series of function arguments in a Mothur script."""
params = [self.Parameters[x] for x in opts]
return ', '.join(filter(None, map(str, params))) | python | {
"resource": ""
} |
def _add_parameter(self, parameter):
    '''
    Force-adds a `Parameter` object to the instance, indexing it both by
    name and by each of its aliases.
    '''
    if isinstance(parameter, MethodParameter):
        # MethodParameters must be bound to this alloy before use.
        parameter = parameter.bind(alloy=self)
    self._parameters[parameter.name] = parameter
    for alias_name in parameter.aliases:
        self._aliases[alias_name] = parameter
def get_parameter(self, name, default=None):
    '''
    Returns the named parameter if present, or the value of `default`
    otherwise. Attributes that exist but are not `Parameter` instances
    also yield `default`.
    '''
    candidate = getattr(self, name, None)
    if isinstance(candidate, Parameter):
        return candidate
    return default
def set_log_level(self, level: str) -> None:
    """Override the default log level of the class.

    :param level: one of 'info', 'debug' or 'error'.
    :raises ValueError: if `level` is not a recognized name.
    """
    levels = {
        'info': logging.INFO,
        'debug': logging.DEBUG,
        'error': logging.ERROR,
    }
    try:
        to_set = levels[level]
    except KeyError:
        # BUGFIX: an unknown level previously crashed with
        # UnboundLocalError; fail with a descriptive error instead.
        raise ValueError("Unknown log level: %r" % level)
    self.log.setLevel(to_set)
def _request_bulk(self, urls: List[str]) -> List:
    """Batch the requests going out.

    :param urls: list of URLs to fetch concurrently.
    :returns: list of completed responses; failures are logged and
        skipped rather than raised.
    :raises ValueError: if `urls` is empty.
    """
    if not urls:
        # BUGFIX: was a bare `Exception`; ValueError is the precise type
        # (and still matches callers catching Exception).
        raise ValueError("No results were found")
    session: FuturesSession = FuturesSession(max_workers=len(urls))
    self.log.info("Bulk requesting: %d" % len(urls))
    futures = [session.get(u, headers=gen_headers(), timeout=3) for u in urls]
    done, incomplete = wait(futures)
    results: List = list()
    for response in done:
        try:
            results.append(response.result())
        except Exception as err:
            # BUGFIX: Logger.warn is deprecated; use warning().
            self.log.warning("Failed result: %s" % err)
    return results
def bind(self, alloy):
    '''
    Shallow-copies this MethodParameter, binds the copy to `alloy`, and
    returns it. Binding is required before the parameter can be called.
    '''
    bound = MethodParameter(self.name, self.method, self.dependencies,
                            self.units, self.aliases, self._references)
    bound.alloy = alloy
    return bound
def doQuery(self, url, method='GET', getParmeters=None, postParameters=None, files=None, extraHeaders={}, session={}):
    """Send a request to the server and return the result.

    NOTE(review): `extraHeaders` and `session` are mutable default
    arguments; they are only read here, but this is fragile -- confirm
    no caller relies on mutating them.
    """
    # Build headers
    headers = {}
    if not postParameters:
        postParameters = {}
    for key, value in extraHeaders.iteritems():
        # Fixes #197 for values with utf-8 chars to be passed into plugit
        if isinstance(value, basestring):
            headers['X-Plugit-' + key] = value.encode('utf-8')
        else:
            headers['X-Plugit-' + key] = value
    for key, value in session.iteritems():
        # Session values travel both as dedicated headers and as cookies.
        headers['X-Plugitsession-' + key] = value
        if 'Cookie' not in headers:
            headers['Cookie'] = ''
        headers['Cookie'] += key + '=' + str(value) + '; '
    if method == 'POST':
        if not files:
            r = requests.post(self.baseURI + '/' + url, params=getParmeters, data=postParameters, stream=True, headers=headers)
        else:
            # Special way, for big files
            # Requests is not usable: https://github.com/shazow/urllib3/issues/51
            from poster.encode import multipart_encode, MultipartParam
            from poster.streaminghttp import register_openers
            import urllib2
            import urllib
            # Register the streaming http handlers with urllib2
            register_openers()
            # headers contains the necessary Content-Type and Content-Length
            # datagen is a generator object that yields the encoded parameters
            data = []
            # Expand list-valued post parameters into repeated fields.
            for x in postParameters:
                if isinstance(postParameters[x], list):
                    for elem in postParameters[x]:
                        data.append((x, elem))
                else:
                    data.append((x, postParameters[x]))
            for f in files:
                data.append((f, MultipartParam(f, fileobj=open(files[f].temporary_file_path(), 'rb'), filename=files[f].name)))
            datagen, headers_multi = multipart_encode(data)
            headers.update(headers_multi)
            if getParmeters:
                get_uri = '?' + urllib.urlencode(getParmeters)
            else:
                get_uri = ''
            # Create the Request object
            request = urllib2.Request(self.baseURI + '/' + url + get_uri, datagen, headers)
            re = urllib2.urlopen(request)
            # Wrap the urllib2 response in a requests.Response so callers
            # always receive a uniform return type.
            from requests import Response
            r = Response()
            r.status_code = re.getcode()
            r.headers = dict(re.info())
            r.encoding = "application/json"
            r.raw = re.read()
            r._content = r.raw
        return r
    else:
        # Call the function based on the method.
        r = requests.request(method.upper(), self.baseURI + '/' + url, params=getParmeters, stream=True, headers=headers, allow_redirects=True)
        return r
def ping(self):
    """Return True if the server successfully answered a ping.

    A random token is sent and must be echoed back verbatim.
    """
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    token = ''.join(random.choice(alphabet) for _ in range(32))
    response = self.doQuery('ping?data=' + token)
    return response.status_code == 200 and response.json()['data'] == token
def checkVersion(self):
    """Check whether the server speaks the same protocol name/version."""
    response = self.doQuery('version')
    if response.status_code != 200:
        return False
    info = response.json()
    return (info['result'] == 'Ok'
            and info['version'] == self.PI_API_VERSION
            and info['protocol'] == self.PI_API_NAME)
def newMail(self, data, message):
    """Send a mail to a plugit server; True on acknowledged success."""
    response = self.doQuery(
        'mail', method='POST',
        postParameters={'response_id': str(data), 'message': str(message)})
    if response.status_code != 200:
        return False
    return response.json()['result'] == 'Ok'
def getMedia(self, uri):
    """Return a (content, content-type, cache-control) tuple for a media.

    Nothing is cached here: every call hits the server. On any non-200
    response, (None, None, None) is returned.
    """
    response = self.doQuery('media/' + uri)
    if response.status_code != 200:
        return (None, None, None)
    content_type = response.headers.get('content-type',
                                        'application/octet-stream')
    cache_control = response.headers.get('cache-control')
    return (response.content, content_type, cache_control)
def getMeta(self, uri):
    """Return meta information about an action. Cache the result as specified by the server"""
    action = urlparse(uri).path
    mediaKey = self.cacheKey + '_meta_' + action
    # Cache keys must not contain spaces.
    mediaKey = mediaKey.replace(' ', '__')
    meta = cache.get(mediaKey, None)
    # Nothing found -> Retrieve it from the server and cache it
    if not meta:
        r = self.doQuery('meta/' + uri)
        if r.status_code == 200:  # Get the content if there is not problem. If there is, template will stay to None
            meta = r.json()
            if 'expire' not in r.headers:
                expire = 5 * 60  # 5 minutes of cache if the server didn't specified anything
            else:
                # Convert the server-supplied absolute expiry into a TTL.
                expire = int((parser.parse(r.headers['expire']) - datetime.datetime.now(tzutc())).total_seconds())  # Use the server value for cache
            if expire > 0:  # Do the server want us to cache ?
                cache.set(mediaKey, meta, expire)
    return meta
def getTemplate(self, uri, meta=None):
    """Return the template for an action. Cache the result. Can use an optional meta parameter with meta information"""
    if not meta:
        # Meta lookups are themselves briefly cached (15s) to avoid
        # hammering the server when rendering repeatedly.
        metaKey = self.cacheKey + '_templatesmeta_cache_' + uri
        meta = cache.get(metaKey, None)
        if not meta:
            meta = self.getMeta(uri)
            cache.set(metaKey, meta, 15)
    if not meta:  # No meta, can return a template
        return None
    # Let's find the template in the cache; the key embeds the server's
    # template_tag so a changed template invalidates old entries.
    action = urlparse(uri).path
    templateKey = self.cacheKey + '_templates_' + action + '_' + meta['template_tag']
    template = cache.get(templateKey, None)
    # Nothing found -> Retrieve it from the server and cache it
    if not template:
        r = self.doQuery('template/' + uri)
        if r.status_code == 200:  # Get the content if there is not problem. If there is, template will stay to None
            template = r.content
            cache.set(templateKey, template, None)  # None = Cache forever
    return template
def component_offsetvectors(offsetvectors, n):
    """
    Given an iterable of offset vectors, return the shortest list of
    the unique n-instrument offset vectors from which all the vectors
    in the input iterable can be constructed.  This can be used to
    determine the minimal set of n-instrument coincs required to
    construct all of the coincs for all of the requested instrument and
    offset combinations in a set of offset vectors.

    It is assumed that the coincs for the vector {"H1": 0, "H2": 10,
    "L1": 20} can be constructed from the coincs for the vectors {"H1":
    0, "H2": 10} and {"H2": 0, "L1": 10}, that is only the relative
    offsets are significant in determining if two events are
    coincident, not the absolute offsets.  This assumption is not true
    for the standard inspiral pipeline, where the absolute offsets are
    significant due to the periodic wrapping of triggers around rings.

    :param offsetvectors: iterable of instrument -> offset mappings.
    :param n: number of instruments per component vector.
    """
    #
    # collect unique instrument set / deltas combinations
    #
    delta_sets = {}
    for vect in offsetvectors:
        for instruments in iterutils.choices(sorted(vect), n):
            # NOTE: the arithmetic used to construct the
            # offsets *must* match the arithmetic used by
            # offsetvector.deltas so that the results of the
            # two can be compared to each other without worry
            # of floating-point round off confusing things.
            delta_sets.setdefault(instruments, set()).add(tuple(vect[instrument] - vect[instruments[0]] for instrument in instruments))
    #
    # translate into a list of normalized n-instrument offset vectors
    #
    return [offsetvector(zip(instruments, deltas)) for instruments, delta_set in delta_sets.items() for deltas in delta_set]
def normalize(self, **kwargs):
    """
    Adjust the offsetvector so that a particular instrument has the
    desired offset, shifting every other instrument by the same amount
    so that all relative offsets are preserved.

    The instrument and its target offset are supplied as a keyword
    argument.  If several keyword arguments are given, they are tried
    in sorted key order and the first one naming an instrument present
    in the vector wins; if none match, this is a no-op.

    Returns self, which is modified in place.

    Example:

    >>> a = offsetvector({"H1": -10, "H2": -10, "L1": -10})
    >>> a.normalize(L1 = 0)
    offsetvector({'H2': 0, 'H1': 0, 'L1': 0})
    >>> a = offsetvector({"H1": -10, "H2": -10})
    >>> a.normalize(L1 = 0, H2 = 5)
    offsetvector({'H2': 5, 'H1': 5})
    """
    # FIXME: should it be performed in place?  if it should be, then
    # should there be no return value?
    for instrument, target in sorted(kwargs.items()):
        if instrument not in self:
            continue
        shift = target - self[instrument]
        for name in self.keys():
            self[name] += shift
        break
    return self
def fromdeltas(cls, deltas):
    """
    Construct an offsetvector from a dictionary of offset deltas as
    returned by the .deltas attribute.

    Example:

    >>> x = offsetvector({"H1": 0, "L1": 10, "V1": 20})
    >>> y = offsetvector.fromdeltas(x.deltas)
    >>> y == x
    True

    See also .deltas, .fromkeys()
    """
    # Each deltas key is a (reference_instrument, instrument) pair; only
    # the second member names the entry being reconstructed.
    items = []
    for (_, instrument), offset in deltas.items():
        items.append((instrument, offset))
    return cls(items)
def processGif(searchStr):
    '''
    This function returns the url of the gif searched for
    with the given search parameters using the Giphy API.
    Thanks!

    Returns `no_search_params` (an image url with the failure message on
    it) when no search string is given, and -1 when no gif matches.
    '''
    # BUGFIX: the empty/None check must run before any string methods are
    # called, otherwise a None argument crashed with AttributeError.
    if searchStr is None or searchStr.strip() == '':
        print("No search parameters specified!")
        return no_search_params
    # Sanitize the search string.
    # BUGFIX: str.replace/strip return new strings; the original code
    # discarded every result, so the query was sent unsanitized.
    for noise in ('| ', '|', ', ', ','):
        searchStr = searchStr.replace(noise, ' ')
    searchStr = searchStr.strip()
    searchStr = searchStr.strip('./?\'!,')
    searchStr = searchStr.replace(' ', '+')
    api_url = 'http://api.giphy.com/v1/gifs/search'
    api_key = 'dc6zaTOxFJmzC'
    payload = {
        'q': searchStr,
        'limit': 1,
        'api_key': api_key,
    }
    r = requests.get(api_url, params=payload)
    parsed_json = json.loads(r.text)
    if len(parsed_json['data']) == 0:
        print("Couldn't find suitable match for gif! :(")
        return -1
    else:  # Success!
        imgURL = parsed_json['data'][0]['images']['fixed_height']['url']
        return imgURL
q39346 | HTMLPurifier.__set_whitelist | train | def __set_whitelist(self, whitelist=None):
"""
Update default white list by customer white list
"""
# add tag's names as key and list of enabled attributes as value for defaults
self.whitelist = {}
# tags that removed with contents
self.sanitizelist = ['script', 'style']
if isinstance(whitelist, dict) and '*' in whitelist.keys():
self.isNotPurify = True
self.whitelist_keys = []
return
else:
self.isNotPurify = False
self.whitelist.update(whitelist or {})
self.whitelist_keys = self.whitelist.keys() | python | {
"resource": ""
} |
q39347 | HTMLPurifier.__attrs_str | train | def __attrs_str(self, tag, attrs):
"""
Build string of attributes list for tag
"""
enabled = self.whitelist.get(tag, ['*'])
all_attrs = '*' in enabled
items = []
for attr in attrs:
key = attr[0]
value = attr[1] or ''
if all_attrs or key in enabled:
items.append( u'%s="%s"' % (key, value,) )
return u' '.join(items) | python | {
"resource": ""
} |
def hex_from(val):
    """Returns hex string representation for a given value.

    :param bytes|str|unicode|int|long val:
    :rtype: bytes|str
    """
    if not isinstance(val, integer_types):
        return hexlify(val)
    # Integers: render and zero-pad to an even number of hex digits.
    text = '%x' % val
    if len(text) % 2:
        text = '0' + text
    return text
def format_hyperlink(val, hlx, hxl, xhl):
    """
    Reformats an html hyperlink into another form.

    @hlx, hxl, xhl: values returned by set_output_format
    """
    looks_like_link = '<a href="' in str(val)
    if looks_like_link and hlx != '<a href="':
        val = (val.replace('<a href="', hlx)
                  .replace('">', hxl, 1)
                  .replace('</a>', xhl))
    return val
def format_cell(val, round_floats=False, decimal_places=2, format_links=False,
                hlx='', hxl='', xhl=''):
    """
    Optionally applies smart_round and format_hyperlink to a cell value.
    """
    if round_floats:
        val = smart_round(val, decimal_places=decimal_places)
    if not format_links:
        return val
    return format_hyperlink(val, hlx, hxl, xhl)
def get_row_data(row, column_name, cat_time_ns=True):
    """
    Retrieves the requested column's data from the given row.

    @cat_time_ns: If the column_name has "_time" in it, will concatenate
     the column with any column having the same name but "_time_ns",
     returning seconds as a float.
    """
    column_name_ns = re.sub(r'_time', r'_time_ns', column_name)
    try:
        rowattrs = [attr for attr in row.__slots__]
    except AttributeError:
        # BUGFIX: was row.__dict__.iterkeys(), which no longer exists on
        # Python 3 dicts; keys() works on both Python 2 and 3.
        rowattrs = [attr for attr in row.__dict__.keys()]
    if cat_time_ns and "_time" in column_name and column_name_ns in rowattrs:
        # Combine integer seconds with nanoseconds into float seconds.
        return int(getattr(row, column_name)) + 10**(-9.) * int(getattr(row, column_name_ns))
    else:
        return getattr(row, column_name)
q39352 | SRPContext.generate_random | train | def generate_random(self, bits_len=None):
"""Generates a random value.
:param int bits_len:
:rtype: int
"""
bits_len = bits_len or self._bits_random
return random().getrandbits(bits_len) | python | {
"resource": ""
} |
q39353 | CondorJob.add_checkpoint_file | train | def add_checkpoint_file(self, filename):
"""
Add filename as a checkpoint file for this DAG job.
"""
if filename not in self.__checkpoint_files:
self.__checkpoint_files.append(filename) | python | {
"resource": ""
} |
q39354 | CondorJob.add_file_arg | train | def add_file_arg(self, filename):
"""
Add a file argument to the executable. Arguments are appended after any
options and their order is guaranteed. Also adds the file name to the
list of required input data for this job.
@param filename: file to add as argument.
"""
self.__arguments.append(filename)
if filename not in self.__input_files:
self.__input_files.append(filename) | python | {
"resource": ""
} |
q39355 | CondorJob.get_opt | train | def get_opt( self, opt):
"""
Returns the value associated with the given command line option.
Returns None if the option does not exist in the options list.
@param opt: command line option
"""
if self.__options.has_key(opt):
return self.__options[opt]
return None | python | {
"resource": ""
} |
q39356 | CondorJob.add_ini_opts | train | def add_ini_opts(self, cp, section):
"""
Parse command line options from a given section in an ini file and
pass to the executable.
@param cp: ConfigParser object pointing to the ini file.
@param section: section of the ini file to add to the options.
"""
for opt in cp.options(section):
arg = string.strip(cp.get(section,opt))
self.__options[opt] = arg | python | {
"resource": ""
} |
q39357 | CondorDAGJob.set_grid_site | train | def set_grid_site(self,site):
"""
Set the grid site to run on. If not specified,
will not give hint to Pegasus
"""
self.__grid_site=str(site)
if site != 'local':
self.set_executable_installed(False) | python | {
"resource": ""
} |
q39358 | CondorDAGNode.add_checkpoint_file | train | def add_checkpoint_file(self,filename):
"""
Add filename as a checkpoint file for this DAG node
@param filename: checkpoint filename to add
"""
if filename not in self.__checkpoint_files:
self.__checkpoint_files.append(filename)
if not isinstance(self.job(), CondorDAGManJob):
if self.job().get_universe() == 'grid':
self.add_checkpoint_macro(filename) | python | {
"resource": ""
} |
q39359 | CondorDAGNode.get_input_files | train | def get_input_files(self):
"""
Return list of input files for this DAG node and its job.
"""
input_files = list(self.__input_files)
if isinstance(self.job(), CondorDAGJob):
input_files = input_files + self.job().get_input_files()
return input_files | python | {
"resource": ""
} |
q39360 | CondorDAGNode.get_output_files | train | def get_output_files(self):
"""
Return list of output files for this DAG node and its job.
"""
output_files = list(self.__output_files)
if isinstance(self.job(), CondorDAGJob):
output_files = output_files + self.job().get_output_files()
return output_files | python | {
"resource": ""
} |
q39361 | CondorDAGNode.get_checkpoint_files | train | def get_checkpoint_files(self):
"""
Return a list of checkpoint files for this DAG node and its job.
"""
checkpoint_files = list(self.__checkpoint_files)
if isinstance(self.job(), CondorDAGJob):
checkpoint_files = checkpoint_files + self.job().get_checkpoint_files()
return checkpoint_files | python | {
"resource": ""
} |
q39362 | CondorDAGNode.write_job | train | def write_job(self,fh):
"""
Write the DAG entry for this node's job to the DAG file descriptor.
@param fh: descriptor of open DAG file.
"""
if isinstance(self.job(),CondorDAGManJob):
# create an external subdag from this dag
fh.write( ' '.join(
['SUBDAG EXTERNAL', self.__name, self.__job.get_sub_file()]) )
if self.job().get_dag_directory():
fh.write( ' DIR ' + self.job().get_dag_directory() )
else:
# write a regular condor job
fh.write( 'JOB ' + self.__name + ' ' + self.__job.get_sub_file() )
fh.write( '\n')
fh.write( 'RETRY ' + self.__name + ' ' + str(self.__retry) + '\n' ) | python | {
"resource": ""
} |
q39363 | CondorDAGNode.write_pre_script | train | def write_pre_script(self,fh):
"""
Write the pre script for the job, if there is one
@param fh: descriptor of open DAG file.
"""
if self.__pre_script:
fh.write( 'SCRIPT PRE ' + str(self) + ' ' + self.__pre_script + ' ' +
' '.join(self.__pre_script_args) + '\n' ) | python | {
"resource": ""
} |
q39364 | CondorDAGNode.write_post_script | train | def write_post_script(self,fh):
"""
Write the post script for the job, if there is one
@param fh: descriptor of open DAG file.
"""
if self.__post_script:
fh.write( 'SCRIPT POST ' + str(self) + ' ' + self.__post_script + ' ' +
' '.join(self.__post_script_args) + '\n' ) | python | {
"resource": ""
} |
q39365 | CondorDAGNode.write_input_files | train | def write_input_files(self, fh):
"""
Write as a comment into the DAG file the list of input files
for this DAG node.
@param fh: descriptor of open DAG file.
"""
for f in self.__input_files:
print >>fh, "## Job %s requires input file %s" % (self.__name, f) | python | {
"resource": ""
} |
q39366 | CondorDAGNode.write_output_files | train | def write_output_files(self, fh):
"""
Write as a comment into the DAG file the list of output files
for this DAG node.
@param fh: descriptor of open DAG file.
"""
for f in self.__output_files:
print >>fh, "## Job %s generates output file %s" % (self.__name, f) | python | {
"resource": ""
} |
q39367 | CondorDAGNode.add_parent | train | def add_parent(self,node):
"""
Add a parent to this node. This node will not be executed until the
parent node has run sucessfully.
@param node: CondorDAGNode to add as a parent.
"""
if not isinstance(node, (CondorDAGNode,CondorDAGManNode) ):
raise CondorDAGNodeError, "Parent must be a CondorDAGNode or a CondorDAGManNode"
self.__parents.append( node ) | python | {
"resource": ""
} |
q39368 | CondorDAGNode.get_cmd_tuple_list | train | def get_cmd_tuple_list(self):
"""
Return a list of tuples containg the command line arguments
"""
# pattern to find DAGman macros
pat = re.compile(r'\$\((.+)\)')
argpat = re.compile(r'\d+')
# first parse the options and replace macros with values
options = self.job().get_opts()
macros = self.get_opts()
cmd_list = []
for k in options:
val = options[k]
m = pat.match(val)
if m:
key = m.group(1)
value = macros[key]
cmd_list.append(("--%s" % k, str(value)))
else:
cmd_list.append(("--%s" % k, str(val)))
# second parse the short options and replace macros with values
options = self.job().get_short_opts()
for k in options:
val = options[k]
m = pat.match(val)
if m:
key = m.group(1)
value = macros[key]
cmd_list.append(("-%s" % k, str(value)))
else:
cmd_list.append(("-%s" % k, str(val)))
# lastly parse the arguments and replace macros with values
args = self.job().get_args()
macros = self.get_args()
for a in args:
m = pat.match(a)
if m:
arg_index = int(argpat.findall(a)[0])
try:
cmd_list.append(("%s" % macros[arg_index], ""))
except IndexError:
cmd_list.append("")
else:
cmd_list.append(("%s" % a, ""))
return cmd_list | python | {
"resource": ""
} |
q39369 | CondorDAGNode.get_cmd_line | train | def get_cmd_line(self):
"""
Return the full command line that will be used when this node
is run by DAGman.
"""
cmd = ""
cmd_list = self.get_cmd_tuple_list()
for argument in cmd_list:
cmd += ' '.join(argument) + " "
return cmd | python | {
"resource": ""
} |
q39370 | CondorDAGManNode.add_maxjobs_category | train | def add_maxjobs_category(self,categoryName,maxJobsNum):
"""
Add a category to this DAG called categoryName with a maxjobs of maxJobsNum.
@param node: Add (categoryName,maxJobsNum) tuple to CondorDAG.__maxjobs_categories.
"""
self.__maxjobs_categories.append((str(categoryName),str(maxJobsNum))) | python | {
"resource": ""
} |
q39371 | CondorDAG.add_node | train | def add_node(self,node):
"""
Add a CondorDAGNode to this DAG. The CondorJob that the node uses is
also added to the list of Condor jobs in the DAG so that a list of the
submit files needed by the DAG can be maintained. Each unique CondorJob
will be added once to prevent duplicate submit files being written.
@param node: CondorDAGNode to add to the CondorDAG.
"""
if not isinstance(node, CondorDAGNode):
raise CondorDAGError, "Nodes must be class CondorDAGNode or subclass"
if not isinstance(node.job(), CondorDAGManJob):
node.set_log_file(self.__log_file_path)
self.__nodes.append(node)
if self.__integer_node_names:
node.set_name(str(self.__node_count))
self.__node_count += 1
if node.job() not in self.__jobs:
self.__jobs.append(node.job()) | python | {
"resource": ""
} |
q39372 | CondorDAG.write_maxjobs | train | def write_maxjobs(self,fh,category):
"""
Write the DAG entry for this category's maxjobs to the DAG file descriptor.
@param fh: descriptor of open DAG file.
@param category: tuple containing type of jobs to set a maxjobs limit for
and the maximum number of jobs of that type to run at once.
"""
fh.write( 'MAXJOBS ' + str(category[0]) + ' ' + str(category[1]) + '\n' ) | python | {
"resource": ""
} |
q39373 | CondorDAG.write_sub_files | train | def write_sub_files(self):
"""
Write all the submit files used by the dag to disk. Each submit file is
written to the file name set in the CondorJob.
"""
if not self.__nodes_finalized:
for node in self.__nodes:
node.finalize()
if not self.is_dax():
for job in self.__jobs:
job.write_sub_file() | python | {
"resource": ""
} |
q39374 | CondorDAG.write_concrete_dag | train | def write_concrete_dag(self):
"""
Write all the nodes in the DAG to the DAG file.
"""
if not self.__dag_file_path:
raise CondorDAGError, "No path for DAG file"
try:
dagfile = open( self.__dag_file_path, 'w' )
except:
raise CondorDAGError, "Cannot open file " + self.__dag_file_path
for node in self.__nodes:
node.write_job(dagfile)
node.write_vars(dagfile)
if node.get_category():
node.write_category(dagfile)
if node.get_priority():
node.write_priority(dagfile)
node.write_pre_script(dagfile)
node.write_post_script(dagfile)
node.write_input_files(dagfile)
node.write_output_files(dagfile)
for node in self.__nodes:
node.write_parents(dagfile)
for category in self.__maxjobs_categories:
self.write_maxjobs(dagfile, category)
dagfile.close() | python | {
"resource": ""
} |
q39375 | CondorDAG.write_dag | train | def write_dag(self):
"""
Write either a dag or a dax.
"""
if not self.__nodes_finalized:
for node in self.__nodes:
node.finalize()
self.write_concrete_dag()
self.write_abstract_dag() | python | {
"resource": ""
} |
q39376 | AnalysisJob.get_config | train | def get_config(self,sec,opt):
"""
Get the configration variable in a particular section of this jobs ini
file.
@param sec: ini file section.
@param opt: option from section sec.
"""
return string.strip(self.__cp.get(sec,opt)) | python | {
"resource": ""
} |
q39377 | AnalysisNode.set_ifo_tag | train | def set_ifo_tag(self,ifo_tag,pass_to_command_line=True):
"""
Set the ifo tag that is passed to the analysis code.
@param ifo_tag: a string to identify one or more IFOs
@bool pass_to_command_line: add ifo-tag as a variable option.
"""
self.__ifo_tag = ifo_tag
if pass_to_command_line:
self.add_var_opt('ifo-tag', ifo_tag) | python | {
"resource": ""
} |
q39378 | AnalysisNode.set_user_tag | train | def set_user_tag(self,usertag,pass_to_command_line=True):
"""
Set the user tag that is passed to the analysis code.
@param user_tag: the user tag to identify the job
@bool pass_to_command_line: add user-tag as a variable option.
"""
self.__user_tag = usertag
if pass_to_command_line:
self.add_var_opt('user-tag', usertag) | python | {
"resource": ""
} |
q39379 | AnalysisNode.calibration_cache_path | train | def calibration_cache_path(self):
"""
Determine the path to the correct calibration cache file to use.
"""
if self.__ifo and self.__start > 0:
cal_path = self.job().get_config('calibration','path')
# check if this is S2: split calibration epochs
if ( self.__LHO2k.match(self.__ifo) and
(self.__start >= 729273613) and (self.__start <= 734367613) ):
if self.__start < int(
self.job().get_config('calibration','H2-cal-epoch-boundary')):
cal_file = self.job().get_config('calibration','H2-1')
else:
cal_file = self.job().get_config('calibration','H2-2')
else:
# if not: just add calibration cache
cal_file = self.job().get_config('calibration',self.__ifo)
cal = os.path.join(cal_path,cal_file)
self.__calibration_cache = cal
else:
msg = "IFO and start-time must be set first"
raise CondorDAGNodeError, msg | python | {
"resource": ""
} |
q39380 | AnalysisNode.calibration | train | def calibration(self):
"""
Set the path to the calibration cache file for the given IFO.
During S2 the Hanford 2km IFO had two calibration epochs, so
if the start time is during S2, we use the correct cache file.
"""
# figure out the name of the calibration cache files
# as specified in the ini-file
self.calibration_cache_path()
if self.job().is_dax():
# new code for DAX
self.add_var_opt('glob-calibration-data','')
cache_filename=self.get_calibration()
pat = re.compile(r'(file://.*)')
f = open(cache_filename, 'r')
lines = f.readlines()
# loop over entries in the cache-file...
for line in lines:
m = pat.search(line)
if not m:
raise IOError
url = m.group(1)
# ... and add files to input-file list
path = urlparse.urlparse(url)[2]
calibration_lfn = os.path.basename(path)
self.add_input_file(calibration_lfn)
else:
# old .calibration for DAG's
self.add_var_opt('calibration-cache', self.__calibration_cache)
self.__calibration = self.__calibration_cache
self.add_input_file(self.__calibration) | python | {
"resource": ""
} |
q39381 | ScienceSegment.add_chunk | train | def add_chunk(self,start,end,trig_start=0,trig_end=0):
"""
Add an AnalysisChunk to the list associated with this ScienceSegment.
@param start: GPS start time of chunk.
@param end: GPS end time of chunk.
@param trig_start: GPS start time for triggers from chunk
"""
self.__chunks.append(AnalysisChunk(start,end,trig_start,trig_end)) | python | {
"resource": ""
} |
q39382 | ScienceData.tama_read | train | def tama_read(self,filename):
"""
Parse the science segments from a tama list of locked segments contained in
file.
@param filename: input text file containing a list of tama segments.
"""
self.__filename = filename
for line in open(filename):
columns = line.split()
id = int(columns[0])
start = int(math.ceil(float(columns[3])))
end = int(math.floor(float(columns[4])))
dur = end - start
x = ScienceSegment(tuple([id, start, end, dur]))
self.__sci_segs.append(x) | python | {
"resource": ""
} |
q39383 | ScienceData.make_chunks | train | def make_chunks(self,length,overlap=0,play=0,sl=0,excl_play=0,pad_data=0):
"""
Divide each ScienceSegment contained in this object into AnalysisChunks.
@param length: length of chunk in seconds.
@param overlap: overlap between segments.
@param play: if true, only generate chunks that overlap with S2 playground
data.
@param sl: slide by sl seconds before determining playground data.
@param excl_play: exclude the first excl_play second from the start and end
of the chunk when computing if the chunk overlaps with playground.
"""
for seg in self.__sci_segs:
seg.make_chunks(length,overlap,play,sl,excl_play,pad_data) | python | {
"resource": ""
} |
q39384 | ScienceData.make_short_chunks_from_unused | train | def make_short_chunks_from_unused(
self,min_length,overlap=0,play=0,sl=0,excl_play=0):
"""
Create a chunk that uses up the unused data in the science segment
@param min_length: the unused data must be greater than min_length to make a
chunk.
@param overlap: overlap between chunks in seconds.
@param play: if true, only generate chunks that overlap with S2 playground data.
@param sl: slide by sl seconds before determining playground data.
@param excl_play: exclude the first excl_play second from the start and end
of the chunk when computing if the chunk overlaps with playground.
"""
for seg in self.__sci_segs:
if seg.unused() > min_length:
start = seg.end() - seg.unused() - overlap
end = seg.end()
length = start - end
if (not play) or (play and (((end-sl-excl_play-729273613)%6370) <
(600+length-2*excl_play))):
seg.add_chunk(start, end, start)
seg.set_unused(0) | python | {
"resource": ""
} |
q39385 | ScienceData.make_optimised_chunks | train | def make_optimised_chunks(self, min_length, max_length, pad_data=0):
"""
Splits ScienceSegments up into chunks, of a given maximum length.
The length of the last two chunks are chosen so that the data
utilisation is optimised.
@param min_length: minimum chunk length.
@param max_length: maximum chunk length.
@param pad_data: exclude the first and last pad_data seconds of the
segment when generating chunks
"""
for seg in self.__sci_segs:
# pad data if requested
seg_start = seg.start() + pad_data
seg_end = seg.end() - pad_data
if seg.unused() > max_length:
# get number of max_length chunks
N = (seg_end - seg_start)/max_length
# split into chunks of max_length
for i in range(N-1):
start = seg_start + (i * max_length)
stop = start + max_length
seg.add_chunk(start, stop)
# optimise data usage for last 2 chunks
start = seg_start + ((N-1) * max_length)
middle = (start + seg_end)/2
seg.add_chunk(start, middle)
seg.add_chunk(middle, seg_end)
seg.set_unused(0)
elif seg.unused() > min_length:
# utilise as single chunk
seg.add_chunk(seg_start, seg_end)
else:
# no chunk of usable length
seg.set_unused(0) | python | {
"resource": ""
} |
q39386 | ScienceData.intersection | train | def intersection(self, other):
"""
Replaces the ScienceSegments contained in this instance of ScienceData
with the intersection of those in the instance other. Returns the number
of segments in the intersection.
@param other: ScienceData to use to generate the intersection
"""
# we only deal with the case of two lists here
length1 = len(self)
length2 = len(other)
# initialize list of output segments
ostart = -1
outlist = []
iseg2 = -1
start2 = -1
stop2 = -1
for seg1 in self:
start1 = seg1.start()
stop1 = seg1.end()
id = seg1.id()
# loop over segments from the second list which overlap this segment
while start2 < stop1:
if stop2 > start1:
# these overlap
# find the overlapping range
if start1 < start2:
ostart = start2
else:
ostart = start1
if stop1 > stop2:
ostop = stop2
else:
ostop = stop1
x = ScienceSegment(tuple([id, ostart, ostop, ostop-ostart]))
outlist.append(x)
if stop2 > stop1:
break
# step forward
iseg2 += 1
if iseg2 < len(other):
seg2 = other[iseg2]
start2 = seg2.start()
stop2 = seg2.end()
else:
# pseudo-segment in the far future
start2 = 2000000000
stop2 = 2000000000
# save the intersection and return the length
self.__sci_segs = outlist
return len(self) | python | {
"resource": ""
} |
q39387 | ScienceData.union | train | def union(self, other):
"""
Replaces the ScienceSegments contained in this instance of ScienceData
with the union of those in the instance other. Returns the number of
ScienceSegments in the union.
@param other: ScienceData to use to generate the intersection
"""
# we only deal with the case of two lists here
length1 = len(self)
length2 = len(other)
# initialize list of output segments
ostart = -1
seglist = []
i1 = -1
i2 = -1
start1 = -1
start2 = -1
id = -1
while 1:
# if necessary, get a segment from list 1
if start1 == -1:
i1 += 1
if i1 < length1:
start1 = self[i1].start()
stop1 = self[i1].end()
id = self[i1].id()
elif i2 == length2:
break
# if necessary, get a segment from list 2
if start2 == -1:
i2 += 1
if i2 < length2:
start2 = other[i2].start()
stop2 = other[i2].end()
elif i1 == length1:
break
# pick the earlier segment from the two lists
if start1 > -1 and ( start2 == -1 or start1 <= start2):
ustart = start1
ustop = stop1
# mark this segment has having been consumed
start1 = -1
elif start2 > -1:
ustart = start2
ustop = stop2
# mark this segment has having been consumed
start2 = -1
else:
break
# if the output segment is blank, initialize it; otherwise, see
# whether the new segment extends it or is disjoint
if ostart == -1:
ostart = ustart
ostop = ustop
elif ustart <= ostop:
if ustop > ostop:
# this extends the output segment
ostop = ustop
else:
# This lies entirely within the current output segment
pass
else:
# flush the current output segment, and replace it with the
# new segment
x = ScienceSegment(tuple([id,ostart,ostop,ostop-ostart]))
seglist.append(x)
ostart = ustart
ostop = ustop
# flush out the final output segment (if any)
if ostart != -1:
x = ScienceSegment(tuple([id,ostart,ostop,ostop-ostart]))
seglist.append(x)
self.__sci_segs = seglist
return len(self) | python | {
"resource": ""
} |
q39388 | ScienceData.coalesce | train | def coalesce(self):
"""
Coalesces any adjacent ScienceSegments. Returns the number of
ScienceSegments in the coalesced list.
"""
# check for an empty list
if len(self) == 0:
return 0
# sort the list of science segments
self.__sci_segs.sort()
# coalesce the list, checking each segment for validity as we go
outlist = []
ostop = -1
for seg in self:
start = seg.start()
stop = seg.end()
id = seg.id()
if start > ostop:
# disconnected, so flush out the existing segment (if any)
if ostop >= 0:
x = ScienceSegment(tuple([id,ostart,ostop,ostop-ostart]))
outlist.append(x)
ostart = start
ostop = stop
elif stop > ostop:
# extend the current segment
ostop = stop
# flush out the final segment (if any)
if ostop >= 0:
x = ScienceSegment(tuple([id,ostart,ostop,ostop-ostart]))
outlist.append(x)
self.__sci_segs = outlist
return len(self) | python | {
"resource": ""
} |
q39389 | ScienceData.play | train | def play(self):
"""
Keep only times in ScienceSegments which are in the playground
"""
length = len(self)
# initialize list of output segments
ostart = -1
outlist = []
begin_s2 = 729273613
play_space = 6370
play_len = 600
for seg in self:
start = seg.start()
stop = seg.end()
id = seg.id()
# select first playground segment which ends after start of seg
play_start = begin_s2+play_space*( 1 +
int((start - begin_s2 - play_len)/play_space) )
while play_start < stop:
if play_start > start:
ostart = play_start
else:
ostart = start
play_stop = play_start + play_len
if play_stop < stop:
ostop = play_stop
else:
ostop = stop
x = ScienceSegment(tuple([id, ostart, ostop, ostop-ostart]))
outlist.append(x)
# step forward
play_start = play_start + play_space
# save the playground segs and return the length
self.__sci_segs = outlist
return len(self) | python | {
"resource": ""
} |
q39390 | ScienceData.intersect_3 | train | def intersect_3(self, second, third):
"""
Intersection routine for three inputs. Built out of the intersect,
coalesce and play routines
"""
self.intersection(second)
self.intersection(third)
self.coalesce()
return len(self) | python | {
"resource": ""
} |
q39391 | ScienceData.intersect_4 | train | def intersect_4(self, second, third, fourth):
"""
Intersection routine for four inputs.
"""
self.intersection(second)
self.intersection(third)
self.intersection(fourth)
self.coalesce()
return len(self) | python | {
"resource": ""
} |
q39392 | ScienceData.split | train | def split(self, dt):
"""
Split the segments in the list is subsegments at least as long as dt
"""
outlist=[]
for seg in self:
start = seg.start()
stop = seg.end()
id = seg.id()
while start < stop:
tmpstop = start + dt
if tmpstop > stop:
tmpstop = stop
elif tmpstop + dt > stop:
tmpstop = int( (start + stop)/2 )
x = ScienceSegment(tuple([id,start,tmpstop,tmpstop-start]))
outlist.append(x)
start = tmpstop
# save the split list and return length
self.__sci_segs = outlist
return len(self) | python | {
"resource": ""
} |
q39393 | LsyncCache.group | train | def group(self, lst, n):
"""
Group an iterable into an n-tuples iterable. Incomplete
tuples are discarded
"""
return itertools.izip(*[itertools.islice(lst, i, None, n) for i in range(n)]) | python | {
"resource": ""
} |
q39394 | LSCDataFindNode.__set_output | train | def __set_output(self):
"""
Private method to set the file to write the cache to. Automaticaly set
once the ifo, start and end times have been set.
"""
if self.__start and self.__end and self.__observatory and self.__type:
self.__output = os.path.join(self.__job.get_cache_dir(), self.__observatory + '-' + self.__type +'_CACHE' + '-' + str(self.__start) + '-' + str(self.__end - self.__start) + '.lcf')
self.set_output(self.__output) | python | {
"resource": ""
} |
q39395 | LSCDataFindNode.set_start | train | def set_start(self,time,pad = None):
"""
Set the start time of the datafind query.
@param time: GPS start time of query.
"""
if pad:
self.add_var_opt('gps-start-time', int(time)-int(pad))
else:
self.add_var_opt('gps-start-time', int(time))
self.__start = time
self.__set_output() | python | {
"resource": ""
} |
q39396 | LSCDataFindNode.set_end | train | def set_end(self,time):
"""
Set the end time of the datafind query.
@param time: GPS end time of query.
"""
self.add_var_opt('gps-end-time', time)
self.__end = time
self.__set_output() | python | {
"resource": ""
} |
q39397 | LSCDataFindNode.set_type | train | def set_type(self,type):
"""
sets the frame type that we are querying
"""
self.add_var_opt('type',str(type))
self.__type = str(type)
self.__set_output() | python | {
"resource": ""
} |
q39398 | LigolwSqliteNode.set_xml_output | train | def set_xml_output(self, xml_file):
"""
Tell ligolw_sqlite to dump the contents of the database to a file.
"""
if self.get_database() is None:
raise ValueError, "no database specified"
self.add_file_opt('extract', xml_file)
self.__xml_output = xml_file | python | {
"resource": ""
} |
q39399 | LigolwSqliteNode.get_output | train | def get_output(self):
"""
Override standard get_output to return xml-file if xml-file is specified.
Otherwise, will return database.
"""
if self.__xml_output:
return self.__xml_output
elif self.get_database():
return self.get_database()
else:
raise ValueError, "no output xml file or database specified" | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.