code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def set_up(self):
    """Prepare a clean state directory, the story's scripts/files and the runner."""
    # Recreate the state directory from scratch for every run.
    state_dir = self.path.gen.joinpath("state")
    self.path.state = state_dir
    if state_dir.exists():
        state_dir.rmtree(ignore_errors=True)
    state_dir.mkdir()

    # Write the executable scripts declared in the story.
    for script_name in self.given.get("scripts", []):
        target = state_dir.joinpath(script_name)
        parent = target.dirname()
        if not parent.exists():
            parent.makedirs()
        target.write_text(self.given["scripts"][script_name])
        target.chmod("u+x")

    # Write the plain data files declared in the story.
    for relpath, body in self.given.get("files", {}).items():
        state_dir.joinpath(relpath).write_text(body)

    # Build the project for the requested interpreter and keep its python.
    build = hitchpylibrarytoolkit.project_build(
        "commandlib", self.path, self.given["python version"]
    )
    self.python = build.bin.python

    runner = ExamplePythonCode(self.python, state_dir)
    runner = runner.with_code(self.given.get("code", ""))
    self.example_py_code = runner.with_setup_code(self.given.get("setup", ""))
def distance_matrix(client, origins, destinations,
                    mode=None, language=None, avoid=None, units=None,
                    departure_time=None, arrival_time=None, transit_mode=None,
                    transit_routing_preference=None, traffic_model=None, region=None):
    """ Gets travel distance and time for a matrix of origins and destinations.

    :param origins: One or more locations and/or latitude/longitude values,
        from which to calculate distance and time. If you pass an address as
        a string, the service will geocode the string and convert it to a
        latitude/longitude coordinate to calculate directions.
    :type origins: a single location, or a list of locations, where a
        location is a string, dict, list, or tuple

    :param destinations: One or more addresses and/or lat/lng values, to
        which to calculate distance and time. If you pass an address as a
        string, the service will geocode the string and convert it to a
        latitude/longitude coordinate to calculate directions.
    :type destinations: a single location, or a list of locations, where a
        location is a string, dict, list, or tuple

    :param mode: Specifies the mode of transport to use when calculating
        directions. Valid values are "driving", "walking", "transit" or
        "bicycling".
    :type mode: string

    :param language: The language in which to return results.
    :type language: string

    :param avoid: Indicates that the calculated route(s) should avoid the
        indicated features. Valid values are "tolls", "highways" or "ferries".
    :type avoid: string

    :param units: Specifies the unit system to use when displaying results.
        Valid values are "metric" or "imperial".
    :type units: string

    :param departure_time: Specifies the desired time of departure.
    :type departure_time: int or datetime.datetime

    :param arrival_time: Specifies the desired time of arrival for transit
        directions. Note: you can't specify both departure_time and
        arrival_time.
    :type arrival_time: int or datetime.datetime

    :param transit_mode: Specifies one or more preferred modes of transit.
        This parameter may only be specified for requests where the mode is
        transit. Valid values are "bus", "subway", "train", "tram", "rail".
        "rail" is equivalent to ["train", "tram", "subway"].
    :type transit_mode: string or list of strings

    :param transit_routing_preference: Specifies preferences for transit
        requests. Valid values are "less_walking" or "fewer_transfers".
    :type transit_routing_preference: string

    :param traffic_model: Specifies the predictive travel time model to use.
        Valid values are "best_guess" or "optimistic" or "pessimistic".
        The traffic_model parameter may only be specified for requests where
        the travel mode is driving, and where the request includes a
        departure_time.
    :type traffic_model: string

    :param region: Specifies the preferred region the geocoder should search
        first, but it will not restrict the results to only this region. Valid
        values are a ccTLD code.
    :type region: string

    :raises ValueError: on an invalid travel mode or route restriction, or
        when both departure_time and arrival_time are supplied.

    :rtype: matrix of distances. Results are returned in rows, each row
        containing one origin paired with each destination.
    """
    params = {
        "origins": convert.location_list(origins),
        "destinations": convert.location_list(destinations)
    }

    if mode:
        # NOTE(broady): the mode parameter is not validated by the Maps API
        # server. Check here to prevent silent failures.
        if mode not in ["driving", "walking", "bicycling", "transit"]:
            raise ValueError("Invalid travel mode.")
        params["mode"] = mode

    if language:
        params["language"] = language

    if avoid:
        if avoid not in ["tolls", "highways", "ferries"]:
            raise ValueError("Invalid route restriction.")
        params["avoid"] = avoid

    if units:
        params["units"] = units

    if departure_time:
        params["departure_time"] = convert.time(departure_time)

    if arrival_time:
        params["arrival_time"] = convert.time(arrival_time)

    if departure_time and arrival_time:
        # BUG FIX: the implicitly-concatenated literals previously produced
        # "...departure_time andarrival_time." (missing space).
        raise ValueError("Should not specify both departure_time and "
                         "arrival_time.")

    if transit_mode:
        params["transit_mode"] = convert.join_list("|", transit_mode)

    if transit_routing_preference:
        params["transit_routing_preference"] = transit_routing_preference

    if traffic_model:
        params["traffic_model"] = traffic_model

    if region:
        params["region"] = region

    return client._request("/maps/api/distancematrix/json", params)
def check_views(view_set, max_views=3):
    """Ensures valid view/dimensions are selected."""
    # Wrap a bare scalar so the rest of the function can treat it uniformly.
    if not isinstance(view_set, Iterable):
        view_set = (view_set,)
    if len(view_set) > max_views:
        raise ValueError('Can only have {} views'.format(max_views))
    validated = []
    for view in view_set:
        validated.append(check_int(view, 'view', min_value=0, max_value=max_views - 1))
    return validated
def bindata(data, maxbins=30, reduction=0.1):
    '''
    Bin numeric data into a coarse, normalised density estimate.

    data must be numeric list with a len above 20.
    This function counts the number of data points in a reduced array.

    :param data: numeric sequence, len(data) > 20 (asserted).
    :param maxbins: upper bound on the number of bins.
    :param reduction: fraction of len(data) used as the bin count.
    :return: tuple (x, y) of bin centres and densities; the densities are
        normalised so that sum(y[i] * bin_width) == 1.
    '''
    tole = 0.01
    N = len(data)
    assert N > 20
    vmin = min(data)
    vmax = max(data)
    DV = vmax - vmin
    tol = tole * DV
    # Pad the range slightly so samples fall inside it; a non-negative
    # minimum is clamped at zero (densities for non-negative data should
    # not extend below 0).
    vmax += tol
    if vmin >= 0:
        vmin -= tol
        vmin = max(0.0, vmin)
    else:
        vmin -= tol
    n = min(maxbins, max(2, int(round(reduction * N))))
    DV = vmax - vmin
    bbin = npy.linspace(vmin, vmax, n + 1)
    sso = npy.searchsorted(bbin, npy.sort(data))
    # BUG FIX: when vmin is clamped to 0.0, samples equal to the minimum get
    # searchsorted index 0 and the y[i-1] update below wrapped them into the
    # LAST bin. Clamp indices into [1, n] so they land in the first bin.
    sso = npy.clip(sso, 1, n)
    x = []
    y = []
    for i in range(0, n):
        x.append(0.5 * (bbin[i + 1] + bbin[i]))
        y.append(0.0)
    dy = 1.0 / N
    for i in sso:
        y[i - 1] += dy / (bbin[i] - bbin[i - 1])
    return (x, y)
def parse(self, string, parent, module=True, filepath=None):
    """Extract modules *and* programs from a fortran code file.

    :arg string: the contents of the fortran code file.
    :arg parent: the instance of CodeParser that will own the return Module.
    :arg module: when true, the code file will be searched for modules;
        otherwise it will be searched for programs.
    :arg filepath: optional path of the file being parsed, forwarded as-is.
    """
    # Dispatch to the module or program parser depending on the flag.
    handler = self._parse_modules if module else self._parse_programs
    return handler(string, parent, filepath)
def weibo_url(self):
    """Return the user's weibo (microblog) link.

    :return: the weibo URL, 'unknown' when the profile page has no weibo
        anchor, or None when ``self.url`` is None.
    :rtype: str
    """
    if self.url is None:
        return None
    anchor = self.soup.find(
        'a', class_='zm-profile-header-user-weibo')
    if anchor is None:
        return 'unknown'
    return anchor['href']
def removeRef(self, doc):
    """Remove the given attribute from the Ref table maintained internally.

    :param doc: the owning document wrapper, or None.
    :return: the result of libxml2mod.xmlRemoveRef.
    """
    doc__o = None if doc is None else doc._o
    return libxml2mod.xmlRemoveRef(doc__o, self._o)
def attached(name, force=False):
    '''
    Ensure zone is attached

    name : string
        name of the zone
    force : boolean
        force attach the zone
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    zones = __salt__['zoneadm.list'](installed=True, configured=True)
    if name not in zones:
        ret['result'] = False
        ret['comment'] = 'zone {0} is not configured!'.format(name)
        return ret

    if zones[name]['state'] != 'configured':
        # Anything past 'configured' means the zone is already attached.
        ret['result'] = True
        ret['comment'] = 'zone {0} already attached.'.format(name)
        return ret

    # Dry runs report success without touching the zone.
    if __opts__['test']:
        res_attach = {'status': True}
    else:
        res_attach = __salt__['zoneadm.attach'](name, force)

    ret['result'] = res_attach['status']
    if ret['result']:
        ret['changes'][name] = 'attached'
        ret['comment'] = 'The zone {0} was attached.'.format(name)
    else:
        messages = ['Failed to attach zone {0}!'.format(name)]
        if 'message' in res_attach:
            messages.append(res_attach['message'])
        ret['comment'] = "\n".join(messages)
    return ret
def fields(depth, Rp, Rm, Gam, lrec, lsrc, zsrc, TM):
    r"""Calculate Pu+, Pu-, Pd+, Pd-.

    This is a modified version of empymod.kernel.fields(). See the original
    version for more information.

    :param depth: array of layer interface depths (``depth.size`` layers).
    :param Rp: global reflection coefficient, plus (downward) direction.
    :param Rm: global reflection coefficient, minus (upward) direction.
    :param Gam: vertical wavenumbers, indexed ``Gam[:, :, layer, :]``
        (axis semantics inherited from empymod -- see its docs).
    :param lrec: layer index of the receiver.
    :param lsrc: layer index of the source.
    :param zsrc: source depth.
    :param TM: True for TM mode, False for TE mode (flips the sign switch).
    :returns: tuple ``(Puu, Pud, Pdu, Pdd)`` of up-/downgoing field factors.
    """
    # Booleans if src in first or last layer; swapped if up=True
    first_layer = lsrc == 0
    last_layer = lsrc == depth.size-1

    # Depths; dp and dm are swapped if up=True
    # NOTE(review): ds/dp/dm stay undefined when the source is in the bottom
    # layer; the branches below that read them appear unreachable in that
    # case, but confirm against empymod's calling conventions.
    if lsrc != depth.size-1:
        ds = depth[lsrc+1]-depth[lsrc]
        dp = depth[lsrc+1]-zsrc
        dm = zsrc-depth[lsrc]

    # Rm and Rp; swapped if up=True
    Rmp = Rm
    Rpm = Rp

    # Boolean if plus or minus has to be calculated
    if TM:
        plus = False
    else:
        plus = True

    # Sign-switches
    pm = 1  # + if plus=True, - if plus=False
    if not plus:
        pm = -1

    # Calculate down- and up-going fields
    for up in [False, True]:
        # No upgoing field if rec is in last layer or below src
        if up and (lrec == depth.size-1 or lrec > lsrc):
            Puu = np.full_like(Gam[:, :, lsrc, :], 0+0j)
            Pud = np.full_like(Gam[:, :, lsrc, :], 0+0j)
            continue

        # No downgoing field if rec is in first layer or above src
        if not up and (lrec == 0 or lrec < lsrc):
            Pdu = np.full_like(Gam[:, :, lsrc, :], 0+0j)
            Pdd = np.full_like(Gam[:, :, lsrc, :], 0+0j)
            continue

        # Swaps if up=True
        if up:
            dp, dm = dm, dp
            Rmp, Rpm = Rpm, Rmp
            first_layer, last_layer = last_layer, first_layer

        # Calculate Pu+, Pu-, Pd+, Pd-; rec in src layer; Eqs 81/82, A-8/A-9
        iGam = Gam[:, :, lsrc, :]
        if last_layer:  # If src/rec are in top (up) or bottom (down) layer
            Pd = Rmp*np.exp(-iGam*dm)
            Pu = np.full_like(Gam[:, :, lsrc, :], 0+0j)
        else:  # If src and rec are in any layer in between
            Ms = 1 - Rmp*Rpm*np.exp(-2*iGam*ds)
            Pd = Rmp/Ms*np.exp(-iGam*dm)
            Pu = Rmp/Ms*pm*Rpm*np.exp(-iGam*(ds+dp))

        # Store P's
        if up:
            Puu = Pu
            Pud = Pd
        else:
            # NOTE(review): the Pd->Pdu / Pu->Pdd naming is swapped relative
            # to the `up` branch -- confirm against empymod.kernel.fields.
            Pdu = Pd
            Pdd = Pu

    # Return fields (up- and downgoing)
    return Puu, Pud, Pdu, Pdd
def var_explained(y_true, y_pred):
    """Return the fraction of variance in ``y_true`` explained by ``y_pred``.

    Computed as ``1 - Var(y_true - y_pred) / Var(y_true)`` after NaN masking.
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    unexplained_ratio = np.var(y_true - y_pred) / np.var(y_true)
    return 1 - unexplained_ratio
def get_flanker(group, query):
    """
    >>> get_flanker([(370, 15184), (372, 15178), (373, 15176), (400, 15193)], 385)
    ((373, 15176), (400, 15193), True)
    >>> get_flanker([(124, 13639), (137, 13625)], 138)
    ((137, 13625), (137, 13625), False)
    """
    group.sort()
    pos = bisect_left(group, (query, 0))
    left = group[pos - 1] if pos else group[0]
    right = group[pos] if pos < len(group) else group[-1]
    # Whichever neighbour lies closer to the query becomes the flanker.
    if abs(query - left[0]) < abs(query - right[0]):
        flanker, other = left, right
    else:
        flanker, other = right, left
    at_edge = pos == 0 or pos == len(group)
    # NOTE(review): ``flanker == query`` compares a tuple with an int and is
    # therefore always False; preserved as-is -- confirm whether
    # ``flanker[0] == query`` was intended.
    flanked = not (at_edge or flanker == query)
    return flanker, other, flanked
def set_link(self, prop, value):
    """Set the given link property in the CTS namespace.

    .. example::
        collection.set_link(NAMESPACES.CTS.about, "urn:cts:latinLit:phi1294.phi002")

    :param prop: Property to set (Without namespace)
    :param value: Value to set for given property
    """
    # Links are recorded as rdflib URIRefs (identifiers such as URNs/URLs),
    # never as plain Literals; coerce anything else before storing it.
    coerced = value if isinstance(value, URIRef) else URIRef(value)
    self.metadata.add(prop, coerced)
def _get_rsa_public_key(cert):
    """
    PyOpenSSL does not provide a public method to export the public key from a certificate as a properly formatted
    ASN.1 RSAPublicKey structure. There are 'hacks' which use dump_privatekey(crypto.FILETYPE_ASN1, <public_key>),
    but this dumps the public key within a PrivateKeyInfo structure which is not suitable for a comparison. This
    approach uses the PyOpenSSL CFFI bindings to invoke the i2d_RSAPublicKey() which correctly extracts the key
    material in an ASN.1 RSAPublicKey structure.

    :param cert: The ASN.1 Encoded Certificate
    :return: The ASN.1 Encoded RSAPublicKey structure containing the supplied certificates public Key
    """
    openssl_pkey = cert.get_pubkey()
    openssl_lib = _util.binding.lib
    ffi = _util.binding.ffi
    # i2d_RSAPublicKey allocates the output buffer itself when handed a
    # pointer to a NULL pointer, so give it an unsigned char** to fill in.
    buf = ffi.new("unsigned char **")
    # NOTE(review): EVP_PKEY_get1_RSA bumps the RSA refcount and there is no
    # matching RSA_free here -- confirm whether this leaks per call.
    rsa = openssl_lib.EVP_PKEY_get1_RSA(openssl_pkey._pkey)
    length = openssl_lib.i2d_RSAPublicKey(rsa, buf)
    # Copy the DER bytes out of the C buffer into a Python bytes object.
    public_key = ffi.buffer(buf[0], length)[:]
    # Attach OPENSSL_free so the C buffer is released when the cdata is
    # garbage collected (the data has already been copied above).
    ffi.gc(buf[0], openssl_lib.OPENSSL_free)
    return public_key
def run(self):
    """Start the oplog worker.

    Tails the MongoDB oplog until ``self.running`` is cleared, replaying
    each delete/insert/update/command entry into every attached doc
    manager and periodically persisting a checkpoint timestamp so a
    restarted thread can resume where it left off.
    """
    ReplicationLagLogger(self, 30).start()
    LOG.debug("OplogThread: Run thread started")
    while self.running is True:
        LOG.debug("OplogThread: Getting cursor")
        cursor, cursor_empty = retry_until_ok(self.init_cursor)
        # we've fallen too far behind
        if cursor is None and self.checkpoint is not None:
            err_msg = "OplogThread: Last entry no longer in oplog"
            effect = "cannot recover!"
            LOG.error("%s %s %s" % (err_msg, effect, self.oplog))
            self.running = False
            continue

        if cursor_empty:
            LOG.debug(
                "OplogThread: Last entry is the one we "
                "already processed. Up to date. Sleeping."
            )
            time.sleep(1)
            continue

        last_ts = None
        remove_inc = 0
        upsert_inc = 0
        update_inc = 0
        try:
            LOG.debug("OplogThread: about to process new oplog entries")
            while cursor.alive and self.running:
                LOG.debug(
                    "OplogThread: Cursor is still"
                    " alive and thread is still running."
                )
                for n, entry in enumerate(cursor):
                    # Break out if this thread should stop
                    if not self.running:
                        break

                    LOG.debug(
                        "OplogThread: Iterating through cursor,"
                        " document number in this cursor is %d" % n
                    )

                    skip, is_gridfs_file = self._should_skip_entry(entry)
                    if skip:
                        # update the last_ts on skipped entries to ensure
                        # our checkpoint does not fall off the oplog. This
                        # also prevents reprocessing skipped entries.
                        last_ts = entry["ts"]
                        continue

                    # Sync the current oplog operation
                    operation = entry["op"]
                    ns = entry["ns"]
                    timestamp = util.bson_ts_to_long(entry["ts"])
                    # Fan each entry out to every configured doc manager.
                    for docman in self.doc_managers:
                        try:
                            LOG.debug(
                                "OplogThread: Operation for this "
                                "entry is %s" % str(operation)
                            )

                            # Remove
                            if operation == "d":
                                docman.remove(entry["o"]["_id"], ns, timestamp)
                                remove_inc += 1

                            # Insert
                            elif operation == "i":  # Insert
                                # Retrieve inserted document from
                                # 'o' field in oplog record
                                doc = entry.get("o")
                                # Extract timestamp and namespace
                                if is_gridfs_file:
                                    db, coll = ns.split(".", 1)
                                    gridfile = GridFSFile(
                                        self.primary_client[db][coll], doc
                                    )
                                    docman.insert_file(gridfile, ns, timestamp)
                                else:
                                    docman.upsert(doc, ns, timestamp)
                                upsert_inc += 1

                            # Update
                            elif operation == "u":
                                docman.update(
                                    entry["o2"]["_id"], entry["o"], ns, timestamp
                                )
                                update_inc += 1

                            # Command
                            elif operation == "c":
                                # use unmapped namespace
                                doc = entry.get("o")
                                docman.handle_command(doc, entry["ns"], timestamp)

                        # Per-docman failures are logged but do not stop
                        # replication for the other managers.
                        except errors.OperationFailed:
                            LOG.exception(
                                "Unable to process oplog document %r" % entry
                            )
                        except errors.ConnectionFailed:
                            LOG.exception(
                                "Connection failed while processing oplog "
                                "document %r" % entry
                            )

                    if (remove_inc + upsert_inc + update_inc) % 1000 == 0:
                        LOG.debug(
                            "OplogThread: Documents removed: %d, "
                            "inserted: %d, updated: %d so far"
                            % (remove_inc, upsert_inc, update_inc)
                        )

                    LOG.debug("OplogThread: Doc is processed.")

                    last_ts = entry["ts"]

                    # update timestamp per batch size
                    # NOTE: with the default batch_size of -1, n % -1 == 0
                    # for all n, so this condition never holds and the
                    # checkpoint is only written after the cursor drains.
                    if n % self.batch_size == 1:
                        self.update_checkpoint(last_ts)
                        last_ts = None

                # update timestamp after running through oplog
                if last_ts is not None:
                    LOG.debug(
                        "OplogThread: updating checkpoint after "
                        "processing new oplog entries"
                    )
                    self.update_checkpoint(last_ts)

        except (
            pymongo.errors.AutoReconnect,
            pymongo.errors.OperationFailure,
            pymongo.errors.ConfigurationError,
        ):
            LOG.exception(
                "Cursor closed due to an exception. " "Will attempt to reconnect."
            )

        # update timestamp before attempting to reconnect to MongoDB,
        # after being join()'ed, or if the cursor closes
        if last_ts is not None:
            LOG.debug(
                "OplogThread: updating checkpoint after an "
                "Exception, cursor closing, or join() on this"
                "thread."
            )
            self.update_checkpoint(last_ts)

        LOG.debug(
            "OplogThread: Sleeping. Documents removed: %d, "
            "upserted: %d, updated: %d" % (remove_inc, upsert_inc, update_inc)
        )
        time.sleep(2)
def _contents(self):
"""Define the contents of new Infos.
transmit() -> _what() -> create_information() -> _contents().
"""
stories = [
"ghosts.md",
"cricket.md",
"moochi.md",
"outwit.md",
"raid.md",
"species.md",
"tennis.md",
"vagabond.md"
]
story = random.choice(stories)
with open("static/stimuli/{}".format(story), "r") as f:
return f.read() | Define the contents of new Infos.
transmit() -> _what() -> create_information() -> _contents(). |
def patch_memcache():
    """Monkey patch python-memcached to implement our consistent hashring
    in its node selection and operations.

    Replaces ``memcache.Client.__init__`` and ``_get_server`` at call time;
    the originals are preserved as ``_old_init`` / ``_old_get_server``.
    """
    def _init(self, servers, *k, **kw):
        # Run the stock initialiser first, then build the hashring from the
        # server objects it parsed.
        self._old_init(servers, *k, **kw)
        nodes = {}
        for server in self.servers:
            conf = {
                'hostname': server.ip,
                'instance': server,
                'port': server.port,
                'weight': server.weight
            }
            # NOTE(review): keyed by IP only -- two servers sharing an IP on
            # different ports would overwrite each other; confirm intent.
            nodes[server.ip] = conf
        self.uhashring = HashRing(nodes)

    def _get_server(self, key):
        # Tuple keys carry an explicit server index; defer those to the
        # stock implementation.
        if isinstance(key, tuple):
            return self._old_get_server(key)
        for i in range(self._SERVER_RETRIES):
            # Walk the ring until a connectable node is found.
            for node in self.uhashring.range(key):
                if node['instance'].connect():
                    return node['instance'], key
        return None, None

    memcache = __import__('memcache')
    memcache.Client._old_get_server = memcache.Client._get_server
    memcache.Client._old_init = memcache.Client.__init__
    memcache.Client.__init__ = _init
    memcache.Client._get_server = _get_server
def p_sens_all_paren(self, p):
    'senslist : AT LPAREN TIMES RPAREN'
    # NOTE: the string above is a PLY grammar rule, not documentation --
    # ply.yacc reads it to register this production; do not edit it.
    # Build a sensitivity list holding the single wildcard ("all")
    # sensitivity, i.e. Verilog's "@(*)" form.
    p[0] = SensList(
        (Sens(None, 'all', lineno=p.lineno(1)),), lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
def handshake_peers(self):
    '''
    Attempt the BitTorrent handshake against each known peer address.

    Handshake packet layout:
    pstrlen = length of pstr as one byte
    pstr = BitTorrent protocol
    reserved = chr(0)*8
    info_hash = 20-byte hash above (aka self.hash_string)
    peer_id = 20-byte string
    '''
    pstr = 'BitTorrent protocol'
    pstrlen = len(pstr)
    info_hash = self.hash_string
    peer_id = self.peer_id
    packet = ''.join([chr(pstrlen), pstr, chr(0) * 8, info_hash,
                      peer_id])
    print "Here's my packet {}".format(repr(packet))
    # TODO -- add some checks in here so that I'm talking
    # to a maximum of 30 peers
    # TODO -- think about why i'm deleting self.peer_ips.
    # What was the point of it? Why won't I need it?
    # Think about what we're doing -- using this list to create
    # new peer objects. Should make this functional, that way I
    # can also call when I get new peers.
    for i in self.peer_ips:
        # Stop handshaking once 30 peers have been registered.
        if len(self.peer_dict) >= 30:
            break
        # Fresh blocking socket with a short timeout per peer.
        s = socket.socket()
        s.setblocking(True)
        s.settimeout(0.5)
        try:
            s.connect(i)
        except socket.timeout:
            print '{} timed out on connect'.format(s.fileno())
            continue
        except socket.error:
            print '{} threw a socket error'.format(s.fileno())
            continue
        except:
            raise Exception
        s.send(packet)
        try:
            data = s.recv(68)  # Peer's handshake - len from docs
            if data:
                print 'From {} received: {}'.format(s.fileno(), repr(data))
                self.initpeer(s)
        except:
            print '{} timed out on recv'.format(s.fileno())
            continue
    else:
        # for/else: runs only when the loop was NOT broken out of, i.e. we
        # tried every address without hitting the 30-peer cap, so the
        # pending address list is cleared.
        self.peer_ips = []
def do_fit(self, event):
    """Re-fit the window to the size of the content."""
    if event:
        event.Skip()
    self.main_sizer.Fit(self)
    screen_height = wx.GetDisplaySize()[1]
    width, height = self.GetSize()
    # NOTE(review): `rows` is unused; the call is kept to preserve behaviour.
    rows = self.grid.GetNumberRows()
    # Shrink the frame when the content would not fit on screen.
    if height > screen_height - 75:
        self.SetSize((width, screen_height * .95))
    self.Centre()
def make_blastdb(self):
    """
    Create a BLAST database of the primer file
    """
    # remove the path and the file extension for easier future globbing
    db = os.path.splitext(self.formattedprimers)[0]
    nhr = '{db}.nhr'.format(db=db)  # add nhr for searching
    # Nothing to do when the database files already exist.
    if os.path.isfile(str(nhr)):
        return
    # Create the databases
    command = 'makeblastdb -in {primerfile} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {outfile}'\
        .format(primerfile=self.formattedprimers,
                outfile=db)
    run_subprocess(command)
def inet_to_str(inet):
    """Convert a packed inet address to its printable string form.

    Args:
        inet (inet struct): inet network address
    Returns:
        str: Printable/readable IP address
    """
    # Most traffic is IPv4, so try that first; a packed IPv6 address has the
    # wrong length for AF_INET and raises ValueError.
    try:
        return socket.inet_ntop(socket.AF_INET, inet)
    except ValueError:
        pass
    return socket.inet_ntop(socket.AF_INET6, inet)
def _build_tree(self, position, momentum, slice_var, direction, depth, stepsize):
    """
    Recursively builds a tree for proposing new position and momentum
    (the doubling procedure of the No-U-Turn sampler).

    :param position: current position (theta)
    :param momentum: current momentum (r)
    :param slice_var: auxiliary slice variable (u)
    :param direction: -1 or +1; side on which the tree is grown (v)
    :param depth: recursion depth (j); a depth-j subtree covers 2**j leapfrog steps
    :param stepsize: leapfrog stepsize (epsilon)
    :return: tuple (position_backward, momentum_backward, position_forward,
        momentum_forward, position_bar, candidate_set_size, accept_set_bool)
    """
    # Parameter names in algorithm (here -> representation in algorithm)
    # position -> theta, momentum -> r, slice_var -> u, direction -> v, depth ->j, stepsize -> epsilon
    # candidate_set_size -> n, accept_set_bool -> s
    if depth == 0:
        # Take single leapfrog step in the given direction (direction * stepsize)
        position_bar, momentum_bar, candidate_set_size, accept_set_bool =\
            self._initalize_tree(position, momentum, slice_var, direction * stepsize)

        # A single leaf: both tree ends and the proposal coincide.
        return (position_bar, momentum_bar, position_bar, momentum_bar, position_bar,
                candidate_set_size, accept_set_bool)
    else:
        # Build left and right subtrees
        (position_backward, momentum_backward, position_forward, momentum_forward, position_bar,
         candidate_set_size, accept_set_bool) = self._build_tree(position, momentum,
                                                                 slice_var, direction, depth - 1, stepsize)

        if accept_set_bool == 1:
            if direction == -1:
                # Build tree in backward direction
                (position_backward, momentum_backward, _, _, position_bar2, candidate_set_size2,
                 accept_set_bool2) = self._build_tree(position_backward, momentum_backward,
                                                      slice_var, direction, depth - 1, stepsize)
            else:
                # Build tree in forward direction
                (_, _, position_forward, momentum_forward, position_bar2, candidate_set_size2,
                 accept_set_bool2) = self._build_tree(position_forward, momentum_forward,
                                                      slice_var, direction, depth - 1, stepsize)

            # Swap in the second subtree's proposal with probability
            # n2 / (n2 + n).
            if np.random.rand() < candidate_set_size2 / (candidate_set_size2 + candidate_set_size):
                position_bar = position_bar2

            accept_set_bool, candidate_set_size =\
                self._update_acceptance_criteria(position_forward, position_backward, momentum_forward,
                                                 momentum_backward, accept_set_bool2, candidate_set_size,
                                                 candidate_set_size2)

        return (position_backward, momentum_backward, position_forward, momentum_forward,
                position_bar, candidate_set_size, accept_set_bool)
def add_edge_configuration(self, param_name, edge, param_value):
    """
    Set a parameter for a given edge

    :param param_name: parameter identifier (as specified by the chosen model)
    :param edge: edge identifier
    :param param_value: parameter value
    """
    # Create the per-parameter mapping on first use, then record the value.
    edge_params = self.config['edges'].setdefault(param_name, {})
    edge_params[edge] = param_value
def _registered(self):
    """
    An optional boolean property indicating whether this job store is registered. The
    registry is the authority on deciding if a job store exists or not. If True, this job
    store exists, if None the job store is transitioning from True to False or vice versa,
    if False the job store doesn't exist.

    :type: bool|None
    """
    # The weird mapping of the SDB item attribute value to the property value is due to
    # backwards compatibility. 'True' becomes True, that's easy. Toil < 3.3.0 writes this at
    # the end of job store creation. Absence of either the registry, the item or the
    # attribute becomes False, representing a truly absent, non-existing job store. An
    # attribute value of 'False', which is what Toil < 3.3.0 writes at the *beginning* of job
    # store destruction, indicates a job store in transition, reflecting the fact that 3.3.0
    # may leak buckets or domains even though the registry reports 'False' for them. We
    # can't handle job stores that were partially created by 3.3.0, though.
    registry_domain = self._bindDomain(domain_name='toil-registry',
                                       create=False,
                                       block=False)
    if registry_domain is None:
        # No registry domain at all: the job store cannot be registered.
        return False
    else:
        for attempt in retry_sdb():
            with attempt:
                attributes = registry_domain.get_attributes(item_name=self.namePrefix,
                                                            attribute_name='exists',
                                                            consistent_read=True)
                try:
                    exists = attributes['exists']
                except KeyError:
                    return False
                else:
                    if exists == 'True':
                        return True
                    elif exists == 'False':
                        return None
                    else:
                        # Any other value means the registry is corrupt;
                        # fail loudly rather than guess.
                        assert False
def make_iterable(obj, default=None):
    """Return *obj* as an iterable.

    None maps to ``default`` (or ``[]``); string and integer scalars are
    wrapped in a single-element list; everything else passes through.
    """
    if obj is None:
        return default or []
    scalar_types = (compat.string_types, compat.integer_types)
    return [obj] if isinstance(obj, scalar_types) else obj
def get_filename(self):
    ''' Yield the source filename of the current Stim.

    When the stim has no readable file on disk, its data is saved to a
    temporary file first; that file is removed again once the consumer is
    done with it (generator / context-manager protocol).

    Yields:
        str: path to a readable file containing the stim's data.
    '''
    if self.filename is None or not os.path.exists(self.filename):
        # NOTE: tempfile.mktemp() is race-prone/deprecated, but save()
        # needs a path that does not yet exist, so it is kept here.
        tf = tempfile.mktemp() + self._default_file_extension
        self.save(tf)
        try:
            yield tf
        finally:
            # BUG FIX: previously the temp file leaked if the consumer
            # raised while this generator was suspended at ``yield``.
            os.remove(tf)
    else:
        yield self.filename
def insert_attribute(self, att, index):
    """
    Inserts the attribute at the specified location.

    Delegates to the wrapped JVM object's ``insertAttributeAt(Attribute, int)``
    method via javabridge; the call returns void, so the underlying dataset
    is modified in place.

    :param att: the attribute to insert
    :type att: Attribute
    :param index: the index to insert the attribute at
    :type index: int
    """
    javabridge.call(self.jobject, "insertAttributeAt", "(Lweka/core/Attribute;I)V", att.jobject, index)
def rmdir(self, pathobj):
    """Remove the directory that *pathobj* points to.

    Raises OSError(20) when the target is not a directory and RuntimeError
    when the server rejects the deletion.
    """
    info = self.stat(pathobj)
    if not info.is_dir:
        raise OSError(20, "Not a directory: '%s'" % str(pathobj))
    # Directory deletes are addressed with a trailing slash.
    text, code = self.rest_del(
        str(pathobj) + '/',
        session=pathobj.session,
        verify=pathobj.verify,
        cert=pathobj.cert,
    )
    if code not in (200, 202, 204):
        raise RuntimeError("Failed to delete directory: '%s'" % text)
def _proc_pax(self, filetar):
    """Process an extended or global header as described in POSIX.1-2001.

    Parses the pax records stored in this member's data block into the
    tar file's header dictionary, then reads the next member and, for
    extended headers, patches it with the parsed keyword/value pairs.

    :param filetar: the TarFile instance being read; its ``fileobj`` is
        positioned at the start of this header's data block
    :return: the next TarInfo object, possibly patched with pax info
    :raises tarfile.SubsequentHeaderError: if the following header is
        missing or malformed
    """
    # Read the header information.
    buf = filetar.fileobj.read(self._block(self.size))
    # A pax header stores supplemental information for either
    # the following file (extended) or all following files
    # (global).
    if self.type == tarfile.XGLTYPE:
        pax_headers = filetar.pax_headers
    else:
        pax_headers = filetar.pax_headers.copy()
    # Parse pax header information. A record looks like that:
    # "%d %s=%s\n" % (length, keyword, value). length is the size
    # of the complete record including the length field itself and
    # the newline. keyword and value are both UTF-8 encoded strings.
    regex = re.compile(r"(\d+) ([^=]+)=", re.U)
    pos = 0
    while True:
        match = regex.match(buf, pos)
        if not match:
            break
        length, keyword = match.groups()
        length = int(length)
        value = buf[match.end(2) + 1:match.start(1) + length - 1]
        # Best-effort decode: on failure the raw bytes are kept
        # (broad except preserved for behavioral parity).
        try:
            keyword = keyword.decode("utf8")
        except Exception:
            pass
        try:
            value = value.decode("utf8")
        except Exception:
            pass
        pax_headers[keyword] = value
        pos += length
    # Fetch the next header.
    try:
        next = self.fromtarfile(filetar)
    except tarfile.HeaderError:
        raise tarfile.SubsequentHeaderError("missing or bad subsequent header")
    if self.type in (tarfile.XHDTYPE, tarfile.SOLARIS_XHDTYPE):
        # Patch the TarInfo object with the extended header info.
        next._apply_pax_info(pax_headers, filetar.encoding, filetar.errors)
        next.offset = self.offset
        if "size" in pax_headers:
            # If the extended header replaces the size field,
            # we need to recalculate the offset where the next
            # header starts.
            offset = next.offset_data
            if next.isreg() or next.type not in tarfile.SUPPORTED_TYPES:
                offset += next._block(next.size)
            filetar.offset = offset
    return next
def add(env, securitygroup_id, remote_ip, remote_group,
        direction, ethertype, port_max, port_min, protocol):
    """Add a security group rule to a security group.
    \b
    Examples:
    # Add an SSH rule (TCP port 22) to a security group
    slcli sg rule-add 384727 \\
    --direction ingress \\
    --protocol tcp \\
    --port-min 22 \\
    --port-max 22
    \b
    # Add a ping rule (ICMP type 8 code 0) to a security group
    slcli sg rule-add 384727 \\
    --direction ingress \\
    --protocol icmp \\
    --port-min 8 \\
    --port-max 0
    """
    manager = SoftLayer.NetworkManager(env.client)
    result = manager.add_securitygroup_rule(securitygroup_id, remote_ip,
                                            remote_group, direction,
                                            ethertype, port_max, port_min,
                                            protocol)
    if not result:
        raise exceptions.CLIAbort("Failed to add security group rule")
    table = formatting.Table(REQUEST_RULES_COLUMNS)
    table.add_row([result['requestId'], str(result['rules'])])
    env.fout(table)
\b
Examples:
# Add an SSH rule (TCP port 22) to a security group
slcli sg rule-add 384727 \\
--direction ingress \\
--protocol tcp \\
--port-min 22 \\
--port-max 22
\b
# Add a ping rule (ICMP type 8 code 0) to a security group
slcli sg rule-add 384727 \\
--direction ingress \\
--protocol icmp \\
--port-min 8 \\
--port-max 0 |
def get_atlas_summary_df(self):
    """Create a single data frame which summarizes all genes per row.

    Iterates over every gene with a representative sequence in the
    reference GEM-PRO, collecting protein, representative-sequence,
    alignment/mutation and representative-structure annotations into one
    row per gene.

    Returns:
        DataFrame: Pandas DataFrame of the results
    """
    all_info = []
    for g in self.reference_gempro.genes_with_a_representative_sequence:
        info = {}
        info['Gene_ID'] = g.id
        info['Gene_name'] = g.name
        # Protein object
        p = g.protein
        info['Protein_sequences'] = len(p.sequences)
        info['Protein_structures'] = len(p.structures)
        # SeqProp
        rseq = p.representative_sequence
        info['RepSeq_ID'] = rseq.id
        info['RepSeq_sequence_length'] = rseq.seq_len
        info['RepSeq_num_sequence_alignments'] = len([x for x in p.sequence_alignments if x.annotations['ssbio_type'] == 'seqalign'])
        info['RepSeq_num_structure_alignments'] = len([x for x in p.sequence_alignments if x.annotations['ssbio_type'] == 'structalign'])
        # SeqRecord annotations (properties calculated that summarize the whole sequence)
        for annotation_name, annotation in rseq.annotations.items():
            info['RepSeq_' + annotation_name] = annotation
        # SeqRecord alignment annotations
        all_num_mutations = []
        all_num_deletions = []
        all_len_deletions = []
        all_num_insertions = []
        all_len_insertions = []
        all_percent_identity = []
        all_percent_similarity = []
        for aln in p.sequence_alignments:
            # Gather the strain speicific stuff
            if '{}_'.format(p.id) not in aln.annotations['b_seq']:
                continue
            info[aln.annotations['b_seq'].split('{}_'.format(p.id))[1]] = aln.annotations['percent_identity']
            # Gather the percent identities/similarities
            all_percent_identity.append(aln.annotations['percent_identity'])
            all_percent_similarity.append(aln.annotations['percent_similarity'])
            # Gather the number of residues that are mutated (filter for different mutations of same residue)
            num_mutations = len(list(set([x[1] for x in aln.annotations['mutations']])))
            all_num_mutations.append(num_mutations)
            # Gather the number of deletions as well as the length of the deletion
            if not aln.annotations['deletions']:
                num_deletions = 0
                len_deletions = [0]
            else:
                num_deletions = len(aln.annotations['deletions'])
                len_deletions = [x[1] for x in aln.annotations['deletions']]
            all_num_deletions.append(num_deletions)
            # Get the total length of the deletion for this one strain
            avg_len_deletions = np.sum(len_deletions)
            all_len_deletions.append(avg_len_deletions)
            # Gather the number of insertions as well as the length of the insertion
            if not aln.annotations['insertions']:
                num_insertions = 0
                len_insertions = [0]
            else:
                num_insertions = len(aln.annotations['insertions'])
                len_insertions = [x[1] for x in aln.annotations['insertions']]
            all_num_insertions.append(num_insertions)
            # Get the total length of insertion for this one strain
            avg_len_insertions = np.sum(len_insertions)
            all_len_insertions.append(avg_len_insertions)
        info['ATLAS_mean_num_mutations'] = np.mean(all_num_mutations)
        info['ATLAS_mean_num_deletions'] = np.mean(all_num_deletions)
        info['ATLAS_mean_len_deletions'] = np.mean(all_len_deletions)
        info['ATLAS_mean_num_insertions'] = np.mean(all_num_insertions)
        info['ATLAS_mean_len_insertions'] = np.mean(all_len_insertions)
        info['ATLAS_mean_percent_identity'] = np.mean(all_percent_identity)
        info['ATLAS_mean_percent_similarity'] = np.mean(all_percent_similarity)
        # Other mutation analysis
        single, fingerprint = p.sequence_mutation_summary()
        # Mutations that show up in at least 1% of strains (threshold 0.01)
        singles = []
        for k, v in single.items():
            k = [str(x) for x in k]
            if len(v) / len(p.sequence_alignments) >= 0.01:
                singles.append(''.join(k))  # len(v) is the number of strains which have this mutation
        info['ATLAS_popular_mutations'] = ';'.join(singles)
        # Mutation groups that show up in at least 1% of strains (threshold 0.01)
        allfingerprints = []
        for k, v in fingerprint.items():
            if len(v) / len(p.sequence_alignments) >= 0.01:
                fingerprints = []
                for m in k:
                    y = [str(x) for x in m]
                    fingerprints.append(''.join(y))
                allfingerprints.append('-'.join(fingerprints))
        info['ATLAS_popular_mutation_groups'] = ';'.join(allfingerprints)
        # StructProp
        rstruct = p.representative_structure
        if rstruct:
            if rstruct.structure_file:
                info['RepStruct_ID'] = rstruct.id
                info['RepStruct_is_experimental'] = rstruct.is_experimental
                info['RepStruct_description'] = rstruct.description
                info['RepStruct_repseq_coverage'] = p.representative_chain_seq_coverage
                # ChainProp
                rchain = p.representative_chain
                info['RepChain_ID'] = rchain
                # ChainProp SeqRecord annotations
                rchain_sr = rstruct.chains.get_by_id(rchain).seq_record
                for annotation_name, annotation in rchain_sr.annotations.items():
                    info['RepChain_' + annotation_name] = annotation
        all_info.append(info)
    # Fixed column order; strain-specific identity columns are appended last.
    cols = ['Gene_ID', 'Gene_name', 'Protein_sequences', 'Protein_structures',
            'RepSeq_ID', 'RepSeq_sequence_length',
            'RepSeq_num_sequence_alignments', 'RepSeq_num_structure_alignments',
            'RepStruct_ID', 'RepChain_ID', 'RepStruct_description',
            'RepStruct_is_experimental', 'RepStruct_repseq_coverage',
            'ATLAS_mean_percent_identity', 'ATLAS_mean_percent_similarity', 'ATLAS_mean_num_mutations',
            'ATLAS_popular_mutations', 'ATLAS_popular_mutation_groups', 'ATLAS_mean_num_deletions',
            'ATLAS_mean_num_insertions', 'ATLAS_mean_len_deletions', 'ATLAS_mean_len_insertions',
            'RepSeq_aromaticity', 'RepSeq_instability_index', 'RepSeq_isoelectric_point', 'RepSeq_molecular_weight',
            'RepSeq_monoisotopic', 'RepSeq_num_tm_helix-tmhmm', 'RepSeq_percent_acidic', 'RepSeq_percent_aliphatic',
            'RepSeq_percent_aromatic', 'RepSeq_percent_B-sspro8', 'RepSeq_percent_basic',
            'RepSeq_percent_buried-accpro', 'RepSeq_percent_buried-accpro20', 'RepSeq_percent_C-sspro',
            'RepSeq_percent_C-sspro8', 'RepSeq_percent_charged', 'RepSeq_percent_E-sspro',
            'RepSeq_percent_E-sspro8', 'RepSeq_percent_exposed-accpro', 'RepSeq_percent_exposed-accpro20',
            'RepSeq_percent_G-sspro8', 'RepSeq_percent_H-sspro', 'RepSeq_percent_H-sspro8',
            'RepSeq_percent_helix_naive', 'RepSeq_percent_I-sspro8', 'RepSeq_percent_non-polar',
            'RepSeq_percent_polar', 'RepSeq_percent_S-sspro8', 'RepSeq_percent_small',
            'RepSeq_percent_strand_naive', 'RepSeq_percent_T-sspro8', 'RepSeq_percent_tiny',
            'RepSeq_percent_turn_naive', 'RepChain_percent_B-dssp', 'RepChain_percent_C-dssp',
            'RepChain_percent_E-dssp', 'RepChain_percent_G-dssp', 'RepChain_percent_H-dssp',
            'RepChain_percent_I-dssp', 'RepChain_percent_S-dssp', 'RepChain_percent_T-dssp',
            'RepChain_SSBOND-biopython']
    cols.extend([x.id for x in self.strains])
    df_atlas_summary = pd.DataFrame(all_info, columns=cols)
    # Drop columns that don't have anything in them
    df_atlas_summary.dropna(axis=1, how='all', inplace=True)
    return df_atlas_summary
Returns:
DataFrame: Pandas DataFrame of the results |
def extract(fileobj, keywords, comment_tags, options):
    """Extracts translation messages from underscore template files.

    This method does also extract django templates. If a template does not
    contain any django translation tags we always fallback to underscore
    extraction.

    This is a plugin to Babel, written according to
    http://babel.pocoo.org/docs/messages/#writing-extraction-methods

    :param fileobj: the file-like object the messages should be extracted
                    from
    :param keywords: a list of keywords (i.e. function names) that should
                     be recognized as translation functions
    :param comment_tags: a list of translator tags to search for and
                         include in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)``
             tuples
    :rtype: ``iterator``
    """
    encoding = options.get('encoding', 'utf-8')
    original_position = fileobj.tell()
    text = fileobj.read().decode(encoding)
    # Lexer's signature changed in Django 1.9 (the origin argument was removed).
    if django.VERSION[:2] >= (1, 9):
        tokens = Lexer(text).tokenize()
    else:
        tokens = Lexer(text, None).tokenize()
    # Any non-text token means the template contains django tags, so delegate
    # to the django extractor. (The original bound this to a local named
    # `vars`, shadowing the builtin, and wrapped it in a redundant list().)
    could_be_django = any(token.token_type != TOKEN_TEXT for token in tokens)
    if could_be_django:
        fileobj.seek(original_position)
        iterator = extract_django(fileobj, keywords, comment_tags, options)
        for lineno, funcname, message, comments in iterator:
            yield lineno, funcname, message, comments
    else:
        # Underscore template extraction
        comments = []
        fileobj.seek(original_position)
        for lineno, line in enumerate(fileobj, 1):
            funcname = None
            stream = TokenStream.from_tuple_iter(tokenize(line, underscore.rules))
            while not stream.eof:
                if stream.current.type == 'gettext_begin':
                    stream.expect('gettext_begin')
                    funcname = stream.expect('func_name').value
                    args, kwargs = parse_arguments(stream, 'gettext_end')
                    # Keep only plain string arguments; positional non-strings
                    # and keyword arguments become None placeholders.
                    strings = []
                    for arg, argtype in args:
                        if argtype == 'func_string_arg':
                            strings.append(force_text(arg))
                        else:
                            strings.append(None)
                    for arg in kwargs:
                        strings.append(None)
                    if len(strings) == 1:
                        strings = strings[0]
                    else:
                        strings = tuple(strings)
                    yield lineno, funcname, strings, []
                stream.next()
This method does also extract django templates. If a template does not
contain any django translation tags we always fallback to underscore extraction.
This is a plugin to Babel, written according to
http://babel.pocoo.org/docs/messages/#writing-extraction-methods
:param fileobj: the file-like object the messages should be extracted
from
:param keywords: a list of keywords (i.e. function names) that should
be recognized as translation functions
:param comment_tags: a list of translator tags to search for and
include in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)``
tuples
:rtype: ``iterator`` |
def speakerDiarization(filename, n_speakers, mt_size=2.0, mt_step=0.2,
                       st_win=0.05, lda_dim=35, plot_res=False):
    '''
    Speaker diarization: assign each mid-term window of the recording to a
    speaker cluster via feature extraction, optional LDA reduction, k-means
    clustering, and HMM/median-filter smoothing.

    ARGUMENTS:
        - filename:        the name of the WAV file to be analyzed
        - n_speakers       the number of speakers (clusters) in the recording (<=0 for unknown)
        - mt_size (opt)    mid-term window size
        - mt_step (opt)    mid-term window step
        - st_win  (opt)    short-term window size
        - lda_dim (opt)    LDA dimension (0 for no LDA)
        - plot_res (opt)   0 for not plotting the results 1 for plottingy

    RETURNS:
        - cls: per-mid-term-window cluster (speaker) labels
    '''
    [fs, x] = audioBasicIO.readAudioFile(filename)
    x = audioBasicIO.stereo2mono(x)
    duration = len(x) / fs
    # Two pretrained kNN models: generic speaker model and female/male model;
    # their class probabilities are appended to the raw mid-term features.
    [classifier_1, MEAN1, STD1, classNames1, mtWin1, mtStep1, stWin1, stStep1, computeBEAT1] = aT.load_model_knn(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "knnSpeakerAll"))
    [classifier_2, MEAN2, STD2, classNames2, mtWin2, mtStep2, stWin2, stStep2, computeBEAT2] = aT.load_model_knn(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "knnSpeakerFemaleMale"))
    [mt_feats, st_feats, _] = aF.mtFeatureExtraction(x, fs, mt_size * fs,
                                                     mt_step * fs,
                                                     round(fs * st_win),
                                                     round(fs*st_win * 0.5))
    MidTermFeatures2 = numpy.zeros((mt_feats.shape[0] + len(classNames1) +
                                    len(classNames2), mt_feats.shape[1]))
    for i in range(mt_feats.shape[1]):
        cur_f1 = (mt_feats[:, i] - MEAN1) / STD1
        cur_f2 = (mt_feats[:, i] - MEAN2) / STD2
        [res, P1] = aT.classifierWrapper(classifier_1, "knn", cur_f1)
        [res, P2] = aT.classifierWrapper(classifier_2, "knn", cur_f2)
        MidTermFeatures2[0:mt_feats.shape[0], i] = mt_feats[:, i]
        MidTermFeatures2[mt_feats.shape[0]:mt_feats.shape[0]+len(classNames1), i] = P1 + 0.0001
        MidTermFeatures2[mt_feats.shape[0] + len(classNames1)::, i] = P2 + 0.0001
    mt_feats = MidTermFeatures2    # TODO
    # Hand-picked feature indices (MFCC-related stats plus model probabilities).
    iFeaturesSelect = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41,
                       42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]
    mt_feats = mt_feats[iFeaturesSelect, :]
    (mt_feats_norm, MEAN, STD) = aT.normalizeFeatures([mt_feats.T])
    mt_feats_norm = mt_feats_norm[0].T
    n_wins = mt_feats.shape[1]
    # remove outliers:
    dist_all = numpy.sum(distance.squareform(distance.pdist(mt_feats_norm.T)),
                         axis=0)
    m_dist_all = numpy.mean(dist_all)
    i_non_outliers = numpy.nonzero(dist_all < 1.2 * m_dist_all)[0]
    # TODO: Combine energy threshold for outlier removal:
    #EnergyMin = numpy.min(mt_feats[1,:])
    #EnergyMean = numpy.mean(mt_feats[1,:])
    #Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0
    #i_non_outliers = numpy.nonzero(mt_feats[1,:] > Thres)[0]
    #print i_non_outliers
    perOutLier = (100.0 * (n_wins - i_non_outliers.shape[0])) / n_wins
    mt_feats_norm_or = mt_feats_norm
    mt_feats_norm = mt_feats_norm[:, i_non_outliers]
    # LDA dimensionality reduction:
    if lda_dim > 0:
        #[mt_feats_to_red, _, _] = aF.mtFeatureExtraction(x, fs, mt_size * fs, st_win * fs, round(fs*st_win), round(fs*st_win));
        # extract mid-term features with minimum step:
        mt_win_ratio = int(round(mt_size / st_win))
        mt_step_ratio = int(round(st_win / st_win))
        mt_feats_to_red = []
        num_of_features = len(st_feats)
        num_of_stats = 2
        #for i in range(num_of_stats * num_of_features + 1):
        for i in range(num_of_stats * num_of_features):
            mt_feats_to_red.append([])
        for i in range(num_of_features):        # for each of the short-term features:
            curPos = 0
            N = len(st_feats[i])
            while (curPos < N):
                N1 = curPos
                N2 = curPos + mt_win_ratio
                if N2 > N:
                    N2 = N
                curStFeatures = st_feats[i][N1:N2]
                mt_feats_to_red[i].append(numpy.mean(curStFeatures))
                mt_feats_to_red[i+num_of_features].append(numpy.std(curStFeatures))
                curPos += mt_step_ratio
        mt_feats_to_red = numpy.array(mt_feats_to_red)
        mt_feats_to_red_2 = numpy.zeros((mt_feats_to_red.shape[0] +
                                         len(classNames1) + len(classNames2),
                                         mt_feats_to_red.shape[1]))
        for i in range(mt_feats_to_red.shape[1]):
            cur_f1 = (mt_feats_to_red[:, i] - MEAN1) / STD1
            cur_f2 = (mt_feats_to_red[:, i] - MEAN2) / STD2
            [res, P1] = aT.classifierWrapper(classifier_1, "knn", cur_f1)
            [res, P2] = aT.classifierWrapper(classifier_2, "knn", cur_f2)
            mt_feats_to_red_2[0:mt_feats_to_red.shape[0], i] = mt_feats_to_red[:, i]
            mt_feats_to_red_2[mt_feats_to_red.shape[0]:mt_feats_to_red.shape[0] + len(classNames1), i] = P1 + 0.0001
            mt_feats_to_red_2[mt_feats_to_red.shape[0]+len(classNames1)::, i] = P2 + 0.0001
        mt_feats_to_red = mt_feats_to_red_2
        mt_feats_to_red = mt_feats_to_red[iFeaturesSelect, :]
        #mt_feats_to_red += numpy.random.rand(mt_feats_to_red.shape[0], mt_feats_to_red.shape[1]) * 0.0000010
        (mt_feats_to_red, MEAN, STD) = aT.normalizeFeatures([mt_feats_to_red.T])
        mt_feats_to_red = mt_feats_to_red[0].T
        #dist_all = numpy.sum(distance.squareform(distance.pdist(mt_feats_to_red.T)), axis=0)
        #m_dist_all = numpy.mean(dist_all)
        #iNonOutLiers2 = numpy.nonzero(dist_all < 3.0*m_dist_all)[0]
        #mt_feats_to_red = mt_feats_to_red[:, iNonOutLiers2]
        Labels = numpy.zeros((mt_feats_to_red.shape[1], ));
        LDAstep = 1.0
        LDAstepRatio = LDAstep / st_win
        #print LDAstep, LDAstepRatio
        # Pseudo-labels: one label per LDA step, used to supervise the LDA fit.
        for i in range(Labels.shape[0]):
            Labels[i] = int(i*st_win/LDAstepRatio);
        clf = sklearn.discriminant_analysis.LinearDiscriminantAnalysis(n_components=lda_dim)
        clf.fit(mt_feats_to_red.T, Labels)
        mt_feats_norm = (clf.transform(mt_feats_norm.T)).T
    if n_speakers <= 0:
        s_range = range(2, 10)
    else:
        s_range = [n_speakers]
    clsAll = []
    sil_all = []
    centersAll = []
    # k-means for each candidate speaker count; silhouette-like score selects
    # the best count when n_speakers is unknown.
    for iSpeakers in s_range:
        k_means = sklearn.cluster.KMeans(n_clusters=iSpeakers)
        k_means.fit(mt_feats_norm.T)
        cls = k_means.labels_
        means = k_means.cluster_centers_
        # Y = distance.squareform(distance.pdist(mt_feats_norm.T))
        clsAll.append(cls)
        centersAll.append(means)
        sil_1 = []; sil_2 = []
        for c in range(iSpeakers):
            # for each speaker (i.e. for each extracted cluster)
            clust_per_cent = numpy.nonzero(cls == c)[0].shape[0] / \
                             float(len(cls))
            if clust_per_cent < 0.020:
                sil_1.append(0.0)
                sil_2.append(0.0)
            else:
                # get subset of feature vectors
                mt_feats_norm_temp = mt_feats_norm[:, cls==c]
                # compute average distance between samples
                # that belong to the cluster (a values)
                Yt = distance.pdist(mt_feats_norm_temp.T)
                sil_1.append(numpy.mean(Yt)*clust_per_cent)
                silBs = []
                for c2 in range(iSpeakers):
                    # compute distances from samples of other clusters
                    if c2 != c:
                        clust_per_cent_2 = numpy.nonzero(cls == c2)[0].shape[0] /\
                                           float(len(cls))
                        MidTermFeaturesNormTemp2 = mt_feats_norm[:, cls == c2]
                        Yt = distance.cdist(mt_feats_norm_temp.T,
                                            MidTermFeaturesNormTemp2.T)
                        silBs.append(numpy.mean(Yt)*(clust_per_cent
                                                     + clust_per_cent_2)/2.0)
                silBs = numpy.array(silBs)
                # ... and keep the minimum value (i.e.
                # the distance from the "nearest" cluster)
                sil_2.append(min(silBs))
        sil_1 = numpy.array(sil_1);
        sil_2 = numpy.array(sil_2);
        sil = []
        for c in range(iSpeakers):
            # for each cluster (speaker) compute silhouette
            sil.append( ( sil_2[c] - sil_1[c]) / (max(sil_2[c],
                                                      sil_1[c]) + 0.00001))
        # keep the AVERAGE SILLOUETTE
        sil_all.append(numpy.mean(sil))
    imax = numpy.argmax(sil_all)
    # optimal number of clusters
    nSpeakersFinal = s_range[imax]
    # generate the final set of cluster labels
    # (important: need to retrieve the outlier windows:
    # this is achieved by giving them the value of their
    # nearest non-outlier window)
    cls = numpy.zeros((n_wins,))
    for i in range(n_wins):
        j = numpy.argmin(numpy.abs(i-i_non_outliers))
        cls[i] = clsAll[imax][j]
    # Post-process method 1: hmm smoothing
    for i in range(1):
        # hmm training
        start_prob, transmat, means, cov = \
            trainHMM_computeStatistics(mt_feats_norm_or, cls)
        hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag")
        hmm.startprob_ = start_prob
        hmm.transmat_ = transmat
        hmm.means_ = means; hmm.covars_ = cov
        cls = hmm.predict(mt_feats_norm_or.T)
    # Post-process method 2: median filtering:
    cls = scipy.signal.medfilt(cls, 13)
    cls = scipy.signal.medfilt(cls, 11)
    sil = sil_all[imax]
    class_names = ["speaker{0:d}".format(c) for c in range(nSpeakersFinal)];
    # load ground-truth if available
    gt_file = filename.replace('.wav', '.segments')
    # if groundturh exists
    if os.path.isfile(gt_file):
        [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)
        flags_gt, class_names_gt = segs2flags(seg_start, seg_end, seg_labs, mt_step)
    if plot_res:
        fig = plt.figure()
        if n_speakers > 0:
            ax1 = fig.add_subplot(111)
        else:
            ax1 = fig.add_subplot(211)
        ax1.set_yticks(numpy.array(range(len(class_names))))
        ax1.axis((0, duration, -1, len(class_names)))
        ax1.set_yticklabels(class_names)
        ax1.plot(numpy.array(range(len(cls)))*mt_step+mt_step/2.0, cls)
    if os.path.isfile(gt_file):
        if plot_res:
            ax1.plot(numpy.array(range(len(flags_gt))) *
                     mt_step + mt_step / 2.0, flags_gt, 'r')
        purity_cluster_m, purity_speaker_m = \
            evaluateSpeakerDiarization(cls, flags_gt)
        print("{0:.1f}\t{1:.1f}".format(100 * purity_cluster_m,
                                        100 * purity_speaker_m))
        if plot_res:
            plt.title("Cluster purity: {0:.1f}% - "
                      "Speaker purity: {1:.1f}%".format(100 * purity_cluster_m,
                                                        100 * purity_speaker_m))
    if plot_res:
        plt.xlabel("time (seconds)")
        #print s_range, sil_all
        if n_speakers<=0:
            plt.subplot(212)
            plt.plot(s_range, sil_all)
            plt.xlabel("number of clusters");
            plt.ylabel("average clustering's sillouette");
        plt.show()
    return cls
- filename: the name of the WAV file to be analyzed
- n_speakers the number of speakers (clusters) in the recording (<=0 for unknown)
- mt_size (opt) mid-term window size
- mt_step (opt) mid-term window step
- st_win (opt) short-term window size
- lda_dim (opt) LDA dimension (0 for no LDA)
- plot_res (opt) 0 for not plotting the results 1 for plottingy |
def encode(self, X, seed=None):
    """Given a string ``X``, returns ``unrank(X[:n]) || X[n:]`` where ``n``
    is the maximum number of bytes that can be unranked w.r.t. the
    capacity of the input ``dfa`` and ``unrank`` is w.r.t. to the input
    ``dfa``.

    :param X: the plaintext string to encode; an empty input yields ''.
    :param seed: optional 8-byte value used in place of fresh random bytes
        for the covertext header (useful for deterministic tests).
    :raises InvalidInputException: if ``X`` is not a string.
    :raises InvalidSeedLength: if ``seed`` is given but not 8 bytes long.
    :raises InsufficientCapacityException: if the language's capacity
        cannot hold the covertext header.
    """
    if not X:
        return ''
    if not isinstance(X, str):
        raise InvalidInputException('Input must be of type string.')
    if seed is not None and len(seed) != 8:
        raise InvalidSeedLength('The seed is not 8 bytes long, seed length: '+str(len(seed)))
    ciphertext = self._encrypter.encrypt(X)
    maximumBytesToRank = int(math.floor(self.getCapacity() / 8.0))
    unrank_payload_len = (
        maximumBytesToRank - DfaEncoderObject._COVERTEXT_HEADER_LEN_CIPHERTTEXT)
    unrank_payload_len = min(len(ciphertext), unrank_payload_len)
    if unrank_payload_len <= 0:
        raise InsufficientCapacityException('Language doesn\'t have enough capacity')
    msg_len_header = fte.bit_ops.long_to_bytes(unrank_payload_len)
    # str.rjust replaces the Python-2-only string.rjust() helper
    # (identical behavior, and also valid on Python 3).
    msg_len_header = msg_len_header.rjust(
        DfaEncoderObject._COVERTEXT_HEADER_LEN_PLAINTEXT, '\x00')
    random_bytes = seed if seed is not None else fte.bit_ops.random_bytes(8)
    msg_len_header = random_bytes + msg_len_header
    msg_len_header = self._encrypter.encryptOneBlock(msg_len_header)
    unrank_payload = msg_len_header + \
        ciphertext[:maximumBytesToRank -
                   DfaEncoderObject._COVERTEXT_HEADER_LEN_CIPHERTTEXT]
    random_padding_bytes = maximumBytesToRank - len(unrank_payload)
    if random_padding_bytes > 0:
        unrank_payload += fte.bit_ops.random_bytes(random_padding_bytes)
    unrank_payload = fte.bit_ops.bytes_to_long(unrank_payload)
    formatted_covertext_header = self._dfa.unrank(unrank_payload)
    unformatted_covertext_body = ciphertext[
        maximumBytesToRank - DfaEncoderObject._COVERTEXT_HEADER_LEN_CIPHERTTEXT:]
    covertext = formatted_covertext_header + unformatted_covertext_body
    return covertext
is the maximum number of bytes that can be unranked w.r.t. the
capacity of the input ``dfa`` and ``unrank`` is w.r.t. to the input
``dfa``. |
def log_tensor_stats(self, tensor, name):
    """Add distribution statistics on a tensor's elements to the current History entry.

    Nested lists/tuples of tensors are flattened and concatenated first.
    The tensor is histogrammed (``self._num_bins`` bins) and stored in the
    history row under *name* as a ``wandb.Histogram``. No-op when there is
    no active history or history computation is disabled.

    :param tensor: a Tensor, or an arbitrarily nested list/tuple of Tensors
    :param name: key under which the histogram is stored in the history row
    :raises TypeError: if *tensor* is not tensor-like (has no ``shape``)
    """
    # TODO Handle the case of duplicate names.
    if (isinstance(tensor, tuple) or isinstance(tensor, list)):
        # Flatten nesting one level at a time, then concatenate everything
        # into a single 1-D tensor.
        while (isinstance(tensor, tuple) or isinstance(tensor, list)) and (isinstance(tensor[0], tuple) or isinstance(tensor[0], list)):
            tensor = [item for sublist in tensor for item in sublist]
        tensor = torch.cat([t.view(-1) for t in tensor])
    # checking for inheritance from _TensorBase didn't work for some reason
    if not hasattr(tensor, 'shape'):
        cls = type(tensor)
        raise TypeError('Expected Tensor, not {}.{}'.format(
            cls.__module__, cls.__name__))
    history = self._history()
    if history is None or not history.compute:
        return
    # HalfTensors on cpu do not support view(), upconvert to 32bit
    if isinstance(tensor, torch.HalfTensor):
        tensor = tensor.clone().type(torch.FloatTensor).detach()
    flat = tensor.view(-1)
    # For pytorch 0.3 we use unoptimized numpy histograms (detach is new in 0.4)
    if not hasattr(flat, "detach"):
        tensor = flat.cpu().clone().numpy()
        history.row.update({
            name: wandb.Histogram(tensor)
        })
        return
    if flat.is_cuda:
        # TODO(jhr): see if pytorch will accept something upstream to check cuda support for ops
        # until then, we are going to have to catch a specific exception to check for histc support.
        if self._is_cuda_histc_supported is None:
            self._is_cuda_histc_supported = True
            check = torch.cuda.FloatTensor(1).fill_(0)
            try:
                check = flat.histc(bins=self._num_bins)
            except RuntimeError as e:
                # Only work around missing support with specific exception
                if str(e).startswith("_th_histc is not implemented"):
                    self._is_cuda_histc_supported = False
        if not self._is_cuda_histc_supported:
            flat = flat.cpu().clone().detach()
        # As of torch 1.0.1.post2+nightly, float16 cuda summary ops are not supported (convert to float32)
        if isinstance(flat, torch.cuda.HalfTensor):
            flat = flat.clone().type(torch.cuda.FloatTensor).detach()
    if isinstance(flat, torch.HalfTensor):
        flat = flat.clone().type(torch.FloatTensor).detach()
    tmin = flat.min().item()
    tmax = flat.max().item()
    tensor = flat.histc(bins=self._num_bins, min=tmin, max=tmax)
    tensor = tensor.cpu().clone().detach()
    bins = torch.linspace(tmin, tmax, steps=self._num_bins + 1)
    history.row.update({
        name: wandb.Histogram(np_histogram=(
            tensor.tolist(), bins.tolist()))
    })
def get(self, language: str=None, default: str=None) -> str:
    """Gets the underlying value in the specified or
    primary language.

    Arguments:
        language:
            The language to get the value in.

    Returns:
        The value in the current language, or
        the primary language in case no language
        was specified.
    """
    lang = language or settings.LANGUAGE_CODE
    result = super().get(lang, default)
    if result is None:
        return default
    return result
primary language.
Arguments:
language:
The language to get the value in.
Returns:
The value in the current language, or
the primary language in case no language
was specified. |
def _to_s(val, sep=", "):
    """Stringify *val*, joining iterable elements with *sep*.

    :param val: An object
    :param sep: separator between values

    >>> _to_s([1, 2, 3])
    '1, 2, 3'
    >>> _to_s("aaa")
    'aaa'
    """
    if not anyconfig.utils.is_iterable(val):
        return str(val)
    return sep.join(str(elem) for elem in val)
:param val: An object
:param sep: separator between values
>>> _to_s([1, 2, 3])
'1, 2, 3'
>>> _to_s("aaa")
'aaa' |
def reset_statistics(stat, frequencies, reset_cumulative, recalculate=False):
    """
    Resets the specified statistic's data (deletes it) for the given
    frequency/ies.

    :param stat: a statistic class or a list of statistic classes
    :param frequencies: a frequency code or a list of frequency codes
    :param reset_cumulative: whether cumulative statistics are reset too
    :param recalculate: if True, recalculate statistics after the reset
    """
    stats = ensure_list(stat)
    frequencies = ensure_list(frequencies)
    for s in stats:
        for f in frequencies:
            if not s.cumulative or reset_cumulative:
                # Parenthesized print() is valid on both Python 2 and 3;
                # the original `print ...` statements were a SyntaxError
                # on Python 3.
                print("Resetting %s (%s)..." % (s.__name__, settings.STATISTIC_FREQUENCY_DICT[f]))
                s.objects.filter(frequency=f).delete()
            elif s.cumulative and not reset_cumulative:
                print("Skipping %s because it is cumulative." % s.__name__)
    if recalculate:
        print("Recalculating statistics...")
        calculate_statistics(stats, frequencies)
frequency/ies. |
def GetPageInfo(self):
    """Returns page information.

    Reports the available page range and the selected page range as a
    4-tuple; all tabs are always selected.
    """
    first = self.first_tab
    last = self.last_tab
    return first, last, first, last
What is the page range available, and what is the selected page range. |
def reset(self):
    """Reset the clustering to the original clustering.

    All changes are lost.
    """
    # Discard the recorded undo/redo history.
    self._undo_stack.clear()
    # Restore the spike-to-cluster assignment captured at construction.
    self._spike_clusters = self._spike_clusters_base
    # Restart new-cluster id numbering from its initial value.
    self._new_cluster_id = self._new_cluster_id_0
All changes are lost. |
def transform(self, X, y=None, sample_weight=None):
    '''
    Transforms the time series data with linear direct value interpolation.
    If y is a time series and passed, it will be transformed as well.
    The time dimension is removed from the data.

    Parameters
    ----------
    X : array-like, shape [n_series, ...]
       Time series data and (optionally) contextual data
    y : array-like shape [n_series], default = None
        target vector
    sample_weight : array-like shape [n_series], default = None
        sample weights

    Returns
    -------
    X_new : array-like, shape [n_series, ]
        transformed time series data
    y_new : array-like, shape [n_series]
        expanded target vector
    sample_weight_new : array-like or None
        None is returned if target is changed. Otherwise it is returned unchanged.
    '''
    check_ts_data(X, y)
    Xt, Xc = get_ts_data_parts(X)
    yt = y
    swt = sample_weight
    N = len(Xt)  # number of series
    D = Xt[0].shape[1] - 1  # number of data channels
    # 1st channel is time; build the original and resampled time axes.
    t = [Xt[i][:, 0] for i in np.arange(N)]
    t_lin = [np.arange(Xt[i][0, 0], Xt[i][-1, 0], self.sample_period) for i in np.arange(N)]
    if D == 1:
        Xt = [self._interp(t_lin[i], t[i], Xt[i][:, 1], kind=self.kind) for i in np.arange(N)]
    elif D > 1:
        # Interpolate each data channel independently, then stack as columns.
        Xt = [np.column_stack([self._interp(t_lin[i], t[i], Xt[i][:, j], kind=self.kind)
                               for j in range(1, D + 1)]) for i in np.arange(N)]
    if Xc is not None:
        Xt = TS_Data(Xt, Xc)
    if yt is not None and len(np.atleast_1d(yt[0])) > 1:
        # y is a time series
        swt = None
        if self.categorical_target is True:
            # Nearest-neighbor interpolation keeps categorical labels valid.
            yt = [self._interp(t_lin[i], t[i], yt[i], kind='nearest') for i in np.arange(N)]
        else:
            yt = [self._interp(t_lin[i], t[i], yt[i], kind=self.kind) for i in np.arange(N)]
    else:
        # y is static - leave y alone
        pass
    return Xt, yt, swt
If y is a time series and passed, it will be transformed as well
The time dimension is removed from the data
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_series, ]
transformed time series data
y_new : array-like, shape [n_series]
expanded target vector
sample_weight_new : array-like or None
None is returned if target is changed. Otherwise it is returned unchanged. |
def exists(self, dataset_id):
    """ Check if a dataset exists in Google BigQuery

    Parameters
    ----------
    dataset_id : str
        Name of dataset to be verified

    Returns
    -------
    boolean
        true if dataset exists, otherwise false
    """
    from google.api_core.exceptions import NotFound
    try:
        self.client.get_dataset(self.client.dataset(dataset_id))
    except NotFound:
        return False
    except self.http_error as ex:
        self.process_http_error(ex)
    else:
        return True
Parameters
----------
dataset_id : str
Name of dataset to be verified
Returns
-------
boolean
true if dataset exists, otherwise false |
def copy_unit_spike_features(self, sorting, unit_ids=None):
    '''Copy unit spike features from another sorting extractor to the current
    sorting extractor.

    Parameters
    ----------
    sorting: SortingExtractor
        The sorting extractor from which the spike features will be copied
    unit_ids: (array_like, int)
        The list (or single value) of unit_ids for which the spike features
        will be copied. If None, features for all units in ``sorting`` are
        copied.
    '''
    if unit_ids is None:
        unit_ids = sorting.get_unit_ids()
    elif isinstance(unit_ids, int):
        # Normalize a single unit id to a one-element list so one loop
        # handles both call styles (the original duplicated the loop body).
        unit_ids = [unit_ids]
    for unit_id in unit_ids:
        for feature_name in sorting.get_unit_spike_feature_names(unit_id=unit_id):
            value = sorting.get_unit_spike_features(unit_id=unit_id,
                                                    feature_name=feature_name)
            self.set_unit_spike_features(unit_id=unit_id,
                                         feature_name=feature_name,
                                         value=value)
sorting extractor.
Parameters
----------
sorting: SortingExtractor
The sorting extractor from which the spike features will be copied
unit_ids: (array_like, int)
The list (or single value) of unit_ids for which the spike features will be copied.
def get_unit_spike_features(self, unit_id, feature_name, start_frame=None, end_frame=None): |
def find_outputs_in_range(self, ifo, current_segment, useSplitLists=False):
    """
    Return the list of Files that is most appropriate for the supplied
    time range. That is, the Files whose coverage time has the
    largest overlap with the supplied time range.

    Parameters
    -----------
    ifo : string
        Name of the ifo (or ifos) that the File should correspond to
    current_segment : glue.segment.segment
        The segment of time that files must intersect.

    Returns
    --------
    FileList class
        The list of Files that are most appropriate for the time range
    """
    window = segments.segmentlist([current_segment])
    # Candidate files: everything that intersects the requested window.
    candidates = self.find_all_output_in_range(ifo, current_segment,
                                               useSplitLists=useSplitLists)
    # Score each candidate by the absolute size of its overlap.
    overlaps = [abs(f.segment_list & window) for f in candidates]
    if not overlaps:
        return []
    # Take the segment definition of the best-overlapping file; on ties the
    # earliest candidate wins (argmax semantics), defining the valid segment.
    best_segments = candidates[numpy.array(overlaps, dtype=int).argmax()].segment_list
    # Keep every output file sharing exactly that segment definition.
    return [f for f in candidates if f.segment_list == best_segments]
time range. That is, the Files whose coverage time has the
largest overlap with the supplied time range.
Parameters
-----------
ifo : string
Name of the ifo (or ifos) that the File should correspond to
current_segment : glue.segment.segment
The segment of time that files must intersect.
Returns
--------
FileList class
The list of Files that are most appropriate for the time range |
def save_info(self, dirn):
    '''
    Save information about the distribution in its dist_dir.
    '''
    ctx = self.ctx
    # Snapshot of everything later tooling needs to know about this dist.
    dist_info = {
        'dist_name': ctx.dist_name,
        'bootstrap': ctx.bootstrap.name,
        'archs': [arch.arch for arch in ctx.archs],
        'ndk_api': ctx.ndk_api,
        'use_setup_py': ctx.use_setup_py,
        'recipes': ctx.recipe_build_order + ctx.python_modules,
        'hostpython': ctx.hostpython,
        'python_version': ctx.python_recipe.major_minor_version_string,
    }
    with current_directory(dirn):
        info('Saving distribution info')
        with open('dist_info.json', 'w') as fileh:
            json.dump(dist_info, fileh)
def direction(self):
    """
    Get image direction

    Returns
    -------
    tuple
    """
    # Dispatch to the pixel-type-specific native routine for this image.
    get_direction = utils.get_lib_fn('getDirection%s' % self._libsuffix)
    return get_direction(self.pointer)
Returns
-------
tuple |
def amplitude_by_welch(self, data_frame):
"""
This methods uses the Welch method :cite:`Welch1967` to obtain the power spectral density, this is a robust
alternative to using fft_signal & amplitude
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return: the ampl
:rtype ampl: float
:return: the freq
:rtype freq: float
"""
frq, Pxx_den = signal.welch(data_frame.filtered_signal.values, self.sampling_frequency, nperseg=self.window)
freq = frq[Pxx_den.argmax(axis=0)]
ampl = sum(Pxx_den[(frq > self.lower_frequency) & (frq < self.upper_frequency)])
logging.debug("tremor amplitude by welch calculated")
return ampl, freq | This methods uses the Welch method :cite:`Welch1967` to obtain the power spectral density, this is a robust
alternative to using fft_signal & amplitude
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return: the ampl
:rtype ampl: float
:return: the freq
:rtype freq: float |
def X(self) -> Optional[Union[np.ndarray, sparse.spmatrix, ArrayView]]:
    """Data matrix of shape :attr:`n_obs` × :attr:`n_vars`.

    Backed objects read from the on-disk file (opening it if needed);
    in-memory objects unwrap degenerate shapes: a 1×1 matrix is returned
    as a scalar and a single row/column as a flat 1-D array.
    """
    if self.isbacked:
        # Backed mode: the matrix lives in the on-disk file, not in memory.
        if not self.file.isopen: self.file.open()
        X = self.file['X']
        if self.isview:
            # Views expose only the selected observation/variable indices.
            X = X[self._oidx, self._vidx]
        return X
    else:
        if self.n_obs == 1 and self.n_vars == 1:
            # 1×1 matrix -> scalar
            return self._X[0, 0]
        elif self.n_obs == 1 or self.n_vars == 1:
            # Single row/column -> flat 1-D array (densified first if sparse).
            X = self._X
            if issparse(self._X): X = self._X.toarray()
            return X.flatten()
        else:
            return self._X
def run(self, lines):
    """ Match and store Fenced Code Blocks in the HtmlStash.
    """
    text = "\n".join(lines)
    while True:
        match = FENCED_BLOCK_RE.search(text)
        if match is None:
            break
        # Highlight the fenced block and stash the resulting HTML so later
        # markdown processing cannot mangle it.
        rendered = highlight_syntax(match.group('code'),
                                    match.group('lang'),
                                    linenums=bool(match.group('linenums')))
        placeholder = self.markdown.htmlStash.store(rendered, safe=True)
        # Splice the placeholder in where the fenced block used to be.
        text = '{}\n{}\n{}'.format(text[:match.start()], placeholder,
                                   text[match.end():])
    return text.split("\n")
def get_unique_filename(filename, new_filename=None, new_extension=None):
    """
    Generate a new name for a file, keeping its extension.

    The new name is either derived from the current time
    (hours+minutes+seconds+microseconds) or, when ``new_filename`` is given,
    used verbatim.

    :type filename: string
    :param filename: Original file name
    :type new_filename: string
    :param new_filename: (optional) Custom new file name; when given, the
        timestamp is not inserted into the new name
    :type new_extension: string
    :param new_extension: (optional) Force a new file extension instead of the
        original one. Specify it without the leading dot.
    :rtype: string
    :return: New file name
    """
    if new_extension:
        extension = new_extension
    else:
        # Extension of the original name, without the leading dot.
        extension = splitext(filename)[1][1:]
    if new_filename:
        return '%s.%s' % (new_filename, extension)
    # Timestamp-based name: HHMMSS + zero-padded microseconds. Uses %-format
    # instead of the original unicode().zfill() chain, which raised NameError
    # on Python 3 (`unicode` is Python-2-only); the output string is identical.
    now = real_datetime.now()
    return '%02d%02d%02d%06d.%s' % (now.hour, now.minute, now.second,
                                    now.microsecond, extension)
Soit le nouveau nom est généré à partir de la date
(heures+minutes+secondes+microsecondes) soit un nouveau nom est spécifié et on
l'utilise tel quel.
:type filename: string
:param filename: Nom du fichier original
:type new_filename: string
:param new_filename: (optional) Nouveau nom de fichier personnalisé, ceci implique que
la date ne sera pas insérée dans le nouveau nom
:type new_extension: string
:param new_extension: (optional) Force une nouvelle extension de fichier au lieu de
celle de l'original. À spécifier sans le "point" de début d'une
extension.
:rtype: string
:return: Nouveau nom de fichier |
def progressbar(stream, prefix='Loading: ', width=0.5, **options):
    """ Generator filter to print a progress bar. """
    total = len(stream)
    if not total:
        # Empty input: nothing to report on, yield nothing.
        return stream
    if 'width' not in options:
        # Fractional widths are interpreted relative to the terminal width.
        if width <= 1:
            width = round(shutil.get_terminal_size()[0] * width)
        options['width'] = width
    with ProgressBar(max=total, prefix=prefix, **options) as bar:
        bar.set(0)
        for done, item in enumerate(stream, 1):
            yield item
            bar.set(done)
def set_(key, value, profile=None):
    '''
    Set a key/value pair in the vault service

    key may be given either as "path/key" or the deprecated "path?key";
    returns True on a successful write (Vault answers 204 No Content),
    otherwise raises ``salt.exceptions.CommandExecutionError``.
    '''
    if '?' in key:
        # Legacy "path?key" syntax; deprecated in favour of "path/key".
        # (Fixed typo "seperate" -> "separate" in the warning text.)
        __utils__['versions.warn_until'](
            'Neon',
            (
                'Using ? to separate between the path and key for vault has been deprecated '
                'and will be removed in {version}. Please just use a /.'
            ),
        )
        path, key = key.split('?')
    else:
        path, key = key.rsplit('/', 1)
    try:
        url = 'v1/{0}'.format(path)
        data = {key: value}
        response = __utils__['vault.make_request'](
            'POST',
            url,
            profile,
            json=data)
        # Vault replies 204 No Content on a successful secret write.
        if response.status_code != 204:
            response.raise_for_status()
        return True
    except Exception as e:
        log.error('Failed to write secret! %s: %s', type(e).__name__, e)
        raise salt.exceptions.CommandExecutionError(e)
def calcsize(values, sizerange=(2,70), inds=None, plaw=3):
    """ Use set of values to calculate symbol size.

    values is a list of floats for candidate significance.
    inds is an optional list of indexes to use to calculate symbol size.
    Scaling of symbol size min max set by sizerange tuple (min, max).
    plaw is powerlaw scaling of symbol size from values.

    If every magnitude is identical, all symbols get the minimum size
    (the original raised ZeroDivisionError in that case).
    """
    # Magnitudes establishing the scaling range; optionally restricted to a
    # subset of indexes.
    if inds:
        mags = [abs(values[i]) for i in inds]
    else:
        mags = [abs(val) for val in values]
    smax = max(mags)
    smin = min(mags)
    spread = smax - smin
    if spread == 0:
        # All magnitudes equal: avoid division by zero; every symbol gets the
        # minimum size.
        return [sizerange[0] for _ in values]
    return [sizerange[0] + sizerange[1] * ((abs(val) - smin) / spread) ** plaw
            for val in values]
values is a list of floats for candidate significance.
inds is an optional list of indexes to use to calculate symbol size.
Scaling of symbol size min max set by sizerange tuple (min, max).
plaw is powerlaw scaling of symbol size from values |
def get_adaptive_threshold(threshold_method, image, threshold,
                           mask = None,
                           adaptive_window_size = 10,
                           **kwargs):
    """Given a global threshold, compute a threshold per pixel

    Break the image into blocks, computing the threshold per block.
    Afterwards, constrain the block threshold to .7 T < t < 1.5 T.

    Block sizes must be at least 50x50. Images > 500 x 500 get 10x10
    blocks.

    threshold_method - forwarded to get_global_threshold for each block
    image - 2-D image to threshold
    threshold - global threshold T (NOTE(review): not referenced in this
        body; the .7/1.5 constraint described above does not appear to be
        applied here - confirm)
    mask - optional boolean mask limiting the pixels considered per block
    adaptive_window_size - requested number of blocks along each axis
    kwargs - forwarded to get_global_threshold

    Returns a per-pixel threshold array the same height/width as image.
    """
    # for the X and Y direction, find the # of blocks, given the
    # size constraints
    image_size = np.array(image.shape[:2],dtype=int)
    nblocks = image_size // adaptive_window_size
    #
    # Use a floating point block size to apportion the roundoff
    # roughly equally to each block
    #
    increment = ( np.array(image_size,dtype=float) /
                  np.array(nblocks,dtype=float))
    #
    # Put the answer here
    #
    thresh_out = np.zeros(image_size, image.dtype)
    #
    # Loop once per block, computing the "global" threshold within the
    # block.
    #
    block_threshold = np.zeros([nblocks[0],nblocks[1]])
    for i in range(nblocks[0]):
        i0 = int(i*increment[0])
        i1 = int((i+1)*increment[0])
        for j in range(nblocks[1]):
            j0 = int(j*increment[1])
            j1 = int((j+1)*increment[1])
            block = image[i0:i1,j0:j1]
            block_mask = None if mask is None else mask[i0:i1,j0:j1]
            block_threshold[i,j] = get_global_threshold(
                threshold_method,
                block, mask = block_mask,
                **kwargs)
    #
    # Use a cubic spline to blend the thresholds across the image to avoid image artifacts
    #
    # Spline order is capped by the block count: a bivariate spline of order
    # k needs more than k knots per axis.
    spline_order = min(3, np.min(nblocks) - 1)
    xStart = int(increment[0] / 2)
    xEnd = int((nblocks[0] - 0.5) * increment[0])
    yStart = int(increment[1] / 2)
    yEnd = int((nblocks[1] - 0.5) * increment[1])
    xtStart = .5
    xtEnd = image.shape[0] - .5
    ytStart = .5
    ytEnd = image.shape[1] - .5
    # Knots sit at block centers; the bbox extends over the whole image.
    block_x_coords = np.linspace(xStart,xEnd, nblocks[0])
    block_y_coords = np.linspace(yStart,yEnd, nblocks[1])
    adaptive_interpolation = scipy.interpolate.RectBivariateSpline(
        block_x_coords, block_y_coords, block_threshold,
        bbox = (xtStart, xtEnd, ytStart, ytEnd),
        kx = spline_order, ky = spline_order)
    # Evaluate the spline at every pixel center to get the per-pixel threshold.
    thresh_out_x_coords = np.linspace(.5, int(nblocks[0] * increment[0]) - .5, thresh_out.shape[0])
    thresh_out_y_coords = np.linspace(.5, int(nblocks[1] * increment[1]) - .5 , thresh_out.shape[1])
    thresh_out = adaptive_interpolation(thresh_out_x_coords, thresh_out_y_coords)
    return thresh_out
Break the image into blocks, computing the threshold per block.
Afterwards, constrain the block threshold to .7 T < t < 1.5 T.
Block sizes must be at least 50x50. Images > 500 x 500 get 10x10
blocks. |
def _refreshNodeFromTarget(self):
    """ Updates the config settings
    """
    for key, value in self.viewBox.state.items():
        if key == "limits":
            # The limits entry is itself a dictionary; push each sub-value
            # into the corresponding child of the limits item.
            for limit_key, limit_value in value.items():
                self.limitsItem.childByNodeName(limit_key).data = limit_value
        else:
            self.childByNodeName(key).data = value
def write(self, *args, **kwargs):
    """
    :param args: tuple(value, style), tuple(value, style)
    :param kwargs: header=tuple(value, style), header=tuple(value, style)
    :param args: value, value
    :param kwargs: header=value, header=value
    """
    # Positional values are matched against the headers in order.
    if args:
        kwargs = dict(zip(self.header, args))
    for column_name, payload in kwargs.items():
        # Accept either a bare value or a (value, style) tuple.
        if not isinstance(payload, tuple):
            payload = (payload,)
        self.write_cell(self._row, self.header.index(column_name), *payload)
    self._row += 1
:param kwargs: header=tuple(value, style), header=tuple(value, style)
:param args: value, value
:param kwargs: header=value, header=value |
def inspect(self):
    """
    Fetches information about the container from the client.
    """
    policy = self.policy
    config_id = self.config_id
    if config_id.config_type == ItemType.VOLUME:
        # Attached volumes may be named after the parent container config.
        if self.container_map.use_attached_parent_name:
            name = policy.aname(config_id.map_name, config_id.instance_name,
                                config_id.config_name)
        else:
            name = policy.aname(config_id.map_name, config_id.instance_name)
    else:
        name = policy.cname(config_id.map_name, config_id.config_name,
                            config_id.instance_name)
    self.container_name = name
    # Only query the client when the container is known to exist.
    if name in policy.container_names[self.client_name]:
        self.detail = self.client.inspect_container(name)
    else:
        self.detail = NOT_FOUND
def top_x_bleu(query_dic, mark, x=1):
    """
    Calculate the top x average bleu value predictions ranking by item, x default is set above

    :param query_dic: dict, key is qid, value is (item, bleu) tuple list, which will be ranked by 'item' as key
    :param mark:string, which indicates which method is evaluated, also used as output file name here.
    :param x:int, define top x
    :return:average bleu score
    """
    all_total = 0.0
    with open(top_bleu_path + mark, 'w') as writer:
        for qid in query_dic:
            # Rank the candidates by item (descending) and keep the top x.
            top_x = sorted(query_dic[qid], key=lambda a: a[0], reverse=True)[:x]
            ave_bleu = sum(t[1] for t in top_x) / x
            writer.write('%s\tAverageBleu:%f\tTop%d:%s\n' % (qid, ave_bleu, x, str(top_x)))
            all_total += ave_bleu
            # setdefault replaces the original duplicated if/else append logic.
            contrast_dic.setdefault(qid, []).append(str(ave_bleu))
    result_string = '%s\ttop%d_Bleu:\t%f' % (mark, x, all_total / len(query_dic))
    # Parenthesized print works on both Python 2 and 3 (the original
    # `print result_string` statement is a SyntaxError on Python 3).
    print(result_string)
    return ['Bleu', result_string]
:param query_dic: dict, key is qid, value is (item, bleu) tuple list, which will be ranked by 'item' as key
:param mark:string, which indicates which method is evaluated, also used as output file name here.
:param x:int, define top x
:return:average bleu score |
def tt_avg(self, print_output=True, output_file="tt.csv"):
    """
    Compute average term-topic matrix, and print to file if
    print_output=True.
    """
    # Average across the third axis of the term-topic tensor.
    averaged = self.tt.mean(axis=2)
    if print_output:
        np.savetxt(output_file, averaged, delimiter=",")
    return averaged
print_output=True. |
def is_done(self):
    """True if the last two moves were Pass or if the position is at a move
    greater than the max depth."""
    # Either the game ended naturally or the move count hit the cap.
    position = self.position
    return position.is_game_over() or position.n >= FLAGS.max_game_length
greater than the max depth. |
def remove_whitespace(s):
    """ Unsafely attempts to remove HTML whitespace. This is not an HTML
    parser, which is why it's considered 'unsafe', but it should work for
    most implementations. Use at your own risk.

    @s: #str
    -> HTML with whitespace removed, ignoring <pre>, script, textarea and code
    tags
    """
    # Swap every whitespace-sensitive region out for a unique placeholder so
    # the global whitespace collapse below cannot touch it.
    preserved = {}
    for match in html_ignore_whitespace_re.finditer(s):
        placeholder = "{}{}{}".format(r"{}", uuid.uuid4(), r"{}")
        preserved[placeholder] = match.group()
        s = s.replace(match.group(), placeholder)
    s = whitespace_re(r' ', s).strip()
    # Restore the protected regions verbatim.
    for placeholder, original in preserved.items():
        s = s.replace(placeholder, original)
    return s
which is why its considered 'unsafe', but it should work for most
implementations. Just use on at your own risk.
@s: #str
-> HTML with whitespace removed, ignoring <pre>, script, textarea and code
tags |
def dispatch(self, request, *args, **kwargs):
    """Adds useful objects to the class."""
    # Attach the `next` redirect target and the requesting user to the view
    # instance before the normal dispatch runs.
    self._add_next_and_user(request)
    return super(DeleteImageView, self).dispatch(request, *args, **kwargs)
def all_terminated():
    """For each remote shell determine if its terminated"""
    seen_any = False
    finished_states = (remote_dispatcher.STATE_TERMINATED,
                       remote_dispatcher.STATE_DEAD)
    for shell in all_instances():
        seen_any = True
        # One still-running shell is enough to answer False.
        if shell.state not in finished_states:
            return False
    # True only when at least one instance existed and all were finished.
    return seen_any
def list(self):
    """List collection items."""
    if self.is_fake:
        # Fake collections expose no items.
        return
    for entry in self.collection.list():
        yield entry.uid + self.content_suffix
def stream(self, page, limit=None, page_limit=None):
    """
    Generates records one at a time from a page, stopping at prescribed limits.

    :param Page page: The page to stream.
    :param int limit: The max number of records to read.
    :param int page_limit: The max number of pages to read.
    """
    records_seen = 1
    pages_seen = 1
    while page is not None:
        for record in page:
            yield record
            records_seen += 1
            # values.unset means "no limit"; only enforce real integers.
            if limit and limit is not values.unset and limit < records_seen:
                return
        if page_limit and page_limit is not values.unset and page_limit < pages_seen:
            return
        page = page.next_page()
        pages_seen += 1
:param Page page: The page to stream.
:param int limit: The max number of records to read.
:param int page_imit: The max number of pages to read. |
def populate_requirement_set(requirement_set, args, options, finder,
                             session, name, wheel_cache):
    """
    Marshal cmd line args into a requirement set.

    Adds, in order: positional requirement specifiers, --editable entries,
    and requirements parsed from each -r/--requirement file. Warns (does
    not raise) when no requirement was supplied from any source.
    """
    # Positional specifiers, e.g. "requests==2.0".
    for req in args:
        requirement_set.add_requirement(
            InstallRequirement.from_line(
                req, None, isolated=options.isolated_mode,
                wheel_cache=wheel_cache
            )
        )
    # -e/--editable requirements (VCS or local paths).
    for req in options.editables:
        requirement_set.add_requirement(
            InstallRequirement.from_editable(
                req,
                default_vcs=options.default_vcs,
                isolated=options.isolated_mode,
                wheel_cache=wheel_cache
            )
        )
    # -r/--requirement files; track whether any of them yielded a requirement
    # so an empty file is still counted as "something was given".
    found_req_in_file = False
    for filename in options.requirements:
        for req in parse_requirements(
                filename,
                finder=finder, options=options, session=session,
                wheel_cache=wheel_cache):
            found_req_in_file = True
            requirement_set.add_requirement(req)
    # Nothing from any source: emit a hint (mentioning --find-links if the
    # user supplied links, since that is a common mistake).
    if not (args or options.editables or found_req_in_file):
        opts = {'name': name}
        if options.find_links:
            msg = ('You must give at least one requirement to '
                   '%(name)s (maybe you meant "pip %(name)s '
                   '%(links)s"?)' %
                   dict(opts, links=' '.join(options.find_links)))
        else:
            msg = ('You must give at least one requirement '
                   'to %(name)s (see "pip help %(name)s")' % opts)
        logger.warning(msg)
def fill_superseqs(data, samples):
    """
    Fills the superseqs array with seq data from cat.clust
    and fill the edges array with information about paired split locations.

    data : assembly-like object exposing clust_database (HDF5 path), dirs,
        name and _hackersonly settings
    samples : sample objects whose .name orders the rows of each cluster

    Writes into the "seqs" and "splits" datasets of the HDF5 file in
    chunk-sized blocks. Python 2 only (uses itertools.izip).
    """
    ## load super to get edges
    io5 = h5py.File(data.clust_database, 'r+')
    superseqs = io5["seqs"]
    splits = io5["splits"]
    ## samples are already sorted
    snames = [i.name for i in samples]
    LOGGER.info("snames %s", snames)
    ## get maxlen again
    maxlen = data._hackersonly["max_fragment_length"] + 20
    LOGGER.info("maxlen inside fill_superseqs is %s", maxlen)
    ## data has to be entered in blocks
    infile = os.path.join(data.dirs.across, data.name+"_catclust.gz")
    clusters = gzip.open(infile, 'r')
    # Pairs of lines (name line, sequence block) from the cluster file.
    pairdealer = itertools.izip(*[iter(clusters)]*2)
    ## iterate over clusters
    chunks = superseqs.attrs["chunksize"]
    chunksize = chunks[0]
    done = 0
    iloc = 0
    cloc = 0
    chunkseqs = np.zeros(chunks, dtype="|S1")
    chunkedge = np.zeros(chunksize, dtype=np.uint16)
    while 1:
        try:
            done, chunk = clustdealer(pairdealer, 1)
        except IndexError:
            raise IPyradWarningExit("clustfile formatting error in %s", chunk)
        ## if chunk is full put into superseqs and reset counter
        if cloc == chunksize:
            LOGGER.info("cloc chunk writing %s", cloc)
            superseqs[iloc-cloc:iloc] = chunkseqs
            splits[iloc-cloc:iloc] = chunkedge
            ## reset chunkseqs, chunkedge, cloc
            cloc = 0
            chunkseqs = np.zeros((chunksize, len(samples), maxlen), dtype="|S1")
            chunkedge = np.zeros((chunksize), dtype=np.uint16)
        ## get seq and split it
        if chunk:
            try:
                # One row per sample, padded with "N" for missing samples.
                fill = np.zeros((len(samples), maxlen), dtype="|S1")
                fill.fill("N")
                piece = chunk[0].strip().split("\n")
                names = piece[0::2]
                seqs = np.array([list(i) for i in piece[1::2]])
                ## fill in the separator if it exists
                # A column of all 'n' marks the paired-read split location.
                separator = np.where(np.all(seqs == 'n', axis=0))[0]
                if np.any(separator):
                    chunkedge[cloc] = separator.min()
                ## fill in the hits
                ## seqs will be (5,) IF the seqs are variable lengths, which
                ## can happen if it had duplicaes AND there were indels, and
                ## so the indels did not get aligned
                try:
                    shlen = seqs.shape[1]
                except IndexError as inst:
                    shlen = min([len(x) for x in seqs])
                for name, seq in zip(names, seqs):
                    # Sample index from the name prefix (strip trailing "_N").
                    sidx = snames.index(name.rsplit("_", 1)[0])
                    #fill[sidx, :shlen] = seq[:maxlen]
                    fill[sidx, :shlen] = seq[:shlen]
                ## PUT seqs INTO local ARRAY
                chunkseqs[cloc] = fill
            except Exception as inst:
                LOGGER.info(inst)
                LOGGER.info("\nfill: %s\nshlen %s\nmaxlen %s", fill.shape, shlen, maxlen)
                LOGGER.info("dupe chunk \n{}".format("\n".join(chunk)))
        ## increase counters if there was a chunk
        cloc += 1
        iloc += 1
        if done:
            break
    ## write final leftover chunk
    superseqs[iloc-cloc:,] = chunkseqs[:cloc]
    splits[iloc-cloc:] = chunkedge[:cloc]
    ## close super
    io5.close()
    clusters.close()
    ## edges is filled with splits for paired data.
    LOGGER.info("done filling superseqs")
and fill the edges array with information about paired split locations. |
def lsp_server_ready(self, language, configuration):
    """Notify all stackeditors about LSP server availability."""
    # Broadcast to every editor stack so each can wire up its own clients.
    for stack in self.editorstacks:
        stack.notify_server_ready(language, configuration)
def relabel(self, label=None, group=None, depth=1):
    """Clone object and apply new group and/or label.

    Applies relabeling to children up to the supplied depth.

    Args:
        label (str, optional): New label to apply to returned object
        group (str, optional): New group to apply to returned object
        depth (int, optional): Depth to which relabel will be applied
            If applied to container allows applying relabeling to
            contained objects up to the specified depth

    Returns:
        Returns relabelled object
    """
    # Thin override: re-exposes the inherited implementation so this class
    # carries its own docstring/signature.
    return super(HoloMap, self).relabel(label=label, group=group, depth=depth)
Applies relabeling to children up to the supplied depth.
Args:
label (str, optional): New label to apply to returned object
group (str, optional): New group to apply to returned object
depth (int, optional): Depth to which relabel will be applied
If applied to container allows applying relabeling to
contained objects up to the specified depth
Returns:
Returns relabelled object |
def terminate(self, signal_chain=KILL_CHAIN, kill_wait=KILL_WAIT_SEC, purge=True):
    """Ensure a process is terminated by sending a chain of kill signals (SIGTERM, SIGKILL).

    signal_chain - signals to try in escalating order
    kill_wait - seconds to wait for exit after each signal
    purge - when True, remove the process metadata after a successful kill

    Raises ProcessManager.NonResponsiveProcess if the process is still alive
    after the whole chain has been exhausted.
    """
    alive = self.is_alive()
    if alive:
        logger.debug('terminating {}'.format(self._name))
        for signal_type in signal_chain:
            pid = self.pid
            try:
                logger.debug('sending signal {} to pid {}'.format(signal_type, pid))
                self._kill(signal_type)
            except OSError as e:
                # e.g. the process exited between is_alive() and the kill;
                # log and keep going rather than abort the chain.
                logger.warning('caught OSError({e!s}) during attempt to kill -{signal} {pid}!'
                               .format(e=e, signal=signal_type, pid=pid))
            # Wait up to kill_wait seconds to terminate or move onto the next signal.
            try:
                if self._deadline_until(self.is_dead, 'daemon to exit', timeout=kill_wait):
                    alive = False
                    logger.debug('successfully terminated pid {}'.format(pid))
                    break
            except self.Timeout:
                # Loop to the next kill signal on timeout.
                pass
    if alive:
        raise ProcessManager.NonResponsiveProcess('failed to kill pid {pid} with signals {chain}'
                                                  .format(pid=self.pid, chain=signal_chain))
    if purge:
        self.purge_metadata(force=True)
def _validate(self, writing=False):
"""Verify that the box obeys the specifications."""
if ((len(self.bits_per_component) != len(self.signed)) or
(len(self.signed) != self.palette.shape[1])):
msg = ("The length of the 'bits_per_component' and the 'signed' "
"members must equal the number of columns of the palette.")
self._dispatch_validation_error(msg, writing=writing)
bps = self.bits_per_component
if writing and not all(b == bps[0] for b in bps):
# We don't support writing palettes with bit depths that are
# different.
msg = "Writing palettes with varying bit depths is not supported."
self._dispatch_validation_error(msg, writing=writing) | Verify that the box obeys the specifications. |
def keys(self, remote=False):
    """
    Returns the database names for this client. Default is
    to return only the locally cached database names, specify
    ``remote=True`` to make a remote request to include all databases.

    :param bool remote: Dictates whether the list of locally cached
        database names are returned or a remote request is made to include
        an up to date list of databases from the server. Defaults to False.

    :returns: List of database names
    """
    if remote:
        # Ask the server for the authoritative, up-to-date list.
        return self.all_dbs()
    # Fall back to the locally cached names.
    return list(super(CouchDB, self).keys())
to return only the locally cached database names, specify
``remote=True`` to make a remote request to include all databases.
:param bool remote: Dictates whether the list of locally cached
database names are returned or a remote request is made to include
an up to date list of databases from the server. Defaults to False.
:returns: List of database names |
def quick_report(report_type, change, options):
    """
    writes a change report via report_type to options.output or
    sys.stdout
    """
    report = report_type(None, options)
    if not options.output:
        report.run(change, None, sys.stdout)
    else:
        # An explicit output path wins over stdout.
        with open(options.output, "w") as out:
            report.run(change, None, out)
sys.stdout |
def _trj_check_version(self, version, python, force):
    """Checks for version mismatch

    Raises a VersionMismatchError if version of loaded trajectory and current pypet version
    do not match. In case of `force=True` error is not raised only a warning is emitted.
    """
    curr_python = pypetconstants.python_version_string
    # Either a pypet version or an interpreter version difference counts.
    mismatch = version != VERSION or curr_python != python
    if mismatch and not force:
        raise pex.VersionMismatchError('Current pypet version is %s used under python %s '
                                       ' but your trajectory'
                                       ' was created with version %s and python %s.'
                                       ' Use >>force=True<< to perform your load regardless'
                                       ' of version mismatch.' %
                                       (VERSION, curr_python, version, python))
    elif mismatch:
        # force=True: proceed, but leave a trace in the log.
        self._logger.warning('Current pypet version is %s with python %s but your trajectory'
                             ' was created with version %s under python %s.'
                             ' Yet, you enforced the load, so I will'
                             ' handle the trajectory despite the'
                             ' version mismatch.' %
                             (VERSION, curr_python, version, python))
Raises a VersionMismatchError if version of loaded trajectory and current pypet version
do not match. In case of `force=True` error is not raised only a warning is emitted. |
def add_debug(parser):
    """Add a `debug` flag to the _parser_."""
    # Default log level is INFO; -d/--debug switches it to DEBUG.
    parser.add_argument(
        '-d', '--debug',
        action='store_const',
        const=logging.DEBUG,
        default=logging.INFO,
        help='Set DEBUG output')
def XXX_REMOVEME(func):
    """Decorator for dead code removal

    Wrapping a method with this decorator makes any call to it raise a
    RuntimeError naming the class and method, so stale call sites surface
    immediately instead of silently executing dead code.
    """
    @wraps(func)
    def decorator(self, *args, **kwargs):
        # func.__name__ works on both Python 2 and 3; the original used
        # func.func_name, which is Python-2-only and raised AttributeError
        # on Python 3 whenever this path fired.
        msg = "~~~~~~~ XXX REMOVEME marked method called: {}.{}".format(
            self.__class__.__name__, func.__name__)
        raise RuntimeError(msg)
        # The original also had an unreachable `return func(...)` after the
        # raise; it has been removed as dead code.
    return decorator
def graph(self, as_dot=False):
    """Get the resolve graph.

    Args:
        as_dot: If True, get the graph as a dot-language string. Otherwise,
            a pygraph.digraph object is returned.

    Returns:
        A string or `pygraph.digraph` object, or None if there is no graph
        associated with the resolve.
    """
    if not self.has_graph:
        return None
    if not as_dot:
        # Lazily parse (dot or compact format) and cache the digraph.
        if self.graph_ is None:
            self.graph_ = read_graph_from_string(self.graph_string)
        return self.graph_
    if self.graph_string:
        if self.graph_string.startswith('{'):
            # Compact format: parse now, serialize to dot below.
            self.graph_ = read_graph_from_string(self.graph_string)
        else:
            # Already dot. Only happens for old rez contexts created before
            # the newer compact storage format.
            return self.graph_string
    return write_dot(self.graph_)
Args:
as_dot: If True, get the graph as a dot-language string. Otherwise,
a pygraph.digraph object is returned.
Returns:
A string or `pygraph.digraph` object, or None if there is no graph
associated with the resolve. |
def get_disk_statistics(self):
    """
    Create a map of disks in the machine.

    http://www.kernel.org/doc/Documentation/iostats.txt

    Prefers /proc/diskstats when readable; otherwise falls back to psutil
    (NOTE(review): the psutil path omits the *_merged and io_in_progress
    fields and fakes the (major, minor) keys as (0, index) - confirm callers
    tolerate the reduced field set).

    Returns:
        (major, minor) -> DiskStatistics(device, ...)
    """
    result = {}
    if os.access('/proc/diskstats', os.R_OK):
        self.proc_diskstats = True
        fp = open('/proc/diskstats')
        try:
            for line in fp:
                try:
                    columns = line.split()
                    # On early linux v2.6 versions, partitions have only 4
                    # output fields not 11. From linux 2.6.25 partitions
                    # have the full stats set.
                    if len(columns) < 14:
                        continue
                    major = int(columns[0])
                    minor = int(columns[1])
                    device = columns[2]
                    # Skip pseudo devices that carry no useful I/O stats.
                    if ((device.startswith('ram') or
                         device.startswith('loop'))):
                        continue
                    result[(major, minor)] = {
                        'device': device,
                        'reads': float(columns[3]),
                        'reads_merged': float(columns[4]),
                        'reads_sectors': float(columns[5]),
                        'reads_milliseconds': float(columns[6]),
                        'writes': float(columns[7]),
                        'writes_merged': float(columns[8]),
                        'writes_sectors': float(columns[9]),
                        'writes_milliseconds': float(columns[10]),
                        'io_in_progress': float(columns[11]),
                        'io_milliseconds': float(columns[12]),
                        'io_milliseconds_weighted': float(columns[13])
                    }
                except ValueError:
                    # Malformed line; skip it rather than abort the scan.
                    continue
        finally:
            fp.close()
    else:
        self.proc_diskstats = False
        if not psutil:
            self.log.error('Unable to import psutil')
            return None
        disks = psutil.disk_io_counters(True)
        sector_size = int(self.config['sector_size'])
        for disk in disks:
            result[(0, len(result))] = {
                'device': disk,
                'reads': disks[disk].read_count,
                'reads_sectors': disks[disk].read_bytes / sector_size,
                'reads_milliseconds': disks[disk].read_time,
                'writes': disks[disk].write_count,
                'writes_sectors': disks[disk].write_bytes / sector_size,
                'writes_milliseconds': disks[disk].write_time,
                'io_milliseconds':
                disks[disk].read_time + disks[disk].write_time,
                'io_milliseconds_weighted':
                disks[disk].read_time + disks[disk].write_time
            }
    return result
http://www.kernel.org/doc/Documentation/iostats.txt
Returns:
(major, minor) -> DiskStatistics(device, ...) |
def dayname(year, month, day):
    '''
    Give the name of the month and day for a given date.

    Returns:
        tuple month_name, day_name
    '''
    # Raises if the date is not a valid date in this calendar.
    legal_date(year, month, day)
    # Every month has exactly 28 days in this calendar.
    day_of_year = (month - 1) * 28 + day
    names = data.day_names_leap if isleap(year + YEAR_EPOCH - 1) else data.day_names
    return MONTHS[month - 1], names[day_of_year - 1]
Returns:
tuple month_name, day_name |
def electric_field_amplitude_intensity(s0, Omega=1.0e6):
    """Return E0, the electric-field amplitude (in units of hbar*Omega/(e*a0)),
    for a given saturation parameter s0 = I/I0, where I0 is the saturation
    intensity of the D2 line of rubidium for linearly polarized light.

    :param s0: saturation parameter I/I0 (dimensionless).
    :param Omega: angular-frequency scale used to normalize quantities (rad/s).
    """
    e0 = hbar*Omega/(e*a0)  # electric-field scale used for normalization
    # Saturation intensity of the D2 line; the nominal 2.50399 mW/cm^2 value
    # is superseded by this more precise figure.
    I0 = 1.66889451102868  # mW/cm^2
    I0 = I0/1000*(100**2)  # convert mW/cm^2 -> W/m^2
    r_ciclic = 4.226983616875483  # cyclic-transition dipole matrix element, in a0
    gamma_D2 = 2*Pi*6.065e6/Omega  # decay rate of the D2 line, in units of Omega
    # Saturation field amplitude, then the corresponding saturation intensity
    # consistent with E0_sat (I = E^2 / (2 c mu0)).
    E0_sat = gamma_D2/r_ciclic/sqrt(2.0)
    E0_sat = E0_sat*e0
    I0 = E0_sat**2/2/c/mu0
    return sqrt(2*c*mu0*s0*I0)/e0
at a given saturation parameter s0=I/I0, where I0=2.50399 mW/cm^2 is the
saturation intensity of the D2 line of Rubidium for linearly polarized light. |
def download(cls, filename, input_dir, dl_dir=None):
    """Provide potentially streaming download from S3 using gof3r
    or the AWS CLI.

    Returns the path to the downloaded file; skips the download when the
    file is already present.
    """
    file_info = cls.parse_remote(filename)
    if not dl_dir:
        dl_dir = os.path.join(input_dir, file_info.bucket,
                              os.path.dirname(file_info.key))
    utils.safe_makedir(dl_dir)
    out_file = os.path.join(dl_dir, os.path.basename(file_info.key))
    if utils.file_exists(out_file):
        return out_file
    with file_transaction({}, out_file) as tx_out_file:
        command, prog = cls._download_cl(filename)
        # Each downloader takes the output target differently.
        if prog == "gof3r":
            command += ["-p", tx_out_file]
        elif prog == "awscli":
            command += [tx_out_file]
        else:
            raise NotImplementedError(
                "Unexpected download program %s" % prog)
        subprocess.check_call(command)
    return out_file
or the AWS CLI. |
def import_module(dotted_path):
    """Import and return the object named by a dotted import path.

    The final component of *dotted_path* is treated as an attribute of
    the module named by the preceding components.
    """
    import importlib
    module_path, _, attribute_name = dotted_path.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, attribute_name)
dot notated import path for the module. |
def has_value(key, value=None):
    '''
    Determine whether the key exists in the current salt process
    environment dictionary. Optionally compare the current value
    of the environment against the supplied value string.
    key
        Must be a string. Used as key for environment lookup.
    value:
        Optional. If key exists in the environment, compare the
        current value with this value. Return True if they are equal.
    CLI Example:
    .. code-block:: bash
        salt '*' environ.has_value foo
    '''
    if not isinstance(key, six.string_types):
        log.debug('%s: \'key\' argument is not a string type: \'%s\'', __name__, key)
        return False
    try:
        cur_val = os.environ[key]
    except KeyError:
        # Key absent from the environment.
        return False
    if value is not None:
        return cur_val == value
    return True
environment dictionary. Optionally compare the current value
of the environment against the supplied value string.
key
Must be a string. Used as key for environment lookup.
value:
Optional. If key exists in the environment, compare the
current value with this value. Return True if they are equal.
CLI Example:
.. code-block:: bash
salt '*' environ.has_value foo |
def get_remembered_position(self):
    """Return the position, within the current partially formatted phone
    number, of the character that was previously passed in via
    input_digit(remember_position=True).
    """
    if not self._able_to_format:
        return self._original_position
    # Walk the formatted output in lockstep with the raw (unformatted)
    # input until the remembered number of raw characters is consumed;
    # formatting characters only advance the output index.
    raw_index = 0
    output_index = 0
    raw_input_chars = self._accrued_input_without_formatting
    formatted = self._current_output
    while (raw_index < self._position_to_remember and
           output_index < len(formatted)):
        if raw_input_chars[raw_index] == formatted[output_index]:
            raw_index += 1
        output_index += 1
    return output_index
number of the character which was previously passed in as the
parameter of input_digit(remember_position=True). |
def getGolangPackages(self):
    """Get a list of all golang packages for all available branches.

    Returns:
        dict: mapping of package name to processed package data, each
        entry including a "branches" key. Empty dict when the initial
        package-listing request fails.
    """
    packages = {}
    # get all packages (first page)
    url = "%s/packages" % self.base_url
    params = {"pattern": "golang-*", "limit": 200}
    response = requests.get(url, params=params)
    if response.status_code != requests.codes.ok:
        return {}
    data = response.json()
    for package in data["packages"]:
        packages[package["name"]] = self._processPackageData(package)
    # accumulate packages from all remaining pages
    for page in range(2, data["page_total"] + 1):
        params = {"pattern": "golang-*", "limit": 200, "page": page}
        response = requests.get(url, params=params)
        if response.status_code != requests.codes.ok:
            # best-effort: skip a page that fails rather than aborting
            continue
        data = response.json()
        for package in data["packages"]:
            packages[package["name"]] = self._processPackageData(package)
    # get branches of all packages, MAX_LEN packages per request
    MAX_LEN = 30
    # dict.keys() returns a non-sliceable view on Python 3, so materialize
    # it into a list before chunking (the original code broke here on py3).
    package_names = list(packages.keys())
    packages_total = len(package_names)
    packages_counter = 0
    logger.info("%s packages to process" % packages_total)
    for i in range(0, packages_total, MAX_LEN):
        sublist = package_names[i:i+MAX_LEN]
        branches = self._getPackageBranches(sublist)
        for package in sublist:
            packages[package]["branches"] = branches[package]
        packages_counter = packages_counter + len(branches)
        logger.info("%s/%s packages processed" % (packages_counter, packages_total))
    return packages
def catch(do, my_exception=TypeError, hints='', do_raise=None, prt_tb=True):
    """
    Prevent the program from exiting abnormally when an exception occurs.
    Note that this catching mechanism only keeps the program alive; it
    cannot perform any real recovery handling.
    Can be used with or without arguments:
    - ``do == True`` enables exception catching
    - using the decorator without arguments also enables try-catch
    .. code:: python
        @catch
        def fnc():
            pass
    - Add it on functions that may fail; do not add it at the outermost level.
    - This catch will capture every exception from the decorated function
      downward, which can hide errors raised by nested calls.
    - If an inner function also has a catching decorator, both will catch
      the exception.
    :param do: decorated function (no-argument form) or an enable flag
        (with-argument form).
    :type do: callable or bool
    :param my_exception: exception type(s) to catch.
    :type my_exception: type or tuple
    :param hints: message printed when an exception is caught.
    :type hints: str
    :param do_raise: if set, an exception type to re-raise after logging.
    :param prt_tb: if True, print the traceback of the caught exception.
    :return: the wrapped function (or a decorator in the with-argument form).
    :rtype: callable
    """
    # With-argument form: ``do`` is not callable, so return a decorator.
    if not hasattr(do, '__call__'):
        def dec(fn):
            @wraps(fn)
            def wrapper_(*args, **kwargs):
                # Catching disabled: call straight through.
                if not do:
                    return fn(*args, **kwargs)
                try:
                    return fn(*args, **kwargs)
                except my_exception as e:
                    # Log file name, line number and function of the failure.
                    log.error("{}({})>{}: has err({})".format(
                        fn.__code__.co_filename.split('/')[-1],
                        fn.__code__.co_firstlineno,
                        fn.__name__, e))
                    if prt_tb:
                        traceback.print_exc()
                    if hints:
                        print(hints)
                    if do_raise:
                        raise do_raise(
                            hints or "{}({}):{}".format(
                                fn.__code__.co_filename.split('/')[-1],
                                fn.__code__.co_firstlineno,
                                fn.__name__)
                        )
            return wrapper_
        return dec
    # No-argument form: ``do`` is the decorated function itself.
    @wraps(do)
    def wrapper(*args, **kwargs):
        try:
            return do(*args, **kwargs)
        except my_exception as e:
            log.error("{}({})>{}: has err({})".format(
                do.__code__.co_filename.split('/')[-1],
                do.__code__.co_firstlineno,
                do.__name__, e
            ))
            if prt_tb:
                traceback.print_exc()
            if hints:
                print(hints)
            if do_raise:
                raise do_raise(
                    hints or "{}({}):{}".format(
                        do.__code__.co_filename.split('/')[-1],
                        do.__code__.co_firstlineno,
                        do.__name__)
                )
    return wrapper
但是这里的异常捕获机制仅仅是为了防止程序退出, 无法做相应处理
可以支持有参数或者无参数模式
- ``do == True`` , 则启用捕获异常
- 无参数也启用 try-catch
.. code:: python
@catch
def fnc():
pass
- 在有可能出错的函数前添加, 不要在最外层添加,
- 这个catch 会捕获从该函数开始的所有异常, 会隐藏下一级函数调用的错误.
- 但是如果在内层的函数也有捕获方法, 则都会catch到异常.
:param do:
:type do:
:param my_exception:
:type my_exception:
:param hints:
:type hints:
:return:
:rtype: |
def _ExtractPathSpecsFromDirectory(self, file_entry, depth=0):
    """Extracts path specification from a directory.

    Args:
        file_entry (dfvfs.FileEntry): file entry that refers to the directory.
        depth (Optional[int]): current depth where 0 represents the file system
            root.

    Yields:
        dfvfs.PathSpec: path specification of a file entry found in the
            directory.

    Raises:
        MaximumRecursionDepth: when depth reaches self._MAXIMUM_DEPTH.
    """
    if depth >= self._MAXIMUM_DEPTH:
        raise errors.MaximumRecursionDepth('Maximum recursion depth reached.')
    # Need to do a breadth-first search otherwise we'll hit the Python
    # maximum recursion depth.
    sub_directories = []
    for sub_file_entry in file_entry.sub_file_entries:
        try:
            # Skip unallocated entries and links.
            if not sub_file_entry.IsAllocated() or sub_file_entry.IsLink():
                continue
        except dfvfs_errors.BackEndError as exception:
            # An entry the back-end cannot read is logged and skipped,
            # not treated as fatal.
            logger.warning(
                'Unable to process file: {0:s} with error: {1!s}'.format(
                    sub_file_entry.path_spec.comparable.replace(
                        '\n', ';'), exception))
            continue
        # For TSK-based file entries only, ignore the virtual /$OrphanFiles
        # directory.
        if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK:
            if file_entry.IsRoot() and sub_file_entry.name == '$OrphanFiles':
                continue
        if sub_file_entry.IsDirectory():
            sub_directories.append(sub_file_entry)
        elif sub_file_entry.IsFile():
            # If we are dealing with a VSS we want to calculate a hash
            # value based on available timestamps and compare that to previously
            # calculated hash values, and only include the file into the queue if
            # the hash does not match.
            if self._duplicate_file_check:
                hash_value = self._CalculateNTFSTimeHash(sub_file_entry)
                inode = getattr(sub_file_entry.path_spec, 'inode', 0)
                if inode in self._hashlist:
                    if hash_value in self._hashlist[inode]:
                        continue
                self._hashlist.setdefault(inode, []).append(hash_value)
            for path_spec in self._ExtractPathSpecsFromFile(sub_file_entry):
                yield path_spec
    # Recurse into sub directories only after the current level has been
    # fully processed (breadth-first order).
    for sub_file_entry in sub_directories:
        try:
            for path_spec in self._ExtractPathSpecsFromDirectory(
                    sub_file_entry, depth=(depth + 1)):
                yield path_spec
        except (
                IOError, dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
                dfvfs_errors.PathSpecError) as exception:
            # Log and continue with the next sub directory.
            logger.warning('{0!s}'.format(exception))
Args:
file_entry (dfvfs.FileEntry): file entry that refers to the directory.
depth (Optional[int]): current depth where 0 represents the file system
root.
Yields:
dfvfs.PathSpec: path specification of a file entry found in the directory. |
def _check_lib(self, remake, compiler, debug, profile):
    """Ensure the linked library built from the original code exists;
    compile it from scratch when it is missing.
    """
    from os import path
    library_missing = self.link is None or not path.isfile(self.link)
    if library_missing:
        self.makelib(remake, True, compiler, debug, profile)
the library is compiled from scratch. |
def winning_name(self):
    """
    Returns a ``string`` of the winning team's name, such as 'Houston
    Astros'.
    """
    name_element = self._home_name if self.winner == HOME else self._away_name
    return name_element.text()
Astros'. |
def run(self, inputRecord):
    """
    Run one iteration of this model.

    :param inputRecord: (object)
        A record object formatted according to
        :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or
        :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`
        result format.
    :returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)
        A ModelResult namedtuple. The contents of ModelResult.inferences
        depends on the specific inference type of this model, which
        can be queried by :meth:`.getInferenceType`.
    """
    # Assign the 0-based prediction index, then advance the counter.
    predictionNumber = self._numPredictions
    self._numPredictions = predictionNumber + 1
    return opf_utils.ModelResult(predictionNumber=predictionNumber,
                                 rawInput=inputRecord)
:param inputRecord: (object)
A record object formatted according to
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`
result format.
:returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)
An ModelResult namedtuple. The contents of ModelResult.inferences
depends on the the specific inference type of this model, which
can be queried by :meth:`.getInferenceType`. |
def shard_stores(self, index=None, params=None):
    """
    Provides store information for shard copies of indices. Store
    information reports on which nodes shard copies exist, the shard copy
    version, indicating how recent they are, and any exceptions encountered
    while opening the shard index or from earlier engine failure.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html>`_
    :arg index: A comma-separated list of index names; use `_all` or empty
        string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg operation_threading: TODO: ?
    :arg status: A comma-separated list of statuses used to filter on shards
        to get store information for, valid choices are: 'green', 'yellow',
        'red', 'all'
    """
    endpoint = _make_path(index, "_shard_stores")
    return self.transport.perform_request("GET", endpoint, params=params)
information reports on which nodes shard copies exist, the shard copy
version, indicating how recent they are, and any exceptions encountered
while opening the shard index or from earlier engine failure.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg operation_threading: TODO: ?
:arg status: A comma-separated list of statuses used to filter on shards
to get store information for, valid choices are: 'green', 'yellow',
'red', 'all' |
def save_template(self, name, unlocked=False):
    """Save a message template for later use with `Load template`.

    If the saved template is marked as unlocked, changes can still be made
    to it afterwards. By default templates are locked.

    Examples:
    | Save Template | MyMessage |
    | Save Template | MyOtherMessage | unlocked=True |
    """
    # Robot Framework passes keyword arguments as strings; treat anything
    # other than 'false' (case-insensitive) as True.
    if isinstance(unlocked, basestring):
        unlocked = (unlocked.lower() != 'false')
    template = self._get_message_template()
    if not unlocked:
        template.set_as_saved()
    self._message_templates[name] = (template, self._field_values)
If saved template is marked as unlocked, then changes can be made to it
afterwards. By default tempaltes are locked.
Examples:
| Save Template | MyMessage |
| Save Template | MyOtherMessage | unlocked=True | |
def toxml(self):
    """
    Exports this object into a LEMS XML element string.
    """
    # Probably name should be removed altogether until its usage is decided, see
    # https://github.com/LEMS/LEMS/issues/4
    attribute_pairs = (
        ('symbol', self.symbol),
        ('dimension', self.dimension),
        ('power', self.power),
        ('scale', self.scale),
        ('offset', self.offset),
        ('description', self.description),
    )
    fragments = ['<Unit']
    for attr_name, attr_value in attribute_pairs:
        # Falsy values (None, 0, '') are omitted, matching truthiness checks.
        if attr_value:
            fragments.append(' {0} = "{1}"'.format(attr_name, attr_value))
    fragments.append('/>')
    return ''.join(fragments)
def __check_command_completion(self, testsemicolon=True):
    """Check for command(s) completion.

    This function should be called each time a new argument is
    seen by the parser in order to check a command is complete. As
    not only one command can be ended when receiving a new
    argument (nested commands case), we apply the same work to
    parent commands.

    :param testsemicolon: if True, indicates that the next
        expected token must be a semicolon (for commands that need one)
    :return: True if command is considered as complete, False otherwise.
    """
    if not self.__curcommand.iscomplete():
        return True
    # Actions and childless control commands are terminated by a semicolon.
    ctype = self.__curcommand.get_type()
    if ctype == "action" or \
       (ctype == "control" and
        not self.__curcommand.accept_children):
        if testsemicolon:
            self.__set_expected("semicolon")
        return True
    # Walk up the chain of parents: completing this command may in turn
    # complete enclosing test/control commands (nested commands case).
    while self.__curcommand.parent:
        cmd = self.__curcommand
        self.__curcommand = self.__curcommand.parent
        if self.__curcommand.get_type() in ["control", "test"]:
            if self.__curcommand.iscomplete():
                # A completed control command ends the walk; a completed
                # test keeps propagating upward.
                if self.__curcommand.get_type() == "control":
                    break
                continue
            # Register the completed child as a "test" argument of its
            # parent without re-adding it.
            if not self.__curcommand.check_next_arg("test", cmd, add=False):
                return False
            if not self.__curcommand.iscomplete():
                if self.__curcommand.variable_args_nb:
                    self.__set_expected("comma", "right_parenthesis")
                break
    return True
This function should be called each time a new argument is
seen by the parser in order to check a command is complete. As
not only one command can be ended when receiving a new
argument (nested commands case), we apply the same work to
parent commands.
:param testsemicolon: if True, indicates that the next
expected token must be a semicolon (for commands that need one)
:return: True if command is
considered as complete, False otherwise. |
def like(self):
    """ Like a clip.
    """
    url = "https://kippt.com/api/clips/%s/likes" % (self.id)
    response = requests.post(url, headers=self.kippt.header)
    return response.json()
def _run(self, *args, **kwargs):
    """Runs RPC server.

    Wait for peers to connect and start an RPC session with each one;
    every connection gets its own session.
    """
    bind_ip = _validate_rpc_ip(kwargs.pop(NC_RPC_BIND_IP))
    bind_port = _validate_rpc_port(kwargs.pop(NC_RPC_BIND_PORT))
    LOG.debug('NetworkController started listening for connections...')
    server_thread, _ = self._listen_tcp((bind_ip, bind_port),
                                        self._start_rpc_session)
    self.pause(0)
    server_thread.wait()
Wait for peer to connect and start rpc session with it.
For every connection we start and new rpc session. |
def declare_type_info(config):
    """Parse the 'exports-allowable-types' setting into structured type
    info, stored under settings['_type_info'] as (name, info_dict) pairs.

    Each non-empty line has the form:
        type_name:file_extension,mimetype,user_friendly_name,description
    """
    settings = config.registry.settings
    settings['_type_info'] = []
    for raw_line in settings['exports-allowable-types'].splitlines():
        stripped = raw_line.strip()
        if not stripped:
            continue
        type_name, details = stripped.split(':', 1)
        fields = details.split(',', 3)
        settings['_type_info'].append((type_name, {
            'type_name': type_name,
            'file_extension': fields[0],
            'mimetype': fields[1],
            'user_friendly_name': fields[2],
            'description': fields[3],
        }))
def add2node(self, othereplus, node):
    """Add the value stored for *node* in *othereplus* onto this object's
    entry for the same node; this will potentially create duplicates.
    """
    # Node names are keyed case-insensitively (uppercase).
    key = node.upper()
    self.dt[key] = self.dt[key] + othereplus.dt[key]
this will potentially have duplicates |
def __WaitForVolume(volume, desired_state):
    """Block until the EBS volume reaches *desired_state*.

    Polls volume.status every 5 seconds, printing a '.' for each poll.
    Original used Python-2-only ``print`` statements, which made the module
    unimportable under Python 3; parenthesizing the single argument is
    behavior-identical on both versions.
    """
    print('Waiting for volume %s to be %s...' % (volume.id, desired_state))
    while True:
        volume.update()
        sys.stdout.write('.')
        sys.stdout.flush()
        if volume.status == desired_state:
            break
        time.sleep(5)
def create_user(self, username, email, password, active=False,
                send_email=True):
    """
    A simple wrapper that creates a new :class:`User`.

    :param username:
        String containing the username of the new user.
    :param email:
        String containing the email address of the new user.
    :param password:
        String containing the password for the new user.
    :param active:
        Boolean that defines if the user requires activation by clicking
        on a link in an e-mail. Defaults to ``False``.
    :param send_email:
        Boolean that defines if the user should be sent an email. You could
        set this to ``False`` when you want to create a user in your own
        code, but don't want the user to activate through email.
    :return: :class:`User` instance representing the new user.
    """
    user_model = get_user_model()
    new_user = user_model.objects.create_user(username, email, password)
    new_user.is_active = active
    new_user.save()
    # Grant the user permission to view and change their own profile.
    for perm in ASSIGNED_PERMISSIONS['profile']:
        assign_perm(perm[0], new_user, get_user_profile(user=new_user))
    # Grant the user permission to view and change themselves.
    for perm in ASSIGNED_PERMISSIONS['user']:
        assign_perm(perm[0], new_user, new_user)
    userena_profile = self.create_userena_profile(new_user)
    if send_email:
        userena_profile.send_activation_email()
    return new_user
:param username:
String containing the username of the new user.
:param email:
String containing the email address of the new user.
:param password:
String containing the password for the new user.
:param active:
Boolean that defines if the user requires activation by clicking
on a link in an e-mail. Defaults to ``False``.
:param send_email:
Boolean that defines if the user should be sent an email. You could
set this to ``False`` when you want to create a user in your own
code, but don't want the user to activate through email.
:return: :class:`User` instance representing the new user. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.