_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def undone(self, index):
    """Handles the 'D' command: clear an item's done flag.

    :index: Index of the item to mark as not done.
    """
    # ignore indexes that do not refer to an existing item
    if not self.model.exists(index):
        return
    self.model.edit(index, done=False)
"resource": ""
} |
def options(self, glob=False, **args):
    """Handles the 'o' command.

    :glob: Whether to store specified options globally.
    :args: Arguments supplied to the 'o' command (excluding '-g').
    """
    kwargs = {}
    for name, value in args.items():
        # 'sort' values are compiled into a pattern before forwarding
        if name == "sort":
            value = self._getPattern(value)
        # 'done'/'undone' are folded into a single 'done' option below
        if name in ("done", "undone"):
            continue
        kwargs[name] = value
    if "done" in args or "undone" in args:
        kwargs["done"] = self._getDone(args.get("done"), args.get("undone"))
    self.model.setOptions(glob=glob, **kwargs)
"resource": ""
} |
def getKwargs(self, args, values=None, get=None):
    """Gets necessary data from user input.

    :args: Dictionary of arguments supplied in command line.
    :values: Default values dictionary, supplied for editing
        (a fresh empty dict when omitted).
    :get: Object used to get values from user input (a fresh Get()
        instance when omitted).
    :returns: A dictionary containing data gathered from user input.
    """
    # BUG FIX: the previous signature used "values={}, get=Get()";
    # both defaults were evaluated once at definition time, so every
    # call shared the same dict and the same Get instance.  Use None
    # sentinels and build fresh defaults per call instead.
    if values is None:
        values = {}
    if get is None:
        get = Get()
    kwargs = dict()
    for field in ['name', 'priority', 'comment', 'parent']:
        # command-line value wins; otherwise prompt, seeded with the default
        fvalue = args.get(field) or get.get(field, values.get(field))
        if fvalue is not None:
            kwargs[field] = fvalue
    return kwargs
"resource": ""
} |
def url(self):
    """
    The cache entry's URL, assembled from the values of the scheme,
    host, and path attributes.  Assigning a value to the URL attribute
    causes the value to be parsed and the scheme, host and path
    attributes updated.
    """
    parts = (self.scheme, self.host, self.path, None, None, None)
    return urlparse.urlunparse(parts)
"resource": ""
} |
def from_T050017(cls, url, coltype = LIGOTimeGPS):
    """
    Parse a URL in the style of T050017-00 into a CacheEntry.  The
    T050017-00 file name format is, essentially,

        observatory-description-start-duration.extension

    Example:

    >>> c = CacheEntry.from_T050017("file://localhost/data/node144/frames/S5/strain-L2/LLO/L-L1_RDS_C03_L2-8365/L-L1_RDS_C03_L2-836562330-83.gwf")
    >>> c.observatory
    'L'
    >>> c.host
    'localhost'
    >>> os.path.basename(c.path)
    'L-L1_RDS_C03_L2-836562330-83.gwf'
    """
    match = cls._url_regex.search(url)
    if match is None:
        raise ValueError("could not convert %s to CacheEntry" % repr(url))
    observatory = match.group("obs")
    description = match.group("dsc")
    start = match.group("strt")
    duration = match.group("dur")
    if start == "-" and duration == "-":
        # the file name carries no segment information
        segment = None
    else:
        # half-open GPS interval [start, start + duration)
        segment = segments.segment(coltype(start), coltype(start) + coltype(duration))
    return cls(observatory, description, segment, url)
"resource": ""
} |
def fromfile(cls, fileobj, coltype=LIGOTimeGPS):
    """
    Return a Cache whose entries are parsed, one per line, from an
    open file object.
    """
    entries = [cls.entry_class(line, coltype=coltype) for line in fileobj]
    return cls(entries)
"resource": ""
} |
def fromfilenames(cls, filenames, coltype=LIGOTimeGPS):
    """
    Read Cache objects from the files named and concatenate the
    results into a single Cache.

    :filenames: iterable of cache file names to read.
    :coltype: GPS time type, passed through to fromfile().
    """
    cache = cls()
    for filename in filenames:
        # BUG FIX: the previous implementation opened each file and
        # never closed it, leaking a descriptor per file until garbage
        # collection; a context manager closes each promptly.
        with open(filename) as fileobj:
            cache.extend(cls.fromfile(fileobj, coltype=coltype))
    return cache
"resource": ""
} |
def unique(self):
    """
    Return a Cache containing every element of self exactly once,
    preserving first-seen order.  Membership is tested with "in"
    (no hashing), so this is O(n**2) but works for unhashable entries.
    """
    result = self.__class__([])
    for entry in self:
        if entry not in result:
            result.append(entry)
    return result
"resource": ""
} |
def tofile(self, fileobj):
    """
    Serialize this Cache to fileobj in LAL cache file format, one
    entry per line, then close fileobj.
    """
    for entry in self:
        # the entry's str() yields the LAL cache line
        print >>fileobj, str(entry)
    fileobj.close()
"resource": ""
} |
def topfnfile(self, fileobj):
    """
    Write this Cache to fileobj as a plain-text PFN file (one path
    per line), then close fileobj.
    """
    for entry in self:
        print >>fileobj, entry.path
    fileobj.close()
"resource": ""
} |
def to_segmentlistdict(self):
    """
    Return a segmentlistdict object describing the instruments and
    times spanned by the entries in this Cache.  The return value is
    coalesced.
    """
    result = segments.segmentlistdict()
    for entry in self:
        # |= merges (and coalesces) each entry's segments into the result
        result |= entry.segmentlistdict
    return result
"resource": ""
} |
q39611 | get_ilwdchar_class | train | def get_ilwdchar_class(tbl_name, col_name, namespace = globals()):
# NOTE: "namespace = globals()" is evaluated once at definition time,
# deliberately binding this module's dict so it doubles as a class cache.
"""
Searches this module's namespace for a subclass of _ilwd.ilwdchar
whose table_name and column_name attributes match those provided.
If a matching subclass is found it is returned; otherwise a new
class is defined, added to this module's namespace, and returned.
Example:
>>> process_id = get_ilwdchar_class("process", "process_id")
>>> x = process_id(10)
>>> str(type(x))
"<class 'pycbc_glue.ligolw.ilwd.process_process_id_class'>"
>>> str(x)
'process:process_id:10'
Retrieving and storing the class provides a convenient mechanism
for quickly constructing new ID objects.
Example:
>>> for i in range(10):
... print str(process_id(i))
...
process:process_id:0
process:process_id:1
process:process_id:2
process:process_id:3
process:process_id:4
process:process_id:5
process:process_id:6
process:process_id:7
process:process_id:8
process:process_id:9
"""
#
# if the class already exists, retrieve and return it
#
key = (str(tbl_name), str(col_name))
cls_name = "%s_%s_class" % key
# guard against a pathological table/column pair shadowing this function
assert cls_name != "get_ilwdchar_class"
try:
return namespace[cls_name]
except KeyError:
pass
#
# otherwise define a new class, and add it to the cache
#
class new_class(_ilwd.ilwdchar):
__slots__ = ()
table_name, column_name = key
# offset past the "table:column:" prefix in the string form of the ID
index_offset = len("%s:%s:" % key)
new_class.__name__ = cls_name
namespace[cls_name] = new_class
#
# pickle support
#
# register a reducer so instances pickle via the module-level ilwdchar()
# factory (Python 2 API:  copy_reg and unicode)
copy_reg.pickle(new_class, lambda x: (ilwdchar, (unicode(x),)))
#
# return the new class
#
return new_class | python | {
"resource": ""
} |
def doc_includes_process(xmldoc, program):
    """
    Return True if the process table in xmldoc includes at least one
    entry for a program named program.
    """
    programs = lsctables.ProcessTable.get_table(xmldoc).getColumnByName(u"program")
    return program in programs
"resource": ""
} |
def plugitInclude(parser, token):
    """
    Load and render a template, using the same context of a specific action.

    Example: {% plugitInclude "/menuBar" %}

    :parser: the template parser.
    :token: the tag token; must carry exactly one argument (the action
        whose template should be included).
    :raises TemplateSyntaxError: if the argument count is wrong.
    """
    bits = token.split_contents()
    if len(bits) != 2:
        # BUG FIX: corrected typo in the user-facing error message
        # ("tempalte's" -> "template's")
        raise TemplateSyntaxError("'plugitInclude' tag takes one argument: the template's action to use")
    action = parser.compile_filter(bits[1])
    return PlugItIncludeNode(action)
"resource": ""
} |
def set_temp_store_directory(connection, temp_store_directory, verbose = False):
    """
    Set sqlite's temp_store_directory pragma on the given connection.
    The special value "_CONDOR_SCRATCH_DIR" is replaced with the value
    of the environment variable of the same name.
    """
    if temp_store_directory == "_CONDOR_SCRATCH_DIR":
        temp_store_directory = os.getenv("_CONDOR_SCRATCH_DIR")
    if verbose:
        # trailing comma suppresses the newline until "done" is printed
        print >>sys.stderr, "setting the temp_store_directory to %s ..." % temp_store_directory,
    cursor = connection.cursor()
    # PRAGMA statements do not support "?" parameter substitution, so
    # the directory is interpolated into the SQL text
    cursor.execute("PRAGMA temp_store_directory = '%s'" % temp_store_directory)
    cursor.close()
    if verbose:
        print >>sys.stderr, "done"
"resource": ""
} |
def idmap_sync(connection):
    """
    Iterate over the tables in the database, ensuring that a custom
    DBTable class exists for each, and synchronize each table's ID
    generator to the ID values already in the database.
    """
    xmldoc = get_xml(connection)
    for dbtable in xmldoc.getElementsByTagName(DBTable.tagName):
        dbtable.sync_next_id()
    # release the document's internal references
    xmldoc.unlink()
"resource": ""
} |
def idmap_get_new(connection, old, tbl):
    """
    From the old ID string, obtain a replacement ID string:  either the
    one already recorded in the _idmap_ table, or a fresh ID drawn from
    the Table instance's next_id counter.  In the latter case the new
    ID is recorded in _idmap_ and the counter advances.

    This function is for internal use; it forms part of the code used
    to re-map row IDs when merging multiple documents.
    """
    cursor = connection.cursor()
    cursor.execute("SELECT new FROM _idmap_ WHERE old == ?", (old,))
    row = cursor.fetchone()
    if row is not None:
        # a new ID has already been assigned to this old ID
        return ilwd.ilwdchar(row[0])
    # no mapping yet:  draw the table's next ID and record the mapping
    new = tbl.get_next_id()
    cursor.execute("INSERT INTO _idmap_ VALUES (?, ?)", (old, new))
    return new
"resource": ""
} |
def get_table_names(connection):
    """
    Return a list of the names of the tables in the database at the
    given connection.
    """
    cursor = connection.cursor()
    cursor.execute("SELECT name FROM sqlite_master WHERE type == 'table'")
    return [row[0] for row in cursor]
"resource": ""
} |
def build_indexes(connection, verbose = False):
    """
    Using the how_to_index annotations in the table class definitions,
    construct a set of indexes for the database at the given
    connection.
    """
    cursor = connection.cursor()
    for table_name in get_table_names(connection):
        # look the table up among the local DBTable classes first, then
        # among the standard lsctables definitions
        # FIXME: figure out how to do this extensibly
        if table_name in TableByName:
            how_to_index = TableByName[table_name].how_to_index
        elif table_name in lsctables.TableByName:
            how_to_index = lsctables.TableByName[table_name].how_to_index
        else:
            continue
        if how_to_index is None:
            continue
        if verbose:
            print >>sys.stderr, "indexing %s table ..." % table_name
        for index_name, cols in how_to_index.iteritems():
            cursor.execute("CREATE INDEX IF NOT EXISTS %s ON %s (%s)" % (index_name, table_name, ",".join(cols)))
    connection.commit()
"resource": ""
} |
def row_from_cols(self, values):
    """
    Given an iterable of values in the order of columns in the
    database, construct and return a row object.  This is a
    convenience helper for turning the results of database queries
    into Python objects.
    """
    row = self.RowType()
    for name, dbtype, value in zip(self.dbcolumnnames, self.dbcolumntypes, values):
        # ID-typed columns are rehydrated as ilwdchar instances
        if dbtype in ligolwtypes.IDTypes:
            value = ilwd.ilwdchar(value)
        setattr(row, name, value)
    return row
"resource": ""
} |
q39620 | DBTable.applyKeyMapping | train | def applyKeyMapping(self):
"""
Used as the second half of the key reassignment algorithm.
Loops over each row in the table, replacing references to
old row keys with the new values from the _idmap_ table.
"""
# build one "col = (SELECT new FROM _idmap_ WHERE old == col)" clause
# per ID-typed column, skipping this table's own primary-key column
# (that one is assigned directly, not remapped here)
assignments = ", ".join("%s = (SELECT new FROM _idmap_ WHERE old == %s)" % (colname, colname) for coltype, colname in zip(self.dbcolumntypes, self.dbcolumnnames) if coltype in ligolwtypes.IDTypes and (self.next_id is None or colname != self.next_id.column_name))
if assignments:
# SQLite documentation says ROWID is monotonically
# increasing starting at 1 for the first row unless
# it ever wraps around, then it is randomly
# assigned. ROWID is a 64 bit integer, so the only
# way it will wrap is if somebody sets it to a very
# high number manually. This library does not do
# that, so I don't bother checking.
self.cursor.execute("UPDATE %s SET %s WHERE ROWID > %d" % (self.Name, assignments, self.last_maxrowid))
# remember the high-water mark so that only rows appended after this
# call are remapped by the next call
self.last_maxrowid = self.maxrowid() or 0 | python | {
"resource": ""
} |
def findCredential():
    """
    Follow the usual path that GSI libraries would follow to find a
    valid proxy credential, but also allow an end entity certificate
    to be used along with an unencrypted private key if they are
    pointed to by X509_USER_CERT and X509_USER_KEY, since we expect
    this will be the output from the eventual ligo-login wrapper
    around kinit and then myproxy-login.

    Returns a (certFile, keyFile) tuple; exits the process if no
    usable credential can be found.
    """
    # use X509_USER_PROXY from environment if set
    # (FIX: dict.has_key() is deprecated and removed in Python 3;
    # the "in" operator is equivalent and works in both versions)
    if 'X509_USER_PROXY' in os.environ:
        filePath = os.environ['X509_USER_PROXY']
        if validateProxy(filePath):
            return filePath, filePath
        RFCproxyUsage()
        sys.exit(1)
    # use X509_USER_CERT and X509_USER_KEY if both are set
    if 'X509_USER_CERT' in os.environ and 'X509_USER_KEY' in os.environ:
        return os.environ['X509_USER_CERT'], os.environ['X509_USER_KEY']
    # search for a proxy file on disk at the conventional location
    path = "/tmp/x509up_u%d" % os.getuid()
    if os.access(path, os.R_OK):
        if validateProxy(path):
            return path, path
        RFCproxyUsage()
        sys.exit(1)
    # if we get here we could not find a credential
    RFCproxyUsage()
    sys.exit(1)
"resource": ""
} |
q39622 | validateProxy | train | def validateProxy(path):
"""
Test that the proxy certificate is RFC 3820
compliant and that it is valid for at least
the next 15 minutes.
Exits the process (rather than raising) on any failure;
returns True when the proxy passes all checks.
"""
# load the proxy from path
try:
proxy = M2Crypto.X509.load_cert(path)
# NOTE: Python 2 "except E, e" syntax throughout this function
except Exception, e:
msg = "Unable to load proxy from path %s : %s" % (path, e)
print >>sys.stderr, msg
sys.exit(1)
# make sure the proxy is RFC 3820 compliant
# or is an end-entity X.509 certificate
try:
proxy.get_ext("proxyCertInfo")
except LookupError:
# it is not an RFC 3820 proxy so check
# if it is an old globus legacy proxy
subject = proxy.get_subject().as_text()
if re.search(r'.+CN=proxy$', subject):
# it is so print warning and exit
RFCproxyUsage()
sys.exit(1)
# attempt to make sure the proxy is still good for more than 15 minutes
try:
expireASN1 = proxy.get_not_after().__str__()
expireGMT = time.strptime(expireASN1, "%b %d %H:%M:%S %Y %Z")
expireUTC = calendar.timegm(expireGMT)
now = int(time.time())
secondsLeft = expireUTC - now
except Exception, e:
# problem getting or parsing time so just let the client
# continue and pass the issue along to the server
# (pretend one hour of validity remains)
secondsLeft = 3600
if secondsLeft <= 0:
msg = """\
Your proxy certificate is expired.
Please generate a new proxy certificate and
try again.
"""
print >>sys.stderr, msg
sys.exit(1)
# require a 15-minute safety margin before expiry
if secondsLeft < (60 * 15):
msg = """\
Your proxy certificate expires in less than
15 minutes.
Please generate a new proxy certificate and
try again.
"""
print >>sys.stderr, msg
sys.exit(1)
# return True to indicate validated proxy
return True | python | {
"resource": ""
} |
def save(self, *args, **kwargs):
    """If creating a new instance, also create the profile on
    Authorize.NET first (unless sync=False is passed)."""
    data = kwargs.pop('data', {})
    sync = kwargs.pop('sync', True)
    # only push to the gateway when this record has never been saved
    if sync and not self.id:
        self.push_to_server(data)
    super(CustomerProfile, self).save(*args, **kwargs)
"resource": ""
} |
def delete(self):
    """Delete the customer profile remotely, then locally."""
    # remote deletion first; raise (and abort) on any gateway error
    response = delete_profile(self.profile_id)
    response.raise_if_error()
    super(CustomerProfile, self).delete()
"resource": ""
} |
def push_to_server(self, data):
    """Create a customer profile on Authorize.NET for this customer and
    record the resulting profile and payment-profile IDs locally."""
    result = add_profile(self.customer.pk, data, data)
    result['response'].raise_if_error()
    self.profile_id = result['profile_id']
    self.payment_profile_ids = result['payment_profile_ids']
"resource": ""
} |
def sync(self):
    """Overwrite local customer profile data with remote data."""
    result = get_profile(self.profile_id)
    result['response'].raise_if_error()
    for payment_profile in result['payment_profiles']:
        # create any missing local payment profiles, then refresh each
        obj, _created = CustomerPaymentProfile.objects.get_or_create(
            customer_profile=self,
            payment_profile_id=payment_profile['payment_profile_id']
        )
        obj.sync(payment_profile)
"resource": ""
} |
def save(self, *args, **kwargs):
    """Sync the payment profile to Authorize.NET (unless sync=False is
    passed), then persist locally with sensitive card data masked."""
    if kwargs.pop('sync', True):
        self.push_to_server()
    # never store the CVV; keep only the last four card digits
    self.card_code = None
    self.card_number = "XXXX%s" % self.card_number[-4:]
    super(CustomerPaymentProfile, self).save(*args, **kwargs)
"resource": ""
} |
q39628 | CustomerPaymentProfile.push_to_server | train | def push_to_server(self):
"""
Use appropriate CIM API call to save payment profile to Authorize.NET
1. If customer has no profile yet, create one with this payment profile
2. If payment profile is not on Authorize.NET yet, create it there
3. If payment profile exists on Authorize.NET update it there
"""
# try to attach an existing local customer profile before deciding
# which of the three cases applies
if not self.customer_profile_id:
try:
self.customer_profile = CustomerProfile.objects.get(
customer=self.customer)
except CustomerProfile.DoesNotExist:
pass
# case 3: payment profile already known to the gateway -- update it
if self.payment_profile_id:
response = update_payment_profile(
self.customer_profile.profile_id,
self.payment_profile_id,
self.raw_data,
self.raw_data,
)
response.raise_if_error()
# case 2: customer profile exists but this payment profile is new
elif self.customer_profile_id:
output = create_payment_profile(
self.customer_profile.profile_id,
self.raw_data,
self.raw_data,
)
response = output['response']
response.raise_if_error()
self.payment_profile_id = output['payment_profile_id']
# case 1: no customer profile at all -- create both at once
else:
output = add_profile(
self.customer.id,
self.raw_data,
self.raw_data,
)
response = output['response']
response.raise_if_error()
# sync=False: the gateway object was just created, do not push again
self.customer_profile = CustomerProfile.objects.create(
customer=self.customer,
profile_id=output['profile_id'],
sync=False,
)
self.payment_profile_id = output['payment_profile_ids'][0] | python | {
"resource": ""
} |
def sync(self, data):
    """Overwrite local customer payment profile data with remote data."""
    billing = data.get('billing', {})
    for field, value in billing.items():
        setattr(self, field, value)
    # keep the current card number if the gateway omitted it
    self.card_number = data.get('credit_card', {}).get('card_number',
                                                       self.card_number)
    self.save(sync=False)
"resource": ""
} |
def delete(self):
    """Delete the customer payment profile remotely, then locally."""
    response = delete_payment_profile(
        self.customer_profile.profile_id, self.payment_profile_id)
    response.raise_if_error()
    return super(CustomerPaymentProfile, self).delete()
"resource": ""
} |
def get_changelog_file_for_database(database=DEFAULT_DB_ALIAS):
    """Return the liquibase changelog filename configured for the given
    database alias, raising ImproperlyConfigured when the relevant
    setting is absent."""
    from django.conf import settings
    try:
        return settings.LIQUIMIGRATE_CHANGELOG_FILES[database]
    except KeyError:
        # the dict exists but has no entry for this alias
        raise ImproperlyConfigured(
            "Liquibase changelog file is not set for database: %s" % database)
    except AttributeError:
        # no LIQUIMIGRATE_CHANGELOG_FILES setting at all
        if database != DEFAULT_DB_ALIAS:
            raise ImproperlyConfigured(
                'LIQUIMIGRATE_CHANGELOG_FILES dictionary setting '
                'is required for multiple databases support')
        # single-database fallback setting
        try:
            return settings.LIQUIMIGRATE_CHANGELOG_FILE
        except AttributeError:
            raise ImproperlyConfigured(
                'Please set LIQUIMIGRATE_CHANGELOG_FILE or '
                'LIQUIMIGRATE_CHANGELOG_FILES in your '
                'project settings')
"resource": ""
} |
def find_target_migration_file(database=DEFAULT_DB_ALIAS, changelog_file=None):
    """Find the best-matching target migration file by following the
    trailing <include> chain of the changelog, starting from the
    changelog configured for the given database alias."""
    if not database:
        database = DEFAULT_DB_ALIAS
    if not changelog_file:
        changelog_file = get_changelog_file_for_database(database)
    try:
        doc = minidom.parse(changelog_file)
    except ExpatError as ex:
        raise InvalidChangelogFile(
            'Could not parse XML file %s: %s' % (changelog_file, ex))
    try:
        root = doc.getElementsByTagName('databaseChangeLog')[0]
    except IndexError:
        raise InvalidChangelogFile(
            'Missing <databaseChangeLog> node in file %s' % (
                changelog_file))
    children = [node for node in root.childNodes
                if node.nodeType is node.ELEMENT_NODE]
    if not children:
        # empty changelog: new entries belong directly here
        return changelog_file
    last = children[-1]
    if last.tagName != 'include':
        return changelog_file
    # recurse into the file referenced by the trailing <include>
    included = last.attributes.get('file').firstChild.data
    return find_target_migration_file(
        database=database, changelog_file=included)
"resource": ""
} |
def connection(self):
    """Return an established connection, creating and caching it on
    first use."""
    if self._connection:
        return self._connection
    self.log.debug('Initializing connection to %s' % self.bosh_service.netloc)
    scheme = self.bosh_service.scheme
    if scheme == 'http':
        factory = httplib.HTTPConnection
    elif scheme == 'https':
        factory = httplib.HTTPSConnection
    else:
        # TODO: raise a more specific exception type
        raise Exception('Invalid URL scheme %s' % scheme)
    self._connection = factory(self.bosh_service.netloc, timeout=10)
    self.log.debug('Connection initialized')
    # TODO: add exception handling here (URL not found etc)
    return self._connection
"resource": ""
} |
def send_challenge_response(self, response_plain):
    """Send a SASL challenge response to the server and return the
    parsed XML root of the server's reply."""
    body = self.get_body()
    # the payload travels base64-encoded inside a <response/> node
    response_node = ET.SubElement(body, 'response')
    response_node.set('xmlns', XMPP_SASL_NS)
    response_node.text = base64.b64encode(response_plain)
    return ET.fromstring(self.send_request(body))
"resource": ""
} |
def authenticate_xmpp(self):
    """Authenticate the user to the XMPP server via the BOSH connection
    using SASL; return the success indicator from the server."""
    self.request_sid()
    self.log.debug('Prepare the XMPP authentication')
    sasl = SASLClient(
        host=self.to,
        service='xmpp',
        username=self.jid,
        password=self.password
    )
    # pick a mechanism supported by both sides (anonymous not allowed)
    sasl.choose_mechanism(self.server_auth, allow_anonymous=False)
    challenge = self.get_challenge(sasl.mechanism)
    response = sasl.process(base64.b64decode(challenge))
    resp_root = self.send_challenge_response(response)
    success = self.check_authenticate_success(resp_root)
    if success is None and \
            resp_root.find('{{{0}}}challenge'.format(XMPP_SASL_NS)) is not None:
        # the server issued a further challenge; answer with an empty
        # response and re-check the outcome
        resp_root = self.send_challenge_response('')
        return self.check_authenticate_success(resp_root)
    return success
"resource": ""
} |
def getParamsByName(elem, name):
    """
    Return a list of the params under elem whose name matches name.
    """
    name = StripParamName(name)
    matches = lambda e: (e.tagName == ligolw.Param.tagName) and (e.Name == name)
    return elem.getElements(matches)
"resource": ""
} |
def get_param(xmldoc, name):
    """
    Scan xmldoc for a param named name.  Raises ValueError unless
    exactly 1 such param is found.
    """
    matches = getParamsByName(xmldoc, name)
    if len(matches) == 1:
        return matches[0]
    raise ValueError("document must contain exactly one %s param" % StripParamName(name))
"resource": ""
} |
def pickle_to_param(obj, name):
    """
    Return the top-level element of a document sub-tree containing the
    pickled serialization of a Python object, stored under the
    "pickle:" name prefix.
    """
    payload = unicode(pickle.dumps(obj))
    return from_pyvalue(u"pickle:%s" % name, payload)
"resource": ""
} |
def pickle_from_param(elem, name):
    """
    Retrieve a pickled Python object from the document tree rooted at
    elem.

    NOTE: unpickling executes arbitrary code; only use this on
    documents from a trusted source.
    """
    payload = str(get_pyvalue(elem, u"pickle:%s" % name))
    return pickle.loads(payload)
"resource": ""
} |
def yaml_to_param(obj, name):
    """
    Return the top-level element of a document sub-tree containing the
    YAML serialization of a Python object, stored under the "yaml:"
    name prefix.
    """
    payload = unicode(yaml.dump(obj))
    return from_pyvalue(u"yaml:%s" % name, payload)
"resource": ""
} |
def use_in(ContentHandler):
    """
    Modify ContentHandler, a sub-class of
    pycbc_glue.ligolw.LIGOLWContentHandler, to cause it to use the
    Param class defined in this module when parsing XML documents.

    Example:

    >>> from pycbc_glue.ligolw import ligolw
    >>> def MyContentHandler(ligolw.LIGOLWContentHandler):
    ...	pass
    ...
    >>> use_in(MyContentHandler)
    """
    def startParam(self, parent, attrs):
        # build this module's Param instead of the generic element
        return Param(attrs)

    ContentHandler.startParam = startParam
    return ContentHandler
"resource": ""
} |
def all_connections(self):
    """Yield every current connection object, both available and
    in-use, across all patterns."""
    for idx in _xrange(self.num_patterns):
        for conn in self._available_connections[idx]:
            yield conn
        for conn in self._in_use_connections[idx]:
            yield conn
"resource": ""
} |
def purge(self, connection):
    """Remove the given connection from rotation and disconnect it."""
    self._checkpid()
    # only touch connections created by this process
    if connection.pid != self.pid:
        return
    idx = connection._pattern_idx
    in_use = self._in_use_connections[idx]
    if connection in in_use:
        in_use.remove(connection)
    else:
        self._available_connections[idx].remove(connection)
    connection.disconnect()
"resource": ""
} |
def local_path_from_url(url):
    """
    For URLs that point to locations in the local filesystem, extract
    and return the filesystem path of the object to which they point.
    As a special case pass-through, if the URL is None, the return
    value is None.  Raises ValueError if the URL is not None and does
    not point to a local file.

    Example:

    >>> print local_path_from_url(None)
    None
    >>> local_path_from_url("file:///home/me/somefile.xml.gz")
    '/home/me/somefile.xml.gz'
    """
    if url is None:
        return None
    scheme, host, path = urlparse.urlparse(url)[:3]
    # only the "file" scheme (or none) on this host qualifies as local
    if scheme.lower() not in ("", "file"):
        raise ValueError("%s is not a local file" % repr(url))
    if host.lower() not in ("", "localhost"):
        raise ValueError("%s is not a local file" % repr(url))
    return path
"resource": ""
} |
q39645 | load_fileobj | train | def load_fileobj(fileobj, gz = None, xmldoc = None, contenthandler = None):
"""
Parse the contents of the file object fileobj, and return the
contents as a LIGO Light Weight document tree. The file object
does not need to be seekable.
If the gz parameter is None (the default) then gzip compressed data
will be automatically detected and decompressed, otherwise
decompression can be forced on or off by setting gz to True or
False respectively.
If the optional xmldoc argument is provided and not None, the
parsed XML tree will be appended to that document, otherwise a new
document will be created. The return value is a tuple, the first
element of the tuple is the XML document and the second is a string
containing the MD5 digest in hex digits of the bytestream that was
parsed.
Example:
>>> from pycbc_glue.ligolw import ligolw
>>> import StringIO
>>> f = StringIO.StringIO('<?xml version="1.0" encoding="utf-8" ?><!DOCTYPE LIGO_LW SYSTEM "http://ldas-sw.ligo.caltech.edu/doc/ligolwAPI/html/ligolw_dtd.txt"><LIGO_LW><Table Name="demo:table"><Column Name="name" Type="lstring"/><Column Name="value" Type="real8"/><Stream Name="demo:table" Type="Local" Delimiter=",">"mass",0.5,"velocity",34</Stream></Table></LIGO_LW>')
>>> xmldoc, digest = load_fileobj(f, contenthandler = ligolw.LIGOLWContentHandler)
>>> digest
'6bdcc4726b892aad913531684024ed8e'
The contenthandler argument specifies the SAX content handler to
use when parsing the document. The contenthandler is a required
argument. See the pycbc_glue.ligolw package documentation for typical
parsing scenario involving a custom content handler. See
pycbc_glue.ligolw.ligolw.PartialLIGOLWContentHandler and
pycbc_glue.ligolw.ligolw.FilteringLIGOLWContentHandler for examples of
custom content handlers used to load subsets of documents into
memory.
"""
# wrap the stream so the MD5 digest accumulates as bytes are read
fileobj = MD5File(fileobj)
md5obj = fileobj.md5obj
# when gz is True or None (auto-detect), the stream must be wrapped so
# the two sniffed magic bytes can be pushed back before parsing
if gz or gz is None:
fileobj = RewindableInputFile(fileobj)
magic = fileobj.read(2)
fileobj.seek(0, os.SEEK_SET)
# '\037\213' is the two-byte gzip magic number
if gz or magic == '\037\213':
fileobj = gzip.GzipFile(mode = "rb", fileobj = fileobj)
if xmldoc is None:
xmldoc = ligolw.Document()
ligolw.make_parser(contenthandler(xmldoc)).parse(fileobj)
return xmldoc, md5obj.hexdigest() | python | {
"resource": ""
} |
def write_filename(xmldoc, filename, verbose = False, gz = False, **kwargs):
    """
    Write the LIGO Light Weight document tree rooted at xmldoc to the
    file named filename, or to stdout when filename is None.  Friendly
    verbosity messages are printed while doing so if verbose is True.
    The output data is gzip compressed on the fly if gz is True.

    Internally, write_fileobj() is used to perform the write.  All
    additional keyword arguments are passed to write_fileobj().

    Example:

    >>> write_filename(xmldoc, "demo.xml") # doctest: +SKIP
    >>> write_filename(xmldoc, "demo.xml.gz", gz = True) # doctest: +SKIP
    """
    if verbose:
        print >>sys.stderr, "writing %s ..." % (("'%s'" % filename) if filename is not None else "stdout")
    if filename is None:
        fileobj = sys.stdout
    else:
        # warn when the name suggests compression but gz was not requested
        if not gz and filename.endswith(".gz"):
            warnings.warn("filename '%s' ends in '.gz' but file is not being gzip-compressed" % filename, UserWarning)
        fileobj = open(filename, "w")
    hexdigest = write_fileobj(xmldoc, fileobj, gz = gz, **kwargs)
    if fileobj is not sys.stdout:
        fileobj.close()
    if verbose:
        print >>sys.stderr, "md5sum: %s %s" % (hexdigest, (filename if filename is not None else ""))
"resource": ""
} |
def home(request):
    """Show the home page, passing the list of polls to the template."""
    polls = []
    for poll_id, title in curDB.execute('SELECT id, title FROM Poll ORDER BY title'):
        polls.append({'id': poll_id, 'name': title})
    return {'polls': polls}
"resource": ""
} |
q39648 | show | train | def show(request, pollId):
"""Show a poll. We send informations about votes only if the user is an administrator"""
# Get the poll
curDB.execute('SELECT id, title, description FROM Poll WHERE id = ? ORDER BY title', (pollId,))
poll = curDB.fetchone()
# unknown poll id: render an empty context
if poll is None:
return {}
responses = []
totalVotes = 0
votedFor = 0
# Compute the list of responses
curDB.execute('SELECT id, title FROM Response WHERE pollId = ? ORDER BY title ', (poll[0],))
resps = curDB.fetchall()
for rowRep in resps:
votes = []
nbVotes = 0
# List each votes
for rowVote in curDB.execute('SELECT username FROM Vote WHERE responseId = ?', (rowRep[0],)):
nbVotes += 1
# If the user is an administrator, save each voter's username
if request.args.get('ebuio_u_ebuio_admin') == 'True':
votes.append(rowVote[0])
# Save the vote of the current user
if request.args.get('ebuio_u_username') == rowVote[0]:
votedFor = rowRep[0]
totalVotes += nbVotes
responses.append({'id': rowRep[0], 'title': rowRep[1], 'nbVotes': nbVotes, 'votes': votes})
return {'id': poll[0], 'name': poll[1], 'description': poll[2], 'responses': responses, 'totalVotes': totalVotes, 'votedFor': votedFor} | python | {
"resource": ""
} |
q39649 | vote | train | def vote(request, pollId, responseId):
"""Vote for a poll"""
username = request.args.get('ebuio_u_username')
# Remove old votes from the same user on the same poll
curDB.execute('DELETE FROM Vote WHERE username = ? AND responseId IN (SELECT id FROM Response WHERE pollId = ?) ', (username, pollId))
# Save the vote
curDB.execute('INSERT INTO Vote (username, responseID) VALUES (?, ?) ', (username, responseId))
coxDB.commit()
# To display a success page, comment this line
return PlugItRedirect("show/" + str(pollId))
# Line to display the success page
# NOTE(review): the return below is unreachable while the redirect
# above is active; the pair acts as a manual toggle.
return {'id': pollId} | python | {
"resource": ""
} |
q39650 | create | train | def create(request):
"""Create a new poll"""
errors = []
success = False
listOfResponses = ['', '', ''] # 3 Blank lines by default
title = ''
description = ''
id = ''
if request.method == 'POST': # User saved the form
# Retrieve parameters
title = request.form.get('title')
description = request.form.get('description')
listOfResponses = []
for rep in request.form.getlist('rep[]'):
if rep != '':
listOfResponses.append(rep)
# Test if everything is ok
if title == "":
errors.append("Please set a title !")
if len(listOfResponses) == 0:
errors.append("Please set at least one response !")
# Can we save the new question ?
if len(errors) == 0:
# Yes. Let save data
curDB.execute("INSERT INTO Poll (title, description) VALUES (?, ?)", (title, description))
# The id of the poll
id = curDB.lastrowid
# Insert responses
for rep in listOfResponses:
curDB.execute("INSERT INTO Response (pollId, title) VALUES (?, ?)", (id, rep))
coxDB.commit()
success = True
# Minimum of 3 lines of questions
while len(listOfResponses) < 3:
listOfResponses.append('')
return {'errors': errors, 'success': success, 'listOfResponses': listOfResponses, 'title': title, 'description': description, 'id': id} | python | {
"resource": ""
} |
def HasNonLSCTables(elem):
    """
    Return True if the document tree below elem contains any table not
    known to this module, otherwise return False.
    """
    for tbl in elem.getElementsByTagName(ligolw.Table.tagName):
        if tbl.Name not in TableByName:
            return True
    return False
"resource": ""
} |
q39652 | ifos_from_instrument_set | train | def ifos_from_instrument_set(instruments):
"""
Convert an iterable of instrument names into a value suitable for
storage in the "ifos" column found in many tables. This function
is mostly for internal use by the .instruments properties of the
corresponding row classes. The input can be None or an iterable of
zero or more instrument names, none of which may be zero-length,
consist exclusively of spaces, or contain "," or "+" characters.
The output is a single string containing the unique instrument
names concatenated using "," as a delimiter. instruments will only
be iterated over once and so can be a generator expression.
Whitespace is allowed in instrument names but might not be
preserved. Repeated names will not be preserved.
NOTE: in the special case that there is 1 instrument name in the
iterable and it has an even number of characters > 2 in it, the
output will have a "," appended in order to force
instrument_set_from_ifos() to parse the string back into a single
instrument name. This is a special case included temporarily to
disambiguate the encoding until all codes have been ported to the
comma-delimited encoding. This behaviour will be discontinued at
that time. DO NOT WRITE CODE THAT RELIES ON THIS! You have been
warned.
Example:
>>> print ifos_from_instrument_set(None)
None
>>> ifos_from_instrument_set(())
u''
>>> ifos_from_instrument_set((u"H1",))
u'H1'
>>> ifos_from_instrument_set((u"H1",u"H1",u"H1"))
u'H1'
>>> ifos_from_instrument_set((u"H1",u"L1"))
u'H1,L1'
>>> ifos_from_instrument_set((u"SWIFT",))
u'SWIFT'
>>> ifos_from_instrument_set((u"H1L1",))
u'H1L1,'
"""
if instruments is None:
return None
_instruments = sorted(set(instrument.strip() for instrument in instruments))
# safety check: refuse to accept blank names, or names with commas
# or pluses in them as they cannot survive the encode/decode
# process
if not all(_instruments) or any(u"," in instrument or u"+" in instrument for instrument in _instruments):
raise ValueError(instruments)
if len(_instruments) == 1 and len(_instruments[0]) > 2 and not len(_instruments[0]) % 2:
# special case disambiguation. FIXME: remove when
# everything uses the comma-delimited encoding
return u"%s," % _instruments[0]
return u",".join(_instruments) | python | {
"resource": ""
} |
q39653 | use_in | train | def use_in(ContentHandler):
"""
Modify ContentHandler, a sub-class of
pycbc_glue.ligolw.LIGOLWContentHandler, to cause it to use the Table
classes defined in this module when parsing XML documents.
Example:
>>> from pycbc_glue.ligolw import ligolw
>>> class MyContentHandler(ligolw.LIGOLWContentHandler):
... pass
...
>>> use_in(MyContentHandler)
<class 'pycbc_glue.ligolw.lsctables.MyContentHandler'>
"""
ContentHandler = table.use_in(ContentHandler)
def startTable(self, parent, attrs, __orig_startTable = ContentHandler.startTable):
name = table.StripTableName(attrs[u"Name"])
if name in TableByName:
return TableByName[name](attrs)
return __orig_startTable(self, parent, attrs)
ContentHandler.startTable = startTable
return ContentHandler | python | {
"resource": ""
} |
q39654 | ProcessTable.get_ids_by_program | train | def get_ids_by_program(self, program):
"""
Return a set containing the process IDs from rows whose
program string equals the given program.
"""
return set(row.process_id for row in self if row.program == program) | python | {
"resource": ""
} |
q39655 | SearchSummaryTable.get_in_segmentlistdict | train | def get_in_segmentlistdict(self, process_ids = None):
"""
Return a segmentlistdict mapping instrument to in segment
list. If process_ids is a sequence of process IDs, then
only rows with matching IDs are included otherwise all rows
are included.
Note: the result is not coalesced, each segmentlist
contains the segments listed for that instrument as they
appeared in the table.
"""
seglists = segments.segmentlistdict()
for row in self:
ifos = row.instruments or (None,)
if process_ids is None or row.process_id in process_ids:
seglists.extend(dict((ifo, segments.segmentlist([row.in_segment])) for ifo in ifos))
return seglists | python | {
"resource": ""
} |
q39656 | SearchSummaryTable.get_out_segmentlistdict | train | def get_out_segmentlistdict(self, process_ids = None):
"""
Return a segmentlistdict mapping instrument to out segment
list. If process_ids is a sequence of process IDs, then
only rows with matching IDs are included otherwise all rows
are included.
Note: the result is not coalesced, each segmentlist
contains the segments listed for that instrument as they
appeared in the table.
"""
seglists = segments.segmentlistdict()
for row in self:
ifos = row.instruments or (None,)
if process_ids is None or row.process_id in process_ids:
seglists.extend(dict((ifo, segments.segmentlist([row.out_segment])) for ifo in ifos))
return seglists | python | {
"resource": ""
} |
q39657 | ExperimentTable.get_expr_id | train | def get_expr_id(self, search_group, search, lars_id, instruments, gps_start_time, gps_end_time, comments = None):
"""
Return the expr_def_id for the row in the table whose
values match the givens.
If a matching row is not found, returns None.
@search_group: string representing the search group (e.g., cbc)
@serach: string representing search (e.g., inspiral)
@lars_id: string representing lars_id
@instruments: the instruments; must be a python set
@gps_start_time: string or int representing the gps_start_time of the experiment
@gps_end_time: string or int representing the gps_end_time of the experiment
"""
# create string from instrument set
instruments = ifos_from_instrument_set(instruments)
# look for the ID
for row in self:
if (row.search_group, row.search, row.lars_id, row.instruments, row.gps_start_time, row.gps_end_time, row.comments) == (search_group, search, lars_id, instruments, gps_start_time, gps_end_time, comments):
# found it
return row.experiment_id
# experiment not found in table
return None | python | {
"resource": ""
} |
q39658 | ExperimentTable.write_new_expr_id | train | def write_new_expr_id(self, search_group, search, lars_id, instruments, gps_start_time, gps_end_time, comments = None):
"""
Creates a new def_id for the given arguments and returns it.
If an entry already exists with these, will just return that id.
@search_group: string representing the search group (e.g., cbc)
@serach: string representing search (e.g., inspiral)
@lars_id: string representing lars_id
@instruments: the instruments; must be a python set
@gps_start_time: string or int representing the gps_start_time of the experiment
@gps_end_time: string or int representing the gps_end_time of the experiment
"""
# check if id already exists
check_id = self.get_expr_id( search_group, search, lars_id, instruments, gps_start_time, gps_end_time, comments = comments )
if check_id:
return check_id
# experiment not found in table
row = self.RowType()
row.experiment_id = self.get_next_id()
row.search_group = search_group
row.search = search
row.lars_id = lars_id
row.instruments = ifos_from_instrument_set(instruments)
row.gps_start_time = gps_start_time
row.gps_end_time = gps_end_time
row.comments = comments
self.append(row)
# return new ID
return row.experiment_id | python | {
"resource": ""
} |
q39659 | ExperimentTable.get_row_from_id | train | def get_row_from_id(self, experiment_id):
"""
Returns row in matching the given experiment_id.
"""
row = [row for row in self if row.experiment_id == experiment_id]
if len(row) > 1:
raise ValueError("duplicate ids in experiment table")
if len(row) == 0:
raise ValueError("id '%s' not found in table" % experiment_id)
return row[0] | python | {
"resource": ""
} |
q39660 | ExperimentSummaryTable.get_expr_summ_id | train | def get_expr_summ_id(self, experiment_id, time_slide_id, veto_def_name, datatype, sim_proc_id = None):
"""
Return the expr_summ_id for the row in the table whose experiment_id,
time_slide_id, veto_def_name, and datatype match the given. If sim_proc_id,
will retrieve the injection run matching that sim_proc_id.
If a matching row is not found, returns None.
"""
# look for the ID
for row in self:
if (row.experiment_id, row.time_slide_id, row.veto_def_name, row.datatype, row.sim_proc_id) == (experiment_id, time_slide_id, veto_def_name, datatype, sim_proc_id):
# found it
return row.experiment_summ_id
# if get to here, experiment not found in table
return None | python | {
"resource": ""
} |
q39661 | ExperimentSummaryTable.write_experiment_summ | train | def write_experiment_summ(self, experiment_id, time_slide_id, veto_def_name, datatype, sim_proc_id = None ):
"""
Writes a single entry to the experiment_summ table. This can be used
for either injections or non-injection experiments. However, it is
recommended that this only be used for injection experiments; for
non-injection experiments write_experiment_summ_set should be used to
ensure that an entry gets written for every time-slide performed.
"""
# check if entry alredy exists; if so, return value
check_id = self.get_expr_summ_id(experiment_id, time_slide_id, veto_def_name, datatype, sim_proc_id = sim_proc_id)
if check_id:
return check_id
row = self.RowType()
row.experiment_summ_id = self.get_next_id()
row.experiment_id = experiment_id
row.time_slide_id = time_slide_id
row.veto_def_name = veto_def_name
row.datatype = datatype
row.sim_proc_id = sim_proc_id
row.nevents = None
row.duration = None
self.append(row)
return row.experiment_summ_id | python | {
"resource": ""
} |
q39662 | ExperimentSummaryTable.add_nevents | train | def add_nevents(self, experiment_summ_id, num_events, add_to_current = True):
"""
Add num_events to the nevents column in a specific entry in the table. If
add_to_current is set to False, will overwrite the current nevents entry in
the row with num_events. Otherwise, default is to add num_events to
the current value.
Note: Can subtract events by passing a negative number to num_events.
"""
for row in self:
if row.experiment_summ_id != experiment_summ_id:
continue
if row.nevents is None:
row.nevents = 0
if add_to_current:
row.nevents += num_events
return row.nevents
else:
row.nevents = num_events
return row.nevents
# if get to here, couldn't find experiment_summ_id in the table
raise ValueError("'%s' could not be found in the table" % (str(experiment_summ_id))) | python | {
"resource": ""
} |
q39663 | ExperimentMapTable.get_experiment_summ_ids | train | def get_experiment_summ_ids( self, coinc_event_id ):
"""
Gets all the experiment_summ_ids that map to a given coinc_event_id.
"""
experiment_summ_ids = []
for row in self:
if row.coinc_event_id == coinc_event_id:
experiment_summ_ids.append(row.experiment_summ_id)
if len(experiment_summ_ids) == 0:
raise ValueError("'%s' could not be found in the experiment_map table" % coinc_event_id)
return experiment_summ_ids | python | {
"resource": ""
} |
q39664 | SnglInspiralTable.ifocut | train | def ifocut(self, ifo, inplace=False):
"""
Return a SnglInspiralTable with rows from self having IFO equal
to the given ifo. If inplace, modify self directly, else create
a new table and fill it.
"""
if inplace:
iterutils.inplace_filter(lambda row: row.ifo == ifo, self)
return self
else:
ifoTrigs = self.copy()
ifoTrigs.extend([row for row in self if row.ifo == ifo])
return ifoTrigs | python | {
"resource": ""
} |
q39665 | SnglInspiralTable.getslide | train | def getslide(self,slide_num):
"""
Return the triggers with a specific slide number.
@param slide_num: the slide number to recover (contained in the event_id)
"""
slideTrigs = self.copy()
slideTrigs.extend(row for row in self if row.get_slide_number() == slide_num)
return slideTrigs | python | {
"resource": ""
} |
q39666 | SnglInspiral.get_id_parts | train | def get_id_parts(self):
"""
Return the three pieces of the int_8s-style sngl_inspiral
event_id.
"""
int_event_id = int(self.event_id)
a = int_event_id // 1000000000
slidenum = (int_event_id % 1000000000) // 100000
b = int_event_id % 100000
return int(a), int(slidenum), int(b) | python | {
"resource": ""
} |
q39667 | SnglInspiral.get_slide_number | train | def get_slide_number(self):
"""
Return the slide-number for this trigger
"""
a, slide_number, b = self.get_id_parts()
if slide_number > 5000:
slide_number = 5000 - slide_number
return slide_number | python | {
"resource": ""
} |
q39668 | MultiInspiralTable.get_null_snr | train | def get_null_snr(self):
"""
Get the coherent Null SNR for each row in the table.
"""
null_snr_sq = self.get_coinc_snr()**2 - self.get_column('snr')**2
null_snr_sq[null_snr_sq < 0] = 0.
return null_snr_sq**(1./2.) | python | {
"resource": ""
} |
q39669 | MultiInspiralTable.get_sigmasqs | train | def get_sigmasqs(self, instruments=None):
"""
Return dictionary of single-detector sigmas for each row in the
table.
"""
if len(self):
if not instruments:
instruments = map(str, \
instrument_set_from_ifos(self[0].ifos))
return dict((ifo, self.get_sigmasq(ifo))\
for ifo in instruments)
else:
return dict() | python | {
"resource": ""
} |
q39670 | MultiInspiralTable.get_sngl_snrs | train | def get_sngl_snrs(self, instruments=None):
"""
Get the single-detector SNRs for each row in the table.
"""
if len(self) and instruments is None:
instruments = map(str, \
instrument_set_from_ifos(self[0].ifos))
elif instruments is None:
instruments = []
return dict((ifo, self.get_sngl_snr(ifo))\
for ifo in instruments) | python | {
"resource": ""
} |
q39671 | MultiInspiralTable.get_bestnr | train | def get_bestnr(self, index=4.0, nhigh=3.0, null_snr_threshold=4.25,\
null_grad_thresh=20., null_grad_val = 1./5.):
"""
Get the BestNR statistic for each row in the table
"""
return [row.get_bestnr(index=index, nhigh=nhigh,
null_snr_threshold=null_snr_threshold,
null_grad_thresh=null_grad_thresh,
null_grad_val=null_grad_val)
for row in self] | python | {
"resource": ""
} |
q39672 | MultiInspiral.get_null_snr | train | def get_null_snr(self):
"""
Get the coherent Null SNR for this row.
"""
null_snr_sq = (numpy.asarray(self.get_sngl_snrs().values())**2)\
.sum() - self.snr**2
if null_snr_sq < 0:
return 0
else:
return null_snr_sq**(1./2.) | python | {
"resource": ""
} |
q39673 | MultiInspiral.get_sngl_snrs | train | def get_sngl_snrs(self):
"""
Return a dictionary of single-detector SNRs for this row.
"""
return dict((ifo, self.get_sngl_snr(ifo)) for ifo in\
instrument_set_from_ifos(self.ifos)) | python | {
"resource": ""
} |
q39674 | MultiInspiral.get_bestnr | train | def get_bestnr(self, index=4.0, nhigh=3.0, null_snr_threshold=4.25,\
null_grad_thresh=20., null_grad_val = 1./5.):
"""
Return the BestNR statistic for this row.
"""
# weight SNR by chisq
bestnr = self.get_new_snr(index=index, nhigh=nhigh,
column="chisq")
if len(self.get_ifos()) < 3:
return bestnr
# recontour null SNR threshold for higher SNRs
if self.snr > null_grad_thresh:
null_snr_threshold += (self.snr - null_grad_thresh) * null_grad_val
# weight SNR by null SNR
if self.get_null_snr() > null_snr_threshold:
bestnr /= 1 + self.get_null_snr() - null_snr_threshold
return bestnr | python | {
"resource": ""
} |
q39675 | SegmentSumTable.get | train | def get(self, segment_def_id = None):
"""
Return a segmentlist object describing the times spanned by
the segments carrying the given segment_def_id. If
segment_def_id is None then all segments are returned.
Note: the result is not coalesced, the segmentlist
contains the segments as they appear in the table.
"""
if segment_def_id is None:
return segments.segmentlist(row.segment for row in self)
return segments.segmentlist(row.segment for row in self if row.segment_def_id == segment_def_id) | python | {
"resource": ""
} |
q39676 | CoincDefTable.get_coinc_def_id | train | def get_coinc_def_id(self, search, search_coinc_type, create_new = True, description = None):
"""
Return the coinc_def_id for the row in the table whose
search string and search_coinc_type integer have the values
given. If a matching row is not found, the default
behaviour is to create a new row and return the ID assigned
to the new row. If, instead, create_new is False then
KeyError is raised when a matching row is not found. The
optional description parameter can be used to set the
description string assigned to the new row if one is
created, otherwise the new row is left with no description.
"""
# look for the ID
rows = [row for row in self if (row.search, row.search_coinc_type) == (search, search_coinc_type)]
if len(rows) > 1:
raise ValueError("(search, search coincidence type) = ('%s', %d) is not unique" % (search, search_coinc_type))
if len(rows) > 0:
return rows[0].coinc_def_id
# coinc type not found in table
if not create_new:
raise KeyError((search, search_coinc_type))
row = self.RowType()
row.coinc_def_id = self.get_next_id()
row.search = search
row.search_coinc_type = search_coinc_type
row.description = description
self.append(row)
# return new ID
return row.coinc_def_id | python | {
"resource": ""
} |
q39677 | DQSpec.apply_to_segmentlist | train | def apply_to_segmentlist(self, seglist):
"""
Apply our low and high windows to the segments in a
segmentlist.
"""
for i, seg in enumerate(seglist):
seglist[i] = seg.__class__(seg[0] - self.low_window, seg[1] + self.high_window) | python | {
"resource": ""
} |
q39678 | synchronizeLayout | train | def synchronizeLayout(primary, secondary, surface_size):
"""Synchronizes given layouts by normalizing height by using
max height of given layouts to avoid transistion dirty effects.
:param primary: Primary layout used.
:param secondary: Secondary layout used.
:param surface_size: Target surface size on which layout will be displayed.
"""
primary.configure_bound(surface_size)
secondary.configure_bound(surface_size)
# Check for key size.
if (primary.key_size < secondary.key_size):
logging.warning('Normalizing key size from secondary to primary')
secondary.key_size = primary.key_size
elif (primary.key_size > secondary.key_size):
logging.warning('Normalizing key size from primary to secondary')
primary.key_size = secondary.key_size
if (primary.size[1] > secondary.size[1]):
logging.warning('Normalizing layout size from secondary to primary')
secondary.set_size(primary.size, surface_size)
elif (primary.size[1] < secondary.size[1]):
logging.warning('Normalizing layout size from primary to secondary')
primary.set_size(secondary.size, surface_size) | python | {
"resource": ""
} |
q39679 | VKeyboardRenderer.draw_uppercase_key | train | def draw_uppercase_key(self, surface, key):
"""Default drawing method for uppercase key. Drawn as character key.
:param surface: Surface background should be drawn in.
:param key: Target key to be drawn.
"""
key.value = u'\u21e7'
if key.is_activated():
key.value = u'\u21ea'
self.draw_character_key(surface, key, True) | python | {
"resource": ""
} |
q39680 | VKeyboardRenderer.draw_special_char_key | train | def draw_special_char_key(self, surface, key):
"""Default drawing method for special char key. Drawn as character key.
:param surface: Surface background should be drawn in.
:param key: Target key to be drawn.
"""
key.value = u'#'
if key.is_activated():
key.value = u'Ab'
self.draw_character_key(surface, key, True) | python | {
"resource": ""
} |
q39681 | VKeyRow.add_key | train | def add_key(self, key, first=False):
"""Adds the given key to this row.
:param key: Key to be added to this row.
:param first: BOolean flag that indicates if key is added at the beginning or at the end.
"""
if first:
self.keys = [key] + self.keys
else:
self.keys.append(key)
if isinstance(key, VSpaceKey):
self.space = key | python | {
"resource": ""
} |
q39682 | VKeyRow.set_size | train | def set_size(self, position, size, padding):
"""Row size setter.
The size correspond to the row height, since the row width is constraint
to the surface width the associated keyboard belongs. Once size is settled,
the size for each child keys is associated.
:param position: Position of this row.
:param size: Size of the row (height)
:param padding: Padding between key.
"""
self.height = size
self.position = position
x = position[0]
for key in self.keys:
key.set_size(size)
key.position = (x, position[1])
x += padding + key.size[0] | python | {
"resource": ""
} |
q39683 | VKeyboardLayout.configure_specials_key | train | def configure_specials_key(self, keyboard):
"""Configures specials key if needed.
:param keyboard: Keyboard instance this layout belong.
"""
special_row = VKeyRow()
max_length = self.max_length
i = len(self.rows) - 1
current_row = self.rows[i]
special_keys = [VBackKey()]
if self.allow_uppercase: special_keys.append(VUppercaseKey(keyboard))
if self.allow_special_chars: special_keys.append(VSpecialCharKey(keyboard))
while len(special_keys) > 0:
first = False
while len(special_keys) > 0 and len(current_row) < max_length:
current_row.add_key(special_keys.pop(0), first=first)
first = not first
if i > 0:
i -= 1
current_row = self.rows[i]
else:
break
if self.allow_space:
space_length = len(current_row) - len(special_keys)
special_row.add_key(VSpaceKey(space_length))
first = True
# Adding left to the special bar.
while len(special_keys) > 0:
special_row.add_key(special_keys.pop(0), first=first)
first = not first
if len(special_row) > 0:
self.rows.append(special_row) | python | {
"resource": ""
} |
q39684 | VKeyboardLayout.configure_bound | train | def configure_bound(self, surface_size):
"""Compute keyboard bound regarding of this layout.
If key_size is None, then it will compute it regarding of the given surface_size.
:param surface_size: Size of the surface this layout will be rendered on.
:raise ValueError: If the layout model is empty.
"""
r = len(self.rows)
max_length = self.max_length
if self.key_size is None:
self.key_size = (surface_size[0] - (self.padding * (max_length + 1))) / max_length
height = self.key_size * r + self.padding * (r + 1)
if height >= surface_size[1] / 2:
logger.warning('Computed keyboard height outbound target surface, reducing key_size to match')
self.key_size = ((surface_size[1] / 2) - (self.padding * (r + 1))) / r
height = self.key_size * r + self.padding * (r + 1)
logger.warning('Normalized key_size to %spx' % self.key_size)
self.set_size((surface_size[0], height), surface_size) | python | {
"resource": ""
} |
q39685 | VKeyboardLayout.set_size | train | def set_size(self, size, surface_size):
"""Sets the size of this layout, and updates
position, and rows accordingly.
:param size: Size of this layout.
:param surface_size: Target surface size on which layout will be displayed.
"""
self.size = size
self.position = (0, surface_size[1] - self.size[1])
y = self.position[1] + self.padding
max_length = self.max_length
for row in self.rows:
r = len(row)
width = (r * self.key_size) + ((r + 1) * self.padding)
x = (surface_size[0] - width) / 2
if row.space is not None:
x -= ((row.space.length - 1) * self.key_size) / 2
row.set_size((x, y), self.key_size, self.padding)
y += self.padding + self.key_size | python | {
"resource": ""
} |
q39686 | VKeyboardLayout.invalidate | train | def invalidate(self):
""" Rests all keys states. """
for row in self.rows:
for key in row.keys:
key.state = 0 | python | {
"resource": ""
} |
q39687 | VKeyboardLayout.set_uppercase | train | def set_uppercase(self, uppercase):
"""Sets layout uppercase state.
:param uppercase: True if uppercase, False otherwise.
"""
for row in self.rows:
for key in row.keys:
if type(key) == VKey:
if uppercase:
key.value = key.value.upper()
else:
key.value = key.value.lower() | python | {
"resource": ""
} |
q39688 | VKeyboard.draw | train | def draw(self):
""" Draw the virtual keyboard into the delegate surface object if enabled. """
if self.state > 0:
self.renderer.draw_background(self.surface, self.layout.position, self.layout.size)
for row in self.layout.rows:
for key in row.keys:
self.renderer.draw_key(self.surface, key) | python | {
"resource": ""
} |
q39689 | VKeyboard.on_uppercase | train | def on_uppercase(self):
""" Uppercase key press handler. """
self.uppercase = not self.uppercase
self.original_layout.set_uppercase(self.uppercase)
self.special_char_layout.set_uppercase(self.uppercase)
self.invalidate() | python | {
"resource": ""
} |
q39690 | VKeyboard.on_special_char | train | def on_special_char(self):
""" Special char key press handler. """
self.special_char = not self.special_char
if self.special_char:
self.set_layout(self.special_char_layout)
else:
self.set_layout(self.original_layout)
self.invalidate() | python | {
"resource": ""
} |
q39691 | VKeyboard.on_event | train | def on_event(self, event):
"""Pygame event processing callback method.
:param event: Event to process.
"""
if self.state > 0:
if event.type == MOUSEBUTTONDOWN:
key = self.layout.get_key_at(pygame.mouse.get_pos())
if key is not None:
self.on_key_down(key)
elif event.type == MOUSEBUTTONUP:
self.on_key_up()
elif event.type == KEYDOWN:
value = pygame.key.name(event.key)
# TODO : Find from layout (consider checking layout key space ?)
elif event.type == KEYUP:
value = pygame.key.name(event.key) | python | {
"resource": ""
} |
q39692 | VKeyboard.set_key_state | train | def set_key_state(self, key, state):
"""Sets the key state and redraws it.
:param key: Key to update state for.
:param state: New key state.
"""
key.state = state
self.renderer.draw_key(self.surface, key) | python | {
"resource": ""
} |
q39693 | VKeyboard.on_key_up | train | def on_key_up(self):
""" Process key up event by updating buffer and release key. """
if (self.last_pressed is not None):
self.set_key_state(self.last_pressed, 0)
self.buffer = self.last_pressed.update_buffer(self.buffer)
self.text_consumer(self.buffer)
self.last_pressed = None | python | {
"resource": ""
} |
q39694 | Cell.same_player | train | def same_player(self, other):
"""
Compares name and color.
Returns True if both are owned by the same player.
"""
return self.name == other.name \
and self.color == other.color | python | {
"resource": ""
} |
q39695 | World.reset | train | def reset(self):
"""
Clears the `cells` and leaderboards, and sets all corners to `0,0`.
"""
self.cells.clear()
self.leaderboard_names.clear()
self.leaderboard_groups.clear()
self.top_left.set(0, 0)
self.bottom_right.set(0, 0) | python | {
"resource": ""
} |
q39696 | Player.cells_changed | train | def cells_changed(self):
"""
Calculates `total_size`, `total_mass`, `scale`, and `center`.
Has to be called when the controlled cells (`own_ids`) change.
"""
self.total_size = sum(cell.size for cell in self.own_cells)
self.total_mass = sum(cell.mass for cell in self.own_cells)
self.scale = pow(min(1.0, 64.0 / self.total_size), 0.4) \
if self.total_size > 0 else 1.0
if self.own_ids:
left = min(cell.pos.x for cell in self.own_cells)
right = max(cell.pos.x for cell in self.own_cells)
top = min(cell.pos.y for cell in self.own_cells)
bottom = max(cell.pos.y for cell in self.own_cells)
self.center = Vec(left + right, top + bottom) / 2 | python | {
"resource": ""
} |
q39697 | has_segment_tables | train | def has_segment_tables(xmldoc, name = None):
"""
Return True if the document contains a complete set of segment
tables. Returns False otherwise. If name is given and not None
then the return value is True only if the document's segment
tables, if present, contain a segment list by that name.
"""
try:
names = lsctables.SegmentDefTable.get_table(xmldoc).getColumnByName("name")
lsctables.SegmentTable.get_table(xmldoc)
lsctables.SegmentSumTable.get_table(xmldoc)
except (ValueError, KeyError):
return False
return name is None or name in names | python | {
"resource": ""
} |
q39698 | segmenttable_get_by_name | train | def segmenttable_get_by_name(xmldoc, name):
"""
Retrieve the segmentlists whose name equals name. The result is a
segmentlistdict indexed by instrument.
The output of this function is not coalesced, each segmentlist
contains the segments as found in the segment table.
NOTE: this is a light-weight version of the .get_by_name() method
of the LigolwSegments class intended for use when the full
machinery of that class is not required. Considerably less
document validation and error checking is performed by this
version. Consider using that method instead if your application
will be interfacing with the document via that class anyway.
"""
#
# find required tables
#
def_table = lsctables.SegmentDefTable.get_table(xmldoc)
seg_table = lsctables.SegmentTable.get_table(xmldoc)
#
# segment_def_id --> instrument names mapping but only for
# segment_definer entries bearing the requested name
#
instrument_index = dict((row.segment_def_id, row.instruments) for row in def_table if row.name == name)
#
# populate result segmentlistdict object from segment_def_map table
# and index
#
instruments = set(instrument for instruments in instrument_index.values() for instrument in instruments)
result = segments.segmentlistdict((instrument, segments.segmentlist()) for instrument in instruments)
for row in seg_table:
if row.segment_def_id in instrument_index:
seg = row.segment
for instrument in instrument_index[row.segment_def_id]:
result[instrument].append(seg)
#
# done
#
return result | python | {
"resource": ""
} |
q39699 | LigolwSegments.insert_from_segwizard | train | def insert_from_segwizard(self, fileobj, instruments, name, version = None, comment = None):
"""
Parse the contents of the file object fileobj as a
segwizard-format segment list, and insert the result as a
new list of "active" segments into this LigolwSegments
object. A new entry will be created in the segment_definer
table for the segment list, and instruments, name and
comment are used to populate the entry's metadata. Note
that the "valid" segments are left empty, nominally
indicating that there are no periods of validity.
"""
self.add(LigolwSegmentList(active = segmentsUtils.fromsegwizard(fileobj, coltype = LIGOTimeGPS), instruments = instruments, name = name, version = version, comment = comment)) | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.