code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
|---|---|---|---|---|---|
min_priority = transcript_effect_priority_dict[min_priority_class]
return self.filter(
lambda effect: effect_priority(effect) >= min_priority)
|
def filter_by_effect_priority(self, min_priority_class)
|
Create a new EffectCollection containing only effects whose priority
falls at or above the given class.
| 5.625123
| 5.347862
| 1.051845
|
lines = []
# TODO: annoying to always write `groupby_result.items()`,
# consider making a GroupBy class which iterates over pairs
# and also common helper methods like `map_values`.
for variant, variant_effects in self.groupby_variant().items():
lines.append("\n%s" % variant)
gene_effects_groups = variant_effects.groupby_gene_id()
for (gene_id, gene_effects) in gene_effects_groups.items():
if gene_id:
gene_name = variant.ensembl.gene_name_of_gene_id(gene_id)
lines.append(" Gene: %s (%s)" % (gene_name, gene_id))
# place transcript effects with more significant impact
# on top (e.g. FrameShift should go before NoncodingTranscript)
for effect in sorted(
gene_effects,
key=effect_priority,
reverse=True):
lines.append(" -- %s" % effect)
# if we only printed one effect for this gene then
# it's redundant to print it again as the highest priority effect
if len(variant_effects) > 1:
best = variant_effects.top_priority_effect()
lines.append(" Highest Priority Effect: %s" % best)
return "\n".join(lines)
|
def detailed_string(self)
|
Create a long string with all transcript effects for each mutation,
grouped by gene (if a mutation affects multiple genes).
| 6.101496
| 5.645601
| 1.080752
|
return OrderedDict(
(variant, top_priority_effect(variant_effects))
for (variant, variant_effects)
in self.groupby_variant().items())
|
def top_priority_effect_per_variant(self)
|
Highest priority effect for each unique variant
| 5.868579
| 5.342803
| 1.098408
|
return OrderedDict(
(transcript_id, top_priority_effect(variant_effects))
for (transcript_id, variant_effects)
in self.groupby_transcript_id().items())
|
def top_priority_effect_per_transcript_id(self)
|
Highest priority effect for each unique transcript ID
| 4.42996
| 4.250929
| 1.042116
|
return OrderedDict(
(gene_id, top_priority_effect(variant_effects))
for (gene_id, variant_effects)
in self.groupby_gene_id().items())
|
def top_priority_effect_per_gene_id(self)
|
Highest priority effect for each unique gene ID
| 4.763668
| 4.575391
| 1.04115
|
return OrderedDict(
(effect, expression_levels.get(effect.transcript.id, 0.0))
for effect in self
if effect.transcript is not None)
|
def effect_expression(self, expression_levels)
|
Parameters
----------
expression_levels : dict
Dictionary mapping transcript IDs to length-normalized expression
levels (either FPKM or TPM)
Returns dictionary mapping each transcript effect to an expression
quantity. Effects that don't have an associated transcript
(e.g. Intergenic) will not be included.
| 6.22694
| 4.880075
| 1.275993
|
effect_expression_dict = self.effect_expression(expression_levels)
if len(effect_expression_dict) == 0:
return None
def key_fn(effect_fpkm_pair):
(effect, fpkm) = effect_fpkm_pair
return (fpkm, multi_gene_effect_sort_key(effect))
return max(effect_expression_dict.items(), key=key_fn)[0]
|
def top_expression_effect(self, expression_levels)
|
Return effect whose transcript has the highest expression level.
If none of the effects are expressed or have associated transcripts,
then return None. In case of ties, add lexicographical sorting by
effect priority and transcript length.
| 4.070003
| 3.393116
| 1.199488
|
# list of properties to extract from Variant objects if they're
# not None
variant_properties = [
"contig",
"start",
"ref",
"alt",
"is_snv",
"is_transversion",
"is_transition"
]
def row_from_effect(effect):
row = OrderedDict()
row['variant'] = str(effect.variant.short_description)
for field_name in variant_properties:
# if effect.variant is None then this column value will be None
row[field_name] = getattr(effect.variant, field_name, None)
row['gene_id'] = effect.gene_id
row['gene_name'] = effect.gene_name
row['transcript_id'] = effect.transcript_id
row['transcript_name'] = effect.transcript_name
row['effect_type'] = effect.__class__.__name__
row['effect'] = effect.short_description
return row
return pd.DataFrame.from_records([row_from_effect(effect) for effect in self])
|
def to_dataframe(self)
|
Build a dataframe from the effect collection
| 3.137084
| 2.991401
| 1.048701
|
rng = random.Random(random_seed)
ensembl = genome_for_reference_name(genome_name)
if ensembl in _transcript_ids_cache:
transcript_ids = _transcript_ids_cache[ensembl]
else:
transcript_ids = ensembl.transcript_ids()
_transcript_ids_cache[ensembl] = transcript_ids
variants = []
# we should finish way before this loop is over but just in case
# something is wrong with PyEnsembl we want to avoid an infinite loop
for _ in range(count * 100):
if len(variants) < count:
transcript_id = rng.choice(transcript_ids)
transcript = ensembl.transcript_by_id(transcript_id)
if not transcript.complete:
continue
exon = rng.choice(transcript.exons)
base1_genomic_position = rng.randint(exon.start, exon.end)
transcript_offset = transcript.spliced_offset(base1_genomic_position)
seq = transcript.sequence
ref = str(seq[transcript_offset])
if transcript.on_backward_strand:
ref = reverse_complement(ref)
alt_nucleotides = [x for x in STANDARD_NUCLEOTIDES if x != ref]
if insertions:
nucleotide_pairs = [
x + y
for x in STANDARD_NUCLEOTIDES
for y in STANDARD_NUCLEOTIDES
]
alt_nucleotides.extend(nucleotide_pairs)
if deletions:
alt_nucleotides.append("")
alt = rng.choice(alt_nucleotides)
variant = Variant(
transcript.contig,
base1_genomic_position,
ref=ref,
alt=alt,
ensembl=ensembl)
variants.append(variant)
else:
return VariantCollection(variants)
raise ValueError(
("Unable to generate %d random variants, "
"there may be a problem with PyEnsembl") % count)
|
def random_variants(
count,
genome_name="GRCh38",
deletions=True,
insertions=True,
random_seed=None)
|
Generate a VariantCollection with random variants that overlap
at least one complete coding transcript.
| 2.996281
| 2.945374
| 1.017284
|
if anonymous == True:
logger.debug('Getting server info via Anonymous BIND on server %s' % self.target_server.get_host())
server = Server(self.target_server.get_host(), use_ssl=self.target_server.is_ssl(), get_info=ALL)
conn = Connection(server, auto_bind=True)
logger.debug('Got server info')
else:
logger.debug('Getting server info via credentials supplied on server %s' % self.target_server.get_host())
server = Server(self.target_server.get_host(), use_ssl=self.target_server.is_ssl(), get_info=ALL)
if self.use_sspi == True:
conn = self.monkeypatch()
else:
conn = Connection(self._srv, user=self.login_credential.get_msuser(), password=self.login_credential.get_password(), authentication=self.login_credential.get_authmethod())
logger.debug('Performing BIND to server %s' % self.target_server.get_host())
if not self._con.bind():
if 'description' in self._con.result:
raise Exception('Failed to bind to server! Reason: %s' % conn.result['description'])
raise Exception('Failed to bind to server! Reason: %s' % conn.result)
logger.debug('Connected to server!')
return server.info
|
def get_server_info(self, anonymous = True)
|
Performs bind on the server and grabs the DSA info object.
If anonymous is set to true, then it will perform anonymous bind, not using user credentials
Otherwise it will use the credentials set in the object constructor.
| 3.249518
| 3.007326
| 1.080534
|
logger.debug('Paged search, filter: %s attributes: %s' % (ldap_filter, ','.join(attributes)))
ctr = 0
entries = self._con.extend.standard.paged_search(self._tree, ldap_filter, attributes = attributes, paged_size = self.ldap_query_page_size)
for entry in entries:
if 'raw_attributes' in entry and 'attributes' in entry:
# TODO: return ldapuser object
ctr += 1
if ctr % self.ldap_query_page_size == 0:
logger.info('New page requested. Result count: %d' % ctr)
yield entry
|
def pagedsearch(self, ldap_filter, attributes)
|
Performs a paged search on the AD, using the filter and attributes as a normal query does.
Needs to connect to the server first!
ldap_filter: str : LDAP query filter
attributes: list : Attributes list to receive in the result
| 3.481739
| 3.555023
| 0.979386
|
logger.debug('Polling AD for all user objects')
ldap_filter = r'(objectClass=user)'
attributes = MSADUser.ATTRS
for entry in self.pagedsearch(ldap_filter, attributes):
# TODO: return ldapuser object
yield MSADUser.from_ldap(entry, self._ldapinfo)
logger.debug('Finished polling for entries!')
|
def get_all_user_objects(self)
|
Fetches all user objects from the AD, and returns MSADUser object
| 8.169845
| 7.266865
| 1.12426
|
logger.debug('Polling AD for user %s'% sAMAccountName)
ldap_filter = r'(&(objectClass=user)(sAMAccountName=%s))' % sAMAccountName
attributes = MSADUser.ATTRS
for entry in self.pagedsearch(ldap_filter, attributes):
# TODO: return ldapuser object
yield MSADUser.from_ldap(entry, self._ldapinfo)
logger.debug('Finished polling for entries!')
|
def get_user(self, sAMAccountName)
|
Fetches one user object from the AD, based on the sAMAccountName attribute (read: username)
| 5.561793
| 5.495365
| 1.012088
|
logger.debug('Polling AD for basic info')
ldap_filter = r'(distinguishedName=%s)' % self._tree
attributes = MSADInfo.ATTRS
for entry in self.pagedsearch(ldap_filter, attributes):
self._ldapinfo = MSADInfo.from_ldap(entry)
return self._ldapinfo
logger.debug('Poll finished!')
|
def get_ad_info(self)
|
Polls for basic AD information (needed to determine password usage characteristics!)
| 7.3339
| 5.788798
| 1.266912
|
logger.debug('Polling AD for all user objects, machine accounts included: %s'% include_machine)
if include_machine == True:
ldap_filter = r'(servicePrincipalName=*)'
else:
ldap_filter = r'(&(servicePrincipalName=*)(!(sAMAccountName = *$)))'
attributes = MSADUser.ATTRS
for entry in self.pagedsearch(ldap_filter, attributes):
# TODO: return ldapuser object
yield MSADUser.from_ldap(entry, self._ldapinfo)
logger.debug('Finished polling for entries!')
|
def get_all_service_user_objects(self, include_machine = False)
|
Fetches all service user objects from the AD, and returns MSADUser object.
Service user refers to a user with the SPN (servicePrincipalName) attribute set
| 6.809219
| 6.189128
| 1.10019
|
logger.debug('Polling AD for all user objects, machine accounts included: %s'% include_machine)
if include_machine == True:
ldap_filter = r'(userAccountControl:1.2.840.113556.1.4.803:=4194304)'
else:
ldap_filter = r'(&(userAccountControl:1.2.840.113556.1.4.803:=4194304)(!(sAMAccountName = *$)))'
attributes = MSADUser.ATTRS
for entry in self.pagedsearch(ldap_filter, attributes):
# TODO: return ldapuser object
yield MSADUser.from_ldap(entry, self._ldapinfo)
logger.debug('Finished polling for entries!')
|
def get_all_knoreq_user_objects(self, include_machine = False)
|
Fetches all user objects with useraccountcontrol DONT_REQ_PREAUTH flag set from the AD, and returns MSADUser object.
| 3.910797
| 3.601452
| 1.085895
|
if x == []:
return None
if isinstance(x, list):
return '|'.join(x)
if isinstance(x, datetime):
return x.isoformat()
return x
|
def vn(x)
|
value or none, returns none if x is an empty list
| 3.204745
| 2.728928
| 1.174361
|
if hasattr(item, 'tags'):
tags = item.tags
if isinstance(tags, list):
tags_dict = {}
for kv_dict in tags:
if isinstance(kv_dict, dict) and 'Key' in kv_dict and 'Value' in kv_dict:
tags_dict[kv_dict['Key']] = kv_dict['Value']
return ObjectProxy(item, tags=tags_dict)
return item
|
def convert_tags_to_dict(item)
|
Convert AWS inconvenient tags model of a list of {"Key": <key>, "Value": <value>} pairs
to a dict of {<key>: <value>} for easier querying.
This returns a proxied object over given item to return a different tags format as the tags
attribute is read-only and we cannot modify it directly.
| 2.536422
| 2.136958
| 1.186931
|
try:
for table in meta.tables:
self.load_table(table)
except NoCredentialsError:
help_link = 'http://boto3.readthedocs.io/en/latest/guide/configuration.html'
raise QueryError('Unable to locate AWS credential. '
'Please see {0} on how to configure AWS credential.'.format(help_link))
|
def load_tables(self, query, meta)
|
Load necessary resources tables into db to execute given query.
| 4.878566
| 4.745067
| 1.028134
|
region = table.database if table.database else self.default_region
resource_name, collection_name = table.table.split('_', 1)
# we use underscore "_" instead of dash "-" for region name but boto3 need dash
boto_region_name = region.replace('_', '-')
resource = self.boto3_session.resource(resource_name, region_name=boto_region_name)
if not hasattr(resource, collection_name):
raise QueryError(
'Unknown collection <{0}> of resource <{1}>'.format(collection_name, resource_name))
self.attach_region(region)
self.refresh_table(region, table.table, resource, getattr(resource, collection_name))
|
def load_table(self, table)
|
Load resources as specified by given table into our db.
| 5.15372
| 4.72153
| 1.091536
|
if isinstance(obj, datetime):
return obj.isoformat()
if hasattr(obj, 'id'):
return jsonify(obj.id)
if hasattr(obj, 'name'):
return jsonify(obj.name)
raise TypeError('{0} is not JSON serializable'.format(obj))
|
def json_serialize(obj)
|
Simple generic JSON serializer for common objects.
| 2.631485
| 2.618444
| 1.00498
|
# return null if serialized_object is null or "serialized null"
if serialized_object is None:
return None
obj = json.loads(serialized_object)
if obj is None:
return None
if isinstance(field, int):
# array index access
res = obj[field] if 0 <= field < len(obj) else None
else:
# object field access
res = obj.get(field)
if not isinstance(res, (int, float, string_types)):
res = json.dumps(res)
return res
|
def json_get(serialized_object, field)
|
This emulates the HSTORE `->` get value operation.
It get value from JSON serialized column by given key and return `null` if not present.
Key can be either an integer for array index access or a string for object field access.
:return: JSON serialized value of key in object
| 3.182708
| 3.096293
| 1.027909
|
table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name
db.execute('DROP TABLE IF EXISTS {0}'.format(table))
columns_list = ', '.join(columns)
db.execute('CREATE TABLE {0} ({1})'.format(table, columns_list))
|
def create_table(db, schema_name, table_name, columns)
|
Create a table, schema_name.table_name, in given database with given list of column names.
| 1.979599
| 1.903215
| 1.040134
|
table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name
columns_list = ', '.join(columns)
values_list = ', '.join(['?'] * len(columns))
query = 'INSERT INTO {table} ({columns}) VALUES ({values})'.format(
table=table, columns=columns_list, values=values_list)
for item in items:
values = [getattr(item, col) for col in columns]
db.execute(query, values)
|
def insert_all(db, schema_name, table_name, columns, items)
|
Insert all item in given items list into the specified table, schema_name.table_name.
| 1.811734
| 1.778762
| 1.018537
|
from .comic import Comic, ComicDataWrapper
return self.get_related_resource(Comic, ComicDataWrapper, args, kwargs)
|
def get_comics(self, *args, **kwargs)
|
Returns a full ComicDataWrapper object for this creator.
/creators/{creatorId}/comics
:returns: ComicDataWrapper -- A new request to API. Contains full results set.
| 6.534667
| 4.846529
| 1.348319
|
from .event import Event, EventDataWrapper
return self.get_related_resource(Event, EventDataWrapper, args, kwargs)
|
def get_events(self, *args, **kwargs)
|
Returns a full EventDataWrapper object for this creator.
/creators/{creatorId}/events
:returns: EventDataWrapper -- A new request to API. Contains full results set.
| 9.476438
| 7.262035
| 1.304929
|
from .series import Series, SeriesDataWrapper
return self.get_related_resource(Series, SeriesDataWrapper, args, kwargs)
|
def get_series(self, *args, **kwargs)
|
Returns a full SeriesDataWrapper object for this creator.
/creators/{creatorId}/series
:returns: SeriesDataWrapper -- A new request to API. Contains full results set.
| 8.952343
| 6.542262
| 1.368387
|
from .story import Story, StoryDataWrapper
return self.get_related_resource(Story, StoryDataWrapper, args, kwargs)
|
def get_stories(self, *args, **kwargs)
|
Returns a full StoryDataWrapper object for this creator.
/creators/{creatorId}/stories
:returns: StoryDataWrapper -- A new request to API. Contains full results set.
| 9.25476
| 7.242579
| 1.277827
|
items = []
for item in _list:
items.append(_Class(_self.marvel, item))
return items
|
def list_to_instance_list(_self, _list, _Class)
|
Takes a list of resource dicts and returns a list
of resource instances, defined by the _Class param.
:param _self: Original resource calling the method
:type _self: core.MarvelObject
:param _list: List of dicts describing a Resource.
:type _list: list
:param _Class: The Resource class to create a list of (Comic, Creator, etc).
:type _Class: core.MarvelObject
:returns: list -- List of Resource instances (Comic, Creator, etc).
| 5.258574
| 4.347002
| 1.209701
|
url = "%s/%s/%s" % (_self.resource_url(), _self.id, _Class.resource_url())
response = json.loads(_self.marvel._call(url, _self.marvel._params(kwargs)).text)
return _ClassDataWrapper(_self.marvel, response)
|
def get_related_resource(_self, _Class, _ClassDataWrapper, *args, **kwargs)
|
Takes a related resource Class
and returns the related resource DataWrapper.
For Example: Given a Character instance, return
a ComicsDataWrapper related to that character.
/character/{characterId}/comics
:param _Class: The Resource class retrieve
:type _Class: core.MarvelObject
:param _ClassDataWrapper: The Resource response object
:type _Class: core.MarvelObject
:param kwargs: dict of query params for the API
:type kwargs: dict
:returns: DataWrapper -- DataWrapper for requested Resource
| 4.296133
| 4.036373
| 1.064355
|
self.params['offset'] = str(int(self.params['offset']) + int(self.params['limit']))
return self.marvel.get_characters(self.marvel, (), **self.params)
|
def next(self)
|
Returns new CharacterDataWrapper
TODO: Don't raise offset past count - limit
| 6.004723
| 5.064366
| 1.185681
|
self.params['offset'] = str(int(self.params['offset']) - int(self.params['limit']))
return self.marvel.get_characters(self.marvel, (), **self.params)
|
def previous(self)
|
Returns new CharacterDataWrapper
TODO: Don't lower offset below 0
| 5.903298
| 5.397806
| 1.093648
|
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
|
def _call(self, resource_url, params=None)
|
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
| 3.030739
| 2.969042
| 1.02078
|
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
|
def _auth(self)
|
Creates hash from api keys and returns all required parameters
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
| 2.768818
| 2.250322
| 1.23041
|
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
|
def get_character(self, id)
|
Fetches a single character by id.
get /v1/public/characters/{characterId}
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
| 5.890991
| 6.120378
| 0.962521
|
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
|
def get_characters(self, *args, **kwargs)
|
Fetches lists of comic characters with optional filters.
get /v1/public/characters
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
| 14.333901
| 16.026451
| 0.89439
|
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
|
def get_comic(self, id)
|
Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
| 6.210229
| 5.101939
| 1.217229
|
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
|
def get_comics(self, *args, **kwargs)
|
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
| 9.12301
| 7.67631
| 1.188463
|
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
|
def get_creator(self, id)
|
Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
| 7.03654
| 5.397425
| 1.303685
|
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
|
def get_creators(self, *args, **kwargs)
|
Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
| 10.676435
| 9.255613
| 1.153509
|
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
|
def get_event(self, id)
|
Fetches a single event by id.
get /v1/public/events/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
| 6.622393
| 5.305665
| 1.248174
|
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
|
def get_events(self, *args, **kwargs)
|
Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
| 10.889918
| 10.385872
| 1.048532
|
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
|
def get_single_series(self, id)
|
Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
| 6.524349
| 5.130672
| 1.271636
|
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
|
def get_series(self, *args, **kwargs)
|
Fetches lists of series.
get /v1/public/series
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
| 11.392612
| 9.444053
| 1.206327
|
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
|
def get_story(self, id)
|
Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
| 6.275258
| 4.806903
| 1.305468
|
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response)
|
def get_stories(self, *args, **kwargs)
|
Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
| 10.275792
| 9.660239
| 1.06372
|
from .creator import Creator, CreatorDataWrapper
return self.get_related_resource(Creator, CreatorDataWrapper, args, kwargs)
|
def get_creators(self, *args, **kwargs)
|
Returns a full CreatorDataWrapper object for this story.
/stories/{storyId}/creators
:returns: CreatorDataWrapper -- A new request to API. Contains full results set.
| 8.84031
| 7.002931
| 1.262373
|
from .character import Character, CharacterDataWrapper
return self.get_related_resource(Character, CharacterDataWrapper, args, kwargs)
|
def get_characters(self, *args, **kwargs)
|
Returns a full CharacterDataWrapper object for this story.
/stories/{storyId}/characters
:returns: CharacterDataWrapper -- A new request to API. Contains full results set.
| 8.356674
| 7.002673
| 1.193355
|
cmd = [
'ffmpeg',
'-version'
]
output = sp.check_output(cmd)
aac_codecs = [
x for x in
output.splitlines() if "ffmpeg version " in str(x)
][0]
hay = aac_codecs.decode('ascii')
match = re.findall(r'ffmpeg version (\d+\.)?(\d+\.)?(\*|\d+)', hay)
if match:
return "".join(match[0])
else:
return None
|
def ffmpeg_version()
|
Returns the available ffmpeg version
Returns
----------
version : str
version number as string
| 4.072954
| 4.045066
| 1.006894
|
parser = argparse.ArgumentParser()
parser.add_argument(
'--version', '-V',
action='version',
version='%%(prog)s %s' % __version__
)
parser.add_argument(
'filename',
metavar="filename",
help="Input STEM file"
)
parser.add_argument(
'--id',
metavar='id',
type=int,
nargs='+',
help="A list of stem_ids"
)
parser.add_argument(
'-s',
type=float,
nargs='?',
help="start offset in seconds"
)
parser.add_argument(
'-t',
type=float,
nargs='?',
help="read duration"
)
parser.add_argument(
'outdir',
metavar='outdir',
nargs='?',
help="Output folder"
)
args = parser.parse_args(inargs)
stem2wav(args.filename, args.outdir, args.id, args.s, args.t)
|
def cli(inargs=None)
|
Commandline interface for receiving stem files
| 2.419235
| 2.382293
| 1.015507
|
cmd = [
'ffmpeg',
'-v', 'error',
'-codecs'
]
output = sp.check_output(cmd)
aac_codecs = [
x for x in
output.splitlines() if "AAC (Advanced Audio Coding)" in str(x)
][0]
hay = aac_codecs.decode('ascii')
match = re.findall(r'\(encoders: ([^\)]*) \)', hay)
if match:
return match[0].split(" ")
else:
return None
|
def check_available_aac_encoders()
|
Returns the available AAC encoders
Returns
----------
codecs : list(str)
List of available encoder codecs
| 4.099761
| 3.892541
| 1.053235
|
if int(stempeg.ffmpeg_version()[0]) < 3:
warnings.warn(
"Writing STEMS with FFMPEG version < 3 is unsupported", UserWarning
)
if codec is None:
avail = check_available_aac_encoders()
if avail is not None:
if 'libfdk_aac' in avail:
codec = 'libfdk_aac'
else:
codec = 'aac'
warnings.warn("For better quality, please install libfdc_aac")
else:
codec = 'aac'
warnings.warn("For better quality, please install libfdc_aac")
tmps = [
tmp.NamedTemporaryFile(delete=False, suffix='.wav')
for t in range(audio.shape[0])
]
if audio.shape[1] % 1024 != 0:
warnings.warn(
"Number of samples does not divide by 1024, be aware that "
"the AAC encoder add silence to the input signal"
)
for k in range(audio.shape[0]):
sf.write(tmps[k].name, audio[k], rate)
cmd = (
[
'ffmpeg', '-y',
"-f", 's%dle' % (16),
"-acodec", 'pcm_s%dle' % (16),
'-ar', "%d" % rate,
'-ac', "%d" % 2
] +
list(chain.from_iterable(
[['-i', i.name] for i in tmps]
)) +
list(chain.from_iterable(
[['-map', str(k)] for k, _ in enumerate(tmps)]
)) +
[
'-vn',
'-acodec', codec,
'-ar', "%d" % rate,
'-strict', '-2',
'-loglevel', 'error'
] +
(['-ab', str(bitrate)] if (bitrate is not None) else []) +
(ffmpeg_params if ffmpeg_params else []) +
[filename]
)
sp.call(cmd)
|
def write_stems(
audio,
filename,
rate=44100,
bitrate=256000,
codec=None,
ffmpeg_params=None
)
|
Write stems from numpy Tensor
Parameters
----------
audio : array_like
The tensor of Matrix of stems. The data shape is formatted as
:code:`stems x channels x samples`.
filename : str
Output file_name of the stems file
rate : int
Output samplerate. Defaults to 44100 Hz.
bitrate : int
AAC Bitrate in Bits per second. Defaults to 256 Kbit/s
codec : str
AAC codec used. Defaults to `None` which automatically selects
either `libfdk_aac` or `aac` in that order, determined by availability.
ffmpeg_params : list(str)
List of additional ffmpeg parameters
Notes
-----
Output is written as 16bit/44.1 kHz
| 2.986728
| 3.039181
| 0.982741
|
cmd = [
'ffprobe',
filename,
'-v', 'error',
'-print_format', 'json',
'-show_format', '-show_streams',
]
out = sp.check_output(cmd)
info = json.loads(out.decode('utf-8'))
return info
|
def read_info(
filename
)
|
Extracts FFMPEG info and returns info as JSON
Returns
-------
info : Dict
JSON info dict
| 2.541644
| 2.407881
| 1.055552
|
if info is None:
FFinfo = Info(filename)
else:
FFinfo = info
if stem_id is not None:
substreams = stem_id
else:
substreams = FFinfo.audio_stream_idx()
if not isinstance(substreams, list):
substreams = [substreams]
stems = []
tmps = [
tmp.NamedTemporaryFile(delete=False, suffix='.wav')
for t in substreams
]
for tmp_id, stem in enumerate(substreams):
rate = FFinfo.rate(stem)
channels = FFinfo.channels(stem)
cmd = [
'ffmpeg',
'-y',
'-vn',
'-i', filename,
'-map', '0:' + str(stem),
'-acodec', 'pcm_s16le',
'-ar', str(rate),
'-ac', str(channels),
'-loglevel', 'error',
tmps[tmp_id].name
]
if start:
cmd.insert(3, '-ss')
cmd.insert(4, str(start))
if duration is not None:
cmd.insert(-1, '-t')
cmd.insert(-1, str(duration))
sp.call(cmd)
# read wav files
audio, rate = sf.read(tmps[tmp_id].name)
tmps[tmp_id].close()
os.remove(tmps[tmp_id].name)
stems.append(audio)
# check if all stems have the same duration
stem_durations = np.array([t.shape[0] for t in stems])
if not (stem_durations == stem_durations[0]).all():
warnings.warn("Warning.......Stems differ in length and were shortend")
min_length = np.min(stem_durations)
stems = [t[:min_length, :] for t in stems]
stems = np.array(stems)
stems = np.squeeze(stems).astype(out_type)
return stems, rate
|
def read_stems(
filename,
out_type=np.float_,
stem_id=None,
start=0,
duration=None,
info=None
)
|
Read STEMS format into numpy Tensor
Parameters
----------
filename : str
Filename of STEMS format. Typically `filename.stem.mp4`.
out_type : type
Output type. Defaults to 32bit float aka `np.float32`.
stem_id : int
Stem ID (Stream ID) to read. Defaults to `None`, which reads all
available stems.
start : float
Start position (seek) in seconds, defaults to 0.
duration : float
Read `duration` seconds. End position then is `start + duration`.
Defaults to `None`: read till the end.
info : object
provide info object, useful if read_stems is called frequently on
file with same configuration (#streams, #channels, samplerate).
Returns
-------
stems : array_like
The tensor of Matrix of stems. The data shape is formatted as
:code:`stems x channels x samples`.
Notes
-----
Input is expected to be in 16bit/44.1 kHz
| 2.471807
| 2.558178
| 0.966237
|
result = self._index.ndim
assert result == 1, "Expected index to be 1D, got: {}D".format(result)
return result
|
def nDims(self)
|
The number of dimensions of the index. Will always be 1.
| 7.255003
| 5.309583
| 1.366398
|
if self._ndFrame is None:
    # No underlying pandas object: defer to the parent implementation.
    return super(AbstractPandasNDFrameRti, self).elementTypeName
else:
    try:
        return str(self._ndFrame.dtype)  # Series has a single dtype
    except AttributeError:
        # DataFrames/Panels have no single .dtype attribute (one dtype per
        # column); report a generic structured type instead.
        return '<structured>'
|
def elementTypeName(self)
|
String representation of the element type.
| 12.995209
| 12.117687
| 1.072417
|
# Wrap a pandas Index in its own repo-tree item, inheriting this item's
# file name and icon color so it displays consistently with its parent.
return PandasIndexRti(index=index, nodeName=nodeName, fileName=self.fileName,
                      iconColor=self._iconColor)
|
def _createIndexRti(self, index, nodeName)
|
Auxiliary method that creates a PandasIndexRti.
| 12.049903
| 7.017837
| 1.717039
|
assert self.isSliceable, "No underlying pandas object: self._ndFrame is None"
childItems = []
if self._standAlone:
childItems.append(self._createIndexRti(self._ndFrame.index, 'index'))
return childItems
|
def _fetchAllChildren(self)
|
Fetches the index if the showIndex member is True
Descendants can override this function to add the subdevicions.
| 16.468721
| 17.005814
| 0.968417
|
assert self.isSliceable, "No underlying pandas object: self._ndFrame is None"
childItems = []
# Each column becomes a (non stand-alone) Series child item.
for subName in self._ndFrame.columns:  # Note that this is not the first dimension!
    childItem = PandasSeriesRti(self._ndFrame[subName], nodeName=subName,
                                fileName=self.fileName, iconColor=self._iconColor,
                                standAlone=False)
    childItems.append(childItem)
if self._standAlone:
    # Only stand-alone DataFrames expose their index and columns as children.
    childItems.append(self._createIndexRti(self._ndFrame.index, 'index'))
    childItems.append(self._createIndexRti(self._ndFrame.columns, 'columns'))
return childItems
|
def _fetchAllChildren(self)
|
Fetches children items.
If this is stand-alone DataFrame the index, column etc are added as PandasIndexRti obj.
| 6.674377
| 5.083797
| 1.312873
|
assert self.isSliceable, "No underlying pandas object: self._ndFrame is None"
childItems = []
# Each item of the Panel becomes a (non stand-alone) DataFrame child item.
for subName in self._ndFrame.items:
    childItem = PandasDataFrameRti(self._ndFrame[subName], nodeName=subName,
                                   fileName=self.fileName, iconColor=self._iconColor,
                                   standAlone=False)
    childItems.append(childItem)
if self._standAlone:
    # Stand-alone Panels expose all three axes as index children.
    childItems.append(self._createIndexRti(self._ndFrame.items, 'items'))
    childItems.append(self._createIndexRti(self._ndFrame.major_axis, 'major_axis'))
    childItems.append(self._createIndexRti(self._ndFrame.minor_axis, 'minor_axis'))
return childItems
|
def _fetchAllChildren(self)
|
Fetches children items.
If this is stand-alone Panel the index, column etc are added as PandasIndexRti obj.
| 4.90548
| 4.022735
| 1.219439
|
# regItem may be None (allow_none=True); the inspector tab decides how a
# None current item is handled.
check_class(regItem, InspectorRegItem, allow_none=True)
self.inspectorTab.setCurrentRegItem(regItem)
|
def setCurrentInspectorRegItem(self, regItem)
|
Sets the current inspector given an InspectorRegItem
| 6.612761
| 6.49678
| 1.017852
|
# A GroupCtiEditor shows no widget of its own, so during editing only the
# delegate's reset button remains visible.
return GroupCtiEditor(self, delegate, parent=parent)
|
def createEditor(self, delegate, parent, _option)
|
Creates a hidden widget so that only the reset button is visible during editing.
:type option: QStyleOptionViewItem
| 34.949013
| 83.992134
| 0.416099
|
level_nr = logging.getLevelName(level.upper())
if logger is None:
logger = logging.getLogger('main')
if msg :
logger.log(level_nr, "Logging dictionary: {}".format(msg))
if not dictionary:
logger.log(level_nr,"{}<empty dictionary>".format(item_prefix))
return
max_key_len = max([len(k) for k in dictionary.keys()])
for key, value in sorted(dictionary.items()):
logger.log(level_nr, "{0}{1:<{2}s} = {3}".format(item_prefix, key, max_key_len, value))
|
def log_dictionary(dictionary, msg='', logger=None, level='debug', item_prefix=' ')
|
Writes a log message with key and value for each item in the dictionary.
:param dictionary: the dictionary to be logged
:type dictionary: dict
:param name: An optional message that is logged before the contents
:type name: string
:param logger: A logging.Logger object to log to. If not set, the 'main' logger is used.
:type logger: logging.Logger or a string
:param level: log level. String or int as described in the logging module documentation.
Default: 'debug'.
:type level: string or int
:param item_prefix: String that will be prefixed to each line. Default: two spaces.
:type item_prefix: string
| 2.388145
| 2.605806
| 0.916471
|
return (s.startswith("'") and s.endswith("'")) or (s.startswith('"') and s.endswith('"'))
|
def is_quoted(s)
|
Returns True if the string begins and ends with quotes (single or double)
:param s: a string
:return: boolean
| 2.170656
| 3.208092
| 0.676619
|
import re
s = s.lower()
s = re.sub(r"\s+", white_space_becomes, s)  # replace whitespace runs with the white_space_becomes character
s = re.sub(r"-", "_", s)  # replace hyphens with underscores
s = re.sub(r"[^A-Za-z0-9_]", "", s)  # remove everything that's not a letter, a digit or a _
return s
|
def string_to_identifier(s, white_space_becomes='_')
|
Takes a string and makes it suitable for use as an identifier
Translates to lower case
Replaces white space by the white_space_becomes character (default=underscore).
Removes and punctuation.
| 2.381597
| 2.612196
| 0.911722
|
check_class(isOpen, bool, allow_none=True)
# Relative file names are resolved against the icons directory.
if fileName and not os.path.isabs(fileName):
    fileName = os.path.join(self.ICONS_DIRECTORY, fileName)
if isOpen is None:
    # Register both opened and closed variants
    self._registry[(glyph, True)] = fileName
    self._registry[(glyph, False)] = fileName
else:
    self._registry[(glyph, isOpen)] = fileName
|
def registerIcon(self, fileName, glyph, isOpen=None)
|
Register an icon SVG file given a glyph, and optionally the open/close state.
:param fileName: filename to the SVG file.
If the filename is a relative path, the ICONS_DIRECTORY will be prepended.
:param glyph: a string describing the glyph (e.g. 'file', 'array')
:param isOpen: boolean that indicates if the RTI is open or closed.
If None, the icon will be registered for open is both True and False
:return: QIcon
| 3.574351
| 3.508474
| 1.018777
|
try:
    fileName = self._registry[(glyph, isOpen)]
except KeyError:
    # Unknown glyph: dump the whole registry to the log to aid debugging,
    # then re-raise the original KeyError.
    logger.warn("Unregistered icon glyph: {} (open={})".format(glyph, isOpen))
    from argos.utils.misc import log_dictionary
    log_dictionary(self._registry, "registry", logger=logger)
    raise
# loadIcon caches per (fileName, color), so repeated calls are cheap.
return self.loadIcon(fileName, color=color)
|
def getIcon(self, glyph, isOpen, color=None)
|
Returns a QIcon given a glyph name, open/closed state and color.
The resulting icon is cached so that it only needs to be rendered once.
:param glyph: name of a registered glyph (e.g. 'file', 'array')
:param isOpen: boolean that indicates if the RTI is open or closed.
:param color: '#RRGGBB' string (e.g. '#FF0000' for red)
:return: QtGui.QIcon
| 6.265618
| 6.691662
| 0.936332
|
if not fileName:
return None
key = (fileName, color)
if key not in self._icons:
try:
with open(fileName, 'r') as input:
svg = input.read()
self._icons[key] = self.createIconFromSvg(svg, color=color)
except Exception as ex:
# It's preferable to show no icon in case of an error rather than letting
# the application fail. Icons are a (very) nice to have.
logger.warn("Unable to read icon: {}".format(ex))
if DEBUGGING:
raise
else:
return None
return self._icons[key]
|
def loadIcon(self, fileName, color=None)
|
Reads SVG from a file name and creates an QIcon from it.
Optionally replaces the color. Caches the created icons.
:param fileName: absolute path to an icon file.
If False/empty/None, None returned, which yields no icon.
:param color: '#RRGGBB' string (e.g. '#FF0000' for red)
:return: QtGui.QIcon
| 4.561989
| 4.281326
| 1.065555
|
if colorsToBeReplaced is None:
    colorsToBeReplaced = self.colorsToBeReplaced
# Plain textual substitution of the fill colors in the SVG source.
if color:
    for oldColor in colorsToBeReplaced:
        svg = svg.replace(oldColor, color)
# From http://stackoverflow.com/questions/15123544/change-the-color-of-an-svg-in-qt
qByteArray = QtCore.QByteArray()
qByteArray.append(svg)
svgRenderer = QtSvg.QSvgRenderer(qByteArray)
icon = QtGui.QIcon()
# Pre-render the SVG at several pixmap sizes so Qt can pick the best fit.
for size in self.renderSizes:
    pixMap = QtGui.QPixmap(QtCore.QSize(size, size))
    pixMap.fill(Qt.transparent)
    pixPainter = QtGui.QPainter(pixMap)
    pixPainter.setRenderHint(QtGui.QPainter.TextAntialiasing, True)
    pixPainter.setRenderHint(QtGui.QPainter.Antialiasing, True)
    svgRenderer.render(pixPainter)
    pixPainter.end()  # painting must end before the pixmap is used
    icon.addPixmap(pixMap)
return icon
|
def createIconFromSvg(self, svg, color=None, colorsToBeReplaced=None)
|
Creates a QIcon given an SVG string.
Optionally replaces the colors in colorsToBeReplaced by color.
:param svg: string containing Scalable Vector Graphics XML
:param color: '#RRGGBB' string (e.g. '#FF0000' for red)
:param colorsToBeReplaced: optional list of colors to be replaced by color
If None, it will be set to the fill colors of the snip-icon libary
:return: QtGui.QIcon
| 2.504932
| 2.521074
| 0.993597
|
# Qt calls this for every cell/role combination; bail out early for
# invalid indexes and for roles this model does not provide.
if not index.isValid():
    return None
if role not in (Qt.DisplayRole, self.SORT_ROLE, Qt.ForegroundRole):
    return None
row = index.row()
col = index.column()
item = self.registry.items[row]
attrName = self.attrNames[col]
if role == Qt.DisplayRole:
    return str(getattr(item, attrName))
elif role == self.SORT_ROLE:
    # Use the fullName column as a tie-breaker
    return (getattr(item, attrName), item.fullName)
elif role == Qt.ForegroundRole:
    # Color the row by import status: not yet imported / ok / failed.
    if item.successfullyImported is None:
        return self.notImportedBrush
    elif item.successfullyImported:
        return self.regularBrush
    else:
        return self.errorBrush
else:
    # Unreachable given the role check above; kept as a sanity guard.
    raise ValueError("Invalid role: {}".format(role))
|
def data(self, index, role=Qt.DisplayRole)
|
Returns the data stored under the given role for the item referred to by the index.
| 3.148373
| 3.088705
| 1.019318
|
if role == QtCore.Qt.DisplayRole:
if orientation == Qt.Horizontal:
return self.attrNames[section]
else:
return str(section)
else:
return None
|
def headerData(self, section, orientation, role)
|
Returns the header for a section (row or column depending on orientation).
Reimplemented from QAbstractTableModel to make the headers start at 0.
| 2.883291
| 2.749767
| 1.048559
|
if col < 0:
col = len(self.attrNames) - col
try:
row = self.registry.items.index(regItem)
except ValueError:
return QtCore.QModelIndex()
else:
return self.index(row, col)
|
def indexFromItem(self, regItem, col=0)
|
Gets the index (with column=0) for the row that contains the regItem
If col is negative, it is counted from the end
| 3.502231
| 3.537747
| 0.989961
|
# Signal a change for the item's entire row: first through last column.
leftIndex = self.indexFromItem(regItem, col=0)
rightIndex = self.indexFromItem(regItem, col=-1)
logger.debug("Data changed: {} ...{}".format(self.data(leftIndex), self.data(rightIndex)))
self.dataChanged.emit(leftIndex, rightIndex)
|
def emitDataChanged(self, regItem)
|
Emits the dataChanged signal for the regItem
| 3.429141
| 3.340956
| 1.026395
|
if not self.onlyShowImported:
return True
item = self.sourceModel().registry.items[sourceRow]
return bool(item.successfullyImported)
|
def filterAcceptsRow(self, sourceRow, sourceParent)
|
If onlyShowImported is True, regItems that were not (successfully) imported are
filtered out.
| 14.360195
| 6.62971
| 2.166037
|
leftData = self.sourceModel().data(leftIndex, RegistryTableModel.SORT_ROLE)
rightData = self.sourceModel().data(rightIndex, RegistryTableModel.SORT_ROLE)
return leftData < rightData
|
def lessThan(self, leftIndex, rightIndex)
|
Returns true if the value of the item referred to by the given index left is less than
the value of the item referred to by the given index right, otherwise returns false.
| 4.750061
| 5.449839
| 0.871596
|
# Translate the proxy index into source-model coordinates first.
sourceIndex = self.mapToSource(index)
return self.sourceModel().itemFromIndex(sourceIndex)
|
def itemFromIndex(self, index)
|
Gets the item given the model index
| 4.338479
| 3.818089
| 1.136296
|
# Look the item up in the source model, then map into proxy coordinates.
sourceIndex = self.sourceModel().indexFromItem(regItem, col=col)
return self.mapFromSource(sourceIndex)
|
def indexFromItem(self, regItem, col=0)
|
Gets the index (with column=0) for the row that contains the regItem
If col is negative, it is counted from the end
| 4.580369
| 5.035995
| 0.909526
|
#self.sourceModel().emitDataChanged(regItem) # Does this work?
# Emit dataChanged for the item's whole row, in proxy coordinates.
leftIndex = self.indexFromItem(regItem, col=0)
rightIndex = self.indexFromItem(regItem, col=-1)
self.dataChanged.emit(leftIndex, rightIndex)
|
def emitDataChanged(self, regItem)
|
Emits the dataChanged signal for the regItem
| 3.760097
| 3.859643
| 0.974208
|
rowIndex = self.model().indexFromItem(regItem)
if not rowIndex.isValid():
    # NOTE(review): this only warns; setCurrentIndex is still called with
    # the invalid index below -- confirm that clearing the current item is
    # the intended behavior here.
    logger.warn("Can't select {!r} in table".format(regItem))
self.setCurrentIndex(rowIndex)
|
def setCurrentRegItem(self, regItem)
|
Sets the current registry item.
| 4.991305
| 5.391128
| 0.925837
|
# Load (or initialize) the persistent registry, add the inspector, and
# save, so the registration survives application restarts.
registry = InspectorRegistry()
registry.loadOrInitSettings()
registry.registerInspector(fullName, fullClassName, pythonPath=pythonPath)
registry.saveSettings()
|
def persistentRegisterInspector(fullName, fullClassName, pythonPath='')
|
Registers an inspector
Loads or inits the inspector registry, register the inspector and saves the settings.
Important: instantiate a Qt application first to use the correct settings file/winreg.
| 4.844441
| 3.763716
| 1.287143
|
# NOTE(review): this span begins inside a try-block whose opening line is
# outside this view; AA_UseHighDpiPixmaps is absent in PyQt4, hence the fallback.
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
except Exception as ex:
    logger.debug("AA_UseHighDpiPixmaps not available in PyQt4: {}".format(ex))
# Create
argosApp = ArgosApplication()
# Optional resets of persistent state, in increasing scope.
if resetProfile:
    argosApp.deleteProfile(profile)
if resetAllProfiles:
    argosApp.deleteAllProfiles()
if resetRegistry:
    argosApp.deleteRegistries()
# Must be called before opening the files so that file formats are auto-detected.
argosApp.loadOrInitRegistries()
# Load data in common repository before windows are created.
argosApp.loadFiles(fileNames)
if DEBUGGING:
    argosApp.repo.insertItem(createArgosTestData())
# Create windows for this profile.
argosApp.loadProfile(profile=profile, inspectorFullName=inspectorFullName)
if select:
    # Try to select the given repository path in every open main window.
    for mainWindow in argosApp.mainWindows:
        mainWindow.trySelectRtiByPath(select)
return argosApp.execute()
|
def browse(fileNames=None,
inspectorFullName=None,
select=None,
profile=DEFAULT_PROFILE,
resetProfile=False, # TODO: should probably be moved to the main program
resetAllProfiles=False, # TODO: should probably be moved to the main program
resetRegistry=False): # TODO: should probably be moved to the main program
# Imported here so this module can be imported without Qt being installed.
from argos.qt import QtWidgets, QtCore
from argos.application import ArgosApplication
from argos.repo.testdata import createArgosTestData
try
|
Opens the main window(s) for the persistent settings of the given profile,
and executes the application.
:param fileNames: List of file names that will be added to the repository
:param inspectorFullName: The full path name of the inspector that will be loaded
:param select: a path of the repository item that will selected at start up.
:param profile: the name of the profile that will be loaded
:param resetProfile: if True, the profile will be reset to its standard settings.
:param resetAllProfiles: if True, all profiles will be reset to their standard settings.
:param resetRegistry: if True, the registry will be reset to its standard settings.
:return:
| 4.766041
| 4.899637
| 0.972734
|
# Imported here so this module can be imported without Qt being installed.
from argos.application import ArgosApplication

# Load the registries and print the full name of every known inspector.
app = ArgosApplication()
app.loadOrInitRegistries()
for inspectorRegItem in app.inspectorRegistry.items:
    print(inspectorRegItem.fullName)
|
def printInspectors()
|
Prints a list of inspectors
| 9.617411
| 9.508512
| 1.011453
|
about_str = "{} version: {}".format(PROJECT_NAME, VERSION)
parser = argparse.ArgumentParser(description=about_str)
parser.add_argument('fileNames', metavar='FILE', nargs='*', help='Input files')
# Bug fix: the three bare `help=)` arguments below were syntax errors
# (help strings missing); meaningful texts supplied.
parser.add_argument('-i', '--inspector', dest='inspector',
    help="The full name of the inspector that will be opened at start up.")
parser.add_argument('--list-inspectors', dest='list_inspectors', action='store_true',
    help="Prints a list of the registered inspectors and exits.")
parser.add_argument('-s', '--select', dest='selectPath',
    help="Path of the repository item that will be selected at start up.")
parser.add_argument('-p', '--profile', dest='profile', default=DEFAULT_PROFILE,
    help="Can be used to have different persistent settings for different use cases.")
parser.add_argument('--reset', '--reset-profile', dest='reset_profile', action='store_true',
    help="If set, persistent settings will be reset for the current profile.")
parser.add_argument('--reset-all-profiles', dest='reset_all_profiles', action='store_true',
    help="If set, persistent settings will be reset for the all profiles.")
parser.add_argument('--reset-registry', dest='reset_registry', action='store_true',
    help="If set, the registry will be reset to contain only the default plugins.")
parser.add_argument('--version', action='store_true',
    help="Prints the program version.")
parser.add_argument('-l', '--log-level', dest='log_level', default='warning',
    help="Log level. Only log messages with a level higher or equal than this will be printed. "
    "Default: 'warning'",
    choices=('debug', 'info', 'warning', 'error', 'critical'))
args = parser.parse_args(remove_process_serial_number(sys.argv[1:]))

# In debug builds the log level can be lowered via the command line.
if DEBUGGING:
    logger.info("Setting log level to: {}".format(args.log_level.upper()))
    logger.setLevel(args.log_level.upper())
if DEBUGGING:
    logger.warning("Debugging flag is on!")  # warn() is a deprecated alias

# --version and --list-inspectors both print and exit immediately.
if args.version:
    print(about_str)
    sys.exit(0)
if args.list_inspectors:
    printInspectors()
    sys.exit(0)

logger.info('Started {} {}'.format(PROJECT_NAME, VERSION))
logger.info("Python version: {}".format(sys.version).replace('\n', ''))
#logger.info('Using: {}'.format('PyQt' if USE_PYQT else 'PySide'))

# Imported here so this module can be imported without Qt being installed.
from argos.qt.misc import ABOUT_QT_BINDINGS
logger.info("Using {}".format(ABOUT_QT_BINDINGS))

# Browse will create an ArgosApplication with one MainWindow
browse(fileNames=args.fileNames,
       inspectorFullName=args.inspector,
       select=args.selectPath,
       profile=args.profile,
       resetProfile=args.reset_profile,
       resetAllProfiles=args.reset_all_profiles,
       resetRegistry=args.reset_registry)
logger.info('Done {}'.format(PROJECT_NAME))
|
def main()
|
Starts Argos main window
| 3.166283
| 3.140186
| 1.008311
|
# Enforce that only BaseTreeModel (sub)classes are used with this view.
check_class(model, BaseTreeModel)
super(ArgosTreeView, self).setModel(model)
|
def setModel(self, model)
|
Sets the model.
Checks that the model is a
| 17.024637
| 19.227463
| 0.885433
|
# Select the whole row as well, so current item and selection stay in
# sync and give consistent user feedback.
selectionModel = self.selectionModel()
selectionFlags = (QtCore.QItemSelectionModel.ClearAndSelect |
                  QtCore.QItemSelectionModel.Rows)
selectionModel.setCurrentIndex(currentIndex, selectionFlags)
|
def setCurrentIndex(self, currentIndex)
|
Sets the current item to be the item at currentIndex.
Also select the row as to give consistent user feedback.
See also the notes at the top of this module on current item vs selected item(s).
| 3.245542
| 3.344834
| 0.970315
|
curIndex = self.currentIndex()
col0Index = curIndex.sibling(curIndex.row(), 0)
return col0Index
|
def getRowCurrentIndex(self)
|
Returns the index of column 0 of the current item in the underlying model.
See also the notes at the top of this module on current item vs selected item(s).
| 5.796036
| 4.008351
| 1.44599
|
def getCurrentItem(self):  # TODO: rename? getCurrentItemAndIndex? getCurrentTuple? getCurrent?
    """Return a (current item, current index) tuple; the item may be None."""
    index = self.getRowCurrentIndex()
    return self.model().getItem(index), index
|
Find the current tree item (and the current index while we're at it)
Returns a tuple with the current item, and its index. The item may be None.
See also the notes at the top of this module on current item vs selected item(s).
| null | null | null |
|
# findItemAndIndexPath returns a list of (item, index) pairs from the
# (invisible) root down to the leaf of the given path.
iiPath = self.model().findItemAndIndexPath(path)
for (item, index) in iiPath[1:]:  # skip invisible root
    assert index.isValid(), "Sanity check: invalid index in path for item: {}".format(item)
    self.expand(index)
# The last (item, index) pair is the leaf node; returned for e.g. selection.
leaf = iiPath[-1]
return leaf
|
def expandPath(self, path)
|
Follows the path and expand all nodes along the way.
Returns (item, index) tuple of the last node in the path (the leaf node). This can be
reused e.g. to select it.
| 10.770543
| 9.034292
| 1.192184
|
treeModel = self.model()
# index=None means: start at the invisible root, i.e. the whole forest.
if index is None:
    index = QtCore.QModelIndex()
if index.isValid():
    self.setExpanded(index, expanded)
# Recurse depth-first into all children of this node.
for rowNr in range(treeModel.rowCount(index)):
    childIndex = treeModel.index(rowNr, 0, parentIndex=index)
    self.expandBranch(index=childIndex, expanded=expanded)
|
def expandBranch(self, index=None, expanded=True)
|
Expands or collapses the node at the index and all its descendants.
If expanded is True the nodes will be expanded, if False they will be collapsed.
If parentIndex is None, the invisible root will be used (i.e. the complete forest will
be expanded).
| 2.430437
| 2.67971
| 0.906977
|
logger.debug("Blocking collector signals")
for spinBox in self._spinBoxes:
spinBox.blockSignals(block)
for comboBox in self._comboBoxes:
comboBox.blockSignals(block)
result = self._signalsBlocked
self._signalsBlocked = block
return result
|
def blockChildrenSignals(self, block)
|
If block equals True, the signals of the combo boxes and spin boxes are blocked
Returns the old blocking state.
| 4.368502
| 3.386966
| 1.289798
|
# Number of slice dimensions of the current RTI (0 if absent or not sliceable).
numRtiDims = self.rti.nDims if self.rti and self.rti.isSliceable else 0
# One column per axis/dimension after the fixed leading columns.
colCount = self.COL_FIRST_COMBO + max(numRtiDims, len(self.axisNames))
self.tree.model().setColumnCount(colCount)
return colCount
|
def _setColumnCountForContents(self)
|
Sets the column count given the current axes and selected RTI.
Returns the newly set column count.
| 8.333749
| 7.442691
| 1.119723
|
# Reset the tree to a single empty row while keeping the column layout.
model = self.tree.model()
# Don't use model.clear(). it will delete the column sizes
model.removeRows(0, 1)
model.setRowCount(1)
self._setColumnCountForContents()
|
def clear(self)
|
Removes all VisItems
| 13.812981
| 14.125155
| 0.977899
|
logger.debug("Collector clearAndSetComboBoxes: {}".format(axesNames))
check_is_a_sequence(axesNames)
row = 0
# Tear down the old widgets/state before installing combo boxes for the
# new axes.
self._deleteComboBoxes(row)
self.clear()
self._setAxesNames(axesNames)
self._createComboBoxes(row)
self._updateWidgets()
|
def clearAndSetComboBoxes(self, axesNames)
|
Removes all comboboxes.
| 6.588902
| 6.613809
| 0.996234
|
# Blank out the header labels of the old axes first.
for col, _ in enumerate(self._fullAxisNames, self.COL_FIRST_COMBO):
    self._setHeaderLabel(col, '')
self._axisNames = tuple(axisNames)
# Full names carry the AXIS_POST_FIX suffix (e.g. 'X' -> 'X-axis').
self._fullAxisNames = tuple([axName + self.AXIS_POST_FIX for axName in axisNames])
for col, label in enumerate(self._fullAxisNames, self.COL_FIRST_COMBO):
    self._setHeaderLabel(col, label)
|
def _setAxesNames(self, axisNames)
|
Sets the axes names and combo box labels, and updates the headers. Removes old values first.
The combo labels are the axes names + '-axis'.
| 4.150251
| 4.092522
| 1.014106
|
model = self.tree.model()
headerItem = model.horizontalHeaderItem(col)
if headerItem is None:
    # No header item at this column yet: create one (this also grows the
    # column count when col is beyond the current range).
    model.setHorizontalHeaderItem(col, QtGui.QStandardItem(text))
else:
    headerItem.setText(text)
|
def _setHeaderLabel(self, col, text)
|
Sets the header of column col to text.
Will increase the number of columns if col is larger than the current number.
| 3.024228
| 3.200846
| 0.944821
|
check_class(rti, BaseRti)
#assert rti.isSliceable, "RTI must be sliceable" # TODO: maybe later
self._rti = rti
# Rebuild the combo/spin boxes and the info pane for the new tree item.
self._updateWidgets()
self._updateRtiInfo()
|
def setRti(self, rti)
|
Updates the current VisItem from the contents of the repo tree item.
Is a slot but the signal is usually connected to the Collector, which then calls
this function directly.
| 10.360602
| 11.433567
| 0.906157
|
row = 0
model = self.tree.model()
# Create path label
nodePath = '' if self.rti is None else self.rti.nodePath
pathItem = QtGui.QStandardItem(nodePath)
pathItem.setToolTip(nodePath)
pathItem.setEditable(False)
if self.rti is not None:
    pathItem.setIcon(self.rti.decoration)
model.setItem(row, 0, pathItem)
# Rebuild the widgets: old spin boxes out, combos repopulated, new spins in.
self._deleteSpinBoxes(row)
self._populateComboBoxes(row)
self._createSpinBoxes(row)
self._updateRtiInfo()
self.tree.resizeColumnsToContents(startCol=self.COL_FIRST_COMBO)
logger.debug("{} sigContentsChanged signal (_updateWidgets)"
             .format("Blocked" if self.signalsBlocked() else "Emitting"))
self.sigContentsChanged.emit(UpdateReason.RTI_CHANGED)
|
def _updateWidgets(self)
|
Updates the combo and spin boxes given the new rti or axes.
Emits the sigContentsChanged signal.
| 6.34356
| 5.269391
| 1.203851
|
tree = self.tree
model = self.tree.model()
self._setColumnCountForContents()
# One combo box per axis, installed as an index widget in the tree row.
for col, _ in enumerate(self._axisNames, self.COL_FIRST_COMBO):
    logger.debug("Adding combobox at ({}, {})".format(row, col))
    comboBox = QtWidgets.QComboBox()
    comboBox.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
    comboBox.activated.connect(self._comboBoxActivated)
    self._comboBoxes.append(comboBox)
    #editor = LabeledWidget(QtWidgets.QLabel(comboLabel), comboBox)
    tree.setIndexWidget(model.index(row, col), comboBox)
|
def _createComboBoxes(self, row)
|
Creates a combo box for each of the fullAxisNames
| 4.299963
| 4.16767
| 1.031743
|
model = self.tree.model()
for column in range(self.COL_FIRST_COMBO, self.maxCombos):
    logger.debug("Removing combobox at: ({}, {})".format(row, column))
    # Setting the index widget to None removes (and deletes) the old widget.
    self.tree.setIndexWidget(model.index(row, column), None)
self._comboBoxes = []
|
def _deleteComboBoxes(self, row)
|
Deletes all comboboxes of a row
| 5.041257
| 5.013633
| 1.00551
|
logger.debug("_populateComboBoxes")
for comboBox in self._comboBoxes:
    comboBox.clear()
if not self.rtiIsSliceable:
    # Add an empty item to the combo boxes so that resize to contents works.
    for comboBoxNr, comboBox in enumerate(self._comboBoxes):
        comboBox.addItem('', userData=None)
        comboBox.setEnabled(False)
    return
nDims = self._rti.nDims
nCombos = len(self._comboBoxes)
for comboBoxNr, comboBox in enumerate(self._comboBoxes):
    # Add a fake dimension of length 1
    comboBox.addItem(FAKE_DIM_NAME, userData = FAKE_DIM_OFFSET + comboBoxNr)
    # Then one entry per real dimension, with its dimension number as data.
    for dimNr in range(nDims):
        comboBox.addItem(self._rti.dimensionNames[dimNr], userData=dimNr)
    # Set combobox current index
    if nDims >= nCombos:
        # We set the nth combo-box index to the last item - n. This because the
        # NetCDF-CF conventions have the preferred dimension order of T, Z, Y, X.
        # The +1 below is from the fake dimension.
        curIdx = nDims + 1 - nCombos + comboBoxNr
    else:
        # If there are less dimensions in the RTI than the inspector can show, we fill
        # the comboboxes starting at the leftmost and set the remaining comboboxes to the
        # fake dimension. This means that a table inspector fill have one column and many
        # rows, which is the most convenient.
        curIdx = comboBoxNr + 1 if comboBoxNr < nDims else 0
    assert 0 <= curIdx <= nDims + 1, \
        "curIdx should be <= {}, got {}".format(nDims + 1, curIdx)
    comboBox.setCurrentIndex(curIdx)
    comboBox.setEnabled(True)
|
def _populateComboBoxes(self, row)
|
Populates the combo boxes with values of the repo tree item
| 5.669634
| 5.721627
| 0.990913
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.