code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
for term in query: term = term.decode('utf-8') for match in re.findall('[^A-Z]+', term): # Ignore field identifiers match_re = re.compile(match, re.I) content = match_re.sub('<%s>%s</%s>' % (tag, term, tag), content) return content
def _do_highlight(content, query, tag='em')
Highlight `query` terms in `content` with html `tag`. This method assumes that the input text (`content`) does not contain any special formatting. That is, it does not contain any html tags or similar markup that could be screwed up by the highlighting. Required arguments: `content` -- Content to search for instances of `text` `text` -- The text to be highlighted
3.754462
4.664884
0.804835
def _prepare_facet_field_spies(self, facets):
    """Return a list of match spies used to count facet frequencies,
    one `xapian.ValueCountMatchSpy` per facet."""
    spy_list = []
    for facet in facets:
        value_slot = self.column[facet]
        spy = xapian.ValueCountMatchSpy(value_slot)
        # Remember which column this spy targets so results can be
        # mapped back to fields later.
        spy.slot = value_slot
        spy_list.append(spy)
    return spy_list
7.246255
5.853224
1.237994
def _process_facet_field_spies(self, spies):
    """Return a dict of facet names mapped to lists of
    (term, term_frequency) tuples, built from spies that observed
    the enquire."""
    facet_dict = {}
    for spy in spies:
        field = self.schema[spy.slot]
        name, ftype = field['field_name'], field['type']
        facet_dict[name] = []
        for facet in list(spy.values()):
            if ftype == 'float':
                # Float terms are Xapian-serialized objects (bytes);
                # keep them raw for _from_xapian_value.
                value = facet.term
            else:
                value = facet.term.decode('utf-8')
            facet_dict[name].append((_from_xapian_value(value, ftype),
                                     facet.termfreq))
    return facet_dict
4.836967
4.528667
1.068077
facet_dict = {} for field in field_facets: facet_list = {} if not self._multi_value_field(field): continue for result in results: field_value = getattr(result, field) for item in field_value: # Facet each item in a MultiValueField facet_list[item] = facet_list.get(item, 0) + 1 facet_dict[field] = list(facet_list.items()) return facet_dict
def _do_multivalued_field_facets(self, results, field_facets)
Implements a multivalued field facet on the results. This is implemented using brute force - O(N^2) - because Xapian does not have it implemented yet (see http://trac.xapian.org/ticket/199)
3.040984
3.070614
0.99035
def next_datetime(previous, gap_value, gap_type): year = previous.year month = previous.month if gap_type == 'year': next = previous.replace(year=year + gap_value) elif gap_type == 'month': if month + gap_value <= 12: next = previous.replace(month=month + gap_value) else: next = previous.replace( month=((month + gap_value) % 12), year=(year + (month + gap_value) // 12) ) elif gap_type == 'day': next = previous + datetime.timedelta(days=gap_value) elif gap_type == 'hour': return previous + datetime.timedelta(hours=gap_value) elif gap_type == 'minute': next = previous + datetime.timedelta(minutes=gap_value) elif gap_type == 'second': next = previous + datetime.timedelta(seconds=gap_value) else: raise TypeError('\'gap_by\' must be ' '{second, minute, day, month, year}') return next facet_dict = {} for date_facet, facet_params in list(date_facets.items()): gap_type = facet_params.get('gap_by') gap_value = facet_params.get('gap_amount', 1) date_range = facet_params['start_date'] # construct the bins of the histogram facet_list = [] while date_range < facet_params['end_date']: facet_list.append((date_range, 0)) date_range = next_datetime(date_range, gap_value, gap_type) facet_list = sorted(facet_list, key=lambda x: x[0], reverse=True) for result in results: result_date = getattr(result, date_facet) # convert date to datetime if not isinstance(result_date, datetime.datetime): result_date = datetime.datetime(result_date.year, result_date.month, result_date.day) # ignore results outside the boundaries. if facet_list[0][0] < result_date < facet_list[-1][0]: continue # populate the histogram by putting the result on the right bin. for n, facet_date in enumerate(facet_list): if result_date > facet_date[0]: # equal to facet_list[n][1] += 1, but for a tuple facet_list[n] = (facet_list[n][0], (facet_list[n][1] + 1)) break # bin found; go to next result facet_dict[date_facet] = facet_list return facet_dict
def _do_date_facets(results, date_facets)
Private method that facets a document by date ranges Required arguments: `results` -- A list SearchResults to facet `date_facets` -- A dictionary containing facet parameters: {'field': {'start_date': ..., 'end_date': ...: 'gap_by': '...', 'gap_amount': n}} nb., gap must be one of the following: year|month|day|hour|minute|second For each date facet field in `date_facets`, generates a list of date ranges (from `start_date` to `end_date` by `gap_by`) then iterates through `results` and tallies the count for each date_facet. Returns a dictionary of date facets (fields) containing a list with entries for each range and a count of documents matching the range. eg. { 'pub_date': [ (datetime.datetime(2009, 1, 1, 0, 0), 5), (datetime.datetime(2009, 2, 1, 0, 0), 0), (datetime.datetime(2009, 3, 1, 0, 0), 0), (datetime.datetime(2008, 4, 1, 0, 0), 1), (datetime.datetime(2008, 5, 1, 0, 0), 2), ], }
2.360146
2.204152
1.070773
facet_dict = {} for field, query in list(dict(query_facets).items()): facet_dict[field] = (query, self.search(self.parse_query(query))['hits']) return facet_dict
def _do_query_facets(self, results, query_facets)
Private method that facets a document by query Required arguments: `results` -- A list SearchResults to facet `query_facets` -- A dictionary containing facet parameters: {'field': 'query', [...]} For each query in `query_facets`, generates a dictionary entry with the field name as the key and a tuple with the query and result count as the value. eg. {'name': ('a*', 5)}
4.884778
4.59495
1.063075
if spelling_query: if ' ' in spelling_query: return ' '.join([database.get_spelling_suggestion(term).decode('utf-8') for term in spelling_query.split()]) else: return database.get_spelling_suggestion(spelling_query).decode('utf-8') term_set = set() for term in query: for match in re.findall('[^A-Z]+', term.decode('utf-8')): # Ignore field identifiers term_set.add(database.get_spelling_suggestion(match).decode('utf-8')) return ' '.join(term_set)
def _do_spelling_suggestion(database, query, spelling_query)
Private method that returns a single spelling suggestion based on `spelling_query` or `query`. Required arguments: `database` -- The database to check spelling against `query` -- The query to check `spelling_query` -- If not None, this will be checked instead of `query` Returns a string with a suggested spelling
2.69007
2.846804
0.944944
def _database(self, writable=False):
    """
    Return a xapian database handle for use.

    Optional arguments:
        ``writable`` -- open the database read/write (default=False)

    Returns a xapian.Database or xapian.WritableDatabase; raises
    InvalidIndexError when a read-only index cannot be opened.
    """
    if self.path == MEMORY_DB_NAME:
        # Lazily create and reuse a single in-memory database.
        if not self.inmemory_db:
            self.inmemory_db = xapian.inmemory_open()
        return self.inmemory_db

    if writable:
        return xapian.WritableDatabase(self.path, xapian.DB_CREATE_OR_OPEN)
    try:
        return xapian.Database(self.path)
    except xapian.DatabaseOpeningError:
        raise InvalidIndexError('Unable to open index at %s' % self.path)
3.07641
3.002191
1.024722
def _get_enquire_mset(database, enquire, start_offset, end_offset,
                      checkatleast=DEFAULT_CHECK_AT_LEAST):
    """
    A safer version of Xapian.enquire.get_mset.

    Wraps the Xapian call and, on `xapian.DatabaseModifiedError`,
    reopens `database` and retries once.

    Required arguments:
        `database` -- the database being read
        `enquire` -- a Xapian.enquire instance
        `start_offset` -- start offset for `enquire.get_mset`
        `end_offset` -- end offset for `enquire.get_mset`
    """
    try:
        return enquire.get_mset(start_offset, end_offset, checkatleast)
    except xapian.DatabaseModifiedError:
        database.reopen()
        return enquire.get_mset(start_offset, end_offset, checkatleast)
2.642616
1.847775
1.430161
try: return document.get_data() except xapian.DatabaseModifiedError: database.reopen() return document.get_data()
def _get_document_data(database, document)
A safer version of Xapian.document.get_data Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`, attempting a `database.reopen` as needed. Required arguments: `database` -- The database to be read `document` -- An instance of an Xapian.document object
5.632086
2.834761
1.986794
return self._get_enquire_mset( database, enquire, 0, database.get_doccount() ).size()
def _get_hit_count(self, database, enquire)
Given a database and enquire instance, returns the estimated number of matches. Required arguments: `database` -- The database to be queried `enquire` -- The enquire instance
12.109031
14.995188
0.807528
for field_dict in self.schema: if field_dict['field_name'] == field: return field_dict['multi_valued'] == 'true' return False
def _multi_value_field(self, field)
Private method that returns `True` if a field is multi-valued, else `False`. Required arguemnts: `field` -- The field to lookup Returns a boolean value indicating whether the field is multi-valued.
3.711921
3.973377
0.934198
def _query_from_term(self, term, field_name, filter_type, is_not):
    """Uses arguments to construct a list of xapian.Query's."""
    if field_name != 'content' and field_name not in self.backend.column:
        raise InvalidIndexError('field "%s" not indexed' % field_name)

    # An AutoQuery has no filters or others, so short-circuit and hand
    # it straight to the backend's query parser.
    if isinstance(term, AutoQuery):
        if field_name != 'content':
            query = '%s:%s' % (field_name, term.prepare(self))
        else:
            query = term.prepare(self)
        return [self.backend.parse_query(query)]

    query_list = []

    # Handle `ValuesListQuerySet`.
    if hasattr(term, 'values_list'):
        term = list(term)

    if field_name == 'content':
        # content is the generic search: force no field_name search
        # and the field_type to be 'text'.
        field_name = None
        field_type = 'text'

        # We don't know what type(term) is, so we parse it.  Ideally this
        # would not be required, but some filters currently depend on the
        # parsed term to make decisions.
        term = _to_xapian_term(term)
        query_list.append(self._filter_contains(term, field_name, field_type, is_not))

        # When a filter has no filter_type, haystack uses
        # filter_type = 'content'; remove it here since the query above
        # already covers that case.
        if filter_type == 'content':
            filter_type = None
    else:
        # Get the field_type from the backend schema.
        field_type = self.backend.schema[self.backend.column[field_name]]['type']

        # Private fields don't accept 'contains' or 'startswith' since
        # they have no meaning there.
        if filter_type in ('contains', 'startswith') and field_name in (ID, DJANGO_ID, DJANGO_CT):
            filter_type = 'exact'

        if field_type == 'text':
            # We don't know what type "term" is, but we are searching as
            # text, so parse it accordingly.  Ideally this would not be
            # required since _term_query does it, but some filters
            # currently depend on the parsed term to make decisions.
            if isinstance(term, list):
                term = [_to_xapian_term(term) for term in term]
            else:
                term = _to_xapian_term(term)

    # todo: we should check that the filter is valid for this field_type
    # or raise InvalidIndexError
    if filter_type == 'contains':
        query_list.append(self._filter_contains(term, field_name, field_type, is_not))
    elif filter_type in ('content', 'exact'):
        query_list.append(self._filter_exact(term, field_name, field_type, is_not))
    elif filter_type == 'in':
        query_list.append(self._filter_in(term, field_name, field_type, is_not))
    elif filter_type == 'startswith':
        query_list.append(self._filter_startswith(term, field_name, field_type, is_not))
    elif filter_type == 'endswith':
        raise NotImplementedError("The Xapian search backend doesn't support endswith queries.")
    elif filter_type == 'gt':
        query_list.append(self._filter_gt(term, field_name, field_type, is_not))
    elif filter_type == 'gte':
        query_list.append(self._filter_gte(term, field_name, field_type, is_not))
    elif filter_type == 'lt':
        query_list.append(self._filter_lt(term, field_name, field_type, is_not))
    elif filter_type == 'lte':
        query_list.append(self._filter_lte(term, field_name, field_type, is_not))
    elif filter_type == 'range':
        query_list.append(self._filter_range(term, field_name, field_type, is_not))
    return query_list
2.975692
2.952984
1.00769
if field_type == 'text': term_list = term.split() else: term_list = [term] query = self._or_query(term_list, field_name, field_type) if is_not: return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query) else: return query
def _filter_contains(self, term, field_name, field_type, is_not)
Splits the sentence in terms and join them with OR, using stemmed and un-stemmed. Assumes term is not a list.
3.219475
2.998349
1.073749
def _filter_in(self, term_list, field_name, field_type, is_not):
    """
    Match exactly ANY term in `term_list`.

    Notice that:
        A in {B,C} <=> (A = B or A = C)
        ~(A in {B,C}) <=> ~(A = B or A = C)
    Because OP_AND_NOT(C, D) <=> (C and ~D), the inner exact filters are
    built with `is_not=False`.

    Assumes `term_list` is a list.
    """
    exact_queries = [self._filter_exact(item, field_name, field_type, is_not=False)
                     for item in term_list]
    any_query = xapian.Query(xapian.Query.OP_OR, exact_queries)
    if is_not:
        return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), any_query)
    return any_query
2.63617
2.447288
1.07718
if field_type == 'text' and field_name not in (DJANGO_CT,): term = '^ %s $' % term query = self._phrase_query(term.split(), field_name, field_type) else: query = self._term_query(term, field_name, field_type, stemmed=False) if is_not: return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query) else: return query
def _filter_exact(self, term, field_name, field_type, is_not)
Returns a query that matches exactly the un-stemmed term with positional order. Assumes term is not a list.
4.085574
3.854746
1.059881
if field_type == 'text': if len(term.split()) == 1: term = '^ %s*' % term query = self.backend.parse_query(term) else: term = '^ %s' % term query = self._phrase_query(term.split(), field_name, field_type) else: term = '^%s*' % term query = self.backend.parse_query(term) if is_not: return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query) return query
def _filter_startswith(self, term, field_name, field_type, is_not)
Returns a startswith query on the un-stemmed term. Assumes term is not a list.
3.340535
3.166472
1.05497
def _or_query(self, term_list, field, field_type):
    """Join each item of `term_list`, decorated by `_term_query`, with OR."""
    queries = [self._term_query(item, field, field_type) for item in term_list]
    return xapian.Query(xapian.Query.OP_OR, queries)
2.868106
2.208445
1.298699
def _phrase_query(self, term_list, field_name, field_type):
    """
    Build a query matching the exact terms in positional order
    (i.e. ["this", "thing"] != ["thing", "this"]) with no stemming.
    When `field_name` is not None, the match is restricted to that field.
    """
    parts = [self._term_query(item, field_name, field_type, stemmed=False)
             for item in term_list]
    return xapian.Query(xapian.Query.OP_PHRASE, parts)
3.65656
4.054632
0.901823
def _term_query(self, term, field_name, field_type, stemmed=True):
    """
    Construct a query for a single term.

    When `field_name` is not None, the term is searched on that field only.
    """
    constructor = '{prefix}{term}'

    # Construct the prefix to be used.
    prefix = ''
    if field_name:
        prefix = TERM_PREFIXES['field'] + field_name.upper()
        term = _to_xapian_term(term)

    if field_name in (ID, DJANGO_ID, DJANGO_CT):
        # Serialize the value the same way it was indexed.
        if field_name == DJANGO_ID:
            term = int(term)
        term = _term_to_xapian_value(term, field_type)
        return xapian.Query('%s%s' % (TERM_PREFIXES[field_name], term))

    # Dates are queried in a slightly different way: match the date part,
    # optionally boosted by the time part.
    if field_type == 'datetime':
        date, time = term.split()
        return xapian.Query(xapian.Query.OP_AND_MAYBE,
                            constructor.format(prefix=prefix, term=date),
                            constructor.format(prefix=prefix, term=time))

    # Only use stemming when the field is text or "None".
    if field_type not in ('text', None):
        stemmed = False

    unstemmed_term = constructor.format(prefix=prefix, term=term)
    if stemmed:
        stem = xapian.Stem(self.backend.language)
        stemmed_term = 'Z' + constructor.format(
            prefix=prefix, term=stem(term).decode('utf-8'))
        return xapian.Query(xapian.Query.OP_OR,
                            xapian.Query(stemmed_term),
                            xapian.Query(unstemmed_term))
    return xapian.Query(unstemmed_term)
3.636822
3.62434
1.003444
def _filter_gte(self, term, field_name, field_type, is_not):
    """
    Build a xapian value-range query matching any value of `field_name`
    from `term` upwards ('*' is the open upper bound).
    """
    vrp = XHValueRangeProcessor(self.backend)
    pos, begin, end = vrp(
        '%s:%s' % (field_name, _term_to_xapian_value(term, field_type)), '*')
    range_query = xapian.Query(xapian.Query.OP_VALUE_RANGE, pos, begin, end)
    if is_not:
        return xapian.Query(xapian.Query.OP_AND_NOT,
                            self._all_query(), range_query)
    return range_query
4.397281
3.669219
1.198424
def get(company='', company_uri=''):
    """
    Perform an HTTP GET for a glassdoor page and return the parsed results.

    One of `company` or `company_uri` is required; `company_uri` wins
    when both are given.
    """
    if not company and not company_uri:
        raise Exception("glassdoor.gd.get(company='', company_uri=''): "
                        " company or company_uri required")
    payload = {}
    if not company_uri:
        # Search by company name.
        payload.update({'clickSource': 'searchBtn',
                        'sc.keyword': company})
        uri = '%s/%s' % (GLASSDOOR_API, REVIEWS_URL)
    else:
        uri = '%s%s' % (GLASSDOOR_API, company_uri)
    response = requests.get(uri, params=payload)
    soup = BeautifulSoup(response.content)
    return parse(soup)
4.005687
3.770142
1.062476
def parse_suggestions(soup):
    """Suggest similar/related companies to the query."""
    companies = soup.findAll('div', {'class': 'companyData'})

    def is_exact_match(company):
        # Either glassdoor tagged it as exact, or the names match.
        searched_name = soup.findAll('input', {'name': 'sc.keyword'})[0]['value']
        actual_name = company.findAll('h3')[0].text
        tagged_exact = bool(company.findAll(
            'i', {'class': 'chickletExactMatch chicklet'}))
        return tagged_exact or searched_name.lower() == actual_name.lower()

    def parse_suggestion(company):
        return {'name': company.findAll('h3')[0].text,
                'uri': company.findAll('a')[1]['href'],
                'exact': is_exact_match(company)}

    suggestions = []
    for company in companies:
        try:
            suggestions.append(parse_suggestion(company))
        except IndexError:
            # Skip entries lacking the expected markup.
            pass
    return suggestions
3.740924
3.505131
1.067271
def parse(soup):
    """
    Parse the results of a company search.

    On a direct match, return the parsed company data.  Otherwise return
    a list of suggestion dicts; when one suggestion is an exact match,
    re-perform the request for that match instead.
    """
    if is_direct_match(soup):
        return {'satisfaction': parse_satisfaction(soup),
                'ceo': parse_ceo(soup),
                'meta': parse_meta(soup),
                'salary': parse_salary(soup)}
    suggestions = parse_suggestions(soup)
    exact = next((s for s in suggestions if s['exact']), None)
    if exact:
        return get(company_uri=exact['uri'])
    return suggestions
4.718046
3.612902
1.305888
def init_app(self, app):
    """
    Register this extension with the flask app.

    :param app: a flask application
    """
    if not hasattr(app, 'extensions'):  # pragma: no cover
        app.extensions = {}
    # Saved so the extension can be looked up from the app later.
    app.extensions['flask-graphql-auth'] = self
    self._set_default__configuration_options(app)
6.75704
7.115058
0.949682
app.config.setdefault('JWT_TOKEN_ARGUMENT_NAME', "token") # Name of token argument in GraphQL request resolver app.config.setdefault('JWT_REFRESH_TOKEN_ARGUMENT_NAME', "refresh_token") app.config.setdefault('JWT_ACCESS_TOKEN_EXPIRES', datetime.timedelta(minutes=15)) app.config.setdefault('JWT_REFRESH_TOKEN_EXPIRES', datetime.timedelta(days=30)) app.config.setdefault('JWT_SECRET_KEY', None) app.config.setdefault('JWT_IDENTITY_CLAIM', 'identity') app.config.setdefault('JWT_USER_CLAIMS', 'user_claims')
def _set_default__configuration_options(app)
Sets the default configuration options used by this extension
2.312696
2.290972
1.009482
def decode_jwt(encoded_token, secret, algorithm, identity_claim_key, user_claims_key):
    """
    Decode an encoded JWT and validate the claims this extension expects.

    :param encoded_token: the encoded JWT string to decode
    :param secret: secret key used to encode the JWT
    :param algorithm: algorithm used to encode the JWT
    :param identity_claim_key: expected key that contains the identity
    :param user_claims_key: expected key that contains the user claims
    :return: dictionary containing the contents of the JWT
    """
    # jwt.decode itself verifies the exp, iat, and nbf claims.
    data = jwt.decode(encoded_token, secret, algorithms=[algorithm])

    # Make sure the custom claims we expect are present.
    if 'jti' not in data:
        raise JWTDecodeError("Missing claim: jti")
    if identity_claim_key not in data:
        raise JWTDecodeError("Missing claim: {}".format(identity_claim_key))
    if 'type' not in data or data['type'] not in ('refresh', 'access'):
        raise JWTDecodeError("Missing or invalid claim: type")
    if user_claims_key not in data:
        data[user_claims_key] = {}
    return data
2.782923
3.017619
0.922225
def get_jwt_data(token, token_type):
    """
    Decode an encoded JWT using the extension settings and validate
    its type.

    :param token: the encoded JWT string to decode
    :param token_type: expected JWT type ('access' or 'refresh')
    :return: dictionary containing the contents of the JWT
    """
    jwt_data = decode_jwt(
        encoded_token=token,
        secret=current_app.config['JWT_SECRET_KEY'],
        algorithm='HS256',
        identity_claim_key=current_app.config['JWT_IDENTITY_CLAIM'],
        user_claims_key=current_app.config['JWT_USER_CLAIMS'],
    )
    # Token type verification.
    if jwt_data['type'] != token_type:
        raise WrongTokenError('Only {} tokens are allowed'.format(token_type))
    return jwt_data
2.658853
2.987367
0.890032
def query_jwt_required(fn):
    """
    Decorator protecting a query resolver.

    Ensures the requester supplied a valid access token (in the configured
    token argument) before the resolver is called; on failure returns an
    AuthInfoField carrying the error message.  Does not check token
    freshness.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        # Bug fix: removed a leftover debug `print(args[0])` that leaked
        # resolver arguments to stdout on every call.
        token = kwargs.pop(current_app.config['JWT_TOKEN_ARGUMENT_NAME'])
        try:
            verify_jwt_in_argument(token)
        except Exception as e:
            return AuthInfoField(message=str(e))
        return fn(*args, **kwargs)
    return wrapper
3.611525
4.220037
0.855804
def mutation_jwt_required(fn):
    """
    Decorator protecting a mutation.

    Ensures the requester supplied a valid access token (in the configured
    token argument) before the mutation is called; on failure returns
    `cls(AuthInfoField(...))` carrying the error message.  Does not check
    token freshness.
    """
    @wraps(fn)
    def wrapper(cls, *args, **kwargs):
        token = kwargs.pop(current_app.config['JWT_TOKEN_ARGUMENT_NAME'])
        try:
            verify_jwt_in_argument(token)
        except Exception as e:
            return cls(AuthInfoField(message=str(e)))
        return fn(cls, *args, **kwargs)
    return wrapper
3.625794
4.204237
0.862414
def mutation_jwt_refresh_token_required(fn):
    """
    Decorator protecting a mutation.

    Ensures the requester supplied a valid refresh token (in the configured
    refresh-token argument) before the mutation is called; on failure
    returns `cls(AuthInfoField(...))` carrying the error message.
    """
    @wraps(fn)
    def wrapper(cls, *args, **kwargs):
        token = kwargs.pop(current_app.config['JWT_REFRESH_TOKEN_ARGUMENT_NAME'])
        try:
            verify_refresh_jwt_in_argument(token)
        except Exception as e:
            return cls(AuthInfoField(message=str(e)))
        # Bug fix: `cls` was dropped here (`fn(*args, **kwargs)`), unlike
        # the matching access-token decorator, so decorated mutations lost
        # their first argument.
        return fn(cls, *args, **kwargs)
    return wrapper
3.859932
4.23559
0.911309
def _hid_enumerate(vendor_id=0, product_id=0):
    """
    Enumerate all HID devices matching VID:PID.

    If `vendor_id` is 0 any vendor matches; likewise `product_id` 0
    matches any product.  Both zero enumerates all HID devices.

    Returns a list of `HIDDevice` objects.
    """
    start = hidapi.hid_enumerate(vendor_id, product_id)
    result = []
    # Walk the C linked list, copying each node into a Python object.
    # (Removed a dead `ffi.new("struct hid_device_info*")` allocation that
    # was immediately overwritten by `cur = start`.)
    cur = start
    while cur != ffi.NULL:
        result.append(HIDDevice(cur))
        cur = cur.next
    # Free the C memory backing the enumeration.
    hidapi.hid_free_enumeration(start)
    return result
4.798391
4.782676
1.003286
def open(self):
    """Open the HID device for reading and writing.

    Raises HIDException when already open or when the open fails.
    """
    if self._is_open:
        raise HIDException("Failed to open device: HIDDevice already open")
    handle = hidapi.hid_open_path(self.path.encode('utf-8'))
    if not handle:
        raise HIDException("Failed to open device")
    self._is_open = True
    self._device = handle
4.164388
3.562997
1.168788
def close(self):
    """Close the hid device; a no-op when it is not open."""
    if not self._is_open:
        return
    self._is_open = False
    hidapi.hid_close(self._device)
5.848413
4.595288
1.272698
def write(self, data, report_id=0):
    """
    Write an output report to the HID device's endpoint.

    Parameters:
        data: data to send on the HID endpoint
        report_id: the report ID to use (prefixed to the data)

    Returns: the number of bytes written, including the report ID.
    """
    if not self._is_open:
        raise HIDException("HIDDevice not open")
    payload = bytearray([report_id]) + bytearray(data)
    cdata = ffi.new("const unsigned char[]", bytes(payload))
    written = hidapi.hid_write(self._device, cdata, len(payload))
    if written < 0:
        raise HIDException("Failed to write to HID device: " + str(written))
    return written
3.110806
2.870578
1.083686
def read(self, size=64, timeout=None):
    """
    Read a report from the HID device's endpoint.

    Parameters:
        size: number of bytes to read
        timeout: milliseconds to wait; None blocks indefinitely

    Returns: the HID report as a bytearray (first byte is the report ID
    when used), or [] when nothing was read.
    """
    if not self._is_open:
        raise HIDException("HIDDevice not open")
    cdata = ffi.new("unsigned char[]", [0] * size)
    if timeout is None:
        bytes_read = hidapi.hid_read(self._device, cdata, len(cdata))
    else:
        bytes_read = hidapi.hid_read_timeout(self._device, cdata,
                                             len(cdata), timeout)
    if bytes_read < 0:
        raise HIDException("Failed to read from HID device: " + str(bytes_read))
    if bytes_read == 0:
        return []
    return bytearray(cdata)
2.486699
2.43341
1.021899
def is_connected(self):
    """Check whether the USB device is still connected."""
    if self._is_open:
        # A zero-length, zero-timeout read returns -1 once the device
        # has gone away.
        return hidapi.hid_read_timeout(self._device, ffi.NULL, 0, 0) != -1
    # Not open: look the device up again by vid/pid/path.
    matches = Enumeration(vid=self.vendor_id,
                          pid=self.product_id).find(path=self.path)
    return len(matches) != 0
4.541432
4.173224
1.088231
def send_feature_report(self, data, report_id=0x00):
    """
    Send a Feature report to the HID device (Set_Report transfer over
    the Control endpoint).

    Parameters:
        data: the data to send
        report_id: the report ID to use (prefixed to the data)

    Returns: the actual number of bytes written.
    """
    if not self._is_open:
        raise HIDException("HIDDevice not open")
    report = bytearray([report_id]) + bytearray(data)
    cdata = ffi.new("const unsigned char[]", bytes(report))
    written = hidapi.hid_send_feature_report(self._device, cdata, len(report))
    if written == -1:
        raise HIDException("Failed to send feature report to HID device")
    return written
3.330489
3.646932
0.91323
def get_feature_report(self, size, report_id=0x00):
    """
    Get a Feature report from the HID device (Get_Report transfer over
    the Control endpoint).

    Parameters:
        size: number of bytes to read (excluding the report ID)
        report_id: the report id to read

    Returns: the bytes read as a bytearray.
    """
    # One extra byte for the report ID prefix.
    buf = ffi.new("unsigned char[]", bytes([0] * (size + 1)))
    buf[0] = report_id
    count = hidapi.hid_get_feature_report(self._device, buf, len(buf))
    if count == -1:
        raise HIDException("Failed to get feature report from HID device")
    # Strip the report ID byte from the result.
    return bytearray(buf[1:size + 1])
3.52071
3.895313
0.903832
def get_error(self):
    """Return the device's last error string, or None when there is none."""
    err = hidapi.hid_error(self._device)
    return None if err == ffi.NULL else ffi.string(err)
5.858056
4.607792
1.271337
def get_indexed_string(self, index):
    """
    Get the string with the given `index` from the device.

    Returns None when the string is empty; raises HIDException on error.
    """
    max_len = 128
    str_buf = ffi.new("wchar_t[]", str(bytearray(max_len)))
    ret = hidapi.hid_get_indexed_string(self._device, index, str_buf, max_len)
    if ret < 0:
        # Bug fix: this raised via `self._device.get_error()`, but
        # `_device` is the raw hidapi handle; `get_error` is a method of
        # this class (see the sibling accessor, which calls
        # hidapi.hid_error(self._device)).
        raise HIDException(self.get_error())
    elif ret == 0:
        return None
    else:
        # NOTE(review): `.encode('utf-8')` returns bytes while the other
        # string accessors return str — looks suspicious but is preserved
        # here; confirm against callers before changing.
        return ffi.string(str_buf).encode('utf-8')
3.471856
3.123007
1.111703
# NOTE(review): the multi-line template string this `.format(...)` call was
# applied to appears to have been lost when the source was flattened —
# `return \` is immediately followed by `.format(`, which is not valid
# Python as written.  Recover the original format template (it interpolated
# path, vendor/product ids, strings, release number, usage page/usage)
# before using this code.
return \ .format(self.path, self.vendor_id, self.product_id, self.manufacturer_string, self.product_string, self.serial_number, self.release_number, self.usage_page, self.usage )
def description(self)
Get a string describing the HID descriptor.
3.98342
3.099504
1.28518
def find(self, vid=None, pid=None, serial=None, interface=None,
         path=None, release_number=None, manufacturer=None, product=None,
         usage=None, usage_page=None):
    """
    Filter the devices in this `Enumeration` by the given parameters.

    Args:
        vid: filters by USB Vendor ID (0/None match any)
        pid: filters by USB Product ID (0/None match any)
        serial: filters by USB serial string (.iSerialNumber)
        interface: filters by interface number (bInterfaceNumber)
        release_number: filters by USB release number (.bcdDevice)
        manufacturer: filters by USB manufacturer string (.iManufacturer)
        product: filters by USB product string (.iProduct)
        usage: filters by HID usage
        usage_page: filters by HID usage_page
        path: filters by HID API path

    Returns the list of matching devices.
    """
    matches = []
    for dev in self.device_list:
        if vid not in (0, None) and dev.vendor_id != vid:
            continue
        if pid not in (0, None) and dev.product_id != pid:
            continue
        if serial and dev.serial_number != serial:
            continue
        if path and dev.path != path:
            continue
        if manufacturer and dev.manufacturer_string != manufacturer:
            continue
        if product and dev.product_string != product:
            continue
        if release_number is not None and dev.release_number != release_number:
            continue
        if interface is not None and dev.interface_number != interface:
            continue
        if usage is not None and dev.usage != usage:
            continue
        if usage_page is not None and dev.usage_page != usage_page:
            continue
        matches.append(dev)
    return matches
1.589043
1.597328
0.994813
def get_tracks(self):
    """
    Retrieve (once) and return the album's tracks.

    :return: List. Tracks of the current album.
    """
    if not self._track_list:
        # The first lookup result is the collection itself; skip it.
        fetched = itunespy.lookup(id=self.collection_id,
                                  entity=itunespy.entities['song'])[1:]
        self._track_list.extend(fetched)
    return self._track_list
5.006522
4.738788
1.056498
def get_album_time(self, round_number=2):
    """
    Sum the lengths of all of the album's tracks, in minutes.

    :param round_number: Int. Number of decimals to round the sum to
    :return: the rounded total length (cached after the first call)
    """
    if not self._track_list:
        self.get_tracks()
    if self._album_time is None:
        total = sum(track.get_track_time_minutes()
                    for track in self._track_list)
        self._album_time = round(total, round_number)
    return self._album_time
2.523716
2.473849
1.020158
def get_movies(self):
    """
    Retrieve all the movies published by the artist.

    :return: List. Movies published by the artist.
    """
    # The first lookup entry is the artist record itself; skip it.
    lookup_results = itunespy.lookup(id=self.artist_id,
                                     entity=itunespy.entities['movie'])
    return lookup_results[1:]
18.179668
16.874836
1.077324
def psychrometrics (Tdb_in, w_in, P):
    """
    Psychrometric properties from dry-bulb temperature and humidity ratio.
    (Modified version of Psychometrics by Tea Zakula, MIT Building
    Technology Lab.)

    Input:
        Tdb_in -- [K] dry bulb temperature
        w_in   -- [kgv/kgda] humidity ratio
        P      -- [Pa] atmospheric station pressure
    Output:
        Tdb -- [C] dry bulb temperature
        w   -- [kgv/kgda] humidity ratio
        phi -- [Pw/Pws*100] relative humidity
        h   -- [J/kga] enthalpy
        Tdp -- [C] dew point temperature
        v   -- [m3/kga] specific volume
    """
    # Constants from ASHRAE Fundamentals.
    c_air = 1006.     # [J/kg] air heat capacity
    hlg = 2501000.    # [J/kg] latent heat
    cw = 1860.        # [J/kg] heat capacity of water vapour

    # Change units.
    P = P/1000.           # convert from Pa to kPa
    Tdb = Tdb_in - 273.15
    w = w_in

    # phi (RH) from Tdb and w.
    Pw = (w*P)/(0.621945 + w)       # partial pressure of water vapor
    Pws = saturation_pressure(Tdb)  # saturation pressure for the given Tdb
    phi = Pw/Pws*100.0

    # Enthalpy from Tdb and w.  [J kga-1]
    h = c_air*Tdb + w*(hlg+cw*Tdb)

    # Specific volume from Tdb and w.
    v = 0.287042 * (Tdb+273.15)*(1+1.607858*w)/P

    # Dew point from w; correlation valid for Tdp between 0 C and 93 C.
    _pw = (w*P)/(0.621945 + w)  # water vapor partial pressure in kPa
    alpha = log(_pw)
    Tdp = 6.54 + 14.526*alpha + pow(alpha,2)*0.7389 + pow(alpha,3)*0.09486 + pow(_pw,0.1984)*0.4569

    return Tdb, w, phi, h, Tdp, v
5.697037
5.160122
1.104051
def search(term, country='US', media='all', entity=None, attribute=None, limit=50):
    """
    Returns the result of the search of the specified term in an array of result_item(s)
    :param term: String. The URL-encoded text string you want to search for. Example: Steven Wilson.
                 The method will take care of spaces so you don't have to.
    :param country: String. The two-letter country code for the store you want to search.
                    For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2
    :param media: String. The media type you want to search for. Example: music
    :param entity: String. The type of results you want returned, relative to the specified media type.
                   Example: musicArtist. Full list: musicArtist, musicTrack, album, musicVideo, mix, song
    :param attribute: String. The attribute you want to search for in the stores, relative to the
                      specified media type.
    :param limit: Integer. The number of search results you want the iTunes Store to return.
    :return: An array of result_item(s)
    :raises ConnectionError: when the response body is not valid/expected JSON
    :raises LookupError: when the search yields no results
    """
    search_url = _url_search_builder(term, country, media, entity, attribute, limit)
    r = requests.get(search_url)
    try:
        # Parse the body once instead of twice; only malformed/unexpected
        # payloads should be treated as a connection problem (the original
        # bare `except:` swallowed everything, including KeyboardInterrupt).
        payload = r.json()
        results_json = payload['results']
        result_count = payload['resultCount']
    except (ValueError, KeyError):
        raise ConnectionError(general_no_connection)
    if result_count == 0:
        raise LookupError(search_error + str(term))
    return _get_result_list(results_json)
3.642287
4.175779
0.872241
def lookup(id=None, artist_amg_id=None, upc=None, country='US', media='all', entity=None,
           attribute=None, limit=50):
    """
    Returns the result of the lookup of the specified id, artist_amg_id or upc in an
    array of result_item(s)
    :param id: String. iTunes ID of the artist, album, track, ebook or software
    :param artist_amg_id: String. All Music Guide ID of the artist
    :param upc: String. UPCs/EANs
    :param country: String. The two-letter country code for the store you want to search.
                    For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2
    :param media: String. The media type you want to search for. Example: music
    :param entity: String. The type of results you want returned, relative to the specified media type.
    :param attribute: String. The attribute you want to search for in the stores.
    :param limit: Integer. The number of search results you want the iTunes Store to return.
    :return: An array of result_item(s)
    :raises ValueError: when no id, artist_amg_id or upc is provided
    :raises ConnectionError: when the response body is not valid/expected JSON
    :raises LookupError: when the lookup yields no results
    """
    # If none of the basic lookup arguments are provided, raise a ValueError
    if id is None and artist_amg_id is None and upc is None:
        raise ValueError(lookup_no_ids)
    lookup_url = _url_lookup_builder(id, artist_amg_id, upc, country, media, entity, attribute, limit)
    r = requests.get(lookup_url)
    try:
        # Parse once; narrow the previous bare `except:` to the actual
        # JSON-decoding / missing-key failure modes.
        payload = r.json()
        results_json = payload['results']
        result_count = payload['resultCount']
    except (ValueError, KeyError):
        raise ConnectionError(general_no_connection)
    if result_count == 0:
        raise LookupError(lookup_error)
    return _get_result_list(results_json)
3.083498
3.504871
0.879775
def _get_result_list(json):
    """
    Analyzes the provided JSON data and returns an array of result_item(s)
    based on its content
    :param json: Raw JSON data to analyze
    :return: An array of result_item(s) from the provided JSON data
    """
    result_list = []
    for item in json:
        if 'wrapperType' in item:
            # Music
            if item['wrapperType'] == 'artist' and item['artistType'] == 'Artist':
                music_artist_result = music_artist.MusicArtist(item)
                result_list.append(music_artist_result)
            elif item['wrapperType'] == 'collection' and item['collectionType'] == 'Album':
                music_album_result = music_album.MusicAlbum(item)
                result_list.append(music_album_result)
            elif item['wrapperType'] == 'track' and item['kind'] == 'song':
                music_track_result = track.Track(item)
                result_list.append(music_track_result)
            elif item['wrapperType'] == 'track' and item['kind'] == 'music-video':
                music_video_result = track.Track(item)
                result_list.append(music_video_result)
            # Movies
            elif item['wrapperType'] == 'artist' and item['artistType'] == 'Movie Artist':
                movie_artist_result = movie_artist.MovieArtist(item)
                result_list.append(movie_artist_result)
            elif item['wrapperType'] == 'track' and item['kind'] == 'feature-movie':
                movie_result = track.Track(item)
                result_list.append(movie_result)
            # Ebook Author
            elif item['wrapperType'] == 'artist' and item['artistType'] == 'Author':
                ebook_artist_result = ebook_artist.EbookArtist(item)
                result_list.append(ebook_artist_result)
            # Tv Shows
            elif item['wrapperType'] == 'collection' and item['collectionType'] == 'TV Season':
                tv_season_result = result_item.ResultItem(item)
                result_list.append(tv_season_result)
            elif item['wrapperType'] == 'track' and item['kind'] == 'tv-episode':
                tv_episode_result = track.Track(item)
                result_list.append(tv_episode_result)
            # Software
            elif item['wrapperType'] == 'software' and item['kind'] == 'software':
                software_result = result_item.ResultItem(item)
                result_list.append(software_result)
            elif item['wrapperType'] == 'software' and item['kind'] == 'mac-software':
                mac_software_result = result_item.ResultItem(item)
                result_list.append(mac_software_result)
            # NOTE(review): items with a wrapperType that matches none of the
            # branches above are silently dropped — confirm this is intended.
        elif 'kind' in item and item['kind'] == 'ebook':
            ebook_result = result_item.ResultItem(item)
            result_list.append(ebook_result)
        else:
            # Anything unrecognized is wrapped as a generic result item.
            unknown_result = result_item.ResultItem(item)
            result_list.append(unknown_result)
    return result_list
1.63947
1.645373
0.996413
def _url_search_builder(term, country='US', media='all', entity=None, attribute=None, limit=50):
    """
    Builds the URL to perform the search based on the provided data.

    :param term: String. The URL-encoded text string you want to search for.
    :param country: String. The two-letter country code for the store you want to search.
    :param media: String. The media type you want to search for. Example: music
    :param entity: String. The type of results you want returned, relative to the media type.
    :param attribute: String. The attribute you want to search for in the stores.
    :param limit: Integer. The number of search results the iTunes Store should return.
    :return: The built URL as a string
    """
    url = base_search_url + _parse_query(term)
    url += ampersand + parameters[1] + country
    url += ampersand + parameters[2] + media
    # Optional query parameters are only appended when supplied.
    if entity is not None:
        url += ampersand + parameters[3] + entity
    if attribute is not None:
        url += ampersand + parameters[4] + attribute
    return url + ampersand + parameters[5] + str(limit)
2.388446
2.672488
0.893716
def _url_lookup_builder(id=None, artist_amg_id=None, upc=None, country='US', media='music',
                        entity=None, attribute=None, limit=50):
    """
    Builds the URL to perform the lookup based on the provided data.

    :param id: String. iTunes ID of the artist, album, track, ebook or software
    :param artist_amg_id: String. All Music Guide ID of the artist
    :param upc: String. UPCs/EANs
    :param country: String. The two-letter country code for the store you want to search.
    :param media: String. The media type you want to search for. Example: music
    :param entity: String. The type of results you want returned, relative to the media type.
    :param attribute: String. The attribute you want to search for in the stores.
    :param limit: Integer. The number of search results the iTunes Store should return.
    :return: The built URL as a string
    """
    built_url = base_lookup_url
    has_one_argument = False
    if id is not None:
        built_url += parameters[6] + str(id)
        has_one_argument = True
    if artist_amg_id is not None:
        # Consistency fix: always wrap the id in str() — the original only
        # did so in one branch, breaking non-string amg ids when an id was
        # already present.
        if has_one_argument:
            built_url += ampersand + parameters[7] + str(artist_amg_id)
        else:
            built_url += parameters[7] + str(artist_amg_id)
        has_one_argument = True
    if upc is not None:
        if has_one_argument:
            built_url += ampersand + parameters[8] + str(upc)
        else:
            built_url += parameters[8] + str(upc)
    built_url += ampersand + parameters[1] + country
    built_url += ampersand + parameters[2] + media
    if entity is not None:
        built_url += ampersand + parameters[3] + entity
    if attribute is not None:
        built_url += ampersand + parameters[4] + attribute
    built_url += ampersand + parameters[5] + str(limit)
    return built_url
1.676905
1.738685
0.964468
def get_albums(self):
    """
    Retrieves all the albums by the artist.

    :return: List. Albums published by the artist
    """
    # Drop the leading artist entry returned by the lookup.
    results = itunespy.lookup(id=self.artist_id, entity=itunespy.entities['album'])
    return results[1:]
13.800408
15.161406
0.910233
def str2fl(x):
    """
    Recurses through lists and converts lists of strings to floats.

    Args:
        x: list of strings, or list of lists of strings

    Returns:
        A new list mirroring `x` with numeric strings converted to float
        (empty strings become the string "null", non-numeric strings pass
        through unchanged), or False when the first element is neither a
        string nor a list.
    """
    def _to_float(s):
        # Preserve legacy behavior: empty fields become the string "null".
        if s == "":
            return "null"
        if "," in s:
            s = s.replace(",", "")  # strip thousands separators
        try:
            return float(s)
        except ValueError:
            # Narrowed from a bare `except:`; only conversion failures
            # should pass the string through unchanged.
            return s

    if isinstance(x[0], str):
        # List of strings: convert each element.
        return [_to_float(s) for s in x]
    if isinstance(x[0], list):
        # List of lists: recurse into each sublist.
        return [str2fl(sub) for sub in x]
    return False
3.726605
3.445904
1.081459
def procMat(materials, max_thickness, min_thickness):
    """Processes material layer so that a material with single layer thickness is
    divided into two and material layer that is too thick is subdivided.

    :param materials: material object exposing layerThickness, layerThermalCond,
                      layerVolHeat and _name
    :param max_thickness: [m] maximum allowed layer thickness before subdivision
    :param min_thickness: [m] minimum allowed layer thickness
    :return: tuple (newmat, newthickness) of Material objects and thicknesses
    """
    newmat = []
    newthickness = []
    k = materials.layerThermalCond
    Vhc = materials.layerVolHeat

    if len(materials.layerThickness) > 1:
        for j in range(len(materials.layerThickness)):
            # Break up each layer that's more than max thickness (0.05m)
            if materials.layerThickness[j] > max_thickness:
                nlayers = math.ceil(materials.layerThickness[j]/float(max_thickness))
                for i in range(int(nlayers)):
                    newmat.append(Material(k[j], Vhc[j], name=materials._name))
                    newthickness.append(materials.layerThickness[j]/float(nlayers))
            elif materials.layerThickness[j] < min_thickness:
                # Material that's less than min_thickness is not added.
                # Bug fix: .format() was previously called on print()'s return
                # value (None), which raised AttributeError instead of warning.
                print("WARNING: Material '{}' layer found too thin (<{:.2f}cm), ignored.".format(
                    materials._name, min_thickness*100))
            else:
                newmat.append(Material(k[j], Vhc[j], name=materials._name))
                newthickness.append(materials.layerThickness[j])
    else:
        # Divide single layer into two (uwg assumes at least 2 layers)
        if materials.layerThickness[0] > max_thickness:
            nlayers = math.ceil(materials.layerThickness[0]/float(max_thickness))
            for i in range(int(nlayers)):
                newmat.append(Material(k[0], Vhc[0], name=materials._name))
                newthickness.append(materials.layerThickness[0]/float(nlayers))
        elif materials.layerThickness[0] < min_thickness*2:
            # Material should be at least 1cm thick, so if we're here,
            # should give warning and stop. Only warning given for now.
            newthickness = [min_thickness/2., min_thickness/2.]
            newmat = [Material(k[0], Vhc[0], name=materials._name),
                      Material(k[0], Vhc[0], name=materials._name)]
            print("WARNING: a thin (<2cm) single material '{}' layer found. May cause error.".format(
                materials._name))
        else:
            newthickness = [materials.layerThickness[0]/2., materials.layerThickness[0]/2.]
            newmat = [Material(k[0], Vhc[0], name=materials._name),
                      Material(k[0], Vhc[0], name=materials._name)]
    return newmat, newthickness
null
null
null
def read_epw(self):
    """Section 2 - Read EPW file

    properties set:
        self.climateDataPath
        self.newPathName
        self._header      # header data
        self.epwinput     # timestep data for weather
        self.lat          # latitude
        self.lon          # longitude
        self.GMT          # GMT
        self.nSoil        # Number of soil depths
        self.Tsoil        # nSoil x 12 matrix for soil temperture (K)
        self.depth_soil   # nSoil x 1 matrix for soil depth (m)
    """
    # Make dir path to epw file
    self.climateDataPath = os.path.join(self.epwDir, self.epwFileName)

    # Open epw file and feed csv data to climate_data
    try:
        climate_data = utilities.read_csv(self.climateDataPath)
    except Exception as e:
        # NOTE(review): `e.message` is Python-2 only; on Python 3 this line
        # itself raises AttributeError — confirm the target interpreter.
        raise Exception("Failed to read epw file! {}".format(e.message))

    # Read header lines (1 to 8) from EPW and ensure TMY2 format.
    self._header = climate_data[0:8]

    # Read weather data from EPW for each time step in weather file. (lines 8 - end)
    self.epwinput = climate_data[8:]

    # Read Lat, Long (line 1 of EPW)
    self.lat = float(self._header[0][6])
    self.lon = float(self._header[0][7])
    self.GMT = float(self._header[0][8])

    # Read in soil temperature data (assumes this is always there)
    # ref: http://bigladdersoftware.com/epx/docs/8-2/auxiliary-programs/epw-csv-format-inout.html
    soilData = self._header[3]
    self.nSoil = int(soilData[1])  # Number of ground temperature depths
    self.Tsoil = utilities.zeros(self.nSoil, 12)  # nSoil x 12 matrix for soil temperture (K)
    self.depth_soil = utilities.zeros(self.nSoil, 1)  # nSoil x 1 matrix for soil depth (m)

    # Read monthly data for each layer of soil from EPW file
    for i in range(self.nSoil):
        self.depth_soil[i][0] = float(soilData[2 + (i*16)])  # get soil depth for each nSoil
        # Monthly data
        for j in range(12):
            # 12 months of soil T for specific depth; EPW stores Celsius, convert to K.
            self.Tsoil[i][j] = float(soilData[6 + (i*16) + j]) + 273.15

    # Set new directory path for the morphed EPW file
    self.newPathName = os.path.join(self.destinationDir, self.destinationFileName)
null
null
null
def set_input(self):
    """Set inputs from the .uwg input file if not already defined, then check
    that all the required input parameters are present."""
    # User-defined class properties take precedence over the .uwg file inputs.
    if self.uwgParamFileName is None:
        print("\nNo .uwg file input.")
    else:
        print("\nReading uwg file input.")
        self.read_input()
    self.check_required_inputs()

    # Convert the climate zone to a zero-based python index.
    self.zone = int(self.zone) - 1
null
null
null
def init_BEM_obj(self):
    """Define BEM for each DOE type (read the fraction)

    Sets:
        self.BEM            # list of BEMDef objects
        self.Sch            # list of Schedule objects
        self.r_glaze_total  # Glazing ratio for total building stock
        self.SHGC_total     # SHGC addition for total building stock
        self.alb_wall_total # albedo wall addition for total building stock
    """
    if not os.path.exists(self.readDOE_file_path):
        # Bug fix: the message previously referenced the undefined name
        # `readDOE_file_path` (missing `self.`), raising NameError instead.
        raise Exception("readDOE.pkl file: '{}' does not exist.".format(self.readDOE_file_path))

    # Open pickle file in binary form; `with` guarantees it is closed even on error.
    with open(self.readDOE_file_path, 'rb') as readDOE_file:
        refDOE = pickle.load(readDOE_file)
        refBEM = pickle.load(readDOE_file)
        refSchedule = pickle.load(readDOE_file)

    # Define building energy models
    k = 0
    self.r_glaze_total = 0.   # Glazing ratio for total building stock
    self.SHGC_total = 0.      # SHGC addition for total building stock
    self.alb_wall_total = 0.  # albedo wall addition for total building stock
    h_floor = self.flr_h or 3.05  # average floor height

    # total building floor area
    total_urban_bld_area = math.pow(self.charLength, 2)*self.bldDensity * \
        self.bldHeight/h_floor

    self.BEM = []  # list of BEMDef objects
    self.Sch = []  # list of Schedule objects

    for i in range(16):  # 16 building types
        for j in range(3):  # 3 built eras
            if self.bld[i][j] > 0.:
                # Add to BEM list
                self.BEM.append(refBEM[i][j][self.zone])
                self.BEM[k].frac = self.bld[i][j]
                self.BEM[k].fl_area = self.bld[i][j] * total_urban_bld_area

                # Overwrite with optional parameters if provided
                if self.glzR:
                    self.BEM[k].building.glazingRatio = self.glzR
                if self.albRoof:
                    self.BEM[k].roof.albedo = self.albRoof
                if self.vegRoof:
                    self.BEM[k].roof.vegCoverage = self.vegRoof
                if self.SHGC:
                    self.BEM[k].building.shgc = self.SHGC
                if self.albWall:
                    self.BEM[k].wall.albedo = self.albWall
                if self.flr_h:
                    self.BEM[k].building.floorHeight = self.flr_h

                # Keep track of total urban r_glaze, SHGC, and alb_wall for UCM model
                self.r_glaze_total += self.BEM[k].frac * self.BEM[k].building.glazingRatio
                self.SHGC_total += self.BEM[k].frac * self.BEM[k].building.shgc
                self.alb_wall_total += self.BEM[k].frac * self.BEM[k].wall.albedo

                # Add to schedule list
                self.Sch.append(refSchedule[i][j][self.zone])
                k += 1
null
null
def hvac_autosize(self):
    """Section 6 - HVAC Autosizing (unlimited cooling & heating)

    When the autosize flag is set (non-zero), capacity limits are removed
    by assigning an effectively unbounded capacity to every building model.
    """
    # Idiom fix (`== False` -> `not`) and the loop-invariant flag check is
    # hoisted out of the loop; behavior is unchanged.
    if not self.is_near_zero(self.autosize):
        for bem in self.BEM:
            bem.building.coolCap = 9999.
            bem.building.heatCap = 9999.
null
null
def write_epw(self):
    """Section 8 - Writing new EPW file"""
    epw_prec = self.epw_precision  # precision of epw file input

    for iJ in range(len(self.UCMData)):
        # [iJ+self.simTime.timeInitial-8] = increments along every weather timestep in epw
        # [6 to 21] = column data of epw
        self.epwinput[iJ+self.simTime.timeInitial-8][6] = "{0:.{1}f}".format(
            self.UCMData[iJ].canTemp - 273.15, epw_prec)  # dry bulb temperature [C]
        # dew point temperature [C]
        self.epwinput[iJ+self.simTime.timeInitial - 8][7] = "{0:.{1}f}".format(self.UCMData[iJ].Tdp, epw_prec)
        # relative humidity [%]
        self.epwinput[iJ+self.simTime.timeInitial - 8][8] = "{0:.{1}f}".format(self.UCMData[iJ].canRHum, epw_prec)
        self.epwinput[iJ+self.simTime.timeInitial-8][21] = "{0:.{1}f}".format(
            self.WeatherData[iJ].wind, epw_prec)  # wind speed [m/s]

    # Writing new EPW file
    epw_new_id = open(self.newPathName, "w")

    # Header lines are written back verbatim, comma-joined.
    for i in range(8):
        new_epw_line = '{}\n'.format(reduce(lambda x, y: x+","+y, self._header[i]))
        epw_new_id.write(new_epw_line)

    for i in range(len(self.epwinput)):
        printme = ""
        for ei in range(34):
            printme += "{}".format(self.epwinput[i][ei]) + ','
        # NOTE(review): after the loop `ei` is 33, so column 33 is appended a
        # second time here (and the loop's trailing comma remains). This looks
        # like an off-by-one — confirm the intended EPW column count.
        printme = printme + "{}".format(self.epwinput[i][ei])
        new_epw_line = "{0}\n".format(printme)
        epw_new_id.write(new_epw_line)
    epw_new_id.close()

    print("New climate file '{}' is generated at {}.".format(
        self.destinationFileName, self.destinationDir))
null
null
def SurfFlux(self, forc, parameter, simTime, humRef, tempRef, windRef, boundCond, intFlux):
    """Calculate net heat flux, and update element layer temperatures.

    Args (assumed from usage — confirm against callers):
        forc: forcing data (pres, prec, deepTemp)
        parameter: physical constants/settings object
        simTime: simulation time state (dt, month)
        humRef: reference humidity ratio
        tempRef: [K] reference air temperature
        windRef: reference wind speed
        boundCond, intFlux: conduction boundary condition and interior flux
    """
    # Calculated per unit area (m^2)
    dens = forc.pres/(1000*0.287042*tempRef*(1.+1.607858*humRef))  # air density (kgd m-3)
    self.aeroCond = 5.8 + 3.7 * windRef  # Convection coef (ref: uwg, eq. 12))

    if (self.horizontal):
        # For roof, mass, road
        # Evaporation (m s-1), Film water & soil latent heat
        if not self.is_near_zero(self.waterStorage) and self.waterStorage > 0.0:
            # N.B In the current uwg code, latent heat from evapotranspiration, stagnant water,
            # or anthropogenic sources is not modelled due to the difficulty of validation, and
            # lack of reliability of precipitation data from EPW files. Therefore this condition
            # is never run because all elements have had their waterStorage hardcoded to 0.
            qtsat = self.qsat([self.layerTemp[0]], [forc.pres], parameter)[0]
            eg = self.aeroCond*parameter.colburn*dens*(qtsat-humRef)/parameter.waterDens/parameter.cp
            self.waterStorage = min(self.waterStorage + simTime.dt*(forc.prec-eg), parameter.wgmax)
            self.waterStorage = max(self.waterStorage, 0.)  # (m)
        else:
            eg = 0.
        soilLat = eg*parameter.waterDens*parameter.lv

        # NOTE(review): with `and`, this requires the month to be both before
        # vegStart AND after vegEnd — likely intended as `or`; confirm upstream.
        if simTime.month < parameter.vegStart and simTime.month > parameter.vegEnd:
            # Winter, no veg
            self.solAbs = (1.-self.albedo)*self.solRec  # (W m-2)
            vegLat = 0.
            vegSens = 0.
        else:
            # Summer, veg
            self.solAbs = ((1.-self.vegCoverage)*(1.-self.albedo)+self.vegCoverage*(1.-parameter.vegAlbedo))*self.solRec
            vegLat = self.vegCoverage*parameter.grassFLat*(1.-parameter.vegAlbedo)*self.solRec
            vegSens = self.vegCoverage*(1.-parameter.grassFLat)*(1.-parameter.vegAlbedo)*self.solRec
        self.lat = soilLat + vegLat

        # Sensible & net heat flux
        self.sens = vegSens + self.aeroCond*(self.layerTemp[0]-tempRef)
        self.flux = -self.sens + self.solAbs + self.infra - self.lat  # (W m-2)
    else:
        # For vertical surfaces (wall)
        self.solAbs = (1.-self.albedo)*self.solRec
        self.lat = 0.

        # Sensible & net heat flux
        self.sens = self.aeroCond*(self.layerTemp[0]-tempRef)
        self.flux = -self.sens + self.solAbs + self.infra - self.lat  # (W m-2)

    # Propagate the net flux through the element via 1-D conduction.
    self.layerTemp = self.Conduction(simTime.dt, self.flux, boundCond, forc.deepTemp, intFlux)
    self.T_ext = self.layerTemp[0]
    self.T_int = self.layerTemp[-1]
6.801376
6.70354
1.014595
def qsat(self, temp, pres, parameter):
    """
    Calculate the vector of saturation humidities (qsat_lst) from:
        temp = vector of element layer temperatures
        pres = pressures (at current timestep), same length as temp
        parameter = constants object (cl, cpv, rv, lvtt, tt, estt, r)
    """
    # Clausius-Clapeyron style coefficients, computed once.
    gamw = (parameter.cl - parameter.cpv) / parameter.rv
    betaw = (parameter.lvtt / parameter.rv) + (gamw * parameter.tt)
    alpw = math.log(parameter.estt) + (betaw / parameter.tt) + (gamw * math.log(parameter.tt))
    work2 = parameter.r / parameter.rv

    qsat_lst = []
    for T, p in zip(temp, pres):
        # saturation vapor pressure
        foes = math.exp(alpw - betaw / T - gamw * math.log(T))
        work1 = foes / p
        # saturation humidity
        qsat_lst.append(work2 * work1 / (1. + (work2 - 1.) * work1))
    return qsat_lst
4.078382
3.911096
1.042772
def invert(self, nz, A, C):
    """
    Inversion and resolution of a tridiagonal matrix A X = C.

    Input:
        nz      number of layers
        A[i][0] lower diagonal (Ai,i-1)
        A[i][1] principal diagonal (Ai,i)
        A[i][2] upper diagonal (Ai,i+1)
        C       right-hand side (modified in place)
    Output:
        X       solution vector
    """
    X = [0 for _ in range(nz)]

    # Eliminate the upper diagonal, sweeping bottom-up.
    for i in range(nz - 2, -1, -1):
        C[i] -= A[i][2] * C[i + 1] / A[i + 1][1]
        A[i][1] -= A[i][2] * A[i + 1][0] / A[i + 1][1]

    # Eliminate the lower diagonal, sweeping top-down.
    for i in range(1, nz):
        C[i] -= A[i][0] * C[i - 1] / A[i - 1][1]

    # Back out the solution from the remaining diagonal.
    for i in range(nz):
        X[i] = C[i] / A[i][1]
    return X
2.188806
2.223276
0.984496
def readPlist(pathOrFile):
    """
    Read a plist (binary or XML) from a path or an open file object.

    Raises NotBinaryPlistException, InvalidPlistException
    """
    didOpen = False
    result = None
    # Accept either a filesystem path or an already-open file object.
    if isinstance(pathOrFile, (bytes, unicode)):
        pathOrFile = open(pathOrFile, 'rb')
        didOpen = True
    try:
        reader = PlistReader(pathOrFile)
        result = reader.parse()
    except NotBinaryPlistException as e:
        # Not a binary plist; fall back to the stdlib XML plist parser.
        try:
            pathOrFile.seek(0)
            result = None
            if hasattr(plistlib, 'loads'):
                contents = None
                if isinstance(pathOrFile, (bytes, unicode)):
                    # NOTE(review): appears unreachable — pathOrFile was
                    # replaced by an open file above when it was a path.
                    with open(pathOrFile, 'rb') as f:
                        contents = f.read()
                else:
                    contents = pathOrFile.read()
                result = plistlib.loads(contents)
            else:
                # Older Pythons without plistlib.loads.
                result = plistlib.readPlist(pathOrFile)
            result = wrapDataObject(result, for_binary=True)
        except Exception as e:
            raise InvalidPlistException(e)
    finally:
        # Only close what this function itself opened.
        if didOpen:
            pathOrFile.close()
    return result
2.414014
2.244436
1.075555
def getSizedInteger(self, data, byteSize, as_number=False):
    """
    Decode a big-endian integer of `byteSize` bytes from `data`.

    Numbers of 8 bytes are signed integers when they refer to numbers,
    but unsigned otherwise.
    """
    if byteSize == 0:
        raise InvalidPlistException("Encountered integer with byte size of 0.")
    # 1, 2 and 4 byte integers are always unsigned.
    fixed_formats = {1: '>B', 2: '>H', 4: '>L'}
    if byteSize in fixed_formats:
        return unpack(fixed_formats[byteSize], data)[0]
    if byteSize == 8:
        return unpack('>q' if as_number else '>Q', data)[0]
    if byteSize <= 16:
        # Odd-sized integers, or integers larger than 8 bytes.
        # Capped at 16 bytes to prevent runaway loops on corrupt input.
        if hasattr(int, 'from_bytes'):
            return int.from_bytes(data, 'big')
        acc = 0
        for byte in data:
            if not isinstance(byte, int):
                # Python 3.0-3.1.x return ints when iterating bytes; 2.x returns str.
                byte = unpack_from('>B', byte)[0]
            acc = (acc << 8) | byte
        return acc
    raise InvalidPlistException("Encountered integer longer than 16 bytes.")
3.071542
2.988843
1.027669
def writeRoot(self, root):
    """
    Write the complete binary plist for `root` to self.file.

    Strategy is:
    - write header
    - wrap root object so everything is hashable
    - compute size of objects which will be written
      - need to do this in order to know how large the object refs
        will be in the list/dict/set reference lists
    - write objects
      - keep objects in writtenReferences
      - keep positions of object references in referencePositions
      - write object references with the length computed previously
    - compute object reference length
    - write object reference positions
    - write trailer
    """
    output = self.header
    wrapped_root = self.wrapRoot(root)
    self.computeOffsets(wrapped_root, asReference=True, isRoot=True)
    self.trailer = self.trailer._replace(**{'objectRefSize':self.intSize(len(self.computedUniques))})
    self.writeObjectReference(wrapped_root, output)
    output = self.writeObject(wrapped_root, output, setReferencePosition=True)

    # output size at this point is an upper bound on how big the
    # object reference offsets need to be.
    self.trailer = self.trailer._replace(**{
        'offsetSize':self.intSize(len(output)),
        'offsetCount':len(self.computedUniques),
        'offsetTableOffset':len(output),
        'topLevelObjectNumber':0
        })

    output = self.writeOffsetTable(output)
    # Trailer layout: 6 pad bytes, offsetSize, objectRefSize, 3 big-endian uint64s.
    output += pack('!xxxxxxBBQQQ', *self.trailer)
    self.file.write(output)
6.017663
5.265874
1.142766
def writeObjectReference(self, obj, output):
    """
    Tries to write an object reference, adding it to the references table.
    Does not write the actual object bytes or set the reference position.
    Returns a tuple of whether the object was a new reference (True if it was,
    False if it already was in the reference table) and the new output.
    """
    existing = self.positionOfObjectReference(obj)
    if existing is not None:
        # Already in the table: just reference the known position.
        return (False, output + self.binaryInt(existing, byteSize=self.trailer.objectRefSize))
    # New object: record it and reference it by its table index.
    self.writtenReferences[obj] = len(self.writtenReferences)
    new_index = len(self.writtenReferences) - 1
    return (True, output + self.binaryInt(new_index, byteSize=self.trailer.objectRefSize))
3.723527
3.271134
1.138299
def writeOffsetTable(self, output):
    """Writes all of the object reference offsets.

    :param output: bytes accumulated so far
    :return: output with the offset table appended
    :raises InvalidPlistException: if a written object has no recorded position
    """
    # Removed the unused `all_positions` accumulator the original built up.
    writtenReferences = list(self.writtenReferences.items())
    writtenReferences.sort(key=lambda x: x[1])
    for obj, order in writtenReferences:
        # Porting note: Elsewhere we deliberately replace empty unicode strings
        #               with empty binary strings, but the empty unicode string
        #               goes into writtenReferences. This isn't an issue in Py2
        #               because u'' and b'' have the same hash; but it is in
        #               Py3, where they don't.
        if bytes != str and obj == unicodeEmpty:
            obj = b''
        position = self.referencePositions.get(obj)
        if position is None:
            raise InvalidPlistException("Error while writing offsets table. Object not found. %s" % obj)
        output += self.binaryInt(position, self.trailer.offsetSize)
    return output
9.219657
8.88585
1.037566
def intSize(self, obj):
    """
    Returns the number of bytes necessary to store the given integer.
    """
    if obj < 0:
        # Negative values are always stored as 8-byte signed integers.
        return 8
    # Unsigned sizes.
    if obj <= 0xFF:
        return 1
    if obj <= 0xFFFF:
        return 2
    if obj <= 0xFFFFFFFF:
        return 4
    if obj <= 0x7FFFFFFFFFFFFFFF:
        # Fits in an 8-byte signed integer.
        return 8
    if obj <= 0xFFFFFFFFFFFFFFFF:
        # Too big for 8-byte signed storage: stored as a 16-byte integer.
        return 16
    raise InvalidPlistException("Core Foundation can't handle integers with size greater than 8 bytes.")
3.696981
3.599911
1.026965
def load_z_meso(self, z_meso_path):
    """Open the z_meso.txt file and load heights into self.z_meso as a list of floats.

    :param z_meso_path: directory containing the z_meso.txt file
    :raises Exception: when the z_meso.txt file does not exist
    """
    self.z_meso = []
    z_meso_file_path = os.path.join(z_meso_path, self.Z_MESO_FILE_NAME)

    if not os.path.exists(z_meso_file_path):
        # Bug fix: the message previously formatted the undefined name
        # `uwg_param_file`, which raised NameError instead of this Exception.
        raise Exception("z_meso.txt file: '{}' does not exist.".format(z_meso_file_path))

    # `with` guarantees the file handle is closed, even on a parse error.
    with open(z_meso_file_path, 'r') as f:
        for txtline in f:
            # Strip all whitespace and convert to float.
            self.z_meso.append(float("".join(txtline.split())))
3.404306
3.178253
1.071125
def get_books(self):
    """
    Retrieves all the books published by the artist.

    :return: List. Books published by the artist
    """
    # Drop the leading artist entry returned by the lookup.
    results = itunespy.lookup(id=self.artist_id, entity=itunespy.entities['ebook'])
    return results[1:]
21.655867
18.86767
1.147776
def get_tasks(task_id='', completed=True):
    """Get a list of tasks, optionally filtered by task id.

    The task_id can be abbreviated. Example: If a task named 'sleep' is scaled
    to 3 in marathon, there will be 3 tasks starting with 'sleep.'

    :param task_id: task ID
    :type task_id: str
    :param completed: include completed tasks?
    :type completed: bool
    :return: a list of tasks
    :rtype: []
    """
    client = mesos.DCOSClient()
    master = mesos.Master(client.get_master_state())
    matching = master.tasks(completed=completed, fltr=task_id)
    # Unwrap the raw task dicts from the mesos Task wrappers.
    return [t.__dict__['_task'] for t in matching]
5.260568
5.792032
0.908242
def get_task(task_id, completed=True):
    """Get a task by task id where a task_id is required.

    :param task_id: task ID
    :type task_id: str
    :param completed: include completed tasks?
    :type completed: bool
    :return: a task
    :rtype: obj
    """
    matches = get_tasks(task_id=task_id, completed=completed)
    if not matches:
        return None
    assert len(matches) == 1, 'get_task should return at max 1 task for a task id'
    return matches[0]
2.91112
4.109103
0.708456
def task_completed(task_id):
    """Check whether a task has completed.

    :param task_id: task ID
    :type task_id: str
    :return: True if completed, False otherwise
    :rtype: bool
    """
    completed_states = ('TASK_FINISHED', 'TASK_FAILED', 'TASK_KILLED',
                        'TASK_LOST', 'TASK_ERROR')
    # A task counts as completed when any matching task is in a terminal state.
    return any(t['state'] in completed_states for t in get_tasks(task_id=task_id))
2.866869
3.113976
0.920646
def task_property_present_predicate(service, task, prop):
    """True if the json_element passed is present for the task specified.

    :param service: service name
    :param task: task name
    :param prop: property key expected in the task's json
    :return: bool
    """
    # Bug fix: `response` was previously unbound when get_service_task raised,
    # causing a NameError on the return line instead of returning False.
    response = None
    try:
        response = get_service_task(service, task)
    except Exception:
        # Best-effort predicate: treat lookup failures as "not present".
        pass
    return (response is not None) and (prop in response)
4.210993
4.21657
0.998677
def wait_for_task(service, task, timeout_sec=120):
    """Waits for a task which was launched to be launched."""
    def _task_is_up():
        return task_predicate(service, task)
    return time_wait(_task_is_up, timeout_seconds=timeout_sec)
8.895722
8.791589
1.011845
def wait_for_task_property(service, task, prop, timeout_sec=120):
    """Waits for a task to have the specified property."""
    def _property_is_present():
        return task_property_present_predicate(service, task, prop)
    return time_wait(_property_is_present, timeout_seconds=timeout_sec)
8.005037
8.756963
0.914134
def copy_file(host, file_path, remote_path='.', username=None, key_path=None, action='put'):
    """
    Copy a file via SCP, proxied through the mesos master

    :param host: host or IP of the machine to execute the command on
    :type host: str
    :param file_path: the local path to the file to be copied
    :type file_path: str
    :param remote_path: the remote path to copy the file to
    :type remote_path: str
    :param username: SSH username
    :type username: str
    :param key_path: path to the SSH private key to use for SSH authentication
    :type key_path: str
    :param action: 'put' to upload, 'get' to download
    :type action: str
    :return: True if successful, False otherwise
    :rtype: bool
    """
    # Fall back to the CLI-configured SSH credentials when not provided.
    if not username:
        username = shakedown.cli.ssh_user
    if not key_path:
        key_path = shakedown.cli.ssh_key_file
    key = validate_key(key_path)

    transport = get_transport(host, username, key)
    transport = start_transport(transport, username, key)

    if transport.is_authenticated():
        start = time.time()
        channel = scp.SCPClient(transport)
        if action == 'get':
            print("\n{}scp {}:{} {}\n".format(shakedown.cli.helpers.fchr('>>'), host, remote_path, file_path))
            channel.get(remote_path, file_path)
        else:
            print("\n{}scp {} {}:{}\n".format(shakedown.cli.helpers.fchr('>>'), file_path, host, remote_path))
            channel.put(file_path, remote_path)
        # Report the local file's size either way (after a 'get' it now exists).
        print("{} bytes copied in {} seconds.".format(str(os.path.getsize(file_path)), str(round(time.time() - start, 2))))
        try_close(channel)
        try_close(transport)
        return True
    else:
        print("error: unable to authenticate {}@{} with key {}".format(username, host, key_path))
        return False
2.865646
2.870421
0.998336
def copy_file_to_master(file_path, remote_path='.', username=None, key_path=None):
    """Copy a file to the Mesos master"""
    master = shakedown.master_ip()
    return copy_file(master, file_path, remote_path, username, key_path)
5.341017
5.460605
0.9781
def copy_file_to_agent(host, file_path, remote_path='.', username=None, key_path=None):
    """Copy a file to a Mesos agent, proxied through the master"""
    return copy_file(host, file_path, remote_path, username=username, key_path=key_path)
3.230732
4.247884
0.760551
def copy_file_from_master(remote_path, file_path='.', username=None, key_path=None):
    """Copy a file FROM the Mesos master to the local `file_path`.

    (The previous docstring said "to the master", but the 'get' action
    performs a download — see copy_file.)
    """
    return copy_file(shakedown.master_ip(), file_path, remote_path, username, key_path, 'get')
6.92146
7.3416
0.942773
def copy_file_from_agent(host, remote_path, file_path='.', username=None, key_path=None):
    """Copy a file FROM a Mesos agent, proxied through the master.

    (The previous docstring said "to a Mesos agent", but the 'get' action
    performs a download — see copy_file.)
    """
    return copy_file(host, file_path, remote_path, username, key_path, 'get')
4.704872
6.232692
0.75487
def __metadata_helper(json_path):
    """Return parsed JSON for a specific dcos-metadata path, or None.

    dcos-metadata was introduced in dcos-1.9; clusters prior to 1.9 (or
    clusters missing the metadata) will return None.

    :param json_path: path under 'dcos-metadata/' to fetch
    :type json_path: str
    :return: decoded JSON on HTTP 200, otherwise None
    """
    url = shakedown.dcos_url_path('dcos-metadata/{}'.format(json_path))
    try:
        response = dcos.http.request('get', url)
        if response.status_code == 200:
            return response.json()
    except Exception:
        # Best-effort lookup: treat any request/parse failure as missing
        # metadata. (Was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.)
        pass
    return None
4.181563
3.511836
1.190706
def _get_resources(rtype='resources'):
    """Sum cpus and mem of the given resource type across all agents.

    Resource types from the state summary include: resources,
    used_resources, offered_resources, reserved_resources,
    unreserved_resources. The default is resources.

    :param rtype: the type of resources to sum
    :type rtype: str
    :return: resources(cpu, mem)
    :rtype: Resources
    """
    total_cpus = 0
    total_mem = 0
    summary = DCOSClient().get_state_summary()
    for agent in summary.get('slaves', []):
        res = agent[rtype]
        # Missing values count as zero.
        total_cpus += res.get('cpus') or 0
        total_mem += res.get('mem') or 0
    return Resources(total_cpus, total_mem)
3.239721
3.065105
1.056969
def get_reserved_resources(role=None):
    """Sum reserved cpus and mem across all agents.

    :param role: role name to filter on; None (or a role containing '*')
                 sums reservations for every role
    :type role: str
    :return: resources(cpu, mem)
    :rtype: Resources
    """
    total_cpus = 0.0
    total_mem = 0.0
    summary = DCOSClient().get_state_summary()
    for agent in summary.get('slaves', []):
        by_role = agent.get('reserved_resources')
        if role is None or '*' in role:
            selected = by_role.values()
        elif role in by_role:
            selected = [by_role.get(role)]
        else:
            selected = []
        for reservation in selected:
            # Missing values count as zero.
            total_cpus += reservation.get('cpus') or 0
            total_mem += reservation.get('mem') or 0
    return Resources(total_cpus, total_mem)
2.847903
2.834218
1.004829
def log_decl_method(func):
    """Decorate do_declaration methods with debug logging of the
    current step, declaration name and serialized value."""
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        inst, decl = args[0], args[2]
        log(DEBUG, u" {}: {} {}".format(
            inst.state['current_step'], decl.name,
            serialize(decl.value).strip()).encode('utf-8'))
        return func(*args, **kwargs)

    return wrapper
5.005588
4.72697
1.058942
def css_to_func(css, flags, css_namespaces, lang):
    """Convert a css selector to an xpath-backed extraction function.

    Supports the pseudo elements ``::attr(name)`` and ``::first-letter``
    (both functional and plain forms). Returns None when css is empty,
    otherwise a function that, given an element, returns the selected
    text (first letter only for first-letter; uppercased when 'nocase'
    is in flags).
    """
    from cssselect import parse, HTMLTranslator
    from cssselect.parser import FunctionalPseudoElement
    # FIXME HACK need lessc to support functional-pseudo-selectors instead
    # of marking as strings and stripping " here.
    if not (css):
        return None
    sel = parse(css.strip('" '))[0]
    xpath = HTMLTranslator().selector_to_xpath(sel)
    first_letter = False
    if sel.pseudo_element is not None:
        if type(sel.pseudo_element) == FunctionalPseudoElement:
            if sel.pseudo_element.name in ('attr', 'first-letter'):
                # Select the named attribute node; first-letter(attr)
                # additionally truncates to one character below.
                xpath += '/@' + sel.pseudo_element.arguments[0].value
                if sel.pseudo_element.name == 'first-letter':
                    first_letter = True
        elif isinstance(sel.pseudo_element, type(u'')):
            # Plain (non-functional) pseudo element arrives as a string.
            if sel.pseudo_element == 'first-letter':
                first_letter = True
    xp = etree.XPath(xpath, namespaces=css_namespaces)

    def toupper(u):
        # ICU locale-aware uppercasing for the requested language.
        loc = Locale(lang) if lang else Locale()
        return UnicodeString(u).toUpper(loc).encode('utf-8').decode('utf-8')

    def func(elem):
        res = xp(elem)
        if res:
            if etree.iselement(res[0]):
                res_str = etree.tostring(res[0], encoding='unicode', method="text")
            else:
                res_str = res[0]
            if first_letter:
                if res_str:
                    if flags and 'nocase' in flags:
                        return toupper(res_str[0])
                    else:
                        return res_str[0]
                else:
                    return res_str
            else:
                if flags and 'nocase' in flags:
                    return toupper(res_str)
                else:
                    return res_str
    return func
3.865575
3.860199
1.001393
def append_string(t, string):
    """Append a string to a node, as text or as the tail of its last child.

    Falsy strings (None, '') are a no-op.
    """
    node = t.tree
    if not string:
        return
    children = list(node)
    if children:
        # Extend (or set) the tail of the last child.
        last = children[-1]
        last.tail = string if last.tail is None else last.tail + string
    else:
        node.text = string if node.text is None else node.text + string
2.726478
2.427443
1.123189
def prepend_string(t, string):
    """Add a string to the target node's leading text region.

    An existing text value is extended (string goes after it), which
    still renders before all child elements.
    """
    node = t.tree
    node.text = string if node.text is None else node.text + string
4.226532
3.409892
1.239491
def grouped_insert(t, value):
    """Insert value into the target tree 't' with correct grouping.

    Handles grouped targets (with optional groupby key), sorted targets,
    and the 'inside', 'outside' and 'before' target locations; plain
    targets just append.
    """
    collator = Collator.createInstance(Locale(t.lang) if t.lang else Locale())
    # Detach value's tail text first, reattaching it to the previous
    # sibling (or the parent's text) so it is not dragged along.
    if value.tail is not None:
        val_prev = value.getprevious()
        if val_prev is not None:
            val_prev.tail = (val_prev.tail or '') + value.tail
        else:
            val_parent = value.getparent()
            if val_parent is not None:
                val_parent.text = (val_parent.text or '') + value.tail
        value.tail = None
    if t.isgroup and t.sort(value) is not None:
        if t.groupby:
            # Find the existing group in collation order, or create a
            # new group at the correct sorted position.
            for child in t.tree:
                if child.get('class') == 'group-by':
                    # child[0] is the label span
                    order = collator.compare(
                        t.groupby(child[1]) or '',
                        t.groupby(value) or '')
                    if order == 0:
                        c_target = Target(child, sort=t.sort, lang=t.lang)
                        insert_group(value, c_target)
                        break
                    elif order > 0:
                        group = create_group(t.groupby(value))
                        group.append(value)
                        child.addprevious(group)
                        break
            else:
                # No existing group sorts after value: append a new one.
                group = create_group(t.groupby(value))
                group.append(value)
                t.tree.append(group)
        else:
            insert_group(value, t)
    elif t.sort and t.sort(value) is not None:
        insert_sort(value, t)
    elif t.location == 'inside':
        # Wrap: move the target's children (and text) into value, then
        # place value inside the target.
        for child in t.tree:
            value.append(child)
        value.text = t.tree.text
        t.tree.text = None
        t.tree.append(value)
    elif t.location == 'outside':
        # Wrap the target itself: insert value at the target's position
        # in its parent, then move the target into value.
        value.tail = t.tree.tail
        t.tree.tail = None
        target_parent_descendants = (
            [n.getparent() for n in t.parent.iterdescendants()
             if n == t.tree])
        try:
            parent = target_parent_descendants[0]
            parent.insert(parent.index(t.tree), value)
            value.append(t.tree)
        except IndexError as e:
            logger.error('Target of outside has been moved or deleted')
            raise e
    elif t.location == 'before':
        value.tail = t.tree.text
        t.tree.text = None
        t.tree.insert(0, value)
    else:
        t.tree.append(value)
3.027096
2.958036
1.023347
def insert_sort(node, target):
    """Insert node at its sorted position within target's tree.

    Ordering comes from target.sort applied to each element, compared
    with an ICU collator for target.lang (default locale when unset).
    """
    key = target.sort
    locale = Locale(target.lang) if target.lang else Locale()
    collator = Collator.createInstance(locale)
    for sibling in target.tree:
        if collator.compare(key(sibling) or '', key(node) or '') > 0:
            sibling.addprevious(node)
            break
    else:
        target.tree.append(node)
5.22972
4.915134
1.064003
def insert_group(node, target):
    """Insert node into the appropriate group within target's tree.

    This assumes the node and the target's children share a structure
    where the first child determines the grouping key and subsequent
    children are the content accumulated into the group. Uses
    target.lang for collation.
    """
    # NOTE(review): the grouping key is target.sort here (bound to the
    # name 'group'), matching how grouped_insert builds the child Target
    # with sort=t.sort — confirm this is intentional and not a missing
    # groupby attribute.
    group = target.sort
    lang = target.lang
    collator = Collator.createInstance(Locale(lang) if lang else Locale())
    for child in target.tree:
        order = collator.compare(group(child) or '', group(node) or '')
        if order == 0:
            # Same group: fold node's content children (everything after
            # the first, key-bearing child) into the existing group.
            for nodechild in node[1:]:
                child.append(nodechild)
            break
        elif order > 0:
            # First group sorting after node: insert node before it.
            child.addprevious(node)
            break
    else:
        target.tree.append(node)
4.93331
4.580988
1.07691
def create_group(value):
    """Create the group wrapper node with its label span set to value."""
    wrapper = etree.Element('div', attrib={'class': 'group-by'})
    label = etree.SubElement(wrapper, 'span', attrib={'class': 'group-label'})
    label.text = value
    return wrapper
3.207199
2.886592
1.111067
def _extract_sel_info(sel):
    """Recurse down a parsed cssselect2 tree, returning pseudo-class info.

    Returns a (steps, extras) tuple: step names collected from
    ``:pass(...)`` functional pseudo-classes, and 'deferred' markers
    from ``:deferred`` pseudo-classes.
    """
    from cssselect2.parser import (CombinedSelector, CompoundSelector,
                                   PseudoClassSelector,
                                   FunctionalPseudoClassSelector)
    steps = []
    extras = []
    if isinstance(sel, CombinedSelector):
        # Recurse into both sides of a combinator (e.g. 'a > b').
        lstep, lextras = _extract_sel_info(sel.left)
        rstep, rextras = _extract_sel_info(sel.right)
        steps = lstep + rstep
        extras = lextras + rextras
    elif isinstance(sel, CompoundSelector):
        for ssel in sel.simple_selectors:
            s, e = _extract_sel_info(ssel)
            steps.extend(s)
            extras.extend(e)
    elif isinstance(sel, FunctionalPseudoClassSelector):
        if sel.name == 'pass':
            # Argument tokens are serialized back to text; strip quotes.
            steps.append(serialize(sel.arguments).strip('"\''))
    elif isinstance(sel, PseudoClassSelector):
        if sel.name == 'deferred':
            extras.append('deferred')
    return (steps, extras)
3.244062
3.078275
1.053857
def extract_selector_info(sel):
    """Return selector special pseudo class info (steps and other).

    Walks the parsed tree for pseudo-class selectors, collecting step
    names and deferred markers; defaults steps to ['default'].
    """
    raw_steps, raw_extras = _extract_sel_info(sel.parsed_tree)
    steps = sorted(set(raw_steps)) or ['default']
    extras = sorted(set(raw_extras))
    return (steps, extras)
10.10742
8.318218
1.215094
roman_numeral_map = ( ('M', 1000), ('CM', 900), ('D', 500), ('CD', 400), ('C', 100), ('XC', 90), ('L', 50), ('XL', 40), ('X', 10), ('IX', 9), ('V', 5), ('IV', 4), ('I', 1) ) if not (0 < num < 5000): log(WARN, 'Number out of range for roman (must be 1..4999)') return str(num) result = '' for numeral, integer in roman_numeral_map: while num >= integer: result += numeral num -= integer return result
def _to_roman(num)
Convert integer to roman numerals.
1.739481
1.743349
0.997781
def copy_w_id_suffix(elem, suffix="_copy"):
    """Deep-copy the provided tree, appending suffix to every id attribute."""
    duplicate = deepcopy(elem)
    for node in duplicate.xpath('//*[@id]'):
        node.set('id', '{}{}'.format(node.get('id'), suffix))
    return duplicate
2.669614
2.562172
1.041934
def generate_id(self):
    """Generate a fresh id.

    Returns a deterministic 'autobaked-N' id (incrementing the counter)
    when use_repeatable_ids is set, otherwise a random UUID string.
    """
    if not self.use_repeatable_ids:
        return str(uuid4())
    self.repeatable_id_counter += 1
    return 'autobaked-{}'.format(self.repeatable_id_counter)
5.285736
4.666935
1.132592
def clear_state(self):
    """Clear the recipe state, including per-step bookkeeping."""
    fresh = {
        'steps': [],
        'current_step': None,
        'scope': [],
        'counters': {},
        'strings': {},
    }
    for step in self.matchers:
        fresh[step] = {
            'pending': {},
            'actions': [],
            'counters': {},
            'strings': {},
            # FIXME rather than boolean should ref HTML tree
            'recipe': False,
        }
    self.state = fresh
4.698215
4.361537
1.077193
def bake(self, element, last_step=None):
    """Apply recipes to HTML tree. Will build recipes if needed.

    :param element: root of the HTML tree, modified in place
    :param last_step: if given, only steps ordered before it are applied
    """
    if last_step is not None:
        # Restrict to steps before last_step; compare numerically when
        # both sides parse as ints, otherwise lexically.
        try:
            self.state['steps'] = [s for s in self.state['steps']
                                   if int(s) < int(last_step)]
        except ValueError:
            self.state['steps'] = [s for s in self.state['steps']
                                   if s < last_step]
    for step in self.state['steps']:
        self.state['current_step'] = step
        self.state['scope'].insert(0, step)
        # Need to wrap each loop, since tree may have changed
        wrapped_html_tree = ElementWrapper.from_html_root(element)
        if not self.state[step]['recipe']:
            recipe = self.build_recipe(wrapped_html_tree, step)
        else:
            recipe = self.state[step]
        log(DEBUG, u'Recipe {} length: {}'.format(
            step, len(recipe['actions'])).encode('utf-8'))
        target = None
        old_content = {}
        node_counts = {}
        for action, value in recipe['actions']:
            if action == 'target':
                target = value
                old_content = {}
            elif action == 'tag':
                target.tree.tag = value
            elif action == 'clear':
                # Stash the cleared text/children so a later 'content'
                # action without a value can restore them.
                old_content['text'] = target.tree.text
                target.tree.text = None
                old_content['children'] = []
                # NOTE(review): removes children while iterating the
                # element — confirm the underlying tree tolerates this.
                for child in target.tree:
                    old_content['children'].append(child)
                    target.tree.remove(child)
            elif action == 'content':
                if value is not None:
                    append_string(target, value.text)
                    for child in value:
                        target.tree.append(child)
                elif old_content:
                    # Restore what 'clear' stashed.
                    append_string(target, old_content['text'])
                    for child in old_content['children']:
                        target.tree.append(child)
            elif action == 'attrib':
                attname, vals = value
                strval = u''.join([u'{}'.format(s) for s in vals])
                target.tree.set(attname, strval)
            elif action == 'string':
                strval = u''.join([u'{}'.format(s) for s in value])
                if target.location == 'before':
                    prepend_string(target, strval)
                else:
                    append_string(target, strval)
            elif action == 'move':
                grouped_insert(target, value)
            elif action == 'copy':
                mycopy = copy_w_id_suffix(value)
                mycopy.tail = None
                grouped_insert(target, mycopy)
            elif action == 'nodeset':
                # Each extra copy of the same node gets a distinct suffix.
                node_counts[value] = node_counts.setdefault(value, 0) + 1
                suffix = u'_copy_{}'.format(node_counts[value])
                mycopy = copy_w_id_suffix(value, suffix)
                mycopy.tail = None
                grouped_insert(target, mycopy)
            else:
                log(WARN, u'Missing action {}'.format(
                    action).encode('utf-8'))
    # Do numbering
    # Do label/link updates

    # Add an empty string to each element just to make sure the element
    # is closed. This is useful for browsers that parse the output
    # as HTML5 rather than as XHTML5.
    #
    # One use-case would be users that inject the content into an
    # existing HTML (not XHTML) document.
    walkAll = element.iter()
    for elt in walkAll:
        if elt.tag not in SELF_CLOSING_TAGS:
            if len(elt) == 0 and not elt.text:
                elt.text = ''
3.617459
3.5202
1.027629