sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def _do_highlight(content, query, tag='em'): """ Highlight `query` terms in `content` with html `tag`. This method assumes that the input text (`content`) does not contain any special formatting. That is, it does not contain any html tags or similar markup that could be screwed up by the highlighting. Required arguments: `content` -- Content to search for instances of `text` `text` -- The text to be highlighted """ for term in query: term = term.decode('utf-8') for match in re.findall('[^A-Z]+', term): # Ignore field identifiers match_re = re.compile(match, re.I) content = match_re.sub('<%s>%s</%s>' % (tag, term, tag), content) return content
Highlight `query` terms in `content` with html `tag`. This method assumes that the input text (`content`) does not contain any special formatting. That is, it does not contain any html tags or similar markup that could be screwed up by the highlighting. Required arguments: `content` -- Content to search for instances of `text` `text` -- The text to be highlighted
entailment
def _prepare_facet_field_spies(self, facets):
    """
    Returns a list of spies based on the facets
    used to count frequencies.

    One xapian.ValueCountMatchSpy is built per faceted field.
    """
    def make_spy(slot):
        spy = xapian.ValueCountMatchSpy(slot)
        # remember which column this spy targets so the results can
        # later be mapped back to their field
        spy.slot = slot
        return spy

    return [make_spy(self.column[facet]) for facet in facets]
Returns a list of spies based on the facets used to count frequencies.
entailment
def _process_facet_field_spies(self, spies):
    """
    Returns a dict of facet names with lists of
    tuples of the form (term, term_frequency)
    from a list of spies that observed the enquire.
    """
    facet_dict = {}
    for spy in spies:
        field = self.schema[spy.slot]
        name = field['field_name']
        ftype = field['type']

        entries = []
        for facet in spy.values():
            if ftype == 'float':
                # the float term is a Xapian serialized object, which is
                # in bytes; leave it untouched for deserialization.
                raw = facet.term
            else:
                raw = facet.term.decode('utf-8')
            entries.append((_from_xapian_value(raw, ftype), facet.termfreq))
        facet_dict[name] = entries
    return facet_dict
Returns a dict of facet names with lists of tuples of the form (term, term_frequency) from a list of spies that observed the enquire.
entailment
def _do_multivalued_field_facets(self, results, field_facets): """ Implements a multivalued field facet on the results. This is implemented using brute force - O(N^2) - because Xapian does not have it implemented yet (see http://trac.xapian.org/ticket/199) """ facet_dict = {} for field in field_facets: facet_list = {} if not self._multi_value_field(field): continue for result in results: field_value = getattr(result, field) for item in field_value: # Facet each item in a MultiValueField facet_list[item] = facet_list.get(item, 0) + 1 facet_dict[field] = list(facet_list.items()) return facet_dict
Implements a multivalued field facet on the results. This is implemented using brute force - O(N^2) - because Xapian does not have it implemented yet (see http://trac.xapian.org/ticket/199)
entailment
def _do_date_facets(results, date_facets): """ Private method that facets a document by date ranges Required arguments: `results` -- A list SearchResults to facet `date_facets` -- A dictionary containing facet parameters: {'field': {'start_date': ..., 'end_date': ...: 'gap_by': '...', 'gap_amount': n}} nb., gap must be one of the following: year|month|day|hour|minute|second For each date facet field in `date_facets`, generates a list of date ranges (from `start_date` to `end_date` by `gap_by`) then iterates through `results` and tallies the count for each date_facet. Returns a dictionary of date facets (fields) containing a list with entries for each range and a count of documents matching the range. eg. { 'pub_date': [ (datetime.datetime(2009, 1, 1, 0, 0), 5), (datetime.datetime(2009, 2, 1, 0, 0), 0), (datetime.datetime(2009, 3, 1, 0, 0), 0), (datetime.datetime(2008, 4, 1, 0, 0), 1), (datetime.datetime(2008, 5, 1, 0, 0), 2), ], } """ def next_datetime(previous, gap_value, gap_type): year = previous.year month = previous.month if gap_type == 'year': next = previous.replace(year=year + gap_value) elif gap_type == 'month': if month + gap_value <= 12: next = previous.replace(month=month + gap_value) else: next = previous.replace( month=((month + gap_value) % 12), year=(year + (month + gap_value) // 12) ) elif gap_type == 'day': next = previous + datetime.timedelta(days=gap_value) elif gap_type == 'hour': return previous + datetime.timedelta(hours=gap_value) elif gap_type == 'minute': next = previous + datetime.timedelta(minutes=gap_value) elif gap_type == 'second': next = previous + datetime.timedelta(seconds=gap_value) else: raise TypeError('\'gap_by\' must be ' '{second, minute, day, month, year}') return next facet_dict = {} for date_facet, facet_params in list(date_facets.items()): gap_type = facet_params.get('gap_by') gap_value = facet_params.get('gap_amount', 1) date_range = facet_params['start_date'] # construct the bins of the histogram facet_list = [] while 
date_range < facet_params['end_date']: facet_list.append((date_range, 0)) date_range = next_datetime(date_range, gap_value, gap_type) facet_list = sorted(facet_list, key=lambda x: x[0], reverse=True) for result in results: result_date = getattr(result, date_facet) # convert date to datetime if not isinstance(result_date, datetime.datetime): result_date = datetime.datetime(result_date.year, result_date.month, result_date.day) # ignore results outside the boundaries. if facet_list[0][0] < result_date < facet_list[-1][0]: continue # populate the histogram by putting the result on the right bin. for n, facet_date in enumerate(facet_list): if result_date > facet_date[0]: # equal to facet_list[n][1] += 1, but for a tuple facet_list[n] = (facet_list[n][0], (facet_list[n][1] + 1)) break # bin found; go to next result facet_dict[date_facet] = facet_list return facet_dict
Private method that facets a document by date ranges Required arguments: `results` -- A list SearchResults to facet `date_facets` -- A dictionary containing facet parameters: {'field': {'start_date': ..., 'end_date': ...: 'gap_by': '...', 'gap_amount': n}} nb., gap must be one of the following: year|month|day|hour|minute|second For each date facet field in `date_facets`, generates a list of date ranges (from `start_date` to `end_date` by `gap_by`) then iterates through `results` and tallies the count for each date_facet. Returns a dictionary of date facets (fields) containing a list with entries for each range and a count of documents matching the range. eg. { 'pub_date': [ (datetime.datetime(2009, 1, 1, 0, 0), 5), (datetime.datetime(2009, 2, 1, 0, 0), 0), (datetime.datetime(2009, 3, 1, 0, 0), 0), (datetime.datetime(2008, 4, 1, 0, 0), 1), (datetime.datetime(2008, 5, 1, 0, 0), 2), ], }
entailment
def _do_query_facets(self, results, query_facets): """ Private method that facets a document by query Required arguments: `results` -- A list SearchResults to facet `query_facets` -- A dictionary containing facet parameters: {'field': 'query', [...]} For each query in `query_facets`, generates a dictionary entry with the field name as the key and a tuple with the query and result count as the value. eg. {'name': ('a*', 5)} """ facet_dict = {} for field, query in list(dict(query_facets).items()): facet_dict[field] = (query, self.search(self.parse_query(query))['hits']) return facet_dict
Private method that facets a document by query Required arguments: `results` -- A list SearchResults to facet `query_facets` -- A dictionary containing facet parameters: {'field': 'query', [...]} For each query in `query_facets`, generates a dictionary entry with the field name as the key and a tuple with the query and result count as the value. eg. {'name': ('a*', 5)}
entailment
def _do_spelling_suggestion(database, query, spelling_query): """ Private method that returns a single spelling suggestion based on `spelling_query` or `query`. Required arguments: `database` -- The database to check spelling against `query` -- The query to check `spelling_query` -- If not None, this will be checked instead of `query` Returns a string with a suggested spelling """ if spelling_query: if ' ' in spelling_query: return ' '.join([database.get_spelling_suggestion(term).decode('utf-8') for term in spelling_query.split()]) else: return database.get_spelling_suggestion(spelling_query).decode('utf-8') term_set = set() for term in query: for match in re.findall('[^A-Z]+', term.decode('utf-8')): # Ignore field identifiers term_set.add(database.get_spelling_suggestion(match).decode('utf-8')) return ' '.join(term_set)
Private method that returns a single spelling suggestion based on `spelling_query` or `query`. Required arguments: `database` -- The database to check spelling against `query` -- The query to check `spelling_query` -- If not None, this will be checked instead of `query` Returns a string with a suggested spelling
entailment
def _database(self, writable=False):
    """
    Private method that returns a xapian.Database for use.

    Optional arguments:
        ``writable`` -- Open the database in read/write mode
                        (default=False)

    Returns an instance of a xapian.Database or xapian.WritableDatabase
    (or the shared in-memory database when the path is the in-memory
    marker).
    """
    if self.path == MEMORY_DB_NAME:
        # lazily create a single shared in-memory database
        if not self.inmemory_db:
            self.inmemory_db = xapian.inmemory_open()
        return self.inmemory_db

    if writable:
        return xapian.WritableDatabase(self.path, xapian.DB_CREATE_OR_OPEN)

    try:
        return xapian.Database(self.path)
    except xapian.DatabaseOpeningError:
        raise InvalidIndexError('Unable to open index at %s' % self.path)
Private method that returns a xapian.Database for use. Optional arguments: ``writable`` -- Open the database in read/write mode (default=False) Returns an instance of a xapian.Database or xapian.WritableDatabase
entailment
def _get_enquire_mset(database, enquire, start_offset, end_offset, checkatleast=DEFAULT_CHECK_AT_LEAST):
    """
    A safer version of Xapian.enquire.get_mset

    Simply wraps the Xapian version and catches any
    `Xapian.DatabaseModifiedError`, attempting a `database.reopen`
    as needed.

    Required arguments:
        `database` -- The database to be read
        `enquire` -- An instance of an Xapian.enquire object
        `start_offset` -- The start offset to pass to `enquire.get_mset`
        `end_offset` -- The end offset to pass to `enquire.get_mset`
    """
    for attempt in (0, 1):
        try:
            return enquire.get_mset(start_offset, end_offset, checkatleast)
        except xapian.DatabaseModifiedError:
            if attempt:
                raise  # already retried once; give up
            # the index changed under us: reopen and retry once
            database.reopen()
A safer version of Xapian.enquire.get_mset Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`, attempting a `database.reopen` as needed. Required arguments: `database` -- The database to be read `enquire` -- An instance of an Xapian.enquire object `start_offset` -- The start offset to pass to `enquire.get_mset` `end_offset` -- The end offset to pass to `enquire.get_mset`
entailment
def _get_document_data(database, document): """ A safer version of Xapian.document.get_data Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`, attempting a `database.reopen` as needed. Required arguments: `database` -- The database to be read `document` -- An instance of an Xapian.document object """ try: return document.get_data() except xapian.DatabaseModifiedError: database.reopen() return document.get_data()
A safer version of Xapian.document.get_data Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`, attempting a `database.reopen` as needed. Required arguments: `database` -- The database to be read `document` -- An instance of an Xapian.document object
entailment
def _get_hit_count(self, database, enquire): """ Given a database and enquire instance, returns the estimated number of matches. Required arguments: `database` -- The database to be queried `enquire` -- The enquire instance """ return self._get_enquire_mset( database, enquire, 0, database.get_doccount() ).size()
Given a database and enquire instance, returns the estimated number of matches. Required arguments: `database` -- The database to be queried `enquire` -- The enquire instance
entailment
def _multi_value_field(self, field): """ Private method that returns `True` if a field is multi-valued, else `False`. Required arguemnts: `field` -- The field to lookup Returns a boolean value indicating whether the field is multi-valued. """ for field_dict in self.schema: if field_dict['field_name'] == field: return field_dict['multi_valued'] == 'true' return False
Private method that returns `True` if a field is multi-valued, else `False`. Required arguments: `field` -- The field to lookup Returns a boolean value indicating whether the field is multi-valued.
entailment
def _query_from_term(self, term, field_name, filter_type, is_not):
    """
    Uses arguments to construct a list of xapian.Query's.

    Required arguments:
        `term` -- The value to filter on (may be a list, a queryset-like
                  object, or an AutoQuery)
        `field_name` -- The field to search; 'content' means the generic
                        document text
        `filter_type` -- Haystack filter name ('contains', 'exact', 'in',
                         'startswith', 'gt', 'gte', 'lt', 'lte', 'range',
                         or 'content' for the default filter)
        `is_not` -- Whether the filter is negated

    Raises InvalidIndexError for unindexed fields and
    NotImplementedError for 'endswith' filters.
    """
    if field_name != 'content' and field_name not in self.backend.column:
        raise InvalidIndexError('field "%s" not indexed' % field_name)

    # If it is an AutoQuery, it has no filters
    # or others, thus we short-circuit the procedure.
    if isinstance(term, AutoQuery):
        if field_name != 'content':
            query = '%s:%s' % (field_name, term.prepare(self))
        else:
            query = term.prepare(self)
        return [self.backend.parse_query(query)]
    query_list = []

    # Handle `ValuesListQuerySet`.
    if hasattr(term, 'values_list'):
        term = list(term)

    if field_name == 'content':
        # content is the generic search:
        # force no field_name search
        # and the field_type to be 'text'.
        field_name = None
        field_type = 'text'

        # we don't know what is the type(term), so we parse it.
        # Ideally this would not be required, but
        # some filters currently depend on the term to make decisions.
        term = _to_xapian_term(term)

        query_list.append(self._filter_contains(term, field_name, field_type, is_not))
        # when filter has no filter_type, haystack uses
        # filter_type = 'content'. Here we remove it
        # since the above query is already doing this
        if filter_type == 'content':
            filter_type = None
    else:
        # get the field_type from the backend
        field_type = self.backend.schema[self.backend.column[field_name]]['type']

        # private fields don't accept 'contains' or 'startswith'
        # since they have no meaning.
        if filter_type in ('contains', 'startswith') and field_name in (ID, DJANGO_ID, DJANGO_CT):
            filter_type = 'exact'

        if field_type == 'text':
            # we don't know what type "term" is, but we know we are searching as text
            # so we parse it like that.
            # Ideally this would not be required since _term_query does it, but
            # some filters currently depend on the term to make decisions.
            if isinstance(term, list):
                term = [_to_xapian_term(term) for term in term]
            else:
                term = _to_xapian_term(term)

    # todo: we should check that the filter is valid for this field_type or raise InvalidIndexError
    if filter_type == 'contains':
        query_list.append(self._filter_contains(term, field_name, field_type, is_not))
    elif filter_type in ('content', 'exact'):
        query_list.append(self._filter_exact(term, field_name, field_type, is_not))
    elif filter_type == 'in':
        query_list.append(self._filter_in(term, field_name, field_type, is_not))
    elif filter_type == 'startswith':
        query_list.append(self._filter_startswith(term, field_name, field_type, is_not))
    elif filter_type == 'endswith':
        raise NotImplementedError("The Xapian search backend doesn't support endswith queries.")
    elif filter_type == 'gt':
        query_list.append(self._filter_gt(term, field_name, field_type, is_not))
    elif filter_type == 'gte':
        query_list.append(self._filter_gte(term, field_name, field_type, is_not))
    elif filter_type == 'lt':
        query_list.append(self._filter_lt(term, field_name, field_type, is_not))
    elif filter_type == 'lte':
        query_list.append(self._filter_lte(term, field_name, field_type, is_not))
    elif filter_type == 'range':
        query_list.append(self._filter_range(term, field_name, field_type, is_not))
    return query_list
Uses arguments to construct a list of xapian.Query's.
entailment
def _filter_contains(self, term, field_name, field_type, is_not): """ Splits the sentence in terms and join them with OR, using stemmed and un-stemmed. Assumes term is not a list. """ if field_type == 'text': term_list = term.split() else: term_list = [term] query = self._or_query(term_list, field_name, field_type) if is_not: return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query) else: return query
Splits the sentence in terms and join them with OR, using stemmed and un-stemmed. Assumes term is not a list.
entailment
def _filter_in(self, term_list, field_name, field_type, is_not):
    """
    Returns a query that matches exactly ANY term in term_list.

    Notice that:
        A in {B,C} <=> (A = B or A = C)
        ~(A in {B,C}) <=> ~(A = B or A = C)
    Because OP_AND_NOT(C, D) <=> (C and ~D), each inner exact filter is
    built with `is_not=False` and the negation, if any, is applied once
    around the OR.

    Assumes term_list is a list.
    """
    exact_queries = [
        self._filter_exact(term, field_name, field_type, is_not=False)
        for term in term_list
    ]
    any_query = xapian.Query(xapian.Query.OP_OR, exact_queries)
    if not is_not:
        return any_query
    return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), any_query)
Returns a query that matches exactly ANY term in term_list. Notice that: A in {B,C} <=> (A = B or A = C) ~(A in {B,C}) <=> ~(A = B or A = C) Because OP_AND_NOT(C, D) <=> (C and ~D), then D=(A in {B,C}) requires `is_not=False`. Assumes term is a list.
entailment
def _filter_exact(self, term, field_name, field_type, is_not): """ Returns a query that matches exactly the un-stemmed term with positional order. Assumes term is not a list. """ if field_type == 'text' and field_name not in (DJANGO_CT,): term = '^ %s $' % term query = self._phrase_query(term.split(), field_name, field_type) else: query = self._term_query(term, field_name, field_type, stemmed=False) if is_not: return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query) else: return query
Returns a query that matches exactly the un-stemmed term with positional order. Assumes term is not a list.
entailment
def _filter_startswith(self, term, field_name, field_type, is_not): """ Returns a startswith query on the un-stemmed term. Assumes term is not a list. """ if field_type == 'text': if len(term.split()) == 1: term = '^ %s*' % term query = self.backend.parse_query(term) else: term = '^ %s' % term query = self._phrase_query(term.split(), field_name, field_type) else: term = '^%s*' % term query = self.backend.parse_query(term) if is_not: return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query) return query
Returns a startswith query on the un-stemmed term. Assumes term is not a list.
entailment
def _or_query(self, term_list, field, field_type):
    """
    Joins each item of term_list decorated by _term_query with an OR.
    """
    queries = []
    for item in term_list:
        queries.append(self._term_query(item, field, field_type))
    return xapian.Query(xapian.Query.OP_OR, queries)
Joins each item of term_list decorated by _term_query with an OR.
entailment
def _phrase_query(self, term_list, field_name, field_type):
    """
    Returns a query that matches exact terms with positional order
    (i.e. ["this", "thing"] != ["thing", "this"]) and no stem.

    If `field_name` is not `None`, restrict to the field.
    """
    unstemmed = []
    for term in term_list:
        unstemmed.append(
            self._term_query(term, field_name, field_type, stemmed=False))
    return xapian.Query(xapian.Query.OP_PHRASE, unstemmed)
Returns a query that matches exact terms with positional order (i.e. ["this", "thing"] != ["thing", "this"]) and no stem. If `field_name` is not `None`, restrict to the field.
entailment
def _term_query(self, term, field_name, field_type, stemmed=True):
    """
    Constructs a query of a single term.

    If `field_name` is not `None`, the term is searched on that field
    only. If `stemmed` is `True` (and the field type allows stemming),
    the stemmed and un-stemmed forms are OR'ed together.
    """
    constructor = '{prefix}{term}'

    # construct the prefix to be used.
    prefix = ''
    if field_name:
        prefix = TERM_PREFIXES['field'] + field_name.upper()
        term = _to_xapian_term(term)

    if field_name in (ID, DJANGO_ID, DJANGO_CT):
        # to ensure the value is serialized correctly.
        if field_name == DJANGO_ID:
            term = int(term)
        term = _term_to_xapian_value(term, field_type)
        return xapian.Query('%s%s' % (TERM_PREFIXES[field_name], term))

    # we construct the query dates in a slightly different way:
    # the term is assumed to be "<date> <time>"; the date must match
    # and the time optionally boosts (OP_AND_MAYBE).
    if field_type == 'datetime':
        date, time = term.split()
        return xapian.Query(xapian.Query.OP_AND_MAYBE,
                            constructor.format(prefix=prefix, term=date),
                            constructor.format(prefix=prefix, term=time)
                            )

    # only use stem if field is text or "None"
    if field_type not in ('text', None):
        stemmed = False

    unstemmed_term = constructor.format(prefix=prefix, term=term)
    if stemmed:
        stem = xapian.Stem(self.backend.language)
        # 'Z' marks the stemmed form of the term (Xapian's stem-prefix
        # convention); match either the stemmed or the literal form.
        stemmed_term = 'Z' + constructor.format(prefix=prefix, term=stem(term).decode('utf-8'))

        return xapian.Query(xapian.Query.OP_OR,
                            xapian.Query(stemmed_term),
                            xapian.Query(unstemmed_term)
                            )
    else:
        return xapian.Query(unstemmed_term)
Constructs a query of a single term. If `field_name` is not `None`, the term is search on that field only. If exact is `True`, the search is restricted to boolean matches.
entailment
def _filter_gte(self, term, field_name, field_type, is_not):
    """
    Private method that returns a xapian.Query that searches for any
    term that is greater than or equal to `term` in a specified `field`,
    implemented as an open-ended value range.
    """
    vrp = XHValueRangeProcessor(self.backend)
    lower_spec = '%s:%s' % (field_name,
                            _term_to_xapian_value(term, field_type))
    # '*' leaves the upper bound open
    pos, begin, end = vrp(lower_spec, '*')
    range_query = xapian.Query(xapian.Query.OP_VALUE_RANGE, pos, begin, end)
    if not is_not:
        return range_query
    return xapian.Query(xapian.Query.OP_AND_NOT,
                        self._all_query(), range_query)
Private method that returns a xapian.Query that searches for any term that is greater than `term` in a specified `field`.
entailment
def get(company='', company_uri=''):
    """
    Performs a HTTP GET for a glassdoor page and returns json.

    Either `company` (a search keyword) or `company_uri` (a direct page
    path) must be supplied.
    """
    if not company and not company_uri:
        raise Exception("glassdoor.gd.get(company='', company_uri=''): "\
                        " company or company_uri required")

    payload = {}
    if company_uri:
        uri = '%s%s' % (GLASSDOOR_API, company_uri)
    else:
        # keyword search via the search-button endpoint
        payload.update({'clickSource': 'searchBtn',
                        'sc.keyword': company})
        uri = '%s/%s' % (GLASSDOOR_API, REVIEWS_URL)

    response = requests.get(uri, params=payload)
    return parse(BeautifulSoup(response.content))
Performs a HTTP GET for a glassdoor page and returns json
entailment
def parse_suggestions(soup): """Suggests similar/related companies to query""" selector_comps = {'class': 'companyData'} companies = soup.findAll('div', selector_comps) def is_exact_match(c): """Determines if this company suggestion has an [exact match] html label or whether its name matches the company name the user searched for """ selector_exact = {'class' : 'chickletExactMatch chicklet'} searched_name = soup.findAll('input', {'name': 'sc.keyword'})[0]['value'] actual_name = c.findAll('h3')[0].text names_match = searched_name.lower() == actual_name.lower() exact_tag = bool(c.findAll('i', selector_exact)) return exact_tag or names_match def parse_suggestion(c): return { 'name': c.findAll('h3')[0].text, 'uri': c.findAll('a')[1]['href'], 'exact': is_exact_match(c) } suggestions = [] for c in companies: try: suggestions.append(parse_suggestion(c)) except IndexError as e: pass return suggestions
Suggests similar/related companies to query
entailment
def parse(soup):
    """
    Parses the results for a company search and returns the results if
    is_direct_match.

    If no company is found, a list of suggestions is returned as dicts.
    If one such suggestion is found to be an exact match, the request is
    re-performed for that exact match.
    """
    if is_direct_match(soup):
        return {
            'satisfaction': parse_satisfaction(soup),
            'ceo': parse_ceo(soup),
            'meta': parse_meta(soup),
            'salary': parse_salary(soup),
        }

    suggestions = parse_suggestions(soup)
    for suggestion in suggestions:
        if suggestion['exact']:
            # re-run the request against the exact match's page
            return get(company_uri=suggestion['uri'])
    return suggestions
Parses the results for a company search and return the results if is_direct_match. If no company is found, a list of suggestions are returned as dict. If one such recommendation is found to be an exact match, re-perform request for this exact match
entailment
def init_app(self, app):
    """
    Register this extension with the flask app.

    :param app: A flask application
    """
    # Save this so we can use it later in the extension
    if not hasattr(app, 'extensions'):  # pragma: no cover
        # first extension to register on this app
        app.extensions = {}
    app.extensions['flask-graphql-auth'] = self

    self._set_default__configuration_options(app)
Register this extension with the flask app. :param app: A flask application
entailment
def _set_default__configuration_options(app): """ Sets the default configuration options used by this extension """ app.config.setdefault('JWT_TOKEN_ARGUMENT_NAME', "token") # Name of token argument in GraphQL request resolver app.config.setdefault('JWT_REFRESH_TOKEN_ARGUMENT_NAME', "refresh_token") app.config.setdefault('JWT_ACCESS_TOKEN_EXPIRES', datetime.timedelta(minutes=15)) app.config.setdefault('JWT_REFRESH_TOKEN_EXPIRES', datetime.timedelta(days=30)) app.config.setdefault('JWT_SECRET_KEY', None) app.config.setdefault('JWT_IDENTITY_CLAIM', 'identity') app.config.setdefault('JWT_USER_CLAIMS', 'user_claims')
Sets the default configuration options used by this extension
entailment
def decode_jwt(encoded_token, secret, algorithm, identity_claim_key, user_claims_key):
    """
    Decodes an encoded JWT

    :param encoded_token: The encoded JWT string to decode
    :param secret: Secret key used to encode the JWT
    :param algorithm: Algorithm used to encode the JWT
    :param identity_claim_key: expected key that contains the identity
    :param user_claims_key: expected key that contains the user claims
    :return: Dictionary containing contents of the JWT
    """
    # jwt.decode verifies the registered time claims (exp/iat/nbf) —
    # presumably; confirm against the installed PyJWT version.
    data = jwt.decode(encoded_token, secret, algorithms=[algorithm])

    # Enforce the custom claims this extension relies on.
    for required in ('jti', identity_claim_key):
        if required not in data:
            raise JWTDecodeError("Missing claim: {}".format(required))
    if data.get('type') not in ('refresh', 'access'):
        raise JWTDecodeError("Missing or invalid claim: type")
    data.setdefault(user_claims_key, {})
    return data
Decodes an encoded JWT :param encoded_token: The encoded JWT string to decode :param secret: Secret key used to encode the JWT :param algorithm: Algorithm used to encode the JWT :param identity_claim_key: expected key that contains the identity :param user_claims_key: expected key that contains the user claims :return: Dictionary containing contents of the JWT
entailment
def get_jwt_data(token, token_type):
    """
    Decodes encoded JWT token by using extension setting and validates
    token type.

    :param token: The encoded JWT string to decode
    :param token_type: JWT type for type validation (access or refresh)
    :return: Dictionary containing contents of the JWT
    """
    config = current_app.config
    jwt_data = decode_jwt(
        encoded_token=token,
        secret=config['JWT_SECRET_KEY'],
        algorithm='HS256',
        identity_claim_key=config['JWT_IDENTITY_CLAIM'],
        user_claims_key=config['JWT_USER_CLAIMS'],
    )

    # reject e.g. a refresh token presented where an access token is
    # required (and vice versa)
    if jwt_data['type'] != token_type:
        raise WrongTokenError('Only {} tokens are allowed'.format(token_type))

    return jwt_data
Decodes encoded JWT token by using extension setting and validates token type :param token: The encoded JWT string to decode :param token_type: JWT type for type validation (access or refresh) :return: Dictionary containing contents of the JWT
entailment
def query_jwt_required(fn):
    """
    A decorator to protect a query resolver.

    If you decorate a resolver with this, it will ensure that the
    requester has a valid access token before allowing the resolver to
    be called. This does not check the freshness of the access token.

    On failure the resolver is not called; an AuthInfoField carrying the
    error message is returned instead.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        # (removed a leftover `print(args[0])` debug statement)
        token = kwargs.pop(current_app.config['JWT_TOKEN_ARGUMENT_NAME'])
        try:
            verify_jwt_in_argument(token)
        except Exception as e:
            # surface the auth failure to the GraphQL response
            return AuthInfoField(message=str(e))

        return fn(*args, **kwargs)
    return wrapper
A decorator to protect a query resolver. If you decorate an resolver with this, it will ensure that the requester has a valid access token before allowing the resolver to be called. This does not check the freshness of the access token.
entailment
def mutation_jwt_required(fn):
    """
    A decorator to protect a mutation.

    If you decorate a mutation with this, it will ensure that the
    requester has a valid access token before allowing the mutation to
    be called. This does not check the freshness of the access token.
    """
    @wraps(fn)
    def wrapper(cls, *args, **kwargs):
        token = kwargs.pop(current_app.config['JWT_TOKEN_ARGUMENT_NAME'])
        try:
            verify_jwt_in_argument(token)
        except Exception as e:
            # wrap the auth error in the mutation's payload type
            error = AuthInfoField(message=str(e))
            return cls(error)
        return fn(cls, *args, **kwargs)
    return wrapper
A decorator to protect a mutation. If you decorate a mutation with this, it will ensure that the requester has a valid access token before allowing the mutation to be called. This does not check the freshness of the access token.
entailment
def mutation_jwt_refresh_token_required(fn):
    """
    A decorator to protect a mutation.

    If you decorate a mutation with this, it will ensure that the
    requester has a valid refresh token before allowing the mutation to
    be called.
    """
    @wraps(fn)
    def wrapper(cls, *args, **kwargs):
        token = kwargs.pop(
            current_app.config['JWT_REFRESH_TOKEN_ARGUMENT_NAME'])
        try:
            verify_refresh_jwt_in_argument(token)
        except Exception as e:
            return cls(AuthInfoField(message=str(e)))
        # bug fix: forward `cls` like mutation_jwt_required does; the
        # original dropped it, shifting every positional argument.
        return fn(cls, *args, **kwargs)
    return wrapper
A decorator to protect a mutation. If you decorate a mutation with this, it will ensure that the requester has a valid refresh token before allowing the mutation to be called.
entailment
def _hid_enumerate(vendor_id=0, product_id=0):
    """
    Enumerates all the hid devices for VID:PID.

    If `vendor_id` is 0, then match any vendor id. Similarly, if
    `product_id` is 0, match any product id. If both are zero,
    enumerate all HID devices.

    Returns a list of `HIDDevice` objects.
    """
    head = hidapi.hid_enumerate(vendor_id, product_id)
    result = []
    # Walk the C linked list, copying each node into a Python object.
    # (removed a dead `ffi.new("struct hid_device_info*")` allocation
    # that was immediately overwritten by the list head)
    cur = head
    while cur != ffi.NULL:
        result.append(HIDDevice(cur))
        cur = cur.next
    # Free the C memory now that everything has been copied.
    hidapi.hid_free_enumeration(head)
    return result
Enumerates all the hid devices for VID:PID. Returns a list of `HIDDevice` objects. If vid is 0, then match any vendor id. Similarly, if pid is 0, match any product id. If both are zero, enumerate all HID devices.
entailment
def open(self):
    """
    Open the HID device for reading and writing.

    Raises HIDException if the device is already open or if the
    underlying hid_open_path call fails.
    """
    if self._is_open:
        raise HIDException("Failed to open device: HIDDevice already open")

    handle = hidapi.hid_open_path(self.path.encode('utf-8'))
    if not handle:
        raise HIDException("Failed to open device")
    self._device = handle
    self._is_open = True
Open the HID device for reading and writing.
entailment
def close(self):
    """
    Closes the hid device; a no-op when it is not open.
    """
    if not self._is_open:
        return
    self._is_open = False
    hidapi.hid_close(self._device)
Closes the hid device
entailment
def write(self, data, report_id=0):
    """
    Writes data to the HID device on its endpoint.

    Parameters:
        data: data to send on the HID endpoint
        report_id: the report ID to use.

    Returns:
        The number of bytes written including the report ID.
    """
    if not self._is_open:
        raise HIDException("HIDDevice not open")

    # hidapi expects the report ID prepended to the payload
    payload = bytes(bytearray([report_id]) + bytearray(data))
    cdata = ffi.new("const unsigned char[]", payload)
    written = hidapi.hid_write(self._device, cdata, len(payload))
    if written < 0:
        raise HIDException("Failed to write to HID device: " + str(written))
    return written
Writes data to the HID device on its endpoint. Parameters: data: data to send on the HID endpoint report_id: the report ID to use. Returns: The number of bytes written including the report ID.
entailment
def read(self, size=64, timeout=None):
    """Read an input report from the hid device on its endpoint.

    Parameters:
        size: number of bytes to read
        timeout: length to wait in milliseconds; None blocks indefinitely

    Returns:
        The HID report read from the device as a bytearray, or an empty
        list if nothing was read before the timeout. The first byte in
        the result will be the report ID if used.
    """
    if not self._is_open:
        raise HIDException("HIDDevice not open")
    cdata = ffi.new("unsigned char[]", [0] * size)
    # Fix: sentinel comparison uses identity (`is None`), not `== None`.
    if timeout is None:
        bytes_read = hidapi.hid_read(self._device, cdata, len(cdata))
    else:
        bytes_read = hidapi.hid_read_timeout(self._device, cdata, len(cdata), timeout)
    if bytes_read < 0:
        raise HIDException("Failed to read from HID device: " + str(bytes_read))
    if bytes_read == 0:
        return []
    return bytearray(cdata)
Read from the hid device on its endpoint. Parameters: size: number of bytes to read timeout: length to wait in milliseconds Returns: The HID report read from the device. The first byte in the result will be the report ID if used.
entailment
def is_connected(self):
    """Check whether the USB device is still connected."""
    if self._is_open:
        # A zero-length, zero-timeout read returns -1 once the device
        # has been unplugged.
        return hidapi.hid_read_timeout(self._device, ffi.NULL, 0, 0) != -1
    # Device not open: re-enumerate and look for our path.
    matches = Enumeration(vid=self.vendor_id, pid=self.product_id).find(path=self.path)
    return len(matches) != 0
Checks if the USB device is still connected
entailment
def send_feature_report(self, data, report_id=0x00):
    """Send a Feature report to a HID device.

    Feature reports are sent over the Control endpoint as a
    Set_Report transfer.

    Parameters:
        data: the data to send

    Returns:
        The actual number of bytes written.
    """
    if not self._is_open:
        raise HIDException("HIDDevice not open")
    # Report ID byte goes first, then the payload.
    payload = bytearray([report_id]) + bytearray(data)
    buf = ffi.new("const unsigned char[]", bytes(payload))
    written = hidapi.hid_send_feature_report(self._device, buf, len(payload))
    if written == -1:
        raise HIDException("Failed to send feature report to HID device")
    return written
Send a Feature report to a HID device. Feature reports are sent over the Control endpoint as a Set_Report transfer. Parameters: data The data to send Returns: This function returns the actual number of bytes written
entailment
def get_feature_report(self, size, report_id=0x00):
    """Get a feature report from a HID device.

    Feature reports are retrieved over the Control endpoint as a
    Get_Report transfer.

    Parameters:
        size: the number of bytes to read.
        report_id: the report id to read

    Returns:
        The bytes read from the HID report (report ID byte stripped).
    """
    # Fix: guard against use of a closed device, consistent with every
    # other read/write method on this class (was missing here).
    if not self._is_open:
        raise HIDException("HIDDevice not open")
    # One extra byte at the front for the report ID.
    cdata = ffi.new("unsigned char[]", bytes([0] * (size + 1)))
    cdata[0] = report_id
    bytes_read = hidapi.hid_get_feature_report(self._device, cdata, len(cdata))
    if bytes_read == -1:
        raise HIDException("Failed to get feature report from HID device")
    return bytearray(cdata[1:size + 1])
Get a feature report from a HID device. Feature reports are sent over the Control endpoint as a Get_Report transfer. Parameters: size The number of bytes to read. report_id The report id to read Returns: They bytes read from the HID report
entailment
def get_error(self):
    """Return the device's last error string, or None if there is none."""
    err = hidapi.hid_error(self._device)
    return None if err == ffi.NULL else ffi.string(err)
Get an error string from the device
entailment
def get_indexed_string(self, index):
    """Get the string with the given index from the device.

    Returns None when the device has no string at `index`; raises
    HIDException on failure.
    """
    max_len = 128
    str_buf = ffi.new("wchar_t[]", str(bytearray(max_len)))
    ret = hidapi.hid_get_indexed_string(self._device, index, str_buf, max_len)
    if ret < 0:
        # Bug fix: get_error() is a method of this wrapper object, not of
        # the raw cffi device handle; the original `self._device.get_error()`
        # raised AttributeError on the error path.
        raise HIDException(self.get_error())
    elif ret == 0:
        return None
    else:
        return ffi.string(str_buf).encode('utf-8')
Get the string with the given index from the device
entailment
def description(self):
    """Get a string describing the HID descriptor."""
    # Multi-line summary of the device's identifying fields; vendor and
    # product ids are rendered in hex ({:x}). The trailing backslash
    # inside the template suppresses the final newline.
    # NOTE(review): exact line breaks of the template were reconstructed
    # from a whitespace-mangled source — confirm against upstream.
    return \
"""HIDDevice:
    {} | {:x}:{:x} | {} | {} | {}
    release_number: {}
    usage_page: {}
    usage: {}
    interface_number: {}\
""".format(self.path, self.vendor_id, self.product_id,
           self.manufacturer_string, self.product_string,
           self.serial_number, self.release_number, self.usage_page,
           self.usage, self.interface_number)
Get a string describing the HID descriptor.
entailment
def find(self, vid=None, pid=None, serial=None, interface=None,
         path=None, release_number=None, manufacturer=None, product=None,
         usage=None, usage_page=None):
    """Filter the devices held by this `Enumeration` object.

    Optional arguments can be provided to filter the resulting list
    based on various parameters of the HID devices.

    Args:
        vid: filters by USB Vendor ID
        pid: filters by USB Product ID
        serial: filters by USB serial string (.iSerialNumber)
        interface: filters by interface number (bInterfaceNumber)
        release_number: filters by the USB release number (.bcdDevice)
        manufacturer: filters by USB manufacturer string (.iManufacturer)
        product: filters by USB product string (.iProduct)
        usage: filters by HID usage
        usage_page: filters by HID usage_page
        path: filters by HID API path.
    """
    def _matches(dev):
        # vid/pid treat both None and 0 as "match anything".
        if vid not in (0, None) and dev.vendor_id != vid:
            return False
        if pid not in (0, None) and dev.product_id != pid:
            return False
        # String filters apply only when truthy.
        if serial and dev.serial_number != serial:
            return False
        if path and dev.path != path:
            return False
        if manufacturer and dev.manufacturer_string != manufacturer:
            return False
        if product and dev.product_string != product:
            return False
        # Numeric filters distinguish 0 from None.
        if release_number is not None and dev.release_number != release_number:
            return False
        if interface is not None and dev.interface_number != interface:
            return False
        if usage is not None and dev.usage != usage:
            return False
        if usage_page is not None and dev.usage_page != usage_page:
            return False
        return True

    return [dev for dev in self.device_list if _matches(dev)]
Attempts to open a device in this `Enumeration` object. Optional arguments can be provided to filter the resulting list based on various parameters of the HID devices. Args: vid: filters by USB Vendor ID pid: filters by USB Product ID serial: filters by USB serial string (.iSerialNumber) interface: filters by interface number (bInterfaceNumber) release_number: filters by the USB release number (.bcdDevice) manufacturer: filters by USB manufacturer string (.iManufacturer) product: filters by USB product string (.iProduct) usage: filters by HID usage usage_page: filters by HID usage_page path: filters by HID API path.
entailment
def get_tracks(self):
    """Retrieve the album's tracks, fetching them on first access only.

    :return: List. Tracks of the current album
    """
    if not self._track_list:
        # Lazy lookup; the first result is the album itself, so skip it.
        found = itunespy.lookup(id=self.collection_id,
                                entity=itunespy.entities['song'])[1:]
        self._track_list.extend(found)
    return self._track_list
Retrieves all the tracks of the album if they haven't been retrieved yet :return: List. Tracks of the current album
entailment
def get_album_time(self, round_number=2):
    """Sum the lengths of all the album's tracks, caching the result.

    :param round_number: Int. Number of decimals to round the sum
    :return: Int. Sum of all the track's length
    """
    if not self._track_list:
        self.get_tracks()
    if self._album_time is None:
        # Start the sum at 0.0 so the cached value is always a float,
        # matching the original accumulator semantics.
        total = sum((t.get_track_time_minutes() for t in self._track_list), 0.0)
        self._album_time = round(total, round_number)
    return self._album_time
Retrieves all of the track's length and returns the sum of all :param round_number: Int. Number of decimals to round the sum :return: Int. Sum of all the track's length
entailment
def get_movies(self):
    """Retrieve every movie published by the artist.

    :return: List. Movies published by the artist
    """
    # Skip the first result, which is the artist record itself.
    results = itunespy.lookup(id=self.artist_id,
                              entity=itunespy.entities['movie'])
    return results[1:]
Retrieves all the movies published by the artist :return: List. Movies published by the artist
entailment
def solarcalcs(self):
    """Solar Calculation

    Mutates RSM, BEM, and UCM objects based on following parameters:
        UCM       # Urban Canopy - Building Energy Model object
        BEM       # Building Energy Model object
        simTime   # Simulation time object
        RSM       # Rural Site & Vertical Diffusion Model Object
        forc      # Forcing object
        parameter # Geo Param Object
        rural     # Rural road Element object

    Properties
        self.dir        # Direct sunlight
        self.dif        # Diffuse sunlight
        self.tanzen
        self.critOrient
        self.horSol
        self.Kw_term
        self.Kr_term
        self.mr
        self.mw
    """
    self.dir = self.forc.dir  # Direct sunlight (perpendicular to the sun's ray)
    self.dif = self.forc.dif  # Diffuse sunlight

    if self.dir + self.dif > 0.:
        self.logger.debug("{} Solar radiation > 0".format(__name__))

        # calculate zenith tangent, and critOrient solar angles
        self.solarangles()

        self.horSol = max(math.cos(self.zenith)*self.dir, 0.0)  # Direct horizontal radiation

        # Fractional terms for wall & road
        self.Kw_term = min(abs(1./self.UCM.canAspect*(0.5-self.critOrient/math.pi) \
            + 1/math.pi*self.tanzen*(1-math.cos(self.critOrient))), 1.)
        self.Kr_term = min(abs(2.*self.critOrient/math.pi \
            - (2/math.pi*self.UCM.canAspect*self.tanzen)*(1-math.cos(self.critOrient))),
            1-2*self.UCM.canAspect*self.Kw_term)

        # Direct and diffuse solar radiation
        self.bldSol = self.horSol*self.Kw_term + self.UCM.wallConf*self.dif  # Assume trees are shorter than buildings
        self.roadSol = self.horSol*self.Kr_term + self.UCM.roadConf*self.dif

        # Solar reflections. Add diffuse radiation from vegetation to alb_road if in season
        if self.simTime.month < self.parameter.vegStart or self.simTime.month > self.parameter.vegEnd:
            alb_road = self.UCM.road.albedo
        else:
            alb_road = self.UCM.road.albedo*(1.-self.UCM.road.vegCoverage) + self.parameter.vegAlbedo*self.UCM.road.vegCoverage

        # First set of reflections
        rr = alb_road * self.roadSol
        rw = self.UCM.alb_wall * self.bldSol

        # bounces
        fr = (1. - (1. - 2.*self.UCM.wallConf) * self.UCM.alb_wall + (1.
            - self.UCM.roadConf) \
            * self.UCM.wallConf * alb_road * self.UCM.alb_wall)

        # (1.0-self.UCM.roadConf) road to wall view
        self.mr = (rr + (1.0-self.UCM.roadConf) * alb_road *
            (rw + self.UCM.wallConf * self.UCM.alb_wall * rr)) / fr
        self.mw = (rw + self.UCM.wallConf * self.UCM.alb_wall * rr) / fr

        # Receiving solar, including bounces (W m-2)
        self.UCM.road.solRec = self.roadSol + (1 - self.UCM.roadConf)*self.mw

        for j in range(len(self.BEM)):
            self.BEM[j].roof.solRec = self.horSol + self.dif
            self.BEM[j].wall.solRec = self.bldSol + (1 - 2*self.UCM.wallConf) * self.mw + self.UCM.wallConf * self.mr

        self.rural.solRec = self.horSol + self.dif    # Solar received by rural
        self.UCM.SolRecRoof = self.horSol + self.dif  # Solar received by roof
        self.UCM.SolRecRoad = self.UCM.road.solRec    # Solar received by road
        self.UCM.SolRecWall = self.bldSol+(1-2*self.UCM.wallConf)*self.UCM.road.albedo*self.roadSol  # Solar received by wall

        # Vegetation heat (per m^2 of veg)
        self.UCM.treeSensHeat = (1-self.parameter.vegAlbedo)*(1-self.parameter.treeFLat)*self.UCM.SolRecRoad
        self.UCM.treeLatHeat = (1-self.parameter.vegAlbedo)*self.parameter.treeFLat*self.UCM.SolRecRoad

    else:  # No Sun
        self.logger.debug("{} Solar radiation = 0".format(__name__))
        self.UCM.road.solRec = 0.
        self.rural.solRec = 0.

        for j in range(len(self.BEM)):
            self.BEM[j].roof.solRec = 0.
            self.BEM[j].wall.solRec = 0.

        self.UCM.SolRecRoad = 0.  # Solar received by road
        self.UCM.SolRecRoof = 0.  # Solar received by roof
        self.UCM.SolRecWall = 0.  # Solar received by wall
        self.UCM.treeSensHeat = 0.
        self.UCM.treeLatHeat = 0.

    return self.rural, self.UCM, self.BEM
Solar Calculation Mutates RSM, BEM, and UCM objects based on following parameters: UCM # Urban Canopy - Building Energy Model object BEM # Building Energy Model object simTime # Simulation time bbject RSM # Rural Site & Vertical Diffusion Model Object forc # Forcing object parameter # Geo Param Object rural # Rural road Element object Properties self.dir # Direct sunlight self.dif # Diffuse sunlight self.tanzen self.critOrient self.horSol self.Kw_term self.Kr_term self.mr self.mw
entailment
def solarangles (self):
    """Calculation based on NOAA.

    Solves for zenith angle, tangent of zenithal angle, and critical
    canyon angle based on following parameters:
        canAspect  # aspect Ratio of canyon
        simTime    # simulation parameters
        RSM.lon    # longitude (deg)
        RSM.lat    # latitude (deg)
        RSM.GMT    # GMT hour correction

    Properties
        self.ut         # elapsed hours on current day
        self.ad         # fractional year in radians
        self.eqtime
        self.decsol     # solar declination angle
        self.zenith     # Angle between normal to earth's surface and sun position
        self.tanzen     # tangent of solar zenith angle
        self.critOrient # critical canyon angle for which solar radiation reaches the road
    """
    ln = self.RSM.lon  # NOTE(review): `ln` is never read again; `lon` below is used instead
    month = self.simTime.month
    day = self.simTime.day
    secDay = self.simTime.secDay  # Total elapsed seconds in simulation
    # total days for first of month
    # i.e [0,31,59,90,120,151,181,212,243,273,304,334]
    inobis = self.simTime.inobis
    canAspect = self.UCM.canAspect
    lon = self.RSM.lon
    lat = self.RSM.lat
    GMT = self.RSM.GMT

    self.ut = (24. + (int(secDay)/3600.%24.)) % 24.  # Get elapsed hours on current day

    # NOTE(review): `ibis` is filled below but never read afterwards — dead code?
    ibis = list(range(len(inobis)))

    for JI in range(1, 12):
        ibis[JI] = inobis[JI]+1

    date = day + inobis[month-1]-1  # Julian day of the year

    # divide circle by 365 days, multiply by elapsed days + hours
    self.ad = 2.0 * math.pi/365. * (date-1 + (self.ut-(12/24.)))  # Fractional year (radians)

    # NOAA equation of time approximation (minutes)
    self.eqtime = 229.18 * (0.000075+0.001868*math.cos(self.ad)-0.032077*math.sin(self.ad) - \
        0.01461*math.cos(2*self.ad)-0.040849*math.sin(2*self.ad))

    # Declination angle (angle of sun with equatorial plane)
    self.decsol = 0.006918-0.399912*math.cos(self.ad)+0.070257*math.sin(self.ad) \
        -0.006758*math.cos(2.*self.ad)+0.000907*math.sin(2.*self.ad) \
        -0.002697*math.cos(3.*self.ad)+0.00148 *math.sin(3.*self.ad)

    # True solar time and hour angle
    time_offset = self.eqtime - 4. * lon + 60 * GMT
    tst = secDay + time_offset * 60
    ha = (tst/4./60.-180.) * math.pi/180.
    zlat = lat * (math.pi/180.)  # change angle units to radians

    # Calculate zenith solar angle
    self.zenith = math.acos(math.sin(zlat)*math.sin(self.decsol) +
        math.cos(zlat)*math.cos(self.decsol)*math.cos(ha))

    # tangent of solar zenith angle; nudge away from pi/2 to avoid
    # tan() blowing up at exactly 90 degrees
    if abs(0.5*math.pi - self.zenith) < 1e-6:
        if 0.5*math.pi - self.zenith > 0.:
            self.tanzen = math.tan(0.5*math.pi-1e-6)
        elif 0.5*math.pi - self.zenith <= 0.:
            self.tanzen = math.tan(0.5*math.pi+1e-6)
    elif abs(self.zenith) < 1e-6:
        # lim x->0 tan(x) -> 0 which results in division by zero error
        # when calculating the critical canyon angle
        # so set tanzen to 1e-6 which will result in critical canyon angle = 90
        self.tanzen = 1e-6
    else:
        self.tanzen = math.tan(self.zenith)

    # critical canyon angle for which solar radiation reaches the road
    # has to do with street canyon orientation for given solar angle
    self.critOrient = math.asin(min(abs( 1./self.tanzen)/canAspect, 1. ))
Calculation based on NOAA. Solves for zenith angle, tangent of zenithal angle, and critical canyon angle based on following parameters: canAspect # aspect Ratio of canyon simTime # simulation parameters RSM.lon # longitude (deg) RSM.lat # latitude (deg) RSM.GMT # GMT hour correction Properties self.ut # elapsed hours on current day self.ad # fractional year in radians self.eqtime self.decsol # solar declination angle self.zenith # Angle between normal to earth's surface and sun position self.tanzen # tangente of solar zenithal angle self.critOrient # critical canyon angle for which solar radiation reaches the road
entailment
def psychrometrics (Tdb_in, w_in, P):
    """Modified version of Psychometrics by Tea Zakula, MIT Building Technology Lab.

    Input: Tdb_in, w_in, P
    Output: Tdb, w, phi, h, Tdp, v

    where:
        Tdb_in = [K] dry bulb temperature
        w_in = [kgv/kgda] Humidity Ratio
        P = [P] Atmospheric Station Pressure

        Tdb: [C] dry bulb temperature
        w: [kgv/kgda] Humidity Ratio
        phi: [Pw/Pws*100] relative humidity
        Tdp: [C] dew point temperature
        h: [J/kga] enthalpy
        v: [m3/kga] specific volume
    """
    # Constants from ASHRAE Fundamentals
    c_air = 1006.    # [J/kg] air heat capacity
    hlg = 2501000.   # [J/kg] latent heat
    cw = 1860.       # [J/kg]

    # Change units
    P = P / 1000.           # convert from Pa to kPa
    Tdb = Tdb_in - 273.15   # Kelvin -> Celsius
    w = w_in

    # phi (RH) calculation from Tdb and w
    Pw = (w * P) / (0.621945 + w)   # partial pressure of water vapor
    Pws = saturation_pressure(Tdb)  # Get saturation pressure for given Tdb
    phi = Pw / Pws * 100.0

    # enthalpy calculation from Tdb and w [J kga-1]
    h = c_air * Tdb + w * (hlg + cw * Tdb)

    # specific volume calculation from Tdb and w
    v = 0.287042 * (Tdb + 273.15) * (1 + 1.607858 * w) / P

    # dew point calculation from w; valid for Tdp between 0 C and 93 C
    pw_kpa = (w * P) / (0.621945 + w)  # water vapor partial pressure in kPa
    alpha = log(pw_kpa)
    Tdp = (6.54 + 14.526 * alpha + 0.7389 * alpha ** 2
           + 0.09486 * alpha ** 3 + 0.4569 * pw_kpa ** 0.1984)

    return Tdb, w, phi, h, Tdp, v
Modified version of Psychometrics by Tea Zakula MIT Building Technology Lab Input: Tdb_in, w_in, P Output: Tdb, w, phi, h, Tdp, v where: Tdb_in = [K] dry bulb temperature w_in = [kgv/kgda] Humidity Ratio P = [P] Atmospheric Station Pressure Tdb: [C] dry bulb temperature w: [kgv/kgda] Humidity Ratio phi: [Pw/Pws*100] relative humidity Tdp: [C] dew point temperature h: [J/kga] enthalpy v: [m3/kga] specific volume
entailment
def search(term, country='US', media='all', entity=None, attribute=None, limit=50):
    """Search the iTunes Store and return an array of result_item(s).

    :param term: String. The URL-encoded text string you want to search for.
        Example: Steven Wilson. The method will take care of spaces so you don't have to.
    :param country: String. The two-letter country code for the store you want to search.
        For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2
    :param media: String. The media type you want to search for. Example: music
    :param entity: String. The type of results you want returned, relative to the specified
        media type. Example: musicArtist.
        Full list: musicArtist, musicTrack, album, musicVideo, mix, song
    :param attribute: String. The attribute you want to search for in the stores,
        relative to the specified media type.
    :param limit: Integer. The number of search results you want the iTunes Store to return.
    :return: An array of result_item(s)
    """
    search_url = _url_search_builder(term, country, media, entity, attribute, limit)
    r = requests.get(search_url)

    try:
        # Parse the response body once instead of calling r.json() twice.
        body = r.json()
        json = body['results']
        result_count = body['resultCount']
    except Exception:
        # Fix: narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        raise ConnectionError(general_no_connection)

    if result_count == 0:
        raise LookupError(search_error + str(term))

    return _get_result_list(json)
Returns the result of the search of the specified term in an array of result_item(s) :param term: String. The URL-encoded text string you want to search for. Example: Steven Wilson. The method will take care of spaces so you don't have to. :param country: String. The two-letter country code for the store you want to search. For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2 :param media: String. The media type you want to search for. Example: music :param entity: String. The type of results you want returned, relative to the specified media type. Example: musicArtist. Full list: musicArtist, musicTrack, album, musicVideo, mix, song :param attribute: String. The attribute you want to search for in the stores, relative to the specified media type. :param limit: Integer. The number of search results you want the iTunes Store to return. :return: An array of result_item(s)
entailment
def lookup(id=None, artist_amg_id=None, upc=None, country='US', media='all', entity=None,
           attribute=None, limit=50):
    """Look up iTunes content by id, AMG artist id, or UPC and return result_item(s).

    :param id: String. iTunes ID of the artist, album, track, ebook or software
    :param artist_amg_id: String. All Music Guide ID of the artist
    :param upc: String. UPCs/EANs
    :param country: String. The two-letter country code for the store you want to search.
        For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2
    :param media: String. The media type you want to search for. Example: music
    :param entity: String. The type of results you want returned, relative to the specified
        media type. Example: musicArtist.
        Full list: musicArtist, musicTrack, album, musicVideo, mix, song
    :param attribute: String. The attribute you want to search for in the stores,
        relative to the specified media type.
    :param limit: Integer. The number of search results you want the iTunes Store to return.
    :return: An array of result_item(s)
    :raises ValueError: if none of id, artist_amg_id, upc is provided
    """
    # If none of the basic lookup arguments are provided, raise a ValueError
    if id is None and artist_amg_id is None and upc is None:
        raise ValueError(lookup_no_ids)

    lookup_url = _url_lookup_builder(id, artist_amg_id, upc, country, media,
                                     entity, attribute, limit)
    r = requests.get(lookup_url)

    try:
        # Parse the response body once instead of calling r.json() twice.
        body = r.json()
        json = body['results']
        result_count = body['resultCount']
    except Exception:
        # Fix: narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        raise ConnectionError(general_no_connection)

    if result_count == 0:
        raise LookupError(lookup_error)

    return _get_result_list(json)
Returns the result of the lookup of the specified id, artist_amg_id or upc in an array of result_item(s) :param id: String. iTunes ID of the artist, album, track, ebook or software :param artist_amg_id: String. All Music Guide ID of the artist :param upc: String. UPCs/EANs :param country: String. The two-letter country code for the store you want to search. For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2 :param media: String. The media type you want to search for. Example: music :param entity: String. The type of results you want returned, relative to the specified media type. Example: musicArtist. Full list: musicArtist, musicTrack, album, musicVideo, mix, song :param attribute: String. The attribute you want to search for in the stores, relative to the specified media type. :param limit: Integer. The number of search results you want the iTunes Store to return. :return: An array of result_item(s)
entailment
def _get_result_list(json):
    """Analyzes the provided JSON data and returns an array of result_item(s)
    based on its content

    :param json: Raw JSON data to analyze
    :return: An array of result_item(s) from the provided JSON data
    """
    result_list = []

    for item in json:
        # Items carrying a wrapperType are dispatched on
        # (wrapperType, kind/artistType/collectionType).
        if 'wrapperType' in item:
            # Music
            if item['wrapperType'] == 'artist' and item['artistType'] == 'Artist':
                music_artist_result = music_artist.MusicArtist(item)
                result_list.append(music_artist_result)
            elif item['wrapperType'] == 'collection' and item['collectionType'] == 'Album':
                music_album_result = music_album.MusicAlbum(item)
                result_list.append(music_album_result)
            elif item['wrapperType'] == 'track' and item['kind'] == 'song':
                music_track_result = track.Track(item)
                result_list.append(music_track_result)
            elif item['wrapperType'] == 'track' and item['kind'] == 'music-video':
                music_video_result = track.Track(item)
                result_list.append(music_video_result)
            # Movies
            elif item['wrapperType'] == 'artist' and item['artistType'] == 'Movie Artist':
                movie_artist_result = movie_artist.MovieArtist(item)
                result_list.append(movie_artist_result)
            elif item['wrapperType'] == 'track' and item['kind'] == 'feature-movie':
                movie_result = track.Track(item)
                result_list.append(movie_result)
            # Ebook Author
            elif item['wrapperType'] == 'artist' and item['artistType'] == 'Author':
                ebook_artist_result = ebook_artist.EbookArtist(item)
                result_list.append(ebook_artist_result)
            # Tv Shows
            elif item['wrapperType'] == 'collection' and item['collectionType'] == 'TV Season':
                tv_season_result = result_item.ResultItem(item)
                result_list.append(tv_season_result)
            elif item['wrapperType'] == 'track' and item['kind'] == 'tv-episode':
                tv_episode_result = track.Track(item)
                result_list.append(tv_episode_result)
            # Software
            elif item['wrapperType'] == 'software' and item['kind'] == 'software':
                software_result = result_item.ResultItem(item)
                result_list.append(software_result)
            elif item['wrapperType'] == 'software' and item['kind'] == 'mac-software':
                mac_software_result = result_item.ResultItem(item)
                result_list.append(mac_software_result)
        # Ebooks carry no wrapperType, only kind == 'ebook'.
        elif 'kind' in item and item['kind'] == 'ebook':
            ebook_result = result_item.ResultItem(item)
            result_list.append(ebook_result)
        # Anything unrecognized is wrapped as a generic ResultItem.
        else:
            unknown_result = result_item.ResultItem(item)
            result_list.append(unknown_result)

    return result_list
Analyzes the provided JSON data and returns an array of result_item(s) based on its content :param json: Raw JSON data to analyze :return: An array of result_item(s) from the provided JSON data
entailment
def _url_search_builder(term, country='US', media='all', entity=None, attribute=None, limit=50):
    """Build the URL used to perform a search request.

    :param term: String. The URL-encoded text string you want to search for.
        Example: Steven Wilson. The method will take care of spaces so you don't have to.
    :param country: String. The two-letter country code for the store you want to search.
        For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2
    :param media: String. The media type you want to search for. Example: music
    :param entity: String. The type of results you want returned, relative to the specified
        media type. Example: musicArtist.
        Full list: musicArtist, musicTrack, album, musicVideo, mix, song
    :param attribute: String. The attribute you want to search for in the stores,
        relative to the specified media type.
    :param limit: Integer. The number of search results you want the iTunes Store to return.
    :return: The built URL as a string
    """
    # Assemble the query-string pieces, then join once at the end.
    parts = [base_search_url, _parse_query(term)]
    parts.append(ampersand + parameters[1] + country)
    parts.append(ampersand + parameters[2] + media)
    if entity is not None:
        parts.append(ampersand + parameters[3] + entity)
    if attribute is not None:
        parts.append(ampersand + parameters[4] + attribute)
    parts.append(ampersand + parameters[5] + str(limit))
    return ''.join(parts)
Builds the URL to perform the search based on the provided data :param term: String. The URL-encoded text string you want to search for. Example: Steven Wilson. The method will take care of spaces so you don't have to. :param country: String. The two-letter country code for the store you want to search. For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2 :param media: String. The media type you want to search for. Example: music :param entity: String. The type of results you want returned, relative to the specified media type. Example: musicArtist. Full list: musicArtist, musicTrack, album, musicVideo, mix, song :param attribute: String. The attribute you want to search for in the stores, relative to the specified media type. :param limit: Integer. The number of search results you want the iTunes Store to return. :return: The built URL as a string
entailment
def _url_lookup_builder(id=None, artist_amg_id=None, upc=None, country='US', media='music',
                        entity=None, attribute=None, limit=50):
    """Build the URL used to perform a lookup request.

    :param id: String. iTunes ID of the artist, album, track, ebook or software
    :param artist_amg_id: String. All Music Guide ID of the artist
    :param upc: String. UPCs/EANs
    :param country: String. The two-letter country code for the store you want to search.
        For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2
    :param media: String. The media type you want to search for. Example: music
    :param entity: String. The type of results you want returned, relative to the specified
        media type. Example: musicArtist.
        Full list: musicArtist, musicTrack, album, musicVideo, mix, song
    :param attribute: String. The attribute you want to search for in the stores,
        relative to the specified media type.
    :param limit: Integer. The number of search results you want the iTunes Store to return.
    :return: The built URL as a string
    """
    built_url = base_lookup_url
    has_one_argument = False

    if id is not None:
        built_url += parameters[6] + str(id)
        has_one_argument = True

    if artist_amg_id is not None:
        # Fix: coerce to str in every branch. The original only applied
        # str() when this was the first id parameter, so a non-string
        # amg id combined with `id` raised TypeError on concatenation.
        if has_one_argument:
            built_url += ampersand + parameters[7] + str(artist_amg_id)
        else:
            built_url += parameters[7] + str(artist_amg_id)
            has_one_argument = True

    if upc is not None:
        if has_one_argument:
            built_url += ampersand + parameters[8] + str(upc)
        else:
            built_url += parameters[8] + str(upc)

    built_url += ampersand + parameters[1] + country
    built_url += ampersand + parameters[2] + media

    if entity is not None:
        built_url += ampersand + parameters[3] + entity
    if attribute is not None:
        built_url += ampersand + parameters[4] + attribute

    built_url += ampersand + parameters[5] + str(limit)
    return built_url
Builds the URL to perform the lookup based on the provided data :param id: String. iTunes ID of the artist, album, track, ebook or software :param artist_amg_id: String. All Music Guide ID of the artist :param upc: String. UPCs/EANs :param country: String. The two-letter country code for the store you want to search. For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2 :param media: String. The media type you want to search for. Example: music :param entity: String. The type of results you want returned, relative to the specified media type. Example: musicArtist. Full list: musicArtist, musicTrack, album, musicVideo, mix, song :param attribute: String. The attribute you want to search for in the stores, relative to the specified media type. :param limit: Integer. The number of search results you want the iTunes Store to return. :return: The built URL as a string
entailment
def get_albums(self):
    """Retrieve every album published by the artist.

    :return: List. Albums published by the artist
    """
    # Skip the first result, which is the artist record itself.
    results = itunespy.lookup(id=self.artist_id,
                              entity=itunespy.entities['album'])
    return results[1:]
Retrieves all the albums by the artist :return: List. Albums published by the artist
entailment
def str2fl(x):
    """Recursively convert strings (or nested lists of strings) to floats.

    Args:
        x: string or list of strings (lists may be nested)

    Returns:
        The converted list; "null" replaces empty strings, unparseable
        strings are passed through (with thousands separators stripped),
        and False is returned when the first element is neither a string
        nor a list (unsupported input). An empty input yields [].
    """
    def _to_fl(s_):
        # Deals with odd string imports and converts to float.
        if s_ == "":
            return "null"
        if "," in s_:
            s_ = s_.replace(",", "")  # strip thousands separators
        try:
            return float(s_)
        except ValueError:  # fix: narrowed from a bare `except`
            return s_

    # Robustness fix: an empty sequence previously raised IndexError
    # on x[0]; return an empty list instead.
    if not x:
        return []
    if isinstance(x[0], str):    # list of strings -> convert each
        return [_to_fl(s) for s in x]
    if isinstance(x[0], list):   # list of lists -> recurse
        return [str2fl(sub) for sub in x]
    return False
Recurses through lists and converts lists of string to float Args: x: string or list of strings
entailment
def procMat(materials, max_thickness, min_thickness): """ Processes material layer so that a material with single layer thickness is divided into two and material layer that is too thick is subdivided """ newmat = [] newthickness = [] k = materials.layerThermalCond Vhc = materials.layerVolHeat if len(materials.layerThickness) > 1: for j in range(len(materials.layerThickness)): # Break up each layer that's more than max thickness (0.05m) if materials.layerThickness[j] > max_thickness: nlayers = math.ceil(materials.layerThickness[j]/float(max_thickness)) for i in range(int(nlayers)): newmat.append(Material(k[j], Vhc[j], name=materials._name)) newthickness.append(materials.layerThickness[j]/float(nlayers)) # Material that's less then min_thickness is not added. elif materials.layerThickness[j] < min_thickness: print("WARNING: Material '{}' layer found too thin (<{:.2f}cm), ignored.").format( materials._name, min_thickness*100) else: newmat.append(Material(k[j], Vhc[j], name=materials._name)) newthickness.append(materials.layerThickness[j]) else: # Divide single layer into two (uwg assumes at least 2 layers) if materials.layerThickness[0] > max_thickness: nlayers = math.ceil(materials.layerThickness[0]/float(max_thickness)) for i in range(int(nlayers)): newmat.append(Material(k[0], Vhc[0], name=materials._name)) newthickness.append(materials.layerThickness[0]/float(nlayers)) # Material should be at least 1cm thick, so if we're here, # should give warning and stop. Only warning given for now. elif materials.layerThickness[0] < min_thickness*2: newthickness = [min_thickness/2., min_thickness/2.] newmat = [Material(k[0], Vhc[0], name=materials._name), Material(k[0], Vhc[0], name=materials._name)] print("WARNING: a thin (<2cm) single material '{}' layer found. May cause error.".format( materials._name)) else: newthickness = [materials.layerThickness[0]/2., materials.layerThickness[0]/2.] 
newmat = [Material(k[0], Vhc[0], name=materials._name), Material(k[0], Vhc[0], name=materials._name)] return newmat, newthickness
Processes material layer so that a material with single layer thickness is divided into two and material layer that is too thick is subdivided
entailment
def read_epw(self): """Section 2 - Read EPW file properties: self.climateDataPath self.newPathName self._header # header data self.epwinput # timestep data for weather self.lat # latitude self.lon # longitude self.GMT # GMT self.nSoil # Number of soil depths self.Tsoil # nSoil x 12 matrix for soil temperture (K) self.depth_soil # nSoil x 1 matrix for soil depth (m) """ # Make dir path to epw file self.climateDataPath = os.path.join(self.epwDir, self.epwFileName) # Open epw file and feed csv data to climate_data try: climate_data = utilities.read_csv(self.climateDataPath) except Exception as e: raise Exception("Failed to read epw file! {}".format(e.message)) # Read header lines (1 to 8) from EPW and ensure TMY2 format. self._header = climate_data[0:8] # Read weather data from EPW for each time step in weather file. (lines 8 - end) self.epwinput = climate_data[8:] # Read Lat, Long (line 1 of EPW) self.lat = float(self._header[0][6]) self.lon = float(self._header[0][7]) self.GMT = float(self._header[0][8]) # Read in soil temperature data (assumes this is always there) # ref: http://bigladdersoftware.com/epx/docs/8-2/auxiliary-programs/epw-csv-format-inout.html soilData = self._header[3] self.nSoil = int(soilData[1]) # Number of ground temperature depths self.Tsoil = utilities.zeros(self.nSoil, 12) # nSoil x 12 matrix for soil temperture (K) self.depth_soil = utilities.zeros(self.nSoil, 1) # nSoil x 1 matrix for soil depth (m) # Read monthly data for each layer of soil from EPW file for i in range(self.nSoil): self.depth_soil[i][0] = float(soilData[2 + (i*16)]) # get soil depth for each nSoil # Monthly data for j in range(12): # 12 months of soil T for specific depth self.Tsoil[i][j] = float(soilData[6 + (i*16) + j]) + 273.15 # Set new directory path for the moprhed EPW file self.newPathName = os.path.join(self.destinationDir, self.destinationFileName)
Section 2 - Read EPW file properties: self.climateDataPath self.newPathName self._header # header data self.epwinput # timestep data for weather self.lat # latitude self.lon # longitude self.GMT # GMT self.nSoil # Number of soil depths self.Tsoil # nSoil x 12 matrix for soil temperture (K) self.depth_soil # nSoil x 1 matrix for soil depth (m)
entailment
def read_input(self): """Section 3 - Read Input File (.m, file) Note: UWG_Matlab input files are xlsm, XML, .m, file. properties: self._init_param_dict # dictionary of simulation initialization parameters self.sensAnth # non-building sensible heat (W/m^2) self.SchTraffic # Traffice schedule self.BEM # list of BEMDef objects extracted from readDOE self.Sch # list of Schedule objects extracted from readDOE """ uwg_param_file_path = os.path.join(self.uwgParamDir, self.uwgParamFileName) if not os.path.exists(uwg_param_file_path): raise Exception("Param file: '{}' does not exist.".format(uwg_param_file_path)) # Open .uwg file and feed csv data to initializeDataFile try: uwg_param_data = utilities.read_csv(uwg_param_file_path) except Exception as e: raise Exception("Failed to read .uwg file! {}".format(e.message)) # The initialize.uwg is read with a dictionary so that users changing # line endings or line numbers doesn't make reading input incorrect self._init_param_dict = {} count = 0 while count < len(uwg_param_data): row = uwg_param_data[count] row = [row[i].replace(" ", "") for i in range(len(row))] # strip white spaces # Optional parameters might be empty so handle separately is_optional_parameter = ( row != [] and ( row[0] == "albRoof" or row[0] == "vegRoof" or row[0] == "glzR" or row[0] == "hvac" or row[0] == "albWall" or row[0] == "SHGC" ) ) try: if row == [] or "#" in row[0]: count += 1 continue elif row[0] == "SchTraffic": # SchTraffic: 3 x 24 matrix trafficrows = uwg_param_data[count+1:count+4] self._init_param_dict[row[0]] = [utilities.str2fl(r[:24]) for r in trafficrows] count += 4 elif row[0] == "bld": # bld: 17 x 3 matrix bldrows = uwg_param_data[count+1:count+17] self._init_param_dict[row[0]] = [utilities.str2fl(r[:3]) for r in bldrows] count += 17 elif is_optional_parameter: self._init_param_dict[row[0]] = float(row[1]) if row[1] != "" else None count += 1 else: self._init_param_dict[row[0]] = float(row[1]) count += 1 except ValueError: print("Error 
while reading parameter at {} {}".format(count, row)) ipd = self._init_param_dict # Define Simulation and Weather parameters if self.Month is None: self.Month = ipd['Month'] if self.Day is None: self.Day = ipd['Day'] if self.nDay is None: self.nDay = ipd['nDay'] if self.dtSim is None: self.dtSim = ipd['dtSim'] if self.dtWeather is None: self.dtWeather = ipd['dtWeather'] # HVAC system and internal laod if self.autosize is None: self.autosize = ipd['autosize'] if self.sensOcc is None: self.sensOcc = ipd['sensOcc'] if self.LatFOcc is None: self.LatFOcc = ipd['LatFOcc'] if self.RadFOcc is None: self.RadFOcc = ipd['RadFOcc'] if self.RadFEquip is None: self.RadFEquip = ipd['RadFEquip'] if self.RadFLight is None: self.RadFLight = ipd['RadFLight'] # Define Urban microclimate parameters if self.h_ubl1 is None: self.h_ubl1 = ipd['h_ubl1'] if self.h_ubl2 is None: self.h_ubl2 = ipd['h_ubl2'] if self.h_ref is None: self.h_ref = ipd['h_ref'] if self.h_temp is None: self.h_temp = ipd['h_temp'] if self.h_wind is None: self.h_wind = ipd['h_wind'] if self.c_circ is None: self.c_circ = ipd['c_circ'] if self.c_exch is None: self.c_exch = ipd['c_exch'] if self.maxDay is None: self.maxDay = ipd['maxDay'] if self.maxNight is None: self.maxNight = ipd['maxNight'] if self.windMin is None: self.windMin = ipd['windMin'] if self.h_obs is None: self.h_obs = ipd['h_obs'] # Urban characteristics if self.bldHeight is None: self.bldHeight = ipd['bldHeight'] if self.h_mix is None: self.h_mix = ipd['h_mix'] if self.bldDensity is None: self.bldDensity = ipd['bldDensity'] if self.verToHor is None: self.verToHor = ipd['verToHor'] if self.charLength is None: self.charLength = ipd['charLength'] if self.alb_road is None: self.alb_road = ipd['albRoad'] if self.d_road is None: self.d_road = ipd['dRoad'] if self.sensAnth is None: self.sensAnth = ipd['sensAnth'] # if self.latAnth is None: self.latAnth = ipd['latAnth'] # Not used, taken out by JH. 
# climate Zone if self.zone is None: self.zone = ipd['zone'] # Vegetation parameters if self.vegCover is None: self.vegCover = ipd['vegCover'] if self.treeCoverage is None: self.treeCoverage = ipd['treeCoverage'] if self.vegStart is None: self.vegStart = ipd['vegStart'] if self.vegEnd is None: self.vegEnd = ipd['vegEnd'] if self.albVeg is None: self.albVeg = ipd['albVeg'] if self.rurVegCover is None: self.rurVegCover = ipd['rurVegCover'] if self.latGrss is None: self.latGrss = ipd['latGrss'] if self.latTree is None: self.latTree = ipd['latTree'] # Define Traffic schedule if self.SchTraffic is None: self.SchTraffic = ipd['SchTraffic'] # Define Road (Assume 0.5m of asphalt) if self.kRoad is None: self.kRoad = ipd['kRoad'] if self.cRoad is None: self.cRoad = ipd['cRoad'] # Building stock fraction if self.bld is None: self.bld = ipd['bld'] # Optional parameters if self.albRoof is None: self.albRoof = ipd['albRoof'] if self.vegRoof is None: self.vegRoof = ipd['vegRoof'] if self.glzR is None: self.glzR = ipd['glzR'] if self.albWall is None: self.albWall = ipd['albWall'] if self.SHGC is None: self.SHGC = ipd['SHGC']
Section 3 - Read Input File (.m, file) Note: UWG_Matlab input files are xlsm, XML, .m, file. properties: self._init_param_dict # dictionary of simulation initialization parameters self.sensAnth # non-building sensible heat (W/m^2) self.SchTraffic # Traffice schedule self.BEM # list of BEMDef objects extracted from readDOE self.Sch # list of Schedule objects extracted from readDOE
entailment
def set_input(self): """ Set inputs from .uwg input file if not already defined, the check if all the required input parameters are there. """ # If a uwgParamFileName is set, then read inputs from .uwg file. # User-defined class properties will override the inputs from the .uwg file. if self.uwgParamFileName is not None: print("\nReading uwg file input.") self.read_input() else: print("\nNo .uwg file input.") self.check_required_inputs() # Modify zone to be used as python index self.zone = int(self.zone)-1
Set inputs from .uwg input file if not already defined, the check if all the required input parameters are there.
entailment
def init_BEM_obj(self): """ Define BEM for each DOE type (read the fraction) self.BEM # list of BEMDef objects self.r_glaze # Glazing ratio for total building stock self.SHGC # SHGC addition for total building stock self.alb_wall # albedo wall addition for total building stock """ if not os.path.exists(self.readDOE_file_path): raise Exception("readDOE.pkl file: '{}' does not exist.".format(readDOE_file_path)) readDOE_file = open(self.readDOE_file_path, 'rb') # open pickle file in binary form refDOE = pickle.load(readDOE_file) refBEM = pickle.load(readDOE_file) refSchedule = pickle.load(readDOE_file) readDOE_file.close() # Define building energy models k = 0 self.r_glaze_total = 0. # Glazing ratio for total building stock self.SHGC_total = 0. # SHGC addition for total building stock self.alb_wall_total = 0. # albedo wall addition for total building stock h_floor = self.flr_h or 3.05 # average floor height total_urban_bld_area = math.pow(self.charLength, 2)*self.bldDensity * \ self.bldHeight/h_floor # total building floor area area_matrix = utilities.zeros(16, 3) self.BEM = [] # list of BEMDef objects self.Sch = [] # list of Schedule objects for i in range(16): # 16 building types for j in range(3): # 3 built eras if self.bld[i][j] > 0.: # Add to BEM list self.BEM.append(refBEM[i][j][self.zone]) self.BEM[k].frac = self.bld[i][j] self.BEM[k].fl_area = self.bld[i][j] * total_urban_bld_area # Overwrite with optional parameters if provided if self.glzR: self.BEM[k].building.glazingRatio = self.glzR if self.albRoof: self.BEM[k].roof.albedo = self.albRoof if self.vegRoof: self.BEM[k].roof.vegCoverage = self.vegRoof if self.SHGC: self.BEM[k].building.shgc = self.SHGC if self.albWall: self.BEM[k].wall.albedo = self.albWall if self.flr_h: self.BEM[k].building.floorHeight = self.flr_h # Keep track of total urban r_glaze, SHGC, and alb_wall for UCM model self.r_glaze_total += self.BEM[k].frac * self.BEM[k].building.glazingRatio self.SHGC_total += self.BEM[k].frac * 
self.BEM[k].building.shgc self.alb_wall_total += self.BEM[k].frac * self.BEM[k].wall.albedo # Add to schedule list self.Sch.append(refSchedule[i][j][self.zone]) k += 1
Define BEM for each DOE type (read the fraction) self.BEM # list of BEMDef objects self.r_glaze # Glazing ratio for total building stock self.SHGC # SHGC addition for total building stock self.alb_wall # albedo wall addition for total building stock
entailment
def init_input_obj(self): """Section 4 - Create uwg objects from input parameters self.simTime # simulation time parameter obj self.weather # weather obj for simulation time period self.forcIP # Forcing obj self.forc # Empty forcing obj self.geoParam # geographic parameters obj self.RSM # Rural site & vertical diffusion model obj self.USM # Urban site & vertical diffusion model obj self.UCM # Urban canopy model obj self.UBL # Urban boundary layer model self.road # urban road element self.rural # rural road element self.soilindex1 # soil index for urban rsoad depth self.soilindex2 # soil index for rural road depth self.Sch # list of Schedule objects """ climate_file_path = os.path.join(self.epwDir, self.epwFileName) self.simTime = SimParam(self.dtSim, self.dtWeather, self.Month, self.Day, self.nDay) # simulation time parametrs # weather file data for simulation time period self.weather = Weather(climate_file_path, self.simTime.timeInitial, self.simTime.timeFinal) self.forcIP = Forcing(self.weather.staTemp, self.weather) # initialized Forcing class self.forc = Forcing() # empty forcing class # Initialize geographic Param and Urban Boundary Layer Objects nightStart = 18. # arbitrary values for begin/end hour for night setpoint nightEnd = 8. maxdx = 250. # max dx (m) self.geoParam = Param(self.h_ubl1, self.h_ubl2, self.h_ref, self.h_temp, self.h_wind, self.c_circ, self.maxDay, self.maxNight, self.latTree, self.latGrss, self.albVeg, self.vegStart, self.vegEnd, nightStart, nightEnd, self.windMin, self.WGMAX, self.c_exch, maxdx, self.G, self.CP, self.VK, self.R, self.RV, self.LV, math.pi, self.SIGMA, self.WATERDENS, self.LVTT, self.TT, self.ESTT, self.CL, self.CPV, self.B, self.CM, self.COLBURN) self.UBL = UBLDef( 'C', self.charLength, self.weather.staTemp[0], maxdx, self.geoParam.dayBLHeight, self.geoParam.nightBLHeight) # Defining road emis = 0.93 asphalt = Material(self.kRoad, self.cRoad, 'asphalt') road_T_init = 293. 
road_horizontal = 1 # fraction of surface vegetation coverage road_veg_coverage = min(self.vegCover/(1-self.bldDensity), 1.) # define road layers road_layer_num = int(math.ceil(self.d_road/0.05)) # 0.5/0.05 ~ 10 x 1 matrix of 0.05 thickness thickness_vector = [0.05 for r in range(road_layer_num)] material_vector = [asphalt for r in range(road_layer_num)] self.road = Element(self.alb_road, emis, thickness_vector, material_vector, road_veg_coverage, road_T_init, road_horizontal, name="urban_road") self.rural = copy.deepcopy(self.road) self.rural.vegCoverage = self.rurVegCover self.rural._name = "rural_road" # Reference site class (also include VDM) self.RSM = RSMDef(self.lat, self.lon, self.GMT, self.h_obs, self.weather.staTemp[0], self.weather.staPres[0], self.geoParam, self.z_meso_dir_path) self.USM = RSMDef(self.lat, self.lon, self.GMT, self.bldHeight/10., self.weather.staTemp[0], self.weather.staPres[0], self.geoParam, self.z_meso_dir_path) T_init = self.weather.staTemp[0] H_init = self.weather.staHum[0] self.UCM = UCMDef(self.bldHeight, self.bldDensity, self.verToHor, self.treeCoverage, self.sensAnth, self.latAnth, T_init, H_init, self.weather.staUmod[0], self.geoParam, self.r_glaze_total, self.SHGC_total, self.alb_wall_total, self.road) self.UCM.h_mix = self.h_mix # Define Road Element & buffer to match ground temperature depth roadMat, newthickness = procMat(self.road, self.MAXTHICKNESS, self.MINTHICKNESS) for i in range(self.nSoil): # if soil depth is greater then the thickness of the road # we add new slices of soil at max thickness until road is greater or equal is_soildepth_equal = self.is_near_zero(self.depth_soil[i][0] - sum(newthickness), 1e-15) if is_soildepth_equal or (self.depth_soil[i][0] > sum(newthickness)): while self.depth_soil[i][0] > sum(newthickness): newthickness.append(self.MAXTHICKNESS) roadMat.append(self.SOIL) self.soilindex1 = i break self.road = Element(self.road.albedo, self.road.emissivity, newthickness, roadMat, 
self.road.vegCoverage, self.road.layerTemp[0], self.road.horizontal, self.road._name) # Define Rural Element ruralMat, newthickness = procMat(self.rural, self.MAXTHICKNESS, self.MINTHICKNESS) for i in range(self.nSoil): # if soil depth is greater then the thickness of the road # we add new slices of soil at max thickness until road is greater or equal is_soildepth_equal = self.is_near_zero(self.depth_soil[i][0] - sum(newthickness), 1e-15) if is_soildepth_equal or (self.depth_soil[i][0] > sum(newthickness)): while self.depth_soil[i][0] > sum(newthickness): newthickness.append(self.MAXTHICKNESS) ruralMat.append(self.SOIL) self.soilindex2 = i break self.rural = Element(self.rural.albedo, self.rural.emissivity, newthickness, ruralMat, self.rural.vegCoverage, self.rural.layerTemp[0], self.rural.horizontal, self.rural._name)
Section 4 - Create uwg objects from input parameters self.simTime # simulation time parameter obj self.weather # weather obj for simulation time period self.forcIP # Forcing obj self.forc # Empty forcing obj self.geoParam # geographic parameters obj self.RSM # Rural site & vertical diffusion model obj self.USM # Urban site & vertical diffusion model obj self.UCM # Urban canopy model obj self.UBL # Urban boundary layer model self.road # urban road element self.rural # rural road element self.soilindex1 # soil index for urban rsoad depth self.soilindex2 # soil index for rural road depth self.Sch # list of Schedule objects
entailment
def hvac_autosize(self): """ Section 6 - HVAC Autosizing (unlimited cooling & heating) """ for i in range(len(self.BEM)): if self.is_near_zero(self.autosize) == False: self.BEM[i].building.coolCap = 9999. self.BEM[i].building.heatCap = 9999.
Section 6 - HVAC Autosizing (unlimited cooling & heating)
entailment
def simulate(self): """ Section 7 - uwg main section self.N # Total hours in simulation self.ph # per hour self.dayType # 3=Sun, 2=Sat, 1=Weekday self.ceil_time_step # simulation timestep (dt) fitted to weather file timestep # Output of object instance vector self.WeatherData # Nx1 vector of forc instance self.UCMData # Nx1 vector of UCM instance self.UBLData # Nx1 vector of UBL instance self.RSMData # Nx1 vector of RSM instance self.USMData # Nx1 vector of USM instance """ self.N = int(self.simTime.days * 24) # total number of hours in simulation n = 0 # weather time step counter self.ph = self.simTime.dt/3600. # dt (simulation time step) in hours # Data dump variables time = range(self.N) self.WeatherData = [None for x in range(self.N)] self.UCMData = [None for x in range(self.N)] self.UBLData = [None for x in range(self.N)] self.RSMData = [None for x in range(self.N)] self.USMData = [None for x in range(self.N)] print('\nSimulating new temperature and humidity values for {} days from {}/{}.\n'.format( int(self.nDay), int(self.Month), int(self.Day))) self.logger.info("Start simulation") for it in range(1, self.simTime.nt, 1): # for every simulation time-step (i.e 5 min) defined by uwg # Update water temperature (estimated) if self.nSoil < 3: # correction to original matlab code # for BUBBLE/CAPITOUL/Singapore only self.forc.deepTemp = sum(self.forcIP.temp)/float(len(self.forcIP.temp)) self.forc.waterTemp = sum( self.forcIP.temp)/float(len(self.forcIP.temp)) - 10. # for BUBBLE/CAPITOUL/Singapore only else: # soil temperature by depth, by month self.forc.deepTemp = self.Tsoil[self.soilindex1][self.simTime.month-1] self.forc.waterTemp = self.Tsoil[2][self.simTime.month-1] # There's probably a better way to update the weather... 
self.simTime.UpdateDate() self.logger.info("\n{0} m={1}, d={2}, h={3}, s={4}".format( __name__, self.simTime.month, self.simTime.day, self.simTime.secDay/3600., self.simTime.secDay)) # simulation time increment raised to weather time step self.ceil_time_step = int(math.ceil(it * self.ph))-1 # minus one to be consistent with forcIP list index # Updating forcing instance # horizontal Infrared Radiation Intensity (W m-2) self.forc.infra = self.forcIP.infra[self.ceil_time_step] # wind speed (m s-1) self.forc.wind = max(self.forcIP.wind[self.ceil_time_step], self.geoParam.windMin) self.forc.uDir = self.forcIP.uDir[self.ceil_time_step] # wind direction # specific humidty (kg kg-1) self.forc.hum = self.forcIP.hum[self.ceil_time_step] self.forc.pres = self.forcIP.pres[self.ceil_time_step] # Pressure (Pa) self.forc.temp = self.forcIP.temp[self.ceil_time_step] # air temperature (C) self.forc.rHum = self.forcIP.rHum[self.ceil_time_step] # Relative humidity (%) self.forc.prec = self.forcIP.prec[self.ceil_time_step] # Precipitation (mm h-1) # horizontal solar diffuse radiation (W m-2) self.forc.dif = self.forcIP.dif[self.ceil_time_step] # normal solar direct radiation (W m-2) self.forc.dir = self.forcIP.dir[self.ceil_time_step] # Canyon humidity (absolute) same as rural self.UCM.canHum = copy.copy(self.forc.hum) # Update solar flux self.solar = SolarCalcs(self.UCM, self.BEM, self.simTime, self.RSM, self.forc, self.geoParam, self.rural) self.rural, self.UCM, self.BEM = self.solar.solarcalcs() # Update building & traffic schedule # Assign day type (1 = weekday, 2 = sat, 3 = sun/other) if self.is_near_zero(self.simTime.julian % 7): self.dayType = 3 # Sunday elif self.is_near_zero(self.simTime.julian % 7 - 6.): self.dayType = 2 # Saturday else: self.dayType = 1 # Weekday # Update anthropogenic heat load for each hour (building & UCM) self.UCM.sensAnthrop = self.sensAnth * (self.SchTraffic[self.dayType-1][self.simTime.hourDay]) # Update the energy components for building types 
defined in initialize.uwg for i in range(len(self.BEM)): # Set temperature self.BEM[i].building.coolSetpointDay = self.Sch[i].Cool[self.dayType - 1][self.simTime.hourDay] + 273.15 # add from temperature schedule for cooling self.BEM[i].building.coolSetpointNight = self.BEM[i].building.coolSetpointDay self.BEM[i].building.heatSetpointDay = self.Sch[i].Heat[self.dayType - 1][self.simTime.hourDay] + 273.15 # add from temperature schedule for heating self.BEM[i].building.heatSetpointNight = self.BEM[i].building.heatSetpointDay # Internal Heat Load Schedule (W/m^2 of floor area for Q) self.BEM[i].Elec = self.Sch[i].Qelec * self.Sch[i].Elec[self.dayType - 1][self.simTime.hourDay] # Qelec x elec fraction for day self.BEM[i].Light = self.Sch[i].Qlight * self.Sch[i].Light[self.dayType - 1][self.simTime.hourDay] # Qlight x light fraction for day self.BEM[i].Nocc = self.Sch[i].Nocc * self.Sch[i].Occ[self.dayType - 1][self.simTime.hourDay] # Number of occupants x occ fraction for day # Sensible Q occupant * fraction occupant sensible Q * number of occupants self.BEM[i].Qocc = self.sensOcc * (1 - self.LatFOcc) * self.BEM[i].Nocc # SWH and ventilation schedule self.BEM[i].SWH = self.Sch[i].Vswh * self.Sch[i].SWH[self.dayType - 1][self.simTime.hourDay] # litres per hour x SWH fraction for day # m^3/s/m^2 of floor self.BEM[i].building.vent = self.Sch[i].Vent self.BEM[i].Gas = self.Sch[i].Qgas * self.Sch[i].Gas[self.dayType - 1][self.simTime.hourDay] # Gas Equip Schedule, per m^2 of floor # This is quite messy, should update # Update internal heat and corresponding fractional loads intHeat = self.BEM[i].Light + self.BEM[i].Elec + self.BEM[i].Qocc # W/m2 from light, electricity, occupants self.BEM[i].building.intHeatDay = intHeat self.BEM[i].building.intHeatNight = intHeat # fraction of radiant heat from light and equipment of whole internal heat self.BEM[i].building.intHeatFRad = ( self.RadFLight * self.BEM[i].Light + self.RadFEquip * self.BEM[i].Elec) / intHeat # fraction of 
latent heat (from occupants) of whole internal heat self.BEM[i].building.intHeatFLat = self.LatFOcc * \ self.sensOcc * self.BEM[i].Nocc/intHeat # Update envelope temperature layers self.BEM[i].T_wallex = self.BEM[i].wall.layerTemp[0] self.BEM[i].T_wallin = self.BEM[i].wall.layerTemp[-1] self.BEM[i].T_roofex = self.BEM[i].roof.layerTemp[0] self.BEM[i].T_roofin = self.BEM[i].roof.layerTemp[-1] # Update rural heat fluxes & update vertical diffusion model (VDM) self.rural.infra = self.forc.infra - self.rural.emissivity * self.SIGMA * \ self.rural.layerTemp[0]**4. # Infrared radiation from rural road self.rural.SurfFlux(self.forc, self.geoParam, self.simTime, self.forc.hum, self.forc.temp, self.forc.wind, 2., 0.) self.RSM.VDM(self.forc, self.rural, self.geoParam, self.simTime) # Calculate urban heat fluxes, update UCM & UBL self.UCM, self.UBL, self.BEM = urbflux( self.UCM, self.UBL, self.BEM, self.forc, self.geoParam, self.simTime, self.RSM) self.UCM.UCModel(self.BEM, self.UBL.ublTemp, self.forc, self.geoParam) self.UBL.UBLModel(self.UCM, self.RSM, self.rural, self.forc, self.geoParam, self.simTime) """ # Experimental code to run diffusion model in the urban area # N.B Commented out in python uwg because computed wind speed in # urban VDM: y = =0.84*ln((2-x/20)/0.51) results in negative log # for building heights >= 40m. 
Uroad = copy.copy(self.UCM.road) Uroad.sens = copy.copy(self.UCM.sensHeat) Uforc = copy.copy(self.forc) Uforc.wind = copy.copy(self.UCM.canWind) Uforc.temp = copy.copy(self.UCM.canTemp) self.USM.VDM(Uforc,Uroad,self.geoParam,self.simTime) """ self.logger.info("dbT = {}".format(self.UCM.canTemp-273.15)) if n > 0: logging.info("dpT = {}".format(self.UCM.Tdp)) logging.info("RH = {}".format(self.UCM.canRHum)) if self.is_near_zero(self.simTime.secDay % self.simTime.timePrint) and n < self.N: self.logger.info("{0} ----sim time step = {1}----\n\n".format(__name__, n)) self.WeatherData[n] = copy.copy(self.forc) _Tdb, _w, self.UCM.canRHum, _h, self.UCM.Tdp, _v = psychrometrics( self.UCM.canTemp, self.UCM.canHum, self.forc.pres) self.UBLData[n] = copy.copy(self.UBL) self.UCMData[n] = copy.copy(self.UCM) self.RSMData[n] = copy.copy(self.RSM) self.logger.info("dbT = {}".format(self.UCMData[n].canTemp-273.15)) self.logger.info("dpT = {}".format(self.UCMData[n].Tdp)) self.logger.info("RH = {}".format(self.UCMData[n].canRHum)) n += 1
Section 7 - uwg main section self.N # Total hours in simulation self.ph # per hour self.dayType # 3=Sun, 2=Sat, 1=Weekday self.ceil_time_step # simulation timestep (dt) fitted to weather file timestep # Output of object instance vector self.WeatherData # Nx1 vector of forc instance self.UCMData # Nx1 vector of UCM instance self.UBLData # Nx1 vector of UBL instance self.RSMData # Nx1 vector of RSM instance self.USMData # Nx1 vector of USM instance
entailment
def write_epw(self): """ Section 8 - Writing new EPW file """ epw_prec = self.epw_precision # precision of epw file input for iJ in range(len(self.UCMData)): # [iJ+self.simTime.timeInitial-8] = increments along every weather timestep in epw # [6 to 21] = column data of epw self.epwinput[iJ+self.simTime.timeInitial-8][6] = "{0:.{1}f}".format( self.UCMData[iJ].canTemp - 273.15, epw_prec) # dry bulb temperature [?C] # dew point temperature [?C] self.epwinput[iJ+self.simTime.timeInitial - 8][7] = "{0:.{1}f}".format(self.UCMData[iJ].Tdp, epw_prec) # relative humidity [%] self.epwinput[iJ+self.simTime.timeInitial - 8][8] = "{0:.{1}f}".format(self.UCMData[iJ].canRHum, epw_prec) self.epwinput[iJ+self.simTime.timeInitial-8][21] = "{0:.{1}f}".format( self.WeatherData[iJ].wind, epw_prec) # wind speed [m/s] # Writing new EPW file epw_new_id = open(self.newPathName, "w") for i in range(8): new_epw_line = '{}\n'.format(reduce(lambda x, y: x+","+y, self._header[i])) epw_new_id.write(new_epw_line) for i in range(len(self.epwinput)): printme = "" for ei in range(34): printme += "{}".format(self.epwinput[i][ei]) + ',' printme = printme + "{}".format(self.epwinput[i][ei]) new_epw_line = "{0}\n".format(printme) epw_new_id.write(new_epw_line) epw_new_id.close() print("New climate file '{}' is generated at {}.".format( self.destinationFileName, self.destinationDir))
Section 8 - Writing new EPW file
entailment
def SurfFlux(self,forc,parameter,simTime,humRef,tempRef,windRef,boundCond,intFlux): """ Calculate net heat flux, and update element layer temperatures """ # Calculated per unit area (m^2) dens = forc.pres/(1000*0.287042*tempRef*(1.+1.607858*humRef)) # air density (kgd m-3) self.aeroCond = 5.8 + 3.7 * windRef # Convection coef (ref: uwg, eq. 12)) if (self.horizontal): # For roof, mass, road # Evaporation (m s-1), Film water & soil latent heat if not self.is_near_zero(self.waterStorage) and self.waterStorage > 0.0: # N.B In the current uwg code, latent heat from evapotranspiration, stagnant water, # or anthropogenic sources is not modelled due to the difficulty of validation, and # lack of reliability of precipitation data from EPW files.Therefore this condition # is never run because all elements have had their waterStorage hardcoded to 0. qtsat = self.qsat([self.layerTemp[0]],[forc.pres],parameter)[0] eg = self.aeroCond*parameter.colburn*dens*(qtsat-humRef)/parameter.waterDens/parameter.cp self.waterStorage = min(self.waterStorage + simTime.dt*(forc.prec-eg),parameter.wgmax) self.waterStorage = max(self.waterStorage,0.) # (m) else: eg = 0. soilLat = eg*parameter.waterDens*parameter.lv # Winter, no veg if simTime.month < parameter.vegStart and simTime.month > parameter.vegEnd: self.solAbs = (1.-self.albedo)*self.solRec # (W m-2) vegLat = 0. vegSens = 0. else: # Summer, veg self.solAbs = ((1.-self.vegCoverage)*(1.-self.albedo)+self.vegCoverage*(1.-parameter.vegAlbedo))*self.solRec vegLat = self.vegCoverage*parameter.grassFLat*(1.-parameter.vegAlbedo)*self.solRec vegSens = self.vegCoverage*(1.-parameter.grassFLat)*(1.-parameter.vegAlbedo)*self.solRec self.lat = soilLat + vegLat # Sensible & net heat flux self.sens = vegSens + self.aeroCond*(self.layerTemp[0]-tempRef) self.flux = -self.sens + self.solAbs + self.infra - self.lat # (W m-2) else: # For vertical surfaces (wall) self.solAbs = (1.-self.albedo)*self.solRec self.lat = 0. 
# Sensible & net heat flux self.sens = self.aeroCond*(self.layerTemp[0]-tempRef) self.flux = -self.sens + self.solAbs + self.infra - self.lat # (W m-2) self.layerTemp = self.Conduction(simTime.dt, self.flux, boundCond, forc.deepTemp, intFlux) self.T_ext = self.layerTemp[0] self.T_int = self.layerTemp[-1]
Calculate net heat flux, and update element layer temperatures
entailment
def Conduction(self, dt, flx1, bc, temp2, flx2): """ Solve the conductance of heat based on of the element layers. arg: flx1 : net heat flux on surface bc : boundary condition parameter (1 or 2) temp2 : deep soil temperature (ave of air temperature) flx2 : surface flux (sum of absorbed, emitted, etc.) key prop: za = [[ x00, x01, x02 ... x0w ] [ x10, x11, x12 ... x1w ] ... [ xh0, xh1, xh2 ... xhw ]] where h = matrix row index = element layer number w = matrix column index = 3 """ t = self.layerTemp # vector of layer temperatures (K) hc = self.layerVolHeat # vector of layer volumetric heat (J m-3 K-1) tc = self.layerThermalCond # vector of layer thermal conductivities (W m-1 K-1) d = self.layerThickness # vector of layer thicknesses (m) # flx1 : net heat flux on surface # bc : boundary condition parameter (1 or 2) # temp2 : deep soil temperature (avg of air temperature) # flx2 : surface flux (sum of absorbed, emitted, etc.) fimp = 0.5 # implicit coefficient fexp = 0.5 # explicit coefficient num = len(t) # number of layers # Mean thermal conductivity over distance between 2 layers (W/mK) tcp = [0 for x in range(num)] # Thermal capacity times layer depth (J/m2K) hcp = [0 for x in range(num)] # lower, main, and upper diagonals za = [[0 for y in range(3)] for x in range(num)] # RHS zy = [0 for x in range(num)] #-------------------------------------------------------------------------- # Define the column vectors for heat capactiy and conductivity hcp[0] = hc[0] * d[0] for j in range(1,num): tcp[j] = 2. / (d[j-1] / tc[j-1] + d[j] / tc[j]) hcp[j] = hc[j] * d[j] #-------------------------------------------------------------------------- # Define the first row of za matrix, and RHS column vector za[0][0] = 0. 
za[0][1] = hcp[0]/dt + fimp*tcp[1] za[0][2] = -fimp*tcp[1] zy[0] = hcp[0]/dt*t[0] - fexp*tcp[1]*(t[0]-t[1]) + flx1 #-------------------------------------------------------------------------- # Define other rows for j in range(1,num-1): za[j][0] = fimp*(-tcp[j]) za[j][1] = hcp[j]/dt + fimp*(tcp[j]+tcp[j+1]) za[j][2] = fimp*(-tcp[j+1]) zy[j] = hcp[j]/dt * t[j] + fexp * \ (tcp[j]*t[j-1] - tcp[j]*t[j] - tcp[j+1]*t[j] + tcp[j+1]*t[j+1]) #-------------------------------------------------------------------------- # Boundary conditions if self.is_near_zero(bc-1.): # heat flux za[num-1][0] = fimp * (-tcp[num-1]) za[num-1][1] = hcp[num-1]/dt + fimp*tcp[num-1] za[num-1][2] = 0. zy[num-1] = hcp[num-1]/dt*t[num-1] + fexp*tcp[num-1]*(t[num-2]-t[num-1]) + flx2 elif self.is_near_zero(bc-2.): # deep-temperature za[num-1][0] = 0. za[num-1][1] = 1. za[num-1][2] = 0. zy[num-1] = temp2 else: raise Exception(self.CONDUCTION_INPUT_MSG) #-------------------------------------------------------------------------- zx = self.invert(num,za,zy) #t(:) = zx(:); return zx
Solve the conductance of heat based on of the element layers. arg: flx1 : net heat flux on surface bc : boundary condition parameter (1 or 2) temp2 : deep soil temperature (ave of air temperature) flx2 : surface flux (sum of absorbed, emitted, etc.) key prop: za = [[ x00, x01, x02 ... x0w ] [ x10, x11, x12 ... x1w ] ... [ xh0, xh1, xh2 ... xhw ]] where h = matrix row index = element layer number w = matrix column index = 3
entailment
def qsat(self, temp, pres, parameter):
    """Return a vector of saturation humidities (kg/kg).

    Args:
        temp: vector of element layer temperatures (K)
        pres: vector of pressures (Pa), indexed in step with temp
        parameter: constants (cl, cpv, rv, lvtt, tt, estt, r)
    """
    # Clausius-Clapeyron style fit constants derived from the parameters.
    gamw = (parameter.cl - parameter.cpv) / parameter.rv
    betaw = (parameter.lvtt / parameter.rv) + (gamw * parameter.tt)
    alpw = math.log(parameter.estt) + (betaw / parameter.tt) + (gamw * math.log(parameter.tt))
    work2 = parameter.r / parameter.rv

    qsat_lst = []
    for i in range(len(temp)):
        # Saturation vapor pressure at this layer temperature.
        foes = math.exp(alpw - betaw / temp[i] - gamw * math.log(temp[i]))
        work1 = foes / pres[i]
        # Saturation humidity.
        qsat_lst.append(work2 * work1 / (1. + (work2 - 1.) * work1))
    return qsat_lst
Calculate (qsat_lst) vector of saturation humidity from: temp = vector of element layer temperatures pres = pressure (at current timestep).
entailment
def invert(self, nz, A, C):
    """Solve the tridiagonal system A X = C by double-sweep elimination.

    Args:
        nz: number of rows (layers)
        A: per-row [lower, main, upper] diagonal coefficients
        C: right-hand side vector

    Returns:
        Solution vector X.

    NOTE: A and C are modified in place by the elimination sweeps.
    """
    # Bottom-up sweep: eliminate the upper diagonal.
    for row in range(nz - 2, -1, -1):
        C[row] = C[row] - A[row][2] * C[row + 1] / A[row + 1][1]
        A[row][1] = A[row][1] - A[row][2] * A[row + 1][0] / A[row + 1][1]
    # Top-down sweep: eliminate the lower diagonal.
    for row in range(1, nz):
        C[row] = C[row] - A[row][0] * C[row - 1] / A[row - 1][1]
    # Divide through by the remaining main diagonal.
    return [C[row] / A[row][1] for row in range(nz)]
Inversion and resolution of a tridiagonal matrix A X = C Input: nz number of layers a(*,1) lower diagonal (Ai,i-1) a(*,2) principal diagonal (Ai,i) a(*,3) upper diagonal (Ai,i+1) c Output x results
entailment
def readDOE(serialize_output=True):
    """Read the DOE reference-building csv files into nested matrices.

    Source csv files per building type:
        Sheet 1 = BuildingSummary
        Sheet 2 = ZoneSummary
        Sheet 3 = LocationSummary
        Sheet 4 = Schedules
    (Note: BLD8 & BLD10 are schools.)

    Returns three nested [16][3][16] lists indexed [type][era][zone]
    (type = 1-16, era = 1-3, climate zone = 1-16):
        refDOE   -- Building objects
        Schedule -- SchDef objects
        refBEM   -- BEMDef objects
    e.g. Type: FullServiceRestaurant, Era: Pre80, Zone: 6A Minneapolis.

    When serialize_output is True the three matrices are also pickled
    to refdata/readDOE.pkl.
    """
    # Nested [16][3][16] matrices of Building, SchDef and BEMDef objects.
    refDOE = [[[None] * 16 for k_ in range(3)] for j_ in range(16)]
    Schedule = [[[None] * 16 for k_ in range(3)] for j_ in range(16)]
    refBEM = [[[None] * 16 for k_ in range(3)] for j_ in range(16)]

    # Loop through every DOE reference csv: 16 types x 3 eras x 16 zones = 768.
    for i in range(16):

        # Read building summary (Sheet 1). Each value is a list of 3 era entries.
        file_doe_name_bld = os.path.join(
            "{}".format(DIR_DOE_PATH), "BLD{}".format(i+1),
            "BLD{}_BuildingSummary.csv".format(i+1))
        list_doe1 = read_csv(file_doe_name_bld)
        nFloor = str2fl(list_doe1[3][3:6])      # number of floors (floats, may include "basement" strings)
        glazing = str2fl(list_doe1[4][3:6])     # glazing ratio
        hCeiling = str2fl(list_doe1[5][3:6])    # [m] ceiling height
        ver2hor = str2fl(list_doe1[7][3:6])     # wall-to-skin ratio
        AreaRoof = str2fl(list_doe1[8][3:6])    # [m2] gross total roof area

        # Read zone summary (Sheet 2). Each value is a list of 3 era entries.
        file_doe_name_zone = os.path.join(
            "{}".format(DIR_DOE_PATH), "BLD{}".format(i+1),
            "BLD{}_ZoneSummary.csv".format(i+1))
        list_doe2 = read_csv(file_doe_name_zone)
        AreaFloor = str2fl([list_doe2[2][5], list_doe2[3][5], list_doe2[4][5]])     # [m2]
        Volume = str2fl([list_doe2[2][6], list_doe2[3][6], list_doe2[4][6]])        # [m3]
        AreaWall = str2fl([list_doe2[2][8], list_doe2[3][8], list_doe2[4][8]])      # [m2]
        AreaWindow = str2fl([list_doe2[2][9], list_doe2[3][9], list_doe2[4][9]])    # [m2]
        Occupant = str2fl([list_doe2[2][11], list_doe2[3][11], list_doe2[4][11]])   # number of people
        Light = str2fl([list_doe2[2][12], list_doe2[3][12], list_doe2[4][12]])      # [W/m2]
        Elec = str2fl([list_doe2[2][13], list_doe2[3][13], list_doe2[4][13]])       # [W/m2] electric plug and process
        Gas = str2fl([list_doe2[2][14], list_doe2[3][14], list_doe2[4][14]])        # [W/m2] gas plug and process
        SHW = str2fl([list_doe2[2][15], list_doe2[3][15], list_doe2[4][15]])        # [Litres/hr] peak service hot water
        Vent = str2fl([list_doe2[2][17], list_doe2[3][17], list_doe2[4][17]])       # [L/s/m2] ventilation
        Infil = str2fl([list_doe2[2][20], list_doe2[3][20], list_doe2[4][20]])      # [ACH] infiltration

        # Read location summary (Sheet 3): 3 eras x 16 climate zones per entry.
        file_doe_name_location = os.path.join(
            "{}".format(DIR_DOE_PATH), "BLD{}".format(i+1),
            "BLD{}_LocationSummary.csv".format(i+1))
        list_doe3 = read_csv(file_doe_name_location)
        TypeWall = [list_doe3[3][4:20], list_doe3[14][4:20], list_doe3[25][4:20]]           # construction type
        RvalWall = str2fl([list_doe3[4][4:20], list_doe3[15][4:20], list_doe3[26][4:20]])   # [m2*K/W] R-value
        TypeRoof = [list_doe3[5][4:20], list_doe3[16][4:20], list_doe3[27][4:20]]           # construction type
        RvalRoof = str2fl([list_doe3[6][4:20], list_doe3[17][4:20], list_doe3[28][4:20]])   # [m2*K/W] R-value
        Uwindow = str2fl([list_doe3[7][4:20], list_doe3[18][4:20], list_doe3[29][4:20]])    # [W/m2*K] U-factor
        SHGC = str2fl([list_doe3[8][4:20], list_doe3[19][4:20], list_doe3[30][4:20]])       # [-] coefficient
        HVAC = str2fl([list_doe3[9][4:20], list_doe3[20][4:20], list_doe3[31][4:20]])       # [kW] air conditioning
        HEAT = str2fl([list_doe3[10][4:20], list_doe3[21][4:20], list_doe3[32][4:20]])      # [kW] heating
        COP = str2fl([list_doe3[11][4:20], list_doe3[22][4:20], list_doe3[33][4:20]])       # [-] A/C COP
        EffHeat = str2fl([list_doe3[12][4:20], list_doe3[23][4:20], list_doe3[34][4:20]])   # [%] heating efficiency
        FanFlow = str2fl([list_doe3[13][4:20], list_doe3[24][4:20], list_doe3[35][4:20]])   # [m3/s] fan max flow rate

        # Read schedules (Sheet 4): (weekday, sat, sun) x 24 hourly fractions.
        file_doe_name_schedules = os.path.join(
            "{}".format(DIR_DOE_PATH), "BLD{}".format(i+1),
            "BLD{}_Schedules.csv".format(i+1))
        list_doe4 = read_csv(file_doe_name_schedules)
        SchEquip = str2fl([list_doe4[1][6:30], list_doe4[2][6:30], list_doe4[3][6:30]])     # equipment
        SchLight = str2fl([list_doe4[4][6:30], list_doe4[5][6:30], list_doe4[6][6:30]])     # light (Wkday=Sat=Sun=Hol)
        SchOcc = str2fl([list_doe4[7][6:30], list_doe4[8][6:30], list_doe4[9][6:30]])       # occupancy
        SetCool = str2fl([list_doe4[10][6:30], list_doe4[11][6:30], list_doe4[12][6:30]])   # cooling setpoint
        SetHeat = str2fl([list_doe4[13][6:30], list_doe4[14][6:30], list_doe4[15][6:30]])   # heating setpoint (summer design)
        SchGas = str2fl([list_doe4[16][6:30], list_doe4[17][6:30], list_doe4[18][6:30]])    # gas equipment (wkday=sat)
        SchSWH = str2fl([list_doe4[19][6:30], list_doe4[20][6:30], list_doe4[21][6:30]])    # service hot water

        for j in range(3):          # 3 built eras
            for k in range(16):     # 16 climate zones
                B = Building(
                    hCeiling[j],                            # floorHeight by era
                    1,                                      # intHeatNight
                    1,                                      # intHeatDay
                    0.1,                                    # intHeatFRad
                    0.1,                                    # intHeatFLat
                    Infil[j],                               # infil (ACH) by era
                    Vent[j]/1000.,                          # vent (m3/s/m2), converted from litres
                    glazing[j],                             # glazing ratio by era
                    Uwindow[j][k],                          # uValue by era, by climate type
                    SHGC[j][k],                             # SHGC by era, by climate type
                    'AIR',                                  # cooling condensation system type: AIR, WATER
                    COP[j][k],                              # cop by era, climate type
                    297,                                    # coolSetpointDay = 24 C
                    297,                                    # coolSetpointNight
                    293,                                    # heatSetpointDay = 20 C
                    293,                                    # heatSetpointNight
                    (HVAC[j][k]*1000.0)/AreaFloor[j],       # coolCap converted to W/m2
                    EffHeat[j][k],                          # heatEff by era, climate type
                    293)                                    # initialTemp = 20 C

                # Attributes not defined in the constructor.
                B.heatCap = (HEAT[j][k]*1000.0)/AreaFloor[j]  # heating capacity (W/m2)
                B.Type = BLDTYPE[i]
                B.Era = BUILTERA[j]
                B.Zone = ZONETYPE[k]
                refDOE[i][j][k] = B

                # Define wall, mass (floor) and roof elements.
                # Material: (thermalCond, volHeat = specific heat * density).
                Concrete = Material(1.311, 836.8 * 2240, "Concrete")
                Insulation = Material(0.049, 836.8 * 265.0, "Insulation")
                Gypsum = Material(0.16, 830.0 * 784.9, "Gypsum")
                Wood = Material(0.11, 1210.0 * 544.62, "Wood")
                Stucco = Material(0.6918, 837.0 * 1858.0, "Stucco")

                if TypeWall[j][k] == "MassWall":
                    # 1" stucco, 8" concrete, insulation sized to match R-value, 1/2" gypsum.
                    Rbase = 0.271087  # R value of stucco + concrete + gypsum
                    Rins = RvalWall[j][k] - Rbase
                    D_ins = Rins * Insulation.thermalCond  # m2*K/W * W/m*K = m
                    if D_ins > 0.01:
                        thickness = [0.0254, 0.0508, 0.0508, 0.0508, 0.0508, D_ins, 0.0127]
                        layers = [Stucco, Concrete, Concrete, Concrete, Concrete, Insulation, Gypsum]
                    else:
                        # Insulation thinner than 1 cm is dropped from the stack.
                        thickness = [0.0254, 0.0508, 0.0508, 0.0508, 0.0508, 0.0127]
                        layers = [Stucco, Concrete, Concrete, Concrete, Concrete, Gypsum]
                    wall = Element(0.08, 0.92, thickness, layers, 0., 293., 0., "MassWall")

                    # Mass wall implies a mass floor (assume 4" concrete).
                    alb = 0.2
                    emis = 0.9
                    thickness = [0.054, 0.054]
                    concrete = Material(1.31, 2240.0 * 836.8)
                    mass = Element(alb, emis, thickness, [concrete, concrete], 0, 293, 1, "MassFloor")

                elif TypeWall[j][k] == "WoodFrame":
                    # 0.01 m wood siding, insulation sized to match R-value, 1/2" gypsum.
                    Rbase = 0.170284091  # R value of wood siding + gypsum
                    Rins = RvalWall[j][k] - Rbase
                    D_ins = Rins * Insulation.thermalCond
                    if D_ins > 0.01:
                        thickness = [0.01, D_ins, 0.0127]
                        layers = [Wood, Insulation, Gypsum]
                    else:
                        thickness = [0.01, 0.0127]
                        layers = [Wood, Gypsum]
                    wall = Element(0.22, 0.92, thickness, layers, 0., 293., 0., "WoodFrameWall")

                    # Wood frame wall implies a wooden floor.
                    alb = 0.2
                    emis = 0.9
                    thickness = [0.05, 0.05]
                    wood = Material(1.31, 2240.0 * 836.8)
                    mass = Element(alb, emis, thickness, [wood, wood], 0., 293., 1., "WoodFloor")

                elif TypeWall[j][k] == "SteelFrame":
                    # 1" stucco, 8" concrete, insulation sized to match R-value, 1/2" gypsum.
                    Rbase = 0.271087  # R value of stucco + concrete + gypsum
                    Rins = RvalWall[j][k] - Rbase
                    D_ins = Rins * Insulation.thermalCond
                    if D_ins > 0.01:
                        thickness = [0.0254, 0.0508, 0.0508, 0.0508, 0.0508, D_ins, 0.0127]
                        layers = [Stucco, Concrete, Concrete, Concrete, Concrete, Insulation, Gypsum]
                    else:
                        # If insulation is too thin, assume no insulation.
                        thickness = [0.0254, 0.0508, 0.0508, 0.0508, 0.0508, 0.0127]
                        layers = [Stucco, Concrete, Concrete, Concrete, Concrete, Gypsum]
                    wall = Element(0.15, 0.92, thickness, layers, 0., 293., 0., "SteelFrame")

                    # Assume a mass floor (4" concrete).
                    alb = 0.2
                    emis = 0.93
                    thickness = [0.05, 0.05]
                    mass = Element(alb, emis, thickness, [Concrete, Concrete], 0., 293., 1., "MassFloor")

                elif TypeWall[j][k] == "MetalWall":
                    # Metal siding, insulation, 1/2" gypsum.
                    alb = 0.2
                    emis = 0.9
                    # Use the derived insulation thickness or 0.01 m, whichever is larger.
                    D_ins = max((RvalWall[j][k] * Insulation.thermalCond) / 2, 0.01)
                    thickness = [D_ins, D_ins, 0.0127]
                    materials = [Insulation, Insulation, Gypsum]
                    wall = Element(alb, emis, thickness, materials, 0, 293, 0, "MetalWall")

                    # Mass floor (assume 4" concrete).
                    alb = 0.2
                    emis = 0.9
                    thickness = [0.05, 0.05]
                    concrete = Material(1.31, 2240.0 * 836.8)
                    mass = Element(alb, emis, thickness, [concrete, concrete], 0., 293., 1., "MassFloor")

                # Roof
                if TypeRoof[j][k] == "IEAD":
                    # Insulation Entirely Above Deck: membrane, insulation, decking.
                    alb = 0.2
                    emis = 0.93
                    D_ins = max(RvalRoof[j][k] * Insulation.thermalCond / 2., 0.01)
                    roof = Element(alb, emis, [D_ins, D_ins], [Insulation, Insulation], 0., 293., 0., "IEAD")
                elif TypeRoof[j][k] == "Attic":
                    alb = 0.2
                    emis = 0.9
                    D_ins = max(RvalRoof[j][k] * Insulation.thermalCond / 2., 0.01)
                    roof = Element(alb, emis, [D_ins, D_ins], [Insulation, Insulation], 0., 293., 0., "Attic")
                elif TypeRoof[j][k] == "MetalRoof":
                    alb = 0.2
                    emis = 0.9
                    D_ins = max(RvalRoof[j][k] * Insulation.thermalCond / 2., 0.01)
                    roof = Element(alb, emis, [D_ins, D_ins], [Insulation, Insulation], 0., 293., 0., "MetalRoof")

                # Building energy model; urban floor fraction of this typology starts at zero.
                refBEM[i][j][k] = BEMDef(B, mass, wall, roof, 0.0)
                refBEM[i][j][k].building.FanMax = FanFlow[j][k]  # max fan flow rate (m3/s) per DOE

                Schedule[i][j][k] = SchDef()
                Schedule[i][j][k].Elec = SchEquip       # 3x24 fraction schedule (WD, Sat, Sun)
                Schedule[i][j][k].Light = SchLight      # 3x24 fraction schedule (WD, Sat, Sun)
                Schedule[i][j][k].Gas = SchGas          # 3x24 fraction schedule (WD, Sat, Sun)
                Schedule[i][j][k].Occ = SchOcc          # 3x24 fraction schedule (WD, Sat, Sun)
                Schedule[i][j][k].Cool = SetCool        # 3x24 cooling temp schedule (WD, Sat, Sun)
                Schedule[i][j][k].Heat = SetHeat        # 3x24 heating temp schedule (WD, Sat, Sun)
                Schedule[i][j][k].SWH = SchSWH          # 3x24 fraction schedule (WD, Sat, Sun)
                Schedule[i][j][k].Qelec = Elec[j]                   # W/m2 (max) electric plug process
                Schedule[i][j][k].Qlight = Light[j]                 # W/m2 (max) light
                Schedule[i][j][k].Nocc = Occupant[j]/AreaFloor[j]   # person/m2
                Schedule[i][j][k].Qgas = Gas[j]                     # W/m2 (max) gas
                Schedule[i][j][k].Vent = Vent[j]/1000.0             # m3/m2 per person
                Schedule[i][j][k].Vswh = SHW[j]/AreaFloor[j]        # litres/hr per m2 of floor

    # Serialize refDOE, refBEM and Schedule, and store in resources.
    if serialize_output:
        pkl_file_path = os.path.join(DIR_CURR, 'refdata', 'readDOE.pkl')
        pickle_readDOE = open(pkl_file_path, 'wb')
        # Pickle with protocol 1 because this is a binary file.
        pickle.dump(refDOE, pickle_readDOE, 1)
        pickle.dump(refBEM, pickle_readDOE, 1)
        pickle.dump(Schedule, pickle_readDOE, 1)
        pickle_readDOE.close()

    return refDOE, refBEM, Schedule
Read csv files of DOE buildings Sheet 1 = BuildingSummary Sheet 2 = ZoneSummary Sheet 3 = LocationSummary Sheet 4 = Schedules Note BLD8 & 10 = school Then make matrix of ref data as nested nested lists [16, 3, 16]: matrix refDOE = Building objs matrix Schedule = SchDef objs matrix refBEM (16,3,16) = BEMDef where: [16,3,16] is Type = 1-16, Era = 1-3, climate zone = 1-16 i.e. Type: FullServiceRestaurant, Era: Pre80, Zone: 6A Minneapolis Nested tree: [TYPE_1: ERA_1: CLIMATE_ZONE_1 ... CLIMATE_ZONE_16 ERA_2: CLIMATE_ZONE_1 ... CLIMATE_ZONE_16 ... ERA_3: CLIMATE_ZONE_1 ... CLIMATE_ZONE_16]
entailment
def readPlist(pathOrFile):
    """Parse a plist from a path or an open binary file object.

    Binary plists are parsed with PlistReader; anything else falls back
    to the stdlib plistlib parser.
    Raises NotBinaryPlistException, InvalidPlistException.
    """
    didOpen = False
    result = None
    # Accept either a filesystem path or an already-open binary file object.
    if isinstance(pathOrFile, (bytes, unicode)):
        pathOrFile = open(pathOrFile, 'rb')
        didOpen = True
    try:
        result = PlistReader(pathOrFile).parse()
    except NotBinaryPlistException as e:
        # Not a binary plist: rewind and retry with the stdlib parser.
        try:
            pathOrFile.seek(0)
            result = None
            if hasattr(plistlib, 'loads'):
                contents = None
                if isinstance(pathOrFile, (bytes, unicode)):
                    with open(pathOrFile, 'rb') as f:
                        contents = f.read()
                else:
                    contents = pathOrFile.read()
                result = plistlib.loads(contents)
            else:
                result = plistlib.readPlist(pathOrFile)
            result = wrapDataObject(result, for_binary=True)
        except Exception as e:
            raise InvalidPlistException(e)
    finally:
        if didOpen:
            pathOrFile.close()
    return result
Raises NotBinaryPlistException, InvalidPlistException
entailment
def getSizedInteger(self, data, byteSize, as_number=False):
    """Decode a big-endian integer of the given byte size.

    8-byte values are signed when they represent numbers (as_number=True),
    unsigned otherwise; 1, 2 and 4 byte integers are always unsigned.
    """
    if byteSize == 0:
        raise InvalidPlistException("Encountered integer with byte size of 0.")
    if byteSize == 1:
        return unpack('>B', data)[0]
    if byteSize == 2:
        return unpack('>H', data)[0]
    if byteSize == 4:
        return unpack('>L', data)[0]
    if byteSize == 8:
        return unpack('>q' if as_number else '>Q', data)[0]
    if byteSize <= 16:
        # Odd-sized or >8-byte integers; capped at 16 bytes to prevent
        # pathological inputs.
        if hasattr(int, 'from_bytes'):
            return int.from_bytes(data, 'big')
        value = 0
        for byte in data:
            if not isinstance(byte, int):
                # Python 3.0-3.1.x yields ints when iterating bytes; 2.x yields str.
                byte = unpack_from('>B', byte)[0]
            value = (value << 8) | byte
        return value
    raise InvalidPlistException("Encountered integer longer than 16 bytes.")
Numbers of 8 bytes are signed integers when they refer to numbers, but unsigned otherwise.
entailment
def writeRoot(self, root):
    """Serialize the whole plist to self.file.

    Strategy:
      - write the header
      - wrap the root object so everything is hashable
      - compute object sizes, needed to know how large the object refs
        will be in the list/dict/set reference lists
      - write objects, recording them in writtenReferences and their
        positions in referencePositions
      - compute the offset-int width from the output size so far
      - write the offset table, then the trailer
    """
    output = self.header
    wrapped_root = self.wrapRoot(root)
    self.computeOffsets(wrapped_root, asReference=True, isRoot=True)
    self.trailer = self.trailer._replace(
        objectRefSize=self.intSize(len(self.computedUniques)))
    self.writeObjectReference(wrapped_root, output)
    output = self.writeObject(wrapped_root, output, setReferencePosition=True)

    # The output size at this point is an upper bound on how wide the
    # object-reference offsets need to be.
    self.trailer = self.trailer._replace(
        offsetSize=self.intSize(len(output)),
        offsetCount=len(self.computedUniques),
        offsetTableOffset=len(output),
        topLevelObjectNumber=0)

    output = self.writeOffsetTable(output)
    output += pack('!xxxxxxBBQQQ', *self.trailer)
    self.file.write(output)
Strategy is: - write header - wrap root object so everything is hashable - compute size of objects which will be written - need to do this in order to know how large the object refs will be in the list/dict/set reference lists - write objects - keep objects in writtenReferences - keep positions of object references in referencePositions - write object references with the length computed previously - computer object reference length - write object reference positions - write trailer
entailment
def writeObjectReference(self, obj, output):
    """Append an object reference to output, registering new objects.

    Does not write the actual object bytes or set the reference position.

    Returns:
        (isNew, output) -- isNew is True when the object was added to the
        reference table, False when it was already present.
    """
    position = self.positionOfObjectReference(obj)
    if position is not None:
        # Already registered: just emit the existing reference number.
        return (False, output + self.binaryInt(position, byteSize=self.trailer.objectRefSize))
    # New object: its reference number is the next slot in the table.
    ref = len(self.writtenReferences)
    self.writtenReferences[obj] = ref
    return (True, output + self.binaryInt(ref, byteSize=self.trailer.objectRefSize))
Tries to write an object reference, adding it to the references table. Does not write the actual object bytes or set the reference position. Returns a tuple of whether the object was a new reference (True if it was, False if it already was in the reference table) and the new output.
entailment
def writeObject(self, obj, output, setReferencePosition=False):
    """Serialize the given (wrapped) object to output and return output.

    If setReferencePosition is True, records the position the object was
    written at in self.referencePositions.
    """
    def proc_variable_length(format, length):
        # Emit the marker byte; lengths above 0b1110 are written as a
        # follow-on integer object.
        result = b''
        if length > 0b1110:
            result += pack('!B', (format << 4) | 0b1111)
            result = self.writeObject(length, result)
        else:
            result += pack('!B', (format << 4) | length)
        return result

    def timedelta_total_seconds(td):
        # Shim for Python 2.6, which lacks timedelta.total_seconds.
        # One float operand forces true division.
        return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10.0**6) / 10.0**6

    if setReferencePosition:
        self.referencePositions[obj] = len(output)

    if obj is None:
        output += pack('!B', 0b00000000)
    elif isinstance(obj, BoolWrapper):
        if obj.value is False:
            output += pack('!B', 0b00001000)
        else:
            output += pack('!B', 0b00001001)
    elif isinstance(obj, Uid):
        size = self.intSize(obj.integer)
        output += pack('!B', (0b1000 << 4) | size - 1)
        output += self.binaryInt(obj.integer)
    elif isinstance(obj, (int, long)):
        byteSize = self.intSize(obj)
        root = math.log(byteSize, 2)
        output += pack('!B', (0b0001 << 4) | int(root))
        output += self.binaryInt(obj, as_number=True)
    elif isinstance(obj, FloatWrapper):
        # Reals are always stored as doubles.
        output += pack('!B', (0b0010 << 4) | 3)
        output += self.binaryReal(obj)
    elif isinstance(obj, datetime.datetime):
        try:
            timestamp = (obj - apple_reference_date).total_seconds()
        except AttributeError:
            timestamp = timedelta_total_seconds(obj - apple_reference_date)
        output += pack('!B', 0b00110011)
        output += pack('!d', float(timestamp))
    elif isinstance(obj, Data):
        output += proc_variable_length(0b0100, len(obj))
        output += obj
    elif isinstance(obj, StringWrapper):
        output += proc_variable_length(obj.encodingMarker, len(obj))
        output += obj.encodedValue
    elif isinstance(obj, bytes):
        output += proc_variable_length(0b0101, len(obj))
        output += obj
    elif isinstance(obj, HashableWrapper):
        obj = obj.value
        if isinstance(obj, (set, list, tuple)):
            if isinstance(obj, set):
                output += proc_variable_length(0b1100, len(obj))
            else:
                output += proc_variable_length(0b1010, len(obj))
            # First emit all references, then the bodies of any new objects.
            objectsToWrite = []
            for objRef in sorted(obj) if isinstance(obj, set) else obj:
                (isNew, output) = self.writeObjectReference(objRef, output)
                if isNew:
                    objectsToWrite.append(objRef)
            for objRef in objectsToWrite:
                output = self.writeObject(objRef, output, setReferencePosition=True)
        elif isinstance(obj, dict):
            output += proc_variable_length(0b1101, len(obj))
            keys = []
            values = []
            objectsToWrite = []
            for key, value in sorted(iteritems(obj)):
                keys.append(key)
                values.append(value)
            # All key references first, then all value references.
            for key in keys:
                (isNew, output) = self.writeObjectReference(key, output)
                if isNew:
                    objectsToWrite.append(key)
            for value in values:
                (isNew, output) = self.writeObjectReference(value, output)
                if isNew:
                    objectsToWrite.append(value)
            for objRef in objectsToWrite:
                output = self.writeObject(objRef, output, setReferencePosition=True)
    return output
Serializes the given object to the output. Returns output. If setReferencePosition is True, will set the position the object was written.
entailment
def writeOffsetTable(self, output):
    """Writes all of the object reference offsets.

    Appends one offset integer (self.trailer.offsetSize bytes each) per
    written object, ordered by reference number, and returns the extended
    output.

    Raises:
        InvalidPlistException: if a written object has no recorded position.
    """
    # Fix: removed the unused `all_positions` accumulator that was built
    # alongside the output but never read.
    writtenReferences = list(self.writtenReferences.items())
    writtenReferences.sort(key=lambda x: x[1])
    for obj, order in writtenReferences:
        # Porting note: elsewhere we deliberately replace empty unicode
        # strings with empty binary strings, but the empty unicode string
        # goes into writtenReferences. This isn't an issue in Py2 because
        # u'' and b'' have the same hash; but it is in Py3, where they don't.
        if bytes != str and obj == unicodeEmpty:
            obj = b''
        position = self.referencePositions.get(obj)
        if position is None:
            raise InvalidPlistException("Error while writing offsets table. Object not found. %s" % obj)
        output += self.binaryInt(position, self.trailer.offsetSize)
    return output
Writes all of the object reference offsets.
entailment
def intSize(self, obj):
    """Returns the number of bytes necessary to store the given integer."""
    # Negative values are always stored as 8-byte signed integers.
    if obj < 0:
        return 8
    # Unsigned sizes.
    if obj <= 0xFF:
        return 1
    if obj <= 0xFFFF:
        return 2
    if obj <= 0xFFFFFFFF:
        return 4
    # Up to the signed 8-byte maximum.
    if obj <= 0x7FFFFFFFFFFFFFFF:
        return 8
    # Values needing the full unsigned 64-bit range are stored in 16 bytes.
    if obj <= 0xffffffffffffffff:
        return 16
    raise InvalidPlistException("Core Foundation can't handle integers with size greater than 8 bytes.")
Returns the number of bytes necessary to store the given integer.
entailment
def load_z_meso(self, z_meso_path):
    """Open the z_meso.txt file and store its heights in self.z_meso.

    Args:
        z_meso_path: directory containing the z_meso.txt file
            (self.Z_MESO_FILE_NAME).

    Raises:
        Exception: if the file does not exist.
    """
    self.z_meso = []
    z_meso_file_path = os.path.join(z_meso_path, self.Z_MESO_FILE_NAME)
    if not os.path.exists(z_meso_file_path):
        # Bug fix: this message previously formatted `uwg_param_file`, an
        # undefined name, so a missing file raised NameError instead of
        # the intended error message.
        raise Exception("z_meso.txt file: '{}' does not exist.".format(z_meso_file_path))
    # `with` guarantees the file is closed even if a line fails to parse.
    with open(z_meso_file_path, 'r') as f:
        for txtline in f:
            # Strip all whitespace and convert to float.
            self.z_meso.append(float("".join(txtline.split())))
Open the z_meso.txt file and return heights as list
entailment
def urbflux(UCM, UBL, BEM, forc, parameter, simTime, RSM):
    """Calculate the surface heat fluxes.

    Returns:
        (UCM, UBL, BEM) updated in place and returned.
    """
    T_can = UCM.canTemp
    Cp = parameter.cp
    UCM.Q_roof = 0.
    sigma = 5.67e-8     # Stefan-Boltzmann constant
    UCM.roofTemp = 0.   # average urban roof temperature
    UCM.wallTemp = 0.   # average urban wall temperature

    for j in range(len(BEM)):
        # Building energy model
        BEM[j].building.BEMCalc(UCM, BEM[j], forc, parameter, simTime)
        BEM[j].ElecTotal = BEM[j].building.ElecTotal * BEM[j].fl_area  # W m-2

        # Update roof infrared exchange
        e_roof = BEM[j].roof.emissivity
        T_roof = BEM[j].roof.layerTemp[0]
        BEM[j].roof.infra = e_roof * (forc.infra - sigma * T_roof**4.)

        # Update wall infrared exchange (road handled after the loop),
        # accounting for radiation exchange with the road.
        e_wall = BEM[j].wall.emissivity
        T_wall = BEM[j].wall.layerTemp[0]
        _infra_road_, BEM[j].wall.infra = infracalcs(
            UCM, forc, UCM.road.emissivity, e_wall, UCM.roadTemp, T_wall)

        # Update element temperatures
        BEM[j].mass.layerTemp = BEM[j].mass.Conduction(
            simTime.dt, BEM[j].building.fluxMass, 1., 0., BEM[j].building.fluxMass)
        BEM[j].roof.SurfFlux(forc, parameter, simTime, UCM.canHum, T_can,
                             max(forc.wind, UCM.canWind), 1., BEM[j].building.fluxRoof)
        BEM[j].wall.SurfFlux(forc, parameter, simTime, UCM.canHum, T_can,
                             UCM.canWind, 1., BEM[j].building.fluxWall)

        # Accumulate fraction-weighted average wall & roof temperature
        UCM.wallTemp = UCM.wallTemp + BEM[j].frac * BEM[j].wall.layerTemp[0]
        UCM.roofTemp = UCM.roofTemp + BEM[j].frac * BEM[j].roof.layerTemp[0]

    # Update road infrared exchange (walls are assumed to have similar
    # emissivity, so the last one from the loop is used).
    UCM.road.infra, _wall_infra = infracalcs(
        UCM, forc, UCM.road.emissivity, e_wall, UCM.roadTemp, UCM.wallTemp)
    UCM.road.SurfFlux(forc, parameter, simTime, UCM.canHum, T_can, UCM.canWind, 2., 0.)
    UCM.roadTemp = UCM.road.layerTemp[0]

    # Total sensible & latent heat flux
    if UCM.latHeat != None:
        UCM.latHeat = UCM.latHeat + UCM.latAnthrop + UCM.treeLatHeat + \
            UCM.road.lat * (1. - UCM.bldDensity)

    # ---------------------------------------------------------------------
    # Advective heat flux to UBL from VDM.
    #
    # Note: the UWG_Matlab code is modified here to compensate for rounding
    # errors that occur when recursively adding forDens, intAdv1 and intAdv2.
    # These cause issues in the UBL.advHeat calculation when large (1e5)
    # numbers are subtracted to produce small numbers (1e-10) that can differ
    # from equivalent Matlab results by a factor of 2. Values this small are
    # ~0, but for consistency Kahan summation keeps the margin of difference
    # from UWG_Matlab low.
    # ---------------------------------------------------------------------
    forDens = 0.0
    intAdv1 = 0.0
    intAdv2 = 0.0
    # c1, c2, c3 store values truncated by floating-point rounding (< ~1e-16).
    c1 = 0.0
    c2 = 0.0
    c3 = 0.0
    for iz in range(RSM.nzfor):
        # Algebraically each compensation term is 0, but in floating point
        # it accumulates the truncated low-order bits.
        y = RSM.densityProfC[iz] * RSM.dz[iz] / (RSM.z[RSM.nzfor - 1] + RSM.dz[RSM.nzfor - 1] / 2.)
        t = forDens + y
        c1 += (t - forDens) - y
        forDens = t

        y = RSM.windProf[iz] * RSM.tempProf[iz] * RSM.dz[iz]
        t = intAdv1 + y
        c2 += (t - intAdv1) - y
        intAdv1 = t

        y = RSM.windProf[iz] * RSM.dz[iz]
        t = intAdv2 + y
        c3 += (t - intAdv2) - y
        intAdv2 = t

    # Fold the truncated values back in.
    forDens -= c1
    intAdv1 -= c2
    intAdv2 -= c3

    UBL.advHeat = UBL.paralLength * Cp * forDens * (intAdv1 - (UBL.ublTemp * intAdv2)) / UBL.urbArea

    # ---------------------------------------------------------------------
    # Convective heat flux to UBL from UCM (see Appendix - Bueno (2014))
    # ---------------------------------------------------------------------
    zrUrb = 2 * UCM.bldHeight
    zref = RSM.z[RSM.nzref - 1]  # reference height

    # Reference wind speed & canyon air density
    windUrb = forc.wind * log(zref / RSM.z0r) / log(parameter.windHeight / RSM.z0r) * \
        log(zrUrb / UCM.z0u) / log(zref / UCM.z0u)
    dens = forc.pres / (1000 * 0.287042 * T_can * (1. + 1.607858 * UCM.canHum))

    # Friction velocity
    UCM.ustar = parameter.vk * windUrb / log((zrUrb - UCM.l_disp) / UCM.z0u)

    # Convective scaling velocity
    wstar = (parameter.g * max(UCM.sensHeat, 0.0) * zref / dens / Cp / T_can)**(1 / 3.)
    UCM.ustarMod = max(UCM.ustar, wstar)        # modified friction velocity
    UCM.uExch = parameter.exCoeff * UCM.ustarMod  # exchange velocity

    # Canyon wind speed (Eq. 27, Ch. 3, Hanna and Britter, 2002),
    # assuming CD = 1 and lambda_f = verToHor/4.
    UCM.canWind = UCM.ustarMod * (UCM.verToHor / 8.)**(-1 / 2.)

    # Canyon turbulent velocities
    UCM.turbU = 2.4 * UCM.ustarMod
    UCM.turbV = 1.9 * UCM.ustarMod
    UCM.turbW = 1.3 * UCM.ustarMod

    # Urban wind profile
    for iz in range(RSM.nzref):
        UCM.windProf.append(UCM.ustar / parameter.vk *
                            log((RSM.z[iz] + UCM.bldHeight - UCM.l_disp) / UCM.z0u))

    return UCM, UBL, BEM
Calculate the surface heat fluxes Output: [UCM,UBL,BEM]
entailment
def get_books(self):
    """Return the books published by this artist.

    :return: list of books found for ``self.artist_id``
    """
    # Drop the first lookup result; only the remaining entries are
    # returned (presumably the artist record leads the response —
    # mirrors the original slicing behavior).
    found = itunespy.lookup(id=self.artist_id,
                            entity=itunespy.entities['ebook'])
    return found[1:]
Retrieves all the books published by the artist :return: List. Books published by the artist
entailment
def get_tasks(task_id='', completed=True):
    """Return Mesos tasks filtered by (possibly abbreviated) task id.

    Example: a marathon app 'sleep' scaled to 3 yields three tasks whose
    IDs all start with 'sleep.'.

    :param task_id: full or abbreviated task ID filter
    :type task_id: str
    :param completed: include completed tasks?
    :type completed: bool
    :return: matching tasks (raw task dicts)
    :rtype: list
    """
    dcos_client = mesos.DCOSClient()
    mesos_master = mesos.Master(dcos_client.get_master_state())
    matched = mesos_master.tasks(completed=completed, fltr=task_id)
    # Unwrap the mesos.Task objects back into their underlying dicts.
    return [m.__dict__['_task'] for m in matched]
Get a list of tasks, optionally filtered by task id. The task_id can be the abbrevated. Example: If a task named 'sleep' is scaled to 3 in marathon, there will be be 3 tasks starting with 'sleep.' :param task_id: task ID :type task_id: str :param completed: include completed tasks? :type completed: bool :return: a list of tasks :rtype: []
entailment
def get_task(task_id, completed=True):
    """Return the single task matching ``task_id``, or None.

    :param task_id: task ID (required)
    :type task_id: str
    :param completed: include completed tasks?
    :type completed: bool
    :return: the matching task, or None when nothing matched
    :rtype: obj
    """
    matches = get_tasks(task_id=task_id, completed=completed)
    if not matches:
        return None

    # A task id must identify at most one task; more is a logic error.
    assert len(matches) == 1, 'get_task should return at max 1 task for a task id'
    return matches[0]
Get a task by task id where a task_id is required. :param task_id: task ID :type task_id: str :param completed: include completed tasks? :type completed: bool :return: a task :rtype: obj
entailment
def task_completed(task_id):
    """Check whether any task matching ``task_id`` reached a terminal state.

    :param task_id: task ID
    :type task_id: str
    :return: True if a matching task has completed, False otherwise
    :rtype: bool
    """
    terminal_states = ('TASK_FINISHED',
                       'TASK_FAILED',
                       'TASK_KILLED',
                       'TASK_LOST',
                       'TASK_ERROR')
    return any(t['state'] in terminal_states
               for t in get_tasks(task_id=task_id))
Check whether a task has completed. :param task_id: task ID :type task_id: str :return: True if completed, False otherwise :rtype: bool
entailment
def task_property_present_predicate(service, task, prop):
    """True if the json_element passed is present for the task specified.

    :param service: service the task belongs to
    :param task: task name/ID
    :param prop: key expected in the task's JSON
    :return: True when the task could be fetched and contains ``prop``
    :rtype: bool
    """
    # Bind `response` up front: the previous version referenced it after
    # an `except ... pass` without ever assigning it, so any failure in
    # get_service_task raised NameError instead of yielding False.
    response = None
    try:
        response = get_service_task(service, task)
    except Exception:
        # Best-effort predicate: a failed lookup simply means "not present".
        pass

    return (response is not None) and (prop in response)
True if the json_element passed is present for the task specified.
entailment
def wait_for_task(service, task, timeout_sec=120):
    """Block until the given task of ``service`` has been launched."""
    def launched():
        return task_predicate(service, task)

    return time_wait(launched, timeout_seconds=timeout_sec)
Waits for a task which was launched to be launched
entailment
def wait_for_task_property(service, task, prop, timeout_sec=120):
    """Block until the given task exposes the property ``prop``."""
    def has_property():
        return task_property_present_predicate(service, task, prop)

    return time_wait(has_property, timeout_seconds=timeout_sec)
Waits for a task to have the specified property
entailment
def copy_file(
        host,
        file_path,
        remote_path='.',
        username=None,
        key_path=None,
        action='put'
):
    """ Copy a file via SCP, proxied through the mesos master

    :param host: host or IP of the machine to execute the command on
    :type host: str
    :param file_path: the local path to the file to be copied
    :type file_path: str
    :param remote_path: the remote path to copy the file to
    :type remote_path: str
    :param username: SSH username
    :type username: str
    :param key_path: path to the SSH private key to use for SSH authentication
    :type key_path: str
    :param action: transfer direction, 'put' (upload, default) or 'get'
        (download remote_path into file_path)
    :type action: str
    :return: True if successful, False otherwise
    :rtype: bool
    """
    # Fall back to the CLI-configured SSH credentials when none are given.
    if not username:
        username = shakedown.cli.ssh_user

    if not key_path:
        key_path = shakedown.cli.ssh_key_file

    key = validate_key(key_path)
    # Build and authenticate the SSH transport to the target host.
    transport = get_transport(host, username, key)
    transport = start_transport(transport, username, key)

    if transport.is_authenticated():
        start = time.time()
        channel = scp.SCPClient(transport)

        if action == 'get':
            print("\n{}scp {}:{} {}\n".format(
                shakedown.cli.helpers.fchr('>>'), host, remote_path, file_path))
            channel.get(remote_path, file_path)
        else:
            print("\n{}scp {} {}:{}\n".format(
                shakedown.cli.helpers.fchr('>>'), file_path, host, remote_path))
            channel.put(file_path, remote_path)

        # For 'get' the local file exists by now, so getsize works either way.
        print("{} bytes copied in {} seconds.".format(
            str(os.path.getsize(file_path)),
            str(round(time.time() - start, 2))))

        # NOTE(review): channel/transport are not closed on an exception
        # during the transfer — best-effort cleanup on the happy path only.
        try_close(channel)
        try_close(transport)

        return True
    else:
        print("error: unable to authenticate {}@{} with key {}".format(
            username, host, key_path))
        return False
Copy a file via SCP, proxied through the mesos master :param host: host or IP of the machine to execute the command on :type host: str :param file_path: the local path to the file to be copied :type file_path: str :param remote_path: the remote path to copy the file to :type remote_path: str :param username: SSH username :type username: str :param key_path: path to the SSH private key to use for SSH authentication :type key_path: str :return: True if successful, False otherwise :rtype: bool
entailment
def copy_file_to_master(
        file_path,
        remote_path='.',
        username=None,
        key_path=None
):
    """Upload a local file to the Mesos master via SCP.

    Thin convenience wrapper around :func:`copy_file` targeting the
    master's IP address.
    """
    master_host = shakedown.master_ip()
    return copy_file(master_host, file_path, remote_path, username, key_path)
Copy a file to the Mesos master
entailment
def copy_file_to_agent(
        host,
        file_path,
        remote_path='.',
        username=None,
        key_path=None
):
    """Upload a local file to a Mesos agent, proxied through the master."""
    return copy_file(
        host, file_path, remote_path, username, key_path)
Copy a file to a Mesos agent, proxied through the master
entailment
def copy_file_from_master(
        remote_path,
        file_path='.',
        username=None,
        key_path=None
):
    """ Copy a file from the Mesos master

    :param remote_path: path on the master to download from
    :param file_path: local destination path
    :param username: SSH username (CLI-configured user when None)
    :param key_path: SSH private key path (CLI-configured key when None)
    :return: True if successful, False otherwise
    """
    # 'get' reverses the transfer direction in copy_file (download).
    return copy_file(shakedown.master_ip(), file_path, remote_path, username, key_path, 'get')
Copy a file from the Mesos master
entailment
def copy_file_from_agent(
        host,
        remote_path,
        file_path='.',
        username=None,
        key_path=None
):
    """ Copy a file from a Mesos agent, proxied through the master

    :param host: agent host or IP to download from
    :param remote_path: path on the agent to download from
    :param file_path: local destination path
    :param username: SSH username (CLI-configured user when None)
    :param key_path: SSH private key path (CLI-configured key when None)
    :return: True if successful, False otherwise
    """
    # 'get' reverses the transfer direction in copy_file (download).
    return copy_file(host, file_path, remote_path, username, key_path, 'get')
Copy a file from a Mesos agent, proxied through the master
entailment
def __metadata_helper(json_path):
    """Fetch a dcos-metadata JSON document from the cluster.

    dcos-metadata was introduced in dcos-1.9; clusters prior to 1.9, or
    with the metadata missing, return None.

    :param json_path: path under ``dcos-metadata/`` to request
    :type json_path: str
    :return: parsed JSON on HTTP 200, otherwise None
    """
    url = shakedown.dcos_url_path('dcos-metadata/{}'.format(json_path))

    try:
        response = dcos.http.request('get', url)
        if response.status_code == 200:
            return response.json()
    except Exception:
        # Narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt; lookup stays best-effort.
        pass

    return None
Returns json for specific cluster metadata. Important to realize that this was introduced in dcos-1.9. Clusters prior to 1.9 and missing metadata will return None
entailment
def _get_resources(rtype='resources'):
    """Total cpu/mem across all agents for one resource category.

    Categories available in the state summary: resources, used_resources,
    offered_resources, reserved_resources, unreserved_resources.

    :param rtype: resource category to total (default 'resources')
    :type rtype: str
    :return: resources(cpu, mem)
    :rtype: Resources
    """
    total_cpus = 0
    total_mem = 0
    state = DCOSClient().get_state_summary()

    if 'slaves' in state:
        for agent in state['slaves']:
            stats = agent[rtype]
            cpu_val = stats.get('cpus')
            if cpu_val is not None:
                total_cpus += cpu_val
            mem_val = stats.get('mem')
            if mem_val is not None:
                total_mem += mem_val

    return Resources(total_cpus, total_mem)
resource types from state summary include: resources, used_resources offered_resources, reserved_resources, unreserved_resources The default is resources. :param rtype: the type of resources to return :type rtype: str :param role: the name of the role if for reserved and if None all reserved :type rtype: str :return: resources(cpu,mem) :rtype: Resources
entailment
def get_reserved_resources(role=None):
    """Total reserved cpu/mem across all agents, optionally per role.

    :param role: role name to filter on; None (or a role containing '*')
        totals every role's reservations
    :type role: str
    :return: resources(cpu, mem)
    :rtype: Resources
    """
    total_cpus = 0.0
    total_mem = 0.0
    state = DCOSClient().get_state_summary()

    if 'slaves' in state:
        for agent in state['slaves']:
            by_role = agent.get('reserved_resources')

            # Pick the reservations to count: all roles, one role, or none.
            if role is None or '*' in role:
                selected = by_role.values()
            elif role in by_role:
                selected = [by_role[role]]
            else:
                selected = []

            for reservation in selected:
                cpu_val = reservation.get('cpus')
                if cpu_val is not None:
                    total_cpus += cpu_val
                mem_val = reservation.get('mem')
                if mem_val is not None:
                    total_mem += mem_val

    return Resources(total_cpus, total_mem)
resource types from state summary include: reserved_resources :param role: the name of the role if for reserved and if None all reserved :type role: str :return: resources(cpu,mem) :rtype: Resources
entailment
def log_decl_method(func):
    """Decorate do_declaration methods for debug logging."""
    from functools import wraps

    @wraps(func)
    def with_logging(*args, **kwargs):
        # args layout (positional): args[0] is the bound instance,
        # args[2] is the declaration being processed.
        self = args[0]
        decl = args[2]
        # Log current step plus the declaration's name and serialized value
        # before delegating to the wrapped method.
        log(DEBUG, u"  {}: {} {}".format(
            self.state['current_step'], decl.name,
            serialize(decl.value).strip()).encode('utf-8'))
        return func(*args, **kwargs)
    return with_logging
Decorate do_declaration methods for debug logging.
entailment
def css_to_func(css, flags, css_namespaces, lang):
    """Convert a css selector to an xpath, supporting pseudo elements."""
    from cssselect import parse, HTMLTranslator
    from cssselect.parser import FunctionalPseudoElement
    # FIXME HACK need lessc to support functional-pseudo-selectors instead
    # of marking as strings and stripping " here.
    if not (css):
        return None
    # Strip the quote/space wrapping (see HACK above), parse the first
    # selector, and translate it to an xpath expression.
    sel = parse(css.strip('" '))[0]
    xpath = HTMLTranslator().selector_to_xpath(sel)
    first_letter = False
    if sel.pseudo_element is not None:
        if type(sel.pseudo_element) == FunctionalPseudoElement:
            # Functional forms: attr(name) selects an attribute;
            # first-letter(name) selects an attribute, then its first letter.
            if sel.pseudo_element.name in ('attr', 'first-letter'):
                xpath += '/@' + sel.pseudo_element.arguments[0].value
                if sel.pseudo_element.name == 'first-letter':
                    first_letter = True
        elif isinstance(sel.pseudo_element, type(u'')):
            # Plain (non-functional) ::first-letter pseudo element.
            if sel.pseudo_element == 'first-letter':
                first_letter = True
    xp = etree.XPath(xpath, namespaces=css_namespaces)

    def toupper(u):
        """Use icu library for locale sensitive uppercasing (python2)."""
        loc = Locale(lang) if lang else Locale()
        return UnicodeString(u).toUpper(loc).encode('utf-8').decode('utf-8')

    def func(elem):
        # Evaluate the compiled xpath; return the text of the first match,
        # reduced to its first letter and/or uppercased ('nocase' flag).
        # Returns None implicitly when nothing matches.
        res = xp(elem)
        if res:
            if etree.iselement(res[0]):
                res_str = etree.tostring(res[0], encoding='unicode',
                                         method="text")
            else:
                res_str = res[0]
            if first_letter:
                if res_str:
                    if flags and 'nocase' in flags:
                        return toupper(res_str[0])
                    else:
                        return res_str[0]
                else:
                    return res_str
            else:
                if flags and 'nocase' in flags:
                    return toupper(res_str)
                else:
                    return res_str
    return func
Convert a css selector to an xpath, supporting pseudo elements.
entailment
def append_string(t, string):
    """Append a string to a node, as text or tail of last child."""
    if not string:
        return
    node = t.tree
    if len(node):
        # Node has children: the string belongs on the last child's tail.
        last = list(node)[-1]
        last.tail = string if last.tail is None else last.tail + string
    else:
        # Childless node: the string extends the node's own text.
        node.text = string if node.text is None else node.text + string
Append a string to a node, as text or tail of last child.
entailment
def prepend_string(t, string):
    """Prepend a string to a target node as text."""
    node = t.tree
    # Put the new text ahead of any existing leading text. The previous
    # implementation appended (`node.text += string`), contradicting the
    # function's name and documented contract (cf. append_string).
    if node.text is not None:
        node.text = string + node.text
    else:
        node.text = string
Prepend a string to a target node as text.
entailment
def grouped_insert(t, value):
    """Insert value into the target tree 't' with correct grouping."""
    # Collation is locale-aware so grouping/sorting respects t.lang.
    collator = Collator.createInstance(Locale(t.lang) if t.lang else Locale())
    # Detach value's tail text first: fold it into the previous sibling's
    # tail, or the parent's text, so it is not dragged along on insert.
    if value.tail is not None:
        val_prev = value.getprevious()
        if val_prev is not None:
            val_prev.tail = (val_prev.tail or '') + value.tail
        else:
            val_parent = value.getparent()
            if val_parent is not None:
                val_parent.text = (val_parent.text or '') + value.tail
        value.tail = None
    if t.isgroup and t.sort(value) is not None:
        if t.groupby:
            # Walk existing group wrappers: insert into the matching group,
            # or create a new one before the first group that collates later.
            for child in t.tree:
                if child.get('class') == 'group-by':
                    # child[0] is the label span
                    order = collator.compare(
                        t.groupby(child[1]) or '',
                        t.groupby(value) or '')
                    if order == 0:
                        c_target = Target(child, sort=t.sort, lang=t.lang)
                        insert_group(value, c_target)
                        break
                    elif order > 0:
                        group = create_group(t.groupby(value))
                        group.append(value)
                        child.addprevious(group)
                        break
            else:
                # for/else: no break above — every existing group collates
                # earlier, so append a brand-new group at the end.
                group = create_group(t.groupby(value))
                group.append(value)
                t.tree.append(group)
        else:
            insert_group(value, t)
    elif t.sort and t.sort(value) is not None:
        insert_sort(value, t)
    elif t.location == 'inside':
        # Wrap the target's current children (and leading text) in value.
        for child in t.tree:
            value.append(child)
        value.text = t.tree.text
        t.tree.text = None
        t.tree.append(value)
    elif t.location == 'outside':
        # Wrap the target itself in value, splicing value into the
        # target's parent at the target's position.
        value.tail = t.tree.tail
        t.tree.tail = None
        target_parent_descendants = (
            [n.getparent() for n in t.parent.iterdescendants()
             if n == t.tree])
        try:
            parent = target_parent_descendants[0]
            parent.insert(parent.index(t.tree), value)
            value.append(t.tree)
        except IndexError as e:
            logger.error('Target of outside has been moved or deleted')
            raise e
    elif t.location == 'before':
        # Insert as first child, inheriting the target's leading text.
        value.tail = t.tree.text
        t.tree.text = None
        t.tree.insert(0, value)
    else:
        t.tree.append(value)
Insert value into the target tree 't' with correct grouping.
entailment
def insert_sort(node, target):
    """Place ``node`` at its collation-sorted position among the
    children of ``target.tree``, using the target's sort key and language."""
    key = target.sort
    locale = Locale(target.lang) if target.lang else Locale()
    collator = Collator.createInstance(locale)
    for existing in target.tree:
        # First existing child that collates after the new node marks
        # the insertion point.
        if collator.compare(key(existing) or '', key(node) or '') > 0:
            existing.addprevious(node)
            return
    # Nothing collates later: the node goes last.
    target.tree.append(node)
Insert node into sorted position in target tree. Uses sort function and language from target
entailment