Dataset columns: sentence1 — string, lengths 52 to 3.87M; sentence2 — string, lengths 1 to 47.2k; label — string, 1 class (entailment).
def _get_login_page(self):
    """Go to the login page."""
    try:
        raw_res = yield from self._session.get(HOME_URL,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Cannot connect to login page")
    # Get login url
    content = yield from raw_res.text()
    soup = BeautifulSoup(content, 'html.parser')
    form_node = soup.find('form', {'name': 'fm'})
    if form_node is None:
        raise PyHydroQuebecError("No login form found")
    login_url = form_node.attrs.get('action')
    if login_url is None:
        raise PyHydroQuebecError("Cannot find login url")
    return login_url
Go to the login page.
entailment
def _post_login_page(self, login_url):
    """Log in to the HydroQuebec website."""
    data = {"login": self.username,
            "_58_password": self.password}
    try:
        raw_res = yield from self._session.post(login_url,
                                                data=data,
                                                timeout=self._timeout,
                                                allow_redirects=False)
    except OSError:
        raise PyHydroQuebecError("Cannot submit login form")
    if raw_res.status != 302:
        raise PyHydroQuebecError("Login error: Bad HTTP status code. "
                                 "Please check your username/password.")
    return True

Log in to the HydroQuebec website.
entailment
def _get_p_p_id_and_contract(self):
    """Get id of consumption profile."""
    contracts = {}
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Cannot get profile page")
    # Parse html
    content = yield from raw_res.text()
    soup = BeautifulSoup(content, 'html.parser')
    # Search contracts
    for node in soup.find_all('span', {"class": "contrat"}):
        rematch = re.match("C[a-z]* ([0-9]{4} [0-9]{5})", node.text)
        if rematch is not None:
            contracts[rematch.group(1).replace(" ", "")] = None
    # Search for links
    for node in soup.find_all('a', {"class": "big iconLink"}):
        for contract in contracts:
            if contract in node.attrs.get('href'):
                contracts[contract] = node.attrs.get('href')
    # Look for p_p_id
    p_p_id = None
    for node in soup.find_all('span'):
        node_id = node.attrs.get('id', "")
        if node_id.startswith("p_portraitConsommation_WAR"):
            p_p_id = node_id[2:]
            break
    if p_p_id is None:
        raise PyHydroQuebecError("Could not get p_p_id")
    return p_p_id, contracts
Get id of consumption profile.
entailment
def _get_lonely_contract(self):
    """Get contract number when we have only one contract."""
    contracts = {}
    try:
        raw_res = yield from self._session.get(MAIN_URL,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Cannot get main page")
    # Parse html
    content = yield from raw_res.text()
    soup = BeautifulSoup(content, 'html.parser')
    info_node = soup.find("div", {"class": "span3 contrat"})
    if info_node is None:
        raise PyHydroQuebecError("Cannot find contract")
    research = re.search("Contrat ([0-9]{4} [0-9]{5})", info_node.text)
    if research is not None:
        contracts[research.group(1).replace(" ", "")] = None
    if contracts == {}:
        raise PyHydroQuebecError("Cannot find contract")
    return contracts
Get contract number when we have only one contract.
entailment
def _get_balances(self):
    """Get all balances.

    .. todo::

        It seems balances are shown (MAIN_URL) in the same order as
        contracts on the profile page (PROFILE_URL). Maybe we should
        ensure that.
    """
    balances = []
    try:
        raw_res = yield from self._session.get(MAIN_URL,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Cannot get main page")
    # Parse html
    content = yield from raw_res.text()
    soup = BeautifulSoup(content, 'html.parser')
    solde_nodes = soup.find_all("div", {"class": "solde-compte"})
    if solde_nodes == []:
        raise PyHydroQuebecError("Cannot find balance")
    for solde_node in solde_nodes:
        try:
            balance = solde_node.find("p").text
        except AttributeError:
            raise PyHydroQuebecError("Cannot find balance")
        balances.append(float(balance[:-2]
                              .replace(",", ".")
                              .replace("\xa0", "")))
    return balances

Get all balances. .. todo:: It seems balances are shown (MAIN_URL) in the same order as contracts on the profile page (PROFILE_URL). Maybe we should ensure that.
entailment
def _load_contract_page(self, contract_url):
    """Load the profile page of a specific contract when we have
    multiple contracts.
    """
    try:
        yield from self._session.get(contract_url,
                                     timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Cannot get profile page for a "
                                 "specific contract")
Load the profile page of a specific contract when we have multiple contracts.
entailment
def _get_annual_data(self, p_p_id):
    """Get annual data."""
    params = {"p_p_id": p_p_id,
              "p_p_lifecycle": 2,
              "p_p_state": "normal",
              "p_p_mode": "view",
              "p_p_resource_id": "resourceObtenirDonneesConsommationAnnuelles"}
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               params=params,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecAnnualError("Cannot get annual data")
    try:
        json_output = yield from raw_res.json(content_type='text/json')
    except (OSError, json.decoder.JSONDecodeError):
        raise PyHydroQuebecAnnualError("Could not get annual data")
    if not json_output.get('success'):
        raise PyHydroQuebecAnnualError("Could not get annual data")
    if not json_output.get('results'):
        raise PyHydroQuebecAnnualError("Could not get annual data")
    if 'courant' not in json_output.get('results')[0]:
        raise PyHydroQuebecAnnualError("Could not get annual data")
    return json_output.get('results')[0]['courant']
Get annual data.
entailment
def _get_monthly_data(self, p_p_id):
    """Get monthly data."""
    params = {"p_p_id": p_p_id,
              "p_p_lifecycle": 2,
              "p_p_resource_id": ("resourceObtenirDonnees"
                                  "PeriodesConsommation")}
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               params=params,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Cannot get monthly data")
    try:
        json_output = yield from raw_res.json(content_type='text/json')
    except (OSError, json.decoder.JSONDecodeError):
        raise PyHydroQuebecError("Could not get monthly data")
    if not json_output.get('success'):
        raise PyHydroQuebecError("Could not get monthly data")
    return json_output.get('results')
Get monthly data.
entailment
def _get_hourly_data(self, day_date, p_p_id):
    """Get hourly data."""
    params = {"p_p_id": p_p_id,
              "p_p_lifecycle": 2,
              "p_p_state": "normal",
              "p_p_mode": "view",
              "p_p_resource_id": "resourceObtenirDonneesConsommationHoraires",
              "p_p_cacheability": "cacheLevelPage",
              "p_p_col_id": "column-2",
              "p_p_col_count": 1,
              "date": day_date,
              }
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               params=params,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Cannot get hourly data")
    try:
        json_output = yield from raw_res.json(content_type='text/json')
    except (OSError, json.decoder.JSONDecodeError):
        raise PyHydroQuebecError("Could not get hourly data")
    hourly_consumption_data = json_output['results']['listeDonneesConsoEnergieHoraire']
    hourly_power_data = json_output['results']['listeDonneesConsoPuissanceHoraire']

    params = {"p_p_id": p_p_id,
              "p_p_lifecycle": 2,
              "p_p_state": "normal",
              "p_p_mode": "view",
              "p_p_resource_id": "resourceObtenirDonneesMeteoHoraires",
              "p_p_cacheability": "cacheLevelPage",
              "p_p_col_id": "column-2",
              "p_p_col_count": 1,
              "dateDebut": day_date,
              "dateFin": day_date,
              }
    try:
        raw_res = yield from self._session.get(PROFILE_URL,
                                               params=params,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Cannot get hourly data")
    try:
        json_output = yield from raw_res.json(content_type='text/json')
    except (OSError, json.decoder.JSONDecodeError):
        raise PyHydroQuebecError("Could not get hourly data")
    if not json_output.get('results'):
        # Missing temperature data from Hydro-Quebec
        # (but don't crash the app for that)
        hourly_weather_data = [None] * 24
    else:
        hourly_weather_data = json_output['results'][0]['listeTemperaturesHeure']

    # Add temperatures to the processed data
    processed_hourly_data = [{'hour': data['heure'],
                              'lower': data['consoReg'],
                              'high': data['consoHaut'],
                              'total': data['consoTotal'],
                              'temp': hourly_weather_data[i]}
                             for i, data in enumerate(hourly_consumption_data)]
    raw_hourly_data = {'Energy': hourly_consumption_data,
                       'Power': hourly_power_data,
                       'Weather': hourly_weather_data}
    hourly_data = {'processed_hourly_data': processed_hourly_data,
                   'raw_hourly_data': raw_hourly_data}
    return hourly_data

Get hourly data.
entailment
def fetch_data_detailled_energy_use(self, start_date=None, end_date=None):
    """Get detailed energy use from a specific contract."""
    if start_date is None:
        start_date = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)
    if end_date is None:
        end_date = datetime.datetime.now(HQ_TIMEZONE)
    # Get http session
    yield from self._get_httpsession()
    # Get login page
    login_url = yield from self._get_login_page()
    # Post login page
    yield from self._post_login_page(login_url)
    # Get p_p_id and contracts
    p_p_id, contracts = yield from self._get_p_p_id_and_contract()
    # If we don't have any contracts, that means we have only
    # one contract. Let's get it.
    if contracts == {}:
        contracts = yield from self._get_lonely_contract()
    # For all contracts
    for contract, contract_url in contracts.items():
        if contract_url:
            yield from self._load_contract_page(contract_url)
        data = {}
        dates = [(start_date + datetime.timedelta(n))
                 for n in range(int((end_date - start_date).days))]
        for date in dates:
            # Get hourly data
            day_date = date.strftime("%Y-%m-%d")
            hourly_data = yield from self._get_hourly_data(day_date, p_p_id)
            data[day_date] = hourly_data['raw_hourly_data']
        # Add contract
        self._data[contract] = data

Get detailed energy use from a specific contract.
entailment
def fetch_data(self):
    """Get the latest data from HydroQuebec."""
    # Get http session
    yield from self._get_httpsession()
    # Get login page
    login_url = yield from self._get_login_page()
    # Post login page
    yield from self._post_login_page(login_url)
    # Get p_p_id and contracts
    p_p_id, contracts = yield from self._get_p_p_id_and_contract()
    # If we don't have any contracts, that means we have only
    # one contract. Let's get it.
    if contracts == {}:
        contracts = yield from self._get_lonely_contract()
    # Get balances
    balances = yield from self._get_balances()
    balances_len = len(balances)
    balance_id = 0
    # For all contracts
    for contract, contract_url in contracts.items():
        if contract_url:
            yield from self._load_contract_page(contract_url)
        # Get hourly data
        try:
            yesterday = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)
            day_date = yesterday.strftime("%Y-%m-%d")
            hourly_data = yield from self._get_hourly_data(day_date, p_p_id)
            hourly_data = hourly_data['processed_hourly_data']
        except Exception:  # pylint: disable=W0703
            # We don't have hourly data for some reason
            hourly_data = {}
        # Get annual data
        try:
            annual_data = yield from self._get_annual_data(p_p_id)
        except PyHydroQuebecAnnualError:
            # We don't have annual data, which is possible if your
            # contract is younger than 1 year
            annual_data = {}
        # Get monthly data
        monthly_data = yield from self._get_monthly_data(p_p_id)
        monthly_data = monthly_data[0]
        # Get daily data
        start_date = monthly_data.get('dateDebutPeriode')
        end_date = monthly_data.get('dateFinPeriode')
        try:
            daily_data = yield from self._get_daily_data(p_p_id, start_date, end_date)
        except Exception:  # pylint: disable=W0703
            daily_data = []
        # We have to test daily_data because it's empty
        # at the end/start of a period
        if daily_data:
            daily_data = daily_data[0]['courant']
        # Format data
        contract_data = {"balance": balances[balance_id]}
        for key1, key2 in MONTHLY_MAP:
            contract_data[key1] = monthly_data[key2]
        for key1, key2 in ANNUAL_MAP:
            contract_data[key1] = annual_data.get(key2, "")
        # We have to test daily_data because it's empty
        # at the end/start of a period
        if daily_data:
            for key1, key2 in DAILY_MAP:
                contract_data[key1] = daily_data[key2]
        # Hourly
        if hourly_data:
            contract_data['yesterday_hourly_consumption'] = hourly_data
        # Add contract
        self._data[contract] = contract_data
        if balance_id + 1 < balances_len:
            balance_id += 1
Get the latest data from HydroQuebec.
entailment
def get_data(self, contract=None):
    """Return collected data."""
    if contract is None:
        return self._data
    if contract in self._data:
        return {contract: self._data[contract]}
    raise PyHydroQuebecError("Contract {} not found".format(contract))
Return collected data.
entailment
def ping(self):
    """
    Posts the current state of each device to the server and schedules
    the next call in n seconds.
    """
    from datetime import datetime
    nextRun = datetime.utcnow().timestamp() + self.cfg.getPingInterval()
    self.sendHeartbeat()
    self.scheduleNextHeartbeat(nextRun)

Posts the current state of each device to the server and schedules the next call in n seconds.
entailment
def sendHeartbeat(self):
    """
    Posts the current state to the server.
    """
    for name, md in self.cfg.recordingDevices.items():
        try:
            data = marshal(md, recordingDeviceFields)
            data['serviceURL'] = self.cfg.getServiceURL() + API_PREFIX + '/devices/' + name
            targetURL = self.serverURL + API_PREFIX + '/devices/' + name
            logger.info("Pinging " + targetURL)
            resp = self.httpclient.put(targetURL, json=data)
            if resp.status_code != 200:
                logger.warning("Unable to ping server at " + targetURL + " with "
                               + str(data.keys()) + ", response is " + str(resp.status_code))
            else:
                logger.info("Pinged server at " + targetURL + " with " + str(data.items()))
        except Exception:
            logger.exception("Unable to ping server")

Posts the current state to the server.
entailment
def scheduleNextHeartbeat(self, nextRun):
    """
    Schedules the next ping.
    :param nextRun: when we should run next.
    """
    import threading
    from datetime import datetime
    tilNextTime = max(nextRun - datetime.utcnow().timestamp(), 0)
    logging.getLogger('recorder').info("Scheduling next ping in "
                                       + str(round(tilNextTime, 3)) + " seconds")
    threading.Timer(tilNextTime, self.ping).start()

Schedules the next ping. :param nextRun: when we should run next.
entailment
def _validate_argument(self, arg):
    """Validate a type or matcher argument to the constructor."""
    if arg is None:
        return arg
    if isinstance(arg, type):
        return InstanceOf(arg)
    if not isinstance(arg, BaseMatcher):
        raise TypeError(
            "argument of %s can be a type or a matcher (got %r)" % (
                self.__class__.__name__, type(arg)))
    return arg
Validate a type or matcher argument to the constructor.
entailment
def _initialize(self, *args, **kwargs):
    """Initialize the mapping matcher with constructor arguments."""
    self.items = None
    self.keys = None
    self.values = None

    if args:
        if len(args) != 2:
            raise TypeError("expected exactly two positional arguments, "
                            "got %s" % len(args))
        if kwargs:
            raise TypeError(
                "expected positional or keyword arguments, not both")
        # got positional arguments only
        self.keys, self.values = map(self._validate_argument, args)
    elif kwargs:
        has_kv = 'keys' in kwargs and 'values' in kwargs
        has_of = 'of' in kwargs
        if not (has_kv or has_of):
            raise TypeError("expected keys/values or items matchers, "
                            "but got: %s" % list(kwargs.keys()))
        if has_kv and has_of:
            raise TypeError(
                "expected keys & values, or items matchers, not both")
        if has_kv:
            # got keys= and values= matchers
            self.keys = self._validate_argument(kwargs['keys'])
            self.values = self._validate_argument(kwargs['values'])
        else:
            # got of= matcher, which can be a tuple of matchers,
            # or a single matcher for dictionary items
            of = kwargs['of']
            if isinstance(of, tuple):
                try:
                    # got of= as tuple of matchers
                    self.keys, self.values = \
                        map(self._validate_argument, of)
                except ValueError:
                    raise TypeError(
                        "of= tuple for %s has to be a pair "
                        "of matchers/types" % (self.__class__.__name__,))
            else:
                # got of= as a single matcher
                self.items = self._validate_argument(of)

Initialize the mapping matcher with constructor arguments.
entailment
def docs(ctx, output='html', rebuild=False, show=True, verbose=True):
    """Build the docs and show them in the default web browser."""
    sphinx_build = ctx.run(
        'sphinx-build -b {output} {all} {verbose} docs docs/_build'.format(
            output=output,
            all='-a -E' if rebuild else '',
            verbose='-v' if verbose else ''))
    if not sphinx_build.ok:
        fatal("Failed to build the docs", cause=sphinx_build)

    if show:
        path = os.path.join(DOCS_OUTPUT_DIR, 'index.html')
        if sys.platform == 'darwin':
            path = 'file://%s' % os.path.abspath(path)
        webbrowser.open_new_tab(path)

Build the docs and show them in the default web browser.
entailment
def upload(ctx, yes=False):
    """Upload the package to PyPI."""
    import callee
    version = callee.__version__

    # check the package's version
    # TODO: add a 'release' task to automatically bless a version as a release one
    if version.endswith('-dev'):
        fatal("Can't upload a development version (%s) to PyPI!", version)

    # run the upload if it has been confirmed by the user
    if not yes:
        answer = input("Do you really want to upload to PyPI [y/N]? ")
        yes = answer.strip().lower() == 'y'
    if not yes:
        logging.warning("Aborted -- not uploading to PyPI.")
        return -2

    logging.debug("Uploading version %s to PyPI...", version)
    setup_py_upload = ctx.run('python setup.py sdist upload')
    if not setup_py_upload.ok:
        fatal("Failed to upload version %s to PyPI!", version,
              cause=setup_py_upload)
    logging.info("PyPI upload completed successfully.")

    # add a Git tag and push
    git_tag = ctx.run('git tag %s' % version)
    if not git_tag.ok:
        fatal("Failed to add a Git tag for uploaded version %s", version,
              cause=git_tag)
    git_push = ctx.run('git push && git push --tags')
    if not git_push.ok:
        fatal("Failed to push the release upstream.", cause=git_push)
Upload the package to PyPI.
entailment
def fatal(*args, **kwargs):
    """Log an error message and exit.

    Following arguments are keyword-only.

    :param exitcode: Optional exit code to use
    :param cause: Optional Invoke's Result object, i.e.
                  result of a subprocess invocation
    """
    # determine the exitcode to return to the operating system
    exitcode = None
    if 'exitcode' in kwargs:
        exitcode = kwargs.pop('exitcode')
    if 'cause' in kwargs:
        cause = kwargs.pop('cause')
        if not isinstance(cause, Result):
            raise TypeError(
                "invalid cause of fatal error: expected %r, got %r" % (
                    Result, type(cause)))
        exitcode = exitcode or cause.return_code

    logging.error(*args, **kwargs)
    raise Exit(exitcode or -1)
Log an error message and exit. Following arguments are keyword-only. :param exitcode: Optional exit code to use :param cause: Optional Invoke's Result object, i.e. result of a subprocess invocation
entailment
def rungtd1d(time: Union[datetime, str, np.ndarray],
             altkm: np.ndarray,
             glat: float, glon: float) -> xarray.Dataset:
    """
    This is the "atomic" function looped by other functions
    """
    time = todt64(time)
    # %% get solar parameters for date
    f107Ap = gi.getApF107(time, smoothdays=81)
    f107a = f107Ap['f107s'].item()
    f107 = f107Ap['f107'].item()
    Ap = f107Ap['Ap'].item()
    # %% dimensions
    altkm = np.atleast_1d(altkm)
    assert altkm.ndim == 1
    assert isinstance(glon, (int, float))
    assert isinstance(glat, (int, float))
    assert isinstance(time, np.datetime64) or (time.size == 1 and isinstance(
        time[0], np.datetime64)), 'if you have multiple times, for loop over them'
    # don't check Ap, too complicated
    assert isinstance(MASS, (float, int))
    assert len(TSELECOPS) == 25
    # %%
    gtd7.tselec(TSELECOPS)  # like the msis_driver example

    iyd, utsec, stl = datetime2gtd(time, glon)

    gtd7.meters(1)  # makes output in m^-3 and kg/m^-3
    # %%
    if isinstance(Ap, (float, int)):
        Ap = [Ap] * 7  # even if SW(9) == 1, due to f2py needs for array

    dens = np.empty((altkm.size, len(species)))
    temp = np.empty((altkm.size, len(ttypes)))
    for i, a in enumerate(altkm):
        dens[i, :], temp[i, :] = gtd7.gtd7(iyd, utsec, a, glat, glon, stl,
                                           f107a, f107, Ap, MASS)

    dsf = {k: (('time', 'alt_km', 'lat', 'lon'), v[None, :, None, None])
           for (k, v) in zip(species, dens.T)}
    dsf.update({'Tn': (('time', 'alt_km', 'lat', 'lon'),
                       temp[:, 1][None, :, None, None]),
                'Texo': (('time', 'alt_km', 'lat', 'lon'),
                         temp[:, 0][None, :, None, None])})

    atmos = xarray.Dataset(dsf,
                           coords={'time': time.astype(datetime),
                                   'alt_km': altkm,
                                   'lat': [glat],
                                   'lon': [glon], },
                           attrs={'Ap': Ap, 'f107': f107, 'f107a': f107a,
                                  'species': species})

    return atmos
This is the "atomic" function looped by other functions
entailment
def form_valid(self, form):
    """Call `form.save()` and super itself."""
    form.save()
    return super(SubscriptionView, self).form_valid(form)
Call `form.save()` and super itself.
entailment
def _uses_aiohttp_session(func):
    """This is a decorator that creates an async with statement around a
    function, and makes sure that a _session argument is always passed.
    Only usable on async functions, of course.
    The _session argument is (supposed to be) an aiohttp.ClientSession
    instance in all functions that this decorator has been used on.
    This is used to make sure that all session objects are properly
    entered and exited, or that they are passed into a function properly.
    This adds a session keyword argument to the method signature, and
    that session will be used as _session if it is not None."""

    # The function the decorator returns
    async def decorated_func(*args, session=None, **kwargs):
        if session is not None:
            # There is a session passed
            return await func(*args, _session=session, **kwargs)
        else:
            # The session argument wasn't passed, so we create our own
            async with aiohttp.ClientSession() as new_session:
                return await func(*args, _session=new_session, **kwargs)

    # We return the decorated func
    return decorated_func

This is a decorator that creates an async with statement around a function, and makes sure that a _session argument is always passed. Only usable on async functions, of course. The _session argument is (supposed to be) an aiohttp.ClientSession instance in all functions that this decorator has been used on. This is used to make sure that all session objects are properly entered and exited, or that they are passed into a function properly. This adds a session keyword argument to the method signature, and that session will be used as _session if it is not None.
entailment
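
A minimal usage sketch for the decorator above (the fetch_json coroutine and the URL are hypothetical, not part of the module):

import asyncio

@_uses_aiohttp_session
async def fetch_json(url, *, _session=None):
    # _session is guaranteed to be a live aiohttp.ClientSession here
    async with _session.get(url) as resp:
        return await resp.json()

# The decorator opens (and closes) a session automatically...
asyncio.run(fetch_json("https://example.com/api.json"))
# ...or an existing session can be passed through instead:
# await fetch_json("https://example.com/api.json", session=my_session)
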
def _add_request_parameters(func):
    """Adds the ratelimit and request timeout parameters to a function."""

    # The function the decorator returns
    async def decorated_func(*args, handle_ratelimit=None, max_tries=None,
                             request_timeout=None, **kwargs):
        return await func(*args, handle_ratelimit=handle_ratelimit,
                          max_tries=max_tries,
                          request_timeout=request_timeout, **kwargs)

    # We return the decorated func
    return decorated_func
Adds the ratelimit and request timeout parameters to a function.
entailment
async def get_stats(self, battletag: str,
                    regions=(EUROPE, KOREA, AMERICAS, CHINA, JAPAN, ANY),
                    platform=None, _session=None, handle_ratelimit=None,
                    max_tries=None, request_timeout=None):
    """Returns the stats for the profiles on the specified regions and
    platform. For regions without a matching user, the format is the same
    as get_profile. The stats are returned in a dictionary with a similar
    format to what
    https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats
    specifies."""
    if platform is None:
        platform = self.default_platform
    try:
        blob_dict = await self._base_request(battletag, "stats", _session,
                                             platform=platform,
                                             handle_ratelimit=handle_ratelimit,
                                             max_tries=max_tries,
                                             request_timeout=request_timeout)
    except ProfileNotFoundError:
        # The battletag doesn't exist
        blob_dict = {}
    existing_regions = {key: val for key, val in blob_dict.items()
                        if ((val is not None) and (key != "_request"))}
    return {key: [inner_val for inner_key, inner_val in val.items()
                  if inner_key == "stats"][0]
            for key, val in existing_regions.items() if key in regions}

Returns the stats for the profiles on the specified regions and platform. For regions without a matching user, the format is the same as get_profile. The stats are returned in a dictionary with a similar format to what https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats specifies.
entailment
async def _base_request(self, battle_tag: str, endpoint_name: str,
                        session: aiohttp.ClientSession, *, platform=None,
                        handle_ratelimit=None, max_tries=None,
                        request_timeout=None):
    """Does a request to some endpoint. This is also where ratelimit logic
    is handled."""
    # Check the different optional arguments; if they're not passed
    # (are None), set them to the defaults for the client object
    if platform is None:
        platform = self.default_platform
    if handle_ratelimit is None:
        handle_ratelimit = self.default_handle_ratelimit
    if max_tries is None:
        max_tries = self.default_max_tries
    if request_timeout is None:
        request_timeout = self.default_request_timeout

    # The battletag with #s removed
    san_battle_tag = self.sanitize_battletag(battle_tag)

    # The ratelimit logic
    for _ in range(max_tries):
        # We execute a request
        try:
            resp_json, status = await self._async_get(
                session,
                self.server_url + self._api_urlpath + "{battle_tag}/{endpoint}".format(
                    battle_tag=san_battle_tag,
                    endpoint=endpoint_name
                ),
                # Indicates what platform we're searching on
                params={"platform": platform},
                # According to
                # https://github.com/SunDwarf/OWAPI/blob/master/owapi/v3/v3_util.py#L18
                # we have to customise our User-Agent, so we do
                headers={"User-Agent": "overwatch_python_api"},
                _async_timeout_seconds=request_timeout
            )
            if status == 429 and resp_json["msg"] == "you are being ratelimited":
                raise RatelimitError
        except RatelimitError:
            # The server returned a ratelimit, so we check if we handle
            # the ratelimiting logic. If so, we wait and then execute the
            # next iteration of the loop.
            if handle_ratelimit:
                # We wait a bit more than the response says we should,
                # to remedy the ratelimiting
                await asyncio.sleep(resp_json["retry"] + 1)
                continue
            else:
                raise
        else:
            # We didn't get an error, so we exit the loop because it was
            # a successful request
            break
    else:
        # The loop didn't stop because of a break, which means we got
        # ratelimited until the maximum number of tries was reached
        raise RatelimitError("Got ratelimited for each request until the "
                             "maximum number of retries was reached.")

    # Validate the response
    if status != 200:
        if status == 404 and resp_json["msg"] == "profile not found":
            raise ProfileNotFoundError(
                "Got HTTP 404, profile not found. This is caused by the given "
                "battletag not existing on the specified platform.")
        if status == 429 and resp_json["msg"] == "you are being ratelimited":
            raise RatelimitError(
                "Got HTTP 429, you are being ratelimited. This is caused by "
                "calls to the api too frequently.")
        raise ConnectionError("Did not get HTTP status 200, got: {0}".format(status))

    return resp_json
Does a request to some endpoint. This is also where ratelimit logic is handled.
entailment
async def _async_get(self, session: aiohttp.ClientSession, *args,
                     _async_timeout_seconds: int = 5, **kwargs):
    """Uses aiohttp to make a get request asynchronously.
    Will raise asyncio.TimeoutError if the request could not be completed
    within _async_timeout_seconds (default 5) seconds."""
    # Taken almost directly from the aiohttp tutorial
    with async_timeout.timeout(_async_timeout_seconds):
        async with session.get(*args, **kwargs) as response:
            return await response.json(), response.status
Uses aiohttp to make a get request asynchronously. Will raise asyncio.TimeoutError if the request could not be completed within _async_timeout_seconds (default 5) seconds.
entailment
def is_method(arg, min_arity=None, max_arity=None):
    """Check if argument is a method.

    Optionally, we can also check if its minimum or maximum arities
    (number of accepted arguments) match the given minimum and/or maximum.
    """
    if not callable(arg):
        return False
    if not any(is_(arg) for is_ in (inspect.ismethod,
                                    inspect.ismethoddescriptor,
                                    inspect.isbuiltin)):
        return False

    try:
        argnames, varargs, kwargs, defaults = getargspec(arg)
    except TypeError:
        # On CPython 2.x, built-in methods of file aren't inspectable,
        # so if it's file.read() or file.write(), we can't tell it for sure.
        # Given how this check is being used, assuming the best is probably
        # all we can do here.
        return True
    else:
        if argnames and argnames[0] == 'self':
            argnames = argnames[1:]

        if min_arity is not None:
            actual_min_arity = len(argnames) - len(defaults or ())
            assert actual_min_arity >= 0, (
                "Minimum arity of %r found to be negative (got %s)!" % (
                    arg, actual_min_arity))
            if int(min_arity) != actual_min_arity:
                return False

        if max_arity is not None:
            actual_max_arity = sys.maxsize if varargs or kwargs else len(argnames)
            if int(max_arity) != actual_max_arity:
                return False

        return True

Check if argument is a method. Optionally, we can also check if its minimum or maximum arities (number of accepted arguments) match the given minimum and/or maximum.
entailment
def _is_readable(self, obj):
    """Check if the argument is a readable file-like object."""
    try:
        read = getattr(obj, 'read')
    except AttributeError:
        return False
    else:
        return is_method(read, max_arity=1)
Check if the argument is a readable file-like object.
entailment
def _is_writable(self, obj):
    """Check if the argument is a writable file-like object."""
    try:
        write = getattr(obj, 'write')
    except AttributeError:
        return False
    else:
        return is_method(write, min_arity=1, max_arity=1)
Check if the argument is a writable file-like object.
entailment
def run(time: datetime, altkm: float,
        glat: Union[float, np.ndarray], glon: Union[float, np.ndarray], *,
        f107a: float = None, f107: float = None, Ap: int = None) -> xarray.Dataset:
    """
    loops the rungtd1d function below. Figure it's easier to troubleshoot
    in Python than Fortran.
    """
    glat = np.atleast_2d(glat)
    glon = np.atleast_2d(glon)  # has to be here
    # %% altitude 1-D
    if glat.size == 1 and glon.size == 1 and isinstance(time, (str, date, datetime, np.datetime64)):
        atmos = rungtd1d(time, altkm, glat.squeeze()[()], glon.squeeze()[()],
                         f107a=f107a, f107=f107, Ap=Ap)
    # %% lat/lon grid at 1 altitude
    else:
        atmos = loopalt_gtd(time, glat, glon, altkm,
                            f107a=f107a, f107=f107, Ap=Ap)

    return atmos
loops the rungtd1d function below. Figure it's easier to troubleshoot in Python than Fortran.
entailment
def loopalt_gtd(time: datetime,
                glat: Union[float, np.ndarray], glon: Union[float, np.ndarray],
                altkm: Union[float, List[float], np.ndarray], *,
                f107a: float = None, f107: float = None, Ap: int = None) -> xarray.Dataset:
    """
    loop over location and time

    time: datetime or numpy.datetime64 or list of datetime or np.ndarray of datetime
    glat: float or 2-D np.ndarray
    glon: float or 2-D np.ndarray
    altkm: float or list or 1-D np.ndarray
    """
    glat = np.atleast_2d(glat)
    glon = np.atleast_2d(glon)
    assert glat.ndim == glon.ndim == 2

    times = np.atleast_1d(time)
    assert times.ndim == 1

    atmos = xarray.Dataset()
    for t in times:
        print('computing', t)
        for i in range(glat.shape[0]):
            for j in range(glat.shape[1]):
                # atmos = xarray.concat((atmos, rungtd1d(t, altkm, glat[i, j], glon[i, j])),
                #                       data_vars='minimal', coords='minimal', dim='lon')
                atm = rungtd1d(t, altkm, glat[i, j], glon[i, j],
                               f107a=f107a, f107=f107, Ap=Ap)
                atmos = xarray.merge((atmos, atm))

    atmos.attrs = atm.attrs

    return atmos
loop over location and time time: datetime or numpy.datetime64 or list of datetime or np.ndarray of datetime glat: float or 2-D np.ndarray glon: float or 2-D np.ndarray altkm: float or list or 1-D np.ndarray
entailment
def rungtd1d(time: datetime, altkm: np.ndarray, glat: float, glon: float, *,
             f107a: float = None, f107: float = None, Ap: int = None) -> xarray.Dataset:
    """
    This is the "atomic" function looped by other functions
    """
    time = todatetime(time)
    # %% get solar parameters for date, unless all were given explicitly
    if not (f107a and f107 and Ap):
        f107Ap = gi.getApF107(time, smoothdays=81)
        f107a = f107Ap['f107s'].item()
        f107 = f107Ap['f107'].item()
        Ap = f107Ap['Ap'].item()
    # %% dimensions
    altkm = np.atleast_1d(altkm)
    assert altkm.ndim == 1
    assert isinstance(glon, (int, float))
    assert isinstance(glat, (int, float))
    # %%
    iyd = time.strftime('%y%j')
    # %%
    dens = np.empty((altkm.size, len(species)))
    temp = np.empty((altkm.size, len(ttypes)))
    for i, a in enumerate(altkm):
        cmd = [str(EXE), iyd,
               str(time.hour), str(time.minute), str(time.second),
               str(glat), str(glon),
               str(f107a), str(f107), str(Ap), str(a)]
        ret = subprocess.check_output(cmd, universal_newlines=True,
                                      stderr=subprocess.DEVNULL)
        f = io.StringIO(ret)
        dens[i, :] = np.genfromtxt(f, max_rows=1)
        temp[i, :] = np.genfromtxt(f, max_rows=1)

    dsf = {k: (('time', 'alt_km', 'lat', 'lon'), v[None, :, None, None])
           for (k, v) in zip(species, dens.T)}
    dsf.update({'Tn': (('time', 'alt_km', 'lat', 'lon'),
                       temp[:, 1][None, :, None, None]),
                'Texo': (('time', 'alt_km', 'lat', 'lon'),
                         temp[:, 0][None, :, None, None])})

    atmos = xarray.Dataset(dsf,
                           coords={'time': [time], 'alt_km': altkm,
                                   'lat': [glat], 'lon': [glon], },
                           attrs={'Ap': Ap, 'f107': f107, 'f107a': f107a,
                                  'species': species})

    return atmos
This is the "atomic" function looped by other functions
entailment
def _validate_desc(self, desc):
    """Validate the predicate description."""
    if desc is None:
        return desc

    if not isinstance(desc, STRING_TYPES):
        raise TypeError(
            "predicate description for Matching must be a string, "
            "got %r" % (type(desc),))

    # Python 2 mandates __repr__ to be an ASCII string,
    # so if Unicode is passed (usually due to unicode_literals),
    # it should be ASCII-encodable.
    if not IS_PY3 and isinstance(desc, unicode):
        try:
            desc = desc.encode('ascii', errors='strict')
        except UnicodeEncodeError:
            raise TypeError("predicate description must be "
                            "an ASCII string in Python 2")

    return desc
Validate the predicate description.
entailment
def _get_placeholder_repr(self):
    """Return the placeholder part of matcher's ``__repr__``."""
    placeholder = '...'
    if self.TRANSFORM is not None:
        placeholder = '%s(%s)' % (self.TRANSFORM.__name__, placeholder)
    return placeholder
Return the placeholder part of matcher's ``__repr__``.
entailment
def _validate_class_definition(meta, classname, bases, dict_):
    """Ensure the matcher class definition is acceptable.

    :raise RuntimeError: If there is a problem
    """
    # let the BaseMatcher class be created without hassle
    if meta._is_base_matcher_class_definition(classname, dict_):
        return

    # ensure that no important magic methods are being overridden
    for name, member in dict_.items():
        if not (name.startswith('__') and name.endswith('__')):
            continue

        # check if it's not a whitelisted magic method name
        name = name[2:-2]
        if not name:
            continue  # unlikely case of a ``____`` function
        if name not in meta._list_magic_methods(BaseMatcher):
            continue
        if name in meta.USER_OVERRIDABLE_MAGIC_METHODS:
            continue

        # non-function attributes, like __slots__, are harmless
        if not inspect.isfunction(member):
            continue

        # classes in this very module are exempt, since they define
        # the very behavior of matchers we want to protect
        if member.__module__ == __name__:
            continue

        raise RuntimeError(
            "matcher class %s cannot override the __%s__ method" % (
                classname, name))
Ensure the matcher class definition is acceptable. :raise RuntimeError: If there is a problem
entailment
def _is_base_matcher_class_definition(meta, classname, dict_):
    """Checks whether given class name and dictionary
    define the :class:`BaseMatcher`.
    """
    if classname != 'BaseMatcher':
        return False
    methods = list(filter(inspect.isfunction, dict_.values()))
    return methods and all(m.__module__ == __name__ for m in methods)
Checks whether given class name and dictionary define the :class:`BaseMatcher`.
entailment
def _list_magic_methods(meta, class_):
    """Return names of magic methods defined by a class.

    :return: Iterable of magic methods, each w/o the ``__`` prefix/suffix
    """
    return [
        name[2:-2] for name, member in class_.__dict__.items()
        if len(name) > 4 and name.startswith('__') and name.endswith('__')
        and inspect.isfunction(member)
    ]
Return names of magic methods defined by a class. :return: Iterable of magic methods, each w/o the ``__`` prefix/suffix
entailment
def semver(version, loose):
    if isinstance(version, SemVer):
        if version.loose == loose:
            return version
        else:
            version = version.version
    elif not isinstance(version, str):
        # xxx:
        raise InvalidTypeIncluded("must be str, but {!r}".format(version))
    """
    if (!(this instanceof SemVer)) return new SemVer(version, loose);
    """
    return SemVer(version, loose)
if (!(this instanceof SemVer)) return new SemVer(version, loose);
entailment
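
A minimal usage sketch for the factory function above (assuming a node-semver-style SemVer class as in the surrounding module; the version string is illustrative):

v = semver("1.2.3", False)    # parses the string into a SemVer instance
same = semver(v, False)       # same loose flag: returns the instance unchanged
relaxed = semver(v, True)     # loose flag differs: re-parses v.version
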
def autodoc_process_docstring(app, what, name, obj, options, lines):
    """Handler for the event emitted when autodoc processes a docstring.

    See http://sphinx-doc.org/ext/autodoc.html#event-autodoc-process-docstring.
    The TL;DR is that we can modify ``lines`` in-place to influence the output.
    """
    # check that only symbols that can be directly imported from ``callee``
    # package are being documented
    _, symbol = name.rsplit('.', 1)
    if symbol not in callee.__all__:
        raise SphinxError(
            "autodoc'd '%s' is not a part of the public API!" % name)

    # for classes exempt from automatic merging of class & __init__ docs,
    # pretend their __init__ methods have no docstring at all,
    # so that nothing will be appended to the class's docstring
    if what == 'class' and name in autoclass_content_exceptions:
        # amusingly, when autodoc reads the constructor's docstring
        # for appending it to class docstring, it will report ``what``
        # as 'class' (again!); hence we must check what it actually read
        ctor_docstring_lines = prepare_docstring(obj.__init__.__doc__)
        if lines == ctor_docstring_lines:
            lines[:] = []
Handler for the event emitted when autodoc processes a docstring. See http://sphinx-doc.org/ext/autodoc.html#event-autodoc-process-docstring. The TL;DR is that we can modify ``lines`` in-place to influence the output.
entailment
def clean_email(self):
    """Raise ValidationError if the contact exists."""
    contacts = self.api.lists.contacts(id=self.list_id)['result']
    for contact in contacts:
        if contact['email'] == self.cleaned_data['email']:
            raise forms.ValidationError(
                _(u'This email is already subscribed'))
    return self.cleaned_data['email']
Raise ValidationError if the contact exists.
entailment
def add_contact(self):
    """Create a contact using the email on the list."""
    self.api.lists.addcontact(
        contact=self.cleaned_data['email'],
        id=self.list_id,
        method='POST')

Create a contact using the email on the list.
entailment
def api(self):
    """Get or create an Api() instance using django settings."""
    api = getattr(self, '_api', None)
    if api is None:
        self._api = mailjet.Api()
    return self._api
Get or create an Api() instance using django settings.
entailment
def list_id(self):
    """Get or create the list id."""
    list_id = getattr(self, '_list_id', None)
    if list_id is None:
        for l in self.api.lists.all()['lists']:
            if l['name'] == self.list_name:
                self._list_id = l['id']
        if not getattr(self, '_list_id', None):
            self._list_id = self.api.lists.create(
                label=self.list_label,
                name=self.list_name,
                method='POST')['list_id']
    return self._list_id
Get or create the list id.
entailment
def getargspec(obj):
    """Portable version of inspect.getargspec().

    Necessary because the original is no longer available
    starting from Python 3.6.

    :return: 4-tuple of (argnames, varargname, kwargname, defaults)

    Note that distinction between positional-or-keyword and keyword-only
    parameters will be lost, as the original getargspec() doesn't honor it.
    """
    try:
        return inspect.getargspec(obj)
    except AttributeError:
        pass  # we let a TypeError through

    # translate the signature object back into the 4-tuple
    argnames = []
    varargname, kwargname = None, None
    defaults = []
    for name, param in inspect.signature(obj).parameters.items():
        if param.kind == inspect.Parameter.VAR_POSITIONAL:
            varargname = name
        elif param.kind == inspect.Parameter.VAR_KEYWORD:
            kwargname = name
        else:
            argnames.append(name)
            if param.default is not inspect.Parameter.empty:
                defaults.append(param.default)

    defaults = defaults or None
    return argnames, varargname, kwargname, defaults
Portable version of inspect.getargspec(). Necessary because the original is no longer available starting from Python 3.6. :return: 4-tuple of (argnames, varargname, kwargname, defaults) Note that distinction between positional-or-keyword and keyword-only parameters will be lost, as the original getargspec() doesn't honor it.
entailment
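
A quick usage sketch (the sample function is hypothetical):

def sample(a, b=1, *args, **kwargs):
    pass

argnames, varargname, kwargname, defaults = getargspec(sample)
# argnames -> ['a', 'b'], varargname -> 'args', kwargname -> 'kwargs',
# and defaults holds the single default value for b
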
def read_tags(filename):
    """Reads values of "magic tags" defined in the given Python file.

    :param filename: Python filename to read the tags from
    :return: Dictionary of tags
    """
    with open(filename) as f:
        ast_tree = ast.parse(f.read(), filename)

    res = {}
    for node in ast.walk(ast_tree):
        if type(node) is not ast.Assign:
            continue

        target = node.targets[0]
        if type(target) is not ast.Name:
            continue
        if not (target.id.startswith('__') and target.id.endswith('__')):
            continue

        name = target.id[2:-2]
        res[name] = ast.literal_eval(node.value)

    return res
Reads values of "magic tags" defined in the given Python file. :param filename: Python filename to read the tags from :return: Dictionary of tags
entailment
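
A brief usage sketch (the file name and its contents are hypothetical):

# Given a file setup_meta.py containing:
#     __version__ = '1.0.3'
#     __author__ = 'Jane Doe'
tags = read_tags('setup_meta.py')
# tags -> {'version': '1.0.3', 'author': 'Jane Doe'}
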
def normalize_unicode(text):
    """
    Normalize any unicode characters to ascii equivalent.

    https://docs.python.org/2/library/unicodedata.html#unicodedata.normalize
    """
    if isinstance(text, six.text_type):
        return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8')
    else:
        return text
Normalize any unicode characters to ascii equivalent https://docs.python.org/2/library/unicodedata.html#unicodedata.normalize
entailment
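
A minimal usage sketch:

normalize_unicode(u'Café déjà vu')  # -> 'Cafe deja vu'
normalize_unicode(b'plain bytes')   # non-text input is returned unchanged
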
def word_tokenize(text, stopwords=_stopwords, ngrams=None,
                  min_length=0, ignore_numeric=True):
    """
    Parses the given text and yields tokens which represent words within
    the given text. Tokens are assumed to be divided by any form of
    whitespace character.
    """
    if ngrams is None:
        ngrams = 1

    text = re.sub(re.compile('\'s'), '', text)  # Simple heuristic
    text = re.sub(_re_punctuation, '', text)

    matched_tokens = re.findall(_re_token, text.lower())
    for tokens in get_ngrams(matched_tokens, ngrams):
        for i in range(len(tokens)):
            tokens[i] = tokens[i].strip(punctuation)
            if len(tokens[i]) < min_length or tokens[i] in stopwords:
                break
            if ignore_numeric and isnumeric(tokens[i]):
                break
        else:
            yield tuple(tokens)
Parses the given text and yields tokens which represent words within the given text. Tokens are assumed to be divided by any form of whitespace character.
entailment
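
A brief usage sketch, assuming the module-level helpers (_stopwords, _re_token, get_ngrams, isnumeric) behave as in the hashedindex textparser module:

list(word_tokenize('quick brown fox'))
# -> [('quick',), ('brown',), ('fox',)]  (minus any tokens in the stopword set)
list(word_tokenize('quick brown fox', ngrams=2))
# -> [('quick', 'brown'), ('brown', 'fox')]
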
def cmake_setup():
    """
    attempt to build using CMake >= 3
    """
    cmake_exe = shutil.which('cmake')
    if not cmake_exe:
        raise FileNotFoundError('CMake not available')

    wopts = ['-G', 'MinGW Makefiles', '-DCMAKE_SH="CMAKE_SH-NOTFOUND'] if os.name == 'nt' else []

    subprocess.check_call([cmake_exe] + wopts + [str(SRCDIR)], cwd=BINDIR)

    ret = subprocess.run([cmake_exe, '--build', str(BINDIR)],
                         stderr=subprocess.PIPE,
                         universal_newlines=True)

    result(ret)
attempt to build using CMake >= 3
entailment
def meson_setup():
    """
    attempt to build with Meson + Ninja
    """
    meson_exe = shutil.which('meson')
    ninja_exe = shutil.which('ninja')

    if not meson_exe or not ninja_exe:
        raise FileNotFoundError('Meson or Ninja not available')

    if not (BINDIR / 'build.ninja').is_file():
        subprocess.check_call([meson_exe, str(SRCDIR)], cwd=BINDIR)

    ret = subprocess.run(ninja_exe, cwd=BINDIR,
                         stderr=subprocess.PIPE,
                         universal_newlines=True)

    result(ret)
attempt to build with Meson + Ninja
entailment
def add_term_occurrence(self, term, document):
    """
    Adds an occurrence of the term in the specified document.
    """
    if document not in self._documents:
        self._documents[document] = 0

    if term not in self._terms:
        if self._freeze:
            return
        else:
            self._terms[term] = collections.Counter()

    if document not in self._terms[term]:
        self._terms[term][document] = 0

    self._documents[document] += 1
    self._terms[term][document] += 1
Adds an occurrence of the term in the specified document.
entailment
def get_total_term_frequency(self, term):
    """
    Gets the frequency of the specified term in the entire corpus
    added to the HashedIndex.
    """
    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)
    return sum(self._terms[term].values())
Gets the frequency of the specified term in the entire corpus added to the HashedIndex.
entailment
def get_term_frequency(self, term, document, normalized=False):
    """
    Returns the frequency of the term specified in the document.
    """
    if document not in self._documents:
        raise IndexError(DOCUMENT_DOES_NOT_EXIST)
    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)

    result = self._terms[term].get(document, 0)
    if normalized:
        result /= self.get_document_length(document)
    return float(result)
Returns the frequency of the term specified in the document.
entailment
def get_document_frequency(self, term):
    """
    Returns the number of documents the specified term appears in.
    """
    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)
    else:
        return len(self._terms[term])
Returns the number of documents the specified term appears in.
entailment
def get_document_length(self, document):
    """
    Returns the number of terms found within the specified document.
    """
    if document in self._documents:
        return self._documents[document]
    else:
        raise IndexError(DOCUMENT_DOES_NOT_EXIST)
Returns the number of terms found within the specified document.
entailment
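
A short usage sketch of the HashedIndex methods above (assuming the class can be constructed with no required arguments, as in the hashedindex package):

index = HashedIndex()
for token in ('fox', 'dog', 'fox'):
    index.add_term_occurrence(token, 'doc1.txt')
index.add_term_occurrence('dog', 'doc2.txt')

index.get_term_frequency('fox', 'doc1.txt')   # -> 2.0
index.get_document_frequency('dog')           # -> 2 (appears in both documents)
index.get_document_length('doc1.txt')         # -> 3 term occurrences
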
def get_documents(self, term):
    """
    Returns all documents related to the specified term in the form
    of a Counter object.
    """
    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)
    else:
        return self._terms[term]
Returns all documents related to the specified term in the form of a Counter object.
entailment
def get_tfidf(self, term, document, normalized=False):
    """
    Returns the Term-Frequency Inverse-Document-Frequency value for the
    given term in the specified document. If normalized is True, term
    frequency will be divided by the document length.
    """
    tf = self.get_term_frequency(term, document)

    # Speeds up performance by avoiding extra calculations
    if tf != 0.0:
        # Add 1 to document frequency to prevent divide by 0
        # (Laplacian Correction)
        df = 1 + self.get_document_frequency(term)
        n = 2 + len(self._documents)

        if normalized:
            tf /= self.get_document_length(document)

        return tf * math.log10(n / df)
    else:
        return 0.0
Returns the Term-Frequency Inverse-Document-Frequency value for the given term in the specified document. If normalized is True, term frequency will be divided by the document length.
entailment
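
A quick worked example of the weighting above, with illustrative numbers: if a term occurs twice in a document (tf = 2) and appears in 4 of the 9 documents in the index, then df = 1 + 4 = 5, n = 2 + 9 = 11, and the returned weight is 2 * log10(11 / 5) ≈ 0.685; with normalized=True, tf would first be divided by the document length.
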
def generate_document_vector(self, doc, mode='tfidf'):
    """
    Returns a representation of the specified document as a feature
    vector weighted according to the mode specified (by default tf-idf).
    A custom weighting function can also be passed, which receives the
    hashedindex instance, the selected term and the document as parameters.

    The result will be returned in the form of a list. This can be
    converted into a numpy array if required, using the `np.asarray` method.

    Available built-in modes:
    * tfidf: Term Frequency Inverse Document Frequency
    * ntfidf: Normalized Term Frequency Inverse Document Frequency
    * tf: Term Frequency
    * ntf: Normalized Term Frequency
    """
    if mode == 'tfidf':
        selected_function = HashedIndex.get_tfidf
    elif mode == 'ntfidf':
        selected_function = functools.partial(HashedIndex.get_tfidf,
                                              normalized=True)
    elif mode == 'tf':
        selected_function = HashedIndex.get_term_frequency
    elif mode == 'ntf':
        selected_function = functools.partial(HashedIndex.get_term_frequency,
                                              normalized=True)
    elif hasattr(mode, '__call__'):
        selected_function = mode
    else:
        raise ValueError('Unexpected mode: %s' % mode)

    result = []
    for term in self._terms:
        result.append(selected_function(self, term, doc))

    return result

Returns a representation of the specified document as a feature vector weighted according to the mode specified (by default tf-idf). A custom weighting function can also be passed, which receives the hashedindex instance, the selected term and the document as parameters. The result will be returned in the form of a list. This can be converted into a numpy array if required, using the `np.asarray` method. Available built-in modes: * tfidf: Term Frequency Inverse Document Frequency * ntfidf: Normalized Term Frequency Inverse Document Frequency * tf: Term Frequency * ntf: Normalized Term Frequency
entailment
def generate_feature_matrix(self, mode='tfidf'):
    """
    Returns a feature matrix in the form of a list of lists which
    represents the terms and documents in this Inverted Index, using
    tf-idf weighting by default. The term counts in each document can
    alternatively be used by specifying mode='tf'. A custom weighting
    function can also be passed, which receives a term and document
    as parameters.

    The size of the matrix is equal to m x n where m is the number of
    documents and n is the number of terms.

    The list-of-lists format returned by this function can be very
    easily converted to a numpy matrix if required, using the
    `np.asarray` method.
    """
    result = []
    for doc in self._documents:
        result.append(self.generate_document_vector(doc, mode))
    return result

Returns a feature matrix in the form of a list of lists which represents the terms and documents in this Inverted Index, using tf-idf weighting by default. The term counts in each document can alternatively be used by specifying mode='tf'. A custom weighting function can also be passed, which receives a term and document as parameters. The size of the matrix is equal to m x n where m is the number of documents and n is the number of terms. The list-of-lists format returned by this function can be very easily converted to a numpy matrix if required, using the `np.asarray` method.
entailment
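
A short usage sketch converting the output to a numpy array (continuing the index built in the earlier sketch):

import numpy as np

matrix = np.asarray(index.generate_feature_matrix(mode='tfidf'))
# matrix.shape == (number of documents, number of terms)
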
def find_class_in_list(klass, lst):
    """
    Returns the first occurrence of an instance of type `klass` in
    the given list, or None if no such instance is present.
    """
    filtered = list(filter(lambda x: x.__class__ == klass, lst))
    if filtered:
        return filtered[0]
    return None
Returns the first occurrence of an instance of type `klass` in the given list, or None if no such instance is present.
entailment
def find_classes_in_list(klasses, lst):
    """
    Returns a tuple containing an entry corresponding to each of the
    requested class types, where the entry is either the first object
    instance of that type or None if no such instance is available.

    Example Usage:

        find_classes_in_list(
            [Address, Response],
            [<classes.Response...>, <classes.Amount...>, <classes.Address...>])

    Produces:

        (<classes.Address...>, <classes.Response...>)
    """
    if not isinstance(klasses, list):
        klasses = [klasses]
    return tuple(map(lambda klass: find_class_in_list(klass, lst), klasses))

Returns a tuple containing an entry corresponding to each of the requested class types, where the entry is either the first object instance of that type or None if no such instance is available. Example Usage: find_classes_in_list( [Address, Response], [<classes.Response...>, <classes.Amount...>, <classes.Address...>]) Produces: (<classes.Address...>, <classes.Response...>)
entailment
def _build_parmlist(self, parameters):
    """
    Converts a dictionary of name and value pairs into a PARMLIST
    string value acceptable to the Payflow Pro API.
    """
    args = []
    for key, value in parameters.items():
        if value is not None:
            # We always use the explicit-length keyname format, to reduce
            # the chance of requests failing due to unusual characters in
            # parameter values.
            try:
                classinfo = unicode
            except NameError:
                classinfo = str
            if isinstance(value, classinfo):
                key = '%s[%d]' % (key.upper(), len(value.encode('utf-8')))
            else:
                key = '%s[%d]' % (key.upper(), len(str(value)))
            args.append('%s=%s' % (key, value))
    args.sort()
    parmlist = '&'.join(args)
    return parmlist
Converts a dictionary of name and value pairs into a PARMLIST string value acceptable to the Payflow Pro API.
entailment
def _parse_parmlist(self, parmlist):
    """
    Parses a PARMLIST string into a dictionary of name and value pairs.

    The parsing is complicated by the following:

    - parameter keynames may or may not include a length specification
    - delimiter characters (=, &) may appear inside parameter values,
      provided the parameter has an explicit length.

    For example, the following parmlist values are possible:

        A=B&C=D
        A[1]=B&C[1]=D
        A=B&C[1]=D
        A[3]=B&B&C[1]=D (Here, the value of A is "B&B")
        A[1]=B&C[3]=D=7 (Here, the value of C is "D=7")
    """
    parmlist = "&" + parmlist
    name_re = re.compile(r'\&([A-Z0-9_]+)(\[\d+\])?=')

    results = {}
    offset = 0
    match = name_re.search(parmlist, offset)
    while match:
        name, len_suffix = match.groups()
        offset = match.end()
        if len_suffix:
            val_len = int(len_suffix[1:-1])
        else:
            next_match = name_re.search(parmlist, offset)
            if next_match:
                val_len = next_match.start() - match.end()
            else:
                # At end of parmlist
                val_len = len(parmlist) - match.end()

        value = parmlist[match.end():match.end() + val_len]
        results[name.lower()] = value

        match = name_re.search(parmlist, offset)
    return results
Parses a PARMLIST string into a dictionary of name and value pairs. The parsing is complicated by the following: - parameter keynames may or may not include a length specification - delimiter characters (=, &) may appear inside parameter values, provided the parameter has an explicit length. For example, the following parmlist values are possible: A=B&C=D A[1]=B&C[1]=D A=B&C[1]=D A[3]=B&B&C[1]=D (Here, the value of A is "B&B") A[1]=B&C[3]=D=7 (Here, the value of C is "D=7")
entailment
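
A round-trip usage sketch for the two methods above (the PayflowClient holder class is hypothetical; the methods behave as defined):

client = PayflowClient()
parms = client._build_parmlist({'user': 'jane', 'amt': 10.5})
# parms -> 'AMT[4]=10.5&USER[4]=jane'
client._parse_parmlist(parms)
# -> {'amt': '10.5', 'user': 'jane'}
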
def request(self, url):
    """
    Send a http request to the given *url*, try to decode the reply
    assuming it's JSON in UTF-8, and return the result

    :returns: Decoded result, or None in case of an error
    :rtype: mixed
    """
    self.logger.debug('url:\n' + url)
    try:
        response = urlopen(url)
        return json.loads(response.read().decode('utf-8'))
    except URLError:
        self.logger.info('Server connection problem')
    except Exception:
        self.logger.info('Server format problem')
Send a http request to the given *url*, try to decode the reply assuming it's JSON in UTF-8, and return the result :returns: Decoded result, or None in case of an error :rtype: mixed
entailment
def query(self, address, acceptlanguage=None, limit=20, countrycodes=None):
    """
    Issue a geocoding query for *address* to the Nominatim instance and
    return the decoded results

    :param address: a query string with an address or presumed parts of an address
    :type address: str or (if python2) unicode
    :param acceptlanguage: rfc2616 language code
    :type acceptlanguage: str or None
    :param limit: limit the number of results
    :type limit: int or None
    :param countrycodes: restrict the search to countries given by their
        ISO 3166-1alpha2 codes
        (cf. https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 )
    :type countrycodes: str iterable
    :returns: a list of search results (each a dict)
    :rtype: list or None
    """
    url = self.url + '&q=' + quote_plus(address)
    if acceptlanguage:
        url += '&accept-language=' + acceptlanguage
    if limit:
        url += '&limit=' + str(limit)
    if countrycodes:
        url += '&countrycodes=' + ','.join(countrycodes)
    return self.request(url)
Issue a geocoding query for *address* to the Nominatim instance and return the decoded results :param address: a query string with an address or presumed parts of an address :type address: str or (if python2) unicode :param acceptlanguage: rfc2616 language code :type acceptlanguage: str or None :param limit: limit the number of results :type limit: int or None :param countrycodes: restrict the search to countries given by their ISO 3166-1alpha2 codes (cf. https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 ) :type countrycodes: str iterable :returns: a list of search results (each a dict) :rtype: list or None
entailment
def query(self, lat=None, lon=None, osm_id=None, osm_type=None,
          acceptlanguage='', zoom=18):
    """
    Issue a reverse geocoding query for a place given by *lat* and *lon*,
    or by *osm_id* and *osm_type*, to the Nominatim instance and return
    the decoded results

    :param lat: the geographical latitude of the place
    :param lon: the geographical longitude of the place
    :param osm_id: openstreetmap identifier osm_id
    :type osm_id: str
    :param osm_type: openstreetmap type osm_type
    :type osm_type: str
    :param acceptlanguage: rfc2616 language code
    :type acceptlanguage: str or None
    :param zoom: zoom factor from 0 to 18
    :type zoom: int or None or a key in :data:`zoom_aliases`
    :returns: a list of search results (each a dict)
    :rtype: list or None
    :raise: NominatimException if invalid zoom value
    """
    url = self.url
    if osm_id is not None and osm_type not in ('N', 'W', 'R'):
        raise NominatimException('invalid osm_type')
    if osm_id is not None and osm_type is not None:
        url += '&osm_id=' + osm_id + '&osm_type=' + osm_type
    elif lat is not None and lon is not None:
        url += '&lat=' + str(lat) + '&lon=' + str(lon)
    else:
        return None
    if acceptlanguage:
        url += '&accept-language=' + acceptlanguage
    if zoom in zoom_aliases:
        zoom = zoom_aliases[zoom]
    if not isinstance(zoom, int) or zoom < 0 or zoom > 18:
        raise NominatimException('zoom must effectively be between 0 and 18')
    url += '&zoom=' + str(zoom)
    return self.request(url)

Issue a reverse geocoding query for a place given by *lat* and *lon*, or by *osm_id* and *osm_type*, to the Nominatim instance and return the decoded results :param lat: the geographical latitude of the place :param lon: the geographical longitude of the place :param osm_id: openstreetmap identifier osm_id :type osm_id: str :param osm_type: openstreetmap type osm_type :type osm_type: str :param acceptlanguage: rfc2616 language code :type acceptlanguage: str or None :param zoom: zoom factor from 0 to 18 :type zoom: int or None or a key in :data:`zoom_aliases` :returns: a list of search results (each a dict) :rtype: list or None :raise: NominatimException if invalid zoom value
entailment
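For illustration, a hedged sketch of both reverse-lookup modes; the wrapper class name `NominatimReverse` is an assumption, as is the OSM way id used:

rev = NominatimReverse()                     # hypothetical class name
by_coords = rev.query(lat=52.5487, lon=-1.8164, zoom=10)
by_osm = rev.query(osm_id='90394480', osm_type='W')   # illustrative id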
def from_model(cls, model_name, **kwargs): """ Define a grid using the specifications of a given model. Parameters ---------- model_name : string Name of the model (see :func:`get_supported_models` for available model names). Supports multiple formats (e.g., 'GEOS5', 'GEOS-5' or 'GEOS_5'). **kwargs : optional Parameters that override the model or default grid settings (See Other Parameters below). Returns ------- A :class:`CTMGrid` object. Other Parameters ---------------- resolution : (float, float) Horizontal grid resolution (lon, lat) or (DI, DJ) [degrees] Psurf : float Average surface pressure [hPa] (default: 1013.25) Notes ----- Regridded vertical models may have several valid names (e.g., 'GEOS5_47L' and 'GEOS5_REDUCED' refer to the same model). """ settings = _get_model_info(model_name) model = settings.pop('model_name') for k, v in list(kwargs.items()): if k in ('resolution', 'Psurf'): settings[k] = v return cls(model, **settings)
Define a grid using the specifications of a given model. Parameters ---------- model_name : string Name of the model (see :func:`get_supported_models` for available model names). Supports multiple formats (e.g., 'GEOS5', 'GEOS-5' or 'GEOS_5'). **kwargs : optional Parameters that override the model or default grid settings (See Other Parameters below). Returns ------- A :class:`CTMGrid` object. Other Parameters ---------------- resolution : (float, float) Horizontal grid resolution (lon, lat) or (DI, DJ) [degrees] Psurf : float Average surface pressure [hPa] (default: 1013.25) Notes ----- Regridded vertical models may have several valid names (e.g., 'GEOS5_47L' and 'GEOS5_REDUCED' refer to the same model).
entailment
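A short sketch of calling this classmethod, assuming it is defined on `CTMGrid` as the docstring states; the model name and override value are illustrative:

# Build a GEOS5 grid, overriding only the horizontal resolution.
grid = CTMGrid.from_model('GEOS5', resolution=(2.5, 2.0))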
def copy_from_model(cls, model_name, reference, **kwargs): """ Set up a user-defined grid using the specifications of a reference grid model. Parameters ---------- model_name : string Name of the user-defined grid model. reference : string or :class:`CTMGrid` instance Name of the reference model (see :func:`get_supported_models`), or a :class:`CTMGrid` object from which grid set-up is copied. **kwargs Any set-up parameter which will override the settings of the reference model (see :class:`CTMGrid` parameters). Returns ------- A :class:`CTMGrid` object. """ if isinstance(reference, cls): settings = reference.__dict__.copy() settings.pop('model') else: settings = _get_model_info(reference) settings.pop('model_name') settings.update(kwargs) settings['reference'] = reference return cls(model_name, **settings)
Set up a user-defined grid using the specifications of a reference grid model. Parameters ---------- model_name : string Name of the user-defined grid model. reference : string or :class:`CTMGrid` instance Name of the reference model (see :func:`get_supported_models`), or a :class:`CTMGrid` object from which grid set-up is copied. **kwargs Any set-up parameter which will override the settings of the reference model (see :class:`CTMGrid` parameters). Returns ------- A :class:`CTMGrid` object.
entailment
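And a companion sketch for deriving a user-defined grid from a reference model; the new model name below is made up:

# Copy the GEOS5 set-up, overriding only the horizontal resolution.
my_grid = CTMGrid.copy_from_model('MY_COARSE_GEOS5', 'GEOS5',
                                  resolution=(5.0, 4.0))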
def get_layers(self, Psurf=1013.25, Ptop=0.01, **kwargs): """ Compute scalars or coordinates associated to the vertical layers. Parameters ---------- Psurf : float or array-like Average surface pressure(s) [hPa] (default: 1013.25). Ptop : float Pressure at the top of the modeled atmosphere [hPa] (default: 0.01). Returns ------- dictionary of vertical grid components, including eta (unitless), sigma (unitless), pressure (hPa), and altitude (km) on both layer centers and edges, ordered from bottom-to-top. Notes ----- For pure sigma grids, sigma coordinates are given by the esig (edges) and csig (centers). For both pure sigma and hybrid grids, pressures at layers edges L are calculated as follows: .. math:: P_e(L) = A_p(L) + B_p(L) * (P_{surf} - C_p) where :math:`P_{surf}`, :math:`P_{top}` Air pressures at the surface and the top of the modeled atmosphere (:attr:`Psurf` and :attr:`Ptop` attributes of the :class:`CTMGrid` instance). :math:`A_p(L)`, :math:`B_p(L)` Specified in the grid set-up (`Ap` and `Bp` attributes) for hybrid grids, or respectively equal :math:`P_{top}` and the :attr:`esig` attribute for pure sigma grids. :math:`C_p(L)` equals :math:`P_{top}` for pure sigma grids or equals 0 for hybrid grids. Pressures at grid centers are averages of pressures at grid edges: .. math:: P_c(L) = (P_e(L) + P_e(L+1)) / 2 For hybrid grids, ETA coordinates of grid edges and grid centers are given by: .. math:: ETA_{e}(L) = (P_e(L) - P_{top}) / (P_{surf} - P_{top}) .. math:: ETA_{c}(L) = (P_c(L) - P_{top}) / (P_{surf} - P_{top}) Altitude values are fit using a 5th-degree polynomial; see `gridspec.prof_altitude` for more details. """ Psurf = np.asarray(Psurf) output_ndims = Psurf.ndim + 1 if output_ndims > 3: raise ValueError("`Psurf` argument must be a float or an array" " with <= 2 dimensions (or None)") # Computing all variables takes little memory, is fast, and keeps the code readable SIGe = None SIGc = None ETAe = None ETAc = None if self.hybrid: try: Ap = broadcast_1d_array(self.Ap, output_ndims) Bp = broadcast_1d_array(self.Bp, output_ndims) except KeyError: raise ValueError("Impossible to compute vertical levels," " data is missing (Ap, Bp)") Cp = 0. else: try: Bp = SIGe = broadcast_1d_array(self.esig, output_ndims) SIGc = broadcast_1d_array(self.csig, output_ndims) except KeyError: raise ValueError("Impossible to compute vertical levels," " data is missing (esig, csig)") Ap = Cp = Ptop Pe = Ap + Bp * (Psurf - Cp) Pc = 0.5 * (Pe[0:-1] + Pe[1:]) if self.hybrid: ETAe = (Pe - Ptop)/(Psurf - Ptop) ETAc = (Pc - Ptop)/(Psurf - Ptop) else: SIGe = SIGe * np.ones_like(Psurf) SIGc = SIGc * np.ones_like(Psurf) Ze = prof_altitude(Pe, **kwargs) Zc = prof_altitude(Pc, **kwargs) all_vars = {'eta_edges': ETAe, 'eta_centers': ETAc, 'sigma_edges': SIGe, 'sigma_centers': SIGc, 'pressure_edges': Pe, 'pressure_centers': Pc, 'altitude_edges': Ze, 'altitude_centers': Zc} return all_vars
Compute scalars or coordinates associated to the vertical layers. Parameters ---------- Psurf : float or array-like Average surface pressure(s) [hPa] (default: 1013.25). Ptop : float Pressure at the top of the modeled atmosphere [hPa] (default: 0.01). Returns ------- dictionary of vertical grid components, including eta (unitless), sigma (unitless), pressure (hPa), and altitude (km) on both layer centers and edges, ordered from bottom-to-top. Notes ----- For pure sigma grids, sigma coordinates are given by the esig (edges) and csig (centers). For both pure sigma and hybrid grids, pressures at layers edges L are calculated as follows: .. math:: P_e(L) = A_p(L) + B_p(L) * (P_{surf} - C_p) where :math:`P_{surf}`, :math:`P_{top}` Air pressures at the surface and the top of the modeled atmosphere (:attr:`Psurf` and :attr:`Ptop` attributes of the :class:`CTMGrid` instance). :math:`A_p(L)`, :math:`B_p(L)` Specified in the grid set-up (`Ap` and `Bp` attributes) for hybrid grids, or respectively equal :math:`P_{top}` and the :attr:`esig` attribute for pure sigma grids. :math:`C_p(L)` equals :math:`P_{top}` for pure sigma grids or equals 0 for hybrid grids. Pressures at grid centers are averages of pressures at grid edges: .. math:: P_c(L) = (P_e(L) + P_e(L+1)) / 2 For hybrid grids, ETA coordinates of grid edges and grid centers are given by: .. math:: ETA_{e}(L) = (P_e(L) - P_{top}) / (P_{surf} - P_{top}) .. math:: ETA_{c}(L) = (P_c(L) - P_{top}) / (P_{surf} - P_{top}) Altitude values are fit using a 5th-degree polynomial; see `gridspec.prof_altitude` for more details.
entailment
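The edge-pressure formula is easy to verify numerically in isolation; the 3-layer hybrid coefficients below are made up purely for illustration:

import numpy as np

Ap = np.array([0.0, 50.0, 100.0, 1.0])   # hPa, 4 edges -> 3 layers (invented values)
Bp = np.array([1.0, 0.9, 0.5, 0.0])      # unitless (invented values)
Psurf, Ptop = 1013.25, 0.01
Pe = Ap + Bp * (Psurf - 0.0)             # Cp = 0 for hybrid grids
Pc = 0.5 * (Pe[:-1] + Pe[1:])            # layer-center pressures
ETAe = (Pe - Ptop) / (Psurf - Ptop)      # hybrid eta coordinate at edges
print(Pe)                                # [1013.25, 961.925, 606.625, 1.0]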
def get_lonlat(self): """ Calculate the longitude-latitude grid for the grid's resolution and configuration / ordering. Notes ----- The grid is constructed from the following attributes: rlon, rlat : float Resolution (in degrees) of the longitude and latitude grids (taken from :attr:`resolution`). halfpolar : bool (default=True) Polar grid boxes span half of rlat relative to the other grid cells. center180 : bool (default=True) Longitude grid is centered at 180 degrees. """ rlon, rlat = self.resolution # Compute number of grid cells in each direction Nlon = int(360. / rlon) Nlat = int(180. / rlat) + self.halfpolar # Compute grid cell edges elon = np.arange(Nlon + 1) * rlon - np.array(180.) elon -= rlon / 2. * self.center180 elat = np.arange(Nlat + 1) * rlat - np.array(90.) elat -= rlat / 2. * self.halfpolar elat[0] = -90. elat[-1] = 90. # Compute grid cell centers clon = (elon - (rlon / 2.))[1:] clat = np.arange(Nlat) * rlat - np.array(90.) # Fix grid boundaries if halfpolar if self.halfpolar: clat[0] = (elat[0] + elat[1]) / 2. clat[-1] = -clat[0] else: clat += (elat[1] - elat[0]) / 2. return { "lon_centers": clon, "lat_centers": clat, "lon_edges": elon, "lat_edges": elat }
Calculate the longitude-latitude grid for the grid's resolution and configuration / ordering. Notes ----- The grid is constructed from the following attributes: rlon, rlat : float Resolution (in degrees) of the longitude and latitude grids (taken from :attr:`resolution`). halfpolar : bool (default=True) Polar grid boxes span half of rlat relative to the other grid cells. center180 : bool (default=True) Longitude grid is centered at 180 degrees.
entailment
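The edge/center construction can also be reproduced standalone; this sketch uses a 5 x 4 degree grid with halfpolar and center180 both True:

import numpy as np

rlon, rlat, halfpolar, center180 = 5.0, 4.0, True, True
Nlon = int(360. / rlon)
Nlat = int(180. / rlat) + halfpolar
elon = np.arange(Nlon + 1) * rlon - 180. - rlon / 2. * center180
elat = np.arange(Nlat + 1) * rlat - 90. - rlat / 2. * halfpolar
elat[0], elat[-1] = -90., 90.            # clamp the polar edges
print(elon[:2], elat[:3])                # [-182.5 -177.5] [-90. -88. -84.]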
def _get_template_dirs(): """Existing directories in which to search for jinja2 templates. The order is important. The first template found in the first found dir wins!""" return filter(lambda x: os.path.exists(x), [ # user dir os.path.join(os.path.expanduser('~'), '.py2pack', 'templates'), # system wide dir os.path.join('/', 'usr', 'share', 'py2pack', 'templates'), # usually inside the site-packages dir os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'), ])
Existing directories in which to search for jinja2 templates. The order is important. The first template found in the first found dir wins!
entailment
def _license_from_classifiers(data): """try to get a license from the classifiers""" classifiers = data.get('classifiers', []) found_license = None for c in classifiers: if c.startswith("License :: OSI Approved :: "): found_license = c.replace("License :: OSI Approved :: ", "") return found_license
try to get a license from the classifiers
entailment
def _normalize_license(data): """try to get SPDX license""" license = data.get('license', None) if not license: # try to get license from classifiers license = _license_from_classifiers(data) if license: if license in SDPX_LICENSES.keys(): data['license'] = SDPX_LICENSES[license] else: data['license'] = "%s (FIXME:No SPDX)" % (license) else: data['license'] = ""
try to get SPDX license
entailment
def wrap_prompts_class(Klass): """ Wrap IPython's Prompt class This is needed in order for Prompt to inject the correct escape sequences at the right positions for shell integrations. """ try: from prompt_toolkit.token import ZeroWidthEscape except ImportError: return Klass class ITerm2IPythonPrompt(Klass): def in_prompt_tokens(self, cli=None): return [ (ZeroWidthEscape, last_status(self.shell)+BEFORE_PROMPT), ]+\ super(ITerm2IPythonPrompt, self).in_prompt_tokens(cli)+\ [(ZeroWidthEscape, AFTER_PROMPT)] return ITerm2IPythonPrompt
Wrap IPython's Prompt class This is needed in order for Prompt to inject the correct escape sequences at the right positions for shell integrations.
entailment
def get_all_keys(self, start=None): """ A generator which yields all valid keys starting at the given `start` offset. If `start` is `None`, we will start from the root of the tree. """ s = self.stream if not start: start = HEADER_SIZE + self.block_size * self.root_block s.seek(start) block_type = s.read(2) if block_type == LEAF: reader = LeafReader(self) num_keys = struct.unpack('>i', reader.read(4))[0] for _ in range(num_keys): cur_key = reader.read(self.key_size) # We do a tell/seek here so that the user can read from # the file while this loop is still being run cur_pos = s.tell() yield cur_key s.seek(cur_pos) length = sbon.read_varint(reader) reader.seek(length, 1) elif block_type == INDEX: (_, num_keys, first_child) = struct.unpack('>Bii', s.read(9)) children = [first_child] for _ in range(num_keys): # Skip the key field. _ = s.read(self.key_size) # Read pointer to the child block. next_child = struct.unpack('>i', s.read(4))[0] children.append(next_child) for child_loc in children: for key in self.get_all_keys(HEADER_SIZE + self.block_size * child_loc): yield key elif block_type == FREE: pass else: raise Exception('Unhandled block type: {}'.format(block_type))
A generator which yields all valid keys starting at the given `start` offset. If `start` is `None`, we will start from the root of the tree.
entailment
def _replace_star(fmt, size): """ Replace the `*` placeholder in a format string (fmt), so that struct.calcsize(fmt) is equal to the given `size` using the format following the placeholder. Raises `ValueError` if the number of `*` is larger than 1. If no `*` in `fmt`, returns `fmt` without checking its size! Examples -------- >>> _replace_star('ii*fi', 40) 'ii7fi' """ n_stars = fmt.count('*') if n_stars > 1: raise ValueError("More than one `*` in format (%s)." % fmt) if n_stars: i = fmt.find('*') s = struct.calcsize(fmt.replace(fmt[i:i + 2], '')) n = old_div((size - s), struct.calcsize(fmt[i + 1])) fmt = fmt.replace('*', str(n)) return fmt
Replace the `*` placeholder in a format string (fmt), so that struct.calcsize(fmt) is equal to the given `size` using the format following the placeholder. Raises `ValueError` if the number of `*` is larger than 1. If no `*` in `fmt`, returns `fmt` without checking its size! Examples -------- >>> _replace_star('ii*fi', 40) 'ii7fi'
entailment
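A quick check of the star replacement against `struct.calcsize` (all fields here are 4 bytes wide, so native alignment matches the standard size):

import struct

fmt = _replace_star('ii*fi', 40)
assert fmt == 'ii7fi'
assert struct.calcsize(fmt) == 40        # 2*4 + 7*4 + 4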
def _fix(self, fmt='i'): """ Read pre- or suffix of line at current position with given format `fmt` (default 'i'). """ fmt = self.endian + fmt fix = self.read(struct.calcsize(fmt)) if fix: return struct.unpack(fmt, fix)[0] else: raise EOFError
Read pre- or suffix of line at current position with given format `fmt` (default 'i').
entailment
def readline(self, fmt=None): """ Return next unformatted "line". If format is given, unpack content, otherwise return byte string. """ prefix_size = self._fix() if fmt is None: content = self.read(prefix_size) else: fmt = self.endian + fmt fmt = _replace_star(fmt, prefix_size) content = struct.unpack(fmt, self.read(prefix_size)) try: suffix_size = self._fix() except EOFError: # when endian is invalid and prefix_size > total file size suffix_size = -1 if prefix_size != suffix_size: raise IOError(_FIX_ERROR) return content
Return next unformatted "line". If format is given, unpack content, otherwise return byte string.
entailment
def skipline(self): """ Skip the next line and return the position and size of the line. Raises IOError if the pre- and suffix of the line do not match. """ position = self.tell() prefix = self._fix() self.seek(prefix, 1) # skip content suffix = self._fix() if prefix != suffix: raise IOError(_FIX_ERROR) return position, prefix
Skip the next line and return the position and size of the line. Raises IOError if the pre- and suffix of the line do not match.
entailment
def writeline(self, fmt, *args): """ Write the values `args` as one record with the given `fmt` to the file, framed by matching record-length markers. """ fmt = self.endian + fmt size = struct.calcsize(fmt) fix = struct.pack(self.endian + 'i', size) line = struct.pack(fmt, *args) self.write(fix) self.write(line) self.write(fix)
Write the values `args` as one record with the given `fmt` to the file, framed by matching record-length markers.
entailment
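A hedged round-trip sketch; it assumes the enclosing class is called `FortranFile`, that it can be opened like a regular binary file, and that its `endian` attribute is set (none of which is shown in these records):

f = FortranFile('record.bin', 'wb')      # hypothetical open semantics
f.writeline('3i', 1, 2, 3)               # 4-byte prefix, 12 bytes payload, 4-byte suffix
f.close()

f = FortranFile('record.bin', 'rb')
print(f.readline('3i'))                  # (1, 2, 3)
f.close()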
def writelines(self, lines, fmt): """ Write `lines` with the given `fmt` (a single format string or one format per line). """ if isinstance(fmt, basestring): fmt = [fmt] * len(lines) for f, line in zip(fmt, lines): self.writeline(f, line)
Write `lines` with the given `fmt` (a single format string or one format per line).
entailment
def read_varint(stream): """Read while the most significant bit is set, then put the 7 least significant bits of all read bytes together to create a number. """ value = 0 while True: byte = ord(stream.read(1)) if not byte & 0b10000000: return value << 7 | byte value = value << 7 | (byte & 0b01111111)
Read while the most significant bit is set, then put the 7 least significant bits of all read bytes together to create a number.
entailment
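A byte-level check of the decoding: in `0x82 0x2C` only the first byte has the continuation bit set, so the result is `(2 << 7) | 44 = 300`:

import io

assert read_varint(io.BytesIO(b'\x82\x2c')) == 300
assert read_varint(io.BytesIO(b'\x7f')) == 127   # single byte, no continuation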
def open_bpchdataset(filename, fields=[], categories=[], tracerinfo_file='tracerinfo.dat', diaginfo_file='diaginfo.dat', endian=">", decode_cf=True, memmap=True, dask=True, return_store=False): """ Open a GEOS-Chem BPCH file output as an xarray Dataset. Parameters ---------- filename : string Path to the output file to read in. {tracerinfo,diaginfo}_file : string, optional Path to the metadata "info" .dat files which are used to decipher the metadata corresponding to each variable in the output dataset. If not provided, will look for them in the current directory or fall back on a generic set. fields : list, optional List of a subset of variable names to return. This can substantially improve read performance. Note that the field here is just the tracer name - not the category, e.g. 'O3' instead of 'IJ-AVG-$_O3'. categories : list, optional List of a subset of variable categories to look through. This can substantially improve read performance. endian : {'=', '>', '<'}, optional Endianness of file on disk. By default, "big endian" (">") is assumed. decode_cf : bool Enforce CF conventions for variable names, units, and other metadata memmap : bool Flag indicating that data should be memory-mapped from disk instead of eagerly loaded into memory dask : bool Flag indicating that data reading should be deferred (delayed) to construct a task-graph for later execution return_store : bool Also return the underlying DataStore to the user Returns ------- ds : xarray.Dataset Dataset containing the requested fields (or the entire file), with data contained in proxy containers for access later. store : xarray.AbstractDataStore Underlying DataStore which handles the loading and processing of bpch files on disk """ store = BPCHDataStore( filename, fields=fields, categories=categories, tracerinfo_file=tracerinfo_file, diaginfo_file=diaginfo_file, endian=endian, use_mmap=memmap, dask_delayed=dask ) ds = xr.Dataset.load_store(store) # Record the file object underlying the store from which we culled this # Dataset, so that we can clean it up later ds._file_obj = store._bpch # Handle CF corrections if decode_cf: decoded_vars = OrderedDict() rename_dict = {} for v in ds.variables: cf_name = cf.get_valid_varname(v) rename_dict[v] = cf_name new_var = cf.enforce_cf_variable(ds[v]) decoded_vars[cf_name] = new_var ds = xr.Dataset(decoded_vars, attrs=ds.attrs.copy()) # ds.rename(rename_dict, inplace=True) # TODO: There's a bug with xr.decode_cf which eagerly loads data. # Re-enable this once that bug is fixed # Note that we do not need to decode the times because we explicitly # kept track of them as we parsed the data. # ds = xr.decode_cf(ds, decode_times=False) # Set attributes for CF conventions ts = get_timestamp() ds.attrs.update(dict( Conventions='CF1.6', source=filename, tracerinfo=tracerinfo_file, diaginfo=diaginfo_file, filetype=store._bpch.filetype, filetitle=store._bpch.filetitle, history=( "{}: Processed/loaded by xbpch-{} from {}" .format(ts, ver, filename) ), )) # To immediately load the data from the BPCHDataProxy payloads, need # to execute ds.data_vars for some reason... if return_store: return ds, store else: return ds
Open a GEOS-Chem BPCH file output as an xarray Dataset. Parameters ---------- filename : string Path to the output file to read in. {tracerinfo,diaginfo}_file : string, optional Path to the metadata "info" .dat files which are used to decipher the metadata corresponding to each variable in the output dataset. If not provided, will look for them in the current directory or fall back on a generic set. fields : list, optional List of a subset of variable names to return. This can substantially improve read performance. Note that the field here is just the tracer name - not the category, e.g. 'O3' instead of 'IJ-AVG-$_O3'. categories : list, optional List of a subset of variable categories to look through. This can substantially improve read performance. endian : {'=', '>', '<'}, optional Endianness of file on disk. By default, "big endian" (">") is assumed. decode_cf : bool Enforce CF conventions for variable names, units, and other metadata memmap : bool Flag indicating that data should be memory-mapped from disk instead of eagerly loaded into memory dask : bool Flag indicating that data reading should be deferred (delayed) to construct a task-graph for later execution return_store : bool Also return the underlying DataStore to the user Returns ------- ds : xarray.Dataset Dataset containing the requested fields (or the entire file), with data contained in proxy containers for access later. store : xarray.AbstractDataStore Underlying DataStore which handles the loading and processing of bpch files on disk
entailment
def open_mfbpchdataset(paths, concat_dim='time', compat='no_conflicts', preprocess=None, lock=None, **kwargs): """ Open multiple bpch files as a single dataset. You must have dask installed for this to work, as this greatly simplifies issues relating to multi-file I/O. Also, please note that this is not a very performant routine. I/O is still limited by the fact that we need to manually scan/read through each bpch file so that we can figure out what its contents are, since that metadata isn't saved anywhere. So this routine will actually sequentially load Datasets for each bpch file, then concatenate them along the "time" axis. You may wish to simply process each file individually, coerce to NetCDF, and then ingest through xarray as normal. Parameters ---------- paths : list of strs Filenames to load; order doesn't matter as they will be lexicographically sorted before we read in the data concat_dim : str, default='time' Dimension to concatenate Datasets over. We default to "time" since this is how GEOS-Chem splits output files compat : str (optional) String indicating how to compare variables of the same name for potential conflicts when merging: - 'broadcast_equals': all values must be equal when variables are broadcast against each other to ensure common dimensions. - 'equals': all values and dimensions must be the same. - 'identical': all values, dimensions and attributes must be the same. - 'no_conflicts': only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. preprocess : callable (optional) A pre-processing function to apply to each Dataset prior to concatenation lock : False, True, or threading.Lock (optional) Passed to :py:func:`dask.array.from_array`. By default, xarray employs a per-variable lock when reading data from NetCDF files, but this model has not yet been extended or implemented for bpch files and so this is not actually used. However, it is likely necessary before dask's multi-threaded backend can be used **kwargs : optional Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`. """ from xarray.backends.api import _MultiFileCloser # TODO: Include file locks? # Check for dask dask = kwargs.pop('dask', False) if not dask: raise ValueError("Reading multiple files without dask is not supported") kwargs['dask'] = True if isinstance(paths, basestring): paths = sorted(glob(paths)) if not paths: raise IOError("No paths to files were passed into open_mfbpchdataset") datasets = [open_bpchdataset(filename, **kwargs) for filename in paths] bpch_objs = [ds._file_obj for ds in datasets] if preprocess is not None: datasets = [preprocess(ds) for ds in datasets] # Concatenate over time combined = xr.auto_combine(datasets, compat=compat, concat_dim=concat_dim) combined._file_obj = _MultiFileCloser(bpch_objs) combined.attrs = datasets[0].attrs ts = get_timestamp() fns_str = " ".join(paths) combined.attrs['history'] = ( "{}: Processed/loaded by xbpch-{} from {}" .format(ts, ver, fns_str) ) return combined
Open multiple bpch files as a single dataset. You must have dask installed for this to work, as this greatly simplifies issues relating to multi-file I/O. Also, please note that this is not a very performant routine. I/O is still limited by the fact that we need to manually scan/read through each bpch file so that we can figure out what its contents are, since that metadata isn't saved anywhere. So this routine will actually sequentially load Datasets for each bpch file, then concatenate them along the "time" axis. You may wish to simply process each file individually, coerce to NetCDF, and then ingest through xarray as normal. Parameters ---------- paths : list of strs Filenames to load; order doesn't matter as they will be lexicographically sorted before we read in the data concat_dim : str, default='time' Dimension to concatenate Datasets over. We default to "time" since this is how GEOS-Chem splits output files compat : str (optional) String indicating how to compare variables of the same name for potential conflicts when merging: - 'broadcast_equals': all values must be equal when variables are broadcast against each other to ensure common dimensions. - 'equals': all values and dimensions must be the same. - 'identical': all values, dimensions and attributes must be the same. - 'no_conflicts': only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. preprocess : callable (optional) A pre-processing function to apply to each Dataset prior to concatenation lock : False, True, or threading.Lock (optional) Passed to :py:func:`dask.array.from_array`. By default, xarray employs a per-variable lock when reading data from NetCDF files, but this model has not yet been extended or implemented for bpch files and so this is not actually used. However, it is likely necessary before dask's multi-threaded backend can be used **kwargs : optional Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`.
entailment
def forwards(self, orm): "Write your forwards methods here." # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..." for entry in orm['multilingual_news.NewsEntry'].objects.all(): self.migrate_placeholder( orm, entry, 'excerpt', 'multilingual_news_excerpt', 'excerpt') self.migrate_placeholder( orm, entry, 'content', 'multilingual_news_content', 'content')
Write your forwards methods here.
entailment
def image_bytes(b, filename=None, inline=1, width='auto', height='auto', preserve_aspect_ratio=None): """ Return a bytes string that displays image given by bytes b in the terminal If filename=None, the filename defaults to "Unnamed file" width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html """ if preserve_aspect_ratio is None: if width != 'auto' and height != 'auto': preserve_aspect_ratio = False else: preserve_aspect_ratio = True data = { 'name': base64.b64encode((filename or 'Unnamed file').encode('utf-8')).decode('ascii'), 'inline': inline, 'size': len(b), 'base64_img': base64.b64encode(b).decode('ascii'), 'width': width, 'height': height, 'preserve_aspect_ratio': int(preserve_aspect_ratio), } # IMAGE_CODE is a string because bytes doesn't support formatting return IMAGE_CODE.format(**data).encode('ascii')
Return a bytes string that displays image given by bytes b in the terminal If filename=None, the filename defaults to "Unnamed file" width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html
entailment
def display_image_bytes(b, filename=None, inline=1, width='auto', height='auto', preserve_aspect_ratio=None): """ Display the image given by the bytes b in the terminal. If filename=None, the filename defaults to "Unnamed file". width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html """ sys.stdout.buffer.write(image_bytes(b, filename=filename, inline=inline, width=width, height=height, preserve_aspect_ratio=preserve_aspect_ratio)) sys.stdout.write('\n')
Display the image given by the bytes b in the terminal. If filename=None, the filename defaults to "Unnamed file". width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html
entailment
def display_image_file(fn, width='auto', height='auto', preserve_aspect_ratio=None): """ Display an image in the terminal. A newline is not printed. width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html """ with open(os.path.realpath(os.path.expanduser(fn)), 'rb') as f: sys.stdout.buffer.write(image_bytes(f.read(), filename=fn, width=width, height=height, preserve_aspect_ratio=preserve_aspect_ratio))
Display an image in the terminal. A newline is not printed. width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html
entailment
def get_requirements(*args): """Get requirements from pip requirement files.""" requirements = set() contents = get_contents(*args) for line in contents.splitlines(): # Strip comments. line = re.sub(r'^#.*|\s#.*', '', line) # Ignore empty lines if line and not line.isspace(): requirements.add(re.sub(r'\s+', '', line)) return sorted(requirements)
Get requirements from pip requirement files.
entailment
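The parsing logic is easy to exercise in isolation; this sketch inlines the file contents instead of going through `get_contents`:

import re

contents = "requests >= 2.0  # pinned loosely\n\n# full-line comment\nsix"
requirements = set()
for line in contents.splitlines():
    line = re.sub(r'^#.*|\s#.*', '', line)   # strip comments
    if line and not line.isspace():
        requirements.add(re.sub(r'\s+', '', line))
print(sorted(requirements))                  # ['requests>=2.0', 'six']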
def get_holidays(self, division=None, year=None): """ Gets a list of all known bank holidays, optionally filtered by division and/or year :param division: see division constants; defaults to common holidays :param year: defaults to all available years :return: list of dicts with titles, dates, etc """ if division: holidays = self.data[division] else: holidays = self.data[self.ENGLAND_AND_WALES] dates_in_common = six.moves.reduce( set.intersection, (set(map(lambda holiday: holiday['date'], division_holidays)) for division, division_holidays in six.iteritems(self.data)) ) holidays = filter(lambda holiday: holiday['date'] in dates_in_common, holidays) if year: holidays = filter(lambda holiday: holiday['date'].year == year, holidays) return list(holidays)
Gets a list of all known bank holidays, optionally filtered by division and/or year :param division: see division constants; defaults to common holidays :param year: defaults to all available years :return: list of dicts with titles, dates, etc
entailment
def get_next_holiday(self, division=None, date=None): """ Returns the next known bank holiday :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: dict, or None if no later holiday is known """ date = date or datetime.date.today() for holiday in self.get_holidays(division=division): if holiday['date'] > date: return holiday
Returns the next known bank holiday :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: dict, or None if no later holiday is known
entailment
def is_holiday(self, date, division=None): """ True if the date is a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool """ return date in (holiday['date'] for holiday in self.get_holidays(division=division))
True if the date is a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool
entailment
def get_next_work_day(self, division=None, date=None): """ Returns the next work day, skipping weekends and bank holidays :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: datetime.date; NB: get_next_holiday returns a dict """ date = date or datetime.date.today() one_day = datetime.timedelta(days=1) holidays = set(holiday['date'] for holiday in self.get_holidays(division=division)) while True: date += one_day if date.weekday() not in self.weekend and date not in holidays: return date
Returns the next work day, skipping weekends and bank holidays :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: datetime.date; NB: get_next_holiday returns a dict
entailment
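Hypothetical usage of the holiday helpers; the class name `BankHolidays` and its zero-argument constructor are assumptions inferred from the method names:

import datetime

holidays = BankHolidays()                    # hypothetical constructor
# True only if the loaded data includes Christmas Day 2020.
print(holidays.is_holiday(datetime.date(2020, 12, 25)))
# Skips the holidays and the weekend that follow Christmas Eve.
print(holidays.get_next_work_day(date=datetime.date(2020, 12, 24)))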
def is_work_day(self, date, division=None): """ True if the date is not a weekend or a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool """ return date.weekday() not in self.weekend and date not in ( holiday['date'] for holiday in self.get_holidays(division=division) )
True if the date is not a weekend or a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool
entailment
def get_all_regions_with_tiles(self): """ Generator which yields (rx, ry) tuples describing all regions for which the world has tile data """ for key in self.get_all_keys(): (layer, rx, ry) = struct.unpack('>BHH', key) if layer == 1: yield (rx, ry)
Generator which yields (rx, ry) tuples describing all regions for which the world has tile data
entailment
def get_entity_uuid_coords(self, uuid): """ Returns the coordinates of the given entity UUID inside this world, or `None` if the UUID is not found. """ if uuid in self._entity_to_region_map: coords = self._entity_to_region_map[uuid] entities = self.get_entities(*coords) for entity in entities: if 'uniqueId' in entity.data and entity.data['uniqueId'] == uuid: return tuple(entity.data['tilePosition']) return None
Returns the coordinates of the given entity UUID inside this world, or `None` if the UUID is not found.
entailment
def _entity_to_region_map(self): """ A dict whose keys are the UUIDs (or just IDs, in some cases) of entities, and whose values are the `(rx, ry)` coordinates in which that entity can be found. This can be used to easily locate particular entities inside the world. """ entity_to_region = {} for key in self.get_all_keys(): layer, rx, ry = struct.unpack('>BHH', key) if layer != 4: continue stream = io.BytesIO(self.get(layer, rx, ry)) num_entities = sbon.read_varint(stream) for _ in range(num_entities): uuid = sbon.read_string(stream) if uuid in entity_to_region: raise ValueError('Duplicate UUID {}'.format(uuid)) entity_to_region[uuid] = (rx, ry) return entity_to_region
A dict whose keys are the UUIDs (or just IDs, in some cases) of entities, and whose values are the `(rx, ry)` coordinates in which that entity can be found. This can be used to easily locate particular entities inside the world.
entailment
def create_fuzzy_pattern(pattern): """ Convert a string into a fuzzy regular expression pattern. :param pattern: The input pattern (a string). :returns: A compiled regular expression object. This function works by adding ``.*`` between each of the characters in the input pattern and compiling the resulting expression into a case insensitive regular expression. """ return re.compile(".*".join(map(re.escape, pattern)), re.IGNORECASE)
Convert a string into a fuzzy regular expression pattern. :param pattern: The input pattern (a string). :returns: A compiled regular expression object. This function works by adding ``.*`` between each of the characters in the input pattern and compiling the resulting expression into a case insensitive regular expression.
entailment
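The resulting pattern matches the given characters in order, anywhere in the name, case-insensitively:

pattern = create_fuzzy_pattern('gh')
assert pattern.pattern == 'g.*h'
assert pattern.search('GitHub')              # 'G' ... 'H' appear in order
assert not pattern.search('fedora')          # no 'g' at all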
def filtered_entries(self): """A list of :class:`PasswordEntry` objects that don't match the exclude list.""" return [ e for e in self.entries if not any(fnmatch.fnmatch(e.name.lower(), p.lower()) for p in self.exclude_list) ]
A list of :class:`PasswordEntry` objects that don't match the exclude list.
entailment
def fuzzy_search(self, *filters): """ Perform a "fuzzy" search that matches the given characters in the given order. :param filters: The pattern(s) to search for. :returns: The matched password names (a list of strings). """ matches = [] logger.verbose( "Performing fuzzy search on %s (%s) ..", pluralize(len(filters), "pattern"), concatenate(map(repr, filters)) ) patterns = list(map(create_fuzzy_pattern, filters)) for entry in self.filtered_entries: if all(p.search(entry.name) for p in patterns): matches.append(entry) logger.log( logging.INFO if matches else logging.VERBOSE, "Matched %s using fuzzy search.", pluralize(len(matches), "password"), ) return matches
Perform a "fuzzy" search that matches the given characters in the given order. :param filters: The pattern(s) to search for. :returns: The matched password names (a list of strings).
entailment