| signature (string, lengths 8-3.44k) | body (string, lengths 0-1.41M) | docstring (string, lengths 1-122k) | id (string, lengths 5-17) |
|---|---|---|---|
def monthabbr(self):
|
return PERSIAN_MONTH_ABBRS[self.month]<EOL>
|
:rtype: unicode
:return: The corresponding Persian month abbreviation:
[فر, ار, خر, تی, مر, شه, مه, آب, آذ, دی, به, اس]
|
f9577:c0:m26
|
def monthabbr_ascii(self):
|
return PERSIAN_MONTH_ABBRS_ASCII[self.month]<EOL>
|
:rtype: unicode
:return: The corresponding Persian month abbreviation in ASCII: [F, O, Kh, ..., E].
|
f9577:c0:m27
|
def monthnameascii(self):
|
return PERSIAN_MONTH_NAMES_ASCII[self.month]<EOL>
|
:rtype: unicode
:return: The corresponding Persian month name in ASCII:
[Farvardin - Esfand]
|
f9577:c0:m28
|
def localdateformat(self):
|
return self.strftime('<STR_LIT>')<EOL>
|
It's equivalent to:
.. testsetup:: api-date-localdateformat
from __future__ import print_function
from khayyam import JalaliDate
.. doctest:: api-date-localdateformat
>>> print(JalaliDate(1361, 6, 15).strftime('%A %D %B %N'))
دوشنبه ۱۵ شهریور ۱۳۶۱
For example:
.. doctest:: api-date-localdateformat
>>> print(JalaliDate(1394, 5, 6).localdateformat())
سه شنبه ۶ مرداد ۱۳۹۴
:return: Appropriate localized string representing the Persian date
:rtype: unicode
|
f9577:c0:m29
|
def firstdayofyear(self):
|
return JalaliDate(self.year, <NUM_LIT:1>, <NUM_LIT:1>)<EOL>
|
As its name says, it returns a :py:class:`JalaliDate`
representing the first day of the current instance's year.
:return: First day of corresponding year.
:rtype: :py:class:`JalaliDate`
|
f9577:c0:m30
|
def dayofyear(self):
|
return (self - self.firstdayofyear()).days + <NUM_LIT:1><EOL>
|
:return: Day of year as an integer: 1-365, or 1-366 in a leap year
:rtype: int
|
f9577:c0:m31
|
def weekofyear(self, first_day_of_week=SATURDAY):
|
first_day_of_year = self.firstdayofyear()<EOL>days = (self - first_day_of_year).days<EOL>offset = first_day_of_week - first_day_of_year.weekday()<EOL>if offset < <NUM_LIT:0>:<EOL><INDENT>offset += <NUM_LIT:7><EOL><DEDENT>if days < offset:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>return int((days - offset) / <NUM_LIT:7> + <NUM_LIT:1>)<EOL>
|
weekofyear(first_day_of_week=SATURDAY)
:param first_day_of_week: One of the
:py:data:`khayyam.SATURDAY`,
:py:data:`khayyam.SUNDAY`,
:py:data:`khayyam.MONDAY`,
:py:data:`khayyam.TUESDAY`,
:py:data:`khayyam.WEDNESDAY`,
:py:data:`khayyam.THURSDAY` or
:py:data:`khayyam.FRIDAY`
:return: The week number of the year.
:rtype: int
|
f9577:c0:m32
|
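The JalaliDate helpers documented in the rows above compose naturally. A minimal usage sketch, assuming only what the signatures and doctests above show (the khayyam package exposing JalaliDate and the weekday constants):

from khayyam import JalaliDate, MONDAY

d = JalaliDate(1394, 5, 6)
print(d.localdateformat())    # localized Persian date string, as in the doctest above
print(d.firstdayofyear())     # JalaliDate(1394, 1, 1)
print(d.dayofyear())          # 1-based day index within the year
print(d.weekofyear())         # week number, weeks starting on Saturday by default
print(d.weekofyear(first_day_of_week=MONDAY))  # week number counted from Monday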
def __init__(self, status, reason='<STR_LIT>'):
|
self.status = status<EOL>self.reason = reason<EOL>
|
:param int status: Integer HTTP response status
:param str reason: Optional reason phrase describing the status
|
f9584:c0:m0
|
def get_client(project_id=None, credentials=None,<EOL>service_url=None, service_account=None,<EOL>private_key=None, private_key_file=None,<EOL>json_key=None, json_key_file=None,<EOL>readonly=True, swallow_results=True,<EOL>num_retries=<NUM_LIT:0>):
|
if not credentials:<EOL><INDENT>assert (service_account and (private_key or private_key_file)) or (<EOL>json_key or json_key_file),'<STR_LIT>'<EOL><DEDENT>if not project_id:<EOL><INDENT>assert json_key or json_key_file,'<STR_LIT>'<EOL><DEDENT>if service_url is None:<EOL><INDENT>service_url = DISCOVERY_URI<EOL><DEDENT>scope = BIGQUERY_SCOPE_READ_ONLY if readonly else BIGQUERY_SCOPE<EOL>if private_key_file:<EOL><INDENT>credentials = _credentials().from_p12_keyfile(service_account,<EOL>private_key_file,<EOL>scopes=scope)<EOL><DEDENT>if private_key:<EOL><INDENT>try:<EOL><INDENT>if isinstance(private_key, basestring):<EOL><INDENT>private_key = private_key.decode('<STR_LIT:utf-8>')<EOL><DEDENT><DEDENT>except NameError:<EOL><INDENT>pass<EOL><DEDENT>credentials = _credentials().from_p12_keyfile_buffer(<EOL>service_account,<EOL>StringIO(private_key),<EOL>scopes=scope)<EOL><DEDENT>if json_key_file:<EOL><INDENT>with open(json_key_file, '<STR_LIT:r>') as key_file:<EOL><INDENT>json_key = json.load(key_file)<EOL><DEDENT><DEDENT>if json_key:<EOL><INDENT>credentials = _credentials().from_json_keyfile_dict(json_key,<EOL>scopes=scope)<EOL>if not project_id:<EOL><INDENT>project_id = json_key['<STR_LIT>']<EOL><DEDENT><DEDENT>bq_service = _get_bq_service(credentials=credentials,<EOL>service_url=service_url)<EOL>return BigQueryClient(bq_service, project_id, swallow_results,<EOL>num_retries)<EOL>
|
Return a singleton instance of BigQueryClient. Either
AssertionCredentials or a service account and private key combination need
to be provided in order to authenticate requests to BigQuery.
Parameters
----------
project_id : str, optional
The BigQuery project id, required unless json_key or json_key_file is
provided.
credentials : oauth2client.client.SignedJwtAssertionCredentials, optional
AssertionCredentials instance used to authenticate requests to BigQuery.
If not provided, either `service_account` and (`private_key` or
`private_key_file`), or (`json_key` or `json_key_file`), must be given.
service_url : str, optional
A URI string template pointing to the location of Google's API
discovery service. Requires two parameters {api} and {apiVersion} that
when filled in produce an absolute URI to the discovery document for
that service. If not set then the default googleapiclient discovery URI
is used. See `credentials`
service_account : str, optional
The Google API service account name. See `credentials`
private_key : str, optional
The private key associated with the service account in PKCS12 or PEM
format. See `credentials`
private_key_file : str, optional
The name of the file containing the private key associated with the
service account in PKCS12 or PEM format. See `credentials`
json_key : dict, optional
The JSON key associated with the service account. See `credentials`
json_key_file : str, optional
The name of the JSON key file associated with the service account. See
`credentials`.
readonly : bool
Bool indicating if BigQuery access is read-only. Has no effect if
credentials are provided. Default True.
swallow_results : bool
If set to False, then return the actual response value instead of
converting to boolean. Default True.
num_retries : int, optional
The number of times to retry the request. Default 0 (no retry).
Returns
-------
BigQueryClient
An instance of the BigQuery client.
|
f9586:m0
|
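A hedged usage sketch for get_client as documented above; the top-level import path and the key-file name are assumptions, not taken from the source:

from bigquery import get_client  # assumed import path

# Authenticate with a service-account JSON key file (hypothetical file name).
client = get_client(json_key_file='service-account.json', readonly=True)

# Or authenticate with a service account plus a PKCS12/PEM private key:
# client = get_client(project_id='my-project',
#                     service_account='svc@my-project.iam.gserviceaccount.com',
#                     private_key_file='key.p12',
#                     readonly=False)

The JSON-key path also supplies the project id, which is why `project_id` is only required when no JSON key is given.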
def get_projects(bq_service):
|
projects_request = bq_service.projects().list().execute()<EOL>projects = []<EOL>for project in projects_request.get('<STR_LIT>', []):<EOL><INDENT>project_data = {<EOL>'<STR_LIT:id>': project['<STR_LIT:id>'],<EOL>'<STR_LIT:name>': project['<STR_LIT>']<EOL>}<EOL>projects.append(project_data)<EOL><DEDENT>return projects<EOL>
|
Given the BigQuery service, return data about all projects.
|
f9586:m1
|
def _get_bq_service(credentials=None, service_url=None):
|
assert credentials, '<STR_LIT>'<EOL>http = credentials.authorize(Http())<EOL>service = build(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>http=http,<EOL>discoveryServiceUrl=service_url,<EOL>cache_discovery=False<EOL>)<EOL>return service<EOL>
|
Construct an authorized BigQuery service object.
|
f9586:m2
|
def _credentials():
|
from oauth2client.service_account import ServiceAccountCredentials<EOL>return ServiceAccountCredentials<EOL>
|
Import and return the ServiceAccountCredentials class
|
f9586:m3
|
def _get_project_id(self, project_id=None):
|
if project_id is None:<EOL><INDENT>project_id = self.project_id<EOL><DEDENT>return project_id<EOL>
|
Get the project_id to use.
Defaults to self.project_id, the project the client authenticated to.
A different project_id is specified when the client authenticates to one
project but runs jobs in another.
Parameters
----------
project_id : str
BigQuery project_id
Returns
-------
project_id: BigQuery project_id
|
f9586:c0:m1
|
def _submit_query_job(self, query_data):
|
logger.debug('<STR_LIT>' % query_data)<EOL>job_collection = self.bigquery.jobs()<EOL>try:<EOL><INDENT>query_reply = job_collection.query(<EOL>projectId=self.project_id, body=query_data).execute(<EOL>num_retries=self.num_retries)<EOL><DEDENT>except HttpError as e:<EOL><INDENT>if query_data.get("<STR_LIT>", False):<EOL><INDENT>return None, json.loads(e.content.decode('<STR_LIT:utf8>'))<EOL><DEDENT>raise<EOL><DEDENT>job_id = query_reply['<STR_LIT>'].get('<STR_LIT>')<EOL>schema = query_reply.get('<STR_LIT>', {'<STR_LIT>': None})['<STR_LIT>']<EOL>rows = query_reply.get('<STR_LIT>', [])<EOL>job_complete = query_reply.get('<STR_LIT>', False)<EOL>if not job_complete and query_data.get("<STR_LIT>", False):<EOL><INDENT>logger.error('<STR_LIT>' % job_id)<EOL>raise BigQueryTimeoutException()<EOL><DEDENT>return job_id, [self._transform_row(row, schema) for row in rows]<EOL>
|
Submit a query job to BigQuery.
This is similar to BigQueryClient.query, but gives the user
direct access to the query method on the official BigQuery
python client.
For fine-grained control over a query job, see:
https://google-api-client-libraries.appspot.com/documentation/bigquery/v2/python/latest/bigquery_v2.jobs.html#query
Parameters
----------
query_data
query object as per "configuration.query" in
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query
Returns
-------
tuple
job id and query results if query completed. If dry_run is True,
job id will be None and results will be empty if the query is valid
or a dict containing the response if invalid.
Raises
------
BigQueryTimeoutException
On timeout
|
f9586:c0:m2
|
def _get_job_reference(self, job_id):
|
job_reference = {<EOL>"<STR_LIT>": self.project_id,<EOL>"<STR_LIT>": job_id<EOL>}<EOL>return job_reference<EOL>
|
Get job reference from job_id
For more details, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#resource
Parameters
----------
job_id:
Id of the job
Returns
-------
job_reference: ``dict`` containing the job reference (project id and job id)
|
f9586:c0:m3
|
def _insert_job(self, body_object):
|
logger.debug('<STR_LIT>' % body_object)<EOL>job_collection = self.bigquery.jobs()<EOL>return job_collection.insert(<EOL>projectId=self.project_id,<EOL>body=body_object<EOL>).execute(num_retries=self.num_retries)<EOL>
|
Submit a job to BigQuery
Direct proxy to the insert() method of the official BigQuery
python client.
Able to submit load, link, query, copy, or extract jobs.
For more details, see:
https://google-api-client-libraries.appspot.com/documentation/bigquery/v2/python/latest/bigquery_v2.jobs.html#insert
Parameters
----------
body_object : body object passed to bigquery.jobs().insert()
Returns
-------
response of the bigquery.jobs().insert().execute() call
Raises
------
BigQueryTimeoutException on timeout
|
f9586:c0:m4
|
def query(self, query, max_results=None, timeout=<NUM_LIT:0>, dry_run=False, use_legacy_sql=None, external_udf_uris=None):
|
logger.debug('<STR_LIT>' % query)<EOL>query_data = {<EOL>'<STR_LIT>': query,<EOL>'<STR_LIT>': timeout * <NUM_LIT:1000>,<EOL>'<STR_LIT>': dry_run,<EOL>'<STR_LIT>': max_results<EOL>}<EOL>if use_legacy_sql is not None:<EOL><INDENT>query_data['<STR_LIT>'] = use_legacy_sql<EOL><DEDENT>if external_udf_uris:<EOL><INDENT>query_data['<STR_LIT>'] =[ {'<STR_LIT>': u} for u in external_udf_uris ]<EOL><DEDENT>return self._submit_query_job(query_data)<EOL>
|
Submit a query to BigQuery.
Parameters
----------
query : str
BigQuery query string
max_results : int, optional
The maximum number of rows to return per page of results.
timeout : float, optional
How long to wait for the query to complete, in seconds, before
the request times out and returns.
dry_run : bool, optional
If True, the query isn't actually run. A valid query will return an
empty response, while an invalid one will return the same error
message it would if it wasn't a dry run.
use_legacy_sql : bool, optional
If False, the query will use BigQuery's standard SQL (https://cloud.google.com/bigquery/sql-reference/). Default True.
external_udf_uris : list, optional
Contains external UDF URIs. If given, URIs must be Google Cloud
Storage and have .js extensions.
Returns
-------
tuple
(job id, query results) if the query completed. If dry_run is True,
job id will be None and results will be empty if the query is valid
or a ``dict`` containing the response if invalid.
Raises
------
BigQueryTimeoutException
on timeout
|
f9586:c0:m5
|
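A sketch of a synchronous query using the signature above; the client import path, the key file, and the sample table are assumptions:

from bigquery import get_client  # assumed import path

client = get_client(json_key_file='service-account.json')  # hypothetical key file
job_id, rows = client.query(
    'SELECT word, corpus FROM [publicdata:samples.shakespeare] LIMIT 5',
    timeout=10)

if rows:                 # the query finished within the timeout
    for row in rows:     # each row is a dict keyed by column name
        print(row)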
def get_query_schema(self, job_id):
|
query_reply = self.get_query_results(job_id, offset=<NUM_LIT:0>, limit=<NUM_LIT:0>)<EOL>if not query_reply['<STR_LIT>']:<EOL><INDENT>logger.warning('<STR_LIT>' % job_id)<EOL>raise UnfinishedQueryException()<EOL><DEDENT>return query_reply['<STR_LIT>']['<STR_LIT>']<EOL>
|
Retrieve the schema of a query by job id.
Parameters
----------
job_id : str
The job_id that references a BigQuery query
Returns
-------
list
A ``list`` of ``dict`` objects that represent the schema.
|
f9586:c0:m6
|
def get_table_schema(self, dataset, table, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>try: <EOL><INDENT>result = self.bigquery.tables().get(<EOL>projectId=project_id,<EOL>tableId=table,<EOL>datasetId=dataset).execute(num_retries=self.num_retries)<EOL><DEDENT>except HttpError as e:<EOL><INDENT>if int(e.resp['<STR_LIT:status>']) == <NUM_LIT>:<EOL><INDENT>logger.warn('<STR_LIT>', dataset, table)<EOL>return None<EOL><DEDENT>raise<EOL><DEDENT>return result['<STR_LIT>']['<STR_LIT>']<EOL>
|
Return the table schema.
Parameters
----------
dataset : str
The dataset containing the `table`.
table : str
The table to get the schema for
project_id: str, optional
The project of the dataset.
Returns
-------
list
A ``list`` of ``dict`` objects that represent the table schema. If
the table doesn't exist, None is returned.
|
f9586:c0:m7
|
def check_job(self, job_id):
|
query_reply = self.get_query_results(job_id, offset=<NUM_LIT:0>, limit=<NUM_LIT:0>)<EOL>return (query_reply.get('<STR_LIT>', False),<EOL>int(query_reply.get('<STR_LIT>', <NUM_LIT:0>)))<EOL>
|
Return the state and number of results of a query by job id.
Parameters
----------
job_id : str
The job id of the query to check.
Returns
-------
tuple
(``bool``, ``int``) Whether or not the query has completed and the
total number of rows included in the query table if it has
completed (else 0)
|
f9586:c0:m8
|
def get_query_rows(self, job_id, offset=None, limit=None, timeout=<NUM_LIT:0>):
|
<EOL>query_reply = self.get_query_results(job_id, offset=offset,<EOL>limit=limit, timeout=timeout)<EOL>if not query_reply['<STR_LIT>']:<EOL><INDENT>logger.warning('<STR_LIT>' % job_id)<EOL>raise UnfinishedQueryException()<EOL><DEDENT>schema = query_reply["<STR_LIT>"]["<STR_LIT>"]<EOL>rows = query_reply.get('<STR_LIT>', [])<EOL>page_token = query_reply.get("<STR_LIT>")<EOL>records = [self._transform_row(row, schema) for row in rows]<EOL>while page_token and (not limit or len(records) < limit):<EOL><INDENT>query_reply = self.get_query_results(<EOL>job_id, offset=offset, limit=limit, page_token=page_token,<EOL>timeout=timeout)<EOL>page_token = query_reply.get("<STR_LIT>")<EOL>rows = query_reply.get('<STR_LIT>', [])<EOL>records += [self._transform_row(row, schema) for row in rows]<EOL><DEDENT>return records[:limit] if limit else records<EOL>
|
Retrieve a list of rows from a query table by job id.
This method will append results from multiple pages together. If you
want to manually page through results, you can use `get_query_results`
method directly.
Parameters
----------
job_id : str
The job id that references a BigQuery query.
offset : int, optional
The offset of the rows to pull from BigQuery
limit : int, optional
The number of rows to retrieve from a query table.
timeout : float, optional
Timeout in seconds.
Returns
-------
list
A ``list`` of ``dict`` objects that represent table rows.
|
f9586:c0:m9
|
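When a query does not finish within the timeout, the returned job id can be polled with check_job (documented above) and the rows fetched with get_query_rows once it completes. A sketch under the same assumptions as the earlier client example:

import time
from bigquery import get_client  # assumed import path

client = get_client(json_key_file='service-account.json')  # hypothetical key file
job_id, _ = client.query('SELECT word FROM [publicdata:samples.shakespeare]',
                         timeout=0)  # may return before the job finishes

complete, row_count = client.check_job(job_id)
while not complete:
    time.sleep(5)
    complete, row_count = client.check_job(job_id)

rows = client.get_query_rows(job_id, limit=row_count)  # pages are appended together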
def check_dataset(self, dataset_id, project_id=None):
|
dataset = self.get_dataset(dataset_id, project_id)<EOL>return bool(dataset)<EOL>
|
Check to see if a dataset exists.
Parameters
----------
dataset_id : str
Dataset unique id
project_id: str, optional
The project the dataset is in
Returns
-------
bool
True if dataset at `dataset_id` exists, else False
|
f9586:c0:m10
|
def get_dataset(self, dataset_id, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>try: <EOL><INDENT>dataset = self.bigquery.datasets().get(<EOL>projectId=project_id, datasetId=dataset_id).execute(<EOL>num_retries=self.num_retries)<EOL><DEDENT>except HttpError:<EOL><INDENT>dataset = {}<EOL><DEDENT>return dataset<EOL>
|
Retrieve a dataset if it exists, otherwise return an empty dict.
Parameters
----------
dataset_id : str
Dataset unique id
project_id: str, optional
The project the dataset is in
Returns
-------
dict
Contains dataset object if it exists, else empty
|
f9586:c0:m11
|
def check_table(self, dataset, table, project_id=None):
|
table = self.get_table(dataset, table, project_id)<EOL>return bool(table)<EOL>
|
Check to see if a table exists.
Parameters
----------
dataset : str
The dataset to check
table : str
The name of the table
project_id: str, optional
The project the table is in
Returns
-------
bool
True if table exists, else False
|
f9586:c0:m12
|
def get_table(self, dataset, table, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>try: <EOL><INDENT>table = self.bigquery.tables().get(<EOL>projectId=project_id, datasetId=dataset,<EOL>tableId=table).execute(num_retries=self.num_retries)<EOL><DEDENT>except HttpError:<EOL><INDENT>table = {}<EOL><DEDENT>return table<EOL>
|
Retrieve a table if it exists, otherwise return an empty dict.
Parameters
----------
dataset : str
The dataset that the table is in
table : str
The name of the table
project_id: str, optional
The project that the table is in
Returns
-------
dict
Containing the table object if it exists, else empty
|
f9586:c0:m13
|
def create_table(self, dataset, table, schema,<EOL>expiration_time=None, time_partitioning=False,<EOL>project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>body = {<EOL>'<STR_LIT>': {'<STR_LIT>': schema},<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': table,<EOL>'<STR_LIT>': project_id,<EOL>'<STR_LIT>': dataset<EOL>}<EOL>}<EOL>if expiration_time is not None:<EOL><INDENT>body['<STR_LIT>'] = expiration_time<EOL><DEDENT>if time_partitioning:<EOL><INDENT>body['<STR_LIT>'] = {'<STR_LIT:type>': '<STR_LIT>'}<EOL><DEDENT>try:<EOL><INDENT>table = self.bigquery.tables().insert(<EOL>projectId=project_id,<EOL>datasetId=dataset,<EOL>body=body<EOL>).execute(num_retries=self.num_retries)<EOL>if self.swallow_results:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return table<EOL><DEDENT><DEDENT>except HttpError as e:<EOL><INDENT>logger.error(('<STR_LIT>'<EOL>'<STR_LIT>').format(project_id, dataset, table, e.content))<EOL>if self.swallow_results:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT><DEDENT>
|
Create a new table in the dataset.
Parameters
----------
dataset : str
The dataset to create the table in
table : str
The name of the table to create
schema : dict
The table schema
expiration_time : int or float, optional
The expiry time in milliseconds since the epoch.
time_partitioning : bool, optional
Create a time-partitioned table.
project_id: str, optional
The project to create the table in
Returns
-------
Union[bool, dict]
bool indicating if the table was successfully created,
or the response from BigQuery if swallow_results is set to False
|
f9586:c0:m14
|
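A sketch of create_table with an explicit schema; the field dicts follow the standard BigQuery schema field format (name/type/mode), and all names are hypothetical:

from bigquery import get_client  # assumed import path

client = get_client(json_key_file='service-account.json')  # hypothetical key file
schema = [
    {'name': 'id', 'type': 'INTEGER', 'mode': 'REQUIRED'},
    {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
    {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'},
]

created = client.create_table('my_dataset', 'my_table', schema,
                              time_partitioning=True)
# True/False by default; the raw table resource when swallow_results=False.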
def update_table(self, dataset, table, schema, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>body = {<EOL>'<STR_LIT>': {'<STR_LIT>': schema},<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': table,<EOL>'<STR_LIT>': project_id,<EOL>'<STR_LIT>': dataset<EOL>}<EOL>}<EOL>try:<EOL><INDENT>result = self.bigquery.tables().update(<EOL>projectId=project_id,<EOL>tableId= table,<EOL>datasetId=dataset,<EOL>body=body<EOL>).execute(num_retries=self.num_retries)<EOL>if self.swallow_results:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return result<EOL><DEDENT><DEDENT>except HttpError as e:<EOL><INDENT>logger.error(('<STR_LIT>'<EOL>'<STR_LIT>').format(project_id, dataset, table, e.content))<EOL>if self.swallow_results:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT><DEDENT>
|
Update an existing table in the dataset.
Parameters
----------
dataset : str
The dataset to update the table in
table : str
The name of the table to update
schema : dict
Table schema
project_id: str, optional
The project to update the table in
Returns
-------
Union[bool, dict]
bool indicating if the table was successfully updated or not,
or response from BigQuery if swallow_results is set to False.
|
f9586:c0:m15
|
def patch_table(self, dataset, table, schema, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>body = {<EOL>'<STR_LIT>': {'<STR_LIT>': schema},<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': table,<EOL>'<STR_LIT>': project_id,<EOL>'<STR_LIT>': dataset<EOL>}<EOL>}<EOL>try:<EOL><INDENT>result = self.bigquery.tables().patch(<EOL>projectId=project_id,<EOL>datasetId=dataset,<EOL>body=body<EOL>).execute(num_retries=self.num_retries)<EOL>if self.swallow_results:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return result<EOL><DEDENT><DEDENT>except HttpError as e:<EOL><INDENT>logger.error(('<STR_LIT>'<EOL>'<STR_LIT>').format(project_id, dataset, table, e.content))<EOL>if self.swallow_results:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT><DEDENT>
|
Patch an existing table in the dataset.
Parameters
----------
dataset : str
The dataset to patch the table in
table : str
The name of the table to patch
schema : dict
The table schema
project_id: str, optional
The project to patch the table in
Returns
-------
Union[bool, dict]
Bool indicating if the table was successfully patched or not,
or response from BigQuery if swallow_results is set to False
|
f9586:c0:m16
|
def create_view(self, dataset, view, query, use_legacy_sql=None, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>body = {<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': view,<EOL>'<STR_LIT>': project_id,<EOL>'<STR_LIT>': dataset<EOL>},<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': query<EOL>}<EOL>}<EOL>if use_legacy_sql is not None:<EOL><INDENT>body['<STR_LIT>']['<STR_LIT>'] = use_legacy_sql<EOL><DEDENT>try:<EOL><INDENT>view = self.bigquery.tables().insert(<EOL>projectId=project_id,<EOL>datasetId=dataset,<EOL>body=body<EOL>).execute(num_retries=self.num_retries)<EOL>if self.swallow_results:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return view<EOL><DEDENT><DEDENT>except HttpError as e:<EOL><INDENT>logger.error(('<STR_LIT>'<EOL>'<STR_LIT>').format(dataset, view, e.content))<EOL>if self.swallow_results:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT><DEDENT>
|
Create a new view in the dataset.
Parameters
----------
dataset : str
The dataset to create the view in
view : str
The name of the view to create
query : str
The SQL query that BigQuery executes when the view is referenced.
use_legacy_sql : bool, optional
If False, the query will use BigQuery's standard SQL
(https://cloud.google.com/bigquery/sql-reference/)
project_id: str, optional
The project to create the view in
Returns
-------
Union[bool, dict]
bool indicating if the view was successfully created or not,
or response from BigQuery if swallow_results is set to False.
|
f9586:c0:m17
|
def delete_table(self, dataset, table, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>try: <EOL><INDENT>response = self.bigquery.tables().delete(<EOL>projectId=project_id,<EOL>datasetId=dataset,<EOL>tableId=table<EOL>).execute(num_retries=self.num_retries)<EOL>if self.swallow_results:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return response<EOL><DEDENT><DEDENT>except HttpError as e:<EOL><INDENT>logger.error(('<STR_LIT>'<EOL>'<STR_LIT>').format(dataset, table, e.content))<EOL>if self.swallow_results:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT><DEDENT>
|
Delete a table from the dataset.
Parameters
----------
dataset : str
The dataset to delete the table from.
table : str
The name of the table to delete
project_id: str, optional
String id of the project
Returns
-------
Union[bool, dict]
bool indicating if the table was successfully deleted or not,
or response from BigQuery if swallow_results is set to False.
|
f9586:c0:m18
|
def get_tables(self, dataset_id, app_id, start_time, end_time, project_id=None):
|
if isinstance(start_time, datetime):<EOL><INDENT>start_time = calendar.timegm(start_time.utctimetuple())<EOL><DEDENT>if isinstance(end_time, datetime):<EOL><INDENT>end_time = calendar.timegm(end_time.utctimetuple())<EOL><DEDENT>every_table = self._get_all_tables(dataset_id, project_id)<EOL>app_tables = every_table.get(app_id, {})<EOL>return self._filter_tables_by_time(app_tables, start_time, end_time)<EOL>
|
Retrieve a list of tables that are related to the given app id
and are inside the range of start and end times.
Parameters
----------
dataset_id : str
The BigQuery dataset id to consider.
app_id : str
The appspot name
start_time : Union[datetime, int]
The datetime or unix time after which records will be fetched.
end_time : Union[datetime, int]
The datetime or unix time up to which records will be fetched.
project_id: str, optional
String id of the project
Returns
-------
list
A ``list`` of table names.
|
f9586:c0:m19
|
def import_data_from_uris(<EOL>self,<EOL>source_uris,<EOL>dataset,<EOL>table,<EOL>schema=None, <EOL>job=None,<EOL>source_format=None,<EOL>create_disposition=None,<EOL>write_disposition=None,<EOL>encoding=None,<EOL>ignore_unknown_values=None,<EOL>max_bad_records=None,<EOL>allow_jagged_rows=None,<EOL>allow_quoted_newlines=None,<EOL>field_delimiter=None,<EOL>quote=None,<EOL>skip_leading_rows=None,<EOL>project_id=None,<EOL>):
|
source_uris = source_uris if isinstance(source_uris, list) else [source_uris]<EOL>project_id = self._get_project_id(project_id)<EOL>configuration = {<EOL>"<STR_LIT>": {<EOL>"<STR_LIT>": project_id,<EOL>"<STR_LIT>": table,<EOL>"<STR_LIT>": dataset<EOL>},<EOL>"<STR_LIT>": source_uris,<EOL>}<EOL>if max_bad_records:<EOL><INDENT>configuration['<STR_LIT>'] = max_bad_records<EOL><DEDENT>if ignore_unknown_values:<EOL><INDENT>configuration['<STR_LIT>'] = ignore_unknown_values<EOL><DEDENT>if create_disposition:<EOL><INDENT>configuration['<STR_LIT>'] = create_disposition<EOL><DEDENT>if write_disposition:<EOL><INDENT>configuration['<STR_LIT>'] = write_disposition<EOL><DEDENT>if encoding:<EOL><INDENT>configuration['<STR_LIT>'] = encoding<EOL><DEDENT>if schema:<EOL><INDENT>configuration['<STR_LIT>'] = {'<STR_LIT>': schema}<EOL><DEDENT>if source_format:<EOL><INDENT>configuration['<STR_LIT>'] = source_format<EOL><DEDENT>if not job:<EOL><INDENT>hex = self._generate_hex_for_uris(source_uris)<EOL>job = "<STR_LIT>".format(<EOL>dataset=dataset,<EOL>table=table,<EOL>digest=hex<EOL>)<EOL><DEDENT>if source_format == JOB_SOURCE_FORMAT_CSV:<EOL><INDENT>if field_delimiter:<EOL><INDENT>configuration['<STR_LIT>'] = field_delimiter<EOL><DEDENT>if allow_jagged_rows:<EOL><INDENT>configuration['<STR_LIT>'] = allow_jagged_rows<EOL><DEDENT>if allow_quoted_newlines:<EOL><INDENT>configuration['<STR_LIT>'] = allow_quoted_newlines<EOL><DEDENT>if quote:<EOL><INDENT>configuration['<STR_LIT>'] = quote<EOL><DEDENT>if skip_leading_rows:<EOL><INDENT>configuration['<STR_LIT>'] = skip_leading_rows<EOL><DEDENT><DEDENT>elif field_delimiter or allow_jagged_rows or allow_quoted_newlines or quote or skip_leading_rows:<EOL><INDENT>all_values = dict(field_delimiter=field_delimiter,<EOL>allow_jagged_rows=allow_jagged_rows,<EOL>allow_quoted_newlines=allow_quoted_newlines,<EOL>skip_leading_rows=skip_leading_rows,<EOL>quote=quote)<EOL>non_null_values = dict((k, v) for k, v<EOL>in list(all_values.items())<EOL>if v)<EOL>raise Exception("<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>% non_null_values)<EOL><DEDENT>body = {<EOL>"<STR_LIT>": {<EOL>'<STR_LIT>': configuration<EOL>},<EOL>"<STR_LIT>": self._get_job_reference(job)<EOL>}<EOL>logger.debug("<STR_LIT>" % body)<EOL>job_resource = self._insert_job(body)<EOL>self._raise_insert_exception_if_error(job_resource)<EOL>return job_resource<EOL>
|
Imports data into a BigQuery table from cloud storage. Optional
arguments that are not specified are determined by BigQuery as
described:
https://developers.google.com/bigquery/docs/reference/v2/jobs
Parameters
----------
source_uris : list
A ``list`` of ``str`` objects representing the URIs on cloud
storage of the form: gs://bucket/filename
dataset : str
String id of the dataset
table : str
String id of the table
schema : list, optional
Represents the BigQuery schema
job : str, optional
Identifies the job (a unique job id is automatically generated if
not provided)
source_format : str, optional
One of the JOB_SOURCE_FORMAT_* constants
create_disposition : str, optional
One of the JOB_CREATE_* constants
write_disposition : str, optional
One of the JOB_WRITE_* constants
encoding : str, optional
One of the JOB_ENCODING_* constants
ignore_unknown_values : bool, optional
Whether or not to ignore unknown values
max_bad_records : int, optional
Maximum number of bad records
allow_jagged_rows : bool, optional
For csv only
allow_quoted_newlines : bool, optional
For csv only
field_delimiter : str, optional
For csv only
quote : str, optional
Quote character for csv only
skip_leading_rows : int, optional
For csv only
project_id: str, optional
String id of the project
Returns
-------
dict
A BigQuery job response
Raises
------
JobInsertException
on http/auth failures or error in result
|
f9586:c0:m20
|
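A sketch of a CSV load job from Cloud Storage followed by wait_for_job; the exact JOB_* constant names beyond the patterns given in the docstring, the import paths, and the bucket are assumptions:

from bigquery import get_client, JOB_SOURCE_FORMAT_CSV, JOB_WRITE_TRUNCATE  # assumed imports

client = get_client(json_key_file='service-account.json')  # hypothetical key file
schema = [{'name': 'id', 'type': 'INTEGER'}, {'name': 'name', 'type': 'STRING'}]

job = client.import_data_from_uris(
    ['gs://my-bucket/data-*.csv'],        # hypothetical source files
    'my_dataset', 'my_table',
    schema=schema,
    source_format=JOB_SOURCE_FORMAT_CSV,
    write_disposition=JOB_WRITE_TRUNCATE,
    skip_leading_rows=1,
    field_delimiter=',')

job = client.wait_for_job(job, interval=10, timeout=300)  # raises on timeout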
def export_data_to_uris(<EOL>self,<EOL>destination_uris,<EOL>dataset,<EOL>table, <EOL>job=None,<EOL>compression=None,<EOL>destination_format=None,<EOL>print_header=None,<EOL>field_delimiter=None,<EOL>project_id=None,<EOL>):
|
destination_uris = destination_uris if isinstance(destination_uris, list) else [destination_uris]<EOL>project_id = self._get_project_id(project_id)<EOL>configuration = {<EOL>"<STR_LIT>": {<EOL>"<STR_LIT>": project_id,<EOL>"<STR_LIT>": table,<EOL>"<STR_LIT>": dataset<EOL>},<EOL>"<STR_LIT>": destination_uris,<EOL>}<EOL>if compression:<EOL><INDENT>configuration['<STR_LIT>'] = compression<EOL><DEDENT>if destination_format:<EOL><INDENT>configuration['<STR_LIT>'] = destination_format<EOL><DEDENT>if print_header is not None:<EOL><INDENT>configuration['<STR_LIT>'] = print_header<EOL><DEDENT>if field_delimiter:<EOL><INDENT>configuration['<STR_LIT>'] = field_delimiter<EOL><DEDENT>if not job:<EOL><INDENT>hex = self._generate_hex_for_uris(destination_uris)<EOL>job = "<STR_LIT>".format(<EOL>dataset=dataset,<EOL>table=table,<EOL>digest=hex<EOL>)<EOL><DEDENT>body = {<EOL>"<STR_LIT>": {<EOL>'<STR_LIT>': configuration<EOL>},<EOL>"<STR_LIT>": self._get_job_reference(job)<EOL>}<EOL>logger.info("<STR_LIT>" % body)<EOL>job_resource = self._insert_job(body)<EOL>self._raise_insert_exception_if_error(job_resource)<EOL>return job_resource<EOL>
|
Export data from a BigQuery table to cloud storage. Optional arguments
that are not specified are determined by BigQuery as described:
https://developers.google.com/bigquery/docs/reference/v2/jobs
Parameters
----------
destination_uris : Union[str, list]
``str`` or ``list`` of ``str`` objects representing the URIs on
cloud storage of the form: gs://bucket/filename
dataset : str
String id of the dataset
table : str
String id of the table
job : str, optional
String identifying the job (a unique job id is automatically
generated if not provided)
compression : str, optional
One of the JOB_COMPRESSION_* constants
destination_format : str, optional
One of the JOB_DESTINATION_FORMAT_* constants
print_header : bool, optional
Whether or not to print the header
field_delimiter : str, optional
Character separating fields in delimited file
project_id: str, optional
String id of the project
Returns
-------
dict
A BigQuery job resource
Raises
------
JobInsertException
On http/auth failures or error in result
|
f9586:c0:m21
|
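A sketch of an extract (export) job to Cloud Storage; 'GZIP' and 'CSV' stand in for the JOB_COMPRESSION_* / JOB_DESTINATION_FORMAT_* constants named above, and all names are hypothetical:

from bigquery import get_client  # assumed import path

client = get_client(json_key_file='service-account.json')  # hypothetical key file
job = client.export_data_to_uris(
    ['gs://my-bucket/export-*.csv.gz'],   # hypothetical destination
    'my_dataset', 'my_table',
    compression='GZIP',
    destination_format='CSV',
    print_header=True)

job = client.wait_for_job(job, interval=10, timeout=600)  # raises on timeout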
def write_to_table(<EOL>self,<EOL>query,<EOL>dataset=None,<EOL>table=None, <EOL>external_udf_uris=None,<EOL>allow_large_results=None,<EOL>use_query_cache=None,<EOL>priority=None,<EOL>create_disposition=None,<EOL>write_disposition=None,<EOL>use_legacy_sql=None,<EOL>maximum_billing_tier=None,<EOL>flatten=None,<EOL>project_id=None,<EOL>):
|
configuration = {<EOL>"<STR_LIT>": query,<EOL>}<EOL>project_id = self._get_project_id(project_id)<EOL>if dataset and table:<EOL><INDENT>configuration['<STR_LIT>'] = {<EOL>"<STR_LIT>": project_id,<EOL>"<STR_LIT>": table,<EOL>"<STR_LIT>": dataset<EOL>}<EOL><DEDENT>if allow_large_results is not None:<EOL><INDENT>configuration['<STR_LIT>'] = allow_large_results<EOL><DEDENT>if flatten is not None:<EOL><INDENT>configuration['<STR_LIT>'] = flatten<EOL><DEDENT>if maximum_billing_tier is not None:<EOL><INDENT>configuration['<STR_LIT>'] = maximum_billing_tier<EOL><DEDENT>if use_query_cache is not None:<EOL><INDENT>configuration['<STR_LIT>'] = use_query_cache<EOL><DEDENT>if use_legacy_sql is not None:<EOL><INDENT>configuration['<STR_LIT>'] = use_legacy_sql<EOL><DEDENT>if priority:<EOL><INDENT>configuration['<STR_LIT>'] = priority<EOL><DEDENT>if create_disposition:<EOL><INDENT>configuration['<STR_LIT>'] = create_disposition<EOL><DEDENT>if write_disposition:<EOL><INDENT>configuration['<STR_LIT>'] = write_disposition<EOL><DEDENT>if external_udf_uris:<EOL><INDENT>configuration['<STR_LIT>'] =[ {'<STR_LIT>': u} for u in external_udf_uris ]<EOL><DEDENT>body = {<EOL>"<STR_LIT>": {<EOL>'<STR_LIT>': configuration<EOL>}<EOL>}<EOL>logger.info("<STR_LIT>" % body)<EOL>job_resource = self._insert_job(body)<EOL>self._raise_insert_exception_if_error(job_resource)<EOL>return job_resource<EOL>
|
Write query results to a table. If dataset or table is not provided,
BigQuery will write the result to a temporary table. Optional arguments
that are not specified are determined by BigQuery as described:
https://developers.google.com/bigquery/docs/reference/v2/jobs
Parameters
----------
query : str
BigQuery query string
dataset : str, optional
String id of the dataset
table : str, optional
String id of the table
external_udf_uris : list, optional
Contains external UDF URIs. If given, URIs must be Google Cloud
Storage and have .js extensions.
allow_large_results : bool, optional
Whether or not to allow large results
use_query_cache : bool, optional
Whether or not to use query cache
priority : str, optional
One of the JOB_PRIORITY_* constants
create_disposition : str, optional
One of the JOB_CREATE_* constants
write_disposition : str, optional
One of the JOB_WRITE_* constants
use_legacy_sql: bool, optional
If False, the query will use BigQuery's standard SQL
(https://cloud.google.com/bigquery/sql-reference/)
maximum_billing_tier : integer, optional
Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If
unspecified, this will be set to your project default. For more
information,
see https://cloud.google.com/bigquery/pricing#high-compute
flatten : bool, optional
Whether or not to flatten nested and repeated fields
in query results
project_id: str, optional
String id of the project
Returns
-------
dict
A BigQuery job resource
Raises
------
JobInsertException
On http/auth failures or error in result
|
f9586:c0:m22
|
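A sketch that materializes query results into a destination table and then blocks on wait_for_job (documented in the next row); the client and exceptions import paths are assumptions:

from bigquery import get_client  # assumed import path
from bigquery.errors import BigQueryTimeoutException  # assumed import path

client = get_client(json_key_file='service-account.json')  # hypothetical key file
job = client.write_to_table(
    'SELECT name, COUNT(*) AS n FROM my_dataset.events GROUP BY name',
    dataset='my_dataset',
    table='name_counts',
    use_legacy_sql=False,
    allow_large_results=True)

try:
    job = client.wait_for_job(job, interval=5, timeout=120)
except BigQueryTimeoutException:
    pass  # still running; keep the job reference and poll again later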
def wait_for_job(self, job, interval=<NUM_LIT:5>, timeout=<NUM_LIT>):
|
complete = False<EOL>job_id = str(job if isinstance(job,<EOL>(six.binary_type, six.text_type, int))<EOL>else job['<STR_LIT>']['<STR_LIT>'])<EOL>job_resource = None<EOL>start_time = time()<EOL>elapsed_time = <NUM_LIT:0><EOL>while not (complete or elapsed_time > timeout):<EOL><INDENT>sleep(interval)<EOL>request = self.bigquery.jobs().get(projectId=self.project_id,<EOL>jobId=job_id)<EOL>job_resource = request.execute(num_retries=self.num_retries)<EOL>self._raise_executing_exception_if_error(job_resource)<EOL>complete = job_resource.get('<STR_LIT:status>').get('<STR_LIT:state>') == u'<STR_LIT>'<EOL>elapsed_time = time() - start_time<EOL><DEDENT>if not complete:<EOL><INDENT>logger.error('<STR_LIT>' % job_id)<EOL>raise BigQueryTimeoutException()<EOL><DEDENT>return job_resource<EOL>
|
Waits until the job indicated by job_resource is done or has failed
Parameters
----------
job : Union[dict, str]
``dict`` representing a BigQuery job resource, or a ``str``
representing the BigQuery job id
interval : float, optional
Polling interval in seconds, default = 5
timeout : float, optional
Timeout in seconds, default = 60
Returns
-------
dict
Final state of the job resource, as described here:
https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.jobs.html#get
Raises
------
Union[JobExecutingException, BigQueryTimeoutException]
On http/auth failures or timeout
|
f9586:c0:m23
|
def push_rows(self, dataset, table, rows, insert_id_key=None,<EOL>skip_invalid_rows=None, ignore_unknown_values=None,<EOL>template_suffix=None, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>table_data = self.bigquery.tabledata()<EOL>rows_data = []<EOL>for row in rows:<EOL><INDENT>each_row = {}<EOL>each_row["<STR_LIT>"] = row<EOL>if insert_id_key is not None:<EOL><INDENT>keys = insert_id_key.split('<STR_LIT:.>')<EOL>val = reduce(lambda d, key: d.get(key) if d else None, keys, row)<EOL>if val is not None:<EOL><INDENT>each_row["<STR_LIT>"] = val<EOL><DEDENT><DEDENT>rows_data.append(each_row)<EOL><DEDENT>data = {<EOL>"<STR_LIT>": "<STR_LIT>",<EOL>"<STR_LIT>": rows_data<EOL>}<EOL>if skip_invalid_rows is not None:<EOL><INDENT>data['<STR_LIT>'] = skip_invalid_rows<EOL><DEDENT>if ignore_unknown_values is not None:<EOL><INDENT>data['<STR_LIT>'] = ignore_unknown_values<EOL><DEDENT>if template_suffix is not None:<EOL><INDENT>data['<STR_LIT>'] = template_suffix<EOL><DEDENT>try: <EOL><INDENT>response = table_data.insertAll(<EOL>projectId=project_id,<EOL>datasetId=dataset,<EOL>tableId=table,<EOL>body=data<EOL>).execute(num_retries=self.num_retries)<EOL>if response.get('<STR_LIT>'):<EOL><INDENT>logger.error('<STR_LIT>' % response)<EOL>if self.swallow_results:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return response<EOL><DEDENT><DEDENT>if self.swallow_results:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return response<EOL><DEDENT><DEDENT>except HttpError as e:<EOL><INDENT>logger.exception('<STR_LIT>')<EOL>if self.swallow_results:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return {<EOL>'<STR_LIT>': [{<EOL>'<STR_LIT>': [{<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT:message>': e<EOL>}]<EOL>}]<EOL>}<EOL><DEDENT><DEDENT>
|
Upload rows to BigQuery table.
Parameters
----------
dataset : str
The dataset to upload to
table : str
The name of the table to insert rows into
rows : list
A ``list`` of rows (``dict`` objects) to add to the table
insert_id_key : str, optional
Key for insertId in row.
You can use dot separated key for nested column.
skip_invalid_rows : bool, optional
Insert all valid rows of a request, even if invalid rows exist.
ignore_unknown_values : bool, optional
Accept rows that contain values that do not match the schema.
template_suffix : str, optional
Inserts the rows into table {table}{template_suffix}.
If {table}{template_suffix} doesn't exist, it is created from {table}.
project_id: str, optional
The project to upload to
Returns
-------
Union[bool, dict]
bool indicating if insert succeeded or not, or response
from BigQuery if swallow_results is set to False.
|
f9586:c0:m24
|
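A streaming-insert sketch for push_rows; the row contents and the nested insert-id key are hypothetical:

from bigquery import get_client  # assumed import path

client = get_client(json_key_file='service-account.json')  # hypothetical key file
rows = [
    {'id': 1, 'name': 'alice', 'meta': {'uuid': 'a-1'}},
    {'id': 2, 'name': 'bob', 'meta': {'uuid': 'b-2'}},
]

ok = client.push_rows('my_dataset', 'my_table', rows,
                      insert_id_key='meta.uuid',   # dot-separated nested key
                      skip_invalid_rows=True,
                      ignore_unknown_values=True)
# bool by default; the raw insertAll response when swallow_results=False
# (any 'insertErrors' are logged either way).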
def get_all_tables(self, dataset_id, project_id=None):
|
tables_data = self._get_all_tables_for_dataset(dataset_id, project_id)<EOL>tables = []<EOL>for table in tables_data.get('<STR_LIT>', []):<EOL><INDENT>table_name = table.get('<STR_LIT>', {}).get('<STR_LIT>')<EOL>if table_name:<EOL><INDENT>tables.append(table_name)<EOL><DEDENT><DEDENT>return tables<EOL>
|
Retrieve a list of tables for the dataset.
Parameters
----------
dataset_id : str
The dataset to retrieve table data for.
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
A ``list`` with all table names
|
f9586:c0:m25
|
def _get_all_tables(self, dataset_id, cache=False, project_id=None):
|
do_fetch = True<EOL>if cache and self.cache.get(dataset_id):<EOL><INDENT>time, result = self.cache.get(dataset_id)<EOL>if datetime.now() - time < CACHE_TIMEOUT:<EOL><INDENT>do_fetch = False<EOL><DEDENT><DEDENT>if do_fetch:<EOL><INDENT>result = self._get_all_tables_for_dataset(dataset_id, project_id)<EOL>self.cache[dataset_id] = (datetime.now(), result)<EOL><DEDENT>return self._parse_table_list_response(result)<EOL>
|
Retrieve the list of tables for dataset, that respect the formats:
* appid_YYYY_MM
* YYYY_MM_appid
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
cache : bool, optional
To use cached value or not (default False). Timeout value equals
CACHE_TIMEOUT.
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
dict
A ``dict`` of app ids mapped to their table names
|
f9586:c0:m26
|
def _get_all_tables_for_dataset(self, dataset_id, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>result = self.bigquery.tables().list(<EOL>projectId=project_id,<EOL>datasetId=dataset_id).execute(num_retries=self.num_retries)<EOL>page_token = result.get('<STR_LIT>')<EOL>while page_token:<EOL><INDENT>res = self.bigquery.tables().list(<EOL>projectId=project_id,<EOL>datasetId=dataset_id,<EOL>pageToken=page_token<EOL>).execute(num_retries=self.num_retries)<EOL>page_token = res.get('<STR_LIT>')<EOL>result['<STR_LIT>'] += res.get('<STR_LIT>', [])<EOL><DEDENT>return result<EOL>
|
Retrieve a list of all tables for the dataset.
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
dict
A ``dict`` containing a tables key with all tables in the dataset
|
f9586:c0:m27
|
def _parse_table_list_response(self, list_response):
|
tables = defaultdict(dict)<EOL>for table in list_response.get('<STR_LIT>', []):<EOL><INDENT>table_ref = table.get('<STR_LIT>')<EOL>if not table_ref:<EOL><INDENT>continue<EOL><DEDENT>table_id = table_ref.get('<STR_LIT>', '<STR_LIT>')<EOL>year_month, app_id = self._parse_table_name(table_id)<EOL>if not year_month:<EOL><INDENT>continue<EOL><DEDENT>table_date = datetime.strptime(year_month, '<STR_LIT>')<EOL>unix_seconds = calendar.timegm(table_date.timetuple())<EOL>tables[app_id].update({table_id: unix_seconds})<EOL><DEDENT>tables.default_factory = None<EOL>return tables<EOL>
|
Parse the response received from calling list on tables.
Parameters
----------
list_response
The response found by calling list on a BigQuery table object.
Returns
-------
dict
Dates referenced by table names
|
f9586:c0:m28
|
def _parse_table_name(self, table_id):
|
<EOL>attributes = table_id.split('<STR_LIT:_>')<EOL>year_month = "<STR_LIT:->".join(attributes[:<NUM_LIT:2>])<EOL>app_id = "<STR_LIT:->".join(attributes[<NUM_LIT:2>:])<EOL>if year_month.count("<STR_LIT:->") == <NUM_LIT:1> and all(<EOL>[num.isdigit() for num in year_month.split('<STR_LIT:->')]):<EOL><INDENT>return year_month, app_id<EOL><DEDENT>attributes = table_id.split('<STR_LIT:_>')<EOL>year_month = "<STR_LIT:->".join(attributes[-<NUM_LIT:2>:])<EOL>app_id = "<STR_LIT:->".join(attributes[:-<NUM_LIT:2>])<EOL>if year_month.count("<STR_LIT:->") == <NUM_LIT:1> and all(<EOL>[num.isdigit() for num in year_month.split('<STR_LIT:->')]) and len(year_month) == <NUM_LIT:7>:<EOL><INDENT>return year_month, app_id<EOL><DEDENT>return None, None<EOL>
|
Parse a table name in the form of appid_YYYY_MM or
YYYY_MM_appid and return a tuple consisting of YYYY-MM and the app id.
Returns (None, None) in the event of a name like <desc>_YYYYMMDD_<int>
Parameters
----------
table_id : str
The table id as listed by BigQuery
Returns
-------
tuple
(year/month, app id), or (None, None) if the table id cannot be
parsed.
|
f9586:c0:m29
|
def _filter_tables_by_time(self, tables, start_time, end_time):
|
return [table_name for (table_name, unix_seconds) in tables.items()<EOL>if self._in_range(start_time, end_time, unix_seconds)]<EOL>
|
Filter a table dictionary and return table names based on the range
of start and end times in unix seconds.
Parameters
----------
tables : dict
Dates referenced by table names
start_time : int
The unix time after which records will be fetched
end_time : int
The unix time up to which records will be fetched
Returns
-------
list
Table names that are inside the time range
|
f9586:c0:m30
|
def _in_range(self, start_time, end_time, time):
|
ONE_MONTH = <NUM_LIT> <EOL>return start_time <= time <= end_time or time <= start_time <= time + ONE_MONTH or time <= end_time <= time + ONE_MONTH<EOL>
|
Indicate if the given time falls inside of the given range.
Parameters
----------
start_time : int
The unix time for the start of the range
end_time : int
The unix time for the end of the range
time : int
The unix time to check
Returns
-------
bool
True if the time falls within the range, False otherwise.
|
f9586:c0:m31
|
def get_query_results(self, job_id, offset=None, limit=None,<EOL>page_token=None, timeout=<NUM_LIT:0>):
|
job_collection = self.bigquery.jobs()<EOL>return job_collection.getQueryResults(<EOL>projectId=self.project_id,<EOL>jobId=job_id,<EOL>startIndex=offset,<EOL>maxResults=limit,<EOL>pageToken=page_token,<EOL>timeoutMs=timeout * <NUM_LIT:1000>).execute(num_retries=self.num_retries)<EOL>
|
Execute the query job indicated by the given job id. This is direct
mapping to bigquery api
https://cloud.google.com/bigquery/docs/reference/v2/jobs/getQueryResults
Parameters
----------
job_id : str
The job id of the query to check
offset : optional
The index the result set should start at.
limit : int, optional
The maximum number of results to retrieve.
page_token : optional
Page token, returned by previous call, to request the next page of
results.
timeout : float, optional
Timeout in seconds
Returns
-------
dict
The query reply
|
f9586:c0:m32
|
def _transform_row(self, row, schema):
|
log = {}<EOL>for index, col_dict in enumerate(schema):<EOL><INDENT>col_name = col_dict['<STR_LIT:name>']<EOL>row_value = row['<STR_LIT:f>'][index]['<STR_LIT:v>']<EOL>if row_value is None:<EOL><INDENT>log[col_name] = None<EOL>continue<EOL><DEDENT>if col_dict['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>row_value = self._recurse_on_row(col_dict, row_value)<EOL><DEDENT>elif col_dict['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>row_value = int(row_value)<EOL><DEDENT>elif col_dict['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>row_value = float(row_value)<EOL><DEDENT>elif col_dict['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>row_value = row_value in ('<STR_LIT:True>', '<STR_LIT:true>', '<STR_LIT>')<EOL><DEDENT>elif col_dict['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>row_value = float(row_value)<EOL><DEDENT>log[col_name] = row_value<EOL><DEDENT>return log<EOL>
|
Apply the given schema to the given BigQuery data row.
Parameters
----------
row
A single BigQuery row to transform
schema : list
The BigQuery table schema to apply to the row, specifically
the list of field dicts.
Returns
-------
dict
The row as a ``dict`` mapping column names to values converted per the schema
|
f9586:c0:m33
|
def _recurse_on_row(self, col_dict, nested_value):
|
row_value = None<EOL>if col_dict['<STR_LIT>'] == '<STR_LIT>' and isinstance(nested_value, list):<EOL><INDENT>row_value = [self._transform_row(record['<STR_LIT:v>'], col_dict['<STR_LIT>'])<EOL>for record in nested_value]<EOL><DEDENT>else:<EOL><INDENT>row_value = self._transform_row(nested_value, col_dict['<STR_LIT>'])<EOL><DEDENT>return row_value<EOL>
|
Apply the schema specified by the given dict to the nested value by
recursing on it.
Parameters
----------
col_dict : dict
The schema to apply to the nested value.
nested_value : A value nested in a BigQuery row.
Returns
-------
Union[dict, list]
``dict`` or ``list`` of ``dict`` objects from applied schema.
|
f9586:c0:m34
|
def _generate_hex_for_uris(self, uris):
|
return sha256(("<STR_LIT::>".join(uris) + str(time())).encode()).hexdigest()<EOL>
|
Given a list of URIs, generate and return a hex digest for them
Parameters
----------
uris : list
Containing all uris
Returns
-------
str
Hexed uris
|
f9586:c0:m35
|
def create_dataset(self, dataset_id, friendly_name=None, description=None,<EOL>access=None, location=None, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>try: <EOL><INDENT>datasets = self.bigquery.datasets()<EOL>dataset_data = self.dataset_resource(dataset_id, <EOL>project_id=project_id,<EOL>friendly_name=friendly_name,<EOL>description=description,<EOL>access=access,<EOL>location=location<EOL>)<EOL>response = datasets.insert(projectId=project_id,<EOL>body=dataset_data).execute(<EOL>num_retries=self.num_retries)<EOL>if self.swallow_results:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return response<EOL><DEDENT><DEDENT>except HttpError as e:<EOL><INDENT>logger.error(<EOL>'<STR_LIT>'.format(dataset_id, e))<EOL>if self.swallow_results:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT><DEDENT>
|
Create a new BigQuery dataset.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceID of the dataset, not the integer id of the dataset)
friendly_name: str, optional
A human readable name
description: str, optional
Longer string providing a description
access : list, optional
Indicating access permissions (see
https://developers.google.com/bigquery/docs/reference/v2/datasets#resource)
location : str, optional
Indicating where dataset should be stored: EU or US (see
https://developers.google.com/bigquery/docs/reference/v2/datasets#resource)
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if the dataset was created or not, or response
from BigQuery if swallow_results is set to False
|
f9586:c0:m38
|
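A sketch combining check_dataset (documented earlier) with create_dataset; the dataset name, descriptions, and import path are hypothetical:

from bigquery import get_client  # assumed import path

client = get_client(json_key_file='service-account.json')  # hypothetical key file
if not client.check_dataset('analytics'):
    client.create_dataset('analytics',
                          friendly_name='Analytics data',
                          description='Event-level analytics tables',
                          location='EU')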
def get_datasets(self, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>try: <EOL><INDENT>datasets = self.bigquery.datasets()<EOL>request = datasets.list(projectId=project_id)<EOL>result = request.execute(num_retries=self.num_retries)<EOL>return result.get('<STR_LIT>', [])<EOL><DEDENT>except HttpError as e:<EOL><INDENT>logger.error("<STR_LIT>".format(e))<EOL>return None<EOL><DEDENT>
|
List all datasets in the project.
Parameters
----------
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
list
Dataset resources
|
f9586:c0:m39
|
def delete_dataset(self, dataset_id, delete_contents=False, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>try: <EOL><INDENT>datasets = self.bigquery.datasets()<EOL>request = datasets.delete(projectId=project_id,<EOL>datasetId=dataset_id,<EOL>deleteContents=delete_contents)<EOL>response = request.execute(num_retries=self.num_retries)<EOL>if self.swallow_results:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return response<EOL><DEDENT><DEDENT>except HttpError as e:<EOL><INDENT>logger.error(<EOL>'<STR_LIT>'.format(dataset_id, e))<EOL>if self.swallow_results:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT><DEDENT>
|
Delete a BigQuery dataset.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceId of the dataset)
delete_contents : bool, optional
If True, forces the deletion of the dataset even when the dataset
contains data (Default = False)
project_id: str, optional
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
bool indicating if the delete was successful or not, or response
from BigQuery if swallow_results is set to False
Raises
-------
HttpError
404 when dataset with dataset_id does not exist
|
f9586:c0:m40
|
def update_dataset(self, dataset_id, friendly_name=None, description=None,<EOL>access=None, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>try: <EOL><INDENT>datasets = self.bigquery.datasets()<EOL>body = self.dataset_resource(dataset_id, <EOL>friendly_name=friendly_name,<EOL>description=description, <EOL>access=access,<EOL>project_id=project_id)<EOL>request = datasets.update(projectId=project_id,<EOL>datasetId=dataset_id,<EOL>body=body)<EOL>response = request.execute(num_retries=self.num_retries)<EOL>if self.swallow_results:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return response<EOL><DEDENT><DEDENT>except HttpError as e:<EOL><INDENT>logger.error(<EOL>'<STR_LIT>'.format(dataset_id, e))<EOL>if self.swallow_results:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT><DEDENT>
|
Updates information in an existing dataset. The update method
replaces the entire dataset resource, whereas the patch method only
replaces fields that are provided in the submitted dataset resource.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceId of the dataset)
friendly_name : str, optional
An optional descriptive name for the dataset.
description : str, optional
An optional description of the dataset.
access : list, optional
Indicating access permissions
project_id: str, optional
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if the update was successful or not, or
response from BigQuery if swallow_results is set to False.
|
f9586:c0:m41
|
def patch_dataset(self, dataset_id, friendly_name=None, description=None,<EOL>access=None, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>try: <EOL><INDENT>datasets = self.bigquery.datasets()<EOL>body = self.dataset_resource(dataset_id, <EOL>friendly_name=friendly_name,<EOL>description=description, <EOL>access=access,<EOL>project_id=project_id)<EOL>request = datasets.patch(projectId=project_id,<EOL>datasetId=dataset_id, body=body)<EOL>response = request.execute(num_retries=self.num_retries)<EOL>if self.swallow_results:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return response<EOL><DEDENT><DEDENT>except HttpError as e:<EOL><INDENT>logger.error('<STR_LIT>'.format(dataset_id, e))<EOL>if self.swallow_results:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT><DEDENT>
|
Updates information in an existing dataset. The update method
replaces the entire dataset resource, whereas the patch method only
replaces fields that are provided in the submitted dataset resource.
Parameters
----------
dataset_id : str
Unique string identifying the dataset with the project (the
referenceId of the dataset)
friendly_name : str, optional
An optional descriptive name for the dataset.
description : str, optional
An optional description of the dataset.
access : list, optional
Indicating access permissions.
project_id: str, optional
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if the patch was successful or not, or response
from BigQuery if swallow_results is set to False.
|
f9586:c0:m42
|
def dataset_resource(self, ref_id, friendly_name=None, description=None,<EOL>access=None, location=None, project_id=None):
|
project_id = self._get_project_id(project_id)<EOL>data = {<EOL>"<STR_LIT>": {<EOL>"<STR_LIT>": ref_id,<EOL>"<STR_LIT>": project_id<EOL>}<EOL>}<EOL>if friendly_name:<EOL><INDENT>data["<STR_LIT>"] = friendly_name<EOL><DEDENT>if description:<EOL><INDENT>data["<STR_LIT:description>"] = description<EOL><DEDENT>if access:<EOL><INDENT>data["<STR_LIT>"] = access<EOL><DEDENT>if location:<EOL><INDENT>data["<STR_LIT:location>"] = location<EOL><DEDENT>return data<EOL>
|
See
https://developers.google.com/bigquery/docs/reference/v2/datasets#resource
Parameters
----------
ref_id : str
Dataset id (the reference id, not the integer id)
friendly_name : str, optional
An optional descriptive name for the dataset
description : str, optional
An optional description for the dataset
access : list, optional
Indicating access permissions
location : str, optional
An optional geographical location for the dataset: 'EU' or 'US'
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
dict
Representing BigQuery dataset resource
|
f9586:c0:m43
|
@classmethod<EOL><INDENT>def schema_from_record(cls, record):<DEDENT>
|
from bigquery.schema_builder import schema_from_record<EOL>return schema_from_record(record)<EOL>
|
Given a dict representing a record instance to be inserted into
BigQuery, calculate the schema.
Parameters
----------
record : dict
representing a record to be inserted into big query,
where all keys are ``str`` objects (representing column names in
the record) and all values are of type ``int``, ``str``,
``unicode``, ``float``, ``bool``, ``datetime``, or ``dict``. A
``dict`` value represents a record, and must conform to the same
restrictions as record.
Returns
-------
list
BigQuery schema
Notes
-----
Results are undefined if a different value type is provided for a
repeated field: E.g.
>>> { rfield: [ { x: 1}, {x: "a string"} ] } # undefined!
|
f9586:c0:m44
|
def render_query(dataset, tables, select=None, conditions=None,<EOL>groupings=None, having=None, order_by=None, limit=None):
|
if None in (dataset, tables):<EOL><INDENT>return None<EOL><DEDENT>query = "<STR_LIT>" % (<EOL>_render_select(select),<EOL>_render_sources(dataset, tables),<EOL>_render_conditions(conditions),<EOL>_render_groupings(groupings),<EOL>_render_having(having),<EOL>_render_order(order_by),<EOL>_render_limit(limit)<EOL>)<EOL>return query<EOL>
|
Render a query that will run over the given tables using the specified
parameters.
Parameters
----------
dataset : str
The BigQuery dataset to query data from
tables : Union[dict, list]
The table(s) in `dataset` to query.
select : dict, optional
The keys function as column names and the values function as options to
apply to the select field such as alias and format. For example,
select['start_time'] might have the form
{'alias': 'StartTime', 'format': 'INTEGER-FORMAT_UTC_USEC'}, which
would be represented as 'SEC_TO_TIMESTAMP(INTEGER(start_time)) as
StartTime' in a query. Pass `None` to select all.
conditions : list, optional
a ``list`` of ``dict`` objects to filter results by. Each dict should
have the keys 'field', 'type', and 'comparators'. The first two map to
strings representing the field (e.g. 'foo') and type (e.g. 'FLOAT').
'comparators' maps to another ``dict`` containing the keys 'condition',
'negate', and 'value'.
If 'comparators' = {'condition': '>=', 'negate': False, 'value': 1},
this example will be rendered as 'foo >= FLOAT('1')' in the query.
groupings : list, optional
A ``list`` of field names to group by
order_by : dict, optional
Keys = {'fields', 'direction'}. The `dict` should be formatted as
{'fields': ['TimeStamp'], 'direction': 'desc'} or similar
limit : int, optional
Limit the number of rows returned.
Returns
-------
str
A rendered query
|
f9587:m0
|
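The parameter shapes documented above can be assembled as in this sketch; the dataset, table, and column names are hypothetical, the import path is an assumption, and the exact SQL produced depends on the (anonymized) template strings in the body.

# Hypothetical arguments in the shapes render_query() documents.
select = {
    'start_time': {'alias': 'StartTime',
                   'format': 'INTEGER-FORMAT_UTC_USEC'},
    'status': {},
}
conditions = [
    {'field': 'status', 'type': 'STRING',
     'comparators': [{'condition': '==', 'negate': False, 'value': 'ok'}]},
]
groupings = ['status']
order_by = {'fields': ['start_time'], 'direction': 'desc'}

# A call might then look like this (the import path is an assumption):
# from bigquery.query_builder import render_query
# query = render_query('my_dataset', ['my_table'], select=select,
#                      conditions=conditions, groupings=groupings,
#                      order_by=order_by, limit=100)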
def _render_select(selections):
|
if not selections:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>rendered_selections = []<EOL>for name, options in selections.items():<EOL><INDENT>if not isinstance(options, list):<EOL><INDENT>options = [options]<EOL><DEDENT>original_name = name<EOL>for options_dict in options:<EOL><INDENT>name = original_name<EOL>alias = options_dict.get('<STR_LIT>')<EOL>alias = "<STR_LIT>" % alias if alias else "<STR_LIT>"<EOL>formatter = options_dict.get('<STR_LIT>')<EOL>if formatter:<EOL><INDENT>name = _format_select(formatter, name)<EOL><DEDENT>rendered_selections.append("<STR_LIT>" % (name, alias))<EOL><DEDENT><DEDENT>return "<STR_LIT>" + "<STR_LIT:U+002CU+0020>".join(rendered_selections)<EOL>
|
Render the selection part of a query.
Parameters
----------
selections : dict
Selections for a table
Returns
-------
str
A string for the "select" part of a query
See Also
--------
render_query : Further clarification of `selections` dict formatting
|
f9587:m1
|
def _format_select(formatter, name):
|
for caster in formatter.split('<STR_LIT:->'):<EOL><INDENT>if caster == '<STR_LIT>':<EOL><INDENT>name = "<STR_LIT>" % name<EOL><DEDENT>elif '<STR_LIT::>' in caster:<EOL><INDENT>caster, args = caster.split('<STR_LIT::>')<EOL>name = "<STR_LIT>" % (caster, name, args)<EOL><DEDENT>else:<EOL><INDENT>name = "<STR_LIT>" % (caster, name)<EOL><DEDENT><DEDENT>return name<EOL>
|
Modify the query selector by applying any formatters to it.
Parameters
----------
formatter : str
Hyphen-delimited formatter string where formatters are
applied inside-out, e.g. the formatter string
SEC_TO_MICRO-INTEGER-FORMAT_UTC_USEC applied to the selector
foo would result in FORMAT_UTC_USEC(INTEGER(foo*1000000)).
name: str
The name of the selector to apply formatters to.
Returns
-------
str
The formatted selector
|
f9587:m2
|
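A minimal re-implementation of the formatter chain described above; the SEC_TO_MICRO behaviour (multiply by 1,000,000) is inferred from the docstring example and is therefore an assumption.

# Sketch of the hyphen-delimited formatter chain: casters are applied
# left to right, each wrapping the selector built so far. A 'CASTER:args'
# segment becomes CASTER(selector,args).
def format_select_sketch(formatter, name):
    for caster in formatter.split('-'):
        if caster == 'SEC_TO_MICRO':          # assumed: seconds -> usec
            name = "%s*1000000" % name
        elif ':' in caster:
            caster, args = caster.split(':')
            name = "%s(%s,%s)" % (caster, name, args)
        else:
            name = "%s(%s)" % (caster, name)
    return name

print(format_select_sketch('SEC_TO_MICRO-INTEGER-FORMAT_UTC_USEC', 'foo'))
# -> FORMAT_UTC_USEC(INTEGER(foo*1000000))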
def _render_sources(dataset, tables):
|
if isinstance(tables, dict):<EOL><INDENT>if tables.get('<STR_LIT>', False):<EOL><INDENT>try:<EOL><INDENT>dataset_table = '<STR_LIT:.>'.join([dataset, tables['<STR_LIT>']])<EOL>return "<STR_LIT>""<STR_LIT>".format(dataset_table,<EOL>tables['<STR_LIT>'],<EOL>tables['<STR_LIT>'])<EOL><DEDENT>except KeyError as exp:<EOL><INDENT>logger.warn(<EOL>'<STR_LIT>' % (exp))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>return "<STR_LIT>" + "<STR_LIT:U+002CU+0020>".join(<EOL>["<STR_LIT>" % (dataset, table) for table in tables])<EOL><DEDENT>
|
Render the source part of a query.
Parameters
----------
dataset : str
The dataset to fetch log data from.
tables : Union[dict, list]
The tables to fetch log data from.
Returns
-------
str
A string that represents the "from" part of a query.
|
f9587:m3
|
def _render_conditions(conditions):
|
if not conditions:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>rendered_conditions = []<EOL>for condition in conditions:<EOL><INDENT>field = condition.get('<STR_LIT>')<EOL>field_type = condition.get('<STR_LIT:type>')<EOL>comparators = condition.get('<STR_LIT>')<EOL>if None in (field, field_type, comparators) or not comparators:<EOL><INDENT>logger.warn('<STR_LIT>' % condition)<EOL>continue<EOL><DEDENT>rendered_conditions.append(<EOL>_render_condition(field, field_type, comparators))<EOL><DEDENT>if not rendered_conditions:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>return "<STR_LIT>" % ("<STR_LIT>".join(rendered_conditions))<EOL>
|
Render the conditions part of a query.
Parameters
----------
conditions : list
A list of dictionary items to filter a table.
Returns
-------
str
A string that represents the "where" part of a query
See Also
--------
render_query : Further clarification of `conditions` formatting.
|
f9587:m4
|
def _render_condition(field, field_type, comparators):
|
field_type = field_type.upper()<EOL>negated_conditions, normal_conditions = [], []<EOL>for comparator in comparators:<EOL><INDENT>condition = comparator.get("<STR_LIT>").upper()<EOL>negated = "<STR_LIT>" if comparator.get("<STR_LIT>") else "<STR_LIT>"<EOL>value = comparator.get("<STR_LIT:value>")<EOL>if condition == "<STR_LIT>":<EOL><INDENT>if isinstance(value, (list, tuple, set)):<EOL><INDENT>value = '<STR_LIT:U+002CU+0020>'.join(<EOL>sorted([_render_condition_value(v, field_type)<EOL>for v in value])<EOL>)<EOL><DEDENT>else:<EOL><INDENT>value = _render_condition_value(value, field_type)<EOL><DEDENT>value = "<STR_LIT:(>" + value + "<STR_LIT:)>"<EOL><DEDENT>elif condition == "<STR_LIT>" or condition == "<STR_LIT>":<EOL><INDENT>return field + "<STR_LIT:U+0020>" + condition<EOL><DEDENT>elif condition == "<STR_LIT>":<EOL><INDENT>if isinstance(value, (tuple, list, set)) and len(value) == <NUM_LIT:2>:<EOL><INDENT>value = '<STR_LIT>'.join(<EOL>sorted([_render_condition_value(v, field_type)<EOL>for v in value])<EOL>)<EOL><DEDENT>elif isinstance(value, (tuple, list, set)) and len(value) != <NUM_LIT:2>:<EOL><INDENT>logger.warn('<STR_LIT>' % condition)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>value = _render_condition_value(value, field_type)<EOL><DEDENT>rendered_sub_condition = "<STR_LIT>" % (<EOL>negated, field, condition, value)<EOL>if comparator.get("<STR_LIT>"):<EOL><INDENT>negated_conditions.append(rendered_sub_condition)<EOL><DEDENT>else:<EOL><INDENT>normal_conditions.append(rendered_sub_condition)<EOL><DEDENT><DEDENT>rendered_normal = "<STR_LIT>".join(normal_conditions)<EOL>rendered_negated = "<STR_LIT>".join(negated_conditions)<EOL>if rendered_normal and rendered_negated:<EOL><INDENT>return "<STR_LIT>" % (rendered_normal, rendered_negated)<EOL><DEDENT>return "<STR_LIT>" % (rendered_normal or rendered_negated)<EOL>
|
Render a single query condition.
Parameters
----------
field : str
The field the condition applies to
field_type : str
The data type of the field.
comparators : array_like
An iterable of logic operators to use.
Returns
-------
str
A condition string.
|
f9587:m5
|
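To make the comparator structure concrete, here is a hypothetical condition and a simplified rendering for the plain scalar case; the real _render_condition also handles IN, IS NULL/IS NOT NULL, BETWEEN, and negation grouping, and its exact output strings are anonymized in this dump.

# Hypothetical condition in the documented shape, plus a simplified
# rendering sketch for scalar comparators only.
condition = {
    'field': 'foo',
    'type': 'FLOAT',
    'comparators': [
        {'condition': '>=', 'negate': False, 'value': 1},
        {'condition': '<=', 'negate': False, 'value': 10},
    ],
}

def render_condition_sketch(cond):
    parts = []
    for comp in cond['comparators']:
        value = "%s('%s')" % (cond['type'], comp['value'])
        prefix = 'NOT ' if comp['negate'] else ''
        parts.append("%s%s %s %s" % (prefix, cond['field'],
                                     comp['condition'], value))
    return ' AND '.join(parts)

print(render_condition_sketch(condition))
# -> foo >= FLOAT('1') AND foo <= FLOAT('10')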
def _render_condition_value(value, field_type):
|
<EOL>if field_type == "<STR_LIT>":<EOL><INDENT>value = <NUM_LIT:1> if value else <NUM_LIT:0><EOL><DEDENT>elif field_type in ("<STR_LIT>", "<STR_LIT>", "<STR_LIT>"):<EOL><INDENT>value = "<STR_LIT>" % (value)<EOL><DEDENT>elif field_type in ("<STR_LIT>"):<EOL><INDENT>value = "<STR_LIT>" % (str(value))<EOL><DEDENT>return "<STR_LIT>" % (field_type, value)<EOL>
|
Render a query condition value.
Parameters
----------
value : Union[bool, int, float, str, datetime]
The value of the condition
field_type : str
The data type of the field
Returns
-------
str
A value string.
|
f9587:m6
|
def _render_groupings(fields):
|
if not fields:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>return "<STR_LIT>" + "<STR_LIT:U+002CU+0020>".join(fields)<EOL>
|
Render the group by part of a query.
Parameters
----------
fields : list
A list of fields to group by.
Returns
-------
str
A string that represents the "group by" part of a query.
|
f9587:m7
|
def _render_having(having_conditions):
|
if not having_conditions:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>rendered_conditions = []<EOL>for condition in having_conditions:<EOL><INDENT>field = condition.get('<STR_LIT>')<EOL>field_type = condition.get('<STR_LIT:type>')<EOL>comparators = condition.get('<STR_LIT>')<EOL>if None in (field, field_type, comparators) or not comparators:<EOL><INDENT>logger.warn('<STR_LIT>' % condition)<EOL>continue<EOL><DEDENT>rendered_conditions.append(<EOL>_render_condition(field, field_type, comparators))<EOL><DEDENT>if not rendered_conditions:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>return "<STR_LIT>" % ("<STR_LIT>".join(rendered_conditions))<EOL>
|
Render the having part of a query.
Parameters
----------
having_conditions : list
A ``list`` of ``dict``s to filter the rows
Returns
-------
str
A string that represents the "having" part of a query.
See Also
--------
render_query : Further clarification of `conditions` formatting.
|
f9587:m8
|
def _render_order(order):
|
if not order or '<STR_LIT>' not in order or '<STR_LIT>' not in order:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return "<STR_LIT>" % ("<STR_LIT:U+002CU+0020>".join(order['<STR_LIT>']), order['<STR_LIT>'])<EOL>
|
Render the order by part of a query.
Parameters
----------
order : dict
A dictionary with two keys, fields and direction.
Such that the dictionary should be formatted as
{'fields': ['TimeStamp'], 'direction':'desc'}.
Returns
-------
str
A string that represents the "order by" part of a query.
|
f9587:m9
|
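A minimal sketch of the ORDER BY rendering, assuming the documented {'fields': [...], 'direction': ...} shape.

# Sketch only: join the field list and append the direction, or return
# an empty string when the dict is missing either key.
def render_order_sketch(order):
    if not order or 'fields' not in order or 'direction' not in order:
        return ''
    return "ORDER BY %s %s" % (", ".join(order['fields']),
                               order['direction'])

print(render_order_sketch({'fields': ['TimeStamp'], 'direction': 'desc'}))
# -> ORDER BY TimeStamp desc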
def _render_limit(limit):
|
if not limit:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return "<STR_LIT>" % limit<EOL>
|
Render the limit part of a query.
Parameters
----------
limit : int, optional
Limit the number of rows returned.
Returns
-------
str
A string that represents the "limit" part of a query.
|
f9587:m10
|
def schema_from_record(record, timestamp_parser=default_timestamp_parser):
|
return [describe_field(k, v, timestamp_parser=timestamp_parser)<EOL>for k, v in list(record.items())]<EOL>
|
Generate a BigQuery schema given an example of a record that is to be
inserted into BigQuery.
Parameters
----------
record : dict
Example of a record that is to be inserted into BigQuery
timestamp_parser : function, optional
Unary function taking a ``str`` and returning a ``bool`` that is
True if the string represents a date
Returns
-------
list
The BigQuery schema
|
f9589:m1
|
def describe_field(k, v, timestamp_parser=default_timestamp_parser):
|
def bq_schema_field(name, bq_type, mode):<EOL><INDENT>return {"<STR_LIT:name>": name, "<STR_LIT:type>": bq_type, "<STR_LIT>": mode}<EOL><DEDENT>if isinstance(v, list):<EOL><INDENT>if len(v) == <NUM_LIT:0>:<EOL><INDENT>raise Exception(<EOL>"<STR_LIT>".format(k))<EOL><DEDENT>v = v[<NUM_LIT:0>]<EOL>mode = "<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>mode = "<STR_LIT>"<EOL><DEDENT>bq_type = bigquery_type(v, timestamp_parser=timestamp_parser)<EOL>if not bq_type:<EOL><INDENT>raise InvalidTypeException(k, v)<EOL><DEDENT>field = bq_schema_field(k, bq_type, mode)<EOL>if bq_type == "<STR_LIT>":<EOL><INDENT>try:<EOL><INDENT>field['<STR_LIT>'] = schema_from_record(v, timestamp_parser)<EOL><DEDENT>except InvalidTypeException as e:<EOL><INDENT>raise InvalidTypeException("<STR_LIT>" % (k, e.key), e.value)<EOL><DEDENT><DEDENT>return field<EOL>
|
Given a key representing a column name and value representing the value
stored in the column, return a representation of the BigQuery schema
element describing that field. Raise errors if invalid value types are
provided.
Parameters
----------
k : Union[str, unicode]
Key representing the column
v : Union[str, unicode, int, float, datetime, object]
Value mapped to by `k`
Returns
-------
dict
The BigQuery schema element describing the field
Raises
------
Exception
If invalid value types are provided.
Examples
--------
>>> describe_field("username", "Bob")
{"name": "username", "type": "string", "mode": "nullable"}
>>> describe_field("users", [{"username": "Bob"}])
{"name": "users", "type": "record", "mode": "repeated",
"fields": [{"name":"username","type":"string","mode":"nullable"}]}
|
f9589:m2
|
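Extending the docstring examples, a nested record and the kind of schema schema_from_record()/describe_field() are documented to produce might look like this; the lower-case type and mode strings follow the examples above and are otherwise an assumption.

# Hypothetical record and the schema shape the docstrings describe.
record = {
    "username": "Bob",
    "id": 123,
    "score": 45.6,
    "friends": [{"username": "Alice"}],
}

expected_schema_shape = [
    {"name": "username", "type": "string", "mode": "nullable"},
    {"name": "id", "type": "integer", "mode": "nullable"},
    {"name": "score", "type": "float", "mode": "nullable"},
    {"name": "friends", "type": "record", "mode": "repeated",
     "fields": [{"name": "username", "type": "string",
                 "mode": "nullable"}]},
]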
def bigquery_type(o, timestamp_parser=default_timestamp_parser):
|
t = type(o)<EOL>if t in six.integer_types:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>elif (t == six.binary_type and six.PY2) or t == six.text_type:<EOL><INDENT>if timestamp_parser and timestamp_parser(o):<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>return "<STR_LIT:string>"<EOL><DEDENT><DEDENT>elif t == float:<EOL><INDENT>return "<STR_LIT:float>"<EOL><DEDENT>elif t == bool:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>elif t == dict:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>elif t == datetime:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>
|
Given a value, return the matching BigQuery type of that value. Must be
one of str/unicode/int/float/datetime/record, where record is a dict
containing values that themselves have matching BigQuery types.
Parameters
----------
o : object
A Python object
timestamp_parser : function, optional
Unary function taking a ``str`` and returning a ``bool`` that is
True if the string represents a date
Returns
-------
Union[str, None]
Name of the corresponding BigQuery type for `o`, or None if no type
could be found
Examples
--------
>>> bigquery_type("abc")
"string"
>>> bigquery_type(123)
"integer"
|
f9589:m3
|
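A hedged sketch of the Python-type to BigQuery-type mapping the docstring describes; the real function also runs string values through the timestamp parser to detect dates.

# Sketch only: dispatch on the Python type of the value. bool is checked
# before int because bool is an int subclass under isinstance().
from datetime import datetime

def bigquery_type_sketch(o):
    if isinstance(o, bool):
        return "boolean"
    if isinstance(o, int):
        return "integer"
    if isinstance(o, float):
        return "float"
    if isinstance(o, str):
        return "string"
    if isinstance(o, dict):
        return "record"
    if isinstance(o, datetime):
        return "timestamp"
    return None

print(bigquery_type_sketch("abc"), bigquery_type_sketch(123))
# -> string integer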
def confirm(text, default=True):
|
if default:<EOL><INDENT>legend = "<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>legend = "<STR_LIT>"<EOL><DEDENT>res = "<STR_LIT>"<EOL>while (res != "<STR_LIT:y>") and (res != "<STR_LIT:n>"):<EOL><INDENT>res = input(text + "<STR_LIT>".format(legend)).lower()<EOL>if not res and default:<EOL><INDENT>res = "<STR_LIT:y>"<EOL><DEDENT>elif not res and not default:<EOL><INDENT>res = "<STR_LIT:n>"<EOL><DEDENT><DEDENT>if res[<NUM_LIT:0>] == "<STR_LIT:y>":<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>
|
Console confirmation dialog based on raw_input.
|
f9593:m0
|
def ensure_dir(d):
|
if not os.path.exists(d):<EOL><INDENT>os.makedirs(d)<EOL><DEDENT>
|
Check whether the directory exists, and create an empty one if not.
|
f9593:m1
|
def read_file(fname):
|
res = []<EOL>try:<EOL><INDENT>with open(fname, '<STR_LIT:r>') as f:<EOL><INDENT>for line in f:<EOL><INDENT>line = line.rstrip('<STR_LIT:\n>').rstrip('<STR_LIT:\r>')<EOL>if line and (line[<NUM_LIT:0>] != '<STR_LIT:#>'):<EOL><INDENT>regexline = "<STR_LIT>" + re.sub("<STR_LIT>", "<STR_LIT>", line) + "<STR_LIT>"<EOL>res.append(regexline.lower())<EOL><DEDENT><DEDENT><DEDENT><DEDENT>except IOError:<EOL><INDENT>pass<EOL><DEDENT>return res<EOL>
|
Read file, convert wildcards into regular expressions, skip empty lines
and comments.
|
f9593:m2
|
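A hedged sketch of the wildcard handling the docstring describes; fnmatch.translate is used here purely for illustration, since the exact translation in the original body is anonymized.

# Illustrative only: skip blank lines and '#' comments, lower-case each
# remaining line, and turn shell-style wildcards into anchored regexes.
import fnmatch

def read_filter_lines_sketch(lines):
    patterns = []
    for line in lines:
        line = line.rstrip('\r\n')
        if line and not line.startswith('#'):
            patterns.append(fnmatch.translate(line.lower()))
    return patterns

print(read_filter_lines_sketch(["# comment", "", "Some.Show.S01E*"]))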
def drop_it(title, filters, blacklist):
|
title = title.lower()<EOL>matched = False<EOL>for f in filters:<EOL><INDENT>if re.match(f, title):<EOL><INDENT>matched = True<EOL><DEDENT><DEDENT>if not matched:<EOL><INDENT>return True<EOL><DEDENT>for b in blacklist:<EOL><INDENT>if re.match(b, title):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>
|
A torrent is kept only if its title matches the filters list and does not match the blacklist; return True if it should be dropped.
|
f9593:m3
|
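The keep/drop rule above can be restated as a small sketch: a title is kept only if it matches at least one filter pattern and no blacklist pattern.

# Sketch of the documented rule; patterns are regular expressions and
# matching is done on the lower-cased title, as in the body above.
import re

def drop_it_sketch(title, filters, blacklist):
    title = title.lower()
    if not any(re.match(f, title) for f in filters):
        return True                                    # not wanted at all
    return any(re.match(b, title) for b in blacklist)  # explicitly banned

print(drop_it_sketch("Some.Show.S01E02.mkv",
                     filters=[r"some\.show\..*"],
                     blacklist=[r".*720p.*"]))
# -> False (matches a filter and is not blacklisted)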
def do_list():
|
dirs = os.walk(CONFIG_ROOT).next()[<NUM_LIT:1>]<EOL>if dirs:<EOL><INDENT>print("<STR_LIT>")<EOL>for d in dirs:<EOL><INDENT>print("<STR_LIT>".format(d))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>
|
CLI action "list configurations".
|
f9593:m4
|
def do_create(config, config_dir):
|
if os.path.exists(config_dir):<EOL><INDENT>print("<STR_LIT>".format(config))<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT>os.makedirs(config_dir)<EOL>print("<STR_LIT>")<EOL>url = input("<STR_LIT>")<EOL>torrent_dir = input("<STR_LIT>".format(DEFAULT_TORRRENT_DIR)) or DEFAULT_TORRRENT_DIR<EOL>update_interval = input("<STR_LIT>".format(DEFAULT_UPDATE_INTERVAL)) or DEFAULT_UPDATE_INTERVAL<EOL>editor = os.environ["<STR_LIT>"]<EOL>config_filter = os.path.join(config_dir, '<STR_LIT>')<EOL>if confirm("<STR_LIT>", False):<EOL><INDENT>call([editor, config_filter])<EOL>print("<STR_LIT>")<EOL><DEDENT>config_blacklist = os.path.join(config_dir, '<STR_LIT>')<EOL>if confirm("<STR_LIT>", False):<EOL><INDENT>call([editor, config_filter])<EOL>print("<STR_LIT>")<EOL><DEDENT>config_file = os.path.join(config_dir, '<STR_LIT>')<EOL>config_data = json.dumps({<EOL>"<STR_LIT:url>": url,<EOL>"<STR_LIT>": torrent_dir,<EOL>"<STR_LIT>": update_interval<EOL>}, sort_keys=True, indent=<NUM_LIT:4>, separators=('<STR_LIT:U+002C>', '<STR_LIT>'))<EOL>with open(config_file, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(config_data)<EOL><DEDENT>ct = CronTab(user=True)<EOL>cmd = "<STR_LIT>".format(sys.executable,<EOL>os.path.abspath(__file__),<EOL>config)<EOL>job = ct.new(command=cmd)<EOL>job.minute.every(update_interval)<EOL>job.enable()<EOL>ct.write()<EOL>print("<STR_LIT>")<EOL>print("<STR_LIT>".format(config))<EOL>
|
CLI action "create new configuration".
|
f9593:m5
|
def do_update(config, config_dir):
|
if not os.path.exists(config_dir):<EOL><INDENT>print("<STR_LIT>".format(config))<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT>config_file = os.path.join(config_dir, '<STR_LIT>')<EOL>with open(config_file, '<STR_LIT:r>') as f:<EOL><INDENT>old_config_data = json.load(f)<EOL><DEDENT>old_url = old_config_data['<STR_LIT:url>']<EOL>old_torrent_dir = old_config_data['<STR_LIT>']<EOL>old_update_interval = old_config_data['<STR_LIT>']<EOL>url = input("<STR_LIT>".format(old_url)) or old_url<EOL>torrent_dir = input("<STR_LIT>".format(old_torrent_dir)) or old_torrent_dir<EOL>update_interval = input("<STR_LIT>".format(old_update_interval)) or old_update_interval<EOL>editor = os.environ["<STR_LIT>"]<EOL>config_filter = os.path.join(config_dir, '<STR_LIT>')<EOL>if confirm("<STR_LIT>", False):<EOL><INDENT>call([editor, config_filter])<EOL>print("<STR_LIT>")<EOL><DEDENT>config_blacklist = os.path.join(config_dir, '<STR_LIT>')<EOL>if confirm("<STR_LIT>", False):<EOL><INDENT>call([editor, config_filter])<EOL>print("<STR_LIT>")<EOL><DEDENT>config_data = json.dumps({<EOL>"<STR_LIT:url>": url,<EOL>"<STR_LIT>": torrent_dir,<EOL>"<STR_LIT>": update_interval<EOL>}, sort_keys=True, indent=<NUM_LIT:4>, separators=('<STR_LIT:U+002C>', '<STR_LIT>'))<EOL>with open(config_file, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(config_data)<EOL><DEDENT>ct = CronTab(user=True)<EOL>for job in ct:<EOL><INDENT>if re.match('<STR_LIT>'.format(config), job.command):<EOL><INDENT>ct.remove(job)<EOL><DEDENT><DEDENT>cmd = "<STR_LIT>".format(sys.executable,<EOL>os.path.abspath(__file__),<EOL>config)<EOL>new_job = ct.new(command=cmd)<EOL>new_job.minute.every(update_interval)<EOL>new_job.enable()<EOL>ct.write()<EOL>print("<STR_LIT>")<EOL>print("<STR_LIT>".format(config))<EOL>
|
CLI action "update new configuration".
|
f9593:m6
|
def do_remove(config, config_dir):
|
if not os.path.exists(config_dir):<EOL><INDENT>print("<STR_LIT>".format(config))<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT>if confirm("<STR_LIT>".format(config)):<EOL><INDENT>shutil.rmtree(config_dir)<EOL>print("<STR_LIT>".format(config))<EOL><DEDENT>else:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>
|
CLI action "remove configuration".
|
f9593:m7
|
def do_exec(config, config_dir):
|
if not os.path.exists(config_dir):<EOL><INDENT>print("<STR_LIT>".format(config))<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT>print("<STR_LIT>".format(config))<EOL>config_file = os.path.join(config_dir, '<STR_LIT>')<EOL>with open(config_file, '<STR_LIT:r>') as f:<EOL><INDENT>config_data = json.load(f)<EOL><DEDENT>url = config_data['<STR_LIT:url>']<EOL>torrent_dir = config_data['<STR_LIT>']<EOL>ensure_dir(torrent_dir)<EOL>filters_file = os.path.join(config_dir, '<STR_LIT>')<EOL>filters = read_file(filters_file)<EOL>blacklist_file = os.path.join(config_dir, '<STR_LIT>')<EOL>blacklist = read_file(blacklist_file)<EOL>print("<STR_LIT>".format(url))<EOL>r = requests.get(url)<EOL>if r.status_code != <NUM_LIT:200>:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>xml = r.text.encode('<STR_LIT:utf-8>')<EOL>parser = etree.XMLParser(ns_clean=True, recover=True, encoding='<STR_LIT:utf-8>')<EOL>tree = etree.fromstring(xml, parser)<EOL>items = tree.xpath('<STR_LIT>')<EOL>downloaded = <NUM_LIT:0><EOL>for e in items:<EOL><INDENT>e_title = e.xpath('<STR_LIT:title>')[<NUM_LIT:0>].text<EOL>e_link = e.xpath('<STR_LIT>')[<NUM_LIT:0>].text<EOL>if not drop_it(e_title, filters, blacklist):<EOL><INDENT>downloaded += <NUM_LIT:1><EOL>target_file = os.path.join(torrent_dir, e_title + '<STR_LIT>')<EOL>r = requests.get(e_link, stream=True)<EOL>with open(target_file, '<STR_LIT:wb>') as f:<EOL><INDENT>for chunk in r.iter_content(<NUM_LIT>):<EOL><INDENT>f.write(chunk)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>print("<STR_LIT>".format(len(items), downloaded))<EOL>
|
CLI action "process the feed from specified configuration".
|
f9593:m8
|
def do_filter(config, config_dir):
|
if not os.path.exists(config_dir):<EOL><INDENT>print("<STR_LIT>".format(config))<EOL>exit(<NUM_LIT:1>) <EOL><DEDENT>editor = os.environ["<STR_LIT>"]<EOL>config_filter = os.path.join(config_dir, '<STR_LIT>')<EOL>call([editor, config_filter])<EOL>print("<STR_LIT>")<EOL>
|
CLI action "run editor for filters list".
|
f9593:m9
|
def do_blacklist(config, config_dir):
|
if not os.path.exists(config_dir):<EOL><INDENT>print("<STR_LIT>".format(config))<EOL>exit(<NUM_LIT:1>) <EOL><DEDENT>editor = os.environ["<STR_LIT>"]<EOL>config_blacklist = os.path.join(config_dir, '<STR_LIT>')<EOL>call([editor, config_blacklist])<EOL>print("<STR_LIT>")<EOL>
|
CLI action "run editor for blacklist".
|
f9593:m10
|
def action(act, config):
|
if not config:<EOL><INDENT>pass<EOL><DEDENT>elif act is "<STR_LIT:list>":<EOL><INDENT>do_list()<EOL><DEDENT>else:<EOL><INDENT>config_dir = os.path.join(CONFIG_ROOT, config)<EOL>globals()["<STR_LIT>" + act](config, config_dir)<EOL><DEDENT>
|
CLI action preprocessor.
|
f9593:m11
|
def register(opener_function):
|
openers.append(opener_function)<EOL>return opener_function<EOL>
|
Decorator that adds the decorated opener function to the list of openers.
:param opener_function: Opener function.
:return: Opener function.
|
f9595:m0
|
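A minimal sketch of the registration pattern used here: the decorator appends each opener to a module-level list and returns it unchanged, so definition order becomes the dispatch order tried by filehandles().

# Sketch of the opener registry pattern.
openers = []

def register(opener_function):
    openers.append(opener_function)
    return opener_function

@register
def dummy_opener(path, pattern='', verbose=False):
    yield open(path)

print([f.__name__ for f in openers])  # -> ['dummy_opener']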
def filehandles(path, openers_list=openers, pattern='<STR_LIT>', verbose=False):
|
if not verbose:<EOL><INDENT>logging.disable(logging.VERBOSE)<EOL><DEDENT>for opener in openers_list:<EOL><INDENT>try:<EOL><INDENT>for filehandle in opener(path=path, pattern=pattern, verbose=verbose):<EOL><INDENT>with closing(filehandle):<EOL><INDENT>yield filehandle<EOL><DEDENT><DEDENT>break <EOL><DEDENT>except (zipfile.BadZipfile, tarfile.ReadError, GZValidationError,<EOL>BZ2ValidationError, IOError, NotADirectoryError):<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>logger.verbose('<STR_LIT>'.format(path))<EOL>yield None<EOL><DEDENT><DEDENT>
|
Main function that iterates over list of openers and decides which opener to use.
:param str path: Path.
:param list openers_list: List of openers.
:param str pattern: Regular expression pattern.
:param verbose: Print additional information.
:type verbose: :py:obj:`True` or :py:obj:`False`
:return: Filehandle(s).
|
f9595:m1
|
@register<EOL>def directory_opener(path, pattern='<STR_LIT>', verbose=False):
|
if not os.path.isdir(path):<EOL><INDENT>raise NotADirectoryError<EOL><DEDENT>else:<EOL><INDENT>openers_list = [opener for opener in openers if not opener.__name__.startswith('<STR_LIT>')] <EOL>for root, dirlist, filelist in os.walk(path):<EOL><INDENT>for filename in filelist:<EOL><INDENT>if pattern and not re.match(pattern, filename):<EOL><INDENT>logger.verbose('<STR_LIT>'.format(os.path.abspath(filename), pattern))<EOL>continue<EOL><DEDENT>filename_path = os.path.abspath(os.path.join(root, filename))<EOL>for filehandle in filehandles(filename_path, openers_list=openers_list, pattern=pattern, verbose=verbose):<EOL><INDENT>yield filehandle<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
|
Directory opener.
:param str path: Path.
:param str pattern: Regular expression pattern.
:return: Filehandle(s).
|
f9595:m2
|
@register<EOL>def ziparchive_opener(path, pattern='<STR_LIT>', verbose=False):
|
with zipfile.ZipFile(io.BytesIO(urlopen(path).read()), '<STR_LIT:r>') if is_url(path) else zipfile.ZipFile(path, '<STR_LIT:r>') as ziparchive:<EOL><INDENT>for zipinfo in ziparchive.infolist():<EOL><INDENT>if not zipinfo.filename.endswith('<STR_LIT:/>'):<EOL><INDENT>source = os.path.join(path, zipinfo.filename)<EOL>if pattern and not re.match(pattern, zipinfo.filename):<EOL><INDENT>logger.verbose('<STR_LIT>'.format(os.path.abspath(zipinfo.filename), pattern))<EOL>continue<EOL><DEDENT>logger.verbose('<STR_LIT>'.format(source))<EOL>filehandle = ziparchive.open(zipinfo)<EOL>yield filehandle<EOL><DEDENT><DEDENT><DEDENT>
|
Opener that opens files from a zip archive.
:param str path: Path.
:param str pattern: Regular expression pattern.
:return: Filehandle(s).
|
f9595:m3
|
@register<EOL>def tararchive_opener(path, pattern='<STR_LIT>', verbose=False):
|
with tarfile.open(fileobj=io.BytesIO(urlopen(path).read())) if is_url(path) else tarfile.open(path) as tararchive:<EOL><INDENT>for tarinfo in tararchive:<EOL><INDENT>if tarinfo.isfile():<EOL><INDENT>source = os.path.join(path, tarinfo.name)<EOL>if pattern and not re.match(pattern, tarinfo.name):<EOL><INDENT>logger.verbose('<STR_LIT>'.format(os.path.abspath(tarinfo.name), pattern))<EOL>continue<EOL><DEDENT>logger.verbose('<STR_LIT>'.format(source))<EOL>filehandle = tararchive.extractfile(tarinfo)<EOL>yield filehandle<EOL><DEDENT><DEDENT><DEDENT>
|
Opener that opens files from a tar archive.
:param str path: Path.
:param str pattern: Regular expression pattern.
:return: Filehandle(s).
|
f9595:m4
|
@register<EOL>def gzip_opener(path, pattern='<STR_LIT>', verbose=False):
|
source = path if is_url(path) else os.path.abspath(path)<EOL>filename = os.path.basename(path)<EOL>if pattern and not re.match(pattern, filename):<EOL><INDENT>logger.verbose('<STR_LIT>'.format(os.path.abspath(filename), pattern))<EOL>return<EOL><DEDENT>try:<EOL><INDENT>filehandle = gzip.GzipFile(fileobj=io.BytesIO(urlopen(path).read())) if is_url(path) else gzip.open(path)<EOL>filehandle.read(<NUM_LIT:1>)<EOL>filehandle.seek(<NUM_LIT:0>)<EOL>logger.verbose('<STR_LIT>'.format(source))<EOL>yield filehandle<EOL><DEDENT>except (OSError, IOError):<EOL><INDENT>raise GZValidationError<EOL><DEDENT>
|
Opener that opens single gzip compressed file.
:param str path: Path.
:param str pattern: Regular expression pattern.
:return: Filehandle(s).
|
f9595:m5
|
@register<EOL>def bz2_opener(path, pattern='<STR_LIT>', verbose=False):
|
source = path if is_url(path) else os.path.abspath(path)<EOL>filename = os.path.basename(path)<EOL>if pattern and not re.match(pattern, filename):<EOL><INDENT>logger.verbose('<STR_LIT>'.format(os.path.abspath(path), pattern))<EOL>return<EOL><DEDENT>try:<EOL><INDENT>filehandle = bz2.open(io.BytesIO(urlopen(path).read())) if is_url(path) else bz2.open(path)<EOL>filehandle.read(<NUM_LIT:1>)<EOL>filehandle.seek(<NUM_LIT:0>)<EOL>logger.verbose('<STR_LIT>'.format(source))<EOL>yield filehandle<EOL><DEDENT>except (OSError, IOError):<EOL><INDENT>raise BZ2ValidationError<EOL><DEDENT>
|
Opener that opens single bz2 compressed file.
:param str path: Path.
:param str pattern: Regular expression pattern.
:return: Filehandle(s).
|
f9595:m6
|
@register<EOL>def text_opener(path, pattern='<STR_LIT>', verbose=False):
|
source = path if is_url(path) else os.path.abspath(path)<EOL>filename = os.path.basename(path)<EOL>if pattern and not re.match(pattern, filename):<EOL><INDENT>logger.verbose('<STR_LIT>'.format(os.path.abspath(path), pattern))<EOL>return<EOL><DEDENT>filehandle = urlopen(path) if is_url(path) else open(path)<EOL>logger.verbose('<STR_LIT>'.format(source))<EOL>yield filehandle<EOL>
|
Opener that opens single text file.
:param str path: Path.
:param str pattern: Regular expression pattern.
:return: Filehandle(s).
|
f9595:m7
|
def is_url(path):
|
try:<EOL><INDENT>parse_result = urlparse(path)<EOL>return all((parse_result.scheme, parse_result.netloc, parse_result.path))<EOL><DEDENT>except ValueError:<EOL><INDENT>return False<EOL><DEDENT>
|
Test if path represents a valid URL string.
:param str path: Path to file.
:return: True if path is valid url string, False otherwise.
:rtype: :py:obj:`True` or :py:obj:`False`
|
f9595:m8
|
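A self-contained version of the URL check described above: a path counts as a URL only when scheme, network location, and path are all present.

# Sketch matching the documented behaviour.
from urllib.parse import urlparse

def is_url_sketch(path):
    try:
        parts = urlparse(path)
        return all((parts.scheme, parts.netloc, parts.path))
    except ValueError:
        return False

print(is_url_sketch("https://example.org/data.tar.gz"))  # True
print(is_url_sketch("/tmp/data.tar.gz"))                 # False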
@register.inclusion_tag('<STR_LIT>')<EOL>def render_honeypot_field(field_name=None):
|
if not field_name:<EOL><INDENT>field_name = settings.HONEYPOT_FIELD_NAME<EOL><DEDENT>value = getattr(settings, '<STR_LIT>', '<STR_LIT>')<EOL>if callable(value):<EOL><INDENT>value = value()<EOL><DEDENT>return {'<STR_LIT>': field_name, '<STR_LIT:value>': value}<EOL>
|
Renders honeypot field named field_name (defaults to HONEYPOT_FIELD_NAME).
|
f9598:m0
|
def honeypot_equals(val):
|
expected = getattr(settings, '<STR_LIT>', '<STR_LIT>')<EOL>if callable(expected):<EOL><INDENT>expected = expected()<EOL><DEDENT>return val == expected<EOL>
|
Default verifier used if HONEYPOT_VERIFIER is not specified.
Ensures val == HONEYPOT_VALUE or HONEYPOT_VALUE() if it's a callable.
|
f9602:m0
|
def verify_honeypot_value(request, field_name):
|
verifier = getattr(settings, '<STR_LIT>', honeypot_equals)<EOL>if request.method == '<STR_LIT:POST>':<EOL><INDENT>field = field_name or settings.HONEYPOT_FIELD_NAME<EOL>if field not in request.POST or not verifier(request.POST[field]):<EOL><INDENT>resp = render_to_string('<STR_LIT>',<EOL>{'<STR_LIT>': field})<EOL>return HttpResponseBadRequest(resp)<EOL><DEDENT><DEDENT>
|
Verify that request.POST[field_name] is a valid honeypot.
Ensures that the field exists and passes verification according to
HONEYPOT_VERIFIER.
|
f9602:m1
|
def check_honeypot(func=None, field_name=None):
|
<EOL>if isinstance(func, six.string_types):<EOL><INDENT>func, field_name = field_name, func<EOL><DEDENT>def decorated(func):<EOL><INDENT>def inner(request, *args, **kwargs):<EOL><INDENT>response = verify_honeypot_value(request, field_name)<EOL>if response:<EOL><INDENT>return response<EOL><DEDENT>else:<EOL><INDENT>return func(request, *args, **kwargs)<EOL><DEDENT><DEDENT>return wraps(func, assigned=available_attrs(func))(inner)<EOL><DEDENT>if func is None:<EOL><INDENT>def decorator(func):<EOL><INDENT>return decorated(func)<EOL><DEDENT>return decorator<EOL><DEDENT>return decorated(func)<EOL>
|
Check request.POST for valid honeypot field.
Takes an optional field_name that defaults to HONEYPOT_FIELD_NAME if
not specified.
|
f9602:m2
|
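A hedged usage sketch for the decorator; the view names, template names, and import path are assumptions, and a configured Django project is required for the views to actually run.

# Hypothetical Django views showing both documented ways to apply the
# decorator; only the decorator itself comes from the code above.
from django.shortcuts import render
from honeypot.decorators import check_honeypot   # import path assumed

@check_honeypot                                  # uses HONEYPOT_FIELD_NAME
def contact(request):
    return render(request, 'contact.html')

@check_honeypot(field_name='phonenumber')        # explicit honeypot field
def signup(request):
    return render(request, 'signup.html')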
def honeypot_exempt(view_func):
|
<EOL>def wrapped(*args, **kwargs):<EOL><INDENT>return view_func(*args, **kwargs)<EOL><DEDENT>wrapped.honeypot_exempt = True<EOL>return wraps(view_func, assigned=available_attrs(view_func))(wrapped)<EOL>
|
Mark a view as exempt from honeypot validation.
|
f9602:m3
|
def u2handlers(self):
|
return []<EOL>
|
Override the suds HTTP transport, as it does not properly honor the local
system configuration for proxy settings.
Derived from https://gist.github.com/rbarrois/3721801
|
f9605:c0:m0
|
def __init__(self, wsdl=None, api_key=None, timeout=<NUM_LIT:5>):
|
if not wsdl:<EOL><INDENT>wsdl = os.environ['<STR_LIT>']<EOL><DEDENT>if not api_key:<EOL><INDENT>api_key = os.environ['<STR_LIT>']<EOL><DEDENT>self._soap_client = Client(wsdl, transport=WellBehavedHttpTransport())<EOL>self._soap_client.set_options(timeout=timeout)<EOL>token3 = Element('<STR_LIT>', ns=DARWIN_WEBSERVICE_NAMESPACE)<EOL>token_value = Element('<STR_LIT>', ns=DARWIN_WEBSERVICE_NAMESPACE)<EOL>token_value.setText(api_key)<EOL>token3.append(token_value)<EOL>self._soap_client.set_options(soapheaders=(token3))<EOL>
|
Constructor
Keyword arguments:
wsdl -- the URL of the Darwin LDB WSDL document. Will fall back to
using the DARWIN_WEBSERVICE_WSDL environment variable if not supplied
api_key -- a valid API key for the Darwin LDB webservice. Will fall
back to the DARWIN_WEBSERVICE_API_KEY environment variable if not supplied
timeout -- a timeout in seconds for calls to the LDB Webservice
(default 5)
|
f9605:c1:m0
|
def get_station_board(<EOL>self,<EOL>crs,<EOL>rows=<NUM_LIT>,<EOL>include_departures=True,<EOL>include_arrivals=False,<EOL>destination_crs=None,<EOL>origin_crs=None<EOL>):
|
<EOL>if include_departures and include_arrivals:<EOL><INDENT>query_type = '<STR_LIT>'<EOL><DEDENT>elif include_departures:<EOL><INDENT>query_type = '<STR_LIT>'<EOL><DEDENT>elif include_arrivals:<EOL><INDENT>query_type = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT>q = partial(self._base_query()[query_type], crs=crs, numRows=rows)<EOL>if destination_crs:<EOL><INDENT>if origin_crs:<EOL><INDENT>log.warn(<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT>q = partial(q, filterCrs=destination_crs, filterType='<STR_LIT:to>')<EOL><DEDENT>elif origin_crs:<EOL><INDENT>q = partial(q, filterCrs=origin_crs, filterType='<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>soap_response = q()<EOL><DEDENT>except WebFault:<EOL><INDENT>raise WebServiceError<EOL><DEDENT>return StationBoard(soap_response)<EOL>
|
Query the darwin webservice to obtain a board for a particular station
and return a StationBoard instance
Positional arguments:
crs -- the three letter CRS code of a UK station
Keyword arguments:
rows -- the number of rows to retrieve (default 10)
include_departures -- include departing services in the departure board
(default True)
include_arrivals -- include arriving services in the departure board
(default False)
destination_crs -- filter results so they only include services
calling at a particular destination (default None)
origin_crs -- filter results so they only include services
originating from a particular station (default None)
|
f9605:c1:m2
|
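A hedged usage sketch; the session class name, import path, and WSDL URL are assumptions, and a real Darwin API key is needed for the call to succeed.

# Hypothetical usage of the constructor and get_station_board() above.
from nre_darwin.webservice import DarwinLdbSession  # names assumed

session = DarwinLdbSession(
    wsdl='https://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx',
    api_key='YOUR-DARWIN-API-KEY')

# Departures from Manchester Piccadilly filtered to London Euston.
board = session.get_station_board('MAN', rows=5, destination_crs='EUS')
print(board.crs, board.generated_at)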
def get_service_details(self, service_id):
|
service_query =self._soap_client.service['<STR_LIT>']['<STR_LIT>']<EOL>try:<EOL><INDENT>soap_response = service_query(serviceID=service_id)<EOL><DEDENT>except WebFault:<EOL><INDENT>raise WebServiceError<EOL><DEDENT>return ServiceDetails(soap_response)<EOL>
|
Get the details of an individual service and return a ServiceDetails
instance.
Positional arguments:
service_id: A Darwin LDB service id
|
f9605:c1:m3
|
@property<EOL><INDENT>def generated_at(self):<DEDENT>
|
return self._generated_at<EOL>
|
The time at which the station board was generated.
|
f9605:c3:m1
|
@property<EOL><INDENT>def crs(self):<DEDENT>
|
return self._crs<EOL>
|
The CRS code for the station.
|
f9605:c3:m2
|