signature (string, lengths 8 to 3.44k) | body (string, lengths 0 to 1.41M) | docstring (string, lengths 1 to 122k) | id (string, lengths 5 to 17)
|---|---|---|---|
def lookup(self, nick):
|
query = dict(nick=nick)<EOL>order = [('<STR_LIT:time>', pymongo.DESCENDING)]<EOL>recs = self.db.pastes.find(query).sort(order).limit(<NUM_LIT:1>)<EOL>try:<EOL><INDENT>return next(recs)['<STR_LIT>']<EOL><DEDENT>except StopIteration:<EOL><INDENT>pass<EOL><DEDENT>
|
Looks for the most recent paste by a given nick.
Returns the uid or None
|
f4190:c0:m6
|
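Since the string literals in the body above are masked, here is a minimal sketch of the same pymongo pattern with illustrative names; the 'time' sort key and 'uid' return field are assumptions, not values recovered from the record.

```python
# Illustrative sketch only: field names 'time' and 'uid' are assumptions.
import pymongo

def lookup_latest(db, nick):
    query = dict(nick=nick)
    order = [("time", pymongo.DESCENDING)]
    recs = db.pastes.find(query).sort(order).limit(1)
    try:
        return next(recs)["uid"]
    except StopIteration:
        return None  # no paste found for this nick
```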
def short_key():
|
firstlast = list(ascii_letters + digits)<EOL>middle = firstlast + list('<STR_LIT>')<EOL>return '<STR_LIT>'.join((<EOL>choice(firstlast), choice(middle), choice(middle),<EOL>choice(middle), choice(firstlast),<EOL>))<EOL>
|
Generate a short key.
>>> key = short_key()
>>> len(key)
5
|
f4191:m0
|
def init_datastore(config):
|
if '<STR_LIT>' in config:<EOL><INDENT>return config['<STR_LIT>']<EOL><DEDENT>factory = config.pop('<STR_LIT>')<EOL>if isinstance(factory, str):<EOL><INDENT>"""<STR_LIT>"""<EOL>factory = pkg_resources.EntryPoint.parse('<STR_LIT>' + factory).resolve()<EOL><DEDENT>return factory(**config)<EOL>
|
Take the config definition and initialize the datastore.
The config must contain either a 'datastore' parameter, which
will be simply returned, or
must contain a 'factory' which is a callable or entry
point definition. The callable should take the remainder of
the params in config as kwargs and return a DataStore instance.
|
f4191:m1
|
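Because the literals in the body above are masked, here is a hedged sketch of the two config shapes the docstring describes. Only the key names 'datastore' and 'factory' come from the docstring; the factory function and its kwargs are hypothetical.

```python
# Hypothetical factory standing in for a real DataStore class.
def make_store(**kwargs):
    return dict(kind="demo-store", **kwargs)

# Form 1: a ready-made datastore is returned unchanged.
config_a = {"datastore": make_store(uri="sqlite:///:memory:")}

# Form 2: a 'factory' callable (an entry-point string such as
# "package.module:ClassName" is also accepted); the remaining keys
# are passed to the factory as kwargs.
config_b = {"factory": make_store, "uri": "sqlite:///:memory:"}
```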
@abc.abstractmethod<EOL><INDENT>def _store(self, uid, content, data):<DEDENT>
|
Store the given dict of content at uid. Nothing returned.
|
f4191:c0:m1
|
|
@abc.abstractmethod<EOL><INDENT>def _storeLog(self, nick, time, uid):<DEDENT>
|
Adds the nick & uid to the log for a given time/order. No return.
|
f4191:c0:m2
|
|
@abc.abstractmethod<EOL><INDENT>def _retrieve(self, uid):<DEDENT>
|
Return a dict with the contents of the paste, including the raw
data, if any, as the key 'data'. Must pass in uid, not shortid.
|
f4191:c0:m3
|
|
def delete(self, id):
|
return self._delete(self._resolve_id(id))<EOL>
|
Delete the paste with the indicated id.
|
f4191:c0:m4
|
@abc.abstractmethod<EOL><INDENT>def _delete(self, uid):<DEDENT>
|
Delete the paste with the indicated uid.
|
f4191:c0:m5
|
|
@abc.abstractmethod<EOL><INDENT>def lookup(self, nick):<DEDENT>
|
Looks for the most recent paste by a given nick.
Return the uid or None
|
f4191:c0:m6
|
|
@abc.abstractmethod<EOL><INDENT>def _lookupUid(self, short_uid):<DEDENT>
|
Given a short UID, return the equivalent long UID.
|
f4191:c0:m7
|
|
@abc.abstractmethod<EOL><INDENT>def list(self):<DEDENT>
|
Generate all stored UIDs.
|
f4191:c0:m8
|
|
def store(<EOL>self, type, nick, time, fmt=None, code=None, filename=None,<EOL>mime=None, data=None, makeshort=True):
|
uid = str(uuid.uuid4())<EOL>shortid = short_key() if makeshort else None<EOL>paste = assign_params(self.build_paste, locals())()<EOL>self._store(uid, paste, data)<EOL>if nick:<EOL><INDENT>self._storeLog(nick, time, uid)<EOL><DEDENT>return uid, shortid<EOL>
|
Store code or a file. Returns a tuple containing the uid and shortid
|
f4191:c0:m9
|
@staticmethod<EOL><INDENT>def build_paste(uid, shortid, type, nick, time, fmt, code, filename, mime):<DEDENT>
|
return locals()<EOL>
|
Build a 'paste' object
|
f4191:c0:m10
|
def retrieve(self, id):
|
return self._retrieve(self._resolve_id(id))<EOL>
|
Retrieve a paste. Returns a dictionary containing all metadata
and the file data, if it's a file.
|
f4191:c0:m11
|
def _resolve_id(self, id):
|
return self._lookupUid(id) if len(id) < <NUM_LIT:10> else id<EOL>
|
Resolve a short id to a UID
|
f4191:c0:m12
|
@staticmethod<EOL><INDENT>def migrate(dest_datastore, source_datastore):<DEDENT>
|
for uid in source_datastore.list():<EOL><INDENT>try:<EOL><INDENT>paste = source_datastore._retrieve(uid)<EOL><DEDENT>except Exception as exc:<EOL><INDENT>print(<EOL>"<STR_LIT>"<EOL>.format(exc=exc, uid=uid),<EOL>file=sys.stderr)<EOL>continue<EOL><DEDENT>data = paste.pop('<STR_LIT:data>', None)<EOL>try:<EOL><INDENT>dest_datastore._store(uid, paste, data)<EOL><DEDENT>except Exception as exc:<EOL><INDENT>print(<EOL>"<STR_LIT>"<EOL>.format(exc=exc, uid=uid),<EOL>file=sys.stderr)<EOL>continue<EOL><DEDENT><DEDENT>
|
Copy all records from source_datastore to dest_datastore
|
f4191:c0:m13
|
@command()<EOL>def paste(client, event, channel, nick, rest):
|
path = '<STR_LIT>'.format(**locals())<EOL>paste_root = pmxbot.config.get('<STR_LIT>', '<STR_LIT>')<EOL>url = urllib.parse.urljoin(paste_root, path)<EOL>auth = pmxbot.config.get('<STR_LIT>')<EOL>resp = requests.head(url, auth=_request_friendly(auth))<EOL>if not resp.ok:<EOL><INDENT>return "<STR_LIT>" + url<EOL><DEDENT>return resp.headers['<STR_LIT:location>']<EOL>
|
Drop a link to your latest paste
|
f4194:m0
|
def import_class(val):
|
try:<EOL><INDENT>parts = val.split('<STR_LIT:.>')<EOL>module_path, class_name = '<STR_LIT:.>'.join(parts[:-<NUM_LIT:1>]), parts[-<NUM_LIT:1>]<EOL>module = import_module(module_path)<EOL>return getattr(module, class_name)<EOL><DEDENT>except (ImportError, AttributeError) as e:<EOL><INDENT>msg = "<STR_LIT>".format(<EOL>val, e.__class__.__name__, e)<EOL>raise ImportError(msg)<EOL><DEDENT>
|
Import a class from a string module path.
Pattern borrowed from Django REST Framework.
See rest_framework/settings.py#L170-L182
|
f4213:m0
|
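As a standalone illustration of the dotted-path import described above (an equivalent sketch, not the masked function body itself):

```python
from importlib import import_module

def load_class(path):
    # Split "package.module.ClassName" into module path and class name.
    module_path, _, class_name = path.rpartition(".")
    return getattr(import_module(module_path), class_name)

# Example: resolve a standard-library class from its dotted path.
OrderedDict = load_class("collections.OrderedDict")
assert OrderedDict.__name__ == "OrderedDict"
```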
@staticmethod<EOL><INDENT>def get_series(series):<DEDENT>
|
if series == "<STR_LIT>":<EOL><INDENT>return census.acs1dp<EOL><DEDENT>elif series == "<STR_LIT>":<EOL><INDENT>return census.acs5<EOL><DEDENT>elif series == "<STR_LIT>":<EOL><INDENT>return census.sf1<EOL><DEDENT>elif series == "<STR_LIT>":<EOL><INDENT>return census.sf3<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>
|
Returns a census series API handler.
|
f4218:c0:m0
|
def write_county_estimate(self, table, variable, code, datum):
|
try:<EOL><INDENT>division = Division.objects.get(<EOL>code="<STR_LIT>".format(datum["<STR_LIT:state>"], datum["<STR_LIT>"]),<EOL>level=self.COUNTY_LEVEL,<EOL>)<EOL>CensusEstimate.objects.update_or_create(<EOL>division=division,<EOL>variable=variable,<EOL>defaults={"<STR_LIT>": datum[code] or <NUM_LIT:0>},<EOL>)<EOL><DEDENT>except ObjectDoesNotExist:<EOL><INDENT>print("<STR_LIT>".format(datum["<STR_LIT>"], datum["<STR_LIT:state>"]))<EOL><DEDENT>
|
Creates new estimate from a census series.
Data has following signature from API:
{
'B00001_001E': '5373',
'NAME': 'Anderson County, Texas',
'county': '001',
'state': '48'
}
|
f4218:c0:m2
|
def get_district_estimates_by_state(<EOL>self, api, table, variable, estimate, state<EOL>):
|
state = Division.objects.get(level=self.STATE_LEVEL, code=state)<EOL>district_data = api.get(<EOL>("<STR_LIT>", estimate),<EOL>{<EOL>"<STR_LIT>": "<STR_LIT>",<EOL>"<STR_LIT>": "<STR_LIT>".format(state.code),<EOL>},<EOL>year=int(table.year),<EOL>)<EOL>for datum in district_data:<EOL><INDENT>self.write_district_estimate(table, variable, estimate, datum)<EOL><DEDENT>
|
Calls API for all districts in a state and a given estimate.
|
f4218:c0:m4
|
def get_county_estimates_by_state(<EOL>self, api, table, variable, estimate, state<EOL>):
|
state = Division.objects.get(level=self.STATE_LEVEL, code=state)<EOL>county_data = api.get(<EOL>("<STR_LIT>", estimate),<EOL>{"<STR_LIT>": "<STR_LIT>", "<STR_LIT>": "<STR_LIT>".format(state.code)},<EOL>year=int(table.year),<EOL>)<EOL>for datum in county_data:<EOL><INDENT>self.write_county_estimate(table, variable, estimate, datum)<EOL><DEDENT>
|
Calls API for all counties in a state and a given estimate.
|
f4218:c0:m5
|
def get_state_estimates_by_state(<EOL>self, api, table, variable, estimate, state<EOL>):
|
state = Division.objects.get(level=self.STATE_LEVEL, code=state)<EOL>state_data = api.get(<EOL>("<STR_LIT>", estimate),<EOL>{"<STR_LIT>": "<STR_LIT>".format(state.code)},<EOL>year=int(table.year),<EOL>)<EOL>for datum in state_data:<EOL><INDENT>self.write_state_estimate(table, variable, estimate, datum)<EOL><DEDENT>
|
Calls API for a state and a given estimate.
|
f4218:c0:m6
|
def fetch_census_data(self, states):
|
print("<STR_LIT>")<EOL>for table in CensusTable.objects.all():<EOL><INDENT>api = self.get_series(table.series)<EOL>for variable in table.variables.all():<EOL><INDENT>estimate = "<STR_LIT>".format(table.code, variable.code)<EOL>print(<EOL>"<STR_LIT>".format(<EOL>table.year, table.series, estimate<EOL>)<EOL>)<EOL>for state in tqdm(states):<EOL><INDENT>self.get_state_estimates_by_state(<EOL>api=api,<EOL>table=table,<EOL>variable=variable,<EOL>estimate=estimate,<EOL>state=state,<EOL>)<EOL>self.get_county_estimates_by_state(<EOL>api=api,<EOL>table=table,<EOL>variable=variable,<EOL>estimate=estimate,<EOL>state=state,<EOL>)<EOL>self.get_district_estimates_by_state(<EOL>api=api,<EOL>table=table,<EOL>variable=variable,<EOL>estimate=estimate,<EOL>state=state,<EOL>)<EOL><DEDENT><DEDENT><DEDENT>
|
Fetch census estimates from table.
|
f4218:c0:m7
|
@staticmethod<EOL><INDENT>def aggregate_variable(estimate, id):<DEDENT>
|
estimates = [<EOL>variable.estimates.get(division__id=id).estimate<EOL>for variable in estimate.variable.label.variables.all()<EOL>]<EOL>method = estimate.variable.label.aggregation<EOL>if method == "<STR_LIT:s>":<EOL><INDENT>aggregate = sum(estimates)<EOL><DEDENT>elif method == "<STR_LIT:a>":<EOL><INDENT>aggregate = statistics.mean(estimates)<EOL><DEDENT>elif method == "<STR_LIT:m>":<EOL><INDENT>aggregate = statistics.median(estimates)<EOL><DEDENT>else:<EOL><INDENT>aggregate = None<EOL><DEDENT>return aggregate<EOL>
|
Aggregate census table variables by a custom label.
|
f4218:c0:m8
|
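For reference, the dispatch in the body above maps the label's aggregation code to a statistic: 's' selects sum, 'a' selects mean, and 'm' selects median. A standalone sketch of that dispatch:

```python
import statistics

def aggregate(method, estimates):
    # 's' = sum, 'a' = arithmetic mean, 'm' = median; anything else -> None.
    if method == "s":
        return sum(estimates)
    if method == "a":
        return statistics.mean(estimates)
    if method == "m":
        return statistics.median(estimates)
    return None

assert aggregate("s", [1, 2, 3]) == 6
assert aggregate("a", [1, 2, 3]) == 2
assert aggregate("m", [1, 2, 5]) == 2
```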
def aggregate_national_estimates_by_state(self):
|
data = {}<EOL>fips = "<STR_LIT>"<EOL>aggregated_labels = []<EOL>states = Division.objects.filter(level=self.STATE_LEVEL)<EOL>estimates = CensusEstimate.objects.filter(<EOL>division__level=self.STATE_LEVEL<EOL>)<EOL>for estimate in estimates:<EOL><INDENT>series = estimate.variable.table.series<EOL>year = estimate.variable.table.year<EOL>table = estimate.variable.table.code<EOL>label = estimate.variable.label.label<EOL>table_label = "<STR_LIT>".format(table, label)<EOL>code = estimate.variable.code<EOL>if series not in data:<EOL><INDENT>data[series] = {}<EOL><DEDENT>if year not in data[series]:<EOL><INDENT>data[series][year] = {}<EOL><DEDENT>if table not in data[series][year]:<EOL><INDENT>data[series][year][table] = {}<EOL><DEDENT>if fips not in data[series][year][table]:<EOL><INDENT>data[series][year][table][fips] = {}<EOL><DEDENT>if label is not None:<EOL><INDENT>if table_label not in aggregated_labels:<EOL><INDENT>aggregated_labels.append(table_label)<EOL>data[series][year][table][fips][label] = [<EOL>self.aggregate_variable(estimate, division.id)<EOL>for division in states<EOL>if len(<EOL>CensusEstimate.objects.filter(<EOL>variable=estimate.variable,<EOL>division=division.id,<EOL>)<EOL>)<EOL>> <NUM_LIT:0><EOL>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if code in data[series][year][table][fips]:<EOL><INDENT>data[series][year][table][fips][code].append(<EOL>estimate.estimate<EOL>)<EOL><DEDENT>else:<EOL><INDENT>data[series][year][table][fips][code] = [estimate.estimate]<EOL><DEDENT><DEDENT><DEDENT>return data<EOL>
|
Aggregates state-level estimates for each table within the country.
Creates data structure designed for an export in this format:
...{series}/{year}/{table}/states.json
|
f4218:c0:m9
|
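Every key below is illustrative (the series, year, table, FIPS code, labels and variable codes are masked or not shown above); the point is only the nesting order the method builds: series, then year, then table, then fips, then label-or-code.

```python
# Hypothetical shape of the nested dict returned above; all keys illustrative.
data = {
    "acs5": {                        # series
        "2017": {                    # year
            "B01001": {              # table code
                "00": {              # national-level FIPS placeholder
                    "total_population": [320000000],   # aggregated label -> values
                    "B01001_001E": [320000000],        # raw variable code -> values
                },
            },
        },
    },
}
```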
def aggregate_national_estimates_by_district(self):
|
data = {}<EOL>fips = "<STR_LIT>"<EOL>aggregated_labels = []<EOL>states = Division.objects.filter(level=self.DISTRICT_LEVEL)<EOL>estimates = CensusEstimate.objects.filter(<EOL>division__level=self.DISTRICT_LEVEL<EOL>)<EOL>for estimate in estimates:<EOL><INDENT>series = estimate.variable.table.series<EOL>year = estimate.variable.table.year<EOL>table = estimate.variable.table.code<EOL>label = estimate.variable.label.label<EOL>table_label = "<STR_LIT>".format(table, label)<EOL>code = estimate.variable.code<EOL>if series not in data:<EOL><INDENT>data[series] = {}<EOL><DEDENT>if year not in data[series]:<EOL><INDENT>data[series][year] = {}<EOL><DEDENT>if table not in data[series][year]:<EOL><INDENT>data[series][year][table] = {}<EOL><DEDENT>if fips not in data[series][year][table]:<EOL><INDENT>data[series][year][table][fips] = {}<EOL><DEDENT>if label is not None:<EOL><INDENT>if table_label not in aggregated_labels:<EOL><INDENT>aggregated_labels.append(table_label)<EOL>data[series][year][table][fips][label] = [<EOL>self.aggregate_variable(estimate, division.id)<EOL>for division in states<EOL>if len(<EOL>CensusEstimate.objects.filter(<EOL>variable=estimate.variable,<EOL>division=division.id,<EOL>)<EOL>)<EOL>> <NUM_LIT:0><EOL>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if code in data[series][year][table][fips]:<EOL><INDENT>data[series][year][table][fips][code].append(<EOL>estimate.estimate<EOL>)<EOL><DEDENT>else:<EOL><INDENT>data[series][year][table][fips][code] = [estimate.estimate]<EOL><DEDENT><DEDENT><DEDENT>return data<EOL>
|
Aggregates district-level estimates for each table within the country.
Creates data structure designed for an export in this format:
...{series}/{year}/{table}/districts.json
|
f4218:c0:m10
|
def aggregate_state_estimates_by_county(self, parent):
|
data = {}<EOL>for division in tqdm(<EOL>Division.objects.filter(level=self.COUNTY_LEVEL, parent=parent)<EOL>):<EOL><INDENT>fips = division.code<EOL>id = division.id<EOL>aggregated_labels = [] <EOL>for estimate in division.census_estimates.all():<EOL><INDENT>series = estimate.variable.table.series<EOL>year = estimate.variable.table.year<EOL>table = estimate.variable.table.code<EOL>label = estimate.variable.label.label<EOL>table_label = "<STR_LIT>".format(table, label)<EOL>code = estimate.variable.code<EOL>if series not in data:<EOL><INDENT>data[series] = {}<EOL><DEDENT>if year not in data[series]:<EOL><INDENT>data[series][year] = {}<EOL><DEDENT>if table not in data[series][year]:<EOL><INDENT>data[series][year][table] = {}<EOL><DEDENT>if fips not in data[series][year][table]:<EOL><INDENT>data[series][year][table][fips] = {}<EOL><DEDENT>if label is not None:<EOL><INDENT>if table_label not in aggregated_labels:<EOL><INDENT>aggregated_labels.append(table_label)<EOL>data[series][year][table][fips][<EOL>label<EOL>] = self.aggregate_variable(estimate, id)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>data[series][year][table][division.code][<EOL>code<EOL>] = estimate.estimate<EOL><DEDENT><DEDENT><DEDENT>return data<EOL>
|
Aggregates county-level estimates for each table within a given state.
Creates data structure designed for an export in this format:
...{series}/{year}/{table}/{state_fips}/counties.json
|
f4218:c0:m11
|
def aggregate_state_estimates_by_district(self, state):
|
data = {}<EOL>for division in tqdm(<EOL>Division.objects.filter(level=self.DISTRICT_LEVEL, parent=state)<EOL>):<EOL><INDENT>fips = division.code<EOL>id = division.id<EOL>aggregated_labels = [] <EOL>for estimate in division.census_estimates.all():<EOL><INDENT>series = estimate.variable.table.series<EOL>year = estimate.variable.table.year<EOL>table = estimate.variable.table.code<EOL>label = estimate.variable.label.label<EOL>table_label = "<STR_LIT>".format(table, label)<EOL>code = estimate.variable.code<EOL>if series not in data:<EOL><INDENT>data[series] = {}<EOL><DEDENT>if year not in data[series]:<EOL><INDENT>data[series][year] = {}<EOL><DEDENT>if table not in data[series][year]:<EOL><INDENT>data[series][year][table] = {}<EOL><DEDENT>if fips not in data[series][year][table]:<EOL><INDENT>data[series][year][table][fips] = {}<EOL><DEDENT>if label is not None:<EOL><INDENT>if table_label not in aggregated_labels:<EOL><INDENT>aggregated_labels.append(table_label)<EOL>data[series][year][table][fips][<EOL>label<EOL>] = self.aggregate_variable(estimate, id)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>data[series][year][table][division.code][<EOL>code<EOL>] = estimate.estimate<EOL><DEDENT><DEDENT><DEDENT>return data<EOL>
|
Aggregates district-level estimates for each table within a
given state.
Creates data structure designed for an export in this format:
...{series}/{year}/{table}/{state_fips}/districts.json
|
f4218:c0:m12
|
@staticmethod<EOL><INDENT>def aggregate_variable(estimate, id):<DEDENT>
|
estimates = [<EOL>variable.estimates.get(division__id=id).estimate<EOL>for variable in estimate.variable.label.variables.all()<EOL>]<EOL>method = estimate.variable.label.aggregation<EOL>if method == "<STR_LIT:s>":<EOL><INDENT>aggregate = sum(estimates)<EOL><DEDENT>elif method == "<STR_LIT:a>":<EOL><INDENT>aggregate = statistics.mean(estimates)<EOL><DEDENT>elif method == "<STR_LIT:m>":<EOL><INDENT>aggregate = statistics.median(estimates)<EOL><DEDENT>else:<EOL><INDENT>aggregate = None<EOL><DEDENT>return aggregate<EOL>
|
Aggregate census table variables by a custom label.
|
f4227:c0:m0
|
def aggregate_state_estimates_by_county(self, parent):
|
data = {}<EOL>for division in tqdm(<EOL>Division.objects.filter(level=self.COUNTY_LEVEL, parent=parent)<EOL>):<EOL><INDENT>fips = division.code<EOL>id = division.id<EOL>aggregated_labels = [] <EOL>for estimate in division.census_estimates.all():<EOL><INDENT>series = estimate.variable.table.series<EOL>year = estimate.variable.table.year<EOL>table = estimate.variable.table.code<EOL>label = None<EOL>if estimate.variable.label:<EOL><INDENT>label = estimate.variable.label.label<EOL>table_label = "<STR_LIT>".format(table, label)<EOL><DEDENT>code = estimate.variable.code<EOL>if series not in data:<EOL><INDENT>data[series] = {}<EOL><DEDENT>if year not in data[series]:<EOL><INDENT>data[series][year] = {}<EOL><DEDENT>if table not in data[series][year]:<EOL><INDENT>data[series][year][table] = {}<EOL><DEDENT>if fips not in data[series][year][table]:<EOL><INDENT>data[series][year][table][fips] = {}<EOL><DEDENT>if label is not None:<EOL><INDENT>if table_label not in aggregated_labels:<EOL><INDENT>aggregated_labels.append(table_label)<EOL>data[series][year][table][fips][<EOL>label<EOL>] = self.aggregate_variable(estimate, id)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>data[series][year][table][division.code][<EOL>code<EOL>] = estimate.estimate<EOL><DEDENT><DEDENT><DEDENT>return data<EOL>
|
Aggregates county-level estimates for each table within a given state.
Creates data structure designed for an export in this format:
...{series}/{year}/{table}/{state_fips}/counties.json
|
f4229:c0:m0
|
def aggregate_state_estimates_by_district(self, state):
|
data = {}<EOL>for division in tqdm(<EOL>Division.objects.filter(level=self.DISTRICT_LEVEL, parent=state)<EOL>):<EOL><INDENT>fips = division.code<EOL>id = division.id<EOL>aggregated_labels = [] <EOL>for estimate in division.census_estimates.all():<EOL><INDENT>series = estimate.variable.table.series<EOL>year = estimate.variable.table.year<EOL>table = estimate.variable.table.code<EOL>label = None<EOL>if estimate.variable.label:<EOL><INDENT>label = estimate.variable.label.label<EOL>table_label = "<STR_LIT>".format(table, label)<EOL><DEDENT>code = estimate.variable.code<EOL>if series not in data:<EOL><INDENT>data[series] = {}<EOL><DEDENT>if year not in data[series]:<EOL><INDENT>data[series][year] = {}<EOL><DEDENT>if table not in data[series][year]:<EOL><INDENT>data[series][year][table] = {}<EOL><DEDENT>if fips not in data[series][year][table]:<EOL><INDENT>data[series][year][table][fips] = {}<EOL><DEDENT>if label is not None:<EOL><INDENT>if table_label not in aggregated_labels:<EOL><INDENT>aggregated_labels.append(table_label)<EOL>data[series][year][table][fips][<EOL>label<EOL>] = self.aggregate_variable(estimate, id)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>data[series][year][table][division.code][<EOL>code<EOL>] = estimate.estimate<EOL><DEDENT><DEDENT><DEDENT>return data<EOL>
|
Aggregates district-level estimates for each table within a
given state.
Creates data structure designed for an export in this format:
...{series}/{year}/{table}/{state_fips}/districts.json
|
f4229:c0:m1
|
def aggregate_national_estimates_by_state(self):
|
data = {}<EOL>states = Division.objects.filter(level=self.STATE_LEVEL)<EOL>for state in tqdm(states):<EOL><INDENT>aggregated_labels = []<EOL>estimates = CensusEstimate.objects.filter(division=state)<EOL>for estimate in estimates:<EOL><INDENT>series = estimate.variable.table.series<EOL>year = estimate.variable.table.year<EOL>table = estimate.variable.table.code<EOL>label = None<EOL>if estimate.variable.label:<EOL><INDENT>label = estimate.variable.label.label<EOL>table_label = "<STR_LIT>".format(table, label)<EOL><DEDENT>code = estimate.variable.code<EOL>if series not in data:<EOL><INDENT>data[series] = {}<EOL><DEDENT>if year not in data[series]:<EOL><INDENT>data[series][year] = {}<EOL><DEDENT>if table not in data[series][year]:<EOL><INDENT>data[series][year][table] = {}<EOL><DEDENT>if state.code not in data[series][year][table]:<EOL><INDENT>data[series][year][table][state.code] = {}<EOL><DEDENT>if label is not None:<EOL><INDENT>if table_label not in aggregated_labels:<EOL><INDENT>aggregated_labels.append(table_label)<EOL>if (<EOL>len(<EOL>CensusEstimate.objects.filter(<EOL>variable=estimate.variable,<EOL>division=state.id,<EOL>)<EOL>)<EOL>> <NUM_LIT:0><EOL>):<EOL><INDENT>data[series][year][table][state.code][<EOL>label<EOL>] = self.aggregate_variable(estimate, state.id)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>data[series][year][table][state.code][<EOL>code<EOL>] = estimate.estimate<EOL><DEDENT><DEDENT><DEDENT>return data<EOL>
|
Aggregates state-level estimates for each table within the country.
Creates data structure designed for an export in this format:
...{series}/{year}/{table}/states.json
|
f4230:c0:m0
|
def aggregate_national_estimates_by_district(self):
|
data = {}<EOL>states = Division.objects.filter(level=self.STATE_LEVEL)<EOL>for state in tqdm(states):<EOL><INDENT>districts = Division.objects.filter(<EOL>level=self.DISTRICT_LEVEL, parent=state<EOL>)<EOL>for district in districts:<EOL><INDENT>aggregated_labels = []<EOL>estimates = CensusEstimate.objects.filter(division=district)<EOL>for estimate in estimates:<EOL><INDENT>series = estimate.variable.table.series<EOL>year = estimate.variable.table.year<EOL>table = estimate.variable.table.code<EOL>label = None<EOL>if estimate.variable.label:<EOL><INDENT>label = estimate.variable.label.label<EOL>table_label = "<STR_LIT>".format(table, label)<EOL><DEDENT>code = estimate.variable.code<EOL>if series not in data:<EOL><INDENT>data[series] = {}<EOL><DEDENT>if year not in data[series]:<EOL><INDENT>data[series][year] = {}<EOL><DEDENT>if table not in data[series][year]:<EOL><INDENT>data[series][year][table] = {}<EOL><DEDENT>if state.code not in data[series][year][table]:<EOL><INDENT>data[series][year][table][state.code] = {}<EOL><DEDENT>if (<EOL>district.code<EOL>not in data[series][year][table][state.code]<EOL>):<EOL><INDENT>data[series][year][table][state.code][<EOL>district.code<EOL>] = {}<EOL><DEDENT>if label is not None:<EOL><INDENT>if table_label not in aggregated_labels:<EOL><INDENT>aggregated_labels.append(table_label)<EOL>if (<EOL>len(<EOL>CensusEstimate.objects.filter(<EOL>variable=estimate.variable,<EOL>division=district.id,<EOL>)<EOL>)<EOL>> <NUM_LIT:0><EOL>):<EOL><INDENT>data[series][year][table][state.code][<EOL>district.code<EOL>][label] = self.aggregate_variable(<EOL>estimate, district.id<EOL>)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>data[series][year][table][state.code][district.code][<EOL>code<EOL>] = estimate.estimate<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return data<EOL>
|
Aggregates district-level estimates for each table within the country.
Creates data structure designed for an export in this format:
...{series}/{year}/{table}/districts.json
|
f4230:c0:m1
|
def write_county_estimate(self, table, variable, code, datum):
|
try:<EOL><INDENT>division = Division.objects.get(code='<STR_LIT>'.format(<EOL>datum['<STR_LIT:state>'],<EOL>datum['<STR_LIT>']<EOL>), level=self.COUNTY_LEVEL)<EOL>CensusEstimate.objects.update_or_create(<EOL>division=division,<EOL>variable=variable,<EOL>defaults={<EOL>'<STR_LIT>': datum[code] or <NUM_LIT:0><EOL>}<EOL>)<EOL><DEDENT>except ObjectDoesNotExist:<EOL><INDENT>print('<STR_LIT>'.format(datum['<STR_LIT>'], datum['<STR_LIT:state>']))<EOL><DEDENT>
|
Creates new estimate from a census series.
Data has following signature from API:
{
'B00001_001E': '5373',
'NAME': 'Anderson County, Texas',
'county': '001',
'state': '48'
}
|
f4233:c0:m0
|
def get_series(self, series):
|
if series == "<STR_LIT>":<EOL><INDENT>return self.census.acs1dp<EOL><DEDENT>elif series == "<STR_LIT>":<EOL><INDENT>return self.census.acs5<EOL><DEDENT>elif series == "<STR_LIT>":<EOL><INDENT>return self.census.sf1<EOL><DEDENT>elif series == "<STR_LIT>":<EOL><INDENT>return self.census.sf3<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>
|
Returns a census series API handler.
|
f4237:c0:m0
|
def get_county_estimates_by_state(<EOL>self, api, table, variable, estimate, state<EOL>):
|
state = Division.objects.get(level=self.STATE_LEVEL, code=state)<EOL>county_data = api.get(<EOL>("<STR_LIT>", estimate),<EOL>{"<STR_LIT>": "<STR_LIT>", "<STR_LIT>": "<STR_LIT>".format(state.code)},<EOL>year=int(table.year),<EOL>)<EOL>for datum in county_data:<EOL><INDENT>self.write_county_estimate(table, variable, estimate, datum)<EOL><DEDENT>
|
Calls API for all counties in a state and a given estimate.
|
f4238:c0:m0
|
def fetch_state_data(self, states):
|
print("<STR_LIT>")<EOL>for table in CensusTable.objects.all():<EOL><INDENT>api = self.get_series(table.series)<EOL>for variable in table.variables.all():<EOL><INDENT>estimate = "<STR_LIT>".format(table.code, variable.code)<EOL>print(<EOL>"<STR_LIT>".format(<EOL>table.year, table.series, estimate<EOL>)<EOL>)<EOL>for state in tqdm(states):<EOL><INDENT>self.get_state_estimates_by_state(<EOL>api=api,<EOL>table=table,<EOL>variable=variable,<EOL>estimate=estimate,<EOL>state=state,<EOL>)<EOL>self.get_county_estimates_by_state(<EOL>api=api,<EOL>table=table,<EOL>variable=variable,<EOL>estimate=estimate,<EOL>state=state,<EOL>)<EOL>self.get_district_estimates_by_state(<EOL>api=api,<EOL>table=table,<EOL>variable=variable,<EOL>estimate=estimate,<EOL>state=state,<EOL>)<EOL><DEDENT><DEDENT><DEDENT>
|
Fetch census estimates from table.
|
f4239:c0:m1
|
def get_district_estimates_by_state(<EOL>self, api, table, variable, estimate, state<EOL>):
|
state = Division.objects.get(level=self.STATE_LEVEL, code=state)<EOL>district_data = api.get(<EOL>("<STR_LIT>", estimate),<EOL>{<EOL>"<STR_LIT>": "<STR_LIT>",<EOL>"<STR_LIT>": "<STR_LIT>".format(state.code),<EOL>},<EOL>year=int(table.year),<EOL>)<EOL>for datum in district_data:<EOL><INDENT>self.write_district_estimate(table, variable, estimate, datum)<EOL><DEDENT>
|
Calls API for all districts in a state and a given estimate.
|
f4240:c0:m0
|
def get_state_estimates_by_state(<EOL>self, api, table, variable, estimate, state<EOL>):
|
state = Division.objects.get(level=self.STATE_LEVEL, code=state)<EOL>state_data = api.get(<EOL>("<STR_LIT>", estimate),<EOL>{"<STR_LIT>": "<STR_LIT>".format(state.code)},<EOL>year=int(table.year),<EOL>)<EOL>for datum in state_data:<EOL><INDENT>self.write_state_estimate(table, variable, estimate, datum)<EOL><DEDENT>
|
Calls API for a state and a given estimate.
|
f4241:c0:m0
|
def __init__(self, app):
|
self.client = None<EOL>if app is not None:<EOL><INDENT>self.app = app<EOL>self.init_app(self.app)<EOL><DEDENT>else:<EOL><INDENT>self.app = None<EOL><DEDENT>
|
Implements the Flask extension pattern.
|
f4257:c0:m0
|
def _init_request_hooks(self):
|
for method_type in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>for method in _METHODS:<EOL><INDENT>event = getattr(self.app, '<STR_LIT>' + method_type + '<STR_LIT:_>' + method)<EOL>event_hook = getattr(hooks, method_type + '<STR_LIT:_>' + method)<EOL>event += event_hook<EOL><DEDENT><DEDENT>
|
initialize pre request hooks
|
f4257:c0:m3
|
def _init_database_hooks(self):
|
pass<EOL>
|
initialize database hooks; we might want to monitor database calls
|
f4257:c0:m4
|
def remove(item):
|
if os.path.isdir(item):<EOL><INDENT>shutil.rmtree(item)<EOL><DEDENT>else:<EOL><INDENT>os.remove(item)<EOL><DEDENT>
|
Delete item, whether it's a file, a folder, or a folder
full of other files and folders.
|
f4261:m1
|
def remove_pattern(root, pat, verbose=True):
|
print("<STR_LIT>", root, pat)<EOL>combined = root + pat<EOL>print('<STR_LIT>', combined)<EOL>items = glob.glob(combined)<EOL>print('<STR_LIT>', items)<EOL>for item in items:<EOL><INDENT>print('<STR_LIT>', item)<EOL>if is_inside(root, item):<EOL><INDENT>remove(item)<EOL><DEDENT>elif verbose:<EOL><INDENT>print("<STR_LIT>".format(**vars()))<EOL><DEDENT><DEDENT>
|
Given a directory, and a pattern of files like "garbage.txt" or
"*pyc" inside it, remove them.
Try not to delete the whole OS while you're at it.
|
f4261:m2
|
def get_slugignores(root, fname='<STR_LIT>'):
|
try:<EOL><INDENT>with open(os.path.join(root, fname)) as f:<EOL><INDENT>return [l.rstrip('<STR_LIT:\n>') for l in f]<EOL><DEDENT><DEDENT>except IOError:<EOL><INDENT>return []<EOL><DEDENT>
|
Given a root path, read any .slugignore file inside and return a list of
patterns that should be removed prior to slug compilation.
Return empty list if file does not exist.
|
f4261:m3
|
def clean_slug_dir(root):
|
if not root.endswith('<STR_LIT:/>'):<EOL><INDENT>root += '<STR_LIT:/>'<EOL><DEDENT>for pattern in get_slugignores(root):<EOL><INDENT>print("<STR_LIT>", pattern)<EOL>remove_pattern(root, pattern)<EOL><DEDENT>
|
Given a path, delete anything specified in .slugignore.
|
f4261:m4
|
def _write_buildproc_yaml(build_data, env, user, cmd, volumes, app_folder):
|
buildproc = ProcData({<EOL>'<STR_LIT>': str(app_folder),<EOL>'<STR_LIT>': build_data.app_name,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': env,<EOL>'<STR_LIT:host>': '<STR_LIT>',<EOL>'<STR_LIT:port>': <NUM_LIT:0>,<EOL>'<STR_LIT:version>': build_data.version,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': {},<EOL>'<STR_LIT:user>': user,<EOL>'<STR_LIT>': cmd,<EOL>'<STR_LIT>': volumes,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': build_data.image_name,<EOL>'<STR_LIT>': build_data.image_url,<EOL>'<STR_LIT>': build_data.image_md5,<EOL>})<EOL>with open('<STR_LIT>', '<STR_LIT:w>') as f:<EOL><INDENT>f.write(buildproc.as_yaml())<EOL><DEDENT>return get_container_path(buildproc)<EOL>
|
Write a proc.yaml for the container and return the container path
|
f4262:m4
|
def assert_compile_finished(app_folder):
|
fpath = os.path.join(app_folder, '<STR_LIT>')<EOL>if not os.path.isfile(fpath):<EOL><INDENT>msg = ('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>raise AssertionError(msg)<EOL><DEDENT>try:<EOL><INDENT>os.remove(fpath)<EOL><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT>
|
Once builder.sh has invoked the compile script, it should return and set a
flag indicating that the script completed. If that flag is missing, it is an
indication that the container crashed, and we raise an error.
This function will clean up the flag after the check is performed, so only
call this function once. See issue #141.
|
f4262:m5
|
def recover_release_data(app_folder):
|
with open(os.path.join(app_folder, '<STR_LIT>'), '<STR_LIT:rb>') as f:<EOL><INDENT>return yaml.safe_load(f)<EOL><DEDENT>
|
Given the path to an app folder where an app was just built, return a
dictionary containing the data emitted from running the buildpack's release
script.
Relies on the builder.sh script storing the release data in ./.release.yaml
inside the app folder.
|
f4262:m6
|
def recover_buildpack(app_folder):
|
filepath = os.path.join(app_folder, '<STR_LIT>')<EOL>with open(filepath) as f:<EOL><INDENT>buildpack_picked = f.read()<EOL><DEDENT>buildpack_picked = buildpack_picked.lstrip('<STR_LIT:/>')<EOL>buildpack_picked = buildpack_picked.rstrip('<STR_LIT:\n>')<EOL>buildpack_picked = os.path.join(os.getcwd(), buildpack_picked)<EOL>return BuildPack(buildpack_picked)<EOL>
|
Given the path to an app folder where an app was just built, return a
BuildPack object pointing to the dir for the buildpack used during the
build.
Relies on the builder.sh script storing the buildpack location in
/.buildpack inside the container.
|
f4262:m7
|
def pull_buildpack(url):
|
defrag = _defrag(urllib.parse.urldefrag(url))<EOL>with lock_or_wait(defrag.url):<EOL><INDENT>bp = update_buildpack(url)<EOL>dest = bp.basename + '<STR_LIT:->' + hash_text(defrag.url)<EOL>shutil.copytree(bp.folder, dest)<EOL><DEDENT>path.Path(dest).chmod('<STR_LIT>')<EOL>return dest<EOL>
|
Update a buildpack in its shared location, then make a copy into the
current directory, using an md5 of the url.
|
f4262:m9
|
def save_compile_log(self, app_folder):
|
self._save_logfile(app_folder, '<STR_LIT>', '<STR_LIT>')<EOL>
|
Copy compilation log into outfolder
|
f4262:c1:m2
|
def save_lxcdebug_log(self, app_folder):
|
self._save_logfile(app_folder, '<STR_LIT>', '<STR_LIT>')<EOL>
|
Copy lxc debug log into outfolder
|
f4262:c1:m3
|
def make_tarball(self, app_folder, build_data):
|
<EOL>clean_slug_dir(app_folder)<EOL>with tarfile.open('<STR_LIT>', '<STR_LIT>') as tar:<EOL><INDENT>tar.add(app_folder, arcname='<STR_LIT>')<EOL><DEDENT>build_data.build_md5 = file_md5('<STR_LIT>')<EOL>tardest = os.path.join(self.outfolder, '<STR_LIT>')<EOL>shutil.move('<STR_LIT>', tardest)<EOL>build_data_path = os.path.join(self.outfolder, '<STR_LIT>')<EOL>print("<STR_LIT>", build_data_path)<EOL>with open(build_data_path, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(build_data.as_yaml())<EOL><DEDENT>
|
Following a successful build, create a tarball and build result.
|
f4262:c1:m4
|
def update_buildpack(url, packs_dir=PACKS_HOME, vcs_type=None):
|
defrag = _defrag(urllib.parse.urldefrag(url))<EOL>bpfolder = repo.basename(url) + '<STR_LIT:->' + hash_text(defrag.url)<EOL>dest = os.path.join(packs_dir, bpfolder)<EOL>mkdir(packs_dir)<EOL>bp = BuildPack(dest, url, vcs_type=vcs_type)<EOL>bp.update()<EOL>return bp<EOL>
|
Checkout/update a buildpack, given its URL.
Buildpacks are checked out into folders whose names start with something
nicely readable, followed by an MD5 hash of the full URL (thus
distinguishing two buildpacks with the same 'name' but different URLs).
|
f4266:m0
|
def get_unique_repo_folder(repo_url):
|
return '<STR_LIT>' % (repo.basename(repo_url), hash_text(repo_url))<EOL>
|
Given a repository URL, return a folder name that's human-readable,
filesystem-friendly, and guaranteed unique to that repo.
|
f4266:m2
|
def detect(self, app):
|
script = os.path.join(self.folder, '<STR_LIT>', '<STR_LIT>')<EOL>cmd = '<STR_LIT>' % (script, app.folder)<EOL>result = run(cmd)<EOL>return result.status_code == <NUM_LIT:0><EOL>
|
Given an app, run detect script on it to determine whether it can be
built with this pack. Return True/False.
|
f4266:c0:m0
|
def tar(self, appname, appversion):
|
name_tmpl = '<STR_LIT>'<EOL>time = utc.now()<EOL>name = name_tmpl % {'<STR_LIT>': appname,<EOL>'<STR_LIT:version>': appversion,<EOL>'<STR_LIT:time>': time.strftime('<STR_LIT>')}<EOL>if not os.path.exists(TARBALL_HOME):<EOL><INDENT>os.mkdir(TARBALL_HOME)<EOL><DEDENT>tarball = os.path.join(TARBALL_HOME, name)<EOL>tar_params = {'<STR_LIT:filename>': tarball, '<STR_LIT>': self.folder}<EOL>tar_result = run('<STR_LIT>' % tar_params)<EOL>tar_result.raise_for_status()<EOL>return Build(appname, appversion, time, tarball)<EOL>
|
Given an app name and version to be used in the tarball name,
create a tar.bz2 file with all of this folder's contents inside.
Return a Build object with attributes for appname, appversion,
time, and path.
|
f4266:c1:m2
|
def _simpleparsefun(date):
|
if hasattr(date, '<STR_LIT>'):<EOL><INDENT>return date<EOL><DEDENT>try:<EOL><INDENT>date = datetime.datetime.strptime(date, '<STR_LIT>')<EOL><DEDENT>except ValueError:<EOL><INDENT>date = datetime.datetime.strptime(date, '<STR_LIT>')<EOL><DEDENT>return date<EOL>
|
Simple date parsing function
|
f4274:m0
|
def _dateutilparsefun(date):
|
if hasattr(date, '<STR_LIT>'):<EOL><INDENT>return date<EOL><DEDENT>return _dateutil_parse(date)<EOL>
|
dateutil parsing function
|
f4274:m1
|
def warn(message):
|
warnings.warn(CalendarHolidayWarning(message), stacklevel=<NUM_LIT:3>)<EOL>
|
Throw warning with a message
|
f4274:m2
|
def __init__(self, workdays=None, holidays=None):
|
if workdays is None:<EOL><INDENT>self.workdays = [MO, TU, WE, TH, FR]<EOL><DEDENT>else:<EOL><INDENT>self.workdays = sorted(list(set(workdays))) <EOL><DEDENT>if holidays is None:<EOL><INDENT>holidays = []<EOL><DEDENT>weekdaymap = []<EOL>for wkday in range(<NUM_LIT:0>, <NUM_LIT:7>):<EOL><INDENT>wmap = {}<EOL>wmap['<STR_LIT>'] = wkday<EOL>if wkday in self.workdays:<EOL><INDENT>wmap['<STR_LIT>'] = True<EOL>i = self.workdays.index(wkday)<EOL>if i == len(self.workdays) - <NUM_LIT:1>: <EOL><INDENT>wmap['<STR_LIT>'] = self.workdays[<NUM_LIT:0>]<EOL>wmap['<STR_LIT>'] = wmap['<STR_LIT>'] + <NUM_LIT:7> - wkday<EOL><DEDENT>else:<EOL><INDENT>wmap['<STR_LIT>'] = self.workdays[i+<NUM_LIT:1>]<EOL>wmap['<STR_LIT>'] = wmap['<STR_LIT>'] - wkday<EOL><DEDENT>if i == <NUM_LIT:0>: <EOL><INDENT>wmap['<STR_LIT>'] = self.workdays[-<NUM_LIT:1>]<EOL>wmap['<STR_LIT>'] = wmap['<STR_LIT>'] - wkday - <NUM_LIT:7><EOL><DEDENT>else:<EOL><INDENT>wmap['<STR_LIT>'] = self.workdays[i-<NUM_LIT:1>]<EOL>wmap['<STR_LIT>'] = wmap['<STR_LIT>'] - wkday<EOL><DEDENT><DEDENT>else:<EOL><INDENT>wmap['<STR_LIT>'] = False<EOL>after = [x for x in range(wkday+<NUM_LIT:1>, <NUM_LIT:7>) if x in self.workdays]<EOL>if after: <EOL><INDENT>wmap['<STR_LIT>'] = after[<NUM_LIT:0>]<EOL>wmap['<STR_LIT>'] = wmap['<STR_LIT>'] - wkday<EOL><DEDENT>else:<EOL><INDENT>wmap['<STR_LIT>'] = self.workdays[<NUM_LIT:0>]<EOL>wmap['<STR_LIT>'] = wmap['<STR_LIT>'] + <NUM_LIT:7> - wkday<EOL><DEDENT>before = [x for x in range(<NUM_LIT:0>, wkday) if x in self.workdays]<EOL>if before: <EOL><INDENT>wmap['<STR_LIT>'] = before[-<NUM_LIT:1>]<EOL>wmap['<STR_LIT>'] = wmap['<STR_LIT>'] - wkday<EOL><DEDENT>else:<EOL><INDENT>wmap['<STR_LIT>'] = self.workdays[-<NUM_LIT:1>]<EOL>wmap['<STR_LIT>'] = wmap['<STR_LIT>'] - wkday - <NUM_LIT:7><EOL><DEDENT><DEDENT>weekdaymap.append(DayOfWeek(**wmap))<EOL><DEDENT>self.weekdaymap = weekdaymap<EOL>holidays = set([parsefun(hol) for hol in holidays])<EOL>self.holidays = sorted(<EOL>[hol for hol in holidays if weekdaymap[hol.weekday()].isworkday])<EOL>
|
Initialize object and creates the week day map.
Args:
workdays: List or tuple of week days considered 'work days'.
Anything not in this list is considered a rest day.
Defaults to [MO, TU, WE, TH, FR].
holidays: List or tuple of holidays (or strings).
Default is [].
|
f4274:c1:m0
|
def isworkday(self, date):
|
date = parsefun(date)<EOL>return self.weekdaymap[date.weekday()].isworkday<EOL>
|
Check if a given date is a work date, ignoring holidays.
Args:
date (date, datetime or str): Date to be checked.
Returns:
bool: True if the date is a work date, False otherwise.
|
f4274:c1:m1
|
def isholiday(self, date):
|
date = parsefun(date)<EOL>if self.holidays:<EOL><INDENT>i = bisect.bisect_left(self.holidays, date)<EOL>if i == <NUM_LIT:0> and date < self.holidays[<NUM_LIT:0>]:<EOL><INDENT>warn('<STR_LIT>''<STR_LIT>' % date)<EOL><DEDENT>elif i == len(self.holidays):<EOL><INDENT>warn('<STR_LIT>''<STR_LIT>' % date)<EOL><DEDENT>elif self.holidays[i] == date:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>
|
Check if a given date is a holiday.
Args:
date (date, datetime or str): Date to be checked.
Returns:
bool: True if the date is a holiday, False otherwise.
|
f4274:c1:m2
|
def isbusday(self, date):
|
return self.isworkday(date) and not self.isholiday(date)<EOL>
|
Check if a given date is a business date, taking into consideration
the work days and holidays.
Args:
date (date, datetime or str): Date to be checked.
Returns:
bool: True if the date is a business date, False otherwise.
|
f4274:c1:m3
|
def adjust(self, date, mode):
|
date = parsefun(date)<EOL>if self.isbusday(date):<EOL><INDENT>return date<EOL><DEDENT>if mode == FOLLOWING:<EOL><INDENT>dateadj = self.addbusdays(date, <NUM_LIT:1>)<EOL><DEDENT>elif mode == PREVIOUS:<EOL><INDENT>dateadj = self.addbusdays(date, -<NUM_LIT:1>)<EOL><DEDENT>elif mode == MODIFIEDFOLLOWING:<EOL><INDENT>dateadj = self.addbusdays(date, <NUM_LIT:1>)<EOL>if dateadj.month != date.month:<EOL><INDENT>dateadj = self.addbusdays(dateadj, -<NUM_LIT:1>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % mode)<EOL><DEDENT>return dateadj<EOL>
|
Adjust the date to the closest work date.
Args:
date (date, datetime or str): Date to be adjusted.
mode (integer): FOLLOWING, PREVIOUS or MODIFIEDFOLLOWING.
Note:
If date is already a business date then it is returned unchanged.
How to use the adjustment constants:
**FOLLOWING**:
Adjust to the next business date.
**PREVIOUS**:
Adjust to the previous business date.
**MODIFIEDFOLLOWING**:
Adjust to the next business date unless it falls on a
different month, in which case adjust to the previous business
date.
Returns:
datetime: Adjusted date.
|
f4274:c1:m4
|
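A standalone illustration of the MODIFIEDFOLLOWING rule described above, using plain weekends as the only non-business days (no holidays); this mirrors the adjustment logic but is not the Calendar class itself.

```python
import datetime

def next_busday(d):
    while d.weekday() >= 5:          # 5 = Saturday, 6 = Sunday
        d += datetime.timedelta(days=1)
    return d

def prev_busday(d):
    while d.weekday() >= 5:
        d -= datetime.timedelta(days=1)
    return d

def modified_following(d):
    adj = next_busday(d)
    # Fall back to the previous business day if FOLLOWING crosses a month boundary.
    return adj if adj.month == d.month else prev_busday(d)

# 2024-08-31 is a Saturday; FOLLOWING would land on Monday 2024-09-02,
# so MODIFIEDFOLLOWING falls back to Friday 2024-08-30.
assert modified_following(datetime.date(2024, 8, 31)) == datetime.date(2024, 8, 30)
```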
def addworkdays(self, date, offset):
|
date = parsefun(date)<EOL>if offset == <NUM_LIT:0>:<EOL><INDENT>return date<EOL><DEDENT>if offset > <NUM_LIT:0>:<EOL><INDENT>direction = <NUM_LIT:1><EOL>idx_offset = Calendar._idx_offsetnext<EOL>idx_next = Calendar._idx_nextworkday<EOL>idx_offset_other = Calendar._idx_offsetprev<EOL>idx_next_other = Calendar._idx_prevworkday<EOL><DEDENT>else:<EOL><INDENT>direction = -<NUM_LIT:1><EOL>idx_offset = Calendar._idx_offsetprev<EOL>idx_next = Calendar._idx_prevworkday<EOL>idx_offset_other = Calendar._idx_offsetnext<EOL>idx_next_other = Calendar._idx_nextworkday<EOL><DEDENT>weekdaymap = self.weekdaymap <EOL>datewk = date.weekday()<EOL>if not weekdaymap[datewk].isworkday:<EOL><INDENT>date += datetime.timedelta(days=weekdaymap[datewk][idx_offset_other])<EOL>datewk = weekdaymap[datewk][idx_next_other]<EOL><DEDENT>nw, nd = divmod(abs(offset), len(self.workdays))<EOL>ndays = nw * <NUM_LIT:7><EOL>while nd > <NUM_LIT:0>:<EOL><INDENT>ndays += abs(weekdaymap[datewk][idx_offset])<EOL>datewk = weekdaymap[datewk][idx_next]<EOL>nd -= <NUM_LIT:1><EOL><DEDENT>date += datetime.timedelta(days=ndays*direction)<EOL>return date<EOL>
|
Add work days to a given date, ignoring holidays.
Note:
By definition, a zero offset causes the function to return the
initial date, even if it is not a work date. An offset of 1
represents the next work date, regardless of date being a work
date or not.
Args:
date (date, datetime or str): Date to be incremented.
offset (integer): Number of work days to add. Positive values move
the date forward and negative values move the date back.
Returns:
datetime: New incremented date.
|
f4274:c1:m5
|
def addbusdays(self, date, offset):
|
date = parsefun(date)<EOL>if offset == <NUM_LIT:0>:<EOL><INDENT>return date<EOL><DEDENT>dateoffset = self.addworkdays(date, offset)<EOL>holidays = self.holidays <EOL>if not holidays:<EOL><INDENT>return dateoffset<EOL><DEDENT>weekdaymap = self.weekdaymap <EOL>datewk = dateoffset.weekday()<EOL>if offset > <NUM_LIT:0>:<EOL><INDENT>i = bisect.bisect_right(holidays, date)<EOL>if i == len(holidays):<EOL><INDENT>warn('<STR_LIT>''<STR_LIT>' %(date, offset))<EOL><DEDENT>else:<EOL><INDENT>while holidays[i] <= dateoffset:<EOL><INDENT>dateoffset += datetime.timedelta(days=weekdaymap[datewk].offsetnext)<EOL>datewk = weekdaymap[datewk].nextworkday<EOL>i += <NUM_LIT:1><EOL>if i == len(holidays):<EOL><INDENT>warn('<STR_LIT>''<STR_LIT>' %(date, offset))<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>i = bisect.bisect_left(holidays, date) - <NUM_LIT:1><EOL>if i == -<NUM_LIT:1>:<EOL><INDENT>warn('<STR_LIT>''<STR_LIT>'% (date, offset))<EOL><DEDENT>else:<EOL><INDENT>while holidays[i] >= dateoffset:<EOL><INDENT>dateoffset += datetime.timedelta(days=weekdaymap[datewk].offsetprev)<EOL>datewk = weekdaymap[datewk].prevworkday<EOL>i -= <NUM_LIT:1><EOL>if i == -<NUM_LIT:1>:<EOL><INDENT>warn('<STR_LIT>''<STR_LIT>' %(date, offset))<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return dateoffset<EOL>
|
Add business days to a given date, taking holidays into consideration.
Note:
By definition, a zero offset causes the function to return the
initial date, even if it is not a business date. An offset of 1
represents the next business date, regardless of date being a
business date or not.
Args:
date (date, datetime or str): Date to be incremented.
offset (integer): Number of business days to add. Positive values
move the date forward and negative values move the date back.
Returns:
datetime: New incremented date.
|
f4274:c1:m6
|
def _workdaycount(self, date1, date2):
|
assert date2 >= date1<EOL>date1wd = date1.weekday()<EOL>date2wd = date2.weekday()<EOL>if not self.weekdaymap[date2wd].isworkday:<EOL><INDENT>date2 += datetime.timedelta(days=self.weekdaymap[date2wd].offsetprev)<EOL>date2wd = self.weekdaymap[date2wd].prevworkday<EOL><DEDENT>if date2 <= date1:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>nw, nd = divmod((date2 - date1).days, <NUM_LIT:7>)<EOL>ndays = nw * len(self.workdays)<EOL>if nd > <NUM_LIT:0>:<EOL><INDENT>date1wd = date1.weekday()<EOL>date2wd = date2.weekday()<EOL>while date1wd != date2wd:<EOL><INDENT>ndays += <NUM_LIT:1><EOL>date1wd = self.weekdaymap[date1wd].nextworkday<EOL><DEDENT><DEDENT>return ndays<EOL>
|
(PRIVATE) Count work days between two dates, ignoring holidays.
|
f4274:c1:m7
|
def workdaycount(self, date1, date2):
|
date1 = parsefun(date1)<EOL>date2 = parsefun(date2)<EOL>if date1 == date2:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>elif date1 > date2:<EOL><INDENT>date1, date2 = date2, date1<EOL>direction = -<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>direction = <NUM_LIT:1><EOL><DEDENT>ndays = self._workdaycount(date1, date2)<EOL>return ndays * direction<EOL>
|
Count work days between two dates, ignoring holidays.
Args:
date1 (date, datetime or str): Date start of interval.
date2 (date, datetime or str): Date end of interval.
Note:
The adopted notation is COB to COB, so effectively date1 is not
included in the calculation result.
Example:
>>> cal = Calendar()
>>> date1 = datetime.datetime.today()
>>> date2 = cal.addworkdays(date1, 1)
>>> cal.workdaycount(date1, date2)
1
Returns:
int: Number of work days between the two dates. If the dates
are equal the result is zero. If date1 > date2 the result is
negative.
|
f4274:c1:m8
|
def busdaycount(self, date1, date2):
|
date1 = parsefun(date1)<EOL>date2 = parsefun(date2)<EOL>if date1 == date2:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>elif date1 > date2:<EOL><INDENT>date1, date2 = date2, date1<EOL>direction = -<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>direction = <NUM_LIT:1><EOL><DEDENT>ndays = self._workdaycount(date1, date2)<EOL>if self.holidays:<EOL><INDENT>holidays = self.holidays <EOL>if date1 > holidays[-<NUM_LIT:1>]:<EOL><INDENT>warn('<STR_LIT>''<STR_LIT>' %(date1, date2))<EOL><DEDENT>elif date2 < holidays[<NUM_LIT:0>]:<EOL><INDENT>warn('<STR_LIT>''<STR_LIT>' %(date1, date2))<EOL><DEDENT>else:<EOL><INDENT>if date1 < holidays[<NUM_LIT:0>]:<EOL><INDENT>warn('<STR_LIT>''<STR_LIT>' %(date1, date2))<EOL><DEDENT>if date2 > holidays[-<NUM_LIT:1>]:<EOL><INDENT>warn('<STR_LIT>''<STR_LIT>' %(date1, date2))<EOL><DEDENT>i = bisect.bisect_right(holidays, date1)<EOL>while holidays[i] <= date2:<EOL><INDENT>ndays -= <NUM_LIT:1><EOL>i += <NUM_LIT:1><EOL>if i == len(holidays):<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return ndays * direction<EOL>
|
Count business days between two dates, taking holidays into
consideration.
Args:
date1 (date, datetime or str): Date start of interval.
date2 (date, datetime or str): Date end of interval.
Note:
The adopted notation is COB to COB, so effectively date1 is not
included in the calculation result.
Example:
>>> cal = Calendar()
>>> date1 = datetime.datetime.today()
>>> date2 = cal.addbusdays(date1, 1)
>>> cal.busdaycount(date1, date2)
1
Returns:
int: Number of business days between the two dates. If the dates
are equal the result is zero. If date1 > date2 the result is
negative.
|
f4274:c1:m9
|
@staticmethod<EOL><INDENT>def caleom(date):<DEDENT>
|
date = parsefun(date)<EOL>date += datetime.timedelta(days=<NUM_LIT:32>-date.day)<EOL>date -= datetime.timedelta(days=date.day)<EOL>return date<EOL>
|
Adjust date to last day of the month, regardless of work days.
Args:
date (date, datetime or str): Date to be adjusted.
Returns:
datetime: Adjusted date.
|
f4274:c1:m10
|
def buseom(self, date):
|
return self.adjust(self.caleom(date), PREVIOUS)<EOL>
|
Adjust date to last business day of the month, taking holidays into
consideration.
Args:
date (date, datetime or str): Date to be adjusted.
Returns:
datetime: Adjusted date.
|
f4274:c1:m11
|
def range(self, date1, date2):
|
date1 = self.adjust(parsefun(date1), FOLLOWING)<EOL>date2 = parsefun(date2)<EOL>holidays = []<EOL>holidx = <NUM_LIT:0><EOL>if len(self.holidays):<EOL><INDENT>index1 = bisect.bisect_left(self.holidays, date1)<EOL>index2 = bisect.bisect_left(self.holidays, date2)<EOL>if index2 > index1:<EOL><INDENT>holidays = self.holidays[index1:index2]<EOL><DEDENT><DEDENT>datewk = date1.weekday()<EOL>while date1 < date2:<EOL><INDENT>if (holidx < len(holidays)) and (holidays[holidx] == date1):<EOL><INDENT>holidx += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>yield date1<EOL><DEDENT>date1 += datetime.timedelta(days=self.weekdaymap[datewk].offsetnext)<EOL>datewk = self.weekdaymap[datewk].nextworkday<EOL><DEDENT>
|
Generate business days between two dates, taking holidays into
consideration.
Args:
date1 (date, datetime or str): Date start of interval.
date2 (date, datetime or str): Date end of interval, not included.
Note:
All business days between date1 (inc) and date2 (exc) are returned,
and date2 must be bigger than date1.
Yields:
datetime: Business days in the specified range.
|
f4274:c1:m12
|
def read(*paths):
|
with open(os.path.join(*paths), '<STR_LIT:r>') as f:<EOL><INDENT>return f.read()<EOL><DEDENT>
|
Build a file path from *paths* and return the contents.
|
f4275:m0
|
def migrate_origin_locations(apps, _):
|
Country = apps.get_model("<STR_LIT>", "<STR_LIT>")<EOL>GeographicLocation = apps.get_model("<STR_LIT>", "<STR_LIT>")<EOL>WorkOrigin = apps.get_model("<STR_LIT>", "<STR_LIT>")<EOL>WorkBase = apps.get_model("<STR_LIT>", "<STR_LIT>")<EOL>countries = dict(((c.iso_country, c) for c in Country.objects.all()))<EOL>for w in WorkBase.objects.all():<EOL><INDENT>if w.origin_country or w.origin_state_province or w.origin_city or w.origin_neighborhood or w.origin_colloquial:<EOL><INDENT>loc, created = GeographicLocation.objects.get_or_create(<EOL>country=countries.get(w.origin_country, None),<EOL>state_province=w.origin_state_province,<EOL>city=w.origin_city,<EOL>neighborhood=w.origin_neighborhood,<EOL>colloquial_historical=w.origin_colloquial<EOL>)<EOL>WorkOrigin.objects.create(work=w, geographic_location=loc)<EOL><DEDENT><DEDENT>
|
Copy origin_* fields to a new GeographicLocation.
|
f4304:m0
|
def work(request, slug):
|
<EOL>item = get_object_or_404(models.WorkBase.objects.visible(), slug=slug)<EOL>if not item:<EOL><INDENT>raise Http404<EOL><DEDENT>context = RequestContext(request, {<EOL>'<STR_LIT>': item,<EOL>'<STR_LIT>': item,<EOL>})<EOL>template = '<STR_LIT>'<EOL>return TemplateResponse(request, template, context)<EOL>
|
:param request: Django request object.
:param slug: The `slug` associated with the work.
:return: TemplateResponse
|
f4318:m0
|
def creator(request, slug):
|
<EOL>item = get_object_or_404(models.CreatorBase.objects.visible(), slug=slug)<EOL>if not item:<EOL><INDENT>raise Http404<EOL><DEDENT>context = RequestContext(request, {<EOL>'<STR_LIT>': item,<EOL>'<STR_LIT>': item,<EOL>})<EOL>template = '<STR_LIT>'<EOL>return TemplateResponse(request, template, context)<EOL>
|
:param request: Django request object.
:param slug: The `slug` associated with the creator.
:return: TemplateResponse
|
f4318:m1
|
def derive_and_set_name_fields_and_slug(<EOL>self, set_name_sort=True, set_slug=True<EOL>):
|
super(PersonCreator, self).derive_and_set_name_fields_and_slug(<EOL>set_name_sort=False, set_slug=False)<EOL>person_names = [<EOL>name for name in [self.name_family, self.name_given]<EOL>if not is_empty(name)<EOL>]<EOL>if set_name_sort and is_empty(self.name_sort):<EOL><INDENT>if person_names:<EOL><INDENT>self.name_sort = '<STR_LIT:U+002CU+0020>'.join(person_names)<EOL><DEDENT>else:<EOL><INDENT>self.name_sort = self.name_full<EOL><DEDENT><DEDENT>if set_slug and is_empty(self.slug):<EOL><INDENT>if person_names:<EOL><INDENT>self.slug = slugify('<STR_LIT:U+0020>'.join(person_names))<EOL><DEDENT>else:<EOL><INDENT>self.slug = slugify(self.name_full)<EOL><DEDENT><DEDENT>
|
Override this method from `CreatorBase` to handle additional name
fields for Person creators.
This method is called during `save()`
|
f4324:c0:m0
|
def lifespan_for_web(self, join="<STR_LIT>"):
|
birth = "<STR_LIT:U+002CU+0020>".join(filter(None, (<EOL>self.start_date_display,<EOL>self.start_place<EOL>)))<EOL>death = "<STR_LIT:U+002CU+0020>".join(filter(None, (<EOL>self.end_date_display,<EOL>self.end_place<EOL>)))<EOL>if death and not birth:<EOL><INDENT>if not death.startswith("<STR_LIT>"):<EOL><INDENT>return mark_safe("<STR_LIT>" + death)<EOL><DEDENT><DEDENT>return mark_safe(join.join(filter(None, (birth, death))))<EOL>
|
Returns lifespan formatted for the web, for example:
1850 - 1922
1850, Cologne - 1922, Berlin
1968 -
n.d. - 1540s
|
f4324:c0:m1
|
def creators_grouped_by_role(self):
|
role = -<NUM_LIT:1><EOL>creators = []<EOL>for wc in self:<EOL><INDENT>if wc.role != role:<EOL><INDENT>if creators:<EOL><INDENT>yield (role, creators)<EOL><DEDENT>role = wc.role<EOL>creators = []<EOL><DEDENT>creators.append(wc.creator)<EOL><DEDENT>if creators:<EOL><INDENT>yield (role, creators)<EOL><DEDENT>
|
:return: A generator yielding 2-tuples of (role, [creators]) where
adjacent creators who share the same role are grouped together.
|
f4371:c0:m0
|
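The generator above groups adjacent entries that share a role; a standalone equivalent using itertools.groupby on (role, creator) pairs:

```python
import itertools

def grouped_by_role(pairs):
    # pairs: iterable of (role, creator), already ordered by role.
    for role, group in itertools.groupby(pairs, key=lambda p: p[0]):
        yield role, [creator for _, creator in group]

pairs = [("author", "A"), ("author", "B"), ("editor", "C")]
assert list(grouped_by_role(pairs)) == [("author", ["A", "B"]), ("editor", ["C"])]
```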
def images_grouped_by_type(self):
|
type = -<NUM_LIT:1><EOL>images = []<EOL>for wc in self:<EOL><INDENT>if wc.type != type:<EOL><INDENT>if images:<EOL><INDENT>yield (type, images)<EOL><DEDENT>type = wc.type<EOL>images = []<EOL><DEDENT>images.append(wc.image)<EOL><DEDENT>if images:<EOL><INDENT>yield (type, images)<EOL><DEDENT>
|
:return: A generator yielding 2-tuples of (type, [images]) where
adjacent images that share the same type are grouped together.
|
f4371:c1:m0
|
def derive_and_set_name_fields_and_slug(<EOL>self, set_name_sort=True, set_slug=True<EOL>):
|
<EOL>if is_empty(self.name_full):<EOL><INDENT>if not is_empty(self.name_display):<EOL><INDENT>self.name_full = self.name_display<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>u"<STR_LIT>" % type(self).__name__)<EOL><DEDENT><DEDENT>if set_name_sort and is_empty(self.name_sort):<EOL><INDENT>self.name_sort = self.derive_sort_name()<EOL><DEDENT>if set_slug and is_empty(self.slug):<EOL><INDENT>self.slug = slugify(self.name_display or self.name_full)<EOL><DEDENT>
|
Derive subordinate name_* field values from the `name_full` field
unless these fields are set in their own right.
This method is called during `save()`
|
f4375:c1:m3
|
def get_works(self):
|
qs = self.get_draft().works<EOL>return qs.filter(publishing_linked=None)<EOL>
|
:return: The works that should be presented as visible on the front
end. If self is draft, show visible related items. If self is
published, show published related items.
Normal behaviour is to return published works if possible
AND draft works if they haven't been published. Draft works are
to be shown without links.
|
f4375:c1:m5
|
def get_works_count(self):
|
return self.get_works().count()<EOL>
|
To be used in Admin listings
|
f4375:c1:m6
|
def get_roles(self):
|
work_ids = self.get_works().values_list('<STR_LIT:id>', flat=True)<EOL>return self.works.through.objects.filter(<EOL>creator=self.get_draft(),<EOL>work_id__in=work_ids,<EOL>).select_related('<STR_LIT>')<EOL>
|
Return the m2m relations connecting me to works
|
f4375:c1:m11
|
def get_primary_roles(self):
|
return self.get_roles().filter(is_primary=True)<EOL>
|
Return the m2m relations connecting me to works as primary creator
|
f4375:c1:m12
|
def derive_and_set_slug(self, set_name_sort=True, set_slug=True):
|
<EOL>if is_empty(self.title):<EOL><INDENT>raise ValueError(<EOL>u"<STR_LIT>" % type(self).__name__)<EOL><DEDENT>if set_slug and is_empty(self.slug):<EOL><INDENT>self.slug = slugify(self.title)<EOL><DEDENT>
|
Derive `slug` field from `title` unless it is set in its own right.
This method is called during `save()`
|
f4375:c3:m2
|
def get_creators(self):
|
qs = self.get_draft().creators<EOL>return qs.filter(publishing_linked=None)<EOL>
|
:return: The creators that should be presented as visible on the front
end.
Normal behaviour is to return published creators if possible
AND draft creators if they haven't been published. Draft creators are
to be shown without links.
|
f4375:c3:m9
|
def get_roles(self):
|
creator_ids = self.get_creators().values_list('<STR_LIT:id>', flat=True)<EOL>return self.creators.through.objects.filter(<EOL>work=self.get_draft(),<EOL>creator_id__in=creator_ids,<EOL>).select_related('<STR_LIT>')<EOL>
|
Return the m2m relations connecting me to creators.
There's some publishing-related complexity here. The role relations
(self.creators.through) connect to draft objects, which then need to
be modified to point to visible() objects.
|
f4375:c3:m10
|
def get_primary_roles(self):
|
return self.get_roles().filter(is_primary=True)<EOL>
|
Return the m2m relations connecting me to creators as primary creator
|
f4375:c3:m11
|
def update_json_analysis(analysis, j):
|
def _analyze_list(l, parent="<STR_LIT>"):<EOL><INDENT>for v in l:<EOL><INDENT>if isinstance(v, (dict, CaseInsensitiveDict)):<EOL><INDENT>_analyze_json(v, parent=parent)<EOL><DEDENT>elif isinstance(v, list):<EOL><INDENT>_analyze_list(v, parent=parent+"<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>analysis[parent].add(v)<EOL><DEDENT><DEDENT><DEDENT>def _analyze_json(d, parent="<STR_LIT>"):<EOL><INDENT>for k, v in d.items():<EOL><INDENT>if parent:<EOL><INDENT>path = "<STR_LIT:.>".join([parent, k])<EOL><DEDENT>else:<EOL><INDENT>path = k<EOL><DEDENT>if isinstance(v, (dict, CaseInsensitiveDict)):<EOL><INDENT>_analyze_json(v, parent=path)<EOL><DEDENT>elif isinstance(v, list):<EOL><INDENT>_analyze_list(v, parent=path+"<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>analysis[path].add(v)<EOL><DEDENT><DEDENT><DEDENT>if isinstance(j, list):<EOL><INDENT>_analyze_list(j)<EOL><DEDENT>if isinstance(j, (dict, CaseInsensitiveDict)):<EOL><INDENT>_analyze_json(j)<EOL><DEDENT>
|
Step through the items in a piece of json, and update an analysis dict
with the values found.
|
f4385:m0
|
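The path separators in the body above are partially masked; the '.' join is visible in the mask tag, while the list-traversal marker is not, so "[]" below is an assumption. The analysis walks every leaf of the JSON and records its value under the corresponding path.

```python
from collections import defaultdict

def analyze(j, analysis=None, parent=""):
    # Record every leaf value under a dotted path; "[]" (an assumption here)
    # marks descent into a list.
    analysis = analysis if analysis is not None else defaultdict(set)
    if isinstance(j, dict):
        for k, v in j.items():
            analyze(v, analysis, f"{parent}.{k}" if parent else k)
    elif isinstance(j, list):
        for v in j:
            analyze(v, analysis, parent + "[]")
    else:
        analysis[parent].add(j)
    return analysis

result = analyze({"a": {"b": [1, 2]}, "c": "x"})
assert result == {"a.b[]": {1, 2}, "c": {"x"}}
```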
def wikipedia_slugify(value, do_unidecode=False):
|
if do_unidecode:<EOL><INDENT>value = unidecode(value)<EOL><DEDENT>value = value.strip()<EOL>return mark_safe(re.sub('<STR_LIT>', '<STR_LIT:_>', value))<EOL>
|
Converts to ASCII via unidecode.
Converts spaces to underscore.
Removes characters that
aren't alphanumerics, underscores, or hyphens.
Preserves case.
Also strips leading and trailing whitespace.
|
f4386:m0
|
def ensure_unique(qs, field_name, value, exclude_id=None):
|
orig = value<EOL>if not value:<EOL><INDENT>value = "<STR_LIT:None>"<EOL><DEDENT>for x in itertools.count(<NUM_LIT:1>):<EOL><INDENT>if not qs.exclude(id=exclude_id).filter(**{field_name: value}).exists():<EOL><INDENT>break<EOL><DEDENT>if orig:<EOL><INDENT>value = '<STR_LIT>' % (orig, x)<EOL><DEDENT>else:<EOL><INDENT>value = '<STR_LIT>' % x<EOL><DEDENT><DEDENT>return value<EOL>
|
Makes sure that `value` is unique on model.fieldname, and nonempty.
|
f4386:m2
|
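The suffix format string is masked in the body above; here is a standalone sketch of the uniquifying loop it describes, using a plain set in place of the Django queryset and a hypothetical '-N' suffix.

```python
import itertools

def ensure_unique(existing, value):
    orig = value or "None"          # empty values fall back to "None"
    value = orig
    for x in itertools.count(1):
        if value not in existing:
            return value
        value = "%s-%s" % (orig, x)  # hypothetical suffix format

taken = {"rose", "rose-1"}
assert ensure_unique(taken, "rose") == "rose-2"
assert ensure_unique(taken, "tulip") == "tulip"
assert ensure_unique(taken, "") == "None"
```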
def ndashify(s):
|
return re.sub(r'<STR_LIT>', '<STR_LIT>', str(s))<EOL>
|
replace ' - ' with an n-dash character
|
f4386:m4
|
def fix_line_breaks(s):
|
l = s.splitlines()<EOL>x = [i.strip() for i in l]<EOL>x = [i for i in x if i] <EOL>return "<STR_LIT:\n>".join(x)<EOL>
|
Convert \r\n and \r to \n chars. Strip any leading or trailing whitespace
on each line. Remove blank lines.
|
f4386:m5
|
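An equivalent standalone sketch of the normalization described above (same behaviour: split on any line-break style, trim each line, drop blanks):

```python
def fix_line_breaks(s):
    lines = (line.strip() for line in s.splitlines())
    return "\n".join(line for line in lines if line)

assert fix_line_breaks("a \r\n\r\n  b\rc\n") == "a\nb\nc"
```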
def strip_line_breaks(s):
|
return re.sub(r'<STR_LIT>', '<STR_LIT:U+0020>', s).strip()<EOL>
|
Remove \r and \n chars, replacing with a space. Strip leading/trailing
whitespace on each line. Remove blank lines.
|
f4386:m6
|
def origin_data(self):
|
raise NotImplementedError<EOL>
|
:return: An iterable of the datasource that is being extracted
|
f4387:c0:m0
|
def items_to_extract(self, offset=<NUM_LIT:0>, length=None):
|
endoffset = length and offset + length<EOL>qs = self.origin_data()[offset:endoffset]<EOL>self.items_to_extract_length = qs.count()<EOL>return qs<EOL>
|
Return an iterable of specific items to extract.
As a side-effect, set self.items_to_extract_length.
:param offset: where to start extracting
:param length: how many to extract
:return: An iterable of the specific items to extract.
|
f4387:c0:m1
|