| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q8400
|
IrcObject.from_config
|
train
|
def from_config(cls, cfg, **kwargs):
"""return an instance configured with the ``cfg`` dict"""
cfg = dict(cfg, **kwargs)
pythonpath = cfg.get('pythonpath', [])
if 'here' in cfg:
pythonpath.append(cfg['here'])
for path in pythonpath:
sys.path.append(os.path.expanduser(path))
prog = cls.server and 'irc3d' or 'irc3'
if cfg.get('debug'):
cls.venusian_categories.append(prog + '.debug')
if cfg.get('interactive'): # pragma: no cover
import irc3.testing
context = getattr(irc3.testing, cls.__name__)(**cfg)
else:
context = cls(**cfg)
if cfg.get('raw'):
context.include('irc3.plugins.log',
venusian_categories=[prog + '.debug'])
return context
|
python
|
{
"resource": ""
}
|
q8401
|
AsyncLibrary.async_run
|
train
|
def async_run(self, keyword, *args, **kwargs):
''' Executes the provided Robot Framework keyword in a separate thread and immediately returns a handle to be used with async_get '''
handle = self._last_thread_handle
thread = self._threaded(keyword, *args, **kwargs)
thread.start()
self._thread_pool[handle] = thread
self._last_thread_handle += 1
return handle
|
python
|
{
"resource": ""
}
|
q8402
|
AsyncLibrary.async_get
|
train
|
def async_get(self, handle):
''' Blocks until the thread created by async_run returns '''
assert handle in self._thread_pool, 'Invalid async call handle'
result = self._thread_pool[handle].result_queue.get()
del self._thread_pool[handle]
return result
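# Hedged usage sketch (illustrative, not part of the library): assumes an
# AsyncLibrary instance inside a running Robot Framework suite and a keyword
# named 'Long Running Task' defined there.
#
# lib = AsyncLibrary()
# handle = lib.async_run('Long Running Task', 'some argument')
# ... run other keywords while the thread works ...
# result = lib.async_get(handle)   # blocks until the thread posts its result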
|
python
|
{
"resource": ""
}
|
q8403
|
AsyncLibrary._get_handler_from_keyword
|
train
|
def _get_handler_from_keyword(self, keyword):
''' Gets the Robot Framework handler associated with the given keyword '''
if EXECUTION_CONTEXTS.current is None:
raise RobotNotRunningError('Cannot access execution context')
return EXECUTION_CONTEXTS.current.get_handler(keyword)
|
python
|
{
"resource": ""
}
|
q8404
|
PMMail._set_attachments
|
train
|
def _set_attachments(self, value):
'''
A special set function to ensure
we're setting with a list
'''
if value is None:
setattr(self, '_PMMail__attachments', [])
elif isinstance(value, list):
setattr(self, '_PMMail__attachments', value)
else:
raise TypeError('Attachments must be a list')
|
python
|
{
"resource": ""
}
|
q8405
|
PMMail._check_values
|
train
|
def _check_values(self):
'''
Make sure all values are of the appropriate
type and are not missing.
'''
if not self.__api_key:
raise PMMailMissingValueException('Cannot send an e-mail without a Postmark API Key')
elif not self.__sender:
raise PMMailMissingValueException('Cannot send an e-mail without a sender (.sender field)')
elif not self.__to:
raise PMMailMissingValueException('Cannot send an e-mail without at least one recipient (.to field)')
elif (self.__template_id or self.__template_model) and not all([self.__template_id, self.__template_model]):
raise PMMailMissingValueException(
'Cannot send a template e-mail without both template_id and template_model set')
elif not any([self.__template_id, self.__template_model, self.__subject]):
raise PMMailMissingValueException('Cannot send an e-mail without a subject')
elif not self.__html_body and not self.__text_body and not self.__template_id:
raise PMMailMissingValueException('Cannot send an e-mail without either an HTML or text version of your e-mail body')
if self.__track_opens and not self.__html_body:
print('WARNING: .track_opens set to True with no .html_body set. Tracking opens will not work; message will still send.')
|
python
|
{
"resource": ""
}
|
q8406
|
PMMail.send
|
train
|
def send(self, test=None):
'''
Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark
'''
self._check_values()
# Set up message dictionary
json_message = self.to_json_message()
# if (self.__html_body and not self.__text_body) and self.__multipart:
# # TODO: Set up regex to strip html
# pass
# If test is not specified, attempt to read the Django setting
if test is None:
try:
from django.conf import settings as django_settings
test = getattr(django_settings, "POSTMARK_TEST_MODE", None)
except ImportError:
pass
# If this is a test, just print the message
if test:
print('JSON message is:\n%s' % json.dumps(json_message, cls=PMJSONEncoder))
return
if self.__template_id:
endpoint_url = __POSTMARK_URL__ + 'email/withTemplate/'
else:
endpoint_url = __POSTMARK_URL__ + 'email'
# Set up the url Request
req = Request(
endpoint_url,
json.dumps(json_message, cls=PMJSONEncoder).encode('utf8'),
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark: %s' % json_message
result = urlopen(req)
jsontxt = result.read().decode('utf8')
result.close()
if result.code == 200:
self.message_id = json.loads(jsontxt).get('MessageID', None)
return True
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
if err.code == 401:
raise PMMailUnauthorizedException('Sending Unauthorized - incorrect API key.', err)
elif err.code == 422:
try:
jsontxt = err.read().decode('utf8')
jsonobj = json.loads(jsontxt)
desc = jsonobj['Message']
error_code = jsonobj['ErrorCode']
except KeyError:
raise PMMailUnprocessableEntityException('Unprocessable Entity: Description not given')
if error_code == 406:
raise PMMailInactiveRecipientException('You tried to send email to a recipient that has been marked as inactive.')
raise PMMailUnprocessableEntityException('Unprocessable Entity: %s' % desc)
elif err.code == 500:
raise PMMailServerErrorException('Internal server error at Postmark. Admins have been alerted.', err)
except URLError as err:
if hasattr(err, 'reason'):
raise PMMailURLException('URLError: Failed to reach the server: %s (See "inner_exception" for details)' % err.reason, err)
elif hasattr(err, 'code'):
raise PMMailURLException('URLError: %d: The server couldn\'t fulfill the request. (See "inner_exception" for details)' % err.code, err)
else:
raise PMMailURLException('URLError: The server couldn\'t fulfill the request. (See "inner_exception" for details)', err)
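# Hedged usage sketch (illustrative only): keyword arguments follow the PMMail
# fields referenced in _check_values and _build_message; the token and
# addresses below are placeholders.
#
# message = PMMail(api_key='POSTMARK-SERVER-TOKEN',
#                  sender='sender@example.com',
#                  to='recipient@example.com',
#                  subject='Hello',
#                  text_body='Plain text body')
# message.send(test=True)   # test=True only prints the JSON payload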
|
python
|
{
"resource": ""
}
|
q8407
|
PMBatchMail.remove_message
|
train
|
def remove_message(self, message):
'''
Remove a message from the batch
'''
if message in self.__messages:
self.__messages.remove(message)
|
python
|
{
"resource": ""
}
|
q8408
|
PMBounceManager.delivery_stats
|
train
|
def delivery_stats(self):
'''
Returns a summary of inactive emails and bounces by type.
'''
self._check_values()
req = Request(
__POSTMARK_URL__ + 'deliverystats',
None,
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark:'
result = urlopen(req)
with closing(result):
if result.code == 200:
return json.loads(result.read())
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
return err
|
python
|
{
"resource": ""
}
|
q8409
|
PMBounceManager.get_all
|
train
|
def get_all(self, inactive='', email_filter='', tag='', count=25, offset=0):
'''
Fetches a portion of bounces according to the specified input criteria. The count and offset
parameters are mandatory. You should never retrieve all bounces as that could be excessively
slow for your application. To know how many bounces you have, you need to request a portion
first, usually the first page, and the service will return the count in the TotalCount property
of the response.
'''
self._check_values()
params = '?inactive=' + inactive + '&emailFilter=' + email_filter +'&tag=' + tag
params += '&count=' + str(count) + '&offset=' + str(offset)
req = Request(
__POSTMARK_URL__ + 'bounces' + params,
None,
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark:'
result = urlopen(req)
with closing(result):
if result.code == 200:
return json.loads(result.read())
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
return err
|
python
|
{
"resource": ""
}
|
q8410
|
PMBounceManager.activate
|
train
|
def activate(self, bounce_id):
'''
Activates a deactivated bounce.
'''
self._check_values()
req_url = '/bounces/' + str(bounce_id) + '/activate'
# print req_url
h1 = HTTPConnection('api.postmarkapp.com')
dta = urlencode({"data": "blank"}).encode('utf8')
req = h1.request(
'PUT',
req_url,
dta,
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
r = h1.getresponse()
return json.loads(r.read())
|
python
|
{
"resource": ""
}
|
q8411
|
EmailBackend._build_message
|
train
|
def _build_message(self, message):
"""A helper method to convert a PMEmailMessage to a PMMail"""
if not message.recipients():
return False
recipients = ','.join(message.to)
recipients_cc = ','.join(message.cc)
recipients_bcc = ','.join(message.bcc)
text_body = message.body
html_body = None
if isinstance(message, EmailMultiAlternatives):
for alt in message.alternatives:
if alt[1] == "text/html":
html_body = alt[0]
break
elif getattr(message, 'content_subtype', None) == 'html':
# Don't send html content as plain text
text_body = None
html_body = message.body
reply_to = ','.join(message.reply_to)
custom_headers = {}
if message.extra_headers and isinstance(message.extra_headers, dict):
if 'Reply-To' in message.extra_headers:
reply_to = message.extra_headers.pop('Reply-To')
if len(message.extra_headers):
custom_headers = message.extra_headers
attachments = []
if message.attachments and isinstance(message.attachments, list):
if len(message.attachments):
for item in message.attachments:
if isinstance(item, tuple):
(f, content, m) = item
content = base64.b64encode(content)
# b64encode returns bytes on Python 3. PMMail needs a
# str (for JSON serialization). Convert on Python 3
# only to avoid a useless performance hit on Python 2.
if not isinstance(content, str):
content = content.decode()
attachments.append((f, content, m))
else:
attachments.append(item)
postmark_message = PMMail(api_key=self.api_key,
subject=message.subject,
sender=message.from_email,
to=recipients,
cc=recipients_cc,
bcc=recipients_bcc,
text_body=text_body,
html_body=html_body,
reply_to=reply_to,
custom_headers=custom_headers,
attachments=attachments)
postmark_message.tag = getattr(message, 'tag', None)
postmark_message.track_opens = getattr(message, 'track_opens', False)
return postmark_message
|
python
|
{
"resource": ""
}
|
q8412
|
Nature.handle_starttag
|
train
|
def handle_starttag(self, tag, attrs):
'''
PDF link handler; never gets explicitly called by user
'''
if tag == 'a' and ( ('class', 'download-pdf') in attrs or ('id', 'download-pdf') in attrs ):
for attr in attrs:
if attr[0] == 'href':
self.download_link = 'http://www.nature.com' + attr[1]
|
python
|
{
"resource": ""
}
|
q8413
|
genpass
|
train
|
def genpass(pattern=r'[\w]{32}'):
"""generates a password with random chararcters
"""
try:
return rstr.xeger(pattern)
except re.error as e:
raise ValueError(str(e))
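# Hedged usage sketch: rstr.xeger returns a random string matching the regex,
# so the default pattern yields 32 word characters.
#
# password = genpass()                # 32 random word characters
# pin = genpass(pattern=r'[0-9]{6}')  # e.g. a 6-digit numeric code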
|
python
|
{
"resource": ""
}
|
q8414
|
CStruct.unpack
|
train
|
def unpack(self, string):
"""
Unpack the string containing packed C structure data
"""
if string is None:
string = CHAR_ZERO * self.__size__
data = struct.unpack(self.__fmt__, string)
i = 0
for field in self.__fields__:
(vtype, vlen) = self.__fields_types__[field]
if vtype == 'char': # string
setattr(self, field, data[i])
i = i + 1
elif isinstance(vtype, CStructMeta):
num = int(vlen / vtype.size)
if num == 1: # single struct
sub_struct = vtype()
sub_struct.unpack(EMPTY_BYTES_STRING.join(data[i:i+sub_struct.size]))
setattr(self, field, sub_struct)
i = i + sub_struct.size
else: # multiple struct
sub_structs = []
for j in range(0, num):
sub_struct = vtype()
sub_struct.unpack(EMPTY_BYTES_STRING.join(data[i:i+sub_struct.size]))
i = i + sub_struct.size
sub_structs.append(sub_struct)
setattr(self, field, sub_structs)
elif vlen == 1:
setattr(self, field, data[i])
i = i + vlen
else:
setattr(self, field, list(data[i:i+vlen]))
i = i + vlen
|
python
|
{
"resource": ""
}
|
q8415
|
CStruct.pack
|
train
|
def pack(self):
"""
Pack the structure data into a string
"""
data = []
for field in self.__fields__:
(vtype, vlen) = self.__fields_types__[field]
if vtype == 'char': # string
data.append(getattr(self, field))
elif isinstance(vtype, CStructMeta):
num = int(vlen / vtype.size)
if num == 1: # single struct
v = getattr(self, field, vtype())
v = v.pack()
if sys.version_info >= (3, 0):
v = ([bytes([x]) for x in v])
data.extend(v)
else: # multiple struct
values = getattr(self, field, [])
for j in range(0, num):
try:
v = values[j]
except:
v = vtype()
v = v.pack()
if sys.version_info >= (3, 0):
v = ([bytes([x]) for x in v])
data.extend(v)
elif vlen == 1:
data.append(getattr(self, field))
else:
v = getattr(self, field)
v = v[:vlen] + [0] * (vlen - len(v))
data.extend(v)
return struct.pack(self.__fmt__, *data)
|
python
|
{
"resource": ""
}
|
q8416
|
get_all
|
train
|
def get_all():
"""Get all subclasses of BaseImporter from module and return and generator
"""
_import_all_importer_files()
for module in (value for key, value in globals().items()
if key in __all__):
for klass_name, klass in inspect.getmembers(module, inspect.isclass):
if klass is not BaseImporter and issubclass(klass, BaseImporter):
yield klass
for klass in _get_importers_from_entry_points():
yield klass
|
python
|
{
"resource": ""
}
|
q8417
|
list_database
|
train
|
def list_database(db):
"""Print credential as a table"""
credentials = db.credentials()
if credentials:
table = Table(
db.config['headers'],
table_format=db.config['table_format'],
colors=db.config['colors'],
hidden=db.config['hidden'],
hidden_string=db.config['hidden_string'],
)
click.echo(table.render(credentials))
|
python
|
{
"resource": ""
}
|
q8418
|
check_config
|
train
|
def check_config(db, level):
"""Show current configuration for shell"""
if level == 'global':
configuration = config.read(config.HOMEDIR, '.passpierc')
elif level == 'local':
configuration = config.read(os.path.join(db.path))
elif level == 'current':
configuration = db.config
if configuration:
click.echo(yaml.safe_dump(configuration, default_flow_style=False))
|
python
|
{
"resource": ""
}
|
q8419
|
RequestHandler.process
|
train
|
def process(self, data=None):
"""Fetch incoming data from the Flask request object when no data is supplied
to the process method. By default, the RequestHandler expects the
incoming data to be sent as JSON.
"""
return super(RequestHandler, self).process(data=data or self.get_request_data())
|
python
|
{
"resource": ""
}
|
q8420
|
DBMixin.save
|
train
|
def save(self, obj):
"""Add ``obj`` to the SQLAlchemy session and commit the changes back to
the database.
:param obj: SQLAlchemy object being saved
:returns: The saved object
"""
session = self.get_db_session()
session.add(obj)
session.commit()
return obj
|
python
|
{
"resource": ""
}
|
q8421
|
DBObjectMixin.filter_by_id
|
train
|
def filter_by_id(self, query):
"""Apply the primary key filter to query to filter the results for a specific
instance by id.
The filter applied by this method can be controlled by default using the
url_id_param attribute.
:param query: SQLAlchemy Query
:returns: A SQLAlchemy Query object
"""
if self.model is None:
raise ArrestedException('DBObjectMixin requires a model to be set.')
idfield = getattr(self.model, self.model_id_param, None)
if not idfield:
raise ArrestedException('DBObjectMixin could not find a valid Model.id.')
return query.filter(idfield == self.kwargs[self.url_id_param])
|
python
|
{
"resource": ""
}
|
q8422
|
DBObjectMixin.delete_object
|
train
|
def delete_object(self, obj):
"""Deletes an object from the session by calling session.delete and then commits
the changes to the database.
:param obj: The SQLAlchemy instance being deleted
:returns: None
"""
session = self.get_db_session()
session.delete(obj)
session.commit()
|
python
|
{
"resource": ""
}
|
q8423
|
ArrestedAPI.init_app
|
train
|
def init_app(self, app):
"""Initialise the ArrestedAPI object by storing a pointer to a Flask app object.
This method is typically used when initialisation is deferred.
:param app: Flask application object
Usage::
app = Flask(__name__)
api_v1 = ArrestedAPI()
api_v1.init_app(app)
"""
self.app = app
if self.deferred:
self.register_all(self.deferred)
|
python
|
{
"resource": ""
}
|
q8424
|
Endpoint.dispatch_request
|
train
|
def dispatch_request(self, *args, **kwargs):
"""Dispatch the incoming HTTP request to the appropriate handler.
"""
self.args = args
self.kwargs = kwargs
self.meth = request.method.lower()
self.resource = current_app.blueprints.get(request.blueprint, None)
if not any([self.meth in self.methods, self.meth.upper() in self.methods]):
return self.return_error(405)
self.process_before_request_hooks()
resp = super(Endpoint, self).dispatch_request(*args, **kwargs)
resp = self.make_response(resp)
resp = self.process_after_request_hooks(resp)
return resp
|
python
|
{
"resource": ""
}
|
q8425
|
Endpoint.return_error
|
train
|
def return_error(self, status, payload=None):
"""Error handler called by request handlers when an error occurs and the request
should be aborted.
Usage::
def handle_post_request(self, *args, **kwargs):
self.request_handler = self.get_request_handler()
try:
self.request_handler.process(self.get_data())
except SomeException as e:
self.return_error(400, payload=self.request_handler.errors)
return self.return_create_response()
"""
resp = None
if payload is not None:
payload = json.dumps(payload)
resp = self.make_response(payload, status=status)
if status in [405]:
abort(status)
else:
abort(status, response=resp)
|
python
|
{
"resource": ""
}
|
q8426
|
KimResponseHandler.handle
|
train
|
def handle(self, data, **kwargs):
"""Run serialization for the specified mapper_class.
Supports both .serialize and .many().serialize Kim interfaces.
:param data: Objects to be serialized.
:returns: Serialized data according to mapper configuration
"""
if self.many:
return self.mapper.many(raw=self.raw, **self.mapper_kwargs).serialize(
data, role=self.role
)
else:
return self.mapper(obj=data, raw=self.raw, **self.mapper_kwargs).serialize(
role=self.role
)
|
python
|
{
"resource": ""
}
|
q8427
|
KimRequestHandler.handle_error
|
train
|
def handle_error(self, exp):
"""Called if a Mapper returns MappingInvalid. Should handle the error
and return it in the appropriate format, can be overridden in order
to change the error format.
:param exp: MappingInvalid exception raised
"""
payload = {
"message": "Invalid or incomplete data provided.",
"errors": exp.errors
}
self.endpoint.return_error(self.error_status, payload=payload)
|
python
|
{
"resource": ""
}
|
q8428
|
KimRequestHandler.handle
|
train
|
def handle(self, data, **kwargs):
"""Run marshalling for the specified mapper_class.
Supports both .marshal and .many().marshal Kim interfaces. Handles errors raised
during marshalling and automatically returns a HTTP error response.
:param data: Data to be marshaled.
:returns: Marshaled object according to mapper configuration
:raises: :class:`werkzeug.exceptions.UnprocessableEntity`
"""
try:
if self.many:
return self.mapper.many(raw=self.raw, **self.mapper_kwargs).marshal(
data, role=self.role
)
else:
return self.mapper(
data=data,
obj=self.obj,
partial=self.partial,
**self.mapper_kwargs
).marshal(role=self.role)
except MappingInvalid as e:
self.handle_error(e)
|
python
|
{
"resource": ""
}
|
q8429
|
KimEndpoint.get_response_handler_params
|
train
|
def get_response_handler_params(self, **params):
"""Return a config object that will be used to configure the KimResponseHandler
:returns: a dictionary of config options
:rtype: dict
"""
params = super(KimEndpoint, self).get_response_handler_params(**params)
params['mapper_class'] = self.mapper_class
params['role'] = self.serialize_role
# After a successful attempt to marshal an object has been made, a response
# is generated using the ResponseHandler. Rather than taking the class level
# setting for many by default, pull it from the request handler params config to
# ensure Marshaling and Serializing are run the same way.
if self._is_marshal_request():
req_params = self.get_request_handler_params()
params['many'] = req_params.get('many', self.many)
else:
params['many'] = self.many
return params
|
python
|
{
"resource": ""
}
|
q8430
|
KimEndpoint.get_request_handler_params
|
train
|
def get_request_handler_params(self, **params):
"""Return a config object that will be used to configure the KimRequestHandler
:returns: a dictionary of config options
:rtype: dict
"""
params = super(KimEndpoint, self).get_request_handler_params(**params)
params['mapper_class'] = self.mapper_class
params['role'] = self.marshal_role
params['many'] = False
# when handling a PUT or PATCH request, self.obj will be set.. There might be a
# more robust way to handle this?
params['obj'] = getattr(self, 'obj', None)
params['partial'] = self.is_partial()
return params
|
python
|
{
"resource": ""
}
|
q8431
|
GetListMixin.list_response
|
train
|
def list_response(self, status=200):
"""Pull the processed data from the response_handler and return a response.
:param status: The HTTP status code returned with the response
.. seealso::
:meth:`Endpoint.make_response`
:meth:`Endpoint.handle_get_request`
"""
return self._response(self.response.get_response_data(), status=status)
|
python
|
{
"resource": ""
}
|
q8432
|
CreateMixin.create_response
|
train
|
def create_response(self, status=201):
"""Generate a Response object for a POST request. By default, the newly created
object will be passed to the specified ResponseHandler and will be serialized
as the response body.
"""
self.response = self.get_response_handler()
self.response.process(self.obj)
return self._response(self.response.get_response_data(), status=status)
|
python
|
{
"resource": ""
}
|
q8433
|
fsm.concatenate
|
train
|
def concatenate(*fsms):
'''
Concatenate arbitrarily many finite state machines together.
'''
alphabet = set().union(*[fsm.alphabet for fsm in fsms])
def connect_all(i, substate):
'''
Take a state in the numbered FSM and return a set containing it, plus
(if it's final) the first state from the next FSM, plus (if that's
final) the first state from the next but one FSM, plus...
'''
result = {(i, substate)}
while i < len(fsms) - 1 and substate in fsms[i].finals:
i += 1
substate = fsms[i].initial
result.add((i, substate))
return result
# Use a superset containing states from all FSMs at once.
# We start at the start of the first FSM. If this state is final in the
# first FSM, then we are also at the start of the second FSM. And so on.
initial = set()
if len(fsms) > 0:
initial.update(connect_all(0, fsms[0].initial))
initial = frozenset(initial)
def final(state):
'''If you're in a final state of the final FSM, it's final'''
for (i, substate) in state:
if i == len(fsms) - 1 and substate in fsms[i].finals:
return True
return False
def follow(current, symbol):
'''
Follow the collection of states through all FSMs at once, jumping to the
next FSM if we reach the end of the current one
TODO: improve all follow() implementations to allow for dead metastates?
'''
next = set()
for (i, substate) in current:
fsm = fsms[i]
if substate in fsm.map and symbol in fsm.map[substate]:
next.update(connect_all(i, fsm.map[substate][symbol]))
if len(next) == 0:
raise OblivionError
return frozenset(next)
return crawl(alphabet, initial, final, follow).reduce()
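# Hedged usage sketch: assumes the greenery-style fsm constructor with
# alphabet/states/initial/finals/map keyword arguments (not shown in this
# snippet). Each machine below accepts exactly one symbol.
#
# a = fsm(alphabet={'a'}, states={0, 1}, initial=0, finals={1},
#         map={0: {'a': 1}})
# b = fsm(alphabet={'b'}, states={0, 1}, initial=0, finals={1},
#         map={0: {'b': 1}})
# ab = concatenate(a, b)   # accepts exactly the string "ab"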
|
python
|
{
"resource": ""
}
|
q8434
|
fsm.times
|
train
|
def times(self, multiplier):
'''
Given an FSM and a multiplier, return the multiplied FSM.
'''
if multiplier < 0:
raise Exception("Can't multiply an FSM by " + repr(multiplier))
alphabet = self.alphabet
# metastate is a set of iterations+states
initial = {(self.initial, 0)}
def final(state):
'''If the initial state is final then multiplying doesn't alter that'''
for (substate, iteration) in state:
if substate == self.initial \
and (self.initial in self.finals or iteration == multiplier):
return True
return False
def follow(current, symbol):
next = []
for (substate, iteration) in current:
if iteration < multiplier \
and substate in self.map \
and symbol in self.map[substate]:
next.append((self.map[substate][symbol], iteration))
# final of self? merge with initial on next iteration
if self.map[substate][symbol] in self.finals:
next.append((self.initial, iteration + 1))
if len(next) == 0:
raise OblivionError
return frozenset(next)
return crawl(alphabet, initial, final, follow).reduce()
|
python
|
{
"resource": ""
}
|
q8435
|
fsm.everythingbut
|
train
|
def everythingbut(self):
'''
Return a finite state machine which will accept any string NOT
accepted by self, and will not accept any string accepted by self.
This is more complicated if there are missing transitions, because the
missing "dead" state must now be reified.
'''
alphabet = self.alphabet
initial = {0 : self.initial}
def follow(current, symbol):
next = {}
if 0 in current and current[0] in self.map and symbol in self.map[current[0]]:
next[0] = self.map[current[0]][symbol]
return next
# state is final unless the original was
def final(state):
return not (0 in state and state[0] in self.finals)
return crawl(alphabet, initial, final, follow).reduce()
|
python
|
{
"resource": ""
}
|
q8436
|
fsm.islive
|
train
|
def islive(self, state):
'''A state is "live" if a final state can be reached from it.'''
reachable = [state]
i = 0
while i < len(reachable):
current = reachable[i]
if current in self.finals:
return True
if current in self.map:
for symbol in self.map[current]:
next = self.map[current][symbol]
if next not in reachable:
reachable.append(next)
i += 1
return False
|
python
|
{
"resource": ""
}
|
q8437
|
fsm.cardinality
|
train
|
def cardinality(self):
'''
Consider the FSM as a set of strings and return the cardinality of that
set, or raise an OverflowError if there are infinitely many
'''
num_strings = {}
def get_num_strings(state):
# Many FSMs have at least one oblivion state
if self.islive(state):
if state in num_strings:
if num_strings[state] is None: # "computing..."
# Recursion! There are infinitely many strings recognised
raise OverflowError(state)
return num_strings[state]
num_strings[state] = None # i.e. "computing..."
n = 0
if state in self.finals:
n += 1
if state in self.map:
for symbol in self.map[state]:
n += get_num_strings(self.map[state][symbol])
num_strings[state] = n
else:
# Dead state
num_strings[state] = 0
return num_strings[state]
return get_num_strings(self.initial)
|
python
|
{
"resource": ""
}
|
q8438
|
call_fsm
|
train
|
def call_fsm(method):
'''
Take a method which acts on 0 or more regular expression objects... return a
new method which simply converts them all to FSMs, calls the FSM method
on them instead, then converts the result back to a regular expression.
We do this for several of the more annoying operations.
'''
fsm_method = getattr(fsm.fsm, method.__name__)
def new_method(*legos):
alphabet = set().union(*[lego.alphabet() for lego in legos])
return from_fsm(fsm_method(*[lego.to_fsm(alphabet) for lego in legos]))
return new_method
|
python
|
{
"resource": ""
}
|
q8439
|
from_fsm
|
train
|
def from_fsm(f):
'''
Turn the supplied finite state machine into a `lego` object. This is
accomplished using the Brzozowski algebraic method.
'''
# Make sure the supplied alphabet is kosher. It must contain only single-
# character strings or `fsm.anything_else`.
for symbol in f.alphabet:
if symbol == fsm.anything_else:
continue
if isinstance(symbol, str) and len(symbol) == 1:
continue
raise Exception("Symbol " + repr(symbol) + " cannot be used in a regular expression")
# We need a new state not already used
outside = object()
# The set of strings that would be accepted by this FSM if you started
# at state i is represented by the regex R_i.
# If state i has a sole transition "a" to state j, then we know R_i = a R_j.
# If state i is final, then the empty string is also accepted by this regex.
# And so on...
# From this we can build a set of simultaneous equations in len(f.states)
# variables. This system is easily solved for all variables, but we only
# need one: R_a, where a is the starting state.
# The first thing we need to do is organise the states into order of depth,
# so that when we perform our back-substitutions, we can start with the
# last (deepest) state and therefore finish with R_a.
states = [f.initial]
i = 0
while i < len(states):
current = states[i]
if current in f.map:
for symbol in sorted(f.map[current], key=fsm.key):
next = f.map[current][symbol]
if next not in states:
states.append(next)
i += 1
# Our system of equations is represented like so:
brz = {}
for a in f.states:
brz[a] = {}
for b in f.states | {outside}:
brz[a][b] = nothing
# Populate it with some initial data.
for a in f.map:
for symbol in f.map[a]:
b = f.map[a][symbol]
if symbol == fsm.anything_else:
brz[a][b] |= ~charclass(f.alphabet - {fsm.anything_else})
else:
brz[a][b] |= charclass({symbol})
if a in f.finals:
brz[a][outside] |= emptystring
# Now perform our back-substitution
for i in reversed(range(len(states))):
a = states[i]
# Before the equation for R_a can be substituted into the other
# equations, we need to resolve the self-transition (if any).
# e.g. R_a = 0 R_a | 1 R_b | 2 R_c
# becomes R_a = 0*1 R_b | 0*2 R_c
loop = brz[a][a] * star # i.e. "0*"
del brz[a][a]
for right in brz[a]:
brz[a][right] = loop + brz[a][right]
# Note: even if we're down to our final equation, the above step still
# needs to be performed before anything is returned.
# Now we can substitute this equation into all of the previous ones.
for j in range(i):
b = states[j]
# e.g. substituting R_a = 0*1 R_b | 0*2 R_c
# into R_b = 3 R_a | 4 R_c | 5 R_d
# yields R_b = 30*1 R_b | (30*2|4) R_c | 5 R_d
univ = brz[b][a] # i.e. "3"
del brz[b][a]
for right in brz[a]:
brz[b][right] |= univ + brz[a][right]
return brz[f.initial][outside].reduce()
|
python
|
{
"resource": ""
}
|
q8440
|
multiplier.common
|
train
|
def common(self, other):
'''
Find the shared part of two multipliers. This is the largest multiplier
which can be safely subtracted from both the originals. This may
return the "zero" multiplier.
'''
mandatory = min(self.mandatory, other.mandatory)
optional = min(self.optional, other.optional)
return multiplier(mandatory, mandatory + optional)
|
python
|
{
"resource": ""
}
|
q8441
|
conc.dock
|
train
|
def dock(self, other):
'''
Subtract another conc from this one.
This is the opposite of concatenation. For example, if ABC + DEF = ABCDEF,
then logically ABCDEF - DEF = ABC.
'''
# e.g. self has mults at indices [0, 1, 2, 3, 4, 5, 6] len=7
# e.g. other has mults at indices [0, 1, 2] len=3
new = list(self.mults)
for i in reversed(range(len(other.mults))): # [2, 1, 0]
# e.g. i = 1, j = 7 - 3 + 1 = 5
j = len(self.mults) - len(other.mults) + i
new[j] = new[j].dock(other.mults[i])
if new[j].multiplier == zero:
# omit that mult entirely since it has been factored out
del new[j]
# If the subtraction is incomplete but there is more to
# other.mults, then we have a problem. For example, "ABC{2} - BC"
# subtracts the C successfully but leaves something behind,
# then tries to subtract the B too, which isn't possible
else:
if i != 0:
raise Exception("Can't subtract " + repr(other) + " from " + repr(self))
return conc(*new)
|
python
|
{
"resource": ""
}
|
q8442
|
pattern.dock
|
train
|
def dock(self, other):
'''
The opposite of concatenation. Remove a common suffix from the present
pattern; that is, from each of its constituent concs.
AYZ|BYZ|CYZ - YZ = A|B|C.
'''
return pattern(*[c.dock(other) for c in self.concs])
|
python
|
{
"resource": ""
}
|
q8443
|
delete_name
|
train
|
def delete_name(name):
''' This function doesn't use the plugin. '''
session = create_session()
try:
user = session.query(User).filter_by(name=name).first()
session.delete(user)
session.commit()
except SQLAlchemyError as e:
session.rollback()
raise bottle.HTTPError(500, "Database Error", e)
finally:
session.close()
|
python
|
{
"resource": ""
}
|
q8444
|
SQLAlchemyPlugin.setup
|
train
|
def setup(self, app):
''' Make sure that other installed plugins don't affect the same
keyword argument and check if metadata is available.'''
for other in app.plugins:
if not isinstance(other, SQLAlchemyPlugin):
continue
if other.keyword == self.keyword:
raise bottle.PluginError("Found another SQLAlchemy plugin with "\
"conflicting settings (non-unique keyword).")
elif other.name == self.name:
self.name += '_%s' % self.keyword
if self.create and not self.metadata:
raise bottle.PluginError('Define metadata value to create database.')
|
python
|
{
"resource": ""
}
|
q8445
|
GreenSocket.send_multipart
|
train
|
def send_multipart(self, *args, **kwargs):
"""wrap send_multipart to prevent state_changed on each partial send"""
self.__in_send_multipart = True
try:
msg = super(GreenSocket, self).send_multipart(*args, **kwargs)
finally:
self.__in_send_multipart = False
self.__state_changed()
return msg
|
python
|
{
"resource": ""
}
|
q8446
|
GreenSocket.recv_multipart
|
train
|
def recv_multipart(self, *args, **kwargs):
"""wrap recv_multipart to prevent state_changed on each partial recv"""
self.__in_recv_multipart = True
try:
msg = super(GreenSocket, self).recv_multipart(*args, **kwargs)
finally:
self.__in_recv_multipart = False
self.__state_changed()
return msg
|
python
|
{
"resource": ""
}
|
q8447
|
archive
|
train
|
def archive(source, archive, path_in_arc=None, remove_source=False,
compression=zipfile.ZIP_DEFLATED, compresslevel=-1):
"""Archives a MRIO database as zip file
This function is a wrapper around zipfile.write,
to ease the writing of an archive and removing the source data.
Note
----
In contrast to zipfile.write, this function raises an
error if the data (path + filename) are identical in the zip archive.
Background: the zip standard allows files with the same name and path
to be stored side by side in a zip file. This becomes an issue when unpacking
these files as they overwrite each other upon extraction.
Parameters
----------
source: str or pathlib.Path or list of these
Location of the mrio data (folder).
If not all data should be archived, pass a list of
all files which should be included in the archive (absolute path)
archive: str or pathlib.Path
Full path with filename for the archive.
path_in_arc: string, optional
Path within the archive zip file where data should be stored.
'path_in_arc' must be given without leading dot and slash.
Thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'mrio_v1' pass 'mrio_v1/'.
If None (default) data will be stored in the root of the archive.
remove_source: boolean, optional
If True, deletes the source files from disk (all files
specified in 'source' or the specified directory, depending
on whether a list of files or a directory was passed). If False, leaves the
original files on disk. Also removes all empty directories
in source, including source itself.
compression: ZIP compression method, optional
This is passed to zipfile.write. By default it is set to ZIP_DEFLATED.
NB: This is different from the zipfile default (ZIP_STORED) which would
not give any compression. See
https://docs.python.org/3/library/zipfile.html#zipfile-objects for
further information. Depending on the value given here additional
modules might be necessary (e.g. zlib for ZIP_DEFLATED). Further
information on this can also be found in the zipfile python docs.
compresslevel: int, optional
This is passed to zipfile.write and specifies the compression level.
Acceptable values depend on the method specified at the parameter
'compression'. By default, it is set to -1 which gives a compromise
between speed and size for the ZIP_DEFLATED compression (this is
internally interpreted as 6 as described here:
https://docs.python.org/3/library/zlib.html#zlib.compressobj )
NB: This is only used if python version >= 3.7
Raises
------
FileExistsError: In case a file to be archived already present in the
archive.
"""
archive = Path(archive)
if type(source) is not list:
source_root = str(source)
source_files = [f for f in Path(source).glob('**/*') if f.is_file()]
else:
source_root = os.path.commonpath([str(f) for f in source])
source_files = [Path(f) for f in source]
path_in_arc = '' if not path_in_arc else path_in_arc
arc_file_names = {
str(f): os.path.join(path_in_arc, str(f.relative_to(source_root)))
for f in source_files}
if archive.exists():
with zipfile.ZipFile(file=str(archive), mode='r') as zf:
already_present = zf.namelist()
duplicates = {ff: zf for ff, zf in arc_file_names.items()
if zf in already_present}
if duplicates:
raise FileExistsError(
'These files already exist in {arc} for '
'path_in_arc "{pa}":\n {filelist}'.format(
pa=path_in_arc, arc=archive,
filelist='\n '.join(duplicates.values())))
if sys.version_info.major == 3 and sys.version_info.minor >= 7:
zip_open_para = dict(file=str(archive), mode='a',
compression=compression,
compresslevel=compresslevel)
else:
zip_open_para = dict(file=str(archive), mode='a',
compression=compression)
with zipfile.ZipFile(**zip_open_para) as zz:
for fullpath, zippath in arc_file_names.items():
zz.write(str(fullpath), str(zippath))
if remove_source:
for f in source_files:
os.remove(str(f))
for root, dirs, files in os.walk(source_root, topdown=False):
for name in dirs:
dir_path = os.path.join(root, name)
if not os.listdir(dir_path):
os.rmdir(os.path.join(root, name))
try:
os.rmdir(source_root)
except OSError:
pass
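# Hedged usage sketch: the paths are placeholders. This would zip every file
# below 'mrio_data/' into 'mrio_data.zip' under the archive folder 'mrio_v1/'
# while keeping the source files on disk.
#
# archive(source='mrio_data',
#         archive='mrio_data.zip',
#         path_in_arc='mrio_v1/',
#         remove_source=False)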
|
python
|
{
"resource": ""
}
|
q8448
|
parse_exio12_ext
|
train
|
def parse_exio12_ext(ext_file, index_col, name, drop_compartment=True,
version=None, year=None, iosystem=None, sep=','):
""" Parse an EXIOBASE version 1 or 2 like extension file into pymrio.Extension
EXIOBASE-like extension files are assumed to have two
rows which are used as the column multiindex (region and sector)
and up to three columns for the row index (see Parameters).
For EXIOBASE 3, extensions can be loaded directly with pymrio.load
Notes
-----
So far this only parses factor of production extensions F (not
final demand extensions FY nor coefficients S).
Parameters
----------
ext_file : string or pathlib.Path
File to parse
index_col : int
The number of columns (1 to 3) at the beginning of the file
to use as the index. The order of the index_col must be
- 1 index column: ['stressor']
- 2 index columns: ['stressor', 'unit']
- 3 index columns: ['stressor', 'compartment', 'unit']
- > 3: everything beyond the first three index columns will be removed
name : string
Name of the extension
drop_compartment : boolean, optional
If True (default) removes the compartment from the index.
version : string, optional
see pymrio.Extension
iosystem : string, optional
see pymrio.Extension
year : string or int
see pymrio.Extension
sep : string, optional
Delimiter to use; default ','
Returns
-------
pymrio.Extension
with F (and unit if available)
"""
ext_file = os.path.abspath(str(ext_file))
F = pd.read_table(
ext_file,
header=[0, 1],
index_col=list(range(index_col)),
sep=sep)
F.columns.names = ['region', 'sector']
if index_col == 1:
F.index.names = ['stressor']
elif index_col == 2:
F.index.names = ['stressor', 'unit']
elif index_col == 3:
F.index.names = ['stressor', 'compartment', 'unit']
else:
F.reset_index(level=list(range(3, index_col)),
drop=True,
inplace=True)
F.index.names = ['stressor', 'compartment', 'unit']
unit = None
if index_col > 1:
unit = pd.DataFrame(F.iloc[:, 0].
reset_index(level='unit').unit)
F.reset_index(level='unit', drop=True, inplace=True)
if drop_compartment:
F.reset_index(level='compartment',
drop=True, inplace=True)
unit.reset_index(level='compartment',
drop=True, inplace=True)
return Extension(name=name,
F=F,
unit=unit,
iosystem=iosystem,
version=version,
year=year,
)
|
python
|
{
"resource": ""
}
|
q8449
|
get_exiobase12_version
|
train
|
def get_exiobase12_version(filename):
""" Returns the EXIOBASE version for the given filename,
None if not found
"""
try:
ver_match = re.search(r'(\d+\w*(\.|\-|\_))*\d+\w*', filename)
version = ver_match.string[ver_match.start():ver_match.end()]
if re.search('\_\d\d\d\d', version[-5:]):
version = version[:-5]
except AttributeError:
version = None
return version
|
python
|
{
"resource": ""
}
|
q8450
|
parse_exiobase1
|
train
|
def parse_exiobase1(path):
""" Parse the exiobase1 raw data files.
This function works with
- pxp_ita_44_regions_coeff_txt
- ixi_fpa_44_regions_coeff_txt
- pxp_ita_44_regions_coeff_src_txt
- ixi_fpa_44_regions_coeff_src_txt
which can be found on www.exiobase.eu
The parser works with the compressed (zip) files as well as the unpacked
files.
Parameters
----------
path : pathlib.Path or string
Path of the exiobase 1 data
Returns
-------
pymrio.IOSystem with exio1 data
"""
path = os.path.abspath(os.path.normpath(str(path)))
exio_files = get_exiobase_files(path)
if len(exio_files) == 0:
raise ParserError("No EXIOBASE files found at {}".format(path))
system = _get_MRIO_system(path)
if not system:
logging.warning("Could not determine system (pxp or ixi)"
" set system parameter manually")
io = generic_exiobase12_parser(exio_files, system=system)
return io
|
python
|
{
"resource": ""
}
|
q8451
|
parse_exiobase3
|
train
|
def parse_exiobase3(path):
""" Parses the public EXIOBASE 3 system
This parser works with either the compressed zip
archive as downloaded or the extracted system.
Note
----
The exiobase 3 parser does not yet include
population and characterization data.
Parameters
----------
path : string or pathlib.Path
Path to the folder with the EXIOBASE files
or the compressed archive.
Returns
-------
IOSystem
A IOSystem with the parsed exiobase 3 data
"""
io = load_all(path)
# need to rename the final demand satellite,
# wrong name in the standard distribution
try:
io.satellite.FY = io.satellite.F_hh.copy()
del io.satellite.F_hh
except AttributeError:
pass
# some ixi in the exiobase 3.4 official distribution
# have a country name mixup. Clean it here:
io.rename_regions(
{'AUS': 'AU',
'AUT': 'AT',
'BEL': 'BE',
'BGR': 'BG',
'BRA': 'BR',
'CAN': 'CA',
'CHE': 'CH',
'CHN': 'CN',
'CYP': 'CY',
'CZE': 'CZ',
'DEU': 'DE',
'DNK': 'DK',
'ESP': 'ES',
'EST': 'EE',
'FIN': 'FI',
'FRA': 'FR',
'GBR': 'GB',
'GRC': 'GR',
'HRV': 'HR',
'HUN': 'HU',
'IDN': 'ID',
'IND': 'IN',
'IRL': 'IE',
'ITA': 'IT',
'JPN': 'JP',
'KOR': 'KR',
'LTU': 'LT',
'LUX': 'LU',
'LVA': 'LV',
'MEX': 'MX',
'MLT': 'MT',
'NLD': 'NL',
'NOR': 'NO',
'POL': 'PL',
'PRT': 'PT',
'ROM': 'RO',
'RUS': 'RU',
'SVK': 'SK',
'SVN': 'SI',
'SWE': 'SE',
'TUR': 'TR',
'TWN': 'TW',
'USA': 'US',
'ZAF': 'ZA',
'WWA': 'WA',
'WWE': 'WE',
'WWF': 'WF',
'WWL': 'WL',
'WWM': 'WM'})
return io
|
python
|
{
"resource": ""
}
|
q8452
|
__get_WIOD_SEA_extension
|
train
|
def __get_WIOD_SEA_extension(root_path, year, data_sheet='DATA'):
""" Utility function to get the extension data from the SEA file in WIOD
This function is based on the structure in the WIOD_SEA_July14 file.
Missing values are set to zero.
The function works if the SEA file is either in the root path or in a
subfolder named 'SEA'.
Parameters
----------
root_path : string
Path to the WIOD data or the path with the SEA data.
year : str or int
Year to return for the extension
data_sheet : string, optional
Worksheet with the SEA data in the excel file
Returns
-------
SEA data as extension for the WIOD MRIO
"""
sea_ext = '.xlsx'
sea_start = 'WIOD_SEA'
_SEA_folder = os.path.join(root_path, 'SEA')
if not os.path.exists(_SEA_folder):
_SEA_folder = root_path
sea_folder_content = [ff for ff in os.listdir(_SEA_folder)
if os.path.splitext(ff)[-1] == sea_ext and
ff[:8] == sea_start]
if sea_folder_content:
# read data
sea_file = os.path.join(_SEA_folder, sorted(sea_folder_content)[0])
df_sea = pd.read_excel(sea_file,
sheet_name=data_sheet,
header=0,
index_col=[0, 1, 2, 3])
# fix years
ic_sea = df_sea.columns.tolist()
ic_sea = [yystr.lstrip('_') for yystr in ic_sea]
df_sea.columns = ic_sea
try:
ds_sea = df_sea[str(year)]
except KeyError:
warnings.warn(
'SEA extension does not include data for the '
'year {} - SEA-Extension not included'.format(year),
ParserWarning)
return None, None
# get useful data (employment)
mt_sea = ['EMP', 'EMPE', 'H_EMP', 'H_EMPE']
ds_use_sea = pd.concat(
[ds_sea.xs(key=vari, level='Variable', drop_level=False)
for vari in mt_sea])
ds_use_sea.drop(labels='TOT', level='Code', inplace=True)
ds_use_sea.reset_index('Description', drop=True, inplace=True)
# RoW not included in SEA but needed to get it consistent for
# all countries. Just add a dummy with 0 for all accounts.
if 'RoW' not in ds_use_sea.index.get_level_values('Country'):
ds_RoW = ds_use_sea.xs('USA',
level='Country', drop_level=False)
ds_RoW.ix[:] = 0
df_RoW = ds_RoW.reset_index()
df_RoW['Country'] = 'RoW'
ds_use_sea = pd.concat(
[ds_use_sea.reset_index(), df_RoW]).set_index(
['Country', 'Code', 'Variable'])
ds_use_sea.fillna(value=0, inplace=True)
df_use_sea = ds_use_sea.unstack(level=['Country', 'Code'])[str(year)]
df_use_sea.index.names = IDX_NAMES['VA_row_single']
df_use_sea.columns.names = IDX_NAMES['F_col']
df_use_sea = df_use_sea.astype('float')
df_unit = pd.DataFrame(
data=[ # this data must be in the same order as mt_sea
'thousand persons',
'thousand persons',
'mill hours',
'mill hours',
],
columns=['unit'],
index=df_use_sea.index)
return df_use_sea, df_unit
else:
warnings.warn(
'SEA extension raw data file not found - '
'SEA-Extension not included', ParserWarning)
return None, None
|
python
|
{
"resource": ""
}
|
q8453
|
MRIOMetaData._add_history
|
train
|
def _add_history(self, entry_type, entry):
""" Generic method to add entry as entry_type to the history """
meta_string = "{time} - {etype} - {entry}".format(
time=self._time(),
etype=entry_type.upper(),
entry=entry)
self._content['history'].insert(0, meta_string)
self.logger(meta_string)
|
python
|
{
"resource": ""
}
|
q8454
|
MRIOMetaData.change_meta
|
train
|
def change_meta(self, para, new_value, log=True):
""" Changes the meta data
This function does nothing if None is passed as new_value.
To set a certain value to None pass the str 'None'
Parameters
----------
para: str
Meta data entry to change
new_value: str
New value
log: boolean, optional
If True (default) records the meta data change
in the history
"""
if not new_value:
return
para = para.lower()
if para == 'history':
raise ValueError(
'History can only be extended - use method "note"')
old_value = self._content.get(para, None)
if new_value == old_value:
return
self._content[para] = new_value
if old_value and log:
self._add_history(entry_type="METADATA_CHANGE",
entry='Changed parameter "{para}" '
'from "{old}" to "{new}"'.format(
para=para,
old=old_value,
new=new_value))
|
python
|
{
"resource": ""
}
|
q8455
|
MRIOMetaData.save
|
train
|
def save(self, location=None):
""" Saves the current status of the metadata
This saves the metadata at the location of the previously loaded
metadata or at the file/path given in location.
Specify a location if the metadata should be stored in a different
location or was never stored before. Subsequent saves will use the
location set here.
Parameters
----------
location: str, optional
Path or file for saving the metadata.
This can be the full file path or just the storage folder.
In the latter case, the filename defined in
DEFAULT_FILE_NAMES['metadata'] (currently 'metadata.json') is
assumed.
"""
if location:
location = Path(location)
if os.path.splitext(str(location))[1] == '':
self._metadata_file = location / DEFAULT_FILE_NAMES['metadata']
else:
self._metadata_file = location
if self._metadata_file:
with self._metadata_file.open(mode='w') as mdf:
json.dump(self._content, mdf, indent=4)
else:
logging.error("No metadata file given for storing the file")
|
python
|
{
"resource": ""
}
|
q8456
|
calc_x
|
train
|
def calc_x(Z, Y):
""" Calculate the industry output x from the Z and Y matrix
Parameters
----------
Z : pandas.DataFrame or numpy.array
Symmetric input output table (flows)
Y : pandas.DataFrame or numpy.array
final demand with categories (1.order) for each country (2.order)
Returns
-------
pandas.DataFrame or numpy.array
Industry output x as column vector
The type is determined by the type of Z. If DataFrame index as Z
"""
x = np.reshape(np.sum(np.hstack((Z, Y)), 1), (-1, 1))
if type(Z) is pd.DataFrame:
x = pd.DataFrame(x, index=Z.index, columns=['indout'])
if type(x) is pd.Series:
x = pd.DataFrame(x)
if type(x) is pd.DataFrame:
x.columns = ['indout']
return x
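# Hedged worked example: a 2-sector toy system. x is the row sum of [Z Y]:
# here x = [1+2+3, 4+5+6] = [6, 15] as a column vector.
#
# Z = np.array([[1., 2.], [4., 5.]])
# Y = np.array([[3.], [6.]])
# calc_x(Z, Y)   # -> array([[ 6.], [15.]])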
|
python
|
{
"resource": ""
}
|
q8457
|
calc_x_from_L
|
train
|
def calc_x_from_L(L, y):
""" Calculate the industry output x from L and a y vector
Parameters
----------
L : pandas.DataFrame or numpy.array
Symmetric input output Leontief table
y : pandas.DataFrame or numpy.array
a column vector of the total final demand
Returns
-------
pandas.DataFrame or numpy.array
Industry output x as column vector
The type is determined by the type of L. If DataFrame index as L
"""
x = L.dot(y)
if type(x) is pd.Series:
x = pd.DataFrame(x)
if type(x) is pd.DataFrame:
x.columns = ['indout']
return x
|
python
|
{
"resource": ""
}
|
q8458
|
calc_L
|
train
|
def calc_L(A):
""" Calculate the Leontief L from A
Parameters
----------
A : pandas.DataFrame or numpy.array
Symmetric input output table (coefficients)
Returns
-------
pandas.DataFrame or numpy.array
Leontief input output table L
The type is determined by the type of A.
If DataFrame index/columns as A
"""
I = np.eye(A.shape[0]) # noqa
if type(A) is pd.DataFrame:
return pd.DataFrame(np.linalg.inv(I-A),
index=A.index, columns=A.columns)
else:
return np.linalg.inv(I-A)
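# Hedged worked example: for a one-sector economy with A = [[0.5]],
# L = inv(I - A) = [[2.0]], i.e. one unit of final demand requires two
# units of gross output.
#
# A = np.array([[0.5]])
# calc_L(A)   # -> array([[2.]])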
|
python
|
{
"resource": ""
}
|
q8459
|
recalc_M
|
train
|
def recalc_M(S, D_cba, Y, nr_sectors):
""" Calculate Multipliers based on footprints.
Parameters
----------
D_cba : pandas.DataFrame or numpy array
Footprint per sector and country
Y : pandas.DataFrame or numpy array
Final demand: aggregated across categories or just one category, one
column per country. This will be diagonalized per country block.
The diagonalized form must be invertible for this method to work.
nr_sectors : int
Number of sectors in the MRIO
Returns
-------
pandas.DataFrame or numpy.array
Multipliers M
The type is determined by the type of D_cba.
If DataFrame index/columns as D_cba
"""
Y_diag = ioutil.diagonalize_blocks(Y.values, blocksize=nr_sectors)
Y_inv = np.linalg.inv(Y_diag)
M = D_cba.dot(Y_inv)
if type(D_cba) is pd.DataFrame:
M.columns = D_cba.columns
M.index = D_cba.index
return M
|
python
|
{
"resource": ""
}
|
q8460
|
calc_accounts
|
train
|
def calc_accounts(S, L, Y, nr_sectors):
""" Calculate sector specific cba and pba based accounts, imp and exp accounts
The total industry output x for the calculation
is recalculated from L and y
Parameters
----------
L : pandas.DataFrame
Leontief input output table L
S : pandas.DataFrame
Direct impact coefficients
Y : pandas.DataFrame
Final demand: aggregated across categories or just one category, one
column per country
nr_sectors : int
Number of sectors in the MRIO
Returns
-------
Tuple
(D_cba, D_pba, D_imp, D_exp)
Format: D_row x L_col (=nr_countries*nr_sectors)
- D_cba Footprint per sector and country
- D_pba Total factor use per sector and country
- D_imp Total global factor use to satisfy total final demand in
the country per sector
- D_exp Total factor use in one country to satisfy final demand
in all other countries (per sector)
"""
# diagonalize each sector block per country
# this results in a disaggregated y with final demand per country per
# sector in one column
Y_diag = ioutil.diagonalize_blocks(Y.values, blocksize=nr_sectors)
x_diag = L.dot(Y_diag)
x_tot = x_diag.values.sum(1)
del Y_diag
D_cba = pd.DataFrame(S.values.dot(x_diag),
index=S.index,
columns=S.columns)
# D_pba = S.dot(np.diagflat(x_tot))
# faster broadcasted calculation:
D_pba = pd.DataFrame(S.values*x_tot.reshape((1, -1)),
index=S.index,
columns=S.columns)
# for the traded accounts set the domestic industry output to zero
dom_block = np.zeros((nr_sectors, nr_sectors))
x_trade = ioutil.set_block(x_diag.values, dom_block)
D_imp = pd.DataFrame(S.values.dot(x_trade),
index=S.index,
columns=S.columns)
x_exp = x_trade.sum(1)
# D_exp = S.dot(np.diagflat(x_exp))
# faster broadcasted version:
D_exp = pd.DataFrame(S.values * x_exp.reshape((1, -1)),
index=S.index,
columns=S.columns)
return (D_cba, D_pba, D_imp, D_exp)
|
python
|
{
"resource": ""
}
|
q8461
|
_get_url_datafiles
|
train
|
def _get_url_datafiles(url_db_view, url_db_content,
mrio_regex, access_cookie=None):
""" Urls of mrio files by parsing url content for mrio_regex
Parameters
----------
url_db_view: url str
Url which shows the list of mrios in the db
url_db_content: url str
Url which needs to be appended before the url parsed from the
url_db_view to get a valid download link
mrio_regex: regex str
Regex to parse the mrio datafile from url_db_view
access_cookie: dict, optional
If needed, cookie to access the database
Returns
-------
Named tuple:
.raw_text: content of url_db_view for later use
.data_urls: list of url
"""
# Use post here - NB: get could be necessary for some other pages
# but currently works for wiod and eora
returnvalue = namedtuple('url_content',
['raw_text', 'data_urls'])
url_text = requests.post(url_db_view, cookies=access_cookie).text
data_urls = [url_db_content + ff
for ff in re.findall(mrio_regex, url_text)]
return returnvalue(raw_text=url_text, data_urls=data_urls)
|
python
|
{
"resource": ""
}
|
q8462
|
_download_urls
|
train
|
def _download_urls(url_list, storage_folder, overwrite_existing,
meta_handler, access_cookie=None):
""" Save url from url_list to storage_folder
Parameters
----------
url_list: list of str
Valid url to download
storage_folder: str, valid path
Location to store the download, folder will be created if
not existing. If the file is already present in the folder,
the download depends on the setting in 'overwrite_existing'.
overwrite_existing: boolean, optional
If False, skip download of file already existing in
the storage folder (default). Set to True to replace
files.
meta_handler: instance of MRIOMetaData
Returns
-------
The meta_handler is passed back
"""
for url in url_list:
filename = os.path.basename(url)
if not overwrite_existing and filename in os.listdir(storage_folder):
continue
storage_file = os.path.join(storage_folder, filename)
# Using requests here - tried with aiohttp but was actually slower
# Also don’t use shutil.copyfileobj - corrupts zips from Eora
req = requests.post(url, stream=True, cookies=access_cookie)
with open(storage_file, 'wb') as lf:
for chunk in req.iter_content(1024*5):
lf.write(chunk)
meta_handler._add_fileio('Downloaded {} to {}'.format(url, filename))
meta_handler.save()
return meta_handler
|
python
|
{
"resource": ""
}
|
q8463
|
download_wiod2013
|
train
|
def download_wiod2013(storage_folder, years=None, overwrite_existing=False,
satellite_urls=WIOD_CONFIG['satellite_urls']):
""" Downloads the 2013 wiod release
Note
----
Currently, pymrio only works with the 2013 release of the wiod tables. The
more recent 2016 release so far (October 2017) lacks the environmental and
social extensions.
Parameters
----------
storage_folder: str, valid path
Location to store the download, folder will be created if
not existing. If the file is already present in the folder,
the download of the specific file will be skipped.
years: list of int or str, optional
If years is given only downloads the specific years. This
only applies to the IO tables because extensions are stored
by country and not per year.
The years can be given in 2 or 4 digits.
overwrite_existing: boolean, optional
If False, skip download of file already existing in
the storage folder (default). Set to True to replace
files.
satellite_urls : list of str (urls), optional
Which satellite accounts to download. Default: satellite urls defined
in WIOD_CONFIG - list of all available urls. Remove items from this list
to download only a subset of extensions.
"""
try:
os.makedirs(storage_folder)
except FileExistsError:
pass
if type(years) is int or type(years) is str:
years = [years]
years = years if years else range(1995, 2012)
years = [str(yy).zfill(2)[-2:] for yy in years]
wiod_web_content = _get_url_datafiles(
url_db_view=WIOD_CONFIG['url_db_view'],
url_db_content=WIOD_CONFIG['url_db_content'],
mrio_regex='protected.*?wiot\d\d.*?xlsx')
restricted_wiod_io_urls = [url for url in wiod_web_content.data_urls if
re.search(r"(wiot)(\d\d)",
os.path.basename(url)).group(2)
in years]
meta = MRIOMetaData(location=storage_folder,
description='WIOD metadata file for pymrio',
name='WIOD',
system='ixi',
version='data13')
meta = _download_urls(url_list=restricted_wiod_io_urls + satellite_urls,
storage_folder=storage_folder,
overwrite_existing=overwrite_existing,
meta_handler=meta)
meta.save()
return meta
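# Illustrative usage sketch (added for clarity, not part of the original
# function): this performs a network download; 'wiod_downloads' is just a
# hypothetical target folder.
if __name__ == '__main__':
    wiod_meta = download_wiod2013(storage_folder='wiod_downloads',
                                  years=[2005, 2006])
    print(wiod_meta)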
|
python
|
{
"resource": ""
}
|
q8464
|
get_timestamp
|
train
|
def get_timestamp(length):
    """Get a timestamp string of the given `length`"""
s = '%.6f' % time.time()
whole, frac = map(int, s.split('.'))
res = '%d%d' % (whole, frac)
return res[:length]
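# Illustrative usage (added for clarity): with length=10 the result is the
# integer part of the current Unix time as a string of digits.
ts = get_timestamp(10)
assert ts.isdigit() and len(ts) == 10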
|
python
|
{
"resource": ""
}
|
q8465
|
mkdir_p
|
train
|
def mkdir_p(path):
"""mkdir -p path"""
if PY3:
return os.makedirs(path, exist_ok=True)
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
|
python
|
{
"resource": ""
}
|
q8466
|
monkey_patch
|
train
|
def monkey_patch():
"""
    Monkey patches `zmq.Context`, `zmq.Socket` and `zmq.Poller` of the
    imported `zmq` module with the versions provided by this module.
"""
ozmq = __import__('zmq')
ozmq.Socket = zmq.Socket
ozmq.Context = zmq.Context
ozmq.Poller = zmq.Poller
ioloop = __import__('zmq.eventloop.ioloop')
ioloop.Poller = zmq.Poller
|
python
|
{
"resource": ""
}
|
q8467
|
CoreSystem.reset_to_coefficients
|
train
|
def reset_to_coefficients(self):
""" Keeps only the coefficient.
        This can be used to recalculate the IO tables for a new final demand.
Note
-----
        The system cannot be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary.
"""
# Development note: The coefficient attributes are
# defined in self.__coefficients__
[setattr(self, key, None)
for key in self.get_DataFrame(
data=False,
with_unit=False,
with_population=False)
if key not in self.__coefficients__]
return self
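# Illustrative usage sketch (added for clarity) - assumes pymrio is installed
# and uses the small test system shipped with it. After the reset, absolute
# tables such as Z are dropped while coefficient tables such as A are kept.
import pymrio
io = pymrio.load_test()
io.calc_all()
io.reset_to_coefficients()
assert io.Z is None and io.A is not None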
|
python
|
{
"resource": ""
}
|
q8468
|
CoreSystem.copy
|
train
|
def copy(self, new_name=None):
""" Returns a deep copy of the system
Parameters
-----------
new_name: str, optional
Set a new meta name parameter.
Default: <old_name>_copy
"""
_tmp = copy.deepcopy(self)
if not new_name:
new_name = self.name + '_copy'
if str(type(self)) == "<class 'pymrio.core.mriosystem.IOSystem'>":
_tmp.meta.note('IOSystem copy {new} based on {old}'.format(
new=new_name, old=self.meta.name))
_tmp.meta.change_meta('name', new_name, log=False)
else:
_tmp.name = new_name
return _tmp
|
python
|
{
"resource": ""
}
|
q8469
|
CoreSystem.get_Y_categories
|
train
|
def get_Y_categories(self, entries=None):
""" Returns names of y cat. of the IOSystem as unique names in order
Parameters
----------
entries : List, optional
            If given, returns a list with None for all values not in entries.
Returns
-------
Index
List of categories, None if no attribute to determine
list is available
"""
possible_dataframes = ['Y', 'FY']
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
try:
ind = getattr(self, df).columns.get_level_values(
'category').unique()
except (AssertionError, KeyError):
ind = getattr(self, df).columns.get_level_values(
1).unique()
if entries:
if type(entries) is str:
entries = [entries]
ind = ind.tolist()
return [None if ee not in entries else ee for ee in ind]
else:
return ind
else:
logging.warn("No attributes available to get Y categories")
return None
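# Illustrative usage sketch (added for clarity) - assumes pymrio is installed;
# prints the final demand categories found in the Y columns of the small
# test system shipped with the package.
import pymrio
io = pymrio.load_test()
print(io.get_Y_categories())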
|
python
|
{
"resource": ""
}
|
q8470
|
CoreSystem.get_index
|
train
|
def get_index(self, as_dict=False, grouping_pattern=None):
""" Returns the index of the DataFrames in the system
Parameters
----------
as_dict: boolean, optional
If True, returns a 1:1 key-value matching for further processing
prior to groupby functions. Otherwise (default) the index
is returned as pandas index.
grouping_pattern: dict, optional
Dictionary with keys being regex patterns matching index and
values the name for the grouping. If the index is a pandas
            multiindex, the keys must be tuples with one valid regex
            expression per index level.
Otherwise, the keys need to be strings.
Only relevant if as_dict is True.
"""
possible_dataframes = ['A', 'L', 'Z', 'Y', 'F', 'FY', 'M', 'S',
'D_cba', 'D_pba', 'D_imp', 'D_exp',
'D_cba_reg', 'D_pba_reg',
'D_imp_reg', 'D_exp_reg',
'D_cba_cap', 'D_pba_cap',
'D_imp_cap', 'D_exp_cap', ]
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
orig_idx = getattr(self, df).index
break
else:
logging.warn("No attributes available to get index")
return None
if as_dict:
dd = {k: k for k in orig_idx}
if grouping_pattern:
for pattern, new_group in grouping_pattern.items():
if type(pattern) is str:
dd.update({k: new_group for k, v in dd.items() if
re.match(pattern, k)})
else:
dd.update({k: new_group for k, v in dd.items() if
all([re.match(pat, k[nr])
for nr, pat in enumerate(pattern)])})
return dd
else:
return orig_idx
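# Illustrative usage sketch (added for clarity) - assumes pymrio is installed
# and that the test system uses region names 'reg1' ... 'reg6'. The grouping
# pattern maps every (region, sector) entry whose region matches 'reg[1-3]'
# to one group; all other entries keep their original index value.
import pymrio
io = pymrio.load_test()
mapping = io.get_index(as_dict=True,
                       grouping_pattern={('reg[1-3]', '.*'): 'group1'})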
|
python
|
{
"resource": ""
}
|
q8471
|
CoreSystem.set_index
|
train
|
def set_index(self, index):
""" Sets the pd dataframe index of all dataframes in the system to index
"""
for df in self.get_DataFrame(data=True, with_population=False):
df.index = index
|
python
|
{
"resource": ""
}
|
q8472
|
CoreSystem.get_DataFrame
|
train
|
def get_DataFrame(self, data=False, with_unit=True, with_population=True):
        """ Yields all pandas.DataFrames or their names
Notes
-----
For IOSystem this does not include the DataFrames in the extensions.
Parameters
----------
data : boolean, optional
If True, returns a generator which yields the DataFrames.
If False, returns a generator which
yields only the names of the DataFrames
with_unit: boolean, optional
If True, includes the 'unit' DataFrame
If False, does not include the 'unit' DataFrame.
            The method then only yields the numerical data tables
with_population: boolean, optional
If True, includes the 'population' vector
If False, does not include the 'population' vector.
Returns
-------
DataFrames or string generator, depending on parameter data
"""
for key in self.__dict__:
            if (key == 'unit') and not with_unit:
                continue
            if (key == 'population') and not with_population:
continue
if type(self.__dict__[key]) is pd.DataFrame:
if data:
yield getattr(self, key)
else:
yield key
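# Illustrative usage sketch (added for clarity) - assumes pymrio is installed;
# lists the names of the purely numerical tables of the core system.
import pymrio
io = pymrio.load_test()
print(list(io.get_DataFrame(with_unit=False, with_population=False)))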
|
python
|
{
"resource": ""
}
|
q8473
|
CoreSystem.save
|
train
|
def save(self, path, table_format='txt', sep='\t',
table_ext=None, float_format='%.12g'):
""" Saving the system to path
Parameters
----------
path : pathlib.Path or string
path for the saved data (will be created if necessary, data
within will be overwritten).
table_format : string
Format to save the DataFrames:
- 'pkl' : Binary pickle files,
alias: 'pickle', 'bin', 'binary'
- 'txt' : Text files (default), alias: 'text', 'csv'
table_ext : string, optional
File extension,
            default depends on table_format (.pkl for pickle, .txt for text)
sep : string, optional
Field delimiter for the output file, only for txt files.
Default: tab ('\t')
float_format : string, optional
Format for saving the DataFrames,
default = '%.12g', only for txt files
"""
if type(path) is str:
path = path.rstrip('\\')
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
para_file_path = path / DEFAULT_FILE_NAMES['filepara']
file_para = dict()
file_para['files'] = dict()
if table_format in ['text', 'csv', 'txt']:
table_format = 'txt'
elif table_format in ['pickle', 'bin', 'binary', 'pkl']:
table_format = 'pkl'
else:
            raise ValueError('Unknown table format "{}" - '
                             'must be "txt" or "pkl"'.format(table_format))
if not table_ext:
if table_format == 'txt':
table_ext = '.txt'
if table_format == 'pkl':
table_ext = '.pkl'
if str(type(self)) == "<class 'pymrio.core.mriosystem.IOSystem'>":
file_para['systemtype'] = GENERIC_NAMES['iosys']
elif str(type(self)) == "<class 'pymrio.core.mriosystem.Extension'>":
file_para['systemtype'] = GENERIC_NAMES['ext']
file_para['name'] = self.name
else:
logging.warn('Unknown system type {} - set to "undef"'.format(
str(type(self))))
file_para['systemtype'] = 'undef'
for df, df_name in zip(self.get_DataFrame(data=True),
self.get_DataFrame()):
if type(df.index) is pd.MultiIndex:
nr_index_col = len(df.index.levels)
else:
nr_index_col = 1
if type(df.columns) is pd.MultiIndex:
nr_header = len(df.columns.levels)
else:
nr_header = 1
save_file = df_name + table_ext
save_file_with_path = path / save_file
logging.info('Save file {}'.format(save_file_with_path))
if table_format == 'txt':
df.to_csv(save_file_with_path, sep=sep,
float_format=float_format)
else:
df.to_pickle(save_file_with_path)
file_para['files'][df_name] = dict()
file_para['files'][df_name]['name'] = save_file
file_para['files'][df_name]['nr_index_col'] = str(nr_index_col)
file_para['files'][df_name]['nr_header'] = str(nr_header)
with para_file_path.open(mode='w') as pf:
json.dump(file_para, pf, indent=4)
if file_para['systemtype'] == GENERIC_NAMES['iosys']:
if not self.meta:
self.meta = MRIOMetaData(name=self.name,
location=path)
self.meta._add_fileio("Saved {} to {}".format(self.name, path))
self.meta.save(location=path)
return self
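# Illustrative usage sketch (added for clarity) - assumes pymrio is installed;
# writes the core tables of the small test system as tab-separated text files
# into a temporary folder.
import tempfile
import pymrio
io = pymrio.load_test()
with tempfile.TemporaryDirectory() as tmp_folder:
    io.save(tmp_folder, table_format='txt')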
|
python
|
{
"resource": ""
}
|
q8474
|
CoreSystem.rename_regions
|
train
|
def rename_regions(self, regions):
""" Sets new names for the regions
Parameters
----------
regions : list or dict
            In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
In case of list: List of new names in order and complete
without repetition
"""
if type(regions) is list:
regions = {old: new for old, new in
zip(self.get_regions(), regions)}
for df in self.get_DataFrame(data=True):
df.rename(index=regions, columns=regions, inplace=True)
try:
for ext in self.get_extensions(data=True):
for df in ext.get_DataFrame(data=True):
df.rename(index=regions, columns=regions, inplace=True)
        except AttributeError:
pass
self.meta._add_modify("Changed country names")
return self
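# Illustrative usage sketch (added for clarity) - assumes pymrio is installed
# and that the small test system uses region names 'reg1' ... 'reg6'.
import pymrio
io = pymrio.load_test()
io.rename_regions({'reg1': 'region_A'})
assert 'region_A' in io.get_regions()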
|
python
|
{
"resource": ""
}
|
q8475
|
CoreSystem.rename_sectors
|
train
|
def rename_sectors(self, sectors):
""" Sets new names for the sectors
Parameters
----------
sectors : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
In case of list: List of new names in order and
complete without repetition
"""
if type(sectors) is list:
sectors = {old: new for old, new in
zip(self.get_sectors(), sectors)}
for df in self.get_DataFrame(data=True):
df.rename(index=sectors, columns=sectors, inplace=True)
try:
for ext in self.get_extensions(data=True):
for df in ext.get_DataFrame(data=True):
df.rename(index=sectors, columns=sectors, inplace=True)
        except AttributeError:
pass
self.meta._add_modify("Changed sector names")
return self
|
python
|
{
"resource": ""
}
|
q8476
|
CoreSystem.rename_Y_categories
|
train
|
def rename_Y_categories(self, Y_categories):
""" Sets new names for the Y_categories
Parameters
----------
Y_categories : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
In case of list: List of new names in order and
complete without repetition
"""
if type(Y_categories) is list:
Y_categories = {old: new for old, new in
zip(self.get_Y_categories(), Y_categories)}
for df in self.get_DataFrame(data=True):
df.rename(index=Y_categories, columns=Y_categories, inplace=True)
try:
for ext in self.get_extensions(data=True):
for df in ext.get_DataFrame(data=True):
df.rename(index=Y_categories,
columns=Y_categories,
inplace=True)
        except AttributeError:
pass
self.meta._add_modify("Changed Y category names")
return self
|
python
|
{
"resource": ""
}
|
q8477
|
Extension.get_rows
|
train
|
def get_rows(self):
        """ Returns the names of the rows of the extension"""
possible_dataframes = ['F', 'FY', 'M', 'S',
'D_cba', 'D_pba', 'D_imp', 'D_exp',
'D_cba_reg', 'D_pba_reg',
'D_imp_reg', 'D_exp_reg',
'D_cba_cap', 'D_pba_cap',
'D_imp_cap', 'D_exp_cap', ]
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
                return getattr(self, df).index.values
else:
logging.warn("No attributes available to get row names")
return None
|
python
|
{
"resource": ""
}
|
q8478
|
Extension.get_row_data
|
train
|
def get_row_data(self, row, name=None):
""" Returns a dict with all available data for a row in the extension
Parameters
----------
row : tuple, list, string
A valid index for the extension DataFrames
name : string, optional
If given, adds a key 'name' with the given value to the dict. In
that case the dict can be
used directly to build a new extension.
Returns
-------
        dict object with the data (pandas DataFrame) for the specific rows
"""
retdict = {}
for rowname, data in zip(self.get_DataFrame(),
self.get_DataFrame(data=True)):
            retdict[rowname] = pd.DataFrame(data.loc[row])
if name:
retdict['name'] = name
return retdict
|
python
|
{
"resource": ""
}
|
q8479
|
Extension.diag_stressor
|
train
|
def diag_stressor(self, stressor, name=None):
""" Diagonalize one row of the stressor matrix for a flow analysis.
        This method takes one row of the F matrix and diagonalizes it
        to the full region/sector format. Footprint calculations based
        on this matrix show the flow of embodied stressors from the source
        region/sector (row index) to the final consumer (column index).
Note
----
Since the type of analysis based on the disaggregated matrix is based
on flow, direct household emissions (FY) are not included.
Parameters
----------
stressor : str or int - valid index for one row of the F matrix
This must be a tuple for a multiindex, a string otherwise.
The stressor to diagonalize.
name : string (optional)
The new name for the extension,
if None (default): string based on the given stressor (row name)
Returns
-------
Extension
"""
if type(stressor) is int:
stressor = self.F.index[stressor]
if len(stressor) == 1:
stressor = stressor[0]
if not name:
if type(stressor) is str:
name = stressor
else:
name = '_'.join(stressor) + '_diag'
ext_diag = Extension(name)
ext_diag.F = pd.DataFrame(
index=self.F.columns,
columns=self.F.columns,
data=np.diag(self.F.loc[stressor, :])
)
try:
ext_diag.unit = pd.DataFrame(
index=ext_diag.F.index,
columns=self.unit.columns,
data=self.unit.loc[stressor].unit)
except AttributeError:
            # If no unit in stressor, self.unit.columns breaks
ext_diag.unit = None
return ext_diag
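# Illustrative usage sketch (added for clarity) - assumes pymrio is installed
# and that the test system provides an extension named 'emissions'. The
# stressor is picked by integer position, so no row names need to be known.
import pymrio
io = pymrio.load_test()
diag_ext = io.emissions.diag_stressor(0, name='first_stressor_diag')
print(diag_ext.F.shape)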
|
python
|
{
"resource": ""
}
|
q8480
|
IOSystem.calc_system
|
train
|
def calc_system(self):
"""
Calculates the missing part of the core IOSystem
The method checks Z, x, A, L and calculates all which are None
"""
# Possible cases:
# 1) Z given, rest can be None and calculated
# 2) A and x given, rest can be calculated
# 3) A and Y , calc L (if not given) - calc x and the rest
# this catches case 3
if self.x is None and self.Z is None:
# in that case we need L or at least A to calculate it
if self.L is None:
self.L = calc_L(self.A)
logging.info('Leontief matrix L calculated')
self.x = calc_x_from_L(self.L, self.Y.sum(axis=1))
self.meta._add_modify('Industry Output x calculated')
        # this chain of ifs catches cases 1 and 2
if self.Z is None:
self.Z = calc_Z(self.A, self.x)
self.meta._add_modify('Flow matrix Z calculated')
if self.x is None:
self.x = calc_x(self.Z, self.Y)
self.meta._add_modify('Industry output x calculated')
if self.A is None:
self.A = calc_A(self.Z, self.x)
self.meta._add_modify('Coefficient matrix A calculated')
if self.L is None:
self.L = calc_L(self.A)
self.meta._add_modify('Leontief matrix L calculated')
return self
|
python
|
{
"resource": ""
}
|
q8481
|
IOSystem.calc_extensions
|
train
|
def calc_extensions(self, extensions=None, Y_agg=None):
""" Calculates the extension and their accounts
For the calculation, y is aggregated across specified y categories
        The method calls .calc_system of each extension (or those given in the
extensions parameter)
Parameters
----------
extensions : list of strings, optional
A list of key names of extensions which shall be calculated.
Default: all dictionaries of IOSystem are assumed to be extensions
Y_agg : pandas.DataFrame or np.array, optional
The final demand aggregated (one category per country). Can be
used to restrict the calculation of CBA of a specific category
(e.g. households). Default: y is aggregated over all categories
"""
ext_list = list(self.get_extensions(data=False))
extensions = extensions or ext_list
if type(extensions) == str:
extensions = [extensions]
for ext_name in extensions:
self.meta._add_modify(
'Calculating accounts for extension {}'.format(ext_name))
ext = getattr(self, ext_name)
ext.calc_system(x=self.x,
Y=self.Y,
L=self.L,
Y_agg=Y_agg,
population=self.population
)
return self
|
python
|
{
"resource": ""
}
|
q8482
|
IOSystem.report_accounts
|
train
|
def report_accounts(self, path, per_region=True,
per_capita=False, pic_size=1000,
format='rst', **kwargs):
""" Generates a report to the given path for all extension
This method calls .report_accounts for all extensions
Notes
-----
This looks prettier with the seaborn module (import seaborn before
calling this method)
Parameters
----------
path : string
Root path for the report
per_region : boolean, optional
If true, reports the accounts per region
per_capita : boolean, optional
If true, reports the accounts per capita
If per_capita and per_region are False, nothing will be done
pic_size : int, optional
size for the figures in px, 1000 by default
format : string, optional
file format of the report:
'rst'(default), 'html', 'latex', ...
except for rst all depend on the module docutils (all writer_name
from docutils can be used as format)
ffname : string, optional
            root file name (without extension, per_capita or per_region will
            be attached) and folder names. If None gets passed (default),
            self.name will be modified to get a valid name for the operating
            system without blanks.
**kwargs : key word arguments, optional
This will be passed directly to the pd.DataFrame.plot method
(through the self.plot_account method)
"""
for ext in self.get_extensions(data=True):
ext.report_accounts(path=path,
per_region=per_region,
per_capita=per_capita,
pic_size=pic_size,
format=format,
**kwargs)
|
python
|
{
"resource": ""
}
|
q8483
|
IOSystem.get_extensions
|
train
|
def get_extensions(self, data=False):
""" Yields the extensions or their names
Parameters
----------
data : boolean, optional
If True, returns a generator which yields the extensions.
If False, returns a generator which yields the names of
the extensions (default)
Returns
-------
Generator for Extension or string
"""
ext_list = [key for key in
self.__dict__ if type(self.__dict__[key]) is Extension]
for key in ext_list:
if data:
yield getattr(self, key)
else:
yield key
|
python
|
{
"resource": ""
}
|
q8484
|
IOSystem.reset_all_to_flows
|
train
|
def reset_all_to_flows(self, force=False):
""" Resets the IOSystem and all extensions to absolute flows
This method calls reset_to_flows for the IOSystem and for
all Extensions in the system.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
"""
self.reset_to_flows(force=force)
[ee.reset_to_flows(force=force)
for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset full system to absolute flows")
return self
|
python
|
{
"resource": ""
}
|
q8485
|
IOSystem.reset_all_to_coefficients
|
train
|
def reset_all_to_coefficients(self):
""" Resets the IOSystem and all extensions to coefficients.
This method calls reset_to_coefficients for the IOSystem and for
all Extensions in the system
Note
-----
        The system cannot be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary.
"""
self.reset_to_coefficients()
[ee.reset_to_coefficients() for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset full system to coefficients")
return self
|
python
|
{
"resource": ""
}
|
q8486
|
IOSystem.save_all
|
train
|
def save_all(self, path, table_format='txt', sep='\t',
table_ext=None, float_format='%.12g'):
""" Saves the system and all extensions
Extensions are saved in separate folders (names based on extension)
Parameters are passed to the .save methods of the IOSystem and
Extensions. See parameters description there.
"""
if type(path) is str:
path = path.rstrip('\\')
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
self.save(path=path,
table_format=table_format,
sep=sep,
table_ext=table_ext,
float_format=float_format)
for ext, ext_name in zip(self.get_extensions(data=True),
self.get_extensions()):
ext_path = path / ext_name
ext.save(path=ext_path,
table_format=table_format,
sep=sep,
table_ext=table_ext,
float_format=float_format)
return self
|
python
|
{
"resource": ""
}
|
q8487
|
IOSystem.remove_extension
|
train
|
def remove_extension(self, ext=None):
""" Remove extension from IOSystem
For single Extensions the same can be achieved with del
IOSystem_name.Extension_name
Parameters
----------
ext : string or list, optional
The extension to remove, this can be given as the name of the
instance or of Extension.name (the latter will be checked if no
instance was found)
If ext is None (default) all Extensions will be removed
"""
if ext is None:
ext = list(self.get_extensions())
if type(ext) is str:
ext = [ext]
for ee in ext:
try:
del self.__dict__[ee]
except KeyError:
for exinstancename, exdata in zip(
self.get_extensions(data=False),
self.get_extensions(data=True)):
if exdata.name == ee:
del self.__dict__[exinstancename]
finally:
self.meta._add_modify("Removed extension {}".format(ee))
return self
|
python
|
{
"resource": ""
}
|
q8488
|
is_vector
|
train
|
def is_vector(inp):
""" Returns true if the input can be interpreted as a 'true' vector
Note
----
    Only checks dimensions, not whether the type is numeric
Parameters
----------
inp : numpy.ndarray or something that can be converted into ndarray
Returns
-------
Boolean
True for vectors: ndim = 1 or ndim = 2 and shape of one axis = 1
False for all other arrays
"""
inp = np.asarray(inp)
nr_dim = np.ndim(inp)
if nr_dim == 1:
return True
elif (nr_dim == 2) and (1 in inp.shape):
return True
else:
return False
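# Illustrative usage (added for clarity):
assert is_vector([1, 2, 3])
assert is_vector(np.ones((1, 4)))
assert not is_vector(np.ones((2, 2)))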
|
python
|
{
"resource": ""
}
|
q8489
|
get_file_para
|
train
|
def get_file_para(path, path_in_arc=''):
""" Generic method to read the file parameter file
Helper function to consistently read the file parameter file, which can
either be uncompressed or included in a zip archive. By default, the file
name is to be expected as set in DEFAULT_FILE_NAMES['filepara'] (currently
    file_parameters.json), but can be defined otherwise by including the file
name of the parameter file in the parameter path.
Parameters
----------
path: pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
        latter case the parameter 'path_in_arc' needs to be specified to
further indicate the location of the data in the compressed file.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass ''
(default), for data in e.g. the folder 'emissions' pass 'emissions/'.
        Only used if parameter 'path' points to a compressed zip file.
Returns
-------
Returns a namedtuple with
.folder: str with the absolute path containing the
file parameter file. In case of a zip the path
is relative to the root in the zip
.name: Filename without folder of the used parameter file.
    .content: Dictionary with the content of the file parameter file
Raises
------
FileNotFoundError if parameter file not found
"""
if type(path) is str:
path = Path(path.rstrip('\\'))
if zipfile.is_zipfile(str(path)):
para_file_folder = str(path_in_arc)
with zipfile.ZipFile(file=str(path)) as zf:
files = zf.namelist()
else:
para_file_folder = str(path)
files = [str(f) for f in path.glob('**/*')]
if para_file_folder not in files:
para_file_full_path = os.path.join(
para_file_folder, DEFAULT_FILE_NAMES['filepara'])
else:
para_file_full_path = para_file_folder
para_file_folder = os.path.dirname(para_file_full_path)
if para_file_full_path not in files:
raise FileNotFoundError(
'File parameter file {} not found'.format(
para_file_full_path))
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(file=str(path)) as zf:
para_file_content = json.loads(
zf.read(para_file_full_path).decode('utf-8'))
else:
with open(para_file_full_path, 'r') as pf:
para_file_content = json.load(pf)
return namedtuple('file_parameter',
['folder', 'name', 'content'])(
para_file_folder,
os.path.basename(para_file_full_path),
para_file_content)
|
python
|
{
"resource": ""
}
|
q8490
|
build_agg_matrix
|
train
|
def build_agg_matrix(agg_vector, pos_dict=None):
""" Agg. matrix based on mapping given in input as numerical or str vector.
    The aggregation matrix has the form n x m with
    -n new classification
-m old classification
Parameters
----------
agg_vector : list or vector like numpy ndarray
        This can be a row or column vector.
Length m with position given for n and -1 if values
should not be included
or
length m with id_string for the aggregation
pos_dict : dictionary
(only possible if agg_vector is given as string)
output order for the new matrix
must be given as dict with
'string in agg_vector' = pos
(as int, -1 if value should not be included in the aggregation)
Example 1:
input vector: np.array([0, 1, 1, 2]) or ['a', 'b', 'b', 'c']
agg matrix:
m0 m1 m2 m3
n0 1 0 0 0
n1 0 1 1 0
n2 0 0 0 1
Example 2:
input vector: np.array([1, 0, 0, 2]) or
(['b', 'a', 'a', 'c'], dict(a=0,b=1,c=2))
agg matrix:
m0 m1 m2 m3
n0 0 1 1 0
n1 1 0 0 0
n2 0 0 0 1
"""
if isinstance(agg_vector, np.ndarray):
agg_vector = agg_vector.flatten().tolist()
if type(agg_vector[0]) == str:
str_vector = agg_vector
agg_vector = np.zeros(len(str_vector))
if pos_dict:
if len(pos_dict.keys()) != len(set(str_vector)):
raise ValueError(
                    'Position elements inconsistent with aggregation vector')
seen = pos_dict
else:
seen = {}
counter = 0
for ind, item in enumerate(str_vector):
if item not in seen:
seen[item] = counter
counter += 1
agg_vector[ind] = seen[item]
agg_vector = np.array(agg_vector, dtype=int)
agg_vector = agg_vector.reshape((1, -1))
row_corr = agg_vector
col_corr = np.arange(agg_vector.size)
agg_matrix = np.zeros((row_corr.max()+1, col_corr.max()+1))
agg_matrix[row_corr, col_corr] = 1
# set columns with -1 value to 0
agg_matrix[np.tile(agg_vector == -1, (np.shape(agg_matrix)[0], 1))] = 0
return agg_matrix
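# Illustrative usage (added for clarity) - reproduces Example 1 from the
# docstring with the string variant of the aggregation vector.
agg = build_agg_matrix(['a', 'b', 'b', 'c'])
expected = np.array([[1., 0., 0., 0.],
                     [0., 1., 1., 0.],
                     [0., 0., 0., 1.]])
assert np.array_equal(agg, expected)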
|
python
|
{
"resource": ""
}
|
q8491
|
diagonalize_blocks
|
train
|
def diagonalize_blocks(arr, blocksize):
""" Diagonalize sections of columns of an array for the whole array
Parameters
----------
arr : numpy array
Input array
blocksize : int
        number of rows/columns forming one block
Returns
-------
numpy ndarray with shape (columns 'arr' * blocksize,
columns 'arr' * blocksize)
Example
--------
arr: output: (blocksize = 3)
3 1 3 0 0 1 0 0
4 2 0 4 0 0 2 0
5 3 0 0 5 0 0 3
6 9 6 0 0 9 0 0
7 6 0 7 0 0 6 0
8 4 0 0 8 0 0 4
"""
nr_col = arr.shape[1]
nr_row = arr.shape[0]
if np.mod(nr_row, blocksize):
raise ValueError(
'Number of rows of input array must be a multiple of blocksize')
arr_diag = np.zeros((nr_row, blocksize*nr_col))
for col_ind, col_val in enumerate(arr.T):
col_start = col_ind*blocksize
col_end = blocksize + col_ind*blocksize
for _ind in range(int(nr_row/blocksize)):
row_start = _ind*blocksize
row_end = blocksize + _ind * blocksize
arr_diag[row_start:row_end,
col_start:col_end] = np.diag(col_val[row_start:row_end])
return arr_diag
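# Illustrative usage (added for clarity) - based on the example in the
# docstring: a 6x2 array with blocksize 3 becomes a 6x6 array.
arr = np.array([[3, 1], [4, 2], [5, 3], [6, 9], [7, 6], [8, 4]])
out = diagonalize_blocks(arr, blocksize=3)
assert out.shape == (6, 6)
assert out[0, 0] == 3 and out[1, 1] == 4 and out[3, 3] == 9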
|
python
|
{
"resource": ""
}
|
q8492
|
set_block
|
train
|
def set_block(arr, arr_block):
    """ Sets the diagonal blocks of an array to a given array
Parameters
----------
arr : numpy ndarray
the original array
    arr_block : numpy ndarray
the block array for the new diagonal
Returns
-------
numpy ndarray (the modified array)
"""
nr_col = arr.shape[1]
nr_row = arr.shape[0]
nr_col_block = arr_block.shape[1]
nr_row_block = arr_block.shape[0]
if np.mod(nr_row, nr_row_block) or np.mod(nr_col, nr_col_block):
raise ValueError('Number of rows/columns of the input array '
'must be a multiple of block shape')
if nr_row/nr_row_block != nr_col/nr_col_block:
raise ValueError('Block array can not be filled as '
'diagonal blocks in the given array')
arr_out = arr.copy()
for row_ind in range(int(nr_row/nr_row_block)):
row_start = row_ind*nr_row_block
row_end = nr_row_block+nr_row_block*row_ind
col_start = row_ind*nr_col_block
col_end = nr_col_block+nr_col_block*row_ind
arr_out[row_start:row_end, col_start:col_end] = arr_block
return arr_out
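# Illustrative usage (added for clarity): overwrite the two 2x2 diagonal
# blocks of a 4x4 zero matrix with the given block.
arr = np.zeros((4, 4))
block = np.array([[1, 2], [3, 4]])
out = set_block(arr, block)
assert out[0, 1] == 2 and out[2, 3] == 2 and out[0, 2] == 0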
|
python
|
{
"resource": ""
}
|
q8493
|
unique_element
|
train
|
def unique_element(ll):
""" returns unique elements from a list preserving the original order """
seen = {}
result = []
for item in ll:
if item in seen:
continue
seen[item] = 1
result.append(item)
return result
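# Illustrative usage (added for clarity): order of first appearance is kept.
assert unique_element(['b', 'a', 'b', 'c', 'a']) == ['b', 'a', 'c']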
|
python
|
{
"resource": ""
}
|
q8494
|
build_agg_vec
|
train
|
def build_agg_vec(agg_vec, **source):
    """ Builds a combined aggregation vector based on various classifications
    This function builds an aggregation vector based on the order in agg_vec.
The naming and actual mapping is given in source, either explicitly or by
pointing to a folder with the mapping.
>>> build_agg_vec(['EU', 'OECD'], path = 'test')
['EU', 'EU', 'EU', 'OECD', 'REST', 'REST']
>>> build_agg_vec(['OECD', 'EU'], path = 'test', miss='RoW')
['OECD', 'EU', 'OECD', 'OECD', 'RoW', 'RoW']
>>> build_agg_vec(['EU', 'orig_regions'], path = 'test')
['EU', 'EU', 'EU', 'reg4', 'reg5', 'reg6']
>>> build_agg_vec(['supreg1', 'other'], path = 'test',
>>> other = [None, None, 'other1', 'other1', 'other2', 'other2'])
['supreg1', 'supreg1', 'other1', 'other1', 'other2', 'other2']
Parameters
----------
agg_vec : list
A list of sector or regions to which the IOSystem shall be aggregated.
The order in agg_vec is important:
If a string was assigned to one specific entry it will not be
overwritten if it is given in the next vector, e.g. ['EU', 'OECD']
would aggregate first into EU and the remaining one into OECD, whereas
        ['OECD', 'EU'] would first aggregate all countries into OECD and then
the remaining countries into EU.
source : list or string
Definition of the vectors in agg_vec. The input vectors (either in the
file or given as list for the entries in agg_vec) must be as long as
the desired output with a string for every position which should be
aggregated and None for position which should not be used.
Special keywords:
- path : Path to a folder with concordance matrices.
The files in the folder can have any extension but must be
in text format (tab separated) with one entry per row.
The last column in the file will be taken as aggregation
vectors (other columns can be used for documentation).
Values must be given for every entry in the original
classification (string None for all values not used) If
the same entry is given in source and as text file in
path than the one in source will be used.
Two special path entries are available so far:
- 'exio2'
Concordance matrices for EXIOBASE 2.0
- 'test'
Concordance matrices for the test IO system
If a entry is not found in source and no path is given
the current directory will be searched for the definition.
- miss : Entry to use for missing values, default: 'REST'
Returns
-------
list (aggregation vector)
"""
# build a dict with aggregation vectors in source and folder
if type(agg_vec) is str:
agg_vec = [agg_vec]
agg_dict = dict()
for entry in agg_vec:
try:
agg_dict[entry] = source[entry]
except KeyError:
folder = source.get('path', './')
folder = os.path.join(PYMRIO_PATH[folder], 'concordance')
for file in os.listdir(folder):
if entry == os.path.splitext(file)[0]:
_tmp = np.genfromtxt(os.path.join(folder, file), dtype=str)
if _tmp.ndim == 1:
agg_dict[entry] = [None if ee == 'None'
else ee for ee in _tmp.tolist()]
else:
agg_dict[entry] = [None if ee == 'None'
else ee
for ee in _tmp[:, -1].tolist()]
break
else:
logging.error(
'Aggregation vector -- {} -- not found'
.format(str(entry)))
# build the summary aggregation vector
def _rep(ll, ii, vv): ll[ii] = vv
miss_val = source.get('miss', 'REST')
vec_list = [agg_dict[ee] for ee in agg_vec]
out = [None, ] * len(vec_list[0])
for currvec in vec_list:
if len(currvec) != len(out):
logging.warn('Inconsistent vector length')
[_rep(out, ind, val) for ind, val in
enumerate(currvec) if not out[ind]]
[_rep(out, ind, miss_val) for ind, val in enumerate(out) if not val]
return out
|
python
|
{
"resource": ""
}
|
q8495
|
find_first_number
|
train
|
def find_first_number(ll):
""" Returns nr of first entry parseable to float in ll, None otherwise"""
for nr, entry in enumerate(ll):
try:
float(entry)
        except (ValueError, TypeError):
pass
else:
return nr
return None
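# Illustrative usage (added for clarity): position 2 holds the first entry
# that can be parsed as a float.
assert find_first_number(['region', 'sector', '1.5', '2']) == 2
assert find_first_number(['only', 'text']) is None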
|
python
|
{
"resource": ""
}
|
q8496
|
sniff_csv_format
|
train
|
def sniff_csv_format(csv_file,
potential_sep=['\t', ',', ';', '|', '-', '_'],
max_test_lines=10,
zip_file=None):
""" Tries to get the separator, nr of index cols and header rows in a csv file
Parameters
----------
csv_file: str
Path to a csv file
potential_sep: list, optional
List of potential separators (delimiters) to test.
Default: '\t', ',', ';', '|', '-', '_'
max_test_lines: int, optional
How many lines to test, default: 10 or available lines in csv_file
zip_file: str, optional
Path to a zip file containing the csv file (if any, default: None).
If a zip file is given, the path given at 'csv_file' is assumed
to be the path to the file within the zip_file.
Returns
-------
dict with
sep: string (separator)
nr_index_col: int
nr_header_row: int
        Entries are set to None if the information in the file is inconsistent
"""
def read_first_lines(filehandle):
lines = []
for i in range(max_test_lines):
            line = filehandle.readline()
if line == '':
break
try:
line = line.decode('utf-8')
except AttributeError:
pass
lines.append(line[:-1])
return lines
if zip_file:
with zipfile.ZipFile(zip_file, 'r') as zz:
with zz.open(csv_file, 'r') as ff:
test_lines = read_first_lines(ff)
else:
with open(csv_file, 'r') as ff:
test_lines = read_first_lines(ff)
sep_aly_lines = [sorted([(line.count(sep), sep)
for sep in potential_sep if line.count(sep) > 0],
key=lambda x: x[0], reverse=True) for line in test_lines]
for nr, (count, sep) in enumerate(sep_aly_lines[0]):
for line in sep_aly_lines:
if line[nr][0] == count:
break
else:
sep = None
if sep:
break
nr_header_row = None
nr_index_col = None
if sep:
nr_index_col = find_first_number(test_lines[-1].split(sep))
if nr_index_col:
for nr_header_row, line in enumerate(test_lines):
if find_first_number(line.split(sep)) == nr_index_col:
break
return dict(sep=sep,
nr_header_row=nr_header_row,
nr_index_col=nr_index_col)
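# Illustrative usage sketch (added for clarity): write a small tab-separated
# file with one header row and one index column and let the sniffer detect
# the layout (expected: sep='\t', nr_header_row=1, nr_index_col=1).
import os
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as tf:
    tf.write('region\tval1\tval2\n')
    tf.write('reg1\t1.0\t2.0\n')
    tf.write('reg2\t3.0\t4.0\n')
    tmp_csv = tf.name
print(sniff_csv_format(tmp_csv))
os.remove(tmp_csv)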
|
python
|
{
"resource": ""
}
|
q8497
|
GreenPoller._get_descriptors
|
train
|
def _get_descriptors(self):
        """Returns a three-element tuple with socket descriptors ready
for gevent.select.select
"""
rlist = []
wlist = []
xlist = []
for socket, flags in self.sockets.items():
if isinstance(socket, zmq.Socket):
rlist.append(socket.getsockopt(zmq.FD))
continue
elif isinstance(socket, int):
fd = socket
elif hasattr(socket, 'fileno'):
try:
fd = int(socket.fileno())
                except Exception:
                    raise ValueError('fileno() must return a valid integer fd')
else:
raise TypeError('Socket must be a 0MQ socket, an integer fd '
'or have a fileno() method: %r' % socket)
if flags & zmq.POLLIN:
rlist.append(fd)
if flags & zmq.POLLOUT:
wlist.append(fd)
if flags & zmq.POLLERR:
xlist.append(fd)
return (rlist, wlist, xlist)
|
python
|
{
"resource": ""
}
|
q8498
|
GreenPoller.poll
|
train
|
def poll(self, timeout=-1):
"""Overridden method to ensure that the green version of
Poller is used.
Behaves the same as :meth:`zmq.core.Poller.poll`
"""
if timeout is None:
timeout = -1
if timeout < 0:
timeout = -1
rlist = None
wlist = None
xlist = None
if timeout > 0:
tout = gevent.Timeout.start_new(timeout/1000.0)
try:
# Loop until timeout or events available
rlist, wlist, xlist = self._get_descriptors()
while True:
events = super(GreenPoller, self).poll(0)
if events or timeout == 0:
return events
# wait for activity on sockets in a green way
select.select(rlist, wlist, xlist)
        except gevent.Timeout as t:
if t is not tout:
raise
return []
finally:
if timeout > 0:
tout.cancel()
|
python
|
{
"resource": ""
}
|
q8499
|
_instantiate_task
|
train
|
def _instantiate_task(api, kwargs):
"""Create a Task object from raw kwargs"""
file_id = kwargs['file_id']
kwargs['file_id'] = file_id if str(file_id).strip() else None
kwargs['cid'] = kwargs['file_id'] or None
kwargs['rate_download'] = kwargs['rateDownload']
kwargs['percent_done'] = kwargs['percentDone']
kwargs['add_time'] = get_utcdatetime(kwargs['add_time'])
kwargs['last_update'] = get_utcdatetime(kwargs['last_update'])
is_transferred = (kwargs['status'] == 2 and kwargs['move'] == 1)
if is_transferred:
kwargs['pid'] = api.downloads_directory.cid
else:
kwargs['pid'] = None
del kwargs['rateDownload']
del kwargs['percentDone']
if 'url' in kwargs:
if not kwargs['url']:
kwargs['url'] = None
else:
kwargs['url'] = None
task = Task(api, **kwargs)
if is_transferred:
task._parent = api.downloads_directory
return task
|
python
|
{
"resource": ""
}
|