| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
'''
Given an FSM and a multiplier, return the multiplied FSM.
'''
if multiplier < 0:
raise Exception("Can't multiply an FSM by " + repr(multiplier))
alphabet = self.alphabet
# metastate is a set of iterations+states
initial = {(self.initial, 0)}
def final(state):
'''If the initial state is final then multiplying doesn't alter that'''
for (substate, iteration) in state:
if substate == self.initial \
and (self.initial in self.finals or iteration == multiplier):
return True
return False
def follow(current, symbol):
next = []
for (substate, iteration) in current:
if iteration < multiplier \
and substate in self.map \
and symbol in self.map[substate]:
next.append((self.map[substate][symbol], iteration))
# final of self? merge with initial on next iteration
if self.map[substate][symbol] in self.finals:
next.append((self.initial, iteration + 1))
if len(next) == 0:
raise OblivionError
return frozenset(next)
return crawl(alphabet, initial, final, follow).reduce()
|
def times(self, multiplier)
|
Given an FSM and a multiplier, return the multiplied FSM.
| 4.809729
| 4.496314
| 1.069705
|
'''
Return a finite state machine which will accept any string NOT
accepted by self, and will not accept any string accepted by self.
This is more complicated if there are missing transitions, because the
missing "dead" state must now be reified.
'''
alphabet = self.alphabet
initial = {0 : self.initial}
def follow(current, symbol):
next = {}
if 0 in current and current[0] in self.map and symbol in self.map[current[0]]:
next[0] = self.map[current[0]][symbol]
return next
# state is final unless the original was
def final(state):
return not (0 in state and state[0] in self.finals)
return crawl(alphabet, initial, final, follow).reduce()
|
def everythingbut(self)
|
Return a finite state machine which will accept any string NOT
accepted by self, and will not accept any string accepted by self.
This is more complicated if there are missing transitions, because the
missing "dead" state must now be reified.
| 6.280563
| 2.949206
| 2.129577
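The docstring above hinges on one detail: to complement a partial FSM you first have to make its transition map total by adding the missing "dead" state, and that dead state becomes accepting in the complement. A minimal standalone sketch of that idea (plain dicts and illustrative names, not greenery's `fsm` API):

```python
# Complement of a partial DFA: reify the dead state, make transitions total,
# then flip which states are final. Names and ALPHABET are illustrative.
ALPHABET = {"a", "b"}

def complement(initial, finals, transitions):
    """transitions: dict state -> dict symbol -> state (may be partial)."""
    dead = object()                          # the reified dead state
    states = set(transitions) | {s for m in transitions.values() for s in m.values()}
    states |= {initial, dead}
    total = {state: {sym: transitions.get(state, {}).get(sym, dead)
                     for sym in ALPHABET}
             for state in states}
    return initial, states - set(finals), total   # finality flipped; dead now accepts

# The machine accepting only "a" ...
init, fins, trans = complement("q0", {"q1"}, {"q0": {"a": "q1"}})
# ... now accepts everything except "a": "", "b", "aa", "ab", ...
```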
|
'''
Return a new FSM such that for every string that self accepts (e.g.
"beer"), the new FSM accepts the reversed string ("reeb").
'''
alphabet = self.alphabet
# Start from a composite "state-set" consisting of all final states.
# If there are no final states, this set is empty and we'll find that
# no other states get generated.
initial = frozenset(self.finals)
# Find every possible way to reach the current state-set
# using this symbol.
def follow(current, symbol):
next = frozenset([
prev
for prev in self.map
for state in current
if symbol in self.map[prev] and self.map[prev][symbol] == state
])
if len(next) == 0:
raise OblivionError
return next
# A state-set is final if the initial state is in it.
def final(state):
return self.initial in state
# Man, crawl() is the best!
return crawl(alphabet, initial, final, follow)
|
def reversed(self)
|
Return a new FSM such that for every string that self accepts (e.g.
"beer"), the new FSM accepts the reversed string ("reeb").
| 7.096805
| 4.868345
| 1.457745
|
'''A state is "live" if a final state can be reached from it.'''
reachable = [state]
i = 0
while i < len(reachable):
current = reachable[i]
if current in self.finals:
return True
if current in self.map:
for symbol in self.map[current]:
next = self.map[current][symbol]
if next not in reachable:
reachable.append(next)
i += 1
return False
|
def islive(self, state)
|
A state is "live" if a final state can be reached from it.
| 2.498192
| 2.057833
| 1.213992
|
'''
Generate strings (lists of symbols) that this FSM accepts. Since there may
be infinitely many of these we use a generator instead of constructing a
static list. Strings will be sorted in order of length and then lexically.
This procedure uses arbitrary amounts of memory but is very fast. There
may be more efficient ways to do this, that I haven't investigated yet.
You can use this in list comprehensions.
'''
# Many FSMs have "dead states". Once you reach a dead state, you can no
# longer reach a final state. Since many strings may end up here, it's
# advantageous to constrain our search to live states only.
livestates = set(state for state in self.states if self.islive(state))
# We store a list of tuples. Each tuple consists of an input string and the
# state that this input string leads to. This means we don't have to run the
# state machine from the very beginning every time we want to check a new
# string.
strings = []
# Initial entry (or possibly not, in which case this is a short one)
cstate = self.initial
cstring = []
if cstate in livestates:
if cstate in self.finals:
yield cstring
strings.append((cstring, cstate))
# Fixed point calculation
i = 0
while i < len(strings):
(cstring, cstate) = strings[i]
if cstate in self.map:
for symbol in sorted(self.map[cstate], key=key):
nstate = self.map[cstate][symbol]
nstring = cstring + [symbol]
if nstate in livestates:
if nstate in self.finals:
yield nstring
strings.append((nstring, nstate))
i += 1
|
def strings(self)
|
Generate strings (lists of symbols) that this FSM accepts. Since there may
be infinitely many of these we use a generator instead of constructing a
static list. Strings will be sorted in order of length and then lexically.
This procedure uses arbitrary amounts of memory but is very fast. There
may be more efficient ways to do this, that I haven't investigated yet.
You can use this in list comprehensions.
| 4.910528
| 2.918118
| 1.682772
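The generator above is a breadth-first search over (prefix, state) pairs, which is exactly what produces the length-then-lexical ordering. A self-contained sketch of the same idea over a plain dict transition map (illustrative names; the original additionally restricts the search to live states so dead ends are never queued):

```python
# Breadth-first enumeration of accepted strings, shortest first.
def enumerate_strings(initial, finals, transitions, limit=10):
    out = []
    queue = [("", initial)]
    while queue and len(out) < limit:
        prefix, state = queue.pop(0)
        if state in finals:
            out.append(prefix)
        for symbol in sorted(transitions.get(state, {})):
            queue.append((prefix + symbol, transitions[state][symbol]))
    return out

# ab* : accepts "a", "ab", "abb", ...
print(enumerate_strings("q0", {"q1"}, {"q0": {"a": "q1"}, "q1": {"b": "q1"}}, 4))
# ['a', 'ab', 'abb', 'abbb']
```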
|
'''
Consider the FSM as a set of strings and return the cardinality of that
set, or raise an OverflowError if there are infinitely many
'''
num_strings = {}
def get_num_strings(state):
# Many FSMs have at least one oblivion state
if self.islive(state):
if state in num_strings:
if num_strings[state] is None: # "computing..."
# Recursion! There are infinitely many strings recognised
raise OverflowError(state)
return num_strings[state]
num_strings[state] = None # i.e. "computing..."
n = 0
if state in self.finals:
n += 1
if state in self.map:
for symbol in self.map[state]:
n += get_num_strings(self.map[state][symbol])
num_strings[state] = n
else:
# Dead state
num_strings[state] = 0
return num_strings[state]
return get_num_strings(self.initial)
|
def cardinality(self)
|
Consider the FSM as a set of strings and return the cardinality of that
set, or raise an OverflowError if there are infinitely many
| 3.974552
| 3.095526
| 1.283967
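The counting above is a memoised depth-first walk in which the sentinel value None marks a state whose count is still being computed; re-entering such a state means the walk has found a cycle through a live state, hence infinitely many strings. A compact standalone sketch of that trick (illustrative names; the original also skips dead states first, so cycles that can never reach a final state do not count as infinite):

```python
# Count accepted strings; raise OverflowError when a cycle is detected.
def count_strings(initial, finals, transitions):
    memo = {}
    def count(state):
        if state in memo:
            if memo[state] is None:          # still "computing": a cycle
                raise OverflowError(state)
            return memo[state]
        memo[state] = None                   # mark as "computing..."
        n = 1 if state in finals else 0
        for nxt in transitions.get(state, {}).values():
            n += count(nxt)
        memo[state] = n
        return n
    return count(initial)

# Accepts exactly "a" and "ab": two strings.
print(count_strings("q0", {"q1", "q2"}, {"q0": {"a": "q1"}, "q1": {"b": "q2"}}))
```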
|
'''
For completeness only, since `set.copy()` also exists. FSM objects are
immutable, so I can see only very odd reasons to need this.
'''
return fsm(
alphabet = self.alphabet,
states = self.states,
initial = self.initial,
finals = self.finals,
map = self.map,
)
|
def copy(self)
|
For completeness only, since `set.copy()` also exists. FSM objects are
immutable, so I can see only very odd reasons to need this.
| 7.277059
| 2.076032
| 3.505274
|
'''
Compute the Brzozowski derivative of this FSM with respect to the input
string of symbols. <https://en.wikipedia.org/wiki/Brzozowski_derivative>
If any of the symbols are not members of the alphabet, that's a KeyError.
If you fall into oblivion, then the derivative is an FSM accepting no
strings.
'''
try:
# Consume the input string.
state = self.initial
for symbol in input:
if not symbol in self.alphabet:
if not anything_else in self.alphabet:
raise KeyError(symbol)
symbol = anything_else
# Missing transition = transition to dead state
if not (state in self.map and symbol in self.map[state]):
raise OblivionError
state = self.map[state][symbol]
# OK so now we have consumed that string, use the new location as the
# starting point.
return fsm(
alphabet = self.alphabet,
states = self.states,
initial = state,
finals = self.finals,
map = self.map,
)
except OblivionError:
# Fell out of the FSM. The derivative of this FSM is the empty FSM.
return null(self.alphabet)
|
def derive(self, input)
|
Compute the Brzozowski derivative of this FSM with respect to the input
string of symbols. <https://en.wikipedia.org/wiki/Brzozowski_derivative>
If any of the symbols are not members of the alphabet, that's a KeyError.
If you fall into oblivion, then the derivative is an FSM accepting no
strings.
| 4.884939
| 2.800274
| 1.74445
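A worked example may help: the Brzozowski derivative of a language with respect to a prefix is the set of suffixes completing it, so the derivative of {"abc"} with respect to "a" is {"bc"}. The consume-then-restart idea above can be sketched standalone (illustrative names; the original additionally distinguishes symbols outside the alphabet, which raise KeyError, from falling into oblivion):

```python
# Brzozowski derivative of a DFA: consume the prefix, then restart the same
# machine from the state reached. Falling off the map gives the empty language.
def derive(initial, finals, transitions, prefix):
    state = initial
    for symbol in prefix:
        try:
            state = transitions[state][symbol]
        except KeyError:
            return None                      # empty language
    return state, finals, transitions        # same machine, new initial state

trans = {"q0": {"a": "q1"}, "q1": {"b": "q2"}, "q2": {"c": "q3"}}  # exactly "abc"
print(derive("q0", {"q3"}, trans, "a"))      # starts at 'q1', i.e. accepts "bc"
print(derive("q0", {"q3"}, trans, "x"))      # None: nothing starts with "x"
```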
|
if self.url_prefix:
url = '{prefix}{url}'.format(prefix=self.url_prefix, url=endpoint.url)
else:
url = endpoint.url
self.add_url_rule(
url,
view_func=endpoint.as_view(endpoint.get_name()),
)
|
def add_endpoint(self, endpoint)
|
Register an :class:`.Endpoint` against this resource.
:param endpoint: :class:`.Endpoint` API Endpoint class
Usage::
foo_resource = Resource('example', __name__)
class MyEndpoint(Endpoint):
url = '/example'
name = 'myendpoint'
foo_resource.add_endpoint(MyEndpoint)
| 2.801992
| 2.872587
| 0.975425
|
'''reduce() the result of this method call (unless you already reduced it).'''
def new_method(self, *args, **kwargs):
result = method(self, *args, **kwargs)
if result == self:
return result
return result.reduce()
return new_method
|
def reduce_after(method)
|
reduce() the result of this method call (unless you already reduced it).
| 4.399828
| 3.058843
| 1.438396
|
'''
Take a method which acts on 0 or more regular expression objects... return a
new method which simply converts them all to FSMs, calls the FSM method
on them instead, then converts the result back to a regular expression.
We do this for several of the more annoying operations.
'''
fsm_method = getattr(fsm.fsm, method.__name__)
def new_method(*legos):
alphabet = set().union(*[lego.alphabet() for lego in legos])
return from_fsm(fsm_method(*[lego.to_fsm(alphabet) for lego in legos]))
return new_method
|
def call_fsm(method)
|
Take a method which acts on 0 or more regular expression objects... return a
new method which simply converts them all to FSMs, calls the FSM method
on them instead, then converts the result back to a regular expression.
We do this for several of the more annoying operations.
| 6.993196
| 2.26703
| 3.08474
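The decorator above is a generic "lift through a conversion" shape: convert every argument into the richer representation, run the operation there, convert the result back. A toy standalone illustration of that shape, with fractions standing in for FSMs and floats for regex objects (purely illustrative, not the library's code):

```python
# Toy version of the call_fsm shape: convert in, operate, convert back.
from fractions import Fraction

def via_fractions(op):
    def lifted(*floats):
        result = op(*[Fraction(f).limit_denominator() for f in floats])
        return float(result)                 # convert the result back
    return lifted

@via_fractions
def exact_sum(*fracs):
    return sum(fracs)

print(exact_sum(0.1, 0.2))                   # 0.3, summed exactly as 1/10 + 1/5
```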
|
'''
Parse the entire supplied string as an instance of the present class.
Mainly for internal use in unit tests because it drops through to match()
in a convenient way.
'''
obj, i = cls.match(string, 0)
if i != len(string):
raise Exception("Could not parse '" + string + "' beyond index " + str(i))
return obj
|
def parse(cls, string)
|
Parse the entire supplied string as an instance of the present class.
Mainly for internal use in unit tests because it drops through to match()
in a convenient way.
| 6.970455
| 2.3675
| 2.944226
|
'''
Each time next() is called on this iterator, a new string is returned
which the present lego piece can match. StopIteration is raised once
all such strings have been returned, although a regex with a * in it may
match infinitely many strings.
'''
# In the case of a regex like "[^abc]", there are infinitely many (well, a
# very large finite number of) single characters which will match. It's not
# productive to iterate over all of these giving every single example.
# You must supply your own "otherchar" to stand in for all of these
# possibilities.
for string in self.to_fsm().strings():
# Have to represent `fsm.anything_else` somehow.
if fsm.anything_else in string:
if otherchar == None:
raise Exception("Please choose an 'otherchar'")
string = [
otherchar if char == fsm.anything_else else char
for char in string
]
yield "".join(string)
|
def strings(self, otherchar=None)
|
Each time next() is called on this iterator, a new string is returned
which the present lego piece can match. StopIteration is raised once
all such strings have been returned, although a regex with a * in it may
match infinitely many strings.
| 9.932012
| 5.149224
| 1.928837
|
'''
Multiplication is not well-defined for all pairs of multipliers because
the resulting possibilities do not necessarily form a continuous range.
For example:
{0,x} * {0,y} = {0,x*y}
{2} * {3} = {6}
{2} * {1,2} = ERROR
The proof isn't simple but suffice it to say that {p,p+q} * {r,r+s} is
equal to {pr, (p+q)(r+s)} only if s=0 or qr+1 >= p. If not, then at least
one gap appears in the range. The first inaccessible number is (p+q)r + 1.
'''
return other.optional == bound(0) or \
self.optional * other.mandatory + bound(1) >= self.mandatory
|
def canmultiplyby(self, other)
|
Multiplication is not well-defined for all pairs of multipliers because
the resulting possibilities do not necessarily form a continuous range.
For example:
{0,x} * {0,y} = {0,x*y}
{2} * {3} = {6}
{2} * {1,2} = ERROR
The proof isn't simple but suffice it to say that {p,p+q} * {r,r+s} is
equal to {pr, (p+q)(r+s)} only if s=0 or qr+1 >= p. If not, then at least
one gap appears in the range. The first inaccessible number is (p+q)r + 1.
| 10.365919
| 1.707423
| 6.071089
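Plugging the ERROR example into the stated condition: for {2} * {1,2} we have p=2, q=0, r=1, s=1, so s ≠ 0 and qr+1 = 1 < p = 2; the reachable totals are {2, 4} and the first inaccessible number is (p+q)r + 1 = 3. A small brute-force check of the condition for finite multipliers (plain integers, not the library's bound/multiplier types):

```python
# Brute-force check of "{p,p+q} * {r,r+s} is contiguous iff s == 0 or qr+1 >= p"
# for small finite multipliers. Plain integers only; illustrative sketch.
def reachable(p, q, r, s):
    totals = set()
    for m in range(r, r + s + 1):            # number of repetitions of {p, p+q}
        totals |= set(range(m * p, m * (p + q) + 1))
    return totals

def can_multiply(p, q, r, s):
    return s == 0 or q * r + 1 >= p

for p, q, r, s in [(0, 3, 0, 4), (2, 0, 3, 0), (2, 0, 1, 1)]:
    contiguous = reachable(p, q, r, s) == set(range(p * r, (p + q) * (r + s) + 1))
    print((p, q, r, s), can_multiply(p, q, r, s), contiguous)
# (0, 3, 0, 4) True True    {0,3} * {0,4} = {0,12}
# (2, 0, 3, 0) True True    {2} * {3} = {6}
# (2, 0, 1, 1) False False  {2} * {1,2} skips 3
```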
|
'''
Intersection is not well-defined for all pairs of multipliers.
For example:
{2,3} & {3,4} = {3}
{2,} & {1,7} = {2,7}
{2} & {5} = ERROR
'''
return not (self.max < other.min or other.max < self.min)
|
def canintersect(self, other)
|
Intersection is not well-defined for all pairs of multipliers.
For example:
{2,3} & {3,4} = {3}
{2,} & {1,7} = {2,7}
{2} & {5} = ERROR
| 5.060441
| 1.66865
| 3.032655
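The check above is the usual interval-overlap test: {a,b} and {c,d} intersect exactly when neither range ends before the other begins, and the intersection is then {max(a,c), min(b,d)}. A tiny standalone version over plain integers (the real bound type also handles the unbounded case like {2,}):

```python
# Interval overlap for finite multiplier ranges; plain integers only.
def intersect(a, b, c, d):
    if b < c or d < a:                       # one range ends before the other starts
        return None
    return max(a, c), min(b, d)

print(intersect(2, 3, 3, 4))                 # (3, 3)  -> {2,3} & {3,4} = {3}
print(intersect(2, 2, 5, 5))                 # None    -> {2} & {5} is an error
```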
|
'''Union is not defined for all pairs of multipliers. e.g. {0,1} | {3,4}'''
return not (self.max + bound(1) < other.min or other.max + bound(1) < self.min)
|
def canunion(self, other)
|
Union is not defined for all pairs of multipliers. e.g. {0,1} | {3,4}
| 7.822034
| 3.54139
| 2.208747
|
'''
Find the shared part of two multipliers. This is the largest multiplier
which can be safely subtracted from both the originals. This may
return the "zero" multiplier.
'''
mandatory = min(self.mandatory, other.mandatory)
optional = min(self.optional, other.optional)
return multiplier(mandatory, mandatory + optional)
|
def common(self, other)
|
Find the shared part of two multipliers. This is the largest multiplier
which can be safely subtracted from both the originals. This may
return the "zero" multiplier.
| 6.817889
| 2.351487
| 2.899395
|
'''
"Dock" another mult from this one (i.e. remove part of the tail) and
return the result. The reverse of concatenation. This is a lot trickier.
e.g. a{4,5} - a{3} = a{1,2}
'''
if other.multiplicand != self.multiplicand:
raise Exception("Can't subtract " + repr(other) + " from " + repr(self))
return mult(self.multiplicand, self.multiplier - other.multiplier)
|
def dock(self, other)
|
"Dock" another mult from this one (i.e. remove part of the tail) and
return the result. The reverse of concatenation. This is a lot trickier.
e.g. a{4,5} - a{3} = a{1,2}
| 6.399021
| 1.939736
| 3.298914
|
'''
Return the common part of these two mults. This is the largest mult
which can be safely subtracted from both the originals. The multiplier
on this mult could be zero: this is the case if, for example, the
multiplicands disagree.
'''
if self.multiplicand == other.multiplicand:
return mult(self.multiplicand, self.multiplier.common(other.multiplier))
# Multiplicands disagree, no common part at all.
return mult(nothing, zero)
|
def common(self, other)
|
Return the common part of these two mults. This is the largest mult
which can be safely subtracted from both the originals. The multiplier
on this mult could be zero: this is the case if, for example, the
multiplicands disagree.
| 6.216348
| 2.237873
| 2.777793
|
'''
Return the common prefix of these two concs; that is, the largest conc
which can be safely beheaded() from the front of both.
The result could be emptystring.
"ZYAA, ZYBB" -> "ZY"
"CZ, CZ" -> "CZ"
"YC, ZC" -> ""
With the "suffix" flag set, works from the end. E.g.:
"AAZY, BBZY" -> "ZY"
"CZ, CZ" -> "CZ"
"CY, CZ" -> ""
'''
mults = []
indices = range(min(len(self.mults), len(other.mults))) # e.g. [0, 1, 2, 3]
# Work backwards from the end of both concs instead.
if suffix:
indices = [-i - 1 for i in indices] # e.g. [-1, -2, -3, -4]
for i in indices:
common = self.mults[i].common(other.mults[i])
# Happens when multiplicands disagree (e.g. "A.common(B)") or if
# the multiplicand is shared but the common multiplier is zero
# (e.g. "ABZ*.common(CZ)".)
if common.multiplier == zero:
break
mults.append(common)
# If we did not remove the entirety of both mults, we cannot continue.
if common != self.mults[i] or common != other.mults[i]:
break
if suffix:
mults = reversed(mults)
return conc(*mults)
|
def common(self, other, suffix=False)
|
Return the common prefix of these two concs; that is, the largest conc
which can be safely beheaded() from the front of both.
The result could be emptystring.
"ZYAA, ZYBB" -> "ZY"
"CZ, CZ" -> "CZ"
"YC, ZC" -> ""
With the "suffix" flag set, works from the end. E.g.:
"AAZY, BBZY" -> "ZY"
"CZ, CZ" -> "CZ"
"CY, CZ" -> ""
| 4.873902
| 2.467895
| 1.974923
|
'''
Subtract another conc from this one.
This is the opposite of concatenation. For example, if ABC + DEF = ABCDEF,
then logically ABCDEF - DEF = ABC.
'''
# e.g. self has mults at indices [0, 1, 2, 3, 4, 5, 6] len=7
# e.g. other has mults at indices [0, 1, 2] len=3
new = list(self.mults)
for i in reversed(range(len(other.mults))): # [2, 1, 0]
# e.g. i = 1, j = 7 - 3 + 1 = 5
j = len(self.mults) - len(other.mults) + i
new[j] = new[j].dock(other.mults[i])
if new[j].multiplier == zero:
# omit that mult entirely since it has been factored out
del new[j]
# If the subtraction is incomplete but there is more to
# other.mults, then we have a problem. For example, "ABC{2} - BC"
# subtracts the C successfully but leaves something behind,
# then tries to subtract the B too, which isn't possible
else:
if i != 0:
raise Exception("Can't subtract " + repr(other) + " from " + repr(self))
return conc(*new)
|
def dock(self, other)
|
Subtract another conc from this one.
This is the opposite of concatenation. For example, if ABC + DEF = ABCDEF,
then logically ABCDEF - DEF = ABC.
| 5.456419
| 4.19175
| 1.301704
|
'''
The opposite of concatenation. Remove a common suffix from the present
pattern; that is, from each of its constituent concs.
AYZ|BYZ|CYZ - YZ = A|B|C.
'''
return pattern(*[c.dock(other) for c in self.concs])
|
def dock(self, other)
|
The opposite of concatenation. Remove a common suffix from the present
pattern; that is, from each of its constituent concs.
AYZ|BYZ|CYZ - YZ = A|B|C.
| 18.485912
| 2.271767
| 8.137241
|
'''
Like dock() but the other way around. Remove a common prefix from the
present pattern; that is, from each of its constituent concs.
ZA|ZB|ZC.behead(Z) = A|B|C
'''
return pattern(*[c.behead(other) for c in self.concs])
|
def behead(self, other)
|
Like dock() but the other way around. Remove a common prefix from the
present pattern; that is, from each of its constituent concs.
ZA|ZB|ZC.behead(Z) = A|B|C
| 15.273121
| 2.101367
| 7.268183
|
'''
Find the longest conc which acts as prefix to every conc in this pattern.
This could be the empty string. Return the common prefix along with all
the leftovers after truncating that common prefix from each conc.
"ZA|ZB|ZC" -> "Z", "(A|B|C)"
"ZA|ZB|ZC|Z" -> "Z", "(A|B|C|)"
"CZ|CZ" -> "CZ", "()"
If "suffix" is True, the same result but for suffixes.
'''
if len(self.concs) == 0:
raise Exception("Can't call _commonconc on " + repr(self))
from functools import reduce
return reduce(
lambda x, y: x.common(y, suffix=suffix),
self.concs
)
|
def _commonconc(self, suffix=False)
|
Find the longest conc which acts as prefix to every conc in this pattern.
This could be the empty string. Return the common prefix along with all
the leftovers after truncating that common prefix from each conc.
"ZA|ZB|ZC" -> "Z", "(A|B|C)"
"ZA|ZB|ZC|Z" -> "Z", "(A|B|C|)"
"CZ|CZ" -> "CZ", "()"
If "suffix" is True, the same result but for suffixes.
| 5.343563
| 1.629715
| 3.278833
|
''' This function doesn't use the plugin. '''
session = create_session()
try:
user = session.query(User).filter_by(name=name).first()
session.delete(user)
session.commit()
except SQLAlchemyError as e:
session.rollback()
raise bottle.HTTPError(500, "Database Error", e)
finally:
session.close()
|
def delete_name(name)
|
This function doesn't use the plugin.
| 3.479029
| 2.806621
| 1.239579
|
''' Make sure that other installed plugins don't affect the same
keyword argument and check if metadata is available.'''
for other in app.plugins:
if not isinstance(other, SQLAlchemyPlugin):
continue
if other.keyword == self.keyword:
raise bottle.PluginError("Found another SQLAlchemy plugin with "\
"conflicting settings (non-unique keyword).")
elif other.name == self.name:
self.name += '_%s' % self.keyword
if self.create and not self.metadata:
raise bottle.PluginError('Define metadata value to create database.')
|
def setup(self, app)
|
Make sure that other installed plugins don't affect the same
keyword argument and check if metadata is available.
| 9.02348
| 5.261705
| 1.714935
|
self.__in_send_multipart = True
try:
msg = super(GreenSocket, self).send_multipart(*args, **kwargs)
finally:
self.__in_send_multipart = False
self.__state_changed()
return msg
|
def send_multipart(self, *args, **kwargs)
|
wrap send_multipart to prevent state_changed on each partial send
| 3.665802
| 2.809468
| 1.304803
|
self.__in_recv_multipart = True
try:
msg = super(GreenSocket, self).recv_multipart(*args, **kwargs)
finally:
self.__in_recv_multipart = False
self.__state_changed()
return msg
|
def recv_multipart(self, *args, **kwargs)
|
wrap recv_multipart to prevent state_changed on each partial recv
| 3.635674
| 2.776999
| 1.30921
|
ext_file = os.path.abspath(str(ext_file))
F = pd.read_table(
ext_file,
header=[0, 1],
index_col=list(range(index_col)),
sep=sep)
F.columns.names = ['region', 'sector']
if index_col == 1:
F.index.names = ['stressor']
elif index_col == 2:
F.index.names = ['stressor', 'unit']
elif index_col == 3:
F.index.names = ['stressor', 'compartment', 'unit']
else:
F.reset_index(level=list(range(3, index_col)),
drop=True,
inplace=True)
F.index.names = ['stressor', 'compartment', 'unit']
unit = None
if index_col > 1:
unit = pd.DataFrame(F.iloc[:, 0].
reset_index(level='unit').unit)
F.reset_index(level='unit', drop=True, inplace=True)
if drop_compartment:
F.reset_index(level='compartment',
drop=True, inplace=True)
unit.reset_index(level='compartment',
drop=True, inplace=True)
return Extension(name=name,
F=F,
unit=unit,
iosystem=iosystem,
version=version,
year=year,
)
|
def parse_exio12_ext(ext_file, index_col, name, drop_compartment=True,
version=None, year=None, iosystem=None, sep=',')
|
Parse an EXIOBASE version 1 or 2 like extension file into pymrio.Extension
EXIOBASE like extensions files are assumed to have two
rows which are used as columns multiindex (region and sector)
and up to three columns for the row index (see Parameters).
For EXIOBASE 3 - extension can be loaded directly with pymrio.load
Notes
-----
So far this only parses factor of production extensions F (not
final demand extensions FY nor coefficients S).
Parameters
----------
ext_file : string or pathlib.Path
File to parse
index_col : int
The number of columns (1 to 3) at the beginning of the file
to use as the index. The order of the index_col must be
- 1 index column: ['stressor']
- 2 index columns: ['stressor', 'unit']
- 3 index columns: ['stressor', 'compartment', 'unit']
- > 3: everything up to three index columns will be removed
name : string
Name of the extension
drop_compartment : boolean, optional
If True (default) removes the compartment from the index.
version : string, optional
see pymrio.Extension
iosystem : string, optional
see pymrio.Extension
year : string or int
see pymrio.Extension
sep : string, optional
Delimiter to use; default ','
Returns
-------
pymrio.Extension
with F (and unit if available)
| 2.442121
| 1.966424
| 1.24191
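The heavy lifting above is done by pandas reading the two header rows as a (region, sector) column MultiIndex and the leading columns as the row index. A minimal, self-contained illustration of that read pattern with made-up data (the layout mimics, but is not, an actual EXIOBASE file):

```python
# Reading a file with a two-row column header and a two-column row index,
# as the parser above does. Data and labels are made up.
import io
import pandas as pd

raw = (",,reg1,reg1,reg2,reg2\n"
       ",,sec1,sec2,sec1,sec2\n"
       "CO2,kg,1.0,2.0,3.0,4.0\n"
       "CH4,kg,0.1,0.2,0.3,0.4\n")
F = pd.read_csv(io.StringIO(raw), header=[0, 1], index_col=[0, 1])
F.columns.names = ['region', 'sector']
F.index.names = ['stressor', 'unit']
print(F.loc[('CO2', 'kg'), ('reg2', 'sec1')])    # 3.0
```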
|
try:
ver_match = re.search(r'(\d+\w*(\.|\-|\_))*\d+\w*', filename)
version = ver_match.string[ver_match.start():ver_match.end()]
if re.search('\_\d\d\d\d', version[-5:]):
version = version[:-5]
except AttributeError:
version = None
return version
|
def get_exiobase12_version(filename)
|
Returns the EXIOBASE version for the given filename,
None if not found
| 3.838233
| 3.978773
| 0.964678
|
path = os.path.normpath(str(path))
if coefficients:
exio_core_regex = dict(
# don’t match file if starting with _
A=re.compile('(?<!\_)mrIot.*txt'),
Y=re.compile('(?<!\_)mrFinalDemand.*txt'),
S_factor_inputs=re.compile('(?<!\_)mrFactorInputs.*txt'),
S_emissions=re.compile('(?<!\_)mrEmissions.*txt'),
S_materials=re.compile('(?<!\_)mrMaterials.*txt'),
S_resources=re.compile('(?<!\_)mrResources.*txt'),
FY_resources=re.compile('(?<!\_)mrFDResources.*txt'),
FY_emissions=re.compile('(?<!\_)mrFDEmissions.*txt'),
FY_materials=re.compile('(?<!\_)mrFDMaterials.*txt'),
)
else:
exio_core_regex = dict(
# don’t match file if starting with _
Z=re.compile('(?<!\_)mrIot.*txt'),
Y=re.compile('(?<!\_)mrFinalDemand.*txt'),
F_fac=re.compile('(?<!\_)mrFactorInputs.*txt'),
F_emissions=re.compile('(?<!\_)mrEmissions.*txt'),
F_materials=re.compile('(?<!\_)mrMaterials.*txt'),
F_resources=re.compile('(?<!\_)mrResources.*txt'),
FY_emissions=re.compile('(?<!\_)mrFDEmissions.*txt'),
FY_materials=re.compile('(?<!\_)mrFDMaterials.*txt'),
)
repo_content = get_repo_content(path)
exio_files = dict()
for kk, vv in exio_core_regex.items():
found_file = [vv.search(ff).string for ff in repo_content.filelist
if vv.search(ff)]
if len(found_file) > 1:
logging.warning(
"Multiple files found for {}: {}"
" - USING THE FIRST ONE".format(kk, found_file))
found_file = found_file[0:1]
elif len(found_file) == 0:
continue
else:
if repo_content.iszip:
format_para = sniff_csv_format(found_file[0],
zip_file=path)
else:
format_para = sniff_csv_format(os.path.join(path,
found_file[0]))
exio_files[kk] = dict(
root_repo=path,
file_path=found_file[0],
version=get_exiobase12_version(
os.path.basename(found_file[0])),
index_rows=format_para['nr_header_row'],
index_col=format_para['nr_index_col'],
unit_col=format_para['nr_index_col'] - 1,
sep=format_para['sep'])
return exio_files
|
def get_exiobase_files(path, coefficients=True)
|
Gets the EXIOBASE files in path (which can be a zip file)
Parameters
----------
path: str or pathlib.Path
Path to exiobase files or zip file
coefficients: boolean, optional
If True (default), considers the mrIot file as A matrix,
and the extensions as S matrices. Otherwise as Z and F, respectively
Returns
-------
dict of dict
| 2.749621
| 2.635972
| 1.043115
|
ispxp = True if re.search('pxp', path, flags=re.IGNORECASE) else False
isixi = True if re.search('ixi', path, flags=re.IGNORECASE) else False
if ispxp == isixi:
system = None
else:
system = 'pxp' if ispxp else 'ixi'
return system
|
def _get_MRIO_system(path)
|
Extract system information (ixi, pxp) from file path.
Returns 'ixi' or 'pxp', None if undetermined
| 2.948079
| 2.350095
| 1.254451
|
path = os.path.abspath(os.path.normpath(str(path)))
exio_files = get_exiobase_files(path)
if len(exio_files) == 0:
raise ParserError("No EXIOBASE files found at {}".format(path))
system = _get_MRIO_system(path)
if not system:
logging.warning("Could not determine system (pxp or ixi)"
" set system parameter manually")
io = generic_exiobase12_parser(exio_files, system=system)
return io
|
def parse_exiobase1(path)
|
Parse the exiobase1 raw data files.
This function works with
- pxp_ita_44_regions_coeff_txt
- ixi_fpa_44_regions_coeff_txt
- pxp_ita_44_regions_coeff_src_txt
- ixi_fpa_44_regions_coeff_src_txt
which can be found on www.exiobase.eu
The parser works with the compressed (zip) files as well as the unpacked
files.
Parameters
----------
path : pathlib.Path or string
Path of the exiobase 1 data
Returns
-------
pymrio.IOSystem with exio1 data
| 6.513347
| 5.555137
| 1.172491
|
io = load_all(path)
# need to rename the final demand satellite,
# wrong name in the standard distribution
try:
io.satellite.FY = io.satellite.F_hh.copy()
del io.satellite.F_hh
except AttributeError:
pass
# some ixi in the exiobase 3.4 official distribution
# have a country name mixup. Clean it here:
io.rename_regions(
{'AUS': 'AU',
'AUT': 'AT',
'BEL': 'BE',
'BGR': 'BG',
'BRA': 'BR',
'CAN': 'CA',
'CHE': 'CH',
'CHN': 'CN',
'CYP': 'CY',
'CZE': 'CZ',
'DEU': 'DE',
'DNK': 'DK',
'ESP': 'ES',
'EST': 'EE',
'FIN': 'FI',
'FRA': 'FR',
'GBR': 'GB',
'GRC': 'GR',
'HRV': 'HR',
'HUN': 'HU',
'IDN': 'ID',
'IND': 'IN',
'IRL': 'IE',
'ITA': 'IT',
'JPN': 'JP',
'KOR': 'KR',
'LTU': 'LT',
'LUX': 'LU',
'LVA': 'LV',
'MEX': 'MX',
'MLT': 'MT',
'NLD': 'NL',
'NOR': 'NO',
'POL': 'PL',
'PRT': 'PT',
'ROM': 'RO',
'RUS': 'RU',
'SVK': 'SK',
'SVN': 'SI',
'SWE': 'SE',
'TUR': 'TR',
'TWN': 'TW',
'USA': 'US',
'ZAF': 'ZA',
'WWA': 'WA',
'WWE': 'WE',
'WWF': 'WF',
'WWL': 'WL',
'WWM': 'WM'})
return io
|
def parse_exiobase3(path)
|
Parses the public EXIOBASE 3 system
This parser works with either the compressed zip
archive as downloaded or the extracted system.
Note
----
The exiobase 3 parser does not yet include
population and characterization data.
Parameters
----------
path : string or pathlib.Path
Path to the folder with the EXIOBASE files
or the compressed archive.
Returns
-------
IOSystem
A IOSystem with the parsed exiobase 3 data
| 2.578382
| 2.622417
| 0.983208
|
sea_ext = '.xlsx'
sea_start = 'WIOD_SEA'
_SEA_folder = os.path.join(root_path, 'SEA')
if not os.path.exists(_SEA_folder):
_SEA_folder = root_path
sea_folder_content = [ff for ff in os.listdir(_SEA_folder)
if os.path.splitext(ff)[-1] == sea_ext and
ff[:8] == sea_start]
if sea_folder_content:
# read data
sea_file = os.path.join(_SEA_folder, sorted(sea_folder_content)[0])
df_sea = pd.read_excel(sea_file,
sheet_name=data_sheet,
header=0,
index_col=[0, 1, 2, 3])
# fix years
ic_sea = df_sea.columns.tolist()
ic_sea = [yystr.lstrip('_') for yystr in ic_sea]
df_sea.columns = ic_sea
try:
ds_sea = df_sea[str(year)]
except KeyError:
warnings.warn(
'SEA extension does not include data for the '
'year {} - SEA-Extension not included'.format(year),
ParserWarning)
return None, None
# get useful data (employment)
mt_sea = ['EMP', 'EMPE', 'H_EMP', 'H_EMPE']
ds_use_sea = pd.concat(
[ds_sea.xs(key=vari, level='Variable', drop_level=False)
for vari in mt_sea])
ds_use_sea.drop(labels='TOT', level='Code', inplace=True)
ds_use_sea.reset_index('Description', drop=True, inplace=True)
# RoW not included in SEA but needed to get it consistent for
# all countries. Just add a dummy with 0 for all accounts.
if 'RoW' not in ds_use_sea.index.get_level_values('Country'):
ds_RoW = ds_use_sea.xs('USA',
level='Country', drop_level=False)
ds_RoW.ix[:] = 0
df_RoW = ds_RoW.reset_index()
df_RoW['Country'] = 'RoW'
ds_use_sea = pd.concat(
[ds_use_sea.reset_index(), df_RoW]).set_index(
['Country', 'Code', 'Variable'])
ds_use_sea.fillna(value=0, inplace=True)
df_use_sea = ds_use_sea.unstack(level=['Country', 'Code'])[str(year)]
df_use_sea.index.names = IDX_NAMES['VA_row_single']
df_use_sea.columns.names = IDX_NAMES['F_col']
df_use_sea = df_use_sea.astype('float')
df_unit = pd.DataFrame(
data=[ # this data must be in the same order as mt_sea
'thousand persons',
'thousand persons',
'mill hours',
'mill hours',
],
columns=['unit'],
index=df_use_sea.index)
return df_use_sea, df_unit
else:
warnings.warn(
'SEA extension raw data file not found - '
'SEA-Extension not included', ParserWarning)
return None, None
|
def __get_WIOD_SEA_extension(root_path, year, data_sheet='DATA')
|
Utility function to get the extension data from the SEA file in WIOD
This function is based on the structure in the WIOD_SEA_July14 file.
Missing values are set to zero.
The function works if the SEA file is either in path or in a subfolder
named 'SEA'.
Parameters
----------
root_path : string
Path to the WIOD data or the path with the SEA data.
year : str or int
Year to return for the extension
data_sheet : string, optional
Worksheet with the SEA data in the excel file
Returns
-------
SEA data as extension for the WIOD MRIO
| 3.655887
| 3.742366
| 0.976892
|
meta_string = "{time} - {etype} - {entry}".format(
time=self._time(),
etype=entry_type.upper(),
entry=entry)
self._content['history'].insert(0, meta_string)
self.logger(meta_string)
|
def _add_history(self, entry_type, entry)
|
Generic method to add entry as entry_type to the history
| 5.695344
| 5.346391
| 1.065269
|
if not new_value:
return
para = para.lower()
if para == 'history':
raise ValueError(
'History can only be extended - use method "note"')
old_value = self._content.get(para, None)
if new_value == old_value:
return
self._content[para] = new_value
if old_value and log:
self._add_history(entry_type="METADATA_CHANGE",
entry='Changed parameter "{para}" '
'from "{old}" to "{new}"'.format(
para=para,
old=old_value,
new=new_value))
|
def change_meta(self, para, new_value, log=True)
|
Changes the meta data
This function does nothing if None is passed as new_value.
To set a certain value to None pass the str 'None'
Parameters
----------
para: str
Meta data entry to change
new_value: str
New value
log: boolean, optional
If True (default) records the meta data change
in the history
| 3.873816
| 4.38262
| 0.883904
|
if self._path_in_arc:
with zipfile.ZipFile(file=str(self._metadata_file)) as zf:
self._content = json.loads(
zf.read(self._path_in_arc).decode('utf-8'),
object_pairs_hook=OrderedDict)
else:
with self._metadata_file.open('r') as mdf:
self._content = json.load(mdf,
object_pairs_hook=OrderedDict)
|
def _read_content(self)
|
Reads metadata from location (and path_in_arc if archive)
This function is called during the init process and
should not be used in isolation: it overwrites
unsaved metadata.
| 2.824835
| 2.314905
| 1.220281
|
if location:
location = Path(location)
if os.path.splitext(str(location))[1] == '':
self._metadata_file = location / DEFAULT_FILE_NAMES['metadata']
else:
self._metadata_file = location
if self._metadata_file:
with self._metadata_file.open(mode='w') as mdf:
json.dump(self._content, mdf, indent=4)
else:
logging.error("No metadata file given for storing the file")
|
def save(self, location=None)
|
Saves the current status of the metadata
This saves the metadata at the location of the previously loaded
metadata or at the file/path given in location.
Specify a location if the metadata should be stored in a different
location or was never stored before. Subsequent saves will use the
location set here.
Parameters
----------
location: str, optional
Path or file for saving the metadata.
This can be the full file path or just the storage folder.
In the latter case, the filename defined in
DEFAULT_FILE_NAMES['metadata'] (currently 'metadata.json') is
assumed.
| 3.401207
| 3.202421
| 1.062074
|
x = np.reshape(np.sum(np.hstack((Z, Y)), 1), (-1, 1))
if type(Z) is pd.DataFrame:
x = pd.DataFrame(x, index=Z.index, columns=['indout'])
if type(x) is pd.Series:
x = pd.DataFrame(x)
if type(x) is pd.DataFrame:
x.columns = ['indout']
return x
|
def calc_x(Z, Y)
|
Calculate the industry output x from the Z and Y matrix
Parameters
----------
Z : pandas.DataFrame or numpy.array
Symmetric input output table (flows)
Y : pandas.DataFrame or numpy.array
final demand with categories (1.order) for each country (2.order)
Returns
-------
pandas.DataFrame or numpy.array
Industry output x as column vector
The type is determined by the type of Z. If a DataFrame is given, the index is the same as for Z.
| 2.728836
| 3.133611
| 0.870828
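In words: industry output is the row sum over the combined transaction and final demand matrices, x_i = Σ_j Z_ij + Σ_k Y_ik. A quick numeric check with made-up two-sector data:

```python
# x = rowsum([Z Y]) with made-up two-sector numbers.
import numpy as np

Z = np.array([[10.0, 20.0],
              [30.0, 40.0]])                 # inter-industry flows
Y = np.array([[70.0],
              [30.0]])                       # final demand, one category
x = np.reshape(np.sum(np.hstack((Z, Y)), 1), (-1, 1))
print(x)                                     # [[100.] [100.]]
```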
|
x = L.dot(y)
if type(x) is pd.Series:
x = pd.DataFrame(x)
if type(x) is pd.DataFrame:
x.columns = ['indout']
return x
|
def calc_x_from_L(L, y)
|
Calculate the industry output x from L and a y vector
Parameters
----------
L : pandas.DataFrame or numpy.array
Symmetric input output Leontief table
y : pandas.DataFrame or numpy.array
a column vector of the total final demand
Returns
-------
pandas.DataFrame or numpy.array
Industry output x as column vector
The type is determined by the type of L. If a DataFrame is given, the index is the same as for L.
| 4.345253
| 4.723167
| 0.919987
|
if (type(x) is pd.DataFrame) or (type(x) is pd.Series):
x = x.values
x = x.reshape((1, -1)) # use numpy broadcasting - much faster
# (but has to ensure that x is a row vector)
# old mathematical form:
# return A.dot(np.diagflat(x))
if type(A) is pd.DataFrame:
return pd.DataFrame(A.values * x, index=A.index, columns=A.columns)
else:
return A*x
|
def calc_Z(A, x)
|
Calculate the Z matrix (flows) from A and x
Parameters
----------
A : pandas.DataFrame or numpy.array
Symmetric input output table (coefficients)
x : pandas.DataFrame or numpy.array
Industry output column vector
Returns
-------
pandas.DataFrame or numpy.array
Symmetric input output table (flows) Z
The type is determined by the type of A. If a DataFrame is given,
the index/columns are the same as for A.
| 4.913734
| 5.602151
| 0.877116
|
if (type(x) is pd.DataFrame) or (type(x) is pd.Series):
x = x.values
if (type(x) is not np.ndarray) and (x == 0):
recix = 0
else:
with warnings.catch_warnings():
# catch the divide by zero warning
# we deal with that by setting to 0 afterwards
warnings.simplefilter('ignore')
recix = 1/x
recix[recix == np.inf] = 0
recix = recix.reshape((1, -1))
# use numpy broadcasting - factor ten faster
# Mathematical form - slow
# return Z.dot(np.diagflat(recix))
if type(Z) is pd.DataFrame:
return pd.DataFrame(Z.values * recix, index=Z.index, columns=Z.columns)
else:
return Z*recix
|
def calc_A(Z, x)
|
Calculate the A matrix (coefficients) from Z and x
Parameters
----------
Z : pandas.DataFrame or numpy.array
Symmetric input output table (flows)
x : pandas.DataFrame or numpy.array
Industry output column vector
Returns
-------
pandas.DataFrame or numpy.array
Symmetric input output table (coefficients) A
The type is determined by the type of Z. If a DataFrame is given,
the index/columns are the same as for Z.
| 4.473062
| 5.04419
| 0.886775
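The formula here is A = Z · diag(x)⁻¹, i.e. each column of Z divided by the corresponding industry output, with 1/0 mapped to 0 so that empty industries yield zero coefficients instead of infinities. A small sketch with the same made-up numbers as above:

```python
# A = Z * (1/x) column-wise, mapping 1/0 to 0. Made-up numbers.
import numpy as np

Z = np.array([[10.0, 20.0],
              [30.0, 40.0]])
x = np.array([100.0, 100.0])
with np.errstate(divide='ignore'):
    recix = np.where(x != 0, 1.0 / x, 0.0)
A = Z * recix.reshape(1, -1)                 # broadcast instead of Z @ diag(recix)
print(A)                                     # [[0.1 0.2] [0.3 0.4]]
```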
|
I = np.eye(A.shape[0]) # noqa
if type(A) is pd.DataFrame:
return pd.DataFrame(np.linalg.inv(I-A),
index=A.index, columns=A.columns)
else:
return np.linalg.inv(I-A)
|
def calc_L(A)
|
Calculate the Leontief L from A
Parameters
----------
A : pandas.DataFrame or numpy.array
Symmetric input output table (coefficients)
Returns
-------
pandas.DataFrame or numpy.array
Leontief input output table L
The type is determined by the type of A. If a DataFrame is given,
the index/columns are the same as for A.
| 2.655099
| 3.086918
| 0.860113
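The Leontief inverse L = (I − A)⁻¹ ties the pieces together: it satisfies x = L·y for the total final demand vector y. Continuing the made-up two-sector example, L applied to y reproduces the industry output computed earlier (up to rounding):

```python
# L = inv(I - A) and a consistency check that L @ y gives back x.
import numpy as np

A = np.array([[0.1, 0.2],
              [0.3, 0.4]])
y = np.array([[70.0],
              [30.0]])                       # total final demand per sector
L = np.linalg.inv(np.eye(A.shape[0]) - A)
print(L @ y)                                 # [[100.] [100.]], matching x above
```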
|
Y_diag = ioutil.diagonalize_blocks(Y.values, blocksize=nr_sectors)
Y_inv = np.linalg.inv(Y_diag)
M = D_cba.dot(Y_inv)
if type(D_cba) is pd.DataFrame:
M.columns = D_cba.columns
M.index = D_cba.index
return M
|
def recalc_M(S, D_cba, Y, nr_sectors)
|
Calculate Multipliers based on footprints.
Parameters
----------
D_cba : pandas.DataFrame or numpy array
Footprint per sector and country
Y : pandas.DataFrame or numpy array
Final demand: aggregated across categories or just one category, one
column per country. This will be diagonalized per country block.
The diagonalized form must be invertible for this method to work.
nr_sectors : int
Number of sectors in the MRIO
Returns
-------
pandas.DataFrame or numpy.array
Multipliers M
The type is determined by the type of D_cba. If a DataFrame is given,
the index/columns are the same as for D_cba.
| 3.969727
| 3.992249
| 0.994359
|
# diagonalize each sector block per country
# this results in a disaggregated y with final demand per country per
# sector in one column
Y_diag = ioutil.diagonalize_blocks(Y.values, blocksize=nr_sectors)
x_diag = L.dot(Y_diag)
x_tot = x_diag.values.sum(1)
del Y_diag
D_cba = pd.DataFrame(S.values.dot(x_diag),
index=S.index,
columns=S.columns)
# D_pba = S.dot(np.diagflat(x_tot))
# faster broadcasted calculation:
D_pba = pd.DataFrame(S.values*x_tot.reshape((1, -1)),
index=S.index,
columns=S.columns)
# for the traded accounts set the domestic industry output to zero
dom_block = np.zeros((nr_sectors, nr_sectors))
x_trade = ioutil.set_block(x_diag.values, dom_block)
D_imp = pd.DataFrame(S.values.dot(x_trade),
index=S.index,
columns=S.columns)
x_exp = x_trade.sum(1)
# D_exp = S.dot(np.diagflat(x_exp))
# faster broadcasted version:
D_exp = pd.DataFrame(S.values * x_exp.reshape((1, -1)),
index=S.index,
columns=S.columns)
return (D_cba, D_pba, D_imp, D_exp)
|
def calc_accounts(S, L, Y, nr_sectors)
|
Calculate sector specific cba and pba based accounts, imp and exp accounts
The total industry output x for the calculation
is recalculated from L and y
Parameters
----------
L : pandas.DataFrame
Leontief input output table L
S : pandas.DataFrame
Direct impact coefficients
Y : pandas.DataFrame
Final demand: aggregated across categories or just one category, one
column per country
nr_sectors : int
Number of sectors in the MRIO
Returns
-------
Tuple
(D_cba, D_pba, D_imp, D_exp)
Format: D_row x L_col (=nr_countries*nr_sectors)
- D_cba Footprint per sector and country
- D_pba Total factor use per sector and country
- D_imp Total global factor use to satisfy total final demand in
the country per sector
- D_exp Total factor use in one country to satisfy final demand
in all other countries (per sector)
| 4.199969
| 3.53553
| 1.187932
|
# Use post here - NB: get could be necessary for some other pages
# but currently works for wiod and eora
returnvalue = namedtuple('url_content',
['raw_text', 'data_urls'])
url_text = requests.post(url_db_view, cookies=access_cookie).text
data_urls = [url_db_content + ff
for ff in re.findall(mrio_regex, url_text)]
return returnvalue(raw_text=url_text, data_urls=data_urls)
|
def _get_url_datafiles(url_db_view, url_db_content,
mrio_regex, access_cookie=None)
|
Get the urls of mrio files by parsing the url content for mrio_regex
Parameters
----------
url_db_view: url str
Url which shows the list of mrios in the db
url_db_content: url str
Url which needs to be appended before the url parsed from the
url_db_view to get a valid download link
mrio_regex: regex str
Regex to parse the mrio datafile from url_db_view
access_cookie: dict, optional
If needed, cookie to access the database
Returns
-------
Named tuple:
.raw_text: content of url_db_view for later use
.data_urls: list of url
| 7.237755
| 7.077549
| 1.022636
|
for url in url_list:
filename = os.path.basename(url)
if not overwrite_existing and filename in os.listdir(storage_folder):
continue
storage_file = os.path.join(storage_folder, filename)
# Using requests here - tried with aiohttp but was actually slower
# Also don’t use shutil.copyfileobj - corrupts zips from Eora
req = requests.post(url, stream=True, cookies=access_cookie)
with open(storage_file, 'wb') as lf:
for chunk in req.iter_content(1024*5):
lf.write(chunk)
meta_handler._add_fileio('Downloaded {} to {}'.format(url, filename))
meta_handler.save()
return meta_handler
|
def _download_urls(url_list, storage_folder, overwrite_existing,
meta_handler, access_cookie=None)
|
Save urls from url_list to storage_folder
Parameters
----------
url_list: list of str
Valid url to download
storage_folder: str, valid path
Location to store the download, folder will be created if
not existing. If the file is already present in the folder,
the download depends on the setting in 'overwrite_existing'.
overwrite_existing: boolean, optional
If False, skip download of file already existing in
the storage folder (default). Set to True to replace
files.
meta_handler: instance of MRIOMetaData
Returns
-------
The meta_handler is passed back
| 4.686969
| 5.295656
| 0.885059
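The loop above streams each response in chunks instead of loading whole files into memory. A stripped-down, generic version of that pattern with requests (URL and filename are placeholders; the original uses POST plus cookies for the database access):

```python
# Generic chunked download with requests; URL and filename are placeholders.
import requests

def download(url, target, chunk_size=1024 * 5):
    resp = requests.get(url, stream=True)
    resp.raise_for_status()
    with open(target, 'wb') as fh:
        for chunk in resp.iter_content(chunk_size):
            fh.write(chunk)

# download('https://example.com/somefile.zip', 'somefile.zip')
```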
|
try:
os.makedirs(storage_folder)
except FileExistsError:
pass
if type(years) is int or type(years) is str:
years = [years]
years = years if years else range(1995, 2012)
years = [str(yy).zfill(2)[-2:] for yy in years]
wiod_web_content = _get_url_datafiles(
url_db_view=WIOD_CONFIG['url_db_view'],
url_db_content=WIOD_CONFIG['url_db_content'],
mrio_regex='protected.*?wiot\d\d.*?xlsx')
restricted_wiod_io_urls = [url for url in wiod_web_content.data_urls if
re.search(r"(wiot)(\d\d)",
os.path.basename(url)).group(2)
in years]
meta = MRIOMetaData(location=storage_folder,
description='WIOD metadata file for pymrio',
name='WIOD',
system='ixi',
version='data13')
meta = _download_urls(url_list=restricted_wiod_io_urls + satellite_urls,
storage_folder=storage_folder,
overwrite_existing=overwrite_existing,
meta_handler=meta)
meta.save()
return meta
|
def download_wiod2013(storage_folder, years=None, overwrite_existing=False,
satellite_urls=WIOD_CONFIG['satellite_urls'])
|
Downloads the 2013 wiod release
Note
----
Currently, pymrio only works with the 2013 release of the wiod tables. The
more recent 2016 release so far (October 2017) lacks the environmental and
social extensions.
Parameters
----------
storage_folder: str, valid path
Location to store the download, folder will be created if
not existing. If the file is already present in the folder,
the download of the specific file will be skipped.
years: list of int or str, optional
If years is given only downloads the specific years. This
only applies to the IO tables because extensions are stored
by country and not per year.
The years can be given in 2 or 4 digits.
overwrite_existing: boolean, optional
If False, skip download of file already existing in
the storage folder (default). Set to True to replace
files.
satellite_urls : list of str (urls), optional
Which satellite accounts to download. Default: satellite urls defined
in WIOD_CONFIG (list of all available urls). Remove items from this list
to only download a subset of extensions.
| 5.179879
| 5.004388
| 1.035067
|
s = '%.6f' % time.time()
whole, frac = map(int, s.split('.'))
res = '%d%d' % (whole, frac)
return res[:length]
|
def get_timestamp(length)
|
Get a timestamp of the given `length` as a string
| 4.578208
| 4.23881
| 1.080069
|
if PY3:
return os.makedirs(path, exist_ok=True)
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
|
def mkdir_p(path)
|
mkdir -p path
| 1.765566
| 1.782433
| 0.990537
|
ozmq = __import__('zmq')
ozmq.Socket = zmq.Socket
ozmq.Context = zmq.Context
ozmq.Poller = zmq.Poller
ioloop = __import__('zmq.eventloop.ioloop')
ioloop.Poller = zmq.Poller
|
def monkey_patch()
|
Monkey patches `zmq.Context` and `zmq.Socket`
If test_suite is True, the pyzmq test suite will be patched for
compatibility as well.
| 3.556066
| 3.647326
| 0.974979
|
# Attributes to keep must be defined in the init: __basic__
strwarn = None
for df in self.__basic__:
if (getattr(self, df)) is None:
if force:
strwarn = ("Reset system warning - Recalculation after "
"reset not possible "
"because {} missing".format(df))
warnings.warn(strwarn, ResetWarning)
else:
raise ResetError("To few tables to recalculate the "
"system after reset ({} missing) "
"- reset can be forced by passing "
"'force=True')".format(df))
if _meta:
_meta._add_modify("Reset system to Z and Y")
if strwarn:
_meta._add_modify(strwarn)
[setattr(self, key, None)
for key in self.get_DataFrame(
data=False,
with_unit=False,
with_population=False)
if key not in self.__basic__]
return self
|
def reset_full(self, force=False, _meta=None)
|
Remove all accounts which can be recalculated based on Z, Y, F, FY
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal
| 9.868936
| 9.55399
| 1.032965
|
# Development note: The attributes which should be removed are
# defined in self.__non_agg_attributes__
strwarn = None
for df in self.__basic__:
if (getattr(self, df)) is None:
if force:
strwarn = ("Reset system warning - Recalculation after "
"reset not possible "
"because {} missing".format(df))
warnings.warn(strwarn, ResetWarning)
else:
raise ResetError("To few tables to recalculate the "
"system after reset ({} missing) "
"- reset can be forced by passing "
"'force=True')".format(df))
if _meta:
_meta._add_modify("Reset to absolute flows")
if strwarn:
_meta._add_modify(strwarn)
[setattr(self, key, None) for key in self.__non_agg_attributes__]
return self
|
def reset_to_flows(self, force=False, _meta=None)
|
Keeps only the absolute values.
This removes all attributes which can not be aggregated and must be
recalculated after the aggregation.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal
| 8.939805
| 8.140778
| 1.098151
|
# Development note: The coefficient attributes are
# defined in self.__coefficients__
[setattr(self, key, None)
for key in self.get_DataFrame(
data=False,
with_unit=False,
with_population=False)
if key not in self.__coefficients__]
return self
|
def reset_to_coefficients(self)
|
Keeps only the coefficients.
This can be used to recalculate the IO tables for a new final demand.
Note
-----
The system can not be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary.
| 10.005669
| 11.986589
| 0.834739
|
_tmp = copy.deepcopy(self)
if not new_name:
new_name = self.name + '_copy'
if str(type(self)) == "<class 'pymrio.core.mriosystem.IOSystem'>":
_tmp.meta.note('IOSystem copy {new} based on {old}'.format(
new=new_name, old=self.meta.name))
_tmp.meta.change_meta('name', new_name, log=False)
else:
_tmp.name = new_name
return _tmp
|
def copy(self, new_name=None)
|
Returns a deep copy of the system
Parameters
-----------
new_name: str, optional
Set a new meta name parameter.
Default: <old_name>_copy
| 4.971914
| 4.830913
| 1.029187
|
possible_dataframes = ['Y', 'FY']
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
try:
ind = getattr(self, df).columns.get_level_values(
'category').unique()
except (AssertionError, KeyError):
ind = getattr(self, df).columns.get_level_values(
1).unique()
if entries:
if type(entries) is str:
entries = [entries]
ind = ind.tolist()
return [None if ee not in entries else ee for ee in ind]
else:
return ind
else:
logging.warn("No attributes available to get Y categories")
return None
|
def get_Y_categories(self, entries=None)
|
Returns names of the Y categories of the IOSystem as unique names in order
Parameters
----------
entries : List, optional
If given, returns a list with None for all values not in entries.
Returns
-------
Index
List of categories, None if no attribute to determine
list is available
| 3.897225
| 3.853076
| 1.011458
|
possible_dataframes = ['A', 'L', 'Z', 'Y', 'F', 'FY', 'M', 'S',
'D_cba', 'D_pba', 'D_imp', 'D_exp',
'D_cba_reg', 'D_pba_reg',
'D_imp_reg', 'D_exp_reg',
'D_cba_cap', 'D_pba_cap',
'D_imp_cap', 'D_exp_cap', ]
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
orig_idx = getattr(self, df).index
break
else:
logging.warn("No attributes available to get index")
return None
if as_dict:
dd = {k: k for k in orig_idx}
if grouping_pattern:
for pattern, new_group in grouping_pattern.items():
if type(pattern) is str:
dd.update({k: new_group for k, v in dd.items() if
re.match(pattern, k)})
else:
dd.update({k: new_group for k, v in dd.items() if
all([re.match(pat, k[nr])
for nr, pat in enumerate(pattern)])})
return dd
else:
return orig_idx
|
def get_index(self, as_dict=False, grouping_pattern=None)
|
Returns the index of the DataFrames in the system
Parameters
----------
as_dict: boolean, optional
If True, returns a 1:1 key-value matching for further processing
prior to groupby functions. Otherwise (default) the index
is returned as pandas index.
grouping_pattern: dict, optional
Dictionary with keys being regex patterns matching index and
values the name for the grouping. If the index is a pandas
multiindex, the keys must be tuples of length levels in the
multiindex, with a valid regex expression at each position.
Otherwise, the keys need to be strings.
Only relevant if as_dict is True.
| 3.16188
| 3.107037
| 1.017651
|
for df in self.get_DataFrame(data=True, with_population=False):
df.index = index
|
def set_index(self, index)
|
Sets the pd dataframe index of all dataframes in the system to index
| 14.512142
| 11.633889
| 1.247402
|
for key in self.__dict__:
if (key == 'unit') and not with_unit:
continue
if (key == 'population') and not with_population:
continue
if type(self.__dict__[key]) is pd.DataFrame:
if data:
yield getattr(self, key)
else:
yield key
|
def get_DataFrame(self, data=False, with_unit=True, with_population=True)
|
Yields all pandas.DataFrames or their names
Notes
-----
For IOSystem this does not include the DataFrames in the extensions.
Parameters
----------
data : boolean, optional
If True, returns a generator which yields the DataFrames.
If False, returns a generator which
yields only the names of the DataFrames
with_unit: boolean, optional
If True, includes the 'unit' DataFrame
If False, does not include the 'unit' DataFrame.
The method then only yields the numerical data tables
with_population: boolean, optional
If True, includes the 'population' vector
If False, does not include the 'population' vector.
Returns
-------
DataFrames or string generator, depending on parameter data
| 2.461757
| 2.450471
| 1.004606
|
if type(path) is str:
path = path.rstrip('\\')
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
para_file_path = path / DEFAULT_FILE_NAMES['filepara']
file_para = dict()
file_para['files'] = dict()
if table_format in ['text', 'csv', 'txt']:
table_format = 'txt'
elif table_format in ['pickle', 'bin', 'binary', 'pkl']:
table_format = 'pkl'
else:
raise ValueError('Unknown table format "{}" - '
'must be "txt" or "pkl"'.format(table_format))
return None
if not table_ext:
if table_format == 'txt':
table_ext = '.txt'
if table_format == 'pkl':
table_ext = '.pkl'
if str(type(self)) == "<class 'pymrio.core.mriosystem.IOSystem'>":
file_para['systemtype'] = GENERIC_NAMES['iosys']
elif str(type(self)) == "<class 'pymrio.core.mriosystem.Extension'>":
file_para['systemtype'] = GENERIC_NAMES['ext']
file_para['name'] = self.name
else:
logging.warn('Unknown system type {} - set to "undef"'.format(
str(type(self))))
file_para['systemtype'] = 'undef'
for df, df_name in zip(self.get_DataFrame(data=True),
self.get_DataFrame()):
if type(df.index) is pd.MultiIndex:
nr_index_col = len(df.index.levels)
else:
nr_index_col = 1
if type(df.columns) is pd.MultiIndex:
nr_header = len(df.columns.levels)
else:
nr_header = 1
save_file = df_name + table_ext
save_file_with_path = path / save_file
logging.info('Save file {}'.format(save_file_with_path))
if table_format == 'txt':
df.to_csv(save_file_with_path, sep=sep,
float_format=float_format)
else:
df.to_pickle(save_file_with_path)
file_para['files'][df_name] = dict()
file_para['files'][df_name]['name'] = save_file
file_para['files'][df_name]['nr_index_col'] = str(nr_index_col)
file_para['files'][df_name]['nr_header'] = str(nr_header)
with para_file_path.open(mode='w') as pf:
json.dump(file_para, pf, indent=4)
if file_para['systemtype'] == GENERIC_NAMES['iosys']:
if not self.meta:
self.meta = MRIOMetaData(name=self.name,
location=path)
self.meta._add_fileio("Saved {} to {}".format(self.name, path))
self.meta.save(location=path)
return self
|
def save(self, path, table_format='txt', sep='\t',
table_ext=None, float_format='%.12g')
|
Saving the system to path
Parameters
----------
path : pathlib.Path or string
path for the saved data (will be created if necessary, data
within will be overwritten).
table_format : string
Format to save the DataFrames:
- 'pkl' : Binary pickle files,
alias: 'pickle', 'bin', 'binary'
- 'txt' : Text files (default), alias: 'text', 'csv'
table_ext : string, optional
File extension,
default depends on table_format(.pkl for pickle, .txt for text)
sep : string, optional
Field delimiter for the output file, only for txt files.
Default: tab ('\t')
float_format : string, optional
Format for saving the DataFrames,
default = '%.12g', only for txt files
| 2.590286
| 2.550145
| 1.015741
|
if type(regions) is list:
regions = {old: new for old, new in
zip(self.get_regions(), regions)}
for df in self.get_DataFrame(data=True):
df.rename(index=regions, columns=regions, inplace=True)
try:
for ext in self.get_extensions(data=True):
for df in ext.get_DataFrame(data=True):
df.rename(index=regions, columns=regions, inplace=True)
except:
pass
self.meta._add_modify("Changed country names")
return self
|
def rename_regions(self, regions)
|
Sets new names for the regions
Parameters
----------
regions : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
In case of list: List of new names in order and complete
without repetition
| 3.779451
| 3.890427
| 0.971475
|
if type(sectors) is list:
sectors = {old: new for old, new in
zip(self.get_sectors(), sectors)}
for df in self.get_DataFrame(data=True):
df.rename(index=sectors, columns=sectors, inplace=True)
try:
for ext in self.get_extensions(data=True):
for df in ext.get_DataFrame(data=True):
df.rename(index=sectors, columns=sectors, inplace=True)
except:
pass
self.meta._add_modify("Changed sector names")
return self
|
def rename_sectors(self, sectors)
|
Sets new names for the sectors
Parameters
----------
sectors : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
        In case of list: a complete list of new names, in the
        order given by get_sectors() and without repetition
| 3.342457
| 3.423836
| 0.976232
|
if type(Y_categories) is list:
Y_categories = {old: new for old, new in
zip(self.get_Y_categories(), Y_categories)}
for df in self.get_DataFrame(data=True):
df.rename(index=Y_categories, columns=Y_categories, inplace=True)
try:
for ext in self.get_extensions(data=True):
for df in ext.get_DataFrame(data=True):
df.rename(index=Y_categories,
columns=Y_categories,
inplace=True)
except:
pass
self.meta._add_modify("Changed Y category names")
return self
|
def rename_Y_categories(self, Y_categories)
|
Sets new names for the Y_categories
Parameters
----------
Y_categories : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
        In case of list: a complete list of new names, in the
        order given by get_Y_categories() and without repetition
| 3.182742
| 3.305863
| 0.962757
|
possible_dataframes = ['F', 'FY', 'M', 'S',
'D_cba', 'D_pba', 'D_imp', 'D_exp',
'D_cba_reg', 'D_pba_reg',
'D_imp_reg', 'D_exp_reg',
'D_cba_cap', 'D_pba_cap',
'D_imp_cap', 'D_exp_cap', ]
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
                return getattr(self, df).index.values
        else:
            logging.warning("No attributes available to get row names")
return None
|
def get_rows(self)
|
    Returns the names of the rows of the extension
| 4.236165
| 3.989152
| 1.061921
|
retdict = {}
for rowname, data in zip(self.get_DataFrame(),
self.get_DataFrame(data=True)):
            retdict[rowname] = pd.DataFrame(data.loc[row])
if name:
retdict['name'] = name
return retdict
|
def get_row_data(self, row, name=None)
|
Returns a dict with all available data for a row in the extension
Parameters
----------
row : tuple, list, string
A valid index for the extension DataFrames
name : string, optional
If given, adds a key 'name' with the given value to the dict. In
that case the dict can be
used directly to build a new extension.
Returns
-------
    dict object with the data (pandas DataFrame) for the specific row
| 4.572218
| 4.7759
| 0.957352
|
if type(stressor) is int:
stressor = self.F.index[stressor]
if len(stressor) == 1:
stressor = stressor[0]
if not name:
if type(stressor) is str:
name = stressor
else:
name = '_'.join(stressor) + '_diag'
ext_diag = Extension(name)
ext_diag.F = pd.DataFrame(
index=self.F.columns,
columns=self.F.columns,
data=np.diag(self.F.loc[stressor, :])
)
try:
ext_diag.unit = pd.DataFrame(
index=ext_diag.F.index,
columns=self.unit.columns,
data=self.unit.loc[stressor].unit)
except AttributeError:
# If no unit in stressor, self.unit.columns break
ext_diag.unit = None
return ext_diag
|
def diag_stressor(self, stressor, name=None)
|
Diagonalize one row of the stressor matrix for a flow analysis.
    This method takes one row of the F matrix and diagonalizes
    it to the full region/sector format. Footprint calculations based
    on this matrix show the flow of embodied stressors from the source
    region/sector (row index) to the final consumer (column index).
    Note
    ----
    Since this type of analysis is based on flows, direct household
    emissions (FY) are not included.
Parameters
----------
stressor : str or int - valid index for one row of the F matrix
This must be a tuple for a multiindex, a string otherwise.
The stressor to diagonalize.
name : string (optional)
The new name for the extension,
if None (default): string based on the given stressor (row name)
Returns
-------
Extension
| 3.071768
| 2.858664
| 1.074547
|
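A sketch of the diag_stressor workflow described above, assuming pymrio's test system with an 'emissions' extension whose F index contains the row ('emission_type1', 'air'); both names are assumptions about the test data, not part of the entry above.

import pymrio

io = pymrio.load_test()
io.calc_all()

diag_ext = io.emissions.diag_stressor(('emission_type1', 'air'),
                                      name='etype1_air_diag')
io.etype1_air_diag = diag_ext              # register the new extension
io.calc_extensions(extensions=['etype1_air_diag'])

# D_cba of the diagonalized extension traces source (rows) to consumer (columns)
print(io.etype1_air_diag.D_cba.shape)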
# Possible cases:
# 1) Z given, rest can be None and calculated
# 2) A and x given, rest can be calculated
# 3) A and Y , calc L (if not given) - calc x and the rest
# this catches case 3
if self.x is None and self.Z is None:
# in that case we need L or at least A to calculate it
if self.L is None:
self.L = calc_L(self.A)
logging.info('Leontief matrix L calculated')
self.x = calc_x_from_L(self.L, self.Y.sum(axis=1))
self.meta._add_modify('Industry Output x calculated')
# this chains of ifs catch cases 1 and 2
if self.Z is None:
self.Z = calc_Z(self.A, self.x)
self.meta._add_modify('Flow matrix Z calculated')
if self.x is None:
self.x = calc_x(self.Z, self.Y)
self.meta._add_modify('Industry output x calculated')
if self.A is None:
self.A = calc_A(self.Z, self.x)
self.meta._add_modify('Coefficient matrix A calculated')
if self.L is None:
self.L = calc_L(self.A)
self.meta._add_modify('Leontief matrix L calculated')
return self
|
def calc_system(self)
|
Calculates the missing part of the core IOSystem
    The method checks Z, x, A, L and calculates all that are None
| 4.455092
| 3.97296
| 1.121354
|
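A minimal sketch of calc_system filling in the missing matrices, assuming pymrio's load_test() ships the flow tables (Z and Y) but not A, L or x.

import pymrio

io = pymrio.load_test()     # Z and Y are set; A, L and x are None
io.calc_system()            # computes x from Z/Y, then A and L
print(io.A.shape, io.L.shape, io.x.shape)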
ext_list = list(self.get_extensions(data=False))
extensions = extensions or ext_list
if type(extensions) == str:
extensions = [extensions]
for ext_name in extensions:
self.meta._add_modify(
'Calculating accounts for extension {}'.format(ext_name))
ext = getattr(self, ext_name)
ext.calc_system(x=self.x,
Y=self.Y,
L=self.L,
Y_agg=Y_agg,
population=self.population
)
return self
|
def calc_extensions(self, extensions=None, Y_agg=None)
|
    Calculates the extensions and their accounts
    For the calculation, y is aggregated across the specified y categories
    The method calls .calc_system of each extension (or those given in the
    extensions parameter)
Parameters
----------
extensions : list of strings, optional
A list of key names of extensions which shall be calculated.
Default: all dictionaries of IOSystem are assumed to be extensions
Y_agg : pandas.DataFrame or np.array, optional
The final demand aggregated (one category per country). Can be
used to restrict the calculation of CBA of a specific category
(e.g. households). Default: y is aggregated over all categories
| 4.894117
| 4.689147
| 1.043712
|
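A short usage sketch for calc_extensions, assuming the pymrio test system; the attribute name 'emissions' is an assumption about that test data.

import pymrio

io = pymrio.load_test()
io.calc_system()
io.calc_extensions()                  # accounts for all extensions
print(io.emissions.D_cba.head())      # consumption based accounts
# a single extension can be selected by attribute name:
# io.calc_extensions(extensions='emissions')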
for ext in self.get_extensions(data=True):
ext.report_accounts(path=path,
per_region=per_region,
per_capita=per_capita,
pic_size=pic_size,
format=format,
**kwargs)
|
def report_accounts(self, path, per_region=True,
per_capita=False, pic_size=1000,
format='rst', **kwargs)
|
    Generates a report to the given path for all extensions
This method calls .report_accounts for all extensions
Notes
-----
This looks prettier with the seaborn module (import seaborn before
calling this method)
Parameters
----------
path : string
Root path for the report
per_region : boolean, optional
If true, reports the accounts per region
per_capita : boolean, optional
If true, reports the accounts per capita
If per_capita and per_region are False, nothing will be done
pic_size : int, optional
size for the figures in px, 1000 by default
format : string, optional
file format of the report:
'rst'(default), 'html', 'latex', ...
except for rst all depend on the module docutils (all writer_name
from docutils can be used as format)
ffname : string, optional
            root file name (without extension; per_capita or per_region will be
            attached) and folder names. If None is passed (default), self.name
            will be modified to get a valid name for the operating system
            (without blanks)
**kwargs : key word arguments, optional
This will be passed directly to the pd.DataFrame.plot method
(through the self.plot_account method)
| 2.229424
| 2.284874
| 0.975732
|
ext_list = [key for key in
self.__dict__ if type(self.__dict__[key]) is Extension]
for key in ext_list:
if data:
yield getattr(self, key)
else:
yield key
|
def get_extensions(self, data=False)
|
Yields the extensions or their names
Parameters
----------
data : boolean, optional
If True, returns a generator which yields the extensions.
If False, returns a generator which yields the names of
the extensions (default)
Returns
-------
Generator for Extension or string
| 3.627363
| 3.802001
| 0.954067
|
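A quick sketch of get_extensions, assuming the pymrio test system; the printed extension names depend on that test data.

import pymrio

io = pymrio.load_test()
print(list(io.get_extensions()))             # attribute names of the extensions
for ext in io.get_extensions(data=True):     # the Extension objects themselves
    print(ext.name, type(ext))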
super().reset_full(force=force, _meta=self.meta)
return self
|
def reset_full(self, force=False)
|
Remove all accounts which can be recalculated based on Z, Y, F, FY
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
| 9.267924
| 14.507744
| 0.638826
|
self.reset_full()
[ee.reset_full() for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset all calculated data")
return self
|
def reset_all_full(self, force=False)
|
Removes all accounts that can be recalculated (IOSystem and extensions)
    This calls reset_full for the core system and all extensions.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
| 15.502797
| 16.725517
| 0.926895
|
super().reset_to_flows(force=force, _meta=self.meta)
return self
|
def reset_to_flows(self, force=False)
|
Keeps only the absolute values.
This removes all attributes which can not be aggregated and must be
recalculated after the aggregation.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
| 7.694775
| 14.872345
| 0.517388
|
self.reset_to_flows(force=force)
[ee.reset_to_flows(force=force)
for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset full system to absolute flows")
return self
|
def reset_all_to_flows(self, force=False)
|
Resets the IOSystem and all extensions to absolute flows
This method calls reset_to_flows for the IOSystem and for
all Extensions in the system.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
| 10.089065
| 9.137995
| 1.104079
|
self.reset_to_coefficients()
[ee.reset_to_coefficients() for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset full system to coefficients")
return self
|
def reset_all_to_coefficients(self)
|
Resets the IOSystem and all extensions to coefficients.
This method calls reset_to_coefficients for the IOSystem and for
all Extensions in the system
Note
-----
    The system cannot be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary.
| 10.497519
| 9.524316
| 1.102181
|
if type(path) is str:
path = path.rstrip('\\')
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
self.save(path=path,
table_format=table_format,
sep=sep,
table_ext=table_ext,
float_format=float_format)
for ext, ext_name in zip(self.get_extensions(data=True),
self.get_extensions()):
ext_path = path / ext_name
ext.save(path=ext_path,
table_format=table_format,
sep=sep,
table_ext=table_ext,
float_format=float_format)
return self
|
def save_all(self, path, table_format='txt', sep='\t',
table_ext=None, float_format='%.12g')
|
Saves the system and all extensions
Extensions are saved in separate folders (names based on extension)
Parameters are passed to the .save methods of the IOSystem and
Extensions. See parameters description there.
| 2.197171
| 2.108382
| 1.042112
|
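A minimal sketch of save_all, assuming the pymrio test system; it writes into a temporary directory so nothing persists.

import tempfile
import pymrio

io = pymrio.load_test()
io.calc_all()
with tempfile.TemporaryDirectory() as tmp:
    io.save_all(tmp)                        # tab separated text files,
                                            # one sub-folder per extension
    # io.save_all(tmp, table_format='pkl')  # alternative: binary pickles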
if ext is None:
ext = list(self.get_extensions())
if type(ext) is str:
ext = [ext]
for ee in ext:
try:
del self.__dict__[ee]
except KeyError:
for exinstancename, exdata in zip(
self.get_extensions(data=False),
self.get_extensions(data=True)):
if exdata.name == ee:
del self.__dict__[exinstancename]
finally:
self.meta._add_modify("Removed extension {}".format(ee))
return self
|
def remove_extension(self, ext=None)
|
Remove extension from IOSystem
For single Extensions the same can be achieved with del
IOSystem_name.Extension_name
Parameters
----------
ext : string or list, optional
The extension to remove, this can be given as the name of the
instance or of Extension.name (the latter will be checked if no
instance was found)
If ext is None (default) all Extensions will be removed
| 4.100125
| 3.755125
| 1.091875
|
inp = np.asarray(inp)
nr_dim = np.ndim(inp)
if nr_dim == 1:
return True
elif (nr_dim == 2) and (1 in inp.shape):
return True
else:
return False
|
def is_vector(inp)
|
Returns true if the input can be interpreted as a 'true' vector
Note
----
    Only checks dimensions, not whether the type is numeric
Parameters
----------
inp : numpy.ndarray or something that can be converted into ndarray
Returns
-------
Boolean
True for vectors: ndim = 1 or ndim = 2 and shape of one axis = 1
False for all other arrays
| 2.497269
| 3.145548
| 0.793906
|
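A quick check of is_vector, assuming the function is importable from pymrio.tools.ioutil (the module path is an assumption).

import numpy as np
from pymrio.tools.ioutil import is_vector    # module path is an assumption

print(is_vector([1, 2, 3]))           # True: ndim == 1
print(is_vector(np.ones((4, 1))))     # True: 2-D, but one axis has length 1
print(is_vector(np.ones((4, 2))))     # False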
path = Path(path)
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(str(path)) as zz:
filelist = [info.filename for info in zz.infolist()]
iszip = True
else:
iszip = False
filelist = [str(f) for f in path.glob('**/*') if f.is_file()]
return namedtuple('repocontent', ['iszip', 'filelist'])(iszip, filelist)
|
def get_repo_content(path)
|
List of files in a repo (path or zip)
Parameters
----------
path: string or pathlib.Path
Returns
-------
Returns a namedtuple with .iszip and .filelist
    The paths in filelist are plain strings.
| 2.6664
| 2.152183
| 1.238928
|
if type(path) is str:
path = Path(path.rstrip('\\'))
if zipfile.is_zipfile(str(path)):
para_file_folder = str(path_in_arc)
with zipfile.ZipFile(file=str(path)) as zf:
files = zf.namelist()
else:
para_file_folder = str(path)
files = [str(f) for f in path.glob('**/*')]
if para_file_folder not in files:
para_file_full_path = os.path.join(
para_file_folder, DEFAULT_FILE_NAMES['filepara'])
else:
para_file_full_path = para_file_folder
para_file_folder = os.path.dirname(para_file_full_path)
if para_file_full_path not in files:
raise FileNotFoundError(
'File parameter file {} not found'.format(
para_file_full_path))
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(file=str(path)) as zf:
para_file_content = json.loads(
zf.read(para_file_full_path).decode('utf-8'))
else:
with open(para_file_full_path, 'r') as pf:
para_file_content = json.load(pf)
return namedtuple('file_parameter',
['folder', 'name', 'content'])(
para_file_folder,
os.path.basename(para_file_full_path),
para_file_content)
|
def get_file_para(path, path_in_arc='')
|
Generic method to read the file parameter file
Helper function to consistently read the file parameter file, which can
either be uncompressed or included in a zip archive. By default, the file
    name is expected to be as set in DEFAULT_FILE_NAMES['filepara'] (currently
    file_parameters.json), but can be defined otherwise by including the file
    name of the parameter file in the parameter 'path'.
Parameters
----------
path: pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
        data or the location of a compressed zip file with the data. In the
        latter case the parameter 'path_in_arc' needs to be specified to
        further indicate the location of the data in the compressed file.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass ''
(default), for data in e.g. the folder 'emissions' pass 'emissions/'.
        Only used if parameter 'path' points to a compressed zip file.
Returns
-------
Returns a namedtuple with
.folder: str with the absolute path containing the
file parameter file. In case of a zip the path
is relative to the root in the zip
.name: Filename without folder of the used parameter file.
    .content: Dictionary with the content of the file parameter file
Raises
------
FileNotFoundError if parameter file not found
| 2.145385
| 1.873938
| 1.144854
|
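A sketch of reading a minimal file parameter file from a plain directory, assuming get_file_para is importable from pymrio.tools.ioutil (the module path is an assumption) and using the default file name 'file_parameters.json' stated in the docstring above; the content dict is a made-up minimal example.

import json
import tempfile
from pathlib import Path

from pymrio.tools.ioutil import get_file_para   # module path is an assumption

with tempfile.TemporaryDirectory() as tmp:
    para = {'systemtype': 'IOSystem', 'name': 'demo', 'files': {}}
    (Path(tmp) / 'file_parameters.json').write_text(json.dumps(para))
    fp = get_file_para(tmp)
    print(fp.folder, fp.name)
    print(fp.content['systemtype'])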
if isinstance(agg_vector, np.ndarray):
agg_vector = agg_vector.flatten().tolist()
if type(agg_vector[0]) == str:
str_vector = agg_vector
agg_vector = np.zeros(len(str_vector))
if pos_dict:
if len(pos_dict.keys()) != len(set(str_vector)):
raise ValueError(
                    'Position elements inconsistent with aggregation vector')
seen = pos_dict
else:
seen = {}
counter = 0
for ind, item in enumerate(str_vector):
if item not in seen:
seen[item] = counter
counter += 1
agg_vector[ind] = seen[item]
agg_vector = np.array(agg_vector, dtype=int)
agg_vector = agg_vector.reshape((1, -1))
row_corr = agg_vector
col_corr = np.arange(agg_vector.size)
agg_matrix = np.zeros((row_corr.max()+1, col_corr.max()+1))
agg_matrix[row_corr, col_corr] = 1
# set columns with -1 value to 0
agg_matrix[np.tile(agg_vector == -1, (np.shape(agg_matrix)[0], 1))] = 0
return agg_matrix
|
def build_agg_matrix(agg_vector, pos_dict=None)
|
Agg. matrix based on mapping given in input as numerical or str vector.
    The aggregation matrix has the form n x m with
    -n new classification
    -m old classification
Parameters
----------
agg_vector : list or vector like numpy ndarray
This can be row or column vector.
Length m with position given for n and -1 if values
should not be included
or
length m with id_string for the aggregation
pos_dict : dictionary
(only possible if agg_vector is given as string)
output order for the new matrix
must be given as dict with
'string in agg_vector' = pos
(as int, -1 if value should not be included in the aggregation)
Example 1:
input vector: np.array([0, 1, 1, 2]) or ['a', 'b', 'b', 'c']
agg matrix:
m0 m1 m2 m3
n0 1 0 0 0
n1 0 1 1 0
n2 0 0 0 1
Example 2:
input vector: np.array([1, 0, 0, 2]) or
(['b', 'a', 'a', 'c'], dict(a=0,b=1,c=2))
agg matrix:
m0 m1 m2 m3
n0 0 1 1 0
n1 1 0 0 0
n2 0 0 0 1
| 2.568204
| 2.633969
| 0.975032
|
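A sketch reproducing the two examples from the docstring above, assuming build_agg_matrix is importable from pymrio.tools.ioutil (the module path is an assumption).

import numpy as np
from pymrio.tools.ioutil import build_agg_matrix   # module path is an assumption

print(build_agg_matrix(np.array([0, 1, 1, 2])))
# [[1. 0. 0. 0.]
#  [0. 1. 1. 0.]
#  [0. 0. 0. 1.]]

# string input with an explicit output order
print(build_agg_matrix(['b', 'a', 'a', 'c'], pos_dict=dict(a=0, b=1, c=2)))
# [[0. 1. 1. 0.]
#  [1. 0. 0. 0.]
#  [0. 0. 0. 1.]]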
nr_col = arr.shape[1]
nr_row = arr.shape[0]
if np.mod(nr_row, blocksize):
raise ValueError(
'Number of rows of input array must be a multiple of blocksize')
arr_diag = np.zeros((nr_row, blocksize*nr_col))
for col_ind, col_val in enumerate(arr.T):
col_start = col_ind*blocksize
col_end = blocksize + col_ind*blocksize
for _ind in range(int(nr_row/blocksize)):
row_start = _ind*blocksize
row_end = blocksize + _ind * blocksize
arr_diag[row_start:row_end,
col_start:col_end] = np.diag(col_val[row_start:row_end])
return arr_diag
|
def diagonalize_blocks(arr, blocksize)
|
Diagonalize sections of columns of an array for the whole array
Parameters
----------
arr : numpy array
Input array
blocksize : int
        number of rows/columns forming one block
Returns
-------
    numpy ndarray with shape (rows 'arr',
    columns 'arr' * blocksize)
Example
--------
arr: output: (blocksize = 3)
3 1 3 0 0 1 0 0
4 2 0 4 0 0 2 0
5 3 0 0 5 0 0 3
6 9 6 0 0 9 0 0
7 6 0 7 0 0 6 0
8 4 0 0 8 0 0 4
| 2.356679
| 2.432216
| 0.968943
|
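A sketch reproducing the docstring example above (blocksize = 3), assuming diagonalize_blocks is importable from pymrio.tools.ioutil (the module path is an assumption).

import numpy as np
from pymrio.tools.ioutil import diagonalize_blocks   # module path is an assumption

arr = np.array([[3, 1],
                [4, 2],
                [5, 3],
                [6, 9],
                [7, 6],
                [8, 4]])
print(diagonalize_blocks(arr, blocksize=3))
# rows stay 6, columns become 2 * 3 = 6; each column is spread over diagonal blocks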
nr_col = arr.shape[1]
nr_row = arr.shape[0]
nr_col_block = arr_block.shape[1]
nr_row_block = arr_block.shape[0]
if np.mod(nr_row, nr_row_block) or np.mod(nr_col, nr_col_block):
raise ValueError('Number of rows/columns of the input array '
'must be a multiple of block shape')
if nr_row/nr_row_block != nr_col/nr_col_block:
raise ValueError('Block array can not be filled as '
'diagonal blocks in the given array')
arr_out = arr.copy()
for row_ind in range(int(nr_row/nr_row_block)):
row_start = row_ind*nr_row_block
row_end = nr_row_block+nr_row_block*row_ind
col_start = row_ind*nr_col_block
col_end = nr_col_block+nr_col_block*row_ind
arr_out[row_start:row_end, col_start:col_end] = arr_block
return arr_out
|
def set_block(arr, arr_block)
|
    Sets the diagonal blocks of an array to a given array
Parameters
----------
arr : numpy ndarray
the original array
    arr_block : numpy ndarray
the block array for the new diagonal
Returns
-------
numpy ndarray (the modified array)
| 2.153008
| 2.172856
| 0.990866
|
seen = {}
result = []
for item in ll:
if item in seen:
continue
seen[item] = 1
result.append(item)
return result
|
def unique_element(ll)
|
    Returns unique elements from a list, preserving the original order
| 2.079761
| 1.953131
| 1.064835
|
for nr, entry in enumerate(ll):
try:
float(entry)
        except (ValueError, TypeError):
pass
else:
return nr
return None
|
def find_first_number(ll)
|
Returns nr of first entry parseable to float in ll, None otherwise
| 4.059216
| 2.492769
| 1.628397
|
def read_first_lines(filehandle):
lines = []
for i in range(max_test_lines):
            line = filehandle.readline()
if line == '':
break
try:
line = line.decode('utf-8')
except AttributeError:
pass
lines.append(line[:-1])
return lines
if zip_file:
with zipfile.ZipFile(zip_file, 'r') as zz:
with zz.open(csv_file, 'r') as ff:
test_lines = read_first_lines(ff)
else:
with open(csv_file, 'r') as ff:
test_lines = read_first_lines(ff)
sep_aly_lines = [sorted([(line.count(sep), sep)
for sep in potential_sep if line.count(sep) > 0],
key=lambda x: x[0], reverse=True) for line in test_lines]
for nr, (count, sep) in enumerate(sep_aly_lines[0]):
for line in sep_aly_lines:
if line[nr][0] == count:
break
else:
sep = None
if sep:
break
nr_header_row = None
nr_index_col = None
if sep:
nr_index_col = find_first_number(test_lines[-1].split(sep))
if nr_index_col:
for nr_header_row, line in enumerate(test_lines):
if find_first_number(line.split(sep)) == nr_index_col:
break
return dict(sep=sep,
nr_header_row=nr_header_row,
nr_index_col=nr_index_col)
|
def sniff_csv_format(csv_file,
potential_sep=['\t', ',', ';', '|', '-', '_'],
max_test_lines=10,
zip_file=None)
|
Tries to get the separator, nr of index cols and header rows in a csv file
Parameters
----------
csv_file: str
Path to a csv file
potential_sep: list, optional
List of potential separators (delimiters) to test.
Default: '\t', ',', ';', '|', '-', '_'
max_test_lines: int, optional
How many lines to test, default: 10 or available lines in csv_file
zip_file: str, optional
Path to a zip file containing the csv file (if any, default: None).
If a zip file is given, the path given at 'csv_file' is assumed
to be the path to the file within the zip_file.
Returns
-------
dict with
sep: string (separator)
nr_index_col: int
nr_header_row: int
        Entries are set to None if the information in the file is inconsistent
| 2.254467
| 2.104145
| 1.071441
|
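A sketch of sniffing a small tab separated file, assuming sniff_csv_format is importable from pymrio.tools.ioutil (the module path is an assumption); the file content is made up for the example.

import tempfile
from pymrio.tools.ioutil import sniff_csv_format   # module path is an assumption

with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as tf:
    tf.write('region\tsector\tvalue\n'
             'reg1\tfood\t1.5\n'
             'reg1\tmining\t2.0\n')
    csv_path = tf.name

print(sniff_csv_format(csv_path))
# expected here: {'sep': '\t', 'nr_header_row': 1, 'nr_index_col': 2}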
rlist = []
wlist = []
xlist = []
for socket, flags in self.sockets.items():
if isinstance(socket, zmq.Socket):
rlist.append(socket.getsockopt(zmq.FD))
continue
elif isinstance(socket, int):
fd = socket
elif hasattr(socket, 'fileno'):
try:
fd = int(socket.fileno())
except:
                    raise ValueError('fileno() must return a valid integer fd')
else:
raise TypeError('Socket must be a 0MQ socket, an integer fd '
'or have a fileno() method: %r' % socket)
if flags & zmq.POLLIN:
rlist.append(fd)
if flags & zmq.POLLOUT:
wlist.append(fd)
if flags & zmq.POLLERR:
xlist.append(fd)
return (rlist, wlist, xlist)
|
def _get_descriptors(self)
|
    Returns a three-element tuple with socket descriptors ready
for gevent.select.select
| 2.63343
| 2.340781
| 1.125022
|
if timeout is None:
timeout = -1
if timeout < 0:
timeout = -1
rlist = None
wlist = None
xlist = None
if timeout > 0:
tout = gevent.Timeout.start_new(timeout/1000.0)
try:
# Loop until timeout or events available
rlist, wlist, xlist = self._get_descriptors()
while True:
events = super(GreenPoller, self).poll(0)
if events or timeout == 0:
return events
# wait for activity on sockets in a green way
select.select(rlist, wlist, xlist)
        except gevent.Timeout as t:
if t is not tout:
raise
return []
finally:
if timeout > 0:
tout.cancel()
|
def poll(self, timeout=-1)
|
Overridden method to ensure that the green version of
Poller is used.
Behaves the same as :meth:`zmq.core.Poller.poll`
| 4.042084
| 3.632162
| 1.112859
|
file_id = kwargs['file_id']
kwargs['file_id'] = file_id if str(file_id).strip() else None
kwargs['cid'] = kwargs['file_id'] or None
kwargs['rate_download'] = kwargs['rateDownload']
kwargs['percent_done'] = kwargs['percentDone']
kwargs['add_time'] = get_utcdatetime(kwargs['add_time'])
kwargs['last_update'] = get_utcdatetime(kwargs['last_update'])
is_transferred = (kwargs['status'] == 2 and kwargs['move'] == 1)
if is_transferred:
kwargs['pid'] = api.downloads_directory.cid
else:
kwargs['pid'] = None
del kwargs['rateDownload']
del kwargs['percentDone']
if 'url' in kwargs:
if not kwargs['url']:
kwargs['url'] = None
else:
kwargs['url'] = None
task = Task(api, **kwargs)
if is_transferred:
task._parent = api.downloads_directory
return task
|
def _instantiate_task(api, kwargs)
|
Create a Task object from raw kwargs
| 3.455542
| 3.38128
| 1.021963
|
r = self.session.get(url, params=params)
return self._response_parser(r, expect_json=False)
|
def get(self, url, params=None)
|
Initiate a GET request
| 4.365252
| 4.710257
| 0.926755
|
r = self.session.post(url, data=data, params=params)
return self._response_parser(r, expect_json=False)
|
def post(self, url, data, params=None)
|
Initiate a POST request
| 3.65593
| 4.039637
| 0.905015
|
r = self.session.request(method=request.method,
url=request.url,
params=request.params,
data=request.data,
files=request.files,
headers=request.headers)
return self._response_parser(r, expect_json, ignore_content)
|
def send(self, request, expect_json=True, ignore_content=False)
|
Send a formatted API request
:param request: a formatted request object
:type request: :class:`.Request`
:param bool expect_json: if True, raise :class:`.InvalidAPIAccess` if
response is not in JSON format
:param bool ignore_content: whether to ignore setting content of the
Response object
| 2.280971
| 2.612687
| 0.873037
|
if r.ok:
try:
j = r.json()
return Response(j.get('state'), j)
except ValueError:
# No JSON-encoded data returned
if expect_json:
logger = logging.getLogger(conf.LOGGING_API_LOGGER)
logger.debug(r.text)
raise InvalidAPIAccess('Invalid API access.')
# Raw response
if ignore_content:
res = Response(True, None)
else:
res = Response(True, r.text)
return res
else:
r.raise_for_status()
|
def _response_parser(self, r, expect_json=True, ignore_content=False)
|
:param :class:`requests.Response` r: a response object of the Requests
library
:param bool expect_json: if True, raise :class:`.InvalidAPIAccess` if
response is not in JSON format
:param bool ignore_content: whether to ignore setting content of the
Response object
| 4.307644
| 3.958281
| 1.088261
|
self._init_cookies()
if os.path.exists(self.cookies.filename):
self.cookies.load(ignore_discard=ignore_discard,
ignore_expires=ignore_expires)
self._reset_cache()
|
def load_cookies(self, ignore_discard=True, ignore_expires=True)
|
Load cookies from the file :attr:`.API.cookies_filename`
| 3.211503
| 3.050335
| 1.052836
|
if not isinstance(self.cookies, cookielib.FileCookieJar):
m = 'Cookies must be a cookielib.FileCookieJar object to be saved.'
raise APIError(m)
self.cookies.save(ignore_discard=ignore_discard,
ignore_expires=ignore_expires)
|
def save_cookies(self, ignore_discard=True, ignore_expires=True)
|
Save cookies to the file :attr:`.API.cookies_filename`
| 3.0543
| 2.89833
| 1.053814
|
if self.has_logged_in:
return True
if username is None or password is None:
credential = conf.get_credential(section)
username = credential['username']
password = credential['password']
passport = Passport(username, password)
r = self.http.post(LOGIN_URL, passport.form)
if r.state is True:
# Bind this passport to API
self.passport = passport
passport.data = r.content['data']
self._user_id = r.content['data']['USER_ID']
return True
else:
msg = None
if 'err_name' in r.content:
if r.content['err_name'] == 'account':
msg = 'Account does not exist.'
elif r.content['err_name'] == 'passwd':
msg = 'Password is incorrect.'
raise AuthenticationError(msg)
|
def login(self, username=None, password=None,
section='default')
|
    Creates the passport with ``username`` and ``password`` and logs in.
If either ``username`` or ``password`` is None or omitted, the
credentials file will be parsed.
:param str username: username to login (email, phone number or user ID)
:param str password: password
:param str section: section name in the credential file
:raise: raises :class:`.AuthenticationError` if failed to login
| 3.721547
| 3.685889
| 1.009674
|
if self._user_id is None:
if self.has_logged_in:
self._user_id = self._req_get_user_aq()['data']['uid']
else:
raise AuthenticationError('Not logged in.')
return self._user_id
|
def user_id(self)
|
User id of the current API user
| 6.009076
| 5.341576
| 1.124963
|
if self._username is None:
if self.has_logged_in:
self._username = self._get_username()
else:
raise AuthenticationError('Not logged in.')
return self._username
|
def username(self)
|
Username of the current API user
| 3.573343
| 3.540927
| 1.009155
|
r = self.http.get(CHECKPOINT_URL)
if r.state is False:
return True
# If logged out, flush cache
self._reset_cache()
return False
|
def has_logged_in(self)
|
Check whether the API has logged in
| 12.072679
| 10.248111
| 1.178039
|