| sentence1 | sentence2 | label |
|---|---|---|
def check_dict_expected_keys(self, expected_keys, current, dict_name):
""" Check that we don't have unknown keys in a dictionary.
    It does not raise an error if we have fewer keys than expected.
"""
if not isinstance(current, dict):
raise ParseError(u"'{}' key must be a dict".format(dict_name),
YAML_EXAMPLE)
expected_keys = set(expected_keys)
        current_keys = set(current)
extra_keys = current_keys - expected_keys
if extra_keys:
message = u"{}: the keys {} are unexpected. (allowed keys: {})"
raise ParseError(
message.format(
dict_name,
list(extra_keys),
list(expected_keys),
),
YAML_EXAMPLE,
)
|
Check that we don't have unknown keys in a dictionary.
It does not raise an error if we have fewer keys than expected.
|
entailment
|
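A minimal standalone sketch of the validation behavior above; `ParseError` and `YAML_EXAMPLE` here are simplified stand-ins for the parser's real error type and help text:

```python
# Simplified stand-ins for the parser's real ParseError and YAML_EXAMPLE.
class ParseError(Exception):
    def __init__(self, message, example):
        super().__init__(message)
        self.example = example

YAML_EXAMPLE = "migration:\n  options: ...\n  versions: ..."

def check_dict_expected_keys(expected_keys, current, dict_name):
    if not isinstance(current, dict):
        raise ParseError("'{}' key must be a dict".format(dict_name), YAML_EXAMPLE)
    extra_keys = set(current) - set(expected_keys)
    if extra_keys:
        raise ParseError(
            "{}: the keys {} are unexpected. (allowed keys: {})".format(
                dict_name, sorted(extra_keys), sorted(expected_keys)),
            YAML_EXAMPLE,
        )

check_dict_expected_keys({'options', 'versions'}, {'options': {}}, 'migration')  # passes silently
try:
    check_dict_expected_keys({'options', 'versions'}, {'foo': 1}, 'migration')
except ParseError as e:
    print(e)  # migration: the keys ['foo'] are unexpected. (allowed keys: ...)
```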
def parse(self):
"""Check input and return a :class:`Migration` instance."""
if not self.parsed.get('migration'):
raise ParseError(u"'migration' key is missing", YAML_EXAMPLE)
self.check_dict_expected_keys(
{'options', 'versions'}, self.parsed['migration'], 'migration',
)
return self._parse_migrations()
|
Check input and return a :class:`Migration` instance.
|
entailment
|
def _parse_migrations(self):
"""Build a :class:`Migration` instance."""
migration = self.parsed['migration']
options = self._parse_options(migration)
versions = self._parse_versions(migration, options)
return Migration(versions, options)
|
Build a :class:`Migration` instance.
|
entailment
|
def _parse_options(self, migration):
"""Build :class:`MigrationOption` and
:class:`MigrationBackupOption` instances."""
options = migration.get('options', {})
install_command = options.get('install_command')
backup = options.get('backup')
if backup:
self.check_dict_expected_keys(
{'command', 'ignore_if', 'stop_on_failure'},
options['backup'], 'backup',
)
backup = MigrationBackupOption(
command=backup.get('command'),
ignore_if=backup.get('ignore_if'),
stop_on_failure=backup.get('stop_on_failure', True),
)
return MigrationOption(
install_command=install_command,
backup=backup,
)
|
Build :class:`MigrationOption` and
:class:`MigrationBackupOption` instances.
|
entailment
|
def set_message(self, title, msg, typ, url=None):
"""
Sets user notification message.
Args:
title: Msg. title
msg: Msg. text
typ: Msg. type
url: Additional URL (if exists)
Returns:
Message ID.
"""
return self.user.send_notification(title=title,
message=msg,
typ=typ,
url=url)
|
Sets user notification message.
Args:
title: Msg. title
msg: Msg. text
typ: Msg. type
url: Additional URL (if exists)
Returns:
Message ID.
|
entailment
|
def is_auth(self):
"""
A property that indicates if current user is logged in or not.
Returns:
Boolean.
"""
if self.user_id is None:
self.user_id = self.session.get('user_id')
return bool(self.user_id)
|
A property that indicates if current user is logged in or not.
Returns:
Boolean.
|
entailment
|
def has_permission(self, perm):
"""
Checks if current user (or role) has the given permission.
Args:
            perm: Permission code or object.
Depends on the :attr:`~zengine.auth.auth_backend.AuthBackend` implementation.
Returns:
Boolean.
"""
return self.user.superuser or self.auth.has_permission(perm)
|
Checks if current user (or role) has the given permission.
Args:
perm: Permission code or object.
Depends on the :attr:`~zengine.auth.auth_backend.AuthBackend` implementation.
Returns:
Boolean.
|
entailment
|
def msg_box(self, msg, title=None, typ='info'):
"""
Create a message box
:param str msg:
:param str title:
:param str typ: 'info', 'error', 'warning'
"""
self.output['msgbox'] = {'type': typ, "title": title or msg[:20], "msg": msg}
|
Create a message box
:param str msg:
:param str title:
:param str typ: 'info', 'error', 'warning'
|
entailment
|
def sendoff_current_user(self):
"""
        Tell the current user that they have finished their job for now.
        We'll notify them if the workflow arrives at their WF lane again.
"""
msgs = self.task_data.get('LANE_CHANGE_MSG', DEFAULT_LANE_CHANGE_MSG)
self.msg_box(title=msgs['title'], msg=msgs['body'])
|
Tell the current user that they have finished their job for now.
We'll notify them if the workflow arrives at their WF lane again.
|
entailment
|
def invite_other_parties(self, possible_owners):
"""
Invites the next lane's (possible) owner(s) to participate
"""
signals.lane_user_change.send(sender=self.user,
current=self,
old_lane=self.old_lane,
possible_owners=possible_owners
)
|
Invites the next lane's (possible) owner(s) to participate
|
entailment
|
def _update_task(self, task):
"""
Assigns current task step to self.task
then updates the task's data with self.task_data
Args:
task: Task object.
"""
self.task = task
self.task.data.update(self.task_data)
self.task_type = task.task_spec.__class__.__name__
self.spec = task.task_spec
self.task_name = task.get_name()
self.activity = getattr(self.spec, 'service_class', '')
self._set_lane_data()
|
Assigns current task step to self.task
then updates the task's data with self.task_data
Args:
task: Task object.
|
entailment
|
def set_client_cmds(self):
"""
        This method is automatically called on each request and
        updates the "object_id", "cmd" and "flow" client variables
        from current.input.
        The "flow" and "object_id" variables will always exist in
        task_data, so app developers can safely check for their
        values in workflows.
        Their values will be reset to None if they do not exist
        in the current input data set.
        Likewise, if there isn't a "cmd" in current.input,
        "cmd" will be set to None in task_data.
"""
self.task_data['cmd'] = self.input.get('cmd')
self.task_data['flow'] = self.input.get('flow')
filters = self.input.get('filters', {})
try:
if isinstance(filters, dict):
# this is the new form, others will be removed when ui be ready
self.task_data['object_id'] = filters.get('object_id')['values'][0]
elif filters[0]['field'] == 'object_id':
self.task_data['object_id'] = filters[0]['values'][0]
        except (TypeError, KeyError, IndexError):
if 'object_id' in self.input:
self.task_data['object_id'] = self.input.get('object_id')
|
This method is automatically called on each request and
updates the "object_id", "cmd" and "flow" client variables
from current.input.
The "flow" and "object_id" variables will always exist in
task_data, so app developers can safely check for their
values in workflows.
Their values will be reset to None if they do not exist
in the current input data set.
Likewise, if there isn't a "cmd" in current.input,
"cmd" will be set to None in task_data.
|
entailment
|
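The filter-parsing logic above is the densest part of the method; here is a standalone sketch (the function name is hypothetical) of the `object_id` extraction for the two filter shapes the method accepts:

```python
# Hypothetical standalone version of the object_id extraction above.
def extract_object_id(filters):
    try:
        if isinstance(filters, dict):
            # new form: {'object_id': {'values': [...]}}
            return filters.get('object_id')['values'][0]
        elif filters[0]['field'] == 'object_id':
            # legacy form: [{'field': 'object_id', 'values': [...]}]
            return filters[0]['values'][0]
    except (TypeError, KeyError, IndexError):
        return None

print(extract_object_id({'object_id': {'values': ['42']}}))           # '42'
print(extract_object_id([{'field': 'object_id', 'values': ['42']}]))  # '42'
print(extract_object_id({}))                                          # None
```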
def generate_move(self, position):
"""
Returns valid and legal move given position
:type: position: Board
:rtype: Move
"""
while True:
print(position)
raw = input(str(self.color) + "\'s move \n")
move = converter.short_alg(raw, self.color, position)
if move is None:
continue
return move
|
Returns valid and legal move given position
:type: position: Board
:rtype: Move
|
entailment
|
def in_check_as_result(self, pos, move):
"""
Finds if playing my move would make both kings meet.
:type: pos: Board
:type: move: Move
:rtype: bool
"""
test = cp(pos)
test.update(move)
test_king = test.get_king(move.color)
return self.loc_adjacent_to_opponent_king(test_king.location, test)
|
Finds if playing my move would make both kings meet.
:type: pos: Board
:type: move: Move
:rtype: bool
|
entailment
|
def loc_adjacent_to_opponent_king(self, location, position):
"""
Finds if 2 kings are touching given the position of one of the kings.
:type: location: Location
:type: position: Board
:rtype: bool
"""
for fn in self.cardinal_directions:
try:
if isinstance(position.piece_at_square(fn(location)), King) and \
position.piece_at_square(fn(location)).color != self.color:
return True
except IndexError:
pass
return False
|
Finds if 2 kings are touching given the position of one of the kings.
:type: location: Location
:type: position: Board
:rtype: bool
|
entailment
|
def add(self, func, position):
"""
Adds all 8 cardinal directions as moves for the King if legal.
:type: function: function
:type: position: Board
:rtype: gen
"""
try:
if self.loc_adjacent_to_opponent_king(func(self.location), position):
return
except IndexError:
return
if position.is_square_empty(func(self.location)):
yield self.create_move(func(self.location), notation_const.MOVEMENT)
elif position.piece_at_square(func(self.location)).color != self.color:
yield self.create_move(func(self.location), notation_const.CAPTURE)
|
Adds all 8 cardinal directions as moves for the King if legal.
:type: function: function
:type: position: Board
:rtype: gen
|
entailment
|
def _rook_legal_for_castle(self, rook):
"""
Decides if given rook exists, is of this color, and has not moved so it
is eligible to castle.
:type: rook: Rook
:rtype: bool
"""
return rook is not None and \
type(rook) is Rook and \
rook.color == self.color and \
not rook.has_moved
|
Decides if given rook exists, is of this color, and has not moved so it
is eligible to castle.
:type: rook: Rook
:rtype: bool
|
entailment
|
def _empty_not_in_check(self, position, direction):
"""
Checks if set of squares in between ``King`` and ``Rook`` are empty and safe
for the king to castle.
:type: position: Position
:type: direction: function
:rtype: bool
"""
def valid_square(square):
return position.is_square_empty(square) and \
not self.in_check(position, square)
return valid_square(direction(self.location, 1)) and \
valid_square(direction(self.location, 2))
|
Checks if set of squares in between ``King`` and ``Rook`` are empty and safe
for the king to castle.
:type: position: Position
:type: direction: function
:rtype: bool
|
entailment
|
def add_castle(self, position):
"""
Adds kingside and queenside castling moves if legal
:type: position: Board
"""
if self.has_moved or self.in_check(position):
return
if self.color == color.white:
rook_rank = 0
else:
rook_rank = 7
castle_type = {
notation_const.KING_SIDE_CASTLE: {
"rook_file": 7,
"direction": lambda king_square, times: king_square.shift_right(times)
},
notation_const.QUEEN_SIDE_CASTLE: {
"rook_file": 0,
"direction": lambda king_square, times: king_square.shift_left(times)
}
}
for castle_key in castle_type:
castle_dict = castle_type[castle_key]
castle_rook = position.piece_at_square(Location(rook_rank, castle_dict["rook_file"]))
if self._rook_legal_for_castle(castle_rook) and \
self._empty_not_in_check(position, castle_dict["direction"]):
yield self.create_move(castle_dict["direction"](self.location, 2), castle_key)
|
Adds kingside and queenside castling moves if legal
:type: position: Board
|
entailment
|
def possible_moves(self, position):
"""
Generates list of possible moves
:type: position: Board
:rtype: list
"""
# Chain used to combine multiple generators
for move in itertools.chain(*[self.add(fn, position) for fn in self.cardinal_directions]):
yield move
for move in self.add_castle(position):
yield move
|
Generates list of possible moves
:type: position: Board
:rtype: list
|
entailment
|
def in_check(self, position, location=None):
"""
Finds if the king is in check or if both kings are touching.
:type: position: Board
:return: bool
"""
location = location or self.location
for piece in position:
if piece is not None and piece.color != self.color:
if not isinstance(piece, King):
for move in piece.possible_moves(position):
if move.end_loc == location:
return True
else:
if self.loc_adjacent_to_opponent_king(piece.location, position):
return True
return False
|
Finds if the king is in check or if both kings are touching.
:type: position: Board
:return: bool
|
entailment
|
def set_keep_alive(sock, idle=10, interval=5, fails=5):
"""Sets the keep-alive setting for the peer socket.
:param sock: Socket to be configured.
    :param idle: Idle time in seconds after which keep-alive probes start being sent.
:param interval: Interval in seconds between probes.
:param fails: Maximum number of failed probes.
"""
import sys
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if sys.platform in ('linux', 'linux2'):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, idle)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, fails)
elif sys.platform == 'darwin':
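        # 0x10 is TCP_KEEPALIVE on macOS, the idle-time option analogous to TCP_KEEPIDLE on Linux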
sock.setsockopt(socket.IPPROTO_TCP, 0x10, interval)
else:
# Do nothing precise for unsupported platforms.
pass
|
Sets the keep-alive setting for the peer socket.
:param sock: Socket to be configured.
:param idle: Idle time in seconds after which keep-alive probes start being sent.
:param interval: Interval in seconds between probes.
:param fails: Maximum number of failed probes.
|
entailment
|
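For illustration, these are the equivalent direct option calls on Linux; with the defaults above, a silent peer is declared dead after roughly idle + interval * fails = 10 + 5 * 5 = 35 seconds:

```python
import socket

# Linux-only sketch: TCP_KEEPIDLE/TCP_KEEPINTVL/TCP_KEEPCNT are not
# exposed by the socket module on all platforms.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 10)   # seconds idle before first probe
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 5)   # seconds between probes
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)     # failed probes before the connection drops
sock.close()
```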
def init_default(cls):
"""
Creates a ``Board`` with the standard chess starting position.
:rtype: Board
"""
return cls([
# First rank
[Rook(white, Location(0, 0)), Knight(white, Location(0, 1)), Bishop(white, Location(0, 2)),
Queen(white, Location(0, 3)), King(white, Location(0, 4)), Bishop(white, Location(0, 5)),
Knight(white, Location(0, 6)), Rook(white, Location(0, 7))],
# Second rank
[Pawn(white, Location(1, file)) for file in range(8)],
# Third rank
[None for _ in range(8)],
# Fourth rank
[None for _ in range(8)],
# Fifth rank
[None for _ in range(8)],
# Sixth rank
[None for _ in range(8)],
# Seventh rank
[Pawn(black, Location(6, file)) for file in range(8)],
# Eighth rank
[Rook(black, Location(7, 0)), Knight(black, Location(7, 1)), Bishop(black, Location(7, 2)),
Queen(black, Location(7, 3)), King(black, Location(7, 4)), Bishop(black, Location(7, 5)),
Knight(black, Location(7, 6)), Rook(black, Location(7, 7))]
])
|
Creates a ``Board`` with the standard chess starting position.
:rtype: Board
|
entailment
|
def material_advantage(self, input_color, val_scheme):
"""
Finds the advantage a particular side possesses given a value scheme.
:type: input_color: Color
:type: val_scheme: PieceValues
:rtype: double
"""
if self.get_king(input_color).in_check(self) and self.no_moves(input_color):
return -100
if self.get_king(-input_color).in_check(self) and self.no_moves(-input_color):
return 100
return sum([val_scheme.val(piece, input_color) for piece in self])
|
Finds the advantage a particular side possesses given a value scheme.
:type: input_color: Color
:type: val_scheme: PieceValues
:rtype: double
|
entailment
|
def advantage_as_result(self, move, val_scheme):
"""
Calculates advantage after move is played
:type: move: Move
:type: val_scheme: PieceValues
:rtype: double
"""
test_board = cp(self)
test_board.update(move)
return test_board.material_advantage(move.color, val_scheme)
|
Calculates advantage after move is played
:type: move: Move
:type: val_scheme: PieceValues
:rtype: double
|
entailment
|
def all_possible_moves(self, input_color):
"""
        Checks if all the possible moves have already been calculated
and is stored in `possible_moves` dictionary. If not, it is calculated
with `_calc_all_possible_moves`.
:type: input_color: Color
:rtype: list
"""
position_tuple = self.position_tuple
if position_tuple not in self.possible_moves:
self.possible_moves[position_tuple] = tuple(self._calc_all_possible_moves(input_color))
return self.possible_moves[position_tuple]
|
Checks if all the possible moves have already been calculated
and is stored in `possible_moves` dictionary. If not, it is calculated
with `_calc_all_possible_moves`.
:type: input_color: Color
:rtype: list
|
entailment
|
def _calc_all_possible_moves(self, input_color):
"""
Returns list of all possible moves
:type: input_color: Color
:rtype: list
"""
for piece in self:
# Tests if square on the board is not empty
if piece is not None and piece.color == input_color:
for move in piece.possible_moves(self):
test = cp(self)
test_move = Move(end_loc=move.end_loc,
piece=test.piece_at_square(move.start_loc),
status=move.status,
start_loc=move.start_loc,
promoted_to_piece=move.promoted_to_piece)
test.update(test_move)
if self.king_loc_dict is None:
yield move
continue
my_king = test.piece_at_square(self.king_loc_dict[input_color])
if my_king is None or \
not isinstance(my_king, King) or \
my_king.color != input_color:
self.king_loc_dict[input_color] = test.find_king(input_color)
my_king = test.piece_at_square(self.king_loc_dict[input_color])
if not my_king.in_check(test):
yield move
|
Returns list of all possible moves
:type: input_color: Color
:rtype: list
|
entailment
|
def runInParallel(*fns):
"""
Runs multiple processes in parallel.
:type: fns: def
"""
proc = []
for fn in fns:
p = Process(target=fn)
p.start()
proc.append(p)
for p in proc:
p.join()
|
Runs multiple processes in parallel.
:type: fns: def
|
entailment
|
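A self-contained usage sketch of the fan-out/join pattern above (the worker function is hypothetical):

```python
from multiprocessing import Process

def runInParallel(*fns):
    # Start one process per function, then wait for all of them.
    procs = [Process(target=fn) for fn in fns]
    for p in procs:
        p.start()
    for p in procs:
        p.join()

def greet():
    print("hello from a worker process")

if __name__ == '__main__':
    runInParallel(greet, greet, greet)
```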
def find_piece(self, piece):
"""
Finds Location of the first piece that matches piece.
If none is found, Exception is raised.
:type: piece: Piece
:rtype: Location
"""
for i, _ in enumerate(self.position):
for j, _ in enumerate(self.position):
loc = Location(i, j)
if not self.is_square_empty(loc) and \
self.piece_at_square(loc) == piece:
return loc
raise ValueError("{} \nPiece not found: {}".format(self, piece))
|
Finds Location of the first piece that matches piece.
If none is found, Exception is raised.
:type: piece: Piece
:rtype: Location
|
entailment
|
def get_piece(self, piece_type, input_color):
"""
Gets location of a piece on the board given the type and color.
:type: piece_type: Piece
:type: input_color: Color
:rtype: Location
"""
for loc in self:
piece = self.piece_at_square(loc)
if not self.is_square_empty(loc) and \
isinstance(piece, piece_type) and \
piece.color == input_color:
return loc
raise Exception("{} \nPiece not found: {}".format(self, piece_type))
|
Gets location of a piece on the board given the type and color.
:type: piece_type: Piece
:type: input_color: Color
:rtype: Location
|
entailment
|
def place_piece_at_square(self, piece, location):
"""
        Places piece at the given location
:type: piece: Piece
:type: location: Location
"""
self.position[location.rank][location.file] = piece
piece.location = location
|
Places piece at the given location
:type: piece: Piece
:type: location: Location
|
entailment
|
def move_piece(self, initial, final):
"""
Moves piece from one location to another
:type: initial: Location
:type: final: Location
"""
self.place_piece_at_square(self.piece_at_square(initial), final)
self.remove_piece_at_square(initial)
|
Moves piece from one location to another
:type: initial: Location
:type: final: Location
|
entailment
|
def update(self, move):
"""
Updates position by applying selected move
:type: move: Move
"""
if move is None:
raise TypeError("Move cannot be type None")
if self.king_loc_dict is not None and isinstance(move.piece, King):
self.king_loc_dict[move.color] = move.end_loc
# Invalidates en-passant
for square in self:
pawn = square
if isinstance(pawn, Pawn):
pawn.just_moved_two_steps = False
        # Sets King and Rook has_moved property to True if the piece has moved
if type(move.piece) is King or type(move.piece) is Rook:
move.piece.has_moved = True
elif move.status == notation_const.MOVEMENT and \
isinstance(move.piece, Pawn) and \
fabs(move.end_loc.rank - move.start_loc.rank) == 2:
move.piece.just_moved_two_steps = True
if move.status == notation_const.KING_SIDE_CASTLE:
self.move_piece(Location(move.end_loc.rank, 7), Location(move.end_loc.rank, 5))
self.piece_at_square(Location(move.end_loc.rank, 5)).has_moved = True
elif move.status == notation_const.QUEEN_SIDE_CASTLE:
self.move_piece(Location(move.end_loc.rank, 0), Location(move.end_loc.rank, 3))
self.piece_at_square(Location(move.end_loc.rank, 3)).has_moved = True
elif move.status == notation_const.EN_PASSANT:
self.remove_piece_at_square(Location(move.start_loc.rank, move.end_loc.file))
elif move.status == notation_const.PROMOTE or \
move.status == notation_const.CAPTURE_AND_PROMOTE:
try:
self.remove_piece_at_square(move.start_loc)
self.place_piece_at_square(move.promoted_to_piece(move.color, move.end_loc), move.end_loc)
except TypeError as e:
raise ValueError("Promoted to piece cannot be None in Move {}\n{}".format(repr(move), e))
return
self.move_piece(move.piece.location, move.end_loc)
|
Updates position by applying selected move
:type: move: Move
|
entailment
|
def _balance(self):
    """ calculate unbalanced charges and radicals for skin atoms
"""
meta = h.meta
for n in (skin_reagent.keys() | skin_product.keys()):
lost = skin_reagent[n]
cycle_lost = cycle(lost)
new = skin_product[n]
cycle_new = cycle(new)
atom = h._node[n]
dr = atom.p_radical - atom.radical
# radical balancing
if dr > 0: # radical added or increased.
for _, m in zip(range(dr), cycle_lost): # homolysis
s_atom = h._node[m]
s_atom.p_multiplicity = radical_unmap[s_atom.p_radical + 1]
meta.setdefault('rule #14. atom lost. common atom radical added or increased. '
'lost atom radical added', []).append((m, n))
for m in lost[dr:]:
meta.setdefault('rule #15. atom lost. common atom radical added or increased. '
'lost atom radical unchanged', []).append((m, n))
elif dr < 0: # radical removed or decreased.
if n in skin_product:
for m in lost:
meta.setdefault('rule #20. atom lost. common atom radical removed or decreased. '
'lost atom radical unchanged', []).append((m, n))
else:
for _, m in zip(range(-dr), cycle_lost): # radical elimination
s_atom = h._node[m]
s_atom.p_multiplicity = radical_unmap[s_atom.p_radical + 1]
meta.setdefault('rule #21. atom lost. common atom radical removed or decreased. '
'lost atom radical added', []).append((m, n))
for m in lost[-dr:]:
meta.setdefault('rule #20. atom lost. common atom radical removed or decreased. '
'lost atom radical unchanged', []).append((m, n))
else:
env = h.environment(n)
sv = atom.get_valence([(b.reagent, a.reagent) for b, a in env if b.order])
pv = atom.p_get_valence([(b.product, a.product) for b, a in env if b.p_order])
sh, ph = h.atom_total_h(n)
dv = pv - sv
dh = ph - sh
dc = atom.p_charge - atom.charge
if not (dv or dh or dc): # common atom unchanged. Substitution, Elimination
for m in skins:
meta.setdefault('rule #1. atom lost. common atom unchanged. '
'substitution, elimination, addition', []).append((m, n))
elif dv == dh == dc < 0: # explicit hydrogen removing
for m in skins:
h._node[m].p_charge = 1
meta.setdefault('rule #4. atom lost. common atom deprotonation', []).append((m, n))
else:
for m in skins:
meta.setdefault('rule #5. atom lost. common atom changed. '
'convert to reduction or oxidation', []).append((m, n))
pth = ph + sum(h.atom_total_h(x)[1] for x in skins)
if n in skin_product:
sth = sh + sum(h.atom_total_h(x)[0] for x in skin_product[n])
else:
sth = sh
dth = pth - sth
for n, skins in skin_product.items():
cycle_skins = cycle(skins)
atom = h._node[n]
dr = atom.p_radical - atom.radical
# radical balancing
if dr > 0: # radical added or increased.
if n in skin_reagent:
for m in skins:
meta.setdefault('rule #16. atom new. common atom radical added or increased. '
'new atom radical unchanged', []).append((m, n))
else:
for _, m in zip(range(dr), cycle_skins): # radical addition
s_atom = h._node[m]
s_atom.multiplicity = radical_unmap[s_atom.radical + 1]
meta.setdefault('rule #17. atom new. common atom radical added or increased. '
'new atom radical added', []).append((m, n))
for m in skins[dr:]:
meta.setdefault('rule #16. atom new. common atom radical added or increased. '
'new atom radical unchanged', []).append((m, n))
elif dr < 0: # radical removed or decreased.
for _, m in zip(range(-dr), cycle_skins): # recombination
s_atom = h._node[m]
s_atom.multiplicity = radical_unmap[s_atom.radical + 1]
meta.setdefault('rule #18. atom new. common atom radical removed or decreased. '
'new atom radical added', []).append((m, n))
for m in skins[-dr:]:
meta.setdefault('rule #19. atom new. common atom radical removed or decreased. '
'new atom radical unchanged', []).append((m, n))
else:
env = h.environment(n)
sv = atom.get_valence([(b.reagent, a.reagent) for b, a in env if b.order])
pv = atom.p_get_valence([(b.product, a.product) for b, a in env if b.p_order])
sh, ph = h.atom_total_h(n)
dv = pv - sv
dh = ph - sh
dc = atom.p_charge - atom.charge
if not (dv or dh or dc): # common atom unchanged. Substitution, Addition
for m in skins:
meta.setdefault('rule #2. atom new. common atom unchanged. '
'substitution, elimination, addition', []).append((m, n))
elif dv == dh == dc > 0: # explicit hydrogen addition
for m in skins:
h._node[m].charge = 1
h.meta.setdefault('rule #3. atom new. common atom protonation', []).append((m, n))
else:
for m in skins:
meta.setdefault('rule #6. atom new. common atom changed. '
'convert to reduction or oxidation', []).append((m, n))
sth = sh + sum(h.atom_total_h(x)[0] for x in skins)
if n in skin_reagent:
pth = ph + sum(h.atom_total_h(x)[1] for x in skin_reagent[n])
else:
pth = ph
dth = pth - sth
for n, sp in reverse_ext.items():
# charge neutralization
if dc > 0:
for _ in range(dc):
h.meta.setdefault('rule #7. charge neutralization. hydroxide radical added',
[]).append(h.add_atom(O(multiplicity=2), O(charge=-1)))
elif dc < 0:
for _ in range(-dc):
h.meta.setdefault('rule #8. charge neutralization. hydrogen radical added',
[]).append(h.add_atom(H(multiplicity=2), H(charge=1)))
# hydrogen balancing
if dth > 0:
red_e = 0
for m in sp['products']:
if h.nodes[m]['element'] == 'H': # set reduction H if explicit H count increased
h.nodes[m]['s_radical'] = 2
red_e += 1
h.meta.setdefault('rule #11. protonation. new explicit hydrogen radical added',
[]).append(m)
red = []
for _ in range(dth - red_e): # add reduction agents
m = h.add_atom(H(multiplicity=2), H())
red.append(m)
h.meta.setdefault('rule #10. protonation. hydrogen radical added', []).append(m)
red = iter(red)
dih = sub(*h.atom_implicit_h(n))
if dih < 0: # attach reduction H to central atom if implicit H atoms count increased
for _ in range(-dih):
m = next(red)
h.add_bond(m, n, None)
h.meta.setdefault('rule #12. protonation. new implicit hydrogen radical added',
[]).append(m)
for m in sp['reagents']: # attach reduction H if detached group implicit H count increased
dih = sub(*h.atom_implicit_h(m))
if dih < 0:
for _ in range(-dih):
o = next(red)
h.add_bond(o, m, None)
elif dth < 0:
oxo = []
for _ in range(-dth):
m = h.add_atom(O(multiplicity=2), O())
oxo.append(m)
h.meta.setdefault('rule #9. deprotonation. hydroxide radical added', []).append(m)
oxo = iter(oxo)
for m in sp['reagents']:
if h.nodes[m]['element'] == 'H':
o = next(oxo)
h.add_bond(o, m, None)
h.meta.setdefault('rule #13. hydrogen accepting by hydroxide radical added',
[]).append(m)
return h
|
calculate unbalanced charges and radicals for skin atoms
|
entailment
|
def clone_subgraphs(self, g):
if not isinstance(g, CGRContainer):
raise InvalidData('only CGRContainer acceptable')
r_group = []
x_group = {}
r_group_clones = []
newcomponents = []
''' search bond breaks and creations
'''
components, lost_bonds, term_atoms = self.__split_graph(g)
lost_map = {x: y for x, y in lost_bonds}
''' extract subgraphs and sort by group type (R or X)
'''
x_terminals = set(lost_map.values())
r_terminals = set(lost_map)
for i in components:
x_terminal_atom = x_terminals.intersection(i)
if x_terminal_atom:
x_group[x_terminal_atom.pop()] = i
continue
r_terminal_atom = r_terminals.intersection(i)
if r_terminal_atom:
r_group.append([r_terminal_atom, i])
continue
newcomponents.append(i)
''' search similar R groups and patch.
'''
tmp = g
for i in newcomponents:
for k, j in r_group:
gm = GraphMatcher(j, i, node_match=self.__node_match_products,
edge_match=self.__edge_match_products)
''' search for similar R-groups started from bond breaks.
'''
mapping = next((x for x in gm.subgraph_isomorphisms_iter() if k.issubset(x) and
all(x[y] in term_atoms for y in k)), None)
if mapping:
r_group_clones.append([k, mapping])
tmp = compose(tmp, self.__remap_group(j, tmp, mapping)[0])
break
''' add lose X groups to R groups
'''
for i, j in r_group_clones:
for k in i:
remappedgroup, mapping = self.__remap_group(x_group[lost_map[k]], tmp, {})
tmp = CGRcore.union(tmp, remappedgroup)
tmp.add_edge(j[k], mapping[lost_map[k]], s_bond=1, sp_bond=(1, None))
if r_group_clones:
tmp.meta.update(g.meta)
return tmp
return tmp.copy()
|
search bond breaks and creations
|
entailment
|
def __get_substitution_paths(g):
"""
get atoms paths from detached atom to attached
:param g: CGRContainer
:return: tuple of atoms numbers
"""
for n, nbrdict in g.adjacency():
for m, l in combinations(nbrdict, 2):
nms = nbrdict[m]['sp_bond']
nls = nbrdict[l]['sp_bond']
if nms == (1, None) and nls == (None, 1):
yield m, n, l
elif nms == (None, 1) and nls == (1, None):
yield l, n, m
|
get atoms paths from detached atom to attached
:param g: CGRContainer
:return: tuple of atoms numbers
|
entailment
|
def versions(self):
""" Read versions from the table
The versions are kept in cache for the next reads.
"""
if self._versions is None:
with self.database.cursor_autocommit() as cursor:
query = """
SELECT number,
date_start,
date_done,
log,
addons
FROM {}
""".format(self.table_name)
cursor.execute(query)
rows = cursor.fetchall()
versions = []
for row in rows:
row = list(row)
# convert 'addons' to json
row[4] = json.loads(row[4]) if row[4] else []
versions.append(
self.VersionRecord(*row)
)
self._versions = versions
return self._versions
|
Read versions from the table
The versions are kept in cache for the next reads.
|
entailment
|
def simple_crud():
"""
Prepares menu entries for auto-generated model CRUD views.
    This is a simplified version of :attr:`get_crud_menus()` without
    Category support and permission control.
    It is meant for development purposes only.
Returns:
Dict of list of dicts (``{'':[{}],}``). Menu entries.
"""
results = defaultdict(list)
for mdl in model_registry.get_base_models():
results['other'].append({"text": mdl.Meta.verbose_name_plural,
"wf": 'crud',
"model": mdl.__name__,
"kategori": settings.DEFAULT_OBJECT_CATEGORY_NAME})
return results
|
Prepares menu entries for auto-generated model CRUD views.
This is a simplified version of :attr:`get_crud_menus()` without
Category support and permission control.
It is meant for development purposes only.
Returns:
Dict of list of dicts (``{'':[{}],}``). Menu entries.
|
entailment
|
def get_crud_menus(self):
"""
Generates menu entries according to
:attr:`zengine.settings.OBJECT_MENU` and permissions
of current user.
Returns:
Dict of list of dicts (``{'':[{}],}``). Menu entries.
"""
results = defaultdict(list)
for object_type in settings.OBJECT_MENU:
for model_data in settings.OBJECT_MENU[object_type]:
if self.current.has_permission(model_data.get('wf', model_data['name'])):
self._add_crud(model_data, object_type, results)
return results
|
Generates menu entries according to
:attr:`zengine.settings.OBJECT_MENU` and permissions
of current user.
Returns:
Dict of list of dicts (``{'':[{}],}``). Menu entries.
|
entailment
|
def _add_crud(self, model_data, object_type, results):
"""
Creates a menu entry for given model data.
Updates results in place.
Args:
model_data: Model data.
object_type: Relation name.
results: Results dict.
"""
model = model_registry.get_model(model_data['name'])
field_name = model_data.get('field')
verbose_name = model_data.get('verbose_name', model.Meta.verbose_name_plural)
category = model_data.get('category', settings.DEFAULT_OBJECT_CATEGORY_NAME)
wf_dict = {"text": verbose_name,
"wf": model_data.get('wf', "crud"),
"model": model_data['name'],
"kategori": category}
if field_name:
wf_dict['param'] = field_name
results[object_type].append(wf_dict)
self._add_to_quick_menu(wf_dict['model'], wf_dict)
|
Creates a menu entry for given model data.
Updates results in place.
Args:
model_data: Model data.
object_type: Relation name.
results: Results dict.
|
entailment
|
def _add_to_quick_menu(self, key, wf):
"""
Appends menu entries to dashboard quickmenu according
to :attr:`zengine.settings.QUICK_MENU`
Args:
key: workflow name
wf: workflow menu entry
"""
if key in settings.QUICK_MENU:
self.output['quick_menu'].append(wf)
|
Appends menu entries to dashboard quickmenu according
to :attr:`zengine.settings.QUICK_MENU`
Args:
key: workflow name
wf: workflow menu entry
|
entailment
|
def _get_workflow_menus(self):
"""
Creates menu entries for custom workflows.
Returns:
Dict of list of dicts (``{'':[{}],}``). Menu entries.
"""
results = defaultdict(list)
from zengine.lib.cache import WFSpecNames
for name, title, category in WFSpecNames().get_or_set():
if self.current.has_permission(name) and category != 'hidden':
wf_dict = {
"text": title,
"wf": name,
"kategori": category,
"param": "id"
}
results['other'].append(wf_dict)
self._add_to_quick_menu(name, wf_dict)
return results
|
Creates menu entries for custom workflows.
Returns:
Dict of list of dicts (``{'':[{}],}``). Menu entries.
|
entailment
|
def process_response(self, request, response, resource):
"""
Do response processing
"""
origin = request.get_header('Origin')
if not settings.DEBUG:
if origin in settings.ALLOWED_ORIGINS or not origin:
response.set_header('Access-Control-Allow-Origin', origin)
else:
log.debug("CORS ERROR: %s not allowed, allowed hosts: %s" % (origin,
settings.ALLOWED_ORIGINS))
raise falcon.HTTPForbidden("Denied", "Origin not in ALLOWED_ORIGINS: %s" % origin)
# response.status = falcon.HTTP_403
else:
response.set_header('Access-Control-Allow-Origin', origin or '*')
response.set_header('Access-Control-Allow-Credentials', "true")
response.set_header('Access-Control-Allow-Headers', 'Content-Type')
# This could be overridden in the resource level
response.set_header('Access-Control-Allow-Methods', 'OPTIONS')
|
Do response processing
|
entailment
|
def process_request(self, req, resp):
"""
        Do request processing
"""
if not req.client_accepts_json:
raise falcon.HTTPNotAcceptable(
'This API only supports responses encoded as JSON.',
href='http://docs.examples.com/api/json')
if req.method in ('POST', 'PUT'):
if req.content_length != 0 and \
'application/json' not in req.content_type and \
'text/plain' not in req.content_type:
raise falcon.HTTPUnsupportedMediaType(
'This API only supports requests encoded as JSON.',
href='http://docs.examples.com/api/json')
|
Do request processing
|
entailment
|
def process_request(self, req, resp):
"""
        Do request processing
"""
# req.stream corresponds to the WSGI wsgi.input environ variable,
# and allows you to read bytes from the request body.
#
# See also: PEP 3333
if req.content_length in (None, 0):
# Nothing to do
req.context['data'] = req.params.copy()
req.context['result'] = {}
return
else:
req.context['result'] = {}
body = req.stream.read()
if not body:
raise falcon.HTTPBadRequest('Empty request body',
'A valid JSON document is required.')
try:
json_data = body.decode('utf-8')
req.context['data'] = json.loads(json_data)
try:
log.info("REQUEST DATA: %s" % json_data)
            except Exception:
log.exception("ERR: REQUEST DATA CANT BE LOGGED ")
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPError(falcon.HTTP_753,
'Malformed JSON',
'Could not decode the request body. The '
'JSON was incorrect or not encoded as '
'UTF-8.')
|
Do request processing
|
entailment
|
def process_response(self, req, resp, resource):
"""
Serializes ``req.context['result']`` to resp.body as JSON.
If :attr:`~zengine.settings.DEBUG` is True,
``sys._debug_db_queries`` (set by pyoko) added to response.
"""
if 'result' not in req.context:
return
req.context['result']['is_login'] = 'user_id' in req.env['session']
if settings.DEBUG:
req.context['result']['_debug_queries'] = sys._debug_db_queries
sys._debug_db_queries = []
if resp.body is None and req.context['result']:
resp.body = json.dumps(req.context['result'])
try:
log.debug("RESPONSE: %s" % resp.body)
    except Exception:
log.exception("ERR: RESPONSE CANT BE LOGGED ")
|
Serializes ``req.context['result']`` to resp.body as JSON.
If :attr:`~zengine.settings.DEBUG` is True,
``sys._debug_db_queries`` (set by pyoko) added to response.
|
entailment
|
def connect(self):
"""
Creates connection to RabbitMQ server
"""
if self.connecting:
log.info('PikaClient: Already connecting to RabbitMQ')
return
log.info('PikaClient: Connecting to RabbitMQ')
self.connecting = True
self.connection = TornadoConnection(NON_BLOCKING_MQ_PARAMS,
stop_ioloop_on_close=False,
custom_ioloop=self.io_loop,
on_open_callback=self.on_connected)
|
Creates connection to RabbitMQ server
|
entailment
|
def on_connected(self, connection):
"""
AMQP connection callback.
Creates input channel.
Args:
connection: AMQP connection
"""
log.info('PikaClient: connected to RabbitMQ')
self.connected = True
self.in_channel = self.connection.channel(self.on_channel_open)
|
AMQP connection callback.
Creates input channel.
Args:
connection: AMQP connection
|
entailment
|
def on_channel_open(self, channel):
"""
        Input channel creation callback.
        Queue declaration is done here.
Args:
channel: input channel
"""
self.in_channel.exchange_declare(exchange='input_exc', type='topic', durable=True)
channel.queue_declare(callback=self.on_input_queue_declare, queue=self.INPUT_QUEUE_NAME)
|
Input channel creation callback.
Queue declaration is done here.
Args:
channel: input channel
|
entailment
|
def on_input_queue_declare(self, queue):
"""
Input queue declaration callback.
        Input Queue/Exchange binding is done here.
Args:
queue: input queue
"""
self.in_channel.queue_bind(callback=None,
exchange='input_exc',
queue=self.INPUT_QUEUE_NAME,
routing_key="#")
|
Input queue declaration callback.
Input Queue/Exchange binding is done here.
Args:
queue: input queue
|
entailment
|
def wsgi_app(self, request):
"""Incoming request handler.
:param request: Werkzeug request object
"""
try:
if request.method != 'POST':
abort(400)
try:
# Python 2.7 compatibility
data = request.data
if isinstance(data, str):
body = json.loads(data)
else:
body = json.loads(data.decode('utf-8'))
except ValueError:
abort(400)
if self.validate:
valid_cert = util.validate_request_certificate(
request.headers, request.data)
valid_ts = util.validate_request_timestamp(body)
if not valid_cert or not valid_ts:
log.error('failed to validate request')
abort(403)
resp_obj = self.alexa.dispatch_request(body)
return Response(response=json.dumps(resp_obj, indent=4),
status=200,
mimetype='application/json')
except HTTPException as exc:
log.exception('Failed to handle request')
return exc
|
Incoming request handler.
:param request: Werkzeug request object
|
entailment
|
def close(self, force=False):
"""
close opened file
:param force: force closing of externally opened file or buffer
"""
if self.__write:
self.write = self.__write_adhoc
self.__write = False
if not self._is_buffer or force:
self._file.close()
|
close opened file
:param force: force closing of externally opened file or buffer
|
entailment
|
def _convert_reaction(self, reaction):
if not (reaction['reactants'] or reaction['products'] or reaction['reagents']):
raise ValueError('empty reaction')
maps = {'reactants': [], 'products': [], 'reagents': []}
for i, tmp in maps.items():
for molecule in reaction[i]:
used = set()
for atom in molecule['atoms']:
m = atom['mapping']
if m:
if m in used:
if not self._ignore:
raise MappingError('mapping in molecules should be unique')
warning(f'non-unique mapping in molecule: {m}')
else:
used.add(m)
tmp.append(m)
length = count(max(max(maps['products'], default=0), max(maps['reactants'], default=0),
max(maps['reagents'], default=0)) + 1)
''' map unmapped atoms.
'''
for i, tmp in maps.items():
used = set()
maps[i] = remap = []
for m in tmp:
if not m:
remap.append(next(length))
elif m in used:
if not self._ignore:
raise MappingError('mapping in reagents or products or reactants should be unique')
# force remap non unique atoms in molecules.
remap.append(next(length))
warning(f'mapping changed: {m} to {remap[-1]}')
else:
remap.append(m)
used.add(m)
if maps['reagents']:
tmp = (set(maps['reactants']) | set(maps['products'])) & set(maps['reagents'])
if tmp:
e = f'reagents has map intersection with reactants or products: {tmp}'
if not self._ignore:
raise MappingError(e)
warning(e)
maps['reagents'] = [x if x not in tmp else next(length) for x in maps['reagents']]
''' find breaks in map. e.g. 1,2,5,6. 3,4 - skipped
'''
if self.__remap:
lose = sorted(set(range(1, next(length))) - set(maps['reactants']) - set(maps['products']) -
set(maps['reagents']), reverse=True)
if lose:
for i, tmp in maps.items():
if not tmp:
continue
for j in lose:
maps[i] = tmp = [x if x < j else x - 1 for x in tmp]
''' end
'''
rc = ReactionContainer(meta=reaction['meta'])
for i, tmp in maps.items():
shift = 0
for j in reaction[i]:
atom_len = len(j['atoms'])
remapped = {x: y for x, y in enumerate(tmp[shift: atom_len + shift])}
shift += atom_len
g = self.__convert_structure(j, remapped)
rc[i].append(g)
return rc
|
map unmapped atoms.
|
entailment
|
def aromatize(self):
"""
convert structure to aromatic form
:return: number of processed rings
"""
rings = [x for x in self.sssr if 4 < len(x) < 7]
if not rings:
return 0
total = 0
while True:
c = self._quinonize(rings, 'order')
if c:
total += c
elif total:
break
c = self._aromatize(rings, 'order')
if not c:
break
total += c
if total:
self.flush_cache()
return total
|
convert structure to aromatic form
:return: number of processed rings
|
entailment
|
def crawl_cmd(self, seed_list, n):
'''
Runs the crawl job for n rounds
:param seed_list: lines of seed URLs
:param n: number of rounds
:return: number of successful rounds
'''
print("Num Rounds "+str(n))
cc = self.proxy.Crawl(seed=seed_list, rounds=n)
rounds = cc.waitAll()
print("Completed %d rounds" % len(rounds))
return len(rounds)
|
Runs the crawl job for n rounds
:param seed_list: lines of seed URLs
:param n: number of rounds
:return: number of successful rounds
|
entailment
|
def load_xml_conf(self, xml_file, id):
'''
Creates a new config from xml file.
:param xml_file: path to xml file. Format : nutch-site.xml or nutch-default.xml
:param id:
:return: config object
'''
# converting nutch-site.xml to key:value pairs
import xml.etree.ElementTree as ET
tree = ET.parse(xml_file)
params = {}
for prop in tree.getroot().findall(".//property"):
params[prop.find('./name').text.strip()] = prop.find('./value').text.strip()
return self.proxy.Configs().create(id, configData=params)
|
Creates a new config from xml file.
:param xml_file: path to xml file. Format : nutch-site.xml or nutch-default.xml
:param id:
:return: config object
|
entailment
|
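A minimal demonstration of the property extraction used above, run on an inline nutch-site.xml style document instead of a file (the sample keys are illustrative):

```python
import xml.etree.ElementTree as ET
from io import StringIO

XML = """<configuration>
  <property><name>http.agent.name</name><value>my-crawler</value></property>
  <property><name>fetcher.threads.fetch</name><value>10</value></property>
</configuration>"""

tree = ET.parse(StringIO(XML))
# Collect <name>/<value> pairs from every <property> element.
params = {prop.find('./name').text.strip(): prop.find('./value').text.strip()
          for prop in tree.getroot().findall('.//property')}
print(params)  # {'http.agent.name': 'my-crawler', 'fetcher.threads.fetch': '10'}
```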
def create_cmd(self, args):
'''
'create' sub-command
:param args: cli arguments
:return:
'''
cmd = args.get('cmd_create')
if cmd == 'conf':
conf_file = args['conf_file']
conf_id = args['id']
return self.load_xml_conf(conf_file, conf_id)
else:
print("Error: Create %s is invalid or not implemented" % cmd)
|
'create' sub-command
:param args: cli arguments
:return:
|
entailment
|
def close(self, *args, **kwargs):
"""
write close tag of MRV file and close opened file
:param force: force closing of externally opened file or buffer
"""
if not self.__finalized:
self._file.write('</cml>')
self.__finalized = True
super().close(*args, **kwargs)
|
write close tag of MRV file and close opened file
:param force: force closing of externally opened file or buffer
|
entailment
|
def write(self, data):
"""
write single molecule or reaction into file
"""
self._file.write('<cml>')
self.__write(data)
self.write = self.__write
|
write single molecule or reaction into file
|
entailment
|
def _load_cache(self):
"""
        this method is implemented for optimization: byte positions will not be re-read from a file
        that has already been used. Note that if the content of the file has changed while the name
        stayed the same, the stale byte offsets will still be loaded
        :return: list of byte offsets from the existing file
"""
try:
with open(self.__cache_path, 'rb') as f:
return load(f)
except FileNotFoundError:
return
except IsADirectoryError as e:
raise IsADirectoryError(f'Please delete {self.__cache_path} directory') from e
except (UnpicklingError, EOFError) as e:
raise UnpicklingError(f'Invalid cache file {self.__cache_path}. Please delete it') from e
|
this method is implemented for optimization: byte positions will not be re-read from a file
that has already been used. Note that if the content of the file has changed while the name
stayed the same, the stale byte offsets will still be loaded
:return: list of byte offsets from the existing file
|
entailment
|
def _dump_cache(self, _shifts):
"""
        _shifts is dumped into the /tmp directory; after a reboot it will be dropped
"""
with open(self.__cache_path, 'wb') as f:
dump(_shifts, f)
|
_shifts is dumped into the /tmp directory; after a reboot it will be dropped
|
entailment
|
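A generic sketch of the pickle-based offset cache pattern shown in the last two rows, with a fixed path standing in for the private `__cache_path` attribute:

```python
from pickle import dump, load, UnpicklingError

CACHE_PATH = '/tmp/offsets.pickle'  # stand-in for self.__cache_path

def dump_cache(shifts):
    with open(CACHE_PATH, 'wb') as f:
        dump(shifts, f)

def load_cache():
    try:
        with open(CACHE_PATH, 'rb') as f:
            return load(f)
    except FileNotFoundError:
        return None  # no cache yet; caller re-scans the file
    except (UnpicklingError, EOFError) as e:
        raise UnpicklingError(f'Invalid cache file {CACHE_PATH}. Please delete it') from e

dump_cache([0, 120, 480])
print(load_cache())  # [0, 120, 480]
```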
def get_task_types(current):
"""
List task types for current user
.. code-block:: python
# request:
{
'view': '_zops_get_task_types',
}
# response:
{
'task_types': [
{'name': string, # wf name
'title': string, # title of workflow
},]
}
"""
current.output['task_types'] = [{'name': bpmn_wf.name,
'title': bpmn_wf.title}
for bpmn_wf in BPMNWorkflow.objects.all()
if current.has_permission(bpmn_wf.name)]
|
List task types for current user
.. code-block:: python
# request:
{
'view': '_zops_get_task_types',
}
# response:
{
'task_types': [
{'name': string, # wf name
'title': string, # title of workflow
},]
}
|
entailment
|
def get_task_detail(current):
"""
Show task details
.. code-block:: python
# request:
{
'view': '_zops_get_task_detail',
'key': key,
}
# response:
{
'task_title': string,
'task_detail': string, # markdown formatted text
}
"""
task_inv = TaskInvitation.objects.get(current.input['key'])
obj = task_inv.instance.get_object()
current.output['task_title'] = task_inv.instance.task.name
current.output['task_detail'] = """Explain: %s
State: %s""" % (obj.__unicode__() if obj else '', task_inv.progress)
|
Show task details
.. code-block:: python
# request:
{
'view': '_zops_get_task_detail',
'key': key,
}
# response:
{
'task_title': string,
'task_detail': string, # markdown formatted text
}
|
entailment
|
def get_task_actions(current):
"""
    List actions available for the selected task invitation
.. code-block:: python
# request:
{
'view': '_zops_get_task_actions',
'key': key,
}
# response:
{
'key': key,
'actions': [{"title":':'Action Title', "wf": "workflow_name"},]
}
"""
task_inv = TaskInvitation.objects.get(current.input['key'])
actions = [{"title": __(u"Assign Someone Else"), "wf": "assign_same_abstract_role"},
{"title": __(u"Suspend"), "wf": "suspend_workflow"},
{"title": __(u"Postpone"), "wf": "postpone_workflow"}]
if task_inv.instance.current_actor != current.role:
actions.append({"title": __(u"Assign Yourself"), "wf": "task_assign_yourself"})
current.output['key'] = task_inv.key
current.output['actions'] = actions
|
List actions available for the selected task invitation
.. code-block:: python
# request:
{
'view': '_zops_get_task_actions',
'key': key,
}
# response:
{
'key': key,
'actions': [{"title":':'Action Title', "wf": "workflow_name"},]
}
|
entailment
|
def get_tasks(current):
"""
List task invitations of current user
.. code-block:: python
# request:
{
'view': '_zops_get_tasks',
'state': string, # one of these:
# "active", "future", "finished", "expired"
'inverted': boolean, # search on other people's tasks
'query': string, # optional. for searching on user's tasks
'wf_type': string, # optional. only show tasks of selected wf_type
'start_date': datetime, # optional. only show tasks starts after this date
'finish_date': datetime, # optional. only show tasks should end before this date
}
# response:
{
'task_list': [
        {'token': key, # wf token (key of WFInstance)
         'key': key, # invitation key (key of TaskInvitation)
         'title': string, # task title
         'wf_type': string, # name of workflow
         'state': int, # state of invitation
         # zengine.models.workflow_manager.TASK_STATES
         'start_date': string, # start date
         'finish_date': string, # end date
},],
'active_task_count': int,
'future_task_count': int,
'finished_task_count': int,
'expired_task_count': int,
}
"""
# TODO: Also return invitations for user's other roles
# TODO: Handle automatic role switching
STATE_DICT = {
'active': [20, 30],
'future': 10,
'finished': 40,
'expired': 90
}
state = STATE_DICT[current.input['state']]
if isinstance(state, list):
queryset = TaskInvitation.objects.filter(progress__in=state)
else:
queryset = TaskInvitation.objects.filter(progress=state)
if 'inverted' in current.input:
# show other user's tasks
allowed_workflows = [bpmn_wf.name for bpmn_wf in BPMNWorkflow.objects.all()
if current.has_permission(bpmn_wf.name)]
queryset = queryset.exclude(role_id=current.role_id).filter(wf_name__in=allowed_workflows)
else:
# show current user's tasks
queryset = queryset.filter(role_id=current.role_id)
if 'query' in current.input:
queryset = queryset.filter(search_data__contains=current.input['query'].lower())
if 'wf_type' in current.input:
queryset = queryset.filter(wf_name=current.input['wf_type'])
if 'start_date' in current.input:
queryset = queryset.filter(start_date__gte=datetime.strptime(current.input['start_date'], "%d.%m.%Y"))
if 'finish_date' in current.input:
queryset = queryset.filter(finish_date__lte=datetime.strptime(current.input['finish_date'], "%d.%m.%Y"))
current.output['task_list'] = [
{
'token': inv.instance.key,
'key': inv.key,
'title': inv.title,
'wf_type': inv.wf_name,
'state': inv.progress,
'start_date': format_date(inv.start_date),
'finish_date': format_date(inv.finish_date),
'description': inv.instance.wf.description,
'status': inv.ownership}
for inv in queryset
]
task_inv_list = TaskInvitation.objects.filter(role_id=current.role_id)
    current.output['task_count'] = {
        'active': task_inv_list.filter(progress__in=STATE_DICT['active']).count(),
        'future': task_inv_list.filter(progress=STATE_DICT['future']).count(),
        'finished': task_inv_list.filter(progress=STATE_DICT['finished']).count(),
        'expired': task_inv_list.filter(progress=STATE_DICT['expired']).count()
}
|
List task invitations of current user
.. code-block:: python
# request:
{
'view': '_zops_get_tasks',
'state': string, # one of these:
# "active", "future", "finished", "expired"
'inverted': boolean, # search on other people's tasks
'query': string, # optional. for searching on user's tasks
'wf_type': string, # optional. only show tasks of selected wf_type
'start_date': datetime, # optional. only show tasks starts after this date
'finish_date': datetime, # optional. only show tasks should end before this date
}
# response:
{
'task_list': [
{'token': key, # wf token (key of WFInstance)
 'key': key, # invitation key (key of TaskInvitation)
 'title': string, # task title
 'wf_type': string, # name of workflow
 'state': int, # state of invitation
 # zengine.models.workflow_manager.TASK_STATES
 'start_date': string, # start date
 'finish_date': string, # end date
},],
'active_task_count': int,
'future_task_count': int,
'finished_task_count': int,
'expired_task_count': int,
}
|
entailment
|
def reduce_memory_usage(df):
"""reduce memory usage of the dataframe
- convert runIDs to categorical
- downcast ints and floats
"""
usage_pre = df.memory_usage(deep=True).sum()
if "runIDs" in df:
df.loc[:, "runIDs"] = df.loc[:, "runIDs"].astype("category")
df_int = df.select_dtypes(include=['int'])
df_float = df.select_dtypes(include=['float'])
df.loc[:, df_int.columns] = df_int.apply(pd.to_numeric, downcast='unsigned')
df.loc[:, df_float.columns] = df_float.apply(pd.to_numeric, downcast='float')
usage_post = df.memory_usage(deep=True).sum()
logging.info("Reduced DataFrame memory usage from {}Mb to {}Mb".format(
usage_pre / 1024**2, usage_post / 1024**2))
if usage_post > 4e9 and "readIDs" in df:
logging.info("DataFrame of features is too big, dropping read identifiers.")
return df.drop(["readIDs"], axis=1, errors="ignore")
else:
return df
|
reduce memory usage of the dataframe
- convert runIDs to categorical
- downcast ints and floats
|
entailment
|
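A small, self-contained demonstration of the downcasting idea above (not nanoget's API; the column names are made up):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"lengths": np.arange(1000, dtype="int64"),
                   "quals": np.random.rand(1000).astype("float64")})
before = df.memory_usage(deep=True).sum()
# Downcast to the smallest dtype that can hold the values.
df["lengths"] = pd.to_numeric(df["lengths"], downcast="unsigned")  # int64 -> uint16
df["quals"] = pd.to_numeric(df["quals"], downcast="float")         # float64 -> float32
after = df.memory_usage(deep=True).sum()
print("{} -> {} bytes".format(before, after))
```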
def check_existance(f):
"""Check if the file supplied as input exists."""
if not opath.isfile(f):
logging.error("Nanoget: File provided doesn't exist or the path is incorrect: {}".format(f))
sys.exit("File provided doesn't exist or the path is incorrect: {}".format(f))
|
Check if the file supplied as input exists.
|
entailment
|
def list_user_roles(self):
"""
Lists user roles as selectable except user's current role.
"""
_form = JsonForm(current=self.current, title=_(u"Switch Role"))
_form.help_text = "Your current role: %s %s" % (self.current.role.unit.name,
self.current.role.abstract_role.name)
switch_roles = self.get_user_switchable_roles()
    _form.role_options = fields.Integer(_(u"Please, choose the role you want to switch:"),
                                        choices=switch_roles, default=switch_roles[0][0],
required=True)
_form.switch = fields.Button(_(u"Switch"))
self.form_out(_form)
|
Lists user roles as selectable except user's current role.
|
entailment
|
def change_user_role(self):
"""
Changes user's role from current role to chosen role.
"""
# Get chosen role_key from user form.
role_key = self.input['form']['role_options']
# Assign chosen switch role key to user's last_login_role_key field
self.current.user.last_login_role_key = role_key
self.current.user.save()
auth = AuthBackend(self.current)
# According to user's new role, user's session set again.
auth.set_user(self.current.user)
# Dashboard is reloaded according to user's new role.
self.current.output['cmd'] = 'reload'
|
Changes user's role from current role to chosen role.
|
entailment
|
def get_user_switchable_roles(self):
"""
Returns user's role list except current role as a tuple
(role.key, role.name)
Returns:
(list): list of tuples, user's role list except current role
"""
roles = []
for rs in self.current.user.role_set:
# rs.role != self.current.role is not True after python version 2.7.12
if rs.role.key != self.current.role.key:
roles.append((rs.role.key, '%s %s' % (rs.role.unit.name,
rs.role.abstract_role.name)))
return roles
|
Returns user's role list except current role as a tuple
(role.key, role.name)
Returns:
(list): list of tuples, user's role list except current role
|
entailment
|
def put_object(self, obj):
    """
    Wrapper for es.index, determines metadata needed to index from obj.
    If you have a raw object json string you can hard code these:
    index is .kibana (as of kibana4);
    id can be A-Za-z0-9\- and must be unique;
    doc_type is either visualization, dashboard, search
    or for settings docs: config, or index-pattern.
    """
    # TODO consider putting into a ES class
    self.pr_dbg('put_obj: %s' % self.json_dumps(obj))
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object, no index")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object, no _id")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object, no _type")
if obj['_source'] is None or obj['_source'] == "":
raise Exception("Invalid Object, no _source")
self.connect_es()
self.es.indices.create(index=obj['_index'], ignore=400, timeout="2m")
try:
resp = self.es.index(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'],
body=obj['_source'], timeout="2m")
except RequestError as e:
self.pr_err('RequestError: %s, info: %s' % (e.error, e.info))
raise
return resp
|
Wrapper for es.index, determines metadata needed to index from obj.
If you have a raw object json string you can hard code these:
index is .kibana (as of kibana4);
id can be A-Za-z0-9\- and must be unique;
doc_type is either visualization, dashboard, search
or for settings docs: config, or index-pattern.
|
entailment
|
def del_object(self, obj):
"""Debug deletes obj of obj[_type] with id of obj['_id']"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object")
self.connect_es()
self.es.delete(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'])
|
Debug deletes obj of obj[_type] with id of obj['_id']
|
entailment
|
def json_dumps(self, obj):
"""Serializer for consistency"""
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
|
Serializer for consistency
|
entailment
|
def safe_filename(self, otype, oid):
"""Santize obj name into fname and verify doesn't already exist"""
permitted = set(['_', '-', '(', ')'])
oid = ''.join([c for c in oid if c.isalnum() or c in permitted])
while oid.find('--') != -1:
oid = oid.replace('--', '-')
ext = 'json'
ts = datetime.now().strftime("%Y%m%dT%H%M%S")
fname = ''
is_new = False
while not is_new:
oid_len = 255 - len('%s--%s.%s' % (otype, ts, ext))
fname = '%s-%s-%s.%s' % (otype, oid[:oid_len], ts, ext)
is_new = True
if os.path.exists(fname):
is_new = False
ts += '-bck'
return fname
|
Sanitize obj name into fname and verify it doesn't already exist
|
entailment
|
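The sanitization step in `safe_filename` can be isolated as below (the function name is hypothetical):

```python
def sanitize_id(oid):
    # Keep alphanumerics plus a small permitted set, then collapse '--' runs.
    permitted = {'_', '-', '(', ')'}
    oid = ''.join(c for c in oid if c.isalnum() or c in permitted)
    while '--' in oid:
        oid = oid.replace('--', '-')
    return oid

print(sanitize_id('My Dash: prod // v2'))  # MyDashprodv2
print(sanitize_id('a--b----c'))            # a-b-c
```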
def write_object_to_file(self, obj, path='.', filename=None):
"""Convert obj (dict) to json string and write to file"""
output = self.json_dumps(obj) + '\n'
if filename is None:
filename = self.safe_filename(obj['_type'], obj['_id'])
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
# self.pr_dbg("Contents: " + output)
return filename
|
Convert obj (dict) to json string and write to file
|
entailment
|
def write_pkg_to_file(self, name, objects, path='.', filename=None):
"""Write a list of related objs to file"""
# Kibana uses an array of docs, do the same
# as opposed to a dict of docs
pkg_objs = []
for _, obj in iteritems(objects):
pkg_objs.append(obj)
sorted_pkg = sorted(pkg_objs, key=lambda k: k['_id'])
output = self.json_dumps(sorted_pkg) + '\n'
if filename is None:
filename = self.safe_filename('Pkg', name)
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
return filename
|
Write a list of related objs to file
|
entailment
|
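A hedged sketch combining get_objects with write_pkg_to_file (exporter instance and the 'backups' directory are assumptions):

# Back up every visualization as a single Kibana-style JSON array.
objects = exporter.get_objects('type', 'visualization')
exporter.write_pkg_to_file('all-visualizations', objects, path='backups')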
def get_objects(self, search_field, search_val):
"""Return all objects of type (assumes < MAX_HITS)"""
query = ("{ size: " + str(self.max_hits) + ", " +
"query: { filtered: { filter: { " +
search_field + ": { value: \"" + search_val + "\"" +
" } } } } } }")
self.connect_es()
res = self.es.search(index=self.index, body=query)
# self.pr_dbg("%d Hits:" % res['hits']['total'])
objects = {}
for doc in res['hits']['hits']:
objects[doc['_id']] = {}
# To make uploading easier in the future:
# Record all those bits into the backup.
# Mimics how ES returns the result.
# Prevents having to store this in some external, contrived, format
objects[doc['_id']]['_index'] = self.index # also in doc['_index']
objects[doc['_id']]['_type'] = doc['_type']
objects[doc['_id']]['_id'] = doc['_id']
objects[doc['_id']]['_source'] = doc['_source'] # the actual result
return objects
|
Return all objects matching search_field == search_val (assumes < max_hits results)
|
entailment
|
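Hedged usage sketch (exporter instance assumed; keys of the returned dict are document ids):

searches = exporter.get_objects('type', 'search')
for sid, doc in searches.items():
    print(sid, doc['_type'])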
def get_dashboard_full(self, db_name):
"""Get DB and all objs needed to duplicate it"""
objects = {}
dashboards = self.get_objects("type", "dashboard")
vizs = self.get_objects("type", "visualization")
searches = self.get_objects("type", "search")
if db_name not in dashboards:
return None
self.pr_inf("Found dashboard: " + db_name)
objects[db_name] = dashboards[db_name]
panels = json.loads(dashboards[db_name]['_source']['panelsJSON'])
for panel in panels:
if 'id' not in panel:
continue
pid = panel['id']
if pid in searches:
self.pr_inf("Found search: " + pid)
objects[pid] = searches[pid]
elif pid in vizs:
self.pr_inf("Found vis: " + pid)
objects[pid] = vizs[pid]
emb = vizs[pid].get('_source', {}).get('savedSearchId', None)
if emb is not None and emb not in objects:
if emb not in searches:
self.pr_err('Missing search %s' % emb)
return objects
objects[emb] = searches[emb]
return objects
|
Get a dashboard and all objects needed to duplicate it
|
entailment
|
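A hedged sketch tying the pieces together (exporter instance and the dashboard id are assumptions):

objs = exporter.get_dashboard_full('my-dashboard')
if objs is not None:
    # One file holding the dashboard plus its visualizations and saved searches.
    exporter.write_pkg_to_file('my-dashboard', objs)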
def parse_node(self, node):
"""
Overrides ProcessParser.parse_node
        Parses and attaches the inputOutput tags that are created by the Camunda Modeler
Args:
node: xml task node
Returns:
TaskSpec
"""
spec = super(CamundaProcessParser, self).parse_node(node)
spec.data = self._parse_input_data(node)
spec.data['lane_data'] = self._get_lane_properties(node)
spec.defines = spec.data
        service_class = node.get(full_attr('assignee'))
        if service_class:
            self.parsed_nodes[node.get('id')].service_class = service_class
return spec
|
Overrides ProcessParser.parse_node
Parses and attaches the inputOutput tags that are created by the Camunda Modeler
Args:
node: xml task node
Returns:
TaskSpec
|
entailment
|
def _get_description(self):
"""
        Tries to get the WF description from 'collaboration', 'process' or 'participant'
        Returns:
            str: WF description, if any.
"""
ns = {'ns': '{%s}' % BPMN_MODEL_NS}
desc = (
self.doc_xpath('.//{ns}collaboration/{ns}documentation'.format(**ns)) or
self.doc_xpath('.//{ns}process/{ns}documentation'.format(**ns)) or
self.doc_xpath('.//{ns}collaboration/{ns}participant/{ns}documentation'.format(**ns))
)
if desc:
return desc[0].findtext('.')
|
Tries to get the WF description from 'collaboration', 'process' or 'participant'
Returns:
str: WF description, if any.
|
entailment
|
def get_name(self):
"""
        Tries to get the WF name from 'process', 'collaboration' or 'participant'
Returns:
str. WF name.
"""
ns = {'ns': '{%s}' % BPMN_MODEL_NS}
for path in ('.//{ns}process',
'.//{ns}collaboration',
'.//{ns}collaboration/{ns}participant/'):
tag = self.doc_xpath(path.format(**ns))
if tag:
name = tag[0].get('name')
if name:
return name
return self.get_id()
|
Tries to get the WF name from 'process', 'collaboration' or 'participant'
Returns:
str. WF name.
|
entailment
|
def _parse_input_data(self, node):
"""
        Parses the inputOutput part of Camunda Modeler extensions.
Args:
node: SpiffWorkflow Node object.
Returns:
Data dict.
"""
data = DotDict()
try:
for nod in self._get_input_nodes(node):
data.update(self._parse_input_node(nod))
        except Exception:
            log.exception("Error while processing node: %s" % node)
return data
|
Parses the inputOutput part of Camunda Modeler extensions.
Args:
node: SpiffWorkflow Node object.
Returns:
Data dict.
|
entailment
|
def _get_lane_properties(self, node):
"""
Parses the given XML node
Args:
node (xml): XML node.
.. code-block:: xml
<bpmn2:lane id="Lane_8" name="Lane 8">
<bpmn2:extensionElements>
<camunda:properties>
<camunda:property value="foo,bar" name="perms"/>
</camunda:properties>
</bpmn2:extensionElements>
</bpmn2:lane>
Returns:
{'perms': 'foo,bar'}
"""
lane_name = self.get_lane(node.get('id'))
lane_data = {'name': lane_name}
for a in self.xpath(".//bpmn:lane[@name='%s']/*/*/" % lane_name):
lane_data[a.attrib['name']] = a.attrib['value'].strip()
return lane_data
|
Parses the given XML node
Args:
node (xml): XML node.
.. code-block:: xml
<bpmn2:lane id="Lane_8" name="Lane 8">
<bpmn2:extensionElements>
<camunda:properties>
<camunda:property value="foo,bar" name="perms"/>
</camunda:properties>
</bpmn2:extensionElements>
</bpmn2:lane>
Returns:
{'perms': 'foo,bar'}
|
entailment
|
def _parse_input_node(cls, node):
"""
:param node: xml node
:return: dict
"""
        data = {}
        child = node.getchildren()
        if not child and node.get('name'):
            data[node.get('name')] = node.text
        elif child:  # if tag = "{http://activiti.org/bpmn}script" then data_typ = 'script'
            data_typ = child[0].tag.split('}')[1]
            data[node.get('name')] = getattr(cls, '_parse_%s' % data_typ)(child[0])
        return data
|
:param node: xml node
:return: dict
|
entailment
|
def package_in_memory(cls, workflow_name, workflow_files):
"""
Generates wf packages from workflow diagrams.
Args:
workflow_name: Name of wf
            workflow_files: Glob pattern matching the diagram files.
Returns:
Workflow package (file like) object
"""
s = StringIO()
p = cls(s, workflow_name, meta_data=[])
p.add_bpmn_files_by_glob(workflow_files)
p.create_package()
return s.getvalue()
|
Generates wf packages from workflow diagrams.
Args:
workflow_name: Name of wf
workflow_files: Glob pattern matching the diagram files.
Returns:
Workflow package (file like) object
|
entailment
|
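A hedged sketch of calling it; the packager class name and file layout are assumptions:

# WorkflowPackager stands in for the concrete Packager subclass above.
pkg = WorkflowPackager.package_in_memory('crm_workflow', 'workflows/*.bpmn')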
def compose(self, data):
"""
        condense a reaction container to CGR; see __init__ for details about cgr_type
:param data: ReactionContainer
:return: CGRContainer
"""
g = self.__separate(data) if self.__cgr_type in (1, 2, 3, 4, 5, 6) else self.__condense(data)
g.meta.update(data.meta)
return g
|
condense a reaction container to CGR; see __init__ for details about cgr_type
:param data: ReactionContainer
:return: CGRContainer
|
entailment
|
def add_atom(self, atom, _map=None):
"""
new atom addition
"""
if _map is None:
_map = max(self, default=0) + 1
elif _map in self._node:
raise KeyError('atom with same number exists')
attr_dict = self.node_attr_dict_factory()
if isinstance(atom, str):
attr_dict.element = atom
elif isinstance(atom, int):
attr_dict.element = elements_list[atom - 1]
else:
attr_dict.update(atom)
self._adj[_map] = self.adjlist_inner_dict_factory()
self._node[_map] = attr_dict
self.flush_cache()
return _map
|
new atom addition
|
entailment
|
def add_bond(self, atom1, atom2, bond):
"""
implementation of bond addition
"""
if atom1 == atom2:
raise KeyError('atom loops impossible')
if atom1 not in self._node or atom2 not in self._node:
raise KeyError('atoms not found')
if atom1 in self._adj[atom2]:
raise KeyError('atoms already bonded')
attr_dict = self.edge_attr_dict_factory()
if isinstance(bond, int):
attr_dict.order = bond
else:
attr_dict.update(bond)
self._adj[atom1][atom2] = self._adj[atom2][atom1] = attr_dict
self.flush_cache()
|
implementation of bond addition
|
entailment
|
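A small hedged sketch of building a structure with add_atom and add_bond, assuming a concrete BaseContainer subclass (CGRtools' MoleculeContainer is used for illustration):

from CGRtools.containers import MoleculeContainer

m = MoleculeContainer()
c = m.add_atom('C')   # element symbol ...
o = m.add_atom(8)     # ... or atomic number (8 -> O)
m.add_bond(c, o, 2)   # order 2, i.e. C=O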
def delete_bond(self, n, m):
"""
        implementation of bond removal
"""
self.remove_edge(n, m)
self.flush_cache()
|
implementation of bond removal
|
entailment
|
def environment(self, atom):
"""
pairs of (bond, atom) connected to atom
:param atom: number
        :return: tuple of (bond, atom) pairs
"""
return tuple((bond, self._node[n]) for n, bond in self._adj[atom].items())
|
pairs of (bond, atom) connected to atom
:param atom: number
:return: tuple of (bond, atom) pairs
|
entailment
|
def substructure(self, atoms, meta=False, as_view=True):
"""
create substructure containing atoms from nbunch list
:param atoms: list of atoms numbers of substructure
:param meta: if True metadata will be copied to substructure
:param as_view: If True, the returned graph-view provides a read-only view
of the original structure scaffold without actually copying any data.
"""
s = self.subgraph(atoms)
if as_view:
s.add_atom = s.add_bond = s.delete_atom = s.delete_bond = frozen # more informative exception
return s
s = s.copy()
if not meta:
s.graph.clear()
return s
|
create substructure containing atoms from nbunch list
:param atoms: list of atoms numbers of substructure
:param meta: if True metadata will be copied to substructure
:param as_view: If True, the returned graph-view provides a read-only view
of the original structure scaffold without actually copying any data.
|
entailment
|
def augmented_substructure(self, atoms, dante=False, deep=1, meta=False, as_view=True):
"""
create substructure containing atoms and their neighbors
:param atoms: list of core atoms in graph
:param dante: if True return list of graphs containing atoms, atoms + first circle, atoms + 1st + 2nd,
etc up to deep or while new nodes available
:param deep: number of bonds between atoms and neighbors
:param meta: copy metadata to each substructure
:param as_view: If True, the returned graph-view provides a read-only view
of the original graph without actually copying any data
"""
nodes = [set(atoms)]
for i in range(deep):
n = {y for x in nodes[-1] for y in self._adj[x]} | nodes[-1]
if n in nodes:
break
nodes.append(n)
if dante:
return [self.substructure(a, meta, as_view) for a in nodes]
else:
return self.substructure(nodes[-1], meta, as_view)
|
create substructure containing atoms and their neighbors
:param atoms: list of core atoms in graph
:param dante: if True return list of graphs containing atoms, atoms + first circle, atoms + 1st + 2nd,
etc up to deep or while new nodes available
:param deep: number of bonds between atoms and neighbors
:param meta: copy metadata to each substructure
:param as_view: If True, the returned graph-view provides a read-only view
of the original graph without actually copying any data
|
entailment
|
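Hedged usage sketch (m is assumed to be a populated container; the atom number is illustrative):

# Atom 1 plus everything within two bonds of it, as a read-only view.
core = m.augmented_substructure([1], deep=2)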
def split(self, meta=False):
"""
split disconnected structure to connected substructures
:param meta: copy metadata to each substructure
:return: list of substructures
"""
return [self.substructure(c, meta, False) for c in connected_components(self)]
|
split disconnected structure to connected substructures
:param meta: copy metadata to each substructure
:return: list of substructures
|
entailment
|
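Hedged sketch for split (m is assumed to be a disconnected structure, e.g. a salt):

for part in m.split():
    print(len(part))  # atom count of each connected component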
def bonds(self):
"""
        iterate over all bonds
"""
seen = set()
for n, m_bond in self._adj.items():
seen.add(n)
for m, bond in m_bond.items():
if m not in seen:
yield n, m, bond
|
iterate over all bonds
|
entailment
|
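Hedged iteration sketch; mol is a populated container and the .order attribute is assumed from add_bond above:

for n, m_, bond in mol.bonds():
    print(n, m_, bond.order)  # each bond is yielded exactly once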
def _get_subclass(name):
"""
        needed to solve a cyclic import
"""
return next(x for x in BaseContainer.__subclasses__() if x.__name__ == name)
|
needed to solve a cyclic import
|
entailment
|
def _default_make_pool(http, proxy_info):
"""Creates a urllib3.PoolManager object that has SSL verification enabled
and uses the certifi certificates."""
if not http.ca_certs:
http.ca_certs = _certifi_where_for_ssl_version()
ssl_disabled = http.disable_ssl_certificate_validation
cert_reqs = 'CERT_REQUIRED' if http.ca_certs and not ssl_disabled else None
    if callable(proxy_info):
proxy_info = proxy_info()
if proxy_info:
if proxy_info.proxy_user and proxy_info.proxy_pass:
proxy_url = 'http://{}:{}@{}:{}/'.format(
proxy_info.proxy_user, proxy_info.proxy_pass,
proxy_info.proxy_host, proxy_info.proxy_port,
)
proxy_headers = urllib3.util.request.make_headers(
proxy_basic_auth='{}:{}'.format(
proxy_info.proxy_user, proxy_info.proxy_pass,
)
)
else:
proxy_url = 'http://{}:{}/'.format(
proxy_info.proxy_host, proxy_info.proxy_port,
)
proxy_headers = {}
return urllib3.ProxyManager(
proxy_url=proxy_url,
proxy_headers=proxy_headers,
ca_certs=http.ca_certs,
cert_reqs=cert_reqs,
)
return urllib3.PoolManager(
ca_certs=http.ca_certs,
cert_reqs=cert_reqs,
)
|
Creates a urllib3.PoolManager object that has SSL verification enabled
and uses the certifi certificates.
|
entailment
|
def patch(make_pool=_default_make_pool):
"""Monkey-patches httplib2.Http to be httplib2shim.Http.
    This effectively makes all clients of httplib2 use urllib3. It's preferable
to specify httplib2shim.Http explicitly where you can, but this can be
useful in situations where you do not control the construction of the http
object.
Args:
make_pool: A function that returns a urllib3.Pool-like object. This
allows you to specify special arguments to your connection pool if
needed. By default, this will create a urllib3.PoolManager with
SSL verification enabled using the certifi certificates.
"""
setattr(httplib2, '_HttpOriginal', httplib2.Http)
httplib2.Http = Http
Http._make_pool = make_pool
|
Monkey-patches httplib2.Http to be httplib2shim.Http.
This effectively makes all clients of httplib2 use urllib3. It's preferable
to specify httplib2shim.Http explicitly where you can, but this can be
useful in situations where you do not control the construction of the http
object.
Args:
make_pool: A function that returns a urllib3.Pool-like object. This
allows you to specify special arguments to your connection pool if
needed. By default, this will create a urllib3.PoolManager with
SSL verification enabled using the certifi certificates.
|
entailment
|
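The intended usage is a one-time, process-wide patch at startup:

import httplib2shim

# After this, any code constructing httplib2.Http() gets the urllib3-backed shim.
httplib2shim.patch()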
def _is_ipv6(addr):
"""Checks if a given address is an IPv6 address."""
try:
socket.inet_pton(socket.AF_INET6, addr)
return True
except socket.error:
return False
|
Checks if a given address is an IPv6 address.
|
entailment
|
def _certifi_where_for_ssl_version():
"""Gets the right location for certifi certifications for the current SSL
version.
Older versions of SSL don't support the stronger set of root certificates.
"""
if not ssl:
return
if ssl.OPENSSL_VERSION_INFO < (1, 0, 2):
warnings.warn(
'You are using an outdated version of OpenSSL that '
'can\'t use stronger root certificates.')
return certifi.old_where()
return certifi.where()
|
Gets the right location for certifi certifications for the current SSL
version.
Older versions of SSL don't support the stronger set of root certificates.
|
entailment
|
def _map_response(response, decode=False):
"""Maps a urllib3 response to a httplib/httplib2 Response."""
# This causes weird deepcopy errors, so it's commented out for now.
# item._urllib3_response = response
item = httplib2.Response(response.getheaders())
item.status = response.status
item['status'] = str(item.status)
item.reason = response.reason
item.version = response.version
# httplib2 expects the content-encoding header to be stripped and the
# content length to be the length of the uncompressed content.
# This does not occur for 'HEAD' requests.
if decode and item.get('content-encoding') in ['gzip', 'deflate']:
item['content-length'] = str(len(response.data))
item['-content-encoding'] = item.pop('content-encoding')
return item
|
Maps a urllib3 response to a httplib/httplib2 Response.
|
entailment
|