| sentence1 | sentence2 | label |
|---|---|---|
def elasticsearch(line):
'''
>>> import pprint
>>> input_line = '[2017-08-30T06:27:19,158] [WARN ][o.e.m.j.JvmGcMonitorService] [Glsuj_2] [gc][296816] overhead, spent [1.2s] collecting in the last [1.3s]'
>>> output_line = elasticsearch(input_line)
>>> pprint.pprint(output_line)
{'data': {'garbage_collector': 'gc',
'gc_count': 296816.0,
'level': 'WARN',
'message': 'o.e.m.j.JvmGcMonitorService',
'plugin': 'Glsuj_2',
'query_time_ms': 1200.0,
'resp_time_ms': 1300.0,
'timestamp': '2017-08-30T06:27:19,158'},
'event': 'o.e.m.j.JvmGcMonitorService',
'level': 'WARN ',
'timestamp': '2017-08-30T06:27:19,158',
'type': 'metric'}
Case 2:
[2017-09-13T23:15:00,415][WARN ][o.e.i.e.Engine ] [Glsuj_2] [filebeat-2017.09.09][3] failed engine [index]
java.nio.file.FileSystemException: /home/user/elasticsearch/data/nodes/0/indices/jsVSO6f3Rl-wwBpQyNRCbQ/3/index/_0.fdx: Too many open files
at sun.nio.fs.UnixException.translateToIOException(UnixException.java:91) ~[?:?]
'''
# TODO we need to handle case2 logs
elasticsearch_log = line
actuallog = re.findall(r'(\[\d+\-+\d+\d+\-+\d+\w+\d+:\d+:\d+,+\d\d\d+\].*)', elasticsearch_log)
if len(actuallog) == 1:
keys = ['timestamp','level','message','plugin','garbage_collector','gc_count','query_time_ms', 'resp_time_ms']
values = re.findall(r'\[(.*?)\]', actuallog[0])
for index, i in enumerate(values):
if not isinstance(i, str):
continue
if len(re.findall(r'.*ms$', i)) > 0 and 'ms' in re.findall(r'.*ms$', i)[0]:
num = re.split('ms', i)[0]
values[index] = float(num)
continue
if len(re.findall(r'.*s$', i)) > 0 and 's' in re.findall(r'.*s$', i)[0]:
num = re.split('s', i)[0]
values[index] = float(num) * 1000
continue
data = dict(zip(keys,values))
if 'level' in data and data['level'][-1] == ' ':
data['level'] = data['level'][:-1]
if 'gc_count' in data:
data['gc_count'] = float(data['gc_count'])
event = data['message']
level=values[1]
timestamp=values[0]
return dict(
timestamp=timestamp,
level=level,
type='metric',
data=data,
event=event
)
else:
return dict(
timestamp=datetime.datetime.isoformat(datetime.datetime.now()),
data={'raw': line}
)
|
>>> import pprint
>>> input_line = '[2017-08-30T06:27:19,158] [WARN ][o.e.m.j.JvmGcMonitorService] [Glsuj_2] [gc][296816] overhead, spent [1.2s] collecting in the last [1.3s]'
>>> output_line = elasticsearch(input_line)
>>> pprint.pprint(output_line)
{'data': {'garbage_collector': 'gc',
'gc_count': 296816.0,
'level': 'WARN',
'message': 'o.e.m.j.JvmGcMonitorService',
'plugin': 'Glsuj_2',
'query_time_ms': 1200.0,
'resp_time_ms': 1300.0,
'timestamp': '2017-08-30T06:27:19,158'},
'event': 'o.e.m.j.JvmGcMonitorService',
'level': 'WARN ',
'timestamp': '2017-08-30T06:27:19,158',
'type': 'metric'}
Case 2:
[2017-09-13T23:15:00,415][WARN ][o.e.i.e.Engine ] [Glsuj_2] [filebeat-2017.09.09][3] failed engine [index]
java.nio.file.FileSystemException: /home/user/elasticsearch/data/nodes/0/indices/jsVSO6f3Rl-wwBpQyNRCbQ/3/index/_0.fdx: Too many open files
at sun.nio.fs.UnixException.translateToIOException(UnixException.java:91) ~[?:?]
|
entailment
|
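A minimal sketch (not part of the dataset row above) of what the fallback branch of elasticsearch() produces for a "Case 2" continuation line that carries no bracketed timestamp of its own; it assumes the same re/datetime imports the snippet relies on.

import datetime

# A continuation line taken from the docstring's "Case 2" example: the
# timestamp regex finds no match, so the function falls through to the
# fallback branch.
continuation = 'at sun.nio.fs.UnixException.translateToIOException(UnixException.java:91) ~[?:?]'
fallback = dict(
    timestamp=datetime.datetime.isoformat(datetime.datetime.now()),
    data={'raw': continuation},
)
# Note the fallback record has no 'type', 'level' or 'event' keys, which is
# why such lines still need to be stitched onto the preceding record (the
# TODO in the function body).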
def elasticsearch_ispartial_log(line):
'''
>>> line1 = ' [2018-04-03T00:22:38,048][DEBUG][o.e.c.u.c.QueueResizingEsThreadPoolExecutor] [search17/search]: there were [2000] tasks in [809ms], avg task time [28.4micros], EWMA task execution [790nanos], [35165.36 tasks/s], optimal queue is [35165], current capacity [1000]'
>>> line2 = ' org.elasticsearch.ResourceAlreadyExistsException: index [media_corpus_refresh/6_3sRAMsRr2r63J6gbOjQw] already exists'
>>> line3 = ' at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.validateIndexName(MetaDataCreateIndexService.java:151) ~[elasticsearch-6.2.0.jar:6.2.0]'
>>> elasticsearch_ispartial_log(line1)
False
>>> elasticsearch_ispartial_log(line2)
True
>>> elasticsearch_ispartial_log(line3)
True
'''
match_result = []
for p in LOG_BEGIN_PATTERN:
if re.match(p, line) != None:
return False
return True
|
>>> line1 = ' [2018-04-03T00:22:38,048][DEBUG][o.e.c.u.c.QueueResizingEsThreadPoolExecutor] [search17/search]: there were [2000] tasks in [809ms], avg task time [28.4micros], EWMA task execution [790nanos], [35165.36 tasks/s], optimal queue is [35165], current capacity [1000]'
>>> line2 = ' org.elasticsearch.ResourceAlreadyExistsException: index [media_corpus_refresh/6_3sRAMsRr2r63J6gbOjQw] already exists'
>>> line3 = ' at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.validateIndexName(MetaDataCreateIndexService.java:151) ~[elasticsearch-6.2.0.jar:6.2.0]'
>>> elasticsearch_ispartial_log(line1)
False
>>> elasticsearch_ispartial_log(line2)
True
>>> elasticsearch_ispartial_log(line3)
True
|
entailment
|
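elasticsearch_ispartial_log() relies on a module-level LOG_BEGIN_PATTERN that is not shown in the snippet. The sketch below is only an illustrative guess at its shape: a list of regexes matching the bracketed-timestamp prefix that starts a new record.

import re

# Hypothetical LOG_BEGIN_PATTERN: any pattern matching the start of a new
# Elasticsearch record; the real module may define it differently.
LOG_BEGIN_PATTERN = [r'\s*\[\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2},\d{3}\]']

new_record = ' [2018-04-03T00:22:38,048][DEBUG][o.e.c.u.c.QueueResizingEsThreadPoolExecutor] overhead'
traceback_line = ' at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.validateIndexName'
[re.match(LOG_BEGIN_PATTERN[0], line) is not None for line in (new_record, traceback_line)]
# -> [True, False]: only the first line starts a new record, so only the
#    second counts as "partial" in the sense of the function above.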
def get(self, attr, default=None):
"""Get an attribute defined by this session"""
attrs = self.body.get('attributes') or {}
return attrs.get(attr, default)
|
Get an attribute defined by this session
|
entailment
|
def get_all(self, cat):
"""
if data can't be found in the cache, it will be fetched from the db,
parsed and stored to the cache for each lang_code.
:param cat: cat of catalog data
:return:
"""
return self._get_from_local_cache(cat) or self._get_from_cache(cat) or self._get_from_db(cat)
|
if data can't be found in the cache, it will be fetched from the db,
parsed and stored to the cache for each lang_code.
:param cat: cat of catalog data
:return:
|
entailment
|
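A stand-alone sketch of the "local cache, then shared cache, then db" fallthrough that get_all() relies on; the names below are illustrative, not the real class attributes. The chain only works because each layer returns a falsy value (None or an empty collection) on a miss.

local_cache, redis_cache = {}, {}

def fetch_from_db(cat):
    # stand-in for the real parse-and-store query
    return [{'value': '1', 'name': 'one'}]

def get_all(cat):
    # each lookup must be falsy on a miss for `or` to fall through
    return local_cache.get(cat) or redis_cache.get(cat) or fetch_from_db(cat)

get_all('units')   # -> [{'value': '1', 'name': 'one'}]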
def _fill_get_item_cache(self, catalog, key):
"""
get from redis, cache locally then return
:param catalog: catalog name
:param key:
:return:
"""
lang = self._get_lang()
keylist = self.get_all(catalog)
self.ITEM_CACHE[lang][catalog] = dict([(i['value'], i['name']) for i in keylist])
return self.ITEM_CACHE[lang][catalog].get(key)
|
get from redis, cache locally then return
:param catalog: catalog name
:param key:
:return:
|
entailment
|
def run(self, host, port, debug=True, validate_requests=True):
"""Utility method to quickly get a server up and running.
:param debug: turns on Werkzeug debugger, code reloading, and full
logging.
:param validate_requests: whether or not to ensure that requests are
sent by Amazon. This can be useful for manually testing the server.
"""
if debug:
# Turn on all alexandra log output
logging.basicConfig(level=logging.DEBUG)
app = self.create_wsgi_app(validate_requests)
run_simple(host, port, app, use_reloader=debug, use_debugger=debug)
|
Utility method to quickly get a server up and running.
:param debug: turns on Werkzeug debugger, code reloading, and full
logging.
:param validate_requests: whether or not to ensure that requests are
sent by Amazon. This can be useful for manually testing the server.
|
entailment
|
def dispatch_request(self, body):
"""Given a parsed JSON request object, call the correct Intent, Launch,
or SessionEnded function.
This function is called after request parsing and validation and will
raise a `ValueError` if an unknown request type comes in.
:param body: JSON object loaded from incoming request's POST data.
"""
req_type = body.get('request', {}).get('type')
session_obj = body.get('session')
session = Session(session_obj) if session_obj else None
if req_type == 'LaunchRequest':
return self.launch_fn(session)
elif req_type == 'IntentRequest':
intent = body['request']['intent']['name']
intent_fn = self.intent_map.get(intent, self.unknown_intent_fn)
slots = {
slot['name']: slot.get('value')
for _, slot in
body['request']['intent'].get('slots', {}).items()
}
arity = intent_fn.__code__.co_argcount
if arity == 2:
return intent_fn(slots, session)
return intent_fn()
elif req_type == 'SessionEndedRequest':
return self.session_end_fn()
log.error('invalid request type: %s', req_type)
raise ValueError('bad request: %s', body)
|
Given a parsed JSON request object, call the correct Intent, Launch,
or SessionEnded function.
This function is called after request parsing and validation and will
raise a `ValueError` if an unknown request type comes in.
:param body: JSON object loaded from incoming request's POST data.
|
entailment
|
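To make the routing in dispatch_request() concrete, here is a minimal IntentRequest body containing only the keys the method actually reads; the intent name and slot values are illustrative.

body = {
    'session': {'attributes': {}},
    'request': {
        'type': 'IntentRequest',
        'intent': {
            'name': 'FooBarBaz',
            'slots': {'Color': {'name': 'Color', 'value': 'red'}},
        },
    },
}
# dispatch_request(body) looks up 'FooBarBaz' in intent_map and, for a
# two-argument handler, calls it as handler({'Color': 'red'}, Session(...)).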
def intent(self, intent_name):
"""Decorator to register a handler for the given intent.
The decorated function can either take 0 or 2 arguments. If two are
specified, it will be provided a dictionary of `{slot_name: value}` and
a :py:class:`alexandra.session.Session` instance.
If no session was provided in the request, the session object will be
`None`. ::
@alexa_app.intent('FooBarBaz')
def foo_bar_baz_intent(slots, session):
pass
@alexa_app.intent('NoArgs')
def noargs_intent():
pass
"""
# nested decorator so we can have params.
def _decorator(func):
arity = func.__code__.co_argcount
if arity not in [0, 2]:
raise ValueError("expected 0 or 2 argument function")
self.intent_map[intent_name] = func
return func
return _decorator
|
Decorator to register a handler for the given intent.
The decorated function can either take 0 or 2 arguments. If two are
specified, it will be provided a dictionary of `{slot_name: value}` and
a :py:class:`alexandra.session.Session` instance.
If no session was provided in the request, the session object will be
`None`. ::
@alexa_app.intent('FooBarBaz')
def foo_bar_baz_intent(slots, session):
pass
@alexa_app.intent('NoArgs')
def noargs_intent():
pass
|
entailment
|
def set_password(self, raw_password):
"""
Encrypts and sets the user's password.
Args:
raw_password (str)
"""
self.password = pbkdf2_sha512.encrypt(raw_password, rounds=10000,
salt_size=10)
|
Encrypts and sets the user's password.
Args:
raw_password (str)
|
entailment
|
def encrypt_password(self):
""" encrypt password if not already encrypted """
if self.password and not self.password.startswith('$pbkdf2'):
self.set_password(self.password)
|
encrypt password if not already encrypted
|
entailment
|
def send_notification(self, title, message, typ=1, url=None, sender=None):
"""
sends a message to the user's private mq exchange
Args:
title:
message:
sender:
url:
typ:
"""
self.created_channels.channel.add_message(
channel_key=self.prv_exchange,
body=message,
title=title,
typ=typ,
url=url,
sender=sender,
receiver=self
)
|
sends a message to the user's private mq exchange
Args:
title:
message:
sender:
url:
typ:
|
entailment
|
def send_client_cmd(self, data, cmd=None, via_queue=None):
"""
Send arbitrary cmd and data to the client.
If a queue name is passed via the "via_queue" parameter,
that queue will be used instead of the user's private exchange.
Args:
data: dict
cmd: string
via_queue: queue name,
"""
mq_channel = self._connect_mq()
if cmd:
data['cmd'] = cmd
if via_queue:
mq_channel.basic_publish(exchange='',
routing_key=via_queue,
body=json.dumps(data))
else:
mq_channel.basic_publish(exchange=self.prv_exchange,
routing_key='',
body=json.dumps(data))
|
Send arbitrary cmd and data to the client.
If a queue name is passed via the "via_queue" parameter,
that queue will be used instead of the user's private exchange.
Args:
data: dict
cmd: string
via_queue: queue name,
|
entailment
|
def seek(self, offset):
"""
shifts to the given record number in the original file
:param offset: record number
"""
if self._shifts:
if 0 <= offset < len(self._shifts):
current_pos = self._file.tell()
new_pos = self._shifts[offset]
if current_pos != new_pos:
if current_pos == self._shifts[-1]: # reached the end of the file
self._data = self.__reader()
self.__file = iter(self._file.readline, '')
self._file.seek(0)
next(self._data)
if offset: # move not to the beginning of the file
self._file.seek(new_pos)
else:
if not self.__already_seeked:
if self._shifts[0] < current_pos: # in the middle of the file
self._data.send(True)
self.__already_seeked = True
self._file.seek(new_pos)
else:
raise IndexError('invalid offset')
else:
raise self._implement_error
|
shifts to the given record number in the original file
:param offset: record number
|
entailment
|
def tell(self):
"""
:return: number of records processed from the original file
"""
if self._shifts:
t = self._file.tell()
if t == self._shifts[0]:
return 0
elif t == self._shifts[-1]:
return len(self._shifts) - 1
elif t in self._shifts:
return bisect_left(self._shifts, t)
else:
return bisect_left(self._shifts, t) - 1
raise self._implement_error
|
:return: number of records processed from the original file
|
entailment
|
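A worked example of the bisect logic in tell(), assuming _shifts holds the byte offset of the start of each record plus the end-of-file offset (which is how seek() above uses it).

from bisect import bisect_left

# records start at bytes 0, 120 and 250 of a 300-byte file
shifts = [0, 120, 250, 300]

bisect_left(shifts, 120)        # -> 1: file position is exactly at record 1
bisect_left(shifts, 180) - 1    # -> 1: file position is inside record 1
# tell() returns 0 at shifts[0] and len(shifts) - 1 at shifts[-1] as the
# explicit edge cases.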
def assign_yourself(self):
"""
Assigns the workflow to the current user.
The selected job is checked for an already assigned role.
If no role is assigned, the user takes the job
and a message is displayed saying the operation was successful.
If a role is already assigned, no operation is performed
and a message to that effect is displayed on the screen.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
"""
task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
wfi = task_invitation.instance
if not wfi.current_actor.exist:
wfi.current_actor = self.current.role
wfi.save()
[inv.delete() for inv in TaskInvitation.objects.filter(instance=wfi) if
not inv == task_invitation]
title = _(u"Successful")
msg = _(u"You have successfully assigned the job to yourself.")
else:
title = _(u"Unsuccessful")
msg = _(u"Unfortunately, this job is already taken by someone else.")
self.current.msg_box(title=title, msg=msg)
|
Assigns the workflow to the current user.
The selected job is checked for an already assigned role.
If no role is assigned, the user takes the job
and a message is displayed saying the operation was successful.
If a role is already assigned, no operation is performed
and a message to that effect is displayed on the screen.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
|
entailment
|
def select_role(self):
"""
Workflow method for assigning the job to a person with the same role and unit as the user.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
"""
roles = [(m.key, m.__unicode__()) for m in RoleModel.objects.filter(
abstract_role=self.current.role.abstract_role,
unit=self.current.role.unit) if m != self.current.role]
if roles:
_form = forms.JsonForm(title=_(u'Assign to workflow'))
_form.select_role = fields.Integer(_(u"Choose Role"), choices=roles)
_form.explain_text = fields.String(_(u"Explain Text"), required=False)
_form.send_button = fields.Button(_(u"Send"))
self.form_out(_form)
else:
title = _(u"Unsuccessful")
msg = _(u"Assign role not found")
self.current.msg_box(title=title, msg=msg)
|
Workflow method for assigning the job to a person with the same role and unit as the user.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
|
entailment
|
def send_workflow(self):
"""
Assigns a role to the workflow instance and the task invitation.
"""
task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
wfi = task_invitation.instance
select_role = self.input['form']['select_role']
if wfi.current_actor == self.current.role:
task_invitation.role = RoleModel.objects.get(select_role)
wfi.current_actor = RoleModel.objects.get(select_role)
wfi.save()
task_invitation.save()
[inv.delete() for inv in TaskInvitation.objects.filter(instance=wfi) if
not inv == task_invitation]
title = _(u"Successful")
msg = _(u"The workflow was assigned to someone else with success.")
else:
title = _(u"Unsuccessful")
msg = _(u"This workflow does not belong to you, you cannot assign it to someone else.")
self.current.msg_box(title=title, msg=msg)
|
Assigns a role to the workflow instance and the task invitation.
|
entailment
|
def select_postponed_date(self):
"""
The time intervals at which the workflow is to be extended are determined.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
"""
_form = forms.JsonForm(title="Postponed Workflow")
_form.start_date = fields.DateTime("Start Date")
_form.finish_date = fields.DateTime("Finish Date")
_form.save_button = fields.Button("Save")
self.form_out(_form)
|
The time intervals at which the workflow is to be extended are determined.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
|
entailment
|
def save_date(self):
"""
Invitations with the same workflow status are deleted.
Workflow instance and invitation roles change.
"""
task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
wfi = task_invitation.instance
if wfi.current_actor.exist and wfi.current_actor == self.current.role:
dt_start = datetime.strptime(self.input['form']['start_date'], "%d.%m.%Y")
dt_finish = datetime.strptime(self.input['form']['finish_date'], "%d.%m.%Y")
task_invitation.start_date = dt_start
task_invitation.finish_date = dt_finish
task_invitation.save()
wfi.start_date = dt_start
wfi.finish_date = dt_finish
wfi.save()
title = _(u"Successful")
msg = _(u"You've extended the workflow time.")
else:
title = _(u"Unsuccessful")
msg = _(u"This workflow does not belong to you.")
self.current.msg_box(title=title, msg=msg)
|
Invitations with the same workflow status are deleted.
Workflow instance and invitation roles change.
|
entailment
|
def suspend(self):
"""
If a role is assigned to the workflow and it is the same
as the user's role, the user can drop the workflow.
Otherwise, nothing is done.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
"""
task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
wfi = task_invitation.instance
if wfi.current_actor.exist and wfi.current_actor == self.current.role:
for m in RoleModel.objects.filter(abstract_role=self.current.role.abstract_role,
unit=self.current.role.unit):
if m != self.current.role:
task_invitation.key = ''
task_invitation.role = m
task_invitation.save()
wfi.current_actor = RoleModel()
wfi.save()
title = _(u"Successful")
msg = _(u"You left the workflow.")
else:
title = _(u"Unsuccessful")
msg = _(u"Unfortunately, this workflow does not belong to you or is already idle.")
self.current.msg_box(title=title, msg=msg)
|
If a role is assigned to the workflow and it is the same
as the user's role, the user can drop the workflow.
Otherwise, nothing is done.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
|
entailment
|
def on_home_row(self, location=None):
"""
Finds out if the piece is on the home row.
:return: bool for whether piece is on home row or not
"""
location = location or self.location
return (self.color == color.white and location.rank == 1) or \
(self.color == color.black and location.rank == 6)
|
Finds out if the piece is on the home row.
:return: bool for whether piece is on home row or not
|
entailment
|
def would_move_be_promotion(self, location=None):
"""
Finds if a move from the current location would result in promotion
:type: location: Location
:rtype: bool
"""
location = location or self.location
return (location.rank == 1 and self.color == color.black) or \
(location.rank == 6 and self.color == color.white)
|
Finds if a move from the current location would result in promotion
:type: location: Location
:rtype: bool
|
entailment
|
def square_in_front(self, location=None):
"""
Finds square directly in front of Pawn
:type: location: Location
:rtype: Location
"""
location = location or self.location
return location.shift_up() if self.color == color.white else location.shift_down()
|
Finds square directly in front of Pawn
:type: location: Location
:rtype: Location
|
entailment
|
def forward_moves(self, position):
"""
Finds possible moves one step and two steps in front
of Pawn.
:type: position: Board
:rtype: list
"""
if position.is_square_empty(self.square_in_front(self.location)):
"""
If square in front is empty add the move
"""
if self.would_move_be_promotion():
for move in self.create_promotion_moves(notation_const.PROMOTE):
yield move
else:
yield self.create_move(end_loc=self.square_in_front(self.location),
status=notation_const.MOVEMENT)
if self.on_home_row() and \
position.is_square_empty(self.two_squares_in_front(self.location)):
"""
If pawn is on home row and two squares in front of the pawn is empty
add the move
"""
yield self.create_move(
end_loc=self.square_in_front(self.square_in_front(self.location)),
status=notation_const.MOVEMENT
)
|
Finds possible moves one step and two steps in front
of Pawn.
:type: position: Board
:rtype: list
|
entailment
|
def _one_diagonal_capture_square(self, capture_square, position):
"""
Adds specified diagonal as a capture move if it is one
"""
if self.contains_opposite_color_piece(capture_square, position):
if self.would_move_be_promotion():
for move in self.create_promotion_moves(status=notation_const.CAPTURE_AND_PROMOTE,
location=capture_square):
yield move
else:
yield self.create_move(end_loc=capture_square,
status=notation_const.CAPTURE)
|
Adds specified diagonal as a capture move if it is one
|
entailment
|
def capture_moves(self, position):
"""
Finds out all possible capture moves
:rtype: list
"""
try:
right_diagonal = self.square_in_front(self.location.shift_right())
for move in self._one_diagonal_capture_square(right_diagonal, position):
yield move
except IndexError:
pass
try:
left_diagonal = self.square_in_front(self.location.shift_left())
for move in self._one_diagonal_capture_square(left_diagonal, position):
yield move
except IndexError:
pass
|
Finds out all possible capture moves
:rtype: list
|
entailment
|
def on_en_passant_valid_location(self):
"""
Finds out if pawn is on enemy center rank.
:rtype: bool
"""
return (self.color == color.white and self.location.rank == 4) or \
(self.color == color.black and self.location.rank == 3)
|
Finds out if pawn is on enemy center rank.
:rtype: bool
|
entailment
|
def _is_en_passant_valid(self, opponent_pawn_location, position):
"""
Finds if an opponent's pawn is next to this pawn
:rtype: bool
"""
try:
pawn = position.piece_at_square(opponent_pawn_location)
return pawn is not None and \
isinstance(pawn, Pawn) and \
pawn.color != self.color and \
position.piece_at_square(opponent_pawn_location).just_moved_two_steps
except IndexError:
return False
|
Finds if an opponent's pawn is next to this pawn
:rtype: bool
|
entailment
|
def add_one_en_passant_move(self, direction, position):
"""
Yields en_passant moves in given direction if it is legal.
:type: direction: function
:type: position: Board
:rtype: gen
"""
try:
if self._is_en_passant_valid(direction(self.location), position):
yield self.create_move(
end_loc=self.square_in_front(direction(self.location)),
status=notation_const.EN_PASSANT
)
except IndexError:
pass
|
Yields en_passant moves in given direction if it is legal.
:type: direction: function
:type: position: Board
:rtype: gen
|
entailment
|
def en_passant_moves(self, position):
"""
Finds possible en passant moves.
:rtype: list
"""
# if pawn is not on a valid en passant location then return None
if self.on_en_passant_valid_location():
for move in itertools.chain(self.add_one_en_passant_move(lambda x: x.shift_right(), position),
self.add_one_en_passant_move(lambda x: x.shift_left(), position)):
yield move
|
Finds possible en passant moves.
:rtype: list
|
entailment
|
def possible_moves(self, position):
"""
Finds out the locations of possible moves given board.Board position.
:pre location is on the board and a piece is at the specified location on position
:type: position: Board
:rtype: list
"""
for move in itertools.chain(self.forward_moves(position),
self.capture_moves(position),
self.en_passant_moves(position)):
yield move
|
Finds out the locations of possible moves given board.Board position.
:pre location is on the board and a piece is at the specified location on position
:type: position: Board
:rtype: list
|
entailment
|
def main():
"""
Main method
"""
print("Creating a new game...")
new_game = Game(Human(color.white), Human(color.black))
result = new_game.play()
print("Result is ", result)
|
Main method
|
entailment
|
def respond(text=None, ssml=None, attributes=None, reprompt_text=None,
reprompt_ssml=None, end_session=True):
""" Build a dict containing a valid response to an Alexa request.
If speech output is desired, either of `text` or `ssml` should
be specified.
:param text: Plain text speech output to be said by Alexa device.
:param ssml: Speech output in SSML form.
:param attributes: Dictionary of attributes to store in the session.
:param end_session: Should the session be terminated after this response?
:param reprompt_text, reprompt_ssml: Works the same as
`text`/`ssml`, but instead sets the reprompting speech output.
"""
obj = {
'version': '1.0',
'response': {
'outputSpeech': {'type': 'PlainText', 'text': ''},
'shouldEndSession': end_session
},
'sessionAttributes': attributes or {}
}
if text:
obj['response']['outputSpeech'] = {'type': 'PlainText', 'text': text}
elif ssml:
obj['response']['outputSpeech'] = {'type': 'SSML', 'ssml': ssml}
reprompt_output = None
if reprompt_text:
reprompt_output = {'type': 'PlainText', 'text': reprompt_text}
elif reprompt_ssml:
reprompt_output = {'type': 'SSML', 'ssml': reprompt_ssml}
if reprompt_output:
obj['response']['reprompt'] = {'outputSpeech': reprompt_output}
return obj
|
Build a dict containing a valid response to an Alexa request.
If speech output is desired, either of `text` or `ssml` should
be specified.
:param text: Plain text speech output to be said by Alexa device.
:param ssml: Speech output in SSML form.
:param attributes: Dictionary of attributes to store in the session.
:param end_session: Should the session be terminated after this response?
:param reprompt_text, reprompt_ssml: Works the same as
`text`/`ssml`, but instead sets the reprompting speech output.
|
entailment
|
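For reference, this is the dict respond() builds for a plain-text reply that keeps the session open and sets a reprompt, following the branches in the code above.

respond(text='Hello', reprompt_text='Still there?', end_session=False)
# {
#     'version': '1.0',
#     'sessionAttributes': {},
#     'response': {
#         'outputSpeech': {'type': 'PlainText', 'text': 'Hello'},
#         'reprompt': {'outputSpeech': {'type': 'PlainText', 'text': 'Still there?'}},
#         'shouldEndSession': False,
#     },
# }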
def reprompt(text=None, ssml=None, attributes=None):
"""Convenience method to save a little bit of typing for the common case of
reprompting the user. Simply calls :py:func:`alexandra.util.respond` with
the given arguments and holds the session open.
One of either the `text` or `ssml` should be provided if any
speech output is desired.
:param text: Plain text speech output
:param ssml: Speech output in SSML format
:param attributes: Dictionary of attributes to store in the current session
"""
return respond(
reprompt_text=text,
reprompt_ssml=ssml,
attributes=attributes,
end_session=False
)
|
Convenience method to save a little bit of typing for the common case of
reprompting the user. Simply calls :py:func:`alexandra.util.respond` with
the given arguments and holds the session open.
One of either the `text` or `ssml` should be provided if any
speech output is desired.
:param text: Plain text speech output
:param ssml: Speech output in SSML format
:param attributes: Dictionary of attributes to store in the current session
|
entailment
|
def validate_request_timestamp(req_body, max_diff=150):
"""Ensure the request's timestamp doesn't fall outside of the
app's specified tolerance.
Returns True if this request is valid, False otherwise.
:param req_body: JSON object parsed out of the raw POST data of a request.
:param max_diff: Maximum allowable difference in seconds between request
timestamp and system clock. Amazon requires <= 150 seconds for
published skills.
"""
time_str = req_body.get('request', {}).get('timestamp')
if not time_str:
log.error('timestamp not present %s', req_body)
return False
req_ts = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
diff = (datetime.utcnow() - req_ts).total_seconds()
if abs(diff) > max_diff:
log.error('timestamp difference too high: %d sec', diff)
return False
return True
|
Ensure the request's timestamp doesn't fall outside of the
app's specified tolerance.
Returns True if this request is valid, False otherwise.
:param req_body: JSON object parsed out of the raw POST data of a request.
:param max_diff: Maximum allowable difference in seconds between request
timestamp and system clock. Amazon requires <= 150 seconds for
published skills.
|
entailment
|
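A small usage sketch for validate_request_timestamp(), using the same ISO-8601 Zulu format the function parses; the request bodies are illustrative.

from datetime import datetime

fresh = {'request': {'timestamp': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')}}
stale = {'request': {'timestamp': '2018-01-01T00:00:00Z'}}

validate_request_timestamp(fresh)   # -> True, well within the 150 s default
validate_request_timestamp(stale)   # -> False, logs 'timestamp difference too high'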
def validate_request_certificate(headers, data):
"""Ensure that the certificate and signature specified in the
request headers are truly from Amazon and correctly verify.
Returns True if certificate verification succeeds, False otherwise.
:param headers: Dictionary (or sufficiently dictionary-like) map of request
headers.
:param data: Raw POST data attached to this request.
"""
# Make sure we have the appropriate headers.
if 'SignatureCertChainUrl' not in headers or \
'Signature' not in headers:
log.error('invalid request headers')
return False
cert_url = headers['SignatureCertChainUrl']
sig = base64.b64decode(headers['Signature'])
cert = _get_certificate(cert_url)
if not cert:
return False
try:
# ... wtf kind of API decision is this
crypto.verify(cert, sig, data, 'sha1')
return True
except:
log.error('invalid request signature')
return False
|
Ensure that the certificate and signature specified in the
request headers are truly from Amazon and correctly verify.
Returns True if certificate verification succeeds, False otherwise.
:param headers: Dictionary (or sufficiently dictionary-like) map of request
headers.
:param data: Raw POST data attached to this request.
|
entailment
|
def _get_certificate(cert_url):
"""Download and validate a specified Amazon PEM file."""
global _cache
if cert_url in _cache:
cert = _cache[cert_url]
if cert.has_expired():
_cache = {}
else:
return cert
url = urlparse(cert_url)
host = url.netloc.lower()
path = posixpath.normpath(url.path)
# Sanity check location so we don't get some random person's cert.
if url.scheme != 'https' or \
host not in ['s3.amazonaws.com', 's3.amazonaws.com:443'] or \
not path.startswith('/echo.api/'):
log.error('invalid cert location %s', cert_url)
return
resp = urlopen(cert_url)
if resp.getcode() != 200:
log.error('failed to download certificate')
return
cert = crypto.load_certificate(crypto.FILETYPE_PEM, resp.read())
if cert.has_expired() or cert.get_subject().CN != 'echo-api.amazon.com':
log.error('certificate expired or invalid')
return
_cache[cert_url] = cert
return cert
|
Download and validate a specified Amazon PEM file.
|
entailment
|
def is_processed(self, db_versions):
"""Check if version is already applied in the database.
:param db_versions:
"""
return self.number in (v.number for v in db_versions if v.date_done)
|
Check if version is already applied in the database.
:param db_versions:
|
entailment
|
def is_noop(self):
"""Check if version is a no operation version.
"""
has_operations = [mode.pre_operations or mode.post_operations
for mode in self._version_modes.values()]
has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons
for mode in self._version_modes.values()]
noop = not any((has_upgrade_addons, has_operations))
return noop
|
Check if version is a no operation version.
|
entailment
|
def _get_version_mode(self, mode=None):
"""Return a VersionMode for a mode name.
When the mode is None, we are working with the 'base' mode.
"""
version_mode = self._version_modes.get(mode)
if not version_mode:
version_mode = self._version_modes[mode] = VersionMode(name=mode)
return version_mode
|
Return a VersionMode for a mode name.
When the mode is None, we are working with the 'base' mode.
|
entailment
|
def add_operation(self, operation_type, operation, mode=None):
"""Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
"""
version_mode = self._get_version_mode(mode=mode)
if operation_type == 'pre':
version_mode.add_pre(operation)
elif operation_type == 'post':
version_mode.add_post(operation)
else:
raise ConfigurationError(
u"Type of operation must be 'pre' or 'post', got %s" %
(operation_type,)
)
|
Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
|
entailment
|
def add_backup_operation(self, backup, mode=None):
"""Add a backup operation to the version.
:param backup: To either add or skip the backup
:type backup: Boolean
:param mode: Name of the mode in which the operation is executed
For now, backups are mode-independent
:type mode: String
"""
try:
if self.options.backup:
self.options.backup.ignore_if_operation().execute()
except OperationError:
self.backup = backup
|
Add a backup operation to the version.
:param backup: To either add or skip the backup
:type backup: Boolean
:param mode: Name of the mode in which the operation is executed
For now, backups are mode-independent
:type mode: String
|
entailment
|
def pre_operations(self, mode=None):
""" Return pre-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.pre_operations
|
Return pre-operations only for the mode asked
|
entailment
|
def post_operations(self, mode=None):
""" Return post-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.post_operations
|
Return post-operations only for the mode asked
|
entailment
|
def upgrade_addons_operation(self, addons_state, mode=None):
""" Return merged set of main addons and mode's addons """
installed = set(a.name for a in addons_state
if a.state in ('installed', 'to upgrade'))
base_mode = self._get_version_mode()
addons_list = base_mode.upgrade_addons.copy()
if mode:
add_mode = self._get_version_mode(mode=mode)
addons_list |= add_mode.upgrade_addons
to_install = addons_list - installed
to_upgrade = installed & addons_list
return UpgradeAddonsOperation(self.options, to_install, to_upgrade)
|
Return merged set of main addons and mode's addons
|
entailment
|
def copy(self):
"""
get copy of object
:return: ReactionContainer
"""
return type(self)(reagents=[x.copy() for x in self.__reagents], meta=self.__meta.copy(),
products=[x.copy() for x in self.__products],
reactants=[x.copy() for x in self.__reactants])
|
get copy of object
:return: ReactionContainer
|
entailment
|
def implicify_hydrogens(self):
"""
remove explicit hydrogens if possible
:return: number of removed hydrogens
"""
total = 0
for ml in (self.__reagents, self.__reactants, self.__products):
for m in ml:
if hasattr(m, 'implicify_hydrogens'):
total += m.implicify_hydrogens()
if total:
self.flush_cache()
return total
|
remove explicit hydrogens if possible
:return: number of removed hydrogens
|
entailment
|
def reset_query_marks(self):
"""
set or reset hyb and neighbors marks to atoms.
"""
for ml in (self.__reagents, self.__reactants, self.__products):
for m in ml:
if hasattr(m, 'reset_query_marks'):
m.reset_query_marks()
self.flush_cache()
|
set or reset hyb and neighbors marks to atoms.
|
entailment
|
def compose(self):
"""
get CGR of reaction
reagents will be presented as unchanged molecules
:return: CGRContainer
"""
rr = self.__reagents + self.__reactants
if rr:
if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in rr):
raise TypeError('Queries not composable')
r = reduce(or_, rr)
else:
r = MoleculeContainer()
if self.__products:
if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in self.__products):
raise TypeError('Queries not composable')
p = reduce(or_, self.__products)
else:
p = MoleculeContainer()
return r ^ p
|
get CGR of reaction
reagents will be presented as unchanged molecules
:return: CGRContainer
|
entailment
|
def calculate2d(self, force=True):
"""
recalculate 2d coordinates. currently rings can be calculated badly.
:param force: ignore existing coordinates of atoms
"""
for ml in (self.__reagents, self.__reactants, self.__products):
for m in ml:
m.calculate2d(force)
self.fix_positions()
|
recalculate 2d coordinates. currently rings can be calculated badly.
:param force: ignore existing coordinates of atoms
|
entailment
|
def fix_positions(self):
"""
fix coordinates of molecules in reaction
"""
shift_x = 0
for m in self.__reactants:
max_x = self.__fix_positions(m, shift_x, 0)
shift_x = max_x + 1
arrow_min = shift_x
if self.__reagents:
for m in self.__reagents:
max_x = self.__fix_positions(m, shift_x, 1.5)
shift_x = max_x + 1
else:
shift_x += 3
arrow_max = shift_x - 1
for m in self.__products:
max_x = self.__fix_positions(m, shift_x, 0)
shift_x = max_x + 1
self._arrow = (arrow_min, arrow_max)
self.flush_cache()
|
fix coordinates of molecules in reaction
|
entailment
|
def get_role_keys(cls, unit_key):
"""
:param unit_key: Parent unit key
:return: role keys of subunits
"""
stack = Role.objects.filter(unit_id=unit_key).values_list('key', flatten=True)
for unit_key in cls.objects.filter(parent_id=unit_key).values_list('key', flatten=True):
stack.extend(cls.get_role_keys(unit_key))
return stack
|
:param unit_key: Parent unit key
:return: role keys of subunits
|
entailment
|
def get_permissions(self):
"""
Permissions of the user.
Returns:
List of Permission objects.
"""
user_role = self.last_login_role() if self.last_login_role_key else self.role_set[0].role
return user_role.get_permissions()
|
Permissions of the user.
Returns:
List of Permission objects.
|
entailment
|
def get_permissions(self):
"""
Finds the Permission objects belonging to the abstract role and
returns their code values.
Returns:
list: Permission code values
"""
return [p.permission.code for p in self.Permissions if p.permission.code]
|
Finds the Permission objects belonging to the abstract role and
returns their code values.
Returns:
list: Permission code values
|
entailment
|
def add_permission(self, perm):
"""
Allows defining a Permission object for the abstract Role.
Args:
perm (object):
"""
self.Permissions(permission=perm)
PermissionCache.flush()
self.save()
|
Allows defining a Permission object for the abstract Role.
Args:
perm (object):
|
entailment
|
def add_permission_by_name(self, code, save=False):
"""
Adds a permission with given name.
Args:
code (str): Code name of the permission.
save (bool): If False, does nothing.
"""
if not save:
return ["%s | %s" % (p.name, p.code) for p in
Permission.objects.filter(code__contains=code)]
for p in Permission.objects.filter(code__contains=code):
if p not in self.Permissions:
self.Permissions(permission=p)
if p:
self.save()
|
Adds a permission with given name.
Args:
code (str): Code name of the permission.
save (bool): If False, does nothing.
|
entailment
|
def send_notification(self, title, message, typ=1, url=None, sender=None):
"""
sends a message to this role's user via their private mq exchange
"""
self.user.send_notification(title=title, message=message, typ=typ, url=url,
sender=sender)
|
sends a message to this role's user via their private mq exchange
|
entailment
|
def would_move_be_promotion(self):
"""
Finds if move from current location would be a promotion
"""
return (self._end_loc.rank == 0 and not self.color) or \
(self._end_loc.rank == 7 and self.color)
|
Finds if move from current location would be a promotion
|
entailment
|
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if six.PY3:
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
|
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
|
entailment
|
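An illustrative use of connect()/disconnect(); it assumes the surrounding class is instantiated as a Django-style signal object (the constructor and send() are not shown in the snippet).

def on_saved(sender=None, **kwargs):
    print('saved by', sender)

post_save = Signal()                                       # hypothetical instance
post_save.connect(on_saved, dispatch_uid='audit-hook')     # weak ref by default
post_save.connect(on_saved, dispatch_uid='audit-hook')     # same uid: not added twice
post_save.disconnect(dispatch_uid='audit-hook')            # -> True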
def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be None if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
|
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be None if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
|
entailment
|
def migrate(config):
"""Perform a migration according to config.
:param config: The configuration to be applied
:type config: Config
"""
webapp = WebApp(config.web_host, config.web_port,
custom_maintenance_file=config.web_custom_html)
webserver = WebServer(webapp)
webserver.daemon = True
webserver.start()
migration_parser = YamlParser.parse_from_file(config.migration_file)
migration = migration_parser.parse()
database = Database(config)
with database.connect() as lock_connection:
application_lock = ApplicationLock(lock_connection)
application_lock.start()
while not application_lock.acquired:
time.sleep(0.5)
else:
if application_lock.replica:
# when a replica could finally acquire a lock, it
# means that the concurrent process has finished the
# migration or that it failed to run it.
# In both cases after the lock is released, this process will
# verify if it has still to do something (if the other process
# failed mainly).
application_lock.stop = True
application_lock.join()
# we are not in the replica or the lock is released: go on for the
# migration
try:
table = MigrationTable(database)
runner = Runner(config, migration, database, table)
runner.perform()
finally:
application_lock.stop = True
application_lock.join()
|
Perform a migration according to config.
:param config: The configuration to be applied
:type config: Config
|
entailment
|
def main():
"""Parse the command line and run :func:`migrate`."""
parser = get_args_parser()
args = parser.parse_args()
config = Config.from_parse_args(args)
migrate(config)
|
Parse the command line and run :func:`migrate`.
|
entailment
|
def get_permissions(cls):
"""
Generates permissions for all CrudView based class methods.
Returns:
List of Permission objects.
"""
perms = []
for kls_name, kls in cls.registry.items():
for method_name in cls.__dict__.keys():
if method_name.endswith('_view'):
perms.append("%s.%s" % (kls_name, method_name))
return perms
|
Generates permissions for all CrudView based class methods.
Returns:
List of Permission objects.
|
entailment
|
def _get_object_menu_models():
"""
we need to create basic permissions
only for CRUD-enabled models
"""
from pyoko.conf import settings
enabled_models = []
for entry in settings.OBJECT_MENU.values():
for mdl in entry:
if 'wf' not in mdl:
enabled_models.append(mdl['name'])
return enabled_models
|
we need to create basic permissions
only for CRUD-enabled models
|
entailment
|
def add(cls, code_name, name='', description=''):
"""
create a custom permission
"""
if code_name not in cls.registry:
cls.registry[code_name] = (code_name, name or code_name, description)
return code_name
|
create a custom permission
|
entailment
|
def get_mapping(self, other):
"""
get self to other mapping
"""
m = next(self._matcher(other).isomorphisms_iter(), None)
if m:
return {v: k for k, v in m.items()}
|
get self to other mapping
|
entailment
|
def get_substructure_mapping(self, other, limit=1):
"""
get self to other substructure mapping
:param limit: number of matches. if 0 return iterator for all possible; if 1 return dict or None;
if > 1 return list of dicts
"""
i = self._matcher(other).subgraph_isomorphisms_iter()
if limit == 1:
m = next(i, None)
if m:
return {v: k for k, v in m.items()}
return
elif limit == 0:
return ({v: k for k, v in m.items()} for m in i)
return [{v: k for k, v in m.items()} for m in islice(i, limit)]
|
get self to other substructure mapping
:param limit: number of matches. if 0 return iterator for all possible; if 1 return dict or None;
if > 1 return list of dicts
|
entailment
|
def from_string(cls, alg_str):
"""
Creates a location from a two character string consisting of
the file then rank written in algebraic notation.
Examples: e4, b5, a7
:type: alg_str: str
:rtype: Location
"""
try:
return cls(int(alg_str[1]) - 1, ord(alg_str[0]) - 97)
except ValueError as e:
raise ValueError("Location.from_string {} invalid: {}".format(alg_str, e))
|
Creates a location from a two character string consisting of
the file then rank written in algebraic notation.
Examples: e4, b5, a7
:type: alg_str: str
:rtype: Location
|
entailment
|
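A quick usage note for from_string(): it maps algebraic notation to 0-indexed rank and file, so 'e4' becomes rank 3, file 4, and malformed strings re-raise as ValueError.

loc = Location.from_string('e4')    # rank = int('4') - 1 = 3, file = ord('e') - 97 = 4
Location.from_string('4e')          # raises ValueError (int('e') fails)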
def shift(self, direction):
"""
Shifts in direction provided by ``Direction`` enum.
:type: direction: Direction
:rtype: Location
"""
try:
if direction == Direction.UP:
return self.shift_up()
elif direction == Direction.DOWN:
return self.shift_down()
elif direction == Direction.RIGHT:
return self.shift_right()
elif direction == Direction.LEFT:
return self.shift_left()
else:
raise IndexError("Invalid direction {}".format(direction))
except IndexError as e:
raise IndexError(e)
|
Shifts in direction provided by ``Direction`` enum.
:type: direction: Direction
:rtype: Location
|
entailment
|
def shift_up(self, times=1):
"""
Finds Location shifted up by 1
:rtype: Location
"""
try:
return Location(self._rank + times, self._file)
except IndexError as e:
raise IndexError(e)
|
Finds Location shifted up by 1
:rtype: Location
|
entailment
|
def shift_down(self, times=1):
"""
Finds Location shifted down by 1
:rtype: Location
"""
try:
return Location(self._rank - times, self._file)
except IndexError as e:
raise IndexError(e)
|
Finds Location shifted down by 1
:rtype: Location
|
entailment
|
def shift_right(self, times=1):
"""
Finds Location shifted right by 1
:rtype: Location
"""
try:
return Location(self._rank, self._file + times)
except IndexError as e:
raise IndexError(e)
|
Finds Location shifted right by 1
:rtype: Location
|
entailment
|
def shift_left(self, times=1):
"""
Finds Location shifted left by 1
:rtype: Location
"""
try:
return Location(self._rank, self._file - times)
except IndexError as e:
raise IndexError(e)
|
Finds Location shifted left by 1
:rtype: Location
|
entailment
|
def shift_up_right(self, times=1):
"""
Finds Location shifted up right by 1
:rtype: Location
"""
try:
return Location(self._rank + times, self._file + times)
except IndexError as e:
raise IndexError(e)
|
Finds Location shifted up right by 1
:rtype: Location
|
entailment
|
def shift_up_left(self, times=1):
"""
Finds Location shifted up left by 1
:rtype: Location
"""
try:
return Location(self._rank + times, self._file - times)
except IndexError as e:
raise IndexError(e)
|
Finds Location shifted up left by 1
:rtype: Location
|
entailment
|
def shift_down_right(self, times=1):
"""
Finds Location shifted down right by 1
:rtype: Location
"""
try:
return Location(self._rank - times, self._file + times)
except IndexError as e:
raise IndexError(e)
|
Finds Location shifted down right by 1
:rtype: Location
|
entailment
|
def shift_down_left(self, times=1):
"""
Finds Location shifted down left by 1
:rtype: Location
"""
try:
return Location(self._rank - times, self._file - times)
except IndexError as e:
raise IndexError(e)
|
Finds Location shifted down left by 1
:rtype: Location
|
entailment
|
def standardize(self):
"""
standardize functional groups
:return: number of found groups
"""
self.reset_query_marks()
seen = set()
total = 0
for n, atom in self.atoms():
if n in seen:
continue
for k, center in central.items():
if center != atom:
continue
shell = tuple((bond, self._node[m]) for m, bond in self._adj[n].items())
for shell_query, shell_patch, atom_patch in query_patch[k]:
if shell_query != shell:
continue
total += 1
for attr_name, attr_value in atom_patch.items():
setattr(atom, attr_name, attr_value)
for (bond_patch, atom_patch), (bond, atom) in zip(shell_patch, shell):
bond.update(bond_patch)
for attr_name, attr_value in atom_patch.items():
setattr(atom, attr_name, attr_value)
seen.add(n)
seen.update(self._adj[n])
break
else:
continue
break
if total:
self.flush_cache()
return total
|
standardize functional groups
:return: number of found groups
|
entailment
|
def get_staged_files():
"""Get all files staged for the current commit.
"""
proc = subprocess.Popen(('git', 'status', '--porcelain'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, _ = proc.communicate()
staged_files = modified_re.findall(out)
return staged_files
|
Get all files staged for the current commit.
|
entailment
|
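get_staged_files() depends on a modified_re that is not shown in the snippet; the pattern below is only a plausible stand-in matching the staged (first) status column of `git status --porcelain` output. Note that on Python 3, proc.communicate() returns bytes, so out would need decoding (or a bytes pattern) before findall.

import re

# Hypothetical modified_re: capture paths whose *staged* column is set.
modified_re = re.compile(r'^[MARC].?\s+(.+)$', re.MULTILINE)

porcelain = 'M  logagg/collector.py\nA  setup.py\n?? notes.txt\n'
modified_re.findall(porcelain)
# -> ['logagg/collector.py', 'setup.py']  (the untracked file is skipped)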
def runserver(host=None, port=None):
"""
Run Tornado server
"""
host = host or os.getenv('HTTP_HOST', '0.0.0.0')
port = port or os.getenv('HTTP_PORT', '9001')
zioloop = ioloop.IOLoop.instance()
# setup pika client:
pc = QueueManager(zioloop)
app.pc = pc
pc.connect()
app.listen(port, host)
zioloop.start()
|
Run Tornado server
|
entailment
|
def open(self):
"""
Called on new websocket connection.
"""
sess_id = self._get_sess_id()
if sess_id:
self.application.pc.websockets[self._get_sess_id()] = self
self.write_message(json.dumps({"cmd": "status", "status": "open"}))
else:
self.write_message(json.dumps({"cmd": "error", "error": "Please login", "code": 401}))
|
Called on new websocket connection.
|
entailment
|
def on_message(self, message):
"""
called on new websocket message,
"""
log.debug("WS MSG for %s: %s" % (self._get_sess_id(), message))
self.application.pc.redirect_incoming_message(self._get_sess_id(), message, self.request)
|
called on new websocket message,
|
entailment
|
def _handle_headers(self):
"""
Do response processing
"""
origin = self.request.headers.get('Origin')
if not settings.DEBUG:
if origin in settings.ALLOWED_ORIGINS or not origin:
self.set_header('Access-Control-Allow-Origin', origin)
else:
log.debug("CORS ERROR: %s not allowed, allowed hosts: %s" % (origin,
settings.ALLOWED_ORIGINS))
raise HTTPError(403, "Origin not in ALLOWED_ORIGINS: %s" % origin)
else:
self.set_header('Access-Control-Allow-Origin', origin or '*')
self.set_header('Access-Control-Allow-Credentials', "true")
self.set_header('Access-Control-Allow-Headers', 'Content-Type')
self.set_header('Access-Control-Allow-Methods', 'OPTIONS')
self.set_header('Content-Type', 'application/json')
|
Do response processing
|
entailment
|
def post(self, view_name):
"""
login handler
"""
sess_id = None
input_data = {}
# try:
self._handle_headers()
# handle input
input_data = json_decode(self.request.body) if self.request.body else {}
input_data['path'] = view_name
# set or get session cookie
if not self.get_cookie(COOKIE_NAME) or 'username' in input_data:
sess_id = uuid4().hex
self.set_cookie(COOKIE_NAME, sess_id) # , domain='127.0.0.1'
else:
sess_id = self.get_cookie(COOKIE_NAME)
# h_sess_id = "HTTP_%s" % sess_id
input_data = {'data': input_data,
'_zops_remote_ip': self.request.remote_ip}
log.info("New Request for %s: %s" % (sess_id, input_data))
self.application.pc.register_websocket(sess_id, self)
self.application.pc.redirect_incoming_message(sess_id,
json_encode(input_data),
self.request)
|
login handler
|
entailment
|
def load_formatter_fn(formatter):
'''
>>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS
<function basescript at 0x...>
'''
obj = util.load_object(formatter)
if not hasattr(obj, 'ispartial'):
obj.ispartial = util.ispartial
return obj
|
>>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS
<function basescript at 0x...>
|
entailment
|
def _remove_redundancy(self, log):
"""Removes duplicate data from 'data' inside log dict and brings it
out.
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> log = {'id' : 46846876, 'type' : 'log',
... 'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
>>> lc._remove_redundancy(log)
{'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
"""
for key in log:
if key in log and key in log['data']:
log[key] = log['data'].pop(key)
return log
|
Removes duplicate data from 'data' inside log dict and brings it
out.
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> log = {'id' : 46846876, 'type' : 'log',
... 'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
>>> lc._remove_redundancy(log)
{'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
|
entailment
|
def validate_log_format(self, log):
'''
>>> lc = LogCollector('file=/path/to/file.log:formatter=logagg.formatters.basescript', 30)
>>> incomplete_log = {'data' : {'x' : 1, 'y' : 2},
... 'raw' : 'Not all keys present'}
>>> lc.validate_log_format(incomplete_log)
'failed'
>>> redundant_log = {'one_invalid_key' : 'Extra information',
... 'data': {'x' : 1, 'y' : 2},
... 'error': False,
... 'error_tb': '',
... 'event': 'event',
... 'file': '/path/to/file.log',
... 'formatter': 'logagg.formatters.mongodb',
... 'host': 'deepcompute-ThinkPad-E470',
... 'id': '0112358',
... 'level': 'debug',
... 'raw': 'some log line here',
... 'timestamp': '2018-04-07T14:06:17.404818',
... 'type': 'log'}
>>> lc.validate_log_format(redundant_log)
'failed'
>>> correct_log = {'data': {'x' : 1, 'y' : 2},
... 'error': False,
... 'error_tb': '',
... 'event': 'event',
... 'file': '/path/to/file.log',
... 'formatter': 'logagg.formatters.mongodb',
... 'host': 'deepcompute-ThinkPad-E470',
... 'id': '0112358',
... 'level': 'debug',
... 'raw': 'some log line here',
... 'timestamp': '2018-04-07T14:06:17.404818',
... 'type': 'log'}
>>> lc.validate_log_format(correct_log)
'passed'
'''
keys_in_log = set(log)
keys_in_log_structure = set(self.LOG_STRUCTURE)
try:
assert (keys_in_log == keys_in_log_structure)
except AssertionError as e:
self.log.warning('formatted_log_structure_rejected' ,
key_not_found = list(keys_in_log_structure-keys_in_log),
extra_keys_found = list(keys_in_log-keys_in_log_structure),
num_logs=1,
type='metric')
return 'failed'
for key in log:
try:
assert isinstance(log[key], self.LOG_STRUCTURE[key])
except AssertionError as e:
self.log.warning('formatted_log_structure_rejected' ,
key_datatype_not_matched = key,
datatype_expected = type(self.LOG_STRUCTURE[key]),
datatype_got = type(log[key]),
num_logs=1,
type='metric')
return 'failed'
return 'passed'
|
>>> lc = LogCollector('file=/path/to/file.log:formatter=logagg.formatters.basescript', 30)
>>> incomplete_log = {'data' : {'x' : 1, 'y' : 2},
... 'raw' : 'Not all keys present'}
>>> lc.validate_log_format(incomplete_log)
'failed'
>>> redundant_log = {'one_invalid_key' : 'Extra information',
... 'data': {'x' : 1, 'y' : 2},
... 'error': False,
... 'error_tb': '',
... 'event': 'event',
... 'file': '/path/to/file.log',
... 'formatter': 'logagg.formatters.mongodb',
... 'host': 'deepcompute-ThinkPad-E470',
... 'id': '0112358',
... 'level': 'debug',
... 'raw': 'some log line here',
... 'timestamp': '2018-04-07T14:06:17.404818',
... 'type': 'log'}
>>> lc.validate_log_format(redundant_log)
'failed'
>>> correct_log = {'data': {'x' : 1, 'y' : 2},
... 'error': False,
... 'error_tb': '',
... 'event': 'event',
... 'file': '/path/to/file.log',
... 'formatter': 'logagg.formatters.mongodb',
... 'host': 'deepcompute-ThinkPad-E470',
... 'id': '0112358',
... 'level': 'debug',
... 'raw': 'some log line here',
... 'timestamp': '2018-04-07T14:06:17.404818',
... 'type': 'log'}
>>> lc.validate_log_format(correct_log)
'passed'
|
entailment
|
def assign_default_log_values(self, fpath, line, formatter):
'''
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> from pprint import pprint
>>> formatter = 'logagg.formatters.mongodb'
>>> fpath = '/var/log/mongodb/mongodb.log'
>>> line = 'some log line here'
>>> default_log = lc.assign_default_log_values(fpath, line, formatter)
>>> pprint(default_log) #doctest: +ELLIPSIS
{'data': {},
'error': False,
'error_tb': '',
'event': 'event',
'file': '/var/log/mongodb/mongodb.log',
'formatter': 'logagg.formatters.mongodb',
'host': '...',
'id': None,
'level': 'debug',
'raw': 'some log line here',
'timestamp': '...',
'type': 'log'}
'''
return dict(
id=None,
file=fpath,
host=self.HOST,
formatter=formatter,
event='event',
data={},
raw=line,
timestamp=datetime.datetime.utcnow().isoformat(),
type='log',
level='debug',
error= False,
error_tb='',
)
|
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> from pprint import pprint
>>> formatter = 'logagg.formatters.mongodb'
>>> fpath = '/var/log/mongodb/mongodb.log'
>>> line = 'some log line here'
>>> default_log = lc.assign_default_log_values(fpath, line, formatter)
>>> pprint(default_log) #doctest: +ELLIPSIS
{'data': {},
'error': False,
'error_tb': '',
'event': 'event',
'file': '/var/log/mongodb/mongodb.log',
'formatter': 'logagg.formatters.mongodb',
'host': '...',
'id': None,
'level': 'debug',
'raw': 'some log line here',
'timestamp': '...',
'type': 'log'}
|
entailment
|
def _scan_fpatterns(self, state):
'''
For a list of given fpatterns, this starts a thread
collecting log lines from file
>>> os.path.isfile = lambda path: path == '/path/to/log_file.log'
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> print(lc.fpaths)
file=/path/to/log_file.log:formatter=logagg.formatters.basescript
>>> print('formatters loaded:', lc.formatters)
{}
>>> print('log file reader threads started:', lc.log_reader_threads)
{}
>>> state = AttrDict(files_tracked=list())
>>> print('files being tracked:', state.files_tracked)
[]
>>> if not state.files_tracked:
...     lc._scan_fpatterns(state)
>>> print('formatters loaded:', lc.formatters)
>>> print('log file reader threads started:', lc.log_reader_threads)
>>> print('files being tracked:', state.files_tracked)
'''
for f in self.fpaths:
fpattern, formatter =(a.split('=')[1] for a in f.split(':', 1))
self.log.debug('scan_fpatterns', fpattern=fpattern, formatter=formatter)
# TODO code for scanning fpatterns for the files not yet present goes here
fpaths = glob.glob(fpattern)
# Load formatter_fn if not in list
fpaths = list(set(fpaths) - set(state.files_tracked))
for fpath in fpaths:
try:
formatter_fn = self.formatters.get(formatter,
load_formatter_fn(formatter))
self.log.info('found_formatter_fn', fn=formatter)
self.formatters[formatter] = formatter_fn
except (SystemExit, KeyboardInterrupt): raise
except (ImportError, AttributeError):
self.log.exception('formatter_fn_not_found', fn=formatter)
sys.exit(-1)
# Start a thread for every file
self.log.info('found_log_file', log_file=fpath)
log_f = dict(fpath=fpath, fpattern=fpattern,
formatter=formatter, formatter_fn=formatter_fn)
log_key = (fpath, fpattern, formatter)
if log_key not in self.log_reader_threads:
self.log.info('starting_collect_log_lines_thread', log_key=log_key)
# There is no existing thread tracking this log file. Start one
log_reader_thread = util.start_daemon_thread(self.collect_log_lines, (log_f,))
self.log_reader_threads[log_key] = log_reader_thread
state.files_tracked.append(fpath)
time.sleep(self.SCAN_FPATTERNS_INTERVAL)
|
For a list of given fpatterns, this starts a thread
collecting log lines from file
>>> os.path.isfile = lambda path: path == '/path/to/log_file.log'
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> print(lc.fpaths)
file=/path/to/log_file.log:formatter=logagg.formatters.basescript
>>> print('formatters loaded:', lc.formatters)
{}
>>> print('log file reader threads started:', lc.log_reader_threads)
{}
>>> state = AttrDict(files_tracked=list())
>>> print('files being tracked:', state.files_tracked)
[]
>>> if not state.files_tracked:
...     lc._scan_fpatterns(state)
>>> print('formatters loaded:', lc.formatters)
>>> print('log file reader threads started:', lc.log_reader_threads)
>>> print('files being tracked:', state.files_tracked)
|
entailment
|
def get_links(self, **kw):
"""
Prepare links of the form by mimicking pyoko's get_links method's result
Args:
**kw:
Returns: list of link dicts
"""
links = [a for a in dir(self) if isinstance(getattr(self, a), Model)
and not a.startswith('_model')]
return [
{
'field': l,
'mdl': getattr(self, l).__class__,
} for l in links
]
|
Prepare links of the form by mimicking pyoko's get_links method's result
Args:
**kw:
Returns: list of link dicts
|
entailment
|
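For illustration, a minimal self-contained sketch of the link discovery above; Model, User and UserForm are stubs standing in for the pyoko/zengine classes and are not part of the source, and get_links simply mirrors the method shown:
class Model:                 # stub for pyoko.Model
    pass
class User(Model):           # hypothetical linked model
    pass
class UserForm:              # hypothetical form exposing the same get_links logic
    def __init__(self):
        self.user = User()   # an attribute holding a Model instance counts as a link
    def get_links(self, **kw):
        links = [a for a in dir(self) if isinstance(getattr(self, a), Model)
                 and not a.startswith('_model')]
        return [{'field': l, 'mdl': getattr(self, l).__class__} for l in links]
print(UserForm().get_links())
# -> [{'field': 'user', 'mdl': <class '__main__.User'>}]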
def set_data(self, data):
"""
Fills form with data
Args:
data (dict): Data to assign form fields.
Returns:
Self. Form object.
"""
for name in self._fields:
setattr(self, name, data.get(name))
return self
|
Fills form with data
Args:
data (dict): Data to assign form fields.
Returns:
Self. Form object.
|
entailment
|
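A minimal self-contained sketch of the fill behaviour; StubForm and its field names are hypothetical, and its set_data body simply mirrors the method above:
class StubForm:
    _fields = ['name', 'code']            # hypothetical declared fields
    def set_data(self, data):
        for name in self._fields:
            setattr(self, name, data.get(name))
        return self
form = StubForm().set_data({'name': 'Can edit'})
print(form.name)    # -> 'Can edit'
print(form.code)    # -> None; fields missing from the dict default to None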
def serialize(self):
"""
Converts the form/model into JSON ready dicts/lists compatible
with `Ulakbus-UI API`_.
Example:
.. code-block:: json
{
"forms": {
"constraints": {},
"model": {
"code": null,
"name": null,
"save_edit": null,
},
"grouping": {},
"form": [
{
"helpvalue": null,
"type": "help"
},
"name",
"code",
"save_edit"
],
"schema": {
"required": [
"name",
"code",
"save_edit"
],
"type": "object",
"properties": {
"code": {
"type": "string",
"title": "Code Name"
},
"name": {
"type": "string",
"title": "Name"
},
"save_edit": {
"cmd": "save::add_edit_form",
"type": "button",
"title": "Save"
}
},
"title": "Add Permission"
}
}
}
"""
result = {
"schema": {
"title": self.title,
"type": "object",
"properties": {},
"required": []
},
"form": [
{
"type": "help",
"helpvalue": self.help_text
}
],
"model": {}
}
for itm in self.META_TO_FORM_ROOT:
if itm in self.Meta.__dict__:
result[itm] = self.Meta.__dict__[itm]
if self._model.is_in_db():
result["model"]['object_key'] = self._model.key
result["model"]['model_type'] = self._model.__class__.__name__
result["model"]['unicode'] = six.text_type(self._model)
# if the form is intentionally marked as fillable from task data by assigning False to the
# always_blank field in the Meta class, form_data is retrieved from task_data if it exists, else None
form_data = None
if not self.Meta.always_blank:
form_data = self.context.task_data.get(self.__class__.__name__, None)
for itm in self._serialize():
item_props = {'type': itm['type'], 'title': itm['title']}
if not itm.get('value') and 'kwargs' in itm and 'value' in itm['kwargs']:
itm['value'] = itm['kwargs'].pop('value')
if 'kwargs' in itm and 'widget' in itm['kwargs']:
item_props['widget'] = itm['kwargs'].pop('widget')
if form_data:
if form_data[itm['name']] and (itm['type'] == 'date' or itm['type'] == 'datetime'):
value_to_serialize = datetime.strptime(
form_data[itm['name']], itm['format'])
else:
value_to_serialize = form_data[itm['name']]
value = self._serialize_value(value_to_serialize)
if itm['type'] == 'button':
value = None
# if form_data is empty, value will be None, so the form needs to be filled from the model
# or left empty
else:
# if itm['value'] is not None returns itm['value']
# else itm['default']
if itm['value'] is not None:
value = itm['value']
else:
value = itm['default']
result["model"][itm['name']] = value
if itm['type'] == 'model':
item_props['model_name'] = itm['model_name']
if itm['type'] not in ['ListNode', 'model', 'Node']:
if 'hidden' in itm['kwargs']:
# we're simulating HTML's hidden form fields
# by just setting it in "model" dict and bypassing other parts
continue
else:
item_props.update(itm['kwargs'])
if itm.get('choices'):
self._handle_choices(itm, item_props, result)
else:
result["form"].append(itm['name'])
if 'help_text' in itm:
item_props['help_text'] = itm['help_text']
if 'schema' in itm:
item_props['schema'] = itm['schema']
# this adds default directives for building
# add and list views of linked models
if item_props['type'] == 'model':
# this check exists for passing tests.
# the object gets a context but does not use it. why is that?
if self.context:
if self.context.has_permission("%s.select_list" % item_props['model_name']):
item_props.update({
'list_cmd': 'select_list',
'wf': 'crud',
})
if self.context.has_permission("%s.add_edit_form" % item_props['model_name']):
item_props.update({
'add_cmd': 'add_edit_form',
'wf': 'crud',
})
else:
item_props.update({
'list_cmd': 'select_list',
'add_cmd': 'add_edit_form',
'wf': 'crud'
})
result["schema"]["properties"][itm['name']] = item_props
if itm['required']:
result["schema"]["required"].append(itm['name'])
self._cache_form_details(result)
return result
|
Converts the form/model into JSON ready dicts/lists compatible
with `Ulakbus-UI API`_.
Example:
.. code-block:: json
{
"forms": {
"constraints": {},
"model": {
"code": null,
"name": null,
"save_edit": null,
},
"grouping": {},
"form": [
{
"helpvalue": null,
"type": "help"
},
"name",
"code",
"save_edit"
],
"schema": {
"required": [
"name",
"code",
"save_edit"
],
"type": "object",
"properties": {
"code": {
"type": "string",
"title": "Code Name"
},
"name": {
"type": "string",
"title": "Name"
},
"save_edit": {
"cmd": "save::add_edit_form",
"type": "button",
"title": "Save"
}
},
"title": "Add Permission"
}
}
}
|
entailment
|
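To make the output shape concrete, a small runnable snippet built from the (abridged) example in the docstring above; the interpretation of the three top-level keys follows the usual schema-form convention assumed here rather than anything stated in the source:
serialized = {
    "schema": {"title": "Add Permission",
               "type": "object",
               "required": ["name", "code", "save_edit"],
               "properties": {"name": {"type": "string", "title": "Name"}}},
    "form": [{"type": "help", "helpvalue": None}, "name", "code", "save_edit"],
    "model": {"code": None, "name": None, "save_edit": None},
}
# "form" gives the render order, "schema" drives validation,
# and values are posted back under the same keys as "model".
print(serialized["schema"]["required"])   # -> ['name', 'code', 'save_edit']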
def _cache_form_details(self, form):
"""
Caches some form details to later process and validate incoming (response) form data
Args:
form: form dict
"""
cache = FormCache()
form['model']['form_key'] = cache.form_id
form['model']['form_name'] = self.__class__.__name__
cache.set(
{
'model': list(form['model'].keys()), # In Python 3, dictionary keys are not serializable
'non_data_fields': self.non_data_fields
}
)
|
Caches some form details to later process and validate incoming (response) form data
Args:
form: form dict
|
entailment
|
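A sketch of the cached payload's shape (the field names are illustrative); because form_key and form_name are written into form['model'] before cache.set is called, they show up in the cached key list as well:
cached_payload = {
    'model': ['name', 'code', 'save_edit', 'form_key', 'form_name'],
    'non_data_fields': ['save_edit'],
}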
def _parse_msg_for_mongodb(self, msgs):
'''
>>> mdbf = MongoDBForwarder('no_host', '27017', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> log = [{u'data': {u'_': {u'file': u'log.py',
... u'fn': u'start',
... u'ln': 8,
... u'name': u'__main__'},
... u'a': 1,
... u'b': 2,
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}]
>>> records = mdbf._parse_msg_for_mongodb(log)
>>> from pprint import pprint
>>> pprint(records)
[{'_id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
u'data': {u'_': {u'file': u'log.py',
u'fn': u'start',
u'ln': 8,
u'name': u'__main__'},
u'a': 1,
u'b': 2,
u'msg': u'this is a dummy log'},
u'error': False,
u'error_tb': u'',
u'event': u'some_log',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info',
u'raw': u'{...}',
u'timestamp': u'2018-04-09T09:59:24.733945Z',
u'type': u'metric'}]
'''
msgs_list = []
for msg in msgs:
try:
msg['_id'] = msg.pop('id')
except KeyError:
self.log.exception('collector_failure_id_not_found', log=msg)
msgs_list.append(msg)
return msgs_list
|
>>> mdbf = MongoDBForwarder('no_host', '27017', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> log = [{u'data': {u'_': {u'file': u'log.py',
... u'fn': u'start',
... u'ln': 8,
... u'name': u'__main__'},
... u'a': 1,
... u'b': 2,
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}]
>>> records = mdbf._parse_msg_for_mongodb(log)
>>> from pprint import pprint
>>> pprint(records)
[{'_id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
u'data': {u'_': {u'file': u'log.py',
u'fn': u'start',
u'ln': 8,
u'name': u'__main__'},
u'a': 1,
u'b': 2,
u'msg': u'this is a dummy log'},
u'error': False,
u'error_tb': u'',
u'event': u'some_log',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info',
u'raw': u'{...}',
u'timestamp': u'2018-04-09T09:59:24.733945Z',
u'type': u'metric'}]
|
entailment
|
def _tag_and_field_maker(self, event):
'''
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> log = {u'data': {u'_': {u'file': u'log.py',
... u'fn': u'start',
... u'ln': 8,
... u'name': u'__main__'},
... u'a': 1,
... u'b': 2,
... u'__ignore_this': 'some_string',
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}
>>> tags, fields = idbf._tag_and_field_maker(log)
>>> from pprint import pprint
>>> pprint(tags)
{u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'}
>>> pprint(fields)
{u'data._': "{u'ln': 8, u'fn': u'start', u'file': u'log.py', u'name': u'__main__'}",
u'data.a': 1,
u'data.b': 2}
'''
data = event.pop('data')
data = flatten_dict({'data': data})
t = dict((k, event[k]) for k in event if k not in self.EXCLUDE_TAGS)
f = dict()
for k in data:
v = data[k]
if is_number(v) or isinstance(v, MarkValue):
f[k] = v
else:
#if v.startswith('_'): f[k] = eval(v.split('_', 1)[1])
t[k] = v
return t, f
|
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> log = {u'data': {u'_': {u'file': u'log.py',
... u'fn': u'start',
... u'ln': 8,
... u'name': u'__main__'},
... u'a': 1,
... u'b': 2,
... u'__ignore_this': 'some_string',
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}
>>> tags, fields = idbf._tag_and_field_maker(log)
>>> from pprint import pprint
>>> pprint(tags)
{u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'}
>>> pprint(fields)
{u'data._': "{u'ln': 8, u'fn': u'start', u'file': u'log.py', u'name': u'__main__'}",
u'data.a': 1,
u'data.b': 2}
|
entailment
|
def _parse_msg_for_influxdb(self, msgs):
'''
>>> from logagg.forwarders import InfluxDBForwarder
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> valid_log = [{u'data': {u'_force_this_as_field': 'CXNS CNS nbkbsd',
... u'a': 1,
... u'b': 2,
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}]
>>> pointvalues = idbf._parse_msg_for_influxdb(valid_log)
>>> from pprint import pprint
>>> pprint(pointvalues)
[{'fields': {u'data._force_this_as_field': "'CXNS CNS nbkbsd'",
u'data.a': 1,
u'data.b': 2},
'measurement': u'some_log',
'tags': {u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'},
'time': u'2018-04-09T09:59:24.733945Z'}]
>>> invalid_log = valid_log
>>> invalid_log[0]['error'] = True
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
>>> invalid_log = valid_log
>>> invalid_log[0]['type'] = 'log'
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
'''
series = []
for msg in msgs:
if msg.get('error'):
continue
if msg.get('type').lower() == 'metric':
time = msg.get('timestamp')
measurement = msg.get('event')
tags, fields = self._tag_and_field_maker(msg)
pointvalues = {
"time": time,
"measurement": measurement,
"fields": fields,
"tags": tags}
series.append(pointvalues)
return series
|
>>> from logagg.forwarders import InfluxDBForwarder
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> valid_log = [{u'data': {u'_force_this_as_field': 'CXNS CNS nbkbsd',
... u'a': 1,
... u'b': 2,
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}]
>>> pointvalues = idbf._parse_msg_for_influxdb(valid_log)
>>> from pprint import pprint
>>> pprint(pointvalues)
[{'fields': {u'data._force_this_as_field': "'CXNS CNS nbkbsd'",
u'data.a': 1,
u'data.b': 2},
'measurement': u'some_log',
'tags': {u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'},
'time': u'2018-04-09T09:59:24.733945Z'}]
>>> invalid_log = valid_log
>>> invalid_log[0]['error'] = True
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
>>> invalid_log = valid_log
>>> invalid_log[0]['type'] = 'log'
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
|
entailment
|
def get_input(source, files, threads=4, readtype="1D",
combine="simple", names=None, barcoded=False):
"""Get input and process accordingly.
Data can be:
- an uncompressed, bgzip, bzip2 or gzip compressed fastq file
- an uncompressed, bgzip, bzip2 or gzip compressed fasta file
- a rich fastq containing additional key=value information in the description,
as produced by MinKNOW and albacore with the same compression options as above
- a sorted bam file
- a sorted cram file
- a (compressed) sequencing_summary.txt file generated by albacore
Handle is passed to the proper functions to get DataFrame with metrics
Multiple files of the same type can be used to extract info from, which is done in parallel
Arguments:
- source: defines the input data type and the function that needs to be called
- files: is a list of one or more files to operate on, of the type specified by <source>
- threads: is the number of workers that can be used
- readtype: (only relevant for summary input) specifies which columns have to be extracted
- combine: is either 'simple' or 'track', with the difference that with 'track' an additional
field is created with the name of the dataset
- names: if combine="track", the names to be used for the datasets. Needs to have same length as
files, or None
"""
proc_functions = {
'fastq': ex.process_fastq_plain,
'fasta': ex.process_fasta,
'bam': ex.process_bam,
'summary': ex.process_summary,
'fastq_rich': ex.process_fastq_rich,
'fastq_minimal': ex.process_fastq_minimal,
'cram': ex.process_cram,
'ubam': ex.process_ubam, }
filethreads = min(len(files), threads)
threadsleft = threads - filethreads
with cfutures.ProcessPoolExecutor(max_workers=filethreads) as executor:
extraction_function = partial(proc_functions[source],
threads=threadsleft,
readtype=readtype,
barcoded=barcoded)
datadf = combine_dfs(
dfs=[out for out in executor.map(extraction_function, files)],
names=names or files,
method=combine)
if "readIDs" in datadf and pd.isna(datadf["readIDs"]).any():
datadf.drop("readIDs", axis='columns', inplace=True)
datadf = calculate_start_time(datadf)
logging.info("Nanoget: Gathered all metrics of {} reads".format(len(datadf)))
if len(datadf) == 0:
logging.critical("Nanoget: no reads retrieved.")
sys.exit("Fatal: No reads found in input.")
else:
return datadf
|
Get input and process accordingly.
Data can be:
- an uncompressed, bgzip, bzip2 or gzip compressed fastq file
- an uncompressed, bgzip, bzip2 or gzip compressed fasta file
- a rich fastq containing additional key=value information in the description,
as produced by MinKNOW and albacore with the same compression options as above
- a sorted bam file
- a sorted cram file
- a (compressed) sequencing_summary.txt file generated by albacore
Handle is passed to the proper functions to get DataFrame with metrics
Multiple files of the same type can be used to extract info from, which is done in parallel
Arguments:
- source: defines the input data type and the function that needs to be called
- files: is a list of one or more files to operate on, of the type specified by <source>
- threads: is the number of workers that can be used
- readtype: (only relevant for summary input) specifies which columns have to be extracted
- combine: is either 'simple' or 'track', with the difference that with 'track' an additional
field is created with the name of the dataset
- names: if combine="track", the names to be used for the datasets. Needs to have same length as
files, or None
|
entailment
|
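A minimal usage sketch, assuming the function is importable as nanoget.get_input; the summary file path and dataset name are placeholders:
from nanoget import get_input
df = get_input(source="summary",
               files=["sequencing_summary.txt"],   # placeholder path
               threads=4,
               readtype="1D",
               combine="track",
               names=["run1"])
print(df.head())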
def combine_dfs(dfs, names, method):
"""Combine dataframes.
Combination is either done simply by just concatenating the DataFrames
or with tracking, by adding the name of the dataset as a column."""
if method == "track":
res = list()
for df, identifier in zip(dfs, names):
df["dataset"] = identifier
res.append(df)
return pd.concat(res, ignore_index=True)
elif method == "simple":
return pd.concat(dfs, ignore_index=True)
|
Combine dataframes.
Combination is either done simply by just concatenating the DataFrames
or with tracking, by adding the name of the dataset as a column.
|
entailment
|
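A small self-contained pandas sketch of the two combine modes described above; the toy DataFrames stand in for real per-file metric tables:
import pandas as pd
df_a = pd.DataFrame({"lengths": [500, 1200]})
df_b = pd.DataFrame({"lengths": [800]})
# method="simple": plain concatenation
simple = pd.concat([df_a, df_b], ignore_index=True)
# method="track": tag each frame with its dataset name before concatenating
for df, name in zip([df_a, df_b], ["run1", "run2"]):
    df["dataset"] = name
tracked = pd.concat([df_a, df_b], ignore_index=True)
print(tracked)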
def calculate_start_time(df):
"""Calculate the star_time per read.
Time data is either
a "time" (in seconds, derived from summary files) or
a "timestamp" (in UTC, derived from fastq_rich format)
and has to be converted appropriately in a datetime format time_arr
For both the time_zero is the minimal value of the time_arr,
which is then used to subtract from all other times
In the case of method=track (and dataset is a column in the df) then this
subtraction is done per dataset
"""
if "time" in df:
df["time_arr"] = pd.Series(df["time"], dtype='datetime64[s]')
elif "timestamp" in df:
df["time_arr"] = pd.Series(df["timestamp"], dtype="datetime64[ns]")
else:
return df
if "dataset" in df:
for dset in df["dataset"].unique():
time_zero = df.loc[df["dataset"] == dset, "time_arr"].min()
df.loc[df["dataset"] == dset, "start_time"] = \
df.loc[df["dataset"] == dset, "time_arr"] - time_zero
else:
df["start_time"] = df["time_arr"] - df["time_arr"].min()
return df.drop(["time", "timestamp", "time_arr"], axis=1, errors="ignore")
|
Calculate the start_time per read.
Time data is either
a "time" (in seconds, derived from summary files) or
a "timestamp" (in UTC, derived from the fastq_rich format)
and has to be converted appropriately into a datetime column time_arr.
For both, time_zero is the minimal value of time_arr,
which is then subtracted from all other times.
In the case of method=track (and dataset is a column in the df),
this subtraction is done per dataset.
|
entailment
|
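A toy pandas sketch of the conversion described above, using pd.to_datetime in place of the dtype cast but with the same effect: start_time becomes a timedelta relative to the earliest read:
import pandas as pd
# Summary-style 'time' values, in seconds from an arbitrary zero point.
df = pd.DataFrame({"time": [120, 60, 300]})
df["time_arr"] = pd.to_datetime(df["time"], unit="s")
df["start_time"] = df["time_arr"] - df["time_arr"].min()
print(df[["time", "start_time"]])
# start_time is 0 for the earliest read and a positive timedelta for the others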
def parser_from_buffer(cls, fp):
"""Construct YamlParser from a file pointer."""
yaml = YAML(typ="safe")
return cls(yaml.load(fp))
|
Construct YamlParser from a file pointer.
|
entailment
|
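A sketch of what the classmethod does, using an in-memory buffer instead of a real file; YamlParser is assumed to be the class defining this method and to simply wrap the loaded mapping:
import io
from ruamel.yaml import YAML
fp = io.StringIO("name: example\nthreads: 4\n")
data = YAML(typ="safe").load(fp)
print(data)                      # -> {'name': 'example', 'threads': 4}
# parser_from_buffer would then return cls(data), i.e. YamlParser(data)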