content stringlengths 5 1.05M |
|---|
from apps.senator.models import (Parliamentarian, Mandate, Alternate, Exercise)
__author__ = "Gilson Paulino"
__date__ = "Created by 24/03/18"
__copyright__ = "Copyright 2018"
__email__ = "gilsonbp@gmail.com"
class ImportSenatorJSON(object):
    """
    Importing parliamentarians through the endpoint
    (http://legis.senado.gov.br/dadosabertos/senador/lista/atual)
    """

    @staticmethod
    def _as_list(node):
        """Normalize an XML-derived JSON node into a list of dicts.

        The Senate API encodes repeated elements as a dict when there is a
        single item and as a list when there are several; an absent node
        becomes an empty list.

        :param node: None, a dict, or a list of dicts
        :return: list of dicts (possibly empty)
        """
        if not node:
            return []
        if isinstance(node, dict):
            return [node]
        return list(node)

    @staticmethod
    def _import_alternates(mandate, alternates):
        """Persist the alternates ('Suplentes') attached to a mandate.

        :param mandate: the Mandate instance the alternates belong to
        :param alternates: the 'Suplentes' node (dict) or an empty dict
        """
        for supl in ImportSenatorJSON._as_list(alternates.get('Suplente')):
            Alternate.objects.create(
                mandate=mandate,
                codigo_parlamentar=supl.get('CodigoParlamentar'),
                descricao_participacao=supl.get('DescricaoParticipacao'),
                nome_parlamentar=supl.get('NomeParlamentar'),
            )

    @staticmethod
    def _import_exercises(mandate, exercises):
        """Persist the exercises ('Exercicios') attached to a mandate.

        :param mandate: the Mandate instance the exercises belong to
        :param exercises: the 'Exercicios' node (dict) or an empty dict
        """
        for exer in ImportSenatorJSON._as_list(exercises.get('Exercicio')):
            Exercise.objects.create(
                mandate=mandate,
                codigo=exer.get('CodigoExercicio'),
                data_inicio=exer.get('DataInicio'),
                data_fim=exer.get('DataFim'),
                sigla_causa_afastamento=exer.get('SiglaCausaAfastamento'),
                descricao_causa_afastamento=exer.get(
                    'DescricaoCausaAfastamento'),
                data_leitura=exer.get('DataLeitura'),
            )

    @staticmethod
    def _import_mandate(parliamentarian, mdt):
        """Persist one 'Mandato' node, including its holder, alternates
        and exercises.

        :param parliamentarian: owning Parliamentarian instance
        :param mdt: the 'Mandato' dict from the API payload
        """
        # 'Titular' may be absent; an empty dict lets .get() yield None
        # for every holder field, matching the previous explicit branch.
        holder = mdt.get('Titular') or {}
        pl_leg = mdt['PrimeiraLegislaturaDoMandato']
        sl_leg = mdt['SegundaLegislaturaDoMandato']
        mandate = Mandate.objects.create(
            parliamentarian=parliamentarian,
            codigo=mdt.get('CodigoMandato'),
            uf=mdt.get('UfParlamentar'),
            pl_numero=pl_leg['NumeroLegislatura'],
            pl_data_inicio=pl_leg['DataInicio'],
            pl_data_fim=pl_leg['DataFim'],
            sl_numero=sl_leg['NumeroLegislatura'],
            sl_data_inicio=sl_leg['DataInicio'],
            sl_data_fim=sl_leg['DataFim'],
            descricao_participacao=mdt.get('DescricaoParticipacao'),
            t_descricao_participacao=holder.get('DescricaoParticipacao'),
            t_codigo_parlamentar=holder.get('CodigoParlamentar'),
            t_nome_parlamentar=holder.get('NomeParlamentar'),
        )
        ImportSenatorJSON._import_alternates(mandate, mdt.get('Suplentes') or {})
        ImportSenatorJSON._import_exercises(mandate, mdt.get('Exercicios') or {})

    @staticmethod
    def parliamentarian(jsonresponse):
        """
        Method for importing parliamentarians.

        :param jsonresponse: JSON containing the list of parliamentarians
        :return: Quantity of parliamentarians imported.
        """
        # Clears all parliamentarians and dependent tables (via cascade)
        # before re-importing.
        Parliamentarian.objects.all().delete()
        rows = 0
        parlamentares = jsonresponse['ListaParlamentarEmExercicio'][
            'Parlamentares']['Parlamentar']
        for pl in parlamentares:
            idp = pl.get('IdentificacaoParlamentar')
            # Skip entries that carry no parliamentary code.
            if not idp.get('CodigoParlamentar', False):
                continue
            parliamentarian = Parliamentarian.objects.create(
                codigo=idp.get('CodigoParlamentar'),
                nome=idp.get('NomeParlamentar'),
                nome_completo=idp.get('NomeCompletoParlamentar'),
                sexo=idp.get('SexoParlamentar'),
                forma_tratamento=idp.get('FormaTratamento'),
                url_foto=idp.get('UrlFotoParlamentar'),
                url_pagina=idp.get('UrlPaginaParlamentar'),
                email=idp.get('EmailParlamentar'),
                sigla_partido=idp.get('SiglaPartidoParlamentar'),
                uf=idp.get('UfParlamentar'),
                glossario=pl.get('UrlGlossario'),
            )
            mdt = pl.get('Mandato', False)
            if mdt:  # Checking for Mandates
                ImportSenatorJSON._import_mandate(parliamentarian, mdt)
            rows += 1
        return rows
|
import logging
import shutil
import subprocess
import sys
import telnetlib
import distutils
from os.path import abspath
if sys.version_info < (3, 0):
    import Queue as queue
else:
    import queue
END_OF_MSG = b'\r\n\r>'
class OpenOCDProtocol(object):
    """
    This class implements the openocd protocol.
    Although OpenOCD itself is very powerful, it is only used as monitor
    protocol, since all other functionalities are also exposed via the
    gdb-interface, which is easier to parse in an automatic manner.
    :param openocd_script: The openocd scripts to be executed.
    :type openocd_script: str or list
    :param openocd_executable: The executable
    :param additional_args: Additional arguments delivered to openocd.
    :type additional_args: list
    :param telnet_port: the port used for the telnet connection
    :param gdb_port: the port used for openocds gdb-server
    """
    def __init__(self, openocd_script, openocd_executable="openocd",
                 additional_args=None, telnet_port=4444, gdb_port=3333,
                 origin=None, output_directory='/tmp'):
        if isinstance(openocd_script, str):
            self.openocd_files = [openocd_script]
        elif isinstance(openocd_script, list):
            self.openocd_files = openocd_script
        else:
            raise TypeError("Wrong type for OpenOCD configuration files")
        # A mutable default argument ([]) is shared across instances;
        # default to None and build a fresh list per call instead.
        if additional_args is None:
            additional_args = []
        self._telnet = None
        self._telnet_port = telnet_port
        # `import distutils` never imported the `spawn` submodule, so the
        # previous distutils.spawn.find_executable call raised
        # AttributeError; shutil.which is the stdlib replacement (and
        # distutils is removed in Python 3.12).
        executable_path = shutil.which(openocd_executable)
        if executable_path is None:
            raise FileNotFoundError(
                "OpenOCD executable %r not found in PATH" % openocd_executable)
        self._cmd_line = [executable_path,
                          '--command', 'telnet_port %d' % telnet_port,
                          '--command', 'gdb_port %d' % gdb_port]
        self._cmd_line += additional_args
        # Each configuration file is passed with its own '-f' flag.
        for config_file in self.openocd_files:
            self._cmd_line += ['-f', abspath(config_file)]
        self._openocd = None
        # Popen duplicates the descriptors, so the log files may be closed
        # here while openocd keeps writing into them.
        with open("%s/openocd_out.txt" % output_directory, "wb") as out, \
                open("%s/openocd_err.txt" % output_directory, "wb") as err:
            self._openocd = subprocess.Popen(self._cmd_line,
                                             stdout=out, stderr=err)
        self.log = logging.getLogger('%s.%s' %
                                     (origin.log.name, self.__class__.__name__)
                                     ) if origin else \
            logging.getLogger(self.__class__.__name__)

    def connect(self):
        """
        Connects to OpenOCDs telnet-server for all subsequent communication
        returns: True on success, else False
        """
        self._telnet = telnetlib.Telnet('127.0.0.1', self._telnet_port)
        # read_until returns bytes; str() gives the b'...' repr, which is
        # sufficient for the substring check below.
        resp = self._telnet.read_until(END_OF_MSG)
        if 'Open On-Chip Debugger' in str(resp):
            return True
        else:
            self.log.error('Failed to connect to OpenOCD')
            return False

    def reset(self):
        """
        Resets the target
        returns: True on success, else False
        """
        self._telnet.write('reset halt\n'.encode('ascii'))
        resp = self._telnet.read_until(END_OF_MSG)
        if 'target state: halted' in str(resp):
            return True
        else:
            self.log.error('Failed to reset the target with OpenOCD')
            return False

    def shutdown(self):
        """
        Shuts down OpenOCD
        returns: True on success, else False
        """
        if self._telnet:
            self._telnet.close()
        if self._openocd is not None:
            self._openocd.terminate()
            # Reap the child so no zombie process is left behind.
            self._openocd.wait()
            self._openocd = None
|
#!/usr/bin/python
# filename: using_dict.py
# 'ab' is short for 'a'ddress 'b'ook
ab = {
    'Swaroop': 'swaroopch@byteofpython.info',
    'Larry': 'larry@wall.org',
    'Matsumoto': 'matz@rub-lang.org',
    'Spammer': 'spammer@hotmail.com',
}
print("Swaroop's address is %s" % ab['Swaroop'])

# Adding a key/value pair
ab['Guido'] = 'guido@python.org'

# Deleting a key/value pair (pop is equivalent to `del` here)
ab.pop('Spammer')

print('\nThere are %d contacts in the address-book\n' % len(ab))
for name, address in ab.items():
    print('contact %s at %s' % (name, address))

if 'Guido' in ab:
    print("\nGuido's address is %s" % ab['Guido'])
|
import matplotlib
matplotlib.use('Agg')  # headless backend: render straight to file
import sys
import os
import os.path
import slack
import warnings
import h5py
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.lines as mlines
from matplotlib.collections import EllipseCollection

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
warnings.filterwarnings("ignore")
from import_toolkit.cluster import Cluster

# Plot the positions of all FOF groups in the BAHAMAS box, highlighting
# massive haloes (M500 > 1e13 Msun) with 5*r200 circles, then forward the
# figure to Slack.
cluster = Cluster(simulation_name='bahamas',
                  clusterID=0,
                  redshift='z000p000',
                  comovingframe=False,
                  fastbrowsing=True)
filepath = "/local/scratch/altamura/analysis_results/"
filename = "bahamas-box-5r200spheres.jpg"

fig = plt.figure(figsize=(15, 15))
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.set_xlabel(r'$x$ \quad [cMpc]')
ax.set_ylabel(r'$y$ \quad [cMpc]')

n_largeM = 0
n_total = 0
for counter, file in enumerate(cluster.groups_filePaths()):
    print(f"[+] Analysing eagle_subfind_tab file {counter}")
    with h5py.File(file, 'r') as group_file:
        cop = group_file['/FOF/GroupCentreOfPotential'][:]
        m500 = group_file['/FOF/Group_M_Crit500'][:]*10**10
        r200 = group_file['/FOF/Group_R_Crit200'][:]
    n_total += len(m500)

    # Massive haloes: red markers plus a circle of radius 5*r200 each.
    m_filter = np.where(m500 > 10**13)[0]
    n_largeM += len(m_filter)
    ax.scatter(cop[m_filter, 0], cop[m_filter, 1], marker='o', s=2, c='r', alpha=1)
    offsets = list(zip(cop[m_filter, 0], cop[m_filter, 1]))
    ax.add_collection(EllipseCollection(widths=r200[m_filter]*5, heights=r200[m_filter]*5, angles=0, units='xy',
                                        facecolors='r', offsets=offsets, alpha=0.3,
                                        transOffset=ax.transData))

    # Remaining low-mass haloes: faint black points only.
    m_filter = np.where(m500 < 10 ** 13)[0]
    ax.scatter(cop[m_filter, 0], cop[m_filter, 1], marker='o', s=2, c='k', alpha=0.02)

print('n_largeM', n_largeM)
print('n_total', n_total)
blue_star = mlines.Line2D([], [], color='k', marker='o', linestyle='None', markersize=10, label=r'$M_{500~crit} < 10^{13}\ M_\odot$')
red_square = mlines.Line2D([], [], color='r', marker='o', linestyle='None', markersize=10, label=r'$M_{500~crit} > 10^{13}\ M_\odot$')
plt.legend(handles=[blue_star, red_square])
plt.savefig(filepath+filename)

# Send files to Slack: init slack client with access token.
# SECURITY: the API token must never be hard-coded in source control.
# Read it from the environment instead (the token that used to live here
# is compromised and must be revoked in the Slack admin console).
print(f"[+] Forwarding (unknown) to the `#personal` Slack channel...")
slack_token = os.environ['SLACK_BOT_TOKEN']
client = slack.WebClient(token=slack_token)
response = client.files_upload(
    file=f"{filepath+filename}",
    initial_comment=f"This file was sent upon completion of the plot factory pipeline.\nAttachments: (unknown)",
    channels='#personal'
)
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Michaelpeng <michaelpeng@tencent.com>
# Date: October 20, 2011
"""
This is the test module for java_jar target.
"""
import blade_test
class TestJavaJar(blade_test.TargetTest):
    """Test java_jar """

    def setUp(self):
        """setup method. """
        self.doSetUp('test_java_jar/java', ':poppy_java_client',
                     generate_php=False)
        self.upper_target_path = 'test_java_jar'

    def testLoadBuildsNotNone(self):
        """Test direct targets and all command targets are not none. """
        pass

    def testGenerateRules(self):
        """Test that rules are generated correctly. """
        self.all_targets = self.blade.analyze_targets()
        self.rules_buf = self.blade.generate_build_rules()

        swig_library = (self.upper_target_path, 'poppy_client')
        java_client = (self.target_path, 'poppy_java_client')
        proto_library = (self.upper_target_path, 'rpc_option_proto')
        self.command_file = 'cmds.tmp'

        # Membership tests on the dict itself; no need to go through .keys().
        self.assertIn(swig_library, self.all_targets)
        self.assertIn(java_client, self.all_targets)
        self.assertIn(proto_library, self.all_targets)
        self.assertTrue(self.dryRun())

        # Captured command lines (and their 1-based positions) from the
        # dry-run scons output.
        com_proto_cpp_option = ''
        com_proto_java_option = ''
        com_proto_cpp_meta = ''
        com_proto_java_meta = ''
        com_proto_option_cc = ''
        com_proto_meta_cc = ''
        com_swig_python = ''
        com_swig_java = ''
        com_swig_python_cxx = ''
        com_swig_java_cxx = ''
        java_com_line = ''
        java_so_line = ''
        jar_line = ''
        java_com_idx = 0
        java_so_idx = 0
        jar_idx = 0
        # enumerate(..., 1) replaces the manual 1-based index counter.
        for index, line in enumerate(self.scons_output, 1):
            if 'protobuf/bin/protoc' in line:
                if 'cpp_out' in line:
                    if 'rpc_option.proto' in line:
                        com_proto_cpp_option = line
                    elif 'rpc_meta_info.proto' in line:
                        com_proto_cpp_meta = line
                if 'java_out' in line:
                    if 'rpc_option.proto' in line:
                        com_proto_java_option = line
                    elif 'rpc_meta_info.proto' in line:
                        com_proto_java_meta = line
            if 'rpc_option.pb.cc.o -c' in line:
                com_proto_option_cc = line
            if 'rpc_meta_info.pb.cc.o -c' in line:
                com_proto_meta_cc = line
            if 'swig -python' in line:
                com_swig_python = line
            if 'swig -java' in line:
                com_swig_java = line
            if 'poppy_client_pywrap.cxx.o -c' in line:
                com_swig_python_cxx = line
            if 'poppy_client_javawrap.cxx.o -c' in line:
                com_swig_java_cxx = line
            if 'javac -classpath' in line:
                java_com_line = line
                java_com_idx = index
            if 'libpoppy_client_java.so -m64' in line:
                java_so_line = line
                java_so_idx = index
            if 'jar cf' in line:
                jar_line = line
                jar_idx = index

        self.assertTrue(com_proto_cpp_option)
        self.assertTrue(com_proto_cpp_meta)
        self.assertTrue(com_proto_java_option)
        self.assertTrue(com_proto_java_meta)
        self.assertIn('-fPIC', com_proto_option_cc)
        self.assertNotIn('-Wall -Wextra', com_proto_option_cc)
        self.assertNotIn('-Wframe-larger-than=', com_proto_option_cc)
        self.assertNotIn('-Werror=overloaded-virtual', com_proto_option_cc)
        self.assertIn('-fPIC', com_proto_meta_cc)
        self.assertIn('poppy_client_pywrap.cxx', com_swig_python)
        self.assertIn('poppy_client_javawrap.cxx', com_swig_java)
        self.assertIn('-fno-omit-frame-pointer', com_swig_python_cxx)
        self.assertIn('-mcx16 -pipe -g', com_swig_python_cxx)
        self.assertIn('-DNDEBUG -D_FILE_OFFSET_BITS', com_swig_python_cxx)
        self.assertIn('-fno-omit-frame-pointer', com_swig_java_cxx)
        self.assertIn('-mcx16 -pipe -g', com_swig_java_cxx)
        self.assertIn('-DNDEBUG -D_FILE_OFFSET_BITS', com_swig_java_cxx)
        self.assertTrue(java_com_line)
        self.assertTrue(java_so_line)
        self.assertTrue(jar_line)
        self.assertIn('test_java_jar/java/lib/junit.jar', java_com_line)
        self.assertIn('com/soso/poppy/swig/*.java', java_com_line)
        self.assertIn('com/soso/poppy/*.java', java_com_line)
        whole_archive = ('--whole-archive build64_release/test_java_jar/'
                         'librpc_meta_info_proto.a build64_release/test_java_jar/'
                         'librpc_option_proto.a -Wl,--no-whole-archive')
        self.assertIn(whole_archive, java_so_line)
        # The jar must be produced after both compilation steps.
        self.assertGreater(jar_idx, java_com_idx)
        self.assertGreater(jar_idx, java_so_idx)


if __name__ == '__main__':
    blade_test.run(TestJavaJar)
|
import asyncio
from collections import namedtuple
from functools import partial
from logging import getLogger
from typing import Optional, Callable, Any
import aiormq
from aiormq.types import DeliveredMessage
from .exceptions import QueueEmpty
from .exchange import Exchange, ExchangeType_
from .message import IncomingMessage
from .tools import create_task, shield
log = getLogger(__name__)
# Type alias: the tag string identifying an active consumer on a channel.
ConsumerTag = str

# Counts the broker reports back for a queue declaration.
DeclarationResult = namedtuple(
    'DeclarationResult', ('message_count', 'consumer_count')
)
async def consumer(callback, msg: DeliveredMessage, *, no_ack, loop):
    """Wrap a raw delivered frame and dispatch it to *callback* as a task."""
    incoming = IncomingMessage(msg, no_ack=no_ack)
    return create_task(callback, incoming, loop=loop)
class Queue:
    """ AMQP queue abstraction """

    def __init__(self, connection, channel: aiormq.Channel, name,
                 durable, exclusive, auto_delete, arguments,
                 passive: bool = False):
        self.loop = connection.loop
        self._channel = channel
        self.name = name or ''
        self.durable = durable
        self.exclusive = exclusive
        self.auto_delete = auto_delete
        self.arguments = arguments
        self.passive = passive
        self.declaration_result = None  # type: aiormq.spec.Queue.DeclareOk
        # Serializes basic_get calls on this queue.
        self._get_lock = asyncio.Lock(loop=self.loop)

    @property
    def channel(self) -> aiormq.Channel:
        if self._channel is None:
            raise RuntimeError("Channel not opened")
        return self._channel

    def __str__(self):
        return "%s" % self.name

    def __repr__(self):
        return (
            "<Queue(%s): "
            "auto_delete=%s, "
            "durable=%s, "
            "exclusive=%s, "
            "arguments=%r>"
        ) % (
            self,
            self.auto_delete,
            self.durable,
            self.exclusive,
            self.arguments,
        )

    async def declare(self, timeout: int=None) -> aiormq.spec.Queue.DeclareOk:
        """ Declare queue.

        Honors the ``passive`` flag given at construction time: a passive
        declare only checks that the queue exists.

        :param timeout: execution timeout
        :return: :class:`None`
        """
        log.debug("Declaring queue: %r", self)
        # Go through the `channel` property (like every other method) so a
        # missing channel raises RuntimeError rather than AttributeError.
        self.declaration_result = await asyncio.wait_for(
            self.channel.queue_declare(
                queue=self.name, durable=self.durable,
                exclusive=self.exclusive, auto_delete=self.auto_delete,
                arguments=self.arguments, passive=self.passive,
            ), timeout=timeout, loop=self.loop
        )  # type: aiormq.spec.Queue.DeclareOk
        # Server-named queues come back with the broker-generated name.
        self.name = self.declaration_result.queue
        return self.declaration_result

    async def bind(
        self, exchange: ExchangeType_, routing_key: str=None, *,
        arguments=None, timeout: int=None
    ) -> aiormq.spec.Queue.DeclareOk:
        """ A binding is a relationship between an exchange and a queue.
        This can be simply read as: the queue is interested in messages
        from this exchange.

        Bindings can take an extra routing_key parameter. To avoid
        the confusion with a basic_publish parameter we're going to
        call it a binding key.

        :param exchange: :class:`aio_pika.exchange.Exchange` instance
        :param routing_key: routing key
        :param arguments: additional arguments (will be passed to `pika`)
        :param timeout: execution timeout
        :raises asyncio.TimeoutError:
            when the binding timeout period has elapsed.
        :return: :class:`None`
        """
        if routing_key is None:
            routing_key = self.name

        log.debug(
            "Binding queue %r: exchange=%r, routing_key=%r, arguments=%r",
            self, exchange, routing_key, arguments
        )

        return await asyncio.wait_for(
            self.channel.queue_bind(
                self.name,
                exchange=Exchange._get_exchange_name(exchange),
                routing_key=routing_key,
                arguments=arguments
            ), timeout=timeout, loop=self.loop
        )

    async def unbind(
        self, exchange: ExchangeType_, routing_key: str=None,
        arguments: dict=None, timeout: int=None
    ) -> aiormq.spec.Queue.UnbindOk:
        """ Remove binding from exchange for this :class:`Queue` instance

        :param exchange: :class:`aio_pika.exchange.Exchange` instance
        :param routing_key: routing key
        :param arguments: additional arguments (will be passed to `pika`)
        :param timeout: execution timeout
        :raises asyncio.TimeoutError:
            when the unbinding timeout period has elapsed.
        :return: :class:`None`
        """
        if routing_key is None:
            routing_key = self.name

        log.debug(
            "Unbinding queue %r: exchange=%r, routing_key=%r, arguments=%r",
            self, exchange, routing_key, arguments
        )

        return await asyncio.wait_for(
            self.channel.queue_unbind(
                queue=self.name,
                exchange=Exchange._get_exchange_name(exchange),
                routing_key=routing_key,
                arguments=arguments
            ), timeout=timeout, loop=self.loop
        )

    async def consume(
        self, callback: Callable[[IncomingMessage], Any], no_ack: bool = False,
        exclusive: bool = False, arguments: dict = None,
        consumer_tag=None, timeout=None
    ) -> ConsumerTag:
        """ Start to consuming the :class:`Queue`.

        :param timeout: :class:`asyncio.TimeoutError` will be raises when the
            Future was not finished after this time.
        :param callback: Consuming callback. Could be a coroutine.
        :param no_ack:
            if :class:`True` you don't need to call
            :func:`aio_pika.message.IncomingMessage.ack`
        :param exclusive:
            Makes this queue exclusive. Exclusive queues may only
            be accessed by the current connection, and are deleted
            when that connection closes. Passive declaration of an
            exclusive queue by other connections are not allowed.
        :param arguments: extended arguments for pika
        :param consumer_tag: optional consumer tag
        :raises asyncio.TimeoutError:
            when the consuming timeout period has elapsed.
        :return str: consumer tag :class:`str`
        """
        log.debug("Start to consuming queue: %r", self)

        return (await asyncio.wait_for(
            self.channel.basic_consume(
                queue=self.name,
                consumer_callback=partial(
                    consumer, callback, no_ack=no_ack, loop=self.loop
                ),
                exclusive=exclusive,
                no_ack=no_ack,
                arguments=arguments,
                consumer_tag=consumer_tag,
            ),
            timeout=timeout, loop=self.loop
        )).consumer_tag

    async def cancel(self, consumer_tag: ConsumerTag, timeout=None,
                     nowait: bool=False) -> aiormq.spec.Basic.CancelOk:
        """ This method cancels a consumer. This does not affect already
        delivered messages, but it does mean the server will not send any more
        messages for that consumer. The client may receive an arbitrary number
        of messages in between sending the cancel method and receiving the
        cancel-ok reply. It may also be sent from the server to the client in
        the event of the consumer being unexpectedly cancelled (i.e. cancelled
        for any reason other than the server receiving the corresponding
        basic.cancel from the client). This allows clients to be notified of
        the loss of consumers due to events such as queue deletion.

        :param consumer_tag:
            consumer tag returned by :func:`~aio_pika.Queue.consume`
        :param timeout: execution timeout
        :param bool nowait: Do not expect a Basic.CancelOk response
        :return: Basic.CancelOk when operation completed successfully
        """
        return await asyncio.wait_for(
            self.channel.basic_cancel(
                consumer_tag=consumer_tag,
                nowait=nowait
            ),
            timeout=timeout, loop=self.loop
        )

    async def get(
        self, *, no_ack=False, fail=True, timeout=5
    ) -> Optional[IncomingMessage]:
        """ Get message from the queue.

        :param no_ack: if :class:`True` you don't need to call
            :func:`aio_pika.message.IncomingMessage.ack`
        :param timeout: execution timeout
        :param fail: Should return :class:`None` instead of raise an
            exception :class:`aio_pika.exceptions.QueueEmpty`.
        :return: :class:`aio_pika.message.IncomingMessage`
        """
        msg = await asyncio.wait_for(self.channel.basic_get(
            self.name, no_ack=no_ack
        ), timeout=timeout, loop=self.loop
        )  # type: Optional[DeliveredMessage]

        if msg is None:
            if fail:
                raise QueueEmpty
            return

        return IncomingMessage(msg, no_ack=no_ack)

    async def purge(
        self, no_wait=False, timeout=None
    ) -> aiormq.spec.Queue.PurgeOk:
        """ Purge all messages from the queue.

        :param no_wait: no wait response
        :param timeout: execution timeout
        :return: :class:`None`
        """
        log.info("Purging queue: %r", self)

        return await asyncio.wait_for(
            self.channel.queue_purge(
                self.name,
                nowait=no_wait,
            ), timeout=timeout, loop=self.loop
        )

    async def delete(self, *, if_unused=True, if_empty=True,
                     timeout=None) -> aiormq.spec.Queue.DeleteOk:
        """ Delete the queue.

        :param if_unused: Perform delete only when unused
        :param if_empty: Perform delete only when empty
        :param timeout: execution timeout
        :return: :class:`None`
        """
        log.info("Deleting %r", self)

        # NOTE: queue_delete answers with Queue.DeleteOk; the annotation
        # previously said DeclareOk.
        result = await asyncio.wait_for(
            self.channel.queue_delete(
                self.name, if_unused=if_unused, if_empty=if_empty
            ), timeout=timeout, loop=self.loop
        )
        return result

    def __aiter__(self) -> 'QueueIterator':
        return self.iterator()

    def iterator(self, **kwargs) -> 'QueueIterator':
        """ Returns an iterator for async for expression.

        Full example:

        .. code-block:: python

            import aio_pika

            async def main():
                connection = await aio_pika.connect()

                async with connection:
                    channel = await connection.channel()

                    queue = await channel.declare_queue('test')

                    async with queue.iterator() as q:
                        async for message in q:
                            print(message.body)

        When your program runs with run_forever the iterator will be closed
        in background. In this case the context processor for iterator might
        be skipped and the queue might be used in the "async for"
        expression directly.

        .. code-block:: python

            import aio_pika

            async def main():
                connection = await aio_pika.connect()

                async with connection:
                    channel = await connection.channel()

                    queue = await channel.declare_queue('test')

                    async for message in queue:
                        print(message.body)

        :return: QueueIterator
        """
        return QueueIterator(self, **kwargs)
class QueueIterator:
    # Async iterator over a queue's messages; obtained via Queue.iterator()
    # or `async for ... in queue`. Buffers delivered messages in a local
    # asyncio.Queue until __anext__ consumes them.

    @shield
    async def close(self):
        """Cancel the consumer and requeue any locally buffered messages."""
        if not self._consumer_tag:
            return

        await self._amqp_queue.cancel(self._consumer_tag)
        self._consumer_tag = None

        def get_msg():
            # Non-blocking pop from the local buffer; None when empty.
            try:
                return self._queue.get_nowait()
            except asyncio.QueueEmpty:
                return

        # Reject all messages
        # (only while the underlying channel is still open).
        msg = get_msg()  # type: IncomingMessage
        while msg and not self._amqp_queue.channel.closing.done():
            await msg.reject(requeue=True)
            msg = get_msg()  # type: IncomingMessage

    def __str__(self):
        return 'queue[%s](...)' % self._amqp_queue.name

    def __init__(self, queue: Queue, **kwargs):
        """:param queue: the Queue to iterate; kwargs go to Queue.consume()."""
        self.loop = queue.loop
        self._amqp_queue = queue
        # Local buffer between the broker consumer and __anext__.
        self._queue = asyncio.Queue(loop=self.loop)
        self._consumer_tag = None
        self._consume_kwargs = kwargs

    async def on_message(self, message: IncomingMessage):
        # Consumer callback: stash the message for __anext__.
        await self._queue.put(message)

    async def consume(self):
        # Start the broker-side consumer feeding on_message.
        self._consumer_tag = await self._amqp_queue.consume(
            self.on_message,
            **self._consume_kwargs
        )

    def __aiter__(self):
        return self

    async def __aenter__(self):
        # Lazily start consuming on first entry.
        if self._consumer_tag is None:
            await self.consume()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()

    async def __anext__(self) -> IncomingMessage:
        if not self._consumer_tag:
            await self.consume()
        try:
            return await self._queue.get()
        except asyncio.CancelledError:
            # Iteration was cancelled: tear down the consumer cleanly.
            await self.close()
            raise
__all__ = 'Queue', 'QueueIterator', 'DeclarationResult', 'ConsumerTag'
|
from django.conf import settings
# OAuth2 provider configuration; every value can be overridden from the
# project's Django settings, with sensible defaults otherwise.
CLIENT_ID = getattr(settings, 'OAUTH2_APP_ID', None)
CLIENT_SECRET = getattr(settings, 'OAUTH2_API_SECRET', None)
SCOPE = getattr(settings, 'OAUTH2_SCOPE', 'user:email')
API_DOMAIN = getattr(settings, 'OAUTH2_API_DOMAIN', 'oauth2.com/api')
BASE_DOMAIN = getattr(settings, 'OAUTH2_BASE_DOMAIN', 'oauth2.com')

# Endpoints derived from the configured base domain.
ACCESS_TOKEN_URL = 'https://%s/oauth/v2/token' % BASE_DOMAIN
AUTHORIZE_URL = 'https://%s/oauth/v2/auth' % BASE_DOMAIN
|
import os
from django.conf import settings, global_settings
from oscar import OSCAR_CORE_APPS, OSCAR_MAIN_TEMPLATE_DIR
def configure(nose_args=None):
    """Configure Django settings for the oscar test suite.

    Does nothing when settings are already configured.

    :param nose_args: optional argument list forwarded to django-nose.
    """
    if not settings.configured:
        from oscar.defaults import OSCAR_SETTINGS

        # Helper to resolve a path relative to this file's directory
        # (a def instead of an assigned lambda, per PEP 8 E731).
        def location(x):
            return os.path.join(
                os.path.dirname(os.path.realpath(__file__)), x)

        settings.configure(
            DATABASES={
                'default': {
                    'ENGINE': 'django.db.backends.sqlite3',
                    'NAME': ':memory:',
                }
            },
            INSTALLED_APPS=[
                'django.contrib.auth',
                'django.contrib.admin',
                'django.contrib.contenttypes',
                'django.contrib.sessions',
                'django.contrib.sites',
                'django.contrib.flatpages',
                'sorl.thumbnail',
            ] + OSCAR_CORE_APPS,
            TEMPLATE_CONTEXT_PROCESSORS=(
                "django.contrib.auth.context_processors.auth",
                "django.core.context_processors.request",
                "django.core.context_processors.debug",
                "django.core.context_processors.i18n",
                "django.core.context_processors.media",
                "django.core.context_processors.static",
                "django.contrib.messages.context_processors.messages",
                'oscar.apps.search.context_processors.search_form',
                'oscar.apps.customer.notifications.context_processors.notifications',
                'oscar.apps.promotions.context_processors.promotions',
                'oscar.apps.checkout.context_processors.checkout',
            ),
            TEMPLATE_DIRS=(
                location('templates'),
                OSCAR_MAIN_TEMPLATE_DIR,
            ),
            MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES + (
                'oscar.apps.basket.middleware.BasketMiddleware',
            ),
            AUTHENTICATION_BACKENDS=(
                'oscar.apps.customer.auth_backends.Emailbackend',
                'django.contrib.auth.backends.ModelBackend',
            ),
            HAYSTACK_CONNECTIONS={
                'default': {
                    'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
                }
            },
            ROOT_URLCONF='tests.site.urls',
            LOGIN_REDIRECT_URL='/accounts/',
            DEBUG=False,
            SITE_ID=1,
            APPEND_SLASH=True,
            NOSE_ARGS=nose_args,
            **OSCAR_SETTINGS
        )
|
# import skimage.external.tifffile as tifffile
import tifffile
import multiprocessing
import numpy as np
def imread(path):
    """ Reads Tiff file into a numpy array in memory.

    :param path: path to tif image to open
    :return: numpy ndarray with image data
    """
    # Delegates to tifffile, which handles multi-page TIFFs transparently.
    return tifffile.imread(files=path)
def imsave(path, data, compress=1):
    """ Saves numpy array as a TIF image.

    :param path: path to tif image to create / overwrite
    :param data: numpy ndarray with image data
    :param compress: int (0-9) indicating the degree of lossless compression
    """
    tifffile.imsave(file=path, data=data, compress=compress)
def imread_parallel(paths, nb_workers):
    """ Reads Tiff files into a numpy array in memory.

    :param paths: A list of tiff paths to read (order is preserved)
    :param nb_workers: An int indicating how many parallel processes to use
    :return: numpy ndarray stacking all images, using the first image's dtype
    """
    with multiprocessing.Pool(nb_workers) as pool:
        images = pool.map(imread, paths)
    # Use the dtype of the first decoded image; previously the first file
    # was read twice (once serially just to probe its dtype).
    return np.array(images, images[0].dtype)
|
# Notebook bootstrap: plotting, tensor and computer-vision dependencies.
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from tqdm.notebook import tqdm

print('Imports complete')
|
import shlex
import subprocess
from django.core.management.base import BaseCommand
from django.utils import autoreload
def restart_celery(*args, **kwargs):
    """Force-stop any running celery workers, then launch a fresh one."""
    kill_cmd = 'pkill -9 celery'
    start_cmd = 'celery -A screendoor_app worker -l info'
    subprocess.call(shlex.split(kill_cmd))
    subprocess.call(shlex.split(start_cmd))
class Command(BaseCommand):
    """Management command that runs a celery worker under Django's
    autoreloader, restarting it whenever source files change."""

    def handle(self, *args, **options):
        self.stdout.write('Starting celery worker with autoreload...')
        # run_with_reloader re-invokes restart_celery on every file change.
        autoreload.run_with_reloader(restart_celery, args=None, kwargs=None)
|
import justpy as jp
from typing import Any, Awaitable, Callable, Dict, List, Optional, Union
from .value_element import ValueElement
class ChoiceElement(ValueElement):
    """Shared base for widgets whose value is picked from a set of options.

    ``options`` may be a plain list (each entry serves as both label and
    value) or a dict mapping values (keys) to display labels.
    """

    def __init__(self,
                 view: jp.HTMLBaseComponent,
                 options: Union[List, Dict],
                 *,
                 value: Any,
                 on_change: Optional[Union[Callable, Awaitable]] = None,
                 ):
        # isinstance() against the builtin type: checking typing.List is
        # deprecated and behaves identically here.
        if isinstance(options, list):
            view.options = [{'label': option, 'value': option} for option in options]
        else:
            # Loop variable renamed to `label` so it does not shadow the
            # `value` parameter forwarded to the superclass below.
            view.options = [{'label': label, 'value': key} for key, label in options.items()]

        super().__init__(view, value=value, on_change=on_change)
|
#
# PySNMP MIB module CYAN-TENGPORT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CYAN-TENGPORT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:34:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): pysmi-generated MIB module. `mibBuilder` is injected by the
# pysnmp MIB loader when this file is executed, so it is intentionally
# undefined here; the generated statements are left exactly as produced.
# --- Base ASN.1/SMI types and textual conventions imported from other MIBs ---
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion")
cyanEntityModules, = mibBuilder.importSymbols("CYAN-MIB", "cyanEntityModules")
CyanOffOnTc, CyanOpStateTc, CyanTPConnectionStateTc, CyanSecServiceStateTc, CyanAdminStateTc, CyanXGOSignalTypeTc, CyanOpStateQualTc, CyanEnDisabledTc, CyanTxControlTc, CyanLoopbackControlTc = mibBuilder.importSymbols("CYAN-TC-MIB", "CyanOffOnTc", "CyanOpStateTc", "CyanTPConnectionStateTc", "CyanSecServiceStateTc", "CyanAdminStateTc", "CyanXGOSignalTypeTc", "CyanOpStateQualTc", "CyanEnDisabledTc", "CyanTxControlTc", "CyanLoopbackControlTc")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
MibIdentifier, Bits, iso, ObjectIdentity, NotificationType, Counter64, TimeTicks, Counter32, ModuleIdentity, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, IpAddress, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Bits", "iso", "ObjectIdentity", "NotificationType", "Counter64", "TimeTicks", "Counter32", "ModuleIdentity", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "IpAddress", "Unsigned32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# --- Module identity and revision/contact metadata ---
cyanTENGPortModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150))
cyanTENGPortModule.setRevisions(('2014-12-07 05:45',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: cyanTENGPortModule.setRevisionsDescriptions(('Release 6.0 build 1416362081',))
if mibBuilder.loadTexts: cyanTENGPortModule.setLastUpdated('201412070545Z')
if mibBuilder.loadTexts: cyanTENGPortModule.setOrganization('Cyan, Inc.')
if mibBuilder.loadTexts: cyanTENGPortModule.setContactInfo(' E-mail: support@cyaninc.com Postal: Cyan, Inc. 1390 N. McDowell Blvd., # G-327 Petaluma, CA 94954 USA Tel: +1-707-735-2300')
if mibBuilder.loadTexts: cyanTENGPortModule.setDescription('MIB module for Ten Gig Port')
# --- Table structure: one entry (row) per ten-gig port, indexed by
# --- shelf / module / transceiver / port ids ---
cyanTENGPortMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1))
cyanTENGPortTable = MibTable((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1), )
if mibBuilder.loadTexts: cyanTENGPortTable.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortTable.setDescription('A list of TENGPort entries.')
cyanTENGPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1), ).setIndexNames((0, "CYAN-TENGPORT-MIB", "cyanTENGPortShelfId"), (0, "CYAN-TENGPORT-MIB", "cyanTENGPortModuleId"), (0, "CYAN-TENGPORT-MIB", "cyanTENGPortXcvrId"), (0, "CYAN-TENGPORT-MIB", "cyanTENGPortPortId"))
if mibBuilder.loadTexts: cyanTENGPortEntry.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortEntry.setDescription('An entry of TENGPort.')
# --- Table columns: four index ids (1-4) followed by read-only
# --- state/config/power columns (5-19) ---
cyanTENGPortShelfId = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255)))
if mibBuilder.loadTexts: cyanTENGPortShelfId.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortShelfId.setDescription('Shelf Id')
cyanTENGPortModuleId = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 2), Unsigned32())
if mibBuilder.loadTexts: cyanTENGPortModuleId.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortModuleId.setDescription('Module Id')
cyanTENGPortXcvrId = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 3), Unsigned32())
if mibBuilder.loadTexts: cyanTENGPortXcvrId.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortXcvrId.setDescription('Transceiver Id')
cyanTENGPortPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 4), Unsigned32())
if mibBuilder.loadTexts: cyanTENGPortPortId.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortPortId.setDescription('Port Id')
cyanTENGPortAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 5), CyanAdminStateTc()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortAdminState.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortAdminState.setDescription('Administrative state')
cyanTENGPortAutoinserviceSoakTimeSec = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortAutoinserviceSoakTimeSec.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortAutoinserviceSoakTimeSec.setDescription('Auto-In-Service soak time')
cyanTENGPortConnectionState = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 7), CyanTPConnectionStateTc()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortConnectionState.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortConnectionState.setDescription('Termination point connection state')
cyanTENGPortDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortDescription.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortDescription.setDescription('Description')
cyanTENGPortExternalFiberMultishelfLink = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 9), CyanEnDisabledTc()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortExternalFiberMultishelfLink.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortExternalFiberMultishelfLink.setDescription('Assign a port to an inter-node link')
cyanTENGPortExternalFiberRemotePort = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 45))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortExternalFiberRemotePort.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortExternalFiberRemotePort.setDescription('Remote connection point of the inter-node link')
cyanTENGPortLoopbackControl = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 11), CyanLoopbackControlTc()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortLoopbackControl.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortLoopbackControl.setDescription('Port loopback control')
cyanTENGPortOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 12), CyanOpStateTc()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortOperState.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortOperState.setDescription('Primary Operation State')
cyanTENGPortOperStateQual = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 13), CyanOpStateQualTc()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortOperStateQual.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortOperStateQual.setDescription('Operation state qualifier')
cyanTENGPortRxPwr = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortRxPwr.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortRxPwr.setDescription('RX Power')
cyanTENGPortSecServState = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 15), CyanSecServiceStateTc()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortSecServState.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortSecServState.setDescription('Secondary service state')
cyanTENGPortSignalType = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 16), CyanXGOSignalTypeTc()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortSignalType.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortSignalType.setDescription('Client signal type or port mode')
cyanTENGPortTransmitControl = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 17), CyanTxControlTc()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortTransmitControl.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortTransmitControl.setDescription('Transmitter control')
cyanTENGPortTxPwr = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortTxPwr.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortTxPwr.setDescription('TX Power')
cyanTENGPortTxStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 1, 1, 1, 19), CyanOffOnTc()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cyanTENGPortTxStatus.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortTxStatus.setDescription('Transmitter status')
# --- Conformance: object group and module compliance statements ---
cyanTENGPortObjectGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 20)).setObjects(("CYAN-TENGPORT-MIB", "cyanTENGPortAdminState"), ("CYAN-TENGPORT-MIB", "cyanTENGPortAutoinserviceSoakTimeSec"), ("CYAN-TENGPORT-MIB", "cyanTENGPortConnectionState"), ("CYAN-TENGPORT-MIB", "cyanTENGPortDescription"), ("CYAN-TENGPORT-MIB", "cyanTENGPortExternalFiberMultishelfLink"), ("CYAN-TENGPORT-MIB", "cyanTENGPortExternalFiberRemotePort"), ("CYAN-TENGPORT-MIB", "cyanTENGPortLoopbackControl"), ("CYAN-TENGPORT-MIB", "cyanTENGPortOperState"), ("CYAN-TENGPORT-MIB", "cyanTENGPortOperStateQual"), ("CYAN-TENGPORT-MIB", "cyanTENGPortRxPwr"), ("CYAN-TENGPORT-MIB", "cyanTENGPortSecServState"), ("CYAN-TENGPORT-MIB", "cyanTENGPortSignalType"), ("CYAN-TENGPORT-MIB", "cyanTENGPortTransmitControl"), ("CYAN-TENGPORT-MIB", "cyanTENGPortTxPwr"), ("CYAN-TENGPORT-MIB", "cyanTENGPortTxStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cyanTENGPortObjectGroup = cyanTENGPortObjectGroup.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortObjectGroup.setDescription('Group of objects that comes with TENGPort module')
cyanTENGPortCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 28533, 5, 30, 150, 30)).setObjects(("CYAN-TENGPORT-MIB", "cyanTENGPortObjectGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cyanTENGPortCompliance = cyanTENGPortCompliance.setStatus('current')
if mibBuilder.loadTexts: cyanTENGPortCompliance.setDescription('The basic info needed to be a cyan TENGPort')
# --- Export the defined symbols so other MIB modules can import them ---
mibBuilder.exportSymbols("CYAN-TENGPORT-MIB", PYSNMP_MODULE_ID=cyanTENGPortModule, cyanTENGPortCompliance=cyanTENGPortCompliance, cyanTENGPortAutoinserviceSoakTimeSec=cyanTENGPortAutoinserviceSoakTimeSec, cyanTENGPortExternalFiberMultishelfLink=cyanTENGPortExternalFiberMultishelfLink, cyanTENGPortLoopbackControl=cyanTENGPortLoopbackControl, cyanTENGPortOperStateQual=cyanTENGPortOperStateQual, cyanTENGPortTransmitControl=cyanTENGPortTransmitControl, cyanTENGPortOperState=cyanTENGPortOperState, cyanTENGPortAdminState=cyanTENGPortAdminState, cyanTENGPortTxStatus=cyanTENGPortTxStatus, cyanTENGPortModuleId=cyanTENGPortModuleId, cyanTENGPortConnectionState=cyanTENGPortConnectionState, cyanTENGPortShelfId=cyanTENGPortShelfId, cyanTENGPortExternalFiberRemotePort=cyanTENGPortExternalFiberRemotePort, cyanTENGPortMibObjects=cyanTENGPortMibObjects, cyanTENGPortEntry=cyanTENGPortEntry, cyanTENGPortXcvrId=cyanTENGPortXcvrId, cyanTENGPortPortId=cyanTENGPortPortId, cyanTENGPortSecServState=cyanTENGPortSecServState, cyanTENGPortTable=cyanTENGPortTable, cyanTENGPortSignalType=cyanTENGPortSignalType, cyanTENGPortDescription=cyanTENGPortDescription, cyanTENGPortTxPwr=cyanTENGPortTxPwr, cyanTENGPortObjectGroup=cyanTENGPortObjectGroup, cyanTENGPortModule=cyanTENGPortModule, cyanTENGPortRxPwr=cyanTENGPortRxPwr)
|
# NOTE(review): this file's content had been pasted three times with
# statements fused together ("print(xbox)from models..."), which is a
# SyntaxError. Restored to a single, syntactically valid copy.
from models.produto import Produto

# Demo: create two products and display them.
ps4 = Produto('Playstation 4', 1789.44)
xbox = Produto('Xbox 360', 1699.99)

print(ps4, '\n')
print(xbox)
import json
from collections import defaultdict
from pkg_resources import resource_filename
import discord
from discord.ext import commands, tasks
class Voting(commands.Cog):
    """Discord cog that collects votes (as reactions on one message) on who
    the mafia is and awards points to players once everyone has voted."""

    def __init__(self, bot, mafia, players, voting_options, message, losing_team, winning_team,
                 villagers):
        self.bot = bot
        self.mafia = mafia
        self.players = players
        # Map from emoji to mention
        self.voting_options = voting_options
        self.message = message
        self.losing_team = losing_team
        self.winning_team = winning_team
        self.villagers = villagers
        # Start polling the vote message as soon as the cog is created.
        self.check_votes.start()

    @tasks.loop(seconds=1.0)
    async def check_votes(self):
        """Poll the vote message once per second; when the reaction count
        reaches 2 * number of players (NOTE(review): presumably one seed
        reaction per option plus one vote per player — confirm), tally
        the points, post them, and tear the cog down."""
        message = await self.message.channel.fetch_message(self.message.id)
        total_reactions = sum(x.count for x in message.reactions)
        if total_reactions >= 2 * len(self.players):
            # For each option: the non-bot users who reacted with its emoji.
            votes = {
                self.voting_options[reaction.emoji]: list(filter(lambda x: not x.bot, await reaction.users().flatten()))
                for reaction in message.reactions}
            points = self.calculate_points(votes)
            embed = discord.Embed()
            # NOTE(review): the loop target rebinds the name `points` (the
            # dict) to each user's score; harmless because .items() is
            # evaluated once, but the inner variable deserves a rename.
            for (user, points) in points.items():
                embed.add_field(name="**{}**".format(user.name), value=points)
            await message.channel.send("**Points:**", embed=embed)
            self.bot.remove_cog('Voting')
            self.check_votes.stop()

    @commands.Cog.listener()
    async def on_reaction_add(self, reaction, user):
        """Enforce one vote per user and reject emojis that are not options."""
        if user.bot:
            return
        if reaction.message.id != self.message.id:
            return
        # Count how many reactions on the vote message this user has placed.
        votes = len([reaction for reaction in reaction.message.reactions if user in await reaction.users().flatten()])
        if votes > 1:
            await reaction.remove(user)
            return
        if reaction.emoji not in self.voting_options:
            await reaction.remove(user)
            return

    def calculate_points(self, votes):
        """Compute each player's score from the vote tally.

        :param votes: dict mapping a voted-for mention to the list of users
            who voted for them.
        :return: defaultdict mapping user -> accumulated points.
        """
        points = defaultdict(int)
        # Point values per role are configured in config/points.json.
        with open(resource_filename(__name__, "config/points.json")) as f:
            point_values = json.load(f)
        villager_point_values = point_values['villager']
        mafia_point_values = point_values['mafia']
        for m in self.mafia:
            # Guessed mafia
            for player in votes[m]:
                if player not in self.mafia:
                    points[player] += villager_point_values['guessedMafia']
            # Mafia not chosen in majority
            if len(votes[m]) * 2 < len(self.players):
                points[m] += mafia_point_values['notKilled']
            # Killed mafia
            else:
                for villager in self.villagers:
                    points[villager] += villager_point_values['killedMafia']
            # No votes against mafia
            if len(votes[m]) == 0:
                points[m] += mafia_point_values['noVotesAgainst']
        # Mafia losing game
        for player in self.losing_team:
            if player in self.mafia:
                points[player] += mafia_point_values['teamLost']
        # Winning game
        for player in self.winning_team:
            if player not in self.mafia:
                points[player] += villager_point_values['teamWon']
            if player in self.mafia:
                points[player] += mafia_point_values['teamWon']
                points[player] *= mafia_point_values['teamWonMultiplier']
        return points
|
from .wrappers import _linear
from .wrappers import _trees
# `__all__` (not `all`, which shadowed the builtin of the same name and had
# no star-import effect) declares the package's public API.
__all__ = ('_linear',
           '_trees')
|
# Can be used for BFS
from collections import deque
def possible_bipartition(dislikes):
    """Return True if the graph can be split into two groups so that no
    pair of disliking (adjacent) nodes shares a group.

    ``dislikes`` is an adjacency list: dislikes[i] lists node i's neighbors.

    Fixes over the previous version:
    - every connected component is checked (the old code only explored the
      component reachable from node 0);
    - no IndexError when the highest-numbered node has no neighbors (the
      old code blindly enqueued x + 1);
    - the BFS start node of each component is colored too, so conflicts
      involving it are detected.

    Time Complexity: O(V + E)
    Space Complexity: O(V)
    """
    if not dislikes:
        return True
    color = {}  # node -> 0 or 1 (the two partitions)
    for start in range(len(dislikes)):
        if start in color:
            continue
        # New connected component: color its root arbitrarily and BFS.
        color[start] = 0
        queue = deque([start])
        while queue:
            node = queue.popleft()
            for neighbor in dislikes[node]:
                if neighbor not in color:
                    color[neighbor] = 1 - color[node]
                    queue.append(neighbor)
                elif color[neighbor] == color[node]:
                    # Two disliking nodes forced into the same partition.
                    return False
    return True
|
charset = range(10)
# Enumerate every three-digit combination (0 0 0 through 9 9 9), one per line.
for hundreds in charset:
    for tens in charset:
        for units in charset:
            print(hundreds, tens, units)
|
import argparse
import json
import random
import sys
if __name__ == '__main__':
    # Modernized from Python 2 (print statements, indexable dict.keys()) to
    # Python 3; output format is unchanged.
    parser = argparse.ArgumentParser(description='Harvests Watchtower SDK call execution time information from GAE backups.')
    parser.add_argument('--file', '-f', dest='file', default=None)
    parser.add_argument('--mock_total', '-m', dest='mock_total', action='store_true', default=False)
    args = parser.parse_args()
    if not args.file:
        print('File argument is required')
        sys.exit(1)
    # Context manager guarantees the handle is closed even if json.load fails.
    with open(args.file, 'r') as fp:
        records = json.load(fp)
    # Header row: sorted call names taken from a random record, plus 'Total'.
    sample_key = random.choice(list(records.keys()))
    calls = sorted(records[sample_key].keys())
    for call in calls:
        print(call, end=' ')
    print('Total')
    # One row per record key, values in the same call order as the header.
    for key in sorted(records.keys()):
        data = records[key]
        for call in calls:
            print(data[call], end=' ')
        if args.mock_total:
            # Optionally jitter the total to mock realistic measurements.
            print(sum(data.values()) + random.randint(0, 20))
        else:
            print(sum(data.values()))
|
import math
import torch
from torch import nn
import torch.utils.model_zoo as model_zoo
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a padded 3x3 convolution without bias (the ResNet workhorse)."""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=1,
                     bias=False)
class BasicBlock(nn.Module):
    """Residual block: two 3x3 convs with BN/ReLU plus an identity (or
    downsampled) skip connection."""

    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Skip branch: identity, or a projection when shape/stride changes.
        identity = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += identity
        return self.relu(y)
class Bottleneck(nn.Module):
    """Residual bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4) with a
    skip connection."""

    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Skip branch: identity, or a projection when shape/stride changes.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """ResNet feature extractor: stem + 4 residual stages, no pooling/FC head.

    Args:
        block: residual block class (e.g. BasicBlock or Bottleneck).
        layers: number of blocks in each of the four stages.
        in_channels: number of input image channels.
    """

    def __init__(self, block, layers, in_channels=3):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Kaiming-style normal init for convs; constant init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; project the skip branch when the
        stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return the deepest (stage-4) feature map.

        Debug `print(x.size())` calls were removed from the forward pass.
        """
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
# ----
def resnet18(**kwargs):
    """Build a ResNet-18 backbone (BasicBlock, stages [2, 2, 2, 2])."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
    """Build a ResNet-34 backbone (BasicBlock, stages [3, 4, 6, 3])."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def resnet50(**kwargs):
    """Build a ResNet-50 backbone (Bottleneck, stages [3, 4, 6, 3])."""
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet101(**kwargs):
    """Build a ResNet-101 backbone (Bottleneck, stages [3, 4, 23, 3])."""
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
def resnet152(**kwargs):
    """Build a ResNet-152 backbone (Bottleneck, stages [3, 8, 36, 3])."""
    return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
# Encoder registry: per-stage output channel counts ('filters'), the
# constructor used to build the backbone ('init_op'), and the pretrained
# ImageNet weight URL ('url') for each supported ResNet encoder.
encoder_params = {
    'resnet34':
        {'filters': [64, 64, 128, 256, 512],
         'init_op': resnet34,
         'url': model_urls['resnet34']},
    'resnet50':
        {'filters': [64, 256, 512, 1024, 2048],
         'init_op': resnet50,
         'url': model_urls['resnet50']},
    'resnet101':
        {'filters': [64, 256, 512, 1024, 2048],
         'init_op': resnet101,
         'url': model_urls['resnet101']},
}
class ConvBottleneck(nn.Module):
    """Fuse decoder and encoder features: concat channels, 3x3 conv, ReLU."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.seq = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, dec, enc):
        fused = torch.cat([dec, enc], dim=1)
        return self.seq(fused)
class PlusBottleneck(nn.Module):
    """Fuse decoder and encoder features by elementwise addition."""

    def __init__(self, in_channels, out_channels):
        # Channel arguments exist only for interface parity with
        # ConvBottleneck; elementwise addition needs no parameters.
        super().__init__()

    def forward(self, dec, enc):
        return enc + dec
class UnetDecoderBlock(nn.Module):
    """Decoder stage: upsample x2, then refine with a 3x3 conv + ReLU."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        upsample = nn.Upsample(scale_factor=2)
        conv = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        activation = nn.ReLU(inplace=True)
        self.layer = nn.Sequential(upsample, conv, activation)

    def forward(self, x):
        return self.layer(x)
class AbstractModel(nn.Module):
    """Base model providing weight initialization and pretrained-encoder
    loading helpers."""

    def _initialize_weights(self):
        """Kaiming He normal init for (transposed) convs; unit weight and
        zero bias for batch norms."""
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                # Kaiming He normal initialization
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def initialize_encoder(self, model, model_url):
        """Load pretrained weights from `model_url` into `model`.

        Only keys present in the target model are kept; the pretrained
        subset is merged into the model's own state dict before loading.
        (Calling `load_state_dict` directly with the filtered partial dict
        raised RuntimeError for every missing key, defeating the filter.)
        """
        pretrained_dict = model_zoo.load_url(model_url)
        model_dict = model.state_dict()
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
def _get_layers_params(layers):
    """Flatten the parameters of every layer in `layers` into one list."""
    params = []
    for layer in layers:
        params.extend(layer.parameters())
    return params
class EncoderDecoder(AbstractModel):
    """U-Net-style segmentation model: a (optionally pretrained) encoder
    with a decoder that upsamples back to input resolution, fusing encoder
    skip connections via bottleneck modules."""

    def __init__(self, num_classes, num_channels=3, encoder_name='resnet34'):
        super().__init__()
        self.filters = encoder_params[encoder_name]['filters']
        self.num_channels = num_channels
        # Subclasses may pre-set bottleneck_type before calling this
        # __init__; default fuses skips by concat + conv.
        if not hasattr(self, 'bottleneck_type'):
            self.bottleneck_type = ConvBottleneck
        self.bottlenecks = nn.ModuleList([self.bottleneck_type(f * 2, f) for f in reversed(self.filters[:-1])])
        self.decoder_stages = nn.ModuleList([self.get_decoder(idx) for idx in range(1, len(self.filters))])
        self.last_upsample = UnetDecoderBlock(self.filters[0], self.filters[0] // 2)
        self.final = self.make_final_classifier(self.filters[0] // 2, num_classes)
        # Random-init the decoder BEFORE registering the encoder stages, so
        # pretrained encoder weights loaded below are not overwritten.
        self._initialize_weights()
        encoder = encoder_params[encoder_name]['init_op']()
        self.encoder_stages = nn.ModuleList([self.get_encoder(encoder, idx) for idx in range(len(self.filters))])
        # Pretrained weights only make sense for 3-channel (RGB) input.
        if num_channels == 3 and encoder_params[encoder_name]['url'] is not None:
            self.initialize_encoder(encoder, encoder_params[encoder_name]['url'])

    # noinspection PyCallingNonCallable
    def forward(self, x):
        # Encoder
        enc_results = []
        for idx, stage in enumerate(self.encoder_stages):
            x = stage(x)
            # Keep every stage's output (except the deepest) as a skip.
            if idx < len(self.encoder_stages) - 1:
                enc_results.append(x.clone())
        # Decoder: walk stages deepest-first, fusing the matching skip.
        for idx, bottleneck in enumerate(self.bottlenecks):
            rev_idx = - (idx + 1)
            x = self.decoder_stages[rev_idx](x)
            x = bottleneck(x, enc_results[rev_idx])
        x = self.last_upsample(x)
        f = self.final(x)
        return f

    def get_decoder(self, layer):
        """Decoder stage `layer`: upsample from filters[layer] to filters[layer-1]."""
        return UnetDecoderBlock(self.filters[layer], self.filters[max(layer - 1, 0)])

    def make_final_classifier(self, in_filters, num_classes):
        """Final 3x3 conv producing one output channel per class."""
        return nn.Sequential(
            nn.Conv2d(in_filters, num_classes, 3, padding=1)
        )

    def get_encoder(self, encoder, layer):
        """Return the module implementing encoder stage `layer`; subclasses
        must implement."""
        raise NotImplementedError

    @property
    def first_layer_params_names(self):
        # Names of first-layer parameters; subclasses must implement.
        raise NotImplementedError
class Resnet(EncoderDecoder):
    """EncoderDecoder whose encoder stages are slices of a ResNet backbone."""

    def __init__(self, num_classes, num_channels, encoder_name):
        super().__init__(num_classes, num_channels, encoder_name)

    def get_encoder(self, encoder, layer):
        """Return the module implementing encoder stage `layer` (0-4)."""
        if layer == 0:
            # Stem: 7x7 conv + BN + ReLU (maxpool belongs to stage 1).
            return nn.Sequential(encoder.conv1, encoder.bn1, encoder.relu)
        if layer == 1:
            return nn.Sequential(encoder.maxpool, encoder.layer1)
        if layer == 2:
            return encoder.layer2
        if layer == 3:
            return encoder.layer3
        if layer == 4:
            return encoder.layer4
class Resnet34_upsample(Resnet):
    """Upsampling encoder-decoder backed by a ResNet-34 encoder."""
    def __init__(self, num_classes, num_channels=3):
        super().__init__(num_classes, num_channels, encoder_name='resnet34')
class Resnet50_upsample(Resnet):
    """Upsampling encoder-decoder backed by a ResNet-50 encoder."""
    def __init__(self, num_classes, num_channels=3):
        super().__init__(num_classes, num_channels, encoder_name='resnet50')
class Resnet101_upsample(Resnet):
    """Upsampling encoder-decoder backed by a ResNet-101 encoder."""
    def __init__(self, num_classes, num_channels=3):
        super().__init__(num_classes, num_channels, encoder_name='resnet101')
|
# Copyright 2020 The ElasticDL Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import numpy as np
import tensorflow as tf
from tensorflow.python.feature_column import feature_column_v2 as fc_lib
from elasticdl.python.common.log_utils import default_logger as logger
from elasticdl.python.common.save_utils import CheckpointSaver
from elasticdl.python.elasticdl.feature_column.feature_column import (
EmbeddingColumn,
embedding_column,
)
from elasticdl.python.elasticdl.layers.embedding import Embedding
from elasticdl.python.ps.embedding_table import EmbeddingTable
from elasticdl_client.common.constants import DistributionStrategy
from elasticdl_preprocessing.layers import SparseEmbedding
def _get_trained_params_from_checkpoint(checkpoint_dir):
    """Get parameters from a checkpoint directory saved by ElasticDL.

    Returns the non-embedding parameters with each embedding table merged
    in under its layer name.
    """
    parameters = CheckpointSaver.restore_params_from_checkpoint(
        checkpoint_dir, 0, 1
    )
    params = parameters.non_embedding_params
    for layer_name, embedding_table in parameters.embedding_params.items():
        params[layer_name] = embedding_table
    return params
def _convert_embedding_table_to_numpy_array(embedding_table, embedding_shape):
    """Build a dense weight array from an embedding table's sparse vectors.

    Rows without a trained vector remain zero.

    Args:
        embedding_table: A `EmbeddingTable` instance.
        embedding_shape: (vocabulary_size, embedding_dim) tuple.

    Returns:
        A np.ndarray assignable to trainable weights of a keras embedding
        layer.
    """
    dense_weights = np.zeros(embedding_shape)
    trained_ids = list(embedding_table.embedding_vectors.keys())
    trained_vectors = list(embedding_table.embedding_vectors.values())
    dense_weights[trained_ids] = trained_vectors
    return dense_weights
def _get_embedding_column_input_dim(embedding_column):
    """Return the vocabulary size (num_buckets) of a TF embedding column.

    Raises:
        Exception: if `embedding_column` is not an `fc_lib.EmbeddingColumn`.
    """
    # isinstance (not `type(...) !=`) so EmbeddingColumn subclasses are
    # accepted too — the caller `_need_partition_embedding` already admits
    # them via its own isinstance check, so the old exact-type test could
    # raise for objects the caller had deliberately routed here.
    if not isinstance(embedding_column, fc_lib.EmbeddingColumn):
        raise Exception("The input should be EmbeddingColumn type.")
    default_num_buckets = (
        embedding_column.categorical_column.num_buckets
        if embedding_column._is_v2_column
        else embedding_column.categorical_column._num_buckets
    )  # pylint: disable=protected-access
    num_buckets = getattr(
        embedding_column.categorical_column, "num_buckets", default_num_buckets
    )
    return num_buckets
def _need_partition_embedding(embedding_object):
    """Decide whether an embedding layer/column should be partitioned onto
    multiple PS instances (i.e. its trainable weights exceed 2MB)."""
    if isinstance(embedding_object, tf.keras.layers.Layer):
        input_dim = embedding_object.input_dim
        output_dim = embedding_object.output_dim
    elif isinstance(embedding_object, fc_lib.EmbeddingColumn):
        input_dim = _get_embedding_column_input_dim(embedding_object)
        output_dim = embedding_object.dimension
    else:
        raise Exception(
            "Unsupported type {} for embedding".format(type(embedding_object))
        )
    return _need_partition_embedding_from_shape_info(input_dim, output_dim)
def _need_partition_embedding_from_shape_info(input_dim, output_dim):
    """True when float32 weights of shape (input_dim, output_dim) would
    occupy more than the 2MB partitioning threshold."""
    PARTITION_THRESHOLD_BYTES = 2 * 1024 * 1024  # 2MB
    BYTES_PER_FLOAT32 = 4
    return input_dim * output_dim * BYTES_PER_FLOAT32 > PARTITION_THRESHOLD_BYTES
def _replace_tf_embedding_column_with_edl(dense_features_layer):
    """Rebuild a DenseFeatures layer, swapping large native TF embedding
    columns for their ElasticDL equivalents."""
    rebuilt_columns = []
    for column in dense_features_layer._feature_columns:
        needs_replacement = isinstance(
            column, fc_lib.EmbeddingColumn
        ) and _need_partition_embedding(column)
        if not needs_replacement:
            rebuilt_columns.append(column)
            continue
        logger.info(
            "Replace embedding_column {} from TensorFlow "
            "version to ElasticDL version".format(column.name)
        )
        edl_column = embedding_column(
            column.categorical_column, dimension=column.dimension
        )
        edl_column.set_dense_features_layer_name(dense_features_layer.name)
        rebuilt_columns.append(edl_column)
    return tf.keras.layers.DenseFeatures(
        feature_columns=rebuilt_columns, name=dense_features_layer.name
    )
def _replace_edl_embedding_column_with_tf(dense_features_layer):
    """Rebuild a DenseFeatures layer, swapping ElasticDL embedding columns
    back to native TensorFlow ones."""
    rebuilt_columns = []
    for column in dense_features_layer._feature_columns:
        if not isinstance(column, EmbeddingColumn):
            rebuilt_columns.append(column)
            continue
        logger.info(
            "Replace embedding_column {} from ElasticDL "
            "version to TF version".format(column.name)
        )
        rebuilt_columns.append(
            fc_lib.embedding_column(
                column.categorical_column, dimension=column.dimension
            )
        )
    return tf.keras.layers.DenseFeatures(
        feature_columns=rebuilt_columns, name=dense_features_layer.name
    )
class ModelHandler(metaclass=abc.ABCMeta):
    """Generate the model to train in ElasticDL for different distributed
    strategies and export trained model in ElasticDL to SavedModel.
    """

    @abc.abstractmethod
    def get_model_to_train(self, model):
        """Generate a model to train in ElasticDL.

        Args:
            model: A native keras model instance.

        Returns:
            A keras model instance for ElasticDL training.
        """

    @abc.abstractmethod
    def get_model_to_export(self, model, dataset):
        """Get the model which can be exported a SavedModel
        by tf.saved_model.save.

        Args:
            model: A keras model instance trained by ElasticDL and
                it may contains `elasticdl.layers.Embedding` layers.
            dataset: A `tf.data.Dataset` instance which has the same outputs as
                the training dataset.

        Returns:
            A keras model instance trained by ElasticDL.
        """

    @classmethod
    def get_model_handler(
        cls, distribution_strategy=None, checkpoint_dir=None
    ):
        """Create a model handler to process the model for the
        distributed strategy.

        Args:
            distribution_strategy (string): distribution strategy name
            checkpoint_dir: Checkpoint directory to save model parameters
                during training.

        Returns:
            ModelHandler subclass instance.
        """
        if distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
            return ParameterServerModelHandler(checkpoint_dir=checkpoint_dir)
        # All other strategies use the no-op default handler.
        return DefaultModelHandler()
class DefaultModelHandler(ModelHandler):
    """Return the origin model to train and export."""

    def get_model_to_train(self, model):
        # No distribution strategy: train the model unchanged.
        return model

    def get_model_to_export(self, model, dataset):
        """Get model with inputs and trained parameters to export."""
        if model.inputs:
            return model
        # Build with the dataset so the exported SavedModel has concrete
        # inputs/outputs.
        model._build_model_with_inputs(inputs=dataset, targets=None)
        return model
class ParameterServerModelHandler(ModelHandler):
    """Model handler for the parameter server strategy.

    For training, the handler replaces `tf.keras.layers.Embedding`
    layers with `elasticdl.layers.Embedding`.
    For saving the model, the handler restores the Keras model definition
    and pulls trained parameters from parameter server(s) for the model.
    """

    def __init__(self, checkpoint_dir=None):
        """
        Arguments:
            checkpoint_dir: A checkpoint directory to save all model
                parameters during training.
        """
        self._checkpoint_dir = checkpoint_dir

    def get_model_to_train(self, model):
        """Replace the tf.keras.layers.Embedding layers in the model with
        elasticdl.layers.Embedding layers for ParameterServerStrategy.
        """
        # Clear keras model session to avoid clutter from old models/layers.
        tf.keras.backend.clear_session()
        # Functional/Sequential models can be rebuilt via clone_model;
        # subclassed models must have their attributes patched in place.
        if type(model) == tf.keras.Sequential or model._is_graph_network:
            model = self._clone_model_with_edl_embedding(model)
        else:
            model = self._replace_attr_with_edl_embedding(model)
        return model

    def get_model_to_export(self, model, dataset):
        """Get the model which can be exported to a SavedModel by
        `tf.saved_model.save`.
        """
        model = self._restore_keras_model_def(model)
        if not model.inputs:
            # Build model to add inputs and outputs that
            # can be consumed by tf-serving.
            model._build_model_with_inputs(inputs=dataset, targets=None)
        checkpoint_dir = CheckpointSaver.get_valid_lastest_version_dir(
            self._checkpoint_dir
        )
        if checkpoint_dir is None:
            logger.warning("No available checkpoint to export model")
            return model
        trained_params = _get_trained_params_from_checkpoint(checkpoint_dir)
        for var in model.trainable_variables:
            if isinstance(trained_params[var.name], EmbeddingTable):
                # Embedding tables are stored sharded; densify to the
                # variable's shape before assigning.
                embedding_params = _convert_embedding_table_to_numpy_array(
                    trained_params[var.name], var.shape
                )
                var.assign(embedding_params)
            else:
                var.assign(trained_params[var.name].numpy())
        return model

    def _restore_keras_model_def(self, model):
        """Restore Keras model definition by replacing
        `elasticdl.layers.Embedding` layers with
        `tf.keras.layers.Embedding` layers.
        """
        # Clear keras model session to avoid clutter from old models/layers.
        tf.keras.backend.clear_session()
        if (
            isinstance(model, tf.keras.models.Model)
            and not model._is_graph_network
        ):
            model = self._replace_attr_with_keras_embedding(model)
        else:
            model = self._clone_model_with_keras_embedding(model)
        return model

    @staticmethod
    def _clone_model_with_edl_embedding(model):
        """Clone a new model and replace keras embedding layers including
        `tf.keras.layers.Embedding` and `SparseEmbedding` with
        `elasticdl.layers.Embedding`.
        """

        def _clone_function(layer):
            if type(layer) in [
                tf.keras.layers.Embedding,
                SparseEmbedding,
            ] and _need_partition_embedding(layer):
                logger.debug(
                    "Replace {} with {}".format(layer.name, Embedding)
                )
                # ElasticDL embedding only accepts a string type initializer
                init = tf.keras.initializers.serialize(
                    layer.embeddings_initializer
                )["class_name"]
                if type(layer) == tf.keras.layers.Embedding:
                    embedding_layer = Embedding(
                        output_dim=layer.output_dim,
                        input_dim=layer.input_dim,
                        embeddings_initializer=init,
                        mask_zero=layer.mask_zero,
                        input_length=layer.input_length,
                        name=layer.name,
                    )
                else:
                    embedding_layer = Embedding(
                        output_dim=layer.output_dim,
                        input_dim=layer.input_dim,
                        embeddings_initializer=init,
                        name=layer.name,
                        combiner=layer.combiner,
                    )
                # Keep the original weight name so saved parameters line up.
                embedding_layer.set_embedding_weight_name(
                    layer.trainable_weights[0].name
                )
                return embedding_layer
            elif type(layer) == tf.keras.layers.DenseFeatures:
                return _replace_tf_embedding_column_with_edl(layer)
            return layer

        return tf.keras.models.clone_model(
            model, clone_function=_clone_function
        )

    @staticmethod
    def _clone_model_with_keras_embedding(model):
        """Clone a new model and replace the `elasticdl.layers.Embedding`
        layers with `tf.keras.layers.Embedding` or `SparseEmbedding` layers.
        """

        def _clone_function(layer):
            if type(layer) == Embedding:
                # Fixed log message: this direction swaps the ElasticDL
                # embedding for a native keras layer, not the reverse.
                logger.info(
                    "Replace elasticdl.layers.Embedding with "
                    "a keras embedding layer"
                )
                # The combiner is not None only for SparseEmbedding.
                if layer.combiner is not None:
                    embedding_layer = SparseEmbedding(
                        output_dim=layer.output_dim,
                        input_dim=layer.input_dim,
                        embeddings_initializer=layer.embeddings_initializer,
                        name=layer.name,
                        combiner=layer.combiner,
                    )
                else:
                    embedding_layer = tf.keras.layers.Embedding(
                        output_dim=layer.output_dim,
                        input_dim=layer.input_dim,
                        embeddings_initializer=layer.embeddings_initializer,
                        mask_zero=layer.mask_zero,
                        input_length=layer.input_length,
                        name=layer.name,
                    )
                return embedding_layer
            elif type(layer) == tf.keras.layers.DenseFeatures:
                return _replace_edl_embedding_column_with_tf(layer)
            return layer

        return tf.keras.models.clone_model(
            model, clone_function=_clone_function
        )

    @staticmethod
    def _replace_attr_with_edl_embedding(model):
        """Replace the keras embedding attributes in the model with
        `elasticdl.layers.Embedding` layers.
        """
        for name, value in model.__dict__.items():
            if type(
                value
            ) == tf.keras.layers.Embedding and _need_partition_embedding(
                value
            ):
                logger.info(
                    "Replace {} layer with "
                    "elasticdl.layers.Embedding".format(value)
                )
                initializer_name = tf.keras.initializers.serialize(
                    value.embeddings_initializer
                )["class_name"]
                embedding_layer = Embedding(
                    output_dim=value.output_dim,
                    input_dim=value.input_dim,
                    embeddings_initializer=initializer_name,
                    mask_zero=value.mask_zero,
                    input_length=value.input_length,
                    name=value.name,
                )
                # The weights of subclass model is None, so we need to create
                # the weight name which is "{layer_name}/embeddings:0" in
                # tf.keras.layers.Embedding.
                embedding_layer.set_embedding_weight_name(
                    value.name + "/embeddings:0"
                )
                setattr(model, name, embedding_layer)
            elif type(value) == SparseEmbedding and _need_partition_embedding(
                value
            ):
                logger.info(
                    "Replace {} layer with "
                    "elasticdl.layers.Embedding".format(value)
                )
                # Fix: serialize this layer's own initializer. Previously
                # this branch reused `initializer_name` from the branch
                # above, raising NameError when a SparseEmbedding attribute
                # was encountered before a tf.keras Embedding one.
                initializer_name = tf.keras.initializers.serialize(
                    value.embeddings_initializer
                )["class_name"]
                embedding_layer = Embedding(
                    output_dim=value.output_dim,
                    input_dim=value.input_dim,
                    embeddings_initializer=initializer_name,
                    combiner=value.combiner,
                    name=value.name,
                )
                embedding_layer.set_embedding_weight_name(
                    value.name + "/embeddings:0"
                )
                setattr(model, name, embedding_layer)
            elif type(value) == tf.keras.layers.DenseFeatures:
                feature_layer = _replace_tf_embedding_column_with_edl(value)
                setattr(model, name, feature_layer)
        return model

    @staticmethod
    def _replace_attr_with_keras_embedding(model):
        """Replace the elasticdl.layers.Embedding attributes in the model
        with `tf.keras.layers.Embedding` or `SparseEmbedding` layers.
        """
        for name, value in model.__dict__.items():
            if type(value) == Embedding:
                # The combiner is not None only for SparseEmbedding.
                if value.combiner is not None:
                    logger.info("Replace elasticdl with SparseEmbedding")
                    embedding_layer = SparseEmbedding(
                        output_dim=value.output_dim,
                        input_dim=value.input_dim,
                        embeddings_initializer=value.embeddings_initializer,
                        combiner=value.combiner,
                    )
                else:
                    # Fixed typo in log message ("tf.kerasl" -> "tf.keras").
                    logger.info(
                        "Replace elasticdl with tf.keras.layers.Embedding"
                    )
                    embedding_layer = tf.keras.layers.Embedding(
                        output_dim=value.output_dim,
                        input_dim=value.input_dim,
                        embeddings_initializer=value.embeddings_initializer,
                        mask_zero=value.mask_zero,
                        input_length=value.input_length,
                    )
                setattr(model, name, embedding_layer)
            elif type(value) == tf.keras.layers.DenseFeatures:
                feature_layer = _replace_edl_embedding_column_with_tf(value)
                setattr(model, name, feature_layer)
        return model
|
__author__ = 'eddie'
|
#! /usr/bin/env python3
import argparse
from argparse import RawTextHelpFormatter
from collections import Counter
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
import pysam
def calculate_variant_position(read, flank_length):
    """Walk the read's CIGAR to locate the variant on the reference.

    Args:
        read: a pysam aligned segment (uses `read.pos` and
            `read.cigartuples`).
        flank_length: length of each flanking sequence around the variant.

    Returns:
        tuple: (varpos, perf_counter, local_region_size) where `varpos` is
        the reference position of the variant, `perf_counter` counts the
        match/mismatch operations in the local region around the variant
        (excluding the variant itself), and `local_region_size` is the
        half-width of that region.
    """
    start = read.pos  # alignment start position
    perf_counter = 0  # matches + mismatches counted around the variant
    varpos = start  # advanced to the variant position below
    local_region_size = 5
    # Expand the CIGAR into one operation code per position:
    # e.g. 10M becomes [0]*10.
    cig_list = []
    for (cig_type, cig_length) in read.cigartuples:
        cig_list.extend([cig_type] * cig_length)
    # Deletions before the variant need to be counted in the new variant
    # position:
    read_var_pos = start + flank_length + 1 + cig_list[0:flank_length + 1].count(pysam.CDEL)
    for operator in range(0, flank_length + 2 + local_region_size + cig_list[0:flank_length + 1].count(pysam.CDEL)):
        # Stop incrementing varpos once we've reached read_var_pos:
        if start + operator < read_var_pos:
            # Mismatch/matches and deletions increase the position counter:
            if cig_list[operator] == pysam.CMATCH or cig_list[operator] == pysam.CDEL:
                varpos += 1
        # If we are in the local region around the variant, but not at the
        # position of the variant:
        if ((start + operator >= (read_var_pos - local_region_size))
                and (start + operator <= (read_var_pos + local_region_size))
                and (start + operator != read_var_pos)):
            if cig_list[operator] == 0:  # Match or mismatch
                perf_counter += 1  # Increase the counter for perfect local region
    return varpos, perf_counter, local_region_size
def is_read_valid(read, counter, flank_length, score_cutoff, diff_cutoff):
    """Apply the remapping filters to one read, updating `counter` buckets.

    A read is kept only if it is a mapped primary alignment whose alignment
    score (AS), score gap to the best secondary alignment (XS), and local
    region around the variant all pass the thresholds.
    """
    # Secondary alignments are skipped entirely (not even counted).
    if read.is_secondary:
        return False
    counter['total'] += 1
    if read.is_unmapped:  # bitwise flag & 4 means unmapped
        counter['unmapped'] += 1
        return False
    align_score = read.get_tag('AS')
    try:
        next_best_score = read.get_tag('XS')
    except KeyError:
        # No secondary alignment recorded: substitute an arbitrarily low
        # score for the 'artificial' secondary alignment.
        next_best_score = -flank_length
    if align_score <= int(score_cutoff):
        counter['primary_poor'] += 1
        return False
    if align_score - next_best_score < diff_cutoff:
        counter['gap_small'] += 1
        return False
    varpos, perf_counter, local_region_size = calculate_variant_position(read, flank_length)
    # The local region must align perfectly on both sides of the variant.
    if perf_counter != 2 * local_region_size:
        counter['context_bad'] += 1
        return False
    counter['remapped'] += 1
    return True
def process_bam_file(bam_file_path, output_file, old_ref_allele_output_file, flank_length, score_perc, diff_AS_XS_perc):
    """Filter remapped variant reads from a BAM file and write the results.

    Reverses the allele for variants that got mapped onto the reverse strand
    of the new genome, writes each kept variant to `output_file` (tab
    separated), the old reference alleles to `old_ref_allele_output_file`,
    and prints filtering statistics to stdout.

    Args:
        bam_file_path: BAM file containing remapped variants.
        output_file: destination for the kept variants.
        old_ref_allele_output_file: destination for old reference alleles.
        flank_length: length of each flanking sequence.
        score_perc: alignment-score cutoff as a fraction of flank_length.
        diff_AS_XS_perc: required AS-XS gap as a fraction of flank_length.
    """
    # Calculate the score cutoffs based on flanking seq length
    score_cutoff = -(flank_length * score_perc)
    diff_cutoff = flank_length * diff_AS_XS_perc
    counter = Counter()
    # Fix: open the BAM via a context manager so the handle is closed even
    # when processing raises (it previously leaked).
    with pysam.AlignmentFile(bam_file_path, 'rb') as bamfile, \
            open(output_file, 'w') as outfile, \
            open(old_ref_allele_output_file, 'w') as old_ref_alleles:
        for read in bamfile:
            if is_read_valid(read, counter, flank_length, score_cutoff, diff_cutoff):
                # Read names encode variant metadata as |-separated fields.
                info = read.query_name.split('|')
                nucl = info[3]
                # Mapped onto reverse strand (bitwise flag & 16):
                if read.is_reverse:
                    nucl = Seq(nucl, generic_dna).complement()
                varpos, perf_counter, local_region_size = calculate_variant_position(read, flank_length)
                outfile.write(
                    '%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (read.reference_name, varpos, info[4], nucl, info[5], info[6], info[7]))
                # Store old reference allele:
                old_ref_alleles.write('%s\n' % info[2])
    print(counter['total'])
    print(counter['unmapped'])
    print(counter['primary_poor'])
    print(counter['gap_small'])
    print(counter['context_bad'])
    print(counter['remapped'])
    print('% of variants rejected for:')
    # Fix: guard against ZeroDivisionError when no primary reads were seen.
    total = counter['total']
    if total:
        print('Unmapped: {:.2%}'.format(counter['unmapped'] / total))
        print('Primary alignment too poor: {:.2%}'.format(counter['primary_poor'] / total))
        print('Primary and secondary alignments too close: {:.2%}'.format(counter['gap_small'] / total))
        print('Local region around variant too poorly aligned: {:.2%}'.format(counter['context_bad'] / total))
def main():
    """Parse CLI arguments and run the BAM processing pipeline."""
    # Fix: the two adjacent string literals previously joined as
    # "printseverything" (missing space).
    description = (
        'Reverses the allele for variants that got mapped onto the reverse strand of the new genome, and prints '
        'everything to a new file\n')
    parser = argparse.ArgumentParser(description=description, formatter_class=RawTextHelpFormatter)
    parser.add_argument('-i', '--bam', help='bam file containing remapped variants ')
    parser.add_argument('-o', '--outfile', help='name of new file')
    parser.add_argument('-p', '--old_ref_alleles', help='name of output old ref alleles')
    parser.add_argument('-f', '--flankingseqlength', type=int, help='length of each of the flanking sequences')
    # Fix: adjacent literals previously joined as "seqlength" (missing space).
    parser.add_argument('-s', '--scoreperc', type=float,
                        help='the alignment score cut off percentage of flanking seq '
                             'length (keeps values strictly above)')
    # Fix: a bare '%' in an argparse help string raises
    # "ValueError: unsupported format character" when --help is printed;
    # it must be escaped as '%%'.
    parser.add_argument('-d', '--difference_AS_XS', type=float, help='difference threshold %% between AS and XS')
    args = parser.parse_args()
    process_bam_file(
        bam_file_path=args.bam,
        output_file=args.outfile,
        old_ref_allele_output_file=args.old_ref_alleles,
        flank_length=args.flankingseqlength,
        score_perc=args.scoreperc,
        diff_AS_XS_perc=args.difference_AS_XS
    )


if __name__ == '__main__':
    main()
|
import logging
import torch.nn as nn
from typing import List, Optional, Union
from c_unet.architectures.decoder import DecoderBlock
from c_unet.architectures.encoder import EncoderBlock
class Unet(nn.Module):
    """ U-net architecture, that can be used either with normal convolutions, or with group convolutions.
    The available groups are defined in equiHippo/groups

    Args:
        - group (str): Shorthand name representing the group to use
        - group_dim (int): Group dimension
        - in_channels (int): Number of input channels
        - out_channels (int): Number of output channels
        - divider (int): Divides the base for the number
        of channels in the model. Must be a power of two between 1 and 16. Defaults to 1.
        - pool_size (int): Size of the pooling kernel. Defaults to 2.
        - pool_stride (Union[int, List[int]]): Stride of the pooling. Defaults to 2.
        - pool_padding (Union[str, int]): Zero-padding added to all three sides of the input at pooling. Defaults to 0.
        - pool_reduction (Optional[str]): Reduction mode forwarded to the encoder's pooling (semantics defined in EncoderBlock). Defaults to "mean".
        - pool_factor (Optional[int]): Factor forwarded to the encoder's pooling (semantics defined in EncoderBlock). Defaults to 2.
        - tconv_kernel_size (int): Size of the kernel. Defaults to 4.
        - tconv_stride (Union[int, List[int]]): Stride of the upsampling. Defaults to 2.
        - tconv_padding (Union[str, int]): Zero-padding added to all three sides of the input at upsampling. Defaults to 1.
        - output_padding (Union[str, int]): Additional size added to one side of each dimension in the output shape. Defaults to 0.
        - dropout (float, optional) : Value of dropout to use. Defaults to 0.1
        - stride (Union[int, List[int]]): Stride of the convolution. Defaults to 1.
        - padding (Union[str, int]): Zero-padding added to all three sides of the input. Defaults to "same".
        - kernel_size (int): Size of the kernel. Defaults to 3.
        - bias (bool): If True, adds a learnable bias to the output. Defaults to True.
        - dilation (int): Spacing between kernel elements. Defaults to 1.
        - nonlinearity (Optional[str], optional): Non-linear function to apply. Defaults to "relu".
        - normalization (Optional[str], optional): Normalization to apply. Defaults to "bn".
        - model_depth (int): Depth of the encoding path. Defaults to 4.
        - final_activation (str): Name of the final activation to use. Defaults to sigmoid.

    Raises:
        ValueError: Invalid normalization value
        ValueError: Invalid nonlinearity value
    """
    def __init__(
            self,
            # Group arguments
            group: Union[str, None],
            group_dim: int,
            # Channels arguments
            in_channels: int,
            out_channels: int,
            divider: int = 1,
            # Pooling
            pool_size: int = 2,
            pool_stride: Union[int, List[int]] = 2,
            pool_padding: Union[str, int] = 0,
            pool_reduction: Optional[str] = "mean",
            pool_factor: Optional[int] = 2,
            # Transpose convolutions arguments
            tconv_kernel_size: int = 4,
            tconv_stride: Union[int, List[int]] = 2,
            tconv_padding: Union[str, int] = 1,
            output_padding: Union[str, int] = 0,
            # Convolutional arguments
            # Annotation corrected: the default 0.1 is a dropout rate,
            # not a bool.
            dropout: Optional[float] = 0.1,
            stride: Union[int, List[int]] = 1,
            padding: Union[str, int] = "same",
            kernel_size: int = 3,
            bias: bool = True,
            dilation: int = 1,
            # Additional layers
            nonlinearity: Optional[str] = "relu",
            normalization: Optional[str] = "bn",
            # Architecture arguments
            model_depth: int = 4,
            final_activation: str = "sigmoid"):
        super(Unet, self).__init__()
        self.logger = logging.getLogger(__name__)
        self.group = group
        # Model constants: channel widths shrink as `divider` grows.
        self.root_feat_maps = 32 // divider
        self.num_feat_maps = 16 // divider
        # Contracting path: convolutions + pooling, returns skip features.
        self.encoder = EncoderBlock(in_channels=in_channels,
                                    kernel_size=kernel_size,
                                    stride=stride,
                                    padding=padding,
                                    pool_size=pool_size,
                                    pool_stride=pool_stride,
                                    pool_padding=pool_padding,
                                    pool_reduction=pool_reduction,
                                    pool_factor=pool_factor,
                                    dropout=dropout,
                                    bias=bias,
                                    dilation=dilation,
                                    nonlinearity=nonlinearity,
                                    normalization=normalization,
                                    model_depth=model_depth,
                                    root_feat_maps=self.root_feat_maps,
                                    group=group,
                                    group_dim=group_dim)
        # Expanding path: transposed convolutions consuming the skip features.
        self.decoder = DecoderBlock(out_channels=out_channels,
                                    kernel_size=kernel_size,
                                    stride=stride,
                                    padding=padding,
                                    tconv_kernel_size=tconv_kernel_size,
                                    tconv_stride=tconv_stride,
                                    tconv_padding=tconv_padding,
                                    output_padding=output_padding,
                                    dropout=dropout,
                                    bias=bias,
                                    dilation=dilation,
                                    nonlinearity=nonlinearity,
                                    normalization=normalization,
                                    model_depth=model_depth,
                                    num_feat_maps=self.num_feat_maps,
                                    final_activation=final_activation,
                                    group=group,
                                    group_dim=group_dim)
    def forward(self, x):
        """
        Args:
            - x: input feature map

        Returns:
            - output feature map, the segmentation of the input image
        """
        x, downsampling_features = self.encoder(x)
        x = self.decoder(x, downsampling_features)
        self.logger.debug(f"Final output shape: {x.shape}")
        return x
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This recipe runs under both Python 2 and Python 3 recipe engines.
PYTHON_VERSION_COMPATIBILITY = 'PY2+3'
# Recipe modules this recipe depends on, resolved by the recipe engine.
DEPS = [
    'buildbucket',
    'cq',
    'step',
]
# NOTE(review): `Build` appears unused in this file — confirm before removing.
from PB.go.chromium.org.luci.buildbucket.proto.build import Build
def RunSteps(api):
    """Exercises cq.set_do_not_retry_build: the flag starts unset, is set
    by the first call, and a second call is a no-op."""
    assert not api.cq.do_not_retry_build
    api.cq.set_do_not_retry_build()
    assert api.cq.do_not_retry_build
    api.cq.set_do_not_retry_build()  # noop.
def GenTests(api):
    """Single simulation case; the asserts in RunSteps are the checks."""
    yield api.test('example')
|
# -*- coding: utf-8 -*-
"""Generate text corpus from random walks on graph."""
import numpy as np
from joblib import Parallel, delayed
from joblib.pool import has_shareable_memory
from jwalk import walks
__all__ = ['walk_graph', 'build_corpus']
def walk_random(normalized_csr, labels, walk_length):
    """Generate random walks for each node in a normalized sparse csr matrix.

    Args:
        normalized_csr (scipy.sparse.csr_matrix): normalized adjacency matrix
        labels (np.ndarray): array of node labels
        walk_length (int): length of walk

    Returns:
        np.array walks, np.array word frequencies
    """
    # Module-level thin wrapper: we need to wrap walks.walk_random,
    # otherwise joblib complains (cannot pickle the target) in Py2.
    return walks.walk_random(normalized_csr, labels, walk_length)
def normalize_csr_matrix(csr_matrix):
    """Row-normalize adjacency matrix weights.

    Each stored value is divided by the total weight of its row, so every
    non-empty row of the result sums to 1. The input is not modified.

    Args:
        csr_matrix (scipy.sparse.csr_matrix): adjacency matrix

    Returns:
        scipy.sparse.csr_matrix: normalized copy of the input
    """
    # Per-row totals, flattened from the (n, 1) matrix that .sum returns.
    row_totals = np.asarray(csr_matrix.sum(axis=1))[:, 0]
    # Row index of every stored (nonzero) entry; zero rows contribute none,
    # so no division by zero can occur.
    entry_rows, _ = csr_matrix.nonzero()
    result = csr_matrix.copy()
    result.data /= row_totals[entry_rows]
    return result
def walk_graph(csr_matrix, labels, walk_length=40, num_walks=1, n_jobs=1):
    """Perform random walks on adjacency matrix.

    Args:
        csr_matrix: adjacency matrix.
        labels: list of node labels where index align with CSR matrix
        walk_length: maximum length of random walk (default=40)
        num_walks: number of walks to do for each node (default=1)
        n_jobs: number of cores to use (default=1)

    Returns:
        tuple: (np.ndarray of random walks, dict mapping label to frequency)
    """
    normalized = normalize_csr_matrix(csr_matrix)
    results = (Parallel(n_jobs=n_jobs, max_nbytes=None)
               (delayed(walk_random, has_shareable_memory)
                (normalized, labels, walk_length)
                for _ in range(num_walks)))
    # Fix: local renamed from `walks` to avoid shadowing the imported
    # `jwalk.walks` module inside this function.
    walk_arrays, freqs = zip(*results)
    random_walks = np.concatenate(walk_arrays)
    word_freqs = np.sum(freqs, axis=0)
    return random_walks, dict(zip(labels, word_freqs))
def build_corpus(walks, outpath):
    """Shuffle the walks in place and persist them as a text corpus.

    Args:
        walks: random walks (shuffled in place as a side effect)
        outpath: file to write to

    Returns:
        str: file path of the written corpus
    """
    # Shuffle so downstream training does not see node-ordered walks.
    np.random.shuffle(walks)
    np.savetxt(outpath, walks, fmt='%s', delimiter=' ')
    return outpath
|
from unittest import TestCase
import markdown
from mdx_picture import PictureExtension
class MdxPictureBlockVadiationTest(TestCase):
    # NOTE(review): "Vadiation" in the class name looks like a typo for
    # "Variation"; kept unchanged so external selection of tests by name
    # still works.

    def testMinimalBlockConfiguration(self):
        """Test minimal block configuration with image only (no source
        lines inside the [picture] block)."""
        # Input markdown: a [picture] block wrapping one fallback image.
        no_sources_mdx = """
        Hello World my Babald
        [picture]
        ![This picture loads on non-supporting browsers.]\
        (image.jpg "The image title")
        [/picture]
        One more time
        """
        # Expected output: a <picture> element containing only the <img>.
        no_sources_html = """<p>Hello World my Babald</p>\n\
<picture>\
<img alt="This picture loads on non-supporting browsers." \
src="image.jpg" title="The image title" />\
</picture>\
<p>One more time</p>"""
        self.assertEqual(no_sources_html,
                         markdown.markdown(no_sources_mdx,
                                           extensions=[PictureExtension()]))

    def testCompleteBlockConfiguration(self):
        """Test complete block configuration with image and multiple sources"""
        # Input markdown: [min-width]: url lines become <source> elements.
        sources_mdx = """
        Hello World my Babald
        [picture]
        [64em]: high-res.jpg
        [37.5em]: med-res.jpg
        [0em]: low-res.jpg
        ![This picture loads on non-supporting browsers.]\
        (image.jpg "The image title")
        [/picture]
        One more time
        """
        # Expected output: one <source> per size rule, then the <img>.
        sources_html = """<p>Hello World my Babald</p>\n\
<picture><source media="(min-width: 64em)" srcset="high-res.jpg" />\
<source media="(min-width: 37.5em)" srcset="med-res.jpg" />\
<source media="(min-width: 0em)" srcset="low-res.jpg" />\
<img alt="This picture loads on non-supporting browsers." src="image.jpg" \
title="The image title" /></picture>\
<p>One more time</p>"""
        self.assertEqual(sources_html,
                         markdown.markdown(sources_mdx,
                                           extensions=[PictureExtension()]))

    def testInvalidNoImageBlockConfiguration(self):
        """Test invalid block configuration with source but without image"""
        # NOTE(review): '[/pitture]' may be a deliberately malformed closing
        # tag or a typo — the expected output below matches it either way.
        no_img_mdx = """
        Hello World my Babald
        [picture]
        [37.5em]: med-res.jpg
        [/pitture]
        One more time
        """
        # Expected output: the block is left untransformed as plain text.
        no_img_html = """<p>Hello World my Babald</p>\n\
<p>[picture]\n    [37.5em]: med-res.jpg\n[/pitture]</p>\n<p>One more time</p>"""
        self.assertEqual(no_img_html,
                         markdown.markdown(no_img_mdx,
                                           extensions=[PictureExtension()]))
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup, find_packages
from setuptools.extension import Extension
from Cython.Build import cythonize
import numpy
# Cython extension: the DTW (dynamic time warping) kernel used by the ABX
# evaluation; needs the NumPy C headers on the include path.
extensions = [
    Extension(
        "CPC_audio.cpc.eval.ABX.dtw",
        ["CPC_audio/cpc/eval/ABX/dtw.pyx"],
        include_dirs=[numpy.get_include()],
    ),
]
setup(
    name='CPC_audio',
    version='1.0',
    description='An implementation of the contrast predictive coding (CPC) '
                'training method for audio data.',
    author='Facebook AI Research',
    packages=find_packages(),
    classifiers=["License :: OSI Approved :: MIT License",
                 "Intended Audience :: Science/Research",
                 "Topic :: Scientific/Engineering",
                 "Programming Language :: Python"],
    # Compile the .pyx sources with Python 3 language semantics.
    ext_modules=cythonize(extensions, language_level="3")
)
|
# This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0
# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0
#
import logging
from typing import Any, Iterable, List, Tuple, Type, Optional, Callable
from volatility3.framework import interfaces, constants, layers, exceptions
from volatility3.framework.automagic import symbol_cache
from volatility3.framework.configuration import requirements
from volatility3.framework.layers import scanners
vollog = logging.getLogger(__name__)
class SymbolFinder(interfaces.automagic.AutomagicInterface):
    """Symbol loader based on signature strings."""
    priority = 40

    # Subclasses configure these to describe how banners map to symbols.
    banner_config_key: str = "banner"
    banner_cache: Optional[Type[symbol_cache.SymbolBannerCache]] = None
    symbol_class: Optional[str] = None
    find_aslr: Optional[Callable] = None

    def __init__(self, context: interfaces.context.ContextInterface, config_path: str) -> None:
        super().__init__(context, config_path)
        self._requirements: List[Tuple[str, interfaces.configuration.RequirementInterface]] = []
        self._banners: symbol_cache.BannersType = {}

    @property
    def banners(self) -> symbol_cache.BannersType:
        """Creates a cached copy of the results, but only if it's been
        requested."""
        if not self._banners:
            if not self.banner_cache:
                raise RuntimeError(f"Cache has not been properly defined for {self.__class__.__name__}")
            self._banners = self.banner_cache.load_banners()
        return self._banners

    def __call__(self,
                 context: interfaces.context.ContextInterface,
                 config_path: str,
                 requirement: interfaces.configuration.RequirementInterface,
                 progress_callback: constants.ProgressCallback = None) -> None:
        """Searches for SymbolTableRequirements and attempts to populate
        them."""
        # Bomb out early if our details haven't been configured
        if self.symbol_class is None:
            return
        self._requirements = self.find_requirements(
            context,
            config_path,
            requirement, (requirements.TranslationLayerRequirement, requirements.SymbolTableRequirement),
            shortcut = False)
        for (sub_path, requirement) in self._requirements:
            parent_path = interfaces.configuration.parent_path(sub_path)
            if (isinstance(requirement, requirements.SymbolTableRequirement)
                    and requirement.unsatisfied(context, parent_path)):
                for (tl_sub_path, tl_requirement) in self._requirements:
                    tl_parent_path = interfaces.configuration.parent_path(tl_sub_path)
                    # Find the TranslationLayer sibling to the SymbolTableRequirement
                    if (isinstance(tl_requirement, requirements.TranslationLayerRequirement)
                            and tl_parent_path == parent_path):
                        if context.config.get(tl_sub_path, None):
                            self._banner_scan(context, parent_path, requirement, context.config[tl_sub_path],
                                              progress_callback)
                            break

    def _banner_scan(self,
                     context: interfaces.context.ContextInterface,
                     config_path: str,
                     requirement: interfaces.configuration.ConstructableRequirementInterface,
                     layer_name: str,
                     progress_callback: constants.ProgressCallback = None) -> None:
        """Accepts a context, config_path and SymbolTableRequirement, with a
        constructed layer_name and scans the layer for banners."""
        # Bomb out early if there's no banners
        if not self.banners:
            return
        mss = scanners.MultiStringScanner([x for x in self.banners if x is not None])
        layer = context.layers[layer_name]
        # Check if the Stacker has already found what we're looking for
        if layer.config.get(self.banner_config_key, None):
            banner_list = [(0, bytes(layer.config[self.banner_config_key],
                                     'raw_unicode_escape'))]  # type: Iterable[Any]
        else:
            # Swap to the physical layer for scanning
            # Only traverse down a layer if it's an intel layer
            # TODO: Fix this so it works for layers other than just Intel
            if isinstance(layer, layers.intel.Intel):
                layer = context.layers[layer.config['memory_layer']]
            banner_list = layer.scan(context = context, scanner = mss, progress_callback = progress_callback)
        # Fix: initialize here so the for/else branch below cannot raise
        # NameError when banner_list is empty.
        symbol_files = None
        for _, banner in banner_list:
            vollog.debug(f"Identified banner: {repr(banner)}")
            symbol_files = self.banners.get(banner, None)
            if symbol_files:
                isf_path = symbol_files[0]
                vollog.debug(f"Using symbol library: {symbol_files[0]}")
                clazz = self.symbol_class
                # Set the discovered options
                path_join = interfaces.configuration.path_join
                context.config[path_join(config_path, requirement.name, "class")] = clazz
                context.config[path_join(config_path, requirement.name, "isf_url")] = isf_path
                context.config[path_join(config_path, requirement.name, "symbol_mask")] = layer.address_mask
                # Set a default symbol_shift when attempting to determine it,
                # so we can create the symbols which are used in finding the aslr_shift anyway
                if not context.config.get(path_join(config_path, requirement.name, "symbol_shift"), None):
                    # Don't overwrite it if it's already been set, it will be manually refound if not present
                    prefound_kaslr_value = context.layers[layer_name].metadata.get('kaslr_value', 0)
                    context.config[path_join(config_path, requirement.name, "symbol_shift")] = prefound_kaslr_value
                # Construct the appropriate symbol table
                requirement.construct(context, config_path)
                # Apply the ASLR masking (only if we're not already shifted)
                if self.find_aslr and not context.config.get(path_join(config_path, requirement.name, "symbol_shift"),
                                                             None):
                    unmasked_symbol_table_name = context.config.get(path_join(config_path, requirement.name), None)
                    if not unmasked_symbol_table_name:
                        raise exceptions.SymbolSpaceError("Symbol table could not be constructed")
                    if not isinstance(layer, layers.intel.Intel):
                        # Fix: the format argument was previously missing, so
                        # the message printed a literal "{}".
                        raise TypeError("Layer name {} is not an intel space".format(layer_name))
                    aslr_shift = self.find_aslr(context, unmasked_symbol_table_name, layer.config['memory_layer'])
                    context.config[path_join(config_path, requirement.name, "symbol_shift")] = aslr_shift
                    context.symbol_space.clear_symbol_cache(unmasked_symbol_table_name)
                break
        else:
            if symbol_files:
                vollog.debug(f"Symbol library path not found: {symbol_files[0]}")
                # print("Kernel", banner, hex(banner_offset))
            else:
                vollog.debug("No existing banners found")
                # TODO: Fallback to generic regex search?
|
import requests
from time import sleep
from pprint import pprint
from cyberpy import Transaction
from config import ACCOUNT_API, LCD_API, WALLET
def get_account_data(address: str, account_api: str = ACCOUNT_API, print_message: bool = False):
    """Fetch the account number and sequence for an address from the API.

    Falls back to the nested `base_vesting_account.base_account` structure
    when the account is a vesting account (plain fields are absent).

    Args:
        address: bech32 account address to query.
        account_api: base URL of the account endpoint.
        print_message: when True, print the fetched values.

    Returns:
        tuple: (account_number, sequence) as ints.
    """
    _res = requests.get(f'{account_api}{address}')
    # Parse the response once; the original re-parsed the JSON body for
    # every field access.
    payload = _res.json()
    try:
        account = payload['account']
        _account_number = int(account['account_number'])
        _sequence = int(account['sequence'])
    except KeyError:
        base_account = payload['account']['base_vesting_account']['base_account']
        _account_number = int(base_account['account_number'])
        _sequence = int(base_account['sequence'])
    if print_message:
        print(f'address: {address}\naccount number: {_account_number}\nsequence: {_sequence}')
    return _account_number, _sequence
def linking(link_candidates: list, wallet: dict = WALLET, sleep_time: float = 0, print_message: bool = True):
    """Build, sign and broadcast one transaction containing a cyberlink
    message per (from, to) candidate pair, then sleep for `sleep_time`.

    Args:
        link_candidates: sequence of pairs, each indexable as
            (cid_from, cid_to).
        wallet: dict with 'address' and 'private_key'.
        sleep_time: seconds to wait after broadcasting.
        print_message: when True, print each link and the node's response.
    """
    account_number, sequence = get_account_data(wallet['address'])
    # Gas budget: flat 100k base plus 100k per link message.
    tx = Transaction(
        privkey=wallet['private_key'],
        account_num=account_number,
        sequence=sequence,
        fee=0,
        gas=100000 + 100000 * len(link_candidates),
        memo="",
        chain_id="bostrom-testnet-3",
        sync_mode="broadcast_tx_sync",
    )
    for candidate in link_candidates:
        if print_message:
            print(f'cyberLink from {candidate[0]} to {candidate[1]}')
        tx.add_cyberlink(cid_from=candidate[0], cid_to=candidate[1])
    response = requests.post(url=LCD_API, data=tx.get_pushable())
    if print_message:
        pprint(response.json()['result'])
    sleep(sleep_time)
|
from django import template
from django.utils.timezone import now
register = template.Library()

# NOTE(review): MOMENT looks like a leftover from a datetime "humanize"
# filter — nothing in this module reads it. Confirm before removing.
MOMENT = 120  # duration in seconds within which the time difference
# will be rendered as 'a moment ago'
@register.filter
def fmt_float(value):
    """Format a number with precision that scales with its magnitude.

    Fixed docstring: the previous one was copy-pasted from a datetime
    "humanize" filter and described behavior this filter does not have.

    Returns None and 0 unchanged; rounds values <= 1 to 3 decimals,
    <= 10 to 2 decimals, <= 100 to 1 decimal, and truncates anything
    larger to an int.

    NOTE(review): values are compared without abs(), so large negative
    numbers fall into the <= 1 branch and keep 3 decimals — confirm this
    is intended before changing it.
    """
    if value is None:
        return None
    if value == 0:
        return 0
    if value <= 1:
        return round(value, 3)
    if value <= 10:
        return round(value, 2)
    if value <= 100:
        return round(value, 1)
    return int(value)
|
from __future__ import absolute_import
from updater.changelog.gerrit import GerritServer, GerritJSONEncoder
from updater.changelog import get_changes, get_timestamp
from updater.database import Rom, ApiKey, Device
from flask import Flask, jsonify, request, abort, render_template
from flask_mongoengine import MongoEngine
from flask_caching import Cache
from functools import wraps
from pydoc import locate
from uuid import uuid4
import click
import datetime
import json
import os
import requests
import sys
import time
os.environ['TZ'] = 'UTC'  # pin the process timezone so all datetimes compare in UTC
app = Flask(__name__)
# Settings are loaded from app.cfg in the current working directory.
app.config.from_pyfile("{}/app.cfg".format(os.getcwd()))
app.json_encoder = GerritJSONEncoder  # lets jsonify() serialize Gerrit objects
db = MongoEngine(app)
cache = Cache(app)
gerrit = GerritServer(app.config['GERRIT_URL'])
def api_key_required(f):
    """Decorator: reject requests that lack a valid ``Apikey`` header.

    The header value must match an existing ApiKey document; otherwise the
    request is aborted with HTTP 403.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Fix: removed the debug ``print(request.headers)`` -- it wrote every
        # request's headers (including valid API keys) to the logs.
        if 'Apikey' in request.headers:
            if ApiKey.objects(apikey=request.headers.get('Apikey')).first():
                return f(*args, **kwargs)
        return abort(403)
    return decorated_function
@app.cli.command()
@click.option('--filename', '-f', 'filename', required=True)
@click.option('--device', '-d', 'device', required=True)
@click.option('--version', '-v', 'version', required=True)
@click.option('--datetime', '-t', 'datetime', required=True)
@click.option('--romtype', '-r', 'romtype', required=True)
@click.option('--md5sum', '-m', 'md5sum', required=True)
@click.option('--url', '-u', 'url', required=True)
def addrom(filename, device, version, datetime, romtype, md5sum, url):
    """CLI command: register a new ROM build in the database."""
    new_rom = Rom(
        filename=filename,
        device=device,
        version=version,
        datetime=datetime,
        romtype=romtype,
        md5sum=md5sum,
        url=url,
    )
    new_rom.save()
@app.cli.command()
@click.option('--filename', '-f', 'filename', required=True)
def delrom(filename):
    """CLI command: remove every ROM entry matching *filename*."""
    matching = Rom.objects(filename=filename)
    matching.delete()
@app.cli.command()
@click.option("--comment", 'comment', required=False)
@click.option("--remove", "remove", default=False)
@click.option("--print", "echo", flag_value='echo', default=False)
def api_key(comment, remove, echo):
    """CLI command: list, remove, or create API keys.

    --print           list all keys with their comments
    --remove KEY      delete the key with the given value
    --comment TEXT    create a new key annotated with TEXT and print it
    """
    if echo:
        for i in ApiKey.objects():
            print(i.apikey, i.comment)
    elif remove:
        # Bug fix: this branch previously queried ``apikey=apikey`` where
        # ``apikey`` was an undefined name (NameError on every --remove).
        # The key to delete is the value of the --remove option.
        for i in ApiKey.objects(apikey=remove):
            i.delete()
    elif comment:
        key = uuid4().hex
        ApiKey(apikey=key, comment=comment).save()
        print(key)
    else:
        print("comment or print required")
def _load_devices_from(path):
    """Create or update Device documents from a JSON file of device dicts."""
    with open(path, "r") as f:
        data = json.load(f)
    for device in data:
        d = Device.objects(model=device['model'])
        if d:
            d.update(**device)
        else:
            Device(**device).save()

@app.cli.command()
def import_devices():
    """CLI command: import devices.json, then optional devices_local.json.

    The body was duplicated verbatim for the two files; it is now a single
    helper called twice.
    """
    _load_devices_from("devices.json")
    if os.path.isfile("devices_local.json"):
        _load_devices_from("devices_local.json")
@app.cli.command()
def check_builds():
    """CLI command: print a delete statement for every ROM whose URL 404s."""
    for rom in Rom.objects():
        response = requests.head(rom.url)
        if response.status_code == 404:
            print("Rom.objects(filename=\"{}\").delete()".format(rom.filename))
@cache.memoize(timeout=3600)
def get_build_types(device, romtype, after, version):
    """Return a JSON response listing builds for a device/romtype.

    Results are optionally narrowed to builds newer than *after* and/or to a
    specific *version*, sorted by build time ascending. Memoized for 1 hour.
    """
    roms = Rom.get_roms(device=device, romtype=romtype, before=app.config['BUILD_SYNC_TIME'])
    if after:
        roms = roms(datetime__gt=after)
    if version:
        roms = roms(version=version)
    data = [
        {
            "id": str(rom.id),
            "url": rom.url,
            "romtype": rom.romtype,
            "datetime": int(time.mktime(rom.datetime.timetuple())),
            "version": rom.version,
            "filename": rom.filename,
        }
        for rom in roms.order_by('datetime')
    ]
    return jsonify({'response': data})
@app.route('/api/v1/<string:device>/<string:romtype>/<string:incrementalversion>')
# Caching happens via the memoized get_build_types helper.
def index(device, romtype, incrementalversion):
    """Build listing endpoint; honors optional ``after``/``version`` params."""
    return get_build_types(
        device,
        romtype,
        request.args.get("after"),
        request.args.get("version"),
    )
@app.route('/api/v1/types/<string:device>/')
@cache.cached(timeout=3600)
def get_types(device):
    """List ROM types available for *device*; 'nightly' is always included."""
    types = {"nightly"}
    types.update(Rom.get_types(device))
    return jsonify({'response': list(types)})
@app.route('/api/v1/requestfile/<string:file_id>')
def requestfile(file_id):
    """Resolve a ROM id to its download URL and md5 checksum.

    When the ROM document has no URL of its own, one is built from the
    configured base URL plus the stored filename.
    """
    rom = Rom.objects.get(id=file_id)
    if not rom['url']:
        # Bug fix: this previously read the undefined global ``config``
        # (NameError on every ROM without a URL); settings live on app.config.
        # NOTE(review): from_pyfile only loads UPPERCASE keys -- confirm the
        # 'baseurl' key case against app.cfg.
        url = app.config['baseurl']
        if url[-1:] != '/':
            url += '/'
        url += rom['filename']
    else:
        url = rom['url']
    return jsonify({ 'url': url, 'md5sum': rom['md5sum']})
@app.route("/api/v1/auth")
@api_key_required
def test_auth():
return "pass"
@app.route('/api/v1/add_build', methods=['POST',])
@api_key_required
def add_build():
    """Create a ROM record from a JSON POST body after validating it.

    Returns 400 when no JSON body was sent, 406 when a required key is
    missing or a value cannot be coerced to its declared type.
    """
    data = request.get_json()
    # Expected keys mapped to the python type name each value must parse as.
    validate = {"filename": "str", "device": "str", "version": "str", "md5sum": "str", "url": "str", "romtype": "str"}
    #bad data sent
    if not data:
        return jsonify(validate), 400
    #validate keys all exist
    for key in validate.keys():
        if key not in data:
            return jsonify(validate), 406
    # validate types
    for key in validate.keys():
        try:
            locate(validate[key])(data[key])
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed during validation.
            return jsonify({"error": "{} must be parseable by python's {} class".format(key, validate[key])}), 406
    rom = Rom(**data)
    rom.save()
    return "ok", 200
@app.route('/api/v1/changes/<device>/')
@app.route('/api/v1/changes/<device>/<int:before>/')
@app.route('/api/v1/changes/<device>/-1/')
@cache.cached(timeout=3600)
def changes(device='all', before=-1):
    """Return the Gerrit changelog for *device* ('all' means every device)."""
    target = None if device == 'all' else device
    payload = get_changes(gerrit, target, before, Rom.get_device_version(target))
    return jsonify(payload)
@app.route('/<device>/changes/<int:before>/')
@app.route('/<device>/changes/')
@app.route('/')
@cache.cached(timeout=3600)
def show_changelog(device='all', before=-1):
    """Render the changelog page for *device* (default: all devices)."""
    # Hoisted: Rom.get_devices() was previously re-queried once per device
    # inside the comprehension. Also renamed the sort lambda's parameter,
    # which shadowed the route argument ``device``.
    known_models = Rom.get_devices()
    devices = sorted(
        [x for x in Device.get_devices() if x['model'] in known_models],
        key=lambda dev: dev['name'],
    )
    oems = sorted(list(set([x['oem'] for x in devices])))
    return render_template('changes.html', active_device=None, oems=oems, devices=devices, device=device, before=before, changelog=True)
@app.route('/api/v1/devices')
@cache.cached(timeout=3600)
def api_v1_devices():
    """JSON map of currently supported devices grouped by version (cached 1h)."""
    return jsonify(Rom.get_current_devices_by_version())
@app.route('/api/v1/<string:filename>', methods=['DELETE',])
@api_key_required
def api_v1_delete_file(filename):
    """Delete every ROM record matching *filename* (requires API key)."""
    Rom.objects(filename=filename).delete()
    return '', 200
@app.route('/api/v1/purgecache', methods=['POST',])
@api_key_required
def purge_cache():
    """Flush the whole response cache (requires API key)."""
    cache.clear()
    return 'ok', 200
@app.route("/<string:device>")
@cache.cached(timeout=3600)
def web_device(device):
devices = sorted([x for x in Device.get_devices() if x['model'] in Rom.get_devices()], key=lambda device: device['name'])
oems = sorted(list(set([x['oem'] for x in devices])))
roms = Rom.get_roms(device=device, before=app.config['BUILD_SYNC_TIME'])
active_oem = [x['oem'] for x in devices if x['model'] == device]
active_oem = active_oem[0] if active_oem else None
active_device = Device.objects(model=device).first()
return render_template("device.html", active_oem=active_oem, active_device=active_device, oems=oems, devices=devices, roms=roms, get_timestamp=get_timestamp)
@app.route("/extras")
@cache.cached(timeout=3600)
def web_extras():
devices = sorted([x for x in Device.get_devices() if x['model'] in Rom.get_devices()], key=lambda device: device['name'])
oems = sorted(list(set([x['oem'] for x in devices])))
return render_template("extras.html", active_device=None, oems=oems, devices=devices, extras=True)
|
from keras.models import Sequential
from tensorflow.keras import Model
from keras.layers import Dense, Dropout, Activation, Flatten, Lambda
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
def get_conv(input_shape=(64, 64, 3), filename=None):
    """Build the fully-convolutional classifier used here.

    :param input_shape: input image shape (height, width, channels)
    :param filename: optional path to pre-trained weights, loaded *before*
        the final Flatten layer is appended (saved checkpoints therefore
        match the architecture without Flatten)
    :return: compiled Keras model (mse loss, adadelta optimizer)
    """
    model = Sequential()
    # Normalize pixel values from [0, 255] to [-1, 1].
    model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=input_shape, output_shape=input_shape))
    model.add(Conv2D(32, (3, 3), activation='relu', name='conv1', input_shape=input_shape, padding="same"))
    model.add(Conv2D(64, (3, 3), activation='relu', name='conv2', padding="same"))
    model.add(MaxPooling2D(pool_size=(3, 3)))
    model.add(Dropout(0.25))
    # "dense" layers implemented as convolutions, keeping the network fully
    # convolutional -- presumably so it can slide over larger inputs; confirm.
    model.add(Conv2D(128, (8, 8), activation="relu", name="dense1"))
    model.add(Dropout(0.5))
    model.add(Conv2D(1, (14, 14), name="dense2", activation="sigmoid"))
    # Debug aid: print each layer's input/output shapes.
    for layer in model.layers:
        print(layer.input_shape, layer.output_shape)
    if filename:
        model.load_weights(filename)
    model.add(Flatten())
    model.compile(loss='mse', optimizer='adadelta', metrics=['accuracy'])
    return model
|
from django.contrib import admin
from .models import Airline, Airport, Route, FlightNumber
from rest_framework.authtoken.admin import TokenAdmin
# Use a raw id input instead of a dropdown for the auth token's user FK.
TokenAdmin.raw_id_fields = ('user',)
# Register the flight models with the default admin options.
admin.site.register(Airline)
admin.site.register(Airport)
admin.site.register(Route)
admin.site.register(FlightNumber)
"""The yr component."""
|
#!/usr/bin/env python
"""Demonstrate list vs collections.deque for queue-like left-end operations."""
names = ['raymond', 'rachel', 'matthew', 'roger', 'betty', 'melissa', 'judith', 'charlie']
print(', '.join(names))
# list.pop(0) / list.insert(0, ...) are O(n): every element shifts.
names.pop(0)
print(', '.join(names))
names.insert(0, 'mark')
print(', '.join(names))
from collections import deque
nicks = deque(['raymond', 'rachel', 'matthew', 'roger', 'betty', 'melissa', 'judith', 'charlie'])
print('nicks=', ', '.join(nicks))
# Bug fix: deque.appendleft() mutates in place and returns None, so the old
# "nicks2 = nicks.appendleft('mark')" crashed on the following join(None).
# Also converted Python-2 print statements to the function form.
nicks.appendleft('mark')
print(', '.join(nicks))
|
# -*- coding: utf-8 -*-
from bert import modeling
import os
import tensorflow as tf
from scipy.special import softmax
import time
import numpy as np
import collections
from utils import load_run
from config import config_dict
from functions import model_fn_builder, input_fn_builder
# Command-line flags (TF1 / absl style). The flags marked as required are
# enforced in the __main__ block at the bottom of this file.
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
    "device", "0",
    "CUDA device number")
flags.DEFINE_string(
    "output_path", None,
    "output path"
)
flags.DEFINE_integer(
    'kc', None,
    'kc in the paper'
)
flags.DEFINE_string(
    'third_model_path', None,
    'path of the third model'
)
flags.DEFINE_integer(
    'batch_size', None,
    'batch size for training and evaluation'
)
flags.DEFINE_string(
    'tpu', None,
    'tpu address'
)
flags.DEFINE_string(
    'dataset', None,
    "dataset: robust04 or gov2"
)
flags.DEFINE_integer(
    'rerank_num', None,
    "the number of documents to be re-ranked"
)
flags.DEFINE_integer(
    'max_seq_length', 384,
    "max sequence length for BERT"
)
flags.DEFINE_string(
    'model_size', None,
    "BERT model size used in the current phase"
)
flags.DEFINE_string(
    'first_model_path', None,
    "first model path"
)
# NOTE(review): FLAGS.device is read at import time, before tf.app.run()
# parses argv -- this relies on tf.flags parsing lazily on first access;
# confirm against the TF version in use.
os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.device
init_checkpoint = None  # @param {type:"string"}
print('***** BERT Init Checkpoint: {} *****'.format(init_checkpoint))
# Parameters
use_tpu = False if FLAGS.tpu is None else True
iterations_per_loop = 500
num_tpu_cores = 8
def main(_):
    """Run re-ranking evaluation on the valid and test splits.

    Per query, per-chunk BERT relevance probabilities (cp_scores) are
    combined with softmax-normalized query-chunk weights (qc_scores) into a
    single document score; documents beyond FLAGS.rerank_num keep their
    first-stage order via monotonically decreasing filler scores. Rankings
    are written in TREC format.
    """
    bert_config = modeling.BertConfig.from_json_file(config_dict[FLAGS.model_size])
    if FLAGS.max_seq_length > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (FLAGS.max_seq_length, bert_config.max_position_embeddings))
    tpu_cluster_resolver = None
    if use_tpu:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu)
    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        keep_checkpoint_max=1,
        model_dir=FLAGS.output_path,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=iterations_per_loop,
            num_shards=num_tpu_cores,
            per_host_input_for_training=is_per_host))
    model_fn = model_fn_builder(
        bert_config=bert_config,
        num_labels=2,
        init_checkpoint=init_checkpoint,
        use_tpu=use_tpu,
        use_one_hot_embeddings=use_tpu)
    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.batch_size,
        eval_batch_size=FLAGS.batch_size,
        predict_batch_size=FLAGS.batch_size,
        params={"qc_scores": "qc_scores"})
    tf.logging.info("***** Running evaluation *****")
    tf.logging.info(" Batch size = %d", FLAGS.batch_size)
    for split in ["valid", "test"]:
        # First-stage (MaxP) run, used to fill ranks below rerank_num.
        maxp_run = load_run(os.path.join(FLAGS.first_model_path, "{}_{}_result.trec".format(FLAGS.dataset, split)))
        query_docids_map = []
        data_path = os.path.join(FLAGS.output_path, "rerank-{0}_kc-{1}".format(FLAGS.rerank_num, FLAGS.kc), "data")
        result_path = os.path.join(FLAGS.output_path, "rerank-{0}_kc-{1}".format(FLAGS.rerank_num, FLAGS.kc), "result")
        if not tf.gfile.Exists(result_path):
            tf.gfile.MakeDirs(result_path)
        with tf.gfile.Open(os.path.join(data_path, "chunk_passage_ids_{0}.txt".format(split))) as ref_file:
            for line in ref_file:
                query_docids_map.append(line.strip().split("\t"))
        predict_input_fn = input_fn_builder(
            dataset_path=os.path.join(data_path, "chunk_passage_{0}.tf".format(split)),
            is_training=False,
            seq_length=FLAGS.max_seq_length,
            drop_remainder=False)
        total_count = 0
        result_file = tf.gfile.Open(os.path.join(result_path, "{0}_{1}_result.trec".format(FLAGS.dataset, split)), 'w')
        ckpt = tf.train.latest_checkpoint(checkpoint_dir=FLAGS.third_model_path)
        print("use latest ckpt: {0}".format(ckpt))
        result = estimator.predict(input_fn=predict_input_fn,
                                   yield_single_examples=True,
                                   checkpoint_path=ckpt)
        start_time = time.time()
        results = []
        result_dict = collections.OrderedDict()
        for item in result:
            results.append((item["qc_scores"], item["probs"]))
            total_count += 1
            # Flush accumulated scores when this was the last example or the
            # next example belongs to a different query id.
            if total_count == len(query_docids_map) or query_docids_map[total_count][0] != \
                    query_docids_map[total_count - 1][0]:
                chunk_num = len(results) // FLAGS.rerank_num
                assert chunk_num <= FLAGS.kc
                qc_scores, probs = list(zip(*results))
                qc_scores = np.stack(qc_scores)
                cp_scores = np.stack(probs)[:, 1]
                qc_scores = np.reshape(qc_scores, [FLAGS.rerank_num, chunk_num])
                cp_scores = np.reshape(cp_scores, [FLAGS.rerank_num, chunk_num])
                # softmax normalization
                qc_scores = softmax(qc_scores, axis=-1)
                scores = np.sum(np.multiply(qc_scores, cp_scores), axis=-1, keepdims=False)
                start_idx = total_count - FLAGS.rerank_num * chunk_num
                end_idx = total_count
                query_ids, chunk_ids, passage_ids, labels, qc_scores = zip(*query_docids_map[start_idx:end_idx])
                assert len(set(query_ids)) == 1, "Query ids must be all the same."
                query_id = query_ids[0]
                # Collapse passage ids ("<doc>_<n>") into an ordered, unique
                # list of document ids.
                candidate_docs = list()
                for pid in passage_ids:
                    doc_id = pid.split("_")[0]
                    if doc_id not in candidate_docs:
                        candidate_docs.append(doc_id)
                result_dict[query_id] = dict()
                for i, doc in enumerate(candidate_docs):
                    result_dict[query_id][doc] = scores[i]
                rerank_list = sorted(result_dict[query_id].items(), key=lambda x: x[1], reverse=True)
                last_score = rerank_list[-1][1]
                # Documents past the re-rank depth keep first-stage order,
                # each scored 0.01 below the previous one.
                for doc in maxp_run[query_id][FLAGS.rerank_num:]:
                    current_score = last_score - 0.01
                    result_dict[query_id][doc] = current_score
                    last_score = current_score
                ranking_list = sorted(result_dict[query_id].items(), key=lambda x: x[1], reverse=True)
                for rank, (doc_id, score) in enumerate(ranking_list):
                    result_file.write(
                        "\t".join([query_id, "Q0", doc_id, str(rank + 1), str(score), "chunk_passage_PRF"]) + "\n")
                results = []
            if total_count % 1000 == 0:
                tf.logging.warn("Read {} examples in {} secs".format(
                    total_count, int(time.time() - start_time)))
        result_file.close()
    tf.logging.info("Done Evaluating!")
if __name__ == "__main__":
flags.mark_flag_as_required("model_size")
flags.mark_flag_as_required("output_path")
flags.mark_flag_as_required("kc")
flags.mark_flag_as_required("third_model_path")
flags.mark_flag_as_required('batch_size')
flags.mark_flag_as_required('dataset')
flags.mark_flag_as_required('rerank_num')
flags.mark_flag_as_required('first_model_path')
tf.app.run()
|
'''
TACO: Multi-sample transcriptome assembly from RNA-Seq
'''
import numpy as np
from scipy.stats import distributions
from taco.lib.scipy.norm_sf import norm_sf
from scipy.stats import mannwhitneyu as scipy_mwu
from taco.lib.stats import mannwhitneyu as mwu
def test_mannwhitneyu():
    """TACO's Mann-Whitney U p-value should agree with scipy's."""
    sample_a = [1, 2, 3, 4, 5]
    sample_b = [6, 7, 8, 9, 10]
    expected = scipy_mwu(sample_a, sample_b).pvalue
    actual = mwu(sample_a, sample_b).pvalue
    assert abs(expected - actual) < 1e-5
|
import re
from pymocky.models.config import Config
from pymocky.utils.log import Log
class CherryPyUpdateScenario(object):
    """Handles requests that switch the mock server's active scenario."""

    def __init__(self, mapping_handler):
        self.mapping_handler = mapping_handler

    def response(self):
        """JSON body acknowledging a scenario change."""
        return '{"success": true, "message": "updated"}'

    @staticmethod
    def is_update_scenario(url):
        """True when *url* targets /update-scenario on a local host alias."""
        pattern = r"^.*(127\.0\.0\.1|localhost|pymocky)(:\d*)?/update-scenario$"
        return re.match(pattern, url) is not None
def cherry_py_check_update_scenario(func):
    """Decorator: intercept /update-scenario requests before calling *func*.

    When the request targets the update-scenario endpoint, the global
    Config.scenario is switched (defaulting to "default") and the
    acknowledgement JSON is returned instead of invoking the wrapped handler.
    """
    def parse_update_scenario(*args, **kwargs):
        handler = args[0]
        if not hasattr(handler, "update_scenario"):
            handler.update_scenario = CherryPyUpdateScenario(handler.mapping_handler)
        if handler.update_scenario.is_update_scenario(handler.cherrypy.url()):
            if Config.verbose:
                Log.info("Accessing: update scenario")
            scenario = handler.cherrypy.request.params.get("scenario") or "default"
            Config.scenario = scenario
            Log.info("Scenario changed to: {0}".format(scenario))
            return handler.update_scenario.response()
        return func(handler)
    return parse_update_scenario
|
"""Convert straight quotation marks to typographic ones
"""
from __future__ import annotations
import re
from typing import Any
from .state_core import StateCore
from ..common.utils import charCodeAt
from ..common.utils import isWhiteSpace, isPunctChar, isMdAsciiPunct
from ..token import Token
# NOTE(review): QUOTE_TEST_RE appears unused in this module (QUOTE_RE below
# is the one referenced) -- confirm against callers before removing.
QUOTE_TEST_RE = re.compile(r"['\"]")
QUOTE_RE = re.compile(r"['\"]")
APOSTROPHE = "\u2019"  # ’
def replaceAt(string: str, index: int, ch: str) -> str:
    """Return *string* with the single character at *index* replaced by *ch*.

    Unlike the JS original, negative indices are rejected up front: Python
    slicing would behave differently there, and in practice the index is
    never negative.
    """
    assert index >= 0
    head = string[:index]
    tail = string[index + 1 :]
    return head + ch + tail
def process_inlines(tokens: list[Token], state: StateCore) -> None:
    """Replace straight quotes with typographic ones across inline tokens.

    Mirrors the markdown-it JS implementation: a stack of potential opening
    quotes (one entry per candidate, tagged with its nesting level) lets a
    closing quote rewind to the most recent compatible opener, possibly in
    an earlier token.
    """
    stack: list[dict[str, Any]] = []
    for i in range(len(tokens)):
        token = tokens[i]
        thisLevel = token.level
        # Drop stack entries that belong to deeper nesting levels.
        j = 0
        for j in range(len(stack))[::-1]:
            if stack[j]["level"] <= thisLevel:
                break
        else:
            # When the loop is terminated without a "break".
            # Subtract 1 to get the same index as the js version.
            j -= 1
        stack = stack[: j + 1]
        if token.type != "text":
            continue
        text = token.content
        pos = 0
        maximum = len(text)
        while pos < maximum:
            goto_outer = False
            lastIndex = pos
            t = QUOTE_RE.search(text[lastIndex:])
            if not t:
                break
            canOpen = canClose = True
            pos = t.start(0) + lastIndex + 1
            isSingle = t.group(0) == "'"
            # Find previous character,
            # default to space if it's the beginning of the line
            lastChar = 0x20
            if t.start(0) + lastIndex - 1 >= 0:
                lastChar = charCodeAt(text, t.start(0) + lastIndex - 1)
            else:
                for j in range(i)[::-1]:
                    # lastChar defaults to 0x20
                    if tokens[j].type == "softbreak" or tokens[j].type == "hardbreak":
                        break
                    # should skip all tokens except 'text', 'html_inline' or 'code_inline'
                    if not tokens[j].content:
                        continue
                    lastChar = charCodeAt(tokens[j].content, len(tokens[j].content) - 1)
                    break
            # Find next character,
            # default to space if it's the end of the line
            nextChar = 0x20
            if pos < maximum:
                nextChar = charCodeAt(text, pos)
            else:
                for j in range(i + 1, len(tokens)):
                    # nextChar defaults to 0x20
                    if tokens[j].type == "softbreak" or tokens[j].type == "hardbreak":
                        break
                    # should skip all tokens except 'text', 'html_inline' or 'code_inline'
                    if not tokens[j].content:
                        continue
                    nextChar = charCodeAt(tokens[j].content, 0)
                    break
            isLastPunctChar = isMdAsciiPunct(lastChar) or isPunctChar(chr(lastChar))
            isNextPunctChar = isMdAsciiPunct(nextChar) or isPunctChar(chr(nextChar))
            isLastWhiteSpace = isWhiteSpace(lastChar)
            isNextWhiteSpace = isWhiteSpace(nextChar)
            if isNextWhiteSpace:
                canOpen = False
            elif isNextPunctChar:
                if not (isLastWhiteSpace or isLastPunctChar):
                    canOpen = False
            if isLastWhiteSpace:
                canClose = False
            elif isLastPunctChar:
                if not (isNextWhiteSpace or isNextPunctChar):
                    canClose = False
            if nextChar == 0x22 and t.group(0) == '"':  # 0x22: "
                if lastChar >= 0x30 and lastChar <= 0x39:  # 0x30: 0, 0x39: 9
                    # special case: 1"" - count first quote as an inch
                    canClose = canOpen = False
            if canOpen and canClose:
                # Replace quotes in the middle of punctuation sequence, but not
                # in the middle of the words, i.e.:
                #
                # 1. foo " bar " baz - not replaced
                # 2. foo-"-bar-"-baz - replaced
                # 3. foo"bar"baz - not replaced
                canOpen = isLastPunctChar
                canClose = isNextPunctChar
            if not canOpen and not canClose:
                # middle of word
                if isSingle:
                    token.content = replaceAt(
                        token.content, t.start(0) + lastIndex, APOSTROPHE
                    )
                continue
            if canClose:
                # this could be a closing quote, rewind the stack to get a match
                for j in range(len(stack))[::-1]:
                    item = stack[j]
                    if stack[j]["level"] < thisLevel:
                        break
                    if item["single"] == isSingle and stack[j]["level"] == thisLevel:
                        item = stack[j]
                        if isSingle:
                            openQuote = state.md.options.quotes[2]
                            closeQuote = state.md.options.quotes[3]
                        else:
                            openQuote = state.md.options.quotes[0]
                            closeQuote = state.md.options.quotes[1]
                        # replace token.content *before* tokens[item.token].content,
                        # because, if they are pointing at the same token, replaceAt
                        # could mess up indices when quote length != 1
                        token.content = replaceAt(
                            token.content, t.start(0) + lastIndex, closeQuote
                        )
                        tokens[item["token"]].content = replaceAt(
                            tokens[item["token"]].content, item["pos"], openQuote
                        )
                        # Configured quotes may be longer than one character;
                        # shift the scan position and re-read the content.
                        pos += len(closeQuote) - 1
                        if item["token"] == i:
                            pos += len(openQuote) - 1
                        text = token.content
                        maximum = len(text)
                        stack = stack[:j]
                        goto_outer = True
                        break
                if goto_outer:
                    goto_outer = False
                    continue
            if canOpen:
                stack.append(
                    {
                        "token": i,
                        "pos": t.start(0) + lastIndex,
                        "single": isSingle,
                        "level": thisLevel,
                    }
                )
            elif canClose and isSingle:
                token.content = replaceAt(
                    token.content, t.start(0) + lastIndex, APOSTROPHE
                )
def smartquotes(state: StateCore) -> None:
    """Core rule entry point: typographic quotes for every inline token.

    Does nothing unless the typographer option is enabled.
    """
    if not state.md.options.typographer:
        return
    for token in state.tokens:
        if token.type != "inline":
            continue
        if not QUOTE_RE.search(token.content):
            continue
        assert token.children is not None
        process_inlines(token.children, state)
|
from . import stage_groups as sz_stage
from . import groups
import stages
class Summary:
    """Serializer producing one summary row per experiment."""

    def header_for(self, experiment_class):
        """Column names of the summary row."""
        return [
            'id', 'num_stages', 'start_time',
            'duration', 'is_complete', 'size_in_bytes'
        ]

    def data_for(self, experiment):
        """Summary values for one experiment, aligned with header_for()."""
        return [
            experiment.experiment_id(),
            experiment.num_stages(),
            experiment.time_start(),
            experiment.time_duration(),
            experiment.is_complete(),
            experiment.size_in_bytes(),
        ]

    def description_for(self, experiment_class):
        """Human-readable (Spanish) description of each summary column."""
        return [
            'Identificador único del experimento',
            'Cantidad de etapas del experimento (en total son 8)',
            'Fecha del inicio del experimento, en milisegundos desde 1/1/1970',
            'Duración en milisegundos desde el inicio hasta su fin',
            'Verdadero si están todas las etapas, y falso si falta alguna',
            'Tamaño en bytes del experimento, aumenta cuantos más clicks y movimientos hubo'
        ]
class Full:
    """Serializer that concatenates the summary row with per-stage fields."""

    def __init__(self):
        stage_serializers = sz_stage.all_by_category()
        self.summary = Summary()
        composite = groups.Composite([
            stage_serializers['flat'], stage_serializers['recursive']
        ])
        self.serializer = groups.SingleWrapper(composite)

    def header_for(self, experiment_class):
        """Summary headers followed by '<stage>_<field>' headers per stage."""
        result = list(self.summary.header_for(experiment_class))
        for stage in stages.all_stages():
            prefix = stage.stage_name()
            result.extend(
                "{}_{}".format(prefix, field)
                for field in self.serializer.header_for(stage)
            )
        return result

    def data_for(self, experiment):
        """Summary values followed by each stage's data ('missing' if absent)."""
        result = list(self.summary.data_for(experiment))
        for stage in stages.all_stages():
            name = stage.stage_name()
            if experiment.has_stage(name):
                fields = self.serializer.data_for(experiment.get_stage(name))
            else:
                # Keep column alignment: one 'missing' per expected field.
                fields = ['missing'] * len(self.serializer.header_for(stage))
            result.extend(fields)
        return result

    def description_for(self, experiment_class):
        """Summary descriptions followed by per-stage field descriptions."""
        result = list(self.summary.description_for(experiment_class))
        for stage in stages.all_stages():
            name = stage.stage_name()
            result.extend(
                '{} para la etapa "{}"'.format(field, name)
                for field in self.serializer.description_for(stage)
            )
        return result
|
# -*- coding: utf-8 -*-
"""
617. Merge Two Binary Trees
Given two binary trees and imagine that when you put one of them to cover the other,
some nodes of the two trees are overlapped while the others are not.
You need to merge them into a new binary tree.
The merge rule is that if two nodes overlap, then sum node values up as the new value of the merged node.
Otherwise, the NOT null node will be used as the node of new tree.
Note: The merging process must start from the root nodes of both trees.
"""
# Definition for a binary tree node.
class TreeNode:
    """Plain binary tree node."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def mergeTrees(self, t1: TreeNode, t2: TreeNode) -> TreeNode:
        """Merge two binary trees, summing values where nodes overlap.

        Where only one tree has a node, that subtree is reused as-is.
        """
        if t1 is None:
            return t2
        if t2 is None:
            return t1
        merged = TreeNode(
            val=t1.val + t2.val,
            left=self.mergeTrees(t1.left, t2.left),
            right=self.mergeTrees(t1.right, t2.right),
        )
        return merged
|
import numpy as np
import matplotlib.pyplot as plt
from parameters import *
from models import *
def simulate_stochastic_clb(params, Y0, Omega, T_end, dt = 1):
    """Gillespie (SSA) simulation of the stochastic CLB model.

    :param params: kinetic parameters; the last 8 entries (toggle induction
        rates) are zeroed in place once t exceeds T_end/2 -- note that
        *params is mutated*.
    :param Y0: initial copy-number state vector
    :param Omega: reaction-volume scaling passed to the propensity model
    :param T_end: simulation end time
    :param dt: minimal spacing between recorded samples
    :return: (T, Y) -- sample times and the state at each sample
    """
    state = np.array(Y0)
    Y_total = np.zeros([1+T_end//dt, len(state)])
    T = np.zeros(1+T_end//dt)
    t = 0
    Y_total[0, :] = state
    T[0] = t
    N = CLB_generate_stoichiometry()
    i = 1
    last_time = t
    while t < T_end:
        """
        if t < T_end/3:
            rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b = 0, 5, 5, 0, 5, 0, 5, 0
        elif t < 2*T_end/3:
            rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b = 0, 0, 0, 0, 0, 0, 0, 0
        else:
            rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b = 5, 0, 0, 5, 0, 0, 0, 0
        params[-8:] = rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b
        if t > T_end/2:
            S = np.array([1, 0])
            state[24:26] = S*Omega
        """
        # Switch off all toggle induction in the second half of the run.
        if t > T_end/2:
            #params[-8:] = rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b
            params[-8:] = 0, 0, 0, 0, 0, 0, 0, 0
        #choose two random numbers
        r = np.random.uniform(size=2)
        r1 = r[0]
        r2 = r[1]
        # Propensities and their cumulative sum for reaction selection.
        a = CLB_model_stochastic(state, params, Omega)
        asum = np.cumsum(a)
        a0 = np.sum(a)
        #get tau
        tau = (1.0/a0)*np.log(1.0/r1)
        #print(t)
        #select reaction
        reaction_number = np.argwhere(asum > r2*a0)[0,0] #get first element
        #update concentrations
        state = state + N[:,reaction_number]
        #update time
        t = t + tau
        # Record a sample at most once per dt (and always at the end).
        if (t - last_time >= dt) or (t >= T_end):
            last_time = t
            Y_total[i, :] = state
            T[i] = t
            i += 1
    return T[:i], Y_total[:i,:]
Y0 = np.zeros(59)
# number of cells: toggle switches
N_I0 = np.array([1,1])
N_I1 = np.array([1,1])
N_I2 = np.array([1,1])
N_I3 = np.array([1,1])
Y0[4:6] = N_I0
Y0[10:12] = N_I1
Y0[16:18] = N_I2
Y0[22:24] = N_I3
# number of cells: mux
#Y0[22-4+24:38-4+24] = 1 # number of cells
Y0[42:58] = 1 # number of cells
# reaction space volume for the whole cell population
# N_cells should be set to 1
Omega = 10
t_end = 500
states = [([0,0], [0,0,0,0]),
([0,0], [1,0,0,0]),
([1,0], [1,0,0,0]),
([1,0], [0,1,0,0]),
([0,1], [0,1,0,0]),
([0,1], [0,0,1,0]),
([1,1], [0,0,1,0]),
([1,1], [0,0,0,1])]
"""
states = [([0,0], [0,0,0,0]), ([0,0], [1,0,0,0]),
([1,0], [1,0,0,0]), ([1,0], [1,1,0,0]),
([0,1], [1,1,0,0]), ([0,1], [1,1,1,0]),
([1,1], [1,1,1,0]), ([1,1], [1,1,1,1])]
"""
# Simulate each (S, I) input combination, chaining the final state of one
# segment into the initial state of the next.
for iteration, state in enumerate(states):
    S = state[0]
    I = state[1]
    I0, I1, I2, I3 = I
    rho_x = 0
    rho_y = 0
    # If the toggle inputs did not change from the previous segment, apply
    # no induction; otherwise drive each toggle toward its target state.
    if iteration > 0 and states[iteration-1][1] == I:
        #rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b = (1-I0) * 5, I0*5, (1-I1)*5, I1*5, (1-I2)*5, I2*5, (1-I3)*5, I3*5
        rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b = 0, 0, 0, 0, 0, 0, 0, 0
    else:
        rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b = (1-I0) * 5, I0*5, (1-I1)*5, I1*5, (1-I2)*5, I2*5, (1-I3)*5, I3*5
        #rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b = 5, 0, 5, 0, 5, 0, 5, 0
    params = [delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, delta_y, rho_x, rho_y, gamma_x, theta_x, r_X, r_Y,
              rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b]
    # Continue from where the previous segment ended.
    if iteration:
        Y0 = Y_full[-1,:]
    #else:
    #    Y0 *= N_cells
    #print(Y0)
    Y0[24:26] = np.array(S) * Omega
    T, Y = simulate_stochastic_clb(params, Y0, Omega, t_end)
    if not iteration:
        Y_full = Y
        T_full = T
    else:
        Y_full = np.append(Y_full, Y, axis = 0)
        T_full = np.append(T_full, T + T_full[-1], axis = 0)
Y = Y_full
T = T_full
"""
results
"""
out = Y[:,-1]
S0, S1 = Y[:,24], Y[:,25]
I0_a, I0_b = Y[:,2], Y[:,3]
I1_a, I1_b = Y[:,8], Y[:,9]
I2_a, I2_b = Y[:,14], Y[:,15]
I3_a, I3_b = Y[:,20], Y[:,21]
# plot
"""
ax1 = plt.subplot(241)
ax1.plot(T, I0_a)
ax1.plot(T, I0_b)
ax1.legend(["I0_a = I0", "I0_b"])
ax1.set_title('I0 toggle')
ax2 = plt.subplot(242)
ax2.plot(T, I1_a)
ax2.plot(T, I1_b)
ax2.legend(["I1_a = I1", "I1_b"])
ax2.set_title('I1 toggle')
ax3 = plt.subplot(243)
ax3.plot(T, I2_a)
ax3.plot(T, I2_b)
ax3.legend(["I2_a = I2", "I2_b"])
ax3.set_title('I2 toggle')
ax4 = plt.subplot(244)
ax4.plot(T, I3_a)
ax4.plot(T, I3_b)
ax4.legend(["I3_a = I3", "I3_b"])
ax4.set_title('I3 toggle')
ax5 = plt.subplot(212)
ax5.plot(T,out)
ax5.set_title('out')
plt.suptitle(f"S = [{S[1]},{S[0]}]")
plt.show()
"""
# plot
ax1 = plt.subplot(341)
ax1.plot(T, I0_a, color="#800000ff", alpha=0.75)
ax1.plot(T, I0_b, color="#999999ff", alpha=0.75)
ax1.legend(["$I_0$", "$\\overline{I_0}$"])
#ax1.set_title('$I_0$ toggle')
ax1.set_xlabel("Time [min]")
ax1.set_ylabel("Molecules")
ax2 = plt.subplot(342)
ax2.plot(T, I1_a, color = "#00ff00ff", alpha=0.75)
ax2.plot(T, I1_b, color = "#666666ff")#, alpha=0.75)
ax2.legend(["$I_1$", "$\\overline{I_1}$"])
#ax2.set_title('$I_1$ toggle')
ax2.set_xlabel("Time [min]")
ax2.set_ylabel("Molecules")
ax3 = plt.subplot(343)
ax3.plot(T, I2_a, color = "#0000ffff", alpha=0.75)
ax3.plot(T, I2_b, color = "#ecececfe")#, alpha=0.75)
ax3.legend(["$I_2$", "$\\overline{I_2}$"])
#ax3.set_title('$I_2$ toggle')
ax3.set_xlabel("Time [min]")
ax3.set_ylabel("Molecules")
ax4 = plt.subplot(344)
ax4.plot(T, I3_a, color = "#800080ff", alpha=0.75)
ax4.plot(T, I3_b, color = "#999999fc")#, alpha=0.75)
ax4.legend(["$I_3$", "$\\overline{I_3}$"])
#ax4.set_title('$I_3$ toggle')
ax4.set_xlabel("Time [min]")
ax4.set_ylabel("Molecules")
ax5 = plt.subplot(312)
ax5.plot(T,S0, color = "#ff6600ff", alpha=0.75)
ax5.plot(T,S1, color = "#ffff00ff")#, alpha=0.75)
ax5.legend(["$S_0$", "$S_1$"])
#ax5.set_title('Select inputs')
ax5.set_xlabel("Time [min]")
ax5.set_ylabel("Molecules")
ax6 = plt.subplot(313)
ax6.plot(T,out, color = "#8080805a", alpha=0.75)
#ax6.set_title('out')
ax6.legend(['out'])
ax6.set_xlabel("Time [min]")
ax6.set_ylabel("Molecules")
#step = int(self.N)
#ax6.plot(T[step::step], out[step::step], 'x')
#plt.suptitle("$out = \\overline{S}_1 \\overline{S}_0 I_0 \\vee \\overline{S}_1 S_0 I_1 \\vee S_1 \\overline{S}_0 I_2 \\vee S_1 S_0 I_3$")
plt.gcf().set_size_inches(15,10)
#plt.savefig("figs\\CBLB_ssa.pdf", bbox_inches = 'tight')
plt.show() |
from django.contrib import admin
from myapp.models import Person, Image
class PersonAdmin(admin.ModelAdmin):
    # Show every Person field as a column in the admin changelist.
    list_display=[f.name for f in Person._meta.fields]
admin.site.register(Person,PersonAdmin)
class ImageAdmin(admin.ModelAdmin):
    # Show every Image field as a column in the admin changelist.
    list_display=[f.name for f in Image._meta.fields]
admin.site.register(Image,ImageAdmin)
# Generated by Django 2.1.7 on 2019-03-23 00:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Drop the ExternalUser model and repoint Viewer.post with CASCADE delete."""
    dependencies = [
        ('posts', '0009_auto_20190322_1755'),
    ]
    operations = [
        migrations.DeleteModel(
            name='ExternalUser',
        ),
        migrations.AlterField(
            model_name='viewer',
            name='post',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='visibleTo',
                                    to='posts.Post'),
        ),
    ]
|
from ..tmx_tile_layer import load_tmx_tile_layer
from defusedxml import ElementTree
from io import StringIO
import unittest
class TestTmxTileLayer(unittest.TestCase):
    """Test loading TMX tile layer entries."""
    def test_flips_y_coordinate(self):
        """Y coordinates are flipped to place origin at bottom left."""
        # A 3x2 CSV-encoded layer, as Tiled would emit it.
        mock_xml = '<layer width="3" height="2">\n'
        mock_xml += '\t<data encoding="csv">\n'
        mock_xml += '\t\t1,2,3,\n'
        mock_xml += '\t\t4,5,6\n'
        mock_xml += '\t</data>\n'
        mock_xml += '</layer>'
        tile_layer_node = ElementTree.parse(StringIO(mock_xml)).getroot()
        # Right-up order: (0, 0) is the top left corner of the map
        # Expected tuples are (x, y, gid) with y already flipped.
        expected = [
            (0, 1, 1), (1, 1, 2),
            (2, 1, 3), (0, 0, 4),
            (1, 0, 5), (2, 0, 6)]
        # Load the tile layer
        actual = list(load_tmx_tile_layer(tile_layer_node))
        self.assertEqual(expected, actual)
|
"""
Given a 32-bit signed integer, reverse digits of an integer.
Assume we are dealing with an environment that can only store integers within the 32-bit signed integer range: [−2^31, 2^31 − 1].
For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows.
Example 1:
Input: x = 123
Output: 321
Example 2:
Input: x = -123
Output: -321
Example 3:
Input: x = 120
Output: 21
Example 4:
Input: x = 0
Output: 0
Constraints:
-2^31 <= x <= 2^31 - 1
"""
# define an input for testing purposes
x = 1534236469


def reverse_digits(number: int) -> int:
    """Reverse the decimal digits of a 32-bit signed integer.

    Args:
        number: The integer whose digits are to be reversed.

    Returns:
        The digit-reversed integer (sign preserved, leading zeros dropped),
        or 0 when the reversed value overflows the 32-bit signed range.
    """
    sign = -1 if number < 0 else 1
    # Reversing the digit string of abs(number) drops what were trailing zeros.
    reversed_value = sign * int(str(abs(number))[::-1])
    # The valid range is [-2**31, 2**31 - 1], inclusive at BOTH ends — the
    # original check used a strict '>' and wrongly rejected -2**31 itself.
    if -2**31 <= reversed_value <= 2**31 - 1:
        return reversed_value
    return 0


# use print statement to check if it works
print(reverse_digits(x))
# My Submission: https://leetcode.com/submissions/detail/433340125/
|
"""
notebook imports
"""
import warnings

# Optional notebook-only dependencies: degrade to a warning instead of
# failing hard when the widget/search tooling is not installed.
try:
    import ipywidgets
    import IPython
    from notebooktools import *
    from ontologysearch import OntologySearch
    from parameterslider import ParameterSlider
    from speciessearch import SearchBySpeciesForm
except ImportError:
    warnings.warn("Notebook tools are not imported, due to missing dependencies.")
|
from typing import Optional, Sequence
import dataclasses
import tensorflow as tf
import vcm
class ThermoBasis:
    """Common interface for thermodynamic state containers.

    Declares the tensor fields shared by the concrete bases and provides
    conversions between the specific-humidity and relative-humidity
    representations.
    """

    u: tf.Tensor
    v: tf.Tensor
    T: tf.Tensor
    q: tf.Tensor
    dp: tf.Tensor
    dz: tf.Tensor
    rh: tf.Tensor
    rho: tf.Tensor
    qc: Optional[tf.Tensor] = None
    scalars: Sequence[tf.Tensor]

    def to_rh(self) -> "RelativeHumidityBasis":
        """Return this state in the relative-humidity representation."""
        return RelativeHumidityBasis(
            u=self.u,
            v=self.v,
            T=self.T,
            rh=self.rh,
            rho=self.rho,
            dz=self.dz,
            qc=self.qc,
            scalars=self.scalars,
        )

    def to_q(self) -> "SpecificHumidityBasis":
        """Return this state in the specific-humidity representation."""
        return SpecificHumidityBasis(
            u=self.u,
            v=self.v,
            T=self.T,
            q=self.q,
            dp=self.dp,
            dz=self.dz,
            qc=self.qc,
            scalars=self.scalars,
        )

    @property
    def args(self) -> Sequence[tf.Tensor]:
        """Positional-tensor representation; implemented by subclasses."""
        raise NotImplementedError()
@dataclasses.dataclass
class SpecificHumidityBasis(ThermoBasis):
    """A thermodynamic basis with specific humidity as the prognostic variable.

    Density (rho) and relative humidity (rh) are derived on demand from the
    stored (T, q, dp, dz) state via vcm.
    """

    u: tf.Tensor
    v: tf.Tensor
    T: tf.Tensor
    q: tf.Tensor
    dp: tf.Tensor
    dz: tf.Tensor
    qc: Optional[tf.Tensor] = None
    scalars: Sequence[tf.Tensor] = dataclasses.field(default_factory=list)

    @property
    def rho(self) -> tf.Tensor:
        # density from pressure thickness and layer depth
        return vcm.density(self.dp, self.dz, math=tf)

    @property
    def rh(self) -> tf.Tensor:
        # relative humidity derived from the prognostic q
        return vcm.relative_humidity(self.T, self.q, self.rho, math=tf)

    @property
    def args(self) -> Sequence[tf.Tensor]:
        """Positional-tensor representation: core fields followed by scalars."""
        return (self.u, self.v, self.T, self.q, self.dp, self.dz, *self.scalars)
@dataclasses.dataclass
class RelativeHumidityBasis(ThermoBasis):
    """A thermodynamic basis with relative humidity as the prognostic variable.

    Specific humidity (q) and pressure thickness (dp) are derived on demand
    from the stored (T, rh, rho, dz) state via vcm.
    """

    u: tf.Tensor
    v: tf.Tensor
    T: tf.Tensor
    rh: tf.Tensor
    rho: tf.Tensor
    dz: tf.Tensor
    qc: Optional[tf.Tensor] = None
    scalars: Sequence[tf.Tensor] = dataclasses.field(default_factory=list)

    @property
    def q(self) -> tf.Tensor:
        # invert rh -> q using the stored temperature and density
        return vcm.specific_humidity_from_rh(self.T, self.rh, self.rho, math=tf)

    @property
    def dp(self) -> tf.Tensor:
        # pressure thickness from density and layer depth
        return vcm.pressure_thickness(self.rho, self.dz, math=tf)

    @property
    def args(self) -> Sequence[tf.Tensor]:
        """Positional-tensor representation: core fields followed by scalars."""
        return (self.u, self.v, self.T, self.rh, self.rho, self.dz, *self.scalars)
|
# Copyright 2021 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import glob
import json
import multiprocessing as mp
import os
import shutil
import threading as td
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, List, Tuple, Any, Union, Callable, Optional
import grpc
from cortex_internal.lib import util
from cortex_internal.lib.client.tensorflow import TensorFlowClient
from cortex_internal.lib.concurrency import LockedFile, get_locked_files
from cortex_internal.lib.exceptions import CortexException, WithBreak
from cortex_internal.lib.log import configure_logger
from cortex_internal.lib.model import (
find_all_s3_models,
validate_model_paths,
TensorFlowServingAPI,
TensorFlowServingAPIClones,
ModelsHolder,
ids_to_models,
LockedGlobalModelsGC,
LockedModel,
get_models_from_api_spec,
ModelsTree,
)
from cortex_internal.lib.storage import S3
from cortex_internal.lib.telemetry import get_default_tags, init_sentry
from cortex_internal.lib.type import (
handler_type_from_api_spec,
PythonHandlerType,
TensorFlowHandlerType,
TensorFlowNeuronHandlerType,
)
logger = configure_logger("cortex", os.environ["CORTEX_LOG_CONFIG_FILE"])
class AbstractLoopingThread(td.Thread):
    """
    Abstract class of the td.Thread class.
    Takes a method and keeps calling it in a loop every certain interval.
    """

    def __init__(self, interval: int, runnable: Callable[[], None]):
        """
        Args:
            interval: Seconds to wait between consecutive calls of runnable.
            runnable: Zero-argument callable invoked on every loop iteration.

        Raises:
            ValueError: If runnable is not callable.
        """
        td.Thread.__init__(self, daemon=True)

        # validate before storing any state
        if not callable(runnable):
            raise ValueError("runnable parameter must be a callable method")

        self._interval = interval
        self._runnable = runnable

        self._event_stopper = td.Event()
        self._stopped = False

    def run(self):
        """
        td.Thread-specific method.
        """
        while not self._event_stopper.is_set():
            self._runnable()
            # Event.wait() instead of time.sleep() so stop() takes effect
            # immediately rather than after up to a full interval.
            self._event_stopper.wait(self._interval)
        self._stopped = True

    def stop(self, blocking: bool = False):
        """
        Stop the thread.

        Args:
            blocking: Whether to wait until the thread is stopped or not.
        """
        self._event_stopper.set()
        if blocking:
            self.join()

    def join(self):
        """
        Block until the thread finishes.
        """
        # poll the flag set at the end of run()
        while not self._stopped:
            time.sleep(0.001)
class FileBasedModelsTreeUpdater(mp.Process):
    """
    Monitors the S3 path(s)/dir and continuously updates the file-based tree.
    The model paths are validated - the bad paths are ignored.
    When a new model is found, it updates the tree and downloads it - likewise when a model is removed.
    """

    def __init__(
        self,
        interval: int,
        api_spec: dict,
        download_dir: str,
        temp_dir: str = "/tmp/cron",
        lock_dir: str = "/run/cron",
    ):
        """
        Args:
            interval: How often to update the models tree. Measured in seconds.
            api_spec: Identical copy of pkg.type.spec.api.API.
            download_dir: Path to where the models are stored.
            temp_dir: Path to where the models are temporarily stored.
            lock_dir: Path to where the resource locks are stored.
        """
        mp.Process.__init__(self, daemon=True)

        self._interval = interval
        self._api_spec = api_spec
        self._download_dir = download_dir
        self._temp_dir = temp_dir
        self._lock_dir = lock_dir

        self._s3_paths = []
        self._spec_models = get_models_from_api_spec(self._api_spec)
        self._s3_model_names = self._spec_models.get_s3_model_names()
        for model_name in self._s3_model_names:
            self._s3_paths.append(self._spec_models[model_name]["path"])
        # FIX: _update_models_tree consults this attribute when deciding which
        # on-disk models to prune, but it was never initialized, so the pruning
        # branch raised AttributeError. This spec version configures no local
        # (non-S3) models, so an empty list preserves the original intent.
        self._local_model_names: List[str] = []

        self._handler_type = handler_type_from_api_spec(self._api_spec)

        # pick the model source from the API spec depending on the handler type
        if (
            self._handler_type == PythonHandlerType
            and self._api_spec["handler"]["multi_model_reloading"]
        ):
            models = self._api_spec["handler"]["multi_model_reloading"]
        elif self._handler_type != PythonHandlerType:
            models = self._api_spec["handler"]["models"]
        else:
            models = None
        if models is None:
            raise CortexException("no specified model")

        if models["dir"] is not None:
            self._is_dir_used = True
            self._models_dir = models["dir"]
        else:
            self._is_dir_used = False
            self._models_dir = None

        # the lock dir may already exist from a previous run
        try:
            os.mkdir(self._lock_dir)
        except FileExistsError:
            pass

        self._ran_once = mp.Event()
        self._event_stopper = mp.Event()
        self._stopped = mp.Event()

    def run(self):
        """
        mp.Process-specific method.
        """
        init_sentry(tags=get_default_tags())
        while not self._event_stopper.is_set():
            self._update_models_tree()
            if not self._ran_once.is_set():
                self._ran_once.set()
            time.sleep(self._interval)
        self._stopped.set()

    def stop(self, blocking: bool = False):
        """
        Trigger the process of stopping the process.

        Args:
            blocking: Whether to wait until the process is stopped or not.
        """
        self._event_stopper.set()
        if blocking:
            self.join()

    def join(self):
        """
        Block until the process exits.
        """
        while not self._stopped.is_set():
            time.sleep(0.001)

    def ran_once(self) -> bool:
        """
        Tells whether the FileBasedModelsTreeUpdater loop has run at least once.
        """
        return self._ran_once.is_set()

    def _update_models_tree(self) -> None:
        """Refresh the on-disk model tree from the upstream S3 state (one cron tick)."""
        # get updated/validated paths/versions of the s3 models
        (
            model_names,
            versions,
            model_paths,
            sub_paths,
            timestamps,
            bucket_names,
        ) = find_all_s3_models(
            self._is_dir_used,
            self._models_dir,
            self._handler_type,
            self._s3_paths,
            self._s3_model_names,
        )

        # update models on the local disk if changes have been detected
        # a model is updated if its directory tree has changed, if it's not present or if it doesn't exist on the upstream
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = []
            for idx, (model_name, bucket_name, bucket_sub_paths) in enumerate(
                zip(model_names, bucket_names, sub_paths)
            ):
                futures += [
                    executor.submit(
                        self._refresh_model,
                        idx,
                        model_name,
                        model_paths[idx],
                        versions[model_name],
                        timestamps[idx],
                        bucket_sub_paths,
                        bucket_name,
                    )
                ]
            # surface any exception raised inside the worker threads
            [future.result() for future in futures]

        # remove models that no longer appear in model_names
        # (renamed loop variable so it no longer shadows the `versions` dict above)
        for model_name, ondisk_versions in find_ondisk_models_with_lock(self._lock_dir).items():
            if model_name in model_names or model_name in self._local_model_names:
                continue
            for ondisk_version in ondisk_versions:
                resource = os.path.join(self._lock_dir, model_name + "-" + ondisk_version + ".txt")
                ondisk_model_version_path = os.path.join(
                    self._download_dir, model_name, ondisk_version
                )
                with LockedFile(resource, "w+") as f:
                    shutil.rmtree(ondisk_model_version_path)
                    f.write("not-available")
            shutil.rmtree(os.path.join(self._download_dir, model_name))

        logger.debug(f"{self.__class__.__name__} cron heartbeat")

    def _refresh_model(
        self,
        idx: int,
        model_name: str,
        model_path: str,
        versions: List[str],
        timestamps: List[datetime.datetime],
        sub_paths: List[str],
        bucket_name: str,
    ) -> None:
        """
        Download/refresh a single model's versions from S3 onto the local disk.

        Args:
            idx: Index of the model within the find_all_s3_models output lists.
            model_name: Name of the model.
            model_path: S3 prefix of the model.
            versions: Versions of the model detected on the upstream.
            timestamps: Upstream last-modified timestamps, one per version.
            sub_paths: All S3 sub-paths found under the model path.
            bucket_name: Name of the S3 bucket holding the model.
        """
        client = S3(bucket_name)
        ondisk_model_path = os.path.join(self._download_dir, model_name)
        for version, model_ts in zip(versions, timestamps):

            # for the lock file
            resource = os.path.join(self._lock_dir, model_name + "-" + version + ".txt")

            # check if a model update is mandated
            update_model = False
            ondisk_model_version_path = os.path.join(ondisk_model_path, version)
            if os.path.exists(ondisk_model_version_path):
                local_paths = glob.glob(
                    os.path.join(ondisk_model_version_path, "**"), recursive=True
                )
                local_paths = util.remove_non_empty_directory_paths(local_paths)
                local_paths = [
                    os.path.relpath(local_path, ondisk_model_version_path)
                    for local_path in local_paths
                ]
                local_paths = [path for path in local_paths if not path.startswith("../")]

                s3_model_version_path = os.path.join(model_path, version)
                s3_paths = [
                    os.path.relpath(sub_path, s3_model_version_path) for sub_path in sub_paths
                ]
                s3_paths = [path for path in s3_paths if not path.startswith("../")]
                s3_paths = util.remove_non_empty_directory_paths(s3_paths)

                # update if the paths don't match
                if set(local_paths) != set(s3_paths):
                    update_model = True

                # update if the timestamp is newer
                with LockedFile(resource, "r", reader_lock=True) as f:
                    file_status = f.read()
                    if file_status == "" or file_status == "not-available":
                        # WithBreak is absorbed by the LockedFile context manager
                        raise WithBreak
                    current_model_ts = int(file_status.split(" ")[1])
                    if current_model_ts < int(model_ts.timestamp()):
                        update_model = True
            else:
                update_model = True

            if update_model:
                # download to a temp directory
                temp_dest = os.path.join(self._temp_dir, model_name, version)
                s3_src = os.path.join(model_path, version)
                client.download_dir_contents(s3_src, temp_dest)

                # validate the downloaded model
                model_contents = glob.glob(os.path.join(temp_dest, "**"), recursive=True)
                model_contents = util.remove_non_empty_directory_paths(model_contents)
                try:
                    validate_model_paths(model_contents, self._handler_type, temp_dest)
                    passed_validation = True
                except CortexException:
                    passed_validation = False
                    shutil.rmtree(temp_dest)
                    s3_path = S3.construct_s3_path(bucket_name, s3_src)
                    logger.debug(
                        f"failed validating model {model_name} of version {version} found at {s3_path} path"
                    )

                # move the model to its destination directory
                if passed_validation:
                    with LockedFile(resource, "w+") as f:
                        if os.path.exists(ondisk_model_version_path):
                            shutil.rmtree(ondisk_model_version_path)
                        shutil.move(temp_dest, ondisk_model_version_path)
                        f.write("available " + str(int(model_ts.timestamp())))

        # remove the temp model directory if it exists
        model_temp_dest = os.path.join(self._temp_dir, model_name)
        if os.path.exists(model_temp_dest):
            os.rmdir(model_temp_dest)

        # remove model versions if they are not found on the upstream
        # except when the model version found on disk is 1 and the number of detected versions on the upstream is 0,
        # thus indicating the 1-version on-disk model must be a model that came without a version
        if os.path.exists(ondisk_model_path):
            ondisk_model_versions = glob.glob(os.path.join(ondisk_model_path, "**"))
            ondisk_model_versions = [
                os.path.relpath(path, ondisk_model_path) for path in ondisk_model_versions
            ]
            for ondisk_version in ondisk_model_versions:
                if ondisk_version not in versions and (ondisk_version != "1" or len(versions) > 0):
                    resource = os.path.join(
                        self._lock_dir, model_name + "-" + ondisk_version + ".txt"
                    )
                    ondisk_model_version_path = os.path.join(ondisk_model_path, ondisk_version)
                    with LockedFile(resource, "w+") as f:
                        shutil.rmtree(ondisk_model_version_path)
                        f.write("not-available")

            # remove the model directory if there are no models left
            if len(glob.glob(os.path.join(ondisk_model_path, "**"))) == 0:
                shutil.rmtree(ondisk_model_path)

        # if it's a non-versioned model ModelVersion.NOT_PROVIDED
        if len(versions) == 0 and len(sub_paths) > 0:

            # for the lock file
            resource = os.path.join(self._lock_dir, model_name + "-" + "1" + ".txt")
            model_ts = int(timestamps[0].timestamp())

            # check if a model update is mandated
            update_model = False
            ondisk_model_version_path = os.path.join(ondisk_model_path, "1")
            if os.path.exists(ondisk_model_version_path):
                local_paths = glob.glob(
                    os.path.join(ondisk_model_version_path, "**"), recursive=True
                )
                local_paths = util.remove_non_empty_directory_paths(local_paths)
                local_paths = [
                    os.path.relpath(local_path, ondisk_model_version_path)
                    for local_path in local_paths
                ]
                local_paths = [path for path in local_paths if not path.startswith("../")]

                s3_model_version_path = model_path
                s3_paths = [
                    os.path.relpath(sub_path, s3_model_version_path) for sub_path in sub_paths
                ]
                s3_paths = [path for path in s3_paths if not path.startswith("../")]
                s3_paths = util.remove_non_empty_directory_paths(s3_paths)

                # update if the paths don't match
                if set(local_paths) != set(s3_paths):
                    update_model = True

                # update if the timestamp is newer
                with LockedFile(resource, "r", reader_lock=True) as f:
                    file_status = f.read()
                    if file_status == "" or file_status == "not-available":
                        raise WithBreak()
                    current_model_ts = int(file_status.split(" ")[1])
                    if current_model_ts < model_ts:
                        update_model = True
            else:
                update_model = True

            if not update_model:
                return

            # download to a temp directory
            temp_dest = os.path.join(self._temp_dir, model_name)
            client.download_dir_contents(model_path, temp_dest)

            # validate the downloaded model
            model_contents = glob.glob(os.path.join(temp_dest, "**"), recursive=True)
            model_contents = util.remove_non_empty_directory_paths(model_contents)
            try:
                validate_model_paths(model_contents, self._handler_type, temp_dest)
                passed_validation = True
            except CortexException:
                passed_validation = False
                shutil.rmtree(temp_dest)
                s3_path = S3.construct_s3_path(bucket_name, model_path)
                # FIX: this branch only runs when `versions` is empty, so the
                # loop variable `version` was unbound here (NameError); the
                # non-versioned model is always stored as version 1.
                logger.debug(
                    f"failed validating model {model_name} of version 1 found at {s3_path} path"
                )

            # move the model to its destination directory
            if passed_validation:
                with LockedFile(resource, "w+") as f:
                    if os.path.exists(ondisk_model_version_path):
                        shutil.rmtree(ondisk_model_version_path)
                    shutil.move(temp_dest, ondisk_model_version_path)
                    f.write("available " + str(model_ts))
class FileBasedModelsGC(AbstractLoopingThread):
    """
    GC for models that no longer exist on disk. To be used with FileBasedModelsTreeUpdater.

    There has to be a FileBasedModelsGC cron for each API process.

    This needs to run on the API process because the FileBasedModelsTreeUpdater process cannot
    unload the models from the API process' memory by itself. API process has to rely on this cron to do this periodically.

    This is for the case when the FileBasedModelsTreeUpdater process has removed models from disk and there are still models loaded into the API process' memory.
    """

    def __init__(
        self,
        interval: int,
        models: ModelsHolder,
        download_dir: str,
        lock_dir: str = "/run/cron",
    ):
        """
        Args:
            interval: How often to run the GC. Measured in seconds.
            models: In-memory holder of the models loaded by this API process.
            download_dir: Path to where the models are stored.
            lock_dir: Path to where the resource locks are stored.
        """
        AbstractLoopingThread.__init__(self, interval, self._run_gc)

        self._models = models
        self._download_dir = download_dir
        self._lock_dir = lock_dir

    def _run_gc(self):
        """Evict in-memory models whose on-disk counterpart has disappeared."""
        logger.debug(f"{self.__class__.__name__} cron heartbeat")

        on_disk_model_ids = find_ondisk_model_ids_with_lock(self._lock_dir)
        stale_ids = [
            model_id
            for model_id in self._models.get_model_ids()
            if model_id not in on_disk_model_ids
        ]

        for stale_id in stale_ids:
            with LockedModel(self._models, "w", model_id=stale_id):
                # skip entries that are no longer (fully) resident in memory
                if self._models.has_model_id(stale_id)[0] != "in-memory":
                    continue
                model_name, model_version = stale_id.rsplit("-", maxsplit=1)
                logger.info(
                    f"removing model {model_name} of version {model_version} from memory as it's no longer present on disk/S3 (thread {td.get_ident()})"
                )
                self._models.remove_model_by_id(
                    stale_id, mem=True, disk=False, del_reference=True
                )
def find_ondisk_models_with_lock(
    lock_dir: str, include_timestamps: bool = False
) -> Union[Dict[str, List[str]], Dict[str, Dict[str, Any]]]:
    """
    Returns all available models from the disk.
    To be used in conjunction with FileBasedModelsTreeUpdater/FileBasedModelsGC.

    Can be used for Python/TensorFlow clients.

    Args:
        lock_dir: Path to where the resource locks are stored.
        include_timestamps: Whether to include timestamps for each version of each model.

    Returns:
        With include_timestamps False, a mapping of model name to its versions:
        {
            "model-A": ["177", "245", "247"],
            "model-B": ["1"],
            ...
        }
        With include_timestamps True, a mapping of model name to versions plus
        their matching timestamps:
        {
            "model-A": {
                "versions": ["177", "245", "247"],
                "timestamps": [1602198945, 1602198946, 1602198947]
            }
            "model-B": {
                "versions": ["1"],
                "timestamps": [1602198567]
            },
            ...
        }
    """
    models = {}
    for locked_file in get_locked_files(lock_dir):
        with LockedFile(os.path.join(lock_dir, locked_file), "r", reader_lock=True) as f:
            status = f.read()

        # only "available ..." entries represent usable model versions
        if not status.startswith("available"):
            continue

        timestamp = int(status.split(" ")[1])
        _model_name, _model_version = os.path.splitext(locked_file)[0].rsplit("-", maxsplit=1)

        if include_timestamps:
            entry = models.setdefault(_model_name, {"versions": [], "timestamps": []})
            entry["versions"].append(_model_version)
            entry["timestamps"].append(timestamp)
        else:
            models.setdefault(_model_name, []).append(_model_version)

    return models
def find_ondisk_model_ids_with_lock(lock_dir: str) -> List[str]:
    """
    Returns all available model IDs from the disk.
    To be used in conjunction with FileBasedModelsTreeUpdater/FileBasedModelsGC.

    Can be used for Python/TensorFlow clients.

    Args:
        lock_dir: Path to where the resource locks are stored.

    Returns:
        A list with all model IDs present on disk.
    """
    model_ids = []
    for locked_file in get_locked_files(lock_dir):
        with LockedFile(os.path.join(lock_dir, locked_file), "r", reader_lock=True) as f:
            status = f.read()
        # the lock file's stem ("<name>-<version>") is the model ID
        if status.startswith("available"):
            model_ids.append(os.path.splitext(locked_file)[0])
    return model_ids
def find_ondisk_model_info(lock_dir: str, model_name: str) -> Tuple[List[str], List[int]]:
    """
    Returns all available versions/timestamps of a model from the disk.
    To be used in conjunction with FileBasedModelsTreeUpdater/FileBasedModelsGC.

    Can be used for Python/TensorFlow clients.

    Args:
        lock_dir: Path to where the resource locks are stored.
        model_name: Name of the model as specified in handler:models:paths:name, _cortex_default when handler:models:path is set or the discovered model names when handler:models:dir is used.

    Returns:
        2-element tuple made of a list with the available versions and a list with the corresponding timestamps for each model. Empty when the model is not available.
    """
    versions: List[str] = []
    timestamps: List[int] = []

    for locked_file in get_locked_files(lock_dir):
        # lock files are named "<model name>-<version>.txt"
        _model_name, _model_version = os.path.splitext(locked_file)[0].rsplit("-", maxsplit=1)
        if _model_name != model_name:
            continue

        with LockedFile(os.path.join(lock_dir, locked_file), "r", reader_lock=True) as f:
            status = f.read()
        if not status.startswith("available"):
            continue

        versions.append(_model_version)
        timestamps.append(int(status.split(" ")[1]))

    return (versions, timestamps)
class TFSModelLoader(mp.Process):
"""
Monitors the S3 path(s)/dir and continuously updates the models on TFS.
The model paths are validated - the bad paths are ignored.
When a new model is found, it updates the tree, downloads it and loads it into memory - likewise when a model is removed.
"""
def __init__(
self,
interval: int,
api_spec: dict,
tfs_model_dir: str,
download_dir: str,
address: Optional[str] = None,
addresses: Optional[List[str]] = None,
temp_dir: str = "/tmp/cron",
lock_dir: str = "/run/cron",
):
"""
Args:
interval: How often to update the models tree. Measured in seconds.
api_spec: Identical copy of pkg.type.spec.api.API.
address: An address with the "host:port" format to where TFS is located.
addresses: A list of addresses with the "host:port" format to where the TFS servers are located.
tfs_model_dir: Path to where the models are stored within the TFS container.
download_dir: Path to where the models are stored.
temp_dir: Directory where models are temporarily stored.
lock_dir: Directory in which model timestamps are stored.
"""
if address and addresses:
raise ValueError("address and addresses arguments cannot be passed in at the same time")
if not address and not addresses:
raise ValueError("must pass in at least one of the two arguments: address or addresses")
mp.Process.__init__(self, daemon=True)
self._interval = interval
self._api_spec = api_spec
self._tfs_model_dir = tfs_model_dir
self._download_dir = download_dir
self._temp_dir = temp_dir
self._lock_dir = lock_dir
if address:
self._tfs_address = address
self._tfs_addresses = None
else:
self._tfs_address = None
self._tfs_addresses = addresses
self._s3_paths = []
self._spec_models = get_models_from_api_spec(self._api_spec)
self._s3_model_names = self._spec_models.get_s3_model_names()
for model_name in self._s3_model_names:
self._s3_paths.append(self._spec_models[model_name]["path"])
if (
self._api_spec["handler"]["models"] is not None
and self._api_spec["handler"]["models"]["dir"] is not None
):
self._is_dir_used = True
self._models_dir = self._api_spec["handler"]["models"]["dir"]
else:
self._is_dir_used = False
self._models_dir = None
if self._api_spec["handler"]["type"] == "tensorflow":
if self._api_spec["compute"]["inf"] > 0:
self._handler_type = TensorFlowNeuronHandlerType
else:
self._handler_type = TensorFlowHandlerType
else:
raise CortexException(
"'tensorflow' handler type is the only allowed type for this cron"
)
self._ran_once = mp.Event()
self._event_stopper = mp.Event()
self._stopped = mp.Event()
# keeps an old record of the model timestamps
self._old_ts_state = {}
def run(self):
"""
mp.Process-specific method.
"""
init_sentry(tags=get_default_tags())
if self._tfs_address:
self._client = TensorFlowServingAPI(self._tfs_address)
else:
self._client = TensorFlowServingAPIClones(self._tfs_addresses)
# wait until TFS is responsive
while not self._client.is_tfs_accessible():
self._reset_when_tfs_unresponsive()
time.sleep(1.0)
while not self._event_stopper.is_set():
success = self._update_models()
if success and not self._ran_once.is_set():
self._ran_once.set()
logger.debug(f"{self.__class__.__name__} cron heartbeat")
time.sleep(self._interval)
self._stopped.set()
def stop(self, blocking: bool = False):
"""
Trigger the process of stopping the process.
Args:
blocking: Whether to wait until the process is stopped or not.
"""
self._event_stopper.set()
if blocking:
self.join()
def join(self):
"""
Block until the process exits.
"""
while not self._stopped.is_set():
time.sleep(0.001)
def ran_once(self) -> bool:
"""
Tells whether the TFS loader loop has run at least once.
"""
return self._ran_once.is_set()
def _update_models(self) -> bool:
# get updated/validated paths/versions of the S3 models
(
model_names,
versions,
model_paths,
sub_paths,
timestamps,
bucket_names,
) = find_all_s3_models(
self._is_dir_used,
self._models_dir,
self._handler_type,
self._s3_paths,
self._s3_model_names,
)
# update models on the local disk if changes have been detected
# a model is updated if its directory tree has changed, if it's not present or if it doesn't exist on the upstream
with ThreadPoolExecutor(max_workers=5) as executor:
futures = []
for idx, (model_name, bucket_name, bucket_sub_paths) in enumerate(
zip(model_names, bucket_names, sub_paths)
):
futures += [
executor.submit(
self._refresh_model,
idx,
model_name,
model_paths[idx],
versions[model_name],
timestamps[idx],
bucket_sub_paths,
bucket_name,
)
]
[future.result() for future in futures]
# remove models that no longer appear in model_names
for model_name, model_versions in find_ondisk_models(self._download_dir).items():
if model_name in model_names:
continue
for ondisk_version in model_versions:
ondisk_model_version_path = os.path.join(
self._download_dir, model_name, ondisk_version
)
shutil.rmtree(ondisk_model_version_path)
shutil.rmtree(os.path.join(self._download_dir, model_name))
self._client.remove_models([model_name], [model_versions])
# check tfs connection
if not self._client.is_tfs_accessible():
self._reset_when_tfs_unresponsive()
return False
# remove versioned models from TFS that no longer exist on disk
tfs_model_ids = self._client.get_registered_model_ids()
ondisk_models = find_ondisk_models(self._download_dir)
ondisk_model_ids = []
for model_name, model_versions in ondisk_models.items():
for model_version in model_versions:
ondisk_model_ids.append(f"{model_name}-{model_version}")
for tfs_model_id in tfs_model_ids:
if tfs_model_id not in ondisk_model_ids:
try:
model_name, model_version = tfs_model_id.rsplit("-", maxsplit=1)
self._client.remove_single_model(model_name, model_version)
logger.info(
"model '{}' of version '{}' has been unloaded".format(
model_name, model_version
)
)
except grpc.RpcError as err:
if err.code() == grpc.StatusCode.UNAVAILABLE:
logger.warning(
"TFS server unresponsive after trying to load model '{}' of version '{}': {}".format(
model_name, model_version, str(err)
)
)
self._reset_when_tfs_unresponsive()
return False
# # update TFS models
current_ts_state = {}
for model_name, model_versions in ondisk_models.items():
try:
ts = self._update_tfs_model(
model_name, model_versions, timestamps, model_names, versions
)
except grpc.RpcError:
return False
current_ts_state = {**current_ts_state, **ts}
# save model timestamp states
for model_id, ts in current_ts_state.items():
self._old_ts_state[model_id] = ts
# remove model timestamps that no longer exist
loaded_model_ids = self._client.models.keys()
aux_ts_state = self._old_ts_state.copy()
for model_id in self._old_ts_state.keys():
if model_id not in loaded_model_ids:
del aux_ts_state[model_id]
self._old_ts_state = aux_ts_state
# save model timestamp states to disk
# could be cast to a short-lived thread
# required for printing the model stats when cortex getting
resource = os.path.join(self._lock_dir, "model_timestamps.json")
with open(resource, "w") as f:
json.dump(self._old_ts_state, f, indent=2)
# save model stats for TFS to disk
resource = os.path.join(self._lock_dir, "models_tfs.json")
with open(resource, "w") as f:
json.dump(self._client.models, f, indent=2)
return True
def _refresh_model(
self,
idx: int,
model_name: str,
model_path: str,
versions: List[str],
timestamps: List[datetime.datetime],
sub_paths: List[str],
bucket_name: str,
) -> None:
client = S3(bucket_name)
ondisk_model_path = os.path.join(self._download_dir, model_name)
for version, model_ts in zip(versions, timestamps):
# check if a model update is mandated
update_model = False
ondisk_model_version_path = os.path.join(ondisk_model_path, version)
if os.path.exists(ondisk_model_version_path):
local_paths = glob.glob(
os.path.join(ondisk_model_version_path, "**"), recursive=True
)
local_paths = util.remove_non_empty_directory_paths(local_paths)
local_paths = [
os.path.relpath(local_path, ondisk_model_version_path)
for local_path in local_paths
]
local_paths = [path for path in local_paths if not path.startswith("../")]
s3_model_version_path = os.path.join(model_path, version)
s3_paths = [
os.path.relpath(sub_path, s3_model_version_path) for sub_path in sub_paths
]
s3_paths = [path for path in s3_paths if not path.startswith("../")]
s3_paths = util.remove_non_empty_directory_paths(s3_paths)
if set(local_paths) != set(s3_paths):
update_model = True
model_id = f"{model_name}-{version}"
if self._is_this_a_newer_model_id(model_id, int(model_ts.timestamp())):
update_model = True
else:
update_model = True
if update_model:
# download to a temp directory
temp_dest = os.path.join(self._temp_dir, model_name, version)
s3_src = os.path.join(model_path, version)
client.download_dir_contents(s3_src, temp_dest)
# validate the downloaded model
model_contents = glob.glob(os.path.join(temp_dest, "**"), recursive=True)
model_contents = util.remove_non_empty_directory_paths(model_contents)
try:
validate_model_paths(model_contents, self._handler_type, temp_dest)
passed_validation = True
except CortexException:
passed_validation = False
shutil.rmtree(temp_dest)
s3_path = S3.construct_s3_path(bucket_name, model_path)
logger.debug(
f"failed validating model {model_name} of version {version} found at {s3_path} path"
)
# move the model to its destination directory
if passed_validation:
if os.path.exists(ondisk_model_version_path):
shutil.rmtree(ondisk_model_version_path)
shutil.move(temp_dest, ondisk_model_version_path)
# remove the temp model directory if it exists
model_temp_dest = os.path.join(self._temp_dir, model_name)
if os.path.exists(model_temp_dest):
os.rmdir(model_temp_dest)
# remove model versions if they are not found on the upstream
# except when the model version found on disk is 1 and the number of detected versions on the upstream is 0,
# thus indicating the 1-version on-disk model must be a model that came without a version
if os.path.exists(ondisk_model_path):
ondisk_model_versions = glob.glob(os.path.join(ondisk_model_path, "**"))
ondisk_model_versions = [
os.path.relpath(path, ondisk_model_path) for path in ondisk_model_versions
]
for ondisk_version in ondisk_model_versions:
if ondisk_version not in versions and (ondisk_version != "1" or len(versions) > 0):
ondisk_model_version_path = os.path.join(ondisk_model_path, ondisk_version)
shutil.rmtree(ondisk_model_version_path)
if len(glob.glob(os.path.join(ondisk_model_path, "**"))) == 0:
shutil.rmtree(ondisk_model_path)
# if it's a non-versioned model ModelVersion.NOT_PROVIDED
if len(versions) == 0 and len(sub_paths) > 0:
model_ts = timestamps[0]
# check if a model update is mandated
update_model = False
ondisk_model_version_path = os.path.join(ondisk_model_path, "1")
if os.path.exists(ondisk_model_version_path):
local_paths = glob.glob(
os.path.join(ondisk_model_version_path, "**"), recursive=True
)
local_paths = util.remove_non_empty_directory_paths(local_paths)
local_paths = [
os.path.relpath(local_path, ondisk_model_version_path)
for local_path in local_paths
]
local_paths = [path for path in local_paths if not path.startswith("../")]
s3_model_version_path = model_path
s3_paths = [
os.path.relpath(sub_path, s3_model_version_path) for sub_path in sub_paths
]
s3_paths = [path for path in s3_paths if not path.startswith("../")]
s3_paths = util.remove_non_empty_directory_paths(s3_paths)
# update if the paths don't match
if set(local_paths) != set(s3_paths):
update_model = True
model_id = f"{model_name}-1"
if self._is_this_a_newer_model_id(model_id, int(model_ts.timestamp())):
update_model = True
else:
update_model = True
if not update_model:
return
# download to a temp directory
temp_dest = os.path.join(self._temp_dir, model_name)
client.download_dir_contents(model_path, temp_dest)
# validate the downloaded model
model_contents = glob.glob(os.path.join(temp_dest, "**"), recursive=True)
model_contents = util.remove_non_empty_directory_paths(model_contents)
try:
validate_model_paths(model_contents, self._handler_type, temp_dest)
passed_validation = True
except CortexException:
passed_validation = False
shutil.rmtree(temp_dest)
s3_path = S3.construct_s3_path(bucket_name, model_path)
logger.debug(
f"failed validating model {model_name} of version {version} found at {s3_path} path"
)
# move the model to its destination directory
if passed_validation:
if os.path.exists(ondisk_model_version_path):
shutil.rmtree(ondisk_model_version_path)
shutil.move(temp_dest, ondisk_model_version_path)
def _update_tfs_model(
    self,
    model_name: str,
    model_versions: List[str],
    _s3_timestamps: List[List[datetime.datetime]],
    _s3_model_names: List[str],
    _s3_versions: Dict[str, List[str]],
) -> Optional[dict]:
    """
    Compares the existing models from TFS with those present on disk.
    Does the loading/unloading/reloading of models.

    From the _s3_timestamps, _s3_model_names, _s3_versions params, only the
    fields of the respective model name are used.

    Returns:
        Dict mapping "<model-name>-<version>" IDs to the integer timestamps of
        the model versions loaded (or reloaded) during this pass.
    """
    # deep-copy the inputs to prevent overwriting mistakes on the caller's data
    s3_timestamps = copy.deepcopy(_s3_timestamps)
    s3_model_names = copy.deepcopy(_s3_model_names)
    s3_versions = copy.deepcopy(_s3_versions)

    current_ts_state = {}

    # get the right order of model versions with respect to the model ts order
    model_timestamps = s3_timestamps[s3_model_names.index(model_name)]
    filtered_model_versions = []
    if len(s3_versions[model_name]) == 0:
        # non-versioned model: every timestamp belongs to the implicit version "1"
        filtered_model_versions = ["1"] * len(model_timestamps)
    else:
        for idx in range(len(model_timestamps)):
            if s3_versions[model_name][idx] in model_versions:
                filtered_model_versions.append(s3_versions[model_name][idx])

    for model_version, model_ts in zip(filtered_model_versions, model_timestamps):
        model_ts = int(model_ts.timestamp())

        # remove outdated model (timestamp changed since last pass)
        model_id = f"{model_name}-{model_version}"
        is_model_outdated = False
        first_time_load = False
        if model_id in self._old_ts_state and self._old_ts_state[model_id] != model_ts:
            try:
                self._client.remove_single_model(model_name, model_version)
            except grpc.RpcError as err:
                if err.code() == grpc.StatusCode.UNAVAILABLE:
                    logger.warning(
                        "TFS server unresponsive after trying to unload model '{}' of version '{}': {}".format(
                            model_name, model_version, str(err)
                        )
                    )
                logger.warning("waiting for tensorflow serving")
                raise
            is_model_outdated = True
        elif model_id not in self._old_ts_state:
            first_time_load = True

        # already loaded and up to date: nothing to do for this version
        if not is_model_outdated and not first_time_load:
            continue

        # load model
        model_disk_path = os.path.join(self._tfs_model_dir, model_name)
        try:
            self._client.add_single_model(
                model_name,
                model_version,
                model_disk_path,
                self._determine_model_signature_key(model_name),
                timeout=30.0,
            )
        except Exception as e:
            # best-effort rollback: try to unload the partially-loaded model
            try:
                self._client.remove_single_model(model_name, model_version)
                logger.warning(
                    "model '{}' of version '{}' couldn't be loaded: {}".format(
                        model_name, model_version, str(e)
                    )
                )
            except grpc.RpcError as err:
                if err.code() == grpc.StatusCode.UNAVAILABLE:
                    logger.warning(
                        "TFS server unresponsive after trying to load model '{}' of version '{}': {}".format(
                            model_name, model_version, str(err)
                        )
                    )
                self._reset_when_tfs_unresponsive()
                raise
            # clear both flags so neither "loaded" nor "reloaded" is logged below
            is_model_outdated = False
            first_time_load = False

        # save timestamp of loaded model
        current_ts_state[model_id] = model_ts

        if is_model_outdated:
            logger.info(
                "model '{}' of version '{}' has been reloaded".format(model_name, model_version)
            )
        elif first_time_load:
            logger.info(
                "model '{}' of version '{}' has been loaded".format(model_name, model_version)
            )

    return current_ts_state
def _is_this_a_newer_model_id(self, model_id: str, timestamp: int) -> bool:
return model_id in self._old_ts_state and self._old_ts_state[model_id] < timestamp
def _determine_model_signature_key(self, model_name: str) -> Optional[str]:
if self._models_dir:
signature_key = self._api_spec["handler"]["models"]["signature_key"]
else:
signature_key = self._spec_models[model_name]["signature_key"]
return signature_key
def _reset_when_tfs_unresponsive(self):
    """Rebuild the TFS client(s) from scratch and publish the fresh model registry."""
    logger.warning("waiting for tensorflow serving")

    # re-create the client: single-instance or cloned, depending on configuration
    if self._tfs_address:
        self._client = TensorFlowServingAPI(self._tfs_address)
    else:
        self._client = TensorFlowServingAPIClones(self._tfs_addresses)

    # write the (now reset) model registry so serving processes pick up the new state
    resource = os.path.join(self._lock_dir, "models_tfs.json")
    with open(resource, "w") as f:
        json.dump(self._client.models, f, indent=2)
class TFSAPIServingThreadUpdater(AbstractLoopingThread):
    """
    When live reloading and the TensorFlow type is used, the serving container
    needs a way of accessing the models' metadata, which is generated by the
    TFSModelLoader cron.

    This thread runs in every serving process and periodically feeds the
    metadata exported by the TFSModelLoader cron into the serving process.
    """

    def __init__(
        self,
        interval: Union[int, float],
        client: TensorFlowClient,
        lock_dir: str = "/run/cron",
    ):
        """
        Args:
            interval: How often to sync the models. Measured in seconds.
            client: TensorFlow client whose model view is refreshed.
            lock_dir: Directory holding the exported metadata.
        """
        super().__init__(interval, self._run_tfs)
        self._lock_dir = lock_dir
        self._client = client

    def _run_tfs(self) -> None:
        # one sync pass per tick
        self._client.sync_models(self._lock_dir)
def find_ondisk_models(models_dir: str) -> Dict[str, List[str]]:
    """
    Returns all available models from the disk.
    To be used in conjunction with TFSModelLoader.

    This function should never be used for determining whether a model has to
    be loaded or not. Can be used for Python/TensorFlow clients.

    Args:
        models_dir: Path to where the models are stored.

    Returns:
        Dictionary with available model names and their associated version
        directory names (as strings).
        {
            "model-A": ["177", "245", "247"],
            "model-B": ["1"],
            ...
        }
    """
    # os.listdir already yields bare entry names, so the previous
    # os.path.basename() call on each entry was a no-op; a dict comprehension
    # replaces the manual loop.
    return {
        model_name: os.listdir(os.path.join(models_dir, model_name))
        for model_name in os.listdir(models_dir)
    }
class ModelsGC(AbstractLoopingThread):
    """
    GC for models loaded into memory and/or stored on disk.

    If the number of models exceeds the cache size, then evict the LRU models.
    Also removes models that are no longer present in the model tree.
    """

    def __init__(
        self,
        interval: int,
        api_spec: dict,
        models: ModelsHolder,
        tree: ModelsTree,
    ):
        """
        Args:
            interval: How often to update the models tree. Measured in seconds.
            api_spec: Identical copy of pkg.type.spec.api.API.
            models: The object holding all models in memory / on disk.
            tree: Model tree representation of the available models on the S3 upstream.
        """
        AbstractLoopingThread.__init__(self, interval, self._run_gc)

        self._api_spec = api_spec
        self._models = models
        self._tree = tree
        self._spec_models = get_models_from_api_spec(self._api_spec)

        # max time (seconds) to wait for the exclusive global lock in _run_gc
        self._lock_timeout = 10.0

        self._event_stopper = td.Event()
        self._stopped = False

    def _run_gc(self) -> None:
        """One GC pass: evict over-limit models, then drop models gone from S3."""

        # are there any models to collect (aka remove) from cache
        with LockedGlobalModelsGC(self._models, "r"):
            collectible, _, _ = self._models.garbage_collect(dry_run=True)
        if not collectible:
            self._remove_stale_models()
            return

        # try to grab exclusive access to all models with shared access preference
        # and if it works, remove excess models from cache
        self._models.set_global_preference_policy("r")
        with LockedGlobalModelsGC(self._models, "w", self._lock_timeout) as lg:
            acquired = lg.acquired
            if not acquired:
                # WithBreak is presumably swallowed by the context manager,
                # exiting the with-block early — TODO confirm
                raise WithBreak
            _, memory_evicted_model_ids, disk_evicted_model_ids = self._models.garbage_collect()

        # otherwise, grab exclusive access to all models with exclusive access preference
        # and remove excess models from cache
        if not acquired:
            self._models.set_global_preference_policy("w")
            with LockedGlobalModelsGC(self._models, "w"):
                _, memory_evicted_model_ids, disk_evicted_model_ids = self._models.garbage_collect()
            self._models.set_global_preference_policy("r")

        memory_evicted_models = ids_to_models(memory_evicted_model_ids)
        disk_evicted_models = ids_to_models(disk_evicted_model_ids)

        self._log_removed_models(memory_evicted_models, memory=True)
        self._log_removed_models(disk_evicted_models, disk=True)

        self._remove_stale_models()

    def _remove_stale_models(self) -> None:
        """
        Remove models that exist locally in-memory and on-disk that no longer appear on S3
        """

        # get available upstream S3 model IDs
        s3_model_names = self._tree.get_model_names()
        s3_model_versions = [
            self._tree.model_info(model_name)["versions"] for model_name in s3_model_names
        ]
        s3_model_ids = []
        for model_name, model_versions in zip(s3_model_names, s3_model_versions):
            if len(model_versions) == 0:
                continue
            for model_version in model_versions:
                s3_model_ids.append(f"{model_name}-{model_version}")

        # get model IDs loaded into memory or on disk.
        with LockedGlobalModelsGC(self._models, "r"):
            present_model_ids = self._models.get_model_ids()

        # remove models that don't exist in the S3 upstream
        ghost_model_ids = list(set(present_model_ids) - set(s3_model_ids))
        for model_id in ghost_model_ids:
            # rsplit keeps model names that themselves contain "-" intact
            model_name, model_version = model_id.rsplit("-", maxsplit=1)
            with LockedModel(self._models, "w", model_name, model_version):
                status, ts = self._models.has_model(model_name, model_version)
                if status == "in-memory":
                    logger.info(
                        f"unloading stale model {model_name} of version {model_version} using the garbage collector"
                    )
                    self._models.unload_model(model_name, model_version)
                if status in ["in-memory", "on-disk"]:
                    logger.info(
                        f"removing stale model {model_name} of version {model_version} using the garbage collector"
                    )
                    self._models.remove_model(model_name, model_version)

    def _log_removed_models(
        self, models: Dict[str, List[str]], memory: bool = False, disk: bool = False
    ) -> None:
        """
        Log the removed models from disk/memory.
        """
        if len(models) == 0:
            return None

        # build a single human-readable line listing every evicted model/version
        if len(models) > 1:
            message = "models "
        else:
            message = "model "

        for idx, (model_name, versions) in enumerate(models.items()):
            message += f"{model_name} "
            if len(versions) == 1:
                message += f"(version {versions[0]})"
            else:
                message += f"(versions {','.join(versions)})"
            if idx + 1 < len(models):
                message += ", "
            else:
                if memory:
                    message += " removed from the memory cache using the garbage collector"
                if disk:
                    message += " removed from the disk cache using the garbage collector"

        logger.info(message)
class ModelTreeUpdater(AbstractLoopingThread):
    """
    Model tree updater. Updates a local representation of all available models from the S3 upstreams.
    """

    def __init__(self, interval: int, api_spec: dict, tree: ModelsTree, ondisk_models_dir: str):
        """
        Args:
            interval: How often to update the models tree. Measured in seconds.
            api_spec: Identical copy of pkg.type.spec.api.API.
            tree: Model tree representation of the available models on S3.
            ondisk_models_dir: Where the models are stored on disk. Necessary when local models are used.
        """
        AbstractLoopingThread.__init__(self, interval, self._update_models_tree)

        self._api_spec = api_spec
        self._tree = tree
        self._ondisk_models_dir = ondisk_models_dir

        # collect the S3 path of every model declared in the API spec
        self._s3_paths = []
        self._spec_models = get_models_from_api_spec(self._api_spec)
        self._s3_model_names = self._spec_models.get_s3_model_names()
        for model_name in self._s3_model_names:
            self._s3_paths.append(self._spec_models[model_name]["path"])

        self._handler_type = handler_type_from_api_spec(self._api_spec)

        # the models config lives under different spec keys depending on handler type
        if (
            self._handler_type == PythonHandlerType
            and self._api_spec["handler"]["multi_model_reloading"]
        ):
            models = self._api_spec["handler"]["multi_model_reloading"]
        elif self._handler_type != PythonHandlerType:
            models = self._api_spec["handler"]["models"]
        else:
            models = None
        if models is None:
            raise CortexException("no specified model")

        # "dir" mode: all models live under one bucket directory
        if models and models["dir"] is not None:
            self._is_dir_used = True
            self._models_dir = models["dir"]
        else:
            self._is_dir_used = False
            self._models_dir = None

    def _update_models_tree(self) -> None:
        # get updated/validated paths/versions of the S3 models
        (
            model_names,
            versions,
            model_paths,
            sub_paths,
            timestamps,
            bucket_names,
        ) = find_all_s3_models(
            self._is_dir_used,
            self._models_dir,
            self._handler_type,
            self._s3_paths,
            self._s3_model_names,
        )

        # update model tree
        self._tree.update_models(
            model_names,
            versions,
            model_paths,
            sub_paths,
            timestamps,
            bucket_names,
        )

        logger.debug(f"{self.__class__.__name__} cron heartbeat")
|
class Solution:
    def characterReplacement(self, s, k):
        """
        Longest substring of *s* containing a single repeated character after
        replacing at most *k* characters (sliding window that never shrinks).

        :type s: str
        :type k: int
        :rtype: int
        """
        window = 0                     # current (never-shrinking) window length
        best_freq = 0                  # highest single-char count seen in any window
        freq = collections.Counter()
        for right, letter in enumerate(s):
            freq[letter] += 1
            if freq[letter] > best_freq:
                best_freq = freq[letter]
            if window - best_freq < k:
                # window still valid after this char: grow it
                window += 1
            else:
                # slide: evict the character falling off the left edge
                freq[s[right - window]] -= 1
        return window
# NOTE(review): Python 2 script (print statement, list-returning map); it would
# need porting before running under Python 3.
import os,sys,re,subprocess
from glob import glob;

# The only CLI argument is the results root, which contains skip_* run dirs.
dir = sys.argv[1];
for result_dir in glob(dir + '/skip_*'):
    # "…homog_<runNum>.txt" / ".msh"  ->  "<runNum>"
    extractRunNum = lambda f: re.sub('.*homog_(.+)\.(txt|msh)$','\\1', f)
    # Get the mesh stats from the deg1 mesh (mesh_convert doesn't currently support deg 2 meshes).
    meshStats = {}
    for m in glob(result_dir + '/deg_1/homog_*.msh'):
        out = subprocess.check_output([os.environ['MeshFEM'] + '/mesh_convert', '-i', m])
        # keep the value column from the last three tab-separated lines of
        # mesh_convert's output (the edge-length statistics)
        meshStats[extractRunNum(m)] = map(lambda s: s.split("\t")[1], out.strip().split("\n")[-3:])
    for deg in [1,2]:
        outTablePath = result_dir + ('/deg_%i.txt' % deg)
        outTable = open(outTablePath, 'w')
        homogOutputs = glob(result_dir + ('/deg_%i/' % deg) + 'homog_*.txt')
        for hout in homogOutputs:
            runNum = extractRunNum(hout)
            cornerAngle = None;
            moduli = [0, 0, 0, 0]
            # scrape corner angle and elastic moduli out of the text report
            for line in open(hout, 'r'):
                m = re.search('corner angle:\s(\S+)', line)
                if (m): cornerAngle = float(m.group(1))
                m = re.search('Young moduli:\s(\S+)\s(\S+)', line)
                if (m): moduli[0:2] = map(float, m.groups())
                m = re.search('v_yx, v_xy:\s(\S+)\s(\S+)', line)
                if (m): moduli[2] = float(m.group(1))
                m = re.search('shear modul.*:\s(\S+)', line)
                if (m): moduli[3] = float(m.group(1))
            # sample the max max stresses for the fluctuation fields
            # also sample the fluctuation displacements at (0.5, 0.5), (0.38, 0.38)
            cmd = [os.environ['MeshFEM'] + '/tools/msh_processor', re.sub('.txt$', '.msh', hout)]
            for ij in range(3): cmd += ['-e', 'strain w_ij %i' % ij, '--elementAverage', '--eigenvalues', '--max', '--max']
            for ij in range(3): cmd += ['-e', 'w_ij %i' % ij, '--sample', '0.50,0.50']
            for ij in range(3): cmd += ['-e', 'w_ij %i' % ij, '--sample', '0.38,0.38']
            cmd += ['--reverse', '--applyAll', '--print']
            sampledStats = subprocess.check_output(cmd)
            # mesh_num corner_angle minEdgeLength medianEdgeLength maxEdgeLength Ex Ey nu_yx mu_xy max_max_strain_0 max_max_strain_1 max_max_strain_1 w_ij_0_sample0.5_x w_ij_0_sample0.5_y ...
            outTable.write("%s\t%f\t" % (runNum, cornerAngle))
            outTable.write("\t".join(meshStats[runNum]) + "\t")
            outTable.write("\t".join(map(str, moduli)) + '\t')
            outTable.write("\t".join(sampledStats.strip().split("\n")) + "\n");
        outTable.close()
        print outTablePath
|
from markdown.util import etree
from markdown.extensions import Extension
from markdown.blockprocessors import BlockProcessor
from markdown.inlinepatterns import Pattern
from markdown.preprocessors import Preprocessor
import subprocess
import re
# Whole custom block: `< classname "optional title">` ... `</ classname>`.
BLOCK_RE = re.compile(r'''(?:^|\n)<\ ?(?P<class>[\w\-]+)
(?P<title>(?:\ "(.*?)"))?>*\n(?P<content>.*?)
(?<=\n)?(?:^|\n)<\/ ?(?P=class)?>''',
re.MULTILINE | re.DOTALL | re.VERBOSE)
# Opening tag only, e.g. `< note "Title">`.
OPEN_RE = re.compile(r'(?:^|\n)<\ ?([\w\-]+)(?:\ "(.*?)")?>')
# Literal `<guide>` opener, matched by HighlightBlocksProcessor.test().
GUIDE_RE = re.compile(r'(?:^|\n)<guide>')
# Closing tag, e.g. `</ note>`.
CLOSE_RE = re.compile(r'(?:^|\n)<\/ ?([\w\-]+)>')
class HighlightBlocksExtension(Extension):
    """Markdown extension registering the ``<guide>`` block processor."""

    def extendMarkdown(self, md, md_globals):
        md.registerExtension(self)
        processor = HighlightBlocksProcessor(md.parser)
        md.parser.blockprocessors.add('highlightblocks', processor, '_begin')
class HighlightBlocksProcessor(BlockProcessor):
    """Block processor wrapping ``<guide>`` sections in a styled ``<div>``."""

    def test(self, parent, block):
        # Fire whenever the block contains a <guide> opener.
        # (Removed leftover Python-2 debug prints and the unused sibling lookup.)
        return GUIDE_RE.search(block)

    def run(self, parent, blocks):
        sibling = self.lastChild(parent)
        block = blocks.pop(0)
        m = GUIDE_RE.search(block)
        if m:
            block = block[m.end() + 1:]  # drop the <guide> line itself
            div = etree.SubElement(parent, 'div')
            div.set('class', 'powpow')
        else:
            # continuation of an earlier guide block: keep appending to its div
            div = sibling
        # recursively parse the remaining content into the div
        self.parser.parseChunk(div, block)
def makeExtension(*args, **kwargs):
    """Entry point used by python-markdown to instantiate the extension."""
    return HighlightBlocksExtension(*args, **kwargs)
from django.conf.urls import url
from .views import SearchView
# Route the app root to the search view; `name` enables {% url 'default' %} reversing.
urlpatterns = [
    url(r'^$', SearchView.as_view(), name='default'),
]
import os
from unittest.mock import patch
from flexlate import branch_update
from flexlate.add_mode import AddMode
from flexlate.config import FlexlateConfig
from flexlate.exc import (
CannotRemoveTemplateSourceException,
CannotRemoveAppliedTemplateException,
)
from flexlate.ext_git import repo_has_merge_conflicts
from flexlate.remover import Remover
from flexlate.transactions.transaction import FlexlateTransaction
from tests.config import (
COOKIECUTTER_ONE_NAME,
COOKIECUTTER_TWO_NAME,
COOKIECUTTER_REMOTE_NAME,
)
from tests.fixtures.templated_repo import *
from tests.fixtures.transaction import (
remove_source_transaction,
remove_output_transaction,
add_source_transaction,
)
from tests.gitutils import accept_theirs_in_merge_conflict
def test_remove_template_source(
    repo_with_cookiecutter_one_template_source: Repo,
    remove_source_transaction: FlexlateTransaction,
):
    """Removing the only template source deletes the flexlate config file."""
    project_repo = repo_with_cookiecutter_one_template_source
    flexlate_config = GENERATED_REPO_DIR / "flexlate.json"
    with change_directory_to(GENERATED_REPO_DIR):
        assert flexlate_config.exists()
        Remover().remove_template_source(
            project_repo, COOKIECUTTER_ONE_NAME, remove_source_transaction
        )
        assert not flexlate_config.exists()
def test_remove_template_source_when_multiple_exist(
    repo_with_cookiecutter_one_template_source: Repo,
    cookiecutter_two_template: CookiecutterTemplate,
    remove_source_transaction: FlexlateTransaction,
    add_source_transaction: FlexlateTransaction,
):
    """With two sources registered, removing one leaves only the other behind."""
    project_repo = repo_with_cookiecutter_one_template_source
    config_file = GENERATED_REPO_DIR / "flexlate.json"
    with change_directory_to(GENERATED_REPO_DIR):
        # Register a second template source, then drop the first one.
        Adder().add_template_source(
            project_repo, cookiecutter_two_template, add_source_transaction
        )
        Remover().remove_template_source(
            project_repo, COOKIECUTTER_ONE_NAME, remove_source_transaction
        )
        config = FlexlateConfig.load(config_file)
        assert len(config.applied_templates) == 0
        assert len(config.template_sources) == 1
        assert config.template_sources[0].name == COOKIECUTTER_TWO_NAME
def test_remove_template_source_when_outputs_from_another_source_exist(
    repo_with_cookiecutter_one_template_source_and_output: Repo,
    cookiecutter_two_template: CookiecutterTemplate,
    remove_source_transaction: FlexlateTransaction,
    add_source_transaction: FlexlateTransaction,
):
    """Removing a source must not disturb outputs generated from another source."""
    project_repo = repo_with_cookiecutter_one_template_source_and_output
    config_file = GENERATED_REPO_DIR / "flexlate.json"
    with change_directory_to(GENERATED_REPO_DIR):
        Adder().add_template_source(
            project_repo, cookiecutter_two_template, add_source_transaction
        )
        Remover().remove_template_source(
            project_repo, COOKIECUTTER_TWO_NAME, remove_source_transaction
        )
        # The second source is gone, the first one remains registered.
        config = FlexlateConfig.load(config_file)
        assert len(config.applied_templates) == 0
        assert len(config.template_sources) == 1
        assert config.template_sources[0].name == COOKIECUTTER_ONE_NAME
        # The output applied from the first source is untouched.
        unrelated_config = FlexlateConfig.load(GENERATED_REPO_DIR / "b" / "flexlate.json")
        assert len(unrelated_config.template_sources) == 0
        assert len(unrelated_config.applied_templates) == 1
        applied = unrelated_config.applied_templates[0]
        assert applied.add_mode == AddMode.LOCAL
        assert applied.version == COOKIECUTTER_ONE_VERSION
        assert applied.data == {"a": "b", "c": ""}
        assert applied.name == "one"
        assert applied.root == Path("..")
def test_remove_non_existing_template_source(
    repo_with_placeholder_committed: Repo,
    remove_source_transaction: FlexlateTransaction,
):
    """Removing a source that was never added raises a descriptive error."""
    project_repo = repo_with_placeholder_committed
    with change_directory_to(GENERATED_REPO_DIR):
        with pytest.raises(CannotRemoveTemplateSourceException) as exc_info:
            Remover().remove_template_source(
                project_repo, COOKIECUTTER_ONE_NAME, remove_source_transaction
            )
        assert "Cannot find any template source" in str(exc_info.value)
def test_remove_template_source_when_output_exists(
    repo_with_cookiecutter_one_template_source_and_output: Repo,
    remove_source_transaction: FlexlateTransaction,
):
    """A source with live outputs cannot be removed; a descriptive error is raised."""
    project_repo = repo_with_cookiecutter_one_template_source_and_output
    with change_directory_to(GENERATED_REPO_DIR):
        with pytest.raises(CannotRemoveTemplateSourceException) as exc_info:
            Remover().remove_template_source(
                project_repo, COOKIECUTTER_ONE_NAME, remove_source_transaction
            )
        assert "has existing outputs" in str(exc_info.value)
def test_remove_template_source_with_merge_conflict_resolution(
    repo_with_cookiecutter_remote_version_one_template_source_that_will_have_merge_conflict_on_flexlate_operation: Repo,
    remove_source_transaction: FlexlateTransaction,
):
    """Source removal succeeds when the user manually resolves merge conflicts."""
    repo = repo_with_cookiecutter_remote_version_one_template_source_that_will_have_merge_conflict_on_flexlate_operation
    remover = Remover()
    config_path = GENERATED_REPO_DIR / "flexlate.json"

    def _resolve_conflicts_then_type_yes(prompt: str) -> bool:
        # Stand-in for the interactive confirm: resolve the conflict by
        # deleting the config file, commit, then answer "yes".
        assert repo_has_merge_conflicts(repo)
        os.remove(config_path)
        stage_and_commit_all(repo, "Manually resolve conflicts")
        return True

    with change_directory_to(GENERATED_REPO_DIR):
        assert config_path.exists()
        # Swap the interactive prompt for the resolver defined above.
        with patch.object(
            branch_update, "confirm_user", _resolve_conflicts_then_type_yes
        ):
            remover.remove_template_source(
                repo, COOKIECUTTER_REMOTE_NAME, remove_source_transaction
            )
        assert not config_path.exists()
def test_remove_applied_template(
    repo_with_template_branch_from_cookiecutter_one: Repo,
    remove_output_transaction: FlexlateTransaction,
):
    """Removing an applied template deletes its output but keeps the source."""
    project_repo = repo_with_template_branch_from_cookiecutter_one
    config_file = GENERATED_REPO_DIR / "flexlate.json"
    rendered_file = GENERATED_REPO_DIR / "b" / "text.txt"
    with change_directory_to(GENERATED_REPO_DIR):
        assert rendered_file.read_text() == "b"
        Remover().remove_applied_template_and_output(
            project_repo, COOKIECUTTER_ONE_NAME, remove_output_transaction
        )
        assert not rendered_file.exists()
        config = FlexlateConfig.load(config_file)
        assert len(config.template_sources) == 1
        assert len(config.applied_templates) == 0
def test_remove_applied_template_that_does_not_exist(
    repo_with_cookiecutter_one_template_source: Repo,
    remove_output_transaction: FlexlateTransaction,
):
    """Removing an output that was never applied raises a descriptive error."""
    project_repo = repo_with_cookiecutter_one_template_source
    with change_directory_to(GENERATED_REPO_DIR):
        with pytest.raises(CannotRemoveAppliedTemplateException) as exc_info:
            Remover().remove_applied_template_and_output(
                project_repo, COOKIECUTTER_ONE_NAME, remove_output_transaction
            )
        assert "Cannot find any applied template with name" in str(exc_info.value)
def test_remove_applied_template_when_multiple_exist(
    repo_with_cookiecutter_one_template_source_and_output: Repo,
    remove_output_transaction: FlexlateTransaction,
    add_output_transaction: FlexlateTransaction,
    cookiecutter_one_template: CookiecutterTemplate,
):
    """Removing one of several applications leaves the others untouched."""
    project_repo = repo_with_cookiecutter_one_template_source_and_output
    nested_dir = GENERATED_REPO_DIR / "subdir"
    nested_dir.mkdir()
    nested_config = nested_dir / "b" / "flexlate.json"
    nested_output = nested_dir / "b" / "text.txt"
    with change_directory_to(nested_dir):
        # Apply the same template a second time, inside the subdirectory.
        Adder().apply_template_and_add(
            project_repo, cookiecutter_one_template, add_output_transaction, no_input=True
        )
        assert nested_output.read_text() == "b"
        assert nested_config.exists()
        Remover().remove_applied_template_and_output(
            project_repo, COOKIECUTTER_ONE_NAME, remove_output_transaction
        )
        # The subdirectory application is fully removed...
        assert not nested_output.exists()
        assert not nested_config.exists()
        # ...while the original application in "b" is untouched.
        unrelated_config = FlexlateConfig.load(GENERATED_REPO_DIR / "b" / "flexlate.json")
        assert len(unrelated_config.template_sources) == 0
        assert len(unrelated_config.applied_templates) == 1
        applied = unrelated_config.applied_templates[0]
        assert applied.add_mode == AddMode.LOCAL
        assert applied.version == COOKIECUTTER_ONE_VERSION
        assert applied.data == {"a": "b", "c": ""}
        assert applied.name == "one"
        assert applied.root == Path("..")
def test_remove_applied_template_with_merge_conflict_resolution(
    repo_with_cookiecutter_remote_version_one_template_source_and_output_that_will_have_merge_conflict_on_flexlate_operation: Repo,
    remove_output_transaction: FlexlateTransaction,
):
    """Applied-template removal succeeds when the user resolves merge conflicts."""
    repo = repo_with_cookiecutter_remote_version_one_template_source_and_output_that_will_have_merge_conflict_on_flexlate_operation
    remover = Remover()
    config_path = GENERATED_REPO_DIR / "flexlate.json"
    output_path = GENERATED_REPO_DIR / "abc" / "abc.txt"

    def _resolve_conflicts_then_type_yes(prompt: str) -> bool:
        # Stand-in for the interactive confirm: take "theirs", commit, answer yes.
        assert repo_has_merge_conflicts(repo)
        accept_theirs_in_merge_conflict(repo)
        stage_and_commit_all(repo, "Manually resolve conflicts")
        return True

    with change_directory_to(GENERATED_REPO_DIR):
        assert output_path.read_text() == "some new header\nvalue"
        # Swap the interactive prompt for the resolver defined above.
        with patch.object(
            branch_update, "confirm_user", _resolve_conflicts_then_type_yes
        ):
            remover.remove_applied_template_and_output(
                repo,
                COOKIECUTTER_REMOTE_NAME,
                remove_output_transaction,
                add_mode=AddMode.PROJECT,
            )
        assert not output_path.exists()
        config = FlexlateConfig.load(config_path)
        assert len(config.template_sources) == 1
        assert len(config.applied_templates) == 0
|
import subprocess
import tempfile
from graphviz import Digraph
from suls.mealymachine import MealyMachine
import re
from util.dotloader import load_mealy_dot
# Absolute path to the external `distset` binary; hard-coded to the author's machine.
distsetpath = "/home/tom/projects/lstar/util/distset"
def _render(fsm: MealyMachine, filename):
    """Serialize *fsm* to a Graphviz source file at *filename*."""
    graph = Digraph('G', filename=filename)
    graph.attr(rankdir='LR')

    states = fsm.get_states()

    # declare all state nodes first, then the labeled transitions
    for state in states:
        graph.node(state.name)
    for state in states:
        for action, (target, output) in state.edges.items():
            graph.edge(state.name, target.name, label=f'{action}/{output}')

    graph.save()
def _check_distinguishing_set(fsm, dset):
    """Verify *dset* distinguishes every state of *fsm*.

    Returns True iff each state yields a unique tuple of outputs over the
    distinguishing sequences.
    """
    outputs = _get_dset_outputs(fsm, dset)
    # BUG FIX: the success message previously printed len(set(outputs)), which
    # counts the dict *keys* (states) — always equal to len(outputs).  The
    # unique-output count must come from the values.
    unique_outputs = set(outputs.values())
    if len(unique_outputs) < len(outputs):
        print("Dset Not unique!")
        return False
    else:
        print('Dset succes!', len(outputs), 'states,', len(unique_outputs), 'unique outputs')
        return True
def _get_dset_outputs(fsm, dset):
    """Map each state of *fsm* to its tuple of outputs over the sequences in *dset*."""
    outputs = {}
    for state in fsm.get_states():
        # run every distinguishing sequence from this state, resetting between runs
        machine = MealyMachine(state)
        responses = []
        for sequence in dset:
            responses.append(machine.process_input(sequence))
            machine.reset()
        outputs[state] = tuple(responses)
    return outputs
def get_distinguishing_set(fsm: MealyMachine, check=True):
    """Compute a distinguishing set for *fsm* via the external `distset` tool.

    Args:
        fsm: Machine to analyze (rendered to a temporary .gv file).
        check: When True, verify the returned set actually distinguishes all states.

    Returns:
        List of distinguishing input sequences (tuples).
    """
    # BUG FIX: tempfile.mktemp is deprecated and race-prone (the name can be
    # claimed between creation and use); create the file atomically instead.
    with tempfile.NamedTemporaryFile(suffix=".gv", delete=False) as tmp:
        path = tmp.name
    _render(fsm, path)
    dset = _run_distset(path)
    if check:
        _check_distinguishing_set(fsm, dset)
    return dset
def _run_distset(path_to_dot):
    """Run the external `distset` binary and parse its stdout.

    Returns the distinguishing suffixes as tuples of original input symbols.
    """
    # id -> original-symbol translation tables, per output category
    cases = {
        "State": {},
        "Output": {},
        "Input": {}
    }
    suffixes = []
    result = subprocess.run([distsetpath, '-path', path_to_dot], capture_output=True)
    for line in result.stdout.decode().split('\n'):
        # BUG FIX: the old pattern "State|Output|Input .*" bound ".*" only to
        # "Input" because of alternation precedence; group the alternatives so
        # each case requires the "<Case> <original> <id>" shape before split().
        if re.match(r"(State|Output|Input) ", line):
            case, original, id_ = line.split(' ')
            cases[case][id_] = original
        if re.match(r"Suffix ", line):
            # translate each id back to its original input symbol
            suffix = []
            line = line.replace("Suffix ", "")
            for a in line.strip().split(" "):
                if len(a) > 0:
                    suffix.append(cases["Input"][a])
            suffixes.append(tuple(suffix))
    return suffixes
if __name__ == "__main__":
    # Ad-hoc smoke test against a local RERS model; the path is machine-specific.
    path = "/home/tom/projects/lstar/rers/industrial/m132.dot"
    fsm = load_mealy_dot(path)
    get_distinguishing_set(fsm, check=True)
|
# Convex hull of a random set of points:
import numpy as np
import matplotlib.pyplot as plt
# from scipy.spatial import ConvexHull
# Base name of the input ("<FILE_NAME>.txt") and output ("<FILE_NAME>_out.txt") files.
FILE_NAME = "layer1"
def get_degree_all(x_arr, y_arr):
    """Return the polar angle, in degrees, of every (x, y) pair."""
    radians = np.arctan2(y_arr, x_arr)
    return radians * 180 / np.pi
def ccw(p1, p2, p3):
    """Orientation of the ordered triple via the cross-product sign.

    Returns 1 for counter-clockwise, -1 for clockwise, 0 for collinear.
    """
    cross = (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])
    if cross > 0:
        return 1
    if cross < 0:
        return -1
    return 0
def MyConvexHull(_points):
    """Graham-scan convex hull over an (N, 2) array of integer points (N >= 3).

    Returns:
        (M, 2) integer array of index pairs, one per hull edge, chaining the
        hull vertices counter-clockwise (indices into *_points*).
    """
    convex_points = np.copy(_points)

    # Pivot: lowest y, ties broken by lowest x.
    # NOTE: np.alen was removed in numpy >= 1.23; len() is the supported spelling.
    y_min_idx = 0
    for idx in range(1, len(convex_points)):
        if convex_points[idx, 1] < convex_points[y_min_idx, 1]:
            y_min_idx = idx
        elif convex_points[idx, 1] == convex_points[y_min_idx, 1]:
            # BUG FIX: the original compared the candidate's x with itself
            # (convex_points[idx, 0] < convex_points[idx, 0]), so the x
            # tie-break never fired; compare against the current pivot.
            if convex_points[idx, 0] < convex_points[y_min_idx, 0]:
                y_min_idx = idx

    # Translate so the pivot sits at the origin.
    for idx in range(len(convex_points)):
        if idx != y_min_idx:
            convex_points[idx, 0] -= convex_points[y_min_idx, 0]
            convex_points[idx, 1] -= convex_points[y_min_idx, 1]
    convex_points[y_min_idx] = np.array([0, 0])

    # Polar angle of every point about the pivot.  (The original also had a
    # loop copying degrees[1:] onto itself — a no-op, removed.)
    degrees = get_degree_all(convex_points[:, 0], convex_points[:, 1])

    # For each angle keep only the point farthest from the pivot.
    point_dict = {}
    for idx in range(1, len(convex_points)):
        if degrees[idx] in point_dict.keys():
            x1, y1 = convex_points[idx]
            x2, y2 = point_dict[degrees[idx]][1]
            if x1 ** 2 + y1 ** 2 > x2 ** 2 + y2 ** 2:
                point_dict[degrees[idx]] = (idx, convex_points[idx])
        else:
            point_dict[degrees[idx]] = (idx, np.copy(convex_points[idx]))

    # Sort candidates by angle (then index) for the scan.
    point_list = [[degrees[0], 0, np.copy(convex_points[0])]]
    for degree in point_dict:
        point_list.append([degree, point_dict[degree][0], point_dict[degree][1]])
    point_list.sort()
    sorted_list = []
    for idx in range(len(point_list)):
        sorted_list.append([point_list[idx][1], point_list[idx][2]])

    # Graham scan: pop while the turn is not counter-clockwise.
    stack = [sorted_list[0], sorted_list[1]]
    idx = 2
    while idx < len(sorted_list):
        if ccw(stack[-2][1], stack[-1][1], sorted_list[idx][1]) > 0:
            stack.append(sorted_list[idx])
            idx += 1
        else:
            stack.pop()

    # Emit edges chaining consecutive hull vertices, closing the polygon.
    result = np.array([], dtype=int)
    for idx in range(len(stack) - 1):
        result = np.append(result, np.array([stack[idx][0], stack[idx + 1][0]]))
    result = np.append(result, np.array([stack[len(stack) - 1][0], stack[0][0]]))
    result = result.reshape(len(result) // 2, 2)
    return result
# Read N points from "<FILE_NAME>.txt" (count on the first line, one "x y" pair
# per following line) and peel off convex-hull layers, writing each layer's
# original point indices to "<FILE_NAME>_out.txt".
idx_list = list()
points = np.array([], dtype=int)
inp_file = open("{}.txt".format(FILE_NAME), "r")
N = int(inp_file.readline())
for i in range(N):
    line = inp_file.readline()
    points = np.append(points, np.array([int(line.split()[0]), int(line.split()[1])]))
    idx_list.append(i)
inp_file.close()
points = points.reshape(int(np.alen(points) / 2), 2)
out_file = open("{}_out.txt".format(FILE_NAME), "w")
# NOTE(review): np.alen was removed in numpy >= 1.23; this script needs len()
# (or an older numpy) to run.
while True:
    # stop when fewer than 3 points remain (no hull possible)
    if np.alen(points) < 3:
        break
    # hull = ConvexHull(points)
    hull = MyConvexHull(points)
    plt.plot(points[:, 0], points[:, 1], 'o')
    last_simplex = None          # NOTE(review): unused — leftover variable
    point_set = set()
    location_set = set()         # NOTE(review): unused — leftover variable
    # hull = hull.simplices
    # draw the hull edges and collect the indices of hull vertices
    for simplex in hull:
        plt.plot(points[simplex, 0], points[simplex, 1], 'r--', alpha=0.6)
        point_set.add(simplex[0])
        point_set.add(simplex[1])
    # collect points lying exactly on a hull edge (collinear) so they are
    # emitted with this layer and removed along with the hull vertices
    remove_set = set()
    new_list = []
    for simplex in hull:
        new_list.append(simplex[0])
        for idx in range(np.alen(points)):
            if idx != simplex[0] and idx != simplex[1]:
                if ccw(points[simplex[0]], points[idx], points[simplex[1]]) == 0:
                    remove_set.add(idx)
                    new_list.append(idx)
    # write the layer starting from its smallest original index
    i = new_list.index(min(new_list))
    for _ in range(len(new_list)):
        out_file.write("{} ".format(idx_list[new_list[(i % len(new_list))]]))
        i += 1
    out_file.write("\n")
    # keep only interior points (plus the origin pivot) for the next layer
    next_point = np.array([], dtype=int)
    import pprint  # NOTE(review): unused leftover import
    for i in range(np.alen(points)):
        if np.array_equal(points[i], np.array([0, 0])):
            next_point = np.append(next_point, np.array(points[i]))
        elif not (i in point_set) and not (i in remove_set):
            next_point = np.append(next_point, np.array(points[i]))
        else:
            idx_list[i] = -1
    next_point = next_point.reshape(int(np.alen(next_point) / 2), 2)
    points = np.copy(next_point)
    # drop the original indices of removed points
    new_list = []
    for value in idx_list:
        if value != -1:
            new_list.append(value)
    idx_list = new_list
plt.show()
out_file.close()
|
from django.contrib import admin
from django.urls import path,include
from game import views
# Project URL routes: Django admin plus the game app's pages.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.homepage, name='homepage'),
    path('loginRegisterPage/', views.loginRegisterPage, name='loginRegisterPage'),
    path('mytodos/', views.mytodos, name='mytodos')
]
|
from temboo.Library._23andMe.Ancestry import Ancestry, AncestryInputSet, AncestryResultSet, AncestryChoreographyExecution
from temboo.Library._23andMe.Genomes import Genomes, GenomesInputSet, GenomesResultSet, GenomesChoreographyExecution
from temboo.Library._23andMe.Genotype import Genotype, GenotypeInputSet, GenotypeResultSet, GenotypeChoreographyExecution
from temboo.Library._23andMe.Haplogroups import Haplogroups, HaplogroupsInputSet, HaplogroupsResultSet, HaplogroupsChoreographyExecution
from temboo.Library._23andMe.Names import Names, NamesInputSet, NamesResultSet, NamesChoreographyExecution
from temboo.Library._23andMe.User import User, UserInputSet, UserResultSet, UserChoreographyExecution
|
# -*- coding: utf-8 -*-
from sys import argv, exit
from src import raflib
def help():
    # NOTE(review): shadows the `help` builtin; Python 2 print-statement syntax.
    """Print CLI usage instructions for rafpy."""
    print "Usage: -u or --unpack: Unpacks the content of RAF (Riot Archive File).\n" \
          "Example: python rafpy.py -u Archive.raf Archive.raf.dat"
if __name__ == "__main__":
    # Python 2 CLI entry point: expects `-u/--unpack <archive.raf> <archive.raf.dat>`.
    print "RAFPy 1"
    if len(argv) == 4:
        if argv[1] == "-u" or argv[1] == "--unpack":
            raflib.RAFLib(argv[2], argv[3]).unpack_content()
            exit()
        else:
            # unknown flag: show usage
            help()
            exit()
    else:
        # wrong argument count: show usage
        help()
        exit()
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021 Oleksandr Moskalenko <om@rc.ufl.edu>
#
# Distributed under terms of the MIT license.
"""
Main module for the TranD package. Define a CLI and drive execution of all analyses.
"""
import argparse
import logging
import re
import os
import sys
from loguru import logger
from pathlib import Path
from pybedtools import cleanup
from trand.io import prepare_outdir
from trand.event_analysis import process_single_file
from trand.event_analysis import process_two_files
# CONFIGURATION
# Output file selections
# One file:
# common, gene OR pairwise
# Two files:
# common, two_gtfs, pairwise
# Output files written by every run (single- or two-GTF); see cli() below.
common_outfiles = {
    "ea_fh": "event_analysis.csv",
    "jc_fh": "junction_catalog.csv",
    "er_fh": "event_analysis_er.csv",
    "ef_fh": "event_analysis_ef.csv",
    "ir_fh": "ir_transcripts.csv",
    "ue_fh": "uniq_exons_per_gene.csv",
}
# Added for pairwise comparisons (1-GTF pairwise mode and all 2-GTF runs).
pairwise_outfiles = {
    "td_fh": "pairwise_transcript_distance.csv",
}
# Added for 1-GTF 'gene' mode (all transcripts of a gene compared together).
gene_outfiles = {
    "er_fh": "event_analysis_er.csv",
    "ef_fh": "event_analysis_ef.csv",
    "ir_fh": "ir_transcripts.csv",
    "ue_fh": "uniq_exons_per_gene.csv",
}
# Added only for two-GTF comparisons.
two_gtfs_outfiles = {
    "gtf1_fh": "gtf1_only.gtf",
    "gtf2_fh": "gtf2_only.gtf",
    "md_fh": "minimum_pairwise_transcript_distance.csv",
}
# Extra outputs produced when --consolidate is requested (single-GTF runs).
consol_outfiles = {
    "key_fh": "transcript_id_2_consolidation_id.csv",
    "consol_gtf_fh": "consolidated_transcriptome.gtf",
}
def parse_args(print_help=False):
    """Parse command-line arguments.

    Args:
        print_help: when True, print usage and exit(0) instead of parsing.

    Returns:
        argparse.Namespace with validated options.

    Exits with status 2 when validation fails (more than two input files,
    malformed prefix/name, or a non-positive CPU count).
    """
    class MyParser(argparse.ArgumentParser):
        """Subclass ArgumentParser for better help printing"""
        def error(self, message):
            # Show the full usage text on any parse error before exiting.
            sys.stderr.write("error: %s\n" % message)
            self.print_help()
            sys.exit(2)
    parser = MyParser(
        description="Perform transcript distance, complexity and "
        "transcriptome comparison analyses."
    )
    parser.add_argument(
        dest="infiles",
        metavar="input_file",
        type=str,
        nargs="+",
        help="One or two input GTF file(s).",
    )
    parser.add_argument(
        "-o",
        "--outdir",
        action="store",
        type=str,
        required=False,
        help="Output directory, created if missing. Default: current directory.",
    )
    parser.add_argument(
        "-l",
        "--logfile",
        dest="log_file",
        action="store",
        type=str,
        default=None,
        required=False,
        help="Log file name for logging processing events to file.",
    )
    parser.add_argument(
        "--consolidate",
        dest="consolidate",
        action="store_true",
        # Fixed help text: the opening parenthesis was missing in the original.
        help="""Used with 1 GTF input file. Consolidate transcripts (remove 5'/3' transcript end
        variation in redundantly spliced transcripts) with identical junctions prior to complexity
        calculations, events and summary plotting. Default: No consolidation""",
    )
    parser.add_argument(
        "--consolPrefix",
        dest="consol_prefix",
        type=str,
        default="tr",
        help="""Used with 1 GTF input file. Requires '--consolidate' flag. Specify the prefix to use
        for consolidated transcript_id values. Prefix must be alphanumeric with no spaces.
        Underscore (\"_\") is the only allowed special character. Default: 'tr'""",
    )
    parser.add_argument(
        "-c",
        "--complexityOnly",
        dest="complexity_only",
        action="store_true",
        help="""Used with 1 or 2 GTF input file(s). Output only complexity measures. If used in
        presence of the '--consolidate' flag, complexity is calculated on the consolidated GTF(s).
        Default: Perform all analyses and comparisons including complexity calculations""",
    )
    parser.add_argument(
        "-e",
        "--ea",
        dest="ea_mode",
        type=str,
        choices=["pairwise", "gene"],
        default="pairwise",
        # Fixed help text: "iwth" -> "with", added sentence break before "Default".
        help="""Specify type of within gene transcript comparison: pairwise - Used with 1 or 2 GTF
        input files. Compare pairs of transcripts within a gene. gene - Used with 1 GTF input file.
        Compare all transcripts within a gene. Default: pairwise""",
    )
    parser.add_argument(
        "-k",
        "--keepir",
        dest="keep_ir",
        action="store_true",
        help="""Keep transcripts with Intron Retention(s) when generating transcript events. Only
        used with 1 GTF input file. Default: remove""",
    )
    parser.add_argument(
        "-p",
        "--pairs",
        type=str,
        choices=["all", "both", "first", "second"],
        dest="out_pairs",
        default="both",
        help="""Used with 2 GTF input files. The TranD metrics can be for all transcript pairs in
        both GTF files or for a subset of transcript pairs using the following options: both -
        Trand metrics for the minimum pairs in both GTF files, first - TranD metrics for the
        minimum pairs in the first GTF file, second - TranD metrics for the minimum pairs in the
        second GTF file all - TranD metrics for all transcript pairs in both GTF files Default:
        both""",
    )
    parser.add_argument(
        "-1",
        "--name1",
        dest="name1",
        default="d1",
        required=False,
        help="""Used with 2 GTF input files. User-specified name to be used for labeling output
        files related to the first GTF file. Name must be alphanumeric, can only include \"_\"
        special character and not contain any spaces. Default: d1""",
    )
    parser.add_argument(
        "-2",
        "--name2",
        dest="name2",
        default="d2",
        required=False,
        help="""Used with 2 GTF input files. User-specified name to be used for labeling output
        files related to the second GTF file. Name must be alphanumeric, can only include \"_\"
        special character and not contain any spaces. Default: d2""",
    )
    parser.add_argument(
        "-n",
        "--cpus",
        dest="cpu_cores",
        type=int,
        default=1,
        required=False,
        help="Number of CPU cores to use for parallelization. Default: 1",
    )
    parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        help="Force overwrite existing output directory and files within.",
    )
    parser.add_argument(
        "-s",
        "--skip-plots",
        dest="skip_plots",
        action="store_true",
        help="Skip generation of all plots.",
    )
    parser.add_argument(
        "-i",
        "--skip-intermediate",
        dest="skip_interm",
        action="store_true",
        help="Skip intermediate file output (junction and exon region/fragment files).",
    )
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
    parser.add_argument("-d", "--debug", action="store_true", help=argparse.SUPPRESS)
    if print_help:
        parser.print_help()
        sys.exit(0)
    args = parser.parse_args()
    if len(args.infiles) > 2:
        print("\nToo many input files - pass one or two GTF/GFF files as input.\n")
        parser.print_help()
        sys.exit(2)
    if args.ea_mode == "gene":
        if len(args.infiles) > 1:
            logger.warning(
                "EA 'gene' mode is ignored for two GTF files - only pairwise is done."
            )
    # ASCII-only \w+ : alphanumerics and underscore, no spaces.
    regex = re.compile(r'^\w+$', re.ASCII)
    # Validate prefixes
    if not regex.match(args.consol_prefix):
        # Fixed message: the original concatenated "alphanumeric." and "Only"
        # without a separating space.
        logger.error("Invalid prefix format for consolidated transcript_id values: "
                     "Must be alphanumeric. "
                     "Only '_' (underscore) special character is allowed"
                     )
        parser.print_help()
        sys.exit(2)
    if not regex.match(args.name1):
        logger.error(
            "Invalid name for dataset 1: Must be alphanumeric and can only "
            "include '_' special character"
        )
        parser.print_help()
        sys.exit(2)
    if not regex.match(args.name2):
        logger.error(
            "Invalid name for dataset 2: Must be alphanumeric and can only "
            "include '_' special character"
        )
        parser.print_help()
        sys.exit(2)
    # Multiprocessing checks
    if args.cpu_cores < 1:
        logger.error(
            "Invalid value for the number of CPU cores. Must be 1 or greater."
        )
        parser.print_help()
        sys.exit(2)
    # os.sched_getaffinity is accurate and linux HPC specific. os.cpu_count = total system cores.
    try:
        avail_cores = len(os.sched_getaffinity(0))
    except AttributeError:
        avail_cores = os.cpu_count()
    if args.cpu_cores > avail_cores:
        # Does this have to be an error since it leads to lowered performance?
        logger.warning("Requested CPU cores exceed the number of available cores!")
    return args
def setup_logging(debug, verbose, logfile):
    """Set the correct logging level and sinks.

    Args:
        debug: enable DEBUG output (overrides *verbose*).
        verbose: enable INFO output; otherwise WARN.
        logfile: optional path; when given, messages are also written there.
    """
    logger.remove()
    if debug:
        level = logging.DEBUG
    elif verbose:
        level = logging.INFO
    else:
        level = logging.WARN
    # Add the stderr sink exactly once. The original code added it a second
    # time when no logfile was given, duplicating every console message.
    logger.add(sys.stderr, level=level)
    logger.debug("Debugging output enabled")
    if logfile:
        logger.info("Logging to {}", logfile)
        logger.add(logfile, level=level)
    logger.debug("Logging level set to : {}", level)
def cli():
    """CLI interface for the 'trand' executable.

    Parses arguments, configures logging, prepares the output directory and
    dispatches to the single-file or two-file analysis driver.
    """
    args = parse_args()
    setup_logging(args.debug, args.verbose, args.log_file)
    logger.debug("Args: {}", args)
    if not args.outdir:
        # Default to the current directory; it already exists, so force reuse.
        args.outdir = str(Path.cwd())
        args.force = True
    prepare_outdir(args.outdir, args.force)
    if len(args.infiles) == 1:
        logger.debug("Single file {} analysis", args.ea_mode)
        # Copy the module-level template: the original aliased it and then
        # update()d it in place, permanently mutating the shared
        # common_outfiles dict (a second cli() call would see polluted keys).
        outfiles = dict(common_outfiles)
        if args.ea_mode == "pairwise":
            outfiles.update(pairwise_outfiles)
        else:
            outfiles.update(gene_outfiles)
        try:
            process_single_file(
                args.infiles[0],
                args.ea_mode,
                args.keep_ir,
                args.outdir,
                outfiles,
                args.cpu_cores,
                args.complexity_only,
                args.skip_plots,
                args.skip_interm,
                args.consolidate,
                args.consol_prefix,
                consol_outfiles
            )
        finally:
            # Only for bedtools. Remove when bedtools are refactored out.
            cleanup()
    else:
        logger.debug("Two files pairwise analysis")
        # Same copy-before-update fix as above.
        outfiles = dict(common_outfiles)
        outfiles.update(two_gtfs_outfiles)
        outfiles.update(pairwise_outfiles)
        try:
            process_two_files(
                args.infiles,
                args.outdir,
                outfiles,
                args.cpu_cores,
                args.out_pairs,
                args.complexity_only,
                args.skip_plots,
                args.skip_interm,
                args.name1,
                args.name2,
            )
        finally:
            # Only for bedtools. Remove when bedtools are refactored out.
            cleanup()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-04-02 18:10:31
# @Author : guanglinzhou (xdzgl812@163.com)
# @Link : https://github.com/GuanglinZhou
# @Version : $Id$
import numpy as np
import pandas as pd
# inputfile = open('/Users/guanglinzhou/Documents/gpsDataProject/processDataFromJin/export.sql', 'r')
# outputfile = open('data.txt', 'w')
# i = 0
# for s in inputfile:
# if (s.find('values') != -1):
# index = s.index('values')
# outputfile.write(s[index + 8:-3])
# outputfile.write('\n')
# i += 1
# if (i % 100000 == 0):
# print(i)
# # print(s)
# inputfile.close()
# outputfile.close()
# Check whether a latitude/longitude pair falls inside the road-network bounding box
def checkLatLongInRange(lat, lon):
    """Return True when (lat, lon) lies inside the road-network bounding box.

    Accepts numbers or numeric strings; boundary values count as inside.
    """
    lat_ok = 31.283233 <= float(lat) <= 32.3576973
    lon_ok = 116.8899771 <= float(lon) <= 117.8087044
    return lat_ok and lon_ok
# Filter trip records from data.txt into a CSV of (lat, lon, address) rows.
with open('gpsDataFromJin.csv', 'w') as csvFile:
    csvFile.write(
        'Latitude,' + 'Longitude,' + 'Address')
    with open('data.txt', 'r') as file:
        line_num = 0
        for line in file:
            # Read lat/lon by splitting on ',' from the front, but read the
            # street names by scanning for single quotes from the back of the
            # line, because the START_ADDRESS_DESC and END_ADDRESS_DESC fields
            # may themselves contain ',' -- a plain split on ',' would be wrong.
            infoList = line.split(',')
            start_lt = infoList[9]
            start_lg = infoList[11]
            end_lt = infoList[10]
            end_lg = infoList[12]
            # Count single quotes from the end: the 3rd/4th quotes bracket the
            # end address and the 7th/8th bracket the start address.
            quoNum = 0
            index3 = 0
            index4 = 0
            index7 = 0
            index8 = 0
            for i in reversed(range(len(line))):
                if (line[i] == '\''):
                    quoNum += 1
                    if (quoNum == 3):
                        index3 = i
                    if (quoNum == 4):
                        index4 = i
                    if (quoNum == 7):
                        index7 = i
                    if (quoNum == 8):
                        index8 = i
            start_address = line[index8 + 1:index7]
            end_address = line[index4 + 1:index3]
            line_num += 1
            if (line_num % 1000 == 0):
                # break
                print(line_num)
            # Keep only rows inside the bounding box whose address is a clean,
            # comma-free street name in Hefei ('安徽省合肥市').
            if (checkLatLongInRange(start_lt,
                                    start_lg) and start_address != 'null' and ',' not in start_address and '安徽省合肥市' in start_address):
                csvFile.write('\n')
                csvFile.write('{},{},{}'.format(start_lt, start_lg, start_address))
            if (checkLatLongInRange(end_lt,
                                    end_lg) and end_address != 'null' and ',' not in end_address and '安徽省合肥市' in end_address):
                csvFile.write('\n')
                csvFile.write('{},{},{}'.format(end_lt, end_lg, end_address))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import wagtail.images.blocks
import wagtail.core.fields
import wagtail.core.blocks
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the CustomPage and LandingPage page models and add a StreamField
    body to the existing HomePage."""

    dependencies = [
        ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
        ('home', '0002_create_homepage'),
    ]

    operations = [
        migrations.CreateModel(
            name='CustomPage',
            fields=[
                # NOTE(review): on_delete=SET_NULL on a non-nullable primary-key
                # parent link is unusual (wagtail pages normally CASCADE) --
                # confirm this is intended before relying on deletion behavior.
                ('page_ptr', models.OneToOneField(serialize=False, primary_key=True, auto_created=True, parent_link=True, to='wagtailcore.Page', on_delete=django.db.models.deletion.SET_NULL)),
                ('author', models.CharField(max_length=255)),
                ('date', models.DateField(verbose_name='Post date')),
                # Free-form body: heading / rich-text paragraph / image blocks.
                ('body', wagtail.core.fields.StreamField((('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())))),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='LandingPage',
            fields=[
                ('page_ptr', models.OneToOneField(serialize=False, primary_key=True, auto_created=True, parent_link=True, to='wagtailcore.Page', on_delete=django.db.models.deletion.SET_NULL)),
                ('body', wagtail.core.fields.StreamField((('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())))),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page', models.Model),
        ),
        # Same block choices as above; default=None with preserve_default=False
        # only satisfies the one-off schema migration for existing rows.
        migrations.AddField(
            model_name='homepage',
            name='body',
            field=wagtail.core.fields.StreamField((('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())), default=None),
            preserve_default=False,
        ),
    ]
|
from __future__ import annotations
from typing import Any
from wolfbot import const
from wolfbot.enums import Role, SwitchPriority, lru_cache
from wolfbot.game_utils import get_player, swap_characters
from wolfbot.log import logger
from wolfbot.roles.player import Player
from wolfbot.statements import Statement
class Robber(Player):
    """Robber player: swaps roles with another player during the night."""

    def __init__(self, player_index: int, choice_ind: int, new_role: Role):
        super().__init__(player_index)
        self.choice_ind = choice_ind
        self.new_role = new_role
        robber_statements = self.get_robber_statements(
            player_index, choice_ind, new_role
        )
        self.statements = self.statements + robber_statements

    @classmethod
    def awake_init(cls, player_index: int, game_roles: list[Role]) -> Robber:
        """Initializes Robber - switches roles with another player."""
        is_user = const.IS_USER[player_index]
        target = get_player(is_user, (player_index,))
        stolen_role = game_roles[target]
        logger.debug(
            f"[Hidden] Robber switches with Player {target} "
            f"and becomes a {stolen_role}."
        )
        if is_user:
            logger.info(
                f"You switched with Player {target} and are now a {stolen_role}!",
                cache=True,
            )
        swap_characters(game_roles, player_index, target)
        return cls(player_index, target, stolen_role)

    @staticmethod
    @lru_cache
    def get_robber_statements(
        player_index: int, choice_ind: int, choice_char: Role
    ) -> tuple[Statement, ...]:
        """Builds the single statement describing one Robber swap."""
        knowledge = (
            (player_index, frozenset({Role.ROBBER})),
            (choice_ind, frozenset({choice_char})),
        )
        switches = ((SwitchPriority.ROBBER, player_index, choice_ind),)
        sentence = (
            f"I am a Robber and I swapped with Player {choice_ind}. "
            f"I am now a {choice_char}."
        )
        return (Statement(sentence, knowledge, switches),)

    @staticmethod
    @lru_cache
    def get_all_statements(player_index: int) -> tuple[Statement, ...]:
        """Required for all player types. Returns all possible role statements."""
        statements: tuple[Statement, ...] = ()
        for other_player in range(const.NUM_PLAYERS):
            if other_player == player_index:
                continue
            # OK: 'I robbed Player 0 and now I'm a Wolf... ;)'
            for role in const.SORTED_ROLE_SET:
                statements += Robber.get_robber_statements(
                    player_index, other_player, role
                )
        return statements

    def json_repr(self) -> dict[str, Any]:
        """Gets JSON representation of a Robber player."""
        fields = super().json_repr()
        fields["choice_ind"] = self.choice_ind
        fields["new_role"] = self.new_role
        return fields
|
# Copy this file to setting_local.py
# Set this to False if on a production server.
DEBUG = True
TEMPLATE_DEBUG = DEBUG  # legacy Django template-debug flag mirrors DEBUG
# Replace this with the real key.
# SECURITY: keep the real SECRET_KEY out of version control.
SECRET_KEY = '..................................................'
|
#!/usr/bin/env python
# -*- python -*-
from __future__ import print_function
import os, sys, re, MySQLdb, time
dirNm, execName = os.path.split(os.path.realpath(sys.argv[0]))
sys.path.append(os.path.realpath(dirNm))
from LMODdb import LMODdb
import argparse
def dbConfigFn(dbname):
    """Return the config-file name for *dbname* (e.g. 'lmod' -> 'lmod_db.conf')."""
    return "{0}_db.conf".format(dbname)
def strDate2dA(s):
    """Parse a date string into [year, month]; accepts -, _, / or . separators."""
    year, month = re.split(r'[-_/.]', s)[:2]
    return [int(year), int(month)]
def add_month(dA):
    """Advance a [year, month] pair by one month in place and return it."""
    year, month = dA
    if month >= 12:
        # December rolls over into January of the next year.
        dA[0] = year + 1
        dA[1] = 1
    else:
        dA[1] = month + 1
    return dA
def substract_month(dA):
    """Move a [year, month] pair back one month in place and return it.

    (Name spelling kept as-is; callers may rely on it.)
    """
    year, month = dA
    if month <= 1:
        # January rolls back into December of the previous year.
        dA[0] = year - 1
        dA[1] = 12
    else:
        dA[1] = month - 1
    return dA
class CmdLineOptions(object):
    """Command-line options for the Lmod DB creation script."""

    def __init__(self):
        """Empty constructor."""
        pass

    def execute(self):
        """Define the command-line arguments and parse the command line."""
        # Default first partition: one month past today, as "YYYY-MM".
        nextMonth = add_month(strDate2dA(time.strftime('%Y-%m-%d', time.localtime(time.time()))))
        defaultPart = "%04d-%02d" % (nextMonth[0], nextMonth[1])
        parser = argparse.ArgumentParser()
        parser.add_argument("--dbname", dest='dbname', action="store", default="lmod", help="lmod")
        parser.add_argument("--startPart", dest='firstPart', action="store", default=defaultPart, help="first partition date")
        return parser.parse_args()
def main():
    """
    This program creates the Database used by Lmod.

    It creates the database (if missing), the userT and moduleT tables, the
    partitioned join_user_module table, a stored procedure plus a monthly
    event that add new partitions, and back-fills partitions up to one month
    past today.
    """
    args = CmdLineOptions().execute()
    configFn = dbConfigFn(args.dbname)
    if (not os.path.isfile(configFn)):
        # Look next to the executable first, then fall back to ../site.
        dirNm, exe = os.path.split(sys.argv[0])
        fn = os.path.join(dirNm, configFn)
        if (os.path.isfile(fn)):
            configFn = fn
        else:
            configFn = os.path.abspath(os.path.join(dirNm, "../site", configFn))
    lmod = LMODdb(configFn)
    try:
        conn = lmod.connect()
        cursor = conn.cursor()
        # If MySQL version < 4.1, comment out the line below
        cursor.execute("SET SQL_MODE=\"NO_AUTO_VALUE_ON_ZERO\"")
        # If the database does not exist, create it, otherwise, switch to the database.
        cursor.execute("CREATE DATABASE IF NOT EXISTS %s DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci" % lmod.db())
        cursor.execute("USE "+lmod.db())
        idx = 1
        print("start")
        # 1
        cursor.execute("""
          CREATE TABLE `userT` (
            `user_id` int(11) unsigned NOT NULL auto_increment,
            `user`    varchar(64)      NOT NULL,
            PRIMARY KEY (`user_id`)
          ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci AUTO_INCREMENT=1
          """)
        print("(%d) create userT table" % idx)
        idx += 1
        # 2
        cursor.execute("""
          CREATE TABLE `moduleT` (
            `mod_id`  int(11) unsigned NOT NULL auto_increment,
            `path`    varchar(1024)    NOT NULL,
            `module`  varchar(64)      NOT NULL,
            `syshost` varchar(32)      NOT NULL,
            PRIMARY KEY (`mod_id`),
            INDEX `thekey` (`path`(128), `syshost`)
          ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci AUTO_INCREMENT=1
          """)
        print("(%d) create moduleT table" % idx)
        idx += 1
        # 3: partitioned join table; the first partition comes from --startPart.
        dA = strDate2dA(args.firstPart)
        partNm = "p%04d_%02d" % (dA[0], dA[1])
        dateStr = "%04d-%02d-01" % (dA[0], dA[1])
        query = "CREATE TABLE `join_user_module` (" +\
                " `join_id` int(11) unsigned NOT NULL auto_increment," +\
                " `user_id` int(11) unsigned NOT NULL," +\
                " `mod_id`  int(11) unsigned NOT NULL," +\
                " `date`    DATETIME         NOT NULL," +\
                " PRIMARY KEY (`join_id`, `date`)," +\
                " INDEX `index_date` (`date`)" +\
                ") ENGINE=InnoDB DEFAULT CHARSET=utf8 " +\
                " COLLATE=utf8_general_ci AUTO_INCREMENT=1 " +\
                " PARTITION BY RANGE( TO_DAYS(`date`) ) (" +\
                " PARTITION "+ partNm + " VALUES LESS THAN (TO_DAYS('"+dateStr+"')))"
        cursor.execute(query)
        print("(%d) create join_link_object table" % idx)
        idx += 1
        # Stored procedure used by the monthly event to append a partition.
        sqlCommands = """
        create procedure CreateDataPartition (newPartValue DATETIME, tbName VARCHAR(50))
        begin
           DECLARE keepStmt VARCHAR(2000) DEFAULT @stmt;
           SET @stmt = CONCAT('ALTER TABLE ', tbName ,' ADD PARTITION (PARTITION p',
                              DATE_FORMAT(newPartValue, '%Y_%m'),
                              ' VALUES LESS THAN (TO_DAYS(\\'',
                              DATE_FORMAT(newPartValue, '%Y-%m-01'),
                              '\\')))');
           PREPARE pStmt FROM @stmt;
           execute pStmt;
           DEALLOCATE PREPARE pStmt;
           set @stmt = keepStmt;
        END
        """
        cursor.execute(sqlCommands)
        print("(%d) Create stored procedure CreateDataPartition" % idx)
        idx += 1
        sqlCommands = """
        CREATE EVENT eventCreatePartition
          ON SCHEDULE EVERY 1 MONTH
          STARTS '2016-02-01 00:00:00'
        DO
          BEGIN
            call CreateDataPartition(NOW() + interval 1 MONTH,'join_user_module');
          END
        """
        cursor.execute(sqlCommands)
        print("(%d) Create event eventCreatePartition" % idx)
        idx += 1
        # Back-fill partitions from the month after firstPart up to one month
        # past today.
        dA = add_month(dA)
        now = time.strftime('%Y-%m-%d', time.localtime(time.time()))
        eA = add_month(strDate2dA(now))
        while ((dA[0] < eA[0]) or (dA[0] == eA[0] and dA[1] <= eA[1])):
            partNm = "p%04d_%02d" % (dA[0], dA[1])
            dateStr = "%04d-%02d-01" % (dA[0], dA[1])
            query = "ALTER TABLE join_user_module ADD PARTITION (PARTITION " +\
                    partNm + " VALUES LESS THAN (TO_DAYS('"+dateStr+"')))"
            print("query:", query)
            cursor.execute(query)
            dA = add_month(dA)
        cursor.close()
    except MySQLdb.Error as e:
        # "except E as e" is valid from Python 2.6 on and required by Python 3;
        # the original comma form was Python-2-only syntax.
        print("Error %d: %s" % (e.args[0], e.args[1]))
        sys.exit(1)


if __name__ == '__main__':
    main()
|
from magicbot import AutonomousStateMachine, tunable, timed_state, state
from components.Input.ballCounter import BallCounter
from components.Actuators.LowLevel.driveTrain import DriveTrain
from components.Actuators.LowLevel.intakeMotor import IntakeMotor
from components.Actuators.LowLevel.limelight import Limelight
from components.Actuators.HighLevel.shooterLogic import ShooterLogic
from components.Actuators.HighLevel.loaderLogic import LoaderLogic
from components.Actuators.HighLevel.driveTrainHandler import DriveTrainHandler, ControlMode
from components.Actuators.HighLevel.turretScan import TurretScan
from components.Actuators.LowLevel.pneumatics import Pneumatics
from components.Actuators.AutonomousControl.turnToAngle import TurnToAngle
from components.Actuators.AutonomousControl.turretTurn import TurretTurn
from components.Actuators.AutonomousControl.driveTrainGoToDist import GoToDist
from components.Actuators.HighLevel.turretCalibrate import CalibrateTurret, TurretThreshold
from components.Actuators.LowLevel.winch import Winch
from utils.DirectionEnums import Direction
import logging as log
class Autonomous(AutonomousStateMachine):
    """Creates the autonomous code.

    "Big Brain" routine: raise the winch, calibrate the turret while driving
    a position-dependent movement sequence, re-aim via encoder then
    limelight, shoot, optionally back up, then stop.
    """
    # Seconds spent in shooter_wait for a volley to finish.
    # NOTE(review): captured by the @timed_state decorator at class-creation
    # time; changing it at runtime has no effect on the state's duration.
    shootTime = 4
    DEFAULT = True
    MODE_NAME = "Big Brain Autonomous"
    # Components injected by the framework via these type annotations.
    turretScan: TurretScan
    driveTrain: DriveTrain
    goToDist: GoToDist
    shooter: ShooterLogic
    pneumatics: Pneumatics
    # NOTE(review): duplicate annotation -- driveTrain is already declared
    # above; harmless, but one of the two lines could be removed.
    driveTrain: DriveTrain
    turnToAngle: TurnToAngle
    turretCalibrate: CalibrateTurret
    turretTurn: TurretTurn
    turretThreshold: TurretThreshold
    ballCounter: BallCounter
    intakeMotor: IntakeMotor
    loader: LoaderLogic
    winch: Winch
    limelight: Limelight
    driveTrainHandler: DriveTrainHandler
    drive_speed = tunable(.1)
    allianceColor: str
    # NOTE(review): @timed_state(next_state=afterShootState) below binds this
    # class-level value ("moveBack") at class-definition time; assigning
    # self.afterShootState later presumably does NOT change that transition --
    # confirm against magicbot's timed_state semantics.
    afterShootState = "moveBack"
    moveComplete = False
    currentMove = 0
    robotPosition = tunable(1)
    # Previous-iteration "running" flags, used to detect falling edges.
    turnToAnglePrevRunning = False
    goToDistPrevRunning = False
    turretTurnPrev = True
    # In degrees and feet
    # Positions are left to right 1,2,3 for the spots with balls
    moveSequences = [[["drive", 46],
                      ["turn", 180]],
                     [["turn", 59.993],
                      ["drive", 5.62733*12]],
                     [["turn", -59.993],
                      ["drive", 5.62733*12]]]

    @state(first=True)
    def init(self):
        """First state: prime the ball count, enable the shooter, choose the
        movement sequence, then go raise the winch."""
        self.ballCounter.addBall(2, self.allianceColor.lower())
        self.loader.setIsAutonomous(True)
        self.shooter.autonomousEnabled()
        self.driveTrain.resetDistTraveled()
        self.pneumatics.deployLoader()
        self.assessPosition()
        self.next_state("winchUp")

    def assessPosition(self):
        """
        Pick a movement sequence based on the tunable robotPosition.
        If it has not been changed, do the default sequence. If it has,
        pick the correct set of movements
        """
        self.moveSequence = []
        if int(self.robotPosition) == 0:
            log.error("You forgot to choose")
            self.moveSequence = [["turn", 90]]
        else:
            # robotPosition is 1-based; index into moveSequences.
            self.moveSequence = self.moveSequences[int(self.robotPosition) - 1]

    @state
    def engage_shooter(self):
        """Starts shooter and fires"""
        self.shooter.engage()
        self.shooter.startShooting()
        self.next_state('shooter_wait')

    @timed_state(duration=shootTime, next_state=afterShootState)
    def shooter_wait(self):
        """Waits for shooter to finish, then next state"""
        pass

    @state
    def calibrateTurret_move(self):
        """
        Calibrates the turret's deadzones
        while moving
        """
        self.winch.stop()
        self.assessPosition()
        self.shooter.shooterMotors.stopShooter()
        self.driveTrain.setBraking(True)
        if not self.moveComplete:
            move = self.moveSequence[self.currentMove]
            if move[0] == "turn":
                self.intakeMotor.runIntake(0, Direction.kForwards)
                log.error("Turning")
                # Advance when turnToAngle falls from running to stopped.
                if (not self.turnToAngle.running) and self.turnToAnglePrevRunning:
                    log.error("Moving to drive")
                    self.currentMove += 1
                elif not self.turnToAngle.running:
                    self.turnToAngle.setRelAngle(move[1])
                    self.turnToAngle.engage()
            elif move[0] == "drive":
                if not self.goToDist.running:
                    self.goToDist.setTargetDist(move[1])
                # Finish when goToDist stops or a ball has been collected.
                if (((not self.goToDist.running) and self.goToDistPrevRunning)
                        or self.ballCounter.getBallCount()[0] != None):
                    log.error("Finishing")
                    self.currentMove += 1
                self.intakeMotor.runIntake(.4, Direction.kForwards)
                self.intakeMotor.execute()
                self.goToDist.engage()
            if self.currentMove == len(self.moveSequence):
                self.moveComplete = True
        # Record this iteration's running flags for edge detection next loop.
        self.turnToAnglePrevRunning = self.turnToAngle.running
        self.goToDistPrevRunning = self.goToDist.running
        self.turretCalibrate.setUseMotor(True)
        self.turretCalibrate.engage()
        self.next_state("calibrateTurret_move")
        if self.turretThreshold.calibrated == True:
            self.turretTurn.done()
            self.turretThreshold.setTurretspeed(0)
        if self.turretThreshold.calibrated == True and self.moveComplete:
            self.afterShootState = "moveBack"
            self.next_state("finishCalibration")

    @timed_state(duration=.3, next_state="calibrateTurret_move")
    def winchUp(self):
        """Briefly run the winch to its lowered setting before calibrating."""
        self.winch.setLower()

    @state
    def turn_turret_rough(self):
        """Coarse encoder-based turret aim relative to the right limit."""
        self.turretTurn.setEncoderControl()
        self.turretTurn.setAngle(self.turretThreshold.rightLim - 103)
        self.turretTurn.engage()
        self.next_state("turn_turret_rough")
        # Advance on the rising edge of withinTolerance().
        if self.turretTurn.withinTolerance() and not self.turretTurnPrev:
            self.next_state("turn_turret_smart")
        self.turretTurnPrev = self.turretTurn.withinTolerance()

    @state
    def turn_turret_smart(self):
        """Fine limelight-guided turret aim; scans when no target is seen."""
        self.limelight.LEDOn()
        self.turretTurn.setLimeLightControl()
        self.turretTurn.engage()
        self.next_state("turn_turret_smart")
        if self.turretScan.hasTarget() and self.turretTurn.withinTolerance():
            self.next_state("engage_shooter")
        elif not self.turretScan.hasTarget():
            self.turretScan.engage()

    @state
    def finishCalibration(self):
        """Stop the turret after calibration and begin rough aiming."""
        self.turretThreshold.setTurretspeed(0)
        self.next_state("turn_turret_rough")

    @state
    def engage_Shooter2(self):
        """Second volley: shoot normally for our color, otherwise fire at
        fixed reduced speeds (presumably to dump the wrong ball -- confirm)."""
        ball1 = self.ballCounter.getBallCount()[0]
        self.afterShootState = "stop"
        # We don't need the second condition, but it sounds fun
        if ball1 != None and ball1.getColor() == self.allianceColor:
            self.shooter.engage()
            self.shooter.startShooting()
            self.next_state('shooter_wait')
        elif ball1 != None and ball1.getColor() != self.allianceColor:
            self.shooter.autoShootingSpeed1 = 1000
            self.shooter.autoShootingSpeed2 = 500
            self.shooter.engage()
            self.shooter.startShooting()
            self.next_state('shooter_wait')
        else:
            self.next_state("stop")

    @timed_state(duration=2, next_state="stop")
    def moveBack(self):
        """Drive straight at drive_speed for 2 seconds."""
        self.driveTrainHandler.setDriveTrain(self, ControlMode.kTankDrive, self.drive_speed, self.drive_speed)

    @state(must_finish=True)
    def stop(self):
        """Stops driving bot"""
        self.driveTrain.setTank(0, 0)
        self.turnToAngle.done()
        self.goToDist.done()
        self.turretTurn.done()
        self.done()
|
import tkinter

# Display an image in a simple Tk window.
root = tkinter.Tk()
root.title("Aplicação")

# Keep a reference to the PhotoImage: Tkinter does not hold one itself, so the
# image could be garbage-collected (and vanish from the window) otherwise.
img = tkinter.PhotoImage(file='imagens/smol.png')

# Bug fix: grid() returns None, so the original chained call left
# `label_imagem` bound to None. Create the widget first, then place it.
label_imagem = tkinter.Label(root, image=img)
label_imagem.grid()

root.mainloop()
from random import randint

# Banner.
print('===================================')
print('======= JOGO DA ADIVINHAÇÃO =======')
print('===================================')

# Pick the secret number first, then ask the player for a guess.
aleat = randint(0, 5)
n = int(input('Digite um número: '))

# Same outcomes as the original, with the condition inverted.
if n != aleat:
    print('Errou!! Teste mais tarde.')
    print(f'Número pensado pela máquina: {aleat}')
else:
    print('Você acertou!! Miseravi')
|
import unittest
import person
class PersonTest(unittest.TestCase):
    """Tests for person.person full-name and e-mail formatting."""

    # run before tests
    def setUp(self) -> None:
        self.people = (
            person.person("Mahan", "Bi"),
            person.person("Ali", "Delbande"),
            person.person("John", "Doe"),
        )

    # run after tests
    def tearDown(self) -> None:
        print("Done...")

    def test_fullname(self):
        expected = ("Mahan Bi", "Ali Delbande", "John Doe")
        for individual, want in zip(self.people, expected):
            self.assertEqual(individual.fullname(), want)

    def test_email(self):
        expected = (
            "MahanBi@gmail.com",
            "AliDelbande@gmail.com",
            "JohnDoe@gmail.com",
        )
        for individual, want in zip(self.people, expected):
            self.assertEqual(individual.email(), want)


if __name__ == "__main__":
    unittest.main()
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mesh-Tensorflow Model in tensor2tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
import six
from tensor2tensor.utils import hparams_lib
from tensor2tensor.utils import learning_rate
from tensor2tensor.utils import metrics
from tensor2tensor.utils import t2t_model
import tensorflow as tf
from tensorflow.contrib.tpu.python.tpu import tpu_estimator
class MtfModel(t2t_model.T2TModel):
"""Toy model to test mesh_tensorflow."""
@classmethod
def estimator_model_fn(cls,
                       hparams,
                       features,
                       labels,
                       mode,
                       config=None,
                       params=None,
                       decode_hparams=None,
                       use_tpu=False):
    """Model fn: builds the mtf graph and returns an (TPU)EstimatorSpec.

    Lowers the mesh-tensorflow graph to plain TF, then dispatches on
    PREDICT / TRAIN / EVAL and on whether a TPU is in use.
    """
    hparams = hparams_lib.copy_hparams(hparams)
    hparams.use_tpu = use_tpu
    # merge decode_hparams into hparams if present
    if mode == tf.estimator.ModeKeys.PREDICT and decode_hparams is not None:
        for k, v in six.iteritems(decode_hparams.values()):
            if hasattr(hparams, k) and getattr(hparams, k) != v:
                tf.logging.warning("Overriding hparams.%s with %s from decode_hparams"
                                   % (k, v))
            setattr(hparams, k, v)
    # Instantiate model
    data_parallelism = None
    if not use_tpu and config:
        data_parallelism = config.data_parallelism
    model = cls(
        hparams,
        mode,
        data_parallelism=data_parallelism,
        decode_hparams=decode_hparams)
    global_step = tf.train.get_global_step()
    mesh_shape = mtf.convert_to_shape(hparams.mesh_shape)
    layout_rules = mtf.convert_to_layout_rules(hparams.layout)
    if use_tpu:
        ctx = params["context"]
        num_hosts = ctx.num_hosts
        host_placement_fn = ctx.tpu_host_placement_function
        device_list = [host_placement_fn(host_id=t) for t in range(num_hosts)]
        # TODO(ylc): Better estimation of replica cache size?
        replica_cache_size = 300 * 1000000  # 300M per replica
        # Worker 0 caches all the TPU binaries.
        worker0_mem = replica_cache_size * ctx.num_replicas
        devices_memeory_usage = [worker0_mem] + [0] * (num_hosts - 1)
        var_placer = mtf.utils.BalancedVariablePlacer(device_list,
                                                      devices_memeory_usage)
        mesh_devices = [""] * mesh_shape.size
        mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(
            mesh_shape, layout_rules, mesh_devices, ctx.device_assignment)
    else:
        # CPU/GPU placement: one device string per mesh slot.
        var_placer = None
        if data_parallelism is None or len(data_parallelism.ps_devices) == 1:
            mesh_devices = [""] * mesh_shape.size
        else:
            assert len(data_parallelism.ps_devices) == mesh_shape.size
            mesh_devices = data_parallelism.ps_devices
        mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
            mesh_shape, layout_rules, mesh_devices)
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh", var_placer)
    # PREDICT mode
    if mode == tf.estimator.ModeKeys.PREDICT:
        return model.estimator_spec_predict(features, mesh, mesh_impl, use_tpu)
    logits, loss = model.mtf_model_fn(features, mesh)
    if use_tpu and logits is not None:
        logits = mtf.anonymize(logits)
    # TRAIN mode
    if mode == tf.estimator.ModeKeys.TRAIN:
        var_grads = mtf.gradients(
            [loss], [v.outputs[0] for v in graph.trainable_variables])
        lr = learning_rate.learning_rate_schedule(hparams)
        tf.summary.scalar("learning_rate", lr)
        mtf_lr = mtf.import_tf_tensor(
            mesh, tf.convert_to_tensor(lr, dtype=tf.float32), mtf.Shape([]))
        optimizer = mtf.optimize.make_optimizer(hparams, mtf_lr)
        update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables)
    # Lower the mtf graph to plain TensorFlow tensors/ops.
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    tf_loss = lowering.export_to_tf_tensor(loss)
    tf_loss = tf.to_float(tf_loss)
    # NOTE(review): truthiness test on an mtf Tensor (or None) -- presumably
    # relies on default object truthiness / None filtering; a plain tf.Tensor
    # would raise here. Confirm mtf_model_fn's return contract.
    if logits and mode != tf.estimator.ModeKeys.TRAIN:
        tf_logits = lowering.export_to_tf_tensor(logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
        tf_update_ops.append(tf.assign_add(global_step, 1))
        # tf.logging.info("tf_update_ops: {}".format(tf_update_ops))
        train_op = tf.group(tf_update_ops)
    with mtf.utils.outside_all_rewrites():
        # Copy master variables to slices. Must be called first.
        restore_hook = mtf.MtfRestoreHook(lowering)
        saver = tf.train.Saver(
            tf.global_variables(),
            sharded=True,
            max_to_keep=10,
            keep_checkpoint_every_n_hours=2,
            defer_build=False,
            save_relative_paths=True)
        tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
        saver_listener = mtf.MtfCheckpointSaverListener(lowering)
        saver_hook = tf.train.CheckpointSaverHook(
            hparams.model_dir,
            save_steps=1000,
            saver=saver,
            listeners=[saver_listener])
    # EVAL mode
    if mode == tf.estimator.ModeKeys.EVAL:
        tf_logits = lowering.export_to_tf_tensor(logits)
        return model.estimator_spec_eval(features, tf_logits, labels, tf_loss,
                                         restore_hook, use_tpu)
    if use_tpu:
        # TPU host call. Important: need to be called before remove_summaries()
        if hparams.tpu_enable_host_call:
            host_call = t2t_model.create_host_call(hparams.model_dir)
        else:
            host_call = None
        if hparams.warm_start_from:
            def scaffold_fn():
                # Warm-start from a checkpoint before building the Scaffold.
                t2t_model.initialize_from_ckpt(
                    ckpt_dir=hparams.warm_start_from, hparams=hparams)
                return tf.train.Scaffold()
        else:
            scaffold_fn = None
        t2t_model.remove_summaries()
        return tpu_estimator.TPUEstimatorSpec(
            mode=tf.estimator.ModeKeys.TRAIN,
            loss=tf_loss,
            train_op=train_op,
            host_call=host_call,
            training_hooks=[restore_hook, saver_hook],
            scaffold_fn=scaffold_fn)
    else:
        if hparams.warm_start_from:
            t2t_model.initialize_from_ckpt(
                ckpt_dir=hparams.warm_start_from, hparams=hparams)
        return tf.estimator.EstimatorSpec(
            tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op,
            training_chief_hooks=[restore_hook, saver_hook])
  def estimator_spec_eval(
      self, features, logits, labels, loss, restore_hook, use_tpu):
    """Construct EstimatorSpec for EVAL mode.

    Args:
      features: input features dict.
      logits: exported tf.Tensor of logits.
      labels: target labels tensor.
      loss: scalar tf loss tensor.
      restore_hook: MtfRestoreHook that copies master variables to slices.
      use_tpu: bool, whether to build a tpu_estimator.TPUEstimatorSpec.

    Returns:
      A tf.estimator.EstimatorSpec or tpu_estimator.TPUEstimatorSpec.
    """
    hparams = self.hparams
    problem = hparams.problem
    # Metric functions expect higher-rank logits; pad rank-3 logits with two
    # unit axes.
    if logits.get_shape().ndims == 3:
      logits = tf.expand_dims(tf.expand_dims(logits, 2), 3)

    # Support for multiproblem: evaluate metrics for every sub-task.
    task_list = [problem]
    if hasattr(problem, "task_list"):
      task_list = problem.task_list

    eval_metrics_fns = metrics.create_evaluation_metrics(task_list, hparams)

    if use_tpu:
      def metric_fn(tf_logits, labels):
        # Metrics run on the CPU host, outside mesh rewrites.
        with tf.device("cpu:0"), mtf.utils.outside_all_rewrites():
          eval_metrics = {}
          for metric_name, metric_fn in six.iteritems(eval_metrics_fns):
            # Skip metrics that are known not to work on TPU.
            if metric_name.split("/")[-1] not in t2t_model.TPU_METRIC_BLACKLIST:
              eval_metrics[metric_name] = metric_fn(
                  tf_logits, None, tf.identity(labels))
          return eval_metrics
      return tpu_estimator.TPUEstimatorSpec(
          tf.estimator.ModeKeys.EVAL,
          evaluation_hooks=[restore_hook],
          loss=loss,
          eval_metrics=(metric_fn, [logits, labels]))
    else:
      eval_metrics = {}
      predictions = {"predictions": logits}
      for metric_name, metric_fn in six.iteritems(eval_metrics_fns):
        eval_metrics[metric_name] = metric_fn(logits, features,
                                              features["targets"])
      return tf.estimator.EstimatorSpec(
          tf.estimator.ModeKeys.EVAL,
          predictions=predictions,
          eval_metric_ops=eval_metrics,
          evaluation_hooks=[restore_hook],
          loss=loss)
  def estimator_spec_predict(self, features, mesh, mesh_impl, use_tpu):
    """Construct EstimatorSpec for PREDICT mode.

    Args:
      features: input features dict.
      mesh: mtf.Mesh the sampling graph is built on.
      mesh_impl: mesh implementation used to lower mtf ops to tf.
      use_tpu: bool, whether to build a tpu_estimator.TPUEstimatorSpec.

    Returns:
      A tf.estimator.EstimatorSpec or tpu_estimator.TPUEstimatorSpec.
    """
    mtf_samples = mtf.anonymize(self.sample(features, mesh))
    lowering = mtf.Lowering(mesh.graph, {mesh: mesh_impl})
    outputs = lowering.export_to_tf_tensor(mtf_samples)
    if self.has_input:
      # Trim the outputs back to the actual batch size of the inputs
      # (outputs were presumably computed at a padded batch size — confirm).
      ndims = len(outputs.shape.as_list())
      actual_batch_size = tf.shape(features["inputs"])[0]
      outputs = tf.slice(
          outputs, [0] * ndims, [actual_batch_size] + [-1] * (ndims - 1))
    predictions = {
        "outputs": outputs
    }
    # Pass through targets/inputs when present so downstream decoding can
    # align predictions with them.
    if features.get("infer_targets") is not None:
      predictions["infer_targets"] = features["infer_targets"]
    if features.get("inputs") is not None:
      predictions["inputs"] = features["inputs"]
    if use_tpu:
      t2t_model.remove_summaries()
      return tpu_estimator.TPUEstimatorSpec(
          mode=tf.estimator.ModeKeys.PREDICT,
          predictions=predictions,
          prediction_hooks=[mtf.MtfRestoreHook(lowering)])
    else:
      return tf.estimator.EstimatorSpec(
          tf.estimator.ModeKeys.PREDICT,
          predictions=predictions,
          prediction_hooks=[mtf.MtfRestoreHook(lowering)])
  def sample(self, features, mesh):
    """Sample from the model. Subclasses must override.

    Args:
      features: input features dict.
      mesh: mtf.Mesh to build the sampling ops on.

    Raises:
      NotImplementedError: always, in this base implementation.
    """
    raise NotImplementedError("TODO(noam): write generic slow mtf sample.")
  def mtf_model_fn(self, features, mesh):
    """Build the mesh-tensorflow model graph. Subclasses must override.

    Args:
      features: input features dict.
      mesh: mtf.Mesh to build the model graph on.

    Raises:
      NotImplementedError: always, in this base implementation.
    """
    raise NotImplementedError("Not implemented")
|
import textract
import re
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import LabelEncoder
import sklearn.datasets
import nltk
nltk.data.path.append('/home/husein/nltk_data/')
from textblob import TextBlob
import random
import collections
from collections import OrderedDict
from fuzzywuzzy import fuzz
import numpy as np
def clearstring(string):
    """Normalize raw text: keep only letters and spaces, collapse extra
    whitespace, and lowercase the result."""
    letters_only = re.sub('[^A-Za-z ]+', '', string)
    words = [token.strip() for token in letters_only.split(' ') if token]
    return ' '.join(words).lower()
# Load the pre-processed MBTI dataset: one row per user with a personality
# type string and '|||'-separated forum posts.
df = pd.read_csv('processed_mbti.csv')
label = df.type.unique()  # distinct MBTI type strings, in first-appearance order
labelset = LabelEncoder().fit_transform(df.type)  # integer-encoded targets
trainset = df.posts.values
# Join each user's posts into one document by replacing the '|||' separator
# with spaces (mutates the underlying array in place).
for i in range(trainset.shape[0]):
    trainset[i] = ' '.join(trainset[i].split('|||'))
def separate_dataset(trainset):
    """Explode each document in a sklearn Bunch into per-line samples.

    Every document in ``trainset.data`` is split on newlines; each non-empty
    line is cleaned with ``clearstring`` and becomes its own sample carrying
    the parent document's target label.

    Args:
        trainset: sklearn ``Bunch``-like object with ``data`` (list of str)
            and ``target`` (sequence of labels) attributes.

    Returns:
        tuple: ``(datastring, datatarget)`` — parallel lists of cleaned
        lines and their labels.
    """
    datastring = []
    datatarget = []
    for i in range(len(trainset.data)):
        # Bug fix: the original assigned data_ = filter(None, ...) and then
        # called len()/indexed it, which crashes on Python 3 where filter()
        # returns a one-shot iterator. A list comprehension works on 2 and 3.
        lines = [clearstring(line) for line in trainset.data[i].split('\n') if line]
        datastring += lines
        # One label per extracted line.
        datatarget += [trainset.target[i]] * len(lines)
    return datastring, datatarget
# Load job-description documents (one sub-folder per class) and explode each
# document into per-line samples with separate_dataset.
job = sklearn.datasets.load_files(container_path = 'jobdescription', decode_error = 'replace')
job.data, job.target = separate_dataset(job)
# Shuffle samples and labels together so they stay aligned.
c = list(zip(job.data, job.target))
random.shuffle(c)
job.data, job.target = zip(*c)
# NOTE(review): SGDClassifier's n_iter parameter was replaced by max_iter in
# newer scikit-learn releases — confirm the pinned sklearn version supports it.
# dev_clf classifies text into job-description categories (bigram counts).
dev_clf = Pipeline([('vect', CountVectorizer(ngram_range=(1, 2))), ('clf', SGDClassifier(loss = 'modified_huber', penalty = 'l2', alpha = 1e-4, n_iter = 100, random_state = 42))])
dev_clf.fit(job.data, job.target)
# clf classifies whole documents into MBTI personality types (unigram counts).
clf = Pipeline([('vect', CountVectorizer()), ('clf', SGDClassifier(loss = 'modified_huber', penalty = 'l2', alpha = 1e-4, n_iter = 100, random_state = 42))])
clf.fit(trainset, labelset)
def clearstring_pdf(string):
    """Drop non-ASCII characters and collapse space-separated tokens
    (case is preserved, unlike clearstring)."""
    ascii_only = re.sub(r'[^\x00-\x7F]', '', string)
    tokens = [token.strip() for token in ascii_only.split(' ') if token]
    return ' '.join(tokens)
def get_detail(text):
    """Analyse resume text: job-category probabilities, MBTI personality
    estimate, and noun frequency counts.

    Relies on module-level globals fitted above: ``dev_clf`` (per-line job
    classifier), ``clf`` (MBTI classifier), ``job`` (Bunch with target_names)
    and ``label`` (MBTI type strings).

    NOTE(review): under Python 3 filter() returns a one-shot iterator, yet
    ``text`` below is iterated three times (blobs, predict_proba, join) —
    this function appears to assume Python 2; confirm the runtime.

    Args:
        text: raw resume text (e.g. extracted from a PDF).

    Returns:
        dict with keys 'developer', 'personality_percent', 'personality',
        'type', 'nouns', 'nouns_count'.
    """
    # Clean each line and drop empty ones.
    text = filter(None, [clearstring_pdf(t) for t in text.split('\n')])
    # POS-tag every line and keep proper nouns / nouns.
    blobs = [TextBlob(i).tags for i in text]
    nouns = []
    for blob in blobs:
        nouns += [b[0] for b in blob if b[1] == 'NNP' or b[1] == 'NN']
    # Skip the first 15 nouns — presumably header/contact boilerplate; verify.
    nouns = [n.lower() for n in nouns][15:]
    # Average per-line job-category probabilities over the whole document.
    prob = dev_clf.predict_proba(text)
    prob = np.mean(prob, axis = 0)
    dict_prob = {}
    for i in range(prob.shape[0]):
        dict_prob[job.target_names[i]] = float(prob[i])
    # MBTI prediction on the full document; collect all classes tied at the
    # maximum probability and pool their constituent letters.
    personality = clf.predict_proba([' '.join(text)])[0]
    unique = np.unique(personality)
    loc = np.where(personality == unique[-1])[0]
    personalities = []
    for i in loc:
        personalities += list(label[i])
    personalities_unique, personalities_count = np.unique(personalities, return_counts = True)
    personalities_count = (personalities_count * 1.0) / np.sum(personalities_count)
    # Build a 4-letter type from the most frequent letters, then snap it to
    # the closest known MBTI label by fuzzy string ratio.
    counts = collections.Counter(personalities)
    new_list = sorted(personalities, key = lambda x: -counts[x])
    new_list = ''.join(list(OrderedDict.fromkeys(new_list))[:4])
    new_type = label[np.argmax([fuzz.ratio(new_list, i) for i in label])]
    nouns_unique, nouns_count = np.unique(nouns, return_counts = True)
    return {'developer': dict_prob, 'personality_percent': personalities_count.tolist(), 'personality': personalities_unique.tolist(), 'type': new_type,
            'nouns': nouns_unique.tolist(), 'nouns_count': nouns_count.tolist()}
|
# Generated by Django 2.0.5 on 2018-06-20 15:08
from django.db import migrations
class Migration(migrations.Migration):
    """Set default ordering by name on the ``system`` model.

    Meta-options-only change (AlterModelOptions): no database schema change.
    """

    dependencies = [("core", "0001_initial")]

    operations = [
        migrations.AlterModelOptions(name="system", options={"ordering": ["name"]})
    ]
|
'''RALEIGH algebra implementations on CPU and GPU.
CPU implementations are based on numpy.ndarray.
'''
|
from .tcmr import TCMR
from .motion_discriminator import MotionDiscriminator
|
from spyd.registry_manager import register
@register('room_client_event_handler')
class StopDemoRecordingHandler(object):
    """Room client event handler for 'stop_demo_recording' events."""
    event_type = 'stop_demo_recording'

    @staticmethod
    def handle(room, client):
        # Intentionally a no-op: the event is accepted but nothing is done.
        pass
|
#The basic ADA interface, such as executing end effector motions, getting the state of the robot, etc.
from AdaAssistancePolicy import *
from UserBot import *
import AssistancePolicyVisualizationTools as vistools
#from GoalPredictor import *
from DataRecordingUtils import TrajectoryData
import numpy as np
import math
import time
import os, sys
import cPickle as pickle
import argparse
import tf
import tf.transformations as transmethods
import rospkg
import rospy
#import roslib
import openravepy
import adapy
import prpy
from ada_teleoperation.AdaTeleopHandler import AdaTeleopHandler, Is_Done_Func_Button_Hold
from ada_teleoperation.RobotState import *
# Module-level configuration for the ADA teleoperation handler.
SIMULATE_DEFAULT = False  # default for simulation vs. real-robot execution
#SIMULATE_VELOCITY_MOVEJOINT = False #NOTE if true, SIMULATE should also be true
#FLOATING_HAND_ONLY = False
RESAVE_GRASP_POSES = True  # presumably forces re-saving cached grasp poses — unused here, verify
cached_data_dir = 'cached_data'  # directory for cached data — unused in this file, verify
CONTROL_HZ = 40.  # control-loop rate in Hz (see time_per_iter in execute_policy)
num_control_modes = 2  # number of input control modes — semantics defined elsewhere, verify
#def Is_Done_Func_Button_Hold(env, robot, user_input):
    #return user_input.buttons_held[0]
    #if user_input.
class AdaHandler:
    """Runs ADA robot teleoperation with optional shared-autonomy assistance.

    Combines an AdaTeleopHandler (user input and robot command execution)
    with an AdaAssistancePolicy over a set of candidate goals, and executes
    the blended control loop in execute_policy().
    """
    def __init__(self, env, robot, goals, goal_objects, input_interface_name, num_input_dofs, use_finger_mode=True, goal_object_poses=None):
        """Set up the teleop handler, assistance policy and goal bookkeeping.

        Args:
            env: environment containing the robot and goal objects.
            robot: ADA robot instance; robot.simulated selects simulation mode.
            goals: goal objects consumed by AdaAssistancePolicy (each provides
                at_goal() and target poses — see usage below).
            goal_objects: scene objects corresponding to the goals.
            input_interface_name: name of the teleop input interface.
            num_input_dofs: number of input degrees of freedom.
            use_finger_mode: whether the input device drives the fingers.
            goal_object_poses: optional explicit goal poses; defaults to the
                current transforms of goal_objects.
        """
        # self.params = {'rand_start_radius':0.04,
        #             'noise_pwr': 0.3,  # magnitude of noise
        #             'vel_scale': 4.,   # scaling when sending velocity commands to robot
        #             'max_alpha': 0.6}  # maximum blending alpha value blended robot policy
        self.env = env
        self.robot = robot
        self.goals = goals
        self.goal_objects = goal_objects
        # Fall back to the objects' current transforms when no explicit poses given.
        if not goal_object_poses and goal_objects:
            self.goal_object_poses = [goal_obj.GetTransform() for goal_obj in goal_objects]
        else:
            self.goal_object_poses = goal_object_poses
        self.sim = robot.simulated
        self.manip = self.robot.arm
        self.ada_teleop = AdaTeleopHandler(env, robot, input_interface_name, num_input_dofs, use_finger_mode)#, is_done_func=Teleop_Done)
        self.robot_state = self.ada_teleop.robot_state
        self.robot_policy = AdaAssistancePolicy(self.goals)
        self.user_input_mapper = self.ada_teleop.user_input_mapper

    def GetEndEffectorTransform(self):
        """Return the arm's current end-effector transform."""
        # if FLOATING_HAND_ONLY:
        #     return self.floating_hand.GetTransform()
        # else:
        return self.manip.GetEndEffectorTransform()

    #def Get_Robot_Policy_Action(self, goal_distribution):
    #    end_effector_trans = self.GetEndEffectorTransform()
    #    return self.robot_policy.get_action(goal_distribution, end_effector_trans)

    def execute_policy(self, simulate_user=False, direct_teleop_only=False, blend_only=False, fix_magnitude_user_command=False, is_done_func=Is_Done_Func_Button_Hold, finish_trial_func=None, traj_data_recording=None):
        """Main control loop: blend user commands with assistance until done.

        Args:
            simulate_user: use a UserBot instead of the joystick for input.
            direct_teleop_only: never apply assistance; pure teleoperation.
            blend_only: use the blend policy instead of full shared autonomy.
            fix_magnitude_user_command: fix the assisted command magnitude to
                the user's input magnitude (uses retuned huber constants).
            is_done_func: predicate (env, robot, user_input) ending the loop.
            finish_trial_func: optional callback invoked after the loop ends.
            traj_data_recording: optional recorder for per-iteration trial data.

        NOTE(review): the copy.deepcopy/copy.copy calls below rely on ``copy``
        reaching this module via the wildcard imports at the top — confirm.
        """
        #goal_distribution = np.array([0.333, 0.333, 0.333])
        if simulate_user:
            self.user_bot = UserBot(self.goals)
            self.user_bot.set_user_goal(0)
        vis = vistools.VisualizationHandler()
        robot_state = self.robot_state
        robot_state.ee_trans = self.GetEndEffectorTransform()
        time_per_iter = 1./CONTROL_HZ
        # Assistance starts enabled unless pure teleop was requested; the user
        # can toggle it mid-trial with a button (see button_changes below).
        if direct_teleop_only:
            use_assistance = False
        else:
            use_assistance = True
        #set the huber constants differently if the robot movement magnitude is fixed to user input magnitude
        if not direct_teleop_only and fix_magnitude_user_command:
            for goal_policy in self.robot_policy.assist_policy.goal_assist_policies:
                for target_policy in goal_policy.target_assist_policies:
                    target_policy.set_constants(huber_translation_linear_multiplier=1.55, huber_translation_delta_switch=0.11, huber_translation_constant_add=0.2, huber_rotation_linear_multiplier=0.20, huber_rotation_delta_switch=np.pi/72., huber_rotation_constant_add=0.3, huber_rotation_multiplier=0.20, robot_translation_cost_multiplier=14.0, robot_rotation_cost_multiplier=0.05)
        #if specified traj data for recording, initialize
        if traj_data_recording:
            assist_type = 'shared_auton'
            if direct_teleop_only:
                assist_type = 'None'
            elif blend_only:
                assist_type = 'blend'
            elif fix_magnitude_user_command:
                assist_type = 'shared_auton_prop'
            traj_data_recording.set_init_info(start_state=copy.deepcopy(robot_state), goals=copy.deepcopy(self.goals), input_interface_name=self.ada_teleop.teleop_interface, assist_type=assist_type)
        while True:
            start_time = time.time()
            robot_state.ee_trans = self.GetEndEffectorTransform()
            ee_trans = robot_state.ee_trans
            robot_dof_values = self.robot.GetDOFValues()
            if simulate_user:
                #get pose of min value target for user's goal
                user_goal = self.user_bot.goal_num
                min_val_target_pose = self.robot_policy.assist_policy.goal_assist_policies[user_goal].get_min_value_pose()
                user_input_velocity = self.user_bot.get_usr_cmd(ee_trans, goal_pose=min_val_target_pose)
                user_input_all = UserInputData(user_input_velocity)
                #user_input_all.switch_assistance_val
                # Simulated user closes the hand once the end effector reaches the goal.
                if self.goals[user_goal].at_goal(ee_trans):
                    user_input_all.close_hand_velocity = 1.0
                else:
                    user_input_all.close_hand_velocity = 0.
            else:
                user_input_all = self.ada_teleop.joystick_listener.get_most_recent_cmd()
            #print user_input_all
            # user_input_velocity = user_input_all.move_velocity
            # user_input_closehand = user_input_all.close_hand_velocity
            direct_teleop_action = self.user_input_mapper.input_to_action(user_input_all, robot_state)
            #if left trigger not being hit, then execute with assistance
            # Button index 1 toggles assistance on a rising edge.
            if not direct_teleop_only and user_input_all.button_changes[1] == 1:
                use_assistance = not use_assistance
            # Always update the policy (and goal predictor) with the latest state.
            self.robot_policy.update(robot_state, direct_teleop_action)
            if use_assistance and not direct_teleop_only:
                #action = self.user_input_mapper.input_to_action(user_input_all, robot_state)
                if blend_only:
                    action = self.robot_policy.get_blend_action()
                else:
                    action = self.robot_policy.get_action(fix_magnitude_user_command=fix_magnitude_user_command)
            else:
                #if left trigger is being hit, direct teleop
                action = direct_teleop_action
            self.ada_teleop.ExecuteAction(action)
            ### visualization ###
            vis.draw_probability_text(self.goal_object_poses, self.robot_policy.goal_predictor.get_distribution())
            # for goal,goal_obj in zip(self.goals, self.goal_objects):
            #     marker_ns = goal_obj.GetName() + '_targets'
            #     vis.draw_hand_poses(goal.target_poses, marker_ns=marker_ns)
            #vis.draw_hand_poses([self.GetEndEffectorTransform()], marker_ns='ee_axis')
            vis.draw_action_arrows(ee_trans, direct_teleop_action.twist[0:3], action.twist[0:3]-direct_teleop_action.twist[0:3])
            ### end visualization ###
            end_time=time.time()
            if traj_data_recording:
                traj_data_recording.add_datapoint(robot_state=copy.deepcopy(robot_state), robot_dof_values=copy.copy(robot_dof_values), user_input_all=copy.deepcopy(user_input_all), direct_teleop_action=copy.deepcopy(direct_teleop_action), executed_action=copy.deepcopy(action), goal_distribution=self.robot_policy.goal_predictor.get_distribution())
            #print ('time: %.5f' % (end_time-start_time)) + ' per iter: ' + str(time_per_iter)
            #print 'sleep time: ' + str(max(0., time_per_iter - (end_time-start_time)))
            # Sleep off the remainder of the control period to hold CONTROL_HZ.
            rospy.sleep( max(0., time_per_iter - (end_time-start_time)))
            #if (max(action.finger_vel) > 0.5):
            if is_done_func(self.env, self.robot, user_input_all):
                break
        #set the intended goal and write data to file
        if traj_data_recording:
            values, qvalues = self.robot_policy.assist_policy.get_values()
            traj_data_recording.set_end_info(intended_goal_ind=np.argmin(values))
            traj_data_recording.tofile()
        #execute zero velocity to stop movement
        self.ada_teleop.execute_joint_velocities(np.zeros(len(self.manip.GetDOFValues())))
        if finish_trial_func:
            finish_trial_func()
|
"""Tests src/common/io.py"""
import os
import pytest
from common.io import input_file_path
def test_input_file_path_success(temporary_dir):
    """input_file_path resolves a directory holding one file, and a direct file path."""
    target_dir = os.path.join(temporary_dir, "test1")
    os.makedirs(target_dir, exist_ok=False)
    expected_path = os.path.join(target_dir, "random.out")
    with open(expected_path, "w") as out_file:
        out_file.write("something")
    # A directory with exactly one file resolves to that file.
    assert input_file_path(target_dir) == expected_path
    # A direct file path is returned unchanged.
    assert input_file_path(expected_path) == expected_path
def test_input_file_path_failure(temporary_dir):
    """input_file_path must raise when the directory holds two candidate files."""
    target_dir = os.path.join(temporary_dir, "test2")
    os.makedirs(target_dir, exist_ok=False)
    for file_name, content in (("random1.out", "something"),
                               ("random2.out", "something else")):
        with open(os.path.join(target_dir, file_name), "w") as out_file:
            out_file.write(content)
    # Two files make the lookup ambiguous -> expect an exception.
    with pytest.raises(Exception):
        assert input_file_path(target_dir)
|
# Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import socket
import mock
from nova import exception
from nova.tests.unit.virt.xenapi import stubs
from nova import version
from nova.virt.xenapi.client import session
class SessionTestCase(stubs.XenAPITestBaseNoDB):
    """Tests for XenAPISession construction and login behaviour."""

    @mock.patch.object(session.XenAPISession, '_create_session')
    @mock.patch.object(session.XenAPISession, '_get_product_version_and_brand')
    @mock.patch.object(session.XenAPISession, '_verify_plugin_version')
    def test_session_passes_version(self, mock_verify, mock_version,
                                    create_session):
        """Login must send the full nova version string and 'OpenStack'."""
        sess = mock.Mock()
        create_session.return_value = sess
        mock_version.return_value = ('version', 'brand')

        session.XenAPISession('url', 'username', 'password')

        expected_version = '%s %s %s' % (version.vendor_string(),
                                         version.product_string(),
                                         version.version_string_with_package())
        sess.login_with_password.assert_called_with('username', 'password',
                                                    expected_version,
                                                    'OpenStack')

    @mock.patch('eventlet.timeout.Timeout')
    @mock.patch.object(session.XenAPISession, '_create_session')
    @mock.patch.object(session.XenAPISession, '_get_product_version_and_brand')
    @mock.patch.object(session.XenAPISession, '_verify_plugin_version')
    def test_session_login_with_timeout(self, mock_verify, mock_version,
                                        create_session, mock_timeout):
        """With connection_concurrent=2, login runs twice, each under a Timeout."""
        self.flags(connection_concurrent=2, group='xenserver')
        sess = mock.Mock()
        create_session.return_value = sess
        mock_version.return_value = ('version', 'brand')

        session.XenAPISession('url', 'username', 'password')
        self.assertEqual(2, sess.login_with_password.call_count)
        self.assertEqual(2, mock_timeout.call_count)

    @mock.patch('eventlet.timeout.Timeout')
    @mock.patch.object(session.XenAPISession, '_create_session')
    @mock.patch.object(session.XenAPISession, '_get_product_version_and_brand')
    @mock.patch.object(session.XenAPISession, '_verify_plugin_version')
    @mock.patch.object(session.XenAPISession, '_get_host_uuid')
    @mock.patch.object(session.XenAPISession, '_get_host_ref')
    def test_session_raises_exception(self, mock_ref, mock_uuid,
                                      mock_verify, mock_version,
                                      create_session, mock_timeout):
        """A HOST_IS_SLAVE failure triggers re-login, then a pool login."""
        import XenAPI
        self.flags(connection_concurrent=2, group='xenserver')
        sess = mock.Mock()
        create_session.return_value = sess
        # First login fails, second login in except block succeeds,
        # third login for the pool succeeds
        sess.login_with_password.side_effect = [
            XenAPI.Failure(['HOST_IS_SLAVE', 'master']), None, None]
        mock_version.return_value = ('version', 'brand')

        session.XenAPISession('url', 'username', 'password')
        self.assertEqual(3, sess.login_with_password.call_count)
        self.assertEqual(3, mock_timeout.call_count)
class ApplySessionHelpersTestCase(stubs.XenAPITestBaseNoDB):
    """Each helper namespace added to the session must proxy to call_xenapi."""

    def setUp(self):
        super(ApplySessionHelpersTestCase, self).setUp()
        self.session = mock.Mock()
        session.apply_session_helpers(self.session)

    def _assert_proxies(self, namespace):
        # Invoke <namespace>.get_X("ref") and verify it is forwarded as a
        # single "<namespace>.get_X" XenAPI call with the same argument.
        getattr(self.session, namespace).get_X("ref")
        self.session.call_xenapi.assert_called_once_with(
            "%s.get_X" % namespace, "ref")

    def test_apply_session_helpers_add_VM(self):
        self._assert_proxies("VM")

    def test_apply_session_helpers_add_SR(self):
        self._assert_proxies("SR")

    def test_apply_session_helpers_add_VDI(self):
        self._assert_proxies("VDI")

    def test_apply_session_helpers_add_VBD(self):
        self._assert_proxies("VBD")

    def test_apply_session_helpers_add_PBD(self):
        self._assert_proxies("PBD")

    def test_apply_session_helpers_add_PIF(self):
        self._assert_proxies("PIF")

    def test_apply_session_helpers_add_VLAN(self):
        self._assert_proxies("VLAN")

    def test_apply_session_helpers_add_host(self):
        self._assert_proxies("host")

    def test_apply_session_helpers_add_network(self):
        self._assert_proxies("network")

    def test_apply_session_helpers_add_pool(self):
        self._assert_proxies("pool")
class CallPluginTestCase(stubs.XenAPITestBaseNoDB):
    """Tests for retry behaviour of call_plugin_serialized_with_retry."""

    def _get_fake_xapisession(self):
        # Build a XenAPISession without running the real __init__ (which would
        # attempt a connection); only the XenAPI attribute is stubbed.
        class FakeXapiSession(session.XenAPISession):
            def __init__(self, **kwargs):
                "Skip the superclass's dirty init"
                self.XenAPI = mock.MagicMock()

        return FakeXapiSession()

    def setUp(self):
        super(CallPluginTestCase, self).setUp()
        self.session = self._get_fake_xapisession()

    def test_serialized_with_retry_socket_error_conn_reset(self):
        """ECONNRESET is retried; after num_retries it raises PluginRetriesExceeded."""
        exc = socket.error()
        exc.errno = errno.ECONNRESET
        plugin = 'glance'
        fn = 'download_vhd'
        num_retries = 1
        callback = None
        retry_cb = mock.Mock()
        with mock.patch.object(self.session, 'call_plugin_serialized',
                               spec=True) as call_plugin_serialized:
            call_plugin_serialized.side_effect = exc
            self.assertRaises(exception.PluginRetriesExceeded,
                self.session.call_plugin_serialized_with_retry, plugin, fn,
                num_retries, callback, retry_cb)
            call_plugin_serialized.assert_called_with(plugin, fn)
            # Initial attempt + one retry, with the retry callback fired each time.
            self.assertEqual(2, call_plugin_serialized.call_count)
            self.assertEqual(2, retry_cb.call_count)

    def test_serialized_with_retry_socket_error_reraised(self):
        """Non-reset socket errors (ECONNREFUSED) are re-raised without retry."""
        exc = socket.error()
        exc.errno = errno.ECONNREFUSED
        plugin = 'glance'
        fn = 'download_vhd'
        num_retries = 1
        callback = None
        retry_cb = mock.Mock()
        with mock.patch.object(self.session, 'call_plugin_serialized',
                               spec=True) as call_plugin_serialized:
            call_plugin_serialized.side_effect = exc
            self.assertRaises(socket.error,
                self.session.call_plugin_serialized_with_retry, plugin, fn,
                num_retries, callback, retry_cb)
            call_plugin_serialized.assert_called_once_with(plugin, fn)
            self.assertEqual(0, retry_cb.call_count)

    def test_serialized_with_retry_socket_reset_reraised(self):
        """A persistent ECONNRESET exhausts retries and raises PluginRetriesExceeded."""
        exc = socket.error()
        exc.errno = errno.ECONNRESET
        plugin = 'glance'
        fn = 'download_vhd'
        num_retries = 1
        callback = None
        retry_cb = mock.Mock()
        with mock.patch.object(self.session, 'call_plugin_serialized',
                               spec=True) as call_plugin_serialized:
            call_plugin_serialized.side_effect = exc
            self.assertRaises(exception.PluginRetriesExceeded,
                self.session.call_plugin_serialized_with_retry, plugin, fn,
                num_retries, callback, retry_cb)
            call_plugin_serialized.assert_called_with(plugin, fn)
            self.assertEqual(2, call_plugin_serialized.call_count)
|
import datetime
import math
from bson.objectid import ObjectId
from mongoschema.bson_utils import get_dtype, _get_int
class TestMapper(object):
    """Tests for bson_utils.get_dtype's BSON type-name mapping.

    Bug fix: ``test_for_int_long_one`` was defined twice; the second
    definition silently shadowed the first, so the positive-long case never
    ran. The second is renamed ``test_for_int_long_two``.
    """

    def test_for_null_one(self):
        assert get_dtype(None) == "null"

    def test_for_null_two(self):
        assert get_dtype("") != "null"

    def test_for_null_three(self):
        assert get_dtype([]) != "null"

    def test_for_int_int(self):
        assert get_dtype(7) == "int"

    def test_for_int_long_one(self):
        assert get_dtype(21474836478) == "long"

    def test_for_int_long_two(self):
        # Renamed from a duplicate test_for_int_long_one (shadowed the above).
        assert get_dtype(-21474836476) == "long"

    def test_for_objectId(self):
        assert get_dtype(ObjectId()) == "ObjectId"

    def test_for_bool_true(self):
        assert get_dtype(True) == "bool"

    def test_for_bool_false(self):
        assert get_dtype(False) == "bool"

    def test_for_bool_not_a_bool(self):
        # bool is a subclass of int in Python; 0 must still map to a number.
        assert get_dtype(0) != "bool"

    def test_for_date_with_date(self):
        unix_time = datetime.date(1970, 1, 1)
        assert get_dtype(unix_time) == "Date"

    def test_for_date_with_datetime(self):
        unix_time = datetime.datetime(1970, 1, 1)
        assert get_dtype(unix_time) == "Date"

    def test_for_string(self):
        assert get_dtype("test") == "string"

    def test_for_array_with_list(self):
        assert get_dtype(["test"]) == "array"

    def test_for_array_with_python_array(self):
        """TODO test with pythons array"""
        pass

    def test_for_array_with_np_array(self):
        """TODO test with numpy arrays"""
        pass

    def test_for_object_with_empty_dict(self):
        assert get_dtype({}) == "object"

    def test_for_object_with_dict(self):
        assert get_dtype({"a": "test"}) == "object"

    def test_for_byte(self):
        assert get_dtype(b"11") == "binData"
class TestGetInt(object):
    """Boundary tests for _get_int's int/long classification."""

    def test_for_int_with_zero(self):
        result = _get_int(0)
        assert result == "int"

    def test_for_int_with_positive_integer(self):
        result = _get_int(42)
        assert result == "int"

    def test_for_int_with_negative_integer(self):
        result = _get_int(-42)
        assert result == "int"

    def test_for_int_with_pi(self):
        result = _get_int(math.pi)
        assert result == "int"

    def test_for_int_with_positive_long(self):
        # One past the 32-bit signed maximum.
        result = _get_int(2147483648)
        assert result == "long"

    def test_for_int_with_border_positive_int(self):
        # Exactly the 32-bit signed maximum must not be classified long.
        result = _get_int(2147483647)
        assert result != "long"

    def test_for_int_with_negative_long(self):
        result = _get_int(-2147483649)
        assert result == "long"

    def test_for_int_with_border_negative_int(self):
        result = _get_int(-2147483647)
        assert result != "long"

    def test_for_int_with_infinity(self):
        assert _get_int(math.inf) == "long"
        assert _get_int(math.inf) != "int"
|
from chpass.dal.db_connection import DBConnection
from chpass.dal.models.download import Download
class DownloadsTableAdapter(object):
    """Thin read adapter over the Chrome downloads table."""

    def __init__(self, db_connection: DBConnection) -> None:
        # Keep a handle on the shared connection; all queries delegate to it.
        self._db_connection = db_connection

    def get_chrome_downloads(self, serializable: bool = False) -> list:
        """Return all Download rows; the serializable flag is forwarded as-is
        to the connection's select()."""
        connection = self._db_connection
        return connection.select(Download, serializable=serializable)
|
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import one_hot
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dropout, Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import GlobalMaxPooling1D
from tensorflow.keras import layers
from tensorflow.keras.layers import Embedding
from sklearn.model_selection import train_test_split
from tensorflow.keras import regularizers
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.utils import plot_model
import matplotlib.pyplot as plt
import numpy as np
import re
# Module-level Tokenizer; set by verwerk_invoerdata and reused by
# maak_klaar_voor_voorspelling so predictions share the same word index.
tokenizer = None

def verwerk_invoerdata(X_train, X_test, y_train, y_test, maxlen=100, maxwords=5000):
    """Tokenize and pad train/test texts; cast everything to float32.

    Args:
        X_train, X_test: iterables of raw text documents.
        y_train, y_test: label arrays (assumed numpy-like; astype is called).
        maxlen: padded sequence length (default 100).
        maxwords: vocabulary cap for the Tokenizer (default 5000).

    Returns:
        Tuple (X_train, X_test, y_train, y_test) as float32 arrays.
    """
    global tokenizer
    # Fit the vocabulary on the training texts only (no test-set leakage).
    tokenizer = Tokenizer(num_words=maxwords)
    tokenizer.fit_on_texts(X_train)
    X_train = tokenizer.texts_to_sequences(X_train)
    X_test = tokenizer.texts_to_sequences(X_test)
    # 'post' pads/truncates at the end so every sequence has length maxlen.
    X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
    X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)
    return X_train.astype(np.float32), X_test.astype(np.float32), y_train.astype(np.float32), y_test.astype(np.float32)
def maak_klaar_voor_voorspelling(tekst, maxlen):
    """Clean one text and convert it to a padded float32 sequence.

    Requires that verwerk_invoerdata has already fitted the module-level
    tokenizer (it is None until then, which would raise AttributeError).
    """
    global tokenizer
    tekst = tekst_opschonen_hidden(tekst)
    tokens = tokenizer.texts_to_sequences([tekst])
    tokens = pad_sequences(tokens, padding='post', maxlen=maxlen)
    return tokens.astype(np.float32)
# Pre-compiled text-cleanup patterns. Bug fix: the patterns are now raw
# strings — '\s' in a plain string is an invalid escape sequence
# (DeprecationWarning, and a SyntaxWarning from Python 3.12 on).
opkuisregel1 = re.compile(r'<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')  # HTML tags and entities
opkuisregel2 = re.compile(r'[^a-zA-Z]')  # every character that is not a letter
opkuisregel3 = re.compile(r'\s+[a-zA-Z]\s+')  # single letters between whitespace
opkuisregel4 = re.compile(r'\s+')  # runs of whitespace

def verwijder_html(tekst):
    """Replace HTML tags and entities with a single space."""
    return re.sub(opkuisregel1, ' ', tekst)

def vervang_leestekens(tekst):
    """Replace every non-letter character with a space."""
    return re.sub(opkuisregel2, ' ', tekst)

def verwijder_enkelvoudige_karakters(tekst):
    """Remove single letters standing alone between whitespace."""
    return re.sub(opkuisregel3, ' ', tekst)

def verwijder_meervoudige_spaties(tekst):
    """Collapse runs of whitespace into one space."""
    return re.sub(opkuisregel4, ' ', tekst)
def maak_staafdiagram_polariteiten(polariteiten, aantallen):
    """Show a bar chart of review counts for the two polarity labels."""
    plt.bar([0, 1], aantallen, align='center')  # one bar per polarity
    plt.xticks([0, 1], polariteiten)  # label the x-axis with the polarity names
    plt.show()  # display the figure
def tekst_opschonen_hidden(tekst):
    """Full cleanup pipeline: strip HTML, replace punctuation, drop stray
    single letters, and collapse whitespace — in that order."""
    stappen = (
        verwijder_html,                   # drop HTML tags/entities
        vervang_leestekens,               # punctuation -> spaces
        verwijder_enkelvoudige_karakters, # remove stray single letters
        verwijder_meervoudige_spaties,    # collapse whitespace runs
    )
    for stap in stappen:
        tekst = stap(tekst)
    return tekst
def kuis_dataset_op(dataset):
    """Clean every review text in column 0 of the dataset array."""
    def _opschonen(rij):
        return tekst_opschonen_hidden(rij[0])
    return np.apply_along_axis(_opschonen, 1, dataset[:, 0, None])
def zet_polariteiten_om_naar_getallen(dataset):
    """Map the polarity column (index 1) to 1 for 'positive', 0 otherwise."""
    def _naar_getal(rij):
        return 1 if rij[0] == 'positive' else 0
    return np.apply_along_axis(_naar_getal, 1, dataset[:, 1, None])
def stel_deeplearning_model_op(maximum_aantal_woorden, maxlen=100, embedding_dim=100):
    """Build the sentiment model: embedding -> flatten -> single sigmoid unit.

    Generalized: the previously hard-coded sequence length and embedding size
    are now keyword parameters whose defaults preserve the old behaviour
    (both 100, matching verwerk_invoerdata's default maxlen).

    Args:
        maximum_aantal_woorden: vocabulary size for the embedding layer.
        maxlen: input sequence length (default 100).
        embedding_dim: size of each word-embedding vector (default 100).

    Returns:
        An uncompiled keras Sequential model.
    """
    model = Sequential()
    # One embedding vector per word index; input_length fixes the sequence size.
    model.add(layers.Embedding(maximum_aantal_woorden, embedding_dim,
                               input_length=maxlen,
                               name='Omzetting_woord_naar_eigenschappen'))
    # Concatenate all word embeddings of the sentence into one feature vector.
    model.add(Flatten(name='Samenvoegen_eigenschappen_zin'))
    # Single sigmoid output: probability that the sentiment is positive.
    model.add(layers.Dense(1, activation='sigmoid', name='Voorspellen_sentiment'))
    return model
def train_model(model, X_train, y_train, X_test, y_test):
    """Compile and fit the model, checkpointing the best epoch.

    Bug fix: the fit History was assigned to a local and discarded, so the
    sibling plot_learning_curves(history) had nothing to plot; it is now
    returned (backward compatible — the function previously returned None).

    Returns:
        The keras History object produced by model.fit.
    """
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    # Keep only the weights of the epoch with the best validation accuracy.
    checkpoint1 = ModelCheckpoint("best_model.hdf5", monitor='val_accuracy', verbose=0,
                                  save_best_only=True, save_freq='epoch',
                                  save_weights_only=False)
    history = model.fit(X_train, y_train, epochs=5,
                        validation_data=(X_test, y_test), callbacks=[checkpoint1])
    return history
def plot_learning_curves(history):
    """Plot accuracy (left) and loss (right) learning curves side by side.

    Args:
        history: keras History object with 'accuracy', 'val_accuracy',
            'loss' and 'val_loss' series in history.history.
    """
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.plot(history.history['accuracy'])
    ax1.plot(history.history['val_accuracy'])
    ax1.set_title('model accuracy')
    # Bug fix: these labels belong to the accuracy subplot (ax1); the
    # original set them on ax2, where the loss labels below immediately
    # overwrote them, leaving ax1 unlabeled.
    ax1.set(xlabel='epoch', ylabel='accuracy')
    ax1.legend(['train', 'val'], loc='upper left')
    ax2.plot(history.history['loss'])
    ax2.plot(history.history['val_loss'])
    ax2.set_title('model loss')
    ax2.set(xlabel='epoch', ylabel='loss')
    ax2.legend(['train', 'val'], loc='upper left')
    plt.show()
|
from algorithms.treap import Treap
from algorithms.queue import Queue |
from abc import ABC, abstractmethod
class BaseSamlParser(ABC):
"""
Generalized SAML response parser
"""
def __init__(self):
"""
Parses SAML response from base64 input.
Args:
response (basestring): SAML response as a base64-encoded string
Raises:
(SamlResponseEncryptedError) Raised when SAML response is encrypted
"""
pass
@classmethod
@abstractmethod
def from_xml(cls, xml):
"""
Instantiates the class using XML input.
Args:
xml (basestring): SAML response as stringified XML document
Returns:
(BaseSamlParser) parsed SAML response object
"""
pass
@classmethod
@abstractmethod
def from_base64(cls, base64):
"""
Instantiates the class using base64-encoded XML input.
Args:
base64 (basestring): SAML response as base64-encoded XML document
Returns:
(BaseSamlParser) parsed SAML response object
"""
pass
@abstractmethod
def get_certificate(self):
"""
Retrieves text of X.509 public certificate included in the SAML response.
Returns:
(basestring) Certificate contents as string
Raises:
(ValueError) Raised when the certificate entry is not found in the data
"""
pass
@abstractmethod
def get_subject_name_id(self):
"""
Retrieves the Name ID value from the subject section.
Returns:
(basestring) Value of the Name ID
Raises:
(ValueError) Raised when the Name ID entry is not found in the data
"""
pass
@abstractmethod
def get_subject_name_id_format(self):
"""
Retrieves the Name ID format from the subject section.
Returns:
(basestring) Format attribute of Name ID
Raises:
(ValueError) Raised when the Name ID entry is not found in the data
"""
pass
@abstractmethod
def get_assertion_consumer_service_url(self):
"""
Retrieves the service provider's Assertion Consumer Service URL.
Returns:
(basestring) Value of Assertion Consumer Service URL
Raises:
(ValueError) Raised when the Assertion Consumer Service
entry is not found in the data
"""
pass
@abstractmethod
def get_encryption_algorithm(self):
"""
Retrieves the encryption algorithm used for certificate. Should be
"sha1" or "sha256".
Returns:
(basestring) Value of encryption algorithm
Raises:
(ValueError) Raised when the encryption algorithm
entry is not found in the data
"""
pass
@abstractmethod
def get_audience_url(self):
"""
Retrieves the service provider's Audience URL.
Returns:
(basestring) Value of encryption algorithm
Raises:
(ValueError) Raised when the Audience URL
entry is not found in the data
"""
pass
@abstractmethod
def get_issuer_uri(self):
"""
Retrieves the identity provider's Audience URL.
Returns:
(basestring) Value of encryption algorithm
Raises:
(ValueError) Raised when the Issuer URI
entry is not found in the data
"""
pass
    @abstractmethod
    def get_attributes(self):
        """
        Retrieves the identity provider's claim attributes.

        Returns:
            The claim attributes parsed from the response (exact container
            type is implementation-defined; confirm against subclasses)
        Raises:
            (ValueError) Raised when the attributes
                are not found in the data
        """
        pass
    @abstractmethod
    def is_assertion_found(self):
        """
        Checks if the response contains exactly one assertion.

        Returns:
            (bool): True if the response contains one assertion, False otherwise
        """
        pass
    @abstractmethod
    def get_xml(self, pretty=False):
        """
        Return raw XML of SAML response.

        Args:
            pretty (bool): Pretty-prints XML if True. False is XML in one line.
                Default: False.
        Returns:
            (basestring) SAML response as XML string
        """
        pass
    @abstractmethod
    def found_any_values(self):
        """
        Checks to see if we were able to parse any values at all.

        Returns:
            (bool) True if any values were able to be parsed, False otherwise
        """
        pass
|
import json
from dataclasses import dataclass, field
from typing import Callable
from pathlib import Path
from bitarray import bitarray, frozenbitarray
from bitarray.util import int2ba, ba2int
from frozendict import frozendict
from .circuit_parser import GateShape, GateReference, SPECIAL_RED, NORMAL, CircuitPin, BigShape, SCHEMATICS_PATH, \
CUSTOM, \
Circuit, SPECIAL_GREEN
from .logic_nodes import LogicNodeType, DirectLogicNodeType, InputPin, OutputPin, build_or as ln_build_or, \
builtins_gates, CombinedLogicNode, Wire
from .specification_parser import load_all_components, spec_components
def ram_func(args, state: frozenbitarray, delayed):
    """Simulate one tick of a 256-byte RAM.

    Reads return the addressed byte when "load" is asserted (zeros
    otherwise); writes land only on the delayed phase when "save" is
    asserted. `state` holds the whole memory as 256*8 bits.
    """
    lo = ba2int(args["address"]) * 8
    hi = lo + 8
    if args["load"].any():
        word = state[lo:hi]
    else:
        word = frozenbitarray((0,) * 8)
    outputs = frozendict({"value_out": word})
    next_state = state
    if delayed and args["save"].any():
        # Splice the written byte into an otherwise unchanged memory image.
        next_state = state[:lo] + args["value_in"] + state[hi:]
    return outputs, next_state
program = bitarray([0] * 8 * (2 ** 8), endian="little")
def build_rom(shape: GateShape, data):
    """Build a ROM node whose outputs read consecutive bytes of `program`.

    Output pin i returns the byte at (address + i) mod 256.
    """
    outputs = [pin_name for pin_name, pin in shape.pins.items() if not pin.is_input]

    def step(args, state: frozenbitarray, delayed):
        base = ba2int(args["address"])
        values = {}
        for offset, pin_name in enumerate(outputs):
            idx = (base + offset) % 256
            values[pin_name] = program[idx * 8:idx * 8 + 8]
        return frozendict(values), state

    return DirectLogicNodeType(
        shape.name,
        frozendict({"address": InputPin(8)}),
        frozendict({pin_name: OutputPin(8) for pin_name in outputs}),
        0,
        step,
    )
screens = {}
@dataclass
class AsciiScreen:
    """18x14-cell ASCII screen peripheral with an optional back buffer."""
    background_color: tuple[int, int, int]
    # Interleaved cells: 2 bytes per cell -> [color, char] * 18 * 14.
    ascii_screen: bytearray = field(default_factory=lambda: bytearray([0] * 18 * 14 * 2))
    ascii_screen_buffer: bytearray = field(default_factory=lambda: bytearray([0] * 18 * 14 * 2))
    ascii_cursor: int = 0

    def func(self, args, state: frozenbitarray, delayed):
        """Tick handler; writes only happen on the delayed phase.

        `state` is a single bit: when set, writes go to the back buffer.
        Cursor values 252-255 are control codes, anything lower moves the
        cursor.
        """
        if not delayed:
            return frozendict(), state
        if args["write_cursor"].any():
            match ba2int(args["cursor"]):
                case 252:
                    # Toggle buffered mode (flip the state bit).
                    state = ~state
                case 253:
                    # Swap front and back buffers.
                    self.ascii_screen, self.ascii_screen_buffer = self.ascii_screen_buffer, self.ascii_screen
                case 254:
                    # Clear the front buffer.
                    self.ascii_screen[:] = (0,) * len(self.ascii_screen)
                case 255:
                    # No-op control code.
                    pass
                case v:
                    self.ascii_cursor = v
        buffered = state[0]
        target = self.ascii_screen_buffer if buffered else self.ascii_screen
        if args["write_color"].any():
            target[self.ascii_cursor * 2] = ba2int(args["color"])
        if args["write_char"].any():
            target[self.ascii_cursor * 2 + 1] = ba2int(args["char"])
        return frozendict(), state
def build_ascii(gate):
    """Build (or reuse) the AsciiScreen node for a gate.

    `gate.custom_data`, when present, is a 6-hex-digit RGB background color.
    Screens are cached per gate id so rebuilding keeps the display contents.
    """
    try:
        screen = screens[gate.id]
    except KeyError:
        raw = gate.custom_data
        if raw:
            color = (int(raw[0:2], 16), int(raw[2:4], 16), int(raw[4:6], 16))
        else:
            color = (0, 0, 0)
        screen = screens[gate.id] = AsciiScreen(color)
    return DirectLogicNodeType("AsciiScreen", frozendict({
        "write_cursor": InputPin(1, True),
        "cursor": InputPin(8, True),
        "write_color": InputPin(1, True),
        "color": InputPin(8, True),
        "write_char": InputPin(1, True),
        "char": InputPin(8, True),
    }), frozendict(), 1, screen.func)
def stack_func(args, state: frozenbitarray, delayed):
    """
    Simulate a 256-entry 8-bit stack.

    `state` layout: 256*8 data bits followed by an 8-bit little-endian
    stack pointer in the final byte.
    """
    assert len(state[-8:]) == 8
    address = ba2int(state[-8:])
    if args["load"].any():
        # Pop: read the byte just below the current pointer.
        address -= 1
        address %= 256
        ret = frozendict({
            "value_out": state[address * 8:address * 8 + 8]
        })
    else:
        ret = frozendict({
            "value_out": frozenbitarray((0,) * 8)
        })
    if args["save"].any():
        # Push: write at the pointer, then advance it (wrapping at 256).
        new_state = state[:address * 8] + args["value_in"] + state[address * 8 + 8:]
        address += 1
        address %= 256
    else:
        new_state = state
    if delayed:
        # Commit both the written data and the updated pointer.
        new_state = new_state[:-8] + frozenbitarray(int2ba(address, 8, endian="little"))
    else:
        # NOTE(review): on non-delayed ticks the write above is discarded
        # (new_state reset to the original state) — presumably so mutations
        # only land on the delayed phase, mirroring ram_func; confirm.
        new_state = state
    return ret, new_state
def error(*_args):
    """Placeholder step function for nodes that must never execute."""
    raise ValueError("Can't execute this node")
def byte_constant(_, raw_value):
    """Build a stateless node that always outputs one fixed byte.

    `raw_value` is parsed with int(); falsy values (None, "") mean 0.
    """
    number = int(raw_value) if raw_value else 0
    bits = frozenbitarray(int2ba(number, 8, endian="little"))
    payload = frozendict({"out": bits})

    def emit(*_args):
        return payload, None

    return DirectLogicNodeType(
        f"Constant{number}",
        frozendict(),
        frozendict({"out": OutputPin(8)}),
        0,
        emit,
    )
def buffer(args, *_):
    """Stateless pass-through: copy the "in" bits straight to "out"."""
    outputs = frozendict({"out": args["in"]})
    return outputs, None
last_key: int = 0
def keyboard(args, _, _1):
    """Emit the last pressed key as a byte while "enable" is asserted, else 0."""
    code = last_key if args["enable"].any() else 0
    return frozendict({"out": frozenbitarray(int2ba(code, 8, "little"))}), None
# Shared single-bit constants used by comparison nodes below.
one = frozenbitarray("1")
zero = frozenbitarray("0")
def mul_func(args, _, _1):
    """8-bit unsigned multiply; result is taken modulo 256."""
    product = ba2int(args["a"], signed=False) * ba2int(args["b"], signed=False)
    return frozendict({"out": int2ba(product % 256, 8, "little")}), None
def less_func(args, _, _1):
    """Compare a < b, producing both unsigned and signed single-bit results."""
    a_bits = args["a"]
    b_bits = args["b"]
    unsigned_lt = ba2int(a_bits, signed=False) < ba2int(b_bits, signed=False)
    signed_lt = ba2int(a_bits, signed=True) < ba2int(b_bits, signed=True)
    return frozendict({
        "unsigned": one if unsigned_lt else zero,
        "signed": one if signed_lt else zero,
    }), None
def build_counter(gate_name, custom_data):
    """Build an 8-bit counter node that advances by a fixed step.

    `custom_data` is the decimal step size; it is wired into a VARCOUNTER_8
    via a constant-byte node.
    """
    step = int(custom_data)
    wiring = (
        Wire((None, "in"), ("counter", "in")),
        Wire((None, "overwrite"), ("counter", "save")),
        Wire(("delta", "out"), ("counter", "delta")),
        Wire(("counter", "out"), (None, "out")),
    )
    return CombinedLogicNode(
        f"Counter{step}",
        frozendict({
            "counter": spec_components["VARCOUNTER_8"],
            "delta": byte_constant("", step),
        }),
        frozendict({
            "in": InputPin(8, True),
            "overwrite": InputPin(1, True),
        }),
        frozendict({"out": OutputPin(8)}),
        wiring,
    )
# JSON category name -> gate render color.
category_colors = {
    "IO": SPECIAL_RED,
    "special": SPECIAL_RED,
    "special_green": SPECIAL_GREEN,
    "normal": NORMAL,
}
# Per-component overrides for the label drawn on a gate; anything not listed
# here falls back to str(custom_data or name) (see load_components).
text_functions = {
    "ByteConstant": lambda gate: str(gate.custom_data or 0),
    "Program1": lambda gate: gate.name,
    "Program2": lambda gate: gate.name,
    "Program3": lambda gate: gate.name,
    "Program4": lambda gate: gate.name,
}
def build_or(shape: GateShape, data):
    """
    Build an OR logic node from a gate shape's pins.

    All pins must agree on width (all 1-bit or all byte); the single
    non-input pin becomes the output.
    """
    is_byte = None
    ins = []
    out = None
    for n, p in shape.pins.items():
        if is_byte is None:
            is_byte = p.is_byte
        else:
            # Mixed 1-bit/byte pins on one OR gate are unsupported.
            assert is_byte == p.is_byte
        if p.is_input:
            ins.append(n)
        else:
            # Exactly one output pin allowed.
            assert out is None
            out = n
    # NOTE(review): bit_size is 0 for non-byte pins — presumably ln_build_or
    # interprets 0 as "single bit"; confirm. Also assumes the shape has at
    # least one pin (is_byte stays None otherwise and indexing would raise).
    return ln_build_or(*ins, bit_size=[0, 8][is_byte], out_name=out)
def noop(*_):
    """Step function that produces no outputs and keeps no state."""
    empty = frozendict()
    return empty, None
def load_components():
    """
    Load the built-in component catalogue from tc_components.json.

    Returns:
        (components, node_to_component): component name -> (GateShape,
        LogicNodeType or factory callable), and the reverse map from
        logic-node name back to (component name, "").
    """
    with Path(__file__).with_name("tc_components.json").open() as f:
        data = json.load(f)
    components: dict[str, tuple[GateShape, LogicNodeType | Callable[[GateReference], LogicNodeType]]] = {}
    node_to_component: dict[str, tuple[str, str]] = {}
    for category, raw in data.items():
        assert category in category_colors, category
        color = category_colors[category]
        for name, d in raw.items():
            shape = GateShape(
                name,
                color,
                {
                    str(pn): CircuitPin(pd["pos"], pd["type"] == "input", pd["size"] == "byte", pd.get("is_delayed", False), pn)
                    for pn, pd in d["pins"].items()
                },
                d["blocks"],
                category == "IO",
                text_functions.get(name, lambda gate: str(gate.custom_data or gate.name)),
                (BigShape(*d["big_shape"]) if "big_shape" in d else None)
            )
            # HACK: "func" strings from the bundled JSON are eval'd to resolve
            # step/factory functions defined in this module. Safe only because
            # tc_components.json ships with the code; never feed it user data.
            if d["type"] == "generate":
                node = eval(d["func"])
            elif d["type"] in ("direct", "error", "virtual"):
                # Pin widths: 1 bit normally, 8 for byte pins.
                node = DirectLogicNodeType(
                    name, frozendict({
                        pn: InputPin((1, 8)[cp.is_byte], cp.is_delayed)
                        for pn, cp in shape.pins.items() if cp.is_input
                    }), frozendict({
                        pn: OutputPin((1, 8)[cp.is_byte])
                        for pn, cp in shape.pins.items() if not cp.is_input
                    }), d["state_size"], eval(d.get("func", "error")))
            elif d["type"] == "build":
                node = eval(d["func"])(shape, d)
            elif d["type"] == "builtin":
                node = builtins_gates[d["builtin_name"]]
            elif d["type"] == "combined":
                node = spec_components[d["spec"]]
            else:
                assert False, d["type"]
            components[name] = shape, node
            # Factories (callables) are registered lazily in get_component.
            if isinstance(node, LogicNodeType):
                assert node.name not in node_to_component, f"Non unique node name {node.name}"
                node_to_component[node.name] = name, ""
    return components, node_to_component
def compute_gate_shape(circuit, name: str) -> GateShape:
    """
    Compute (and cache on the circuit) the GateShape of a custom circuit.

    IO gates inside the circuit become the pins of the resulting custom
    component; every other gate contributes one body block.
    """
    if circuit.shape is not None:
        # Cached: just ensure the cached shape's name stays consistent.
        if circuit.shape.name is None:
            circuit.shape.name = name
        assert name is None or circuit.shape.name == name, (circuit.shape.name, name)
        return circuit.shape

    def translate(p):
        # Map raw editor coordinates onto the component's local block grid.
        return (int((p[0] + 30) // 8 - 3), int((p[1] + 30) // 8 - 3))
    blocks = set()
    for gate in circuit.gates:
        blocks.add(translate(gate.pos))
    pins = {}
    for gate in circuit.gates:
        if gate.name in std_components and ((io_shape := std_components[gate.name][0]).is_io):
            p = translate(gate.pos)
            # IO gates become pins, not body blocks.
            if p in blocks:
                blocks.remove(p)
            for pin_name, pin in io_shape.pins.items():
                # Multi-pin IO gates get "<gate id>.<pin>" keys; single-pin
                # gates reuse the gate id directly.
                if len(io_shape.pins) > 1:
                    out_pin = f"{gate.id}.{pin_name}"
                else:
                    out_pin = gate.id
                if gate.custom_data:
                    # custom_data of the form "label:name" supplies the label.
                    cc_pin_name = gate.custom_data.partition(':')[-1]
                else:
                    cc_pin_name = out_pin
                # Direction flips: the circuit's input gate is the custom
                # component's output pin, and vice versa.
                pins[str(out_pin)] = CircuitPin(p, not pin.is_input, pin.is_byte, name=cc_pin_name)
    circuit.shape = GateShape(name, CUSTOM, pins, list(blocks), text=lambda _: name)
    return circuit.shape
@dataclass
class _CustomComponentRef:
    """Lazily-compiled handle to a user-made (custom) component on disk."""
    path: Path        # component directory, relative to the factory root
    circuit: Circuit  # parsed circuit.data contents
    shape: GateShape = None       # cached by get()
    node: LogicNodeType = None    # cached by get(); expensive to build

    def get(self, no_node: bool = False):
        """
        Resolve (shape, node), computing and caching each on first use.

        With no_node=True the costly gate compilation is skipped and the
        returned node may still be None.
        """
        if self.shape is None:
            self.shape = compute_gate_shape(self.circuit, f"Custom_{self.path.name}")
        if self.node is None and not no_node:
            # Imported here to avoid a circular import with circuit_compiler.
            from .circuit_compiler import build_gate
            self.node = build_gate(f"Custom_{self.path.name}", self.circuit)
            if self.node.name in rev_components:
                raise ValueError(f"Non unique node name {self.node.name} (for Custom component {self.path.name})")
            rev_components[self.node.name] = ("Custom", self.id)
        return self.shape, self.node

    @property
    def id(self) -> int:
        # The save_version doubles as the component's unique id.
        return self.circuit.save_version

    @property
    def name(self):
        return self.path.name
def load_custom():
    """
    Scan SCHEMATICS_PATH/component_factory for circuit.data files and
    register each custom component, indexed by path and by save-version id.
    """
    global cc_by_path, cc_by_id
    base = SCHEMATICS_PATH / "component_factory"
    for path in Path(base).rglob("circuit.data"):
        try:
            circuit = Circuit.parse(path.read_bytes())
            # Don't compile immediately. Wait if we are asked
            ref = _CustomComponentRef(path.relative_to(base).parent, circuit)
            cc_by_id[circuit.save_version] = ref
            cc_by_path[str(path.relative_to(base).parent)] = ref
        except Exception as e:
            # Best-effort: one corrupt save must not break startup.
            print(type(e), e)
# Built-in component catalogue and the reverse node-name lookup table.
std_components: dict[str, tuple[GateShape, LogicNodeType]]
rev_components: dict[str, tuple[str, str | int]]
std_components, rev_components = load_components()
# Custom components, indexed by relative path and by save-version id.
cc_by_path: dict[str, _CustomComponentRef] = {}
cc_by_id: dict[int, _CustomComponentRef] = {}
if SCHEMATICS_PATH is not None:
    load_custom()
def get_custom_component(custom_data: str | int, no_node: bool = False):
    """Look up a custom component by numeric id, falling back to its path.

    Ensures the ref's shape (and, unless no_node, its node) is computed
    before returning it.
    """
    ref = None
    try:
        key = int(custom_data)
    except ValueError:
        # Not numeric at all: must be a path key.
        ref = cc_by_path[custom_data]
    else:
        try:
            ref = cc_by_id[key]
        except KeyError:
            # Numeric but unknown id: try it as a path string.
            ref = cc_by_path[custom_data]
    ref.get(no_node)
    return ref
def get_component(gate_name: str | GateReference, custom_data: str | int = None, no_node: bool = False) \
        -> tuple[GateShape, LogicNodeType | None]:
    """
    Resolve a gate name or GateReference to its (shape, node) pair.

    "Custom" gates are delegated to get_custom_component; factory components
    (where the catalogue stores a callable) are instantiated on demand and
    registered in rev_components, with uniqueness checks.
    """
    if isinstance(gate_name, GateReference):
        # Custom gates carry their id in custom_id, others in custom_data.
        custom_data = gate_name.custom_id if gate_name.name == "Custom" else gate_name.custom_data
        gate_name = gate_name.name
    if gate_name == "Custom":
        return get_custom_component(custom_data, no_node).get(no_node)
    s, n = std_components[gate_name]
    if callable(n):
        # Factory component: build the node now (unless the caller opted out).
        if not no_node:
            n = n(gate_name, custom_data)
            if n.name in rev_components:
                prev = rev_components[n.name]
                if prev[0] != gate_name:
                    assert False, f"Non unique name {n.name} for {(gate_name, custom_data)} (previous: {prev})"
                # "" and "0" both mean "default variant"; treat as equal.
                if prev[1] != custom_data and {prev[1], custom_data} != {"", "0"}:
                    assert False, f"Non unique name {n.name} for {(gate_name, custom_data)} (previous: {prev})"
            else:
                rev_components[n.name] = gate_name, custom_data
        else:
            n = None
        return s, n
    else:
        return s, n
|
import pygit2
import tempfile
import shutil
from os import path
def git_clone(repo_url, branch='master'):
    '''
    Clone ``repo_url`` into a fresh temporary directory under /tmp.

    Args:
        repo_url (str): URL of the git repository. Falsy values (empty
            string, None) are rejected without touching the filesystem.
        branch (str): branch to check out after cloning. Default: 'master'.

    Returns:
        str: path of the temporary clone directory, or False when no
        repository URL was supplied.
    '''
    # Guard against None as well as '' (the old `== ''` check let None
    # through and crashed inside pygit2).
    if not repo_url:
        return False
    # NOTE: dir='/tmp' is hardcoded; tempfile's default temp dir would be
    # more portable, kept for backward compatibility.
    _tmp_dir = tempfile.mkdtemp(prefix='armada', dir='/tmp')
    pygit2.clone_repository(repo_url, _tmp_dir, checkout_branch=branch)
    return _tmp_dir
def source_cleanup(target_dir):
    '''
    Remove a previously cloned working directory, ignoring missing paths.
    '''
    if not path.exists(target_dir):
        return
    shutil.rmtree(target_dir)
|
from django.conf import settings
from django.db.models import signals
from django.apps import apps
from django.db import DEFAULT_DB_ALIAS
from .models import Heartbeat
def post_migrate_receiver(app_config, verbosity=2, interactive=False, using=DEFAULT_DB_ALIAS, **kwargs):
    """
    Finalize the website loading.

    Django post_migrate signal receiver: ensures the singleton Heartbeat
    row (pk=1) exists once migrations complete. Idempotent thanks to
    get_or_create.
    """
    Heartbeat.objects.get_or_create(id=1)
|
# coding=utf-8
"""
invite.py - Invite all the things
Copyright 2016 Max Gurela
Licensed under the Eiffel Forum License 2.
"""
from __future__ import unicode_literals, absolute_import, print_function, division
from sopel.config.types import StaticSection, ValidatedAttribute
from sopel.logger import get_logger
from sopel.module import (commands, event, rule, priority, interval,
require_privilege, require_chanmsg, require_privmsg, require_admin, OP)
from sopel.tools import events, SopelMemory
from threading import Timer
LOGGER = get_logger(__name__)
class InviteSection(StaticSection):
    """Config section [invite]: population threshold and departure delay."""
    # Minimum channel population before the bot schedules a departure.
    minimum_users = ValidatedAttribute('minimum_users', parse=int, default=2)
    # Minutes to wait before actually leaving an under-populated channel.
    delay = ValidatedAttribute('delay', parse=float, default=1)
def configure(config):
    """Interactive configuration hook for the [invite] section."""
    config.define_section('invite', InviteSection, validate=False)
    config.invite.configure_setting(
        'minimum_users',
        'Enter the minimum number of users required for Sopel to stay in the channel'
    )
    config.invite.configure_setting(
        'delay',
        'Enter the number of minutes Sopel should stay in a channel after it falls below the minimum user population'
    )
def setup(bot):
    """Module setup: register config section and the departure-timer registry."""
    bot.config.define_section('invite', InviteSection)
    if not bot.memory.contains('departure_scheduler'):
        # Maps channel name -> threading.Timer of a pending departure.
        bot.memory['departure_scheduler'] = SopelMemory()
    # Module has been hot-loaded, join now.
    if bot.connection_registered:
        join_known(bot)
def join_known(bot):
    """
    Auto-join invited channels.

    Reads the channel_values table directly and joins every channel whose
    "autojoin" value is "true" and that the bot is not already in. Honors
    the core throttle_join setting by sleeping 1 second between batches.
    """
    # Fix: `time` was never imported at module level, so time.sleep raised a
    # NameError that the old bare `except: pass` silently swallowed —
    # throttled joins were skipped without any trace.
    import time

    try:
        cursor = bot.db.execute('SELECT DISTINCT channel, value FROM channel_values WHERE key="autojoin";')
    except Exception:
        # The table may not exist yet (fresh install): nothing to join.
        return
    channels_joined = 0
    for row in cursor.fetchall():
        try:
            channel = str(row[0])
            autojoin = str(row[1]).lower() == 'true'
            if autojoin and channel not in bot.channels.keys():
                LOGGER.info('Auto-joining {}'.format(channel))
                # If we aren't yet authenticated with NickServ, coretasks handles re-attempting join
                if bot.config.core.throttle_join:
                    throttle_rate = int(bot.config.core.throttle_join)
                    channels_joined += 1
                    if not channels_joined % throttle_rate:
                        time.sleep(1)
                bot.join(channel)
        except Exception:
            # Best-effort: one malformed row must not block remaining joins.
            continue
@event(events.RPL_WELCOME, events.RPL_LUSERCLIENT)
@rule('.*')
@priority('low')
def agressive_join(bot, trigger):
    """On-connect hook: (re)join every channel flagged for autojoin."""
    join_known(bot)
@interval(60)
def check_empty_chan(bot):
    """
    Once a minute, schedule departure from channels that fell below the
    configured minimum user count. Force-joined channels are exempt.
    """
    for channel in bot.channels.values():
        if channel.name in bot.config.core.channels:
            # Don't ever leave force-joined channels
            continue
        if channel.name in bot.memory['departure_scheduler'].keys():
            # Already messaged channel and started timer
            continue
        user_count = len(channel.users)
        if user_count < bot.config.invite.minimum_users:
            LOGGER.info('Scheduling {} for departure, below minimum user count ({}<{})'
                        .format(channel.name, user_count, bot.config.invite.minimum_users))
            if (bot.config.invite.delay > 0):
                # NOTE(review): the format string has 2 placeholders but is
                # given 3 args; str.format silently ignores the extra one.
                bot.say('{} is below my minimum user population, scheduling departure for {} minute(s) from now.'
                        .format(channel.name, bot.config.invite.delay, channel.name), channel.name)
            else:
                bot.say('{} is below my minimum user population, departing now.'.format(channel.name), channel.name)
            # Daemon timer so a pending departure never blocks bot shutdown.
            timer = Timer(bot.config.invite.delay * 60, depart_channel, (bot, channel.name))
            timer.daemon = True
            timer.start()
            bot.memory['departure_scheduler'][channel.name] = timer
def depart_channel(bot, name):
    """
    Timer callback: leave channel `name`, unless its population recovered
    while the departure delay was running.
    """
    channel = bot.channels[name]
    # The timer has fired either way, so drop the bookkeeping entry up front.
    # (Previously the cancel path returned without removing it, which made
    # check_empty_chan skip this channel forever afterwards.)
    del bot.memory['departure_scheduler'][name]
    if len(channel.users) >= bot.config.invite.minimum_users:
        LOGGER.info('Departure from {} cancelled, population has reached the minimum user count.'.format(name))
        bot.say('Cancelling departure, population has reached acceptable minimum.', name)
        return
    LOGGER.info('Departing from {}, population did not reach minimum in delay period.'.format(name))
    bot.part(name, 'Goodbye {}!'.format(name))
    # Forget the channel so we don't auto-rejoin on reconnect.
    bot.db.set_channel_value(name, 'autojoin', False)
@event('INVITE')
@rule('.*')
@priority('low')
def invite_join_chan(bot, trigger):
    """
    Join a channel Sopel is invited to, allows anyone to have the bot in their chan.

    Marks the channel for autojoin and greets it, unless we are already in it.
    """
    # trigger.args[1] is the channel being invited to.
    if trigger.args[1].lower() in [chan.lower() for chan in bot.channels]:
        return
    bot.db.set_channel_value(trigger.args[1], 'autojoin', True)
    bot.join(trigger.args[1])
    bot.msg(trigger.args[1], 'Hi {}! I was invited by {}. If you need assistance, please use \'.help\'. I may respond to other '
                             'bots in specific circumstances, though I will prevent myself from repeating the same message 6 '
                             'times in a row.'.format(trigger.args[1], trigger.nick))
    bot.msg(trigger.args[1], 'If my presence is unwanted, simply have a chanop say \'.part\' and I will gladly leave you alone.')
@commands('part')
@require_privilege(OP, 'You are not a channel operator.')
@require_chanmsg
@priority('low')
def part_chanop(bot, trigger):
    """Let a channel operator eject the bot; also clears the autojoin flag."""
    bot.part(trigger.sender, 'Part requested by {}'.format(trigger.nick))
    bot.db.set_channel_value(trigger.sender, 'autojoin', False)
@commands('channels')
@require_privmsg
@require_admin
@priority('low')
def channel_list(bot, trigger):
    """Admin-only PM command: list every channel the bot is currently in."""
    bot.say('My connected channels ({}): {}'.format(len(bot.channels), ', '.join(bot.channels)), max_messages=3)
|
from pyfiglet import figlet_format
def print_art():
    """Read a line from stdin and print it rendered as FIGlet ASCII art."""
    text = input()
    print(figlet_format(text))


print_art()
|
# This file is run by the RasPi of the IoT to allow it to communicate with the
# main server through MQTT and with the actual device through an Arduino
#
#Communicating with the TV:
# topic - "TV"
#
# There are 2 possible actions with the TV
# To turn on the TV:
# payload - "on_tv"
# To turn off the TV:
# payload - "off_tv"
import paho.mqtt.client as mqtt
import sys
import serial
ADDR_OF_ARDUINO = '/dev/ttyACM0'
BAUD_RATE = 9600
ADDR_TO_CONN = "192.168.43.86" #connect to Broker
PORT_TO_CONN = 1883
# The callback for when the client receives a CONNACK response from the server.
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    """
    MQTT CONNACK callback: abort the program if the broker refused us.

    Args follow the standard paho-mqtt on_connect signature; ``rc`` is the
    connack result code (0 means success).
    """
    print("Connected with result code " + str(rc))
    if rc != 0:
        # Broker refused to connect. Exit with error msg.
        # (Fixed typo in the message: "Connction" -> "Connection".)
        sys.exit("Connection refused.")
# The RasPi forwards messages from the publisher to the Arduino
def on_message(client, userdata, msg):
    """MQTT message callback: forward "TV" payloads to the Arduino over serial."""
    instruction = msg.payload
    print(instruction)
    topic = msg.topic
    if topic == "TV":
        # `ser` is the module-level serial link opened below.
        ser.write(instruction)
# Open the serial link to the Arduino, then connect to the MQTT broker and
# block forever dispatching messages.
ser = serial.Serial(ADDR_OF_ARDUINO, BAUD_RATE)
client = mqtt.Client(client_id="TV", clean_session=True, protocol=mqtt.MQTTv311)
client.on_connect = on_connect
client.on_message = on_message
client.connect(ADDR_TO_CONN, port=PORT_TO_CONN) #client connects
client.subscribe("TV") #subscribe to topic "TV"
# NOTE(review): subscribing inside on_connect would re-establish the
# subscription after an automatic reconnect; as written it is set only once.
client.loop_forever()
|
# Each array includes alternate names for each system.
# You can add your own systems and/or alternate names.
# If the first value is "Advanced", then it will only show up when "Show Advanced Systems" is enabled.
# Capitalization doesn't matter, and each names is already checked with and without " - " where applicable (for example, "Nokia - N-Gage" covers both "Nokia - N-Gage" and "Nokia N-Gage")
# Canonical system name -> (visibility flag, list of alternate names).
# A flag of "Advanced" hides the system unless "Show Advanced Systems" is on;
# " " means always visible. Fix: removed the duplicated "Game & Watch" alias.
systemNamesDict = {
    "Acorn - Archimedes" : ("Advanced", ["Archimedes"]),
    "Acorn - Atom" : ("Advanced", ["Atom"]),
    "Acorn - BBC Micro" : ("Advanced", ["bbcmicro", "BBC Micro"]),
    "ACT - Apricot PC Xi" : ("Advanced", ["Apricot PC Xi"]),
    "APF - Imagination Machine" : ("Advanced", ["Imagination Machine"]),
    "APF - MP-1000" : ("Advanced", ["MP-1000"]),
    "Apple - I" : ("Advanced", ["apple1"]),
    "Apple - II" : ("Advanced", ["apple2"]),
    "Apple - II Plus" : ("Advanced", ["apple2plus"]),
    "Apple - IIe" : ("Advanced", ["apple2e"]),
    "Apple - IIGS" : ("Advanced", ["apple2gs"]),
    "Apple - Macintosh" : ("Advanced", ["Macintosh"]),
    "Atari - 2600" : (" ", ["atari2600", "Atari - Atari 2600", "a2600"]),
    "Atari - 5200" : (" ", ["atari5200", "Atari - Atari 5200"]),
    "Atari - 7800" : (" ", ["atari7800"]),
    "Atari - 8-bit Family" : ("Advanced", ["atari800", "Atari - 800"]),
    "Atari - Jaguar" : (" ", ["Jaguar"]),
    "Atari - Lynx" : (" ", ["lynx"]),
    "Atari - ST" : ("Advanced", ["atarist", "Atari - Atari ST"]),
    "Atari - ST (Tapes)" : ("Advanced", ["Atari - Atari ST (Tapes)"]),
    "Bally - Astrocade" : ("Advanced", ["Astrocade"]),
    "Bally - Astrocade (Tapes)" : ("Advanced", ["Astrocade (Tapes)"]),
    "Bandai - Design Master Denshi Mangajuku" : ("Advanced", ["Design Master Denshi Mangajuku"]),
    "Bandai - Gundam RX-78" : ("Advanced", ["Gundam RX-78"]),
    "Bandai - WonderSwan" : (" ", ["wswan", "WonderSwan"]),
    "Bandai - WonderSwan Color" : (" ", ["wswanc", "WonderSwan Color"]),
    "Benesse - Pocket Challenge V2" : ("Advanced", ["Pocket Challenge V2"]),
    "Benesse - Pocket Challenge W" : ("Advanced", ["Pocket Challenge W"]),
    "Bit Corporation - Gamate" : ("Advanced", ["Gamate"]),
    "Casio - Loopy" : ("Advanced", ["Loopy"]),
    "Casio - PV-1000" : ("Advanced", ["PV-1000"]),
    "Coleco - ColecoVision" : (" ", ["colecovision"]),
    "Commodore - Amiga" : (" ", ["Amiga"]),
    "Commodore - Commodore 64" : (" ", ["c64", "Commodore 64"]),
    "Commodore - Commodore 64 (PP)" : ("Advanced", ["Commodore 64 (PP)"]),
    "Commodore - Commodore 64 (Tapes)" : ("Advanced", ["Commodore 64 (Tapes)"]),
    "Commodore - Plus-4" : ("Advanced", []),
    "Commodore - VIC-20" : ("Advanced", ["vic20", "VIC-20", "Commodore - VIC20"]),
    "Emerson - Arcadia 2001" : ("Advanced", ["Arcadia 2001"]),
    "Entex - Adventure Vision" : ("Advanced", ["Adventure Vision"]),
    "Epoch - Game Pocket Computer" : ("Advanced", ["Game Pocket Computer"]),
    "Epoch - Super Cassette Vision" : ("Advanced", ["Super Cassette Vision"]),
    "Fairchild - Channel F" : ("Advanced", ["channelf", "Channel F"]),
    "Fukutake Publishing - StudyBox" : ("Advanced", ["StudyBox"]),
    "Funtech - Super Acan" : ("Advanced", ["Super Acan"]),
    "GamePark - GP2X" : ("Advanced", ["GP2X"]),
    "GamePark - GP2X (Digital)" : ("Advanced", ["GP2X (Digital)"]),
    "GamePark - GP32" : ("Advanced", ["GP32"]),
    "GCE - Vectrex" : (" ", ["vectrex"]),
    "Google - Android (Misc)" : ("Advanced", ["Android (Misc)"]),
    "Hartung - Game Master" : ("Advanced", ["Game Master"]),
    "IBM - PC and Compatibles" : ("Advanced", []),
    "IBM - PC and Compatibles (Digital) (Desura)" : ("Advanced", []),
    "IBM - PC and Compatibles (Digital) (Misc)" : ("Advanced", []),
    "IBM - PC and Compatibles (Digital) (Misc) (Adult)" : ("Advanced", []),
    "IBM - PC and Compatibles (Digital) (Unknown)" : ("Advanced", []),
    "Interton - VC 4000" : ("Advanced", ["VC 4000"]),
    "iQue - iQue" : ("Advanced", ["iQue"]),
    "Konami - Picno" : ("Advanced", ["Picno"]),
    "LeapFrog - LeapPad" : ("Advanced", ["LeapPad"]),
    "LeapFrog - Leapster Learning Game System" : ("Advanced", ["Leapster Learning Game System"]),
    "LeapFrog - My First LeapPad" : ("Advanced", ["My First LeapPad"]),
    "Magnavox - Odyssey2" : ("Advanced", ["Odyssey2", "Magnavox - Odyssey 2"]),
    "Mattel - Intellivision" : (" ", ["intellivision"]),
    "Microsoft - MSX" : (" ", ["msx1", "MSX"]),
    "Microsoft - MSX2" : (" ", ["msx2"]),
    "Microsoft - XBOX 360 (Digital)" : ("Advanced", ["XBOX 360 (Digital)"]),
    "Mobile - J2ME" : ("Advanced", ["J2ME"]),
    "Mobile - Palm OS" : ("Advanced", ["palm", "Palm OS"]),
    "Mobile - Symbian" : ("Advanced", ["Symbian"]),
    "NEC - PC Engine - TurboGrafx 16" : (" ", ["pcengine", "tg16", "PC Engine - TurboGrafx 16", "NEC - PC Engine", "PC Engine", "NEC - TurboGrafx 16", "TurboGrafx 16", "NEC - TurboGrafx16", "TurboGrafx16"]),
    "NEC - PC Engine SuperGrafx" : ("Advanced", ["supergrafx", "PC Engine SuperGrafx"]),
    "Nichibutsu - My Vision" : ("Advanced", ["My Vision"]),
    "Nintendo - amiibo" : ("Advanced", ["amiibo"]),
    "Nintendo - e-Reader" : ("Advanced", ["e-Reader"]),
    "Nintendo - Family Computer Disk System" : (" ", ["fds", "Family Computer Disk System", "Nintendo - Famicom Disk System", "Famicom Disk System"]),
    "Nintendo - Family Computer Network System" : ("Advanced", ["Family Computer Network System"]),
    "Nintendo - Game & Watch" : (" ", ["gw", "Nintendo - Game & Watch", "Game & Watch"]),
    "Nintendo - Game Boy" : (" ", ["gb", "Game Boy", "Gameboy"]),
    "Nintendo - Game Boy Advance" : (" ", ["gba", "Game Boy Advance", "Gameboy Advance", "Nintendo - Gameboy Advance"]),
    "Nintendo - Game Boy Advance (Multiboot)" : (" ", ["Game Boy Advance (Multiboot)"]),
    "Nintendo - Game Boy Color" : (" ", ["gbc", "Game Boy Color", "Gameboy Color", "Nintendo - Gameboy Color"]),
    "Nintendo - Kiosk Video Compact Flash" : ("Advanced", ["Kiosk Video Compact Flash"]),
    "Nintendo - Mario no Photopi SmartMedia" : ("Advanced", ["Mario no Photopi SmartMedia"]),
    "Nintendo - Misc" : ("Advanced", []),
    "Nintendo - New Nintendo 3DS" : ("Advanced", ["New Nintendo 3DS", "New 3DS"]),
    "Nintendo - New Nintendo 3DS (Digital)" : ("Advanced", ["New Nintendo 3DS (Digital)"]),
    "Nintendo - Nintendo 3DS" : ("Advanced", ["Nintendo 3DS", "3DS"]),
    "Nintendo - Nintendo 3DS (Digital)" : ("Advanced", ["Nintendo 3DS (Digital)", "3DS (Digital)"]),
    "Nintendo - Nintendo 3DS (Digital) (CDN)" : ("Advanced", ["Nintendo 3DS (Digital) (CDN", "3DS (Digital) (CDN"]),
    "Nintendo - Nintendo 64" : (" ", ["n64", "Nintendo 64"]),
    "Nintendo - Nintendo 64DD" : (" ", ["64dd", "Nintendo 64DD"]),
    "Nintendo - Nintendo DS" : (" ", ["ds", "nds", "Nintendo DS"]),
    "Nintendo - Nintendo DS (Download Play)" : (" ", ["Nintendo DS (Download Play)"]),
    "Nintendo - Nintendo DSi" : ("Advanced", ["Nintendo DSi", "DSi"]),
    "Nintendo - Nintendo DSi (Digital)" : ("Advanced", ["Nintendo DSi (Digital)"]),
    "Nintendo - Nintendo DSi (Digital) (CDN)" : ("Advanced", ["Nintendo DSi (Digital) (CDN)"]),
    "Nintendo - Nintendo Entertainment System" : (" ", ["nes", "Nintendo Entertainment System", "Nintendo - Famicom", "Famicom"]),
    "Nintendo - Play-Yan" : ("Advanced", ["Play-Yan"]),
    "Nintendo - Pokemon Mini" : (" ", ["pokemini", "Pokemon Mini"]),
    "Nintendo - Satellaview" : (" ", ["satellaview"]),
    "Nintendo - Sufami Turbo" : ("Advanced", ["sufami", "Sufami Turbo"]),
    "Nintendo - Super Nintendo Entertainment System" : (" ", ["snes", "Super Nintendo Entertainment System", "Super Nintendo", "Nintendo - Super Famicom", "Super Famicom"]),
    "Nintendo - Virtual Boy" : (" ", ["virtualboy", "Virtual Boy"]),
    "Nintendo - Wii (Digital) (CDN)" : ("Advanced", ["Wii (Digital) (CDN)"]),
    "Nintendo - Wii U (Digital) (CDN)" : ("Advanced", ["Wii U (Digital) (CDN)"]),
    "Nokia - N-Gage" : ("Advanced", ["N-Gage"]),
    "Nokia - N-Gage 2.0" : ("Advanced", ["N-Gage 2.0"]),
    "Ouya - Ouya" : ("Advanced", ["Ouya"]),
    "Philips - Videopac+" : ("Advanced", ["videopacplus", "Videopac+"]),
    "RCA - Studio II" : ("Advanced", ["Studio II"]),
    "Sega - 32X" : (" ", ["sega32x", "32X"]),
    "Sega - Beena" : ("Advanced", ["Beena"]),
    "Sega - Game Gear" : (" ", ["gamegear", "Game Gear"]),
    "Sega - Master System - Mark III" : (" ", ["mastersystem", "Master System - Mark III", "Sega - Master System", "Master System", "SMS"]),
    "Sega - Mega Drive - Genesis" : (" ", ["megadrive", "Sega Genesis", "Genesis", "Sega Mega Drive", "Mega Drive"]),
    "Sega - PICO" : ("Advanced", ["PICO"]),
    "Sega - SG-1000" : (" ", ["sg1000", "SG-1000"]),
    "Seta - Aleck64" : ("Advanced", ["Aleck64"]),
    "Sinclair - ZX Spectrum +3" : (" ", ["zxspectrum", "ZX Spectrum +3", "ZX Spectrum"]),
    "SNK - Neo Geo Pocket" : (" ", ["ngp", "Neo Geo Pocket"]),
    "SNK - Neo Geo Pocket Color" : (" ", ["ngpc", "Neo Geo Pocket Color"]),
    "Sony - PlayStation (PS one Classics) (PSN)" : ("Advanced", ["PlayStation (PS one Classics) (PSN)"]),
    "Sony - PlayStation 3 (PSN) (Content)" : ("Advanced", ["PlayStation 3 (PSN) (Content)"]),
    "Sony - PlayStation Mobile (PSN)" : ("Advanced", ["PlayStation Mobile (PSN)"]),
    "Sony - PlayStation Portable (PSN) (Encrypted)" : ("Advanced", ["PlayStation Portable (PSN) (Encrypted)"]),
    "Sony - PlayStation Vita (PSN) (Content)" : ("Advanced", ["PlayStation Vita (PSN) (Content)"]),
    "Tiger - Game.com" : ("Advanced", ["Game.com"]),
    "Toshiba - Pasopia" : ("Advanced", ["Pasopia"]),
    "Toshiba - Visicom" : ("Advanced", ["Visicom"]),
    "VTech - CreatiVision" : ("Advanced", ["CreatiVision"]),
    "VTech - Mobigo" : ("Advanced", ["Mobigo"]),
    "VTech - V.Smile" : ("Advanced", ["V.Smile"]),
    "Watara - Supervision" : ("Advanced", ["supervision"]),
    "Yamaha - Copera" : ("Advanced", ["Copera"]),
    "Zeebo - Zeebo" : ("Advanced", ["Zeebo"]),
}
|
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import pandas as pd
from joblib import load
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
from io import BytesIO
import base64
import numpy
from numpy import arange
import numpy as np
# Imports from this application
from app import app
from joblib import load
# Trained sklearn pipeline and the pre-selected asteroid rows it predicts on.
pipeline = load('assets/pipeline.joblib')
df = pd.read_csv('assets/top_asteroids.csv') #this will be exported ten asteroids with proper columns
# 2 column layout. 1st column width = 4/12
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown("""
### Instructions:
This predictor will use the information associated with a particular asteroid selected from the drop down menu on the right. However, in an effort to make the app more interactive to the user, one can edit the following parameters: 'Number of Observations Used', 'Albedo', and 'Orbit Classification'. This will override the values for the given asteroid and generate a new prediction, but be careful this could totally throw off the accuracy of the prediction!! After selecting your inputs and asteroid, you will see the predicted diameter value versus the actual diameter of the asteroid in question with the given parameters. These will be accompanied by a shapely plot to give a rundown on why the diameter was predicted to be that specific size. Hope you enjoy and have fun!
"""
),
dcc.Markdown('## User Inputs', className='mb-5'),
dcc.Markdown('#### Number of Observations Used'),
dcc.Slider(
id='n_obs_used',
min=100,
max=1500,
step=100,
value=605, #when someone selects asteroid what will happen with slider update?
marks={n: str(n) for n in range(100,1500,100)},
className='mb-5',
),
dcc.Markdown('#### Albedo (Proportion of the Light Reflected off Asteroid)'),
dcc.Slider(
id='albedo',
min=0,
max=1,
step=.10,
value=.10,
marks={n: str(n) for n in np.linspace(0,1,num=9)},
className='mb-5',
),
dcc.Markdown('''
### Orbit Classification
For more info on orbit classifications use this link:
[Click Here](https://pdssbn.astro.umd.edu/data_other/objclass.shtml)
'''),
dcc.Dropdown(
id='classes',
options = [
{'label': 'Main Belt Asteroid', 'value': 'MBA'}, #mba write out for label
{'label': 'Inner Main-Belt Asteroid', 'value': 'IMB'},
{'label': 'Mars-Crossing Asteroid', 'value': 'MCA'},
{'label': 'Apollo', 'value': 'APO'},
{'label': 'Aten', 'value': 'ATE'},
{'label': 'Outer Main-Belt Asteroid', 'value': 'OMB'},
{'label': 'Amor', 'value': 'AMO'},
{'label': 'Jupiter Trojan', 'value': 'TJN'},
{'label': 'Centaur', 'value': 'CEN'},
{'label': 'Asteroid (no matches)', 'value': 'AST'},
{'label': 'TransNeptunian Object', 'value': 'TNO'},
],
value = 'MBA',
className='mb-5',
),
],
md=6,
)
#select asteroid here in a dropdown !!!!!!!!!!
# Right column: asteroid picker. The dropdown *values* are the exact
# `full_name` strings from the CSV (leading space included) because the
# predict callback filters `df.full_name == value`.
column2 = dbc.Col(
    [
        dcc.Markdown('#### Asteroids'),
        dcc.Dropdown(
            id='asteroid',
            options = [
                {'label': 'Agathe', 'value': ' 228 Agathe'},
                {'label': 'Bruna', 'value': ' 290 Bruna'},
                {'label': 'Phaetusa', 'value': ' 296 Phaetusa'},
                {'label': 'Constantia', 'value': ' 315 Constantia'},
                {'label': 'Adalberta', 'value': ' 330 Adalberta (A910 CB)'},
                {'label': 'Hungaria', 'value': ' 434 Hungaria (1898 DR)'},
                {'label': 'Adelaide', 'value': ' 525 Adelaide (1908 EKa)'},
                {'label': 'Kundry', 'value': ' 553 Kundry (1904 PP)'},
                {'label': 'Reginhild', 'value': ' 574 Reginhild (1905 RD)'},
                {'label': 'Mireille', 'value': ' 594 Mireille (1906 TW)'},
                {'label': 'Agnes', 'value': ' 641 Agnes (1907 ZX)'},
                {'label': 'Kastalia', 'value': ' 646 Kastalia (1907 AC)'},
                {'label': 'Adelgunde', 'value': ' 647 Adelgunde (1907 AD)'},
                {'label': 'Josefa', 'value': ' 649 Josefa (1907 AF)'},
                {'label': 'Noemi', 'value': ' 703 Noemi (1910 KT)'},
            ],
            # Fix: the default was the bare label 'Kastalia', which matched no
            # option value and no df row, so the predict callback crashed with
            # an IndexError on first load. Use the real option value instead.
            value = ' 646 Kastalia (1907 AC)',
            className='mb-5',
        ),
        #html.Img(src='assets/galaxy.jpg', style={'width':'100%'}),
    ],
)
# Prediction readout column (a shapely/SHAP plot may be added here later).
column3 = dbc.Col(
    [
        # Inline display keeps the heading and the value on one line.
        html.H2(
            'Predicted Diameter:',
            className='mb-5',
            style={'display': 'inline'},
        ),
        # Populated by the `predict` callback via its Output target.
        html.Span(
            id='prediction-content',
            className='lead',
            style={'font-size': '36px'},
        ),
        #html.Button('Explain Prediction', id='explain-btn'),
        #html.Div([html.Img(id='shap-img', height=200, width=1000)])
    ],
)
# Page layout: feature inputs (column1) and the asteroid picker (column2)
# share the first row; the prediction readout (column3) sits below them.
layout = html.Div([
    dbc.Row([column1, column2]),
    dbc.Row(column3),
])
####BRUNO NOTES#####
# def fig_to_uri(in_fig, close_all=True, **save_args):
# """
# Save a figure as a URI
# :param in_fig:
# :return:
# """
# out_img = BytesIO()
# in_fig.savefig(out_img, format='png', **save_args)
# if close_all:
# in_fig.clf()
# plt.close('all')
# out_img.seek(0) # rewind file
# encoded = base64.b64encode(out_img.read()).decode("ascii").replace("\n", "")
# return "data:image/png;base64,{}".format(encoded)
@app.callback(
    Output('prediction-content', 'children'),
    [Input('n_obs_used', 'value'),
     Input('albedo', 'value'),
     Input('classes', 'value'),
     Input('asteroid', 'value')],
)
def predict(n_obs_used, albedo, classes, asteroids):
    """Return the predicted diameter string for the selected asteroid.

    Starts from the asteroid's stored feature row in ``df``, overrides the
    features the user moved off their control defaults, and runs the fitted
    ``pipeline``.
    """
    # Single-row frame for the chosen asteroid, matched on full_name.
    pred_df = df[df.full_name == asteroids]
    if pred_df.empty:
        # BUG FIX: with no matching row (e.g. dropdown cleared, or a value
        # that is not a full_name) the original code raised IndexError on
        # `.values[0]`; degrade gracefully instead.
        return 'Select an asteroid to see a prediction.'
    # .copy() so the .at[] writes below hit a real frame, not a view
    # (avoids SettingWithCopyWarning / silently-lost updates).
    pred_df = pred_df[['orbit_id', 'e', 'a', 'i', 'om', 'w', 'ma', 'n', 'tp', 'moid',
                       'moid_jup', 'classes', 'producer', 'data_arc', 'n_obs_used', 'rms',
                       'albedo', 'diameter_sigma', 'first_year_obs', 'first_month_obs',
                       'last_obs_year', 'last_obs_month']].copy()
    row = int(pred_df.index.values[0])
    # Only override a feature when the stored value differs from the control
    # defaults (605 obs, 0.10 albedo, 'MBA') — presumably so the catalog
    # value wins when the user left a control untouched; TODO confirm intent.
    if pred_df.n_obs_used.values[0] != 605:
        pred_df.at[row, 'n_obs_used'] = n_obs_used
    if pred_df.albedo.values[0] != .10:
        pred_df.at[row, 'albedo'] = albedo
    if pred_df.classes.values[0] != 'MBA':
        pred_df.at[row, 'classes'] = classes
    y_pred = pipeline.predict(pred_df)[0]
    return f'{y_pred:.4f} km'
# SHAP INPUT UNCOMMENT ONCE DONE GETTING RUN
# # Get steps from pipeline and transform
#model = pipeline.named_steps['xgbclassifier']
# encoder = pipeline.named_steps['ordinalencoder']
# df_processed = encoder.transform(df)
# # Get shapley additive explanations
# explainer = shap.TreeExplainer(model)
# shap_values = explainer.shap_values(df_processed)
# # Plot shapley and save matplotlib plot to base64 encoded buffer for rendering
# fig = shap.force_plot(
# base_value=explainer.expected_value,
# shap_values=shap_values,
# features=df_processed,
# show=False,
# matplotlib=True)
# out_url = fig_to_uri(fig)
# # Return image
# return out_url
|
from structure import SofaType
from readers import date_reader, datetime_reader
from writers import boolean_writer, date_writer, datetime_writer
from validators import (
NumericIdValidator,
StringIdValidator,
BooleanValidator,
IntegerValidator,
FloatValidator,
StringValidator,
DateValidator,
DatetimeValidator,
EmailValidator,
ZipCodeValidator,
)
class NumericId(SofaType):
    """Integer-based resource-ID type."""

    def __init__(self):
        self.validator = NumericIdValidator()

    def __repr__(self):
        return "<NumericId()>"
class StringId(SofaType):
    """String-based resource-ID type of a fixed length."""

    def __init__(self, id_length):
        # The length is stored for repr() and enforced by the validator.
        self.id_length = id_length
        self.validator = StringIdValidator(id_length=id_length)

    def __repr__(self):
        return "<StringId(id_length=%r)>" % self.id_length
class Boolean(SofaType):
    """Boolean field type, optionally nullable."""

    def __init__(self, nullable=False):
        self.nullable = nullable
        self.validator = BooleanValidator(nullable=nullable)
        # Serialized through the shared boolean writer.
        self.writer = boolean_writer

    def __repr__(self):
        return "<Boolean()>"
class Integer(SofaType):
    """Integer field type with optional range, uniqueness and nullability.

    The ``min``/``max`` parameter names shadow the builtins but are kept
    for backward compatibility with existing callers.
    """

    def __init__(self, min=None, max=None, unique=False, nullable=False):
        self.min = min
        self.max = max
        self.unique = unique
        self.nullable = nullable
        self.validator = IntegerValidator(min=min, max=max, unique=unique,
                                          nullable=nullable)
        # ``int`` is already a one-argument callable; wrapping it in a
        # lambda (as before) added a needless indirection.
        self.writer = int

    def __repr__(self):
        return "<Integer(min=%r, max=%r)>" % (self.min, self.max)
class Float(SofaType):
    """Float field type with optional range, uniqueness and nullability.

    The ``min``/``max`` parameter names shadow the builtins but are kept
    for backward compatibility with existing callers.
    """

    def __init__(self, min=None, max=None, unique=False, nullable=False):
        self.min = min
        self.max = max
        self.unique = unique
        self.nullable = nullable
        self.validator = FloatValidator(min=min, max=max, unique=unique,
                                        nullable=nullable)
        # ``float`` is already a one-argument callable; the previous
        # ``lambda value: float(value)`` wrapper added nothing.
        self.writer = float

    def __repr__(self):
        return "<Float(min=%r, max=%r)>" % (self.min, self.max)
class String(SofaType):
    """Free-form string field with optional length/content restrictions."""

    def __init__(self, min_len=None, max_len=None, allow_digits=True,
                 allow_special_chars=True, valid_values=None, unique=False,
                 nullable=False):
        self.min_len = min_len
        self.max_len = max_len
        self.allow_digits = allow_digits
        self.allow_special_chars = allow_special_chars
        self.valid_values = valid_values
        self.unique = unique
        self.nullable = nullable
        # All constraints are delegated to the validator.
        self.validator = StringValidator(
            min_len=min_len,
            max_len=max_len,
            allow_digits=allow_digits,
            allow_special_chars=allow_special_chars,
            valid_values=valid_values,
            unique=unique,
            nullable=nullable,
        )

    def __repr__(self):
        return "<String()>"
class Date(SofaType):
    """Date field with optional future/past constraints."""

    def __init__(self, require_future=False, require_past=False, nullable=False):
        self.require_future = require_future
        self.require_past = require_past
        self.nullable = nullable
        self.validator = DateValidator(require_future=require_future,
                                       require_past=require_past,
                                       nullable=nullable)
        # Dates are parsed on input and serialized on output.
        self.reader = date_reader
        self.writer = date_writer

    def __repr__(self):
        return "<Date(require_future=%r, require_past=%r)>" % (
            self.require_future, self.require_past)
class Datetime(SofaType):
    """Datetime field with optional future/past constraints.

    Fix: ``nullable`` is now stored on the instance, matching ``Date`` and
    every other type in this module; previously it was only forwarded to the
    validator, so reading ``Datetime(...).nullable`` raised AttributeError.
    """

    def __init__(self, require_future=False, require_past=False, nullable=False):
        self.require_future = require_future
        self.require_past = require_past
        self.nullable = nullable
        self.validator = DatetimeValidator(require_future=require_future,
                                           require_past=require_past,
                                           nullable=nullable)
        # Datetimes are parsed on input and serialized on output.
        self.reader = datetime_reader
        self.writer = datetime_writer

    def __repr__(self):
        return "<Datetime(require_future=%r, require_past=%r)>" % (
            self.require_future, self.require_past)
class Email(SofaType):
    """E-mail address field, optionally unique and/or nullable."""

    def __init__(self, unique=False, nullable=False):
        self.unique = unique
        self.nullable = nullable
        self.validator = EmailValidator(unique=unique, nullable=nullable)

    def __repr__(self):
        return "<Email()>"
class ZipCode(SofaType):
    """Postal/ZIP code field, optionally unique and/or nullable."""

    def __init__(self, unique=False, nullable=False):
        self.unique = unique
        self.nullable = nullable
        self.validator = ZipCodeValidator(unique=unique, nullable=nullable)

    def __repr__(self):
        return "<ZipCode()>"
|
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved.
import pkg_resources
from .core import AMQPChannelLayer # noqa
# NOTE(review): pkg_resources is deprecated in modern setuptools; consider
# importlib.metadata.version('asgi_amqp') once the minimum Python allows it.
__version__ = pkg_resources.require('asgi_amqp')[0].version
|
# Generated by Django 3.1 on 2020-10-07 00:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widen ``Report.logs`` to a 16 KiB CharField."""

    dependencies = [
        ('resources', '0016_job_auth_token'),
    ]

    operations = [
        migrations.AlterField(
            model_name='report',
            name='logs',
            field=models.CharField(max_length=16384),
        ),
    ]
|
from scsr.models.elements import ElementAO, ElementReferenceAO
def create_element(enName, ptName, refs):
    """Persist an element with English/Portuguese names plus its references."""
    element = ElementAO()
    element.add_name('en', enName)
    element.add_name('pt', ptName)
    element.save()
    # TODO: Create the ElementReference representation and code the
    # reference adding here.
    reference = ElementReferenceAO.get_reference(element)
    reference.set_reference(refs)
    reference.save()
def start_multiple():
    """Seed the catalogue with the standard set of multi-sourced elements."""
    GAMEONTOLOGY = "http://www.gameontology.com/index.php/"
    CHALMERS = "http://virt10.itu.chalmers.se/index.php/"

    def multi_ref(go_page, chalmers_page, **extra):
        # Reference dict shared by every entry: two source links, plus any
        # per-entry extras (e.g. the ambiguity flag on "To Visit").
        ref = {"type": "multi", "source": "link",
               "ref": [GAMEONTOLOGY + go_page, CHALMERS + chalmers_page]}
        ref.update(extra)
        return ref

    entries = [
        ("Dynamic Difficulty Adjustment", "Ajuste Dinâmico de Dificuldade",
         multi_ref("Dynamic_Difficulty_Adjustment", "Dynamic_Difficulty_Adjustment")),
        ("Lives", "Vidas", multi_ref("Lives", "Lives")),
        ("Multiplayer", "Multijogadores", multi_ref("Multiplayer", "Multiplayer_Games")),
        ("Randomness", "Aleatoriedade", multi_ref("Randomness", "Randomness")),
        ("Wave", "Onda", multi_ref("Wave", "Waves")),
        ("Level", "Nivel", multi_ref("Level", "Levels")),
        ("Spawn Point", "Ponto de Início", multi_ref("Spawnpoint", "Spawn_Points")),
        ("To Shoot", "Atirar", multi_ref("To_Shoot", "Aim_%26_Shoot")),
        ("To Evade", "Desviar", multi_ref("To_Evade", "Evade")),
        ("To Traverse", "Cruzar", multi_ref("To_Traverse", "Traverse")),
        ("To Visit", "Visitar", multi_ref("To_Visit", "Visits", ambiguity=True)),
        ("To Capture", "Capturar", multi_ref("To_Capture", "Capture")),
        ("Optional Goals", "Objetivos Opcionais",
         multi_ref("Optional_Goals", "Optional_Goals")),
        ("Checkpoint", "Checkpoint", multi_ref("Spatial_Checkpoint", "Check_Points")),
        ("Head Up Display (HUD)", "Interface HUD",
         multi_ref("Head_Up_Display", "HUD_Interfaces")),
    ]
    for en_name, pt_name, refs in entries:
        create_element(en_name, pt_name, refs)
|
import redis
import boto3
import hashlib
import os
import base64
from pymongo import MongoClient
# Scratch script: generate a random short id, store it in Redis, print it.
connection = MongoClient("localhost:32769")

# 9 random bytes -> 12 URL-safe base64 chars with no '=' padding;
# short_id drops the last 3 chars.  (Renamed from `id`/`ids`, which
# shadowed the builtin, and `re`, which shadowed the stdlib module name.)
token = base64.urlsafe_b64encode(os.urandom(9)).decode('utf-8')
short_id = token[:-3]

redis_client = redis.Redis(host='localhost', port=32770, db=0)
key = 'name' + '_' + short_id
redis_client.set(key, token)
# BUG FIX: the original printed redis_client.get('name') — a key that was
# never written — so it always printed None.  Read back the key we set.
print(redis_client.get(key))
print(token)
print(short_id)
# id = base64.urlsafe_b64encode(os.urandom(9)).decode('utf-8')
#
# r = redis.StrictRedis(host='', port=6379, db=0)
# r.set('mullvad', '3844909252002295')
# out = r.get('mullvad')
# print out
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2015-2017 Wind River Systems, Inc.
#
from oslo_serialization import jsonutils
from nova.api.openstack import wsgi as os_wsgi
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'

# Canned network-info cache shared by every stubbed instance: two vifs,
# one on a private and one on a public network, each with a fixed IP and
# an attached floating IP.
NW_CACHE = [
    {
        'address': 'aa:aa:aa:aa:aa:aa',
        'id': 1,
        'vif_model': 'virtio',
        'network': {
            'bridge': 'br0',
            'id': 1,
            'label': 'private',
            'subnets': [
                {
                    'cidr': '192.168.1.0/24',
                    'ips': [
                        {
                            'address': '192.168.1.100',
                            'type': 'fixed',
                            'floating_ips': [
                                {'address': '5.0.0.1', 'type': 'floating'},
                            ],
                        },
                    ],
                },
            ]
        }
    },
    {
        'address': 'bb:bb:bb:bb:bb:bb',
        'id': 2,
        'vif_model': None,
        'network': {
            'bridge': 'br1',
            'id': 2,
            'label': 'public',
            'subnets': [
                {
                    'cidr': '10.0.0.0/24',
                    'ips': [
                        {
                            'address': '10.0.0.100',
                            'type': 'fixed',
                            'floating_ips': [
                                {'address': '5.0.0.2', 'type': 'floating'},
                            ],
                        }
                    ],
                },
            ]
        }
    }
]

# Expected NIC list derived from NW_CACHE: one single-key dict per vif,
# keyed 'nic1', 'nic2', ... in cache order.
ALL_NICS = [
    {'nic%d' % (index + 1): {'port_id': cache['id'],
                             'mac_address': cache['address'],
                             'vif_model': cache['vif_model'],
                             'vif_pci_address': '',
                             'mtu': None,  # only available from neutron in real env
                             'network': cache['network']['label']}}
    for index, cache in enumerate(NW_CACHE)
]
def fake_compute_get(*args, **kwargs):
    """Stub for compute API get(): one instance carrying the canned nw_cache."""
    return fakes.stub_instance_obj(None, 1, uuid=UUID3, nw_cache=NW_CACHE)
def fake_compute_get_all(*args, **kwargs):
    """Stub for compute API get_all(): two instances sharing the nw_cache."""
    stubs = [
        fakes.stub_instance_obj(None, index, uuid=uid, nw_cache=NW_CACHE)
        for index, uid in ((1, UUID1), (2, UUID2))
    ]
    return objects.InstanceList(objects=stubs)
class WrsServerIfTestV21(test.TestCase):
    """Verifies the wrs-if API extension exposes 'wrs-if:nics' on servers."""

    content_type = 'application/json'
    prefix = 'wrs-if'
    _prefix = "/v2/fake"
    wsgi_api_version = os_wsgi.DEFAULT_API_VERSION

    def setUp(self):
        """Stub the network and compute APIs with the canned fakes above."""
        super(WrsServerIfTestV21, self).setUp()
        self.flags(use_neutron=False)
        fakes.stub_out_nw_api(self)
        # Both get and get_all return instances built from NW_CACHE.
        self.stub_out('nova.compute.api.API.get', fake_compute_get)
        self.stub_out('nova.compute.api.API.get_all', fake_compute_get_all)
        return_server = fakes.fake_instance_get()
        self.stub_out('nova.db.instance_get_by_uuid', return_server)

    def _make_request(self, url):
        # Issue a GET against the v2.1 WSGI app and return the raw response.
        req = fakes.HTTPRequest.blank(url)
        req.accept = self.content_type
        res = req.get_response(self._get_app())
        return res

    def _get_app(self):
        return fakes.wsgi_app_v21()

    def _get_server(self, body):
        # Single-server payload: {'server': {...}}.
        return jsonutils.loads(body).get('server')

    def _get_servers(self, body):
        # List payload: {'servers': [...]}.
        return jsonutils.loads(body).get('servers')

    def _get_nics(self, server):
        return server['wrs-if:nics']

    def assertServerNics(self, server):
        # Every stubbed instance shares NW_CACHE, so its NICs must equal
        # the precomputed ALL_NICS.
        self.assertEqual(ALL_NICS, self._get_nics(server))

    def test_show(self):
        """GET /servers/<uuid> includes the per-vif NIC details."""
        url = self._prefix + '/servers/%s' % UUID3
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        self.assertServerNics(self._get_server(res.body))

    def test_detail(self):
        """GET /servers/detail includes NIC details for every server."""
        url = self._prefix + '/servers/detail'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        for i, server in enumerate(self._get_servers(res.body)):
            self.assertServerNics(server)
|
# Flask / SQLAlchemy development settings.
# NOTE(review): dev-only secret key — must be overridden in production.
SECRET_KEY = 'development key'
DEBUG = True
# NOTE(review): SQLAlchemy >= 1.4 rejects the 'postgres://' scheme in favor
# of 'postgresql://' — confirm the pinned SQLAlchemy version before upgrading.
SQLALCHEMY_DATABASE_URI = 'postgres://postgres@localhost/monkeybook'
SQLALCHEMY_ECHO = False
|
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from _hardware import HardwareException, Expectation
from _hardware_android import HardwareAndroid
# Fixed CPU frequency the benchmark pins all online cores to.
CPU_CLOCK_RATE = 1326000
# If you run adb cat /sys/devices/57000000.gpu/pstate it shows all
# possible configurations, with a * next to the current one.
GPU_EMC_PROFILE = '04: core 307 MHz emc 1065 MHz a A d D *'
# The id written back to pstate to select the profile above.
GPU_EMC_PROFILE_ID = '04'
class HardwarePixelC(HardwareAndroid):
    """Benchmarking hardware setup for the Pixel C (Tegra 'ryu') device.

    Locks CPU and GPU/EMC clocks over adb (root required) so benchmark
    results are reproducible, and sanity-checks thermal/clock state.
    """

    def __init__(self, adb):
        HardwareAndroid.__init__(self, adb)

    def __enter__(self):
        """Enter benchmarking state: pin clocks and disable one CPU core."""
        HardwareAndroid.__enter__(self)
        if not self._adb.is_root():
            # Without root we cannot write sysfs; run with stock settings.
            return self
        self._adb.shell('\n'.join([
            # pylint: disable=line-too-long
            # Based on https://android.googlesource.com/platform/frameworks/base/+/master/libs/hwui/tests/scripts/prep_ryu.sh
            # All CPUs have the same scaling settings, so we only need to set it once
            '''
            stop thermal-engine
            stop perfd
            echo userspace > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
            echo %i > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
            echo %i > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
            echo %i > /sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed
            ''' % tuple(CPU_CLOCK_RATE for _ in range(3)),
            # turn off the fourth core. This will hopefully produce less heat, allowing
            # for more consistent results. 3 cores should be enough to run Ganesh,
            # the graphics driver, and the OS.
            '''
            echo 0 > /sys/devices/system/cpu/cpu3/online''',
            # lock gpu/emc clocks.
            '''
            chown root:root /sys/devices/57000000.gpu/pstate
            echo %s > /sys/devices/57000000.gpu/pstate''' % GPU_EMC_PROFILE_ID]))
        return self

    def filter_line(self, line):
        # Known-harmless driver chatter to drop before delegating upward.
        JUNK = ['NvRmPrivGetChipPlatform: Could not read platform information',
                'Expected on kernels without fuse support, using silicon']
        return False if line in JUNK else HardwareAndroid.filter_line(self, line)

    def sanity_check(self):
        """Verify battery, thermal and clock state match the pinned config."""
        HardwareAndroid.sanity_check(self)
        if not self._adb.is_root():
            return
        # only issue one shell command in an attempt to minimize interference.
        result = self._adb.check('''\
cat /sys/class/power_supply/bq27742-0/capacity \
    /sys/devices/system/cpu/online \
    /sys/class/thermal/thermal_zone7/temp \
    /sys/class/thermal/thermal_zone0/temp \
    /sys/class/thermal/thermal_zone1/temp \
    /sys/class/thermal/thermal_zone7/cdev1/cur_state \
    /sys/class/thermal/thermal_zone7/cdev0/cur_state
for N in 0 1 2; do
  cat /sys/devices/system/cpu/cpu$N/cpufreq/scaling_cur_freq
done
cat /sys/devices/57000000.gpu/pstate | grep \*$''')
        # Expectations line up 1:1 with the output lines of the shell above.
        expectations = \
            [Expectation(int, min_value=30, name='battery', sleeptime=30*60),
             Expectation(str, exact_value='0-2', name='online cpus'),
             Expectation(int, max_value=40000, name='skin temperature'),
             Expectation(int, max_value=86000, name='cpu temperature'),
             Expectation(int, max_value=87000, name='gpu temperature'),
             Expectation(int, exact_value=0, name='cpu throttle'),
             Expectation(int, exact_value=0, name='gpu throttle')] + \
            [Expectation(int, exact_value=CPU_CLOCK_RATE,
                         name='cpu_%i clock rate' % i, sleeptime=30)
             for i in (0, 1, 2)] + \
            [Expectation(str, exact_value=GPU_EMC_PROFILE, name='gpu/emc profile')]
        Expectation.check_all(expectations, result.splitlines())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.