content stringlengths 5 1.05M |
|---|
import gzip
import os
import random
import tempfile
import uuid
from io import BytesIO
from unittest import TestCase
import corehq.blobs.util as mod
from corehq.blobs.exceptions import GzipStreamError
class TestRandomUrlId(TestCase):
    """Tests for ``corehq.blobs.util.random_url_id``."""

    # Number of ids generated per test run; large enough to make a
    # collision in test_random_id_randomness very unlikely by accident.
    sample_size = 100

    def setUp(self):
        # Generate a fresh batch of 8-byte random url ids for each test.
        self.ids = [mod.random_url_id(8) for x in range(self.sample_size)]

    def test_random_id_length(self):
        # Ids are base64-ish encodings of 8 random bytes: never empty, and
        # at most 11 characters (8 bytes -> ceil(8*8/6) = 11 chars unpadded).
        self.assertGreater(min(len(id) for id in self.ids), 0, self.ids)
        self.assertEqual(max(len(id) for id in self.ids), 11, self.ids)

    def test_random_id_randomness(self):
        # All generated ids should be distinct.
        self.assertEqual(len(set(self.ids)), self.sample_size, self.ids)
class TestGzipStream(TestCase):
    """Tests for ``corehq.blobs.util.GzipStream``.

    ``GzipStream`` wraps a readable file object and yields gzip-compressed
    bytes; its ``content_length`` property reports the *uncompressed* size
    and may only be read after the stream has been fully consumed.
    """

    def test_compression(self):
        # Build content spanning several internal chunks so that multiple
        # compress/flush cycles are exercised.
        desired_size = mod.GzipStream.CHUNK_SIZE * 4
        content = uuid.uuid4().bytes * 4
        while len(content) < desired_size:
            content += uuid.uuid4().bytes * 4
        compress_stream = mod.GzipStream(BytesIO(content))
        with tempfile.NamedTemporaryFile() as compressed_f:
            compressed_f.write(compress_stream.read())
            compressed_f.flush()
            # Round-trip: decompressing the written file must reproduce
            # the original content exactly.
            with gzip.open(compressed_f.name, 'r') as reader:
                actual = reader.read()
            file_size = os.stat(compressed_f.name).st_size
            self.assertGreater(len(content), file_size)
            self.assertEqual(content, actual)
            # content_length is the size of the *uncompressed* input.
            self.assertEqual(len(content), compress_stream.content_length)

    def test_content_length_access(self):
        with tempfile.NamedTemporaryFile() as f:
            f.write(b"x" * 11)
            f.seek(0)
            compress_stream = mod.GzipStream(f)
            # Try to read content_length without reading the stream
            with self.assertRaises(GzipStreamError):
                compress_stream.content_length  # noqa
            # Try to read content_length after partially reading the stream
            content_length = len(compress_stream.read(5))
            with self.assertRaises(GzipStreamError):
                compress_stream.content_length  # noqa
            # Read content_length after completely reading the stream and check
            # that it's correct
            content_length += len(compress_stream.read())
            # NOTE: the local `content_length` counts *compressed* bytes read,
            # which differs from the uncompressed size (11) reported below.
            self.assertNotEqual(compress_stream.content_length, content_length)
            self.assertEqual(compress_stream.content_length, 11)

    def test_content_length_0(self):
        # NOTE invariant based on GzipFile implementation
        zipper = mod.GzipStream(BytesIO(b""))
        zipper.read(10)
        # A 10-byte read of empty input leaves the internal buffer empty,
        # so the stream is not yet "fully read" from its own point of view.
        assert zipper._buf.size == 0, f"invariant failed ({zipper._buf.size})"
        with self.assertRaises(GzipStreamError):
            zipper.content_length
        zipper.read()
        self.assertEqual(zipper.content_length, 0)

    def test_content_length_1(self):
        # NOTE invariant based on GzipFile implementation
        zipper = mod.GzipStream(BytesIO(b"x"))
        zipper.read(10)
        assert zipper._buf.size == 0, f"invariant failed ({zipper._buf.size})"
        with self.assertRaises(GzipStreamError):
            zipper.content_length
        zipper.read()
        self.assertEqual(zipper.content_length, 1)

    def test_content_length_10x_chunk(self):
        # NOTE invariant based on GzipFile implementation
        # Seed the RNG for reproducible data; restore global state afterwards.
        self.addCleanup(random.seed)
        random.seed(42)
        size = mod.GzipStream.CHUNK_SIZE * 10
        data = bytes(random.getrandbits(8) for _ in range(size))
        zipper = mod.GzipStream(BytesIO(data))
        # 16405 is an empirically chosen partial-read size that leaves the
        # internal buffer exactly empty with this seed/data.
        zipper.read(16405)
        assert zipper._buf.size == 0, f"invariant failed ({zipper._buf.size})"
        with self.assertRaises(GzipStreamError):
            zipper.content_length
        zipper.read()
        self.assertEqual(zipper.content_length, size, "bad content length")

    def test_content_length_after_partial_read_and_close(self):
        # NOTE invariant based on GzipFile implementation
        zipper = mod.GzipStream(BytesIO(b""))
        zipper.read(1)
        # Here the buffer is non-empty: close() before a full read must
        # still leave content_length inaccessible.
        assert zipper._buf.size, f"invariant failed ({zipper._buf.size})"
        zipper.close()
        with self.assertRaises(GzipStreamError):
            zipper.content_length

    def test_content_length_after_full_read_and_close(self):
        zipper = mod.GzipStream(BytesIO(b"x"))
        zipper.read()
        zipper.close()
        # Fully consumed before close: content_length stays available.
        self.assertEqual(zipper.content_length, 1)
|
from itertools import product
from datetime import datetime
from django.db import IntegrityError
from django.urls import reverse
from .base import TestBase
from ..models import Task, Event, Role, Person, Organization
class TestTask(TestBase):
    "Tests for the task model, its manager and views"

    def setUp(self):
        """Create a host, two people, three events, several roles and the
        tasks connecting them; stash the two named tasks in self.fixtures."""
        self.fixtures = {}
        test_host = Organization.objects.create(domain='example.com',
                                                fullname='Test Organization')
        test_person_1 = Person.objects.create(personal='Test',
                                              family='Person1',
                                              username="person1")
        test_person_2 = Person.objects.create(personal='Test',
                                              family='Person2',
                                              username="person2")
        test_event_1 = Event.objects.create(start=datetime.now(),
                                            slug='test_event_1',
                                            host=test_host,
                                            admin_fee=0)
        test_event_2 = Event.objects.create(start=datetime.now(),
                                            slug='test_event_2',
                                            host=test_host,
                                            admin_fee=0)
        test_event_3 = Event.objects.create(start=datetime.now(),
                                            slug='test_event_3',
                                            host=test_host,
                                            admin_fee=0)
        instructor_role = Role.objects.create(name="instructor")
        learner_role = Role.objects.create(name="learner")
        helper_role = Role.objects.create(name="helper")
        roles = [instructor_role, learner_role, helper_role]
        people = [test_person_1, test_person_2]
        # Every (role, person) combination gets a task on event 3; used by
        # test_task_manager_roles_lookup below.
        for role, person in product(roles, people):
            Task.objects.create(person=person, role=role, event=test_event_3)
        test_role_1 = Role.objects.create(name='test_role_1')
        test_role_2 = Role.objects.create(name='test_role_2')
        test_task_1 = Task.objects.create(person=test_person_1,
                                          event=test_event_1,
                                          role=test_role_1)
        test_task_2 = Task.objects.create(person=test_person_2,
                                          event=test_event_2,
                                          role=test_role_2)
        self.fixtures['test_task_1'] = test_task_1
        self.fixtures['test_task_2'] = test_task_2
        # Log in so the task views are reachable.
        self._setUpUsersAndLogin()

    def test_task_detail_view_reachable_from_event_person_and_role_of_task(self):
        correct_task = self.fixtures['test_task_1']
        response = self.client.get(reverse('task_details', args=[str(correct_task.id)]))
        assert response.context['task'].pk == correct_task.pk

    def test_add_task_with_correct_url(self):
        '''Ensure that task can be saved with correct URL field'''
        task = self.fixtures['test_task_1']
        payload = {
            'event': task.event.pk,
            'person': task.person.pk,
            'role': task.role.pk,
            'title': 'Task title',
            'url': 'http://example.org',
        }
        response = self.client.post(
            reverse('task_edit', kwargs={'task_id': task.pk}),
            payload,
            follow=True
        )
        # A successful edit redirects to the detail view.
        self.assertRedirects(
            response,
            reverse('task_details', kwargs={'task_id': task.pk})
        )
        task.refresh_from_db()
        self.assertEqual(task.url, 'http://example.org')
        self.assertEqual(response.context['task'].url, 'http://example.org')

    def test_add_task_with_incorrect_url(self):
        '''Ensure that a task object cannot be saved with incorrect URL field'''
        task = self.fixtures['test_task_1']
        payload = {
            'event': task.event.pk,
            'person': task.person.pk,
            'role': task.role.pk,
            'title': 'Task title',
            'url': 'htp://example.org',
        }
        response = self.client.post(
            reverse('task_edit', kwargs={'task_id': task.pk}),
            payload,
        )
        # Form validation fails: the page re-renders (200) and the task
        # keeps its original (empty) url.
        self.assertEqual(response.status_code, 200)
        task.refresh_from_db()
        self.assertEqual(task.url, '')

    def test_add_duplicate_task(self):
        '''Ensure that duplicate tasks with empty url field cannot exist'''
        task_1 = self.fixtures['test_task_1']
        with self.assertRaises(IntegrityError):
            Task.objects.create(
                event=task_1.event,
                person=task_1.person,
                role=task_1.role,
            )

    def test_add_duplicate_task_with_url(self):
        '''Ensure that duplicate tasks cannot exist'''
        task_1 = self.fixtures['test_task_1']
        task_1.url = 'http://example.org'
        task_1.save()
        with self.assertRaises(IntegrityError):
            Task.objects.create(
                event=task_1.event,
                person=task_1.person,
                role=task_1.role,
                url=task_1.url,
            )

    def test_task_edit_view_reachable_from_event_person_and_role_of_task(self):
        correct_task = self.fixtures['test_task_1']
        url_kwargs = {'task_id': correct_task.id}
        response = self.client.get(reverse('task_edit',
                                           kwargs=url_kwargs))
        assert response.context['task'].pk == correct_task.pk

    def test_task_manager_roles_lookup(self):
        """Test TaskManager methods for looking up roles by names."""
        event = Event.objects.get(slug='test_event_3')
        instructors = event.task_set.instructors()
        learners = event.task_set.learners()
        helpers = event.task_set.helpers()
        tasks = event.task_set.all()
        # The three role-specific querysets together cover every task.
        assert set(tasks) == set(instructors) | set(learners) | set(helpers)

    def test_delete_task(self):
        """Make sure deleted task is longer accessible."""
        for task in Task.objects.all():
            rv = self.client.post(reverse('task_delete', args=[task.pk, ]))
            # Deletion redirects (302) and the row is gone afterwards.
            assert rv.status_code == 302
            with self.assertRaises(Task.DoesNotExist):
                Task.objects.get(pk=task.pk)
|
"""An async reimplementation of the blocking elements from botocore.retries.bucket."""
import asyncio
from botocore.exceptions import CapacityNotAvailableError
from botocore.retries.bucket import Clock as Clock # reexport # noqa
class AsyncTokenBucket:
    """A reimplementation of TokenBucket that doesn't block."""
    # Most of the code here is pulled straight up from botocore, with slight changes
    # to the interface to switch to async methods.
    # This class doesn't inherit from the botocore TokenBucket, as the interface is
    # different: the `max_rate` setter in the original class is replaced by the
    # async `set_max_rate`.
    # (a Python setter can't be async).

    # Fill rates below this are clamped up to keep progress possible.
    _MIN_RATE = 0.5

    def __init__(self, max_rate, clock, min_rate=_MIN_RATE):
        self._fill_rate = None
        self._max_capacity = None
        self._current_capacity = 0
        # Clock must expose current_time() (see botocore.retries.bucket.Clock).
        self._clock = clock
        self._last_timestamp = None
        self._min_rate = min_rate
        self._set_max_rate(max_rate)
        # The main difference between this implementation and the botocore TokenBucket
        # implementation is replacing a threading.Condition by this asyncio.Condition.
        self._new_fill_rate_condition = asyncio.Condition()

    @property
    def max_rate(self):
        """Current fill rate (tokens per second)."""
        return self._fill_rate

    async def set_max_rate(self, value):
        """Change the fill rate, waking any waiter so it can recompute its sleep."""
        async with self._new_fill_rate_condition:
            self._set_max_rate(value)
            self._new_fill_rate_condition.notify()

    def _set_max_rate(self, value):
        # Before we can change the rate we need to fill any pending
        # tokens we might have based on the current rate. If we don't
        # do this it means everything since the last recorded timestamp
        # will accumulate at the rate we're about to set which isn't
        # correct.
        self._refill()
        self._fill_rate = max(value, self._min_rate)
        if value >= 1:
            self._max_capacity = value
        else:
            # Sub-1 rates still allow a single token to accumulate.
            self._max_capacity = 1
        # If we're scaling down, we also can't have a capacity that's
        # more than our max_capacity.
        self._current_capacity = min(
            self._current_capacity,
            self._max_capacity
        )

    @property
    def max_capacity(self):
        return self._max_capacity

    @property
    def available_capacity(self):
        return self._current_capacity

    async def acquire(self, amount=1, block=True):
        """Acquire token or return amount of time until next token available.
        If block is True, then this method will return when there's sufficient
        capacity to acquire the desired amount. This won't block the event loop.
        If block is False, then this method will return True if capacity
        was successfully acquired, False otherwise.
        """
        async with self._new_fill_rate_condition:
            return await self._acquire(amount=amount, block=block)

    async def _acquire(self, amount, block):
        # Caller holds self._new_fill_rate_condition.
        self._refill()
        if amount <= self._current_capacity:
            self._current_capacity -= amount
            return True
        else:
            if not block:
                # NOTE(review): the acquire() docstring says non-blocking mode
                # returns False, but this raises instead — confirm intended.
                raise CapacityNotAvailableError()
            # Not enough capacity.
            sleep_amount = self._sleep_amount(amount)
            while sleep_amount > 0:
                try:
                    # Wait either until the estimated refill time elapses or
                    # until set_max_rate() notifies us of a new fill rate.
                    await asyncio.wait_for(
                        self._new_fill_rate_condition.wait(), sleep_amount
                    )
                except asyncio.TimeoutError:
                    pass
                self._refill()
                sleep_amount = self._sleep_amount(amount)
            self._current_capacity -= amount
            return True

    def _sleep_amount(self, amount):
        # Seconds until `amount` tokens will be available at the current rate.
        return (amount - self._current_capacity) / self._fill_rate

    def _refill(self):
        # Accrue tokens for the time elapsed since the last refill, capped
        # at max_capacity.
        timestamp = self._clock.current_time()
        if self._last_timestamp is None:
            self._last_timestamp = timestamp
            return
        current_capacity = self._current_capacity
        fill_amount = (timestamp - self._last_timestamp) * self._fill_rate
        new_capacity = min(self._max_capacity, current_capacity + fill_amount)
        self._current_capacity = new_capacity
        self._last_timestamp = timestamp
|
# 96
# Faça um programa que tenha uma função chamada área(), que receba as dimensões de um terreno retangular (largura e comprimento) e mostre a área do terreno.
def area(larg, comp):
    """Compute, print and return the area of a rectangular plot.

    :param larg: width of the plot in metres
    :param comp: length of the plot in metres
    :return: the computed area (larg * comp)
    """
    # Use a distinct local name: the original shadowed the function name
    # with a local variable also called `area`.
    resultado = larg * comp
    print(f'A área de um terreno de \33[36m{larg}\33[m x \33[36m{comp}\33[m é de \33[36m{resultado}\33[m m²')
    # Returning the value (backward compatible: original returned None,
    # which no caller in this script used) makes the function reusable.
    return resultado
# --- Interactive driver: read plot dimensions and report the area. ---
print('Controle de Terrenos')
print('-' * 20)
largura = float(input('LARGURA (m): '))
# NOTE(review): this prompt lacks the trailing space the previous one has —
# confirm whether that inconsistency is intended.
comprimento = float(input('COMPRIMENTO (m):'))
area(largura, comprimento)
from django.shortcuts import render, redirect
from .models import Url, BoundingBox
import cv2
from imutils import url_to_image
# Haar cascade used for frontal-face detection in index() below.
# NOTE(review): the cascade XML is loaded from the filesystem root ('/...');
# presumably this matches the deployment layout — verify, because OpenCV does
# not raise on a missing file and detectMultiScale would silently find nothing.
face_cascade = cv2.CascadeClassifier('/haarcascade_frontalface_default.xml')
def index(request):
    """Face index view.

    GET: render the list of previously submitted image URLs.
    POST: store the submitted image URL, run face detection on the image and
    persist one BoundingBox per detected face, then redirect to '/face'.
    """
    if request.method == 'POST':
        # objects.create() already INSERTs the row; the original code's
        # extra .save() calls only issued redundant UPDATE queries.
        url = Url.objects.create(image_url=request.POST.get('image_url'))
        img = url_to_image(request.POST.get('image_url'))
        ih, iw, _ = img.shape
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            # Convert pixel coordinates to percentage insets from each image
            # edge, rounded to two decimals.
            top = round(y * 100 / ih, 2)
            right = round((iw - x - w) * 100 / iw, 2)
            left = round(x * 100 / iw, 2)
            bottom = round((ih - y - h) * 100 / ih, 2)
            BoundingBox.objects.create(top=top,
                                       right=right,
                                       left=left,
                                       bottom=bottom,
                                       image=url)
        return redirect('/face')
    image_urls = Url.objects.all()
    context = {'image_urls': image_urls}
    return render(request, 'face/index.html', context=context)
def delete(request, url_id):
    """Delete the Url row with primary key *url_id* and return to the face page."""
    Url.objects.get(pk=url_id).delete()
    return redirect('/face')
from armulator.armv6.opcodes.abstract_opcode import AbstractOpcode
from armulator.armv6.bits_ops import add_with_carry
from armulator.armv6.enums import InstrSet
class SubsPcLrThumb(AbstractOpcode):
    """SUBS PC, LR (Thumb encoding): exception-return style subtract of an
    immediate from register n, writing the result to the PC."""

    def __init__(self, imm32, n):
        super(SubsPcLrThumb, self).__init__()
        # Immediate operand to subtract.
        self.imm32 = imm32
        # Index of the source register.
        self.n = n

    def execute(self, processor):
        if processor.condition_passed():
            # Unpredictable in User/System mode or in ThumbEE state.
            if (processor.registers.current_mode_is_user_or_system() or
                    processor.registers.current_instr_set() == InstrSet.InstrSet_ThumbEE):
                # FIX: `print "..."` is Python-2-only syntax; print() calls
                # behave identically on Python 2 and are valid on Python 3.
                print("unpredictable")
            else:
                operand2 = self.imm32
                # R[n] - operand2, implemented as R[n] + ~operand2 + 1.
                result = add_with_carry(processor.registers.get(self.n), ~operand2, "1")[0]
                # Unpredictable when returning to Hyp mode ("0b11010") with
                # both J and T set (invalid instruction-set state).
                if (processor.registers.cpsr.get_m() == "0b11010" and
                        processor.registers.cpsr.get_j() and
                        processor.registers.cpsr.get_t()):
                    print("unpredictable")
                else:
                    processor.branch_write_pc(result)
|
"""
Copyright 2020 TrialDataSolutions
"""
import requests
import json
class CastorApi(object):
    """Entry point for the Castor EDC REST API wrapper.

    Holds connection settings and the OAuth access token, and exposes the
    endpoint helpers (sessions, studies, records, users) as attributes.
    """

    def __init__(self, config):
        '''
        Takes a config-dictionary as parameter. To create an object of this class
        the dictionary should at least have Values for Keys api_url, client_id and client_secret
        '''
        # default to zero length strings
        self.url = 'https://api_url.in.config'
        self.client_id = ''
        self.client_secret = ''
        try:
            self.url = config['api_url']
            self.client_id = config['client_id']
            self.client_secret = config['client_secret']
        except Exception as _error:
            # Deliberately best-effort: a bad/missing config key is printed
            # and the defaults above remain in effect.
            print(_error)
        self.headers = {"content-type": "application/x-www-form-urlencoded"}
        # Placeholder token; replaced by sessions.get_access_token().
        self.access_token = 'x'
        # Endpoint helpers all share this object (and hence its token).
        self.utils = _Utils(self)
        self.sessions = _Sessions(self)
        self.studies = _Studies(self)
        self.study = _Study(self)
        self.records = _Records(self)
        self.users = _Users(self)
class _Utils(object):
    """Shared low-level HTTP helper used by all Castor endpoint classes."""

    def __init__(self, castor_api):
        self.api = castor_api

    def request(self, data=None, request_type=None, url=None, headers=None, verbose=False):
        """
        Return the result of an API call, or None.
        Exceptions are logged rather than raised.
        Parameters
        :param data: Method name and parameters to send to the API.
        :type data: String
        :param url: Location of the LimeSurvey API endpoint.
        :type url: String
        :param headers: HTTP headers to add to the request.
        :type headers: Dict
        :param request_type: either post or get
        Return
        :return: response of API call, or None.
        """
        if url is None:
            url = self.api.url
        if headers is None:
            headers = self.api.headers
        # by default return nothing
        return_value = None
        if verbose:
            print("pre url= %s " % url)
            print("pre headers= %s " % headers)
            print("pre data= %s " % data)
            print("pre type= %s \n" % request_type)
        try:
            # BUG FIX: previously `response` was never assigned when
            # request_type was neither 'post' nor 'get', so the function
            # crashed with NameError instead of returning None.
            response = None
            if request_type == 'post':
                response = requests.post(url, headers=headers, data=data)
            elif request_type == 'get':
                response = requests.get(url, headers=headers, data=data)
            if response is not None and verbose:
                print("req url = %s " % response.request.url)
                print("req headers = %s " % response.request.headers)
                print("req body = %s " % response.request.body)
                print("resp status code= %s " % response.status_code)
                print("resp text = %s \n" % response.text)
            return_value = response
        except requests.ConnectionError as pe:
            # TODO: some handling here, for now just print pe
            print('when a request to the castor api was made, the following error was raised %s' % (pe))
            return_value = None
        return return_value
class _Records(object):
    '''
    endpoint called record, but containing information about all records in a study
    '''

    def __init__(self, castor_api):
        self.api = castor_api

    def list(self, study_id, verbose=False):
        """
        Get all records in json for the study with this study_id
        Set verbose=True to get the complete request plus response
        """
        my_url = self.api.url + "/api/study/" + study_id + "/record"
        my_authorization = "Bearer %s" % (self.api.access_token)
        my_headers = {'Authorization': my_authorization}
        response = self.api.utils.request(request_type='get', headers=my_headers, url=my_url, data=None, verbose=verbose)
        # Records accumulate here across all result pages.
        return_data = {'records': []}
        if response is not None:
            if response.status_code == 200:
                # Follow the HAL-style pagination links ('_links') until the
                # current page equals the last page.
                finished_looping = False
                while not finished_looping:
                    resp_json = json.loads(response.text)
                    for one_record in resp_json['_embedded']['records']:
                        return_data['records'].append(one_record)
                    # if the page count > 0 then go to the next page
                    if resp_json['page_count'] == 0:
                        finished_looping = True
                    else:
                        # first we must check if this page is the same as the last page
                        if resp_json['_links']['self']['href'] == resp_json['_links']['last']['href']:
                            # we're done, so stop looping
                            finished_looping = True
                        else:
                            # go to the next url
                            my_url = resp_json['_links']['next']['href']
                            response = self.api.utils.request(request_type='get', headers=my_headers, url=my_url, data=None, verbose=verbose)
                            # NOTE(review): if a mid-pagination request fails
                            # (response None), the next loop iteration would
                            # raise — assumes the API stays reachable; verify.
        return return_data
class _Sessions(object):
    """OAuth session handling for the Castor API."""

    def __init__(self, castor_api):
        self.api = castor_api

    def get_access_token(self, verbose=False):
        """
        Get an access token for all subsequent API calls.
        """
        token_url = self.api.url + "/oauth/token"
        token_data = "grant_type=client_credentials&client_id=%s&client_secret=%s" % (self.api.client_id, self.api.client_secret)
        response = self.api.utils.request(data=token_data, request_type='post', url=token_url, verbose=verbose)
        # Only a successful (HTTP 200) response carries a usable token.
        if response is not None and response.status_code == 200:
            self.api.access_token = json.loads(response.text)['access_token']
        return response
class _Study(object):
    '''
    endpoint called study; fetches information about a single study by id
    '''

    def __init__(self, castor_api):
        self.api = castor_api

    def list(self, study_id, verbose=False):
        """
        Get the study with this study_id as json (empty dict on failure).
        Set verbose=True to get the complete request plus response
        """
        my_url = self.api.url + "/api/study/" + study_id
        my_authorization = "Bearer %s" % (self.api.access_token)
        my_headers = {'Authorization': my_authorization}
        response = self.api.utils.request(request_type='get', headers=my_headers, url=my_url, data=None, verbose=verbose)
        # Default to an empty dict so failed requests return a valid value.
        resp_json = {}
        if response is not None:
            if response.status_code == 200:
                resp_json = json.loads(response.text)
        return resp_json
class _Studies(object):
    '''
    endpoint called study, but containing information about all studies in castor
    '''

    def __init__(self, castor_api):
        self.api = castor_api

    def list(self, verbose=False, complete_output=False):
        """
        Get all studies in json for the current user
        Set verbose=True to get the complete request plus response
        Set complete_output=True to get the complete response; if set to False
        you will skip the nodes ['_embedded']['study']
        """
        my_url = self.api.url + "/api/study"
        my_authorization = "Bearer %s" % (self.api.access_token)
        my_headers = {'Authorization': my_authorization}
        response = self.api.utils.request(request_type='get', headers=my_headers, url=my_url, data=None, verbose=verbose)
        # BUG FIX: resp_json was previously unbound when the request failed
        # (response None or status != 200), raising UnboundLocalError at the
        # return statement. Default to {} as _Study.list already does.
        resp_json = {}
        if response is not None:
            if response.status_code == 200:
                if complete_output:
                    resp_json = json.loads(response.text)
                else:
                    resp_json = json.loads(response.text)['_embedded']['study']
        return resp_json
class _Users(object):
    """User endpoint of the Castor API."""

    def __init__(self, castor_api):
        self.api = castor_api

    def list(self, user_id=None, verbose=False):
        """
        Retrieve a list of users the currently authenticated user is authorized to see.
        Default to own User.
        if a user_id is given, then only data about this user are returned
        :type user_id: String
        """
        my_url = self.api.url + "/api/user"
        # Narrow the request to one specific user when an id is supplied.
        if user_id is not None:
            my_url = "%s/%s" % (my_url, user_id)
        my_headers = {'Authorization': "Bearer %s" % (self.api.access_token)}
        return self.api.utils.request(request_type='get', headers=my_headers,
                                      url=my_url, data=None, verbose=verbose)
|
#!/usr/bin/env python
import math
import string
import subprocess
import roslib; roslib.load_manifest('mav_fcs')
import rospy
#===============================================================================
def getCpuUsage():
    """Return the 1-minute load average (rounded up) read from /proc/loadavg.

    Returns 0.0 if the file is empty/unparseable in an unexpected way.
    """
    usage = 0.0
    # Context manager guarantees the file is closed even on parse errors
    # (the original leaked the handle if float() raised).
    with open('/proc/loadavg', 'r') as f:
        line = f.readline()
    tokens = line.split(' ')
    if len(tokens) > 0:
        # FIX: string.atof was removed in Python 3; float() is equivalent
        # and works on Python 2 as well.
        usage = float(tokens[0])
    return math.ceil(usage)
def getRamUsage():
    """Return RAM usage as an integer percentage (0-100), rounded up.

    Parses the first two lines of /proc/meminfo (MemTotal, MemFree);
    returns 0.0 when the total cannot be determined.
    """
    usage = 0.0
    # Context manager replaces the manual open/close (the original leaked
    # the handle if parsing raised).
    with open('/proc/meminfo', 'r') as f:
        total = 0
        # First line: "MemTotal:  <kB> kB" — the number is the
        # second-to-last whitespace token.
        tokens = f.readline().split(' ')
        if len(tokens) > 2:
            # FIX: string.atoi was removed in Python 3; int() is equivalent.
            total = int(tokens[-2])
        free = 0
        # Second line: "MemFree:  <kB> kB".
        tokens = f.readline().split(' ')
        if len(tokens) > 2:
            free = int(tokens[-2])
    if total > 0:
        usage = math.ceil(((total - free) * 100.0) / total)
    return usage
def getStorageUsage():
    """Return the Use% of the first ext4 filesystem reported by ``df``.

    Returns 0.0 when df reports no ext4 filesystems (fewer than 3 output
    lines), otherwise the integer percentage.
    """
    usage = 0
    cmd = ['df', '-h', '--type=ext4']
    # FIX: universal_newlines=True makes communicate() return str on
    # Python 3 (the original got bytes there, and split('\n') raised).
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=True)
    out, err = p.communicate()
    lines = out.split('\n')
    if len(lines) < 3:
        return 0.0
    # First data row; the percentage ("NN%") is the second-to-last token.
    tokens = lines[1].split(' ')
    size = len(tokens)
    if size > 2:
        percentage = tokens[size - 2]
        percentage = percentage.replace("%", "")
        # FIX: string.atoi was removed in Python 3; int() is equivalent.
        usage = int(percentage)
    return usage
|
# Most of the code below is shamelessly copied from the six library from
# Benjamin Peterson
import sys
import types
PY3 = sys.version_info[0] == 3
def _import_module(name):
    """Import module, returning the module after the last dot."""
    # __import__('a.b') returns package 'a', so fetch the fully-qualified
    # submodule from sys.modules instead.
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its target lazily on first attribute access.

    Subclasses supply ``_resolve()``; the result is cached on the instance
    and the descriptor removes itself from the class.
    """

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        # Cache the resolved object directly on the instance so future
        # lookups bypass this descriptor.
        setattr(obj, self.name, result)
        # This is a bit ugly, but it avoids running this again.
        delattr(tp, self.name)
        return result
class MovedModule(_LazyDescr):
    """Lazy reference to a module whose name differs between Python 2 and 3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            # On Python 3 the attribute name doubles as the module name
            # unless an explicit new name is given.
            self.mod = name if new is None else new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute relocated between Python 2 and 3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            # Module defaults to the attribute name on Python 3.
            self.mod = name if new_mod is None else new_mod
            if new_attr is None:
                new_attr = name if old_attr is None else old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            # On Python 2 the attribute defaults to the descriptor name.
            self.attr = name if old_attr is None else old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
    """Lazy loading of moved objects"""
    # Populated externally with MovedModule/MovedAttribute descriptors.
|
from hestia.list_utils import to_list
from rest_framework.exceptions import ValidationError
from schemas import PersistenceConfig
from stores.exceptions import StoreNotFoundError
from stores.validators import validate_persistence_data, validate_persistence_outputs
def _set_persistence(instance, default_persistence_data=None, default_persistence_outputs=None):
    """Populate ``instance.persistence`` from its specification's environment
    refs, falling back to the given defaults, after validating both against
    the configured stores. No-op when persistence is already set."""
    if instance.persistence:
        return
    data_refs = None
    artifact_refs = None
    # Safely walk specification -> environment -> data_refs (any link may
    # be falsy/None).
    cond = (instance.specification and
            instance.specification.environment and
            instance.specification.environment.data_refs)
    if cond:
        data_refs = instance.specification.environment.data_refs
    cond = (instance.specification and
            instance.specification.environment and
            instance.specification.environment.artifact_refs)
    if cond:
        # TODO: this is a temp workaround until the finalized Polyflow version
        artifact_refs = to_list(instance.specification.environment.artifact_refs)[0]
    # Fall back to caller-supplied defaults when the spec provided nothing.
    if not data_refs and default_persistence_data:
        data_refs = default_persistence_data
    if not artifact_refs and default_persistence_outputs:
        artifact_refs = default_persistence_outputs
    # Validation may raise StoreNotFoundError; handled by set_persistence().
    persistence_data = validate_persistence_data(persistence_data=data_refs)
    persistence_outputs = validate_persistence_outputs(persistence_outputs=artifact_refs)
    persistence_config = PersistenceConfig(data=persistence_data, outputs=persistence_outputs)
    instance.persistence = persistence_config.to_dict()
def set_persistence(instance, default_persistence_data=None, default_persistence_outputs=None):
    """Set persistence on *instance*, translating missing-store errors into
    a DRF ValidationError for API callers."""
    try:
        _set_persistence(
            instance=instance,
            default_persistence_data=default_persistence_data,
            default_persistence_outputs=default_persistence_outputs,
        )
    except StoreNotFoundError as e:
        raise ValidationError(e)
|
#! /usr/bin/env python3
import sys
import os
import argparse
# Build a docsite stub that front-matters the markdown file's title and
# includes the original file by absolute path.
parser = argparse.ArgumentParser()
# FIX: both arguments are mandatory for the script to work; previously a
# missing -f/-o crashed with a TypeError inside os.path instead of showing
# a proper usage error.
parser.add_argument("-f", "--file", dest="file", type=str, required=True,
                    help="Markdown location (within repository)")
parser.add_argument("-o", "--output", dest="output", type=str, required=True,
                    help="Output docsite file")
args = parser.parse_args()

# The page title is the markdown filename without its extension.
base = os.path.basename(args.file)
title = os.path.splitext(base)[0]

output = '''---
title: {0}
---
{{% include_absolute {1} %}}
'''.format(title, args.file)

with open(args.output, "w") as output_file:
    output_file.write(output)
|
# -*- coding: utf-8 -*-
"""
The view of the hive-node and vault management.
"""
from flask_restful import Resource
from src.modules.provider.provider import Provider
class Vaults(Resource):
    """REST resource listing every vault on this hive node."""

    def __init__(self):
        # Provider service shared by all management endpoints.
        self.provider = Provider()

    def get(self):
        """ Get all vault information in this hive node.

        .. :quickref: 09 Provider; Get Vaults

        **Request**:

        .. sourcecode:: http

            None

        **Response OK**:

        .. sourcecode:: http

            HTTP/1.1 200 OK

        .. code-block:: json

            {
                "vaults": [{
                    "pricing_using": <pricing name|str>,
                    "max_storage": <int>,
                    "file_use_storage": <int>,
                    "db_use_storage": <int>,
                    "user_did": <str>,
                }]
            }

        **Response Error**:

        .. sourcecode:: http

            HTTP/1.1 400 Bad Request

        .. sourcecode:: http

            HTTP/1.1 401 Unauthorized

        .. sourcecode:: http

            HTTP/1.1 403 Forbidden

        .. sourcecode:: http

            HTTP/1.1 404 Not Found

        """
        return self.provider.get_vaults()
class Backups(Resource):
    """REST resource listing every backup on this hive node."""

    def __init__(self):
        # Provider service shared by all management endpoints.
        self.provider = Provider()

    def get(self):
        """ Get all backup information in this hive node.

        .. :quickref: 09 Provider; Get Backups

        **Request**:

        .. sourcecode:: http

            None

        **Response OK**:

        .. sourcecode:: http

            HTTP/1.1 200 OK

        .. code-block:: json

            {
                "backups": [{
                    "pricing_using": <pricing name|str>,
                    "max_storage": <int>,
                    "use_storage": <int>,
                    "user_did": <user did|str>,
                }]
            }

        **Response Error**:

        .. sourcecode:: http

            HTTP/1.1 400 Bad Request

        .. sourcecode:: http

            HTTP/1.1 401 Unauthorized

        .. sourcecode:: http

            HTTP/1.1 403 Forbidden

        .. sourcecode:: http

            HTTP/1.1 404 Not Found

        """
        return self.provider.get_backups()
class FilledOrders(Resource):
    """REST resource listing every paid (filled) order on this hive node."""

    def __init__(self):
        # Provider service shared by all management endpoints.
        self.provider = Provider()

    def get(self):
        """ Get all payment information in this hive node.

        .. :quickref: 09 Provider; Get Payments

        **Request**:

        .. sourcecode:: http

            None

        **Response OK**:

        .. sourcecode:: http

            HTTP/1.1 200 OK

        .. code-block:: json

            {
                "orders": [{
                    "order_id": <str>,
                    "receipt_id": <str>,
                    "user_did": <str>,
                    "subscription": <vault,backup|str>,
                    "pricing_name": <str>,
                    "ela_amount": <float>,
                    "ela_address": <str>,
                    "paid_did": <user did|str>,
                }]
            }

        **Response Error**:

        .. sourcecode:: http

            HTTP/1.1 400 Bad Request

        .. sourcecode:: http

            HTTP/1.1 401 Unauthorized

        .. sourcecode:: http

            HTTP/1.1 403 Forbidden

        .. sourcecode:: http

            HTTP/1.1 404 Not Found

        """
        return self.provider.get_filled_orders()
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from hpccm.version import __version__
from hpccm.base_object import base_object
from hpccm.common import cpu_arch
from hpccm.common import container_type
from hpccm.common import linux_distro
from hpccm.Stage import Stage
from hpccm.recipe import include
from hpccm.recipe import recipe
from hpccm.toolchain import toolchain
import hpccm.building_blocks
import hpccm.templates
import hpccm.primitives
# Templates
# For backwards compatibility with recipes that use "hpccm.git()", etc.
from hpccm.templates.ConfigureMake import ConfigureMake
from hpccm.templates.git import git
from hpccm.templates.rm import rm
from hpccm.templates.sed import sed
from hpccm.templates.tar import tar
from hpccm.templates.wget import wget
|
from . import newsapi_wrapper
from .newsapi_wrapper import NewsApiWrapper
__all__ = ['NewsApiWrapper']
|
"""Tests for ocsp.py"""
# pylint: disable=protected-access
import unittest
import mock
from certbot import errors
out = """Missing = in header key=value
ocsp: Use -help for summary.
"""
class OCSPTest(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
from certbot import ocsp
with mock.patch('certbot.ocsp.Popen') as mock_popen:
with mock.patch('certbot.util.exe_exists') as mock_exists:
mock_communicate = mock.MagicMock()
mock_communicate.communicate.return_value = (None, out)
mock_popen.return_value = mock_communicate
mock_exists.return_value = True
self.checker = ocsp.RevocationChecker()
def tearDown(self):
pass
@mock.patch('certbot.ocsp.logger.info')
@mock.patch('certbot.ocsp.Popen')
@mock.patch('certbot.util.exe_exists')
def test_init(self, mock_exists, mock_popen, mock_log):
mock_communicate = mock.MagicMock()
mock_communicate.communicate.return_value = (None, out)
mock_popen.return_value = mock_communicate
mock_exists.return_value = True
from certbot import ocsp
checker = ocsp.RevocationChecker()
self.assertEqual(mock_popen.call_count, 1)
self.assertEqual(checker.host_args("x"), ["Host=x"])
mock_communicate.communicate.return_value = (None, out.partition("\n")[2])
checker = ocsp.RevocationChecker()
self.assertEqual(checker.host_args("x"), ["Host", "x"])
self.assertEqual(checker.broken, False)
mock_exists.return_value = False
mock_popen.call_count = 0
checker = ocsp.RevocationChecker()
self.assertEqual(mock_popen.call_count, 0)
self.assertEqual(mock_log.call_count, 1)
self.assertEqual(checker.broken, True)
@mock.patch('certbot.ocsp.RevocationChecker.determine_ocsp_server')
@mock.patch('certbot.util.run_script')
def test_ocsp_revoked(self, mock_run, mock_determine):
self.checker.broken = True
mock_determine.return_value = ("", "")
self.assertEqual(self.checker.ocsp_revoked("x", "y"), False)
self.checker.broken = False
mock_run.return_value = tuple(openssl_happy[1:])
self.assertEqual(self.checker.ocsp_revoked("x", "y"), False)
self.assertEqual(mock_run.call_count, 0)
mock_determine.return_value = ("http://x.co", "x.co")
self.assertEqual(self.checker.ocsp_revoked("blah.pem", "chain.pem"), False)
mock_run.side_effect = errors.SubprocessError("Unable to load certificate launcher")
self.assertEqual(self.checker.ocsp_revoked("x", "y"), False)
self.assertEqual(mock_run.call_count, 2)
    @mock.patch('certbot.ocsp.logger.info')
    @mock.patch('certbot.util.run_script')
    def test_determine_ocsp_server(self, mock_run, mock_info):
        """determine_ocsp_server parses the URI/host pair out of openssl."""
        uri = "http://ocsp.stg-int-x1.letsencrypt.org/"
        host = "ocsp.stg-int-x1.letsencrypt.org"
        # Well-formed http:// URI -> (uri, host).
        mock_run.return_value = uri, ""
        self.assertEqual(self.checker.determine_ocsp_server("beep"), (uri, host))
        # Non-http scheme is rejected and logged once.
        mock_run.return_value = "ftp:/" + host + "/", ""
        self.assertEqual(self.checker.determine_ocsp_server("beep"), (None, None))
        self.assertEqual(mock_info.call_count, 1)
        # openssl failure yields (None, None) instead of propagating.
        c = "confusion"
        mock_run.side_effect = errors.SubprocessError(c)
        self.assertEqual(self.checker.determine_ocsp_server("beep"), (None, None))
    @mock.patch('certbot.ocsp.logger')
    @mock.patch('certbot.util.run_script')
    def test_translate_ocsp(self, mock_run, mock_log):
        # pylint: disable=protected-access,star-args
        """_translate_ocsp_query against canned openssl outputs: only an
        unambiguous "revoked" status may return True."""
        mock_run.return_value = openssl_confused
        from certbot import ocsp
        # Clean "good" answer: not revoked, nothing logged.
        self.assertEqual(ocsp._translate_ocsp_query(*openssl_happy), False)
        # Verify failure: not revoked, but debug-logged.
        self.assertEqual(ocsp._translate_ocsp_query(*openssl_confused), False)
        self.assertEqual(mock_log.debug.call_count, 1)
        self.assertEqual(mock_log.warn.call_count, 0)
        mock_log.debug.call_count = 0
        # "unknown" status: debug-logged, not revoked.
        self.assertEqual(ocsp._translate_ocsp_query(*openssl_unknown), False)
        self.assertEqual(mock_log.debug.call_count, 1)
        self.assertEqual(mock_log.warn.call_count, 0)
        # Expired OCSP response: debug-logged, not revoked.
        self.assertEqual(ocsp._translate_ocsp_query(*openssl_expired_ocsp), False)
        self.assertEqual(mock_log.debug.call_count, 2)
        # Unparseable output: warn-logged, not revoked.
        self.assertEqual(ocsp._translate_ocsp_query(*openssl_broken), False)
        self.assertEqual(mock_log.warn.call_count, 1)
        mock_log.info.call_count = 0
        # Revoked certificates return True, even when the response is stale.
        self.assertEqual(ocsp._translate_ocsp_query(*openssl_revoked), True)
        self.assertEqual(mock_log.info.call_count, 0)
        self.assertEqual(ocsp._translate_ocsp_query(*openssl_expired_ocsp_revoked), True)
        self.assertEqual(mock_log.info.call_count, 1)
# Canned (filename, stdout, stderr) triples mimicking `openssl ocsp` output,
# fed to _translate_ocsp_query in the tests above.
# pylint: disable=line-too-long
# Response verification failed -> must be treated as "not revoked".
openssl_confused = ("", """
/etc/letsencrypt/live/example.org/cert.pem: good
This Update: Dec 17 00:00:00 2016 GMT
Next Update: Dec 24 00:00:00 2016 GMT
""",
"""
Response Verify Failure
139903674214048:error:27069065:OCSP routines:OCSP_basic_verify:certificate verify error:ocsp_vfy.c:138:Verify error:unable to get local issuer certificate
""")
# Verified "good" status.
openssl_happy = ("blah.pem", """
blah.pem: good
This Update: Dec 20 18:00:00 2016 GMT
Next Update: Dec 27 18:00:00 2016 GMT
""",
"Response verify OK")
# Verified "revoked" status.
openssl_revoked = ("blah.pem", """
blah.pem: revoked
This Update: Dec 20 01:00:00 2016 GMT
Next Update: Dec 27 01:00:00 2016 GMT
Revocation Time: Dec 20 01:46:34 2016 GMT
""",
"""Response verify OK""")
# Responder could not determine the status.
openssl_unknown = ("blah.pem", """
blah.pem: unknown
This Update: Dec 20 18:00:00 2016 GMT
Next Update: Dec 27 18:00:00 2016 GMT
""",
"Response verify OK")
# Garbage stdout that cannot be parsed at all.
openssl_broken = ("", "tentacles", "Response verify OK")
# Stale (expired) response with "good" status.
openssl_expired_ocsp = ("blah.pem", """
blah.pem: WARNING: Status times invalid.
140659132298912:error:2707307D:OCSP routines:OCSP_check_validity:status expired:ocsp_cl.c:372:
good
This Update: Apr 6 00:00:00 2016 GMT
Next Update: Apr 13 00:00:00 2016 GMT
""",
"""Response verify OK""")
# Stale response with "revoked" status: still counts as revoked.
openssl_expired_ocsp_revoked = ("blah.pem", """
blah.pem: WARNING: Status times invalid.
140659132298912:error:2707307D:OCSP routines:OCSP_check_validity:status expired:ocsp_cl.c:372:
revoked
This Update: Apr 6 00:00:00 2016 GMT
Next Update: Apr 13 00:00:00 2016 GMT
""",
"""Response verify OK""")
if __name__ == '__main__':
    unittest.main()  # pragma: no cover
|
# Example driver: load a JavaScript-backed search page through the
# BrowserRender helper, fill in and submit the search form, then scrape
# the rendered result links.
from chp5.browser_render import BrowserRender
br = BrowserRender()
br.download('http://example.python-scraping.com/search')
# Search for '.' (a pattern that matches every record) with page size 1000
# so all results come back on a single page.
br.attr('#search_term', 'value', '.')
br.text('#page_size option:checked', '1000')
br.click('#search')
# Block until the result anchors appear in the rendered DOM.
elements = br.wait_load('#results a')
# toPlainText suggests Qt DOM elements -- assumed, confirm in BrowserRender.
countries_or_districts = [e.toPlainText().strip() for e in elements]
print(countries_or_districts)
|
# Countdown to "fireworks" with ANSI colour escapes (messages in Portuguese).
import emoji
from time import sleep
# NOTE(review): '\33[31m=' * 20 repeats the red escape along with each '=';
# visually identical to '\33[31m' + '=' * 20, just more bytes.
print('\33[31m=' * 20, 'Contagem Regressiva para os Fogos de Artíficio', '=' * 20, '\33[m')
for c in range(10, -1, -1):
    print(c)
    sleep(1)  # one-second pause between counts
# use_aliases expands ':fireworks:'-style shortcodes.
print(emoji.emojize('\33[34mOs fogos estão explodindo :fireworks:\33[m', use_aliases= True))
print('\33[35mBUM, BUM, BUM, BUM, BUM, BUM, BUM, POOW!!!\33[m')
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import datetime as dt
import json
import os
import attr
import boto3
import requests
@attr.s
class CurrencyAmount:
    """An amount of money in a single currency (e.g. ``USD 12.50``).

    Instances are orderable with ``<``/``>`` only when their units match;
    comparing different currencies raises ``ValueError``.
    """
    unit = attr.ib()
    # Turning strings like '233.205000000000012505552149377763271331787109375'
    # into floats is a little cheaty, but a floating-point error here is
    # insignificant, so we'll take the risk!
    amount = attr.ib(converter=float)

    def __str__(self):
        if self.unit == "USD":
            return f"${self.amount:.2f}"
        else:
            return f"{self.unit} {self.amount:.2f}"

    def _require_same_unit(self, other):
        """Raise ValueError unless *other* is denominated in the same unit.

        Bug fix: these messages previously lacked the ``f`` prefix, so the
        placeholders were emitted literally instead of being interpolated.
        """
        if self.unit != other.unit:
            raise ValueError(
                f"Cannot compare {type(self).__name__} with different "
                f"currencies: {self.unit!r} != {other.unit!r}"
            )

    def __gt__(self, other):
        self._require_same_unit(other)
        return self.amount > other.amount

    def __lt__(self, other):
        self._require_same_unit(other)
        return self.amount < other.amount
@attr.s
class Budget:
    """Wrapper around a budget dict from the DescribeBudgets API."""
    # Raw budget dict as returned by the AWS Budgets DescribeBudgets call.
    data = attr.ib()
    @property
    def name(self):
        # Strip the word "budget"/"Budget" so e.g. "S3 budget" renders as "S3".
        return (
            self.data["BudgetName"].replace("budget", "").replace("Budget", "").strip()
        )
    @property
    def budget_limit(self):
        """The configured spending limit for this budget."""
        spend = self.data["BudgetLimit"]
        return CurrencyAmount(unit=spend["Unit"], amount=spend["Amount"])
    @property
    def current_spend(self):
        """What has actually been spent so far in this budget period."""
        spend = self.data["CalculatedSpend"]["ActualSpend"]
        return CurrencyAmount(unit=spend["Unit"], amount=spend["Amount"])
    @property
    def forecasted_spend(self):
        """AWS's forecast of total spend for this budget period."""
        spend = self.data["CalculatedSpend"]["ForecastedSpend"]
        return CurrencyAmount(unit=spend["Unit"], amount=spend["Amount"])
def get_budgets(account_id):
    """Yield a ``Budget`` for every budget configured on *account_id*."""
    client = boto3.client("budgets")
    response = client.describe_budgets(AccountId=account_id)
    for raw_budget in response["Budgets"]:
        yield Budget(raw_budget)
def build_slack_payload(budgets, image_url):
    """Construct the Slack webhook payload announcing a forecast overspend.

    ``budgets`` is an iterable of objects exposing ``name``,
    ``forecasted_spend`` and ``budget_limit``; ``image_url`` points at the
    chart that was uploaded to S3.
    """
    overspend_lines = [
        f"{b.name}: {b.forecasted_spend} > {b.budget_limit}" for b in budgets
    ]
    attachment = {
        "color": "warning",
        "title": "AWS is forecasting an overspend on our budgets!",
        "image_url": image_url,
        "text": "\n".join(overspend_lines),
    }
    return {
        "username": "aws-budgets",
        "icon_emoji": ":money_with_wings:",
        "attachments": [attachment],
    }
def draw_diagram(budgets):
    """Draws a quick diagram to illustrate the overspend budgets.

    One horizontal row per budget: the budget limit (green dotted), the
    forecast (red dotted) and the current spend (solid black).  Saves the
    figure to ``figure.png`` and returns that filename.
    """
    import matplotlib
    # Headless backend: this runs in Lambda/cron with no display.
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    from matplotlib.lines import Line2D
    from matplotlib.patches import Rectangle
    # Define some parameters. The Slack image previews are ~360x150px, so
    # we need fonts and sizes that fit that well.
    matplotlib.rcParams.update(
        {"font.family": "Arial", "font.size": 20, "figure.figsize": (11, 4.6)}
    )
    # BECAUSE I CAN, DAMNIT.
    with plt.xkcd():
        fig, axes = plt.subplots()
        # First we draw a box plot. We don't actually use any of the graph
        # elements it creates, but it sets up some axes, labels and tick marks
        # so we don't have to do that manually.
        #
        # Based on
        # https://matplotlib.org/examples/pylab_examples/boxplot_demo.html
        data = [
            [b.budget_limit.amount, b.current_spend.amount, b.forecasted_spend.amount]
            for b in budgets
        ]
        labels = [b.name for b in budgets]
        axes.boxplot(
            data,
            labels=labels,
            vert=False,
            # This parameter ensures that the boxplot elements are drawn on
            # a low layer in the image.
            zorder=0.0,
        )
        # Boxplot rows are 1-indexed, hence start=1.
        for i, budget in enumerate(budgets, start=1):
            # Now we immediately discard most of what we've just drawn!
            # We draw over it with a white box at a higher z-layer, so we can
            # draw lines ourselves.
            min_value = min(
                [
                    budget.budget_limit.amount,
                    budget.current_spend.amount,
                    budget.forecasted_spend.amount,
                ]
            )
            max_value = max(
                [
                    budget.budget_limit.amount,
                    budget.current_spend.amount,
                    budget.forecasted_spend.amount,
                ]
            )
            axes.add_patch(
                Rectangle(
                    xy=(min_value - 10, i - 0.25),
                    width=(max_value - min_value + 10),
                    height=0.5,
                    fill=True,
                    color="white",
                    zorder=1.0,
                )
            )
            # Then we draw our own lines to show the different parts of
            # this budget.
            line_limit = Line2D(
                xdata=[budget.budget_limit.amount, budget.budget_limit.amount],
                ydata=[i - 0.2, i + 0.2],
                color="green",
                linewidth=6,
                linestyle=":",
            )
            axes.add_line(line_limit)
            line_forecast = Line2D(
                xdata=[budget.forecasted_spend.amount, budget.forecasted_spend.amount],
                ydata=[i - 0.2, i + 0.2],
                color="red",
                linewidth=6,
                linestyle=":",
            )
            axes.add_line(line_forecast)
            line_current = Line2D(
                xdata=[budget.current_spend.amount, budget.current_spend.amount],
                ydata=[i - 0.25, i + 0.25],
                color="black",
                linewidth=10,
            )
            axes.add_line(line_current)
        # Finally, we add these three lines to the legend. There's probably a
        # neater way of doing these with line styles, but I don't care enough to
        # learn how to do it "properly".
        legend_limit = Line2D(xdata=[], ydata=[], color="green", label="budget limit")
        legend_forecast = Line2D(xdata=[], ydata=[], color="red", label="forecast")
        legend_current = Line2D(
            xdata=[], ydata=[], color="black", label="current spend"
        )
        plt.legend(handles=[legend_limit, legend_forecast, legend_current])
        plt.savefig("figure.png", bbox_inches="tight")
    return "figure.png"
def main(account_id, hook_url, s3_bucket):
    """Check the account's budgets and alert Slack about forecast overspend.

    If any budget's forecast exceeds its limit, draws a chart of those
    budgets, uploads it (publicly readable) to S3, and posts a webhook
    message embedding the chart.  Raises on a failed Slack delivery.
    """
    all_budgets = get_budgets(account_id=account_id)
    overspend_budgets = [b for b in all_budgets if b.forecasted_spend > b.budget_limit]
    if not overspend_budgets:
        print("No overspend in our budgets! Nothing to do...")
        return
    filename = draw_diagram(overspend_budgets)
    s3_client = boto3.client("s3")
    # Timestamped key so successive runs never overwrite each other.
    s3_key = f"budget_graphs/{dt.datetime.now().isoformat()}"
    s3_client.upload_file(
        Filename=filename,
        Bucket=s3_bucket,
        Key=s3_key,
        # The image must be publicly readable for Slack to render it inline.
        ExtraArgs={"ACL": "public-read"},
    )
    payload = build_slack_payload(
        overspend_budgets,
        # NOTE(review): region eu-west-1 is hard-coded into the URL --
        # confirm the bucket actually lives there.
        image_url=f"https://s3-eu-west-1.amazonaws.com/{s3_bucket}/{s3_key}",
    )
    resp = requests.post(
        hook_url, data=json.dumps(payload), headers={"Content-Type": "application/json"}
    )
    resp.raise_for_status()
if __name__ == "__main__":
account_id = os.environ["ACCOUNT_ID"]
hook_url = os.environ["SLACK_WEBHOOK"]
s3_bucket = os.environ["S3_BUCKET"]
main(account_id=account_id, hook_url=hook_url, s3_bucket=s3_bucket)
|
#!/home/mercer/receipts-dates/venv/bin/python3
# Thin virtualenv entry point: hand the command line straight to Django's
# management machinery (equivalent to django-admin / manage.py).
from django.core import management
if __name__ == "__main__":
    management.execute_from_command_line()
|
# pylint: disable=E1101,E0611,F0401
# E1101: Pylint cannot resolve specific win32 modules.
# E0611: "shell" exists in win32com but Pylint cannot detect it.
# F0401: "win32com.shell" exists but Pylint cannot import.
#
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module provides some utility classes / function for platform-independent
file system access.
"""
_WIN32_PLATFORM = "win32"
import logging
import os
import sys
if sys.platform == _WIN32_PLATFORM:
import pythoncom
import pywintypes
import win32api
from win32com.shell import shell
import win32netcon
import win32wnet
from datafinder.persistence.error import PersistenceError
__version__ = "$Revision-Id:$"
_log = logging.getLogger(None)
class ShortCut(object):
    """ Implements platform-independent shortcut / symbolic link implementation.

    On Windows a shell ``.lnk`` file is manipulated through the COM shell
    interfaces; on all other platforms ordinary symbolic links are used.
    """

    _WINDOWS_LINK_EXTENSION = ".lnk"

    def __init__(self, destination):
        """ Constructor.

        @param destination: Path of the link itself (not of its target).
        """
        self._destination = destination

    def create(self, source):
        """ Creates the shortcut / symbolic link pointing to C{source}.

        @raise PersistenceError: if the link cannot be created.
        """
        if sys.platform == _WIN32_PLATFORM:
            try:
                # Build an IShellLink COM object, point it at the source,
                # and persist it into the .lnk file.
                sh = pythoncom.CoCreateInstance(shell.CLSID_ShellLink, None, \
                    pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink)
                persist = sh.QueryInterface(pythoncom.IID_IPersistFile)
                sh.SetPath(source)
                persist.Save(self._destination, 1)
            except pywintypes.com_error, error:
                errorMessage = "Cannot create symbolic link '%s'. Reason: '%s'." % (self._destination, error[0])
                raise PersistenceError(errorMessage)
        else:
            try:
                os.symlink(source, self._destination)
            except OSError, error:
                reason = os.strerror(error.errno)
                errorMessage = "Cannot create symbolic link '%s'. Reason: '%s'" % (self._destination, reason)
                raise PersistenceError(errorMessage)

    def resolve(self):
        """ Resolves the link.

        @return: Path the link currently points to.
        @raise PersistenceError: if the link cannot be loaded / read.
        """
        if sys.platform == _WIN32_PLATFORM:
            try:
                sh = pythoncom.CoCreateInstance(shell.CLSID_ShellLink, None, \
                    pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink)
                persist = sh.QueryInterface(pythoncom.IID_IPersistFile)
                persist.Load(self._destination)
                return sh.GetPath(shell.SLGP_UNCPRIORITY)[0]
            except pywintypes.com_error, error:
                errorMessage = "Cannot resolve symbolic link '%s'. Reason: '%s'." % (self._destination, error[0])
                raise PersistenceError(errorMessage)
        else:
            try:
                return os.readlink(self._destination)
            except OSError, error:
                reason = os.strerror(error.errno)
                errorMessage = "Cannot resolve symbolic link '%s'. Reason: '%s'" % (self._destination, reason)
                raise PersistenceError(errorMessage)

    def isLink(self):
        """ Figures out if the associated path is a symbolic link.

        On Windows only a path ending in ".lnk" that loads successfully as
        a shell link counts; elsewhere C{os.path.islink} is consulted.
        """
        if sys.platform == _WIN32_PLATFORM:
            result = False
            if self._destination.endswith(self._WINDOWS_LINK_EXTENSION):
                try:
                    sh = pythoncom.CoCreateInstance(shell.CLSID_ShellLink, None, \
                        pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink)
                    persist = sh.QueryInterface(pythoncom.IID_IPersistFile)
                    persist.Load(self._destination)
                    result = True
                except pywintypes.com_error:
                    result = False
            return result
        else:
            return os.path.islink(self._destination)
def createShortcut(path):
    """ Creates a platform-specific shortcut representation.

    @param path: Path of the link / shortcut file.
    @return: C{ShortCut} instance wrapping the path.
    """
    return ShortCut(path)
def isWindowsRootPath(path):
    """ Checks whether the given path corresponds to the virtual root directory on Windows. """
    return path == "/" and sys.platform == _WIN32_PLATFORM
def listDirectory(directoryPath):
    """ Lists the given directory.

    On Windows the virtual root "/" expands to the list of drive letters;
    otherwise the child entries are joined to the directory path and
    unicode-decoded.  Entries whose names cannot be decoded are skipped
    with a debug message.
    """
    if directoryPath == "/" and sys.platform == _WIN32_PLATFORM:
        # GetLogicalDriveStrings returns a NUL-separated string of drives.
        result = [driveLetter for driveLetter in win32api.GetLogicalDriveStrings().split("\000") if driveLetter]
    else:
        result = list()
        if directoryPath.endswith(":") and sys.platform == _WIN32_PLATFORM: # it is a drive letter
            directoryPath += "\\" # Required to fix issue with os.listdir / os.path.join in Python 2.7.10
        for path in os.listdir(directoryPath):
            path = os.path.join(directoryPath, path)
            decodedPath = _binaryToUnicodeFilePathDecoding(path)
            if not decodedPath is None:
                result.append(decodedPath)
            else:
                _log.debug("Unable to decode path string. Ignoring it.")
    return result
def _binaryToUnicodeFilePathDecoding(binaryString):
    """
    Decodes the given binary string into an unicode string.
    The primarily use is for decoding file system paths.
    In order to perform the decoding the default file system encoding
    is used. If it fails on non-Windows operating systems, it will be tried
    to use the Windows encoding "cp437". This encoding is used when a
    a file name is written via a Samba share from a Windows client.
    If the given string is already an unicode string this string is returned and
    no conversion is tried.
    @param binaryString: String to decode.
    @type binaryString: C{string}
    @return: Unicode representation of the binary string, or C{None} if the
        string cannot be decoded.
    @rtype: C{unicode}
    """
    fileSystemEncoding = sys.getfilesystemencoding()
    if fileSystemEncoding is None:
        fileSystemEncoding = "utf-8"
    if isinstance(binaryString, unicode):
        # Already unicode: nothing to convert.
        return binaryString
    try:
        return binaryString.decode(fileSystemEncoding)
    except UnicodeDecodeError:
        # Bug fix: on Windows the original code fell through this branch
        # without assigning a value and raised NameError; undecodable names
        # now uniformly yield None (callers already check for it).
        if sys.platform == "win32":
            return None
        try:
            # Samba/Windows fallback encoding.
            return binaryString.decode("cp437")
        except UnicodeDecodeError:
            return None
def connectWindowsShare(share, username, password):
    """
    Connects a windows-share.
    No-op on non-Windows platforms, and when the share path already exists.
    @param share: Windows share in UNC path representation.
    @type share: C{unicode}
    @param username: User name for authenticating against the share.
    @param password: Password for authenticating against the share.
    @raise PersistenceError: raised if connection to a SMB-share failed
    """
    if sys.platform == _WIN32_PLATFORM:
        # A UNC path must contain at least server and share components.
        components = os.path.normpath(share).split("\\")
        if len(components) < 3:
            raise PersistenceError("Wrong file share configuration information!")
        else:
            if not os.path.exists(share):
                try:
                    win32wnet.WNetAddConnection2(win32netcon.RESOURCETYPE_DISK,
                                                 None, #unused_drive,
                                                 share,
                                                 None,
                                                 username,
                                                 password,
                                                 0)
                except pywintypes.error, error:
                    raise PersistenceError("Could not connect to '%s'.\nReason: %s" % (share, error[2]))
class ItemIdentifierMapper(object):
    """ Maps logical item identifiers to persistence paths and back. """

    def __init__(self, basePath):
        """
        Constructor.
        @param basePath: Root path of the file system.
        @type basePath: C{unicode}
        """
        self.__basePath = basePath

    def mapIdentifier(self, identifier):
        """
        Maps the identifier to persistence representation.
        """
        mappedId = os.path.join(self.__basePath, identifier[1:])
        # Ensures correct format on WIN32 when addressing a drive letter.
        if mappedId.startswith("/"):
            driveLetter = os.path.splitdrive(mappedId[1:])[0]
            if driveLetter:
                mappedId = mappedId[1:]
                if mappedId == driveLetter:
                    mappedId += "/"
        return mappedId

    def mapPersistenceIdentifier(self, persisentenceId):
        """
        Maps the persistence identifier to the path used to address the items logically.
        """
        logicalId = persisentenceId
        if logicalId.startswith(self.__basePath):
            logicalId = logicalId[len(self.__basePath):]
        if not logicalId.startswith("/"):
            logicalId = "/" + logicalId
        logicalId = logicalId.replace("\\", "/")
        return logicalId[:-1] if logicalId.endswith("/") else logicalId
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file contains functions
"""
from llist import dllist
import numpy as np
def rotate_teams(entities, stay_chance, eval_time, dt):
    """
    Move duty teams between their home box and the civilian box following
    each team's on-duty / off-duty schedule.

    Args:
        entities: tuple with pointers to teams, boxes and agents objects.
        stay_chance: probability that an individual agent stays behind
            (is not transferred) when its team goes on leave.
        eval_time: current time step of simulation, measured in seconds
        dt: size of simulation time step
    """
    teams, boxes, agents = entities
    for team in teams:
        if team.duty:
            # Serving-period parameters, converted from days to seconds.
            offset = team.duty["offset"] * 24*60*60
            on = team.duty["on"] * 24*60*60
            off = team.duty["off"] * 24*60*60
            # subjective time within the service-leave cycle for each team
            st = (eval_time + offset) % (on+off)
            # initial teams positioning
            if eval_time == 0:
                if st < on:
                    # transfer to barracks
                    team.currBox = team.homeBox
                    for idx in team.agent_idxs:
                        agents[idx].transfer(team.currBox)
                else:
                    # transfer to freeland
                    team.currBox = boxes["civilian"]
                    for idx in team.agent_idxs:
                        agents[idx].transfer(team.currBox)
            # time to serve: the cycle just wrapped, report to the home box
            if 0 <= st < dt:
                team.currBox = team.homeBox
                for idx in team.agent_idxs:
                    agents[idx].transfer(team.currBox)
            # time to leave: each agent departs unless it randomly stays
            if on <= st < on+dt:
                team.currBox = boxes["civilian"]
                for idx in team.agent_idxs:
                    if np.random.rand() > stay_chance:
                        agents[idx].transfer(team.currBox)
def queue_sotilaskoti(entities, q, eval_time, dt, config):
    """
    Manage the daily sotilaskoti (soldiers' canteen) visitor queue.

    At opening time a random sample of conscript and civilian agents is
    appended to ``q`` (remembering each agent's origin box) and moved into
    the "sotilaskoti" box; at closing time the queue is drained and every
    visitor is returned to its origin box.

    Args:
        entities: tuple with pointers to teams, boxes and agents objects.
        q: list used as the persistent visitor queue between calls.
        eval_time: current simulation time in seconds.
        dt: simulation time step in seconds.
        config: configuration dict read from the yaml file.
    """
    teams, boxes, agents = entities
    start = config['sotilaskoti']['openingHours']['start'] * 3600
    stop = config['sotilaskoti']['openingHours']['stop'] * 3600
    day_time = eval_time % (24*60*60)
    if start <= day_time < start+dt:
        # Split teams into on-duty (conscript) and everyone else.
        # NOTE(review): team.currBox is assigned box *objects* elsewhere,
        # but is compared against the string "civilian" here -- confirm
        # which representation currBox actually holds.
        teams_mil, teams_civ = [], []
        for team in teams:
            if team.duty and team.currBox != "civilian":
                teams_mil.append(team)
            else:
                teams_civ.append(team)
        cons_n = config['sotilaskoti']['participants']['conscripts']
        civ_n = config['sotilaskoti']['participants']['civilians']
        # Randomly choose agents from conscripted and civilian teams
        if teams_mil:
            for _ in range(cons_n):
                team = np.random.choice(teams_mil)
                idx = np.random.choice(team.agent_idxs)
                q.append({"idx": idx,
                          "originBox": agents[idx].allowed_box})
        if teams_civ:
            for _ in range(civ_n):
                team = np.random.choice(teams_civ)
                idx = np.random.choice(team.agent_idxs)
                q.append({"idx": idx,
                          "originBox": agents[idx].allowed_box})
        # Populate sotilaskoti cafeteria queue with the chosen agents
        for p in q:  # person in queue
            agents[p["idx"]].transfer(boxes["sotilaskoti"])
    if stop <= day_time < stop+dt:
        # Empty the queue and return people to their respective boxes
        while q:
            p = q.pop(0)
            agents[p["idx"]].transfer(p["originBox"])
def increment_agent_positions(agents):
    """
    Advance every agent one step, reflecting its velocity off the walls of
    its allowed box so it never leaves the box.
    """
    for agent in agents:
        box = agent.allowed_box
        # Flip the velocity component whose next position would leave the box.
        if not (box.left < agent.x + agent.dx < box.right):
            agent.dx = -agent.dx
        if not (box.bottom < agent.y + agent.dy < box.top):
            agent.dy = -agent.dy
        agent.x += agent.dx
        agent.y += agent.dy
def x_sort(dl):
    """
    Sorts the doubly linked list of agents (dl) along the x-ordinate

    Insertion sort over the linked list: each node is walked backwards past
    larger-x predecessors and re-inserted at its proper place.  Suited here
    because the list stays nearly sorted between simulation steps.
    """
    n = dl.nodeat(0)  # node
    nn = n.next  # next node
    while nn:
        n = nn
        nn = n.next
        nb = n.prev  # previous node
        dis = False  # disordered: if neighbour pair of agents is in the wrong
                     # order. By default innocent until found guilty.
        while True:
            if not nb:  # if the list start is reached, just insert there
                e = dl.remove(n)  # e: element stored within the node
                dl.appendleft(e)
                break
            if not dis:  # if things are already ok
                if nb.value.x < n.value.x:
                    break
                dis = True
            if nb.value.x < n.value.x:  # proper position is found, insert here
                e = dl.remove(n)
                # dllist.insert places e before the given node.
                dl.insert(e, nb.next)
                break
            nb = nb.prev
    return dl
def initial_sort(agents):
    """
    Perform an initial sort of agents along the x-ordinate (later such sorted
    list is needed for a bit faster neighbours finding computation). Since
    agents x-positions are initially randomly distributed, an off-the-shelf
    numpy quicksort appears to be an optimal choice.
    Args:
        agents: list with references to (spatial) agents instances
    Out:
        dl: sorted doubly linked list with references to agents instances
    """
    IX = []  # list of indexes and positions along the x-ordinate
    for agent in agents: IX.append([agent.idx, agent.x])
    # argsort over axis 0 orders each column independently; column 1 is the
    # permutation that sorts agents by x-position.
    IXs = np.argsort(IX, axis=0)  # sorted according to x-ordinate positions
    Is = IXs[:,1]  # leave just indices
    agents_x_sorted = np.array(agents)[Is]
    dl = dllist(agents_x_sorted)  # to doubly linked list
    return dl
def detect_meetings(agents_x_sorted, eval_time, config, visualize):
    """
    Find all pairs of agents currently within the infection radius.

    Exploits the x-sorted linked list: for each agent only predecessors
    whose x-distance is still below the radius need the full Euclidean
    check, so each backward scan short-circuits early.

    Args:
        agents_x_sorted: doubly linked list of agents, sorted by x-position
        eval_time: time in seconds elapsed from the simulation start
            (currently unused here)
        config: config read from the yaml
        visualize: when True, agents in contact are coloured red
    Out:
        meets_curr: dict mapping frozenset({idx_a, idx_b}) -> box name:
            one entry per pair of agents within the radius, keyed by the
            two agent indexes, with the name of the box they met in.
    """
    if visualize:
        # Reset every agent to transparent white at the start of the step.
        for agent in agents_x_sorted:
            agent.color = (1.0, 1.0, 1.0, 0.0)
    rad = config["infection"]["radius"]
    meets_curr = dict()
    n = agents_x_sorted.nodeat(0)  # node (contains the reference agent)
    nn = n.next  # next node (contains the following agent)
    while nn:
        nears = []
        n = nn
        nn = n.next
        nb = n.prev
        while nb:
            dx = n.value.x - nb.value.x  # non-negative: list is x-sorted
            if dx < rad:
                dy = n.value.y - nb.value.y
                dist = (dx*dx + dy*dy)**0.5
                if dist < rad:
                    nears.append(nb.value)
            else:
                # All remaining predecessors are even further away in x.
                break
            nb = nb.prev
        for near in nears:
            link = frozenset({n.value.idx, near.idx})  # who with who
            place = n.value.allowed_box.name  # where
            meets_curr[link] = place
        # paint agents within a Euclidean circle red
        if visualize:
            color = (1.0, 0.0, 0.051, 1.0)
            if nears: n.value.color = color
            for near in nears:
                near.color = color
    return meets_curr
|
'''
/*
* @Author: Shawn Zhang
* @Date: 2019-09-08 17:15:03
* @Last Modified by: Shawn Zhang
* @Last Modified time: 2019-09-08 22:50:07
*/
'''
from flask import current_app
from . import index_bp
import os
@index_bp.route('/', methods=['GET'])
def index():
    """Return the absolute directory of this blueprint module (debug aid)."""
    return os.path.abspath(os.path.dirname(__file__))
import os
from cv2 import cv2
from .config import MODEL_PATH, CLASSES_PATH, USE_GPU
# Paths to the YOLOv3 network definition, pre-trained weights and the COCO
# class-name list.
modelConfig = os.path.join(MODEL_PATH, "yolov3.cfg")
modelWeights = os.path.join(MODEL_PATH, "yolov3.weights")
modelClass = os.path.join(CLASSES_PATH, "coco.names")
# Default to the plain OpenCV backend on CPU; switch both backend and target
# to CUDA when GPU use is requested.
backend = cv2.dnn.DNN_BACKEND_OPENCV
target = cv2.dnn.DNN_TARGET_CPU
if USE_GPU:
    # Bug fix: DNN_TARGET_CUDA was previously assigned as the *backend*;
    # the correct backend constant is DNN_BACKEND_CUDA.
    backend = cv2.dnn.DNN_BACKEND_CUDA
    target = cv2.dnn.DNN_TARGET_CUDA
def load_model():
    """
    Load the YOLO network and the list of class names.

    Returns:
        tuple: ``(classNames, modelNet)`` where ``classNames`` is a list of
        class labels (one per line of the names file) and ``modelNet`` is
        the OpenCV DNN network configured with the module-level
        backend/target.
    """
    with open(modelClass, 'rt') as f:
        # Bug fix: strip('\n') alone returned the whole file as one string;
        # split into one class name per line so labels can be looked up by
        # detected class id.
        classNames = f.read().strip('\n').split('\n')
    modelNet = cv2.dnn.readNetFromDarknet(modelConfig, modelWeights) # Loading the YOLO Object Detector Model
    modelNet.setPreferableBackend(backend) # Use the module-level backend (OpenCV or CUDA)
    modelNet.setPreferableTarget(target) # Use the module-level target (CPU or CUDA)
    return classNames, modelNet
|
n = int(input(":"))
if n<=1:
print("None")
else:
listn=[]
i=2
while i<=n:
listn.append(i)
i+=1
for x in range(n):
l=len(listn)
for y in listn[x+1:l]:
if y%listn[x]==0:
listn.remove(y)
listn.append(-3)
listt=[]
for i in range(1,len(listn)-1):
listt.append(listn[i])
if listn[i+1]!=listn[i]+2:
if len(listt)>=2:
print listt
listt=[]
else:
listt=[]
|
# Adversarial autoencoder
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
import tensorflow as tf
import matplotlib.pyplot as plt
import time
from tfutil_deprecated import LayerManager
# Command-line flags (TF 1.x tf.app.flags API).
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('max_steps', 100000, 'Number of steps to run trainer.')
flags.DEFINE_float('learning_rate', 0.0003, 'Initial learning rate.')
flags.DEFINE_string('data_dir', '/tmp/data', 'Directory for storing data')
flags.DEFINE_string('summaries_dir', '/tmp/aae/logs', 'Summaries directory')
flags.DEFINE_string('train_dir', '/tmp/aae/save', 'Saves directory')
# Dataset / model hyperparameters.
TRAIN_SIZE = 60000  # number of simulated training signals
TEST_SIZE = 10000  # number of simulated test signals
SIG_LEN = 256  # samples per signal
NUM_COMPONENTS = 3  # harmonics per simulated signal
BATCH_SIZE = 64
PRIOR_BATCH_SIZE = 5  # signals decoded from the prior for visualisation
PRETRAIN = False  # whether to run the rough-error pre-training phase
TRAIN = True  # when False, training data is zero-filled and skipped
LATENT_DIM = 20
NUM_HIDDEN_LAYERS = 3
HIDDEN_LAYER_SIZE = 200
def log(s):
    """Print message *s* prefixed with the current wall-clock timestamp."""
    timestamp = time.asctime()
    print('[{}] {}'.format(timestamp, s))
def rand_periodic(num_components, num_signals, signal_length):
    """Simulate random periodic signals.

    Each signal is a sum of ``num_components`` harmonics of a random base
    period (drawn uniformly from [40, 120) samples) with Gaussian sine and
    cosine coefficients.  Returns an array of shape
    (num_signals, signal_length).
    """
    t = numpy.arange(signal_length, dtype=numpy.float32).reshape(1, signal_length)
    # One random base period per signal.  (RNG call order matters for
    # reproducibility: rand, then randn twice.)
    periods = numpy.random.rand(num_signals, 1) * 80 + 40
    phase = 2*numpy.pi*t / periods
    sin_coeff = numpy.random.randn(num_components, num_signals)
    cos_coeff = numpy.random.randn(num_components, num_signals)
    harmonics = numpy.arange(1, num_components + 1).reshape(num_components, 1, 1) * phase
    sin_part = numpy.einsum('ij,ijk->jk', sin_coeff, numpy.sin(harmonics))
    cos_part = numpy.einsum('ij,ijk->jk', cos_coeff, numpy.cos(harmonics))
    return sin_part + cos_part
def train():
# Import data
log('simulating data')
numpy.random.seed(3737)
test_data = rand_periodic(NUM_COMPONENTS, TEST_SIZE, SIG_LEN)
if TRAIN:
train_data = rand_periodic(NUM_COMPONENTS, TRAIN_SIZE, SIG_LEN)
else: # Don't waste time computing training data
train_data = numpy.zeros((TRAIN_SIZE, SIG_LEN))
log('done simulating')
lm_ae = LayerManager()
lm_disc = LayerManager()
with tf.name_scope('input'):
all_train_data_initializer = tf.placeholder(tf.float32, [TRAIN_SIZE, SIG_LEN])
all_train_data = tf.Variable(all_train_data_initializer, trainable=False, collections=[])
random_training_example = tf.train.slice_input_producer([all_train_data])
training_batch = tf.train.batch([random_training_example], batch_size=BATCH_SIZE, enqueue_many=True)
fed_input_data = tf.placeholder(tf.float32, [None, SIG_LEN])
def log_std_act(log_std):
return tf.clip_by_value(log_std, -4.0, 4.0)
def id_act(z):
return z
def double_relu(z):
return [tf.nn.relu(z), tf.nn.relu(-z)]
def encoder(data):
last = data
for i in range(NUM_HIDDEN_LAYERS):
last = lm_ae.nn_layer(last, HIDDEN_LAYER_SIZE, 'encoder/hidden{}'.format(i), act=double_relu)
with tf.variable_scope('latent'):
latent_mean = lm_ae.nn_layer(last, LATENT_DIM, 'mean', act=id_act)
latent_log_std = lm_ae.nn_layer(last, LATENT_DIM, 'log_std', act=log_std_act)
return latent_mean, latent_log_std
def decoder(code):
last = code
for i in range(NUM_HIDDEN_LAYERS):
last = lm_ae.nn_layer(last, HIDDEN_LAYER_SIZE, 'decoder/hidden{}'.format(i), act=double_relu)
output_mean = lm_ae.nn_layer(last, SIG_LEN, 'output/mean', act=id_act)
output_log_std = lm_ae.nn_layer(last, SIG_LEN, 'output/log_std', act=log_std_act)
return output_mean, output_log_std
def discriminator(latent):
last = latent
for i in range(1):
last = lm_disc.nn_layer(last, 20, 'discriminator/hidden{}'.format(i), act=double_relu)
output_logit = lm_disc.nn_layer(last, 1, 'discrimator/prediction', act=id_act)
return output_logit
def full_model(data):
latent_mean, latent_log_std = encoder(data)
#latent_sample = lm_ae.reparam_normal_sample(latent_mean, latent_log_std, 'sample')
latent_sample = latent_mean
output_mean, output_log_std = decoder(latent_sample)
disc_neg_logit = discriminator(latent_sample)
tf.get_variable_scope().reuse_variables()
latent_prior_sample = tf.random_normal(tf.shape(latent_mean))
latent_prior_sample.set_shape(latent_mean.get_shape().as_list())
disc_pos_logit = discriminator(latent_prior_sample)
reconstruction_error = tf.reduce_sum(
-0.5 * numpy.log(2 * numpy.pi) - output_log_std - 0.5 * tf.square(output_mean - data) / tf.exp(
2.0 * output_log_std), reduction_indices=[1])
disc_cross_entropy = 0.5*tf.nn.sigmoid_cross_entropy_with_logits(disc_neg_logit, tf.zeros(tf.shape(disc_neg_logit))) \
+ 0.5*tf.nn.sigmoid_cross_entropy_with_logits(disc_pos_logit, tf.ones( tf.shape(disc_pos_logit)))
num_copies = 85
image = tf.reshape(
tf.tile(tf.expand_dims(tf.transpose(tf.pack([data, output_mean, data - output_mean]), perm=[1, 0, 2]), 2),
[1, 1, num_copies, 1]), [-1, 3 * num_copies, SIG_LEN])
lm_ae.summaries.image_summary('posterior_sample', tf.expand_dims(image, -1), 5)
rough_error = tf.reduce_mean(tf.square(tf.reduce_mean(tf.square(output_mean), reduction_indices=[1]) - tf.reduce_mean(tf.square(data), reduction_indices=[1])))
return output_mean, tf.reduce_mean(reconstruction_error), tf.reduce_mean(disc_cross_entropy), rough_error
def prior_model():
latent_sample = tf.random_normal((PRIOR_BATCH_SIZE, LATENT_DIM))
output_mean, output_log_std = decoder(latent_sample)
num_copies = 255
image = tf.tile(tf.expand_dims(output_mean, 1), [1, num_copies, 1])
sample_image = lm_ae.summaries.image_summary('prior_sample', tf.expand_dims(image, -1), 5)
return output_mean, sample_image
with tf.name_scope('posterior'):
posterior_mean, reconstruction_error, disc_cross_entropy, rough_error = full_model(training_batch)
training_merged = lm_ae.summaries.merge_all_summaries()
tf.get_variable_scope().reuse_variables()
with tf.name_scope('prior'):
prior_mean, prior_sample = prior_model()
lm_ae.summaries.reset()
with tf.name_scope('test'):
test_posterior_mean, test_reconstruction_error, test_dist_cross_entropy, _ = full_model(fed_input_data)
test_merged = lm_ae.summaries.merge_all_summaries()
saver = tf.train.Saver(tf.trainable_variables())
batch = tf.Variable(0)
learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, batch, 5000, 0.8, staircase=True)
pretrain_step = tf.train.AdamOptimizer(0.03).minimize(rough_error, var_list=lm_ae.scale_factory.variables)
ae_train_step = tf.train.AdamOptimizer(learning_rate).minimize(-reconstruction_error, global_step=batch, var_list=lm_ae.weight_factory.variables + lm_ae.bias_factory.variables)
ae_fool_disc_train_step = tf.train.AdamOptimizer(learning_rate/3.0).minimize(-disc_cross_entropy, global_step=batch, var_list=lm_ae.weight_factory.variables + lm_ae.bias_factory.variables)
disc_train_step = tf.train.AdamOptimizer(learning_rate/3.0).minimize(disc_cross_entropy, global_step=batch, var_list=lm_disc.weight_factory.variables + lm_disc.bias_factory.variables)
def feed_dict(mode):
    """Make a TensorFlow feed_dict: maps data onto Tensor placeholders.

    Only the 'test' mode feeds data explicitly; training data arrives
    through the input queue, so other modes return an empty feed.
    """
    if mode == 'test':
        return {fed_input_data: test_data}
    else:
        return {}
def validate(i, write_summary=True):
    """Run the test-set graph at step `i`, log the metrics, and
    optionally record the merged summary to `test_writer`.

    Relies on `sess`, `test_merged`, `test_dist_cross_entropy`,
    `test_reconstruction_error` and `test_writer` from the enclosing scope.
    """
    summary, ce, re = sess.run([test_merged, test_dist_cross_entropy, test_reconstruction_error],
                               feed_dict=feed_dict('test'))
    log('batch %s: Test set cross entropy = %s, test set reconstruction error = %s' % (i, ce, re))
    if write_summary:
        test_writer.add_summary(summary, i)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
sess.run(all_train_data.initializer, feed_dict={all_train_data_initializer: train_data})
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
if TRAIN:
train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', sess.graph)
test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test')
try:
if PRETRAIN:
log('starting pre-training')
for i in range(3000):
err, _ = sess.run([rough_error, pretrain_step], feed_dict=feed_dict('train'))
if i % 100 == 99:
log('batch %s: single training batch rough error = %s' % (i, err))
#validate(0, write_summary=False)
log('initializing ae')
for i in range(5000):
re, _ = sess.run([reconstruction_error, ae_train_step], feed_dict=feed_dict('train'))
if i % 1000 == 999:
log('batch %s: single training batch reconstruction error = %s' % (i, re))
# #validate(0, write_summary=False)
# log('initializing discriminator')
# for i in range(5000):
# ce, _ = sess.run([disc_cross_entropy, disc_train_step], feed_dict('train'))
# if i % 1000 == 999:
# log('batch %s: single training batch cross entropy = %s' % (i, ce))
#validate(0, write_summary=False)
log('starting training')
for i in range(FLAGS.max_steps):
if i % 1000 == 999: # Do test set
validate(i)
# if i % 10 == 0:
# sess.run([disc_train_step], feed_dict('train'))
if i % 100 == 99: # Record a summary
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary, prior_sample_summary, _ = sess.run([training_merged, prior_sample, ae_train_step],
feed_dict=feed_dict('train'),
options=run_options,
run_metadata=run_metadata)
train_writer.add_summary(summary, i)
train_writer.add_summary(prior_sample_summary, i)
train_writer.add_run_metadata(run_metadata, 'batch%d' % i)
else:
# sess.run([disc_train_step], feed_dict=feed_dict('train'))
# sess.run([ae_fool_disc_train_step], feed_dict=feed_dict('train'))
# sess.run([ae_train_step], feed_dict=feed_dict('train'))
sess.run([ae_train_step, ae_fool_disc_train_step, disc_train_step], feed_dict=feed_dict('train'))
finally:
log('saving')
saver.save(sess, FLAGS.train_dir, global_step=batch)
log('done')
else:
log('restoring')
saver.restore(sess, FLAGS.train_dir + '-' + str(FLAGS.max_steps))
fig = plt.figure()
ax = fig.add_subplot(111)
def plot_prior(_):
prior_means, = sess.run([prior_mean], feed_dict=feed_dict('prior'))
plt.cla()
ax.plot(prior_means[0, :])
plt.draw()
plot_prior(None)
cid = fig.canvas.mpl_connect('button_press_event', plot_prior)
plt.show()
fig.canvas.mpl_disconnect(cid)
coord.request_stop()
coord.join(threads)
sess.close()
def main(_):
    """Entry point for tf.app.run(): reset the summary dir, then train."""
    summaries_dir = FLAGS.summaries_dir
    # Start each run from an empty summaries directory so event files
    # from previous runs do not mix into the new TensorBoard logs.
    if tf.gfile.Exists(summaries_dir):
        tf.gfile.DeleteRecursively(summaries_dir)
    tf.gfile.MakeDirs(summaries_dir)
    train()


if __name__ == '__main__':
    tf.app.run()
|
from fastapi import APIRouter
from apis.login.controller import login_router
from apis.users.controller import user_router
from apis.record.controller import record_router
from apis.role.controller import role_router
from apis.menu.controller import menu_router
from apis.perm.controller import perm_router
from apis.cmdb.controller import cmdb_router
from apis.issue.controller import issue_router
api_router = APIRouter()

# Router registration: (router, url prefix, tag); None means no prefix.
# Registration order matches the original explicit include_router calls.
_ROUTER_TABLE = [
    (login_router, None, "login"),
    (user_router, "/users", "users"),
    (record_router, "/record", "record"),
    (role_router, "/role", "role"),
    (menu_router, "/menu", "menu"),
    (perm_router, "/perm", "perm"),
    (cmdb_router, "/cmdb", "cmdb"),
    (issue_router, "/issue", "issue"),
]
for _router, _prefix, _tag in _ROUTER_TABLE:
    if _prefix is None:
        api_router.include_router(_router, tags=[_tag])
    else:
        api_router.include_router(_router, prefix=_prefix, tags=[_tag])
|
from core import views
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from rest_framework.authtoken import views as authtoken_views
router = DefaultRouter()
# ViewSets exposed under /v1/<prefix>/ via the DRF router.
# Registration order matches the original explicit register calls.
for _prefix, _viewset in (
    (r'category', views.CategoryViewSet),
    (r'provider', views.ProviderViewSet),
    (r'subscription', views.SubscriptionViewSet),
    (r'entry', views.EntryViewSet),
):
    router.register(_prefix, _viewset)

urlpatterns = [
    url(r'^v1/', include(router.urls)),
    url(r'^v1/upload/$', views.UploadView.as_view()),
    url(r'^v1/sources/$', views.SourceListView.as_view()),
    url(r'^v1/api-token-auth/$', authtoken_views.obtain_auth_token),
    url(r'^v1/api-token-auth/(?P<backend>[^/]+)/$', views.ObtainAuthToken.as_view()),
]
|
###
### Author: David Wallin, Daniel Costa
### Time-stamp: <2020-03-26 15:47:00 dwa>
from multicorn import ForeignDataWrapper
from multicorn.utils import log_to_postgres as log2pg
from pymongo import MongoClient
from dateutil.parser import parse
from functools import partial, reduce
def dict_traverser(path, doc):
    """Walk `doc` along the keys in `path`.

    A missing key yields {}; once a non-dict value is reached it is
    returned unchanged regardless of remaining path components (this
    mirrors the original partial(reduce, ...) implementation exactly).
    """
    node = doc
    for key in path:
        if type(node) == dict:
            node = node.get(key, {})
    return node
def coltype_formatter(coltype):
    """Return a value normalizer for temporal column types, else None.

    The returned callable passes through values that already behave like
    date/datetime objects (anything exposing .isoformat) and hands strings
    to dateutil's parse otherwise.
    """
    temporal_types = ('timestamp without time zone',
                      'timestamp with time zone',
                      'date')
    if coltype not in temporal_types:
        return None

    def normalize(value):
        return value if hasattr(value, 'isoformat') else parse(value)

    return normalize
class Mongoose_fdw (ForeignDataWrapper):
    """Multicorn foreign data wrapper exposing a MongoDB collection as a
    PostgreSQL foreign table.

    Dotted column names (e.g. "a.b.c") address nested document fields.
    """

    def __init__(self, options, columns):
        """Connect and authenticate using the foreign-table OPTIONS.

        options: host, port, user, password, db, collection, auth_db.
        columns: column name -> column definition (Multicorn ColumnDefinition).
        """
        super(Mongoose_fdw, self).__init__(options, columns)
        self.host_name = options.get('host', 'localhost')
        self.port_nr = int(options.get('port', '27017'))
        self.user = options.get('user')
        self.password = options.get('password')
        self.db_name = options.get('db', 'test')
        self.collection_name = options.get('collection', 'test')
        self.c = MongoClient(host=self.host_name,
                             port=self.port_nr)
        self.auth_db = options.get('auth_db', self.db_name)
        # NOTE(review): authentication is issued through the hard-coded
        # 'userprofile' database handle while source=auth_db selects the
        # credential database that actually governs the login. Confirm
        # 'userprofile' is intentional and not a leftover.
        self.c.userprofile.authenticate(self.user,
                                        self.password,
                                        source=self.auth_db)
        self.db = getattr(self.c, self.db_name)
        self.coll = getattr(self.db, self.collection_name)
        # log2pg('cols: {}'.format(columns))
        # Per column: an optional value formatter (temporal columns are
        # normalized via dateutil) and the pre-split dotted path.
        self.fields = {col: {'formatter': coltype_formatter(coldef.type_name),
                             'path': col.split('.')} for (col, coldef) in columns.items()}

    def build_spec(self, quals):
        """Translate Multicorn quals into a MongoDB filter document."""
        Q = {}
        comp_mapper = {'>': '$gt',
                       '>=': '$gte',
                       '<=': '$lte',
                       '<': '$lt'}
        for qual in quals:
            val_formatter = self.fields[qual.field_name]['formatter']
            vform = lambda val: val_formatter(val) if val_formatter is not None else val
            if qual.operator == '=':
                Q[qual.field_name] = vform(qual.value)
            elif qual.operator in ('>', '>=', '<=', '<'):
                # Range operators accumulate into one sub-document so a
                # BETWEEN becomes {'$gte': lo, '$lte': hi} on one field.
                # (The re-assignment below is redundant after setdefault,
                # but harmless.)
                comp = Q.setdefault(qual.field_name, {})
                comp[comp_mapper[qual.operator]] = vform(qual.value)
                Q[qual.field_name] = comp
            else:
                # Unsupported operators are only logged; PostgreSQL
                # re-checks quals on the returned rows anyway.
                log2pg('Qual operator {} not implemented yet: {}'.format(qual.field_name, qual))
        return Q

    def execute(self, quals, columns):
        """Yield one row dict per matching MongoDB document."""
        ## Only request fields of interest:
        fields = {k: True for k in columns}
        if '_id' not in fields:
            fields['_id'] = False
        Q = self.build_spec(quals)
        # log2pg('spec: {}'.format(Q))
        # log2pg('fields: {}'.format(fields))
        cur = self.coll.find(filter=Q, projection=fields)
        for doc in cur:
            yield {col: dict_traverser(self.fields[col]['path'], doc) for col in columns}
## Local Variables: ***
## mode:python ***
## coding: utf-8 ***
## End: ***
|
#!/usr/bin/env python3
# -- coding:utf-8 --
# @Author: markushammered@gmail.com
# @Development Tool: Pycharm
# @Create Time: 2022/5/2
# @File Name: auth.py
import hmac
import os

from flask import request
from flask_httpauth import HTTPTokenAuth
auth = HTTPTokenAuth()


@auth.verify_token
def verify_token(token):
    """Token callback for HTTPTokenAuth.

    Accepts the request when the presented token equals the ACCESS_KEY
    environment variable, or when the 'unittest' key is presented on a
    /system endpoint (test-only key kept from the original code).
    Returns True to accept, False to reject.
    """
    secret_key = os.getenv('ACCESS_KEY')
    # Fail closed when ACCESS_KEY is not configured, and compare with
    # hmac.compare_digest so the comparison does not leak the key length
    # or a matching prefix through timing.
    if secret_key and token is not None and hmac.compare_digest(token, secret_key):
        return True
    # Restrict the 'unittest' key to the /system endpoints.
    if (token == 'unittest' or
            request.args.get('key') == 'unittest') and \
            'system' in request.path.split('/'):
        return True
    return False
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class FormDesignerConfig(AppConfig):
    """Django app configuration for the form_designer app."""
    # Dotted app label, as listed in INSTALLED_APPS.
    name = 'form_designer'
    # Human-readable, translatable name shown e.g. in the admin.
    verbose_name = _("Form Designer")
|
#__LICENSE_GOES_HERE__
'''
A script that drives buildexts.py and installs the built extension into a platform
subdir of the main ext dir. This way we won't have cgui.py or cgui.pyd/so for various
platforms overwriting each other.
'''
import sys, os, glob

# NOTE: This is a Python 2 script (print statements below).
# Resolve the directory this script lives in and the digsby root above it,
# then make the digsby root importable (config.py lives there).
scriptDir = os.path.abspath(sys.path[0])
opj = os.path.join
digsbyDir = opj(scriptDir, "..")
sys.path += [digsbyDir]
clean = False
from config import *
# Per-platform output directory (platformName comes from config).
platDir = opj(scriptDir, platformName)
# "clean" mode: delegate to buildexts.py clean and delete platform artifacts.
if "clean" in sys.argv:
    os.system("cd %s;%s buildexts.py clean" %
              (scriptDir, sys.executable))
    for afile in glob.glob(os.path.join(platDir, "*")):
        os.remove(afile)
    sys.exit(0)
# Forward all remaining command-line arguments to buildexts.py verbatim.
argsAsString = ""
for arg in sys.argv[1:]:
    argsAsString += " " + arg
if "PYTHONPATH" in os.environ:
    print "PYTHONPATH is %s" % os.environ["PYTHONPATH"]
else:
    print "PYTHONPATH not set!"
# Build and install the extensions into the per-platform directory so
# different platforms' cgui binaries do not overwrite each other.
os.system("cd %s;%s buildexts.py install --install-platlib=%s --install-scripts=%s%s" %
          (scriptDir, sys.executable, platDir, platDir, argsAsString))
|
from typing import List
from typing_extensions import Annotated
from alpyro_msgs import RosMessage, float64, string
from alpyro_msgs.std_msgs.header import Header
class JointJog(RosMessage):
    """alpyro binding for the control_msgs/JointJog ROS message.

    Jog (incremental motion) commands for a set of named joints.
    """
    __msg_typ__ = "control_msgs/JointJog"
    # Base64-encoded canonical message definition used for (de)serialization.
    __msg_def__ = "c3RkX21zZ3MvSGVhZGVyIGhlYWRlcgogIHVpbnQzMiBzZXEKICB0aW1lIHN0YW1wCiAgc3RyaW5nIGZyYW1lX2lkCnN0cmluZ1tdIGpvaW50X25hbWVzCmZsb2F0NjRbXSBkaXNwbGFjZW1lbnRzCmZsb2F0NjRbXSB2ZWxvY2l0aWVzCmZsb2F0NjQgZHVyYXRpb24KCg=="
    __md5_sum__ = "1685da700c8c2e1254afc92a5fb89c96"
    header: Header
    # Joints addressed by the displacement/velocity arrays below.
    joint_names: Annotated[List[string], 0, 0]
    displacements: Annotated[List[float64], 0, 0]
    velocities: Annotated[List[float64], 0, 0]
    duration: float64
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]

# Maps each exported symbol to the notebook that defines it.
index = {"helloworld": "helloWorld.ipynb"}
# Generated Python modules backing the notebooks above.
modules = ["helloworld.py"]
doc_url = "https://thanakijwanavit.github.io/src/"
git_url = "https://github.com/thanakijwanavit/src/tree/main/"
def custom_doc_links(name):
    """nbdev hook: return a custom documentation URL for *name*.

    This project defines no custom links, so every lookup yields None.
    """
    return None
|
# Source : https://github.com/GuessWhatGame/generic/tree/master/data_provider
import numpy as np
from utils.file_handlers import pickle_loader
class GloveEmbeddings(object):
    """Token -> GloVe embedding lookup backed by a pickled dict."""

    def __init__(self, file, glove_dim=300):
        # pickle_loader is the project helper from utils.file_handlers;
        # it returns the token -> vector mapping.
        self.glove = pickle_loader(file)
        self.glove_dim = glove_dim

    def get_embeddings(self, tokens):
        """Map each token to its GloVe vector.

        Tokens are lower-cased and stripped of "'s" before lookup;
        out-of-vocabulary tokens map to a zero vector of glove_dim.
        """
        normalized = (tok.lower().replace("'s", "") for tok in tokens)
        return [
            np.array(self.glove[key]) if key in self.glove
            else np.zeros((self.glove_dim,))
            for key in normalized
        ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
mslib.mswms.mswms
~~~~~~~~~~~~~~~~~
The module can be run with the Python Flask framework and can be run as
python mswms.py.
:copyright: Copyright 2016 Reimar Bauer
:copyright: Copyright 2016-2021 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import logging
import sys
from mslib import __version__
from mslib.mswms.wms import mss_wms_settings, server
from mslib.mswms.wms import app as application
from mslib.utils import setup_logging
from mslib.msui.mss_qt import Updater, Worker
def main():
    """Command-line entry point for the MSS WMS server.

    Depending on the arguments this either prints the version, runs the
    self-updater, generates the plot gallery, or starts the Flask app.
    Every branch except the last exits the process via sys.exit().
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--version", help="show version", action="store_true", default=False)
    parser.add_argument("--host", help="hostname",
                        default="127.0.0.1", dest="host")
    parser.add_argument("--port", help="port", dest="port", default="8081")
    parser.add_argument("--threadpool", help="threadpool", dest="use_threadpool", action="store_true", default=False)
    parser.add_argument("--debug", help="show debugging log messages on console", action="store_true", default=False)
    parser.add_argument("--logfile", help="If set to a name log output goes to that file", dest="logfile",
                        default=None)
    parser.add_argument("--update", help="Updates MSS to the newest version", action="store_true", default=False)
    # "gallery" subcommand with its own options.
    subparsers = parser.add_subparsers(help='Available actions', dest='action')
    gallery = subparsers.add_parser("gallery", help="Subcommands surrounding the gallery")
    gallery.add_argument("--create", action="store_true", default=False,
                         help="Generates plots of all layers not already present")
    gallery.add_argument("--clear", action="store_true", default=False,
                         help="Deletes all plots and corresponding code")
    gallery.add_argument("--refresh", action="store_true", default=False,
                         help="Deletes all plots and regenerates them, a mix of --clear and --create")
    gallery.add_argument("--levels", default="", help="A comma-separated list of all levels visible on the gallery.\n"
                                                      "E.g. --levels 200,300"
                                                      "Use --levels all to include all levels.\n"
                                                      "Default is the middle level.")
    gallery.add_argument("--itimes", default="", help="A comma-separated list of all init times visible on the gallery"
                                                      ", in ISO format.\nE.g. --itimes 2012-10-17T12:00:00\n"
                                                      "Use --itimes all to use all available itimes.\n"
                                                      "Default is the latest itime.")
    gallery.add_argument("--vtimes", default="", help="A comma-separated list of all valid times visible on the gallery"
                                                      ", in ISO format.\nE.g. --vtimes 2012-10-19T12:00:00\n"
                                                      "Use --vtimes all to use all available vtimes.\n"
                                                      "Default is the latest vtime")
    gallery.add_argument("--show-code", action="store_true", default=False,
                         help="Generates plots of all layers not already present, "
                              "and generates code snippets for each plot when clicking on the image")
    gallery.add_argument("--url-prefix", default="",
                         help="Normally the plot images should appear at the relative url /static/plots/*.png.\n"
                              "In case they are prefixed by something, e.g. /demo/static/plots/*.png,"
                              " please provide the prefix /demo here.")
    args = parser.parse_args()
    # --version: print banner and stop.
    if args.version:
        print("***********************************************************************")
        print("\n Mission Support System (mss)\n")
        print("***********************************************************************")
        print("Documentation: http://mss.rtfd.io")
        print("Version:", __version__)
        sys.exit()
    updater = Updater()
    # --update: run the updater to completion (wait for its workers), then exit.
    if args.update:
        updater.on_update_available.connect(lambda old, new: updater.update_mss())
        updater.on_log_update.connect(lambda s: print(s.replace("\n", "")))
        updater.on_status_update.connect(lambda s: print(s.replace("\n", "")))
        updater.run()
        while Worker.workers:
            list(Worker.workers)[0].wait()
        sys.exit()
    setup_logging(args)
    # "gallery" subcommand: (re)generate the plot gallery, then exit.
    if args.action == "gallery":
        create = args.create or args.refresh
        clear = args.clear or args.refresh
        server.generate_gallery(create, clear, args.show_code, url_prefix=args.url_prefix, levels=args.levels,
                                itimes=args.itimes, vtimes=args.vtimes)
        logging.info("Gallery generation done.")
        sys.exit()
    # Normal serving: announce available updates, then run the Flask app.
    updater.on_update_available.connect(lambda old, new: logging.info("MSS can be updated from %s to %s.\nRun"
                                                                      " the --update argument to update the server."
                                                                      %(old,new)))
    updater.run()
    logging.info("Configuration File: '%s'"%mss_wms_settings.__file__)
    # NOTE(review): args.port is a string here (argparse default "8081",
    # no type=int) -- confirm Flask/werkzeug accepts a string port, or
    # consider passing int(args.port).
    application.run(args.host, args.port)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
""" IPython extension: add %clear magic """
from IPython.core import ipapi
import gc
ip = ipapi.get()
def clear_f(self,arg):
    """ Clear various data (e.g. stored history data)

    %clear in - clear input history
    %clear out - clear output history
    %clear shadow_compress - Compresses shadow history (to speed up ipython)
    %clear shadow_nuke - permanently erase all entries in shadow history
    %clear dhist - clear dir history
    %clear array - clear only variables that are NumPy arrays

    Examples:

    In [1]: clear in
    Flushing input history

    In [2]: clear shadow_compress
    Compressing shadow history

    In [3]: clear shadow_nuke
    Erased all keys from shadow history

    In [4]: clear dhist
    Clearing directory history
    """
    # NOTE: Python 2 code (print statements, backtick repr below).
    api = self.getapi()
    user_ns = self.user_ns  # local lookup, heavily used
    # Each whitespace-separated word in `arg` is an independent target.
    for target in arg.split():
        if target == 'out':
            print "Flushing output cache (%d entries)" % len(user_ns['_oh'])
            self.outputcache.flush()
        elif target == 'in':
            print "Flushing input history"
            pc = self.outputcache.prompt_count + 1
            for n in range(1, pc):
                # `n` is Python 2 backtick-repr (equivalent to repr(n)).
                key = '_i'+`n`
                user_ns.pop(key,None)
                # NOTE(review): the pop above already removed the key, so
                # this del always raises and is swallowed -- redundant.
                try:
                    del user_ns[key]
                except: pass
            # must be done in-place
            self.input_hist[:] = ['\n'] * pc
            self.input_hist_raw[:] = ['\n'] * pc
        elif target == 'array':
            # Support cleaning up numpy arrays
            try:
                from numpy import ndarray
                # This must be done with items and not iteritems because we're
                # going to modify the dict in-place.
                for x,val in user_ns.items():
                    if isinstance(val,ndarray):
                        del user_ns[x]
            except AttributeError:
                print "Clear array only works if Numpy is available."
        elif target == 'shadow_compress':
            print "Compressing shadow history"
            api.db.hcompress('shadowhist')
        elif target == 'shadow_nuke':
            print "Erased all keys from shadow history "
            for k in ip.db.keys('shadowhist/*'):
                del ip.db[k]
        elif target == 'dhist':
            print "Clearing directory history"
            del user_ns['_dh'][:]
    # Reclaim memory freed by the deletions above.
    gc.collect()
# Activate the extension
ip.define_magic("clear",clear_f)
import ipy_completers
# Offer tab-completion of the %clear sub-commands.
ipy_completers.quick_completer(
    '%clear','in out shadow_nuke shadow_compress dhist')
|
from __future__ import absolute_import, unicode_literals
from django.conf import settings
# Minimal standalone Django settings for running the redis_sessions_fork
# test suite: redis-backed sessions plus an in-memory sqlite database.
# configure() must run before django.setup() below.
settings.configure(
    SESSION_ENGINE='redis_sessions_fork.session',
    # SESSION_SERIALIZER='redis_sessions_fork.serializers.UjsonSerializer',
    SESSION_REDIS_PREFIX='django_sessions_tests',
    INSTALLED_APPS=(
        'django.contrib.sessions',
        'redis_sessions_fork'
    ),
    DATABASES={
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': ':memory:'
        }
    }
)

try:
    # django 1.7 standalone app setup
    import django
    django.setup()
except AttributeError:
    # Older Django has no setup(); configuration above is sufficient.
    pass
|
from enum import Enum
class Topics(Enum):
    """Message topic paths used by the Pi sensor publishers/subscribers."""
    SEISMIC = "/pi/test"
    # Temperature and humidity readings.
    T_AND_H = "/pi/temp"
    ULTRASOUND = "/pi/ultrasound"
    EXAMPLE = "/example"
|
# Runs after normalization and per_person_ratio_and_factor and pre_plot_aggregation.
import shutil
from pathlib import Path
import itertools
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import collections
from scipy.optimize import minimize_scalar
# Formant measurement columns F1_3 ... F1_19 and F2_3 ... F2_19
# (odd sample indices 3-19); kCols is both sets concatenated.
cols1 = [f"F1_{i}" for i in range(3, 20, 2)]
cols2 = [f"F2_{i}" for i in range(3, 20, 2)]
kCols = cols1 + cols2
def PlotWithSlices(df, data_name, output_dir):
    """For each demographic grouping, 3-D plot the per-group mean F1/F2
    curves with their fitted inflection ("break") markers.

    One PNG per grouping is written to output_dir as
    '<data_name>@<group_name>.png'.
    """
    for group_name in ['Gender', 'AgeGroup', 'Family1', 'Family2', 'Family3', 'Family4', 'Education1', 'Career1', 'Career2', 'Language1', 'Word']:
        grouped_df = df.groupby([group_name])[kCols].mean()
        # grouped_df.to_csv(output_dir / (data_name + '_' + group_name + '_raw.csv'), index=True)
        full_group_name = '@'.join([data_name, group_name])
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        print(group_name)
        # One z-slice per group value (e.g. per gender).
        z_label = grouped_df.index.to_numpy().tolist()
        print(z_label)
        cmap = plt.get_cmap('viridis')
        colors = cmap(np.linspace(0, 1, len(z_label)))
        for key in z_label:
            x = np.arange(0, 9)
            color = colors[z_label.index(key)]
            z = z_label.index(key)
            mdf = grouped_df.loc[key]
            y1 = mdf[cols1].to_numpy(dtype='float')
            y2 = mdf[cols2].to_numpy(dtype='float')
            # Fit 4th-degree polynomials to the F1/F2 curves, then locate
            # the maximum of each second derivative on [0, 8] by minimizing
            # its negation ("break"/inflection point).
            coeff1 = np.polyfit(x, y1, 4)
            coeff2 = np.polyfit(x, y2, 4)
            line1 = np.poly1d(coeff1)
            line2 = np.poly1d(coeff2)
            line1dd = np.polyder(line1, 2)
            line2dd = np.polyder(line2, 2)
            line1dd_max = minimize_scalar(-line1dd,
                                          bounds=(0, 8), method='bounded')
            line2dd_max = minimize_scalar(-line2dd,
                                          bounds=(0, 8), method='bounded')
            inflection1 = line1dd_max.x
            inflection2 = line2dd_max.x
            inflection1y = line1(inflection1)
            inflection2y = line2(inflection2)
            # Raw curves plus short vertical break markers, one z per group.
            ax.plot(x, y1, zs=z, zdir='x', c=color, label='F1', linewidth=3.0)
            ax.plot(x, y2, zs=z, zdir='x', c=color, label='F2')
            ax.plot([inflection1, inflection1], [inflection1y-100, inflection1y+100], zs=z, zdir='x', c='black')
            ax.plot([inflection2, inflection2], [inflection2y-100, inflection2y+100], zs=z, zdir='x', c='black')
        ax.set(xticks=range(len(z_label)), xticklabels=z_label)
        plt.title(full_group_name)
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.savefig(output_dir / (full_group_name + '.png'),
                    bbox_inches="tight")
        plt.clf()
        plt.cla()
def PlotNoSlice(df, full_group_name, output_dir):
    """Plot one averaged F1/F2 curve pair (a single row/Series of kCols).

    Writes two PNGs to output_dir: '<name>.fitted.png' (raw points plus
    4th-degree polynomial fits) and '<name>.break.png' (second derivatives
    with the located break points).
    """
    x = np.arange(0, 9)
    y1 = df[cols1].to_numpy(dtype='float')
    y2 = df[cols2].to_numpy(dtype='float')
    # Fit 4th-degree polynomials, then find the maximum of each second
    # derivative on [0, 8] by minimizing its negation (the "break" point).
    coeff1 = np.polyfit(x, y1, 4)
    coeff2 = np.polyfit(x, y2, 4)
    line1 = np.poly1d(coeff1)
    line2 = np.poly1d(coeff2)
    line1dd = np.polyder(line1, 2)
    line2dd = np.polyder(line2, 2)
    line1dd_max = minimize_scalar(-line1dd,
                                  bounds=(0, 8), method='bounded')
    line2dd_max = minimize_scalar(-line2dd,
                                  bounds=(0, 8), method='bounded')
    inflection1 = line1dd_max.x
    inflection2 = line2dd_max.x
    # Plot f1/f2
    plt.plot(x, y1, 'o')
    plt.plot(x, y2, 'x')
    plt.plot(x, line1(x), label='F1 fitted')
    plt.plot(x, line2(x), label='F2 fitted')
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    plt.title(full_group_name)
    plt.savefig(output_dir / (full_group_name + '.fitted.png'),
                bbox_inches="tight")
    plt.clf()
    plt.cla()
    # Plot deriv and inflection
    plt.plot(x, line1dd(x), label='F1 2nd deriv')
    plt.plot(x, line2dd(x), label='F2 2nd deriv')
    plt.axvline(x=inflection1, linestyle=':', label='F1 break')
    plt.axvline(x=inflection2, linestyle='-.', label='F2 break')
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    plt.title(full_group_name)
    plt.savefig(output_dir / (full_group_name + '.break.png'),
                bbox_inches="tight")
    plt.clf()
    plt.cla()
def SlicePlotDataS(df, output_dir):
    """Generate the S-language plots.

    Per-position mean F1/F2 curves are plotted for the Sba2=No and
    Sba2=Yes subsets, then the S-language position-b rows annotated 'a2'
    are plotted sliced by every demographic grouping.

    Fixes vs. the original: removed the locals cols1/cols2/kCols that
    shadowed the identical module-level constants, and a dead duplicate
    `matched_rows = []` initialization.
    """
    # Subset where speaker B's annotation is a1 (IsSba2 == 'No').
    sa_a1_sb_a1 = df[df['IsSba2'] == 'No']
    #sa_a1_sb_a1.to_csv(output_dir / 'sa_a1_sb_a1_raw.csv', index=False)
    sa_a1_sb_a1_mean = sa_a1_sb_a1.groupby(['Pos'])[kCols].mean()
    #sa_a1_sb_a1_mean.to_csv(output_dir / 'sa_a1_sb_a1_mean.csv', index=True)
    PlotNoSlice(sa_a1_sb_a1_mean.iloc[0], 'sa_a1_sb_a1_a', output_dir)
    PlotNoSlice(sa_a1_sb_a1_mean.iloc[1], 'sa_a1_sb_a1_b', output_dir)
    # Subset where speaker B's annotation is a2 (IsSba2 == 'Yes').
    sa_a1_sb_a2 = df[df['IsSba2'] == 'Yes']
    #sa_a1_sb_a2.to_csv(output_dir / 'sa_a1_sb_a2_raw.csv', index=False)
    sa_a1_sb_a2_mean = sa_a1_sb_a2.groupby(['Pos'])[kCols].mean()
    #sa_a1_sb_a2_mean.to_csv(output_dir / 'sa_a1_sb_a2_mean.csv', index=True)
    PlotNoSlice(sa_a1_sb_a2_mean.iloc[0], 'sa_a1_sb_a2_a', output_dir)
    PlotNoSlice(sa_a1_sb_a2_mean.iloc[1], 'sa_a1_sb_a2_b', output_dir)
    # Collect S-language position-b rows annotated 'a2'
    # (Filename layout: lang_..._..._..._pos_...).
    matched_rows = []
    for _, row in df.iterrows():
        comps = row['Filename'].split('_')
        lang = comps[0]
        pos = comps[4]
        if lang == 'S' and pos == 'b' and row['Annotation'] == 'a2':
            matched_rows.append(row)
    input_df = pd.DataFrame(matched_rows)
    PlotWithSlices(input_df, 'all_s_sb_a2', output_dir)
def SlicePlotDataM(df, output_dir):
    """Generate the M-language plots for the Sba2=No and Sba2=Yes subsets.

    Each subset gets the full demographic-sliced 3-D plots plus one
    averaged F1/F2 plot of the whole subset.
    """
    for flag, tag in (('No', 'm_sb_a1'), ('Yes', 'm_sb_a2')):
        subset = df[df['IsSba2'] == flag]
        PlotWithSlices(subset, tag, output_dir)
        # Grouping by the constant IsSba2 column yields a single mean row.
        subset_mean = subset.groupby(['IsSba2'])[kCols].mean()
        PlotNoSlice(subset_mean.iloc[0], tag, output_dir)
input_base_dir = Path('./analysis/output/')
output_base_dir = Path('./analysis/output/break/')
# Rebuild the break/ output directory from scratch on every run.
shutil.rmtree(output_base_dir, ignore_errors=True)
output_base_dir.mkdir(parents=True, exist_ok=True)
# S-language data, then M-language data (CSVs produced by the earlier
# pre_plot_aggregation step).
df = pd.read_csv(input_base_dir / 'S_all_plot_raw_data.csv')
SlicePlotDataS(df, output_base_dir)
df = pd.read_csv(input_base_dir / 'M_all_plot_raw_data.csv')
SlicePlotDataM(df, output_base_dir)
### Tools
__all__ = ["Dtool_ObjectToDict", "Dtool_funcToMethod", "Dtool_PreloadDLL"]
import imp, sys, os
# The following code exists to work around a problem that exists
# with Python 2.5 or greater.
# Specifically, Python 2.5 is designed to import files named *.pyd
# only; it will not import files named *.dll (or *.so). We work
# around this problem by explicitly preloading all of the dll's we
# expect to need.
# Determine the platform's dynamic-library and Python-module extensions,
# and the optional debug-build suffix.
dll_suffix = ''
if sys.platform == "win32":
    # On Windows, dynamic libraries end in ".dll".
    dll_ext = '.dll'
    module_ext = '.pyd'
    # We allow the caller to preload dll_suffix into the sys module.
    dll_suffix = getattr(sys, 'dll_suffix', None)
    if dll_suffix is None:
        # Otherwise, we try to determine it from the executable name:
        # python_d.exe implies _d across the board.
        dll_suffix = ''
        if sys.executable.endswith('_d.exe'):
            dll_suffix = '_d'

elif sys.platform == "darwin":
    # On OSX, the dynamic libraries usually end in .dylib, but
    # sometimes we need .so.
    try:
        from direct.extensions_native.extensions_darwin import dll_ext
    except ImportError:
        dll_ext = '.dylib'
    module_ext = '.so'

else:
    # On most other UNIX systems (including linux), .so is used.
    dll_ext = '.so'
    module_ext = '.so'

if sys.platform == "win32":
    # On Windows, we must furthermore ensure that the PATH is modified
    # to locate all of the DLL files.

    # First, search for the directory that contains all of our compiled
    # modules.
    target = None
    filename = "libpandaexpress%s%s" % (dll_suffix, dll_ext)
    for entry in sys.path + [sys.prefix]:
        lib = os.path.join(entry, filename)
        if os.path.exists(lib):
            target = entry
    # Fixed: identity comparison with None (was `target == None`).
    if target is None:
        message = "Cannot find %s" % (filename)
        raise ImportError(message)

    # And add that directory to the system path.
    path = os.environ["PATH"]
    if not path.startswith(target + ";"):
        os.environ["PATH"] = target + ";" + path
def Dtool_FindModule(module):
    """Search sys.path for *module* as a compiled (.pyd/.so) file.

    Returns the path of the first match, or None when the module is not
    present as a compiled file on the path.
    """
    relative = module.replace('.', os.path.sep) + module_ext
    for base in sys.path:
        candidate = os.path.join(base, relative)
        if os.path.exists(candidate):
            return candidate
    return None
def Dtool_PreloadDLL(module):
    """Ensure *module*'s dynamic library is loaded before it is imported.

    Nothing is done when the module is already imported or exists as a
    regular compiled (.pyd/.so) module on the Python path. Otherwise the
    matching dynamic library is located beside the path entries and loaded
    explicitly under the module's name. Raises ImportError when no
    library can be found.
    """
    if module in sys.modules:
        return
    # A .pyd/.so on the path imports normally -- no preloading needed.
    if Dtool_FindModule(module):
        return
    # Search for the raw dynamic library (with debug suffix, if any).
    relative = module.replace('.', os.path.sep) + dll_suffix + dll_ext
    found_dir = None
    for base in sys.path + [sys.prefix]:
        if os.path.exists(os.path.join(base, relative)):
            found_dir = base
            break
    if found_dir is None:
        raise ImportError("DLL loader cannot find %s." % (module))
    # Load the library explicitly so a later import resolves against it.
    imp.load_dynamic(module, os.path.join(found_dir, relative))
# Nowadays, we can compile libpandaexpress with libpanda into a
# .pyd file called panda3d/core.pyd which can be imported without
# any difficulty. Let's see if this is the case.
# In order to support things like py2exe that play games with the
# physical python files on disk, we can't entirely rely on
# Dtool_FindModule to find our panda3d.core module. However, we
# should be able to import it. To differentiate the old-style Panda
# build (with .dll's) from the new-style Panda build (with .pyd's), we
# first try to import panda3d.core directly; if it succeeds we're in a
# new-style build, and if it fails we must be in an old-style build.
try:
    # New-style build: panda3d/core.pyd imports directly.
    from panda3d.core import *
except ImportError:
    # Old-style build: preload the express DLL, then star-import from it.
    Dtool_PreloadDLL("libpandaexpress")
    from libpandaexpress import *
def Dtool_ObjectToDict(cls, name, obj):
    """Expose *obj* on *cls* by storing it in the class's DtoolClassDict."""
    cls.DtoolClassDict[name] = obj
def Dtool_funcToMethod(func, cls, method_name=None):
    """Adds func to class so it is an accessible method; use method_name to specify the name to be used for calling the method.
    The new method is accessible to any instance immediately."""
    if sys.version_info < (3, 0):
        # Python 2 bound-method bookkeeping; never taken on Python 3.
        func.im_class = cls
        func.im_func = func
        func.im_self = None
    # Default the method name to the function's own name.
    cls.DtoolClassDict[method_name or func.__name__] = func
|
''' The flipper GUI application. '''
from . import main
from . import pieces
from . import options
from . import inputbox
from . import choicebox
from . import progress
from . import widgets
# Set up shorter names for all of the different classes and some common constructors.
start = main.start
Options = options.Options
FlipperApplication = main.FlipperApplication
ColourPalette = pieces.ColourPalette
CanvasVertex = pieces.CanvasVertex
CanvasEdge = pieces.CanvasEdge
CanvasTriangle = pieces.CanvasTriangle
CurveComponent = pieces.CurveComponent
TrainTrackBlock = pieces.TrainTrackBlock
ProgressApp = progress.ProgressApp
SplitButton = widgets.SplitButton
Meter = widgets.Meter
AnimatedCanvas = widgets.AnimatedCanvas
lines_intersect = pieces.lines_intersect
interpolate = pieces.interpolate
apply_progression = progress.apply_progression
get_input = inputbox.get_input
get_choice = choicebox.get_choice
|
import gc
from collections import defaultdict
from typing import Any, Dict, Tuple
import numpy as np
import pandas as pd
# Funcion for user stats with loops
def add_features(
    df: pd.DataFrame,
    answered_correctly_u_count: Dict[int, int],
    answered_correctly_u_sum: Dict[int, int],
    elapsed_time_u_sum: Dict[int, int],
    explanation_u_sum: Dict[int, int],
    timestamp_u: Dict[int, int],
    timestamp_u_incorrect: Dict[int, int],
    answered_correctly_q_count: Dict[int, int],
    answered_correctly_q_sum: Dict[int, int],
    elapsed_time_q_sum: Dict[int, int],
    explanation_q_sum: Dict[int, int],
    answered_correctly_uq: Dict[int, int],
    update: bool = True,
) -> pd.DataFrame:
    """Append rolling user/question features to `df`.

    Rows are processed in order: each row first receives the feature
    values as they stood *before* that row, then the row is folded into
    the running state. All dict arguments are mutated in place.

    NOTE(review): despite the Dict[int, int] annotations, timestamp_u and
    timestamp_u_incorrect hold per-user *lists* of timestamps, and
    answered_correctly_uq is a nested user -> question -> count mapping;
    confirm against the caller that builds these (defaultdict) state dicts.

    With update=False (inference mode, where the answer is unknown), the
    answer-dependent state (answered_correctly_*_sum,
    timestamp_u_incorrect) is left untouched; answer-independent counters
    (counts, elapsed/explanation sums, timestamp_u) still advance.

    Returns `df` with the new feature columns concatenated on the right.
    """
    # -----------------------------------------------------------------------
    # Client features
    answered_correctly_u_avg = np.zeros(len(df), dtype=np.float32)
    elapsed_time_u_avg = np.zeros(len(df), dtype=np.float32)
    explanation_u_avg = np.zeros(len(df), dtype=np.float32)
    timestamp_u_recency_1 = np.zeros(len(df), dtype=np.float32)
    timestamp_u_recency_2 = np.zeros(len(df), dtype=np.float32)
    timestamp_u_recency_3 = np.zeros(len(df), dtype=np.float32)
    timestamp_u_incorrect_recency = np.zeros(len(df), dtype=np.float32)
    # -----------------------------------------------------------------------
    # Question features
    answered_correctly_q_avg = np.zeros(len(df), dtype=np.float32)
    elapsed_time_q_avg = np.zeros(len(df), dtype=np.float32)
    explanation_q_avg = np.zeros(len(df), dtype=np.float32)
    # -----------------------------------------------------------------------
    # User Question
    answered_correctly_uq_count = np.zeros(len(df), dtype=np.int32)
    # -----------------------------------------------------------------------
    # Row layout: row[0]=user_id, row[1]=answered_correctly,
    # row[2]=content_id, row[3]=prior_question_elapsed_time,
    # row[4]=prior_question_had_explanation, row[5]=timestamp.
    for num, row in enumerate(
        df[
            [
                "user_id",
                "answered_correctly",
                "content_id",
                "prior_question_elapsed_time",
                "prior_question_had_explanation",
                "timestamp",
            ]
        ].values
    ):
        # Client features assignation
        # ------------------------------------------------------------------
        if answered_correctly_u_count[row[0]] != 0:
            answered_correctly_u_avg[num] = (
                answered_correctly_u_sum[row[0]] / answered_correctly_u_count[row[0]]
            )
            elapsed_time_u_avg[num] = (
                elapsed_time_u_sum[row[0]] / answered_correctly_u_count[row[0]]
            )
            explanation_u_avg[num] = (
                explanation_u_sum[row[0]] / answered_correctly_u_count[row[0]]
            )
        else:
            # First question ever seen for this user: no history yet.
            answered_correctly_u_avg[num] = np.nan
            elapsed_time_u_avg[num] = np.nan
            explanation_u_avg[num] = np.nan

        # Recency features: time since the user's 1st/2nd/3rd most recent
        # question (timestamp_u keeps at most the last 3 timestamps).
        if len(timestamp_u[row[0]]) == 0:
            timestamp_u_recency_1[num] = np.nan
            timestamp_u_recency_2[num] = np.nan
            timestamp_u_recency_3[num] = np.nan
        elif len(timestamp_u[row[0]]) == 1:
            timestamp_u_recency_1[num] = row[5] - timestamp_u[row[0]][0]
            timestamp_u_recency_2[num] = np.nan
            timestamp_u_recency_3[num] = np.nan
        elif len(timestamp_u[row[0]]) == 2:
            timestamp_u_recency_1[num] = row[5] - timestamp_u[row[0]][1]
            timestamp_u_recency_2[num] = row[5] - timestamp_u[row[0]][0]
            timestamp_u_recency_3[num] = np.nan
        elif len(timestamp_u[row[0]]) == 3:
            timestamp_u_recency_1[num] = row[5] - timestamp_u[row[0]][2]
            timestamp_u_recency_2[num] = row[5] - timestamp_u[row[0]][1]
            timestamp_u_recency_3[num] = row[5] - timestamp_u[row[0]][0]

        # Time since the user's most recent incorrect answer.
        if len(timestamp_u_incorrect[row[0]]) == 0:
            timestamp_u_incorrect_recency[num] = np.nan
        else:
            timestamp_u_incorrect_recency[num] = (
                row[5] - timestamp_u_incorrect[row[0]][0]
            )

        # ------------------------------------------------------------------
        # Question features assignation
        if answered_correctly_q_count[row[2]] != 0:
            answered_correctly_q_avg[num] = (
                answered_correctly_q_sum[row[2]] / answered_correctly_q_count[row[2]]
            )
            elapsed_time_q_avg[num] = (
                elapsed_time_q_sum[row[2]] / answered_correctly_q_count[row[2]]
            )
            explanation_q_avg[num] = (
                explanation_q_sum[row[2]] / answered_correctly_q_count[row[2]]
            )
        else:
            # Question never seen before: no history yet.
            answered_correctly_q_avg[num] = np.nan
            elapsed_time_q_avg[num] = np.nan
            explanation_q_avg[num] = np.nan
        # ------------------------------------------------------------------
        # Client Question assignation
        answered_correctly_uq_count[num] = answered_correctly_uq[row[0]][row[2]]
        # ------------------------------------------------------------------
        # ------------------------------------------------------------------
        # Client features updates (answer-independent; always applied)
        answered_correctly_u_count[row[0]] += 1
        elapsed_time_u_sum[row[0]] += row[3]
        explanation_u_sum[row[0]] += int(row[4])
        # Keep only the 3 most recent timestamps per user.
        if len(timestamp_u[row[0]]) == 3:
            timestamp_u[row[0]].pop(0)
            timestamp_u[row[0]].append(row[5])
        else:
            timestamp_u[row[0]].append(row[5])
        # ------------------------------------------------------------------
        # Question features updates (answer-independent; always applied)
        answered_correctly_q_count[row[2]] += 1
        elapsed_time_q_sum[row[2]] += row[3]
        explanation_q_sum[row[2]] += int(row[4])
        # ------------------------------------------------------------------
        # Client Question updates
        answered_correctly_uq[row[0]][row[2]] += 1
        # ------------------------------------------------------------------
        # Flag for training and inference
        if update:
            # ------------------------------------------------------------------
            # Client features updates (answer-dependent)
            answered_correctly_u_sum[row[0]] += row[1]
            if row[1] == 0:
                # Keep only the single most recent incorrect timestamp.
                if len(timestamp_u_incorrect[row[0]]) == 1:
                    timestamp_u_incorrect[row[0]].pop(0)
                    timestamp_u_incorrect[row[0]].append(row[5])
                else:
                    timestamp_u_incorrect[row[0]].append(row[5])
            # ------------------------------------------------------------------
            # Question features updates (answer-dependent)
            answered_correctly_q_sum[row[2]] += row[1]
            # ------------------------------------------------------------------
    user_df = pd.DataFrame(
        {
            "answered_correctly_u_avg": answered_correctly_u_avg,
            "elapsed_time_u_avg": elapsed_time_u_avg,
            "explanation_u_avg": explanation_u_avg,
            "answered_correctly_q_avg": answered_correctly_q_avg,
            "elapsed_time_q_avg": elapsed_time_q_avg,
            "explanation_q_avg": explanation_q_avg,
            "answered_correctly_uq_count": answered_correctly_uq_count,
            "timestamp_u_recency_1": timestamp_u_recency_1,
            "timestamp_u_recency_2": timestamp_u_recency_2,
            "timestamp_u_recency_3": timestamp_u_recency_3,
            "timestamp_u_incorrect_recency": timestamp_u_incorrect_recency,
        }
    )
    df = pd.concat([df, user_df], axis=1)
    return df
def update_features(
    df,
    answered_correctly_u_sum: Dict[int, int],
    answered_correctly_q_sum: Dict[int, int],
    timestamp_u_incorrect: Dict[int, list],
):
    """Update the running user/question statistics with newly answered rows.

    Called at inference time once the true answers for a batch are known.

    :param df: dataframe with user_id, answered_correctly, content_id,
        content_type_id and timestamp columns.
    :param answered_correctly_u_sum: per-user running sum of correct answers.
    :param answered_correctly_q_sum: per-question running sum of correct answers.
    :param timestamp_u_incorrect: per-user timestamps of incorrect answers,
        kept as a list of at most one element (the annotation previously said
        ``Dict[int, int]``, but ``pop``/``append`` below show the values are
        lists).
    """
    for row in df[
        ["user_id", "answered_correctly", "content_id", "content_type_id", "timestamp"]
    ].values:
        # content_type_id == 0 marks a question row (lectures are skipped).
        if row[3] == 0:
            # ------------------------------------------------------------------
            # Client features updates
            answered_correctly_u_sum[row[0]] += row[1]
            if row[1] == 0:
                # Keep only the most recent incorrect-answer timestamp.
                if len(timestamp_u_incorrect[row[0]]) == 1:
                    timestamp_u_incorrect[row[0]].pop(0)
                    timestamp_u_incorrect[row[0]].append(row[4])
                else:
                    timestamp_u_incorrect[row[0]].append(row[4])
            # ------------------------------------------------------------------
            # Question features updates
            answered_correctly_q_sum[row[2]] += row[1]
            # ------------------------------------------------------------------
def read_and_preprocess(
    feature_engineering: bool = False,
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, float, Dict[str, Any]]:
    """Read the Riiid train/valid splits, engineer features and return them.

    :param feature_engineering: when True, keep only the last 40M train rows
        so feature computation fits in RAM.
    :return: ``(train, valid, questions_df, prior_question_elapsed_time_mean,
        features_dicts)``; ``features_dicts`` holds the running per-user and
        per-question statistics needed to update features at inference time.
        (Return annotation corrected: the mean is a scalar and the dict is
        keyed by string names.)
    """
    train_pickle = "../input/riiid-cross-validation-files/cv1_train.pickle"
    valid_pickle = "../input/riiid-cross-validation-files/cv1_valid.pickle"
    question_file = "../input/riiid-test-answer-prediction/questions.csv"
    # Read data (only the columns needed downstream).
    feld_needed = [
        "timestamp",
        "user_id",
        "answered_correctly",
        "content_id",
        "content_type_id",
        "prior_question_elapsed_time",
        "prior_question_had_explanation",
    ]
    train = pd.read_pickle(train_pickle)[feld_needed]
    valid = pd.read_pickle(valid_pickle)[feld_needed]
    # Drop some training data so we don't have RAM problems.
    if feature_engineering:
        train = train.iloc[-40000000:]
    # Filter by content_type_id to discard lectures.
    train = train.loc[train.content_type_id == False].reset_index(drop=True)
    valid = valid.loc[valid.content_type_id == False].reset_index(drop=True)
    # Changing dtype to avoid lightgbm error.
    train[
        "prior_question_had_explanation"
    ] = train.prior_question_had_explanation.fillna(False).astype("int8")
    valid[
        "prior_question_had_explanation"
    ] = valid.prior_question_had_explanation.fillna(False).astype("int8")
    # Fill prior question elapsed time with the mean (computed on train only,
    # then reused for valid to avoid leakage).
    prior_question_elapsed_time_mean = (
        train["prior_question_elapsed_time"].dropna().mean()
    )
    train["prior_question_elapsed_time"].fillna(
        prior_question_elapsed_time_mean, inplace=True
    )
    valid["prior_question_elapsed_time"].fillna(
        prior_question_elapsed_time_mean, inplace=True
    )
    # Merge with question dataframe to attach each question's "part".
    questions_df = pd.read_csv(question_file)
    questions_df["part"] = questions_df["part"].astype(np.int32)
    questions_df["bundle_id"] = questions_df["bundle_id"].astype(np.int32)
    train = pd.merge(
        train,
        questions_df[["question_id", "part"]],
        left_on="content_id",
        right_on="question_id",
        how="left",
    )
    valid = pd.merge(
        valid,
        questions_df[["question_id", "part"]],
        left_on="content_id",
        right_on="question_id",
        how="left",
    )
    # Client (per-user) running statistics.
    answered_correctly_u_count = defaultdict(int)
    answered_correctly_u_sum = defaultdict(int)
    elapsed_time_u_sum = defaultdict(int)
    explanation_u_sum = defaultdict(int)
    timestamp_u = defaultdict(list)
    timestamp_u_incorrect = defaultdict(list)
    # Question running statistics.
    answered_correctly_q_count = defaultdict(int)
    answered_correctly_q_sum = defaultdict(int)
    elapsed_time_q_sum = defaultdict(int)
    explanation_q_sum = defaultdict(int)
    # Client-Question attempt counts (nested user -> question -> count).
    answered_correctly_uq = defaultdict(lambda: defaultdict(int))
    print("User feature calculation started...")
    print("\n")
    # add_features mutates the dictionaries above while building the feature
    # columns, so train MUST be processed before valid.
    train = add_features(
        train,
        answered_correctly_u_count,
        answered_correctly_u_sum,
        elapsed_time_u_sum,
        explanation_u_sum,
        timestamp_u,
        timestamp_u_incorrect,
        answered_correctly_q_count,
        answered_correctly_q_sum,
        elapsed_time_q_sum,
        explanation_q_sum,
        answered_correctly_uq,
    )
    valid = add_features(
        valid,
        answered_correctly_u_count,
        answered_correctly_u_sum,
        elapsed_time_u_sum,
        explanation_u_sum,
        timestamp_u,
        timestamp_u_incorrect,
        answered_correctly_q_count,
        answered_correctly_q_sum,
        elapsed_time_q_sum,
        explanation_q_sum,
        answered_correctly_uq,
    )
    gc.collect()
    print("User feature calculation completed...")
    print("\n")
    # Bundle the running statistics so inference can keep updating them.
    features_dicts = {
        "answered_correctly_u_count": answered_correctly_u_count,
        "answered_correctly_u_sum": answered_correctly_u_sum,
        "elapsed_time_u_sum": elapsed_time_u_sum,
        "explanation_u_sum": explanation_u_sum,
        "answered_correctly_q_count": answered_correctly_q_count,
        "answered_correctly_q_sum": answered_correctly_q_sum,
        "elapsed_time_q_sum": elapsed_time_q_sum,
        "explanation_q_sum": explanation_q_sum,
        "answered_correctly_uq": answered_correctly_uq,
        "timestamp_u": timestamp_u,
        "timestamp_u_incorrect": timestamp_u_incorrect,
    }
    return train, valid, questions_df, prior_question_elapsed_time_mean, features_dicts
|
# Generated by Django 2.2.13 on 2020-10-30 17:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a many-to-many ``references`` field (to Publication) on Taxon."""

    dependencies = [
        ('publications', '0002_initial_data'),
        ('origins', '0003_site_references'),
    ]

    operations = [
        migrations.AddField(
            model_name='taxon',
            name='references',
            field=models.ManyToManyField(blank=True, to='publications.Publication'),
        ),
    ]
|
"""The file system abstraction."""
import urllib.parse
from io import BytesIO
from flask import current_app
from .eatfirst_osfs import EatFirstOSFS
from .eatfirst_s3 import EatFirstS3
class EFS:
    """The EatFirst File system.

    Thin abstraction over a PyFilesystem-style backend (local OSFS or S3),
    selected by the ``storage`` constructor argument.
    """

    def __init__(self, *args, storage="local", **kwargs):
        """The constructor method of the filesystem abstraction.

        :param storage: backend to use, ``"local"`` or ``"s3"``.
        :raises RuntimeError: for any other storage name.
        """
        self.separator = kwargs.get("separator", "/")
        self.current_file = ""
        if storage.lower() == "local":
            self.home = EatFirstOSFS(current_app.config["LOCAL_STORAGE"], create=True, *args, **kwargs)
        elif storage.lower() == "s3":
            self.home = EatFirstS3(
                current_app.config["S3_BUCKET"],
                # We always called make_public after upload, with this we do one less call to aws API
                acl="public-read",
                *args,
                **kwargs
            )
        else:
            raise RuntimeError("{} does not support {} storage".format(self.__class__.__name__, storage))

    def upload(self, path, content, content_type=None):
        """Upload a file and return its size in bytes.

        :param path: the relative path to file, including filename.
        :param content: the content to be written (str, bytes or BytesIO).
        :param content_type: Enforce content-type on destination.
        :return: size of the saved file in bytes.
        """
        path_list = path.split(self.separator)
        if len(path_list) > 1:
            self.home.makedirs(self.separator.join(path_list[:-1]), recreate=True)
        self.home.create(path, wipe=False)
        # s3fs' API sucks and we have to set the content type on construction
        # assuming this will always be called in a synchronous way, we just override the inner variable (O.o) and
        # restore it back
        content_type_key = "ContentType"
        has_upload_args = hasattr(self.home, "upload_args")
        old_content_type_exists = False
        old_content_type = None
        if has_upload_args and content_type:
            old_content_type_exists = content_type_key in self.home.upload_args
            old_content_type = self.home.upload_args.pop(content_type_key, None)
            self.home.upload_args[content_type_key] = content_type
        if isinstance(content, BytesIO):
            # TODO: The underlying library only expects bytes instead of verifying what is coming
            # maybe we should send a pr as this can result in more memory usage
            content = content.read()
        if isinstance(content, str):
            content = content.encode()
        self.home.setbytes(path, content)
        if has_upload_args:
            if old_content_type_exists:
                self.home.upload_args[content_type_key] = old_content_type
            else:
                self.home.upload_args.pop(content_type_key, None)
        # BUG FIX: the docstring always promised the size, but nothing was
        # returned.  ``content`` is guaranteed to be bytes at this point.
        return len(content)

    def open(self, path, *args, **kwargs):
        """Open a file and return a file pointer.

        :param path: the relative path to file, including filename.
        :return: a pointer to the file.
        :raises FileNotFoundError: when the file does not exist.
        """
        # Maybe we should store paths as relative paths to avoid having to do this
        root_path = getattr(self.home, "_root_path", None)
        path = path.replace(root_path, "") if root_path else path
        if not self.home.exists(path):
            exp = FileNotFoundError()
            exp.filename = path
            raise exp
        return self.home.openbin(path, *args, **kwargs)

    def remove(self, path):
        """Remove a file or folder (folders are removed recursively).

        :param path: the relative path to file, including filename.
        """
        if self.home.isdir(path):
            self.home.removetree(path)
        else:
            self.home.remove(path)

    def rename(self, path, new_path):
        """Rename a file.

        :param path: the relative path to file, including filename.
        :param new_path: the relative path to new file, including new filename.
        """
        self.home.move(path, new_path)

    def move(self, path, new_path):
        """Move a file, creating the destination folder when needed.

        :param path: the relative path to file, including filename.
        :param new_path: the relative path to new file, including filename.
        """
        path_list = new_path.split(self.separator)
        if len(path_list) > 1:
            self.home.makedir(self.separator.join(path_list[:-1]), recreate=True)
        self.home.move(path, new_path, overwrite=True)

    def file_url(self, path, with_cdn=True):
        """Get a file url.

        :param path: the relative path to file, including filename.
        :param with_cdn: specify if the url should return with the cdn information, only used for images.
        """
        url = self.home.geturl(path)
        if current_app.config.get("S3_CDN_URL", None) and with_cdn:
            parsed_url = urllib.parse.urlparse(url)
            url = url.replace(parsed_url.hostname, current_app.config["S3_CDN_URL"])
            url = url.split("?")[0]  # Remove any trace of query string
            url = url.replace("http://", "https://")
        return url

    @classmethod
    def get_filesystem(cls):
        """Return an instance of the filesystem abstraction using the configured default storage."""
        return cls(storage=current_app.config["DEFAULT_STORAGE"])
|
#! /usr/bin python3
import numpy as np
import math
class Net(object):
    """A tiny fixed-size (3-5-3) feed-forward network with sigmoid units."""

    def __init__(self):
        self.inputSize = 3
        self.outputSize = 3
        self.hiddenSize = 5
        # Weights (uniform random in [0, 1)).
        self.W1 = np.random.rand(self.inputSize, self.hiddenSize)
        self.W2 = np.random.rand(self.hiddenSize, self.outputSize)
        # Synapses (pre-activation placeholders, overwritten by forward()).
        # BUG FIX: np.zeros takes the shape as one tuple; passing two
        # positional ints made the second a dtype and raised TypeError.
        self.z2 = np.zeros((self.inputSize, self.hiddenSize))
        self.z3 = np.zeros((self.hiddenSize, self.outputSize))
        # Activations
        self.a2 = np.zeros((self.inputSize, self.hiddenSize))

    def forward(self, X):
        """Propagate input X through the network and return the output."""
        self.z2 = np.dot(X, self.W1)
        self.a2 = self.sigmoid(self.z2)
        self.z3 = np.dot(self.a2, self.W2)
        # BUG FIX: the output layer applies the sigmoid activation itself;
        # the original passed derivative=True here by mistake.
        y_hat = self.sigmoid(self.z3)
        return y_hat

    def costFunctionPrime(self, X, y):
        """Compute (dJ/dW1, dJ/dW2) of the squared-error cost for batch X, y."""
        y_hat = self.forward(X)
        delta3 = np.multiply(-(y - y_hat), self.sigmoid(self.z3, True))
        dJdW2 = np.dot(self.a2.T, delta3)
        delta2 = np.dot(delta3, self.W2.T) * self.sigmoid(self.z2, True)
        dJdW1 = np.dot(X.T, delta2)
        return dJdW1, dJdW2

    def sigmoid(self, x, derivative=False):
        """Sigmoid activation; with derivative=True, d(sigmoid)/dx at x."""
        s = 1 / (1 + np.exp(-x))
        # BUG FIX: callers pass pre-activation values (z), so the derivative
        # is s*(1-s); the original computed x*(1-x), which is only correct
        # when x is already an activation value.
        return s * (1 - s) if derivative else s

    def derivnonlin(self, x):
        """Element-wise derivative of ReLU (1 where x > 0, else 0).

        BUG FIX: the original scalar ``if x > 0`` raised ValueError for the
        numpy arrays costfunctionPrime passes in; np.where handles both
        scalars and arrays.
        """
        return np.where(x > 0, 1.0, 0.0)

    def costfunctionPrime(self, X, y):
        """ReLU-derivative variant of costFunctionPrime (kept for callers)."""
        self.yHat = self.forward(X)
        delta3 = np.multiply(-(y - self.yHat), self.derivnonlin(self.z3))
        djdW2 = np.dot(self.a2.T, delta3)
        delta2 = np.dot(delta3, self.W2.T) * self.derivnonlin(self.z2)
        djdW1 = np.dot(X.T, delta2)
        return djdW1, djdW2
# RGB primary-color vectors intended as training data (currently unused).
trainingSet = [
    [255, 0, 0],
    [0, 255, 0],
    [0, 0, 255]
]


def train():
    # Training loop not implemented yet.
    pass


if __name__ == "__main__":
    train()
|
__version_tuple__ = (0, 7, 0, 'alpha.5')
__version__ = '0.7.0-alpha.5'
|
import os
from random import randrange
# data: one file per article, one sentence per line.
root = '/home/ales/Documents/Extended/datasets/STYRIA/output/articles+comments-workshop-2500-samples/articles_only'


def _write_baseline(tgt_folder, pick_line):
    """Write a two-sentence summary per article into *tgt_folder*.

    The duplicated lead/random loops of the original are folded into this
    single helper; *pick_line(lines, i)* chooses the i-th summary sentence.
    """
    os.makedirs(tgt_folder, exist_ok=True)
    for file in os.scandir(root):
        with open(file.path, 'r') as f:
            lines = [line.strip() for line in f.readlines()]
        with open(os.path.join(tgt_folder, file.name), 'w') as out:
            # write two sentences
            for i in range(2):
                out.write(pick_line(lines, i))
                out.write('\n')


# lead baseline: the first two sentences of each article
_write_baseline(
    'output/articles+comments-workshop-2500-samples/baseline-lead',
    lambda lines, i: lines[i],
)
# random baseline: two randomly chosen sentences (duplicates possible,
# matching the original behaviour)
_write_baseline(
    'output/articles+comments-workshop-2500-samples/baseline-random',
    lambda lines, i: lines[randrange(len(lines))],
)
|
# Copyright 2018 Google, Inc.,
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model Cart-Pole and train an Agent via policy gradient."""
import argparse
import time
import sys
import tensorflow as tf
from vaeseq.examples.play import environment as env_mod
from vaeseq.examples.play import hparams as hparams_mod
from vaeseq.examples.play import model as model_mod
from vaeseq import util
def train(flags):
    """Train the Cart-Pole model for ``flags.num_steps`` iterations."""
    hparams = hparams_mod.make_hparams(flags.hparams)
    model = model_mod.Model(hparams=hparams, session_params=flags)
    model.train("train", flags.num_steps)
def run(flags):
    """Play games with the selected agent and print each game's total score.

    Builds a single-game graph (batch size 1, up to ``flags.max_moves``
    moves), drives the environment RNN with either the trained agent or a
    random agent, then runs ``flags.num_games`` games while rendering.
    """
    hparams = hparams_mod.make_hparams(flags.hparams)
    # One game at a time, bounded by the maximum number of moves.
    hparams.batch_size = 1
    hparams.sequence_size = flags.max_moves
    batch_size = util.batch_size(hparams)
    model = model_mod.Model(hparams=hparams, session_params=flags)
    if flags.agent == "trained":
        agent = model.agent
    elif flags.agent == "random":
        agent = model_mod.agent_mod.RandomAgent(hparams)
    else:
        raise ValueError("I don't understand --agent " + flags.agent)
    outputs = agent.drive_rnn(
        model.env,
        sequence_size=util.sequence_size(hparams),
        initial_state=agent.initial_state(batch_size=batch_size),
        cell_initial_state=model.env.initial_state(batch_size=batch_size))
    # Total score accumulated over the whole move sequence.
    score = tf.reduce_sum(outputs["score"])
    with model.eval_session() as sess:
        model.env.start_render_thread()
        # Each sess.run plays one full game.
        for _ in range(flags.num_games):
            print("Score: ", sess.run(score))
            sys.stdout.flush()
        model.env.stop_render_thread()
# Argument parsing code below.
def common_args(args):
    """Register the arguments shared by every subcommand."""
    model_mod.Model.SessionParams.add_parser_arguments(args)
    args.add_argument(
        "--hparams",
        default="",
        help="Model hyperparameter overrides.")
def train_args(args):
    """Register arguments for the ``train`` subcommand."""
    common_args(args)
    args.add_argument(
        "--num-steps",
        type=int,
        default=int(1e6),
        help="Number of training iterations.")
    args.set_defaults(entry=train)
def run_args(args):
    """Register arguments for the ``run`` subcommand."""
    common_args(args)
    args.add_argument(
        "--max-moves",
        type=int,
        default=1000,
        help="Maximum number of moves per game.")
    args.add_argument(
        "--num-games",
        type=int,
        default=1,
        help="Number of games to play.")
    args.add_argument(
        "--agent",
        default="trained",
        choices=["trained", "random"],
        help="Which agent to use.")
    args.set_defaults(entry=run)
def main():
    """Parse command-line arguments and dispatch to the train/run entry point."""
    args = argparse.ArgumentParser()
    subcommands = args.add_subparsers(title="subcommands")
    train_args(subcommands.add_parser(
        "train", help="Train a model."))
    # BUG FIX: user-facing help text said "traned".
    run_args(subcommands.add_parser(
        "run", help="Run a trained model."))
    flags, unparsed_args = args.parse_known_args(sys.argv[1:])
    if not hasattr(flags, "entry"):
        # No subcommand given: show usage and signal failure.
        args.print_help()
        return 1
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(main=lambda _unused_argv: flags.entry(flags),
               argv=[sys.argv[0]] + unparsed_args)


if __name__ == "__main__":
    main()
|
"""
I need a *what* An *inverted sphere*? What in tarnation is an
inverted sphere, and where do I find one?
This files writes out a .BAM file of a Skybox, pasting together
six PNG files into a crude SkySphere.
Note there are several hardcoded full filepaths here - I had a
lot of problems with relative-paths, they were embedded in the BAM
file, so I couldn't see what they were (well, without using bam2egg).
I finally wound up putting the BAM file in the same directory as main.py,
which is kludgy, but works consistently. Someday I need to work on
that relative-pathing.
There's a tool for constructing Skymaps at http://alexcpeterson.com/spacescape/,
but it doesn't run on Linux, so I just put together half a dozen manually-
generated PNGs. Which I actually did using PaintShop Pro 7 on Windows, oddly - I
can't stand GIMP.
https://www.panda3d.org/manual/index.php/Cube_Maps
"""
import sys
from pathlib import Path
# noinspection PyPackageRequirements
from direct.showbase.ShowBase import ShowBase
import panda3d.core as core
# This is needed to handle all cases of relative-imports. There may
# be a better way, but so far I haven't found it.
# Make the repository root importable regardless of the working directory.
TOP_DIR = str(Path(__file__).resolve().parents[1])
sys.path.insert(0, TOP_DIR)

# Hardcoded absolute asset paths (see the module docstring for why).
# The '#' in BOX_PATH is a Panda3D placeholder for the cube-map face index.
BOX_PATH = '/home/crawford/repos/Panda3DCraft/gfx/sky/box_#.png'
SPHERE_PATH = '/home/crawford/repos/Panda3DCraft/gfx/sky/InvertedSphere.egg'
BOX_OUT = '/home/crawford/repos/Panda3DCraft/sky_box.bam'

# ShowBase must exist before any of the loader calls below.
Base = ShowBase()
def build_the_box():
    """Build the textured sky-sphere node and write it out as a .BAM file."""
    stage_defaults = core.TextureStage.getDefault()
    # Load the six face images (BOX_PATH's '#' is expanded per face) as a cube map.
    texture = Base.loader.loadCubeMap(BOX_PATH)
    sphere = Base.loader.loadModel(SPHERE_PATH)
    # Project the cube map onto the sphere using world-position texture coords.
    sphere.setTexGen(stage_defaults, core.TexGenAttrib.MWorldPosition)
    sphere.setTexProjector(stage_defaults, Base.render, sphere)
    sphere.setTexPos(stage_defaults, 0, 0, 0)
    sphere.setTexScale(stage_defaults, 0.5)
    sphere.setTexture(texture)
    # The sky must be unlit and large enough to enclose the whole scene.
    sphere.setLightOff()
    sphere.setScale(1000)
    # To preview it:
    # sphere.reparentTo(Base.render)
    # Base.run()
    # To write it:
    res = sphere.writeBamFile(BOX_OUT)
    print(f"wrote file [{BOX_OUT}] with result {res}")


if __name__ == '__main__':
    build_the_box()
|
import subprocess
import os
from report.models import *
import utils.report_handle as report_handle
import utils.cmd as cmd
"""
Lay danh sach cac tool
"""
def get_tool_list():
    """Return the names of the supported analysis tools.

    The position of a name in this list is its tool index ("0".."4")
    used throughout this module.
    """
    return [
        'Qark',
        'Dependency Check',
        'Sniffgit',
        'AndroBugs',
        'DB Parser',
    ]
"""
Lay danh sach cac app trong dien thoai
"""
def get_app_list():
    """Return the list of app names found on the phone.

    Reads one app name per line from ``outdir/app_list.txt``.
    """
    # BUG FIX: the original never closed the file handle; ``with``
    # guarantees it is closed even on error.
    with open("outdir/app_list.txt", "r") as apps:
        return [app.rstrip() for app in apps]
"""
Pull apk tu dien thoai
"""
def pull_apk(app_name):
    """Pull an APK from the phone unless it was already downloaded."""
    # Skip the slow adb pull when the file already exists.
    if os.path.isfile('outdir/file/' + app_name):
        return
    cmd.do_cmd(["sh", "scripts/pull_file.sh", app_name])
"""
xoa resource cu
"""
def delete_old_resource():
    """Delete old scan resources: generated files and all report DB rows."""
    # Delete files produced by previous scans.
    cmd.do_cmd(["sh", "scripts/remove.sh"])
    # Clear every report table, including the scan history.
    ReportQark.objects.all().delete()
    ReportDc.objects.all().delete()
    ReportSniffgit.objects.all().delete()
    ReportAndro.objects.all().delete()
    ReportDbParser.objects.all().delete()
    ReportHistory.objects.all().delete()
"""
dua vao tool_index, chay tool duoc yeu cau
"""
def use_tool(app_name, tool_index):
    """Run the tool identified by *tool_index* against *app_name*.

    Tool indices follow get_tool_list(): "0" Qark, "1" Dependency Check,
    "2" Sniffgit, "3" AndroBugs, "4" DB Parser.
    """
    # Tools that do not need the APK decompiled first.
    if "3" == tool_index:
        report_andro(app_name)
        return
    if "4" == tool_index:
        report_dbparser(app_name)
        return
    # Qark decompiles the APK; the tools below need the decompiled source.
    report_qark(app_name)
    # The DB is checked inside each report_* helper for prior runs.
    if "1" == tool_index:
        report_dc(app_name)
    if "2" == tool_index:
        report_sniffgit(app_name)
    return
"""
su dung tool qark
"""
def report_qark(app):
    """Run Qark on *app* (at most once per app) and process its report."""
    # Check the DB whether this app was already scanned with Qark (index '0').
    if 0 == ReportHistory.objects.filter(app_name=app).filter(tool_index='0').count():
        # Run Qark.
        cmd.do_cmd(["sh", "scripts/report_qark.sh", app])
        ReportHistory(app_name=app, tool_index='0').save()
    # Process the report output.
    report_handle.handle_qark_report(app)
"""
su dung tool dependency check
"""
def report_dc(app):
    """Run Dependency Check on *app* (at most once per app) and process its report."""
    if 0 == ReportHistory.objects.filter(app_name=app).filter(tool_index='1').count():
        # Run dependency check (the script expects the name without .apk).
        cmd.do_cmd(["sh", "scripts/report_dc.sh", app.replace('.apk', '')])
        ReportHistory(app_name=app, tool_index='1').save()
    # Process the report output.
    report_handle.handle_dc_report(app)
"""
su dung tool sniffgit
"""
def report_sniffgit(app):
    """Run Sniffgit on *app*'s source and, if a device is attached, its data."""
    if 0 == ReportHistory.objects.filter(app_name=app).filter(tool_index='2').count():
        try:
            cmd.do_cmd(["sh", "scripts/report_sniffgit.sh", app])
            ReportHistory(app_name=app, tool_index='2').save()
        except:
            # NOTE(review): bare except silently swallows every failure,
            # including KeyboardInterrupt -- consider `except Exception`.
            pass
    report_handle.handle_sniffgit_report(app)
    # If the scan is still not recorded, retry against the on-device data
    # (only possible when an adb-connected device is present).
    if 0 == ReportHistory.objects.filter(app_name=app).filter(tool_index='2').count():
        connectResult = cmd.do_cmd(["adb", "get-state"])
        if connectResult.strip() == "device":
            try:
                # The device data directory is the app name up to the first '-'.
                data_location = app.strip()[ : app.find('-')]
                cmd.do_cmd(["sh", "scripts/pull_data.sh", data_location, app.strip()])
                cmd.do_cmd(["sh", "scripts/report_sniffgit_data.sh", app.strip()])
            except:
                pass
            report_handle.handle_sniffgit_report_data(app)
"""
su dung tool AndrogBugs
"""
def report_andro(app):
    """Run AndroBugs on *app* (at most once per app) and process its report."""
    if 0 == ReportHistory.objects.filter(app_name=app).filter(tool_index='3').count():
        cmd.do_cmd(["sh", "scripts/report_andro.sh", app])
        ReportHistory(app_name=app, tool_index='3').save()
    report_handle.handle_andro_report(app)
"""
su dung tool DB Parser
"""
def report_dbparser(app):
    """Run the DB Parser on *app*'s pulled data (at most once per app)."""
    if 0 == ReportHistory.objects.filter(app_name=app).filter(tool_index='4').count():
        # Pull the app's data directory from the device if not done yet.
        if not os.path.isdir("outdir/source/" + app + "/app_data"):
            data_location = app.strip()[ : app.find('-')]
            cmd.do_cmd(["sh", "scripts/pull_data.sh", data_location, app.strip()])
        ReportHistory(app_name=app, tool_index='4').save()
    # Find and parse every DB file in the pulled data.
    report_handle.handle_dbparser_report(app)
def run_all_tool(app):
    """Run every analysis tool against *app*, ignoring individual failures.

    Each tool is attempted independently so one crashing tool does not
    prevent the others from producing their reports.
    """
    # BUG FIX: the original used bare ``except:`` five times, which also
    # swallows KeyboardInterrupt / SystemExit; ``except Exception`` lets
    # those propagate so the run can still be aborted.
    for tool in (report_qark, report_dc, report_sniffgit, report_andro, report_dbparser):
        try:
            tool(app)
        except Exception:
            pass
|
""" Utilities for CumulusCI Core"""
import copy
import glob
import time
import typing as T
import warnings
from datetime import datetime, timedelta
from logging import getLogger
from pathlib import Path
import pytz
from cumulusci.core.exceptions import ConfigMergeError, TaskOptionsError
def import_global(path: str):
    """Import and return an attribute from a dotted module-path string."""
    module_path, _, attr_name = path.rpartition(".")
    mod = __import__(module_path, fromlist=[attr_name])
    return getattr(mod, attr_name)


# For backwards-compatibility
import_class = import_global
def parse_datetime(dt_str, format):
    """Create a timezone-aware (UTC) datetime object from a datetime string."""
    t = time.strptime(dt_str, format)
    # BUG FIX: t[6] of a struct_time is tm_wday (day of week), NOT
    # microseconds; the original passed it as the microsecond argument.
    # strptime cannot parse sub-second precision, so microseconds are 0.
    return datetime(t[0], t[1], t[2], t[3], t[4], t[5], 0, pytz.UTC)
def process_bool_arg(arg: T.Union[int, str, None]):
    """Determine True/False from argument.

    Similar to parts of the Salesforce API, there are a few true-ish and
    false-ish strings, but "True" and "False" are the canonical ones.

    None is accepted as "False" for backwards compatibility reasons, but this
    usage is deprecated.
    """
    if isinstance(arg, (int, bool)):
        return bool(arg)
    if arg is None:
        # Deprecated backwards-compatible behaviour some tasks rely upon.
        import traceback

        warnings.warn("".join(traceback.format_stack(limit=4)), DeprecationWarning)
        warnings.warn(
            "Future versions of CCI will not accept 'None' as an argument to process_bool_arg",
            DeprecationWarning,
        )
        return False
    if isinstance(arg, str):
        # These are the values Salesforce's bulk loader accepts; there is no
        # harm in honoring the full "Salesforce standard" list.
        lowered = arg.lower()
        if lowered in ("yes", "y", "true", "on", "1"):
            return True
        if lowered in ("no", "n", "false", "off", "0"):
            return False
    raise TypeError(f"Cannot interpret as boolean: `{arg}`")
def process_glob_list_arg(arg):
    """Convert a list of glob patterns or filenames into a list of files.

    The initial list can take the form of a comma-separated string or a
    proper list.  Order is preserved, but duplicates will be removed.

    Note: glob patterns are expanded, but the resulting names are not
    validated to exist; a pattern that matches nothing is kept in the
    result as a literal string.
    """
    initial_list = process_list_arg(arg)
    if not arg:
        return []
    files = []
    for pattern in initial_list:
        matches = glob.glob(pattern, recursive=True)
        # A pattern with no matches is kept verbatim.
        files.extend(sorted(matches) if matches else [pattern])
    # dict preserves insertion order (3.7+), so fromkeys deduplicates while
    # keeping the original ordering (a set would not).
    return list(dict.fromkeys(files))
def process_list_arg(arg):
    """Parse a string into a list separated by commas with whitespace stripped.

    Lists pass through unchanged and ``None`` is returned as-is (backwards-
    compatible behaviour); any other type logs a warning and returns ``None``.
    """
    if isinstance(arg, Path):
        arg = str(arg)
    if isinstance(arg, list):
        return arg
    elif isinstance(arg, str):
        return [part.strip() for part in arg.split(",")]
    elif arg is None:
        # backwards compatible behaviour.
        return None
    else:
        # BUG FIX: ``Logger.warn`` is deprecated; use ``warning``.
        getLogger(__file__).warning(
            f"Unknown option type `{type(arg)}` for value `{arg}`."
            "This will be an error in a future version of CCI."
        )
def process_list_of_pairs_dict_arg(arg):
    """Process an arg in the format "aa:bb,cc:dd" into a dict.

    Dicts pass through unchanged; anything else raises TaskOptionsError.
    """
    if isinstance(arg, dict):
        return arg
    if not isinstance(arg, str):
        raise TaskOptionsError(f"Arg is not a dict or string ({type(arg)}): {arg}")
    result = {}
    for key_value in arg.split(","):
        # Only the first ':' separates name and value ("k:v:w" -> "k", "v:w").
        key, sep, value = key_value.partition(":")
        if not sep:
            raise TaskOptionsError(f"Var is not a name/value pair: {key_value}")
        if key in result:
            raise TaskOptionsError(f"Var specified twice: {key}")
        result[key] = value
    return result
def decode_to_unicode(content):
    """Decode ISO-8859-1 bytes to unicode, when using the SF API.

    Strings and falsy values (e.g. ``None``, empty bytes) pass through
    unchanged.
    """
    if content and not isinstance(content, str):
        try:
            # Try to decode ISO-8859-1 to unicode.
            return content.decode("ISO-8859-1")
        except UnicodeDecodeError:
            # BUG FIX: bytes.decode raises UnicodeDecodeError -- the original
            # caught UnicodeEncodeError, which decode can never raise here.
            # (ISO-8859-1 maps every byte, so this is a defensive fallback.)
            return content
    return content
def merge_config(configs):
    """Recursively deep-merge the configs into one another.

    *configs* maps config names to configs; highest priority comes first.
    """
    merged = {}
    for name, config in configs.items():
        merged = dictmerge(merged, config, name)
    return merged
def dictmerge(a, b, name=None):
    """Deeply merge two ``dict``s that consist of lists, dicts, and scalars.

    This function (recursively) merges ``b`` INTO ``a``, does not copy any
    values, and returns ``a``.
    based on https://stackoverflow.com/a/15836901/5042831

    NOTE: tuples and arbitrary objects are NOT handled and will raise
    TypeError (wrapped into ConfigMergeError)."""
    key = None
    if b is None:
        return a
    try:
        if a is None or isinstance(a, (bytes, int, str, float)):
            # First run, or ``a`` is a scalar: ``b`` simply replaces it.
            return b
        if isinstance(a, list):
            # Lists can only be appended to.
            if isinstance(b, list):
                a.extend(b)
            else:
                a.append(b)
            return a
        if isinstance(a, dict):
            # Dicts must be merged key by key.
            if not isinstance(b, dict):
                raise TypeError(
                    f'Cannot merge non-dict of type "{type(b)}" into dict "{a}"'
                )
            for key in b:
                if key in a:
                    a[key] = dictmerge(a[key], b[key], name)
                else:
                    a[key] = copy.deepcopy(b[key])
            return a
        raise TypeError(
            f'dictmerge does not supporting merging "{type(b)}" into "{type(a)}"'
        )
    except TypeError as e:
        raise ConfigMergeError(
            f'TypeError "{e}" in key "{key}" when merging "{type(b)}" into "{type(a)}"',
            config_name=name,
        )
def format_duration(duration: timedelta):
    """Format a timedelta as a compact ``Nh:Nm:Ns`` string.

    Leading zero components are omitted (e.g. 59s -> "59s",
    90s -> "1m:30s", 3661s -> "1h:1m:1s").
    """
    hours, remainder = divmod(duration.total_seconds(), 3600)
    minutes, seconds = divmod(remainder, 60)
    parts = []
    if hours > 0:
        parts.append(f"{int(hours)}h")
    if hours > 0 or minutes:
        parts.append(f"{int(minutes)}m")
    parts.append(f"{int(seconds)}s")
    return ":".join(parts)
|
"""
This file will populate the tables using Faker
"""
import random
import decimal
from datetime import datetime
from django.db import models
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from faker import Faker
from shop.models import Cart, Customer, LineItem, Order, Product
class Command(BaseCommand):
    """Management command that wipes and re-seeds the shop tables with Faker data."""

    help = 'Load data into the tables'

    def handle(self, *args, **options):
        """Drop all rows, then create customers, products, carts, orders and line items."""
        # drop the tables - use this order due to foreign keys - so that we can
        # rerun the file as needed without repeating values
        Cart.objects.all().delete()
        LineItem.objects.all().delete()
        Order.objects.all().delete()
        Product.objects.all().delete()
        Customer.objects.all().delete()
        User.objects.all().delete()
        print("tables dropped successfully")
        fake = Faker()
        # create some customers
        # we convert some values from tuples to strings (the trailing commas
        # below create one-element tuples that are immediately unpacked)
        for i in range(10):
            first_name = fake.first_name(),
            first_name = str(first_name[0])
            last_name = fake.last_name(),
            last_name = str(last_name[0])
            username = first_name + last_name,
            username = username[0]
            user = User.objects.create_user(
                username = username,
                first_name = first_name,
                last_name = last_name,
                email = fake.ascii_free_email(),
                password = 'p@ssw0rd')
            # NOTE(review): a Customer row is looked up rather than created,
            # so it is presumably created elsewhere (e.g. a post_save
            # signal) -- confirm against the shop app.
            customer = Customer.objects.get(user = user)
            customer.address = fake.address(),
            customer.address = str(customer.address[0])
            customer.save()
        # create some products with prices derived from a random 155..898 range
        for i in range(10):
            product = Product.objects.create(
                name = fake.catch_phrase(),
                price = int( decimal.Decimal(random.randrange(155,899))/100),
            )
            product.save()
        # create some carts, each holding a random product and quantity
        products = list(Product.objects.all())
        for i in range(10):
            random_id = random.randint(1,9)
            cart = Cart.objects.create(
                product = products[random_id],
                quantity = random.randrange(1,42),
            )
            cart.save()
        # create orders from customers (three per customer)
        customers = Customer.objects.all()
        for customer in customers:
            for i in range(3):
                order = Order.objects.create(
                    customer = customer,
                )
                order.save()
        # attach line_items to orders (every cart is attached to every order)
        orders = Order.objects.all()
        carts = Cart.objects.all()
        for order in orders:
            for cart in carts:
                line_item = LineItem.objects.create(
                    quantity = cart.quantity,
                    product = cart.product,
                    cart = cart,
                    order = order,
                )
                line_item.save()
        print("tables successfully loaded")
|
from django.db import models
from .teachers import teachers
class UniqueClasses(models.Model):
    """A class (section) with its student strength and assigned teacher."""

    # Number of students in the class.
    Strength = models.IntegerField()
    # BUG FIX: ``on_delete`` is a required ForeignKey argument since
    # Django 2.0; without it this model raises TypeError at import time.
    # CASCADE removes the class rows when their teacher is deleted.
    Teacher = models.ForeignKey(teachers, on_delete=models.CASCADE)
#
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
from roast.machines.microblaze import *

# PetaLinux BSP archive name template for the KC705 board; ``{version}`` is
# filled in with the release version elsewhere.
PLNX_BSP = "xilinx-kc705-v{version}-final.bsp"
# PetaLinux project name template for the KC705 board.
plnx_proj = "xilinx-kc705-{version}"
|
import os
import sys
from samplerunner.source import get_sources
from samplerunner.project import requires_params, get_project_type_by_name
def run(args):
    """Dispatch to the matching runner based on which CLI option was given."""
    if args.language:
        _run_language(args.language)
        return
    if args.project:
        _run_project(args.project)
        return
    if args.source:
        _run_source(args.source)
        return
    _run_all()
def _get_archive_path():
path_to_directory_containing_this_file = os.path.dirname(os.path.abspath(__file__))
return os.path.join(path_to_directory_containing_this_file, '..', 'archive')
def _prompt_params(project_type):
    """Ask the user for run parameters, if the project type needs any."""
    if requires_params(project_type):
        return input(f'input parameters for "{project_type}": ')
    return ''
def _build_and_run(source, params):
    """Build *source*, run it with *params*, and print its output."""
    print()
    print(f'Running "{source.name}{source.extension}"...')
    source.build()
    print(source.run(params))
def _error_and_exit(msg):
print(msg)
sys.exit(1)
def _run_all():
    """Build and run every source in the archive, grouped by project type."""
    for project_type, sources in get_sources(_get_archive_path()).items():
        # One parameter prompt per project type, shared by all its sources.
        params = _prompt_params(project_type)
        for src in sources:
            _build_and_run(src, params)
def _run_language(language):
    """Build and run every source found for *language*."""
    # Archive layout nests languages under their first letter.
    lang_root = os.path.join(_get_archive_path(), language[0], language)
    sources_by_type = get_sources(path=lang_root)
    if all(len(sources) <= 0 for sources in sources_by_type.values()):
        _error_and_exit(f'No valid sources found for language: "{language}"')
    for project_type, sources in sources_by_type.items():
        for src in sources:
            params = _prompt_params(project_type)
            _build_and_run(src, params)
def _run_project(project):
    """Build and run every source for the project type named *project*."""
    sources_by_type = get_sources(_get_archive_path())
    project_type = get_project_type_by_name(project, case_insensitive=True)
    if project_type is None or project_type not in sources_by_type:
        _error_and_exit(f'No valid sources found for project: "{project}"')
    # One shared parameter prompt for all sources of this project type.
    params = _prompt_params(project_type)
    for src in sources_by_type[project_type]:
        _build_and_run(src, params)
def _run_source(source):
    """Find the single source whose filename matches *source* and run it.

    The match is case-insensitive; only the first match is run.
    """
    wanted = source.lower()
    for project_type, sources in get_sources(_get_archive_path()).items():
        for src in sources:
            if f'{src.name}{src.extension}'.lower() == wanted:
                params = _prompt_params(project_type)
                _build_and_run(src, params)
                # Early return replaces the original's nested break/else.
                return
    _error_and_exit(f'Source "{source}" could not be found')
|
import os
class RenderHTML:
    """Load an HTML file from disk and expose its contents as a string."""

    def __init__(self, file):
        """Read *file* into memory.

        :raises FileNotFoundError: when the file does not exist.
        """
        self._file = file
        if not os.path.exists(self._file):
            raise FileNotFoundError(f"No such HTML file: {self._file}")
        with open(file, 'r') as handle:
            self._html = handle.read()

    def __repr__(self):
        return "<%s: %s>" % (type(self).__name__, self._file)

    def get_html(self):
        """Return the raw HTML markup."""
        return str(self._html)

    # str(instance) yields the markup itself.
    __str__ = get_html
|
import tornado.web
import tornado.ioloop
import json
import os
import datetime
import base64
import requests # pip install requests
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
define("users", default="./users.json", help="path to the file with enabled user credentials")
# Credentials come from the environment; fall back to obvious fakes for local runs.
PROJECT_ID = os.environ.get('PROJECT_ID', '__FAKE_PROJECT_ID__')
ACCESS_TOKEN = os.environ.get('ACCESS_TOKEN', '__FAKE_ACCESS_TOKEN__')
class Collector(tornado.web.RequestHandler):
    """Accept stats submissions over HTTP Basic auth and forward them to Splunk Storm."""

    def initialize(self, enabledUsers):
        # Map of auth key -> human-readable description, injected via the
        # Application routing table in main().
        self.enabledUsers = enabledUsers

    def submitValuesToSplunkStorm(self, values, auth_key, ip):
        """Render *values* as a key=value log line and POST it to Splunk Storm.

        Raises Exception if Splunk Storm does not answer with HTTP 200.
        """
        def appStats(label, data):
            # Download stats for one app; signature/size exist only on HTTP 200.
            status = data['status']
            result = "{label}Status={status}, {label}DownloadTime={downloadTime}, ".format(
                label=label,
                status=status,
                downloadTime=data['timing']
            )
            if status == 200:
                result += "{label}Signature={signature}, {label}Size={size}, ".format(
                    label=label,
                    signature=data['signature'],
                    size=data['size']
                )
            return result

        def timingStats(data):
            # Connection timing breakdown, or the error reported by the client.
            if 'error' in data:
                result = "error=" + data['error']
            else:
                result = "knock={knock}, connect={connect}, credentialCheck={credentialCheck}, getUserDetails={getUserDetails}, totalTiming={totalTiming}, ".format(
                    knock=data['knock'],
                    connect=data['connect'],
                    credentialCheck=data['credentialCheck'],
                    getUserDetails=data['getUserDetails'],
                    totalTiming=data['total']
                )
            return result

        # NOTE(review): app_info is built from values['beta'], not values['app'] —
        # looks like a copy/paste slip, but kept as-is; confirm the payload schema.
        log = "{timestamp}Z ip={ip}, user={user}, baseUrl={host}, authDescription=\"{authDescription}\", {app_info}{beta_info}{gamma_info}{delta_info}{timing}".format(
            timestamp=datetime.datetime.utcnow().replace(microsecond=0).isoformat(),
            ip=ip,
            user=values['info']['user'],
            host=values['info']['host'],
            authDescription=self.enabledUsers[auth_key],
            app_info=appStats('app', values['beta']),
            beta_info=appStats('beta', values['beta']),
            gamma_info=appStats('gamma', values['gamma']),
            delta_info=appStats('delta', values['delta']),
            timing=timingStats(values['timing'])
        )
        print(log)
        params = {'project': PROJECT_ID, 'host': auth_key, 'sourcetype': 'collector', 'source': ip}
        # print("SENDING DATA: " + json.dumps(params, indent=4))
        # print("ACCESS_TOKEN: " + ACCESS_TOKEN)
        response = requests.post('https://api.splunkstorm.com/1/inputs/http', log, params=params, auth=('x', ACCESS_TOKEN), verify=False)
        if response.status_code != 200:
            raise Exception("problem saving data")

    def post(self):
        """Validate Basic auth against enabledUsers, then forward the JSON body."""
        ip = self.request.headers.get('X-Forwarded-For') if 'X-Forwarded-For' in self.request.headers else self.request.headers.get('remote_addr')
        print("IP: " + ip)
        auth_hdr = self.request.headers.get('Authorization')
        if (auth_hdr is None) or (not auth_hdr.startswith('Basic ')):
            print("No authorization header found")
            return self.notAuthorized()
        # base64.decodestring() was removed in Python 3.9; b64decode returns
        # bytes, so decode to str before splitting the "user:key" pair.
        auth_decoded = base64.b64decode(auth_hdr[6:]).decode('utf-8')
        # maxsplit=1: the credential is exactly "user:key"; the original
        # split(':', 2) would break the 2-tuple unpack on a key containing ':'.
        username, auth_key = auth_decoded.split(':', 1)
        print("Auth key: " + auth_key)
        if (username != 'x') or (auth_key not in self.enabledUsers):
            print("Auth key not found!")
            return self.notAuthorized()
        values = json.loads(self.request.body)
        self.submitValuesToSplunkStorm(values, auth_key, ip)
        self.write("Thanks")

    def notAuthorized(self):
        """Finish the request with a 401 and a Basic-auth challenge."""
        self.set_status(401)
        self.set_header('WWW-Authenticate', 'Basic realm="collector.stats.clipperz.is"')
        self.finish()
        return None
def main():
    """Parse command-line options and run the collector HTTP server."""
    tornado.options.parse_command_line()
    print("PROJECT ID: " + PROJECT_ID)
    print("ACCESS_TOKEN: " + ACCESS_TOKEN)
    # Close the users file promptly instead of leaking the open handle
    # (the original passed json.load(open(...)) inline).
    with open(options.users) as users_file:
        enabled_users = json.load(users_file)
    application = tornado.web.Application([(r"/submit", Collector, dict(enabledUsers=enabled_users))])
    application.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
    main()
|
import uuid as generator
import numpy as np
import math
from utils import *
ANGLE_RES = 50 #resolution for the rounded corners
class Straight:
    """A straight track segment between two nodes.

    Neighbouring elements (nodes and adjacent straights) are referenced by
    their ids rather than by object reference.
    """

    def __init__(self, start, end):
        # Unique id for this segment; start/end are objects exposing `.id`.
        self.id = generator.uuid1().int
        self.start_node = start.id
        self.end_node = end.id
        self.next_straight = None
        self.prev_straight = None
        self.is_start = False
        self.start_perc = None

    def setNextStraight(self, next):
        self.next_straight = next.id

    def setPreviousStraight(self, previous):
        self.prev_straight = previous.id

    def flagStart(self, startPerc=0.5):
        # Mark this straight as carrying the start line, placed at the
        # given fraction along its length.
        self.is_start = True
        self.start_perc = startPerc

    def __str__(self):
        return f"startNode: {self.start_node}\n endNode: {self.end_node}\n"
class Corner:
    """A track corner at (x, y), optionally rounded into an arc.

    Attributes:
        id: unique integer id (uuid1-based).
        x, y: corner position.
        prev_straight / next_straight: ids of adjacent Straight segments.
        spline / blend: rendering flags set via flagSpline()/flagBlend().
        radius, center, arc_start, arc_finish: arc geometry, computed
            elsewhere before roundify() is called.
        arc_points: list of [x, y] coordinates sampled along the arc.
    """
    def __init__(self, x, y):
        self.id = generator.uuid1().int
        self.x = x
        self.y = y
        self.prev_straight = None
        self.next_straight = None
        self.spline = False
        self.blend = False
        self.radius = None
        self.center = None
        self.arc_start = None
        self.arc_finish = None
        #array of coordinates for the circle
        self.arc_points = []
    def setPreviousStraight(self, previous):
        # Store only the id, not the object.
        self.prev_straight = previous.id
    def setNextStraight(self, next):
        self.next_straight = next.id
    def flagSpline(self):
        self.spline = True
    def flagBlend(self):
        # Blended corners get rounded by roundify().
        self.blend = True
    def __str__(self):
        return ("id: " +str(self.id)+ "\n")
    def _vecFromTo(self, S, E):
        # 2D vector from point S to point E.
        return [E[0]-S[0], E[1]-S[1]]
    def _vecCrossProd(self, A, B):
        # z-component of the 2D cross product; sign gives turn direction.
        return A[0]*B[1]-A[1]*B[0]
    def _angleVec(self, V):
        # Angle of vector V in degrees, normalized to [0, 360).
        return math.degrees(np.arctan2(V[1],V[0]))%360
    def _sign(self, a):
        # Classic sign function: 1, -1, or 0.
        if a > 0:
            return 1
        elif a < 0:
            return -1
        else:
            return 0
    def roundify(self, v, includeLimits=False):
        """Sample ANGLE_RES points along the corner's arc into arc_points.

        v is a verbosity level (>1 prints debug angles). includeLimits
        controls whether the arc endpoints themselves are kept. Does
        nothing unless the corner is blended and the arc is defined.
        """
        if self.blend and self.arc_start != None and self.arc_finish != None:
            # Point on the arc circle at angle b (degrees).
            circle_coords = lambda b : [self.center[0]+self.radius*math.cos(math.radians(b)), self.center[1]+self.radius*math.sin(math.radians(b))]
            vec_start = self._vecFromTo(self.center, self.arc_start)
            vec_end = self._vecFromTo(self.center, self.arc_finish)
            # s is the sweep direction: +1 counter-clockwise, -1 clockwise.
            s = self._sign(self._vecCrossProd(vec_start, vec_end))
            angle_start = self._angleVec(vec_start)
            angle_end = self._angleVec(vec_end)
            # Total swept angle between the two arc endpoints, in degrees.
            theta = math.degrees(angle_3_points(self.arc_start, self.center, self.arc_finish))
            if v > 1:
                print("AS: "+str(angle_start))
                print("AE: "+str(angle_end))
                print("T: "+str(theta))
            # endpoint=includeLimits: when False the final endpoint is not
            # sampled, and the first point is popped below, so neither
            # limit appears in arc_points.
            anglespace = np.linspace(0, theta, num=ANGLE_RES, endpoint=includeLimits)
            for b in anglespace:
                p=circle_coords(angle_start + s*b)
                self.arc_points.append(p)
            if not includeLimits:
                self.arc_points.pop(0)
        else:
            return
|
import click
from clickhouse_migrate.common.db import DbRegister
from clickhouse_migrate.conf.settings import Settings
from clickhouse_migrate.interfaces.service import MigrationService
@click.group()
def cli():
    # Root command group; subcommands are attached via cli.add_command below.
    # (Deliberately no docstring: click would surface it as --help text.)
    pass
@click.command(name="migrate")
@click.option("-c", "--config", help="Path to config *.ini file", required=False)
@click.option(
    "-db", "--databases", help="Databases list", required=False, multiple=True
)
@click.option(
    "-dir",
    "--migration_dir",
    help="Migrations directory",
    required=False,
    type=click.Path(exists=True),
)
def migrate(config: str, databases: str, migration_dir: str):
    # Initialize settings from the CLI arguments, register the database
    # connections, then bootstrap the migration bookkeeping and apply
    # every pending migration. The call order matters: settings before
    # DB registration before any MigrationService call.
    Settings().init_config(
        config_path=config, databases=databases, migration_dir=migration_dir
    )
    DbRegister().setup_db()
    MigrationService.apply_initial_step()
    MigrationService().apply_all_migrations()
@click.command(name="create_migration")
@click.option("-n", "--name", help="migration name", required=True)
@click.option("-c", "--config", help="Path to config *.ini file", required=False)
@click.option(
    "-db", "--databases", help="Databases list", required=False, multiple=True
)
@click.option(
    "-dir",
    "--migration_dir",
    help="Migrations directory",
    required=False,
    type=click.Path(exists=True),
)
def create_migration(name: str, config: str, databases: str, migration_dir: str):
    # Initialize settings the same way `migrate` does, then scaffold a new
    # migration file named *name* in the configured migrations directory.
    Settings().init_config(
        config_path=config, databases=databases, migration_dir=migration_dir
    )
    MigrationService.create_new_migration(name)
cli.add_command(migrate)
cli.add_command(create_migration)


def main():
    """Console-script entry point: dispatch to the click group."""
    cli()


if __name__ == "__main__":
    # Route through main() so the script and any console-script entry point
    # share one code path (previously the guard bypassed main() entirely).
    main()
|
# An __init__.py file indicates that the files in this directory are part of a Python package.
# Without an __init__.py file, you cannot import files from another directory in a Python project.
#__init__ can be empty but can also be used to set up imports. There are 3 kinds of imports:
# 1. example_package/__init__.py and explicit imports:
# from .example import sample_function
# Where sample_function is the name of the function and .example is the name of the module/file
#Now in main.py I can directly import this function
# 2.main_package/__init__.py and standard import:
# import example_package
# This imports the entire package
# 3. main_package/__init__.py and wild card import
# In __init__.py, set an __all__ variable to a list of the modules/files in the package.
# __all__ = ["file1", "file2", "file3"]
|
from .Action import Action
class DropMsg(Exception):
    """Raised by DropAction to signal that the current record must be dropped."""

    def __init__(self):
        # Name the class explicitly: super(type(self), self) recurses forever
        # in subclasses, because type(self) stays the subclass on every call.
        super(DropMsg, self).__init__()
class DropAction(Action):
    """Action that aborts processing of a record by raising DropMsg."""

    def __init__(self):
        super(DropAction, self).__init__(actionId = "drop", actionType = "drop")

    def run(self, record):
        """Run the base-class bookkeeping, then drop *record*."""
        # Name the class explicitly: super(type(self), self) recurses forever
        # when DropAction is subclassed.
        super(DropAction, self).run(record)
        raise DropMsg()

    def __str__(self):
        return "DROP\n"
|
#!/usr/bin/env python3
import os
from aws_cdk import aws_codebuild as codebuild
from aws_cdk import aws_codepipeline as codepipeline
from aws_cdk import aws_codepipeline_actions as codepipeline_actions
from aws_cdk import aws_codestarnotifications as notifications
from aws_cdk import aws_iam as iam
from aws_cdk import aws_s3 as s3
from aws_cdk import core
app = core.App()
# Pipeline configuration comes from the CDK context key 'examples-pipeline'.
pipeline_params = app.node.try_get_context('examples-pipeline')
deployment_secret = pipeline_params['deployment-secret']
stack = core.Stack(
    app, 'EMRLaunchExamplesDeploymentPipeline', env=core.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"]))
artifacts_bucket = s3.Bucket(stack, 'ArtifactsBucket')
source_output = codepipeline.Artifact('SourceOutput')
# One build role shared by every CodeBuild project in the pipeline below.
code_build_role = iam.Role(
    stack, 'EMRLaunchExamplesBuildRole',
    role_name='EMRLaunchExamplesBuildRole',
    assumed_by=iam.ServicePrincipal('codebuild.amazonaws.com'),
    managed_policies=[
        iam.ManagedPolicy.from_aws_managed_policy_name('PowerUserAccess'),
        iam.ManagedPolicy.from_aws_managed_policy_name('IAMFullAccess')
    ],
)
def _codebuild_action(action_name, project_id, buildspec, project_dir, stack_file):
    """Build one CodeBuildAction running *buildspec* for a single CDK app.

    Every deploy stage below used an identical 18-line stanza differing only
    in these five values, so the stanza is factored out here. PROJECT_DIR and
    STACK_FILE are exported to the buildspec as environment variables.
    """
    return codepipeline_actions.CodeBuildAction(
        action_name=action_name,
        project=codebuild.PipelineProject(
            stack, project_id,
            build_spec=codebuild.BuildSpec.from_source_filename(buildspec),
            role=code_build_role,
            environment=codebuild.BuildEnvironment(
                build_image=codebuild.LinuxBuildImage.STANDARD_4_0,
                environment_variables={
                    'PROJECT_DIR': codebuild.BuildEnvironmentVariable(value=project_dir),
                    'STACK_FILE': codebuild.BuildEnvironmentVariable(value=stack_file)
                }
            )
        ),
        input=source_output,
    )


pipeline = codepipeline.Pipeline(
    stack, 'CodePipeline',
    pipeline_name='EMR_Launch_Examples',
    restart_execution_on_update=True,
    artifact_bucket=artifacts_bucket, stages=[
        codepipeline.StageProps(stage_name='Source', actions=[
            codepipeline_actions.GitHubSourceAction(
                action_name='GitHub_Source',
                repo='aws-emr-launch',
                branch=pipeline_params['github-branch'],
                owner=pipeline_params['github-owner'],
                oauth_token=core.SecretValue.secrets_manager(
                    secret_id=deployment_secret['secret-id'],
                    json_field=deployment_secret['json-fields']['github-oauth-token']),
                trigger=codepipeline_actions.GitHubTrigger.WEBHOOK,
                output=source_output,
            )]),
        # The pipeline redeploys itself before deploying any example stacks.
        codepipeline.StageProps(stage_name='Self-Update', actions=[
            _codebuild_action(
                'Self_Deploy', 'CodePipelineBuild',
                'codepipeline/pipelines-buildspec.yaml',
                'codepipeline', 'examples_pipeline.py'),
        ]),
        codepipeline.StageProps(stage_name='Examples-Environment', actions=[
            _codebuild_action(
                'Environment_Deploy', 'EnvironmentBuild',
                'codepipeline/examples-buildspec.yaml',
                'examples/environment_stack', 'app.py'),
        ]),
        codepipeline.StageProps(stage_name='Control-Plane', actions=[
            _codebuild_action(
                'ControlPlane_Deploy', 'ControlPlaneBuild',
                'codepipeline/examples-buildspec.yaml',
                'examples/control_plane', 'app.py'),
        ]),
        codepipeline.StageProps(stage_name='Profiles-and-Configurations', actions=[
            _codebuild_action(
                'EMRProfiles_Deploy', 'EMRProfilesBuild',
                'codepipeline/examples-buildspec.yaml',
                'examples/emr_profiles', 'app.py'),
            _codebuild_action(
                'ClusterConfigurations_Deploy', 'ClusterConfigurationsBuild',
                'codepipeline/examples-buildspec.yaml',
                'examples/cluster_configurations', 'app.py'),
        ]),
        codepipeline.StageProps(stage_name='EMR-Launch-Function', actions=[
            _codebuild_action(
                'EMRLaunchFunction_Deploy', 'EMRLaunchFunctionBuild',
                'codepipeline/examples-buildspec.yaml',
                'examples/emr_launch_function', 'app.py'),
        ]),
        codepipeline.StageProps(stage_name='Pipelines', actions=[
            _codebuild_action(
                'TransientClusterPipeline_Deploy', 'TransientClusterPipelineBuild',
                'codepipeline/examples-buildspec.yaml',
                'examples/transient_cluster_pipeline', 'app.py'),
            _codebuild_action(
                'PersistentClusterPipeline_Deploy', 'PersistentClusterPipelineBuild',
                'codepipeline/examples-buildspec.yaml',
                'examples/persistent_cluster_pipeline', 'app.py'),
            _codebuild_action(
                'SNSTriggeredPipeline_Deploy', 'SNSTriggeredPipelineBuild',
                'codepipeline/examples-buildspec.yaml',
                'examples/sns_triggered_pipeline', 'app.py'),
        ]),
    ])
# Notify a Slack channel (via AWS Chatbot) on pipeline failure/cancel/success.
notification_rule = notifications.CfnNotificationRule(
    stack, 'CodePipelineNotifications',
    detail_type='FULL',
    event_type_ids=[
        'codepipeline-pipeline-pipeline-execution-failed',
        'codepipeline-pipeline-pipeline-execution-canceled',
        'codepipeline-pipeline-pipeline-execution-succeeded'
    ],
    name='aws-emr-launch-codepipeline-notifications',
    resource=pipeline.pipeline_arn,
    targets=[
        notifications.CfnNotificationRule.TargetProperty(
            # The Chatbot target ARN is stored in the same deployment secret.
            target_address=core.Token.as_string(core.SecretValue.secrets_manager(
                secret_id=deployment_secret['secret-id'],
                json_field=deployment_secret['json-fields']['slack-chatbot'])),
            target_type='AWSChatbotSlack')
    ],
)
app.synth()
|
# Interactive launcher: importing one of the sibling modules runs it.
a = input("Which file?")
if a == "main":
    # `import main.py` would look for a submodule `py` inside a package
    # `main` and raise ModuleNotFoundError; the module name has no suffix.
    import main
elif a == "gui":
    import gui
elif a == "commands":
    import commands
elif a == "admin":
    print ("There is no admin, you fool!")
import io
import os
from shutil import rmtree
import tempfile
import pytest
from extractnet import data_processing
FIXTURES = os.path.join('test', 'datafiles')
@pytest.fixture(scope="module")
def fileroots():
    # Base names (without the ".html.corrected.txt" suffix) used to build
    # the fixture files in datadir.
    return ["bbc.co.story", "f1", "sad8-2sdkfj"]
@pytest.fixture(scope="class")
def datadir(fileroots):
    # Temporary directory holding one minimal "<root>.html.corrected.txt"
    # file per root; torn down after the consuming test class finishes.
    datadir = tempfile.mkdtemp()
    for froot in fileroots:
        fname = os.path.join(datadir, "{}.html.corrected.txt".format(froot))
        with io.open(fname, mode="wt", encoding='utf8') as f:
            f.write(u".")
    yield datadir
    rmtree(datadir)
@pytest.mark.usefixtures("datadir")
class TestGetFilenames(object):
    """Tests for data_processing.get_filenames over the datadir fixture."""

    def test_get_filenames(self, fileroots, datadir):
        """All fixture files are listed by bare name."""
        filenames = list(data_processing.get_filenames(datadir))
        assert (
            filenames ==
            ["{}.html.corrected.txt".format(froot) for froot in fileroots]
        )
    def test_get_filenames_full_path(self, fileroots, datadir):
        """full_path=True prefixes each name with the directory."""
        filenames = list(data_processing.get_filenames(datadir, full_path=True))
        assert (
            filenames ==
            [os.path.join(datadir, "{}.html.corrected.txt".format(froot))
             for froot in fileroots]
        )
    def test_get_filenames_match_regex(self, datadir):
        """match_regex filters by name; non-matching patterns yield nothing."""
        filenames = list(data_processing.get_filenames(datadir, match_regex='f1'))
        assert filenames == ['f1.html.corrected.txt']
        filenames = list(data_processing.get_filenames(datadir, match_regex='foo'))
        assert filenames == []
    def test_get_filenames_extension(self, fileroots, datadir):
        """extension filters by suffix; an unknown suffix yields nothing."""
        filenames = list(data_processing.get_filenames(datadir, extension='.txt'))
        assert (
            filenames ==
            ['{}.html.corrected.txt'.format(froot) for froot in fileroots]
        )
        filenames = list(data_processing.get_filenames(datadir, extension='.foo'))
        assert filenames == []
class TestReadGoldStandard(object):
    """Tests for data_processing.read_gold_standard_file across encodings."""

    def test_read_gold_standard(self):
        """Content and comments decode correctly for each fixture encoding."""
        tests = {
            'ascii': u'ascii yo!',
            'iso-8859-1': u'\xd3',
            'utf-8': u'\xae',
            'utf-16': u'\xae',
        }
        for encoding, expected in tests.items():
            content_comments = data_processing.read_gold_standard_file(
                FIXTURES, encoding)
            assert content_comments[0] == u"Content here\nmore content\n" + expected
            assert content_comments[1] == "some comments"
    def test_utf8_chinese(self):
        """A UTF-8 Chinese fixture round-trips without mojibake."""
        actual_chinese_content = u'<h>\u9ad8\u8003\u8bed\u6587\u5168\u7a0b\u68c0\u6d4b\u4e09\uff1a\u6b63\u786e\u4f7f\u7528\u8bcd\u8bed\uff08\u719f\u8bed\u4e00\uff09\n\n\n <h>LEARNING.SOHU.COM 2004\u5e745\u670822\u65e515:36 '
        gs = " ".join(data_processing.read_gold_standard_file(FIXTURES, "utf-8_chinese"))
        assert gs == actual_chinese_content
def make_filepath(s):
    """Return the block_corrected fixture path for root name *s*."""
    filename = "{}.block_corrected.txt".format(s)
    return os.path.join(FIXTURES, "block_corrected", filename)
class TestExtractGoldStandard(object):
    """Placeholder for gold-standard extraction tests (not yet implemented)."""

    def test_extract_blank_label(self):
        # TODO: implement extraction test for blank labels.
        pass
from collections import OrderedDict
import json
from gui.tree_widget_configuration import *
from nodes import *
from core.node_edge import Edge
# TODO melhorar comandos para undo e redo
class Serialization:
    """Saves and loads the node-editor scene (nodes, edges, lembretes) as JSON.

    Depending on *sender* ('Salvar', 'Abrir', 'Abrir Recente', 'Salvar como')
    the constructor immediately serializes or deserializes to/from *filename*.
    """

    def __init__(self, Session=None, currentInstance=None, sender=None, filename=None):
        self.Session = Session
        # self.instance = NodeEditorWnd
        self.instance = currentInstance
        if filename is None:
            self.filename = 'examples/SCENE.txt'
        else:
            self.filename = filename
        print(self.filename)
        if sender is not None:
            if sender == 'Salvar':
                logging.info(f'Salvar Arquivo {self.filename}')
                self.instance = currentInstance
                print('Entrei em Serialization ')
                logging.info(f'Salver {currentInstance}')
                logging.info('Classe Serialization')
                self.serialize()
            elif sender == 'Abrir' or sender == 'Abrir Recente':
                logging.info(f'Abrir Arquivo {self.filename}')
                with open(self.filename, "r") as file:
                    raw_data = file.read()
                    # json.loads() lost its `encoding` kwarg in Python 3.9;
                    # the input is already a str, so no kwarg is needed.
                    data = json.loads(raw_data)
                    logging.info(f'Arquivo {self.filename} carregado com sucesso')
                    self.deserialize(data)
            elif sender == 'Salvar como':
                logging.info(f'Salvando Scene em (unknown)')
                self.serialize()

    def serialize(self):
        """Serialize the current instance's scene to self.filename."""
        self.sceneSerialization(self.instance.scene)

    def lembreteCheckBoxSerialization(self, checkbox):
        # Only the checked state of a lembrete checkbox is persisted.
        return OrderedDict([('state', checkbox.isChecked())])

    def lembreteSerialization(self, lembrete):
        """Serialize one lembrete: its text, checkboxes, size and position."""
        checkboxes = []
        for checkbox in lembrete.checkboxes:
            checkboxes.append(self.lembreteCheckBoxSerialization(checkbox))
        return OrderedDict([('texto', lembrete.plaintextedit.toPlainText()),
                            ('checkbox', checkboxes),
                            ('width', lembrete.width()),
                            ('height', lembrete.height()),
                            ('x', lembrete.pos().x()),
                            ('y', lembrete.pos().y())])

    def sceneSerialization(self, scene, only_scene=0):
        """Serialize *scene*; write to self.filename unless only_scene is truthy,
        in which case the OrderedDict is returned instead."""
        nodes, edges, lembretes = [], [], []
        for node in scene.nodes:
            nodes.append(self.nodeSerialization(node))
        for edge in scene.edges:
            edges.append(self.edgeSerialization(edge))
        if self.Session is not None:
            for lembrete in self.Session.lembretes: lembretes.append(self.lembreteSerialization(lembrete))
        retString = OrderedDict([('id', scene.id),
                                 ('scene', scene.title),
                                 ('width', scene.scene_width),
                                 ('height', scene.scene_height),
                                 ('nodes', nodes),
                                 ('edges', edges),
                                 ('lembretes', lembretes)
                                 ])
        if only_scene:
            return retString
        with open(self.filename, 'w') as file:
            file.write(json.dumps(retString, indent=4))
            logging.info(f'ARQUIVO {file} SALVO COM SUCESSO! ')

    def nodeSerialization(self, node):
        """Serialize one node: sockets, user-entered content and geometry."""
        inputsockets = []
        outputsockets = []
        entrys = []
        checkboxes = []
        plainText = []
        for sockets in node.inputs:
            inputsockets.append(self.socketSerialization(sockets))
        for sockets in node.outputs:
            outputsockets.append(self.socketSerialization(sockets))
        if hasattr(node.content, 'entrys'):
            logging.info('Salvando dados dos nós fornecidos pelo usuário')
            for entry in node.content.entrys:
                entrys.append(self.entrySerialization(node, entry))
        if hasattr(node.content, 'checkboxes'):
            for checkbox in node.content.checkboxes:
                checkboxes.append(self.checkboxSerialization(node, checkbox))
        if hasattr(node.content, 'plainText'):
            for plaintext in node.content.plainText:
                plainText.append(self.plainTextSerialization(node, plaintext))
        return OrderedDict([('id', node.id),
                            ('class', type(node).__name__),
                            ('module', node.module_str),
                            ('x_pos', node.pos.x()),
                            ('y_pos', node.pos.y()),
                            ('title', node.title),
                            ('inputs', inputsockets),
                            ('outputs', outputsockets),
                            ('sender', node.method),
                            ('entrys', entrys),
                            ('kwargs', node.kwargs),
                            ('args', node.args),
                            ('checkboxes', checkboxes),
                            ('plainText', plainText),
                            ])

    def plainTextSerialization(self, node, plainText):
        return OrderedDict([('text', plainText.toPlainText())])

    def checkboxSerialization(self, node, checkbox):
        # Checkboxes are matched back by index on deserialization.
        index = node.content.checkboxes.index(checkbox)
        status = checkbox.isChecked()
        return OrderedDict([('index', index),
                            ('status', status)])

    def entrySerialization(self, node, entry):
        index = node.content.entrys.index(entry)
        value = entry.text()
        return OrderedDict([('index', index),
                            ('valor', value)])

    def edgeSerialization(self, edge):
        # NOTE(review): id(edge.id) stores the CPython object id of the id
        # attribute, not edge.id itself — kept as-is because edgeDeserialize
        # matches on the same convention; confirm this is intended.
        return OrderedDict([('id', id(edge.id)),
                            ('StartSocket', id(edge.start_socket)),
                            ('EndSocket', id(edge.end_socket)),
                            ('edge_type', edge.edge_type)])

    def socketSerialization(self, socket):
        return OrderedDict([('id', id(socket)),
                            ('position', socket.position),
                            ('color', socket.socket_color),
                            ('status', socket.status)
                            ])

    def nodeDeserialize(self, data, scene=None, hashmap=None, same_id=True):
        """Recreate one node from *data* in *scene* (or the current project's
        scene) and restore sockets, entries, checkboxes and plain text."""
        node_class = globals()[data['class']]
        if scene is None:
            node_scene = self.Session.current_project.scene
            node_session = self.Session
        else:
            node_scene = scene
            node_session = scene.parent.mainwindow
        Node = node_class(node_scene,
                          parent=node_session,
                          method=data['sender'],
                          *data['args'],
                          **data['kwargs'])
        Node.setPos(data['x_pos'], data['y_pos'])
        if same_id:
            Node.id = data['id']
        for cont, inputs in enumerate(Node.inputs):
            inputs.id = data['inputs'][cont]['id']
            inputs.setStatus(data['inputs'][cont]['status'])
        for cont, outputs in enumerate(Node.outputs):
            outputs.id = data['outputs'][cont]['id']
            outputs.setStatus(data['outputs'][cont]['status'])
        logging.info('Carregando dados do Nó')
        try:
            for entrydata in data['entrys']:
                Node.content.entrys[entrydata['index']].setText(entrydata['valor'])
        except AttributeError:
            logging.error('Nao encontrada entrys, pular')
        for checkboxdata in data['checkboxes']:
            Node.content.checkboxes[checkboxdata['index']].setChecked(checkboxdata['status'])
        for index, plainText in enumerate(data['plainText']):
            Node.content.plainText[index].setText(plainText['text'])
        return Node

    def edgeDeserialize(self, edge, nodes, hashmap=None):
        """Recreate one edge by matching socket ids against the node dicts."""
        logging.info('Carregar edge')
        start_socket_id = edge['StartSocket']
        end_socket_id = edge['EndSocket']
        logging.info(f'carregando edge de {start_socket_id} a {end_socket_id}')
        for node in nodes:
            for cont, socketinput in enumerate(node['inputs']):
                if (socketinput['id'] == end_socket_id):
                    startSocketNode = node
                    startSocket = cont
            for cont, socketoutput in enumerate(node['outputs']):
                if socketoutput['id'] == start_socket_id:
                    endSocketNode = node
                    endSocket = cont
        for node in self.Session.current_project.scene.nodes:
            if node.id == startSocketNode['id']:
                startNode = node
            elif node.id == endSocketNode['id']:
                endNode = node
        drawedge = Edge(self.Session.current_project.scene,
                        endNode.outputs[endSocket],
                        startNode.inputs[startSocket],
                        edge_type=edge['edge_type'])
        drawedge.start_socket = endNode.outputs[endSocket]
        drawedge.end_socket = startNode.inputs[startSocket]
        drawedge.start_socket.setConnectedEdge(drawedge)
        drawedge.end_socket.setConnectedEdge(drawedge)
        drawedge.updatePositions()
        drawedge.id = edge['id']
        logging.info('edge carregada')

    def sceneDeserialize(self, data, scene=None, hashmap=None):
        """Rebuild a whole scene; with *scene* given, load into it in place,
        otherwise create a new project in the session first."""
        if scene is not None:
            for nodedata in data['nodes']:
                self.nodeDeserialize(nodedata, scene=scene)
            for edgedata in data['edges']:
                self.edgeDeserialize(edgedata, data['nodes'])
            return
        logging.info('Carregando projeto')
        titulo = data['scene']
        self.Session.new_project(title=titulo)
        self.Session.current_project.scene.id = data['id']
        for nodedata in data['nodes']:
            self.nodeDeserialize(nodedata)
        for edgedata in data['edges']:
            self.edgeDeserialize(edgedata, data['nodes'])
        self.lembreteDeserialize(data['lembretes'])

    def lembreteDeserialize(self, lembretes, hashmap=None):
        """Recreate the session's lembretes and restore text/size/checkboxes."""
        for cont, lembrete in enumerate(lembretes):
            texto = lembrete['texto']
            cb_states = [cb['state'] for cb in lembrete['checkbox']]
            self.Session.lembrete_function()
            self.Session.lembretes[cont].plaintextedit.setPlainText(texto)
            self.Session.lembretes[cont].resize(lembrete['width'], lembrete['height'])
            for cont2, cboxes in enumerate(self.Session.lembretes[cont].checkboxes):
                cboxes.setChecked(cb_states[cont2])

    def deserialize(self, data, hashmap=None):
        """Entry point for loading: delegate to sceneDeserialize."""
        self.sceneDeserialize(data)
|
import ee
from ee_plugin import Map
# Load an image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318')
# Fetch the scene centroid client-side (each getInfo() is a server round-trip).
centroid = image.geometry().centroid().coordinates()
lon = centroid.get(0).getInfo()
lat = centroid.get(1).getInfo()
# print(centroid.getInfo())
# Create an NDWI image, define visualization parameters and display.
ndwi = image.normalizedDifference(['B3', 'B5'])
ndwiViz = {'min': 0.5, 'max': 1, 'palette': ['00FFFF', '0000FF']}
# Mask the non-watery parts of the image, where NDWI < 0.4.
ndwiMasked = ndwi.updateMask(ndwi.gte(0.4))
Map.setCenter(lon, lat, 10)
# Raw NDWI layer starts hidden (visible=False); the masked layer is shown.
Map.addLayer(ndwi, ndwiViz, 'NDWI', False)
Map.addLayer(ndwiMasked, ndwiViz, 'NDWI masked')
|
"""
Encrypt a file using GPG, then FTP
it to a remote server.
"""
from gnupg import GPG
from ftplib import FPT
# Create instance and set gpg working directory
gpg = GPG(gnupghome=".gpg")
# import an existing public key
with open("mykey.asc", 'r') as fp:
key_data = fp.read()
import_status = gpg.import_keys(key_data)
print("ok: {}".format(import_status.results["ok"]))
print("text: {}".format(import_status.results["text"]))
# Encrypt a file using the public key.
with open("plain.txt", 'rb') as fp:
encrypted_file = "encrypted.asc"
encrypt_status = gpg.encrypt_file(
fp,
recipients=import_status.fingerprints,
always_trust=True,
output=encrypted_file)
print("ok {}".format(encrypt_status.ok))
print("text: {}".format(encrypt_status.text))
print("stderr: {}".format(encrypt_status.stderr))
# FTP the file
with FTP("ftp.somehost.com") as ftp:
ftp.connect("a_username", "a_password")
fp = open(encrypted_file, 'rb')
ftp.storbinary("STOR {}".format(encrypted_file), fp)
ftp.quit()
fp.close() |
from setuptools import setup, find_packages
setup(
    name='ethsign-cli',
    packages=find_packages(),
    # Expose the CLI as the `ethsign-cli` console command.
    entry_points={
        'console_scripts': ['ethsign-cli=eth_sign_cli:cli']
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # NOTE(review): exact `==` pins; consider compatible ranges if this
    # package is ever consumed as a library rather than an application.
    install_requires=[
        'eth-utils == 1.7.0',
        'requests == 2.22.0',
        'eth-account == 0.4.0',
        'click == 7.0',
        'tornado == 6.0.3',
        'eth_abi == 2.0.0'
    ],
    version="0.1"
)
# Copyright (c) 2017 The sqlalchemy-bigquery Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Integration between SQLAlchemy and BigQuery."""
from __future__ import absolute_import
from __future__ import unicode_literals
from decimal import Decimal
import random
import operator
import uuid
from google import auth
import google.api_core.exceptions
from google.cloud.bigquery import dbapi
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import TableReference
from google.api_core.exceptions import NotFound
import sqlalchemy
import sqlalchemy.sql.expression
import sqlalchemy.sql.functions
import sqlalchemy.sql.sqltypes
import sqlalchemy.sql.type_api
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import types, util
from sqlalchemy.sql.compiler import (
SQLCompiler,
GenericTypeCompiler,
DDLCompiler,
IdentifierPreparer,
)
from sqlalchemy.sql.sqltypes import Integer, String, NullType, Numeric
from sqlalchemy.engine.default import DefaultDialect, DefaultExecutionContext
from sqlalchemy.engine.base import Engine
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql import elements, selectable
import re
from .parse_url import parse_url
from sqlalchemy_bigquery import _helpers
try:
from .geography import GEOGRAPHY
except ImportError:
pass
FIELD_ILLEGAL_CHARACTERS = re.compile(r"[^\w]+")
TABLE_VALUED_ALIAS_ALIASES = "bigquery_table_valued_alias_aliases"
def assert_(cond, message="Assertion failed"):  # pragma: NO COVER
    """Raise ``AssertionError(message)`` unless *cond* is truthy.

    Unlike the ``assert`` statement, this is not stripped under ``-O``.
    """
    if cond:
        return
    raise AssertionError(message)
class BigQueryIdentifierPreparer(IdentifierPreparer):
    """Identifier preparer that quotes with backticks, BigQuery-style.

    Modeled on the Presto preparer from
    https://github.com/dropbox/PyHive/blob/master/pyhive/sqlalchemy_presto.py
    """

    def __init__(self, dialect):
        super(BigQueryIdentifierPreparer, self).__init__(
            dialect, initial_quote="`",
        )

    def quote_column(self, value):
        """Quote a possibly-dotted column name one part at a time.

        Fields are quoted separately from the record name.
        """
        return ".".join(
            self.quote_identifier(part) for part in value.split(".")
        )

    def quote(self, ident, force=None, column=False):
        """Conditionally quote an identifier.

        NOTE(review): the *force* argument is accepted for interface
        compatibility but ignored; the decision is driven entirely by the
        identifier's own ``quote`` attribute.
        """
        force = getattr(ident, "quote", None)
        if force is None or force:
            if column:
                return self.quote_column(ident)
            return self.quote_identifier(ident)
        return ident

    def format_label(self, label, name=None):
        """Munge *name* into a legal BigQuery field name, then quote it."""
        name = name or label.name
        if not (name[0].isalpha() or name[0] == "_"):
            # Fields must start with a letter or underscore.
            name = "_" + name
        # Fields may contain only letters, numbers, and underscores.
        return self.quote(FIELD_ILLEGAL_CHARACTERS.sub("_", name))
# Map of BigQuery field-type names (as returned by the REST API) to
# SQLAlchemy generic types.
_type_map = {
    "ARRAY": types.ARRAY,
    "BIGNUMERIC": types.Numeric,
    "BOOLEAN": types.Boolean,
    "BOOL": types.Boolean,
    "BYTES": types.BINARY,
    "DATETIME": types.DATETIME,
    "DATE": types.DATE,
    "FLOAT64": types.Float,
    "FLOAT": types.Float,
    "INT64": types.Integer,
    "INTEGER": types.Integer,
    "NUMERIC": types.Numeric,
    "RECORD": types.JSON,
    "STRING": types.String,
    "TIMESTAMP": types.TIMESTAMP,
    "TIME": types.TIME,
}

# By convention, dialect-provided types are spelled with all upper case.
ARRAY = _type_map["ARRAY"]
# Fix: alias BIGNUMERIC from its own entry rather than "NUMERIC".
# (Both entries are types.Numeric, so the exported value is unchanged.)
BIGNUMERIC = _type_map["BIGNUMERIC"]
BOOLEAN = _type_map["BOOLEAN"]
BOOL = _type_map["BOOL"]
BYTES = _type_map["BYTES"]
DATETIME = _type_map["DATETIME"]
DATE = _type_map["DATE"]
FLOAT64 = _type_map["FLOAT64"]
FLOAT = _type_map["FLOAT"]
INT64 = _type_map["INT64"]
INTEGER = _type_map["INTEGER"]
NUMERIC = _type_map["NUMERIC"]
RECORD = _type_map["RECORD"]
STRING = _type_map["STRING"]
TIMESTAMP = _type_map["TIMESTAMP"]
TIME = _type_map["TIME"]

# GEOGRAPHY is only available when the optional geography extra could be
# imported at the top of the module; NameError means it was not.
try:
    _type_map["GEOGRAPHY"] = GEOGRAPHY
except NameError:
    pass
class BigQueryExecutionContext(DefaultExecutionContext):
    """Execution context that adapts cursors and statement text for BigQuery."""

    def create_cursor(self):
        # Set arraysize from the dialect-level setting, if configured.
        c = super(BigQueryExecutionContext, self).create_cursor()
        if self.dialect.arraysize:
            c.arraysize = self.dialect.arraysize
        return c

    def get_insert_default(self, column):  # pragma: NO COVER
        # Only used by compliance tests.
        if isinstance(column.type, Integer):
            # Fix: random.randint is inclusive on BOTH ends, so the upper
            # bound must be INT64 max, (1 << 63) - 1.  The previous bound of
            # 1 << 63 could (rarely) produce a value one past INT64 max.
            return random.randint(-9223372036854775808, 9223372036854775807)
        elif isinstance(column.type, String):
            return str(uuid.uuid4())

    # Rewrite the empty-IN construct emitted for typed parameters back to a
    # plain ' IN (...)' with no type marker.
    __remove_type_from_empty_in = _helpers.substitute_string_re_method(
        r"""
        \sIN\sUNNEST\(\[\s               # ' IN UNNEST([ '
        (
        (?:NULL|\(NULL(?:,\sNULL)+\))\)  # '(NULL)' or '((NULL, NULL, ...))'
        \s(?:AND|OR)\s\(1\s!?=\s1        # ' and 1 != 1' or ' or 1 = 1'
        )
        (?:[:][A-Z0-9]+)?                # Maybe ':TYPE' (e.g. ':INT64')
        \s\]\)                           # Close: ' ])'
        """,
        flags=re.IGNORECASE | re.VERBOSE,
        repl=r" IN(\1)",
    )

    @_helpers.substitute_re_method(
        r"""
        \sIN\sUNNEST\(\[\s       # ' IN UNNEST([ '
        (                        # Placeholders. See below.
        %\([^)]+_\d+\)s          # Placeholder '%(foo_1)s'
        (?:,\s                   # 0 or more placeholders
        %\([^)]+_\d+\)s
        )*
        )?
        :([A-Z0-9]+)             # Type ':TYPE' (e.g. ':INT64')
        \s\]\)                   # Close: ' ])'
        """,
        flags=re.IGNORECASE | re.VERBOSE,
    )
    def __distribute_types_to_expanded_placeholders(self, m):
        # If we have an IN parameter, it sometimes gets expanded to 0 or more
        # parameters and we need to move the type marker to each parameter.
        # (The way SQLAlchemy handles this is a bit awkward for our purposes.)
        #
        # In the placeholder part of the regex above, the `_\d+` suffixes
        # reflect that when an array parameter is expanded, numeric suffixes
        # are added.  For example, a placeholder like `%(foo)s` gets expanded
        # to `%(foo_0)s`, `%(foo_1)s`, ...
        placeholders, type_ = m.groups()
        if placeholders:
            placeholders = placeholders.replace(")", f":{type_})")
        else:
            placeholders = ""
        return f" IN UNNEST([ {placeholders} ])"

    def pre_exec(self):
        """Post-process the compiled statement just before execution."""
        self.statement = self.__distribute_types_to_expanded_placeholders(
            self.__remove_type_from_empty_in(self.statement)
        )
class BigQueryCompiler(SQLCompiler):
    """Statement compiler that renders BigQuery standard-SQL text."""

    compound_keywords = SQLCompiler.compound_keywords.copy()
    # BigQuery spells plain UNION as "UNION DISTINCT".
    compound_keywords[selectable.CompoundSelect.UNION] = "UNION DISTINCT"
    compound_keywords[selectable.CompoundSelect.UNION_ALL] = "UNION ALL"

    def __init__(self, dialect, statement, *args, **kwargs):
        # A bare Column has no FROM clause to resolve a table prefix
        # against, so compile it without one.
        if isinstance(statement, Column):
            kwargs["compile_kwargs"] = util.immutabledict({"include_table": False})
        super(BigQueryCompiler, self).__init__(dialect, statement, *args, **kwargs)

    def visit_insert(self, insert_stmt, asfrom=False, **kw):
        """Compile INSERT with ``inline`` forced off."""
        # The (internal) documentation for `inline` is confusing, but
        # having `inline` be true prevents us from generating default
        # primary-key values when we're doing executemany, which seem broken.
        # We can probably do this in the constructor, but I want to
        # make sure this only affects insert, because I'm paranoid. :)
        self.inline = False
        return super(BigQueryCompiler, self).visit_insert(
            insert_stmt, asfrom=False, **kw
        )

    def visit_table_valued_alias(self, element, **kw):
        """Compile a table-valued alias, prepending FROM entries for any
        tables referenced inside the function's arguments."""
        # When using table-valued functions, like UNNEST, BigQuery requires a
        # FROM for any table referenced in the function, including expressions
        # in function arguments.
        #
        # For example, given SQLAlchemy code:
        #
        #    print(
        #       select([func.unnest(foo.c.objects).alias('foo_objects').column])
        #       .compile(engine))
        #
        # Left to its own devices, SQLAlchemy would output:
        #
        #    SELECT `foo_objects`
        #    FROM unnest(`foo`.`objects`) AS `foo_objects`
        #
        # But BigQuery doesn't understand the `foo` reference unless
        # we add a reference to `foo` in the FROM:
        #
        #    SELECT foo_objects
        #    FROM `foo`, UNNEST(`foo`.`objects`) as foo_objects
        #
        # This is tricky because:
        # 1. We have to find the table references.
        # 2. We can't know practically if there's already a FROM for a table.
        #
        # We leverage visit_column to find a table reference.  Whenever we find
        # one, we create an alias for it, so as not to conflict with an existing
        # reference if one is present.
        #
        # This requires communicating between this function and visit_column.
        # We do this by sticking a dictionary in the keyword arguments.
        # This dictionary:
        # a. Tells visit_column that it's compiling a table-valued alias
        #    expression, and
        # b. Gives it a place to record the aliases it creates.
        #
        # This function creates aliases in the FROM list for any aliases recorded
        # by visit_column.
        kw[TABLE_VALUED_ALIAS_ALIASES] = {}
        ret = super().visit_table_valued_alias(element, **kw)
        aliases = kw.pop(TABLE_VALUED_ALIAS_ALIASES)
        if aliases:
            aliases = ", ".join(
                f"{self.preparer.quote(tablename)} {self.preparer.quote(alias)}"
                for tablename, alias in aliases.items()
            )
            ret = f"{aliases}, {ret}"
        return ret

    def visit_column(
        self,
        column,
        add_to_result_map=None,
        include_table=True,
        result_map_targets=(),
        **kwargs,
    ):
        """Compile a column reference, cooperating with
        visit_table_valued_alias via the TABLE_VALUED_ALIAS_ALIASES dict."""
        name = orig_name = column.name
        if name is None:
            name = self._fallback_column_name(column)
        is_literal = column.is_literal
        if not is_literal and isinstance(name, elements._truncated_label):
            name = self._truncated_identifier("colident", name)
        if add_to_result_map is not None:
            targets = (column, name, column.key) + result_map_targets
            if getattr(column, "_tq_label", None):
                # _tq_label was added in SQLAlchemy 1.4
                targets += (column._tq_label,)
            add_to_result_map(name, orig_name, targets, column.type)
        if is_literal:
            name = self.escape_literal_column(name)
        else:
            name = self.preparer.quote(name, column=True)
        table = column.table
        if table is None or not include_table or not table.named_with_column:
            return name
        else:
            tablename = table.name
            if isinstance(tablename, elements._truncated_label):
                tablename = self._truncated_identifier("alias", tablename)
            elif TABLE_VALUED_ALIAS_ALIASES in kwargs:
                # Inside a table-valued alias: refer to the table through a
                # generated alias, recorded for visit_table_valued_alias.
                aliases = kwargs[TABLE_VALUED_ALIAS_ALIASES]
                if tablename not in aliases:
                    aliases[tablename] = self.anon_map[
                        f"{TABLE_VALUED_ALIAS_ALIASES} {tablename}"
                    ]
                tablename = aliases[tablename]
            return self.preparer.quote(tablename) + "." + name

    def visit_label(self, *args, within_group_by=False, **kwargs):
        # Use labels in GROUP BY clause.
        #
        # Flag set in the group_by_clause method. Works around missing
        # equivalent to supports_simple_order_by_label for group by.
        if within_group_by:
            kwargs["render_label_as_label"] = args[0]
        return super(BigQueryCompiler, self).visit_label(*args, **kwargs)

    def group_by_clause(self, select, **kw):
        # See visit_label: BigQuery allows grouping by select-list labels.
        return super(BigQueryCompiler, self).group_by_clause(
            select, **kw, within_group_by=True
        )

    ############################################################################
    # Handle parameters in in
    # Due to details in the way sqlalchemy arranges the compilation we
    # expect the bind parameter as an array and unnest it.
    # As it happens, bigquery can handle arrays directly, but there's
    # no way to tell sqlalchemy that, so it works harder than
    # necessary and makes us do the same.
    __sqlalchemy_version_info = tuple(map(int, sqlalchemy.__version__.split(".")))
    # SQLAlchemy 1.4 renamed the expanding-parameter marker text.
    __expandng_text = (
        "EXPANDING" if __sqlalchemy_version_info < (1, 4) else "POSTCOMPILE"
    )
    # Rewrite ' IN ([EXPANDING_foo_1]:INT64)' to ' IN UNNEST([ ... ])'.
    __in_expanding_bind = _helpers.substitute_string_re_method(
        fr"""
        \sIN\s\(                  # ' IN ('
        (
        \[                        # Expanding placeholder
        {__expandng_text}         # e.g. [EXPANDING_foo_1]
        _[^\]]+                   #
        \]
        (:[A-Z0-9]+)?             # type marker (e.g. ':INT64'
        )
        \)$                       # close w ending )
        """,
        flags=re.IGNORECASE | re.VERBOSE,
        repl=r" IN UNNEST([ \1 ])",
    )

    def visit_in_op_binary(self, binary, operator_, **kw):
        return self.__in_expanding_bind(
            self._generate_generic_binary(binary, " IN ", **kw)
        )

    def visit_empty_set_expr(self, element_types):
        return ""

    def visit_not_in_op_binary(self, binary, operator, **kw):
        return (
            "("
            + self.__in_expanding_bind(
                self._generate_generic_binary(binary, " NOT IN ", **kw)
            )
            + ")"
        )

    visit_notin_op_binary = visit_not_in_op_binary  # before 1.4
    ############################################################################

    ############################################################################
    # Correct for differences in the way that SQLAlchemy escape % and _ (/)
    # and BigQuery does (\\).
    @staticmethod
    def _maybe_reescape(binary):
        binary = binary._clone()
        escape = binary.modifiers.pop("escape", None)
        if escape and escape != "\\":
            binary.right.value = escape.join(
                v.replace(escape, "\\")
                for v in binary.right.value.split(escape + escape)
            )
        return binary

    def visit_contains_op_binary(self, binary, operator, **kw):
        return super(BigQueryCompiler, self).visit_contains_op_binary(
            self._maybe_reescape(binary), operator, **kw
        )

    def visit_notcontains_op_binary(self, binary, operator, **kw):
        return super(BigQueryCompiler, self).visit_notcontains_op_binary(
            self._maybe_reescape(binary), operator, **kw
        )

    def visit_startswith_op_binary(self, binary, operator, **kw):
        return super(BigQueryCompiler, self).visit_startswith_op_binary(
            self._maybe_reescape(binary), operator, **kw
        )

    def visit_notstartswith_op_binary(self, binary, operator, **kw):
        return super(BigQueryCompiler, self).visit_notstartswith_op_binary(
            self._maybe_reescape(binary), operator, **kw
        )

    def visit_endswith_op_binary(self, binary, operator, **kw):
        return super(BigQueryCompiler, self).visit_endswith_op_binary(
            self._maybe_reescape(binary), operator, **kw
        )

    def visit_notendswith_op_binary(self, binary, operator, **kw):
        return super(BigQueryCompiler, self).visit_notendswith_op_binary(
            self._maybe_reescape(binary), operator, **kw
        )
    ############################################################################

    # Match a pyformat placeholder like '%(foo)s' / '%(foo:INT64)s'.
    __placeholder = re.compile(r"%\(([^\]:]+)(:[^\]:]+)?\)s$").match
    # Match an expanded parameter like '([POSTCOMPILE_foo_1])'.
    __expanded_param = re.compile(fr"\(\[" fr"{__expandng_text}" fr"_[^\]]+\]\)$").match
    # Strip type parameterization, e.g. 'STRING(42)' -> 'STRING'.
    __remove_type_parameter = _helpers.substitute_string_re_method(
        r"""
        (STRING|BYTES|NUMERIC|BIGNUMERIC)  # Base type
        \(                                 # Dimensions e.g. '(42)', '(4, 2)':
        \s*\d+\s*                          # First dimension
        (?:,\s*\d+\s*)*                    # Remaining dimensions
        \)
        """,
        repl=r"\1",
        flags=re.VERBOSE | re.IGNORECASE,
    )

    def visit_bindparam(
        self,
        bindparam,
        within_columns_clause=False,
        literal_binds=False,
        skip_bind_expression=False,
        **kwargs,
    ):
        """Compile a bind parameter, annotating the placeholder with its
        BigQuery type and wrapping expanding IN parameters in UNNEST()."""
        type_ = bindparam.type
        unnest = False
        if (
            bindparam.expanding
            and not isinstance(type_, NullType)
            and not literal_binds
        ):
            # Normally, when performing an IN operation, like:
            #
            #  foo IN (some_sequence)
            #
            # SQLAlchemy passes `foo` as a parameter and unpacks
            # `some_sequence` and passes each element as a parameter.
            # This mechanism is referred to as "expanding".  It's
            # inefficient and can't handle large arrays. (It's also
            # very complicated, but that's not the issue we care about
            # here. :) ) BigQuery lets us use arrays directly in this
            # context, we just need to call UNNEST on an array when
            # it's used in IN.
            #
            # So, if we get an `expanding` flag, and if we have a known type
            # (and don't have literal binds, which are implemented in-line
            # in the SQL), we turn off expanding and we set an unnest flag
            # so that we add an UNNEST() call (below).
            #
            # The NullType/known-type check has to do with some extreme
            # edge cases having to do with empty in-lists that get special
            # hijinks from SQLAlchemy that we don't want to disturb. :)
            if getattr(bindparam, "expand_op", None) is not None:
                assert bindparam.expand_op.__name__.endswith("in_op")  # in_op/not_in_op
                bindparam.expanding = False
                unnest = True
        param = super(BigQueryCompiler, self).visit_bindparam(
            bindparam,
            within_columns_clause,
            literal_binds,
            skip_bind_expression,
            **kwargs,
        )
        if literal_binds or isinstance(type_, NullType):
            return param
        if (
            isinstance(type_, Numeric)
            and (type_.precision is None or type_.scale is None)
            and isinstance(bindparam.value, Decimal)
        ):
            # Infer missing precision/scale from the actual Decimal value.
            t = bindparam.value.as_tuple()
            if type_.precision is None:
                type_.precision = len(t.digits)
            if type_.scale is None and t.exponent < 0:
                type_.scale = -t.exponent
        bq_type = self.dialect.type_compiler.process(type_)
        if bq_type[-1] == ">" and bq_type.startswith("ARRAY<"):
            # Values get arrayified at a lower level.
            bq_type = bq_type[6:-1]
        bq_type = self.__remove_type_parameter(bq_type)
        assert_(param != "%s", f"Unexpected param: {param}")
        if bindparam.expanding:
            assert_(self.__expanded_param(param), f"Unexpected param: {param}")
            param = param.replace(")", f":{bq_type})")
        else:
            m = self.__placeholder(param)
            if m:
                name, type_ = m.groups()
                assert_(type_ is None)
                param = f"%({name}:{bq_type})s"
        if unnest:
            param = f"UNNEST({param})"
        return param
class BigQueryTypeCompiler(GenericTypeCompiler):
    """Render SQLAlchemy types as BigQuery standard-SQL type names."""

    def visit_INTEGER(self, type_, **kw):
        return "INT64"

    visit_BIGINT = visit_SMALLINT = visit_INTEGER

    def visit_BOOLEAN(self, type_, **kw):
        return "BOOL"

    def visit_FLOAT(self, type_, **kw):
        return "FLOAT64"

    visit_REAL = visit_FLOAT

    def visit_STRING(self, type_, **kw):
        """STRING; length is emitted only in a column definition."""
        if type_.length is None:
            return "STRING"
        if not isinstance(kw.get("type_expression"), Column):  # not a column def
            return "STRING"
        return f"STRING({type_.length})"

    visit_CHAR = visit_NCHAR = visit_STRING
    visit_VARCHAR = visit_NVARCHAR = visit_TEXT = visit_STRING

    def visit_ARRAY(self, type_, **kw):
        return "ARRAY<{}>".format(self.process(type_.item_type, **kw))

    def visit_BINARY(self, type_, **kw):
        if type_.length is None:
            return "BYTES"
        return f"BYTES({type_.length})"

    visit_VARBINARY = visit_BLOB = visit_BINARY

    def visit_NUMERIC(self, type_, **kw):
        """NUMERIC or BIGNUMERIC, the latter when precision/scale exceed
        NUMERIC's limits (precision > 38 or scale > 9)."""
        precision = type_.precision
        scale = type_.scale
        if precision is not None and isinstance(
            kw.get("type_expression"), Column
        ):  # column def
            if scale is not None:
                suffix = f"({precision}, {scale})"
            else:
                suffix = f"({precision})"
        else:
            suffix = ""
        needs_bignumeric = (precision is not None and precision > 38) or (
            scale is not None and scale > 9
        )
        base = "BIGNUMERIC" if needs_bignumeric else "NUMERIC"
        return base + suffix

    visit_DECIMAL = visit_NUMERIC
class BigQueryDDLCompiler(DDLCompiler):
    """DDL compiler emitting BigQuery-specific CREATE/ALTER syntax."""

    def visit_foreign_key_constraint(self, constraint):
        """BigQuery has no support for foreign keys."""
        return None

    def visit_primary_key_constraint(self, constraint):
        """BigQuery has no support for primary keys."""
        return None

    def visit_unique_constraint(self, constraint):
        """BigQuery has no support for unique constraints."""
        return None

    def get_column_specification(self, column, **kwargs):
        """Render a column clause, appending a description OPTIONS entry
        when the column has a comment."""
        colspec = super(BigQueryDDLCompiler, self).get_column_specification(
            column, **kwargs
        )
        comment = column.comment
        if comment is not None:
            description = process_string_literal(comment)
            colspec = "{} OPTIONS(description={})".format(colspec, description)
        return colspec

    def post_create_table(self, table):
        """Render an OPTIONS(...) clause for description / friendly_name."""
        bq_opts = table.dialect_options["bigquery"]
        opts = []
        if "description" in bq_opts or table.comment:
            # An explicit bigquery_description wins over the table comment.
            description = process_string_literal(
                bq_opts.get("description", table.comment)
            )
            opts.append(f"description={description}")
        if "friendly_name" in bq_opts:
            friendly_name = process_string_literal(bq_opts["friendly_name"])
            opts.append("friendly_name={}".format(friendly_name))
        if not opts:
            return ""
        return "\nOPTIONS({})".format(", ".join(opts))

    def visit_set_table_comment(self, create):
        """COMMENT ON is expressed as ALTER TABLE ... SET OPTIONS."""
        table_name = self.preparer.format_table(create.element)
        description = self.sql_compiler.render_literal_value(
            create.element.comment, sqlalchemy.sql.sqltypes.String()
        )
        return f"ALTER TABLE {table_name} SET OPTIONS(description={description})"

    def visit_drop_table_comment(self, drop):
        """Dropping a comment resets the description option to null."""
        table_name = self.preparer.format_table(drop.element)
        return f"ALTER TABLE {table_name} SET OPTIONS(description=null)"
def process_string_literal(value):
    """Render *value* as a quoted literal, doubling ``%`` so the result
    survives pyformat parameter substitution."""
    escaped = value.replace("%", "%%")
    return repr(escaped)
class BQString(String):
    """String type whose literal rendering doubles ``%`` for pyformat."""

    def literal_processor(self, dialect):
        # Delegate to the module-level helper (value -> repr with % doubled).
        return process_string_literal
class BQBinary(sqlalchemy.sql.sqltypes._Binary):
    """Binary type whose literal rendering doubles ``%`` for pyformat."""

    @staticmethod
    def __escape_bytes_literal(value):
        # Double % so pyformat parameter substitution leaves it intact.
        return repr(value.replace(b"%", b"%%"))

    def literal_processor(self, dialect):
        return self.__escape_bytes_literal
class BQClassTaggedStr(sqlalchemy.sql.type_api.TypeEngine):
    """Type whose literals render as ``CLASSNAME 'str(value)'``.

    Used for date/time values, which BigQuery writes as e.g.
    ``DATE '2021-01-01'``.
    """

    @staticmethod
    def process_literal_as_class_tagged_str(value):
        tag = value.__class__.__name__.upper()
        return "{} {}".format(tag, repr(str(value)))

    def literal_processor(self, dialect):
        return self.process_literal_as_class_tagged_str
class BQTimestamp(sqlalchemy.sql.type_api.TypeEngine):
    """Type whose literals render as ``TIMESTAMP '<value>'``."""

    @staticmethod
    def process_timestamp_literal(value):
        literal = process_string_literal(str(value))
        return "TIMESTAMP {}".format(literal)

    def literal_processor(self, dialect):
        return self.process_timestamp_literal
class BQArray(sqlalchemy.sql.sqltypes.ARRAY):
    """ARRAY type rendering literals as ``[item, item, ...]``."""

    def literal_processor(self, dialect):
        render_item = self.item_type._cached_literal_processor(dialect)
        if not render_item:
            raise NotImplementedError(
                f"Don't know how to literal-quote values of type {self.item_type}"
            )

        def process_array_literal(value):
            rendered = ", ".join(render_item(v) for v in value)
            return "[" + rendered + "]"

        return process_array_literal
class BigQueryDialect(DefaultDialect):
    """SQLAlchemy dialect for Google BigQuery."""

    name = "bigquery"
    driver = "bigquery"
    preparer = BigQueryIdentifierPreparer
    statement_compiler = BigQueryCompiler
    type_compiler = BigQueryTypeCompiler
    ddl_compiler = BigQueryDDLCompiler
    execution_ctx_cls = BigQueryExecutionContext
    supports_alter = False
    supports_comments = True
    inline_comments = True
    supports_pk_autoincrement = False
    supports_default_values = False
    supports_empty_insert = False
    supports_multivalues_insert = True
    supports_unicode_statements = True
    supports_unicode_binds = True
    supports_native_decimal = True
    description_encoding = None
    supports_native_boolean = True
    supports_simple_order_by_label = True
    postfetch_lastrowid = False
    preexecute_autoincrement_sequences = False

    # Replace generic SQLAlchemy types with BigQuery-aware subclasses so
    # literal rendering produces valid BigQuery SQL.
    colspecs = {
        String: BQString,
        sqlalchemy.sql.sqltypes._Binary: BQBinary,
        sqlalchemy.sql.sqltypes.Date: BQClassTaggedStr,
        sqlalchemy.sql.sqltypes.DateTime: BQClassTaggedStr,
        sqlalchemy.sql.sqltypes.Time: BQClassTaggedStr,
        sqlalchemy.sql.sqltypes.TIMESTAMP: BQTimestamp,
        sqlalchemy.sql.sqltypes.ARRAY: BQArray,
    }

    def __init__(
        self,
        arraysize=5000,
        credentials_path=None,
        location=None,
        credentials_info=None,
        list_tables_page_size=1000,
        *args,
        **kwargs,
    ):
        """Store connection defaults; the client itself is created in
        create_connect_args, where URL settings may override these."""
        super(BigQueryDialect, self).__init__(*args, **kwargs)
        self.arraysize = arraysize
        self.credentials_path = credentials_path
        self.credentials_info = credentials_info
        self.location = location
        # Default dataset; set from the URL in create_connect_args.
        self.dataset_id = None
        self.list_tables_page_size = list_tables_page_size

    @classmethod
    def dbapi(cls):
        """Return the google-cloud-bigquery DB-API module."""
        return dbapi

    @staticmethod
    def _build_formatted_table_id(table):
        """Build '<dataset_id>.<table_id>' string using given table."""
        return "{}.{}".format(table.reference.dataset_id, table.table_id)

    @staticmethod
    def _add_default_dataset_to_job_config(job_config, project_id, dataset_id):
        """Point *job_config* at '<project>.<dataset>' as its default dataset."""
        # If dataset_id is set, then we know the job_config isn't None
        if dataset_id:
            # If project_id is missing, use default project_id for the current environment
            if not project_id:
                _, project_id = auth.default()
            job_config.default_dataset = "{}.{}".format(project_id, dataset_id)

    def create_connect_args(self, url):
        """Parse the connection URL and build the BigQuery client.

        Returns ``([client], {})`` for the DB-API ``connect`` call.
        """
        (
            project_id,
            location,
            dataset_id,
            arraysize,
            credentials_path,
            default_query_job_config,
            list_tables_page_size,
        ) = parse_url(url)
        # URL settings override constructor defaults when present.
        self.arraysize = arraysize or self.arraysize
        self.list_tables_page_size = list_tables_page_size or self.list_tables_page_size
        self.location = location or self.location
        self.credentials_path = credentials_path or self.credentials_path
        self.dataset_id = dataset_id
        self._add_default_dataset_to_job_config(
            default_query_job_config, project_id, dataset_id
        )
        client = _helpers.create_bigquery_client(
            credentials_path=self.credentials_path,
            credentials_info=self.credentials_info,
            project_id=project_id,
            location=self.location,
            default_query_job_config=default_query_job_config,
        )
        return ([client], {})

    def _json_deserializer(self, row):
        """JSON deserializer for RECORD types.
        The DB-API layer already deserializes JSON to a dictionary, so this
        just returns the input.
        """
        return row

    def _get_table_or_view_names(self, connection, table_type, schema=None):
        """List names of tables (or views) whose table_type matches
        *table_type*, restricted to *schema* (dataset) when given."""
        current_schema = schema or self.dataset_id
        # Without a default dataset, qualify names with their dataset.
        get_table_name = (
            self._build_formatted_table_id
            if self.dataset_id is None
            else operator.attrgetter("table_id")
        )
        client = connection.connection._client
        datasets = client.list_datasets()
        result = []
        for dataset in datasets:
            if current_schema is not None and current_schema != dataset.dataset_id:
                continue
            try:
                tables = client.list_tables(
                    dataset.reference, page_size=self.list_tables_page_size
                )
                for table in tables:
                    if table_type == table.table_type:
                        result.append(get_table_name(table))
            except google.api_core.exceptions.NotFound:
                # It's possible that the dataset was deleted between when we
                # fetched the list of datasets and when we try to list the
                # tables from it. See:
                # https://github.com/googleapis/python-bigquery-sqlalchemy/issues/105
                pass
        return result

    @staticmethod
    def _split_table_name(full_table_name):
        """Split 'project.dataset.table' (or shorter forms) into a
        (project, dataset, table_name) tuple, with None for missing parts."""
        # Split full_table_name to get project, dataset and table name
        dataset = None
        table_name = None
        project = None
        table_name_split = full_table_name.split(".")
        if len(table_name_split) == 1:
            table_name = full_table_name
        elif len(table_name_split) == 2:
            dataset, table_name = table_name_split
        elif len(table_name_split) == 3:
            project, dataset, table_name = table_name_split
        else:
            raise ValueError(
                "Did not understand table_name: {}".format(full_table_name)
            )
        return (project, dataset, table_name)

    def _table_reference(
        self, provided_schema_name, provided_table_name, client_project
    ):
        """Resolve schema/table name parts into a TableReference.

        Raises ValueError when the schema and table name disagree about the
        project or dataset.
        """
        project_id_from_table, dataset_id_from_table, table_id = self._split_table_name(
            provided_table_name
        )
        project_id_from_schema = None
        dataset_id_from_schema = None
        if provided_schema_name is not None:
            provided_schema_name_split = provided_schema_name.split(".")
            if len(provided_schema_name_split) == 1:
                # A single-part schema is a project when the table already
                # carries a dataset; otherwise it is the dataset.
                if dataset_id_from_table:
                    project_id_from_schema = provided_schema_name_split[0]
                else:
                    dataset_id_from_schema = provided_schema_name_split[0]
            elif len(provided_schema_name_split) == 2:
                project_id_from_schema = provided_schema_name_split[0]
                dataset_id_from_schema = provided_schema_name_split[1]
            else:
                raise ValueError(
                    "Did not understand schema: {}".format(provided_schema_name)
                )
        if (
            dataset_id_from_schema
            and dataset_id_from_table
            and dataset_id_from_schema != dataset_id_from_table
        ):
            raise ValueError(
                "dataset_id specified in schema and table_name disagree: "
                "got {} in schema, and {} in table_name".format(
                    dataset_id_from_schema, dataset_id_from_table
                )
            )
        if (
            project_id_from_schema
            and project_id_from_table
            and project_id_from_schema != project_id_from_table
        ):
            raise ValueError(
                "project_id specified in schema and table_name disagree: "
                "got {} in schema, and {} in table_name".format(
                    project_id_from_schema, project_id_from_table
                )
            )
        project_id = project_id_from_schema or project_id_from_table or client_project
        dataset_id = dataset_id_from_schema or dataset_id_from_table or self.dataset_id
        table_ref = TableReference.from_string(
            "{}.{}.{}".format(project_id, dataset_id, table_id)
        )
        return table_ref

    def _get_table(self, connection, table_name, schema=None):
        """Fetch table metadata; raise NoSuchTableError if absent."""
        if isinstance(connection, Engine):
            connection = connection.connect()
        client = connection.connection._client
        table_ref = self._table_reference(schema, table_name, client.project)
        try:
            table = client.get_table(table_ref)
        except NotFound:
            raise NoSuchTableError(table_name)
        return table

    def has_table(self, connection, table_name, schema=None):
        """Return True when *table_name* exists in *schema*."""
        try:
            self._get_table(connection, table_name, schema)
            return True
        except NoSuchTableError:
            return False

    def _get_columns_helper(self, columns, cur_columns):
        """
        Recurse into record type and return all the nested field names.
        As contributed by @sumedhsakdeo on issue #17
        """
        results = []
        for col in columns:
            results += [col]
            if col.field_type == "RECORD":
                cur_columns.append(col)
                # Nested fields are exposed with dotted names, e.g. 'rec.sub'.
                fields = [
                    SchemaField.from_api_repr(
                        dict(f.to_api_repr(), name=f"{col.name}.{f.name}")
                    )
                    for f in col.fields
                ]
                results += self._get_columns_helper(fields, cur_columns)
                cur_columns.pop()
        return results

    def get_columns(self, connection, table_name, schema=None, **kw):
        """Return SQLAlchemy column-reflection dicts for *table_name*,
        including flattened RECORD subfields."""
        table = self._get_table(connection, table_name, schema)
        columns = self._get_columns_helper(table.schema, [])
        result = []
        for col in columns:
            try:
                coltype = _type_map[col.field_type]
            except KeyError:
                util.warn(
                    "Did not recognize type '%s' of column '%s'"
                    % (col.field_type, col.name)
                )
                # NOTE(review): this assigns the NullType class, not an
                # instance — confirm downstream consumers accept that.
                coltype = types.NullType
            if col.field_type.endswith("NUMERIC"):
                coltype = coltype(precision=col.precision, scale=col.scale)
            elif col.field_type == "STRING" or col.field_type == "BYTES":
                coltype = coltype(col.max_length)
            result.append(
                {
                    "name": col.name,
                    # REPEATED fields surface as ARRAY of the element type.
                    "type": types.ARRAY(coltype) if col.mode == "REPEATED" else coltype,
                    "nullable": col.mode == "NULLABLE" or col.mode == "REPEATED",
                    "comment": col.description,
                    "default": None,
                    "precision": col.precision,
                    "scale": col.scale,
                    "max_length": col.max_length,
                }
            )
        return result

    def get_table_comment(self, connection, table_name, schema=None, **kw):
        """Return the table's description in SQLAlchemy comment format."""
        table = self._get_table(connection, table_name, schema)
        return {
            "text": table.description,
        }

    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        # BigQuery has no support for foreign keys.
        return []

    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        # BigQuery has no support for primary keys.
        return {"constrained_columns": []}

    def get_indexes(self, connection, table_name, schema=None, **kw):
        """Expose partitioning and clustering as pseudo-indexes."""
        table = self._get_table(connection, table_name, schema)
        indexes = []
        if table.time_partitioning:
            indexes.append(
                {
                    "name": "partition",
                    "column_names": [table.time_partitioning.field],
                    "unique": False,
                }
            )
        if table.clustering_fields:
            indexes.append(
                {
                    "name": "clustering",
                    "column_names": table.clustering_fields,
                    "unique": False,
                }
            )
        return indexes

    def get_schema_names(self, connection, **kw):
        """Datasets play the role of schemas."""
        if isinstance(connection, Engine):
            connection = connection.connect()
        datasets = connection.connection._client.list_datasets()
        return [d.dataset_id for d in datasets]

    def get_table_names(self, connection, schema=None, **kw):
        """Return names of TABLE-type objects in *schema*."""
        if isinstance(connection, Engine):
            connection = connection.connect()
        return self._get_table_or_view_names(connection, "TABLE", schema)

    def get_view_names(self, connection, schema=None, **kw):
        """Return names of VIEW-type objects in *schema*."""
        if isinstance(connection, Engine):
            connection = connection.connect()
        return self._get_table_or_view_names(connection, "VIEW", schema)

    def do_rollback(self, dbapi_connection):
        # BigQuery has no support for transactions.
        pass

    def _check_unicode_returns(self, connection, additional_tests=None):
        # requests gives back Unicode strings
        return True

    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return the SQL text defining *view_name*."""
        if isinstance(connection, Engine):
            connection = connection.connect()
        client = connection.connection._client
        if self.dataset_id:
            # Qualify with the default dataset when one is configured.
            view_name = f"{self.dataset_id}.{view_name}"
        view = client.get_table(view_name)
        return view.view_query
class unnest(sqlalchemy.sql.functions.GenericFunction):
    """UNNEST generic function.

    Accepts exactly one argument (positionally or via ``expr=``); when the
    argument is a column expression of ARRAY type, the function's return
    type is set to the array's element type.
    """

    def __init__(self, *args, **kwargs):
        expr = kwargs.pop("expr", None)
        if expr is not None:
            args = (expr,) + args
        if len(args) != 1:
            raise TypeError("The unnest function requires a single argument.")
        (arg,) = args
        if isinstance(arg, sqlalchemy.sql.expression.ColumnElement):
            arg_type = arg.type
            if not isinstance(arg_type, sqlalchemy.sql.sqltypes.ARRAY):
                raise TypeError("The argument to unnest must have an ARRAY type.")
            self.type = arg_type.item_type
        super().__init__(*args, **kwargs)
# Entry point expected by SQLAlchemy's dialect registry.
dialect = BigQueryDialect

# Optionally register an Alembic migration impl when alembic is installed.
try:
    import alembic  # noqa
except ImportError:
    pass
else:
    from alembic.ddl import impl

    class SqlalchemyBigqueryImpl(impl.DefaultImpl):
        # Alembic selects this impl by dialect name.
        __dialect__ = "bigquery"
|
import sys
from . import logger
from . import cli
from . import input_
from . import process
from . import format_
from . import output
def main():
    """CLI entry point: read git history, process it, format it, emit it.

    Any failure is reported via ``sys.exit`` with an ``error:`` prefix.
    """
    try:
        options = cli.parse_options()
        logger.init_logger(options.verbose)
        history = input_.input_git_history(
            options.repo, options.revs, options.start, options.author,
        )
        processed = process.process_git_history(history)
        deduplicated = process.unique_git_history(processed)
        report = format_.format_git_history(options.project, deduplicated)
        # Always copy to the clipboard; write a file only when requested.
        output.copy_git_history(report)
        if options.output is not None:
            output.output_git_history(options.output, report)
    except Exception as exception:
        sys.exit('error: ' + str(exception))
|
# Printable ASCII characters '!'(33) .. '~'(126): the Phred+33 scale, where a
# quality character's position corresponds to scores 0..93.
# NOTE(review): the triple-quoted literal embeds newline characters, so naive
# indexing into this string will not map character -> score directly; confirm
# how it is consumed before relying on positions.
QUALITY_SCORE_STRING = '''
!"#$%&'()*+,-./0123456789:;<=>?@
ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`
abcdefghijklmnopqrstuvwxyz{|}~
'''
QUALITY_SCORE = 29 # threshold quality_score must be between 0 and 93

# RNA codon -> one-letter amino acid.  The three stop codons are mapped to
# distinct placeholder symbols: UAA -> '#', UAG -> '*', UGA -> '&'.
TRANSLATION_CODE = {
    'AAA':'K','AAC':'N','AAG':'K','AAU':'N',
    'ACA':'T','ACC':'T','ACG':'T','ACU':'T',
    'AGA':'R','AGC':'S','AGG':'R','AGU':'S',
    'AUA':'I','AUC':'I','AUG':'M','AUU':'I',
    'CAA':'Q','CAC':'H','CAG':'Q','CAU':'H',
    'CCA':'P','CCC':'P','CCG':'P','CCU':'P',
    'CGA':'R','CGC':'R','CGG':'R','CGU':'R',
    'CUA':'L','CUC':'L','CUG':'L','CUU':'L',
    'GAA':'E','GAC':'D','GAG':'E','GAU':'D',
    'GCA':'A','GCC':'A','GCG':'A','GCU':'A',
    'GGA':'G','GGC':'G','GGG':'G','GGU':'G',
    'GUA':'V','GUC':'V','GUG':'V','GUU':'V',
    'UAA':'#','UAC':'Y','UAG':'*','UAU':'Y',
    'UCA':'S','UCC':'S','UCG':'S','UCU':'S',
    'UGA':'&','UGC':'C','UGG':'W','UGU':'C',
    'UUA':'L','UUC':'F','UUG':'L','UUU':'F'
}

# Single-base map swapping T <-> U (A, C, G unchanged): converts between the
# DNA and RNA alphabets in either direction, one character at a time.
TRANSCRIPTION_CODE = {
    'A':'A','C':'C','G':'G','T':'U','U':'T'
}

# Accepted cDNA length bounds, in nucleotides.
CDNA_MIN_LENGTH = 24
CDNA_MAX_LENGTH = 240

# Flanking sequences used to locate the coding region in reads.  Alternative
# primer pairs for other library designs are kept below, commented out.
START_SEQUENCE = 'TAATACGACTCACTATAGGGTTAACTTTAAGAAGGAGATATACATATG' # NNK - T7g10M.F48
STOP_SEQUENCE = 'TGCGGCAGCGGCAGCGGCAGCTAGGACGGGGGGCGGAAA' #NNK - CGS3an13.R39
# START_SEQUENCE = 'TAATACGACTCACTATAGGGTTGAACTTTAAGTAGGAGATATATCCATG' #NNU - T7-CH-F49
# STOP_SEQUENCE = 'TGTGGGTCTGGGTCTGGGTCTTAGGACGGGGGGCGGAAA' #NNU - CGS3-CH-R39
# START_SEQUENCE = 'ATG' # Met codon
# STOP_SEQUENCE = 'TGCGGCAGC'# Akane seams to have trimmed siquences
# START_SEQUENCE = 'TAGGGTTAACTTTAAGAAGGAGATATACATATG'# Oxford, Akane and Tom
# STOP_SEQUENCE = 'TGCGGC'# Oxford, Akane and Tom
# STOP_SEQUENCE = 'TAG' # amber stop codon

# Reference list of the top-ranked peptides from the base selection round.
TOP_SORTED_PEPTIDES = [
    'VWDPRTFYLSRI', 'WDANTIFIKRV', 'WNPRTIFIKRA', 'VWDPRTFYLSRT',
    'IWDTGTFYLSRT', 'WWNTRSFYLSRI', 'FWDPRTFYLSRI', 'VWDPSTFYLSRI',
    'KWDTRTFYLSRY', 'KWDTRTFYLSRI', 'IWDPRTFYLSRI', 'IWDTGTFYLSRI',
    'VWDPRTFYLSRM', 'AWDPRTFYLSRI', 'VWDSRTFYLSRI', 'VWDPGTFYLSRI',
    'VWDPRTFYMSRI', 'VWDPRTFYLSRS', 'VWDPRTFYLSRV', 'WNPRTIFIKRV',
    'VRDPRTFYLSRI', 'VWDPKTFYLSRI', 'VWDPRTFYLSRN', 'FRFPFYIQRR'
]

# Measured Kd per peptide, as strings (some are bounds like '>1000'/'<1').
# NOTE(review): units are not stated here — confirm (presumably nM).
TOP_PEPTIDES_KDS = {
    'VWDPRTFYLSRI' : '3',
    'WDANTIFIKRV' : '4',
    'WNPRTIFIKRA' : '>1000',
    'VWDPRTFYLSRT' : '3',
    'IWDTGTFYLSRT' : '7',
    'WWNTRSFYLSRI' : '12',
    'FWDPRTFYLSRI' : '4',
    'VWDPSTFYLSRI' : '3',
    'KWDTRTFYLSRY' : '5',
    'KWDTRTFYLSRI' : '6',
    'IWDPRTFYLSRI' : '1',
    'VWDPRTFYLSRM' : '4',
    'IWDTGTFYLSRI' : '>1000',
    'VWDPGTFYLSRI' : '<1',
    'VWDSRTFYLSRI' : '3',
    'AWDPRTFYLSRI': '6',
    'VWDPRTFYLSRS' : '6',
    'VWDPRTFYMSRI' : '1',
    'VWDPRTFYLSRV' : '3',
    'WNPRTIFIKRV' : '>1000',
    'VRDPRTFYLSRI' : '>1000',
    'VWDPRTFYLSRN' : '>1000',
    'VWDPKTFYLSRI' : '14',
    'FRFPFYIQRR' : '>1000'
}
def display_summaryReport(
        data_directory_path, base_cycle, n_top_peptides, file_name):
    """Write a CSV summary of the top peptides across selection rounds and
    plot their per-round frequency trajectories to a PNG.

    Args:
        data_directory_path: directory containing the per-round sequencing data.
        base_cycle: the selection round used as the ranking baseline.
        n_top_peptides: number of top peptides (used for the colour scale;
            NOTE(review): the peptide list itself is hard-coded below and
            overrides the computed ``n_top_peptides`` slice).
        file_name: suffix used for the output CSV/PNG file names.
    """
    today = TodaysDate()
    display_summaryFileNameCSV = str(today) + 'display_summary' + file_name + '.csv'
    display_summaryReportFile = open(display_summaryFileNameCSV, 'w')
    display_summary = Completedisplay_summary(data_directory_path)
    SortedRoundsList = sorted(display_summary.keys())
    peptides_BY_Round = peptidesOccurrences_BY_Round(data_directory_path)
    Totalpeptides_BY_Round = TotalReads_BY_Round(data_directory_path)
    BaseRoundSortedpeptides = BaseRoundSortedpeptidesList(data_directory_path, base_cycle)
    #for i in range(len(BaseRoundSortedpeptides)):
    #    print ('>seq' + str(i + 1) + '\n' + BaseRoundSortedpeptides[i])
    #BaseRoundTopSortedpeptides = BaseRoundSortedpeptides[0 : (n_top_peptides)]
    # Hard-coded peptide list (mirrors TOP_SORTED_PEPTIDES at module level).
    BaseRoundTopSortedpeptides = ['VWDPRTFYLSRI', 'WDANTIFIKRV', 'WNPRTIFIKRA', 'VWDPRTFYLSRT',
                                  'IWDTGTFYLSRT', 'WWNTRSFYLSRI', 'FWDPRTFYLSRI', 'VWDPSTFYLSRI',
                                  'KWDTRTFYLSRY', 'KWDTRTFYLSRI', 'IWDPRTFYLSRI', 'IWDTGTFYLSRI',
                                  'VWDPRTFYLSRM', 'AWDPRTFYLSRI', 'VWDSRTFYLSRI', 'VWDPGTFYLSRI',
                                  'VWDPRTFYMSRI', 'VWDPRTFYLSRS', 'VWDPRTFYLSRV', 'WNPRTIFIKRV',
                                  'VRDPRTFYLSRI', 'VWDPKTFYLSRI', 'VWDPRTFYLSRN', 'FRFPFYIQRR'
                                  ]
    BaseRoundpeptidesRank = peptidesRank_IN_BaseRound(data_directory_path, base_cycle)
    #print (BaseRoundSortedpeptides)
    # Hard-coded KD table (mirrors TOP_PEPTIDES_KDS at module level).
    Top24peptidesKDs = {'VWDPRTFYLSRI' : '3', 'WDANTIFIKRV' : '4', 'WNPRTIFIKRA' : '>1000', 'VWDPRTFYLSRT' : '3',
                        'IWDTGTFYLSRT' : '7', 'WWNTRSFYLSRI' : '12', 'FWDPRTFYLSRI' : '4', 'VWDPSTFYLSRI' : '3',
                        'KWDTRTFYLSRY' : '5', 'KWDTRTFYLSRI' : '6', 'IWDPRTFYLSRI' : '1', 'VWDPRTFYLSRM' : '4',
                        'IWDTGTFYLSRI' : '>1000', 'VWDPGTFYLSRI' : '<1', 'VWDSRTFYLSRI' : '3', 'AWDPRTFYLSRI': '6',
                        'VWDPRTFYLSRS' : '6', 'VWDPRTFYMSRI' : '1', 'VWDPRTFYLSRV' : '3', 'WNPRTIFIKRV' : '>1000',
                        'VRDPRTFYLSRI' : '>1000', 'VWDPRTFYLSRN' : '>1000', 'VWDPKTFYLSRI' : '14', 'FRFPFYIQRR' : '>1000'
                        }
    # CSV header row: fixed columns, then one count/frequency column per round.
    display_summaryReportFile.write('peptide sequence' + ',' +
                                    'rank (#)' + ',' +
                                    'cDNA mutants' + ',')
    for Round in SortedRoundsList:
        display_summaryReportFile.write('C' +
                                        str(Round) +
                                        ' count (#) [frequency(%)]' + ',')
    display_summaryReportFile.write('\n')
    # One CSV row per top peptide.
    for peptide in BaseRoundTopSortedpeptides:
        #for peptide in Top24peptidesKDs:
        # NOTE(review): ``Round`` below is the stale value left over from the
        # header loop above (the last round), not ``base_cycle``, and the
        # result is never used — looks like dead/buggy code.
        BaseRoundpeptideFraction = float((peptides_BY_Round[Round].get(peptide, 0)))/float(Totalpeptides_BY_Round[base_cycle])
        peptideRank = BaseRoundpeptidesRank[peptide]
        # Format relative to the first (top) peptide, highlighting mutations.
        Formatedpeptide = HammingDistanceBasedFormating(BaseRoundTopSortedpeptides[0], peptide)
        peptidecDNAMutants = len(display_summary[base_cycle][peptide])
        display_summaryReportFile.write(Formatedpeptide + ',' +
                                        str(peptideRank) + ',' +
                                        str(peptidecDNAMutants) + ',')
        for Round in SortedRoundsList:
            peptideFraction = float((peptides_BY_Round[Round].get(peptide, 0)))/float(Totalpeptides_BY_Round[Round])
            # NOTE(review): BaseFraction is assigned twice and never read.
            BaseFraction = peptideFraction
            display_summaryReportFile.write(str(peptides_BY_Round[Round].get(peptide, 0)) +
                                            ' [' + '{:.1%}'.format(peptideFraction) + ']' + ',')
            BaseFraction = peptideFraction
        display_summaryReportFile.write('\n')
    # Footer row: total read counts per round.
    display_summaryReportFile.write('total count (#)' + ',' + ',')
    for Round in SortedRoundsList:
        display_summaryReportFile.write(str(Totalpeptides_BY_Round[Round]) + ',')
    display_summaryReportFile.write('\n\n\n')
    display_summaryReportFile.close()
    #-------------------------------------------------------------------------------
    # Create a figure of size 8x6 inches, 500 dots per inch
    plt.figure(
        figsize = (8, 6),
        dpi = 500)
    # Create 'ggplot' style
    plt.style.use('fivethirtyeight')
    # Create a new subplot from a grid of 1x1
    Graph = plt.subplot(1, 1, 1)
    Xs = []
    Ys = []
    Rank = 1
    peptideFractionInFinalRound = 0
    # Map colours onto lines
    cNorm = matplotlib.colors.Normalize(vmin = 0,
                                        vmax = n_top_peptides - 1)
    scalarMap = matplotlib.cm.ScalarMappable(norm = cNorm,
                                             cmap = 'Paired')
    peptideLabels = []
    # One line per peptide: fraction of total reads in each round.
    for peptide in BaseRoundTopSortedpeptides:
        #for peptide in Top24peptidesKDs:
        peptidesFractions_BY_Round = []
        for Round in SortedRoundsList:
            peptidesFractions_BY_Round += [float((peptides_BY_Round[Round].get(peptide, 0)))/float(Totalpeptides_BY_Round[Round])]
        x = SortedRoundsList
        y = peptidesFractions_BY_Round
        Xs += x
        Ys += y
        peptideColour = scalarMap.to_rgba(BaseRoundTopSortedpeptides.index(peptide))
        peptideRank = str(BaseRoundpeptidesRank[peptide])
        # peptideColour = scalarMap.to_rgba(peptideRank)
        peptideKD = Top24peptidesKDs[peptide]
        Formatedpeptide = HammingDistanceBasedFormating(BaseRoundTopSortedpeptides[0], peptide)
        # Legend entry: formatted sequence, base-round rank and measured KD.
        peptideLabel = Formatedpeptide + ' (' + peptideRank + ', ' + peptideKD +' nM)'
        #Set peptideLabel
        peptideLabels += [peptideLabel]
        plt.plot(x, y,
                 'o-',
                 c = peptideColour,
                 lw = 2.0,
                 ms = 4.0,
                 mew = 0.1,
                 mec = '#191919')
    # Pad the axis limits by 5% of the data range on every side.
    XMin = min(Xs) - 0.05*(max(Xs) - min(Xs))
    XMax = max(Xs) + 0.05*(max(Xs) - min(Xs))
    YMin = min(Ys) - 0.05*(max(Ys) - min(Ys))
    YMax = max(Ys) + 0.05*(max(Ys) - min(Ys))
    plt.axis([XMin, XMax, YMin, YMax])
    plt.xticks(fontsize = 10)
    plt.yticks(fontsize = 10)
    plt.xlabel('Selection Cycle (#)',
               fontsize = 10)
    plt.ylabel('peptide Fraction (%)',
               fontsize = 10)
    legend = plt.legend(peptideLabels,
                        title = 'cyclic-peptide random region',
                        loc = 'upper center',
                        bbox_to_anchor = (0.5, -0.10),
                        fancybox = True,
                        shadow = False,
                        fontsize = 10,
                        ncol = 3)
    Graph.get_legend().get_title().set_size('small')
    display_summaryFileNamePNG = str(today) + 'display_summary' + file_name + '.png'
    plt.savefig(display_summaryFileNamePNG,
                bbox_extra_artists = [legend],
                bbox_inches = 'tight',
                dpi = 300)
    plt.show()
    plt.close()
from importlib import import_module
class Base:
    # Empty marker base class for AlarmModel (gives extensions a common root).
    pass
def load_alarm_model(model):
    """Import and return the alarm-model module named by *model*.

    The module is looked up as ``alerta.models.alarms.<model lowercased>``.

    Raises:
        ImportError: if the module cannot be imported; the original exception
            is chained as ``__cause__`` so the real failure stays visible.
    """
    try:
        return import_module('alerta.models.alarms.%s' % model.lower())
    except Exception as e:
        # Fix: chain the underlying error instead of discarding it, so the
        # traceback shows *why* the import failed (syntax error, missing
        # dependency, ...), not just that it did.
        raise ImportError('Failed to load %s alarm model' % model) from e
class AlarmModel(Base):
    """Pluggable alarm model.

    The concrete implementation is selected at ``init_app()`` time from
    ``app.config['ALARM_MODEL']``; the instance's class is then swapped for a
    dynamically-built subclass of that model's ``StateMachine``.
    """
    # Severity names/levels and display colours, populated by register().
    Severity = {}  # type: ignore
    Colors = {}  # type: ignore
    DEFAULT_STATUS = None  # type: str
    DEFAULT_NORMAL_SEVERITY = None  # type: str
    DEFAULT_PREVIOUS_SEVERITY = None  # type: str
    NORMAL_SEVERITY_LEVEL = None  # type: str
    def __init__(self, app=None):
        # NOTE(review): self.app is set to None and never assigned ``app``
        # here; presumably register() implementations do that — confirm.
        self.app = None
        if app is not None:
            self.register(app)
    def init_app(self, app):
        cls = load_alarm_model(app.config['ALARM_MODEL'])
        # Rebind this instance to a new class combining the loaded model's
        # StateMachine with AlarmModel, so its overrides take effect.
        self.__class__ = type('AlarmModelImpl', (cls.StateMachine, AlarmModel), {})
        try:
            self.register(app)
        except Exception as e:
            # Registration failure is non-fatal: log and continue.
            app.logger.warning(e)
    def register(self, app):
        # Implemented by the loaded model: populate Severity/Colors/defaults.
        raise NotImplementedError
    def trend(self, previous, current):
        # Implemented by the loaded model: classify severity change direction.
        raise NotImplementedError
    def transition(self, alert, current_status=None, previous_status=None, action=None, **kwargs):
        # Implemented by the loaded model: compute the next status/severity.
        raise NotImplementedError
    @staticmethod
    def is_suppressed(alert):
        # Implemented by the loaded model: whether the alert is suppressed.
        raise NotImplementedError
|
from model_mommy import mommy
from django.core.urlresolvers import reverse
from decisiontree.multitenancy import models as link_models
from .. import models
from .cases import DecisionTreeTestCase, DeleteViewTestMixin
class TestAnswerDelete(DeleteViewTestMixin, DecisionTreeTestCase):
    """Exercise the shared delete-view behaviour for Answer objects."""
    link_model = link_models.AnswerLink
    model = models.Answer
    success_url_name = 'answer_list'
    url_name = 'delete_answer'
class TestPathDelete(DeleteViewTestMixin, DecisionTreeTestCase):
    """Exercise the shared delete-view behaviour for Transition (path) objects."""
    link_model = link_models.TransitionLink
    model = models.Transition
    success_url_name = 'path_list'
    url_name = 'delete_path'
class TestMessageDelete(DeleteViewTestMixin, DecisionTreeTestCase):
    """Exercise the shared delete-view behaviour for Message objects."""
    link_model = link_models.MessageLink
    model = models.Message
    success_url_name = 'list-messages'
    url_name = 'delete_message'
class TestStateDelete(DeleteViewTestMixin, DecisionTreeTestCase):
    """Exercise the shared delete-view behaviour for TreeState objects."""
    link_model = link_models.TreeStateLink
    model = models.TreeState
    success_url_name = 'state_list'
    url_name = 'delete_state'
class TestSurveyDelete(DeleteViewTestMixin, DecisionTreeTestCase):
    """Exercise the shared delete-view behaviour for Tree (survey) objects."""
    link_model = link_models.TreeLink
    model = models.Tree
    success_url_name = 'list-surveys'
    url_name = 'delete_tree'
class TestTagDelete(DeleteViewTestMixin, DecisionTreeTestCase):
    """Exercise the shared delete-view behaviour for Tag objects."""
    link_model = link_models.TagLink
    model = models.Tag
    success_url_name = 'list-tags'
    url_name = 'delete-tag'
class TestSurveySessionClose(DecisionTreeTestCase):
    """Tests for the session_close view: POST-only, 404 on missing session,
    no-op on an already-closed session, and redirect on success."""
    url_name = 'session_close'
    def setUp(self):
        # A superuser managing the tenant, logged in, with one open session.
        super(TestSurveySessionClose, self).setUp()
        self.user = mommy.make('auth.User', is_superuser=True)
        self.make_tenant_manager(self.user)
        self.login_user(self.user)
        self.session = mommy.make('decisiontree.Session',
                                  state=mommy.make('decisiontree.TreeState'),
                                  connection=self.connection)
    def get_url(self, **kwargs):
        """Build the session_close URL for this tenant (overridable kwargs)."""
        kwargs.setdefault('group_slug', self.tenant.group.slug)
        kwargs.setdefault('tenant_slug', self.tenant.slug)
        kwargs.setdefault('pk', self.session.pk)
        return reverse(self.url_name, kwargs=kwargs)
    def test_get(self):
        """Session close view requires POST."""
        response = self.client.get(self.get_url())
        self.assertEqual(response.status_code, 405)
        # Re-fetch to confirm the database state was not touched.
        self.session = models.Session.objects.get(pk=self.session.pk)
        self.assertTrue(self.session.is_open())
        self.assertFalse(self.session.is_closed())
    def test_delete(self):
        """Session close view requires POST."""
        response = self.client.delete(self.get_url())
        self.assertEqual(response.status_code, 405)
        self.session = models.Session.objects.get(pk=self.session.pk)
        self.assertTrue(self.session.is_open())
        self.assertFalse(self.session.is_closed())
    def test_non_existant(self):
        """View should return 404 response if session does not exist."""
        response = self.client.post(self.get_url(pk=1234))
        self.assertEqual(response.status_code, 404)
        self.session = models.Session.objects.get(pk=self.session.pk)
        self.assertTrue(self.session.is_open())
        self.assertFalse(self.session.is_closed())
    def test_already_closed(self):
        """Session close view does not act on closed session."""
        # TODO: mock to ensure that save is not called.
        self.session.close()
        response = self.client.post(self.get_url())
        expected_url = reverse('recent_sessions', kwargs={
            'group_slug': self.tenant.group.slug,
            'tenant_slug': self.tenant.slug,
            'pk': self.session.tree.pk,
        })
        self.assertRedirectsNoFollow(response, expected_url)
        self.session = models.Session.objects.get(pk=self.session.pk)
        self.assertTrue(self.session.is_closed())
        self.assertFalse(self.session.is_open())
    def test_close(self):
        """Session close view cancels the session."""
        response = self.client.post(self.get_url())
        expected_url = reverse('recent_sessions', kwargs={
            'group_slug': self.tenant.group.slug,
            'tenant_slug': self.tenant.slug,
            'pk': self.session.tree.pk,
        })
        self.assertRedirectsNoFollow(response, expected_url)
        self.session = models.Session.objects.get(pk=self.session.pk)
        self.assertTrue(self.session.is_closed())
        self.assertFalse(self.session.is_open())
|
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import argparse
from mo.utils.error import Error
from mo.utils.cli_parser import parse_transform
def get_available_transformations():
    """Return a mapping of transformation name -> callable.

    Returns an empty dict when the optional ``openvino.offline_transformations``
    module is not installed.
    """
    try:
        from openvino.offline_transformations import ApplyLowLatencyTransformation  # pylint: disable=import-error,no-name-in-module
    except ImportError:
        # Fix: catch only the expected failure (optional dependency missing)
        # instead of a broad ``except Exception as e`` whose binding was
        # never used; any other error now surfaces instead of being hidden.
        return {}
    return {
        'LowLatency2': ApplyLowLatencyTransformation,
    }
# net should be openvino.inference_engine.IENetwork type, but IE Engine is still optional dependency
def apply_user_transformations(net: object, transforms: list):
    """Apply each requested ``(name, kwargs)`` transformation to *net*.

    Raises:
        Error: if a requested transformation name is not available.
    """
    available = get_available_transformations()
    for name, args in transforms:
        if name not in available:
            raise Error("Transformation {} is not available.".format(name))
        transformation = available[name]
        transformation(net, **args)
def apply_moc_transformations(net: object):
    """Apply the default MOC transformation pipeline to *net* in place."""
    from openvino.offline_transformations import ApplyMOCTransformations # pylint: disable=import-error,no-name-in-module
    ApplyMOCTransformations(net, False)
def apply_offline_transformations(input_model: str, framework: str, transforms: list):
    """Read ``<input_model>_tmp.{xml,bin}``, apply user and MOC
    transformations, and serialize ``<input_model>.{xml,bin,mapping}``."""
    # This variable is only needed by GenerateMappingFile transformation
    # to produce correct mapping
    extract_names = framework in ['tf', 'mxnet', 'kaldi']
    from openvino.inference_engine import read_network # pylint: disable=import-error,no-name-in-module
    from openvino.offline_transformations import GenerateMappingFile # pylint: disable=import-error,no-name-in-module
    net = read_network(input_model + "_tmp.xml", input_model + "_tmp.bin")
    apply_user_transformations(net, transforms)
    apply_moc_transformations(net)
    net.serialize(input_model + ".xml", input_model + ".bin")
    path_to_mapping = input_model + ".mapping"
    GenerateMappingFile(net, path_to_mapping.encode('utf-8'), extract_names)
# CLI entry point: apply offline transformations to <input_model>_tmp.{xml,bin};
# --transform uses the format understood by parse_transform.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_model")
    parser.add_argument("--framework")
    parser.add_argument("--transform")
    args = parser.parse_args()
    apply_offline_transformations(args.input_model, args.framework, parse_transform(args.transform))
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
from typing import List, Optional, Tuple, Union
import attr
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from scipy.stats import norm, t, ttest_ind # @manual
from statsmodels.stats import multitest
# from np.typing import ArrayLike
ArrayLike = np.ndarray
# Single Spike object
@attr.s(auto_attribs=True)
class SingleSpike:
    """A single detected spike: its timestamp, value, and z-score."""
    time: datetime
    value: float
    n_sigma: float  # how many standard deviations above the interval mean
    @property
    def time_str(self) -> str:
        """Spike time formatted as YYYY-MM-DD."""
        return datetime.strftime(self.time, "%Y-%m-%d")
# Changepoint Interval object
@attr.s(auto_attribs=True)
class ChangePointInterval:
    """A ``[start_time, end_time)`` window over a time series.

    Holds the windowed data, exposes summary statistics (mean/variance) and
    lazily-computed spike detection. Supports univariate
    (``num_series == 1``) and multivariate series.
    """
    start_time: datetime
    end_time: datetime
    # Interval immediately preceding this one, if any.
    previous_interval: Optional[ChangePointInterval] = attr.ib(default=None, init=False)
    # Lazily-computed spikes: a flat list for univariate data, a list of
    # per-column lists for multivariate data.
    _all_spikes: Union[
        Optional[List[SingleSpike]], Optional[List[List[SingleSpike]]]
    ] = attr.ib(default=None, init=False)
    # z-score threshold above which a point counts as a spike.
    spike_std_threshold: float = attr.ib(default=2.0, init=False)
    data_df: Optional[pd.DataFrame] = attr.ib(None, init=False)
    _ts_cols: List[str] = attr.ib(factory=lambda: ["value"], init=False)
    num_series: int = 1
    @property
    def data(self) -> Optional[ArrayLike]:
        """Raw values as an array: 1-D if univariate, 2-D otherwise."""
        df = self.data_df
        if df is None:
            return None
        elif self.num_series == 1:
            return df.value.values
        else:
            return df[self._ts_cols].values
    @data.setter
    def data(self, data: TimeSeriesData) -> None:
        # Accept a TimeSeriesData and keep only rows in [start_time, end_time).
        if not data.is_univariate():
            self._ts_cols = list(data.value.columns)
            self.num_series = len(self._ts_cols)
        all_data_df = data.to_dataframe()
        all_data_df.columns = ["time"] + self._ts_cols
        all_data_df["time"] = pd.to_datetime(all_data_df["time"])
        all_data_df = all_data_df.loc[
            (all_data_df.time >= self.start_time) & (all_data_df.time < self.end_time)
        ]
        self.data_df = all_data_df
    def _detect_spikes(self) -> Union[List[SingleSpike], List[List[SingleSpike]]]:
        """Find points whose z-score exceeds ``spike_std_threshold``.

        NOTE: adds z_score column(s) to ``data_df`` as a side effect.
        """
        df = self.data_df
        if df is None:
            raise ValueError("data must be set before spike detection")
        if self.num_series == 1:
            df["z_score"] = (df.value - self.mean_val) / np.sqrt(self.variance_val)
            spike_df = df.query(f"z_score >={self.spike_std_threshold}")
            return [
                SingleSpike(
                    time=row["time"], value=row["value"], n_sigma=row["z_score"]
                )
                for counter, row in spike_df.iterrows()
            ]
        else:
            spikes = []
            for i, c in enumerate(self._ts_cols):
                mean_val, variance_val = self.mean_val, self.variance_val
                if isinstance(mean_val, float) or isinstance(variance_val, float):
                    raise ValueError(
                        f"num_series = {self.num_series} so mean_val and variance_val should have type ArrayLike."
                    )
                df[f"z_score_{c}"] = (df[c] - mean_val[i]) / np.sqrt(variance_val[i])
                spike_df = df.query(f"z_score_{c} >={self.spike_std_threshold}")
                # Columns with no spikes are skipped entirely (not appended
                # as empty lists), so the result may be shorter than
                # num_series.
                if spike_df.shape[0] == 0:
                    continue
                else:
                    spikes.append(
                        [
                            SingleSpike(
                                time=row["time"],
                                value=row[c],
                                n_sigma=row[f"z_score_{c}"],
                            )
                            for counter, row in spike_df.iterrows()
                        ]
                    )
            return spikes
    def extend_data(self, data: TimeSeriesData) -> None:
        """
        Append *data* to the interval, re-clipping to [start_time, end_time).
        """
        new_data_df = data.to_dataframe()
        new_data_df.columns = ["time"] + self._ts_cols
        df = self.data_df
        if df is not None:
            new_data_df = pd.concat([df, new_data_df])
        self.data_df = new_data_df.loc[
            (new_data_df.time >= self.start_time) & (new_data_df.time < self.end_time)
        ]
    @property
    def start_time_str(self) -> str:
        """Interval start formatted as YYYY-MM-DD."""
        return datetime.strftime(self.start_time, "%Y-%m-%d")
    @property
    def end_time_str(self) -> str:
        """Interval end formatted as YYYY-MM-DD."""
        return datetime.strftime(self.end_time, "%Y-%m-%d")
    @property
    def mean_val(self) -> Union[float, ArrayLike]:
        """Mean of the data: scalar if univariate, per-column array otherwise."""
        if self.num_series == 1:
            vals = self.data
            return 0.0 if vals is None else np.mean(vals)
        else:
            data_df = self.data_df
            if data_df is None:
                return np.zeros(self.num_series)
            return np.array([np.mean(data_df[c].values) for c in self._ts_cols])
    @property
    def variance_val(self) -> Union[float, ArrayLike]:
        """Variance of the data: scalar if univariate, per-column array otherwise."""
        if self.num_series == 1:
            vals = self.data
            return 0.0 if vals is None else np.var(vals)
        else:
            data_df = self.data_df
            if data_df is None:
                return np.zeros(self.num_series)
            return np.array([np.var(data_df[c].values) for c in self._ts_cols])
    def __len__(self) -> int:
        df = self.data_df
        return 0 if df is None else len(df)
    @property
    def spikes(self) -> Union[List[SingleSpike], List[List[SingleSpike]]]:
        """Detected spikes (computed once and cached)."""
        spikes = self._all_spikes
        if spikes is None:
            spikes = self._detect_spikes()
            self._all_spikes = spikes
        return spikes
# Percentage Change Object
class PercentageChange:
    """Compare a ``current`` ChangePointInterval against a ``previous`` one.

    Exposes the percentage change of the mean, delta-method confidence bounds
    on the mean ratio, and a t-test score/p-value. For multivariate intervals
    the p-values are corrected for multiple testing using *method*
    (statsmodels ``multipletests``).
    """
    def __init__(
        self,
        current: ChangePointInterval,
        previous: ChangePointInterval,
        method="fdr_bh",
    ):
        self.current = current
        self.previous = previous
        # Confidence bounds on the mean ratio; computed lazily by _delta_method().
        self.upper = None
        self.lower = None
        # t-test results; computed lazily by _ttest().
        self._t_score = None
        self._p_value = None
        self.alpha = 0.05
        self.method = method
        self.num_series = self.current.num_series
    @property
    def ratio_estimate(self) -> Union[float, np.ndarray]:
        """Ratio of current mean to previous mean."""
        # pyre-ignore[6]: Expected float for 1st positional only parameter to call float.__truediv__ but got Union[float, np.ndarray].
        return self.current.mean_val / self.previous.mean_val
    @property
    def perc_change(self) -> float:
        """Percentage change of the mean (e.g. ratio 1.10 -> 10.0)."""
        return (self.ratio_estimate - 1.0) * 100.0
    @property
    def perc_change_upper(self) -> float:
        """Upper confidence bound on the percentage change."""
        if self.upper is None:
            self._delta_method()
        return (self.upper - 1) * 100.0
    @property
    def perc_change_lower(self) -> float:
        """Lower confidence bound on the percentage change."""
        if self.lower is None:
            self._delta_method()
        return (self.lower - 1) * 100.0
    @property
    def direction(self) -> Union[str, ArrayLike]:
        """'up'/'down' per series, based on the sign of perc_change."""
        if self.num_series > 1:
            return np.vectorize(lambda x: "up" if x > 0 else "down")(self.perc_change)
        elif self.perc_change > 0.0:
            return "up"
        else:
            return "down"
    @property
    def stat_sig(self) -> Union[bool, ArrayLike]:
        """True when the ratio confidence interval excludes 1.0."""
        if self.upper is None:
            self._delta_method()
        if self.num_series > 1:
            return np.array(
                [
                    False if self.upper[i] > 1.0 and self.lower[i] < 1 else True
                    for i in range(self.current.num_series)
                ]
            )
        # not stat sig e.g. [0.88, 1.55]
        return not (self.upper > 1.0 and self.lower < 1.0)
    @property
    def score(self) -> float:
        """t-test score (lazily computed)."""
        if self._t_score is None:
            self._ttest()
        return self._t_score
    @property
    def p_value(self) -> float:
        """t-test p-value (lazily computed)."""
        if self._p_value is None:
            self._ttest()
        return self._p_value
    @property
    def mean_previous(self) -> Union[float, np.ndarray]:
        return self.previous.mean_val
    @property
    def mean_difference(self) -> Union[float, np.ndarray]:
        """Difference of current mean minus previous mean."""
        # pyre-ignore[6]: Expected `float` for 1st param but got `Union[float,
        # np.ndarray]`.
        _mean_diff = self.current.mean_val - self.previous.mean_val
        return _mean_diff
    @property
    def ci_upper(self) -> float:
        """Upper confidence bound on the previous-interval mean."""
        sp_mean = self._pooled_stddev()
        df = self._get_df()
        # the minus sign here is non intuitive.
        # this is because, for example, t.ppf(0.025, 30) ~ -1.96
        _ci_upper = self.previous.mean_val - t.ppf(self.alpha / 2, df) * sp_mean
        return _ci_upper
    @property
    def ci_lower(self) -> float:
        """Lower confidence bound on the previous-interval mean."""
        sp_mean = self._pooled_stddev()
        df = self._get_df()
        # the plus sign here is non-intuitive. See comment
        # above
        _ci_lower = self.previous.mean_val + t.ppf(self.alpha / 2, df) * sp_mean
        return _ci_lower
    def _get_df(self) -> float:
        """
        degree of freedom of t-test
        """
        n_1 = len(self.previous)
        n_2 = len(self.current)
        df = n_1 + n_2 - 2
        return df
    def _pooled_stddev(self) -> float:
        """
        This calculates the pooled standard deviation for t-test
        as defined in https://online.stat.psu.edu/stat500/lesson/7/7.3/7.3.1/7.3.1.1
        """
        s_1_sq = self.previous.variance_val
        s_2_sq = self.current.variance_val
        n_1 = len(self.previous)
        n_2 = len(self.current)
        if n_1 == 0 or n_2 == 0:
            return 0.0
        # pyre-ignore[58]: * is not supported for operand types int and Union[float, np.ndarray].
        s_p = np.sqrt(((n_1 - 1) * s_1_sq + (n_2 - 1) * s_2_sq) / (n_1 + n_2 - 2))
        # s_p_mean = s_p * np.sqrt((1. / n_1) + (1./ n_2))
        return s_p
    def _ttest_manual(self) -> Tuple[float, float]:
        """
        scipy's t-test gives nan when one of the arrays has a
        size of 1.
        To repro, run:
        >>> ttest_ind(np.array([1,2,3,4]), np.array([11]), equal_var=True, nan_policy='omit')
        This is implemented to fix this issue
        """
        sp_mean = self._pooled_stddev()
        df = self._get_df()
        # pyre-ignore[6]: Expected float for 1st positional only parameter to call float.__sub__ but got Union[float, np.ndarray].
        t_score = (self.current.mean_val - self.previous.mean_val) / sp_mean
        p_value = t.sf(np.abs(t_score), df) * 2  # sf = 1 - cdf
        return t_score, p_value
    def _ttest(self) -> None:
        """Populate ``_t_score`` and ``_p_value`` for the univariate case
        (delegates to _ttest_multivariate otherwise)."""
        if self.num_series > 1:
            self._ttest_multivariate()
            return
        n_1 = len(self.previous)
        n_2 = len(self.current)
        # if both control and test have one value
        # then using a t test does not make any sense
        if n_1 == 1 and n_2 == 1:
            self._t_score = np.inf
            self._p_value = 0.0
        # when sample size is 1, scipy's t test gives nan,
        # hence we separately handle this case
        # if n_1 == 1 or n_2 == 1:
        #     self._t_score, self._p_value = self._ttest_manual()
        # else:
        #     self._t_score, self._p_value = ttest_ind(
        #         current_data, prev_data, equal_var=True, nan_policy='omit'
        #     )
        # Always use ttest_manual because we changed the std to not include
        # np.sqrt((1. / n_1) + (1./ n_2))
        self._t_score, self._p_value = self._ttest_manual()
    def _ttest_multivariate(self) -> None:
        """Per-series t-tests with multiple-testing correction of p-values."""
        num_series = self.num_series
        p_value_start = np.zeros(num_series)
        t_value_start = np.zeros(num_series)
        n_1 = len(self.previous)
        n_2 = len(self.current)
        if n_1 == 1 and n_2 == 1:
            self._t_score = np.inf * np.ones(num_series)
            self._p_value = np.zeros(num_series)
            return
        elif n_1 == 1 or n_2 == 1:
            t_value_start, p_value_start = self._ttest_manual()
        else:
            current_data = self.current.data
            prev_data = self.previous.data
            if current_data is None or prev_data is None:
                raise ValueError("Interval data not set")
            for i in range(num_series):
                current_slice = current_data[:, i]
                prev_slice = prev_data[:, i]
                t_value_start[i], p_value_start[i] = ttest_ind(
                    current_slice, prev_slice, equal_var=True, nan_policy="omit"
                )
        # The new p-values are the old p-values rescaled so that self.alpha is still the threshold for rejection
        _, self._p_value, _, _ = multitest.multipletests(
            p_value_start, alpha=self.alpha, method=self.method
        )
        self._t_score = np.zeros(num_series)
        # We are using a two-sided test here, so we take inverse_tcdf(self._p_value / 2) with df = len(self.current) + len(self.previous) - 2
        for i in range(self.current.num_series):
            if t_value_start[i] < 0:
                self._t_score[i] = t.ppf(self._p_value[i] / 2, self._get_df())
            else:
                self._t_score[i] = t.ppf(1 - self._p_value[i] / 2, self._get_df())
    def _calc_cov(self) -> float:
        """
        Calculates the covariance of x and y
        """
        current = self.current.data
        previous = self.previous.data
        if current is None or previous is None:
            return np.nan
        n_min = min(len(current), len(previous))
        if n_min == 0:
            return np.nan
        # NOTE(review): the [-n_min:-1] slices drop the most recent point of
        # each interval — confirm whether [-n_min:] was intended.
        current = current[-n_min:-1]
        previous = previous[-n_min:-1]
        return np.cov(current, previous)[0, 1] / n_min
    def _delta_method(self) -> None:
        """Compute ``lower``/``upper`` confidence bounds on the mean ratio via
        the delta-method variance approximation."""
        test_mean = self.current.mean_val
        control_mean = self.previous.mean_val
        test_var = self.current.variance_val
        control_var = self.previous.variance_val
        n_test = len(self.current)
        n_control = len(self.previous)
        cov_xy = self._calc_cov()
        sigma_sq_ratio = (
            test_var / (n_test * (control_mean ** 2))
            - 2 * (test_mean * cov_xy) / (control_mean ** 3)
            + (control_var * (test_mean ** 2)) / (n_control * (control_mean ** 4))
        )
        # the signs appear flipped because norm.ppf(0.025) ~ -1.96
        self.lower = self.ratio_estimate + norm.ppf(self.alpha / 2) * np.sqrt(
            abs(sigma_sq_ratio)
        )
        self.upper = self.ratio_estimate - norm.ppf(self.alpha / 2) * np.sqrt(
            abs(sigma_sq_ratio)
        )
@dataclass
class ConfidenceBand:
    # Lower/upper confidence bounds, each as a time series.
    lower: TimeSeriesData
    upper: TimeSeriesData
class AnomalyResponse:
    """Bundle of detector outputs (scores, confidence band, predictions,
    anomaly magnitudes, statistical-significance flags), all as parallel
    time series, with helpers to roll the window forward or update in place.
    """
    def __init__(
        self,
        scores: TimeSeriesData,
        confidence_band: ConfidenceBand,
        predicted_ts: TimeSeriesData,
        anomaly_magnitude_ts: TimeSeriesData,
        stat_sig_ts: TimeSeriesData,
    ):
        self.scores = scores
        self.confidence_band = confidence_band
        self.predicted_ts = predicted_ts
        self.anomaly_magnitude_ts = anomaly_magnitude_ts
        self.stat_sig_ts = stat_sig_ts
        # Column names when multivariate; empty for univariate series.
        self.key_mapping = []
        self.num_series = 1
        if not self.scores.is_univariate():
            self.num_series = len(scores.value.columns)
            self.key_mapping = list(scores.value.columns)
    def update(
        self,
        time: datetime,
        score: Union[float, ArrayLike],
        ci_upper: Union[float, ArrayLike],
        ci_lower: Union[float, ArrayLike],
        pred: Union[float, ArrayLike],
        anom_mag: Union[float, ArrayLike],
        stat_sig: Union[float, ArrayLike],
    ) -> None:
        """
        Add one more point and remove the last point
        """
        self.scores = self._update_ts_slice(self.scores, time, score)
        self.confidence_band = ConfidenceBand(
            lower=self._update_ts_slice(self.confidence_band.lower, time, ci_lower),
            upper=self._update_ts_slice(self.confidence_band.upper, time, ci_upper),
        )
        self.predicted_ts = self._update_ts_slice(self.predicted_ts, time, pred)
        self.anomaly_magnitude_ts = self._update_ts_slice(
            self.anomaly_magnitude_ts, time, anom_mag
        )
        self.stat_sig_ts = self._update_ts_slice(self.stat_sig_ts, time, stat_sig)
    def _update_ts_slice(
        self, ts: TimeSeriesData, time: datetime, value: Union[float, ArrayLike]
    ) -> TimeSeriesData:
        # Drop the oldest point and append (time, value) at the end,
        # returning a new TimeSeriesData of the same length.
        time = ts.time.iloc[1:].append(pd.Series(time))
        time.reset_index(drop=True, inplace=True)
        if self.num_series == 1:
            value = ts.value.iloc[1:].append(pd.Series(value))
            value.reset_index(drop=True, inplace=True)
            return TimeSeriesData(time=time, value=value)
        else:
            if isinstance(value, float):
                raise ValueError(
                    f"num_series = {self.num_series} so value should have type ArrayLike."
                )
            value_dict = {}
            for i, value_col in enumerate(self.key_mapping):
                value_dict[value_col] = (
                    ts.value[value_col].iloc[1:].append(pd.Series(value[i]))
                )
                value_dict[value_col].reset_index(drop=True, inplace=True)
            return TimeSeriesData(
                pd.DataFrame(
                    {
                        **{"time": time},
                        **{
                            value_col: value_dict[value_col]
                            for value_col in self.key_mapping
                        },
                    }
                )
            )
    def inplace_update(
        self,
        time: datetime,
        score: Union[float, ArrayLike],
        ci_upper: Union[float, ArrayLike],
        ci_lower: Union[float, ArrayLike],
        pred: Union[float, ArrayLike],
        anom_mag: Union[float, ArrayLike],
        stat_sig: Union[float, ArrayLike],
    ) -> None:
        """
        Overwrite the values at *time* in every component series in place.
        """
        self._inplace_update_ts(self.scores, time, score)
        # NOTE(review): the trailing comma makes this statement a 1-tuple
        # expression; harmless but likely unintended.
        self._inplace_update_ts(self.confidence_band.lower, time, ci_lower),
        self._inplace_update_ts(self.confidence_band.upper, time, ci_upper)
        self._inplace_update_ts(self.predicted_ts, time, pred)
        self._inplace_update_ts(self.anomaly_magnitude_ts, time, anom_mag)
        self._inplace_update_ts(self.stat_sig_ts, time, stat_sig)
    def _inplace_update_ts(
        self, ts: TimeSeriesData, time: datetime, value: Union[float, ArrayLike]
    ) -> None:
        # Replace the value(s) at the row(s) whose time equals *time*.
        if self.num_series == 1:
            ts.value.loc[ts.time == time] = value
        else:
            ts.value.loc[ts.time == time] = pd.DataFrame(value)
    def get_last_n(self, N: int) -> AnomalyResponse:
        """
        returns the response for the last N days
        """
        return AnomalyResponse(
            scores=self.scores[-N:],
            confidence_band=ConfidenceBand(
                upper=self.confidence_band.upper[-N:],
                lower=self.confidence_band.lower[-N:],
            ),
            predicted_ts=self.predicted_ts[-N:],
            anomaly_magnitude_ts=self.anomaly_magnitude_ts[-N:],
            stat_sig_ts=self.stat_sig_ts[-N:],
        )
    def __str__(self) -> str:
        str_ret = f"""
        Time: {self.scores.time.values},
        Scores: {self.scores.value.values},
        Upper Confidence Bound: {self.confidence_band.upper.value.values},
        Lower Confidence Bound: {self.confidence_band.lower.value.values},
        Predicted Time Series: {self.predicted_ts.value.values},
        stat_sig:{self.stat_sig_ts.value.values}
        """
        return str_ret
|
from gamepack.item.Item import Item
class StackableItem(Item):
    """An Item that can be stacked: merging items of the same name
    accumulates the stack's ``count``.

    NOTE(review): the ``value`` bookkeeping below mirrors the original code
    and looks suspicious (it scales the *current* value rather than summing
    per-item values) — confirm the intended semantics of ``value``.
    """

    def __init__(self, name='', count=0, value=0):
        Item.__init__(self, name, count, value)
        self.stackable = True  # flag marking this item as stackable

    def add_to_stack(self, item):
        """Merge *item* into this stack when the names match."""
        if item.name == self.name:
            self.count += item.count
            self.value = (item.count * self.value)

    def remove_from_stack(self, count):
        """Remove *count* items from the stack.

        Fix: the original guard was ``count >= self.count``, which allowed
        removing *more* items than the stack holds (driving ``count``
        negative) while forbidding smaller removals. The condition is now
        ``0 < count <= self.count``.
        """
        if 0 < count <= self.count:
            self.count -= count
            self.value = (self.value - (count * self.value))

    def __str__(self):
        return Item.__str__(self)
import unittest
from affine_cipher import decode, encode
# Tests adapted from `problem-specifications//canonical-data.json`
class AffineCipherTest(unittest.TestCase):
    """Exercism affine-cipher tests: E(x) = (a*x + b) mod m with m = 26;
    both encode and decode must reject `a` values not coprime to 26."""
    def test_encode_yes(self):
        self.assertEqual(encode("yes", 5, 7), "xbt")
    def test_encode_no(self):
        self.assertEqual(encode("no", 15, 18), "fu")
    def test_encode_omg(self):
        self.assertEqual(encode("OMG", 21, 3), "lvz")
    def test_encode_o_m_g(self):
        self.assertEqual(encode("O M G", 25, 47), "hjp")
    def test_encode_mindblowingly(self):
        # Ciphertext is grouped into 5-character chunks.
        self.assertEqual(encode("mindblowingly", 11, 15), "rzcwa gnxzc dgt")
    def test_encode_numbers(self):
        # Digits pass through unchanged; punctuation is dropped.
        self.assertEqual(
            encode("Testing,1 2 3, testing.", 3, 4), "jqgjc rw123 jqgjc rw"
        )
    def test_encode_deep_thought(self):
        self.assertEqual(encode("Truth is fiction.", 5, 17), "iynia fdqfb ifje")
    def test_encode_all_the_letters(self):
        self.assertEqual(
            encode("The quick brown fox jumps over the lazy dog.", 17, 33),
            "swxtj npvyk lruol iejdc blaxk swxmh qzglf",
        )
    def test_encode_with_a_not_coprime_to_m(self):
        with self.assertRaisesWithMessage(ValueError):
            encode("This is a test.", 6, 17)
    def test_decode_exercism(self):
        self.assertEqual(decode("tytgn fjr", 3, 7), "exercism")
    def test_decode_a_sentence(self):
        self.assertEqual(
            decode("qdwju nqcro muwhn odqun oppmd aunwd o", 19, 16),
            "anobstacleisoftenasteppingstone",
        )
    def test_decode_numbers(self):
        self.assertEqual(decode("odpoz ub123 odpoz ub", 25, 7), "testing123testing")
    def test_decode_all_the_letters(self):
        self.assertEqual(
            decode("swxtj npvyk lruol iejdc blaxk swxmh qzglf", 17, 33),
            "thequickbrownfoxjumpsoverthelazydog",
        )
    def test_decode_with_no_spaces_in_input(self):
        self.assertEqual(
            decode("swxtjnpvyklruoliejdcblaxkswxmhqzglf", 17, 33),
            "thequickbrownfoxjumpsoverthelazydog",
        )
    def test_decode_with_too_many_spaces(self):
        self.assertEqual(
            decode("vszzm cly yd cg qdp", 15, 16), "jollygreengiant"
        )
    def test_decode_with_a_not_coprime_to_m(self):
        with self.assertRaisesWithMessage(ValueError):
            decode("Test", 13, 5)
    # Utility functions
    def assertRaisesWithMessage(self, exception):
        # Require a non-empty message on the raised exception.
        return self.assertRaisesRegex(exception, r".+")
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
|
# Read weight (kg) and height (m), compute the BMI (IMC) and print the
# corresponding weight category in Portuguese.
peso = float(input("Qual seu peso(kg)? "))
altura = float(input("Qual sua altura(m)? "))
imc = round(peso / (altura**2), 2)
# Fix: corrected user-facing typos "usíario esta" -> "usuário está".
print(f"Com IMC = {imc}, o usuário está", end=" ")
if imc < 18.5:
    print("abaixo do peso")
elif imc < 25:
    print("no peso ideal")
elif imc < 30:
    print("com sobrepeso")
elif imc < 40:
    print("com obesidade")
else:
    print("com obesidade mórbida")
|
#Get a string which is n (non-negative integer) copies of a given string
#
#function to display the string
def dispfunc(iteration, text=None):
    """Print *text* repeated *iteration* times on a single line.

    *text* defaults to the module-level ``entry`` string (the original
    behaviour); passing it explicitly makes the function reusable and
    testable. Uses string repetition instead of the original quadratic
    ``+=`` concatenation loop.
    """
    if text is None:
        text = entry
    print(text * iteration)
#
entry = str(input("\nenter a string : "))
displaynumber = int(input("how many times must it be displayed? : "))
dispfunc(displaynumber)
#experimental
feedback = str(input("\nwould you try it for the stringlength? : "))
# Fix: the original `feedback == "yes" or "Yes" or ...` was always truthy
# (each bare string is its own truthy operand); test membership instead.
if feedback in ("yes", "Yes", "YES", "yeah"):
    dispfunc(len(entry))
#program ends here
from aoc2019.intcode_computer import IntCodeComputer
import multiprocessing as mp
def part1():
    """Run the intcode program from input.txt with input value 1 and print
    every value it produces on its output queue."""
    from queue import Empty  # raised by Queue.get(block=False) when drained
    comp = IntCodeComputer(stdio=False)
    iq = mp.Queue()
    oq = mp.Queue()
    with open('input.txt') as f:
        instructions = f.read()
    iq.put(1)
    comp.run_program(instructions, mem=10000, input_queue=iq, output_queues=[oq])
    # Drain the output queue. Fix: catch only queue.Empty instead of a bare
    # ``except:``, which also swallowed KeyboardInterrupt/SystemExit and any
    # genuine error raised while printing.
    while True:
        try:
            print(oq.get(block=False))
        except Empty:
            break
# Script entry point.
if __name__ == '__main__':
    part1()
# -*- coding: utf-8
from django.apps import AppConfig
class HvadCleanerversionConfig(AppConfig):
    """Django application configuration for the hvad_cleanerversion app."""
    name = 'hvad_cleanerversion'
|
import sys
alfabetoMay = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S','T', 'U', 'V', 'W', 'X', 'Y', 'Z']
alfabetoMin = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
def main(argv = sys.argv):
    """Decode the built-in Caesar-shifted sample text.

    argv[1] is the shift amount that was used to encode the text; the
    decoded result is printed via calcula().
    """
    num = int(argv[1])
    # palabra = input("> ")
    # Sample Spanish text encoded with a Caesar shift (decodes with num=15).
    palabra = '''Rpnd Yjaxd Réhpg, fjt th jcd st adh igth báh vgpcsth rpexipcth st ap Wxhidgxp rdc Patypcsgd Bpvcd n rd
c Cpedatóc, th ipbqxéc jcd st adh igth báh rdchxstgpqath wxhidgxpsdgth apixcdh, rdc Rpnd Rgxhed Hpajhixd n rdc Ixid Axkxd, udgbpcsd ta tytbeapg igxjckxgpid sta etgídsd raáhxrd edg tmrtatcrxp
, etgídsd ktgspstgpbtcit «ájgtd» st aph atigph apixcph.
Hx wph advgpsd aatvpg wphip pfjí, ij gtrdbetchp htgá ap st hpqtg fjt ap rdcigphtñp epgp hjetgpg thit gtid th Ratdepigp. N Yjaxd Réhpg th idsd thid, ixtct ipa hxvcxuxrprxóc, egtrxhpbtcit rdbd
 wxhidgxpsdg st hí bxhbd, cpggpsdg st hjh egdexph wpopñph vjtggtgph n st hj edaíixrp.'''
    print(calcula(num, palabra))
def calcula(num, palabra):
    """Shift every ASCII letter of *palabra* back by *num* positions.

    Case is preserved. Spaces and any character outside A-Z/a-z (accented
    letters, digits, punctuation, newlines) pass through unchanged — exactly
    as in the original linear-scan implementation.

    The original special-cased the wrap-around and otherwise relied on
    Python's negative indexing; a single modulo handles both directions
    uniformly, and string membership/index replaces the manual scan over
    the module-level alphabet lists.
    """
    import string
    upper = string.ascii_uppercase
    lower = string.ascii_lowercase
    piezas = []
    for ch in palabra:
        if ch in upper:
            piezas.append(upper[(upper.index(ch) - num) % 26])
        elif ch in lower:
            piezas.append(lower[(lower.index(ch) - num) % 26])
        else:
            # Not a plain Latin letter: keep it as-is (includes ' ').
            piezas.append(ch)
    return ''.join(piezas)
def isMay(letra):
    """True when *letra* is unchanged by upper-casing (i.e. not lowercase)."""
    return letra.upper() == letra
# Script entry point: the process exit status comes from main()'s return.
if __name__ == "__main__":
    sys.exit(main())
|
import google.oauth2.service_account
import pytest
@pytest.fixture(params=["env"])
def project(request, project_id):
    """Project id for the "env" param; None for "none" (or anything else)."""
    options = {"env": project_id, "none": None}
    return options.get(request.param)
@pytest.fixture()
def credentials(private_key_path):
    """Service-account credentials loaded from the private key file."""
    load = google.oauth2.service_account.Credentials.from_service_account_file
    return load(private_key_path)
@pytest.fixture()
def gbq_connector(project, credentials):
    """GbqConnector wired to the test project and credentials."""
    from pandas_gbq import gbq

    connector = gbq.GbqConnector(project, credentials=credentials)
    return connector
@pytest.fixture()
def random_dataset(bigquery_client, random_dataset_id):
    """Create a BigQuery dataset with a random id and return the Dataset."""
    from google.cloud import bigquery

    ref = bigquery_client.dataset(random_dataset_id)
    new_dataset = bigquery.Dataset(ref)
    bigquery_client.create_dataset(new_dataset)
    return new_dataset
@pytest.fixture()
def tokyo_dataset(bigquery_client, random_dataset_id):
    """Create a dataset in the asia-northeast1 region; return its id."""
    from google.cloud import bigquery

    ref = bigquery_client.dataset(random_dataset_id)
    regional_dataset = bigquery.Dataset(ref)
    regional_dataset.location = "asia-northeast1"
    bigquery_client.create_dataset(regional_dataset)
    return random_dataset_id
@pytest.fixture()
def tokyo_table(bigquery_client, tokyo_dataset):
    """Create a small random table in the Tokyo-region dataset; return its id."""
    table_id = "tokyo_table"
    # Create a random table using DDL.
    # https://github.com/GoogleCloudPlatform/golang-samples/blob/2ab2c6b79a1ea3d71d8f91609b57a8fbde07ae5d/bigquery/snippets/snippet.go#L739
    # The query must run in the same location as the target dataset.
    bigquery_client.query(
        """CREATE TABLE {}.{}
    AS SELECT
    2000 + CAST(18 * RAND() as INT64) as year,
    IF(RAND() > 0.5,"foo","bar") as token
    FROM UNNEST(GENERATE_ARRAY(0,5,1)) as r
    """.format(
            tokyo_dataset, table_id
        ),
        location="asia-northeast1",
    ).result()  # block until the table actually exists
    return table_id
@pytest.fixture()
def gbq_dataset(project, credentials):
    """pandas-gbq _Dataset helper bound to the test project."""
    from pandas_gbq import gbq

    dataset_helper = gbq._Dataset(project, credentials=credentials)
    return dataset_helper
@pytest.fixture()
def gbq_table(project, credentials, random_dataset_id):
    """pandas-gbq _Table helper bound to the random test dataset."""
    from pandas_gbq import gbq

    table_helper = gbq._Table(project, random_dataset_id, credentials=credentials)
    return table_helper
|
import torch
import math
from .vqa_model import VQAModel
class Clean_Uniter(torch.nn.Module):
    """Pretrained UNITER backbone with a 1-unit linear scoring head."""

    def __init__(self):
        super(Clean_Uniter, self).__init__()
        # Load pretrained UNITER
        self.uniter = VQAModel(num_answers = 69, model = 'uniter')
        self.uniter.encoder.load('./Transformers_VQA/models/pretrained/uniter-base.pt')
        # Convert input (Ojb_idx: 512, KB: 1024, visual: 512, pos: 3*128)
        # Project 512-dim visual features up to the 2048-dim image input
        # expected by UNITER's img_embeddings below.
        self.lin_vis = torch.nn.Linear(512, 2048)
        # Convert output
        # Map each 768-dim hidden state to a single logit.
        self.clsHead = torch.nn.Linear(768, 1)

    def forward(self, input_ids , txt_seg_ids, vis_feats, obj_embs, obj_ids, pos_x, pos_y, pos_z, bboxes, vis_seg, extended_attention_mask):
        # NOTE(review): obj_embs, obj_ids, pos_x/pos_y/pos_z are accepted but
        # never used in this forward pass — presumably shared signature with
        # sibling model variants; confirm before removing.
        # combine object features
        vis_feats = self.lin_vis(vis_feats.float())
        # Text embeddings and per-token image-type embeddings from UNITER.
        word_embeddings = self.uniter.encoder.model.uniter.embeddings(input_ids, txt_seg_ids)
        img_type_embeddings = self.uniter.encoder.model.uniter.embeddings.token_type_embeddings(vis_seg)
        img_embeddings = self.uniter.encoder.model.uniter.img_embeddings(vis_feats, bboxes, img_type_embeddings)
        # Concatenate text and image embeddings along dim=1 (sequence axis)
        # and run the joint transformer encoder.
        embeddings = torch.cat([word_embeddings,img_embeddings],dim=1)
        lang_v_feats = self.uniter.encoder.model.uniter.encoder(hidden_states = embeddings, attention_mask = extended_attention_mask)
        # One logit per sequence position.
        out = self.clsHead(lang_v_feats)
        return out
import os
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
class HomeHandler(webapp.RequestHandler):
    """Serve the rendered home page template on GET requests."""

    def get(self):
        template_path = os.path.join(
            os.path.dirname(__file__), 'templates/home.html')
        rendered = template.render(template_path, {})
        self.response.out.write(rendered)
|
# MIT License
#
# Copyright (c) 2020 Andrew Krepps
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import util
def count_letter_instances(password, letter):
    """Return how many characters of *password* compare equal to *letter*.

    A generator expression avoids materializing the intermediate list the
    original built just to sum it.
    """
    return sum(1 for c in password if c == letter)
def password_is_valid_for_part1(entry):
    """Part 1 rule: the letter's occurrence count must lie in [min, max]."""
    occurrences = sum(1 for ch in entry["password"] if ch == entry["letter"])
    return entry["min"] <= occurrences <= entry["max"]
def password_is_valid_for_part2(entry):
    """Part 2 rule: exactly one of the two 1-based positions holds the letter."""
    pw, letter = entry["password"], entry["letter"]
    first = pw[entry["min"] - 1] == letter
    second = pw[entry["max"] - 1] == letter
    return first != second
def count_valid_passwords(entries, validation_rule):
    """Count entries for which validation_rule(entry) is truthy.

    Generator expression instead of an intermediate list comprehension.
    """
    return sum(1 for entry in entries if validation_rule(entry))
def parse_entries(lines):
    """Parse puzzle lines of the form "1-3 a: abcde" into policy dicts.

    Each resulting dict has the keys "password", "letter", "min" and "max".
    """
    def one(raw):
        fields = raw.split()
        bounds = fields[0].split("-")
        return {
            "password": fields[2],
            "letter": fields[1][0],
            "min": int(bounds[0]),
            "max": int(bounds[1]),
        }

    return [one(raw) for raw in lines]
def get_part1_answer(entries):
    """Number of entries valid under the part-1 (occurrence range) rule."""
    return count_valid_passwords(entries, password_is_valid_for_part1)
def get_part2_answer(entries):
    """Number of entries valid under the part-2 (exactly-one-position) rule."""
    return count_valid_passwords(entries, password_is_valid_for_part2)
def run():
    """Read the day-02 puzzle input and print both answers."""
    entries = parse_entries(util.get_input_file_lines("day02.txt"))
    print(f"The answer to part 1 is {get_part1_answer(entries)}")
    print(f"The answer to part 2 is {get_part2_answer(entries)}")
|
from .core import DirMap
|
# Script to run experiments using CHKMRT strategy
import copy
import random
from timeit import default_timer as timer
import networkx as nx
import csv
import sys
sys.path.append('../code')
from probesim.evaluation import seqstats2 as stats
from probesim.strategy import chkmrt as strategy
from probesim.topology import inputgen
from probesim.topology import weighting
from probesim import util as ps
from probesim import jrnx
from probesim.topology import dcell
from probesim.topology import xgft
import multiprocessing as mp
from os.path import isdir
from os.path import isfile as isfile
from os import makedirs
import glob
# Module-wide debug flag (no debug output when False).
DEBUG = False
import argparse
# Command-line interface. Topology class, trial count, data directory and
# optimization goal are each mutually exclusive option groups.
parser = argparse.ArgumentParser(description='Run simulations using CHKMRT (Cohen et al.) MAX and SUM programs')
parser.add_argument('--simroot', metavar='DIR', type=str, default='../', help='Root directory for sim hierarchy (likely netpas/)')
parser.add_argument('--ifsuff', type=str, default='', help='Suffix on input file (excluding extension)')
parser.add_argument('--nprocs', type=int, default=None, help='Number of processes to use')
seed_group = parser.add_mutually_exclusive_group(required=False)
seed_group.add_argument('--seed', type=int, default=None, help='Seed to use for top-level RNG')
seed_group.add_argument('--seedfile', action='store_true', default=None, help='Use seed file(s)')
parser.add_argument('--k', type=int, default=None, help='Number of paths k to probe (in expectation) per timestep')
parser.add_argument('--maxs', type=int, default=None, help='Maximum number of timesteps to probe')
# Which topology collection/class to use
top_group = parser.add_mutually_exclusive_group(required=True)
top_group.add_argument('--zoo', action='store_true', help='Use topologies from the topology zoo')
top_group.add_argument('--cat', action='store_true', help='Use topologies from the catalog')
top_group.add_argument('--smallcat', action='store_true', help='Use topologies from the small catalog')
top_group.add_argument('--dcell', action='store_true', help='Use DCell topologies')
top_group.add_argument('--spdcell', action='store_true', help='Use DCell topologies with shortest-paths routing')
top_group.add_argument('--xgft', action='store_true', help='Use XGFT topologies')
top_group.add_argument('--gnm', action='store_true', help='Use random G_{n,m} graphs')
top_group.add_argument('--ba', action='store_true', help='Use Barabasi--Albert graphs')
top_group.add_argument('--fracba', action='store_true', help='Use fractional version of Barabasi--Albert graphs')
# How many trials?
trials_group = parser.add_mutually_exclusive_group(required=False)
trials_group.add_argument('--T100', action='store_true', help='Run 100 trials; overridden by --test')
trials_group.add_argument('--T316', action='store_true', help='Run 316 trials; overridden by --test')
trials_group.add_argument('--T1000', action='store_true', help='Run 1000 trials; overridden by --test')
trials_group.add_argument('--T3162', action='store_true', help='Run 3162 trials; overridden by --test')
trials_group.add_argument('--T10000', action='store_true', help='Run 10000 trials; overridden by --test')
# Which data directory to use (default is local, i.e., data/)
data_group = parser.add_mutually_exclusive_group(required=False)
data_group.add_argument('--test', action='store_true', help='Use data from testdata/; always runs three trials')
data_group.add_argument('--server', action='store_true', help='Use data from server-data/')
data_group.add_argument('--local', action='store_true', help='Use data from local-data/; always runs 25 trials')
# Which goal to optimize
goal_group = parser.add_mutually_exclusive_group(required=True)
goal_group.add_argument('--max', action='store_true', help='Use max goal with cvxopt solver')
goal_group.add_argument('--maxcvxpy', action='store_true', help='Use max goal with cvxpy solver')
goal_group.add_argument('--sum', action='store_true', help='Use sum goal')
########
# Parse at import time: this module is a script, not a library.
args = parser.parse_args()
########
def chkmrt_trial_from_paths(chkmrt_args):
    """
    Run a single CHKMRT trial when given the graph and probing paths.
    :param chkmrt_args is a tuple with the following:
        ofname: file name for output
        header_row: header for use in opening a DictWriter
        trial_num: index of this trial
        trial_seed: main seed used for this trial
        P: set of probing paths
        G: NetworkX graph object
        testset: collection of tests to be evaluated
        q: distribution on tests
        k: number of paths to probe in expectation per timestep
        maxs: maximum number of timesteps to simulate
        output: dict with partial info about this trial; this is mutated here
        statImp: importance weight for computing result statistics
        statTol: tolerance weight for computing result statistics
    This runs a trial, computes the statistics on the probing sequence, and writes the result to ofile (after taking a lock on it so that many copies of this can be run in parallel).
    """
    (ofname, header_row, trial_num, trial_seed, P, G, testset, q, k, maxs, output, statImp, statTol) = chkmrt_args
    # Create the RNG for this trial and seed it with the seed from the
    # top-level RNG passed in the argument. Even if we don't use all these
    # seeds, create them in the same order so that, e.g., sim_seed is always
    # the seventh output of our trial's RNG. Save the relevant ones in
    # the output dict to be written to file.
    output['trialseed'] = trial_seed
    output['trialnum'] = trial_num
    trial_RNG = random.Random()
    trial_RNG.seed(a = trial_seed)
    # Fix: sys.maxint is Python-2-only (AttributeError on Python 3);
    # sys.maxsize gives the same bounds and exists on Python 2.6+ and 3.x.
    g_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    b_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    p_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    w_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    t_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    res_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    sim_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    # Seeds:
    #  g: graph G
    #  b: beacons
    #  p: paths P
    #  w: weighting
    #  t: test set
    #  res: reserved for future use
    #  sim: simulation
    # Graph/beacons/paths come in pre-built here, so those seeds are 'na'.
    output['gseed'] = 'na'
    output['bseed'] = 'na'
    output['pseed'] = 'na'
    output['wseed'] = 'na'
    output['tseed'] = 'na'
    output['simseed'] = sim_seed
    # Below, olock is a global lock that is shared by all processes
    # running this in parallel. It is acquired before writing to the
    # output file and released after the writing is done.
    # Run a trial, with sim_seed as the randomness, to produce the
    # probing sequence seq
    try:
        start_time = timer()
        seq = strategy.chkmrt_trial(testset, q, trialseed=sim_seed, maxsteps=maxs, k=k)
        output['simtime'] = timer() - start_time
    except Exception as e:
        output['simtime'] = timer() - start_time
        output['status'] = 'Exception raised during simulate: ' + repr(e)
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)
        return
    # Compute the statistics on seq
    try:
        results = stats.all_sequence_stats(seq, P, G, importance=statImp, tolerance=statTol)
    except Exception as e:
        output['status'] = 'Exception raised during stats computation ' + repr(e)
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)
        return
    # Update output with the statistics and write it to file
    output.update(results)
    with olock:
        with open(ofname, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
            writer.writerow(output)
    return
def chkmrt_gnm_trial(chkmrt_args):
    """
    Run a single CHKMRT trial for specified G_{n,m} parameters
    :param chkmrt_args is a tuple with the following:
        ofname: file name for output
        header_row: header for use in opening a DictWriter
        trial_num: index of this trial
        b: number of beacons to use
        n: number of graph nodes
        m: number of graph edges
        trial_seed: main seed used for this trial
        goalStr: string identifying goal ('max' or 'sum')
        k: number of paths to probe in expectation per timestep
        maxs: maximum number of timesteps to simulate
        output: dict with partial info about this trial; this is mutated here
        edge_attr: weight attribute for edges
        node_attr: weight attribute for nodes
        statImp: importance weight for computing result statistics
        statTol: tolerance weight for computing result statistics
    This generates a G_{n,m} graph, chooses beacons, computes probing paths, runs a trial, computes the statistics on the probing sequence, and writes the result to ofile (after taking a lock on it so that many copies of this can be run in parallel).
    """
    (ofname, header_row, trial_num, b, n, m, trial_seed, goalStr, k, maxs, output, edge_attr, node_attr, statImp, statTol) = chkmrt_args
    # Create the RNG for this trial and seed it with the seed from the
    # top-level RNG passed in the argument. Even if we don't use all these
    # seeds, create them in the same order so that, e.g., sim_seed is always
    # the seventh output of our trial's RNG. Save the relevant ones in
    # the output dict to be written to file.
    output['trialseed'] = trial_seed
    output['trialnum'] = trial_num
    trial_RNG = random.Random()
    trial_RNG.seed(a = trial_seed)
    # Fix: sys.maxint is Python-2-only (AttributeError on Python 3);
    # sys.maxsize gives the same bounds and exists on Python 2.6+ and 3.x.
    g_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    b_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    p_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    w_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    t_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    res_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    sim_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    # Seeds:
    #  g: graph G
    #  b: beacons
    #  p: paths P
    #  w: weighting
    #  t: test set
    #  res: reserved for future use
    #  sim: simulation
    output['gseed'] = g_seed
    output['bseed'] = b_seed
    output['pseed'] = p_seed
    output['wseed'] = 'na'
    output['tseed'] = 'na'
    output['simseed'] = sim_seed
    # Below, olock is a global lock that is shared by all processes
    # running this in parallel. It is acquired before writing to the
    # output file and released after the writing is done.
    # Construct a G_{n,m} graph using g_seed as its randomness. Make
    # sure this is connected.
    G = jrnx.jr_gnm_random_graph(n,m,seed=g_seed)
    if not nx.is_connected(G):
        output['status'] = 'G is unexpectedly not connected!'
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)
        return
    # Choose b beacons using a RNG seeded with b_seed
    beacon_RNG = random.Random()
    beacon_RNG.seed(a=b_seed)
    beacons = beacon_RNG.sample(sorted(G.nodes()),b)
    # Construct all paths in G with randomness p_seed
    all_paths = jrnx.jr_all_pairs_shortest_path(G, seed=p_seed)
    P = dict()
    # Now select only the paths that are between (distinct) beacons
    for s in beacons:
        for d in beacons:
            if d != s:
                P.setdefault(s, dict())[d] = all_paths[s][d]
    # Add weights to edges and nodes if desired
    if edge_attr is not None:
        weighting.add_edge_weights_from_paths(G, P, edge_attr_name=edge_attr)
        weighting.add_reciprocal_weights(G, src_attr_name=edge_attr, dst_attr_name=statTol, components='edges', use_default=True, default=0)
    if node_attr is not None:
        weighting.add_node_weights_from_edge_mean(G, node_attr_name=node_attr, edge_attr_name=edge_attr)
        weighting.add_reciprocal_weights(G, src_attr_name=node_attr, dst_attr_name=statTol, components='nodes', use_default=True, default=0)
    # Compute tests and distribution over them
    testset = strategy.generate_tests(P)
    q = strategy.compute_distribution(G, testset, importance=edge_attr, goal=goalStr)
    # Run a trial, with sim_seed as the randomness, to produce the
    # probing sequence seq
    try:
        start_time = timer()
        seq = strategy.chkmrt_trial(testset, q, trialseed=sim_seed, maxsteps=maxs, k=k)
        output['simtime'] = timer() - start_time
    except Exception as e:
        output['simtime'] = timer() - start_time
        output['status'] = 'Exception raised during simulate: ' + repr(e)
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)
        return
    # Compute the statistics on seq
    try:
        results = stats.all_sequence_stats(seq, P, G, importance=statImp, tolerance=statTol)
    except Exception as e:
        output['status'] = 'Exception raised during stats computation ' + repr(e)
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)
        return
    # Update output with the statistics and write it to file
    output.update(results)
    with olock:
        with open(ofname, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
            writer.writerow(output)
    return
def chkmrt_ba_trial(chkmrt_args):
    """
    Run a single CHKMRT trial for specified B--A graph parameters
    :param chkmrt_args is a tuple with the following:
        fractional: whether to use fractional version of B--A algorithm
        ofname: file name for output
        header_row: header for use in opening a DictWriter
        trial_num: index of this trial
        b: number of beacons to use
        n: number of graph nodes
        bam: number of edges for each new node in B--A model; int for non-fractional model, float for fractional model
        trial_seed: main seed used for this trial
        goalStr: string identifying goal ('max' or 'sum')
        k: number of paths to probe in expectation per timestep
        maxs: maximum number of timesteps to simulate
        output: dict with partial info about this trial; this is mutated here
        edge_attr: weight attribute for edges
        node_attr: weight attribute for nodes
        statImp: importance weight for computing result statistics
        statTol: tolerance weight for computing result statistics
    This generates a B--A preferential-attachment graph, chooses beacons, computes probing paths, runs a trial, computes the statistics on the probing sequence, and writes the result to ofile (after taking a lock on it so that many copies of this can be run in parallel).
    """
    (fractional, ofname, header_row, trial_num, b, n, bam, trial_seed, goalStr, k, maxs, output, edge_attr, node_attr, statImp, statTol) = chkmrt_args
    # Create the RNG for this trial and seed it with the seed from the
    # top-level RNG passed in the argument. Even if we don't use all these
    # seeds, create them in the same order so that, e.g., sim_seed is always
    # the seventh output of our trial's RNG. Save the relevant ones in
    # the output dict to be written to file.
    output['trialseed'] = trial_seed
    output['trialnum'] = trial_num
    output['fractional'] = fractional
    trial_RNG = random.Random()
    trial_RNG.seed(a = trial_seed)
    # Fix: sys.maxint is Python-2-only (AttributeError on Python 3);
    # sys.maxsize gives the same bounds and exists on Python 2.6+ and 3.x.
    g_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    b_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    p_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    w_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    t_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    res_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    sim_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    # Seeds:
    #  g: graph G
    #  b: beacons
    #  p: paths P
    #  w: weighting
    #  t: test set
    #  res: reserved for future use
    #  sim: simulation
    output['gseed'] = g_seed
    output['bseed'] = b_seed
    output['pseed'] = p_seed
    output['wseed'] = 'na'
    output['tseed'] = 'na'
    output['simseed'] = sim_seed
    # Below, olock is a global lock that is shared by all processes
    # running this in parallel. It is acquired before writing to the
    # output file and released after the writing is done.
    # Construct a B--A graph using g_seed as its randomness. Use fractional
    # version as indicated by fractional parameter. Make
    # sure G is connected.
    if fractional:
        G = jrnx.jr_fractional_barabasi_albert_graph(n,bam,seed=g_seed)
    else:
        G = jrnx.jr_barabasi_albert_graph(n,bam,seed=g_seed)
    if not nx.is_connected(G):
        output['status'] = 'G is unexpectedly not connected!'
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)
        return
    # Choose b beacons using a RNG seeded with b_seed
    beacon_RNG = random.Random()
    beacon_RNG.seed(a=b_seed)
    beacons = beacon_RNG.sample(sorted(G.nodes()),b)
    # Construct all paths in G with randomness p_seed
    all_paths = jrnx.jr_all_pairs_shortest_path(G, seed=p_seed)
    P = dict()
    # Now select only the paths that are between (distinct) beacons
    for s in beacons:
        for d in beacons:
            if d != s:
                P.setdefault(s, dict())[d] = all_paths[s][d]
    # Add weights to edges and nodes if desired
    if edge_attr is not None:
        weighting.add_edge_weights_from_paths(G, P, edge_attr_name=edge_attr)
        weighting.add_reciprocal_weights(G, src_attr_name=edge_attr, dst_attr_name=statTol, components='edges', use_default=True, default=0)
    if node_attr is not None:
        weighting.add_node_weights_from_edge_mean(G, node_attr_name=node_attr, edge_attr_name=edge_attr)
        weighting.add_reciprocal_weights(G, src_attr_name=node_attr, dst_attr_name=statTol, components='nodes', use_default=True, default=0)
    # Compute tests and distribution over them
    testset = strategy.generate_tests(P)
    q = strategy.compute_distribution(G, testset, importance=edge_attr, goal=goalStr)
    # Run a trial, with sim_seed as the randomness, to produce the
    # probing sequence seq
    try:
        start_time = timer()
        seq = strategy.chkmrt_trial(testset, q, trialseed=sim_seed, maxsteps=maxs, k=k)
        output['simtime'] = timer() - start_time
    except Exception as e:
        output['simtime'] = timer() - start_time
        output['status'] = 'Exception raised during simulate: ' + repr(e)
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)
        return
    # Compute the statistics on seq
    try:
        results = stats.all_sequence_stats(seq, P, G, importance=statImp, tolerance=statTol)
    except Exception as e:
        output['status'] = 'Exception raised during stats computation ' + repr(e)
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)
        return
    # Update output with the statistics and write it to file
    output.update(results)
    with olock:
        with open(ofname, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
            writer.writerow(output)
    return
def chkmrt_trial_from_beacons(chkmrt_args):
    """
    Run a single CHKMRT trial when given the graph and beacon set
    :param chkmrt_args is a tuple with the following:
        ofname: file name for output
        header_row: header for use in opening a DictWriter
        trial_num: index of this trial
        G: NetworkX graph object
        B: set of nodes from G to use as beacons
        trial_seed: main seed used for this trial
        goalStr: string identifying goal ('max' or 'sum')
        k: number of paths to probe in expectation per timestep
        maxs: maximum number of timesteps to simulate
        output: dict with partial info about this trial; this is mutated here
        edge_attr: weight attribute for edges
        node_attr: weight attribute for nodes
        statImp: importance weight for computing result statistics
        statTol: tolerance weight for computing result statistics
    This chooses probing paths between the specified set of beacons, runs a trial, computes the statistics on the probing sequence, and writes the result to ofile (after taking a lock on it so that many copies of this can be run in parallel).
    """
    (ofname, header_row, trial_num, G, B, trial_seed, goalStr, k, maxs, output, edge_attr, node_attr, statImp, statTol) = chkmrt_args
    # Create the RNG for this trial and seed it with the seed from the
    # top-level RNG passed in the argument. Even if we don't use all these
    # seeds, create them in the same order so that, e.g., sim_seed is always
    # the seventh output of our trial's RNG. Save the relevant ones in
    # the output dict to be written to file.
    output['trialseed'] = trial_seed
    output['trialnum'] = trial_num
    trial_RNG = random.Random()
    trial_RNG.seed(a = trial_seed)
    # Fix: sys.maxint is Python-2-only (AttributeError on Python 3);
    # sys.maxsize gives the same bounds and exists on Python 2.6+ and 3.x.
    g_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    b_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    p_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    w_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    t_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    res_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    sim_seed = trial_RNG.randint(-sys.maxsize - 1, sys.maxsize)
    # Seeds:
    #  g: graph G
    #  b: beacons
    #  p: paths P
    #  w: weighting
    #  t: test set
    #  res: reserved for future use
    #  sim: simulation
    output['gseed'] = g_seed
    output['bseed'] = 'na'
    output['pseed'] = p_seed
    output['wseed'] = 'na'
    output['tseed'] = 'na'
    output['simseed'] = sim_seed
    # Below, olock is a global lock that is shared by all processes
    # running this in parallel. It is acquired before writing to the
    # output file and released after the writing is done.
    # Construct all paths in G with randomness p_seed
    all_paths = jrnx.jr_all_pairs_shortest_path(G, seed=p_seed)
    P = dict()
    # Now select only the paths that are between (distinct) beacons
    for s in B:
        for d in B:
            if d != s:
                P.setdefault(s, dict())[d] = all_paths[s][d]
    # Add weights to edges and nodes if desired
    if edge_attr is not None:
        weighting.add_edge_weights_from_paths(G, P, edge_attr_name=edge_attr)
        weighting.add_reciprocal_weights(G, src_attr_name=edge_attr, dst_attr_name=statTol, components='edges', use_default=True, default=0)
    if node_attr is not None:
        weighting.add_node_weights_from_edge_mean(G, node_attr_name=node_attr, edge_attr_name=edge_attr)
        weighting.add_reciprocal_weights(G, src_attr_name=node_attr, dst_attr_name=statTol, components='nodes', use_default=True, default=0)
    # Compute tests and distribution over them
    testset = strategy.generate_tests(P)
    q = strategy.compute_distribution(G, testset, importance=edge_attr, goal=goalStr)
    # Run a trial, with sim_seed as the randomness, to produce the
    # probing sequence seq
    try:
        start_time = timer()
        seq = strategy.chkmrt_trial(testset, q, trialseed=sim_seed, maxsteps=maxs, k=k)
        output['simtime'] = timer() - start_time
    except Exception as e:
        output['simtime'] = timer() - start_time
        output['status'] = 'Exception raised during simulate: ' + repr(e)
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)
        return
    # Compute the statistics on seq
    try:
        results = stats.all_sequence_stats(seq, P, G, importance=statImp, tolerance=statTol)
    except Exception as e:
        output['status'] = 'Exception raised during stats computation ' + repr(e)
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)
        return
    # Update output with the statistics and write it to file
    output.update(results)
    with olock:
        with open(ofname, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
            writer.writerow(output)
    return
def chkmrt_trial_from_num_beacons(chkmrt_args):
    """
    Run a single CHKMRT trial when given the graph and number of beacons

    :param chkmrt_args is a tuple with the following:
        ofname: file name for output
        header_row: header for use in opening a DictWriter
        trial_num: index of this trial
        G: NetworkX graph object
        num_beacons: number of nodes from G to use as beacons
        trial_seed: main seed used for this trial
        goalStr: string identifying goal ('max' or 'sum')
        k: number of paths to probe in expectation per timestep
        maxs: maximum number of timesteps to simulate
        output: dict with partial info about this trial; this is mutated here
        edge_attr: weight attribute for edges
        node_attr: weight attribute for nodes
        statImp: importance weight for computing result statistics
        statTol: tolerance weight for computing result statistics

    This chooses a set of beacons of specified size, chooses probing paths
    between the set of beacons, runs a trial, computes the statistics on the
    probing sequence, and writes the result to ofile (after taking a lock on
    it so that many copies of this can be run in parallel).
    """
    (ofname, header_row, trial_num, G, num_beacons, trial_seed, goalStr, k, maxs, output, edge_attr, node_attr, statImp, statTol) = chkmrt_args
    # Create the RNG for this trial and seed it with the seed from the
    # top-level RNG passed in the argument. Even if we don't use all these
    # seeds, create them in the same order so that, e.g., sim_seed is always
    # the seventh output of our trial's RNG. Save the relevant ones in
    # the output dict to be written to file.
    output['trialseed'] = trial_seed
    output['trialnum'] = trial_num
    trial_RNG = random.Random()
    trial_RNG.seed(a = trial_seed)
    # NOTE: sys.maxint makes this Python 2 only; each seed is drawn from the
    # full signed machine-integer range.
    g_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    b_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    p_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    w_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    t_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    res_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    sim_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    # Seeds:
    # g: graph G
    # b: beacons
    # p: paths P
    # w: weighting
    # t: test set
    # res: reserved for future use
    # sim: simulation
    output['gseed'] = g_seed
    output['bseed'] = b_seed
    output['pseed'] = p_seed
    # w_seed and t_seed are drawn (to keep the RNG stream stable) but unused
    output['wseed'] = 'na'
    output['tseed'] = 'na'
    output['simseed'] = sim_seed
    # Below, olock is a global lock that is shared by all processes
    # running this in parallel. It is acquired before writing to the
    # output file and released after the writing is done.
    # Choose num_beacons beacons using a RNG seeded with b_seed
    beacon_RNG = random.Random()
    beacon_RNG.seed(a=b_seed)
    # sorted() makes the sample deterministic given the seed
    beacons = beacon_RNG.sample(sorted(G.nodes()),num_beacons)
    # Construct all paths in G with randomness p_seed
    all_paths = jrnx.jr_all_pairs_shortest_path(G, seed=p_seed)
    P = dict()
    # Now select only the paths that are between (distinct) beacons
    for s in beacons:
        for d in beacons:
            if d != s:
                P.setdefault(s, dict())[d] = all_paths[s][d]
    # Add weights to edges and nodes if desired
    if edge_attr is not None:
        weighting.add_edge_weights_from_paths(G, P, edge_attr_name=edge_attr)
        weighting.add_reciprocal_weights(G, src_attr_name=edge_attr, dst_attr_name=statTol, components='edges', use_default=True, default=0)
    if node_attr is not None:
        weighting.add_node_weights_from_edge_mean(G, node_attr_name=node_attr, edge_attr_name=edge_attr)
        weighting.add_reciprocal_weights(G, src_attr_name=node_attr, dst_attr_name=statTol, components='nodes', use_default=True, default=0)
    # Compute tests and distribution over them
    testset = strategy.generate_tests(P)
    q = strategy.compute_distribution(G, testset, importance=edge_attr, goal=goalStr)
    # Run a trial, with sim_seed as the randomness, to produce the
    # probing sequence seq; record wall-clock simulation time either way
    try:
        t = timer()
        start_time = timer()
        seq = strategy.chkmrt_trial(testset, q, trialseed=sim_seed, maxsteps=maxs, k=k)
        output['simtime'] = timer() - start_time
    except Exception as e:
        output['simtime'] = timer() - start_time
        output['status'] = 'Exception raised during simulate: ' + repr(e)
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)
        return
    # Compute the statistics on seq
    try:
        t = timer()
        results = stats.all_sequence_stats(seq, P, G, importance=statImp, tolerance=statTol)
    except Exception as e:
        output['status'] = 'Exception raised during stats computation ' + repr(e)
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)
        return
    # Update output with the statistics and write it to file
    output.update(results)
    with olock:
        with open(ofname, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
            writer.writerow(output)
    return
# Column names produced by the statistics module; appended to every header row
stats_columns = stats.all_stats_header()
# Select the topology family from the command-line flags and set up the
# matching output-file base name, CSV header, and topology parameter list.
if args.zoo:
    topology_list = glob.glob(args.simroot + 'zoo/*.graphml')
    ofname = 'zoo-chkmrt'
    header_row = ['topology', 'trialnum', 'status', 'k', 'maxS', 'goal', 'edgeAttr', 'nodeAttr', 'statImp', 'statTol', 'simtime', 'trialseed', 'gseed', 'bseed', 'pseed', 'wseed', 'tseed', 'simseed'] + stats_columns
    # In quick/test modes only exercise the first few topologies
    if args.test or args.local:
        topology_list = topology_list[:3]
elif args.dcell or args.spdcell:
    top_param_list = [(2,2),(3,2)] # This should be an iterable of (dcn,dck) tuples
    # top_param_list = [(1,2),(1,3),(2,2),(3,2),(4,2)] # This should be an iterable of (dcn,dck) tuples
    if args.dcell:
        ofname = 'dcell-chkmrt'
    else:
        ofname = 'spdcell-chkmrt'
    header_row = ['dcn', 'dck', 'trialnum', 'status', 'k', 'maxS', 'goal', 'edgeAttr', 'nodeAttr', 'statImp', 'statTol', 'simtime', 'trialseed', 'gseed', 'bseed', 'pseed', 'wseed', 'tseed', 'simseed'] + stats_columns
    if args.test or args.local:
        top_param_list = top_param_list[:3]
elif args.gnm:
    top_param_list = [(42,63,84), (64,192,1024), (81,401,960), (156,208,312)]
    # top_param_list = [(2,4,3)] # This should be an iterable of (b,n,m) tuples
    # top_param_list = [(6,12,12), (42,84,105), (42,63,84), (156,208,312), (2,4,3), (9,25,48), (16,36,80), (16,46,120), (27,105,234), (6,9,9), (12,16,18), (16,48,128), (64,256,768), (64,190,504), (20,25,30)] # This should be an iterable of (b,n,m) tuples
    ofname = 'gnm-chkmrt'
    header_row = ['b', 'n', 'm', 'trialnum', 'status', 'k', 'maxS', 'goal', 'edgeAttr', 'nodeAttr', 'statImp', 'statTol', 'simtime', 'trialseed', 'gseed', 'bseed', 'pseed', 'wseed', 'tseed', 'simseed'] + stats_columns
    if args.test or args.local:
        top_param_list = top_param_list[:3]
elif args.xgft:
    top_param_list = [(4,[3,3,3,3],[8,1,1,1]), (2,[8,8],[8,8])]
    # top_param_list = [(2,[3,3],[4,1]),(2,[4,4],[6,1]),(3,[3,3,3],[6,1,1]),(2,[4,4],[4,4]),(3,[4,4,4],[4,4,4]),(3,[4,4,4],[6,1,1])]
    ofname = 'xgft-chkmrt'
    header_row = ['h', 'mlist', 'wlist', 'trialnum', 'status', 'k', 'maxS', 'goal', 'edgeAttr', 'nodeAttr', 'statImp', 'statTol', 'simtime', 'trialseed', 'gseed', 'bseed', 'pseed', 'wseed', 'tseed', 'simseed'] + stats_columns
    if args.test or args.local:
        top_param_list = top_param_list[:3]
elif args.ba:
    # top_param_list = [(2,4,3)] # This should be an iterable of (b,n,m) tuples
    top_param_list = [(6,12,1), (42,84,1), (42,63,1), (156,208,2), (2,4,1), (9,25,2), (16,36,2), (16,46,3), (27,105,2), (6,9,1), (12,16,1), (16,48,3), (64,256,3), (64,190,3), (20,25,1)] # This should be an iterable of (b,n,m) tuples
    ofname = 'ba-chkmrt'
    header_row = ['fractional', 'b', 'n', 'bam', 'trialnum', 'status', 'k', 'maxS', 'goal', 'edgeAttr', 'nodeAttr', 'statImp', 'statTol', 'simtime', 'trialseed', 'gseed', 'bseed', 'pseed', 'wseed', 'tseed', 'simseed'] + stats_columns
    if args.test or args.local:
        top_param_list = top_param_list[:3]
elif args.fracba:
    top_param_list = [(42,63,1.36281), (81,401,2.40848), (64,192,5.49033), (156,208,1.51098)] # This should be an iterable of (b,n,bam) tuples
    # top_param_list = [(2,4,3)] # This should be an iterable of (b,n,m) tuples
    # top_param_list = [(6,12,1.10102), (42,84,1.26918), (42,63,1.36281), (156,208,1.51098), (2,4,1.0), (9,25,2.09567), (16,36,2.3795), (16,46,2.77625), (27,105,2.27799), (6,9,1.1459), (12,16,1.21767), (16,48,2.83399), (64,256,3.03601), (64,190,2.69074), (20,25,1.2639)] # This should be an iterable of (b,n,m) tuples
    ofname = 'fracba-chkmrt'
    header_row = ['fractional', 'b', 'n', 'bam', 'trialnum', 'status', 'k', 'maxS', 'goal', 'edgeAttr', 'nodeAttr', 'statImp', 'statTol', 'simtime', 'trialseed', 'gseed', 'bseed', 'pseed', 'wseed', 'tseed', 'simseed'] + stats_columns
    if args.test or args.local:
        top_param_list = top_param_list[:3]
elif args.smallcat:
    icatalog = args.simroot + 'catalog/inputcat-small.csv'
    gcatalog = args.simroot + 'catalog/graphcat-small.csv'
    ofname = 'smallcat-chkmrt'
    header_row = ['IID', 'trialnum', 'status', 'k', 'maxS', 'goal', 'edgeAttr', 'nodeAttr', 'statImp', 'statTol', 'simtime', 'trialseed', 'gseed', 'bseed', 'pseed', 'wseed', 'tseed', 'simseed'] + stats_columns
    if args.test or args.local:
        catalog_range = {'start': 1, 'stop': 4, 'step': 1}
    else:
        catalog_range = {'start': 1, 'stop': None, 'step': 1}
    # The first few entries in the catalog only have a single path
elif args.cat:
    icatalog = args.simroot + 'catalog/inputcat.csv'
    gcatalog = args.simroot + 'catalog/graphcat.csv'
    ofname = 'cat-chkmrt'
    header_row = ['IID', 'trialnum', 'status', 'k', 'maxS', 'goal', 'edgeAttr', 'nodeAttr', 'statImp', 'statTol', 'simtime', 'trialseed', 'gseed', 'bseed', 'pseed', 'wseed', 'tseed', 'simseed'] + stats_columns
    if args.test or args.local:
        catalog_range = {'start': 4, 'stop': 7, 'step': 1}
    else:
        catalog_range = {'start': 1, 'stop': None, 'step': 1}
# Value(s) of k to use here
if args.k is None:
    kList = [3]
else:
    kList = [args.k]
# Maximum number of simulation timesteps per trial
if args.maxs is None:
    maxs = 100
else:
    maxs = args.maxs
# Number of trials (and the directory suffix used for the non-test modes)
if args.test:
    number_of_trials = 3
elif args.local:
    number_of_trials = 25
    tstr = 'T25/'
elif args.T100:
    number_of_trials = 100
    tstr = 'T100/'
elif args.T316:
    number_of_trials = 316
    tstr = 'T316/'
elif args.T1000:
    number_of_trials = 1000
    tstr = 'T1000/'
elif args.T3162:
    number_of_trials = 3162
    tstr = 'T3162/'
elif args.T10000:
    number_of_trials = 10000
    tstr = 'T10000/'
# Candidate output directories; which one is used depends on the mode flags
odir_test = args.simroot + 'testdata/'
odir = args.simroot + 'data/'
odir_server = args.simroot + 'server-data/'
odir_local = args.simroot + 'local-data/'
if args.test:
    ofile_base = odir_test + ofname
    if not isdir(odir_test):
        makedirs(odir_test)
elif args.server:
    ofile_base = odir_server + tstr + ofname
    if not isdir(odir_server + tstr):
        makedirs(odir_server + tstr)
elif args.local:
    ofile_base = odir_local + tstr + ofname
    if not isdir(odir_local + tstr):
        makedirs(odir_local + tstr)
else: # Default: Local, but not testing
    ofile_base = odir + tstr + ofname
    if not isdir(odir + tstr):
        makedirs(odir + tstr)
# Program goal:
# NOTE(review): goalStr is never assigned if none of --max/--maxcvxpy/--sum
# is given; presumably argparse enforces exactly one -- confirm.
if args.max:
    goalStr = 'max'
elif args.maxcvxpy:
    goalStr = 'max-cvxpy'
elif args.sum:
    goalStr = 'sum'
# Various weights
# The corresponding strings are so that we can easily
# write 'None' to file if the weight is None
statImp = 'weight'
if statImp is None:
    statImpStr = 'None'
else:
    statImpStr = statImp
statTol = 'inv_weight'
if statTol is None:
    statTolStr = 'None'
else:
    statTolStr = statTol
# Do we weight edges and nodes?
edge_attr = statImp
if edge_attr is None:
    edge_attr_str = 'None'
else:
    edge_attr_str = edge_attr
node_attr = statImp
if node_attr is None:
    node_attr_str = 'None'
else:
    node_attr_str = node_attr
# Number of worker processes (defaults to all available cores)
if args.nprocs is None:
    nprocs = mp.cpu_count()
else:
    nprocs = args.nprocs
# Initialization for pool processes. Make the mp.Lock() global
# and accessible to all pool processes. This lock will be acquired
# by each process before it writes to the common output file.
def pool_init(l):
    global olock
    olock = l
writer_lock = mp.Lock()
# maxtasksperchild=1 recycles each worker after one trial
pool = mp.Pool(processes=nprocs, initializer = pool_init, initargs=(writer_lock,), maxtasksperchild=1)
# Main experiment loop: one pass (and one output CSV file) per value of k
for k in kList:
    # NOTE(review): only --max and --sum are handled here; with --maxcvxpy
    # ofile_sans_idx is never assigned and a NameError follows -- confirm.
    if args.max:
        ofile_sans_idx = ofile_base + '-max-k%03d-s%04d-' % (k,maxs)
    elif args.sum:
        ofile_sans_idx = ofile_base + '-sum-k%03d-s%04d-' % (k,maxs)
    # Pick the first unused numeric suffix so earlier runs are not overwritten
    file_idx = 0
    ofile = ofile_sans_idx + str(file_idx) + '.csv'
    while isfile(ofile):
        file_idx += 1
        ofile = ofile_sans_idx + str(file_idx) + '.csv'
    # Write the CSV header once; trial rows are appended by the workers
    with open(ofile, 'w') as csvfile:
        # Ignores extra values in the dictionary
        writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
        writer.writeheader()
    # Behavior now depends on topology type
    if args.zoo:
        for topofile in topology_list:
            # For each topology, read in the graph G
            # Use all nodes as beacons.
            G = ps.largest_connected_component(nx.Graph(nx.read_graphml(topofile)))
            num_beacons = len(G.nodes())
            # Set up the top-level RNG. Outputs from this will be used to seed
            # the different trials.
            control_RNG = random.Random()
            control_RNG.seed(a=1)
            # The output that doesn't depend on the specific trial
            output = {'topology': topofile, 'goal':goalStr, 'k':k, 'maxS':maxs, 'edgeAttr':edge_attr_str, 'nodeAttr':node_attr_str, 'statImp':statImpStr, 'statTol':statTolStr}
            # Set up a generator that produces arguments to the trial
            # function. Call pool.map to apply the trial function to these
            # arguments
            trial_arg_gen = ((ofile, header_row, i, G, num_beacons, control_RNG.randint(-sys.maxint - 1, sys.maxint), goalStr, k, maxs, copy.deepcopy(output), edge_attr, node_attr, statImp, statTol) for i in range(number_of_trials))
            pool.map(chkmrt_trial_from_num_beacons, trial_arg_gen)
    elif args.cat or args.smallcat:
        IID_list, tuples_list = inputgen.inputgen_from_catalog_range(icatalog, gcatalog, **catalog_range)
        for j in range(len(IID_list)):
            # Here, read in G and P from the catalog.
            # Add weights, set up the RNG, and start the parallel
            # processes. Here, we use the trial function for the case
            # where we already have the paths.
            IID = IID_list[j]
            G = nx.Graph(tuples_list[j][0])
            P = ps.removeSelfLoops(tuples_list[j][2])
            if edge_attr is not None:
                weighting.add_edge_weights_from_paths(G, P, edge_attr_name=edge_attr)
                weighting.add_reciprocal_weights(G, src_attr_name=edge_attr, dst_attr_name=statTol, components='edges', use_default=True, default=0)
            if node_attr is not None:
                weighting.add_node_weights_from_edge_mean(G, node_attr_name=node_attr, edge_attr_name=edge_attr)
                weighting.add_reciprocal_weights(G, src_attr_name=node_attr, dst_attr_name=statTol, components='nodes', use_default=True, default=0)
            control_RNG = random.Random()
            control_RNG.seed(a=1)
            testset = strategy.generate_tests(P)
            q = strategy.compute_distribution(G, testset, importance=edge_attr, goal=goalStr)
            output = {'IID': IID, 'goal':goalStr, 'k':k, 'maxS':maxs, 'edgeAttr':edge_attr_str, 'nodeAttr':node_attr_str, 'statImp':statImpStr, 'statTol':statTolStr}
            trial_arg_gen = ((ofile, header_row, i, control_RNG.randint(-sys.maxint - 1, sys.maxint), P, G, testset, q, k, maxs, copy.deepcopy(output), statImp, statTol) for i in range(number_of_trials))
            pool.map(chkmrt_trial_from_paths, trial_arg_gen)
    elif args.dcell:
        for (dcn,dck) in top_param_list:
            # Here, generate G from parameters and P from the
            # built-in DCell routing.
            # Add weights, set up the RNG, and start the parallel
            # processes. Here, we use the trial function for the case
            # where we already have the paths.
            G = dcell.DCellGraph(dcn,dck)
            P = ps.removeSelfLoops(dcell.all_server_pairs_DCell_routes(G,dcn,dck))
            if edge_attr is not None:
                weighting.add_edge_weights_from_paths(G, P, edge_attr_name=edge_attr)
                weighting.add_reciprocal_weights(G, src_attr_name=edge_attr, dst_attr_name=statTol, components='edges', use_default=True, default=0)
            if node_attr is not None:
                weighting.add_node_weights_from_edge_mean(G, node_attr_name=node_attr, edge_attr_name=edge_attr)
                weighting.add_reciprocal_weights(G, src_attr_name=node_attr, dst_attr_name=statTol, components='nodes', use_default=True, default=0)
            control_RNG = random.Random()
            control_RNG.seed(a=1)
            testset = strategy.generate_tests(P)
            q = strategy.compute_distribution(G, testset, importance=edge_attr, goal=goalStr)
            output = {'dcn':dcn, 'dck':dck, 'goal':goalStr,'k':k, 'maxS':maxs, 'edgeAttr':edge_attr_str, 'nodeAttr':node_attr_str, 'statImp':statImpStr, 'statTol':statTolStr}
            trial_arg_gen = ((ofile, header_row, i, control_RNG.randint(-sys.maxint - 1, sys.maxint), P, G, testset, q, k, maxs, copy.deepcopy(output), statImp, statTol) for i in range(number_of_trials))
            pool.map(chkmrt_trial_from_paths, trial_arg_gen)
    elif args.spdcell:
        for (dcn,dck) in top_param_list:
            # Here, generate G from parameters. B is the collection
            # of all non-switches in the DCell.
            # Add weights, set up the RNG, and start the parallel
            # processes. Here, we use the trial function for the case
            # where we already have the beacon set.
            G = dcell.DCellGraph(dcn,dck)
            B = []
            for v in sorted(G.nodes()):
                if dcell.IsNotSwitch(v):
                    B.append(v)
            control_RNG = random.Random()
            control_RNG.seed(a=1)
            output = {'dcn':dcn, 'dck':dck, 'goal':goalStr,'k':k, 'maxS':maxs, 'edgeAttr':edge_attr_str, 'nodeAttr':node_attr_str, 'statImp':statImpStr, 'statTol':statTolStr}
            trial_arg_gen = ((ofile, header_row, i, G, B, control_RNG.randint(-sys.maxint - 1, sys.maxint), goalStr, k, maxs, copy.deepcopy(output), edge_attr, node_attr, statImp, statTol) for i in range(number_of_trials))
            pool.map(chkmrt_trial_from_beacons, trial_arg_gen)
    elif args.xgft:
        for (h, mList, wList) in top_param_list:
            # Here, generate G from parameters. B is the collection
            # of all leaf nodes (level 0) in the XGFT.
            # Add weights, set up the RNG, and start the parallel
            # processes. Here, we use the trial function for the case
            # where we already have the beacon set.
            G = xgft.XGFTgraph(h,mList,wList)
            B = []
            for v in sorted(G.nodes()):
                if v[0] == 0:
                    B.append(v)
            control_RNG = random.Random()
            control_RNG.seed(a=1)
            output = {'h':h, 'mlist':mList, 'wlist':wList, 'goal':goalStr, 'k':k, 'maxS':maxs, 'edgeAttr':edge_attr_str, 'nodeAttr':node_attr_str, 'statImp':statImpStr, 'statTol':statTolStr}
            trial_arg_gen = ((ofile, header_row, i, G, B, control_RNG.randint(-sys.maxint - 1, sys.maxint), goalStr, k, maxs, copy.deepcopy(output), edge_attr, node_attr, statImp, statTol) for i in range(number_of_trials))
            pool.map(chkmrt_trial_from_beacons, trial_arg_gen)
    elif args.gnm:
        for (b,n,m) in top_param_list:
            # Set up the top-level RNG. A seedfile might be given
            # with precomputed top-level seeds that produce connected
            # graphs for the (n,m) parameters in question. G is generated
            # inside the trial function.
            # Here, we use the trial function for the G_{n,m} case.
            control_RNG = random.Random()
            control_RNG.seed(a=1)
            output = {'b':b, 'n':n, 'm':m, 'goal':goalStr, 'k':k,'maxS':maxs, 'edgeAttr':edge_attr_str, 'nodeAttr':node_attr_str, 'statImp':statImpStr, 'statTol':statTolStr}
            if args.seedfile:
                # Read at most number_of_trials precomputed seeds from file
                seed_list = []
                seed_ifile = './gnm-rng/gnm-seeds-' + str(b) + '-' + str(n) + '-' + str(m) + '.csv'
                with open(seed_ifile) as istream:
                    reader = csv.DictReader(istream)
                    i = 0
                    for row in reader:
                        seed_list += [int(row['proc_seed'])]
                        i += 1
                        if i == number_of_trials:
                            break
                gnm_trial_arg_gen = ((ofile, header_row, i, b, n, m, seed_list[i], goalStr, k, maxs, copy.deepcopy(output), edge_attr, node_attr, statImp, statTol) for i in range(number_of_trials))
            else:
                gnm_trial_arg_gen = ((ofile, header_row, i, b, n, m, control_RNG.randint(-sys.maxint - 1, sys.maxint), goalStr, k, maxs, copy.deepcopy(output), edge_attr, node_attr, statImp, statTol) for i in range(number_of_trials))
            pool.map(chkmrt_gnm_trial, gnm_trial_arg_gen)
    elif args.ba or args.fracba:
        for (b,n,bam) in top_param_list:
            # Here, generate G randomly in the trial, so we just set up
            # the top-level RNG. No seedfile is expected because these
            # graphs are always connected by construction.
            # Here, we use the trial function for the B--A or fractional
            # B--A case. In the former, bam is an int; in the latter, it
            # is a float.
            control_RNG = random.Random()
            control_RNG.seed(a=1)
            output = {'b':b, 'n':n, 'bam':bam, 'goal':goalStr, 'k':k,'maxS':maxs, 'edgeAttr':edge_attr_str, 'nodeAttr':node_attr_str, 'statImp':statImpStr, 'statTol':statTolStr}
            if args.ba:
                ba_trial_arg_gen = ((False, ofile, header_row, i, b, n, bam, control_RNG.randint(-sys.maxint - 1, sys.maxint), goalStr, k, maxs, copy.deepcopy(output), edge_attr, node_attr, statImp, statTol) for i in range(number_of_trials))
            elif args.fracba:
                ba_trial_arg_gen = ((True, ofile, header_row, i, b, n, bam, control_RNG.randint(-sys.maxint - 1, sys.maxint), goalStr, k, maxs, copy.deepcopy(output), edge_attr, node_attr, statImp, statTol) for i in range(number_of_trials))
            pool.map(chkmrt_ba_trial, ba_trial_arg_gen)
    else:
        raise RuntimeError('No supported topologies specified!')
# All values of k processed; shut the worker pool down cleanly
pool.close()
pool.join()
|
# Generated by Django 4.0.2 on 2022-02-15 15:52
import datetime
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``User`` table."""

    # First migration for this app, so nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('userId', models.AutoField(primary_key=True, serialize=False)),
                # Role discriminator: every user is either a Customer or a Vendor
                ('userType', models.CharField(choices=[('Customer', 'Customer'), ('Vendor', 'Vendor')], max_length=10)),
                ('name', models.CharField(max_length=40)),
                ('address', models.CharField(max_length=100)),
                # max_length=10 plus MinLengthValidator(10) forces exactly 10 characters
                ('phoneNumber', models.CharField(max_length=10, validators=[django.core.validators.MinLengthValidator(10)])),
                ('email', models.CharField(max_length=30)),
                ('status', models.BooleanField()),
                # Default is the callable datetime.now, evaluated at save time
                ('createdTs', models.DateTimeField(blank=True, default=datetime.datetime.now)),
            ],
        ),
    ]
|
from .__head__ import *
class ICUDomain(BaseDomain):
    """Domain definition for the ICU dataset.

    Bundles the input/output dimensions, per-model hyper-parameter
    configurations, and human-readable channel names used elsewhere in
    the package.
    """

    def __init__(self, y_dim=2):
        """Configure the ICU domain.

        :param y_dim: action-space size; must be one of 2, 4 or 8. Larger
            values add extra action channels (see the bottom of this method).
        """
        self.base_name = "icu"
        self.name = self.base_name + f"_{y_dim}"
        # Static inputs: 32 total, split into 29 binary + 3 continuous
        # (see static_bin_dim/static_con_dim); 37 time-series inputs.
        self.static_in_dim = 32
        self.series_in_dim = 37
        self.static_bin_dim = 29
        self.static_con_dim = 3
        # Outputs: all 37 are continuous (no binary outputs).
        self.out_dim = 37
        self.bin_out_dim = 0
        self.con_out_dim = 37
        # Termination constant -- presumably a per-step termination
        # probability; TODO confirm against BaseDomain's usage.
        self.terminate = 0.0601
        valid_y = [2, 4, 8]
        assert y_dim in valid_y
        self.y_dim = y_dim
        # --- Hyper-parameter configurations per model -------------------
        TForce_config = {
            "hidden_dim": 128,
            "lr": 1e-3,
            "hidden_layers": 3,
            "lstm_layers": 2,
            "dropout": 0.2,
            "adam_betas": (0.9, 0.99),
            "epochs": 30,
        }
        SS_config = {
            "state_space_size": 10,
            "encoder_hidden_dim": 64,
            "emitter_hidden_dim": 64,
            "hidden_dim": 64,
            "mix_components": 5,
            "markov_order": 5,
            "lr": 1e-3,
            "hidden_layers": 1,
            "adam_betas": (0.9, 0.99),
            "epochs": 30,
        }
        SVAE_config = {
            "latent_size": 10,
            "ae_hidden_dim": 128,
            "ae_hidden_layers": 1,
            "t_hidden_dim": 128,
            "t_lstm_layers": 2,
            "lr": 1e-5,
            "adam_betas": (0.9, 0.99),
            "epochs": 50,
        }
        CRN_config = {
            "hidden_dim": 128,
            "lr": 1e-3,
            "hidden_layers": 3,
            "lstm_layers": 2,
            "dropout": 0.2,
            "adam_betas": (0.9, 0.99),
            "epochs": 30,
        }
        LSTM_config = {
            "hidden_dim": 64,
            "lr": 1e-3,
            "hidden_layers": 2,
            "lstm_layers": 1,
            "dropout": 0.2,
            "adam_betas": (0.9, 0.99),
            "epochs": 30,
        }
        MLP_config = {
            "hidden_dim": 128,
            "lr": 1e-3,
            "hidden_layers": 3,
            "adam_betas": (0.9, 0.99),
            "epochs": 30,
        }
        Linear_config = {"lr": 1e-3, "adam_betas": (0.9, 0.99), "epochs": 30}
        VAE_config = {
            "latent_size": 10,
            "hidden_units": 100,
            "lr": 1e-5,
            "hidden_layers": 3,
            "adam_betas": (0.9, 0.9),
            "epochs": 200,
        }
        # Group the configs by role: environment models, policy models,
        # and state-initialization models.
        self.env_config_dict = {
            "tforce": TForce_config,
            "statespace": SS_config,
            "SVAE": SVAE_config,
            "CRN": CRN_config,
        }
        self.pol_config_dict = {
            "lstm": LSTM_config,
            "mlp": MLP_config,
            "linear": Linear_config,
        }
        self.init_config_dict = {"VAE": VAE_config}
        # Human-readable names of the 32 static features (order matters)
        self.static_names = [
            "age",
            "weight",
            "height",
            "urgency",
            "gender",
            "surgical",
            "sepsis_at_admission",
            "sepsis_antibiotics",
            "other_antibiotics",
            "sepsis_cultures",
            "General surgery",
            "Internal medicine",
            "Non-operative cardiovascular",
            "Non-operative gastro-intestinal",
            "Non-operative hematological",
            "Non-operative metabolic",
            "Non-operative neurologic",
            "Non-operative genitourinary",
            "Non-operative respiratory",
            "Non-operative musculo-skeletal",
            "Non-operative transplant",
            "Non-operative trauma",
            "Post-operative cardiovascular",
            "Post-operative Gastro-intestinal",
            "Post-operative hematological",
            "Post-operative metabolic",
            "Post-operative neurologic",
            "Post-operative genitourinary",
            "Post-operative respiratory",
            "Post-operative musculo-skeletal",
            "Post-operative transplant",
            "Post-operative trauma",
        ]
        # Human-readable names of the 37 time-series features (order matters)
        self.series_names = [
            "Diastolic ABP",
            "Average ABP",
            "Systolic ABP",
            "ALAT (blood)",
            "APTT (blood)",
            "ASAT (blood)",
            "Act.HCO3 (blood)",
            "Breathing rate",
            "Alb.Chem (blood)",
            "Alk.Fosf. (blood)",
            "B.E. (blood)",
            "Bilirubine (blood)",
            "CRP (blood)",
            "Ca (alb.corr.) (blood)",
            "Calcium total (blood)",
            "Cl (blood)",
            "Exp. tidal volume",
            "FiO2 %",
            "Phosphate (blood)",
            "Glucose (blood)",
            "Heartrate",
            "Hb (blood)",
            "Potassium (blood)",
            "Creatinine (blood)",
            "Lactate (blood)",
            "Leukocytes (blood)",
            "Magnesium (blood)",
            "Sodium (blood)",
            "O2 concentration",
            "O2 l/min",
            "O2-Saturation (blood)",
            "PO2 (blood)",
            "Saturation (Monitor)",
            "Thrombo's (blood)",
            "Urea (blood)",
            "pCO2 (blood)",
            "pH (blood)",
        ]
        # Action channels grow with y_dim: 2 -> antibiotics only,
        # 4 -> + ventilation, 8 -> + vasopressors.
        self.action_names = ["antibiotics"]
        if y_dim > 2:
            self.action_names += ["ventilation"]
        if y_dim > 4:
            self.action_names += ["vasopressors"]
        return
|
from time import sleep
from typing import List, Tuple
from asciimatics.constants import COLOUR_RED
from asciimatics.screen import Screen
from player import Player
class Level:
    """Framework for levels"""

    def __init__(self, screen: Screen):
        """Initiates a level.

        :param screen: asciimatics screen that all drawing goes to
        """
        self.screen = screen
        # Playfield size and its offset from the terminal's top-left corner
        self.width, self.height = 70, 30
        self.x_pad, self.y_pad = 30, 5
        # Player starts centered horizontally on the bottom row (in screen
        # coordinates, i.e. including the padding offsets)
        self.player_x = self.width // 2 + self.x_pad
        self.player_y = self.height - 1 + self.y_pad
        # BUGFIX: was `self.path_taken: Tuple(int, int) = []`. Annotations on
        # attribute targets are evaluated at runtime, and calling typing.Tuple
        # raises TypeError; the attribute is a list of (x, y) positions.
        self.path_taken: List[Tuple[int, int]] = []
        self.grid = [["" for _ in range(self.width)] for __ in range(self.height)]
        self.player = Player(self.player_x, self.player_y)

    def _draw_stage(self) -> None:
        """Draws static elements"""
        # THIS WILL BE REWORKED ALTOGETHER ONCE THE LEVEL MAKER IS IMPLEMENTED
        # the goal is to use self.grid to draw what is passable and what isn't
        # Trace the playfield border as four "*" line segments
        self.screen.move(self.x_pad, self.y_pad)
        self.screen.draw(self.x_pad, self.y_pad + self.height, char="*")
        self.screen.draw(self.x_pad + self.width, self.y_pad + self.height, char="*")
        self.screen.draw(self.x_pad + self.width, self.y_pad, char="*")
        self.screen.draw(self.x_pad, self.y_pad, char="*")

    def draw_path(self) -> None:
        """Draws path (highlights every previously visited cell in red)"""
        for (x, y) in self.path_taken:
            self.screen.highlight(x, y, 1, 1, None, COLOUR_RED)

    def run(self, moves: List[str]) -> None:
        """Runs moves

        Replays the move list one step at a time, redrawing the stage,
        player and path between steps.

        :param moves: sequence of move commands understood by Player.move
        """
        self.screen.clear()
        for m in moves:
            self._draw_stage()
            self.player.move(m)
            self.player.render(self.screen)
            self.draw_path()
            self.screen.refresh()
            self.screen.clear_buffer(0, 1, 0)
            # Record the position reached after this move for draw_path
            self.path_taken.append((self.player.x, self.player.y))
            sleep(0.2)
|
import logging.config
# Application-wide logging configuration (dictConfig schema, version 1).
LOGGING_CONFIG = {
    'version': 1,
    'loggers': {
        # Root logger: everything at DEBUG and above goes to stdout, and
        # ERROR and above is additionally emailed via the 'mail' handler.
        '': {
            'level': 'DEBUG',
            'handlers': ['console', 'mail'],
        },
        # NOTE(review): this named logger does not set 'propagate': False,
        # so records logged through it are also handled by the root logger
        # and appear on stdout twice -- confirm whether that is intended.
        'console': {
            'level': 'DEBUG',
            'handlers': ['console'],
        }
    },
    'handlers': {
        # Plain stream handler writing to stdout
        'console': {
            'level': 'DEBUG',
            'formatter': 'info',
            'class': 'logging.StreamHandler',
            'stream': 'ext://sys.stdout',
        },
        # Emails ERROR-level records via a local SMTP server
        'mail': {
            'level': 'ERROR',
            'formatter': 'error',
            'class': 'logging.handlers.SMTPHandler',
            'mailhost': 'localhost',
            'fromaddr': 'monitoring@domain.com',
            'toaddrs': ['dev@domain.com', 'qa@domain.com'],
            'subject': 'Critical error with application name'
        }
    },
    # Both formatters currently use the same layout
    'formatters': {
        'info': {
            'format': '%(asctime)s | %(levelname)s | %(name)s (%(module)s) | %(lineno)s | %(message)s'
        },
        'error': {
            'format': '%(asctime)s | %(levelname)s | %(name)s (%(module)s) | %(lineno)s | %(message)s'
        },
    },
}
# Loggers
# Apply the configuration, then quiet SQLAlchemy down to INFO
logging.config.dictConfig(LOGGING_CONFIG)
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
|
#!/usr/bin/env python
u"""
gesdisc_merra_download.py
Written by Tyler Sutterley (09/2019)
This program downloads MERRA-2 products using a links list provided by the
Goddard Earth Sciences Data and Information Server Center
https://gmao.gsfc.nasa.gov/reanalysis/MERRA-2/
https://wiki.earthdata.nasa.gov/display/EL/How+To+Access+Data+With+Python
Register with NASA Earthdata Login system:
https://urs.earthdata.nasa.gov
Add "NASA GESDISC DATA ARCHIVE" to Earthdata Applications:
https://urs.earthdata.nasa.gov/approve_app?client_id=e2WVk8Pw6weeLUKZYOxvTQ
CALLING SEQUENCE:
python gesdisc_merra_sync.py --user=<username>
where <username> is your NASA Earthdata username
COMMAND LINE OPTIONS:
--help: list the command line options
-D X, --directory: Full path to output directory
--user: username for NASA Earthdata Login
-M X, --mode=X: Local permissions mode of the directories and files created
--log: output log of files downloaded
PYTHON DEPENDENCIES:
future: Compatibility layer between Python 2 and Python 3
(http://python-future.org/)
UPDATE HISTORY:
Updated 09/2019: added ssl context to urlopen headers
Updated 08/2019: new GESDISC server and links list file format
increased timeout to 20 seconds
Updated 06/2018: using python3 compatible octal, input and urllib
Written 03/2018
"""
from __future__ import print_function
import future.standard_library
import sys
import os
import re
import ssl
import getopt
import shutil
import getpass
import builtins
import posixpath
import calendar, time
if sys.version_info[0] == 2:
from cookielib import CookieJar
import urllib2
else:
from http.cookiejar import CookieJar
import urllib.request as urllib2
#-- PURPOSE: check internet connection
def check_connection():
    """Verify that the GESDISC http server can be reached.

    :returns: True when the connection attempt succeeds
    :raises RuntimeError: when the host cannot be contacted within 20 seconds
    """
    host = 'http://disc.sci.gsfc.nasa.gov/'
    #-- a failed attempt within the timeout means there is no usable network
    try:
        urllib2.urlopen(host, timeout=20, context=ssl.SSLContext())
    except urllib2.URLError:
        raise RuntimeError('Check internet connection')
    return True
#-- PURPOSE: sync local MERRA-2 files with GESDISC server
def gesdisc_merra_download(links_list_file, USER='', PASSWORD='',
    DIRECTORY=None, LOG=False, MODE=None):
    """
    Download every URL listed in a GESDISC links list file into DIRECTORY,
    authenticating against the NASA Earthdata Login system.

    links_list_file: path to the links list file provided by GESDISC
    USER: NASA Earthdata username
    PASSWORD: NASA Earthdata password
    DIRECTORY: output directory (created recursively if missing)
    LOG: if True, write transfer records to a dated log file instead of stdout
    MODE: permissions mode applied to downloaded files (and the log file)
    """
    #-- check if DIRECTORY exists and recursively create if not
    os.makedirs(DIRECTORY,MODE) if not os.path.exists(DIRECTORY) else None
    #-- create log file with list of synchronized files (or print to terminal)
    if LOG:
        #-- format: NASA_GESDISC_MERRA2_download_2002-04-01.log
        today = time.strftime('%Y-%m-%d',time.localtime())
        LOGFILE = 'NASA_GESDISC_MERRA2_download_{0}.log'.format(today)
        fid = open(os.path.join(DIRECTORY,LOGFILE),'w')
        print('NASA MERRA-2 Sync Log ({0})'.format(today), file=fid)
    else:
        #-- standard output (terminal output)
        fid = sys.stdout
    #-- https://docs.python.org/3/howto/urllib2.html#id5
    #-- create a password manager
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    #-- Add the username and password for NASA Earthdata Login system
    password_mgr.add_password(None, 'https://urs.earthdata.nasa.gov',
        USER, PASSWORD)
    #-- Create cookie jar for storing cookies. This is used to store and return
    #-- the session cookie given to use by the data server (otherwise will just
    #-- keep sending us back to Earthdata Login to authenticate).
    cookie_jar = CookieJar()
    #-- create "opener" (OpenerDirector instance)
    opener = urllib2.build_opener(
        urllib2.HTTPBasicAuthHandler(password_mgr),
        urllib2.HTTPSHandler(context=ssl.SSLContext()),
        urllib2.HTTPCookieProcessor(cookie_jar))
    #-- Now all calls to urllib2.urlopen use our opener.
    urllib2.install_opener(opener)
    #-- All calls to urllib2.urlopen will now use handler
    #-- Make sure not to include the protocol in with the URL, or
    #-- HTTPPasswordMgrWithDefaultRealm will be confused.
    #-- read the links list file (utf-8-sig strips a possible BOM)
    with open(links_list_file,'rb') as fileID:
        lines = fileID.read().decode("utf-8-sig").encode("utf-8").splitlines()
    #-- for each line in the links_list_file
    for f in lines:
        #-- extract filename from url: try the LABEL query parameter first,
        #-- then a MERRA2 granule pattern, else fall back to the URL basename
        if re.search(b'LABEL\=(.*?)\&SHORTNAME',f):
            FILE, = re.findall('LABEL\=(.*?)\&SHORTNAME', f.decode('utf-8'))
        elif re.search(b'MERRA2_(\d+).(.*?).(\d+).(.*?).nc',f):
            rx = re.compile('MERRA2_(\d+)\.(.*?)\.(\d+)\.(.*?).nc')
            MOD,DSET,YMD,AUX = rx.findall(f.decode('utf-8')).pop()
            FILE = 'MERRA2_{0}.{1}.{2}.SUB.nc'.format(MOD,DSET,YMD)
        else:
            FILE = posixpath.split(f.decode('utf-8'))[1]
        #-- Printing files transferred
        local_file = os.path.join(DIRECTORY,FILE)
        print('{0} -->\n\t{1}\n'.format(f.decode('utf-8'),local_file), file=fid)
        #-- Create and submit request. There are a wide range of exceptions
        #-- that can be thrown here, including HTTPError and URLError.
        request = urllib2.Request(url=f.decode('utf-8').strip())
        response = urllib2.urlopen(request, timeout=20)
        #-- chunked transfer encoding size
        CHUNK = 16 * 1024
        #-- copy contents to local file using chunked transfer encoding
        #-- transfer should work properly with ascii and binary data formats
        #-- NOTE(review): `f` is rebound here to the output file object,
        #-- shadowing the loop variable; harmless today but fragile
        with open(local_file, 'wb') as f:
            shutil.copyfileobj(response, f, CHUNK)
        #-- change permissions to MODE
        os.chmod(local_file, MODE)
        #-- close request
        request = None
    #-- close log file and set permissions level to MODE
    if LOG:
        fid.close()
        os.chmod(os.path.join(DIRECTORY,LOGFILE), MODE)
#-- PURPOSE: help module to describe the optional input parameters
def usage():
    """Print command-line usage information for this download script."""
    program = os.path.basename(sys.argv[0])
    print('\nHelp: {}'.format(program))
    options = [
        ' -D X, --directory=X\t\tFull path to working data directory',
        ' -U X, --user=X\t\tUsername for NASA Earthdata Login',
        ' -M X, --mode=X\t\tPermission mode of directories and files synced',
        ' -l, --log\t\tOutput log file',
    ]
    print('\n'.join(options))
    #-- the log file name embeds today's date
    today = time.strftime('%Y-%m-%d', time.localtime())
    logfile = 'NASA_GESDISC_MERRA2_download_{0}.log'.format(today)
    print(' Log file format: {}\n'.format(logfile))
#-- Main program that calls gesdisc_merra_download()
def main():
    """
    Parse command-line options and download each GESDISC MERRA-2 links
    list file named on the command line.
    """
    #-- Read the system arguments listed after the program
    long_options = ['help','directory=','user=','log','mode=']
    optlist,arglist = getopt.getopt(sys.argv[1:],'hD:U:M:l',long_options)
    #-- command line parameters
    DIRECTORY = os.getcwd()
    USER = ''
    LOG = False
    #-- permissions mode of the local directories and files (number in octal)
    MODE = 0o775
    for opt, arg in optlist:
        if opt in ('-h','--help'):
            usage()
            sys.exit()
        elif opt in ("-D","--directory"):
            #-- FIX: this previously tested `opt in ("--directory")`, which is
            #-- a plain string (missing comma), so the short option -D was
            #-- accepted by getopt but silently never applied
            DIRECTORY = os.path.expanduser(arg)
        elif opt in ("-U","--user"):
            USER = arg
        elif opt in ("-l","--log"):
            LOG = True
        elif opt in ("-M","--mode"):
            #-- mode is given in octal (e.g. 775)
            MODE = int(arg, 8)
    #-- NASA Earthdata hostname
    HOST = 'urs.earthdata.nasa.gov'
    #-- check that NASA Earthdata credentials were entered
    if not USER:
        USER = builtins.input('Username for {0}: '.format(HOST))
    #-- enter password securely from command-line
    PASSWORD = getpass.getpass('Password for {0}@{1}: '.format(USER,HOST))
    #-- check internet connection before attempting to run program
    if check_connection():
        #-- for each links list file from GESDISC
        for fi in arglist:
            gesdisc_merra_download(os.path.expanduser(fi), USER=USER,
                PASSWORD=PASSWORD, DIRECTORY=DIRECTORY, LOG=LOG, MODE=MODE)
#-- run main program
if __name__ == '__main__':
main()
|
##Drone Start##
#-- Boot and video-preview script for a Parrot AR.Drone (Python 2, ps_drone).
import time, sys
import ps_drone
drone = ps_drone.Drone()
drone.startup()
drone.reset()
#-- getBattery() reports -1 until the reset has completed
while (drone.getBattery()[0]==-1): time.sleep(0.1) #Reset completed?
print "Battery: "+str(drone.getBattery()[0])+"%" +str(drone.getBattery()[1])
if drone.getBattery()[1]=="empty": sys.exit() #If battery low, abort
drone.useDemoMode(True) #15 datasets/sec
drone.getNDpackage(["demo", "vision_detect"]) #Packets to be decoded; only decode what is necessary for increased performance
time.sleep(0.5) #give drone time to wake properly
###Main program
#-- remember the config counter so we can detect when reconfiguration is done
CDC = drone.ConfigDataCount
drone.setConfigAllID() #Go to multiconfig mode
drone.sdVideo() #Choose lower res
drone.frontCam() #Choose front view
while CDC == drone.ConfigDataCount: time.sleep(0.001) #wait until config done
drone.startVideo() #Start video function
drone.showVideo() #Display the video
#-- NOTE(review): the prompt promises <space> toggles the camera, but the
#-- loop below exits on ANY key, including space -- confirm intended behavior
print "<space> to toggle camera, any other key to stop"
IMC = drone.VideoImageCount #Num. encoded videoframes
stop = False
#-- poll for new video frames; any keypress ends the loop
while not stop:
    while drone.VideoImageCount == IMC: time.sleep(0.01) #wait for new videoframes
    IMC = drone.VideoImageCount
    key = drone.getKey()
    if key: stop = True
|
#-- Minimal pygame demo: a movable rectangle with a parabolic jump.
import pygame
pygame.init()
#-- window dimensions (square, so the swapped argument order below is benign)
screenWidth=720
screenHeight=720
run=True
bl=(255,105,180)
white=(255,255, 255)
#-- player rectangle position, size and per-tick movement speed (pixels)
x=50
y=50
width=50
height=50
speed=40
isJump= False
jumpCount=10
#-- NOTE(review): set_mode takes (width, height); (screenHeight, screenWidth)
#-- is passed here -- both are 720 so it renders fine, but confirm before
#-- making the window non-square
win=pygame.display.set_mode((screenHeight, screenWidth))
pygame.display.set_caption("New", "My")
while run:
    #for (this) event
    pygame.time.delay(100)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run=False
    key=pygame.key.get_pressed()
    if key[pygame.K_LEFT] and x>0:
        x-=speed
    if key[pygame.K_RIGHT] and x<screenWidth-width-100:
        x+=speed
    if not isJump:
        if key[pygame.K_UP] :
            isJump=True
    else:
        #-- parabolic jump: rise while jumpCount > 0, fall while negative
        if jumpCount >=-10:
            if jumpCount>0:
                y-=(jumpCount**2)*0.5
            else:
                y+=(jumpCount**2)*0.5
            jumpCount-=1
        else:
            #-- jump finished: reset the counter for the next jump
            jumpCount=10
            isJump=False
    if key[pygame.K_DOWN] and y<screenHeight-height-100:
        y+=speed
    win.fill((0, 120, 255))
    pygame.draw.rect(win, white,(x,y, width, height))
    #pygame.draw.circle(win, bl, (20,15), 15)
    #Lets disable for now circluar object
    pygame.display.update()
pygame.quit()
|
import os
import importlib
import requests
#-- NOTE(review): `import self as self` is almost certainly a stray
#-- auto-import (there is no stdlib `self` module) -- verify and remove
import self as self
import urllib3
# importlib.import_module("python-twitter")
#-- install dependencies at runtime via pip; prints each command's exit code
print(os.system('pip install python-twitter'))
print(os.system('pip install requests'))
print(os.system('pip install urllib3'))
def parse_sites(filename):
    """Read a whitespace/comma separated list of site names from *filename*.

    Returns the non-empty, stripped tokens in file order.
    """
    with open(filename, "r", encoding='UTF-8') as handle:
        text = handle.read().strip()
    # Normalize every separator character to a comma, then split once.
    for separator in ("\t", "\r", "\n", " "):
        text = text.replace(separator, ",")
    tokens = (piece.strip() for piece in text.split(","))
    return [token for token in tokens if token]
#-- locate sites.txt next to this script and download photos for each user
cur_dir = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(cur_dir, "sites.txt")
sites = parse_sites(filename)
for site in sites:
    #-- FIX: the original sliced with site.find('%', 1) unconditionally;
    #-- when no '%' is present find() returns -1 and site[0:-1] silently
    #-- drops the last character of the name. Only trim when '%' exists.
    percent = site.find('%', 1)
    if percent != -1:
        site = site[0:percent]
    user_filename = os.path.join(cur_dir, site)
    #-- '-i' requests an incremental update when a previous download exists
    if os.path.exists(user_filename):
        command = 'twphotos -u ' + site + ' -i'
    else:
        command = 'twphotos -u ' + site
    os.system(command)
#-- FIX: input(print(...)) displayed "None" as the prompt because print()
#-- returns None; print the message first, then wait for a keypress.
print("下载已完成,请按任意键退出!")
input()
# Copyright 2017 by Kurt Rathjen. All Rights Reserved.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
import shutil
import platform
import traceback
from studioqt import QtGui
from studioqt import QtCore
from studioqt import QtWidgets
try:
import maya.mel
import maya.cmds
except ImportError:
traceback.print_exc()
import mutils
class MayaUtilsError(Exception):
    """Base class for exceptions in this module."""
class ObjectsError(MayaUtilsError):
    """Error involving the objects passed to an operation."""
    pass
class SelectionError(MayaUtilsError):
    """Error involving the current scene selection."""
    pass
class NoMatchFoundError(MayaUtilsError):
    """Error for a search that produced no match."""
    pass
class NoObjectFoundError(MayaUtilsError):
    """Error for a lookup that found no object."""
    pass
class MoreThanOneObjectFoundError(MayaUtilsError):
    """Error for a lookup that found several objects where one was expected."""
    pass
class ModelPanelNotInFocusError(MayaUtilsError):
    """Error for operations that require a focused model panel."""
    pass
def system():
    """Return the lowercase name of the current operating system."""
    name = platform.system()
    return name.lower()
def isMac():
    """Return True when running on macOS / OS X / Darwin."""
    name = system()
    return name.startswith(("mac", "os", "darwin"))
def isWindows():
    """Return True when running on Windows."""
    # system() already lowercases, so no extra .lower() is needed
    return system().startswith("win")
def isLinux():
    """Return True when running on Linux."""
    # system() already lowercases, so no extra .lower() is needed
    return system().startswith("lin")
def isMaya():
    """Return True when a working Maya interpreter is available.

    :rtype: bool
    """
    try:
        import maya.cmds
        maya.cmds.about(batch=True)
    except ImportError:
        return False
    return True
def selectionModifiers():
    """
    Return which selection modifier keys are currently held down.

    Shift maps to "deselect", Control maps to "add".

    :rtype: dict
    """
    modifiers = QtWidgets.QApplication.keyboardModifiers()
    return {
        "add": modifiers == QtCore.Qt.ControlModifier,
        "deselect": modifiers == QtCore.Qt.ShiftModifier,
    }
def ls(*args, **kwargs):
    """
    Wrap maya.cmds.ls and return the results as mutils.Node objects.

    :rtype: list[Node]
    """
    names = maya.cmds.ls(*args, **kwargs) or []
    return [mutils.Node(name) for name in names]
def listAttr(node, **kwargs):
    """
    List the attributes on the given node as mutils.Attribute objects.

    :type node: mutils.Node
    :type kwargs: dict
    :rtype: list[mutils.Attribute]
    """
    # maya.cmds.listAttr returns None when there are no matches
    names = maya.cmds.listAttr(node.name(), **kwargs) or []
    return [mutils.Attribute(node.name(), attrName) for attrName in names]
def currentFrameRange():
    """
    Resolve a frame range, preferring the time-slider selection, then the
    selected objects' keyed range, then the playback range.

    :rtype: (int, int)
    """
    start, end = selectedFrameRange()
    if start == end:
        start, end = selectedObjectsFrameRange()
        if start == end:
            start, end = playbackFrameRange()
    return start, end
def playbackFrameRange():
    """
    Return the playback (timeline) frame range.

    :rtype: (int, int)
    """
    options = maya.cmds.playbackOptions
    first = options(query=True, min=True)
    last = options(query=True, max=True)
    return first, last
def selectedFrameRange():
    """
    Return the frame range currently selected on the time slider.

    A single frame is reported by Maya as a range of length one, so such
    ranges are collapsed back to (frame, frame).

    :rtype: (int, int)
    """
    raw = maya.mel.eval("timeControl -q -range $gPlayBackSlider")
    first, last = raw.replace('"', "").split(":")
    first, last = int(first), int(last)
    if last - first == 1:
        last = first
    return first, last
def selectedObjectsFrameRange(dagPaths=None):
    """
    Return the first/last keyframe range for the given nodes, defaulting to
    the current selection. Returns (0, 0) when nothing is found.

    :rtype : (int, int)
    """
    if not dagPaths:
        dagPaths = maya.cmds.ls(selection=True) or []
    if not dagPaths:
        return 0, 0
    first = int(maya.cmds.findKeyframe(dagPaths, which='first'))
    last = int(maya.cmds.findKeyframe(dagPaths, which='last'))
    return first, last
def connectedAttrs(objects):
    """
    Return the destination plugs on the given objects whose incoming
    connection is not an animation curve.

    :type objects: list[str]
    :rtype: list[str]
    :raise ObjectsError: when no objects are specified.
    """
    if not objects:
        #-- FIX: raise the module's own exception type instead of a bare
        #-- Exception, consistent with the MayaUtilsError hierarchy above;
        #-- still backward-compatible for callers catching Exception
        raise ObjectsError("No objects specified")
    result = []
    #-- with connections=True, listConnections returns a flat list of
    #-- alternating (destination, source) plug pairs
    connections = maya.cmds.listConnections(objects, connections=True, p=True, d=False, s=True) or []
    for i in range(0, len(connections), 2):
        dstObj = connections[i]
        srcObj = connections[i + 1]
        nodeType = maya.cmds.nodeType(srcObj)
        if "animCurve" not in nodeType:
            result.append(dstObj)
    return result
def currentModelPanel():
    """
    Return the model panel that currently has focus, or None when the
    focused panel is not a model panel.

    :rtype: str or None
    """
    panel = maya.cmds.getPanel(withFocus=True)
    if maya.cmds.getPanel(typeOf=panel) == 'modelPanel':
        return panel
    return None
def bakeConnected(objects, time, sampleBy=1):
    """
    Bake every non-animCurve incoming connection on the given objects
    over the given time range.

    :type objects: list[str]
    :type sampleBy: int
    """
    bakeAttrs = connectedAttrs(objects)
    if not bakeAttrs:
        # guard clause: nothing connected, nothing to bake
        print("cannot find connection to bake!")
        return
    maya.cmds.bakeResults(
        bakeAttrs,
        time=time,
        shape=False,
        simulation=True,
        sampleBy=sampleBy,
        controlPoints=False,
        minimizeRotation=True,
        bakeOnOverrideLayer=False,
        preserveOutsideKeys=False,
        sparseAnimCurveBake=False,
        disableImplicitControl=True,
        removeBakedAttributeFromLayer=False,
    )
def disconnectAll(name):
    """
    Disconnect every connection leaving the given node.

    source=False restricts the query to plugs this node feeds into.

    :type name: str
    """
    destinations = maya.cmds.listConnections(name, plugs=True, source=False) or []
    for destination in destinations:
        source, = maya.cmds.listConnections(destination, plugs=True)
        maya.cmds.disconnectAttr(source, destination)
def getSelectedObjects():
    """
    Return the names of the currently selected objects.

    :rtype: list[str]
    :raise mutils.SelectionError: when nothing is selected.
    """
    selection = maya.cmds.ls(selection=True)
    if selection:
        return selection
    raise mutils.SelectionError("No objects selected!")
def animCurve(fullname):
    """
    Return the animation curve for the given attribute.

    :type fullname: str
    :rtype: None | str
    """
    return mutils.Attribute(fullname).animCurve()
def deleteUnknownNodes():
    """
    Delete nodes of type "unknown" from the scene.

    NOTE(review): the inr=True (isNodeReferenced) filter means only
    *referenced* unknown nodes are deleted, which Maya normally refuses to
    do -- this may have been intended as `not referenceQuery(...)`; confirm.
    """
    nodes = maya.cmds.ls(type="unknown")
    if nodes:
        for node in nodes:
            if maya.cmds.objExists(node) and \
                maya.cmds.referenceQuery(node, inr=True):
                maya.cmds.delete(node)
def getSelectedAttrs():
    """
    Return the attribute names selected in the main channel box, with the
    short transform names expanded to their long forms.

    :rtype: list[str] or None
    """
    attributes = maya.cmds.channelBox("mainChannelBox", q=True, selectedMainAttributes=True)
    if attributes is not None:
        #-- FIX: the original round-tripped the list through str()/eval()
        #-- and ran substring .replace() on the repr, which corrupted any
        #-- attribute name merely *containing* "tx", "ry", etc. and used
        #-- eval() needlessly. Map exact names instead.
        longNames = {
            "tx": "translateX",
            "ty": "translateY",
            "tz": "translateZ",
            "rx": "rotateX",
            "ry": "rotateY",
            "rz": "rotateZ",
        }
        attributes = [longNames.get(attr, attr) for attr in attributes]
    return attributes
def getDurationFromNodes(nodes):
    """
    Return the animation duration (in frames) for the given nodes.

    :type nodes: list[str]
    :rtype: float
    """
    if not nodes:
        return 0
    first = maya.cmds.findKeyframe(nodes, which='first')
    last = maya.cmds.findKeyframe(nodes, which='last')
    if first != last:
        return last - first
    # A single keyframe spans zero frames but counts as duration 1.
    if maya.cmds.keyframe(nodes, query=True, keyframeCount=True) > 0:
        return 1
    return 0
|
# -*- coding: UTF-8 -*-
"""
爱丽丝和鲍勃一起玩游戏,他们轮流行动。爱丽丝先手开局。
最初,黑板上有一个数字 N 。在每个玩家的回合,玩家需要执行以下操作:
选出任一 x,满足 0 < x < N 且 N % x == 0 。
用 N - x 替换黑板上的数字 N 。
如果玩家无法执行这些操作,就会输掉游戏
只有在爱丽丝在游戏中取得胜利时才返回 True,否则返回 false。假设两个玩家都以最佳状态参与游戏。
示例 1:
输入:2
输出:true
解释:爱丽丝选择 1,鲍勃无法进行操作。
示例 2:
输入:3
输出:false
解释:爱丽丝选择 1,鲍勃也选择 1,然后爱丽丝无法进行操作。
提示:
1 <= N <= 1000
链接:https://leetcode-cn.com/problems/divisor-game
"""
class Solution:
    """Divisor game (LeetCode 1025): Alice wins exactly when N is even."""
    def divisorGame(self, N: int) -> bool:
        # By induction on parity: from an even N Alice can always hand Bob
        # an odd number, so even N is a winning position for the first mover.
        return not N % 2
if __name__ == '__main__':
    # Quick manual check: N = 5 is odd, so Alice loses (prints False).
    N = 5
    s = Solution()
    r = s.divisorGame(N)
    print(r)
|
"""blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
# NOTE: the '<str:slug>' converter does not match '/', so the more specific
# 'catview/...' and 'author/...' routes below remain reachable.
urlpatterns = [
    path('comment', views.blogComment , name='blogComment'), #Endpoint for the comment-submission API
    path('<str:slug>', views.blogPost , name='blogPost'), #Endpoint for viewing a single blog post by slug
    path('catview/allcat', views.allCat , name='allcat'), #Endpoint for viewing all categories
    path('catview/cat/<str:cat>', views.catView , name='catview'), #Endpoint for viewing posts within a category
    path('author/<str:auth>', views.authView , name='authview'), #Endpoint for viewing an author profile
]
|
from django.core.management.base import BaseCommand
from django.db.models import Count
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
Dataset,
Experiment,
ExperimentOrganismAssociation,
ExperimentSampleAssociation,
Organism,
OrganismIndex,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SampleComputedFileAssociation,
SurveyJob,
SurveyJobKeyValue,
)
logger = get_and_configure_logger(__name__)
# Minimum number of processed samples a platform must have before a
# QN_REFERENCE job is dispatched for its organism.
MIN = 100
class Command(BaseCommand):
    def handle(self, *args, **options):
        """ Dispatch QN_REFERENCE creation jobs for all Organisms with a platform with enough processed samples. """
        organisms = Organism.objects.all()
        for organism in organisms:
            # All processed Affymetrix microarray samples for this organism.
            samples = Sample.processed_objects.filter(
                organism=organism,
                has_raw=True,
                technology="MICROARRAY",
                is_processed=True,
                platform_name__contains="Affymetrix",
            )
            if samples.count() < MIN:
                # NOTE(review): log-message typos ("proccessed", "threshhold")
                # left untouched -- runtime strings are out of scope for a
                # comment-only pass.
                logger.info(
                    "Total proccessed samples don't meet minimum threshhold",
                    organism=organism,
                    count=samples.count(),
                    min=MIN,
                )
                continue
            # Pick the platform with the most processed samples for this
            # organism; the QN reference is built from that platform only.
            platform_counts = (
                samples.values("platform_accession_code")
                .annotate(dcount=Count("platform_accession_code"))
                .order_by("-dcount")
            )
            biggest_platform = platform_counts[0]["platform_accession_code"]
            sample_codes_results = Sample.processed_objects.filter(
                platform_accession_code=biggest_platform,
                has_raw=True,
                technology="MICROARRAY",
                organism=organism,
                is_processed=True,
            ).values("accession_code")
            # The largest platform alone must also clear the threshold.
            if sample_codes_results.count() < MIN:
                logger.info(
                    "Number of processed samples for largest platform didn't mean threshold.",
                    organism=organism,
                    platform_accession_code=biggest_platform,
                    count=sample_codes_results.count(),
                    min=MIN,
                )
                continue
            sample_codes = [res["accession_code"] for res in sample_codes_results]
            # Build an un-aggregated, un-scaled dataset keyed by
            # "<organism>_(<platform>)" covering those samples.
            dataset = Dataset()
            dataset.data = {organism.name + "_(" + biggest_platform + ")": sample_codes}
            dataset.aggregate_by = "ALL"
            dataset.scale_by = "NONE"
            dataset.quantile_normalize = False
            dataset.save()
            job = ProcessorJob()
            job.pipeline_applied = "QN_REFERENCE"
            job.save()
            # Associate the dataset with the job before dispatching it.
            pjda = ProcessorJobDatasetAssociation()
            pjda.processor_job = job
            pjda.dataset = dataset
            pjda.save()
            logger.info(
                "Sending QN_REFERENCE for Organism", job_id=str(job.pk), organism=str(organism)
            )
            send_job(ProcessorPipeline.QN_REFERENCE, job)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.