# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Stream dataset.
"""
import os
from os.path import join, exists
import numpy as np
from pgl.utils.data.dataloader import Dataloader
from pgl.utils.data.dataset import StreamDataset as PglStreamDataset
from pahelix.utils.data_utils import save_data_list_to_npz, load_npz_to_data_list
__all__ = ['StreamDataset']
class StreamDataset(object):
"""tbd"""
def __init__(self,
data_generator=None,
npz_data_path=None):
super(StreamDataset, self).__init__()
assert (data_generator is None) ^ (npz_data_path is None), \
"Exactly one of data_generator and npz_data_path should be set."
self.data_generator = data_generator
self.npz_data_path = npz_data_path
if npz_data_path is not None:
self.data_generator = self._load_npz_data(npz_data_path)
def _load_npz_data(self, data_path):
files = sorted(f for f in os.listdir(data_path) if f.endswith('.npz'))  # sorted for a deterministic read order
for file in files:
data_list = load_npz_to_data_list(join(data_path, file))
for data in data_list:
yield data
def _save_npz_data(self, data_list, data_path, max_num_per_file=10000):
if not exists(data_path):
os.makedirs(data_path)
sub_data_list = []
count = 0
for data in data_list:
sub_data_list.append(data)
if len(sub_data_list) == max_num_per_file:  # flush a full shard to disk
file = 'part-%05d.npz' % count
save_data_list_to_npz(join(data_path, file), sub_data_list)
sub_data_list = []
count += 1
if len(sub_data_list) > 0:
file = 'part-%05d.npz' % count
save_data_list_to_npz(join(data_path, file), sub_data_list)
def save_data(self, data_path):
"""tbd"""
self._save_npz_data(self.data_generator, data_path)
def iter_batch(self, batch_size, num_workers=4, shuffle_size=1000, collate_fn=None):
"""tbd"""
class _TempDataset(PglStreamDataset):
def __init__(self, data_generator):
self.data_generator = data_generator
def __iter__(self):
for data in self.data_generator:
yield data
return Dataloader(_TempDataset(self.data_generator),
batch_size=batch_size,
num_workers=num_workers,
stream_shuffle_size=shuffle_size,
collate_fn=collate_fn)
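# Example usage (a minimal sketch; `gen_data` and the cache directory are hypothetical):
#
#     def gen_data():
#         for i in range(100):
#             yield {'feature': np.zeros(8), 'label': i % 2}
#
#     dataset = StreamDataset(data_generator=gen_data())
#     dataset.save_data('./npz_cache')   # shards the stream into part-*.npz files
#     cached = StreamDataset(npz_data_path='./npz_cache')
#     for batch in cached.iter_batch(batch_size=32, num_workers=2):
#         ...   # consume batches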
|
from django.db.models import Q
from ct.models import UnitStatus, Response, InquiryCount
from core.common.mongo import c_faq_data, c_chat_context
from .chat import get_lesson_url
class START(object):
"""
Initialize data for viewing a courselet, and go immediately
to first lesson (not yet completed).
"""
def start_event(self, node, fsmStack, request, **kwargs):
"""
Event handler for START node.
"""
unit = fsmStack.state.get_data_attr('unit')
fsmStack.state.title = 'Study: %s' % unit.title
try: # use unitStatus if provided
unitStatus = fsmStack.state.get_data_attr('unitStatus')
except AttributeError: # create new, empty unitStatus
unitStatus = UnitStatus(unit=unit, user=request.user)
unitStatus.save()
fsmStack.state.set_data_attr('unitStatus', unitStatus)
unit_lesson, chat = kwargs.get('unitlesson'), kwargs.get('chat')
fsmStack.state.unitLesson = unit_lesson
fsmStack.state.set_data_attr('updates', kwargs.get('updates', False))
if kwargs.get('new_faqs'):
fsmStack.state.set_data_attr('new_faqs', kwargs.get('new_faqs', False))
faqs_for_ul = unit_lesson.response_set.filter(
~Q(author=request.user),
kind=Response.STUDENT_QUESTION,
is_preview=False,
is_test=False
).exclude(title__isnull=True).exclude(title__exact='').exists()
if faqs_for_ul:
c_faq_data().update_one(
{
"chat_id": chat.id,
"ul_id": unit_lesson.id
},
{"$set": {"faqs": {}}},
upsert=True
)
_next = 'show_faq' if faqs_for_ul else 'ask_new_faq'
return fsmStack.state.transition(
fsmStack, request, _next, useCurrent=True, **kwargs
)
# node specification data goes here
title = 'Start This Courselet'
edges = (
dict(name='show_faq', toNode='INTRO_MSG', title='View Next Lesson'),
dict(name='ask_new_faq', toNode='ASK_NEW_FAQ', title='View Next Lesson'),
)
class INTRO_MSG(object):
path = 'fsm:fsm_node'
title = 'Would any of the following questions help you? Select the one(s) you wish to view.'
edges = (
dict(name='next', toNode='SHOW_FAQS', title='Go to the end'),
)
class SHOW_FAQS(object):
get_path = get_lesson_url
def next_edge(self, edge, fsmStack, request, useCurrent=False, **kwargs):
fsm = edge.fromNode.fsm
return edge.toNode if request.data.get('selected') else fsm.get_node(name='ASK_NEW_FAQ')
title = 'SHOW_FAQS'
edges = (
dict(name='next', toNode='SHOW_FAQ_BY_ONE', title='Go to the end'),
)
class MSG_FOR_INQUIRY(object):
path = 'fsm:fsm_node'
title = 'MSG_FOR_INQUIRY'
edges = (
dict(name='next', toNode='SHOW_FAQ_BY_ONE', title='Go to the end'),
)
class SHOW_FAQ_BY_ONE(object):
path = 'fsm:fsm_node'
title = 'SHOW_FAQ_BY_ONE'
edges = (
dict(name='next', toNode='ASK_FOR_FAQ_ANSWER', title='Go to the end'),
)
class ASK_FOR_FAQ_ANSWER(object):
path = 'fsm:fsm_node'
title = 'Would the answer to this question help you?'
edges = (
dict(name='next', toNode='GET_FOR_FAQ_ANSWER', title='Go to the end'),
)
class GET_FOR_FAQ_ANSWER(object):
path = 'fsm:fsm_node'
title = 'GET_FOR_FAQ_ANSWER'
@staticmethod
def get_pending_faqs(chat_id, ul_id):
# TODO: use an assignment expression here once on Python 3.8+
faq_data = c_faq_data().find_one({"chat_id": chat_id, "ul_id": ul_id})
return faq_data.get('faqs', {}) if faq_data else {}
def next_edge(self, edge, fsmStack, request, useCurrent=False, **kwargs):
fsm = edge.fromNode.fsm
next_node = edge.toNode
ul_id = c_chat_context().find_one({"chat_id": fsmStack.id}).get('actual_ul_id')
if fsmStack.next_point.text.lower() == 'yes':
actual_faq_id = c_chat_context().find_one(
{"chat_id": fsmStack.id}).get('actual_faq_id', None)
faq = Response.objects.filter(id=int(actual_faq_id)).first()
try:
ob, _ = InquiryCount.objects.get_or_create(response=faq, addedBy=request.user)
except InquiryCount.MultipleObjectsReturned:
ob = InquiryCount.objects.filter(
response=faq, addedBy=request.user
).order_by('-atime').first()
faq.notify_instructors()
c_chat_context().update_one(
{"chat_id": fsmStack.id},
{"$set": {"actual_inquiry_id": ob.id}},
)
faq_answers = faq.response_set.all()
if faq_answers:
next_node = fsm.get_node('SHOW_FAQ_ANSWERS')
c_faq_data().update_one(
{
"chat_id": fsmStack.id,
"ul_id": ul_id
},
{"$set": {"faqs.{}.answers".format(actual_faq_id): [
{"done": False, "answer_id": answer.id} for answer in faq_answers
]}}
)
else:
next_node = fsm.get_node('WILL_TRY_MESSAGE_2')
else:
show_another_faq = any(
not value.get('status', {}).get('done', False)
for value in self.get_pending_faqs(chat_id=fsmStack.id, ul_id=ul_id).values()
)
if show_another_faq:
next_node = fsm.get_node('SHOW_FAQ_BY_ONE')
return next_node
edges = (
dict(name='next', toNode='ASK_NEW_FAQ', title='Go to the end'),
)
class SHOW_FAQ_ANSWERS(object):
path = 'fsm:fsm_node'
title = 'SHOW_FAQ_ANSWERS'
def next_edge(self, edge, fsmStack, request, useCurrent=False, **kwargs):
fsm = edge.fromNode.fsm
ul_id = c_chat_context().find_one({"chat_id": fsmStack.id}).get('actual_ul_id')
actual_faq_id = c_chat_context().find_one(
{"chat_id": fsmStack.id}).get('actual_faq_id', None)
if actual_faq_id:
faq_answers = c_faq_data().find_one(
{
"chat_id": fsmStack.id,
"ul_id": ul_id,
"faqs.{}.answers.done".format(actual_faq_id): False
}
)
next_node = fsm.get_node('SHOW_FAQ_ANSWERS') if faq_answers else fsm.get_node('ASK_UNDERSTANDING')
return next_node
return edge.toNode
edges = (
dict(name='next', toNode='ASK_UNDERSTANDING', title='Go to the end'),
)
class ASK_UNDERSTANDING(object):
path = 'fsm:fsm_node'
title = 'How well do you feel you understand now? If you need more clarification, tell us.'
edges = (
dict(name='next', toNode='GET_UNDERSTANDING', title='Go to the end'),
)
class GET_UNDERSTANDING(object):
path = 'fsm:fsm_node'
title = 'GET_UNDERSTANDING'
@staticmethod
def get_pending_faqs(chat_id, ul_id):
# TODO: use an assignment expression here once on Python 3.8+
faq_data = c_faq_data().find_one({"chat_id": chat_id, "ul_id": ul_id})
return faq_data.get('faqs', {}) if faq_data else {}
def next_edge(self, edge, fsmStack, request, useCurrent=False, **kwargs):
fsm = edge.fromNode.fsm
inquiry_id = c_chat_context().find_one({"chat_id": fsmStack.id}).get('actual_inquiry_id')
if inquiry_id:
ob = InquiryCount.objects.filter(id=inquiry_id).first()
ob.status = fsmStack.next_point.text.lower()
ob.save()
# Default value - go to asking for a new FAQ from the student
next_node = edge.toNode
if fsmStack.next_point.text.lower() == 'help':
next_node = fsm.get_node('WILL_TRY_MESSAGE_3')
else:
ul_id = c_chat_context().find_one({"chat_id": fsmStack.id}).get('actual_ul_id')
show_another_faq = any(
not value.get('status', {}).get('done', False)
for value in self.get_pending_faqs(chat_id=fsmStack.id, ul_id=ul_id).values()
)
if show_another_faq:
next_node = fsm.get_node('SHOW_FAQ_BY_ONE')
return next_node
edges = (
dict(name='next', toNode='ASK_NEW_FAQ', title='Go to the end'),
)
class WILL_TRY_MESSAGE_2(object):
path = 'fsm:fsm_node'
title = 'We will try to get you an answer to this.'
@staticmethod
def get_pending_faqs(chat_id, ul_id):
# TODO: use an assignment expression here once on Python 3.8+
faq_data = c_faq_data().find_one({"chat_id": chat_id, "ul_id": ul_id})
return faq_data.get('faqs', {}) if faq_data else {}
def next_edge(self, edge, fsmStack, request, useCurrent=False, **kwargs):
fsm = edge.fromNode.fsm
ul_id = c_chat_context().find_one({"chat_id": fsmStack.id}).get('actual_ul_id')
show_another_faq = any(
not value.get('status', {}).get('done', False)
for value in self.get_pending_faqs(chat_id=fsmStack.id, ul_id=ul_id).values()
)
return fsm.get_node('SHOW_FAQ_BY_ONE') if show_another_faq else edge.toNode
edges = (
dict(name='next', toNode='ASK_NEW_FAQ', title='Go to the end'),
)
class WILL_TRY_MESSAGE_3(object):
path = 'fsm:fsm_node'
title = 'We will try to provide more explanation for this.'
@staticmethod
def get_pending_faqs(chat_id, ul_id):
# TODO: use an assignment expression here once on Python 3.8+
faq_data = c_faq_data().find_one({"chat_id": chat_id, "ul_id": ul_id})
return faq_data.get('faqs', {}) if faq_data else {}
def next_edge(self, edge, fsmStack, request, useCurrent=False, **kwargs):
fsm = edge.fromNode.fsm
ul_id = c_chat_context().find_one({"chat_id": fsmStack.id}).get('actual_ul_id')
show_another_faq = any(
not value.get('status', {}).get('done', False)
for value in self.get_pending_faqs(chat_id=fsmStack.id, ul_id=ul_id).values()
)
return fsm.get_node('SHOW_FAQ_BY_ONE') if show_another_faq else edge.toNode
edges = (
dict(name='next', toNode='ASK_NEW_FAQ', title='Go to the end'),
)
class SELECT_NEXT_FAQ(object):
path = 'fsm:fsm_node'
title = 'SELECT_NEXT_FAQ'
@staticmethod
def get_pending_faqs(chat_id, ul_id):
# TODO: use an assignment expression here once on Python 3.8+
faq_data = c_faq_data().find_one({"chat_id": chat_id, "ul_id": ul_id})
return faq_data.get('faqs', {}) if faq_data else {}
def next_edge(self, edge, fsmStack, request, useCurrent=False, **kwargs):
fsm = edge.fromNode.fsm
ul_id = c_chat_context().find_one({"chat_id": fsmStack.id}).get('actual_ul_id')
show_another_faq = any(
not value.get('status', {}).get('done', False)
for value in self.get_pending_faqs(chat_id=fsmStack.id, ul_id=ul_id).values()
)
return fsm.get_node('SHOW_FAQ_BY_ONE') if show_another_faq else edge.toNode
edges = (
dict(name='next', toNode='ASK_NEW_FAQ', title='Go to the end'),
)
class ASK_NEW_FAQ(object):
path = 'fsm:fsm_node'
title = 'Is there anything else about this point you\'re wondering about, would like clarified, or are unsure of?'
edges = (
dict(name='next', toNode='GET_NEW_FAQ', title='Go to the end'),
)
class GET_NEW_FAQ(object):
path = 'fsm:fsm_node'
title = 'GET_NEW_FAQ'
def next_edge(self, edge, fsmStack, request, useCurrent=False, **kwargs):
fsm = edge.fromNode.fsm
next_point = fsmStack.next_point
return fsm.get_node('NEW_FAQ_TITLE') if next_point.text and next_point.text.lower() == 'yes' else edge.toNode
edges = (
dict(name='next', toNode='END', title='Go to the end'),
)
class FUCK(object):
path = 'fsm:fsm_node'
title = 'FUCK'
edges = (
dict(name='next', toNode='END', title='Go to the end'),
)
class ADDING_FAQ(object):
path = 'fsm:fsm_node'
title = 'ADDING_FAQ'
def get_help(self, node, state, request):
return """
First, write a 'headline version' of your question
as a single sentence, as clearly and simply
as you can. (You'll have a chance to explain your
question fully in the next step)
"""
edges = (
dict(name='next', toNode='NEW_FAQ_TITLE', title='Go to the end'),
)
class NEW_FAQ_TITLE(object):
path = 'fsm:fsm_node'
title = 'First, write a \'headline version\' of your question as a single sentence, as clearly and simply as you can. (You\'ll have a chance to explain your question fully in the next step)'
edges = (
dict(name='next', toNode='GET_NEW_FAQ_TITLE', title='Go to the end'),
)
class GET_NEW_FAQ_TITLE(object):
path = 'fsm:fsm_node'
title = 'GET_NEW_FAQ_TITLE'
edges = (
dict(name='next', toNode='NEW_FAQ_DESCRIPTION', title='Go to the end'),
)
class NEW_FAQ_DESCRIPTION(object):
path = 'fsm:fsm_node'
title = 'Next, let\'s nail down exactly what you\'re unsure about, by applying your question to a real-world situation, to identify what specific outcome you\'re unsure about (e.g. is A going to happen, or B?)'
edges = (
dict(name='next', toNode='GET_NEW_FAQ_DESCRIPTION', title='Go to the end'),
)
class GET_NEW_FAQ_DESCRIPTION(object):
path = 'fsm:fsm_node'
title = 'GET_NEW_FAQ_DESCRIPTION'
edges = (
dict(name='next', toNode='WILL_TRY_MESSAGE', title='Go to the end'),
)
class WILL_TRY_MESSAGE(object):
path = 'fsm:fsm_node'
title = 'We\'ll try to get you an answer to this.'
edges = (
dict(name='next', toNode='END', title='Go to the end'),
)
class END(object):
def get_path(self, node, state, request, **kwargs):
"""
Get URL for next steps in this unit.
"""
unitStatus = state.get_data_attr('unitStatus')
return unitStatus.unit.get_study_url(request.path)
# node specification data goes here
title = 'Additional lessons completed'
help = '''OK, let's continue.'''
def get_specs():
"""
Get FSM specifications stored in this file.
"""
from fsm.fsmspec import FSMSpecification
spec = FSMSpecification(
name='faq',
hideTabs=True,
title='Take the courselet core lessons',
pluginNodes=[
START,
SHOW_FAQS,
INTRO_MSG,
MSG_FOR_INQUIRY,
SHOW_FAQ_BY_ONE,
ASK_FOR_FAQ_ANSWER,
GET_FOR_FAQ_ANSWER,
SHOW_FAQ_ANSWERS,
ASK_UNDERSTANDING,
GET_UNDERSTANDING,
WILL_TRY_MESSAGE_2,
WILL_TRY_MESSAGE_3,
SELECT_NEXT_FAQ,
ASK_NEW_FAQ,
GET_NEW_FAQ,
ADDING_FAQ,
NEW_FAQ_TITLE,
GET_NEW_FAQ_TITLE,
NEW_FAQ_DESCRIPTION,
GET_NEW_FAQ_DESCRIPTION,
WILL_TRY_MESSAGE,
FUCK,
END],
)
return (spec,)
|
from __future__ import absolute_import, unicode_literals
import datetime
from decimal import Decimal
from time import sleep
import requests
from celery import shared_task
from django.contrib.auth import get_user_model
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.utils import timezone
from coinginie.utils.function import emailInvoiceClient
from coinginie.wallet.models import Deposit, Transactions, Wallet
from config import celery_app
from logger import LOGGER
User = get_user_model()
today = datetime.date.today()
now = timezone.now()
@shared_task
def sleepy(duration):
sleep(duration)
return None
@celery_app.task()
def send_deposit_mail(to_email, subject, body):
sleep(2)
emailInvoiceClient(to_email, subject, body)
return None
@celery_app.task()
def send_admin_mail(to_email, subject, body):
sleep(2)
emailInvoiceClient(to_email, subject, body)
return None
@celery_app.task()
def daily_roi(instance_id):
instance = Deposit.objects.get(id=instance_id)
profit = Decimal(instance.amount) * Decimal(instance.user.subscription.contract.profit)
if instance.verified:
Transactions.objects.create(
user=instance.user,
currency=instance.currency,
type=Transactions.ROI,
amount=profit,
verified=True
)
if instance.currency == Deposit.BITCOIN:
bal = profit + instance.user.wallet.bitcoin_balance
t_inv = instance.user.wallet.total_investment + profit
Wallet.objects.filter(
user=instance.user,
).update(bitcoin_balance=bal, recent_balance_added=profit, total_investment=t_inv)
elif instance.currency == Deposit.LITECOIN:
bal = profit + instance.user.wallet.litecoin_balance
t_inv = instance.user.wallet.total_investment + profit
Wallet.objects.filter(
user=instance.user,
).update(litecoin_balance=bal, recent_balance_added=profit, total_investment=t_inv)
elif instance.currency == Deposit.ETHEREUM:
bal = profit + instance.user.wallet.ethereum_balance
t_inv = instance.user.wallet.total_investment + profit
Wallet.objects.filter(
user=instance.user,
).update(ethereum_balance=bal, recent_balance_added=profit, total_investment=t_inv)
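# Example usage (a sketch): queue the daily ROI task for a deposit, e.g. from a
# Celery beat schedule or a post-save signal (the trigger shown here is hypothetical):
#
#     daily_roi.delay(deposit.id)   # runs asynchronously on a Celery worker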
|
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
import doctest
from typing import Optional
import numpy as np
import math
def check_df_image_size(df: pd.DataFrame, target_column: str) -> None:
"""
Checks image sizes from Pandas DataFrame and adds two additional columns to it with sizes
:param df: Pandas dataFrame where you have info.
:param target_column: Column where paths to images are defined
:return: None
"""
for i, row in df.iterrows():
im = Image.open(row[target_column])
w, h = im.size
df.at[i, 'width'] = w
df.at[i, 'height'] = h
# df.at[i, 'shape'] = str(f'{w},{h}')
def plot_df_images(
df: pd.DataFrame,
path_column: str,
image_count: int,
label_column: Optional[str]=None,
random_plot: Optional[bool]=False,
) -> None:
"""
Plots images from Pandas DataFrame column where paths are added.
:param df: Pandas DataFrame with needed info.
:param path_column: Column where paths are defined.
:param image_count: How many images you want to plot.
:param label_column: image label if you want to add it to the name.
:param random_plot: Whether you want to plot images from DataFrame randomly.
:return: None
"""
pictures = df[path_column].tolist()
if image_count < 3:
cols, rows = (image_count, 1)
else:
cols, rows = (3, math.ceil(image_count / 3))
fig = plt.figure(figsize=(5 * cols, 5 * rows))
if len(pictures) < image_count:
image_count = len(pictures)
if not random_plot:
images_to_plot = range(image_count)
else:
images_to_plot = np.random.choice(range(len(pictures)), image_count, replace=False)
for i, value in enumerate(images_to_plot):
image = pictures[value]
if label_column:
label = f' / {df[df[path_column] == image][label_column].values[0]}'
else:
label = ''
if '/' in image:
img_name = image.split('/')[-1]
else:
img_name = image
fig.add_subplot(rows, cols, i + 1, title=f'{img_name}{label}')
plt.imshow(Image.open(image))
plt.show()
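# Example usage (a sketch; the 'path' and 'label' columns are hypothetical):
#
#     df = pd.DataFrame({'path': ['imgs/a.jpg', 'imgs/b.jpg'], 'label': ['cat', 'dog']})
#     check_df_image_size(df, 'path')   # adds 'width'/'height' columns in place
#     plot_df_images(df, 'path', image_count=2, label_column='label')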
if __name__ == '__main__':
pass
# plot_df_images(df, target_column, 3, 1)
|
import radio
from microbit import *
# Set the queue length to 1 to ensure that the next message is the most recent
radio.on()
#radio.config(queue=1)
state = { 'a': False, 'b': False }
motors = { 'l': 0, 'r': 0 }
pin1.write_digital(0)
# Capacitor: 3.3uF, Resistor: 15kΩ (I think, could be 10kΩ)
# Signal resolution: 16, corresponds to about 5 on the RCX
DELAY=1000
def setnum(n,m):
# display.show(str(n))
pin1.write_analog(m)
# sleep(DELAY)
pin1.set_analog_period(1)
while True:
data = radio.receive()
# radio.receive() returns None (not the string 'None') when nothing is waiting
if data is None:
continue
if type(data) is not str:
continue
v = int(data.split(':')[0],16)
b = (v & 1 == 1)
v >>= 1
a = (v & 1 == 1)
v >>= 1
z = v & 255
v >>= 8
y = v & 255
v >>= 8
x = v & 255
y >>= 4
y -= (y & 8)*2 - 8
setnum(y,y*64)
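# Sender-side sketch of the packing this loop decodes (an assumption inferred from
# the bit layout above: b and a are button flags, z/y/x are 8-bit values):
#
#     v = (x << 18) | (y << 10) | (z << 2) | (int(a) << 1) | int(b)
#     radio.send('%x:' % v)   # hex payload; the receiver parses data.split(':')[0]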
|
import json
from django.db.models import CharField
from django.contrib.postgres.fields import JSONField
from model_utils.managers import InheritanceManager
from rest_framework.utils.encoders import JSONEncoder
from .graph import AbstractGraph
from .node import AbstractNode
from .edge import AbstractEdge
from .structure import AbstractStructure
from .analysis import AbstractAnalysis
from .visualization import AbstractVisualization
from cctool.common.enums import (
FunctionOption,
FunctionShortcode,
ControllabilityOption,
ControllabilityShortcode,
VulnerabilityOption,
VulnerabilityShortcode,
ImportanceOption,
ImportanceShortcode,
ConnectionOption,
ConnectionShortcode,
)
class Graph(AbstractGraph):
class Meta(AbstractGraph.Meta):
abstract = False
def save(self, *args, **kwargs):
if not hasattr(self, 'structure'):
self.structure = Structure.objects.create()
if not hasattr(self, 'visualization'):
self.visualization = Visualization.objects.create()
super(Graph, self).save(*args, **kwargs)
class Node(AbstractNode):
objects = InheritanceManager()
class Meta(AbstractNode.Meta):
abstract = False
class Edge(AbstractEdge):
objects = InheritanceManager()
class Meta(AbstractEdge.Meta):
abstract = False
class Analysis(AbstractAnalysis):
class Meta(AbstractAnalysis.Meta):
abstract = False
def save(self, *args, **kwargs):
if not hasattr(self, 'visualization'):
self.visualization = Visualization.objects.create()
super(Analysis, self).save(*args, **kwargs)
class Structure(AbstractStructure):
class Meta(AbstractStructure.Meta):
abstract = False
class Visualization(AbstractVisualization):
class Meta(AbstractVisualization.Meta):
abstract = False
class NodePlus(Node):
function = CharField(
choices=list(zip(FunctionShortcode.__values__, FunctionOption.__values__)),
blank=False,
default=FunctionShortcode.LINEAR_FUNCTION.value,
max_length=1,
verbose_name='node function'
)
controllability = CharField(
choices=list(zip(ControllabilityShortcode.__values__, ControllabilityOption.__values__)),
blank=False,
default=ControllabilityShortcode.NO_CONTROLLABILITY.value,
max_length=1,
verbose_name='node controllability'
)
vulnerability = CharField(
choices=list(zip(VulnerabilityShortcode.__values__, VulnerabilityOption.__values__)),
blank=False,
default=VulnerabilityShortcode.NO_VULNERABILITY.value,
max_length=1,
verbose_name='node vulnerability'
)
importance = CharField(
choices=list(zip(ImportanceShortcode.__values__, ImportanceOption.__values__)),
blank=False,
default=ImportanceShortcode.NO_IMPORTANCE.value,
max_length=1,
verbose_name='node importance'
)
tags = JSONField(
default=list,
null=True,
blank=True
)
custom = JSONField(
default=dict,
null=True,
blank=True
)
def save(self, *args, **kwargs):
if self.tags:
if 'intervention' in self.tags:
self.tags = list(map(lambda x:'Intervention' if x == 'intervention' else x, self.tags))
if 'outcome' in self.tags:
self.tags = list(map(lambda x: 'Outcome' if x == 'outcome' else x, self.tags))
super(NodePlus, self).save(*args, **kwargs)
def to_json(self, use_dict=False, **kwargs):
"""
Representation of Node object in JSON format
"""
properties = dict()
properties['function'] = self.function
properties['controllability'] = self.controllability
properties['vulnerability'] = self.vulnerability
properties['importance'] = self.importance
if self.tags:
properties['tags'] = self.tags
if self.custom:
properties['custom'] = self.custom
output = super(Node, self).to_json(use_dict=True, **kwargs)
output['cctool'] = properties
if use_dict:
return output
return json.dumps(output, cls=JSONEncoder, **kwargs)
class EdgePlus(Edge):
weight = CharField(
choices=list(zip(ConnectionShortcode.__values__, ConnectionOption.__values__)),
blank=False,
default=ConnectionShortcode.COMPLEX_CONNECTION.value,
max_length=2,
verbose_name='edge weight'
)
tags = JSONField(
default=list,
null=True,
blank=True
)
custom = JSONField(
default=dict,
null=True,
blank=True
)
def to_json(self, use_dict=False, **kwargs):
"""
Representation of Edge object in JSON format
"""
properties = dict()
properties['weight'] = self.weight
if self.tags:
properties['tags'] = self.tags
if self.custom:
properties['custom'] = self.custom
output = super(Edge, self).to_json(use_dict=True, **kwargs)
output['cctool'] = properties
if use_dict:
return output
return json.dumps(output, cls=JSONEncoder, **kwargs)
|
from .propagation import UHECRPropagationResult, UHECRPropagationSolverBDF, UHECRPropagationSolverEULER
|
import os
import numpy as np
import torch
from PIL import Image
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from warpctc_pytorch import CTCLoss
# NOTE: Progbar is used below but was never imported in the original script;
# tf.keras.utils.Progbar has a compatible target/update interface (an assumption).
from tensorflow.keras.utils import Progbar
os.chdir('../../')
from train.ocr.dataset import PathDataset, randomSequentialSampler, alignCollate
from glob import glob
from sklearn.model_selection import train_test_split
roots = glob('./train/data/ocr/*/*.jpg')
# training character set
alphabetChinese = '1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
trainP, testP = train_test_split(roots, test_size=0.1)  # NOTE: this split does not balance character classes
traindataset = PathDataset(trainP, alphabetChinese)
testdataset = PathDataset(testP, alphabetChinese)
batchSize = 32
workers = 1
imgH = 32
imgW = 280
keep_ratio = True
cuda = True
ngpu = 1
nh = 256
sampler = randomSequentialSampler(traindataset, batchSize)
train_loader = torch.utils.data.DataLoader(
traindataset, batch_size=batchSize,
shuffle=False, sampler=None,
num_workers=int(workers),
collate_fn=alignCollate(imgH=imgH, imgW=imgW, keep_ratio=keep_ratio))
train_iter = iter(train_loader)
## load pretrained model weights
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
from crnn.models.crnn import CRNN
from config import ocrModel, LSTMFLAG, GPU
model = CRNN(32, 1, len(alphabetChinese) + 1, 256, 1, lstmFlag=LSTMFLAG)
model.apply(weights_init)
preWeightDict = torch.load(ocrModel, map_location=lambda storage, loc: storage)  # load the project's trained weights
modelWeightDict = model.state_dict()
for k, v in preWeightDict.items():
name = k.replace('module.', '') # remove `module.`
if 'rnn.1.embedding' not in name:  # do not load the last layer's weights
modelWeightDict[name] = v
model.load_state_dict(modelWeightDict)
## optimizer
from crnn.util import strLabelConverter
lr = 0.1
optimizer = optim.Adadelta(model.parameters(), lr=lr)
converter = strLabelConverter(''.join(alphabetChinese))
criterion = CTCLoss()
from train.ocr.dataset import resizeNormalize
from crnn.util import loadData
image = torch.FloatTensor(batchSize, 3, imgH, imgH)
text = torch.IntTensor(batchSize * 5)
length = torch.IntTensor(batchSize)
if torch.cuda.is_available():
model.cuda()
model = torch.nn.DataParallel(model, device_ids=[0])  # wrap for multi-GPU training
image = image.cuda()
criterion = criterion.cuda()
def trainBatch(net, criterion, optimizer, cpu_images, cpu_texts):
# data = train_iter.next()
# cpu_images, cpu_texts = data
batch_size = cpu_images.size(0)
loadData(image, cpu_images)
t, l = converter.encode(cpu_texts)
loadData(text, t)
loadData(length, l)
preds = net(image)
preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
cost = criterion(preds, text, preds_size, length) / batch_size
net.zero_grad()
cost.backward()
optimizer.step()
return cost
def predict(im):
"""
Predict the text in a single image.
"""
image = im.convert('L')
scale = image.size[1] * 1.0 / 32
w = image.size[0] / scale
w = int(w)
transformer = resizeNormalize((w, 32))
image = transformer(image)
if torch.cuda.is_available():
image = image.cuda()
image = image.view(1, *image.size())
image = Variable(image)
preds = model(image)
_, preds = preds.max(2)
preds = preds.transpose(1, 0).contiguous().view(-1)
preds_size = Variable(torch.IntTensor([preds.size(0)]))
sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
return sim_pred
def val(net, dataset, max_iter=100):
for p in net.parameters():
p.requires_grad = False
net.eval()
i = 0
n_correct = 0
N = len(dataset)
max_iter = min(max_iter, N)
for i in range(max_iter):
im, label = dataset[np.random.randint(0, N)]
if im.size[0] > 1024:
continue
pred = predict(im)
if pred.strip() == label:
n_correct += 1
accuracy = n_correct / float(max_iter)
return accuracy
## model training
## freeze the pretrained layers' parameters
nepochs = 10
acc = 0
interval = len(train_loader) // 2  # evaluate the model twice per epoch
for i in range(nepochs):
print('epoch:{}/{}'.format(i, nepochs))
n = len(train_loader)
pbar = Progbar(target=n)
train_iter = iter(train_loader)
loss = 0
for j in range(n):
for name, param in model.named_parameters():
# only train the final embedding layer; freeze all other (pretrained) layers
param.requires_grad = 'rnn.1.embedding' in name
model.train()
cpu_images, cpu_texts = next(train_iter)
cost = trainBatch(model, criterion, optimizer, cpu_images, cpu_texts)
loss += cost.data.cpu().numpy()
if (j + 1) % interval == 0:
curAcc = val(model, testdataset, max_iter=1024)
if curAcc > acc:
acc = curAcc
torch.save(model.state_dict(), 'train/ocr/modellstm.pth')
pbar.update(j + 1, values=[('loss', loss / ((j + 1) * batchSize)), ('acc', acc)])
## unfreeze all model parameters
nepochs = 10
# acc = 0
interval = len(train_loader) // 2  # evaluate the model twice per epoch
for i in range(10, 10 + nepochs):
print('epoch:{}/{}'.format(i, nepochs))
n = len(train_loader)
pbar = Progbar(target=n)
train_iter = iter(train_loader)
loss = 0
for j in range(n):
for p in model.named_parameters():
p[1].requires_grad = True
model.train()
cpu_images, cpu_texts = next(train_iter)
cost = trainBatch(model, criterion, optimizer, cpu_images, cpu_texts)
loss += cost.data.cpu().numpy()
if (j + 1) % interval == 0:
curAcc = val(model, testdataset, max_iter=1024)
if curAcc > acc:
acc = curAcc
torch.save(model.state_dict(), 'train/ocr/modellstm.pth')
pbar.update(j + 1, values=[('loss', loss / ((j + 1) * batchSize)), ('acc', acc)])
## prediction demo
model.eval()
N = len(testdataset)
im, label = testdataset[np.random.randint(0, N)]
pred = predict(im)
print('true:{}, pred:{}'.format(label, pred))
im  # when run in a notebook, this displays the sampled test image
|
import delfi.distribution as dd
import delfi.generator as dg
import delfi.inference as infer
import delfi.utils.io as io
import delfi.summarystats as ds
#from delfi.kernel.Kernel_learning import kernel_opt
import numpy as np
import os
from scipy.special import logsumexp  # scipy.misc.logsumexp was removed in SciPy 1.0+
import sys
def run_smc(model, prior, summary, obs_stats,
seed=None, n_particles=1e3, eps_init=2, maxsim=5e7,
fn=None, AW=False, OW=False):
"""Runs Sequential Monte Carlo ABC algorithm.
Adapted from epsilonfree code https://raw.githubusercontent.com/gpapamak/epsilon_free_inference/8c237acdb2749f3a340919bf40014e0922821b86/demos/mg1_queue_demo/mg1_abc.py
Parameters
----------
model :
Model
prior :
Prior
summary :
Function to compute summary statistics
obs_stats:
Observed summary statistics
seed : int or None
If set, randomness in sampling is disabled
n_particles : int
Number of particles for SMC-ABC
eps_init : Float
Initial tolerance for SMC-ABC
maxsim : int
Maximum number of simulations for SMC-ABC
"""
set_folders()
n_params, n_stats = get_in_out_dims(model, prior, summary)
prefix = str(n_params)+'params'
n_particles = int(n_particles)
maxsim = int(maxsim)
#####################
np.random.seed(seed)
# set parameters
ess_min = 0.0
eps_lvls, eps_last = set_eps_lvls(eps_init)
all_ps, all_xs, all_logweights, all_eps, all_nsims = [], [], [], [], []
# sample initial population
eps = eps_lvls[0]
ps, xs, logweights, nsims = sample_lvl_init(model, prior, summary,
calc_dist, obs_stats, n_particles,
n_params, n_stats, eps)
remsims = maxsim - nsims
weights = np.exp(logweights)
all_ps.append(ps)
all_xs.append(xs)
all_logweights.append(logweights)
all_eps.append(eps)
all_nsims.append(nsims)
iter = 0
print('iteration = {0}, eps = {1:.2}, ess = {2:.2%}, nsims = {3}'.format(iter, float(eps), 1.0, nsims))
while eps > eps_last:
# calculate kernel bandwidth
bw_x = set_kernel_bandwidths(data=xs, weights=weights,
obs_stats=obs_stats, rule='of_thumb') if AW else None
iter += 1
eps = eps_lvls[iter]
ps, xs, logweights, nsims, break_flag, bw_th = sample_lvl(model, prior, summary,
calc_dist, obs_stats, ps, xs, logweights,
eps, remsims, bw_x, OW)
weights = np.exp(logweights)
remsims -= nsims
if break_flag:
break
# calculate effective sample size
ess = 1.0 / (np.sum(weights ** 2) * n_particles)
print('iteration = {0}, eps = {1:.2}, ess = {2:.2%}, nsims = {3}'.format(iter, float(eps), ess, nsims))
if ess < ess_min:
ps, xs, logweights = resample(ps, xs, weights)
weights = np.exp(logweights)
all_ps.append(ps)
all_xs.append(xs)
all_logweights.append(logweights)
all_eps.append(eps_lvls[iter])
all_nsims.append(nsims)
if fn is not None:
np.save(fn,
{'seed' : seed,
'all_ps' : all_ps,
'all_logweights' : all_logweights,
'all_eps' : all_eps,
'all_nsims' : all_nsims,
'model' : model,
'prior' : prior,
'summary' : summary,
'obs_stats' : obs_stats,
'n_particles' : n_particles,
'maxsim' : maxsim,
'eps_init' : eps_init})
return all_ps, all_xs, all_logweights, all_eps, all_nsims
def calc_dist(stats_1, stats_2):
"""Euclidian distance between summary statistics"""
return np.sqrt(np.sum((stats_1 - stats_2) ** 2))
def set_folders():
# check for subfolders, create if they don't exist
dirs = {}
dirs['dir_abc'] = './results/abc/'
for k, v in dirs.items():
if not os.path.exists(v):
os.makedirs(v)
def get_in_out_dims(model, prior, summary):
p = prior.gen(n_samples=1)[0]
x = summary.calc([ model.gen_single(p) ])
return p.size, x.size
def set_eps_lvls(eps_init, eps_last=0.01, eps_decay=0.9):
if isinstance(eps_init, list):
eps_last = eps_init[-1]
else:
eps, eps_init = eps_init, []
while eps > eps_last:
eps *= eps_decay
eps_init.append(eps)
return eps_init, eps_last
def sample_lvl_init(model, prior, summary, calc_dist, obs_stats,
n_particles, n_params, n_stats, eps):
ps, xs = np.empty([n_particles, n_params]), np.empty([n_particles, n_stats])
logweights = np.zeros(n_particles) - np.log(n_particles)
nsims = 0
for i in range(n_particles):
dist = float('inf')
while dist > eps:
ps[i] = prior.gen(n_samples=1)[0]
states = model.gen_single(ps[i])
xs[i] = summary.calc([states])
dist = calc_dist(xs[i], obs_stats)
nsims += 1
return ps, xs, logweights, nsims
def sample_lvl(model, prior, summary, calc_dist, obs_stats,
ps, xs, logweights, eps, remsims, bw_x, OW):
n_particles, n_params = ps.shape
# perturb particles
new_ps, new_xs = np.empty_like(ps), np.empty_like(xs)
new_logweights = np.empty_like(logweights)
if bw_x is not None:
# adapt log-weights with kernels ( w -> v in Bonassi & West !)
print('using AW')
logweights = logweights - 0.5 * np.sum(np.linalg.solve(bw_x, (obs_stats - xs).T) ** 2, axis=0)
logweights = logweights - logsumexp(logweights)
weights = np.exp(logweights)
bw_th = set_kernel_bandwidths(data=ps, weights=weights,
obs_stats=obs_stats, rule='of_thumb')
nsims, break_flag = 0, False
for i in range(n_particles):
dist = float('inf')
while dist > eps:
idx = discrete_sample(weights)[0]
new_ps[i] = ps[idx] + np.dot(bw_th, np.random.randn(n_params))
if isinstance(prior, dd.Uniform):
while np.any(new_ps[i] < prior.lower) or \
np.any(new_ps[i] > prior.upper):
new_ps[i] = ps[idx] + np.dot(bw_th, np.random.randn(n_params))
states = model.gen_single(new_ps[i])
new_xs[i] = summary.calc([states])
dist = calc_dist(new_xs[i], obs_stats)
nsims += 1
if nsims>=remsims:
print('Maximum number of simulations reached.')
break_flag = True
break
# k_{th,t}(theta^(t)_j | theta^{t-1}_j)
logkernel = -0.5 * np.sum(np.linalg.solve(bw_th, (new_ps[i] - ps).T) ** 2, axis=0)
new_logweights[i] = prior.eval(new_ps[i, np.newaxis], log=True)[0] - logsumexp(logweights + logkernel)
if break_flag:
break
new_logweights = new_logweights - logsumexp(new_logweights)
return new_ps, new_xs, new_logweights, nsims, break_flag, bw_th
def resample(ps, xs, weights):
# resample particles
new_ps, new_xs = np.empty_like(ps), np.empty_like(xs)
for i in range(new_ps.shape[0]):
idx = discrete_sample(weights)[0]
new_ps[i] = ps[idx]
new_xs[i] = xs[idx]
logweights = np.zeros_like(weights) - np.log(weights.size)
return new_ps, new_xs, logweights
def set_kernel_bandwidths(data, weights, obs_stats, rule='of_thumb',
data2=None, dist=None, eps=None):
n_particles = weights.size
assert n_particles == data.shape[0]
d = 2 * data.shape[1] # assumes #parameters = #summary stats !
if rule == 'of_thumb':
# see West (1993) and Scott & Sain (2005)
cov = np.diag(np.diag(np.atleast_2d(np.cov(data.T, aweights=weights))))
std = np.linalg.cholesky(cov)
return std * n_particles**( - 1./(d+4.))
if rule == 'norm_comp_opt':
# optimal component-wise Gaussian kernels
# see Filippi et al. (2012)
idx0 = np.where(dist(data2, obs_stats) < eps)[0]
w = weights[idx0] / weights[idx0].sum()
# component-wise variance (Filippi et al. 2012): sig2_k = sum_i0 w~_i0 sum_j w_j (theta_{i0,k} - theta_{j,k})^2
diffs = data[idx0, :].reshape(idx0.size, 1, -1) - data.reshape(1, n_particles, -1)
sig2s = np.einsum('i,j,ijk->k', w, weights, diffs ** 2)
return np.diag(np.sqrt(sig2s))
if rule == 'norm_full_opt':
# optimal full Gaussian kernels
# see Filippi et al. (2012)
idx0 = np.where(dist(data2, obs_stats) < eps)[0]
w = weights[idx0] / weights[idx0].sum()
sii = np.einsum('ij,ik,i->jk', data[idx0], data[idx0], w)
sjj = np.einsum('ij,ik,i->jk', data, data, weights)
sij = np.einsum('ij,kl,i,k->jl', data, data[idx0], weights, w)
return np.linalg.cholesky(sii + sjj - sij - sij.T)
if rule == 'norm_OLCM':
# optimal local covariance matrix kernels
# see Filippi et al. (2012)
#idx0 = np.where(dist(data2, obs_stats) < eps)[0]
#w = weights[idx0] / weights[idx0].sum()
#bw = np.empty((n_particles, d, d))
#for i in range(n_particles):
# tmp = data[idx0] - data[i]
# bw[i] = np.einsum('ij,ik,i->jk', tmp, tmp, w)
# return bw # bw.shape = (n_particles, d, d) !
raise NotImplementedError
elif rule == 'None' or rule is None:
return None
elif rule == 'x_kl':
raise NotImplementedError
"""
print('fitting kernel')
cbkrnl, cbl = kernel_opt(
iws=weights.astype(np.float32),
stats=data.astype(np.float32),
obs=obs_stats.astype(np.float32),
kernel_loss='x_kl',
epochs=1000,
minibatch=n_particles//10,
stop_on_nan=True,
seed=99,
monitor=None)
cbkrnl.B += 1e-10 * np.ones_like(cbkrnl.B)
#print('cbkrnl.B', cbkrnl.B)
return np.linalg.inv(np.diag(cbkrnl.B))
"""
else:
raise NotImplementedError
def discrete_sample(p, n_samples=1):
"""
Samples from a discrete distribution.
:param p: a distribution with N elements
:param n_samples: number of samples
:return: vector of samples
"""
# check distribution
#assert isdistribution(p), 'Probabilities must be non-negative and sum to one.'
# cumulative distribution
c = np.cumsum(p[:-1])[np.newaxis, :]
# get the samples
r = np.random.rand(n_samples, 1)
return np.sum((r > c).astype(int), axis=1)
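# Quick sanity check (a sketch; p must be non-negative and sum to one):
#
#     p = np.array([0.2, 0.5, 0.3])
#     idx = discrete_sample(p, n_samples=5)   # e.g. array([1, 2, 1, 0, 1])
#     # each entry i is drawn with probability p[i] via the inverse-CDF comparison above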
|
__all__ = ['TUPLE', 'LIST', 'DICT']
from .base import Head, heads_precedence
def init_module(m):
from .base import heads
for n,h in heads.iterNameValue(): setattr(m, n, h)
class ContainerHead(Head):
def data_to_str_and_precedence(self, cls, seq):
c_p = heads_precedence.COMMA
s_p = getattr(heads_precedence, repr(self))
l = []
for item in seq:
i, i_p = item.head.data_to_str_and_precedence(cls, item.data)
if i_p < c_p: i = '('+i+')'
l.append(i)
s1, s2 = self.parenthesis
if self is TUPLE and len(l)==1:
return s1 + l[0] + ',' + s2, s_p
return s1 + ', '.join(l) + s2, s_p
class TupleHead(ContainerHead):
"""
TupleHead represents n-tuple,
data is n-tuple of expressions.
"""
parenthesis = '()'
def __repr__(self): return 'TUPLE'
class ListHead(ContainerHead):
"""
ListHead represents n-list,
data is n-tuple of expressions.
"""
parenthesis = '[]'
def __repr__(self): return 'LIST'
class DictHead(ContainerHead):
"""
DictHead represents n-dict,
data is n-tuple of expression pairs.
"""
def data_to_str_and_precedence(self, cls, seq2):
c_p = heads_precedence.COMMA
colon_p = heads_precedence.COLON
s_p = getattr(heads_precedence, repr(self))
l = []
for key, value in seq2:
k, k_p = key.head.data_to_str_and_precedence(cls, key.data)
if k_p < colon_p: k = '('+k+')'
v, v_p = value.head.data_to_str_and_precedence(cls, value.data)
if v_p < colon_p: v = '('+v+')'
l.append(k + ':' + v)
return '{' + ', '.join(l) + '}', s_p
def __repr__(self): return 'DICT'
TUPLE = TupleHead()
LIST = ListHead()
DICT = DictHead()
|
#
# -*- coding: utf-8 -*-
#
# This file is part of reclass (http://github.com/madduck/reclass)
#
# Copyright © 2007–14 martin f. krafft <madduck@madduck.net>
# Released under the terms of the Artistic Licence 2.0
#
RECLASS_NAME = 'reclass'
DESCRIPTION = 'merge data by recursive descent down an ancestry hierarchy'
VERSION = '1.4.1'
AUTHOR = 'martin f. krafft'
AUTHOR_EMAIL = 'reclass@pobox.madduck.net'
MAINTAINER = 'Jason Ritzke (@Rtzq0)'
MAINTAINER_EMAIL = 'jasonritzke@4loopz.com'
COPYRIGHT = 'Copyright © 2007–14 ' + AUTHOR
LICENCE = 'Artistic Licence 2.0'
URL = 'https://github.com/madduck/reclass'
|
import json
from channels.generic.websocket import WebsocketConsumer
from .Utils.general import is_valid_input_payload, str_bytes_to_pandas_df
from types import SimpleNamespace
from .MachineLearning.MachineLearning import HyperparameterTuning
class ComputationalNode(WebsocketConsumer):
def connect(self):
# 1. Verify where the request is coming from, and save the node as orchestrator
# Note that you can use the self.scope attribute to retrieve important data
orchestrator_ip = self.scope['client'][0]
print("Accepting incoming connection from IP", orchestrator_ip)
# 2. Check if the orchestrator IP is available in the verified DB of orchestrator IPs
# pseudocode -> if orchestrator_ip in db.query(verified_orchestrator_IPs) then confirm else refuse
# 3. Accept the connection
self.accept()
# 4. Send back notification of acceptance of the connection
self.send(json.dumps({
"status": 200
}))
def receive(self, text_data=None, bytes_data=None):
# Analyze the payload here, check that all of them comply with the predefined format
decoded_bytes_data = bytes_data.decode('utf-8')
#Change the structure of the below
payload_dict = is_valid_input_payload(decoded_bytes_data)
print('Data Received just now.')
# Decodes all the inputs and gets them back to being pandas dataframes
data_dict = {k: str_bytes_to_pandas_df(v) for k, v in payload_dict.get('data').items()}
# Saves the dict in the object Data
Data = SimpleNamespace(**data_dict)
algorithm_name = payload_dict["instructions"].get("algorithm_name_str")
search_name = payload_dict["instructions"].get("search_name_str")
param_grid = payload_dict["instructions"].get("param_grid")
tuning_model = HyperparameterTuning(X=Data.X,
y=Data.y,
algorithm_str=algorithm_name,
search_str=search_name,
hyperparams_grid=param_grid)
res_dict = tuning_model.run_search()
print(f'Res dict is {res_dict}')
# Sends back the best model we have obtained through the procedure
self.send(json.dumps(
res_dict
))
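# Sketch of the payload receive() expects, inferred from the accessors above
# (the exact dataframe serialization depends on str_bytes_to_pandas_df):
#
#     {
#         "data": {"X": <serialized dataframe>, "y": <serialized dataframe>},
#         "instructions": {
#             "algorithm_name_str": "...",
#             "search_name_str": "...",
#             "param_grid": {...}
#         }
#     }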
|
'''
author: Wentao Hu (stevenhwt@gmail.com)
runs some additional experiments
'''
import os
import time
random_seed=2
#experiment setting
mode="test"
#hyperparameter range
dim_range=[10]
dimension=dim_range[0]
lr_range=[0.0002] #initial learning rate
reg_range=[0.01]
#privacy setting
# #for dpmf method
strategy="min" # --strategy {strategy}
# create folder to store log and results
data=f"ml-1m"
datapath=f"Data/{data}"
for method in ["sampling"]:
dir1=f'log-{data}/{method}-dpmf/seed{random_seed}'
if not os.path.exists(dir1):
os.makedirs(dir1)
dir2=f'results-{data}/{method}-dpmf/seed{random_seed}'
if not os.path.exists(dir2):
os.makedirs(dir2)
#write command
str1=f"""
#!/bin/bash
#BSUB -J {method}
#BSUB -e ./log-{data}/log/%J.err
#BSUB -o ./log-{data}/log/%J.out
#BSUB -n 1
#BSUB -q volta
#BSUB -gpu "num=1:mode=exclusive_process"
"""
uc_range=[0.1]
if method=="hdp" or method=="sampling":
for lr in lr_range:
for dim in dim_range:
for reg in reg_range:
for uc in uc_range:
# f_um=0.37
# f_ul=1-uc-f_um
# user_ratio=f"{uc} {f_um} {f_ul}" #change epsilon to f when using user ratio
user_privacy=f"{uc} 0.5 1"
logfile=f"{dir1}/epsilon_uc{uc}_{method}_{mode}_dim={dim}_lr={lr}_reg={reg}_seed{random_seed}.log"
filename=f"{dir2}/epsilon_uc{uc}_{method}_{mode}_dim={dim}_lr={lr}_reg={reg}_seed{random_seed}.csv"
str2=f""" python mf_{method}_decentralized.py --strategy {strategy} --data "{datapath}" --user_privacy "{user_privacy}" --mode "{mode}" --lr {lr} --embedding_dim {dim} --regularization {reg} --filename "{filename}" --logfile "{logfile}" """
with open(f'run_{method}_decentralized.sh','w') as f:
f.write(str1+str2)
#run .sh file
cmd = f'bsub < run_{method}_decentralized.sh'
os.system(cmd)
time.sleep(2)
if method=="nonprivate":
for lr in lr_range:
for dim in dim_range:
for reg in reg_range:
logfile=f"{dir1}/nonprivate_{mode}_dim={dim}_lr={lr}_reg={reg}_seed{random_seed}.log"
filename=f"{dir2}/nonprivate_{mode}_dim={dim}_lr={lr}_reg={reg}_seed{random_seed}.csv"
str2=f""" python mf_nonprivate.py --data "{datapath}" --mode "{mode}" --lr {lr} --embedding_dim {dim} --regularization {reg} --filename "{filename}" --logfile "{logfile}" """
with open(f'run_nonprivate.sh','w') as f:
f.write(str1+str2)
#run .sh file
cmd = f'bsub < run_nonprivate.sh'
os.system(cmd)
time.sleep(2)
|
import core.mcts_widening as mctswidening
import core.mcts as mctsnotwidening
class Trainer():
"""
Trainer class. Used for a given environment to perform training and validation steps.
"""
def __init__(self, environment, policy, replay_buffer, curriculum_scheduler, mcts_train_params,
mcts_test_params, num_validation_episodes, num_episodes_per_task, batch_size, num_updates_per_episode,
verbose=True, autograd_from=-1, autograd_to=100, widening=False):
self.env = environment
self.policy = policy
self.buffer = replay_buffer
self.curriculum_scheduler = curriculum_scheduler
self.mcts_train_params = mcts_train_params
self.mcts_test_params = mcts_test_params
self.num_validation_episodes = num_validation_episodes
self.num_episodes_per_task = num_episodes_per_task
self.batch_size = batch_size
self.num_updates_per_episode = num_updates_per_episode
self.verbose = verbose
self.autograd_from = autograd_from
self.autograd_to = autograd_to
self.widening = widening
# Generate an empty list for each program
self.iterations_for_each_program = {}
for p in self.env.programs_library:
self.iterations_for_each_program[p] = 0
def perform_validation_step(self, task_index):
"""
Perform validation steps for the task from index task_index.
Args:
task_index: task index
Returns:
(rewards, traces lengths)
"""
validation_rewards = []
traces_lengths = []
for _ in range(self.num_validation_episodes):
# Start new episode
if self.widening:
mcts = mctswidening.MCTS(self.policy, self.env, task_index, **self.mcts_test_params)
else:
mcts = mctsnotwidening.MCTS(self.policy, self.env, task_index, **self.mcts_test_params)
# Sample an execution trace with mcts using policy as a prior
trace = mcts.sample_execution_trace()
task_reward, trace_length, progs_failed_indices = trace[7], len(trace[3]), trace[10]
validation_rewards.append(task_reward)
traces_lengths.append(trace_length)
return validation_rewards, traces_lengths, progs_failed_indices
def play_iteration(self, task_index, verbose=False, current_iteration=0):
"""
Play one training iteration, i.e. select a task, play episodes, store experience in buffer and sample batches
to perform gradient descent on policy weights.
"""
# Keep all the losses
actor_losses = 0
critic_losses = 0
arguments_losses = 0
# Compute the total nodes
total_nodes = {}
total_nodes_selected = []
# Get new task to attempt
task_name = self.env.get_program_from_index(task_index)
if self.verbose:
print('Attempt task {} for {} episodes'.format(task_name, self.num_episodes_per_task))
# Increment the counter for this program
self.iterations_for_each_program[task_name] += 1
check_autograd = False
if self.autograd_from >= 0:
if self.autograd_from <= current_iteration <= self.autograd_from+self.autograd_to:
check_autograd = True
# Start training on the task
for episode in range(self.num_episodes_per_task):
# Start new episode
if self.widening:
mcts = mctswidening.MCTS(self.policy, self.env, task_index, **self.mcts_train_params)
else:
mcts = mctsnotwidening.MCTS(self.policy, self.env, task_index, **self.mcts_train_params)
# Sample an execution trace with mcts using policy as a prior
res = mcts.sample_execution_trace()
observations, prog_indices, previous_actions_indices, policy_labels, lstm_states, _, _, \
task_reward, clean_sub_execution, rewards, programs_failed_indices, \
programs_failed_initstates, programs_failed_states_indices, program_args, \
total_nodes_expanded, total_nodes_selected_episode = res
total_nodes_selected.append(total_nodes_selected_episode)
total_nodes = self.merge_counts(total_nodes, total_nodes_expanded)
# record trace and store it in buffer only if no problem in sub-programs execution
if clean_sub_execution:
# Generates trace
trace = list(zip(observations, prog_indices, lstm_states, policy_labels, rewards, program_args))
# Append trace to buffer
self.buffer.append_trace(trace)
else:
if self.verbose:
print("Trace has not been stored in buffer.")
# Decrease statistics of programs that failed
#for idx in programs_failed_indices:
#self.curriculum_scheduler.update_statistics(idx, torch.FloatTensor([0.0]))
# Train policy on batch
if self.buffer.get_memory_length() > self.batch_size:
for _ in range(self.num_updates_per_episode):
batch = self.buffer.sample_batch(self.batch_size)
if batch is not None:
actor_loss, critic_loss, arg_loss, _ = self.policy.train_on_batch(batch, check_autograd)
actor_losses += actor_loss
critic_losses += critic_loss
arguments_losses += arg_loss
if self.verbose:
print("Done episode {}/{}".format(episode + 1, self.num_episodes_per_task))
# Sum up all the exploration results
for k in total_nodes.keys():
total_nodes[k] = sum(total_nodes[k])
return actor_losses/self.num_episodes_per_task, critic_losses/self.num_episodes_per_task,\
arguments_losses/self.num_episodes_per_task, total_nodes, total_nodes_selected
def perform_validation(self):
"""
Perform validation for all the tasks and update curriculum scheduler statistics.
"""
if self.verbose:
print("Start validation .....")
for idx in self.curriculum_scheduler.get_tasks_of_maximum_level():
# Evaluate performance on task idx
v_rewards, v_lengths, programs_failed_indices = self.perform_validation_step(idx)
# Update curriculum statistics
self.curriculum_scheduler.update_statistics(idx, v_rewards)
# Decrease statistics of programs that failed
#for idx_ in programs_failed_indices:
#self.curriculum_scheduler.update_statistics(idx_, torch.FloatTensor([0.0]))
def merge_counts(self, a, b):
tmp = a.copy()
for k in b.keys():
if k in tmp:
tmp[k] += b[k].copy()
else:
tmp[k] = b[k].copy()
return tmp
|
# Generated by Django 3.1.12 on 2021-08-05 17:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("polio", "0020_fix_statuses"),
]
operations = [
migrations.RenameField(
model_name="campaign",
old_name="vials_requested",
new_name="doses_requested",
),
]
|
import tensorflow as tf
class VggBlockWithBN(tf.keras.layers.Layer):
def __init__(self, layers, filters, kernel_size, name, stride=1):
super(VggBlockWithBN, self).__init__()
self.kernel_size = kernel_size
self.filters = filters
self.stride = stride
self.layers = layers
self.layer_name = name
self.conv_layers = [tf.keras.layers.Conv2D(self.filters, self.kernel_size, strides=self.stride, padding="SAME",
kernel_initializer='he_normal', name=self.layer_name + "_" + str(i))
for i in range(self.layers)]
self.bn_layers = [tf.keras.layers.BatchNormalization(name=self.layer_name + "_bn_" + str(i)) for i in range(self.layers)]  # distinct names so they don't clash with the conv layers
def call(self, inputs, training):
x = inputs
for i in range(self.layers):
x = self.conv_layers[i](x)
x = self.bn_layers[i](x, training=training)
x = tf.nn.relu(x)
return x
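# Example usage (a sketch; input shape and block configuration are hypothetical):
#
#     block = VggBlockWithBN(layers=2, filters=64, kernel_size=3, name="block1")
#     x = tf.random.normal((8, 32, 32, 3))
#     y = block(x, training=True)   # -> (8, 32, 32, 64); stride 1 keeps spatial dims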
|
import functools
def try_except_missing_data_decorator_factory( job_name ):
def try_except_missing_data_decorator( func ):
@functools.wraps( func )
def try_except_missing_data_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except ValueError:
print(f'missing {job_name}, skipping')
return try_except_missing_data_wrapper
return try_except_missing_data_decorator
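# Example usage (a sketch; `load_report` is hypothetical):
#
#     @try_except_missing_data_decorator_factory('daily report')
#     def load_report(path):
#         raise ValueError('no data')   # simulate missing data
#
#     load_report('/tmp/report.csv')    # prints "missing daily report, skipping"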
|
from httpx import Response
import respx
mock_api = respx.mock(
assert_all_mocked=False, assert_all_called=False, base_url="http://api.vwa.com"
)
valid_collection = {
"info": {
"name": "invalid collection",
"_postman_id": "my-collection-id",
"schema": "https://schema.getpostman.com/#2.0.0",
"version": {
"major": "2",
"minor": "0",
"patch": "0",
"prerelease": "draft.1",
},
},
"variable": [
{"id": "var-1", "type": "string", "value": "hello-world"},
],
"item": [
{
"id": "request-200",
"description": {
"content": "<h1>This is H1</h1><script>test toString()</script>",
"version": "2.0.1-abc+efg",
},
"name": "200 ok",
"request": "http://echo.getpostman.com/status/200",
}
],
}
mock_api.get("http://example.postman.com/schema").mock(
Response(200, headers={"content-type": "application/json"}, json=valid_collection)
)
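# Example usage (a sketch): activating the router answers the mocked route without
# touching the network; httpx is assumed to be installed alongside respx.
#
#     import httpx
#     with mock_api:
#         resp = httpx.get("http://example.postman.com/schema")
#         assert resp.json()["info"]["_postman_id"] == "my-collection-id"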
|
import tensorflow as tf
import tensorflow.keras.layers as layers
from .transformer import VALID_CHARS
class InputLayer(layers.Layer):
def __init__(self, num_class, **kwargs):
super(InputLayer, self).__init__(**kwargs)
self.num_class = num_class
self.reshape_layer = layers.Reshape((-1, 3, num_class))
def call(self, x, **kwargs):
x = tf.cast(x, dtype=tf.int32)
x = tf.one_hot(x, self.num_class)
x = self.reshape_layer(x)
return x
def get_config(self):
config = super(InputLayer, self).get_config()
config.update({'num_class': self.num_class})
return config
class ClassifierLayer(layers.Layer):
def __init__(self, num_class, **kwargs):
super(ClassifierLayer, self).__init__(**kwargs)
self.num_class = num_class
self.conv1_layers = [
layers.Conv2D(kernel_size=2, strides=1, filters=32, padding='same', activation='relu'),
layers.Conv2D(kernel_size=2, strides=1, filters=64, padding='same', activation='relu'),
layers.Conv2D(kernel_size=2, strides=1, filters=128, padding='same', activation='relu'),
layers.GlobalMaxPool2D()
]
self.conv2_layers = [
layers.Conv2D(kernel_size=2, strides=2, filters=32, padding='same', activation='relu'),
layers.Conv2D(kernel_size=3, strides=2, filters=64, padding='same', activation='relu'),
layers.Conv2D(kernel_size=4, strides=2, filters=128, padding='same', activation='relu'),
layers.GlobalMaxPool2D()
]
self.conv3_layers = [
layers.Conv2D(kernel_size=4, strides=1, filters=32, padding='same', activation='relu'),
layers.Conv2D(kernel_size=5, strides=1, filters=64, padding='same', activation='relu'),
layers.Conv2D(kernel_size=6, strides=1, filters=128, padding='same', activation='relu'),
layers.GlobalMaxPool2D()
]
self.concat_layer = layers.Concatenate(axis=-1)
self.dense_layer = layers.Dense(256, activation='relu')
self.output_layer = layers.Dense(num_class, activation='softmax')
def call(self, x, **kwargs):
h1 = self._call_sequential(x, self.conv1_layers)
h2 = self._call_sequential(x, self.conv2_layers)
h3 = self._call_sequential(x, self.conv3_layers)
x = self.concat_layer([h1, h2, h3])
x = self.dense_layer(x)
x = self.output_layer(x)
return x
def _call_sequential(self, x, layers):
for layer in layers:
x = layer(x)
return x
def get_config(self):
config = super(ClassifierLayer, self).get_config()
config.update({'num_class': self.num_class})
return config
def create_training_model(input_shape, num_class, **kwargs):
x = inputs = layers.Input(shape=input_shape, dtype=tf.int32)
x = InputLayer(len(VALID_CHARS))(x)
x = outputs = ClassifierLayer(num_class)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs, **kwargs)
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
def create_ensemble_model(input_shape, num_class, folds, **kwargs):
x = inputs = layers.Input(shape=input_shape, dtype=tf.int32, name='feature')
x = InputLayer(len(VALID_CHARS))(x)
classifier_layers = [ClassifierLayer(num_class, name=fold.name) for fold in folds]
x = [layer(x) for layer in classifier_layers]
x = outputs = layers.Average(name='predicted')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs, **kwargs)
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
for layer, fold in zip(classifier_layers, folds):
layer.set_weights(fold.get_weights())
return model
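# Example usage (a sketch; the folds are assumed to be already-built/trained
# ClassifierLayer instances with unique names, e.g. recovered from k-fold training):
#
#     fold_a = ClassifierLayer(num_class=4, name='fold_a')
#     fold_b = ClassifierLayer(num_class=4, name='fold_b')
#     # ... build and train each fold first, then:
#     ensemble = create_ensemble_model(input_shape=(30,), num_class=4,
#                                      folds=[fold_a, fold_b])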
|
#!/usr/bin/python
# Copyright 2008 Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Tests different aspects of Boost Build's automated testing support.
import BoostBuild
################################################################################
#
# test_files_with_spaces_in_their_name()
# --------------------------------------
#
################################################################################
def test_files_with_spaces_in_their_name():
"""Regression test making sure test result files get created correctly when
testing files with spaces in their name.
"""
t = BoostBuild.Tester(use_test_config=False)
t.write("valid source.cpp", "int main() {}\n");
t.write("invalid source.cpp", "this is not valid source code");
t.write("jamroot.jam", """
import testing ;
testing.compile "valid source.cpp" ;
testing.compile-fail "invalid source.cpp" ;
""")
t.run_build_system(status=0)
t.expect_addition("bin/invalid source.test/$toolset/debug*/invalid source.obj")
t.expect_addition("bin/invalid source.test/$toolset/debug*/invalid source.test")
t.expect_addition("bin/valid source.test/$toolset/debug*/valid source.obj")
t.expect_addition("bin/valid source.test/$toolset/debug*/valid source.test")
t.expect_content("bin/valid source.test/$toolset/debug*/valid source.test", \
"passed" )
t.expect_content( \
"bin/invalid source.test/$toolset/debug*/invalid source.test", \
"passed" )
t.expect_content( \
"bin/invalid source.test/$toolset/debug*/invalid source.obj", \
"failed as expected" )
t.cleanup()
################################################################################
#
# main()
# ------
#
################################################################################
test_files_with_spaces_in_their_name()
|
#!/usr/bin/env python3
#=========================================================================
#
# Copyright (c) 2018 Karl T. Diedrich, PhD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
import vtk
import numpy as np
from kanvas.canvas import Renderer, RenderWindow, Box, Actor
from kanvas.shapes import parabola3D, ArrowFactory
from kanvas.transform import Rotation, rotation, xRotation, yRotation, zRotation
'''Plot points in 3D
: author Karl Diedrich, PhD <ktdiedrich@gmail.com>
'''
class Extent:
'''Range information
'''
def __init__(self, minX0=0, maxX0=0, minY0=0, maxY0=0, minZ0=0, maxZ0=0):
'Set initial values of range'
self._minX = minX0
self._maxX = maxX0
self._minY = minY0
self._maxY = maxY0
self._minZ = minZ0
self._maxZ = maxZ0
def __str__(self):
return "minX={:.3}, maxX={:.3}, minY={:.3}, maxY={:.3}, minZ={:.3}, maxZ={:.3}".format(self._minX, self._maxX,
self._minY, self._maxY, self._minZ, self._maxZ)
@property
def minX(self):
return self._minX
@minX.setter
def minX(self, value):
if value < self._minX:
self._minX = value
@property
def maxX(self):
return self._maxX
@maxX.setter
def maxX(self, value):
if value > self._maxX:
self._maxX = value
@property
def minY(self):
return self._minY
@minY.setter
def minY(self, value):
if value < self._minY:
self._minY = value
@property
def maxY(self):
return self._maxY
@maxY.setter
def maxY(self, value):
if value > self._maxY:
self._maxY = value
@property
def minZ(self):
return self._minZ
@minZ.setter
def minZ(self, value):
if value < self._minZ:
            self._minZ = value
@property
def maxZ(self):
return self._maxZ
@maxZ.setter
def maxZ(self, value):
if value > self._maxZ:
self._maxZ = value
class PointData:
def __init__(self, maxNumPoints=1e6):
'''Points of data kept in class object separate of mapper and actor.
'''
self._maxNumPoints = maxNumPoints
self._vtkPolyData = vtk.vtkPolyData()
self.clearPoints()
self._extent = None
def addPoint(self, point):
if self._vtkPoints.GetNumberOfPoints() < self._maxNumPoints:
pointId = self._vtkPoints.InsertNextPoint(point[:])
self._vtkDepth.InsertNextValue(point[2])
self._vtkCells.InsertNextCell(1)
self._vtkCells.InsertCellPoint(pointId)
else:
            r = np.random.randint(0, int(self._maxNumPoints))
self._vtkPoints.SetPoint(r, point[:])
self._vtkCells.Modified()
self._vtkPoints.Modified()
self._vtkDepth.Modified()
def clearPoints(self):
self._vtkPoints = vtk.vtkPoints()
self._vtkCells = vtk.vtkCellArray()
self._vtkDepth = vtk.vtkDoubleArray()
self._vtkDepth.SetName('DepthArray')
self._vtkPolyData.SetPoints(self._vtkPoints)
self._vtkPolyData.SetVerts(self._vtkCells)
self._vtkPolyData.GetPointData().SetScalars(self._vtkDepth)
self._vtkPolyData.GetPointData().SetActiveScalars('DepthArray')
@property
def vtkPolyData(self):
return self._vtkPolyData
@property
def extent(self):
return self._extent
@extent.setter
def extent(self, value):
self._extent = value
class VtkPointCloud:
def __init__(self, zMin=-10.0, zMax=10.0, maxNumPoints=1e6):
self._pointData = PointData(maxNumPoints=maxNumPoints)
mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputData(self._pointData.vtkPolyData)
mapper.SetColorModeToDefault()
mapper.SetScalarRange(zMin, zMax)
mapper.SetScalarVisibility(1)
self._vtkActor = vtk.vtkActor()
self._vtkActor.SetMapper(mapper)
def addPoint(self, point):
self._pointData.addPoint(point)
def clearPoints(self):
self._pointData.clearPoints()
@property
def vtkActor(self):
return self._vtkActor
def load_data(filename):
pointCloud = VtkPointCloud()
data = np.genfromtxt(filename,dtype=float,skip_header=0,usecols=[0,1,2], delimiter=',')
for k in range(np.size(data,0)):
point = data[k] #20*(random.rand(3)-0.5)
pointCloud.addPoint(point)
return pointCloud
def makePointCloudActor(xBegin, xEnd, yBegin, yEnd, functZ, step=1.0, dtype=np.float64):
'''Generate range of points with the functZ function
'''
pointCloud = VtkPointCloud()
for x in np.arange(xBegin, xEnd, step):
for y in np.arange(yBegin, yEnd, step):
z = functZ(x, y)
point = np.array((x, y, z), dtype=dtype)
pointCloud.addPoint(point)
return pointCloud
def makePointData(xBegin, xEnd, yBegin, yEnd, functZ, step=1.0, dtype=np.float64, rotationMatrix=None):
pointData = PointData()
rotator = None
zBegin = functZ(xBegin, yBegin)
extent = Extent(minX0=xBegin, maxX0=xBegin, minY0=yBegin, maxY0=yBegin, minZ0=zBegin, maxZ0=zBegin)
    if isinstance(rotationMatrix, np.ndarray):
rotator = Rotation(rotationMatrix)
for x in np.arange(xBegin, xEnd, step):
for y in np.arange(yBegin, yEnd, step):
z = functZ(x, y)
point = np.array((x, y, z), dtype=dtype)
extent.minX = x
extent.maxX = x
extent.minY = y
extent.maxY = y
extent.minZ = z
extent.maxZ = z
if rotator:
point = rotator.rotate(point)
pointData.addPoint(point)
pointData.extent = extent
return pointData
def makeSpherePoints(r, step=.1, dtype=np.float64):
    '''Generate points on a sphere of radius r from spherical coordinates.
    '''
from math import pi, sin, cos
pointData = PointData()
for s in np.arange(0, 2*pi, step):
for t in np.arange(0, pi, step):
x = r*cos(s)*sin(t)
y = r*sin(s)*sin(t)
z = r*cos(t)
point = np.array((x,y,z), dtype=dtype)
pointData.addPoint(point)
return pointData
def displayPointCloud(pointCloud):
'''Example
pointCloud = makePointCloudActor(xBegin=-radius, xEnd=radius, yBegin=-radius, yEnd=radius, functZ=parabola3D, step=step, dtype=np.float64)
displayPointCloud(pointCloud=pointCloud)
'''
renderer = vtk.vtkRenderer()
renderer.AddActor(pointCloud.vtkActor)
#renderer.SetBackground(.2, .3, .4)
renderer.SetBackground(0.0, 0.0, 0.0)
renderer.ResetCamera()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindow.Render()
renderWindow.SetWindowName("XYZ Data Viewer")
renderWindowInteractor.Start()
if __name__ == '__main__':
import sys
import argparse
parser = argparse.ArgumentParser(description='Plot 3D points')
parser.add_argument('--input', type=str, default="/home/ktdiedrich/Documents/dev/Crispersight/python/ktd/test1.csv",
help='input CSV file of x,y,z points in rows ')
args = parser.parse_args()
print("input {}".format(args.input))
# pointCloud=load_data(args.input)
radius = 4.0
step = 0.1
xDegree = -20.0
yDegree = -15.0
zDegree = -20.0
rotationMatrix = rotation(np.radians(xDegree), np.radians(yDegree), np.radians(zDegree))
window = RenderWindow(size=(1200,600))
parabolaData = makePointData(xBegin=-radius, xEnd=radius, yBegin=-radius, yEnd=radius,
functZ=parabola3D, step=step, dtype=np.float64, rotationMatrix=rotationMatrix)
print("Parabola: {}".format(parabolaData.extent))
parabolaRenderer = Renderer(background=(.1, .15, .1))
parabolaRenderer.addActorSource(parabolaData.vtkPolyData, position=(0,0,0))
arrowFactory = ArrowFactory()
xArrow = arrowFactory.makeArrow()
yArrow = arrowFactory.makeArrow()
zArrow = arrowFactory.makeArrow()
xArrowProp = vtk.vtkProperty()
xArrowProp.SetColor(.4, .1, .1)
ext = parabolaData.extent
scaleFactor = 10
zPos = (ext.maxZ+ext.minZ)/2.0
xArrowActor = Actor(source=xArrow, zMin=-10.0, zMax=10.0, actorProperty=xArrowProp,
position=(0-scaleFactor, 0, 0), box=None, scale=(scaleFactor, scaleFactor, scaleFactor) )
parabolaRenderer.addActor(xArrowActor)
window.addRenderer(parabolaRenderer, (0.0, 0.0, 0.5, 1.0) )
circleData = makeSpherePoints(r=radius, step=step, dtype=np.float64)
circleRenderer = Renderer(background=(.15, .1, .1))
circleRenderer.addActorSource(circleData.vtkPolyData)
window.addRenderer(circleRenderer, (0.5, 0.0, 1.0, 1.0))
window.renderInteractive()
|
import math
import torch.nn as nn
import torch
import torch.nn.functional as F
class HyperDecoder(nn.Module):
def __init__(self, input_dim, outputdim=None):
super(HyperDecoder, self).__init__()
self.input_dim = input_dim
self.fc1 = nn.Linear(input_dim, input_dim)
self.fc2 = nn.Linear(input_dim, input_dim*8)
if not outputdim:
self.fc3 = nn.Linear(input_dim*8, input_dim*32)
else:
self.fc3 = nn.Linear(input_dim * 8, outputdim)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = torch.exp(self.fc3(x))
# x = F.relu(self.fc3(x))
return x
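if __name__ == '__main__':
    # Added sketch (illustrative dimensions, not from the original file): the
    # torch.exp in forward() guarantees strictly positive outputs, the usual
    # choice when the decoder predicts scale parameters.
    decoder = HyperDecoder(input_dim=16, outputdim=192)
    z = torch.randn(4, 16)
    scales = decoder(z)
    print(scales.shape, bool((scales > 0).all()))  # torch.Size([4, 192]) True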
|
INITIATOR='INITIATOR'
PASSIVE='PASSIVE'
SATELITE='SATELITE'
PARTNER='PARTNER'
ORIGINATOR='ORIGINATOR'
ROLE='ROLE'
STATE='STATE'
MSGS="MESSAGES"
ASK_SWAP='ASK_SWAP'
WAIT_ASK_RESPONSE='WAIT_ASK_RESPONSE'
SEND_ACCEPT_SWAP='SEND_ACCEPT_SWAP'
SEND_TABLE='SEND_TABLE'
WAIT_LOCKS_UTABLE='WAIT_LOCKS_UTABLE'
PERFORM_SWAP='PERFORM_SWAP'
UPDATE_SATELITES='UPDATE_SATELITES'
WAIT_INIT_TABLE='WAIT_INIT_TABLE'
WAIT_FINAL_TABLE='WAIT_FINAL_TABLE'
WAIT_UPDATE='WAIT_UPDATE'
|
"""
Creating an Image Dataset from Local PNGs/JPGs
There are two ways to create a dataset of PNGs/JPGs, depending on whether the images are
stored locally on your computer or at a publicly accessible URL. The code snippet
below shows you what to do if the images are on your computer.
"""
from indico import IndicoClient, IndicoConfig
from indico.queries import CreateDataset
import pandas as pd
# Create an Indico API client
my_config = IndicoConfig(
host="app.indico.io", api_token_path="./path/to/indico_api_token.txt"
)
client = IndicoClient(config=my_config)
# With local images, you should create a CSV formatted like the one below (built here for demonstration),
# where one column contains the paths from the CSV's location to where the images are stored on your computer
image_dataset = pd.DataFrame()
image_dataset["image_files"] = [
"./path/from/csv/to/image.png",
"./path/from/csv/to/image2.png",
]
image_dataset.to_csv("./image_dataset.csv", index=False)
# Use the CSV you created (like above) to create the dataset
dataset = client.call(
CreateDataset(
name="My Image Dataset",
files="./image_dataset.csv",
from_local_images=True,
image_filename_col="image_files", # specify the column containing the images
)
)
|
import json
import logging
import web
from api import *
from errors import *
from models import Contact, Institution
logger = logging.getLogger(__name__)
class ContactController(object):
"""Handles contact queries"""
@json_response
    def OPTIONS(self, name):
return
@json_response
@api_response
@check_token
def GET(self, name):
""" Get contacts."""
logger.debug("Query: %s" % (web.input()))
contact_uuid = web.input().get('contact_uuid')
if contact_uuid:
results = Contact.get_from_uuid(contact_uuid)
else:
results = Contact.get_all()
if not results:
raise Error(NORESULT)
data = results_to_contacts(results)
return data
@json_response
@api_response
@check_token
def POST(self, name):
"""Inserts new contact."""
data = json.loads(web.data())
institution_uuid = data.get('institution_uuid')
name = data.get('contact_name')
email_address = data.get('contact_email_address')
notes = data.get('contact_notes')
try:
assert institution_uuid and name
except AssertionError as error:
logger.debug(error)
raise Error(BADPARAMS)
        try:
            assert Institution.get_institution(institution_uuid)[0]
        except Exception:
            raise Error(NOTFOUND, msg="The institution provided does not exist.")
        contact_uuid = generate_uuid()
        contact = Contact(contact_uuid, institution_uuid, name, email_address, notes)
        contact.save()
        return [contact.__dict__]
@json_response
@api_response
@check_token
def PUT(self, name):
"""Checks if entry exists using contact_uuid, then modifies it."""
data = json.loads(web.data())
institution_uuid = data.get('institution_uuid')
contact_uuid = data.get('contact_uuid')
name = data.get('contact_name')
email_address = data.get('contact_email_address')
notes = data.get('contact_notes')
try:
assert institution_uuid and contact_uuid and name
except AssertionError as error:
logger.debug(error)
raise Error(BADPARAMS)
        try:
            assert Contact.get_from_uuid(contact_uuid)[0]
        except Exception:
            raise Error(NOTFOUND, msg="The contact uuid provided does not exist.")
        contact = Contact(contact_uuid, institution_uuid, name, email_address, notes)
        contact.update()
        return [contact.__dict__]
@json_response
@api_response
@check_token
def DELETE(self, name):
"""Deletes contact using contact name."""
contact_uuid = web.input().get('contact_uuid')
try:
assert contact_uuid
except AssertionError as error:
logger.debug(error)
raise Error(BADPARAMS)
        try:
            result = Contact.get_from_uuid(contact_uuid)[0]
        except Exception:
            raise Error(NOTFOUND, msg="The contact uuid provided does not exist.")
        contact = Contact(result['contact_uuid'], result['institution_uuid'],
                          result['contact_name'], result['contact_email_address'],
                          result['contact_notes'])
        contact.delete()
return [contact.__dict__]
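# Added sketch of a POST body this controller accepts (field names taken from
# the handler above; the mount path and the auth header format are assumptions,
# since routing and @check_token are defined elsewhere):
#
#   curl -X POST https://example.org/contacts \
#        -H 'Authorization: Bearer <token>' \
#        -d '{"institution_uuid": "<uuid>", "contact_name": "Ada Lovelace",
#             "contact_email_address": "ada@example.org", "contact_notes": ""}'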
|
from enum import Enum
class Routes(Enum):
HEALTH = ('/health', 'health')
GITHUB = ('/github', 'github')
REGISTER = ('/register', 'register')
STATUS = ('/status', 'status')
COMMENT = ('/comment', 'comment')
MISSING = ('/missing/{eventType}', 'missing')
DEREGISTER = ('/deregister', 'deregister')
CLEAR = ('/clear', 'clear')
DEPLOYMENT = ('/deployment', 'deployment')
DEPLOYMENT_STATUS = ('/deployment_status', 'deployment_status')
def __init__(self, route: str, route_name: str) -> None:
self.route: str = route
self.route_id: str = route_name
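# Added usage sketch: each member carries both a path template and a route name.
if __name__ == '__main__':
    print(Routes.MISSING.route, Routes.MISSING.route_id)  # /missing/{eventType} missing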
|
import logging
from concurrent import futures
import grpc
from ray import cloudpickle
import ray
import ray.state
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
import time
import inspect
import json
from ray.experimental.client import stash_api_for_tests, _set_server_api
from ray.experimental.client.common import convert_from_arg
from ray.experimental.client.common import encode_exception
from ray.experimental.client.common import ClientObjectRef
from ray.experimental.client.server.core_ray_api import RayServerAPI
logger = logging.getLogger(__name__)
class RayletServicer(ray_client_pb2_grpc.RayletDriverServicer):
def __init__(self, test_mode=False):
self.object_refs = {}
self.function_refs = {}
self.actor_refs = {}
self.registered_actor_classes = {}
self._test_mode = test_mode
def ClusterInfo(self, request,
context=None) -> ray_client_pb2.ClusterInfoResponse:
resp = ray_client_pb2.ClusterInfoResponse()
resp.type = request.type
if request.type == ray_client_pb2.ClusterInfoType.CLUSTER_RESOURCES:
resources = ray.cluster_resources()
# Normalize resources into floats
# (the function may return values that are ints)
float_resources = {k: float(v) for k, v in resources.items()}
resp.resource_table.CopyFrom(
ray_client_pb2.ClusterInfoResponse.ResourceTable(
table=float_resources))
elif request.type == ray_client_pb2.ClusterInfoType.AVAILABLE_RESOURCES:
resources = ray.available_resources()
# Normalize resources into floats
# (the function may return values that are ints)
float_resources = {k: float(v) for k, v in resources.items()}
resp.resource_table.CopyFrom(
ray_client_pb2.ClusterInfoResponse.ResourceTable(
table=float_resources))
else:
resp.json = self._return_debug_cluster_info(request, context)
return resp
def _return_debug_cluster_info(self, request, context=None) -> str:
data = None
if request.type == ray_client_pb2.ClusterInfoType.NODES:
data = ray.nodes()
elif request.type == ray_client_pb2.ClusterInfoType.IS_INITIALIZED:
data = ray.is_initialized()
else:
raise TypeError("Unsupported cluster info type")
return json.dumps(data)
def Terminate(self, request, context=None):
if request.WhichOneof("terminate_type") == "task_object":
try:
object_ref = cloudpickle.loads(request.task_object.handle)
ray.cancel(
object_ref,
force=request.task_object.force,
recursive=request.task_object.recursive)
except Exception as e:
return_exception_in_context(e, context)
elif request.WhichOneof("terminate_type") == "actor":
try:
actor_ref = cloudpickle.loads(request.actor.handle)
ray.kill(actor_ref, no_restart=request.actor.no_restart)
except Exception as e:
return_exception_in_context(e, context)
else:
raise RuntimeError(
"Client requested termination without providing a valid terminate_type"
)
return ray_client_pb2.TerminateResponse(ok=True)
def GetObject(self, request, context=None):
request_ref = cloudpickle.loads(request.handle)
if request_ref.binary() not in self.object_refs:
return ray_client_pb2.GetResponse(valid=False)
objectref = self.object_refs[request_ref.binary()]
logger.info("get: %s" % objectref)
        try:
            item = ray.get(objectref, timeout=request.timeout)
        except Exception as e:
            return_exception_in_context(e, context)
            return ray_client_pb2.GetResponse(valid=False)
item_ser = cloudpickle.dumps(item)
return ray_client_pb2.GetResponse(valid=True, data=item_ser)
def PutObject(self, request, context=None) -> ray_client_pb2.PutResponse:
obj = cloudpickle.loads(request.data)
objectref = self._put_and_retain_obj(obj)
pickled_ref = cloudpickle.dumps(objectref)
return ray_client_pb2.PutResponse(
ref=make_remote_ref(objectref.binary(), pickled_ref))
def _put_and_retain_obj(self, obj) -> ray.ObjectRef:
objectref = ray.put(obj)
self.object_refs[objectref.binary()] = objectref
logger.info("put: %s" % objectref)
return objectref
def WaitObject(self, request, context=None) -> ray_client_pb2.WaitResponse:
object_refs = [cloudpickle.loads(o) for o in request.object_handles]
num_returns = request.num_returns
timeout = request.timeout
object_refs_ids = []
for object_ref in object_refs:
if object_ref.binary() not in self.object_refs:
return ray_client_pb2.WaitResponse(valid=False)
object_refs_ids.append(self.object_refs[object_ref.binary()])
try:
ready_object_refs, remaining_object_refs = ray.wait(
object_refs_ids,
num_returns=num_returns,
timeout=timeout if timeout != -1 else None)
except Exception:
# TODO(ameer): improve exception messages.
return ray_client_pb2.WaitResponse(valid=False)
logger.info("wait: %s %s" % (str(ready_object_refs),
str(remaining_object_refs)))
ready_object_ids = [
make_remote_ref(
id=ready_object_ref.binary(),
handle=cloudpickle.dumps(ready_object_ref),
) for ready_object_ref in ready_object_refs
]
remaining_object_ids = [
make_remote_ref(
id=remaining_object_ref.binary(),
handle=cloudpickle.dumps(remaining_object_ref),
) for remaining_object_ref in remaining_object_refs
]
return ray_client_pb2.WaitResponse(
valid=True,
ready_object_ids=ready_object_ids,
remaining_object_ids=remaining_object_ids)
def Schedule(self, task, context=None,
prepared_args=None) -> ray_client_pb2.ClientTaskTicket:
logger.info("schedule: %s %s" %
(task.name,
ray_client_pb2.ClientTask.RemoteExecType.Name(task.type)))
if task.type == ray_client_pb2.ClientTask.FUNCTION:
return self._schedule_function(task, context, prepared_args)
elif task.type == ray_client_pb2.ClientTask.ACTOR:
return self._schedule_actor(task, context, prepared_args)
elif task.type == ray_client_pb2.ClientTask.METHOD:
return self._schedule_method(task, context, prepared_args)
else:
raise NotImplementedError(
"Unimplemented Schedule task type: %s" %
ray_client_pb2.ClientTask.RemoteExecType.Name(task.type))
def _schedule_method(
self,
task: ray_client_pb2.ClientTask,
context=None,
prepared_args=None) -> ray_client_pb2.ClientTaskTicket:
actor_handle = self.actor_refs.get(task.payload_id)
if actor_handle is None:
raise Exception(
"Can't run an actor the server doesn't have a handle for")
arglist = _convert_args(task.args, prepared_args)
with stash_api_for_tests(self._test_mode):
output = getattr(actor_handle, task.name).remote(*arglist)
self.object_refs[output.binary()] = output
pickled_ref = cloudpickle.dumps(output)
return ray_client_pb2.ClientTaskTicket(
return_ref=make_remote_ref(output.binary(), pickled_ref))
def _schedule_actor(self,
task: ray_client_pb2.ClientTask,
context=None,
prepared_args=None) -> ray_client_pb2.ClientTaskTicket:
with stash_api_for_tests(self._test_mode):
payload_ref = cloudpickle.loads(task.payload_id)
if payload_ref.binary() not in self.registered_actor_classes:
actor_class_ref = self.object_refs[payload_ref.binary()]
actor_class = ray.get(actor_class_ref)
if not inspect.isclass(actor_class):
raise Exception("Attempting to schedule actor that "
"isn't a class.")
reg_class = ray.remote(actor_class)
self.registered_actor_classes[payload_ref.binary()] = reg_class
remote_class = self.registered_actor_classes[payload_ref.binary()]
arglist = _convert_args(task.args, prepared_args)
actor = remote_class.remote(*arglist)
actorhandle = cloudpickle.dumps(actor)
self.actor_refs[actorhandle] = actor
return ray_client_pb2.ClientTaskTicket(
return_ref=make_remote_ref(actor._actor_id.binary(), actorhandle))
def _schedule_function(
self,
task: ray_client_pb2.ClientTask,
context=None,
prepared_args=None) -> ray_client_pb2.ClientTaskTicket:
payload_ref = cloudpickle.loads(task.payload_id)
if payload_ref.binary() not in self.function_refs:
funcref = self.object_refs[payload_ref.binary()]
func = ray.get(funcref)
if not inspect.isfunction(func):
raise Exception("Attempting to schedule function that "
"isn't a function.")
self.function_refs[payload_ref.binary()] = ray.remote(func)
remote_func = self.function_refs[payload_ref.binary()]
arglist = _convert_args(task.args, prepared_args)
# Prepare call if we're in a test
with stash_api_for_tests(self._test_mode):
output = remote_func.remote(*arglist)
if output.binary() in self.object_refs:
raise Exception("already found it")
self.object_refs[output.binary()] = output
pickled_output = cloudpickle.dumps(output)
return ray_client_pb2.ClientTaskTicket(
return_ref=make_remote_ref(output.binary(), pickled_output))
def _convert_args(arg_list, prepared_args=None):
if prepared_args is not None:
return prepared_args
out = []
for arg in arg_list:
t = convert_from_arg(arg)
if isinstance(t, ClientObjectRef):
out.append(t._unpack_ref())
else:
out.append(t)
return out
def make_remote_ref(id: bytes, handle: bytes) -> ray_client_pb2.RemoteRef:
return ray_client_pb2.RemoteRef(
id=id,
handle=handle,
)
def return_exception_in_context(err, context):
if context is not None:
context.set_details(encode_exception(err))
context.set_code(grpc.StatusCode.INTERNAL)
def serve(connection_str, test_mode=False):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
task_servicer = RayletServicer(test_mode=test_mode)
_set_server_api(RayServerAPI(task_servicer))
ray_client_pb2_grpc.add_RayletDriverServicer_to_server(
task_servicer, server)
server.add_insecure_port(connection_str)
server.start()
return server
if __name__ == "__main__":
logging.basicConfig(level="INFO")
# TODO(barakmich): Perhaps wrap ray init
ray.init()
server = serve("0.0.0.0:50051")
try:
while True:
time.sleep(1000)
except KeyboardInterrupt:
server.stop(0)
|
from apistar import App, ASyncApp
from apistar_autoapp import AutoApp, AutoASyncApp
from .app import test_from_app_file
def test_empty_autoApp():
app = AutoApp()
assert isinstance(app, App)
assert app.event_hooks == []
def test_empty_autoASyncApp():
app = AutoASyncApp()
assert isinstance(app, ASyncApp)
assert app.event_hooks == []
def test_app_entry():
test_from_app_file()
|
import math
import numpy as np
import matplotlib.pyplot as plt
def imag_residual_plot(f, res_imag_arr, fmt='.-', y_limits=(-5, 5)):
    plt.plot(f, res_imag_arr * 100, fmt, label=r'$\Delta_{\,\mathrm{Im}}$')
# Make x axis log scale
plt.xscale('log')
# Set the labels to delta vs f
plt.xlabel('$f$ [Hz]', fontsize=14)
plt.ylabel('$\\Delta$ $(\\%)$', fontsize=14)
plt.legend()
plt.xlim(min(f), max(f))
plt.ylim(y_limits)
plt.show()
def real_residual_plot(f, res_real_arr, fmt='.-', y_limits=(-5, 5)):
plt.plot(f, res_real_arr * 100, fmt, label=r'$\Delta_{\,\mathrm{Re}}$')
# Make x axis log scale
plt.xscale('log')
# Set the labels to delta vs f
plt.xlabel('$f$ [Hz]', fontsize=14)
plt.ylabel('$\\Delta$ $(\\%)$', fontsize=14)
plt.legend()
plt.xlim(min(f), max(f))
plt.ylim(y_limits)
plt.show()
def residuals_plot(f, residual_arr=None, residual_real_arr=None, residual_imag_arr=None, fmt='.-', y_limits=None):
"""
refer: Impedance-->visualization.py-->plot_residuals(ax, f, res_real, res_imag, fmt='.-', y_limits=(-5, 5), **kwargs):
:return:
"""
if residual_arr is not None:
plt.plot(f, residual_arr.real * 100, fmt, label=r'$\Delta_{\,\mathrm{Re}}$')
plt.plot(f, residual_arr.imag * 100, fmt, label=r'$\Delta_{\,\mathrm{Im}}$')
y_abs_max = max(max(np.abs(residual_arr.real * 100)), max(np.abs(residual_arr.imag * 100)))
    elif (residual_real_arr is not None) and (residual_imag_arr is not None):
        plt.plot(f, residual_real_arr * 100, fmt, label=r'$\Delta_{\,\mathrm{Re}}$')
        plt.plot(f, residual_imag_arr * 100, fmt, label=r'$\Delta_{\,\mathrm{Im}}$')
        y_abs_max = max(max(np.abs(residual_real_arr * 100)), max(np.abs(residual_imag_arr * 100)))
    else:
        raise ValueError("Provide either residual_arr or both residual_real_arr and residual_imag_arr.")
# Make x axis log scale
plt.xscale('log')
# Set the labels to delta vs f
plt.xlabel('$f$ [Hz]', fontsize=14)
plt.ylabel('$\\Delta$ $(\\%)$', fontsize=14)
plt.legend()
plt.xlim(min(f) / 2, max(f) * 2)
    if y_limits is None:
        if y_abs_max < 5:
            y_limits = [-5, 5]
        else:
            y_limits = [-1.5 * y_abs_max, 1.5 * y_abs_max]
    plt.ylim(y_limits)
plt.show()
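if __name__ == '__main__':
    # Added sketch with synthetic data (values are illustrative only): complex
    # residuals on a log-spaced frequency axis exercise the residual_arr branch.
    f = np.logspace(0, 5, 30)
    res = 0.02 * np.sin(np.log10(f)) + 0.01j * np.cos(np.log10(f))
    residuals_plot(f, residual_arr=res)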
|
import json
import sqlite3
from typing import List
class SQLiteLoader:
    """Loads data from SQLite.
    Loads data from SQLite, converts it and returns a list of dictionaries
    that can be processed by PostgresSaver.
    """
SQL = """
WITH x as (
SELECT m.id, group_concat(a.id) AS actors_ids, group_concat(a.name) AS actors_names
FROM movies m
LEFT JOIN movie_actors ma ON m.id = ma.movie_id
LEFT JOIN actors a ON ma.actor_id = a.id
GROUP BY m.id
)
SELECT m.id, genre, director, title, plot, imdb_rating, x.actors_ids, x.actors_names,
CASE
WHEN m.writers = '' THEN '[{"id": "' || m.writer || '"}]' ELSE m.writers END AS writers
FROM movies m
LEFT JOIN x ON m.id = x.id
"""
def __init__(self, connection: sqlite3.Connection):
self.conn = connection
self.conn.row_factory = self.dict_factory
@staticmethod
def dict_factory(cursor: sqlite3.Cursor, row: tuple) -> dict:
"""Factory for strings as dict."""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def load_writers_names(self) -> dict:
"""Getting a dict of all the writers."""
writers = {}
SQL = """
SELECT DISTINCT id, name FROM writers
"""
for writer in self.conn.execute(SQL):
writers[writer['id']] = writer
return writers
def _transform_row(self, row: dict, writers: dict) -> dict:
"""Converting data from SQLite."""
movie_writers = []
writers_set = set()
for writer in json.loads(row['writers']):
writer_id = writer['id']
if writers[writer_id]['name'] != 'N/A' and writer_id not in writers_set:
movie_writers.append(writers[writer_id])
writers_set.add(writer_id)
actors_names = []
if row['actors_ids'] is not None and row['actors_names'] is not None:
actors_names = [x for x in row['actors_names'].split(',') if x != 'N/A']
return {
'id': row['id'],
'genre': row['genre'].replace(' ', '').split(','),
'actors': actors_names,
'writers': [x['name'] for x in movie_writers],
'imdb_rating': float(row['imdb_rating']) if row['imdb_rating'] != 'N/A' else None,
'title': row['title'],
'director': [
x.strip() for x in row['director'].split(',')
] if row['director'] != 'N/A' else None,
'description': row['plot'] if row['plot'] != 'N/A' else None
}
def load_movies(self) -> List[dict]:
"""Basic method for unloading data from MySQL."""
movies = []
writers = self.load_writers_names()
for row in self.conn.execute(self.SQL):
transformed_row = self._transform_row(row, writers)
movies.append(transformed_row)
return movies
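if __name__ == '__main__':
    # Added usage sketch: 'db.sqlite' is a placeholder path; the database must
    # contain the movies/actors/writers tables the SQL above expects.
    conn = sqlite3.connect('db.sqlite')
    loader = SQLiteLoader(conn)
    print(len(loader.load_movies()), 'movies loaded')
    conn.close()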
|
from setuptools import setup
setup(name='lcfcn',
version='0.6.0',
description='LCFCN',
url='git@github.com:ElementAI/LCFCN.git',
maintainer='Issam Laradji',
maintainer_email='issam.laradji@gmail.com',
license='MIT',
packages=['lcfcn'],
zip_safe=False,
      install_requires=[
          'tqdm>=0.0',
          'numpy>=0.0',
          'pandas>=0.0',
          'Pillow>=0.0',
          'scikit-image>=0.0',
          'scikit-learn>=0.0',
          'scipy>=0.0',
          'sklearn>=0.0',
          'torch>=0.0',
          'torchvision>=0.0',
      ])
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib
plt.style.use('ggplot')
from matplotlib.pyplot import figure
matplotlib.rcParams['figure.figsize'] = (12,8)
pd.options.mode.chained_assignment = None  # suppress SettingWithCopyWarning
# ## Read CSV file as a pandas dataframe
# In[2]:
music_data = pd.read_csv(r'movies.csv')
music_data.head(20)
# ## Searching for any missing data
# In[3]:
for col in music_data.columns:
missing_data = np.mean(music_data[col].isnull())
print('{}-{}'.format(col,missing_data))
# ## Find out about data types in columns
# In[4]:
print(music_data.dtypes)
# ## Change data type of 'budget', 'gross' columns from float64 to int64
# In[5]:
music_data['budget'] = music_data['budget'].astype('Int64')
music_data['gross'] = music_data['gross'].astype('Int64')
# ## Make a corrected 'year' column by separating the 'released' column into 4 separate columns
# In[6]:
music_data_frame = music_data["released"].str.split(" ",n=3,expand = True)
music_data_frame.rename(columns={0: "Month", 1: "day",2:"Year",3:"Country"}, inplace=True)
music_data['yearcorrect'] = music_data_frame['Year']
# In[7]:
music_data_frame
# ## Display the whole dataframe and sort it by the 'gross' column
# In[8]:
pd.set_option('display.max_rows',None)
music_data.sort_values(by =['gross'], inplace = False, ascending =False)
# ## Drop duplicates, first inspecting the 'company' column sorted in descending order
# In[9]:
music_data['company'].drop_duplicates().sort_values(ascending =False)
# In[10]:
music_data.drop_duplicates()
#
# ## Are there any outliers in the column of interest, 'gross'? A box plot is used to determine if there are any
# In[11]:
music_data.boxplot(column=['gross'])
# 5-6 points can be concluded as outliers in the box plot above.
# ### Find features that correlate with 'gross' feature or column
# ### Build a scatter and strip plot and compare features
# In[12]:
music_data['gross'] = music_data['gross'].astype('float')
music_data['budget'] = music_data['budget'].astype('float')
# In[13]:
plt.scatter(x= music_data['budget'], y = music_data['gross'],alpha=0.5)
plt.title('Budget vs Gross Earnings')
plt.xlabel('Gross Earnings')
plt.ylabel('Budget for Film')
plt.show()
# In[14]:
sns.stripplot(x="rating", y="gross", data=music_data)
# ## A regression plot is used to determine the relation between the 'gross' column and another column of interest; below, the 'budget' column.
# In[15]:
sns.regplot(x="gross", y="budget", data=music_data, scatter_kws = {"color":"red"}, line_kws = {"color":"green"})
# ## Correlation between features that have numerical data
# ## Check Pearson's correlation between 'budget' and 'gross'
# In[16]:
music_data.corr(method='pearson')  # pearson, kendall, spearman
# In[17]:
music_data.corr(method='spearman')  # pearson, kendall, spearman
# In[18]:
music_data.corr(method='kendall')  # pearson, kendall, spearman
# ##### There is a high correlation between budget and gross
# ## Visualize correlation using correlation matrix and seaborn library
# In[19]:
correlation_matrix = music_data.corr(method ='pearson')
sns.heatmap(correlation_matrix, annot = True)
plt.title("Correlation matrix for Numeric Features")
plt.xlabel("Movie features")
plt.ylabel("Movie features")
plt.show()
# ## Create numeric values for all columns with categorical values and visualize with a correlation matrix
# In[20]:
music_data_numeric = music_data
for col_name in music_data_numeric.columns:
if(music_data_numeric[col_name].dtype == 'object'):
music_data_numeric[col_name] = music_data_numeric[col_name].astype('category')
music_data_numeric[col_name] = music_data_numeric[col_name].cat.codes
music_data_numeric
# In[21]:
correlation_matrix = music_data_numeric.apply(lambda x: x.factorize()[0]).corr(method='pearson')
sns.heatmap(correlation_matrix, annot = True)
plt.title("Correlation matrix for All Features")
plt.xlabel("Movie features")
plt.ylabel("Movie features")
plt.show()
# ## Using factorize - this assigns a numeric code to each unique categorical value
#
# In[22]:
music_data.apply(lambda x: x.factorize()[0]).corr(method='pearson')
# ## Plot correlation matrix
# In[23]:
correlation_matrix = music_data.apply(lambda x: x.factorize()[0]).corr(method='pearson')
sns.heatmap(correlation_matrix, annot = True)
plt.title("Correlation matrix for Movies")
plt.xlabel("Movie features")
plt.ylabel("Movie features")
plt.show()
# ## Print Correlation pairs for analysis
# In[24]:
correlation_mat = music_data.apply(lambda x: x.factorize()[0]).corr()
corr_pairs = correlation_mat.unstack()
print(corr_pairs)
# ## Sort out correlation pairs using 'quicksort' method
# In[25]:
sorted_pairs = corr_pairs.sort_values(kind="quicksort")
print(sorted_pairs)
# ## Take a look at the features that have a high correlation (> 0.5)
#
# In[26]:
strong_pairs = sorted_pairs[abs(sorted_pairs) > 0.5]
print(strong_pairs)
# ##### Votes and budget have the highest correlation with gross earnings. Company has a low correlation
# ## Looking at the top 15 companies by gross revenue
#
# In[27]:
CompanyGrossSum = music_data.groupby(['company'])[["gross"]].sum()
CompanyGrossSumSorted = CompanyGrossSum.sort_values(['gross','company'], ascending = False)[:15]
CompanyGrossSumSorted = CompanyGrossSumSorted['gross'].astype('int64')
CompanyGrossSumSorted
|
from typing import List
import pytest
from great_expectations import DataContext
from great_expectations.core.batch import BatchRequest
from great_expectations.exceptions import ProfilerConfigurationError
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.rule_based_profiler.domain_builder import DomainBuilder
from great_expectations.rule_based_profiler.domain_builder.categorical_column_domain_builder import (
CategoricalColumnDomainBuilder,
)
from great_expectations.rule_based_profiler.types import Domain
def test_instantiate_with_cardinality_limit_modes(
alice_columnar_table_single_batch_context,
):
data_context: DataContext = alice_columnar_table_single_batch_context
batch_request: BatchRequest = BatchRequest(
datasource_name="alice_columnar_table_single_batch_datasource",
data_connector_name="alice_columnar_table_single_batch_data_connector",
data_asset_name="alice_columnar_table_single_batch_data_asset",
)
domain_builder: DomainBuilder = CategoricalColumnDomainBuilder(
exclude_column_name_suffixes="_id",
limit_mode=CategoricalColumnDomainBuilder.cardinality_limit_modes.VERY_FEW,
batch_request=batch_request,
data_context=data_context,
)
domain_builder.get_domains()
def test_single_batch_very_few_cardinality(alice_columnar_table_single_batch_context):
data_context: DataContext = alice_columnar_table_single_batch_context
batch_request: BatchRequest = BatchRequest(
datasource_name="alice_columnar_table_single_batch_datasource",
data_connector_name="alice_columnar_table_single_batch_data_connector",
data_asset_name="alice_columnar_table_single_batch_data_asset",
)
domain_builder: DomainBuilder = CategoricalColumnDomainBuilder(
exclude_column_name_suffixes="_id",
limit_mode="very_few",
batch_request=batch_request,
data_context=data_context,
)
domains: List[Domain] = domain_builder.get_domains()
alice_all_column_names: List[str] = [
"event_type",
"event_ts",
"server_ts",
"device_ts",
"user_agent",
]
column_name: str
alice_all_column_domains: List[Domain] = [
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": column_name,
},
)
for column_name in alice_all_column_names
]
assert len(domains) == 5
assert domains == alice_all_column_domains
def test_single_batch_one_cardinality(alice_columnar_table_single_batch_context):
data_context: DataContext = alice_columnar_table_single_batch_context
batch_request: BatchRequest = BatchRequest(
datasource_name="alice_columnar_table_single_batch_datasource",
data_connector_name="alice_columnar_table_single_batch_data_connector",
data_asset_name="alice_columnar_table_single_batch_data_asset",
)
domain_builder: DomainBuilder = CategoricalColumnDomainBuilder(
limit_mode="ONE",
batch_request=batch_request,
data_context=data_context,
)
domains: List[Domain] = domain_builder.get_domains()
alice_all_column_names: List[str] = [
"user_agent",
]
column_name: str
alice_all_column_domains: List[Domain] = [
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": column_name,
},
)
for column_name in alice_all_column_names
]
assert len(domains) == 1
assert domains == alice_all_column_domains
def test_unsupported_cardinality_limit(
alice_columnar_table_single_batch_context,
):
data_context: DataContext = alice_columnar_table_single_batch_context
batch_request: dict = {
"datasource_name": "alice_columnar_table_single_batch_datasource",
"data_connector_name": "alice_columnar_table_single_batch_data_connector",
"data_asset_name": "alice_columnar_table_single_batch_data_asset",
}
with pytest.raises(ProfilerConfigurationError) as excinfo:
# noinspection PyUnusedLocal,PyArgumentList
domains: List[Domain] = CategoricalColumnDomainBuilder(
limit_mode="&*#$&INVALID&*#$*&",
batch_request=batch_request,
data_context=data_context,
).get_domains()
assert "specify a supported cardinality mode" in str(excinfo.value)
assert "REL_1" in str(excinfo.value)
assert "MANY" in str(excinfo.value)
def test_unspecified_cardinality_limit(
alice_columnar_table_single_batch_context,
):
data_context: DataContext = alice_columnar_table_single_batch_context
batch_request: dict = {
"datasource_name": "alice_columnar_table_single_batch_datasource",
"data_connector_name": "alice_columnar_table_single_batch_data_connector",
"data_asset_name": "alice_columnar_table_single_batch_data_asset",
}
with pytest.raises(ProfilerConfigurationError) as excinfo:
# noinspection PyUnusedLocal,PyArgumentList
domains: List[Domain] = CategoricalColumnDomainBuilder(
batch_request=batch_request,
data_context=data_context,
).get_domains()
assert "Please pass ONE of the following parameters" in str(excinfo.value)
assert "you passed 0 parameters" in str(excinfo.value)
def test_excluded_columns_single_batch(alice_columnar_table_single_batch_context):
data_context: DataContext = alice_columnar_table_single_batch_context
batch_request: BatchRequest = BatchRequest(
datasource_name="alice_columnar_table_single_batch_datasource",
data_connector_name="alice_columnar_table_single_batch_data_connector",
data_asset_name="alice_columnar_table_single_batch_data_asset",
)
domain_builder: DomainBuilder = CategoricalColumnDomainBuilder(
limit_mode="VERY_FEW",
exclude_column_names=[
"id",
"event_type",
"user_id",
"event_ts",
"server_ts",
],
batch_request=batch_request,
data_context=data_context,
)
domains: List[Domain] = domain_builder.get_domains()
alice_all_column_names: List[str] = [
"device_ts",
"user_agent",
]
column_name: str
alice_all_column_domains: List[Domain] = [
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": column_name,
},
)
for column_name in alice_all_column_names
]
assert len(domains) == 2
assert domains == alice_all_column_domains
def test_excluded_columns_empty_single_batch(alice_columnar_table_single_batch_context):
data_context: DataContext = alice_columnar_table_single_batch_context
batch_request: BatchRequest = BatchRequest(
datasource_name="alice_columnar_table_single_batch_datasource",
data_connector_name="alice_columnar_table_single_batch_data_connector",
data_asset_name="alice_columnar_table_single_batch_data_asset",
)
domain_builder: DomainBuilder = CategoricalColumnDomainBuilder(
limit_mode="VERY_FEW",
exclude_column_names=[],
batch_request=batch_request,
data_context=data_context,
)
domains: List[Domain] = domain_builder.get_domains()
alice_all_column_names: List[str] = [
"id",
"event_type",
"user_id",
"event_ts",
"server_ts",
"device_ts",
"user_agent",
]
column_name: str
alice_all_column_domains: List[Domain] = [
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": column_name,
},
)
for column_name in alice_all_column_names
]
assert len(domains) == 7
assert domains == alice_all_column_domains
def test_multi_batch_very_few_cardinality(
bobby_columnar_table_multi_batch_deterministic_data_context,
):
data_context: DataContext = (
bobby_columnar_table_multi_batch_deterministic_data_context
)
batch_request: BatchRequest = BatchRequest(
datasource_name="taxi_pandas",
data_connector_name="monthly",
data_asset_name="my_reports",
)
domain_builder: DomainBuilder = CategoricalColumnDomainBuilder(
limit_mode="very_few",
batch_request=batch_request,
data_context=data_context,
)
observed_domains: List[Domain] = domain_builder.get_domains()
expected_domains: List[Domain] = [
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": "VendorID",
},
),
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": "passenger_count",
},
),
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": "RatecodeID",
},
),
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": "store_and_fwd_flag",
},
),
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": "payment_type",
},
),
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": "mta_tax",
},
),
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": "improvement_surcharge",
},
),
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": "congestion_surcharge",
},
),
]
assert len(observed_domains) == 8
assert observed_domains == expected_domains
def test_multi_batch_one_cardinality(
bobby_columnar_table_multi_batch_deterministic_data_context,
):
data_context: DataContext = (
bobby_columnar_table_multi_batch_deterministic_data_context
)
batch_request: BatchRequest = BatchRequest(
datasource_name="taxi_pandas",
data_connector_name="monthly",
data_asset_name="my_reports",
)
domain_builder: DomainBuilder = CategoricalColumnDomainBuilder(
limit_mode="ONE",
batch_request=batch_request,
data_context=data_context,
)
observed_domains: List[Domain] = domain_builder.get_domains()
expected_domains: List[Domain] = []
assert len(observed_domains) == 0
assert observed_domains == expected_domains
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import sys
import smallsmilhandler
import json
import urllib.request
class KaraokeLocal():
def __init__(self, file):
parser = make_parser()
self.cHandler = smallsmilhandler.SmallSMILHandler()
parser.setContentHandler(self.cHandler)
parser.parse(open(file))
# print(cHandler.get_tags())
    def __str__(self):
        lineas = []
        for elemento in self.cHandler.get_tags():
            etiqueta = elemento[0]
            atributos = ""
            for atributo in elemento[1]:
                if elemento[1][atributo] != "":
                    atributos += '\t' + atributo + '="' + elemento[1][atributo] + '"'
            lineas.append(etiqueta + atributos)
        self.list = '\n'.join(lineas)
        return self.list
    def to_json(self, file, json_file):
        with open(json_file, 'w') as salida:
            json.dump(self.list, salida)
    def do_local(self):
        for elemento in self.cHandler.get_tags():
            for atributo in elemento[1]:
                if elemento[1][atributo][:7] == 'http://':
                    nombre_local = elemento[1][atributo].split('/')[-1]
                    urllib.request.urlretrieve(elemento[1][atributo], nombre_local)
                    # Exercise 6: point to the local location of the resource
                    elemento[1][atributo] = nombre_local
if __name__ == "__main__":
    try:
        file = sys.argv[1]
        json_file = file[:-4] + "json"
    except IndexError:
        sys.exit("Usage: python3 karaoke.py file.smil")
    karaoke = KaraokeLocal(file)
    print(karaoke)
    karaoke.to_json(file, json_file)
    karaoke.do_local()
    karaoke.to_json(file, 'local.json')
|
from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
from elasticapm.traces import capture_span
class PyLibMcInstrumentation(AbstractInstrumentedModule):
name = "pylibmc"
instrument_list = [
("pylibmc", "Client.get"),
("pylibmc", "Client.get_multi"),
("pylibmc", "Client.set"),
("pylibmc", "Client.set_multi"),
("pylibmc", "Client.add"),
("pylibmc", "Client.replace"),
("pylibmc", "Client.append"),
("pylibmc", "Client.prepend"),
("pylibmc", "Client.incr"),
("pylibmc", "Client.decr"),
("pylibmc", "Client.gets"),
("pylibmc", "Client.cas"),
("pylibmc", "Client.delete"),
("pylibmc", "Client.delete_multi"),
("pylibmc", "Client.touch"),
("pylibmc", "Client.get_stats"),
]
def call(self, module, method, wrapped, instance, args, kwargs):
wrapped_name = self.get_wrapped_name(wrapped, instance, method)
with capture_span(wrapped_name, "cache.memcached"):
return wrapped(*args, **kwargs)
|
"""DNS Authenticator for VitalQIP."""
import json
import logging
import requests
import zope.interface
from certbot import errors
from certbot import interfaces
from certbot.plugins import dns_common
from urllib.parse import urlparse
logger = logging.getLogger(__name__)
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(dns_common.DNSAuthenticator):
"""DNS Authenticator for VitalQIP
This Authenticator uses the VitalQIP Remote REST API to fulfill a dns-01 challenge.
"""
description = "Obtain certificates using a DNS TXT record (if you are using VitalQIP for DNS)."
ttl = 60
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self.credentials = None
@classmethod
def add_parser_arguments(cls, add):
super(Authenticator, cls).add_parser_arguments(add)
add("credentials", help="VitalQIP credentials INI file.")
def more_info(self):
return (
"This plugin configures a DNS TXT record to respond to a dns-01 challenge using "
+ "the VitalQIP Remote REST API."
)
def _setup_credentials(self):
self.credentials = self._configure_credentials(
"credentials",
"QIP credentials INI file",
{
"endpoint": "URL of the QIP remote API.",
"username": "Username for QIP remote API.",
"password": "Password for QIP remote API.",
"organisation": "Organisation for QIP remote API",
},
)
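    # Added sketch of the credentials INI this method expects; certbot derives
    # the real option prefix from the plugin name, so the prefix below is a
    # placeholder, not the actual key names:
    #
    #   <plugin-prefix>-endpoint = https://qip.example.com
    #   <plugin-prefix>-username = apiuser
    #   <plugin-prefix>-password = secret
    #   <plugin-prefix>-organisation = ExampleOrg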
def _perform(self, domain, validation_name, validation):
self._get_qip_client().add_txt_record(
domain, validation_name, validation, self.ttl
)
def _cleanup(self, domain, validation_name, validation):
self._get_qip_client().del_txt_record(
domain, validation_name, validation, self.ttl
)
def _get_qip_client(self):
return _QIPClient(
self.credentials.conf("endpoint"),
self.credentials.conf("username"),
self.credentials.conf("password"),
self.credentials.conf("organisation"),
)
class _QIPClient(object):
"""
Encapsulates all communication with the QIP remote REST API.
"""
def __init__(self, endpoint, username, password, organisation):
logger.debug("creating qipclient")
e = urlparse(endpoint)
if e.scheme == "":
raise errors.PluginError("No scheme (http/https) found in provided endpoint")
self.endpoint = e
self.username = username
self.password = password
self.organisation = organisation
self.session = requests.Session()
self.session.headers.update({'accept': 'application/json'})
self.session.headers.update({'Content-Type': 'application/json'})
# remove for prod release
self.session.verify = False
def _login(self):
if "Authentication" in self.session.headers.keys():
return
logger.debug("logging in")
logindata = {"username": self.username, "password": self.password}
resp = self._api_request("POST", "/api/login", logindata)
if "Authentication" not in resp.headers.keys():
raise errors.PluginError("HTTP Error during login. No 'Authentication' header found")
token = resp.headers["Authentication"]
logger.debug(f"session token is {token}")
self.session.headers.update({'Authentication': f"Token {token}"})
def _api_request(self, method, action, data=None, query={}):
url = self._get_url(action)
logger.debug(f"Data: {data}")
resp = self.session.request(method, url, json=data, params=query)
logger.debug(f"API Request to URL: {url}")
if action == f"/api/v1/{self.organisation}/zone.json" and resp.status_code == 404:
return resp.text
if resp.status_code < 200 or resp.status_code > 299:
raise errors.PluginError(f"HTTP Error during request {resp.status_code}")
if action == "/api/login":
return resp
result = {}
if resp.text != "":
try:
result = resp.json()
except json.decoder.JSONDecodeError:
raise errors.PluginError(f"API response with non JSON: {resp.text}")
return result
def _get_url(self, action):
return f"{self.endpoint.geturl()}{action}"
def add_txt_record(self, domain, record_name, record_content, record_ttl):
"""
Add a TXT record using the supplied information.
:param str domain: The domain to use to look up the managed zone.
:param str record_name: The record name (typically beginning with '_acme-challenge.').
:param str record_content: The record content (typically the challenge validation).
:param int record_ttl: The record TTL (number of seconds that the record may be cached).
:raises certbot.errors.PluginError: if an error occurs communicating with the VitalQIP API
"""
self._login()
record = self.get_existing_txt(record_name)
if record is not None:
if "rr" in record and record["rr"]["data"] == record_content:
logger.info(f"already there, id {record['rr']['name']}")
return
else:
logger.info(f"update {record_name}")
self._update_txt_record(record, record_content, record_ttl)
else:
logger.info("insert new txt record")
self._insert_txt_record(record_name, record_content, record_ttl, domain)
def del_txt_record(self, domain, record_name, record_content, record_ttl):
"""
Delete a TXT record using the supplied information.
:param str domain: The domain to use to look up the managed zone.
:param str record_name: The record name (typically beginning with '_acme-challenge.').
:param str record_content: The record content (typically the challenge validation).
:param int record_ttl: The record TTL (number of seconds that the record may be cached).
:raises certbot.errors.PluginError: if an error occurs communicating with the VitalQIP API
"""
self._login()
record = self.get_existing_txt(record_name)
if record is None:
return
if "rr" in record and record["rr"]["data"] == record_content:
logger.info(f"delete {record_name}")
zone = self._find_managed_zone(domain)
query = {"infraFQDN": zone, "infraType": "ZONE", "owner": record_name, "rrType": "TXT", "data1": record_content}
self._api_request("DELETE", f"/api/v1/{self.organisation}/rr/singleDelete", query=query)
def _insert_txt_record(self, record_name, record_content, record_ttl, domain):
logger.debug(f"insert with data: {record_content}")
zone_name = self._find_managed_zone(domain)
payload = {
"owner": record_name,
"classType": "IN",
"rrType": "TXT",
"data1": record_content,
"publishing": "ALWAYS",
"ttl": record_ttl,
"infraType": "ZONE",
"infraFQDN": zone_name
}
self._login()
self._api_request("POST", f"/api/v1/{self.organisation}/rr", data=payload)
def _update_txt_record(self, old_record, record_content, record_ttl):
logger.debug(f"update with data: {record_content}")
self._login()
# old record data is being returned with quotes which make the update fail. We need to strip them for update to work
old_data = old_record["rr"]["data"].lstrip('"').rstrip('"')
update_body = {
"oldRRRec": {
"owner": old_record["rr"]["name"],
"classType": "IN",
"rrType": old_record["rr"]["recordType"],
"data1": old_data,
"publishing": "ALWAYS",
"ttl": record_ttl,
"infraType": "ZONE",
"infraFQDN": old_record["name"],
"isDefaultRR": False
},
"updatedRRRec": {
"owner": old_record["rr"]["name"],
"classType": "IN",
"rrType": old_record["rr"]["recordType"],
"data1": record_content,
"publishing": "ALWAYS",
"ttl": record_ttl,
"infraType": "ZONE",
"infraFQDN": old_record["name"],
"isDefaultRR": False
}
}
logger.debug(f"update with data: {update_body}")
self._api_request("PUT", f"/api/v1/{self.organisation}/rr", data=update_body)
def _find_managed_zone(self, domain):
"""
Find the managed zone for a given domain.
:param str domain: The domain for which to find the managed zone.
:returns: The name of the managed zone, if found.
:rtype: str
:raises certbot.errors.PluginError: if the managed zone cannot be found or unexpected response is received.
"""
if len(domain.split('.')) == 1:
raise errors.PluginError("No zone found")
self._login()
zones = self._api_request("GET", f"/api/v1/{self.organisation}/zone.json", query={"name": domain})
if "DNS Zone not found" in zones:
domain = '.'.join(domain.split('.')[1:])
return self._find_managed_zone(domain)
else:
if "list" in zones:
for zone in zones["list"]:
if "name" in zone:
if zone["name"] == domain:
logger.debug(f"found zone: {zone['name']}")
return zone["name"]
else:
raise errors.PluginError("Unexpected QIP response")
else:
raise errors.PluginError("Unexpected QIP response")
def get_existing_txt(self, record_name):
"""
Get existing TXT records from the RRset for the record name.
If an error occurs while requesting the record set, it is suppressed
and None is returned.
:param str record_name: The record name (typically beginning with '_acme-challenge.').
:returns: TXT record object or None
:rtype: `Object` or `None`
"""
self._login()
query = {"name": record_name, "searchType": "All", "subRange": "TXT"}
logger.debug(f"searching for : {query}")
try:
records = self._api_request("GET", f"/api/v1/{self.organisation}/qip-search.json", query=query)
        except errors.PluginError:
return None
for record in records['list']:
if "rr" in record:
if record["rr"]['recordType'] == 'TXT' and record["rr"]["name"] == record_name:
return record
return None
|
def addAll(n1, n2):
    result = 0
    for i in range(n1, n2 + 1):
        result += i
    return result
print(addAll(1, 10))
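# Added note: the loop above computes the arithmetic series n1 + ... + n2,
# which also has the closed form (n1 + n2) * (n2 - n1 + 1) // 2.
print((1 + 10) * (10 - 1 + 1) // 2)  # 55, matching addAll(1, 10)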
|
""" Tests web_automation.py. However, we have to use some of it's functions in order to test other functions. """
import pytest
from tenacity import retry, wait_fixed, stop_after_attempt
HOST = "http://localhost:5000"
DEFAULT_VALUE = "default value"
def verify(func, args, expected_result, wait_time=1, attempts=10):
""" Executes the provided function with arguments, and asserts against the expected result until
found or times out.
Args:
func (obj): Function to execute
args (tuple): Arguments to pass to function
expected_result (any): What the function should return
wait_time (int): Time in seconds to wait between retries
attempts (int): Number of attempts to assert result
"""
@retry(wait=wait_fixed(wait_time), stop=stop_after_attempt(attempts))
def _verify(func, args, expected_result):
assert func(*args) == expected_result
_verify(func, args, expected_result)
def verify_standard(web):
""" Check against standard text output """
verify(web.get_text, ("output", "id"), "Passed")
def test_click_button(web):
""" Verify we can press an <input type=button> with an ID """
web.open_url(f"{HOST}/button")
web.click("button1", "id")
verify_standard(web)
def test_alert_text(web):
""" Verify text in an alert popup """
text = "This is an alert"
web.open_url(f"{HOST}/alert")
verify(web.get_alert_text, (), text)
web.accept_alert() # Alert has to be dismissed, otherwise the call to close the browser fails
def test_alert_dismiss(web):
    """ Verify an alert no longer exists after it is accepted """
web.open_url(f"{HOST}/alert")
web.accept_alert() # Alert has to be removed, otherwise the call to close the browser fails
result = web.check_for_alert()
assert not result
def test_alert_exists(web):
""" Verify we can detect an alert popup"""
web.open_url(f"{HOST}/alert")
result = web.check_for_alert() # Alert has to be removed, otherwise the call to close the browser fails
assert result
def test_alert_does_not_exist(web):
""" Verify we can detect an alert does not exist"""
web.open_url(f"{HOST}/")
    result = web.check_for_alert()  # No alert is expected on this page
assert not result
def test_body_text(web):
""" Read text from body """
assert_text = "Awesome sauce"
web.open_url(f"{HOST}/params?value={assert_text}") # This web page will display any text given to it
verify(web.get_text, ("text", "id"), assert_text)
def test_read_text_from_textbox(web):
""" Verify we can read text from a textbox """
web.open_url(f"{HOST}/text_entry")
verify(web.get_text, ("text1", "id"), DEFAULT_VALUE)
def test_text_entry(web):
""" Enter text into input text box """
text = "Zebra"
web.open_url(f"{HOST}/text_entry")
web.text_entry(text, "text1", "id")
verify(web.get_text, ("text1", "id"), text)
def test_get_url(web):
""" Read URL from address bar """
url = f"{HOST}/params?value=random_task"
web.open_url(url) # This web page will display any text given to it
verify(web.get_url, (), url)
def test_wait_for_element(web):
""" Test dynamically waiting for an element to appear """
url = f"{HOST}/delayed_element"
web.open_url(url)
element = web.wait_for_element("output", "id") # Should return the element's object if found
assert element
def test_wait_for_element_removal(web):
""" Test dynamically waiting for an element to be deleted """
url = f"{HOST}/remove_element"
web.open_url(url)
assert web.wait_for_element_removal("output", "id") # Returns true if removed
def test_wait_for_expected_cond(web):
""" Test dynamically waiting for an element to be deleted """
url = f"{HOST}/remove_element"
web.open_url(url)
element = web.wait_for_element_removal("output", "id") # Returns true if removed
assert element
def test_click_and_hold(web):
""" Verify left click and hold on element for specific amount of time """
time_to_hold = 2
web.open_url(f"{HOST}/drag")
web.click_hold(time_to_hold, "block1", "id")
seconds = float(web.get_text("separator", "id")) # Read time
# Verify time down is within range, it is written to an element on the web page
    assert time_to_hold <= seconds < time_to_hold + 1
def test_right_click(web):
""" Verify Javascript on the web page registers a right click """
web.open_url(f"{HOST}/keypress")
web.right_click("right_click", "id")
verify(web.get_text, ("right_click", "id"), "executed")
def test_double_click(web):
""" Verify a double click is registered """
web.open_url(f"{HOST}/keypress")
web.double_click("double_click", "id")
verify(web.get_text, ("double_click", "id"), "executed")
def test_mouse_hover(web):
""" Verify the mouse pointer is moved onto an element """
web.open_url(f"{HOST}/keypress")
web.mouse_hover("hover", "id")
verify(web.get_text, ("hover", "id"), "executed")
def test_drag_drop(web):
""" Verify an element can be dragged onto another element """
web.open_url(f"{HOST}/drag")
web.click("block1", "id") # Stores the current position in the element
position1 = int(web.get_text("position", "id"))
web.drag_drop("block1", "id", "block2", "id") # Drag to another element
position2 = int(web.get_text("position", "id"))
assert position2 > position1
@pytest.mark.xfail(reason="Haven't found way to intercept keypresses in order to verify them")
def test_keyboard_shortcut():
""" Verifies various keyboard shortcuts """
@pytest.mark.parametrize("direction", ["up", "left"])
def test_scroll1(web, direction):
""" Test scrolling the web page in two directions """
# Load the page which will be automatically scrolled, if we need to scroll up or left
pre_scroll = ""
if direction == "up":
pre_scroll = "down"
elif direction == "left":
pre_scroll = "right"
web.open_url(f"{HOST}/long?{pre_scroll}")
web.scroll_page(direction)
assert int(web.get_text("scroll", "id")) == 0
@pytest.mark.parametrize("direction", ["down", "right"])
def test_scroll2(web, direction):
""" Test scrolling the web page in two directions """
web.open_url(f"{HOST}/long")
web.scroll_page(direction)
assert int(web.get_text("scroll", "id")) > 0
def test_nav_back(web):
""" Test pressing the back button returns to the previous page """
web.open_url(f"{HOST}/link") # Link page
web.click("Go To Root", "link text") # Root page
web.page_navigation("back")
web.click("Go To Root", "link text") # Verify we're back on the link page. Exception if this link doesn't exist
def test_nav_forward(web):
""" Test pressing the forward button takes the user to the next page """
web.open_url(f"{HOST}/link")
web.click("Go To Root", "link text")
web.page_navigation("back") # Move back to previous page
web.page_navigation("forward") # Move forward, should be back on root page
verify(web.get_text, ("default", "id"), "Naught, but disappointment thou shalt find within this realm.")
def test_nav_refresh(web):
""" Verify the web page is refreshed """
web.open_url(f"{HOST}/drag")
web.click("block1", "id") # Stores the current position in the element
position1 = int(web.get_text("position", "id"))
web.drag_drop("block1", "id", "block2", "id") # Drag to another element
web.page_navigation("refresh")
web.click("block1", "id") # Stores the current position in the element
position2 = int(web.get_text("position", "id"))
assert position1 == position2 # If page was refreshed, element should be back in original position
@pytest.mark.parametrize("name", ["hor200", "ver200"])
def test_scroll_to_element(web, name):
""" Verify we can scroll to an element horizontally and vertically """
web.open_url(f"{HOST}/long")
web.scroll_to_element(name, "id")
web.mouse_hover(name, "id") # Raises exception if element is not visible
|
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(template_name='chat.html'), name='home'),
)
|
import h5py
import numpy as np
from BaseInputProcessor import BaseInputProcessor
class FileInputProcessor(BaseInputProcessor):
def __init__(self, filename, mode=0):
self.filename = filename
h5file = h5py.File(self.filename, 'r')
var_list = []
for var, g in h5file.items():
if not isinstance(g, h5py.Group): continue
uids = g.get('uids')[()].tolist()
var_list.append((var, uids))
super(FileInputProcessor, self).__init__(var_list, mode)
h5file.close()
def pre_run(self):
self.h5file = h5py.File(self.filename, 'r')
self.dsets = {}
for var, g in self.h5file.items():
if not isinstance(g, h5py.Group): continue
self.dsets[var] = g.get('data')
self.pointer = 0
self.end_of_file = False
def update_input(self):
        for var, dset in self.dsets.items():
if self.pointer+1 == dset.shape[0]: self.end_of_file=True
self.variables[var]['input'] = dset[self.pointer,:]
self.pointer += 1
if self.end_of_file: self.h5file.close()
def is_input_available(self):
return not self.end_of_file
def post_run(self):
if not self.end_of_file: self.h5file.close()
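# Expected HDF5 layout (inferred from the reader above): one group per variable,
# each holding a 'uids' dataset plus a 'data' dataset whose rows update_input()
# consumes one step at a time until the file is exhausted.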
|
from jumpscale import j
import libvirt
from xml.etree import ElementTree
from JumpscaleLib.sal.kvm.BaseKVMComponent import BaseKVMComponent
import random
import re
class Interface(BaseKVMComponent):
"""
Object representation of xml portion of the interface in libvirt.
"""
@staticmethod
def generate_mac():
"""
Generate mac address.
"""
mac = [0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return ':'.join(map(lambda x: '%02x' % x, mac))
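    # Note: 00:16:3e is the Xensource OUI, conventionally used for locally
    # generated virtual machine MAC addresses.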
def __init__(self, controller, bridge, name=None, mac=None, interface_rate=None, burst=None):
"""
Interface object instance.
@param controller object(j.sal.kvm.KVMController()): controller object to use.
@param bridge object(j.sal.kvm.Network()): network object that create the bridge
@param name str: name of interface
@param mac str: mac address to be assigned to port
@param interface_rate int: qos interface rate to bound to in Kb
@param burst str: maximum allowed burst that can be reached in Kb/s
"""
        BaseKVMComponent.__init__(self, controller=controller)
self.controller = controller
self.name = name
self.ovs = name is not None
self.bridge = bridge
        self.qos = interface_rate is not None
        self.interface_rate = str(interface_rate)
        self.burst = burst
        if interface_rate is not None and burst is None:
            self.burst = str(int(interface_rate * 0.1))
self.mac = mac if mac else Interface.generate_mac()
self._ip = None
self._iface = None
    @property
    def iface(self):
if self._iface is None:
self._iface = self.controller.connection.interfaceLookupByName(self.name)
return self._iface
@property
def is_created(self):
try:
self.controller.connection.interfaceLookupByName(self.name)
return True
except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_INTERFACE:
return False
raise e
@property
def is_started(self):
return self.iface.isActive() == 1
def create(self, start=True, autostart=True):
return NotImplementedError()
def start(self, autostart=True):
return NotImplementedError()
def delete(self):
"""
Delete interface and port related to certain machine.
"""
if self.ovs:
return self.controller.executor.execute('ovs-vsctl del-port %s %s' % (self.bridge.name, self.name))
else:
raise NotImplementedError("delete on non ovs network is not supported")
def stop(self):
return NotImplementedError()
def to_xml(self):
"""
Return libvirt's xml string representation of the interface.
"""
Interfacexml = self.controller.get_template('interface.xml').render(
macaddress=self.mac,
bridge=self.bridge.name,
qos=self.qos,
rate=self.interface_rate,
burst=self.burst,
name=self.name)
return Interfacexml
@classmethod
def from_xml(cls, controller, xml):
"""
Instantiate a interface object using the provided xml source and kvm controller object.
@param controller object(j.sal.kvm.KVMController): controller object to use.
@param xml str: xml string of machine to be created.
"""
interface = ElementTree.fromstring(xml)
if interface.find('virtualport') is None:
name = None
else:
name = interface.find('virtualport').find(
'parameters').get('profileid')
bridge_name = interface.find('source').get('bridge')
bridge = j.sal.kvm.Network(controller, bridge_name)
bandwidth = interface.findall('bandwidth')
if bandwidth:
interface_rate = bandwidth[0].find('inbound').get('average')
burst = bandwidth[0].find('inbound').get('burst')
else:
interface_rate = burst = None
mac = interface.find('mac').get('address')
return cls(controller=controller, bridge=bridge, name=name, mac=mac, interface_rate=interface_rate, burst=burst)
@classmethod
def get_by_name(cls, controller, name):
        iface = controller.connection.interfaceLookupByName(name)
return cls.from_xml(controller, iface.XMLDesc())
@property
def ip(self):
if not self._ip:
bridge_name = self.bridge.name
mac = self.mac
rc, ip, err = self.controller.executor.prefab.core.run(
"nmap -n -sn $(ip r | grep %s | grep -v default | awk '{print $1}') | grep -iB 2 '%s' | head -n 1 | awk '{print $NF}'" %
(bridge_name, mac))
ip_pat = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
m = ip_pat.search(ip)
if m:
self._ip = m.group()
return self._ip
    def set_qos(self, qos, burst=None):
        """
        Limit the throughput of an interface as a form of QoS.
        @param qos int: rate to be limited to in Kb
        @param burst int: maximum allowed burst that can be reached in Kb/s
        """
# TODO: *1 spec what is relevant for a vnic from QOS perspective, what can we do
# goal is we can do this at runtime
if self.ovs:
self.controller.executor.execute(
'ovs-vsctl set interface %s ingress_policing_rate=%d' % (self.name, qos))
if not burst:
burst = int(qos * 0.1)
self.controller.executor.execute(
'ovs-vsctl set interface %s ingress_policing_burst=%d' % (self.name, burst))
else:
raise NotImplementedError('qos for std bridge not implemeted')
|
'''Develop a program that reads the first term and the common difference of an
arithmetic progression (PA), then shows its first 10 terms. Note: use while.'''
primeiro = int(input('First term: '))
razão = int(input('Common difference of the PA: '))
termo = primeiro
cont = 1
while cont <= 10:
print(' {} -> '.format(termo), end='')
    termo += razão
cont += 1
print('END!')
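# Note: the n-th term also follows directly from the closed form
# termo_n = primeiro + (n - 1) * razão, so the loop's final printed value is
# primeiro + 9 * razão.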
|
import random
import tetris_blocks
class Randomblock:
def __init__(self):
number_of_possible_blocks = len(tetris_blocks.block_list)
self.maximum = number_of_possible_blocks - 1
def get_random_block(self):
re = random.randint(0, self.maximum)
return tetris_blocks.block_list[re]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .masktrack_rcnn_tracker import MaskTrackRCNNTracker
from .sort_tracker import SortTracker
from .tracktor_tracker import TracktorTracker
__all__ = [
'BaseTracker', 'TracktorTracker', 'SortTracker', 'MaskTrackRCNNTracker',
'ByteTracker'
]
|
# -*- coding: utf-8 -*-
""" Update an Attendify speakers XLSX file with the current list of
speakers.
Usage: manage.py attendify_speakers_xlsx ep2016 speakers.xlsx
Note that for Attendify you have to download the speakers before
running this script, since they add meta data to the downloaded
file which has to be kept around when uploading it again.
The script updates speakers.xlsx in place. Unfortunately, Attendify
currently has a bug in that it doesn't accept the file format
generated by openpyxl. Opening the file in LibreOffice and saving
it (without changes) fixes this as a workaround.
Attendify Worksheet "Schedule" format
-------------------------------------
Row A4: First Name, Last Name, Company (Optional), Position
(Optional), Group (Optional), Profile (Optional), Email
(Optional), Phone (Optional), Twitter (Optional), Facebook
(Optional), LinkedIn (Optional), Google+ (Optional), UID (do not
delete)
Row A6: Start of data
"""
from django.core.management.base import BaseCommand, CommandError
from django.core import urlresolvers
from django.conf import settings
from django.utils.html import strip_tags
from conference import models as cmodels
from conference import utils
from p3 import models
import datetime
from collections import defaultdict
from optparse import make_option
import operator
import markdown2
import openpyxl
### Globals
# Debug output ?
_debug = 1
# These must match the talk .type or .admin_type
from accepted_talks import TYPE_NAMES
### Helpers
def profile_url(user):
return urlresolvers.reverse('conference-profile',
args=[user.attendeeprofile.slug])
def format_text(text, remove_tags=False, output_html=True):
# Remove whitespace
text = text.strip()
if not text:
return text
# Remove links, tags, etc.
if remove_tags:
text = strip_tags(text)
# Remove quotes
if text[0] == '"' and text[-1] == '"':
text = text[1:-1]
# Convert markdown markup to HTML
if output_html:
text = markdown2.markdown(text)
return text
def add_speaker(data, speaker):
# Get speaker profile
user = speaker.user
profile = cmodels.AttendeeProfile.objects.get(user=user)
p3profile = models.P3Profile.objects.get(profile=profile)
# Skip speakers without public profile. Speaker profiles must be
# public, but you never know. See conference/models.py
if profile.visibility != 'p':
return
# Collect data
first_name = speaker.user.first_name.title()
last_name = speaker.user.last_name.title()
company = profile.company
position = profile.job_title
profile_text = (u'<a href="%s%s">Profile on EuroPython Website</a>' %
(settings.DEFAULT_URL_PREFIX, profile_url(user)))
twitter = p3profile.twitter
if twitter.startswith(('https://twitter.com/', 'http://twitter.com/')):
twitter = twitter.split('/')[-1]
# Skip special entries
full_name = first_name + last_name
if first_name == 'To Be' and last_name == 'Announced':
return
# UID
uid = u''
data.append((
first_name,
last_name,
company,
position,
u'', # group
profile_text,
u'', # email: not published
u'', # phone: not published
twitter,
u'', # facebook
u'', # linkedin
u'', # google+
uid))
# Start row of data in spreadsheet (Python 0-based index)
SPEAKERS_WS_START_DATA = 5
# Column number of UID columns (Python 0-based index)
SPEAKERS_UID_COLUMN = 12
# Number of columns to make row unique (first, last, company)
SPEAKERS_UNIQUE_COLS = 3
def update_speakers(speakers_xlsx, new_data, updated_xlsx=None):
# Load workbook
wb = openpyxl.load_workbook(speakers_xlsx)
assert wb.sheetnames == [u'Instructions', u'Speakers', u'System']
ws = wb['Speakers']
# Extract data values
ws_data = list(ws.values)[SPEAKERS_WS_START_DATA:]
print ('read %i data lines' % len(ws_data))
print ('first line: %r' % ws_data[:1])
print ('last line: %r' % ws_data[-1:])
# Reconcile UIDs / talks
uids = {}
for line in ws_data:
uid = line[SPEAKERS_UID_COLUMN]
if not uid:
continue
uids[tuple(line[:SPEAKERS_UNIQUE_COLS])] = uid
# Add UID to new data
new_speakers = []
for line in new_data:
key = tuple(line[:SPEAKERS_UNIQUE_COLS])
if key not in uids:
print ('New speaker %s found' % (key,))
uid = u''
else:
uid = uids[key]
line = tuple(line[:SPEAKERS_UID_COLUMN]) + (uid,)
new_speakers.append(line)
new_data = new_speakers
# Replace old data with new data
old_data_rows = len(ws_data)
new_data_rows = len(new_data)
print ('new data: %i data lines' % new_data_rows)
offset = SPEAKERS_WS_START_DATA + 1
print ('new_data = %i rows' % len(new_data))
for j, row in enumerate(ws[offset: offset + new_data_rows - 1]):
new_row = new_data[j]
if _debug:
print ('updating row %i with %r' % (j, new_row))
if len(row) > len(new_row):
row = row[:len(new_row)]
for i, cell in enumerate(row):
cell.value = new_row[i]
# Overwrite unused cells with None
if new_data_rows < old_data_rows:
for j, row in enumerate(ws[offset + new_data_rows + 1:
offset + old_data_rows + 1]):
if _debug:
print ('clearing row %i' % (j,))
for i, cell in enumerate(row):
cell.value = None
# Write updated data
if updated_xlsx is None:
updated_xlsx = speakers_xlsx
wb.save(updated_xlsx)
###
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
# make_option('--option',
# action='store',
# dest='option_attr',
# default=0,
# type='int',
# help='Help text',
# ),
)
args = '<conference> <xlsx-file>'
def handle(self, *args, **options):
try:
conference = args[0]
except IndexError:
raise CommandError('conference not specified')
try:
speakers_xlsx = args[1]
except IndexError:
raise CommandError('XLSX file not specified')
# Get speaker records
speakers = set()
talks = cmodels.Talk.objects.accepted(conference)
for t in talks:
speakers |= set(t.get_all_speakers())
# Collect profiles
data = []
for speaker in speakers:
add_speaker(data, speaker)
data.sort()
# Update spreadsheet with new data
update_speakers(speakers_xlsx, data)
|
import json
import urllib.parse
import urllib.request
from typing import Dict
from kenallclient.model import KenAllResult
class KenAllClient:
def __init__(self, api_key: str) -> None:
self.api_key = api_key
@property
def authorization(self) -> Dict[str, str]:
auth = {"Authorization": f"Token {self.api_key}"}
return auth
def create_request(self, postal_code) -> urllib.request.Request:
url = urllib.parse.urljoin("https://api.kenall.jp/v1/postalcode/", postal_code)
req = urllib.request.Request(url, headers=self.authorization)
return req
def fetch(self, req: urllib.request.Request) -> KenAllResult:
with urllib.request.urlopen(req) as res:
if not res.headers["Content-Type"].startswith("application/json"):
ValueError("not json response", res.read())
d = json.load(res)
return KenAllResult.fromdict(d)
def get(self, postal_code) -> KenAllResult:
req = self.create_request(postal_code)
return self.fetch(req)
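# Hypothetical usage sketch (the API key and postal code are placeholders):
#   client = KenAllClient(api_key="YOUR_API_KEY")
#   result = client.get("1000001")  # addresses for postal code 100-0001
# get() builds an authenticated request via create_request() and parses the
# JSON response into a KenAllResult.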
|
# Parameters
MAX_SEQUENCE_LENGTH = 100
DENSE_UNITS = 100
CATEGORIES = 3
EPOCHS = 1
BATCH_SIZE = 32
LEARNING_RATE = 0.0001
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_pcap', [dirname(__file__)])
except ImportError:
import _pcap
return _pcap
if fp is not None:
try:
_mod = imp.load_module('_pcap', fp, pathname, description)
finally:
fp.close()
return _mod
_pcap = swig_import_helper()
del swig_import_helper
else:
import _pcap
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
__doc__ = _pcap.__doc__
for dltname, dltvalue in _pcap.DLT.items():
globals()[dltname] = dltvalue
del dltname, dltvalue
class pcapObject(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, pcapObject, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, pcapObject, name)
__repr__ = _swig_repr
def __init__(self):
import sys
if int(sys.version[0])>=2:
self.datalink.im_func.__doc__ = _pcap.pcapObject_datalink.__doc__
self.activate.im_func.__doc__ = _pcap.pcapObject_activate.__doc__
self.dispatch.im_func.__doc__ = _pcap.pcapObject_dispatch.__doc__
self.setnonblock.im_func.__doc__ = _pcap.pcapObject_setnonblock.__doc__
self.set_promisc.im_func.__doc__ = _pcap.pcapObject_set_promisc.__doc__
self.minor_version.im_func.__doc__ = _pcap.pcapObject_minor_version.__doc__
self.stats.im_func.__doc__ = _pcap.pcapObject_stats.__doc__
self.create.im_func.__doc__ = _pcap.pcapObject_create.__doc__
self.open_live.im_func.__doc__ = _pcap.pcapObject_open_live.__doc__
self.next.im_func.__doc__ = _pcap.pcapObject_next.__doc__
self.dump_open.im_func.__doc__ = _pcap.pcapObject_dump_open.__doc__
self.snapshot.im_func.__doc__ = _pcap.pcapObject_snapshot.__doc__
self.is_swapped.im_func.__doc__ = _pcap.pcapObject_is_swapped.__doc__
self.open_offline.im_func.__doc__ = _pcap.pcapObject_open_offline.__doc__
self.set_snaplen.im_func.__doc__ = _pcap.pcapObject_set_snaplen.__doc__
self.fileno.im_func.__doc__ = _pcap.pcapObject_fileno.__doc__
self.datalinks.im_func.__doc__ = _pcap.pcapObject_datalinks.__doc__
self.set_rfmon.im_func.__doc__ = _pcap.pcapObject_set_rfmon.__doc__
self.major_version.im_func.__doc__ = _pcap.pcapObject_major_version.__doc__
self.getnonblock.im_func.__doc__ = _pcap.pcapObject_getnonblock.__doc__
self.open_dead.im_func.__doc__ = _pcap.pcapObject_open_dead.__doc__
self.set_timeout.im_func.__doc__ = _pcap.pcapObject_set_timeout.__doc__
self.loop.im_func.__doc__ = _pcap.pcapObject_loop.__doc__
self.setfilter.im_func.__doc__ = _pcap.pcapObject_setfilter.__doc__
this = _pcap.new_pcapObject()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pcap.delete_pcapObject
__del__ = lambda self : None;
def create(self, *args): return _pcap.pcapObject_create(self, *args)
def set_snaplen(self, *args): return _pcap.pcapObject_set_snaplen(self, *args)
def set_promisc(self, *args): return _pcap.pcapObject_set_promisc(self, *args)
def set_rfmon(self, *args): return _pcap.pcapObject_set_rfmon(self, *args)
def set_timeout(self, *args): return _pcap.pcapObject_set_timeout(self, *args)
def activate(self): return _pcap.pcapObject_activate(self)
def open_live(self, *args): return _pcap.pcapObject_open_live(self, *args)
def open_dead(self, *args): return _pcap.pcapObject_open_dead(self, *args)
def open_offline(self, *args): return _pcap.pcapObject_open_offline(self, *args)
def dump_open(self, *args): return _pcap.pcapObject_dump_open(self, *args)
def setnonblock(self, *args): return _pcap.pcapObject_setnonblock(self, *args)
def getnonblock(self): return _pcap.pcapObject_getnonblock(self)
def setfilter(self, *args): return _pcap.pcapObject_setfilter(self, *args)
def loop(self, *args): return _pcap.pcapObject_loop(self, *args)
def dispatch(self, *args): return _pcap.pcapObject_dispatch(self, *args)
def next(self): return _pcap.pcapObject_next(self)
def datalink(self): return _pcap.pcapObject_datalink(self)
def datalinks(self): return _pcap.pcapObject_datalinks(self)
def snapshot(self): return _pcap.pcapObject_snapshot(self)
def is_swapped(self): return _pcap.pcapObject_is_swapped(self)
def major_version(self): return _pcap.pcapObject_major_version(self)
def minor_version(self): return _pcap.pcapObject_minor_version(self)
def stats(self): return _pcap.pcapObject_stats(self)
def fileno(self): return _pcap.pcapObject_fileno(self)
pcapObject_swigregister = _pcap.pcapObject_swigregister
pcapObject_swigregister(pcapObject)
def lookupdev():
return _pcap.lookupdev()
lookupdev = _pcap.lookupdev
def findalldevs(unpack=1):
return _pcap.findalldevs(unpack)
findalldevs = _pcap.findalldevs
def lookupnet(*args):
return _pcap.lookupnet(*args)
lookupnet = _pcap.lookupnet
def aton(*args):
return _pcap.aton(*args)
aton = _pcap.aton
def ntoa(*args):
return _pcap.ntoa(*args)
ntoa = _pcap.ntoa
# This file is compatible with both classic and new-style classes.
|
import itertools
BLANK = ''
class Tree(object):
def __init__(self, operator, operand):
self.operator = operator
self.operand = operand
self.parent = None
self.children = list()
def add_child(self, child):
child.parent = self
self.children.append(child)
def visit(self):
for c in self.children:
yield from c.visit()
yield self
def __len__(self):
count = 1
for child in self.children:
count += len(child)
return count
def depth(self):
count = 1
for child in self.children:
count = max(count, 1 + child.depth())
return count
def pad(self, depth):
if depth < 1:
raise ValueError()
if depth == 1:
assert self.is_leaf()
return
if self.is_leaf():
self.add_child(Tree(BLANK, BLANK))
self.add_child(Tree(BLANK, BLANK))
for c in self.children:
c.pad(depth - 1)
return self
def prune(self):
for node in self.visit():
if node.operator == BLANK and node.operand == BLANK:
parent = node.parent
parent.children = [c for c in parent.children if c is not node]
def is_null(self):
return len(self.operator) == 0 and len(self.operand) == 0
def is_leaf(self):
return all([c.is_null() for c in self.children])
def __str__(self):
rep = '%s:%s' % (self.operator, self.operand)
if len(self.children) == 0:
return rep
return '(' + ', '.join([rep] + [c.__str__() for c in self.children]) + ')'
BLANK_NODE = Tree(BLANK, BLANK)
def merge_tree(trees):
ret = Tree(operator=[n.operator for n in trees], operand=[n.operand for n in trees])
for subtrees in itertools.zip_longest(*[n.children for n in trees], fillvalue=BLANK_NODE):
ret.add_child(merge_tree(subtrees))
return ret
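# Illustrative sketch (not in the original module): merging trees of different
# arity pads the missing children with BLANK_NODE via zip_longest, e.g.
#   t1 = Tree('+', 'a'); t1.add_child(Tree('*', 'b'))
#   t2 = Tree('-', 'c')
#   merged = merge_tree([t1, t2])
#   # merged.operator == ['+', '-'], merged.operand == ['a', 'c'], and the one
#   # child merges t1's ('*', 'b') node with a blank node standing in for t2.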
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2013 Dariusz Dwornikowski. All rights reserved.
#
"""
Providing links to man pages with the :linuxman: role
"""
from docutils import nodes, utils
from docutils.parsers.rst.roles import set_classes
from string import Template
import re
from sphinx.util import logging
logger = logging.getLogger(__name__)
def make_link_node(rawtext, app, name, manpage_num, options):
"""Create a link to a man page.
"""
ref = None
ref = app.config.linux_man_url_regex
if not ref:
ref = "http://linux.die.net/man/%s/%s" % (manpage_num, name)
else:
s = Template(ref)
ref = s.substitute(num=manpage_num, topic=name)
set_classes(options)
node = nodes.reference(rawtext, "%s(%s)" % (name, manpage_num), refuri=ref,
**options)
return node
def man_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Link to an online man page issue.
"""
app = inliner.document.settings.env.app
p = re.compile("([a-zA-Z0-9_\.\-_]+)\((\d)\)")
m = p.match(text)
manpage_num = m.group(2)
name = m.group(1)
node = make_link_node(rawtext, app, name, manpage_num, options)
return [node], []
def setup(app):
logger.info('Initializing manpage plugin')
app.add_role('linuxman', man_role)
app.add_config_value('linux_man_url_regex', None, 'env')
return {'parallel_read_safe': True}
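# Usage sketch: in reStructuredText, :linuxman:`ls(1)` renders as a link to the
# ls man page. If conf.py sets linux_man_url_regex, the $num and $topic
# placeholders are substituted, e.g. (hypothetical value):
#   linux_man_url_regex = "https://man7.org/linux/man-pages/man$num/$topic.$num.html"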
|
import numpy as np
import torch
from torch.distributions import Distribution as TorchDistribution
from torch.distributions import Normal as TorchNormal
from torch.distributions import Independent as TorchIndependent
from collections import OrderedDict
import lifelong_rl.torch.pytorch_util as ptu
from lifelong_rl.util.eval_util import create_stats_ordered_dict
def atanh(x):
    one_plus_x = (1 + x).clamp(min=1e-6)
    one_minus_x = (1 - x).clamp(min=1e-6)
    return 0.5 * torch.log(one_plus_x / one_minus_x)
class Distribution(TorchDistribution):
def sample_and_logprob(self):
s = self.sample()
log_p = self.log_prob(s)
return s, log_p
def rsample_and_logprob(self):
s = self.rsample()
log_p = self.log_prob(s)
return s, log_p
def mle_estimate(self):
return self.mean
def get_diagnostics(self):
return {}
class Independent(Distribution, TorchIndependent):
def get_diagnostics(self):
return self.base_dist.get_diagnostics()
class TorchDistributionWrapper(Distribution):
def __init__(self, distribution: TorchDistribution):
self.distribution = distribution
@property
def batch_shape(self):
return self.distribution.batch_shape
@property
def event_shape(self):
return self.distribution.event_shape
@property
def arg_constraints(self):
return self.distribution.arg_constraints
@property
def support(self):
return self.distribution.support
@property
def mean(self):
return self.distribution.mean
@property
def variance(self):
return self.distribution.variance
@property
def stddev(self):
return self.distribution.stddev
def sample(self, sample_size=torch.Size()):
return self.distribution.sample(sample_shape=sample_size)
def rsample(self, sample_size=torch.Size()):
return self.distribution.rsample(sample_shape=sample_size)
def log_prob(self, value):
return self.distribution.log_prob(value)
def cdf(self, value):
return self.distribution.cdf(value)
def icdf(self, value):
return self.distribution.icdf(value)
def enumerate_support(self, expand=True):
return self.distribution.enumerate_support(expand=expand)
def entropy(self):
return self.distribution.entropy()
def perplexity(self):
return self.distribution.perplexity()
def __repr__(self):
return 'Wrapped ' + self.distribution.__repr__()
class MultivariateDiagonalNormal(TorchDistributionWrapper):
from torch.distributions import constraints
arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
def __init__(self, loc, scale_diag, reinterpreted_batch_ndims=1):
dist = Independent(TorchNormal(loc, scale_diag),
reinterpreted_batch_ndims=reinterpreted_batch_ndims)
super().__init__(dist)
def get_diagnostics(self):
stats = OrderedDict()
stats.update(create_stats_ordered_dict(
'mean',
ptu.get_numpy(self.mean),
# exclude_max_min=True,
))
stats.update(create_stats_ordered_dict(
'std',
ptu.get_numpy(self.distribution.stddev),
))
return stats
def __repr__(self):
return self.distribution.base_dist.__repr__()
class TanhNormal(Distribution):
"""
Represent distribution of X where
X ~ tanh(Z)
Z ~ N(mean, std)
Note: this is not very numerically stable.
"""
def __init__(self, normal_mean, normal_std, epsilon=1e-6):
"""
:param normal_mean: Mean of the normal distribution
:param normal_std: Std of the normal distribution
:param epsilon: Numerical stability epsilon when computing log-prob.
"""
self.normal_mean = normal_mean
self.normal_std = normal_std
self.normal = TorchNormal(normal_mean, normal_std)
self.epsilon = epsilon
def sample_n(self, n, return_pre_tanh_value=False):
z = self.normal.sample_n(n)
if return_pre_tanh_value:
return torch.tanh(z), z
else:
return torch.tanh(z)
def log_prob(self, value, pre_tanh_value=None):
"""
:param value: some value, x
:param pre_tanh_value: arctanh(x)
:return:
"""
if pre_tanh_value is None:
pre_tanh_value = atanh(value)
return self.normal.log_prob(pre_tanh_value) - torch.log(
1 - value * value + self.epsilon
)
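    # Derivation note: for X = tanh(Z), the change-of-variables formula gives
    # log p_X(x) = log p_Z(atanh(x)) - log(1 - x^2), since d tanh(z)/dz
    # = 1 - tanh(z)^2; epsilon keeps the log argument strictly positive as
    # |x| approaches 1.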
def sample(self, return_pretanh_value=False):
"""
Gradients will and should *not* pass through this operation.
See https://github.com/pytorch/pytorch/issues/4620 for discussion.
"""
z = self.normal.sample().detach()
if return_pretanh_value:
return torch.tanh(z), z
else:
return torch.tanh(z)
def rsample(self, return_pretanh_value=False):
"""
Sampling in the reparameterization case.
"""
z = (
self.normal_mean +
self.normal_std *
TorchNormal(
ptu.zeros(self.normal_mean.size()),
ptu.ones(self.normal_std.size())
).sample()
)
z.requires_grad_()
if return_pretanh_value:
return torch.tanh(z), z
else:
return torch.tanh(z)
|
#!/usr/bin/python3
# Copyright 2020 Timothy Trippel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import subprocess
import sys
import prettytable
import yaml
from hwfutils.string_color import color_str_green as green
from hwfutils.string_color import color_str_red as red
from hwfutils.string_color import color_str_yellow as yellow
from hwfutils.tlul_fuzz_instr import TLULFuzzInstr
def dump_seed_file_to_stdout(output_file_name):
  """Dumps the generated seed file in hex format to STDOUT."""
print(output_file_name + ":")
cmd = ["xxd", output_file_name]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
print(red("ERROR: cannot dump generated seed file."))
sys.exit(1)
def gen_seed(input_yaml_file_name, output_file_name, verbose):
"""Parse YAML HW fuzzing opcodes and translates them in binary to file."""
print(f"Creating fuzzer seed from YAML: {input_yaml_file_name} ...")
with open(input_yaml_file_name, "r") as fp:
fuzz_opcodes = yaml.load(fp, Loader=yaml.Loader)
with open(output_file_name, "wb") as fp:
for instr in fuzz_opcodes:
hwf_instr = TLULFuzzInstr(instr)
if verbose:
print(hwf_instr)
for _ in range(hwf_instr.repeat):
fp.write(hwf_instr.to_bytes())
print(green("Seed file generated!"))
if verbose:
    dump_seed_file_to_stdout(output_file_name)
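# Seed file layout (as implemented above): each YAML entry becomes one
# TLULFuzzInstr whose byte encoding is written 'repeat' times, so the output is
# a flat stream of instruction frames ready for the fuzzer.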
def _print_configs(args):
  # Create table to print configurations to STDOUT
config_table = prettytable.PrettyTable(header=False)
config_table.title = "Seed Generation Parameters"
config_table.field_names = ["Parameter", "Value"]
# Add parameter values to table
config_table.add_row(["Input (YAML) Filename", args.input_filename])
config_table.add_row(["Output Filename", args.output_filename])
config_table.add_row(["Frame Type", args.frame_type])
config_table.add_row(["Opcode Size (# bytes)", args.opcode_size])
config_table.add_row(["Address Size (# bytes)", args.address_size])
config_table.add_row(["Data Size (# bytes)", args.data_size])
# Print table
config_table.align = "l"
print(yellow(config_table.get_string()))
def parse_args(argv):
module_description = "OpenTitan Fuzzing Seed Composer"
parser = argparse.ArgumentParser(description=module_description)
parser.add_argument("--opcode-type",
default=TLULFuzzInstr.opcode_type,
choices=[
"constant",
"mapped",
],
type=str,
help="Fuzzing instruction opcode type.")
parser.add_argument("--instr-type",
default=TLULFuzzInstr.instr_type,
choices=[
"fixed",
"variable",
],
type=str,
help="Fuzzing instruction frame type.")
parser.add_argument("--endianness",
default=TLULFuzzInstr.endianness,
choices=[
"little",
"big",
],
type=str,
help="Endianness of HW Fuzzing Instruction frames.")
parser.add_argument("--opcode-size",
default=TLULFuzzInstr.opcode_size,
type=int,
help="Size of opcode field in bytes.")
parser.add_argument("--address-size",
default=TLULFuzzInstr.address_size,
type=int,
help="Size of address field in bytes")
parser.add_argument("--data-size",
default=TLULFuzzInstr.data_size,
type=int,
help="Size of data field in bytes.")
parser.add_argument("--direct-in-size",
default=TLULFuzzInstr.direct_in_size,
type=int,
help="Size of direct inputs field in bytes.")
parser.add_argument("-v",
"--verbose",
action="store_true",
help="Enable verbose status messages.")
parser.add_argument("input_file_name",
metavar="input.yaml",
help="Input configuration YAML file.")
parser.add_argument("output_file_name",
metavar="afl_seed.hwf",
help="Name of output seed file (hex).")
args = parser.parse_args(argv)
if args.verbose:
_print_configs(args)
return args
def config_tlul_fuzz_instr(args):
TLULFuzzInstr.opcode_type = args.opcode_type
TLULFuzzInstr.instr_type = args.instr_type
TLULFuzzInstr.opcode_size = args.opcode_size
TLULFuzzInstr.address_size = args.address_size
TLULFuzzInstr.data_size = args.data_size
TLULFuzzInstr.direct_in_size = args.direct_in_size
TLULFuzzInstr.endianness = args.endianness
def main(argv):
args = parse_args(argv)
config_tlul_fuzz_instr(args)
gen_seed(args.input_file_name, args.output_file_name, args.verbose)
if __name__ == "__main__":
main(sys.argv[1:])
|
# Generated by Django 2.0.2 on 2018-11-15 02:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0003_auto_20181115_0802'),
]
operations = [
migrations.CreateModel(
name='beer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='images/')),
('item', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='rum',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='images/')),
('item', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='vodka',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='images/')),
('item', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='wine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='images/')),
('item', models.CharField(max_length=50)),
],
),
]
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from character import app
if __name__ == '__main__':
description = "Runs the Flask server."
parser = ArgumentParser(description=description)
parser.add_argument("-0", "--public", help="Makes the server world-"
"accessible by hosting at 0.0.0.0.",
action="store_true")
parser.add_argument("-p", "--port", help="Defines the port. Defaults to "
"9999.", type=int, default=9999)
parser.add_argument("-d", "--debug", help="Turns server debug mode on. "
"(Not recommended for world-accesible servers!)",
action="store_true")
parser.add_argument("-r", "--reload", help="Turns the automatic realoder "
"on. This setting restarts the server whenever a "
"change in the source is detected.",
action="store_true")
args = parser.parse_args()
app.run(host="0.0.0.0" if args.public else "localhost", port=args.port,
use_debugger=args.debug, use_reloader=args.reload)
|
#!/bin/env python3
"""
follow : @qywok_exploiter_357
"""
import unittest
class attributes:
def __init__(self):
        self.decimal = [128, 64, 32, 16, 8, 4, 2, 1]
        self.bit = len(self.decimal)  # length: 8 bits
class convert:
def __init__(self,value):
self.value=value
def bin2dec(self):
if str(type(self.value))=="<class 'str'>":
if len(self.value)==attributes().bit:
index=self.value
value=0
for attribute in range(len(attributes().decimal)):
if int(index[attribute])==1:
value+=int(index[attribute])*attributes().decimal[attribute]
elif int(index[attribute])==0:
value+=int(index[attribute])
return value
else:
return False
def dec2bin(self):
if str(type(self.value))=="<class 'int'>":
index=self.value
point=index
value=[]
for attribute in range(len(attributes().decimal)):
                if point >= attributes().decimal[attribute]:
value.append("1")
point-=attributes().decimal[attribute]
else:
value.append("0")
return "".join(value)
else:
return False
if __name__ == "__main__":
    class testing(unittest.TestCase):
        def test_output(self):
            self.assertEqual(convert(138).dec2bin(), "10001010")
            self.assertEqual(convert("10001010").bin2dec(), 138)
    unittest.main()
|
import pandas as pd
import math
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
import copy
import numpy as np
'''
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkstudy.EventProfiler as ep
import csv
'''
#import sys
from datetime import datetime, date
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
'''
def maxloss(date, window_size, df_equity):
for date_idx in range(df_equity.index):
if df_equity.index[date_idx+window_size] <= df_equity.index[-1]:
'''
# Return a data frame of daily equity changes
def portvalchanges(df_equity):
df_port_val_changes = copy.deepcopy(df_equity)
df_port_val_changes = df_port_val_changes * 0
for date_idx in range(0,len(df_equity.index)):
if df_equity.index[date_idx] > df_equity.index[0]:
df_port_val_changes[0].ix[df_equity.index[date_idx]] = df_equity[0].ix[df_equity.index[date_idx]]-df_equity[0].ix[df_equity.index[date_idx-1]]
return df_port_val_changes
def maxdrawdown(df_equity):
df_rollsum = copy.deepcopy(df_equity)
df_rollsum = df_rollsum * 0
#windows = [2,4,8,16,32]
windows = np.arange(2,51)
columns =['rollsum']
index = windows
df_rsum = pd.DataFrame(index=index,columns=columns)
df_rsum = df_rsum.fillna(0)
for window_size in windows:
df_rollsum[0] = pd.rolling_sum(df_equity[0],window_size)
df_rsum['rollsum'].ix[window_size] = df_rollsum[0].min(axis=0)
#df_equity.to_csv('C:\Users\owner\Documents\Software\Python\Quant\Examples\ZeroSum Strategy Suite\df_equity.csv')
df_rsum.to_csv('C:\Users\owner\Documents\Software\Python\Quant\Examples\ZeroSum Strategy Suite\df_rsum.csv')
df_rollsum.to_csv('C:\Users\owner\Documents\Software\Python\Quant\Examples\ZeroSum Strategy Suite\df_rollsum.csv')
return df_rsum.min(axis=0)
def plot_stock(quotes):
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2)
#ax.xaxis.set_major_locator(mondays)
#ax.xaxis.set_minor_locator(alldays)
#ax.xaxis.set_major_formatter(weekFormatter)
#ax.xaxis.set_minor_formatter(dayFormatter)
#plot_day_summary(ax, quotes, ticksize=3)
candlestick(ax, quotes, width=0.6)
ax.xaxis_date()
ax.autoscale_view()
plt.setp( plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')
plt.savefig('stock.pdf', format='pdf')
#plt.show()
def plots(index,series1, series2, series3, series4, file_name):
path = './plots/'+file_name+'_'+str(date.today())+'.pdf'
#pp = PdfPages('./plots/plots.pdf')
pp = PdfPages(path)
tot_symbols = len(series1.columns)
fig = plt.figure()
d = pp.infodict()
d['Title'] = 'Watchlist Chart Book'
d['Author'] = u'Rahul Kumar'
d['Subject'] = 'Watchlist Chart Book'
d['Keywords'] = 'Watchlist Charts'
#d['CreationDate'] = dt.datetime(2009, 11, 13)
d['CreationDate'] = dt.datetime.today()
d['ModDate'] = dt.datetime.today()
for subplot in range(1,tot_symbols+1):
#print series1.columns[subplot-1]
#ax = fig.add_subplot(tot_symbols,1,subplot)
plt.plot(index, series1[series1.columns[subplot-1]]) # $SPX 50 days
plt.plot(index, series2[series2.columns[subplot-1]]) # XOM 50 days
plt.plot(index, series3[series3.columns[subplot-1]]) # XOM 50 days
plt.plot(index, series4[series4.columns[subplot-1]]) # XOM 50 days
#plt.axhline(y=0, color='r')
plt.legend([series1.columns[subplot-1]], loc='best')
plt.ylabel('Daily Returns',size='xx-small')
plt.xlabel(series1.columns[subplot-1],size='xx-small')
plt.xticks(size='xx-small')
plt.yticks(size='xx-small')
plt.savefig(pp, format='pdf')
plt.close()
pp.close()
def plot(index,series1, series2, series3, series4):
#fig = plt.figure()
plt.clf()
plt.plot(index, series1) # $SPX 50 days
plt.plot(index, series2) # XOM 50 days
plt.plot(index, series3) # XOM 50 days
plt.plot(index, series4) # XOM 50 days
#plt.axhline(y=0, color='r')
plt.legend(['Portfolio', 'SPX '], loc='best')
plt.ylabel('Daily Returns',size='xx-small')
plt.xlabel('Date',size='xx-small')
plt.xticks(size='xx-small')
plt.yticks(size='xx-small')
plt.savefig('channel.pdf', format='pdf')
def analyze(analyzefile):
print 'Inside Analyze'
file_path = 'C:\\Users\\owner\\Documents\\Software\\Python\\Quant\\Examples\\ZeroSum Strategy Suite\\'
#analyze_file = sys.argv[1]
#analyze_file = 'values.csv'
analyze_file = analyzefile
input_file = file_path+analyze_file
port_value = pd.read_csv(input_file, sep=',',index_col = 0, header=0,names=['PortVal'])
port_daily_ret = pd.DataFrame(range(len(port_value)),index=port_value.index, dtype='float')
startdate = datetime.strptime(port_value.index[0],'%Y-%m-%d %H:%M:%S')
enddate = datetime.strptime(port_value.index[len(port_value)-1],'%Y-%m-%d %H:%M:%S')
#benchmark = sys.argv[2]
benchmark = ['$SPX']
#benchmark = ['SPY']
#benchmark = bench
#d_data = data
# Start and End date of the charts
dt_start = dt.datetime(startdate.year, startdate.month, startdate.day)
    dt_end = dt.datetime(enddate.year, enddate.month, enddate.day) + dt.timedelta(days=1)
# We need closing prices so the timestamp should be hours=16.
dt_timeofday = dt.timedelta(hours=16)
# Get a list of trading days between the start and the end.
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
# Creating an object of the dataaccess class with Yahoo as the source.
#c_dataobj = da.DataAccess('Yahoo',verbose=True,cachestalltime = 0)
c_dataobj = da.DataAccess('Yahoo')
# Keys to be read from the data, it is good to read everything in one go.
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
# Reading the data, now d_data is a dictionary with the keys above.
# Timestamps and symbols are the ones that were specified before.
ldf_data = c_dataobj.get_data(ldt_timestamps, benchmark, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
# Filling the data for NAN
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method='ffill')
d_data[s_key] = d_data[s_key].fillna(method='bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
df_close = d_data['close']
#df_close = benchdata
bench_daily_ret = pd.DataFrame(range(len(df_close)),index=df_close.index, dtype='float')
bench_val = pd.DataFrame(range(len(port_value)),index=port_value.index)
bench_init_investment = port_value['PortVal'].ix[0]
bench_val[0].ix[0] = bench_init_investment
# Portfolio Daily Returns
for row_idx in range(0,len(ldt_timestamps)):
#Start calculating daily return on day 2
if row_idx > 0:
port_daily_ret[0].ix[row_idx] = (float(port_value['PortVal'].ix[row_idx])/float(port_value['PortVal'].ix[row_idx-1]))-1
# Benchmark Daily Returns
for row_idx in range(0,len(ldt_timestamps)):
#Start calculating daily return on day 2
if row_idx > 0:
bench_daily_ret[0].ix[row_idx] = (float(df_close[benchmark].ix[row_idx])/float(df_close[benchmark].ix[row_idx-1]))-1
#Bench Value
for row_idx in range(1,len(ldt_timestamps)):
bench_val[0].ix[row_idx] = bench_val[0].ix[row_idx-1] * (1+bench_daily_ret[0].ix[row_idx])
avg_port_daily_ret = port_daily_ret.mean(axis=0)
avg_bench_daily_ret = bench_daily_ret.mean(axis=0)
port_vol = port_daily_ret.std(axis=0)
bench_vol = bench_daily_ret.std(axis=0)
port_sharpe = math.sqrt(252)*(avg_port_daily_ret/port_vol)
bench_sharpe = math.sqrt(252)*(avg_bench_daily_ret/bench_vol)
port_cum_ret = float(port_value['PortVal'].ix[len(ldt_timestamps)-1])/float(port_value['PortVal'].ix[0])
bench_cum_ret = df_close[benchmark].ix[len(ldt_timestamps)-1]/df_close[benchmark].ix[0]
# Plotting the plot of daily returns
plt.clf()
plt.plot(ldt_timestamps[0:], port_value['PortVal']) # $SPX 50 days
plt.plot(ldt_timestamps[0:], bench_val[0]) # XOM 50 days
#plt.axhline(y=0, color='r')
plt.legend(['Portfolio', 'SPX '], loc='best')
plt.ylabel('Daily Returns',size='xx-small')
plt.xlabel('Date',size='xx-small')
plt.xticks(size='xx-small')
plt.yticks(size='xx-small')
plt.savefig('rets.pdf', format='pdf')
print 'Sharpe ratio of fund:'+str(port_sharpe)
print 'Sharpe ratio of benchmark:'+str(bench_sharpe)
print 'Total Return of fund:'+str(port_cum_ret)
print 'Total Return of benchmark:'+str(bench_cum_ret)
print 'Standard Deviation of fund:'+str(port_vol)
print 'Standard Deviation of benchmark:'+str(bench_vol)
print 'Average Daily Return of fund:'+str(avg_port_daily_ret)
print 'Average Daily Return of benchmark:'+str(avg_bench_daily_ret)
|
"""
```nop``` command endpoints.
"""
from flask import Blueprint
from flask import make_response
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from app import irbox
from irbox.errors import IrboxError
nop_blueprint = Blueprint('nop_blueprint', __name__)
@nop_blueprint.route('/nop')
def nop():
"""
```nop``` command.
"""
try:
success = irbox.nop()
message = irbox.response
except IrboxError as irbox_error:
success = False
message = irbox_error.message
return redirect(url_for(
'nop_blueprint.nop_success' if success else 'nop_blueprint.nop_failure',
m=message
))
@nop_blueprint.route('/nop/success')
def nop_success():
"""
Successful ```nop```.
"""
message = request.args.get('m')
response = make_response(
render_template(
"nop.html",
success=True,
message=message
)
)
response.headers.set('Irbox-Success', 'true')
return response
@nop_blueprint.route('/nop/failure')
def nop_failure():
"""
Failed ```nop```.
"""
message = request.args.get('m')
response = make_response(
render_template(
"nop.html",
success=False,
message=message
)
)
response.headers.set('Irbox-Success', 'false')
return response
|
# -*- coding: utf-8 -*-
try:
import pygame
except ImportError:
print("The pygame is not instaled")
from pygame.locals import *
from pygame.sprite import Sprite
from colors import *
# All classes of the game are here
class Characters(Sprite):
'''
Base class for all characters of the game
'''
def __init__(self, start_px, start_py, image_name, *groups):
Sprite.__init__(self, *groups)
self.start_px = start_px
self.start_py = start_py
self.px = 0
self.py = 0
self._move_states = {"LEFT": 0, "RIGHT": 0, "UP": 0, "DOWN": 0}
self.rect = Rect(self.start_px, self.start_py, 0, 0)
self._base_image_path = "sprites/"
self.image_name = image_name
self.image = pygame.image.load(
self._base_image_path + image_name + "RIGHT1.png")
self.convert_image()
        pygame.draw.rect(self.image, BLACK, self.rect)
def move(self, side):
'''
move the character
'''
side_state = str(self._move_states[side] + 1)
self.image = pygame.image.load(
self._base_image_path + self.image_name + side + side_state + ".png")
self._change_state(side)
def _change_state(self, side):
'''
change the position of the character in the screen
'''
self.convert_image()
if side == 'LEFT':
x, y = -10, 0
if side == 'RIGHT':
x, y = 10, 0
if side == 'UP':
x, y = 0, -10
if side == 'DOWN':
x, y = 0, 10
self.rect.move_ip(x, y)
self.py += 1
self._move_states[side] += 1
if self._move_states[side] > 2:
self._move_states[side] = 0
def convert_image(self):
'''
        Convert the character image and set colorkey to magenta (i.e. pink)
'''
self.image.set_alpha(None, RLEACCEL)
self.image.convert()
self.image.set_colorkey(MAGENTA, RLEACCEL)
class Hero(Characters):
'''
Class for the heroes of the game
'''
def __init__(self, start_px, start_py, image_name, *groups):
Characters.__init__(self, start_px, start_py, image_name, *groups)
class Npcs(Characters):
'''
Class for all the npcs of the game
npcs = all characters that you can interact
'''
def __init__(self, start_px, start_py, image_name, *groups):
        Characters.__init__(self, start_px, start_py, image_name, *groups)
class Text():
'''
Class to handle the texts in the game
'''
def __init__(self, size, dialog, font_file, color=WHITE, antialias=True):
self.size = size # size of the dialog
self.dialog = dialog # current dialog
self.color = color
self.antialias = antialias
self.font = pygame.font.SysFont(font_file, self.size)
self.phrases = self.font.render(
self.dialog, self.antialias, self.color)
def change_dialog(self, new_dialog):
self.dialog = new_dialog
def change_color(self, new_color):
self.color = new_color
def change_size(self, new_size):
self.size = new_size
|
#! /usr/bin/env python
import sys, os, warnings
from pyorbit import Device
from pyorbit import ConnectError
from pyorbit.services import Status
# Example showing how to get the system uptime from an Orbit Radio.
# The uptime value is requested using it's keypath. The keypath
# can be obtained from the Orbit Radio command line using:
# show system uptime | display xpath
#
# This will return the following keypath:
# /system/mdssys:uptime/seconds 175198
#
# Strip off the namespace prefix 'mdssys' and we're left with:
# /system/uptime/seconds
#
# Running this example and piping the output through 'jq'
# should produce JSON data like the following:
#
# python3 pyorbit/examples/get_uptime.py 192.168.1.1 admin admin | jq
# {
# "data": {
# "@xmlns": "urn:ietf:params:xml:ns:netconf:base:1.0",
# "@xmlns:nc": "urn:ietf:params:xml:ns:netconf:base:1.0",
# "system": {
# "@xmlns": "urn:ietf:params:xml:ns:yang:ietf-system",
# "uptime": {
# "@xmlns": "com:gemds:mds-system",
# "seconds": "175732"
# }
# }
# }
# }
#
# This next example shows how to use 'jq' to extract just the value that
# we're interested in:
#
# python3 pyorbit/examples/get_uptime.py 192.168.1.1 admin admin | jq ".data.system.uptime.seconds | tonumber"
# 175732
def main(host, user, passwd):
try:
dev = Device(host=host,username=user,password=passwd)
dev.open()
with Status(dev) as st:
uptime="""/system/uptime/seconds"""
# JSON
out = st.get(filter=('xpath',uptime),format='json')
print(out)
# ODICT
#out = st.get(filter=('xpath',uptime),format='odict')
#print(out)
# XML
#out = st.get(filter=('xpath',uptime))
#print(out)
except ConnectError as err:
print ("Cannot connect to device: {0}".format(err))
return
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2], sys.argv[3])
|
import operator
from rtamt.spec.stl.visitor import STLVisitor
class STLCTOffline(STLVisitor):
def __init__(self, spec):
self.spec = spec
def offline(self, element, args):
sample = self.visit(element, args)
out_sample = self.spec.var_object_dict[self.spec.out_var]
if self.spec.out_var_field:
setattr(out_sample, self.spec.out_var_field, sample)
else:
out_sample = sample
return out_sample
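    # Note: each visit* method below first evaluates its child subtree(s) and
    # then delegates the operator's offline semantics to the AST node object
    # via element.node.offline(...).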
def visitPredicate(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitVariable(self, element, args):
var = self.spec.var_object_dict[element.var]
if element.field:
value = operator.attrgetter(element.field)(var)
else:
value = var
return value
def visitConstant(self, element, args):
out_sample = element.node.offline()
return out_sample
def visitAbs(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitAddition(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitSubtraction(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitMultiplication(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitDivision(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitNot(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitAnd(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitOr(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitImplies(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitIff(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitXor(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitEventually(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitAlways(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitUntil(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitOnce(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitHistorically(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitSince(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitPrecedes(self, element, args):
in_sample_1 = self.visit(element.children[0], args)
in_sample_2 = self.visit(element.children[1], args)
out_sample = element.node.offline(in_sample_1, in_sample_2)
return out_sample
def visitRise(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitFall(self, element, args):
in_sample = self.visit(element.children[0], args)
out_sample = element.node.offline(in_sample)
return out_sample
def visitDefault(self, element, args):
return None
|
import random
from musthe import Chord, Note, Interval
import ui
# Chord notation: https://en.m.wikipedia.org/wiki/Chord_notation
# https://en.m.wikipedia.org/wiki/List_of_chords
# --------------------------------
# CLASSES FOR chord types
# --------------------------------
# TRIADS
# --------------------------------
triangle = chr(9651)
class ChordsInterface:
    ''' base class for chord notation helpers '''
    sym = ''
    sym_default = ''
    symbols = ['']
    def __init__(self):
        pass
    def replace_with_random_notation(self, chord):
        random_sym = random.choice(self.symbols)
        return chord.replace(self.sym, random_sym)
    def replace_with_default_notation(self, chord):
        return chord.replace(self.sym, self.sym_default)
class Major(ChordsInterface):
''' major chord '''
sym = 'maj'
sym_default = ''
symbols = ['', 'maj', 'M', triangle]
def __init__(self):
pass
def replace_with_random_notation(self, chord):
random_sym = random.choice(self.symbols)
return chord.replace(self.sym, random_sym)
def replace_with_default_notation(self, chord):
return chord.replace(self.sym, self.sym_default)
class Minor(ChordsInterface):
''' minor chord '''
sym = 'min'
sym_default = 'm'
symbols = ['-', 'min', 'm']
def __init__(self):
pass
def replace_with_random_notation(self, chord):
random_sym = random.choice(self.symbols)
return chord.replace(self.sym, random_sym)
def replace_with_default_notation(self, chord):
return chord.replace(self.sym, self.sym_default)
class Augmented(ChordsInterface):
''' augmented chord '''
sym = 'aug'
sym_default = 'aug'
symbols = ['aug', 'M#5', 'M+5', 'C+']
def __init__(self):
pass
def replace_with_random_notation(self, chord):
random_sym = random.choice(self.symbols)
return chord.replace(self.sym, random_sym)
def replace_with_default_notation(self, chord):
return chord.replace(self.sym, self.sym_default)
class Diminished(ChordsInterface):
''' diminished chord '''
sym = 'dim'
sym_default = 'dim'
symbols = ['dim', 'mb5', 'm°5','°']
def __init__(self):
pass
def replace_with_random_notation(self, chord):
random_sym = random.choice(self.symbols)
return chord.replace(self.sym, random_sym)
def replace_with_default_notation(self, chord):
return chord.replace(self.sym, self.sym_default)
# --------------------------------
# 7th
# --------------------------------
class Major7(ChordsInterface):
''' major seventh chord '''
sym = 'maj7'
sym_default = ''
symbols = ['maj7', 'M7', f'{triangle}7']
def __init__(self):
pass
def replace_with_random_notation(self, chord):
random_sym = random.choice(self.symbols)
return chord.replace(self.sym, random_sym)
def replace_with_default_notation(self, chord):
return chord.replace(self.sym, self.sym_default)
class Minor7(ChordsInterface):
''' minor seventh chord '''
sym = 'min7'
sym_default = 'm7'
symbols = ['min7', 'm7', '-7']
def __init__(self):
pass
def replace_with_random_notation(self, chord):
random_sym = random.choice(self.symbols)
return chord.replace(self.sym, random_sym)
def replace_with_default_notation(self, chord):
return chord.replace(self.sym, self.sym_default)
class Dominant7(ChordsInterface):
''' dominant seventh chord '''
sym = 'dom7'
sym_default = '7'
symbols = ['dom7', '7']
def __init__(self):
pass
def replace_with_random_notation(self, chord):
random_sym = random.choice(self.symbols)
return chord.replace(self.sym, random_sym)
def replace_with_default_notation(self, chord):
return chord.replace(self.sym, self.sym_default)
class Augmented7(ChordsInterface):
sym = 'aug7'
sym_default = 'aug7'
symbols = ['7M#5', '7M+5']
def __init__(self):
pass
def replace_with_random_notation(self, chord):
random_sym = random.choice(self.symbols)
return chord.replace(self.sym, random_sym)
def replace_with_default_notation(self, chord):
return chord.replace(self.sym, self.sym_default)
class Diminished7(ChordsInterface):
sym = 'm7dim5'
sym_default = '7dim'
symbols = ['m7b5', 'm7°5']
def __init__(self):
pass
def replace_with_random_notation(self, chord):
random_sym = random.choice(self.symbols)
return chord.replace(self.sym, random_sym)
def replace_with_default_notation(self, chord):
return chord.replace(self.sym, self.sym_default)
obj = {
'M': Major(),
'm': Minor(),
'+': Augmented(),
'°': Diminished(),
'M7': Major7(),
'm7': Minor7(),
'7': Dominant7(),
'7#5': Augmented7(),
'm7b5': Diminished7(),
}
# --------------------------------
# USEFUL FUNCS
# --------------------------------
def circle(accidentals):
if accidentals == 'Circle4maj':
note = Note('C1')
elif accidentals == 'Circle5maj':
note = Note('C7')
elif accidentals == 'Circle4min':
note = Note('A1')
elif accidentals == 'Circle5min':
note = Note('A7')
interval = Interval('P4')
for i in range(12):
yield note
if accidentals in ('Circle4maj','Circle4min'):
note = note + interval
elif accidentals in ('Circle5maj','Circle5min'):
note = note - interval
if note.letter == 'C' and note.accidental == 'b':
note = Note('B')
elif note.letter == 'B' and note.accidental == '#':
note = Note('C')
elif note.letter == 'E' and note.accidental == '#':
note = Note('F')
elif note.letter == 'F' and note.accidental == 'b':
note = Note('E')
def get_notes(accidentals):
if accidentals in ('b', '#', 'all'):
return Note('C').all()
else:
return circle(accidentals)
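# Usage sketch (illustrative, not executed here): circle() yields the twelve
# chord roots in circle-of-fourths or circle-of-fifths order, e.g. for
# 'Circle4maj' the sequence starts C, F, Bb, Eb, ...
#
#     for note in circle('Circle4maj'):
#         print(note.letter + note.accidental)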
# --------------------------------
# USER INTERFACE
# --------------------------------
screen_width = ui.get_screen_size().width
screen_height = ui.get_screen_size().height
tv_height = 125
tv_width = screen_width / 3
tv_x_pos = tv_width
tv_y_pos = 0
# Table view: Accidentals
tv_alter = ui.TableView()
tv_alter.border_width = 0
tv_alter.x = 0 * tv_x_pos
tv_alter.y = tv_y_pos
tv_alter.width = tv_width
tv_alter.height = tv_height
# Table view: Chord type
tv_type = ui.TableView()
tv_type.border_width = 0
tv_type.x = 1 * tv_x_pos
tv_type.y = tv_y_pos
tv_type.width = tv_width
tv_type.height = tv_height
tv_type.allows_multiple_selection = True
# Table view: Chord output
tv_view = ui.TableView()
tv_view.border_width = 0
tv_view.x = 2 * tv_x_pos
tv_view.y = 0
tv_view.width = tv_width
tv_view.height = tv_height
# Button: Generate chord
bt_height = 40
bt_width = screen_width
bt_x_pos = 0
bt_y_pos = tv_height
bt_generator = ui.Button()
bt_generator.border_width = 4
bt_generator.title = 'Generate'
bt_generator.x = bt_x_pos
bt_generator.y = bt_y_pos
bt_generator.width = bt_width
bt_generator.height = bt_height
bt_generator.font = ('verdana', 25)
bt_generator.corner_radius = 10
# TextView: List of chords
txtv_height = screen_height - tv_height - bt_height
txtv_width = screen_width - tv_width - bt_width
txtv_x_pos = 0
txtv_y_pos = bt_y_pos + bt_height
txtv_info = ui.TextView()
txtv_info.alignment = ui.ALIGN_CENTER
txtv_info.border_width = 0
txtv_info.x = txtv_x_pos
txtv_info.y = txtv_y_pos
txtv_info.width = screen_width
txtv_info.height = txtv_height
txtv_info.editable = False
txtv_info.font = ('verdana-bold', 30)
# ----------------------------------
# Delegate and DataSource for
# Chord types
# ----------------------------------
class tvDelegateType():
def __init__(self, title, items):
self.items = items
self.currentNumLines = len(items)
self.currentTitle = title
self.currentRow = None
self.selected_items = []
def tableview_did_select(self, tableview, section, row):
# Called when a row was selected.
self.selected_items.append(self.items[row])
def tableview_did_deselect(self, tableview, section, row):
# Called when a row was de-selected (in multiple selection mode).
self.selected_items.remove(self.items[row])
def tableview_number_of_sections(self, tableview):
# Return the number of sections (defaults to 1). Someone else can mess with
# sections and section logic
return 1
def tableview_number_of_rows(self, tableview, section):
# Return the number of rows in the section
return self.currentNumLines # needed to be in sync with displayed version
def tableview_title_for_header(self, tableview, section):
# Return a title for the given section.
# If this is not implemented, no section headers will be shown.
return self.currentTitle
def tableview_cell_for_row(self, tableview, section, row):
# Create and return a cell for the given section/row
cell = ui.TableViewCell()
cell.text_label.text = self.items[row]['title']
return cell
# ----------------------------------
# General Delegate and DataSource for
# Accidentals and Output
# ----------------------------------
class tvDelegateGen():
# also acts as the data_source. Can be separate, but this is easier.
def __init__(self, title, items):
self.items = items
self.currentNumLines = len(items)
self.currentTitle = title
self.currentRow = None
self.selected_item = ''
def tableview_did_select(self, tableview, section, row):
# Called when a row was selected
self.selected_item = self.items[row]['value']
def tableview_did_deselect(self, tableview, section, row):
# Called when a row was de-selected (in multiple selection mode).
pass
def tableview_number_of_sections(self, tableview):
# Return the number of sections (defaults to 1). Someone else can mess with
# sections and section logic
return 1
def tableview_number_of_rows(self, tableview, section):
# Return the number of rows in the section
return self.currentNumLines # needed to be in sync with displayed version
def tableview_cell_for_row(self, tableview, section, row):
# Create and return a cell for the given section/row
cell = ui.TableViewCell()
cell.text_label.text = self.items[row]['title']
cell.accessory_type = self.items[row]['accessory_type']
return cell
def tableview_title_for_header(self, tableview, section):
# Return a title for the given section.
return self.currentTitle
# ----------------------------------
# CHORD GENERATOR WINDOW
# ----------------------------------
class ChordGenerator(ui.View):
chord_types = ' '
circle = chr(11044)
# ACCIDENTALS -----------------
titles = {
'Flats': 'b',
'Sharps': '#',
'All': 'all',
f'{circle}of4maj': 'Circle4maj',
f'{circle}of4min': 'Circle4min',
f'{circle}of5maj': 'Circle5maj',
f'{circle}of5min': 'Circle5min'
}
itemlist = [{
'title': x,
'value': y,
'accessory_type': 'none'
} for x, y in titles.items()]
tv_alter.data_source = tv_alter.delegate = tvDelegateGen(
title='Notes', items=itemlist)
# CHORD TYPES -----------------
titles = {
f'{item}': f'{key}'
for key, item in Chord(Note('C')).aliases.items()
}
itemlist = [{
'title': x,
'value': y,
'accessory_type': 'none'
} for x, y in titles.items()]
tv_type.data_source = tv_type.delegate = tvDelegateType(
title='Chord type', items=itemlist)
# CHORD OUTPUT -----------------
titles = {
'Default': 'Default',
'Random': 'Random',
'Musthe': 'Musthe',
}
itemlist = [{
'title': x,
'value': y,
'accessory_type': 'none'
} for x, y in titles.items()]
tv_view.data_source = tv_view.delegate = tvDelegateGen(
title='Notation', items=itemlist)
# -------------------------
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.table1_selection = None
self.bg_color = 'lightyellow'
self.make_view()
def make_view(self):
self.add_subview(tv_alter)
self.add_subview(tv_type)
self.add_subview(tv_view)
self.add_subview(bt_generator)
self.add_subview(txtv_info)
# Button: generate
bt_generator.action = self.fn_generate
def fn_generate(self, sender):
notes = get_notes(tv_alter.delegate.selected_item)
all_chords = []
new_chord = ''
txtv_info.text_color = 'red'
if tv_alter.delegate.selected_item == '':
txtv_info.text = 'Select Note type \n\n 1. Flats - notes with flats and notes without accidentals. Notes will be randomized \n 2. Sharps - notes with sharps and notes without accidentals. Notes will be randomized \n 3. All - all notes (with and without accidentals) \n 4. Circle of 4th - notes will be displayed in strict order of circle of 4th \n 5. Circle of 5th - notes will be displayed in strict order of circle of 5th'
txtv_info.font = ('verdana-bold', 15)
elif tv_type.delegate.selected_items == []:
txtv_info.text = 'Select chord type \n\n Multiple choice is possible. \n - maj (M): major chord \n- min (m): minor chord \n- aug (+): augmented chord \n- dim (°): diminished chord \n- dom7 (7): dominant 7th chord \n- min7 (m7): minor 7th \n- maj7 (M7): major 7th \n- aug7 (7aug5): augmented 7 \n- m7dim5 (m7b5): diminished 7'
txtv_info.font = ('verdana-bold', 15)
elif tv_view.delegate.selected_item == '':
            txtv_info.text = 'Select notation \n\n 1. Random: chord notation is randomized. For example, C major can be displayed as Cmaj, CM or C; C minor as Cm, Cmin or C- \n\n 2. Default: the single most common notation is used. For example, C for C major, Cm for C minor. \n\n 3. MusThe: the notations used by the musthe module.'
txtv_info.font = ('verdana-bold', 15)
else:
txtv_info.font = ('verdana-bold', 30)
txtv_info.text_color = 'black'
# loop through notes and chord types
for note in notes:
for item_type in tv_type.delegate.selected_items:
if tv_alter.delegate.selected_item in (
'all', 'Circle4maj', 'Circle5maj', 'Circle4min', 'Circle5min'
) or note.accidental == tv_alter.delegate.selected_item or note.accidental == '':
new_chord = str(Chord(note, chord_type=item_type['value']))
                        # substitute musthe chord types with custom notation: random or default
if tv_view.delegate.selected_item == 'Random':
t = obj[item_type['value']]
new_chord = t.replace_with_random_notation(new_chord)
elif tv_view.delegate.selected_item == 'Default':
t = obj[item_type['value']]
new_chord = t.replace_with_default_notation(new_chord)
all_chords.append(new_chord)
if tv_alter.delegate.selected_item in ('Circle4maj', 'Circle5maj', 'Circle4min', 'Circle5min'):
# Circles of 4th or 5th
txtv_info.text = ', '.join(all_chords)
else:
# random chords output
txtv_info.text = ', '.join(random.sample(all_chords, len(all_chords)))
# --------------------------------
# MAIN
# -------------------------------
v = ChordGenerator(name='CHORD GENERATOR')
v.present('popover')
|
from pathlib import Path
root = Path(__file__).parent.absolute()
import envo
envo.add_source_roots([root])
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from envo import Env, Namespace, env_var, logger, run
from env_comm import StickybeakCommEnv as ParentEnv
p = Namespace("p")
class StickybeakCiEnv(ParentEnv):
class Meta(ParentEnv.Meta):
stage: str = "ci"
emoji: str = "⚙"
load_env_vars = True
class Environ(ParentEnv.Environ):
pypi_username: Optional[str] = env_var(raw=True)
pypi_password: Optional[str] = env_var(raw=True)
e: Environ
def init(self) -> None:
super().init()
@p.command
def bootstrap(self, test_apps=True) -> None:
super().bootstrap(test_apps)
@p.command
def test(self) -> None:
run("pytest --reruns 2 -v tests")
@p.command
def build(self) -> None:
run("poetry build")
@p.command
def publish(self) -> None:
run(f'poetry publish --username "{self.e.pypi_username}" --password "{self.e.pypi_password}"', verbose=False)
@p.command
def rstcheck(self) -> None:
pass
# run("rstcheck README.rst | tee ./workspace/rstcheck.txt")
@p.command
def flake(self) -> None:
pass
# run("flake8 . | tee ./workspace/flake8.txt")
@p.command
def check_black(self) -> None:
run("black --check .")
@p.command
def check_isort(self) -> None:
run("black --check .")
@p.command
def mypy(self) -> None:
        run("mypy .")
@p.command
def generate_version(self) -> None:
import toml
config = toml.load(str(self.meta.root / "pyproject.toml"))
version: str = config["tool"]["poetry"]["version"]
version_file = self.meta.root / "stickybeak/__version__.py"
Path(version_file).touch()
version_file.write_text(f'__version__ = "{version}"\n')
ThisEnv = StickybeakCiEnv
|
#!/usr/bin/env python
# Train Simulator
# hacked by Arno Puder, 4/2003
# modified by Haijie Xiao, 3/2004
from Tkinter import *
import thread, socket, sys, time, os, re, random
debug = 1
def usage ():
print 'usage: train.pyw [--layout1|--layout2]'
sys.exit (-1)
layout = 1
if len (sys.argv) > 1:
if len (sys.argv) != 2:
usage()
if sys.argv[1] == '--layout1':
layout = 1
elif sys.argv[1] == '--layout2':
layout = 2
else:
usage()
if layout == 1:
tile_width = 30
else:
tile_width = 40
train_size = 10
train_color = 'red'
wagon_color = 'yellow'
zamboni_color = 'black'
even_contact_color = 'blue4'
odd_contact_color = 'brown'
train = 20
wagon = 1
zamboni = 78
max_vehicle_speed = 5
zamboni_speed = max_vehicle_speed
sleep_time = 0.05
train_stop_delay = 0.2
command_processing_delay_factor = 0.1
serial_line_delay = 0.1
wait_for_command_sleep_time = 0.3
# The track layout is defined through a two-dimensional grid. Each
# tile of this chessboard-like arrangement can have multiple track
# segments. There are eight access points to each tile, denoted by
# their location:
# NW--N--NE
# | |
# W E
# | |
# SW--S--SE
#
# A track segment lists the two access points that should be connected.
# E.g., "SW:E" will draw a track segment from SW to E.
# Each tile can have multiple segments which must be separated by ';'.
# E.g., "W:E;N:S" will put a cross in the tile.
#track_layout_1 simulates the train model layout
track_layout_1 = [
['', '', 'sw:e', 'w:e', 'w:e;w:se', 'w:e', 'w:e', 'w:e', 'w:e', 'w:e', 'w:e;sw:e', 'w:e', 'w:se', '', '', ''],
['', 'sw:ne', '', '', '', 'nw:e;nw:se', 'w:se', '', '', 'sw:ne', '', '', '', 'nw:se', '', ''],
['ne:s', '', '', '', '', '', 'nw:s', 'nw:se', 's:ne', '', '', '', '', '', 'nw:se', ''],
['n:s', '', '', '', '', '', 'n:s', '', 'nw:s;n:s', '', '', '', '', '', '', 'nw:s'],
['n:s', '', '', '', '', '', 'n:s', '', 'n:se', '', '', '', '', '', '', 'n:s'],
['n:s', '', '', '', '', '', 'n:s', '', '', 'nw:se', '', '', '', '', '', 'n:s;n:sw'],
['n:se', '', '', '', '', '', '', '', '', '', 'nw:se', '', '', '', 'sw:ne', 'n:s'],
['', 'nw:se', '', '', '', '', '', 'sw:ne', '', 'sw:e', 'w:e', 'w:e;nw:e', 'w:e', 'sw:ne;w:ne', '', 'n:sw'],
['', '', 'nw:se;nw:e', 'w:se;w:e', 'w:e', 'w:e', 'w:ne', '', 'sw:ne', '', '', '', 'sw:ne', '', 'sw:ne', ''],
['', '', '', 'nw:se', 'nw:e', 'w:e', 'w:e', 'w:ne', '', '', '', 'sw:ne', '', 'sw:ne', '', ''],
['', '', '', '', 'nw:e', 'w:e', 'w:e', 'w:e', 'w:e', 'w:e', 'w:ne', '', '', '', '', '']]
# track_labels_1 defines the switch labels, contacts (sensors), and contact labels.
# The first element of track_labels_1 (track-labels_1[0]) defines switch labels.
# The second element of track_labels_1 (track-labels_1[1]) defines contacts and contact labels.
#
# For switch labels:
# A label is defined using format: 'type:label number:tile:position'
# type - the type of the label. 's' for switches and 'c' for contacts
# label number - the label number
#    tile - the tile containing the switch. It is given in the format "tile_column, tile_row"
# position - the position inside the tile where to display the label.
# Can be: 's', 'w', 'e', 'n', 'sw', 'se', 'nw', 'ne'
# E.g., 's:1:4:0:s' defines switch #1. The switch is in the tile(4,0).
# The label of the switch will be placed at the south of the tile.
#
# For contacts and contact labels:
# The format is similar to the switch label format, except that a tile list
# is used instead of a single tile. (A contact can span multiple tiles.) A
# tile list is a set of tiles separated by ';'. The first tile in the list
# is the tile where the label will be displayed.
# E.g., 'c:1:8,8;9,7;10,7:se' defines contact #1. It occupies tiles (8,8),
# (9,7), and (10,7). The label of this contact will be displayed in the
# south-east part of tile (8,8).
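#
# Parsing sketch: a label definition splits on ':' into (type, number,
# tile list, position); the tile list splits on ';' and each tile on ',',
# exactly as draw_labels() does later:
#
#     type, no, tiles, position = 'c:1:8,8;9,7;10,7:se'.split (':')
#     x, y = tiles.split (';')[0].split (',')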
track_labels_1 =[
['s:1:13,7:s', 's:2:11,7:s', 's:3:3,8:n', 's:4:2,8:w', 's:5:4,0:sw', 's:6:5,1:w',
's:7:8,3:e','s:8:10,0:se','s:9:15,5:e'],
['c:1:8,8;9,7;10,7:se', 'c:2:5,9;4,9;6,9;7,9:s', 'c:3:10,10;8,10;9,10;11,9;12,8:s',
'c:4:5,10;3,9;4,10;6,10;7,10:s', 'c:5:5,8;7,7;4,8;6,8:n', 'c:6:0,6;0,5;1,7:ne',
'c:7:1,1;0,4;0,3;0,2;2,0;3,0:se','c:8:6,3;6,2;6,4;6,5:w','c:9:7,2;6,1:ne','c:10:7,0;5,0;6,0;8,0;9,0:s',
'c:11:9,1;8,2:se','c:12:9,5;8,4;10,6:ne','c:13:12,0;11,0;13,1:sw','c:14:14,2;15,3;15,4:w',
'c:15:14,6:nw','c:16:14,8;15,6;15,7;13,9:se']]
track_layout_2 = [
['', 'sw:e', 'w:e;w:se', 'w:e', 'w:e;sw:e', 'w:se', '', '', '', '', ''],
['s:ne', '', '', 'nw:s;ne:s', '', '', 'nw:s', '', 'n:s', '', ''],
['n:s;n:se', '', '', 'n:s', '', '', 'n:s', '', 's:n', '', 'n:s'],
['n:s', 'nw:s', '', 'n:se', '', '', 'n:s', '', 'sw:n', '', 'n:s'],
['n:s', 'n:s', '', '', 'nw:e', 'w:e', 'w:e;n:s', 'w:e;w:ne', 'w:e', 'w:se', 'n:s'],
['n:s', 'n:s', '', '', '', '', 'n:s', '', '', '', 'nw:s;n:s'],
['n:se', '', '', 'sw:e', 'w:e', '', 'n:sw;n:se', '', '', '', 'n:sw'],
['', 'nw:e', 'w:e;w:ne', 'w:e', 'w:e', 'w:e;w:ne', 'w:e', 'w:e;nw:e', 'w:e', 'w:ne', '']]
# There are three vehicles on the track: the controllable train,
# the uncontrollable zamboni (another train), and a wagon that
# cannot move on its own. The following variables define
# the initial default locations of these three vehicles. The
# x,y coordinates define the tile and the direction where the
# vehicle will be placed initially.
configurations_1 = [
# Wagon Train Zamboni
([6, 9, 'e'], [6, 3, 'n'], [5, 10, 'e']),
([6, 9, 'e'], [6, 3, 'n'], [6, 10, 'w']),
([8, 2, 's'], [5, 8, 'w'], [5, 10, 'e']),
# ([8, 2, 's'], [5, 8, 'w'], [6, 10, 'w'])
([15, 7, 'sw'], [5, 8, 'w'], [6, 10, 'w'])
]
configurations_2 = [
# Wagon Train Zamboni
([8, 1, 's'], [1, 4, 'n'], [2, 0, 'ne']),
([8, 1, 's'], [1, 4, 'n'], [2, 0, 'sw']),
([10, 2, 's'], [4, 6, 'w'], [2, 0, 'w']),
([10, 2, 's'], [4, 6, 'w'], [2, 0, 'e'])]
if layout == 1:
track_layout = track_layout_1
track_labels = track_labels_1
configurations = configurations_1
else:
    track_layout = track_layout_2
    # layout 2 defines no switch/contact labels; use empty label lists so
    # that label drawing and the M/C commands fail gracefully instead of
    # raising a NameError
    track_labels = [[], []]
    configurations = configurations_2
half_tile_width = tile_width / 2
# Function f(x) computes the coordinates of a track segment along
# a curve. This happens for example for a track segment going from
# SW to E. In order to fit seamlessly with neighboring tiles, the
# function f(x) has the following properties:
# f(0) = 0
# f(tile_width) = tile_width / 2
# f'(0) = 1
# f'(tile_width) = 0
def f (x):
return x - (x * x / (2 * tile_width))
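# Quick check: f(0) = 0, f(tile_width) = tile_width - tile_width/2 =
# tile_width/2, and f'(x) = 1 - x/tile_width, so f'(0) = 1 and
# f'(tile_width) = 0, matching the properties listed above.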
def draw_dot (canvas, x, y, tags):
obj_id = canvas.create_line (x - 1, y - 1, x + 1, y + 1, width=3)
#obj_id = canvas.create_oval (x, y, x, y, width=5, fill='black')
for i in range(len(tags)):
canvas.addtag_withtag (tags[i], obj_id)
def coordinates_within_tile (x, y):
return x < tile_width and x >= 0 and y < tile_width and y >= 0
def line (x, y, dx, dy, reverse_direction=0):
if reverse_direction:
dx = -dx
dy = -dy
x += dx
y += dy
return (x, y, dx, dy)
def curve1 (x, y, mx, dy, reverse_direction=0):
if reverse_direction:
dy = -dy
y += dy
x = f (tile_width - y - 1)
x = (x - half_tile_width) * mx + half_tile_width
return (x, y, mx, dy)
def curve2 (x, y, mx, dy, reverse_direction=0):
if reverse_direction:
dy = -dy
y += dy
x = tile_width - f (y)
x = (x - half_tile_width) * mx + half_tile_width
return (x, y, mx, dy)
def curve3 (x, y, dx, my, reverse_direction=0):
if reverse_direction:
dx = -dx
x += dx
y = tile_width - f (x)
y = (y - half_tile_width) * my + half_tile_width
return (x, y, dx, my)
def curve4 (x, y, dx, my, reverse_direction=0):
if reverse_direction:
dx = -dx
x += dx
y = tile_width - f (tile_width - x - 1)
y = (y - half_tile_width) * my + half_tile_width
return (x, y, dx, my)
w = tile_width - 1
w2 = half_tile_width
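# For each of the eight access points, funcs maps the point's name to its
# pixel coordinates within the tile plus a dictionary that, per reachable
# exit point, gives the stepping function and its initial dx/dy (for the
# curves, mirror factors) used to trace the segment dot by dot.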
funcs = {
'n': [w2, 0, {'s': [line, 0, +1],
'sw': [curve1, +1, +1],
'se': [curve1, -1, +1]}],
'ne': [w, 0, {'sw': [line, -1, +1],
's': [curve2, +1, +1],
'w': [curve4, -1, -1]}],
'e': [w, w2, {'w': [line, -1, 0],
'sw': [curve3, -1, +1],
'nw': [curve3, -1, -1]}],
'se': [w, w, {'nw': [line, -1, -1],
'w': [curve4, -1, +1],
'n': [curve1, -1, -1]}],
's': [w2, w, {'n': [line, 0, -1],
'ne': [curve2, +1, -1],
'nw': [curve2, -1, -1]}],
'sw': [0, w, {'e': [curve3, +1, +1],
'ne': [line, +1, -1],
'n': [curve1, +1, -1]}],
'w': [0, w2, {'e': [line, 1, 0],
'se': [curve4, 1, +1],
'ne': [curve4, +1, -1]}],
'nw': [0, 0, {'se': [line, +1, +1],
's': [curve2, -1, +1],
'e': [curve3, +1, -1]}]}
class Vehicle:
def __init__ (self, track_gui, name, color):
self.track_gui = track_gui
self.track = track_gui.get_track()
self.name = name
self.canvas = self.track.get_canvas()
obj_id = self.canvas.create_oval (0, 0, train_size,
train_size, fill=color)
self.canvas.addtag_withtag (name, obj_id)
self.curr_pos_x = 0
self.curr_pos_y = 0
self.set_speed (0)
def remove_from_canvas (self):
self.canvas.delete (self.name)
def set_location (self, tile_x, tile_y, entry_point, exit_point, func, x, y, dx, dy):
self.tile_x = tile_x
self.tile_y = tile_y
self.entry_point = entry_point
self.exit_point = exit_point
self.func = func
self.x = x
self.y = y
self.dx = dx
self.dy = dy
self.draw()
def collides (self, vehicle):
# Check if this vehicle collides with another vehicle
dx = abs (self.curr_pos_x - vehicle.curr_pos_x)
dy = abs (self.curr_pos_y - vehicle.curr_pos_y)
distance = dx * dx + dy * dy
return distance <= train_size * train_size
def is_in_same_tile (self, vehicle):
return self.tile_x == vehicle.tile_x and self.tile_y == vehicle.tile_y
def is_in_tile (self, x, y):
return self.tile_x == x and self.tile_y == y
def set_speed (self, speed):
self.speed = speed
self.ticks = (max_vehicle_speed + 1) - speed
def get_speed (self):
return self.speed
def draw (self):
new_pos_x = self.x + self.tile_x * tile_width - train_size / 2
new_pos_y = self.y + self.tile_y * tile_width - train_size / 2
move_x = new_pos_x - self.curr_pos_x
move_y = new_pos_y - self.curr_pos_y
self.canvas.move (self.name, move_x, move_y)
self.curr_pos_x = new_pos_x
self.curr_pos_y = new_pos_y
def move_to_next_segment (self):
next_tile_x = self.tile_x
next_tile_y = self.tile_y
if self.exit_point == 'n':
next_entry_point = 's'
next_tile_y -= 1
elif self.exit_point == 'ne':
next_entry_point = 'sw'
next_tile_x += 1
next_tile_y -= 1
elif self.exit_point == 'e':
next_entry_point = 'w'
next_tile_x += 1
elif self.exit_point == 'se':
next_entry_point = 'nw'
next_tile_x += 1
next_tile_y += 1
elif self.exit_point == 's':
next_entry_point = 'n'
next_tile_y += 1
elif self.exit_point == 'sw':
next_entry_point = 'ne'
next_tile_x -= 1
next_tile_y += 1
elif self.exit_point == 'w':
next_entry_point = 'e'
next_tile_x -= 1
elif self.exit_point == 'nw':
next_entry_point = 'se'
next_tile_x -= 1
next_tile_y -= 1
else:
assert 0
self.track.enter_vehicle (self, next_tile_x, next_tile_y,
next_entry_point)
def move (self):
if not coordinates_within_tile (self.x, self.y):
try:
self.move_to_next_segment()
except:
# if we got an exception, then we ran out of track
self.track_gui.abort_simulation ('Ran out of track')
return
(self.x, self.y, self.dx, self.dy) = self.func (self.x,
self.y,
self.dx,
self.dy)
(self.x, self.y, self.dx, self.dy) = self.func (self.x,
self.y,
self.dx,
self.dy)
self.draw()
def reverse_direction (self):
(self.x, self.y, self.dx, self.dy) = self.func (self.x,
self.y,
self.dx,
self.dy,
1)
tmp = self.entry_point
self.entry_point = self.exit_point
self.exit_point = tmp
def next_simulation_step (self):
if self.speed == 0:
return
if self.ticks == 0:
self.ticks = (max_vehicle_speed + 1) - self.speed
self.move()
self.ticks -= 1
class Train (Vehicle):
def __init__ (self, track, wagon):
Vehicle.__init__ (self, track, 'train', train_color)
self.wagon = wagon
self.wagon_is_attached = 0
def move (self):
Vehicle.move (self)
if self.collides (self.wagon) and not self.wagon_is_attached:
self.wagon_is_attached = 1
            # We have to check from which side the train approaches
            # the wagon. Depending on the direction, we need to reverse
            # the direction of the wagon (which, by default, initially
            # points to the inside of the tile)
if self.is_in_same_tile (self.wagon):
self.wagon.reverse_direction()
if self.wagon_is_attached:
self.wagon.move()
def reverse_direction (self):
Vehicle.reverse_direction (self)
if self.wagon_is_attached:
self.wagon.reverse_direction()
class Wagon (Vehicle):
def __init__ (self, track):
Vehicle.__init__ (self, track, 'wagon', wagon_color)
class Zamboni (Vehicle):
def __init__ (self, track_gui, vehicle1, vehicle2):
Vehicle.__init__ (self, track_gui, 'zamboni', zamboni_color)
self.vehicle1 = vehicle1
self.vehicle2 = vehicle2
self.set_speed (zamboni_speed)
def move (self):
Vehicle.move (self)
if self.collides (self.vehicle1) or self.collides (self.vehicle2):
# Zamboni crashed into another vehicle. Abort the simulation
self.track_gui.abort_simulation ('Collision with Zamboni')
def animate_switch (canvas, tag):
original_color = canvas.itemcget(tag, "fill")
if original_color == 'black':
flashing_color = 'red'
else:
flashing_color = 'black'
for i in range (10):
canvas.itemconfig (tag, fill=flashing_color)
#Misc.update (canvas)
time.sleep (0.1)
canvas.itemconfig (tag, fill=original_color)
#Misc.update (canvas)
time.sleep (0.1)
class Tile:
def __init__ (self, track, x, y, layout):
self.track = track
self.x = x
self.y = y
self.layout = layout
self.set_switch (random.randint(0, 1))
def set_switch (self, switch, visual_feedback=0):
self.switch = switch
if visual_feedback:
tag = '%d_%d_%d' % (self.x, self.y, switch)
canvas = self.track.get_canvas()
thread.start_new (animate_switch, (canvas, tag))
#for i in range (15):
# canvas.itemconfig (tag, fill='red')
# Misc.update (canvas)
# time.sleep (0.1)
# canvas.itemconfig (tag, fill='black')
# Misc.update (canvas)
# time.sleep (0.1)
def draw (self, canvas):
if self.layout == '':
return
segments = self.layout.split (';')
x_ofs = self.x * tile_width
y_ofs = self.y * tile_width
seg = 0
for s in segments:
corners = s.split (':')
frm = corners[0]
to = corners[1]
spec = funcs[frm]
x = spec[0]
y = spec[1]
to_spec = spec[2][to]
func = to_spec[0]
dx = to_spec[1]
dy = to_spec[2]
tags = []
tags.append("track_segment")
if (len(segments) == 1 ):
contact_id = self.get_contact_id()
if contact_id != 0 and contact_id % 2 == 0:
tags.append("even_contact")
elif contact_id != 0:
tags.append("odd_contact")
else:
tags.append("non_contact")
else:
if seg == 0:
tags.append("switch_green")
else:
tags.append("switch_red")
tags.append ('%d_%d_%d' % (self.x, self.y, seg))
seg += 1
while coordinates_within_tile (x, y):
draw_dot (canvas, x_ofs + x, y_ofs + y, tags)
(x, y, dx, dy) = func (x, y, dx, dy)
def get_contact_id(self):
contact_id = 0
s = '[^0-9]' + str(self.x) + ',' + str(self.y) + '[^0-9]'
        for i in range(len(track_labels[1])):
            if re.search(s, track_labels[1][i]) != None:
                contact_id = i + 1
                break
return contact_id
def enter_vehicle (self, vehicle, entry_point):
assert self.layout != ''
segments = self.layout.split (';')
exit_point = None
switch = self.switch
for s in segments:
corners = s.split (':')
frm = corners[0]
to = corners[1]
if frm == entry_point:
exit_point = to
if switch == 0:
break
switch -= 1
elif to == entry_point:
exit_point = frm
if switch == 0:
break
switch -= 1
assert exit_point != None
spec = funcs[entry_point]
v_x = spec[0]
v_y = spec[1]
exit_spec = spec[2][exit_point]
v_func = exit_spec[0]
v_dx = exit_spec[1]
v_dy = exit_spec[2]
vehicle.set_location (self.x, self.y, entry_point, exit_point,
v_func, v_x, v_y, v_dx, v_dy)
class Track:
def __init__ (self, canvas):
self.canvas = canvas
self.track_layout = []
self.vehicles = []
self.show_grid = 0
self.show_labels = 0
def get_canvas (self):
return self.canvas
def expand (self, x, y):
# Make sure that the array track_layout has enough x and y components
while len (self.track_layout) < x + 1:
self.track_layout.append ([])
l = self.track_layout[x]
while len (l) < y + 1:
l.append (None)
def add_track_segment (self, x, y, layout):
self.expand (x, y)
self.track_layout[x][y] = Tile (self, x, y, layout)
def set_switch (self, x, y, s, animate_switch):
self.track_layout[x][y].set_switch (s, animate_switch)
def reset_switches (self):
for x in self.track_layout:
for y in x:
y.set_switch (random.randint(0, 1))
def draw (self):
for x in self.track_layout:
for y in x:
y.draw (self.canvas)
def toggle_grid (self):
self.show_grid = not self.show_grid
if self.show_grid:
self.draw_grid()
else:
self.remove_grid()
def draw_grid (self):
cols = len (self.track_layout)
rows = len (self.track_layout[0])
for x in range (cols):
x_pos = x * tile_width
y_pos = tile_width * rows
obj_id = self.canvas.create_text (tile_width / 2 + x_pos, 5,
anchor='n', text= x)
self.canvas.addtag_withtag ('grid', obj_id)
if x == 0:
continue
obj_id = self.canvas.create_line (x_pos, 0, x_pos, y_pos, dash='.')
self.canvas.addtag_withtag ('grid', obj_id)
for y in range (rows):
x_pos = tile_width * cols
y_pos = y * tile_width
obj_id = self.canvas.create_text (7, tile_width / 2 + y_pos,
anchor='w', text= y)
self.canvas.addtag_withtag ('grid', obj_id)
if y == 0:
continue
obj_id = self.canvas.create_line (0, y_pos, x_pos, y_pos, dash='.')
self.canvas.addtag_withtag ('grid', obj_id)
self.canvas.lower ('grid')
def remove_grid (self):
self.canvas.delete ('grid')
def toggle_labels (self):
self.show_labels = not self.show_labels
if self.show_labels:
self.draw_labels()
self.color_track()
else:
self.remove_labels()
self.uncolor_track()
def draw_labels (self):
sn_position_x = ew_position_y = tile_width/2
n_position_y = 0
s_position_y = tile_width - 12
w_position_x = 5
e_position_x = tile_width - 5
for i in range (len(track_labels)):
for j in range (len(track_labels[i])):
tokens = track_labels[i][j].split(':')
type = tokens[0]
no = tokens[1]
tiles = tokens[2].split(';')
position = tokens[3]
display_tile = tiles[0]
xy = display_tile.split(',')
x = int(xy[0])
y = int(xy[1])
if type == 's':
color = 'red'
text_color = 'black'
else:
if (j+1) %2 == 0:
color = even_contact_color
else:
color = odd_contact_color
text_color = 'white'
if position == 'n':
x_position = x * tile_width + sn_position_x
y_position = y * tile_width + n_position_y
elif position == 'e':
x_position = x * tile_width + e_position_x
y_position = y * tile_width + ew_position_y
elif position == 's':
x_position = x * tile_width + sn_position_x
y_position = y * tile_width + s_position_y
elif position == 'w':
x_position = x * tile_width + w_position_x
y_position = y * tile_width + ew_position_y
elif position == 'se':
x_position = x * tile_width + e_position_x
y_position = y * tile_width + s_position_y
elif position == 'sw':
x_position = x * tile_width + w_position_x
y_position = y * tile_width + s_position_y
elif position == 'ne':
x_position = x * tile_width + e_position_x
y_position = y * tile_width + n_position_y
else:
x_position = x * tile_width + w_position_x
y_position = y * tile_width + n_position_y
if type == 's':
obj_id = self.canvas.create_oval(x_position-6,
y_position,
x_position+6,
y_position+12,
outline = color,
fill = color)
else:
obj_id = self.canvas.create_rectangle(x_position-6,
y_position,
x_position+6,
y_position+12,
width=1,
outline = color,
fill = color)
self.canvas.addtag_withtag('label', obj_id)
obj_id = self.canvas.create_text (x_position, y_position,
anchor='n', text= no,
fill = text_color)
self.canvas.addtag_withtag ('label', obj_id)
self.canvas.lower ('label')
def remove_labels (self):
self.canvas.delete ('label')
def color_track(self):
self.canvas.itemconfig('odd_contact', fill = odd_contact_color)
self.canvas.itemconfig('even_contact', fill = even_contact_color)
self.canvas.itemconfig('non_contact', fill = 'red')
self.canvas.itemconfig('switch_green', fill = 'green')
self.canvas.itemconfig('switch_red', fill = 'red')
def uncolor_track(self):
self.canvas.itemconfig('track_segment', fill = 'black')
def enter_vehicle (self, vehicle, x, y, entry_point):
if vehicle not in self.vehicles:
self.vehicles.append (vehicle)
self.track_layout[x][y].enter_vehicle (vehicle, entry_point)
def remove_vehicles (self):
for v in self.vehicles:
v.remove_from_canvas()
self.vehicles = []
def next_simulation_step (self):
for v in self.vehicles:
v.next_simulation_step()
class StatusWindow (Frame):
def __init__ (self, parent=None, text=''):
Frame.__init__ (self, parent)
self.pack (expand=YES, fill=BOTH)
self.makewidgets()
def makewidgets (self):
sbar = Scrollbar (self)
text = Text (self, height=5, width=40, relief=SUNKEN)
sbar.config (command=text.yview)
text.config (yscrollcommand=sbar.set)
sbar.pack (side=RIGHT, fill=Y)
text.pack (side=LEFT, expand=YES, fill=BOTH)
self.text = text
self.clear()
def clear (self):
self.empty = 1
self.text.config (state='normal')
self.text.delete ('1.0', END)
self.text.config (state='disabled')
def log (self, msg):
self.text.config (state='normal')
if not self.empty:
self.text.insert (END, '\n')
self.text.insert (END, msg)
self.text.yview (END)
self.text.config (state='disabled')
self.empty = 0
def remove_last_logs(self, count):
self.text.config (state='normal')
for i in range(count):
self.text.delete("end -1 lines", END)
if self.text.get(1.0, END) == '\n':
print("empty")
self.empty = 1
#haijie: have problem preventing flashing when deleting from a full window
self.text.config (state='disabled')
class TrainGUI (Frame):
def __init__ (self, track_layout):
Frame.__init__ (self)
self.master.title ('Train Simulator')
tiles_x = len (track_layout[0])
tiles_y = len (track_layout)
# Title
title_frame = Frame (self)
title_frame.pack (side=TOP)
title = Label (title_frame, text='Train Simulator ')
title.config (pady=10, font=('times', 30, 'bold italic'))
title.pack (side=LEFT)
# Logo
giffile = None
for fname in ['tos.gif', 'tools/train/tos.gif']:
if os.path.exists(fname):
giffile = fname
if giffile == None:
            print 'Cannot find tos.gif or tools/train/tos.gif'
            sys.exit(-1)
self.pic = PhotoImage (file=giffile)
logo = Label (title_frame, image=self.pic)
logo.pack (side=RIGHT, padx=10, pady=10)
# Track
track_frame = Frame (self)
track_frame.pack (side=TOP)
canvas = Canvas (track_frame, height=tile_width*tiles_y,
width=tile_width*tiles_x, bd=5, relief=RAISED)
canvas.pack (side=TOP, padx=10, pady=10)
track = Track (canvas)
# Buttons: Reset, Exit
control_frame = Frame (self)
control_frame.pack (side=RIGHT, padx=10, pady=10)
button_frame = Frame (control_frame)
button_frame.pack (side=BOTTOM, padx=10, pady=10)
reset = Button (button_frame, text='Reset', command=self.reset)
reset.pack (side=LEFT, fill=BOTH, pady=5)
exit = Button (button_frame, text='Exit', command=self.quit)
exit.pack (side=LEFT, fill=BOTH, padx=10, pady=5)
conf_list = []
for i in range (len (configurations)):
conf_list.append ('Configuration %d' % (i+1))
self.active_configuration = StringVar()
self.active_configuration.set (conf_list[0])
conf = OptionMenu (control_frame, self.active_configuration,
*conf_list)
conf.pack (side=TOP, anchor=W, pady=10)
zamboni = Checkbutton (control_frame, text='Zamboni',
command=self.toggle_show_zamboni)
zamboni.pack (side=TOP, anchor=W)
if debug:
grid = Checkbutton (control_frame, text='Show grid',
command=track.toggle_grid)
grid.pack (side=TOP, anchor=W)
label = Checkbutton(control_frame, text = 'Show labels',
command=self.toggle_labels)
label.pack (side=TOP, anchor=W)
animate_switch = Checkbutton (control_frame, text='Animate switch',
command=self.toggle_animate_switch)
animate_switch.pack (side=TOP, anchor=W)
# Command
command_frame = Frame (self)
command_frame.pack (side=TOP, anchor=W, padx=10)
Label (command_frame, text='Command:').pack (side=LEFT)
entry = Entry (command_frame)
entry.bind ('<Return>', lambda event: self.exec_command (entry))
entry.focus_set()
entry.pack (side=LEFT)
# Status window
self.sw = StatusWindow (self)
self.sw.pack (side=BOTTOM, padx=10, pady=10)
self.pack()
# Init tiles of track
for row in range (tiles_y):
for col in range (tiles_x):
track.add_track_segment (col, row, track_layout[row][col])
track.draw()
self.track = track
self.show_zamboni = 0
self.animate_switch = 0
self.setup_vehicles()
self.simulation_running = 0
self.start_simulation()
self.command_buffer = []
def setup_vehicles (self):
# Get the configuration number
i = int (self.active_configuration.get().split (' ')[1]) - 1
wagon_location, train_location, zamboni_location = \
configurations[i]
self.track.remove_vehicles()
wagon = Wagon (self)
self.wagon = wagon
self.track.enter_vehicle (wagon,
wagon_location[0],
wagon_location[1],
wagon_location[2])
train = Train (self, wagon)
self.train = train
self.track.enter_vehicle (train,
train_location[0],
train_location[1],
train_location[2])
train.reverse_direction()
zamboni = None
if self.show_zamboni:
zamboni = Zamboni (self, train, wagon)
self.track.enter_vehicle (zamboni,
zamboni_location[0],
zamboni_location[1],
zamboni_location[2])
self.zamboni = zamboni
def reset_switches (self):
self.track.reset_switches()
def get_track (self):
return self.track
def toggle_show_zamboni (self):
self.show_zamboni = not self.show_zamboni
def toggle_animate_switch (self):
self.animate_switch = not self.animate_switch
def toggle_labels (self):
self.track.toggle_labels()
def exec_command (self, entry):
cmd = entry.get()
entry.delete ('0', END)
if cmd == '':
return
self.process_train_command (cmd)
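    # Command protocol, as implemented by the handlers below:
    #   L<loco>S<speed>  set the train's speed (0..max_vehicle_speed)
    #   L<loco>D         reverse the train's direction (only while stopped)
    #   M<switch>G|R     set a switch to green or red
    #   R                clear the s88 contact memory buffer
    #   C<contact>       probe a contact (must directly follow an 'R')
    # Commands arrive over the socket terminated by a carriage return.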
def process_train_command (self, cmd):
if not self.simulation_running:
self.sw.log ('Simulation is currently not running')
return ''
if cmd[0] == 'L':
return_value = self.process_L_command(cmd)
elif cmd[0] == 'M':
return_value = self.process_M_command(cmd)
elif cmd[0] == 'C':
return_value = self.process_C_command(cmd)
elif cmd[0] == 'R':
return_value = self.process_R_command(cmd)
else:
self.abort_simulation('Illegal command (%s)' % cmd)
return ''
self.pre_pre_cmd = self.pre_cmd
self.pre_cmd = cmd
return return_value
def process_L_command (self, cmd):
is_format_correct = 1
if cmd.count('L') > 1:
is_format_correct = 0
else:
stripped_cmd = cmd.lstrip('L')
#"L#S#" format
if stripped_cmd.count('S') == 1:
tokens_s = stripped_cmd.split('S')
format = 'S'
try:
vehicle = int (tokens_s[0])
speed = int (tokens_s[1])
except:
is_format_correct = 0
#"L#D" format
elif stripped_cmd[len(stripped_cmd)- 1 ] == 'D' and stripped_cmd.count('D')== 1:
format = 'D'
try:
vehicle = int (stripped_cmd.rstrip('D'))
except:
is_format_correct = 0
else:
is_format_correct = 0
if is_format_correct == 0:
self.abort_simulation('"L" command format: "L # S #" or "L # D" (%s) ' %cmd)
return ''
if vehicle != train:
self.abort_simulation ('Only the train can be manipulated (%s)' %cmd)
return ''
if format == 'S':
if speed < 0 or speed > max_vehicle_speed:
self.abort_simulation('Speed out of range (%s) ' %cmd)
return ''
else:
if speed == 0:
time.sleep (train_stop_delay)
self.train.set_speed(speed)
self.sw.log('Changed train velocity to %d (%s)' % (speed, cmd))
else:
if self.train.get_speed() != 0:
self.abort_simulation ('Can not reverse direction while train is moving (%s) ' %cmd)
return ''
self.train.reverse_direction()
self.sw.log ('Reversed direction of train (%s)' % cmd)
return ''
def process_M_command(self, cmd):
is_format_correct = 1
if cmd.count('M') > 1:
            is_format_correct = 0
else:
stripped_cmd = cmd.lstrip('M')
if (stripped_cmd[len(stripped_cmd)-1]) == 'R' and stripped_cmd.count('R') == 1:
switch_setting = 1
try:
switchID = int (stripped_cmd.rstrip('R'))
except:
is_format_correct = 0
elif (stripped_cmd[len(stripped_cmd)-1]) == 'G' and stripped_cmd.count('G') == 1:
switch_setting = 0
try:
switchID = int (stripped_cmd.rstrip('G'))
except:
is_format_correct = 0
else:
is_format_correct = 0
if is_format_correct == 0:
self.abort_simulation ('"M" command format: "M#[G][R]" (%s)' % cmd)
return ''
if switchID < 1 or switchID > len (track_labels[0]):
self.abort_simulation ('Switch ID out of range (%s)' % cmd )
return ''
tokens = track_labels[0][switchID-1].split(':')
tile = tokens[2]
tile_xy = tile.split(',')
tile_x = int(tile_xy[0])
tile_y = int(tile_xy[1])
self.track.set_switch(tile_x, tile_y, switch_setting, self.animate_switch)
        self.sw.log ('Changed switch %d to %s (%s)' % (switchID, cmd[-1], cmd))
return ''
def process_C_command(self, cmd):
is_format_correct = 1
if self.pre_cmd != 'R':
self.abort_simulation('s88 memory buffer has not been cleaned (%s)' % cmd)
return ''
if cmd.count('C') > 1:
            is_format_correct = 0
else:
stripped_cmd = cmd.lstrip('C')
try:
contactID = int(stripped_cmd)
except:
is_format_correct = 0
if is_format_correct == 0:
self.abort_simulation ('"C" command format "C#" (%s)' % cmd)
return ''
if contactID < 1 or contactID > len(track_labels[1]):
self.abort_simulation('Contact ID out of range (%s)' %cmd)
return ''
tokens = track_labels[1][contactID-1].split(':')
tiles = tokens[2].split(';')
status = 0
for i in range(len(tiles)):
tile_xy = tiles[i].split(',')
tile_x = int(tile_xy[0])
tile_y = int(tile_xy[1])
if self.train.is_in_tile (tile_x, tile_y) or self.wagon.is_in_tile (tile_x, tile_y):
status = 1
break
if self.zamboni != None and self.zamboni.is_in_tile (tile_x, tile_y):
status = 1
break
if self.pre_pre_cmd == cmd and self.pre_status == status:
self.probe_count = self.probe_count + 1
self.sw.remove_last_logs(2)
self.sw.log('Probe result of [%d]: %d (%s).....%d times' %(contactID, status, cmd, self.probe_count))
else:
self.probe_count = 1
self.sw.log ('Probe result of [%d]: %d (%s)' % (contactID, status, cmd))
self.pre_status = status
return "*" + str (status) + "\015"
def process_R_command(self, cmd):
if len(cmd) != 1:
self.abort_simulation('"R" command format "R" (%s) ' %cmd)
return ''
self.sw.log('s88 memory buffer cleaned. (%s)' % cmd )
return ''
def reset (self):
self.setup_vehicles()
self.reset_switches()
self.sw.clear()
del self.command_buffer
self.command_buffer = []
self.start_simulation()
def start_simulation (self):
# Init pre_cmd, pre_pre_cmd, pre_status
self.pre_pre_cmd = ''
self.pre_cmd = ''
self.pre_status = -1
if not self.simulation_running:
self.simulation_running = 1
def abort_simulation (self, msg):
self.simulation_running = 0
self.sw.log (msg)
self.sw.log ('Simulation aborted')
def next_simulation_step (self):
if self.simulation_running:
self.track.next_simulation_step()
def add_command_to_buffer(self, cmd, time_interval):
self.command_buffer.append((cmd, time_interval))
def get_next_command(self):
if len(self.command_buffer) != 0:
return self.command_buffer.pop(0)
else:
return ()
class Socket_manager:
def __init__ (self, inet):
ip_port = inet.split (':')
if len (ip_port) != 2:
print 'Bad inet argument (%s)' % inet
thread.exit_thread()
self.ip = ip_port[0]
self.port = int (ip_port[1])
self.is_connected = 0
self.glock = thread.allocate_lock()
self.rlock = thread.allocate_lock()
def get_connection (self):
self.glock.acquire()
if self.is_connected == 0:
self.connect()
self.glock.release()
return self.conn
def connect (self):
try:
s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
s.bind ((self.ip, self.port))
s.listen (1)
self.conn, addr = s.accept()
except:
print "Could not connect to '%s'" % inet
thread.exit_thread()
self.is_connected = 1
def re_connect(self, oldconn):
self.rlock.acquire()
if self.conn == oldconn:
self.conn.close()
self.connect()
self.rlock.release()
return self.conn
def calculate_time_interval(conn):
    # Measure the arrival times of six CR-terminated commands and return
    # the average interval between consecutive commands. On a connection
    # error the count is restarted. (A while loop is used so that the
    # restart actually takes effect; a for loop would clobber the counter.)
    i = 0
    start_time = 0
    end_time = 0
    while i < 6:
        while 42:
            try:
                ch = conn.recv (1)
            except:
                conn = socket_man.re_connect(conn)
                i = -2
                break
            if not ch:
                conn = socket_man.re_connect(conn)
                i = -2
                break
            if ch == '\015':
                break
        if i == 0:
            start_time = time.time()
        if i == 5:
            end_time = time.time()
        i = i + 1
    return (end_time - start_time)/5.0 - 0.0055
def read_commands_from_socket(train_gui, socket_man):
conn = socket_man.get_connection()
last_time = time.time() - 1
while 42:
cmd = ''
while 42:
try:
ch = conn.recv (1)
except:
conn = socket_man.re_connect(conn)
cmd = "invalid"
break
if not ch:
conn = socket_man.re_connect(conn)
cmd = "invalid"
break
if ch == '\015':
break
cmd += ch
this_time = time.time()
#print this_time - last_time
global detected_time_interval
#the first command from boot loader
if cmd == 'M':
detected_time_interval = calculate_time_interval(conn)
train_gui.reset()
print "detected time interval: %f " %(detected_time_interval)
elif cmd != "invalid":
#AP: remove timing code. Something is not working.
#if this_time - last_time < detected_time_interval:
# train_gui.abort_simulation("Commands are sent too fast. Not able to process. (%s)" % (cmd))
#else:
train_gui.add_command_to_buffer(cmd, this_time - last_time)
last_time = this_time
def process_train_command(train_gui, socket_man):
conn = socket_man.get_connection()
last_factor = 1
while 42:
temp = train_gui.get_next_command()
if len(temp) > 0:
cmd = temp[0]
            # Calculate the delay between two commands, i.e. how long the
            # user waited before sending this command.
            #
            # time_interval: the interval between the time this command
            # was received and the time the previous command was received.
            # It corresponds to the delay the user put between the two
            # commands.
            #
            # factor: time_interval relative to detected_time_interval,
            # the measured interval between two commands sent with minimal
            # delay in between.
            #
            # delay_time: how long the simulator must wait before
            # processing this command; it is proportional to factor and
            # hence to the user's delay between the two commands.
time_interval = temp[1]
factor = time_interval/detected_time_interval
if factor < 10:
last_factor = factor
delay_time = command_processing_delay_factor * last_factor
time.sleep( serial_line_delay + delay_time)
ret = train_gui.process_train_command (cmd)
if ret != '':
time.sleep(serial_line_delay)
try:
conn.send(ret)
except:
conn = socket_man.get_connection()
try:
conn.send(ret)
except:
print "abort action: sending result of %s" %(cmd)
else:
time.sleep(wait_for_command_sleep_time)
def run_simulation (train_gui):
while 42:
current_time = time.time()
train_gui.next_simulation_step()
processing_time = time.time() - current_time
delta = sleep_time - processing_time
if delta > 0:
time.sleep (delta)
try:
bochsrc = os.path.expanduser ('~/.bochsrc')
bochsrc_f = open (bochsrc, 'r')
lines = bochsrc_f.readlines()
bochsrc_f.close()
inet = None
for l in lines:
if l[0] == '#':
continue
i = l.find ('com1: ')
if i != -1:
if l.find('mode=socket') == -1:
print 'com1 not redirected to a socket'
continue
i = l.find ('dev=')
if i == -1:
print 'com1 not followed by dev argument'
continue
inet = l[i+4:-1]
if inet == None:
print 'com1 not configured properly'
        raise Exception('com1 not configured properly')
except:
inet = "localhost:8888"
random.seed()
train_gui = TrainGUI (track_layout)
socket_man = Socket_manager(inet)
detected_time_interval = 0.2
#0.2 is a guess. Will be modified later when bochs is booting
thread.start_new (read_commands_from_socket, (train_gui, socket_man))
thread.start_new (process_train_command, (train_gui, socket_man))
thread.start_new (run_simulation, (train_gui,))
mainloop()
|
from __future__ import unicode_literals
from frappe import _
def get_data():
	return [
{
"label": _("Google Data"),
"icon": "icon-cog",
"items": [
{
"type": "doctype",
"name": "Production Entry",
"onboard": 1,
"dependencies": [],
"description": _("Production Entry"),
},
]
},
{ "label": _("Key Report"),
"icon": "icon-cog",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Sales Order Status Report",
"doctype": "Sales Order",
},
{
"type": "report",
"is_query_report": True,
"name": "Set Unset Report",
"doctype": "Sales Order",
},
]
}
]
|
import sys
import pytest
import shutil
import pathlib
from squirrel.addon_sources.gumroad import GumroadProducts
@pytest.fixture
def download_folder():
folder = pathlib.Path('download_test')
if not folder.is_dir():
folder.mkdir(parents=True)
yield folder
shutil.rmtree(folder.as_posix())
@pytest.fixture(scope='module')
def gumroad_addons():
products_manager = GumroadProducts(debug_html_requests=True)
yield products_manager
@pytest.mark.skipif(not sys.platform.startswith("win"), reason="currently only works on windows")
def test_gumroad_listing(gumroad_addons):
assert len(gumroad_addons.list()) > 0
@pytest.mark.skipif(not sys.platform.startswith("win"), reason="currently only works on windows")
def test_gumroad_product_download_links(gumroad_addons):
product_list = gumroad_addons.list()
print(product_list)
assert len(product_list) > 0
product = product_list[0]
print(product.download_links)
|
from unittest import mock
from lib_kafka import message_segmenter
import unittest
import uuid
import time
class TestMessageSegmenter(unittest.TestCase):
def test_segment_message(self):
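        # segment_message() yields tuples of (segment_bytes, identifier,
        # total_count, index), which is what the assertions below rely on;
        # the default segment size appears to be 900*1024 bytes.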
msg = '0'*(1000*1024)
all_results = list(message_segmenter.segment_message(msg))
self.assertEqual(len(all_results), 2) # total segments are 2
self.assertEqual(len(all_results[0][0]), 900*1024) # first segment is 900*1024 bytes
self.assertEqual(all_results[0][1], all_results[1][1]) # identifier is equal for both
self.assertEqual(all_results[0][2], all_results[1][2]) # count is equal for both
self.assertEqual(all_results[1][2], b'2') # count is 2
self.assertEqual(all_results[0][3], b'1') # index for first segment is 1
self.assertEqual(all_results[1][3], b'2') # index for second segment is 2
msg = 'Hello World'
all_results = list(message_segmenter.segment_message(msg, 5))
self.assertEqual(len(all_results), 3)
self.assertEqual(len(all_results[0][0]), 5)
msg = b'Hello World'
all_results = list(message_segmenter.segment_message(msg, 5))
self.assertEqual(len(all_results), 3)
self.assertEqual(len(all_results[0][0]), 5)
try:
list(message_segmenter.segment_message(5))
self.fail('expected exception')
except Exception as e:
self.assertEqual(type(e), ValueError)
def test_combine_segments(self):
msgs = [b'hello', b' how ', b'are y', b'ou']
index = 1
identifier = str(uuid.uuid4()).encode('utf-8')
count = b'4'
for msg in msgs:
result = message_segmenter.combine_segments(msg, {
message_segmenter.ID: identifier,
message_segmenter.COUNT: count,
message_segmenter.INDEX: str(index).encode('utf-8')
})
index += 1
if index <= 4:
self.assertIsNone(result)
else:
self.assertIsNotNone(result)
self.assertEqual(result, b'hello how are you')
    def test_purge_segments(self):
message_segmenter._message_store['abcd'] = {
'bitset': None,
'segments': None,
'last_accessed': time.time() - (10 * 60 - 1)
}
message_segmenter._purge_segments()
self.assertEqual(len(message_segmenter._message_store), 1)
time.sleep(2)
message_segmenter._purge_segments()
self.assertEqual(len(message_segmenter._message_store), 0)
if __name__ == '__main__':
unittest.main()
|
from typing import List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
"""
:param prices:
:return:
"""
max_profit = 0
peak_value = valley_value = prices[0]
for price in prices:
if peak_value < price:
peak_value = price
max_profit = max(max_profit, peak_value-valley_value)
if valley_value > price:
valley_value = peak_value = price
return max_profit
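# Example: Solution().maxProfit([7, 1, 5, 3, 6, 4]) returns 5
# (buy at the valley 1, sell at the later peak 6).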
|
# Copyright (c) 2019 NVIDIA Corporation
import unittest
from nemo.backends.pytorch.nm import TrainableNM
from .context import nemo
from .common_setup import NeMoUnitTest
class TestNM1(TrainableNM):
def __init__(self, var1, var2=2, var3=3, **kwargs):
super(TestNM1, self).__init__(**kwargs)
@staticmethod
def create_ports():
return {}, {}
    def forward(self):
pass
class TestNM2(TestNM1):
def __init__(self, var2, **kwargs):
super(TestNM2, self).__init__(**kwargs)
@staticmethod
def create_ports():
return {}, {}
    def forward(self):
pass
class BrokenNM(TrainableNM):
def __init__(self, var2, *error, **kwargs):
super(BrokenNM, self).__init__(**kwargs)
@staticmethod
def create_ports():
return {}, {}
    def forward(self):
pass
class TestNeuralModulesPT(NeMoUnitTest):
def test_simple_local_params(self):
simple_nm = TestNM1(var1=10, var3=30)
local_params = simple_nm.local_parameters
self.assertEqual(local_params["var1"], 10)
self.assertEqual(local_params["var2"], 2)
self.assertEqual(local_params["var3"], 30)
def test_nested_local_params(self):
simple_nm = TestNM2(25, var1="hello")
local_params = simple_nm.local_parameters
self.assertEqual(local_params["var1"], "hello")
self.assertEqual(local_params["var2"], 25)
self.assertEqual(local_params["var3"], 3)
def test_posarg_check(self):
with self.assertRaises(ValueError):
NM = BrokenNM(8)
def test_constructor_TaylorNet(self):
tn = nemo.backends.pytorch.tutorials.TaylorNet(dim=4)
self.assertEqual(tn.local_parameters["dim"], 4)
def test_call_TaylorNet(self):
x_tg = nemo.core.neural_modules.NmTensor(
producer=None, producer_args=None,
name=None,
ntype=nemo.core.neural_types.NeuralType(
{
0: nemo.core.neural_types.AxisType(
nemo.core.neural_types.BatchTag),
1: nemo.core.neural_types.AxisType(
nemo.core.neural_types.ChannelTag)
}))
tn = nemo.backends.pytorch.tutorials.TaylorNet(dim=4)
# note that real port's name: x was used
y_pred = tn(x=x_tg)
self.assertEqual(y_pred.producer, tn)
self.assertEqual(y_pred.producer_args.get("x"), x_tg)
def test_simple_chain(self):
data_source = nemo.backends.pytorch.tutorials.RealFunctionDataLayer(
n=10000, batch_size=1)
trainable_module = nemo.backends.pytorch.tutorials.TaylorNet(dim=4)
loss = nemo.backends.pytorch.tutorials.MSELoss()
x, y = data_source()
y_pred = trainable_module(x=x)
loss_tensor = loss(predictions=y_pred, target=y)
# check producers' bookkeeping
self.assertEqual(loss_tensor.producer, loss)
self.assertEqual(loss_tensor.producer_args,
{"predictions": y_pred, "target": y})
self.assertEqual(y_pred.producer, trainable_module)
self.assertEqual(y_pred.producer_args, {"x": x})
self.assertEqual(y.producer, data_source)
self.assertEqual(y.producer_args, {})
self.assertEqual(x.producer, data_source)
self.assertEqual(x.producer_args, {})
if __name__ == '__main__':
unittest.main()
|
import pandas as pd
import numpy as np
import cv2
import math
from PIL import Image
from sklearn.utils import shuffle
from scipy.ndimage import rotate
class AbstractPipeline(object):
def get_model(self):
raise NotImplementedError
def preprocess_image(self, image):
raise NotImplementedError
def get_train_generator(self, data_folder, batch_size=64):
raise NotImplementedError
def get_validation_generator(self, data_folder, batch_size=64):
raise NotImplementedError
def get_train_samples(self, df):
raise NotImplementedError
def get_validation_samples(self, df):
raise NotImplementedError
def get_weight(self, label):
        return 1  # alternative weighting: math.exp(abs(label))
def path_driving_log(self, data_folder):
return '{}/driving_log.csv'.format(data_folder)
# Credits to
# https://chatbotslife.com/using-augmentation-to-mimic-human-driving-496b569760a9#.xneaoqiwj
def augment_brightness_camera_images(self, image):
image1 = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)
random_bright = .25+np.random.uniform()
image1[:,:,2] = image1[:,:,2]*random_bright
image1 = cv2.cvtColor(image1,cv2.COLOR_HSV2RGB)
return image1
def random_rotation(self, image, steering_angle, rotation_amount=15):
angle = np.random.uniform(-rotation_amount, rotation_amount + 1)
rad = (np.pi / 180.0) * angle
return rotate(image, angle, reshape=False), steering_angle + (-1) * rad
# Credits to
# https://chatbotslife.com/using-augmentation-to-mimic-human-driving-496b569760a9#.xneaoqiwj
def add_random_shadow(self, image):
top_y = 320*np.random.uniform()
top_x = 0
bot_x = 160
bot_y = 320*np.random.uniform()
image_hls = cv2.cvtColor(image,cv2.COLOR_RGB2HLS)
shadow_mask = 0*image_hls[:,:,1]
X_m = np.mgrid[0:image.shape[0],0:image.shape[1]][0]
Y_m = np.mgrid[0:image.shape[0],0:image.shape[1]][1]
shadow_mask[((X_m-top_x)*(bot_y-top_y) -(bot_x - top_x)*(Y_m-top_y) >=0)]=1
#random_bright = .25+.7*np.random.uniform()
if np.random.randint(2)==1:
random_bright = .5
cond1 = shadow_mask==1
cond0 = shadow_mask==0
if np.random.randint(2)==1:
image_hls[:,:,1][cond1] = image_hls[:,:,1][cond1]*random_bright
else:
image_hls[:,:,1][cond0] = image_hls[:,:,1][cond0]*random_bright
image = cv2.cvtColor(image_hls,cv2.COLOR_HLS2RGB)
return image
def crop(self, image):
cropped_image = image[55:135, :, :]
return cropped_image
def resize(self, image, new_shape):
return cv2.resize(image, new_shape)
def get_driving_log_dataframe(self, data_folder):
driving_log_df = pd.read_csv(self.path_driving_log(data_folder))
return driving_log_df
def generate_additional_image(self, image_np, label):
toss = np.random.randint(0, 2)
if toss == 0:
return self.augment_brightness_camera_images(image_np), label
elif toss == 1:
return self.add_random_shadow(image_np), label
    # Generator for dataframes that have left, center and right image columns,
    # as opposed to get_center_only_generator below, which uses only 'center'.
def get_left_center_right_generator(self, data_folder, batch_size=64):
driving_log_df = self.get_driving_log_dataframe(data_folder)
driving_log_df = driving_log_df.reindex(np.random.permutation(driving_log_df.index))
number_of_examples = len(driving_log_df)
image_columns = ['center', 'left', 'right']
X_train = []
y_train = []
weights = []
index_in_batch = 0
batch_number = 0
angle_offset = 0.3
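        # The side cameras get a constant steering offset (a common
        # behavioural-cloning trick) so their frames can serve as extra,
        # recentred training data; the sign matches the delta_steering
        # logic below.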
while True:
for image_column in image_columns:
image_series = driving_log_df[image_column]
steering_series = driving_log_df['steering']
for offset in range(0, number_of_examples, batch_size):
X_train = []
y_train = []
weights = []
end_of_batch = min(number_of_examples, offset + batch_size)
for j in range(offset, end_of_batch):
                        try:
                            image_filename = image_series[j].strip()
                        except Exception:
                            print(j)
                            print(image_series[j])
                            continue
image = Image.open('{0}/{1}'.format(data_folder, image_filename))
image_np = self.preprocess_image(image)
label = steering_series[j]
if image_column == 'left':
delta_steering = -angle_offset
elif image_column == 'right':
delta_steering = angle_offset
else:
delta_steering = 0
label = label + delta_steering
X_train.append(image_np)
y_train.append(label)
weights.append(self.get_weight(label))
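                        # Horizontal flip doubles the data; the steering
                        # label flips sign with the image.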
flipped_image = np.fliplr(image_np)
flipped_label = -label
X_train.append(flipped_image)
y_train.append(flipped_label)
weights.append(self.get_weight(flipped_label))
# generate additional image
X_augmented, y_augmented = self.generate_additional_image(image_np, label)
X_train.append(X_augmented)
y_train.append(y_augmented)
weights.append(self.get_weight(y_augmented))
X_augmented, y_augmented = self.generate_additional_image(flipped_image, flipped_label)
X_train.append(X_augmented)
y_train.append(y_augmented)
weights.append(self.get_weight(y_augmented))
X_train, y_train, weights = shuffle(X_train, y_train, weights)
yield np.array(X_train), np.array(y_train), np.array(weights)
def get_center_only_generator(self, data_folder, batch_size=64):
driving_log_df = self.get_driving_log_dataframe(data_folder)
driving_log_df = driving_log_df.reindex(np.random.permutation(driving_log_df.index))
number_of_examples = len(driving_log_df)
print(driving_log_df.head())
X_train = []
y_train = []
weights = []
index_in_batch = 0
batch_number = 0
angle_offset = 0.3
while True:
image_series = driving_log_df['center']
steering_series = driving_log_df['steering']
for offset in range(0, number_of_examples, batch_size):
X_train = []
y_train = []
weights = []
end_of_batch = min(number_of_examples, offset + batch_size)
for j in range(offset, end_of_batch):
image_filename = image_series[j].lstrip().rstrip()
image = Image.open('{0}/{1}'.format(data_folder, image_filename))
image_np = self.preprocess_image(image)
label = steering_series[j]
X_train.append(image_np)
y_train.append(label)
weights.append(self.get_weight(label))
flipped_image = np.fliplr(image_np)
flipped_label = -label
X_train.append(flipped_image)
y_train.append(flipped_label)
weights.append(self.get_weight(flipped_label))
X_train, y_train, weights = shuffle(X_train, y_train, weights)
yield np.array(X_train), np.array(y_train), np.array(weights)
def get_generator_cleaned(self, data_folder, batch_size=64):
pass
|
import time
from .base import TimeBased
from .base import TimeBasedUnsynced
class Countdown(TimeBasedUnsynced):
module = 'countdown'
time = 10
tick_rate = -0.01
auto_start = False
def check(self):
return self.status == "RUNNING" and self.time > 0
def timer_stopped(self):
if self.time == 0:
self.status = "STOPPED"
data = {'status': 'COMPLETED',
'time': self.time,
}
self.broadcast(data)
class Stopwatch(TimeBasedUnsynced):
module = 'stopwatch'
tick_rate = 0.01
auto_start = False
def timer_stopped(self):
pass
class Clock(TimeBased):
module = 'clock'
tick_rate = 0.5
auto_start = True
colon = True
def tick(self, tm):
# ignore the time sent by the tick - instead get localtime
tm = time.localtime(time.time())
if self.colon:
fmt = "{:02d}:{:02d}"
else:
fmt = "{:02d} {:02d}"
self.colon = not self.colon
data = {'status': 'RUNNING',
'hours': "{:02d}".format(tm.tm_hour),
'minutes': "{:02d}".format(tm.tm_min),
'seconds': "{:02d}".format(tm.tm_sec),
'colon': self.colon,
'as_string': fmt.format(tm.tm_hour, tm.tm_min),
}
self.broadcast(data)
|
import logging
import logging.config
class SpiderRecovery(object):
"""
爬虫的备份恢复类
"""
def __init__(self, isBackupToFile):
self.MODE_NEW = 1
self.MODE_OLD = 0
self.crashDate = None
self.usedData = set()
self.newData = set()
self.isBackupToFile = isBackupToFile
self.__backupFilePath = self.__readLatestBackupFile()
CONF_LOG = "configs/logging.conf"
        logging.config.fileConfig(CONF_LOG)  # load logging settings from the config file
self.__logger = logging.getLogger()
def __readLatestBackupFile(self):
"""
从备份文件列表中恢复最新的备份文件信息
:return: 返回最新的备份文件路径
"""
f = open('backups/latest-backup-file', 'r')
lines = f.readlines()
if len(lines) == 0:
from datetime import datetime
fn = 'backups/backup-' + str(datetime.now()) + '.bak'
self.__writeLatestBackupFile(fn)
else:
fn = lines[-1]
return fn.lstrip().rstrip()
@staticmethod
def __writeLatestBackupFile(backupFile):
"""
将最新的备份文件信息写入备份文件列表
:param backupFile: 需要被写入备份文件列表的文件路径
"""
f = open('backups/latest-backup-file', 'a')
f.write(backupFile)
f.write('\n')
@staticmethod
def __parseLine(line):
"""
解析从备份文件中的每一行信息
:param line: line
:return: 如果匹配失败返回None,否则返回[data,mode]
"""
import re
        pattenNum = re.compile(r'\d+')
        pattenMode = re.compile(r'\{\[\(\d+\)\]\}')
        pattenData = re.compile(r'.*\{\[\(')
modeOnce = re.search(pattenMode, line).group(0)
dataOnce = re.search(pattenData, line).group(0)
        if modeOnce is None and dataOnce is None:  # match failed
return None
return dataOnce[0: len(dataOnce) - 3], re.search(pattenNum, modeOnce).group(0)
def writeToFile(self, path, data, mode):
""" writeToFile
将data于mode信息写入文件
:param path: 备份文件路径
:param data: data
:param mode: mode
"""
try:
backupFile = open(path, 'a')
data = str(data)
            backupFile.write(data)  # write the data
            backupFile.write('{[(' + str(mode) + ')]}')  # write the mode marker
            backupFile.write('\n')  # record separator
backupFile.close()
except Exception:
            self.__logger.warning('backup path does not exist, please create it')
def optimizeBackupFile(self):
"""
优化备份文件信息
缩短备份文件
"""
from datetime import datetime
bfp = 'backups/update-test-backup' + str(datetime.now()) + '.bak'
for newdata in self.newData:
self.writeToFile(bfp, newdata, self.MODE_NEW)
for useddata in self.usedData:
self.writeToFile(bfp, useddata, self.MODE_OLD)
self.__backupFilePath = bfp
self.__writeLatestBackupFile(bfp)
def recoverFromFile(self):
"""
从备份文件中恢复信息
:return:
"""
usedSet = set()
newSet = set()
rf = open(self.__backupFilePath, 'r')
self.__logger.info('from file : %s' % self.__backupFilePath)
lines = rf.readlines()
for line in lines[::-1]:
if line is None or line == '':
continue
(data, mode) = self.__parseLine(line)
mode = int(mode)
            if data not in usedSet and data not in newSet:  # newest record wins; skip older duplicates
if mode == self.MODE_NEW:
newSet.add(data)
elif mode == self.MODE_OLD:
usedSet.add(data)
self.usedData = usedSet
self.newData = newSet
totalData = len(usedSet) + len(self.newData)
self.__logger.info('Recovered %d data from %d lines' % (totalData, len(lines)))
if totalData == 0:
            self.__logger.info('Backup is empty.')
else:
self.__logger.info('Recovered : UsedData:%i , NewData:%i' % (
len(self.usedData), len(self.newData)))
        if totalData + 5000 < len(lines):  # the file holds at least 5000 more lines than recovered records
self.__logger.info('The backup file need to be optimized , it will start now .')
self.optimizeBackupFile()
return usedSet, newSet
def isBackupExists(self):
"""
判断备份当前的最新备份文件是否存在
:return:
"""
print(self.__backupFilePath)
import os
return os.path.exists(self.__backupFilePath)
def recover(self):
"""
外部调用的恢复 api
如果可恢复则恢复
:return: 返回恢复后的数据
"""
if self.isBackupToFile is True:
# self.recoverFromFile()
if self.isBackupExists():
self.__logger.info('backup file exists , starting recover from this file ...')
self.usedData, self.newData = self.recoverFromFile()
# return True
else:
self.__logger.info('backup file not found ...')
# return False
return self.usedData, self.newData
def updateBackup(self, data, _mode):
"""
更新备份信息,将数据置旧
:param _mode:
:param data:
:return:
"""
        if data is None:
            return
        if _mode == self.MODE_OLD:
            self.usedData.add(data)
            if data in self.newData:
                self.newData.remove(data)
        elif _mode == self.MODE_NEW:
            self.newData.add(data)
        if _mode in (self.MODE_OLD, self.MODE_NEW):
if self.isBackupToFile is True:
self.writeToFile(self.__backupFilePath, data, _mode)
def backupList(self, dataList, _mode):
"""
:param _mode:
:param dataList:
:return:
"""
if dataList is None or len(dataList) == 0:
return
for data in dataList:
self.updateBackup(data, _mode)
def printStatus(self):
print("new data : ", len(self.newData))
print("used data : ", len(self.usedData))
if __name__ == '__main__':
r = SpiderRecovery(True)
# r.backup('123')
# r.updateBackup('123')
r.recoverFromFile()
r.printStatus()
# r.optimizeBackupFile()
r.recoverFromFile()
r.printStatus()
# pattenNum = re.compile('\d+')
# pattenMode = re.compile('\{\[\(\d+\)\]\}')
# pattenData = re.compile('.*\{\[\(')
#
# string = '123456{[(1)]}'
#
# modeOnce = re.search(pattenMode, string).group(0)
# dataOnce = re.search(pattenData, string).group(0)
#
# print(re.search(pattenNum, modeOnce).group(0))
# print(dataOnce[0: len(dataOnce) - 3])
|
from ionotomo.astro.real_data import DataPack
from ionotomo.plotting.plot_tools import plot_datapack
from ionotomo.inversion.initial_model import create_initial_model
from ionotomo.geometry.calc_rays import calc_rays
import numpy as np
import os
import logging as log
class Solver(object):
def __init__(self,datapack,output_folder,diagnostic_folder,**kwargs):
self.datapack = datapack
self.output_folder = output_folder
self.diagnostic_folder = diagnostic_folder
log.info("Initializing inversion for {}".format(self.datapack))
@property
def output_folder(self):
return self._output_folder
@output_folder.setter
def output_folder(self,folder):
self._output_folder = os.path.join(os.getcwd(),folder)
        try:
            os.makedirs(self._output_folder)
        except OSError:
            pass
        log.basicConfig(filename=os.path.join(self.output_folder, "log"),
                        format='%(asctime)s %(levelname)s:%(message)s',
                        level=log.DEBUG)
@property
def diagnostic_folder(self):
return self._diagnostic_folder
@diagnostic_folder.setter
def diagnostic_folder(self,folder):
self._diagnostic_folder = os.path.join(self.output_folder, folder)
        try:
            os.makedirs(self._diagnostic_folder)
        except OSError:
            pass
@property
def datapack(self):
return self._datapack
@datapack.setter
def datapack(self,datapack):
assert isinstance(datapack,DataPack)
self._datapack = datapack
    def setup(self, ref_ant_idx=0, tmax=1000., L_ne=20., size_cell=5., straight_line_approx=True, **kwargs):
        ant_idx = -1  # -1 selects all antennas, matching time_idx/dir_idx below
        time_idx = -1
        dir_idx = -1
antennas,antenna_labels = self.datapack.get_antennas(ant_idx = ant_idx)
patches, patch_names = self.datapack.get_directions(dir_idx = dir_idx)
times,timestamps = self.datapack.get_times(time_idx=time_idx)
self.datapack.set_reference_antenna(antenna_labels[ref_ant_idx])
dobs = self.datapack.get_dtec(ant_idx = ant_idx, time_idx = time_idx, dir_idx = dir_idx)
Na = len(antennas)
Nt = len(times)
Nd = len(patches)
fixtime = times[Nt>>1]
phase = self.datapack.get_center_direction()
array_center = self.datapack.radio_array.get_center()
#Get dtec error
#Average time axis down and center on fixtime
if Nt == 1:
var = 0.01**2
Cd = np.ones([Na,1,Nd],dtype=np.double)*var
Ct = (np.abs(dobs)*0.01)**2
CdCt = Cd + Ct
else:
dt = times[1].gps - times[0].gps
log.info("Averaging down window of length {} seconds [{} timestamps]".format(dt*Nt, Nt))
Cd = np.stack([np.var(dobs,axis=1)]*Nt,axis=1)
#dobs = np.stack([np.mean(dobs,axis=1)],axis=1)
Ct = (np.abs(dobs)*0.01)**2
CdCt = Cd + Ct
#time_idx = [Nt>>1]
#times,timestamps = datapack.get_times(time_idx=time_idx)
#Nt = len(times)
log.info("E[S/N]: {} +/- {}".format(np.mean(np.abs(dobs)/np.sqrt(CdCt+1e-15)),np.std(np.abs(dobs)/np.sqrt(CdCt+1e-15))))
log.debug("CdCt = {}".format(CdCt))
vmin = np.percentile(dobs,5)
vmax = np.percentile(dobs,95)
plot_datapack(self.datapack,ant_idx=ant_idx,time_idx=time_idx,dir_idx = dir_idx,
figname='{}/dobs'.format(self.diagnostic_folder), vmin = vmin, vmax = vmax)
ne_tci = create_initial_model(self.datapack,ant_idx = ant_idx, time_idx = time_idx, dir_idx = dir_idx, zmax = tmax,spacing=size_cell)
#save the initial model?
ne_tci.save(os.path.join(self.diagnostic_folder,"ne_initial.hdf5"))
rays = calc_rays(antennas,patches,times, array_center, fixtime, phase, ne_tci, self.datapack.radio_array.frequency, straight_line_approx, tmax, ne_tci.nz)
m_tci = ne_tci.copy()
K_ne = np.mean(ne_tci.M)
m_tci.M /= K_ne
np.log(m_tci.M,out=m_tci.M)
self.K_ne,self.m_tci,self.rays,self.CdCt = K_ne,m_tci,rays,CdCt
    def go(self):
        raise NotImplementedError("Needs implementation")
|
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.patches import Polygon
from davg.lanefinding.ImgMgr import ImgMgr
from davg.lanefinding.BirdsEyeTransform import BirdsEyeTransform
def demonstrate_birdseye_reverse_usage():
img_mgr = ImgMgr()
birdseye = BirdsEyeTransform()
    # Create a blank white image that represents the result of a birdseye warp
dst_img = np.ones((720, 1280), dtype=np.uint8)*255
# "Unwarp" the image to the aleged source
src_img = birdseye.unwarp(dst_img)
# Determine the bounding box of the non-zero values from the unwarped image
src_img_nonzeros = src_img.nonzero()
x_min = min(src_img_nonzeros[1])
x_max = max(src_img_nonzeros[1])
y_min = min(src_img_nonzeros[0])
y_max = max(src_img_nonzeros[0])
#print("src_img_nonzeros", src_img_nonzeros)
print("{} {} {} {}".format(x_min, x_max, y_min, y_max))
# Get the polygon points that define the "unwarped" birdseye view
poly = []
poly.append((x_min, y_max))
poly.append((x_min, min(src_img_nonzeros[0][src_img_nonzeros[1] == x_min])))
poly.append((min(src_img_nonzeros[1][src_img_nonzeros[0] == y_min]), y_min))
poly.append((max(src_img_nonzeros[1][src_img_nonzeros[0] == y_min]), y_min))
poly.append((x_max, min(src_img_nonzeros[0][src_img_nonzeros[1] == x_max])))
poly.append((x_max, y_max))
print("poly", poly)
# Display the results
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
f.set_tight_layout(True)
# Draw the simulated birdseye
birdseye.draw_dst_on_img_gray(dst_img, intensity=127, thickness=2)
ax1.imshow(dst_img, cmap='gray')
# Draw the simulated original source image with the birdseye src coords and
# the polygon defining how the original was unwarped
birdseye.draw_src_on_img_gray(src_img, intensity=127, thickness=2)
ax2.imshow(src_img, cmap='gray')
ax2.add_patch(Polygon(np.float32(poly), True, edgecolor='#ff0000', fill=False))
plt.show()
# Run the demonstration
demonstrate_birdseye_reverse_usage()
|
# ROS stuff
import rospy
from urdf_parser_py.urdf import URDF
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
import copy
import numpy as np
# KDL utilities
import PyKDL
from pykdl_utils.kdl_kinematics import KDLKinematics
from pykdl_utils.kdl_parser import kdl_tree_from_urdf_model
# machine learning utils (python)
from sklearn.mixture import GMM
# tf stuff
import tf
import tf_conversions.posemath as pm
# input message types
import sensor_msgs
import trajectory_msgs
from trajectory_msgs.msg import JointTrajectoryPoint
from sensor_msgs.msg import JointState
# output message types
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseArray
from features import RobotFeatures
'''
RobotSkill
Defines a skill with a goal and a set of differential constraints
Goals are represented as the distribution of features that must be true for an action to be considered successful
'''
class RobotSkill:
'''
set up the robot skill
skills contain a model of expected features as they change over time
they also contain a description for our own purposes
oh, and which objects are involved
'''
def __init__(self,data=[],goals=[],action_k=4,goal_k=4,objs=[],manip_objs=[],name="",filename=None,num_gripper_vars=3,normalize=True):
self.name = name
self.action_model = GMM(n_components=action_k,covariance_type="full")
self.goal_model = GMM(n_components=goal_k,covariance_type="full")
self.trajectory_model = GMM(n_components=1,covariance_type="full")
# NOTE: gripper stuff is currently not supported. Take a look at this
# later on if you want to use it.
self.num_gripper_vars = num_gripper_vars
self.gripper_model = GMM(n_components=action_k,covariance_type="full")
self.objs = objs
self.manip_objs = manip_objs
        if filename is None and len(data) > 0:
''' compute means and normalize incoming data '''
if normalize:
self.action_mean = np.mean(data,axis=0)
self.action_std = np.std(data,axis=0)
self.goal_mean = np.mean(goals,axis=0)
self.goal_std = np.std(goals,axis=0)
else:
self.action_mean = np.ones(data.shape[1])
self.action_std = np.ones(data.shape[1])
self.goal_mean = np.ones(data.shape[1])
self.goal_std = np.ones(data.shape[1])
goals = (goals - self.goal_mean) / self.goal_std
data = (data - self.action_mean) / self.action_std
''' compute the actual models '''
# learn action, goal, and trajectory models
if goal_k > 1:# or True:
self.goal_model.fit(goals)
else:
self.goal_model.means_ = np.array([np.mean(goals,axis=0)])
self.goal_model.covars_ = np.array([np.cov(goals,rowvar=False)])
self.goal_model.covars_[0] += 1e-10 * np.eye(self.goal_model.covars_.shape[1])
if action_k > 1:# or True:
self.action_model.fit(data)
else:
self.action_model.means_ = np.array([np.mean(data,axis=0)])
self.action_model.covars_ = np.array([np.cov(data,rowvar=False)])
self.action_model.covars_[0] += 1e-10 * np.eye(self.action_model.covars_.shape[1])
self.t_factor = 0.1
if 'gripper' in objs:
# remove last few indices from
self.gripper_model = self.action_model
self.action_model = copy.deepcopy(self.gripper_model)
# marginalizing out vars in gaussians is easy
self.action_model.means_ = self.gripper_model.means_[:,:-num_gripper_vars]
self.action_model.covars_ = self.gripper_model.covars_[:,:-num_gripper_vars,:-num_gripper_vars]
self.goal_model.covars_ = self.goal_model.covars_[:,:-num_gripper_vars,:-num_gripper_vars]
self.goal_model.means_ = self.goal_model.means_[:,:-num_gripper_vars]
self.action_mean_ng = self.action_mean[:-num_gripper_vars]
self.action_std_ng = self.action_std[:-num_gripper_vars]
self.goal_mean_ng = self.goal_mean[:-num_gripper_vars]
self.goal_std_ng = self.goal_std[:-num_gripper_vars]
else:
self.action_mean_ng = self.action_mean
self.action_std_ng = self.action_std
self.goal_mean_ng = self.goal_mean
self.goal_std_ng = self.goal_std
        elif filename is not None:
            stream = open(filename, 'r')
            data = yaml.load(stream, Loader=Loader)
self.name = data['name']
self.action_model = data['action_model']
self.goal_model = data['goal_model']
self.gripper_model = data['gripper_model']
self.trajectory_model = data['trajectory_model']
self.objs = data['objs']
self.manip_objs = data['manip_objs']
self.num_gripper_vars = data['num_gripper_vars']
self.action_mean = data['action_mean']
self.action_std = data['action_std']
self.goal_mean = data['goal_mean']
self.goal_std = data['goal_std']
self.action_mean_ng = data['action_mean_ng']
self.action_std_ng = data['action_std_ng']
self.goal_mean_ng = data['goal_mean_ng']
self.goal_std_ng = data['goal_std_ng']
self.t_factor = 0.1
def GetGoalModel(self,objs,preset=None):
if preset is None:
robot = RobotFeatures()
else:
robot = RobotFeatures(preset=preset)
if 'gripper' in objs:
objs.remove('gripper')
for obj in objs:
robot.AddObject(obj)
dims = robot.max_index
K = self.action_model.n_components
goal = GMM(n_components=K,covariance_type="full")
goal.weights_ = self.action_model.weights_
goal.means_ = np.zeros((K,dims))
goal.covars_ = np.zeros((K,dims,dims))
idx = robot.GetDiffIndices(objs)
        print(objs)
for k in range(K):
goal.means_[k,:] = self.action_model.means_[k,idx]
for j in range(dims):
goal.covars_[k,j,idx] = self.action_model.covars_[k,j,idx]
return goal
'''
save the robot skill to a file
'''
def save(self,filename):
        stream = open(filename, 'w')
out = {}
out['name'] = self.name
out['action_model'] = self.action_model
out['goal_model'] = self.goal_model
out['gripper_model'] = self.gripper_model
out['trajectory_model'] = self.trajectory_model
out['objs'] = self.objs
out['manip_objs'] = self.manip_objs
out['num_gripper_vars'] = self.num_gripper_vars
out['action_mean'] = self.action_mean
out['action_std'] = self.action_std
out['goal_mean'] = self.goal_mean
out['goal_std'] = self.goal_std
out['action_mean_ng'] = self.action_mean_ng
out['action_std_ng'] = self.action_std_ng
out['goal_mean_ng'] = self.goal_mean_ng
out['goal_std_ng'] = self.goal_std_ng
yaml.dump(out,stream)
'''
execution loop update for trajectory skills
'''
    def update(self, trajs, p_z, t, p_obs=1):
        wts = np.zeros(len(trajs))
        for i in range(len(trajs)):
            p_exp = self.trajectory_model.score(trajs[i][:-1])
            p_exp_f = self.goal_model.score(trajs[i][-1])
            p_exp = np.concatenate((p_exp, p_exp_f))
            # t_factor is the decay rate passed as t_lambda to weight()
            wts[i] = weight(p_exp, 1, p_z[i], t[i], self.t_factor)
        return wts
'''
This function determines the weight on a given example point
p_expert: different for each point
p_obs: same (actually fixed at 1)
p_z: same for each trajectory
t: different for each point
'''
def weight(p_expert,p_obs,p_z,t,t_lambda=0.1):
return (p_expert * t_lambda**(1-t)) / (p_obs * p_z)
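# Illustrative check of the decay term (made-up numbers): with the default
# t_lambda = 0.1, a final point (t = 1) keeps its full expert probability,
# while an initial point (t = 0) is damped by a factor of 0.1:
#   weight(p_expert=0.5, p_obs=1.0, p_z=0.25, t=1.0)  # -> 2.0
#   weight(p_expert=0.5, p_obs=1.0, p_z=0.25, t=0.0)  # -> 0.2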
|
# -*- coding: utf-8 -*-
"""
This is parameter module for Averaged Neuron (AN) model.
These parameters are identical to those in Tatsuki et al., 2016
and Yoshida et al., 2018.
"""
__author__ = 'Fumiya Tatsuki, Kensuke Yoshida, Tetsuya Yamada, \
Takahiro Katsumata, Shoi Shi, Hiroki R. Ueda'
__status__ = 'Published'
__version__ = '1.0.0'
__date__ = '15 May 2020'
class Constants:
""" Constant values needed for AN model.
    These constant values are based on previous studies. See Tatsuki et al., 2016,
Yoshida et al., 2018 and Compte et al., 2003.
Attributes
----------
cm : float
membrane capacitance (uF/cm2)
area : float
area of neuron (mm2)
    tau_a : float
        time constant of inactivation variable for fast A-type potassium channel
s_a_ampar : float
coefficient of f(V)
s_tau_ampar : float
time constant of gating variable differential equation of AMPAR
x_a_nmdar : float
coefficient of f(V)
x_tau_nmdar : float
time constant for differential equation of second-order gating variable x
s_a_nmdar : float
coefficient of (1 - s)
s_tau_nmdar : float
time constant for differential equation of gating variable s
s_a_gabar : float
coefficient of f(V)
s_tau_gabar : float
time constant for differential equation of gating variable s
vL : float
equilibrium potential of leak channel (mV)
vNaL : float
equilibrium potential of leak sodium channel (mV)
vNa : float
equilibrium potential of sodium ion (mV)
vK : float
equilibrium potential of potassium ion (mV)
vCa : float
equilibrium potential of calcium channel (mV)
vAMPAR : float
equilibrium potential of AMPA receptor (mV)
vNMDAR : float
equilibrium potential of NMDA receptor (mV)
vGABAR : float
equilibrium potential of GABA receptor (mV)
an_ini : list (float)
initial parameters for differential equations of AN model:
v : membrane potential
h_nav : inactivation variable of voltage-gated sodium channel
n_kvhh : activation variable of HH-type voltage-gated
potassium channel
h_kva : inactivation variable of fast A-type potassium channel
m_kvsi : activation variable of slowly inactivating potassium channel
            s_ampar : gating variable of AMPA receptor
x_nmdar : second-order gating variable of NMDA receptor
s_nmdar : gating variable of NMDA receptor
s_gabar : gating variable of GABA receptor
ca : intracellular calcium concentration
san_ini : list (float)
initial parameters for differential equations of SAN model:
v : membrane potential
n_kvhh : activation variable of HH-type voltage-gated
potassium channel
ca : intracellular calcium concentration
"""
def __init__(self) -> None:
self.cm = 1.0
self.area = 0.02
self.a_ca = 0.5
self.kd_ca = 30.0
self.tau_a = 15.0
self.s_a_ampar = 3.48
self.s_tau_ampar = 2.0
self.s_a_nmdar = 0.5
self.s_tau_nmdar = 100.0
self.x_a_nmdar = 3.48
self.x_tau_nmdar = 2.0
self.s_a_gabar = 1.0
self.s_tau_gabar = 10.0
self.vL = -60.95
self.vNaL = 0.
self.vNa = 55.0
self.vK = -100.0
self.vCa = 120.0
self.vAMPAR = 0.
self.vNMDAR = 0.
self.vGABAR = -70.0
self.an_ini = [
-45., # 0 : v
0.045, # 1 : h_nav
0.54, # 2 : n_kvhh
0.045, # 3 : h_kva
0.34, # 4 : m_kvsi
0.01, # 5 : s_ampar
0.01, # 6 : x_nmdar
0.01, # 7 : s_nmdar
0.01, # 8 : s_gabar
1., # 9 : Ca
]
self.san_ini = [
-45., # 0 : v
0.54, # 1 : n_kvhh
1., # 2 : Ca
]
class Ion:
""" Constant values needed for AN model with ion and typical ion concentrations.
These constant values are based on Ramussen et al., 2017.
Attributes
---------
r : float
gas constant (J/K/mol)
t : float
        body temperature (K)
f : float
Faraday constant (C/mol)
awake_ion : dictionary (float)
        typical ion concentrations which recapitulate the awake firing pattern
sleep_ion : dictionary (float)
        typical ion concentrations which recapitulate the sleep (SWS) firing pattern
"""
def __init__(self) -> None:
self.r = 8.314472
self.t = 310.
self.f = 9.64853399 * 10000
self.awake_ion = {
'ex_na': 140,
'in_na': 7.0,
'ex_k': 4.4,
'in_k': 140,
'ex_cl': 140,
'in_cl': 7.0,
'ex_ca': 1.2,
'in_ca': 0.001,
'ex_mg': 0.7,
}
self.sleep_ion = {
'ex_na': 140.0,
'in_na': 7.0,
'ex_k': 3.9,
'in_k': 140.0,
'ex_cl': 140.0,
'in_cl': 7.0,
'ex_ca': 1.35,
'in_ca': 0.001,
'ex_mg': 0.8,
}
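        # R, T and F are presumably combined with these concentrations via
        # the Nernst equation elsewhere in the model:
        #     E_ion = (R * T) / (z * F) * ln([ion]_ex / [ion]_in)
        # e.g. for K+ (z = 1) with the awake values:
        #     (8.314 * 310 / 96485) * ln(4.4 / 140) ~= -0.092 V = -92 mV,
        # close to the vK = -100 mV used in Constants.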
class TypicalParam:
""" Typical parameter set that recapitulate a cirtain firing pattern.
Attributes
----------
an_sws : dictionary (float)
        typical parameter set which recapitulates the SWS firing pattern in the AN model
        See : Tatsuki et al., 2016
    san_sws : dictionary (float)
        typical parameter set which recapitulates the SWS firing pattern in the SAN model
        See : Yoshida et al., 2018, figure 1L.
"""
def __init__(self) -> None:
self.an_sws = {
'g_leak': 0.03573,
'g_nav': 12.2438,
'g_kvhh': 2.61868,
'g_kva': 1.79259,
'g_kvsi': 0.0350135,
'g_cav': 0.0256867,
'g_kca': 2.34906,
'g_nap': 0.0717984,
'g_kir': 0.0166454,
'g_ampar': 0.513425,
'g_nmdar': 0.00434132,
'g_gabar': 0.00252916,
't_ca': 121.403,
}
self.san_sws = {
'g_leak': 0.016307,
'g_kvhh': 19.20436,
'g_cav': 0.1624,
'g_kca': 0.7506,
'g_nap': 0.63314,
't_ca': 739.09,
}
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'slidecrop\resources\main.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(773, 559)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/newPrefix/icon.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.slide_tabWidget = QtWidgets.QTabWidget(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.slide_tabWidget.sizePolicy().hasHeightForWidth())
self.slide_tabWidget.setSizePolicy(sizePolicy)
self.slide_tabWidget.setObjectName("slide_tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.slide_tabWidget.addTab(self.tab, "")
self.horizontalLayout.addWidget(self.slide_tabWidget)
self.roi_list = QtWidgets.QTableWidget(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.roi_list.sizePolicy().hasHeightForWidth())
self.roi_list.setSizePolicy(sizePolicy)
self.roi_list.setObjectName("roi_list")
self.roi_list.setColumnCount(0)
self.roi_list.setRowCount(0)
self.horizontalLayout.addWidget(self.roi_list)
MainWindow.setCentralWidget(self.centralwidget)
self.toolBar = QtWidgets.QToolBar(MainWindow)
self.toolBar.setIconSize(QtCore.QSize(48, 48))
self.toolBar.setObjectName("toolBar")
MainWindow.addToolBar(QtCore.Qt.LeftToolBarArea, self.toolBar)
self.actionOpen = QtWidgets.QAction(MainWindow)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/newPrefix/Folder-icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionOpen.setIcon(icon1)
self.actionOpen.setObjectName("actionOpen")
self.actionThreshold = QtWidgets.QAction(MainWindow)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/newPrefix/threshold.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionThreshold.setIcon(icon2)
self.actionThreshold.setObjectName("actionThreshold")
self.actionSegment = QtWidgets.QAction(MainWindow)
self.actionSegment.setIcon(icon)
self.actionSegment.setObjectName("actionSegment")
self.actionROI = QtWidgets.QAction(MainWindow)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/newPrefix/roi.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionROI.setIcon(icon3)
self.actionROI.setObjectName("actionROI")
self.actionCrop = QtWidgets.QAction(MainWindow)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/newPrefix/crop.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionCrop.setIcon(icon4)
self.actionCrop.setObjectName("actionCrop")
self.actionBatch = QtWidgets.QAction(MainWindow)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/newPrefix/batch_crop.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionBatch.setIcon(icon5)
self.actionBatch.setObjectName("actionBatch")
self.actionExit = QtWidgets.QAction(MainWindow)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(":/newPrefix/Windows_Close_Program_Icon_64.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionExit.setIcon(icon6)
self.actionExit.setObjectName("actionExit")
self.toolBar.addAction(self.actionOpen)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionThreshold)
self.toolBar.addAction(self.actionSegment)
self.toolBar.addAction(self.actionROI)
self.toolBar.addAction(self.actionCrop)
self.toolBar.addAction(self.actionBatch)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionExit)
self.retranslateUi(MainWindow)
self.slide_tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "SlideCrop"))
self.slide_tabWidget.setTabText(self.slide_tabWidget.indexOf(self.tab), _translate("MainWindow", "Tab 1"))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
self.actionOpen.setText(_translate("MainWindow", "Open"))
self.actionThreshold.setText(_translate("MainWindow", "Threshold"))
self.actionSegment.setText(_translate("MainWindow", "Segment"))
self.actionROI.setText(_translate("MainWindow", "ROI"))
self.actionCrop.setText(_translate("MainWindow", "Crop"))
self.actionBatch.setText(_translate("MainWindow", "Batch"))
self.actionExit.setText(_translate("MainWindow", "Exit"))
from .. resources import resources_rc
|
def func(x, a=1, b=3):
return x + a - b
print(func(2))     # 0
print(func(5, 2))  # 4
print(func(3, b=0))  # 4
|
from unittest import TestCase
from unittest.mock import Mock, call, patch
from guet.commands import CommandMap
from guet.commands.help import UsageAction
class TestUsageAction(TestCase):
@patch('builtins.print')
def test_prints_all_descriptions_for_registered_commands(self, mock_print):
command_map = CommandMap()
command_map.add_command('test1', Mock(), 'description1')
command_map.add_command('test2', Mock(), 'description2')
usage_action = UsageAction(command_map)
usage_action.execute([])
mock_print.assert_has_calls([
call('usage: guet <command>\n'),
call('test1: description1'),
call('test2: description2')
])
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Backup manager manages volume backups.
Volume Backups are full copies of persistent volumes stored in a backup
store e.g. an object store or any other backup store if and when support is
added. They are usable without the original object being available. A
volume backup can be restored to the original volume it was created from or
any other available volume with a minimum size of the original volume.
Volume backups can be created, restored, deleted and listed.
**Related Flags**
:backup_topic: What :mod:`rpc` topic to listen to (default:
`cinder-backup`).
:backup_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.backup.manager.Manager`).
"""
from oslo.config import cfg
from cinder import context
from cinder import exception
from cinder import manager
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
backup_manager_opts = [
cfg.StrOpt('backup_driver',
default='cinder.backup.drivers.swift',
help='Driver to use for backups.',
deprecated_name='backup_service'),
]
# This map doesn't need to be extended in the future since it's only
# for old backup services
mapper = {'cinder.backup.services.swift': 'cinder.backup.drivers.swift',
'cinder.backup.services.ceph': 'cinder.backup.drivers.ceph'}
CONF = cfg.CONF
CONF.register_opts(backup_manager_opts)
class BackupManager(manager.SchedulerDependentManager):
"""Manages backup of block storage devices."""
RPC_API_VERSION = '1.0'
def __init__(self, service_name=None, *args, **kwargs):
self.service = importutils.import_module(self.driver_name)
self.az = CONF.storage_availability_zone
self.volume_manager = importutils.import_object(
CONF.volume_manager)
self.driver = self.volume_manager.driver
super(BackupManager, self).__init__(service_name='backup',
*args, **kwargs)
self.driver.db = self.db
@property
def driver_name(self):
"""This function maps old backup services to backup drivers."""
return self._map_service_to_driver(CONF.backup_driver)
def _map_service_to_driver(self, service):
"""Maps services to drivers."""
if service in mapper:
return mapper[service]
return service
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
ctxt = context.get_admin_context()
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
LOG.info(_("Cleaning up incomplete backup operations"))
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
for volume in volumes:
if volume['status'] == 'backing-up':
LOG.info(_('Resetting volume %s to available '
'(was backing-up)') % volume['id'])
self.volume_manager.detach_volume(ctxt, volume['id'])
if volume['status'] == 'restoring-backup':
LOG.info(_('Resetting volume %s to error_restoring '
'(was restoring-backup)') % volume['id'])
self.volume_manager.detach_volume(ctxt, volume['id'])
self.db.volume_update(ctxt, volume['id'],
{'status': 'error_restoring'})
# TODO(smulcahy) implement full resume of backup and restore
# operations on restart (rather than simply resetting)
backups = self.db.backup_get_all_by_host(ctxt, self.host)
for backup in backups:
if backup['status'] == 'creating':
LOG.info(_('Resetting backup %s to error '
'(was creating)') % backup['id'])
err = 'incomplete backup reset on manager restart'
self.db.backup_update(ctxt, backup['id'], {'status': 'error',
'fail_reason': err})
if backup['status'] == 'restoring':
LOG.info(_('Resetting backup %s to available '
'(was restoring)') % backup['id'])
self.db.backup_update(ctxt, backup['id'],
{'status': 'available'})
if backup['status'] == 'deleting':
LOG.info(_('Resuming delete on backup: %s') % backup['id'])
self.delete_backup(ctxt, backup['id'])
def create_backup(self, context, backup_id):
"""
Create volume backups using configured backup service.
"""
backup = self.db.backup_get(context, backup_id)
volume_id = backup['volume_id']
volume = self.db.volume_get(context, volume_id)
LOG.info(_('create_backup started, backup: %(backup_id)s for '
'volume: %(volume_id)s') %
{'backup_id': backup_id, 'volume_id': volume_id})
self.db.backup_update(context, backup_id, {'host': self.host,
'service':
self.driver_name})
expected_status = 'backing-up'
actual_status = volume['status']
if actual_status != expected_status:
err = _('create_backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self.db.backup_update(context, backup_id, {'status': 'error',
'fail_reason': err})
raise exception.InvalidVolume(reason=err)
expected_status = 'creating'
actual_status = backup['status']
if actual_status != expected_status:
err = _('create_backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self.db.volume_update(context, volume_id, {'status': 'available'})
self.db.backup_update(context, backup_id, {'status': 'error',
'fail_reason': err})
raise exception.InvalidBackup(reason=err)
try:
backup_service = self.service.get_backup_driver(context)
self.driver.backup_volume(context, backup, backup_service)
except Exception as err:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'available'})
self.db.backup_update(context, backup_id,
{'status': 'error',
'fail_reason': unicode(err)})
self.db.volume_update(context, volume_id, {'status': 'available'})
self.db.backup_update(context, backup_id, {'status': 'available',
'size': volume['size'],
'availability_zone':
self.az})
LOG.info(_('create_backup finished. backup: %s'), backup_id)
def restore_backup(self, context, backup_id, volume_id):
"""
Restore volume backups from configured backup service.
"""
LOG.info(_('restore_backup started, restoring backup: %(backup_id)s'
' to volume: %(volume_id)s') %
{'backup_id': backup_id, 'volume_id': volume_id})
backup = self.db.backup_get(context, backup_id)
volume = self.db.volume_get(context, volume_id)
self.db.backup_update(context, backup_id, {'host': self.host})
expected_status = 'restoring-backup'
actual_status = volume['status']
if actual_status != expected_status:
err = _('restore_backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s') % {
'expected_status': expected_status,
'actual_status': actual_status
}
self.db.backup_update(context, backup_id, {'status': 'available'})
raise exception.InvalidVolume(reason=err)
expected_status = 'restoring'
actual_status = backup['status']
if actual_status != expected_status:
err = _('restore_backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s') % {
'expected_status': expected_status,
'actual_status': actual_status
}
self.db.backup_update(context, backup_id, {'status': 'error',
'fail_reason': err})
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)
if volume['size'] > backup['size']:
LOG.warn('volume: %s, size: %d is larger than backup: %s, '
'size: %d, continuing with restore',
volume['id'], volume['size'],
backup['id'], backup['size'])
backup_service = self._map_service_to_driver(backup['service'])
configured_service = self.driver_name
if backup_service != configured_service:
err = _('restore_backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s]') % {
'configured_service': configured_service,
'backup_service': backup_service,
}
self.db.backup_update(context, backup_id, {'status': 'available'})
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)
try:
backup_service = self.service.get_backup_driver(context)
self.driver.restore_backup(context, backup, volume,
backup_service)
except Exception as err:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_restoring'})
self.db.backup_update(context, backup_id,
{'status': 'available'})
self.db.volume_update(context, volume_id, {'status': 'available'})
self.db.backup_update(context, backup_id, {'status': 'available'})
LOG.info(_('restore_backup finished, backup: %(backup_id)s restored'
' to volume: %(volume_id)s') %
{'backup_id': backup_id, 'volume_id': volume_id})
def delete_backup(self, context, backup_id):
"""
Delete volume backup from configured backup service.
"""
backup = self.db.backup_get(context, backup_id)
LOG.info(_('delete_backup started, backup: %s'), backup_id)
self.db.backup_update(context, backup_id, {'host': self.host})
expected_status = 'deleting'
actual_status = backup['status']
if actual_status != expected_status:
err = _('delete_backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self.db.backup_update(context, backup_id, {'status': 'error',
'fail_reason': err})
raise exception.InvalidBackup(reason=err)
backup_service = self._map_service_to_driver(backup['service'])
if backup_service is not None:
configured_service = self.driver_name
if backup_service != configured_service:
err = _('delete_backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s]') % {
'configured_service': configured_service,
'backup_service': backup_service,
}
self.db.backup_update(context, backup_id,
{'status': 'error'})
raise exception.InvalidBackup(reason=err)
try:
backup_service = self.service.get_backup_driver(context)
backup_service.delete(backup)
except Exception as err:
with excutils.save_and_reraise_exception():
self.db.backup_update(context, backup_id,
{'status': 'error',
'fail_reason':
unicode(err)})
context = context.elevated()
self.db.backup_destroy(context, backup_id)
LOG.info(_('delete_backup finished, backup %s deleted'), backup_id)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from math import sqrt
class point_analysis:
"""
analysis point
"""
def __init__(self, target_list, measure_list):
'''
initialize with target and measure data list
'''
self.target_list = target_list
self.measure_list = measure_list
def cal_accuracy(self):
'''
calculate point accuracy using formula:
all_accuracy_x = max(max of all point accuracy in x)
single_point_accuracy = max(abs(target_list[i] - measure_list[i]))
'''
accuracy_list = [0] * len(self.target_list)
for i in range(len(self.target_list)):
target_x = self.target_list[i][0]
target_y = self.target_list[i][1]
point_list = []
for repeat in self.measure_list[i]:
repeat_list = []
for (measure_x, measure_y) in repeat:
delta_x = abs(measure_x - target_x)
delta_y = abs(measure_y - target_y)
repeat_list.append([delta_x, delta_y])
point_list.append(repeat_list)
accuracy_list[i] = point_list
return accuracy_list
def cal_precision(self):
'''
calculate point precision using formula:
all_precision_x = max(max of all point precision in x)
        single_point_precision_x = 2 * sqrt(sum(measure_x ^ 2) / len(measure_list) - aver(measure_list) ^ 2)
'''
precision_list = [[0, 0]] * len(self.target_list)
for i in range(len(self.target_list)):
sum_x = 0
sum_y = 0
square_x_sum = 0
square_y_sum = 0
counter = 0
for repeat in self.measure_list[i]:
for (measure_x, measure_y) in repeat:
sum_x += measure_x
sum_y += measure_y
square_x_sum += (measure_x * measure_x)
square_y_sum += (measure_y * measure_y)
counter += 1
aver_x = sum_x / counter
aver_y = sum_y / counter
square_aver_x = square_x_sum / counter
square_aver_y = square_y_sum / counter
x_standard_deviation = sqrt(abs(square_aver_x - aver_x * aver_x))
y_standard_deviation = sqrt(abs(square_aver_y - aver_y * aver_y))
precision_list[i] = [2. * x_standard_deviation, 2. * y_standard_deviation]
return precision_list
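    # Note: sqrt(E[x^2] - E[x]^2) is the population standard deviation, so
    # each precision value returned above is 2 sigma of the measurements
    # on that axis.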
def cal_linearity(self):
'''
calculate point linearity using formula:
all_linearity = max(all linearity)
single_point_linearity = distance(measure, target)
'''
linearity_list = [0] * len(self.target_list)
for i in range(len(self.target_list)):
target_x = self.target_list[i][0]
target_y = self.target_list[i][1]
point_list = []
for repeat in self.measure_list[i]:
repeat_list = []
for (measure_x, measure_y) in repeat:
delta_x = measure_x - target_x
delta_y = measure_y - target_y
distance = sqrt(delta_x * delta_x + delta_y * delta_y)
repeat_list.append(distance)
point_list.append(repeat_list)
linearity_list[i] = point_list
return linearity_list
if __name__ == "__main__":
"""
this is only for test purpose
"""
target_list = [[10, 10], [20, 20], [30, 30], [40, 40], [50, 50]]
measure_list = [ \
[[[10.01, 10.03], [10.01, 10.03], [10.01, 10.03], [10.01, 10.03], [10.01, 10.03]]], \
[[[20.01, 20.03], [20.01, 20.03], [20.01, 20.03], [20.01, 20.03], [20.01, 20.03]]], \
[[[30.01, 30.03], [30.01, 30.03], [30.01, 30.03], [30.01, 30.03], [30.01, 30.03]]], \
[[[40.01, 40.03], [40.01, 40.03], [40.01, 40.03], [40.01, 40.03], [40.01, 40.03]]], \
[[[50.01, 50.03], [50.01, 50.03], [50.01, 50.03], [50.01, 50.03], [50.01, 50.03]]]]
point_fd = point_analysis(target_list, measure_list)
accuracy_list = point_fd.cal_accuracy()
precision_list = point_fd.cal_precision()
linearity_list = point_fd.cal_linearity()
for i in range(len(target_list)):
print("\nNO.%d point, target coordinate (%f, %f)" % ( i + 1, target_list[i][0], target_list[i][1] ))
for j in range(len(measure_list[i])):
print("\tRepeat %d" % ( j + 1 ))
for k in range(len(measure_list[i][j])):
print("\t\tMeasured point %d ------------------------ (%f, %f)" % ( k + 1, measure_list[i][j][k][0], measure_list[i][j][k][1] ))
print("\t\t\tAccuracy: (%f, %f)" % ( accuracy_list[i][j][k][0], accuracy_list[i][j][k][1] ))
print("\t\t\tLinearity: %f" % ( linearity_list[i][j][k] ))
print("Precision: (%f, %f)" % ( precision_list[i][0], precision_list[i][1] ))
|
"""This module provides the CLI entry point via main().
"""
__version__ = "0.1.0"
from .cli.cli import UppyylSimulatorCLI
def main():
"""The main function."""
prompt = UppyylSimulatorCLI()
prompt.cmdloop()
|
with open("input1.txt","r") as f:
data = f.readlines()
# Data Formats
# List of lines
# Lines are a list, size 2
# Line Format
# - Index 0 = Parent
# - Index 1 = Node
# Dict format:
#   Key = Parent Node
#   Value = list of Child Nodes
Dict = {}
# Loop over every piece of data
for line in data:
line = line.replace("\n","").split(")")
    check = Dict.get(line[0])
    if check is not None:
        Dict[line[0]].append(line[1])
    else:
        Dict[line[0]] = [line[1]]
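# Example: an input line "B)C" (meaning C orbits B) ends up as Dict == {"B": ["C"]}.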
def getNodeParentFromNode(nodeToFind):
for key, value in Dict.items():
if nodeToFind in value:
return key
def generateFromNode(nodeStart):
nodes = {}
count = 0
currentNode = nodeStart
while currentNode != "COM":
nodes[count] = currentNode
count += 1
currentNode = getNodeParentFromNode(currentNode)
return nodes
youTravel = generateFromNode("YOU")
sanTravel = generateFromNode("SAN")
youPointOfIntersect = 0
sanPointOfIntersect = 0
for key, value in youTravel.items():
if value in sanTravel.values():
youPointOfIntersect = key
break
for key, value in sanTravel.items():
if value in youTravel.values():
sanPointOfIntersect = key
break
print("Node at sanPointOfIntersect({}) is {}".format(sanPointOfIntersect, sanTravel[sanPointOfIntersect]))
print("Node at youPointOfIntersect({}) is {}".format(youPointOfIntersect, youTravel[youPointOfIntersect]))
# Magic number 2: we want to reach the node SAN is orbiting (remove 1 for
# SAN's own position), and we must ignore our own ship as a satellite
# (remove 1 more).
|
import fnmatch
import os
from typing import List, Optional
def exist(file_path: str) -> bool:
    return os.path.exists(file_path)
def not_exist(file_path: str) -> bool:
    return not os.path.exists(file_path)
def is_dir(file_path: str) -> bool:
    return os.path.isdir(file_path)
def is_file(file_path: str) -> bool:
    return os.path.isfile(file_path)
def all_file(top: str, *patterns: str) -> List[str]:
_files = []
for root, dirs, files in os.walk(top, topdown=True):
files.sort()
for fname in files:
for pt in patterns:
if fnmatch.fnmatch(fname, pt):
_files.append(os.path.join(root, fname))
return _files
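# Illustrative usage (hypothetical layout): all_file('project', '*.py', '*.md')
# walks 'project' top-down and might return
# ['project/setup.py', 'project/docs/index.md']; patterns use shell-style
# fnmatch semantics, so '*.py' matches any name ending in '.py'.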
def file_name(file_path: str) -> Optional[str]:
    if is_file(file_path):
        return os.path.basename(file_path)
    return None
|
"""Container for DL-MONTE FED flavour option parameters
The class structure is:
FEDFlavour
Generic
PhaseSwitch
Each concrete class provides a class method from_string() method to
generate an instance from the appropriate DL CONTROL file entry,
while the __str__() method returns a valid string of the same form.
The DL-MONTE internal representation is in fed_interface_type.f90
"""
from collections import OrderedDict
FLAVOURS = ("gen", "generic", "ps")
"""The available flavours, key strings thereof"""
class FEDFlavour(object):
"""Abstract container for DL-MONTE FED flavour"""
def __init__(self, nfreq=None, keys=None):
"""Initialise container content.
Arguments:
nfreq (integer): frequency of fed (if specified on use fed line)
keys (OrderedDict): key/values describing further fed structure
"""
self.nfreq = None
self.keys = OrderedDict()
if nfreq is not None:
self.nfreq = nfreq
if keys is not None:
self.keys = keys
def __str__(self):
"""Implementeted by subclasses"""
raise NotImplementedError("Should be implemented by subclass")
@classmethod
def from_string(cls, dlstr):
"""Implementated by subclasses"""
raise NotImplementedError("Should be implemented by subclass")
def __repr__(self):
"""Return current state"""
repme = "nfreq= {!r}".format(self.nfreq)
for key in self.keys:
repme += ", {}= {!r}".format(key, self.keys[key])
return "{}({})".format(type(self).__name__, repme)
@staticmethod
def _parse_use_fed_line(dlstr):
"""Parse: 'use fed <flavour> [nfreq]' and return flavour, nfreq"""
try:
tokens = dlstr.lower().split()
flavour = tokens[2]
if tokens[0] != "use" or tokens[1] != "fed":
raise ValueError()
if flavour not in FLAVOURS:
raise ValueError()
try:
nfreq = int(tokens[3])
except IndexError:
# assume optional argument not present
nfreq = None
except (ValueError, IndexError):
usage = "use fed <flavour> [nfreq]"
raise ValueError("Expected {!r}; got {!r}".format(usage, dlstr))
return flavour, nfreq
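    # Illustrative parses (not taken from the DL-MONTE docs):
    #   "use fed generic"  -> ("generic", None)
    #   "use fed ps 100"   -> ("ps", 100)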
class Generic(FEDFlavour):
"""Generic flavour FED container"""
_defaults = {"nfreq": 1}
def __str__(self):
"""Return the DL-MONTE CONTROL file string form"""
strme = "use fed generic"
if self.nfreq is not None:
strme = "{} {}".format(strme, self.nfreq)
return strme
@classmethod
def from_string(cls, dlstr):
"""Genrete an instance form DL CONTROL line"""
flavour, nfreq = FEDFlavour._parse_use_fed_line(dlstr)
if flavour != "gen" and flavour != "generic":
usage = "use fed gen[eric] [nfreq]"
raise ValueError("Expected {!r}; got {!r}".format(usage, dlstr))
return Generic(nfreq)
class PhaseSwitch(FEDFlavour):
"""Phase Switch container object following psmc_control_type.f90"""
# Here's a dict of allowed keywords (with default values)
_defaults = {"nfreq": 1, \
"switchfreq": 0, \
"initactive": 1, \
"datafreq": 100, \
"meltcheck": True, \
"meltthresh": 10, \
"meltfreq": 1000}
def __str__(self):
"""Returns a well-formed DL-CONTROL file entry"""
listme = []
if self.nfreq is None:
listme.append("use fed ps")
else:
listme.append("use fed ps {}".format(self.nfreq))
for key in self.keys:
# "meltcheck" appears without a value
if key == "meltcheck":
listme.append(" meltcheck")
else:
listme.append(" {} {}".format(key, self.keys[key]))
listme.append("ps done")
return "\n".join(listme)
@classmethod
def from_string(cls, dlstr):
"""Generate instance from DL CONTROL file block
Arguments:
dlstr (string): lines with blank lines and comments removed,
which should look like:
use fed ps [nfreq]
keyword1 value1
keyword2 value2
...
ps done
"""
lines = dlstr.splitlines()
line = lines.pop(0)
flavour, nfreq = FEDFlavour._parse_use_fed_line(line)
keys = OrderedDict()
if flavour != "ps":
usage = "use fed ps [nfreq]"
raise ValueError("Expected {}; got {!r}".format(usage, line))
done = False
try:
while not done:
line = lines.pop(0)
tokens = line.lower().split()
if tokens[0] == "ps" and tokens[1] == "done":
done = True
break
key = tokens[0]
if key == "switchfreq":
item = {"switchfreq": int(tokens[1])}
elif key.startswith("init"):
item = {"initactive": int(tokens[1])}
elif key == "datafreq":
item = {"datafreq": int(tokens[1])}
elif key == "meltcheck":
item = {"meltcheck": True}
elif key == "meltthresh":
item = {"meltthresh": float(tokens[1])}
elif key == "meltfreq":
item = {"meltfreq": int(tokens[1])}
else:
# Get out of this loop and fail
break
keys.update(item)
except (ValueError, IndexError):
raise ValueError("Parsing failed at line {!r}".format(line))
if not done:
msg = "Unrecognised PSMC keyword encountered before 'ps done'"
raise ValueError("{}: {!r}".format(msg, line))
return PhaseSwitch(nfreq, keys)
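
# Hedged round-trip sketch (not part of the original module; it assumes this
# file is importable and that FLAVOURS contains "ps"): parse a DL CONTROL
# block into a PhaseSwitch and render it back to text.
if __name__ == "__main__":
    block = "\n".join([
        "use fed ps 10",
        "switchfreq 5",
        "meltcheck",
        "ps done",
    ])
    ps = PhaseSwitch.from_string(block)
    print(repr(ps))  # PhaseSwitch(nfreq= 10, switchfreq= 5, meltcheck= True)
    print(ps)        # regenerates the "use fed ps ... ps done" block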
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portions Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import os
from twisted.internet import defer
from buildbot.config import error
from buildbot.worker.base import Worker
class LocalWorker(Worker):
def checkConfig(self, name, workdir=None, usePty=False, **kwargs):
Worker.checkConfig(self, name, None, **kwargs)
self.LocalWorkerFactory = None
try:
# importing here to avoid dependency on buildbot worker package
from buildbot_worker.bot import LocalWorker as RemoteLocalWorker
self.LocalWorkerFactory = RemoteLocalWorker
except ImportError:
error("LocalWorker needs the buildbot-worker package installed "
"(pip install buildbot-worker)")
self.remote_worker = None
@defer.inlineCallbacks
def reconfigService(self, name, workdir=None, usePty=False, **kwargs):
Worker.reconfigService(self, name, None, **kwargs)
if workdir is None:
workdir = name
workdir = os.path.abspath(
os.path.join(self.master.basedir, "workers", workdir))
if not os.path.isdir(workdir):
os.makedirs(workdir)
if self.remote_worker is None:
            # Create the actual worker as a child service. It is created only
            # once, at the first reconfig, and reused on later reconfigs.
self.remote_worker = self.LocalWorkerFactory(name, workdir, usePty)
yield self.remote_worker.setServiceParent(self)
else:
            # In the case of a reconfig, forward the new parameters
self.remote_worker.bot.basedir = workdir
self.remote_worker.usePty = usePty
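
# Hedged usage sketch (not from the original file): in a buildmaster's
# master.cfg a LocalWorker is declared like any other worker; the worker
# name below is illustrative.
#
#     from buildbot.plugins import worker
#     c['workers'] = [worker.LocalWorker('local-worker')]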
|
import dataclasses
import typing
from flask_sqlalchemy import BaseQuery
from sqlalchemy import func
from geoalchemy2 import Geometry
from airq.lib.clock import timestamp
from airq.lib.geo import haversine_distance
from airq.lib.readings import ConversionFactor
from airq.lib.readings import Pm25
from airq.lib.readings import Readings
from airq.config import db
@dataclasses.dataclass
class ZipcodeMetrics:
num_sensors: int
min_sensor_distance: int
max_sensor_distance: int
sensor_ids: typing.List[int]
class ZipcodeQuery(BaseQuery):
def get_by_zipcode(self, zipcode: str) -> typing.Optional["Zipcode"]:
return self.filter_by(zipcode=zipcode).first()
def order_by_distance(self, zipcode: "Zipcode") -> "ZipcodeQuery":
return self.order_by(
func.ST_DistanceSphere(Zipcode.coordinates, zipcode.coordinates)
)
class Zipcode(db.Model): # type: ignore
__tablename__ = "zipcodes"
query_class = ZipcodeQuery
id = db.Column(db.Integer(), nullable=False, primary_key=True)
zipcode = db.Column(db.String(), nullable=False, unique=True, index=True)
city_id = db.Column(db.Integer(), db.ForeignKey("cities.id"), nullable=False)
latitude = db.Column(db.Float(asdecimal=True), nullable=False)
longitude = db.Column(db.Float(asdecimal=True), nullable=False)
timezone = db.Column(db.String(), nullable=True)
geohash_bit_1 = db.Column(db.String(), nullable=False)
geohash_bit_2 = db.Column(db.String(), nullable=False)
geohash_bit_3 = db.Column(db.String(), nullable=False)
geohash_bit_4 = db.Column(db.String(), nullable=False)
geohash_bit_5 = db.Column(db.String(), nullable=False)
geohash_bit_6 = db.Column(db.String(), nullable=False)
geohash_bit_7 = db.Column(db.String(), nullable=False)
geohash_bit_8 = db.Column(db.String(), nullable=False)
geohash_bit_9 = db.Column(db.String(), nullable=False)
geohash_bit_10 = db.Column(db.String(), nullable=False)
geohash_bit_11 = db.Column(db.String(), nullable=False)
geohash_bit_12 = db.Column(db.String(), nullable=False)
coordinates = db.Column(Geometry("POINT"), nullable=True)
pm25 = db.Column(db.Float(), nullable=False, index=True, server_default="0")
humidity = db.Column(db.Float(), nullable=False, server_default="0")
pm_cf_1 = db.Column(db.Float(), nullable=False, server_default="0")
pm25_updated_at = db.Column(
db.Integer(), nullable=False, index=True, server_default="0"
)
metrics_data = db.Column(db.JSON(), nullable=True)
city = db.relationship("City")
def __repr__(self) -> str:
return f"<Zipcode {self.zipcode}>"
def __new__(cls, *args, **kwargs):
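        # SQLAlchemy reconstructs instances loaded from the database without
        # calling __init__, so per-instance state such as the distance cache
        # is initialised in __new__ instead.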
obj = super().__new__(cls)
obj._distance_cache = {}
return obj
def get_metrics(self) -> ZipcodeMetrics:
if not hasattr(self, "_metrics"):
self._metrics = ZipcodeMetrics(
num_sensors=self.metrics_data["num_sensors"],
max_sensor_distance=self.metrics_data["max_sensor_distance"],
min_sensor_distance=self.metrics_data["min_sensor_distance"],
sensor_ids=self.metrics_data["sensor_ids"],
)
return self._metrics
def get_readings(self) -> Readings:
return Readings(pm25=self.pm25, pm_cf_1=self.pm_cf_1, humidity=self.humidity)
@property
def num_sensors(self) -> int:
return self.get_metrics().num_sensors
@property
def max_sensor_distance(self) -> int:
return self.get_metrics().max_sensor_distance
@property
def min_sensor_distance(self) -> int:
return self.get_metrics().min_sensor_distance
@property
def geohash(self) -> str:
"""This zipcode's geohash."""
return "".join([getattr(self, f"geohash_bit_{i}") for i in range(1, 13)])
@classmethod
def pm25_stale_cutoff(cls) -> float:
"""Timestamp before which pm25 measurements are considered stale."""
return timestamp() - (60 * 60)
@property
def is_pm25_stale(self) -> bool:
"""Whether this zipcode's pm25 measurements are considered stale."""
return self.pm25_updated_at < self.pm25_stale_cutoff()
def distance(self, other: "Zipcode") -> float:
"""Distance between this zip and the given zip."""
if other.id in self._distance_cache:
return self._distance_cache[other.id]
if self.id in other._distance_cache:
return other._distance_cache[self.id]
self._distance_cache[other.id] = haversine_distance(
other.longitude,
other.latitude,
self.longitude,
self.latitude,
)
return self._distance_cache[other.id]
def get_aqi(self, conversion_factor: ConversionFactor) -> int:
"""The AQI for this zipcode (e.g., 35) as determined by the provided strategy."""
return self.get_readings().get_aqi(conversion_factor)
def get_pm25(self, conversion_factor: ConversionFactor) -> float:
"""Current pm25 for this client, as determined by the provided strategy."""
return self.get_readings().get_pm25(conversion_factor)
def get_pm25_level(self, conversion_factor: ConversionFactor) -> Pm25:
"""The pm25 category for this zipcode (e.g., Moderate)."""
return self.get_readings().get_pm25_level(conversion_factor)
def get_recommendations(
self, num_desired: int, conversion_factor: ConversionFactor
) -> typing.List["Zipcode"]:
"""Get n recommended zipcodes near this zipcode, sorted by distance."""
if self.is_pm25_stale:
return []
cutoff = self.pm25_stale_cutoff()
# TODO: Make this faster somehow?
curr_pm25_level = self.get_pm25_level(conversion_factor)
zipcodes = [
z
for z in Zipcode.query.filter(
Zipcode.pm25_updated_at > cutoff
).order_by_distance(self)
if z.get_pm25_level(conversion_factor) < curr_pm25_level
]
return zipcodes[:num_desired]
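
# Usage sketch (requires an application context and seeded data; the zipcode
# value is illustrative and ConversionFactor.NONE is a hypothetical member --
# substitute a real one from airq.lib.readings):
#   zipcode = Zipcode.query.get_by_zipcode("94110")
#   if zipcode is not None and not zipcode.is_pm25_stale:
#       nearby = zipcode.get_recommendations(3, ConversionFactor.NONE)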
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0007_auto_20151015_1938'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='feed_size',
field=models.PositiveIntegerField(default=20),
preserve_default=True,
),
]
|
import io
class IterStream(io.RawIOBase):
"""Wraps an iterator yielding bytes as a file object"""
def __init__(self, iterator):
self.iterator = iterator
self.leftover = None
def readable(self):
return True
    # Python 3 requires only the .readinto() method; the io machinery still
    # uses the other read methods under some circumstances and falls back to
    # .readinto() when they are absent. Since the iterator already constructs
    # byte strings for us, .readinto() forces an extra copy, so we provide
    # .read1() too.
def readinto(self, b):
try:
n = len(b) # We're supposed to return at most this much
chunk = self.leftover or next(self.iterator)
output, self.leftover = chunk[:n], chunk[n:]
n_out = len(output)
b[:n_out] = output
return n_out
except StopIteration:
return 0 # indicate EOF
readinto1 = readinto
def read1(self, n=-1):
try:
chunk = self.leftover or next(self.iterator)
except StopIteration:
return b""
        # Return at most n bytes of the next chunk (the whole chunk if n <= 0)
if n <= 0:
self.leftover = None
return chunk
output, self.leftover = chunk[:n], chunk[n:]
return output
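
# Minimal usage sketch (any iterator of bytes works): wrapping IterStream in
# io.BufferedReader gives a standard buffered file object, so .readline() and
# .read() behave as usual.
if __name__ == "__main__":
    chunks = iter([b"hello ", b"world\n", b"bye\n"])
    stream = io.BufferedReader(IterStream(chunks))
    print(stream.readline())  # b'hello world\n'
    print(stream.read())      # b'bye\n'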
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
@login_required()
def index(request):
    # Handle GET requests
params = {
}
return render(request, 'home/index.htm', params)
|
import os
from typing import List
import difflib
from robot.api.parsing import (
ModelVisitor,
Token
)
from robot.parsing.model import Statement
from click import style
class StatementLinesCollector(ModelVisitor):
"""
    Used to get a writable text representation of a Robot Framework model.
"""
def __init__(self, model):
self.text = ''
self.visit(model)
def visit_Statement(self, node): # noqa
for token in node.tokens:
self.text += token.value
def __eq__(self, other):
return other.text == self.text
class GlobalFormattingConfig:
def __init__(self, space_count: int, line_sep: str, start_line: int, end_line: int):
self.space_count = space_count
self.start_line = start_line
self.end_line = end_line
if line_sep == 'windows':
self.line_sep = '\r\n'
elif line_sep == 'unix':
self.line_sep = '\n'
else:
self.line_sep = os.linesep
def decorate_diff_with_color(contents: List[str]) -> str:
"""Inject the ANSI color codes to the diff."""
for i, line in enumerate(contents):
if line.startswith("+++") or line.startswith("---"):
line = style(line, bold=True, reset=True)
elif line.startswith("@@"):
line = style(line, fg='cyan', reset=True)
elif line.startswith("+"):
line = style(line, fg='green', reset=True)
elif line.startswith("-"):
line = style(line, fg='red', reset=True)
contents[i] = line
return '\n'.join(contents)
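
# Illustrative only: decorate_diff_with_color is typically fed the output of
# difflib.unified_diff, e.g.
#
#     diff = list(difflib.unified_diff(old.splitlines(), new.splitlines(),
#                                      fromfile='before', tofile='after',
#                                      lineterm=''))
#     print(decorate_diff_with_color(diff))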
def normalize_name(name):
return name.lower().replace('_', '').replace(' ', '')
def after_last_dot(name):
return name.split('.')[-1]
def node_within_lines(node_start, node_end, start_line, end_line):
if start_line:
if node_start < start_line:
return False
if end_line:
if node_end > end_line:
return False
else:
if start_line != node_start:
return False
return True
def node_outside_selection(node, formatting_config):
"""
    Unlike ``node_within_lines``, this only checks whether the node lies fully
    outside the selected lines. Partial selection is useful for transformers
    that align code.
"""
if formatting_config.start_line and formatting_config.start_line > node.end_lineno or \
formatting_config.end_line and formatting_config.end_line < node.lineno:
return True
return False
def split_args_from_name_or_path(name):
"""Split arguments embedded to name or path like ``Example:arg1:arg2``.
The separator can be either colon ``:`` or semicolon ``;``. If both are used,
the first one is considered to be the separator.
"""
if os.path.exists(name):
return name, []
index = _get_arg_separator_index_from_name_or_path(name)
if index == -1:
return name, []
args = name[index+1:].split(name[index])
name = name[:index]
return name, args
def _get_arg_separator_index_from_name_or_path(name):
colon_index = name.find(':')
# Handle absolute Windows paths
if colon_index == 1 and name[2:3] in ('/', '\\'):
colon_index = name.find(':', colon_index+1)
semicolon_index = name.find(';')
if colon_index == -1:
return semicolon_index
if semicolon_index == -1:
return colon_index
return min(colon_index, semicolon_index)
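
# For example (assuming no file with these literal names exists on disk):
#   split_args_from_name_or_path('Example:arg1:arg2')    -> ('Example', ['arg1', 'arg2'])
#   split_args_from_name_or_path(r'C:\transformer.py;x') -> ('C:\\transformer.py', ['x'])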
def round_to_four(number):
div = number % 4
if div:
return number + 4 - div
return number
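
# e.g. round_to_four(5) == 8 and round_to_four(8) == 8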
def any_non_sep(tokens):
return any(token.type not in (Token.EOL, Token.SEPARATOR, Token.EOS) for token in tokens)
def tokens_by_lines(node):
for line in node.lines:
if not any_non_sep(line):
continue
if line:
if line[0].type == Token.VARIABLE:
if line[0].value:
line[0].value = line[0].value.lstrip()
else:
# if variable is prefixed with spaces
line = line[1:]
elif line[0].type == Token.ARGUMENT:
line[0].value = line[0].value.strip() if line[0].value else line[0].value
yield [token for token in line if token.type not in (Token.SEPARATOR, Token.EOS)]
def left_align(node):
""" remove leading separator token """
tokens = list(node.tokens)
if tokens:
tokens[0].value = tokens[0].value.lstrip(' \t')
return Statement.from_tokens(tokens)
def remove_rst_formatting(text):
return text.replace('::', ':').replace("``", "'")
class RecommendationFinder:
def find_similar(self, name, candidates):
norm_name = name.lower()
norm_cand = self.get_normalized_candidates(candidates)
matches = self.find(norm_name, norm_cand.keys())
if not matches:
return ''
matches = self.get_original_candidates(matches, norm_cand)
suggestion = ' Did you mean:\n'
suggestion += '\n'.join(f' {match}' for match in matches)
return suggestion
def find(self, name, candidates, max_matches=2):
""" Return a list of close matches to `name` from `candidates`. """
if not name or not candidates:
return []
cutoff = self._calculate_cutoff(name)
return difflib.get_close_matches(
name, candidates, n=max_matches, cutoff=cutoff
)
@staticmethod
def _calculate_cutoff(string, min_cutoff=.5, max_cutoff=.85,
step=.03):
""" The longer the string the bigger required cutoff. """
cutoff = min_cutoff + len(string) * step
return min(cutoff, max_cutoff)
@staticmethod
def get_original_candidates(candidates, norm_candidates):
""" Map found normalized candidates to unique original candidates. """
return sorted(list(set(c for cand in candidates for c in norm_candidates[cand])))
@staticmethod
def get_normalized_candidates(candidates):
norm_cand = {cand.lower(): [cand] for cand in candidates}
# most popular typos
norm_cand['align'] = ['AlignSettingsSection', 'AlignVariablesSection']
norm_cand['normalize'] = ['NormalizeAssignments', 'NormalizeNewLines', 'NormalizeSectionHeaderName',
'NormalizeSeparators', 'NormalizeSettingName']
norm_cand['order'] = ['OrderSettings', 'OrderSettingsSection']
norm_cand['alignsettings'] = ['AlignSettingsSection']
norm_cand['alignvariables'] = ['AlignVariablesSection']
norm_cand['assignmentnormalizer'] = ['NormalizeAssignments']
return norm_cand
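
# Quick sketch of the recommendation helper (the candidate names are the ones
# hard-coded above; the query string is illustrative):
#   finder = RecommendationFinder()
#   finder.find_similar('AlignSettings', ['AlignSettingsSection', 'OrderSettings'])
#   # -> " Did you mean:\n    AlignSettingsSection"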
|
from kivy.uix.button import Button
class LanguageButton(Button):
pass
|
import os
from handcash_connect_sdk import HandcashCloudAccount, environments
def test_api_authorization():
auth_token = os.environ["HC_AUTH_TOKEN"]
handcash_cloud_account = HandcashCloudAccount.from_auth_token(auth_token, environments.PROD)
handcash_cloud_account.profile.get_current_profile()
|
from __future__ import print_function
from __future__ import division
import os
import codecs
import collections
from random import shuffle
import numpy as np
import pickle
class Vocab:
def __init__(self, token2index=None, index2token=None):
self._token2index = token2index or {}
self._index2token = index2token or []
def feed(self, token):
if token not in self._token2index:
# allocate new index for this token
index = len(self._token2index)
self._token2index[token] = index
self._index2token.append(token)
return self._token2index[token]
@property
def size(self):
return len(self._token2index)
def token(self, index):
return self._index2token[index]
def __getitem__(self, token):
index = self.get(token)
if index is None:
raise KeyError(token)
return index
def get(self, token, default=None):
return self._token2index.get(token, default)
def change(self, tokens):
res = ""
for token in tokens:
res += str(self.token(token))
return res
def save(self, filename):
with open(filename, 'wb') as f:
pickle.dump((self._token2index, self._index2token), f, pickle.HIGHEST_PROTOCOL)
@classmethod
def load(cls, filename):
with open(filename, 'rb') as f:
token2index, index2token = pickle.load(f)
return cls(token2index, index2token)
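
# Vocab usage sketch (values are illustrative):
#   vocab = Vocab()
#   vocab.feed(' ')        # blank gets index 0
#   idx = vocab.feed('a')  # repeated feeds return the same index
#   assert vocab.token(idx) == 'a' and vocab['a'] == idx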
def load_data(data_dir, max_word_length):
char_vocab = Vocab()
char_vocab.feed(' ') # blank is at index 0 in char vocab
actual_max_word_length = 0
char_tokens = collections.defaultdict(list)
for fname in ['train']:
print('reading', fname)
with codecs.open(os.path.join(data_dir, fname + '.txt'), 'r', 'utf-8') as f:
for line in f:
line = line.strip()
if len(line) > max_word_length:
continue
# line += '*'
# line = line.split(".")[0]
char_array = [char_vocab.feed(c) for c in line]
char_tokens[fname].append(char_array)
actual_max_word_length = max(actual_max_word_length, len(char_array))
print('actual longest token length is:', actual_max_word_length)
print('size of char vocabulary:', char_vocab.size)
assert actual_max_word_length <= max_word_length
# now we know the sizes, create tensors
char_tensors = {}
char_lens = {}
for fname in ['train']:
char_tensors[fname] = np.zeros([len(char_tokens[fname]), actual_max_word_length], dtype=np.int32)
char_lens[fname] = np.zeros([len(char_tokens[fname])], dtype=np.int32)
for i, char_array in enumerate(char_tokens[fname]):
char_tensors[fname][i, :len(char_array)] = char_array
char_lens[fname][i] = len(char_array)
return char_vocab, char_tensors, char_lens, actual_max_word_length
class DataReader:
def __init__(self, char_tensor, char_lens, batch_size):
max_word_length = char_tensor.shape[1]
rollup_size = char_tensor.shape[0] // batch_size * batch_size
char_tensor = char_tensor[: rollup_size]
char_lens = char_lens[: rollup_size]
self.indexes = list(range(rollup_size // batch_size))
shuffle(self.indexes)
# round down length to whole number of slices
x_batches = char_tensor.reshape([batch_size, -1, max_word_length])
y_batches = char_lens.reshape([batch_size, -1])
x_batches = np.transpose(x_batches, axes=(1, 0, 2))
y_batches = np.transpose(y_batches, axes=(1, 0))
self._x_batches = list(x_batches)
self._y_batches = list(y_batches)
self.batch_size = batch_size
self.length = len(self._x_batches)
def shuf(self):
shuffle(self.indexes)
def iter(self):
for i in self.indexes:
yield self._x_batches[i], self._y_batches[i]
if __name__ == '__main__':
_, ct, cl, _ = load_data('dga_data', 65)
print(ct.keys())
count = 0
for x, y in DataReader(ct['train'], cl['train'], 35).iter():
count += 1
print(y)
if count > 0:
break
|
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import app, gl, gloo, data
from glumpy.geometry import primitives
from glumpy.transforms import PanZoom
vertex = """
attribute vec3 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main()
{
gl_Position = <transform(vec4(position.xy,0,1.0))>;
v_texcoord = texcoord;
}
"""
fragment = """
#include "misc/spatial-filters.frag"
#include "colormaps/colormaps.glsl"
uniform sampler2D data;
uniform vec2 data_shape;
varying vec2 v_texcoord;
void main()
{
// Extract data value
float value = Bicubic(data, data_shape, v_texcoord).r;
// Map value to rgb color
vec4 bg_color = vec4(colormap_hot(value),1.0);
vec4 fg_color = vec4(0,0,0,1);
// Trace contour
float levels = 16.0;
float antialias = 1.0;
float linewidth = 1.0 + antialias;
if(length(value-0.5) < 0.5/levels)
linewidth = 3.0 + antialias;
float v = levels*value - 0.5;
float dv = linewidth/2.0 * fwidth(v);
float f = abs(fract(v) - 0.5);
float d = smoothstep(-dv,+dv,f);
float t = linewidth/2.0 - antialias;
d = abs(d)*linewidth/2.0 - t;
if( d < 0.0 ) {
gl_FragColor = bg_color;
} else {
d /= antialias;
gl_FragColor = mix(fg_color,bg_color,d);
}
} """
window = app.Window(800, 800, color = (1,1,1,1))
@window.event
def on_draw(dt):
window.clear()
program.draw(gl.GL_TRIANGLES, I)
@window.event
def on_key_press(key, modifiers):
if key == app.window.key.SPACE:
transform.reset()
def func3(x,y):
return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2)
x = np.linspace(-2.0, 2.0, 256).astype(np.float32)
y = np.linspace(-2.0, 2.0, 256).astype(np.float32)
X,Y = np.meshgrid(x, y)
Z = func3(X,Y)
program = gloo.Program(vertex, fragment)
V,I = primitives.plane(2.0, n=64)
program.bind(V)
program['data'] = (Z-Z.min())/(Z.max() - Z.min())
program['data'].interpolation = gl.GL_NEAREST
program['data_shape'] = Z.shape[1], Z.shape[0]
program['u_kernel'] = data.get("spatial-filters.npy")
program['u_kernel'].interpolation = gl.GL_LINEAR
transform = PanZoom(aspect=1)
program['transform'] = transform
window.attach(transform)
app.run()
|
from discord.ext import commands
import discord, random, aiosqlite3
class Moderation(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(help="a command to scan for malicious bots, specificially ones that only give you random invites and are fake(work in progress)")
async def scan_guild(self,ctx):
if isinstance(ctx.channel, discord.TextChannel):
cur = await self.client.sus_users.cursor()
            sus_users = dict(n for n in await cur.execute("SELECT * FROM SUS_USERS;"))
await cur.close()
count = 0
for x in sus_users:
                user = ctx.guild.get_member(x)
                if user:
                    count += 1
                    await ctx.send(f"Found {x}. \nUsername: {user.name} \nReason: {sus_users[x]}")
            if count < 1:
                await ctx.send("No bad users found.")
        if isinstance(ctx.channel, discord.DMChannel):
            await ctx.send("Please use the global version.")
def setup(client):
client.add_cog(Moderation(client))
|
from django.contrib import admin
from . import models
class DeviceAdmin(admin.ModelAdmin):
readonly_fields = ('id',)
admin.site.register(models.DeviceType)
admin.site.register(models.Device, DeviceAdmin)
|
'''text'''
frase = 'Curso em vídeo Python'
print(frase[9])
print(frase[:10])
print(frase[15:])
print(frase[9:13])
print(frase[9:21])
'''the slice goes up to index 20, but relying on that is not recommended'''
print(frase[9:21:2])
'''start, stop, step -> v, d, o, P, t, o'''
print(frase[9::3])
'''length of the text'''
print(len(frase))
'''count the occurrences'''
print(frase.count('o'))
'''counting with a slice: substring, start, stop'''
print(frase.count('o', 0, 13))
'''finds the substring and returns the position where it starts -> 11'''
print(frase.find('deo'))
'''methods can be chained: to look for the letter 'o' in a
case-insensitive way, convert the sentence to uppercase first
and then count the uppercase letter'''
print(frase.upper().count('O'))
'''if it is not found, -1 is returned'''
print(frase.find('Android'))
'''the IN operator only returns True or False'''
print('Curso' in frase)
'''transformation = replace, but it returns a new string,
i.e. the original object is not changed'''
print(frase.replace('Python', 'Android'))
'''to keep the result you need an assignment:
frase = frase.replace('Python', 'Android')'''
'''all UPPERCASE'''
print(frase.upper())
'''all lowercase'''
print(frase.lower())
'''lowercases everything and capitalizes only the first letter'''
print(frase.capitalize())
'''detects the words by the spaces
and capitalizes each word'''
print(frase.title())
'''removes all whitespace from the beginning and the end'''
frase2 = ' Aprenda Python '
print(frase2.strip())
'''removes only the whitespace on the right side'''
print(frase2.rstrip())
'''removes only the whitespace on the left side'''
print(frase2.lstrip())
'''splitting'''
'''looks at the spaces, splits the string,
and each word becomes an item of a new list'''
print(frase.split())
'''joining'''
print('-'.join(frase))
'''split creates another list'''
dividido = frase.split()
'''the print below shows the content of index 0 = Curso'''
print(dividido[0])
'''you can also pick a specific character: character [3]
of word [2] = 'e'.'''
print(dividido[2][3])
|
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The OpenTelemetry context module provides an abstraction layer on top of
thread-local storage and contextvars. The long term direction is to switch to
contextvars provided by the Python runtime library.
A global object ``Context`` is provided to access all the context related
functionalities::
>>> from opentelemetry.context import Context
>>> Context.foo = 1
>>> Context.foo = 2
>>> Context.foo
2
When explicit thread is used, a helper function
``Context.with_current_context`` can be used to carry the context across
threads::
from threading import Thread
from opentelemetry.context import Context
def work(name):
print('Entering worker:', Context)
Context.operation_id = name
print('Exiting worker:', Context)
if __name__ == '__main__':
print('Main thread:', Context)
Context.operation_id = 'main'
print('Main thread:', Context)
# by default context is not propagated to worker thread
thread = Thread(target=work, args=('foo',))
thread.start()
thread.join()
print('Main thread:', Context)
# user can propagate context explicitly
thread = Thread(
target=Context.with_current_context(work),
args=('bar',),
)
thread.start()
thread.join()
print('Main thread:', Context)
Here goes another example using thread pool::
import time
import threading
from multiprocessing.dummy import Pool as ThreadPool
from opentelemetry.context import Context
_console_lock = threading.Lock()
def println(msg):
with _console_lock:
print(msg)
def work(name):
println('Entering worker[{}]: {}'.format(name, Context))
Context.operation_id = name
time.sleep(0.01)
println('Exiting worker[{}]: {}'.format(name, Context))
if __name__ == "__main__":
println('Main thread: {}'.format(Context))
Context.operation_id = 'main'
pool = ThreadPool(2) # create a thread pool with 2 threads
pool.map(Context.with_current_context(work), [
'bear',
'cat',
'dog',
'horse',
'rabbit',
])
pool.close()
pool.join()
println('Main thread: {}'.format(Context))
Here goes a simple demo of how async could work in Python 3.7+::
import asyncio
from opentelemetry.context import Context
class Span(object):
def __init__(self, name):
self.name = name
self.parent = Context.current_span
def __repr__(self):
return ('{}(name={}, parent={})'
.format(
type(self).__name__,
self.name,
self.parent,
))
async def __aenter__(self):
Context.current_span = self
async def __aexit__(self, exc_type, exc, tb):
Context.current_span = self.parent
async def main():
print(Context)
async with Span('foo'):
print(Context)
await asyncio.sleep(0.1)
async with Span('bar'):
print(Context)
await asyncio.sleep(0.1)
print(Context)
await asyncio.sleep(0.1)
print(Context)
if __name__ == '__main__':
asyncio.run(main())
"""
from .base_context import BaseRuntimeContext
__all__ = ["Context"]
try:
from .async_context import AsyncRuntimeContext
Context = AsyncRuntimeContext() # type: BaseRuntimeContext
except ImportError:
from .thread_local_context import ThreadLocalRuntimeContext
Context = ThreadLocalRuntimeContext()
|
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.AIMObject import AIMObject
from OTLMOW.OTLModel.Classes.Put import Put
from OTLMOW.OTLModel.Datatypes.KlPutMateriaal import KlPutMateriaal
from OTLMOW.GeometrieArtefact.VlakGeometrie import VlakGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class BlindePut(AIMObject, Put, VlakGeometrie):
"""Een put waar de riolering op aangesloten is maar die niet meer zichtbaar is."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/installatie#BlindePut'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
AIMObject.__init__(self)
Put.__init__(self)
VlakGeometrie.__init__(self)
self._materiaal = OTLAttribuut(field=KlPutMateriaal,
naam='materiaal',
label='materiaal',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/installatie#BlindePut.materiaal',
definition='Het materiaal waaruit de blinde put is vervaardigd.',
owner=self)
@property
def materiaal(self):
"""Het materiaal waaruit de blinde put is vervaardigd."""
return self._materiaal.get_waarde()
@materiaal.setter
def materiaal(self, value):
self._materiaal.set_waarde(value, owner=self)
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from feedreader.models import Post
class PostContentTest(TestCase):
def test_relative_url(self):
"""
Tests that relative URLs are replaced by absolute URLs.
"""
post = Post()
post.link = "https://example.org"
post.content = """<a href="/url">url</a>"""
self.assertEqual(
post.processed_content, """<a href="https://example.org/url">url</a>"""
)
def test_iframe_replacement(self):
"""
Tests that iframes are replaced by a link to the source.
"""
post = Post()
post.content = """<iframe src="https://example.org"></iframe>"""
self.assertEqual(
post.processed_content, """<a href="https://example.org">iframe</a>"""
)
def test_iframe_no_src(self):
"""
Tests that iframes with no src attribute get removed.
"""
post = Post()
post.content = """<iframe></iframe>"""
self.assertEqual(post.processed_content, "")
def test_iframe_removes_script(self):
"""
Tests that scripts are removed from content.
"""
post = Post()
post.content = """<script src="https://example.org/script.js"></script>"""
self.assertEqual(post.processed_content, "")
|