from django.shortcuts import render
from django.views.generic import View
from .models import Story, Gallery, Comment
from django.contrib.auth import authenticate, login
from django.http import HttpResponseRedirect, HttpResponse
from django.db.models import Q
from utils.email_send import send_register_email
from django.contrib.auth.hashers import make_password
from django.contrib import auth
import os, base64, datetime, json
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
class IndexView(View):
"""
    Home page
"""
def get(self, request):
story_list = Story.objects.all()
return render(request, 'index.html', context={'story_list': story_list})
def post(self, request):
return render(request, 'index.html')
class GalleryView(View):
"""
    Photo wall
"""
def get(self, request):
gallery_list = Gallery.objects.all()
return render(request, 'gallery.html', context={'gallery_list': gallery_list})
def post(self, request):
return render(request, 'index.html')
class StoryShareView(View):
"""
    Story sharing
"""
def get(self, request):
return render(request, 'story_share.html')
def post(self, request):
user = request.user
if user.is_authenticated:
story = Story()
title = request.POST.get('title' , '')
content = request.POST.get('content', '')
longitude = float(request.POST.get('y', '0'))
latitude = float(request.POST.get('x', '0'))
story.title = title
story.content = content
story.longitude = longitude
story.latitude = latitude
story.user_belong = user
story.save()
return HttpResponse('{"status": "success", "msg": "分享成功"}', content_type='application/json')
return HttpResponse('{"status": "fail", "msg": "尚未登录"}', content_type='application/json')
class StoryListView(View):
"""
    Story list
"""
def get(self, request):
story_list = Story.objects.all()
return render(request, 'story_list.html', context={'story_list': story_list})
def post(self, request):
return render(request, 'story_post.html')
class StoryDetailView(View):
def get(self, request, story_id):
story = Story.objects.get(id = story_id)
story.click_num += 1
story.save()
hot_list = Story.objects.all().order_by('-click_num')[0:2]
comment_list = Comment.objects.filter(story_belong = story)
return render(request, 'story_detail.html', context={'story': story,
'hot_list': hot_list,
'comment_list': comment_list})
def post(self, request, story_id):
user = request.user
if user.is_authenticated:
            story = Story.objects.filter(id=story_id).first()  # None if the story does not exist
if story:
comment = Comment()
comment.content = request.POST.get('message', '')
comment.user_belong = request.user
comment.story_belong = story
comment.save()
return HttpResponse('{"status": "success", "msg": "评论成功"}', content_type='application/json')
else:
return HttpResponse('{"status": "fail", "msg": "未找到所属故事"}', content_type='application/json')
else:
return HttpResponse('{"status": "fail", "msg": "尚未登录"}', content_type='application/json')
class WangEditor_uploadView(View):
"""
    Image upload for the wangEditor rich-text editor
"""
def post(self,request):
gallery = Gallery()
user = request.user
user_name = str(user)
        files = request.FILES.get('images')  # get the uploaded file object
file_dir = settings.STATICFILES_DIRS[0] + '/story_image/'
if not os.path.exists(file_dir):
os.makedirs(file_dir)
file_path = file_dir + '%s_' %(user_name) + files.name
        with open(file_path, 'wb+') as f:  # write the uploaded file to disk
            f.write(files.read())
upload_info = {"errno":0, "data":[settings.STATIC_URL + '/story_image/' + '%s_' %(user_name) + files.name]}
        upload_info = json.dumps(upload_info)  # wangEditor expects exactly this JSON shape in upload responses
gallery.photo_name = '%s_' %(user_name) + files.name
gallery.photo_url = 'story_image/' + '%s_' %(user_name) + files.name
gallery.user_belong = user
gallery.save()
return HttpResponse(upload_info, content_type="application/json")
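A hedged illustration (not part of the original project): one way the class-based views above might be routed in a urls.py. The URL prefixes and route names are assumptions; only the story_id keyword argument of StoryDetailView is fixed by the view signatures.

from django.urls import path
from .views import (IndexView, GalleryView, StoryShareView, StoryListView,
                    StoryDetailView, WangEditor_uploadView)

urlpatterns = [
    path('', IndexView.as_view(), name='index'),
    path('gallery/', GalleryView.as_view(), name='gallery'),
    path('story/share/', StoryShareView.as_view(), name='story_share'),
    path('story/list/', StoryListView.as_view(), name='story_list'),
    # StoryDetailView.get/post expect a story_id keyword argument
    path('story/<int:story_id>/', StoryDetailView.as_view(), name='story_detail'),
    path('story/upload/', WangEditor_uploadView.as_view(), name='wangeditor_upload'),
]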
|
import hashlib
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.shortcuts import get_current_site
from django.utils import formats
from django.utils.html import escape
from django.utils.translation import ugettext as _, activate, get_language
from django_comments_tree import get_form
from django_comments_tree.models import TreeCommentFlag
from django_comments_tree.signals import comment_will_be_posted, comment_was_posted
from rest_framework import serializers
from django_comments_tree import signed
from django_comments_tree.views import comments as views
from django_comments_tree.conf import settings
from django_comments_tree.models import (TmpTreeComment, TreeComment,
LIKEDIT_FLAG, DISLIKEDIT_FLAG)
from django_comments_tree.signals import confirmation_received
from django_comments_tree.utils import has_app_model_option
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
COMMENT_MAX_LENGTH = getattr(settings, 'COMMENT_MAX_LENGTH', 3000)
class SerializerSaveMixin:
def on_save(self, **kwargs):
resp = {
'code': 200,
'comment': self.instance,
}
# Signal that the comment is about to be saved
responses = comment_will_be_posted.send(sender=self.instance.__class__,
comment=self.instance,
request=self.request)
for (receiver, response) in responses:
if response is False:
resp['code'] = 403 # Rejected.
return resp
return resp
class APICommentSerializer(SerializerSaveMixin, serializers.ModelSerializer):
class Meta:
model = TreeComment
fields = ['id', 'user',
'user_name', 'user_email', 'user_url',
'comment', 'comment_markup_type',
'submit_date', 'updated_on',
'ip_address', 'is_public', 'is_removed',
'followup',
]
def __init__(self, *args, **kwargs):
if kwargs.get('context'):
self.request = kwargs.get('context').get('request')
super().__init__(*args, **kwargs)
def to_representation(self, instance):
obj = super().to_representation(instance)
obj['submit_date'] = instance.submit_date.strftime(DATETIME_FORMAT)
return obj
def create(self, validated_data):
if 'parent' in validated_data:
p = validated_data.pop('parent')
instance = p.add_child(**validated_data)
return instance
return super().create(validated_data)
def update(self, instance, validated_data):
return super().update(instance, validated_data)
def save(self, **kwargs):
created = kwargs.pop('create', False)
result = super().save(**kwargs)
# Call after save, or instance won't be set
response = self.on_save(**kwargs)
if response.get('code') != 200:
return response.get('code')
comment_was_posted.send(sender=self.instance.__class__,
comment=self.instance,
request=self.request,
created=created)
return result
class WriteCommentSerializer(serializers.Serializer):
content_type = serializers.CharField()
object_id = serializers.CharField()
timestamp = serializers.CharField()
security_hash = serializers.CharField()
honeypot = serializers.CharField(allow_blank=True)
name = serializers.CharField(allow_blank=True)
email = serializers.EmailField(allow_blank=True)
url = serializers.URLField(required=False)
comment = serializers.CharField(max_length=COMMENT_MAX_LENGTH)
followup = serializers.BooleanField(default=False)
reply_to = serializers.IntegerField(default=0)
def __init__(self, *args, **kwargs):
self.request = kwargs['context']['request']
self.form = None
super().__init__(*args, **kwargs)
    def validate_name(self, value):
        if not value:
            # Check authentication before touching user attributes:
            # AnonymousUser defines neither get_full_name() nor email.
            if (not self.request.user.is_authenticated or
                    not self.request.user.get_full_name()):
                raise serializers.ValidationError("This field is required")
            else:
                return (self.request.user.get_full_name() or
                        self.request.user.get_username())
        return value
    def validate_email(self, value):
        if not value:
            if (not self.request.user.is_authenticated or
                    not self.request.user.email):
                raise serializers.ValidationError("This field is required")
            else:
                return self.request.user.email
        return value
def validate(self, data):
ctype = data.get("content_type")
object_id = data.get("object_id")
if ctype is None or object_id is None:
return serializers.ValidationError("Missing content_type or "
"object_id field.")
try:
model = apps.get_model(*ctype.split(".", 1))
target = model._default_manager.get(pk=object_id)
except TypeError:
return serializers.ValidationError("Invalid content_type value: %r"
% escape(ctype))
except AttributeError:
return serializers.ValidationError("The given content-type %r does "
"not resolve to a valid model."
% escape(ctype))
except model.ObjectDoesNotExist:
            raise serializers.ValidationError(
                "No object matching content-type %r and object ID %r exists."
                % (escape(ctype), escape(object_id)))
except (ValueError, serializers.ValidationError) as e:
            raise serializers.ValidationError(
                "Attempting to get content-type %r and object ID %r "
                "raised %s" % (escape(ctype), escape(object_id),
                               e.__class__.__name__))
self.form = get_form()(target, data=data)
# Check security information
if self.form.security_errors():
            raise serializers.ValidationError(
                "The comment form failed security verification: %s" %
                escape(str(self.form.security_errors())))
if self.form.errors:
            raise serializers.ValidationError(self.form.errors)
return data
def save(self):
# resp object is a dictionary. The code key indicates the possible
# four states the comment can be in:
# * Comment created (http 201),
# * Confirmation sent by mail (http 204),
# * Comment in moderation (http 202),
# * Comment rejected (http 403).
site = get_current_site(self.request)
resp = {
'code': -1,
'comment': self.form.get_comment_object(site_id=site.id)
}
resp['comment'].ip_address = self.request.META.get("REMOTE_ADDR", None)
if self.request.user.is_authenticated:
resp['comment'].user = self.request.user
# Signal that the comment is about to be saved
responses = comment_will_be_posted.send(sender=TmpTreeComment,
comment=resp['comment'],
request=self.request)
for (receiver, response) in responses:
if response is False:
resp['code'] = 403 # Rejected.
return resp
# Replicate logic from django_comments_tree.views.comments.on_comment_was_posted.
if not settings.COMMENTS_TREE_CONFIRM_EMAIL or self.request.user.is_authenticated:
if not views._comment_exists(resp['comment']):
new_comment = views._create_comment(resp['comment'])
resp['comment'].tree_comment = new_comment
confirmation_received.send(sender=TmpTreeComment,
comment=resp['comment'],
request=self.request)
comment_was_posted.send(sender=new_comment.__class__,
comment=new_comment,
request=self.request)
if resp['comment'].is_public:
resp['code'] = 201
views.notify_comment_followers(new_comment)
else:
resp['code'] = 202
else:
key = signed.dumps(resp['comment'], compress=True,
extra_key=settings.COMMENTS_TREE_SALT)
views.send_email_confirmation_request(resp['comment'], key, site)
resp['code'] = 204 # Confirmation sent by mail.
return resp
class ReadCommentSerializer(serializers.ModelSerializer):
user_name = serializers.CharField(max_length=50, read_only=True)
user_url = serializers.CharField(read_only=True)
user_moderator = serializers.SerializerMethodField()
user_avatar = serializers.SerializerMethodField()
submit_date = serializers.SerializerMethodField()
parent_id = serializers.IntegerField(default=0, read_only=True)
level = serializers.IntegerField(read_only=True)
is_removed = serializers.BooleanField(read_only=True)
comment = serializers.SerializerMethodField()
allow_reply = serializers.SerializerMethodField()
permalink = serializers.SerializerMethodField()
flags = serializers.SerializerMethodField()
class Meta:
model = TreeComment
fields = ('id', 'user_name', 'user_url', 'user_moderator',
'user_avatar', 'permalink', 'comment', 'submit_date',
'parent_id', 'level', 'is_removed', 'allow_reply', 'flags')
def __init__(self, *args, **kwargs):
self.request = kwargs['context']['request']
super().__init__(*args, **kwargs)
def get_submit_date(self, obj):
activate(get_language())
return formats.date_format(obj.submit_date, 'DATETIME_FORMAT',
use_l10n=True)
def get_comment(self, obj):
if obj.is_removed:
return _("This comment has been removed.")
else:
return obj.comment
def get_user_moderator(self, obj):
try:
if obj.user and obj.user.has_perm('comments.can_moderate'):
return True
else:
return False
except Exception:
return None
def get_flags(self, obj):
flags = {
'like': {'active': False, 'users': None},
'dislike': {'active': False, 'users': None},
'removal': {'active': False, 'count': None},
}
users_likedit, users_dislikedit = None, None
if has_app_model_option(obj)['allow_flagging']:
users_flagging = obj.users_flagging(TreeCommentFlag.SUGGEST_REMOVAL)
if self.request.user in users_flagging:
flags['removal']['active'] = True
if self.request.user.has_perm("django_comments.can_moderate"):
flags['removal']['count'] = len(users_flagging)
opt = has_app_model_option(obj)
if opt['allow_feedback'] or opt['show_feedback']:
users_likedit = obj.users_flagging(LIKEDIT_FLAG)
users_dislikedit = obj.users_flagging(DISLIKEDIT_FLAG)
if has_app_model_option(obj)['allow_feedback']:
if self.request.user in users_likedit:
flags['like']['active'] = True
elif self.request.user in users_dislikedit:
flags['dislike']['active'] = True
if has_app_model_option(obj)['show_feedback']:
flags['like']['users'] = [
"%d:%s" % (user.id, settings.COMMENTS_TREE_API_USER_REPR(user))
for user in users_likedit]
flags['dislike']['users'] = [
"%d:%s" % (user.id, settings.COMMENTS_TREE_API_USER_REPR(user))
for user in users_dislikedit]
return flags
def get_allow_reply(self, obj):
return obj.allow_thread()
def get_user_avatar(self, obj):
path = hashlib.md5(obj.user_email.lower().encode('utf-8')).hexdigest()
param = urlencode({'s': 48})
return "https://www.gravatar.com/avatar/%s?%s&d=mm" % (path, param)
def get_permalink(self, obj):
return obj.get_absolute_url()
class FlagSerializer(serializers.ModelSerializer):
flag_choices = {'like': LIKEDIT_FLAG,
'dislike': DISLIKEDIT_FLAG,
'report': TreeCommentFlag.SUGGEST_REMOVAL}
class Meta:
model = TreeCommentFlag
fields = ('comment', 'flag',)
def validate(self, data):
# Validate flag.
if data['flag'] not in self.flag_choices:
raise serializers.ValidationError("Invalid flag.")
# Check commenting options on object being commented.
option = ''
if data['flag'] in ['like', 'dislike']:
option = 'allow_feedback'
elif data['flag'] == 'report':
option = 'allow_flagging'
comment = data['comment']
if not has_app_model_option(comment)[option]:
ctype = ContentType.objects.get_for_model(comment.content_object)
raise serializers.ValidationError(
"Comments posted to instances of '%s.%s' are not explicitly "
"allowed to receive '%s' flags. Check the "
"COMMENTS_TREE_APP_MODEL_OPTIONS setting." % (
ctype.app_label, ctype.model, data['flag']
)
)
data['flag'] = self.flag_choices[data['flag']]
return data
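A minimal sketch (an assumption, not code from django_comments_tree) of how a DRF view might consume WriteCommentSerializer: save() returns a dict whose 'code' key carries the 201/202/204/403 states documented in the comments above, and the view maps that onto the HTTP status.

from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView


class PostCommentSketchView(APIView):
    """Hypothetical endpoint; not part of this module's public API."""

    def post(self, request):
        serializer = WriteCommentSerializer(data=request.data,
                                            context={'request': request})
        serializer.is_valid(raise_exception=True)
        result = serializer.save()
        # save() leaves code at -1 when the comment already exists; treat that as a 400.
        code = result['code'] if result['code'] > 0 else status.HTTP_400_BAD_REQUEST
        return Response({'status': code}, status=code)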
|
"""
Plotting convenience functions.
"""
from math import ceil
import ipywidgets as widgets
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from model_base import get_ext_input
# define basics
prop_cycle = plt.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
plt.style.use("seaborn-muted")
INPUT_START = 1000  # in integration steps (units of dt), i.e. 100 ms
LABEL_SIZE = 16
def setup_sliders_layout(model_specific_sliders):
"""
    Set up the interactive part of the plot, i.e. sliders and grid layout.
    model_specific_sliders: dict mapping model parameter names to slider widgets
"""
assert isinstance(model_specific_sliders, dict)
num_model_sliders = len(model_specific_sliders)
# define general sliders
I_m_slider = widgets.FloatSlider(
min=-5, max=20, step=0.5, value=10.0, description="I max"
)
T_slider = widgets.IntSlider(
min=500, max=2000, step=5, value=750, description="time"
)
I_types = widgets.ToggleButtons(
options=["constant", "sq. pulse", "sine", "ramp", "Ornstein-Uhlenbeck"],
value="constant",
description="Current type",
disabled=False,
layout=widgets.Layout(height="auto", width="auto"),
)
I_period = widgets.FloatSlider(
min=10, max=1000, step=5, value=200, description="I period"
)
# define grid
grid = widgets.GridspecLayout(ceil(5 + num_model_sliders / 2), 2)
grid[0, :] = widgets.Button(
description="Model parameters",
layout=widgets.Layout(height="auto", width="auto"),
)
# assign model sliders
for idx, (_, slider) in enumerate(model_specific_sliders.items()):
grid[idx // 2 + 1, idx % 2] = slider
grid[idx // 2 + 2, :] = widgets.Button(
description="External current parameters",
layout=widgets.Layout(height="auto", width="auto"),
)
grid[idx // 2 + 3, 0] = I_period
grid[idx // 2 + 4, 0] = I_m_slider
grid[idx // 2 + 4, 1] = T_slider
grid[idx // 2 + 5, :] = I_types
sliders = {
**model_specific_sliders,
"I_max": I_m_slider,
"I_period": I_period,
"T": T_slider,
"current_type": I_types,
}
for _, slider in sliders.items():
# lower number of "waiting" updates in the pipe
slider.msg_throttle = 1
return grid, sliders
def integrate_and_plot(model_cls, **kwargs):
"""
Integrate the model given its parameters and plot.
"""
T = kwargs.pop("T")
I_max = kwargs.pop("I_max")
I_period = kwargs.pop("I_period")
current_type = kwargs.pop("current_type")
model = model_cls(parameters=kwargs, T=T)
ext_current = np.zeros((model.n_points + 1))
input_length = ext_current.shape[0] - INPUT_START
ext_current[INPUT_START:] = get_ext_input(
I_max, I_period, current_type, model.T_total, input_length
)
model.set_input(ext_current)
t, y = model.integrate()
# set up figure
fig = plt.figure(constrained_layout=True, figsize=(15, 8))
spec = gridspec.GridSpec(ncols=3, nrows=3, figure=fig)
# set up axis for timeseries of input current
ax2 = fig.add_subplot(spec[2, :2])
ax2.set_ylim([-20, 30])
ax2.set_ylabel("INPUT CURRENT [AU]", size=LABEL_SIZE)
ax2.set_xlabel("TIME [ms]", size=LABEL_SIZE)
ax2.axvline(100.0, 0, 1, linestyle="--", color="grey", linewidth=0.7)
ax2.spines["right"].set_visible(False)
ax2.spines["top"].set_visible(False)
ax2.tick_params(axis="both", which="major", labelsize=LABEL_SIZE - 2)
# set up axis for timeseries of state vector
ax1 = fig.add_subplot(spec[:2, :2], sharex=ax2)
ax1.set_ylim([-90, 30])
ax1.set_ylabel("MEMBRANE POTENTIAL [mV]", size=LABEL_SIZE)
ax1.spines["right"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["bottom"].set_visible(False)
ax1.axvline(100.0, 0, 1, linestyle="--", color="grey", linewidth=0.7)
ax1.tick_params(axis="both", which="major", labelsize=LABEL_SIZE - 2)
ax12 = ax1.twinx()
ax12.set_ylim([-20, 10])
ax12.set_yticklabels([])
ax12.set_yticks([])
ax12.spines["right"].set_visible(False)
ax12.spines["top"].set_visible(False)
ax12.spines["bottom"].set_visible(False)
ax12.tick_params(axis="both", which="major", labelsize=LABEL_SIZE - 2)
# set up axis for scatter u vs v
ax3 = fig.add_subplot(spec[:2, 2], sharey=ax1)
ax3.spines["right"].set_visible(False)
ax3.spines["top"].set_visible(False)
ax3.set_xlabel("MEMBRANE RECOVERY", size=LABEL_SIZE)
scatter_colors = colors[3]
ax3.set_ylim([-90, 30])
ax3.set_xlim([-20, 10])
ax3.tick_params(axis="both", which="major", labelsize=LABEL_SIZE - 2)
# plot
ax1.plot(t, y[0, :], color=colors[0], linewidth=2.5)
ax12.plot(t, y[1:, :].T, color=colors[1])
ax2.plot(t, model.ext_current[1:], color=colors[2])
ax3.scatter(y[1, :], y[0, :], s=7, c=scatter_colors)
plt.suptitle(f"Number of spikes: {model.num_spikes}", size=LABEL_SIZE + 3)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 23 08:58:37 2021
@author: michp-ai
"""
# This script is web automation for the Capstone project on ML rapid text labeling.
# Before running this script, start the web server in a separate console by running main.py for the web app.
# This is a simple demo script that illustrates how selenium interacts with the web app.
#%%
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
import re
import string
import os
from time import sleep
import datetime
import pickle
from zipfile import ZipFile
import sys
sys.path.insert(1, '../baseline-classifier/utilities')
import dt_utilities as utils
#%%
#set a timer
starttime = datetime.datetime.now()
#%%
# Get the data we'll need for evaluation
consolidated_disaster_tweet_data_df = \
utils.get_consolidated_disaster_tweet_data(root_directory="../baseline-classifier/data/",
event_type_directory="HumAID_data_event_type",
events_set_directories=["HumAID_data_events_set1_47K",
"HumAID_data_events_set2_29K"],
include_meta_data=True)
train_df = consolidated_disaster_tweet_data_df[consolidated_disaster_tweet_data_df["data_type"]=="train"].reset_index(drop=True)
test_df = consolidated_disaster_tweet_data_df[consolidated_disaster_tweet_data_df["data_type"]=="test"].reset_index(drop=True)
vectorizer_needs_transform = True
#%%
download_dir = os.path.join(os.getcwd(), "models")
chrome_options = Options()
chrome_options.add_experimental_option('prefs', {
"download.default_directory": download_dir,
"download.prompt_for_download": False,
"download.directory_upgrade": True,
}
)
#%%
# PARAMETERS
mpath = os.path.join(os.getcwd(), "chromedriver.exe")
wait_time = 0  # 0.75
scroll_wait_seconds = 0  # 1.75
#%%
driver = webdriver.Chrome(mpath, options = chrome_options)
#%%
# load the webpage
driver.get("http://127.0.0.1:5000/")
driver.maximize_window()
#sleep(2) #for demo
#%%
# navigate landing page
driver.find_element_by_xpath('//*[@id="bodyLeftTable1"]/tbody/tr[1]/td[1]/a').click()
driver.find_element_by_id('config1').click()
driver.find_element_by_id('loadDataSetButton').click()
#%%
# identify radio buttons
def get_radio_buttons():
radio_buttons = []
radio_buttons.append(driver.find_element_by_id('category1'))
radio_buttons.append(driver.find_element_by_id('category2'))
radio_buttons.append(driver.find_element_by_id('category3'))
radio_buttons.append(driver.find_element_by_id('category4'))
return radio_buttons
def select_label_one_text(xpath, radio_button_id, wait_time=0.75):
# select a text from the list of all texts
driver.find_element_by_xpath(xpath).click()
sleep(wait_time)
# we select the correct radio button
radio_buttons = get_radio_buttons()
sleep(wait_time)
radio_buttons[radio_button_id].click()
# label one example
button_label_single = driver.find_element_by_id('labelButtonSingle')
button_label_single.click()
sleep(wait_time)
def select_label_multi_text(xpath, radio_button_id, wait_time=0.75, max_options=1, label_type="SimilarTexts",
min_recommender_labels=1000):
total_unlabeled, total_labeled = get_total_unlabeled(get_labeled=True)
# select a text from the list of all texts
driver.find_element_by_xpath(xpath).click()
sleep(wait_time)
# we select the correct radio button
radio_buttons = get_radio_buttons()
sleep(wait_time)
radio_buttons[radio_button_id].click()
    # op_base_xpath is a module-level global, chosen further below based on label_type
    op_xpath = op_base_xpath + str(max_options) + ']'
driver.find_element_by_xpath(op_xpath).click()
# label multi example
if (label_type=="SimilarTexts") | (total_labeled < min_recommender_labels) | (total_unlabeled < min_recommender_labels):
driver.find_element_by_id("buttonSimilarTexts1Buttons").click()
button_label_ten = driver.find_element_by_id('group1Button')
elif label_type=="RecommendedTexts":
driver.find_element_by_id("buttonSimilarTexts2Buttons").click()
button_label_ten = driver.find_element_by_id('group2Button')
sleep(wait_time)
button_label_ten.click()
def click_difficult_texts(wait_time=0.75):
sleep(wait_time)
button_difficult_texts = driver.find_element_by_id('generateDifficultTextsButton')
button_difficult_texts.click()
def scroll_label_ten(radio_button_id, scroll_wait_seconds = 1.75):
#we scroll down the results list
for scr in range(2,10,2):
scr_xpath = '//*[@id="group1Table"]/tbody/tr[' + str(scr) + ']/td[1]/a'
print(scr_xpath)
link_scroll = driver.find_element_by_xpath(scr_xpath)
driver.execute_script("return arguments[0].scrollIntoView(true);", link_scroll)
sleep(scroll_wait_seconds)
radio_buttons = get_radio_buttons()
radio_buttons[radio_button_id].click()
sleep(wait_time)
# we apply a group label after checking all 10 suggested are correct
button_label_ten = driver.find_element_by_id('group1Button')
sleep(wait_time)
button_label_ten.click()
def search_phrase(phrase):
    # Search for the given phrase
element = driver.find_element_by_id("searchAllTexts")
element.send_keys(phrase)
driver.find_element_by_id("searchAllTextsButton").click()
def search_label(phrases, reps, label_type):
for r in range(reps):
for k, v in phrases.items():
search_phrase(k)
if label_type=='single':
select_label_one_text('//*[@id="allTextsTable"]/tbody/tr[' + str (r+1) + ']/td[1]', v, wait_time=wait_time)
def get_total_unlabeled(get_labeled=False):
total_unlabeled = driver.find_element_by_xpath('//*[@id="summarySectionTable"]/tbody/tr[3]/td[2]').text.replace(',', '')
total_unlabeled = int(total_unlabeled)
if get_labeled:
total_labeled = driver.find_element_by_xpath('//*[@id="summarySectionTable"]/tbody/tr[4]/td[2]').text.replace(',', '')
total_labeled = int(total_labeled)
return total_unlabeled, total_labeled
else:
return total_unlabeled
def get_overall_quality_score():
overall_quality_score = driver.find_element_by_xpath('//*[@id="difficultTextSummaryTable"]/tbody/tr[1]/td[2]').text
return overall_quality_score
def export_model():
driver.find_element_by_id("exportRecordsButton").click()
# Create a ZipFile Object and load sample.zip in it
for n in range(5000):
try:
with ZipFile('models/rapid-labeling-results.zip', 'r') as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall()
break
        except Exception:
            # the zip may not have finished downloading yet; retry shortly
            sleep(0.01)
os.remove("models/rapid-labeling-results.zip")
#def check_if_vectorizer_needs_transform():
def get_accuracy_score(vectorizer_needs_transform):
# load the model from disk
model_filename = os.path.join("output", "trained-classifier.sav")
loaded_model = pickle.load(open(model_filename, 'rb'))
if vectorizer_needs_transform:
#vectorizer_needs_transform = False
vectorizer_filename = os.path.join("output", "fitted-vectorizer.sav")
#print(vectorizer_filename)
vectorizer = pickle.load(open(vectorizer_filename, 'rb'))
#X_train = vectorizer.transform(train_df["tweet_text"])
X_test = vectorizer.transform(test_df["tweet_text"])
y_test = test_df["event_type"]
y_pred = [x.lower() for x in loaded_model.predict(X_test)]
test_accuracy_score = accuracy_score(y_test, y_pred)
#print(test_accuracy_score)
return test_accuracy_score, vectorizer_needs_transform
def get_tracker_row(vectorizer_needs_transform):
overall_quality_score = get_overall_quality_score()
_, total_labeled = get_total_unlabeled(get_labeled=True)
test_accuracy_score = 0.
try:
export_model()
test_accuracy_score, vectorizer_needs_transform = get_accuracy_score(vectorizer_needs_transform)
    except Exception:
        # export/evaluation can fail before any model has been trained; keep going
        pass
currenttime = datetime.datetime.now()
elapsedtime = currenttime - starttime
tracker_row = {'labels': total_labeled,
'overall_quality_score': overall_quality_score,
'accuracy': test_accuracy_score,
'elapsed_time': elapsedtime
}
#print(tracker_row)
return tracker_row, vectorizer_needs_transform
#%%
df_tracker = pd.DataFrame(columns=['labels', 'overall_quality_score', 'accuracy', 'elapsed_time'])
#%%
# read the contents of the text
sectionstarttime = datetime.datetime.now()
phrases = {
'earthquake': 0,
'wildfire': 1,
'hurricane': 3,
'flooding': 2,
'fire': 1,
'richter': 0,
'smoke': 1,
'floods': 2,
'mph': 3,
'cyclone': 3,
'heat': 1,
'quake': 0,
'tornado': 3,
'Dorian': 3,
}
#
label_type = "RecommendedTexts" # list of valid values ["SimilarTexts", "RecommendedTexts"]
min_recommender_labels = 1000
max_display_options = 4 # range 1 to 6
txts_per_page = 50
pages_per_max_display_option = 1071 #20
label_applied = False
for op in range(max_display_options + 1):
# check how many are unlabelled
if get_total_unlabeled()==0:
break
if label_type == "SimilarTexts":
op_base_xpath = '//*[@id="group1_table_limit"]/option['
elif label_type == "RecommendedTexts":
op_base_xpath = '//*[@id="group2_table_limit"]/option['
for pg in range(pages_per_max_display_option):
# check how many are unlabelled
if get_total_unlabeled()==0:
break
# loop through page
for rrow in range(1,txts_per_page + 1):
# check how many are unlabelled
if get_total_unlabeled()==0:
break
xpath_base = '//*[@id="allTextsTable"]/tbody/tr[' + str(rrow) + ']/td['
tweet_text = str.lower(driver.find_element_by_xpath(xpath_base + '2]').text)
#print(tweet_text)
for k, v in phrases.items():
# label based on text contents
if k in tweet_text:
# check how many are unlabelled
if get_total_unlabeled()==0:
break
try:
select_label_multi_text(xpath_base + '1]/a', v, wait_time=wait_time, max_options=max_display_options,
label_type=label_type, min_recommender_labels=min_recommender_labels)
label_applied = True
if label_applied==True:
click_difficult_texts()
tracker_row, vectorizer_needs_transform = get_tracker_row(vectorizer_needs_transform)
print(tracker_row)
df_tracker = df_tracker.append(tracker_row, ignore_index=True)
except:
break
break
# go to next page
driver.find_element_by_xpath('//*[@id="allTextTableNextButtons"]/a[6]').click()
max_display_options = max_display_options - 1
sectionendtime = datetime.datetime.now()
elapsedsectiontime = sectionendtime - sectionstarttime
print("Elapsed section time", elapsedsectiontime)
#%%
# # Label difficult texts with single labels
# # read the contents of the text
# sectionstarttime = datetime.datetime.now()
# phrases = {
# 'earthquake': 0,
# 'wildfire': 1,
# 'hurricane': 3,
# 'flooding': 2,
# 'fire': 1,
# 'richter': 0,
# 'smoke': 1,
# 'floods': 2,
# 'mph': 3,
# 'cyclone': 3,
# 'heat': 1,
# 'quake': 0,
# }
# for pg in range(20):
# if get_total_unlabeled()==0:
# break
# # loop through page
# for rrow in range(1,51):
# if get_total_unlabeled()==0:
# break
# xpath_base = '//*[@id="allTextsTable"]/tbody/tr[' + str(rrow) + ']/td['
# tweet_text = str.lower(driver.find_element_by_xpath(xpath_base + '2]').text)
# #print(tweet_text)
# for k, v in phrases.items():
# # label based on text contents
# if get_total_unlabeled()==0:
# break
# if k in tweet_text:
# try:
# select_label_one_text(xpath_base + '1]/a', v, wait_time=wait_time)
# except:
# break
# break
# # go to next page
# driver.find_element_by_xpath('//*[@id="allTextTableNextButtons"]/a[6]').click()
# sectionendtime = datetime.datetime.now()
# elapsedsectiontime = sectionendtime - sectionstarttime
# print("Elapsed section time", elapsedsectiontime)
#%%
df_tracker.to_csv("tracker_output.csv")
print(df_tracker.head(20))
print(df_tracker.tail(20))
#%%
driver.close()
#%%
#os.remove("output/fitted-vectorizer.sav")
#%%
endtime = datetime.datetime.now()
elapsedtime = endtime - starttime
print("Elapsed time", elapsedtime)
|
#!/Users/robertpoenaru/.pyenv/shims/python
import numpy as np
import matplotlib.pyplot as plt
from numpy import random as rd
CHARGE_E = 1
# return the radius of a nucleus as a function of the total number of nucleons (assuming a constant density)
class Charge_Distribution:
def __init__(self, A, Z, diffuseness, beta):
self.A = A
self.Z = Z
self.a = diffuseness
self.beta = beta
R0 = 1.25
# the constant nuclear charge density
rho_0 = lambda A, Z, e: 0.17 * Z * e / A
R = lambda A: Charge_Distribution.R0 * np.power(A, 1.0 / 3.0)
    # half-density radius of the charge distribution (standard parameterization, in fm)
    R_half = lambda A: 1.128 * \
        np.power(A, 1.0 / 3.0) - 0.89 * np.power(A, -1.0 / 3.0)
# create the Fermi-function which describes the nuclear charge distribution within the nucleus
@staticmethod
def Fermi_Distribution(x, x0, a):
T = 1.0 + np.exp(x / a)
return x0 / T
@staticmethod
def rho_charge(A, Z, e, a, r):
        # evaluate the Fermi charge-density profile at radius r (the integrand weight for the RMS charge radius)
RHO0 = Charge_Distribution.rho_0(A, Z, e)
R_HALF = Charge_Distribution.R_half(A)
# declare the diffuseness parameter a
# a_diffuse = 0.524
# a_diffuse = 2.4
rho = Charge_Distribution.Fermi_Distribution(r - R_HALF, RHO0, a)
return rho
@staticmethod
def RMS_ConstantDensity(A):
        # mean-square radius <r^2> = (3/5) R^2 of a uniform (constant-density) sphere,
        # with the radius given as a function of the nuclear mass number A
r_const = Charge_Distribution.R(A)
rms = 3.0 / 5.0 * np.power(r_const, 2)
return rms
@staticmethod
def RMS_Deformed(A, beta):
r_sph = Charge_Distribution.RMS_ConstantDensity(A)
r_def = r_sph * (1.0 + 5.0 / (4.0 * np.pi) * np.power(beta, 2))
return r_def
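A short usage sketch (not part of the original module): comparing the mean-square charge radius of a constant-density sphere with its deformed counterpart, and sampling the Fermi charge-density profile near the nuclear surface. The mass number, charge, deformation and diffuseness values below are illustrative only.

if __name__ == '__main__':
    A, Z, beta = 154, 64, 0.3      # illustrative values for a well-deformed nucleus
    rms_spherical = Charge_Distribution.RMS_ConstantDensity(A)
    rms_deformed = Charge_Distribution.RMS_Deformed(A, beta)
    # Fermi charge density at r = R(A), using a typical diffuseness of 0.524 fm
    rho_surface = Charge_Distribution.rho_charge(A, Z, CHARGE_E, 0.524,
                                                 Charge_Distribution.R(A))
    print(f'<r^2> constant density: {rms_spherical:.3f} fm^2')
    print(f'<r^2> deformed (beta={beta}): {rms_deformed:.3f} fm^2')
    print(f'charge density near the surface: {rho_surface:.5f} e/fm^3')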
|
import producer_ipc
import consumer_ipc
|
#!/usr/bin/env python3
import sys
if sys.version_info[0] != 3 or sys.version_info[1] < 5:
print("Holo requires Python version 3.5 or greater")
sys.exit(1)
# Metadata
name = "Holo"
description = "episode discussion bot"
version = "0.1.1"
# Ensure proper files can be accessed if running with cron
import os
from pathlib import Path
os.chdir(str(Path(__file__).parent.parent))
# Do the things
from data import database
import services
def main(config, extra_args):
from logging import debug, info, warning, error, exception
# Set things up
db = database.living_in(config.database)
if not db:
error("Cannot continue running without a database")
return
services.setup_services(config)
# Run the requested module
try:
debug("Running module {}".format(config.module))
if config.module == "setup":
info("Setting up database")
db.setup_tables()
info("Registering services")
db.register_services(services.get_service_handlers())
db.register_link_sites(services.get_link_handlers())
elif config.module == "edit":
info("Editing database")
import module_edit as m
m.main(config, db, *extra_args)
elif config.module == "episode":
info("Finding new episodes")
import module_find_episodes as m
m.main(config, db, debug=config.debug)
elif config.module == "find":
info("Finding new shows")
import module_find_shows as m
m.main(config, db)
elif config.module == "update":
info("Updating shows")
import module_update_shows as m
m.main(config, db)
else:
warning("This should never happen or you broke it!")
except:
exception("Unknown exception or error")
db.rollback()
db.close()
if __name__ == "__main__":
# Parse args
import argparse
parser = argparse.ArgumentParser(description="{}, {}".format(name, description))
parser.add_argument("--no-input", dest="no_input", action="store_true", help="run without stdin and write to a log file")
parser.add_argument("-m", "--module", dest="module", nargs=1, choices=["setup", "edit", "episode", "update", "find"], default=["episode"], help="runs the specified module")
parser.add_argument("-c", "--config", dest="config_file", nargs=1, default=["config.ini"], help="use or create the specified database location")
parser.add_argument("-d", "--database", dest="db_name", nargs=1, default=None, help="use or create the specified database location")
parser.add_argument("-s", "--subreddit", dest="subreddit", nargs=1, default=None, help="set the subreddit on which to make posts")
parser.add_argument("-L", "--log-dir", dest="log_dir", nargs=1, default=["logs"], help="set the log directory")
parser.add_argument("-v", "--version", action="version", version="{} v{}, {}".format(name, version, description))
parser.add_argument("--debug", action="store_true", default=False)
parser.add_argument("extra", nargs="*")
args = parser.parse_args()
# Load config file
import config as config_loader
c = config_loader.from_file(args.config_file[0])
# Override config with args
c.debug = args.debug
c.module = args.module[0]
c.log_dir = args.log_dir[0]
if args.db_name is not None:
c.database = args.db_name[0]
if args.subreddit is not None:
c.subreddit = args.subreddit[0]
# Start
use_log = args.no_input
import logging
from logging.handlers import TimedRotatingFileHandler
if use_log:
os.makedirs(c.log_dir, exist_ok=True)
#from datetime import datetime
#log_file = "logs/{date}_{mod}.log".format(date=datetime.now().strftime("%Y-%m-%dT%H:%M:%S"), mod=c.module)
log_file = "{dir}/holo_{mod}.log".format(dir=c.log_dir, mod=c.module)
logging.basicConfig(
#filename=log_file,
handlers=[TimedRotatingFileHandler(log_file, when="midnight", backupCount=7, encoding="UTF-8")],
format="%(asctime)s | %(name)s | %(levelname)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.DEBUG if c.debug else logging.INFO)
else:
logging.basicConfig(format="%(levelname)s | %(message)s", level=logging.DEBUG if c.debug else logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("praw-script-oauth").setLevel(logging.WARNING)
from logging import info, warning
from time import time
if use_log:
info("------------------------------------------------------------")
err = config_loader.validate(c)
if err:
warning("Configuration state invalid: {}".format(err))
start_time = time()
main(c, args.extra)
end_time = time()
time_diff = end_time - start_time
info("")
info("Run time: {:.6} seconds".format(time_diff))
if use_log:
info("------------------------------------------------------------\n")
|
import mtfl
from mtfl.hypothesis import MetricTemporalLogicStrategy
from hypothesis import given
@given(MetricTemporalLogicStrategy)
def test_identities(phi):
assert mtfl.TOP == mtfl.TOP | phi
assert mtfl.BOT == mtfl.BOT & phi
assert mtfl.TOP == phi | mtfl.TOP
assert mtfl.BOT == phi & mtfl.BOT
assert phi == phi & mtfl.TOP
assert phi == phi | mtfl.BOT
assert mtfl.TOP == mtfl.TOP & mtfl.TOP
assert mtfl.BOT == mtfl.BOT | mtfl.BOT
assert mtfl.TOP == mtfl.TOP | mtfl.BOT
assert mtfl.BOT == mtfl.TOP & mtfl.BOT
assert ~mtfl.BOT == mtfl.TOP
assert ~mtfl.TOP == mtfl.BOT
assert ~~mtfl.BOT == mtfl.BOT
assert ~~mtfl.TOP == mtfl.TOP
assert (phi & phi) & phi == phi & (phi & phi)
assert (phi | phi) | phi == phi | (phi | phi)
assert ~~phi == phi
def test_walk():
phi = mtfl.parse(
'(([ ][0, 1] ap1 & < >[1,2] ap2) | (@ap1 U ap2))')
assert len(list((~phi).walk())) == 18
|
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from fastapi_for_firebase.cache_control import middleware
@pytest.mark.parametrize(
"max_age,s_maxage,header_value",
[(30, 60, "public, max-age=30, s-maxage=60"), (30, None, "public, max-age=30")],
)
def test_cache_control__get(max_age, s_maxage, header_value):
app = FastAPI()
client = TestClient(app)
app.middleware("http")(middleware.cache_control(max_age, s_maxage))
app.get("/")(lambda: "OK")
response = client.get("/")
assert response.text == '"OK"'
assert response.headers["Cache-Control"] == header_value
def test_cache_control__post():
app = FastAPI()
client = TestClient(app)
app.middleware("http")(middleware.cache_control())
app.post("/")(lambda: "OK")
response = client.post("/")
assert response.text == '"OK"'
assert "Cache-Control" not in response.headers
|
import django.forms.widgets as widgets
class AbilityListBoxSelect(widgets.SelectMultiple):
template_name = 'dnd5e/widgets/ability_list_box.html'
option_template_name = 'dnd5e/widgets/ability_list_box_option.html'
class Media:
css = {'all': ('css/ability_listbox.css',)}
js = ('js/ability_listbox.js',)
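A hedged usage sketch (not from the original code base): plugging the widget into a MultipleChoiceField. The form, field name and choices are hypothetical.

from django import forms


class AbilityChoiceForm(forms.Form):
    abilities = forms.MultipleChoiceField(
        choices=[('str', 'Strength'), ('dex', 'Dexterity'), ('con', 'Constitution')],
        widget=AbilityListBoxSelect,
    )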
|
"""
Common utilities between parsing and retrieval lambdas.
TODO: Consider structuring this as a class (if that's workable with AWS
Lambda layers). Many functions accept similar parameters in order to make
API calls, and that information could easily be encoded as state in an
object.
"""
import os
import re
import json
import tempfile
import requests
import functools
import google
import google.auth.transport.requests
from enum import Enum
from pathlib import Path
from google.oauth2 import service_account
E2E_MOCK_SOURCE_URL = os.environ.get("MOCK_SOURCE_DATA_ADDRESS", "")
REGISTRATION_ENDPOINT = os.environ.get("REGISTRATION_ENDPOINT", "http://localhost:3001/auth/register")
_ENV_TO_SOURCE_API_URL = {
"locale2e": E2E_MOCK_SOURCE_URL,
"local": "http://localhost:3001/api",
"dev": "https://dev-data.covid-19.global.health/api",
"prod": "https://data.covid-19.global.health/api"
}
_SERVICE_ACCOUNT_CRED_FILE = "covid-19-map-277002-0943eeb6776b.json"
_METADATA_BUCKET = "epid-ingestion"
MIN_SOURCE_ID_LENGTH, MAX_SOURCE_ID_LENGTH = 24, 24
class UploadError(Enum):
"""Upload error categories corresponding to the G.h Source API."""
INTERNAL_ERROR = 1
SOURCE_CONFIGURATION_ERROR = 2
SOURCE_CONFIGURATION_NOT_FOUND = 3
SOURCE_CONTENT_NOT_FOUND = 4
SOURCE_CONTENT_DOWNLOAD_ERROR = 5
PARSING_ERROR = 6
DATA_UPLOAD_ERROR = 7
VALIDATION_ERROR = 8
def create_upload_record(env, source_id, headers, cookies):
"""Creates an upload resource via the G.h Source API."""
post_api_url = f"{get_source_api_url(env)}/sources/{source_id}/uploads"
print(f"Creating upload via {post_api_url}")
res = requests.post(post_api_url,
json={"status": "IN_PROGRESS", "summary": {}},
cookies=cookies,
headers=headers)
if res and res.status_code == 201:
res_json = res.json()
return res_json["_id"]
e = RuntimeError(
f"Error creating upload record, status={res.status_code}, response={res.text}")
complete_with_error(e)
def finalize_upload(
env, source_id, upload_id, headers, cookies, count_created=None,
count_updated=None, count_error=None, error=None):
"""Records the results of an upload via the G.h Source API."""
put_api_url = f"{get_source_api_url(env)}/sources/{source_id}/uploads/{upload_id}"
print(f"Updating upload via {put_api_url}")
update = {"summary": {}}
if error:
update["status"] = "ERROR"
update["summary"]["error"] = error.name
else:
update["status"] = "SUCCESS"
if count_created:
update["summary"]["numCreated"] = count_created
if count_updated:
update["summary"]["numUpdated"] = count_updated
if count_error:
update["summary"]["numError"] = count_error
res = requests.put(put_api_url,
json=update,
headers=headers,
cookies=cookies)
return res.status_code, res.text
def complete_with_error(
exception, env=None, upload_error=None,
source_id=None, upload_id=None,
headers=None, cookies=None,
count_created=0, count_updated=0, count_error=0):
"""
Logs and raises the provided exception.
If upload details are provided, updates the indicated upload with the
provided data.
"""
print(exception)
if env and upload_error and source_id and upload_id:
finalize_upload(env, source_id, upload_id, headers, cookies,
error=upload_error,
count_created=count_created,
count_updated=count_updated,
count_error=count_error)
raise exception
def login(email: str):
"""Logs-in a local curator server instance for testing.
Returns the cookie of the now logged-in user.
"""
print("Logging-in user", email)
endpoint = REGISTRATION_ENDPOINT
res = requests.post(endpoint, json={
"email": email,
"roles": ["curator"],
})
if not res or res.status_code != 200:
raise RuntimeError(
f"Error registering local user, status={res.status_code}, response={res.text}")
return res.cookies
def obtain_api_credentials(s3_client):
"""
Creates HTTP headers credentialed for access to the Global Health Source API.
"""
try:
fd, local_creds_file_name = tempfile.mkstemp()
with os.fdopen(fd) as _:
print(
"Retrieving service account credentials from "
f"s3://{_METADATA_BUCKET}/{_SERVICE_ACCOUNT_CRED_FILE}")
s3_client.download_file(_METADATA_BUCKET,
_SERVICE_ACCOUNT_CRED_FILE,
local_creds_file_name)
credentials = service_account.Credentials.from_service_account_file(
local_creds_file_name, scopes=["email"])
headers = {}
request = google.auth.transport.requests.Request()
credentials.refresh(request)
credentials.apply(headers)
return headers
except Exception as e:
print(e)
raise e
def get_source_api_url(env):
"""
Returns the URL at which to reach the Source API for the provided environment.
"""
if env not in _ENV_TO_SOURCE_API_URL:
raise ValueError(f"No source API URL found for provided env: {env}")
return _ENV_TO_SOURCE_API_URL[env]
def python_module(folder: Path, root: Path):
"""Returns the unique python module in folder relative to root"""
modules = [f for f in Path(folder).glob("*.py")
if "test" not in str(f) and "__init__.py" not in str(f)]
if len(modules) == 1: # Ensure there is a unique python module
return str(modules[0].relative_to(root)).replace('/', '.')[:-3]
else:
return None
def get_parser_module(parser):
parser_name = re.sub(r"-ingestor-\w+", r"", parser)
parser_name = re.sub(r"-", r".", parser_name)
return f"parsing.{parser_name}"
@functools.lru_cache
def get_source_id_parser_map(parser_root: Path = None):
"""Returns a mapping of source IDs to parser information"""
parser_root = parser_root or Path(__file__).parent.parent
input_event_files = [
f for f in parser_root.rglob("input_event.json")
if all(not str(f).startswith(prefix)
for prefix in [".aws-sam", "common", "parsing/example"])
]
m = {} # map from source id -> parser information
for input_event_file in input_event_files:
input_event = json.loads(input_event_file.read_text())
sourceId = input_event["sourceId"]
if not MIN_SOURCE_ID_LENGTH <= len(sourceId) <= MAX_SOURCE_ID_LENGTH:
continue
del input_event["sourceId"]
m[sourceId] = input_event
m[sourceId]["python_module"] = python_module(input_event_file.parent, parser_root)
return m
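A minimal sketch (an assumption, not one of the actual parser lambdas) of how a parsing handler might drive the upload lifecycle helpers above: create an upload record, ingest data, then finalize the upload or record the failure. The event fields and the ingestion step are hypothetical.

def example_lambda_handler(event, context):
    """Hypothetical parsing lambda wiring for the helpers in this module."""
    env = event["env"]
    source_id = event["sourceId"]
    api_creds = {}  # real lambdas build this via obtain_api_credentials(s3_client)
    cookies = login("local@example.com") if env == "local" else None
    upload_id = create_upload_record(env, source_id, api_creds, cookies)
    try:
        count_created = 0  # a real parser would ingest and count cases here
        finalize_upload(env, source_id, upload_id, api_creds, cookies,
                        count_created=count_created)
    except Exception as exc:
        complete_with_error(exc, env, UploadError.PARSING_ERROR,
                            source_id, upload_id, api_creds, cookies)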
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from argparse import RawTextHelpFormatter
from jdcloud_cli.cement.ext.ext_argparse import expose
from jdcloud_cli.controllers.base_controller import BaseController
from jdcloud_cli.client_factory import ClientFactory
from jdcloud_cli.parameter_builder import collect_user_args, collect_user_headers
from jdcloud_cli.printer import Printer
from jdcloud_cli.skeleton import Skeleton
class MonitorController(BaseController):
class Meta:
label = 'monitor'
help = 'JCLOUD MONITOR API'
description = '''
            monitor CLI subcommands for the JD Cloud Monitor API.
            OpenAPI documentation: https://docs.jdcloud.com/cn/monitoring/api/overview
'''
stacked_on = 'base'
stacked_type = 'nested'
@expose(
arguments=[
(['--page-number'], dict(help="""(int) 当前所在页,默认为1 """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) 页面大小,默认为20;取值范围[1, 100] """, dest='pageSize', type=int, required=False)),
(['--service-code'], dict(help="""(string) 产品线标识,同一个产品线下可能存在多个product,如(redis下有redis2.8cluster、redis4.0) """, dest='serviceCode', required=False)),
(['--product'], dict(help="""(string) 产品标识,如redis下分多个产品(redis2.8cluster、redis4.0)。同时指定serviceCode与product时,product优先生效 """, dest='product', required=False)),
(['--dimension'], dict(help="""(string) 产品下的维度标识,指定dimension时必须指定product """, dest='dimension', required=False)),
(['--rule-name'], dict(help="""(string) 规则名称 """, dest='ruleName', required=False)),
(['--rule-type'], dict(help="""(int) 规则类型, 1表示资源监控,6表示站点监控,7表示可用性监控 """, dest='ruleType', type=int, required=False)),
(['--enabled'], dict(help="""(int) 规则状态:1为启用,0为禁用 """, dest='enabled', type=int, required=False)),
(['--rule-status'], dict(help="""(int) 资源的规则状态 2:报警、4:数据不足 """, dest='ruleStatus', type=int, required=False)),
(['--filters'], dict(help="""(array: filter) 服务码或资源Id列表; products - 产品product,精确匹配,支持多个; resourceIds - 资源Id,精确匹配,支持多个(必须指定serviceCode、product或dimension,否则该参数不生效); alarmIds - 规则id,精确匹配,支持多个 """, dest='filters', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Query the list of alarm rules ''',
        description='''
            Query the list of alarm rules.
            Example: jdc monitor describe-alarms
''',
)
def describe_alarms(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.DescribeAlarmsRequest import DescribeAlarmsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeAlarmsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--auto-scaling-policy-id'], dict(help="""(string) 弹性伸缩组Id。注:仅ag\asg产品线内部使用 """, dest='autoScalingPolicyId', required=False)),
(['--base-contact'], dict(help="""(array: baseContact) 告警通知联系人 """, dest='baseContact', required=False)),
(['--client-token'], dict(help="""(string) 幂等性校验参数,最长36位,若两个请求clientToken相等,则返回第一次创建的规则id,只创建一次规则 """, dest='clientToken', required=True)),
(['--dimension'], dict(help="""(string) 资源维度,可用的维度请使用 describeProductsForAlarm接口查询 """, dest='dimension', required=False)),
(['--enabled'], dict(help="""(int) 是否启用, 1表示启用规则,0表示禁用规则,默认为1 """, dest='enabled', type=int, required=False)),
(['--notice-option'], dict(help="""(array: noticeOption) 通知策略 """, dest='noticeOption', required=False)),
(['--product'], dict(help="""(string) 资源类型, 可用的资源类型列表请使用 describeProductsForAlarm接口查询。 """, dest='product', required=True)),
(['--resource-option'], dict(help="""(resourceOption) NA """, dest='resourceOption', required=True)),
(['--rule-name'], dict(help="""(string) 规则名称,规则名称,最大长度42个字符,只允许中英文、数字、''-''和"_" """, dest='ruleName', required=True)),
(['--rule-option'], dict(help="""(ruleOption) NA """, dest='ruleOption', required=True)),
(['--rule-type'], dict(help="""(string) 规则类型, 默认为resourceMonitor """, dest='ruleType', required=False)),
(['--tags'], dict(help="""(object) 资源维度,指定监控数据实例的维度标签,如resourceId=id。(请确认资源的监控数据带有该标签,否则规则会报数据不足) """, dest='tags', required=False)),
(['--web-hook-option'], dict(help="""(webHookOption) NA """, dest='webHookOption', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Create an alarm rule ''',
        description='''
            Create an alarm rule.
            Example: jdc monitor create-alarm --client-token xxx --product xxx --resource-option '{"":""}' --rule-name xxx --rule-option '{"":""}'
''',
)
def create_alarm(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.CreateAlarmRequest import CreateAlarmRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = CreateAlarmRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--alarm-id'], dict(help="""(string) 规则id """, dest='alarmId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Query alarm rule details ''',
        description='''
            Query alarm rule details.
            Example: jdc monitor describe-alarm --alarm-id xxx
''',
)
def describe_alarm(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.DescribeAlarmRequest import DescribeAlarmRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeAlarmRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--alarm-id'], dict(help="""(string) 规则id """, dest='alarmId', required=True)),
(['--auto-scaling-policy-id'], dict(help="""(string) 弹性伸缩组Id。注:仅ag\asg产品线内部使用 """, dest='autoScalingPolicyId', required=False)),
(['--base-contact'], dict(help="""(array: baseContact) 告警通知联系人 """, dest='baseContact', required=False)),
(['--dimension'], dict(help="""(string) 资源维度,可用的维度请使用 describeProductsForAlarm接口查询 """, dest='dimension', required=False)),
(['--enabled'], dict(help="""(int) 是否启用, 1表示启用规则,0表示禁用规则,默认为1 """, dest='enabled', type=int, required=False)),
(['--notice-option'], dict(help="""(array: noticeOption) 通知策略 """, dest='noticeOption', required=False)),
(['--product'], dict(help="""(string) 资源类型, 可用的资源类型列表请使用 describeProductsForAlarm接口查询。 """, dest='product', required=True)),
(['--resource-option'], dict(help="""(resourceOption) NA """, dest='resourceOption', required=True)),
(['--rule-name'], dict(help="""(string) 规则名称,规则名称,最大长度42个字符,只允许中英文、数字、''-''和"_" """, dest='ruleName', required=True)),
(['--rule-option'], dict(help="""(ruleOption) NA """, dest='ruleOption', required=True)),
(['--rule-type'], dict(help="""(string) 规则类型, 默认为resourceMonitor """, dest='ruleType', required=False)),
(['--tags'], dict(help="""(object) 资源维度,指定监控数据实例的维度标签,如resourceId=id。(请确认资源的监控数据带有该标签,否则规则会报数据不足) """, dest='tags', required=False)),
(['--web-hook-option'], dict(help="""(webHookOption) NA """, dest='webHookOption', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Modify an existing alarm rule ''',
        description='''
            Modify an existing alarm rule.
            Example: jdc monitor update-alarm --alarm-id xxx --product xxx --resource-option '{"":""}' --rule-name xxx --rule-option '{"":""}'
''',
)
def update_alarm(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.UpdateAlarmRequest import UpdateAlarmRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = UpdateAlarmRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--alarm-id'], dict(help="""(string) 规则id """, dest='alarmId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Delete alarm rules ''',
        description='''
            Delete alarm rules.
            Example: jdc monitor delete-alarms --alarm-id xxx
''',
)
def delete_alarms(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.DeleteAlarmsRequest import DeleteAlarmsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DeleteAlarmsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--alarm-id'], dict(help="""(string) 规则id """, dest='alarmId', required=True)),
(['--page-number'], dict(help="""(int) 当前所在页,默认为1 """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) 页面大小,默认为20;取值范围[1, 100] """, dest='pageSize', type=int, required=False)),
(['--reference-type'], dict(help="""(int) 联系人类型。0,联系人分组; 1,联系人 """, dest='referenceType', type=int, required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Query the alarm contacts of a rule ''',
        description='''
            Query the alarm contacts of a rule.
            Example: jdc monitor describe-alarm-contacts --alarm-id xxx
''',
)
def describe_alarm_contacts(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.DescribeAlarmContactsRequest import DescribeAlarmContactsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeAlarmContactsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--service-code'], dict(help="""(string) 产品线 """, dest='serviceCode', required=False)),
(['--product'], dict(help="""(string) 产品类型,如redis2.8cluster(集群)\redis2.8MS(主从)。当serviceCode与product同时指定时,product优先级更高 """, dest='product', required=False)),
(['--dimension'], dict(help="""(string) 产品维度,必须指定serviceCode或product才生效。 """, dest='dimension', required=False)),
(['--metric-type'], dict(help="""(int) metric类型,取值0、1;默认值:0(常规指标,用于控制台创建报警规则)、1(其它) """, dest='metricType', type=int, required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询可用于创建监控规则的指标列表,metric介绍:<a href="https://docs.jdcloud.com/cn/monitoring/metrics">Metrics</a> ''',
description='''
查询可用于创建监控规则的指标列表,metric介绍:<a href="https://docs.jdcloud.com/cn/monitoring/metrics">Metrics</a>。
示例: jdc monitor describe-metrics-for-alarm
''',
)
def describe_metrics_for_alarm(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.DescribeMetricsForAlarmRequest import DescribeMetricsForAlarmRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeMetricsForAlarmRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--service-code'], dict(help="""(string) 产品线,从产品线维度筛选 """, dest='serviceCode', required=False)),
(['--product'], dict(help="""(string) 产品类型,从产品维度筛选、如redis2.8cluster\redis2.8instance。当serviceCode与product同时指定时,product优先级更高 """, dest='product', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询可用于创建监控规则的产品列表 ''',
description='''
查询可用于创建监控规则的产品列表。
示例: jdc monitor describe-products-for-alarm
''',
)
def describe_products_for_alarm(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.DescribeProductsForAlarmRequest import DescribeProductsForAlarmRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeProductsForAlarmRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--alarm-ids'], dict(help="""(array: string) 告警规则的ID列表 """, dest='alarmIds', required=False)),
(['--state'], dict(help="""(int) 启用:1,禁用0, """, dest='state', type=int, required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 启用、禁用规则 ''',
description='''
启用、禁用规则。
示例: jdc monitor enable-alarms
''',
)
def enable_alarms(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.EnableAlarmsRequest import EnableAlarmsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = EnableAlarmsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--page-number'], dict(help="""(int) 当前所在页,默认为1 """, dest='pageNumber', type=int, required=False)),
(['--page-size'], dict(help="""(int) 页面大小,默认为20;取值范围[1, 100] """, dest='pageSize', type=int, required=False)),
(['--service-code'], dict(help="""(string) 产品线标识,同一个产品线下可能存在多个product,如(redis下有redis2.8cluster、redis4.0) """, dest='serviceCode', required=False)),
(['--product'], dict(help="""(string) 产品标识,默认返回该product下所有dimension的数据。eg:product=redis2.8cluster(redis2.8cluster产品下包含redis2.8-shard与redis2.8-proxy、redis2.8-instance多个维度)。 """, dest='product', required=False)),
(['--dimension'], dict(help="""(string) 维度标识、指定该参数时,查询只返回该维度的数据。如redis2.8cluster下存在实例、分片等多个维度 """, dest='dimension', required=False)),
(['--region'], dict(help="""(string) 根据region筛选对应region的资源的报警历史 """, dest='region', required=False)),
(['--is-alarming'], dict(help="""(int) 正在报警, 取值为1 """, dest='isAlarming', type=int, required=False)),
(['--status'], dict(help="""(int) 报警的状态,1为报警恢复、2为报警、4为报警恢复无数据 """, dest='status', type=int, required=False)),
(['--start-time'], dict(help="""(string) 开始时间 """, dest='startTime', required=False)),
(['--end-time'], dict(help="""(string) 结束时间 """, dest='endTime', required=False)),
(['--rule-type'], dict(help="""(int) 规则类型,默认查询1, 1表示资源监控,6表示站点监控,7表示可用性监控 """, dest='ruleType', type=int, required=False)),
(['--rule-name'], dict(help="""(string) 规则名称模糊搜索 """, dest='ruleName', required=False)),
(['--filters'], dict(help="""(array: filter) serviceCodes - 产品线servicecode,精确匹配,支持多个; resourceIds - 资源Id,精确匹配,支持多个(必须指定serviceCode才会在该serviceCode下根据resourceIds过滤,否则该参数不生效); alarmIds - 规则Id,精确匹配,支持多个 """, dest='filters', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询报警历史 ''',
description='''
查询报警历史。
示例: jdc monitor describe-alarm-history
''',
)
def describe_alarm_history(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.DescribeAlarmHistoryRequest import DescribeAlarmHistoryRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeAlarmHistoryRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--service-code'], dict(help="""(string) 资源的类型,取值vm, lb, ip, database 等。<a href="https://docs.jdcloud.com/cn/monitoring/api/describeservices?content=API&SOP=JDCloud">describeServices</a>:查询己接入云监控的产品线列表 """, dest='serviceCode', required=True)),
(['--dimension'], dict(help="""(string) NA """, dest='dimension', required=False)),
(['--type'], dict(help="""(int) metric的类型,取值0(控制台展示)、1(内部使用,控制台不展示)、2(所有).默认取0 """, dest='type', type=int, required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 根据产品线查询可用监控项列表,metric介绍:<a href="https://docs.jdcloud.com/cn/monitoring/metrics">Metrics</a> ''',
description='''
根据产品线查询可用监控项列表,metric介绍:<a href="https://docs.jdcloud.com/cn/monitoring/metrics">Metrics</a>。
示例: jdc monitor describe-metrics --service-code xxx
''',
)
def describe_metrics(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.DescribeMetricsRequest import DescribeMetricsRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeMetricsRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) 地域 Id """, dest='regionId', required=False)),
(['--metric'], dict(help="""(string) 监控项英文标识(id) """, dest='metric', required=True)),
(['--service-code'], dict(help="""(string) 资源的类型,取值vm, lb, ip, database 等。可用的serviceCode请使用describeServices接口查询 """, dest='serviceCode', required=True)),
(['--dimension'], dict(help="""(string) 资源的维度。serviceCode下可用的dimension请使用describeServices接口查询 """, dest='dimension', required=False)),
(['--resource-id'], dict(help="""(string) 资源的uuid,支持多个resourceId批量查询,每个id用竖线分隔。 如:id1|id2|id3|id4 """, dest='resourceId', required=True)),
(['--tags'], dict(help="""(array: tagFilter) 自定义标签 """, dest='tags', required=False)),
(['--start-time'], dict(help="""(string) 查询时间范围的开始时间, UTC时间,格式:2016-12-11T00:00:00+0800(早于30d时,将被重置为30d)(注意在url中+要转译为%2B故url中为2016-12-11T00:00:00%2B0800) """, dest='startTime', required=False)),
(['--end-time'], dict(help="""(string) 查询时间范围的结束时间, UTC时间,格式:2016-12-11T00:00:00+0800(为空时,将由startTime与timeInterval计算得出)(注意在url中+要转译为%2B故url中为2016-12-11T00:00:00%2B0800) """, dest='endTime', required=False)),
(['--time-interval'], dict(help="""(string) 查询的时间间隔,最大不超过30天,支持分钟级别,小时级别,天级别,例如:1m、1h、1d """, dest='timeInterval', required=False)),
(['--aggr-type'], dict(help="""(string) 聚合方式:max avg min等,用于不同维度之间聚合 """, dest='aggrType', required=False)),
(['--down-aggr-type'], dict(help="""(string) 聚合方式:max avg min等,用于将维度内一个周期数据聚合为一个点的聚合方式,默认last """, dest='downAggrType', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 根据不同的聚合方式将metric的数据聚合为一个点。downAggrType:last(最后一个点)、max(最大值)、min(最小值)、avg(平均值)。该接口返回值为上报metric的原始值,没有做单位转换。metric介绍:<a href="https://docs.jdcloud.com/cn/monitoring/metrics">Metrics</a> ''',
description='''
根据不同的聚合方式将metric的数据聚合为一个点。downAggrType:last(最后一个点)、max(最大值)、min(最小值)、avg(平均值)。该接口返回值为上报metric的原始值,没有做单位转换。metric介绍:<a href="https://docs.jdcloud.com/cn/monitoring/metrics">Metrics</a>。
示例: jdc monitor describe-one-data-point --metric xxx --service-code xxx --resource-id xxx
''',
)
def describe_one_data_point(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.DescribeOneDataPointRequest import DescribeOneDataPointRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeOneDataPointRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) 地域 Id """, dest='regionId', required=False)),
(['--metric'], dict(help="""(string) 监控项英文标识(id) """, dest='metric', required=True)),
(['--aggr-type'], dict(help="""(string) 聚合方式,用于不同时间轴上的聚合。如balance产品同一个resourceId下存在port=80和port=8080等多种维度。可选值参考:sum、avg、min、max """, dest='aggrType', required=False)),
(['--down-sample-type'], dict(help="""(string) 采样方式,用于在时间轴维度上将聚合周期内的数据聚合为一个点。可选值参考:sum(聚合周期内的数据求和)、avg(求平均)、last(最新值)、min(最小值)、max(最大值) """, dest='downSampleType', required=False)),
(['--start-time'], dict(help="""(string) 查询时间范围的开始时间, UTC时间,格式:2016-12-11T00:00:00+0800(注意在url中+要转译为%2B故url中为2016-12-11T00:00:00%2B0800) """, dest='startTime', required=False)),
(['--end-time'], dict(help="""(string) 查询时间范围的结束时间, UTC时间,格式:2016-12-11T00:00:00+0800(为空时,将由startTime与timeInterval计算得出)(注意在url中+要转译为%2B故url中为2016-12-11T00:00:00%2B0800) """, dest='endTime', required=False)),
(['--time-interval'], dict(help="""(string) 时间间隔:1h,6h,12h,1d,3d,7d,14d,固定时间间隔,timeInterval默认为1h,当前时间往 前1h """, dest='timeInterval', required=False)),
(['--tags'], dict(help="""(array: tagFilter) 监控指标数据的维度信息,根据tags来筛选指标数据不同的维度 """, dest='tags', required=False)),
(['--group-by'], dict(help="""(bool) 是否对查询的tags分组 """, dest='groupBy', required=False)),
(['--rate'], dict(help="""(bool) 是否求速率 """, dest='rate', required=False)),
(['--service-code'], dict(help="""(string) 资源的类型,取值vm, lb, ip, database 等,<a href="https://docs.jdcloud.com/cn/monitoring/api/describeservices?content=API&SOP=JDCloud">describeServices</a>:查询己接入云监控的产品线列表 """, dest='serviceCode', required=False)),
(['--dimension'], dict(help="""(string) 资源的维度。查询serviceCode下可用的维度请使用describeServices接口 """, dest='dimension', required=False)),
(['--resource-id'], dict(help="""(string) 资源的uuid """, dest='resourceId', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查看某资源单个监控项数据,metric介绍:<a href="https://docs.jdcloud.com/cn/monitoring/metrics">Metrics</a>,可以使用接口<a href="https://docs.jdcloud.com/cn/monitoring/metrics">describeMetrics</a>:查询产品线可用的metric列表。 ''',
description='''
            查看某资源单个监控项数据,metric介绍:<a href="https://docs.jdcloud.com/cn/monitoring/metrics">Metrics</a>,可以使用接口<a href="https://docs.jdcloud.com/cn/monitoring/metrics">describeMetrics</a>:查询产品线可用的metric列表。
示例: jdc monitor describe-metric-data --metric xxx --resource-id xxx
''',
)
def describe_metric_data(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.DescribeMetricDataRequest import DescribeMetricDataRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeMetricDataRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--filters'], dict(help="""(array: filter) 服务码列表; filter name 为serviceCodes表示查询多个产品线的规则 """, dest='filters', required=False)),
(['--product-type'], dict(help="""(int) 要查询的产品线类型 0:all 1:资源监控 2:其它 默认:1。若指定了查询的serviceCode,则忽略该参数 """, dest='productType', type=int, required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 查询监控图可用的产品线列表 ''',
description='''
查询监控图可用的产品线列表。
示例: jdc monitor describe-services
''',
)
def describe_services(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.DescribeServicesRequest import DescribeServicesRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeServicesRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--metric-data-list'], dict(help="""(array: metricDataCm) 数据参数 """, dest='metricDataList', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 该接口为自定义监控数据上报的接口,方便您将自己采集的时序数据上报到云监控。不同region域名上报不同region的数据,参考:<a href="https://docs.jdcloud.com/cn/monitoring/reporting-monitoring-data">调用说明</a>可上报原始数据和已聚合的统计数据。支持批量上报方式。单次请求最多包含 50 个数据点;数据大小不超过 256k。 ''',
description='''
            该接口为自定义监控数据上报的接口,方便您将自己采集的时序数据上报到云监控。不同region域名上报不同region的数据,参考:<a href="https://docs.jdcloud.com/cn/monitoring/reporting-monitoring-data">调用说明</a>可上报原始数据和已聚合的统计数据。支持批量上报方式。单次请求最多包含 50 个数据点;数据大小不超过 256k。
            示例: jdc monitor put-metric-data
''',
)
    def put_metric_data(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.PutMetricDataRequest import PutMetricDataRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = PutMetricDataRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--metric-data-list'], dict(help="""(array: metricDataCm) 数据参数 """, dest='metricDataList', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 该接口为自定义监控数据上报的接口,方便您将自己采集的时序数据上报到云监控。不同region域名上报不同region的数据,参考:<a href="https://docs.jdcloud.com/cn/monitoring/reporting-monitoring-data">调用说明</a>可上报原始数据和已聚合的统计数据。支持批量上报方式。单次请求最多包含 50 个数据点;数据大小不超过 256k。 ''',
description='''
            该接口为自定义监控数据上报的接口,方便您将自己采集的时序数据上报到云监控。不同region域名上报不同region的数据,参考:<a href="https://docs.jdcloud.com/cn/monitoring/reporting-monitoring-data">调用说明</a>可上报原始数据和已聚合的统计数据。支持批量上报方式。单次请求最多包含 50 个数据点;数据大小不超过 256k。
示例: jdc monitor put-custom-metric-data
''',
)
def put_custom_metric_data(self):
client_factory = ClientFactory('monitor')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.monitor.apis.PutCustomMetricDataRequest import PutCustomMetricDataRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = PutCustomMetricDataRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
            (['--api'], dict(help="""(string) api name """, choices=['describe-alarms','create-alarm','describe-alarm','update-alarm','delete-alarms','describe-alarm-contacts','describe-metrics-for-alarm','describe-products-for-alarm','enable-alarms','describe-alarm-history','describe-metrics','describe-one-data-point','describe-metric-data','describe-services','put-metric-data','put-custom-metric-data',], required=True)),
],
formatter_class=RawTextHelpFormatter,
help=''' 生成单个API接口的json骨架空字符串 ''',
description='''
生成单个API接口的json骨架空字符串。
            示例: jdc monitor generate-skeleton --api describe-alarms ''',
)
def generate_skeleton(self):
skeleton = Skeleton('monitor', self.app.pargs.api)
skeleton.show()
|
import csv
import argparse
import sys
def get_args(argv):
"""
Parse arguments from command line
"""
args = argparse.ArgumentParser()
args.add_argument('-u', required = True,
metavar = '<bed>',
help = 'Patcher output')
args.add_argument('-r1', required = True,
metavar = '<bed>',
help = 'Sim3C R1')
args.add_argument('-r2', required = True,
metavar = '<bed>',
help = 'Sim3C R2')
args.add_argument('-wm', action = 'store_true',
help = 'Write matched reads to file (default: off)')
args.add_argument('-wu', action = 'store_true',
help = 'Write unmatched reads to file (default: off)')
    return args.parse_args()
def match_reads(u_file, r1_file, r2_file, write_m, write_u):
"""
Match IDs between PAtChER reads and Sim3C reads
Count how many start at the same position
"""
u = open(u_file, mode='r')
r1 = open(r1_file, mode='r')
r2 = open(r2_file, mode='r')
if write_m:
wm = open(u_file.replace('.bed', '_matched.bed'), mode='w')
wm_csv = csv.writer(wm, delimiter='\t')
if write_u:
wu = open(u_file.replace('.bed', '_unmatched.bed'), mode='w')
wu_csv = csv.writer(wu, delimiter='\t')
wu_csv.writerow(['p.chr','p.start','p.end','id','s.chr','s.start','s.end','dist'])
u_csv = csv.reader(u, delimiter='\t')
r1_csv = csv.reader(r1, delimiter='\t')
r2_csv = csv.reader(r2, delimiter='\t')
tcount, mcount = 0, 0
    r1_row = next(r1_csv)
    r2_row = next(r2_csv)
for row in u_csv:
rid = row[3].split('_')
rnum = rid[1].split(':')[0]
if rnum == '1': #look in R1 file
while True:
if r1_row[3] == row[3]:
if r1_row[0] == row[0]:
dist = abs(int(row[1])-int(r1_row[1]))
else:
dist = 1e9
break
                r1_row = next(r1_csv)
else: #look in R2 file
while True:
if r2_row[3] == row[3]:
if r2_row[0] == row[0]:
dist = abs(int(row[2])-int(r2_row[2]))
else:
dist = 1e9
break
                r2_row = next(r2_csv)
tcount += 1
if dist == 0:
mcount += 1
if write_m:
wm_csv.writerow(row)
elif write_u:
if rnum == '1':
wu_csv.writerow(row[0:4] + r1_row[0:3] + [dist])
else:
wu_csv.writerow(row[0:4] + r2_row[0:3] + [dist])
print('File: ' + u_file)
print(f'Total PAtChER reads: {tcount}')
print(f'Total matched reads: {mcount}')
u.close()
r1.close()
r2.close()
if write_m:
wm.close()
if write_u:
wu.close()
if __name__ == '__main__':
args = get_args(sys.argv[1:])
match_reads(u_file=args.u, r1_file=args.r1, r2_file=args.r2, write_m=args.wm, write_u=args.wu)
|
from costf import calculate_cost
def schedule_permutation(num_phar):
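    """Yield every split of ``num_phar`` into five positive integer parts [i, j, k, l, m]."""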
for i in range(1, num_phar - 4 + 1):
for j in range(1, num_phar - i - 3 + 1):
for k in range(1, num_phar - i - j - 2 + 1):
for l in range(1, num_phar - i - j - k - 1 + 1):
m = num_phar - i - j - k - l
yield [i, j, k, l, m]
def naivepermutation():
best = float('inf')
for i in schedule_permutation(11):
cost = calculate_cost(i)
if cost < best:
best = cost
print(i, cost)
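# Minimal usage sketch (an assumption: costf.calculate_cost accepts a 5-element
# schedule list, as it is called above). Running this module directly enumerates
# every 5-way split of 11 and prints each new best-cost schedule found.
if __name__ == '__main__':
    naivepermutation()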
|
import torch
def compute_pairwise_cosine_distances(minibatch_embeddings, full_matrix=False):
    # cosine_distance = 1 - cosine_similarity
    # cosine_similarity(A, B) = cos(theta) = (A . B) / (||A|| * ||B||)
    # constraining the embeddings to the unit hypersphere makes every norm 1,
    # so the full similarity matrix reduces to a single matrix multiplication A @ A^T
D = 1 - torch.mm(minibatch_embeddings, torch.transpose(minibatch_embeddings, 0, 1))
if not full_matrix:
tri_idx = torch.triu_indices(minibatch_embeddings.shape[0],minibatch_embeddings.shape[0],1)
pairwise_dist_vector = D[tri_idx[0],tri_idx[1]]
return pairwise_dist_vector
else:
return D
def compute_pairwise_euclidean_distances(minibatch_embeddings, d, n, full_matrix=False ):
# as per https://www.robots.ox.ac.uk/~albanie/notes/Euclidean_distance_trick.pdf alg.1
X_view1 = minibatch_embeddings.reshape(d, n, 1)
X_view2 = minibatch_embeddings.reshape(d,1,n)
diff_mat = X_view1-X_view2
D = torch.sum(diff_mat**2,dim=0)
if not full_matrix:
tri_idx = torch.triu_indices(n,n,1)
pairwise_dist_vector = D[tri_idx[0],tri_idx[1]]
return torch.sqrt(pairwise_dist_vector)
else :
return torch.sqrt(D)
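# Illustrative usage sketch (not part of the original module). The cosine variant
# assumes L2-normalised row embeddings; the Euclidean variant expects a (d, n)
# feature-by-sample matrix together with explicit d and n.
if __name__ == "__main__":
    n, d = 8, 16
    emb = torch.nn.functional.normalize(torch.randn(n, d), dim=1)
    print(compute_pairwise_cosine_distances(emb).shape)                 # n*(n-1)/2 pairs
    print(compute_pairwise_euclidean_distances(emb.T, d=d, n=n).shape)  # n*(n-1)/2 pairs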
|
genetic_code = { 'ttt': 'F', 'tct': 'S', 'tat': 'Y', 'tgt': 'C',
'ttc': 'F', 'tcc': 'S', 'tac': 'Y', 'tgc': 'C',
'tta': 'L', 'tca': 'S', 'taa': '*', 'tga': '*',
'ttg': 'L', 'tcg': 'S', 'tag': '*', 'tgg': 'W',
'ctt': 'L', 'cct': 'P', 'cat': 'H', 'cgt': 'R',
'ctc': 'L', 'ccc': 'P', 'cac': 'H', 'cgc': 'R',
'cta': 'L', 'cca': 'P', 'caa': 'Q', 'cga': 'R',
'ctg': 'L', 'ccg': 'P', 'cag': 'Q', 'cgg': 'R',
'att': 'I', 'act': 'T', 'aat': 'N', 'agt': 'S',
'atc': 'I', 'acc': 'T', 'aac': 'N', 'agc': 'S',
'ata': 'I', 'aca': 'T', 'aaa': 'K', 'aga': 'R',
'atg': 'M', 'acg': 'T', 'aag': 'K', 'agg': 'R',
'gtt': 'V', 'gct': 'A', 'gat': 'D', 'ggt': 'G',
'gtc': 'V', 'gcc': 'A', 'gac': 'D', 'ggc': 'G',
'gta': 'V', 'gca': 'A', 'gaa': 'E', 'gga': 'G',
'gtg': 'V', 'gcg': 'A', 'gag': 'E', 'ggg': 'G'
}
def translate(nuc_seq, code):
prot_seq = ''
n = 0
    # compute the number of complete codons once instead of recomputing
    # len(nuc_seq) / 3 on every loop iteration, which could be expensive for a
    # very long sequence; integer division also discards any trailing partial codon
    cycle = len(nuc_seq) // 3
while n < cycle:
start = n * 3
end = start + 3
codon = nuc_seq[start:end]
codon = codon.lower()
if codon in code:
prot_seq += code[codon]
else:
raise RuntimeError("unknow codon: " + codon)
n += 1
return prot_seq
def translate2(nuc_seq, code, phase = 1):
prot_seq = ''
if 0 < phase < 4 :
start = phase - 1
elif -4 < phase < 0:
start = -phase - 1
nuc_seq = nuc_seq[::-1]
    # another way to determine when the loop should stop
stop_iteration = len(nuc_seq)
while (start + 2) < stop_iteration:
end = start + 3
codon = nuc_seq[start:end].lower()
if codon in code:
prot_seq += code[codon]
else:
raise RuntimeError("unknow codon")
start += 3
return prot_seq
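# Illustrative usage (not part of the original module): 'ATGGCC' decodes to
# Met-Ala ('MA') with the standard genetic code table defined above, and
# translate2 with phase=2 skips the first base before decoding.
if __name__ == '__main__':
    print(translate('ATGGCC', genetic_code))             # -> MA
    print(translate2('ATGGCCA', genetic_code, phase=2))  # -> WP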
|
from typing import Any, Callable, List, Dict, Union, Optional, Sequence, Tuple
from numpy import ndarray
from collections import OrderedDict
from scipy import sparse
import os
import sklearn
import numpy
import typing
# Custom import commands if any
from sklearn.preprocessing import StandardScaler
from d3m.container.numpy import ndarray as d3m_ndarray
from d3m.container import DataFrame as d3m_dataframe
from d3m.metadata import hyperparams, params, base as metadata_base
from d3m import utils
from d3m.base import utils as base_utils
from d3m.exceptions import PrimitiveNotFittedError
from d3m.primitive_interfaces.base import CallResult, DockerContainer
# from d3m.primitive_interfaces.supervised_learning import SupervisedLearnerPrimitiveBase
from d3m.primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
from d3m.primitive_interfaces.base import ProbabilisticCompositionalityMixin, ContinueFitMixin
from d3m import exceptions
import pandas
from d3m import container, utils as d3m_utils
import uuid
Inputs = d3m_dataframe
# Inputs = container.Dataset
Outputs = d3m_dataframe
__all__ = ('SKStandardScalerPrimitive',)
class Params(params.Params):
scale_: Optional[ndarray]
mean_: Optional[ndarray]
var_: Optional[ndarray]
n_samples_seen_: Optional[numpy.int64]
# Keep previous
input_column_names: Optional[Any]
target_names_: Optional[Sequence[Any]]
training_indices_: Optional[Sequence[int]]
target_column_indices_: Optional[Sequence[int]]
target_columns_metadata_: Optional[List[OrderedDict]]
class Hyperparams(hyperparams.Hyperparams):
# Added by Guanchu
with_mean = hyperparams.UniformBool(
default=True,
description='If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
)
with_std = hyperparams.UniformBool(
default=True,
description='If True, scale the data to unit variance (or equivalently, unit standard deviation).',
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
)
# copy = hyperparams.UniformBool(
# default=True,
# description='If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned.',
# semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
# )
# Keep previous
use_columns = hyperparams.Set(
elements=hyperparams.Hyperparameter[int](-1),
default=(),
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
)
exclude_columns = hyperparams.Set(
elements=hyperparams.Hyperparameter[int](-1),
default=(),
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="A set of column indices to not operate on. Applicable only if \"use_columns\" is not provided.",
)
return_result = hyperparams.Enumeration(
values=['append', 'replace', 'new'],
default='new',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Should parsed columns be appended, should they replace original columns, or should only parsed columns be returned? This hyperparam is ignored if use_semantic_types is set to false.",
)
use_semantic_types = hyperparams.UniformBool(
default=False,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Controls whether semantic_types metadata will be used for filtering columns in input dataframe. Setting this to false makes the code ignore return_result and will produce only the output dataframe"
)
add_index_columns = hyperparams.UniformBool(
default=False,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Also include primary index columns if input data has them. Applicable only if \"return_result\" is set to \"new\".",
)
error_on_no_input = hyperparams.UniformBool(
default=True,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Throw an exception if no input column is selected/provided. Defaults to true to behave like sklearn. To prevent pipelines from breaking set this to False.",
)
return_semantic_type = hyperparams.Enumeration[str](
values=['https://metadata.datadrivendiscovery.org/types/Attribute',
'https://metadata.datadrivendiscovery.org/types/ConstructedAttribute'],
default='https://metadata.datadrivendiscovery.org/types/Attribute',
description='Decides what semantic type to attach to generated attributes',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']
)
class SKStandardScalerPrimitive(UnsupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, Hyperparams]):
"""
Standardize features by removing the mean and scaling to unit variance.
See `sklearn documentation <https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html?highlight=standardscaler#sklearn.preprocessing.StandardScaler>`_ for more details.
Parameters
----------
with_mean : bool
If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory.
with_std : bool
If True, scale the data to unit variance (or equivalently, unit standard deviation).
Attributes
----------
scale_: ndarray or None, shape (n_features,)
Per feature relative scaling of the data. This is calculated using np.sqrt(var_). Equal to None when with_std=False.
mean_: ndarray or None, shape (n_features,)
The mean value for each feature in the training set. Equal to None when with_mean=False.
var_: ndarray or None, shape (n_features,)
The variance for each feature in the training set. Used to compute scale_. Equal to None when with_std=False.
n_samples_seen_: int or array, shape (n_features,)
The number of samples processed by the estimator for each feature. If there are not missing samples, the n_samples_seen will be an integer, otherwise it will be an array. Will be reset on new calls to fit, but increments across partial_fit calls.
"""
metadata = metadata_base.PrimitiveMetadata({
"__author__": "DATA Lab @Taxes A&M University",
"name": "Standard_scaler",
"python_path": "d3m.primitives.tods.timeseries_processing.transformation.standard_scaler",
"source": {
'name': "DATA Lab @ Taxes A&M University",
'contact': 'mailto:khlai037@tamu.edu',
},
"hyperparams_to_tune": ['with_mean', 'with_std'],
"algorithm_types": [
metadata_base.PrimitiveAlgorithmType.TODS_PRIMITIVE,
],
"primitive_family": metadata_base.PrimitiveFamily.DATA_TRANSFORMATION,
"version": "0.0.1",
"id": str(uuid.uuid3(uuid.NAMESPACE_DNS, 'SKStandardScaler')),
})
def __init__(self, *,
hyperparams: Hyperparams,
random_seed: int = 0,
docker_containers: Dict[str, DockerContainer] = None) -> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed, docker_containers=docker_containers)
# False
self._clf = StandardScaler(with_mean=self.hyperparams['with_mean'],
with_std=self.hyperparams['with_std'],
# copy=self.hyperparams['copy'],
)
self._inputs = None
self._outputs = None
self._training_inputs = None
self._training_outputs = None
self._target_names = None
self._training_indices = None
self._target_column_indices = None
self._target_columns_metadata: List[OrderedDict] = None
self._input_column_names = None
self._fitted = False
# print(self._clf.get_params(deep=True))
# print(getattr(self._clf, 'lambdas_'))
# print(dir(self._clf))
def set_training_data(self, *, inputs: Inputs) -> None:
"""
Set training data for Standardizer.
Args:
inputs: Container DataFrame
Returns:
None
"""
self._inputs = inputs
self._fitted = False
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
"""
Fit model with training data.
Args:
            None. Training data is provided in advance via set_training_data.
Returns:
None
"""
if self._fitted: # pragma: no cover
return CallResult(None)
self._training_inputs, self._training_indices = self._get_columns_to_fit(self._inputs, self.hyperparams)
self._input_column_names = self._training_inputs.columns
if self._training_inputs is None: # pragma: no cover
return CallResult(None)
if len(self._training_indices) > 0:
self._clf.fit_transform(self._training_inputs)
self._fitted = True
else: # pragma: no cover
if self.hyperparams['error_on_no_input']:
raise RuntimeError("No input columns were selected")
            self.logger.warning("No input columns were selected")
# print(self._training_inputs.std())
return CallResult(None)
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
"""
Process the testing data.
Args:
            inputs: Container DataFrame. Time series data to standardize.
        Returns:
            Container DataFrame after standardization.
"""
if not self._fitted:
raise PrimitiveNotFittedError("Primitive not fitted.")
sk_inputs = inputs
if self.hyperparams['use_semantic_types']: # pragma: no cover
sk_inputs = inputs.iloc[:, self._training_indices]
output_columns = []
if len(self._training_indices) > 0:
sk_output = self._clf.transform(sk_inputs)
if sparse.issparse(sk_output): # pragma: no cover
sk_output = sk_output.toarray()
outputs = self._wrap_predictions(inputs, sk_output)
if len(outputs.columns) == len(self._input_column_names):
outputs.columns = self._input_column_names
output_columns = [outputs]
else: # pragma: no cover
if self.hyperparams['error_on_no_input']:
raise RuntimeError("No input columns were selected")
            self.logger.warning("No input columns were selected")
# print(outputs.metadata.to_internal_simple_structure())
outputs = base_utils.combine_columns(return_result=self.hyperparams['return_result'],
add_index_columns=self.hyperparams['add_index_columns'],
inputs=inputs, column_indices=self._training_indices,
columns_list=output_columns)
# print(inputs)
# print(outputs)
# print(inputs.metadata.to_internal_simple_structure())
# print(outputs.metadata.to_internal_simple_structure())
return CallResult(outputs)
def get_params(self) -> Params:
"""
Return parameters.
Args:
None
Returns:
class Params
"""
if not self._fitted:
return Params(
scale_=None,
mean_=None,
var_=None,
                n_samples_seen_=None,
# Keep previous
input_column_names=self._input_column_names,
training_indices_=self._training_indices,
target_names_=self._target_names,
target_column_indices_=self._target_column_indices,
target_columns_metadata_=self._target_columns_metadata
)
# print(self._clf.n_samples_seen_.shape)
# print(type(self._clf.n_samples_seen_))
# print(type(self._clf.mean_))
return Params(
scale_=getattr(self._clf, 'scale_', None),
mean_=getattr(self._clf, 'mean_', None),
var_=getattr(self._clf, 'var_', None),
n_samples_seen_=getattr(self._clf, 'n_samples_seen_', None),
# Keep previous
input_column_names=self._input_column_names,
training_indices_=self._training_indices,
target_names_=self._target_names,
target_column_indices_=self._target_column_indices,
target_columns_metadata_=self._target_columns_metadata,
)
def set_params(self, *, params: Params) -> None:
"""
Set parameters for Standardizer.
Args:
params: class Params
Returns:
None
"""
self._clf.scale_ = params['scale_']
self._clf.mean_ = params['mean_']
self._clf.var_ = params['var_']
self._clf.n_samples_seen_ = params['n_samples_seen_']
# Keep previous
self._input_column_names = params['input_column_names']
self._training_indices = params['training_indices_']
self._target_names = params['target_names_']
self._target_column_indices = params['target_column_indices_']
self._target_columns_metadata = params['target_columns_metadata_']
if params['scale_'] is not None:
self._fitted = True
if params['mean_'] is not None:
self._fitted = True
if params['var_'] is not None:
self._fitted = True
if params['n_samples_seen_'] is not None:
self._fitted = True
@classmethod
def _get_columns_to_fit(cls, inputs: Inputs, hyperparams: Hyperparams): # pragma: no cover
"""
Select columns to fit.
Args:
inputs: Container DataFrame
hyperparams: d3m.metadata.hyperparams.Hyperparams
Returns:
list
"""
if not hyperparams['use_semantic_types']:
return inputs, list(range(len(inputs.columns)))
inputs_metadata = inputs.metadata
def can_produce_column(column_index: int) -> bool:
return cls._can_produce_column(inputs_metadata, column_index, hyperparams)
columns_to_produce, columns_not_to_produce = base_utils.get_columns_to_use(inputs_metadata,
use_columns=hyperparams['use_columns'],
exclude_columns=hyperparams[
'exclude_columns'],
can_use_column=can_produce_column)
return inputs.iloc[:, columns_to_produce], columns_to_produce
# return columns_to_produce
@classmethod
def _can_produce_column(cls, inputs_metadata: metadata_base.DataMetadata, column_index: int,
hyperparams: Hyperparams) -> bool: # pragma: no cover
"""
Output whether a column can be processed.
Args:
inputs_metadata: d3m.metadata.base.DataMetadata
column_index: int
Returns:
bool
"""
column_metadata = inputs_metadata.query((metadata_base.ALL_ELEMENTS, column_index))
accepted_structural_types = (int, float, numpy.integer, numpy.float64)
accepted_semantic_types = set()
accepted_semantic_types.add("https://metadata.datadrivendiscovery.org/types/Attribute")
if not issubclass(column_metadata['structural_type'], accepted_structural_types):
return False
semantic_types = set(column_metadata.get('semantic_types', []))
# print(semantic_types)
if len(semantic_types) == 0:
cls.logger.warning("No semantic types found in column metadata")
return False
# Making sure all accepted_semantic_types are available in semantic_types
if len(accepted_semantic_types - semantic_types) == 0:
return True
return False
@classmethod
def _get_target_columns_metadata(cls, outputs_metadata: metadata_base.DataMetadata, hyperparams) -> List[OrderedDict]: # pragma: no cover
"""
Output metadata of selected columns.
Args:
outputs_metadata: metadata_base.DataMetadata
hyperparams: d3m.metadata.hyperparams.Hyperparams
Returns:
d3m.metadata.base.DataMetadata
"""
outputs_length = outputs_metadata.query((metadata_base.ALL_ELEMENTS,))['dimension']['length']
target_columns_metadata: List[OrderedDict] = []
for column_index in range(outputs_length):
column_metadata = OrderedDict(outputs_metadata.query_column(column_index))
# Update semantic types and prepare it for predicted targets.
semantic_types = set(column_metadata.get('semantic_types', []))
semantic_types_to_remove = set([])
            add_semantic_types = set()
add_semantic_types.add(hyperparams["return_semantic_type"])
semantic_types = semantic_types - semantic_types_to_remove
semantic_types = semantic_types.union(add_semantic_types)
column_metadata['semantic_types'] = list(semantic_types)
target_columns_metadata.append(column_metadata)
return target_columns_metadata
@classmethod
def _update_predictions_metadata(cls, inputs_metadata: metadata_base.DataMetadata, outputs: Optional[Outputs],
target_columns_metadata: List[OrderedDict]) -> metadata_base.DataMetadata: # pragma: no cover
"""
        Update metadata for the selected columns.
Args:
inputs_metadata: metadata_base.DataMetadata
outputs: Container Dataframe
target_columns_metadata: list
Returns:
d3m.metadata.base.DataMetadata
"""
outputs_metadata = metadata_base.DataMetadata().generate(value=outputs)
for column_index, column_metadata in enumerate(target_columns_metadata):
column_metadata.pop("structural_type", None)
outputs_metadata = outputs_metadata.update_column(column_index, column_metadata)
return outputs_metadata
def _wrap_predictions(self, inputs: Inputs, predictions: ndarray) -> Outputs: # pragma: no cover
"""
Wrap predictions into dataframe
Args:
inputs: Container Dataframe
predictions: array-like data (n_samples, n_features)
Returns:
Dataframe
"""
outputs = d3m_dataframe(predictions, generate_metadata=True)
target_columns_metadata = self._copy_inputs_metadata(inputs.metadata, self._training_indices, outputs.metadata,
self.hyperparams)
outputs.metadata = self._update_predictions_metadata(inputs.metadata, outputs, target_columns_metadata)
# print(outputs.metadata.to_internal_simple_structure())
return outputs
@classmethod
def _copy_inputs_metadata(cls, inputs_metadata: metadata_base.DataMetadata, input_indices: List[int],
outputs_metadata: metadata_base.DataMetadata, hyperparams): # pragma: no cover
"""
        Update metadata for the selected columns.
Args:
inputs_metadata: metadata.base.DataMetadata
input_indices: list
outputs_metadata: metadata.base.DataMetadata
hyperparams: d3m.metadata.hyperparams.Hyperparams
Returns:
d3m.metadata.base.DataMetadata
"""
outputs_length = outputs_metadata.query((metadata_base.ALL_ELEMENTS,))['dimension']['length']
target_columns_metadata: List[OrderedDict] = []
for column_index in input_indices:
column_name = inputs_metadata.query((metadata_base.ALL_ELEMENTS, column_index)).get("name")
if column_name is None:
column_name = "output_{}".format(column_index)
column_metadata = OrderedDict(inputs_metadata.query_column(column_index))
semantic_types = set(column_metadata.get('semantic_types', []))
semantic_types_to_remove = set([])
add_semantic_types = set()
add_semantic_types.add(hyperparams["return_semantic_type"])
semantic_types = semantic_types - semantic_types_to_remove
semantic_types = semantic_types.union(add_semantic_types)
column_metadata['semantic_types'] = list(semantic_types)
column_metadata["name"] = str(column_name)
target_columns_metadata.append(column_metadata)
# If outputs has more columns than index, add Attribute Type to all remaining
if outputs_length > len(input_indices):
for column_index in range(len(input_indices), outputs_length):
column_metadata = OrderedDict()
semantic_types = set()
semantic_types.add(hyperparams["return_semantic_type"])
column_name = "output_{}".format(column_index)
column_metadata["semantic_types"] = list(semantic_types)
column_metadata["name"] = str(column_name)
target_columns_metadata.append(column_metadata)
return target_columns_metadata
SKStandardScalerPrimitive.__doc__ = SKStandardScalerPrimitive.__doc__
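# Illustrative end-to-end sketch (an assumption-laden example, not part of the
# original module): build a tiny numeric d3m DataFrame, fit the primitive with
# its default hyperparameters, and print the standardized output.
if __name__ == "__main__":
    _df = d3m_dataframe(pandas.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]}),
                        generate_metadata=True)
    _primitive = SKStandardScalerPrimitive(hyperparams=Hyperparams.defaults())
    _primitive.set_training_data(inputs=_df)
    _primitive.fit()
    print(_primitive.produce(inputs=_df).value)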
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
import busio
import neopixel
from adafruit_seesaw.seesaw import Seesaw
from digitalio import DigitalInOut
import adafruit_requests as requests
import adafruit_esp32spi.adafruit_esp32spi_socket as socket
from adafruit_esp32spi import adafruit_esp32spi
URL = "https://api.thingspeak.com/update?api_key={token}&field1={value}"
i2c = busio.I2C(board.GP9, board.GP8)
ss = Seesaw(i2c, addr=0x36)
pixel_pin = board.GP0
num_pixels = 1
ORDER = neopixel.RGBW
pixels = neopixel.NeoPixel(
pixel_pin, num_pixels, brightness=0.2, auto_write=False, pixel_order=ORDER
)
esp32_cs = DigitalInOut(board.GP7)
esp32_ready = DigitalInOut(board.GP10)
esp32_reset = DigitalInOut(board.GP11)
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
spi = busio.SPI(board.GP18, board.GP19, board.GP16)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
requests.set_socket(socket, esp)
if esp.status == adafruit_esp32spi.WL_IDLE_STATUS:
print("ESP32 found and in idle mode")
print("Firmware vers.", esp.firmware_version)
print("MAC addr:", [hex(i) for i in esp.MAC_address])
print("Connecting to AP...")
while not esp.is_connected:
try:
esp.connect_AP(secrets["ssid"], secrets["password"])
except RuntimeError as e:
print("could not connect to AP, retrying: ", e)
continue
print("Connected to", str(esp.ssid, "utf-8"), "\tRSSI:", esp.rssi)
print("My IP address is", esp.pretty_ip(esp.ip_address))
token = secrets["thingspeak_token"]
while True:
# read moisture level
touch = ss.moisture_read()
# read temperature from the temperature sensor
temp = ss.get_temp()
print("temp: " + str(temp) + " moisture: " + str(touch))
if touch < 500:
pixels.fill((0, 0, 255, 0))
pixels.show()
time.sleep(0.250)
pixels.fill((0, 0, 0, 0))
pixels.show()
time.sleep(0.250)
try:
r = requests.get(URL.format(token=token, value=str(touch)))
    except Exception as e:
        print("Failed to publish value:", e)
else:
print("-" * 40)
print(r.json())
print("-" * 40)
r.close()
time.sleep(2)
|
"""
This file is part of python-tdbus. Python-tdbus is free software
available under the terms of the MIT license. See the file "LICENSE" that
was provided together with this source file for the licensing terms.
Copyright (c) 2012 the python-tdbus authors. See the file "AUTHORS" for a
complete list.
"""
import unittest
from tdbus import SimpleDBusConnection, GEventDBusConnection, DBUS_BUS_SESSION
from .base import BaseTest
class TestSimpleDBusConnection(unittest.TestCase, BaseTest):
"""Test suite for D-BUS connection."""
def test_connection_open(self):
conn = SimpleDBusConnection(DBUS_BUS_SESSION)
conn.open(DBUS_BUS_SESSION)
conn.close()
def test_connection_init(self):
conn = SimpleDBusConnection(DBUS_BUS_SESSION)
conn.close()
def test_connection_multiple_open(self):
conn = SimpleDBusConnection(DBUS_BUS_SESSION)
conn.close()
conn.open(DBUS_BUS_SESSION)
conn.close()
def test_get_unique_name(self):
conn = SimpleDBusConnection(DBUS_BUS_SESSION)
name = conn.get_unique_name()
assert name.startswith(':')
conn.close()
class TestGeventDBusConnection(unittest.TestCase, BaseTest):
"""Test suite for D-BUS connection."""
def test_connection_open(self):
conn = GEventDBusConnection(DBUS_BUS_SESSION)
conn.open(DBUS_BUS_SESSION)
conn.close()
def test_connection_init(self):
conn = GEventDBusConnection(DBUS_BUS_SESSION)
conn.close()
def test_connection_multiple_open(self):
conn = GEventDBusConnection(DBUS_BUS_SESSION)
conn.close()
conn.open(DBUS_BUS_SESSION)
conn.close()
def test_get_unique_name(self):
conn = GEventDBusConnection(DBUS_BUS_SESSION)
name = conn.get_unique_name()
assert name.startswith(':')
conn.close()
|
'''
Complete the pro-level assignment:
write a "Guess the number" program in which the number is found through a series of leading questions.
'''
# https://github.com/vv31415926/python_lesson_02_Pro
# Guess the number
import random
num = random.randint( 1,1000000 )
print('The player does not see this: the secret number is ' + str(num))
print('Guess the positive integer chosen by the computer. (Enter 0 to give up.)')
count=1
while True:
    s = input( 'Attempt {}: '.format(count) )
    if not s.isdigit():
        print( 'Input error. Please enter a positive integer.')
        continue
    n = int( s )
    if n == 0:
        print( 'You chose not to guess the number... Goodbye!')
        break
    if n > num:
        print( 'Too high')
    elif n < num:
        print( 'Too low')
    else:
        print( 'The secret number {} was guessed in {} attempts!'.format( n,count ))
break
count += 1
|
import threading
import os
from boottest import local_dict
from datetime import datetime, timedelta
from functools import wraps
import traceback
import json
import hashlib
from django.conf import settings
__all__ = [
'STATE_DEBUG', 'STATE_INFO', 'STATE_WARNING', 'STATE_ERROR',
'operation_writer',
'except_captured',
'record_traceback',
]
# Status-level constants
STATE_DEBUG = 'Debug'
STATE_INFO = 'Info'
STATE_WARNING = 'Warning'
STATE_ERROR = 'Error'
# Thread lock guaranteeing exclusive access when writing log files
__lock = threading.RLock()
# Minimum level that will be recorded
__log_level = STATE_INFO
# Log file layout
__log_root = "logstore"
if not os.path.exists(__log_root):
os.mkdir(__log_root)
__log_root_path = os.path.join(os.getcwd(), __log_root)
if os.getenv("YPPF_ENV") in ["PRODUCT", "TEST"]:
__log_root_path = os.environ["YPPF_LOG_DIR"]
__log_user = "user_detail"
if not os.path.exists(os.path.join(__log_root_path, __log_user)):
os.mkdir(os.path.join(__log_root_path, __log_user))
__log_user_path = os.path.join(__log_root_path, __log_user)
__log_detailed_path = os.path.join(__log_root_path, "traceback_record")
def status_enabled(status_code: str):
    # TODO: incomplete, needs refinement
level_up = [STATE_DEBUG, STATE_INFO, STATE_WARNING, STATE_ERROR]
try:
return level_up.index(status_code) >= level_up.index(__log_level)
except:
return False
# Generic log writer: records the time (datetime.now()), the acting subject (Sid),
# a description of the operation (str) and the function that wrote it (str).
# Arguments: the first is the Sid, which is also the file name; the second is the
# message; the third is the name (category) of the originating function.
# For system-level entries, pass local_dict["system_log"] as the user.
def operation_writer(user, message, source=None, status_code: str=STATE_INFO):
if not status_enabled(status_code):
return
__lock.acquire()
try:
timestamp = str(datetime.now())
source = str(source).ljust(30)
status = status_code.ljust(10)
message = f"{timestamp} {source}{status}: {message}\n"
with open(os.path.join(__log_user_path, f"{str(user)}.log"), mode="a") as journal:
journal.write(message)
if status_code == STATE_ERROR and local_dict.get('debug_stuids'):
from app.wechat_send import send_wechat
receivers = list(local_dict['debug_stuids'])
if isinstance(receivers, str):
receivers = receivers.replace(' ', '').split(',')
receivers = list(map(str, receivers))
send_message = message
if len(send_message) > 400:
send_message = '\n'.join([
send_message[:300],
'...',
send_message[-100:],
'详情请查看log'
])
send_wechat(receivers, f'YPPF {settings.MY_ENV}发生异常\n' + send_message, card=len(message) < 200)
except Exception as e:
        # ideally an email notification should be sent to report the problem
        # TODO:
print(e)
finally:
__lock.release()
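# Illustrative call (a sketch, not part of the original module; assumes it runs
# inside the same project where `local_dict` and the log directories are set up):
#
#     operation_writer(local_dict["system_log"], "scheduled task finished",
#                      "scheduler[daily_job]", STATE_INFO)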
def except_captured(return_value=None, except_type=Exception,
log=True, show_traceback=False, record_args=False,
record_user=False, record_request_args=False,
source='utils[except_captured]', status_code=STATE_ERROR):
"""
Decorator that captures exception and log, raise or
return specific value if `return_value` is assigned.
"""
def actual_decorator(view_function):
@wraps(view_function)
def _wrapped_view(*args, **kwargs):
try:
return view_function(*args, **kwargs)
except except_type as e:
if log:
msg = f'发生意外的错误:{e}'
if record_args:
msg += f', 参数为:{args=}, {kwargs=}'
if record_user:
try:
user = None
if not args:
if 'request' in kwargs.keys():
user = kwargs["request"].user
elif 'user' in kwargs.keys():
user = kwargs["user"]
else:
user = args[0].user
msg += f', 用户为{user.username}'
try: msg += f', 姓名: {user.naturalperson}'
except: pass
try: msg += f', 组织名: {user.organization}'
except: pass
except:
msg += f', 尝试追踪用户, 但未能找到该参数'
if record_request_args:
try:
request = None
if not args:
request = kwargs["request"]
else:
request = args[0]
infos = []
infos.append(f'请求方式: {request.method}, 请求地址: {request.path}')
if request.GET:
infos.append(
'GET参数: ' +
';'.join([f'{k}: {v}' for k, v in request.GET.items()])
)
if request.POST:
infos.append(
'POST参数: ' +
';'.join([f'{k}: {v}' for k, v in request.POST.items()])
)
msg = msg + '\n' + '\n'.join(infos)
except:
msg += f'\n尝试记录请求体, 但未能找到该参数'
if show_traceback:
msg += '\n详细信息:\n\t'
msg += traceback.format_exc().replace('\n', '\n\t')
operation_writer(local_dict['system_log'],
msg, source, status_code)
if return_value is not None:
return return_value
raise
return _wrapped_view
return actual_decorator
def record_traceback(request, e):
    '''Avoid using this function whenever possible.'''
d = {}
d["time"] = datetime.now().strftime("%Y/%m/%d-%H%M")
d["username"] = request.user.username
d["request_path"] = request.path
if request.GET:
d["GET_Param"] = request.GET
if request.POST:
d["POST_Param"] = request.POST
d["traceback"] = traceback.format_exc()
hash_value = hashlib.sha1(json.dumps(d).encode()).digest().hex()
__log_dir = os.path.join(__log_detailed_path, request.user.username)
__log_path = os.path.join(__log_dir, hash_value + ".json")
os.makedirs(__log_dir, exist_ok=True)
with open(__log_path, "w") as f:
json.dump(d, f)
if local_dict.get('debug_stuids'):
from app.wechat_send import send_wechat
receivers = list(local_dict['debug_stuids'])
if isinstance(receivers, str):
receivers = receivers.replace(' ', '').split(',')
receivers = list(map(str, receivers))
message = f"错误类型:{type(e)}\n + 记录路径:{__log_path}\n"
send_wechat(receivers, f'YPPF {settings.MY_ENV} 记录到错误详情\n' + f"记录路径:{__log_path}")
|
import network
import ujson
import exception
def connect_to_access_point(wifi_config_filename='wifi_config.json'):
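    """Read Wi-Fi credentials from ``wifi_config_filename``, connect the station
    interface and return its ``ifconfig()`` tuple (ip, subnet, gateway, dns)."""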
try:
with open(wifi_config_filename) as f:
json = f.read()
except OSError:
raise exception.FileNotFoundError(wifi_config_filename)
config = ujson.loads(json)
ssid = config['ssid']
password = config['key']
station = network.WLAN(network.STA_IF)
if station.isconnected():
print('connection already exists')
return station.ifconfig()
station.active(True)
station.connect(ssid, password)
    # busy-wait until the connection is established (note: no timeout)
    while not station.isconnected():
        pass
print('established a new connection')
return station.ifconfig()
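# Minimal usage sketch (assumption: this runs on a MicroPython board whose
# filesystem contains a wifi_config.json with "ssid" and "key" entries).
if __name__ == '__main__':
    ip, subnet, gateway, dns = connect_to_access_point()
    print('network config:', ip, subnet, gateway, dns)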
|
# Generated by Django 3.2.7 on 2021-10-19 04:33
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('authors', '0017_alter_author_user'),
]
operations = [
migrations.AlterField(
model_name='author',
name='id',
field=models.CharField(default=uuid.uuid4, editable=False, max_length=200, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='inboxobject',
name='object_id',
field=models.CharField(max_length=200, null=True),
),
]
|
from setuptools import find_packages
from setuptools import setup
setup(
name="estimagic",
version="0.1.2",
description="Tools for the estimation of (structural) econometric models.",
long_description="""
Estimagic is a Python package that helps to build high-quality and user
friendly implementations of (structural) econometric models.
It is designed with large structural models in mind. However, it is also
useful for any other estimator that numerically minimizes or maximizes a
criterion function (Extremum Estimator). Examples are maximum likelihood
estimation, generalized method of moments, method of simulated moments and
indirect inference.""",
license="BSD",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Development Status :: 4 - Beta",
],
keywords=["econometrics", "statistics", "extremum estimation", "optimization"],
url="https://github.com/OpenSourceEconomics/estimagic",
author="Janos Gabler",
author_email="janos.gabler@gmail.com",
packages=find_packages(exclude=["tests/*"]),
entry_points={"console_scripts": ["estimagic=estimagic.cli:cli"]},
zip_safe=False,
package_data={"estimagic": ["optimization/algo_dict.json"]},
include_package_data=True,
)
|
import re
import string
with open("data/input_4.txt", "r") as f:
data = list()
one_pass = list()
for line in f.readlines():
if line == "\n":
data.append({i.split(":")[0]: i.split(":")[1] for i in one_pass})
one_pass = []
continue
        op = line.replace("\n", "").split()
        one_pass.extend(op)
    # keep the final passport when the file does not end with a blank line
    if one_pass:
        data.append({i.split(":")[0]: i.split(":")[1] for i in one_pass})
# def part_1():
# northpole_valid = sorted(["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"])
# passport_valid = sorted(["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid", "cid"])
# valid_count = 0
# for d in data:
# if (
# sorted(d.keys()) == northpole_valid or
# sorted(d.keys()) == passport_valid
# ):
# valid_count+=1
# return valid_count
# Optimized one line solution
def part_1():
valid_fields = sorted(["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"])
return [all([field in passport for field in valid_fields]) for passport in data].count(True)
a_f = [*string.ascii_lowercase[:6]]
digits = [*string.digits]
def part_2():
northpole_valid = sorted(["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"])
passport_valid = sorted(["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid", "cid"])
valid_count = 0
for d in data:
valid = True
try:
if (
sorted(d.keys()) == northpole_valid or
sorted(d.keys()) == passport_valid
):
for k, v in d.items():
if k == "byr":
if len(v) == 4:
assert int(v) >= 1920 and int(v) <=2002
if k == "iyr":
if len(v) == 4:
assert int(v) >= 2010 and int(v) <=2020
if k == "eyr":
if len(v) == 4:
assert int(v) >= 2020 and int(v) <=2030
if k == "hgt":
match = re.match(r"([0-9]+)([a-z]+)", v, re.I)
if match:
items = match.groups()
assert items[1] == "cm" or items[1] == "in"
if items[1] == "cm":
assert int(items[0]) >= 150 and int(items[0]) <=193
elif items[1] == "in":
assert int(items[0]) >= 59 and int(items[0]) <=76
else:
raise AssertionError
if k == "hcl":
match = re.match(r"^#([A-Fa-f0-9]{6})", v, re.I)
if match is None:
raise AssertionError
if k == "ecl":
assert v in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
if k == "pid":
assert len(v) == 9
match = re.match(r"([0-9]{9})", v)
if match is None:
raise AssertionError
if valid:
valid_count+=1
except AssertionError:
valid = False
return valid_count
# print(part_2())
if __name__=="__main__":
p1 = part_1()
p2 = part_2()
print("The Solution to part 1 is : {}".format(p1))
print("The Solution to part 2 is : {}".format(p2))
|
import ntpath
import os
import pickle
from functools import partial
from pathlib import Path
from time import sleep
import peewee
from kivy.app import App
from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.logger import Logger
from kivy.metrics import dp
from kivy.properties import (
BooleanProperty,
DictProperty,
ListProperty,
NumericProperty,
ObjectProperty,
StringProperty,
)
from kivymd.toast.kivytoast.kivytoast import toast
from kivy.network.urlrequest import UrlRequest
from libs.utils.comic_server_conn import ComicServerConn
from libs.utils.db_functions import Comic, ComicIndex, ReadingList
from kivymd.uix.dialog import MDDialog
CHECKBOX_STATE_BOOL = {"normal": False, "down": True}
READINGLIST_DB_KEYS = [
"name",
"cb_limit_active",
"limit_num",
"cb_only_read_active",
"cb_purge_active",
"cb_optimize_size_active",
"sw_syn_this_active",
"end_last_sync_num",
"totalCount",
"data",
]
READINGLIST_SETTINGS_KEYS = [
"cb_limit_active",
"limit_num",
"cb_only_read_active",
"cb_purge_active",
"cb_optimize_size_active",
"sw_syn_this_active",
]
COMIC_DB_KEYS = [
"Id",
"Series",
"Number",
"Volume",
"Year",
"Month",
"UserCurrentPage",
"UserLastPageRead",
"PageCount",
"Summary",
"FilePath",
"local_file",
"data",
"is_sync",
]
def get_size(start_path="."):
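    """Return the total size in bytes of all regular files under start_path."""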
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
# skip if it is symbolic link
if not os.path.islink(fp):
total_size += os.path.getsize(fp)
return total_size
class ComicBook(EventDispatcher):
"""
class representing a single comic
"""
Id = StringProperty()
__str__ = StringProperty()
slug = StringProperty()
name = StringProperty()
Number = StringProperty()
Series = StringProperty()
date = StringProperty()
Year = NumericProperty()
Month = NumericProperty()
UserLastPageRead = NumericProperty()
UserCurrentPage = NumericProperty()
PageCount = NumericProperty()
Summary = StringProperty()
FilePath = StringProperty()
Volume = NumericProperty()
readlist_obj = ObjectProperty()
local_file = StringProperty("None")
is_sync = BooleanProperty(False)
data = DictProperty()
order_index = NumericProperty()
def __init__(
self,
data=None,
readlist_obj=None,
comic_Id="",
comic_index=0,
mode="Server",
*args,
**kwargs,
):
self.readlist_obj = readlist_obj
if mode in ("Server", "FileOpen"):
if comic_Id == "":
comic_data = data
self.data = comic_data
self.Id = comic_data["Id"]
self.__str__ = "{} #{}".format(
comic_data["Series"], comic_data["Number"]
)
self.slug = str(comic_data["Id"])
self.name = f"{comic_data['Series']} #{comic_data['Number']}"
self.Number = comic_data["Number"]
self.Series = comic_data["Series"]
self.date = f"{comic_data['Month']}/{comic_data['Year']}"
self.Year = comic_data["Year"]
self.Month = comic_data["Month"]
self.UserLastPageRead = comic_data["UserLastPageRead"]
self.UserCurrentPage = comic_data["UserCurrentPage"]
self.PageCount = comic_data["PageCount"]
self.Summary = comic_data["Summary"]
self.FilePath = comic_data["FilePath"]
self.Volume = comic_data["Volume"]
app = App.get_running_app()
self.comic_jsonstore = app.comic_db
self.readlist_obj = readlist_obj
self.comic_index = comic_index
self.local_file = ""
if mode != "FileOpen":
Clock.schedule_once(
lambda dt: self.get_or_create_db_item())
if mode == "db_data":
self.Id = comic_Id
if mode != "FileOpen":
# Clock.schedule_once(
# lambda dt: self.get_or_create_db_item())
self.get_or_create_db_item()
def get_or_create_db_item(self):
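        """
        Look up this comic's row in the local DB by Id, creating it (plus its
        ComicIndex link to the reading list) on first sight; otherwise copy the
        stored fields back onto this object.
        """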
tmp_defaults = {}
for key in COMIC_DB_KEYS:
if key == "comic_index":
pass
elif key == "data":
new_dict = {k: self.data[k] for k in self.data.keys()}
tmp_defaults["data"] = new_dict
else:
tmp_defaults[key] = getattr(self, key)
db_item, created = Comic.get_or_create(
Id=self.Id, defaults=tmp_defaults
)
if created is True:
rl = self.readlist_obj
db_item.comic_index.index = self.comic_index
comic_index_db, created_index = ComicIndex.get_or_create(
comic=db_item, readinglist=rl.db, index=self.comic_index
)
db_item.save()
if rl.slug not in [item.slug for item in db_item.readinglists]:
rl.db.comics.add(db_item)
else:
for key in COMIC_DB_KEYS:
if key == "comic_index":
pass
else:
setattr(self, key, getattr(db_item, key))
self.__str__ = f"{db_item.Series} #{db_item.Number}"
self.name = self.__str__
self.date = f"{db_item.Month}/{db_item.Year}"
self.slug = str(self.Id)
self.comic_index = db_item.comic_index.select(
ReadingList.slug == self.readlist_obj.slug
)
def update(self, key_list=()):
for key, value in key_list:
print(f"key:{key}\nval:{value}")
def callback(self, store, key, result):
pass
def set_is_sync(self):
try:
db_item = ComicIndex.get(
ComicIndex.comic == self.Id,
ComicIndex.readinglist == self.readlist_obj.slug,
)
if db_item:
if db_item.is_sync:
setattr(self, "is_sync", db_item.is_sync)
except peewee.IntegrityError:
Logger.error("Somthing went wrong")
class ComicReadingList(EventDispatcher):
# ids = DictProperty({})
name = StringProperty()
comics = ListProperty()
data = DictProperty()
slug = StringProperty()
comic_db = ObjectProperty()
comic_json = ListProperty()
    cb_only_read_active = BooleanProperty(False)
cb_purge_active = BooleanProperty(False)
cb_optimize_size_active = BooleanProperty(False)
cb_limit_active = BooleanProperty(False)
limit_num = NumericProperty(25)
sw_syn_this_active = BooleanProperty(False)
comic_db_in = BooleanProperty(False)
db = ObjectProperty()
comics_loaded = ObjectProperty(False)
last_comic_read = NumericProperty()
start_last_sync_num = NumericProperty(0)
end_last_sync_num = NumericProperty(0)
totalCount = NumericProperty()
pickled_data = ObjectProperty()
sync_list = ListProperty()
def __init__(self, name="", data=None, slug="", mode="Server"):
self.slug = slug
self.name = name
self.event = None
if data != "db_data":
self.pickled_data = pickle.dumps(data, -1)
self.data = pickle.loads(self.pickled_data)
self.comic_json = self.data["items"]
if mode != "FileOpen":
if name == "Single_FileLoad":
self.totalCount = 0
else:
self.totalCount = self.data["totalCount"]
if mode != "FileOpen":
self.get_or_create_db_item(mode=mode)
def add_comic(self, comic, index=0):
"""
        Add a single comic book to this collection
"""
self.comics.insert(0, comic)
def get_or_create_db_item(self, mode):
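        """
        Load or create the ReadingList row for this slug, copy its stored
        settings onto this object, and rebuild self.comics from the database
        (restricted to synced local files when mode == "local_file").
        """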
tmp_defaults = {}
try:
for key in READINGLIST_DB_KEYS:
if key == "data":
new_dict = {k: self.data[k] for k in self.data.keys()}
tmp_defaults["data"] = new_dict
else:
tmp_defaults[key] = getattr(self, key)
db_item, created = ReadingList.get_or_create(
slug=self.slug, defaults=tmp_defaults
)
self.db = db_item
if db_item:
for key in READINGLIST_DB_KEYS:
setattr(self, key, getattr(db_item, key))
if created is True:
len_dbcomics = len(db_item.comics)
if (
len_dbcomics == len(self.comic_json)
and len(self.comic_json) != 0
):
self.comic_db_in = True
self.comics = self.db.comics.order_by(
-Comic.comic_index.index
)
else:
self.comic_db_in = True
comicindex_db = ComicIndex.get(
ComicIndex.readinglist == self.slug
)
if mode == "local_file":
list_comics = self.db.comics.where(
Comic.is_sync == True, Comic.local_file != ""
).order_by( # noqa
comicindex_db.index
)
print(f"len:{len(list_comics)}")
else:
list_comics = self.db.comics.order_by(
comicindex_db.index
)
for comic in list_comics:
new_comic = ComicBook(
comic_Id=comic.Id,
readlist_obj=self,
mode="db_data",
)
self.comics.append(new_comic)
self.comics_loaded = True
except peewee.OperationalError:
Logger.critical(
"Somthing happened in get_or_create of readinglist"
)
def save_settings(self, *args, **kwargs):
try:
rl = ReadingList.get(ReadingList.slug == self.slug)
for key in READINGLIST_SETTINGS_KEYS:
setattr(rl, key, kwargs[key])
setattr(self, key, kwargs[key])
rl.save()
except peewee.OperationalError:
pass
def do_db_refresh(self, screen=None):
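        """
        Re-fetch this reading list from the server and reconcile each comic's
        local DB record, pushing local reading progress back to the server
        when a synced comic is further along than the server copy.
        """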
def __finish_toast(dt):
app = App.get_running_app()
screen = app.manager.get_screen("server_readinglists_screen")
screen.refresh_callback()
toast("DataBase Refresh Complete")
def __got_readlist_data(results):
def __updated_progress(results):
pass
the_keys = [
"Id",
"Series",
"Number",
"Volume",
"Year",
"Month",
"UserCurrentPage",
"UserLastPageRead",
"PageCount",
"Summary",
"FilePath",
]
for server_comic in results["items"]:
for db_comic in self.comics:
if db_comic.Id == server_comic["Id"]:
for key in the_keys:
if getattr(db_comic, key) != server_comic[key]:
if key in (
"UserCurrentPage",
"UserLastPageRead",
) and (db_comic.is_sync):
if (
db_comic.UserLastPageRead
> server_comic["UserLastPageRead"]
) or (
db_comic.UserCurrentPage
> server_comic["UserCurrentPage"]
):
if (
db_comic.UserCurrentPage
> db_comic.UserLastPageRead
):
current_page = (
db_comic.UserCurrentPage
) # noqa
else:
current_page = (
db_comic.UserLastPageRead
) # noqa
update_url = "{}/Comics/{}/Progress".format(
api_url, db_comic.Id
)
self.fetch_data.update_progress(
update_url,
current_page,
callback=lambda req, results: __updated_progress(
results
),
)
else:
x_str = db_comic.__str__
Logger.info(
"Updating DB Record for {} of {}".format(
key, x_str
)
)
toast(
"Updating DB Record for {} of {}".format(
key, x_str
)
)
db_item = Comic.get(
Comic.Id == db_comic.Id
)
if db_item:
setattr(
db_item, key, server_comic[key]
)
db_item.save()
setattr(self, key, db_item)
Clock.schedule_once(__finish_toast, 3)
self.fetch_data = ComicServerConn()
app = App.get_running_app()
api_url = app.api_url
server_url = f"{api_url}/Lists/{self.slug}/Comics/"
self.fetch_data.get_server_data_callback(
server_url,
callback=lambda req, results: __got_readlist_data(results),
)
def get_last_comic_read(self):
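        """Return the index of the last fully read comic in this list (0 if none)."""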
last_read_comic = 0
for comic in self.comics:
if (
comic.UserLastPageRead == comic.PageCount - 1
and comic.PageCount > 1
):
last_read_comic = self.comics.index(comic)
return last_read_comic
def do_sync(self):
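        """
        Work out which comics still need downloading, honouring the limit /
        only-unread / purge settings, optionally delete already-read synced
        files, then hand the remaining list to sync_readinglist().
        """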
def _syncrun_callback(*args):
pass
app = App.get_running_app()
if app.sync_is_running is True:
self.please_wait_dialog = MDDialog(
title="Sync Already in Progress",
size_hint=(0.8, 0.4),
text_button_ok="Ok",
text=f"Please wait till current Sync is done",
events_callback=_syncrun_callback,
)
self.please_wait_dialog.open()
return
self.num_file_done = 0
sync_range = 0
self.fetch_data = ComicServerConn()
rl_db = ReadingList.get(ReadingList.slug == self.slug)
end_last_sync_num = rl_db.end_last_sync_num
if end_last_sync_num != 0:
end_last_sync_num = end_last_sync_num - 1
comicindex_db = ComicIndex.get(ComicIndex.readinglist == self.slug)
last_read_comic_db = self.db.comics.where(
(Comic.UserLastPageRead == Comic.PageCount - 1)
& (Comic.PageCount > 1)
).order_by(comicindex_db.index)
if len(last_read_comic_db) > 1:
last_read_index = ComicIndex.get(
ComicIndex.comic == last_read_comic_db[-1].Id,
ComicIndex.readinglist == self.slug,
).index
elif len(last_read_comic_db) != 0:
last_read_index = ComicIndex.get(
ComicIndex.comic == last_read_comic_db[0].Id,
ComicIndex.readinglist == self.slug,
).index
else:
last_read_index = 0
if self.cb_limit_active:
if self.cb_only_read_active:
list_comics = self.db.comics.where(
~(Comic.UserLastPageRead == Comic.PageCount - 1)
& (Comic.PageCount > 1)
& (Comic.been_sync)
!= True
).order_by(
comicindex_db.index
) # noqa: E712
if last_read_index < end_last_sync_num:
sync_range = int(self.limit_num)
tmp_comic_list = list_comics[0:int(sync_range)]
else:
sync_range = int(end_last_sync_num) + int(self.limit_num)
tmp_comic_list = list_comics[end_last_sync_num:int(sync_range)]
purge_list = self.db.comics.where(
(Comic.UserLastPageRead == Comic.PageCount - 1)
& (Comic.PageCount > 1)
& (Comic.is_sync == True)
).order_by(
comicindex_db.index
) # noqa: E712
else:
list_comics = (
Comic.select()
.where(
(Comic.is_sync == False) & (Comic.been_sync == False)
)
.order_by(comicindex_db.index)
) # noqa: E712,E501
sync_range = int(self.limit_num)
tmp_comic_list = list_comics[0:int(sync_range)]
purge_list = self.db.comics.where(
Comic.is_sync == True
).order_by(
comicindex_db.index
) # noqa: E712
else:
sync_range = int(len(self.comics))
# rl_db.end_last_sync_num = new_end_last_sync_num
# rl_db.save()
if self.cb_only_read_active:
list_comics = self.db.comics.where(
~(Comic.UserLastPageRead == Comic.PageCount - 1)
& (Comic.PageCount > 1)
).order_by(
comicindex_db.index
) # noqa: E712
tmp_comic_list = list_comics[0:int(sync_range)]
else:
list_comics = self.db.comics.where(
(Comic.is_sync == False) & (Comic.been_sync == False)
).order_by(
comicindex_db.index
) # noqa: E712,E501
tmp_comic_list = list_comics
db_item = ReadingList.get(ReadingList.slug == self.slug)
for key in READINGLIST_SETTINGS_KEYS:
v = getattr(db_item, key)
globals()["%s" % key] = v
app = App.get_running_app()
id_folder = os.path.join(app.sync_folder, self.slug)
my_comic_dir = Path(os.path.join(id_folder, "comics"))
if os.path.isdir(my_comic_dir):
print(f"{get_size(my_comic_dir)/1000000} MB")
sync_comic_list = []
for comic in tmp_comic_list:
if comic.is_sync is False:
sync_comic_list.append(comic)
if self.cb_purge_active:
for item in purge_list:
os.remove(item.local_file)
db_comic = Comic.get(Comic.Id == item.Id)
db_comic.is_sync = False
db_comic.local_file = ""
db_comic.save()
server_readinglists_screen = app.manager.get_screen(
"server_readinglists_screen"
)
server_readinglists_screen.file_sync_update(item.Id, False)
self.sync_readinglist(comic_list=sync_comic_list)
def get_server_file_download(self, req_url, callback, file_path):
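        """Download one file from the server with the API cookie attached, then
        poll until the request finishes and schedule the next item in the queue."""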
def is_finished(dt):
if req.is_finished is True:
app = App.get_running_app()
screen = app.manager.get_screen("server_readinglists_screen")
screen.ids.sync_button.enabled = True
Clock.schedule_once(self.download_file)
else:
Clock.schedule_once(is_finished, 0.25)
app = App.get_running_app()
username = app.config.get("General", "username")
api_key = app.config.get("General", "api_key")
str_cookie = f"API_apiKey={api_key}; BCR_username={username}"
head = {
"Content-Type": "application/json",
"Accept": "application/json",
"Cookie": str_cookie,
}
req = UrlRequest(
req_url, req_headers=head, on_success=callback, file_path=file_path
)
app = App.get_running_app()
screen = app.manager.get_screen("server_readinglists_screen")
if len(self.sync_list) != 0:
screen.ids.sync_status_lbl.text = (
f"Sync is Running Left in Que: {len(self.sync_list)}"
)
Clock.schedule_once(is_finished, 0.25)
else:
toast("Reading List has been Synced, Refreshing Screen")
screen.ids.sync_status_lbl.text = ""
screen.ids.sync_button.enabled = True
app.sync_is_running = False
# screen.refresh_callback()
def got_file(self, comic_obj, comic_file="", *args, **kwargs):
def file_finished_toast(dt):
toast(f"{os.path.basename(comic_file)} Synced")
self.num_file_done += 1
Clock.schedule_once(file_finished_toast)
self.file_download = True
db_comic = Comic.get(Comic.Id == comic_obj.Id)
db_comic.is_sync = True
db_comic.save()
db_comic = Comic.get(Comic.Id == comic_obj.Id)
db_comic.local_file = comic_file
db_comic.been_sync = True
db_comic.save()
rl_db = ReadingList.get(ReadingList.slug == self.slug)
rl_db.end_last_sync_num += 1
rl_db.save()
app = App.get_running_app()
server_readinglists_screen = app.manager.get_screen(
"server_readinglists_screen"
)
server_readinglists_screen.file_sync_update(comic_obj.Id, True)
def download_file(self, dt):
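        """Pop the next comic off the sync queue and start downloading its file
        (full file or WebP-optimized variant) plus a thumbnail."""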
def got_thumb(results):
pass
app = App.get_running_app()
screen = app.manager.get_screen("server_readinglists_screen")
screen.ids.sync_button.enabled = False
if len(self.sync_list) == 0:
toast("Reading List has been Synced, Refreshing Screen")
app = App.get_running_app()
screen = app.manager.get_screen("server_readinglists_screen")
screen.ids.sync_status_lbl.text = ""
screen.ids.sync_button.enabled = True
app.sync_is_running = False
# screen.refresh_callback()
return
comic = self.sync_list.pop(0)
self.file_download = False
file_name = ntpath.basename(comic.FilePath)
y = 240
part_url = f"/Comics/{comic.Id}/Pages/0?"
app = App.get_running_app()
part_api = f"&apiKey={app.api_key}&height={round(dp(y))}"
thumb_url = f"{app.api_url}{part_url}{part_api}"
if self.cb_optimize_size_active is False:
sync_url = f"{app.api_url}/Comics/{comic.Id}/Sync/File/"
elif self.cb_optimize_size_active is True:
sync_url = f"{app.api_url}/Comics/{comic.Id}/Sync/Webp"
app = App.get_running_app()
id_folder = os.path.join(app.sync_folder, self.slug)
self.my_comic_dir = Path(os.path.join(id_folder, "comics"))
self.my_thumb_dir = Path(os.path.join(id_folder, "thumb"))
if not self.my_comic_dir.is_dir():
os.makedirs(self.my_comic_dir)
if not self.my_thumb_dir.is_dir():
os.makedirs(self.my_thumb_dir)
t_file = os.path.join(self.my_comic_dir, file_name)
self.get_server_file_download(
sync_url,
            callback=partial(self.got_file, comic, comic_file=t_file),
file_path=t_file,
)
thumb_name = f"{comic.Id}.jpg"
self.fetch_data.get_server_file_download(
thumb_url,
callback=lambda req, results: got_thumb(results),
file_path=os.path.join(self.my_thumb_dir, thumb_name),
)
def _finish_sync(self, comic_list, *largs):
def __finish_toast(dt):
toast("Reading List has been Synced, Refreshing Screen")
# app = App.get_running_app()
# screen = app.manager.get_screen("server_readinglists_screen")
# screen.refresh_callback()
list_comics = comic_list
num_comic = len(list_comics)
if self.num_file_done == num_comic:
Clock.schedule_once(__finish_toast, 3)
self.event.cancel()
self.event = None
    def sync_readinglist(self, comic_list=None):
        # Avoid a mutable default argument and the duplicated app lookup.
        app = App.get_running_app()
        self.sync_list = comic_list if comic_list is not None else []
screen = app.manager.get_screen("server_readinglists_screen")
screen.ids.sync_status_lbl.text = (
f"Sync is Running Comics to Sync: {len(self.sync_list)}"
)
app.sync_is_running = True
screen.ids.sync_button.enabled = False
Clock.schedule_once(self.download_file)
# app.delayed_work(
# self.download_file, list_comics, delay=.5)
# self.event = Clock.schedule_interval(
# partial(self._finish_sync, comic_list), 0.5
# )
|
import copy
import numpy as np
from utils.utils import getdtype
from Ottergrad.autograd import Tensor, Func, checkTensor
from Ottergrad.utils import *
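# Each public op below (dot, abs, sum, ...) builds a lazy graph node: it stores
# the operands on a new Tensor, registers a forward function and a gradient
# function, and leaves evaluation to the surrounding autograd engine.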
def ndot(a, b):
res = np.dot(a, b)
return res
class _dot(Func):
def __init__(self):
super().__init__()
def __call__(self, x, y):
tensor = Tensor()
tensor.type = np.dot
tensor.left = x
tensor.right = y
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
self.root = tensor
self.input = tensor
return tensor
@staticmethod
def _forwardfunc(node: Tensor):
res = ndot(node.getleft().getdata(), node.getright().getdata())
node.setdata(res)
@checkgradisnone
def gradient(self):
grad_l = ndot(self.root.getgrad(), self.root.getright().getdata().T)
grad_r = ndot(self.root.getleft().getdata().T, self.root.getgrad())
        self.root.getleft().setgrad(self.root.getleft().getgrad() + grad_l)
        self.root.getright().setgrad(self.root.getright().getgrad() + grad_r)
@staticmethod
@checkgradisnone
def _gradient(node):
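        # For C = A @ B with upstream grad G: dL/dA = G @ B.T and dL/dB = A.T @ G,
        # accumulated into any gradient the operands already carry.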
node.getleft().setgrad(node.getleft().getgrad() + ndot(node.getgrad(), node.getright().getdata().T))
node.getright().setgrad(node.getright().getgrad() + ndot(node.getleft().getdata().T, node.getgrad()))
@checkTensor
def dot(x, y):
func = _dot()
return func(x, y)
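# Usage sketch (hypothetical values; assumes Tensor can wrap an ndarray, as
# maximum() below does, and that forward/backward are driven by the engine):
#   c = dot(Tensor(np.eye(2)), Tensor(np.ones((2, 2))))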
class _abs(Func):
def __init__(self, x=None):
super().__init__()
self.x = x
def __call__(self, x):
self.x = x
tensor = Tensor()
tensor.left = x
tensor.type = np.abs
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
return tensor
@staticmethod
def _forwardfunc(node):
node.setdata(np.abs(node.getleft().getdata(), dtype=getdtype()))
@staticmethod
@checkgradisnone
def _gradient(node):
condlist = [node.getleft().getdata() < 0, node.getleft().getdata() > 0]
choicelist = [-1, 1]
node.getleft().setgrad(node.getleft().getgrad() + np.select(condlist, choicelist))
def nabs(x):
res = np.abs(x)
return res
@checkTensor
def abs(x):
func = _abs()
return func(x)
class _sum(Func):
def __init__(self, x: Tensor = None, axis=None):
super().__init__()
self.x = x
self.axis = axis
def __call__(self, x: Tensor, axis=None):
self.x = x
self.axis = axis
tensor = Tensor()
tensor.left = x
tensor.type = np.sum
tensor.setkwargs({'axis': axis})
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
return tensor
@staticmethod
def _forwardfunc(node: Tensor):
node.setdata(nsum(node.getleft().getdata(), **node.getkwargs()))
def gradient(self):
self.root.getleft().setgrad(self.root.getleft().getgrad() + nsum(self.root.getgrad(), *self.root.getargs()))
@staticmethod
@checkgradisnone
def _gradient(node: Tensor):
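        # Summing broadcasts in reverse: the upstream grad is expanded back to
        # the input's shape.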
node.getleft().setgrad(node.getleft().getgrad()
+ node.getgrad() * np.ones(node.getleft().getdata().shape))
def nsum(x, axis):
res = np.sum(x, axis)
return res
def sum(x: Tensor, axis: int = None):
func = _sum()
return func(x, axis)
class _concatenate(Func):
    def __init__(self, sets=None, axis=0):
super().__init__()
def __call__(self, sets: tuple, axis=0):
assert len(sets) > 1, "must have two or more sets"
isalldata = True
for set in sets:
assert type(set) is Tensor, "set type must be Tensor"
isalldata = isalldata and (set.getdata() is not None)
        if isalldata:
            # Every operand already carries data, so concatenate eagerly.
            result = sets[0].getdata()
            for set in sets[1:]:
                result = nconcatenate((result, set.getdata()), axis=axis)
            return result
else:
result = sets[0]
for set in sets[1:]:
tensor = Tensor()
tensor.left = result
tensor.right = set
tensor.type = np.concatenate
                tensor.setkwargs({'axis': axis})
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
result = tensor
return tensor
@staticmethod
def _forwardfunc(node: Tensor):
node.setdata(nconcatenate((node.getleft().getdata(), node.getright().getdata()), **node.getkwargs()))
@staticmethod
@checkgradisnone
def _gradient(node: Tensor):
pass
# @pyjit
def nconcatenate(arrays, axis=0, **kwargs):
    return np.concatenate(arrays, axis=axis, **kwargs)
def concatenate(sets: tuple, axis=0):
for set in sets:
assert type(set) is Tensor, "set type must be Tensor"
func = _concatenate()
return func(sets, axis)
class _split(Func):
def __init__(self):
super().__init__()
def __call__(self, x: Tensor, indices, axis=0):
if x.getdata() is not None:
return np.split(x.getdata(), indices, axis)
else:
tensor = Tensor()
tensor.left = x
tensor.type = np.split
tensor.setkwargs({"indices_or_sections": indices, 'axis': axis})
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
return tensor
@staticmethod
def _forwardfunc(node: Tensor):
node.setdata(nsplit(node.getleft().getdata(), **node.getkwargs()))
@staticmethod
@checkgradisnone
def _gradient(node: Tensor):
pass
# @pyjit
def nsplit(x, indices, axis=0):
return np.split(x, indices, axis=axis)
def split(x, indices, axis=0):
func = _split()
return func(x, indices, axis)
class _ones(Func):
def __init__(self, shape=None, dtype=getdtype()):
super().__init__()
self.shape = shape
self.dtype = dtype
def __call__(self, shape, dtype=getdtype()):
tensor = Tensor()
tensor.type = np.ones
tensor.isgrad = False
tensor.setargs({"shape": shape, "dtype": dtype})
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
self.root = tensor
return tensor
@staticmethod
def _forwardfunc(node):
        node.setdata(np.ones(**node.getkwargs()))
def gradient(self):
pass
@staticmethod
def _gradient(node):
pass
def ones(shape, dtype=getdtype()):
func = _ones()
return func(shape, dtype)
class _shape(Func):
def __init__(self, x=None):
super().__init__()
self.x = x
def __call__(self, x):
self.x = x
if x.getdata() is not None:
return np.shape(x.getdata())
else:
tensor = Tensor()
tensor.setleft(x)
tensor.type = np.shape
tensor.isgrad = False
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
return tensor
@staticmethod
def _forwardfunc(node):
node.setdata(np.shape(node.getleft().getdata()))
def gradient(self):
pass
@staticmethod
def _gradient(*args):
pass
@checkTensor
def shape(x):
func = _shape()
return func(x)
class _take(Func):
    def __init__(self, x=None, indices=None, axis=0):
        super().__init__()
        self.x = x
        self.indices = indices
        self.axis = axis
def __call__(self, x, indices=None, axis=0):
self.x = x
self.indices = indices
self.axis = axis
if x.getdata() is not None:
return np.take(x.getdata(), indices, axis)
else:
tensor = Tensor()
tensor.setleft(x)
tensor.type = np.take
tensor.isgrad = False
tensor.setkwargs({"indices": indices, "axis": axis})
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
return tensor
@staticmethod
def _forwardfunc(node):
node.setdata(ntake(node.getleft().getdata(), **node.getkwargs()))
def gradient(self):
pass
@staticmethod
def _gradient(*args):
pass
# @pyjit
def ntake(x, indices=None, axis=0):
return np.take(x, indices=indices, axis=axis)
def take(x: Tensor, indices, axis=0):
    func = _take()
    return func(x, indices, axis)
class _exp(Func):
def __init__(self):
super().__init__()
def __call__(self, x, **kwargs):
tensor = Tensor()
tensor.left = x
tensor.type = np.exp
tensor.setkwargs(kwargs)
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
self.root = tensor
return tensor
@staticmethod
def _forwardfunc(node):
        if node.getkwargs():
node.setdata(nexp(node.getleft().getdata(), **node.getkwargs()))
else:
node.setdata(nexp(node.getleft().getdata()))
def gradient(self):
self.root.getleft().setgrad(self.root.getleft().getgrad() + self.root.getgrad())
@staticmethod
@checkgradisnone
def _gradient(node):
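        # d exp(x)/dx = exp(x), so scale the upstream gradient by the forward value.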
node.getleft().setgrad(node.getleft().getgrad() + (node.getgrad() * nexp(node.getleft().getdata())))
def nexp(x):
return np.exp(x)
def exp(x, **kwargs):
func = _exp()
return func(x, **kwargs)
class _maximum(Func):
def __init__(self):
super().__init__()
def __call__(self, x, y, **kwargs):
tensor = Tensor()
tensor.left = x
tensor.right = y
tensor.setkwargs(kwargs)
tensor.type = np.maximum
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
self.root = tensor
return tensor
@staticmethod
def _forwardfunc(node):
        if node.getkwargs():
node.setdata(nmaximum(node.getleft().getdata(), node.getright().getdata(), **node.getkwargs()))
else:
node.setdata(nmaximum(node.getleft().getdata(), node.getright().getdata()))
def gradient(self):
# grad = copy.deepcopy(node.getgrad())
# grad[grad <= 0] = 0
# node.getleft().grad = grad
self.root.getleft().setgrad(nminimum(self.root.getleft().getdata(), self.root.getright().getdata()))
self.root.getright().setgrad(nminimum(self.root.getright().getdata(), self.root.getleft().getdata()))
@staticmethod
@checkgradisnone
    def _gradient(node):
        # Route the upstream gradient to whichever operand attained the maximum
        # (ties go to the left operand).
        left = node.getleft().getdata()
        right = node.getright().getdata()
        node.getleft().setgrad(
            node.getleft().getgrad() + node.getgrad() * (left >= right))
        node.getright().setgrad(
            node.getright().getgrad() + node.getgrad() * (right > left))
# @pyjit
def nmaximum(x, y):
return np.maximum(x, y)
# @pyjit
def nminimum(x, y):
return np.minimum(x, y)
def maximum(x: [Tensor, int, float, np.ndarray], y: [Tensor, int, float, np.ndarray], **kwargs):
if type(x) is not Tensor:
x = Tensor(x)
x.isgrad = False
if type(y) is not Tensor:
y = Tensor(y)
y.isgrad = False
func = _maximum()
return func(x, y, **kwargs)
class _tanh(Func):
def __init__(self):
super().__init__()
def __call__(self, x):
tensor = Tensor()
tensor.setleft(x)
tensor.type = np.tanh
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
return tensor
@staticmethod
def _forwardfunc(node):
node.setdata(np.tanh(node.getleft().getdata()))
def gradient(self):
self.getroot().getleft().setgrad(self.getroot().getleft().getgrad() +
np.multiply((np.ones(self.getroot().getgrad().shape) -
ntanh(self.getroot().getgrad()) ** 2),
self.getroot().getgrad()))
@staticmethod
@checkgradisnone
def _gradient(node):
        # d tanh(x)/dx = 1 - tanh(x)**2, evaluated at the input, times the upstream grad.
        node.getleft().setgrad(node.getleft().getgrad() +
                               np.multiply(1 - ntanh(node.getleft().getdata()) ** 2,
                                           node.getgrad()))
def ntanh(x):
return np.tanh(x)
@checkTensor
def tanh(x):
func = _tanh()
return func(x)
class _sin(Func):
def __init__(self):
super().__init__()
def __call__(self, x):
tensor = Tensor()
tensor.setleft(x)
tensor.type = np.sin
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
return tensor
@staticmethod
def _forwardfunc(node):
node.setdata(np.sin(node.getleft().getdata()))
def gradient(self):
self.getroot().getleft().setgrad(self.getroot().getleft().getgrad() +
np.multiply(self.root.getgrad(), np.cos(self.root.getleft().getdata())))
@staticmethod
@checkgradisnone
def _gradient(node):
node.getleft().setgrad(node.getleft().getgrad() + np.multiply(node.getgrad(), np.cos(node.getleft().getdata())))
def nsin(x):
return np.sin(x)
@checkTensor
def sin(x):
func = _sin()
return func(x)
class _cos(Func):
def __init__(self):
super().__init__()
def __call__(self, x):
tensor = Tensor()
tensor.setleft(x)
tensor.type = np.cos
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
return tensor
@staticmethod
def _forwardfunc(node):
node.setdata(np.cos(node.getleft().getdata()))
def gradient(self):
self.getroot().getleft().setgrad(self.getroot().getleft().getgrad() +
np.multiply(self.root.getgrad(), -np.sin(self.root.getleft().getdata())))
@staticmethod
@checkgradisnone
def _gradient(node):
node.getleft().setgrad(node.getleft().getgrad() +
np.multiply(node.getgrad(), -np.sin(node.getleft().getdata())))
def ncos(x):
return np.cos(x)
@checkTensor
def cos(x):
func = _cos()
return func(x)
class _tan(Func):
def __init__(self):
super().__init__()
def __call__(self, x):
tensor = Tensor()
tensor.setleft(x)
tensor.type = np.tan
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
return tensor
@staticmethod
def _forwardfunc(node):
node.setdata(np.tan(node.getleft().getdata()))
def gradient(self):
self.getroot().getleft().setgrad(self.getroot().getleft().getgrad() +
np.multiply(self.root.getgrad(), 1 -
np.tanh(self.root.getleft().getdata()) ** 2))
@staticmethod
@checkgradisnone
def _gradient(node):
        # d tan(x)/dx = 1 + tan(x)**2 (i.e. sec^2 x), not the tanh derivative.
        node.getleft().setgrad(node.getleft().getgrad() +
                               np.multiply(node.getgrad(), 1 + np.tan(node.getleft().getdata()) ** 2))
def ntan(x):
return np.tan(x)
@checkTensor
def tan(x):
func = _tan()
return func(x)
class _where(Func):
def __init__(self):
super().__init__()
def __call__(self, condition: Tensor, x: Tensor = None, y: Tensor = None):
tensor = Tensor()
tensor.setleft(Func(condition).getinput())
tensor.type = np.where
tensor.setargs([Func(condition), Func(x), Func(y)])
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
return tensor
@staticmethod
def _forwardfunc(node):
args = []
for arg in node.getargs():
arg.forward()
args.append(arg.getroot().getdata())
node.setdata(np.where(*args))
def gradient(self):
self.root.getleft().setgrad()
    @staticmethod
    @checkgradisnone
def _gradient(node):
node.getargs()[1].getroot().setgrad(node.getgrad())
node.getargs()[2].getroot().setgrad(node.getgrad())
x = copy.deepcopy(node.getargs()[1])
y = copy.deepcopy(node.getargs()[2])
x.backward()
y.backward()
node.getleft().setgrad(np.where(node.getargs()[0].getinput().getdata(),
x.getinput().getgrad(), y.getinput().getgrad()))
def nwhere(condition, x, y):
    return np.where(condition, x, y)
@checkTensor
def where(condition, x, y):
    func = _where()
    return func(condition, x, y)
class _mean(Func):
def __init__(self):
super().__init__()
def __call__(self, x: Tensor = None, axis=0):
tensor = Tensor()
tensor.setleft(x)
tensor.type = np.mean
tensor.setkwargs({"axis": axis})
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
return tensor
@staticmethod
def _forwardfunc(node):
node.setdata(np.mean(node.getleft().getdata(), **node.getkwargs()))
@staticmethod
@checkgradisnone
def _gradient(node):
shape = np.shape(node.getleft().getdata())
node.getleft().setgrad(node.getleft().getgrad() + 1 / shape[0] * np.ones(shape) * node.getgrad())
def nmean(x, axis=0):
return np.mean(x, axis=axis)
def mean(x: Tensor, axis=0):
func = _mean()
return func(x, axis)
class _var(Func):
def __init__(self):
super().__init__()
def __call__(self, x: Tensor = None, axis=0):
        # Variance as the mean of squared deviations from the mean.
        tensor = mean((x - mean(x, axis)) ** 2, axis)
return tensor
@checkTensor
def var(x, axis=0):
func = _var()
return func(x, axis)
class _sqrt(Func):
def __init__(self):
super().__init__()
def __call__(self, x: Tensor):
self.x = x
tensor = Tensor()
tensor.setleft(x)
tensor.type = np.sqrt
tensor.setforwardfunc(self._forwardfunc)
tensor.setgradfunc(self._gradient)
return tensor
@staticmethod
def _forwardfunc(node):
node.setdata(np.sqrt(node.getleft().getdata()))
@staticmethod
@checkgradisnone
def _gradient(node):
        # d sqrt(x)/dx = 1 / (2 * sqrt(x)), times the upstream gradient.
        node.getleft().setgrad(node.getleft().getgrad() + node.getgrad() * 0.5 * node.getleft().getdata() ** (-0.5))
@checkTensor
def sqrt(x):
func = _sqrt()
return func(x)
|
import sys
import requests_html
from t.log import error
def check_platform() -> dict:
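    """Return platform-specific shell command templates (currently just the audio player)."""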
return {
"darwin": {"audio": "afplay {}"},
"linux": {"audio": ""},
"win32": {"audio": ""},
}[sys.platform]
def check_langs() -> tuple:
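    """Fetch the dictionary languages advertised on dictionary.cambridge.org
    (falling back to just "english" on failure) and return (session, languages)."""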
session, langs = requests_html.HTMLSession(), {"english"}
try:
langs = langs.union(
set(
session.get("https://dictionary.cambridge.org/").html.xpath(
"//a[@data-dictcode]/@data-dictcode"
)
)
)
except Exception as e:
error(e)
return session, tuple(sorted(langs))
|
class RegressionPredictor():
def __init__(self, model):
self.model = model
def predict(self, X, decimals=2):
""" Performs one Prediction.
Parameters:
- X: Input array compatible with model interface for inference.
- decimals (int): Number of decimals required
        Returns: (list) Single-element list with the prediction, clipped at zero and rounded.
"""
prediction = self.model.predict(X)[0]
if prediction < 0: prediction = 0
return [round(prediction, decimals)]
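# Usage sketch (hypothetical model object; anything exposing a scikit-learn
# style .predict(X) -> array interface should work):
#   predictor = RegressionPredictor(model)
#   predictor.predict([[1.0, 2.0]])   # e.g. [3.14]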
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from pandas import DataFrame
from lib.cast import safe_int_cast
from lib.data_source import DataSource
from lib.time import datetime_isoformat
class SudanHumdataDataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
# Rename the appropriate columns
data = (
dataframes[0]
.rename(
columns={
"Report Date": "date",
"State": "match_string",
"Confirmed Cases": "total_confirmed",
}
)
.drop([0])
)
# The dates in the provided CSV are incorrect for one of the reports.
# Replace with report date taken from text of report.
data.loc[
data["Source"]
== "https://reliefweb.int/sites/reliefweb.int/files/resources/Situation%20Report%20-%20Sudan%20-%207%20May%202020.pdf",
"date",
] = "5/11/2020"
data = data.drop(axis=1, columns=["As of Date", "Source"])
# Remove Abyei PCA, a disputed region with no data shown.
data = data[data["match_string"] != "Abyei PCA"]
# Data source uses different spelling from src/data/iso_3166_2_codes.csv
data["match_string"].replace({"Gedaref": "Al Qadarif"}, inplace=True)
data.date = data.date.apply(lambda x: datetime_isoformat(x, "%m/%d/%Y"))
# Sudan data includes empty cells where there are no confirmed cases.
# These get read in as NaN. Replace them with zeroes so that the
# grouped_diff call to get new confirmed cases works for a state's first
# day with a case.
data["total_confirmed"] = data["total_confirmed"].fillna(0).apply(safe_int_cast)
# Make sure all records have the country code
data["country_code"] = "SD"
# Output the results
return data
|
#/************************************************************************************************************************
# Copyright (c) 2016, Imagination Technologies Limited and/or its affiliated group companies.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#************************************************************************************************************************/
import unittest
from nose_parameterized import parameterized
from nose.plugins.attrib import attr
from framework.test_cases.gateway_client_test_case import GWClientTestCase
from framework.operation_assertions import get_operation_assertions
from framework.operation_assertions import set_operation_assertions
from framework.operation_assertions import subscribe_operation_assertions
from framework.operation_assertions import client_delete_assertions
from framework.operation_assertions import client_define_assertions
from framework.operation_assertions import client_get_definition_assertions
from framework import test_assertions
from framework.test_assertions import Assertion
from framework.test_assertions import GetAssertion
from framework.test_assertions import SetAssertion
from framework.test_assertions import SubscribeAssertion
from framework.test_assertions import DeleteAssertion
from framework.test_assertions import DefineOperationAssertion
from framework.test_assertions import GetDefinitionAssertion
from framework.definitions import ObjectDefinitionSettings, ResourceDefinitionSettings
from framework.test_objects import objectDefinition1000
from framework.test_objects import objectDefinition1001
from framework.test_objects import objectDefinition1002
from framework.test_objects import objectDefinition1003
from framework.test_objects import resourceDefinitions
from framework.awa_enums import AwaResourceType
from framework.awa_enums import AwaResourceOperations
from framework import awa_constants
from framework.nose_parameterised import noseParameterisedTestNameGenerator
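# Each parameterized case below is a (name, assertions) pair; the assertions
# are executed in order against the gateway client under test.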
@attr("gateway_client")
class CustomObjectDefineTestCases(GWClientTestCase):
@parameterized.expand([
["DefineAndGetObject1000Definition", (DefineOperationAssertion(client_define_assertions.CheckForSuccess, objectDefinition1000, resourceDefinitions),
GetDefinitionAssertion(client_get_definition_assertions.CheckForSuccess, objectDefinition1000, resourceDefinitions), )],
["DefineAndGetObject1001Definition", (DefineOperationAssertion(client_define_assertions.CheckForSuccess, objectDefinition1001, resourceDefinitions),
GetDefinitionAssertion(client_get_definition_assertions.CheckForSuccess, objectDefinition1001, resourceDefinitions), )],
["DefineAndGetObject1002Definition", (DefineOperationAssertion(client_define_assertions.CheckForSuccess, objectDefinition1002, resourceDefinitions),
GetDefinitionAssertion(client_get_definition_assertions.CheckForSuccess, objectDefinition1002, resourceDefinitions), )],
["DefineAndGetObject1003Definition", (DefineOperationAssertion(client_define_assertions.CheckForSuccess, objectDefinition1003, resourceDefinitions),
GetDefinitionAssertion(client_get_definition_assertions.CheckForSuccess, objectDefinition1003, resourceDefinitions), )],
#Negative define test cases
["RedefineObject1000", (DefineOperationAssertion(client_define_assertions.CheckForSuccess, objectDefinition1000, resourceDefinitions),
DefineOperationAssertion(client_define_assertions.CheckForAlreadyDefined, objectDefinition1000, resourceDefinitions),)],
["GetNotDefinedObjectDefinition", (GetDefinitionAssertion(client_get_definition_assertions.CheckForObjectNotDefined, ObjectDefinitionSettings(5000, "Object5000", 1, 1), ()), )],
["GetObjectDefinitionIDOutOfRange", (GetDefinitionAssertion(client_get_definition_assertions.CheckForObjectNotDefined, ObjectDefinitionSettings(-500, "Object500", 1, 1), ()), )],
["GetNotDefinedResourceDefinition", (DefineOperationAssertion(client_define_assertions.CheckForSuccess, objectDefinition1000, resourceDefinitions),
GetDefinitionAssertion(client_get_definition_assertions.CheckForResourceNotDefined,
objectDefinition1000,
(ResourceDefinitionSettings(555, "Resource555", AwaResourceType.String, AwaResourceOperations.ReadWrite, 0, 1), )), )],
["DefineObjectIDOutOfRange", (DefineOperationAssertion(client_define_assertions.CheckForNullPointerException, ObjectDefinitionSettings(999999, "Object999999", 0, 1), ()), )],
["DefineResourceIDOutOfRange", (DefineOperationAssertion(client_define_assertions.CheckForSuccess, objectDefinition1000, resourceDefinitions),
DefineOperationAssertion(client_define_assertions.CheckForIDInvalid,
objectDefinition1000,
(ResourceDefinitionSettings(999999, "Resource999999", AwaResourceType.String, AwaResourceOperations.ReadWrite, 0, 1), )), )],
["DefineObjectNegativeID", (DefineOperationAssertion(client_define_assertions.CheckForNullPointerException,
ObjectDefinitionSettings(-123, "Object123", 0, 1),
()), )],
["DefineResourceNegativeID", (DefineOperationAssertion(client_define_assertions.CheckForSuccess, objectDefinition1000, resourceDefinitions),
DefineOperationAssertion(client_define_assertions.CheckForIDInvalid,
objectDefinition1000,
(ResourceDefinitionSettings(-123, "Resource123", AwaResourceType.String, AwaResourceOperations.ReadWrite, 0, 1), )), )],
], testcase_func_name=noseParameterisedTestNameGenerator)
def test(self, name, assertions):
test_assertions.callAssertions(self, assertions)
@attr("gateway_client")
class CustomObjectCreateTestCases(GWClientTestCase):
def setUp(self):
super(CustomObjectCreateTestCases, self).setUp()
self.topology.gatewayClients[0].DefineTestObjects()
@parameterized.expand([
# Create object instance test cases
["CreateSingleOptionalObjectInstance", (SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0", None, None, True), )],
["CreateSingleMandatoryObjectInstance", (SetAssertion(set_operation_assertions.CheckForSuccess, "/1001/0", None, None, True), )],
["CreateMultipleObjectInstance", (SetAssertion(set_operation_assertions.CheckForSuccess, "/1002/0", None, None, True), )],
["CreateExistingSingleObjectInstance", (SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0", None, None, True),
SetAssertion(set_operation_assertions.CheckForCannotCreate, "/1000/0", None, None, True), )],
["CreateExistingMultipleObjectInstance", (SetAssertion(set_operation_assertions.CheckForSuccess, "/1002/0", None, None, True),
SetAssertion(set_operation_assertions.CheckForCannotCreate, "/1002/0", None, None, True), )],
["CreateMultipleOnSingleObjectInstance", (SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0", None, None, True),
SetAssertion(set_operation_assertions.CheckForCannotCreate, "/1000/1", None, None, True), )],
["CreateMultipleOnMultipleObjectInstance", (SetAssertion(set_operation_assertions.CheckForSuccess, "/1002/0", None, None, True),
SetAssertion(set_operation_assertions.CheckForSuccess, "/1002/1", None, None, True), )],
# Create resource test cases
["CreateStringArrayResource", (SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0", None, None, True),
SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0/109", AwaResourceType.StringArray, None, False, True),
GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/109", AwaResourceType.StringArray, {1: "Sample1", 2: "Sample2", 3: "Sample3"}), )],
["CreateExistingStringArrayResource", (SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0", None, None, True),
SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0/109", AwaResourceType.StringArray, None, False, True),
SetAssertion(set_operation_assertions.CheckForCannotCreate, "/1000/0/109", AwaResourceType.StringArray, None, False, True), )],
["CreateStringArrayResourceWithValue", (SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0", None, None, True),
SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0/109", AwaResourceType.StringArray, {0: "Sample0", 2: "Sample2"}, False, True),
GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/109", AwaResourceType.StringArray, {0: "Sample0", 2: "Sample2"}), )],
], testcase_func_name=noseParameterisedTestNameGenerator)
def test(self, name, assertions):
test_assertions.callAssertions(self, assertions)
@attr("gateway_client")
class CustomObjectDeleteTestCases(GWClientTestCase):
def setUp(self):
super(CustomObjectDeleteTestCases, self).setUp()
self.topology.gatewayClients[0].DefineTestObjects()
self.topology.gatewayClients[0].CreateInstancesOfTestObjects()
@parameterized.expand([
# Delete resource test cases
["DeleteMandatoryResource", (DeleteAssertion(client_delete_assertions.CheckForSuccess, "/1000/0/202"), )],
["DeleteOptionalResource", (SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0/102", AwaResourceType.String, None, False, True),
DeleteAssertion(client_delete_assertions.CheckForSuccess, "/1000/0/102"), )],
["DeleteNonExistentResource", (DeleteAssertion(client_delete_assertions.CheckForPathNotFound, "/1000/0/102"), )],
["DeleteUndefinedResource", (DeleteAssertion(client_delete_assertions.CheckForPathNotFound, "/1000/0/999"), )],
# Delete object instance success cases
["DeleteMandatoryObjectInstance", (DeleteAssertion(client_delete_assertions.CheckForSuccess, "/1001/0"), )],
["DeleteOptionalObjectInstance", (DeleteAssertion(client_delete_assertions.CheckForSuccess, "/1000/0"), )],
["DeleteNonExistentObjectInstance", (DeleteAssertion(client_delete_assertions.CheckForPathNotFound, "/1000/999"), )],
["DeleteUndefinedObject", (DeleteAssertion(client_delete_assertions.CheckForPathNotFound, "/9999"), )],
# TODO delete whole object cases
], testcase_func_name=noseParameterisedTestNameGenerator)
def test(self, name, assertions):
test_assertions.callAssertions(self, assertions)
@attr("gateway_client")
class CustomObjectTestCases(GWClientTestCase):
def setUp(self):
super(CustomObjectTestCases, self).setUp()
self.topology.gatewayClients[0].DefineTestObjects()
self.topology.gatewayClients[0].CreateInstancesOfTestObjects()
@parameterized.expand([
# Create object instance test cases
["SetGetMandatoryStringResource", (GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/202", AwaResourceType.String, "test"),
SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0/202", AwaResourceType.String, "Imagination Technologies"),
GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/202", AwaResourceType.String, "Imagination Technologies"), )],
["SetGetMandatoryStringArrayResource", (GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/209", AwaResourceType.StringArray, {1: "Sample1", 2: "Sample2", 3: "Sample3"}),
SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0/209", AwaResourceType.StringArray, {0: "Sample0", 20: "Sample20"}),
GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/209", AwaResourceType.StringArray, {0: "Sample0", 20: "Sample20"}), )],
["SetGetMandatoryIntegerArrayResource", (GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/210", AwaResourceType.IntegerArray, {0: 5, 1: 10, 2: 15}),
SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0/210", AwaResourceType.IntegerArray, {3: 5, 4: 10, 5: 15}),
GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/210", AwaResourceType.IntegerArray, {3: 5, 4: 10, 5: 15}), )],
["UpdateMandatoryIntegerArrayResource", (GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/210", AwaResourceType.IntegerArray, {3: 5, 4: 10, 5: 15}),
SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0/210", AwaResourceType.IntegerArray, {6: 20}, False, False, True),
GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/210", AwaResourceType.IntegerArray, {3: 5, 4: 10, 5: 15, 6: 20}), )],
["SetGetOptionalStringResource", (SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0/102", None, None, False, True), # create resource
GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/102", AwaResourceType.String, "test"),
SetAssertion(set_operation_assertions.CheckForSuccess, "/1000/0/102", AwaResourceType.String, "Imagination Technologies"),
GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/102", AwaResourceType.String, "Imagination Technologies"), )],
# Set negative test cases
["SetIntegerValueOnUndefinedObject", (SetAssertion(set_operation_assertions.CheckForNotDefinedWhenAddingValue, "/9999/0/0", AwaResourceType.Integer, 1234), )],
["SetIntegerValueOnUndefinedObjectInstance", (SetAssertion(set_operation_assertions.CheckForNotDefinedWhenAddingValue, "/1000/9999/0", AwaResourceType.Integer, 1234), )],
["SetIntegerValueOnUndefinedResource", (SetAssertion(set_operation_assertions.CheckForNotDefinedWhenAddingValue, "/1000/0/9999", AwaResourceType.Integer, 1234), )],
["SetIntegerValueOnInvalidPath", (SetAssertion(set_operation_assertions.CheckForPathInvalid, "/@%@!#$/0/9999", AwaResourceType.Integer, 1234), )],
# Get negative test cases
["GetIntegerValueOnUndefinedObject", (GetAssertion(get_operation_assertions.CheckForPathNotFound, "/9999/0/0", AwaResourceType.Integer, 1234), )],
["GetIntegerValueOnUndefinedObjectInstance", (GetAssertion(get_operation_assertions.CheckForPathNotFound, "/1000/9999/0", AwaResourceType.Integer, 1234), )],
["GetIntegerValueOnUndefinedResource", (GetAssertion(get_operation_assertions.CheckForPathNotFound, "/1000/0/9999", AwaResourceType.Integer, 1234), )],
["GetIntegerValueOnInvalidPath", (GetAssertion(get_operation_assertions.CheckForPathInvalid, "/@%@!#$/0/9999", AwaResourceType.Integer, 1234), )],
# subscribe test cases
["SubscribeToChangeStringResource", (GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/202", AwaResourceType.String, "test"),
SubscribeAssertion(subscribe_operation_assertions.CheckForSuccess, "/1000/0/202", AwaResourceType.String, "test2", True), )],
["SubscribeToChangeStringArrayResource", (GetAssertion(get_operation_assertions.CheckForSuccess, "/1000/0/209", AwaResourceType.StringArray, {1: "Sample1", 2: "Sample2", 3: "Sample3"}),
SubscribeAssertion(subscribe_operation_assertions.CheckForSuccess, "/1000/0/209", AwaResourceType.StringArray, {0: "Sample0", 5: "Sample5"}, True, True), )],
], testcase_func_name=noseParameterisedTestNameGenerator)
def test(self, name, assertions):
test_assertions.callAssertions(self, assertions)
|
from __future__ import division
import cv2
import numpy as np
import scipy.io
import scipy.ndimage
def padding(img, shape_r=240, shape_c=320, channels=3):
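    """Resize img to fit inside (shape_r, shape_c) preserving aspect ratio,
    centring it and zero-padding the remainder."""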
img_padded = np.zeros((shape_r, shape_c, channels), dtype=np.uint8)
if channels == 1:
img_padded = np.zeros((shape_r, shape_c), dtype=np.uint8)
original_shape = img.shape
rows_rate = original_shape[0]/shape_r
cols_rate = original_shape[1]/shape_c
if rows_rate > cols_rate:
new_cols = (original_shape[1] * shape_r) // original_shape[0]
img = cv2.resize(img, (new_cols, shape_r))
if new_cols > shape_c:
new_cols = shape_c
img_padded[:, ((img_padded.shape[1] - new_cols) // 2):((img_padded.shape[1] - new_cols) // 2 + new_cols)] = img
else:
new_rows = (original_shape[0] * shape_c) // original_shape[1]
img = cv2.resize(img, (shape_c, new_rows))
if new_rows > shape_r:
new_rows = shape_r
img_padded[((img_padded.shape[0] - new_rows) // 2):((img_padded.shape[0] - new_rows) // 2 + new_rows), :] = img
return img_padded
def resize_fixation(img, rows=480, cols=640):
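    """Rescale a binary fixation map to (rows, cols) by remapping each nonzero coordinate."""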
out = np.zeros((rows, cols))
factor_scale_r = rows / img.shape[0]
factor_scale_c = cols / img.shape[1]
coords = np.argwhere(img)
for coord in coords:
r = int(np.round(coord[0]*factor_scale_r))
c = int(np.round(coord[1]*factor_scale_c))
if r == rows:
r -= 1
if c == cols:
c -= 1
out[r, c] = 1
return out
def padding_fixation(img, shape_r=480, shape_c=640):
img_padded = np.zeros((shape_r, shape_c))
original_shape = img.shape
rows_rate = original_shape[0]/shape_r
cols_rate = original_shape[1]/shape_c
if rows_rate > cols_rate:
new_cols = (original_shape[1] * shape_r) // original_shape[0]
img = resize_fixation(img, rows=shape_r, cols=new_cols)
if new_cols > shape_c:
new_cols = shape_c
img_padded[:, ((img_padded.shape[1] - new_cols) // 2):((img_padded.shape[1] - new_cols) // 2 + new_cols)] = img
else:
new_rows = (original_shape[0] * shape_c) // original_shape[1]
img = resize_fixation(img, rows=new_rows, cols=shape_c)
if new_rows > shape_r:
new_rows = shape_r
img_padded[((img_padded.shape[0] - new_rows) // 2):((img_padded.shape[0] - new_rows) // 2 + new_rows), :] = img
return img_padded
def preprocess_images(paths, shape_r, shape_c):
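    """Load, pad and stack the images, subtract the per-channel BGR means
    (the values commonly used with VGG-style models) and return an NCHW batch."""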
ims = np.zeros((len(paths), shape_r, shape_c, 3))
for i, path in enumerate(paths):
original_image = cv2.imread(path)
padded_image = padding(original_image, shape_r, shape_c, 3)
ims[i] = padded_image
ims[:, :, :, 0] -= 103.939
ims[:, :, :, 1] -= 116.779
ims[:, :, :, 2] -= 123.68
ims = ims.transpose((0, 3, 1, 2))
return ims
def preprocess_maps(paths, shape_r, shape_c):
ims = np.zeros((len(paths), 1, shape_r, shape_c))
for i, path in enumerate(paths):
original_map = cv2.imread(path, 0)
padded_map = padding(original_map, shape_r, shape_c, 1)
ims[i, 0] = padded_map.astype(np.float32)
ims[i, 0] /= 255.0
return ims
def preprocess_fixmaps(paths, shape_r, shape_c):
ims = np.zeros((len(paths), 1, shape_r, shape_c))
for i, path in enumerate(paths):
fix_map = scipy.io.loadmat(path)["I"]
ims[i, 0] = padding_fixation(fix_map, shape_r=shape_r, shape_c=shape_c)
return ims
def postprocess_predictions(pred, shape_r, shape_c):
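    """Resize a predicted saliency map back to (shape_r, shape_c), crop away the
    padding, smooth it with a Gaussian and rescale to [0, 255]."""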
predictions_shape = pred.shape
rows_rate = shape_r / predictions_shape[0]
cols_rate = shape_c / predictions_shape[1]
pred = pred / np.max(pred) * 255
if rows_rate > cols_rate:
new_cols = (predictions_shape[1] * shape_r) // predictions_shape[0]
pred = cv2.resize(pred, (new_cols, shape_r))
img = pred[:, ((pred.shape[1] - shape_c) // 2):((pred.shape[1] - shape_c) // 2 + shape_c)]
else:
new_rows = (predictions_shape[0] * shape_c) // predictions_shape[1]
pred = cv2.resize(pred, (shape_c, new_rows))
img = pred[((pred.shape[0] - shape_r) // 2):((pred.shape[0] - shape_r) // 2 + shape_r), :]
img = scipy.ndimage.filters.gaussian_filter(img, sigma=7)
img = img / np.max(img) * 255
return img
|
# Generated by Django 3.0.2 on 2020-01-08 18:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projectm', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='task',
name='completed',
field=models.BooleanField(default=False, help_text='Task completion status'),
),
]
|
'''
Inference engine for sparse image pair correspondences
'''
import time
import random
import numpy as np
import torch
from COTR.inference.inference_helper import THRESHOLD_SPARSE, THRESHOLD_AREA, cotr_flow, cotr_corr_base
from COTR.inference.refinement_task import RefinementTask
from COTR.utils import debug_utils, utils
from COTR.cameras.capture import stretch_to_square_np
class SparseEngine():
def __init__(self, model, batch_size, mode='stretching'):
assert mode in ['stretching', 'tile']
self.model = model
self.batch_size = batch_size
self.total_tasks = 0
self.mode = mode
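        # 'stretching' squashes non-square inputs to squares before estimating
        # flow; 'tile' feeds the images to cotr_flow unchanged here (any tiling
        # is presumably handled inside cotr_flow itself).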
def form_batch(self, tasks, zoom=None):
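        """Collect up to batch_size unfinished, unsubmitted tasks (optionally
        restricted to one zoom level) and stack their crops and queries."""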
counter = 0
task_ref = []
img_batch = []
query_batch = []
for t in tasks:
if t.status == 'unfinished' and t.submitted == False:
if zoom is not None and t.cur_zoom != zoom:
continue
task_ref.append(t)
img, query = t.get_task()
img_batch.append(img)
query_batch.append(query)
counter += 1
if counter >= self.batch_size:
break
if len(task_ref) == 0:
return [], [], []
img_batch = torch.stack(img_batch)
query_batch = torch.stack(query_batch)
return task_ref, img_batch, query_batch
def infer_batch(self, img_batch, query_batch):
self.total_tasks += img_batch.shape[0]
device = next(self.model.parameters()).device
img_batch = img_batch.to(device)
query_batch = query_batch.to(device)
out = self.model(img_batch, query_batch)['pred_corrs'].clone().detach()
out = out.cpu().numpy()[:, 0, :]
if utils.has_nan(out):
raise ValueError('NaN in prediction')
return out
def conclude_tasks(self, tasks, return_idx=False, force=False,
offset_x_from=0,
offset_y_from=0,
offset_x_to=0,
offset_y_to=0,
img_a_shape=None,
img_b_shape=None):
corrs = []
idx = []
for t in tasks:
if t.status == 'finished':
out = t.conclude(force)
if out is not None:
corrs.append(np.array(out))
idx.append(t.identifier)
corrs = np.array(corrs)
idx = np.array(idx)
if corrs.shape[0] > 0:
corrs -= np.array([offset_x_from, offset_y_from, offset_x_to, offset_y_to])
if img_a_shape is not None and img_b_shape is not None and not force:
border_mask = np.prod(corrs < np.concatenate([img_a_shape[::-1], img_b_shape[::-1]]), axis=1)
border_mask = (np.prod(corrs > np.array([0, 0, 0, 0]), axis=1) * border_mask).astype(bool)
corrs = corrs[border_mask]
idx = idx[border_mask]
if return_idx:
return corrs, idx
return corrs
def num_finished_tasks(self, tasks):
counter = 0
for t in tasks:
if t.status == 'finished':
counter += 1
return counter
def num_good_tasks(self, tasks):
counter = 0
for t in tasks:
if t.result == 'good':
counter += 1
return counter
def gen_tasks_w_known_scale(self, img_a, img_b, queries_a, areas, zoom_ins=[1.0], converge_iters=1, max_corrs=1000):
assert self.mode == 'tile'
corr_a = cotr_corr_base(self.model, img_a, img_b, queries_a)
tasks = []
for c in corr_a:
tasks.append(RefinementTask(img_a, img_b, c[:2], c[2:], areas[0], areas[1], converge_iters, zoom_ins))
return tasks
def gen_tasks(self, img_a, img_b, zoom_ins=[1.0], converge_iters=1, max_corrs=1000, queries_a=None, force=False, areas=None):
if areas is not None:
assert queries_a is not None
assert force == True
assert max_corrs >= queries_a.shape[0]
return self.gen_tasks_w_known_scale(img_a, img_b, queries_a, areas, zoom_ins=zoom_ins, converge_iters=converge_iters, max_corrs=max_corrs)
if self.mode == 'stretching':
if img_a.shape[0] != img_a.shape[1] or img_b.shape[0] != img_b.shape[1]:
img_a_shape = img_a.shape
img_b_shape = img_b.shape
img_a_sq = stretch_to_square_np(img_a.copy())
img_b_sq = stretch_to_square_np(img_b.copy())
corr_a, con_a, resample_a, corr_b, con_b, resample_b = cotr_flow(self.model,
img_a_sq,
img_b_sq
)
corr_a = utils.float_image_resize(corr_a, img_a_shape[:2])
con_a = utils.float_image_resize(con_a, img_a_shape[:2])
resample_a = utils.float_image_resize(resample_a, img_a_shape[:2])
corr_b = utils.float_image_resize(corr_b, img_b_shape[:2])
con_b = utils.float_image_resize(con_b, img_b_shape[:2])
resample_b = utils.float_image_resize(resample_b, img_b_shape[:2])
else:
corr_a, con_a, resample_a, corr_b, con_b, resample_b = cotr_flow(self.model,
img_a,
img_b
)
elif self.mode == 'tile':
corr_a, con_a, resample_a, corr_b, con_b, resample_b = cotr_flow(self.model,
img_a,
img_b
)
else:
raise ValueError(f'unsupported mode: {self.mode}')
mask_a = con_a < THRESHOLD_SPARSE
mask_b = con_b < THRESHOLD_SPARSE
area_a = (con_a < THRESHOLD_AREA).sum() / mask_a.size
area_b = (con_b < THRESHOLD_AREA).sum() / mask_b.size
tasks = []
if queries_a is None:
index_a = np.where(mask_a)
index_a = np.array(index_a).T
index_a = index_a[np.random.choice(len(index_a), min(max_corrs, len(index_a)))]
index_b = np.where(mask_b)
index_b = np.array(index_b).T
index_b = index_b[np.random.choice(len(index_b), min(max_corrs, len(index_b)))]
for pos in index_a:
loc_from = pos[::-1]
loc_to = (corr_a[tuple(np.floor(pos).astype('int'))].copy() * 0.5 + 0.5) * img_b.shape[:2][::-1]
tasks.append(RefinementTask(img_a, img_b, loc_from, loc_to, area_a, area_b, converge_iters, zoom_ins))
for pos in index_b:
'''
Trick: normally the query point location (loc_from) would be fixed,
but here the first guess (loc_to) is fixed instead.
'''
loc_from = pos[::-1]
loc_to = (corr_b[tuple(np.floor(pos).astype('int'))].copy() * 0.5 + 0.5) * img_a.shape[:2][::-1]
tasks.append(RefinementTask(img_a, img_b, loc_to, loc_from, area_a, area_b, converge_iters, zoom_ins))
else:
if force:
for i, loc_from in enumerate(queries_a):
pos = loc_from[::-1]
pos = np.array([np.clip(pos[0], 0, corr_a.shape[0] - 1), np.clip(pos[1], 0, corr_a.shape[1] - 1)], dtype=int)
loc_to = (corr_a[tuple(pos)].copy() * 0.5 + 0.5) * img_b.shape[:2][::-1]
tasks.append(RefinementTask(img_a, img_b, loc_from, loc_to, area_a, area_b, converge_iters, zoom_ins, identifier=i))
else:
for i, loc_from in enumerate(queries_a):
pos = loc_from[::-1]
if (pos > np.array(img_a.shape[:2]) - 1).any() or (pos < 0).any():
continue
if mask_a[tuple(np.floor(pos).astype('int'))]:
loc_to = (corr_a[tuple(np.floor(pos).astype('int'))].copy() * 0.5 + 0.5) * img_b.shape[:2][::-1]
tasks.append(RefinementTask(img_a, img_b, loc_from, loc_to, area_a, area_b, converge_iters, zoom_ins, identifier=i))
if len(tasks) < max_corrs:
extra = max_corrs - len(tasks)
counter = 0
for i, loc_from in enumerate(queries_a):
if counter >= extra:
break
pos = loc_from[::-1]
if (pos > np.array(img_a.shape[:2]) - 1).any() or (pos < 0).any():
continue
if mask_a[tuple(np.floor(pos).astype('int'))] == False:
loc_to = (corr_a[tuple(np.floor(pos).astype('int'))].copy() * 0.5 + 0.5) * img_b.shape[:2][::-1]
tasks.append(RefinementTask(img_a, img_b, loc_from, loc_to, area_a, area_b, converge_iters, zoom_ins, identifier=i))
counter += 1
return tasks
def cotr_corr_multiscale(self, img_a, img_b, zoom_ins=[1.0], converge_iters=1, max_corrs=1000, queries_a=None, return_idx=False, force=False, return_tasks_only=False, areas=None):
'''
Currently only supports a fixed set of queries_a.
'''
img_a = img_a.copy()
img_b = img_b.copy()
img_a_shape = img_a.shape[:2]
img_b_shape = img_b.shape[:2]
if queries_a is not None:
queries_a = queries_a.copy()
tasks = self.gen_tasks(img_a, img_b, zoom_ins, converge_iters, max_corrs, queries_a, force, areas)
while True:
num_g = self.num_good_tasks(tasks)
print(f'{num_g} / {max_corrs} | {self.num_finished_tasks(tasks)} / {len(tasks)}')
task_ref, img_batch, query_batch = self.form_batch(tasks)
if len(task_ref) == 0:
break
if num_g >= max_corrs:
break
out = self.infer_batch(img_batch, query_batch)
for t, o in zip(task_ref, out):
t.step(o)
if return_tasks_only:
return tasks
if return_idx:
corrs, idx = self.conclude_tasks(tasks, return_idx=True, force=force,
img_a_shape=img_a_shape,
img_b_shape=img_b_shape,)
corrs = corrs[:max_corrs]
idx = idx[:max_corrs]
return corrs, idx
else:
corrs = self.conclude_tasks(tasks, force=force,
img_a_shape=img_a_shape,
img_b_shape=img_b_shape,)
corrs = corrs[:max_corrs]
return corrs
def cotr_corr_multiscale_with_cycle_consistency(self, img_a, img_b, zoom_ins=[1.0], converge_iters=1, max_corrs=1000, queries_a=None, return_idx=False, return_cycle_error=False, force=False):
EXTRACTION_RATE = 0.3
temp_max_corrs = int(max_corrs / EXTRACTION_RATE)
if queries_a is not None:
temp_max_corrs = min(temp_max_corrs, queries_a.shape[0])
queries_a = queries_a.copy()
corr_f, idx_f = self.cotr_corr_multiscale(img_a.copy(), img_b.copy(),
zoom_ins=zoom_ins,
converge_iters=converge_iters,
max_corrs=temp_max_corrs,
queries_a=queries_a,
return_idx=True,
force=force)
assert corr_f.shape[0] > 0
corr_b, idx_b = self.cotr_corr_multiscale(img_b.copy(), img_a.copy(),
zoom_ins=zoom_ins,
converge_iters=converge_iters,
max_corrs=corr_f.shape[0],
queries_a=corr_f[:, 2:].copy(),
return_idx=True,
force=force)
assert corr_b.shape[0] > 0
cycle_errors = np.linalg.norm(corr_f[idx_b][:, :2] - corr_b[:, 2:], axis=1)
order = np.argsort(cycle_errors)
out = [corr_f[idx_b][order][:max_corrs]]
if return_idx:
out.append(idx_f[idx_b][order][:max_corrs])
if return_cycle_error:
out.append(cycle_errors[order][:max_corrs])
if len(out) == 1:
out = out[0]
return out
class FasterSparseEngine(SparseEngine):
'''
Search for and merge nearby tasks to accelerate inference.
This makes spatial accuracy slightly worse.
'''
def __init__(self, model, batch_size, mode='stretching', max_load=256):
super().__init__(model, batch_size, mode=mode)
self.max_load = max_load
def infer_batch_grouped(self, img_batch, query_batch):
device = next(self.model.parameters()).device
img_batch = img_batch.to(device)
query_batch = query_batch.to(device)
out = self.model(img_batch, query_batch)['pred_corrs'].clone().detach().cpu().numpy()
return out
def get_tasks_map(self, zoom, tasks):
maps = []
ids = []
for i, t in enumerate(tasks):
if t.status == 'unfinished' and t.submitted == False and t.cur_zoom == zoom:
t_info = t.peek()
point = np.concatenate([t_info['loc_from'], t_info['loc_to']])
maps.append(point)
ids.append(i)
return np.array(maps), np.array(ids)
def form_squad(self, zoom, pilot, pilot_id, tasks, tasks_map, task_ids, bookkeeping):
assert pilot.status == 'unfinished' and pilot.submitted == False and pilot.cur_zoom == zoom
SAFE_AREA = 0.5
pilot_info = pilot.peek()
pilot_from_center_x = pilot_info['patch_from'].x + pilot_info['patch_from'].w/2
pilot_from_center_y = pilot_info['patch_from'].y + pilot_info['patch_from'].h/2
pilot_from_left = pilot_from_center_x - pilot_info['patch_from'].w/2 * SAFE_AREA
pilot_from_right = pilot_from_center_x + pilot_info['patch_from'].w/2 * SAFE_AREA
pilot_from_upper = pilot_from_center_y - pilot_info['patch_from'].h/2 * SAFE_AREA
pilot_from_lower = pilot_from_center_y + pilot_info['patch_from'].h/2 * SAFE_AREA
pilot_to_center_x = pilot_info['patch_to'].x + pilot_info['patch_to'].w/2
pilot_to_center_y = pilot_info['patch_to'].y + pilot_info['patch_to'].h/2
pilot_to_left = pilot_to_center_x - pilot_info['patch_to'].w/2 * SAFE_AREA
pilot_to_right = pilot_to_center_x + pilot_info['patch_to'].w/2 * SAFE_AREA
pilot_to_upper = pilot_to_center_y - pilot_info['patch_to'].h/2 * SAFE_AREA
pilot_to_lower = pilot_to_center_y + pilot_info['patch_to'].h/2 * SAFE_AREA
img, query = pilot.get_task()
assert pilot.submitted == True
members = [pilot]
queries = [query]
bookkeeping[pilot_id] = False
loads = np.where(((tasks_map[:, 0] > pilot_from_left) &
(tasks_map[:, 0] < pilot_from_right) &
(tasks_map[:, 1] > pilot_from_upper) &
(tasks_map[:, 1] < pilot_from_lower) &
(tasks_map[:, 2] > pilot_to_left) &
(tasks_map[:, 2] < pilot_to_right) &
(tasks_map[:, 3] > pilot_to_upper) &
(tasks_map[:, 3] < pilot_to_lower)) *
bookkeeping)[0][: self.max_load]
for ti in task_ids[loads]:
t = tasks[ti]
assert t.status == 'unfinished' and t.submitted == False and t.cur_zoom == zoom
_, query = t.get_task_pilot(pilot)
members.append(t)
queries.append(query)
queries = torch.stack(queries, axis=1)
bookkeeping[loads] = False
return members, img, queries, bookkeeping
def form_grouped_batch(self, zoom, tasks):
counter = 0
task_ref = []
img_batch = []
query_batch = []
tasks_map, task_ids = self.get_tasks_map(zoom, tasks)
shuffle = np.random.permutation(tasks_map.shape[0])
tasks_map = np.take(tasks_map, shuffle, axis=0)
task_ids = np.take(task_ids, shuffle, axis=0)
bookkeeping = np.ones_like(task_ids).astype(bool)
for i, ti in enumerate(task_ids):
t = tasks[ti]
if t.status == 'unfinished' and t.submitted == False and t.cur_zoom == zoom:
members, img, queries, bookkeeping = self.form_squad(zoom, t, i, tasks, tasks_map, task_ids, bookkeeping)
task_ref.append(members)
img_batch.append(img)
query_batch.append(queries)
counter += 1
if counter >= self.batch_size:
break
if len(task_ref) == 0:
return [], [], []
max_len = max([q.shape[1] for q in query_batch])
for i in range(len(query_batch)):
q = query_batch[i]
query_batch[i] = torch.cat([q, torch.zeros([1, max_len - q.shape[1], 2])], axis=1)
img_batch = torch.stack(img_batch)
query_batch = torch.cat(query_batch)
return task_ref, img_batch, query_batch
def cotr_corr_multiscale(self, img_a, img_b, zoom_ins=[1.0], converge_iters=1, max_corrs=1000, queries_a=None, return_idx=False, force=False, return_tasks_only=False, areas=None):
'''
Currently only supports a fixed set of queries_a.
'''
img_a = img_a.copy()
img_b = img_b.copy()
img_a_shape = img_a.shape[:2]
img_b_shape = img_b.shape[:2]
if queries_a is not None:
queries_a = queries_a.copy()
tasks = self.gen_tasks(img_a, img_b, zoom_ins, converge_iters, max_corrs, queries_a, force, areas)
for zm in zoom_ins:
print(f'======= Zoom: {zm} ======')
while True:
num_g = self.num_good_tasks(tasks)
task_ref, img_batch, query_batch = self.form_grouped_batch(zm, tasks)
if len(task_ref) == 0:
break
if num_g >= max_corrs:
break
out = self.infer_batch_grouped(img_batch, query_batch)
num_steps = 0
for i, temp in enumerate(task_ref):
for j, t in enumerate(temp):
t.step(out[i, j])
num_steps += 1
print(f'solved {num_steps} sub-tasks in one invocation with {img_batch.shape[0]} image pairs')
if num_steps <= self.batch_size:
break
# Fall back to the default per-task inference when too few valid tasks can be grouped together.
while True:
num_g = self.num_good_tasks(tasks)
print(f'{num_g} / {max_corrs} | {self.num_finished_tasks(tasks)} / {len(tasks)}')
task_ref, img_batch, query_batch = self.form_batch(tasks, zm)
if len(task_ref) == 0:
break
if num_g >= max_corrs:
break
out = self.infer_batch(img_batch, query_batch)
for t, o in zip(task_ref, out):
t.step(o)
if return_tasks_only:
return tasks
if return_idx:
corrs, idx = self.conclude_tasks(tasks, return_idx=True, force=force,
img_a_shape=img_a_shape,
img_b_shape=img_b_shape,)
corrs = corrs[:max_corrs]
idx = idx[:max_corrs]
return corrs, idx
else:
corrs = self.conclude_tasks(tasks, force=force,
img_a_shape=img_a_shape,
img_b_shape=img_b_shape,)
corrs = corrs[:max_corrs]
return corrs
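if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: typical use of the
    # engine on two RGB images with a trained COTR model. `load_cotr_model`
    # and the image paths are placeholders (assumptions).
    #
    # model = load_cotr_model().eval()
    # img_a, img_b = imageio.imread('a.png'), imageio.imread('b.png')
    # engine = SparseEngine(model, batch_size=32, mode='tile')
    # corrs = engine.cotr_corr_multiscale(img_a, img_b,
    #                                     zoom_ins=[1.0, 0.5, 0.25],
    #                                     converge_iters=1,
    #                                     max_corrs=100)
    # # corrs has shape (N, 4): x_a, y_a, x_b, y_b in pixel coordinates
    pass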
|
#!/usr/bin/python3
# Run this on BSD to create the keys in dconf.
import os
from gi.repository import Gio
schema_source = Gio.SettingsSchemaSource.new_from_directory(
# get default schema
os.path.expanduser("../data"),
Gio.SettingsSchemaSource.get_default(),
False,
)
schema = schema_source.lookup('com.github.darkoverlordofdata.catlock', False)
settings = Gio.Settings.new_full(schema, None, None)
settings.set_string('calendar-path',".local/share/catlock/us_en.ics")
settings.set_string('pin',"999999")
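# Optional sanity check (an addition for illustration, not in the original
# script): read the keys back to confirm they were written to dconf.
print(settings.get_string('calendar-path'))
print(settings.get_string('pin'))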
|
"""Tests for module state accessors in the protocol engine state store."""
import pytest
from pytest_lazyfixture import lazy_fixture # type: ignore[import]
from decoy import Decoy
from contextlib import nullcontext
from typing import ContextManager, Dict, NamedTuple, Optional, Type, TypeVar, Union
from opentrons.hardware_control.modules import AbstractModule, MagDeck, TempDeck
from opentrons.types import DeckSlotName
from opentrons.protocol_engine import errors
from opentrons.protocol_engine.types import (
LoadedModule,
DeckSlotLocation,
ModuleDefinition,
ModuleModel,
)
from opentrons.protocol_engine.state.modules import (
ModuleView,
ModuleState,
HardwareModule,
)
def make_module_view(
slot_by_module_id: Optional[Dict[str, DeckSlotName]] = None,
hardware_module_by_slot: Optional[Dict[DeckSlotName, HardwareModule]] = None,
) -> ModuleView:
"""Get a module view test subject with the specified state."""
state = ModuleState(
slot_by_module_id=slot_by_module_id or {},
hardware_module_by_slot=hardware_module_by_slot or {},
)
return ModuleView(state=state)
HardwareModuleT = TypeVar("HardwareModuleT", bound=AbstractModule)
def make_hardware_module(
decoy: Decoy, type: Type[HardwareModuleT], serial_number: str
) -> HardwareModuleT:
"""Return a mock hardware module with the specified type and serial number.
Ideally, we wouldn't use mocks for this, since our subject uses these objects
as pure input data, and doesn't call anything behavioral on them.
But it's prohibitively difficult to instantiate these objects in tests otherwise.
"""
hardware_module = decoy.mock(cls=type)
# "type: ignore" to override what's normally a read-only property.
hardware_module.device_info = {"serial": serial_number} # type: ignore[misc]
return hardware_module
def test_initial_module_data_by_id() -> None:
"""It should raise if module ID doesn't exist."""
subject = make_module_view()
with pytest.raises(errors.ModuleDoesNotExistError):
subject.get("helloWorld")
def test_get_missing_hardware() -> None:
"""It should raise if no loaded hardware."""
subject = make_module_view(slot_by_module_id={"module-id": DeckSlotName.SLOT_1})
with pytest.raises(errors.ModuleDoesNotExistError):
subject.get("module-id")
def test_get_module_data(tempdeck_v1_def: ModuleDefinition) -> None:
"""It should get module data from state by ID."""
subject = make_module_view(
slot_by_module_id={"module-id": DeckSlotName.SLOT_1},
hardware_module_by_slot={
DeckSlotName.SLOT_1: HardwareModule(
serial_number="serial-number",
definition=tempdeck_v1_def,
)
},
)
assert subject.get("module-id") == LoadedModule(
id="module-id",
model=ModuleModel.TEMPERATURE_MODULE_V1,
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
serialNumber="serial-number",
definition=tempdeck_v1_def,
)
def test_get_all_modules(
tempdeck_v1_def: ModuleDefinition,
tempdeck_v2_def: ModuleDefinition,
) -> None:
"""It should return all modules in state."""
subject = make_module_view(
slot_by_module_id={
"module-1": DeckSlotName.SLOT_1,
"module-2": DeckSlotName.SLOT_2,
},
hardware_module_by_slot={
DeckSlotName.SLOT_1: HardwareModule(
serial_number="serial-1",
definition=tempdeck_v1_def,
),
DeckSlotName.SLOT_2: HardwareModule(
serial_number="serial-2",
definition=tempdeck_v2_def,
),
},
)
assert subject.get_all() == [
LoadedModule(
id="module-1",
serialNumber="serial-1",
model=ModuleModel.TEMPERATURE_MODULE_V1,
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
definition=tempdeck_v1_def,
),
LoadedModule(
id="module-2",
serialNumber="serial-2",
model=ModuleModel.TEMPERATURE_MODULE_V2,
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_2),
definition=tempdeck_v2_def,
),
]
def test_get_properties_by_id(
tempdeck_v1_def: ModuleDefinition,
tempdeck_v2_def: ModuleDefinition,
) -> None:
"""It should return a loaded module's properties by ID."""
subject = make_module_view(
slot_by_module_id={
"module-1": DeckSlotName.SLOT_1,
"module-2": DeckSlotName.SLOT_2,
},
hardware_module_by_slot={
DeckSlotName.SLOT_1: HardwareModule(
serial_number="serial-1",
definition=tempdeck_v1_def,
),
DeckSlotName.SLOT_2: HardwareModule(
serial_number="serial-2",
definition=tempdeck_v2_def,
),
},
)
assert subject.get_definition("module-1") == tempdeck_v1_def
assert subject.get_dimensions("module-1") == tempdeck_v1_def.dimensions
assert subject.get_model("module-1") == ModuleModel.TEMPERATURE_MODULE_V1
assert subject.get_serial_number("module-1") == "serial-1"
assert subject.get_location("module-1") == DeckSlotLocation(
slotName=DeckSlotName.SLOT_1
)
assert subject.get_definition("module-2") == tempdeck_v2_def
assert subject.get_dimensions("module-2") == tempdeck_v2_def.dimensions
assert subject.get_model("module-2") == ModuleModel.TEMPERATURE_MODULE_V2
assert subject.get_serial_number("module-2") == "serial-2"
assert subject.get_location("module-2") == DeckSlotLocation(
slotName=DeckSlotName.SLOT_2
)
def test_get_magnet_home_to_base_offset() -> None:
"""It should return the model-specific offset to bottom."""
subject = make_module_view()
assert (
subject.get_magnet_home_to_base_offset(
module_model=ModuleModel.MAGNETIC_MODULE_V1
)
== 2.5
)
assert (
subject.get_magnet_home_to_base_offset(
module_model=ModuleModel.MAGNETIC_MODULE_V2
)
== 2.5
)
@pytest.mark.parametrize(
"module_model", [ModuleModel.MAGNETIC_MODULE_V1, ModuleModel.MAGNETIC_MODULE_V2]
)
def test_calculate_magnet_height(module_model: ModuleModel) -> None:
"""It should use true millimeters as hardware units."""
subject = make_module_view()
assert (
subject.calculate_magnet_height(
module_model=module_model,
height_from_base=100,
)
== 100
)
# todo(mm, 2022-02-28):
# It's unclear whether this expected result should actually be the same
# between GEN1 and GEN2.
# The GEN1 homing backoff distance looks accidentally halved, for the same reason
# that its heights are halved. If the limit switch hardware is the same for both
# modules, we'd expect the backoff difference to cause a difference in the
# height_from_home test, even though we're measuring everything in true mm.
# https://github.com/Opentrons/opentrons/issues/9585
assert (
subject.calculate_magnet_height(
module_model=module_model,
height_from_home=100,
)
== 97.5
)
assert (
subject.calculate_magnet_height(
module_model=module_model,
labware_default_height=100,
offset_from_labware_default=10.0,
)
== 110
)
class _CalculateMagnetHardwareHeightTestParams(NamedTuple):
model: ModuleModel
mm_from_base: float
expected_result: Optional[float]
expected_exception_type: Union[Type[Exception], None]
@pytest.mark.parametrize(
"model, mm_from_base, expected_result, expected_exception_type",
[
# Happy cases:
_CalculateMagnetHardwareHeightTestParams(
model=ModuleModel.MAGNETIC_MODULE_V1,
mm_from_base=10,
# TODO(mm, 2022-03-09): It's unclear if this expected result is correct.
# https://github.com/Opentrons/opentrons/issues/9585
expected_result=25,
expected_exception_type=None,
),
_CalculateMagnetHardwareHeightTestParams(
model=ModuleModel.MAGNETIC_MODULE_V2,
mm_from_base=10,
expected_result=12.5,
expected_exception_type=None,
),
# Boundary conditions:
#
# TODO(mm, 2022-03-09):
# In Python >=3.9, improve precision with math.nextafter().
# Also consider relying on shared constants instead of hard-coding bounds.
#
# TODO(mm, 2022-03-09): It's unclear if the bounds used for V1 modules
# are physically correct. https://github.com/Opentrons/opentrons/issues/9585
_CalculateMagnetHardwareHeightTestParams( # V1 barely too low.
model=ModuleModel.MAGNETIC_MODULE_V1,
mm_from_base=-2.51,
expected_result=None,
expected_exception_type=errors.EngageHeightOutOfRangeError,
),
_CalculateMagnetHardwareHeightTestParams( # V1 lowest allowed.
model=ModuleModel.MAGNETIC_MODULE_V1,
mm_from_base=-2.5,
expected_result=0,
expected_exception_type=None,
),
_CalculateMagnetHardwareHeightTestParams( # V1 highest allowed.
model=ModuleModel.MAGNETIC_MODULE_V1,
mm_from_base=20,
expected_result=45,
expected_exception_type=None,
),
_CalculateMagnetHardwareHeightTestParams( # V1 barely too high.
model=ModuleModel.MAGNETIC_MODULE_V1,
mm_from_base=20.01,
expected_result=None,
expected_exception_type=errors.EngageHeightOutOfRangeError,
),
_CalculateMagnetHardwareHeightTestParams( # V2 barely too low.
model=ModuleModel.MAGNETIC_MODULE_V2,
mm_from_base=-2.51,
expected_result=None,
expected_exception_type=errors.EngageHeightOutOfRangeError,
),
_CalculateMagnetHardwareHeightTestParams( # V2 lowest allowed.
model=ModuleModel.MAGNETIC_MODULE_V2,
mm_from_base=-2.5,
expected_result=0,
expected_exception_type=None,
),
_CalculateMagnetHardwareHeightTestParams( # V2 highest allowed.
model=ModuleModel.MAGNETIC_MODULE_V2,
mm_from_base=22.5,
expected_result=25,
expected_exception_type=None,
),
_CalculateMagnetHardwareHeightTestParams( # V2 barely too high.
model=ModuleModel.MAGNETIC_MODULE_V2,
mm_from_base=22.51,
expected_result=None,
expected_exception_type=errors.EngageHeightOutOfRangeError,
),
# Bad model:
_CalculateMagnetHardwareHeightTestParams(
model=ModuleModel.TEMPERATURE_MODULE_V1,
mm_from_base=0,
expected_result=None,
expected_exception_type=errors.WrongModuleTypeError,
),
],
)
def test_calculate_magnet_hardware_height(
model: ModuleModel,
mm_from_base: float,
expected_result: Optional[float],
expected_exception_type: Union[Type[Exception], None],
) -> None:
"""It should return the expected height or raise the expected exception."""
subject = make_module_view()
context: ContextManager[None] = (
# Not sure why mypy has trouble with this.
nullcontext() # type: ignore[assignment]
if expected_exception_type is None
else pytest.raises(expected_exception_type)
)
with context:
result = subject.calculate_magnet_hardware_height(
magnetic_module_model=model, mm_from_base=mm_from_base
)
assert result == expected_result
@pytest.mark.parametrize(
argnames=["from_slot", "to_slot", "should_dodge"],
argvalues=[
(DeckSlotName.SLOT_1, DeckSlotName.FIXED_TRASH, True),
(DeckSlotName.FIXED_TRASH, DeckSlotName.SLOT_1, True),
(DeckSlotName.SLOT_4, DeckSlotName.FIXED_TRASH, True),
(DeckSlotName.FIXED_TRASH, DeckSlotName.SLOT_4, True),
(DeckSlotName.SLOT_4, DeckSlotName.SLOT_9, True),
(DeckSlotName.SLOT_9, DeckSlotName.SLOT_4, True),
(DeckSlotName.SLOT_4, DeckSlotName.SLOT_8, True),
(DeckSlotName.SLOT_8, DeckSlotName.SLOT_4, True),
(DeckSlotName.SLOT_1, DeckSlotName.SLOT_8, True),
(DeckSlotName.SLOT_8, DeckSlotName.SLOT_1, True),
(DeckSlotName.SLOT_4, DeckSlotName.SLOT_11, True),
(DeckSlotName.SLOT_11, DeckSlotName.SLOT_4, True),
(DeckSlotName.SLOT_1, DeckSlotName.SLOT_11, True),
(DeckSlotName.SLOT_11, DeckSlotName.SLOT_1, True),
(DeckSlotName.SLOT_2, DeckSlotName.SLOT_4, False),
],
)
def test_thermocycler_dodging(
thermocycler_v1_def: ModuleDefinition,
from_slot: DeckSlotName,
to_slot: DeckSlotName,
should_dodge: bool,
) -> None:
"""It should specify if thermocycler dodging is needed.
It should return True if a thermocycler exists and the movement is between bad pairs of
slot locations.
"""
subject = make_module_view(
slot_by_module_id={"module-id": DeckSlotName.SLOT_1},
hardware_module_by_slot={
DeckSlotName.SLOT_1: HardwareModule(
serial_number="serial-number",
definition=thermocycler_v1_def,
)
},
)
assert (
subject.should_dodge_thermocycler(from_slot=from_slot, to_slot=to_slot)
is should_dodge
)
def test_find_loaded_hardware_module(
decoy: Decoy, magdeck_v1_def: ModuleDefinition
) -> None:
"""It should return the matching hardware module."""
matching = make_hardware_module(
decoy=decoy, type=MagDeck, serial_number="serial-matching"
)
non_matching = make_hardware_module(
decoy=decoy, type=MagDeck, serial_number="serial-non-matching"
)
another_non_matching = make_hardware_module(
decoy=decoy, type=TempDeck, serial_number="serial-another-non-matching"
)
attached = [non_matching, matching, another_non_matching]
subject = make_module_view(
hardware_module_by_slot={
DeckSlotName.SLOT_1: HardwareModule(
serial_number="serial-non-matching",
definition=magdeck_v1_def,
),
DeckSlotName.SLOT_2: HardwareModule(
serial_number="serial-matching",
definition=magdeck_v1_def,
),
DeckSlotName.SLOT_3: HardwareModule(
serial_number="serial-another-non-matching",
definition=magdeck_v1_def,
),
},
slot_by_module_id={
"id-non-matching": DeckSlotName.SLOT_1,
"id-matching": DeckSlotName.SLOT_2,
"id-another-non-matching": DeckSlotName.SLOT_3,
},
)
result = subject.find_loaded_hardware_module(
module_id="id-matching",
attached_modules=attached,
expected_type=MagDeck,
)
assert result == matching
def test_find_loaded_hardware_module_raises_if_no_match_loaded(
decoy: Decoy,
) -> None:
"""It should raise if the ID doesn't point to a loaded module."""
subject = make_module_view(
hardware_module_by_slot={},
slot_by_module_id={},
)
with pytest.raises(errors.ModuleDoesNotExistError):
subject.find_loaded_hardware_module(
module_id="module-id",
attached_modules=[],
expected_type=MagDeck,
)
def test_find_loaded_hardware_module_raises_if_match_not_attached(
decoy: Decoy, magdeck_v1_def: ModuleDefinition
) -> None:
"""It should raise if a match was loaded but is not found in the attached list."""
subject = make_module_view(
hardware_module_by_slot={
DeckSlotName.SLOT_1: HardwareModule(
serial_number="serial-matching",
definition=magdeck_v1_def,
),
},
slot_by_module_id={
"id-matching": DeckSlotName.SLOT_1,
},
)
with pytest.raises(errors.ModuleNotAttachedError):
subject.find_loaded_hardware_module(
module_id="id-matching",
attached_modules=[],
expected_type=MagDeck,
)
def test_find_loaded_hardware_module_raises_if_match_is_wrong_type(
decoy: Decoy, magdeck_v1_def: ModuleDefinition
) -> None:
"""It should raise if a match was found but is of an unexpected type."""
matching = make_hardware_module(
decoy=decoy, type=MagDeck, serial_number="serial-matching"
)
subject = make_module_view(
hardware_module_by_slot={
DeckSlotName.SLOT_1: HardwareModule(
serial_number="serial-matching",
definition=magdeck_v1_def,
),
},
slot_by_module_id={
"id-matching": DeckSlotName.SLOT_1,
},
)
with pytest.raises(errors.WrongModuleTypeError):
subject.find_loaded_hardware_module(
module_id="id-matching",
attached_modules=[matching],
expected_type=TempDeck, # Will definitely not match.
)
def test_select_hardware_module_to_load_rejects_missing() -> None:
"""It should raise if the correct module isn't attached."""
subject = make_module_view()
with pytest.raises(errors.ModuleNotAttachedError):
subject.select_hardware_module_to_load(
model=ModuleModel.TEMPERATURE_MODULE_V1,
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
attached_modules=[],
)
@pytest.mark.parametrize(
argnames=["requested_model", "attached_definition"],
argvalues=[
(ModuleModel.TEMPERATURE_MODULE_V1, lazy_fixture("tempdeck_v1_def")),
(ModuleModel.TEMPERATURE_MODULE_V2, lazy_fixture("tempdeck_v2_def")),
(ModuleModel.TEMPERATURE_MODULE_V1, lazy_fixture("tempdeck_v2_def")),
(ModuleModel.TEMPERATURE_MODULE_V2, lazy_fixture("tempdeck_v1_def")),
(ModuleModel.MAGNETIC_MODULE_V1, lazy_fixture("magdeck_v1_def")),
(ModuleModel.MAGNETIC_MODULE_V2, lazy_fixture("magdeck_v2_def")),
(ModuleModel.THERMOCYCLER_MODULE_V1, lazy_fixture("thermocycler_v1_def")),
],
)
def test_select_hardware_module_to_load(
requested_model: ModuleModel,
attached_definition: ModuleDefinition,
) -> None:
"""It should return the first attached module that matches."""
subject = make_module_view()
attached_modules = [
HardwareModule(serial_number="serial-1", definition=attached_definition),
HardwareModule(serial_number="serial-2", definition=attached_definition),
]
result = subject.select_hardware_module_to_load(
model=requested_model,
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
attached_modules=attached_modules,
)
assert result == attached_modules[0]
def test_select_hardware_module_to_load_skips_non_matching(
magdeck_v1_def: ModuleDefinition,
magdeck_v2_def: ModuleDefinition,
) -> None:
"""It should skip over non-matching modules."""
subject = make_module_view()
attached_modules = [
HardwareModule(serial_number="serial-1", definition=magdeck_v1_def),
HardwareModule(serial_number="serial-2", definition=magdeck_v2_def),
]
result = subject.select_hardware_module_to_load(
model=ModuleModel.MAGNETIC_MODULE_V2,
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
attached_modules=attached_modules,
)
assert result == attached_modules[1]
def test_select_hardware_module_to_load_skips_already_loaded(
magdeck_v1_def: ModuleDefinition,
) -> None:
"""It should skip over already assigned modules."""
subject = make_module_view(
hardware_module_by_slot={
DeckSlotName.SLOT_1: HardwareModule(
serial_number="serial-1",
definition=magdeck_v1_def,
)
}
)
attached_modules = [
HardwareModule(serial_number="serial-1", definition=magdeck_v1_def),
HardwareModule(serial_number="serial-2", definition=magdeck_v1_def),
]
result = subject.select_hardware_module_to_load(
model=ModuleModel.MAGNETIC_MODULE_V1,
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_3),
attached_modules=attached_modules,
)
assert result == attached_modules[1]
def test_select_hardware_module_to_load_reuses_already_loaded(
magdeck_v1_def: ModuleDefinition,
) -> None:
"""It should reuse over already assigned modules in the same location."""
subject = make_module_view(
hardware_module_by_slot={
DeckSlotName.SLOT_1: HardwareModule(
serial_number="serial-1",
definition=magdeck_v1_def,
)
}
)
attached_modules = [
HardwareModule(serial_number="serial-1", definition=magdeck_v1_def),
HardwareModule(serial_number="serial-2", definition=magdeck_v1_def),
]
result = subject.select_hardware_module_to_load(
model=ModuleModel.MAGNETIC_MODULE_V1,
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
attached_modules=attached_modules,
)
assert result == attached_modules[0]
def test_select_hardware_module_to_load_rejects_location_reassignment(
magdeck_v1_def: ModuleDefinition,
tempdeck_v1_def: ModuleDefinition,
) -> None:
"""It should raise if a non-matching module is already present in the slot."""
subject = make_module_view(
hardware_module_by_slot={
DeckSlotName.SLOT_1: HardwareModule(
serial_number="serial-1",
definition=magdeck_v1_def,
)
}
)
attached_modules = [
HardwareModule(serial_number="serial-1", definition=magdeck_v1_def),
HardwareModule(serial_number="serial-2", definition=tempdeck_v1_def),
]
with pytest.raises(errors.ModuleAlreadyPresentError):
subject.select_hardware_module_to_load(
model=ModuleModel.TEMPERATURE_MODULE_V1,
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
attached_modules=attached_modules,
)
|
import asyncio
import pytest
import zmq
from zmq.utils import z85
from zope.interface.verify import verifyClass
def test_noop_auth_backend_client():
from pseud.auth import NoOpAuthenticationBackendForClient
from pseud.interfaces import IAuthenticationBackend
assert verifyClass(IAuthenticationBackend, NoOpAuthenticationBackendForClient)
def test_noop_auth_backend_server():
from pseud.auth import NoOpAuthenticationBackendForServer
from pseud.interfaces import IAuthenticationBackend
assert verifyClass(IAuthenticationBackend, NoOpAuthenticationBackendForServer)
@pytest.mark.asyncio
async def test_trusted_curve(loop, unused_tcp_port, trusted_curve_auth_backend):
from pseud import Client, Server
from pseud.utils import register_rpc
server_id = b'server'
endpoint = f'tcp://127.0.0.1:{unused_tcp_port}'
server_public, server_secret = zmq.curve_keypair()
security_plugin = 'trusted_curve'
server = Server(
server_id,
security_plugin=security_plugin,
public_key=server_public,
secret_key=server_secret,
loop=loop,
)
server.bind(endpoint)
bob_public, bob_secret = server.auth_backend.known_identities[b'bob']
client = Client(
server_id,
user_id=b'bob',
security_plugin=security_plugin,
public_key=bob_public,
secret_key=bob_secret,
peer_public_key=server_public,
loop=loop,
)
client.connect(endpoint)
assert server.socket.mechanism == zmq.CURVE
assert client.socket.mechanism == zmq.CURVE
register_rpc(name='string.lower')(str.lower)
async with server, client:
result = await client.string.lower('FOO')
assert result == 'foo'
@pytest.mark.asyncio
async def test_trusted_curve_with_wrong_peer_public_key(loop, unused_tcp_port_factory):
from pseud import Client, Server
from pseud.utils import register_rpc
server_id = b'server'
port = unused_tcp_port_factory()
endpoint = f'tcp://127.0.0.1:{port}'
server_public, server_secret = zmq.curve_keypair()
server = Server(
server_id,
security_plugin='trusted_curve',
public_key=server_public,
secret_key=server_secret,
loop=loop,
)
server.bind(endpoint)
alice_public, alice_secret = server.auth_backend.known_identities[b'alice']
client = Client(
server_id,
user_id=b'alice',
security_plugin='trusted_curve',
public_key=alice_public,
secret_key=alice_secret,
peer_public_key=z85.encode(b'R' * 32),
timeout=0.5,
loop=loop,
)
client.connect(endpoint)
assert server.socket.mechanism == zmq.CURVE
assert client.socket.mechanism == zmq.CURVE
register_rpc(name='string.lower')(str.lower)
async with server, client:
with pytest.raises(asyncio.TimeoutError):
await client.string.lower('BAR')
@pytest.mark.asyncio
async def test_untrusted_curve_with_allowed_password(
loop, unused_tcp_port, untrusted_curve_auth_backend
):
from pseud import Client, Server
from pseud.utils import register_rpc
client_id = b'john'
server_id = b'server'
endpoint = f'tcp://127.0.0.1:{unused_tcp_port}'
server_public, server_secret = zmq.curve_keypair()
client_public, client_secret = zmq.curve_keypair()
security_plugin = 'untrusted_curve'
password = b's3cret!'
client = Client(
server_id,
security_plugin=security_plugin,
public_key=client_public,
secret_key=client_secret,
peer_public_key=server_public,
user_id=client_id,
password=password,
loop=loop,
)
server = Server(
server_id,
security_plugin=security_plugin,
public_key=server_public,
secret_key=server_secret,
loop=loop,
)
server.bind(endpoint)
client.connect(endpoint)
assert server.socket.mechanism == zmq.CURVE
assert client.socket.mechanism == zmq.CURVE
# manually configure the authentication backend
server.auth_backend.user_map[client_id] = password
register_rpc(name='string.lower')(str.lower)
async with server, client:
result = await client.string.lower('FOO')
result2 = await client.string.lower('FOO_JJ')
result3 = await server.send_to(client_id).string.lower('ABC')
assert result == 'foo'
assert result2 == 'foo_jj'
assert result3 == 'abc'
@pytest.mark.asyncio
async def test_untrusted_curve_with_allowed_password_and_client_disconnect(
loop, unused_tcp_port
):
from pseud import Client, Server
client_id = b'john'
server_id = b'server'
endpoint = f'tcp://127.0.0.1:{unused_tcp_port}'
server_public, server_secret = zmq.curve_keypair()
client_public, client_secret = zmq.curve_keypair()
security_plugin = 'untrusted_curve'
password = b's3cret!'
client = Client(
server_id,
security_plugin=security_plugin,
public_key=client_public,
secret_key=client_secret,
peer_public_key=server_public,
user_id=client_id,
password=password,
timeout=1,
loop=loop,
)
server = Server(
server_id,
security_plugin=security_plugin,
public_key=server_public,
secret_key=server_secret,
loop=loop,
)
server.bind(endpoint)
client.connect(endpoint)
assert server.socket.mechanism == zmq.CURVE
assert client.socket.mechanism == zmq.CURVE
# manually configure the authentication backend
server.auth_backend.user_map[client_id] = password
server.register_rpc(name='string.lower')(str.lower)
async with server, client:
result = await client.string.lower('FOO')
assert result == 'foo'
# Simulate disconnection and reconnection with new identity
client.disconnect(endpoint)
client.connect(endpoint)
await asyncio.sleep(0.1)
result = await client.string.lower('ABC')
assert result == 'abc'
@pytest.mark.asyncio
async def test_untrusted_curve_with_wrong_password(loop, unused_tcp_port):
from pseud import Client, Server
from pseud.interfaces import UnauthorizedError
from pseud.utils import register_rpc
client_id = b'john'
server_id = b'server'
endpoint = f'tcp://127.0.0.1:{unused_tcp_port}'
server_public, server_secret = zmq.curve_keypair()
client_public, client_secret = zmq.curve_keypair()
security_plugin = 'untrusted_curve'
password = b's3cret!'
client = Client(
server_id,
user_id=client_id,
security_plugin=security_plugin,
public_key=client_public,
secret_key=client_secret,
peer_public_key=server_public,
password=password,
loop=loop,
)
server = Server(
server_id,
security_plugin=security_plugin,
public_key=server_public,
secret_key=server_secret,
loop=loop,
)
server.bind(endpoint)
client.connect(endpoint)
assert server.socket.mechanism == zmq.CURVE
assert client.socket.mechanism == zmq.CURVE
# manually configure the authentication backend
server.auth_backend.user_map[client_id] = password + b'Looser'
register_rpc(name='string.lower')(str.lower)
async with server, client:
with pytest.raises(UnauthorizedError):
await client.string.lower(b'IMSCREAMING')
@pytest.mark.asyncio
async def test_client_can_reconnect(loop, unused_tcp_port_factory):
from pseud import Client, Server
port = unused_tcp_port_factory()
server_id = b'server'
endpoint = f'tcp://127.0.0.1:{port}'
server_public, server_secret = zmq.curve_keypair()
security_plugin = 'trusted_curve'
server = Server(
server_id,
security_plugin=security_plugin,
public_key=server_public,
secret_key=server_secret,
loop=loop,
)
server.bind(endpoint)
bob_public, bob_secret = server.auth_backend.known_identities[b'bob']
client = Client(
server_id,
user_id=b'bob',
security_plugin=security_plugin,
public_key=bob_public,
secret_key=bob_secret,
peer_public_key=server_public,
loop=loop,
)
client.connect(endpoint)
assert server.socket.mechanism == zmq.CURVE
assert client.socket.mechanism == zmq.CURVE
server.register_rpc(name='string.upper')(str.upper)
async with server, client:
result = await client.string.upper('hello')
assert result == 'HELLO'
client.disconnect(endpoint)
client.connect(endpoint)
await asyncio.sleep(0.01)
result = await client.string.upper('hello2')
assert result == 'HELLO2'
@pytest.mark.asyncio
async def test_server_can_send_to_trustable_peer_identity(loop, unused_tcp_port):
"""
Uses the internal metadata of zmq.Frame.get() to fetch the identity of the sender.
"""
from pseud import Client, Server
server_id = b'server'
endpoint = f'tcp://127.0.0.1:{unused_tcp_port}'
server_public, server_secret = zmq.curve_keypair()
security_plugin = 'trusted_curve'
server = Server(
server_id,
security_plugin=security_plugin,
public_key=server_public,
secret_key=server_secret,
loop=loop,
)
server.bind(endpoint)
bob_public, bob_secret = server.auth_backend.known_identities[b'bob']
client = Client(
server_id,
user_id=b'bob',
security_plugin=security_plugin,
public_key=bob_public,
secret_key=bob_secret,
peer_public_key=server_public,
loop=loop,
)
client.connect(endpoint)
assert server.socket.mechanism == zmq.CURVE
assert client.socket.mechanism == zmq.CURVE
@server.register_rpc(with_identity=True)
def echo(peer_identity, message):
return peer_identity, message
async with server, client:
result = await client.echo(b'one')
if zmq.zmq_version_info() >= (4, 1, 0):
assert result == (b'bob', b'one')
else:
assert result == (b'', b'one')
|
# -*- coding: utf-8 -*-
"""
Provide functions and classes:
Model = class for loading and running trained TensorFlow models
create_cell = function for creating RNN cells with wrappers
"""
import tensorflow as tf
from tensorflow.python.ops.rnn_cell_impl import LSTMCell, ResidualWrapper, DropoutWrapper, MultiRNNCell
class Model():
"""Loading and running isolated tf graph."""
def __init__(self, loc, operation='activation', input_name='x'):
"""
loc: location of file containing saved model
operation: name of operation for running the model
input_name: name of input placeholder
"""
self.input = input_name + ":0"
self.graph = tf.Graph()
self.sess = tf.Session(graph=self.graph)
with self.graph.as_default():
saver = tf.train.import_meta_graph(loc + '.meta', clear_devices=True)
saver.restore(self.sess, loc)
self.op = self.graph.get_operation_by_name(operation).outputs[0]
def run(self, data):
"""Run the specified operation on given data."""
return self.sess.run(self.op, feed_dict={self.input: data})
def eval_feed(self, feed):
"""Run the specified operation with given feed."""
return self.sess.run(self.op, feed_dict=feed)
def run_op(self, op, feed, output=True):
"""Run given operation with the feed."""
if output:
return self.sess.run(
self.graph.get_operation_by_name(op).outputs[0],
feed_dict=feed)
else:
self.sess.run(
self.graph.get_operation_by_name(op),
feed_dict=feed)
def _create_single_cell(cell_fn, num_units, is_residual=False, is_dropout=False, keep_prob=None):
"""Create single RNN cell based on cell_fn."""
cell = cell_fn(num_units)
if is_dropout:
cell = DropoutWrapper(cell, input_keep_prob=keep_prob)
if is_residual:
cell = ResidualWrapper(cell)
return cell
def create_cell(num_units, num_layers, num_residual_layers, is_dropout=False, keep_prob=None, cell_fn=LSTMCell):
"""Create corresponding number of RNN cells with given wrappers."""
cell_list = []
for i in range(num_layers):
cell_list.append(_create_single_cell(
cell_fn=cell_fn,
num_units=num_units,
is_residual=(i >= num_layers - num_residual_layers),
is_dropout=is_dropout,
keep_prob=keep_prob
))
if num_layers == 1:
return cell_list[0]
return MultiRNNCell(cell_list)
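if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: build a two-layer
    # LSTM stack with one residual layer and input dropout. The checkpoint
    # path for Model() below is a placeholder.
    cell = create_cell(num_units=128, num_layers=2, num_residual_layers=1,
                       is_dropout=True, keep_prob=0.8)
    print(type(cell).__name__)  # MultiRNNCell
    # model = Model('path/to/checkpoint', operation='activation', input_name='x')
    # print(model.run(batch_of_inputs))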
|
import os
from typing import Dict, List
from . import config
from .err import PinterestException
from .util import pinterest_request
class Pin:
def __init__(self, token: str, pin_id: str = None):
self.token = token
self.pin_id = pin_id
def create(self, board: str, note: str, link: str = None,
image: str = None, image_url: str = None, image_base64: str = None,
fields: List[str] = None) -> Dict:
"""
Creates a Pin for the authenticated user.
The default response returns the note, URL, link and ID of the created Pin.
board (required): The board you want the new Pin to be on. In the format <username>/<board_name>.
note (required): The Pin’s description.
link (optional): The URL the Pin will link to when you click through.
And one of the following three options is required:
image: Upload the image you want to pin using multipart form data.
image_url: The link to the image that you want to Pin.
image_base64: The Base64-encoded image data that you want to Pin.
fields: attribution, board, color, counts, created_at, creator,
id, image, link, media, metadata, note, original_link, url
POST /v1/pins/
"""
if fields is None:
fields = ['note', 'url', 'link', 'id']
url = config.api_url + '/v1/pins/'
params = {'access_token': self.token, 'fields': ','.join(fields)}
data = {
'board': board,
'note': note,
'link': link
}
files = {}
if image is not None:
if not os.path.exists(image):
raise PinterestException("Pin: image does not exist")
files['image'] = open(image, 'rb')
elif image_url is not None:
data['image_url'] = image_url
elif image_base64 is not None:
data['image_base64'] = image_base64
else:
raise PinterestException("Pin: create() requires either image, image_url, or image_base64")
return pinterest_request('post', url, params=params, data=data, files=files)
def fetch(self, fields: List[str] = None) -> Dict:
"""
The default response returns the ID, link, URL and note of the Pin.
fields: attribution, board, color, counts, created_at, creator,
id, image, link, media, metadata, note, original_link, url
GET /v1/pins/<pin>/
"""
if fields is None:
fields = ['id', 'link', 'url', 'note']
url = config.api_url + '/v1/pins/{pin_id}/'.format(pin_id=self.pin_id)
params = {'access_token': self.token, 'fields': ','.join(fields)}
return pinterest_request('get', url, params=params)
def edit(self, board: str = None, note: str = None, link: str = None, fields: List[str] = None) -> Dict:
"""
Changes the board, description and/or link of the Pin.
pin (required): The ID (unique string of numbers and letters) of the Pin you want to edit.
board (optional): The board you want to move the Pin to, in the format <username>/<board_name>.
note (optional): The new Pin description.
link (optional): The new Pin link. Note: You can only edit the link of a repinned Pin if
the pinner owns the domain of the Pin in question, or if the Pin itself
has been created by the pinner.
fields: attribution, board, color, counts, created_at, creator,
id, image, link, media, metadata, note, original_link, url
PATCH /v1/pins/<pin>/
"""
if board is None and note is None and link is None:
raise PinterestException("Pin: edit() requires valid board, note, or link")
if fields is None:
fields = ['id', 'link', 'url', 'note']
url = config.api_url + '/v1/pins/{pin_id}/'.format(pin_id=self.pin_id)
params = {'access_token': self.token, 'fields': ','.join(fields)}
data = {}
if board is not None:
data['board'] = board
if note is not None:
data['note'] = note
if link is not None:
data['link'] = link
return pinterest_request('patch', url, params=params, data=data)
def delete(self) -> Dict:
"""
Deletes the specified Pin. This action is permanent and cannot be undone.
DELETE /v1/pins/<pin>/
"""
url = config.api_url + '/v1/pins/{pin_id}/'.format(pin_id=self.pin_id)
params = {'access_token': self.token}
return pinterest_request('delete', url, params=params)
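if __name__ == '__main__':
    # Illustrative sketch, not part of the original module. The access token,
    # board name and image URL are placeholders, and the response layout
    # ('data' -> 'id') is an assumption about the Pinterest v1 API.
    #
    # pin = Pin(token='YOUR_ACCESS_TOKEN')
    # created = pin.create(board='username/board_name',
    #                      note='A demo pin',
    #                      image_url='https://example.com/picture.jpg')
    # existing = Pin(token='YOUR_ACCESS_TOKEN', pin_id=created['data']['id'])
    # print(existing.fetch())
    # existing.edit(note='An updated description')
    pass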
|
"""
HTML renderer for mistletoe.
"""
import re
import sys
from itertools import chain
from urllib.parse import quote
from mistletoe.block_token import HTMLBlock
from mistletoe.span_token import HTMLSpan
from mistletoe.base_renderer import BaseRenderer
import html
class HTMLRenderer(BaseRenderer):
"""
HTML renderer class.
See mistletoe.base_renderer module for more info.
"""
def __init__(self, *extras):
"""
Args:
extras (list): allows subclasses to add even more custom tokens.
"""
self._suppress_ptag_stack = [False]
super().__init__(*chain((HTMLBlock, HTMLSpan), extras))
# html.entities.html5 includes entitydefs not ending with ';',
# CommonMark seems to hate them, so...
self._stdlib_charref = html._charref
_charref = re.compile(
r"&(#[0-9]+;" r"|#[xX][0-9a-fA-F]+;" r"|[^\t\n\f <&#;]{1,32};)"
)
html._charref = _charref
def __exit__(self, *args):
super().__exit__(*args)
html._charref = self._stdlib_charref
def render_to_plain(self, token):
if hasattr(token, "children"):
inner = [self.render_to_plain(child) for child in token.children]
return "".join(inner)
return self.escape_html(token.content)
def render_strong(self, token):
template = "<strong>{}</strong>"
return template.format(self.render_inner(token))
def render_emphasis(self, token):
template = "<em>{}</em>"
return template.format(self.render_inner(token))
def render_inline_code(self, token):
template = "<code>{}</code>"
inner = html.escape(token.children[0].content)
return template.format(inner)
def render_strikethrough(self, token):
template = "<del>{}</del>"
return template.format(self.render_inner(token))
def render_image(self, token):
template = '<img src="{}" alt="{}"{} />'
if token.title:
title = ' title="{}"'.format(self.escape_html(token.title))
else:
title = ""
return template.format(token.src, self.render_to_plain(token), title)
def render_link(self, token):
template = '<a href="{target}"{title}>{inner}</a>'
target = self.escape_url(token.target)
if token.title:
title = ' title="{}"'.format(self.escape_html(token.title))
else:
title = ""
inner = self.render_inner(token)
return template.format(target=target, title=title, inner=inner)
def render_auto_link(self, token):
template = '<a href="{target}">{inner}</a>'
if token.mailto:
target = "mailto:{}".format(token.target)
else:
target = self.escape_url(token.target)
inner = self.render_inner(token)
return template.format(target=target, inner=inner)
def render_escape_sequence(self, token):
return self.render_inner(token)
def render_raw_text(self, token):
return self.escape_html(token.content)
@staticmethod
def render_html_span(token):
return token.content
def render_heading(self, token):
template = "<h{level}>{inner}</h{level}>"
inner = self.render_inner(token)
return template.format(level=token.level, inner=inner)
def render_quote(self, token):
elements = ["<blockquote>"]
self._suppress_ptag_stack.append(False)
elements.extend([self.render(child) for child in token.children])
self._suppress_ptag_stack.pop()
elements.append("</blockquote>")
return "\n".join(elements)
def render_paragraph(self, token):
if self._suppress_ptag_stack[-1]:
return "{}".format(self.render_inner(token))
return "<p>{}</p>".format(self.render_inner(token))
def render_block_code(self, token):
template = "<pre><code{attr}>{inner}</code></pre>"
if token.language:
attr = ' class="{}"'.format(
"language-{}".format(self.escape_html(token.language))
)
else:
attr = ""
inner = html.escape(token.children[0].content)
return template.format(attr=attr, inner=inner)
def render_list(self, token):
template = "<{tag}{attr}>\n{inner}\n</{tag}>"
if token.start is not None:
tag = "ol"
attr = ' start="{}"'.format(token.start) if token.start != 1 else ""
else:
tag = "ul"
attr = ""
self._suppress_ptag_stack.append(not token.loose)
inner = "\n".join([self.render(child) for child in token.children])
self._suppress_ptag_stack.pop()
return template.format(tag=tag, attr=attr, inner=inner)
def render_list_item(self, token):
if len(token.children) == 0:
return "<li></li>"
inner = "\n".join([self.render(child) for child in token.children])
inner_template = "\n{}\n"
if self._suppress_ptag_stack[-1]:
if token.children[0].__class__.__name__ == "Paragraph":
inner_template = inner_template[1:]
if token.children[-1].__class__.__name__ == "Paragraph":
inner_template = inner_template[:-1]
return "<li>{}</li>".format(inner_template.format(inner))
def render_table(self, token):
# This is actually gross and I wonder if there's a better way to do it.
#
# The primary difficulty seems to be passing down alignment options to
# reach individual cells.
template = "<table>\n{inner}</table>"
if hasattr(token, "header"):
head_template = "<thead>\n{inner}</thead>\n"
head_inner = self.render_table_row(token.header, is_header=True)
head_rendered = head_template.format(inner=head_inner)
else:
head_rendered = ""
body_template = "<tbody>\n{inner}</tbody>\n"
body_inner = self.render_inner(token)
body_rendered = body_template.format(inner=body_inner)
return template.format(inner=head_rendered + body_rendered)
def render_table_row(self, token, is_header=False):
template = "<tr>\n{inner}</tr>\n"
inner = "".join(
[self.render_table_cell(child, is_header) for child in token.children]
)
return template.format(inner=inner)
def render_table_cell(self, token, in_header=False):
template = "<{tag}{attr}>{inner}</{tag}>\n"
tag = "th" if in_header else "td"
if token.align is None:
align = "left"
elif token.align == 0:
align = "center"
elif token.align == 1:
align = "right"
attr = ' align="{}"'.format(align)
inner = self.render_inner(token)
return template.format(tag=tag, attr=attr, inner=inner)
@staticmethod
def render_thematic_break(token):
return "<hr />"
@staticmethod
def render_line_break(token):
return "\n" if token.soft else "<br />\n"
@staticmethod
def render_html_block(token):
return token.content
def render_scaffold(self, body):
css_link = """
<link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/kognise/water.css@latest/dist/light.min.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.0.2/styles/default.min.css">
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.0.2/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad();</script>
"""
scaffold = f"<!DOCTYPE html>\n<html>\n<head>\n<meta charset='utf-8'>\n\n</head>\n<body class=\"container\">{body}</body>\n</html>"
return scaffold
def render_document(self, token):
self.footnotes.update(token.footnotes)
inner = "\n".join([self.render(child) for child in token.children])
return self.render_scaffold(inner)
@staticmethod
def escape_html(raw):
return html.escape(html.unescape(raw)).replace("&#x27;", "'")
@staticmethod
def escape_url(raw):
"""
Escape urls to prevent code injection craziness. (Hopefully.)
"""
return html.escape(quote(html.unescape(raw), safe="/#:()*?=%@+,&"))
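if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: render a small
    # Markdown snippet with the renderer above (assumes mistletoe's Document
    # token is importable as usual).
    from mistletoe import Document
    with HTMLRenderer() as renderer:
        doc = Document(["# Title\n", "\n", "Some *emphasised* text:\n", "\n", "- item one\n", "- item two\n"])
        print(renderer.render(doc))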
|
import numpy as np
def fractal_dimension(Z):
# Only for 2d image
assert(len(Z.shape) == 2)
def boxcount(Z, k):
S = np.add.reduceat(
np.add.reduceat(Z, np.arange(0, Z.shape[0], k), axis=0),
np.arange(0, Z.shape[1], k), axis=1)
# Count boxes that are neither empty (S > 0) nor full (S < k*k)
return len(np.where((S > 0) & (S < k*k))[0])
# Minimal dimension of image
p = min(Z.shape)
# Greatest power of 2 less than or equal to p
n = 2**np.floor(np.log(p)/np.log(2))
# Extract the exponent
n = int(np.log(n)/np.log(2))
# Build successive box sizes (from 2**n down to 2**2)
sizes = 2**np.arange(n, 1, -1)
# Actual box counting with decreasing size
counts = []
for size in sizes:
counts.append(boxcount(Z, size))
# Fit log(counts) against log(sizes); the fractal dimension is minus the slope
coeffs = np.polyfit(np.log(sizes), np.log(counts), 1)
return -coeffs[0]
def vessel_density(Z):
# Only for 2d image
assert(len(Z.shape) == 2)
vessel_total_count = np.sum(Z==1)
pixel_total_count = Z.shape[0]*Z.shape[1]
return vessel_total_count/pixel_total_count
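if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: run both metrics
    # on a synthetic binary vessel mask (values 0/1), which is the kind of
    # input the functions above expect.
    rng = np.random.default_rng(0)
    mask = (rng.random((256, 256)) > 0.7).astype(int)
    print("fractal dimension:", fractal_dimension(mask))
    print("vessel density:", vessel_density(mask))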
|
import matplotlib.pyplot as plt
def plot_drift_score_per_dimension(df_drift_scores, plot_file_name=False, latex_font=False):
"""
Parameters
----------
df_drift_scores: DataFrame with the columns 'Dimension', 'Dimension Type' and 'Drift score'
plot_file_name: File name used to save the figure under Plots/Where/ (False to skip saving)
latex_font: Whether a LaTeX font should be used
Returns
-------
None
"""
if latex_font:
# Use LaTeX font
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# Create plot
plt.style.use('ggplot')
plt.tight_layout()
fontsize = 18
params = {'axes.labelsize': fontsize, 'axes.titlesize': fontsize, 'legend.fontsize': fontsize,
'xtick.labelsize': fontsize, 'ytick.labelsize': fontsize}
plt.rcParams.update(params)
groups = df_drift_scores.groupby('Dimension Type')
# Plot
fig, ax = plt.subplots()
for name, group in groups:
ax.scatter(x=group['Dimension'], y=group['Drift score'], label=name)
ax.legend()
plt.ylim(bottom=0, top=0.44)
# plt.title(plot_title)
plt.ylabel('Drift score')
plt.xlabel('Dimension')
if plot_file_name:
plt.savefig("Plots/Where/" + str(plot_file_name), bbox_inches='tight')
plt.show()
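if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: the column names
    # below are taken from the groupby/scatter calls above; the values are
    # made up for demonstration, and pandas is assumed to be installed.
    import pandas as pd
    demo = pd.DataFrame({
        "Dimension": ["age", "income", "city", "country"],
        "Dimension Type": ["numeric", "numeric", "categorical", "categorical"],
        "Drift score": [0.12, 0.31, 0.05, 0.22],
    })
    plot_drift_score_per_dimension(demo)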
|
# -*- coding: utf-8 -*-
# Problem Set 5: Experimental Analysis
# Name: Brighton Ancelin
# Collaborators (discussion):
# Time:
# SEARCH FOR "MY_CODE" COMMENT TO FIND MY CONTRIBUTIONS
import pylab
import numpy as np
import re
# cities in our weather data
CITIES = [
'BOSTON',
'SEATTLE',
'SAN DIEGO',
'PHILADELPHIA',
'PHOENIX',
'LAS VEGAS',
'CHARLOTTE',
'DALLAS',
'BALTIMORE',
'SAN JUAN',
'LOS ANGELES',
'MIAMI',
'NEW ORLEANS',
'ALBUQUERQUE',
'PORTLAND',
'SAN FRANCISCO',
'TAMPA',
'NEW YORK',
'DETROIT',
'ST LOUIS',
'CHICAGO'
]
TRAINING_INTERVAL = range(1961, 2010)
TESTING_INTERVAL = range(2010, 2016)
"""
Begin helper code
"""
class Climate(object):
"""
The collection of temperature records loaded from given csv file
"""
def __init__(self, filename):
"""
Initialize a Climate instance, which stores the temperature records
loaded from a given csv file specified by filename.
Args:
filename: name of the csv file (str)
"""
self.rawdata = {}
f = open(filename, 'r')
header = f.readline().strip().split(',')
for line in f:
items = line.strip().split(',')
            date = re.match(r'(\d\d\d\d)(\d\d)(\d\d)', items[header.index('DATE')])
year = int(date.group(1))
month = int(date.group(2))
day = int(date.group(3))
city = items[header.index('CITY')]
temperature = float(items[header.index('TEMP')])
if city not in self.rawdata:
self.rawdata[city] = {}
if year not in self.rawdata[city]:
self.rawdata[city][year] = {}
if month not in self.rawdata[city][year]:
self.rawdata[city][year][month] = {}
self.rawdata[city][year][month][day] = temperature
f.close()
def get_yearly_temp(self, city, year):
"""
Get the daily temperatures for the given year and city.
Args:
city: city name (str)
year: the year to get the data for (int)
Returns:
a 1-d pylab array of daily temperatures for the specified year and
city
"""
temperatures = []
assert city in self.rawdata, "provided city is not available"
assert year in self.rawdata[city], "provided year is not available"
for month in range(1, 13):
for day in range(1, 32):
if day in self.rawdata[city][year][month]:
temperatures.append(self.rawdata[city][year][month][day])
return pylab.array(temperatures)
def get_daily_temp(self, city, month, day, year):
"""
Get the daily temperature for the given city and time (year + date).
Args:
city: city name (str)
month: the month to get the data for (int, where January = 1,
December = 12)
day: the day to get the data for (int, where 1st day of month = 1)
year: the year to get the data for (int)
Returns:
a float of the daily temperature for the specified time (year +
date) and city
"""
assert city in self.rawdata, "provided city is not available"
assert year in self.rawdata[city], "provided year is not available"
assert month in self.rawdata[city][year], "provided month is not available"
assert day in self.rawdata[city][year][month], "provided day is not available"
return self.rawdata[city][year][month][day]
def se_over_slope(x, y, estimated, model):
"""
For a linear regression model, calculate the ratio of the standard error of
this fitted curve's slope to the slope. The larger the absolute value of
this ratio is, the more likely we have the upward/downward trend in this
fitted curve by chance.
Args:
x: an 1-d pylab array with length N, representing the x-coordinates of
the N sample points
y: an 1-d pylab array with length N, representing the y-coordinates of
the N sample points
estimated: an 1-d pylab array of values estimated by a linear
regression model
model: a pylab array storing the coefficients of a linear regression
model
Returns:
a float for the ratio of standard error of slope to slope
"""
assert len(y) == len(estimated)
assert len(x) == len(estimated)
EE = ((estimated - y)**2).sum()
var_x = ((x - x.mean())**2).sum()
SE = pylab.sqrt(EE/(len(x)-2)/var_x)
return SE/model[0]
"""
End helper code
"""
def generate_models(x, y, degs):
"""
Generate regression models by fitting a polynomial for each degree in degs
to points (x, y).
Args:
x: an 1-d pylab array with length N, representing the x-coordinates of
the N sample points
y: an 1-d pylab array with length N, representing the y-coordinates of
the N sample points
degs: a list of degrees of the fitting polynomial
Returns:
a list of pylab arrays, where each array is a 1-d array of coefficients
that minimizes the squared error of the fitting polynomial
"""
# MY_CODE
return [pylab.polyfit(x, y, deg) for deg in degs]
def r_squared(y, estimated):
"""
Calculate the R-squared error term.
Args:
y: 1-d pylab array with length N, representing the y-coordinates of the
N sample points
estimated: an 1-d pylab array of values estimated by the regression
model
Returns:
a float for the R-squared error term
"""
# MY_CODE
return 1 - (np.sum((y - estimated) ** 2) / np.sum((y - np.mean(y)) ** 2))
def evaluate_models_on_training(x, y, models):
"""
For each regression model, compute the R-squared value for this model with the
standard error over slope of a linear regression line (only if the model is
linear), and plot the data along with the best fit curve.
For the plots, you should plot data points (x,y) as blue dots and your best
fit curve (aka model) as a red solid line. You should also label the axes
of this figure appropriately and have a title reporting the following
information:
degree of your regression model,
R-square of your model evaluated on the given data points,
and SE/slope (if degree of this model is 1 -- see se_over_slope).
Args:
x: an 1-d pylab array with length N, representing the x-coordinates of
the N sample points
y: an 1-d pylab array with length N, representing the y-coordinates of
the N sample points
models: a list containing the regression models you want to apply to
your data. Each model is a pylab array storing the coefficients of
a polynomial.
Returns:
None
"""
# MY_CODE
for model in models:
pylab.figure()
e = np.polyval(model, x)
pylab.plot(x, y, 'b.')
pylab.plot(x, e, 'r-')
pylab.xlim(np.min(x)-1, np.max(x)+1)
pylab.ylim(np.min(y)-1, np.max(y)+1)
pylab.xlabel('Year')
pylab.ylabel('Temperature (C)')
deg = len(model) - 1
pylab.title('R^2={}, Deg={}'.format(r_squared(y, e), deg) +
(', SE/slope={}'.format(
se_over_slope(x, y, e, model)) if 1 == deg else ''))
pylab.show()
def gen_cities_avg(climate, multi_cities, years):
"""
Compute the average annual temperature over multiple cities.
Args:
climate: instance of Climate
multi_cities: the names of cities we want to average over (list of str)
years: the range of years of the yearly averaged temperature (list of
int)
Returns:
a pylab 1-d array of floats with length = len(years). Each element in
this array corresponds to the average annual temperature over the given
cities for a given year.
"""
# MY_CODE
return np.array([np.mean([np.mean(climate.get_yearly_temp(city, year))
for city in multi_cities]) for year in years])
def moving_average(y, window_length):
"""
Compute the moving average of y with specified window length.
Args:
y: an 1-d pylab array with length N, representing the y-coordinates of
the N sample points
window_length: an integer indicating the window length for computing
moving average
Returns:
an 1-d pylab array with the same length as y storing moving average of
y-coordinates of the N sample points
"""
# MY_CODE
arr1 = np.array([np.mean(y[:(k+1)]) for k in range(window_length-1)])
arr2 = np.zeros((len(y) - window_length + 1))
    window_sum = np.sum(y[:window_length])
    arr2[0] = window_sum / window_length
    for start in range(len(y) - window_length):
        window_sum -= y[start]
        window_sum += y[start + window_length]
        arr2[start+1] = window_sum / window_length
return np.concatenate((arr1, arr2))
def rmse(y, estimated):
"""
Calculate the root mean square error term.
Args:
y: an 1-d pylab array with length N, representing the y-coordinates of
the N sample points
estimated: an 1-d pylab array of values estimated by the regression
model
Returns:
a float for the root mean square error term
"""
# MY_CODE
return np.sqrt(np.mean((y - estimated) ** 2))
def gen_std_devs(climate, multi_cities, years):
"""
For each year in years, compute the standard deviation over the averaged yearly
temperatures for each city in multi_cities.
Args:
climate: instance of Climate
multi_cities: the names of cities we want to use in our std dev calculation (list of str)
years: the range of years to calculate standard deviation for (list of int)
Returns:
a pylab 1-d array of floats with length = len(years). Each element in
this array corresponds to the standard deviation of the average annual
city temperatures for the given cities in a given year.
"""
# MY_CODE
# clim_arr = np.array([np.array([climate.get_yearly_temp(city, year)
# for city in multi_cities]) for year in years])
# clim_arr = np.mean(clim_arr, axis=1)
# clim_arr = np.std(clim_arr, axis=1)
clim_arr = np.zeros(len(years))
for i in range(len(years)):
year = years[i]
clim_dat = np.array([climate.get_yearly_temp(city, year) for city in
multi_cities])
clim_dat = np.mean(clim_dat, axis=0)
clim_dat = np.std(clim_dat)
clim_arr[i] = clim_dat
return clim_arr
def evaluate_models_on_testing(x, y, models):
"""
For each regression model, compute the RMSE for this model and plot the
test data along with the model’s estimation.
For the plots, you should plot data points (x,y) as blue dots and your best
fit curve (aka model) as a red solid line. You should also label the axes
of this figure appropriately and have a title reporting the following
information:
degree of your regression model,
RMSE of your model evaluated on the given data points.
Args:
x: an 1-d pylab array with length N, representing the x-coordinates of
the N sample points
y: an 1-d pylab array with length N, representing the y-coordinates of
the N sample points
models: a list containing the regression models you want to apply to
your data. Each model is a pylab array storing the coefficients of
a polynomial.
Returns:
None
"""
# MY_CODE
for model in models:
pylab.figure()
e = np.polyval(model, x)
pylab.plot(x, y, 'b.')
pylab.plot(x, e, 'r-')
pylab.xlim(np.min(x)-1, np.max(x)+1)
pylab.ylim(np.min(y)-1, np.max(y)+1)
pylab.xlabel('Year')
pylab.ylabel('Temperature (C)')
deg = len(model) - 1
pylab.title('RMSE={}, Deg={}'.format(rmse(y, e), deg))
pylab.show()
# MY_CODE
# === BEGIN MY CUSTOM CODE
def gen_std_devs_BUT_BETTER(climate, multi_cities, years):
"""
So I was looking at the way that gen_std_devs computes its values and it
does it by averaging the temperature across all cities on a particular
day, then finding the std dev for the population of all days in that year.
So I was thinking, "Hey, extreme weather doesn't mean that one day the
entire globe goes cold and the next day the entire globe gets hot. It
means that in all local regions, the variance in temperature from day to
day should go up. In other words, we should be looking at the std devs
of daily temperatures by city, and then averaging those. The way
implemented in the instructions' version of gen_std_devs is kinda shit.
It averages away most of our extrema by looking at temperatures all over
the globe, when we're looking for extrema in local regions of the globe."
Returns 1-d array where each entry is the average std dev (done this new
way) for a given year.
:param climate:
:param multi_cities:
:param years:
:return:
"""
clim_arr = np.array([np.array([np.std(climate.get_yearly_temp(city, year))
for city in multi_cities]) for year in years])
clim_arr = np.mean(clim_arr, axis=1)
return clim_arr
if __name__ == '__main__':
# PART 4
# MY_CODE
climate = Climate('data.csv')
x = np.array(TRAINING_INTERVAL[:])
city = 'NEW YORK'
assert city in CITIES, 'Unknown city'
y = np.array([climate.get_daily_temp(city, 1, 10, year) for \
year in x])
model = generate_models(x, y, [1])
# evaluate_models_on_training(x, y, model)
y = np.array([np.mean(climate.get_yearly_temp(city, year)) for year in x])
model = generate_models(x, y, [1])
# evaluate_models_on_training(x, y, model)
# Part B
# MY_CODE
y = gen_cities_avg(climate, CITIES, x)
model = generate_models(x, y, [1])
# evaluate_models_on_training(x, y, model)
# Part C
# MY_CODE
y = moving_average(y, 5)
model = generate_models(x, y, [1])
# evaluate_models_on_training(x, y, model)
# Part D.2
# MY_CODE
models = generate_models(x, y, [1, 2, 20])
# evaluate_models_on_training(x, y, models)
test_x = TESTING_INTERVAL[:]
test_y = moving_average(gen_cities_avg(climate, CITIES, test_x), 5)
# evaluate_models_on_testing(test_x, test_y, models)
# Part E
# MY_CODE
stds = gen_std_devs(climate, CITIES, x)
stds = moving_average(stds, 5)
model = generate_models(x, stds, [1])
# evaluate_models_on_training(x, stds, model)
# FURTHER ATTEMPTS
# MY_CODE
pylab.plot(climate.get_yearly_temp('NEW YORK', x[0]), 'b-')
pylab.plot(climate.get_yearly_temp('NEW YORK', x[-1]), 'r-')
pylab.show()
    # Hmm - the extreme weather hypothesis doesn't seem to be backed by the
    # data. The std dev in yearly temperatures is slowly decreasing as the
    # earth warms, not the other way around. At least according to my
    # rudimentary analysis.
# MY_CODE
stds = gen_std_devs_BUT_BETTER(climate, CITIES, x)
stds_1 = gen_std_devs(climate, CITIES, x)
if np.all(stds > stds_1):
print('Wow, all std devs are larger when done my way')
stds = moving_average(stds, 5)
model = generate_models(x, stds, [1])
evaluate_models_on_training(x, stds, model)
test_stds = gen_std_devs_BUT_BETTER(climate, CITIES, test_x)
test_stds = moving_average(test_stds, 5)
evaluate_models_on_testing(test_x, test_stds, model)
|
from robot.exceptions import RobotError
class InvalidJointAngleError(RobotError):
def __init__(self, msg):
super().__init__(msg)
class InvalidJointDictError(RobotError):
def __init__(self, msg):
super().__init__(msg)
class InvalidSerialDictError(RobotError):
def __init__(self, msg):
super().__init__(msg)
|
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
from project_runpy import env
BASE_DIR = os.path.dirname(__file__)
SECRET_KEY = env.get("SECRET_KEY", "Rotom")
DEBUG = env.get("DEBUG", False)
ALLOWED_HOSTS = ["*"]
INSTALLED_APPS = (
"bandc.apps.agenda.apps.AgendaConfig",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# support
"django_extensions",
"django_object_actions",
"bootstrap_pagination",
)
MIDDLEWARE = (
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
)
ROOT_URLCONF = "bandc.urls"
WSGI_APPLICATION = "bandc.wsgi.application"
# Database
# https://docs.djangoproject.com/en/stable/ref/settings/#databases
DATABASES = {"default": dj_database_url.config(default="sqlite:///bandc.db")}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/stable/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "America/Chicago"
USE_I18N = False
USE_L10N = False
USE_TZ = True
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"bandc.context_processors.base_url",
],
"debug": DEBUG,
},
},
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = "/static/"
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
MEDIA_ROOT = os.path.join(BASE_DIR, "..", "media")
MEDIA_URL = "/media/"
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"root": {"level": os.environ.get("LOG_LEVEL", "WARNING"), "handlers": ["console"]},
"formatters": {
"dev": {
"format": "%(levelname)s %(name)s %(message)s",
# 'datefmt': '%Y-%m-%dT%H:%M:%S%z', # I want milliseconds but Python doesn't make it easy
# "class": "pythonjsonlogger.jsonlogger.JsonFormatter",
},
},
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"require_debug_true": {"()": "django.utils.log.RequireDebugTrue"},
"readable_sql": {"()": "project_runpy.ReadableSqlFilter"},
},
"handlers": {
"console": {
"level": "DEBUG",
"formatter": "dev",
"class": "project_runpy.ColorizingStreamHandler",
},
},
"loggers": {
"django.db.backends": {
"level": "DEBUG" if env.get("SQL", False) else "INFO",
"handlers": ["console"],
"filters": ["require_debug_true", "readable_sql"],
"propagate": False,
},
"sh": {"level": "WARNING", "propagate": False},
"pdfminer": {"level": "WARNING", "propagate": False},
"factory": {"level": "ERROR", "propagate": False},
},
}
|
import io
import pytest
from cptv.bitstream import BitStream
def test_single_bytes():
b = BitStream(io.BytesIO(b'\xff\xee\xdd'))
assert b.bytes(1) == b'\xff'
assert b.bytes(1) == b'\xee'
assert b.bytes(1) == b'\xdd'
def test_multi_bytes():
b = BitStream(io.BytesIO(b'\xff\xee\xdd'))
assert b.bytes(3) == b'\xff\xee\xdd'
def test_not_enough_bytes():
b = BitStream(io.BytesIO(b'\xff'))
with pytest.raises(EOFError, match="short read. wanted 2, got 1"):
b.bytes(2)
def test_uint8():
b = BitStream(io.BytesIO(b'\xff\x01\x00'))
assert b.uint8() == 255
assert b.uint8() == 1
assert b.uint8() == 0
def test_uint32():
b = BitStream(io.BytesIO(b'\xff\xee\xdd\xaa'))
assert b.uint32() == 0xaaddeeff
def test_uint64():
b = BitStream(io.BytesIO(b'\xff\xee\xdd\xcc\xbb\xaa\x00\x11'))
assert b.uint64() == 0x1100aabbccddeeff
def test_iter_int():
i = BitStream(io.BytesIO(b'\xf0\x13')).iter_int(2, 4)
assert next(i) == -1
assert next(i) == 0x0
assert next(i) == 0x1
assert next(i) == 0x3
|
coordinates_01EE00 = ((123, 121),
(123, 123), (124, 119), (124, 124), (124, 125), (124, 126), (124, 127), (124, 129), (125, 115), (125, 117), (125, 118), (125, 121), (125, 122), (125, 123), (125, 130), (125, 135), (126, 115), (126, 119), (126, 120), (126, 121), (126, 122), (126, 123), (126, 124), (126, 125), (126, 126), (126, 127), (126, 128), (126, 130), (126, 134), (126, 135), (127, 115), (127, 117), (127, 118), (127, 119), (127, 120), (127, 121), (127, 122), (127, 123), (127, 124), (127, 125), (127, 126), (127, 127), (127, 128), (127, 129), (127, 135), (128, 115), (128, 117), (128, 118), (128, 119), (128, 120), (128, 121), (128, 122), (128, 123), (128, 124), (128, 125), (128, 126), (128, 127), (128, 128), (128, 129), (128, 130), (128, 135), (129, 116), (129, 118), (129, 119), (129, 120), (129, 121), (129, 122), (129, 123), (129, 124), (129, 125), (129, 126), (129, 127), (129, 128),
(129, 129), (129, 130), (129, 131), (129, 132), (129, 133), (129, 135), (130, 116), (130, 118), (130, 119), (130, 120), (130, 121), (130, 122), (130, 123), (130, 124), (130, 125), (130, 127), (130, 128), (130, 129), (130, 130), (130, 131), (130, 132), (130, 134), (131, 117), (131, 120), (131, 121), (131, 122), (131, 123), (131, 124), (131, 127), (131, 128), (131, 129), (131, 130), (131, 131), (131, 132), (131, 134), (132, 118), (132, 121), (132, 122), (132, 123), (132, 124), (132, 127), (132, 128), (132, 129), (132, 130), (132, 133), (133, 120), (133, 122), (133, 124), (133, 127), (133, 132), (134, 121), (134, 123), (134, 128), (134, 130), (135, 121), (135, 123), (135, 128), (136, 122), (137, 121), (138, 121), )
coordinates_00EE00 = ((110, 124),
(110, 125), (111, 124), (111, 126), (112, 125), )
|
class PrintSetup(APIObject, IDisposable):
""" Represents the Print Setup (Application Menu->Print->Print Setup) within Autodesk Revit. """
def Delete(self):
"""
Delete(self: PrintSetup) -> bool
Delete the current print setting,and make the In-Session setting as the
current one.
Returns: False if Delete operation fails,otherwise true.
"""
pass
def Dispose(self):
""" Dispose(self: APIObject,A_0: bool) """
pass
def ReleaseManagedResources(self, *args):
""" ReleaseManagedResources(self: APIObject) """
pass
def ReleaseUnmanagedResources(self, *args):
""" ReleaseUnmanagedResources(self: APIObject) """
pass
def Rename(self, newName):
"""
Rename(self: PrintSetup,newName: str) -> bool
Rename the current print setting with the specified name.
newName: print setting name to be renamed as.
Returns: False if Rename operation fails,otherwise true.
"""
pass
def Revert(self):
"""
Revert(self: PrintSetup)
Revert the current print setting.
"""
pass
def Save(self):
"""
Save(self: PrintSetup) -> bool
Save the changes for the current print setting.
Returns: False if save operation fails,otherwise True.
"""
pass
def SaveAs(self, newName):
"""
SaveAs(self: PrintSetup,newName: str) -> bool
Save the current print setting to another print setting with the specified name.
newName: print setting name to be saved as.
Returns: False if Save As operation fails,otherwise true.
"""
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
CurrentPrintSetting = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The current Print Setting of Print Setup.
Get: CurrentPrintSetting(self: PrintSetup) -> IPrintSetting
Set: CurrentPrintSetting(self: PrintSetup)=value
"""
InSession = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The in-session Print Setting of Print Setup.
Get: InSession(self: PrintSetup) -> InSessionPrintSetting
"""
|
import pygame.sprite
class Soldier(pygame.sprite.Sprite):
def __init__(self, surface, char_type: str, x: int, y: int, scale: int, speed: int):
pygame.sprite.Sprite.__init__(self)
self.surface = surface
self.char_type = char_type
self.speed = speed
self.direction = 1 # Faces right at startup.
self.flip = False
self.animation_list = []
self.action = 0
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
temp_list = []
for i in range(5):
img = pygame.image.load(f"assets/{self.char_type}/Idle/{i}.png")
img = pygame.transform.scale(
img, (int(img.get_width() * scale), int(img.get_height() * scale))
)
temp_list.append(img)
self.animation_list.append(temp_list)
temp_list = []
for i in range(6):
img = pygame.image.load(f"assets/{self.char_type}/Run/{i}.png")
img = pygame.transform.scale(
img, (int(img.get_width() * scale), int(img.get_height() * scale))
)
temp_list.append(img)
self.animation_list.append(temp_list)
self.image = self.animation_list[self.action][self.frame_index]
self.rect = self.image.get_rect()
self.rect.center = (x, y)
def move(self, moving_left: bool, moving_right: bool):
dx = 0
dy = 0
if moving_left:
dx = -self.speed
self.flip = True
self.direction = 0
if moving_right:
dx = self.speed
self.flip = False
self.direction = 1
self.rect.x += dx
self.rect.y += dy
def update_animation(self):
ANIMATION_COOLDOWN = 100
self.image = self.animation_list[self.action][self.frame_index]
if (pygame.time.get_ticks() - self.update_time) > ANIMATION_COOLDOWN:
self.update_time = pygame.time.get_ticks()
self.frame_index += 1
if self.frame_index >= len(self.animation_list[self.action]):
self.frame_index = 0
def update_action(self, new_action: int):
if new_action != self.action:
self.action = new_action
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def draw(self):
self.surface.blit(
pygame.transform.flip(self.image, self.flip, False), self.rect
)
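# A hedged usage sketch (an illustration only; it assumes the expected
# assets/<char_type>/Idle and assets/<char_type>/Run sprite folders exist and
# uses a hypothetical "player" character type).
if __name__ == "__main__":
    import pygame
    pygame.init()
    screen = pygame.display.set_mode((800, 640))
    player = Soldier(screen, "player", x=200, y=200, scale=3, speed=5)
    clock = pygame.time.Clock()
    running = True
    while running:
        clock.tick(60)
        screen.fill((144, 201, 120))
        player.update_animation()
        player.move(moving_left=False, moving_right=True)
        player.draw()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        pygame.display.update()
    pygame.quit()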
|
import os
from barbarika.helpers import logger
from barbarika.aws import send_response, get_subnet_address, get_subnet_gateway
from barbarika.citrixadc import CitrixADC
current_aws_region = os.environ['AWS_DEFAULT_REGION']
def lambda_handler(event, context):
fail_reason = None
logger.info("event: {}".format(str(event)))
request_type = event['RequestType']
response_status = 'FAILED'
response_data = {}
try:
if request_type == 'Create':
primary_instance_id = event['ResourceProperties']['PrimaryADCInstanceID']
primary_nsip = event['ResourceProperties']['PrimaryADCPrivateNSIP']
primary_server_subnet = event['ResourceProperties']['PrimaryADCServerPrivateSubnetID']
secondary_instance_id = event['ResourceProperties']['SecondaryADCInstanceID']
secondary_nsip = event['ResourceProperties']['SecondaryADCPrivateNSIP']
secondary_server_subnet = event['ResourceProperties']['SecondaryADCServerPrivateSubnetID']
primary = CitrixADC(nsip=primary_nsip,
nsuser="nsroot", nspass=primary_instance_id)
secondary = CitrixADC(
nsip=secondary_nsip, nsuser="nsroot", nspass=secondary_instance_id)
# Primary ADC to send traffic to all the servers (even in other availability zone)
primary_server_subnet_address = get_subnet_address(
primary_server_subnet)
secondary_server_subnet_address = get_subnet_address(
secondary_server_subnet)
primary_server_gateway = get_subnet_gateway(primary_server_subnet)
secondary_server_gateway = get_subnet_gateway(
secondary_server_subnet)
primary.add_route(
network_ip=secondary_server_subnet_address[0], netmask=secondary_server_subnet_address[1], gateway_ip=primary_server_gateway)
secondary.add_route(
network_ip=primary_server_subnet_address[0], netmask=primary_server_subnet_address[1], gateway_ip=secondary_server_gateway)
primary.save_config()
secondary.save_config()
response_status = 'SUCCESS'
else: # request_type == 'Delete' | 'Update'
response_status = 'SUCCESS'
except Exception as e:
fail_reason = str(e)
logger.error(fail_reason)
response_status = 'FAILED'
finally:
send_response(event, context, response_status,
response_data, fail_reason=fail_reason)
|
import torch
from torch.utils.data import Dataset
from torchvision import transforms, datasets
from torch.utils.data import (
DataLoader,
RandomSampler,
DistributedSampler,
SequentialSampler,
)
import scipy.io as sio
from PIL import Image
import numpy as np
import heapq
class load_prcc_dataset_scnn_pos(Dataset):
def __init__(self, txt_path, crop_path, transform=None, train = True, N = 100,nuclues_size = 32):
fh = open(txt_path, 'r')
imgs = []
for line in fh:
line = line.rstrip()
words = line.split()
imgs.append((words[0], int(words[1])))
self.imgs = imgs
self.transform = transform
self.train = train
self.N = N
self.nuclues_size = nuclues_size
self.crop_path = crop_path
def __getitem__(self,index):
fn, label = self.imgs[index]
slide_name = fn.split('/')[-2]
patch_name = fn.split('/')[-1].strip('.png')
crop_path = self.crop_path + '/' + str(self.nuclues_size) + '/' + slide_name+ '/' + patch_name + '.mat'
crop = sio.loadmat(crop_path)
nuclues = np.array(crop['nucleus'])
cls_all = np.array(crop['class_all'])[0]
        # negate, sort ascending, negate back: sorts the class scores in descending order
        cls_all = -cls_all
        cls_all.sort()
        cls_all = -cls_all
        # keep only the top N scores
        cls_all = cls_all[:self.N]
#print(len(cls_all))
cls_list = [0 for i in range(self.N)]
cls_list[:len(cls_all)] = cls_all
nuclues = nuclues[:self.N]
patches = []
for i in nuclues :
#temp =self.transform(self.train)(i)
patches.append(i)
pos = np.array(crop['cls_keep_xy'])
pos = pos[:self.N]
pos_list = [[0,0]for i in range(self.N)]
pos_list[:len(pos)]=pos
new_patch = np.zeros([3,self.nuclues_size,self.nuclues_size])
while len(patches)<self.N:
patches.append(new_patch)
patches = torch.from_numpy(np.asarray(patches))
cls_list = torch.as_tensor(np.asarray(cls_list), dtype=torch.int64)
pos_list = torch.as_tensor(np.asarray(pos_list), dtype=torch.int64)
return patches, cls_list, pos_list, label
def __len__(self):
return len(self.imgs)
|
# Autoencoder
from .SSIM_loss import SSIM_loss
from .Mutil_SSIM_loss import Multi_SSIM_loss
from .VAE_loss import VAE_loss
# GAN
from .SRGAN_loss import GeneratorLoss as SRGAN_Gloss
from .SRGAN_loss import DiscriminatorLoss as SRGAN_Dloss
|
#!/usr/bin/python
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+
import os
import sys
import imp
import base64
import re
import json
import platform
import shutil
import time
import traceback
import datetime
import subprocess
from AbstractPatching import AbstractPatching
from Common import *
from CommandExecutor import *
class UbuntuPatching(AbstractPatching):
def __init__(self, logger, distro_info):
super(UbuntuPatching, self).__init__(distro_info)
self.logger = logger
self.command_executor = CommandExecutor(logger)
self.base64_path = '/usr/bin/base64'
self.bash_path = '/bin/bash'
self.blkid_path = '/sbin/blkid'
self.cat_path = '/bin/cat'
self.cryptsetup_path = '/sbin/cryptsetup'
self.dd_path = '/bin/dd'
self.e2fsck_path = '/sbin/e2fsck'
self.echo_path = '/bin/echo'
self.lsblk_path = '/bin/lsblk'
self.lsscsi_path = '/usr/bin/lsscsi'
self.mkdir_path = '/bin/mkdir'
self.mount_path = '/bin/mount'
self.openssl_path = '/usr/bin/openssl'
self.resize2fs_path = '/sbin/resize2fs'
self.umount_path = '/bin/umount'
self.touch_path = '/usr/bin/touch'
def install_extras(self):
"""
        Install sg_dd because the default dd does not support sparse writes.
"""
cmd = " ".join(['apt-get', 'update'])
self.command_executor.Execute(cmd)
packages = ['at', 'cryptsetup-bin', 'lsscsi', 'python-six', 'python-parted', 'procps', 'psmisc', 'gcc', 'libssl-dev', 'libffi-dev', 'python-dev', 'python-pip']
cmd = " ".join(['apt-get', 'install', '-y'] + packages)
self.command_executor.Execute(cmd)
cmd = " ".join(['pip', 'install', 'adal'])
self.command_executor.Execute(cmd)
|
"""Lefthook installer
"""
from ._core import run
__version__ = '0.1.2'
__all__ = ['run']
|
import logging
import time
class Heartbeat:
@classmethod
def init(cls, job_metadata):
cls.iterations_count = 0
cls.previous_beat_at = time.time()
cls.min_seconds_between_beats = 10
cls.job_metadata = job_metadata
@classmethod
def increase_iterations(cls, n_iter):
cls.iterations_count += n_iter
if cls.should_beat(): cls.beat()
@classmethod
def should_beat(cls):
return time.time() - cls.previous_beat_at > cls.min_seconds_between_beats
@classmethod
def beat(cls):
logging.info(cls.progress_report())
cls.previous_beat_at = time.time()
@classmethod
def new_point(cls, x, y, t):
cls.increase_iterations(1)
@classmethod
def progress_report(cls):
completed_percentage = (cls.iterations_count / cls.job_metadata.total_iterations) * 100
current_execution_time = int(time.time() - cls.job_metadata.start_time)
time_in_minutes = f'{current_execution_time//60}:{(current_execution_time%60):02d}'
return f'time: {time_in_minutes}, progress: {completed_percentage:.2f}%'
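# A minimal driving sketch (an illustration; job_metadata is assumed to be any
# object exposing `total_iterations` and `start_time`, the two attributes the
# class reads above).
if __name__ == "__main__":
    from types import SimpleNamespace
    logging.basicConfig(level=logging.INFO)
    meta = SimpleNamespace(total_iterations=1_000_000, start_time=time.time())
    Heartbeat.init(meta)
    for _ in range(meta.total_iterations):
        Heartbeat.new_point(0.0, 0.0, 0.0)  # logs progress at most every 10 seconds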
|
import datetime
from django.test import TestCase
from pytz import UTC
from beers.models import (
Beer, Manufacturer, BeerAlternateName, ManufacturerAlternateName, BeerPrice,
ServingSize,
)
from taps.test.factories import TapFactory
from venues.test.factories import VenueFactory
from .factories import BeerFactory, ManufacturerFactory
class BeerTestCase(TestCase):
def setUp(self):
self.manufacturer = ManufacturerFactory()
self.new_time = UTC.localize(datetime.datetime(2018, 4, 3, 6, 2))
self.other_time = self.new_time + datetime.timedelta(days=30)
self.beer1 = BeerFactory(
manufacturer=self.manufacturer, untappd_url='http://localhost/123456',
color_srm=None, time_first_seen=self.other_time,
)
self.beer2 = BeerFactory(
manufacturer=self.manufacturer, color_srm=55, stem_and_stein_pk=551,
time_first_seen=self.new_time,
)
self.tap = TapFactory(beer=self.beer2)
self.venue2 = self.tap.venue
self.venue1 = VenueFactory()
self.serving_size = ServingSize.objects.create(name='foo', volume_oz=12)
def test_merge(self):
self.beer1.merge_from(self.beer2)
self.assertEqual(self.beer1.color_srm, self.beer2.color_srm)
self.tap.refresh_from_db()
self.assertEqual(self.tap.beer, self.beer1)
self.assertFalse(Beer.objects.filter(id=self.beer2.id).exists())
self.assertTrue(BeerAlternateName.objects.filter(
name=self.beer2.name, beer=self.beer1,
).exists())
self.assertEqual(self.tap.beer.time_first_seen, self.new_time)
self.assertEqual(self.tap.beer.stem_and_stein_pk, 551)
def test_preserve_prices_no_overlap(self):
BeerPrice.objects.create(
beer=self.beer1,
venue=self.venue1,
price=15,
serving_size=self.serving_size,
)
BeerPrice.objects.create(
beer=self.beer2,
venue=self.venue2,
price=10,
serving_size=self.serving_size,
)
self.beer1.merge_from(self.beer2)
self.assertEqual(BeerPrice.objects.filter(beer=self.beer1).count(), 2)
def test_preserve_prices_overlap(self):
BeerPrice.objects.create(
beer=self.beer1,
venue=self.venue2,
price=15,
serving_size=self.serving_size,
)
BeerPrice.objects.create(
beer=self.beer2,
venue=self.venue2,
price=10,
serving_size=self.serving_size,
)
other_size = ServingSize.objects.create(name='bar', volume_oz=16)
BeerPrice.objects.create(
beer=self.beer2,
venue=self.venue2,
price=20,
serving_size=other_size,
)
self.beer1.merge_from(self.beer2)
# Because we have an overlap in one unique condition (beer + venue + size),
# we are going to take the safest route possible and ignore both of the
# prices from beer2 for venue2.
self.assertEqual(BeerPrice.objects.filter(beer=self.beer1).count(), 1)
class ManufacturerTestCase(TestCase):
def test_merge(self):
new_time = UTC.localize(datetime.datetime(2018, 4, 3, 6, 2))
other_time = new_time + datetime.timedelta(days=30)
mfg1 = ManufacturerFactory(
untappd_url='http://localhost/123456', location='',
time_first_seen=other_time,
)
mfg2 = ManufacturerFactory(
location='your house', time_first_seen=new_time,
)
beer2 = BeerFactory(manufacturer=mfg2)
mfg1.merge_from(mfg2)
self.assertEqual(mfg1.location, mfg2.location)
beer2.refresh_from_db()
self.assertEqual(beer2.manufacturer, mfg1)
self.assertFalse(Manufacturer.objects.filter(id=mfg2.id).exists())
self.assertTrue(ManufacturerAlternateName.objects.filter(
name=mfg2.name, manufacturer=mfg1,
).exists())
self.assertEqual(beer2.manufacturer.time_first_seen, new_time)
|
from unittest import TestCase
import tempfile
import os
from jinjawalk import JinjaWalk
from typing import Callable, Dict
import shutil
def path_joiner(base: str) -> Callable[[str], str]:
return lambda s: os.path.join(base, s)
def dump_string(file_path: str, s: str):
with open(file_path, "w") as of:
print(s, file=of)
def read_file(file_path: str) -> str:
with open(file_path, "r") as f:
content = f.read()
return content
class TestJinjaWalk(TestCase):
@staticmethod
def write_dummy_template(destination: str,
conf: Dict[str, str],
section_name: str = 'section_name',
namespace: str = 'config') -> str:
s = '\n'.join(["line" + str(i+1) + " with {{ " + namespace + "['" + section_name + "']['" + k + "'] }}"
for i, k in enumerate(conf)])
dump_string(destination, s)
expected_render = '\n'.join([f"line{i+1} with {conf[k]}" for i, k in enumerate(conf)])
return expected_render
@staticmethod
def write_dummy_conf_file(destination: str, conf: Dict[str, str], section_name='section_name'):
s = f"[{section_name}]\n" + '\n'.join([f"{k} = {conf[k]}" for k in conf])
dump_string(destination, s)
class TestMultipleInPlace(TestJinjaWalk):
def setUp(self) -> None:
self.work_dir = tempfile.mkdtemp()
conf1 = {"key1": "value1"}
conf2 = {"key2": "value2"}
conf3 = {"key3": "value3"}
self.conf_file_path1 = path_joiner(self.work_dir)('conf1.ini')
self.conf_file_path2 = path_joiner(self.work_dir)('conf2.ini')
self.conf_file_path3 = path_joiner(self.work_dir)('conf3.ini')
self.write_dummy_conf_file(self.conf_file_path1, conf1)
self.write_dummy_conf_file(self.conf_file_path2, conf2)
self.write_dummy_conf_file(self.conf_file_path3, conf3)
self.expected_render = self.write_dummy_template(path_joiner(self.work_dir)('template.txt'),
{**conf1, **conf2, **conf3})
def tearDown(self) -> None:
shutil.rmtree(self.work_dir)
def test_multiple_in_place(self):
walker = JinjaWalk()
walker.walk([self.conf_file_path1, self.conf_file_path2, self.conf_file_path3], self.work_dir, self.work_dir)
rendered_template = read_file(path_joiner(self.work_dir)('template.txt'))
self.assertEqual(self.expected_render, rendered_template.strip('\n'))
class TestDefaults(TestJinjaWalk):
conf = {"key1": "value1", "key2": "value2"}
list_of_dummy_subdirs = ['.', 'subdir1', 'subdir2']
list_of_dummy_templates = ['template1.txt', 'template2.txt']
def setUp(self) -> None:
self.source_dir = tempfile.mkdtemp()
self.conf_file_path = path_joiner(self.source_dir)('conf.ini')
self.write_dummy_conf_file(self.conf_file_path, self.conf)
subdirs_to_populate = map(path_joiner(self.source_dir), self.list_of_dummy_subdirs)
for subdir in subdirs_to_populate:
os.makedirs(subdir, exist_ok=True)
templates_to_create = map(path_joiner(subdir), self.list_of_dummy_templates)
for template_path in templates_to_create:
self.expected_render = self.write_dummy_template(template_path, self.conf)
self.output_dir = tempfile.mkdtemp()
def tearDown(self) -> None:
shutil.rmtree(self.source_dir)
shutil.rmtree(self.output_dir)
def test_subdirs(self):
walker = JinjaWalk()
walker.walk(self.conf_file_path, self.source_dir, self.output_dir)
subdirs_to_check = map(path_joiner(self.output_dir), self.list_of_dummy_subdirs)
for subdir in subdirs_to_check:
rendered_templates_to_check = map(path_joiner(subdir), self.list_of_dummy_templates)
for template_path in rendered_templates_to_check:
with self.subTest(template=template_path):
rendered_template = read_file(template_path)
self.assertEqual(self.expected_render, rendered_template.strip('\n'))
|
# Author: a101269
# Date : 2020/3/4
import os
from glob import glob
import torch
from argparse import ArgumentParser
from config.config import configs
from pre_process.vocab import load_vocab
from pre_process.conll_processor import ConllProcessor
from pre_process.dataloader import Dataloader
from utils.utils import init_logger,logger,device,seed_everything
from model.trainer import Trainer
from model.parser_model import Parser_model
def main():
parser = ArgumentParser()
parser.add_argument("--arch", default='bert', type=str)
parser.add_argument("--do_train", action='store_true')
parser.add_argument("--do_test", action='store_true')
parser.add_argument('--do_pre', action='store_true')
parser.add_argument("--from_checkpoint", action='store_true')
parser.add_argument("--epoch_continue", default=0, type=int)
parser.add_argument("--n_gpu", type=str, default='0', help='"0,1,.." or "0" or "" ')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--fp16_opt_level', type=str, default='O1')  # O1 = mixed-precision training
args = parser.parse_args()
for k, v in configs.items():
setattr(args, k, v)
args.device = device(args.use_cuda)
if not args.do_test:
args.gold_file = args.dev_file
if args.from_checkpoint:
args.epoch=args.epoch_continue
args.warmup_prop=0
seed_everything(seed=args.seed)
init_logger(log_file='myparser.log')
logger.info(args)
vocabs = load_vocab(args)
# logger.warning(vocabs['know']._id2unit)
processor = ConllProcessor(args,vocabs)
dataloader= Dataloader(args, processor)
model = Parser_model(args, vocabs)
def test():
model.load_state_dict(torch.load(args.saved_model_path + '/pytorch_model.bin'))
logger.info('Start testing-----------------')
test_files=[args.test_file2, args.test_file1,args.test_file3,args.test_file4]
# test_files = glob('dataset/*for_sdp.conllu')
for test_file in test_files:
args.gold_file = test_file
test_dataloader, test_conllu_file = dataloader.load_data(test_file, args.batch_size, args.max_seq_len,
mode='test')
batch_num = len(test_dataloader)
trainer = Trainer(args, model, batch_num)
LAS, UAS = trainer.predict(test_dataloader, test_conllu_file)
logger.warning(f"Test result in {test_file}: LAS:{LAS:.4f}, UAS:{UAS:.4f}")
if not args.do_pre:
reference_file = args.gold_file + '.sem16.sdp'
os.system('python evalute.py --reference ' + reference_file)
if args.from_checkpoint:
model.load_state_dict(torch.load(args.saved_model_path + '/pytorch_model.bin'))
train_dataloader, _ = dataloader.load_data(args.train_file,args.batch_size,args.max_seq_len, mode='train')
dev_dataloader, dev_conllu_file = dataloader.load_data(args.dev_file,args.batch_size,args.max_seq_len, mode='dev')
batch_num = len(train_dataloader)
logger.info('Start training-----------------')
trainer=Trainer(args,model,batch_num)
trainer.train(train_dataloader,dev_dataloader,dev_conllu_file)
test()
elif args.do_test:
test()
else:
train_dataloader, _ = dataloader.load_data(args.train_file,args.batch_size,args.max_seq_len, mode='train')
dev_dataloader, dev_conllu_file = dataloader.load_data(args.dev_file,args.batch_size,args.max_seq_len, mode='dev')
batch_num = len(train_dataloader)
logger.info('Start training-----------------')
trainer=Trainer(args,model,batch_num)
trainer.train(train_dataloader,dev_dataloader,dev_conllu_file)
test()
if __name__ == '__main__':
main()
'''
/pytorch/aten/src/THC/THCTensorIndex.cu:361: void indexSelectLargeIndex(TensorInfo<T, IndexType>, TensorInfo<T, IndexType>,
TensorInfo<long, IndexType>, int, int, IndexType, IndexType, long) [with T = float, IndexType = unsigned int, DstDim = 2,
SrcDim = 2, IdxDim = -2, IndexIsMajor = true]: block: [392,0,0], thread: [94,0,0] Assertion `srcIndex < srcSelectDimSize` failed.
Possibly caused by the dataset; it has not been fully updated.
'''
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
#=============================================================================
# FileName: addtask
# Desc:
# Author: ge.jin
# Email: ge.jin@woqutech.com
# HomePage: wwww.woqutech.com
# Version: 0.0.1
# LastChange: 5/3/16 9:33 AM
# History:
#=============================================================================
'''
from celery import Celery, current_app
app = Celery('celerybeat-sqlalchemy')
app.config_from_object('settings')
from model import PeriodicTask, CrontabSchedule, IntervalSchedule, get_session
print(current_app.conf.CELERYBEAT_MAX_LOOP_INTERVAL)
session = get_session()
cs = CrontabSchedule(minute='*/5')
iss = IntervalSchedule(every=30, period='seconds')
session.add(cs)
# pt = PeriodicTask(name="sdisfsdffaf124asf", task="task_hello", crontab=cs, interval=iss, args='[]', kwargs='{}')
pt = PeriodicTask(name="sd124asf", task="task_hello", interval=iss, args='[]', kwargs='{}')
session.add(pt)
session.flush()
|
# Third order Newton's method
max_steps = 50 ## max number of iterations to use
x0 = 2.
def f(x):
return x**3 - 2.
def fp(x):
return 3*x**2
def fpp(x):
return 6*x
def third_order():
global x
x = []
x.append(x0)
for i in range(max_steps):
        # Chebyshev's third-order iteration: x - f/f' - f''*f^2 / (2*f'^3)
        x.append(x[i] - f(x[i])/fp(x[i]) - (fpp(x[i])*f(x[i])**2)/(2*fp(x[i])**3))
return x
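# A quick check (illustration only): starting from x0 = 2, the iteration
# should settle on the real cube root of 2.
if __name__ == "__main__":
    xs = third_order()
    print(xs[-1], "vs", 2.0 ** (1.0 / 3.0))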
|
"""
This contains the unit tests for the cross_validate module.
"""
from __future__ import print_function
import unittest
from Analytics import cross_validate as cv
from Parser import build_training as bt
from Train import train_model as tm
from copy import deepcopy
import numpy as np
from sklearn.preprocessing import Imputer
_split_value = 70
_random = 12345
_n_iter = 10
class Object(object):
pass
class CrossValidateTests(unittest.TestCase):
def setUp(self):
"""Create an instance of the read_training Read class"""
print("Initializing test")
compound = {'7844': {'predictor': '98.8', 'experimentalhash': {u'Density': 0.577, u'Vapor Density': 1.93, u'Boiling Point': -6.47, u'Rotatable Bond Count': 1.0, u'XLogP3': 2.4, u'Melting Point': -185.3, u'Flash Point': False, u'Undefined Atom Stereocenter Count': 0.0, u'Auto-Ignition': 725.0, u'Molecular Weight': 56.10632, u'LogP': 2.4, u'Complexity': 14.0, u'Vapor Pressure': 2253.0, u'Heavy Atom Count': 4.0, u'Exact Mass': 56.0626, u'Monoisotopic Mass': 56.0626}}, '19502': {'predictor': '57.6', 'experimentalhash': {u'Rotatable Bond Count': 1.0, u'Heavy Atom Count': 8.0, u'Undefined Atom Stereocenter Count': 3.0, u'Molecular Weight': 112.21264, u'Complexity': 66.4, u'Exact Mass': 112.125201, u'Monoisotopic Mass': 112.125201}}, '11610': {'predictor': '54.5', 'experimentalhash': {u'Density': 0.697, u'Vapor Density': 0.7, u'Boiling Point': 93.6, u'Rotatable Bond Count': 4.0, u'XLogP3': 4.0, u'Melting Point': -119.7, u'Flash Point': 32.0, u'Undefined Atom Stereocenter Count': 0.0, u'Auto-Ignition': 500.0, u'Molecular Weight': 98.18606, u'LogP': 3.99, u'Complexity': 37.3, u'Vapor Pressure': 59.3, u'Heavy Atom Count': 7.0, u'Exact Mass': 98.10955, u'Monoisotopic Mass': 98.10955}}, '7855': {'predictor': '98.8', 'experimentalhash': {u'Density': 0.577, u'Vapor Density': 1.93, u'Boiling Point': -6.47, u'Rotatable Bond Count': 1.0, u'XLogP3': 2.4, u'Melting Point': -185.3, u'Flash Point': False, u'Undefined Atom Stereocenter Count': 0.0, u'Auto-Ignition': 725.0, u'Molecular Weight': 56.10632, u'LogP': 2.4, u'Complexity': 14.0, u'Vapor Pressure': 2253.0, u'Heavy Atom Count': 4.0, u'Exact Mass': 56.0626, u'Monoisotopic Mass': 56.0626}}, '19503': {'predictor': '57.6', 'experimentalhash': {u'Rotatable Bond Count': 1.0, u'Heavy Atom Count': 8.0, u'Undefined Atom Stereocenter Count': 3.0, u'Molecular Weight': 112.21264, u'Complexity': 66.4, u'Exact Mass': 112.125201, u'Monoisotopic Mass': 112.125201}}, '11611': {'predictor': '54.5', 'experimentalhash': {u'Density': 0.697, u'Vapor Density': 0.7, u'Boiling Point': 93.6, u'Rotatable Bond Count': 4.0, u'XLogP3': 4.0, u'Melting Point': -119.7, u'Flash Point': 32.0, u'Undefined Atom Stereocenter Count': 0.0, u'Auto-Ignition': 500.0, u'Molecular Weight': 98.18606, u'LogP': 3.99, u'Complexity': 37.3, u'Vapor Pressure': 59.3, u'Heavy Atom Count': 7.0, u'Exact Mass': 98.10955, u'Monoisotopic Mass': 98.10955}}, '7864': {'predictor': '98.8', 'experimentalhash': {u'Density': 0.577, u'Vapor Density': 1.93, u'Boiling Point': -6.47, u'Rotatable Bond Count': 1.0, u'XLogP3': 2.4, u'Melting Point': -185.3, u'Flash Point': False, u'Undefined Atom Stereocenter Count': 0.0, u'Auto-Ignition': 725.0, u'Molecular Weight': 56.10632, u'LogP': 2.4, u'Complexity': 14.0, u'Vapor Pressure': 2253.0, u'Heavy Atom Count': 4.0, u'Exact Mass': 56.0626, u'Monoisotopic Mass': 56.0626}}}
self.test_data = Object()
self.test_data.compound = deepcopy(compound)
def tearDown(self):
"""Delete data structure"""
print("Clearing out file")
del self.test_data
def testCrossValidate(self):
np.random.seed(seed=_random)
train = bt.Process(self.test_data, split_value=_split_value)
X = train.train
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
imp.fit(X)
t = imp.transform(X)
train.train = t
model = tm.Train(train)
model.train_model()
print("Test cross_validate")
cross = cv.Analysis(model, seed=_random, verbose=True)
cross.cross_validate(n_iter=_n_iter)
self.assertEqual(0.95, cross.acc)
self.assertEqual(0.95, cross.prec)
self.assertEqual(1.0, cross.recall)
self.assertEqual(0.95, cross.roc)
cross.feature_importances()
self.assertAlmostEqual(0.0586, cross.feats[0][0], 3)
if __name__ == '__main__':
unittest.main()
|
# Generated by Django 3.1.3 on 2021-03-19 11:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=500)),
('description', models.TextField()),
('cats_id', models.IntegerField(null=True)),
],
),
migrations.CreateModel(
name='CourseLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('link', models.CharField(max_length=500, null=True)),
('usages', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='CourseSchedule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_date', models.DateField(null=True)),
('week_schedule', models.CharField(max_length=500, null=True)),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.course')),
],
),
migrations.CreateModel(
name='CourseProgress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lessons', models.JSONField(null=True)),
('attendance', models.JSONField(null=True)),
('course', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='progress', to='course.course')),
],
),
]
|
from .platform_factory import get_platform
|
# -*- coding: utf-8 -*-
"""Provides the AppConfig class, that is required by Django."""
# Django imports
from django.apps import AppConfig
from django.conf import settings
from django.core.checks import register
from django.db.models.signals import post_save
from django.utils import six
# app imports
from auth_enhanced.checks import check_settings_values
from auth_enhanced.email import (
callback_admin_information_new_signup,
callback_user_signup_email_verification,
)
from auth_enhanced.exceptions import AuthEnhancedConversionError
from auth_enhanced.settings import (
DAE_CONST_MODE_EMAIL_ACTIVATION, DAE_CONST_VERIFICATION_TOKEN_MAX_AGE,
convert_to_seconds, set_app_default_settings,
)
class AuthEnhancedConfig(AppConfig):
"""App specific configuration class
Within its 'ready()'-method, app-specific settings are injected (meaning:
default values are provided here, if they are not already given in the
project's settings-module) and app-specific checks are performed (using
Django's check framework)."""
name = 'auth_enhanced'
verbose_name = 'auth_enhanced'
def ready(self):
"""Executed, when application loading is completed."""
# apply the default settings
set_app_default_settings()
# convert time-strings to seconds
if isinstance(settings.DAE_VERIFICATION_TOKEN_MAX_AGE, six.string_types):
try:
setattr(
settings,
'DAE_VERIFICATION_TOKEN_MAX_AGE',
convert_to_seconds(settings.DAE_VERIFICATION_TOKEN_MAX_AGE)
)
except AuthEnhancedConversionError:
setattr(
settings,
'DAE_VERIFICATION_TOKEN_MAX_AGE',
DAE_CONST_VERIFICATION_TOKEN_MAX_AGE
)
# register app-specific system checks
register(check_settings_values)
# add a 'post_save'-callback to automatically create a UserEnhancement,
# whenever a User-object is created.
post_save.connect(
self.get_model('UserEnhancement').callback_create_enhancement_object,
sender=settings.AUTH_USER_MODEL,
dispatch_uid='DAE_create_enhance_user_object'
)
# add a 'post_save'-callback to inform admins/superusers about a newly
# registered user.
# Please note: the callback is only registered, if the corresponding
# setting is not False.
if settings.DAE_ADMIN_SIGNUP_NOTIFICATION:
post_save.connect(
callback_admin_information_new_signup,
sender=settings.AUTH_USER_MODEL,
dispatch_uid='DAE_admin_information_new_signup'
)
# add a 'post_save'-callback to send an email to the newly registered
# user, if 'DAE_OPERATION_MODE' == 'DAE_CONST_MODE_EMAIL_ACTIVATION'.
# This means, an automatic email verification is only available in
# that mode. However, users may verify their email addresses by a
# manual process.
if settings.DAE_OPERATION_MODE == DAE_CONST_MODE_EMAIL_ACTIVATION:
post_save.connect(
callback_user_signup_email_verification,
sender=settings.AUTH_USER_MODEL,
dispatch_uid='DAE_user_signup_email_verification'
)
|
# Copyright 2018 Changan Wang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
from datetime import datetime
import tensorflow as tf
import tf_replicate_model_fn
from net import danet_deform as danet
from dataset import dataset_common
from preprocessing import dan_preprocessing
from utility import anchor_manipulator
from utility import scaffolds
from utility import custom_op
# hardware related configuration
tf.app.flags.DEFINE_integer(
'num_readers', 24,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 48,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'num_cpu_threads', 0,
'The number of cpu cores used to train.')
tf.app.flags.DEFINE_float(
'gpu_memory_fraction', 1., 'GPU memory fraction to use.')
# scaffold related configuration
tf.app.flags.DEFINE_string(
'data_dir', './dataset/tfrecords',
'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_integer(
'num_classes', 2, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
'model_dir', './dan_logs_deform/',
'The directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
'save_summary_steps', 500,
    'The frequency with which summaries are saved, in steps.')
tf.app.flags.DEFINE_integer(
'save_checkpoints_secs', 7200, # not used
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_checkpoints_steps', 10000,
'The frequency with which the model is saved, in steps.')
# model related configuration
tf.app.flags.DEFINE_integer(
'train_image_size', 640,
'The size of the input image for the model to use.')
tf.app.flags.DEFINE_integer(
'train_epochs', None,
'The number of epochs to use for training.')
tf.app.flags.DEFINE_integer(
'max_number_of_steps', 120000,
'The max number of steps to use for training.')
tf.app.flags.DEFINE_integer(
'batch_size', 16,
'Batch size for training and evaluation.')
tf.app.flags.DEFINE_string(
'data_format', 'channels_last', # 'channels_first' or 'channels_last'
'A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
tf.app.flags.DEFINE_float(
'negative_ratio', 3., 'Negative ratio in the loss function.')
tf.app.flags.DEFINE_float(
'match_threshold', 0.35, 'Matching threshold in the loss function.')
tf.app.flags.DEFINE_float(
    'neg_threshold', 0.35, 'Matching threshold for the negative examples in the loss function.')
# optimizer related configuration
tf.app.flags.DEFINE_integer(
'tf_random_seed', 20180817, 'Random seed for TensorFlow initializers.')
tf.app.flags.DEFINE_float(
'weight_decay', 0.0005, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 1e-3, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.000001,
'The minimal end learning rate used by a polynomial decay learning rate.')
# for learning rate piecewise_constant decay
tf.app.flags.DEFINE_string(
'decay_boundaries', '50, 80000, 100000',
'Learning rate decay boundaries by global_step (comma-separated list).')
tf.app.flags.DEFINE_string(
'lr_decay_factors', '0.1, 1, 0.1, 0.01',
'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
# checkpoint related configuration
tf.app.flags.DEFINE_string(
'checkpoint_path', './model',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_model_scope', 'vgg_16',
'Model scope in the checkpoint. None if the same as the trained model.')
tf.app.flags.DEFINE_string(
'model_scope', 'dan',
'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', 'dan/predict_face, dan/prediction_modules_stage1, dan/prediction_modules_stage2, dan/predict_cascade, dan/additional_layers, dan/l2_norm_layer_3, dan/l2_norm_layer_4, dan/l2_norm_layer_5, dan/lfpn, dan/lfpn_stage1, dan/lfpn_stage2',
'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', True,
'When restoring a checkpoint would ignore missing variables.')
tf.app.flags.DEFINE_boolean(
'multi_gpu', True,
'Whether there is GPU to use for training.')
FLAGS = tf.app.flags.FLAGS
#CUDA_VISIBLE_DEVICES
def validate_batch_size_for_multi_gpu(batch_size):
"""For multi-gpu, batch-size must be a multiple of the number of
available GPUs.
Note that this should eventually be handled by replicate_model_fn
directly. Multi-GPU support is currently experimental, however,
so doing the work here until that feature is in place.
"""
if FLAGS.multi_gpu:
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
num_gpus = sum([1 for d in local_device_protos if d.device_type == 'GPU'])
if not num_gpus:
raise ValueError('Multi-GPU mode was specified, but no GPUs '
'were found. To use CPU, run --multi_gpu=False.')
remainder = batch_size % num_gpus
if remainder:
err = ('When running with multiple GPUs, batch size '
'must be a multiple of the number of available GPUs. '
'Found {} GPUs with a batch size of {}; try --batch_size={} instead.'
).format(num_gpus, batch_size, batch_size - remainder)
raise ValueError(err)
return num_gpus
return 0
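# Illustrative note (not part of the original check): with 4 visible GPUs, --batch_size=16
# passes because 16 % 4 == 0, while --batch_size=18 raises and suggests --batch_size=16,
# i.e. batch_size - batch_size % num_gpus.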
def get_init_fn():
return scaffolds.get_init_fn_for_scaffold(FLAGS.model_dir, FLAGS.checkpoint_path,
FLAGS.model_scope, FLAGS.checkpoint_model_scope,
FLAGS.checkpoint_exclude_scopes, FLAGS.ignore_missing_vars,
name_remap={'/conv2d/kernel': '/weights', '/conv2d/bias': '/biases'})
# Couldn't find a better way to pass params from input_fn to model_fn:
# some tensors used by model_fn must be created in input_fn so that they live in the same graph,
# but if we put those tensors into the labels dict, replicate_model_fn will split them across GPUs,
# and the problem is that they should not be split.
global_anchor_info = dict()
def input_pipeline(dataset_pattern='train-*', is_training=True, batch_size=FLAGS.batch_size):
def input_fn():
target_shape = [FLAGS.train_image_size] * 2
anchor_encoder_decoder = anchor_manipulator.AnchorEncoder(positive_threshold = FLAGS.match_threshold,
ignore_threshold = FLAGS.neg_threshold,
prior_scaling=[0.1, 0.1, 0.2, 0.2])
all_anchor_scales = [(16.,), (32.,), (64.,), (128.,), (256.,), (512.,)]
all_extra_scales = [(), (), (), (), (), ()]
all_anchor_ratios = [(0.8,), (0.8,), (0.8,), (0.8,), (0.8,), (0.8,)]
all_layer_shapes = [(160, 160), (80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
all_layer_strides = [4, 8, 16, 32, 64, 128]
offset_list = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
total_layers = len(all_layer_shapes)
anchors_height = list()
anchors_width = list()
anchors_depth = list()
for ind in range(total_layers):
_anchors_height, _anchors_width, _anchor_depth = anchor_encoder_decoder.get_anchors_width_height(all_anchor_scales[ind], all_extra_scales[ind], all_anchor_ratios[ind], name='get_anchors_width_height{}'.format(ind))
anchors_height.append(_anchors_height)
anchors_width.append(_anchors_width)
anchors_depth.append(_anchor_depth)
anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax, inside_mask = anchor_encoder_decoder.get_all_anchors(target_shape, anchors_height, anchors_width, anchors_depth,
offset_list, all_layer_shapes, all_layer_strides,
[FLAGS.train_image_size * 1.] * total_layers, [False] * total_layers)
num_anchors_per_layer = list()
for ind, layer_shape in enumerate(all_layer_shapes):
_, _num_anchors_per_layer = anchor_encoder_decoder.get_anchors_count(anchors_depth[ind], layer_shape, name='get_anchor_count{}'.format(ind))
num_anchors_per_layer.append(_num_anchors_per_layer)
image_preprocessing_fn = lambda image_, bboxes_ : dan_preprocessing.preprocess_image(image_, bboxes_, target_shape, [16, 32, 64, 128, 256, 512], is_training=is_training, data_format=FLAGS.data_format, output_rgb=False)
anchor_encoder_fn = lambda gbboxes_: anchor_encoder_decoder.encode_anchors(gbboxes_, anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax, inside_mask)
image, filename, shape, loc_targets, cls_targets, match_scores, matched_gt = dataset_common.slim_get_batch(FLAGS.num_classes,
batch_size,
('train' if is_training else 'valid'),
os.path.join(FLAGS.data_dir, dataset_pattern),
FLAGS.num_readers,
FLAGS.num_preprocessing_threads,
image_preprocessing_fn,
anchor_encoder_fn,
num_epochs=FLAGS.train_epochs,
is_training=is_training)
global global_anchor_info
global_anchor_info = {'decode_fn': lambda pred : anchor_encoder_decoder.batch_decode_anchors(pred, anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax),
'num_anchors_per_layer': num_anchors_per_layer,
'all_num_anchors_depth': anchors_depth }
return image, {'filename': filename, 'shape': shape, 'loc_targets': loc_targets, 'cls_targets': cls_targets, 'match_scores': match_scores, 'matched_gt': matched_gt}
return input_fn
def modified_smooth_l1(bbox_pred, bbox_targets, bbox_inside_weights=1., bbox_outside_weights=1., sigma=1.):
"""
ResultLoss = outside_weights * SmoothL1(inside_weights * (bbox_pred - bbox_targets))
SmoothL1(x) = 0.5 * (sigma * x)^2, if |x| < 1 / sigma^2
|x| - 0.5 / sigma^2, otherwise
"""
with tf.name_scope('smooth_l1', [bbox_pred, bbox_targets]):
sigma2 = sigma * sigma
inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))
smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)
smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)
smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)
smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),
tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))
outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)
return outside_mul
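# Worked example (illustrative, with sigma=1 so sigma^2 = 1): for a residual x = 0.4 the
# quadratic branch applies and the loss is 0.5 * 0.4^2 = 0.08; for x = 2.0 the linear
# branch applies and the loss is |2.0| - 0.5 = 1.5. The smooth_l1_sign tensor above
# selects between these two branches element-wise.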
def mining_hard_neg_across_batch(batch_size, cls_pred, location_pred, cls_targets, match_scores, loc_targets, name=None):
with tf.variable_scope(name, 'hard_neg_mining'):
        ############## hard negative mining across batch
cls_targets = tf.reshape(cls_targets, [-1])
match_scores = tf.reshape(match_scores, [-1])
loc_targets = tf.reshape(loc_targets, [-1, 4])
        # each positive example has one label
positive_mask = cls_targets > 0
n_positives = tf.count_nonzero(positive_mask)
negtive_mask = tf.equal(cls_targets, 0)
#negtive_mask = tf.logical_and(tf.equal(cls_targets, 0), match_scores > 0.)
n_negtives = tf.count_nonzero(negtive_mask)
n_neg_to_select = tf.to_int32(FLAGS.negative_ratio * tf.to_float(n_positives))
n_neg_to_select = tf.minimum(n_neg_to_select, tf.to_int32(n_negtives))
# hard negative mining for classification
predictions_for_bg = tf.nn.softmax(cls_pred)[:, 0]
prob_for_negtives = tf.where(negtive_mask,
0. - predictions_for_bg,
# ignore all the positives
0. - tf.ones_like(predictions_for_bg))
topk_prob_for_bg, _ = tf.nn.top_k(prob_for_negtives, k=n_neg_to_select)
selected_neg_mask = prob_for_negtives > topk_prob_for_bg[-1]
        # include both the selected negatives and all positive examples
final_mask = tf.stop_gradient(tf.logical_or(tf.logical_and(negtive_mask, selected_neg_mask), positive_mask))
total_examples = tf.count_nonzero(final_mask)
flaten_cls_targets = tf.boolean_mask(tf.clip_by_value(cls_targets, 0, FLAGS.num_classes), final_mask)
cls_pred = tf.boolean_mask(cls_pred, final_mask)
location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
flaten_loc_targets = tf.boolean_mask(loc_targets, tf.stop_gradient(positive_mask))
return cls_pred, location_pred, flaten_cls_targets, flaten_loc_targets
def mining_hard_neg(batch_size, cls_pred, location_pred, cls_targets, match_scores, loc_targets, name=None):
with tf.variable_scope(name, 'hard_neg_mining'):
flaten_cls_targets = tf.reshape(cls_targets, [-1])
flaten_match_scores = tf.reshape(match_scores, [-1])
flaten_loc_targets = tf.reshape(loc_targets, [-1, 4])
        # each positive example has one label
positive_mask = flaten_cls_targets > 0
n_positives = tf.count_nonzero(positive_mask)
batch_n_positives = tf.count_nonzero(cls_targets > 0, -1)
batch_negtive_mask = tf.equal(cls_targets, 0)#tf.logical_and(tf.equal(cls_targets, 0), match_scores > 0.)
batch_n_negtives = tf.count_nonzero(batch_negtive_mask, -1)
batch_n_neg_select = tf.to_int32(FLAGS.negative_ratio * tf.to_float(batch_n_positives))
batch_n_neg_select = tf.maximum(tf.minimum(batch_n_neg_select, tf.to_int32(batch_n_negtives)), 1)
# hard negative mining for classification
predictions_for_bg = tf.nn.softmax(tf.reshape(cls_pred, [batch_size, -1, FLAGS.num_classes]))[:, :, 0]
prob_for_negtives = tf.where(batch_negtive_mask,
0. - predictions_for_bg,
# ignore all the positives
0. - tf.ones_like(predictions_for_bg))
topk_prob_for_bg, _ = tf.nn.top_k(prob_for_negtives, k=tf.shape(prob_for_negtives)[1])
score_at_k = tf.gather_nd(topk_prob_for_bg, tf.stack([tf.range(batch_size), batch_n_neg_select - 1], axis=-1))
selected_neg_mask = prob_for_negtives >= tf.expand_dims(score_at_k, axis=-1)
        # include both the selected negatives and all positive examples
final_mask = tf.stop_gradient(tf.logical_or(tf.reshape(tf.logical_and(batch_negtive_mask, selected_neg_mask), [-1]), positive_mask))
total_examples = tf.count_nonzero(final_mask)
cls_pred = tf.boolean_mask(cls_pred, final_mask)
location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
flaten_cls_targets = tf.boolean_mask(tf.clip_by_value(flaten_cls_targets, 0, FLAGS.num_classes), final_mask)
flaten_loc_targets = tf.stop_gradient(tf.boolean_mask(flaten_loc_targets, positive_mask))
return cls_pred, location_pred, flaten_cls_targets, flaten_loc_targets
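# Illustrative note: with FLAGS.negative_ratio = 3, a sample containing 20 positive anchors
# keeps at most 60 negatives (the hardest ones, i.e. those with the lowest background
# probability), clamped to at least 1 and at most the number of available negatives; all
# positives are always kept via positive_mask.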
# from scipy.misc import imread, imsave, imshow, imresize
# import numpy as np
# from utility import draw_toolbox
# def save_image_with_bbox(image, labels_, scores_, bboxes_):
# if not hasattr(save_image_with_bbox, "counter"):
# save_image_with_bbox.counter = 0 # it doesn't exist yet, so initialize it
# save_image_with_bbox.counter += 1
# img_to_draw = np.copy(image)
# img_to_draw = draw_toolbox.bboxes_draw_on_img(img_to_draw, labels_, scores_, bboxes_, thickness=2)
# imsave(os.path.join('./sfd_debug/{}.jpg').format(save_image_with_bbox.counter), img_to_draw)
# return save_image_with_bbox.counter
def reshape_pred(batch_size, cls_pred, location_pred, data_format, name=None):
with tf.name_scope(name, 'reshape_pred', [cls_pred, location_pred]):
if data_format == 'channels_first':
cls_pred = [tf.transpose(pred, [0, 2, 3, 1]) for pred in cls_pred]
location_pred = [tf.transpose(pred, [0, 2, 3, 1]) for pred in location_pred]
cls_pred = [tf.reshape(pred, [batch_size, -1, FLAGS.num_classes]) for pred in cls_pred]
location_pred = [tf.reshape(pred, [batch_size, -1, 4]) for pred in location_pred]
cls_pred = tf.concat(cls_pred, axis=1)
location_pred = tf.concat(location_pred, axis=1)
cls_pred = tf.reshape(cls_pred, [-1, FLAGS.num_classes])
location_pred = tf.reshape(location_pred, [-1, 4])
return cls_pred, location_pred
def anchor_routing(decoded_bbox, gt_bboxes, gt_lables, easy_mask, feat_height, feat_width, feat_strides, all_num_anchors_depth, num_anchors_per_layer, threshold_per_layer, ignore_threshold_per_layer):
num_anchors_per_layer = tf.stack(num_anchors_per_layer)
#print(num_anchors_per_layer)
def impl_anchor_routing(_decoded_bbox, _gt_bboxes, _gt_lables, _easy_mask):
decoded_bbox_list = tf.split(_decoded_bbox, num_anchors_per_layer, axis=0, name='split_decoded_bbox')
gt_bboxes_list = tf.split(_gt_bboxes, num_anchors_per_layer, axis=0, name='split_gt_bboxes')
gt_lables_list = tf.split(_gt_lables, num_anchors_per_layer, axis=0, name='split_gt_lables')
easy_mask_list = tf.split(_easy_mask, num_anchors_per_layer, axis=0, name='split_easy_mask')
mask_out_list = []
decode_out_list = []
for ind in range(len(all_num_anchors_depth)):
with tf.name_scope('routing_{}'.format(ind)):
with tf.device('/cpu:0'):
#decoded_bbox_list[ind] = tf.Print(decoded_bbox_list[ind], [tf.shape(decoded_bbox_list[ind]), tf.shape(gt_bboxes_list[ind]), tf.shape(gt_lables_list[ind]), tf.shape(easy_mask_list[ind]), feat_height[ind], feat_width[ind], all_num_anchors_depth[ind], feat_strides[ind]])
mask_out, decode_out = custom_op.dynamic_anchor_routing(decoded_bbox_list[ind], gt_bboxes_list[ind], gt_lables_list[ind], easy_mask_list[ind], feat_height[ind], feat_width[ind], all_num_anchors_depth[ind], feat_strides[ind], FLAGS.train_image_size, FLAGS.train_image_size, True, threshold_per_layer[ind], ignore_threshold_per_layer[ind])
mask_out_list.append(mask_out)
decode_out_list.append(decode_out)
mask_out = tf.concat(mask_out_list, axis=0)
decode_out = tf.concat(decode_out_list, axis=0)
return tf.stop_gradient(mask_out), tf.stop_gradient(decode_out)
#return impl_anchor_routing(decoded_bbox[0,:,:], gt_bboxes[0,:,:], gt_lables[0,:], easy_mask[0,:])
return tf.map_fn(lambda _args: impl_anchor_routing(_args[0], _args[1], _args[2], _args[3]),
(decoded_bbox, gt_bboxes, gt_lables, easy_mask),
dtype=(tf.int32, tf.float32), back_prop=False, name='anchor_routing')
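# Note: tf.map_fn above applies impl_anchor_routing to each sample in the batch independently,
# so the custom routing op only ever sees per-image tensors; back_prop is disabled and the
# outputs are wrapped in tf.stop_gradient because the routed masks and targets are treated as
# fixed labels for the second-stage loss.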
def dan_model_fn(features, labels, mode, params):
"""model_fn for dan to be used with our Estimator."""
shape = labels['shape']
loc_targets = labels['loc_targets']
cls_targets = labels['cls_targets']
match_scores = labels['match_scores']
matched_gt = labels['matched_gt']
global global_anchor_info
decode_fn = global_anchor_info['decode_fn']
num_anchors_per_layer = global_anchor_info['num_anchors_per_layer']
all_num_anchors_depth = global_anchor_info['all_num_anchors_depth']
# bboxes_pred = decode_fn(loc_targets[0])
# bboxes_pred = [tf.reshape(preds, [-1, 4]) for preds in bboxes_pred]
# bboxes_pred = tf.concat(bboxes_pred, axis=0)
# save_image_op = tf.py_func(save_image_with_bbox,
# [dan_preprocessing.unwhiten_image(features[0]),
# tf.clip_by_value(cls_targets[0], 0, tf.int64.max),
# match_scores[0],
# bboxes_pred],
# tf.int64, stateful=True)
# with tf.control_dependencies([save_image_op]):
#print(all_num_anchors_depth)
with tf.variable_scope(params['model_scope'], default_name=None, values=[features], reuse=tf.AUTO_REUSE):
backbone = danet.VGG16Backbone(params['data_format'])
feature_layers = backbone.get_featmaps(features, training=(mode == tf.estimator.ModeKeys.TRAIN))
#print(feature_layers)
#print(feature_layers), dan/
feature_layers = backbone.build_lfpn(feature_layers, skip_last=3)
feature_layers_stage1 = backbone.get_features_stage1(feature_layers, name='prediction_modules_stage1')
feature_layers_stage1 = backbone.build_lfpn(feature_layers_stage1, skip_last=3, name='lfpn_stage1')
location_pred, cls_pred = backbone.get_predict_module(feature_layers_stage1, [1] * len(feature_layers),
[1] + [1] * (len(feature_layers) - 1), all_num_anchors_depth, name='predict_face')
cls_pred, location_pred = reshape_pred(tf.shape(features)[0], cls_pred, location_pred, params['data_format'], name='face_pred_reshape')
feature_layers_stage2 = backbone.get_features_stage2(feature_layers_stage1, feature_layers, name='prediction_modules_stage2')
feature_layers_stage2 = backbone.build_lfpn(feature_layers_stage2, skip_last=3, name='lfpn_stage2')
final_location_pred, final_cls_pred = backbone.get_predict_module(feature_layers_stage2, [1] * len(feature_layers),
[3] + [1] * (len(feature_layers) - 1), all_num_anchors_depth, name='predict_cascade')
final_cls_pred, final_location_pred = reshape_pred(tf.shape(features)[0], final_cls_pred, final_location_pred, params['data_format'], name='cascade_pred_reshape')
with tf.name_scope('post_forward'):
bboxes_pred = decode_fn(tf.reshape(location_pred, [tf.shape(features)[0], -1, 4]))
if params['data_format'] == 'channels_first':
feat_height_list = [tf.shape(feat)[2] for feat in feature_layers]
feat_width_list = [tf.shape(feat)[3] for feat in feature_layers]
else:
feat_height_list = [tf.shape(feat)[1] for feat in feature_layers]
feat_width_list = [tf.shape(feat)[2] for feat in feature_layers]
final_mask, final_loc_targets = anchor_routing(bboxes_pred, matched_gt, tf.to_float(cls_targets > 0),
tf.to_int32(tf.reshape(tf.nn.softmax(cls_pred, name='pred_score')[:, -1], [tf.shape(features)[0], -1]) > 0.03),
feat_height_list, feat_width_list, [4, 8, 16, 32, 64, 128], all_num_anchors_depth, num_anchors_per_layer, [0.4, 0.5, 0.6, 0.7, 0.8, 0.9], [0.35, 0.4, 0.45, 0.5, 0.55, 0.6])
# bboxes_pred = decode_fn(tf.reshape(location_pred, [tf.shape(features)[0], -1, 4]))
# #cls_targets = tf.Print(cls_targets, [tf.shape(bboxes_pred[0]),tf.shape(bboxes_pred[1]),tf.shape(bboxes_pred[2]),tf.shape(bboxes_pred[3])])
# bboxes_pred = tf.reshape(bboxes_pred, [-1, 4])
        ############## hard negative mining for each sample
cls_pred, location_pred, flaten_cls_targets, flaten_loc_targets = mining_hard_neg(tf.shape(features)[0], cls_pred, location_pred, cls_targets, match_scores, loc_targets, name='mining_0')
final_cls_pred, final_location_pred, \
final_flaten_cls_targets, final_flaten_loc_targets = mining_hard_neg(tf.shape(features)[0], final_cls_pred, final_location_pred,
final_mask, tf.ones_like(final_mask, dtype=tf.float32),
final_loc_targets * tf.expand_dims(tf.expand_dims(tf.constant([10., 10., 5., 5.], dtype=tf.float32) * 2., axis=0), axis=0), name='mining_1')
predictions = {'classes': tf.argmax(cls_pred, axis=-1),
'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1)}
cls_accuracy = tf.metrics.accuracy(flaten_cls_targets, predictions['classes'])
metrics = {'cls_accuracy': cls_accuracy}
# Create a tensor named train_accuracy for logging purposes.
tf.identity(cls_accuracy[1], name='cls_accuracy')
tf.summary.scalar('cls_acc', cls_accuracy[1])
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate loss, which includes softmax cross entropy and L2 regularization.
#cross_entropy = tf.cond(n_positives > 0, lambda: tf.losses.sparse_softmax_cross_entropy(labels=flaten_cls_targets, logits=cls_pred), lambda: 0.)# * (params['negative_ratio'] + 1.)
#flaten_cls_targets=tf.Print(flaten_cls_targets, [flaten_loc_targets],summarize=50000)
cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=flaten_cls_targets, logits=cls_pred) * (params['negative_ratio'] + 1.)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy_loss')
tf.summary.scalar('ce_loss', cross_entropy)
#loc_loss = tf.cond(n_positives > 0, lambda: modified_smooth_l1(location_pred, tf.stop_gradient(flaten_loc_targets), sigma=1.), lambda: tf.zeros_like(location_pred))
loc_loss = modified_smooth_l1(location_pred, flaten_loc_targets, sigma=1.)
#loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets))
loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1), name='location_loss')
tf.summary.scalar('loc_loss', loc_loss)
tf.losses.add_loss(loc_loss)
final_cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=final_flaten_cls_targets, logits=final_cls_pred) * (params['negative_ratio'] + 1.)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(final_cross_entropy, name='final_cross_entropy_loss')
tf.summary.scalar('final_ce_loss', final_cross_entropy)
final_loc_loss = modified_smooth_l1(final_location_pred, final_flaten_loc_targets, sigma=1.)
final_loc_loss = tf.reduce_mean(tf.reduce_sum(final_loc_loss, axis=-1), name='final_location_loss')
tf.summary.scalar('final_loc_loss', final_loc_loss)
tf.losses.add_loss(final_loc_loss)
l2_loss_vars = []
for trainable_var in tf.trainable_variables():
if '/bn' not in trainable_var.name:
if 'l2_norm_layer' not in trainable_var.name:
if '/bias' not in trainable_var.name: l2_loss_vars.append(tf.nn.l2_loss(trainable_var))
else:
l2_loss_vars.append(tf.nn.l2_loss(trainable_var) * 0.2)
# Add weight decay to the loss. We exclude the batch norm variables because
# doing so leads to a small improvement in accuracy.
total_loss = tf.add(cross_entropy + loc_loss + final_cross_entropy + final_loc_loss, tf.multiply(params['weight_decay'], tf.add_n(l2_loss_vars), name='l2_loss'), name='total_loss')
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
lr_values = [params['learning_rate'] * decay for decay in params['lr_decay_factors']]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
[int(_) for _ in params['decay_boundaries']],
lr_values)
truncated_learning_rate = tf.maximum(learning_rate, tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype), name='learning_rate')
# Create a tensor named learning_rate for logging purposes.
tf.summary.scalar('lr', truncated_learning_rate)
gradient_multipliers = {}
for var in tf.trainable_variables():
if '/bias' in var.name: gradient_multipliers[var] = 2.
else: gradient_multipliers[var] = 1.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = tf.contrib.layers.optimize_loss(total_loss, global_step, truncated_learning_rate,
optimizer=lambda lr: tf_replicate_model_fn.TowerOptimizer(tf.train.MomentumOptimizer(learning_rate=lr, momentum=params['momentum'])),
gradient_multipliers=gradient_multipliers,
update_ops=update_ops,
summaries=['loss'])
else:
train_op = None
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=total_loss,
train_op=train_op,
eval_metric_ops=metrics,
scaffold=tf.train.Scaffold(init_fn=get_init_fn()))
def parse_comma_list(args):
return [float(s.strip()) for s in args.split(',')]
def main(_):
# Using the Winograd non-fused algorithms provides a small performance boost.
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, intra_op_parallelism_threads=FLAGS.num_cpu_threads, inter_op_parallelism_threads=FLAGS.num_cpu_threads, gpu_options=gpu_options)
num_gpus = validate_batch_size_for_multi_gpu(FLAGS.batch_size)
    # Set up a RunConfig that controls how often checkpoints and summaries are saved.
run_config = tf.estimator.RunConfig().replace(
save_checkpoints_secs=None).replace(
save_checkpoints_steps=FLAGS.save_checkpoints_steps).replace(
save_summary_steps=FLAGS.save_summary_steps).replace(
keep_checkpoint_max=5).replace(
tf_random_seed=FLAGS.tf_random_seed).replace(
log_step_count_steps=FLAGS.log_every_n_steps).replace(
session_config=config)
replicate_dan_model_fn = tf_replicate_model_fn.replicate_model_fn(dan_model_fn, loss_reduction=tf.losses.Reduction.MEAN)
dan_detector = tf.estimator.Estimator(
model_fn=replicate_dan_model_fn, model_dir=FLAGS.model_dir, config=run_config,
params={
'num_gpus': num_gpus,
'data_format': FLAGS.data_format,
'batch_size': FLAGS.batch_size,
'model_scope': FLAGS.model_scope,
'num_classes': FLAGS.num_classes,
'negative_ratio': FLAGS.negative_ratio,
'match_threshold': FLAGS.match_threshold,
'neg_threshold': FLAGS.neg_threshold,
'weight_decay': FLAGS.weight_decay,
'momentum': FLAGS.momentum,
'learning_rate': FLAGS.learning_rate,
'end_learning_rate': FLAGS.end_learning_rate,
'decay_boundaries': parse_comma_list(FLAGS.decay_boundaries),
'lr_decay_factors': parse_comma_list(FLAGS.lr_decay_factors),
})
tensors_to_log = {
'lr': 'learning_rate',
'ce': 'cross_entropy_loss',
'loc': 'location_loss',
'ce2': 'final_cross_entropy_loss',
'loc2': 'final_location_loss',
'loss': 'total_loss',
'l2': 'l2_loss',
'acc': 'post_forward/cls_accuracy',
}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=FLAGS.log_every_n_steps,
formatter=lambda dicts: (', '.join(['%s=%.6f' % (k, v) for k, v in dicts.items()])))
#hook = tf.train.ProfilerHook(save_steps=50, output_dir='.', show_memory=True)
tf.logging.info('Starting a training cycle.')
dan_detector.train(input_fn=input_pipeline(dataset_pattern='{}-*', is_training=True, batch_size=FLAGS.batch_size),
hooks=[logging_hook], max_steps=FLAGS.max_number_of_steps)
    tf.logging.info('Finished running at {}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
if __name__ == '__main__':
import logging
tf.gfile.MakeDirs(FLAGS.model_dir)
tf.gfile.Copy(os.path.realpath(__file__), os.path.join(FLAGS.model_dir, 'train.py'), overwrite=True)
log = logging.getLogger('tensorflow')
    fh = logging.FileHandler(os.path.join(FLAGS.model_dir, 'tensorflow.log'))
formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
fh.setFormatter(formatter)
fh.setLevel(logging.INFO)
log.addHandler(fh)
tf.logging.set_verbosity(tf.logging.INFO)
    tf.logging.info('Starting run at {}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
tf.app.run()
|
#! /usr/bin/python3
#-*- coding: utf-8 -*-
from __future__ import print_function
import sys
import re
import os
import shutil
from contextlib import closing
import requests
import urllib.request
from urllib.error import HTTPError, URLError
import socket
import ftplib
def url_split(target):
return target[0:target.rindex('/')], target[target.rindex('/') + 1:]
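# Example (illustrative): url_split('ftp://host/pub/file.txt') returns
# ('ftp://host/pub', 'file.txt'), i.e. everything before / after the last '/'.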
def ftp_retrieve_active(ftpip, path, username, password, remote, local):
status = 1
ftp = ftplib.FTP(host=ftpip, user=username, passwd=password, acct='', timeout=10)
# print('--> Downloading active with ftpip=[{:}], path=[{:}], username=[{:}], password=[{:}], remote=[{:}], local=[{:}]'.format(ftpip, path, username, password, remote, local))
ftp.set_pasv(False)
if path != '': ftp.cwd(path)
try:
## query size so that we fail if the remote does not exist and no local
## file is created
assert( ftp.size(remote) )
ftp.retrbinary("RETR " + remote, open(local, 'wb').write)
status = 0
except FTP.error_perm:
os.remove(local)
ftp.close()
return status
def ftp_retrieve(url, filename=None, **kwargs):
"""
:return: An integer denoting the download status; anything other than 0
denotes an error
kwargs:
save_as: 'foobar' Save remote file as 'foobar' (can include path)
save_dir: 'foo/bar' Directory to save remote file; if both save_dir and
save_as are given, then the local file will be the concatenation
of these two, aka os.path.join(save_dir, save_as)
username: 'usrnm' Use the given username
password: 'psswrd' Use the given password
active : (boolean) true or false
fail_error: True/False Throw exception if download fails. By default
the function will throw if the download fails
"""
# print('>> called ftp_retrieve with args: url={:}, filename={:}, kwargs={:}'.format(url, filename, kwargs))
if filename is None:
url, filename = url_split(url)
# print('>> split url and filename to {:} and {:}'.format(url, filename))
saveas = kwargs['save_as'] if 'save_as' in kwargs else filename
if 'save_dir' in kwargs:
if not os.path.isdir(kwargs['save_dir']):
            msg = '[ERROR] retrieve::ftp_retrieve Directory does not exist {:}'.format(
kwargs['save_dir'])
raise RuntimeError(msg)
saveas = os.path.join(kwargs['save_dir'], saveas)
# print('>> saveas is now {:}'.format(saveas))
    if 'fail_error' not in kwargs:
kwargs['fail_error'] = True
    ## default to empty credentials; username/password key(s) in kwargs override them
    username = kwargs['username'] if 'username' in kwargs else ''
    password = kwargs['password'] if 'password' in kwargs else ''
    if username or password:
        ## need to construct a string of type:
        ## 'ftp://username:password@server/path/to/file' from (url=)
        ## 'server/path/to/file'
        # print('>> using credentials .... ')
        url = re.sub(r'^ftp://', 'ftp://{:}:{:}@'.format(username, password), url)
target = '{:}/{:}'.format(url, filename)
status = 0
## Handle active FTP
if 'active' in kwargs and kwargs['active'] == True:
g=re.match(r'ftp://[^@]*([^/]*)(.*)', url)
ftpip = g.group(1).lstrip('@')
target = filename
path = g.group(2).replace(target, '')
status = ftp_retrieve_active(ftpip, path, username, password, target, saveas)
else:
# print(">> Note that target={:}".format(target))
try:
with closing(urllib.request.urlopen(target, timeout=10)) as r:
with open(saveas, 'wb') as f:
shutil.copyfileobj(r, f)
        except Exception:
            status = 1
## For debugging
#try:
# with closing(urllib.request.urlopen(target, timeout=10)) as r:
# with open(saveas, 'wb') as f:
# shutil.copyfileobj(r, f)
#except HTTPError as error:
# print('Data not retrieved because {:}\nURL: {}', error, target)
#except URLError as error:
# if isinstance(error.reason, socket.timeout):
# print('socket timed out - URL {}', target)
# else:
# print('some other error happened')
# status = 1
if not os.path.isfile(saveas):
status += 1
if status > 0 and kwargs['fail_error'] == True:
msg = '[ERROR] retrieve::ftp_retrieve Failed to download file {:}'.format(
target)
raise RuntimeError(msg)
return status, target, saveas
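# Minimal usage sketch (the host, path and credentials below are placeholders, not real endpoints):
#
#   status, target, saveas = ftp_retrieve(
#       'ftp://example.org/pub/data', filename='file.txt',
#       save_dir='/tmp', username='anonymous', password='guest',
#       fail_error=False)
#   if status != 0:
#       print('download failed for', target)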
def http_retrieve(url, filename=None, **kwargs):
"""
:return: An integer denoting the download status; anything other than 0
denotes an error
kwargs:
username: Use this username to access the url.
password: Use this password to access the url.
save_as: 'foobar' Save remote file as 'foobar' (can include path)
save_dir: 'foo/bar' Directory to save remote file; if both save_dir and
save_as are given, then the local file will be the concatenation
of these two, aka os.path.join(save_dir, save_as)
fail_error: True/False Throw exception if download fails. By default
the function will throw if the download fails
"""
if filename is None:
url, filename = url_split(url)
saveas = kwargs['save_as'] if 'save_as' in kwargs else filename
if 'save_dir' in kwargs:
if not os.path.isdir(kwargs['save_dir']):
msg = '[ERROR] retrieve::http_retrieve Directory does not exist {:}'.format(
kwargs['save_dir'])
raise RuntimeError(msg)
saveas = os.path.join(kwargs['save_dir'], saveas)
    if 'fail_error' not in kwargs:
kwargs['fail_error'] = True
use_credentials = False
if set(['username', 'password']).intersection(set(kwargs)):
use_credentials = True
username = kwargs['username'] if 'username' in kwargs else ''
password = kwargs['password'] if 'password' in kwargs else ''
if (not username or username == '') and (not password or password == ''):
use_credentials = False
target = '{:}/{:}'.format(url, filename)
status = 0
if not use_credentials: ## download with no credentials
try:
## allow timeout with requests
request = requests.get(target, timeout=20, stream=True)
if request.status_code == 200:
with open(saveas, 'wb') as fh:
for chunk in request.iter_content(1024 * 1024):
fh.write(chunk)
if not os.path.isfile(saveas):
status += 1
        except Exception:
            status = 1
else: ## download with credentials (not sure if this works for python 2)
try:
with requests.get(target, auth=(username, password), timeout=20) as r:
r.raise_for_status()
if r.status_code == 200:
with open(saveas, 'wb') as f:
#shutil.copyfileobj(r.raw, f)
f.write(r.content)
if not os.path.isfile(saveas):
status += 1
        except Exception:
            status = 1
if status > 0 and kwargs['fail_error'] == True:
msg = '[ERROR] retrieve::http_retrieve Failed to download file {:}'.format(
target)
raise RuntimeError(msg)
return status, target, saveas
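# Minimal usage sketch (the URL is a placeholder):
#
#   status, target, saveas = http_retrieve(
#       'https://example.org/files/data.csv', save_as='data.csv',
#       save_dir='.', fail_error=False)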
def web_retrieve(url, **kwargs):
# print('>> called web_retrieve with args: url={:}, kwargs={:}'.format(url, kwargs))
filename = None if 'filename' not in kwargs else kwargs['filename']
if url.startswith('http'):
return http_retrieve(url, filename, **kwargs)
elif url.startswith('ftp'):
return ftp_retrieve(url, filename, **kwargs)
else:
msg = '[ERROR] retrieve::web_retrieve Unknown url protocol {:}'.format(
url)
raise RuntimeError(msg)
|
from dbt.adapters.bigquery.connections import BigQueryConnectionManager # noqa
from dbt.adapters.bigquery.connections import BigQueryCredentials
from dbt.adapters.bigquery.relation import BigQueryRelation # noqa
from dbt.adapters.bigquery.relation import BigQueryColumn # noqa
from dbt.adapters.bigquery.impl import BigQueryAdapter
from dbt.adapters.base import AdapterPlugin
from dbt.include import bigquery
Plugin = AdapterPlugin(
adapter=BigQueryAdapter,
credentials=BigQueryCredentials,
include_path=bigquery.PACKAGE_PATH)
|
import typing
IntegerType = typing.NewType("IntegerType", int)
TextType = typing.NewType("TextType", str)
VarcharType = typing.NewType("VarcharType", str)
|
# Copyright (c) 2017-2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from collections import defaultdict
from io import StringIO
from os.path import basename, dirname, splitext
from typing import DefaultDict, Set
from google.protobuf.compiler.plugin_pb2 import CodeGeneratorRequest, CodeGeneratorResponse
from ..syntax.python import all_decl
from ._header import HEADER
__all__ = ["run_plugin"]
def run_plugin(request: "CodeGeneratorRequest") -> "CodeGeneratorResponse":
"""
Create `__init__.py` files for all directories.
"""
directories = set()
    # for each requested file, add its parent directory and every ancestor
    # of that directory
for proto_dir in {dirname(f) for f in request.file_to_generate}:
components = proto_dir.split("/")
for i in range(0, len(components) + 1):
directories.add("/".join(components[:i]))
return CodeGeneratorResponse(
file=[package_file(proto_dir, request) for proto_dir in sorted(directories)]
)
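# Illustrative example: if request.file_to_generate contains 'com/acme/v1/foo.proto', the loop
# above adds '', 'com', 'com/acme' and 'com/acme/v1' to `directories`, so an __init__.py is
# emitted for every package level.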
def package_file(proto_dir: str, request: "CodeGeneratorRequest") -> "CodeGeneratorResponse.File":
proto_package = proto_dir.replace("/", ".")
import_map = defaultdict(set) # type: DefaultDict[str, Set[str]]
for file in request.proto_file:
if file.package == proto_package:
current_module_base = splitext(basename(file.name))[0]
current_module_pb = "." + current_module_base + "_pb2"
current_module_grpc = "." + current_module_base + "_pb2_grpc"
for e in file.enum_type:
import_map[current_module_pb].add(e.name)
for m in file.message_type:
import_map[current_module_pb].add(m.name)
for s in file.service:
import_map[current_module_grpc].add(s.name + "Stub")
with StringIO() as buf:
buf.write(HEADER)
buf.write("\n")
all_symbols = set()
for f, i in import_map.items():
buf.write(f"from {f} import {', '.join(sorted(i))}\n")
all_symbols.update(i)
buf.write("\n")
buf.write(all_decl(sorted(all_symbols)))
return CodeGeneratorResponse.File(name=f"{proto_dir}/__init__.py", content=buf.getvalue())
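# Illustrative output (hypothetical foo.proto declaring message Bar and service Baz in package
# com.acme): the generated com/acme/__init__.py would contain roughly
#
#   from .foo_pb2 import Bar
#   from .foo_pb2_grpc import BazStub
#
# followed by the __all__ declaration produced by all_decl.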
|
from django.contrib import admin
from .models import Contact
# Register your models here.
@admin.register(Contact)
class AdminContact(admin.ModelAdmin):
pass
|
from django.apps import AppConfig
class TplansConfig(AppConfig):
name = 'tplans'
|
'''
Problem 2: Even Fibonacci numbers
Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
Answer: 4613732
'''
def even_fibonacci_numbers(limit = 4000000):
"""
Returns the sum of the even-valued terms in the Fibonacci sequence,
whose values do not exceed four million.
"""
n1, n2 = 1, 2
even_sum = 0
while (n2 <= limit):
if n2%2 == 0:
even_sum += n2
temp = n2 + n1
n1 = n2
n2 = temp
return even_sum
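# Worked check: the even Fibonacci terms not exceeding 100 are 2, 8 and 34, so
# even_fibonacci_numbers(100) == 44; with the default limit of 4,000,000 the function
# returns 4613732, the answer quoted above.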
if __name__ == '__main__':
    print(even_fibonacci_numbers(4000000))
|
from django.shortcuts import render
from .models import UserPost, UserProfile
# Create your views here.
class FollowUserPosts(object):
"""
PO文列表
"""
def __init__(self, user_name=None, page_size=10):
self.user_name = user_name
self.user_post_list = []
self.total_num = 0
self.page_size = page_size
def set_user_post_list(self, page):
"""
为PO文列表对象生成内容(分页)
:param page: 页码
:return:
"""
page += 1
follow_list = self._get_follow_list()
user_post_query_set = self._get_user_posts(follow_list)
self.total_num = user_post_query_set.count()
self.user_post_list = [{"nick_name": each_post.user__nickname, "content": each_post.content,
"post_date": each_post.creation_date}
for each_post in user_post_query_set[page * self.page_size: (page + 1) * self.page_size]]
def _get_user_posts(self, user_names):
user_post_query_set = UserPost.objects.filter(user__username__in=user_names).order_by("-creation_date")
return user_post_query_set
def _get_follow_list(self):
if not self.user_name:
raise ValueError("user id should be initialize before call this method")
user_follow_query_set = UserProfile.objects.filter(followed_by__username=self.user_name).all()
follow_list = [each_user.username for each_user in user_follow_query_set]
return follow_list
class UserBaseInfo(object):
def __init__(self, user_name, nick_name):
self.user_name = user_name
self.nick_name = nick_name
class UserInfoList(object):
"""
用户信息列表基类
"""
def __init__(self):
self.user_list = []
self.total_num = 0
self.page = 0
self.page_size = 10
self.num = 0
class FollowInfoList(UserInfoList):
def followed_by(self, user_name, page_size=None, page=None):
"""
获得关注列表的分页结果
:param user_name:
:param page_size: (optional)
:param page: (optional)
:return:
"""
if not UserProfile.objects.filter(username=user_name).exists():
            raise ValueError('This user does not exist')
if page_size:
self.page_size = page_size
if page:
self.page = page
user_profile_query_set = UserProfile.objects.filter(followed_by__username=user_name).all()
self.num = user_profile_query_set.count()
self.user_list = [UserBaseInfo(user.username, user.nickname)
for user in user_profile_query_set[self.page * self.page_size :(self.page + 1) * self.page_size]]
return self
class FollowerInfoList(UserInfoList):
def follow_to(self, user_name, page_size=None, page=None):
"""
获得跟随者列表的分页结果
:param user_name:
:param page_size: (optional)
:param page: (optional)
:return:
"""
if not UserProfile.objects.filter(username=user_name).exists():
            raise ValueError('This user does not exist')
if page_size:
self.page_size = page_size
if page:
self.page = page
        user_profile_query_set = UserProfile.objects.filter(follows__username=user_name).all()
self.num = user_profile_query_set.count()
self.user_list = [UserBaseInfo(user.username, user.nickname)
                          for user in user_profile_query_set[self.page * self.page_size: (self.page + 1) * self.page_size]]
return self
|
from ssher.core.ports.outgoing import OS
import pexpect
class CmdRunner(OS):
    def spawn_child(self, *args, **kwargs):
kwargs.setdefault("encoding", "utf-8")
return pexpect.spawn(*args, **kwargs).interact()
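# Minimal usage sketch (assumes an `ssh` binary on PATH; the host is a placeholder):
#
#   runner = CmdRunner()
#   runner.spawn_child('ssh user@example.org')
#
# pexpect.spawn starts the command and interact() hands control of the terminal to the
# user until the child process exits.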
|
import unittest
from caravan.tables import Tables
from caravan.parameter_set import ParameterSet
from caravan.simulator import Simulator
class ParameterSetTest(unittest.TestCase):
def setUp(self):
self.t = Tables.get()
self.t.clear()
self.sim = Simulator.create("echo")
def test_ps(self):
param = {"p1": 1, "p2": 2}
ps = self.sim.find_or_create_parameter_set(param)
self.assertEqual(ps.id(), 0)
self.assertEqual(ps.v(), param)
self.assertEqual(ps.runs(), [])
self.assertEqual(ps.simulator(), self.sim)
self.assertEqual(ParameterSet.all(), [ps])
self.assertEqual(ParameterSet.find(0), ps)
self.assertEqual(self.sim.find_parameter_set(param), ps)
self.assertEqual(ps.to_dict(), {"id": 0, "sim_id": 0, "params": param, "run_ids": []})
# second PS
param2 = {"p1": 2, "p2": 3}
ps2 = self.sim.find_or_create_parameter_set(param2)
self.assertEqual(ps2.id(), 1)
self.assertEqual(ps2.v(), param2)
self.assertEqual(ParameterSet.all(), [ps, ps2])
# duplicate PS
self.assertEqual(self.sim.find_or_create_parameter_set(param), ps)
self.assertEqual(len(ParameterSet.all()), 2)
def test_create_runs(self):
ps = self.sim.find_or_create_parameter_set({"foo": "bar"})
ps.create_runs_upto(3)
self.assertEqual(len(ps.runs()), 3)
self.assertEqual([r.id() for r in ps.runs()], [0, 1, 2])
ps.create_runs_upto(3)
self.assertEqual(len(ps.runs()), 3)
ps.create_runs_upto(5)
self.assertEqual(len(ps.runs()), 5)
def test_is_finished(self):
ps = self.sim.find_or_create_parameter_set({"foo": "bar"})
self.assertEqual(ps.is_finished(), True)
ps.create_runs_upto(2)
self.assertEqual(ps.is_finished(), False)
self.assertEqual(ps.finished_runs(), [])
ps.runs()[0]._store_result({"o1": 1}, 0, 1, 1000, 2000)
self.assertEqual(ps.is_finished(), False)
self.assertEqual([r.id() for r in ps.finished_runs()], [0])
ps.runs()[1]._store_result({"o1": 1}, 0, 2, 1000, 2000)
self.assertEqual(ps.is_finished(), True)
self.assertEqual([r.id() for r in ps.finished_runs()], [0, 1])
def test_outputs(self):
ps = self.sim.find_or_create_parameter_set({"foo": "bar"})
ps.create_runs_upto(2)
self.assertEqual(ps.outputs(), [])
for (i, r) in enumerate(ps.runs()):
r._store_result( {"i": i}, 0, i, 0, 10)
self.assertEqual(len(ps.finished_runs()), 2)
self.assertEqual(ps.outputs(), [{"i": 0}, {"i": 1}])
def test_find(self):
sim2 = Simulator.create("echo")
self.assertEqual(sim2.id(), 1)
ps1 = self.sim.find_or_create_parameter_set({"foo": "bar"})
self.assertEqual(self.sim.parameter_sets(), [ps1])
ps1.create_runs_upto(2)
ps2 = sim2.find_or_create_parameter_set({"foo": "bar"})
self.assertEqual(sim2.parameter_sets(), [ps2])
ps2.create_runs_upto(2)
self.assertEqual([r.id() for r in ps1.runs()], [0, 1])
self.assertEqual([r.id() for r in ps2.runs()], [2, 3])
self.assertEqual([r.id() for r in ParameterSet.all()], [0, 1])
self.assertEqual(ParameterSet.find(1), ps2)
if __name__ == '__main__':
unittest.main()
|
import os
import requests
import pymysql
import pytest
from flask import url_for
from solarforecastarbiter.datamodel import QualityFlagFilter as QFF
from sfa_dash import create_app
BASE_URL = 'http://localhost'
resample_threshold = QFF.resample_threshold_percentage
@pytest.fixture(scope='session')
def auth_token():
token_req = requests.post(
'https://solarforecastarbiter.auth0.com/oauth/token',
headers={'content-type': 'application/json'},
data=('{"grant_type": "password", '
'"username": "testing@solarforecastarbiter.org",'
'"password": "Thepassword123!", '
'"audience": "https://api.solarforecastarbiter.org", '
'"client_id": "c16EJo48lbTCQEhqSztGGlmxxxmZ4zX7"}'))
if token_req.status_code != 200:
pytest.skip('Cannot retrieve valid Auth0 token')
else:
token = token_req.json()
return token
@pytest.fixture()
def expired_token():
stored = {'access_token': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6Ik5UZENSRGRFTlVNMk9FTTJNVGhCTWtRelFUSXpNRFF6TUVRd1JUZ3dNekV3T1VWR1FrRXpSUSJ9.eyJpc3MiOiJodHRwczovL3NvbGFyZm9yZWNhc3RhcmJpdGVyLmF1dGgwLmNvbS8iLCJzdWIiOiJhdXRoMHw1YmUzNDNkZjcwMjU0MDYyMzc4MjBiODUiLCJhdWQiOlsiaHR0cHM6Ly9hcGkuc29sYXJmb3JlY2FzdGFyYml0ZXIub3JnIiwiaHR0cHM6Ly9zb2xhcmZvcmVjYXN0YXJiaXRlci5hdXRoMC5jb20vdXNlcmluZm8iXSwiaWF0IjoxNTU1NDU0NzcwLCJleHAiOjE1NTU0NjU1NzAsImF6cCI6IlByRTM5QWR0R01QSTRnSzJoUnZXWjJhRFJhcmZwZzdBIiwic2NvcGUiOiJvcGVuaWQgcHJvZmlsZSBlbWFpbCBvZmZsaW5lX2FjY2VzcyJ9.lT1XPtLkYCVGUZjcAgWFCU6AJbKWtE077zw_KO4fhIaF0wo6TTpLTkZBmF9Sxmrwb5NfeR5XuJmkX3SPCjpzcZG9wdXIpPWRGhsOAAUdoSkoHKFzALoc46VPjA3A5SZxlGqNeh6RoKFlWRAp5EJN9Z-JcwT06JyJGrbx7ip4tCbAADqWuDY2tzkjKD3EHjHTO9OSJiCRxlNA22OCfMTF6B8-8RLUabZ414bypezw83S9g25mLLWtlGhQvzWGA8F7yhhVXbEsAPPC1QoyjevXzn8PBqL5dSDp6u1gL6T29PsbhZ0diZ1xt5jkm4iX-cryc7tqwq-5D5ZkC3wbhNpLuQ', 'refresh_token': 'QlLHR9wyFS5cokItX-ym7jWlCCuLO1fC3AtZLUeDVX-mI', 'id_token': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6Ik5UZENSRGRFTlVNMk9FTTJNVGhCTWtRelFUSXpNRFF6TUVRd1JUZ3dNekV3T1VWR1FrRXpSUSJ9.eyJuaWNrbmFtZSI6InRlc3RpbmciLCJuYW1lIjoidGVzdGluZ0Bzb2xhcmZvcmVjYXN0YXJiaXRlci5vcmciLCJwaWN0dXJlIjoiaHR0cHM6Ly9zLmdyYXZhdGFyLmNvbS9hdmF0YXIvY2MxMTNkZjY5NmY4ZTlmMjA2Nzc5OTQzMzUxNzRhYjY_cz00ODAmcj1wZyZkPWh0dHBzJTNBJTJGJTJGY2RuLmF1dGgwLmNvbSUyRmF2YXRhcnMlMkZ0ZS5wbmciLCJ1cGRhdGVkX2F0IjoiMjAxOS0wNC0xNlQyMjo0NjoxMC42NTdaIiwiZW1haWwiOiJ0ZXN0aW5nQHNvbGFyZm9yZWNhc3RhcmJpdGVyLm9yZyIsImVtYWlsX3ZlcmlmaWVkIjpmYWxzZSwiaXNzIjoiaHR0cHM6Ly9zb2xhcmZvcmVjYXN0YXJiaXRlci5hdXRoMC5jb20vIiwic3ViIjoiYXV0aDB8NWJlMzQzZGY3MDI1NDA2MjM3ODIwYjg1IiwiYXVkIjoiUHJFMzlBZHRHTVBJNGdLMmhSdldaMmFEUmFyZnBnN0EiLCJpYXQiOjE1NTU0NTQ3NzAsImV4cCI6MTU1NTQ5MDc3MH0.axw45-ms_LVIS_WsUdcCryZeOwpZVAn95zbUm9WO23bpIja7QaR1h6_Emb9nUNJIk44vp-J-zwKIZd4j7bg5_vaVcJER4_rL8vlc6f5lJdZAU20yeisTT4q1YcwlvQhg7avWMUkZaiO3SgK0eJ3371Gm2gJgK2b21bnpzmUHQ0vS906GLGngaVzb3VEE_g4CgR4u6qmBQRwq3Z6DyRBq572Qhn3TXk_0Xvj43Q9TyYjV5ioou5Xe-3T5HHsCoUWqDp0BZ3bP9FlYFws9DffnFzf1yVtpwfk9shmAe8V6Fn9N0OjuS4LJP0Tc-I7adspJlYfB9BeTEci6MKn58OQCrw', 'scope': ['openid', 'profile', 'email', 'offline_access'], 'expires_in': 0, 'token_type': 'Bearer', 'expires_at': 1555465570.9597309} # NOQA
return stored
@pytest.fixture()
def mocked_storage(mocker, auth_token, expired_token):
def make_storage(authenticated=False):
if authenticated:
token = auth_token
else:
token = expired_token
class fake_storage:
def __init__(*args, **kwargs):
pass
def get(self, *args):
return token
def set(self, *args):
pass
def delete(self, *args):
pass
return fake_storage
return make_storage
@pytest.fixture()
def mocked_unauth_storage(mocker, mocked_storage):
mocker.patch('sfa_dash.session_storage',
new=mocked_storage())
@pytest.fixture()
def mocked_auth_storage(mocker, mocked_storage):
mocker.patch('sfa_dash.session_storage',
new=mocked_storage(True))
@pytest.fixture()
def app_unauth(mocked_unauth_storage):
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
return create_app('sfa_dash.config.TestConfig')
@pytest.fixture()
def app(mocked_auth_storage):
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
return create_app('sfa_dash.config.TestConfig')
@pytest.fixture()
def client(app):
yield app.test_client()
no_arg_routes_list = [
'/sites/',
'/observations/',
'/forecasts/single/',
'/forecasts/cdf/',
'/reports/',
'/aggregates/',
'/sites/create',
'/reports/deterministic/create',
'/reports/event/create',
'/aggregates/create',
]
@pytest.fixture(params=no_arg_routes_list)
def no_arg_route(request):
return request.param
admin_routes_list = [
'/admin/permissions/create/cdf_forecast_group',
'/admin/permissions/create/observation',
'/admin/permissions/create/forecast',
'/admin/permissions/create/report',
'/admin/permissions/create/site',
'/admin/roles/create',
'/admin/permissions/',
'/admin/roles/',
'/admin/users/',
]
@pytest.fixture(params=admin_routes_list)
def admin_route(request):
return request.param
admin_multiarg_route_list = [
'/admin/permissions/{permission_id}/remove/{object_id}',
'/admin/roles/{role_id}/remove/{permission_id}',
'/admin/users/{user_id}/remove/{role_id}',
]
@pytest.fixture(params=admin_multiarg_route_list)
def admin_multiarg_route(request):
def fn(object_id, permission_id, user_id, role_id):
return request.param.format(
object_id=object_id,
permission_id=permission_id,
user_id=user_id,
role_id=role_id)
return fn
user_id_route_list = [
'/admin/users/{user_id}',
'/admin/users/{user_id}/add/',
]
@pytest.fixture(params=user_id_route_list)
def user_id_route(request):
def fn(user_id):
return request.param.format(user_id=user_id)
return fn
role_id_route_list = [
'/admin/roles/{role_id}',
'/admin/roles/{role_id}/delete',
'/admin/roles/{role_id}/add/',
'/admin/roles/{role_id}/grant/',
]
@pytest.fixture(params=role_id_route_list)
def role_id_route(request):
def fn(role_id):
return request.param.format(role_id=role_id)
return fn
permission_id_route_list = [
'/admin/permissions/{permission_id}',
'/admin/permissions/{permission_id}/delete',
'/admin/permissions/{permission_id}/add',
]
@pytest.fixture(params=permission_id_route_list)
def permission_id_route(request):
def fn(permission_id):
return request.param.format(permission_id=permission_id)
return fn
report_id_route_list = [
'/reports/{report_id}',
'/reports/{report_id}/delete',
]
@pytest.fixture(params=report_id_route_list)
def report_id_route(request):
def fn(report_id):
return request.param.format(report_id=report_id)
return fn
site_id_route_list = [
'/sites/{site_id}/',
'/sites/{site_id}/delete',
'/sites/{site_id}/forecasts/single/create',
'/sites/{site_id}/forecasts/cdf/create',
'/sites/{site_id}/observations/create',
'/sites/{site_id}/observations/create',
]
@pytest.fixture(params=site_id_route_list)
def site_id_route(request):
def fn(site_id):
return request.param.format(site_id=site_id)
return fn
observation_id_route_list = [
'/observations/{observation_id}',
'/observations/{observation_id}/delete',
]
@pytest.fixture(params=observation_id_route_list)
def observation_id_route(request):
def fn(observation_id):
return request.param.format(observation_id=observation_id)
return fn
forecast_id_route_list = [
'/forecasts/single/{forecast_id}',
'/forecasts/single/{forecast_id}/delete',
]
@pytest.fixture(params=forecast_id_route_list)
def forecast_id_route(request):
def fn(forecast_id):
return request.param.format(forecast_id=forecast_id)
return fn
cdf_forecast_id_route_list = [
'/forecasts/cdf/{forecast_id}',
'/forecasts/cdf/{forecast_id}/delete',
]
@pytest.fixture(params=cdf_forecast_id_route_list)
def cdf_forecast_id_route(request):
def fn(forecast_id):
return request.param.format(forecast_id=forecast_id)
return fn
cdf_forecast_single_id_routes_list = [
'/forecasts/cdf/single/{forecast_id}',
]
@pytest.fixture(params=cdf_forecast_single_id_routes_list)
def cdf_forecast_single_id_route(request):
def fn(forecast_id):
return request.param.format(forecast_id=forecast_id)
return fn
aggregate_id_route_list = [
'/aggregates/{aggregate_id}',
'/aggregates/{aggregate_id}/delete',
'/aggregates/{aggregate_id}/add',
'/aggregates/{aggregate_id}/forecasts/single/create',
'/aggregates/{aggregate_id}/forecasts/cdf/create',
]
@pytest.fixture(params=aggregate_id_route_list)
def aggregate_id_route(request):
def fn(aggregate_id):
return request.param.format(aggregate_id=aggregate_id)
return fn
clone_route_list = [
'/sites/{site_id}/clone',
'/observations/{observation_id}/clone',
'/forecasts/single/{forecast_id}/clone',
]
@pytest.fixture(params=clone_route_list)
def clone_route(request):
def fn(uuids):
# NOTE: expects a dict of all possible ids to use for formatting
return request.param.format(**uuids)
return fn
@pytest.fixture()
def missing_id():
return '7d2c3208-5243-11e9-8647-d663bd873d93'
@pytest.fixture()
def observation_id():
return '123e4567-e89b-12d3-a456-426655440000'
@pytest.fixture()
def cdf_forecast_group_id():
return 'ef51e87c-50b9-11e9-8647-d663bd873d93'
@pytest.fixture()
def cdf_forecast_id():
return '633f9396-50bb-11e9-8647-d663bd873d93'
@pytest.fixture()
def forecast_id():
return '11c20780-76ae-4b11-bef1-7a75bdc784e3'
@pytest.fixture()
def site_id():
return '123e4567-e89b-12d3-a456-426655440001'
@pytest.fixture()
def site_id_plant():
return '123e4567-e89b-12d3-a456-426655440002'
@pytest.fixture()
def test_orgid():
return 'b76ab62e-4fe1-11e9-9e44-64006a511e6f'
@pytest.fixture()
def user_id():
return '0c90950a-7cca-11e9-a81f-54bf64606445'
@pytest.fixture()
def aggregate_id():
return '458ffc27-df0b-11e9-b622-62adb5fd6af0'
@pytest.fixture()
def report_id():
return '9f290dd4-42b8-11ea-abdf-f4939feddd82'
@pytest.fixture
def all_metadata_ids(
observation_id, forecast_id, cdf_forecast_group_id, cdf_forecast_id,
site_id, site_id_plant, aggregate_id, report_id):
return {
'observation_id': observation_id,
'forecast_id': forecast_id,
'cdf_forecast_group_id': cdf_forecast_group_id,
'cdf_forecast_id': cdf_forecast_id,
'site_id': site_id,
'site_id_plant': site_id_plant,
'aggregate_id': aggregate_id,
'report_id': report_id,
}
@pytest.fixture()
def test_url(app):
def fn(view):
with app.test_request_context():
return url_for(view, _external=True)
return fn
@pytest.fixture(scope='session')
def connection():
connection = pymysql.connect(
host=os.getenv('MYSQL_HOST', '127.0.0.1'),
port=int(os.getenv('MYSQL_PORT', '3306')),
user='root',
password='testpassword',
database='arbiter_data',
binary_prefix=True)
# with no connection.commit(), no data should stay in db
return connection
@pytest.fixture()
def cursor(connection):
connection.rollback()
return connection.cursor()
@pytest.fixture()
def dictcursor(connection):
connection.rollback()
return connection.cursor(cursor=pymysql.cursors.DictCursor)
@pytest.fixture()
def role_id(cursor):
cursor.execute(
'SELECT BIN_TO_UUID(id, 1) from arbiter_data.roles '
'WHERE name = "Test user role"')
role_id = cursor.fetchone()[0]
return role_id
@pytest.fixture()
def permission_id(cursor, role_id):
cursor.execute(
'SELECT BIN_TO_UUID(id, 1) FROM arbiter_data.permissions '
'WHERE id IN (SELECT permission_id FROM '
'arbiter_data.role_permission_mapping WHERE role_id '
'= UUID_TO_BIN(%s, 1) ) LIMIT 1', role_id)
permission_id = cursor.fetchone()[0]
return permission_id
@pytest.fixture()
def permission_object_type(cursor, permission_id):
cursor.execute(
'SELECT object_type FROM arbiter_data.permissions '
'WHERE id = UUID_TO_BIN(%s, 1)', permission_id)
return cursor.fetchone()[0]
@pytest.fixture()
def valid_permission_object_id(
observation_id, forecast_id, cdf_forecast_group_id, aggregate_id,
site_id, role_id, user_id, permission_id, report_id,
permission_object_type):
ot = permission_object_type
if ot == 'forecasts':
return forecast_id
if ot == 'observations':
return observation_id
if ot == 'cdf_forecasts':
return cdf_forecast_group_id
    if ot == 'aggregates':
return aggregate_id
if ot == 'sites':
return site_id
if ot == 'reports':
return report_id
if ot == 'users':
return user_id
if ot == 'permissions':
return permission_id
if ot == 'roles':
return role_id
@pytest.fixture()
def site():
return {
'created_at': '2019-03-01T11:44:38+00:00',
'elevation': 595.0,
'extra_parameters': '{"network_api_abbreviation": "AS","network": "University of Oregon SRML","network_api_id": "94040"}', # noqa
'latitude': 42.19,
'longitude': -122.7,
'modeling_parameters': {'ac_capacity': None,
'ac_loss_factor': None,
'axis_azimuth': None,
'axis_tilt': None,
'backtrack': None,
'dc_capacity': None,
'dc_loss_factor': None,
'ground_coverage_ratio': None,
'max_rotation_angle': None,
'surface_azimuth': None,
'surface_tilt': None,
'temperature_coefficient': None,
'tracking_type': None},
'modified_at': '2019-03-01T11:44:38+00:00',
'name': 'Weather Station',
'provider': 'Organization 1',
'site_id': '123e4567-e89b-12d3-a456-426655440001',
'timezone': 'Etc/GMT+8'}
@pytest.fixture()
def site_with_modeling_params():
return {
'created_at': '2019-03-01T11:44:46+00:00',
'elevation': 786.0,
'extra_parameters': '',
'latitude': 43.73403,
'longitude': -96.62328,
'modeling_parameters': {
'ac_capacity': 0.015,
'ac_loss_factor': 0.0,
'axis_azimuth': None,
'axis_tilt': None,
'backtrack': None,
'dc_capacity': 0.015,
'dc_loss_factor': 0.0,
'ground_coverage_ratio': None,
'max_rotation_angle': None,
'surface_azimuth': 180.0,
'surface_tilt': 45.0,
'temperature_coefficient': -0.2,
'tracking_type': 'fixed'},
'modified_at': '2019-03-01T11:44:46+00:00',
'name': 'Power Plant 1',
'provider': 'Organization 1',
'site_id': '123e4567-e89b-12d3-a456-426655440002',
'timezone': 'Etc/GMT+6'}
@pytest.fixture()
def observation():
return {
'_links': {'site': 'http://localhost:5000/sites/123e4567-e89b-12d3-a456-426655440001'}, # noqa
'created_at': '2019-03-01T12:01:39+00:00',
'extra_parameters': '{"instrument": "Ascension Technology Rotating Shadowband Pyranometer", "network": "UO SRML"}', # noqa
'interval_label': 'beginning',
'interval_length': 5,
'interval_value_type': 'interval_mean',
'modified_at': '2019-03-01T12:01:39+00:00',
'name': 'GHI Instrument 1',
'observation_id': '123e4567-e89b-12d3-a456-426655440000',
'provider': 'Organization 1',
'site_id': '123e4567-e89b-12d3-a456-426655440001',
'uncertainty': 0.1,
'variable': 'ghi'}
@pytest.fixture()
def forecast():
return {
'_links': {'aggregate': None,
'site': 'http://localhost:5000/sites/123e4567-e89b-12d3-a456-426655440001'}, # noqa
'aggregate_id': None,
'created_at': '2019-03-01T11:55:37+00:00',
'extra_parameters': '',
'forecast_id': '11c20780-76ae-4b11-bef1-7a75bdc784e3',
'interval_label': 'beginning',
'interval_length': 5,
'interval_value_type': 'interval_mean',
'issue_time_of_day': '06:00',
'lead_time_to_start': 60,
'modified_at': '2019-03-01T11:55:37+00:00',
'name': 'DA GHI',
'provider': 'Organization 1',
'run_length': 1440,
'site_id': '123e4567-e89b-12d3-a456-426655440001',
'variable': 'ghi'}
@pytest.fixture()
def cdf_forecast():
return {
'_links': {'site': 'http://localhost:5000/sites/123e4567-e89b-12d3-a456-426655440001'}, # noqa
'aggregate_id': None,
'axis': 'y',
'constant_values': [{'_links': {'timerange': 'http://localhost:5000/forecasts/cdf/single/633f9396-50bb-11e9-8647-d663bd873d93/values/timerange', # noqa
'values': 'http://localhost:5000/forecasts/cdf/single/633f9396-50bb-11e9-8647-d663bd873d93/values'}, # noqa
'constant_value': 5.0,
'forecast_id': '633f9396-50bb-11e9-8647-d663bd873d93'},
{'_links': {'timerange': 'http://localhost:5000/forecasts/cdf/single/633f9864-50bb-11e9-8647-d663bd873d93/values/timerange', # noqa
'values': 'http://localhost:5000/forecasts/cdf/single/633f9864-50bb-11e9-8647-d663bd873d93/values'}, # noqa
'constant_value': 20.0,
'forecast_id': '633f9864-50bb-11e9-8647-d663bd873d93'},
{'_links': {'timerange': 'http://localhost:5000/forecasts/cdf/single/633f9b2a-50bb-11e9-8647-d663bd873d93/values/timerange', # noqa
'values': 'http://localhost:5000/forecasts/cdf/single/633f9b2a-50bb-11e9-8647-d663bd873d93/values'}, # noqa
'constant_value': 50.0,
'forecast_id': '633f9b2a-50bb-11e9-8647-d663bd873d93'},
{'_links': {'timerange': 'http://localhost:5000/forecasts/cdf/single/633f9d96-50bb-11e9-8647-d663bd873d93/values/timerange', # noqa
'values': 'http://localhost:5000/forecasts/cdf/single/633f9d96-50bb-11e9-8647-d663bd873d93/values'}, # noqa
'constant_value': 80.0,
'forecast_id': '633f9d96-50bb-11e9-8647-d663bd873d93'},
{'_links': {'timerange': 'http://localhost:5000/forecasts/cdf/single/633fa548-50bb-11e9-8647-d663bd873d93/values/timerange', # noqa
'values': 'http://localhost:5000/forecasts/cdf/single/633fa548-50bb-11e9-8647-d663bd873d93/values'}, # noqa
'constant_value': 95.0,
'forecast_id': '633fa548-50bb-11e9-8647-d663bd873d93'}],
'created_at': '2019-03-02T14:55:37+00:00',
'extra_parameters': '',
'forecast_id': 'ef51e87c-50b9-11e9-8647-d663bd873d93',
'interval_label': 'beginning',
'interval_length': 5,
'interval_value_type': 'interval_mean',
'issue_time_of_day': '06:00',
'lead_time_to_start': 60,
'modified_at': '2019-03-02T14:55:37+00:00',
'name': 'DA GHI',
'provider': 'Organization 1',
'run_length': 1440,
'site_id': '123e4567-e89b-12d3-a456-426655440001',
'variable': 'ghi'}
@pytest.fixture()
def aggregate():
return {
'aggregate_id': '458ffc27-df0b-11e9-b622-62adb5fd6af0',
'aggregate_type': 'mean',
'created_at': '2019-09-24T12:00:00+00:00',
'description': 'ghi agg',
'extra_parameters': 'extra',
'interval_label': 'ending',
'interval_length': 60,
'interval_value_type': 'interval_mean',
'modified_at': '2019-09-24T12:00:00+00:00',
'name': 'Test Aggregate ghi',
'observations': [
{'_links': {'observation': 'http://localhost:5000/observations/123e4567-e89b-12d3-a456-426655440000/metadata'}, # noqa
'created_at': '2019-09-25T00:00:00+00:00',
'effective_from': '2019-01-01T00:00:00+00:00',
'effective_until': None,
'observation_deleted_at': None,
'observation_id': '123e4567-e89b-12d3-a456-426655440000'},
{'_links': {'observation': 'http://localhost:5000/observations/e0da0dea-9482-4073-84de-f1b12c304d23/metadata'}, # noqa
'created_at': '2019-09-25T00:00:00+00:00',
'effective_from': '2019-01-01T00:00:00+00:00',
'effective_until': None,
'observation_deleted_at': None,
'observation_id': 'e0da0dea-9482-4073-84de-f1b12c304d23'},
{'_links': {'observation': 'http://localhost:5000/observations/b1dfe2cb-9c8e-43cd-afcf-c5a6feaf81e2/metadata'}, # noqa
'created_at': '2019-09-25T00:00:00+00:00',
'effective_from': '2019-01-01T00:00:00+00:00',
'effective_until': None,
'observation_deleted_at': None,
'observation_id': 'b1dfe2cb-9c8e-43cd-afcf-c5a6feaf81e2'}],
'provider': 'Organization 1',
'timezone': 'America/Denver',
'variable': 'ghi'}
@pytest.fixture()
def report():
return {
'created_at': '2020-01-22T13:48:00+00:00',
'modified_at': '2020-01-22T13:50:00+00:00',
'provider': 'Organization 1',
'raw_report': {
'data_checksum': None,
'generated_at': '2019-07-01T12:00:00+00:00',
'messages': [
{'function': 'fcn',
'level': 'error',
'message': 'FAILED',
'step': 'dunno'}],
'metrics': [],
'plots': None,
'processed_forecasts_observations': [],
'timezone': 'Etc/GMT+8',
'versions': []},
'report_id': '9f290dd4-42b8-11ea-abdf-f4939feddd82',
'report_parameters': {
'categories': ['total', 'date'],
'end': '2019-06-01T06:59:00+00:00',
'filters': [{'quality_flags': ['USER FLAGGED'],
'discard_before_resample': True,
'resample_threshold_percentage': resample_threshold,
},
{'quality_flags': ['STALE VALUES'],
'discard_before_resample': True,
'resample_threshold_percentage': resample_threshold,
}],
'metrics': ['mae', 'rmse'],
'name': 'NREL MIDC OASIS GHI Forecast Analysis',
'object_pairs': [
{'forecast': '11c20780-76ae-4b11-bef1-7a75bdc784e3',
'observation': '123e4567-e89b-12d3-a456-426655440000',
'reference_forecast': None,
'uncertainty': None,
'forecast_type': 'forecast',
}],
'start': '2019-04-01T07:00:00+00:00',
'costs': [{
'name': 'example cost',
'type': 'constant',
'parameters': {
"cost": 1.1,
"aggregation": "sum",
"net": False,
},
}],
},
'status': 'failed',
'values': [
{'id': 'a2b6ed14-42d0-11ea-aa3c-f4939feddd82',
'object_id': '123e4567-e89b-12d3-a456-426655440000',
'processed_values': 'superencodedvalues'}]
}
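# --- Hedged example (added for illustration; not part of the original fixtures module) ---
# Minimal sketch of a test consuming the metadata fixtures defined above; the
# assertions simply restate values already present in the observation fixture.
def test_observation_fixture_sketch(observation):
    assert observation['variable'] == 'ghi'
    assert observation['interval_length'] == 5
    assert observation['site_id'] == '123e4567-e89b-12d3-a456-426655440001'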
|
# ---------------------------------------------------------------------
# Alarm Rule
# ---------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
from collections import defaultdict
from typing import Optional, DefaultDict, Set, List, Iterable, Pattern, Tuple
from dataclasses import dataclass
# Third-party modules
from jinja2 import Template
# NOC modules
from noc.fm.models.alarmrule import AlarmRule as CfgAlarmRule
from noc.fm.models.alarmclass import AlarmClass
from noc.fm.models.activealarm import ActiveAlarm
DEFAULT_GROUP_CLASS = "Group"
@dataclass
class Match(object):
labels: Set[str]
exclude_labels: Optional[Set[str]]
alarm_class: Optional[AlarmClass]
reference_rx: Optional[Pattern]
@dataclass
class Group(object):
reference_template: Template
alarm_class: AlarmClass
title_template: Template
labels: Optional[List[str]] = None
min_threshold: int = 0
window: int = 0
@dataclass
class GroupItem(object):
reference: str
alarm_class: AlarmClass
title: str
labels: Optional[List[str]] = None
min_threshold: int = 0
window: int = 0
class AlarmRule(object):
_default_alarm_class: Optional[AlarmClass] = None
def __init__(self):
self.match: List[Match] = []
self.groups: List[Group] = []
@classmethod
def try_from(cls, rule_cfg: CfgAlarmRule) -> Optional["AlarmRule"]:
"""
Generate rule from config
"""
rule = AlarmRule()
# Add matches
for match in rule_cfg.match:
rule.match.append(
Match(
labels=set(match.labels),
alarm_class=match.alarm_class,
                    exclude_labels=set(match.exclude_labels) if match.exclude_labels else None,
reference_rx=re.compile(match.reference_rx) if match.reference_rx else None,
)
)
# Add groups
for group in rule_cfg.groups:
rule.groups.append(
Group(
reference_template=Template(group.reference_template),
alarm_class=group.alarm_class
if group.alarm_class
else cls.get_default_alarm_class(),
title_template=Template(group.title_template),
min_threshold=group.min_threshold or 0,
window=group.window or 0,
labels=group.labels or [],
)
)
return rule
def is_match(self, alarm: ActiveAlarm) -> bool:
"""
Check if alarm matches the rule
"""
if not self.match:
return True
lset = set(alarm.effective_labels)
for match in self.match:
# Match against labels
if match.exclude_labels and match.exclude_labels.issubset(lset):
continue
if not match.labels.issubset(lset):
continue
# Match against alarm class
if match.alarm_class and match.alarm_class != alarm.alarm_class:
continue
            # Match against reference regex
if (
getattr(alarm, "raw_reference", None)
and match.reference_rx
and not match.reference_rx.search(alarm.raw_reference)
):
continue
return True
return False
@classmethod
def get_default_alarm_class(cls) -> AlarmClass:
if not cls._default_alarm_class:
cls._default_alarm_class = AlarmClass.get_by_name(DEFAULT_GROUP_CLASS)
assert cls._default_alarm_class
return cls._default_alarm_class
def iter_groups(self, alarm: ActiveAlarm) -> Iterable[GroupItem]:
"""
Render group templates
"""
ctx = {"alarm": alarm}
for group in self.groups:
yield GroupItem(
reference=group.reference_template.render(**ctx),
alarm_class=group.alarm_class,
title=group.title_template.render(**ctx),
labels=group.labels,
min_threshold=group.min_threshold,
window=group.window,
)
class AlarmRuleSet(object):
"""
Full set of alarm rules
"""
def __init__(self):
self._label_rules: DefaultDict[Tuple[str, ...], List[AlarmRule]] = defaultdict(list)
self.label_rules: List[Tuple[Set[str], List[AlarmRule]]] = []
def add(self, rule: CfgAlarmRule):
"""
Add rule to set
"""
if not rule.is_active:
return
new_rule = AlarmRule.try_from(rule)
if not new_rule:
return
if rule.match:
for match in rule.match:
lset = tuple(sorted(match.labels))
self._label_rules[lset].append(new_rule)
else:
self._label_rules[tuple()].append(new_rule)
def compile(self):
"""
Finalize rules
"""
self.label_rules = [(set(k), v) for k, v in self._label_rules.items()]
self._label_rules = defaultdict(list)
def iter_candidates(self, alarm: ActiveAlarm) -> Iterable[AlarmRule]:
"""
Iterable candidate rules with matching labels
"""
lset = set(alarm.effective_labels)
seen: Set[AlarmRule] = set()
for mset, rules in self.label_rules:
if not mset.issubset(lset):
continue
for rule in rules:
if rule in seen:
continue
yield rule
seen.add(rule)
def iter_rules(self, alarm: ActiveAlarm) -> Iterable[AlarmRule]:
"""
Iterate all matched rules
"""
for rule in self.iter_candidates(alarm):
if rule.is_match(alarm):
yield rule
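# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assumes `cfg_rules` is an iterable of stored AlarmRule configs (e.g. loaded from
# the NOC database) and `alarm` is an ActiveAlarm to be grouped.
def example_build_and_apply(cfg_rules: Iterable[CfgAlarmRule], alarm: ActiveAlarm) -> List[GroupItem]:
    ruleset = AlarmRuleSet()
    for cfg in cfg_rules:
        ruleset.add(cfg)
    ruleset.compile()
    groups: List[GroupItem] = []
    for rule in ruleset.iter_rules(alarm):
        groups.extend(rule.iter_groups(alarm))
    return groups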
|
#!/bin/env python
from sys import argv
import os
REPEAT = 865
RULES = [(18,19,20,21,22,23,24,25),(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18),(26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,1)] + \
    [tuple(list(range(i,i+17))+[i-17]) for i in range(43,825,17)] + \
[(825,826,827,828,829,830,831,832,833,834,835,836,837,838,839,840,841,808), \
(842,843,844,845,846,847,848,849,850,851,852,853,854,855,856,857,858,825),(842,859,860,861,862,863,864,865)]
mass = (8,12,12,12,12,12,12,12,12,1,1,1,1,1,1,1,1,8)
MASS = [(8,12,12,1,1,1,1,1)] + [mass[:] for i in range(1,51)] + [(8,12,12,1,1,1,1,1)]
FORMAT = "%.4f"
def read_frame(f):
if len(ATOMS) > 0:
del ATOMS[:len(ATOMS)]
if len(BOXLINES) > 0:
del BOXLINES[:len(BOXLINES)]
nAtoms = 0
line = f.readline()
while line:
if line.startswith("Generated"):
line = f.readline()
nAtoms = int(line.strip())
else:
count = 1
while count <= nAtoms:
strs = line.strip().split()
length = len(strs)
if length == 6:
ATOMS.append((float(strs[3])*10, float(strs[4])*10, float(strs[5])*10))
if length == 5:
ATOMS.append((float(strs[2])*10, float(strs[3])*10, float(strs[4])*10))
count = count + 1
line = f.readline()
strs = line.strip().split()
BOXLINES.append(float(strs[0])*10)
BOXLINES.append(float(strs[1])*10)
BOXLINES.append(float(strs[2])*10)
return True
line = f.readline()
return False
def convert(out):
    # each block of REPEAT atoms is mapped to len(RULES) mass-weighted coarse-grained sites
    for i in range(len(ATOMS) // REPEAT):
for j in range(len(RULES)):
t_x = 0.0
t_y = 0.0
t_z = 0.0
t_mass = 0.0
for k in range(len(RULES[j])):
t_x = t_x + ATOMS[RULES[j][k]-1+i*REPEAT][0] * MASS[j][k]
t_y = t_y + ATOMS[RULES[j][k]-1+i*REPEAT][1] * MASS[j][k]
t_z = t_z + ATOMS[RULES[j][k]-1+i*REPEAT][2] * MASS[j][k]
t_mass = t_mass + MASS[j][k]
t_x = float(t_x) / t_mass
t_y = float(t_y) / t_mass
t_z = float(t_z) / t_mass
f_x = FORMAT % t_x
f_y = FORMAT % t_y
f_z = FORMAT % t_z
if int(j+1)%52 == 1 or int(j+1)%52 == 0:
l=1
else:
l=2
out.write(str(len(RULES)*i+j+1) + " " + str(l) + " " + f_x + " " + f_y + " " + f_z + "\n")
time = 0
f = open(argv[1], 'r')
o = open("PPE-CG.lammpstrj", 'w')
ATOMS = []
BOXLINES = []
while read_frame(f):
o.write("ITEM: TIMESTEP\n")
o.write(str(time) + "\n")
o.write("ITEM: NUMBER OF ATOMS\n")
    o.write(str(len(ATOMS) // REPEAT * len(RULES)) + "\n")
o.write("ITEM: BOX BOUNDS pp pp pp\n")
o.write("0 " + str(BOXLINES[0]) + "\n")
o.write("0 " + str(BOXLINES[1]) + "\n")
o.write("0 " + str(BOXLINES[2]) + "\n")
o.write("ITEM: ATOMS id type xu yu zu\n")
convert(o)
time = time + 1000
f.close()
o.close()
|
'''
Author: Eric P. Nichols
Date: Feb 8, 2008.
Board class.
Board data:
1=white, -1=black, 0=empty
first dim is column , 2nd is row:
pieces[1][7] is the square in column 2,
at the opposite end of the board in row 8.
Squares are stored and manipulated as (x,y) tuples.
x is the column, y is the row.
'''
import numpy as np
class Board():
# list of all 6 directions on the board, as (x,y) offsets
__directions = [(2,0),(-2,0),(1,1),(1,-1),(-1,1),(-1,-1)]
# list of all entries of the matrix, which are actually spots on the board
actBoard = [(2,3),(3,2),(3,4),(4,1),(4,3),(4,5),(5,2),(5,4),(6,1),(6,3),(6,5),(7,2),(7,4),(8,1),(8,3),(8,5),(9,2),(9,4),(10,3)]
# list of all starting Points on the board
startingPoints = [(0,3),(1,2),(1,4),(2,1),(2,5),(3,0),(3,6),(5,0),(5,6),(7,0),(7,6),(9,0),(9,6),(10,1),(10,5),(11,2),(11,4),(12,3)]
# dictionary for the translation of the spot names into the entries of the matrix (as tuple)
move_dict = {"a1": (9,0), "a2": (7,0), "a3": (5,0), "a4": (3,0), "b1": (10,1), "b2": (8,1), "b3": (6,1), "b4": (4,1), "b5": (2,1), "c1": (11,2),
"c2": (9,2), "c5": (3,2), "c6": (1,2), "d1": (12,3), "d2": (10,3), "d6": (2,3), "d7": (0,3), "e1": (11,4), "e2": (9,4), "e5": (3,4),
"e6": (1,4), "f1": (10,5), "f2": (8,5), "f3": (6,5), "f4": (4,5), "f5": (2,5), "g1": (9,6), "g2": (7,6), "g3": (5,6), "g4": (3,6)}
def __init__(self, n):
"Set up initial board configuration."
self.n = n
# Create the empty board array.
self.pieces = [None]*self.n # rows: mini: 13, normal: 17
for i in range(self.n):
self.pieces[i] = [0]*(int(self.n//(1.8))) # columns: mini: 13//1.8=7 normal: 17//1.8=9
#Set up reserve in board corner
self.pieces[0][0] = 5
self.pieces[0][2] = 5
# Set up the initial 6 pieces.
self.pieces[4][1] = 1
self.pieces[4][5] = 1
self.pieces[10][3] = 1
self.pieces[8][1] = -1
self.pieces[8][5] = -1
self.pieces[2][3] = -1
"""
        #Test case Sym
self.pieces[8][1] = 1
self.pieces[10][3] = 1
self.pieces[4][5] = 1
self.pieces[2][3] = -1
self.pieces[7][4] = -1
self.pieces[8][5] = -1
        #Test case A
self.pieces[8][1] = -1
self.pieces[7][2] = -1
self.pieces[4][3] = -1
self.pieces[10][3] = 1
self.pieces[8][3] = 1
self.pieces[4][5] = 1
self.pieces[5][4] = 1
        #Test case B
self.pieces[7][2] = 1
self.pieces[6][1] = 1
self.pieces[10][3] = 1
self.pieces[8][3] = -1
self.pieces[4][3] = -1
self.pieces[2][3] = -1
        #Test case C
self.pieces[4][1] = 1
self.pieces[5][2] = -1
self.pieces[10][3] = 1
self.pieces[4][3] = -1
self.pieces[2][3] = -1
        #Test case D
self.pieces[6][1] = -1
self.pieces[7][2] = -1
self.pieces[9][4] = 1
self.pieces[10][3] = -1
self.pieces[6][3] = -1
self.pieces[4][3] = -1
self.pieces[2][3] = 1
"""
# add [][] indexer syntax to the Board
def __getitem__(self, index):
return self.pieces[index]
def __setitem__(self, index, color):
self.pieces[index] = color
def get_actBoard(self):
if self.n == 13:
return self.actBoard
else:
pass # return actBoard + ext
def get_startingPoints(self):
if self.n == 13:
return self.startingPoints
else:
pass # return actBoard + ext
@staticmethod
def translate_move(move):
"""Returns a tuple of the spot names as a tuple of the matrix
"""
try:
move_new = (Board.move_dict[move[0]],Board.move_dict[move[1]])
return move_new
except KeyError:
            print('Invalid Field')
def get_legal_moves(self):
"""Returns all the legal moves
"""
moves = set() # stores the legal moves.
# discover the possible moves for every starting point
for start in self.startingPoints:
            newmoves = self.get_moves_for_dot(start)[0]
moves.update(newmoves)
return list(moves)
def get_legal_moves_binary(self):
"""Returns all the legal moves
"""
moves = [] # stores the legal moves.
# discover the possible moves for every starting point
for start in self.startingPoints:
newmoves = self.get_moves_for_dot(start)[2]
moves.extend(newmoves)
return moves
def get_all_moves(self):
"""Returns all the legal moves
"""
moves = [] # stores the legal moves.
# discover the possible moves for every starting point
for start in self.startingPoints:
newmoves = self.get_moves_for_dot(start)[1]
moves.extend(newmoves)
return moves
def get_moves_for_dot(self, dot):
"""Returns all the legal moves that use the given dot as a base.
"""
# search all possible directions.
legal_moves = []
all_moves = []
all_moves_binary = []
for direction in self.__directions:
target = tuple(np.add(dot, direction))
if target in self.actBoard:
move = (dot, target)
all_moves.append(move)
if self.check_move(target, direction):
legal_moves.append(move)
all_moves_binary.extend([1])
else:
all_moves_binary.extend([0])
# return the generated move list
return legal_moves, all_moves, all_moves_binary
def check_move(self, target, direction):
"""Returns True if there is a free field along the given direction
        if not, returns False because the move is not valid
"""
s = target
while s in self.actBoard:
if self[s] == 0:
return True
s = tuple(np.add(s, direction))
return False
def execute_move(self, action, curPlayer):
"""Performs the given move on the board; does not remove pieces!
color gives the color of the piece to play (1=white,-1=black)
"""
all_moves = self.get_all_moves()
move = all_moves[action]
start=move[0]
target=move[1]
direction = tuple(np.subtract(target, start))
s=target
# Runs up to a gap and places the piece there
while s in self.actBoard:
if self[s] == 0:
break
s = tuple(np.add(s, direction))
self[start]=curPlayer
# Runs in opposite direction and moves the pieces
while s in self.actBoard:
s_prev = tuple(np.subtract(s, direction))
s_prev_color = self[s_prev]
self[s]= s_prev_color
s = tuple(np.subtract(s, direction))
self[s]=0
# Decreases reserve
#players[color+1].dec_reserve()
def remove_lines(self, curPlayer):
"""Checks for each field whether a row of four results.
If so, removes the entire line
"""
        #check whether there are several rows of four; if so, remove the active color's row first, and if there are still several, the one that captures more
rows = []
add_reserve = [0, None, 0]
for spot in self.actBoard:
new_row = self.discover_row_of_4(spot)
if new_row and new_row not in rows:
rows.append(new_row)
while len(rows)>1:
            #several rows
            rows_of_color = [] #all rows of the current color (these take priority)
index_max = None
for row in rows:
row_color = self[list(row)[0]]
if row_color == curPlayer:
rows_of_color.append(row)
if len(rows_of_color)>1:
                #several rows of the active color
                #check which one captures the most
c = [None]*len(rows_of_color)
for index, row in enumerate(rows_of_color):
c[index] = self.get_hit_count(row)
index_max = np.argmax(c)
add_reserve = np.add(add_reserve, self.remove_line(rows_of_color[index_max]), where=[1,0,1])
elif len(rows_of_color)>0:
                #only one row of the active color
add_reserve = np.add(add_reserve, self.remove_line(rows_of_color[0]), where=[1,0,1])
else:
                #several rows of the other color and none of the active color
                #check which one captures the most
c = [None]*len(rows)
for index, row in enumerate(rows):
c[index] = self.get_hit_count(row)
index_max = np.argmax(c)
add_reserve = np.add(add_reserve, self.remove_line(rows[index_max]), where=[1,0,1])
            #check whether the rows are still valid
rows = self.check_rows(rows)
if len(rows)>0:
            #only one row (regardless of color)
add_reserve = np.add(add_reserve, self.remove_line(rows[0]), where=[1,0,1])
return add_reserve
def check_rows(self, rows):
rows_new = rows.copy()
for row in rows:
for spot in row:
if self[spot] == 0:
rows_new.remove(row)
break
return rows_new
def get_hit_count(self, row):
count = 0
row = list(row)
color_of_row = self[row[0]]
direction = tuple(np.subtract(row[0], row[1]))
s = row[0]
# Runs from the first of the 4 in one direction of the line
while s in self.actBoard:
if self[s] == 0:
break
else:
color = self[s]
if color != color_of_row:
count += 1
#self[s] = 0
s = tuple(np.add(s, direction))
# Runs in the opposite direction
s = tuple(np.subtract(row[0], direction))
while s in self.actBoard:
if self[s] == 0:
break
else:
color = self[s]
if color != color_of_row:
count += 1
#self[s] = 0
s = tuple(np.subtract(s, direction))
return count
def discover_row_of_4(self, spot):
"""Examines all directions for the given spot to see if a row of four exists
If found returns a array of the four, otherwise returns False
"""
color = self[spot]
for direction in self.__directions:
            row_of_4 = [] #set() #not used because a set is unordered
#row_of_4.update([spot])
row_of_4.append(spot)
s = tuple(np.add(spot, direction))
while s in self.actBoard:
if self[s] == 0 or self[s] != color:
break
else:
#row_of_4.update([s])
row_of_4.append(s)
s = tuple(np.add(s, direction))
if len(row_of_4)>2: #GipfMini: 3; Normal: 4
row_of_4.sort()
return row_of_4
def remove_line(self, row_of_4):
"""Removes the 4 pieces and the pieces that form a direct extension of these 4
The pieces with the color of the 4 return to his reserve
"""
add_reserve = [0, None, 0]
row_of_4 = list(row_of_4)
color_of_4 = self[row_of_4[0]]
direction = tuple(np.subtract(row_of_4[0], row_of_4[1]))
s = row_of_4[0]
# Runs from the first of the 4 in one direction of the line
while s in self.actBoard:
if self[s] == 0:
break
else:
color = self[s]
if color == color_of_4:
add_reserve[color+1]+=1
#players[color+1].inc_reserve()
self[s] = 0
s = tuple(np.add(s, direction))
# Runs in the opposite direction
s = tuple(np.subtract(row_of_4[0], direction))
while s in self.actBoard:
if self[s] == 0:
break
else:
color = self[s]
if color == color_of_4:
add_reserve[color+1]+=1
#players[color+1].inc_reserve()
self[s] = 0
s = tuple(np.subtract(s, direction))
return add_reserve
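# --- Hedged usage sketch (added for illustration; not part of the original class) ---
# Builds the 13-row mini board, translates a human-readable move via move_dict and
# inspects the generated move lists.
def example_board_usage():
    board = Board(13)
    start_target = Board.translate_move(("a1", "b2"))  # -> ((9, 0), (8, 1))
    legal = board.get_legal_moves()
    binary = board.get_legal_moves_binary()
    return start_target, len(legal), sum(binary)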
|
from setuptools import setup
from os import path
from glob import glob
def get_modules():
objdir = path.join(path.dirname(__file__),'bbots/*.py')
mods=[]
for file in glob(objdir):
name = path.splitext(path.basename(file))[0]
if name == '__init__':
continue
mods.append("bbots." + name)
return mods
setup(name="bbots",
version="0.1",
packages = ['bbots'],
py_modules = get_modules(),
scripts = ['bin/bbotsd.py'],
      author = 'Derrick Karimi',
      author_email = 'derrick.karimi@gmail.com',
      maintainer = 'Derrick Karimi',
      maintainer_email = 'derrick.karimi@gmail.com',
      description = 'bbots',
      url = 'https://github.com/AlwaysTraining/bbots',
      download_url = 'https://github.com/AlwaysTraining/bbots',
install_requires=['pexpect <= 2.4'])
|
import random
from datetime import datetime, timedelta
import config
from classes.command import command
from classes.module import Module
class Adventure(Module):
"""
Always be adventurous!
"""
def __init__(self, handler):
self.handler = handler
@command(description="Get the list of adventures", usage="adventures")
def adventures(self, ctx):
"""
Get the full list of adventures.
Hopefully you will find an adventure you like.
"""
ctx.print("These are the adventures you can go on.")
ctx.print("Success rate depends on difficulty of the adventure and your xp.")
ctx.print('Use "adventure <number>" to begin on an adventure!')
for i in range(len(config.adventure_names)):
ctx.print_empty()
ctx.print(f"{i + 1}. {config.adventure_names[i]}", indent=1)
ctx.print(config.adventure_descriptions[i], indent=1)
if i == 0:
difficulty = "Very easy"
elif i == 1:
difficulty = "Easy"
elif i == 2:
difficulty = "Medium"
elif i == 3:
difficulty = "Hard"
else:
difficulty = "Very hard"
ctx.print(f"Difficulty level: {difficulty}", indent=1)
ctx.print(f"Duration: {config.adventure_durations[i]}", indent=1)
success_rate = (
config.adventure_chances[i] + self.handler.character.level * 5
)
if success_rate > 95:
success_rate = 95
ctx.print(f"Success rate: {success_rate}%", indent=1)
@command(description="Go on an adventure!", usage="adventure <number>")
def adventure(self, ctx, number: int):
"""
Hooray! It is time to go on an adventure! Best of luck to you.
        Specify the adventure number, which you can find using the "adventures" command.
"""
if number <= 0 or number > len(config.adventure_names):
return ctx.print(
'Oops! Invalid adventure number. Use "adventures" to see all of them.'
)
if self.handler.character.adventure_end:
return ctx.print(
'Oops! You are already on an adventure. Check with "status".'
)
self.handler.character.adventure(number - 1)
ctx.print("Going on an adventure... Good luck!")
ctx.print(
'Check the status of the adventure and collect rewards with "status".'
)
@command(description="Cancel the adventure.", usage="cancel")
def cancel(self, ctx):
"""
Cancel the adventure. This is irreversible!
"""
if not self.handler.character.adventure_end:
return ctx.print(
"You are not on an adventure. Don't try to cancel nothing!"
)
self.handler.character.reset_adventure()
ctx.print("Adventure has been cancelled. Be sure to start on a new one.")
@command(description="Status of the adventure.", usage="status")
def status(self, ctx):
"""
Check the status of the adventure.
This command should be used after completion of an adventure to receive rewards too.
"""
if not self.handler.character.adventure_end:
return ctx.print("You are not on an adventure yet!")
if self.handler.character.adventure_end <= datetime.now():
rand = random.randint(1, 100)
success_rate = config.adventure_chances[self.handler.character.adventure_id]
success_rate += self.handler.character.level * 5
if success_rate > 95:
success_rate = 95
if rand > success_rate:
ctx.print("RIP. You found nothing during the adventure.")
else:
_type = random.choice(["sword", "shield"])
action = "attack"
if _type == "shield":
action = "defense"
min_level = (self.handler.character.adventure_id + 1) * 5
max_level = min_level + 5
level = random.randint(min_level, max_level)
self.handler.character.add_item(_type, level)
xp = random.randint(min_level, max_level) * 2
self.handler.character.add_xp(xp)
ctx.print(f"Congratulations! You gained {xp} XP.")
ctx.print(f"You also got a new {_type} with {action} {level}.")
self.handler.character.reset_adventure()
else:
remaining = self.handler.character.adventure_end - datetime.now()
remaining -= timedelta(microseconds=remaining.microseconds)
ctx.print(f"The time left till completion of the adventure is {remaining}.")
def setup(handler):
handler.add_module(Adventure(handler))
|
from common.celeryapp import get_celery_app
from common.measurement import Measure
app = get_celery_app()
@app.task
@Measure.timer(__name__, function_name_as_metric=True)
@Measure.counter(__name__, function_name_as_metric=True, count_once=True)
def echo(message='This is default queue'):
print(message)
|
name = "S - Big City Scroll"
description = "Draws stacked rectangles scaled to audio amplitude, scrolls across screen with history fade"
knob1 = "Scroll Direction"
knob2 = "Y Position"
knob3 = "History Opacity"
knob4 = "Color"
released = "September 7 2017"
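# --- Hedged illustration (hypothetical sketch; not the actual mode implementation) ---
# Rough pygame-style rendering of the described effect: rectangles scaled by the
# audio level, scrolled across the screen, with earlier frames faded for history.
# The draw(screen, audio_level, scroll_dir, y_pos, history_opacity) signature and
# all constants are assumptions.
def draw(screen, audio_level, scroll_dir, y_pos, history_opacity):
    import pygame
    fade = pygame.Surface(screen.get_size())
    fade.set_alpha(int(255 * (1.0 - history_opacity)))  # knob3: history opacity
    fade.fill((0, 0, 0))
    screen.blit(fade, (0, 0))  # darken previous frames instead of clearing them
    x = int(scroll_dir * pygame.time.get_ticks() * 0.05) % screen.get_width()
    height = max(1, int(audio_level * 100))
    for i in range(4):  # stacked rectangles
        rect = pygame.Rect(x, int(y_pos) + i * (height + 2), 20, height)
        pygame.draw.rect(screen, (255, 255, 255), rect)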
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - MoinMoin.packages tests
@copyright: 2005 MoinMoin:AlexanderSchremmer,
2007 Federico Lorenzi,
2010 MoinMoin:ReimarBauer
@license: GNU GPL, see COPYING for details.
"""
import os
import py
import tempfile
import zipfile
from datetime import datetime
from MoinMoin import user, wikiutil
from MoinMoin.action import AttachFile
from MoinMoin.action.PackagePages import PackagePages
from MoinMoin.packages import Package, ScriptEngine, MOIN_PACKAGE_FILE, ZipPackage, packLine, unpackLine
from MoinMoin._tests import become_trusted, become_superuser, create_page, nuke_page
from MoinMoin.Page import Page
from MoinMoin.PageEditor import PageEditor
class DebugPackage(Package, ScriptEngine):
""" Used for debugging, does not need a real .zip file. """
def __init__(self, request, filename, script=None):
Package.__init__(self, request)
ScriptEngine.__init__(self)
self.filename = filename
self.script = script or u"""moinmoinpackage|1
print|foo
ReplaceUnderlay|testdatei|TestSeite2
IgnoreExceptions|True
IgnoreExceptions|False
AddRevision|foofile|FooPage
AddRevision|foofile|FooPage
#foobar
"""
def extract_file(self, filename):
if filename == MOIN_PACKAGE_FILE:
return self.script.encode("utf-8")
else:
return "Hello world, I am the file " + filename.encode("utf-8")
def filelist(self):
return [MOIN_PACKAGE_FILE, "foo"]
def isPackage(self):
return True
class TestUnsafePackage:
""" Tests various things in the packages package. Note that this package does
not care to clean up and needs to run in a test wiki because of that. """
def setup_class(self):
if not getattr(self.request.cfg, 'is_test_wiki', False):
py.test.skip('This test needs to be run using the test wiki.')
def teardown_class(self):
nuke_page(self.request, "FooPage")
def testBasicPackageThings(self):
become_superuser(self.request)
myPackage = DebugPackage(self.request, 'test')
myPackage.installPackage()
assert myPackage.msg == u'foo\nFooPage added \n'
testseite2 = Page(self.request, 'TestSeite2')
assert testseite2.getPageText() == "Hello world, I am the file testdatei"
assert testseite2.isUnderlayPage()
class TestQuoting:
def testQuoting(self):
for line in ([':foo', 'is\\', 'ja|', u't|�', u'baAz�'], [], ['', '']):
assert line == unpackLine(packLine(line))
class TestRealCreation:
def testSearch(self):
package = PackagePages(self.request.rootpage.page_name, self.request)
assert package.searchpackage(self.request, "title:BadCon") == [u'BadContent']
def testListCreate(self):
package = PackagePages(self.request.rootpage.page_name, self.request)
temp = tempfile.NamedTemporaryFile(suffix='.zip')
package.collectpackage(['FrontPage'], temp)
assert zipfile.is_zipfile(temp.name)
def testAllCreate(self):
package = PackagePages(self.request.rootpage.page_name, self.request)
temp = tempfile.NamedTemporaryFile(suffix='.zip')
package.collectpackage(self.request.rootpage.getPageList(
include_underlay=False,
filter=lambda name: not wikiutil.isSystemPage(self.request, name)),
temp)
if not package:
py.test.skip("No user created pages in wiki!")
assert zipfile.is_zipfile(temp.name)
def testInvalidCreate(self):
package = PackagePages(self.request.rootpage.page_name, self.request)
temp = tempfile.NamedTemporaryFile(suffix='.zip')
package.collectpackage(['___//THIS PAGE SHOULD NOT EXIST\\___'], temp)
assert not zipfile.is_zipfile(temp.name)
class TestRealPackageInstallation:
def create_package(self, script, page=None):
# creates the package example zip file
userid = user.getUserIdentification(self.request)
COMPRESSION_LEVEL = zipfile.ZIP_DEFLATED
zip_file = tempfile.mkstemp(suffix='.zip')[1]
zf = zipfile.ZipFile(zip_file, "w", COMPRESSION_LEVEL)
if page:
timestamp = wikiutil.version2timestamp(page.mtime_usecs())
zi = zipfile.ZipInfo(filename="1", date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
zi.compress_type = COMPRESSION_LEVEL
zf.writestr(zi, page.get_raw_body().encode("utf-8"))
zf.writestr("1_attachment", "sample attachment")
zf.writestr(MOIN_PACKAGE_FILE, script.encode("utf-8"))
zf.close()
return zip_file
def testAttachments_after_page_creation(self):
become_trusted(self.request)
pagename = u'PackageTestPageCreatedFirst'
page = create_page(self.request, pagename, u"This page has not yet an attachments dir")
script = u"""MoinMoinPackage|1
AddRevision|1|%(pagename)s
AddAttachment|1_attachment|my_test.txt|%(pagename)s
Print|Thank you for using PackagePages!
""" % {"pagename": pagename}
zip_file = self.create_package(script, page)
package = ZipPackage(self.request, zip_file)
package.installPackage()
assert Page(self.request, pagename).exists()
assert AttachFile.exists(self.request, pagename, "my_test.txt")
nuke_page(self.request, pagename)
os.unlink(zip_file)
def testAttachments_without_page_creation(self):
become_trusted(self.request)
pagename = u"PackageAttachmentAttachWithoutPageCreation"
script = u"""MoinMoinPackage|1
AddAttachment|1_attachment|my_test.txt|%(pagename)s
Print|Thank you for using PackagePages!
""" % {"pagename": pagename}
zip_file = self.create_package(script)
package = ZipPackage(self.request, zip_file)
package.installPackage()
assert not Page(self.request, pagename).exists()
assert AttachFile.exists(self.request, pagename, "my_test.txt")
nuke_page(self.request, pagename)
os.unlink(zip_file)
coverage_modules = ['MoinMoin.packages']
|
import os
# first cd to a place that has lots (at least 200 GB) of space!
for year in range(2009, 2016):
for month in range(1, 13):
for color in 'yellow', 'green':
if color == 'green':
# green taxis only show up starting from 08/2013
if year <= 2012:
continue
if year == 2013 and month < 8:
continue
fileName = '%s_tripdata_%s-%02d.csv' % (color, year, month)
if not os.path.exists(fileName):
url = 'https://storage.googleapis.com/tlc-trip-data/%s/%s' % (year, fileName)
print('\nDOWNLOAD: %s' % url)
if os.system('wget --no-check-certificate %s' % url):
raise RuntimeError('failed')
|
# coding=utf-8
###############################################################################
#
# This file is part of pyglfw project which is subject to zlib license.
# See the LICENSE file for details.
#
# Copyright (c) 2013 Roman Valov <roman.valov@gmail.com>
#
###############################################################################
from ctypes import c_int, c_uint, c_char_p, c_void_p
from ctypes import c_float, c_double, c_ushort, c_ubyte
from ctypes import cast, py_object, CFUNCTYPE, POINTER, Structure
c_void = None
c_func = CFUNCTYPE
# ---- definition helper factory ----
class DeclareFunction(object):
def __init__(self, lib, functype):
self.lib = lib
self.fun = functype
self.dir = {}
def __call__(self, name, restype=c_void, *argtypes):
errcheck = None
if isinstance(restype, (list, tuple)):
errcheck = restype[1]
restype = restype[0]
paramflags = list(argtypes)
argtypes = list(argtypes)
for idx, arg in enumerate(argtypes):
if isinstance(arg, (list, tuple)):
argtypes[idx] = arg[0]
paramflags[idx] = arg[1:] and arg[1:] or (2,)
else:
argtypes[idx] = arg
paramflags[idx] = (1,)
signature = name, self.lib
func = self.fun(restype, *argtypes)(signature, tuple(paramflags))
if errcheck:
func.errcheck = errcheck
self.dir[name] = func
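# --- Hedged usage sketch (added for illustration; not part of the original bindings) ---
# Binds a couple of GLFW entry points through DeclareFunction; the shared library
# path is an assumption and platform specific.
def _example_declare(libpath='libglfw.so.3'):
    from ctypes import CDLL
    lib = CDLL(libpath)
    declare = DeclareFunction(lib, c_func)
    declare('glfwInit', c_int)   # int glfwInit(void)
    declare('glfwTerminate')     # void glfwTerminate(void)
    return declare.dir           # mapping of name -> bound foreign function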
# ---- ret/arg helper functions ----
class object_p(c_void_p):
@classmethod
def from_param(cls, obj):
return c_void_p(id(obj))
def ret_object(obj, func, args):
return cast(obj, py_object).value
def ret_list_p(icount):
def sz_array_p(obj, func, args):
return [obj[i] for i in range(args[icount].value)]
return sz_array_p
def ret_addr_p(obj, func, args):
return obj.contents
def allow_void_p_param(func):
def cast_from_void_p(cls, obj):
if isinstance(obj, c_void_p):
return cast(obj, cls)
elif not obj:
return None
else:
return func(obj)
return cast_from_void_p
def get_void_p(obj):
return cast(obj, c_void_p)
def _POINTER(cls):
cls = POINTER(cls)
cls.from_param = classmethod(allow_void_p_param(cls.from_param))
cls.get_void_p = get_void_p
return cls
def _FUNCPTR(cls):
cls.from_param = classmethod(allow_void_p_param(cls.from_param))
cls.get_void_p = get_void_p
return cls
def ret_ramp_p(obj, func, args):
_gramp = obj.contents
return (
[_gramp.red[i] for i in range(_gramp.size)],
[_gramp.green[i] for i in range(_gramp.size)],
[_gramp.blue[i] for i in range(_gramp.size)],
)
def cast_from_tuple(func):
def ramp_from_param(cls, obj):
if not (len(obj[0]) == len(obj[1]) == len(obj[2])):
raise ValueError("Object must be tuple of 3 same size sequences")
size = len(obj[0])
red = (c_ushort * size)(*obj[0])
green = (c_ushort * size)(*obj[1])
blue = (c_ushort * size)(*obj[2])
obj = GLFWgammaramp(size=size, red=red, green=green, blue=blue)
return func(obj)
return ramp_from_param
def _RAMPPTR(cls):
cls = POINTER(cls)
cls.from_param = classmethod(cast_from_tuple(cls.from_param))
return cls
# ---- datatype definitions ----
class GLFWwindow(Structure):
pass
GLFWwindowP = _POINTER(GLFWwindow)
class GLFWmonitor(Structure):
pass
GLFWmonitorP = _POINTER(GLFWmonitor)
class GLFWvidmode(Structure):
_fields_ = [
("width", c_int),
("height", c_int),
("redBits", c_int),
("greenBits", c_int),
("blueBits", c_int),
("refreshRate", c_int),
]
GLFWvidmodeP = POINTER(GLFWvidmode)
class GLFWgammaramp(Structure):
_fields_ = [
("red", POINTER(c_ushort)),
("green", POINTER(c_ushort)),
("blue", POINTER(c_ushort)),
("size", c_int)
]
GLFWgammarampP = _RAMPPTR(GLFWgammaramp)
# ---- callback prototypes ----
GLFWerrorfun = _FUNCPTR(c_func(c_void, c_int, c_char_p))
GLFWwindowposfun = _FUNCPTR(c_func(c_void, GLFWwindowP, c_int, c_int))
GLFWwindowsizefun = _FUNCPTR(c_func(c_void, GLFWwindowP, c_int, c_int))
GLFWwindowclosefun = _FUNCPTR(c_func(c_void, GLFWwindowP))
GLFWwindowrefreshfun = _FUNCPTR(c_func(c_void, GLFWwindowP))
GLFWwindowfocusfun = _FUNCPTR(c_func(c_void, GLFWwindowP, c_int))
GLFWwindowiconifyfun = _FUNCPTR(c_func(c_void, GLFWwindowP, c_int))
GLFWframebuffersizefun = _FUNCPTR(c_func(c_void, GLFWwindowP, c_int, c_int))
GLFWmousebuttonfun = _FUNCPTR(c_func(c_void, GLFWwindowP, c_int, c_int, c_int))
GLFWcursorposfun = _FUNCPTR(c_func(c_void, GLFWwindowP, c_double, c_double))
GLFWcursorenterfun = _FUNCPTR(c_func(c_void, GLFWwindowP, c_int))
GLFWscrollfun = _FUNCPTR(c_func(c_void, GLFWwindowP, c_double, c_double))
GLFWkeyfun = _FUNCPTR(c_func(c_void, GLFWwindowP, c_int, c_int, c_int, c_int))
GLFWcharfun = _FUNCPTR(c_func(c_void, GLFWwindowP, c_uint))
GLFWmonitorfun = _FUNCPTR(c_func(c_void, GLFWmonitorP, c_int))
|
import numpy
from numpy.random import normal
import matplotlib.pyplot as plt
def matlab_hist(v):
    plt.hist(v, bins=50, density=False)
plt.show()
def numpy_hist(v):
    (n,bins) = numpy.histogram(v, bins=50, density=False)
plt.plot(.5*(bins[1:]+bins[:-1]), n)
plt.show()
if __name__ == "__main__":
mu, sigma = 100, 0.5
v = normal(mu, sigma, 10000)
matlab_hist(v)
numpy_hist(v)
|
""""
Copyright © Krypton 2021 - https://github.com/kkrypt0nn
Description:
This is a template to create your own discord bot in python.
Version: 2.8
"""
import json
import os
import platform
import random
import sys
import asyncio
import sqlite3
from pytz import timezone
import discord
from discord.ext import commands, tasks
from discord.ext.commands import Bot
con = sqlite3.connect('db.db') # open the database
cur = con.cursor() # cursor object for the db
for row in cur.execute('select * from systemconfig'):
selectedtz = timezone(row[0])
fmt = '%Y-%m-%d %H:%M:%S %Z%z'
timef = '%H:%M:%S'
datef = '%Y-%m-%d'
if not os.path.isfile("config.json"):
sys.exit("'config.json' not found! Please add it and try again.")
else:
with open("config.json") as file:
config = json.load(file)
"""
Setup bot intents (events restrictions)
For more information about intents, please go to the following websites:
https://discordpy.readthedocs.io/en/latest/intents.html
https://discordpy.readthedocs.io/en/latest/intents.html#privileged-intents
Default Intents:
intents.messages = True
intents.reactions = True
intents.guilds = True
intents.emojis = True
intents.bans = True
intents.guild_typing = False
intents.typing = False
intents.dm_messages = False
intents.dm_reactions = False
intents.dm_typing = False
intents.guild_messages = True
intents.guild_reactions = True
intents.integrations = True
intents.invites = True
intents.voice_states = False
intents.webhooks = False
Privileged Intents (Needs to be enabled on dev page), please use them only if you need them:
intents.presences = True
intents.members = True
"""
intents = discord.Intents.default()
intents.members = True
bot = Bot(command_prefix=config["bot_prefix"], intents=intents)
# The code in this event is executed when the bot is ready
@bot.event
async def on_ready():
print(f"Logged in as {bot.user.name}")
print(f"Discord.py API version: {discord.__version__}")
print(f"Python version: {platform.python_version()}")
print(f"Running on: {platform.system()} {platform.release()} ({os.name})")
print("-------------------")
status_task.start()
while True:
await asyncio.sleep(600) # run every 10 minutes
await schedtasks()
# Setup the game status task of the bot
@tasks.loop(minutes=1.0)
async def status_task():
statuses = ["with you!", "with Kyle!", "with Miss Feet!", "with Edin's Bar!",
"with OneScreen!", f"{config['bot_prefix']}help", "with humans!"]
await bot.change_presence(activity=discord.Game(random.choice(statuses)))
async def schedtasks():
print("Running tasks...\nBot will become unresponsive for a few seconds")
channel = bot.get_channel(872632514022342656)
await channel.send("hello")
"""
select * from personal_reminders
select * from reminder_consent where username = ?
for reminder in personal_reminders:
if consent > 0:
send_reminder(user, consent) - this will need to act on the consent value
this will also need to account for the configured timezone
allow the mention of the players role?
"""
# Removes the default help command of discord.py to be able to create our custom help command.
bot.remove_command("help")
if __name__ == "__main__":
for file in os.listdir("./cogs"):
if file.endswith(".py"):
extension = file[:-3]
try:
bot.load_extension(f"cogs.{extension}")
print(f"Loaded extension '{extension}'")
except Exception as e:
exception = f"{type(e).__name__}: {e}"
print(f"Failed to load extension {extension}\n{exception}")
print("Loaded all known extensions!\n")
# The code in this event is executed every time someone sends a message, with or without the prefix
@bot.event
async def on_message(message):
# Ignores if a command is being executed by a bot or by the bot itself
if message.author == bot.user or message.author.bot:
return
# Ignores if a command is being executed by a blacklisted user
with open("blacklist.json") as file:
blacklist = json.load(file)
if message.author.id in blacklist["ids"]:
return
await bot.process_commands(message)
# The code in this event is executed every time a command has been *successfully* executed
@bot.event
async def on_command_completion(ctx):
fullCommandName = ctx.command.qualified_name
split = fullCommandName.split(" ")
executedCommand = str(split[0])
print(
f"Executed {executedCommand} command in {ctx.guild.name} (ID: {ctx.message.guild.id}) by {ctx.message.author} (ID: {ctx.message.author.id})")
# The code in this event is executed every time a valid commands catches an error
@bot.event
async def on_command_error(context, error):
if isinstance(error, commands.CommandOnCooldown):
minutes, seconds = divmod(error.retry_after, 60)
hours, minutes = divmod(minutes, 60)
hours = hours % 24
embed = discord.Embed(
title="Hey, please slow down!",
description=f"You can use this command again in {f'{round(hours)} hours' if round(hours) > 0 else ''} {f'{round(minutes)} minutes' if round(minutes) > 0 else ''} {f'{round(seconds)} seconds' if round(seconds) > 0 else ''}.",
color=0xE02B2B
)
await context.send(embed=embed)
elif isinstance(error, commands.MissingPermissions):
embed = discord.Embed(
title="Error!",
description="You are missing the permission `" + ", ".join(
error.missing_perms) + "` to execute this command!",
color=0xE02B2B
)
await context.send(embed=embed)
elif isinstance(error, commands.MissingRequiredArgument):
embed = discord.Embed(
title="Error!",
description=str(error).capitalize(),
# We need to capitalize because the command arguments have no capital letter in the code.
color=0xE02B2B
)
await context.send(embed=embed)
elif isinstance(error, commands.MissingRole):
embed = discord.Embed(
title="Error!",
description="You are missing the role {} to execute this command!\nAsk the admins for permission!".format(
error.missing_role),
color=0xE02B2B
)
await context.send(embed=embed)
elif isinstance(error, commands.NoPrivateMessage):
embed = discord.Embed(
title="No DMs",
description="You cannot DM the bot a command",
color=0xE02B2B
)
await context.message.author.send(embed=embed)
raise error
# Run the bot with the token
bot.run(config["token"])
|
#!/usr/bin/python3
print("Content-type: text/html\r\n\r\n")
from bs4 import BeautifulSoup
import requests
page = requests.get('http://scores.sify.com/?ref=deskcric')
soup = BeautifulSoup(page.content,'html.parser')
match_name=soup.find_all('a',{'class':'scroll'})[0].string
print("<h1>Current match running is:"+match_name+"</h1>")
match_location=soup.select_one('div.scoresbox-center span').string
live_match_link=soup.select_one('div.scoresbox-center h2 a').attrs.get('href')
page_curr_match = requests.get(live_match_link)
soup_live = BeautifulSoup(page_curr_match.content,'html.parser')
teams_info=soup_live.find_all('div',{'class':'team-live'})[0].find('h1').string
bat_team_info=soup_live.select_one('div.team-live-venue h2').string
# team name is the text before the first space; the score is everything after it
bat_team_name, _, bat_team_run = str(bat_team_info).partition(' ')
# run rate and the runs needed to win the match
try:
target_split=soup_live.find_all('div',{'class':'team-live-venue'})[0].find('p').string.split('|')
target=target_split[1]
except:
target='Match yet to start'
#finding team 1
team1_finder=soup_live.find_all('div',{'id':'team1'})[0].find_all('ul')
team1_squad=[]
for i in team1_finder[0].find_all('li'):
team1_squad.append(i.find_all('a')[0].string)
# finding the team2 members
team2_finder=soup_live.find_all('div',{'id':'team2'})[0].find_all('ul')
team2_squad=[]
for i in team2_finder[0].find_all('li'):
team2_squad.append(i.find_all('a')[0].string)
over=soup.find_all('h3',{'id':'batteamnameruns1'})[0].text.split('(')[1].split(')')[0]
batsman1=soup.find_all('h4',{'id':'batsmanbowlerdetails1'})[0].find_all('p')[0].find('a').string
try:
batsman2=str(soup.find_all('h4',{'id':'batsmanbowlerdetails1'})[0].find_all('p')[0]).split('|')[1].split('>')[1].split('<')[0]
except:
batsman2='Wait For Match To Start'
print('<!DOCTYPE html>')
print('<html>')
print('<head>')
print('<meta http-equiv="refresh" content="10">')
print('<link rel="stylesheet" type="text/css" href="my.css">')
print('<title> LIVE SCORE</title>')
print('<style>')
print('body "background-image:url(/usr/lib/cgi-bin/m.jpg)"')
print('</style>')
print('</head>')
print('<body>')
print('<b style="color: #f44167" >SCORE VIEWER</b>')
print('<marquee width = "100%"><b><font color="red">SEE MATCH SCORE HERE </font></b></marquee>')
print('<form >')
print('<div id="tm" style=" width:100%;">')
print('<div id="tm1" style="float: left"> Team 1: <label>'+bat_team_name+'</label></div>')
print('<div id="tm2" style="margin-right:20px; float: right"> Team 2: <input type = "text" style="align="right" name = "Team 2" /></div>')
print('</div>')
#print('Team 1: <input type = "text" align="left" name = "Team 1" />')
print('<br>')
print('<table border = "1" width="250" align="left" >')
print('<tr>')
print('<th><font color="blue"><b>Players :TEAM 1</b></font></th>')
print('</tr>')
for player in team1_squad:
    print('<tr>')
    print('<td><label>'+player+'</label></td>')
    print('</tr>')
print('</table>')
print('<style>')
print('table, td, th {border: 1px solid black;}')
print('table {border-collapse: collapse; width: 25%;}')
print('th {height: px;}')
print('</style>')
print('<table border="1" align="right" width="250" >')
print('<tr>')
print('<th><font color="blue"><b>Players : TEAM 2</b></font></th>')
print('</tr>')
for player in team2_squad:
    print('<tr>')
    print('<td><label>'+player+'</label></td>')
    print('</tr>')
print('</table>')
print(' <table align="center" border="1" width="500" height="150">')
print(' <tr>')
print(' <th>Bating team : <label>'+bat_team_name+'</label> </th> ')
print(' </tr>')
print(' <tr>')
print(' <td>Batsman1 : <label>'+batsman1+'</label></td>')
print(' </tr>')
print(' <tr>')
print(' <td>Batsman 2 : <label>'+batsman2+'</label></td>')
print(' </tr>')
print('</table>')
print('<table align="center" border="1" width="500">')
print('<tr>')
print(' <th>Bowling team : <input type = "text"name = "name"/> </th> ')
print(' </tr>')
print(' <tr>')
print(' <td>Bowler : <input type = "text"name = "name"/></td>')
print(' </tr>')
print('</table>')
print('<div align="center" style="vertical-align:bottom">')
print('<div align="center" style="vertical-align:bottom">')
print('<table>')
print('<tr>')
print(' <td><b style="color:#f44167" > SCORE</b> : <label>'+bat_team_run+'</label></td>')
print(' </tr>')
print(' <tr>')
print(' <td><b style="color:#f44167">OVER</b> : <label>'+over+'</label></td>')
print(' </tr>')
print('</table>')
print('</div>')
print('</div>')
print('</body>')
print('</html> ')
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import ComediansViewSet, JokesViewSet
router = DefaultRouter()
router.register("comedians", ComediansViewSet)
router.register("jokes", JokesViewSet)
urlpatterns = [
path("", include(router.urls)),
]
|
from setuptools.dist import Distribution
class BinaryDistribution(Distribution):
    def has_ext_modules(self):
return True
def wheel_name(**kwargs):
# create a fake distribution from arguments
dist = BinaryDistribution(attrs=kwargs)
# finalize bdist_wheel command
bdist_wheel_cmd = dist.get_command_obj('bdist_wheel')
bdist_wheel_cmd.ensure_finalized()
# assemble wheel file name
distname = bdist_wheel_cmd.wheel_dist_name
tag = '-'.join(bdist_wheel_cmd.get_tag())
return f'{distname}-{tag}.whl'
print(wheel_name(name='pymylib', version='0.0.1'), end='')
|
Python Program To Check Combo Sum
N integers are passed as input to the program. The program must print the count of the combination of the integers (1 or more integers) which add up to a given sum S.
Boundary Condition(s):
1 <= N <= 25
-10^5 <= S <= 10^5
Input Format:
The first line contains N.
The second line contains N integer values separated by a space.
The third line contains S.
Output Format:
The first line contains the combination count where the sum is S.
Example Input/Output 1:
Input:
4
10 20 50 20
50
Output:
2
Explanation:
Here the required sum S=50.
The two combinations whose sum is 50 are
10 20 20
50
Example Input/Output 2:
Input:
7
3 9 -10 7 -12 -2 -5
0
Output:
5
Explanation:
Here the required sum S=0.
The combinations whose sum is 0 are
3 9 -12
7 -2 -5
3 7 -10
3 9 7 -12 -2 -5
3 9 -10 -2
from itertools import combinations as c
n=int(input())
l=list(map(int,input().split()))
d=int(input())
p=0
# count every non-empty combination (sizes 1..N) whose sum equals S
for i in range(1,n+1):
    for j in c(l,i):
        if sum(j)==d:
            p+=1
print(p)
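# --- Hedged alternative sketch (illustration only): the same count via a simple
# --- include/exclude recursion over the values instead of itertools.combinations ---
def combo_count(values, target):
    # each value is either added to the running total or skipped; empty selection excluded
    def rec(i, total, picked):
        if i == len(values):
            return 1 if picked and total == target else 0
        return rec(i + 1, total + values[i], True) + rec(i + 1, total, picked)
    return rec(0, 0, False)
# combo_count([10, 20, 50, 20], 50) == 2 and combo_count([3, 9, -10, 7, -12, -2, -5], 0) == 5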
|
import numpy as np
import matplotlib.pyplot as plt  # needed by the histogram helpers below
def interpret_many(x_raw, relu, pool, all_wi_ai, best_trigrams = {}, n=5):
pool = pool.squeeze() #should be len(x_raw) x num_filters (128)
relu = relu.squeeze()
if len(x_raw)==1:
pool=np.expand_dims(pool, axis=0)
relu=np.expand_dims(relu, axis=0)
text_lists = []
for text in x_raw:
text_list = text.split()
text_lists.append(text_list)
top_n_neurons = []
for i in range(pool.shape[0]):
best_trigrams = interpret(text_lists[i],relu[i], pool[i], best_trigrams)
weights = all_wi_ai[i].T #2 x 128 --> 128 x 2
top_n_neurons.append([get_n_best_neurons(weights,5)])
return best_trigrams, top_n_neurons
def find_relu_index(relu, pooled_val, i):
#each thing in relu should be 128 length
#index represents the index (out of max seq len) that resulted in the pooled val
for ind, arr in enumerate(relu):
if arr[i]==pooled_val:
return ind
return None
def interpret(word_list, relu, pool, best_trigrams={}):
relu = relu.squeeze()
for i, pooled_val in enumerate(pool):
relu_index = find_relu_index(relu, pooled_val, i)
trigram = word_list[relu_index:relu_index+3]
if i in best_trigrams:
best_trigrams[i]+=[(pooled_val,[trigram])]
else:
best_trigrams[i]=[(pooled_val,[trigram])]
return best_trigrams
def get_best_n_for_each_neuron(best_trigrams,n):
best_n_trigrams=best_trigrams.copy()
for neuron in best_trigrams.keys():
best_n_trigrams[neuron]=sorted(best_trigrams[neuron])[:n]
return best_n_trigrams
def make_weight_histogram(weights):
plt.figure(1)
plt.subplot(211)
plt.title("Weights for Fake News Indicator")
plt.hist(weights[:,0], bins = 20, range = [-0.3,0.3])
plt.subplot(212)
plt.title("Weights for Real News Indicator")
plt.hist(weights[:,1], bins=20, range = [-0.3,0.3])
plt.show()
def get_most_relevant_neurons(all_wi_ai=None, ind = None, abs = False):
pass
if ind is None:
fake_news = np.mean(all_wi_ai[:,0,:], axis = 0)
real_news = np.mean(all_wi_ai[:,1,:], axis = 1)
def make_wi_ai_histogram(all_wi_ai, ind = None):
if ind is None:
#plot the average
fake_news = np.mean(all_wi_ai[:,0,:], axis = 0)
        real_news = np.mean(all_wi_ai[:,1,:], axis = 0)
else:
wi_ai = all_wi_ai[ind]
#plot x_raw[ind] weights*activation for fake news indicator
fake_news=wi_ai[0]
#plot x_raw[ind] weights*activation for real news indicator
real_news=wi_ai[1]
plt.figure(1)
plt.subplot(211)
plt.title("W_i * a_i for Fake News Indicator")
plt.hist(fake_news)
plt.subplot(212)
plt.title("W_i * a_i for Real News Indicator")
plt.hist(real_news)
plt.show()
def make_list_of_arrays_into_list(l):
total_arr = []
for array in l:
for num in array:
total_arr.append(num)
return total_arr
def label_peaks(ax, li):
for i in range(129):
count = li.count(i)
if count>5:
ax.annotate(i, xy=(i,count))
def make_top_neuron_histogram(all_top_neurons):
fake_news_pos = make_list_of_arrays_into_list([top_n[0][0] for top_n in all_top_neurons])
real_news_pos = make_list_of_arrays_into_list([top_n[0][1] for top_n in all_top_neurons])
fake_news_neg = make_list_of_arrays_into_list([top_n[0][2] for top_n in all_top_neurons])
real_news_neg = make_list_of_arrays_into_list([top_n[0][3] for top_n in all_top_neurons])
# from data helpers: plt.figure(1)
ax = plt.subplot(221)
plt.title("Most positive for Fake News Indicator")
plt.xticks(np.arange(0,129,1))
plt.ylabel('count')
plt.xlabel('neuron number')
plt.hist(fake_news_pos, bins=128)
labels = [str(i) if i%10==0 else '' for i in range(129)]
ax.set_xticklabels(labels)
label_peaks(ax,fake_news_pos)
ax2 = plt.subplot(222)
plt.title("Most positive for Real News Indicator")
plt.ylabel('count')
plt.xlabel('neuron number')
plt.xticks(np.arange(0,129,1))
plt.hist(real_news_pos, bins = 128)
labels = [str(i) if i%10==0 else '' for i in range(129)]
ax2.set_xticklabels(labels)
label_peaks(ax2,real_news_pos)
ax3 = plt.subplot(223)
plt.title("Most negative for Fake News Indicator")
plt.ylabel('count')
plt.xlabel('neuron number')
plt.xticks(np.arange(0,129,1))
plt.hist(fake_news_neg, bins = 128)
labels = [str(i) if i%10==0 else '' for i in range(129)]
ax3.set_xticklabels(labels)
label_peaks(ax3,fake_news_neg)
ax4 = plt.subplot(224)
plt.title("Most negative for Real News Indicator")
#plt.hist(real_news_neg, bins = 128)
plt.ylabel('count')
plt.xlabel('neuron number')
plt.xticks(np.arange(0,129,1))
plt.hist(real_news_neg, bins = 128)
labels = [str(i) if i%10==0 else '' for i in range(129)]
ax4.set_xticklabels(labels)
    label_peaks(ax4,real_news_neg)
plt.savefig('Most_relevant_neurons.png')
plt.show()
# positive_labels = [[0, 1] for _ in positive_examples]
# negative_labels = [[1, 0] for _ in negative_examples]
def get_n_best_neurons(weights, n,abs_value = False):
#print(weights, weights.shape) #128 x 2
arr_0 = weights[:,0]
list_0=arr_0.argsort()[-n:][::-1]
list_0 = [(element, round(arr_0[element],2)) for element in list_0]
list_0_neg = arr_0.argsort()[:n]
list_0_neg = [(element, round(arr_0[element],2)) for element in list_0_neg]
arr_1 = weights[:,1]
list_1=arr_1.argsort()[-n:][::-1]
list_1 = [(element, round(arr_1[element],2)) for element in list_1]
list_1_neg = arr_1.argsort()[:n]
list_1_neg = [(element, round(arr_1[element],2)) for element in list_1_neg]
#return weights for fake news, weights for real news
return list_0, list_1, list_0_neg, list_1_neg
def get_info(ind, all_wi_ai, all_top_neurons, best_trigrams, cur_dir=''):
import pickle
print(all_top_neurons[ind], "is all top neurons[ind]")
all_triples = []
for li in all_top_neurons[ind][0]:
triple_li = []
for tup in li:
neuron = tup[0]
trigram = ' '.join(best_trigrams[neuron][ind][1][0])
triple_li.append((neuron, trigram, tup[1]))
all_triples.append(triple_li)
wi_ais = all_wi_ai[ind]
#plot x_raw[ind] weights*activation for fake news indicator
fake_news=wi_ais[0]
#plot x_raw[ind] weights*activation for real news indicator
real_news=wi_ais[1]
most_fake_indices=fake_news.argsort()[-10:][::-1]
least_fake_indices=fake_news.argsort()[:10]
most_real_indices = real_news.argsort()[-10:][::-1]
least_real_indices = real_news.argsort()[:10]
most_fake_trigrams=[]
for neuron in most_fake_indices:
trigram = ' '.join(best_trigrams[neuron][ind][1][0])
string = trigram+', *neuron: '+str(neuron)+' '+str(fake_news[neuron])
most_fake_trigrams.append(string)
print("MOST FAKE: ",most_fake_trigrams)
most_real_trigrams=[]
for neuron in most_real_indices:
trigram = ' '.join(best_trigrams[neuron][ind][1][0])
most_real_trigrams.append(trigram+', *neuron: '+str(neuron)+' '+str(real_news[neuron]))
print("MOST REAL: ",most_real_trigrams)
least_fake_trigrams=[]
for neuron in least_fake_indices:
trigram = ' '.join(best_trigrams[neuron][ind][1][0])
least_fake_trigrams.append(trigram+', *neuron: '+str(neuron)+' '+str(fake_news[neuron]))
print("LEAST FAKE: ",least_fake_trigrams)
least_real_trigrams=[]
for neuron in least_real_indices:
trigram = ' '.join(best_trigrams[neuron][ind][1][0])
least_real_trigrams.append(trigram+', *neuron: '+str(neuron)+' '+str(real_news[neuron]))
print("LEAST REAL: ",least_real_trigrams)
def get_info2(ind, all_wi_ai, all_top_neurons, best_trigrams):
wi_ais = all_wi_ai[ind]
# plot x_raw[ind] weights*activation for fake news indicator
fake_news = wi_ais[0]
# plot x_raw[ind] weights*activation for real news indicator
real_news = wi_ais[1]
most_fake_indices = fake_news.argsort()[-10:][::-1]
least_fake_indices = fake_news.argsort()[:10]
most_real_indices = real_news.argsort()[-10:][::-1]
least_real_indices = real_news.argsort()[:10]
most_fake_trigrams = []
for neuron in most_fake_indices:
trigram = ' '.join(best_trigrams[neuron][ind][1][0])
# string = trigram+', *neuron: '+str(neuron)+' '+str(fake_news[neuron])
most_fake_trigrams.append(trigram)
# print(most_fake_trigrams)
most_real_trigrams = []
for neuron in most_real_indices:
trigram = ' '.join(best_trigrams[neuron][ind][1][0])
# most_real_trigrams.append(trigram+', *neuron: '+str(neuron)+' '+str(real_news[neuron]))
most_real_trigrams.append(trigram)
# print(most_real_trigrams)
least_fake_trigrams = []
for neuron in least_fake_indices:
trigram = ' '.join(best_trigrams[neuron][ind][1][0])
# least_fake_trigrams.append(trigram+', *neuron: '+str(neuron)+' '+str(fake_news[neuron]))
least_fake_trigrams.append(trigram)
# print(least_fake_trigrams)
least_real_trigrams = []
for neuron in least_real_indices:
trigram = ' '.join(best_trigrams[neuron][ind][1][0])
# least_real_trigrams.append(trigram+', *neuron: '+str(neuron)+' '+str(real_news[neuron]))
least_real_trigrams.append(trigram)
# print(least_real_trigrams)
return most_fake_trigrams, most_real_trigrams, least_fake_trigrams, least_real_trigrams
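# Illustrative sketch (not from the original script): one way the trigram lists
# returned by get_info2 might be inspected for a single example. The index 0 and
# the helper name are assumptions for demonstration.
def _demo_get_info2(all_wi_ai, all_top_neurons, best_trigrams):
    most_fake, most_real, least_fake, least_real = get_info2(0, all_wi_ai, all_top_neurons, best_trigrams)
    print("Trigrams pushing example 0 toward 'fake':", most_fake)
    print("Trigrams pushing example 0 toward 'real':", most_real)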
def make_top_5_histogram():
import pickle
cur_dir = ""
all_top_neurons="all_top_n_neurons.txt"
with open(cur_dir+all_top_neurons, 'rb') as f:
all_top_neurons = pickle.load(f)
make_top_neuron_histogram(all_top_neurons)
|
def compute_pay(hours, rate):
    # Regular pay for the hours actually worked, time-and-a-half above 40 hours.
    if hours <= 40:
        pay = rate * hours
    else:
        pay = ((hours - 40) * (1.5 * rate)) + (40 * rate)
    return pay

try:
    hrs = float(input('Enter Hours:\n'))
    rate = float(input('Enter Rate:\n'))
    print(compute_pay(hrs, rate))
except ValueError:
    print("Please enter numeric input")
    exit()
|
#! python3
# maillist and mail merge for CBD + Theos customer lists
# data is from CSV files downloaded from Theos and CBD web
# copy the files into the maillist directory on D:\
# Chad Cropley 2016-06-23 a RAD design solution
#
# import modules
import os
import csv
import shutil
# make sure we're in the right directory
os.chdir('d:\\maillist\\')
# print(os.getcwd())
# read file1
filename = input('Enter a Theos mail list filename: ')
shutil.copyfile(filename, 'MAILLIST-THEOS.csv')
# shutil.copyfile('MAILLIST-06.19.16.csv', 'MAILLIST-THEOS.csv')
with open('MAILLIST-THEOS.csv') as csvfile:
    reader = csv.DictReader(csvfile, delimiter=',')
    with open('output1.csv', 'w') as output:
        # write a simple three-column header, then one row per non-empty address
        print('Email,Mailing list,Language', file=output)
        for row in reader:
            if row['Email'] != '':
                print(row['Email'] + ',CBD Mailing List,EN', file=output)
# read file2
filename2 = input('Enter a CBD mail list filename: ')
shutil.copyfile(filename2, 'users_cbd.csv')
# shutil.copyfile('users_general_06162016.csv', 'users_cbd.csv')
with open('users_cbd.csv') as csvfile:
    reader = csv.DictReader(csvfile, delimiter=',')
    with open('output1.csv', 'a') as output:
        for row in reader:
            if row['E-mail'] != '':
                print(row['E-mail'] + ',CBD Mailing List,EN', file=output)
# os.system('pause')
# Remove duplicate lines from the merge (a set gives constant-time membership checks)
seenLines = set()
with open('output1.csv', 'r') as inFile, open('output2.csv', 'w') as outFile:
    for line in inFile:
        if line not in seenLines:
            outFile.write(line)
            seenLines.add(line)
print ('open output2.csv to export to CBD CS-Cart subscriber list')
os.system('pause')
|
"""
Validate a configuration value by converting it to a specific type.
These functions are used by :mod:`staticconf.readers` and
:mod:`staticconf.schema` to coerce config values to a type.
"""
import datetime
import logging
import re
import time
import six
from staticconf.errors import ValidationError
def validate_string(value):
return None if value is None else six.text_type(value)
def validate_bool(value):
return None if value is None else bool(value)
def validate_numeric(type_func, value):
try:
return type_func(value)
except ValueError:
raise ValidationError("Invalid %s: %s" % (type_func.__name__, value))
def validate_int(value):
return validate_numeric(int, value)
def validate_float(value):
return validate_numeric(float, value)
date_formats = [
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %I:%M:%S %p",
"%Y-%m-%d",
"%y-%m-%d",
"%m/%d/%y",
"%m/%d/%Y",
]
def validate_datetime(value):
if isinstance(value, datetime.datetime):
return value
for format_ in date_formats:
try:
return datetime.datetime.strptime(value, format_)
except ValueError:
pass
raise ValidationError("Invalid date format: %s" % value)
def validate_date(value):
if isinstance(value, datetime.date):
return value
return validate_datetime(value).date()
time_formats = [
"%I %p",
"%H:%M",
"%I:%M %p",
"%H:%M:%S",
"%I:%M:%S %p"
]
def validate_time(value):
if isinstance(value, datetime.time):
return value
for format_ in time_formats:
try:
return datetime.time(*time.strptime(value, format_)[3:6])
except ValueError:
pass
raise ValidationError("Invalid time format: %s" % value)
def _validate_iterable(iterable_type, value):
"""Convert the iterable to iterable_type, or raise a Configuration
exception.
"""
if isinstance(value, six.string_types):
msg = "Invalid iterable of type(%s): %s"
raise ValidationError(msg % (type(value), value))
try:
return iterable_type(value)
except TypeError:
raise ValidationError("Invalid iterable: %s" % (value))
def validate_list(value):
return _validate_iterable(list, value)
def validate_set(value):
return _validate_iterable(set, value)
def validate_tuple(value):
return _validate_iterable(tuple, value)
def validate_regex(value):
try:
return re.compile(value)
except (re.error, TypeError) as e:
raise ValidationError("Invalid regex: %s, %s" % (e, value))
def build_list_type_validator(item_validator):
"""Return a function which validates that the value is a list of items
which are validated using item_validator.
"""
def validate_list_of_type(value):
return [item_validator(item) for item in validate_list(value)]
return validate_list_of_type
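# Illustrative sketch (not part of staticconf): compose a list validator from a
# scalar validator. The helper name _example_list_of_ints is ours.
def _example_list_of_ints():
    validate_int_list = build_list_type_validator(validate_int)
    return validate_int_list(["1", "2", "3"])   # -> [1, 2, 3]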
def build_map_type_validator(item_validator):
"""Return a function which validates that the value is a mapping of
items. The function should return pairs of items that will be
passed to the `dict` constructor.
"""
def validate_mapping(value):
return dict(item_validator(item) for item in validate_list(value))
return validate_mapping
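# Illustrative sketch (not part of staticconf): the item validator must return a
# (key, value) pair for each entry; here it splits "key=value" strings. The
# helper names are ours.
def _example_mapping_from_pairs():
    def split_pair(item):
        key, _, value = item.partition('=')
        return key, validate_int(value)
    validate_int_map = build_map_type_validator(split_pair)
    return validate_int_map(["retries=3", "timeout=30"])   # -> {'retries': 3, 'timeout': 30}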
def validate_log_level(value):
"""Validate a log level from a string value. Returns a constant from
the :mod:`logging` module.
"""
try:
return getattr(logging, value)
except AttributeError:
raise ValidationError("Unknown log level: %s" % value)
def validate_any(value):
return value
validators = {
'': validate_any,
'bool': validate_bool,
'date': validate_date,
'datetime': validate_datetime,
'float': validate_float,
'int': validate_int,
'list': validate_list,
'set': validate_set,
'string': validate_string,
'time': validate_time,
'tuple': validate_tuple,
'regex': validate_regex,
'log_level': validate_log_level,
}
def get_validators():
"""Return an iterator of (validator_name, validator) pairs."""
return six.iteritems(validators)
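# Illustrative sketch (not part of staticconf): the (name, validator) pairs above
# allow lookup by type name; a direct lookup looks like this. The helper name
# _example_lookup is ours.
def _example_lookup(type_name, raw_value):
    return validators[type_name](raw_value)
# e.g. _example_lookup('date', '2016-06-23') -> datetime.date(2016, 6, 23)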
|
from .dataset import MemoryMapDataset
|
import sys
import random
import os
import os.path as op
from mastodon import Mastodon
from datetime import datetime
# --------------------------------------------------
DIR_SFW = 'sfw/'
DIR_NSFW = 'nsfw/'
def main():
mastodon = Mastodon(
access_token = 'token.dat',
api_base_url = 'https://antabaka.me/'
)
sfwcount = len([name for name in os.listdir(DIR_SFW) if os.path.isfile(os.path.join(DIR_SFW, name))])
nsfwcount = len([name for name in os.listdir(DIR_NSFW) if os.path.isfile(os.path.join(DIR_NSFW, name))])
random_choice = random.randint(1, sfwcount + nsfwcount)
print('\ns:' + str(sfwcount) + ' n:' + str(nsfwcount) + ' r:' + str(random_choice))
    is_safe = random_choice >= nsfwcount   # equivalent to the original: NSFW when random_choice < nsfwcount
art = ""
if is_safe:
files = [f for f in os.listdir(DIR_SFW) if op.isfile(op.join(DIR_SFW, f))]
art = DIR_SFW + random.choice(files)
else:
files = [f for f in os.listdir(DIR_NSFW) if op.isfile(op.join(DIR_NSFW, f))]
art = DIR_NSFW + random.choice(files)
fformat = op.splitext(art)[1][1:]
if (fformat == 'jpg'):
fformat = 'jpeg'
with open(art, 'rb') as picture:
data = picture.read()
media = mastodon.media_post(data, f'image/{fformat}')
    toot = ':gyate_reisen_love:'
mastodon.status_post(toot, media_ids=[media], visibility='unlisted', sensitive=not is_safe)
print(str(datetime.now()) + ': ' + art)
if __name__ == '__main__':
sys.exit(main())
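# Illustrative sketch (not part of the bot): one way the 'token.dat' file used
# above could be produced with Mastodon.py's registration/login flow. The app
# name, credential filenames, and email/password login are assumptions; password
# login is deprecated on newer instances, so an OAuth flow may be needed instead.
def register_and_login(email, password):
    Mastodon.create_app('art-bot', api_base_url='https://antabaka.me/',
                        to_file='clientcred.dat')
    client = Mastodon(client_id='clientcred.dat', api_base_url='https://antabaka.me/')
    client.log_in(email, password, to_file='token.dat')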
|
# test for+range, mostly to check optimisation of this pair
# apply args using *
for x in range(*(1, 3)):
print(x)
for x in range(1, *(6, 2)):
print(x)
# zero step
try:
for x in range(1, 2, 0):
pass
except ValueError:
print('ValueError')
# apply args using **
try:
for x in range(**{'end':1}):
print(x)
except TypeError:
print('TypeError')
try:
for x in range(0, **{'end':1}):
print(x)
except TypeError:
print('TypeError')
try:
for x in range(0, 1, **{'step':1}):
print(x)
except TypeError:
print('TypeError')
# keyword args
try:
for x in range(end=1):
print(x)
except TypeError:
print('TypeError')
try:
for x in range(0, end=1):
print(x)
except TypeError:
print('TypeError')
try:
for x in range(start=0, end=1):
print(x)
except TypeError:
print('TypeError')
try:
for x in range(0, 1, step=1):
print(x)
except TypeError:
print('TypeError')
# argument is a comprehension
try:
for x in range(0 for i in []):
print(x)
except TypeError:
print('TypeError')
try:
for x in range(0, (0 for i in [])):
print(x)
except TypeError:
print('TypeError')
try:
for x in range(0, 1, (0 for i in [])):
print(x)
except TypeError:
print('TypeError')
|
# -*- encoding: utf-8 -*-
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('sqlite:///test.db', convert_unicode=True)
db = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = db.query_property()
def init_db():
# import all modules here that might define models so that
# they will be registered properly on the metadata. Otherwise
# you will have to import them first before calling init_db()
from paste import Paste
Base.metadata.create_all(bind=engine)
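# Illustrative sketch (not part of the original module): a minimal model declared
# against Base plus a session round trip, guarded so it only runs when this file
# is executed directly. The Note class and its columns are hypothetical, not from
# the paste app; tables are created via Base.metadata directly so this demo does
# not depend on importing paste.
if __name__ == '__main__':
    from sqlalchemy import Column, Integer, String

    class Note(Base):
        __tablename__ = 'notes'
        id = Column(Integer, primary_key=True)
        text = Column(String(256))

    Base.metadata.create_all(bind=engine)
    db.add(Note(text='hello'))
    db.commit()
    print(Note.query.count())   # Base.query is bound to the scoped session above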
|