# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Callable
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import topi
topi_funcs = {
"cumsum": {"generic": topi.cumsum, "cuda": topi.cuda.cumsum},
"cumprod": {"generic": topi.cumprod, "cuda": topi.cuda.cumprod},
}
identity_value = {"cumsum": 0, "cumprod": 1}
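# Note on the exclusive-scan tests below: an exclusive scan shifts the inclusive
# result one position along the scanned axis and places the identity element
# (0 for cumsum, 1 for cumprod) in the first slot, which is why the expected
# values are built with np.roll plus an identity fill.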
def get_implementations(name, axis, dtype, exclusive):
topi_func_generic = topi_funcs[name]["generic"]
topi_func_cuda = topi_funcs[name]["cuda"]
return {
"generic": (
lambda x: topi_func_generic(x, axis, dtype, exclusive=exclusive),
topi.generic.schedule_extern,
),
"cuda": (
lambda x: topi_func_cuda(x, axis, dtype, exclusive=exclusive),
topi.cuda.schedule_scan,
),
"nvptx": (
lambda x: topi_func_cuda(x, axis, dtype, exclusive=exclusive),
topi.cuda.schedule_scan,
),
"vulkan": (
lambda x: topi_func_cuda(x, axis, dtype, exclusive=exclusive),
topi.cuda.schedule_scan,
),
"metal": (
lambda x: topi_func_cuda(x, axis, dtype, exclusive=exclusive),
topi.cuda.schedule_scan,
),
}
def _run_tests(
dev,
target,
op_name: str = "cumsum",
    gt_func: Callable[..., np.ndarray] = np.cumsum,
):
def check_scan(np_ref, data, axis=None, dtype=None, exclusive=False):
implementations = get_implementations(op_name, axis, dtype, exclusive)
fcompute, fschedule = tvm.topi.testing.dispatch(target, implementations)
tvm.topi.testing.compare_numpy_tvm([data], np_ref, target, dev, fcompute, fschedule)
data = np.array([2, 3, 0])
check_scan(gt_func(data), data)
data = np.random.rand(10) > 0.5
data = data.astype(np.int32)
check_scan(gt_func(data, dtype=np.int32), data)
check_scan(gt_func(data), data, dtype="int64")
data = np.random.rand(10) > 0.5
check_scan(gt_func(data, dtype=np.int32), data, dtype="int32")
for in_dtype in ["float32", "float64"]:
if target == "metal" and in_dtype == "float64":
# float64 is not supported in metal
continue
data = np.random.randn(10, 10).astype(in_dtype)
check_scan(gt_func(data), data)
check_scan(gt_func(data, axis=0), data, axis=0)
check_scan(gt_func(data, axis=1), data, axis=1)
data = np.random.randn(10, 5, 10).astype(in_dtype)
check_scan(gt_func(data), data)
check_scan(gt_func(data, axis=0), data, axis=0)
check_scan(gt_func(data, axis=1), data, axis=1)
check_scan(gt_func(data, axis=-1), data, axis=-1)
for in_dtype in ["int32", "int64"]:
data = np.random.randint(-100, 100, size=(100, 100)).astype(in_dtype)
check_scan(gt_func(data, dtype=in_dtype), data)
check_scan(gt_func(data), data, dtype="int64")
check_scan(gt_func(data, axis=0, dtype=in_dtype), data, axis=0)
check_scan(gt_func(data, axis=1, dtype=in_dtype), data, axis=1)
data = np.random.randint(1 << 30, (1 << 31) - 1, size=(100)).astype(in_dtype)
check_scan(gt_func(data), data, dtype="int64")
data = np.random.randint(-100, 100, size=(100, 100)).astype("int64")
expected_result = np.roll(gt_func(data), 1)
expected_result[0] = identity_value[op_name]
check_scan(expected_result, data, dtype="int64", exclusive=True)
expected_result = np.roll(gt_func(data, axis=0, dtype=in_dtype), 1, axis=0)
expected_result[0, :] = identity_value[op_name]
check_scan(expected_result, data, axis=0, exclusive=True)
expected_result = np.roll(gt_func(data, axis=1, dtype=in_dtype), 1, axis=1)
expected_result[:, 0] = identity_value[op_name]
        check_scan(expected_result, data, axis=1, exclusive=True)
@tvm.testing.parametrize_targets
def test_cumsum(dev, target):
_run_tests(dev, target, op_name="cumsum", gt_func=np.cumsum)
@tvm.testing.parametrize_targets
def test_cumprod(dev, target):
_run_tests(dev, target, op_name="cumprod", gt_func=np.cumprod)
if __name__ == "__main__":
test_cumsum(tvm.device("cpu"), tvm.target.Target("llvm"))
test_cumsum(tvm.device("cuda"), tvm.target.Target("cuda"))
test_cumsum(tvm.device("nvptx"), tvm.target.Target("nvptx"))
test_cumsum(tvm.device("vulkan"), tvm.target.Target("vulkan"))
test_cumsum(tvm.device("metal"), tvm.target.Target("metal"))
test_cumprod(tvm.device("cpu"), tvm.target.Target("llvm"))
test_cumprod(tvm.device("cuda"), tvm.target.Target("cuda"))
test_cumprod(tvm.device("nvptx"), tvm.target.Target("nvptx"))
test_cumprod(tvm.device("vulkan"), tvm.target.Target("vulkan"))
test_cumprod(tvm.device("metal"), tvm.target.Target("metal"))
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def Startpage(request):
return render(request,'starter.html')
|
import webapp2
import os
import jinja2
import json
import datetime
import time
from google.appengine.api import users
from google.appengine.ext import ndb
from database import seed_data
from users import User
from content_manager import populate_feed, logout_url, login_url
from data import Course, Teacher, User, Post, Enrollment
from pprint import pprint, pformat
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
def get_auth():
# Get the Google user
user = users.get_current_user()
nickname = None
if user:
nickname = user.nickname()
auth_url = users.create_logout_url('/')
else:
auth_url = users.create_login_url('/login')
return {
"nickname": nickname,
"auth_url": auth_url,
"auth_text": "Sign out" if user else "Sign in"}
class MainHandler(webapp2.RequestHandler):
def get(self):
template= jinja_env.get_template("/templates/home.html")
self.response.write(template.render(get_auth()))
# self.response.write(template.render())
class LogInHandler(webapp2.RequestHandler):
def get(self):
google_login_template = jinja_env.get_template("/templates/google_login.html")
new_user_template = jinja_env.get_template("/templates/new_user.html")
user = users.get_current_user()
if user:
print("ACCOUNT EXISTS:")
print(user.email())
print(user.nickname())
existing_user = User.query().filter(User.email == user.email()).get()
nickname = user.nickname()
if existing_user:
self.redirect('/addcourses')
if not existing_user:
fields = {
"nickname": nickname,
"logout_url": logout_url,
}
self.response.write(new_user_template.render(fields))
# else:
# self.redirect('/layout.html')
else:
self.response.write(google_login_template.render({ "login_url": login_url }))
class AddCoursesHandler(webapp2.RequestHandler):
def get(self):
template = jinja_env.get_template("/templates/addcourses.html")
google_user = users.get_current_user()
study_spot_user = User.query().filter(User.email == google_user.email()).get()
enrollments = Enrollment.query().filter(Enrollment.user == study_spot_user.key).fetch()
courses = [e.course.get().name for e in enrollments]
params = get_auth()
params['courses'] = courses
self.response.write(template.render(params))
def post(self):
# Get the current Google account user
user = users.get_current_user()
print(user)
# If the user doesn't exist, go home
if user is None:
self.redirect('/')
return
# Fetch the user from the data store
current_user = User.query().filter(User.email == user.email()).get()
# If the user doesn't exist in the data store, create and put the new user
if not current_user:
new_user_entry = User(
name = self.request.get("name"),
username = self.request.get("username"),
email = user.email(),
)
new_user_entry.put()
current_user = new_user_entry
time.sleep(.2)
self.redirect('/addcourses')
class ChatHandler(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user is None:
self.redirect('/')
return
print("user.email(): " + user.email())
# Get current user from data store
current_user = User.query().filter(User.email == user.email()).get()
if current_user is None:
self.redirect('/')
return
        print(current_user)
chat_fields = populate_feed(current_user, self.request.get("course") +" "+ self.request.get("teacher"))
start_chat = jinja_env.get_template("templates/chat.html")
self.response.write(start_chat.render(chat_fields))
def post(self):
user = users.get_current_user()
if user is None:
self.redirect('/')
return
current_user = User.query().filter(User.email == user.email()).get()
print(self.request.get("course"))
new_post = Post(author= current_user.key, board=self.request.get("course")+" "+self.request.get("teacher"), content= self.request.get("user_post"))
new_post.put()
time.sleep(.2)
self.redirect('/chat?course=' + self.request.get("course")+"&teacher="+self.request.get("teacher"))
class ViewCourseHandler(webapp2.RequestHandler):
    def get(self):
        userdata_template = jinja_env.get_template("/templates/userdata.html")
        user = users.get_current_user()
        if user is None:
            self.redirect('/')
            return
        fields = {"names": User.query().filter(User.email == user.email()).get()}
        self.response.write(userdata_template.render(fields))
class LoadDataHandler(webapp2.RequestHandler):
def get(self):
seed_data()
self.response.write("Seed data added")
class CourseService(webapp2.RequestHandler):
def get(self):
        key = self.getKey(self.request)
course_key = ndb.Key('Courses', key)
courses = Course.query().order(Course.name).fetch()
teachers = Teacher.query().order(Teacher.name).fetch()
results = []
for course in courses:
result = {}
teacher_keys = course.teachers
result['course_id'] = course.key.id()
result['course_name'] = course.name
result['teachers'] = []
for teacher_key in teacher_keys:
for teacher in teachers:
if teacher.key == teacher_key:
teacher_dict = {}
teacher_dict['teacher_name'] = teacher.name
teacher_dict['teacher_id'] = teacher_key.id()
result['teachers'].append(teacher_dict)
results.append(result)
self.response.headers['Content-Type'] = 'application/json'
print(json.dumps(results))
self.response.write(json.dumps(results))
def post(self):
        key = self.getKey(self.request)
        content = self.request.get('content')
course = Course(parent=ndb.Key("Courses", key), content=content)
course.put()
def getKey(self, request):
        from_user = self.request.get('from')
        to_user = self.request.get('to')
        key_values = [from_user, to_user]
        key_values.sort()
        return key_values[0] + '_' + key_values[1]
def to_serializable(self, data):
"""Build a new dict so that the data can be JSON serializable"""
result = data.to_dict()
record = {}
# Populate the new dict with JSON serializiable values
for key in result.iterkeys():
if isinstance(result[key], datetime.datetime):
record[key] = result[key].isoformat()
continue
record[key] = result[key]
# Add the key so that we have a reference to the record
record['key'] = data.key.id()
return record
""" Teacher Service will allow look up by ID in datastore """
class TeacherService(webapp2.RequestHandler):
def get(self):
teacher_id = self.getKey(self.request);
# course_key = ndb.Key('Courses', key)
teachers = Teacher.query().order(Teacher.name).fetch()
# print(teachers)
results = json.dumps([t.to_dict() for t in teachers], default=str)
self.response.headers['Content-Type'] = 'application/json'
self.response.write(results)
def getKey(self, request):
        field = self.request.get('id')
        teacher_id = ""
        print("field: " + field)
        return teacher_id
class UserCourseService(webapp2.RequestHandler):
def get(self):
stub='[]'
print(stub)
def post(self):
google_user = users.get_current_user()
study_spot_user = User.query().filter(User.email == google_user.email()).get()
course_name = self.request.get("course")
        course = Course.query().filter(Course.name == course_name).get()
Enrollment(user=study_spot_user.key, course=course.key).put()
teacher_name = self.request.get("teacher")
time.sleep(.2)
self.redirect("/chat?course={}&teacher={}".format(course.name, teacher_name))
print("/chat?course={}&teacher={}".format(course.name, teacher_name))
app = webapp2.WSGIApplication([
('/', MainHandler),
('/login', LogInHandler),
('/addcourses', AddCoursesHandler),
('/chat', ChatHandler),
('/viewcourses', ViewCourseHandler),
('/seed-data', LoadDataHandler),
('/course', CourseService),
('/teacher', TeacherService),
('/user-course', UserCourseService),
], debug=True)
|
# -*- coding:utf-8 -*-
from kamo import TemplateManager
import os.path
import logging
logging.basicConfig(level=logging.DEBUG)
m = TemplateManager(directories=[os.path.dirname(__file__)])
template = m.lookup("template.kamo")
print(template.render())
|
import cv2
from imutils.paths import list_images
import imutils
import re
import datetime
def get_frame_number(impath):
return int(re.search(r"image data (\d+)", impath).group(1))
def get_timestamp(impath):
"assuming that the timestamp is a part of the image name"
date_str = impath.split(".")[0]
date_str = re.split(r"image data \d+ ", date_str)[1]
date = datetime.datetime.strptime(date_str, '%Y-%b-%d %H %M %S %f')
return date
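# Assumed filename format (illustrative, inferred from the regexes above):
# "image data 42 2021-Mar-05 13 22 10 123456.png", where 42 is the frame number
# and the rest is a timestamp matching '%Y-%b-%d %H %M %S %f'.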
# Load the data
impaths = list(list_images("C:/Users/msa/Documents/datasets/CREATE lidar camera/ImageData/"))
impaths = sorted(impaths, key=get_frame_number)
for impath in impaths[:100]:
date = get_timestamp(impath)
print(date)
image = cv2.imread(impath)
image = imutils.resize(image, width=640)
# do something cool
cv2.imshow("im", image)
cv2.waitKey(0)
#closing all open windows
cv2.destroyAllWindows()
'''
reader = dataset.hdf5generator(filepath)
for data in reader:
# do something
pass
reader = dataset.pcap_reader(filename, callib)
for data in reader:
# do something
pass
reader = dataset.ats_radar_reader(filename, callib)
for data in reader:
# do something
pass
# KITTI with pykitti
basedir = '/home/dodo_brain/kitti_data/'
date = '2011_09_26'
drive = '0019'
data = pykitti.raw(basedir, date, drive, frames=range(0, 50, 5))
for cam0_image in data.cam0:
# do something
pass
'''
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the birthday function below.
def birthday(s, d, m):
totalCount = 0
l = len(s)
# print("{} {} {}".format(s, d, m))
for i in range(l):
sv = 0
for j in range(i, i+m):
if(l == j):
break
# print("sv[{}, {}] : {} > {} >> {}".format(i, j, sv, s[j], abs(j-i+1)))
sv += s[j]
if sv == d and abs(j-i+1) == m:
totalCount += 1
# print("find => sv[{}, {}] : {} -> {}".format(i, j, sv, totalCount))
break
# print("count :{}".format(totalCount))
return totalCount
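# Descriptive note: birthday() counts the contiguous slices of s with length m
# whose elements sum to d; for each start index i it accumulates at most m
# values and registers a match only when the running sum equals d and the slice
# spans exactly m elements.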
if __name__ == '__main__':
# fptr = open("./output/my.txt", 'w')
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
s = list(map(int, input().rstrip().split()))
dm = input().rstrip().split()
d = int(dm[0])
m = int(dm[1])
result = birthday(s, d, m)
fptr.write(str(result) + '\n')
fptr.close()
|
import tensorflow as tf
import numpy as np
from src.tftools.custom_constraint import MinMaxConstraint
shear_multi = 0.01
class ShearLayer(tf.keras.layers.Layer):
def __init__(self, trainable=True, **kwargs):
"""
:param visible: one dimension of visible image (for this dimension [x,y] will be computed)
"""
super(ShearLayer, self).__init__(**kwargs)
tf.compat.v1.constant_initializer()
# rotation by an angle
self.shear = self.add_weight(name='shear', shape=(1,), dtype=tf.float32, initializer='zeros',
trainable=True,
constraint=MinMaxConstraint(-1., 1.)
)
def call(self, coords, **kwargs):
shear_x = tf.reshape([[1., tf.multiply(self.shear[0], shear_multi)],
[0., 1.]], (2, 2))
idx = tf.cast(coords, tf.float32)
idx = tf.einsum("ij,kj->ik", idx, shear_x)
return idx
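# A minimal usage sketch (illustrative only; assumes eager TF2 and that the
# MinMaxConstraint import above resolves in this project). With the weight at
# its zero initialisation the shear matrix is the identity, so the coordinates
# come back unchanged; otherwise x is sheared by shear * shear_multi * y.
if __name__ == "__main__":
    layer = ShearLayer()
    coords = tf.constant([[0, 0], [1, 2], [3, 4]])
    print(layer(coords))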
|
#!/usr/bin/env python3.7
import os
BLOCK_SIZE = 256
P = [
(0, [1, 7]),
(1, [0, 8]),
(0, [5, 3]),
(1, [8, 6]),
(0, [3, 9]),
(1, [4, 0]),
(0, [9, 1]),
(1, [6, 2]),
(0, [7, 5]),
(1, [2, 4]),
]
def n2B(b, length=BLOCK_SIZE):
    return list(map(int, bin(b)[2:].rjust(length, '0')))
def B2n(b):
return int("".join(map(str, b)), 2)
def swap(b):
l = BLOCK_SIZE // 2
mask = (1 << l) - 1
return (b >> l) | ((b & mask) << l)
def bitchain(cb, state=0):
if len(cb) == 0:
return cb
b, ns = P[state]
b0, bn = cb[0],cb[1:]
return [b0 ^ b] + bitchain(bn, state=ns[b0])
def blockcipher(b):
return B2n(bitchain(n2B(b)))
class CbC:
def __init__(self, k, rounds):
self.key = [k]
self.rounds = rounds
for i in range(1, self.rounds):
k = swap(blockcipher(k))
self.key.append(k)
def encrypt(self, b):
for i in range(self.rounds):
b ^= self.key[i]
b = swap(blockcipher(b))
return b
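# Descriptive note: bitchain() walks the bits of a block through the 10-state
# machine P, XOR-ing each bit with the state's output bit and branching on the
# *original* bit to select the next state; blockcipher() applies this to a
# 256-bit block, and CbC derives `rounds` round keys via swap(blockcipher(k)),
# each round XOR-ing a key in before another swap(blockcipher(...)) pass.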
if __name__ == "__main__":
flag = bytes.hex(os.urandom(BLOCK_SIZE // 8))
key = int(flag, 16)
C = CbC(key, 99)
print("Flag : AIS3{%s}" % flag)
with open("data", "w") as f:
for i in range(100):
pt = int(bytes.hex(os.urandom(BLOCK_SIZE // 8)), 16)
ct = C.encrypt(pt)
f.write(str((pt, ct)) + "\n")
|
"""
'''
Description: Problem 912 (Sort an Array) - Solution 1
Version: 1.0.0.20220322
Author: Arvin Zhao
Date: 2022-03-19 12:58:01
Last Editors: Arvin Zhao
LastEditTime: 2022-03-22 19:38:21
'''
"""
from typing import List
class Solution:
def sortArray(self, nums: List[int]) -> List[int]:
"""Built-in stable sort function."""
return sorted(nums)
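# A minimal usage check (illustrative, not part of the original submission):
if __name__ == "__main__":
    assert Solution().sortArray([5, 2, 3, 1]) == [1, 2, 3, 5]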
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for customizing tf.test.TestCase class."""
import contextlib
import copy
import os
from typing import Iterable, Optional, Text, Union
import tensorflow as tf
from tfx.dsl.io import fileio
from tfx.utils import io_utils
from google.protobuf import message
from google.protobuf import text_format
@contextlib.contextmanager
def override_env_var(name: str, value: str):
"""Overrides an environment variable and returns a context manager.
Example:
with test_case_utils.override_env_var('HOME', new_home_dir):
or
self.enter_context(test_case_utils.override_env_var('HOME', new_home_dir))
Args:
name: Name of the environment variable.
value: Overriding value.
Yields:
None.
"""
old_value = os.getenv(name)
os.environ[name] = value
yield
if old_value is None:
del os.environ[name]
else:
os.environ[name] = old_value
class TfxTest(tf.test.TestCase):
"""Convenient wrapper for tfx test cases."""
def setUp(self):
super().setUp()
self.tmp_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
fileio.makedirs(self.tmp_dir)
# TODO(b/176196624): Delete following block when we drop support for TF<2.4.
# Manually set up exit_stack because absltest.TestCase.setUp() is not called
# in TF<2.4.
if self._exit_stack is None:
self._exit_stack = contextlib.ExitStack()
self.addCleanup(self._exit_stack.close)
def load_proto_from_text(self, path: Text,
proto_message: message.Message) -> message.Message:
"""Loads proto message from serialized text."""
return io_utils.parse_pbtxt_file(path, proto_message)
def assertProtoPartiallyEquals(
self,
expected: Union[str, message.Message],
actual: message.Message,
ignored_fields: Optional[Iterable[str]] = None,
):
"""Asserts proto messages are equal except the ignored fields."""
if isinstance(expected, str):
expected = text_format.Parse(expected, actual.__class__())
actual = copy.deepcopy(actual)
else:
expected = copy.deepcopy(expected)
actual = copy.deepcopy(actual)
# Currently only supports one-level for ignored fields.
for ignored_field in ignored_fields or []:
expected.ClearField(ignored_field)
actual.ClearField(ignored_field)
return self.assertProtoEquals(expected, actual)
@contextlib.contextmanager
def change_working_dir(working_dir: str):
"""Changes working directory to a given temporary directory.
Example:
with test_case_utils.change_working_dir(tmp_dir):
or
self.enter_context(test_case_utils.change_working_dir(self.tmp_dir))
Args:
    working_dir: The new working directory. This directory should already exist.
Yields:
Old working directory.
"""
old_dir = os.getcwd()
os.chdir(working_dir)
yield old_dir
os.chdir(old_dir)
|
import random
import sys
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QVBoxLayout
from gui import *
windowHeight = 600
windowWidth = 800
background_color = "#111111"
def change_background(window: QMainWindow):
c = '{:x}'.format(random.randrange(16))
background_color = "#" + c + c + c
window.setStyleSheet("QMainWindow {background: " + background_color + ";}")
if __name__ == '__main__':
app = QApplication(sys.argv)
window = QMainWindow()
window.setStyleSheet("QMainWindow {background: " + background_color + ";}")
window.setFixedSize(windowWidth, windowHeight)
button_full_width = 100
button_full_height = int(button_full_width / 3)
button_full_path = Render.build_svg(
Button_full(button_full_width, button_full_height, "#111111", "button_full"))
button_full = QPushButton("#FFFF99")
button_full.setStyleSheet("QPushButton {"
"font-size: 10pt;"
"font-family: Oswald;"
"background-image : url(" + button_full_path + ");"
"border:1px;"
"background-color:#CCDDFF;}"
"QPushButton::hover {background-color: #99CCFF;}")
button_full.setFixedSize(button_full_width, button_full_height)
button_left_width = 100
button_left_height = int(button_left_width / 3)
button_left_path = Render.build_svg(
Button_semi_left(button_left_width, button_left_height, "#111111", "button_left"))
button_left = QPushButton("#FFFF99")
button_left.setStyleSheet("QPushButton {"
"font-size: 10pt;"
"font-family: Oswald;"
"background-image : url(" + button_left_path + ");"
"border:1px;"
"background-color:#CCDDFF;}"
"QPushButton::hover {background-color: #99CCFF;}")
button_left.setFixedSize(button_left_width, button_left_height)
button_right_width = 100
button_right_height = int(button_right_width / 3)
button_right_path = Render.build_svg(
Button_semi_right(button_right_width, button_right_height, "#111111", "button_right"))
button_right = QPushButton("#FFFF99")
button_right.setStyleSheet("QPushButton {"
"font-size: 10pt;"
"font-family: Oswald;"
"background-image : url(" + button_right_path + ");"
"border:1px;"
"background-color:#CCDDFF;}"
"QPushButton::hover {background-color: #99CCFF;}")
button_right.setFixedSize(button_right_width, button_right_height)
header_left_width = 600
    header_left_height = 30
header_left_path = Render.build_svg(
        Header_left(header_left_width, header_left_height, "#111111", "header_left"))
header_left = QPushButton("")
header_left.setStyleSheet("QPushButton {"
"font-size: 10pt;"
"font-family: Oswald;"
"background-image : url(" + header_left_path + ");"
"border:1px;"
"background-color:#CCDDFF;}"
"QPushButton::hover {background-color: #99CCFF;}")
    header_left.setFixedSize(header_left_width, header_left_height)
header_right_width = 600
    header_right_height = 30
header_right_path = Render.build_svg(
        Header_right(header_right_width, header_right_height, "#111111", "header_right"))
header_right = QPushButton("")
header_right.setStyleSheet("QPushButton {"
"font-size: 10pt;"
"font-family: Oswald;"
"background-image : url(" + header_right_path + ");"
"border:1px;"
"background-color:#CCDDFF;}"
"QPushButton::hover {background-color: #99CCFF;}")
    header_right.setFixedSize(header_right_width, header_right_height)
grid = QVBoxLayout()
grid.addWidget(button_full)
grid.addWidget(button_left)
grid.addWidget(button_right)
grid.addWidget(header_left)
grid.addWidget(header_right)
window.central_widget = QWidget()
main_widget = QWidget()
main_widget.setLayout(grid)
window.setCentralWidget(main_widget)
button_full.clicked.connect(lambda: change_background(window))
window.show()
app.exec_()
|
def optfunc(arg1, arg2, arg3):
'''
    Function syntax definition example.
:param arg1:
:param arg2:
:param arg3:
:return:
'''
# return 'hello world'
return "hello world", True, False
|
import unittest
from typing import Dict, Optional
import sqlalchemy.engine
from magma.db_service.config import TestConfig
from magma.db_service.models import Base
from magma.db_service.session_manager import Session
from sqlalchemy import MetaData, create_engine
class DBTestCaseBlueprint(unittest.TestCase):
metadata: MetaData
engine: sqlalchemy.engine.Engine
session: Session
@classmethod
def drop_all(cls):
cls.metadata.drop_all()
@classmethod
def create_all(cls):
cls.metadata.create_all()
@classmethod
def setMetadata(cls, metadata: MetaData = Base.metadata):
cls.metadata = metadata
@classmethod
def setUpClass(cls) -> None:
cls.setMetadata(metadata=Base.metadata)
@classmethod
def set_up_db_test_case(cls, **kwargs: Optional[Dict]):
cls.engine = cls.get_test_db_engine(**kwargs)
cls.session = Session(bind=cls.engine)
cls.bind_engine()
@staticmethod
def get_test_db_engine(**kwargs) -> sqlalchemy.engine.Engine:
config = TestConfig()
return create_engine(
url=kwargs.get("SQLALCHEMY_DB_URI", config.SQLALCHEMY_DB_URI),
encoding=kwargs.get("SQLALCHEMY_DB_ENCODING", config.SQLALCHEMY_DB_ENCODING),
echo=False,
future=kwargs.get("SQLALCHEMY_FUTURE", config.SQLALCHEMY_FUTURE),
)
@classmethod
def bind_engine(cls):
cls.metadata.bind = cls.engine
@classmethod
def close_session(cls):
cls.session.rollback()
cls.session.close()
class BaseDBTestCase(DBTestCaseBlueprint):
def setUp(self):
self.set_up_db_test_case()
self.create_all()
def tearDown(self):
self.close_session()
self.drop_all()
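# A minimal sketch of a concrete test built on the blueprint above (hypothetical
# example; any model from magma.db_service.models could be queried through
# self.session once create_all() has run):
#
# class ExampleDBTestCase(BaseDBTestCase):
#     def test_session_is_bound(self):
#         self.assertIsNotNone(self.session.get_bind())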
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import importlib
def add_logging_arguments(parser, default=logging.WARNING):
"""
Add options to configure logging level
:param parser:
:param default:
:return:
"""
parser.add_argument(
'--debug',
help="Set logging level to DEBUG",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=default,
)
parser.add_argument(
'-v', '--verbose',
help="Be verbose. Sets logging level to INFO",
action="store_const",
dest="loglevel",
const=logging.INFO
)
def configure_colored_logging(loglevel):
"""
Configure colored logging
:param loglevel:
:return:
"""
import coloredlogs
field_styles = coloredlogs.DEFAULT_FIELD_STYLES.copy()
field_styles['asctime'] = {}
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles['debug'] = {}
coloredlogs.install(
level=loglevel,
use_chroot=False,
fmt='%(asctime)s %(levelname)-8s %(name)s - %(message)s',
level_styles=level_styles,
field_styles=field_styles)
def get_class_from_path(path):
"""
get class given path
:param path:
:return:
"""
path_list = path.split('.')
module = importlib.import_module('.'.join(path_list[0:-1]))
iclass = getattr(module, path_list[-1])
return iclass
def update_config(defaults, custom):
config = defaults if defaults else {}
if custom:
config.update(custom)
return config
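# A minimal usage sketch (hypothetical script; assumes the optional coloredlogs
# dependency is installed):
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   add_logging_arguments(parser)
#   args = parser.parse_args()
#   configure_colored_logging(args.loglevel)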
|
# Copyright The OpenTelemetry Authors
# SPDX-License-Identifier: Apache-2.0
import pytest
from utils import assertTest
pytest_plugins = ["pytester"]
common_code = """
import os
import time
import logging
import pytest
"""
def test_success_plugin(pytester, otel_service):
"""test a success test"""
pytester.makepyfile(
common_code
+ """
def test_success():
assert True
""")
assertTest(pytester, "test_success", "passed", "STATUS_CODE_OK", "passed", "STATUS_CODE_OK")
|
import numpy as np
import pandas as pd
from pycytominer.operations import sparse_random_projection
data_df = pd.DataFrame(
{
"Metadata_plate": ["a", "a", "a", "a", "b", "b", "b", "b"],
"Metadata_treatment": [
"drug",
"drug",
"control",
"control",
"drug",
"drug",
"control",
"control",
],
"Metadata_batch": [
"day1",
"day1",
"day1",
"day1",
"day1",
"day1",
"day1",
"day1",
],
"x": [1, 2, 8, 2, 5, 5, 5, 1],
"y": [3, 1, 7, 4, 5, 9, 6, 1],
"z": [1, 8, 2, 5, 6, 22, 2, 2],
"zz": [14, 46, 1, 6, 30, 100, 2, 2],
}
)
def test_sparse_random_projection():
"""
    Testing the pycytominer sparse_random_projection function
"""
n_components = 2
cp_features = ["x", "y", "z"]
seed = 123
sparse_result = sparse_random_projection(
population_df=data_df,
variables=cp_features,
n_components=n_components,
seed=seed,
).round(2)
expected_result = pd.DataFrame(
{
0: [2.79, 1.86],
1: [0.93, -0.93],
2: [6.51, -0.93],
3: [3.72, 1.86],
4: [4.65, 0.00],
5: [8.38, 3.72],
6: [5.58, 0.93],
7: [0.93, 0.00],
}
).transpose()
expected_result.columns = ["sparse_comp_0", "sparse_comp_1"]
assert sparse_result.equals(expected_result)
def test_sparse_random_projection_allvar():
"""
    Testing the pycytominer sparse_random_projection function
"""
n_components = 2
cp_features = "all"
seed = 123
input_data_df = data_df.loc[:, ["x", "y", "z", "zz"]]
sparse_result = sparse_random_projection(
population_df=input_data_df,
variables=cp_features,
n_components=n_components,
seed=seed,
).round(2)
expected_result = pd.DataFrame(
{
0: [16.0, -14.0],
1: [45.0, -40.0],
2: [0.0, -7.0],
3: [8.0, -3.0],
4: [30.0, -29.0],
5: [104.0, -83.0],
6: [3.0, -5.0],
7: [2.0, -1.0],
}
).transpose()
expected_result.columns = ["sparse_comp_0", "sparse_comp_1"]
assert sparse_result.equals(expected_result)
|
import sys
import versioneer
from setuptools import setup
from broomer import (__version__ as version,
__description__ as description)
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError as e:
long_description = open('README.md').read()
except OSError as e:
print(e)
sys.exit(1)
install_requires = ['zipa', 'maya', 'PyYAML', 'pystache']
tests_require = ['pytest', 'pytest-runner>=2.0,<3dev', 'pytest-flake8']
setup_requires = tests_require + ['pypandoc']
setup(
name='broomer',
version=version,
description=description,
author="Calin Don",
author_email="calin.don@gmail.com",
url="https://github.com/calind/broomer",
long_description=long_description,
cmdclass=versioneer.get_cmdclass(),
setup_requires=setup_requires,
install_requires=install_requires,
tests_require=tests_require,
extras_require={
'test': tests_require
},
entry_points={
'console_scripts': [
'broomer = broomer.cli:main'
]
},
license='BSD',
keywords='github issues broomer',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-07-12 07:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trend', '0010_auto_20190711_1434'),
]
operations = [
migrations.AddField(
model_name='product',
name='gender',
field=models.CharField(choices=[('Menswear', 'Menswear'), ('womenswear', 'womenswear'), ('Generalised', 'Generalised')], default='menswear', max_length=50),
preserve_default=False,
),
migrations.AlterField(
model_name='product',
name='category',
field=models.CharField(choices=[('Rocker looks', 'Rocker looks'), ('Tomboy Fashion', 'Tomboy Fashion'), ('Sophisticated Fashion', 'Sophisticated Fashion'), ('Artsy Fashion', 'Artsy Fashion'), ('Casual Fashion', 'Casual Fashion'), ('Vintage Fashion', 'Vintage Fashion')], default='No style', max_length=100),
),
]
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
# for the profile feed API:
from django.conf import settings
class UserProfileManager(BaseUserManager):
"""manager for user profiles"""
def create_user(self,email,name,password=None):
"""create a new user profile"""
if not email:
raise ValueError('User must have email')
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self,email,name,password):
"""creates and saves a new superuser with given details"""
user = self.create_user(email,name,password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser,PermissionsMixin):
"""database model for users in the system"""
email=models.EmailField(max_length=255,unique=True)
name=models.CharField(max_length=255)
is_active=models.BooleanField(default=True)
is_staff=models.BooleanField(default=False)
# isf=models.BooleanField(default=False)
objects=UserProfileManager()
USERNAME_FIELD="email"
    # 'name' below is required in addition to the username field above, which is actually the email
REQUIRED_FIELDS=["name"]
def getfullname(self):
"""retrieve full name"""
return self.name
def getshortname(self):
'''retrieve shortname of user'''
return self.name
def __str__(self):
"""returns string representation of the user"""
return self.email
class profilefeeditem(models.Model):
"""profile status update"""
userprofile=models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
statustext=models.CharField(max_length=255)
createdon=models.DateTimeField(auto_now_add=True)
def __str__(self):
"""returns the model as a strign"""
return self.statustext
|
# -*- coding: utf-8 -*-
from typing import Any, Optional, Tuple
from .neg_cycle import negCycleFinder
Cut = Tuple[Any, float]
class network_oracle:
"""Oracle for Parametric Network Problem:
find x, u
s.t. u[j] − u[i] ≤ h(e, x)
∀ e(i, j) ∈ E
"""
def __init__(self, G, u, h):
"""[summary]
Arguments:
G: a directed graph (V, E)
u: list or dictionary
h: function evaluation and gradient
"""
self._G = G
self._u = u
self._h = h
self._S = negCycleFinder(G)
def update(self, t):
"""[summary]
Arguments:
t (float): the best-so-far optimal value
"""
self._h.update(t)
def __call__(self, x) -> Optional[Cut]:
"""Make object callable for cutting_plane_feas()
Arguments:
            x: the point at which the constraints are evaluated
Returns:
            Optional[Cut]: a cut (g, f) if some constraint is violated at x, otherwise None
"""
def get_weight(e):
"""[summary]
Arguments:
e ([type]): [description]
Returns:
Any: [description]
"""
return self._h.eval(e, x)
for Ci in self._S.find_neg_cycle(self._u, get_weight):
f = -sum(self._h.eval(e, x) for e in Ci)
g = -sum(self._h.grad(e, x) for e in Ci)
return g, f
return None
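# Note on the cut semantics above: a negative cycle Ci means the constraints
# u[j] - u[i] <= h(e, x) cannot all hold for the current x, so f = -sum(h(e, x))
# is positive and (g, f) with g = -sum(grad h(e, x)) is returned as a separating
# cut for cutting_plane_feas(); when no negative cycle exists, None signals
# feasibility.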
|
import gym
class Engine:
def __init__(self, env_name, max_total_steps = 20000,
max_episodes = 3000,
max_steps_per_episode = 200):
self.env = gym.make(env_name)
self.max_total_steps = max_total_steps
self.max_episodes = max_episodes
self.max_steps_per_episode = max_steps_per_episode
self.render = False
def rollout(self, agent):
global_step = 0
for i_episode in range(self.max_episodes):
self.total_reward = 0
agent.episode_started()
observation = self.env.reset()
for t in range(self.max_steps_per_episode):
self.render_env()
observation, done = self.step_env(agent, observation)
global_step += 1
if done or global_step > self.max_total_steps:
break
agent.episode_ended()
print("{}. Episode {} finished after {} timesteps. Total reward: {}"
.format(global_step, i_episode + 1, t + 1, self.total_reward))
if global_step > self.max_total_steps:
break
self.env.close()
def step_env(self, agent, observation):
action = agent.select_action(observation)
# observation - an environment-specific object representing your observation of the environment
# reward - amount of reward achieved by the previous action
# done - whether it’s time to reset the environment again
# info - diagnostic information useful for debugging
observation, reward, done, info = self.env.step(action)
agent.register_reward(observation, reward, done)
self.total_reward += reward
return observation, done
def render_env(self):
if self.render:
            self.env.render()
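# A minimal sketch of the agent interface Engine.rollout() expects (hypothetical
# example, not part of the original project): episode_started(), select_action(),
# register_reward() and episode_ended().
class RandomAgent:
    def __init__(self, action_space):
        self.action_space = action_space

    def episode_started(self):
        pass

    def select_action(self, observation):
        # Sample uniformly from the environment's action space.
        return self.action_space.sample()

    def register_reward(self, observation, reward, done):
        pass

    def episode_ended(self):
        pass

# Example wiring (assumes the classic "CartPole-v1" environment is available):
#   engine = Engine("CartPole-v1")
#   engine.rollout(RandomAgent(engine.env.action_space))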
|
import unittest
class TestServer(unittest.TestCase):
def test_server(self: unittest.TestCase) -> None:
pass
if __name__ == '__main__':
unittest.main()
|
"""Basic tools for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from sympy import oo
from sympy.core import igcd
from sympy.polys.monomials import monomial_min, monomial_div
from sympy.polys.orderings import monomial_key
import random
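# Representation note: a "dup" (dense univariate polynomial) is a plain list of
# coefficients ordered from the highest degree down, e.g. [1, 2, 3] stands for
# x**2 + 2*x + 3. A "dmp" (dense multivariate polynomial) nests such lists
# recursively, one level per variable; u is the nesting level, so u = 0 is the
# univariate case and, for u = 1, [[1], [1, 2]] stands for x + y + 2.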
def poly_LC(f, K):
"""
Return leading coefficient of ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import poly_LC
>>> poly_LC([], ZZ)
0
>>> poly_LC([ZZ(1), ZZ(2), ZZ(3)], ZZ)
1
"""
if not f:
return K.zero
else:
return f[0]
def poly_TC(f, K):
"""
Return trailing coefficient of ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import poly_TC
>>> poly_TC([], ZZ)
0
>>> poly_TC([ZZ(1), ZZ(2), ZZ(3)], ZZ)
3
"""
if not f:
return K.zero
else:
return f[-1]
dup_LC = dmp_LC = poly_LC
dup_TC = dmp_TC = poly_TC
def dmp_ground_LC(f, u, K):
"""
Return the ground leading coefficient.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_LC
>>> f = ZZ.map([[[1], [2, 3]]])
>>> dmp_ground_LC(f, 2, ZZ)
1
"""
while u:
f = dmp_LC(f, K)
u -= 1
return dup_LC(f, K)
def dmp_ground_TC(f, u, K):
"""
Return the ground trailing coefficient.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_TC
>>> f = ZZ.map([[[1], [2, 3]]])
>>> dmp_ground_TC(f, 2, ZZ)
3
"""
while u:
f = dmp_TC(f, K)
u -= 1
return dup_TC(f, K)
def dmp_true_LT(f, u, K):
"""
Return the leading term ``c * x_1**n_1 ... x_k**n_k``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_true_LT
>>> f = ZZ.map([[4], [2, 0], [3, 0, 0]])
>>> dmp_true_LT(f, 1, ZZ)
((2, 0), 4)
"""
monom = []
while u:
monom.append(len(f) - 1)
f, u = f[0], u - 1
if not f:
monom.append(0)
else:
monom.append(len(f) - 1)
return tuple(monom), dup_LC(f, K)
def dup_degree(f):
"""
Return the leading degree of ``f`` in ``K[x]``.
Note that the degree of 0 is negative infinity (the SymPy object -oo).
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_degree
>>> f = ZZ.map([1, 2, 0, 3])
>>> dup_degree(f)
3
"""
if not f:
return -oo
return len(f) - 1
def dmp_degree(f, u):
"""
Return the leading degree of ``f`` in ``x_0`` in ``K[X]``.
Note that the degree of 0 is negative infinity (the SymPy object -oo).
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree
>>> dmp_degree([[[]]], 2)
-oo
>>> f = ZZ.map([[2], [1, 2, 3]])
>>> dmp_degree(f, 1)
1
"""
if dmp_zero_p(f, u):
return -oo
else:
return len(f) - 1
def _rec_degree_in(g, v, i, j):
"""Recursive helper function for :func:`dmp_degree_in`."""
if i == j:
return dmp_degree(g, v)
v, i = v - 1, i + 1
return max([ _rec_degree_in(c, v, i, j) for c in g ])
def dmp_degree_in(f, j, u):
"""
Return the leading degree of ``f`` in ``x_j`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree_in
>>> f = ZZ.map([[2], [1, 2, 3]])
>>> dmp_degree_in(f, 0, 1)
1
>>> dmp_degree_in(f, 1, 1)
2
"""
if not j:
return dmp_degree(f, u)
if j < 0 or j > u:
raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
return _rec_degree_in(f, u, 0, j)
def _rec_degree_list(g, v, i, degs):
"""Recursive helper for :func:`dmp_degree_list`."""
degs[i] = max(degs[i], dmp_degree(g, v))
if v > 0:
v, i = v - 1, i + 1
for c in g:
_rec_degree_list(c, v, i, degs)
def dmp_degree_list(f, u):
"""
Return a list of degrees of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree_list
>>> f = ZZ.map([[1], [1, 2, 3]])
>>> dmp_degree_list(f, 1)
(1, 2)
"""
degs = [-oo]*(u + 1)
_rec_degree_list(f, u, 0, degs)
return tuple(degs)
def dup_strip(f):
"""
Remove leading zeros from ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.densebasic import dup_strip
>>> dup_strip([0, 0, 1, 2, 3, 0])
[1, 2, 3, 0]
"""
if not f or f[0]:
return f
i = 0
for cf in f:
if cf:
break
else:
i += 1
return f[i:]
def dmp_strip(f, u):
"""
Remove leading zeros from ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_strip
>>> dmp_strip([[], [0, 1, 2], [1]], 1)
[[0, 1, 2], [1]]
"""
if not u:
return dup_strip(f)
if dmp_zero_p(f, u):
return f
i, v = 0, u - 1
for c in f:
if not dmp_zero_p(c, v):
break
else:
i += 1
if i == len(f):
return dmp_zero(u)
else:
return f[i:]
def _rec_validate(f, g, i, K):
"""Recursive helper for :func:`dmp_validate`."""
if type(g) is not list:
if K is not None and not K.of_type(g):
raise TypeError("%s in %s in not of type %s" % (g, f, K.dtype))
return {i - 1}
elif not g:
return {i}
else:
levels = set()
for c in g:
levels |= _rec_validate(f, c, i + 1, K)
return levels
def _rec_strip(g, v):
"""Recursive helper for :func:`_rec_strip`."""
if not v:
return dup_strip(g)
w = v - 1
return dmp_strip([ _rec_strip(c, w) for c in g ], v)
def dmp_validate(f, K=None):
"""
Return the number of levels in ``f`` and recursively strip it.
Examples
========
>>> from sympy.polys.densebasic import dmp_validate
>>> dmp_validate([[], [0, 1, 2], [1]])
([[1, 2], [1]], 1)
>>> dmp_validate([[1], 1])
Traceback (most recent call last):
...
ValueError: invalid data structure for a multivariate polynomial
"""
levels = _rec_validate(f, f, 0, K)
u = levels.pop()
if not levels:
return _rec_strip(f, u), u
else:
raise ValueError(
"invalid data structure for a multivariate polynomial")
def dup_reverse(f):
"""
Compute ``x**n * f(1/x)``, i.e.: reverse ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_reverse
>>> f = ZZ.map([1, 2, 3, 0])
>>> dup_reverse(f)
[3, 2, 1]
"""
return dup_strip(list(reversed(f)))
def dup_copy(f):
"""
Create a new copy of a polynomial ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_copy
>>> f = ZZ.map([1, 2, 3, 0])
>>> dup_copy([1, 2, 3, 0])
[1, 2, 3, 0]
"""
return list(f)
def dmp_copy(f, u):
"""
Create a new copy of a polynomial ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_copy
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_copy(f, 1)
[[1], [1, 2]]
"""
if not u:
return list(f)
v = u - 1
return [ dmp_copy(c, v) for c in f ]
def dup_to_tuple(f):
"""
Convert `f` into a tuple.
This is needed for hashing. This is similar to dup_copy().
Examples
========
>>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_to_tuple
    >>> f = ZZ.map([1, 2, 3, 0])
    >>> dup_to_tuple(f)
    (1, 2, 3, 0)
"""
return tuple(f)
def dmp_to_tuple(f, u):
"""
Convert `f` into a nested tuple of tuples.
This is needed for hashing. This is similar to dmp_copy().
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_to_tuple
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_to_tuple(f, 1)
((1,), (1, 2))
"""
if not u:
return tuple(f)
v = u - 1
return tuple(dmp_to_tuple(c, v) for c in f)
def dup_normal(f, K):
"""
Normalize univariate polynomial in the given domain.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_normal
>>> dup_normal([0, 1.5, 2, 3], ZZ)
[1, 2, 3]
"""
return dup_strip([ K.normal(c) for c in f ])
def dmp_normal(f, u, K):
"""
Normalize a multivariate polynomial in the given domain.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_normal
>>> dmp_normal([[], [0, 1.5, 2]], 1, ZZ)
[[1, 2]]
"""
if not u:
return dup_normal(f, K)
v = u - 1
return dmp_strip([ dmp_normal(c, v, K) for c in f ], u)
def dup_convert(f, K0, K1):
"""
Convert the ground domain of ``f`` from ``K0`` to ``K1``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_convert
>>> R, x = ring("x", ZZ)
>>> dup_convert([R(1), R(2)], R.to_domain(), ZZ)
[1, 2]
>>> dup_convert([ZZ(1), ZZ(2)], ZZ, R.to_domain())
[1, 2]
"""
if K0 is not None and K0 == K1:
return f
else:
return dup_strip([ K1.convert(c, K0) for c in f ])
def dmp_convert(f, u, K0, K1):
"""
Convert the ground domain of ``f`` from ``K0`` to ``K1``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_convert
>>> R, x = ring("x", ZZ)
>>> dmp_convert([[R(1)], [R(2)]], 1, R.to_domain(), ZZ)
[[1], [2]]
>>> dmp_convert([[ZZ(1)], [ZZ(2)]], 1, ZZ, R.to_domain())
[[1], [2]]
"""
if not u:
return dup_convert(f, K0, K1)
if K0 is not None and K0 == K1:
return f
v = u - 1
return dmp_strip([ dmp_convert(c, v, K0, K1) for c in f ], u)
def dup_from_sympy(f, K):
"""
Convert the ground domain of ``f`` from SymPy to ``K``.
Examples
========
>>> from sympy import S
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_sympy
>>> dup_from_sympy([S(1), S(2)], ZZ) == [ZZ(1), ZZ(2)]
True
"""
return dup_strip([ K.from_sympy(c) for c in f ])
def dmp_from_sympy(f, u, K):
"""
Convert the ground domain of ``f`` from SymPy to ``K``.
Examples
========
>>> from sympy import S
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_from_sympy
>>> dmp_from_sympy([[S(1)], [S(2)]], 1, ZZ) == [[ZZ(1)], [ZZ(2)]]
True
"""
if not u:
return dup_from_sympy(f, K)
v = u - 1
return dmp_strip([ dmp_from_sympy(c, v, K) for c in f ], u)
def dup_nth(f, n, K):
"""
Return the ``n``-th coefficient of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_nth
>>> f = ZZ.map([1, 2, 3])
>>> dup_nth(f, 0, ZZ)
3
>>> dup_nth(f, 4, ZZ)
0
"""
if n < 0:
raise IndexError("'n' must be non-negative, got %i" % n)
elif n >= len(f):
return K.zero
else:
return f[dup_degree(f) - n]
def dmp_nth(f, n, u, K):
"""
Return the ``n``-th coefficient of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_nth
>>> f = ZZ.map([[1], [2], [3]])
>>> dmp_nth(f, 0, 1, ZZ)
[3]
>>> dmp_nth(f, 4, 1, ZZ)
[]
"""
if n < 0:
raise IndexError("'n' must be non-negative, got %i" % n)
elif n >= len(f):
return dmp_zero(u - 1)
else:
return f[dmp_degree(f, u) - n]
def dmp_ground_nth(f, N, u, K):
"""
Return the ground ``n``-th coefficient of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_nth
>>> f = ZZ.map([[1], [2, 3]])
>>> dmp_ground_nth(f, (0, 1), 1, ZZ)
2
"""
v = u
for n in N:
if n < 0:
raise IndexError("`n` must be non-negative, got %i" % n)
elif n >= len(f):
return K.zero
else:
d = dmp_degree(f, v)
if d == -oo:
d = -1
f, v = f[d - n], v - 1
return f
def dmp_zero_p(f, u):
"""
Return ``True`` if ``f`` is zero in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_zero_p
>>> dmp_zero_p([[[[[]]]]], 4)
True
>>> dmp_zero_p([[[[[1]]]]], 4)
False
"""
while u:
if len(f) != 1:
return False
f = f[0]
u -= 1
return not f
def dmp_zero(u):
"""
Return a multivariate zero.
Examples
========
>>> from sympy.polys.densebasic import dmp_zero
>>> dmp_zero(4)
[[[[[]]]]]
"""
r = []
for i in range(u):
r = [r]
return r
def dmp_one_p(f, u, K):
"""
Return ``True`` if ``f`` is one in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_one_p
>>> dmp_one_p([[[ZZ(1)]]], 2, ZZ)
True
"""
return dmp_ground_p(f, K.one, u)
def dmp_one(u, K):
"""
Return a multivariate one over ``K``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_one
>>> dmp_one(2, ZZ)
[[[1]]]
"""
return dmp_ground(K.one, u)
def dmp_ground_p(f, c, u):
"""
Return True if ``f`` is constant in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_ground_p
>>> dmp_ground_p([[[3]]], 3, 2)
True
>>> dmp_ground_p([[[4]]], None, 2)
True
"""
if c is not None and not c:
return dmp_zero_p(f, u)
while u:
if len(f) != 1:
return False
f = f[0]
u -= 1
if c is None:
return len(f) <= 1
else:
return f == [c]
def dmp_ground(c, u):
"""
Return a multivariate constant.
Examples
========
>>> from sympy.polys.densebasic import dmp_ground
>>> dmp_ground(3, 5)
[[[[[[3]]]]]]
>>> dmp_ground(1, -1)
1
"""
if not c:
return dmp_zero(u)
for i in range(u + 1):
c = [c]
return c
def dmp_zeros(n, u, K):
"""
Return a list of multivariate zeros.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_zeros
>>> dmp_zeros(3, 2, ZZ)
[[[[]]], [[[]]], [[[]]]]
>>> dmp_zeros(3, -1, ZZ)
[0, 0, 0]
"""
if not n:
return []
if u < 0:
return [K.zero]*n
else:
return [ dmp_zero(u) for i in range(n) ]
def dmp_grounds(c, n, u):
"""
Return a list of multivariate constants.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_grounds
>>> dmp_grounds(ZZ(4), 3, 2)
[[[[4]]], [[[4]]], [[[4]]]]
>>> dmp_grounds(ZZ(4), 3, -1)
[4, 4, 4]
"""
if not n:
return []
if u < 0:
return [c]*n
else:
return [ dmp_ground(c, u) for i in range(n) ]
def dmp_negative_p(f, u, K):
"""
Return ``True`` if ``LC(f)`` is negative.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_negative_p
>>> dmp_negative_p([[ZZ(1)], [-ZZ(1)]], 1, ZZ)
False
>>> dmp_negative_p([[-ZZ(1)], [ZZ(1)]], 1, ZZ)
True
"""
return K.is_negative(dmp_ground_LC(f, u, K))
def dmp_positive_p(f, u, K):
"""
Return ``True`` if ``LC(f)`` is positive.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_positive_p
>>> dmp_positive_p([[ZZ(1)], [-ZZ(1)]], 1, ZZ)
True
>>> dmp_positive_p([[-ZZ(1)], [ZZ(1)]], 1, ZZ)
False
"""
return K.is_positive(dmp_ground_LC(f, u, K))
def dup_from_dict(f, K):
"""
Create a ``K[x]`` polynomial from a ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_dict
>>> dup_from_dict({(0,): ZZ(7), (2,): ZZ(5), (4,): ZZ(1)}, ZZ)
[1, 0, 5, 0, 7]
>>> dup_from_dict({}, ZZ)
[]
"""
if not f:
return []
n, h = max(f.keys()), []
if type(n) is int:
for k in range(n, -1, -1):
h.append(f.get(k, K.zero))
else:
(n,) = n
for k in range(n, -1, -1):
h.append(f.get((k,), K.zero))
return dup_strip(h)
def dup_from_raw_dict(f, K):
"""
Create a ``K[x]`` polynomial from a raw ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_raw_dict
>>> dup_from_raw_dict({0: ZZ(7), 2: ZZ(5), 4: ZZ(1)}, ZZ)
[1, 0, 5, 0, 7]
"""
if not f:
return []
n, h = max(f.keys()), []
for k in range(n, -1, -1):
h.append(f.get(k, K.zero))
return dup_strip(h)
def dmp_from_dict(f, u, K):
"""
Create a ``K[X]`` polynomial from a ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_from_dict
>>> dmp_from_dict({(0, 0): ZZ(3), (0, 1): ZZ(2), (2, 1): ZZ(1)}, 1, ZZ)
[[1, 0], [], [2, 3]]
>>> dmp_from_dict({}, 0, ZZ)
[]
"""
if not u:
return dup_from_dict(f, K)
if not f:
return dmp_zero(u)
coeffs = {}
for monom, coeff in f.items():
head, tail = monom[0], monom[1:]
if head in coeffs:
coeffs[head][tail] = coeff
else:
coeffs[head] = { tail: coeff }
n, v, h = max(coeffs.keys()), u - 1, []
for k in range(n, -1, -1):
coeff = coeffs.get(k)
if coeff is not None:
h.append(dmp_from_dict(coeff, v, K))
else:
h.append(dmp_zero(v))
return dmp_strip(h, u)
def dup_to_dict(f, K=None, zero=False):
"""
Convert ``K[x]`` polynomial to a ``dict``.
Examples
========
>>> from sympy.polys.densebasic import dup_to_dict
>>> dup_to_dict([1, 0, 5, 0, 7])
{(0,): 7, (2,): 5, (4,): 1}
>>> dup_to_dict([])
{}
"""
if not f and zero:
return {(0,): K.zero}
n, result = len(f) - 1, {}
for k in range(0, n + 1):
if f[n - k]:
result[(k,)] = f[n - k]
return result
def dup_to_raw_dict(f, K=None, zero=False):
"""
Convert a ``K[x]`` polynomial to a raw ``dict``.
Examples
========
>>> from sympy.polys.densebasic import dup_to_raw_dict
>>> dup_to_raw_dict([1, 0, 5, 0, 7])
{0: 7, 2: 5, 4: 1}
"""
if not f and zero:
return {0: K.zero}
n, result = len(f) - 1, {}
for k in range(0, n + 1):
if f[n - k]:
result[k] = f[n - k]
return result
def dmp_to_dict(f, u, K=None, zero=False):
"""
    Convert a ``K[X]`` polynomial to a ``dict``.
Examples
========
>>> from sympy.polys.densebasic import dmp_to_dict
>>> dmp_to_dict([[1, 0], [], [2, 3]], 1)
{(0, 0): 3, (0, 1): 2, (2, 1): 1}
>>> dmp_to_dict([], 0)
{}
"""
if not u:
return dup_to_dict(f, K, zero=zero)
if dmp_zero_p(f, u) and zero:
return {(0,)*(u + 1): K.zero}
n, v, result = dmp_degree(f, u), u - 1, {}
if n == -oo:
n = -1
for k in range(0, n + 1):
h = dmp_to_dict(f[n - k], v)
for exp, coeff in h.items():
result[(k,) + exp] = coeff
return result
def dmp_swap(f, i, j, u, K):
"""
Transform ``K[..x_i..x_j..]`` to ``K[..x_j..x_i..]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_swap
>>> f = ZZ.map([[[2], [1, 0]], []])
>>> dmp_swap(f, 0, 1, 2, ZZ)
[[[2], []], [[1, 0], []]]
>>> dmp_swap(f, 1, 2, 2, ZZ)
[[[1], [2, 0]], [[]]]
>>> dmp_swap(f, 0, 2, 2, ZZ)
[[[1, 0]], [[2, 0], []]]
"""
if i < 0 or j < 0 or i > u or j > u:
raise IndexError("0 <= i < j <= %s expected" % u)
elif i == j:
return f
F, H = dmp_to_dict(f, u), {}
for exp, coeff in F.items():
H[exp[:i] + (exp[j],) +
exp[i + 1:j] +
(exp[i],) + exp[j + 1:]] = coeff
return dmp_from_dict(H, u, K)
def dmp_permute(f, P, u, K):
"""
Return a polynomial in ``K[x_{P(1)},..,x_{P(n)}]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_permute
>>> f = ZZ.map([[[2], [1, 0]], []])
>>> dmp_permute(f, [1, 0, 2], 2, ZZ)
[[[2], []], [[1, 0], []]]
>>> dmp_permute(f, [1, 2, 0], 2, ZZ)
[[[1], []], [[2, 0], []]]
"""
F, H = dmp_to_dict(f, u), {}
for exp, coeff in F.items():
new_exp = [0]*len(exp)
for e, p in zip(exp, P):
new_exp[p] = e
H[tuple(new_exp)] = coeff
return dmp_from_dict(H, u, K)
def dmp_nest(f, l, K):
"""
Return a multivariate value nested ``l``-levels.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_nest
>>> dmp_nest([[ZZ(1)]], 2, ZZ)
[[[[1]]]]
"""
if not isinstance(f, list):
return dmp_ground(f, l)
for i in range(l):
f = [f]
return f
def dmp_raise(f, l, u, K):
"""
Return a multivariate polynomial raised ``l``-levels.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_raise
>>> f = ZZ.map([[], [1, 2]])
>>> dmp_raise(f, 2, 1, ZZ)
[[[[]]], [[[1]], [[2]]]]
"""
if not l:
return f
if not u:
if not f:
return dmp_zero(l)
k = l - 1
return [ dmp_ground(c, k) for c in f ]
v = u - 1
return [ dmp_raise(c, l, v, K) for c in f ]
def dup_deflate(f, K):
"""
Map ``x**m`` to ``y`` in a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_deflate
>>> f = ZZ.map([1, 0, 0, 1, 0, 0, 1])
>>> dup_deflate(f, ZZ)
(3, [1, 1, 1])
"""
if dup_degree(f) <= 0:
return 1, f
g = 0
for i in range(len(f)):
if not f[-i - 1]:
continue
g = igcd(g, i)
if g == 1:
return 1, f
return g, f[::g]
def dmp_deflate(f, u, K):
"""
Map ``x_i**m_i`` to ``y_i`` in a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_deflate
>>> f = ZZ.map([[1, 0, 0, 2], [], [3, 0, 0, 4]])
>>> dmp_deflate(f, 1, ZZ)
((2, 3), [[1, 2], [3, 4]])
"""
if dmp_zero_p(f, u):
return (1,)*(u + 1), f
F = dmp_to_dict(f, u)
B = [0]*(u + 1)
for M in F.keys():
for i, m in enumerate(M):
B[i] = igcd(B[i], m)
for i, b in enumerate(B):
if not b:
B[i] = 1
B = tuple(B)
if all(b == 1 for b in B):
return B, f
H = {}
for A, coeff in F.items():
N = [ a // b for a, b in zip(A, B) ]
H[tuple(N)] = coeff
return B, dmp_from_dict(H, u, K)
def dup_multi_deflate(polys, K):
"""
Map ``x**m`` to ``y`` in a set of polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_multi_deflate
>>> f = ZZ.map([1, 0, 2, 0, 3])
>>> g = ZZ.map([4, 0, 0])
>>> dup_multi_deflate((f, g), ZZ)
(2, ([1, 2, 3], [4, 0]))
"""
G = 0
for p in polys:
if dup_degree(p) <= 0:
return 1, polys
g = 0
for i in range(len(p)):
if not p[-i - 1]:
continue
g = igcd(g, i)
if g == 1:
return 1, polys
G = igcd(G, g)
return G, tuple([ p[::G] for p in polys ])
def dmp_multi_deflate(polys, u, K):
"""
Map ``x_i**m_i`` to ``y_i`` in a set of polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_multi_deflate
>>> f = ZZ.map([[1, 0, 0, 2], [], [3, 0, 0, 4]])
>>> g = ZZ.map([[1, 0, 2], [], [3, 0, 4]])
>>> dmp_multi_deflate((f, g), 1, ZZ)
((2, 1), ([[1, 0, 0, 2], [3, 0, 0, 4]], [[1, 0, 2], [3, 0, 4]]))
"""
if not u:
M, H = dup_multi_deflate(polys, K)
return (M,), H
F, B = [], [0]*(u + 1)
for p in polys:
f = dmp_to_dict(p, u)
if not dmp_zero_p(p, u):
for M in f.keys():
for i, m in enumerate(M):
B[i] = igcd(B[i], m)
F.append(f)
for i, b in enumerate(B):
if not b:
B[i] = 1
B = tuple(B)
if all(b == 1 for b in B):
return B, polys
H = []
for f in F:
h = {}
for A, coeff in f.items():
N = [ a // b for a, b in zip(A, B) ]
h[tuple(N)] = coeff
H.append(dmp_from_dict(h, u, K))
return B, tuple(H)
def dup_inflate(f, m, K):
"""
Map ``y`` to ``x**m`` in a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_inflate
>>> f = ZZ.map([1, 1, 1])
>>> dup_inflate(f, 3, ZZ)
[1, 0, 0, 1, 0, 0, 1]
"""
if m <= 0:
raise IndexError("'m' must be positive, got %s" % m)
if m == 1 or not f:
return f
result = [f[0]]
for coeff in f[1:]:
result.extend([K.zero]*(m - 1))
result.append(coeff)
return result
def _rec_inflate(g, M, v, i, K):
"""Recursive helper for :func:`dmp_inflate`."""
if not v:
return dup_inflate(g, M[i], K)
if M[i] <= 0:
raise IndexError("all M[i] must be positive, got %s" % M[i])
w, j = v - 1, i + 1
g = [ _rec_inflate(c, M, w, j, K) for c in g ]
result = [g[0]]
for coeff in g[1:]:
for _ in range(1, M[i]):
result.append(dmp_zero(w))
result.append(coeff)
return result
def dmp_inflate(f, M, u, K):
"""
Map ``y_i`` to ``x_i**k_i`` in a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_inflate
>>> f = ZZ.map([[1, 2], [3, 4]])
>>> dmp_inflate(f, (2, 3), 1, ZZ)
[[1, 0, 0, 2], [], [3, 0, 0, 4]]
"""
if not u:
return dup_inflate(f, M[0], K)
if all(m == 1 for m in M):
return f
else:
return _rec_inflate(f, M, u, 0, K)
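# Illustrative note (not part of the original module): dup_inflate/dmp_inflate
# undo dup_deflate/dmp_deflate, e.g. reusing the doctest values above:
#
#   m, g = dup_deflate(ZZ.map([1, 0, 0, 1, 0, 0, 1]), ZZ)   # (3, [1, 1, 1])
#   dup_inflate(g, m, ZZ)                                    # [1, 0, 0, 1, 0, 0, 1]
#
#   B, h = dmp_deflate(ZZ.map([[1, 0, 0, 2], [], [3, 0, 0, 4]]), 1, ZZ)  # ((2, 3), [[1, 2], [3, 4]])
#   dmp_inflate(h, B, 1, ZZ)                                 # [[1, 0, 0, 2], [], [3, 0, 0, 4]]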
def dmp_exclude(f, u, K):
"""
Exclude useless levels from ``f``.
Return the levels excluded, the new excluded ``f``, and the new ``u``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_exclude
>>> f = ZZ.map([[[1]], [[1], [2]]])
>>> dmp_exclude(f, 2, ZZ)
([2], [[1], [1, 2]], 1)
"""
if not u or dmp_ground_p(f, None, u):
return [], f, u
J, F = [], dmp_to_dict(f, u)
for j in range(0, u + 1):
for monom in F.keys():
if monom[j]:
break
else:
J.append(j)
if not J:
return [], f, u
f = {}
for monom, coeff in F.items():
monom = list(monom)
for j in reversed(J):
del monom[j]
f[tuple(monom)] = coeff
u -= len(J)
return J, dmp_from_dict(f, u, K), u
def dmp_include(f, J, u, K):
"""
Include useless levels in ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_include
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_include(f, [2], 1, ZZ)
[[[1]], [[1], [2]]]
"""
if not J:
return f
F, f = dmp_to_dict(f, u), {}
for monom, coeff in F.items():
monom = list(monom)
for j in J:
monom.insert(j, 0)
f[tuple(monom)] = coeff
u += len(J)
return dmp_from_dict(f, u, K)
def dmp_inject(f, u, K, front=False):
"""
Convert ``f`` from ``K[X][Y]`` to ``K[X,Y]``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_inject
>>> R, x,y = ring("x,y", ZZ)
>>> dmp_inject([R(1), x + 2], 0, R.to_domain())
([[[1]], [[1], [2]]], 2)
>>> dmp_inject([R(1), x + 2], 0, R.to_domain(), front=True)
([[[1]], [[1, 2]]], 2)
"""
f, h = dmp_to_dict(f, u), {}
v = K.ngens - 1
for f_monom, g in f.items():
g = g.to_dict()
for g_monom, c in g.items():
if front:
h[g_monom + f_monom] = c
else:
h[f_monom + g_monom] = c
w = u + v + 1
return dmp_from_dict(h, w, K.dom), w
def dmp_eject(f, u, K, front=False):
"""
Convert ``f`` from ``K[X,Y]`` to ``K[X][Y]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_eject
>>> dmp_eject([[[1]], [[1], [2]]], 2, ZZ['x', 'y'])
[1, x + 2]
"""
f, h = dmp_to_dict(f, u), {}
n = K.ngens
v = u - K.ngens + 1
for monom, c in f.items():
if front:
g_monom, f_monom = monom[:n], monom[n:]
else:
g_monom, f_monom = monom[-n:], monom[:-n]
if f_monom in h:
h[f_monom][g_monom] = c
else:
h[f_monom] = {g_monom: c}
for monom, c in h.items():
h[monom] = K(c)
return dmp_from_dict(h, v - 1, K)
def dup_terms_gcd(f, K):
"""
Remove GCD of terms from ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_terms_gcd
>>> f = ZZ.map([1, 0, 1, 0, 0])
>>> dup_terms_gcd(f, ZZ)
(2, [1, 0, 1])
"""
if dup_TC(f, K) or not f:
return 0, f
i = 0
for c in reversed(f):
if not c:
i += 1
else:
break
return i, f[:-i]
def dmp_terms_gcd(f, u, K):
"""
Remove GCD of terms from ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_terms_gcd
>>> f = ZZ.map([[1, 0], [1, 0, 0], [], []])
>>> dmp_terms_gcd(f, 1, ZZ)
((2, 1), [[1], [1, 0]])
"""
if dmp_ground_TC(f, u, K) or dmp_zero_p(f, u):
return (0,)*(u + 1), f
F = dmp_to_dict(f, u)
G = monomial_min(*list(F.keys()))
if all(g == 0 for g in G):
return G, f
f = {}
for monom, coeff in F.items():
f[monomial_div(monom, G)] = coeff
return G, dmp_from_dict(f, u, K)
def _rec_list_terms(g, v, monom):
"""Recursive helper for :func:`dmp_list_terms`."""
d, terms = dmp_degree(g, v), []
if not v:
for i, c in enumerate(g):
if not c:
continue
terms.append((monom + (d - i,), c))
else:
w = v - 1
for i, c in enumerate(g):
terms.extend(_rec_list_terms(c, w, monom + (d - i,)))
return terms
def dmp_list_terms(f, u, K, order=None):
"""
List all non-zero terms from ``f`` in the given order ``order``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_list_terms
>>> f = ZZ.map([[1, 1], [2, 3]])
>>> dmp_list_terms(f, 1, ZZ)
[((1, 1), 1), ((1, 0), 1), ((0, 1), 2), ((0, 0), 3)]
>>> dmp_list_terms(f, 1, ZZ, order='grevlex')
[((1, 1), 1), ((1, 0), 1), ((0, 1), 2), ((0, 0), 3)]
"""
def sort(terms, O):
return sorted(terms, key=lambda term: O(term[0]), reverse=True)
terms = _rec_list_terms(f, u, ())
if not terms:
return [((0,)*(u + 1), K.zero)]
if order is None:
return terms
else:
return sort(terms, monomial_key(order))
def dup_apply_pairs(f, g, h, args, K):
"""
Apply ``h`` to pairs of coefficients of ``f`` and ``g``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_apply_pairs
>>> h = lambda x, y, z: 2*x + y - z
>>> dup_apply_pairs([1, 2, 3], [3, 2, 1], h, (1,), ZZ)
[4, 5, 6]
"""
n, m = len(f), len(g)
if n != m:
if n > m:
g = [K.zero]*(n - m) + g
else:
f = [K.zero]*(m - n) + f
result = []
for a, b in zip(f, g):
result.append(h(a, b, *args))
return dup_strip(result)
def dmp_apply_pairs(f, g, h, args, u, K):
"""
Apply ``h`` to pairs of coefficients of ``f`` and ``g``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_apply_pairs
>>> h = lambda x, y, z: 2*x + y - z
>>> dmp_apply_pairs([[1], [2, 3]], [[3], [2, 1]], h, (1,), 1, ZZ)
[[4], [5, 6]]
"""
if not u:
return dup_apply_pairs(f, g, h, args, K)
n, m, v = len(f), len(g), u - 1
if n != m:
if n > m:
g = dmp_zeros(n - m, v, K) + g
else:
f = dmp_zeros(m - n, v, K) + f
result = []
for a, b in zip(f, g):
result.append(dmp_apply_pairs(a, b, h, args, v, K))
return dmp_strip(result, u)
def dup_slice(f, m, n, K):
"""Take a continuous subsequence of terms of ``f`` in ``K[x]``. """
k = len(f)
if k >= m:
M = k - m
else:
M = 0
if k >= n:
N = k - n
else:
N = 0
f = f[N:M]
if not f:
return []
else:
return f + [K.zero]*m
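# Illustrative note (not part of the original module): dup_slice keeps the terms
# whose degree d satisfies m <= d < n. For f = x**3 + 2*x**2 + 3*x + 4:
#
#   dup_slice(ZZ.map([1, 2, 3, 4]), 1, 3, ZZ)   # [2, 3, 0], i.e. 2*x**2 + 3*x
#   dup_slice(ZZ.map([1, 2, 3, 4]), 0, 2, ZZ)   # [3, 4],    i.e. 3*x + 4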
def dmp_slice(f, m, n, u, K):
"""Take a continuous subsequence of terms of ``f`` in ``K[X]``. """
return dmp_slice_in(f, m, n, 0, u, K)
def dmp_slice_in(f, m, n, j, u, K):
"""Take a continuous subsequence of terms of ``f`` in ``x_j`` in ``K[X]``. """
if j < 0 or j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
if not u:
return dup_slice(f, m, n, K)
f, g = dmp_to_dict(f, u), {}
for monom, coeff in f.items():
k = monom[j]
if k < m or k >= n:
monom = monom[:j] + (0,) + monom[j + 1:]
if monom in g:
g[monom] += coeff
else:
g[monom] = coeff
return dmp_from_dict(g, u, K)
def dup_random(n, a, b, K):
"""
Return a polynomial of degree ``n`` with coefficients in ``[a, b]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_random
>>> dup_random(3, -10, 10, ZZ) #doctest: +SKIP
[-2, -8, 9, -4]
"""
f = [ K.convert(random.randint(a, b)) for _ in range(0, n + 1) ]
while not f[0]:
f[0] = K.convert(random.randint(a, b))
return f
|
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2020 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version='1.0' encoding='utf-8'?>
<xml>
<id></id>
<!-- Do not edit id. This will be auto filled while exporting. If you are adding a new script keep the id empty -->
<version>3</version>
<!-- Do not edit version. This will be auto incremented while updating. If you are adding a new script you can keep the version as 1 -->
<name>TS_TAD_SetFanMaxOverrideAndReboot</name>
<!-- If you are adding a new script you can specify the script name. Script Name should be unique same as this file name with out .py extension -->
<primitive_test_id> </primitive_test_id>
<!-- Do not change primitive_test_id if you are editing an existing script. -->
<primitive_test_name>TADstub_Get</primitive_test_name>
<!-- -->
<primitive_test_version>3</primitive_test_version>
<!-- -->
<status>FREE</status>
<!-- -->
<synopsis>check for the persistence of Device.Thermal.Fan. parameters after enabling Fan Max Override and reboot</synopsis>
<!-- -->
<groups_id />
<!-- -->
<execution_time>30</execution_time>
<!-- -->
<long_duration>false</long_duration>
<!-- -->
<advanced_script>false</advanced_script>
<!-- execution_time is the time out time for test execution -->
<remarks></remarks>
<!-- Reason for skipping the tests if marked to skip -->
<skip>false</skip>
<!-- -->
<box_types>
<box_type>Broadband</box_type>
<!-- -->
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
<!-- -->
</rdk_versions>
<test_cases>
<test_case_id>TC_TAD_80</test_case_id>
<test_objective>This test case will check for the persistence of Device.Thermal.Fan. parameters after enabling MaxOverride and reboot</test_objective>
<test_type>Positive</test_type>
<test_setup>BroadBand</test_setup>
<pre_requisite>1.Ccsp Components in DUT should be in a running state that includes component under test Cable Modem
2.TDK Agent should be in running state or invoke it through StartTdk.sh script
3.The device should have a fan implementation</pre_requisite>
<api_or_interface_used>TADstub_SetOnly
TADstub_Get</api_or_interface_used>
<input_parameters>Name of the Parameter
Type of the value to be get/set
parameter value to be set/get</input_parameters>
<automation_approch>1.Function which needs to be tested will be configured in Test Manager GUI.
2.Python Script will be generated by Test Manager with provided arguments in configure page.
3.TM will load the TAD library via Test agent
4.Enable the Device.Thermal.Fan.MaxOverride
5.Initiate a reboot operation
6.Get the values of Parameter under Device.Thermal.Fan.
7.Device.Thermal.Fan.Status should be disabled, Device.Thermal.Fan.Speed should be zero and Device.Thermal.Fan.RotorLock should be Not_Applicable.
8.Revert the value back by disabling the Device.Thermal.Fan.MaxOverride
9.Test Manager will publish the result in GUI as PASS/FAILURE based on the response from TAD stub.
10.unload the loaded modules</automation_approch>
<expected_output>The values of the parameters under Device.Thermal.Fan. should not persist across reboot</expected_output>
<priority>High</priority>
<test_stub_interface>TAD</test_stub_interface>
<test_script>TS_TAD_SetFanMaxOverrideAndReboot</test_script>
<skipped>No</skipped>
<release_version>M77</release_version>
<remarks>None</remarks>
</test_cases>
<script_tags />
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from time import sleep;
import tdkutility;
from tdkutility import *
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("tad","1");
#IP and Port of box, No need to change,
#This will be replaced with corresponding Box Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_TAD_SetFanMaxOverrideAndReboot');
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus;
if "SUCCESS" in loadmodulestatus.upper():
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS");
tdkTestObj = obj.createTestStep('TADstub_SetOnly');
tdkTestObj.addParameter("ParamName","Device.Thermal.Fan.MaxOverride");
tdkTestObj.addParameter("ParamValue","true");
tdkTestObj.addParameter("Type","bool");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details= tdkTestObj.getResultDetails();
if expectedresult in actualresult:
print "TEST STEP 1: Set the Fan Max Override to true"
print "EXPECTED RESULT 1: Should set the Fan Max Override to true"
print "ACTUAL RESULT 1 : Set was Successfull"
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj.setResultStatus("SUCCESS");
sleep(5);
tdkTestObj = obj.createTestStep('TADstub_Get');
paramList=["Device.Thermal.Fan.Status","Device.Thermal.Fan.Speed","Device.Thermal.Fan.RotorLock","Device.Thermal.Fan.MaxOverride"]
tdkTestObj,status,orgValue = getMultipleParameterValues(obj,paramList)
if expectedresult in status:
print "TEST STEP 2: Get the parameters under Device.Thermal.Fan."
print "EXPECTED RESULT 2:Should get the parameters under Device.Thermal.Fan."
print "ACTUAL RESULT 2: Get was successful"
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj.setResultStatus("SUCCESS");
#rebooting the device
obj.initiateReboot();
sleep(300);
paramList=["Device.Thermal.Fan.Status","Device.Thermal.Fan.Speed","Device.Thermal.Fan.RotorLock","Device.Thermal.Fan.MaxOverride"]
tdkTestObj,status,afterrebootValue = getMultipleParameterValues(obj,paramList)
if expectedresult in status:
print "TEST STEP 3: Get the parameters under Device.Thermal.Fan after reboot"
print "EXPECTED RESULT 3:Should get the parameters under Device.Thermal.Fan after reboot"
print "ACTUAL RESULT 3: Get was successful"
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj.setResultStatus("SUCCESS");
print "*************************************************************************************************************************"
print "Checking the Fan's Speed ,Status,Rotor lock,MaxOverride statuse's after setting the Fan Max Override to true and reboot"
print "*************************************************************************************************************************"
print "orgValue:",orgValue
print "afterrebootValue :",afterrebootValue
if orgValue[0]!= afterrebootValue[0]:
print "TEST STEP 4: Check for Fan Status"
print "EXPECTED RESULT 4: Fan status should be false"
print "ACTUAL RESULT 4: Fan status is ",afterrebootValue[0]
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj.setResultStatus("SUCCESS");
if int(orgValue[1]) != int(afterrebootValue[1]):
print "TEST STEP 5: Check for Fan Speed"
print "EXPECTED RESULT 5: Fan speed should be equal zero"
print "ACTUAL RESULT 5: Fan speed is ",afterrebootValue[1]
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj.setResultStatus("SUCCESS");
if (orgValue[2]) != afterrebootValue[2]:
print "TEST STEP 6: Check for Rotor Lock "
print "EXPECTED RESULT 6: Fan Rotor lock should be Not_Applicable"
print "ACTUAL RESULT 6: Fan rotor lock is ",afterrebootValue[2]
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj.setResultStatus("SUCCESS");
if (orgValue[3]) == "false":
print "TEST STEP 7: Check for MaxOverride speed"
print "EXPECTED RESULT 7: Fan MaxOverride speed should be false"
print "ACTUAL RESULT 7: Fan MaxOverride speed is ",afterrebootValue[3]
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj.setResultStatus("SUCCESS");
else:
print "TEST STEP 7: Check for MaxOverride speed"
print "EXPECTED RESULT 7: Fan MaxOverride speed should be false"
print "ACTUAL RESULT 7: Fan MaxOverride speed is ",afterrebootValue[3]
print "[TEST EXECUTION RESULT] : FAILURE";
tdkTestObj.setResultStatus("FAILURE");
else:
print "TEST STEP 6: Check for Rotor Lock "
print "EXPECTED RESULT 6: Fan Rotor lock should be Not_Applicable"
print "ACTUAL RESULT 6: Fan rotor lock is",afterrebootValue[2]
print "[TEST EXECUTION RESULT] : FAILURE";
tdkTestObj.setResultStatus("FAILURE");
else:
print "TEST STEP 5: Check for Fan Speed"
print "EXPECTED RESULT 5: Fan speed should be equal to zero"
print "ACTUAL RESULT 5: Fan speed is ",afterrebootValue[1]
print "[TEST EXECUTION RESULT] : FAILURE";
tdkTestObj.setResultStatus("FAILURE");
else:
print "TEST STEP 4: Check for Fan Status"
print "EXPECTED RESULT 4: Fan status should be true"
print "ACTUAL RESULT 4: Fan status is ",afterrebootValue[0]
print "[TEST EXECUTION RESULT] : FAILURE";
tdkTestObj.setResultStatus("FAILURE");
else:
print "TEST STEP 3: Get the parameters under Device.Thermal.Fan."
print "EXPECTED RESULT 3:Should get the parameters under Device.Thermal.Fan."
print "ACTUAL RESULT 3: Get Failed"
print "[TEST EXECUTION RESULT] : FAILURE";
tdkTestObj.setResultStatus("FAILURE");
print "Reverting the Fan max override to false to disable the Fan";
#Revert the value
tdkTestObj = obj.createTestStep('TADstub_SetOnly');
tdkTestObj.addParameter("ParamName","Device.Thermal.Fan.MaxOverride");
tdkTestObj.addParameter("ParamValue","false");
tdkTestObj.addParameter("Type","bool");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details= tdkTestObj.getResultDetails();
if expectedresult in actualresult:
print "TEST STEP 7 : Revert the Fan Max Override to previous"
print "EXPECTED RESULT 7: Should set the Fan Max Override to false"
print "ACTUAL RESULT 7 : Set was Successfull"
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj.setResultStatus("SUCCESS");
else:
print "TEST STEP 7 : Revert the Fan Max Override to previous"
print "EXPECTED RESULT 7: Should set the Fan Max Override to false"
print "ACTUAL RESULT 7 : Set was failed"
print "[TEST EXECUTION RESULT] : FAILURE";
tdkTestObj.setResultStatus("FAILURE");
else:
print "TEST STEP 1: Set the Fan Max Override to true"
print "EXPECTED RESULT 1: Should set the Fan Max Override to true"
print "ACTUAL RESULT 1 : Set failed"
print "[TEST EXECUTION RESULT] : FAILURE";
tdkTestObj.setResultStatus("FAILURE");
obj.unloadModule("tad");
else:
print "Failed to load tad module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
|
from pathlib import Path
import sys
from tqdm import tqdm
if len(sys.argv) < 2 or (sys.argv[1] != '0' and sys.argv[1] != '1'):
print('usage: python create_symlinks.py (0 | 1)')
sys.exit()
if sys.argv[1] == '0':
path = '../CodeInquisitor/PATTERN_ANALYSIS/blackbox_time_series'
else:
path = '../CodeInquisitor/PATTERN_ANALYSIS/blackbox_time_series_Jan_July_2019_w_timestamps'
with open('BLACKBOX_INPUT/input.txt', 'w') as input_f:
pathlist = Path(path).glob('*/*')
for path in tqdm(list(pathlist)):
path_in_str = str(path) + '/events'
try:
with open(path_in_str, 'r') as f:
content = f.read().split(',')
# skipping sessions where less than 10 events happen
if len(content) < 10:
continue
# aggregate all consecutive edits into one
aggregated_content = []
ON_REPETITIVE_EVENT = False
for event in content:
if event in ('22', '19', '20', '8', '9', '47') and not ON_REPETITIVE_EVENT:
ON_REPETITIVE_EVENT = True
aggregated_content.append(event)
elif event in ('22', '19', '20', '8', '9', '47') and ON_REPETITIVE_EVENT:
pass
else:
ON_REPETITIVE_EVENT = False
aggregated_content.append(event)
events = ' '.join(aggregated_content)
input_f.write(events)
input_f.write('\n')
except FileNotFoundError:
pass
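# Illustrative example of the aggregation above (not part of the original script):
# a raw event list such as ['22', '19', '19', '5', '22'] collapses every
# consecutive run of "repetitive" events ('22', '19', '20', '8', '9', '47')
# into its first element, producing ['22', '5', '22'].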
|
#!/usr/bin/env python
import os
import sys
import globalvar
sys.path.append("./")
os.chdir(globalvar.Runloc)
def monitor(appName):
pid = os.fork()
if 0 == pid: # child process
os.system(appName)
sys.exit(0)
else: # parent process
os.wait()
if __name__ == '__main__' :
while 1:
monitor('./client_s.py -c 108.170.4.20 -l 10.1.104.2/24')
|
muster = open("muster.txt", "r")
output = open("output.txt", "r")
mustervals = []
outputvals = []
for line in muster:
a,b,val = line.split(" ")
mustervals.append(int(val))
for line in output:
outputvals.append(int(line))
print len(mustervals)
print len(outputvals)
length = min(len(mustervals), len(outputvals))
for i in range(length):
if mustervals[i] != outputvals[i]:
print i
print "muster: " + str(mustervals[i])
print "output: " + str(outputvals[i])
|
#!/usr/bin/env python3
"""Command-line wrapper for simpleLabels.cli_labelComands."""
import loadPath # Adds the project path.
import linkograph.simpleLabels
linkograph.simpleLabels.cli_labelCommands()
|
"""
setup.py - setuptools configuration for lblsolve
"""
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="lblsolve",
version="0.1.0",
author="Soren I. Bjornstad",
author_email="contact@sorenbjornstad.com",
description="solitaire solver for La Belle Lucie",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/sobjornstad/lblsolve",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={
"console_scripts": [
"lbl = lblsolve.cards:main"
],
},
python_requires='>=3.7',
)
|
#
#
#
##
from __future__ import print_function, unicode_literals
import inspect
import os
import socket
import pprint as pp
import time
import pickle
from workflow import *
from manager import *
DEBUG = 0
class Pipeline( object ):
""" The main pipeline class that the user will interact with """
def __init__(self):
""" Create a pipeline """
self.project_name = "CCBG"
self.queue_name = ""
self.project = ""
# For housekeeping to see how long the processing took
self._start_time = None
self._end_time = None
# when was the run information last saved
self._last_save = None
# How often to save, in secs
self.save_interval = 300
self.max_retry = 3
self._failed_steps = 0 # failed jobs that cannot be restarted.
self.sleep_time = 30
self.max_sleep_time = 300
self.sleep_start = self.sleep_time
self.sleep_increase = 30
if ( 0 ):
self.sleep_time = 3
self.max_sleep_time = 3
self.sleep_start = self.sleep_time
self.sleep_increase = 1
# to control that we do not flood the HPC with jobs or, if running locally, block the server machine.
# -1 is no limit
self.max_jobs = -1
self._use_storing = 1 # debugging purposes
self._freeze_file = None
self._delete_files = []
self._cwd = os.getcwd()
# Set up helper classes: the workflow tracks the steps in the
# pipeline, and the manager handles the actual execution of
# steps as jobs
self._workflow = Workflow()
self._manager = Manager(pipeline=self)
self._step_name = None
self._thread_id = None
self._pid = os.getpid()
self._hostname = socket.gethostname()
# generic getter/setter functions
def __getitem__(self, item):
""" generic geter function, variable starting with _ are ignored as are private """
if ( item.startswith("_")):
raise AttributeError
try:
return getattr(self, item)
except KeyError:
raise AttributeError
def __setitem__(self, item, value):
""" generic setter function, variable starting with _ are ignored as are private """
if ( item.startswith("_")):
raise AttributeError
try:
return setattr(self, item, value)
except KeyError:
raise AttributeError
def backend(self, backend):
self._manager.backend = backend
def backend_name(self):
print("Backend name: {}".format(self._manager.backend.name()))
def start_step(self, function, name=None):
return self._workflow.start_step(function, name)
# Generic step adder, wrapped in the few functions below it
def add_step( self, prev_step, function, name=None, step_type=None):
return self._workflow.add_step(prev_step, function, name, step_type)
# Simple wrapper functions for the generic add_step function.
def next_step(self, prev_step, function, name=None):
return self._workflow.add_step( prev_step, function, name);
def global_merge_step(self, prev_step, function, name=None):
return self._workflow.add_step( prev_step, function, name);
def thread_merge_step(self, prev_step, function, name=None):
return self._workflow.add_step( prev_step, function, name);
def print_workflow(self, starts=None):
self._workflow.print_flow( starts )
def system_job(self, cmd, output=None, delete_file=None):
return self.submit_job(cmd, output=output, limit=None, delete_file=delete_file, system_call=True)
def submit_job(self, cmd, output=None, limit=None, delete_file=None, system_call=False):
self._manager.submit_job( cmd, self._step_name, output, limit, delete_file, thread_id=self._thread_id, system_call=system_call )
def _sleep(self, active_jobs=None):
""" sleeps the loop, if there are no active jobs (eg running or started) increase sleep time
Args:
active_jobs (int): any active jobs, resets the sleep time
Returns:
none
"""
if active_jobs is not None and active_jobs > 0:
self.sleep_time = self.sleep_start
elif ( self.sleep_time < self.max_sleep_time ):
self.sleep_time += self.sleep_increase
time.sleep( self.sleep_time)
def store_state(self, filename=None):
""" stores the pipeline and all assosiated information in a binary python file (picle)
Args:
filename (str): default is project_name.PID
Returns:
None
"""
if filename is None:
filename = "{}.{}".format(self.project_name, self._pid)
output = open(filename, 'wb')
pickle.dump(self, output, -1)
output.close()
def restore_state(self, filename ):
""" restores a pipeline state and assosiated information from a save file
Args:
filename (str):
Returns:
Pipeline (obj)
"""
pipeline = pickle.load( open( filename, "rb" ) )
return pipeline
def report( self ):
""" prints the manager report
"""
self._manager.report();
def run( self, starts=None, args=None ):
""" Run the tasks and track everything
Args:
Start tasks (list of str): default is to pull them from the workflow
args (list of str): input(s) for the start(s)
Returns:
Nr of failed jobs (int)
"""
# if no start states selected, pull from the workflow, if
# steps have been provided translate names to states.
if starts is None:
starts = self._workflow.start_steps()
else:
starts = self._workflow.steps_by_name( starts )
# Kick off the start jobs before starting to spend some quality time in the main loop...
for start in starts:
self._step_name = start.name
start.function( args )
while ( True ):
# print( "Pulling job ...")
# Pull the status of all jobs and return the active ones, active being non-finished
active_jobs = self._manager.active_jobs();
started_jobs = 0
queued_jobs = 0
running_jobs = 0
for job in active_jobs:
# print( "Checking in on job {}/{}".format( job.step_name, job.status ))
self._step_name = job.step_name
# self._thread_id = job.thread_id
if job.status == Job_status.FINISHED:
job.active = False
next_steps = self._workflow.next_steps( job.step_name )
# Nothing after this step, loop on to the next job
if next_steps is None:
continue
for next_step in next_steps:
# print( "Next step is {}".format( next_step.name))
# The next step is either a global sync or a
# thread sync, so things are slightly
# complicated and we need to check the states
# of a ton of jobs
if next_step.step_type == 'sync' or next_step.step_type == 'thread_sync':
# print("Sync step ")
# A global sync is equal to thread_id being 0 (top level)
# Threading is not really working in this version.
# if ( next_step.step_type == 'sync' ):
# self._active_thread_id = 0
# else:
# self._active_thread_id = next_step.thread_id
# Check if the next step is depending on
# something running or queuing
step_depends_on = self._workflow.get_step_dependencies( next_step )
# print("Dependencies {}".format(step_depends_on))
if self._manager.waiting_for_job( step_depends_on ):
# print( "step is waiting for something...")
continue
if self._manager.failed_dependency_jobs( step_depends_on):
break
# print( "kicking of next step...")
self._step_name = next_step.name
if next_step.step_type == 'sync' or next_step.step_type == 'thread_sync':
job_outputs = self._manager.job_outputs( next_step.name )
else:
job_outputs = job.output
next_step.function( job_outputs )
started_jobs += 1
elif job.status == Job_status.QUEUEING:
queued_jobs += 1
elif job.status == Job_status.RUNNING:
running_jobs += 1
elif job.status == Job_status.FAILED:
if job.nr_of_tries < self.max_retry:
self._manager.resubmit_job( job )
started_jobs += 1
else:
job.active = False
job.status = Job_status.NO_RESTART
self._failed_steps += 1
elif job.status == Job_status.KILLED:
job.active = False
if (started_jobs + running_jobs + queued_jobs == 0):
break
self._manager.report()
self._sleep( started_jobs + running_jobs )
self._manager.report();
print("The pipeline finished with {} job(s) failing\n".format(self._failed_steps));
return self._failed_steps
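# Minimal usage sketch (illustrative only; the step functions, the backend object
# and the command strings below are assumptions, not part of this module):
#
#   pipeline = Pipeline()
#   pipeline.backend(some_backend)            # backend object supplied by the manager layer
#
#   def align(inputs):
#       pipeline.submit_job("aligner {}".format(inputs), output="aligned.bam")
#
#   def stats(inputs):
#       pipeline.system_job("samtools flagstat aligned.bam")
#
#   start = pipeline.start_step(align, name="align")
#   pipeline.next_step(start, stats, name="stats")
#   failed = pipeline.run(args="sample.fastq")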
|
# NOTE dataloader needs to be implemented
# Please use special/kinetics_video_unimodal.py for now
import os
import sys
import torch
sys.path.append(os.getcwd())
from unimodals.common_models import ResNetLSTMEnc, MLP
from datasets.kinetics.get_data import get_dataloader
from training_structures.unimodal import train, test
modalnum = 0
traindata, validdata, testdata = get_dataloader(sys.argv[1])
encoder = ResNetLSTMEnc(64).cuda()
head = MLP(64, 200, 5).cuda()
train(encoder, head, traindata, validdata, 20, optimtype=torch.optim.SGD,
lr=0.01, weight_decay=0.0001, modalnum=modalnum)
print("Testing:")
encoder = torch.load('encoder.pt').cuda()
head = torch.load('head.pt')
test(encoder, head, testdata, modalnum=modalnum)
|
"""Utility methods for schedule parsing."""
from datetime import time
from typing import List, Optional
from .scheduletypes import ScheduleEntry, ScheduleEvent
def sort_schedule_events(events: List[ScheduleEvent]) -> List[ScheduleEvent]:
"""Sort events into time order."""
return sorted(events, key=lambda e: e[0])
def daily_schedule(schedule: List[ScheduleEntry], day: int) \
-> List[ScheduleEvent]:
"""Return a single list of events on the given day."""
events = [event for entry in schedule if day in entry[0]
for event in entry[1]]
return sort_schedule_events(events)
def events_after(events: List[ScheduleEvent], after: time) \
-> List[ScheduleEvent]:
"""Return events strictly after the given time."""
return [event for event in events if event[0] > after]
def events_until(events: List[ScheduleEvent],
until: time, *, after: Optional[time] = None) \
-> List[ScheduleEvent]:
"""
Return events up to and including the given time.
Keyword arguments:
after -- if specified, only events after this time will be included.
"""
if after is not None:
events = events_after(events, after)
return [event for event in events if event[0] <= until]
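# Illustrative sketch (assumes ScheduleEntry is a (days, events) pair and
# ScheduleEvent is a (time, ...) tuple, matching the indexing used above):
#
#   schedule = [({0, 1}, [(time(9, 0), "standup"), (time(12, 30), "lunch")]),
#               ({1},    [(time(16, 0), "review")])]
#   daily_schedule(schedule, 1)
#   # -> [(time(9, 0), 'standup'), (time(12, 30), 'lunch'), (time(16, 0), 'review')]
#   events_until(daily_schedule(schedule, 1), time(13, 0), after=time(10, 0))
#   # -> [(time(12, 30), 'lunch')]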
|
"""Test connection of containment relationship."""
from gaphor import UML
from gaphor.core.modeling import Diagram
from gaphor.diagram.tests.fixtures import allow, connect, disconnect
from gaphor.UML.classes import ClassItem, PackageItem
from gaphor.UML.classes.containment import ContainmentItem
def test_containment_package_glue(create):
"""Test containment glue to two package items."""
pkg1 = create(PackageItem, UML.Package)
containment = create(ContainmentItem)
assert allow(containment, containment.head, pkg1)
def test_containment_package_glue_connected_on_one_end(create):
"""Test containment glue to two package items."""
pkg1 = create(PackageItem, UML.Package)
pkg2 = create(PackageItem, UML.Package)
containment = create(ContainmentItem)
connect(containment, containment.head, pkg1)
assert allow(containment, containment.tail, pkg2)
assert not allow(containment, containment.tail, pkg1)
def test_containment_can_not_create_cycles(create, diagram, element_factory):
"""Test containment connecting to a package and a class."""
package = create(PackageItem, UML.Package)
klass = create(ClassItem, UML.Class)
klass.subject.package = package.subject
line = create(ContainmentItem)
connect(line, line.head, klass)
assert not allow(line, line.tail, package)
def test_containment_package_class(create, diagram):
"""Test containment connecting to a package and a class."""
package = create(PackageItem, UML.Package)
line = create(ContainmentItem)
klass = create(ClassItem, UML.Class)
connect(line, line.head, package)
connect(line, line.tail, klass)
assert diagram.connections.get_connection(line.tail).connected is klass
assert len(package.subject.ownedElement) == 1
assert klass.subject in package.subject.ownedElement
def test_containment_package_class_disconnect(create, diagram, element_factory):
"""Test containment disconnecting from a package and a class."""
parent_package = element_factory.create(UML.Package)
diagram.element = parent_package
package = create(PackageItem, UML.Package)
line = create(ContainmentItem)
klass = create(ClassItem, UML.Class)
connect(line, line.tail, klass)
connect(line, line.head, package)
disconnect(line, line.head)
assert klass.subject in parent_package.ownedElement
def test_containment_class_class(create, diagram, element_factory):
"""Test containment connecting to a package and a class."""
parent_package = element_factory.create(UML.Package)
container = create(ClassItem, UML.Class)
container.subject.package = parent_package
line = create(ContainmentItem)
klass = create(ClassItem, UML.Class)
klass.subject.package = parent_package
connect(line, line.head, container)
connect(line, line.tail, klass)
assert diagram.connections.get_connection(line.tail).connected is klass
assert len(container.subject.ownedElement) == 1
assert klass.subject.owner is container.subject
assert klass.subject in container.subject.ownedElement
def test_containment_class_class_disconnect(create, diagram, element_factory):
"""Test containment connecting to a package and a class."""
parent_package = element_factory.create(UML.Package)
diagram.element = parent_package
container = create(ClassItem, UML.Class)
line = create(ContainmentItem)
klass = create(ClassItem, UML.Class)
connect(line, line.head, container)
connect(line, line.tail, klass)
disconnect(line, line.head)
assert klass.subject.owner is parent_package
assert klass.subject in parent_package.ownedElement
def test_containment_reconnect_in_new_diagram(create, element_factory):
# Most recently created containment relation wins.
rel = create(ContainmentItem)
c1 = create(ClassItem, UML.Class)
c2 = create(ClassItem, UML.Class)
connect(rel, rel.head, c1)
connect(rel, rel.tail, c2)
# Now do the same on a new diagram:
diagram2 = element_factory.create(Diagram)
c3 = diagram2.create(ClassItem, subject=c1.subject)
c4 = diagram2.create(ClassItem, subject=c2.subject)
rel2 = diagram2.create(ContainmentItem)
connect(rel2, rel2.head, c3)
connect(rel2, rel2.tail, c4)
c5 = diagram2.create(ClassItem, subject=element_factory.create(UML.Class))
connect(rel2, rel2.head, c5)
assert c2.subject.owner is c5.subject
assert c4.subject.owner is c5.subject
|
import json
import requests
import settings
from geotext import GeoText
with open("txt/tom.txt", "r") as f1:
tom = f1.read()
with open("txt/huck.txt", "r") as f2:
huck = f2.read()
with open("txt/life-on-the-mississippi.txt", "r") as f3:
life = f3.read()
with open("txt/roughing-it.txt", "r") as f4:
rough = f4.read()
tom_places = GeoText(tom, "US")
huck_places = GeoText(huck, "US")
life_places = GeoText(life, "US")
rough_places = GeoText(rough, "US")
all_places = (
tom_places.cities + huck_places.cities + life_places.cities + rough_places.cities
)
past = ""
filtered_places = []
for place in all_places:
if place != past:
filtered_places.append(place)
past = place
pairs = []
for idx, place in enumerate(filtered_places[1:]):
start = filtered_places[idx]
end = filtered_places[idx + 1]
start = start.replace(" ", "+")
end = end.replace(" ", "+")
pairs.append((start, end))
for idx, pair in enumerate(pairs):
resp = requests.get(
"https://maps.googleapis.com/maps/api/directions/json?origin="
+ pair[0]
+ "+USA&destination="
+ pair[1]
+ "+USA&key="
+ settings.goog
)
with open("data/leg-" + str(idx) + ".txt", "w") as filename:
json.dump(resp.json(), filename)
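# Illustrative note (not part of the original script): if filtered_places were
# ["Hannibal", "St. Louis", "New Orleans"], the loop above would build the
# origin/destination pairs [("Hannibal", "St.+Louis"), ("St.+Louis", "New+Orleans")]
# before querying the Directions API for each leg.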
|
import tensorflow as tf
from .basic_ops import *
"""This script defines 3D different multi-head attention layers.
"""
def multihead_attention_3d(inputs, total_key_filters, total_value_filters,
output_filters, num_heads, training, layer_type='SAME',
name=None):
"""3d Multihead scaled-dot-product attention with input/output transformations.
Args:
inputs: a Tensor with shape [batch, d, h, w, channels]
total_key_filters: an integer. Note that queries have the same number
of channels as keys
total_value_filters: an integer
output_filters: an integer
num_heads: an integer dividing total_key_filters and total_value_filters
layer_type: a string, type of this layer -- SAME, DOWN, UP
name: an optional string
Returns:
A Tensor of shape [batch, _d, _h, _w, output_filters]
Raises:
ValueError: if the total_key_filters or total_value_filters are not divisible
by the number of attention heads.
"""
if total_key_filters % num_heads != 0:
raise ValueError("Key depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_key_filters, num_heads))
if total_value_filters % num_heads != 0:
raise ValueError("Value depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_value_filters, num_heads))
if layer_type not in ['SAME', 'DOWN', 'UP']:
raise ValueError("Layer type (%s) must be one of SAME, "
"DOWN, UP." % (layer_type))
with tf.variable_scope(
name,
default_name="multihead_attention_3d",
values=[inputs]):
# produce q, k, v
q, k, v = compute_qkv_3d(inputs, total_key_filters,
total_value_filters, layer_type)
# after splitting, shape is [batch, heads, d, h, w, channels / heads]
q = split_heads_3d(q, num_heads)
k = split_heads_3d(k, num_heads)
v = split_heads_3d(v, num_heads)
# normalize
key_filters_per_head = total_key_filters // num_heads
q *= key_filters_per_head**-0.5
# attention
x = global_attention_3d(q, k, v, training)
x = combine_heads_3d(x)
x = Conv3D(x, output_filters, 1, 1, use_bias=True)
return x
def compute_qkv_3d(inputs, total_key_filters, total_value_filters, layer_type):
"""Computes query, key and value.
Args:
inputs: a Tensor with shape [batch, d, h, w, channels]
total_key_filters: an integer
total_value_filters: an integer
layer_type: String, type of this layer -- SAME, DOWN, UP
Returns:
q: [batch, _d, _h, _w, total_key_filters] tensor
k: [batch, d, h, w, total_key_filters] tensor
v: [batch, d, h, w, total_value_filters] tensor
"""
# linear transformation for q
if layer_type == 'SAME':
q = Conv3D(inputs, total_key_filters, 1, 1, use_bias=True)
elif layer_type == 'DOWN':
q = Conv3D(inputs, total_key_filters, 3, 2, use_bias=True)
elif layer_type == 'UP':
q = Deconv3D(inputs, total_key_filters, 3, 2, use_bias=True)
# linear transformation for k
k = Conv3D(inputs, total_key_filters, 1, 1, use_bias=True)
# linear transformation for v
v = Conv3D(inputs, total_value_filters, 1, 1, use_bias=True)
return q, k, v
def split_heads_3d(x, num_heads):
"""Split channels (last dimension) into multiple heads (becomes dimension 1).
Args:
x: a Tensor with shape [batch, d, h, w, channels]
num_heads: an integer
Returns:
a Tensor with shape [batch, num_heads, d, h, w, channels / num_heads]
"""
return tf.transpose(split_last_dimension(x, num_heads), [0, 4, 1, 2, 3, 5])
def split_last_dimension(x, n):
"""Reshape x so that the last dimension becomes two dimensions.
The first of these two dimensions is n.
Args:
x: a Tensor with shape [..., m]
n: an integer.
Returns:
a Tensor with shape [..., n, m/n]
"""
old_shape = x.get_shape().dims
last = old_shape[-1]
new_shape = old_shape[:-1] + [n] + [last // n if last else None]
ret = tf.reshape(x, tf.concat([tf.shape(x)[:-1], [n, -1]], 0))
ret.set_shape(new_shape)
return ret
def global_attention_3d(q, k, v, training, name=None):
"""global self-attention.
Args:
q: a Tensor with shape [batch, heads, _d, _h, _w, channels_k]
k: a Tensor with shape [batch, heads, d, h, w, channels_k]
v: a Tensor with shape [batch, heads, d, h, w, channels_v]
name: an optional string
Returns:
a Tensor of shape [batch, heads, _d, _h, _w, channels_v]
"""
with tf.variable_scope(
name,
default_name="global_attention_3d",
values=[q, k, v]):
new_shape = tf.concat([tf.shape(q)[0:-1], [v.shape[-1].value]], 0)
# flatten q,k,v
q_new = flatten_3d(q)
k_new = flatten_3d(k)
v_new = flatten_3d(v)
# attention
output = dot_product_attention(q_new, k_new, v_new, bias=None,
training=training, dropout_rate=0.5, name="global_3d")
# putting the representations back in the right place
output = scatter_3d(output, new_shape)
return output
def reshape_range(tensor, i, j, shape):
"""Reshapes a tensor between dimensions i and j."""
target_shape = tf.concat(
[tf.shape(tensor)[:i], shape, tf.shape(tensor)[j:]],
axis=0)
return tf.reshape(tensor, target_shape)
def flatten_3d(x):
"""flatten x."""
x_shape = tf.shape(x)
# [batch, heads, length, channels], length = d*h*w
x = reshape_range(x, 2, 5, [tf.reduce_prod(x_shape[2:5])])
return x
def scatter_3d(x, shape):
"""scatter x."""
x = tf.reshape(x, shape)
return x
def dot_product_attention(q, k, v, bias, training, dropout_rate=0.0, name=None):
"""Dot-product attention.
Args:
q: a Tensor with shape [batch, heads, length_q, channels_k]
k: a Tensor with shape [batch, heads, length_kv, channels_k]
v: a Tensor with shape [batch, heads, length_kv, channels_v]
bias: bias Tensor
dropout_rate: a floating point number
name: an optional string
Returns:
A Tensor with shape [batch, heads, length_q, channels_v]
"""
with tf.variable_scope(
name,
default_name="dot_product_attention",
values=[q, k, v]):
# [batch, num_heads, length_q, length_kv]
logits = tf.matmul(q, k, transpose_b=True)
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
# dropping out the attention links for each of the heads
weights = tf.layers.dropout(weights, dropout_rate, training=training)
return tf.matmul(weights, v)
def combine_heads_3d(x):
"""Inverse of split_heads_3d.
Args:
x: a Tensor with shape [batch, num_heads, d, h, w, channels / num_heads]
Returns:
a Tensor with shape [batch, d, h, w, channels]
"""
return combine_last_two_dimensions(tf.transpose(x, [0, 2, 3, 4, 1, 5]))
def combine_last_two_dimensions(x):
"""Reshape x so that the last two dimension become one.
Args:
x: a Tensor with shape [..., a, b]
Returns:
a Tensor with shape [..., a*b]
"""
old_shape = x.get_shape().dims
a, b = old_shape[-2:]
new_shape = old_shape[:-2] + [a * b if a and b else None]
ret = tf.reshape(x, tf.concat([tf.shape(x)[:-2], [-1]], 0))
ret.set_shape(new_shape)
return ret
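# Minimal usage sketch (illustrative only; assumes TensorFlow 1.x graph mode and
# that Conv3D/Deconv3D from .basic_ops behave like strided 3D (de)convolutions):
#
#   inputs = tf.placeholder(tf.float32, [None, 8, 16, 16, 32])
#   out = multihead_attention_3d(inputs, total_key_filters=64,
#                                total_value_filters=64, output_filters=32,
#                                num_heads=4, training=True, layer_type='SAME')
#   # out has shape [batch, 8, 16, 16, 32]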
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QBrush, QColor
from PyQt5.QtWidgets import QTableWidgetItem, QHeaderView
from views.py.askLepWidgetQt import Ui_askLepWidgetFrame
class AskLepWidget(Ui_askLepWidgetFrame):
def __init__(self, master, view, expression):
self.view = view
self.expression = expression
self.setupUi(master)
self.setExpressionInfo(self.expression)
def setExpressionInfo(self, expression):
if expression is not None:
# Set Expression
self.logicalExpressionLabel.setText(expression.getDisplayString())
self.logicalExpressionLabel.adjustSize()
# Set DNF
self.dnfFormTitleLabel.setText(f"DNF: {expression.getDNF()}")
self.dnfFormTitleLabel.adjustSize()
# Set Satisfiability
self.satisfiabilityLabel.setText(f"Satisfiable: {expression.getSatisfiable()}")
self.satisfiabilityLabel.adjustSize()
# Set Validity
self.validityLabel.setText(f"Valid: {expression.getValid()}")
self.validityLabel.adjustSize()
truthTable = expression.getTruthTable()
expResults = expression.getSimpleTable()
columnInfo = truthTable[0]
expressionSplit = list(columnInfo[-1])
self.truthTableWidget.setRowCount(len(expResults) + 1)
self.truthTableWidget.setColumnCount(len(columnInfo) + len(expressionSplit) - 1)
# Setting the headings
for columnVarInd in range(len(columnInfo)):
self.truthTableWidget.setItem(0, columnVarInd, QTableWidgetItem(columnInfo[columnVarInd]))
self.truthTableWidget.item(0, columnVarInd).setForeground(QtGui.QColor(255, 255, 242))
self.truthTableWidget.item(0, columnVarInd).setBackground(QtGui.QColor(25, 36, 33))
for columnExpInd in range(len(expressionSplit)):
self.truthTableWidget.setItem(0, columnVarInd + columnExpInd, QTableWidgetItem(expressionSplit[columnExpInd]))
self.truthTableWidget.item(0, columnVarInd + columnExpInd).setForeground(QtGui.QColor(255, 255, 242))
self.truthTableWidget.item(0, columnVarInd + columnExpInd).setBackground(QtGui.QColor(25, 36, 33))
# Populate the table
for expResultsInd in range(len(expResults)):
# Filling in the variable inputs
if len(columnInfo) == 1:
for expVarInpInd in range(len(columnInfo)):
if truthTable[expResultsInd + 1][expVarInpInd][0] == True:
stringRes = "T"
self.truthTableWidget.setItem(expResultsInd + 1, expVarInpInd, QTableWidgetItem(stringRes))
self.truthTableWidget.item(expResultsInd + 1, expVarInpInd).setForeground(QtGui.QColor(204,54,20))
else:
stringRes = "F"
self.truthTableWidget.setItem(expResultsInd + 1, expVarInpInd, QTableWidgetItem(stringRes))
self.truthTableWidget.item(expResultsInd + 1, expVarInpInd).setForeground(QtGui.QColor(204,54,20))
else:
for expVarInpInd in range(len(columnInfo) - 1):
if truthTable[expResultsInd + 1][expVarInpInd][0] == True:
stringRes = "T"
self.truthTableWidget.setItem(expResultsInd + 1, expVarInpInd, QTableWidgetItem(stringRes))
self.truthTableWidget.item(expResultsInd + 1, expVarInpInd).setForeground(QtGui.QColor(23,161,145))
self.truthTableWidget.item(expResultsInd + 1, expVarInpInd).setBackground(QtGui.QColor(25, 36, 33))
else:
stringRes = "F"
self.truthTableWidget.setItem(expResultsInd + 1, expVarInpInd, QTableWidgetItem(stringRes))
self.truthTableWidget.item(expResultsInd + 1, expVarInpInd).setForeground(QtGui.QColor(204,54,20))
self.truthTableWidget.item(expResultsInd + 1, expVarInpInd).setBackground(QtGui.QColor(25, 36, 33))
# Filling in the expression outputs
outRes = truthTable[expResultsInd + 1][-1]
for outResInd in range(len(outRes)):
if outRes[outResInd] == True:
outResString = "T"
self.truthTableWidget.setItem(expResultsInd + 1, expVarInpInd + outResInd + 1, QTableWidgetItem(outResString))
self.truthTableWidget.item(expResultsInd + 1, expVarInpInd + outResInd + 1).setForeground(QtGui.QColor(23,161,145))
elif outRes[outResInd] == False:
outResString = "F"
self.truthTableWidget.setItem(expResultsInd + 1, expVarInpInd + outResInd + 1, QTableWidgetItem(outResString))
self.truthTableWidget.item(expResultsInd + 1, expVarInpInd + outResInd + 1).setForeground(QtGui.QColor(204,54,20))
else:
outResString = ""
self.truthTableWidget.setItem(expResultsInd + 1, expVarInpInd + outResInd + 1, QTableWidgetItem(outResString))
# Highlighting the result column
if outResInd == expression.getTableFinalColumn():
self.truthTableWidget.item(expResultsInd + 1, expVarInpInd + outResInd + 1).setBackground(QtGui.QColor(255,237,102))
# Resize the cells to make table look better
self.truthTableWidget.horizontalHeader().setMinimumSectionSize(0)
self.truthTableWidget.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
# Hiding the column and row headers
self.truthTableWidget.verticalHeader().setVisible(False)
self.truthTableWidget.horizontalHeader().setVisible(False)
# Removed the empty columns
for remIndex in range(self.truthTableWidget.columnCount(), 0, -1):
if self.truthTableWidget.item(0, remIndex - 1).text() == " ":
self.truthTableWidget.removeColumn(remIndex - 1)
|
#!/usr/bin/env python
class Queue:
def __init__(self):
self.items = []
def enqueue(self, item):
self.items.insert(0, item)
def dequeue(self):
return self.items.pop()
def isEmpty(self):
return self.items == []
def size(self):
return len(self.items)
if __name__=='__main__':
q=Queue()
q.enqueue(3)
q.enqueue(5)
q.enqueue(8)
print 'size: ', q.size()
print 'dequeue: ', q.dequeue()
print 'isEmpty: ', q.isEmpty()
|
import numpy as np
import mindspore
from mindspore import context, ops, Tensor, nn
from mindspore.common.parameter import Parameter, ParameterTuple
import copy
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
_update_op = ops.MultitypeFuncGraph("update_op")
@_update_op.register("Tensor", "Tensor")
def _parameter_update(policy_param, target_param):
assign = ops.Assign()
output = assign(target_param, policy_param)
return output
class DQN(nn.Cell):
neuron_nums = 16
def __init__(self, n_features, n_actions):
super(DQN, self).__init__()
self.net = nn.SequentialCell(
nn.Dense(n_features, self.neuron_nums),
nn.ReLU(),
nn.Dense(self.neuron_nums, n_actions),
)
def construct(self, s):
return self.net(s)
class PolicyNetWithLossCell(nn.Cell):
"""DQN policy network with loss cell"""
def __init__(self, backbone, loss_fn):
super(PolicyNetWithLossCell,
self).__init__(auto_prefix=False)
self._backbone = backbone
self._loss_fn = loss_fn
self.gather = ops.GatherD()
def construct(self, x, a0, label):
"""constructor for Loss Cell"""
out = self._backbone(x)
out = self.gather(out, 1, a0)
loss = self._loss_fn(out, label)
return loss
# Deep Q Network off-policy
class DeepQNetwork:
def __init__(
self,
n_actions,
n_features,
learning_rate=0.01,
reward_decay=0.9,
e_greedy=0.9,
replace_target_iter=300,
memory_size=500,
batch_size=3,
e_greedy_increment=None,
):
self.n_actions = n_actions
self.n_features = n_features
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon_max = e_greedy
self.replace_target_iter = replace_target_iter
self.memory_size = memory_size
self.batch_size = batch_size
self.epsilon_increment = e_greedy_increment
self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
# total learning step
self.learn_step_counter = 0
# initialize zero memory [s, a, r, s_]
self.memory = np.zeros((self.memory_size, n_features * 2 + 2))
self.eval_net = DQN(self.n_features, self.n_actions)
self.target_net = copy.deepcopy(self.eval_net)
self.policy_param = ParameterTuple(
self.eval_net.get_parameters())
self.target_param = ParameterTuple(
self.target_net.get_parameters())
if not hasattr(self, 'memory_counter'):
self.memory_counter = 0
loss_func = nn.MSELoss()
opt = nn.Adam(self.eval_net.trainable_params(), learning_rate=self.lr)
loss_q_net = PolicyNetWithLossCell(self.eval_net, loss_func)
self.policy_network_train = nn.TrainOneStepCell(loss_q_net, opt)
self.policy_network_train.set_train(mode=True)
self.hyper_map = ops.HyperMap()
self.cost_his = []
def store_transition(self, transition):
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition
self.memory_counter += 1
def reset_epsilon(self, epsilon):
self.epsilon = epsilon
def choose_action(self, observation):
observation = Tensor(observation[np.newaxis, :], mindspore.float32)
if np.random.uniform() < self.epsilon:
self.eval_net.set_train(mode=False)
action_v = self.eval_net(observation)
action = np.argmax(action_v)
else:
action = np.random.randint(0, self.n_actions)
return action
def update_param(self):
assign_result = self.hyper_map(
_update_op,
self.policy_param,
self.target_param
)
return assign_result
def learn(self):
if self.learn_step_counter % self.replace_target_iter == 0:
self.update_param()
if self.memory_counter > self.memory_size:
sample_index = np.random.choice(self.memory_size, size=self.batch_size, replace=False)
else:
sample_index = np.random.choice(self.memory_counter, size=self.batch_size, replace=False)
batch_memory = Tensor(self.memory[sample_index, :], mindspore.float32)
b_s = batch_memory[:, :self.n_features]
b_a = ops.ExpandDims()(batch_memory[:, self.n_features], 1).astype(mindspore.int32)
b_r = ops.ExpandDims()(batch_memory[:, self.n_features + 1], 1)
b_s_ = batch_memory[:, -self.n_features:]
q_next = self.target_net(b_s_).max(axis=1)
q_target = b_r + self.gamma * q_next
loss = self.policy_network_train(b_s, b_a, q_target)
self.cost_his.append(round(float(np.mean(loss.asnumpy())), 3))
# increasing epsilon
self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
self.learn_step_counter += 1
return loss
def plot_cost(self):
import matplotlib.pyplot as plt
plt.plot(np.arange(len(self.cost_his)), self.cost_his)
plt.ylabel('Cost')
plt.xlabel('training steps')
plt.show()
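# Minimal training-loop sketch (illustrative only; `env` with Gym-style
# reset()/step() and n_features-dimensional observations is an assumption):
#
#   agent = DeepQNetwork(n_actions=2, n_features=4, e_greedy_increment=0.001)
#   s = env.reset()
#   for step in range(10000):
#       a = agent.choose_action(s)
#       s_, r, done = env.step(a)
#       agent.store_transition(np.hstack((s, [a, r], s_)))
#       if step > 200 and step % 5 == 0:
#           agent.learn()
#       s = env.reset() if done else s_
#   agent.plot_cost()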
|
from django.db import models
from django.utils.safestring import mark_safe
from ckeditor.fields import RichTextField
# Create your models here.
class MainPageStatisticNumber(models.Model):
number = models.IntegerField(null=True, blank=True)
description = models.TextField(null=True, blank=True)
class Meta:
verbose_name_plural = "Физический факультет в цифрах"
class KeyPublications(models.Model):
title = models.TextField(null=True, blank=True)
authors = models.TextField(null=True, blank=True)
journal = models.TextField(null=True, blank=True)
publicationUrl = models.URLField(max_length=250)
image = models.ImageField(upload_to="media/keypublications", null=True, blank=True)
class Meta:
verbose_name_plural = "Основные публикации"
class FamousGraduates(models.Model):
name = models.TextField(null=True, blank=True)
shortDescription = models.TextField(null=True, blank=True)
photo = models.ImageField(upload_to="media/famousGraduates", null=True, blank=True)
briefBioInfo = RichTextField(null=True, blank=True)
typeOfProfActivity = RichTextField(null=True, blank=True)
periodOfStudy = models.TextField(null=True, blank=True)
facultyAndSpeciality = models.TextField(null=True, blank=True)
professionalAchievements = RichTextField(null=True, blank=True)
slug = models.SlugField(max_length=250, unique=True)
class Meta:
verbose_name_plural = "Известные выпускники"
def admin_photo(self):
return mark_safe('<img src="{}" width="100" />'.format(self.image.url))
admin_photo.short_description = "Афиша"
admin_photo.allow_tags = True
|
#!/usr/bin/python
# part of https://github.com/WolfgangFahl/play-chess-with-a-webcam
from pathlib import Path
import os
import getpass
import socket
class Environment:
""" Runtime Environment """
debugImagePath="/tmp/pcwawc/"
def __init__(self):
""" get the directory in which the testMedia resides """
self.scriptPath = Path(__file__).parent
self.projectPath = self.scriptPath.parent
self.testMediaPath = Path(self.projectPath, 'testMedia')
self.testMedia = str(self.testMediaPath.absolute()) + "/"
self.games = str(self.projectPath) + "/games"
@staticmethod
def checkDir(path):
#print (path)
if not os.path.isdir(path):
try:
os.mkdir(path)
except OSError:
print ("Creation of the directory %s failed" % path)
else:
print ("Successfully created the directory %s " % path)
@staticmethod
def inContinuousIntegration():
'''
are we in a Continuous Integration Environment?
'''
publicCI=getpass.getuser() in [ "travis", "runner" ];
privateCI="capri.bitplan.com"==socket.getfqdn()
return publicCI or privateCI
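# Example usage (illustrative only):
#
#   env = Environment()
#   Environment.checkDir(Environment.debugImagePath)   # make sure /tmp/pcwawc/ exists
#   print(env.testMedia, env.games)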
|
import pytest
import magma as m
def test_array_partial_unwired():
class Foo(m.Circuit):
io = m.IO(A=m.Out(m.Bits[2]))
io.A[0] @= 1
with pytest.raises(Exception) as e:
m.compile("build/Foo", Foo)
assert str(e.value) == """\
Found unconnected port: Foo.A
Foo.A
Foo.A[0]: Connected
Foo.A[1]: Unconnected\
"""
def test_array_partial_unwired_nested():
class Foo(m.Circuit):
io = m.IO(A=m.Out(m.Array[2, m.Bits[2]]))
io.A[0] @= 1
with pytest.raises(Exception) as e:
m.compile("build/Foo", Foo)
assert str(e.value) == """\
Found unconnected port: Foo.A
Foo.A
Foo.A[0]: Connected
Foo.A[1]: Unconnected\
"""
def test_array_partial_unwired_nested2():
class Foo(m.Circuit):
io = m.IO(A=m.Out(m.Array[2, m.Bits[2]]))
io.A[0] @= 1
io.A[1][0] @= 1
with pytest.raises(Exception) as e:
m.compile("build/Foo", Foo)
assert str(e.value) == """\
Found unconnected port: Foo.A
Foo.A
Foo.A[0]: Connected
Foo.A[1]
Foo.A[1][0]: Connected
Foo.A[1][1]: Unconnected\
"""
@pytest.mark.parametrize("_slice", [slice(1, 4), slice(3, 4), slice(-1, -4),
slice(-3, -4)])
def test_invalid_slice(_slice):
class Foo(m.Circuit):
io = m.IO(A=m.Out(m.Array[2, m.Bits[2]]))
with pytest.raises(IndexError) as e:
io.A[_slice]
assert str(e.value) == ("array index out of range "
f"(type=Array[2, In(Bits[2])], key={_slice})")
|
import warnings
from pathlib import Path
from typing import Any, Dict, List, Optional
from gobbli.augment.base import BaseAugment
from gobbli.docker import maybe_mount, run_container
from gobbli.model.base import BaseModel
from gobbli.model.context import ContainerTaskContext
from gobbli.util import assert_type, escape_line_delimited_texts
class MarianMT(BaseModel, BaseAugment):
"""
Backtranslation-based data augmenter using the Marian Neural Machine Translation
model. Translates English text to one of several languages and back to obtain
similar texts for training.
"""
_BUILD_PATH = Path(__file__).parent
_INPUT_FILE = "input.txt"
_OUTPUT_FILE = "output.txt"
_CONTAINER_CACHE_DIR = Path("/cache")
# Hardcoded list based on the languages available in various models
# Ignore models that translate multiple languages because they have
# a different API (texts require a >>lang_code<< prelude describing
# which language to translate to)
# https://huggingface.co/models?search=opus-mt-en&sort=alphabetical
# The below mapping was reconstructed manually using various resources
# including ISO language codes, Google Translate auto-detect,
# and the JWS docs from Opus: http://opus.nlpl.eu/JW300.php (see languages.json)
LANGUAGE_CODE_MAPPING = {
"afrikaans": "af",
"central-bikol": "bcl",
"bemba": "bem",
"berber": "ber",
"bulgarian": "bg",
"bislama": "bi",
        # BZS stands for Brazilian Sign Language, but the text
        # looks like Portuguese, and there's no other model for Portuguese
"portugese": "bzs",
"catalan": "ca",
"cebuano": "ceb",
"chuukese": "chk",
"seychelles-creole": "crs",
"czech": "cs",
"welsh": "cy",
"danish": "da",
"german": "de",
"ewe": "ee",
"efik": "efi",
"esperanto": "eo",
"estonian": "et",
"basque": "eu",
"finnish": "fi",
"fijian": "fj",
"french": "fr",
"irish": "ga",
"ga": "gaa",
"gilbertese": "gil",
"galician": "gl",
"gun": "guw",
"manx": "gv",
"hausa": "ha",
"hiligaynon": "hil",
"hiri-motu": "ho",
"haitian": "ht",
"hungarian": "hu",
"indonesian": "id",
"igbo": "ig",
"iloko": "ilo",
"icelandic": "is",
"isoko": "iso",
"italian": "it",
"japanese": "jap",
"kongo": "kg",
"kuanyama": "kj",
"kikaonde": "kqn",
"kwangali": "kwn",
"kikongo": "kwy",
"luganda": "lg",
"lingala": "ln",
"silozi": "loz",
"kiluba": "lu",
"tshiluba": "lua",
"luvale": "lue",
"lunda": "lun",
"luo": "luo",
"mizo": "lus",
"mauritian-creole": "mfe",
"malagasy": "mg",
"marshallese": "mh",
"macedonian": "mk",
"malayalam": "ml",
"moore": "mos",
"marathi": "mr",
"maltese": "mt",
"ndonga": "ng",
"niuean": "niu",
"dutch": "nl",
"sepedi": "nso",
"chichewa": "ny",
"nyaneka": "nyk",
"oromo": "om",
"pangasinan": "pag",
"papiamento": "pap",
"solomon-islands-pidgin": "pis",
"ponapean": "pon",
"uruund": "rnd",
"russian": "ru",
"kirundi": "run",
"kinyarwanda": "rw",
"sango": "sg",
"slovak": "sk",
"samoan": "sm",
"shona": "sn",
"albanian": "sq",
"swati": "ss",
"sesotho-lesotho": "st",
"swedish": "sv",
"swahili-congo": "swc",
"tigrinya": "ti",
"tiv": "tiv",
"tagalog": "tl",
"otetela": "tll",
"setswana": "tn",
"tongan": "to",
"chitonga": "toi",
"tok-pisin": "tpi",
"tsonga": "ts",
"tuvaluan": "tvl",
"ukrainian": "uk",
"umbundu": "umb",
"xhosa": "xh",
}
def init(self, params: Dict[str, Any]):
"""
See :meth:`gobbli.model.base.BaseModel.init`.
MarianMT parameters:
- ``batch_size``: Number of documents to run through the Marian model at once.
- ``target_languages``: List of target languages to translate texts to and back.
See :attr:`MarianMT.ALL_TARGET_LANGUAGES` for a full list of possible values. You may
only augment texts up to the number of languages specified, since each language
will be used at most once. So if you want to augment 5 times, you need to specify
at least 5 languages when initializing the model.
"""
self.batch_size = 32
        # Current default - top 5 languages in Wikipedia which are also available
# in the list of target languages
# https://en.wikipedia.org/wiki/List_of_Wikipedias#List
self.target_languages = ["french", "german", "japanese", "russian", "italian"]
for name, value in params.items():
if name == "batch_size":
assert_type(name, value, int)
if value < 1:
raise ValueError("batch_size must be >= 1")
self.batch_size = value
elif name == "target_languages":
assert_type(name, value, list)
for target in value:
if target not in MarianMT.LANGUAGE_CODE_MAPPING:
raise ValueError(
f"invalid target language '{target}'. Valid values are "
f"{list(MarianMT.LANGUAGE_CODE_MAPPING.keys())}"
)
self.target_languages = value
else:
raise ValueError(f"Unknown param '{name}'")
@property
def image_tag(self) -> str:
"""
Returns:
The Docker image tag to be used for the Marian container.
"""
return f"gobbli-marian-nmt"
@classmethod
def marian_model(cls, language: str) -> str:
"""
Returns:
Name of the Marian MT model to use to translate English
to the passed language.
"""
return f"Helsinki-NLP/opus-mt-en-{cls.LANGUAGE_CODE_MAPPING[language]}"
@classmethod
def marian_inverse_model(cls, language: str) -> str:
"""
Returns:
Name of the Marian MT model to use to translate the passed language
back to English.
"""
return f"Helsinki-NLP/opus-mt-{cls.LANGUAGE_CODE_MAPPING[language]}-en"
def _build(self):
self.docker_client.images.build(
path=str(MarianMT._BUILD_PATH),
tag=self.image_tag,
**self._base_docker_build_kwargs,
)
def _write_input(self, X: List[str], context: ContainerTaskContext):
"""
Write the user input to a file for the container to read.
"""
input_path = context.host_input_dir / MarianMT._INPUT_FILE
input_path.write_text(escape_line_delimited_texts(X))
def _read_output(self, context: ContainerTaskContext) -> List[str]:
"""
        Read the generated text output from the file written by the container.
"""
output_path = context.host_output_dir / MarianMT._OUTPUT_FILE
return output_path.read_text().split("\n")
@property
def host_cache_dir(self):
"""
Directory to be used for downloaded transformers files.
Should be the same across all instances of the class, since these are
generally static model weights/config files that can be reused.
"""
cache_dir = MarianMT.model_class_dir() / "cache"
cache_dir.mkdir(exist_ok=True, parents=True)
return cache_dir
def augment(
self, X: List[str], times: Optional[int] = None, p: float = None
) -> List[str]:
if times is None:
times = len(self.target_languages)
if times > len(self.target_languages):
raise ValueError(
"MarianMT was asked to augment {len(times)} times but was only initialized with "
"{len(self.target_languages)} target languages. You must specify at least as "
"many target languages as the number of times you'd like to augment."
)
if p is not None:
warnings.warn(
"MarianMT doesn't replace text at the token level, so the 'p' parameter "
"will be ignored."
)
context = ContainerTaskContext(self.data_dir())
self._write_input(X, context)
# Determine which device to use for augmentation
device = "cpu"
if self.use_gpu:
if self.nvidia_visible_devices == "all":
device = "cuda"
else:
device_num = self.nvidia_visible_devices.split(",")[0]
device = f"cuda:{device_num}"
augmented_texts = []
for i in range(times):
language = self.target_languages[i]
cmd = (
"python3 backtranslate_text.py"
f" {context.container_input_dir / MarianMT._INPUT_FILE}"
f" {context.container_output_dir / MarianMT._OUTPUT_FILE}"
f" --batch-size {self.batch_size}"
f" --cache-dir {MarianMT._CONTAINER_CACHE_DIR}"
f" --device {device}"
f" --marian-model {MarianMT.marian_model(language)}"
f" --marian-inverse-model {MarianMT.marian_inverse_model(language)}"
)
run_kwargs = self._base_docker_run_kwargs(context)
maybe_mount(
run_kwargs["volumes"],
self.host_cache_dir,
MarianMT._CONTAINER_CACHE_DIR,
)
run_container(
self.docker_client, self.image_tag, cmd, self.logger, **run_kwargs
)
augmented_texts.extend(self._read_output(context))
return augmented_texts
|
# Approximation using a Greedy Algorithm
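# The script reads n points (a count followed by one "x y" pair per line), then
# builds a tour with the nearest-neighbour heuristic: starting from point 0 it
# repeatedly jumps to the closest unvisited point, finally adds the hop back to
# point 0, and prints the rounded total tour length.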
import math
def distance(point1, point2):
dx = point2[0] - point1[0]
dy = point2[1] - point1[1]
return math.hypot(dx, dy)
unvisited = set()
total_dist = 0
current_index = 0
points = []
n = int(input())
for i in range(n):
point = tuple(map(int, input().split())) # x, y
points.append(point)
unvisited.add(i)
while (len(unvisited) != 0):
min_index = 0
min_dist = math.inf
current_point = points[current_index]
for i in unvisited:
if (i != 0):
dist = distance(current_point, points[i])
if (dist < min_dist):
min_dist = dist
min_index = i
if (min_index == 0):
min_dist = distance(current_point, points[0])
total_dist += min_dist
unvisited.remove(min_index)
current_index = min_index
print(round(total_dist))
|
# Program that reads the name, sex and age of several people, storing each person's data in a
# dictionary and all the dictionaries in a list. At the end, it shows:
# how many people were registered; the average age; a list of the women; a list of the people
# older than the average age.
cadastro = []
pessoa = {}
somaidade = mediaidade = 0
while True:
pessoa.clear()
pessoa['nome'] = str(input('Nome: '))
while True:
pessoa['sexo'] = str(input('Sexo: [M/F] ')).upper()[0]
if pessoa['sexo'] in 'MF':
break
print('Inválido! Digite apenas M ou F.')
# end-while
pessoa['idade'] = int(input('Idade: '))
somaidade = somaidade + pessoa['idade']
    cadastro.append(pessoa.copy())
continua = str(input('Deseja cadastrar mais? [N para Não.]: ')).upper()[0]
if continua == 'N':
break
# end-if
# end-while
print(f'Foram cadastradas {len(cadastro)} pessoas.')
mediaidade = somaidade / len(cadastro)
print(f'A média de idade é {mediaidade:5.2f} anos.')
print('Lista das mulheres cadastradas:')
for cad in cadastro:
if cad['sexo'] == 'F':
print(f'{cad["nome"]}')
# end-if
# end-for
print()
print('Lista das pessoas acima da média de idade:')
for cad in cadastro:
if cad['idade'] > mediaidade:
print(f'{cad["nome"]}')
# end-if
# end-for
|
'''
13-loc and iloc [1]
With loc and iloc you can do practically any data selection operation on DataFrames
you can think of. loc is label-based, which means that you have to specify rows and
columns based on their row and column labels. iloc is integer index based, so you have to specify rows and columns by their integer index like you did in the previous exercise.
Try out the following commands in the IPython Shell to experiment with loc and iloc
to select observations. Each pair of commands here gives the same result.
cars.loc['RU']
cars.iloc[4]
cars.loc[['RU']]
cars.iloc[[4]]
cars.loc[['RU', 'AUS']]
cars.iloc[[4, 1]]
As before, code is included that imports the cars data as a Pandas DataFrame.
Instructions:
- Use loc or iloc to select the observation corresponding to Japan as a Series.
The label of this row is JAP, the index is 2. Make sure to print the resulting Series.
- Use loc or iloc to select the observations for Australia and Egypt as a DataFrame.
You can find out about the labels/indexes of these rows by inspecting cars in the
IPython Shell. Make sure to print the resulting DataFrame.
'''
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Print out observation for Japan
print(cars.loc['JAP'])
# Print out observations for Australia and Egypt
print(cars.loc[['AUS', 'EG']])
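# For comparison, the same Japan row selected by integer position with iloc
# (the exercise text above gives its index as 2):
print(cars.iloc[2])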
|
import lucs_tools
from time import sleep
class skyscanner(lucs_tools.internet.internet_base_util):
    BASE_LINK = 'https://www.skyscanner.com/transport/flights-from/FROM_AIRPORT/DEPART_DATE/RETURN_DATE/?adults=1&children=0&adultsv2=1&childrenv2=&infants=0&cabinclass=economy&rtn=1&preferdirects=false&outboundaltsenabled=ALTS_ENABLED&inboundaltsenabled=ALTS_ENABLED&ref=home&previousCultureSource=GEO_LOCATION&redirectedFrom=www.skyscanner.com&locale=en-GB&currency=USD&market=US&_mp=16a8d030756ea-03e7f4b2ae4014-e323069-190140-16a8d0307570_1557144041864&_ga=2.238286656.1539299884.1557143947-922572072.1557143947'
@staticmethod
def main(
airport_code_1,
airport_code_2,
depart_date,
return_date,
alts_enabled=False,
*args,
**kwargs,
):
util = skyscanner()
locations = (airport_code_1, airport_code_2)
        dates = [depart_date.split('/' if '/' in depart_date else '.'), return_date.split('/' if '/' in return_date else '.')]
dates = [''.join([date[-1]] + date[0:-1]) for date in dates]
for location in locations:
link = util.get_formatted_link(
airport_code=location,
dates=dates,
alts_enabled=alts_enabled,
)
util.open_link(link)
return util
def get_formatted_link(
self,
airport_code,
dates,
alts_enabled,
):
link = self.BASE_LINK
link = link.replace('ALTS_ENABLED', 'true' if alts_enabled else 'false')
link = link.replace('FROM_AIRPORT', airport_code.lower())
link = link.replace('DEPART_DATE', dates[0])
link = link.replace('RETURN_DATE', dates[1])
return link
# elt = self.get_elements_with_param_matching_spec(
# 'class_name',
# 'BpkInput_bpk-input__XAfK8',
# )[0]
# elt.click()
# elt = self.driver.switch_to_active_element()
# elt.send_keys('{}\t\t'.format(searchkey))
# elt = self.driver.switch_to_active_element()
# elt.send_keys()
def grab_data(
self,
):
browse_elts = True
elts = self.get_elements_with_param_matching_spec('class_name', 'browse_data_route')
# def inputdates(
# self,
# dar
# )
class google(lucs_tools.internet.internet_base_util):
@staticmethod
def main(
place1,
place2,
date1,
date2,
):
texts = []
util = google()
for place in [place1, place2]:
# so verbose it comments itself ;?0
util.openlink()
util.inputsearch(place)
util.inputdates(date1, date2)
util.clicksearch()
# grab data
texts.append(util.scraperesults())
return texts
def openlink(
self
):
self.open_link('https://www.google.com/flights?hl=en')
def inputsearch(
self,
searchkey
):
elt = self.get_elements_with_param_matching_spec('class_name', 'flt-input')[0]
elt.click()
elt = self.driver.switch_to_active_element()
sleep(0.4)
elt = self.driver.switch_to_active_element()
elt.send_keys('{}'.format(searchkey))
sleep(1)
elt = self.driver.switch_to_active_element()
elt.send_keys('\n')
sleep(1)
def inputdates(
self,
date1,
date2,
):
#print(i, datekey)
date = self.get_elements_with_param_matching_spec('class_name', 'gws-flights-form__date-content')[0]
date.click()
sleep(0.5)
date = self.driver.switch_to_active_element()
sleep(0.5)
date.send_keys('{}\t{}'.format(date1, date2))
sleep(0.5)
elt = self.get_element_with_param_matching_spec('class_name', 'eE8hUfzg9Na__button')
elt.click()
def clicksearch(
self,
):
elt = self.get_elements_with_param_matching_spec('class_name', 'flt-form-sb')[0]
sleep(1)
elt.click()
def scraperesults(
self,
):
elt = self.get_elements_with_param_matching_spec('class_name', 'VIUEWc')[0]
return elt.text.split('\n')
|
__title__ = 'DNS Explorer/Tracker'
__version__ = '1.0.0'
__author__ = "Jan Wendling <jan.wendling@gmail.com>"
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2021 - Jan Wendling'
import dns.reversename, dns.resolver
import model.host
def run_application(target: str):
'''
IP or Domain to start
'''
root = model.host.Host(target)
root.crawl_dns()
print(root._dns_record._a)
print(root._dns_record._mx)
print(root._dns_record._ns)
def ppline(msg):
'''
Print pointed line
'''
line_length = 80
print("".join(["." for i in range(0, line_length-len(msg))]), end="")
print(msg)
def main():
ppline("Starting application")
run_application('demo')
ppline("End")
if "__main__" == __name__:
main()
|
# Solution Reference:
# http://stackoverflow.com/questions/16427073/signed-integer-to-twos-complement-hexadecimal
class Solution(object):
def toHex(self, num):
"""
:type num: int
:rtype: str
"""
#### works with positive num but fails with negative num
#
# d = { 10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f' }
# l = list(bin(num)[2:])
# l = l[::-1]
# l.extend('0' * (4*(len(l)/4+1) - len(l)))
#
# group = 0
# hex_result = ""
# temp = 0
#
# for i in xrange(len(l)):
# temp += (int(l[i]) * (2**group))
#
# if group == 3: # start from 0 and every 4 is a group
# if temp < 10:
# hex_result += str(temp)
# else:
# hex_result += str(d[temp])
# temp = 0
# group = 0
# else:
# group += 1
#
# return hex_result[::-1]
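        # Masking with (1 << 32) - 1 maps a negative Python int onto its unsigned
        # 32-bit two's-complement value before formatting it as hex.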
bits = 32
return "{0:{1}x}".format(num & ((1<<bits) - 1), bits//4).lstrip()
def main():
n = -1
solution = Solution()
    print(solution.toHex(n))
if __name__ == '__main__':
main()
|
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.launchers import JsrunLauncher
from parsl.providers import LSFProvider
from parsl.addresses import address_by_interface
config = Config(
executors=[
HighThroughputExecutor(
label='Summit_HTEX',
working_dir='/gpfs/alpine/scratch/yadunan/gen011/',
address=address_by_interface('ib0'), # This assumes Parsl is running on login node
worker_port_range=(50000, 55000),
provider=LSFProvider(
launcher=JsrunLauncher(),
walltime="00:10:00",
nodes_per_block=2,
init_blocks=1,
max_blocks=1,
worker_init="source ~/setup.sh",
project='GEN011WORKFLOW',
cmd_timeout=60
),
)
],
)
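# Typically this module is imported by the driver script, which loads the
# configuration before defining any Parsl apps, e.g.:
#
#     import parsl
#     parsl.load(config)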
|
import click
import functools
def commet_logger_args(func):
@functools.wraps(func)
@click.option("--comet-project-name")
@click.option("--comet-offline", is_flag=True)
@click.option("--comet-offline-dir", type=click.Path(exists=True), default=".")
@click.option("--comet-auto-metric-logging", is_flag=True)
@click.option("--comet-auto-output-logging", is_flag=True)
@click.option("--comet-log-code", is_flag=True)
@click.option("--comet-log-env-cpu", is_flag=True)
@click.option("--comet-log-env-gpu", is_flag=True)
@click.option("--comet-log-env-host", is_flag=True)
@click.option("--comet-log-graph", is_flag=True)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
class NullLogger:
def log_metric(self, *args, **kwargs):
pass
def log_metrics(self, *args, **kwargs):
pass
def log_parameter(self, *args, **kwargs):
pass
def log_parameters(self, *args, **kwargs):
pass
class CometLogger:
def __init__(self, args):
import comet_ml
comet_args = dict(
project_name=args.comet_project_name,
auto_metric_logging=args.comet_auto_metric_logging,
auto_output_logging=args.comet_auto_output_logging,
log_code=args.comet_log_code,
log_env_cpu=args.comet_log_env_cpu,
log_env_gpu=args.comet_log_env_gpu,
log_env_host=args.comet_log_env_host,
log_graph=args.comet_log_graph,
)
if args.comet_offline:
self.logger = comet_ml.OfflineExperiment(offline_directory=args.comet_offline_dir, **comet_args)
else:
self.logger = comet_ml.Experiment(**comet_args)
def log_metric(self, *args, **kwargs):
self.logger.log_metric(*args, **kwargs)
def log_metrics(self, *args, **kwargs):
self.logger.log_metrics(*args, **kwargs)
def log_parameter(self, *args, **kwargs):
self.logger.log_parameter(*args, **kwargs)
def log_parameters(self, *args, **kwargs):
self.logger.log_parameters(*args, **kwargs)
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 30th 22:21:07 2016
Simple driver for GrainHill model, based on example by Charlie Shobe for
his Brake model.
"""
import os
print('grain_hill_dakota_friendly_driver here. cwd = ' + os.getcwd())
import grain_hill_as_class
from landlab import load_params
import numpy as np
import sys
from importlib import reload  # reload() is not a builtin in Python 3
grain_hill_as_class = reload(grain_hill_as_class)
def two_node_diff(a):
"""Calculate and return diffs over two nodes instead of one."""
N = len(a)
return a[2:] - a[:(N-2)]
def calc_fractional_soil_cover(grain_hill):
"""Calculate and return fractional soil versus rock cover."""
num_soil_air_faces = 0.0
num_rock_air_faces = 0.0
grid = grain_hill.grid
node_state = grain_hill.ca.node_state
for link in range(grid.number_of_links):
tail = grid.node_at_link_tail[link]
head = grid.node_at_link_head[link]
if node_state[tail] == 0: # if tail is air, see if head is rock/sed
if node_state[head] == 7:
num_soil_air_faces += 1
elif node_state[head] == 8:
num_rock_air_faces += 1
elif node_state[head] == 0: # if head is air, see if tail is rock/sed
if node_state[tail] == 7:
num_soil_air_faces += 1
elif node_state[tail] == 8:
num_rock_air_faces += 1
total_surf_faces = num_soil_air_faces + num_rock_air_faces
frac_rock = num_rock_air_faces / total_surf_faces
frac_soil = num_soil_air_faces / total_surf_faces
print('Total number of surface faces: ' + str(total_surf_faces))
print('Number of soil-air faces: ' + str(num_soil_air_faces))
print('Number of rock-air faces: ' + str(num_rock_air_faces))
print('Percent rock-air faces: ' + str(100.0 * frac_rock))
print('Percent soil-air faces: ' + str(100.0 * frac_soil))
return frac_soil
dx = 0.1 # assumed node spacing, m
#DAKOTA stuff: setting input files
input_file = 'inputs.txt' #DAKOTA creates this
#INPUT VARIABLES
# read parameter values from file
params = load_params(input_file)
num_cols = params['number_of_node_columns']
num_rows = int(np.round(0.866 * 1.0 * (num_cols - 1)))
print('Launching run with ' + str(num_rows) + ' rows and ' + str(num_cols) + ' cols')
params['number_of_node_columns'] = num_cols
params['number_of_node_rows'] = num_rows
params['disturbance_rate'] = 10.0 ** params['disturbance_rate']
params['uplift_interval'] = 10.0 ** params['uplift_interval']
wprime = 0.4 * (10.0 ** params['weathering_rate'])
params['weathering_rate'] = wprime / params['uplift_interval']
# Calculate run duration
#
# Time for the domain to rise by L, where L is # of node cols
t1 = params['uplift_interval'] * num_cols
print('Time for domain rise:')
print(t1)
# Time to generate, on average, 10 * L disturbance events per column
t2 = 10 * num_cols / params['disturbance_rate']
print('Time for 10 L disturbances per column:')
print(t2)
# Take the minimum
tt = min(t1, t2)
# Time to have at least ten uplift events
t3 = 10 * params['uplift_interval']
# Take the max
params['run_duration'] = max(tt, t3)
if params['run_duration'] > 580000.0:
print('WARNING: something is wrong')
params['run_duration'] = 1.0
print('Run duration used:')
print(params['run_duration'])
params['plot_interval'] = 1.1 * params['run_duration']
params['output_interval'] = params['run_duration']
print('Running grainhill, params:')
print(params)
sys.stdout.flush()
# instantiate a GrainHill model
grain_hill = grain_hill_as_class.GrainHill((num_rows, num_cols), **params)
#run the model
grain_hill.run()
# compute and write the results
(elev_profile, soil) = grain_hill.get_profile_and_soil_thickness(grain_hill.grid,
grain_hill.ca.node_state)
max_elev = np.amax(elev_profile)
N = len(elev_profile)
mean_grad_left = np.mean(two_node_diff(elev_profile[:((N+1)//2)])/1.73205)
mean_grad_right = np.mean(-two_node_diff(elev_profile[((N+1)//2):])/1.73205)
mean_grad = (mean_grad_left + mean_grad_right) / 2
frac_soil = calc_fractional_soil_cover(grain_hill)
myfile = open('results.out', 'w')
myfile.write(str(max_elev) + ' ' + str(mean_grad) + ' ' + str(frac_soil)
+ '\n')
myfile.close()
# Make a plot to file
import matplotlib.pyplot as plt
grain_hill.grid.hexplot('node_state')
plt.savefig('final_hill.png')
|
# tests.test_utils.test_types
# Tests for type checking utilities and validation
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Fri May 19 10:58:32 2017 -0700
#
# ID: test_types.py [79cd8cf] benjamin@bengfort.com $
"""
Tests for type checking utilities and validation.
Generally if there is a problem with a type checking utility, the offending
object should be imported then added to the correct bucket under the import
statement (e.g. REGRESSORS). The pytest parametrize decorator uses these
groups to generate tests, so this will automatically cause the test to run on
that class.
"""
##########################################################################
## Imports
##########################################################################
import pytest
import inspect
import numpy as np
try:
import pandas as pd
except ImportError:
pd = None
# Yellowbrick Utilities
from yellowbrick.utils.types import *
from yellowbrick.base import Visualizer, ScoreVisualizer, ModelVisualizer
# Import Regressors
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge, RidgeCV, Lasso, LassoCV
REGRESSORS = [
SVR,
DecisionTreeRegressor,
MLPRegressor,
LinearRegression,
RandomForestRegressor,
Ridge,
RidgeCV,
Lasso,
LassoCV,
]
# Import Classifiers
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.naive_bayes import MultinomialNB, GaussianNB
CLASSIFIERS = [
SVC,
DecisionTreeClassifier,
MLPClassifier,
LogisticRegression,
RandomForestClassifier,
GradientBoostingClassifier,
MultinomialNB,
GaussianNB,
]
# Import Clusterers
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.cluster import AffinityPropagation, Birch
CLUSTERERS = [KMeans, MiniBatchKMeans, AffinityPropagation, Birch]
# Import Decompositions
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
DECOMPOSITIONS = [PCA, TruncatedSVD]
# Import Transformers
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
TRANSFORMERS = [
DictVectorizer,
QuantileTransformer,
StandardScaler,
SimpleImputer,
TfidfVectorizer,
]
# Import Pipeline Utilities
from sklearn.pipeline import Pipeline, FeatureUnion
PIPELINES = [Pipeline, FeatureUnion]
# Import GridSearch Utilities
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
SEARCH = [GridSearchCV, RandomizedSearchCV]
# Other Groups
MODELS = REGRESSORS + CLASSIFIERS + CLUSTERERS
ESTIMATORS = MODELS + DECOMPOSITIONS + TRANSFORMERS
# Get the name of the object to label test cases
def obj_name(obj):
if inspect.isclass(obj):
return obj.__name__
return obj.__class__.__name__
##########################################################################
## Model type checking test cases
##########################################################################
class TestModelTypeChecking(object):
"""
Test model type checking utilities
"""
##////////////////////////////////////////////////////////////////////
## is_estimator testing
##////////////////////////////////////////////////////////////////////
def test_estimator_alias(self):
"""
Assert isestimator aliases is_estimator
"""
assert isestimator is is_estimator
@pytest.mark.parametrize("model", ESTIMATORS, ids=obj_name)
def test_is_estimator(self, model):
"""
Test that is_estimator works for instances and classes
"""
assert inspect.isclass(model)
assert is_estimator(model)
obj = model()
assert is_estimator(obj)
@pytest.mark.parametrize(
"cls", [list, dict, tuple, set, str, bool, int, float], ids=obj_name
)
def test_not_is_estimator(self, cls):
"""
Assert Python objects are not estimators
"""
assert inspect.isclass(cls)
assert not is_estimator(cls)
obj = cls()
assert not is_estimator(obj)
def test_is_estimator_pipeline(self):
"""
Test that is_estimator works for pipelines
"""
assert is_estimator(Pipeline)
assert is_estimator(FeatureUnion)
model = Pipeline([("reduce_dim", PCA()), ("linreg", LinearRegression())])
assert is_estimator(model)
def test_is_estimator_search(self):
"""
Test that is_estimator works for search
"""
assert is_estimator(GridSearchCV)
assert is_estimator(RandomizedSearchCV)
model = GridSearchCV(SVR(), {"kernel": ["linear", "rbf"]})
assert is_estimator(model)
@pytest.mark.parametrize(
"viz,params",
[
(Visualizer, {}),
(ScoreVisualizer, {"estimator": LinearRegression()}),
(ModelVisualizer, {"estimator": LogisticRegression()}),
],
ids=["Visualizer", "ScoreVisualizer", "ModelVisualizer"],
)
def test_is_estimator_visualizer(self, viz, params):
"""
Test that is_estimator works for Visualizers
"""
assert inspect.isclass(viz)
assert is_estimator(viz)
obj = viz(**params)
assert is_estimator(obj)
##////////////////////////////////////////////////////////////////////
## is_regressor testing
##////////////////////////////////////////////////////////////////////
def test_regressor_alias(self):
"""
Assert isregressor aliases is_regressor
"""
assert isregressor is is_regressor
@pytest.mark.parametrize("model", REGRESSORS, ids=obj_name)
def test_is_regressor(self, model):
"""
Test that is_regressor works for instances and classes
"""
assert inspect.isclass(model)
assert is_regressor(model)
obj = model()
assert is_regressor(obj)
@pytest.mark.parametrize(
"model", CLASSIFIERS + CLUSTERERS + TRANSFORMERS + DECOMPOSITIONS, ids=obj_name
)
def test_not_is_regressor(self, model):
"""
Test that is_regressor does not match non-regressor estimators
"""
assert inspect.isclass(model)
assert not is_regressor(model)
obj = model()
assert not is_regressor(obj)
def test_is_regressor_pipeline(self):
"""
Test that is_regressor works for pipelines
"""
assert not is_regressor(Pipeline)
assert not is_regressor(FeatureUnion)
model = Pipeline([("reduce_dim", PCA()), ("linreg", LinearRegression())])
assert is_regressor(model)
@pytest.mark.xfail(reason="grid search has no _estimator_type it seems")
def test_is_regressor_search(self):
"""
Test that is_regressor works for search
"""
assert is_regressor(GridSearchCV)
assert is_regressor(RandomizedSearchCV)
model = GridSearchCV(SVR(), {"kernel": ["linear", "rbf"]})
assert is_regressor(model)
@pytest.mark.parametrize(
"viz,params",
[
(ScoreVisualizer, {"estimator": LinearRegression()}),
(ModelVisualizer, {"estimator": Ridge()}),
],
ids=["ScoreVisualizer", "ModelVisualizer"],
)
def test_is_regressor_visualizer(self, viz, params):
"""
Test that is_regressor works on visualizers
"""
assert inspect.isclass(viz)
assert not is_regressor(viz)
obj = viz(**params)
assert is_regressor(obj)
##////////////////////////////////////////////////////////////////////
## is_classifier testing
##////////////////////////////////////////////////////////////////////
def test_classifier_alias(self):
"""
Assert isclassifier aliases is_classifier
"""
assert isclassifier is is_classifier
@pytest.mark.parametrize("model", CLASSIFIERS, ids=obj_name)
def test_is_classifier(self, model):
"""
Test that is_classifier works for instances and classes
"""
assert inspect.isclass(model)
assert is_classifier(model)
obj = model()
assert is_classifier(obj)
@pytest.mark.parametrize(
"model", REGRESSORS + CLUSTERERS + TRANSFORMERS + DECOMPOSITIONS, ids=obj_name
)
def test_not_is_classifier(self, model):
"""
Test that is_classifier does not match non-classifier estimators
"""
assert inspect.isclass(model)
assert not is_classifier(model)
obj = model()
assert not is_classifier(obj)
def test_classifier_pipeline(self):
"""
Test that is_classifier works for pipelines
"""
assert not is_classifier(Pipeline)
assert not is_classifier(FeatureUnion)
model = Pipeline([("reduce_dim", PCA()), ("linreg", LogisticRegression())])
assert is_classifier(model)
@pytest.mark.xfail(reason="grid search has no _estimator_type it seems")
def test_is_classifier_search(self):
"""
Test that is_classifier works for search
"""
assert is_classifier(GridSearchCV)
assert is_classifier(RandomizedSearchCV)
model = GridSearchCV(SVC(), {"kernel": ["linear", "rbf"]})
assert is_classifier(model)
@pytest.mark.parametrize(
"viz,params",
[
(ScoreVisualizer, {"estimator": MultinomialNB()}),
(ModelVisualizer, {"estimator": MLPClassifier()}),
],
ids=["ScoreVisualizer", "ModelVisualizer"],
)
def test_is_classifier_visualizer(self, viz, params):
"""
Test that is_classifier works on visualizers
"""
assert inspect.isclass(viz)
assert not is_classifier(viz)
obj = viz(**params)
assert is_classifier(obj)
##////////////////////////////////////////////////////////////////////
## is_clusterer testing
##////////////////////////////////////////////////////////////////////
def test_clusterer_alias(self):
"""
Assert isclusterer aliases is_clusterer
"""
assert isclusterer is is_clusterer
@pytest.mark.parametrize("model", CLUSTERERS, ids=obj_name)
def test_is_clusterer(self, model):
"""
Test that is_clusterer works for instances and classes
"""
assert inspect.isclass(model)
assert is_clusterer(model)
obj = model()
assert is_clusterer(obj)
@pytest.mark.parametrize(
"model", REGRESSORS + CLASSIFIERS + TRANSFORMERS + DECOMPOSITIONS, ids=obj_name
)
def test_not_is_clusterer(self, model):
"""
Test that is_clusterer does not match non-clusterer estimators
"""
assert inspect.isclass(model)
assert not is_clusterer(model)
obj = model()
assert not is_clusterer(obj)
def test_clusterer_pipeline(self):
"""
Test that is_clusterer works for pipelines
"""
assert not is_clusterer(Pipeline)
assert not is_clusterer(FeatureUnion)
model = Pipeline([("reduce_dim", PCA()), ("kmeans", KMeans())])
assert is_clusterer(model)
@pytest.mark.parametrize(
"viz,params", [
(ModelVisualizer, {"estimator": KMeans()})
], ids=["ModelVisualizer"]
)
def test_is_clusterer_visualizer(self, viz, params):
"""
Test that is_clusterer works on visualizers
"""
assert inspect.isclass(viz)
assert not is_clusterer(viz)
obj = viz(**params)
assert is_clusterer(obj)
##////////////////////////////////////////////////////////////////////
## is_gridsearch testing
##////////////////////////////////////////////////////////////////////
def test_gridsearch_alias(self):
"""
Assert isgridsearch aliases is_gridsearch
"""
assert isgridsearch is is_gridsearch
@pytest.mark.parametrize("model", SEARCH, ids=obj_name)
def test_is_gridsearch(self, model):
"""
Test that is_gridsearch works correctly
"""
assert inspect.isclass(model)
assert is_gridsearch(model)
obj = model(SVC, {"C": [0.5, 1, 10]})
assert is_gridsearch(obj)
@pytest.mark.parametrize(
"model", [MLPRegressor, MLPClassifier, SimpleImputer], ids=obj_name
)
def test_not_is_gridsearch(self, model):
"""
Test that is_gridsearch does not match non grid searches
"""
assert inspect.isclass(model)
assert not is_gridsearch(model)
obj = model()
assert not is_gridsearch(obj)
##////////////////////////////////////////////////////////////////////
## is_probabilistic testing
##////////////////////////////////////////////////////////////////////
def test_probabilistic_alias(self):
"""
Assert isprobabilistic aliases is_probabilistic
"""
assert isprobabilistic is is_probabilistic
@pytest.mark.parametrize(
"model",
[
MultinomialNB,
GaussianNB,
LogisticRegression,
SVC,
RandomForestClassifier,
GradientBoostingClassifier,
MLPClassifier,
],
ids=obj_name,
)
def test_is_probabilistic(self, model):
"""
Test that is_probabilistic works correctly
"""
assert inspect.isclass(model)
assert is_probabilistic(model)
obj = model()
assert is_probabilistic(obj)
@pytest.mark.parametrize(
"model",
[MLPRegressor, SimpleImputer, StandardScaler, KMeans, RandomForestRegressor],
ids=obj_name,
)
def test_not_is_probabilistic(self, model):
"""
        Test that is_probabilistic does not match non-probabilistic estimators
"""
assert inspect.isclass(model)
assert not is_probabilistic(model)
obj = model()
assert not is_probabilistic(obj)
##########################################################################
## Data type checking test cases
##########################################################################
class TestDataTypeChecking(object):
"""
Test data type checking utilities
"""
##////////////////////////////////////////////////////////////////////
## is_dataframe testing
##////////////////////////////////////////////////////////////////////
def test_dataframe_alias(self):
"""
Assert isdataframe aliases is_dataframe
"""
assert isdataframe is is_dataframe
@pytest.mark.skipif(pd is None, reason="requires pandas")
def test_is_dataframe(self):
"""
Test that is_dataframe works correctly
"""
df = pd.DataFrame(
[{"a": 1, "b": 2.3, "c": "Hello"}, {"a": 2, "b": 3.14, "c": "World"}]
)
assert is_dataframe(df)
@pytest.mark.parametrize(
"obj",
[
np.array(
[(1, 2.0, "Hello"), (2, 3.0, "World")],
dtype=[("foo", "i4"), ("bar", "f4"), ("baz", "S10")],
),
np.array([[1, 2, 3], [1, 2, 3]]),
[[1, 2, 3], [1, 2, 3]],
],
ids=["structured array", "array", "list"],
)
def test_not_is_dataframe(self, obj):
"""
Test that is_dataframe does not match non-dataframes
"""
assert not is_dataframe(obj)
##////////////////////////////////////////////////////////////////////
## is_series testing
##////////////////////////////////////////////////////////////////////
def test_series_alias(self):
"""
Assert isseries aliases is_series
"""
assert isseries is is_series
@pytest.mark.skipif(pd is None, reason="requires pandas")
def test_is_series(self):
"""
Test that is_series works correctly
"""
df = pd.Series([1, 2, 3])
assert is_series(df)
@pytest.mark.parametrize(
"obj",
[
np.array(
[(1, 2.0, "Hello"), (2, 3.0, "World")],
dtype=[("foo", "i4"), ("bar", "f4"), ("baz", "S10")],
),
np.array([1, 2, 3]),
[1, 2, 3],
],
ids=["structured array", "array", "list"],
)
def test_not_is_series(self, obj):
"""
        Test that is_series does not match non-series objects
"""
assert not is_series(obj)
##////////////////////////////////////////////////////////////////////
## is_structured_array testing
##////////////////////////////////////////////////////////////////////
def test_structured_array_alias(self):
"""
Assert isstructuredarray aliases is_structured_array
"""
assert isstructuredarray is is_structured_array
def test_is_structured_array(self):
"""
Test that is_structured_array works correctly
"""
x = np.array(
[(1, 2.0, "Hello"), (2, 3.0, "World")],
dtype=[("foo", "i4"), ("bar", "f4"), ("baz", "S10")],
)
assert is_structured_array(x)
@pytest.mark.parametrize(
"obj", [np.array([[1, 2, 3], [1, 2, 3]]), [[1, 2, 3], [1, 2, 3]]], ids=obj_name
)
def test_not_is_structured_array(self, obj):
"""
Test that is_structured_array does not match non-structured-arrays
"""
assert not is_structured_array(obj)
|
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
import torchvision.transforms as T
import os
os.environ['TORCH_HOME'] = 'Cache'
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
import utils
from utils import COCO_INSTANCE_CATEGORY_NAMES
class ObjectDetector():
def __init__(self):
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        # load a model pre-trained on COCO
self.model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True).to(self.device)
self.model.eval()
self.transform = T.Compose([T.ToTensor()])
def Predict(self, image, score_thresh = 0.8):
im = (self.transform(image.copy())).to(self.device)
res = self.model([im])[0]
boxes_out, labels_out, scores_out = self.FilterResults(res, score_thresh)
return boxes_out, labels_out, scores_out
@staticmethod
def FilterResults(res, score_thresh):
boxes = res["boxes"].cpu().detach().numpy()
labels = res["labels"].cpu().detach().numpy()
scores = res["scores"].cpu().detach().numpy()
valid_indices = scores > score_thresh
boxes_out = boxes[valid_indices,:]
labels_out = labels[valid_indices]
scores_out = scores[valid_indices]
return boxes_out, labels_out, scores_out
if __name__ == "__main__":
od = ObjectDetector()
image = np.asarray(Image.open("./data/val2017/000000001268.jpg"))
boxes_out, labels_out, scores_out = od.Predict(image)
utils.PlotBoxes(image, boxes_out, labels_out, scores_out)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on ...
@author: ...
"""
|
from django.contrib import admin
from elections_app.models import Person, Info, Election
def import_data(modeladmin, request, queryset):
print("hello")
admin.site.add_action(import_data)
admin.site.register(Person)
admin.site.register(Info)
admin.site.register(Election)
|
import json
from base64 import b64encode, b64decode
from collections import namedtuple
from connexion.exceptions import ProblemException
from itertools import product
from skyfield.api import Loader, Topos, EarthSatellite
from home.leaf import LeafPassFile
from flask import request, Response
from home.models import GroundStation, Satellite, Pass, TaskStack
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from v0.accesses import Access
from v0.track import get_track_file
from v0.time import (add_seconds, now, utc, iso)
TWO_DAYS_S = 2 * 24 * 60 * 60
def search(limit=100, range_start=None, range_end=None, range_inclusive='both',
satellites=None, groundstations=None, order_by='start_time',
show_stale=False):
if satellites is None:
dj_sats = Satellite.objects.all()
else:
dj_sats = Satellite.objects.filter(hwid__in=satellites)
if groundstations is None:
dj_gss = GroundStation.objects.all()
else:
dj_gss = GroundStation.objects.filter(hwid__in=groundstations)
passes = Pass.objects.filter(
satellite__in=dj_sats, groundstation__in=dj_gss,
)
# set the default time range, if no range is specified
if range_start is None and range_end is None:
range_start = now()
range_end = add_seconds(range_start, TWO_DAYS_S)
# filter the start of the range
if range_start is not None:
range_start = utc(range_start)
if range_inclusive in ['end', 'neither']:
passes = passes.filter(start_time__gte=range_start.utc_datetime())
else:
passes = passes.filter(end_time__gte=range_start.utc_datetime())
# filter the end of the range
if range_end is not None:
range_end = utc(range_end)
if range_inclusive in ['start', 'neither']:
passes = passes.filter(end_time__lte=range_end.utc_datetime())
else:
passes = passes.filter(start_time__lte=range_end.utc_datetime())
if not show_stale:
passes = passes.exclude(Q(scheduled_on_gs=False) &
Q(scheduled_on_sat=False) &
Q(is_desired=False))
passes = passes.all().order_by(order_by)[:limit]
return [p.to_dict() for p in passes]
def get_pass(uuid):
return Pass.objects.get(uuid=uuid).to_dict()
def delete(uuid):
pass_obj = Pass.objects.get(uuid=uuid)
problems = []
if pass_obj.scheduled_on_gs:
problems += [
'pass is scheduled on the groundstation ({hwid})'.format(
hwid=pass_obj.groundstation.hwid)
]
if pass_obj.scheduled_on_sat:
problems += [
'pass is scheduled on the satellite ({hwid})'.format(
hwid=pass_obj.satellite.hwid)
]
if problems:
raise ProblemException(
status=400,
title='Cannot delete pass that is scheduled',
detail=problems
)
pass_obj.delete()
return None, 204
def patch(uuid, _pass):
_pass["uuid"] = uuid
pass_obj = Pass.objects.get(uuid=uuid)
for key, value in _pass.items():
setattr(pass_obj, key, value)
pass_obj.save()
return pass_obj.to_dict()
def get_track(uuid):
_pass = Pass.objects.get(uuid=uuid)
access = _pass.access()
access._start_time.tai = max(utc(_pass.start_time).tai, access.start_time.tai)
access._end_time.tai = min(utc(_pass.end_time).tai, access.end_time.tai)
return get_track_file(access)
def recalculate(uuid):
    _pass = Pass.objects.get(uuid=uuid)
valid = _pass.recompute()
status = 200 if valid else 400
return None, status
def put(uuid, _pass):
_pass["uuid"] = uuid
try:
access_id = _pass["access_id"]
access = Access.from_id(access_id)
sat_obj = access.satellite
gs_obj = access.groundstation
_pass.setdefault("start_time", iso(access.start_time))
_pass.setdefault("end_time", iso(access.end_time))
except KeyError:
# user provided all required fields instead of access id
sat_hwid = _pass["satellite"]
sat_obj = Satellite.objects.get(hwid=sat_hwid)
gs_hwid = _pass["groundstation"]
gs_obj = GroundStation.objects.get(hwid=gs_hwid)
try:
access_id = Access.from_overlap(
_pass["start_time"], _pass["end_time"],
sat_obj, gs_obj
).access_id
except ObjectDoesNotExist:
_pass["is_valid"] = False
# FIXME we are creating an new Pass object to get all of the defaults
# but we don't want to create the foreign keys yet, so we pop them
# out, create the object, then add them back in...
_pass.pop("satellite", None)
_pass.pop("groundstation", None)
task_stack_uuid = _pass.pop("task_stack", None)
if task_stack_uuid:
task_stack = TaskStack.objects.get(uuid=task_stack_uuid)
else:
task_stack = None
po = Pass(**_pass)
m = po.to_dict()
m["satellite"] = sat_obj
m["groundstation"] = gs_obj
m["task_stack"] = task_stack
m["source_tle"] = m["satellite"].tle
_pass, _created = Pass.objects.update_or_create(
defaults=m, uuid=uuid
)
status_code = 201 if _created else 200
return _pass.to_dict(), status_code
def get_attributes(uuid):
_pass = Pass.objects.get(uuid=uuid)
return _pass.attributes or {}
def patch_attributes(uuid, attributes):
_pass = Pass.objects.get(uuid=uuid)
if _pass.attributes:
_pass.attributes.update(attributes)
else:
_pass.attributes = attributes
_pass.save()
return _pass.attributes or {}
def put_attributes(uuid, attributes):
_pass = Pass.objects.get(uuid=uuid)
_pass.attributes = attributes
_pass.save()
return _pass.attributes or {}
|
from django.db import models
from account.models import Profile
from django.contrib.auth.models import User
STATUS_CHOICES = [
("created", "created"),
("accepted", "accepted"),
("done", "done"), # done jest po zamknieciu przez boomera
("finished", "finished") # fnished jest po zamknieciu przez boomera i potem volunteera
]
# Create your models here.
class Order(models.Model):
boomer = models.ForeignKey(Profile, related_name='orders',
on_delete=models.SET_NULL, null=True)
volunteer = models.ForeignKey(Profile, related_name='tasks',
on_delete=models.SET_NULL, null=True, blank=True)
coord_x = models.FloatField(default=0)
coord_y = models.FloatField(default=0)
comment = models.CharField(max_length=512, default='', blank=True)
paymentMethod = models.CharField(max_length=32, default='', blank=True)
status = models.CharField(max_length=32, choices = STATUS_CHOICES, default ='created')
class Product(models.Model):
name = models.CharField(max_length=32)
productType = models.CharField(max_length=32)
countity = models.CharField(max_length=32, default='')
price = models.FloatField(default=0)
order = models.ForeignKey(Order, related_name='products', on_delete=models.CASCADE)
isBought = models.BooleanField(default=False)
|
import requests  # install the library first: pip install requests
print("////////////")
# URL where the image is hosted
url = "http://search1.kakaocdn.net/argon/600x0_65_wr/ImZk3b2X1w8"
# Request the URL from the server
img_response = requests.get(url)
# If the request succeeded,
if img_response.status_code == 200:
    # print(img_response.content)
    print("============[이미지 저장]============")
    with open("test.jpg", "wb") as f:  # "wb": write in binary mode
        f.write(img_response.content)  # img_response.content holds the JPEG bytes; writing them to a file saves the image
|
import requests
from bs4 import BeautifulSoup
results = []
class Sale:
    # The data the parser will search against
Sale = 'https://www.amway.ua/nashi-marky/holiday-promotions'
    headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'
}
def parse(self):
        full_page = requests.get(self.Sale, headers=self.headers)
soup = BeautifulSoup(full_page.content, 'html.parser')
global results
for comps in soup.findAll('div', class_='layout_element component_wrapper wrapped_amway_product_teaser'):
if soup.a.get('href') is None:
pass
else:
results.append({
'Artical': comps.find('div', class_='sku'),
'Link': comps.find('div', class_='panel_title'),
'Title': comps.find('div', class_='panel_title'),
'Price': comps.find('td', class_='cell_2 data_column'),
'Image': comps.find('img')
})
|
from datetime import datetime
from typing import Optional
from uuid import uuid1
import pytest
import pytz
from snuba.consumer import KafkaMessageMetadata
from snuba.consumers.snapshot_worker import SnapshotProcessor
from snuba.datasets.cdc.types import InsertEvent
from snuba.datasets.storages import StorageKey
from snuba.datasets.storages.factory import get_writable_storage
from snuba.processor import InsertBatch, ProcessedMessage
from snuba.snapshots import SnapshotId
from snuba.snapshots.postgres_snapshot import Xid
from snuba.stateful_consumer.control_protocol import TransactionData
def get_insert_event(xid: int) -> InsertEvent:
return {
"event": "change",
"xid": xid,
"kind": "insert",
"schema": "public",
"table": "sentry_groupedmessage",
"columnnames": [
"id",
"logger",
"level",
"message",
"view",
"status",
"times_seen",
"last_seen",
"first_seen",
"data",
"score",
"project_id",
"time_spent_total",
"time_spent_count",
"resolved_at",
"active_at",
"is_public",
"platform",
"num_comments",
"first_release_id",
"short_id",
],
"columntypes": [
"bigint",
"character varying(64)",
"integer",
"text",
"character varying(200)",
"integer",
"integer",
"timestamp with time zone",
"timestamp with time zone",
"text",
"integer",
"bigint",
"integer",
"integer",
"timestamp with time zone",
"timestamp with time zone",
"boolean",
"character varying(64)",
"integer",
"bigint",
"bigint",
],
"columnvalues": [
74,
"",
40,
"<module> ZeroDivisionError integer division or modulo by zero client3.py __main__ in <module>",
"__main__ in <module>",
0,
2,
"2019-06-19 06:46:28+00",
"2019-06-19 06:45:32+00",
"eJyT7tuwzAM3PkV2pzJiO34VRSdmvxAgA5dCtViDAGyJEi0AffrSxrZOlSTjrzj3Z1MrOBekCWHBcQaPj4xhXe72WyDv6YU0ouynnDGpMxzrEJSSzCrC+p7Vz8sgNhAvhdOZ/pKOKHd0PC5C9yqtjuPddcPQ9n0w8hPiLRHsWvZGsWD/91xIya2IFxz7vJWfTUlHHnwSCEBUkbTZrxCCcOf2baY/XTU1VJm9cjHL4JriHPYvOnliyP0Jt2q4SpLkz7v6owW9E9rEOvl0PawczxcvkLIWppxg==",
1560926969,
2,
0,
0,
None,
"2019-06-19 06:45:32+00",
False,
"python",
0,
None,
20,
],
}
PROCESSED = {
"offset": 1,
"project_id": 2,
"id": 74,
"record_deleted": 0,
"status": 0,
"last_seen": datetime(2019, 6, 19, 6, 46, 28, tzinfo=pytz.UTC),
"first_seen": datetime(2019, 6, 19, 6, 45, 32, tzinfo=pytz.UTC),
"active_at": datetime(2019, 6, 19, 6, 45, 32, tzinfo=pytz.UTC),
"first_release_id": None,
}
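# Expected results for the xids exercised below, given the transaction window
# configured in the test (xmin=100, xmax=200, xip_list=[120, 130]): events whose
# rows are already contained in the snapshot are dropped (None), while events
# that were in flight or committed after the snapshot yield an InsertBatch.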
test_data = [
(90, None),
(100, None),
(110, None),
(120, InsertBatch([PROCESSED])),
(210, InsertBatch([PROCESSED])),
]
@pytest.mark.parametrize("xid, expected", test_data)
def test_send_message(xid: int, expected: Optional[ProcessedMessage]) -> None:
processor = (
get_writable_storage(StorageKey.GROUPEDMESSAGES)
.get_table_writer()
.get_stream_loader()
.get_processor()
)
worker = SnapshotProcessor(
processor=processor,
snapshot_id=SnapshotId(str(uuid1())),
transaction_data=TransactionData(
xmin=Xid(100), xmax=Xid(200), xip_list=[Xid(120), Xid(130)]
),
)
ret = worker.process_message(
get_insert_event(xid),
KafkaMessageMetadata(offset=1, partition=0, timestamp=datetime.now()),
)
assert ret == expected
|
from django.urls import include, path
from rest_framework import routers
from credit_integration import views
app_name = "credit_integration"
router = routers.DefaultRouter()
urlpatterns = [
path("", include(router.urls)),
path(
"get_credit_decisions/",
views.get_credit_decisions,
name="get-credit-decisions",
),
path(
"send_credit_decision_inquiry/",
views.send_credit_decision_inquiry,
name="send-credit-decision-inquiry",
),
]
|
import asyncio
import os
from unittest import mock
import pytest
from async_asgi_testclient import TestClient
from django.http import HttpResponse
from django.urls.conf import path
from django_simple_task import defer
os.environ["DJANGO_SETTINGS_MODULE"] = "tests.settings"
@pytest.fixture
async def get_app():
async def _get_app(patterns, asgi_version=3, inner_asgi_version=3):
from . import urls, app
urls.urlpatterns.clear()
urls.urlpatterns.extend(patterns)
test_app = (
app.application
if inner_asgi_version == 3
else app.application_wrapping_asgi2
)
if asgi_version == 2:
return app.application_wrapt_as_asgi2
return test_app
return _get_app
@pytest.mark.asyncio
async def test_sanity_check(get_app):
def view(requests):
return HttpResponse("Foo")
app = await get_app([path("", view)])
async with TestClient(app) as client:
resp = await client.get("/")
assert resp.status_code == 200
assert resp.text == "Foo"
app_asgi2 = await get_app([path("", view)], 2)
async with TestClient(app_asgi2) as client_asgi2:
resp = await client_asgi2.get("/")
assert resp.status_code == 200
assert resp.text == "Foo"
    app_wrapping_asgi2 = await get_app([path("", view)], 3, 2)
    async with TestClient(app_wrapping_asgi2) as client_wrapping:
        resp = await client_wrapping.get("/")
assert resp.status_code == 200
assert resp.text == "Foo"
@pytest.mark.asyncio
async def test_should_call_task(get_app):
task = mock.MagicMock()
def view(requests):
defer(task)
return HttpResponse("Foo1")
app = await get_app([path("", view)])
async with TestClient(app) as client:
task.assert_not_called()
resp = await client.get("/")
assert resp.status_code == 200
assert resp.text == "Foo1"
task.assert_called_once()
@pytest.mark.asyncio
async def test_should_call_async_task(get_app):
cb = mock.MagicMock()
async def task():
await asyncio.sleep(1)
cb()
def view(requests):
defer(task)
defer(task)
defer(task)
defer(task)
return HttpResponse("Foo")
app = await get_app([path("", view)])
async with TestClient(app) as client:
cb.assert_not_called()
resp = await client.get("/")
assert resp.text == "Foo"
        cb.assert_not_called()
    # The deferred tasks complete when the test client shuts the application down
    assert cb.call_count == 4
|
# -*- coding: utf-8 -*-
'''
Handles PDF form filling
'''
from collections import OrderedDict
from PyPDF2 import PdfFileWriter, PdfFileReader
def _getFields(obj, tree=None, retval=None, fileobj=None):
"""
Extracts field data if this PDF contains interactive form fields.
The *tree* and *retval* parameters are for recursive use.
:param fileobj: A file object (usually a text file) to write
a report to on all interactive form fields found.
:return: A dictionary where each key is a field name, and each
value is a :class:`Field<PyPDF2.generic.Field>` object. By
default, the mapping name is used for keys.
:rtype: dict, or ``None`` if form data could not be located.
"""
fieldAttributes = {
'/FT': 'Field Type',
'/Parent': 'Parent',
'/T': 'Field Name',
'/TU': 'Alternate Field Name',
'/TM': 'Mapping Name',
'/Ff': 'Field Flags',
'/V': 'Value',
'/DV': 'Default Value',
}
if retval is None:
retval = OrderedDict()
catalog = obj.trailer["/Root"]
# get the AcroForm tree
if "/AcroForm" in catalog:
tree = catalog["/AcroForm"]
else:
return None
if tree is None:
return retval
obj._checkKids(tree, retval, fileobj)
for attr in fieldAttributes:
if attr in tree:
# Tree is a field
obj._buildField(tree, retval, fileobj, fieldAttributes)
break
if "/Fields" in tree:
fields = tree["/Fields"]
for f in fields:
field = f.getObject()
obj._buildField(field, retval, fileobj, fieldAttributes)
return retval
def get_form_fields(infile):
'''Returns fields of the document'''
infile = PdfFileReader(open(infile, 'rb'))
fields = _getFields(infile)
return OrderedDict((k, v.get('/V', '')) for k, v in fields.items())
def update_form_values(infile, outfile, newvals=None):
'''Fill values and create new PDF file'''
pdf = PdfFileReader(open(infile, 'rb'))
writer = PdfFileWriter()
for i in range(pdf.getNumPages()):
page = pdf.getPage(i)
if newvals:
writer.updatePageFormFieldValues(page, newvals)
else:
writer.updatePageFormFieldValues(
page,
{k: f'#{i} {k}={v}' for i, (k, v) in enumerate(get_form_fields(infile).items())},
)
writer.addPage(page)
with open(outfile, 'wb') as out:
writer.write(out)
def prepare_directory_structure(filename):
import datetime
import pathlib
current = datetime.datetime.now()
report_path = (
pathlib.Path.cwd()
/ 'kela_lomakkeet'
/ str(current.year)
/ str(current.month)
)
report_path.mkdir(parents=True, exist_ok=True)
report_file_path = report_path / (filename + '.pdf')
return report_file_path
def write_invoice(therapist, customer):
file_name = prepare_directory_structure(customer.name)
fields = {
'tx02': customer.name,
        'tx03': customer.id_number,
'tx04': therapist.company_name,
'tx05': therapist.id_number,
'tx06': therapist.phone_number,
'tx07': therapist.email,
'tx08': therapist.iban,
'tx09': therapist.bic,
'tx80': therapist.name,
}
import datetime
fields['tx119'] = datetime.date.today().strftime("%d.%m.%Y")
index = 0
with open(str(file_name).replace('.pdf', '.txt'), 'w') as out_file:
out_file.write(customer.name + "\n\n")
out_file.write("Tuntihinta: " + str(customer.hour_price) + "\n\n")
out_file.write("Sähköposti: " + customer.email + "\n\n")
out_file.write("Osoite: " + customer.street_address + "\n\n")
out_file.write("Kelakorvaus: " + str(therapist.kelakorvaus) + "\n\n")
out_file.write("Laskutustapa: " + customer.way_of_billing + "\n\n")
out_file.write("Käynnit:\n")
total_cost = 0
total_without_kela = 0
for visit in sorted(customer.visits, key=lambda x: x.visit_date):
fields['tx' + str(16 + index * 3)] = visit.visit_date.strftime("%d.%m.%Y")
fields['tx' + str(17 + index * 3)] = visit.visit_type
fields['tx' + str(18 + index * 3)] = visit.cost
index += 1
total_cost += visit.cost
total_without_kela += visit.cost - therapist.kelakorvaus
out_file.write(str(index) + ": " + visit.visit_date.strftime("%d.%m.%Y") + "\n")
out_file.write("\n")
out_file.write("Yht: " + str(total_cost))
out_file.write("\nOmavastuu " + str(total_without_kela))
update_form_values('pdf_originals/ku205.pdf', file_name, fields)
def write_tilitys(therapist, customers, reference_number=None, biller=None, invoicer=None):
'''
    biller = the person preparing the settlement (tilityksen laatija)
    invoicer = the biller name reported by the rehabilitation service provider
'''
file_name = prepare_directory_structure('tilitys')
fields = {
'tx04': therapist.company_name,
        'tx05': therapist.id_number,  # business ID (y-tunnus) or personal identity code (henkilötunnus)
'tx06': therapist.street_address,
'tx07': therapist.phone_number,
'tx13': therapist.iban,
'tx14': therapist.bic,
'tx120': therapist.name,
}
if reference_number:
fields['tx16'] = reference_number
if invoicer:
fields['tx08'] = invoicer.name
fields['tx09'] = invoicer.id_number
if biller:
fields['tx10'] = biller.name
fields['tx11'] = biller.phone_number
fields['tx12'] = biller.fax
nro = 0
for customer in customers:
fields['tx' + str(19 + nro * 5)] = str(nro + 1)
fields['tx' + str(20 + nro * 5)] = customer.name
fields['tx' + str(21 + nro * 5)] = customer.id_number
fields['tx' + str(22 + nro * 5)] = customer.unbilled_time()
fields['tx' + str(23 + nro * 5)] = customer.unbilled_total()
nro += 1
fields['tx121'] = sum([cust.unbilled_total() for cust in customers])
fields['tx03'] = len(customers)
import datetime
fields['tx119'] = datetime.date.today().strftime("%d.%m.%Y")
update_form_values('pdf_originals/ku206.pdf', file_name, fields)
if __name__ == '__main__':
from pprint import pprint
pdf_file_name = 'ku205.pdf'
pprint(get_form_fields('pdf_originals/' + pdf_file_name))
update_form_values(
'pdf_originals/' + pdf_file_name, 'out-' + pdf_file_name
) # enumerate & fill the fields with their own names
update_form_values(
'pdf_originals/' + pdf_file_name,
'out2-' + pdf_file_name,
{'tx04': 'Psykologipalvelu Riikka Rahikkala', 'tx05': 'My Another'},
) # update the form fields
|
from shorty.services import constants
from shorty.services.bitly.bitly_serv import BitlyService
from shorty.services.tinyurl.tinyurl_serv import TinyUrlService
import validators
from shorty.services import error_handler
DEFAULT_SERVICE = "tinyurl"
ACCEPTED_SERVICES = ["tinyurl", "bitly"]
class Services:
""" this class combines the providers bitly and tinyurl """
def __init__(self, data):
        self.provider = data.get('provider', DEFAULT_SERVICE)
self.url = data['url']
self.data_keys = list(data.keys())
self.shorty = ""
def check_request_parameters_format(self):
        if any(key not in ('provider', 'url') for key in self.data_keys):
raise error_handler.wrong_request_parameters()
def check_url_and_provider_invalid_format(self):
if (not validators.url(self.url)) and (self.provider not in ACCEPTED_SERVICES):
raise error_handler.url_provider_invalid_format()
def check_url_invalid_format(self):
if not validators.url(self.url):
raise error_handler.url_invalid_format()
def check_provider_invalid_format(self):
if self.provider not in ACCEPTED_SERVICES:
raise error_handler.invalid_provider()
def check_errors(self):
self.check_url_and_provider_invalid_format()
self.check_url_invalid_format()
self.check_provider_invalid_format()
self.check_request_parameters_format()
def shortened_link(self):
if self.provider == "bitly":
self.shorty = BitlyService(self.url, constants.GENERIC_ACCESS_TOKEN).bitly_shortener()
if not self.shorty:
self.shorty = TinyUrlService(self.url).tinyurl_shortener()
if not self.shorty:
raise error_handler.both_services_down()
if self.provider == "tinyurl":
self.shorty = TinyUrlService(self.url).tinyurl_shortener()
if not self.shorty:
self.shorty = BitlyService(self.url, constants.GENERIC_ACCESS_TOKEN).bitly_shortener()
if not self.shorty:
raise error_handler.both_services_down()
return {"url": self.url, "link": self.shorty}
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import time
from azure.identity import DefaultAzureCredential
from azure.mgmt.cdn import CdnManagementClient
from azure.mgmt.resource import ResourceManagementClient
# - other dependence -
# - end -
def main():
SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
TIME = str(time.time()).replace('.','')
GROUP_NAME = "testcdn" + TIME
CDN = "cdn" + TIME
LOCATION='WestUs'
# Create client
# # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
resource_client = ResourceManagementClient(
credential=DefaultAzureCredential(),
subscription_id=SUBSCRIPTION_ID
)
cdn_client = CdnManagementClient(
credential=DefaultAzureCredential(),
subscription_id=SUBSCRIPTION_ID
)
# - init depended client -
# - end -
# Create resource group
resource_client.resource_groups.create_or_update(
GROUP_NAME,
{"location": LOCATION}
)
# - init depended resources -
# - end -
# Create cdn
cdn = cdn_client.profiles.begin_create(
GROUP_NAME,
CDN,
{
"location": LOCATION,
"sku": {
"name": "Standard_Verizon"
}
}
).result()
print("Create cdn:\n{}\n".format(cdn))
# Get cdn
cdn = cdn_client.profiles.get(
GROUP_NAME,
CDN
)
print("Get cdn:\n{}\n".format(cdn))
# Update cdn
cdn = cdn_client.profiles.begin_update(
GROUP_NAME,
CDN,
{
"tags": {
"additional_properties": "Tag1"
}
}
).result()
print("Update cdn:\n{}\n".format(cdn))
# Delete cdn
cdn = cdn_client.profiles.begin_delete(
GROUP_NAME,
CDN
).result()
print("Delete cdn.\n")
# Delete Group
resource_client.resource_groups.begin_delete(
GROUP_NAME
).result()
if __name__ == "__main__":
main()
|
# I am writing in PEP 8 style
# You are allowed not to put the spaces
# for example
# 1st exercise
# Introduction to sequences
# Loops, if, comparisons
#
# I have a list of integers: [5, 4, 8, 2]
# The goal is to sort the list in ascending order
#
# At the end of the exercise we should get [2, 4, 5, 8]
#
# Cheat: there is a built-in function called: sort
# The goal is to do it ourselves
l = []  # An empty list
l = list()  # Another empty list
l = [5, 4, 8, 2]  # A list with 4 integers in it
# A list behaves a bit like a string
# Lists and strings are both 'iterable' data structures
# So we can index a list
print(l[1])  # Returns the second element, that is 4
print(l[2:])  # Returns everything from the third element to the end: [8, 2]
l.append(9)  # [5, 4, 8, 2, 9] Result of append
print(l)
print(l.sort())  # Careful: this prints nothing useful, sort() does not return the sorted list
print(l)
l = [5, 4, 8, 2]  # A list with 4 integers in it
copy_l = []  # Variable that will hold the result of the exercise
# Strategy: take each element of list l and insert it into list copy_l
# What is the keyword to iterate over a collection?
# => For each element of l
for i in l:
    print(i)  # Prints the elements of the list one by one
for i in l:
    copy_l.append(i)
print(copy_l)
copy_l.clear()
for i in range(5):  # range is like a list: range(5) => [0, 1, 2, 3, 4]
    copy_l.append(i)
print(copy_l)
copy_l.clear()
l = [5, 4, 8, 2]  # A list with 4 integers in it
#
# l = [5, 4, 8, 2]
# c = []
#
# l = [5, 4, 8, 2]  the smallest one, i.e. the minimum
#         |
#         v
# c = [2]  I used append
# Now we have to remove 2 from the list
# l.remove(2)
# l = [5, 4, 8]
# c = [2]
#
# l = [5, 4, 8]  the smallest one, i.e. the minimum
#      |
#      v
# c = [2, ]  I used append
# append: adds at the end
copy_l.append(min(l))  # For the minimum of a list, use min()
l.remove(min(l))  # Removes the minimum from l, so afterwards l = [5, 4, 8]
print(copy_l)
copy_l.append(min(l))  # The minimum of [5, 4, 8], so 4
l.remove(min(l))  # Removes the minimum from l, so afterwards l = [5, 8]
print(copy_l)  # [2, 4]
copy_l.append(min(l))  # The minimum of [5, 8], so 5
l.remove(min(l))  # Removes the minimum from l, so afterwards l = [8]
print(copy_l)  # [2, 4, 5]
copy_l.append(min(l))  # The minimum of [8], so 8
l.remove(min(l))  # Removes the minimum from l, so afterwards l = []
print(copy_l)  # [2, 4, 5, 8]
#
# We just copy-pasted the same code 4 times
# Not very dynamic
#
# Doing the same thing 4 times calls for a loop
# while = for + if
# while True:  # Keeps running as long as the condition is true
#     print(":)")
i = 0
while i < 10:
    print("Do something")
    i = i + 1
# Same thing as in javascript:
# while (i < 10){
#     console.log("Do something");
#     i = i + 1;
# }
l = [5, 4, 8, 2]  # A list with 4 integers in it
copy_l = []
# As long as there are numbers left to sort, take the minimum
# and append it to my new list copy_l
while l:  # Idiomatic pattern: as long as the list 'l' has elements in it
    copy_l.append(min(l))
    l.remove(min(l))
    print("New list:", copy_l)
    print("Old list:", l)
    print()
# You can write the min function yourself
# The goal: you are given a list, and you return
# the smallest number in the list.
# Create a variable x and initialize it (give it a starting value) - the value of the first element of the list
# For each element of the list
#     If the element is smaller than the variable x,
#         Then the variable x becomes that element
ll = [4, 2, 3, 1]
# We expect to see 1 printed on the screen
# Using min is forbidden
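# One possible solution sketch for the exercise above (not the only way):
# re-implement min() by hand, following the recipe described in the comments.
def my_min(numbers):
    smallest = numbers[0]  # start with the first element as the current smallest
    for n in numbers:
        if n < smallest:  # found something smaller: remember it
            smallest = n
    return smallest
print(my_min(ll))  # Expected output: 1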
|
import requests, json, time, currency, exchange, pools
class Miner:
    def cost(self, alg):
        # returns lowest cost of alg in H/s/BTC/Day
        pass
    def order(self, alg, cost):
        # opens a new order with alg algorithm costing cost btc
        pass
    def getOrders(self):
        # returns dictionary of order dictionaries
        pass
    def getOrder(self, alg):
        # returns order information dictionary
        pass
    def getBalance(self):
        # returns available btc balance
        pass
class Controller:
def refresh(self):
#refresh service data
pass
def updateOrders(self):
#run logic to update orders
pass
class NHController(Controller):
#cri: currency refresh interval
def __init__(self, api_id, api_key, pools, cri, target_rate, order_time, number):
self.nh = Nicehash(api_id, api_key, pools)
self.cri = cri
self.orders = self.nh.orders
#{str: {currency.Currency(): btc_rate}}
self.currency_cache = {}
self.last_cache = 0
self.currency_data = self.getCurrencyData()
self.target_rate = target_rate
self.order_time = order_time
self.order_number = number
def refresh(self):
self.nh.refreshOrderBook()
self.orders = self.nh.getOrders(True)
self.currency_data = self.getCurrencyData()
def update(self):
if self.nh.getAccountBalance() > 0.01:
for i in self.currency_data.items():
#get rate
                algorithm = list(i[1].keys())
                rate = list(i[1].values())[0]
currency = i[0]
if rate > self.target_rate:
openOrder = False
for x in self.orders.items():
if pools.getCurrencyFromPool(int(x[1]['alg']), x[1]['pool']) == currency:
openOrder = True
if not openOrder:
#calculate order speed
orderTotal = 0
if self.nh.getAccountBalance() / self.order_number > 0.01:
orderTotal = self.nh.getAccountBalance() / self.order_number
else:
orderTotal = 0.01
                            speed = (orderTotal * (2 * i[0]) / self.nh.cost(i[1].alg)) / self.nh.alg_data[i[1]]['prefix']
def getCurrencyData(self):
#{currency: rate}
update_cache = (self.last_cache + self.cri < int(time.time()))
currency_data = {}
for i in self.nh.pools.items():
for c in list(i[1].keys()):
if(len(i[1]) > 0):
#currency object from pools
#get btc price of currency
btc_rate = 0
if update_cache:
co = currency.Currency.currencyFromStr(c)
btc_rate = co.btcRate()
if btc_rate > 0:
self.currency_cache[c] = {co: btc_rate}
else:
btc_rate = self.currency_cache[c][co]
self.last_cache = int(time.time())
else:
btc_rate = self.currency_cache[c]
#pretend investment is 1 btc
rate = (self.nh.cost(i[0]))**(-1) *.97
#get currency object
co = list(self.currency_cache[c].keys())[0]
#currency mined in 1 day
profit = co.miningCalculator(rate, 24*60*60) * self.currency_cache[c][co]
currency_data[c] = profit
return currency_data
class Nicehash(Miner):
def __init__(self, api_id, api_key, pools):
self.api_id = api_id
self.api_key = api_key
self.pools = pools
self.alg_data = {}
self.alg_data = self.refreshOrderBook()
self.orders = {}
self.orders = self.getOrders(True)
def refreshOrderBook(self):
r = json.loads(requests.get('https://www.nicehash.com/api?method=buy.info').text)['result']['algorithms']
data = {}
for i in r:
            prefix = {'H': 10**0, 'kH': 10**3, 'mH': 10**6, 'gH': 10**9, 'tH': 10**12, 'pH': 10**15, 'KH': 10**3, 'MH': 10**6, 'GH': 10**9, 'TH': 10**12, 'PH': 10**15, 'kSol': 10**3, 'KSol': 10**3, 'MSol': 10**6}
data[int(i['algo'])] = { 'name': i['name'], 'prefix': prefix[i['speed_text']], 'decrease_amount': float(i['down_step'])}
self.alg_data = data
return data
def cost(self, alg):
r = json.loads(requests.get('https://www.nicehash.com/api?method=orders.get&location=0&algo=' + str(alg)).text)['result']
lowest = -1
for i in r['orders']:
if (lowest > float(i['price']) or lowest < 0) and int(i['type']) == 0 and int(i['workers']) > 0:
lowest = float(i['price'])
return lowest / self.alg_data[alg]['prefix']
def getOrders(self, refresh):
if refresh:
orders = {}
for i in range(0, 25):
o = json.loads(requests.get('https://www.nicehash.com/api?method=orders.get&my&id=' + self.api_id + '&key=' + self.api_key + '&location=0&algo=' + str(i)).text)['result']['orders']
for x in o:
if x['id'] in self.orders:
orders[int(x['id'])] = {'price': x['price'], 'btc_remaining': x['btc_avail'], 'btc_spent': x['btc_paid'] , 'last_decrease': self.orders[int(x['id'])]['last_decrease'], 'alg': str(i), 'speed': x['accepted_speed'], 'pool': x['pool_host']}
else:
orders[int(x['id'])] = {'price': x['price'], 'btc_remaining': x['btc_avail'], 'btc_spent': x['btc_paid'] , 'last_decrease': 0, 'alg': str(i), 'speed': x['accepted_speed'], 'pool': x['pool_host']}
return orders
else:
return self.orders
def getDecreaseAmount(self, alg):
return abs(float(self.alg_data[alg]['decrease_amount'])) / self.alg_data[alg]['prefix']
def decreaseOrder(self, oid):
if self.orders[oid]['last_decrease'] + (10 * 60) < int(time.time()):
r = json.loads(requests.get('https://www.nicehash.com/api?method=orders.set.price.decrease&id=' + self.api_id + '&key=' + self.api_key +'&location=0&algo=' + self.orders[oid]['alg'] + '&order=' + str(oid)).text)['result']
if 'success' in r:
self.orders[oid]['last_decrease'] = int(time.time())
return 'success' in r
else:
return False
def increaseOrder(self, oid, price):
r = json.loads(requests.get('https://www.nicehash.com/api?method=orders.set.price&id=' + self.api_id + '&key=' + self.api_key + '&algo=' + str(self.orders[oid]['alg']) + '&location=0&order=' + str(oid) + '&price=' +str(price)).text)
def createOrder(self, algo, currency, price, btcamount, speed):
pool = self.pools[algo][currency]
r = json.loads(requests.get('https://www.nicehash.com/api?method=orders.create&id=' + self.api_id +'&key=' + self.api_key + '&location=0&algo=' + str(algo) + '&amount=' + str(btcamount) + '&price=' + str(price) + '&limit=' + str(speed) + '&pool_host=' + pool['host'] + '&pool_port=' + pool['port'] + '&pool_user=' + pool['user'] + '&pool_pass=' + pool['pass']).text)['result']
print(r)
print(price)
return 'success' in r
def getAccountBalance(self):
r = json.loads(requests.get('https://www.nicehash.com/api?method=balance&id=' + self.api_id + '&key=' + self.api_key).text)['result']['balance_confirmed']
return float(r)
    def getAmountInOrders(self):
        total = 0.0
        for order in self.orders.values():
            total = total + float(order['btc_remaining'])
        return total
def getPrefix(self, alg):
return self.alg_data[alg]['prefix']
|
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from member.models import SocialUserData
User = get_user_model()
class Backend(ModelBackend):
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
class EmailPasswordBackend(Backend):
"""Authentication with user's email and password
"""
def authenticate(self, request, username=None, password=None, **kwargs):
try:
user = User.objects.get(email=username)
except User.DoesNotExist:
return None
if user.check_password(password):
return user
class SocialLoginBackend(Backend):
"""Authentication with social service id
"""
def authenticate(self, request, service=None, username=None, **kwargs):
try:
user_data = SocialUserData.objects.get(service=service,
username=username)
return user_data.user
except SocialUserData.DoesNotExist:
return None
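# To activate these backends, list them in settings.AUTHENTICATION_BACKENDS. A sketch
# (the dotted path 'member.backends' is an assumption about where this module lives):
#
#   AUTHENTICATION_BACKENDS = [
#       'member.backends.EmailPasswordBackend',
#       'member.backends.SocialLoginBackend',
#   ]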
|
### Required Libraries ###
import os
# General Data Manipulation Libraries
import numpy as np
import pandas as pd
# Log and Pickle Libraries
import logging
import pickle
# Model & Helper Libraries
import xgboost
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)
if __name__=='__main__':
### Model & Data Import ###
# Load model
file_name = "xgb_cls.pkl"
xgb_model_loaded = pickle.load(open(file_name, "rb"))
# Load Data
data_dir = './data'
try:
df_test = pd.read_csv(data_dir + '/test.csv')
    except Exception:
logger.exception("Unable to load CSV file.")
    ### Data Preparation ###
    var_columns = [c for c in df_test.columns if c not in ['ID_code']]
    X = df_test.loc[:, var_columns]
### Model Prediction ###
y_test_pred = xgb_model_loaded.predict(X)
### Save Results ###
# Construct Dataframe
data = {'ID_code':df_test.loc[:, 'ID_code'],'Target':y_test_pred}
df = pd.DataFrame(data)
# Create New Datafile
filename = 'predict.csv'
    if os.path.isfile(filename):
        print('File Exists. Going to overwrite.')
    # Writing Data to csv file
    df.to_csv(filename, index=False, header=True)
|
#!/usr/bin/python3
VOWELS = ['a', 'e', 'i', 'o', 'u']
def words():
with open('word_list.txt') as f:
for line in f:
yield line.strip()
def get_vowels(word):
vowel_only = []
for letter in word:
if letter in VOWELS:
vowel_only.append(letter)
return vowel_only
def test_word(word):
if len(word) < 6:
return False
if get_vowels(word) == VOWELS:
return True
return False
def main():
for word in words():
if test_word(word):
print(word)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 1 09:02:36 2017
@author: Paris
"""
import autograd.numpy as np
import matplotlib.pyplot as plt
from pyDOE import lhs
from gaussian_process import GP
np.random.seed(1234)
def f(x):
return x * np.sin(4.0*np.pi*x)
def Normalize(X, X_m, X_s):
return (X-X_m)/(X_s)
def Denormalize(X, X_m, X_s):
return X_s*X + X_m
if __name__ == "__main__":
N = 8
D = 1
lb = -0.5*np.ones(D)
ub = 1.0*np.ones(D)
noise = 0.00
tol = 1e-4
nsteps = 20
Normalize_input_data = 1
Normalize_output_data = 1
# Training data
X = lb + (ub-lb)*lhs(D, N)
y = f(X) + noise*np.random.randn(N,D)
# Test data
nn = 200
X_star = np.linspace(lb, ub, nn)[:,None]
y_star = f(X_star)
# Normalize Input Data
if Normalize_input_data == 1:
X_m = np.mean(X, axis = 0)
X_s = np.std(X, axis = 0)
X = Normalize(X, X_m, X_s)
lb = Normalize(lb, X_m, X_s)
ub = Normalize(ub, X_m, X_s)
X_star = Normalize(X_star, X_m, X_s)
# Normalize Output Data
if Normalize_output_data == 1:
y_m = np.mean(y, axis = 0)
y_s = np.std(y, axis = 0)
y = Normalize(y, y_m, y_s)
y_star = Normalize(y_star, y_m, y_s)
# Define model
model = GP(X, y)
plt.figure(1, facecolor = 'w')
for i in range(0,nsteps):
# Train
model.train()
# Predict
y_pred, y_var = model.predict(X_star)
y_var = np.abs(np.diag(y_var))[:,None]
# Sample where posterior variance is maximized
new_X = X_star[np.argmax(y_var),:]
# Check for convergence
if np.max(y_var) < tol:
print("Converged!")
break
# Normalize new point if needed
if Normalize_input_data == 1:
xx = Denormalize(new_X, X_m, X_s)
            new_y = f(xx) + noise*np.random.randn(1,D)
        else:
            new_y = f(new_X) + noise*np.random.randn(1,D)
if Normalize_output_data == 1:
new_y = Normalize(new_y, y_m, y_s)
# Plot
plt.cla()
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=16)
plt.plot(X_star, y_star, 'b-', label = "Exact", linewidth=2)
plt.plot(X_star, y_pred, 'r--', label = "Prediction", linewidth=2)
lower = y_pred - 2.0*np.sqrt(y_var)
upper = y_pred + 2.0*np.sqrt(y_var)
plt.fill_between(X_star.flatten(), lower.flatten(), upper.flatten(),
facecolor='orange', alpha=0.5, label="Two std band")
plt.plot(model.X,model.y,'bo', label = "Data")
plt.plot(new_X*np.ones(2), np.linspace(-4,4,2),'k--')
ax = plt.gca()
ax.set_xlim([lb[0], ub[0]])
ax.set_ylim([-4,4])
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.title("Iteration #%d" % (i+1))
plt.pause(0.5)
plt.savefig("../figures/AL_it_%d.png" % (i+1), format='png', dpi=300)
# Add new point to the training set
model.X = np.vstack((model.X, new_X))
model.y = np.vstack((model.y, new_y))
|
"""This module defines the lambda_handler
"""
import json
from scrapy.utils.project import get_project_settings
from scrapy.crawler import CrawlerRunner
from twisted.internet import reactor
from custom_process import CustomProcess
def spider_process(spider, settings=None):
"""Runs a scrapy CrawlerRunner"""
runner = CrawlerRunner(settings)
deferred = runner.crawl(spider)
deferred.addBoth(lambda _: reactor.stop())
reactor.run()
def run_spider(event):
"""Runs selected spider from event.
    This function runs the scrapy crawler in a separate multiprocessing
    Process. This is a workaround to avoid errors when the Lambda container
    is reused and scrapy tries to start the Twisted reactor again
    (restarting a reactor raises an exception).
    Note: The function expects the event to contain a pathParameters
    entry with the key spider_name.
Event example:
{
"pathParameters": {
"spider_name": "spider_name_here"
}
}
Example URL for API call:
"https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/spider/spider_name_here"
:param event: event from lambda handler.
:return: dictionary with response.
"""
settings = get_project_settings()
spider_name = event['pathParameters']['spider_name']
output_bucket = settings['OUTPUT_BUCKET']
settings['OUTPUT_URI'] = f"s3://{output_bucket}/{spider_name}-rss.xml"
p = CustomProcess(target=spider_process, args=(spider_name, settings))
p.start()
p.join()
if not p.exception:
status_code = 200
message = 'Success'
else:
error, traceback = p.exception
status_code = 500
message = 'Internal error:\n' + traceback
return {
"statusCode": status_code,
"body": json.dumps({
"message": message
}),
}
def lambda_handler(event, context):
response = run_spider(event)
return response
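# For illustration only: a minimal sketch of an exception-capturing Process in the spirit
# of the imported CustomProcess (the real custom_process module may differ). The child
# forwards any exception back to the parent through a Pipe, and the parent reads it as an
# (exception, traceback_string) tuple from the `.exception` property, matching its use in
# run_spider above.
import traceback
from multiprocessing import Pipe, Process

class _ExampleExceptionProcess(Process):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._parent_conn, self._child_conn = Pipe()
        self._exception = None

    def run(self):
        try:
            super().run()
            self._child_conn.send(None)
        except Exception as exc:
            # Send the exception and its formatted traceback to the parent process
            self._child_conn.send((exc, traceback.format_exc()))

    @property
    def exception(self):
        if self._parent_conn.poll():
            self._exception = self._parent_conn.recv()
        return self._exception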
|
from __future__ import annotations
import asyncio
from subprocess import PIPE, CalledProcessError
from typing import IO, Tuple, Union
from typing_extensions import Protocol
from .effect import Effect, Try, add_repr, depend, from_callable
from .either import Left, Right
from .functions import curry
from .immutable import Immutable
class Subprocess(Immutable):
"""
Module that enables running commands in the shell
"""
def run_in_shell(
self,
cmd: str,
stdin: Union[IO, int] = PIPE,
stdout: Union[IO, int] = PIPE,
stderr: Union[IO, int] = PIPE
) -> Try[CalledProcessError, Tuple[bytes, bytes]]:
"""
Get an `Effect` that runs `cmd` in the shell
Example:
>>> Subprocess().run_in_shell('cat foo.txt').run(None)
(b'contents of foo.txt', b'')
Args:
cmd: the command to run
stdin: input pipe for the subprocess
stdout: output pipe for the subprocess
stderr: error pipe for the subprocess
Return:
`Effect` that runs `cmd` in the shell and produces \
a tuple of `(stdout, stderr)`
"""
async def f(_):
proc = await asyncio.create_subprocess_shell(
cmd, stdin=stdin, stdout=stdout, stderr=stderr
)
stdout_, stderr_ = await proc.communicate()
if proc.returncode != 0:
return Left(
CalledProcessError(
proc.returncode, cmd, stdout_, stderr_
)
)
return Right((stdout_, stderr_))
return from_callable(f)
class HasSubprocess(Protocol):
"""
Module provider providing the subprocess module
"""
subprocess: Subprocess
"""
The provided `Subprocess` module
"""
@curry
@add_repr
def run_in_shell(
cmd: str,
stdin: Union[IO, int] = PIPE,
stdout: Union[IO, int] = PIPE,
stderr: Union[IO, int] = PIPE
) -> Effect[HasSubprocess, CalledProcessError, Tuple[bytes, bytes]]:
"""
Get an `Effect` that runs `cmd` in the shell
Example:
>>> class Env:
... subprocess = Subprocess()
>>> run_in_shell('cat foo.txt').run(Env())
(b'contents of foo.txt', b'')
Args:
cmd: the command to run
stdin: input pipe for the subprocess
stdout: output pipe for the subprocess
stderr: error pipe for the subprocess
Return:
`Effect` that runs `cmd` in the shell and produces \
a tuple of `(stdout, stderr)`
"""
return depend(HasSubprocess).and_then(
lambda env: env.subprocess.run_in_shell(cmd, stdin, stdout, stderr)
)
|
#!/usr/bin/env python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #
"""Main module for CauliflowerVest including wsgi URL mappings."""
import appengine_config
from google.appengine import api
from google.appengine import runtime
from cauliflowervest.server import urls
# TODO(user): we have a main module like this in other products to catch
# exceptions and appropriate log/notify users. However I don't think I've
# seen it trigger at all since we moved to HRD. Do we need/want this
# here?
def main():
try:
urls.main()
except (
api.datastore_errors.Timeout,
api.datastore_errors.InternalError,
runtime.apiproxy_errors.CapabilityDisabledError):
pass
# TODO(user): email? extra logging? ...
if __name__ == '__main__':
main()
|
import time
import numpy as np
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
from sklearn.gaussian_process import GaussianProcess
from sklearn.linear_model import Ridge, Lasso
from sklearn.svm import NuSVR, SVR
import scipy
from util import Logger, get_rmse
class Linear():
def __init__(self, type='Ridge', alpha=3, C=1.0, nu=0.2, limit=None, \
epsilon=0.1):
self.limit = limit
if type == 'Ridge':
self.model = Ridge(alpha=alpha)
elif type == 'SVR':
self.model = SVR(kernel='linear', C=C, epsilon=epsilon)
elif type == 'NuSVR':
self.model = NuSVR(C=C, nu=nu, kernel='linear')
elif type == 'Lasso':
self.model = Lasso(alpha=alpha)
@staticmethod
def get_cal(m):
        # get qualitative (categorical) features
# watch out as indices depend on feature vector!
return np.hstack((m[:,:23], m[:,24:37], m[:,38:52])) + 1
@staticmethod
def get_cant(m):
        # get quantitative (numerical) features
# watch out as indices depend on feature vector!
return np.hstack((m[:,23:24], m[:,37:38], m[:,52:]))
def fit(self, train_X, train_Y):
# no fitting done here, just saving data
if self.limit:
if len(train_X) > self.limit:
train_X = train_X[-self.limit:]
train_Y = train_Y[-self.limit:]
self.train_X = np.array(train_X)
self.train_Y = np.array(train_Y)
def predict(self, test_X):
# fitting done here
# not efficient on the long term
test_X = np.array(test_X)
enc = OneHotEncoder()
scal = MinMaxScaler()
data = np.vstack((self.train_X, test_X))
enc.fit(self.get_cal(data))
scal.fit(self.get_cant(data))
new_train_X1 = enc.transform(self.get_cal(self.train_X))
new_train_X2 = scal.transform(self.get_cant(self.train_X))
new_train_X = scipy.sparse.hstack((new_train_X1, new_train_X2))
new_test_X1 = enc.transform(self.get_cal(test_X))
new_test_X2 = scal.transform(self.get_cant(test_X))
new_test_X = scipy.sparse.hstack((new_test_X1, new_test_X2))
self.model.fit(new_train_X, self.train_Y)
R = self.model.predict(new_test_X)
return R
def tt(args):
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, \
ExtraTreesRegressor
task_id, model_type, model_params, Xtrain, Ytrain, Xtest, model_key, key = args
if len(Xtest) == 0:
return (task_id, [])
model_options = {
'GBR': GradientBoostingRegressor,
'RFR': RandomForestRegressor,
'Linear': Linear,
'ETR': ExtraTreesRegressor,
}
map_params = {
'ne': 'n_estimators',
'md': 'max_depth',
'mss': 'min_samples_split',
'msl': 'min_samples_leaf',
'ss': 'subsample',
'lr': 'learning_rate',
'n': 'n_jobs',
'rs': 'random_state',
'a': 'alpha',
't': 'type',
'c': 'C',
'nu': 'nu',
'l': 'limit',
'e': 'epsilon',
}
mp = {}
for k, v in list(model_params.items()):
mp[map_params[k]] = v
m = model_options[model_type](**mp)
m.fit(Xtrain, Ytrain)
r = m.predict(Xtest)
del m
del model_options
return (task_id, r)
class SplitModel():
'''
build a series of models by spliting the dataset on a given feature
'''
def __init__(self, split_keys, index, n_est=5, models=None, \
weights=None, bias=None, seed=1):
self.bias = None
model_params = []
for md in [3, 6, 12, 16, 30]:
for lr in [0.1, 0.3, 0.6, 0.9]:
for mss in [5, 10, 16, 32, 64]:
                    msl = mss // 2
for ss in [0.3, 0.5, 0.7, 1]:
model_params.append(
('GBR', dict(ne=n_est, md=md, lr=lr, mss=mss, msl=msl, ss=ss))
)
for md in [3, 6, 12, 16, 30]:
for mss in [5, 10, 16, 32, 64]:
                msl = mss // 2
model_params.append(
('RFR', dict(ne=n_est, md=md, mss=mss, msl=msl, n=4, rs=seed+md+mss))
)
model_params.append(('Linear', dict(t='NuSVR', c=0.1, nu=0.2, l=25000)))
        model_params.append(('Linear', dict(t='SVR', c=0.1, e=0.05, l=10000)))  # helps on index1 (0.22013316780439385, 0.52, 0.2, 0.14, 0.08, 0.06)
model_params.append(('Linear', dict(t='Lasso', a=0.0001, l=20000)))
model_params.append(('Linear', dict(t='Lasso', a=0.00001, l=20000)))
self.bias = bias
if weights:
self.weights = weights
else:
self.weights = [1.0/len(models)] * len(models)
self.model_params = []
for i in range(len(models)):
self.model_params.append((i,) + model_params[models[i]])
for m in self.model_params:
print(m)
self.index = index
self.split_keys = split_keys
def train_test(self, Xtrain, Ytrain, Xtest):
Xtrain = np.array(Xtrain)
Ytrain = np.array(Ytrain)
Xtest = np.array(Xtest)
tasks = []
task_id = 0
results = []
l = Logger(len(self.split_keys) * len(self.model_params), \
len(self.split_keys), tag='SplitModel')
for key in self.split_keys:
mask_train = Xtrain[:,self.index] == key
mask_test = Xtest[:,self.index] == key
for model_key, model_type, model_params in self.model_params:
task = (
task_id,
model_type,
model_params,
Xtrain[mask_train],
Ytrain[mask_train],
Xtest[mask_test],
model_key,
key,
)
results.append(tt(task))
print((task_id, model_key, key))
tasks.append((task_id, model_key, key))
l.step()
task_id += 1
tasks = {t[0]: t[1:] for t in tasks}
result_batches = [np.array([0.0] * len(Xtest))\
for i in range(len(self.model_params))]
for result_set in results:
task_id, Ytask = result_set
task = tasks[task_id]
model_key = task[-2]
mask_test = Xtest[:,self.index] == task[-1]
result_batches[model_key][mask_test] += Ytask
Ytest = np.array([0.0] * len(Xtest))
for (i, batch) in enumerate(result_batches):
Ytest += batch * self.weights[i]
if self.bias:
Ytest += self.bias
return (Ytest, result_batches)
class BigModel():
def __init__(self, columns, n_est=5, seed=1):
split_keys1 = sorted(list(range(1,7)))
index1 = columns.index('ProductGroup')
split_keys2 = [0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 34, 35, 36, 37, 38, 60, 61, 62, 63, 64, 65, 65.5, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 120, 121, 122, 123, 124, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159]
index2 = columns.index('type2')
model_weights1 = [0.03131793, 0.02611096, 0.04683615, 0.04510745, 0.05901089, 0.05310125, 0.03905133, 0.06681227, 0.02479392, 0.04527772, 0.04649242, 0.03905532, 0.05324603, 0.02950949, 0.02942703, 0.04844027, 0.01096952, 0.04392747, 0.02677431, 0.00101947, 0.01306091, 0.00294178, 0.03421554, 0.05725074, 0.01947722, 0.01850144, -0.00323456, 0.03188587, 0.01445867, 0.01919743, 0.03913423, 0.02684197, -0.01231671, -0.01055348]
model_indices1 = [20, 22, 26, 28, 30, 33, 36, 46, 57, 96, 97, 123, 131, 143, 160, 175, 185, 187, 197, 209, 215, 236, 255, 257, 276, 279, 298, 322, 330, 335, 354, 357, 414, 424]
bias1 = -0.12037114510366997
model_weights2 = [0.114999546, 0.0642159312, 0.0763160215, 0.0749201568, 0.0722169352, 0.0403322002, 0.105175222, 0.0257017482, 0.00992976551, -0.0198667402, 0.0836062323, 0.0618304965, -0.00770000674, -0.00243349526, 0.106124237, 0.0228227453, -1.57590333e-05, 0.0449772596, 0.0141671971, -0.0480243632, 0.049008765, 0.0389751147, 0.087701499]
model_indices2 = [22, 24, 29, 47, 88, 94, 98, 110, 130, 139, 162, 172, 192, 214, 256, 291, 297, 322, 329, 371, 413, 414, 415]
bias2 = -0.099892980166821133
self.weights = [0.53, 0.2, 0.09, 0.1, 0.08]
self.models = [
SplitModel(split_keys1, index1, n_est=n_est, \
seed=seed+1, models=model_indices1, weights=model_weights1,\
bias=bias1),
SplitModel(split_keys2, index2, n_est=n_est, \
seed=seed+2, models=model_indices2, weights=model_weights2,\
bias=bias2),
SplitModel(split_keys1, index1, seed=seed+3, models=[425]),
SplitModel(split_keys2, index2, seed=seed+4, models=[427]),
SplitModel(split_keys2, index2, seed=seed+5, models=[428]),
]
def train_test(self, train_X, train_Y, test_X):
self.results = []
rez = np.array([0.0] * len(test_X))
l = Logger(len(self.models), 1, tag='BigModel')
for i, m in enumerate(self.models):
m_rez, m_batches = m.train_test(train_X, train_Y, test_X)
rez += self.weights[i] * m_rez
self.results.append((m_rez, m_batches))
l.step()
return rez
'''
class TimeModel():
@staticmethod
def train_test(train_X, train_Y, test_X):
test_X_np = np.array(test_X)
year1 = np.min(np.array(test_X_np)[:,37])
month1 = np.min(np.array(test_X_np)[:,36])
year2 = np.max(np.array(test_X_np)[:,37])
month2 = np.max(np.array(test_X_np)[:,36])
n_predict = int((year2 - year1) * 12 + month2 - month1 + 1)
ppm = {} # price per month
apm = [[] for i in range(13)] # average per month
for i, row in enumerate(train_X):
m = row[36]
y = row[37]
p = train_Y[i]
if y * 12 + m < year1 * 12 + month1 - 72:
continue
if y not in ppm:
ppm[y] = {}
if m not in ppm[y]:
ppm[y][m] = []
ppm[y][m].append(p)
apm[m].append(p)
apm = [np.mean(l) for l in apm[1:]]
average = np.mean(train_Y)
plot = []
for y in sorted(ppm.keys()):
for m in sorted(ppm[y].keys()):
plot.append(np.mean(ppm[y][m]))
X = np.reshape(range(len(plot) + n_predict), (len(plot) + n_predict, 1))
nuggets = [0.000003,0.00001,0.00003,0.0001,0.0003,0.001,0.003, 0.01]
total_dev = np.array([0.0] * n_predict)
preds = np.array([0.0] * len(X))
for nugget in nuggets:
g = GaussianProcess(regr='linear', \
corr='squared_exponential', nugget=nugget)
g.fit(X[:-n_predict], plot)
preds += g.predict(X)
deviations = g.predict(X[-n_predict:]) - average
total_dev = total_dev + deviations
total_dev /= len(nuggets)
preds /= len(nuggets)
R = []
for row in test_X:
m = row[36] # month
y = row[37] # year
i = (y - year1) * 12 + m - month1
R.append(total_dev[i])
return R
'''
|
# Copyright (c) Facebook, Inc. and its affiliates.
# -*- coding: utf-8 -*-
import typing
import fvcore
from fvcore.nn import activation_count, flop_count, parameter_count, parameter_count_table
from torch import nn
from ..export import TracingAdapter
__all__ = [
"activation_count_operators",
"flop_count_operators",
"parameter_count_table",
"parameter_count",
]
FLOPS_MODE = "flops"
ACTIVATIONS_MODE = "activations"
# Some extra ops to ignore from counting, including elementwise and reduction ops
_IGNORED_OPS = {
"aten::add",
"aten::add_",
"aten::argmax",
"aten::argsort",
"aten::batch_norm",
"aten::constant_pad_nd",
"aten::div",
"aten::div_",
"aten::exp",
"aten::log2",
"aten::max_pool2d",
"aten::meshgrid",
"aten::mul",
"aten::mul_",
"aten::neg",
"aten::nonzero_numpy",
"aten::reciprocal",
"aten::rsub",
"aten::sigmoid",
"aten::sigmoid_",
"aten::softmax",
"aten::sort",
"aten::sqrt",
"aten::sub",
"torchvision::nms", # TODO estimate flop for nms
}
class FlopCountAnalysis(fvcore.nn.FlopCountAnalysis):
"""
Same as :class:`fvcore.nn.FlopCountAnalysis`, but supports detectron2 models.
"""
def __init__(self, model, inputs):
"""
Args:
model (nn.Module):
inputs (Any): inputs of the given model. Does not have to be tuple of tensors.
"""
wrapper = TracingAdapter(model, inputs, allow_non_tensor=True)
super().__init__(wrapper, wrapper.flattened_inputs)
self.set_op_handle(**{k: None for k in _IGNORED_OPS})
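# Example usage sketch (assumes `model` is a detectron2 model and `inputs` is its standard
# list[dict] input with an "image" tensor per entry):
#
#   flops = FlopCountAnalysis(model, inputs)
#   flops.total()        # total flop count
#   flops.by_operator()  # flop count broken down per operator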
def flop_count_operators(model: nn.Module, inputs: list) -> typing.DefaultDict[str, float]:
"""
Implement operator-level flops counting using jit.
This is a wrapper of :func:`fvcore.nn.flop_count` and adds supports for standard
detection models in detectron2.
Please use :class:`FlopCountAnalysis` for more advanced functionalities.
Note:
The function runs the input through the model to compute flops.
The flops of a detection model is often input-dependent, for example,
the flops of box & mask head depends on the number of proposals &
the number of detected objects.
Therefore, the flops counting using a single input may not accurately
reflect the computation cost of a model. It's recommended to average
across a number of inputs.
Args:
model: a detectron2 model that takes `list[dict]` as input.
inputs (list[dict]): inputs to model, in detectron2's standard format.
Only "image" key will be used.
supported_ops (dict[str, Handle]): see documentation of :func:`fvcore.nn.flop_count`
Returns:
Counter: Gflop count per operator
"""
old_train = model.training
model.eval()
ret = FlopCountAnalysis(model, inputs).by_operator()
model.train(old_train)
return {k: v / 1e9 for k, v in ret.items()}
def activation_count_operators(
model: nn.Module, inputs: list, **kwargs
) -> typing.DefaultDict[str, float]:
"""
Implement operator-level activations counting using jit.
This is a wrapper of fvcore.nn.activation_count, that supports standard detection models
in detectron2.
Note:
The function runs the input through the model to compute activations.
The activations of a detection model is often input-dependent, for example,
the activations of box & mask head depends on the number of proposals &
the number of detected objects.
Args:
model: a detectron2 model that takes `list[dict]` as input.
inputs (list[dict]): inputs to model, in detectron2's standard format.
Only "image" key will be used.
Returns:
Counter: activation count per operator
"""
return _wrapper_count_operators(model=model, inputs=inputs, mode=ACTIVATIONS_MODE, **kwargs)
def _wrapper_count_operators(
model: nn.Module, inputs: list, mode: str, **kwargs
) -> typing.DefaultDict[str, float]:
# ignore some ops
supported_ops = {k: lambda *args, **kwargs: {} for k in _IGNORED_OPS}
supported_ops.update(kwargs.pop("supported_ops", {}))
kwargs["supported_ops"] = supported_ops
assert len(inputs) == 1, "Please use batch size=1"
tensor_input = inputs[0]["image"]
inputs = [{"image": tensor_input}] # remove other keys, in case there are any
old_train = model.training
if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):
model = model.module
wrapper = TracingAdapter(model, inputs)
wrapper.eval()
if mode == FLOPS_MODE:
ret = flop_count(wrapper, (tensor_input,), **kwargs)
elif mode == ACTIVATIONS_MODE:
ret = activation_count(wrapper, (tensor_input,), **kwargs)
else:
raise NotImplementedError("Count for mode {} is not supported yet.".format(mode))
# compatible with change in fvcore
if isinstance(ret, tuple):
ret = ret[0]
model.train(old_train)
return ret
|
#!/usr/bin/env python
"""
scp tests
"""
import unittest
import mock
from cirrus.scp import SCP, put
class ScpTests(unittest.TestCase):
@mock.patch('cirrus.scp.local')
def test_scp(self, mock_local):
s = SCP(
target_host="HOST",
target_path="/PATH",
source="somefile",
ssh_username="steve",
ssh_keyfile="steves_key",
ssh_config="~/.ssh/config"
)
comm = s.scp_command
self.assertTrue('somefile' in comm)
self.assertTrue('steve@HOST:/PATH' in comm)
self.assertTrue('-i steves_key' in comm)
s()
        mock_local.assert_has_calls([mock.call(s.scp_command)])
if __name__ =='__main__':
unittest.main()
|
from bs4 import BeautifulSoup
import re
__author__ = 'fcanas'
class IzProperties(dict):
"""
Responsible for parsing and containing any properties used by IzPack's installation spec files.
"""
def __init__(self, path):
"""
Initialize paths to properties and begin parsing.
"""
# noinspection PyTypeChecker
dict.__init__(self)
if 'pom.xml' in path:
self.parse_pom_properties(path)
else:
self.parse_properties(path)
def parse_properties(self, path):
"""
Finds properties defined in properties file at specified path adds them to map.
"""
soup = BeautifulSoup(open(path, 'r'))
properties = soup.find_all('properties')
for props in properties:
for prop in props.find_all('property'):
try:
self[prop['name']] = prop['value']
except KeyError:
continue
def parse_pom_properties(self, path):
"""
Special parser for pom.xml file properties.
"""
soup = BeautifulSoup(open(path, 'r'), 'xml')
properties = soup.find_all('properties')
# add the basedir property
self['basedir'] = path.replace('pom.xml', '')
for props in properties:
for prop in props.find_all(recursive=False):
try:
self[str(prop.name)] = str(prop.string)
except KeyError:
continue
def substitute(self, string):
"""
Puts the given string through variable substitution: replacing all incidences of
${key} for the key's value if it exists. If key doesn't exist, it returns
the unsubstituted variable. The substitution is performed iteratively until all
possible variables have been subbed.
"""
while True:
old_string = string
            matches = re.findall(r'\$\{[^}]*\}', string)
            if not matches:
                break
            for match in matches:
                value = self._substitute(match)
                if value is not match:
                    string = string.replace(match, value)
            if string == old_string:
                break
break
return string
def _substitute(self, key):
"""
Substitutes a given key for its value. If the value doesn't exist,
return the key.
Key is in the form ${some.key}
"""
stripped_key = key[2:-1]
if stripped_key in self:
return self[stripped_key]
else:
return key
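# Usage sketch (illustrative only; the file name and property keys are assumptions):
#
#   props = IzProperties('pom.xml')   # parses <properties> (plus basedir) into a dict
#   props.substitute('${basedir}/target/app-${app.version}.jar')
#   # every known ${key} is replaced; unknown keys are left untouched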
|
import glob
# from multiprocessing import Pool
import multiprocessing
import multiprocessing as mp
import os.path
from pathlib import Path
from typing import Iterator, List, Tuple
import numpy as np
import torchvision.transforms as standard_transforms
import tqdm
from PIL import Image
from numpy import ndarray
from utils import PROJECT_ROOT
from utils.data import COLOR_PALETTE
from utils.data.dataset import get_real_test_list
# @staticmethod
def multiprocess_visuals(
id_list: List[str],
input_paths: List[str], prediction_paths: List[str], processes: int,
output_directory: Path, verbose: bool = False
):
"""Master process spawning worker processes for reading and writing of colorized visuals
Test
.. code-block: python
test_path_dict = get_real_test_list(bands=["NIR", "RGB"]) # ../test/images/rgb/{}.jpg
pred_paths = glob.glob(str(Path(PROJECT_ROOT) / "submission" / "results" / "*.png"))
out_dir = str(Path(PROJECT_ROOT) / "submission/visualized/")
im_rgb_path_fmt = str(test_path_dict["images"][1])
# aggregate prediction ids
pred_ids = [
"{}".format(
os.path.splitext(
os.path.basename(fp))[0])
for fp in pred_paths
]
input_paths = []
for id in pred_ids:
p = im_rgb_path_fmt.format(id)
input_paths.append(p)
if not os.path.exists(Path(out_dir) / id):
print("Created: ", Path(out_dir) / id)
os.mkdir(Path(out_dir) / id)
multiprocess_visuals(
id_list=pred_ids,
input_paths=input_paths,
prediction_paths=pred_paths,
output_directory=out_dir,
verbose=True,
processes=os.cpu_count()
)
# plot and visualize
import matplotlib.pyplot as plt
input_paths = glob.glob(str(Path(DATASET_ROOT) / "test" / "images" / "rgb" / "*.jpg"))
prediction_paths = glob.glob(str(Path(PROJECT_ROOT) / "submission" / "results" / "*.png"))
_ = multiprocess_visuals(input_paths=input_paths, prediction_paths=prediction_paths)
idx, in_arr, pred_arr = next(_)
pred_colored = np.asarray(colorize_mask(pred_arr, COLOR_PALETTE).convert("RGB"))
f, ax = plt.subplots(1, 2)
ax[0].imshow(in_arr)
ax[1].imshow(pred_colored)
plt.show()
:param output_directory:
:param processes:
:param input_paths:
:param prediction_paths:
:return:
"""
# data setup
in_pred_pil_gen = generate_entry(input_paths, prediction_paths, verbose=True)
idx, input_data, pred_data = next(in_pred_pil_gen)
id = id_list[idx]
print(id_list[idx], input_paths[idx], prediction_paths[idx])
assert (id in input_paths[idx]) and (id in prediction_paths[idx])
# multiprocess setup
# pool = mp.Pool(processes)
# sema = mp.Semaphore(processes)
procs = []
# create and start processes
    for idx, input_data, pred_data in tqdm.tqdm(in_pred_pil_gen):
        id = id_list[idx]
        if verbose: print(id_list[idx], input_paths[idx], prediction_paths[idx])
        assert (id in input_paths[idx]) and (id in prediction_paths[idx])
        # mapping of output paths for this prediction id
        out_paths_dict = {
            "in_rgb": str(Path(output_directory) / id / "input.jpg"),
            "pred_src": str(Path(output_directory) / id / "lut.png"),
            "pred_rgb": str(Path(output_directory) / id / "lut_rgb.png")
        }
p = mp.Process(target=apply_color_and_save, args=(
# sema,
None,
input_data,
pred_data,
COLOR_PALETTE,
out_paths_dict,
False,
True
))
# print()
procs.append(p)
# if idx == 0:
# apply_color_and_save(
# sema=None,
# _input_data=input_data,
# _pred_data=pred_data,
# _color_palette=COLOR_PALETTE,
# _output_paths=out_paths_dict,
# use_gpu=False,
# verbose=True
# )
# input()
p.start()
# for p in procs:
# c += 1
# if c == 5:
# break
# pass
for p in procs:
p.join()
class DeNormalize(object):
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
def get_visualize(args):
visualize = standard_transforms.Compose(
[
standard_transforms.Resize(300),
standard_transforms.CenterCrop(300),
standard_transforms.ToTensor(),
]
)
if args.pre_norm:
restore = standard_transforms.Compose(
[DeNormalize(*DeNormalize.mean_std), standard_transforms.ToPILImage(), ]
)
else:
restore = standard_transforms.Compose([standard_transforms.ToPILImage(), ])
return visualize, restore
def setup_palette(palette):
"""
:param palette:
:return:
"""
palette_rgb = []
for _, color in palette.items():
palette_rgb += color
zero_pad = 256 * 3 - len(palette_rgb)
for i in range(zero_pad):
palette_rgb.append(0)
return palette_rgb
def colorize_mask(mask, palette):
"""Color code for the mask
:param mask:
:param palette:
:return:
"""
new_mask = Image.fromarray(mask.astype(np.uint8)).convert("P")
new_mask.putpalette(setup_palette(palette))
return new_mask
def convert_to_color(arr_2d, palette):
"""
:param arr_2d:
:param palette:
:return:
"""
arr_3d = np.zeros((arr_2d.shape[0], arr_2d.shape[1], 3), dtype=np.uint8)
for c, i in palette.items():
m = arr_2d == c
arr_3d[m] = i
return arr_3d
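# Illustration (hypothetical 2x2 mask of class ids; assumes the palette maps
# class id -> [R, G, B]):
#
#   mask = np.array([[0, 1], [1, 0]])
#   pil_img = colorize_mask(mask, COLOR_PALETTE)     # palette ("P") mode PIL image
#   rgb_arr = convert_to_color(mask, COLOR_PALETTE)  # (2, 2, 3) uint8 RGB array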
# class Visualize:
# @staticmethod
def generate_entry(_input_paths, _prediction_paths, verbose: bool = False) -> Iterator[Tuple[int, ndarray, ndarray]]:
for idx, (in_path, pred_path) in enumerate(zip(_input_paths, _prediction_paths)):
if verbose: print("Yield from:", in_path, pred_path)
yield idx, np.asarray(Image.open(in_path)), np.asarray(Image.open(pred_path))
pass
# return in_pred_pil_gen
# @staticmethod
def apply_color_and_save(
sema: multiprocessing.Semaphore,
_input_data, _pred_data,
        _color_palette, _output_paths: dict,
        use_gpu: bool, verbose: bool = True,
) -> ndarray:
"""Function for applying color to a provided mask array and writing to disk
:param sema:
:param verbose:
:param _input_data:
:param _pred_data:
:param _color_palette:
:param _output_paths: desired output paths
:param use_gpu:
:return:
"""
if sema: sema.acquire()
pred_colored = np.asarray(
colorize_mask(_pred_data, COLOR_PALETTE).convert("RGB")
)
# op = _output_directory /
# pred_colored = pred_colored / 255.0
# configure path and write image
pred_src_path = str(_output_paths["pred_src"])
pred_color_path = str(_output_paths["pred_rgb"])
in_path = str(_output_paths["in_rgb"])
if verbose:
print("Writing to...")
print(pred_src_path, pred_color_path, in_path)
# pred_colored.save(_output_directory)
im = Image.fromarray(pred_colored)
im.save(pred_color_path)
im = Image.fromarray(_pred_data)
im.save(pred_src_path)
im = Image.fromarray(_input_data)
im.save(in_path)
# im.sa
# cv2.imwrite(pred_color_path, pred_colored)
# cv2.imwrite(pred_src_path, _pred_data)
# cv2.imwrite(in_path, _input_data)
if sema:
sema.release()
# time.sleep(3)
# add saving to the target output directory
return pred_colored
# out_dir = None
#
# # collect processes
# for p in processes:
# p.join()
def run_visualization_demo(
) -> None:
test_path_dict = get_real_test_list(bands=["NIR", "RGB"]) # ../test/images/rgb/{}.jpg
pred_paths = glob.glob(str(Path(PROJECT_ROOT) / "submission" / "results" / "*.png"))
out_dir = str(Path(PROJECT_ROOT) / "submission/visualized/")
im_rgb_path_fmt = str(test_path_dict["images"][1])
# aggregate prediction ids
pred_ids = [
"{}".format(
os.path.splitext(
os.path.basename(fp))[0])
for fp in pred_paths
]
print(pred_ids[:5])
print(im_rgb_path_fmt)
# in_paths = glob.glob(str(Path(DATASET_ROOT) / "test" / "images" / "rgb" / f"{}.jpg".format(basename)))
print(os.path.exists(out_dir))
if not os.path.exists(out_dir):
os.mkdir(out_dir)
input_paths = []
for id in pred_ids:
p = im_rgb_path_fmt.format(id)
input_paths.append(p)
if not os.path.exists(Path(out_dir) / id):
print("Created: ", Path(out_dir) / id)
os.mkdir(Path(out_dir) / id)
import matplotlib.pyplot as plt
# input_paths = glob.glob(str(Path(DATASET_ROOT) / "test" / "images" / "rgb" / "*.jpg"))
prediction_paths = glob.glob(str(Path(PROJECT_ROOT) / "submission" / "results" / "*.png"))
im_pred_pil_gen = generate_entry(input_paths, pred_paths, verbose=True)
# _ = multiprocess_visuals(
# input_paths=input_paths, prediction_paths=prediction_paths
# )
ROWS = 4
f, ax = plt.subplots(ROWS, 2, figsize=(8,12))
# f.set_size_inches()
# plt.rcParams["figure.figsize"] = (20, 3)
i = 0
for idx, in_arr, pred_arr in im_pred_pil_gen:
print(i)
if (i + 1) == ROWS + 1:
plt.tight_layout()
plt.show()
# reset
f, ax = plt.subplots(ROWS, 2)
i = 0
input()
idx, in_arr, pred_arr = next(im_pred_pil_gen)
pred_colored = np.asarray(colorize_mask(pred_arr, COLOR_PALETTE).convert("RGB"))
ax[i, 0].imshow(in_arr, interpolation='nearest')
ax[i, 1].imshow(pred_colored, interpolation='nearest')
i += 1
def run_multiprocessing_lut2rgb():
pass
#
test_path_dict = get_real_test_list(bands=["NIR", "RGB"]) # ../test/images/rgb/{}.jpg
pred_paths = glob.glob(str(Path(PROJECT_ROOT) / "submission" / "results" / "*.png"))
out_dir = str(Path(PROJECT_ROOT) / "submission/visualized/")
im_rgb_path_fmt = str(test_path_dict["images"][1])
# aggregate prediction ids
pred_ids = [
"{}".format(
os.path.splitext(
os.path.basename(fp))[0])
for fp in pred_paths
]
print(pred_ids[:5])
print(im_rgb_path_fmt)
# in_paths = glob.glob(str(Path(DATASET_ROOT) / "test" / "images" / "rgb" / f"{}.jpg".format(basename)))
print(os.path.exists(out_dir))
if not os.path.exists(out_dir):
os.mkdir(out_dir)
input_paths = []
for id in pred_ids:
p = im_rgb_path_fmt.format(id)
input_paths.append(p)
od = Path(out_dir) / id
if not os.path.exists(Path(out_dir) / id):
print("Created: ", Path(out_dir) / id)
os.mkdir(Path(out_dir) / id)
else:
print("Existing directory:", od)
print(os.listdir(od))
print("===================")
p = os.cpu_count()
# - 4
# p = 7
print("Max processes:", p)
multiprocess_visuals(
id_list=pred_ids,
input_paths=input_paths,
prediction_paths=pred_paths,
output_directory=out_dir,
verbose=True,
processes=p
)
if __name__ == "__main__":
run_visualization_demo()
|
import numpy as np
import pywt
import pickle
from sklearn.model_selection import train_test_split
import argparse
from scipy import stats
import supervised_learning
import random
NR_THREADS = 14
#WAVELETS = ['bior6.8', 'cgau8', 'cmor', 'coif17', 'db38', 'dmey', 'fbsp', 'gaus8', 'haar', 'mexh', 'morl', 'rbio6.8', 'shan', 'sym20']
#WAVELETS = pywt.wavelist(kind='discrete')
WAVELETS = ['db1']
OUTPUT_PREFIX = './'
# EIIP represents the distribution of free electrons' energies along the DNA sequence:
EIIP = [0.1260, 0.1340, 0.1335, 0.0806] # A, C, T, G
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--peak-files', dest='peak_files', help='List of ATAC-seq peak files separated by a pipe (|). If one file is provided, 20%% will be used for testing. If multiple files are passed, the last one will be used for testing.', required=True)
parser.add_argument('-g', '--grid', dest='grid', help='Perform a grid search with cross validation for hyperparameter optimization.', required=False, action='store_true')
parser.add_argument('-c', '--cross-validation', dest='cross_validation', help='Run 5-fold cross-validation for the given single file.', required=False, action='store_true')
parser.add_argument('-f', '--classifier', dest='classifier', help='Classifier used. Currently supported: adaboost, rf, svm. Defaults to all of them if not provided.', required=False, default='all')
parser.add_argument('-r', '--random-labels', dest='random_labels', help='Use random labels (for baseline calculations).', required=False, action='store_true')
args = parser.parse_args()
def wavelet_component_prediction(current_wavelet):
mode = 'periodic'
#for mode in ['zero', 'constant', 'symmetric', 'periodic', 'smooth', 'periodization', 'reflect']:
#for level in [1, 2, 3]:
    for level in [1]:
print("Using the %s wavelet with mode %s, level %d" % (current_wavelet, mode, level))
X_train_w = []
X_test_w = []
y_train_w = []
y_test_w = []
if single_file:
full_X_train = []
for i in range(X_train.shape[0]):
full_X_train.append(pywt.wavedec(X_train[i], current_wavelet, mode=mode, level=level)[0])
full_X_train = np.array(full_X_train)
full_y_train = np.array(y_train)
indices = np.arange(len(y_train))
X_train_w, X_test_w, y_train_w, y_test_w, idx_train, idx_test = \
train_test_split( full_X_train, \
full_y_train, \
indices, \
test_size = 0.2)
else:
for i in range(len(y_train)):
X_train_w.append(pywt.wavedec(X_train[i], current_wavelet, mode=mode, level=level)[0])
X_train_w = np.array(X_train_w)
for i in range(X_test.shape[0]):
X_test_w.append(pywt.wavedec(X_test[i], current_wavelet, mode=mode, level=level)[0])
X_test_w = np.array(X_test_w)
y_train_w = y_train
y_test_w = y_test
if args.classifier == 'adaboost' or args.classifier == 'all':
print("Wavelet decomposition peak classification using AdaBoost:")
supervised_learning.classify_AdaBoost( args.grid, args.cross_validation, \
X_train_w, X_test_w, y_train_w, y_test_w, coordinates, \
400, 'SAMME.R', \
single_file, test_file_prefix)
        if args.classifier == 'rf' or args.classifier == 'all':
print("Wavelet decomposition peak classification using Random Forests:")
supervised_learning.classify_RF(args.grid, args.cross_validation, \
X_train_w, X_test_w, y_train_w, y_test_w, coordinates, \
False, 'entropy', 1000, \
single_file, test_file_prefix)
        if args.classifier == 'svm' or args.classifier == 'all':
print("Wavelet decomposition peak classification using SVM:")
supervised_learning.classify_SVM( args.grid, args.cross_validation, \
X_train_w, X_test_w, y_train_w, y_test_w, coordinates, \
'rbf', 10, 'auto', \
single_file, test_file_prefix)
# A: 0
# C: 1
# T: 2
# G: 3
def get_base_index(nucleotide):
base = 0 # A
if nucleotide == 'c':
base = 1
elif nucleotide == 't':
base = 2
elif nucleotide == 'g':
base = 3
#TODO: special symbols? (R, N, etc)
# for now, flip a coin on the possible bases the symbol represents
elif nucleotide == 'r': # purine
base = np.random.choice([0,3])
elif nucleotide == 'y': # pyrimidine
base = np.random.choice([1,2])
elif nucleotide == 'k': # keto
base = np.random.choice([2,3])
elif nucleotide == 'm': # amino
base = np.random.choice([0,1])
elif nucleotide == 's': # strong
base = np.random.choice([1,3])
elif nucleotide == 'w': # weak
base = np.random.choice([0,2])
elif nucleotide == 'b':
base = np.random.choice([1,2,3])
elif nucleotide == 'd':
base = np.random.choice([0,2,3])
elif nucleotide == 'h':
base = np.random.choice([0,1,2])
elif nucleotide == 'v':
base = np.random.choice([0,1,3])
elif nucleotide == 'n': # any
base = np.random.choice([0,1,2,3])
return base
def get_EIIP_sequence(peak_sequence):
representation = []
for nucleotide in peak_sequence.lower():
nucleotide_index = get_base_index(nucleotide)
representation.append(EIIP[nucleotide_index])
return representation
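# Example (follows directly from the EIIP table and get_base_index above):
#   get_EIIP_sequence('ACTG') -> [0.1260, 0.1340, 0.1335, 0.0806]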
if __name__=='__main__':
peak_files = args.peak_files.split('|')
single_file = True
print('INPUT FILES:')
print(args.peak_files)
if len(peak_files) > 1:
single_file = False
X_train = []
X_test = []
y_train = []
y_test = []
coordinates = []
test_file_prefix = ''
if single_file:
        feature_dict = pickle.load(open(peak_files[0], 'rb'))
for chromosome in feature_dict:
for peak in chromosome:
EIIP_sequence_representation = get_EIIP_sequence(peak['sequence'])
X_train.append([EIIP_sequence_representation])
if args.random_labels:
y_train.append(random.choice([0, 1]))
else:
y_train.append(peak['bidir_ovlp'])
coordinates.append([peak['chrom'], peak['start'], peak['end']])
test_file_prefix = peak_files[0].split('/')[-1].split('_')[0] + '-sig'
else:
# The last file listed is used for testing
for peaks_file in peak_files[:-1]:
feature_dict = pickle.load(open(peaks_file, 'rb'))
for chromosome in feature_dict:
for peak in chromosome:
EIIP_sequence_representation = get_EIIP_sequence(peak['sequence'])
X_train.append([EIIP_sequence_representation])
if args.random_labels:
y_train.append(random.choice([0, 1]))
else:
y_train.append(peak['bidir_ovlp'])
feature_dict = pickle.load(open(peak_files[-1], 'rb'))
for chromosome in feature_dict:
for peak in chromosome:
EIIP_sequence_representation = get_EIIP_sequence(peak['sequence'])
X_test.append([EIIP_sequence_representation])
if args.random_labels:
y_test.append(random.choice([0, 1]))
else:
y_test.append(peak['bidir_ovlp'])
coordinates.append([peak['chrom'], peak['start'], peak['end']])
test_file_prefix = peak_files[-1].split('/')[-1].split('_')[0] + '-sig'
X_train = np.array(X_train)
X_train = X_train.reshape(len(y_train), 1000)
X_test = np.array(X_test)
X_test = X_test.reshape(X_test.shape[0], 1000)
y_train = np.array(y_train)
y_test = np.array(y_test)
wavelet_component_prediction('db1')
|
import pytest
import numpy as np
import numpy.testing as npt
from apl.posterior_approximation import LogLikelihood
@pytest.mark.parametrize(
"D,f_x,expected",
[
(
np.asarray([[0, 1], [2, 0], [2, 3]]),
np.asarray([1.5, 0.7, 2.1, 1.2], dtype=np.float32),
np.asarray([0.8, 0.6, 0.9], dtype=np.float32),
)
],
)
def test_ll_get_diffs(D, f_x, expected):
ll = LogLikelihood()
ll.register_data(D)
npt.assert_almost_equal(ll._get_diffs(f_x).flatten(), expected.flatten())
|
import os
import random
import sys
import warnings
import numpy
from bokeh.io import curdoc
from bokeh.models.widgets import Panel, Tabs
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
file_path = os.path.realpath(__file__)
root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(file_path)))))
sys.path.append(root_path)
# sys.path.append(os.path.join(root_path, 'coord2vec'))
from coord2vec.evaluation.visualizations.bokeh_server.feature_dashboard import FeatureDashboard
from coord2vec.common.itertools import flatten
from coord2vec.feature_extraction.osm.postgres_feature_factory import PostgresFeatureFactory
from coord2vec.config import BUILDINGS_FEATURES_TABLE, BUILDING_RESULTS_DIR
from coord2vec.feature_extraction.feature_bundles import create_building_features
from coord2vec.feature_extraction.features_builders import FeaturesBuilder
from coord2vec.evaluation.visualizations.bokeh_server.building_dashboard import BuildingTaskDashboard
from xgboost import XGBClassifier
from coord2vec.common.parallel import multiproc_util
def main():
random.seed(42)
numpy.random.seed(42)
multiproc_util.force_serial = True
    # TODO: construct or load the task here; task.embedder.features is used below
    task = None
# get the feature names
feature_factory = PostgresFeatureFactory(task.embedder.features, input_gs=None)
all_feat_names = flatten([feat.feature_names for feat in feature_factory.features])
# create bokeh tabs
tabs = []
# tabs += [Panel(child=FeatureDashboard(all_feat_names).main_panel, title="Feature Exploration")]
tabs += [Panel(child=BuildingTaskDashboard(task).main_panel, title="Task")] # TODO: doesn't work for some reason
tabs = Tabs(tabs=tabs)
curdoc().add_root(tabs)
warnings.filterwarnings("ignore")
main()
|
# coding: utf-8
# # Dataset preparation and Mask R-CNN feature vector extraction
# Change path to Mask RCNN directory:
import os
# Root directory of the project
ROOT_DIR = os.path.abspath("./")
# import os
import sys
import json
import random
import math
import numpy as np
import skimage.io
import matplotlib
matplotlib.use('Agg') # if you want to use it in docker without a display
import matplotlib.pyplot as plt
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
# Import YCBV config
sys.path.append(os.path.join(ROOT_DIR, "samples/ycbv/")) # To find local version
import ycbv
# get_ipython().run_line_magic('matplotlib', 'inline')
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
YCBV_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_ycbv_0005.h5")
# TODO error if path does not exist
# Directory of images to run detection on
# IMAGE_DIR = os.path.join(ROOT_DIR, "images")
rooms = ["/home/workspace/data/GH30_office/", "/home/workspace/data/GH30_living/", "/home/workspace/data/GH30_kitchen/",
"/home/workspace/data/KennyLab", "/home/workspace/data/Arena/"]
class InferenceConfig(ycbv.YCBVConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on YCBV
model.load_weights(YCBV_MODEL_PATH, by_name=True)
# YCBV Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'master_chef_can', 'cracker_box', 'sugar_box', 'tomato_soup_can', 'mustard_bottle',
'tuna_fish_can',
'pudding_box', 'gelatin_box', 'potted_meat_can', 'banana', 'pitcher', 'bleach_cleanser', 'bowl', 'mug',
'power_drill', 'wood_block', 'scissors', 'large_marker', 'large_clamp', 'extra_large_clamp',
'foam_brick']
# ## Run Object Detection
for dataset_path in rooms:
scene_paths = [f.path for f in os.scandir(dataset_path) if f.is_dir()]
scene_paths = [s + '/rgb/' for s in scene_paths]
    # count images across all scenes in this dataset
    # (used as the denominator for the progress report below)
    nr_images = 0
    for scene in scene_paths:
        if os.path.isdir(scene):
            print(scene)
            for i in os.listdir(scene):
                # check if file is a "png"
                if i.endswith(".png"):
                    nr_images = nr_images + 1
            print(nr_images)
count = 0
for scene in scene_paths:
if os.path.isdir(scene):
for i in os.listdir(scene):
# check if file is a "png"
try:
if i.endswith(".png"):
# file name without extension
file_id = i.split('.')[0]
# set paths
file_path = os.path.join(scene, i)
seq_path = os.path.join(scene, file_id + "_detections_ycbv")
json_path = os.path.join(seq_path, file_id + ".json")
label_path = os.path.join(seq_path, "labels.txt")
vis_path = os.path.join(seq_path, file_id + "_visualization.png")
if not os.path.exists(seq_path):
os.makedirs(seq_path)
# img = cv2.imread(file_path)
image = skimage.io.imread(file_path)
# plt.imshow(img)
# Run detection
results = model.detect([image], verbose=1)
# Visualize results
r = results[0]
pltret = visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'], False)
pltret.savefig(vis_path, bbox_inches='tight')
pltret.close()
# save values to files
output_json_data = {"detections": []}
# prepare arrays of detection result values
bounding_boxes = []
                        for roi in r['rois']:
                            bounding_boxes.append({'minX': int(roi[0]),
                                                   'minY': int(roi[1]),
                                                   'maxX': int(roi[2]),
                                                   'maxY': int(roi[3])})
labels = []
f = open(label_path, 'w')
for label_id in r['class_ids']:
label_name = class_names[label_id]
labels.append(label_name)
f.write(str(label_id) + ": " + label_name + "\n")
f.close()
scores = r['scores']
for d in range(len(r['scores'])):
output_json_data['detections'].append({'id': d,
'bb': bounding_boxes[d],
'label': labels[d],
'score': str(scores[d]),
'featureDimensions': [len(r['features'][d])]})
feature_path = os.path.join(seq_path, str(d) + ".feature")
temp_feature = []
                            # copy the values into a plain Python list so the full feature
                            # vector is serialized (writing the raw array directly was
                            # observed to produce incomplete output)
                            for j in range(len(r['features'][d])):
                                temp_feature.append(r['features'][d][j])
# save one feature file for each detection
with open(feature_path, 'w') as f:
f.write(str(temp_feature))
with open(json_path, 'w') as output_json_file:
json.dump(output_json_data, output_json_file)
count = count + 1
print("Status: {0}".format(count / nr_images))
except Exception as e:
print(e)
|
""" Simulation Dataset Catalog
"""
import fcntl
import glob
import logging
import os
import shutil
from pathlib import Path
from PIL import Image
from pyquaternion import Quaternion
from datasetinsights.datasets.unity_perception import (
AnnotationDefinitions,
Captures,
)
from datasetinsights.datasets.unity_perception.tables import SCHEMA_VERSION
from datasetinsights.io.bbox import BBox2D, BBox3D
from .exceptions import DatasetNotFoundError
logger = logging.getLogger(__name__)
def read_bounding_box_3d(annotation, label_mappings=None):
""" Convert dictionary representations of 3d bounding boxes into objects
of the BBox3d class
Args:
annotation (List[dict]): 3D bounding box annotation
label_mappings (dict): a dict of {label_id: label_name} mapping
Returns:
A list of 3d bounding box objects
"""
bboxes = []
for b in annotation:
label_id = b["label_id"]
translation = b["translation"]
size = b["size"]
rotation = b["rotation"]
rotation = Quaternion(
b=rotation[0], c=rotation[1], d=rotation[2], a=rotation[3]
)
if label_mappings and label_id not in label_mappings:
continue
box = BBox3D(
translation=translation,
size=size,
label=label_id,
sample_token=0,
score=1,
rotation=rotation,
)
bboxes.append(box)
return bboxes
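# Illustrative conversion (hypothetical annotation; the rotation is stored as
# [x, y, z, w] and re-ordered into pyquaternion's (a=w, b=x, c=y, d=z) above):
#
#   ann = [{"label_id": 2, "translation": [1.0, 2.0, 3.0],
#           "size": [0.5, 0.5, 1.0], "rotation": [0.0, 0.0, 0.0, 1.0]}]
#   boxes = read_bounding_box_3d(ann)
#   # boxes[0] is a BBox3D with label=2 and an identity rotation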
def read_bounding_box_2d(annotation, label_mappings=None):
"""Convert dictionary representations of 2d bounding boxes into objects
of the BBox2D class
Args:
annotation (List[dict]): 2D bounding box annotation
label_mappings (dict): a dict of {label_id: label_name} mapping
Returns:
A list of 2D bounding box objects
"""
bboxes = []
for b in annotation:
label_id = b["label_id"]
x = b["x"]
y = b["y"]
w = b["width"]
h = b["height"]
if label_mappings and label_id not in label_mappings:
continue
box = BBox2D(label=label_id, x=x, y=y, w=w, h=h)
bboxes.append(box)
return bboxes
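# Minimal usage sketch (illustrative annotation values, not taken from a real
# dataset):
#
#   ann = [{"label_id": 1, "x": 10, "y": 20, "width": 30, "height": 40}]
#   boxes = read_bounding_box_2d(ann, label_mappings={1: "car"})
#   # boxes[0] is a BBox2D with label=1, x=10, y=20, w=30, h=40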
class SynDetection2D:
"""Synthetic dataset for 2D object detection.
    During instantiation, the class checks whether the required data files
    (e.g. annotations.json and the captured .png images) are present. If they
    are not, it looks for a compressed dataset archive containing the
    necessary files, and raises an error if neither is found.
See synthetic dataset schema documentation for more details.
<https://datasetinsights.readthedocs.io/en/latest/Synthetic_Dataset_Schema.html>
Attributes:
catalog (list): catalog of all captures in this dataset
transforms: callable transformation that applies to a pair of
capture, annotation. Capture is the information captured by the
sensor, in this case an image, and annotations, which in this
dataset are 2d bounding box coordinates and labels.
label_mappings (dict): a dict of {label_id: label_name} mapping
"""
ARCHIVE_FILE = "SynthDet.zip"
SUBFOLDER = "synthetic"
def __init__(
self,
*,
data_path=None,
transforms=None,
version=SCHEMA_VERSION,
def_id=4,
**kwargs,
):
"""
Args:
data_path (str): Directory of the dataset
transforms: callable transformation that applies to a pair of
capture, annotation.
version(str): synthetic dataset schema version
def_id (int): annotation definition id used to filter results
"""
self._data_path = self._preprocess_dataset(data_path)
captures = Captures(self._data_path, version)
annotation_definition = AnnotationDefinitions(self._data_path, version)
catalog = captures.filter(def_id)
self.catalog = self._cleanup(catalog)
init_definition = annotation_definition.get_definition(def_id)
self.label_mappings = {
m["label_id"]: m["label_name"] for m in init_definition["spec"]
}
self.transforms = transforms
def __getitem__(self, index):
"""
Get the image and corresponding bounding boxes for that index
Args:
index:
Returns (Tuple(Image,List(BBox2D))): Tuple comprising the image and
bounding boxes found in that image with transforms applied.
"""
cap = self.catalog.iloc[index]
capture_file = cap.filename
ann = cap["annotation.values"]
capture = Image.open(os.path.join(self._data_path, capture_file))
capture = capture.convert("RGB") # Remove alpha channel
annotation = read_bounding_box_2d(ann, self.label_mappings)
if self.transforms:
capture, annotation = self.transforms(capture, annotation)
return capture, annotation
def __len__(self):
return len(self.catalog)
def _cleanup(self, catalog):
"""
remove rows with captures that having missing files and remove examples
which have no annotations i.e. an image without any objects
Args:
catalog (pandas dataframe):
Returns: dataframe without rows corresponding to captures that have
missing files and removes examples which have no annotations i.e. an
image without any objects.
"""
catalog = self._remove_captures_with_missing_files(
self._data_path, catalog
)
catalog = self._remove_captures_without_bboxes(catalog)
return catalog
@staticmethod
def _remove_captures_without_bboxes(catalog):
"""Remove captures without bounding boxes from catalog
Args:
catalog (pd.Dataframe): The loaded catalog of the dataset
Returns:
A pandas dataframe with empty bounding boxes removed
"""
keep_mask = catalog["annotation.values"].apply(len) > 0
return catalog[keep_mask]
@staticmethod
def _remove_captures_with_missing_files(root, catalog):
"""Remove captures where image files are missing
During the synthetic dataset download process, some of the files might
be missing due to temporary http request issues or url corruption.
We should remove these captures from catalog so that it does not
stop the training pipeline.
Args:
catalog (pd.Dataframe): The loaded catalog of the dataset
Returns:
A pandas dataframe of the catalog with missing files removed
"""
def exists(capture_file):
path = Path(root) / capture_file
return path.exists()
keep_mask = catalog.filename.apply(exists)
return catalog[keep_mask]
@staticmethod
def _preprocess_dataset(data_path):
""" Preprocess dataset inside data_path and un-archive if necessary.
Args:
data_path (str): Path where dataset is stored.
Return:
Path of the dataset files.
"""
archive_file = Path(data_path) / SynDetection2D.ARCHIVE_FILE
if archive_file.exists():
file_descriptor = os.open(archive_file, os.O_RDONLY)
try:
fcntl.flock(file_descriptor, fcntl.LOCK_EX)
unarchived_path = Path(data_path) / SynDetection2D.SUBFOLDER
if not SynDetection2D.is_dataset_files_present(unarchived_path):
shutil.unpack_archive(
filename=archive_file, extract_dir=unarchived_path
)
return unarchived_path
finally:
os.close(file_descriptor)
elif SynDetection2D.is_dataset_files_present(data_path):
# This is for dataset generated by unity simulation.
# In this case, all data are downloaded directly in the data_path
return data_path
else:
raise DatasetNotFoundError(
f"Expecting a file {archive_file} under {data_path} or files "
f"directly exist under {data_path}"
)
@staticmethod
def is_dataset_files_present(data_path):
return os.path.isdir(data_path) and any(glob.glob(f"{data_path}/**/*"))
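# Minimal usage sketch (illustrative path; data_path is expected to contain
# either SynthDet.zip or the already-unpacked dataset files, as handled by
# _preprocess_dataset above):
#
#   dataset = SynDetection2D(data_path="/data/synthdet")
#   image, bboxes = dataset[0]
#   print(len(dataset), dataset.label_mappings)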
|
import os
import numpy as np
def exp_func(x, a, b, c):
#y = a * np.exp(-b * x) + c
y = (a - c) * np.exp(-b * x) + c
return y
def time_resolved_anisotropy_decay_func(t, r0, r_inf, transfer_rate):
r_t = (r0-r_inf) * np.exp(-2 * transfer_rate * t) + r_inf
return r_t
def two_phase_exp_decay_func(x, plateau, SpanFast, Kfast, SpanSlow, Kslow):
"""Function for two phase exponential decay.
# variable names inspired by GraphPad
https://www.graphpad.com/guides/prism/7/curve-fitting/reg_exponential_decay_2phase.htm?toc=0&printWindow
Parameters
----------
x : float
x value
plateau : float
yvalue where curve is flat (i.e. y at infinity)
SpanFast : float
Parameter for fast component
Kfast : float
K parameter for fast component
SpanSlow : float
Parameter for slow component
Kslow : float
K parameter for fast component
Returns
-------
y : float
y value
"""
y = plateau + SpanFast * np.exp(-Kfast * x) + SpanSlow * np.exp(-Kslow * x)
return y
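# Minimal fitting sketch for the two-phase decay above. Illustrative only:
# scipy is an assumption here (it is not imported in this module) and the
# initial guess p0 is arbitrary.
def fit_two_phase_exp_decay(xdata, ydata, p0=(0.1, 1.0, 5.0, 1.0, 0.5)):
    """Fit two_phase_exp_decay_func to data; returns (popt, pcov) from curve_fit."""
    from scipy.optimize import curve_fit
    return curve_fit(two_phase_exp_decay_func, xdata, ydata, p0=p0)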
class FlourescentColours():
"""
Object with fluorescent html colours from https://www.w3schools.com/colors/colors_crayola.asp
"""
def __init__(self):
self.red = "#FF355E"
self.watermelon = "#FD5B78"
self.orange = "#FF6037"
self.tangerine = "#FF9966"
self.carrot = "#FF9933"
self.sunglow = "#FFCC33"
self.lemon = "#FFFF66"
self.yellow = "#FFFF66"
self.lime = "#CCFF00"
self.green = "#66FF66"
self.mint = "#AAF0D1"
self.blue = "#50BFE6"
self.pink = "#FF6EFF"
self.rose = "#EE34D2"
self.magenta = "#FF00CC"
self.pizzazz = "FF00CC"
self.colourlist = ['#50BFE6', '#FF9933', '#66FF66', '#FFFF66', '#CCFF00', '#FF00CC', '#AAF01', '#FF6037', '#FF6EFF', 'FF00CC', '#FF355E', '#EE34D2', '#FFCC33', '#FF9966', '#FD5B78', '#FFFF66']
def get_fluorescent_colours():
"""
DEPRECATED. Use the FlourescentColours object instead.
Usage
-----
fc_dict, fl_col_keys, fl_col_list = blitzcurve.utils.get_fluorescent_colours()
"""
# get dict and list of fluorescent colours
#https://www.w3schools.com/colors/colors_crayola.asp
fc_dict = {"Red":"#FF355E","Watermelon":"#FD5B78","Orange":"#FF6037","Tangerine":"#FF9966","Carrot":"#FF9933","Sunglow":"#FFCC33","Lemon":"#FFFF66","Yellow":"#FFFF66","Lime":"#CCFF00","Green":"#66FF66","Mint":"#AAF01","Blue":"#50BFE6","Pink":"#FF6EFF","Rose":"#EE34D2","Magenta":"#FF00CC","Pizzazz":"FF00CC"}
fl_col_keys = sorted(fc_dict)
fl_col_list = [fc_dict[k] for k in fl_col_keys]
return fc_dict, fl_col_keys, fl_col_list
def setup_matplotlib_dark_background(plt):
""" Initialises the dark background style of matplotlib, which applies to all following plots.
Adds error caps to barcharts, sets figure size and dpi.
Parameters
----------
plt : matplotlib library
"""
plt.style.use('dark_background')
plt.rcParams['errorbar.capsize'] = 3
plt.rcParams['figure.figsize'] = (5, 5)
plt.rcParams["savefig.dpi"] = 240
# class OutFilepaths:
# def __init__(self, data_dir, csv):
# self.fits_dir = os.path.join(data_dir, "fits")
# self.rotat_dir = os.path.join(self.fits_dir, "rotat")
# self.savgol_dir = os.path.join(self.fits_dir, "savgol")
# self.seg1_dir = os.path.join(self.fits_dir, "seg1")
# self.seg2_dir = os.path.join(self.fits_dir, "seg2")
# self.fitdata_dir = os.path.join(self.fits_dir, "fitdata")
#
# for path in [self.fits_dir, self.rotat_dir, self.savgol_dir, self.seg1_dir, self.seg2_dir, self.fitdata_dir]:
# if not os.path.isdir(path):
# os.makedirs(path)
# self.filename = os.path.basename(csv)
# self.rotat_fit_png = os.path.join(self.rotat_dir, self.filename[:-4] + "_rotat_fit.png")
# self.savgol_fit_png = os.path.join(self.savgol_dir, self.filename[:-4] + "_savgol_fit.png")
# self.savgol_fit_peak_png = os.path.join(self.savgol_dir, self.filename[:-4] + "_savgol_fit_peak.png")
# self.savgol_fit_desc_png = os.path.join(self.seg1_dir, self.filename[:-4] + "_savgol_fit_desc.png")
# self.exp_fit_seg1_png = os.path.join(self.seg1_dir, self.filename[:-4] + "_seg1.png")
# self.exp_fit_seg2_png = os.path.join(self.seg2_dir, self.filename[:-4] + "_seg2.png")
#
# self.fitdata_pickle = os.path.join(self.fitdata_dir, self.filename[:-4] + "_fitdata.pickle")
class OutDirPaths:
"""
Creates paths for subdirectories and makes sure that they exist
"""
def __init__(self, data_dir):
self.fits_dir = os.path.join(data_dir, "fits")
self.rotat_dir = os.path.join(self.fits_dir, "rotat")
self.savgol_dir = os.path.join(self.fits_dir, "savgol")
self.seg1_dir = os.path.join(self.fits_dir, "seg1")
self.seg2_dir = os.path.join(self.fits_dir, "seg2")
self.two_comp_exp_decay_dir = os.path.join(self.fits_dir, "two_phase_exp_decay")
self.time_resolved_anisotropy_decay_dir = os.path.join(self.fits_dir, "time_resolved_anisotropy_decay")
self.fitdata_dir = os.path.join(self.fits_dir, "fitdata")
self.summary_figs_dir = os.path.join(data_dir, "summary", "figs")
for path in [self.fits_dir, self.rotat_dir, self.savgol_dir, self.seg1_dir, self.seg2_dir, self.two_comp_exp_decay_dir, self.time_resolved_anisotropy_decay_dir, self.fitdata_dir, self.summary_figs_dir]:
if not os.path.isdir(path):
os.makedirs(path)
class FitFilePaths(OutDirPaths):
"""
Adds file paths to the OutDirPaths object that are specific to a single sample, based on the original sample filename.
"""
def __init__(self, data_dir, csv):
# instantiate the parent OutDirPaths object, giving the relevant directories
OutDirPaths.__init__(self, data_dir)
# create various paths
self.filename = os.path.basename(csv)
self.rotat_fit_png = os.path.join(self.rotat_dir, self.filename[:-4] + "_rotat_fit.png")
self.savgol_fit_png = os.path.join(self.savgol_dir, self.filename[:-4] + "_savgol_fit.png")
self.savgol_fit_peak_png = os.path.join(self.savgol_dir, self.filename[:-4] + "_savgol_fit_peak.png")
self.savgol_fit_desc_png = os.path.join(self.seg1_dir, self.filename[:-4] + "_savgol_fit_desc.png")
self.exp_fit_seg1_png = os.path.join(self.seg1_dir, self.filename[:-4] + "_seg1.png")
self.exp_fit_seg2_png = os.path.join(self.seg2_dir, self.filename[:-4] + "_seg2.png")
self.two_comp_exp_decay_png = os.path.join(self.two_comp_exp_decay_dir, self.filename[:-4] + "_two_comp_exp_decay.png")
self.time_resolved_anisotropy_decay_png = os.path.join(self.time_resolved_anisotropy_decay_dir, self.filename[:-4] + "_time_resolved_anisotropy_decay.png")
self.fitdata_pickle = os.path.join(self.fitdata_dir, self.filename[:-4] + "_fitdata.pickle")
class CompareFilePaths(OutDirPaths):
"""
Adds file paths to the OutDirPaths object that are specific to the compare function, e.g. for barcharts.
"""
def __init__(self, data_dir):
# instantiate the parent OutDirPaths object, giving the relevant directories
OutDirPaths.__init__(self, data_dir)
# barchart paths
self.barchart_r_max = os.path.join(self.summary_figs_dir, "01_barchart_r_max.png")
self.barchart_r_inf = os.path.join(self.summary_figs_dir, "02_barchart_r_inf.png")
self.barchart_variable_a_png = os.path.join(self.summary_figs_dir, "03_barchart_a.png")
self.barchart_variable_b_png = os.path.join(self.summary_figs_dir, "04_barchart_b.png")
self.barchart_variable_c_png = os.path.join(self.summary_figs_dir, "05_barchart_c.png")
# linechart paths
self.linechart_savgol = os.path.join(self.summary_figs_dir, "06_linechart_savgol.png")
self.linechart_seg1 = os.path.join(self.summary_figs_dir, "07_linechart_seg1.png")
self.linechart_seg2 = os.path.join(self.summary_figs_dir, "08_linechart_seg2.png")
|
# -*- coding: utf-8 -*-
#Import packages
import numpy as np
import pandas as pd
import imageio
import matplotlib.pyplot as plt
#Set Random Seed
np.random.seed(438672590)
#List of contestants in order of purchase
Contestants = ["Mary Q",
"Will N",
"Will C",
"David D",
"Sarah H",
"Rachel P",
"Margaret C",
"Eric M",
"Francisco R",
"Meghana G",
"Shivram V",
"Shahzaib S"]
NumContestants = len(Contestants)
#List of Columns for df
colNames = ["Previous Total",
"Total",
"Win this round?",
"Winner",
"Winning Value",
"Won on Step #",
"W",
"AW",
"L"]
df = pd.DataFrame(np.zeros(shape=(len(Contestants), len(colNames))),
columns = colNames, index = Contestants)
#Looping through to find winners
winners = []
n = 0
while len(winners) < 2:
#Start for next step
n = n+1
#Random Draw
df["Previous Total"] = df["Total"]
df["Added on Step #%d" % n] = 4 * np.random.rand(NumContestants)
df["Total"] = df["Previous Total"] + df["Added on Step #%d" % n]
#Determine if there are any winners
df["Win this round?"] = (df["Total"] >= 100)
for x in Contestants:
if df.loc[x, "Win this round?"] == True and df.loc[x, "Winner"] == False:
winners.append(x)
df.loc[x, "Winning Value"] = df.loc[x, "Total"]
df.loc[x, "Won on Step #"] = n
df.loc[x, "Winner"] = True
#Set winning value and make other values zero
if df.loc[x, "Winner"] == True:
df.loc[x, "Previous Total"] = 0
df.loc[x, "Added on Step #%d" % n] = 0
#Create Plots for Step n
N = len(Contestants)
ind = np.arange(N) # the x locations for the groups
width = 0.75 # the width of the bars: can also be len(x) sequence
p1 = plt.bar(ind, df["Previous Total"], width, color='darkred')
p2 = plt.bar(ind, df["Added on Step #%d" % n], width, color='lightcoral',
bottom=df["Previous Total"])
p3 = plt.bar(ind, df["Winning Value"], width, color='goldenrod',
bottom=df["Previous Total"])
plt.title('AMData Lottery: Step #%d' % n)
plt.xticks(ind, Contestants, rotation='vertical')
plt.yticks(np.arange(0, 120 + 0.01, 25))
plt.axhline(y=100, linestyle='--')
plt.savefig('ExportedGraphs/%d.png' % n, bbox_inches="tight")
#Sort and find the winners
df_sorted = df.sort_values(by=["Winner", "Won on Step #", "Winning Value"],
                           ascending=[False, True, False])
winner1 = df_sorted.index[0]
winner2 = df_sorted.index[1]
winners = [winner1, winner2]
#Create Final Graph
for x in Contestants:
if df.loc[x, "Winner"] == True:
df.loc[x, "AW"] = df.loc[x, "Total"]
if df.loc[x, "Winner"] == False:
df.loc[x, "L"] = df.loc[x, "Total"]
df.loc[winner1, "W"] = df.loc[winner1, "Winning Value"]
df.loc[winner1, "AW"] = 0
df.loc[winner2, "W"] = df.loc[winner2, "Winning Value"]
df.loc[winner2, "AW"] = 0
N = len(Contestants)
ind = np.arange(N) # the x locations for the groups
width = 0.75 # the width of the bars: can also be len(x) sequence
p1 = plt.bar(ind, df["W"], width, color='darkgoldenrod')
p2 = plt.bar(ind, df["AW"], width, color='saddlebrown')
p3 = plt.bar(ind, df["L"], width, color='black')
plt.title('The winners are %s and %s' % (winner1, winner2))
plt.xticks(ind, Contestants, rotation='vertical')
plt.yticks(np.arange(0, 120 + 0.01, 25))
plt.axhline(y=100, linestyle='--')
plt.savefig('ExportedGraphs/%d.png' % (n+1), bbox_inches="tight")
#Create GIF
gif_path = "AMDataLottery.gif"
frames_path = "ExportedGraphs/{i}.png"
with imageio.get_writer(gif_path, mode='I', duration = 0.5) as writer:
for i in range(1, n + 2):
writer.append_data(imageio.imread(frames_path.format(i=i)))
|
#!/usr/bin/python
# Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import imp
import os
import re
import shutil
import shlex
import subprocess
import sys
import tempfile
import zipfile
import config_parser
# We expect the tools directory from the dart repo to be checked out into:
# ../../tools
DART_DIR = os.path.abspath(
os.path.normpath(os.path.join(__file__, '..', '..', '..')))
UTILS_PATH = os.path.join(DART_DIR, 'tools', 'utils.py')
BOT_UTILS_PATH = os.path.join(DART_DIR, 'tools', 'bots', 'bot_utils.py')
if os.path.isfile(UTILS_PATH):
utils = imp.load_source('utils', UTILS_PATH)
else:
print 'error: %s not found' % UTILS_PATH
exit(1)
if os.path.isfile(BOT_UTILS_PATH):
bot_utils = imp.load_source('bot_utils', BOT_UTILS_PATH)
else:
print 'error: %s not found' % BOT_UTILS_PATH
exit(1)
# We are deliberately not using bot utils from the dart repo.
PACKAGES_BUILDER = r'packages-(windows|linux|mac)(-repo)?(-sample)?-(.*)'
NAME_OVERRIDES = {
'dart-protobuf' : 'protobuf',
'polymer-dart' : 'polymer',
'serialization.dart' : 'serialization',
'unittest-stable' : 'unittest'
}
# Some packages need all tests run sequentially, due to side effects.
# This list only affects tests run with 'pub run test'.
SERIALIZED_PACKAGES = [
'analyzer_cli'
]
class BotInfo(object):
"""
Stores the info extracted from the bot name
- system: windows, linux, mac
- package-name
"""
def __init__(self, system, package_name, is_repo, is_sample):
self.system = system
self.package_name = NAME_OVERRIDES.get(package_name,
package_name.replace('-', '_'))
self.is_repo = is_repo
self.is_sample = is_sample
def __str__(self):
return "System: %s, Package-name: %s, Repo: %s, Sample: %s" % (
self.system, self.package_name, self.is_repo, self.is_sample)
def GetBotInfo():
name = os.environ.get('BUILDBOT_BUILDERNAME')
if not name:
print ("BUILDBOT_BUILDERNAME not defined. "
"Expected pattern of the form: %s" % PACKAGES_BUILDER)
exit(1)
  builder_pattern = re.match(PACKAGES_BUILDER, name)
  if builder_pattern:
    is_repo = builder_pattern.group(2) is not None
    is_sample = builder_pattern.group(3) is not None
    return BotInfo(builder_pattern.group(1),
                   builder_pattern.group(4),
                   is_repo,
                   is_sample)
  print ("Builder name '%s' does not match the expected pattern: %s"
         % (name, PACKAGES_BUILDER))
  exit(1)
class BuildStep(object):
"""
A context manager for handling build steps.
When the context manager is entered, it prints the "@@@BUILD_STEP __@@@"
message. If it exits from an error being raised it displays the
"@@@STEP_FAILURE@@@" message.
If swallow_error is True, then this will catch and discard any OSError that
is thrown. This lets you run later BuildSteps if the current one fails.
"""
def __init__(self, name, swallow_error=False):
self.name = name
self.swallow_error = swallow_error
def __enter__(self):
print '@@@BUILD_STEP %s@@@' % self.name
sys.stdout.flush()
def __exit__(self, type, value, traceback):
if value:
print '@@@STEP_FAILURE@@@'
sys.stdout.flush()
if self.swallow_error and isinstance(value, OSError):
return True
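# Illustrative use of BuildStep (mirrors how the build steps below wrap
# commands; swallow_error lets later steps run even if this one fails):
#
#   with BuildStep('Example step', swallow_error=True):
#     RunProcess([sys.executable, '--version'])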
class TempDir(object):
def __init__(self, prefix=''):
self._temp_dir = None
self._prefix = prefix
def __enter__(self):
self._temp_dir = tempfile.mkdtemp(self._prefix)
return self._temp_dir
def __exit__(self, *_):
shutil.rmtree(self._temp_dir, ignore_errors=True)
class ChangedWorkingDirectory(object):
def __init__(self, working_directory):
self._working_directory = working_directory
def __enter__(self):
self._old_cwd = os.getcwd()
print "Enter directory = ", self._working_directory
os.chdir(self._working_directory)
def __exit__(self, *_):
print "Enter directory = ", self._old_cwd
os.chdir(self._old_cwd)
def RunProcess(command, shell=False, extra_env=None):
"""
Runs command.
If a non-zero exit code is returned, raises an OSError with errno as the exit
code.
"""
env = dict(os.environ)
env['TERM'] = 'nocolor'
if 'GIT_USER_AGENT' in env:
del env['GIT_USER_AGENT']
if extra_env:
env.update(extra_env)
print "Running: %s" % ' '.join(command)
print "env: %s" % str(env)
sys.stdout.flush()
exit_code = subprocess.call(command, env=env, shell=shell)
if exit_code != 0:
raise OSError(exit_code)
def GetSDK(bot_info):
with BuildStep('Get sdk'):
namer = bot_utils.GCSNamer(channel=bot_utils.Channel.DEV)
# TODO(ricow): Be smarter here, only download if new.
build_root = GetBuildRoot(bot_info)
SafeDelete(os.path.join(build_root, 'dart-sdk'), bot_info)
if not os.path.exists(build_root):
os.makedirs(build_root)
local_zip = os.path.join(build_root, 'sdk.zip')
gsutils = bot_utils.GSUtil()
gsutils.execute(['cp',
namer.sdk_zipfilepath('latest', bot_info.system,
'ia32', 'release'),
local_zip])
if bot_info.system == 'windows':
with zipfile.ZipFile(local_zip, 'r') as zip_file:
zip_file.extractall(path=build_root)
else:
# We don't keep the execution bit if we use python's zipfile on posix.
RunProcess(['unzip', local_zip, '-d', build_root])
pub = GetPub(bot_info)
RunProcess([pub, '--version'])
def GetPackagePath(bot_info):
if bot_info.is_repo:
return os.path.join('pkg', bot_info.package_name)
return os.path.join('third_party', 'pkg', bot_info.package_name)
def GetBuildRoot(bot_info):
system = bot_info.system
if system == 'windows':
system = 'win32'
if system == 'mac':
system = 'macos'
return utils.GetBuildRoot(system, mode='release', arch='ia32',
target_os=system)
def SafeDelete(path, bot_info):
if bot_info.system == 'windows':
if os.path.exists(path):
args = ['cmd.exe', '/c', 'rmdir', '/q', '/s', path]
RunProcess(args)
else:
shutil.rmtree(path, ignore_errors=True)
def GetPackageCopy(bot_info):
build_root = GetBuildRoot(bot_info)
package_copy = os.path.join(build_root, 'package_copy')
package_path = GetPackagePath(bot_info)
copy_path = os.path.join(package_copy, bot_info.package_name)
SafeDelete(package_copy, bot_info)
no_git = shutil.ignore_patterns('.git')
shutil.copytree(package_path, copy_path, symlinks=False, ignore=no_git)
return copy_path
def GetSdkBin(bot_info):
return os.path.join(os.getcwd(), GetBuildRoot(bot_info),
'dart-sdk', 'bin')
def GetPub(bot_info):
executable = 'pub.bat' if bot_info.system == 'windows' else 'pub'
return os.path.join(GetSdkBin(bot_info), executable)
def GetPubEnv(bot_info):
pub_cache = os.path.join(os.getcwd(), GetBuildRoot(bot_info), 'pub_cache')
return {'PUB_CACHE': pub_cache, 'PUB_ENVIRONMENT': 'dart_bots'}
# _RunPubCacheRepair and _CheckPubCacheCorruption are not used right now, but we
# keep them around because they provide an easy way to diagnose and fix issues
# in the bots.
def _RunPubCacheRepair(bot_info, path):
pub = GetPub(bot_info)
extra_env = GetPubEnv(bot_info)
with BuildStep('Pub cache repair'):
# For now, assume pub
with ChangedWorkingDirectory(path):
args = [pub, 'cache', 'repair']
RunProcess(args, extra_env=extra_env)
corruption_checks = 0
def _CheckPubCacheCorruption(bot_info, path):
extra_env = GetPubEnv(bot_info)
global corruption_checks
corruption_checks += 1
with BuildStep('Check pub cache corruption %d' % corruption_checks):
with ChangedWorkingDirectory(path):
packages = os.path.join(
extra_env['PUB_CACHE'], 'hosted', 'pub.dartlang.org')
print '\nLooking for packages in %s:' % str(packages)
if not os.path.exists(packages):
print "cache directory doesn't exist"
return
for package in os.listdir(packages):
if 'unittest-' in package:
exists = os.path.exists(
os.path.join(packages, package, 'lib', 'unittest.dart'))
print '- ok: ' if exists else '- bad: ',
print os.path.join(package, 'lib', 'unittest.dart')
print ''
def RunPubUpgrade(bot_info, path):
pub = GetPub(bot_info)
extra_env = GetPubEnv(bot_info)
with BuildStep('Pub upgrade'):
# For now, assume pub
with ChangedWorkingDirectory(path):
args = [pub, 'upgrade', '--no-packages-dir']
RunProcess(args, extra_env=extra_env)
def RunPubBuild(bot_info, path, folder, mode=None):
skip_pub_build = ['dart-protobuf', 'rpc']
with BuildStep('Pub build on %s' % folder):
if bot_info.package_name in skip_pub_build:
print "Not running pub build"
return
pub = GetPub(bot_info)
extra_env = GetPubEnv(bot_info)
with ChangedWorkingDirectory(path):
# run pub-build on the web folder
if os.path.exists(folder):
args = [pub, 'build']
if mode:
args.append('--mode=%s' % mode)
if folder != 'web':
args.append(folder)
RunProcess(args, extra_env=extra_env)
# Major hack
def FixupTestControllerJS(package_path):
if os.path.exists(os.path.join(package_path, 'packages', 'unittest')):
test_controller = os.path.join(package_path, 'packages', 'unittest',
'test_controller.js')
dart_controller = os.path.join('tools', 'testing', 'dart',
'test_controller.js')
print 'Hack test controller by copying of %s to %s' % (dart_controller,
test_controller)
shutil.copy(dart_controller, test_controller)
else:
print "No unittest to patch, do you even have tests"
JS_RUNTIMES = {
'windows': ['ff', 'chrome', 'ie10'],
'linux': ['d8', 'ff', 'chrome'],
'mac': ['safari'],
}
is_first_test_run = True
def LogsArgument():
global is_first_test_run
if is_first_test_run:
is_first_test_run = False
return []
return ['--append_logs']
def RunPackageTesting(bot_info, package_path, folder='test'):
package_name = os.path.basename(package_path)
if package_name == '':
# when package_path had a trailing slash
package_name = os.path.basename(os.path.dirname(package_path))
if folder == 'build/test':
suffix = ' under build'
package_root = os.path.join(package_path, folder, 'packages')
package_arg = '--package-root=%s' % package_root
else:
suffix = ''
package_spec_file = os.path.join(package_path, '.packages')
package_arg = '--packages=%s' % package_spec_file
# Note: we use package_name/package_name/folder and not package_name/folder on
# purpose. The first package_name denotes the suite, the second is part of the
# path we want to match. Without the second package_name, we may match tests
# that contain "folder" further down. So if folder is "test",
# "package_name/test" matches "package_name/build/test", but
# "package_name/package_name/test" does not.
standard_args = ['--arch=ia32',
'--suite-dir=%s' % package_path,
'--use-sdk', '--report', '--progress=buildbot',
'--reset-browser-configuration',
package_arg,
'--write-debug-log', '-v',
'--time',
'%s/%s/%s/' % (package_name, package_name, folder)]
with BuildStep('Test vm release mode%s' % suffix, swallow_error=True):
args = [sys.executable, 'tools/test.py',
'-mrelease', '-rvm', '-cnone'] + standard_args
args.extend(LogsArgument())
# For easy integration testing we give access to the sdk bin directory.
# This only makes sense on vm testing.
extra_env = { 'DART_SDK_BIN' : GetSdkBin(bot_info) }
RunProcess(args, extra_env=extra_env)
with BuildStep('Test analyzer%s' % suffix, swallow_error=True):
args = [sys.executable, 'tools/test.py',
'-mrelease', '-rnone', '-cdart2analyzer'] + standard_args
args.extend(LogsArgument())
RunProcess(args)
# TODO(27065): Restore Dartium testing once it works on test.py again.
for runtime in JS_RUNTIMES[bot_info.system]:
with BuildStep('dart2js-%s%s' % (runtime, suffix), swallow_error=True):
test_args = [sys.executable, 'tools/test.py',
'-mrelease', '-r%s' % runtime, '-cdart2js', '-j4',
'--dart2js-batch']
args = test_args + standard_args
args.extend(LogsArgument())
_RunWithXvfb(bot_info, args)
def FillMagicMarkers(v, replacements):
def replace(match):
word = match.group(1)
if not word in replacements:
raise Exception("Unknown magic marker %s. Known mappings are: %s" %
(word, replacements))
return replacements[word]
return re.sub(r"\$(\w+)", replace, v)
def RunTestRunner(bot_info, test_package, package_path):
package_name = os.path.basename(package_path)
if package_name == '':
# when package_path had a trailing slash
package_name = os.path.basename(os.path.dirname(package_path))
pub = GetPub(bot_info)
extra_env = GetPubEnv(bot_info)
with BuildStep('pub run test', swallow_error=True):
# TODO(nweiz): include dartium here once sdk#23816 is fixed.
platforms = set(['vm', 'chrome', 'firefox'])
if bot_info.system == 'windows':
platforms.add('ie')
# TODO(nweiz): remove dartium here once sdk#23816 is fixed.
elif bot_info.system == 'mac':
platforms.add('safari')
platforms.remove('firefox')
if 'platforms' in test_package:
platforms = platforms.intersection(set(test_package['platforms']))
with utils.ChangedWorkingDirectory(package_path):
test_args = [pub, 'run', 'test', '--reporter', 'expanded', '--no-color',
'--platform', ','.join(platforms)]
if bot_info.package_name in SERIALIZED_PACKAGES:
test_args.append('-j1')
# TODO(6): If barback is needed, use --pub-serve option and pub serve test
if test_package.get('barback'): test_args.append('build/test')
_RunWithXvfb(bot_info, test_args, extra_env=extra_env)
def _RunWithXvfb(bot_info, args, **kwargs):
if bot_info.system == 'linux':
args = ['xvfb-run', '-a', '--server-args=-screen 0 1024x768x24'] + args
RunProcess(args, **kwargs)
# Runs the script given by test_config.get_config if it exists, does nothing
# otherwise.
# Returns `True` if the script was run.
def RunCustomScript(test_config):
custom_script = test_config.get_custom_script()
if custom_script:
command_string = FillMagicMarkers(custom_script, test_config.replacements)
with BuildStep('Running custom script'):
args = shlex.split(command_string, posix=False)
print 'Running command: %s' % args
sys.stdout.flush()
exit_code = subprocess.call(args)
if exit_code != 0:
print "Custom script failed"
return True
else:
return False
def RunDefaultScript(bot_info, test_config, copy_path):
print "No custom script found, running default steps."
GetSDK(bot_info)
print 'Running testing in copy of package in %s' % copy_path
RunPrePubUpgradeHooks(test_config)
RunPubUpgrade(bot_info, copy_path)
test_package = test_config.get_test_package()
if test_package is None or test_package.get('barback'):
RunPrePubBuildHooks(test_config)
RunPubBuild(bot_info, copy_path, 'web')
RunPubBuild(bot_info, copy_path, 'test', 'debug')
RunPostPubBuildHooks(test_config)
if test_package is not None:
print 'Running the test package runner'
RunPreTestHooks(test_config)
RunTestRunner(bot_info, test_package, copy_path)
else:
print 'Running tests manually'
FixupTestControllerJS(copy_path)
RunPreTestHooks(test_config)
RunPackageTesting(bot_info, copy_path, 'test')
# TODO(6): Packages that need barback should use the test package runner,
# instead of trying to run from the build/test directory.
RunPackageTesting(bot_info, copy_path, 'build/test')
RunPostTestHooks(test_config)
def RunHooks(hooks, section_name, replacements):
for name, command in hooks.iteritems():
command = FillMagicMarkers(command, replacements)
with BuildStep('%s: %s' % (section_name, name), swallow_error=True):
RunProcess(command, shell=True)
def RunPrePubUpgradeHooks(test_config):
RunHooks(test_config.get_pre_pub_upgrade_hooks(), "Pre pub upgrade hooks",
test_config.replacements)
def RunPrePubBuildHooks(test_config):
RunHooks(test_config.get_pre_pub_build_hooks(), "Pre pub build hooks",
test_config.replacements)
def RunPostPubBuildHooks(test_config):
  RunHooks(test_config.get_post_pub_build_hooks(), "Post pub build hooks",
test_config.replacements)
def RunPreTestHooks(test_config):
RunHooks(test_config.get_pre_test_hooks(), "Pre test hooks",
test_config.replacements)
def RunPostTestHooks(test_config):
RunHooks(test_config.get_post_test_hooks(), "Post test hooks",
test_config.replacements)
def main():
bot_info = GetBotInfo()
print 'Bot info: %s' % bot_info
copy_path = GetPackageCopy(bot_info)
config_file = os.path.join(copy_path, '.test_config')
test_config = config_parser.ConfigParser(config_file)
test_config.replacements = {
'dart': utils.CheckedInSdkExecutable(),
'project_root': copy_path,
'python': sys.executable
}
RunCustomScript(test_config) or \
RunDefaultScript(bot_info, test_config, copy_path)
if __name__ == '__main__':
main()
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Test the discrete_domain utilities.
Caveat: assumes that the MNI template image is available in
~/.nipy/tests/data.
In those tests, we often access some ROI directly by a fixed index
instead of using the utility functions such as get_id() or select_id().
"""
import numpy as np
from numpy.testing import assert_equal
from ..hroi import HROI_as_discrete_domain_blobs, make_hroi_from_subdomain
from ..mroi import subdomain_from_array
from ..discrete_domain import domain_from_binary_array
shape = (5, 6, 7)
def make_domain():
"""Create a multiple ROI instance
"""
labels = np.ones(shape)
dom = domain_from_binary_array(labels, affine=None)
return dom
#######################################################################
# Test on hierarchical ROI
#######################################################################
def make_hroi(empty=False):
"""Create a multiple ROI instance
"""
labels = np.zeros(shape)
if not empty:
labels[4:, 5:, 6:] = 1
labels[:2, 0:2, 0:2] = 2
labels[:2, 5:, 6:] = 3
labels[:2, 0:2, 6:] = 4
labels[4:, 0:2, 6:] = 5
labels[4:, 0:2, 0:2] = 6
labels[4:, 5:, 0:2] = 7
labels[:2, 5:, 0:2] = 8
parents = np.zeros(9)
else:
labels = -np.ones(shape)
parents = np.array([])
sd = subdomain_from_array(labels, affine=None, nn=0)
hroi = make_hroi_from_subdomain(sd, parents)
return hroi
def test_hroi():
"""
"""
hroi = make_hroi()
assert_equal(hroi.k, 9)
def test_hroi_isleaf():
""" Test basic construction of a tree of isolated leaves
"""
hroi = make_hroi()
hroi.select_roi([0] + range(2, 9))
assert_equal(hroi.k, 8)
def test_hroi_isleaf_2():
"""Test tree pruning, with parent remapping
"""
hroi = make_hroi()
#import pdb; pdb.set_trace()
hroi.select_roi(range(1, 9))
assert_equal(hroi.parents, np.arange(8).astype(np.int))
def test_asc_merge():
""" Test ascending merge
"""
hroi = make_hroi()
s1 = hroi.get_size(0) + hroi.get_size(1)
total_size = np.sum([hroi.get_size(id) for id in hroi.get_id()])
assert_equal(hroi.get_size(0, ignore_children=False), total_size)
hroi.merge_ascending([1])
assert_equal(hroi.get_size(0), s1)
def test_asc_merge_2():
""" Test ascending merge
    Test that ROIs that are their own parent are unchanged.
"""
hroi = make_hroi()
s1 = hroi.get_size(0)
hroi.merge_ascending([0])
assert_equal(hroi.k, 9)
assert_equal(hroi.get_size(0), s1)
def test_asc_merge_3():
"""Test ascending merge
"""
hroi = make_hroi()
hroi.set_roi_feature('labels', np.arange(9))
hroi.set_roi_feature('labels2', np.arange(9))
hroi.merge_ascending([1], pull_features=['labels2'])
assert_equal(hroi.get_roi_feature('labels', 0), 0)
assert_equal(hroi.get_roi_feature('labels2', 0), 1)
def test_asc_merge_4():
"""Test ascending merge
"""
hroi = make_hroi()
hroi.set_roi_feature('labels', range(9))
hroi.set_roi_feature('labels2', range(9))
parents = np.arange(9) - 1
parents[0] = 0
hroi.parents = parents
labels3 = [hroi.label[hroi.label == k] for k in range(hroi.k)]
hroi.set_feature('labels3', labels3)
hroi.merge_ascending([1], pull_features=['labels2'])
assert_equal(hroi.k, 8)
assert_equal(hroi.get_roi_feature('labels', 0), 0)
assert_equal(hroi.get_roi_feature('labels2', 0), 1)
assert_equal(len(hroi.get_feature('labels3')), hroi.k)
assert_equal(hroi.get_roi_feature('labels2').size, hroi.k)
def test_desc_merge():
""" Test descending merge
"""
hroi = make_hroi()
parents = np.arange(hroi.k)
parents[1] = 0
hroi.parents = parents
s1 = hroi.get_size(0) + hroi.get_size(1)
hroi.merge_descending()
assert_equal(hroi.get_size()[0], s1)
def test_desc_merge_2():
""" Test descending merge
"""
hroi = make_hroi()
parents = np.arange(-1, hroi.k - 1)
parents[0] = 0
hroi.parents = parents
hroi.set_roi_feature('labels', np.arange(hroi.k))
labels2 = [hroi.label[hroi.label == k] for k in range(hroi.k)]
hroi.set_feature('labels2', labels2)
hroi.merge_descending()
assert_equal(hroi.k, 1)
assert_equal(len(hroi.get_feature('labels2')), hroi.k)
assert_equal(hroi.get_roi_feature('labels').size, hroi.k)
def test_desc_merge_3():
""" Test descending merge
"""
hroi = make_hroi()
parents = np.minimum(np.arange(1, hroi.k + 1), hroi.k - 1)
hroi.parents = parents
hroi.merge_descending()
assert_equal(hroi.k, 1)
def test_leaves():
""" Test leaves
"""
hroi = make_hroi()
size = hroi.get_size()[1:].copy()
lroi = hroi.copy()
lroi.reduce_to_leaves()
assert_equal(lroi.k, 8)
assert_equal(lroi.get_size(), size)
assert_equal(lroi.get_leaves_id(), np.arange(1, 9))
def test_leaves_empty():
"""Test the reduce_to_leaves method on an HROI containing no node
"""
hroi = make_hroi(empty=True)
lroi = hroi.reduce_to_leaves()
assert_equal(lroi.k, 0)
def test_hroi_from_domain():
dom = make_domain()
data = np.random.rand(*shape)
data[:2, 0:2, 0:2] = 2
rdata = np.reshape(data, (data.size, 1))
hroi = HROI_as_discrete_domain_blobs(dom, rdata, threshold=1., smin=0)
assert_equal(hroi.k, 1)
def test_sd_representative():
"""Test the computation of representative features
"""
hroi = make_hroi()
hroi.parents = np.arange(9)
hroi.parents[2] = 1
data = [[k] * hroi.get_size(k) for k in hroi.get_id()]
hroi.set_feature('data', data)
sums = hroi.representative_feature('data')
for k in hroi.get_id():
assert_equal(sums[hroi.select_id(k)], k)
sums2 = hroi.representative_feature('data', ignore_children=False)
for k in hroi.get_id():
if k != 1:
assert_equal(sums2[hroi.select_id(k)], k)
else:
assert_equal(sums2[1], 17. / 9)
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
|
import os
CONFIG = {'local_data_dir': 'themis_data/',
'remote_data_dir': 'http://themis.ssl.berkeley.edu/data/themis/'}
# override local data directory with environment variables
if os.environ.get('SPEDAS_DATA_DIR'):
CONFIG['local_data_dir'] = os.sep.join([os.environ['SPEDAS_DATA_DIR'],
'themis'])
if os.environ.get('THM_DATA_DIR'):
CONFIG['local_data_dir'] = os.environ['THM_DATA_DIR']
|
# Copyright 2020 Konstruktor, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from unittest import mock
import pytest
from tethys.core.exceptions import TethysSessionClosed, TethysRONotFound
from tethys.core.networks.network_zero import ZeroNetwork
from tethys.core.nodes.node_zero import ZeroNode
from tethys.core.nodes.operators.operator_base import OperatorBase
from tethys.core.pipes import ZeroPipe
from tethys.core.pipes.filters.filter_function import FNFilter
from tethys.core.sessions.sess_zero import ZeroSession
from tethys.core.streams.stream_zero import ZeroStream
from tethys.core.transports.connectors.connector_base import ConnectorBase
from tethys.core.transports.transport_zero import ZeroTransport
def load(*_, **__):
raise TethysRONotFound()
class MockOperator(OperatorBase):
def process(self, *args, **kwargs):
pass
class MockNode(ZeroNode):
def __init__(self, **kwargs):
operator = MockOperator()
super().__init__(operator, **kwargs)
class MockConnector(ConnectorBase):
def connect(self, channel_id: str, *args, **kwargs):
pass
class MockSession(ZeroSession):
pass
class MockNetwork(ZeroNetwork):
pass
class TestZeroPipe:
# set_transport_factory
def test_set_transport_factory_and_create_pipe(self):
node_a, node_b = MockNode(), MockNode()
transport = ZeroTransport(MockConnector())
transport_factory = mock.MagicMock(side_effect=lambda *_, **__: transport)
ZeroPipe.set_transport_factory(transport_factory)
pipe = ZeroPipe(node_a, node_b)
assert ZeroPipe._transport_factory == transport_factory
assert pipe.transport == transport
ZeroPipe._transport_factory.assert_called_once_with(pipe)
def test_set_transport_factory_as_transport(self):
node_a, node_b = MockNode(), MockNode()
pipe = ZeroPipe(node_a, node_b)
transport = ZeroTransport(MockConnector())
pipe.set_transport_factory(transport)
assert pipe._transport_factory() == transport
def test_transport_factory_context(self):
node_a, node_b = MockNode(), MockNode()
pipe = ZeroPipe(node_a, node_b)
transport = mock.MagicMock()
prev_method = pipe._transport_factory
with pipe.transport_factory_context(transport):
assert pipe._transport_factory == transport
assert pipe._transport_factory == prev_method
# filter_data_packet
def test_filter_data_packet_empty(self):
node_a, node_b = MockNode(), MockNode()
session_mock = mock.MagicMock()
pipe = ZeroPipe(node_a, node_b, filters=[])
assert pipe.filter_data_packet(..., session_mock)
def test_filter_data_packet_true(self):
node_a, node_b = MockNode(), MockNode()
session_mock = mock.MagicMock()
def f1(data_packet, *_, **__):
return data_packet
def f2(data_packet, *_, **__):
return data_packet / 2
pipe = ZeroPipe(
node_a, node_b, filters=[FNFilter(f1), FNFilter(f2)], filters_threshold=0.5
)
assert pipe.filter_data_packet(1, session_mock) is True
def test_filter_data_packet_false(self):
node_a, node_b = MockNode(), MockNode()
session_mock = mock.MagicMock()
def f1(data_packet, *_, **__):
return data_packet
def f2(data_packet, *_, **__):
return data_packet / 2
pipe = ZeroPipe(
node_a, node_b, filters=[FNFilter(f1), FNFilter(f2)], filters_threshold=1.1
)
assert pipe.filter_data_packet(2, session_mock) is False
# get_stream
def test_get_stream_exists(self):
node_a, node_b = MockNode(), MockNode()
session_mock = mock.MagicMock(spec=ZeroSession)
session_mock.id = "1"
session_mock.closed = False
session_mock.closing_mode = None
stream_mock = mock.MagicMock()
stream_cls_mock = mock.MagicMock(side_effect=lambda *_, **__: stream_mock)
stream_cls_mock.load = mock.MagicMock(side_effect=lambda *_, **__: stream_mock)
pipe = ZeroPipe(node_a, node_b)
@contextmanager
def patch():
old_load = ZeroStream.load
old_new = ZeroStream.__new__
try:
ZeroStream.load = lambda *_, **__: stream_mock
ZeroStream.__new__ = lambda *_, **__: stream_mock
yield ZeroStream
finally:
ZeroStream.load = old_load
ZeroStream.__new__ = old_new
with patch():
assert pipe.get_stream(session_mock) == stream_mock
def test_get_stream_new_with_transport(self):
node_a, node_b = MockNode(), MockNode()
session_mock = MockSession(MockNetwork())
session_mock._id = "1"
session_mock.closed = False
session_mock.closing_mode = None
transport_mock = ZeroTransport(MockConnector())
pipe = ZeroPipe(node_a, node_b, transport=transport_mock)
ZeroStream.load = load
ZeroStream.save = mock.MagicMock()
stream = pipe.get_stream(session_mock)
ZeroStream.save.assert_called_once_with(save_dependency=False)
assert stream.transport == transport_mock
def test_get_stream_new_without_transport(self):
node_a, node_b = MockNode(), MockNode()
session_mock = MockSession(MockNetwork())
session_mock._id = "1"
session_mock.closed = False
session_mock.closing_mode = None
transport_mock = ZeroTransport(MockConnector())
def transport_factory(_):
return transport_mock
transport_factory_mock = mock.MagicMock(side_effect=transport_factory)
ZeroPipe.set_transport_factory(transport_factory_mock)
pipe = ZeroPipe(node_a, node_b)
ZeroStream.load = load
ZeroStream.save = mock.MagicMock()
stream = pipe.get_stream(session_mock)
ZeroStream.save.assert_called_once_with(save_dependency=False)
ZeroPipe._transport_factory.assert_called_once_with(pipe)
assert stream.transport == transport_mock
def test_get_stream_new_when_sess_closed(self):
node_a, node_b = MockNode(), MockNode()
session_mock = MockSession(MockNetwork())
session_mock._id = "1"
session_mock.closed = True
session_mock.closing_mode = None
pipe = ZeroPipe(node_a, node_b)
with pytest.raises(TethysSessionClosed):
pipe.get_stream(session_mock)
def test_get_stream_new_when_sess_hard_closing(self):
node_a, node_b = MockNode(), MockNode()
session_mock = MockSession(MockNetwork())
session_mock._id = "1"
session_mock.closed = False
session_mock.closing_mode = ZeroSession.HARD_CLOSING_MODE
pipe = ZeroPipe(node_a, node_b)
with pytest.raises(TethysSessionClosed):
pipe.get_stream(session_mock)
# pull
def test_pull(self):
node_a, node_b = MockNode(), MockNode()
session_mock = MockSession(MockNetwork())
session_mock._id = "1"
session_mock.closed = False
session_mock.closing_mode = ZeroSession.HARD_CLOSING_MODE
stream_mock = mock.MagicMock()
stream_mock.read = mock.MagicMock(
side_effect=lambda *_, **__: iter([("key", "value")])
)
def get_stream(_):
return stream_mock
pipe = ZeroPipe(node_a, node_b)
pipe.get_stream = mock.MagicMock(side_effect=get_stream)
assert next(pipe.pull(session_mock, test_kw=1)) == "value"
pipe.get_stream.assert_called_once_with(session_mock)
stream_mock.read.assert_called_once_with(test_kw=1)
# push
def test_push(self):
node_a, node_b = MockNode(), MockNode()
session_mock = MockSession(MockNetwork())
session_mock._id = "1"
stream_mock = mock.MagicMock()
def get_stream(_):
return stream_mock
pipe = ZeroPipe(node_a, node_b)
pipe.get_stream = mock.MagicMock(side_effect=get_stream)
res = pipe.push(..., session_mock, test_kw=1)
assert res is True
pipe.get_stream.assert_called_once_with(session_mock)
stream_mock.write.assert_called_once_with(..., many=False, test_kw=1)
def test_push_filter_return_false(self):
node_a, node_b = MockNode(), MockNode()
session_mock = MockSession(MockNetwork())
session_mock._id = "1"
stream_mock = mock.MagicMock()
def get_stream(_):
return stream_mock
def lambda_null(*_, **__):
return 0
pipe = ZeroPipe(node_a, node_b, filters=[FNFilter(lambda_null)])
pipe.get_stream = mock.MagicMock(side_effect=get_stream)
res = pipe.push(..., session_mock, test_kw=1)
assert res is False
pipe.get_stream.assert_not_called()
stream_mock.write.assert_not_called()
def test_push_many(self):
node_a, node_b = MockNode(), MockNode()
session_mock = MockSession(MockNetwork())
session_mock._id = "1"
stream_mock = mock.MagicMock()
def get_stream(_):
return stream_mock
pipe = ZeroPipe(node_a, node_b)
pipe.get_stream = mock.MagicMock(side_effect=get_stream)
res = pipe.push([...], session_mock, many=True, test_kw=1)
assert res is True
pipe.get_stream.assert_called_once_with(session_mock)
stream_mock.write.assert_called_once_with([...], many=True, test_kw=1)
def test_push_many_return_piece_of_data(self):
node_a, node_b = MockNode(), MockNode()
session_mock = MockSession(MockNetwork())
session_mock._id = "1"
stream_mock = mock.MagicMock()
def get_stream(_):
return stream_mock
def lambda_dummy(x, *_, **__):
return x
pipe = ZeroPipe(
node_a, node_b, filters=[FNFilter(lambda_dummy)], filters_threshold=2
)
pipe.get_stream = mock.MagicMock(side_effect=get_stream)
res = pipe.push(list(range(5)), session_mock, many=True, test_kw=1)
assert res is True
pipe.get_stream.assert_called_once_with(session_mock)
stream_mock.write.assert_called_once_with(
list(range(2, 5)), many=True, test_kw=1
)
def test_push_many_return_empty(self):
node_a, node_b = MockNode(), MockNode()
session_mock = MockSession(MockNetwork())
session_mock._id = "1"
stream_mock = mock.MagicMock()
def get_stream(_):
return stream_mock
def lambda_null(*_, **__):
return 0
pipe = ZeroPipe(node_a, node_b, filters=[FNFilter(lambda_null)])
pipe.get_stream = mock.MagicMock(side_effect=get_stream)
res = pipe.push(list(range(5)), session_mock, many=True, test_kw=1)
assert res is False
pipe.get_stream.assert_not_called()
stream_mock.write.assert_not_called()
|
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from cros.factory.probe.functions import sysfs
from cros.factory.probe.lib import cached_probe_function
REQUIRED_FIELDS = ['vendor']
OPTIONAL_FIELDS = ['manufacturer', 'product', 'bcdDevice']
def ReadSDIOSysfs(dir_path):
ret = sysfs.ReadSysfs(dir_path, ['vendor', 'device'])
if ret is None:
return None
ret['bus_type'] = 'sdio'
return ret
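# Illustrative return value (hypothetical sysfs attributes, consistent with the
# docstring examples of SDIOFunction below):
#
#   ReadSDIOSysfs('/sys/bus/sdio/devices/mmc1:0001:1')
#   -> {'vendor': '0123', 'device': '4567', 'bus_type': 'sdio'}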
class SDIOFunction(cached_probe_function.GlobPathCachedProbeFunction):
"""Probes all SDIO devices listed in the sysfs ``/sys/bus/sdio/devices/``.
Description
-----------
This function goes through ``/sys/bus/sdio/devices/`` to read attributes of
each SDIO device listed there. Each result should contain these fields:
- ``device_path``: Pathname of the sysfs directory.
- ``vendor``
- ``device``
Examples
--------
Let's say the Chromebook has two SDIO devices. One of which
(at ``/sys/bus/sdio/devices/mmc1:0001:1``) has the attributes:
- ``vendor=0x0123``
- ``device=0x4567``
And the other one (at ``/sys/bus/sdio/devices/mmc1:0002:1``) has the
attributes:
- ``vendor=0x0246``
- ``device=0x1357``
Then the probe statement::
{
"eval": "sdio"
}
will have the corresponding probed result::
[
{
"bus_type": "sdio",
"vendor": "0123",
"device": "4567"
},
{
"bus_type": "sdio",
"vendor": "0246",
"device": "1357"
}
]
To verify whether the Chromebook has an SDIO device whose ``vendor`` is
``0x0246``, you can write a probe statement like::
{
"eval": "sdio",
"expect": {
"vendor": "0246"
}
}
The corresponding probed result will be empty if and only if no SDIO device
whose ``vendor`` is ``0x0246`` is found.
"""
GLOB_PATH = '/sys/bus/sdio/devices/*'
@classmethod
def ProbeDevice(cls, dir_path):
return ReadSDIOSysfs(dir_path)
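# A minimal, hedged sketch (not part of the probe framework) of how the helper
# above could be exercised directly; the glob pattern comes from GLOB_PATH and
# the output depends entirely on the SDIO devices present on the device under test.
if __name__ == '__main__':
  import glob
  for device_path in glob.glob(SDIOFunction.GLOB_PATH):
    probed = ReadSDIOSysfs(device_path)
    if probed is not None:
      print(device_path, probed)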
|
# This file reads results.txt after the simulator finishes a run, then writes the parsed values out as a CSV row.
def read_file(res_location="results.txt"):
# bring in results.txt as a list of strings for each line
# open file in read mode.
file1 = open(res_location, "r+")
results = file1.readlines()
file1.close()
# check for error
if results[0][0:5] == "Error":
return {"Score":-1}
# interpret the important elements, and write to dictionary.
keys, values = [], []
for line in results:
# lines with important info all contain ": "
if ": " in line:
l = line.split(": ")
# check if this is the "XX% Damaged" key
if "%" in l[0]:
keys.append("Damage")
values.append(float(l[1][1:])) #remove the "x"
else:
keys.append(l[0])
values.append(l[1])
# make the dictionary
elements = dict(zip(keys, values))
# if the robot was not damaged, default to 0.0 so later lookups don't fail
if "Damage" not in keys:
elements["Damage"] = 0.0
# count the number of waypoints reached
elements["Num waypoints hit"] = 0
elements["Num waypoints hit"] += 1 if "Waypoint 1 reached" in elements else 0
elements["Num waypoints hit"] += 1 if "Waypoint 2 reached" in elements else 0
elements["Num waypoints hit"] += 1 if "Waypoint 3 reached" in elements else 0
elements["wp1"] = "Waypoint 1 reached" in elements
elements["wp2"] = "Waypoint 2 reached" in elements
elements["wp3"] = "Waypoint 3 reached" in elements
# convert the types of important vars
elements["Time"] = float(elements["Time"])
elements["Score"] = float(elements["Score"])
elements["Multiplier"] = float(elements["Multiplier"][1:]) #remove the "x"
# recover the sim settings
elements["Seed"] = int(elements["Seed"])
obs_settings = {"x1.00":"normal", "x15.00":"none", "x0.10":"hard"}
noise_settings = {"x10.00":"none", "x1.00":"reduced", "x0.60":"realistic"}
elements["Obstacle Type"] = obs_settings[elements["Obstacles Bonus"][:-1]]
elements["Noise Type"] = noise_settings[elements["Noise Bonus"][:-1]]
return elements
h_keys = ["Seed", "Obstacle Type", "Noise Type", "Time", "Score", "Damage", "Num waypoints hit","wp1","wp2","wp3"]
header = [s.lower().replace(" ","_") for s in h_keys]
def write_file(elements):
#filepath = "results/" + str(elements["Seed"]) + ".csv"
filepath = "config/results.csv"
file2 = open(filepath, "w+")
# clear the file
file2.seek(0)
file2.truncate()
# check if this run finished successfully
if len(elements) > 1:
# add the header
file2.write(",".join(header) + "\n")
# add the row for this round of results
row = ",".join([str(elements[k]) for k in h_keys])
else:
row = "seed\n-1"
file2.write(row + "\n")
file2.close()
#el = read_file()
#print(el)
#write_file(el)
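# A hedged, runnable variant of the commented-out usage above; it assumes a
# results.txt produced by the simulator sits in the working directory and that
# a config/ directory exists for the CSV output.
if __name__ == "__main__":
    parsed = read_file()
    print(parsed)
    write_file(parsed)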
""" # Sample results.txt on successful completion of course.
Team WickedSlickRobotics
Date: 2021-02-23 17:50:17 -06
Seed: 76168294
Time: 24.680
Multiplier: x1.539
Score: 37.986
-- Multipliers --
Obstacles Bonus: x1.00
Noise Bonus: x0.60
Waypoint 1 reached: x0.80
Waypoint 2 reached: x0.80
Waypoint 3 reached: x0.80
44% Damaged: x5.01
"""
""" #Sample results.txt on simulator shutdown.
Error: Connection to RosBridge lost!
"""
|
import unittest
import torch
import torch.nn as nn
import torch.nn.utils.prune as prune
from torchvision.models.squeezenet import SqueezeNet
import simplify
from simplify.utils import set_seed
from tests.benchmark_models import models
class SimplificationTest(unittest.TestCase):
def setUp(self):
set_seed(3)
def test_simplification(self):
@torch.no_grad()
def test_arch(arch, x, pretrained=False, fuse_bn=True):
if arch.__name__ in ["shufflenet_v2_x1_5", "shufflenet_v2_x2_0", "mnasnet0_75", "mnasnet1_3"]:  # these variants ship without pretrained weights, so force pretrained=False
pretrained = False
model = arch(pretrained, progress=False)
model.eval()
for name, module in model.named_modules():
if isinstance(model, SqueezeNet) and 'classifier.1' in name:
continue
if isinstance(module, nn.Conv2d):
prune.random_structured(module, 'weight', amount=0.8, dim=0)
prune.remove(module, 'weight')
y_src = model(x)
zeros = torch.zeros(1, *x.shape[1:])
simplify.simplify(model, zeros, fuse_bn=fuse_bn, training=True)
y_prop = model(x)
return torch.equal(y_src.argmax(dim=1), y_prop.argmax(dim=1))
im = torch.randint(0, 256, (256, 3, 224, 224))
x = im / 255.
for architecture in models:
with self.subTest(arch=architecture.__name__, pretrained=True, fuse_bn=True):
self.assertTrue(test_arch(architecture, x, True, True))
with self.subTest(arch=architecture.__name__, pretrained=True, fuse_bn=False):
self.assertTrue(test_arch(architecture, x, True, False))
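# A hedged standalone sketch mirroring the unit test above on a single network
# (resnet18 is an arbitrary choice here, not something the test itself pins down).
if __name__ == "__main__":
    from torchvision.models import resnet18
    net = resnet18(pretrained=False).eval()
    for mod in net.modules():
        if isinstance(mod, nn.Conv2d):
            prune.random_structured(mod, 'weight', amount=0.5, dim=0)
            prune.remove(mod, 'weight')
    sample = torch.rand(4, 3, 224, 224)
    with torch.no_grad():
        before = net(sample)
        simplify.simplify(net, torch.zeros(1, 3, 224, 224), fuse_bn=True, training=True)
        after = net(sample)
    print("argmax preserved:", torch.equal(before.argmax(dim=1), after.argmax(dim=1)))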
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from rally.plugins.openstack.scenarios.neutron import security_groups
from tests.unit import test
@ddt.ddt
class NeutronSecurityGroup(test.TestCase):
@ddt.data(
{},
{"security_group_create_args": {}},
{"security_group_create_args": {"description": "fake-description"}},
)
@ddt.unpack
def test_create_and_list_security_groups(
self, security_group_create_args=None):
scenario = security_groups.CreateAndListSecurityGroups()
security_group_data = security_group_create_args or {}
scenario._create_security_group = mock.Mock()
scenario._list_security_groups = mock.Mock()
scenario.run(security_group_create_args=security_group_create_args)
scenario._create_security_group.assert_called_once_with(
**security_group_data)
scenario._list_security_groups.assert_called_once_with()
@ddt.data(
{},
{"security_group_create_args": {}},
{"security_group_create_args": {"description": "fake-description"}},
)
@ddt.unpack
def test_create_and_delete_security_groups(
self, security_group_create_args=None):
scenario = security_groups.CreateAndDeleteSecurityGroups()
security_group_data = security_group_create_args or {}
scenario._create_security_group = mock.Mock()
scenario._delete_security_group = mock.Mock()
scenario.run(security_group_create_args=security_group_create_args)
scenario._create_security_group.assert_called_once_with(
**security_group_data)
scenario._delete_security_group.assert_called_once_with(
scenario._create_security_group.return_value)
@ddt.data(
{},
{"security_group_create_args": {}},
{"security_group_create_args": {"description": "fake-description"}},
{"security_group_update_args": {}},
{"security_group_update_args": {"description": "fake-updated-descr"}},
)
@ddt.unpack
def test_create_and_update_security_groups(
self, security_group_create_args=None,
security_group_update_args=None):
scenario = security_groups.CreateAndUpdateSecurityGroups()
security_group_data = security_group_create_args or {}
security_group_update_data = security_group_update_args or {}
scenario._create_security_group = mock.Mock()
scenario._update_security_group = mock.Mock()
scenario.run(security_group_create_args=security_group_create_args,
security_group_update_args=security_group_update_args)
scenario._create_security_group.assert_called_once_with(
**security_group_data)
scenario._update_security_group.assert_called_once_with(
scenario._create_security_group.return_value,
**security_group_update_data)
|
from . import atomic
from . import processtweet
from . import standardize
from . import tweet
from .preprocess import preprocess
# from .preprocess import get_preprocess_func
|
import os
from datetime import datetime
from logging import getLogger
from time import time
import chess
import chess.pgn
import re
from supervised_learning_chess.agent.player_chess import ChessPlayer
from supervised_learning_chess.config import Config
from supervised_learning_chess.env.chess_env import ChessEnv, Winner
from supervised_learning_chess.lib import tf_util
from supervised_learning_chess.lib.data_helper import get_game_data_filenames, write_game_data_to_file, find_pgn_files, read_game_data_from_file
from supervised_learning_chess.lib.model_helper import load_best_model_weight, save_as_best_model, \
reload_best_model_weight_if_changed
import random
from time import sleep
import keras.backend as k
import numpy as np
from keras.optimizers import SGD
from supervised_learning_chess.agent.model_chess import ChessModel, objective_function_for_policy, \
objective_function_for_value
logger = getLogger(__name__)
TAG_REGEX = re.compile(r"^\[([A-Za-z0-9_]+)\s+\"(.*)\"\]\s*$")
def start(config: Config):
tf_util.set_session_config(per_process_gpu_memory_fraction=0.59)
return SupervisedLearningWorker(config, env=ChessEnv()).start()
class SupervisedLearningWorker:
def __init__(self, config: Config, env=None, model=None):
"""
:param config:
:param ChessEnv|None env:
:param supervised_learning_chess.agent.model_chess.ChessModel|None model:
"""
self.config = config
self.model = model
self.env = env # type: ChessEnv
self.black = None # type: ChessPlayer
self.white = None # type: ChessPlayer
self.buffer = []
self.optimizer = None
def start(self):
if self.model is None:
self.model = self.load_model()
self.optimizer = OptimizeWorker(self.config)
self.buffer = []
idx = 1
k = 0
while True:
start_time = time()
_ = self.read_game(idx)
end_time = time()
logger.debug(
f"Reading game {idx} time={end_time - start_time} sec")
if (idx % self.config.play_data.nb_game_in_file) == 0:
reload_best_model_weight_if_changed(self.model)
idx += 1
k += 1
if k > 100:
self.optimizer.training()
k = 0
def read_game(self, idx):
self.env.reset()
self.black = ChessPlayer(self.config, self.model)
self.white = ChessPlayer(self.config, self.model)
files = find_pgn_files(self.config.resource.play_data_dir)
if len(files) > 0:
random.shuffle(files)
filename = files[0]
pgn = open(filename, errors='ignore')
size = os.path.getsize(filename)
pos = random.randint(0, size)
pgn.seek(pos)
line = pgn.readline()
offset = 0
# Parse game headers.
while line:
if line.isspace() or line.startswith("%"):
line = pgn.readline()
continue
# Read header tags.
tag_match = TAG_REGEX.match(line)
if tag_match:
offset = pgn.tell()
break
line = pgn.readline()
pgn.seek(offset)
game = chess.pgn.read_game(pgn)
node = game
result = game.headers["Result"]
actions = []
while not node.is_end():
next_node = node.variation(0)
actions.append(node.board().uci(next_node.move))
node = next_node
pgn.close()
k = 0
observation = self.env.observation
while not self.env.done and k < len(actions):
if self.env.board.turn == chess.BLACK:
action = self.black.sl_action(observation, actions[k])
else:
action = self.white.sl_action(observation, actions[k])
board, _ = self.env.step(action)
observation = board.fen()
k += 1
self.env.done = True
if result == '1-0':
self.env.winner = Winner.white
elif result == '0-1':
self.env.winner = Winner.black
else:
self.env.winner = Winner.draw
self.finish_game()
self.save_play_data(write=idx %
self.config.play_data.nb_game_in_file == 0)
self.remove_play_data()
else:
logger.debug("no pgn files found in the dataset folder!")
return self.env
def save_play_data(self, write=True):
data = self.black.moves + self.white.moves
self.buffer += data
if not write:
return
rc = self.config.resource
game_id = datetime.now().strftime("%Y%m%d-%H%M%S.%f")
path = os.path.join(
rc.play_data_dir, rc.play_data_filename_tmpl % game_id)
logger.info(f"save play data to {path}")
write_game_data_to_file(path, self.buffer)
self.buffer = []
def remove_play_data(self):
files = get_game_data_filenames(self.config.resource)
if len(files) < self.config.play_data.max_file_num:
return
for i in range(len(files) - self.config.play_data.max_file_num):
os.remove(files[i])
def finish_game(self):
if self.env.winner == Winner.black:
black_win = 1
elif self.env.winner == Winner.white:
black_win = -1
else:
black_win = 0
self.black.finish_game(black_win)
self.white.finish_game(-black_win)
def load_model(self):
from supervised_learning_chess.agent.model_chess import ChessModel
model = ChessModel(self.config)
if self.config.opts.new or not load_best_model_weight(model):
model.build()
save_as_best_model(model)
return model
class OptimizeWorker:
def __init__(self, config: Config):
self.config = config
self.model = None # type: ChessModel
self.loaded_filenames = set()
self.loaded_data = {}
self.dataset = None
self.optimizer = None
self.model = self.load_model()
self.compile_model()
self.total_steps = 0
def training(self):
min_data_size_to_learn = 1000
self.load_play_data()
if self.dataset_size < min_data_size_to_learn:
logger.info(
f"dataset_size={self.dataset_size} is less than {min_data_size_to_learn}")
self.load_play_data()
return
logger.debug(
f"total steps={self.total_steps}, dataset size={self.dataset_size}")
self.update_learning_rate(self.total_steps)
steps = self.train_epoch(self.config.trainer.epoch_to_checkpoint)
self.total_steps += steps
save_as_best_model(self.model)
def train_epoch(self, epochs):
tc = self.config.trainer
state_ary, policy_ary, z_ary = self.dataset
self.model.model.fit(state_ary, [policy_ary, z_ary],
batch_size=tc.batch_size,
epochs=epochs)
steps = (state_ary.shape[0] // tc.batch_size) * epochs
return steps
def compile_model(self):
self.optimizer = SGD(lr=1e-2, momentum=0.9)
losses = [objective_function_for_policy, objective_function_for_value]
self.model.model.compile(optimizer=self.optimizer, loss=losses)
def update_learning_rate(self, total_steps):
if total_steps < 100000:
lr = 1e-2
elif total_steps < 500000:
lr = 1e-3
elif total_steps < 900000:
lr = 1e-4
else:
lr = 2.5e-5
k.set_value(self.optimizer.lr, lr)
logger.debug(f"total step={total_steps}, set learning rate to {lr}")
def collect_all_loaded_data(self):
state_ary_list, policy_ary_list, z_ary_list = [], [], []
for s_ary, p_ary, z_ary_ in self.loaded_data.values():
state_ary_list.append(s_ary)
policy_ary_list.append(p_ary)
z_ary_list.append(z_ary_)
state_ary = np.concatenate(state_ary_list)
policy_ary = np.concatenate(policy_ary_list)
z_ary = np.concatenate(z_ary_list)
return state_ary, policy_ary, z_ary
@property
def dataset_size(self):
if self.dataset is None:
return 0
return len(self.dataset[0])
def load_model(self):
from supervised_learning_chess.agent.model_chess import ChessModel
model = ChessModel(self.config)
rc = self.config.resource
dirs = rc.model_best_config_path
if not dirs:
model.build()
save_as_best_model(model)
logger.debug("loading best model")
if not load_best_model_weight(model):
raise RuntimeError("Best model could not be loaded!")
else:
logger.debug("loading latest model")
config_path = rc.model_best_config_path
weight_path = rc.model_best_weight_path
model.load(config_path, weight_path)
return model
def load_play_data(self):
filenames = get_game_data_filenames(self.config.resource)
updated = False
for filename in filenames:
if filename in self.loaded_filenames:
continue
self.load_data_from_file(filename)
updated = True
for filename in (self.loaded_filenames - set(filenames)):
self.unload_data_of_file(filename)
updated = True
if updated:
logger.debug("updating training dataset")
self.dataset = self.collect_all_loaded_data()
def load_data_from_file(self, filename):
try:
logger.debug(f"loading data from {filename}")
data = read_game_data_from_file(filename)
self.loaded_data[filename] = self.convert_to_training_data(data)
self.loaded_filenames.add(filename)
except Exception as e:
logger.warning(str(e))
def unload_data_of_file(self, filename):
logger.debug(f"removing data about {filename} from training set")
self.loaded_filenames.remove(filename)
if filename in self.loaded_data:
del self.loaded_data[filename]
@staticmethod
def convert_to_training_data(data):
"""
:param data: format is SelfPlayWorker.buffer
:return:
"""
state_list = []
policy_list = []
z_list = []
for state, policy, z in data:
env = ChessEnv().update(state)
black_ary, white_ary = env.black_and_white_plane()
state = [black_ary, white_ary] if env.board.turn == chess.BLACK else [
white_ary, black_ary]
state_list.append(state)
policy_list.append(policy)
z_list.append(z)
return np.array(state_list), np.array(policy_list), np.array(z_list)
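# Hedged entry-point sketch: the worker is normally launched through the project's
# own runner, but a direct invocation would look roughly like this, assuming
# Config() defaults point at a directory containing PGN files.
if __name__ == "__main__":
    start(Config())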
|
# Copyright (c) 2017 Polytechnique Montreal <www.neuro.polymtl.ca>
#
# About the license: see the file LICENSE.TXT
""" Qt widgets for manual labeling of images """
from __future__ import absolute_import, division
import logging
from time import time
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib.widgets import Cursor
from PyQt5 import QtCore, QtGui, QtWidgets
from spinalcordtoolbox.gui.base import MissingLabelWarning
logger = logging.getLogger(__name__)
class VertebraeWidget(QtWidgets.QWidget):
"""A group of checkboxes that list labels."""
_unchecked = []
_checked = []
_active_label = None
_check_boxes = {}
_labels = None
_label = None
def __init__(self, parent, vertebraes):
super(VertebraeWidget, self).__init__(parent)
self.parent = parent
self.vertebraes = vertebraes
self._init_ui(parent.params)
self.refresh()
def _init_ui(self, params):
layout = QtWidgets.QVBoxLayout()
self.setLayout(layout)
font = QtGui.QFont()
font.setPointSize(10)
for vertebrae in self.vertebraes:
rdo = QtWidgets.QCheckBox('Label {}'.format(vertebrae))
rdo.label = vertebrae
rdo.setFont(font)
rdo.setTristate()
self._check_boxes[vertebrae] = rdo
rdo.clicked.connect(self.on_select_label)
layout.addWidget(rdo)
layout.addStretch()
def on_select_label(self):
label = self.sender()
self.label = label.label
def refresh(self, labels=None):
if labels:
self._checked = labels
self._unchecked = set(self._check_boxes.keys()) - set(labels)
for checkbox in self._check_boxes.values():
checkbox.setCheckState(QtCore.Qt.Unchecked)
logger.debug('refresh labels {}'.format(self.parent._controller.points))
for point in self.parent._controller.points:
self._check_boxes[point[3]].setCheckState(QtCore.Qt.Checked)
@property
def label(self):
if self._active_label:
return self._active_label.label
raise MissingLabelWarning('No vertebra was selected')
@label.setter
def label(self, index):
self.refresh()
self._active_label = self._check_boxes[index]
self._active_label.setCheckState(QtCore.Qt.PartiallyChecked)
@property
def labels(self):
return self._labels
@labels.setter
def labels(self, values):
self._labels = values
for x in self._check_boxes.values():
x.setCheckState(QtCore.Qt.Unchecked)
for label in self._labels:
self._check_boxes[label].setCheckState(QtCore.Qt.Checked)
class AnatomicalCanvas(FigureCanvas):
"""Base canvas for anatomical views
Attributes
----------
point_selected_signal : QtCore.pyqtSignal
Emitted when the user clicks on the canvas
"""
point_selected_signal = QtCore.pyqtSignal(float, float, float)
_horizontal_nav = None
_vertical_nav = None
_navigation_state = False
annotations = []
last_update = 0
update_freq = 0.0667
previous_point = (0, 0)
def __init__(self, parent, width=8, height=8, dpi=100, crosshair=False, plot_points=False,
annotate=False, vertical_nav=False, horizontal_nav=False):
self._parent = parent
self._image = parent.image
self._params = parent.params
self._crosshair = crosshair
self._plot_points = plot_points
self._annotate_points = annotate
self._vertical_nav = vertical_nav
self._horizontal_nav = horizontal_nav
self.position = None
self._x, self._y, self._z = [int(i) for i in self._parent._controller.position]
self._fig = Figure(figsize=(width, height), dpi=dpi)
super(AnatomicalCanvas, self).__init__(self._fig)
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.vmin_updated = self._params.vmin
self.vmax_updated = self._params.vmax
def _init_ui(self, data, aspect):
self._fig.canvas.mpl_connect('button_release_event', self.on_select_point)
self._fig.canvas.mpl_connect('scroll_event', self.on_zoom)
self._fig.canvas.mpl_connect('button_release_event', self.on_change_intensity)
self._fig.canvas.mpl_connect('motion_notify_event', self.on_change_intensity)
self._axes = self._fig.add_axes([0, 0, 1, 1], frameon=True)
self._axes.axis('off')
self.view = self._axes.imshow(
data,
cmap=self._params.cmap,
interpolation=self._params.interp,
vmin=self._params.vmin,
vmax=self._params.vmax,
alpha=self._params.alpha)
self._axes.set_aspect(aspect)
if self._crosshair:
self.cursor = Cursor(self._axes, useblit=True, color='r', linewidth=1)
self.points = self._axes.plot([], [], '.r', markersize=7)[0]
def title(self, message):
self._fig.suptitle(message, fontsize=10)
def annotate(self, x, y, label):
self.annotations.append(self._axes.annotate(label, xy=(x, y), xytext=(-3, 3),
textcoords='offset points', ha='right',
va='bottom', color='r'))
def clear(self):
for i in self.annotations:
i.remove()
self.annotations = []
self.points.set_xdata([])
self.points.set_ydata([])
def refresh(self):
self.view.set_clim(vmin=self.vmin_updated, vmax=self.vmax_updated)
# self.view.set_clim(self._parent._controller.vmin_updated,
# self._parent._controller.vmax_updated)
logger.debug("vmin_updated="+str(self.vmin_updated)+", vmax_updated="+str(self.vmax_updated))
self.plot_position()
self.plot_points()
self.view.figure.canvas.draw()
def plot_data(self, xdata, ydata, labels):
self.points.set_xdata(xdata)
self.points.set_ydata(ydata)
if self._annotate_points:
for x, y, label in zip(xdata, ydata, labels):
self.annotate(x, y, label)
def on_zoom(self, event):
if event.xdata is None or event.ydata is None:
return
if event.button == 'up':
scale_factor = 1.3
else:
scale_factor = 1 / 1.3
x = event.xdata
y = event.ydata
x_lim = self._axes.get_xlim()
y_lim = self._axes.get_ylim()
left = (x - x_lim[0]) * scale_factor
right = (x_lim[1] - x) * scale_factor
top = (y - y_lim[0]) * scale_factor
bottom = (y_lim[1] - y) * scale_factor
if x + right - left >= self.x_max or y + bottom - top >= self.y_max:
return
self._axes.set_xlim(x - left, x + right)
self._axes.set_ylim(y - top, y + bottom)
self.view.figure.canvas.draw()
def on_select_point(self, event):
pass
def on_change_intensity(self, event):
if event.xdata is None or event.ydata is None:
return
if event.button == 3: # right click
curr_time = time()
if curr_time - self.last_update <= self.update_freq:
# TODO: never enters that loop because last_update set to 0 and it is never updated
return
if (abs(event.xdata - self.previous_point[0]) < 1 and abs(event.ydata - self.previous_point[1]) < 1):
# TODO: never enters that loop because previous_point set to 0,0 and it is never updated
self.previous_point = (event.xdata, event.ydata)
return
logger.debug("X=" + str(event.xdata) + ", Y=" + str(event.ydata))
xlim, ylim = self._axes.get_xlim(), self._axes.get_ylim()
x_factor = (event.xdata - xlim[0]) / float(xlim[1] - xlim[0]) # between 0 and 1. No change: 0.5
y_factor = (event.ydata - ylim[1]) / float(ylim[0] - ylim[1])
# get dynamic of the image
vminvmax = self._params.vmax - self._params.vmin # todo: get variable based on image quantization
# adjust brightness by adding offset to image intensity
# the "-" sign is there so that when moving the cursor to the right, brightness increases (more intuitive)
# the 2.0 factor maximizes change.
self.vmin_updated = self._params.vmin - (x_factor - 0.5) * vminvmax * 2.0
self.vmax_updated = self._params.vmax - (x_factor - 0.5) * vminvmax * 2.0
# adjust contrast by multiplying image dynamic by scaling factor
# the factor 2.0 maximizes contrast change. For y_factor = 0.5, the scaling will be 1, which means no change
# in contrast
self.vmin_updated = self.vmin_updated * (y_factor * 2.0)
self.vmax_updated = self.vmax_updated * (y_factor * 2.0)
self.refresh()
def horizontal_position(self, position):
if self._horizontal_nav:
try:
self._horizontal_nav.remove()
except AttributeError:
pass
self._horizontal_nav = self._axes.axhline(position, color='r')
def vertical_position(self, position):
if self._vertical_nav:
try:
self._vertical_nav.remove()
except AttributeError:
pass
self._vertical_nav = self._axes.axvline(position, color='r')
def __repr__(self):
return '{}: {}, {}, {}'.format(self.__class__, self._x, self._y, self._z)
def __str__(self):
return '{}: {}, {}'.format(self._x, self._y, self._z)
class SagittalCanvas(AnatomicalCanvas):
def __init__(self, *args, **kwargs):
super(SagittalCanvas, self).__init__(*args, **kwargs)
x, y, z, _, dx, dy, dz, _ = self._image.dim
self._init_ui(self._image.data[:, :, self._z], dx / dy)
self.annotations = []
self.x_max = y
self.y_max = x
def refresh(self):
self._x, self._y, self._z = [int(i) for i in self._parent._controller.position]
data = self._image.data[:, :, self._z]
self.view.set_array(data)
super(SagittalCanvas, self).refresh()
def on_select_point(self, event):
if event.xdata is not None and event.ydata is not None and event.button == 1:
self.point_selected_signal.emit(event.ydata, event.xdata, self._z)
def plot_points(self):
"""Plot the controller's list of points (x, y) and annotate the point with the label"""
if self._plot_points:
logger.debug('Plotting points {}'.format(self._parent._controller.points))
points = self._parent._controller.points
self.clear()
try:
xs, ys, zs, labels = zip(*points)
self.plot_data(ys, xs, labels)
except ValueError:
pass
def plot_position(self):
position = self._parent._controller.position
self.horizontal_position(position[0])
self.vertical_position(position[1])
class CoronalCanvas(AnatomicalCanvas):
def __init__(self, *args, **kwargs):
super(CoronalCanvas, self).__init__(*args, **kwargs)
x, y, z, _, dx, dy, dz, _ = self._image.dim
self._init_ui(self._image.data[:, self._y, :], dx / dz)
self.x_max = x
self.y_max = z
def refresh(self):
self._x, self._y, self._z = [int(i) for i in self._parent._controller.position]
data = self._image.data[:, self._y, :]
self.view.set_array(data)
super(CoronalCanvas, self).refresh()
def on_select_point(self, event):
if event.xdata is not None and event.ydata is not None and event.button == 1:
self.point_selected_signal.emit(event.xdata, self._y, event.ydata)
def plot_points(self):
logger.debug('Plotting points {}'.format(self._parent._controller.points))
if self._parent._controller.points:
points = [x for x in self._parent._controller.points]
self.clear()
try:
xs, ys, zs, _ = zip(*points)
self.plot_data(xs, zs, [])
except ValueError:
pass
self.view.figure.canvas.draw()
class AxialCanvas(AnatomicalCanvas):
def __init__(self, *args, **kwargs):
super(AxialCanvas, self).__init__(*args, **kwargs)
x, y, z, _, dx, dy, dz, _ = self._image.dim
self._init_ui(self._image.data[self._x, :, :], dy / dz)
self.x_max = z
self.y_max = y
def refresh(self):
self._x, self._y, self._z = [int(i) for i in self._parent._controller.position]
data = self._image.data[self._x, :, :]
self.view.set_array(data)
super(AxialCanvas, self).refresh()
def on_select_point(self, event):
if event.xdata is not None and event.ydata is not None and event.button == 1:
self.point_selected_signal.emit(self._x, event.ydata, event.xdata)
def plot_points(self):
if self._plot_points:
controller = self._parent._controller
logger.debug('Plotting points {}'.format(controller.points))
points = [x for x in controller.points if x[0] == controller.position[0]]
self.clear()
try:
xs, ys, zs, _ = zip(*points)
self.plot_data(zs, ys, [])
except ValueError:
pass
def plot_position(self):
position = self._parent._controller.position
self.horizontal_position(position[1])
self.vertical_position(position[2])
class AnatomicalToolbar(NavigationToolbar):
def __init__(self, canvas, parent):
self.toolitems = (('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
('Back', 'Back to previous view', 'back', 'back'),
('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'))
super(AnatomicalToolbar, self).__init__(canvas, parent)
|
# -*- coding: utf-8 -*-
#================================================================
# Copyright (C) 2020 * Ltd. All rights reserved.
# Time : 2020/3/15 18:01
# Author : Xuguosheng
# contact: xgs11@qq.com
# File : character_5.py
# Software: PyCharm
# Description : image classification on Fashion-MNIST
#================================================================
import tensorflow as tf
from tensorflow import keras
import numpy as np
import time
import sys
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import fashion_mnist
import cv2
import os
# os.environ['CUDA_DEVICE_ORDER'] = '-1'
def get_fashion_mnist_labels(labels):
text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
return [text_labels[int(i)] for i in labels]
def softmax(logits, axis=-1):
return tf.exp(logits)/tf.reduce_sum(tf.exp(logits), axis, keepdims=True)
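# Hedged aside (not used below): tf.exp can overflow for large logits, so a
# numerically safer variant subtracts the per-row maximum before exponentiating.
def stable_softmax(logits, axis=-1):
    shifted = logits - tf.reduce_max(logits, axis, keepdims=True)
    return tf.exp(shifted) / tf.reduce_sum(tf.exp(shifted), axis, keepdims=True)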
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
batch_size = 256
if sys.platform.startswith('win'):
num_workers = 0 # 0 means no extra worker processes are used to speed up data loading
else:
num_workers = 4
x_train = tf.cast(x_train/255.,dtype=tf.float32)
x_test = tf.cast(x_test/255.,dtype=tf.float32)
train_iter = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
test_iter = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)
# num_inputs = 784
# num_outputs = 10
# W = tf.Variable(tf.random.normal(shape=(num_inputs,num_outputs),dtype=tf.float32))
# b = tf.Variable(tf.zeros(num_outputs,dtype=tf.float32))
num_inputs = 784
num_outputs = 10
W = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01, dtype=tf.float32))
b = tf.Variable(tf.zeros(num_outputs, dtype=tf.float32))
def net(X):
xx = tf.reshape(X, shape=(-1, W.shape[0]))
logits = tf.matmul(xx, W)
logits +=b
return softmax(logits)
def cross_entropy(y_hat, y):
y = tf.cast(tf.reshape(y, shape=[-1, 1]),dtype=tf.int32)
y = tf.one_hot(y, depth=y_hat.shape[-1])
y = tf.cast(tf.reshape(y, shape=[-1, y_hat.shape[-1]]),dtype=tf.int32)
return -tf.math.log(tf.boolean_mask(y_hat, y)+1e-8)
# y_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
# y = np.array([0, 2], dtype='int32')
# z = tf.boolean_mask(y_hat, tf.one_hot(y, depth=3))
def accuracy(y_hat, y):
return np.mean((tf.argmax(y_hat, axis=1) == y)) # compare the index of the largest prediction with the label
def evaluate_accuracy(data_iter,net):
acc_sum,n =0.0 ,0
for x,y in data_iter:
y = tf.cast(y,dtype=tf.int32)
acc_sum +=np.sum(tf.cast(tf.argmax(net(x),axis =1),dtype=tf.int32)==y)
n +=y.shape[0]
return acc_sum/n
num_epochs, lr = 5, 0.1
# this function is also kept in the d2lzh package for later reuse
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, trainer=None):
for epoch in range(num_epochs):
train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
for X, y in train_iter:
with tf.GradientTape() as tape:
y_hat = net(X)
l = tf.reduce_sum(loss(y_hat, y))
grads = tape.gradient(l, params)
if trainer is None:
# if no optimizer is passed in, fall back to the hand-written mini-batch stochastic gradient descent
for i, param in enumerate(params):
param.assign_sub(lr * grads[i] / batch_size)
else:
# used directly, tf.keras.optimizers.SGD performs plain SGD: theta(t+1) = theta(t) - learning_rate * gradient
# mini-batch gradient descent is wanted here, so the gradients are divided by batch_size, matching trainer.step(batch_size) in the original book code
trainer.apply_gradients(zip([grad / batch_size for grad in grads], params))
# trainer.apply_gradients(zip(grads/batch_size, params))
y = tf.cast(y, dtype=tf.float32)
train_l_sum += l.numpy()
train_acc_sum += tf.reduce_sum(tf.cast(tf.argmax(y_hat, axis=1) == tf.cast(y, dtype=tf.int64), dtype=tf.int64)).numpy()
n += y.shape[0]
test_acc = evaluate_accuracy(test_iter, net)
print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'% (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
start = time.time()
trainer = tf.keras.optimizers.SGD(lr)
train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size, [W, b], lr,trainer)
print('use time is: ',time.time()-start)
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(10, activation=tf.nn.softmax)])
print(len(model.weights))
print(model.weights[0])
print(model.weights[1])
random_x = tf.random.normal(shape=(1,28,28),mean=0,stddev=0.1,dtype=tf.float32)
random_y = model.predict(random_x)
yyy = np.array(random_y)
index = np.argmax(yyy)
conf = np.max(yyy)
print(random_y)
print(index,conf)
# data loading and inspection test: 28x28 grayscale images
# feature,label=x_train[0],y_train[0]
# print(type(x_test),type(y_test))
# print(feature,label)
# cv2.imshow('first_img',feature)
# cv2.waitKey(0)
# keras.layers.Conv2d()
# rebuild the model before compiling and training it with the built-in Keras API
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(10, activation=tf.nn.softmax)])
lr = 0.05
optimizer = keras.optimizers.SGD(lr)
loss = 'sparse_categorical_crossentropy'
model.compile(optimizer,loss,metrics=['accuracy'])
model.fit(x_train,y_train,batch_size=256)
test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test Acc:',test_acc)
|
import re
import sublime
import sublime_plugin
def plugin_loaded():
global g_settings
g_settings = sublime.load_settings('sxs_settings.sublime-settings')
g_settings.clear_on_change('sxs_settings')
update_settings()
g_settings.add_on_change('sxs_settings', update_settings)
def update_settings():
global g_hide_minimap
global g_filter_platform
global g_open_in_distraction_free
g_hide_minimap = g_settings.get('hide_minimap', False)
g_filter_platform = g_settings.get('filter_platform', False)
g_open_in_distraction_free = g_settings.get('open_in_distraction_free', False)
def plugin_unloaded():
g_settings.clear_on_change('sxs_settings')
def open_window(self, left_path):
last_slash = left_path.rfind("/")
right_path = left_path[(last_slash+1):] # Extract the filename
# If we're opening a .sublime-keymap file, the right pane should always open
# to "Default ({platform}).sublime-keymap" since that's where keys should be
# put.
if re.search(r"\.sublime-keymap", left_path):
platform = sublime.platform()
if platform == "linux":
platform = "Linux"
elif platform == "osx":
platform = "OSX"
else:
platform = "Windows"
right_path = "Default (" + platform + ").sublime-keymap"
# Test to see if we are opening a platform-specific settings file. If so,
# strip the platform specific portion of the filename (platform-specific
# files are ignored in the User directory)
elif re.search(r" \((?:Linux|OSX|Windows)\).sublime-settings", left_path):
right_path = re.sub(r" \((?:Linux|OSX|Windows)\)", "", right_path)
# Default to object notation for sublime-settings files
right_contents = "{\n\t$0\n}\n"
if re.search(r"\.sublime-keymap", left_path):
# Use array notation for sublime-keymap files
right_contents = "[\n\t$0\n]\n"
active_window = sublime.active_window()
active_window.run_command("edit_settings", {'base_file': "${packages}/" + left_path, "default": right_contents})
new_window = sublime.active_window()
if g_hide_minimap:
new_window.set_minimap_visible(False)
if g_open_in_distraction_free:
new_window.run_command('toggle_distraction_free')
new_window.run_command('toggle_tabs')
class SxsSelectFileCommand(sublime_plugin.WindowCommand):
platform_filter = {
'linux': ('OSX', 'Windows'),
'osx': ('Linux', 'Windows'),
'windows': ('Linux', 'OSX'),
}
def __init__(self, window):
super(SxsSelectFileCommand, self).__init__(window)
self.file_list = []
self.last_index = -1
def run(self):
# Clear our cache
del self.file_list[:]
platforms_to_filter = self.platform_filter.get(sublime.platform())
settings_list = sublime.find_resources("*.sublime-settings")
keymap_list = sublime.find_resources("*.sublime-keymap")
temp_list = settings_list + keymap_list
for i, item in enumerate(temp_list):
temp_item = re.sub(r"^Packages/", "", item)
# Ignore anything we find in the User directory
# (those will get treated as "right pane" files)
if re.match(r"0_settings_loader/|User/", temp_item):
continue
else:
# Skip the necessary platforms if we're filtering on platform
if g_filter_platform:
skip = False
for p in platforms_to_filter:
if p in temp_item:
skip = True
break
if skip:
continue
self.file_list.append(temp_item)
self.file_list.sort()
self.window.show_quick_panel(self.file_list, self.on_done, 0, self.last_index)
def on_done(self, index):
if index == -1:
return
self.last_index = index
open_window(self.window, self.file_list[index])
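# Hedged usage note: Sublime derives the command name "sxs_select_file" from the
# class above, so a keymap entry such as the following (an illustration only, not
# shipped with this plugin) would open the side-by-side settings picker:
# [
#     {"keys": ["ctrl+alt+s"], "command": "sxs_select_file"}
# ]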
|