text stringlengths 8 6.05M |
|---|
# Enter script code
keyboard.send_keys("<f6>6") |
from .. import Interpreter, adapter
from ..interface import Block
from typing import Optional
import random
class RandomBlock(Block):
    """Tag block that picks one random entry from its payload.

    Accepts the verb declarations ``random``, ``#`` and ``rand``.  The payload
    is split on ``~`` when present, otherwise on ``,``, and one element is
    chosen.  A supplied parameter seeds the draw, making it deterministic.
    """

    def will_accept(self, ctx: Interpreter.Context) -> bool:
        # Case-insensitive match against the supported declarations
        # (membership test replaces the any([...]) construction).
        dec = ctx.verb.declaration.lower()
        return dec in ("random", "#", "rand")

    def process(self, ctx: Interpreter.Context) -> Optional[str]:
        if ctx.verb.payload is None:
            return None
        # "~" takes precedence as the separator so items may contain commas.
        if "~" in ctx.verb.payload:
            spl = ctx.verb.payload.split("~")
        else:
            spl = ctx.verb.payload.split(",")
        # Use a dedicated Random instance seeded with the parameter instead of
        # random.seed(), which clobbered global RNG state for unrelated
        # callers.  The drawn value is identical for the same seed.
        rng = random.Random(ctx.verb.parameter)
        return rng.choice(spl)
# Find, in 10-dollar steps, the lowest fixed monthly payment that pays off the
# debt within a year under monthly compounding interest.
balance = 3329
annualInterestRate = 0.2
monthlyInterestRate = annualInterestRate / 12.0
guess = 10

def yearEndBalance(monthPayment):
    """Return the balance remaining after 12 months of paying *monthPayment*.

    Interest accrues each month on the post-payment balance.  (The original
    name had a typo: 'yearEndBanlance'.)
    """
    mybalance = balance
    for _ in range(12):
        interest = (mybalance - monthPayment) * monthlyInterestRate
        mybalance = mybalance + interest - monthPayment
    return mybalance

while yearEndBalance(guess) >= 0:
    guess += 10
# print() works on both Python 2 and 3; the original used the Python-2-only
# print statement.
print('Lowest payment: ' + str(guess))
from django.db import models
import datetime
from django.utils import timezone
# Create your models here.
#a question has a question text and a publication date
class Question(models.Model):
    """A poll question: its text plus the date it was published."""
    question_text=models.CharField(max_length=200)
    pub_date=models.DateTimeField('date published')

    def __str__(self):
        return self.question_text

    def was_published_recently(self):
        """Return True only when pub_date falls within the last day.

        The original ``pub_date >= now - 1 day`` also returned True for
        questions with a *future* pub_date; bounding on both sides fixes that.
        """
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
#a choice is it's own sheet. two parts question- referenced from
#what question you chose to answer earlier. and vote tally
# A Choice belongs to one Question (deleted along with it via CASCADE) and
# carries its display text plus a running vote tally.
class Choice (models.Model):
    # The Question this choice answers; CASCADE removes choices with it.
    question=models.ForeignKey(Question, on_delete=models.CASCADE)
    # Display text shown to the voter.
    choice_text=models.CharField(max_length=200)
    # Vote tally, starts at zero.
    votes=models.IntegerField(default=0)
    def __str__(self):
        return self.choice_text
|
# new file in github
# try to pull
# simultaneously change local/origin and master
# these 2 lines are changed in origin
# simultaneously change this file in origin and master
# this file changed in master
# The conflicts were resolved by deleting the local file and changing the master file, then pulling
|
#-*- coding=utf-8 -*-
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column,Integer,String
from sqlalchemy.orm import sessionmaker
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats
from numpy import *
from decimal import *
# Load the raw NMR spectrum from a local Excel workbook; row 0 holds the ppm
# axis, every following row one spectrum's intensities at those positions.
f = pd.read_excel("F:/study/fish/testdata.xls")
data = array(f)
ppm = data[0]
peak_values = []
data1 = []
# Collect all intensity rows (everything after the ppm axis row).
for i in range(1,len(data)):
    data1.append(data[i])
# Transpose so each entry of list_data is one ppm position's intensity list.
traindata = array(data1).transpose()
list_data = traindata.tolist()
#print len(list_data)
def bubble(values):
    """Return the trimmed mean of *values*: sort, drop min and max, average.

    NOTE: mutates the caller's list in place (it ends up sorted and loses its
    smallest and largest element), matching the original behaviour.

    :param values: list of numbers with at least three elements
    :return: numpy mean of the remaining elements
    """
    # list.sort() replaces the hand-written O(n^2) bubble sort.  The original
    # guard "len(list) != None" was always true and is dropped; the parameter
    # no longer shadows the builtin ``list``.
    values.sort()
    values.pop(0)   # discard the minimum
    values.pop(-1)  # discard the maximum
    return average(values)
# Reduce every ppm position's intensity list to its trimmed mean.
for i in range(len(list_data)):
    peak_value = bubble(list_data[i])
    peak_values.append(peak_value)
class std_data():
    """Reference record for one standard compound in the NMR library:
    chemical shift (ppm), English name, abbreviation and expected
    multiplicity code ('s', 'd', 't', 'q' or 'm')."""

    def __init__(self, ppm, Ename, short, Rtype):
        self.Rtype = Rtype
        self.short = short
        self.Ename = Ename
        self.ppm = ppm
# Reference compound we try to identify in the measured spectrum.
test_data = std_data(3.325,'testnmr','Tn','d')
def select_distance(list1, num):
    """Collect ppm positions (and their peak values) within 0.15 ppm of *num*.

    Scanning stops once the axis has passed *num* by more than 0.16.
    Relies on the module-level ``ppm`` and ``peak_values`` arrays.
    """
    selected_ppm = []
    selected_vals = []
    for i in range(len(list1)):
        shift = ppm[i]
        if abs(shift - num) < 0.15:
            selected_ppm.append(shift)
            selected_vals.append(peak_values[i])
        if shift - num > 0.16:
            break
    return selected_ppm, selected_vals
# (ppm positions, peak values) found near the reference compound's shift.
ss= select_distance(ppm,test_data.ppm)
def sum_peak(list2,select):
    """Count local maxima in *list2* and return the middle one.

    :param list2: peak values within the selected window
    :param select: (ppm list, value list) pair from select_distance
    :return: 'no' when no local maximum exists, otherwise a tuple
             (count, ppm of the middle peak, value of the middle peak)
    """
    total = 0
    peak_ppm = []
    peak_data = []
    # Interior points strictly greater than both neighbours are peaks.
    for i in range(1,len(list2)-1):
        if list2[i]>list2[i-1] and list2[i]>list2[i+1]:
            total += 1
            peak_ppm.append(select[0][i])
            peak_data.append(select[1][i])
    if total == 0:
        return 'no'
    else:
        # Floor division: the original "/" relied on Python-2 integer
        # division and would produce a float index (TypeError) on Python 3.
        big_num = len(peak_data)//2
        high_ppm = peak_ppm[big_num]
        high_data = peak_data[big_num]
        return total,high_ppm,high_data
# Middle peak found near the reference shift ('no' when nothing was found).
high_peak = sum_peak(ss[1],ss)
#print high_peak
def set_peaknum(num1):
    """Translate a peak count into an NMR multiplicity code.

    Returns 'failed' when peak detection found nothing, otherwise
    's'/'d'/'t'/'q' for 1-4 peaks, 'm' above 4, and the original fallback
    string for anything else.
    """
    if high_peak == 'no':
        return 'failed'
    if num1 > 4:
        return 'm'
    codes = {1: 's', 2: 'd', 3: 't', 4: 'q'}
    return codes.get(num1, 'nuknow')
# Classify the detected multiplicity and report whether it matches the
# reference compound's expected type ('d' for test_data).
type_peak = set_peaknum(high_peak[0])
#print type_peak
if type_peak == test_data.Rtype:
    # Python 2 print statements: this script predates Python 3.
    print 'English : ' + test_data.Ename
    print 'PPM ; ' + str(high_peak[1])
    print 'Abbreviation :' + test_data.short
else:
    print 'failed'
|
import numpy as np
import os
# Input locations of the pre-processed census extracts (produced by an
# earlier cleaning step); read below, normalised/encoded, then re-written.
training_data_path = 'census-income.data/training_data_preprocess2'
testing_data_path = 'census-income.test/testing_data_preprocess2'
# Read the data of a file into an array
def read_file_into_array(file_path):
    """Read *file_path* and return a list of rows, each split on ', '.

    The trailing element produced by the file's final newline is dropped.
    """
    # "with" guarantees the handle is closed even when reading raises; the
    # original leaked the handle on error.
    with open(file_path) as data_file:
        lines = data_file.read().split("\n")
    return [line.split(', ') for line in lines][:-1]
# Read the files into arrays for processing the training and testing data.
train_data = read_file_into_array(training_data_path)
test_data = read_file_into_array(testing_data_path)
# Declare arrays with the different attribute values
num_attributes = 18
class_of_worker_attributes = ['Not Employed', 'Private', 'Self-Employed', 'Local Government', 'State Government', 'Federal Government']
education_attributes = ['Less than high school', 'College', 'Bachelors', 'Masters', 'Prof Degree', 'Doctorate']
education_enrollment_attributes = ['Not in School', 'High School', 'College or University']
married_attributes = ['Not Married', 'Married', 'Divorced', 'Widowed']
race_attributes = ['Asian or Pacific Islander', 'White', 'Other', 'Amer Indian Aleut or Eskimo', 'Black']
# BUG FIX: this was ['Male, Female'] — a single one-element string — so
# binarization() never matched value_array[0] and mapped every record to 1.
sex_attributes = ['Male', 'Female']
employment_attributes = ['Not Employed', 'Part Time', 'Full Time']
tax_filer_status_attributes = ['Head of Household', 'Joint', 'Single', 'Nonfiler']
country_of_birth_of_parents_attributes = ["USA", "Both not USA"]
country_of_birth_of_person = ["American", "Not American"]
income = ["- 50000.", "50000+."]
# A normalizing function for age that divides by 90 (the maximum age in the data)
def normalize_age_values(data_array, index):
    """Scale the age column (at *index*) into [0, 1] in place by dividing by 90."""
    for row in data_array:
        row[index] = float(row[index]) / 90.0
    return data_array
# Normalizes a real valued feature by dividing by max-min
def normalize_values(data_array, index, max_value, min_value):
    """Min-max normalise the real-valued column at *index* in place."""
    span = float(max_value - min_value)
    for row in data_array:
        row[index] = (float(row[index]) - min_value) / span
    return data_array
# Turns a categorical variable with two values two a single binary digit
def binarization(data_array, index, value_array):
    """Map a two-valued categorical column to 0.0/1.0 in place.

    ``value_array[0]`` becomes 0.0; any other value becomes 1.0.
    """
    for row in data_array:
        row[index] = 0.0 if row[index] == value_array[0] else 1.0
    return data_array
# Normalize the data values.
# NOTE(review): the divisors (10000, 100000, 4608, 52) look like per-column
# maxima of the raw census data — confirm against the source files.
train_data = normalize_age_values(train_data, 0) # Normalize the age values
test_data = normalize_age_values(test_data, 0)
train_data = normalize_values(train_data, 5, 10000, 0) # Normalize the wage data
test_data = normalize_values(test_data, 5, 10000, 0)
train_data = normalize_values(train_data, 11, 100000, 0) # Normalize the Capital Gains
test_data = normalize_values(test_data, 11, 100000, 0)
train_data = normalize_values(train_data, 12, 4608, 0) # Normalize the Capital Losses
test_data = normalize_values(test_data, 12, 4608, 0)
train_data = normalize_values(train_data, 13, 100000, 0) # Normalize the Stock Dividends
test_data = normalize_values(test_data, 13, 100000, 0)
train_data = normalize_values(train_data, 18, 52, 0) # Normalize the number of weeks worked
test_data = normalize_values(test_data, 18, 52, 0)
# Convert the Industry Code and Occupation Code to floats
for i in range(0, len(train_data)):
    train_data[i][2] = float(train_data[i][2])
    train_data[i][3] = float(train_data[i][3])
for i in range(0, len(test_data)):
    test_data[i][2] = float(test_data[i][2])
    test_data[i][3] = float(test_data[i][3])
# One hot encode a categorical value
def one_hot_encode(value_array, value):
    """Return a one-hot float vector over *value_array*: 1.0 where the entry
    equals *value*, 0.0 elsewhere (all zeros when *value* is absent)."""
    # Comprehension replaces the manual append loop.
    return [1.0 if v == value else 0.0 for v in value_array]
# One-hot encode an entire array
def one_hot_encode_feature(data_array, index, value_array):
    """Replace column *index* of every row with its one-hot encoding."""
    for row in data_array:
        row[index] = one_hot_encode(value_array, row[index])
    return data_array
train_data = one_hot_encode_feature(train_data, 1, class_of_worker_attributes) # One-hot encode the class of worker attribute
test_data = one_hot_encode_feature(test_data, 1, class_of_worker_attributes)
train_data = one_hot_encode_feature(train_data, 4, education_attributes) # One-hot encode the educational attribute
test_data = one_hot_encode_feature(test_data, 4, education_attributes)
train_data = one_hot_encode_feature(train_data, 6, education_enrollment_attributes) # One-hot encode the enrollment attribute
test_data = one_hot_encode_feature(test_data, 6, education_enrollment_attributes)
train_data = one_hot_encode_feature(train_data, 7, married_attributes) # One-hot encode the married attribute
test_data = one_hot_encode_feature(test_data, 7, married_attributes)
train_data = one_hot_encode_feature(train_data, 8, race_attributes) # One-hot encode the race attribute
test_data = one_hot_encode_feature(test_data, 8, race_attributes)
train_data = binarization(train_data, 9, sex_attributes) # Binarize the sex attribute
test_data = binarization(test_data, 9, sex_attributes)
train_data = one_hot_encode_feature(train_data, 10, employment_attributes) # One-hot encode the employment attributes
test_data = one_hot_encode_feature(test_data, 10, employment_attributes)
train_data = one_hot_encode_feature(train_data, 14, tax_filer_status_attributes) # One-hot encode the tax filer attribute
test_data = one_hot_encode_feature(test_data, 14, tax_filer_status_attributes)
train_data = binarization(train_data, 16, country_of_birth_of_parents_attributes) # Binarize the country of the parents attribute
test_data = binarization(test_data, 16, country_of_birth_of_parents_attributes)
train_data = binarization(train_data, 17, country_of_birth_of_person) # Binarize the country of birth of the person attribute
test_data = binarization(test_data, 17, country_of_birth_of_person)
train_data = binarization(train_data, 19, income) # Binarize the income data (the label column)
test_data = binarization(test_data, 19, income)
# Columns whose cells are now one-hot sub-lists; flatten_data splices these
# inline when producing the final flat feature vectors.
list_indices = [1, 4, 6, 7, 8, 10, 14]
# Custom flattening function
def flatten_data(data_array):
    """Flatten every row: splice the one-hot sub-lists (columns listed in the
    module-level ``list_indices``) inline so each row becomes a flat vector.
    """
    flattened = []
    for row in data_array:
        flat_row = []
        # The column count is taken from the first row, as in the original.
        for j in range(len(data_array[0])):
            if j in list_indices:
                flat_row.extend(row[j])
            else:
                flat_row.append(row[j])
        flattened.append(flat_row)
    return flattened
# Flatten the arrays into one 1-d float vector per example.
train_data = np.array(flatten_data(train_data)).astype(float)
test_data = np.array(flatten_data(test_data)).astype(float)
def get_possible_attribute_values(data_array, index):
    """Return the set of distinct values found in column *index*."""
    return {row[index] for row in data_array}
# NOTE(review): after flattening, columns 7 and 8 are assumed to line up with
# the industry/occupation codes — confirm the offset arithmetic.
industry_code_set = get_possible_attribute_values(train_data, 7) # get the possible values for the industry code attribute
occupation_code_set = get_possible_attribute_values(train_data, 8) # get the possible values for the occupation code attribute
def get_attribute_counts(data_array, attribute_set, index):
    """Count, for each value in *attribute_set*, its total occurrences and its
    positive-label occurrences in column *index*.

    The label is the last column of a row; a row counts as positive when that
    value equals 1.  Values outside *attribute_set* are ignored.

    :return: (count_dict, count_pos_dict)
    """
    count_dict = {x: 0 for x in attribute_set}      # occurrences per value
    count_pos_dict = {x: 0 for x in attribute_set}  # positive occurrences
    if not data_array:
        return count_dict, count_pos_dict
    label_index = len(data_array[0]) - 1
    # Single pass over the rows instead of one full pass per attribute value
    # (the original was O(|attribute_set| * len(data_array))).
    for row in data_array:
        value = row[index]
        if value in count_dict:
            count_dict[value] += 1
            if row[label_index] == 1:
                count_pos_dict[value] += 1
    return count_dict, count_pos_dict
# Per-code totals and positive-label counts used for calibration below.
industry_code_counts, industry_code_pos_counts = get_attribute_counts(train_data, industry_code_set, 7)
occupation_code_counts, occupation_code_pos_counts = get_attribute_counts(train_data, occupation_code_set, 8)
# Compute the calibration using the Laplace correction
def compute_calibration(total_count, num_pos):
    """Laplace-corrected calibration for a count-valued feature.

    :param total_count: occurrences of the feature value
    :param num_pos: occurrences that carried a positive label
    :return: smoothed positive fraction, weighted by the prior odds
    """
    # Prior odds of the positive class (an American making more than 50K/yr).
    prior_odds = 0.3504 / (1 - 0.3504)
    smoothed_pos = float(num_pos + 1)
    smoothed_neg = prior_odds * (total_count - num_pos + 1)
    return smoothed_pos / (smoothed_pos + smoothed_neg)
def calibrate_features(data_array, counts, pos_counts, index):
    """Replace the code in column *index* of every row with its calibrated
    positive probability (see compute_calibration)."""
    for row in data_array:
        code = row[index]
        row[index] = compute_calibration(counts[code], pos_counts[code])
    return data_array
# Calibrate the industry code and occupation code
train_data = calibrate_features(train_data, industry_code_counts, industry_code_pos_counts, 7)
test_data = calibrate_features(test_data, industry_code_counts, industry_code_pos_counts, 7)
train_data = calibrate_features(train_data, occupation_code_counts, occupation_code_pos_counts, 8)
test_data = calibrate_features(test_data, occupation_code_counts, occupation_code_pos_counts, 8)
# Shuffle row order in place before writing out.
np.random.shuffle(train_data)
np.random.shuffle(test_data)
# Convert the array to a string values for writing to a file
def convert_floats_to_string(data_array):
    """Return a copy of *data_array* with every cell stringified.

    Nested comprehension replaces the manual append loops; iterating each row
    directly also handles ragged rows (the original indexed every row by the
    length of row 0).
    """
    return [[str(cell) for cell in row] for row in data_array]
# Convert the float values to the string for writing to a file
train_data = convert_floats_to_string(train_data)
test_data = convert_floats_to_string(test_data)
# Write the pre-processed data to a file for the next stage of processing
def write_array_to_file(data_array, file_path):
    """Write each row of *data_array* as one comma-separated line.

    The redundant ``f.close()`` inside the ``with`` block is removed — the
    context manager already closes the file.  (Mode 'w' truncates, so the
    explicit delete is belt-and-braces; kept to preserve original intent.)
    """
    if os.path.exists(file_path):
        os.remove(file_path)
    with open(file_path, 'w') as f:
        for row in data_array:
            f.write("%s\n" % ", ".join(row))
# Paths for writing (fixed the 'train_dath_path' typo in the variable name;
# it is local to this block).
train_data_path = 'census-income.data/train_data'
test_data_path = 'census-income.test/test_data'
# Write the pre-processed data to files for the next feature-selection step.
write_array_to_file(train_data, train_data_path)
write_array_to_file(test_data, test_data_path)
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense, Conv2D, Flatten
from Architectures.Algorithm import Algorithm
# noinspection PyMethodOverriding
# noinspection PyMethodOverriding
class ConvNet(Algorithm):
    """A small sequential CNN: a stack of 3x3 ReLU Conv2D layers followed by
    Flatten and a softmax Dense output, compiled with Adam and categorical
    cross-entropy."""

    def __init__(self, hidden_layer: list, n_classes: int, input_shape: tuple):
        """Build, compile and summarise the network.

        :param hidden_layer: filter counts, one entry per Conv2D layer
        :param n_classes: number of output classes (softmax units)
        :param input_shape: shape of one input sample, e.g. (28, 28, 1)
        """
        self.__model = Sequential()
        self.__model.add(Conv2D(filters=hidden_layer[0], kernel_size=3, activation='relu', input_shape=input_shape))
        # Plain loop replaces the side-effect-only list comprehension.
        for n_filters in hidden_layer[1:]:
            self.__model.add(layer=Conv2D(filters=n_filters, kernel_size=3, activation='relu'))
        self.__model.add(layer=Flatten())
        self.__model.add(layer=Dense(units=n_classes, activation='softmax'))
        self.__model.compile(optimizer='adam', loss='categorical_crossentropy')
        self.__model.summary()

    def fit(self, x, y, batch_size, epochs, val_split):
        """Fit the neural network on data x and labels y.

        :param x: a numpy.ndarray of shape (n_examples, n_features, n_channels)
        :param y: a numpy.ndarray of shape (n_examples, n_classes)
        :param batch_size: an int specifying the number of examples per batch
        :param epochs: an int specifying the number of training epochs
        :param val_split: a float < 1, fraction of training data used for validation
        :return: the Keras History object for the run (previously discarded)
        """
        return self.__model.fit(x=x, y=y, batch_size=batch_size, epochs=epochs, validation_split=val_split)

    def evaluate(self, x, y):
        """Evaluate the network on data x and labels y.

        :return: the loss value computed by Keras — the original dropped it.
        """
        return self.__model.evaluate(x=x, y=y)

    def predict(self, x):
        """Generate predictions for the samples x.

        :return: the prediction array — the original computed it and then
                 discarded it, making the method useless to callers.
        """
        return self.__model.predict(x=x)

    def save(self, file):
        """Save the model and the weights to *file*."""
        self.__model.save(filepath=file)

    def load(self, file):
        """Load the model and weights from *file*."""
        self.__model = load_model(filepath=file)
if __name__ == '__main__':
    # Smoke test: build (and summarise) a 3-conv-layer model for MNIST-shaped input.
    network = ConvNet([64, 128, 256], 10, (28, 28, 1))
from unittest import mock
import numpy as np
import pytest
from .constants import PAD_VALUE, SHAPE, PADDED_SHAPE, OFFSET, TEST_DIR, PROJECT_DIR
from .file_helpers import file_constructors
@pytest.fixture
def array():
    # Deterministic random test image; values start strictly above PAD_VALUE
    # so padding can be distinguished from data.
    np.random.seed(1)
    return np.random.randint(PAD_VALUE + 1, 256, SHAPE, dtype=np.uint8)
@pytest.fixture
def padded_array(array):
    """The `array` fixture embedded at OFFSET inside a PAD_VALUE-filled
    canvas of shape PADDED_SHAPE."""
    canvas = np.ones(shape=PADDED_SHAPE, dtype=np.uint8) * PAD_VALUE
    window = tuple(slice(start, start + size) for start, size in zip(OFFSET, SHAPE))
    canvas[window] = array
    return canvas
@pytest.fixture(params=file_constructors, ids=lambda pair: pair[0])
def data_file(request, tmpdir, padded_array):
    # Parametrised over (extension, writer) pairs; the writer's return value
    # reports whether reading the format back needs the internal reader.
    ext, fn = request.param
    path = str(tmpdir.join("data." + ext))
    requires_internal = fn(path, padded_array)
    return path, requires_internal
@pytest.fixture
def subplots_patch():
    # Replace plt.subplots with a (fig, ax) pair of mocks so plotting code can
    # run headless; yields the patch object for call assertions.
    with mock.patch(
        "matplotlib.pyplot.subplots", return_value=(mock.Mock(), mock.Mock())
    ) as sp_mock:
        yield sp_mock
@pytest.fixture
def data_dir():
    """Path to the test data directory; fail fast with a hint when missing."""
    dpath = TEST_DIR / "data"
    if dpath.is_dir():
        return dpath
    rel_path = dpath.relative_to(PROJECT_DIR)
    pytest.fail(
        "Test data directory at '{}' required but not found: run `make data`".format(
            rel_path
        )
    )
@pytest.fixture
def data_tif(data_dir):
    """Path to the sample TIFF; fail fast with a hint when missing."""
    fpath = data_dir / "data.tif"
    if fpath.is_file():
        return fpath
    rel_path = fpath.relative_to(PROJECT_DIR)
    pytest.fail(
        "Test data at '{}' required but not found: run `make data`".format(rel_path)
    )
|
from math import factorial
def factorial_first(number):
    """Return number! (thin wrapper kept for interface compatibility)."""
    return factorial(number)

def factorial_second(number):
    """Return number! — identical twin of factorial_first, kept for
    interface compatibility."""
    return factorial(number)

def factorial_division(a=None, b=None):
    """Return a!/b! formatted to two decimal places.

    Generalised with optional arguments: with no arguments it falls back to
    the module-level ``integer_1`` / ``integer_2`` (read from stdin below),
    preserving the original call style; passing values makes it testable
    and reusable.
    """
    if a is None:
        a = integer_1
    if b is None:
        b = integer_2
    result = factorial_first(a) / factorial_second(b)
    return f"{result:.2f}"
# Read the two integers from stdin, then print integer_1!/integer_2! to two
# decimal places.
integer_1 = int(input())
integer_2 = int(input())
print(factorial_division())
# LEVEL 23
# www.pythonchallenge.com/pc/hex/bonus.html
import this  # printing the Zen of Python is part of the challenge

# Hand-built ROT13: str1 is a-z, str2 is each letter shifted 13 places with
# wrap-around; translate the cipher text through the mapping table.
first = ord('a')
last = ord('z')
codes = range(first, last + 1)
str1 = "".join(map(chr, codes))
str2 = "".join(chr(first + (c - first + 13) % 26) for c in codes)
print(str1)
print(str2)
table = str.maketrans(str1, str2)
print('va gur snpr bs jung?'.translate(table))
# in the face of what?
|
'''
7. Reverse Integer
Reverse digits of an integer.
Example1: x = 123, return 321
Example2: x = -123, return -321
Note:
The input is assumed to be a 32-bit signed integer. Your function should
return 0 when the reversed integer overflows.
'''
class Solution(object):
    """Digit-by-digit integer reversal (LeetCode 7)."""

    def reverse(self, x):
        """Reverse the digits of *x*, keeping its sign.

        Returns 0 when the reversed value overflows a signed 32-bit integer.
        Python-2-only ``cmp`` is replaced by an equivalent sign expression
        and ``/`` by ``//`` so the digit loop also works on Python 3.
        """
        sign = (x > 0) - (x < 0)  # -1, 0 or 1; replacement for cmp(x, 0)
        x = x * sign              # work on the absolute value
        rnum = 0
        while x:
            num = x % 10
            x = x // 10
            # Shift left only while more digits remain.
            if x > 0:
                rnum = (rnum + num) * 10
            else:
                rnum = rnum + num
        # bool multiplies as 0/1: zero out the result on 32-bit overflow.
        return sign * rnum * (rnum < 2**31)
class Solution2(object):
    """String-based integer reversal (LeetCode 7)."""

    def reverse(self, x):
        """Reverse digits via string slicing; 0 on signed 32-bit overflow.

        Uses a sign expression instead of Python 2's ``cmp``.
        """
        sign = (x > 0) - (x < 0)
        reversed_x = int(repr(x * sign)[::-1])
        return sign * reversed_x * (reversed_x < 2**31)
|
from django.contrib.syndication.feeds import Feed
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.syndication.feeds import FeedDoesNotExist
from django.core.urlresolvers import reverse
from blog.models import Post
from tagging.models import Tag
class LatestPosts(Feed):
    """RSS feed of recent blog posts, optionally filtered by a single tag."""

    def get_object(self, bits):
        """Resolve the optional tag segment of the feed URL.

        No extra segment means the site-wide feed (None); exactly one segment
        is looked up as a tag, case-insensitively; anything longer is a 404.
        """
        if len(bits)==0:
            return None
        if len(bits)==1:
            return Tag.objects.get(name__iexact=bits[0])
        raise ObjectDoesNotExist

    def title(self, obj):
        if not obj:
            return "mosbius.com"
        return "mosbius.com: posts tagged as '%s'" % obj.name

    def description(self, obj):
        if not obj:
            return "recent blog posts"
        return "recent blog posts tagged under '%s'" % obj.name

    def link(self, obj):
        if not obj:
            return reverse('blog_post_latest')
        return reverse('blog_tag_list', kwargs={'tag': obj.name})

    def items(self, obj):
        # Latest ten published posts, narrowed by tag when one was requested.
        published = Post.objects.filter(published=True)
        if obj:
            published = published.filter(tags__contains=obj)
        return published[:10]
|
#!/usr/bin/env python
# -*-encoding:UTF-8-*-
# Rate-limiting helper: a token-bucket implementation
import time
class TokenBucket:
    """Redis-backed token-bucket rate limiter.

    NOTE: operations on a single key are not thread-safe (the
    read-modify-write cycle on the Redis hash is not atomic).
    """

    def __init__(self, key, capacity, fill_rate, default_capacity, redis_conn):
        """Initialise the bucket's configuration.

        :param key: Redis hash key this bucket is stored under
        :param capacity: maximum number of tokens the bucket can hold
        :param fill_rate: refill speed, in tokens per second
        :param default_capacity: token count a brand-new bucket starts with
        :param redis_conn: redis connection
        """
        self._key = key
        self._capacity = capacity
        self._fill_rate = fill_rate
        self._default_capacity = default_capacity
        self._redis_conn = redis_conn
        self._last_capacity_key = "last_capacity"
        self._last_timestamp_key = "last_timestamp"

    def _init_key(self):
        """Initialise the hash fields for a bucket seen for the first time.

        :return: (default capacity, current timestamp)
        """
        self._last_capacity = self._default_capacity
        now = time.time()
        self._last_timestamp = now
        return self._default_capacity, now

    @property
    def _last_capacity(self):
        """Token count recorded at the last update (lazily initialised)."""
        last_capacity = self._redis_conn.hget(self._key, self._last_capacity_key)
        if last_capacity is None:
            return self._init_key()[0]
        return float(last_capacity)

    @_last_capacity.setter
    def _last_capacity(self, value):
        self._redis_conn.hset(self._key, self._last_capacity_key, value)

    @property
    def _last_timestamp(self):
        """Timestamp of the last update (lazily initialised).

        BUG FIX: this property and its setter previously read/wrote
        ``self._last_capacity_key``, so the timestamp silently clobbered the
        stored capacity.  They now use ``self._last_timestamp_key``, and the
        getter initialises a missing key instead of crashing on float(None).
        """
        last_timestamp = self._redis_conn.hget(self._key, self._last_timestamp_key)
        if last_timestamp is None:
            return self._init_key()[1]
        return float(last_timestamp)

    @_last_timestamp.setter
    def _last_timestamp(self, value):
        self._redis_conn.hset(self._key, self._last_timestamp_key, value)

    def _try_to_fill(self, now):
        """Tokens that would be available at *now* after refilling since the
        last update, capped at the bucket's capacity.  Does not modify state.
        """
        delta = self._fill_rate * (now - self._last_timestamp)
        return min(self._last_capacity + delta, self._capacity)

    def consume(self, num=1):
        """Try to consume *num* tokens.

        :param num: number of tokens requested
        :return: (True, 0) on success, otherwise (False, seconds to wait
                 until enough tokens will have been refilled)
        """
        if self._last_capacity >= num:
            self._last_capacity -= num
            return True, 0
        # Not enough stored tokens: refill from the elapsed time and retry.
        now = time.time()
        cur_num = self._try_to_fill(now)
        if cur_num >= num:
            self._last_capacity = cur_num - num
            self._last_timestamp = now
            return True, 0
        return False, (num - cur_num) / self._fill_rate
|
from flask import Flask, redirect, url_for
app = Flask(__name__)
@app.route('/user/<name>')
def user_page(name):
    """Route 'Godwin' to the admin page, everyone else to the guest page."""
    target = 'admin' if name == "Godwin" else 'guest'
    return redirect(url_for(target, name=name))
@app.route('/user/admin/<name>')
def admin(name):
    """Admin greeting page."""
    return 'I am the admin. My name is ' + name
@app.route('/user/guest/<name>')
def guest(name):
    """Guest greeting page."""
    return 'I am on the guest page. My name is ' + name
@app.route('/user/payment/<int:sal>')
def payment(sal):
    """Tiered message based on salary: poor <= 2000 < enough <= 5000 < rich."""
    if sal <= 2000:
        return 'You are poor'
    if sal <= 5000:
        return 'You got enough?'
    return '<h1 style="display:flex;justify-content:center;">You are rich!</h1>'
if __name__ == "__main__":
    # Interactive debugger/reloader for local development only.
    app.debug = True
    app.run()
|
from django_elasticsearch_dsl_drf.pagination import LimitOffsetPagination
class CollapseLimitOffsetPagination(LimitOffsetPagination):
    """Customized limit/offset pagination which handles collapsed results."""

    def get_paginated_response_context(self, data):
        """Overrides `get_paginated_response_context` to ignore facets."""
        context = [
            ('count', self.get_count()),
            ('next', self.get_next_link()),
            ('previous', self.get_previous_link()),
        ]
        context.append(('results', data))
        return context

    def get_count(self):
        """Total hits: the collapsed total when facets are present,
        otherwise the raw count."""
        return self.facets.total.value if self.facets else self.count
|
from django.urls import path,include
from .views import index,student_list
from rest_framework import routers
# DRF router exposing student_list as a browsable endpoint at the root.
r=routers.DefaultRouter()
r.register('',student_list)
urlpatterns = [
    path('',index,name='index'),
    # NOTE(review): student_list is wired up both directly and via the
    # router below — confirm both endpoints are intended.
    path('student_json',student_list.as_view()),
    path('student_json1',include(r.urls)),
]
from selenium import webdriver
import pytest
from selenium.webdriver.common.by import By
@pytest.yield_fixture()
def driver():
    # One Chrome session per test; always quit, even when the test fails.
    _driver = webdriver.Chrome()
    yield _driver
    _driver.quit()
def login(driver, username, password):
    """Log into the litecart admin panel.

    Uses ``find_element(By.NAME, ...)`` for consistency with the rest of this
    file (which already uses ``find_element(By.XPATH, ...)``); the
    ``find_element_by_name`` shortcuts were deprecated and removed in
    Selenium 4.
    """
    driver.get("http://localhost/litecart/admin/")
    driver.find_element(By.NAME, "username").send_keys(username)
    driver.find_element(By.NAME, "password").send_keys(password)
    driver.find_element(By.NAME, "login").click()
def test_home_task_10(driver):
    # Verify that a campaign product's name and prices match between the home
    # page ("box-campaigns") and its own product page ("box-product"), and
    # that the campaign price is red/bold while the regular price is greyed
    # and struck through.
    product_attribute1 = "//*[@id='box-campaigns']//*[@class='%s']"
    product_attribute2 = "//*[@id='box-product']//*[@class='%s']"
    login(driver, username="admin", password="admin")
    driver.find_element(By.XPATH, "//*[@title='Catalog']").click()
    # Home page: capture name and both prices of the first campaign product.
    product_name1 = driver.find_element(By.XPATH, product_attribute1 % 'name').text
    product_strong_price1 = driver.find_element(By.XPATH, product_attribute1 % 'campaign-price')
    strong_price_value1 = product_strong_price1.text
    assert product_strong_price1.value_of_css_property("color") == 'rgba(204, 0, 0, 1)'
    assert product_strong_price1.value_of_css_property("font-weight") == 'bold'
    product_regular_price1 = driver.find_element(By.XPATH, product_attribute1 % 'regular-price')
    regular_price_value1 = product_regular_price1.text
    assert product_regular_price1.value_of_css_property("color") == 'rgba(119, 119, 119, 1)'
    assert product_regular_price1.value_of_css_property("text-decoration") == 'line-through'
    # Open the product's own page and capture the same data there.
    driver.find_element(By.XPATH, "//*[@id='box-campaigns']//*[@class='product column shadow hover-light']").click()
    product_name2 = driver.find_element(By.XPATH, product_attribute2 % 'title').text
    product_strong_price2 = driver.find_element(By.XPATH, product_attribute2 % 'campaign-price')
    strong_price_value2 = product_strong_price2.text
    assert product_strong_price2.value_of_css_property("color") == 'rgba(204, 0, 0, 1)'
    assert product_strong_price2.value_of_css_property("font-weight") == 'bold'
    product_regular_price2 = driver.find_element(By.XPATH, product_attribute2 % 'regular-price')
    regular_price_value2 = product_regular_price2.text
    # NOTE(review): the regular-price grey differs between the two pages
    # (119,119,119 vs 102,102,102) — presumably theme styling; confirm.
    assert product_regular_price2.value_of_css_property("color") == 'rgba(102, 102, 102, 1)'
    assert product_regular_price2.value_of_css_property("text-decoration") == 'line-through'
    # Cross-page consistency checks.
    assert product_name1 == product_name2
    assert strong_price_value1 == strong_price_value2
    assert regular_price_value1 == regular_price_value2
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy
class ContactConfig(AppConfig):
    """App configuration for the contact app."""
    name = 'contact'

    def ready(self):
        # Import for its side effect: registers the app's signal handlers
        # once Django has finished loading.
        from . import signals
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
def kaboom(x, y):
    """Print x + y; raises TypeError when the operand types don't mix."""
    total = x + y
    print(total)

try:
    kaboom([0, 1, 2], 'spam')   # list + str -> TypeError
except TypeError as err:
    # Same output as err.__class__.__name__
    print(type(err).__name__)
print('resuming here!')
|
import random

# Warm-up: print a random number between 1 and 20 inclusive.
print(random.randint(1, 20))

tommys_age = int(input("How old are you?: "))
michaels_age = 26

# Boolean: show the raw comparison result before the worded verdict below.
print(tommys_age > michaels_age)

if tommys_age > michaels_age:
    print("you are older than michael")
elif tommys_age == michaels_age:
    print("you are the same age as michael")
else:
    print("you are NOT older than michael")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-11-28 17:15:37
# @Author : swz
# @Email : js_swz2008@163.com
|
import redis
def connect_redis_ssl():
    """Open an SSL connection to Redis and verify it with PING.

    Terminates the process when the connection cannot be established.
    """
    try:
        conn = redis.StrictRedis(
            host='x.x.x.x',
            port=6380,
            password='YOUR_PASSWORD',
            ssl=True,
            ssl_ca_certs='LOCAL/PATH/TO/key.pem')
        print(conn)
        conn.ping()
    except Exception as ex:
        print('Error:', ex)
        exit('Failed to connect, terminating.')
    else:
        print('Connected using SSL!')
def connect_redis():
    """Open a plain (non-SSL) connection to Redis and verify it with PING.

    Terminates the process when the connection cannot be established.
    """
    try:
        conn = redis.StrictRedis(
            host='35.203.61.217',
            port=6379, db = 0)
        print(conn)
        conn.ping()
    except Exception as ex:
        print('Error:', ex)
        exit('Failed to connect, terminating.')
    else:
        print('Connected without SSL!')
if __name__ == "__main__":
    # Only the plain (non-SSL) connection is exercised by default.
    connect_redis()
#! /usr/bin/python3
"""
python -m py_compile IOtest.py
"""
import base64
import sys
import numpy as np
from PIL import Image
# for line in sys.stdin:
# # sys.stdout.write(line.strip())
# data = base64.b64decode(line.strip())
# sys.stdout.write(data)
# # base64_bytes = base64.b64encode(data)
# # sys.stdout.write(data)
# # base64_message = base64_bytes.encode('ascii')
# # sys.stdout.write("Hello, %s. \n" %base64_message)
# # print(base64_message)
# # print("ff")
# Each stdin line (skipping blanks and '#' comments) is a base64-encoded
# 28x28 uint8 image; print the largest column sum per image.
for line in sys.stdin:
    if not line.strip() or line.strip().startswith('#'):
        continue
    # sys.stdout.write(line.strip())
    data = base64.b64decode(line.strip())
    # print(data)
    # sys.stdout.w
    # frombuffer yields a read-only uint8 view; reshape assumes exactly
    # 784 decoded bytes per line — TODO confirm upstream format.
    array = np.frombuffer(data, dtype=np.uint8).reshape((28, 28))
    # int64 accumulator avoids uint8 overflow when summing 28 values.
    sums = array.sum(axis=0, dtype=np.int64)
    # print(np.min(sums))
    print(np.max(sums))
|
'''
Runner Module for AD User/Group management
Depends on grain value being set for activedirectory server
'''
from __future__ import absolute_import
import logging
import salt.client
import salt.utils.master
import random
log = logging.getLogger(__name__)
def __virtual__():
    # Expose this runner module to Salt under the name 'ad'.
    return 'ad'
def _srvmgr(func):
    """Run *func* through PowerShell on an AD server minion.

    :param func: PowerShell command string to execute
    :return: the command's output, or '' when the minion returned nothing
    """
    adhost = _get_ad_server()
    myargs = []
    salt_cmd = 'cmd.run'
    # Load the AD module in the same shell invocation as the command.
    myargs.append('Import-Module activedirectory; {0}'.format(func))
    myargs.append('shell=powershell')
    myargs.append('python_shell=True')
    local = salt.client.get_local_client(__opts__['conf_file'])
    cmd_ret = local.cmd('{0}'.format(adhost), salt_cmd, myargs)
    if len(cmd_ret) == 0:
        # BUG FIX: the original logged the error and then fell through to
        # cmd_ret[adhost], raising KeyError.  Return '' so callers'
        # .split()/len() handling still works on failure.
        log.error('Unable to execute command: %s', func)
        return ''
    return cmd_ret[adhost]
def _get_pillar_cache():
    # Cached pillar data for the salt master itself; holds the user/group
    # definitions that the public functions read by pillar key.
    tgt = 'saltmaster'
    expr_form = 'glob'
    pillar_util = salt.utils.master.MasterPillarUtil(
        tgt,
        expr_form,
        use_cached_grains=True,
        grains_fallback=False,
        opts=__opts__)
    return pillar_util.get_minion_pillar()
def _get_ad_server():
    """Pick one minion that has the ``activedirectory`` grain set.

    :return: a randomly chosen matching minion id
    :raises IndexError: when no minion carries the grain (random.choice
            on an empty list)
    """
    # Get a list of minions that have the AD role and randomly select one.
    adhost = []
    tgt = 'activedirectory:True'
    expr_form = 'grain'
    pillar_util = salt.utils.master.MasterPillarUtil(
        tgt,
        expr_form,
        use_cached_grains=True,
        grains_fallback=False,
        opts=__opts__)
    cached_grains = pillar_util.get_minion_grains()
    # .items() replaces dict.viewitems(), which exists only on Python 2;
    # the iteration behaviour here is identical.
    for minion_id, grains in cached_grains.items():
        if len(grains) > 0:
            adhost.append(minion_id)
    return random.choice(adhost)
def get_allusers():
    """Return the names of every AD user."""
    command = 'get-aduser -filter * | select name'
    tokens = _srvmgr(command).split()
    # Drop the PowerShell table-header tokens from the output.
    return [token for token in tokens if token not in ('name', '----')]
def get_user(name):
    """Return AD user names matching *name* (PowerShell -like filter)."""
    command = "get-aduser -filter 'Name -like \"{0}\"' | select name".format(name)
    tokens = _srvmgr(command).split()
    # Drop the PowerShell table-header tokens from the output.
    return [token for token in tokens if token not in ('name', '----')]
def remove_users(
        pillarkey,
        saltmaster='saltmaster'):
    """Remove every AD user listed under *pillarkey* in the master's pillar.

    :return: newline-joined status messages, or an early message when a
             listed user does not exist
    """
    cached_pillars = _get_pillar_cache()
    # BUG FIX: ret was never initialised, so every successful removal raised
    # NameError on the ret.append calls below.
    ret = []
    for user in cached_pillars[saltmaster][pillarkey]:
        username = user['name']
        if len(get_user(username)) == 0:
            # Preserved behaviour: abort on the first missing user (mirrors
            # create_users' early return for an already-existing user).
            return "User {0} does not exists".format(username)
        pscmd = []
        pscmd.append('remove-aduser')
        pscmd.append('-Identity {0}'.format(username))
        command = ' '.join(pscmd)
        ret_data = _srvmgr(command)
        # Empty output from _srvmgr means the command succeeded.
        if len(ret_data) == 0:
            ret.append('User {0} removed successfully'.format(username))
        else:
            ret.append('Encountered a problem removing user account\n{0}'.format(ret_data))
    return '\n'.join(ret)
def create_users(
        pillarkey,
        saltmaster='saltmaster'):
    """Create an AD account for every user listed under *pillarkey* in the
    master's pillar.  Each entry must provide 'name', 'domain' and 'password'.

    :return: newline-joined status messages
    """
    # Check to see if user already exists
    cached_pillars = _get_pillar_cache()
    ret = []
    for user in cached_pillars[saltmaster][pillarkey]:
        username = user['name']
        domain = user['domain']
        password = user['password']
        if len(get_user(username)) > 0:
            # NOTE(review): returning here aborts the whole run and discards
            # statuses accumulated so far — confirm that is intended.
            return "User {0} already exists".format(username)
        pscmd = []
        pscmd.append('new-aduser')
        pscmd.append('-SamAccountName {0}'.format(username))
        pscmd.append('-Name {0}'.format(username))
        pscmd.append('-UserPrincipalName {0}@{1}'.format(username, domain))
        # The plaintext pillar password is converted to a SecureString inline.
        pscmd.append('-AccountPassword (ConvertTo-SecureString {0} -AsPlainText -Force)'.format(password))
        pscmd.append('-Enabled $true')
        pscmd.append('-PasswordNeverExpires $true')
        command = ' '.join(pscmd)
        ret_data = _srvmgr(command)
        # Empty output from _srvmgr means the command succeeded.
        if len(ret_data) == 0:
            ret.append('User {0} created successfully'.format(username))
        else:
            ret.append('Encountered a problem creating user account\n{0}'.format(ret_data))
    return '\n'.join(ret)
def add_usertogroup(
        pillarkey,
        saltmaster='saltmaster'):
    """Add each user listed under *pillarkey* to its configured AD group(s).

    A user's ``'groups'`` pillar value may be a single group name (str) or a
    list of names; any other type is skipped silently (original behavior).

    :return: newline-joined status message per attempted group membership.
    """
    cached_pillars = _get_pillar_cache()
    ret = []
    for user in cached_pillars[saltmaster][pillarkey]:
        username = user['name']
        groups = user['groups']
        # Normalise to a list so one loop handles both shapes (the original
        # duplicated the whole body); isinstance also accepts subclasses.
        if isinstance(groups, str):
            groups = [groups]
        elif not isinstance(groups, list):
            continue
        for group in groups:
            command = ' '.join([
                'add-adgroupmember',
                '{0}'.format(group),
                '-members {0}'.format(username),
            ])
            ret_data = _srvmgr(command)
            # Any output from PowerShell indicates a failure.
            if len(ret_data) > 0:
                ret.append('Encountered a problem adding user to group\n{0}'.format(ret_data))
            else:
                ret.append('User {0} was successfully added to group {1}'.format(username, group))
    return '\n'.join(ret)
def get_group(groupname):
    """Return a list of AD group names matching *groupname* (empty when absent)."""
    command = "get-adgroup -filter 'Name -like \"{0}\"' | select name".format(groupname)
    # Drop the PowerShell table header tokens from the whitespace-split output.
    return [token for token in _srvmgr(command).split()
            if token not in ('name', '----')]
def remove_secgroups(
        pillarkey,
        saltmaster='saltmaster'):
    """Remove every AD security group listed under *pillarkey*.

    Groups that are already absent are reported rather than treated as errors.
    Returns a newline-joined status message per group.
    """
    cached_pillars = _get_pillar_cache()
    messages = []
    for entry in cached_pillars[saltmaster][pillarkey]:
        groupname = entry['name']
        if not get_group(groupname):
            messages.append("Security Group {0} does not exists".format(groupname))
            continue
        output = _srvmgr(' '.join([
            'remove-adgroup',
            '-Identity {0}'.format(groupname),
        ]))
        if 'group does not exists' in output:
            messages.append('Group {0} already removed'.format(groupname))
        elif output:
            messages.append('Encountered a problem creating group \n{0}'.format(output))
        else:
            # Empty PowerShell output means the removal succeeded.
            messages.append('Group {0} was successfully removed'.format(groupname))
    return '\n'.join(messages)
def create_secgroups(
        pillarkey,
        saltmaster='saltmaster',
        groupscope='Global',
        path=None):
    """Create every AD security group listed under *pillarkey*.

    :param groupscope: AD group scope passed to ``new-adgroup`` (default 'Global').
    :param path: optional OU distinguished name appended as ``-Path``.
    :return: newline-joined status message per group.
    """
    cached_pillars = _get_pillar_cache()
    messages = []
    for entry in cached_pillars[saltmaster][pillarkey]:
        groupname = entry['name']
        if get_group(groupname):
            messages.append("Security Group {0} already exists".format(groupname))
            continue
        parts = [
            'new-adgroup',
            '-name {0}'.format(groupname),
            '-GroupScope {0}'.format(groupscope),
        ]
        if path:
            parts.append('-Path {0}'.format(path))
        output = _srvmgr(' '.join(parts))
        if 'group already exists' in output:
            messages.append('Group {0} already exists'.format(groupname))
        elif output:
            messages.append('Encountered a problem creating group \n{0}'.format(output))
        else:
            # Empty PowerShell output means the creation succeeded.
            messages.append('Group {0} was successfully created'.format(groupname))
    return '\n'.join(messages)
def unjoin_domain(computers):
    """Delete each computer account in *computers* from Active Directory.

    :param computers: iterable of computer account names.
    :return: newline-joined status messages. NOTE(review): output that is
        non-empty but does not contain 'Cannot find' produces no message —
        original behavior preserved.
    """
    messages = []
    for computer in computers:
        output = _srvmgr('remove-adcomputer -identity {0} -confirm:$false'.format(computer))
        if 'Cannot find' in output:
            messages.append('Could not find computer {0}'.format(computer))
        elif len(output) == 0:
            messages.append('Computer {0} removed'.format(computer))
    return '\n'.join(messages)
|
import DigitalMicrograph as DM

# Bootstrap section: recover this script's file path from the GMS document
# window (scripts executed inside DigitalMicrograph have no __file__) so the
# package root can be added to sys.path.
# NOTE(review): "environmnet" typo is inside a runtime string; left unchanged.
print("Initializing DigitalMicrograph environmnet...");

# the name of the tag is used, this is deleted so it shouldn't matter anyway
file_tag_name = "__python__file__"

# the dm-script to execute, double curly brackets are used because of the
# python format function; it writes the current window's file path into a
# persistent tag that the python side reads back below
script = ("\n".join((
    "DocumentWindow win = GetDocumentWindow(0);",
    "if(win.WindowIsvalid()){{",
    "if(win.WindowIsLinkedToFile()){{",
    "TagGroup tg = GetPersistentTagGroup();",
    "if(!tg.TagGroupDoesTagExist(\"{tag_name}\")){{",
    "number index = tg.TagGroupCreateNewLabeledTag(\"{tag_name}\");",
    "tg.TagGroupSetIndexedTagAsString(index, win.WindowGetCurrentFile());",
    "}}",
    "else{{",
    "tg.TagGroupSetTagAsString(\"{tag_name}\", win.WindowGetCurrentFile());",
    "}}",
    "}}",
    "}}"
))).format(tag_name=file_tag_name)

# execute the dm script
DM.ExecuteScriptString(script)

# read from the global tags to get the value to the python script
if DM.GetPersistentTagGroup():
    s, __file__ = DM.GetPersistentTagGroup().GetTagAsString(file_tag_name);
    if s:
        # delete the created tag again
        DM.ExecuteScriptString(
            "GetPersistentTagGroup()." +
            "TagGroupDeleteTagWithLabel(\"{}\");".format(file_tag_name)
        )
    else:
        # tag read failed: discard the bogus value so the fallback below runs
        del __file__

try:
    __file__
except NameError:
    # set a default if the __file__ could not be received
    __file__ = ""

if __file__ != "":
    import os
    import sys
    # add the grandparent directory (assumed package root — TODO confirm)
    # to the import path so `import pylo` below can succeed
    base_path = str(os.path.dirname(os.path.dirname(__file__)))
    if base_path not in sys.path:
        sys.path.insert(0, base_path)
# Manual smoke-test harness for pylo.DMView: enable entries in `tests` below
# to exercise individual dialogs interactively inside DigitalMicrograph.
print("Initializing python environment...");

import pprint
import time
import random
import threading
import importlib  # NOTE(review): appears unused in the visible code
import traceback

try:
    import pylo

    print("Preparing...");
    # Build a minimal controller wired to dummy devices so the dialogs can
    # be shown without real hardware.
    view = pylo.DMView()
    configuration = pylo.AbstractConfiguration()
    controller = pylo.Controller(view, configuration)
    controller.microscope = pylo.loader.getDevice("Dummy Microscope", controller)
    controller.camera = pylo.loader.getDevice("Dummy Camera", controller)

    # Uncomment entries to select which dialogs to test.
    tests = [
        # "error",
        # "hint",
        # "create-measurement",
        "create-measurement-series",
        # "ask-for-decision",
        # "show-settings",
        # "show-custom-tags",
        # "ask-for",
        # "show-running",
    ]
    # view._exec_debug = True

    if "error" in tests:
        print("")
        print("= " * 40)
        print("")
        print("Showing an error:")
        view.showError("Test error", "Test fix")

    if "hint" in tests:
        print("")
        print("= " * 40)
        print("")
        print("Showing a hint:")
        view.showHint("Test hint")

    if "create-measurement" in tests:
        print("")
        print("= " * 40)
        print("")
        print("Showing create Measurement")
        pprint.pprint(view.showCreateMeasurement(controller))

    if "create-measurement-series" in tests:
        print("")
        print("= " * 40)
        print("")
        print("Showing create Measurement with a series")
        # Pre-build a nested focus/pressure series and a start point to
        # pre-populate the dialog.
        series, *_ = pylo.MeasurementSteps.formatSeries(
            controller.microscope.supported_measurement_variables,
            {"variable": "focus", "start": 33, "end": 99, "step": 3, "on-each-point": {
                "variable": "pressure", "start": 1020, "step": 510, "end": 2040
            }}, add_default_values=True)
        start, *_ = pylo.MeasurementSteps.formatStart(
            controller.microscope.supported_measurement_variables,
            {"ol-current": 0x1000}, series, add_default_values=True)
        pprint.pprint(view.showCreateMeasurement(controller, series, start))

    if "ask-for-decision" in tests:
        print("")
        print("= " * 40)
        print("")
        print("Ask for decision")
        print(view.askForDecision("Please click on one of the buttons.",
                                  ("Button 1", "Button 2", "Button 3", "Button 4")))

    if "show-settings" in tests:
        print("")
        print("= " * 40)
        print("")
        print("Showing Settings")
        pprint.pprint(view.showSettings(configuration))

    if "show-custom-tags" in tests:
        from pylo.config import CUSTOM_TAGS_GROUP_NAME
        # Seed one saved tag so the dialog shows pre-existing content.
        configuration.setValue(CUSTOM_TAGS_GROUP_NAME, "saved-key", "Saved value");
        print("")
        print("= " * 40)
        print("")
        print("Showing Custom Tags")
        pprint.pprint(view.showCustomTags(configuration))
        print("Configuration:")
        config_tags = {}
        for key in configuration.getKeys(CUSTOM_TAGS_GROUP_NAME):
            config_tags[key] = configuration.getValue(CUSTOM_TAGS_GROUP_NAME, key)
        pprint.pprint(config_tags)

    if "ask-for" in tests:
        print("")
        print("= " * 40)
        print("")
        print("Asking for values")
        inputs = (
            {"name": "Askval1", "datatype": str, "description": "Type in a str"},
            {"name": "Askval2", "datatype": int, "description": "Type in an int"},
            {"name": "Askval3", "datatype": float, "description": "Type in a float"}
        )
        pprint.pprint(view.askFor(*inputs))

    if "show-running" in tests:
        print("")
        print("= " * 40)
        print("")
        print("Show running indicator")
        view.progress = 0;
        view.progress_max = 987

        # Background thread that randomly advances view.progress so the
        # running dialog has something to display.
        def updateProgress(view):
            i = 1
            while i <= view.progress_max:
                if random.randint(0, 3) == 0:
                    i += random.randint(1, 30)
                else:
                    i += 1
                view.progress = i
                view.print("Setting view.progress = {}".format(i));
                time.sleep(0.1)

        thread = threading.Thread(target=updateProgress, args=(view,))
        thread.start()
        view.showRunning()
        # Dropping progress_max ends the updater thread's while-loop.
        view.progress_max = 0
        print(" Thread stopped.")
except Exception as e:
    # Surface the full traceback in the GMS output window, then re-raise.
    print("Exception: ", e)
    traceback.print_exc()
    raise e
from setuptools import find_packages, setup

# Packaging metadata for the oebinarytools distribution.
setup(
    name='oebinarytools',
    packages=find_packages(),
    version='0.1.0',
    # FIX: corrected typo "behavioor" -> "behavior" in the description.
    description='OpenEphys tools for loading binary data with rpioperant behavior',
    author='Tim Sainburg',
    license='MIT',
)
|
import unittest
from katas.kyu_8.basic_mathematical_operations import basic_op
class BasicOperationsTestCase(unittest.TestCase):
    """Checks that basic_op('<op>', a, b) applies the named operator to a and b."""

    def test_equal_1(self):
        # addition
        self.assertEqual(basic_op('+', 4, 7), 11)

    def test_equal_2(self):
        # subtraction with a negative result
        self.assertEqual(basic_op('-', 15, 18), -3)

    def test_equal_3(self):
        # multiplication
        self.assertEqual(basic_op('*', 5, 5), 25)

    def test_equal_4(self):
        # division
        self.assertEqual(basic_op('/', 49, 7), 7)
|
import subprocess
import warnings
from unittest.mock import MagicMock, patch
import pytest
from rubicon_ml.client.mixin import ArtifactMixin, DataframeMixin, TagMixin
from rubicon_ml.exceptions import RubiconException
# ArtifactMixin


def test_log_artifact_from_bytes(project_client):
    """Logging raw bytes creates a retrievable artifact with the given name."""
    project = project_client
    artifact = ArtifactMixin.log_artifact(project, data_bytes=b"content", name="test.txt")
    assert artifact.id in [a.id for a in project.artifacts()]
    assert artifact.name == "test.txt"
    assert artifact.data == b"content"


def test_log_artifact_from_file(project_client):
    """Logging from an open file-like object reads and stores its bytes."""
    project = project_client
    mock_file = MagicMock()
    # Simulate `with open(...) as f: f.read()` returning the content once.
    mock_file.__enter__().read.side_effect = [b"content"]
    artifact = ArtifactMixin.log_artifact(project, data_file=mock_file, name="test.txt")
    assert artifact.id in [a.id for a in project.artifacts()]
    assert artifact.name == "test.txt"
    assert artifact.data == b"content"


@patch("fsspec.implementations.local.LocalFileSystem.open")
def test_log_artifact_from_path(mock_open, project_client):
    """Logging from a path opens the file and derives the artifact name from it."""
    project = project_client
    mock_file = MagicMock()
    mock_file().read.side_effect = [b"content"]
    mock_open.side_effect = mock_file
    artifact = ArtifactMixin.log_artifact(project, data_path="/path/to/test.txt")
    assert artifact.id in [a.id for a in project.artifacts()]
    assert artifact.name == "test.txt"
    assert artifact.data == b"content"


def test_log_artifact_throws_error_if_data_missing(project_client):
    """A data source argument is mandatory."""
    project = project_client
    with pytest.raises(RubiconException) as e:
        ArtifactMixin.log_artifact(project, name="test.txt")
    assert (
        "One of `data_bytes`, `data_file`, `data_object` or `data_path` must be provided." in str(e)
    )


def test_log_artifact_throws_error_if_name_missing_data_bytes(project_client):
    """`name` is required when logging from bytes."""
    project = project_client
    with pytest.raises(RubiconException) as e:
        ArtifactMixin.log_artifact(project, data_bytes=b"content")
    assert "`name` must be provided if not using `data_path`." in str(e)


def test_log_artifact_throws_error_if_name_missing_data_file(project_client):
    """`name` is required when logging from a file object."""
    project = project_client
    mock_file = MagicMock()
    mock_file.__enter__().read.side_effect = [b"content"]
    with pytest.raises(RubiconException) as e:
        ArtifactMixin.log_artifact(project, data_file=mock_file)
    assert "`name` must be provided if not using `data_path`." in str(e)


def test_log_artifact_throws_error_if_name_missing_data_object(project_client):
    """`name` is required when logging an arbitrary python object."""
    project = project_client

    class TestObject:
        value = "test"

    test_object = TestObject()
    with pytest.raises(RubiconException) as e:
        ArtifactMixin.log_artifact(project, data_object=test_object)
    assert "`name` must be provided if not using `data_path`." in str(e)


def test_get_environment_bytes(project_client, mock_completed_process_empty):
    """_get_environment_bytes returns the captured stdout of the given command."""
    project = project_client
    with patch("subprocess.run") as mock_run:
        mock_run.return_value = mock_completed_process_empty
        env_bytes = project._get_environment_bytes(["conda", "env", "export"])
    assert env_bytes == b"\n"


def test_get_environment_bytes_error(project_client):
    """A failing subprocess is surfaced as a RubiconException carrying stderr."""
    project = project_client
    with pytest.raises(RubiconException) as e:
        with patch("subprocess.run") as mock_run:
            mock_run.side_effect = subprocess.CalledProcessError(
                returncode=-1, cmd=b"\n", stderr="yikes"
            )
            project._get_environment_bytes(["conda", "env", "export"])
    assert "yikes" in str(e)


def test_log_conda_env(project_client, mock_completed_process_empty):
    """log_conda_environment stores the conda export as a .yml artifact."""
    project = project_client
    with patch("subprocess.run") as mock_run:
        mock_run.return_value = mock_completed_process_empty
        artifact = project.log_conda_environment()
    assert artifact.id in [a.id for a in project.artifacts()]
    assert ".yml" in artifact.name
    assert artifact.data == b"\n"


def test_log_pip_requirements(project_client, mock_completed_process_empty):
    """log_pip_requirements stores the pip freeze output as a .txt artifact."""
    project = project_client
    with patch("subprocess.run") as mock_run:
        mock_run.return_value = mock_completed_process_empty
        artifact = project.log_pip_requirements()
    assert artifact.id in [a.id for a in project.artifacts()]
    assert ".txt" in artifact.name
    assert artifact.data == b"\n"


def test_artifacts(project_client):
    """artifacts() returns every logged artifact."""
    project = project_client
    data = b"content"
    artifact_a = ArtifactMixin.log_artifact(project, data_bytes=data, name="test.txt")
    artifact_b = ArtifactMixin.log_artifact(project, data_bytes=data, name="test.txt")
    artifacts = ArtifactMixin.artifacts(project)
    assert len(artifacts) == 2
    assert artifact_a.id in [a.id for a in artifacts]
    assert artifact_b.id in [a.id for a in artifacts]


def test_artifacts_by_name(project_client):
    """artifacts(name=...) filters to artifacts with the matching name only."""
    project = project_client
    data = b"content"
    artifact_a = ArtifactMixin.log_artifact(project, data_bytes=data, name="test.txt")
    artifact_b = ArtifactMixin.log_artifact(project, data_bytes=data, name="test.txt")
    ArtifactMixin.log_artifact(project, data_bytes=data, name="test2.txt")
    artifacts = ArtifactMixin.artifacts(project, name="test.txt")
    assert len(artifacts) == 2
    assert artifact_a.id in [a.id for a in artifacts]
    assert artifact_b.id in [a.id for a in artifacts]


def test_artifacts_tagged_and(project_client):
    """qtype='and' returns only artifacts carrying all requested tags."""
    project = project_client
    artifact = ArtifactMixin.log_artifact(project, name="name", data_bytes=b"test", tags=["x", "y"])
    ArtifactMixin.log_artifact(project, name="name", data_bytes=b"test", tags=["x"])
    ArtifactMixin.log_artifact(project, name="name", data_bytes=b"test", tags=["y"])
    artifacts = ArtifactMixin.artifacts(project, tags=["x", "y"], qtype="and")
    assert len(artifacts) == 1
    assert artifact.id in [d.id for d in artifacts]


def test_artifacts_tagged_or(project_client):
    """qtype='or' returns artifacts carrying any of the requested tags."""
    project = project_client
    artifact_a = ArtifactMixin.log_artifact(project, name="name", data_bytes=b"test", tags=["x"])
    artifact_b = ArtifactMixin.log_artifact(project, name="name", data_bytes=b"test", tags=["y"])
    ArtifactMixin.log_artifact(project, name="name", data_bytes=b"test", tags=["z"])
    artifacts = ArtifactMixin.artifacts(project, tags=["x", "y"], qtype="or")
    assert len(artifacts) == 2
    assert artifact_a.id in [d.id for d in artifacts]
    assert artifact_b.id in [d.id for d in artifacts]


def test_artifact_warning(project_client):
    """Duplicate names: artifact(name=...) warns and returns the newest one."""
    project = project_client
    data = b"content"
    artifact_a = ArtifactMixin.log_artifact(project, data_bytes=data, name="test.txt")
    artifact_b = ArtifactMixin.log_artifact(project, data_bytes=data, name="test.txt")
    with warnings.catch_warnings(record=True) as w:
        artifact_c = ArtifactMixin.artifact(project, name="test.txt")
        assert (
            "Multiple artifacts found with name 'test.txt'. Returning most recently logged"
        ) in str(w[0].message)
    assert artifact_c.id != artifact_a.id
    assert artifact_c.id == artifact_b.id


def test_artifact_name_not_found_error(project_client):
    """artifact(name=...) raises when no artifact has that name."""
    project = project_client
    with pytest.raises(RubiconException) as e:
        ArtifactMixin.artifact(project, name="test.txt")
    assert "No artifact found with name 'test.txt'." in str(e)


def test_artifacts_name_not_found_error(project_client):
    """artifacts(name=...) returns an empty list (no raise) when nothing matches."""
    project = project_client
    artifacts = ArtifactMixin.artifacts(project, name="test.txt")
    assert artifacts == []


def test_artifact_by_name(project_client):
    """artifact(name=...) retrieves a single artifact by name."""
    project = project_client
    data = b"content"
    ArtifactMixin.log_artifact(project, data_bytes=data, name="test.txt")
    artifact = ArtifactMixin.artifact(project, name="test.txt")
    assert artifact.name == "test.txt"


def test_artifact_by_id(project_client):
    """artifact(id=...) retrieves a single artifact by id."""
    project = project_client
    data = b"content"
    ArtifactMixin.log_artifact(project, data_bytes=data, name="test.txt")
    artifact = ArtifactMixin.artifact(project, name="test.txt")
    artifact_name = ArtifactMixin.artifact(project, id=artifact.id).name
    assert artifact_name == "test.txt"


def test_delete_artifacts(project_client):
    """delete_artifacts removes the artifact from the project listing."""
    project = project_client
    artifact = ArtifactMixin.log_artifact(project, data_bytes=b"content", name="test.txt")
    ArtifactMixin.delete_artifacts(project, [artifact.id])
    assert artifact.id not in [a.id for a in project.artifacts()]
# DataframeMixin


def test_log_dataframe(project_client, test_dataframe):
    """log_dataframe stores a named dataframe retrievable from the project."""
    project = project_client
    df = test_dataframe
    test_df_name = "test_df"
    dataframe = DataframeMixin.log_dataframe(project, df, name=test_df_name, tags=["x"])
    DataframeMixin.log_dataframe(project, df, name="secondary test df", tags=["x"])
    assert dataframe.name == test_df_name
    assert dataframe.id in [df.id for df in project.dataframes()]


def test_dataframes(project_client, test_dataframe):
    """dataframes() returns every logged dataframe."""
    project = project_client
    df = test_dataframe
    dataframe_a = DataframeMixin.log_dataframe(project, df)
    dataframe_b = DataframeMixin.log_dataframe(project, df)
    dataframes = DataframeMixin.dataframes(project)
    assert len(dataframes) == 2
    assert dataframe_a.id in [d.id for d in dataframes]
    assert dataframe_b.id in [d.id for d in dataframes]


def test_dataframes_by_name(project_client, test_dataframe):
    """dataframes(name=...) filters to dataframes with the matching name."""
    project = project_client
    df = test_dataframe
    test_df_name = "test_df"
    dataframe_a = DataframeMixin.log_dataframe(project, df, name=test_df_name)
    dataframe_b = DataframeMixin.log_dataframe(project, df, name=test_df_name)
    dataframes = DataframeMixin.dataframes(project, name=test_df_name)
    assert len(dataframes) == 2
    assert dataframe_a.id in [d.id for d in dataframes]
    assert dataframe_b.id in [d.id for d in dataframes]


def test_dataframe_by_name(project_client, test_dataframe):
    """dataframe(name=...) retrieves a single dataframe by name."""
    project = project_client
    df = test_dataframe
    test_df_name = "test_df"
    dataframe_a = DataframeMixin.log_dataframe(project, df, name=test_df_name)
    dataframe_b = DataframeMixin.dataframe(project, name=test_df_name)
    assert dataframe_a.id == dataframe_b.id


def test_dataframe_by_id(project_client, test_dataframe):
    """dataframe(id=...) retrieves a single dataframe by id."""
    project = project_client
    df = test_dataframe
    dataframe_a = DataframeMixin.log_dataframe(project, df)
    id = dataframe_a.id
    dataframe_b = DataframeMixin.dataframe(project, id=id)
    assert dataframe_a.id == dataframe_b.id


def test_dataframe_warning(project_client, test_dataframe):
    """Duplicate names: dataframe(name=...) warns and returns the newest one."""
    project = project_client
    df = test_dataframe
    test_df_name = "test_df"
    dataframe_a = DataframeMixin.log_dataframe(project, df, name=test_df_name)
    dataframe_b = DataframeMixin.log_dataframe(project, df, name=test_df_name)
    with warnings.catch_warnings(record=True) as w:
        dataframe_c = DataframeMixin.dataframe(project, name=test_df_name)
        assert (
            "Multiple dataframes found with name 'test_df'. Returning most recently logged"
        ) in str(w[0].message)
    assert dataframe_c.id != dataframe_a.id
    assert dataframe_c.id == dataframe_b.id


def test_dataframe_by_name_not_found(project_client, test_dataframe):
    """dataframe(name=...) raises when no dataframe has that name."""
    project = project_client
    test_df_name = "test_df"
    with pytest.raises(RubiconException) as e:
        DataframeMixin.dataframe(project, name=test_df_name)
    assert "No dataframe found with name 'test_df'." in str(e.value)


def test_dataframes_by_name_not_found(project_client, test_dataframe):
    """dataframes(name=...) returns an empty list when nothing matches."""
    project = project_client
    test_df_name = "test_df"
    dataframes = DataframeMixin.dataframes(project, name=test_df_name)
    assert dataframes == []


def test_dataframes_tagged_and(project_client, test_dataframe):
    """qtype='and' returns only dataframes carrying all requested tags."""
    project = project_client
    df = test_dataframe
    dataframe = DataframeMixin.log_dataframe(project, df, tags=["x", "y"])
    DataframeMixin.log_dataframe(project, df, tags=["x"])
    DataframeMixin.log_dataframe(project, df, tags=["y"])
    dataframes = DataframeMixin.dataframes(project, tags=["x", "y"], qtype="and")
    assert len(dataframes) == 1
    assert dataframe.id in [d.id for d in dataframes]


def test_dataframes_tagged_or(project_client, test_dataframe):
    """qtype='or' returns dataframes carrying any of the requested tags."""
    project = project_client
    df = test_dataframe
    dataframe_a = DataframeMixin.log_dataframe(project, df, tags=["x"])
    dataframe_b = DataframeMixin.log_dataframe(project, df, tags=["y"])
    DataframeMixin.log_dataframe(project, df, tags=["z"])
    dataframes = DataframeMixin.dataframes(project, tags=["x", "y"], qtype="or")
    assert len(dataframes) == 2
    assert dataframe_a.id in [d.id for d in dataframes]
    assert dataframe_b.id in [d.id for d in dataframes]


def test_delete_dataframes(project_client, test_dataframe):
    """delete_dataframes removes the dataframe from the project listing."""
    project = project_client
    df = test_dataframe
    dataframe = DataframeMixin.log_dataframe(project, df, tags=["x"])
    DataframeMixin.delete_dataframes(project, [dataframe.id])
    assert dataframe.id not in [df.id for df in project.dataframes()]
# TagMixin


def test_get_taggable_experiment_identifiers(project_client):
    """Experiments resolve to (project, experiment_id, None)."""
    project = project_client
    experiment = project.log_experiment()
    project_name, experiment_id, dataframe_id = TagMixin._get_taggable_identifiers(experiment)
    assert project_name == project.name
    assert experiment_id == experiment.id
    assert dataframe_id is None


def test_get_taggable_dataframe_identifiers(project_client, test_dataframe):
    """Dataframes resolve to their parent (project or experiment) plus their own id."""
    project = project_client
    experiment = project.log_experiment()
    df = test_dataframe
    project_df = project.log_dataframe(df)
    experiment_df = experiment.log_dataframe(df)
    # project-level dataframe: no experiment id
    project_name, experiment_id, dataframe_id = TagMixin._get_taggable_identifiers(project_df)
    assert project_name == project.name
    assert experiment_id is None
    assert dataframe_id == project_df.id
    # experiment-level dataframe: carries the experiment id
    project_name, experiment_id, dataframe_id = TagMixin._get_taggable_identifiers(experiment_df)
    assert project_name == project.name
    assert experiment_id is experiment.id
    assert dataframe_id == experiment_df.id


def test_get_taggable_artifact_identifiers(project_client):
    """Artifacts resolve to their parent (project or experiment) plus their own id."""
    project = project_client
    experiment = project.log_experiment()
    project_artifact = project.log_artifact(data_bytes=b"test", name="test")
    experiment_artifact = experiment.log_artifact(data_bytes=b"test", name="test")
    # project-level artifact: no experiment id
    project_name, experiment_id, artifact_id = TagMixin._get_taggable_identifiers(project_artifact)
    assert project_name == project.name
    assert experiment_id is None
    assert artifact_id == project_artifact.id
    # experiment-level artifact: carries the experiment id
    project_name, experiment_id, artifact_id = TagMixin._get_taggable_identifiers(
        experiment_artifact
    )
    assert project_name == project.name
    assert experiment_id is experiment.id
    assert artifact_id == experiment_artifact.id


def test_add_tags(project_client):
    """add_tags appends tags to the entity."""
    project = project_client
    experiment = project.log_experiment()
    TagMixin.add_tags(experiment, ["x"])
    assert experiment.tags == ["x"]


def test_remove_tags(project_client):
    """remove_tags strips the named tags from the entity."""
    project = project_client
    experiment = project.log_experiment(tags=["x", "y"])
    TagMixin.remove_tags(experiment, ["x", "y"])
    assert experiment.tags == []
|
import socket
import hmac

# Client side of a shared-secret challenge/response handshake: receive a
# random challenge from the server, reply with HMAC(secret, challenge),
# then read and print the server's verdict.
sk = socket.socket()
try:
    sk.connect(('127.0.0.1', 43))
    rand = sk.recv(1024)  # server's random challenge bytes
    # FIX: hmac.new() requires an explicit digestmod since Python 3.8;
    # 'md5' reproduces the former implicit default.
    h = hmac.new(b'043', rand, 'md5')
    ret_client = h.digest()
    sk.send(ret_client)
    msg = sk.recv(1024)
    print(msg)
finally:
    # FIX: the socket was never closed (resource leak).
    sk.close()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 26 11:33:07 2019

@author: nico

Pan-Tompkins QRS detector applied to the ECG_TP4.mat recording, compared
against the reference detections shipped in the same file.
"""
import os
import numpy as np
from scipy import signal as sig
import matplotlib.pyplot as plt
#from scipy.fftpack import fft
import scipy.io as sio
#from time import time
import pandas as pd
#from scipy.interpolate import CubicSpline

os.system ("clear")  # clear the python terminal
plt.close("all")  # close all open figures

# figure defaults
fig_sz_x = 14
fig_sz_y = 13
fig_dpi = 80  # dpi
fig_font_family = 'Ubuntu'
fig_font_size = 16

#%% load the ECG_TP4.mat file
# list the variables contained in the file
sio.whosmat('ECG_TP4.mat')
mat_struct = sio.loadmat('ECG_TP4.mat')
ecg_one_lead = mat_struct['ecg_lead']
# NOTE(review): flatten(1) relies on old NumPy accepting a positional order
# argument; modern NumPy expects flatten(order='F') — confirm before upgrading.
ecg_one_lead = ecg_one_lead.flatten(1)
cant_muestras = len(ecg_one_lead)
qrs_detections = mat_struct['qrs_detections']
qrs_detections = qrs_detections.flatten(1)
cant_detections = len(qrs_detections)
fs = 1000  # sampling rate in Hz
tt = np.linspace(0, cant_muestras, cant_muestras)
'''
Algoritmo de Pan–Tompkins
_________ _____________ ___________ ______________ ______________
| | | | | | | | | |
| Filtro | | Filtro | | Elevar | | Integracion | | Detección |
--->| pasa |----->| Derivativo |----->| al |----->| por ventana |----->| de puntos |
| Banda | | | | Cuadrado | | deslizante | | QRS |
|________| |____________| |__________| |_____________| |_____________|
'''
#%% Band-pass filter stage
'''
_________
| |
| Filtro |
--->| pasa |----->
| Banda |
|________|
'''
nyq_frec = fs / 2
# use multirate techniques to make the filter implementable
nyq_frec = nyq_frec/8
# filter design
ripple = 0.1  # dB
atenuacion = 40.  # dB
ws1 = 0.4  # 0.21  # Hz; could be 0.05, but the geometric mean is used so the band is symmetric
wp1 = 0.5  # 0.3  # Hz
wp2 = 40  # Hz
ws2 = 50  # 50  # Hz
frecs = np.array([0.0, ws1, wp1, wp2, ws2, nyq_frec ]) / nyq_frec
gains = np.array([-atenuacion, -atenuacion, -ripple, -ripple, -atenuacion, -atenuacion])
gains = 10**(gains/20)
# elliptic (Cauer) band-pass designed as second-order sections
bp_sos_cauer = sig.iirdesign(wp=np.array([wp1, wp2]) / nyq_frec, ws=np.array([ws1, ws2])
                             / nyq_frec, gpass=ripple, gstop=atenuacion, analog=False,
                             ftype='ellip', output='sos')
# downsample the ECG so it matches the filter's design rate
ecg_one_lead_decimate = sig.decimate(ecg_one_lead, 8)
# perform the (zero-phase) filtering
ECG_f_cauer = sig.sosfiltfilt(bp_sos_cauer, ecg_one_lead_decimate)
# bring the signal back to the original sampling rate
ECG_f_cauer = sig.resample(ECG_f_cauer,8*len(ECG_f_cauer))
del ecg_one_lead_decimate

#%% Derivative filter stage
'''
_____________
| |
| Filtro |
---->| |----->
| Derivativo |
|____________|
'''
#ECG_Derivada = np.diff(ECG_f_cauer)
#Make impulse response
h = np.array([-1, -2, 0, 2, 1])/8  # 5-point derivative kernel
Delay = 2  # Delay in samples
#Apply filter
ECG_Derivada = np.convolve(ECG_f_cauer ,h);
ECG_Derivada = ECG_Derivada[Delay:cant_muestras+Delay]  # compensate group delay
ECG_Derivada = ECG_Derivada / np.max( np.abs(ECG_Derivada));  # normalise to [-1, 1]

#%% Squaring stage
'''
____________
| |
| Elevar |
---->| al |----->
| cuadrado |
|___________|
'''
ECG_cuadrado = np.square(ECG_Derivada)

#%% Moving-window integration stage
'''
______________
| |
| Integracion |
---->| por ventana |----->
| deslizante |
|_____________|
'''
#Moving Window Integration
#Make impulse response
h = np.ones((31))/31  # 31-sample moving-average window
Delay = 15  # Delay in samples
#Apply filter
ECG_Detection = sig.convolve(ECG_cuadrado,h)
ECG_Detection = ECG_Detection[Delay:cant_muestras+Delay]
ECG_Detection = ECG_Detection / np.max( np.abs(ECG_Detection));

#%% QRS point detection stage
'''
_____________
| |
| Detección |
---->| de puntos |----->
| QRS |
|____________|
'''
# look for the local maxima (at least 350 samples apart)
QRS_detections_pos = sig.argrelmax(ECG_Detection,order=350)
QRS_detections_pos = QRS_detections_pos[0]

#%% Plots
qrs1 = ecg_one_lead[QRS_detections_pos]  # amplitudes at detected positions
qrs = ecg_one_lead[qrs_detections]  # amplitudes at reference positions
plt.figure("Deteccion de los latidos", constrained_layout=True)
plt.title("Deteccion de los latidos")
plt.plot(tt, ecg_one_lead, label='Señal de ECG ')
plt.plot(QRS_detections_pos, qrs1, label='QRS_det Pan-Tompkins algorithm ', linestyle='None', marker='x')
plt.plot(qrs_detections, qrs, label='QRS_detection', linestyle='None', marker='x')
plt.xlabel('Muestras')
plt.ylabel("Amplitud ")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.grid()
plt.legend()
plt.show()

# Regions of interest
regs_interes = (
    np.array([12, 12.4]) *60*fs,  # minutes to samples
    np.array([15, 15.2]) *60*fs,  # minutes to samples
    np.array([5, 5.2]) *60*fs,  # minutes to samples
    )
for ii in regs_interes:
    # interval limited to [0, cant_muestras]
    zoom_region = np.arange(np.max([0, ii[0]]), np.min([cant_muestras, ii[1]]),
                            dtype='uint')
    # clips to keep the indices valid; another option is modulo N (add N so it
    # wraps in from the other end and becomes circular; in 'C' that is x % 5)
    plt.figure(figsize=(fig_sz_x, fig_sz_y), dpi= fig_dpi, facecolor='w',
               edgecolor='k')
    plt.plot(zoom_region, ecg_one_lead[zoom_region], label='ECG', lw=2)
    plt.plot(QRS_detections_pos, qrs1, label='QRS_det Pan-Tompkins algorithm ',
             linestyle='None', marker='x')
    plt.plot(qrs_detections, qrs, label='QRS_detection', linestyle='None',
             marker='x')
    plt.title('ECG filtering from ' + str(ii[0]) + ' to ' + str(ii[1]) )
    plt.ylabel('Adimensional')
    plt.xlabel('Muestras (#)')
    plt.axis([zoom_region[0], zoom_region[-1], np.min(ecg_one_lead),
              np.max(ecg_one_lead)])
    axes_hdl = plt.gca()
    axes_hdl.legend()
    #axes_hdl.set_yticks(())
    plt.grid()
    plt.show()

#%% Quantification metric.
"""
Propongo utilizar la varianza de la diferencia entre las deteccipones dadas y
el algoritomo Pan-Tompkins , no es una buena medida porque hay detecciones que
solo estan con un algoritmo y no con el otro
1° métrica podria ser comparar la cantidad de detecciones
2° métrica podria ser calcular el error pero solo de las detecciones "correctas"
"""
# 1st metric: difference in detection counts
error_cant_detecciones = np.abs(cant_detections - len(QRS_detections_pos))
# 2nd metric: error of the "correct" detections only
# samples 0 and 305 of the algorithm output are spurious, so they are skipped
error1 = np.abs(QRS_detections_pos[1:305] - qrs_detections[0:304])
error2 = np.abs(QRS_detections_pos[306:] - qrs_detections[304:])
error_abs = np.concatenate((error1,error2),axis=0)
error_rel = error_abs /qrs_detections
error_rel_porcentual = error_rel *100
varianza = np.var(error_rel)
del error1, error2

#%% presentation of results
# NF: false negatives, PF: false positives, PV: true positives,
# NV: true negatives, TA: error percentage
NF = 0
PF = 2
PV = 1903
NV = 1903
TA = (NF + PF) *100/PV
tus_resultados_per = [
    [ NF, PF, PV, NV, TA]  # <-- there should be little numbers here :)
    ]
errores = [
    [max(error_rel_porcentual), varianza ]
    ]
df = pd.DataFrame(tus_resultados_per, columns=['NF', 'PF', 'PV', 'NV', 'TA %'],
                  index=[' Algoritmo de Pan–Tompkins'])
df1 = pd.DataFrame(errores, columns=['error relativo maximo %', 'varianza'],
                   index=[' Algoritmo de Pan–Tompkins'])
print("\n")
print(df)
print("\n")
print(df1)
# -*- coding: utf-8 -*-
import time
def get_now_time():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = time.localtime(time.time())
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
class Logging:
    """Leveled console logger; set ``Logging.flag = False`` to silence all output.

    Each level delegates to the matching ``colour.show_*`` helper with a
    timestamped, level-tagged message.
    """

    # Global on/off switch consulted by every level method.
    flag = True

    @staticmethod
    def error(msg):
        """Print *msg* as a red [Error] line."""
        # FIX: `== True` comparisons replaced with plain truthiness checks.
        if Logging.flag:
            colour.show_error(get_now_time() + " [Error]:" + ''.join(msg))

    @staticmethod
    def warn(msg):
        """Print *msg* as a yellow [Warn] line."""
        if Logging.flag:
            colour.show_warn(get_now_time() + " [Warn]:" + ''.join(msg))

    @staticmethod
    def info(msg):
        """Print *msg* as a green [Info] line."""
        if Logging.flag:
            colour.show_info(get_now_time() + " [Info]:" + ''.join(msg))

    @staticmethod
    def debug(msg):
        """Print *msg* as a blue [Debug] line."""
        if Logging.flag:
            colour.show_debug(get_now_time() + " [Debug]:" + ''.join(msg))

    @staticmethod
    def success(msg):
        """Print *msg* as a white [Success] line."""
        if Logging.flag:
            colour.show_verbose(get_now_time() + " [Success]:" + ''.join(msg))
class colour:
    """Coloured console output helpers with a plain-print fallback."""

    @staticmethod
    def c(msg, colour):
        """Print *msg* in *colour*; fall back to an uncoloured print on failure.

        FIX: the original used the Python-2 ``print msg`` statement, which is a
        SyntaxError on Python 3; the bare ``except:`` is narrowed to
        ``except Exception`` so KeyboardInterrupt/SystemExit still propagate.
        """
        try:
            from termcolor import cprint
            cprint(msg, '%s' % colour)
        except Exception:
            # Best-effort: termcolor missing or it rejected the colour name.
            print(msg)

    @staticmethod
    def show_verbose(msg):
        colour.c(msg, 'white')

    @staticmethod
    def show_debug(msg):
        colour.c(msg, 'blue')

    @staticmethod
    def show_info(msg):
        colour.c(msg,'green')

    @staticmethod
    def show_warn(msg):
        colour.c(msg,'yellow')

    @staticmethod
    def show_error(msg):
        colour.c(msg,'red')
|
#! /usr/bin/python
# Copyright (c) 2003 Robert L. Campbell
"""
data2bfactor: contains the functions
data2b_atom(mol='',data_file=''),
data2b_res(mol='',data_file=''),
data2q_atom(mol='',data_file='') and
data2q_res(mol='',data_file='')
"""
import os,sys,time,re
comment = re.compile('^\s*$|^\s*#')
def atom_data_extract(data_file):
    """
    Read the specified 'by-atom' data file and extract the data from it
    and store it in parallel dictionaries specifying the data
    and residue names (both with keys of chain and residue number and atom name).

    The data file can contain comment lines starting with "#" (on lines by
    themselves); these lines and blank lines are ignored.  Rows are either
    "chain resnum resname atname data" (5 columns) or
    "resnum resname atname data" (4 columns); in the 4-column form the chain
    from the previous 5-column row is reused ('' initially, and a '-' chain
    means 'no chain').  Exits the process on any other column count.

    :return: (bdat, residue) — bdat[chain][resnum][atname] = float data,
        residue[chain][resnum][atname] = residue name.
    """
    # FIX: replaced Python-2-only file() / dict.has_key() with open() and
    # dict.setdefault(), and closed the file handle via a context manager.
    bdat = {}
    residue = {}
    chain = ''
    with open(data_file) as handle:
        data_lines = handle.readlines()
    for line in data_lines:
        # ignore comment lines (beginning with a '#') or blank lines
        if comment.match(line):
            continue
        words = line.split()
        # check number of columns of data
        if len(words) == 5:
            chain = words[0]
            resnum = int(words[1])
            resname = words[2]
            atname = words[3]
            if chain == '-':
                chain = ''
            data = float(words[4])
        elif len(words) == 4:
            resnum = int(words[0])
            resname = words[1]
            atname = words[2]
            data = float(words[3])
        else:
            sys.stderr.write("Error in reading data files -- check number of columns")
            sys.exit(1)
        bdat.setdefault(chain, {}).setdefault(resnum, {})[atname] = data
        residue.setdefault(chain, {}).setdefault(resnum, {})[atname] = resname
    return bdat,residue
def residue_data_extract(data_file):
    """
    Read the specified 'by-residue' data file and return two parallel
    dictionaries keyed by chain and residue number.

    Each non-blank, non-comment line must have either
      4 columns: chain resnum resname data
      3 columns: resnum resname data  (the most recent chain is reused)
      2 columns: resnum data          (residue name stored as '')
    A chain of '-' means "no chain" and is stored under the key ''.
    Lines starting with '#' (and blank lines) are ignored.

    Returns:
      bdat[chain][resnum]    -> float data value
      residue[chain][resnum] -> residue name
    """
    # Local, raw-string copy of the blank/comment pattern keeps the parser
    # self-contained (and avoids the invalid '\s' escape).
    blank_or_comment = re.compile(r'^\s*$|^\s*#')
    bdat = {}
    residue = {}
    chain = ''
    # open() instead of the Python-2-only file(); iterate lazily.
    for line in open(data_file):
        # ignore comment lines (beginning with a '#') or blank lines
        if blank_or_comment.match(line):
            continue
        words = line.split()
        # check number of columns of data
        if len(words) == 4:
            chain = words[0]
            resnum = int(words[1])
            resname = words[2]
            if chain == '-':
                chain = ''
            data = float(words[3])
        elif len(words) == 3:
            resnum = int(words[0])
            resname = words[1]
            data = float(words[2])
        elif len(words) == 2:
            resnum = int(words[0])
            data = float(words[1])
            resname = ''
        else:
            sys.stderr.write("Error in reading data files -- check number of columns")
            sys.exit(1)
        # setdefault() replaces the deprecated has_key() checks.
        bdat.setdefault(chain, {})[resnum] = data
        residue.setdefault(chain, {})[resnum] = resname
    return bdat, residue
def data2b_atom(mol='',data_file=''):
    """
    usage: data2b_atom <mol>, <data_file>

    where <mol> is the molecular object whose B-factor data you wish to modify
    and <data_file> is a file contain the data (one value for each atom)
    The format of <data_file> should be:
         chain resnum resname name data
    or
         resnum resname name data
    (i.e. "chain" is optional if all atoms are in one chain).
    Lines beginning with '#' are ignored as comments.
    """
    from pymol import cmd
    # read the data file into b_dict (values) and res_dict (residue names)
    b_dict,res_dict = atom_data_extract(data_file)
    # sorted() replaces the Python-2-only "keys(); keys.sort()" pattern --
    # dict.keys() is a view without .sort() on Python 3.
    for chain in sorted(b_dict):
        for resnum in sorted(b_dict[chain]):
            # now do the alteration of the B-factor data, atom by atom
            for at in b_dict[chain][resnum]:
                cmd.do("alter /%s//%s/%s/%s/,b=%f" % (mol,chain,resnum,at,b_dict[chain][resnum][at]))
    # force a rebuild of the drawing
    cmd.rebuild()
def data2b_res(mol='',data_file=''):
    """
    usage: data2b_res <mol>, <data_file>

    where <mol> is the molecular object whose B-factor data you wish to modify
    and <data_file> is a file contain the data (one value for each residue)
    The format of <data_file> should be:
         chain resnum resname data
    or
         resnum resname data
    (i.e. "chain" is optional). Lines beginning with '#' are ignored as comments.
    """
    from pymol import cmd
    # read the data file into b_dict (values) and res_dict (residue names)
    b_dict,res_dict = residue_data_extract(data_file)
    # sorted() replaces the Python-2-only "keys(); keys.sort()" pattern.
    for chain in sorted(b_dict):
        # now do the alteration of the B-factor data, residue by residue
        for resnum in sorted(b_dict[chain]):
            cmd.do("alter /%s//%s/%s/,b=%f" % (mol,chain,resnum,b_dict[chain][resnum]))
    # force a rebuild of the drawing
    cmd.rebuild()
def data2q_atom(mol='',data_file=''):
    """
    usage: data2q_atom <mol>, <data_file>

    where <mol> is the molecular object whose occupancy data you wish to modify
    and <data_file> is a file contain the data (one value for each atom)
    The format of <data_file> should be:
         chain resnum resname name data
    or
         resnum resname name data
    (i.e. "chain" is optional). Lines beginning with '#' are ignored as comments.
    """
    from pymol import cmd
    # read the data file into q_dict (values) and res_dict (residue names)
    q_dict,res_dict = atom_data_extract(data_file)
    # sorted() replaces the Python-2-only "keys(); keys.sort()" pattern.
    for chain in sorted(q_dict):
        for resnum in sorted(q_dict[chain]):
            # now do the alteration of the occupancy (q) data, atom by atom
            for at in q_dict[chain][resnum]:
                cmd.do("alter /%s//%s/%s/%s/,q=%f" % (mol,chain,resnum,at,q_dict[chain][resnum][at]))
    # force a rebuild of the drawing
    cmd.rebuild()
def data2q_res(mol='',data_file=''):
    """
    usage: data2q_res <mol>, <data_file>

    where <mol> is the molecular object whose occupancy data you wish to modify
    and <data_file> is a file contain the data (one value for each residue)
    The format of <data_file> should be:
         chain resnum resname data
    or
         resnum resname data
    (i.e. "chain" is optional). Lines beginning with '#' are ignored as comments.
    """
    from pymol import cmd
    # read the data file into q_dict (values) and res_dict (residue names)
    q_dict,res_dict = residue_data_extract(data_file)
    # sorted() replaces the Python-2-only "keys(); keys.sort()" pattern.
    for chain in sorted(q_dict):
        # now do the alteration of the occupancy (q) data, residue by residue
        for resnum in sorted(q_dict[chain]):
            cmd.do("alter /%s//%s/%s/,q=%f" % (mol,chain,resnum,q_dict[chain][resnum]))
    # force a rebuild of the drawing
    cmd.rebuild()
# Register the four commands with PyMOL's command language.
# NOTE(review): 'cmd' is never imported at module level -- these lines rely
# on PyMOL injecting 'cmd' into the namespace when the script is loaded with
# 'run'; importing this file as a plain Python module would raise NameError.
cmd.extend('data2b_res',data2b_res)
cmd.extend('data2b_atom',data2b_atom)
cmd.extend('data2q_res',data2q_res)
cmd.extend('data2q_atom',data2q_atom)
###########################################################################################
# for testing purposes:
# if calling this as a program on its own, read the pdb_file name from
# the command line and run residue_data_extract on it. (does not require
# importing cmd from pymol
if __name__ == '__main__':
    # Stand-alone test mode: parse the file named on the command line with
    # residue_data_extract() and dump what was read (no PyMOL required).
    pdb_file = sys.argv[1]
    b_dict,res_dict = residue_data_extract(pdb_file)
    # sorted() works on both Python 2 and 3 (keys(); keys.sort() does not).
    for chain in sorted(b_dict):
        for resnum in sorted(b_dict[chain]):
            # print as a function call is valid on both Python 2 and 3;
            # the Python-2-only print statement is not.
            print("b-factors %s %s %s %s new B='%s'" % (pdb_file,chain,res_dict[chain][resnum],resnum,b_dict[chain][resnum]))
|
import numpy as np
import math
#5개의 중점과 각각의 분산값
#0(1) 0(1) 0(1)
#4(1) 0(2) 0(1)
#4(1) 8(2) 0(2)
#0(1) 8(2) 0(1)
#2(1.5) 4(1.5) 3(1)
#초기 중점 설정
# Initial cluster centres, one coordinate list per axis (five clusters).
centerx = [0, 4, 4, 0, 2]
centery = [0, 0, 8, 8 ,4]
centerz = [0, 0, 0, 0, 3]
# Per-cluster radius: largest centre-to-member distance seen while training.
max_len = [0, 0, 0, 0, 0]
def get_dis(vec, idx):
    """Euclidean distance from point *vec* == (x, y, z) to centre *idx*."""
    dx = vec[0] - centerx[idx]
    dy = vec[1] - centery[idx]
    dz = vec[2] - centerz[idx]
    return math.sqrt(dx * dx + dy * dy + dz * dz)
# Generate 300 points per centre -- 1500 training points in all -- into n.
n = []
# Cluster 0 sample: x, y, z ~ N(mean 0, std 1).
sigma, mu = 1, 0
x = sigma * np.random.randn(300) + mu
y = sigma * np.random.randn(300) + mu
z = sigma * np.random.randn(300) + mu
for i in range(300):
    n.append([x[i],y[i],z[i]])
# Cluster 1 sample: x ~ N(4, 1), y ~ N(0, 2), z ~ N(0, 1).
sigma, mu = 1, 4
x = sigma * np.random.randn(300) + mu
sigma, mu = 2, 0
y = sigma * np.random.randn(300) + mu
sigma, mu = 1, 0
z = sigma * np.random.randn(300) + mu
for i in range(300):
    n.append([x[i],y[i],z[i]])
# Cluster 2 sample: x ~ N(4, 1), y ~ N(8, 2), z ~ N(0, 2).
sigma, mu = 1, 4
x = sigma * np.random.randn(300) + mu
sigma, mu = 2, 8
y = sigma * np.random.randn(300) + mu
sigma, mu = 2, 0
z = sigma * np.random.randn(300) + mu
for i in range(300):
    n.append([x[i],y[i],z[i]])
# Cluster 3 sample: x ~ N(0, 1), y ~ N(8, 2), z ~ N(0, 1).
sigma, mu = 1, 0
x = sigma * np.random.randn(300) + mu
sigma, mu = 2, 8
y = sigma * np.random.randn(300) + mu
sigma, mu = 1, 0
z = sigma * np.random.randn(300) + mu
for i in range(300):
    n.append([x[i],y[i],z[i]])
# Cluster 4 sample: x ~ N(2, 1.5), y ~ N(4, 1.5), z ~ N(3, 1.5).
sigma, mu = 1.5, 2
x = sigma * np.random.randn(300) + mu
sigma, mu = 1.5, 4
y = sigma * np.random.randn(300) + mu
sigma, mu = 1.5, 3
z = sigma * np.random.randn(300) + mu
for i in range(300):
    n.append([x[i],y[i],z[i]])
# Run 10 iterations of K-means on the points in n: assign every point to its
# nearest centre (tracking, per cluster, the largest centre-to-member
# distance in max_len), then move each centre to the mean of its members.
# max_len from the final iteration is reused below as an acceptance radius.
for _iteration in range(10):
    max_len = [0, 0, 0, 0, 0]
    clusters = [[], [], [], [], []]
    for v in n:
        # Nearest-centre assignment for point v.  (The inner index no
        # longer shadows the outer loop variable; both were 'i' before.)
        this_cluster = -1
        this_len = 99999999
        for c in range(5):
            d = get_dis(v, c)
            if this_len > d:
                this_cluster = c
                this_len = d
        max_len[this_cluster] = max(max_len[this_cluster], this_len)
        clusters[this_cluster].append(v)
    # Update step: move each non-empty cluster's centre to the member mean
    # (replaces five copy-pasted per-cluster blocks).
    for c in range(5):
        members = clusters[c]
        if len(members) > 0:
            centerx[c] = sum(t[0] for t in members) / len(members)
            centery[c] = sum(t[1] for t in members) / len(members)
            centerz[c] = sum(t[2] for t in members) / len(members)
# Evaluate the trained clustering: for each generating distribution (plus one
# distribution far from every centre), draw 100 fresh points and classify
# them.  num[i] counts points assigned to cluster i; the last slot counts
# points rejected because they exceed the assigned cluster's max_len radius.
def _report(label, specs):
    """Draw 100 points from the Gaussian described by *specs* -- a
    ((sigma, mu), (sigma, mu), (sigma, mu)) triple for the x, y and z axes
    -- classify each against the current centres, and print *label*
    followed by the per-cluster counts (replaces six copy-pasted blocks)."""
    (sx, mx), (sy, my), (sz, mz) = specs
    xs = sx * np.random.randn(100) + mx
    ys = sy * np.random.randn(100) + my
    zs = sz * np.random.randn(100) + mz
    pts = [[xs[k], ys[k], zs[k]] for k in range(100)]
    num = [0, 0, 0, 0, 0, 0]
    print(label)
    for v in pts:
        best = -1
        best_d = 99999999
        for c in range(5):
            d = get_dis(v, c)
            if best_d > d:
                best = c
                best_d = d
        if best_d > max_len[best]:
            num[5] += 1  # farther than any training member: rejected
        else:
            num[best] += 1
    print(num)

# The first five distributions match the training clusters; "another" is a
# blob centred at (2, 4, -5) that should mostly be rejected.
_report("cluster0", ((1, 0), (1, 0), (1, 0)))
_report("cluster1", ((1, 4), (2, 0), (1, 0)))
_report("cluster2", ((1, 4), (2, 8), (2, 0)))
_report("cluster3", ((1, 0), (2, 8), (1, 0)))
_report("cluster4", ((1.5, 2), (1.5, 4), (1.5, 3)))
_report("another", ((1, 2), (1, 4), (1, -5)))
|
class Solution(object):
    def sortedSquares(self, nums):
        """Return the squares of *nums*, sorted in ascending order."""
        return sorted(value * value for value in nums)
import bisect
class Solution(object):
    def sortedSquares(self, nums):
        """Return the squares of the sorted array *nums*, ascending.

        The squares of the negative prefix, reversed so they ascend, seed
        the result.  Each non-negative element's square is appended when it
        keeps the list sorted, otherwise bisect-inserted into place.
        """
        total = len(nums)
        result = []
        idx = 0
        # Consume the strictly-negative prefix.
        while idx < total and nums[idx] < 0:
            result.append(nums[idx] * nums[idx])
            idx += 1
        result.reverse()
        if len(result) == total:
            return result
        # Largest value currently at the tail (sentinel when prefix empty).
        largest = result[-1] if result else -9999
        while idx < total:
            square = nums[idx] * nums[idx]
            if square >= largest:
                result.append(square)
                largest = square
            else:
                bisect.insort(result, square)
            idx += 1
        return result
import configparser
class smtp_client():
    # Empty placeholder -- presumably to be filled in with SMTP sending
    # logic later; nothing to document yet.  TODO confirm intended role.
    pass
class User(object):
    # User class created dynamically from JSON configuration
    def __init__(self, d):
        # Adopt *d* directly as the instance dict so every key of the
        # parsed mapping becomes an attribute.  Note the dict is shared,
        # not copied: mutating d afterwards also mutates this object.
        self.__dict__ = d
|
import requests
from urllib.parse import urljoin
from pprint import pprint
def get_weather():
    """Fetch the 5-day forecast for Saint Petersburg from OpenWeatherMap and
    print the city coordinates/id, the maximum daily pressure, and the
    largest day-night temperature difference.

    Performs network I/O; requires a valid API key in API_KEY.
    """
    city_name = 'Saint Petersburg, RU'
    API_KEY = ''  # Вставьте сюда свой API-ключ
    API_FIND_URL = 'http://api.openweathermap.org/data/2.5/find'
    API_GET_WEATHER_URL = 'https://api.openweathermap.org/data/2.5/onecall'
    # Resolve the city name to an id and to lat/lon coordinates.
    resp = requests.get(API_FIND_URL, params={'q': city_name, 'units': 'metric', 'APPID': API_KEY})
    city_list = resp.json()
    city_id = city_list['list'][0]['id']
    coord_lat = city_list['list'][0]['coord']['lat']
    coord_lon = city_list['list'][0]['coord']['lon']
    # One Call API, daily data only (current/minutely/hourly excluded).
    resp_weather = requests.get(API_GET_WEATHER_URL, params={'lat': coord_lat, 'lon': coord_lon, 'units': 'metric',
                                                             'lang': 'ru', 'exclude': 'current,minutely,hourly',
                                                             'appid': API_KEY})
    answer = resp_weather.json()
    five_day = answer['daily'][0:5]
    max_pressure = 0
    max_temp_dif = 0
    for day in five_day:
        temp_dif = day['temp']['day'] - day['temp']['night']
        if day['pressure'] > max_pressure:
            max_pressure = day['pressure']
        if temp_dif > max_temp_dif:
            max_temp_dif = temp_dif
    # BUG FIX: the original printed coord_lat twice; show lat : lon.
    print(f'Город {city_name} находится по координатам {coord_lat} : {coord_lon} . ID по базе = {city_id} ')
    print(f'Максимальное давление равно {max_pressure} hPA')
    # BUG FIX: corrected the misspelling "темпенратур" -> "температур".
    print(f'Максимальная разница температур равна {round(max_temp_dif, 2)}')
if __name__ == '__main__':
    # Script entry point: run the forecast report when executed directly.
    get_weather()
|
#!/usr/bin/env python
import pika
import sys
# Connect to the local RabbitMQ broker and open a channel.
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
# Declare a fanout exchange: every queue bound to it receives every message.
# NOTE(review): older pika releases spell this argument 'type'; newer ones
# require 'exchange_type' -- confirm the installed pika version.
channel.exchange_declare(exchange='logs', type='fanout')
# Message text comes from the command line, with a default fallback.
message = ' '.join(sys.argv[1:]) or "info:Hello World!"
# routing_key is ignored by fanout exchanges, hence ''.
channel.basic_publish(exchange='logs',routing_key='',body=message)
print "[x] Sent %r"%(message,)
connection.close()
|
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.callbacks import EarlyStopping
import numpy
import numpy as np
import os
import tensorflow as tf
# Load the full MNIST set: training and test 28x28 grayscale digit images.
(X_train_all, Y_train_all), (X_test, Y_test) = mnist.load_data()
import matplotlib.pyplot as plt
print(X_train_all.shape)
print()
from sklearn.model_selection import train_test_split
# Keep only 0.5% of the training data (test_size=0.995 discards the rest),
# replacing the commented-out fixed 300-sample slice below.
X_train, __, Y_train, __ = train_test_split(X_train_all, Y_train_all, random_state = 66, test_size = 0.995)
# X_train = X_train_all[:300]
# Y_train = Y_train_all[:300]
print(X_train.shape)
print(Y_train.shape)
# Reshape to (N, 28, 28, 1) and scale pixel values into [0, 1].
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255
# One-hot encode the digit labels.
Y_train = np_utils.to_categorical(Y_train)
Y_test = np_utils.to_categorical(Y_test)
# print(X_train.shape)
# print(X_test.shape)
# Small CNN: two conv layers, max-pooling, dropout, dense softmax head.
model = Sequential()
model.add(Conv2D(16, kernel_size=(6,6), input_shape=(28,28,1), activation='relu'))
model.add(Conv2D(8,(12,12), activation='relu'))
# model.add(Conv2D(128,(3,3), activation='relu'))
# model.add(Conv2D(256,(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax')) # a classification model ends with softmax
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# early_stopping_callback = EarlyStopping(monitor='loss', patience=10)
# NOTE(review): batch_size=1 for 30 epochs is extremely slow -- confirm intended.
history = model.fit(X_train, Y_train, epochs=30, batch_size=1, verbose=1)#, callbacks=[early_stopping_callback])
print("\n Test Accuracy: %.4f" % (model.evaluate(X_test, Y_test)[1]))
|
import dash_bootstrap_components as dbc
from dash import html
# Left column: dark-background jumbotron card, half width on md+ screens.
left_jumbotron = dbc.Col(
    html.Div(
        [
            html.H2("Change the background", className="display-3"),
            html.Hr(className="my-2"),
            html.P(
                "Swap the background-color utility and add a `.text-*` color "
                "utility to mix up the look."
            ),
            dbc.Button("Example Button", color="light", outline=True),
        ],
        className="h-100 p-5 text-white bg-dark rounded-3",
    ),
    md=6,
)
# Right column: light-background, bordered variant of the same card.
right_jumbotron = dbc.Col(
    html.Div(
        [
            html.H2("Add borders", className="display-3"),
            html.Hr(className="my-2"),
            html.P(
                "Or, keep it light and add a border for some added definition "
                "to the boundaries of your content."
            ),
            dbc.Button("Example Button", color="secondary", outline=True),
        ],
        className="h-100 p-5 bg-light border rounded-3",
    ),
    md=6,
)
# Row holding both cards, stretched to equal height on md+ screens.
jumbotron = dbc.Row(
    [left_jumbotron, right_jumbotron],
    className="align-items-md-stretch",
)
|
word = input("Choose word: ")
guessed = []  # letters guessed correctly so far
wrong = []    # letters guessed incorrectly so far
tries = 5
# Turn Letters in blank dashes
# Guess letter
while tries > 0:
    # Build the masked view: guessed letters shown, the rest as '_'.
    out = ""
    for letter in word:
        if letter in guessed:
            out = out + letter
        else:
            out = out + "_"
    # All letters revealed: the player has won.
    if out == word:
        break
    print("Guess a letter: ", out)
    print(tries," guess are left")
    guess = input()
    # check individual letters for/ guesses letters/ tries lost
    if guess in guessed or guess in wrong:
        print("You already guessed", guess)
    elif guess in word:
        print("The letter/word", guess)
        tries = tries  # a correct guess costs no tries
        guessed.append(guess)
    else:
        print("Incorrect")
        tries = tries - 1
        wrong.append(guess)
    # guess the word
    # NOTE(review): this branch fires for every correct *letter*, not just a
    # whole-word guess, so the "right word" message is misleading -- confirm.
    if guess in guessed or guess == word:
        print("Correct you guessed the right word:", guess)
    elif guess in guessed or guess != word:
        print("Wrong word, Try again!")
    print()
    # NOTE(review): printed on every round while tries remain, even though
    # the word may not be fully guessed yet -- confirm intended behaviour.
    if tries:
        print("You guessed", word)
    else:
        print("No, the word is not", word)
        break
#if __name__ == '__main__':
#    print_hi('PyCharm')
|
from collections import defaultdict
class Solution:
    def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
        """Return a course ordering with prerequisites first, or [] when the
        prerequisite graph contains a cycle.

        DFS with cycle detection: recStack marks nodes on the current
        recursion path (a revisit there is a back edge, i.e. a cycle).
        Finished nodes are inserted at the front of *stack*, so *stack*
        ends up dependents-first and is reversed before returning.
        """
        graph = defaultdict(list)
        for u,v in prerequisites:
            graph[u].append(v)  # course u depends on prerequisite v
        stack = []
        visited = [False]*numCourses
        recStack = [False]*numCourses
        def depth(v,stack):
            # Return True iff a cycle is reachable from v.
            visited[v] = True
            recStack[v] = True
            for i in graph[v]:
                if visited[i] == False:
                    if depth(i,stack):
                        return True
                elif recStack[i] == True:
                    return True  # back edge -> cycle
            stack.insert(0,v)
            recStack[v] = False
            return False
        def traverse(numCourses):
            # Run DFS from every unvisited node; True iff any cycle exists.
            for i in range(numCourses):
                if visited[i] == False:
                    if depth(i,stack):
                        return True
            return False
        if not traverse(numCourses):
            return(stack[::-1])
        # BUG FIX: the original fell through and returned None on a cycle;
        # the contract (and the sibling implementation below) expects [].
        return []
#method 2
class Solution:
    def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
        """Topologically order the courses, or return [] on a cycle.

        Node state: 0 = unseen, 1 = on the current DFS path, 2 = done.
        """
        UNSEEN, VISITING, DONE = 0, 1, 2
        status = [UNSEEN] * numCourses
        graph = collections.defaultdict(list)
        for course, prereq in prerequisites:
            graph[prereq].append(course)
        order = []
        def dfs(node):
            # False means a cycle was found somewhere below *node*.
            if status[node] == VISITING:
                return False  # back edge: cycle
            if status[node] == DONE:
                return True   # already fully processed
            status[node] = VISITING
            for nxt in graph[node]:
                if not dfs(nxt):
                    return False
            order.append(node)
            status[node] = DONE
            return True
        for node in range(numCourses):
            if status[node] == UNSEEN and not dfs(node):
                return []
        return order[::-1]
# Reserved words of the toy language: the boolean literals.
tag_word = {'true', 'false'}
class Lex():
    """Tiny lexer for boolean/relational expressions.

    token() locates number/identifier/reserved-word lexemes and records the
    start offset of each occurrence in lexDict; getToken() then walks the
    statement character by character, combining those lexemes with the
    operator and punctuation tokens into self.tokenList.
    """
    def __init__(self, statement):
        # Distinct lexemes seen, grouped by kind.
        self.id_table = set()
        self.num_table = set()
        self.bool_table = set()
        # Maps start position within *statement* -> lexeme text.
        self.lexDict = dict()
        self.token(statement)
        self.getToken(statement)
    def token(self, statement):
        # Pass 1: find every number / identifier / reserved-word lexeme and
        # record where it starts.  Each found occurrence is overwritten with
        # padding characters so partition() finds the *next* occurrence of a
        # repeated lexeme on the following round.
        copy = statement
        temp = statement.replace(' ', '') #skip the space
        for i in '&|<>=!();':
            temp = temp.replace(i, ' ') #skip all the terminal symbol
        split = temp.split(' ') #skip the space
        split = [i for i in split if(i.isalnum())] #catch all the number or English term
        for i in split:
            #If i is number, then add it to number list and store its position
            if i.isdigit():
                self.num_table.add(i)
                pos = len(statement.partition(i)[0]) #the position of the lex
                assert pos >= 0
                self.lexDict[pos] = i
                statement = statement[:pos] + '#'*len(i) + statement[pos + len(i):] if pos > 0 else '#'*len(i) + statement[pos + len(i):] #replace the lex to other chars
                assert len(statement) == len(copy)
            #If i is identifier, then add it to id list and store its pos
            if i.isidentifier():
                #for the non reversed word
                if i not in tag_word:
                    self.id_table.add(i)
                    pos = len(statement.partition(i)[0])
                    assert pos >= 0
                    self.lexDict[pos] = i
                    statement = statement[:pos] + '*'*len(i) + statement[pos + len(i):] if pos > 0 else '*'*len(i) + statement[pos + len(i):]
                    assert len(statement) == len(copy)
                #for the reversed word
                else:
                    self.bool_table.add(i)
                    pos = len(statement.partition(i)[0])
                    assert pos >= 0
                    self.lexDict[pos] = i
                    statement = statement[:pos] + '-'*len(i) + statement[pos + len(i):] if pos > 0 else '-'*len(i) + statement[pos + len(i):]
                    assert len(statement) == len(copy)
    def getToken(self, statement: str):
        # Pass 2: scan left to right, emitting recorded lexemes at their
        # saved offsets and single/double-character operator tokens.
        tokenList = []
        i = 0
        gap = 1
        #Double check each token attribute and push it to tokenList
        while i < len(statement):
            s = statement[i]
            temp = list(self.lexDict.keys())
            if i in temp:
                # A recorded lexeme starts here: emit it and jump past it.
                tokenList.append(self.lexDict[i])
                gap = len(self.lexDict[i])
            elif s == '(':
                tokenList.append(s)
                gap = 1
            elif s ==')':
                tokenList.append(s)
                gap = 1
            elif s =='='and statement[i+1] == '=':
                tokenList.append('==')
                gap = 2
            elif s =='!' and statement[i+1] == '=':
                tokenList.append('!=')
                gap = 2
            elif s == '|' and statement[i+1] == '|':
                tokenList.append('||')
                gap = 2
            elif s == '&' and statement[i+1] == '&':
                tokenList.append('&&')
                gap = 2
            elif s == '<' and statement[i+1] == '=':
                tokenList.append('<=')
                gap = 2
            elif s == '>' and statement[i+1] == '=':
                tokenList.append('>=')
                gap = 2
            elif s == '<':
                tokenList.append(s)
                gap = 1
            elif s == '>':
                tokenList.append(s)
                gap = 1
            elif s == ';':
                tokenList.append(s)
                gap = 1
            elif s == '%':
                tokenList.append(s)
                gap = 1
            else:
                # Anything else (spaces, padding characters) is skipped.
                gap = 1
            i += gap
        self.tokenList = tokenList
|
# Generated by Django 3.1.3 on 2020-11-17 15:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the library app: relaxes the
    Profile fields to be blank/nullable with defaults and binds user_info
    to the swappable auth user model.  Generated code -- avoid hand edits.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('library', '0008_auto_20201117_2110'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='Date_of_Birth',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='address',
            field=models.TextField(blank=True, default='None', null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='adhar_card',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='book_status',
            field=models.BooleanField(default=False, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='borrow_date',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='email_id',
            field=models.EmailField(blank=True, default='None', max_length=254, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='name',
            field=models.CharField(blank=True, default='None', max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='return_date',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='town',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='user_info',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
from amqp_handler import AMQPHandler
import asyncio
import json
import logging
import json
import os
import django
import sys
from random import choice
from asgiref.sync import sync_to_async
# Make the parent directory importable and configure Django settings;
# django.setup() must run before any ORM model import below.
sys.path.append("..")
import pathfinder
os.environ['DJANGO_SETTINGS_MODULE'] = 'pathfinder.settings'
django.setup()
from route_collector.models import Route
from route_collector.models import Point
from route_collector.models import PointClaster
# Service-wide logger: DEBUG and above to a stream handler,
# with a '{'-style format (asctime name level message).
logger = logging.getLogger('route_creator')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
bf = logging.Formatter('{asctime} {name} {levelname:8s} {message}', style='{')
handler.setFormatter(bf)
logger.addHandler(handler)
# Runtime configuration from the environment, with local-dev defaults.
config = {
    'rmq_host': os.environ.get('RMQ_HOST', 'amqp://guest:guest@127.0.0.1:5672'),
    'rmq_exchange': os.environ.get('RMQ_ROUTE_CREATOR_EXCHANGE', 'rmq_route_creator_exchange'),
    'rmq_queue_in': os.environ.get('RMQ_ROUTE_CREATOR_QUEUE_IN', 'rmq_route_creator_queue_in'),
}
@sync_to_async
def make_route(route_name):
    """Fill in intermediate points for the Route named *route_name*.

    Walks integer longitudes between the route's two endpoint Points,
    linearly interpolating latitude, and at each step picks a random Point
    from the matching PointClaster (when it has any members), appending it
    to the route and to its Order JSON.  Wrapped with sync_to_async because
    it uses the synchronous Django ORM from async code.
    """
    route = Route.objects.get(Name=route_name)
    # The first two entries of Order['point_list'] are the endpoint PKs.
    point_a_pk = route.Order['point_list'][0]
    point_b_pk = route.Order['point_list'][1]
    point_a = Point.objects.get(pk=point_a_pk)
    point_b = Point.objects.get(pk=point_b_pk)
    # int() truncates the longitudes so they can drive an integer range.
    lon_a = int(point_a.Longitude)
    lon_b = int(point_b.Longitude)
    lat_a = point_a.Latitude
    lat_b = point_b.Latitude
    # Always walk from the smaller to the larger longitude.
    if (lon_a < lon_b):
        start_p = lon_a
        end_p = lon_b
    else:
        start_p = lon_b
        end_p = lon_a
    for p in range(start_p, end_p):
        # Linear interpolation of latitude at longitude p.
        # NOTE(review): when the endpoints were swapped above, this still
        # interpolates starting from lat_a -- verify the intended direction.
        y = ((lat_b - lat_a) / (end_p - start_p )) * (p - start_p) + lat_a
        claster = PointClaster.objects.get(Number_lat=y/10, Number_lon=p/10)
        points = Point.objects.filter(PointC=claster)
        if len(points) != 0:
            point = choice(points)
            route.Points.add(point)
            current_order = route.Order
            # NOTE(review): point_list was seeded with PKs but is extended
            # with point Names here -- confirm consumers accept the mix.
            current_order['point_list'].append(point.Name)
            current_order['point_list_x'].append(point.X)
            current_order['point_list_y'].append(point.Y)
    route.save()
    return route
async def rmq_msg_proc(msg):
    """AMQP message handler: build the route named in the JSON payload.

    Expects *msg* to be a JSON document with a 'route_name' key.
    Returns (ok, reply): reply is always None (no response message).
    """
    route_msg = json.loads(msg)
    route_name = route_msg['route_name']
    logger.info('Creating {} route'.format(route_name))
    try:
        route = await make_route(route_name)
        logger.info('Route {} created'.format(route))
        return (True, None)
    except Exception as e:
        # logger.exception records the traceback as well (the original used
        # logger.error and lost it); log-message grammar also fixed.
        logger.exception('Route was not created: {}'.format(e))
        return (False, None)
def main():
    """Service entry point: consume route-creation requests from AMQP.

    Connects the AMQP handler first, then blocks forever consuming the
    input queue; each incoming message is processed by rmq_msg_proc.
    """
    logger.info('=== Activating route_creator service ===')
    loop = asyncio.get_event_loop()
    AMQPH = AMQPHandler(loop)
    loop.run_until_complete(AMQPH.connect(amqp_connect_string=config['rmq_host']))
    loop.run_until_complete(
        AMQPH.receive(
            config['rmq_exchange'],
            config['rmq_queue_in'],
            awaitable_msg_proc_func=rmq_msg_proc
        )
    )
    loop.close()
if __name__ == '__main__':
    # Run the service when executed directly.
    main()
|
from django.db import models
import qrcode
from django import forms
from io import BytesIO
from django.core.files import File
from PIL import Image, ImageDraw
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
# Create your models here.
class Manager(models.Model):
    """Contact details for a court manager, linked to a login account."""
    # ForeignKey (not OneToOne): one auth User may back several Manager
    # rows -- NOTE(review): confirm that is intended.
    UserAccount = models.ForeignKey(User, on_delete=models.CASCADE)
    First_Name = models.CharField(max_length=45)
    Last_Name = models.CharField(max_length=45)
    Address = models.CharField(max_length=45)
    phone_no = models.CharField(max_length=45)
    def __str__(self):
        # Display form: "First|Last".
        return self.First_Name + '|' + str(self.Last_Name)
class Employee(models.Model):
    """Core staff record; a QR code encoding Emp_Id is regenerated on save."""
    UserAccount = models.ForeignKey(User, on_delete=models.CASCADE)
    Emp_Id = models.CharField(max_length=45)
    First_Name = models.CharField(max_length=45)
    Last_Name = models.CharField(max_length=45)
    Address = models.CharField(max_length=45)
    phone_no = models.CharField(max_length=45)
    # NOTE(review): "Sallary" misspells "Salary"; renaming needs a migration.
    Sallary = models.CharField(max_length=45)
    Status = models.CharField(max_length=45)
    Age = models.CharField(max_length=45)
    profile_pic = models.ImageField( null=True, blank=True, upload_to="profileimage/")
    qr_code = models.ImageField(upload_to='qr_codes',blank=True)
    def __str__(self):
        # Display form: "First|Last".
        return self.First_Name + '|' + str(self.Last_Name)
    def save(self, *args, **kwargs):
        # Render Emp_Id as a QR image pasted onto a white canvas and attach
        # it to qr_code before delegating to Model.save().
        qrcode_img = qrcode.make(self.Emp_Id)
        canvas = Image.new('RGB',(290,290),'white')
        draw = ImageDraw.Draw(canvas)
        canvas.paste(qrcode_img)
        # BUG FIX: the original f-string had no braces, so every employee
        # got the literal filename "qr_code-(self.Emp_Id).png".
        fname = f'qr_code-{self.Emp_Id}.png'
        buffer = BytesIO()
        canvas.save(buffer,'PNG')
        self.qr_code.save(fname,File(buffer), save=False)
        canvas.close()
        super().save(*args, **kwargs)
class LowOfficer(models.Model):
    """A law officer, identified by officer_id and tied to an Employee."""
    Employee = models.ForeignKey(Employee, on_delete=models.CASCADE)
    officer_id = models.CharField(max_length=45)
    def __str__(self):
        # Display form: "officer_id|<Employee's First|Last>".
        return self.officer_id + '|' + str(self.Employee)
class Judge(models.Model):
    """A judge, identified by Judge_id and tied to an Employee."""
    Employee = models.ForeignKey(Employee, on_delete=models.CASCADE)
    Judge_id = models.CharField(max_length=45)
    def __str__(self):
        # Display form: "Judge_id|<Employee's First|Last>".
        return self.Judge_id + '|' + str(self.Employee)
class Case(models.Model):
    """A court case filed by a plaintiff; a QR code encoding Case_Id is
    regenerated on save."""
    Judge = models.ForeignKey(Judge, on_delete=models.CASCADE)
    LowOfficer = models.ForeignKey(LowOfficer, on_delete=models.CASCADE)
    First_Name = models.CharField(max_length=45)
    Last_Name = models.CharField(max_length=45)
    CaseType = models.CharField(max_length=45)
    Address = models.CharField(max_length=45)
    Case_Id = models.CharField(max_length=45)
    phone_no = models.CharField(max_length=45)
    Status = models.CharField(max_length=45)
    Age = models.CharField(max_length=45)
    casefile = models.FileField(upload_to='casefile/')
    profile_pic = models.ImageField( null=True, blank=True, upload_to="plainttif profileimage/")
    qr_code = models.ImageField(upload_to='caseqr_codes',blank=True)
    def __str__(self):
        # Display form: "First|Case_Id".
        return self.First_Name + '|' + str(self.Case_Id)
    def save(self, *args, **kwargs):
        # Render Case_Id as a QR image on a white canvas and attach it to
        # qr_code before delegating to Model.save().
        qrcode_img = qrcode.make(self.Case_Id)
        canvas = Image.new('RGB',(290,290),'white')
        draw = ImageDraw.Draw(canvas)
        canvas.paste(qrcode_img)
        # BUG FIX: the original f-string had no braces, so every case got
        # the literal filename "qr_code-(self.Case_Id).png".
        cname = f'qr_code-{self.Case_Id}.png'
        buffer = BytesIO()
        canvas.save(buffer,'PNG')
        self.qr_code.save(cname,File(buffer), save=False)
        canvas.close()
        super().save(*args, **kwargs)
class Shedule(models.Model):
    """A court hearing slot linking officer, judge and case.

    NOTE(review): the class name misspells "Schedule"; renaming requires a
    migration plus updates to every reference, so it is documented instead.
    """
    LowOfficer = models.ForeignKey(LowOfficer, on_delete=models.CASCADE)
    Judge = models.ForeignKey(Judge, on_delete=models.CASCADE)
    Case = models.ForeignKey(Case, on_delete=models.CASCADE)
    Date = models.DateTimeField(auto_now=True)  # auto_now: refreshed on every save
    Court_Room = models.IntegerField()
    body = models.TextField()
    def __str__(self):
        return self.body
class Comment(models.Model):
    """Free-form feedback left by a customer."""
    Customer_Name = models.CharField(max_length=255)
    Writte_Comment = models.TextField()
    def __str__(self):
        return self.Customer_Name
class Decision(models.Model):
    """A judge's written verdict on a case."""
    Judge = models.ForeignKey(Judge, on_delete=models.CASCADE)
    Case = models.ForeignKey(Case, on_delete=models.CASCADE)
    Writte_Dession = models.TextField()
    Date = models.DateTimeField(auto_now=True)
    def __str__(self):
        # BUG FIX: __str__ must return a string; the original returned the
        # related Judge instance, which raises TypeError when rendered.
        return str(self.Judge)
class Summon(models.Model):
    """A summons issued by a judge to a law officer."""
    Judge = models.ForeignKey(Judge, on_delete=models.CASCADE)
    LowOfficer = models.ForeignKey(LowOfficer, on_delete=models.CASCADE)
    Summon_body = models.TextField()
    def __str__(self):
        # BUG FIX: the original returned self.officer_id, an attribute that
        # does not exist on Summon (AttributeError).  Show the related
        # officer instead; its own __str__ already includes officer_id.
        return str(self.LowOfficer)
class Assignjudge(models.Model):
    """Join table assigning a judge (and officer) to a case."""
    LowOfficer = models.ForeignKey(LowOfficer, on_delete=models.CASCADE)
    Judge = models.ForeignKey(Judge, on_delete=models.CASCADE)
    Case = models.ForeignKey(Case, on_delete=models.CASCADE)
|
import unittest
from katas.beta.food_combinations import actually_really_good
class ActuallyReallyGoodTestCase(unittest.TestCase):
    """Unit tests for katas.beta.food_combinations.actually_really_good."""
    def test_equals(self):
        # Empty list -> the "Nothing!" fallback message.
        self.assertEqual(actually_really_good([]),
                         'You know what\'s actually really good? Nothing!')
    def test_equals_2(self):
        # Single food -> "<Food> and more <food>." (repeat is lower-cased).
        self.assertEqual(actually_really_good(['Peanut butter']),
                         'You know what\'s actually really good? Peanut '
                         'butter and more peanut butter.')
|
# Exercise data set.
numbers = [12, 10, 32, 3, 66, 17, 42, 99, 20]
# a) print each number on its own line
for num in numbers:
    print(num)
# b) print each number alongside its square, tab-separated
for num in numbers:
    print(num, "\t", num ** 2)
# c) sum of all the numbers
total = sum(numbers)
print(total)
# d) product of all the numbers (kept as a loop: math.prod needs an import)
final = 1
for num in numbers:
    final = final * num
print(final)
# I pledge my Honor that I have abided by the Stevens Honor System - Owen Gresham
# Owen Gresham
# 11-25-2020
# CS-110-A
# Quiz 2 Part 2
def is_vowel(letter):
    """Return True when *letter* is a single vowel (y included), any case."""
    # Set membership tests whole-string equality, exactly like the original
    # chain of == comparisons.
    return letter.lower() in {"a", "e", "i", "o", "u", "y"}
def encrypt(phrase, key):
    """Caesar-shift *phrase* by *key* over a 27-symbol alphabet (a-z plus space).

    Lower-case characters (and the space) are shifted directly; upper-case
    characters are shifted through the lower-case alphabet and restored to
    upper case. Characters outside the alphabet raise ValueError.
    """
    symbols = "abcdefghijklmnopqrstuvwxyz "
    shifted = []
    for ch in phrase:
        if ch.lower() == ch:
            # lower-case letter or the space symbol
            shifted.append(symbols[(symbols.index(ch) + key) % 27])
        elif ch.upper() == ch:
            shifted.append(symbols[(symbols.index(ch.lower()) + key) % 27].upper())
    return "".join(shifted)
def add(num1, num2):
    """Return num1 + num2 with both operands coerced to float."""
    a, b = float(num1), float(num2)
    return a + b


def subtract(num1, num2):
    """Return num1 - num2 with both operands coerced to float."""
    a, b = float(num1), float(num2)
    return a - b


def multiply(num1, num2):
    """Return num1 * num2 with both operands coerced to float."""
    a, b = float(num1), float(num2)
    return a * b


def divide(num1, num2):
    """Return num1 / num2 with both operands coerced to float.

    Raises ZeroDivisionError when num2 coerces to 0; the caller only
    catches ValueError/IndexError, so this propagates (as in the original).
    """
    a, b = float(num1), float(num2)
    return a / b
def main():
    """Interactive menu: a 4-function calculator (option 1) or string tools
    (vowel count / Caesar encryption, option 2).

    All I/O goes through input()/print(); invalid selections or malformed
    numbers print an error message instead of raising.
    """
    print("For Mathematical Functions, Please Enter the Number 1\n"
          "For String Operations, Please Enter the Number 2")
    selection_one = input()
    if selection_one == "1":
        print("For Addition, Please Enter the Number 1\n"
              "For Subtraction, Please Enter the Number 2\n"
              "For Multiplication, Please Enter the Number 3\n"
              "For Division, Please Enter the Number 4")
        selection_two = input()
        # Map menu choices to (display symbol, operation) — replaces four
        # copies of the same try/except block.
        operations = {
            "1": ("+", add),
            "2": ("-", subtract),
            "3": ("*", multiply),
            "4": ("/", divide),
        }
        if selection_two not in operations:
            print("Invalid input")
        else:
            nums = input("Enter two numbers separated by a comma: ").split(",")
            symbol, operation = operations[selection_two]
            try:
                # float() here raises ValueError for bad numbers, and nums[1]
                # raises IndexError when no comma was entered.
                # NOTE(review): division by zero still propagates, as before.
                print(float(nums[0]), symbol, float(nums[1]), "=", operation(nums[0], nums[1]))
            except (ValueError, IndexError):
                print("Invalid Input")
    elif selection_one == "2":
        print("To Determine the Number of Vowels in a String, Enter the Number 1\n"
              "To Encrypt a String, Enter the Number 2")
        selection_two = input()
        if selection_two != "1" and selection_two != "2":
            print("Invalid input")
        else:
            phrase = input("Please enter a phrase only containing letters and/or spaces: ")
            if selection_two == "1":
                num = sum(1 for ch in phrase if is_vowel(ch))
                print("The phrase \"" + phrase + "\" contains", num, "vowels.")
            elif selection_two == "2":
                try:
                    key = int(input("Enter the shift key: "))
                    encrypted = encrypt(phrase, key)
                    print("\nThe encrypted phrase:\n" + encrypted)
                except ValueError:
                    print("Invalid Input")
    else:
        print("Invalid input")


main()
|
from client import Client
from server import Server
from tracker import Tracker
from config import Config
# from downloader import downloader
from torrent import * # assumes that your Torrent file is in this folder
from threading import Thread
import uuid
from message import Message
import socket
from file_manager import FileManager
import time
from htpbs import ProgressBars
"""
class Peer():
SEEDER = 2;
LECHES = 1;
PEER = 0;
# copy and paste here your code implementation from the peer.py in your Labs
def __init__(self):
self.role = PEER # default role
pass
"""
class Peer:
    """
    In this part of the peer class we implement methods to connect to multiple peers.
    Once the connection is created downloading data is done in similar way as in TCP assigment.
    """
    SERVER_IP = '127.0.0.1'
    TARGET_IP = '127.0.0.1'
    TARGET2_IP = '127.0.0.2'
    TARGET3_IP = '127.0.0.4'
    NUM_SERVER = 2
    SERVER_PORT = 5000
    CLIENT_MIN_PORT_RANGE = 5001
    MAX_NUM_CONNECTIONS = 10
    MAX_UPLOAD_RATE = 100
    MAX_DOWNLOAD_RATE = 1000
    PEER = 'peer'
    LEECHER = 'leecher'
    SEEDER = 'seeder'

    #def __init__(self, role=PEER, server_ip_address='172.20.176.1'): DIFFERENT computer
    #def __init__(self, role=SEEDER, server_ip_address='10.0.0.246'):#Run client role = PEER or LEECHER, Don't run client role = SEEDER
    #def __init__(self, role=PEER, server_ip_address=socket.gethostbyname(socket.gethostname())):
    def __init__(self, role=SEEDER, server_ip_address=SERVER_IP):#Run client role = PEER or LEECHER, Don't run client role = SEEDER
        """
        Class constructor
        :param role: one of PEER, LEECHER or SEEDER (a seeder never runs a client)
        :param server_ip_address: used when need to use the ip assigned by LAN
        """
        self.server_ip_address = server_ip_address
        self.id = uuid.uuid4()  # creates unique id for the peer
        self.role = role
        self.torrent = Torrent("age.torrent")
        self.server = Server(self.torrent, self.id, server_ip_address,
                             self.SERVER_PORT)  # inherits methods from the server
        self.client = None
        self.tracker = Tracker(self.server, self.torrent, False)  # bool - announce?
        self.swarm = None  # e.g. [('127.0.0.1', 5000), ('127.0.0.2', 5000), ('127.0.0.3', 5000)]
        self.message = Message()  # Initialize bitfield for this peer
        self.file_manager = FileManager(self.torrent, self.id)
        self.progressbars = ProgressBars(num_bars=self.NUM_SERVER)

    def run_server(self):
        """
        Starts a threaded server
        :return: VOID
        """
        try:
            # must thread the server, otherwise it will block the main thread
            Thread(target=self.server.run, daemon=False).start()
            print("Server started.........")
        except Exception as error:
            print(error)  # server failed to run

    """
    def run_client(self, announce=False):
        try:
            Thread(target=self.client.run, daemon=False).start()
            print("Client started.........")
        except Exception as error:
            print(error) # server failed to run
    """

    def run_tracker(self, announce=False):
        """
        Starts a threaded tracker
        :param announce: True if this is the first announce in this network
        :return: VOID
        """
        try:
            if self.server:
                announce = True
            if self.role == 'peer':
                announce = True
            self.tracker = Tracker(self.server, self.torrent, announce)
            #Thread(target=self.tracker.run, daemon=False).start()
            print("Tracker running.....")
            # NOTE(review): an unresolved git merge conflict (<<<<<<< ... >>>>>>>)
            # was removed here; it made the module a SyntaxError. The remote
            # branch's newer get_DHT(info_hash) debug lines were kept, commented.
            # print("Tracker DHT: ", self.tracker.get_DHT(self.torrent.info_hash()), "Look here")
            # self.swarm = self.tracker.get_DHT(self.torrent.info_hash())
            """
            if self.role != 'seeder': # Seeder does not need client to download
                self.message.init_bitfield(self.torrent.num_pieces()) # Initialize this bitfield
                self.file_manager.create_tmp_file()
                i = 0
                print(self.swarm[1][0])
                port = 5001
                for i in range(self.NUM_SERVER):
                    peer_ip = self.swarm[i][0]
                    self._connect_to_peer(i , port, peer_ip)
                    port += 1
            """
            """
            self.client = Client(self, 0, self.message, self.torrent, announce, self.tracker, str(self.id), self.TARGET_IP,
                                 self.server_ip_address, 5001)
            Thread(target=self.client.run, daemon=False).start()
            time.sleep(1)
            self.client2 = Client(self, 1, self.message, self.torrent, announce, self.tracker, str(self.id), self.TARGET2_IP,
                                  self.server_ip_address, 5002)
            Thread(target=self.client2.run, daemon=False).start()
            time.sleep(1)
            self.client3 = Client(self, 2, self.message, self.torrent, announce, self.tracker, str(self.id), self.TARGET3_IP,
                                  self.server_ip_address, 5003)
            Thread(target=self.client3.run, daemon=False).start()
            """
            # print("Client started.........")
            # Get an array of (ip, port) that are connected to the torrent file
            #self.swarm = self.tracker.get_DHT()
        except Exception as error:
            print(error)  # Tracker or Client error

    def _connect_to_peer(self, threadID, client_port_to_bind, peer_ip_address):
        """
        Create a new client bound to *client_port_to_bind*, connect it to the
        peer (server) listening at *peer_ip_address*, and thread it.
        :param threadID: index used for logging/progress bars
        :param client_port_to_bind: the port to bind to a specific client
        :param peer_ip_address: the peer ip address that
               the client needs to connect to
        :return: VOID
        """
        try:
            self.client = Client(self, threadID, self.message, self.torrent, 1, self.tracker, str(self.id), peer_ip_address,
                                 self.server_ip_address, client_port_to_bind)
            Thread(target=self.client.run, daemon=False).start()
            print("Client started.........")
        except Exception as error:
            print(error)  # client failed to run
# runs when executing python3 peer.py
# main execution
if __name__ == '__main__':
    # testing
    # peer = Peer(role='peer')
    peer = Peer()  # default role is SEEDER (see Peer.__init__) — no client started
    #print(peer.torrent.info_hash())
    print("\n***** P2P client App *****\n")
    print("Peer: " + str(peer.id) + " started....")
    # NOTE(review): the printed 2048 b/s rates are hard-coded and disagree
    # with Peer.MAX_UPLOAD_RATE/MAX_DOWNLOAD_RATE — confirm which is intended.
    print("Max download rate: 2048 b/s")
    print("Max upload rate: 2048 b/s\n")
    print("Torrent: " + peer.torrent.torrent_path)
    print("file: "+ peer.torrent.file_name())
    print("seeder/s: " + str(peer.server_ip_address) + ":" + str(peer.SERVER_PORT))
    peer.run_server()
    peer.run_tracker()
    # peer.run_client()
|
from twisted.web import server, resource
from twisted.internet import reactor
HARDCODED_STATIC_HTML="""
<html>
<head>
<title>CS131 Project (Winter 2017)</title>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
<link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
</head>
<body>
<div class="container">
<div class="span12">
<h1>CS131 Project (Winter 2017)</h1>
<h2>Notice</h2>
<ul>
<li>(2017-03-01) Please include the following configuration file (<a href="conf.py">download conf.py</a>).</li>
<strong>Usage</strong>
<pre>
import conf
print conf.API_KEY
print conf.PORT_NUM['ALFORD']</pre>
</ul>
<h2>Port Allocation</h2>
<p>Please enter your UID (9 digits): </p>
<form id="query-form">
<input type="text" name="m" value=""/>
<button class="btn btn-default" type="submit">Query</button>
</form>
<p>Result: <strong><pre id="query-result"></pre></strong></p>
<script>
$("#query-form").submit(function(event){
$("#query-result").load("/q?m="+encodeURI($("input[name=m]").val()));
event.preventDefault(); });
</script>
<h2>Links</h2>
<ul><li><a href="http://web.cs.ucla.edu/classes/winter17/cs131/hw/pr.html">Specification (Twisted Places proxy herd)</a></li></ul>
</div>
</div>
</body>
</html>
"""
HARDCODED_STATIC_CONF="""
# Google Places API key
API_KEY="your Google Places API key"
# TCP port numbers for server instances
# Please use the port numbers allocated by the TA.
PORT_NUM = {
'ALFORD': 12000,
'BALL': 12001,
'HAMILTON': 12002,
'HOLIDAY': 12003,
'WELSH': 12004
}
"""
class PortNumber(resource.Resource):
    # Twisted resource serving the static course page and /q?m=<uid> lookups.
    # (Python 2 code: render_GET returns str, and the page uses `print x`.)
    isLeaf = True
    # Class-level dict shared by all instances: uid -> (first_port, limit).
    lookup_table = {}
    def load(self):
        # Populate lookup_table from port.txt; each valid line is "uid first limit".
        with open("port.txt") as f:
            contents = f.read()
        tmp = [c.split() for c in contents.split("\n")]
        for v in tmp:
            if len(v)==3:
                self.lookup_table[v[0]] = int(v[1]), int(v[2])
    def render_GET(self, request):
        if len(request.postpath)==1:
            if request.postpath[0]=="":
                # "/" -> the static HTML page
                return HARDCODED_STATIC_HTML
            elif request.postpath[0]=="q":
                # "/q?m=<uid>" -> the student's allocated port range
                if "m" in request.args:
                    uid=request.args["m"][0]
                    if uid in self.lookup_table:
                        found_entry = self.lookup_table[uid]
                        # limit is exclusive, hence the -1 for the inclusive display
                        return "{} <= PORT <= {}".format(found_entry[0], found_entry[1]-1)
                    else:
                        return "Entry not found"
                else:
                    return "Invalid usage"
            elif request.postpath[0]=="conf.py":
                # "/conf.py" -> downloadable plain-text config
                request.setHeader("Content-Type", "text/plain; charset=utf-8")
                return HARDCODED_STATIC_CONF
        # Anything else: 404 with no body.
        # NOTE(review): finish() followed by NOT_DONE_YET is unusual for
        # twisted — normally one returns a body instead; confirm intended.
        request.setResponseCode(404)
        request.finish()
        return server.NOT_DONE_YET
# NOTE(review): this rebinding shadows the imported `twisted.web.resource`
# module; it works only because that module is not referenced again below.
resource = PortNumber()
resource.load()
site = server.Site(resource)
reactor.listenTCP(8080, site)
reactor.run()
|
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor, ToPILImage, Grayscale
import glob
from PIL import Image
import utils
from datasets.mnist_download import MNIST_local
from utils import used_device
class MnistNN(nn.Module):
    """Fully-connected 784-512-512-10 MNIST classifier with sigmoid activations."""

    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        # Attribute names are kept so saved state_dicts stay compatible.
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 512),
            nn.Sigmoid(),
            nn.Linear(512, 512),
            nn.Sigmoid(),
            nn.Linear(512, 10),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Flatten the image batch and return the per-class scores."""
        return self.linear_relu_stack(self.flatten(x))
class MnistSmallNN(nn.Module):
    """Compact 784-64-32-10 MNIST classifier with sigmoid activations."""

    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        # Attribute names are kept so saved state_dicts stay compatible.
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 64),
            nn.Sigmoid(),
            nn.Linear(64, 32),
            nn.Sigmoid(),
            nn.Linear(32, 10),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Flatten the image batch and return the per-class scores."""
        return self.linear_relu_stack(self.flatten(x))
def train_loop(dataloader, model, loss_fn, optimizer):
    """Run one training epoch over *dataloader*, logging loss every 100 batches."""
    size = len(dataloader.dataset)
    for batch, (X, y) in enumerate(dataloader):
        # Forward pass on the configured device.
        pred = model(X.to(used_device()))
        loss = loss_fn(pred, y.to(used_device()))

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            # NOTE: the loss is divided by the batch size purely for display.
            avg_loss = loss.item() / len(X)
            seen = batch * len(X)
            print(f"loss: {avg_loss:>7f} [{seen:>5d}/{size:>5d}]")
def test_loop(dataloader, model, loss_fn):
    """Evaluate *model* on *dataloader*; print average loss and accuracy."""
    size = len(dataloader.dataset)
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for X, y in dataloader:
            pred = model(X.to(used_device()))
            y = y.to(used_device())
            test_loss += loss_fn(pred, y.to(used_device())).item()
            # Count samples whose highest-scoring class matches the label.
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= size
    correct /= size
    print(f"Test Error: \n Accuracy: {(100 * correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
def workflow():
    """End-to-end MNIST workflow: load data, (re)load or create a model,
    train/evaluate for many epochs, save, then classify custom PNG digits."""
    utils.use_cuda()
    batch_size = 64
    learning_rate = 1e-2
    # Local copy of MNIST (same interface as torchvision's datasets.MNIST).
    #training_data = datasets.MNIST(
    training_data = MNIST_local(
        root="./datasets/mnist",
        train=True,
        transform=ToTensor(),
        folder="./datasets/mnist_local"
        #target_transform=Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), src=torch.tensor(1.)))
    )
    #test_data = datasets.MNIST(
    test_data = MNIST_local(
        root="./datasets/mnist",
        train=False,
        transform=ToTensor(),
        folder="./datasets/mnist_local"
        #target_transform=Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), src=torch.tensor(1.)))
    )
    train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
    test_dataloader = DataLoader(test_data, batch_size=batch_size)
    # figure = plt.figure(figsize=(8, 8))
    # cols, rows = 4, 4
    # for i in range(1, cols * rows + 1):
    #     sample_idx = torch.randint(len(test_data), size=(1,)).item()
    #     img, label = test_data[sample_idx]
    #     img = img.transpose(0, 1).transpose(1, 2)
    #     figure.add_subplot(rows, cols, i)
    #     plt.title(label)
    #     plt.axis("off")
    #     plt.imshow(img.squeeze(), cmap="gray")
    # plt.show()
    # Model selection: create a fresh model or resume from the saved file.
    model_type = MnistSmallNN
    model_create_new = False
    model_save = True
    model_file_name = "mnist_small.pth" if model_type == MnistSmallNN else "mnist.pth"
    model_file_path = f'./model_files/{model_file_name}'
    model = model_type().to(used_device()) if model_create_new else torch.load(model_file_path).to(used_device())
    # model = MnistNN().to(used_device())
    # model = torch.load('../model_files/mnist.pth').to(used_device())
    # model = MnistSmallNN().to(used_device())
    # model = torch.load('../model_files/mnist_small.pth').to(used_device())
    # for name, param in model.named_parameters():
    #     print(f"Layer: {name} | Size: {param.size()} | Values : {param[:2]} \n")
    # loss_fn = nn.MSELoss()
    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    epochs = 1000
    for t in range(epochs):
        print(f"Epoch {t + 1}\n-------------------------------")
        train_loop(train_dataloader, model, loss_fn, optimizer)
        test_loop(test_dataloader, model, loss_fn)
    print("Training finished!")
    if model_save:
        torch.save(model, model_file_path)
        print(f"Saved model to {model_file_path}")
    # Classify the user's own digit images.
    # NOTE(review): the glob pattern mixes '/' and '\' separators — works on
    # Windows only; confirm the intended platform.
    images = glob.glob(r"./datasets\mnist_my\*png")
    for image in images:
        img = Image.open(image)
        trans0 = ToTensor()
        trans1 = ToPILImage()
        trans2 = Grayscale(num_output_channels=1)
        # Round-trip through PIL to force a single-channel grayscale image.
        im = trans2(trans1(trans0(img)))
        # plt.imshow(im)
        tens = trans0(im)
        pred = model(tens.to(used_device()))
        print(image)
        print(pred)
        print(pred.argmax().item())
        print()
    print("Finished!")
# workflow() |
#!/usr/bin/env python
RABBITMQ_DEFAULT_HOST = 'localhost'
RABBITMQ_DEFAULT_PORT = 5672
def get_host_and_port():
    """Prompt for a RabbitMQ host and port, falling back to the defaults.

    Returns a [host, port] list; the port is converted to int.
    (Python 2 script: uses raw_input.)
    """
    host = raw_input("hostname: ") or RABBITMQ_DEFAULT_HOST
    port_text = raw_input("port: ")
    port = int(port_text) if port_text else RABBITMQ_DEFAULT_PORT
    return [host, port]
|
def who_do_you_know():
    """Ask for a comma-separated list of names; return them stripped and lower-cased."""
    raw_names = input("Enter the list of people you know\n")
    return [name.strip().lower() for name in raw_names.split(",")]
def ask_user():
    """Ask for one name and report whether it appears in the module-level
    list_of_people (comparison is case- and whitespace-insensitive)."""
    person = input("Enter the name of the person\n")
    known = person.strip().lower() in list_of_people
    template = "You know this person {}" if known else "You do not know this person {}"
    print(template.format(person))
# Script entry: collect the known names once, then answer a single query.
list_of_people = who_do_you_know()
ask_user()
|
# -*- coding: utf-8 -*-
#####################################################
# Shuffled Frog Leaping Algorithm (SFLA) applied to Fn5
# Fn5: DeJong's F5 function
# -66 <= x1, x2 <= 66
#
#####################################################
import numpy as np
import random
import matplotlib.pyplot as plt
from optim_func import fn5
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese plot labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
# Algorithm parameters, chosen from the problem and prior experience.
# Number of memeplexes (sub-populations).
memeplexes_number = 100
# Number of frogs in each memeplex.
frog_n = 30
# Total size of the frog population.
total = memeplexes_number * frog_n
# Number of local-search evolution steps per memeplex.
circulation_N = 30
# The whole frog population.
populations = []
# Memeplexes; e.g. memeplexes[0] is the first memeplex.
memeplexes = [[] for _ in range(memeplexes_number)]
# Fitness list — smaller values are better.
fitness = []
# Per-memeplex fitness lists.
mem_fitness = [[] for _ in range(memeplexes_number)]
# Position of the globally best frog.
frog_g = []
# Size of each submemeplex.
submemep_q = 20
# Maximum step size, one entry per dimension (currently unused).
step_max = [1,1] # to be tuned
up_limit = 66
low_limit = -66
# Convergence accuracy (currently unused by the main loop).
accuracy = 0.0000000001
# Maximum number of global iterations.
total_eval = 100
def globalSearch():
    """Top-level SFLA loop: initialize the population, partition it into
    memeplexes, run the local search, reshuffle, for total_eval iterations."""
    # Initialize the frog population; fitness is computed each iteration below.
    populations = [[random.randint(-66,66), random.randint(-66,66)] for _ in range(total)]
    # Loop until the maximum iteration count is reached (the accuracy
    # criterion mentioned in the original comment is not implemented).
    evalution = 0
    show_fitness = []
    while(evalution < total_eval):
        fitness = [fn5(x1, x2) for x1, x2 in populations]
        # Sort the population by ascending fitness and record the best frog.
        zip_sorted = sorted(zip(populations, fitness), key=lambda x: (x[1],x[0]))# sort order
        # print(zip_sorted)
        populations, fitness = [list(x) for x in zip(*zip_sorted)]
        global frog_g  # modified global
        frog_g = populations[0]
        print("The best frog's position and fitness: ", frog_g, fitness[0])
        # Plot the current population and best-fitness history.
        show_fitness.append(fitness[0])
        plot(populations, show_fitness)
        # print(global_best_position)
        # Partition the population into memeplexes round-robin by rank.
        # NOTE(review): memeplexes/mem_fitness are never cleared between
        # iterations, so they grow every pass and the reshuffle below always
        # reads the first (stale) entries — confirm whether this is intended.
        for j in range(frog_n):
            for k in range(memeplexes_number):
                memeplexes[k].append(populations[k + memeplexes_number * j])
                mem_fitness[k].append(fitness[k + memeplexes_number * j])
        # Run the local search (could be accelerated with multiple threads).
        localSearch()
        # Shuffle the memeplexes back into the population (np.reshape would be faster).
        for i in range(memeplexes_number):
            for j in range(frog_n):
                populations[i * frog_n + j] = memeplexes[i][j]
        evalution += 1
def localSearch():
    '''
    Local search: for each memeplex, repeatedly build a submemeplex and
    try to improve its worst frog.

    Input  -- all memeplexes (module globals)
    Output -- the updated memeplexes (mutated in place)
    '''
    # Index of the current memeplex.
    im = 0
    while im < memeplexes_number:
        # Current evolution step within this memeplex.
        iN = 0
        while iN < circulation_N:
            submemep = constructSubmemep(memeplexes[im])
            # Fitness of the submemeplex members.
            sub_fitness = [fn5(x1, x2) for x1, x2 in submemep]
            # Sort the submemeplex by fitness; record its best and worst frogs.
            sub_sorted = sorted(zip(submemep, sub_fitness), key=lambda x: (x[1],x[0]))# sort order
            submemep, sub_fitness = [list(x) for x in zip(*sub_sorted)]
            sub_best = submemep[0]
            sub_worst = submemep[-1]
            new_position = updateWorst(sub_best, sub_worst)
            # Locate the worst frog inside its memeplex via its fitness value.
            index = mem_fitness[im].index(sub_fitness[-1])
            # Replace the worst frog and its fitness in the memeplex.
            memeplexes[im][index] = [int(new_position[0]),int(new_position[1])]
            mem_fitness[im][index] = fn5(int(new_position[0]), int(new_position[1]))
            iN += 1
        im += 1
def constructSubmemep(current_memep):
    '''Build a submemeplex of submemep_q distinct frogs from *current_memep*.

    Frogs are drawn by roulette-wheel selection with linearly decreasing
    probabilities, so better-ranked frogs are more likely to be chosen.
    '''
    # Roulette-wheel selection probabilities (they sum to 1).
    select_prob = [2*(frog_n - j) / (frog_n * (frog_n + 1)) for j in range(frog_n)]
    wheel_select = [sum(select_prob[:i+1]) for i in range(frog_n)]
    submemep = []
    i = 0
    while i < submemep_q:
        rand = random.random()
        for j in range(frog_n):
            if rand < wheel_select[j]:
                seleted = current_memep[j]
                break
        # No duplicate frogs allowed; retry when a duplicate was drawn.
        if seleted not in submemep:
            submemep.append(seleted)
        else:
            i -= 1
        i += 1
    return submemep  # ideally would also return each frog's index in the memeplex
def updateWorst(local_best, local_wrost):
    '''Update the position of the worst frog.

    First try to improve it by leaping toward the submemeplex best; if that
    fails, leap toward the global best; as a last resort generate a random frog.

    Arguments:
        local_best {list} -- position of the submemeplex's best frog
        local_wrost {list} -- position of the submemeplex's worst frog
    Return:
        {list} -- the updated position
    '''
    # Leap toward the local best (step_max is ignored for now; direct move).
    # if diff = (local_best - local_wrost) > 0:
    #     step_size = min(int(random.random() * diff), step_max)
    # else:
    #     step_size = min(int(random.random() * diff), -step_max)
    # new = local_wrost + step_
    forg_b = np.array(local_best)
    forg_w = np.array(local_wrost)
    step_size = random.random() * (forg_b - forg_w)
    new = (forg_w + step_size).tolist()
    # If the move left the feasible region or did not improve, retry toward
    # the global best.
    # NOTE(review): fn5 is minimized elsewhere in this file, so the test
    # `fn5(new) < fn5(worst)` rejects *better* positions — verify direction.
    if int(new[0]) > up_limit or int(new[0]) < low_limit \
        or int(new[1]) > up_limit or int(new[1]) < low_limit \
        or fn5(int(new[0]), int(new[1])) < fn5(forg_w[0], forg_w[1]):
        step_size = random.random() * (np.array(frog_g) - forg_w)
        new = (forg_w + step_size).tolist()
    # Still unacceptable: generate a brand-new random frog.
    if int(new[0]) > up_limit or int(new[0]) < low_limit \
        or int(new[1]) > up_limit or int(new[1]) < low_limit \
        or fn5(int(new[0]), int(new[1])) < fn5(forg_w[0], forg_w[1]):
        new = [random.randint(-66,66), random.randint(-66,66)]
    return new
def plot(populations, best_fitness):
    '''Draw the current population scatter (left) and the best-fitness
    convergence curve (right), pausing briefly so the figure updates.'''
    x1 = []
    x2 = []
    for i, j in populations:
        x1.append(i)
        x2.append(j)
    plt.ion()
    plt.figure(1)
    # Evolution scatter plot of the whole population.
    plt.subplot(121)
    plt.plot(x1, x2, 'r^')
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.title('SFLA进化动态')
    # plt.text(1,1, 'x=1')
    plt.axis([-66,66,-66,66])
    plt.grid(True)
    # Convergence curve of the best fitness per iteration.
    plt.subplot(122)
    plt.plot(best_fitness, 'g')
    plt.xlabel('number of evalution')
    plt.ylabel('best fitness value')
    plt.title('进化曲线')
    plt.axis([1,100,0,5])
    plt.draw()
    plt.pause(0.8)
    # plt.ioff()
    # plt.close()
    plt.clf()
    # plt.show()
# def plot(populations, best_fitness):
# '''
# '''
# x1 = []
# x2 = []
# for i, j in populations:
# x1.append(i)
# x2.append(j)
# plot1(x1,x2)
# plot2(best_fitness)
# def plot1(x1, x2):
# plt.ion()
# plt.figure(1)
# #进化动态图
# plt.plot(x1, x2, 'r^')
# plt.ylabel('x2')
# plt.xlabel('x1')
# plt.title('SFLA进化动态')
# plt.axis([-66,66,-66,66])
# plt.grid(True)
# plt.draw()
# plt.pause(0.8)
# # plt.ioff()
# # plt.close()
# plt.clf()
# # plt.show()
# def plot2(best):
# plt.figure(1)
# plt.plot([best], 'g-')
# plt.xlabel('number of evalution')
# plt.ylabel('best fitness value')
# plt.title('进化曲线')
# plt.pause(0.8)
# # plt.ioff()
# # plt.close()
# plt.show()
if __name__ == '__main__':
    # Run the full SFLA optimization when executed as a script.
    globalSearch()
|
import subprocess
# Run `ls -l`, capturing stdout, and report the result.
completed = subprocess.run(['ls', '-l'], stdout=subprocess.PIPE)
print('return code:', completed.returncode)
output = completed.stdout
print('{} bytes in stdout :\n{}'.format(len(output), output.decode('utf-8')))
|
# Before converting, note the differences between Python's list and numpy's array (np.array):
# 1. list is a built-in type whose elements may have different types (like Java's List,
#    which uses generics/ArrayList for uniformity); all elements of an array must share one type.
# 2. A list stores references to its items rather than the data itself, which costs extra
#    memory, so prefer array for bulk numeric data.
# 3. list has an append method for in-place appends; array does not — use np.append instead.
# 4. They print differently: list items are separated by commas, array items by spaces.
print('-----------------list和array-------------------')
import numpy as np

list1 = [1, 2, 3, 4]
arr1 = np.array(list1)
print(list1)
print(arr1)

print('-----------------list转换为str-------------------')
# When the list already holds strings, join the elements directly.
list2 = ['a','b','c','d']
str1 = ''.join(list2)
str2 = ' '.join(list2)
str3 = '.'.join(list2)
print(str1)
print(str2)
print(str3)
# Numeric elements must be converted to str before joining.
list3 = [1, 2, 3, 4, 5]
str_1 = ''.join(map(str, list3))
str_2 = ' '.join(map(str, list3))
str_3 = '.'.join(map(str, list3))
print(str_1)
print(str_2)
print(str_3)

print('-----------------array转换为str-------------------')
# join() accepts any iterable of strings, so a numpy array of strings works too.
arr2 = np.array(list2)
str4 = ''.join(arr2)
print(str4)

print('-----------------str转换为list-------------------')
str11 = 'abcde'
str22 = 'a b c d e'
str33 = 'a, b, c, d, e'
result1 = list(str11)
result2 = str22.split()
result3 = str33.split(', ')
print(result1)
print(result2)
print(result3)
|
# Read a temperature in Celsius and convert it to Fahrenheit and Kelvin.
C = float(input('Qual a temperatura em C°? '))
F = C * 1.8 + 32
K = C + 273.15
# Display both conversions with two decimal places.
print('De {}C° para {:.2f}F° \nDe {}C° para {:.2f}K°'.format(C, F, C, K))
#!/usr/bin/env python
# encoding: utf-8
"""
Created by Ben Scott on '23/05/2016'.
"""
import time
import os
import requests
from ke2mongo import config
# Time to wait before checking the import has worked - by default 1 minute
SLEEP_INTERVAL = 60
class SolrIndex:
    """
    Class for handling solr indexes

    Public api:
        full_import - run full-import command
        status - get status of full-import command
    """

    def __init__(self, index):
        self.index = index

    def _request(self, command):
        """Issue *command* to the index's dataimport handler; return parsed JSON."""
        params = {
            'command': command,
            # Ask solr for a JSON response with named lists as maps.
            'wt': 'json',
            'json.nl': 'map',
        }
        url = os.path.join(self.index, 'dataimport')
        response = requests.get(url, params=params)
        response.raise_for_status()
        return response.json()

    def full_import(self):
        """Kick off a full re-import of this index."""
        return self._request('full-import')

    def status(self):
        """Return the current dataimport status."""
        return self._request('status')
def solr_reindex():
    """Fully re-import every configured solr index, one at a time.

    Waiting for each import to finish before starting the next ensures there
    is always a stable index available to serve requests.
    """
    indexes = config.get('solr', 'indexes').split(',')
    for index in indexes:
        solr_index = SolrIndex(index)
        print("Starting full import of index: %s" % index)
        solr_index.full_import()
        # Poll the status endpoint every SLEEP_INTERVAL until the import ends.
        while True:
            status = solr_index.status()
            messages = status['statusMessages']
            if status['status'] != 'busy':
                print(messages.get(''))
                print('Time taken: %s' % messages.get('Time taken'))
                break
            print('Total Rows Fetched: %s' % messages.get('Total Rows Fetched'))
            print('Time elapsed: %s' % messages.get('Time Elapsed'))
            time.sleep(SLEEP_INTERVAL)
if __name__ == "__main__":
    # Allow running the reindex directly as a script.
    solr_reindex()
|
from _typeshed import Incomplete
# Typeshed-style stub declarations (signatures only; bodies are `...`).
def betweenness_centrality_subset(
    G, sources, targets, normalized: bool = False, weight: Incomplete | None = None
): ...
def edge_betweenness_centrality_subset(
    G, sources, targets, normalized: bool = False, weight: Incomplete | None = None
): ...
|
import logging
from os import environ, makedirs, mkdir
from os.path import dirname, exists
from shutil import copyfile, copyfileobj

import boto3
import botocore.client
import botocore.exceptions
# Configuration — all values come from the environment with local-dev defaults.
STORAGE_ENGINE = environ.get('STORAGE_ENGINE', 's3')  # 's3' or 'filesystem'
FILESYSTEM_PATH = environ.get('FILESYSTEM_PATH', 'firmwares')
S3_HOST = environ.get('S3_HOST', 'http://127.0.0.1:9000')
S3_LOCATION = environ.get('S3_LOCATION', 'nyc3')
S3_BUCKET = environ.get('S3_BUCKET', 'qmk-api')
COMPILE_S3_BUCKET = environ.get('COMPILE_S3_BUCKET', 'qmk')
S3_ACCESS_KEY = environ.get('S3_ACCESS_KEY', 'minio_dev')
S3_SECRET_KEY = environ.get('S3_SECRET_KEY', 'minio_dev_secret')
S3_SECURE = False
S3_DOWNLOAD_TIMEOUT = 7200  # 2 hours, how long S3 download URLs are good for
# The `keymap.c` template to use when a keyboard doesn't have its own
DEFAULT_KEYMAP_C = """#include QMK_KEYBOARD_H
// Helpful defines
#define _______ KC_TRNS
const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = {
__KEYMAP_GOES_HERE__
};
"""
# Setup boto3 for talking to S3
# Quiet the noisy AWS/HTTP debug loggers down to INFO.
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('s3transfer').setLevel(logging.INFO)
logging.getLogger('urllib3').setLevel(logging.INFO)
session = boto3.session.Session()
s3 = session.client(
    's3',
    region_name=S3_LOCATION,
    endpoint_url=S3_HOST,
    aws_access_key_id=S3_ACCESS_KEY,
    aws_secret_access_key=S3_SECRET_KEY,
)
# Check to see if S3 is working, and if not print an error in the log.
for bucket in [S3_BUCKET, COMPILE_S3_BUCKET]:
    try:
        s3.create_bucket(Bucket=bucket)
    except botocore.exceptions.ClientError as e:
        # Buckets that already exist are fine; anything else is a real problem.
        if e.response['Error']['Code'] not in ['BucketAlreadyOwnedByYou', 'BucketAlreadyExists']:
            logging.warning('Could not contact S3! Storage related functionality will not work!')
    except botocore.exceptions.BotoCoreError:
        # Bug fix: an unreachable endpoint raises EndpointConnectionError (a
        # BotoCoreError, not a ClientError) and used to crash at import time
        # instead of producing the warning this check promises.
        logging.warning('Could not contact S3! Storage related functionality will not work!')
# NOTE: the parameter name `object` shadows the builtin, but it is part of
# the public keyword API of this function, so it is left unchanged.
def delete(object, *, bucket=S3_BUCKET, **kwargs):
    """Delete an object from S3.

    (Always talks to S3; the filesystem engine is not handled here.)

    Parameters
    * Key (string) -- [REQUIRED]
    * MFA (string) -- The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.
    * VersionId (string) -- VersionId used to reference a specific version of the object.
    * RequestPayer (string) -- Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
    """
    return s3.delete_object(Bucket=bucket, Key=object, **kwargs)
def list_objects(*, bucket=S3_BUCKET, **kwargs):
    """List the objects in our bucket.

    This function yields objects and handles pagination for you. It will only fetch as many pages as you consume.

    Parameters
    * Bucket (string) -- [REQUIRED]
    * Delimiter (string) -- A delimiter is a character you use to group keys.
    * EncodingType (string) -- Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key may contain any Unicode character; however, XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response.
    * Marker (string) -- Specifies the key to start with when listing objects in a bucket.
    * MaxKeys (integer) -- Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.
    * Prefix (string) -- Limits the response to keys that begin with the specified prefix.
    * RequestPayer (string) -- Confirms that the requester knows that she or he will be charged for the list objects request. Bucket owners need not specify this parameter in their requests.
    """
    if 'Bucket' not in kwargs:
        kwargs['Bucket'] = bucket
    while True:
        resp = s3.list_objects(**kwargs)
        if 'Contents' in resp:
            for obj in resp['Contents']:
                yield obj
        # NOTE(review): list_objects (v1) responses carry NextMarker, not
        # NextContinuationToken (that's list_objects_v2) — the first branch
        # looks dead; confirm against the boto3 docs before removing it.
        if 'NextContinuationToken' in resp:
            print('\nFetching more results from the S3 API.\n')
            kwargs['ContinuationToken'] = resp['NextContinuationToken']
        elif 'NextMarker' in resp:
            print('\nFetching more results from the Spaces API.\n')
            kwargs['Marker'] = resp['NextMarker']
        else:
            if 'Contents' in resp:
                del resp['Contents']
            print('Could not find any pagination information:')
            print(resp)
            break
def save_fd(fd, filename, *, bucket=S3_BUCKET):
    """Store the contents of a file-like object in the configured storage engine.

    :param fd: file-like object to read from
    :param filename: S3 key or filesystem-relative path to write to
    :param bucket: target bucket (S3 engine only)
    """
    if STORAGE_ENGINE == 's3':
        logging.debug('Uploading %s to s3.', filename)
        s3.upload_fileobj(fd, bucket, filename)
    else:
        logging.debug('Writing to %s/%s.', FILESYSTEM_PATH, filename)
        if FILESYSTEM_PATH[0] == '/':
            file_path = '%s/%s' % (FILESYSTEM_PATH, filename)
        else:
            file_path = '../%s/%s' % (FILESYSTEM_PATH, filename)
        # Bug fix: mkdir() raised FileExistsError on the second save and could
        # not create nested directories; makedirs(exist_ok=True) handles both.
        makedirs(dirname(file_path), exist_ok=True)
        # Bug fix: the file handle was never closed; use a context manager.
        # NOTE(review): mode 'w' assumes fd yields text — if callers pass a
        # binary stream (e.g. an S3 body) this should be 'wb'; confirm.
        with open(file_path, 'w') as out:
            copyfileobj(fd, out)
def save_file(local_filename, remote_filename, *, bucket=S3_BUCKET, public=False):
    """Store the contents of a local file in the configured storage engine.

    :param local_filename: path of the file to upload/copy
    :param remote_filename: S3 key or filesystem-relative destination path
    :param bucket: target bucket (S3 engine only)
    :param public: when True, upload with a public-read ACL (S3 engine only)
    """
    if STORAGE_ENGINE == 's3':
        logging.debug('Uploading %s to s3: %s.', local_filename, remote_filename)
        if public:
            s3.upload_file(local_filename, bucket, remote_filename, ExtraArgs={'ACL':'public-read'})
        else:
            s3.upload_file(local_filename, bucket, remote_filename)
    else:
        logging.debug('Writing to %s/%s.', FILESYSTEM_PATH, remote_filename)
        if FILESYSTEM_PATH[0] == '/':
            file_path = '%s/%s' % (FILESYSTEM_PATH, remote_filename)
        else:
            file_path = '../%s/%s' % (FILESYSTEM_PATH, remote_filename)
        # Bug fix: mkdir() raised FileExistsError on the second save and could
        # not create nested directories; makedirs(exist_ok=True) handles both.
        makedirs(dirname(file_path), exist_ok=True)
        # Bug fix: previously copied to `remote_filename` (relative to the
        # CWD), silently ignoring the computed `file_path` destination.
        copyfile(local_filename, file_path)
def put(filename, value, *, bucket=S3_BUCKET, public=False):
    """Store *value* under *filename* in the configured storage engine.

    :returns: the S3 response dict, False when S3 answers 404, or None for
              the filesystem engine.
    """
    if STORAGE_ENGINE == 's3':
        try:
            if public:
                # Bug fix: put_object does not accept ExtraArgs (that is an
                # upload_file parameter) and raised TypeError; the ACL is a
                # direct keyword argument on put_object.
                object = s3.put_object(Bucket=bucket, Key=filename, Body=value, ACL='public-read')
            else:
                object = s3.put_object(Bucket=bucket, Key=filename, Body=value)
            return object
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                return False
            else:
                raise
    else:
        # Bug fix: the log call passed two args to a single %s placeholder,
        # which made logging raise a formatting error.
        logging.debug('Writing to %s/%s.', FILESYSTEM_PATH, filename)
        if FILESYSTEM_PATH[0] == '/':
            file_path = '%s/%s' % (FILESYSTEM_PATH, filename)
        else:
            file_path = '../%s/%s' % (FILESYSTEM_PATH, filename)
        # Bug fix: mkdir() raised FileExistsError on the second save and could
        # not create nested directories; makedirs(exist_ok=True) handles both.
        makedirs(dirname(file_path), exist_ok=True)
        # Bug fix: the file handle was never closed; use a context manager.
        with open(file_path, 'w') as out:
            out.write(value)
def get_fd(filename, *, bucket=S3_BUCKET):
    """Return a readable file-like object for `filename`.

    FIXME: This doesn't work as a context manager.
    """
    if STORAGE_ENGINE != 's3':
        file_path = '/'.join((FILESYSTEM_PATH, filename))
        if not exists(file_path):
            raise FileNotFoundError(filename)
        return open(file_path)
    # S3: the StreamingBody behaves like a read-only file object.
    s3_object = s3.get_object(Bucket=bucket, Key=filename)
    return s3_object['Body']
def get(filename, *, bucket=S3_BUCKET):
    """Retrieve an object, returning str when it decodes as UTF-8, else raw bytes."""
    raw = get_fd(filename, bucket=bucket).read()
    try:
        return raw.decode('utf-8')
    except UnicodeDecodeError:
        return raw
def get_public_url(filename, *, bucket=S3_BUCKET):
    """Return a presigned S3 GET URL, valid for S3_DOWNLOAD_TIMEOUT seconds."""
    return s3.generate_presigned_url(
        ClientMethod='get_object',
        Params={'Bucket': bucket, 'Key': filename},
        ExpiresIn=S3_DOWNLOAD_TIMEOUT,
    )
if __name__ == '__main__':
    # Smoke test: round-trip a small object through the configured backend.
    print(1, put('foo', 'bar'))
    print(2, get('foo'))
|
"""
Transformer for geometric hashing
"""
import math
from Minutia import MinutiaNBIS_GH
from Minutia_Converter import MinutiaConverter
class GHTransformer:
    """Builds geometric-hashing tables and transforms minutiae between bases."""

    @staticmethod
    def generate_enrollment_table(vault_element_pairs):
        """Build the enrollment geometric-hashing table from vault element pairs.

        Each element's x_rep is decoded back into a minutia and converted to
        MinutiaNBIS_GH; its y_rep (the polynomial mapping) is carried alongside.

        :param vault_element_pairs: list of VaultElements (x_rep, y_rep)
        :return: list of GHElementEnrollment, one per basis minutia
        """
        converter = MinutiaConverter()
        gh_minutiae = []
        poly_points = []
        for element in vault_element_pairs:
            decoded = converter.get_minutia_from_uint(element.x_rep)
            gh_minutiae.append(MinutiaNBIS_GH.convert_from_MinutiaNBIS(decoded))
            poly_points.append(element.y_rep)
        assert len(gh_minutiae) == len(vault_element_pairs)
        # Indices inside each GHElementEnrollment line up with vault_element_pairs.
        return [GHElementEnrollment(basis, gh_minutiae, poly_points)
                for basis in gh_minutiae]

    @staticmethod
    def generate_verification_table_element(basis, minutiae_list):
        """Wrap probe minutiae in a verification element for the given basis.

        :param basis: MinutiaNBIS_GH to transform the probe minutiae to
        :param minutiae_list: list of MinutiaNBIS_GH
        :return: GHElementVerification
        """
        return GHElementVerification(basis, minutiae_list)

    @staticmethod
    def convert_list_to_MinutiaNBIS_GH(minutiae_list):
        """Convert a list of MinutiaNBIS into MinutiaNBIS_GH instances."""
        return [MinutiaNBIS_GH.convert_from_MinutiaNBIS(m) for m in minutiae_list]

    @staticmethod
    def transform_minutia_to_basis(m_basis: MinutiaNBIS_GH, m: MinutiaNBIS_GH):
        """Express minutia `m` in the coordinate frame of `m_basis`.

        Translates by the basis position, rotates by the basis angle, and
        normalises the angle difference into [0, 360). The transformed point
        may fall outside the original image boundaries.

        :param m_basis: MinutiaNBIS_GH used as the basis
        :param m: MinutiaNBIS_GH to transform
        :return: transformed MinutiaNBIS_GH
        """
        dx = m.x - m_basis.x
        dy = m.y - m_basis.y
        cos_t = math.cos(math.radians(m_basis.theta))
        sin_t = math.sin(math.radians(m_basis.theta))
        new_x = int(round(dx * cos_t + dy * sin_t))
        new_y = int(round(-dx * sin_t + dy * cos_t))
        delta = m.theta - m_basis.theta
        if delta < 0:
            delta += 360
        return MinutiaNBIS_GH(new_x, new_y, delta)

    @staticmethod
    def transform_minutiae_to_basis(basis, minutiae_list):
        """Transform every minutia in `minutiae_list` into the frame of `basis`.

        :param basis: MinutiaNBIS_GH used as the basis
        :param minutiae_list: list of MinutiaNBIS_GH
        :return: list of transformed MinutiaNBIS_GH
        """
        return [GHTransformer.transform_minutia_to_basis(basis, m)
                for m in minutiae_list]
class GHElementEnrollment:
    """Enrollment-side geometric hash table entry: a basis plus all minutiae
    re-expressed in that basis (and, optionally, DB-storable representations)."""

    def __init__(self, basis, minutiae_list, function_points, save_to_db=False):
        """
        :param basis: MinutiaNBIS_GH used as the coordinate basis
        :param minutiae_list: list of MinutiaNBIS_GH to transform
        :param function_points: polynomial evaluations paired with the minutiae
        :param save_to_db: when True, also build integer representations
        """
        self.basis = basis
        self.transformed_minutiae_list = GHTransformer.transform_minutiae_to_basis(basis, minutiae_list)
        if save_to_db:
            # Integer encodings for persistence; transformed coordinates may be
            # negative, hence non_negative=False.
            converter = MinutiaConverter()
            self.basis_rep = converter.get_uint_from_minutia(basis, non_negative=False)
            self.minutiae_rep = [converter.get_uint_from_minutia(m, non_negative=False)
                                 for m in self.transformed_minutiae_list]
            self.function_points_rep = function_points

    def __str__(self):
        return '(Basis:\n' \
               'x = {}\n' \
               'y = {}\n' \
               'theta = {}\n' \
               '#Minutiae:' \
               '{})'.format(self.basis.x, self.basis.y, self.basis.theta, len(self.transformed_minutiae_list))

    def __repr__(self):
        return '{}(Basis: ({}, {}, {}))'.format(
            self.__class__.__name__, self.basis.x, self.basis.y, self.basis.theta
        )
class GHElementVerification:
    """Verification-side geometric hash table entry: the probe's minutiae
    re-expressed in one basis minutia's coordinate frame."""

    def __init__(self, basis, minutiae_list):
        """
        :param basis: MinutiaNBIS_GH used as the coordinate basis
        :param minutiae_list: list of MinutiaNBIS_GH to transform
        """
        self.basis = basis
        self.transformed_minutiae_list = GHTransformer.transform_minutiae_to_basis(basis, minutiae_list)

    def __str__(self):
        return '(Basis:\n' \
               'x = {}\n' \
               'y = {}\n' \
               'theta = {}\n' \
               '#Minutiae:' \
               '{})'.format(self.basis.x, self.basis.y, self.basis.theta, len(self.transformed_minutiae_list))

    def __repr__(self):
        return '{}(Basis: ({}, {}, {}))'.format(
            self.__class__.__name__, self.basis.x, self.basis.y, self.basis.theta
        )
|
# Lab-budget calculator: equipment cost (computers, tables, chairs) plus labour.
nc = int(input("Enter no. of computer "))
nt = int(input("Enter no. of Table "))
ns = int(input("Enter no. of chairs "))
cc = float(input("Enter cost of 1 computer "))
ct = float(input("Enter cost of 1 table "))
cs = float(input("Enter cost of 1 chair "))
nw = int(input("Enter no. of hours worked "))
wh = int(input("Enter wages per hour"))
# Total = computers + labour + tables + chairs.
budget = (nc * cc) + (nw * wh) + (nt * ct) + (ns * cs)
print("Total budget should be ", budget)  # fixed typo: "Toatal" -> "Total"
|
# Generated by Django 2.2.11 on 2020-03-13 06:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the employee `detail` table."""

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='detail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): default='null' on IntegerFields stores the
                # *string* 'null', which fails integer validation -- confirm
                # whether null=True was intended.
                ('emp_id', models.IntegerField(default='null')),
                ('status', models.CharField(default='null', max_length=10)),
                ('last_name', models.CharField(default='null', max_length=40)),
                ('preferred_name', models.CharField(default='null', max_length=40)),
                ('manager_id', models.IntegerField(default='null')),
                ('work_phone', models.TextField(default='null')),
                ('personal_email', models.TextField(default='null')),
                ('location', models.TextField(default='null')),
                ('subordinates', models.TextField(default='null')),
                ('department', models.TextField(default='null')),
                ('employments', models.TextField(default='null')),
                ('gender', models.CharField(default='null', max_length=40)),
            ],
        ),
    ]
|
# Generated by Django 3.1.4 on 2021-01-10 18:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds Book and Teacher models and links Student to both
    (one-to-one to Book, many-to-many to Teacher)."""

    dependencies = [
        ('homepage', '0009_auto_20201228_1244'),
    ]
    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Teacher',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('students', models.ManyToManyField(to='homepage.Student')),
            ],
        ),
        migrations.AddField(
            model_name='student',
            name='book',
            # null=True so existing Student rows migrate without a Book.
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='homepage.book'),
        ),
        migrations.AddField(
            model_name='student',
            name='teachers',
            field=models.ManyToManyField(to='homepage.Teacher'),
        ),
    ]
|
from collections import namedtuple
from datetime import datetime
import PIL.Image
import PIL.ExifTags
# Just the EXIF tags we care about
# Fields: capture_datetime (datetime), iso (int), exposure_time (float, seconds).
ExifTags = namedtuple("ExifTags", ["capture_datetime", "iso", "exposure_time"])
def _read_exif_tags(image_path):
    """
    Uses (an "experimental" private function from) PIL to read EXIF tags
    from an image file. Returns a dictionary of tag names to values.
    """
    image = PIL.Image.open(image_path)
    code_to_name = PIL.ExifTags.TAGS
    # _getexif() yields {numeric tag code: value}; translate codes to
    # human-readable names, dropping any code PIL has no name for.
    readable = {}
    for code, value in image._getexif().items():
        if code in code_to_name:
            readable[code_to_name[code]] = value
    return readable
def _parse_date_time_original(tags):
    """Parse the DateTimeOriginal tag ("YYYY:MM:DD HH:MM:SS") into a datetime."""
    return datetime.strptime(tags["DateTimeOriginal"], "%Y:%m:%d %H:%M:%S")
def _parse_iso(tags):
    """Return the ISOSpeedRatings tag value (PIL already yields an int)."""
    iso = tags["ISOSpeedRatings"]
    return iso
def _parse_exposure_time(tags):
    """Convert the ExposureTime tag — a (numerator, denominator) pair — to
    a float number of seconds."""
    numerator, denominator = tags["ExposureTime"]
    return numerator / denominator
def get_exif_tags(raw_image_path):
    """ Extracts relevant EXIF tags from a JPEG+RAW file.
    Args:
        raw_image_path: The full file path of the JPEG+RAW file to extract metadata from
    Returns:
        Relevant EXIF tags, as an ExifTags namedtuple
    """
    tags = _read_exif_tags(raw_image_path)
    capture = _parse_date_time_original(tags)
    iso = _parse_iso(tags)
    exposure = _parse_exposure_time(tags)
    return ExifTags(capture_datetime=capture, iso=iso, exposure_time=exposure)
|
from .ernie import ERNIE
from .bert import BERT
|
from scipy import integrate


def f(x):
    """Integrand: f(x) = x**2."""
    return x ** 2


# Definite integral over [0, 1]; quad returns (value, estimated absolute error).
integration = integrate.quad(f, 0, 1)
print(integration)
|
from random import shuffle
from random import randint
import time

# time.sleep(30)

# Mode and map pools for the tournament draw.
modes = ["Splat Zones","Tower Control","Rainmaker","Clam Blitz"]
maps = ["Arowana Mall", "Blackbelly Skatepark", "Camp Triggerfish", "Goby Arena", "Humpback Pump Track", "Inkblot Art Academy", "Kelp Dome", "MakoMart", "Manta Maria", "Moray Towers", "Musselforge Fitness", "Piranha Pit", "Port Mackerel", "Shellendorf Institute", "Snapper Canal", "Starfish Mainstage", "Sturgeon Shipyard", "The Reef", "Wahoo World", "Walleye Warehouse"]

ctr = 1
i = 0
doneFlag = False  # never read; kept from the original script

while ctr < 7:
    i = 0
    # Re-shuffle both pools five times before drawing this bracket.
    for _ in range(5):
        shuffle(modes)
        shuffle(maps)
    print("\n\nBracket ", ctr, " maps are...\n\n")
    print("\n\nProcessing...\n\n")
    # Slow reveal: three ten-second beats, then a final five-second pause.
    time.sleep(10)
    print(".\n")
    time.sleep(10)
    print("..\n")
    time.sleep(10)
    print("...\n\n")
    time.sleep(5)
    if ctr <= 5:
        # Regular brackets: three mode/map pairings.
        while i < 3:
            print(modes[i], ", ", maps[i])
            i = i + 1
    if ctr >= 6:
        # Final bracket: four fixed pairings, then a random mode on map 5.
        while i < 4:
            print(modes[i], ", ", maps[i])
            i = i + 1
        print(modes[randint(0, 2)], ", ", maps[4])
    ctr = ctr + 1
|
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import generics
from .filters import EventFilter
from .serializers import EventSerializer
from .services import EventPagination
from mainapp.models import Event
class EventsListView(generics.ListAPIView):
    """Read-only list of all events, filterable via EventFilter and paginated."""
    queryset = Event.objects.all()
    serializer_class = EventSerializer
    filter_backends = (DjangoFilterBackend,)
    filterset_class = EventFilter
    pagination_class = EventPagination
class EventDetailView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single event by primary key."""
    queryset = Event.objects.all()
    serializer_class = EventSerializer
class EventCreateView(generics.CreateAPIView):
    """Create a new event from POSTed serializer data."""
    serializer_class = EventSerializer
|
# Wildcard matching: s (length n) contains at most one '*' that may expand to
# any, possibly empty, substring; decide whether s can equal t (length m).
n, m = [int(x) for x in input().split()]
s = input()
t = input()
idx = 0
ans = 0  # NOTE(review): never used afterwards
if(m < n - 1):
    # Even an empty '*' leaves n-1 literal characters, so t is too short.
    print('NO')
    exit(0)
if not '*' in s:
    # No wildcard present: plain string equality decides it.
    if(s == t):
        print('YES')
    else:
        print('NO')
    exit(0)
# Locate the single '*' in s.
for x in range(n):
    if(s[x] == '*'):
        idx = x
        break
# Index in t where the suffix following '*' must begin.
lol = m - (n - idx) + 1
# Prefix before '*' and suffix after '*' must match t literally.
if(s[:idx] == t[:idx] and s[idx+1:] == t[lol:]):
    print('YES')
else:
    print('NO')
|
from __future__ import absolute_import
from arMarker.detect import detect_markers
from arMarker.marker import HammingMarker
|
import time
class Logger(object):
    """Minimal append-only file logger.

    Each line has the form ``dd/mm/YYYY-HH:MM:SS[level]<<message>>``.
    Creating a Logger truncates the target file and writes a start marker.
    """

    def __init__(self, logger_file):
        self.logger_file = logger_file
        # "w" truncates: every Logger instance starts a fresh log file.
        with open(self.logger_file, "w") as container:
            container.write(self._format_line("log", "Log start"))

    def write_log(self, level, message):
        """Append one timestamped entry with the given level and message."""
        with open(self.logger_file, "a") as container:
            container.write(self._format_line(level, message))

    @staticmethod
    def _format_line(level, message):
        # date-hour stamp followed by [level]<<message>>
        stamp = time.strftime("%d/%m/%Y") + "-" + time.strftime("%H:%M:%S")
        return stamp + "[" + level + "]<<" + message + ">>\n"
# When the file is closed, where does the cursor end up?
# log
# warn
# error
# (optional) critical
|
# Legacy Django (<3.2) hook pointing at this app's AppConfig class.
default_app_config = "modoboa.maillog.apps.MaillogConfig"
|
"""Admin API urls."""
from rest_framework import routers
from . import viewsets
# Register one CRUD route set per admin resource; basenames drive URL names.
router = routers.SimpleRouter()
router.register(r"domains", viewsets.DomainViewSet, basename="domain")
router.register(
    r"domainaliases", viewsets.DomainAliasViewSet, basename="domain_alias")
router.register(r"accounts", viewsets.AccountViewSet, basename="account")
router.register(r"aliases", viewsets.AliasViewSet, basename="alias")
router.register(
    r"senderaddresses", viewsets.SenderAddressViewSet, basename="sender_address")
urlpatterns = router.urls
|
from django.contrib import admin
from django.urls import path, include
from mainapp import views
# URL namespace: reverse these as "mainapp:<name>".
app_name = "mainapp"
urlpatterns = [
    path('index/',views.index,name="index"),
    path('detail/',views.detail,name="detail"),
    path('booklist/',views.booklist,name='booklist'),
    path('slurdetail/',views.slurdetail,name='slurdetail'),
    path('del2/',views.del2,name='del2'),
]
# -*- coding: utf-8 -*-
"""
Functions for offline evaluation, e.g. using
the TREC example queries and annotations.
"""
import csv, requests, os, json
import pandas as pd
# Use a corrected golden standard for long track
fp_gs = "evaluation/golden_standard_1906.tsv"
def query_server(query_list, run_id, url):
    """Sends queries to a running server.

    POSTs each (TextID, Text) pair to `url` with the given run ID,
    concatenates the raw responses, and writes the result to
    evaluation/system_output/<run_id>.tsv.

    :param query_list: iterable of (text_id, text) pairs
    :param run_id: identifier used in the payload and the output file name
    :param url: endpoint accepting runID/TextID/Text form fields
    :return: absolute path of the written output file
    """
    out_str = ""
    for query in query_list:
        payload = {
            'runID': run_id,
            'TextID': query[0],
            'Text': query[1]
        }
        resp = requests.post(url, data=payload)
        if resp.status_code != 200:
            # Any non-200 aborts the run; responses gathered so far are
            # still written below.
            print "Breaking due to non-200 response"
            break
        out_str += resp.content
    f_path = os.path.join("evaluation/system_output", run_id + ".tsv")
    abs_path = os.path.abspath(f_path)
    with open(abs_path, 'w') as f:
        f.write(out_str[1:]) # strip initial newline
    return abs_path
# Short track
def read_tsv_queries(file_path="../Trec_beta.query.txt"):
    """Read tab-separated query rows; quoting is disabled entirely."""
    with open(file_path, 'r') as f:
        reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
        return list(reader)
# Long track
def read_document_queries(dir_path="evaluation/documents"):
    """Return [(text_id, raw bytes), ...] for every .txt file in `dir_path`,
    sorted by file name. The text_id is the file name without its extension."""
    query_list = []
    for file_name in sorted(os.listdir(dir_path)):
        if not file_name.endswith(".txt"):
            continue
        text_id = file_name[:-4]
        with open(os.path.join(dir_path, file_name), 'rb') as f:
            query_list.append((text_id, f.read()))
    return query_list
def load_comparison(fp_golden_standard, fp_sys_output):
    """Outer-join golden standard and system output on (DocID, begin).

    Overlapping annotation columns get _gs / _so suffixes.
    """
    headers = ['DocID', 'begin', 'end', 'FbID', 'AltID', 'mention', 'conf', 'altconf']

    def read_run(fp):
        # '~' quotechar matches how the runs were written.
        return pd.read_csv(fp, sep="\t", encoding='utf8', quotechar='~', header=None)

    gs = read_run(fp_golden_standard)
    so = read_run(fp_sys_output)
    gs.columns = so.columns = headers
    return pd.merge(gs, so, how='outer', on=headers[:2], suffixes=('_gs', '_so'))
def calculate_performance(comparison_df):
    """Compute entity-linking precision/recall/F1, overall and per document.

    Expects the outer-joined frame from load_comparison (columns suffixed
    _gs for golden standard, _so for system output).

    Row classification:
      TP: both sides carry the same FbID.
      FP: the system produced a FbID differing from the golden standard
          (including rows where the golden standard is NaN, i.e. NIL).
      FN: the system produced nothing for a golden-standard row.

    :return: dict with an 'overview' summary, a per-document stats frame,
             and the underlying TP/FP/FN data frames.
    """
    comp = comparison_df
    TP_df = comp[comp.FbID_gs == comp.FbID_so]
    FP_df = comp[(comp.FbID_gs != comp.FbID_so) & (comp.FbID_so.notnull())]
    FN_df = comp[comp.FbID_so.isnull()]
    # Discern between FP caused by NIL and disambiguation
    FP_should_be_NIL_df = FP_df[FP_df.FbID_gs.isnull()]
    FP_wrong_disambiguation_df = FP_df[FP_df.FbID_gs.notnull()]
    # Sanity checks: the partitions must cover both sides exactly.
    assert len(TP_df) + len(FP_df) == comp.FbID_so.count(), "TP + FP should equal all sys_output"
    assert len(FP_wrong_disambiguation_df) + len(TP_df) + len(FN_df) == comp.FbID_gs.count(), "Incorrect disambiguations + TP + FN should equal all golden_standard"
    overall_precision = float(len(TP_df)) / comp.FbID_so.count()
    overall_recall = float(len(TP_df)) / comp.FbID_gs.count()
    overall_F1 = f_score(overall_precision, overall_recall, 1)
    # Breakdown of what the false positives consist of.
    FP_frac_wrong_disambiguation = len(FP_wrong_disambiguation_df) / float(len(FP_df))
    FP_frac_should_be_NIL = len(FP_should_be_NIL_df) / float(len(FP_df))
    # Per-document statistics
    tp_per_doc = TP_df.DocID.value_counts()
    tp_per_doc.name = 'TP'
    fp_per_doc = FP_df.DocID.value_counts()
    fp_per_doc.name = 'FP'
    fn_per_doc = FN_df.DocID.value_counts()
    fn_per_doc.name = 'FN'
    fp_nil_per_doc = FP_should_be_NIL_df.DocID.value_counts()
    fp_nil_per_doc.name = 'FP (should be NIL)'
    fp_dis_per_doc = FP_wrong_disambiguation_df.DocID.value_counts()
    fp_dis_per_doc.name = 'FP (wrong disambiguation)'
    stats_per_doc = pd.concat([tp_per_doc, fp_per_doc, fn_per_doc,
                               fp_nil_per_doc, fp_dis_per_doc], axis=1)
    # Documents missing from a count get 0, not NaN.
    spd = stats_per_doc.fillna(0)
    spd['precision'] = spd.TP / (spd.TP + spd.FP)
    spd['recall'] = spd.TP / (spd.TP + spd.FN)
    spd['F1'] = f_score(spd.precision, spd.recall, 1)
    # Macro averages: mean of the per-document scores.
    spd_means = spd.mean(axis=0)
    overview = {
        'overall_precision': overall_precision,
        'overall_recall': overall_recall,
        'overall_F1': overall_F1,
        'average_precision': spd_means.precision,
        'average_recall': spd_means.recall,
        'average_F1': spd_means.F1,
        'FP_wrong_disambiguation': FP_frac_wrong_disambiguation,
        'FP_should_be_NIL': FP_frac_should_be_NIL
    }
    data_and_stats = {
        'overview': overview,
        'stats_per_doc': spd,
        'TP_df': TP_df,
        'FP_df': FP_df,
        'FN_df': FN_df,
        'FP_should_be_NIL_df': FP_should_be_NIL_df,
        'FP_wrong_disambiguation_df': FP_wrong_disambiguation_df
    }
    return data_and_stats
def f_score(precision, recall, beta):
    """Weighted harmonic mean of precision and recall (F-beta score)."""
    beta_sq = beta ** 2
    return (1 + beta_sq) * (precision * recall) / ((beta_sq * precision) + recall)
def multiple_runs_long(run_id_list, url):
    """Run the long-track evaluation for each run ID and dump error analyses.

    For every run ID: query the server with all long-track documents, score
    the output against the golden standard, and write each result table and
    the overview under evaluation/error_analyses/<run_id>/.
    """
    query_list = read_document_queries()
    for run_id in run_id_list:
        fp_so = query_server(query_list, run_id, url)
        comparison_df = load_comparison(fp_gs, fp_so)
        data_and_stats = calculate_performance(comparison_df)
        dir_path = "evaluation/error_analyses/{0}".format(run_id)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        f_path = os.path.join(dir_path, "{0}.{1}")
        for k, v in data_and_stats.items():
            try:
                # DataFrames/Series serialize themselves to TSV ...
                v.to_csv(f_path.format(k, "tsv"), sep="\t", encoding='utf8', index=False)
            except AttributeError:
                # ... plain dicts (the overview) lack .to_csv: dump as JSON.
                print run_id, k, v
                with open(f_path.format(k, "json"), 'wb') as f:
                    json.dump(v, f, sort_keys=True)
#!/usr/bin/python
#Vivek Sinha
print "Content-Type: text/html"
print ""
from bs4 import BeautifulSoup
import subprocess
import urllib2
import csv
import cgi, cgitb
cgitb.enable()
# Scrape the pipeline status page and, for each matching "perf" pipeline
# entry, fire the per-colo graphing CGI script.
URL = 'http://gsmon.grid.lhr1.inmobi.com/pipeline.status.html'
s = urllib2.urlopen(URL)
test = s.read()
soup = BeautifulSoup(test)
data = soup.find('pre')
lines = soup.pre.string.splitlines()
for line in csv.reader(lines, skipinitialspace=True):
    for a in line:
        new = a.split('/')  # e.g. "colo/pipe_line_name" -> [colo, rest]
        k = new[1].split('_')
        # NOTE(review): this keeps any word sharing a letter with "perf",
        # not only words containing the substring "perf" -- confirm intent.
        a = [word for word in k if any(letter in word for letter in 'perf')]
        if new[0]=='gold' and k[3] in a:
            # NOTE(review): shell='True' is the truthy *string* "True",
            # i.e. effectively shell=True; pass the boolean if intended.
            subprocess.call('/var/www/cgi-bin/perf_gold_d.py',shell='True')
        elif new[0]=='uh1' and k[3] in a:
            subprocess.call('/var/www/cgi-bin/perf_uh1_d.py',shell='True')
        elif new[0]=='uj1' and k[3] in a:
            subprocess.call('/var/www/cgi-bin/perf_uj1_d.py',shell='True')
        elif new[0]=='hkg1' and k[3] in a:
            subprocess.call('/var/www/cgi-bin/perf_hkg1_d.py',shell='True')
        elif new[0]=='lhr1' and k[3] in a:
            subprocess.call('/var/www/cgi-bin/perf_lhr1_d.py',shell='True')
|
import unittest
from katas.kyu_7.collatz_conjecture import collatz
class CollatzTestCase(unittest.TestCase):
    """Spot checks: expected collatz() return values for known inputs."""

    def test_equals(self):
        self.assertEqual(collatz(20), 8)

    def test_equals_2(self):
        self.assertEqual(collatz(15), 18)
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
# from django.template import loader
from .models import Question
# Create your views here.
#INDEX
def index(request):
    """Render polls/index.html with the five most recently published questions."""
    latest_question_list = Question.objects.order_by('-pub_date')[:5]
    context = {'latest_question_list': latest_question_list}
    return render(request, 'polls/index.html', context)
# Second version with Templates but without render
# need loader import for this
# def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# # looks inside template folder for this
# template = loader.get_template('polls/index.html')
# context = {
# 'latest_question_list': latest_question_list,
# }
# return HttpResponse
# First simpliest verson without templates or render
# def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# output = ', '.join([q.question_text for q in latest_question_list])
# return HttpResponse(output)
# DETAIL
def detail(request, question_id):
    """Render the detail page for one question; raise 404 for an unknown pk."""
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/detail.html', {'question':question})
# There's also get_list_or_404 which uses filter() instead of get()
# so it can raise a Http404 if the list is empty
# Second version w/404 but in a long windy way
# def detail(request, question_id):
# try:
# question = Question.objects.get(pk=question_id)
# except Question.DoesNotExist:
# raise Http404("Question does not exist")
# return render(request, 'polls/detail.html', {'question': question})
# First simpliest static html response
# def detail(request, question_id):
# return HttpResponse("You're looking at question %s." % question_id)
def results(request, question_id):
    """Placeholder results page for the given question."""
    return HttpResponse("You're looking at the results of question %s." % question_id)
def vote(request, question_id):
    """Placeholder vote endpoint for the given question."""
    message = "You're voting on question %s." % question_id
    return HttpResponse(message)
from GameLogic import Player
class Barrack:
    """A defensive structure standing on a tile and owned by a player."""

    def __init__(self, tile, owner: Player):
        self._tile = tile
        self._owner = owner
        self._defencePoints = 6  # regular barracks start with 6 defence points

    @property
    def Tile(self):
        """Tile this barrack stands on."""
        return self._tile

    @property
    def Owner(self) -> Player:
        """Player who owns this barrack."""
        return self._owner

    @property
    def DefencePoints(self) -> int:
        """Current defence points."""
        return self._defencePoints

    @DefencePoints.setter
    def DefencePoints(self, value: int):
        self._defencePoints = value
class BaseBarrack(Barrack):
    """The player's main barrack: tougher than a regular one."""

    def __init__(self, tile, owner: Player):
        super().__init__(tile, owner)
        # Override the regular barrack's 6 defence points.
        self._defencePoints = 25
|
############################################################
# #
# General feature extraction abstract class with specific #
# implementations for the biclustering based method, the #
# cluster/pca method, and the feature clustering method. #
# #
# Authors: Amy Peerlinck and Neil Walton #
# #
############################################################
from abc import ABC, abstractmethod
import numpy as np
from clustering import Kmeans, Bicluster, Dbscan
from dataprocessing import DataReader
from utils import Pca, silhouette
from classifiers import Knn, NaiveBayes
from matplotlib import pyplot as plt
class FeatureExtractor(ABC):
    '''
    Abstract class for the feature extractors implemented
    below
    '''
    def __init__(self, input, labels):
        # input: data matrix (rows = data points, columns = features)
        # labels: class labels kept alongside for the concrete extractors
        self.input = input
        self.labels = labels
    def _get_clustering(self, method):
        '''
        Return the selected clustering method. Valid methods
        are "kmeans" and "dbscan"
        '''
        # Base implementation is a no-op hook; subclasses override it.
        pass
    @abstractmethod
    def extract_features(self):
        '''
        Abstract method for extracting the features relevant
        to the specific feature extraction technique
        '''
        pass
class BiclusterExtractor(FeatureExtractor):
    '''
    Implementation of the biclustering based feature extractor.

    Produces one binary feature per bicluster: entry (i, j) of the output
    is 1 iff data point i is a member of bicluster j.
    '''
    def __init__(self, input, labels, n=2):
        '''
        :param input: data matrix (rows are data points)
        :param labels: class labels (kept for the common interface)
        :param n: number of biclusters to extract. Bug fix: this used to be
                  required, but this module's own __main__ constructs
                  BiclusterExtractor(in_, out) with two arguments, which
                  raised TypeError; a default of 2 keeps that call working
                  while remaining backward compatible.
        '''
        self.n = n
        super().__init__(input, labels)
    def extract_features(self):
        '''
        Using the biclustering technique of Cheng and Church,
        extract features as binary arrays that indicate of
        which clusters each data point is a member and return
        these vectors as the extracted features
        '''
        bc = Bicluster(self.input)
        # Thresholds forwarded to the Cheng-and-Church style assignment;
        # their exact semantics are defined by Bicluster.assign_clusters.
        delta = 0.15
        alpha = 1.1
        clusters = bc.assign_clusters(delta, alpha, self.n)
        # Binary membership matrix: row i, column j is 1 when data point i
        # belongs to bicluster j.
        num_rows = self.input.shape[0]
        num_columns = self.n
        binary_features = np.zeros((num_rows, num_columns))
        for j, c in enumerate(clusters):
            for i in c[0]:
                binary_features[i][j] = 1
        return binary_features
class FeatureCluster(FeatureExtractor):
    '''
    Feature extractor that clusters *features* (columns) and merges each
    cluster of original features into one new feature, using hard (binary),
    soft (fuzzy-probability) or mixed weighting.
    '''
    def __init__(self, input, labels, method="kmeans", num_clusters=2, _type="hard"):
        '''
        :param input: data matrix (rows = data points, columns = features)
        :param labels: class labels (kept for the common interface)
        :param method: "kmeans" or "dbscan"
        :param num_clusters: number of feature clusters / output features
        :param _type: "hard", "soft" or "mixed" weighting scheme
        '''
        super().__init__(input, labels)
        self.method = method
        self.num_clusters = num_clusters
        self._type = _type
        # Fuzzy memberships are only produced by the k-means backend, so
        # soft and mixed weighting force it.
        if self._type == "soft" or self._type == "mixed":
            self.method = "kmeans"
        self.clustering = self._get_clustering(self.method)
        self.cluster_labels = np.array([])
    def _get_clustering(self, method):
        '''
        Return the selected clustering method. Valid methods
        are "kmeans" and "dbscan"
        '''
        if self.method == 'kmeans':
            # Bug fix: honour num_clusters (was hard-coded k=2). The weight
            # matrices below are sized by num_clusters, so the two must agree;
            # ClusterPCA already passes k=self.num_clusters.
            return Kmeans(self.input.T, k=self.num_clusters)
        elif self.method == 'dbscan':
            return Dbscan(self.input.T, min_points=10, e=0.0015)
        else:
            raise CantClusterLikeThat('Invalid clustering method selected "' + self.method + '".')
    def hard_combination(self):
        '''
        Dot product between hard weighted matrix and original feature matrix.
        Hard weights are 1 when a feature belongs to the cluster, 0 otherwise,
        so each new feature is the sum of the old features in its cluster.
        '''
        nr_datapoints = self.input.shape[0]
        nr_features = self.input.shape[1]
        weight_matrix = np.zeros([self.num_clusters, nr_features])
        combined_clusters = np.zeros([nr_datapoints, self.num_clusters])
        for i, c in enumerate(self.cluster_labels):
            weight_matrix[c, i] = 1
        for k, cluster in enumerate(weight_matrix):
            # Columns assigned to cluster k, summed into one new feature.
            to_sum = self.input[:, [i for i, weight in enumerate(cluster) if weight == 1]]
            combined_clusters[:, k] = np.sum(to_sum, axis=1)
        return combined_clusters
    def soft_combination(self):
        '''
        Dot product between soft weighted matrix and original feature matrix.
        Soft weights are the fuzzy membership probabilities, so each new
        feature is a weighted combination of all old feature values.
        '''
        nr_datapoints = self.input.shape[0]
        nr_features = self.input.shape[1]
        weight_matrix = np.zeros([self.num_clusters, nr_features])
        combined_clusters = np.zeros([nr_datapoints, self.num_clusters])
        for i, c in enumerate(self.cluster_labels):
            for j, prob in enumerate(c):
                weight_matrix[i, j] = prob
        for k, cluster_prob in enumerate(weight_matrix):
            to_sum = np.asarray([np.multiply(self.input[:, i], prob) for i, prob in enumerate(cluster_prob)]).T
            combined_clusters[:, k] = np.sum(to_sum, axis=1)
        return combined_clusters
    def mixed_combination(self):
        '''
        Blend of hard (weight 0.2) and soft (weight 0.8) membership weights,
        combined with the features as in soft_combination.
        '''
        nr_datapoints = self.input.shape[0]
        nr_features = self.input.shape[1]
        hard_weight_matrix = np.zeros([self.num_clusters, nr_features])
        soft_weight_matrix = np.zeros([self.num_clusters, nr_features])
        combined_clusters = np.zeros([nr_datapoints, self.num_clusters])
        # In mixed mode cluster_labels is [hard labels, fuzzy memberships].
        for i, c in enumerate(self.cluster_labels[0]):
            hard_weight_matrix[c, i] = 1
        for i, c in enumerate(self.cluster_labels[1]):
            for j, prob in enumerate(c):
                soft_weight_matrix[i, j] = prob
        hard_weight_matrix = np.multiply(hard_weight_matrix, 0.2)
        soft_weight_matrix = np.multiply(soft_weight_matrix, 0.8)
        weight_matrix = [soft_weight_matrix[i] + h for i, h in enumerate(hard_weight_matrix)]
        for k, cluster_prob in enumerate(weight_matrix):
            to_sum = np.asarray([np.multiply(self.input[:, i], prob) for i, prob in enumerate(cluster_prob)]).T
            combined_clusters[:, k] = np.sum(to_sum, axis=1)
        return combined_clusters
    def weighted_combination(self):
        '''
        Dispatch to the combination scheme selected by self._type.
        '''
        nr_datapoints = self.input.shape[0]
        combined_clusters = np.zeros([nr_datapoints, self.num_clusters])
        if self._type == "hard":
            combined_clusters = self.hard_combination()
        elif self._type == "soft":
            combined_clusters = self.soft_combination()
        elif self._type == "mixed":
            combined_clusters = self.mixed_combination()
        # Removed a leftover debug print of the full combined matrix.
        return combined_clusters
    def extract_features(self):
        '''
        Combine features using K-Means or DBSCAN for hard weighted clustering
        and fuzzy memberships for soft weighting, then perform the soft, hard
        or mixed weighted combination of the clustered features.
        '''
        # (The clustering backend was already built on the transposed input,
        # so no extra transpose is needed here.)
        if self._type == "hard":
            self.cluster_labels = self.clustering.assign_clusters()
        elif self._type == "soft":
            self.cluster_labels = self.clustering.assign_fuzzy_clusters()
        elif self._type == "mixed":
            self.cluster_labels = [self.clustering.assign_clusters(), self.clustering.assign_fuzzy_clusters()]
        new_features = self.weighted_combination()
        return new_features
class ClusterPCA(FeatureExtractor):
    '''
    Implementation of the cluster pca feature extractor
    '''
    def __init__(self, input, labels, method='kmeans', num_clusters=2, feats_per_cluster=1):
        # num_clusters: how many feature clusters to form.
        # feats_per_cluster: PCA components extracted from each cluster
        # (so the output dimensionality is num_clusters * feats_per_cluster
        # when kmeans yields exactly num_clusters clusters).
        super().__init__(input, labels)
        self.num_clusters = num_clusters
        self.feats_per_cluster = feats_per_cluster
        self.method = method
        self.clustering = self._get_clustering()
    def _get_clustering(self):
        '''
        Return the selected clustering method. Valid methods
        are "kmeans" and "dbscan"
        '''
        if self.method == 'kmeans':
            return Kmeans(self.input.T, k=self.num_clusters)
        elif self.method == 'dbscan':
            return Dbscan(self.input.T, min_points=4, e=0.5)
        else:
            raise CantClusterLikeThat('Invalid clustering method selected "' +self.method+ '".')
    def extract_features(self):
        '''
        Cluster the features of the data set, then use PCA to
        extract new features from each of the resulting clusters.
        Valid clustering techniques include DBSCAN and kmeans
        '''
        features = self.input.T #Transpose so we're clustering features
        clusters = self.clustering.assign_clusters()
        new_features = np.array([])
        sc = silhouette(features, clusters) #silhouette coefficient
        # NOTE(review): `sc` is never used afterwards -- confirm whether
        # silhouette() has side effects before removing the call.
        #For each cluster, run PCA on the columns in the cluster to reduce dimension
        for c in set(clusters):
            columns = []
            for i in range(len(clusters)):
                if clusters[i] == c:
                    columns.append(features[i])
            columns = np.array(columns).T
            p = Pca(columns, n=self.feats_per_cluster)
            extracted_features = p.get_components()
            # Stack each cluster's components side by side.
            if new_features.shape[0] == 0:
                new_features = extracted_features
            else:
                new_features = np.hstack((new_features, extracted_features))
        return new_features
class CantClusterLikeThat(Exception):
    """Raised when an unknown clustering method name is requested."""

    def __init__(self, message):
        # Bug fix: forward the message to Exception so str(exc) and
        # tracebacks show it (the original stored it on self only,
        # leaving str(exc) empty).
        super().__init__(message)
        self.message = message
def load_iris():
    """Load the iris dataset from ../data/iris.txt.

    Each line is "f1,f2,f3,f4,<species>"; species names are mapped to
    integer labels 0/1/2. Lines with an unrecognised species contribute
    features but no label (matching the original behaviour).

    :return: (data matrix as np.ndarray of shape (n, 4), np.ndarray of labels)
    """
    path = '../data/iris.txt'
    label_map = {
        'Iris-setosa': 0,
        'Iris-versicolor': 1,
        'Iris-virginica': 2,
    }
    data_matrix = []
    labels = []
    # Fixed: close the file deterministically (the original leaked the
    # handle) and removed a leftover debug print of every feature row.
    with open(path, 'r') as iris_text:
        for line in iris_text:
            temp_list = line.strip().split(',')
            features = np.array([float(x) for x in temp_list[:4]])
            data_matrix.append(features)
            if temp_list[-1] in label_map:
                labels.append(label_map[temp_list[-1]])
    return (np.array(data_matrix), np.array(labels))
if __name__ == '__main__':
    # Demo: compare k-NN and Naive Bayes on original vs. cluster-extracted
    # vs. PCA features using the iris data.
    #iris = load_iris()
    iris= DataReader("../data/iris.txt").run()
    in_ = iris[0]
    out = iris[1]
    # NOTE(review): BiclusterExtractor is called without `n` here; this
    # raises TypeError unless that parameter has a default value.
    bc = BiclusterExtractor(in_, out)
    fc = FeatureCluster(in_, out, _type="mixed")
    cpca = ClusterPCA(in_, out, method='kmeans')
    feats = fc.extract_features()
    #print (feats)
    p = Pca(in_, n=2)
    pca_feats = p.get_components()
    k=8
    knn1 = Knn(in_, out, k=k)
    knn2 = Knn(feats, out, k=k)
    knn3 = Knn(pca_feats, out, k=k)
    print ('Knn score with original features: ', knn1.k_fold_score()[0])
    print ('Knn score with extracted features: ', knn2.k_fold_score()[0])
    print ('Knn score with PCA features: ', knn3.k_fold_score()[0])
    nb1 = NaiveBayes(in_, out)
    nb2 = NaiveBayes(feats, out)
    nb3 = NaiveBayes(pca_feats, out)
    print ('Naive Bayes score with original features: ', nb1.k_fold_score()[0])
    print ('Naive Bayes score with extracted features: ', nb2.k_fold_score()[0])
    print ('Naive Bayes score with PCA features: ', nb3.k_fold_score()[0])
    # Side-by-side scatter plots: PCA features (left), cluster features (right).
    #plt.figure(1)
    plt.subplot(121, rasterized=True)
    plt.scatter(pca_feats[:,0], pca_feats[:,1], c=out)
    #plt.show()
    #plt.figure(2)
    plt.subplot(122)
    plt.scatter(feats[:,0], feats[:,1], c=out)
    plt.show()
|
# Utilities (non-math)
from common import *
#########################################
#
#########################################
from copy import deepcopy, copy
class ResultsTable():
    r"""
    Main purpose: collect result data (avrgs) from separate (e.g. parallelized) experiments.
    Supports merging datasets with distinct xticks and labels.

    Load avrgs (array of dicts of fields of time-average statistics)
    from .npz files which also contain arrays 'xticks' and 'labels'.
    Assumes avrgs.shape == (len(xticks),nRepeat,len(labels)).
    But the avrgs of different source files can have entirely different
    xticks, nRepeat, labels. The sources will be properly handled
    also allowing for nan values. This flexibility allows working with
    a patchwork of "inhomogenous" sources.

    Merge (stack) into a TABLE with shape (len(labels),len(xticks)).
    Thus, all results for a given label/xticks are easily indexed,
    and don't even have to be of the same length
    (TABLE[iC,iX] is a list of the avrgs for that (label,absissa)).

    Also provides functions that partition the TABLE,
    (but nowhere near the power of a full database).

    NB: the TABLE is just convenience:
    the internal state of ResultsTable is the dict of datasets.

    Examples:
    # COMPOSING THE DATABASE OF RESULTS
    >>> R = ResultsTable('data/AdInf/bench_LUV/c_run[1-3]')  # Load by regex
    >>> R.load('data/AdInf/bench_LUV/c_run7')                # More loading
    >>> R.mv(r'tag (\d+)',r'tag\1')  # change "tag 50" to "tag50" => merge such labels (configs)
    >>> R.rm([0, 1, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16])  # rm uninteresting labels (configs)
    >>> R.rm('EnKF[^_]')  # rm EnKF but not EnKF_N
    >>> cond = lambda s: s.startswith('EnKF_N') and not re.search('(FULL|CHEAT)',s)  # Define more involved criterion
    >>> R1 = R.split('mytag')    # R1: labels with 'mytag'. R <-- R\R1
    >>> R1, R2 = R.split2(cond)  # R1: labels satisfying cond. R2 = R\R1

    # PRESENTING RESULTS
    >>> R.print_frame(R.field('rmse_a')[iR])  # print frame of exprmnt#iR of 'rmse_a' field.
    >>> R.print_field(R.field('rmse_a'))      # print all experiment frames
    >>> R.print_frame(R.mean_field('rmse_a')[0].tolist())  # print frame of mean of field
    >>> R.print_mean_field('rmse_a',show_fail=True,show_conf=False,cols=None)  # This gives more options

    See AdInf/present_results.py for further examples.
    """

    def __init__(self, *args, **kwargs):
        """Create the table by loading the first pattern of datasets."""
        self.load(*args, **kwargs)

    def load(self, pattern):
        """
        Load datasets into the "datasets",
        Call regen_table.
        """
        self.patterns = getattr(self, 'patterns', []) + [pattern]
        self.datasets = getattr(self, 'datasets', OrderedDict())
        # NB: Don't declare any more attributes here; put them in regen_table().
        DIR, regex = os.path.split(pattern)
        keys = sorted_human(os.listdir(DIR))
        keys = [os.path.join(DIR, f) for f in keys if re.search(regex, f)]
        if len(keys) == 0:
            raise Exception("No files found that match with the given pattern")
        for f in keys:
            if f in self.datasets:
                print("Warning: re-loading", f)
            if 0 == os.path.getsize(f):
                # Placeholder files mark experiments that have not finished yet.
                print("Encountered placeholder file:", f)
                continue
            self.datasets[f] = dict(np.load(f))
        self.regen_table()
        return self  # for chaining

    # NB: Writes to file. Dangerous!
    def write2all(self, **kwargs):
        """Re-save every dataset file with the extra arrays in kwargs added."""
        for key in list(self.datasets):
            tmp = np.load(key)
            np.savez(key, **tmp, **kwargs)

    def rm_dataset(self, pattern):
        """Remove datasets whose filename matches pattern, then rebuild the table."""
        for key in list(self.datasets):
            if re.search(pattern, key):
                del self.datasets[key]
        self.regen_table()

    def regen_table(self):
        """
        from datasets, do:
         - assemble labels and xticks
         - generate corresponding TABLE
         - validate xlabel, tuning_tag
        """
        # xticks, labels
        # Also see section below for self._scalars.
        # -------------------
        xticks = []  # <--> xlabel
        labels = []  # <--> tuning_tag (if applicable)
        # Grab from datasets
        for ds in self.datasets.values():
            xticks += [ds['xticks']]
            labels += [ds['labels']]
        # Make labels and xticks unique
        xticks = np.sort(np.unique(ccat(*xticks)))
        labels = keep_order_unique(ccat(*labels))
        # Assign
        self.xticks = xticks
        self.labels = labels

        # Init TABLE of avrgs
        # -------------------
        TABLE = np.empty(self.shape, object)
        for i, j in np.ndindex(TABLE.shape):
            TABLE[i, j] = []
        # Fill TABLE, fields
        fields = set()
        for ds in self.datasets.values():
            for iC, C in enumerate(ds['labels']):
                for iX, X in enumerate(ds['xticks']):
                    avrgs = ds['avrgs'][iX, :, iC].tolist()
                    TABLE[labels == C, xticks == X][0] += avrgs
                    fields |= set().union(*(a.keys() for a in avrgs))
        self.TABLE = TABLE
        self.fields = fields

        # Non-array attributes (i.e. must be the same in all datasets).
        # --------------------------------------------------------------
        self._scalars = ['xlabel', 'tuning_tag', 'meta']  # Register attributes.
        # NB: If you add a new attribute but not by registering them in _scalars,
        #     then you must also manage it in __deepcopy__().
        scalars = {key: [] for key in self._scalars}  # Init
        # Grab from datasets
        for ds in self.datasets.values():
            for key in scalars:
                if key in ds:
                    scalars[key] += [ds[key].item()]
        # Assign, having ensured consistency
        for key, vals in scalars.items():
            if vals:
                if not all(vals[0] == x for x in vals):
                    raise Exception("The loaded datasets have different %s." % key)
                # Check if some datasets lack the tag.
                if 0 < len(vals) < len(self.datasets):
                    # Don't bother to warn in len==0 case.
                    print("Warning: some of the loaded datasets don't specify %s." % key)
                setattr(self, key, vals[0])
            else:
                setattr(self, key, None)
                # Fix: was `key is not 'tuning_tag'` — identity comparison with a
                # string literal is implementation-defined (SyntaxWarning on 3.8+).
                if key != 'tuning_tag':
                    print("Warning: none of the datasets specify %s." % key)

    @property
    def shape(self):
        """(len(labels), len(xticks)) — the shape of TABLE."""
        return (len(self.labels), len(self.xticks))

    # The number of experiments for a given Config and Setting [iC,iX]
    # may differ (and may be 0). Generate 2D table counting it.
    @property
    def nRepeats(self):
        return np.vectorize(lambda x: len(x))(self.TABLE)

    def rm(self, conditions, INV=False):
        r"""
        Delete configs where conditions is True.
        Also, conditions can be indices or a regex.
        Examples:
        delete if inflation>1.1:   Res.rm('infl 1\.[1-9]')
        delete if contains tag 50: Res.rm('tag 50')
        """
        if not isinstance(conditions, list):
            conditions = [conditions]

        def check_conds(ind, label):
            # Robustness fix: initialize so an empty conditions list does not
            # leave `match` unbound (NameError).
            match = False
            for cond in conditions:
                if hasattr(cond, '__call__'): match = cond(label)
                elif isinstance(cond, str):   match = bool(re.search(cond, label))
                else:                         match = ind == cond  # works for inds
                if match:
                    break
            if INV: match = not match
            return match

        for ds in self.datasets.values():
            inds = [i for i, label in enumerate(ds['labels']) if check_conds(i, label)]
            ds['labels'] = np.delete(ds['labels'], inds)
            ds['avrgs'] = np.delete(ds['avrgs'], inds, axis=-1)
            ds['avrgs'] = np.ascontiguousarray(ds['avrgs'])
            # for ascontiguousarray, see stackoverflow.com/q/46611571
        self.regen_table()

    def split2(self, cond):
        r"""
        Split.
        Example:
        >>> R1, R2 = R.split2(cond) # R1 <-- labels satisfying cond. R2 = R\R1
        """
        C1 = deepcopy(self); C1.rm(cond, INV=True)
        C2 = deepcopy(self); C2.rm(cond)
        return C1, C2

    def split(self, cond):
        r"""
        Split. In-place version.
        Example:
        >>> R1 = R.split('mytag') # R1 <-- labels with 'mytag'. R <-- R\R1.
        """
        C1 = deepcopy(self); C1.rm(cond, INV=True)
        self.rm(cond)
        return C1

    def mv(self, regex, sub, inds=None):
        """
        Rename labels.
        sub:  substitution pattern
        inds: restrict to these inds of table's labels
        """
        if isinstance(inds, int): inds = [inds]
        for ds in self.datasets.values():
            ds['labels'] = list(ds['labels'])  # coz fixed string limits
            for i, cfg in enumerate(ds['labels']):
                if inds is None or cfg in self.labels[inds]:
                    ds['labels'][i] = re.sub(regex, sub, cfg)
        self.regen_table()

    def rm_abcsissa(self, inds):
        """
        Remove xticks with indices inds.
        (NB: method name kept as-is ["abcsissa"] for backward compatibility.)
        """
        D = self.xticks[inds]  # these points will be removed
        for ds in self.datasets.values():
            keep = [i for i, a in enumerate(ds['xticks']) if a not in D]
            ds['xticks'] = ds['xticks'][keep]
            ds['avrgs'] = ds['avrgs'][keep]
            ds['avrgs'] = np.ascontiguousarray(ds['avrgs'])
        self.regen_table()

    def __deepcopy__(self, memo):
        """
        Implement __deepcopy__ to make it faster.
        We only need to copy the datasets.
        Then regen_table essentially re-inits the object.
        The speed-up is achieved by stopping the 'deep' copying
        at the level of the arrays containing the avrgs.
        This is admissible because the entries of avrgs
        should never be modified, only deleted.
        """
        cls = self.__class__
        new = cls.__new__(cls)
        memo[id(self)] = new
        new.patterns = deepcopy(self.patterns)
        new.datasets = OrderedDict()
        for k, ds in self.datasets.items():
            # deepcopy
            new.datasets[k] = {
                'xticks': deepcopy(ds['xticks']),
                'labels': deepcopy(ds['labels'])
            }
            for tag in self._scalars:
                if tag in ds:
                    new.datasets[k][tag] = deepcopy(ds[tag])
            # 'shallow' copy for avrgs:
            new.datasets[k]['avrgs'] = np.empty(ds['avrgs'].shape, dict)
            for idx, avrg in np.ndenumerate(ds['avrgs']):
                new.datasets[k]['avrgs'][idx] = copy(avrg)
        new.regen_table()
        return new

    def __len__(self):
        # len(self) == len(self.labels) == len(self.TABLE)
        return len(self.TABLE)

    def _headr(self):
        """One-line header naming the source patterns (used by __repr__ and plots)."""
        return "ResultsTable from datasets matching patterns:\n" + "\n".join(self.patterns)

    def __repr__(self):
        s = self._headr()
        if hasattr(self, 'xticks'):
            s += "\n\nfields:\n" + str(self.fields) +\
                 "\n\nmeta: " + str(self.meta) +\
                 "\n\nxlabel: " + str(self.xlabel) +\
                 "\nxticks: " + str(self.xticks) +\
                 "\n\ntuning_tag: " + str(self.tuning_tag) +\
                 "\nlabels:\n" +\
                 "\n".join(["[{0:2d}] {1:s}".format(i, name) for i, name in enumerate(self.labels)])
        return s

    def field(self, field):
        """
        Extract a given field from TABLE.
        Insert in 3D list "field3D",
        but with a fixed shape (like an array), where empty <--> None.
        Put iRepeat dimension first,
        so that repr(field3D) prints nRepeats.max() 2D-tables.
        """
        shape = (self.nRepeats.max(),) + self.shape
        field3D = np.full(shape, -999.9)  # Value should be overwritten below
        field3D = field3D.tolist()
        # Lists leave None as None, as opposed to a float ndarray.
        # And Tabulate uses None to identify 'missingval'.
        # So we stick with lists here, to be able to print directly, e.g.
        # Results.print_field(Results.field('rmse_a')
        for iR, iC, iX in np.ndindex(shape):
            try:
                field3D[iR][iC][iX] = self.TABLE[iC, iX][iR][field].val
            except (IndexError, KeyError):
                field3D[iR][iC][iX] = None
        return field3D

    def mean_field(self, field):
        "Extract field. Returns (mean, confidence, #non-fails), each of self.shape."
        field3D = self.field(field)
        field3D = array(field3D, float)  # converts None to Nan (not to be counted as fails!)
        mu   = zeros(self.shape)
        conf = zeros(self.shape)
        nSuc = zeros(self.shape, int)  # non-fails
        for (iC, iX), _ in np.ndenumerate(mu):
            nRep = self.nRepeats[iC, iX]
            f = field3D[:nRep, iC, iX]
            f = f[np.logical_not(np.isnan(f))]
            # NOTE(review): assigning None into a float array relies on NumPy
            # coercing it to NaN — confirm with the NumPy version in use.
            mu  [iC, iX] = f.mean() if len(f) else None
            conf[iC, iX] = f.std(ddof=1)/sqrt(len(f)) if len(f) > 3 else np.nan
            nSuc[iC, iX] = len(f)
        return mu, conf, nSuc

    def print_frame(self, frame):
        "Print single frame. NB: mutates frame (prepends the label to each row)."
        for iC, row in enumerate(frame):
            row.insert(0, self.labels[iC])
        print(tabulate_orig.tabulate(frame, headers=self.xticks, missingval=''))

    def print_field(self, field3D):
        "Loop over repetitions, printing Config-by-Setting tables."
        for iR, frame in enumerate(field3D):
            print_c("\nRep: ", iR)
            self.print_frame(frame)

    def print_mean_field(self, field, show_conf=False, show_fail=False, cols=None):
        """
        Print mean frame, including nRep (#) for each value.
        Don't print nan's when nRep==0 (i.e. print nothing).

        show_conf: include confidence estimate (±).
        show_fail: include number of runs that yielded NaNs (#).
                   if False but NaNs are present: print NaN for the mean value.
        s: indices of columns (experiment xticks) to include.
          - Default           : all
          - tuple of length 2 : value range
          - a number          : closest match
        """
        mu, conf, nSuc = self.mean_field(field)
        nReps = self.nRepeats
        nFail = nReps - nSuc
        # Num of figures required to write nRepeats.max()
        NF = str(int(floor(log10(nReps.max())) + 1))
        # Set mean values to NaN wherever NaNs are present
        if not show_fail: mu[nFail.astype(bool)] = np.nan
        # Determine columns to print
        if cols is None:
            # All
            cols = arange(len(self.xticks))
        if isinstance(cols, slice):
            # Slice
            cols = arange(len(self.xticks))[cols]
        if isinstance(cols, (int, float)):
            # Find closest
            cols = [abs(self.xticks - cols).argmin()]
        if isinstance(cols, tuple):
            # Make range
            cols = np.where((cols[0] <= self.xticks) & (self.xticks <= cols[1]))[0]
        # mattr[0]: names
        mattr = [self.labels.tolist()]
        # headr: name \ setting, with filling spacing:
        MxLen = max([len(x) for x in mattr[0]])
        Space = max(0, MxLen - len(self.xlabel) - 2)
        headr = ['name' + " "*(Space//2) + "\\" + " "*-(-Space//2) + self.xlabel + ":"]
        # Fill in stats
        for iX in cols:
            X = self.xticks[iX]
            # Generate column. Include header for cropping purposes
            col = [('{0:@>6g} {1: <'+NF+'s}').format(X, '#')]
            if show_fail: col[0] += (' {0: <'+NF+'s}').format('X')
            if show_conf: col[0] += ' ±'
            for iC in range(len(self.labels)):
                # Format entry
                nRep = nReps[iC][iX]
                val  = mu   [iC][iX]
                c    = conf [iC][iX]
                nF   = nFail[iC][iX]
                if nRep:
                    s = ('{0:@>6.3g} {1: <'+NF+'d} ').format(val, nRep)
                    if show_fail: s += ('{0: <'+NF+'d} ').format(nF)
                    if show_conf: s += ('{0: <6g} ').format(c)
                else:
                    s = ' '  # gets filled by tabulate
                col.append(s)
            # Crop
            crop = min([s.count('@') for s in col])
            col = [s[crop:] for s in col]
            # Split column into headr/mattr
            headr.append(col[0])
            mattr.append(col[1:])
        # Used @'s to avoid auto-cropping by tabulate().
        print(tabulate(mattr, headr, inds=False).replace('@', ' '))

    def plot_1d(self, field='rmse_a', **kwargs):
        """Plot the mean of `field` vs xticks, one line per label."""
        fig, ax = plt.gcf(), plt.gca()
        Z = self.mean_field(field)[0]
        labels = self.labels
        title_ = ""
        if self.tuning_tag and not any(np.isnan(self.tuning_vals())):
            labels = self.tuning_vals()
            title_ = "\nLegend: " + self.tuning_tag
        #
        from cycler import cycler
        colors = plt.get_cmap('jet')(linspace(0, 1, len(labels)))
        ax.set_prop_cycle(cycler('color', colors))
        lhs = []
        for iC, (row, name) in enumerate(zip(Z, labels)):
            lhs += [ax.plot(self.xticks, row, '-o', label=name, **kwargs)[0]]
        if ax.is_last_row():
            ax.set_xlabel(self.xlabel)
        ax.set_ylabel(field)
        ax.set_title(self._headr() + title_)
        return lhs

    def plot_1d_minz(self, field='rmse_a', **kwargs):
        """Plot the tuning-optimal value of `field` (top) and the optimal tuning (bottom)."""
        fig = plt.gcf()
        fig.clear()
        fig, axs = plt.subplots(nrows=2, ncols=1, sharex=True, gridspec_kw={'height_ratios': [3, 1]}, num=fig.number)
        ax, ax_ = axs
        lhs, lhs_ = [], []
        c = deepcopy(plt.rcParams["axes.prop_cycle"])
        c += plt.cycler(marker=(['o', 's', 'x', '+', '*']*99)[:len(c)])
        c += plt.cycler(ls    =(['-', '--', '-.', ':', '--']*99)[:len(c)])
        ax .set_prop_cycle(c)
        ax_.set_prop_cycle(c)
        unique, _, tuning_vals, fieldvals = self.select_optimal(field)
        for group_name, tunings, vals in zip(unique, tuning_vals, fieldvals):
            lhs  += [ax .plot(self.xticks, vals,    label=group_name, **kwargs)[0]]
            lhs_ += [ax_.plot(self.xticks, tunings, label=group_name, **kwargs)[0]]
        ax_.set_xlabel(self.xlabel)
        ax_.set_ylabel("opt. " + self.tuning_tag)
        ax .set_ylabel(field)
        ax .set_title(self._headr())
        plt.sca(ax)
        return ax, ax_, lhs, lhs_

    def plot_2d(self, field='rmse_a', log=False, cMin=None, cMax=None,
                show_fail=True, minz=True, **kwargs):
        """Pseudocolor plot of the mean of `field` over (labels, xticks)."""
        fig, ax = plt.gcf(), plt.gca()
        # Get plotting data
        Z, _, nSuc = self.mean_field(field)
        nFail = self.nRepeats - nSuc
        if show_fail: pass  # draw crosses (see further below)
        else: Z[nFail.astype(bool)] = np.nan  # Create nan (white?) pixels
        # Color range limit
        cMin = 0.95*Z.min() if cMin is None else cMin
        cMax =      Z.max() if cMax is None else cMax
        CL = cMin, cMax
        # Colormap
        cmap = plt.get_cmap('nipy_spectral', 200)
        cmap.set_over('w')  # white color for out-of-range values
        if log: trfm = mpl.colors.LogNorm  (*CL)
        else:   trfm = mpl.colors.Normalize(*CL)
        # Plot
        mesh = ax.pcolormesh(Z,
                             cmap=cmap, norm=trfm,
                             edgecolor=0.0*ones(3), linewidth=0.3,
                             **kwargs)
        if show_fail:  # draw crosses on top of colored pixels
            d = array([0, 1])
            for (i, j), failed in np.ndenumerate(nFail):
                if failed:
                    plt.plot(j+d, i+d,   'w-', lw=2)
                    plt.plot(j+d, i+d,   'k:', lw=1.5)
                    plt.plot(j+d, i-d+1, 'w-', lw=2)
                    plt.plot(j+d, i-d+1, 'k:', lw=1.5)
        # Colorbar and its ticks.
        # Caution: very tricky in log-case. Don't mess with this.
        cb = fig.colorbar(mesh, shrink=0.9)
        cb.ax.tick_params(length=4, direction='out', width=1, color='k')
        if log:
            ct = round2sigfig(LogSp(max(Z.min(), CL[0]), min(CL[1], Z.max()), 10), 2)
            ct = [x for x in ct if CL[0] <= x <= CL[1]]  # Cannot go outside of clim! Buggy as hell!
            cb.set_ticks(ct)
            cb.set_ticklabels(ct)
        else:
            pass
        cb.set_label(field)
        # Mark optimal tuning
        if minz:
            tuning_inds, _, _ = self.minz_tuning(field)
            ax.plot(0.5 + arange(len(self.xticks)), 0.5 + tuning_inds, 'w*',  ms=12)
            ax.plot(0.5 + arange(len(self.xticks)), 0.5 + tuning_inds, 'b:*', ms=5)
        # title
        ax.set_title(self._headr())
        # xlabel
        ax.set_xlabel(self.xlabel)
        # ylabel:
        if self.tuning_tag is None:
            ax.set_ylabel('labels')
            ylbls = self.labels
        else:
            ax.set_ylabel(self.tuning_tag)
            ylbls = self.tuning_vals()
        # Make xticks less dense, if needed
        nXGrid = len(self.xticks)
        step = 1 if nXGrid <= 16 else nXGrid//10
        # Set ticks
        ax.set_xticks(0.5+arange(nXGrid)[::step]); ax.set_xticklabels(self.xticks[::step]);
        ax.set_yticks(0.5+arange(len(ylbls))); ax.set_yticklabels(ylbls)
        # Reverse order
        # ax.invert_yaxis()
        return mesh

    def tuning_vals(self, **kwargs):
        """Parse the tuning-parameter values out of the labels."""
        return pprop(self.labels, self.tuning_tag, **kwargs)

    def minz_tuning(self, field='rmse_a'):
        """Per xtick: index/value of the label minimizing `field`, and the minimum itself."""
        Z = self.mean_field(field)[0]
        tuning_inds = np.nanargmin(Z, 0)
        tuning_vals = self.tuning_vals()[tuning_inds]
        fieldvals   = Z[tuning_inds, arange(len(tuning_inds))]
        return tuning_inds, tuning_vals, fieldvals

    def select_optimal(self, field='rmse_a'):
        """Group labels that differ only in tuning_tag; per group, pick the optimal tuning."""
        assert self.tuning_tag
        from numpy import ma
        # Remove tuning tag (repl by spaces to avoid de-alignment in case of 1.1 vs 1.02 for ex).
        # Fix: raw strings — '\S' is an invalid escape sequence in a normal string literal.
        pattern = r'(?<!\S)' + self.tuning_tag + r':\S*'  # (no not whitespace), tuning_tag, (not whitespace)
        repl_fun = lambda m: ' '*len(m.group())  # replace by spaces of same length
        names = [re.sub(pattern, repl_fun, n) for n in self.labels]
        names = trim_table(names)
        names = [n.strip() for n in names]
        Z = self.mean_field(field)[0]
        unique = OrderedDict.fromkeys(n for n in names)
        tuning_inds = []
        tuning_vals = []
        fieldvals   = []
        for group_name in unique:
            gg = [i for i, n in enumerate(names) if n == group_name]  # Indices of group
            Zg = ma.masked_invalid(Z[gg])       # Vals of group, with invalids masked.
            fieldvalsg   = Zg.min(axis=0)       # Minima of group; yields invalid if all are invalid
            tuning_indsg = Zg.argmin(axis=0)    # Indices of minima
            tuning_valsg = self.tuning_vals()[gg][tuning_indsg]  # Tuning vals of minmia
            tuning_valsg = ma.masked_array(tuning_valsg, mask=fieldvalsg.mask)  # Apply mask for invalids
            # Append
            tuning_inds += [tuning_indsg]
            tuning_vals += [tuning_valsg]
            fieldvals   += [fieldvalsg]
        return unique, tuning_inds, tuning_vals, fieldvals
def trim_table(list_of_strings):
    """Make (narrow) columns with only whitespace to width 1."""
    # Definitely not an efficient implementation.
    table = list_of_strings
    j = 0
    while True:
        # Stop once j reaches the end of the shortest row.
        # NOTE(review): min() raises ValueError on an empty table — confirm callers
        # never pass [].
        if j==min(len(row) for row in table):
            break
        if all(row[j-1:j+1]==" " for row in table): # check if col has 2 spaces
            # NOTE(review): row[j-1:j+1] is (usually) a 2-character slice, which can
            # only equal the 1-character literal " " when the slice is truncated at a
            # row end. If the intent is "two consecutive spaces", the literal should
            # presumably be two spaces — TODO confirm against the original source
            # (whitespace may have been mangled in transit).
            table = [row[:j]+row[j+1:] for row in table] # remove column j
        else:
            j += 1
    return table
def pprop(labels, propID, cast=float, fillval=np.nan):
    """
    Parse property (propID) values from labels.

    Returns an array with one entry per label: the cast value of the
    'propID:<value>' token if present, else fillval.

    Example:
    >>> pprop(R.labels,'infl',float)
    """
    # Compile once; greedy '.*' anchors the match to the LAST occurrence of propID.
    rx = re.compile(r'.*' + propID + r':(.+?)(\s|$)')
    parsed = []
    for lab in labels:
        m = rx.search(lab)
        parsed.append(cast(m.group(1)) if m else fillval)
    return array(parsed)
|
def prime(n):
    """Return the list of primes strictly less than n (sieve of Eratosthenes)."""
    composite = [False] * n
    for p in range(2, n):
        if not composite[p]:
            # p is prime: mark every proper multiple of p as composite.
            for q in range(2 * p, n, p):
                composite[q] = True
    return [i for i in range(2, n) if not composite[i]]
|
import re
import itertools

# Print every dotted-quad (IPv4-looking) token found on each line of the input.
# Fix: use a context manager so the file handle is always closed.
with open("iptextfile.txt") as file:
    for line in file:
        ip = re.findall(r'[0-9]+(?:\.[0-9]+){3}', line)
        print(ip)
import random , sys

# Multiplication quiz: keep asking until the player answers wrong three times.
misses = 0
score = 0
while misses < 3:
    lhs = random.randint(2, 19)
    rhs = random.randint(1, 9)
    print(str(lhs)+" X "+str(rhs)+"의 답은? ")
    answer = int(sys.stdin.readline())
    if answer == lhs*rhs:
        # Two-digit left factors are harder, so they score more.
        score += 15 if lhs >= 11 else 10
    else:
        misses += 1
        print("ㄴㄴ")
print(score)
import asyncio
from time import sleep, ctime
def simulation_work(time = 1):
    """Simulate `time` seconds of blocking (non-async) work."""
    print('working time = ' + str(time))
    sleep(time)
async def loop_func(event):
    """Wait until `event` is set, then do a little (blocking) work."""
    await event.wait()  # suspends the coroutine; resumes after event.set()
    print('loop_func')
    sleep(1)  # NOTE: blocking sleep stalls the event loop; asyncio.sleep(1) would yield
    # Fix: `ctime` was imported but never called — print the current time,
    # not the literal string 'ctime'.
    print(ctime())
async def main():
    """Demo: schedule a waiter task, do blocking work, then release the waiter."""
    event = asyncio.Event()
    # Schedule the waiter; it cannot run until we yield control to the loop.
    waiter_task = asyncio.create_task(loop_func(event))
    simulation_work(1)  # blocking: the waiter still has not started here
    print('done')
    event.set()
    # Fix: await the task so it actually runs to completion. Without this,
    # main() returns immediately and the loop shuts down with the task still
    # pending ("Task was destroyed but it is pending!").
    await waiter_task
    print('here')
asyncio.run(main()) |
import camera
import settings
import logging, time
import slashmap
def main():
    """Endless client loop: take a picture, upload it to S3, fetch and validate analysis."""
    logging.basicConfig(level=logging.INFO)
    logging.info("STARTING CLIENT...")
    # loop
    while True:
        try:
            # Capture an image; presumably it is written to settings.IMG_PATH,
            # which is what gets uploaded below — TODO confirm in the camera module.
            camera.takePicture()
            s3_filename = slashmap.getS3Filename()
            slashmap.uploadToS3(settings.IMG_PATH, s3_filename)
            logging.info(f"Getting analysis for {s3_filename}")
            analysis = slashmap.getAnalysis(s3_filename)
            print("\n")
            slashmap.validate(analysis)
            print("\n")
        except Exception as e:
            # Best-effort loop: log the error and keep running rather than crash.
            logging.error(f"Error in main loop: {e}")
        # Pause between iterations (applies after success or failure).
        time.sleep(settings.LOOP_TIMEOUT)
# Run the client loop only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
|
# Main Class
from utils import v_sub, boid_pointlist, agent_degree_rotation, limit
from Agent import DEFAULT_SPEED, Agent
from Obstacle import Obstacle
import shared
import domain
import sys
import pygame as pyg
#pygame variables
TITLE = "FLOCKING"
BACKGROUND = (0,0,0)  # window clear colour (black)
AGENT_COLOR = [(116,175,173),(222,27,26)] # [agent 1, agent 2]
OBSTACLE_COLOR = (250,250,250)
TEXT_COLOR = (255,255,255)
TRI_BASE = [12,10] # [agent 1, agent 2]
TRI_HEIGHT = [18,15] # [agent 1, agent 2]
MAX_AGENT_COUNT = 60  # hard cap on how many agents can be spawned

#Initialize Display
shared.init()  # initialise shared state (WIDTH/HEIGHT, agent and obstacle arrays)
pyg.init()
clock = pyg.time.Clock()
screen = pyg.display.set_mode((shared.WIDTH, shared.HEIGHT))
pyg.display.set_caption(TITLE)
def make_agent_inbound():
    """Wrap every agent's position back inside the window (toroidal world)."""
    for agent in shared.agent_array:
        x, y = agent.pos
        agent.pos = x % shared.WIDTH, y % shared.HEIGHT
def draw_text():
    """Render the HUD statistics in the top-left corner of the screen."""
    font = pyg.font.SysFont("consolas",16)
    text_array = [
        font.render("Clock FPS: {}".format(int(clock.get_fps())),20,TEXT_COLOR),
        font.render("Clock Ticks: {}".format(pyg.time.get_ticks()),10,TEXT_COLOR),
        font.render("Agent Count: {}".format(len(shared.agent_array)),20,TEXT_COLOR),
        font.render("Obstacle Count: {}".format(len(shared.obstacle_array)),20,TEXT_COLOR),
        font.render("Red Agent Speed: {}".format(DEFAULT_SPEED + shared.speed_adjustment + 6),20,TEXT_COLOR),
        font.render("Blue Agent Speed: {}".format(DEFAULT_SPEED + shared.speed_adjustment),20,TEXT_COLOR)
    ]
    #display "Agents Reached Max" when agent count reaches max
    if MAX_AGENT_COUNT == len(shared.agent_array):
        text_array[2] = font.render("Agent Count: {} (Agents Reached Max)".format(len(shared.agent_array)),20,TEXT_COLOR)
    #display text: one line per entry, 15 px apart
    # (idiom fix: enumerate instead of range(len(...)))
    for i, text in enumerate(text_array):
        screen.blit(text,(2,3 + i*15))
def draw_agent():
    """Draw each agent as a rotated triangle; colour/size alternate by index parity."""
    # Fix: make_agent_inbound() normalises ALL agents, so calling it once per
    # agent inside the loop was redundant O(n^2) work — hoist it out.
    make_agent_inbound()
    for i, agent in enumerate(shared.agent_array):
        base = TRI_BASE[i % 2]
        height = TRI_HEIGHT[i % 2]
        pointlist = boid_pointlist(base, height)
        surface = pyg.Surface((base, height), pyg.SRCALPHA).convert_alpha()
        pyg.draw.polygon(surface, AGENT_COLOR[i % 2], pointlist, 0)
        rotate = pyg.transform.rotate(surface, -agent_degree_rotation(agent))
        center = v_sub(agent.pos, (base / 2, height / 2))
        screen.blit(rotate, center)
def draw_obstacle():
    """Draw each obstacle as a filled square centred on its position."""
    for obs in shared.obstacle_array:
        half = obs.width / 2
        rect = (obs.pos[0] - half, obs.pos[1] - half, obs.width, obs.width)
        pyg.draw.rect(screen, OBSTACLE_COLOR, rect, 0)
def run():
    """Main loop: process input events, redraw the scene, advance the simulation."""
    #game loop
    while True:
        for event in pyg.event.get():
            if event.type == pyg.QUIT:
                #quit game
                pyg.quit()
                sys.exit()
            elif pyg.key.get_pressed()[pyg.K_c]:
                #clear canvas
                domain.clear_all_item()
            elif pyg.key.get_pressed()[pyg.K_r]:
                #randomize all agents' position
                domain.randomize_position()
            elif pyg.key.get_pressed()[pyg.K_UP]:
                #increase agent speed
                domain.adjust_speed(1)
            elif pyg.key.get_pressed()[pyg.K_DOWN]:
                #decrease agent speed (0 = decrease flag, presumably — confirm in domain module)
                domain.adjust_speed(0)
            elif pyg.mouse.get_pressed()[0] and MAX_AGENT_COUNT > len(shared.agent_array):
                # append new agent (left click), capped at MAX_AGENT_COUNT
                shared.agent_array.append(Agent(pyg.mouse.get_pos()))
            elif pyg.mouse.get_pressed()[2]:
                # append new obstacle (right click)
                shared.obstacle_array.append(Obstacle(pyg.mouse.get_pos()))
        screen.fill(BACKGROUND)
        draw_agent()
        draw_obstacle()
        draw_text()
        domain.agent_update()  # advance the flocking simulation one step
        pyg.display.update()
        clock.tick(shared.FPS)  # cap the frame rate
run()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Report how many bytes of C.png remain after the first 100000.
with open("C.png","rb") as png:
    png.seek(100000)
    tail = png.read()
    print(len(tail))
from typing import NamedTuple, Text

from django.contrib.gis.db.models import MultiPolygonField, PointField
from django.contrib.gis.geos import Polygon
from django.contrib.postgres.fields import JSONField
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Coords(NamedTuple):
    """
    Represents a pair of x, y coordinates (typically longitude and latitude).
    """

    # x is typically the longitude, y the latitude (see class docstring)
    x: float
    y: float
class BBox(NamedTuple):
    """
    Represents a bounding box. c1 is the small values and c2 is the big values.
    """

    # c1 = (min x, min y), c2 = (max x, max y)
    c1: Coords
    c2: Coords

    def to_flickr(self) -> Text:
        """
        Converts the bounding box into Flickr syntax:
        "c1.x,c1.y,c2.x,c2.y" (min corner first).
        """
        return f"{self.c1.x},{self.c1.y},{self.c2.x},{self.c2.y}"

    def to_polygon(self) -> Polygon:
        """
        Converts the bounding box into a GEOS polygon.
        """
        # Tuple concatenation yields (xmin, ymin, xmax, ymax), which is
        # exactly what Polygon.from_bbox expects.
        return Polygon.from_bbox(self.c1 + self.c2)
class Tile(models.Model):
    """
    The MAX_DEPTH is computed so that it encompasses a small area. If there is
    more than the API limit of pictures in this area then it means that this
    place is super-interesting but also already totally standing out, so there
    is no need to dig deeper. This is mostly helpful for special points like
    0,0 which are uninteresting but due to bugs have numerous pictures.

    See
    https://www.xkcd.com/2170/
    https://www.wolframalpha.com/input/?i=360%2F(2%5Ex)+%3D+0.001;+solve+x
    """

    MAX_DEPTH = 19

    # Probing lifecycle states (see `status` help_text below).
    TO_PROBE = "to-probe"
    CONTAINED = "contained"
    SPLIT = "split"
    STATUSES = [
        (TO_PROBE, _("to probe")),
        (CONTAINED, _("contained")),
        (SPLIT, _("split")),
    ]

    parent = models.ForeignKey(
        "Tile", related_name="children", on_delete=models.CASCADE, null=True
    )
    depth = models.IntegerField(
        db_index=True,
        validators=[
            MinValueValidator(
                0,
                message=_(
                    "Depth cannot be lower than 0 as it is already a full-earth tile"
                ),
            ),
            # Fix: this is an UPPER bound, so it must be MaxValueValidator.
            # MinValueValidator(MAX_DEPTH) would reject every depth below 19,
            # contradicting both the validator above and the error message.
            MaxValueValidator(
                MAX_DEPTH,
                message=_(
                    "Max depth is %(max)d because more would not be useful "
                    "(see documentation)"
                )
                % {"max": MAX_DEPTH},
            ),
        ],
    )
    # Tile grid coordinates at this depth (the grid is 2**depth wide/tall).
    x = models.BigIntegerField(db_index=True)
    y = models.BigIntegerField(db_index=True)
    status = models.CharField(
        max_length=max(len(x) for x, _ in STATUSES),
        choices=STATUSES,
        default=TO_PROBE,
        help_text=_(
            "to-probe = a first API call must be done to know if the tile "
            "needs to be split, contained = all pictures from this tile are "
            "returned by the API, split = sub-tiles must be inspected instead"
        ),
    )

    @property
    def bbox(self) -> BBox:
        """
        Generates the bounding box for this tile.
        """
        # At depth d the world is divided into 2**d columns (360° of
        # longitude) and 2**d rows (180° of latitude).
        splits = 2.0 ** self.depth
        dx = 360.0 / splits
        dy = 180.0 / splits

        return BBox(
            Coords(x=-180 + self.x * dx, y=-90 + self.y * dy),
            Coords(x=-180 + (self.x + 1) * dx, y=-90 + (self.y + 1) * dy),
        )

    @property
    def polygon(self) -> Polygon:
        """
        Generates the GEOS polygon for this tile.
        """
        return self.bbox.to_polygon()

    @property
    def can_have_children(self):
        """
        Indicates if this tile can have children
        """
        return (self.depth + 1) <= self.MAX_DEPTH

    def need_children(self) -> bool:
        """
        Call this function if you find that this tile needs children. It will
        generate the required children, unless MAX_DEPTH has been reached. In
        that case, False will be returned.
        """
        if not self.can_have_children:
            return False

        # Each child grid is twice as fine, so this tile covers the 2x2
        # block starting at (2x, 2y).
        depth = self.depth + 1
        x2 = self.x * 2
        y2 = self.y * 2

        Tile.objects.bulk_create(
            [
                Tile(depth=depth, x=x2, y=y2, parent=self),
                Tile(depth=depth, x=x2 + 1, y=y2, parent=self),
                Tile(depth=depth, x=x2 + 1, y=y2 + 1, parent=self),
                Tile(depth=depth, x=x2, y=y2 + 1, parent=self),
            ]
        )

        self.status = Tile.SPLIT
        self.save()

        return True

    def mark_done(self):
        """
        Marks the tile as completely contained
        """
        self.status = Tile.CONTAINED
        self.save()
class Image(models.Model):
    """
    Represents a Flickr image. All image data returned by the API is kept in
    the data field just in case it becomes useful later.
    """

    # Flickr's own identifier for the picture; unique in our table
    flickr_id = models.BigIntegerField(unique=True)
    # Geolocation of the picture
    coords = PointField(spatial_index=True)
    # Nullable: presumably not all API payloads include it — confirm upstream
    date_taken = models.DateTimeField(null=True)
    # Number of faves; indexed (used for ordering/filtering, presumably)
    faves = models.PositiveIntegerField(db_index=True)
    # Raw API payload kept verbatim (see class docstring)
    data = JSONField()
class Area(models.Model):
    """
    That's a scanable area that can be created in the admin.
    """

    # Human-chosen unique identifier (slug) for the area
    name = models.SlugField(unique=True)
    # Geographic extent of the area as a multi-polygon
    area = MultiPolygonField()

    def __str__(self):
        return self.name
|
"""
Code obtained from Allen AI NLP toolkit in September 2019
Modified by PH March 2020
"""
import pandas as pd
from typing import Optional, List, Dict, TextIO
import os
import shutil
import subprocess
import tempfile
from collections import defaultdict
from pathlib import Path
from babybertsrl import configs
class SrlEvalScorer:
"""
This class uses the external srl-eval.pl script for computing the CoNLL SRL metrics.
AllenNLP contains the srl-eval.pl script, but you will need perl 5.x.
Note that this metric reads and writes from disk quite a bit. In particular, it
writes and subsequently reads two files per __call__, which is typically invoked
once per batch. You probably don't want to include it in your training loop;
instead, you should calculate this on a validation set only.
Parameters
----------
ignore_classes : ``List[str]``, optional (default=``None``).
A list of classes to ignore.
"""
def __init__(self,
srl_eval_path: Path,
ignore_classes: Optional[List[str]] = None,
):
self._srl_eval_path = str(srl_eval_path) # The path to the srl-eval.pl script.
self._ignore_classes = set(ignore_classes)
# These will hold per label span counts.
self._true_positives = defaultdict(int)
self._false_positives = defaultdict(int)
self._false_negatives = defaultdict(int)
def __call__(self, # type: ignore
batch_verb_indices: List[Optional[int]],
batch_sentences: List[List[str]],
batch_conll_formatted_predicted_tags: List[List[str]],
batch_conll_formatted_gold_tags: List[List[str]],
verbose: bool = False,
):
# pylint: disable=signature-differs
"""
Parameters
----------
batch_verb_indices : ``List[Optional[int]]``, required.
The indices of the verbal predicate in the sentences which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
batch_sentences : ``List[List[str]]``, required.
The word tokens for each instance in the batch.
batch_conll_formatted_predicted_tags : ``List[List[str]]``, required.
A list of predicted CoNLL-formatted SRL tags (itself a list) to compute score for.
Use allennlp.models.semantic_role_labeler.convert_bio_tags_to_conll_format
to convert from BIO to CoNLL format before passing the tags into the metric,
if applicable.
batch_conll_formatted_gold_tags : ``List[List[str]]``, required.
A list of gold CoNLL-formatted SRL tags (itself a list) to use as a reference.
Use allennlp.models.semantic_role_labeler.convert_bio_tags_to_conll_format
to convert from BIO to CoNLL format before passing the
tags into the metric, if applicable.
"""
if not os.path.exists(self._srl_eval_path):
raise SystemError("srl-eval.pl not found at {}.".format(self._srl_eval_path))
tempdir = tempfile.mkdtemp()
gold_path = os.path.join(tempdir, "gold.txt")
predicted_path = os.path.join(tempdir, "predicted.txt")
with open(predicted_path, "w") as predicted_file, open(gold_path, "w") as gold_file:
for verb_index, sentence, predicted_tag_sequence, gold_tag_sequence in zip(
batch_verb_indices,
batch_sentences,
batch_conll_formatted_predicted_tags,
batch_conll_formatted_gold_tags):
write_conll_formatted_tags_to_file(predicted_file,
gold_file,
verb_index,
sentence,
predicted_tag_sequence,
gold_tag_sequence)
perl_script_command = ['perl', self._srl_eval_path, gold_path, predicted_path]
completed_process = subprocess.run(perl_script_command, stdout=subprocess.PIPE,
universal_newlines=True, check=True)
if configs.Eval.print_perl_script_output:
print(completed_process.stdout)
for line in completed_process.stdout.split("\n"):
stripped = line.strip().split()
if len(stripped) == 7:
tag = stripped[0]
# Overall metrics are calculated in get_metric, skip them here.
if tag == "Overall" or tag in self._ignore_classes:
# print('Skipping collection of tp, fp, and fn for tag={}'.format(tag))
continue
# This line contains results for a span
num_correct = int(stripped[1])
num_excess = int(stripped[2])
num_missed = int(stripped[3])
self._true_positives[tag] += num_correct
self._false_positives[tag] += num_excess
self._false_negatives[tag] += num_missed
shutil.rmtree(tempdir)
def get_tag2metrics(self,
                    reset: bool = False,
                    ) -> Dict[str, Dict[str, float]]:
    """Compute span-based precision/recall/F1 for every observed tag.

    Parameters
    ----------
    reset : bool
        When True, clear the accumulated counts after computing the metrics.

    Returns
    -------
    Mapping from each tag (plus the synthetic key ``'overall'``, aggregated
    over all tags) to a dict with ``'precision'``, ``'recall'`` and ``'f1'``.
    """
    observed_tags = (set(self._true_positives)
                     | set(self._false_positives)
                     | set(self._false_negatives))
    # 'overall' is reserved for the aggregate entry added below.
    if "overall" in observed_tags:
        raise ValueError("'overall' is disallowed as a tag type, "
                         "rename the tag type to something else if necessary.")
    tag2metrics = {}
    for tag in observed_tags:
        precision, recall, f1_measure = self._compute_metrics(
            self._true_positives[tag],
            self._false_positives[tag],
            self._false_negatives[tag])
        tag2metrics[tag] = {'precision': precision,
                            'recall': recall,
                            'f1': f1_measure}
    # Aggregate counts across every tag into one 'overall' entry.
    precision, recall, f1_measure = self._compute_metrics(
        sum(self._true_positives.values()),
        sum(self._false_positives.values()),
        sum(self._false_negatives.values()))
    tag2metrics['overall'] = {'precision': precision,
                              'recall': recall,
                              'f1': f1_measure}
    if reset:
        self.reset()
    return tag2metrics
@staticmethod
def print_summary(tag2metrics: Dict[str, Dict[str, float]],
                  ) -> None:
    """Pretty-print each tag and its metrics, one tag per block, in sorted order.

    added by PH March 2020
    """
    for tag in sorted(tag2metrics):
        print(tag)
        metrics = tag2metrics[tag]
        for name in metrics:
            print(f'\t{name:<12}={metrics[name]:>.2f}')
@staticmethod
def save_tag2metrics(out_path: Path,
                     tag2metrics: Dict[str, Dict[str, float]],
                     rounding: int = 4,
                     ) -> None:
    """Write the per-tag metrics to ``out_path`` as CSV (one column per tag).

    added by ph March 2020
    """
    pd.DataFrame(data=tag2metrics).round(rounding).to_csv(out_path)
@staticmethod
def _compute_metrics(true_positives: int, false_positives: int, false_negatives: int):
precision = float(true_positives) / float(true_positives + false_positives + 1e-13)
recall = float(true_positives) / float(true_positives + false_negatives + 1e-13)
f1_measure = 2. * ((precision * recall) / (precision + recall + 1e-13))
return precision, recall, f1_measure
def reset(self):
    """Discard all accumulated counts, starting a fresh evaluation."""
    for attr in ('_true_positives', '_false_positives', '_false_negatives'):
        setattr(self, attr, defaultdict(int))
def convert_bio_tags_to_conll_format(labels: List[str],
                                     ):
    """
    Translate a BIO tag sequence into the bracketed span notation required by
    the official CoNLL-2005 evaluation perl script.

    A span opens with ``(<label>*`` on its first token and closes with ``*)``
    on its last token; tokens strictly inside a span, and tokens outside any
    span, are written as ``*``.  A single-token span therefore becomes
    ``(<label>*)``.

    A full example of the conversion performed::

        [B-ARG-1, I-ARG-1, I-ARG-1, I-ARG-1, I-ARG-1, O]
        ["(ARG-1*", "*", "*", "*", "*)", "*"]

    Parameters
    ----------
    labels : List[str], required.
        A list of BIO tags to convert to the CONLL span based format.

    Returns
    -------
    A list of labels in the CONLL span based format.
    """
    num_labels = len(labels)
    conll_labels = []
    for idx, tag in enumerate(labels):
        if tag == "O":
            conll_labels.append("*")
            continue
        piece = "*"
        # A span starts here if this is the first token, an explicit B- tag,
        # or the label type changed relative to the previous token.
        opens = (idx == 0
                 or tag[0] == "B"
                 or tag[1:] != labels[idx - 1][1:])
        if opens:
            piece = "(" + tag[2:] + piece
        # A span ends here if this is the last token, the next token starts a
        # new span, or the label type changes on the next token.
        closes = (idx == num_labels - 1
                  or labels[idx + 1][0] == "B"
                  or tag[1:] != labels[idx + 1][1:])
        if closes:
            piece = piece + ")"
        conll_labels.append(piece)
    return conll_labels
def write_conll_formatted_tags_to_file(prediction_file: TextIO,
                                       gold_file: TextIO,
                                       verb_index: int,
                                       sentence: List[str],
                                       conll_formatted_predictions: List[str],
                                       conll_formatted_gold_labels: List[str],
                                       ):
    """
    Prints predicate argument predictions and gold labels for a single verbal
    predicate in a sentence to two provided file references.
    The CoNLL SRL format is described in
    `the shared task data README <https://www.lsi.upc.edu/~srlconll/conll05st-release/README>`_ .
    This function expects IOB2-formatted tags, where the B- tag is used in the beginning
    of every chunk (i.e. all chunks start with the B- tag).

    Parameters
    ----------
    prediction_file : TextIO, required.
        A file reference to print predictions to.
    gold_file : TextIO, required.
        A file reference to print gold labels to.
    verb_index : Optional[int], required.
        The index of the verbal predicate in the sentence which
        the gold labels are the arguments for, or None if the sentence
        contains no verbal predicate.
    sentence : List[str], required.
        The word tokens.
    conll_formatted_predictions : List[str], required.
        The predicted CoNLL-formatted labels.
    conll_formatted_gold_labels : List[str], required.
        The gold CoNLL-formatted labels.
    """
    # Column 1 shows only the predicate word; every other token is "-".
    verb_only_sentence = ['-'] * len(sentence)
    # BUG FIX: the previous `if verb_index:` treated a predicate at position 0
    # as "no predicate"; an explicit None check keeps index 0 working.
    if verb_index is not None:
        verb_only_sentence[verb_index] = sentence[verb_index]
    for word, predicted, gold in zip(verb_only_sentence,
                                     conll_formatted_predictions,
                                     conll_formatted_gold_labels):
        prediction_file.write(word.ljust(15))
        prediction_file.write(predicted.rjust(15) + "\n")
        gold_file.write(word.ljust(15))
        gold_file.write(gold.rjust(15) + "\n")
    # Sentences are separated by a blank line in the CoNLL format.
    prediction_file.write("\n")
    gold_file.write("\n")
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test that custom generators can be passed to --format
"""
import TestGyp
# Drive gyp with a custom generator module instead of a built-in format name.
test = TestGyp.TestGypCustom(format='mygenerator.py')
test.run_gyp('test.gyp')
# mygenerator.py should generate a file called MyBuildFile containing
# "Testing..." alongside the gyp file.
test.must_match('MyBuildFile', 'Testing...\n')
# Reports success and exits the process with status 0.
test.pass_test()
from typing import List, Optional, Union
from hashlib import sha256
# Reward (in coins) paid out by the coinbase transaction of each block.
COINBASE_AMOUNT = 50.
class UnspentTxOut:
    """An unspent transaction output (UTXO): coins held by ``address``.

    Attributes:
        tx_out_id: id of the transaction that produced this output.
        tx_out_index: index of this output within that transaction's outputs.
        address: public key of the owner.
        amount: number of coins held by this output.
    """
    def __init__(
        self,
        tx_out_id: str,
        tx_out_index: int,
        address: str,
        amount: float
    ):
        assert type(tx_out_id) == str
        assert type(tx_out_index) == int
        assert type(address) == str
        assert type(amount) == float
        self.tx_out_id = tx_out_id
        self.tx_out_index = tx_out_index
        self.address = address
        self.amount = amount

    # TODO: Avoid setting attributes after creation.
    def __delattr__(self, name):
        # BUG FIX: __delattr__ is invoked as __delattr__(self, name); the old
        # (self, key, value) signature made `del obj.attr` fail with a
        # TypeError about a missing argument instead of the intended
        # AttributeError below.
        raise AttributeError("Cannot delete attributes.")
class TxIn:
    """
    A transaction input referencing a previously unspent output.

    A transaction usually carries several inputs: the protocol gathers enough
    previous unspent outputs to cover the amount being sent, and when those
    outputs hold more coins in total than needed, the remainder goes back to
    the sender as a change output.
    """
    def __init__(self, tx_out_id: str, tx_out_index: int, signature: Optional[str]):
        assert type(tx_out_id) == str
        assert type(tx_out_index) == int
        assert signature is None or type(signature) == str
        # Identify the referenced output by its transaction id and index.
        self.tx_out_id: str = tx_out_id
        self.tx_out_index: int = tx_out_index
        # Proof of ownership of the referenced output (never the private key).
        self.signature: Optional[str] = signature
class TxOut:
    """A transaction output: ``amount`` coins assigned to ``address``."""
    def __init__(self, address: str, amount: float):
        assert type(address) == str
        self.address: str = address
        assert type(amount) == float
        self.amount: float = amount
class Transaction:
    """A transfer of coins: a list of inputs funding a list of outputs.

    The transaction id is the SHA-256 hex digest over the inputs' referenced
    output ids/indices and the outputs' addresses/amounts (outputs first).
    """
    def __init__(self, tx_ins: List[TxIn], tx_outs: List[TxOut]):
        # BUG FIX: the previous zip()-based check stopped at the shorter of
        # the two lists, silently skipping validation of any extra elements
        # whenever len(tx_ins) != len(tx_outs).  Validate each list in full.
        for tx_in in tx_ins:
            assert isinstance(tx_in, TxIn)
        for tx_out in tx_outs:
            assert isinstance(tx_out, TxOut)
        self.tx_ins: List[TxIn] = tx_ins
        self.tx_outs: List[TxOut] = tx_outs
        self.transaction_id = self._get_transaction_id()

    def _get_transaction_id(self):
        """Hash the transaction content into a deterministic hex id."""
        tx_in_parts: List[str] = [
            tx_in.tx_out_id + str(tx_in.tx_out_index) for tx_in in self.tx_ins
        ]
        tx_out_parts: List[str] = [
            tx_out.address + str(tx_out.amount) for tx_out in self.tx_outs
        ]
        # Outputs are concatenated before inputs, matching the original layout.
        unhashed_id: str = ''.join(tx_out_parts) + ''.join(tx_in_parts)
        return sha256(unhashed_id.encode()).hexdigest()
# TODO: Validations
def get_tx_in_amount(tx_in: TxIn, a_unspent_tx_outs: List[UnspentTxOut]) -> float:
    """Return the amount held by the unspent output referenced by ``tx_in``.

    Returns 0.0 when the referenced output is not found in ``a_unspent_tx_outs``.
    """
    matched = find_unspent_tx_out(
        tx_in.tx_out_id,
        tx_in.tx_out_index,
        a_unspent_tx_outs
    )
    return matched.amount if matched else 0.
def find_unspent_tx_out(
    transaction_id: str,
    index: int,
    a_unspent_tx_outs: List[UnspentTxOut]
) -> Union[bool, UnspentTxOut]:
    """Return the unspent output matching (transaction_id, index), else False.

    # TODO: Would be good to do with sets/numpy.
    # Generally awkward to return Union[bool, UnspentTxOut]
    """
    return next(
        (u_tx_out for u_tx_out in a_unspent_tx_outs
         if u_tx_out.tx_out_id == transaction_id
         and u_tx_out.tx_out_index == index),
        False,
    )
def get_coinbase_transaction(address: str, block_index: int) -> Transaction:
    """Build the block-reward transaction paying COINBASE_AMOUNT to ``address``.

    Usually the first transaction of a block: it has no real input, so the
    single input carries an empty transaction id and the block index.
    """
    coinbase_in = TxIn("", block_index, "")
    coinbase_out = TxOut(address, COINBASE_AMOUNT)
    return Transaction([coinbase_in], [coinbase_out])
def sign_tx_in(
    transaction: Transaction,
    tx_in_index: int,
    private_key: str,
    a_unspent_tx_outs: List[UnspentTxOut]
):
    """
    Produce the ownership signature for one transaction input.

    Spending coins requires proving ownership of the referenced unspent
    output: the signature demonstrates possession of the private key behind
    that output's address (public key).

    NOTE(review): ``private_key`` is currently unused — the signature comes
    from the wallet module; confirm whether it should be passed through.
    """
    tx_in: TxIn = transaction.tx_ins[tx_in_index]
    data_to_sign: str = transaction.transaction_id
    referenced_unspent_tx_out = find_unspent_tx_out(
        tx_in.tx_out_id, tx_in.tx_out_index, a_unspent_tx_outs
    )
    # TODO: Hack to avoid circular dependency.
    from coin.wallet import get_public_from_wallet, get_signature
    # TODO: Bit of a hack for now.
    assert isinstance(referenced_unspent_tx_out, UnspentTxOut)
    # Only sign when the referenced output actually belongs to our public key,
    # i.e. we are spending coins that we own.
    referenced_address: str = referenced_unspent_tx_out.address
    assert referenced_address == get_public_from_wallet()
    return get_signature(data_to_sign)
def process_transactions(
    a_transactions: List[Transaction],
    a_unspent_transactions: List[UnspentTxOut]
):
    """Apply a batch of transactions to the UTXO set and return the new set.

    # TODO Validate on the receivers end. Rn I'm assuming they are benevolent
    # and using this software to create transactions, which doesn't have to be
    # the case.
    """
    return update_unspent_tx_outs(a_transactions, a_unspent_transactions)
def update_unspent_tx_outs(
    new_transactions: List[Transaction],
    a_unspent_tx_outs: List[UnspentTxOut]
):
    """
    Fold a batch of new (already-validated) transactions — typically those of
    a freshly validated block — into the unspent-output set.

    Keeps every existing unspent output the new transactions do not consume,
    then appends the outputs the new transactions create.
    """
    # TODO: Hack to avoid circular dependency.
    from coin.wallet import get_public_from_wallet
    # TODO: Not efficient
    # TODO: Wrap in UpdateUnspentTxOut
    created: List[UnspentTxOut] = []
    for transaction in new_transactions:
        for out_index, tx_out in enumerate(transaction.tx_outs):
            created.append(UnspentTxOut(
                transaction.transaction_id,
                out_index,
                tx_out.address,
                tx_out.amount
            ))
    # Outputs referenced by the new transactions' inputs are now spent.
    consumed: List[UnspentTxOut] = []
    for transaction in new_transactions:
        for tx_in in transaction.tx_ins:
            consumed.append(UnspentTxOut(
                tx_in.tx_out_id,
                tx_in.tx_out_index,
                "",
                0.
            ))
    surviving: List[UnspentTxOut] = [
        u_tx_out for u_tx_out in a_unspent_tx_outs
        if find_unspent_tx_out(
            u_tx_out.tx_out_id, u_tx_out.tx_out_index, consumed) is False
    ]
    return surviving + created
|
# Directory Listing
# Input: string (path to directory)
# Output: list of strings (full paths to files in the directory)
from CONST import *
from os import listdir
from os.path import isfile, join
def ListFiles(dir):
    """Return the full paths of the regular files directly inside ``dir``.

    Subdirectories are excluded; paths are joined onto ``dir`` as given.
    """
    return [join(dir, entry) for entry in listdir(dir) if isfile(join(dir, entry))]
if __name__ == '__main__':
    # Standalone Test
    # NOTE(review): rootDir comes from the star-import of CONST above —
    # confirm it names an existing directory on this machine.
    listedFiles = ListFiles(rootDir)
    print(listedFiles)
import sys
class SpaceObject():
    """A node in the orbit tree: tracks what orbits it and what it orbits."""

    def __init__(self, name):
        super().__init__()
        self._name = name
        self.orbitters = []    # objects orbiting this one
        self.orbitting = None  # the object this one orbits (None for the root)

    def addOrbitter(self, newOrbitter):
        """Register ``newOrbitter`` as orbiting this object."""
        self.orbitters.append(newOrbitter)

    def addOrbitting(self, orbitting):
        """Record the object this one orbits."""
        self.orbitting = orbitting

    def getName(self):
        return self._name

    def getNumberOfOrbitters(self):
        return len(self.orbitters)

    def getOrbitting(self):
        """Return the orbited object, or None when there is none."""
        return self.orbitting if self.orbitting else None

    def getOrbits(self, amount):
        """Count direct and indirect orbits by walking up to the root."""
        node, total = self.orbitting, amount
        while node is not None:
            total += 1
            node = node.orbitting
        return total

    def __repr__(self):
        return "<{}>".format(self._name)
def getOrbits(space_objects):
    """Sum direct and indirect orbit counts over every object in the map."""
    return sum(obj.getOrbits(0) for obj in space_objects.values())
if __name__ == "__main__":
assert(len(sys.argv) == 2)
with open(sys.argv[1]) as file:
orbits = [x.rstrip() for x in file.readlines()]
print(len(orbits))
space_objects = {}
for orbit in orbits:
objects = orbit.split(")")
orbitter = space_objects.get(objects[1], SpaceObject(objects[1]))
center = space_objects.get(objects[0], SpaceObject(objects[0]))
center.addOrbitter(orbitter)
orbitter.addOrbitting(center)
space_objects[center.getName()] = center
space_objects[orbitter.getName()] = orbitter
print(getOrbits(space_objects)) |
import boto3
from pydicom import filereader
import pydicom
from S3StreamingObj import S3StreamObj
import logging
from smart_open import open as so_open
import tarfile
import pandas as pd
import time,sys
from utils import serialize_sets, validateDCMKeyword, flatPN
# Hard-coded S3 location of the DICOM tar.bz2 archive under test.
S3_BUCKET = "hemande-dicom2"
S3_REGION = "us-west-2"
S3_KEY = "MammoTomoUPMC_Case14.tar.bz2"
# Configure the root logger to emit DEBUG and above to stdout.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# Extracted member file objects, one list per archive-download strategy.
smartfplist = []      # filled by smartgenerateBZ2List (smart_open streaming)
s3Streamfplist = []   # intended for streamgenerateBZ2List
localfplist = []      # filled by localgenerateBZ2List (download to /tmp)
def recurse(ds, book={}, parent=""):
    """Flatten a pydicom dataset into ``book`` as dotted-keyword -> value.

    Sequences (VR 'SQ') are walked recursively with the parent keyword as a
    dotted prefix; unknown (VR 'UN'), empty-keyword and private tags are
    skipped with a warning; person names (VR 'PN') are expanded into their
    component fields via flatPN.

    NOTE(review): the mutable default ``book={}`` is shared across calls that
    omit the argument — callers in this file always pass ``book=``, but
    confirm before relying on the default.
    """
    for elm in ds:
        if elm.VR == 'SQ' and elm.keyword != "":
            # Recurse into each item of the sequence, extending the key prefix.
            [recurse(item, book=book,
                     parent=f"{validateDCMKeyword(elm)}.") for item in elm]
        elif elm.VR == 'UN':
            logger.warning(
                f"Skipped: Found Value Representation: UN for Tag {elm.tag} in DCM ; ")
        elif (elm.keyword == ""):
            logger.warning(f"Skipped: Found Empty Keyword Tag {elm.tag}")
        elif (elm.tag.is_private):
            logger.warning(f"Skipped: Found Private Tag {elm.tag}")
        elif elm.VR == 'PN':
            # Person names are split into component fields by flatPN and
            # stored under "<keyword>.<component>" keys.
            nestPN = flatPN(elm)
            for i in nestPN.keys():
                logger.debug(
                    f"TAG: {elm.tag} Keyword: {parent}{elm.keyword}.{i} Value: {nestPN[i]}")
                book[parent + elm.keyword + "." + i] = nestPN[i]
        else:
            if parent == "":
                logger.debug(
                    f"TAG: {elm.tag} Keyword: {elm.keyword} Value: {elm.value}")
                book[validateDCMKeyword(elm)] = serialize_sets(elm.value)
            else:
                logger.debug(
                    f"TAG: {elm.tag} Keyword: {parent}.{elm.keyword} Value: {elm.value}")
                book[parent +
                     validateDCMKeyword(elm)] = serialize_sets(elm.value)
def smartgenerateBZ2List():
    """Stream the tar.bz2 archive from S3 via smart_open and collect the
    extracted member file objects into the module-level ``smartfplist``.

    NOTE(review): reaches into ``test._fp``, a private attribute of the
    smart_open file object — fragile across smart_open versions; confirm.
    """
    logger.info("Starting Generate BZ2 Decompression")
    test = so_open(f"s3://{S3_BUCKET}/{S3_KEY}", mode="rb")
    t = tarfile.open(fileobj=test._fp, mode='r:bz2')
    listItems = t.getmembers()
    for item in listItems:
        if item.isfile():
            logger.info(f"Found File: {item.name}")
            bz = t.extractfile(item)
            # Stash the archive path on the file object for later provenance.
            bz.filename = item.name
            bz.archivefilename = item.name
            smartfplist.append(bz)
        else:
            logger.info(f"Skipped {item.name} : Not File")
    return ""
def localgenerateBZ2List():
    """Download the tar.bz2 archive from S3 to /tmp, then collect the
    extracted member file objects into the module-level ``localfplist``.

    Relies on the module-level ``fileobj`` (an S3 Object) being defined.
    """
    logger.info("Starting LOCAL Generate BZ2 Decompression")
    fileobj.download_file('/tmp/dicom.tar.bz')
    t = tarfile.open('/tmp/dicom.tar.bz', mode='r:bz2')
    listItems = t.getmembers()
    for item in listItems:
        if item.isfile():
            logger.info(f"Found File: {item.name}")
            bz = t.extractfile(item)
            # Stash the archive path on the file object for later provenance.
            bz.filename = item.name
            bz.archivefilename = item.name
            localfplist.append(bz)
        else:
            logger.info(f"Skipped {item.name} : Not File")
    return ""
def streamgenerateBZ2List():
    """Stream the tar.bz2 archive from S3 through the custom S3StreamObj
    wrapper and collect the extracted member file objects into the
    module-level ``s3Streamfplist``.
    """
    # BUG FIX: the log label said "LOCAL" (copy-paste from
    # localgenerateBZ2List), an S3StreamObj was constructed twice with the
    # first instance discarded, and — critically — the extracted files were
    # appended to localfplist, leaving s3Streamfplist empty so that
    # getDF(s3Streamfplist) failed on an empty list.
    logger.info("Starting STREAM Generate BZ2 Decompression")
    filestreamobj = s3.Object(bucket_name=S3_BUCKET, key=S3_KEY)
    t = tarfile.open(fileobj=S3StreamObj(filestreamobj), mode='r:bz2')
    listItems = t.getmembers()
    for item in listItems:
        if item.isfile():
            logger.info(f"Found File: {item.name}")
            bz = t.extractfile(item)
            # Stash the archive path on the file object for later provenance.
            bz.filename = item.name
            bz.archivefilename = item.name
            s3Streamfplist.append(bz)
        else:
            logger.info(f"Skipped {item.name} : Not File")
    return ""
def getDF(filelist):
    """Parse each DICOM file object in ``filelist`` into a one-row DataFrame
    of flattened header metadata and return the concatenation of all rows.

    Non-DICOM members are tolerated (skipped with a warning) when the list
    has more than one entry; a single non-DICOM input is treated as fatal.

    NOTE(review): when ``filelist`` is empty or yields no DICOM files,
    ``dfList[0]`` below raises IndexError — confirm callers never pass such
    a list.
    """
    totalGets = 0   # NOTE(review): never updated below — dead counter?
    totalBytes = 0  # NOTE(review): never updated below — dead counter?
    dfList = []
    totalNonDCM = 0
    startTimer = time.perf_counter()  # NOTE(review): timed but never reported
    for item in filelist:
        try:
            logger.info(f"Attempt to read File: {item.filename}")
            # Read only the header metadata, stopping at the pixel-data tag.
            ds = filereader.read_partial(
                item, filereader._at_pixel_data, defer_size=None, force=False, specific_tags=None)
            ds.remove_private_tags()
            # # Build Python Dict
            FINAL_STRUCT = {}
            recurse(ds, book=FINAL_STRUCT)
            # Record the S3 provenance of this file alongside its metadata.
            FINAL_STRUCT["S3Bucket"] = S3_BUCKET
            FINAL_STRUCT["S3Key"] = S3_KEY
            FINAL_STRUCT["S3BucketRegion"] = S3_REGION
            FINAL_STRUCT["S3KeyArchivePath"] = item.archivefilename
            # Flatten the (possibly nested) dict into a single-row DataFrame.
            df = pd.json_normalize(FINAL_STRUCT)
            logger.debug(
                f"Found {df.columns.values.size} Metadata items in Object")
            dfList.append(df)
            logger.info(
                f"Completed Read File: {item.filename} ; Generated {df.columns.values.size} columns")
        except pydicom.errors.InvalidDicomError as ee:
            totalNonDCM += 1
            if len(filelist) > 1:
                logger.warning(
                    f"Skipping non-DCM file: {item.filename} in archive")
                pass
            else:
                logger.error(ee)
                sys.exit(1)
    endTimer = time.perf_counter()  # NOTE(review): timed but never reported
    if len(dfList) > 1:
        bigDf = pd.concat(dfList)
    else:
        bigDf = dfList[0]
    # NOTE(review): message says "Completed PUT" but nothing is uploaded in
    # this function — confirm the intended wording.
    logger.info(
        f"Completed PUT s3://{S3_BUCKET}/{S3_KEY} to ")
    return bigDf
# NOTE(review): the first resource is immediately overwritten by the
# region-pinned one — the first line looks redundant; confirm before removing.
s3 = boto3.resource("s3")
s3 = boto3.resource("s3", region_name=S3_REGION)
fileobj = s3.Object(bucket_name=S3_BUCKET, key=S3_KEY)
# Populate the per-strategy file lists, then parse each into a DataFrame.
streamgenerateBZ2List()
smartgenerateBZ2List()
localgenerateBZ2List()
smartdf = getDF(smartfplist)
localdf = getDF(localfplist)
streamdf = getDF(s3Streamfplist)
print(smartdf)
|
def main():
    """Read N integers and report the smallest positive one (0 when none)."""
    n = int(input("N: "))
    smallest_positive = 0
    print("Введите", n, "целых чисел: ")
    for _ in range(n):
        value = int(input())
        # Track the minimum over positive inputs; 0 means "none seen yet".
        if value > 0 and (smallest_positive == 0 or value < smallest_positive):
            smallest_positive = value
    print("Ответ: ", smallest_positive)


main()
|
from DataModelDict import DataModelDict as DM
from iprPy.tools import input
import atomman as am
def read_input(f, uuid=None):
    """Reads the calc_*.in input commands for this calculation.

    Parameters
    ----------
    f : file-like object
        The open calc_*.in input file to parse.
    uuid : str, optional
        Calculation id passed through to input.process_common_terms.

    Returns
    -------
    dict
        The processed input_dict with axes, dislocation model, numeric terms
        and elastic constants resolved.
    """
    #Read input file in as dictionary
    input_dict = input.file_to_dict(f)

    #Load a dislocation model if given
    if 'dislocation_model' in input_dict:
        assert 'x-axis' not in input_dict, 'x-axis and dislocation_model cannot both be supplied'
        assert 'y-axis' not in input_dict, 'y-axis and dislocation_model cannot both be supplied'
        assert 'z-axis' not in input_dict, 'z-axis and dislocation_model cannot both be supplied'
        with open(input_dict['dislocation_model']) as f:
            input_dict['dislocation_model'] = DM(f)
        params = input_dict['dislocation_model'].find('atomman-defect-Stroh-parameters')
        #The model dictates the crystallographic orientation
        x_axis = params['crystallographic-axes']['x-axis']
        y_axis = params['crystallographic-axes']['y-axis']
        z_axis = params['crystallographic-axes']['z-axis']
    else:
        #Remove any axes so system is not rotated
        input_dict['dislocation_model'] = None
        x_axis = input_dict.pop('x-axis', [1,0,0])
        y_axis = input_dict.pop('y-axis', [0,1,0])
        z_axis = input_dict.pop('z-axis', [0,0,1])

    #Interpret input terms common across calculations
    input.process_common_terms(input_dict, uuid)

    #Add axes back to input_dict
    input_dict['x-axis'] = x_axis
    input_dict['y-axis'] = y_axis
    input_dict['z-axis'] = z_axis

    #Interpret input terms unique to this calculation.
    input_dict['chi_angle'] = float(input_dict.get('chi_angle', 0.0))
    input_dict['rss_steps'] = int(input_dict.get('rss_steps', 0))
    input_dict['sigma'] = input.value_unit(input_dict, 'sigma', default_unit=input_dict['pressure_unit'], default_term='0.0 GPa')
    input_dict['tau_1'] = input.value_unit(input_dict, 'tau_1', default_unit=input_dict['pressure_unit'], default_term='0.0 GPa')
    input_dict['tau_2'] = input.value_unit(input_dict, 'tau_2', default_unit=input_dict['pressure_unit'], default_term='0.0 GPa')
    input_dict['press'] = input.value_unit(input_dict, 'press', default_unit=input_dict['pressure_unit'], default_term='0.0 GPa')
    input_dict['energy_tolerance'] = float(input_dict.get('energy_tolerance', 0.0))
    input_dict['force_tolerance'] = input.value_unit(input_dict, 'force_tolerance', default_unit=input_dict['force_unit'], default_term='1e-6 eV/angstrom')
    input_dict['maximum_iterations'] = int(input_dict.get('maximum_iterations', 100000))
    input_dict['maximum_evaluations'] = int(input_dict.get('maximum_evaluations', 100000))

    #Extract explicit elastic constants from input_dict
    Cdict = {}
    #BUG FIX: dict.iterkeys() is Python-2 only; iterate over a snapshot of
    #the keys, which works on both Python 2 and 3 and is safe even if
    #value_unit mutates input_dict during the loop.
    for key in list(input_dict.keys()):
        if key[0] == 'C':
            Cdict[key] = input.value_unit(input_dict, key, default_unit=input_dict['pressure_unit'])
    if len(Cdict) > 0:
        assert 'elastic_constants_model' not in input_dict, 'Cij values and elastic_constants_model cannot both be specified.'
        input_dict['elastic_constants_model'] = None
        input_dict['C'] = am.ElasticConstants(**Cdict)

    #If no Cij elastic constants defined check for elastic_constants_model
    else:
        #load file may be the elastic_constants_model file
        input_dict['elastic_constants_model'] = input_dict.get('elastic_constants_model', input_dict['load'].split()[1])

        with open(input_dict['elastic_constants_model']) as f:
            C_model = DM(f)

        try:
            input_dict['elastic_constants_model'] = DM([('elastic-constants', C_model.find('elastic-constants'))])
            input_dict['C'] = am.ElasticConstants(model=input_dict['elastic_constants_model'])
        #BUG FIX: a bare except also swallowed SystemExit/KeyboardInterrupt;
        #narrow to Exception while preserving the None fall-back behaviour.
        except Exception:
            input_dict['elastic_constants_model'] = None
            input_dict['C'] = None

    return input_dict
from django.contrib import admin
from .models import Partner
@admin.register(Partner)
class PartnerModelAdmin(admin.ModelAdmin):
    """Admin configuration for Partner: show name and url in the changelist."""
    list_display = ('name', 'url')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.