text stringlengths 8 6.05M |
|---|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.tree import DecisionTreeClassifier
import xgboost
import matplotlib.pyplot as plt
# Load the pre-split competition data (expects train.csv / test.csv in the CWD).
df_train = pd.read_csv('train.csv', header=0)
df_test = pd.read_csv('test.csv',header=0)
# Split the training set by the binary target m13 so the majority class (0)
# can be partitioned into chunks and the minority class (1) oversampled.
df_train_0 = df_train[df_train['m13'] == 0]
df_train_1 = df_train[df_train['m13'] == 1]
# Resampling knobs: each majority chunk keeps `downsample` negatives per
# positive; positives are duplicated `oversample`-fold (with replacement).
downsample = 55
oversample = 2.5
# Partition the majority class into four contiguous chunks (bounds use the
# minority count *before* oversampling).
df_train_0_1 = df_train_0[:downsample*len(df_train_1)]
df_train_0_2 = df_train_0[downsample*len(df_train_1):2*downsample*len(df_train_1)]
df_train_0_3 = df_train_0[2*downsample*len(df_train_1):3*downsample*len(df_train_1)]
df_train_0_4 = df_train_0[3*downsample*len(df_train_1):]
df_train_1 = df_train_1.sample(int(oversample*len(df_train_1)), replace=True)
# One training frame per majority chunk, all sharing the oversampled positives.
df = pd.concat([df_train_1, df_train_0_1])
df_1 = pd.concat([df_train_1, df_train_0_2])
df_2 = pd.concat([df_train_1, df_train_0_3])
# NOTE(review): df_3 (and df_train_0_4) are built but never used below — only
# df, df_1 and df_2 are trained on; confirm this is intentional.
df_3 = pd.concat([df_train_1, df_train_0_4])
def Model(df, flag, mdel):
    """Feature-engineer one training chunk and fit an XGBoost classifier.

    :param df: training chunk with the raw columns and the target 'm13'
    :param flag: when True, continue training from the booster in `mdel`
    :param mdel: previously fitted model handed to xgboost via `xgb_model`
    :return: the fitted XGBClassifier (also prints a CV classification report)
    """
    # One-hot the categoricals, then derive a single engineered ratio feature.
    df = pd.get_dummies(df, columns=['source', 'financial_institution', 'loan_purpose'])
    df['origination_date'] = pd.to_datetime(df['origination_date'], format="%Y/%m/%d")
    df['first_payment_date'] = pd.to_datetime(df['first_payment_date'], format="%m/%Y")
    # NOTE(review): loan_term is reinterpreted as *nanoseconds*, so 'app'
    # below is an enormous ratio — presumably only its ordering matters;
    # confirm this is intended.
    df['term'] = df['loan_term'].astype('timedelta64[ns]')
    df['day'] = df['first_payment_date'] - df['origination_date']
    df['app'] = df['day'] / df['term']
    df['app'] = df['app'].astype('float')
    y = df['m13']
    # Drop the target, identifiers and the intermediate date columns.
    x = df.drop(columns=['m13', 'loan_id', 'origination_date', 'first_payment_date', 'day', 'term'])
    x_train, x_cv, y_train, y_cv = train_test_split(x, y, test_size=0.3)
    # NOTE(review): XGBClassifier has no `base_estimator` parameter — this tree
    # is effectively ignored (newer xgboost versions warn/raise on it).
    dt = DecisionTreeClassifier(max_depth=21,
                                min_samples_split=2,
                                max_features=None,
                                random_state=None,
                                max_leaf_nodes=None,
                                )
    if flag:
        # Warm-start from the previous chunk's booster via xgb_model.
        model = xgboost.XGBClassifier(base_estimator=dt, n_estimators=500, xgb_model=mdel,scale_pos_weight=4)
    else:
        model = xgboost.XGBClassifier(base_estimator=dt, n_estimators=500, scale_pos_weight=4)
    model.fit(x_train, y_train)
    y_pred = model.predict(x_cv)
    print(classification_report(y_cv, y_pred))
    return model
# Chain-train three boosters, each warm-started from the previous one and fed
# a different majority-class chunk (df_3 is left unused, as before).
model_1 = Model(df, False, None)
model_2 = Model(df_1, True, model_1)
model_3 = Model(df_2, True, model_2)

# Apply the same feature engineering to the test set (note: test.csv uses
# different date formats than train.csv).
df_test = pd.get_dummies(df_test, columns=['source', 'financial_institution', 'loan_purpose'])
df_test['origination_date'] = pd.to_datetime(df_test['origination_date'], format="%d/%m/%y")
df_test['first_payment_date'] = pd.to_datetime(df_test['first_payment_date'], format="%b-%y")
df_test['term'] = df_test['loan_term'].astype('timedelta64[ns]')
df_test['day'] = df_test['first_payment_date'] - df_test['origination_date']
df_test['app'] = df_test['day']/df_test['term']
df_test['app'] = df_test['app'].astype('float')
df_test.drop(columns=['loan_id', 'origination_date', 'first_payment_date', 'day', 'term'], inplace=True)
# BUG FIX: `col` was never defined, so building feat_importance below raised a
# NameError. Capture the feature names while df_test holds exactly the columns
# fed to the model.
col = df_test.columns
y = model_3.predict(df_test)
df_test['m13'] = y
df_test = df_test[['m13']]
# Assumes test rows are in loan_id order starting at 1 — TODO confirm.
df_test['loan_id'] = df_test.index + 1
df_test.to_csv('final_submission.csv', index=False)
feat_importance = pd.Series(model_3.feature_importances_, index=col)
feat_importance.nlargest(30).plot(kind='barh')
#plt.show()
import os
import math
import random
import shutil
alphabet_path = r'alphabet'
target_path = r'Test'
# Generate dictionary
def generate_dict(path):
    ''' Generate Picture Dictionary
    :params path: alphabet pictures dir path
    :return alphabet_dict: {'A' : 'athens',...}

    BUG FIX: the original chdir'd into `path` and then called
    os.listdir(path) again, which breaks when `path` is relative; listing
    the directory directly needs no chdir (and no cwd restore) at all.
    '''
    values = sorted(f[:-4] for f in os.listdir(path) if f.endswith('.jpg'))
    keys = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ. ')
    # zip() stops at the shorter sequence, so surplus keys or files are dropped.
    return dict(zip(keys, values))
# Generate Rename list
def generate_rename(s, alpha_dict):
    ''' Generate a rename list for the given string seek
    :params s: Input UpperCase string
    :params alpha_dict: Alphabet Picture Filename
    :return: new file name list according to s

    Each name is a random 3-letter prefix + the letter's picture basename;
    the prefixes are sorted so the files list in message order.

    PERF FIX: the original materialised all 26**3 = 17576 candidate prefixes
    on every call; the prefixes are now drawn directly.
    NOTE(review): prefixes are drawn independently, so (rare) duplicates can
    make repeated letters collide on rename — same as the original.
    '''
    all_az = 'abcdefghijklmnopqrstuvwxyz'
    prefixes = sorted(
        ''.join(random.choice(all_az) for _ in range(3)) for _ in s
    )
    return [prefix + alpha_dict[ch] for prefix, ch in zip(prefixes, s)]
# Generate a series of pictures of right order
def generate_pic_dir(s, alpha_dict, start_path, end_path):
    ''' Generate pictures in order to some directory
    :params s: Input UpperCase string
    :params alpha_dict: Alphabet Picture Filename
    :params start_path: Pictures origin path
    :params end_path: Generate Picture Path
    '''
    # Source basenames for each letter, and the prefixed names they become.
    f_old_list = [alpha_dict[si] for si in s]
    f_new_list = generate_rename(s, alpha_dict)
    for (f_old, f_new) in zip(f_old_list, f_new_list):
        # copy keeps the original name, so rename it to the prefixed one
        shutil.copy(os.path.join(start_path,f_old+'.jpg'), end_path)
        os.rename(
            os.path.join(end_path,f_old+'.jpg'),
            os.path.join(end_path,f_new+'.jpg'))
# Encode filename by add nums
def encode_f_names(path):
    ''' Encode filenames by inserting random numbers

    Inserts 0-5 random digits into each file's stem, keeping the extension.

    BUG FIX: the original hard-coded the '.jpg' extension when renaming, so
    it crashed on any other file type; the real extension is preserved now.
    '''
    for f in os.listdir(path):
        f_name, ext = os.path.splitext(f)
        nums = '0123456789'
        tmp = list(f_name)
        for _ in range(random.randint(0, 5)):
            # Insert before a random existing character (never past the end).
            idx = random.randint(0, len(tmp)-1)
            tmp.insert(idx, random.choice(nums))
        os.rename(
            os.path.join(path, f),
            os.path.join(path, ''.join(tmp) + ext))
# Decode filename by strip nums
def decode_f_names(path):
    ''' Decode filenames by removing random numbers

    Strips every decimal digit from each file name in `path` and renames
    the file accordingly, logging the old and new names.
    '''
    digit_table = str.maketrans('', '', '0123456789')
    for name in os.listdir(path):
        stripped = name.translate(digit_table)
        print('Old File Name: %s' % name)
        print('New File Name: %s' % stripped)
        os.rename(os.path.join(path, name), os.path.join(path, stripped))
if __name__ == '__main__':
    # Resolve both directories to absolute paths under the current directory.
    alphabet_path = os.path.join(os.getcwd(), alphabet_path)
    target_path = os.path.join(os.getcwd(), target_path)
    az_dict = generate_dict(alphabet_path)
    s = input("Input some sentences: \n").upper()
    # SECURITY FIX: the original passed raw user input to eval(), which
    # executes arbitrary code; int() accepts the same 0/1 answers safely.
    flag = int(input('Encode[0]/Decode[1]: '))
    if not flag:
        generate_pic_dir(s, az_dict, alphabet_path, target_path)
        encode_f_names(target_path)
    else:
        decode_f_names(target_path)
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
going down tree L : 2 : None -> (0)30
going down tree L : 3 : (0)30 -> (1)20
going down tree leaf : 2 : (1)20 -> (2)15
up from left child R : 3 : (2)15 -> (1)20
going down tree leaf : 2 : (1)20 -> (2)25
up from right child : 1 : (2)25 -> (1)20
up from left child R : 2 : (1)20 -> (0)30
going down tree L : 3 : (0)30 -> (1)40
going down tree leaf : 2 : (1)40 -> (2)37
up from left child R : 3 : (2)37 -> (1)40
going down tree leaf : 2 : (1)40 -> (2)45
up from right child : 1 : (2)45 -> (1)40
up from right child : 0 : (1)40 -> (0)30
iterativeLevelOrder
(0)30
(1)20 (1)40
(2)15 (2)25 (2)37 (2)45
"""
from intersect import intersect_primitive, Node, Ray, UNION, INTERSECTION, DIFFERENCE, BOX, SPHERE, EMPTY, desc
class N(object):
    """Hand-wired tree node carrying an explicit depth and optional CSG
    payload (shape/operation); repr shows just the value."""

    def __init__(self, value, left=None, right=None, shape=None, operation=None, depth=0):
        self.value, self.left, self.right = value, left, right
        self.shape, self.operation, self.depth = shape, operation, depth

    def __repr__(self):
        return "%s" % (self.value)
class Node(object):
    """Binary-search-tree node; `depth` is stamped by `add` at creation time."""

    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right

    def __repr__(self):
        # e.g. "(1)20" -> depth 1, value 20
        return "(%d)%s" % (self.depth, self.value)

    @classmethod
    def add(cls, node, value, depth=0):
        """Insert `value` into the subtree rooted at `node`; return its root.

        Strictly larger values go right; ties and smaller values go left.
        """
        if node is not None:
            if value > node.value:
                node.right = cls.add(node.right, value, depth=depth+1)
            else:
                node.left = cls.add(node.left, value, depth=depth+1)
            return node
        fresh = Node(value)
        fresh.depth = depth
        return fresh
def postorder(root):
    """Recursively print the tree's values in post-order, one per line.

    Portability fix: uses the print() function (valid under both Python 2
    and Python 3) instead of the Python-2-only print statement, which is a
    syntax error on Python 3. Output is byte-identical on Python 2.
    """
    if root is None:
        return
    postorder(root.left)
    postorder(root.right)
    print("%d " % root.value)
def postorderTraversalWithoutRecursion(root):
    """
    Trace a post-order traversal using the module-level stack helpers,
    printing one diagnostic line per visit (Python 2 print statement).

    * http://algorithmsandme.in/2015/03/postorder-traversal-without-recursion/
    1. Start with the root node and push the node onto stack.
    2. Repeat all steps till stack is not empty.
    3. Peek the top of the stack.
    3.1 If previous node is parent of current node : ( When we are moving down the tree)
    3.1.1 If left child is present, push left child onto stack.
    3.1.2 Else if right child is present, push right child onto stack
    3.1.3 If left and right children are not present, print the node.
    3.2 If previous node is left child of current node ( When moving up after visiting left node)
    3.2.1 If right child is not present, print current node
    3.2.2 If right child is present, push it onto stack.
    3.3 If previous node is right child of current node ( When moving up after visiting right child )
    3.3.1 Print the node.
    3.3.2 Pop node from stack.
    * pushing a node as navigate across non-leaf nodes preps it for traversal
    * popping a left node, communicates that it has been processed
    """
    pass
    # `prev` remembers the previously visited node so each iteration can tell
    # whether we are moving down the tree or returning up from a child.
    prev = None
    stackPush(root)
    while not stackEmpty():
        node = stackPeek()
        stage = "other"
        # Case 3.1: descending — prev is node's parent (or this is the start).
        if (prev is None) or (node is prev.left) or (node is prev.right):
            stage = "going down tree"
            if node.left is not None:
                stackPush(node.left)
                stage += " L"
            elif node.right is not None:
                stackPush(node.right)
                stage += " R"
            else:
                # Leaf: nothing below, so it is finished — pop it.
                stage += " leaf"
                stackPop()
                pass
            pass
        # Case 3.2: coming back up after finishing the left subtree.
        if prev is node.left:
            stage = "up from left child"
            if node.right is not None:
                stackPush(node.right)
                stage += " R"
            else:
                stage += " leaf"
                stackPop()
                pass
            pass
        # Case 3.3: coming back up after finishing the right subtree — done.
        if prev is node.right:
            stage = "up from right child"
            stackPop()
            pass
        # Diagnostic trace only (Python 2 print statement), no return value.
        print " %20s : %d : %s -> %s l:%s r:%s " % (stage, stackCount(), prev, node, node.left, node.right)
        prev = node
def iterativeLevelorder(root):
    """Print the tree level by level (BFS); nodes must carry a .depth so a
    newline can be emitted whenever a new level starts (Python 2 prints)."""
    q = []
    q.append(root)
    prev = None
    while len(q) > 0:
        node = q.pop(0) # bottom of q (ie fifo)
        # Depth increased relative to the previous node -> start a new line.
        if prev and node.depth > prev.depth:print "\n",
        print node,
        if not node.left is None:q.append(node.left)
        if not node.right is None:q.append(node.right)
        prev = node
stack = []

def stackPush(obj):
    """Push obj onto the shared module-level stack."""
    global stack
    stack.append(obj)

def stackPop():
    """Pop and return the top element, or None when the stack is empty."""
    global stack
    return stack.pop() if stack else None

def stackPeek():
    """Return the top element without removing it (None when empty)."""
    global stack
    return stack[-1] if stack else None

def stackEmpty():
    """Return True when the shared stack holds no elements."""
    global stack
    return not stack

def stackCount():
    """Return the number of elements currently on the shared stack."""
    global stack
    return len(stack)
if __name__ == '__main__':
    # Build the same example tree two ways: via BST insertion (Node.add) and
    # hand-wired with explicit depths (N).
    root0 = None
    for val in [30,20,15,25,40,37,45]:
        root0 = Node.add(root0, val)
    l = N(20, left=N(15,depth=2), right=N(25,depth=2), depth=1)
    r = N(40, left=N(37,depth=2), right=N(45,depth=2), depth=1)
    root1 = N(30, left=l, right=r, depth=0 )
    # Compare recursive, stack-based and level-order traversals (py2 prints).
    postorder(root0)
    print "\n"
    postorder(root1)
    print "\n"
    postorderTraversalWithoutRecursion(root1)
    print "\niterativeLevelOrder"
    iterativeLevelorder(root1)
|
# Read three integers and report which one differs from the other two;
# prints "*" when no single odd-one-out exists (e.g. all three equal).
first, second, third = (int(tok) for tok in input().split())
if first != second and first != third:
    print('A')
elif second != first and second != third:
    print('B')
elif third != first and third != second:
    print('C')
else:
    print('*')
|
# -*- coding: utf-8 -*-
from collections import Counter
class Solution:
    def subarraySum(self, nums, k):
        """Count contiguous subarrays of `nums` summing to `k`.

        Uses prefix sums: a subarray ending here sums to k exactly when a
        prefix equal to (running - k) was seen before. O(n) time/space.
        """
        prefix_counts = Counter({0: 1})
        total = 0
        running = 0
        for value in nums:
            running += value
            total += prefix_counts[running - k]
            prefix_counts[running] += 1
        return total
if __name__ == "__main__":
solution = Solution()
assert 2 == solution.subarraySum([1, 1, 1], 2)
|
# Generated by Django 2.1.7 on 2019-03-14 07:43
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the movies app: Actors, Awards, Categories,
    Comments (generic relation via contenttypes) and Movies. The
    Awards -> Movies FK is added last because Movies is created after Awards.
    """

    initial = True

    dependencies = [
        # Required by the generic ForeignKey (content_type) on Comments.
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        migrations.CreateModel(
            name='Actors',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=150)),
                ('last_name', models.CharField(max_length=150)),
                ('birthdate', models.DateField()),
                ('sex', models.CharField(choices=[('male', 'MALE'), ('female', 'FEMALE'), ('other', 'OTHER')], default='male', max_length=6)),
                ('nationality', models.CharField(max_length=150)),
                ('is_alive', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='Awards',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150)),
                # NOTE(review): integer choice keys on a CharField — stored
                # values get stringified; confirm this matches the model.
                ('kind', models.CharField(choices=[(0, 'Movie'), (1, 'Actor')], default=0, max_length=10)),
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
                ('actor', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='movies.Actors')),
            ],
        ),
        migrations.CreateModel(
            name='Categories',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150)),
            ],
        ),
        migrations.CreateModel(
            name='Comments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # (content_type, object_id) form the generic relation target.
                ('object_id', models.PositiveIntegerField()),
                ('text', models.TextField()),
                ('author', models.CharField(max_length=150)),
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
            ],
            options={
                # Newest comments first.
                'ordering': ['-date'],
            },
        ),
        migrations.CreateModel(
            name='Movies',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=150)),
                ('description', models.TextField()),
                ('release_date', models.DateField()),
                ('logo', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('actors', models.ManyToManyField(blank=True, to='movies.Actors')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movies.Categories')),
            ],
        ),
        # Added after Movies exists, since Awards was created earlier.
        migrations.AddField(
            model_name='awards',
            name='movie',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='movies.Movies'),
        ),
    ]
|
class TreeNode(object):
    """Minimal binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.left = None
        self.right = None
        self.val = x
def tweak_identical(one: TreeNode, two: TreeNode):
    """Return True when the two trees are identical up to swapping the left
    and right children of any number of nodes ("tweak identical").

    FIX: uses short-circuiting `and`/`or` instead of the bitwise `&`/`|` of
    the original, which eagerly recursed into every subtree even after a
    mismatch was already known. Return values are unchanged.
    """
    if one is None and two is None:
        return True
    if one is None or two is None:
        return False
    if one.val != two.val:
        return False
    # Either both children match as-is, or they match after a swap.
    return (tweak_identical(one.left, two.left) and tweak_identical(one.right, two.right)) \
        or (tweak_identical(one.left, two.right) and tweak_identical(one.right, two.left))
|
import os
from kik import KikApi
import requests, base64
# Kik bot credentials are read from the environment; both must be set.
BOT_USERNAME = os.environ.get('MUSIK_USERNAME')
BOT_API_KEY = os.environ.get('MUSIK_API_KEY')
# NOTE(review): this module-level name is never written again —
# get_spotify_token() assigns a *local* of the same name; confirm whether
# this global is still needed.
token_response_json = None
bot_config = {
    "username": BOT_USERNAME,
    "key": BOT_API_KEY
}
kik = KikApi(bot_config["username"], bot_config["key"])
def get_spotify_token():
    """Return a Spotify access token, reusing the cached one when still valid.

    Falls back to the OAuth refresh-token flow and stores the fresh token in
    the SPOTIPY_ACCESS_TOKEN environment variable for later calls.
    """
    cached_token = os.environ.get('SPOTIPY_ACCESS_TOKEN')
    # if token is valid, no need to request a new one
    if is_cached_token_valid():
        print("Returning a cached token since it's valid")
        return cached_token
    # use the refresh token to get a new access token
    print("Requesting a new token!")
    token_data = {
        'grant_type': 'refresh_token',
        'refresh_token': os.environ.get('SPOTIPY_REFRESH_TOKEN'),
    }
    clientID = os.environ.get('SPOTIPY_CLIENT_ID') + ":" + os.environ.get('SPOTIPY_CLIENT_SECRET')
    # BUG FIX: b64encode() requires bytes and returns bytes; the original
    # passed a str and then concatenated the bytes result into the header
    # string, raising TypeError on Python 3.
    b64Val = base64.b64encode(clientID.encode('utf-8')).decode('ascii')
    r = requests.post('https://accounts.spotify.com/api/token', headers={'Authorization': 'Basic ' + b64Val},
                      data=token_data)
    token_response_json = r.json()
    # get token from response and store
    access_token = token_response_json['access_token']
    os.environ["SPOTIPY_ACCESS_TOKEN"] = access_token
    print("Got a new token with: ", access_token)
    return access_token
def is_cached_token_valid():
    """Probe the Spotify API with the cached token; only HTTP 401 counts as
    invalid (any other status, including errors, is treated as valid)."""
    token = os.environ.get('SPOTIPY_ACCESS_TOKEN')
    print("Cached token is: ", token)
    probe = requests.get(
        'https://api.spotify.com/v1/tracks/2TpxZ7JUBn3uw46aR7qd6',
        headers={'Authorization': 'Bearer ' + token})
    return probe.status_code != 401
|
# Copyright (c) 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'targets': [
    # Baseline executable: linker defaults for stack reserve/commit sizes.
    {
      'target_name': 'test_default',
      'type': 'executable',
      'sources': ['hello.cc'],
    },
    # Override only the reserved (virtual) stack size.
    {
      'target_name': 'test_set_reserved_size',
      'type': 'executable',
      'sources': ['hello.cc'],
      'msvs_settings': {
        'VCLinkerTool': {
          'StackReserveSize': 2097152, # 2MB
        }
      },
    },
    # Override only the committed (physical) stack size.
    {
      'target_name': 'test_set_commit_size',
      'type': 'executable',
      'sources': ['hello.cc'],
      'msvs_settings': {
        'VCLinkerTool': {
          'StackCommitSize': 8192, # 8KB
        }
      },
    },
    # Override both settings together.
    {
      'target_name': 'test_set_both',
      'type': 'executable',
      'sources': ['hello.cc'],
      'msvs_settings': {
        'VCLinkerTool': {
          'StackReserveSize': 2097152, # 2MB
          'StackCommitSize': 8192, # 8KB
        }
      },
    },
  ]
}
|
# Generated by Django 3.2.7 on 2021-10-06 19:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the `alumini` model (sic — the misspelling is part of the schema;
    renaming it requires a follow-up migration, not an edit here)."""

    dependencies = [
        ('lab', '0006_alter_joinus_data'),
    ]

    operations = [
        migrations.CreateModel(
            name='alumini',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # presumably the alumnus's name — confirm against the lab app models
                ('labalumini', models.CharField(max_length=300, null=True)),
                # presumably the alumnus's current position — confirm as above
                ('currently', models.CharField(max_length=500, null=True)),
            ],
        ),
    ]
|
#!/usr/bin/env python
import sys, math
from optparse import OptionParser
# python 2.6 does not have erf function
# Handbook of Mathematical Functions: with Formulas, Graphs, and Mathematical Tables formula 7.1.26
# http://stackoverflow.com/questions/457408/is-there-an-easily-available-implementation-of-erf-for-python
def erf(x):
    """Abramowitz & Stegun formula 7.1.26 approximation of erf(x).

    Accurate to about 1.5e-7; negative arguments use the odd symmetry
    erf(-x) = -erf(x). (Needed because Python 2.6 lacks math.erf.)
    """
    if x < 0:
        return -erf(-x)
    # constants of the rational approximation
    a1 = 0.254829592
    a2 = -0.284496736
    a3 = 1.421413741
    a4 = -1.453152027
    a5 = 1.061405429
    p = 0.3275911
    t = 1.0/(1.0 + p*x)
    # Horner evaluation of the degree-5 polynomial in t, damped by exp(-x^2).
    return 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*math.exp(-x*x)
# Command line: for each "label... value" line on stdin, print the label and
# the two-sided tail percentage of N(mean, std) beyond the given value.
parser = OptionParser(usage="usage: %prog -m mean -s std < [file_of_means]")
parser.add_option("-m", "--mean", dest="mean", help="mean of the distribution")
parser.add_option("-s", "--std", dest="std", help="std of the distribution")
options, args = parser.parse_args()
if not (options.mean and options.std and float(options.std) > 0):
    parser.error("missing arguments")
mean = float(options.mean)
std = float(options.std)
for line in sys.stdin:
    line = line.split()
    # The last whitespace-separated field is the value; the rest is the label.
    m = float(line[-1])
    # PORTABILITY FIX: print() works under Python 2 and 3; the original
    # Python-2-only print statement was a syntax error on Python 3.
    print("%s\t%f" % (' '.join(line[:-1]), 100*(1 - erf(abs(mean - m) / (std * (2 ** 0.5))))))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Build a trial loop Step 1
Use this template script to present one trial with your desired structure
@author: katherineduncan
"""
#%% Required set up
# this imports everything you might need and opens a full screen window
# when you are developing your script you might want to make a smaller window
# so that you can still see your console
import numpy as np
import pandas as pd
import os, sys
import random
from psychopy import visual, core, event, gui, logging
#Initialize answer and response arrays (one entry appended per trial)
correct_answer = []
response = []
## Begin loop here ##
# Begin with pre-stim fixation interval
# NOTE(review): psychopy's Window keyword is `units`; `unit` here is likely
# ignored — confirm against the installed psychopy version.
win = visual.Window(fullscr=True,
                    allowGUI=False,
                    color='white',
                    unit='height'
                    )
# Small black dot (zero-spatial-frequency grating) used as the fixation point.
fixation = visual.GratingStim(win=win,
                              size=0.02,
                              pos=[0,0],
                              sf=0,
                              rgb='black')
fixation.draw()
event.Mouse(visible=False)
keys = event.waitKeys(keyList =['space']) # Wait for space bar press to begin trial
win.flip()
core.wait(0.4)   # fixation shown for 400 ms
win.close()
#Present target face image
win2 = visual.Window(fullscr=True,
                     allowGUI=False,
                     color='white',
                     unit='height'
                     )
event.Mouse(visible=False)
# Pick one of the 8 face stimuli at random; `temp` is the target identity.
temp = random.randint(1,8)
# NOTE(review): absolute user-specific path — breaks on any other machine.
file = '/Users/jmsaito/Documents/GitHub/trialloops-jmsaito25/faces/face' + str(temp) + '.jpg'
current_face = visual.ImageStim(win2, image=file, pos=(0,0))
current_face.draw()
win2.flip()
core.wait(2)   # target on screen for 2 s
win2.close()
#Delay Period
win3 = visual.Window(fullscr=True,
                     allowGUI=False,
                     color='white',
                     unit='height'
                     )
event.Mouse(visible=False)
# BUG FIX: the original flipped and closed `win` (the fixation window, already
# closed above) instead of `win3`, so the delay window was never shown/closed.
win3.flip()
core.wait(3)   # 3 s retention interval
win3.close()
#Present probe face
win4 = visual.Window(fullscr=True,
                     allowGUI=False,
                     color='white',
                     unit='height'
                     )
event.Mouse(visible=False)
# Probe drawn independently of the target, so a match occurs with p = 1/8.
temp2 = random.randint(1,8)
file = '/Users/jmsaito/Documents/GitHub/trialloops-jmsaito25/faces/face' + str(temp2) + '.jpg'
current_face = visual.ImageStim(win4, image=file, pos=(0,0))
current_face.draw()
# Record ground truth: 1 = probe matches target, 0 = different face.
if temp == temp2:
    correct_answer.append(1)
else:
    correct_answer.append(0)
win4.flip()
#Gather participant same/different response
# NOTE(review): assumes '1' = same, '0' = different — confirm key mapping.
keys = event.waitKeys(keyList =['1','0'])
response.append(keys)
win4.close()
#End loop with blank screen to indicate end of trial
# NOTE(review): win5 is flipped but never closed, and the collected
# correct_answer/response lists are not saved anywhere in this template.
win5 = visual.Window(fullscr=True,
                     allowGUI=False,
                     color='white',
                     unit='height'
                     )
win5.flip()
|
from copy import copy
from typing import Sequence, Union
import numpy as np
from ..utils.events import EmitterGroup
class Dims:
    """Dimensions object modeling slicing and displaying.
    Parameters
    ----------
    ndim : int, optional
        Number of dimensions
    ndisplay : int, optional
        Number of displayed dimensions.
    order : list of int, optional
        Order in which dimensions are displayed where the last two or last
        three dimensions correspond to row x column or plane x row x column if
        ndisplay is 2 or 3.
    axis_labels : list of str, optional
        Dimension names
    Attributes
    ----------
    events : EmitterGroup
        Event emitter group
    range : list of 3-tuple
        List of tuples (min, max, step), one for each dimension. In a world
        coordinates space.
    point : list of float
        List of floats setting the current value of the range slider when in
        POINT mode, one for each dimension. In a world coordinates space.
    current_step : tuple of int
        Tuple the slider position for each dims slider, in slider coordinates.
    nsteps : tuple of int
        Number of steps available to each slider.
    ndim : int
        Number of dimensions.
    displayed : tuple
        List of dimensions that are displayed.
    not_displayed : tuple
        List of dimensions that are not displayed.
    displayed_order : tuple
        Order of only displayed dimensions.
    """

    def __init__(self, ndim=None, *, ndisplay=2, order=None, axis_labels=None):
        super().__init__()
        # Events:
        self.events = EmitterGroup(
            source=self,
            auto_connect=True,
            current_step=None,
            axis_labels=None,
            ndim=None,
            ndisplay=None,
            order=None,
            range=None,
            last_used=None,
            deprecated={"axis": "current_step", "camera": "ndisplay"},
        )
        self._range = []
        self._current_step = []
        self._order = []
        self._axis_labels = []
        self._scroll_progress = 0
        self._last_used = None
        self._ndisplay = 2 if ndisplay is None else ndisplay
        # Infer ndim from whichever of order/axis_labels was supplied,
        # falling back to the number of displayed dimensions.
        if ndim is None and order is None and axis_labels is None:
            ndim = self._ndisplay
        elif ndim is None and order is None:
            ndim = len(axis_labels)
        elif ndim is None and axis_labels is None:
            ndim = len(order)
        # The ndim setter initializes _range/_current_step/_order/_axis_labels.
        self.ndim = ndim
        if order is not None:
            if len(order) != ndim:
                raise ValueError(
                    f"Length of order must be identical to ndim."
                    f" ndim is {ndim} while order is {order}."
                )
            self._order = order
        if axis_labels is not None:
            if len(axis_labels) != ndim:
                raise ValueError(
                    f"Length of axis labels must be identical to ndim."
                    f" ndim is {ndim} while axis labels is {axis_labels}."
                )
            self._axis_labels = list(axis_labels)

    @property
    def range(self):
        """List of 3-tuple: (min, max, step size) of each dimension.
        """
        return copy(self._range)

    @property
    def nsteps(self):
        """Number of slider steps for each dimension.
        """
        return [
            int((max_val - min_val) // step_size) + 1
            for min_val, max_val, step_size in self._range
        ]

    @property
    def current_step(self):
        """Tuple of int: value of slider position for each dimension."""
        return copy(self._current_step)

    @property
    def point(self):
        """List of float: value of each dimension."""
        # The point value is computed from the current_step
        point = [
            min_val + step_size * value
            for (min_val, max_val, step_size), value in zip(
                self._range, self._current_step
            )
        ]
        return point

    @property
    def axis_labels(self):
        """List of labels for each axis."""
        return copy(self._axis_labels)

    @axis_labels.setter
    def axis_labels(self, labels):
        if self._axis_labels == labels:
            return
        if len(labels) != self.ndim:
            raise ValueError(
                f"Number of labels doesn't match number of dimensions. Number"
                f" of given labels was {len(labels)}, number of dimensions is"
                f" {self.ndim}. Note: If you wish to keep some of the "
                "dimensions unlabeled, use '' instead."
            )
        self._axis_labels = list(labels)
        # Emit one event per axis so listeners can update individual labels.
        for axis in range(self.ndim):
            self.events.axis_labels(axis=axis)

    @property
    def last_used(self):
        """int: Index of the last used slider."""
        return self._last_used

    @last_used.setter
    def last_used(self, last_used):
        if self._last_used == last_used:
            return
        self._last_used = last_used
        self.events.last_used()

    @property
    def order(self):
        """List of int: Display order of dimensions."""
        return copy(self._order)

    @order.setter
    def order(self, order):
        if np.all(self._order == order):
            return
        if not len(order) == self.ndim:
            raise ValueError(
                f"Invalid ordering {order} for {self.ndim} dimensions"
            )
        self._order = order
        self.events.order()

    @property
    def ndim(self):
        """Returns the number of dimensions.
        Returns
        -------
        ndim : int
            Number of dimensions
        """
        # Derived from the per-dimension state rather than stored directly.
        return len(self.point)

    @ndim.setter
    def ndim(self, ndim):
        cur_ndim = self.ndim
        if cur_ndim == ndim:
            return
        elif ndim > cur_ndim:
            # New dimensions are prepended (axis 0 side) with default state.
            # Range value is (min, max, step) for the entire slider
            self._range = [(0, 2, 1)] * (ndim - cur_ndim) + self._range
            # Point is the slider value if in point mode
            self._current_step = [0] * (ndim - cur_ndim) + self._current_step
            self._order = list(range(ndim - cur_ndim)) + [
                o + ndim - cur_ndim for o in self.order
            ]
            # Append new "default" labels to existing ones
            if self._axis_labels == list(map(str, range(cur_ndim))):
                self._axis_labels = list(map(str, range(ndim)))
            else:
                self._axis_labels = (
                    list(map(str, range(ndim - cur_ndim))) + self._axis_labels
                )
            # Notify listeners that the number of dimensions have changed
            self.events.ndim()
            # Notify listeners of which dimensions have been affected
            for axis_changed in range(ndim - cur_ndim):
                self.events.current_step(axis=axis_changed)
        elif ndim < cur_ndim:
            # Dimensions are dropped from the front; keep the trailing ndim.
            self._range = self._range[-ndim:]
            self._current_step = self._current_step[-ndim:]
            self._order = self._reorder_after_dim_reduction(
                self._order[-ndim:]
            )
            self._axis_labels = self._axis_labels[-ndim:]
            # Notify listeners that the number of dimensions have changed
            self.events.ndim()

    def _reorder_after_dim_reduction(self, order):
        """Ensure current dimension order is preserved after dims are dropped.
        Parameters
        ----------
        order : list-like
            The data to reorder.
        Returns
        -------
        arr : list
            The original array with the unneeded dimension
            thrown away.
        """
        # Rank-transform: keeps relative ordering while renumbering 0..n-1.
        arr = np.array(order)
        arr[np.argsort(arr)] = range(len(arr))
        return arr.tolist()

    @property
    def ndisplay(self):
        """Int: Number of displayed dimensions."""
        return self._ndisplay

    @ndisplay.setter
    def ndisplay(self, ndisplay):
        if self._ndisplay == ndisplay:
            return
        if ndisplay not in (2, 3):
            raise ValueError(
                f"Invalid number of dimensions to be displayed {ndisplay}"
            )
        self._ndisplay = ndisplay
        self.events.ndisplay()

    @property
    def displayed(self):
        """Tuple: Dimensions that are displayed."""
        # The last `ndisplay` entries of the display order.
        return self.order[-self.ndisplay :]

    @property
    def not_displayed(self):
        """Tuple: Dimensions that are not displayed."""
        return self.order[: -self.ndisplay]

    @property
    def displayed_order(self):
        """Tuple: Order of only displayed dimensions."""
        # Rank-transform the displayed axes into 0..ndisplay-1.
        order = np.array(self.displayed)
        order[np.argsort(order)] = list(range(len(order)))
        return tuple(order)

    def reset(self):
        """Reset dims values to initial states."""
        for axis in range(self.ndim):
            # Range value is (min, max, step) for the entire slider
            self._range[axis] = (0, 2, 1)
            # Point is the slider value if in point mode
            self._current_step[axis] = 0
            self._order[axis] = axis
            # Default axis labels go from "-ndim" to "-1" so new axes can easily be added
            self._axis_labels[axis] = str(axis - self.ndim)

    def set_range(self, axis: int, _range: Sequence[Union[int, float]]):
        """Sets the range (min, max, step) for a given dimension.
        Parameters
        ----------
        axis : int
            Dimension index.
        _range : tuple
            Range specified as (min, max, step).
        """
        axis = self._assert_axis_in_bounds(axis)
        if self.range[axis] != _range:
            self._range[axis] = _range
            self.events.range(axis=axis)

    def set_point(self, axis: int, value: Union[int, float]):
        """Sets point to slice dimension in world coordinates.
        The desired point gets transformed into an integer step
        of the slider and stored in the current_step.
        Parameters
        ----------
        axis : int
            Dimension index.
        value : int or float
            Value of the point.
        """
        axis = self._assert_axis_in_bounds(axis)
        (min_val, max_val, step_size) = self._range[axis]
        # World coordinate -> (possibly fractional) slider step.
        raw_step = (value - min_val) / step_size
        self.set_current_step(axis, raw_step)

    def set_current_step(self, axis: int, value: int):
        """Sets the slider step at which to slice this dimension.
        The position of the slider in world coordinates gets
        calculated from the current_step of the slider.
        Parameters
        ----------
        axis : int
            Dimension index.
        value : int or float
            Value of the point.
        """
        axis = self._assert_axis_in_bounds(axis)
        # Round and clamp into the valid slider range before storing.
        step = np.round(np.clip(value, 0, self.nsteps[axis] - 1)).astype(int)
        if self._current_step[axis] != step:
            self._current_step[axis] = step
            self.events.current_step(axis=axis, value=step)

    def _increment_dims_right(self, axis: int = None):
        """Increment dimensions to the right along given axis, or last used axis if None
        Parameters
        ----------
        axis : int, optional
            Axis along which to increment dims, by default None
        """
        if axis is None:
            axis = self.last_used
        if axis is not None:
            self.set_current_step(axis, self.current_step[axis] + 1)

    def _increment_dims_left(self, axis: int = None):
        """Increment dimensions to the left along given axis, or last used axis if None
        Parameters
        ----------
        axis : int, optional
            Axis along which to increment dims, by default None
        """
        if axis is None:
            axis = self.last_used
        if axis is not None:
            self.set_current_step(axis, self.current_step[axis] - 1)

    def _focus_up(self):
        """Shift focused dimension slider to be the next slider above."""
        # Only non-displayed axes with more than one step have a usable slider.
        sliders = [d for d in self.not_displayed if self.nsteps[d] > 1]
        if len(sliders) == 0:
            return
        if self.last_used is None:
            self.last_used = sliders[-1]
        else:
            index = (sliders.index(self.last_used) + 1) % len(sliders)
            self.last_used = sliders[index]

    def _focus_down(self):
        """Shift focused dimension slider to be the next slider bellow."""
        sliders = [d for d in self.not_displayed if self.nsteps[d] > 1]
        if len(sliders) == 0:
            return
        if self.last_used is None:
            self.last_used = sliders[-1]
        else:
            index = (sliders.index(self.last_used) - 1) % len(sliders)
            self.last_used = sliders[index]

    def set_axis_label(self, axis: int, label: str):
        """Sets a new axis label for the given axis.
        Parameters
        ----------
        axis : int
            Dimension index
        label : str
            Given label
        """
        axis = self._assert_axis_in_bounds(axis)
        if self.axis_labels[axis] != str(label):
            self._axis_labels[axis] = str(label)
            self.events.axis_labels(axis=axis)

    def _assert_axis_in_bounds(self, axis: int) -> int:
        """Assert a given value is inside the existing axes of the image.
        Returns
        -------
        axis : int
            The axis which was checked for validity (normalized to be
            non-negative via modulo).
        Raises
        ------
        ValueError
            The given axis index is out of bounds.
        """
        if axis not in range(-self.ndim, self.ndim):
            msg = (
                f'Axis {axis} not defined for dimensionality {self.ndim}. '
                f'Must be in [{-self.ndim}, {self.ndim}).'
            )
            raise ValueError(msg)
        return axis % self.ndim

    def _roll(self):
        """Roll order of dimensions for display."""
        # Only roll axes whose sliders have more than one step.
        order = np.array(self.order)
        nsteps = np.array(self.nsteps)
        order[nsteps > 1] = np.roll(order[nsteps > 1], 1)
        self.order = list(order)

    def _transpose(self):
        """Transpose displayed dimensions."""
        # Swap the last two entries of the display order (row <-> column).
        order = copy(self.order)
        order[-2], order[-1] = order[-1], order[-2]
        self.order = order
|
from tkinter import *
from random import *
from time import *

# --- window and canvas -------------------------------------------------
root = Tk()
screen = Canvas( root, height = 700, width = 1200, background = "black")
screen.pack()

# --- game state --------------------------------------------------------
boxsize = 50  # side length of one brick, in pixels
xBall1 = 50  # ball bounding box corners (top-left / bottom-right)
xBall2 = 60
yBall1= 400
yBall2 = 410
xincrement = 1  # ball velocity, pixels per frame
yincrement = 1
xPlatform1 = 375  # paddle left/right x coordinates
xPlatform2 = 425
bricksx = []  # every x pixel covered by some brick (collision lookup)
bricksy = []  # every y pixel covered by some brick (collision lookup)
platform = screen.create_rectangle(xPlatform1, 600, xPlatform2, 610, fill = "green") # draws the paddle initially
ball = screen.create_oval(xBall1, yBall1, xBall2, yBall2, fill = "green") # draws the ball initially
boxes = []  # canvas item ids of the drawn bricks
x1 = 50  # corners of the first brick
y1 = 50
x2 = 100
y2 = 100
# draw a 5-row by 22-column wall of bricks and record every pixel they cover
for z in range (0, 5):
    x1 = 50  # restart each row at the left edge of the wall
    x2 = 100
    for i in range (0, 22):
        box = screen.create_rectangle(x1, y1, x2, y2, fill = "green", outline = "green")  # draw one brick
        x1 = x1 + boxsize  # step one brick width to the right
        x2 = x2 + boxsize
        x1la = x1-50  # scratch copy of this brick's left edge
        for n in range (0, 50):  # brick is 50 px wide
            x1la = x1la + 1
            bricksx.append(x1la)  # record each x pixel of this brick
        boxes.append(box)
    y1 = y1 + boxsize  # move down to the next row
    y2 = y2 + boxsize
    y1la = y1-50  # scratch copy of this row's top edge
    for q in range (0, 50):  # brick is 50 px tall
        y1la = y1la + 1
        bricksy.append(y1la)  # record each y pixel of this row
xPlatformNumero2 = xPlatform2  # scratch copies of the paddle corners, used
xPlatformNumero = xPlatform1   # when rebuilding the paddle-pixel list
falt = []  # x pixels currently covered by the paddle (rebuilt on key press)
def keyPressDetector( event ):
    """Move the paddle 10 px left/right on arrow keys.

    Also rebuilds `falt`, the list of x pixels the paddle currently spans,
    which moveball() uses for ball/paddle collision.
    """
    global xPlatform1, xPlatform2, platform, falt, xPlatformNumero2, xPlatformNumero
    if event.keysym == "Right":  # if the right key is pressed
        screen.delete(platform)  # erase the previously drawn paddle
        xPlatform1 = xPlatform1 + 10  # shift tracked corners to the right
        xPlatform2 = xPlatform2 + 10
        xPlatformNumero2 = xPlatform2
        # Bug fix: `falt` was sampled from the paddle's OLD span before the
        # move, so the collision test lagged one keypress behind; rebuild it
        # from the NEW span instead.
        falt = list(range(xPlatform1, xPlatform2))
        # Bug fix: the paddle was drawn at xPlatform1 + 10, i.e. 10 px to the
        # right of the coordinates used for collision; draw it where it is.
        platform = screen.create_rectangle(xPlatform1, 600, xPlatform2, 610, fill = "green")
        screen.update()
    if event.keysym == "Left":  # if the left key is pressed
        screen.delete(platform)  # erase the previously drawn paddle
        xPlatform1 = xPlatform1 - 10  # shift tracked corners to the left
        xPlatform2 = xPlatform2 - 10
        xPlatformNumero = xPlatform1
        # same two fixes as the Right branch (stale `falt`, offset drawing)
        falt = list(range(xPlatform1, xPlatform2))
        platform = screen.create_rectangle(xPlatform1, 600, xPlatform2, 610, fill = "green")
        screen.update()
def ClickManager (event):
    """Record whether the last mouse press landed inside the play area."""
    global Click
    # True when the click coordinates are strictly inside the canvas
    Click = event.x > 0 and event.y > 0
def moveball():
    """Animation loop: advance the ball each frame, bounce it off the walls,
    paddle and bricks, and delete a brick on brick contact.

    NOTE(review): this is an infinite loop -- it never returns, so any code
    after the call site (e.g. runGame's click check) is unreachable.
    """
    global xBall1, xBall2, yBall1, yBall2, xincrement, yincrement, ball, Qpressed, v, chibi
    v = 0  # rotating index into `boxes`, advanced once per frame
    while True:
        screen.delete(ball)
        xBall1 = xBall1 + xincrement  # move the ball by the current velocity
        xBall2 = xBall2 + xincrement
        yBall1 = yBall1 + yincrement
        yBall2 = yBall2 + yincrement
        ball = screen.create_oval(xBall1, yBall1, xBall2, yBall2, fill = "green")  # redraw the ball
        sleep(0.01)  # frame delay so the game runs at a playable speed
        screen.update()
        v = (v + 1) % len(boxes)  # advance the rotating brick index
        direction = 0  # 1 = last bounce was horizontal, 0 = vertical
        if xBall2 > 1200:  # right wall hit: reflect horizontally
            xincrement = xincrement*(-1)
            direction = 1
        if xBall1 < 0:  # left wall hit: reflect horizontally
            xincrement = xincrement*(-1)
            direction = 1
        if yBall2 > 600 and yBall1 < 600 and xBall1 in (falt):  # paddle hit: ball crosses y=600 within the paddle's pixel span
            yincrement = yincrement*(-1)
            direction = 0
        if yBall1 < 0:  # top wall hit: reflect vertically
            yincrement = yincrement*(-1)
            direction = 0
        if xBall1 in (bricksx) or xBall2 in (bricksx):  # inside the brick wall in x...
            if yBall1 in (bricksy) or yBall2 in (bricksy):  # ...and in y: brick hit
                if direction == 1:  # arriving horizontally: reflect x
                    xincrement = xincrement*(-1)
                if direction == 0:  # arriving vertically: reflect y
                    yincrement = yincrement*(-1)
                # NOTE(review): deletes boxes[v] -- the rotating frame counter's
                # brick, not the brick that was actually hit; confirm intent.
                screen.delete(boxes[v])
def EndGame():  # a procedure to end the game
    """Clear the playfield: remove every brick, the ball, and the paddle."""
    global platform, ball, boxes
    # Bug fix: Canvas.delete takes item ids (or tags) as separate arguments;
    # passing the list object itself deleted nothing. Delete each brick id.
    for box in boxes:
        screen.delete(box)
    screen.delete(ball)  # delete the ball
    screen.delete(platform)  # delete the platform
def runGame():
    """Start the game loop."""
    # NOTE(review): moveball() contains an infinite `while True` loop and never
    # returns, so the Click check below is unreachable as written; the
    # end-on-click test would need to live inside moveball's loop.
    moveball()  # calls moveball() procedure
    if Click == True:  # if the screen has been clicked
        EndGame()  # call EndGame() procedure
screen.bind( "<Key>", keyPressDetector )  # arrow-key paddle control
screen.bind("<Button-1>", ClickManager )  # left-click detection
screen.focus_set()  # give the canvas keyboard focus so key events arrive
# NOTE(review): runGame() blocks inside moveball()'s infinite loop, so the
# mainloop() call below is never reached as written.
runGame()  # calls the runGame() procedure
screen.pack()
root.mainloop()
|
#coding:utf-8
from math import log
import operator
def calcShannonEnt(dataSet):
    """Shannon entropy of the class labels (last column) of *dataSet*."""
    total = len(dataSet)
    labelCounts = {}
    for row in dataSet:
        label = row[-1]
        labelCounts[label] = labelCounts.get(label, 0) + 1
    ent = 0.0
    for count in labelCounts.values():
        p = float(count) / total
        ent -= p * log(p, 2)
    return ent
def intrinsicValue(fretureCounts,num):
    """Intrinsic value (split information) of a feature's value counts.

    NOTE(review): uses log base *num* rather than the log2 of standard C4.5;
    preserved as-is -- confirm this is intentional.
    """
    total = float(num)
    iv = 0.0
    for count in fretureCounts.values():
        p = count / total
        iv -= p * log(p, num)
    return iv
def calcGain(dataSet):
    """Print the information gain ratio of every feature (C4.5-style).

    NOTE(review): results are only printed, never returned or compared.
    """
    # count how many features there are (last column is the class label)
    numFeature = len(dataSet[0]) - 1
    # for each feature, tally its value counts and subset entropies
    for i in range(numFeature):
        # grab the column of values for feature i
        print "拿到第%d个feature的列表" % i
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)
        fretureCounts = {}  # value -> occurrence count for feature i
        sumEntropy = 0.0
        num = len(featList)
        # print num
        for feature in featList:
            if feature not in fretureCounts.keys():
                fretureCounts[feature] = 0
            fretureCounts[feature] += 1
        for feature in uniqueVals:
            # rows of dataSet whose feature i equals this value
            subDataSet = []
            subDataSet.extend([example for example in dataSet if example[i] == feature])
            print subDataSet
            # accumulate the weighted entropy Ent(D^v) of each subset
            sumEntropy += (int(fretureCounts[feature])/float(num))*calcShannonEnt(subDataSet)
        Gain = calcShannonEnt(dataSet) - sumEntropy
        iv = intrinsicValue(fretureCounts,num)
        print 'iv:',iv
        Gain_ratio = Gain/iv
        print Gain_ratio
def createDataSet():
    """Return the toy fish dataset and its feature labels."""
    labels = ['no surfacing','flippers']
    dataSet = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 0, 'no'],
    ]
    return dataSet,labels
def splitDataSet(dataSet,axis,value):
    """Rows whose feature at *axis* equals *value*, with that column removed."""
    return [row[:axis] + row[axis + 1:]
            for row in dataSet
            if row[axis] == value]
def chooseBestFeatureToSplit(dataSet):
    """Return the index of the feature with the highest information gain
    (ID3 criterion), or -1 if no split improves on the base entropy."""
    numFeature = len(dataSet[0]) - 1  # last column is the class label
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeature):
        print "按照第 %d 个特征划分" % i
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)
        newEntropy = 0.0
        print "特征划分",uniqueVals
        # weighted entropy after splitting on feature i
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet,i,value)
            prob = len(subDataSet)/float(len(dataSet))
            newEntropy += prob*calcShannonEnt(subDataSet)
        infoGain = baseEntropy - newEntropy
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature
def majorityCnt(classList):
    """Return the most frequent class label in *classList*.

    Ties keep the first-encountered label (stable sort), matching the
    original behavior.
    """
    classCount={}
    for vote in classList:
        # count occurrences of each label
        classCount[vote] = classCount.get(vote, 0) + 1
    # Bug fix: dict.iteritems() is Python-2-only; items() behaves the same
    # here and works on both Python 2 and 3.
    sortedClassCount = sorted(classCount.items(),key=operator.itemgetter(1),reverse=True)
    return sortedClassCount[0][0]
def createTree(dataSet,labels):
    """Recursively build an ID3 decision tree as nested dicts.

    NOTE: mutates *labels* in place (deletes the chosen feature's label);
    callers that reuse the list should pass a copy.
    """
    classList = [example[-1] for example in dataSet]
    # all samples share one class: pure leaf
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # no features left to split on: majority-vote leaf
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel:{}}
    del(labels[bestFeat])
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        # copy labels so sibling branches don't see this branch's deletions
        subLabels = labels[:]
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet,bestFeat,value),subLabels)
    return myTree
if __name__ == '__main__':
    # smoke-test the ID3 helpers on the toy dataset
    dataSet,labels = createDataSet()
    print calcShannonEnt(dataSet)
    print chooseBestFeatureToSplit(dataSet)
    # NOTE(review): createTree mutates `labels` in place
    print createTree(dataSet,labels)
    calcGain(dataSet)
|
from lib.Database import Database
from uuid import UUID
from plotly import tools, utils
from lib.plots.violin2 import violin2
import plotly.offline as py
import plotly.figure_factory as ff
import plotly.graph_objs as go
import json
import numpy as np
import pandas as pd

# module-level database handle shared by all queries
db = Database()
async def main(args):
    """Entry point: dispatch *args* to run_query, or reject an empty query."""
    if len(args) == 0:
        return "Invalid query"
    return await run_query(*args)
async def run_query(query, *args):
    """Look up *query* by name on Queries, await it, and JSON-encode the result."""
    handler = getattr(Queries, query)
    result = await handler(*args)
    return to_json(result)
def to_json(data):
    """Serialize *data* with the default JSON encoder."""
    encoded = json.dumps(data)
    return encoded
class Queries:
    """Named query coroutines, looked up by name via run_query/getattr.

    NOTE: methods are intentionally defined without `self` -- the class is
    never instantiated; functions are fetched from the class and called
    directly.
    """

    async def particles_by_category_with_flow_near(experiment, flow, category = None, limit = 10):
        """Return [frame, particle] pairs for the *limit* particles whose
        average area-scaled displacement relative to *flow* is smallest,
        optionally restricted to one particle category.

        Parameters use asyncpg-style positional placeholders ($1..$4).
        """
        # The two original branches duplicated the whole SQL statement and
        # differed only by the category filter; build it once instead.
        category_clause = "" if category is None else "AND p.category = $4\n            "
        sql = """
            SELECT t2.particle AS particle, MAX(t2.frame::text) AS frame, AVG(-(t2.location <-> t1.location) * p.area - $1) as dflow
            FROM Track t1, Track t2, Frame f1, Frame f2, Particle p
            WHERE p.particle = t1.particle AND t1.particle = t2.particle
            AND t1.frame = f1.frame AND t2.frame = f2.frame
            AND f1.number = f2.number - 1
            AND p.experiment = $2
            {}GROUP BY t2.particle
            ORDER BY dflow ASC LIMIT $3
            """.format(category_clause)
        params = [float(flow), UUID(experiment), int(limit)]
        if category is not None:
            params.append(int(category))
        return [[str(row["frame"]), str(row["particle"])]
                async for row in db.query(sql, *params)]
|
#import matlab.engine
import argparse
import os
import shutil
import time
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import matplotlib.pyplot as plt
from models.unet import *
from dataset.SpinalDataset_Heatmap import *
from utils import Bar, Logger, AverageMeter, normalizedME, mkdir_p, savefig
from utils.cobb import *
import pandas as pd
# Command-line configuration for the spinal landmark evaluation script.
parser = argparse.ArgumentParser(description='Spinal landmark Training')
# Datasets
parser.add_argument('-d', '--dataset', default='Spine', type=str)
parser.add_argument('-p', '--datapath', default='dataset/boostnet_labeldata/', type=str)
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
# Optimization options
parser.add_argument('--epochs', default=300, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--train-batch', default=12, type=int, metavar='N',
                    help='train batchsize')
parser.add_argument('--test-batch', default=12, type=int, metavar='N',
                    help='test batchsize')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--drop', '--dropout', default=0.5, type=float,
                    metavar='Dropout', help='Dropout ratio')
parser.add_argument('--schedule', type=int, nargs='+', default=[100,200],
                    help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.2, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
# NOTE(review): default is 5e-4 but the help text says 1e-4 -- confirm which
# is intended.
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
# Checkpoints
parser.add_argument('-c', '--checkpoint', default='checkpoint/00/', type=str, metavar='PATH',
                    help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--resume', default='./checkpoint/00/model_best.pth.tar', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
# Architecture
parser.add_argument('--depth', type=int, default=104, help='Model depth.')
parser.add_argument('--cardinality', type=int, default=8, help='Model cardinality (group).')
parser.add_argument('--widen-factor', type=int, default=4, help='Widen factor. 4 -> 64, 8 -> 128, ...')
parser.add_argument('--growthRate', type=int, default=12, help='Growth rate for DenseNet.')
parser.add_argument('--compressionRate', type=int, default=2, help='Compression Rate (theta) for DenseNet.')
# Miscs
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
#Device options
parser.add_argument('--gpu_id', default='0', type=str, help='id(s) for CUDA_VISIBLE_DEVICES')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}  # plain-dict snapshot of args
# Use CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
# Random seed: pick one at random unless supplied, then seed all RNGs
if args.manualSeed is None:
    args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if use_cuda:
    torch.cuda.manual_seed_all(args.manualSeed)
# NOTE(review): initialized to 999 -- apparently "best" means lowest loss
# here, despite the "best test accuracy" wording; confirm.
best_acc= 999 # best test accuracy
def main():
    """Evaluate a trained UNet heatmap model on the Spine test split.

    Loads the checkpoint at args.resume, runs test(), and logs/prints the
    resulting loss.  Despite the training-style scaffolding, this path is
    evaluation-only.
    """
    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)
    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_test = transforms.Compose([
        #SmartRandomCrop(),
        Rescale((64, 32)),
        ToTensor(),
        #Normalize([ 0.485, 0.485, 0.485,], [ 0.229, 0.229, 0.229,]),
    ])
    testset = SpinalDataset_Heatmap(
        csv_file = args.datapath + '/labels/test/filenames.csv', transform=transform_test,
        img_dir = args.datapath + '/data/test/', landmark_dir = args.datapath + '/labels/test/')
    # NOTE(review): uses args.train_batch (not test_batch) and shuffle=True
    # for the test loader -- confirm this is intended.
    testloader = data.DataLoader(testset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)
    model = UNet(3,69)  # 3 input channels, 69 output heatmap channels
    cudnn.benchmark = True
    print('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
    criterion = nn.MSELoss().cuda()
    #ignored_params = list(map(id, model.fc.parameters()))
    #base_params = filter(lambda p: id(p) not in ignored_params,
    #                     model.parameters())
    #params = [
    #    {'params': base_params, 'lr': args.lr},
    #    {'params': model.fc.parameters(), 'lr': args.lr * 10}
    #]
    #model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    #optimizer = optim.Adam(params=params, lr=args.lr, weight_decay=args.weight_decay)
    # Resume
    title = 'facelandmark_resnet_136'
    # Load checkpoint (required: the script asserts the file exists).
    print('==> Resuming from checkpoint..')
    print(args.resume)
    assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
    args.checkpoint = os.path.dirname(args.resume)
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    # append to an existing log if present, otherwise start a fresh one
    if os.path.exists(os.path.join(args.checkpoint, title+'_log.txt')):
        logger = Logger(os.path.join(args.checkpoint, title+'_log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, title+'_log.txt'), title=title)
    logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])
    print('\nEvaluation only')
    test_loss, test_acc = test(testloader, model, criterion, use_cuda)
    print(' Test Loss: %.8f, Test Acc: %.2f' % (test_loss, test_acc))
    return
def test(testloader, model, criterion, use_cuda):
    """Run evaluation over *testloader*, dump a debug heatmap image, compute
    angles from the predicted landmarks and write them to 'pred_angles.csv'.

    Returns
    -------
    tuple
        (average loss, 0) -- the accuracy slot is unused.
    """
    landmarks = []
    shapes = []
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    for batch_idx, batch_data in enumerate(testloader):
        # measure data loading time
        data_time.update(time.time() - end)
        inputs = batch_data['image']
        targets = batch_data['heatmap']
        shape = batch_data['shapes']
        shapes.append(shape.cpu().data.numpy())
        if use_cuda:
            # Bug fix: `.cuda(async=True)` is a SyntaxError on Python 3.7+
            # (`async` became a keyword); the PyTorch parameter is now
            # `non_blocking`.
            inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
        inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
        # compute output
        outputs = model(inputs)
        # debug visualization: predicted vs. target heatmap for channel 20
        plt.subplot(1,2,1)
        print(outputs.shape)
        plt.imshow(outputs.cpu().data.numpy()[0,20,:,:])
        plt.subplot(1,2,2)
        print(targets.shape)
        plt.imshow(targets.cpu().data.numpy()[0, 20, :, :])
        plt.savefig('test.png')
        landmarks.append(outputs.cpu().data.numpy())
        loss = criterion(outputs, targets)
        print(loss)
        # record loss
        losses.update(loss.data, inputs.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} '.format(
            batch=batch_idx + 1,
            size=len(testloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
        )
        bar.next()
    bar.finish()
    # assemble per-sample landmark vectors: (N, 136) flat x/y coordinates
    shapes = np.concatenate(shapes, axis=0)
    Hs = shapes[:,0]
    Ws = shapes[:,1]
    landmarks = np.concatenate(landmarks, axis = 0)
    landmarks = np.reshape(landmarks, (-1, 68, 2))
    landmarks = np.transpose(landmarks, (0, 2, 1))
    landmarks = np.reshape(landmarks, (-1, 136))
    angles = angleCal_py(landmarks, Hs, Ws)
    dataframe = pd.DataFrame(angles)
    dataframe.to_csv('pred_angles.csv',index=False)
    return (losses.avg, 0)
# Script entry point: run evaluation when invoked directly.
if __name__ == '__main__':
    main()
from django.contrib import admin
from django.urls import path,include
from rest_framework import routers
from rest_framework_jwt.views import obtain_jwt_token
from rest_framework_jwt.views import refresh_jwt_token
from rest_framework_jwt.views import verify_jwt_token
from .view import resident,announcement,securityGuard,visitor,notification,bill

# NOTE(review): admin, obtain_jwt_token and refresh_jwt_token are imported
# but not referenced below -- confirm before removing.

# DRF router: registers all viewsets, grouped by app area.
router = routers.DefaultRouter()
# Resident management
router.register(r'resident',resident.ResidentViewSet,basename="resident")
router.register(r'resident_request',resident.RequestViewSet,basename="resident_request")
router.register(r'property',resident.PropertyViewSet,basename="property_list")
# Announcements
router.register(r'announcements',announcement.AnnouncementViewSet,basename="announcement")
# Security guard endpoints
router.register(r'get_primary',securityGuard.GetPrimaryViewSet,basename="security_primary")
router.register(r'reasons',securityGuard.ReasonViewSet)
router.register(r'security_passnumber',securityGuard.PassNumberViewSet)
router.register(r'security_devicenumber',securityGuard.DeviceNumberViewSet)
router.register(r'security_street',securityGuard.SecStreetViewSet)
router.register(r'security_postlog',securityGuard.PostLogViewSet,basename="security_postlog")
router.register(r'security_resident',securityGuard.SecResidentViewSet,basename="security_resident")
router.register(r'security_ipcam',securityGuard.SecIPCamViewSet)
router.register(r'security_boomgate',securityGuard.BoomgateViewSet)
router.register(r'security_boomgatelog',securityGuard.SecBoomgateLogViewSet,basename='boomgatelog')
# Visitor tracking
router.register(r'visitor_entry',visitor.TrackEntryViewSet,basename='visitor_entry')
router.register(r'visitors',visitor.VisitorViewSet,basename='visitor')
router.register(r'entry_schedule',visitor.EntryScheduleViewSet,basename='entry_schedule')
# Notifications, account management, billing
router.register(r'notification',notification.NotificationViewSet,basename='notification')
router.register(r'forgot',resident.PasswordRecoveryViewSet,basename='forgot')
router.register(r'change_password',resident.ChangePasswordViewSet,basename='change_password')
router.register(r'request_family',resident.RequestFamilyViewSet,basename='request_family')
router.register(r'billing',bill.BillingViewSet,basename='billing')

# Router URLs plus explicit auth endpoints (JWT login/verify for residents
# and security guards).
urlpatterns = [
    path('',include(router.urls)),
    path('login/', resident.custom_obtain_jwt_token),
    path('verify_token/', verify_jwt_token),
    path('security_login/',securityGuard.SecurityLogin.as_view()),
    path('security_verify_token/',securityGuard.SVerifyJSONWebToken.as_view())
]
|
import time
import sys
from Transactions import *
from Bitcoin import *
class Rate:
    """Time series of exchange rates for a single symbol."""

    def __init__(self, name):
        self.name = name
        self.rates = {}  # timestamp (int seconds) -> rate (float)

    def addRate(self, timestamp, rate):
        """Record *rate* at *timestamp* (coerced to int seconds)."""
        self.rates[int(timestamp)] = rate

    def findPreviousRate(self, timestamp):
        """Return the most recent rate recorded strictly before *timestamp*,
        or 0.0 if there is none.

        Bug fix: the original loop returned the second-to-last rate when
        *timestamp* was later than every recorded timestamp (the for loop
        fell through without `break` and then indexed one element too far
        back).
        """
        previous = 0.0
        for t in sorted(self.rates.keys()):
            if t >= timestamp:
                break
            previous = self.rates[t]
        return previous

    def getRate(self, timestamp):
        """Rate at *timestamp*: exact match if known, otherwise the most
        recent earlier rate (0.0 before the first record)."""
        timestamp = int(timestamp)
        if timestamp in self.rates:
            # exact timestamp known
            return self.rates[timestamp]
        # interpolate backwards to the previous known rate
        return self.findPreviousRate(timestamp)

    def getMinTimestamp(self):
        """Earliest recorded timestamp (raises ValueError when empty)."""
        return min(self.rates.keys())

    def getMaxTimestamp(self):
        """Latest recorded timestamp (raises ValueError when empty)."""
        return max(self.rates.keys())

    def __str__(self):
        """CSV dump: one 'name,timestamp,rate' line per record, time-sorted."""
        c = ""
        for ts in sorted(self.rates.keys()):
            try:
                c += "{:s},{:d},{:f}\n".format(self.name, ts, self.rates[ts])
            except Exception:
                # best-effort: skip records that cannot be formatted
                # (single-arg parenthesized print works on Python 2 and 3)
                print("uuu wrong format")
        return c
class Rates:
    """Collection of per-symbol Rate series with CSV persistence and
    loaders for Havelock / Bitcoin transaction files."""
    def __init__(self):
        self.rates = {}  # symbol name -> Rate
    def hasSymbol(self, name):
        """True if a series exists for *name*."""
        return name in self.rates
    def addSymbol(self, name):
        """Create a new, empty series for *name*."""
        #print "add new symbol {:s}".format(name)
        self.rates[name] = Rate(name)
    def getMinTimestamp(self, symbol=None):
        """Earliest timestamp for *symbol*, or across all series if None."""
        if symbol is None:
            ts = []
            for s in self.rates.keys():
                ts.append(self.rates[s].getMinTimestamp())
            return min(ts)
        return self.rates[symbol].getMinTimestamp()
    def getMaxTimestamp(self, symbol=None):
        """Latest timestamp for *symbol*, or across all series if None."""
        if symbol is None:
            ts = []
            for s in self.rates.keys():
                ts.append(self.rates[s].getMaxTimestamp())
            return max(ts)
        return self.rates[symbol].getMaxTimestamp()
    def getRate(self, name, timestamp):
        """Rate of *name* at *timestamp*; 0.0 (with a warning) if unknown."""
        if name not in self.rates.keys():
            print "no values for symbol {:s}".format(name)
            return 0.0
        return self.rates[name].getRate(timestamp)
    def addRate(self, name, rate, timestamp=None):
        """Record *rate* for *name*; timestamp defaults to now."""
        if timestamp is None:
            timestamp = int(time.time())
        if name not in self.rates:
            self.addSymbol(name)
        self.rates[name].addRate(timestamp, rate)
    def store(self, filename):
        """Write all series to *filename* as 'name,timestamp,rate' lines."""
        try:
            f = open(filename, "w")
        except:
            print "failed to open filename {:s}".format(filename)
            return None
        c = ""
        for symbol in self.rates.values():
            c += str(symbol)
        f.write(c)
        f.close()
    def load(self, filename):
        """Read a CSV file produced by store(); malformed lines are skipped."""
        try:
            f = open(filename, "r")
        except:
            print "failed to load rates from {:s}".format(filename)
            return None
        c = f.read().split("\n")
        f.close()
        for line in c:
            values = line.split(",")
            if len(values) != 3:
                continue
            try:
                name = values[0]
                timestamp = int(values[1])
                rate = float(values[2])
            except:
                print "failed to parse line {:s}".format(line)
                continue
            self.addRate(name, rate, timestamp)
    def loadHavelockFile(self, filename):
        """Import rates from a Havelock transaction dump."""
        f = open(filename, "r")
        raw = f.read()
        f.close()
        transactions = Transactions()
        for line in raw.split("\n"):
            transactions.addTransaction(line)
        self.loadTransactions(transactions)
    def loadBitcoinFile(self, filename):
        """Import rates from a Bitcoin transaction dump."""
        f = open(filename, "r")
        raw = f.read()
        f.close()
        transactions = Transactions()
        ts = []
        for line in raw.split("\n"):
            if not line:
                continue
            b = BitcoinTransaction()
            if b.parse(line):
                ts.append(b)
        transactions.addTransactions(ts)
        self.loadTransactions(transactions)
    def loadTransactions(self, transactions):
        """Record a rate for every price-carrying transaction type."""
        inserted = 0
        for trans in transactions.transactions:
            t = trans.getType()
            if t == "buyipo" or t == "rate" or t == "buy" or t == "sell":
                self.addRate(trans.getSymbol(), trans.getPrice(), trans.getTimestamp())
                inserted += 1
        print "loaded {:d} rates".format(inserted)
if __name__ == "__main__":
    # No arguments: print usage and exit.
    if len(sys.argv) == 1:
        print "run this script with:"
        print "- {:s} havelock-transactions-file bitcoin-transaction-file".format(sys.argv[0])
        print " to read rates from havelock and bitcoin file"
        print " store to rates.prf"
        print "- {:s} rate-file".format(sys.argv[0])
        print " to read a rate file"
        sys.exit(0)
    rates = Rates()
    # Two file arguments: import Havelock + Bitcoin transactions, save rates.
    if len(sys.argv) == 3:
        rates.loadHavelockFile(sys.argv[1])
        rates.loadBitcoinFile(sys.argv[2])
        rates.store("rates.prf")
    # One file argument: load a previously stored rate file.
    if len(sys.argv) == 2:
        rates.load(sys.argv[1])
    # spot-check lookups (exact hit, before-first, and interpolated cases)
    print rates.getRate("AMHASH1", 1418419807)
    print rates.getRate("AMHASH1", 1018419807)
    print rates.getRate("AMHASH1", 1418419808)
    print rates.getRate("AMHASH1", 1418419809)
|
import kivy
from kivy.clock import Clock
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.boxlayout import BoxLayout
from kivy.core.image import Image
from kivy.uix.image import Image as ImageWidget
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
import particlesystem as kivyparticle
from colorpicker.cblcolorpicker import CBLColorPicker, CBLColorWheel
from kivy.properties import NumericProperty, BooleanProperty, ListProperty, StringProperty, ObjectProperty
from kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelHeader
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.uix.rst import RstDocument
from kivy.core.window import Window
from kivy.graphics.opengl import glReadPixels, GL_RGBA, GL_UNSIGNED_BYTE
from kivy.uix.scrollview import ScrollView
import os
import math
import pygame
from random import randint
from functools import partial
from time import sleep
from xml.dom.minidom import Document
import xml.dom.minidom
from kivy.uix.dropdown import DropDown
class ParticleBuilder(Widget):
    """Root widget of the particle editor (layout comes from the kv rules)."""
    # Kivy-declared properties bound from the kv file.
    demo_particle = ObjectProperty(kivyparticle.ParticleSystem)
    particle_window = ObjectProperty(None)
    active_filename = StringProperty(None)
    init_count = NumericProperty(0)
    def __init__(self, **kwargs):
        super(ParticleBuilder, self).__init__(**kwargs)
class ParticleParamsLayout(Widget):
    """Container for the per-parameter tab panels."""
    # True once the parameter tabs have been built.
    tabs_loaded = BooleanProperty(False)
    def __init__(self,**kwargs):
        super(ParticleParamsLayout, self).__init__(**kwargs)
class ParticleLoadSaveLayout(Widget):
    """Toolbar handling load/save of particle effect files via popups."""
    new_particle = ObjectProperty(None)
    load_dir = 'templates'  # subdirectory browsed by the load popup
    def __init__(self,**kwargs):
        # Popups are created before super().__init__ so kv bindings can rely
        # on them existing when the widget tree is built.
        load_particle_popup_content = LoadParticlePopupContents(self)
        self.load_particle_popup = Popup(title="Particle Effects", content=load_particle_popup_content, size_hint = (None,None), size=(512,512), on_open=self._popup_opened, on_dismiss=self._popup_dismissed)
        save_particle_popup_content = SaveParticlePopupContents(self)
        self.save_particle_popup = Popup(title="Particle Effects", content=save_particle_popup_content, size_hint = (None,None), size=(512,512), on_open=self._popup_opened, on_dismiss=self._popup_dismissed)
        super(ParticleLoadSaveLayout,self).__init__(**kwargs)
        # load the default particle (scheduled for the next frame so that it doesn't break)
        Clock.schedule_once(self.load_default_particle)
class GetNewFilenameLayout(Widget):
    """Prompt layout asking the user for a new particle filename."""
    fname_input = ObjectProperty(None)
    def __init__(self, load_save_widget, **kwargs):
        # handle back to the load/save toolbar so kv callbacks can save
        self.load_save_widget = load_save_widget
        super(GetNewFilenameLayout,self).__init__(**kwargs)
class LoadParticlePopupContents(Widget):
    """Contents of the load-particle popup (template / user-file browser)."""
    blayout = ObjectProperty(None)
    blayout_height = NumericProperty(50)
    menu_height = NumericProperty(50)
    label_height = NumericProperty(50)

    def __init__(self, load_save_widget, **kwargs):
        self.load_save_widget = load_save_widget
        super(LoadParticlePopupContents, self).__init__(**kwargs)

    def button_callback(self, value):
        """Dispatch a popup button press to the matching loader."""
        actions = {
            'load templates': self.load_save_widget.load_templates,
            'load user files': self.load_save_widget.load_user_files,
        }
        action = actions.get(value)
        if action is not None:
            action()
class SaveParticlePopupContents(Widget):
    """Contents of the save-particle popup."""
    blayout = ObjectProperty(None)
    blayout_height = NumericProperty(50)
    label_height = NumericProperty(50)
    def __init__(self, load_save_widget, **kwargs):
        # handle back to the load/save toolbar so kv callbacks can save
        self.load_save_widget = load_save_widget
        super(SaveParticlePopupContents,self).__init__(**kwargs)
class Default_Particle_Panel(Widget):
    """Placeholder panel shown before the parameter tabs load (kv-defined)."""
    pass
class ImageChooserCell(Widget):
    """One thumbnail cell in the texture-chooser grid."""
    # path of the image this cell represents; "None" string is the unset sentinel
    image_location = StringProperty("None")
    image_chooser = ObjectProperty(None)
    def cell_press(self):
        """Report this cell's image back to the owning ImageChooser."""
        self.image_chooser.select(self.image_location)
class ImageChooserPopupContent(GridLayout):
    """6x6 grid of selectable particle-texture thumbnails for the chooser popup."""

    def __init__(self, image_chooser = None, **kwargs):
        super(ImageChooserPopupContent, self).__init__(
            rows = 6, cols = 6, col_force_default = True, row_force_default = True,
            row_default_height = 80, col_default_width = 80, **kwargs)
        self.image_chooser = image_chooser
        cell_size = (self.col_default_width, self.row_default_height)
        for location in self.get_all_images('./media/particles', '.png'):
            self.add_widget(ImageChooserCell(
                image_location=location,
                image_chooser=self.image_chooser,
                size=cell_size))

    def get_all_images(self, dir_name, extension):
        """Recursively collect file paths under *dir_name* ending in *extension*."""
        return [os.path.join(root, fname)
                for root, dirs, files in os.walk(dir_name)
                for fname in files
                if fname.endswith(extension)]
class ImageChooser(Widget):
    """Button that opens a popup grid of textures and records the selection."""

    button_text = StringProperty("Choose a texture...")
    image_location = StringProperty('media/particle.png')

    def __init__(self, **kwargs):
        popup_content = ImageChooserPopupContent(image_chooser=self)
        self.image_chooser_popup = Popup(
            title="Images", content=popup_content,
            size_hint=(None, None), size=(512, 512))
        super(ImageChooser, self).__init__(**kwargs)

    def button_callback(self):
        """Open the texture-selection popup."""
        self.image_chooser_popup.open()

    def select(self, image_location):
        """Record the chosen texture path and close the popup."""
        self.image_location = image_location
        self.image_chooser_popup.dismiss()
class Particle_Property_Slider(Widget):
    """Labeled slider with +/- nudge buttons for one particle property."""

    slider_bounds_min = NumericProperty(0)
    slider_bounds_max = NumericProperty(100)
    slider_bounds_init_value = NumericProperty(0)
    slider_step = NumericProperty(1.0)
    box_margin = NumericProperty(5)
    prop_slider = ObjectProperty(None)
    increment_slider_by = NumericProperty(1.0)

    def increment_slider(self):
        """Nudge the slider up one step, never past the maximum."""
        candidate = self.prop_slider.value + self.increment_slider_by
        if candidate <= self.slider_bounds_max:
            self.prop_slider.value = candidate

    def decrement_slider(self):
        """Nudge the slider down one step, never past the minimum."""
        candidate = self.prop_slider.value - self.increment_slider_by
        if candidate >= self.slider_bounds_min:
            self.prop_slider.value = candidate
class Particle_Color_Sliders(Widget):
    """RGBA slider group; each channel carries its own min/max bounds."""

    color_r = NumericProperty(1.)
    color_r_min = NumericProperty(0)
    color_r_max = NumericProperty(1.)
    color_g = NumericProperty(1.)
    color_g_min = NumericProperty(0)
    color_g_max = NumericProperty(1.)
    color_b = NumericProperty(1.)
    color_b_min = NumericProperty(0)
    color_b_max = NumericProperty(1.)
    color_a = NumericProperty(1.)
    color_a_min = NumericProperty(0)
    color_a_max = NumericProperty(1.)

    # necessary because of weird slider bug that allows values to go over bounds
    def clip(self, val, vmin, vmax):
        """Clamp *val* into the inclusive range [vmin, vmax]."""
        return min(max(val, vmin), vmax)
class ParticlePanel(Widget):
    """Panel exposing particle appearance settings (texture, count, size,
    life span, rotation) as bindable Kivy properties.

    Each tunable has companion *_min/*_max properties that supply the
    slider bounds in the kv layout.
    """
    particle_builder = ObjectProperty(None)
    texture_path = StringProperty("media/particle.png")
    # Emitter capacity.
    max_num_particles = NumericProperty(200.)
    max_num_particles_min = NumericProperty(1.)
    max_num_particles_max = NumericProperty(500.)
    # Particle lifetime (seconds) and its per-particle variance.
    life_span = NumericProperty(2.)
    life_span_min = NumericProperty(.01)
    life_span_max = NumericProperty(10.)
    life_span_variance = NumericProperty(0.)
    life_span_variance_min = NumericProperty(0.)
    life_span_variance_max = NumericProperty(10.)
    # Size at spawn / death, with variances.
    start_size = NumericProperty(8.)
    start_size_min = NumericProperty(0.)
    start_size_max = NumericProperty(256.)
    start_size_variance = NumericProperty(0.)
    start_size_variance_min = NumericProperty(0.)
    start_size_variance_max = NumericProperty(256.)
    end_size = NumericProperty(8.)
    end_size_min = NumericProperty(0.)
    end_size_max = NumericProperty(256.)
    end_size_variance = NumericProperty(0.)
    end_size_variance_min = NumericProperty(0.)
    end_size_variance_max = NumericProperty(256.)
    # Emission direction and particle rotation, in degrees.
    emit_angle = NumericProperty(0.)
    emit_angle_min = NumericProperty(0.)
    emit_angle_max = NumericProperty(360.)
    emit_angle_variance = NumericProperty(0.)
    emit_angle_variance_min = NumericProperty(0.)
    emit_angle_variance_max = NumericProperty(360.)
    start_rotation = NumericProperty(0.)
    start_rotation_min = NumericProperty(0.)
    start_rotation_max = NumericProperty(360.)
    start_rotation_variance = NumericProperty(0.)
    start_rotation_variance_min = NumericProperty(0.)
    start_rotation_variance_max = NumericProperty(360.)
    end_rotation = NumericProperty(0.)
    end_rotation_min = NumericProperty(0.)
    end_rotation_max = NumericProperty(360.)
    end_rotation_variance = NumericProperty(0.)
    end_rotation_variance_min = NumericProperty(0.)
    end_rotation_variance_max = NumericProperty(360.)
    def __init__(self, pbuilder, **kwargs):
        super(ParticlePanel, self).__init__(**kwargs)
        # NOTE(review): builder is taken from pbuilder.parent — presumably
        # pbuilder is a child of the builder widget; confirm at call site.
        self.particle_builder = pbuilder.parent
class BehaviorPanel(Widget):
    """Panel exposing emitter physics settings as bindable Kivy properties.

    ``emitter_type`` selects between the gravity-style and radial-style
    parameter sets; *_min/*_max companions feed slider bounds in kv.
    """
    particle_builder = ObjectProperty(None)
    emitter_type = NumericProperty(0)
    ## Gravity Emitter Params
    emitter_x_variance = NumericProperty(0.)
    emitter_x_variance_min = NumericProperty(0.)
    emitter_x_variance_max = NumericProperty(200.)
    emitter_y_variance = NumericProperty(0.)
    emitter_y_variance_min = NumericProperty(0.)
    emitter_y_variance_max = NumericProperty(200.)
    gravity_x = NumericProperty(0)
    gravity_x_min = NumericProperty(-1500)
    gravity_x_max = NumericProperty(1500)
    gravity_y = NumericProperty(0)
    gravity_y_min = NumericProperty(-1500)
    gravity_y_max = NumericProperty(1500)
    speed = NumericProperty(0.)
    speed_min = NumericProperty(0.)
    speed_max = NumericProperty(300.)
    speed_variance = NumericProperty(0.)
    speed_variance_min = NumericProperty(0.)
    speed_variance_max = NumericProperty(300.)
    radial_acceleration = NumericProperty(0)
    radial_acceleration_min = NumericProperty(-400)
    radial_acceleration_max = NumericProperty(400)
    radial_acceleration_variance = NumericProperty(0.)
    radial_acceleration_variance_min = NumericProperty(0.)
    radial_acceleration_variance_max = NumericProperty(400.)
    tangential_acceleration = NumericProperty(0)
    tangential_acceleration_min = NumericProperty(-500)
    tangential_acceleration_max = NumericProperty(500)
    tangential_acceleration_variance = NumericProperty(0.)
    tangential_acceleration_variance_min = NumericProperty(0.)
    tangential_acceleration_variance_max = NumericProperty(500.)
    ## Radial Emitter Params
    max_radius = NumericProperty(100.)
    max_radius_min = NumericProperty(0.)
    max_radius_max = NumericProperty(250.)
    max_radius_variance = NumericProperty(0.)
    max_radius_variance_min = NumericProperty(0.)
    max_radius_variance_max = NumericProperty(250.)
    min_radius = NumericProperty(0.)
    min_radius_min = NumericProperty(0.)
    min_radius_max = NumericProperty(250.)
    rotate_per_second = NumericProperty(0)
    rotate_per_second_min = NumericProperty(-720)
    rotate_per_second_max = NumericProperty(720)
    rotate_per_second_variance = NumericProperty(0.)
    rotate_per_second_variance_min = NumericProperty(0.)
    rotate_per_second_variance_max = NumericProperty(720.)
    def __init__(self, pbuilder, **kwargs):
        super(BehaviorPanel, self).__init__(**kwargs)
        # Same parent-of-pbuilder convention as the other panels.
        self.particle_builder = pbuilder.parent
class ColorPanel(Widget):
    """Panel for particle color settings: start/end RGBA, per-channel
    variances, and the current OpenGL blend function pair."""
    particle_builder = ObjectProperty(None)
    # RGBA at particle birth and death.
    start_color = ListProperty([1,1,1,1])
    end_color = ListProperty([1,1,1,1])
    start_color_r_variance = NumericProperty(.1)
    start_color_r_variance_min = NumericProperty(0)
    start_color_r_variance_max = NumericProperty(1.)
    start_color_g_variance = NumericProperty(.1)
    start_color_g_variance_min = NumericProperty(0)
    start_color_g_variance_max = NumericProperty(1.)
    start_color_b_variance = NumericProperty(.1)
    start_color_b_variance_min = NumericProperty(0)
    start_color_b_variance_max = NumericProperty(1.)
    start_color_a_variance = NumericProperty(.1)
    start_color_a_variance_min = NumericProperty(0)
    start_color_a_variance_max = NumericProperty(1.)
    end_color_r_variance = NumericProperty(.1)
    end_color_r_variance_min = NumericProperty(0)
    end_color_r_variance_max = NumericProperty(1.)
    end_color_g_variance = NumericProperty(.1)
    end_color_g_variance_min = NumericProperty(0)
    end_color_g_variance_max = NumericProperty(1.)
    end_color_b_variance = NumericProperty(.1)
    end_color_b_variance_min = NumericProperty(0)
    end_color_b_variance_max = NumericProperty(1.)
    end_color_a_variance = NumericProperty(.1)
    end_color_a_variance_min = NumericProperty(0)
    end_color_a_variance_max = NumericProperty(1.)
    # Selected GL blend factors; None allowed while nothing is chosen.
    current_blend_src = NumericProperty(0, allownone = True)
    current_blend_dest = NumericProperty(0, allownone = True)
    def __init__(self, pbuilder, **kwargs):
        super(ColorPanel, self).__init__(**kwargs)
        # Same parent-of-pbuilder convention as the other panels.
        self.particle_builder = pbuilder.parent
class ScrollViewWithBars(ScrollView):
    """ScrollView variant whose bar fade-out hook is disabled.

    NOTE(review): overriding _start_decrease_alpha with a no-op presumably
    keeps the scroll bars permanently visible — confirm against the Kivy
    ScrollView internals for the pinned Kivy version.
    """
    def _start_decrease_alpha(self, *l):
        pass
class DebugPanel(Widget):
    """Overlay showing the current frames-per-second reading."""
    # FPS rendered as a string for direct binding in kv.
    fps = StringProperty(None)
    def __init__(self, **kwargs):
        super(DebugPanel, self).__init__(**kwargs)
        # Start the self-rescheduling refresh loop (~every 30 ms).
        Clock.schedule_once(self.update_fps, .03)
    def update_fps(self,dt):
        """Refresh the FPS string and reschedule itself."""
        self.fps = str(int(Clock.get_fps()))
        Clock.schedule_once(self.update_fps, .03)
class WorkingFile(Widget):
    # Path of the file currently open in the builder; bound to from kv.
    filename = StringProperty(None)
class VariableDescriptions(Widget):
def tab_info(self):
class BlendFuncChoices(Popup):
    """Popup listing the selectable blend functions for a chooser."""
    def __init__(self, func_chooser, **kwargs):
        super(BlendFuncChoices, self).__init__(**kwargs)
        # The chooser this popup reports selections back to.
        self.func_chooser = func_chooser
        # NOTE(review): populate_list is not defined in this class here —
        # presumably supplied by a kv rule or elsewhere in the file; verify.
        self.populate_list()
class BlendFuncChooser(BoxLayout):
    """Widget pair of src/dest blend-factor selectors."""
    func_choices = ObjectProperty(None)
    # Currently selected GL blend factors (numeric GL enums).
    current_src = NumericProperty(None)
    current_dest = NumericProperty(None)
    def __init__(self, **kwargs):
        super(BlendFuncChooser, self).__init__(**kwargs)
        # Defer setup by one frame so kv children/ids exist when it runs.
        # NOTE(review): setup_chooser is not visible in this chunk; verify.
        Clock.schedule_once(self.setup_chooser)
# Load the color-picker kv rules relative to this file so the app works
# regardless of the current working directory.
Builder.load_file(os.path.dirname(os.path.realpath(__file__)) + '/colorpicker/cblcolorpicker.kv')
class ParticleBuilderApp(App):
    """Application entry point; build() returns nothing, so the root
    widget presumably comes from the app's kv file (Kivy convention)."""
    def build(self):
        pass
if __name__ == '__main__':
    ParticleBuilderApp().run()
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import collections
from torch.autograd import Variable
from weightMask import generateSquareWeightMask, generateGridWeightMask, generateGridWeightMask_PredPrey, generateFixedWeightMask_PredPrey
## Network Types ##
## This is the important class for this folder
## It implements the propagation and recurrent decision layers with fixed weights
class Fixed_PredPrey(torch.nn.Module):
    """Forward-engineered predator-prey model: fixed-weight propagation for
    both species plus the built-in decision computation."""
    def __init__(self, num_pixels, layers, H_decision, imageSize):
        # H_decision is unused here; kept for signature parity with the
        # trainable model variants in this file.
        super(Fixed_PredPrey, self).__init__()
        weightMask, diagMask, edgeMask, cornerMask = generateFixedWeightMask_PredPrey(imageSize)
        self.iteratedLayer = FixedPropagationDecision(num_pixels, num_pixels, layers, weightMask, diagMask, edgeMask, cornerMask)
    def forward(self, X, pred, prey, cave, dtype):
        # Argument order into the layer is (prey, pred, ...); it returns
        # (prey_range, pred_range, decision) in that order.
        prey_range, pred_range, decision = self.iteratedLayer(prey, pred, X, X, cave)
        # This block is to fix a problem when only one element is passed in
        # The first dimension is normally the batch, but the repeated layers will squeeze out this dimension
        # Add it back in before concatenating
        # if (pred_range.dim() == 1):
        #     pred_range.unsqueeze(0)
        return prey_range, pred_range, decision
## Classes for Predator-Prey Model
# This version has the grid structure but all the parameters are trainable
class PredPrey_Decision(torch.nn.Module):
    """Decision-only baseline: concatenates the raw predator/prey/cave maps
    and classifies them with a two-layer ReLU MLP (no propagation layers).

    ``layers`` and ``imageSize`` are unused but kept for signature parity
    with the other *_PredPrey models in this file.
    """
    def __init__(self, num_pixels, layers, H_decision, imageSize):
        super(PredPrey_Decision, self).__init__()
        self.outputLayer1 = nn.Linear(3*num_pixels, H_decision)
        self.outputLayer2 = nn.Linear(H_decision, 2)
        self.relu = nn.ReLU()
    def forward(self, X, pred, prey, cave, dtype):
        """Return a (batch, 2) decision tensor; 1-D inputs are treated as a
        single-sample batch. X and dtype are accepted but unused."""
        pred_range = pred
        prey_range = prey
        # BUG FIX: Tensor.unsqueeze is NOT in-place — the original code
        # discarded its result, so single-sample (1-D) inputs crashed in
        # torch.cat(dim=1). Re-assign to actually restore the batch dim.
        if pred_range.dim() == 1:
            pred_range = pred_range.unsqueeze(0)
        if prey_range.dim() == 1:
            prey_range = prey_range.unsqueeze(0)
        if cave.dim() == 1:
            cave = cave.unsqueeze(0)
        tags = torch.cat((pred_range, prey_range, cave), dim = 1)
        h = self.relu(self.outputLayer1(tags))
        label = self.relu(self.outputLayer2(h))
        return label
# This version has the grid structure but all the parameters are trainable
class RecurrentScaledGrid_PredPrey(torch.nn.Module):
def __init__(self, num_pixels, layers, H_decision, imageSize):
super(RecurrentScaledGrid_PredPrey, self).__init__()
weightMask, diagMask = generateGridWeightMask_PredPrey(imageSize)
self.iteratedLayer_Pred = RepeatedLayersScaledMasked(num_pixels, num_pixels, layers, weightMask, diagMask)
self.iteratedLayer_Prey = RepeatedLayersScaledMasked(num_pixels, num_pixels, layers, weightMask, diagMask)
self.outputLayer1 = nn.Linear(3*num_pixels, H_decision)
self.outputLayer2 = nn.Linear(H_decision, 2)
self.tanh = nn.Tanh()
def forward(self, X, pred, prey, cave, dtype):
pred_range = self.iteratedLayer_Pred(pred, X)
prey_range = self.iteratedLayer_Prey(prey, X)
tags = torch.cat((pred_range, prey_range, cave), dim = 1)
h = self.tanh(self.outputLayer1(tags))
label = self.tanh(self.outputLayer2(h))
return label
# This version was intended to be where the weights are shared for each pixel, i.e. it learns a single w
# This is the version that shares w and b across all pixels
# In it's current form, this code should not work
# Corners and edges should have to learn a different weight than pixels in the center of the image
class RecurrentSharedAll_PredPrey(torch.nn.Module):
    """Grid model whose recurrent layers share one w and b across all
    pixels, followed by a three-layer tanh decision MLP.

    Per the original author's note, sharing a single weight across corner,
    edge, and interior pixels is known to be inadequate for this task.
    """
    def __init__(self, num_pixels, layers, H_decision, imageSize):
        super(RecurrentSharedAll_PredPrey, self).__init__()
        weightMask, diagMask = generateGridWeightMask_PredPrey(imageSize)
        # NOTE(review): RepeatedLayersShared is not defined in this module
        # (only RepeatedLayersSharedAll is) — confirm it exists elsewhere.
        self.iteratedLayer_Pred = RepeatedLayersShared(num_pixels, num_pixels, layers, weightMask, diagMask)
        self.iteratedLayer_Prey = RepeatedLayersShared(num_pixels, num_pixels, layers, weightMask, diagMask)
        self.outputLayer1 = nn.Linear(3*num_pixels, H_decision)
        self.outputLayer2 = nn.Linear(H_decision, H_decision)
        self.outputLayer3 = nn.Linear(H_decision, 2)
        self.tanh = nn.Tanh()
    def forward(self, X, pred, prey, cave, dtype):
        """Return a (batch, 2) decision tensor; 1-D results are re-batched."""
        pred_range = self.iteratedLayer_Pred(pred, X)
        prey_range = self.iteratedLayer_Prey(prey, X)
        # BUG FIX: Tensor.unsqueeze is NOT in-place — the original code
        # discarded its result, so the batch dimension squeezed out by the
        # repeated layers was never restored and torch.cat(dim=1) crashed.
        if pred_range.dim() == 1:
            pred_range = pred_range.unsqueeze(0)
        if prey_range.dim() == 1:
            prey_range = prey_range.unsqueeze(0)
        if cave.dim() == 1:
            cave = cave.unsqueeze(0)
        tags = torch.cat((pred_range, prey_range, cave), dim = 1)
        h = self.tanh(self.outputLayer1(tags))
        h = self.tanh(self.outputLayer2(h))
        label = self.tanh(self.outputLayer3(h))
        return label
# Forward-engineered version of the network
# Recurrent weights are fixed, decision layer is learned
class RecurrentFixed_PredPrey(torch.nn.Module):
    """Forward-engineered variant: fixed-weight recurrent propagation for
    the predator only; any decision layer is learned elsewhere."""
    def __init__(self, num_pixels, layers, H_decision, imageSize):
        super(RecurrentFixed_PredPrey, self).__init__()
        weightMask, diagMask, edgeMask, cornerMask = generateFixedWeightMask_PredPrey(imageSize)
        self.iteratedLayer_Pred = RepeatedLayersFixed(num_pixels, num_pixels, layers, weightMask, diagMask, edgeMask, cornerMask)
        self.tanh = nn.Tanh()
    def forward(self, X, pred, dtype):
        """Propagate *pred* through the fixed recurrent layer.

        NOTE(review): with a single sample, the repeated layer may squeeze
        out the batch dimension; callers are expected to restore it.
        """
        return self.iteratedLayer_Pred(pred, X)
class RecurrentSharedPixel_PredPrey(torch.nn.Module):
    """Variant with one learned weight per pixel: the predator map is run
    through a RepeatedLayersSharedPixel propagation stack."""
    def __init__(self, num_pixels, layers, H_decision, imageSize):
        super(RecurrentSharedPixel_PredPrey, self).__init__()
        weightMask, diagMask, edgeMask, cornerMask = generateFixedWeightMask_PredPrey(imageSize)
        self.iteratedLayer_Pred = RepeatedLayersSharedPixel(num_pixels, num_pixels, layers, weightMask, diagMask, edgeMask, cornerMask)
        self.tanh = nn.Tanh()
    def forward(self, X, pred, dtype):
        """Propagate *pred* through the per-pixel-weighted recurrent layer.

        NOTE(review): with a single sample, the repeated layer may squeeze
        out the batch dimension; callers are expected to restore it.
        """
        return self.iteratedLayer_Pred(pred, X)
## Classes used to construct network types ##
# This is the original code from the working masked code for edge-connected pixel task
# This is the original code from the working masked code for edge-connected pixel task
class RepeatedLayersScaledMasked(torch.nn.Module):
    """Recurrent layer whose hidden weights are confined to a sparsity mask.

    Masked-out entries are zeroed at init and their gradients are zeroed by
    backward hooks, so they stay zero throughout training.

    NOTE(review): the mask arithmetic uses torch.cuda.ByteTensor, so this
    class requires CUDA and a torch version that still accepts ByteTensor
    mask indexing (legacy API) — confirm against the pinned torch version.
    """
    def __init__(self, D_input, hidden, layers, weightMask, diagMask):
        super(RepeatedLayersScaledMasked, self).__init__()
        self.mask = weightMask
        self.diagMask = diagMask
        # Complement masks (ones - mask): positions that must remain zero.
        self.invertMask = torch.ones((hidden, hidden)).type(torch.cuda.ByteTensor) - self.mask
        self.invertDiag = torch.ones((hidden, hidden)).type(torch.cuda.ByteTensor) - self.diagMask
        self.iteration = layers
        self.hiddenWeight = nn.Linear(hidden, hidden)
        self.inputWeight = nn.Linear(D_input, hidden, bias=False)
        self.tanh = nn.Tanh()
        # Learned sharpness factor applied before the tanh nonlinearity.
        self.scalar = nn.Parameter(torch.ones(1)*2, requires_grad=True)
        # Zero all disallowed recurrent connections; input weights start at 0.
        self.hiddenWeight.weight.data[self.invertMask] = 0
        #self.hiddenWeight.weight.data[self.mask] = 0.25
        self.inputWeight.weight.data[:] = 0
        #self.inputWeight.weight.data[self.diagMask] = 1
        #self.hiddenWeight.bias.data[:] = -0.15
        # Gradient hooks keep updates confined to the allowed pattern.
        self.hiddenWeight.weight.register_hook(self.backward_hook)
        self.inputWeight.weight.register_hook(self.backward_hook_input)
    def forward(self, initial_hidden, input):
        """Run the masked recurrence ``self.iteration`` times."""
        u = initial_hidden.clone()
        for _ in range(0, self.iteration):
            v = self.hiddenWeight(u) + self.inputWeight(input)
            u = self.tanh(v * self.scalar.expand_as(v))
            #u = torch.sign(u)
        return u
    def backward_hook(self, grad):
        # Zero gradient entries outside the hidden-weight mask.
        out = grad.clone()
        out[self.invertMask] = 0
        return out
    def backward_hook_input(self, grad):
        # Zero gradient entries outside the input (diagonal) mask.
        out = grad.clone()
        out[self.invertDiag] = 0
        return out
# Class below is modified to deal with batched matrix multiply
# Has one w and b that are shared by all the pixels
# Class below is modified to deal with batched matrix multiply
# Has one w and b that are shared by all the pixels
class RepeatedLayersSharedAll(torch.nn.Module):
    """Recurrent layer where ONE scalar weight (w), bias (b), and input
    gain (a) are shared by every pixel; the 0/1 connection patterns
    themselves are fixed (requires_grad=False).

    NOTE(review): uses torch.cuda.ByteTensor for the mask arithmetic, so
    CUDA is required — confirm against the pinned torch version.
    """
    def __init__(self, D_input, hidden, layers, weightMask, diagMask):
        super(RepeatedLayersSharedAll, self).__init__()
        self.mask = weightMask
        self.diagMask = diagMask
        self.invertMask = torch.ones((hidden, hidden)).type(torch.cuda.ByteTensor) - self.mask
        self.invertDiag = torch.ones((hidden, hidden)).type(torch.cuda.ByteTensor) - self.diagMask
        self.iteration = layers
        # For this case we want these to remain fixed
        self.hiddenWeight = nn.Parameter(torch.ones(hidden, hidden), requires_grad=False)
        self.bias = nn.Parameter(torch.ones(hidden, 1), requires_grad=False)
        self.inputWeight = nn.Parameter(torch.ones(D_input, hidden), requires_grad=False)
        self.tanh = nn.Tanh()
        # Learned sharpness factor applied before tanh.
        self.scalar = nn.Parameter(torch.ones(1)*2, requires_grad=True)
        # Set up the hidden weights: scalar w scales the fixed 0/1 pattern.
        self.w = nn.Parameter(torch.ones(1)*2, requires_grad=True)
        self.b = nn.Parameter(torch.ones(1)*2, requires_grad=True)
        self.hiddenWeight.data[self.invertMask] = 0
        self.hiddenWeight.data[self.mask] = 1
        # Set up the input weights: scalar a scales the fixed diagonal.
        self.a = nn.Parameter(torch.ones(1)*2, requires_grad=True)
        self.inputWeight.data[:] = 0
        self.inputWeight.data[self.diagMask] = 1
        #self.hiddenWeight.bias.data[:] = -0.15
        # self.hiddenWeight.weight.register_hook(self.backward_hook)
        # self.inputWeight.weight.register_hook(self.backward_hook_input)
    def forward(self, initial_hidden, input):
        """Run the shared-weight recurrence ``self.iteration`` times.

        NOTE(review): v_expand.squeeze() drops ALL size-1 dims, including a
        batch dimension of 1 — this is the single-sample quirk the decision
        models compensate for.
        """
        u = initial_hidden.clone()
        input_expand = input.unsqueeze(-1)
        for _ in range(0, self.iteration):
            u_expand = u.unsqueeze(-1)
            v_expand = torch.matmul((self.w.expand_as(self.hiddenWeight) * self.hiddenWeight), u_expand) + (self.b * self.bias) + \
                torch.matmul((self.a.expand_as(self.inputWeight) * self.inputWeight), input_expand)
            v = v_expand.squeeze()
            u = self.tanh(v * self.scalar.expand_as(v))
            #u = torch.sign(u)
        return u
# This code has one weight learned by each pixel
class RepeatedLayersSharedPixel(torch.nn.Module):
def __init__(self, D_input, hidden, layers, weightMask, diagMask, edgeMask, cornerMask):
super(RepeatedLayersSharedPixel, self).__init__()
self.mask = weightMask
self.diagMask = diagMask
self.invertMask = torch.ones((hidden, hidden)).type(torch.ByteTensor) - self.mask
self.invertDiag = torch.ones((hidden, hidden)).type(torch.ByteTensor) - self.diagMask
self.iteration = layers
# For this case we want these to remain fixed
self.hiddenWeight = nn.Parameter(torch.randn(hidden, hidden), requires_grad=False)
self.bias = nn.Parameter(torch.ones(hidden, 1), requires_grad=True)
self.inputWeight = nn.Parameter(torch.randn(D_input, hidden), requires_grad=False)
self.tanh = nn.Tanh()
self.scalar = nn.Parameter(torch.ones(1)*2, requires_grad=True)
# Set up the hidden weights
self.w = nn.Parameter(torch.randn(hidden, 1), requires_grad=True)
#self.w.data[:] = 0.25
#self.w.data[edgeMask] = 0.34
#self.w.data[cornerMask] = 0.5
self.hiddenWeight.data[self.invertMask] = 0
self.hiddenWeight.data[self.mask] = 1
# Set up the input weights
self.a = nn.Parameter(torch.randn(hidden, 1), requires_grad=True)
self.inputWeight.data[:] = 0
self.inputWeight.data[self.diagMask] = 1
#self.bias.data[:] = -0.15
# self.hiddenWeight.weight.register_hook(self.backward_hook)
# self.inputWeight.weight.register_hook(self.backward_hook_input)
def forward(self, initial_hidden, input):
u = initial_hidden.clone()
u_fix = initial_hidden.clone()
u_fix[u_fix==-1]=0
u_fix_expand = u_fix.unsqueeze(-1)
input_expand = input.unsqueeze(-1)
for _ in range(0, self.iteration):
u_expand = u.unsqueeze(-1)
v_expand = u_fix_expand + torch.matmul((self.w.expand_as(self.hiddenWeight) * self.hiddenWeight), u_expand) + (self.bias) + \
torch.matmul((self.a.expand_as(self.inputWeight) * self.inputWeight), input_expand)
v = v_expand.squeeze()
u = self.tanh(v * self.scalar.expand_as(v))
#u = torch.sign(u)
return u
# Code for the forward-engineered version of the network
# Nothing in the below is trainable, specify the weights for the propagation
# Code for the forward-engineered version of the network
# Nothing in the below is trainable, specify the weights for the propagation
class RepeatedLayersFixed(torch.nn.Module):
    """Forward-engineered recurrent layer: weights are initialized to the
    hand-derived propagation values (0.25 interior, 0.34 edge, 0.5 corner;
    bias -0.15; pre-tanh scale 20).

    NOTE(review): despite the header comment, w/a/bias/scalar are created
    with requires_grad=True, so they can still be fine-tuned — only the 0/1
    connection patterns are truly fixed.
    """
    def __init__(self, D_input, hidden, layers, weightMask, diagMask, edgeMask, cornerMask):
        super(RepeatedLayersFixed, self).__init__()
        self.mask = weightMask
        self.diagMask = diagMask
        self.invertMask = torch.ones((hidden, hidden)).type(torch.ByteTensor) - self.mask
        self.invertDiag = torch.ones((hidden, hidden)).type(torch.ByteTensor) - self.diagMask
        self.iteration = layers
        # For this case we want these to remain fixed
        self.hiddenWeight = nn.Parameter(torch.ones(hidden, hidden), requires_grad=False)
        self.bias = nn.Parameter(torch.ones(hidden, 1), requires_grad=True)
        self.inputWeight = nn.Parameter(torch.ones(D_input, hidden), requires_grad=False)
        self.tanh = nn.Tanh()
        self.scalar = nn.Parameter(torch.ones(1)*20, requires_grad=True)
        # Set up the hidden weights: per-pixel fan-in normalization
        # (interior pixels have 4 neighbours, edges 3, corners 2).
        self.w = nn.Parameter(torch.ones(hidden, 1), requires_grad=True)
        self.w.data[:] = 0.25
        self.w.data[edgeMask] = 0.34
        self.w.data[cornerMask] = 0.5
        self.hiddenWeight.data[self.invertMask] = 0
        self.hiddenWeight.data[self.mask] = 1
        # Set up the input weights: identity (diagonal) injection.
        self.a = nn.Parameter(torch.ones(hidden, 1), requires_grad=True)
        self.inputWeight.data[:] = 0
        self.inputWeight.data[self.diagMask] = 1
        self.bias.data[:] = -0.15
        #self.hiddenWeight.bias.data[:] = -0.15
        # self.hiddenWeight.weight.register_hook(self.backward_hook)
        # self.inputWeight.weight.register_hook(self.backward_hook_input)
    def forward(self, initial_hidden, input):
        """Run the recurrence; cells that start at -1 are clamped to 0 in
        u_fix, which is re-added every iteration as persistent excitation.

        NOTE(review): v_expand.squeeze() also drops a batch dim of size 1.
        """
        u = initial_hidden.clone()
        u_fix = initial_hidden.clone()
        u_fix[u_fix==-1]=0
        u_fix_expand = u_fix.unsqueeze(-1)
        input_expand = input.unsqueeze(-1)
        for _ in range(0, self.iteration):
            u_expand = u.unsqueeze(-1)
            v_expand = u_fix_expand + torch.matmul((self.w.expand_as(self.hiddenWeight) * self.hiddenWeight), u_expand) + (self.bias) + \
                torch.matmul((self.a.expand_as(self.inputWeight) * self.inputWeight), input_expand)
            v = v_expand.squeeze()
            u = self.tanh(v * self.scalar.expand_as(v))
            #u = torch.sign(u)
        return u
class FixedPropagationDecision(torch.nn.Module):
    """Two parallel forward-engineered propagation stacks (1 = prey,
    2 = predator) interleaved with a built-in mutual-inhibition decision
    update at every iteration.

    Returns (u1, u2, decision): the prey range map, predator range map,
    and a (batch, 2) decision tensor.
    """
    def __init__(self, D_input, hidden, layers, weightMask, diagMask, edgeMask, cornerMask):
        super(FixedPropagationDecision, self).__init__()
        # We have two propagation layers to set up
        # 1 will be the prey
        # 2 will be the predator
        self.mask = weightMask
        self.diagMask = diagMask
        self.invertMask = torch.ones((hidden, hidden)).type(torch.ByteTensor) - self.mask
        self.invertDiag = torch.ones((hidden, hidden)).type(torch.ByteTensor) - self.diagMask
        self.iteration = layers
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
        # These are the prey parameters
        # For this case we want these to remain fixed
        self.hiddenWeight1 = nn.Parameter(torch.ones(hidden, hidden), requires_grad=False)
        self.bias1 = nn.Parameter(torch.ones(hidden, 1), requires_grad=True)
        self.inputWeight1 = nn.Parameter(torch.ones(D_input, hidden), requires_grad=False)
        self.scalar1 = nn.Parameter(torch.ones(1)*20, requires_grad=True)
        # Set up the hidden weights: per-pixel fan-in normalization
        # (0.25 interior, 0.34 edge, 0.5 corner).
        self.w1 = nn.Parameter(torch.ones(hidden, 1), requires_grad=True)
        self.w1.data[:] = 0.25
        self.w1.data[edgeMask] = 0.34
        self.w1.data[cornerMask] = 0.5
        self.hiddenWeight1.data[self.invertMask] = 0
        self.hiddenWeight1.data[self.mask] = 1
        # Set up the input weights: identity (diagonal) injection.
        self.a1 = nn.Parameter(torch.ones(hidden, 1), requires_grad=True)
        self.inputWeight1.data[:] = 0
        self.inputWeight1.data[self.diagMask] = 1
        self.bias1.data[:] = -0.15
        # These are the predator parameters (mirror of the prey setup).
        self.hiddenWeight2 = nn.Parameter(torch.ones(hidden, hidden), requires_grad=False)
        self.bias2 = nn.Parameter(torch.ones(hidden, 1), requires_grad=True)
        self.inputWeight2 = nn.Parameter(torch.ones(D_input, hidden), requires_grad=False)
        self.scalar2 = nn.Parameter(torch.ones(1)*20, requires_grad=True)
        # Set up the hidden weights
        self.w2 = nn.Parameter(torch.ones(hidden, 1), requires_grad=True)
        self.w2.data[:] = 0.25
        self.w2.data[edgeMask] = 0.34
        self.w2.data[cornerMask] = 0.5
        self.hiddenWeight2.data[self.invertMask] = 0
        self.hiddenWeight2.data[self.mask] = 1
        # Set up the input weights
        self.a2 = nn.Parameter(torch.ones(hidden, 1), requires_grad=True)
        self.inputWeight2.data[:] = 0
        self.inputWeight2.data[self.diagMask] = 1
        self.bias2.data[:] = -0.15
    def forward(self, initial_hidden1, initial_hidden2, input1, input2, cave):
        """Run both recurrences for ``self.iteration`` steps.

        WARNING: mutates *cave* in place (-1 entries are zeroed).
        NOTE(review): ``decision`` is allocated on the default (CPU)
        device — would fail for CUDA inputs; confirm intended usage.
        """
        u1 = initial_hidden1.clone()
        u1_fix = initial_hidden1.clone()
        # Clamp -1 ("empty") cells to 0 for the persistent excitation term.
        u1_fix[u1_fix==-1]=0
        u1_fix_expand = u1_fix.unsqueeze(-1)
        input1_expand = input1.unsqueeze(-1)
        u2 = initial_hidden2.clone()
        u2_fix = initial_hidden2.clone()
        u2_fix[u2_fix==-1]=0
        u2_fix_expand = u2_fix.unsqueeze(-1)
        input2_expand = input2.unsqueeze(-1)
        batch = list(u1.size())
        batch = batch[0]
        decision = torch.zeros(batch, 2)
        cave[cave==-1] = 0
        # idx is only used by the commented-out debug prints below.
        idx = torch.nonzero(cave)
        for _ in range(0, self.iteration):
            u1_expand = u1.unsqueeze(-1)
            v1_expand = u1_fix_expand + torch.matmul((self.w1.expand_as(self.hiddenWeight1) * self.hiddenWeight1), u1_expand) + (self.bias1) + \
                torch.matmul((self.a1.expand_as(self.inputWeight1) * self.inputWeight1), input1_expand)
            v1 = v1_expand.squeeze()
            u1 = self.tanh(v1 * self.scalar1.expand_as(v1))
            #u = torch.sign(u)
            u2_expand = u2.unsqueeze(-1)
            v2_expand = u2_fix_expand + torch.matmul((self.w2.expand_as(self.hiddenWeight2) * self.hiddenWeight2), u2_expand) + (self.bias2) + \
                torch.matmul((self.a2.expand_as(self.inputWeight2) * self.inputWeight2), input2_expand)
            v2 = v2_expand.squeeze()
            u2 = self.tanh(v2 * self.scalar2.expand_as(v2))
            # Mutual inhibition at the cave cells.
            # NOTE(review): decision[:, 0] uses LAST iteration's
            # decision[:, 1], while decision[:, 1] uses the value of
            # decision[:, 0] just written above — asymmetric; confirm
            # this sequential update is intended.
            decision[:, 0] = self.sigmoid((-100.0*decision[:, 1] + 20*torch.sum(cave*u2, dim=1)))
            decision[:, 1] = self.sigmoid((-100.0*decision[:, 0] + 20*torch.sum(cave*u1, dim=1)))
            #print(torch.sum(cave*u1))
            # print(u2[idx])
            # print(u1[idx])
            # print(decision)
        return u1, u2, decision
|
__author__ = 'Kadri'
from tkinter import *
from tkinter import ttk
from PIL import Image, ImageTk
from random import randint
# Determine a random row order for the answer radio buttons
def rea_numbrite_leidmine():
    """Produce a random ordering of the grid rows 1..3 for the answer buttons.

    Clears and refills the module-level ``rea_numbrid`` list IN PLACE
    (callers hold references to it), then returns it — same contract as
    before.
    """
    from random import sample
    global rea_numbrid
    # Empty the existing list in place instead of rebinding it.
    del rea_numbrid[:]
    # sample() of the whole population is a uniform random permutation —
    # the same distribution as the original rejection-sampling loop, but
    # without the unbounded number of randint retries.
    rea_numbrid.extend(sample([1, 2, 3], 3))
    return rea_numbrid
def esimene_vastusevariant():
    """Pick the first wrong answer: a random amino acid whose index differs
    from the correct one (``ah_number``). Returns (name, index)."""
    global ah_number
    while True:
        candidate = randint(0, len(aminohapped) - 1)
        if candidate == ah_number:
            continue
        return aminohapped[candidate], candidate
def teine_vastusevariant():
    """Pick the second wrong answer: a random amino acid distinct from both
    the correct one and the first wrong answer. Returns its name."""
    global ah_number
    global esimene_number
    while True:
        candidate = randint(0, len(aminohapped) - 1)
        if candidate not in (ah_number, esimene_number):
            return aminohapped[candidate]
def vastuse_kontroll():
    """Return True when the selected radio value matches the correct name."""
    global v
    global ah_nimi
    return v.get() == ah_nimi
def vaheta_valikvastused():
    """Rebuild the three answer radio buttons for the current amino acid:
    two fresh wrong answers plus the correct name, in random row order."""
    global ah_nimi
    global esimene_valik
    global esimene_number
    global teine_valik
    global teine_number
    global nupp_1
    global nupp_2
    global nupp_3
    global v
    global rea_numbrid
    # Fresh selection variable so no previous choice stays pre-selected.
    v = StringVar()
    esimene_valik, esimene_number = esimene_vastusevariant()
    teine_valik = teine_vastusevariant()
    # Remove the previous answer buttons.
    nupp_1.destroy()
    nupp_2.destroy()
    nupp_3.destroy()
    rea_numbrid = rea_numbrite_leidmine()
    # Add the new answer buttons in the shuffled row order.
    nupp_1 = Radiobutton(raam, text=esimene_valik, variable = v, value=esimene_valik)
    nupp_1.grid(row=rea_numbrid[0], sticky=(N))
    nupp_2 = Radiobutton(raam, text=teine_valik, variable = v, value=teine_valik)
    nupp_2.grid(row=rea_numbrid[1], sticky=(N))
    nupp_3 = Radiobutton(raam, text=ah_nimi, variable = v, value=ah_nimi)
    nupp_3.grid(row=rea_numbrid[2], sticky=(N))
def vaheta_aminohape():
    """Advance to the next question: pick an amino acid not yet asked,
    display its structure image, and rebuild the answer options."""
    global aminohape
    global ah_number
    global ah_nimi
    global valitud_aminohapped
    # Draw until we hit an amino acid that has not been asked yet
    # (at most 10 of the 22 are ever consumed, so this terminates).
    while True:
        ah_number = randint(0, len(aminohapped)-1)
        ah_nimi = aminohapped[ah_number]
        if ah_nimi not in valitud_aminohapped:
            break
    valitud_aminohapped.append(ah_nimi)
    print(ah_nimi)
    print(ah_number)
    print(valitud_aminohapped)
    normaal_suurus = Image.open("pildid/"+ah_nimi+".png")
    # FIX: Image.ANTIALIAS was an alias of LANCZOS and was removed in
    # Pillow 10; use the canonical name (identical resampling filter).
    muudetud = normaal_suurus.resize((200,150),Image.LANCZOS)
    # Keep a module-level reference so Tk does not garbage-collect the image.
    aminohape = ImageTk.PhotoImage(muudetud)
    tahvel.itemconfigure(pilt, image=aminohape)
    vaheta_valikvastused()
def salvesta_vastus():
    """Record the current answer (1 = correct, 0 = wrong), advance to the
    next question, and show the final score after 10 answers."""
    global nupp_1
    global nupp_2
    global nupp_3
    tulemus = vastuse_kontroll()
    if len(tulemused)<=9:
        if tulemus == False:
            tulemused.append(0)
        else:
            tulemused.append(1)
        # NOTE(review): this also runs for the 10th answer, so one extra
        # amino acid image is loaded right before the end screen wipes the
        # canvas — harmless but wasteful; confirm intended.
        vaheta_aminohape()
    if len(tulemused)==10:
        # Game over: clear the board and show the score.
        tahvel.delete("all")
        nupp.place_forget()
        nupp_1.destroy()
        nupp_2.destroy()
        nupp_3.destroy()
        silt3 = Label(raam, background="white", text="Mäng on läbi!")
        silt3.place(x=160, y=80)
        silt2 = Label(raam, background="white", text="Sinu skoor on "+ \
            str(tulemused.count(1))+"/"+str(tulemused.count(0)+tulemused.count(1)))
        silt2.place(x=150, y=100)
        print(tulemused)
# --- Main window and first question setup -------------------------------
raam = Tk()
raam.title("Aminohapete mäng")
tahvel = Canvas(raam, width= 400, height= 200, background = "white")
tahvel.grid()
aminohapped = ["alaniin", "arginiin", "asparagiin", "asparagiinhape", "tsüsteiin", "glutamiin", "glutamiinhape", "glütsiin",
               "histidiin", "isoleutsiin", "leutsiin", "lüsiin", "metioniin", "fenüülalaniin", "proliin", "seriin",
               "treoniin", "trüptofaan", "türosiin", "valiin", "selenotsüsteiin", "pürrolüsiin"]
tulemused = []
rea_numbrid = []
valitud_aminohapped = []
# Display the first amino-acid image.
ah_number = randint(0, len(aminohapped)-1)
ah_nimi = aminohapped[ah_number]
valitud_aminohapped.append(ah_nimi)
print(ah_nimi)
print(ah_number)
print(valitud_aminohapped)
normaal_suurus = Image.open("pildid/"+ah_nimi+".png")
# FIX: Image.ANTIALIAS was an alias of LANCZOS and was removed in
# Pillow 10; use the canonical name (identical resampling filter).
muudetud = normaal_suurus.resize((200,150),Image.LANCZOS)
aminohape = ImageTk.PhotoImage(muudetud)
pilt = tahvel.create_image(100, 100, anchor = W, image= aminohape)
# Pick two wrong amino acids as the other multiple-choice options.
v = StringVar()
esimene_valik, esimene_number = esimene_vastusevariant()
teine_valik = teine_vastusevariant()
rea_numbrid = rea_numbrite_leidmine()
# Answer radio buttons, placed in the shuffled row order.
nupp_1 = Radiobutton(raam, text=esimene_valik, variable = v, value=esimene_valik)
nupp_1.grid(row=rea_numbrid[0], sticky=(N))
nupp_2 = Radiobutton(raam, text=teine_valik, variable = v, value=teine_valik)
nupp_2.grid(row=rea_numbrid[1], sticky=(N))
nupp_3 = Radiobutton(raam, text=ah_nimi, variable = v, value=ah_nimi)
nupp_3.grid(row=rea_numbrid[2], sticky=(N))
nupp = ttk.Button(raam, text="Edasi", command = salvesta_vastus)
nupp.place(x=270, y=220, width=100)
raam.mainloop()
# http://learnpythonthehardway.org/book/ex47.html
# http://seminar.io/2013/09/27/testing-your-rest-client-in-python/
from nose.tools import *
from flask import Flask, jsonify, request
from bot.core import Message, Plugin
def test_message():
    """An empty payload should leave all Message fields at their '' defaults."""
    msg = Message('')
    # Plain asserts replace nose's deprecated assert_equal: identical
    # semantics (AssertionError on mismatch), and pytest can introspect
    # them for better failure messages.
    assert msg.team_id == ""
    assert msg.channel_id == ""
    assert msg.channel_name == ""
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import shutil
import logging
import unittest
import time
from TestUtils import TestUtilsMixin, ACCUMULO_HOME
log = logging.getLogger('test.auto')
class CombinerTest(TestUtilsMixin, unittest.TestCase):
    "Start a clean accumulo, use a Combiner, verify the data is aggregated"
    # Relative ordering of this test within the auto test run.
    order = 25
    def checkSum(self):
        """Scan table 'test' and verify row1's value is sum(0..9)."""
        # check the scan
        out, err, code = self.shell(self.masterHost(),"table test\nscan\n")
        self.assert_(code == 0)
        for line in out.split('\n'):
            if line.find('row1') == 0:
                self.assert_(int(line.split()[-1]) == sum(range(10)))
                break
        else:
            # for/else: no row1 line found at all.
            self.fail("Unable to find needed output in %r" % out)
    def runTest(self):
        # initialize the database: create the table, attach a scan-time
        # SummingCombiner on family 'cf', and drop the versioning iterator
        # (the blank lines answer the setiter interactive prompts).
        out, err, code = self.rootShell(self.masterHost(),"createtable test\n"
            "setiter -t test -scan -p 10 -n mycombiner -class org.apache.accumulo.core.iterators.user.SummingCombiner\n"
            "\n"
            "cf\n"
            "\n"
            "STRING\n"
            "deleteiter -t test -n vers -minc -majc -scan\n")
        self.assert_(code == 0)
        # insert some rows
        log.info("Starting Test Ingester")
        cmd = 'table test\n';
        for i in range(10):
            cmd += 'insert row1 cf col1 %d\n' % i
        out, err, code = self.rootShell(self.masterHost(), cmd)
        self.assert_(code == 0)
        # Combined result must survive: a plain scan, a flush to disk,
        # and a full restart.
        self.checkSum()
        out, err, code = self.rootShell(self.masterHost(), "flush -t test -w\n")
        self.assert_(code == 0)
        self.checkSum()
        self.shutdown_accumulo()
        self.start_accumulo()
        self.checkSum()
# Destination where ClassLoaderTest repeatedly (re)places the combiner jar.
jarPath = ACCUMULO_HOME+"/lib/ext/TestCombiner.jar"
class ClassLoaderTest(TestUtilsMixin, unittest.TestCase):
    "Start a clean accumulo, ingest one data, read it, set a combiner, read it again, change the combiner jar, read it again"
    # Relative ordering of this test within the auto test run.
    order = 26
    def setUp(self):
        # make sure the combiner jar is not there from a previous run
        if os.path.exists(jarPath):
            os.remove(jarPath)
        TestUtilsMixin.setUp(self)
    def checkSum(self, val):
        """Scan table 'test' and verify row1's value equals *val*."""
        # check the scan
        out, err, code = self.shell(self.masterHost(), "table test\nscan\n")
        self.assert_(code == 0)
        for line in out.split('\n'):
            if line.find('row1') == 0:
                self.assert_(line.split()[-1] == val)
                break
        else:
            # for/else: no row1 line found at all.
            self.fail("Unable to find needed output in %r" % out)
    def runTest(self):
        # initialize the database
        out, err, code = self.rootShell(self.masterHost(), "createtable test\n")
        self.assert_(code == 0)
        # insert some rows
        log.info("Starting Test Ingester")
        out, err, code = self.rootShell(self.masterHost(), "table test\ninsert row1 cf col1 Test\n")
        self.assert_(code == 0)
        self.checkSum("Test")
        # Drop in the first combiner jar; the sleep gives the server's
        # classloader a chance to notice the new file.
        log.info("creating jar file")
        shutil.copy(sys.path[0]+"/TestCombinerX.jar", jarPath)
        time.sleep(1)
        out, err, code = self.rootShell(self.masterHost(), "setiter -t test -scan -p 10 -n TestCombiner -class org.apache.accumulo.test.functional.TestCombiner\n"
            "\n"
            "cf\n")
        self.assert_(code == 0)
        self.checkSum("TestX")
        # Swap the jar for a different implementation and verify the
        # classloader picks up the change (value becomes TestY).
        shutil.copy(sys.path[0]+"/TestCombinerY.jar", jarPath)
        time.sleep(1)
        self.checkSum("TestY")
        os.remove(jarPath)
def suite():
    """Assemble this module's tests in their intended execution order."""
    tests = unittest.TestSuite()
    tests.addTests([CombinerTest(), ClassLoaderTest()])
    return tests
|
from aws_cdk import (
core
)
from network_topology import NetworkTopology
from webservers import WebServers
from apiservers import APIServers
from database import Database
from load_balancer import LoadBalancer
from security_groups import SecurityGroups
class ThreeTierWebStack(core.Stack):
    """Three-tier web architecture: a VPC, security groups, web and API
    server tiers, an ALB fronting the web tier, and an RDS database."""

    def __init__(self, scope: core.Construct, id: str, config: dict, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Network first: every other construct attaches to this VPC.
        network = NetworkTopology(self, 'VPC')
        sgs = SecurityGroups(self, 'SecurityGroups', network.vpc)

        web_tier = WebServers(
            self, 'WebServers',
            vpc=network.vpc,
            security_group=sgs.webserver_security_group,
            instance_type=config["webservers_instance_type"],
        )
        APIServers(
            self, 'APIServers',
            vpc=network.vpc,
            security_group=sgs.apiserver_security_group,
            instance_type=config["apiservers_instance_type"],
        )
        # The load balancer targets the web tier instances directly.
        LoadBalancer(
            self, 'LoadBalancer',
            vpc=network.vpc,
            security_group=sgs.alb_security_group,
            instances=web_tier.instances,
        )
        Database(
            self, 'Database',
            vpc=network.vpc,
            security_group=sgs.database_security_group,
            master_username=config["rds_master_username"],
            master_password=config["rds_master_password"],
            database_name=config["rds_db_name"],
        )
|
import numpy as np
fpath = 'input.txt'
# Parse one "x, y" integer pair per line. The context manager closes the
# file promptly instead of leaking the handle as the original did.
with open(fpath, 'r') as fh:
    licoords = [list(map(int, line.split(', '))) for line in fh.read().splitlines()]
# Grid bounds: the largest x / y among the coordinates.
ymax = max(licoords, key=lambda c: c[1])[1]
xmax = max(licoords, key=lambda c: c[0])[0]
# NOTE(review): the grid excludes row x == xmax and column y == ymax; if
# boundary cells matter this should probably be (xmax + 1, ymax + 1).
field = np.zeros((xmax, ymax)).astype(int)
maxdist = 10000
def finddistance(x, y, licoords):
    """Return the total Manhattan distance from (x, y) to every coordinate.

    The original looped with an unused enumerate index and a manual
    accumulator; a generator fed to sum() is the idiomatic equivalent.
    """
    return sum(abs(cx - x) + abs(cy - y) for cx, cy in licoords)
# Mark every grid cell whose total distance to all coordinates is below the
# threshold, then report the size of that region. (The original enumerated
# rows/cells only to discard the values; plain ranges are clearer.)
for x in range(field.shape[0]):
    for y in range(field.shape[1]):
        if finddistance(x, y, licoords) < maxdist:
            field[x, y] = 1
print(np.sum(field))
# Copyright 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# These gyp files create the following dependencies:
#
# test.gyp:
# #a -> b
# a.c
# #b
# b.c
# a and b are static libraries.
# gyp build description: two static libraries, 'a' (which depends on 'b') and 'b'.
{
  'targets': [
    {
      'target_name': 'a',
      'type': 'static_library',
      'sources': [
        'a.c',
      ],
      'dependencies': [
        'b',
      ],
    },
    {
      'target_name': 'b',
      'type': 'static_library',
      'sources': [
        'b.c',
      ],
    },
  ],
}
|
# Banner
print("-----------------------------------------------------------------------------")
print("EMERGING TECHNOLOGIES - LABORATORY ACTIVITY 2")
print("-----------------------------------------------------------------------------")
print("")
# Collect the four term grades with identical prompts to the original.
labels = ("prelim", "midterm", "semi-final", "final")
grades = [float(input("Input your {} grades: ".format(label))) for label in labels]
avg = sum(grades) / 4
print(" ")
print("Your total average is {}".format(avg))
from aip import AipOcr
import threading, time
import SocketServer
import base64
import struct
import json
# import pymongo
import requests
from pymongo import MongoClient
from ctpnport import *
from newcrnnport import *
import numpy as np
import models.position_helper as POS
import ctypes
import inspect
def _async_raise(tid, exctype):
    """Raise *exctype* asynchronously inside the thread with id *tid*.

    Uses the CPython C-API PyThreadState_SetAsyncExc; the exception is
    delivered the next time the target thread executes bytecode.
    """
    tid = ctypes.c_long(tid)
    if not inspect.isclass(exctype):
        # The C API requires an exception *class*, not an instance.
        exctype = type(exctype)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
    if res == 0:
        # No thread state matched the given id.
        raise ValueError("invalid thread id")
    elif res != 1:
        # More than one thread state was affected: revert with exc=NULL
        # ("if it returns a number greater than one, you're in trouble").
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
        raise SystemError("PyThreadState_SetAsyncExc failed")
def stop_thread(thread):
    """Forcefully stop *thread* by raising SystemExit inside it."""
    _async_raise(thread.ident, SystemExit)
# MyThread.py
class MyThread(threading.Thread):
    """Thread wrapper that stores the target's return value for later retrieval."""

    def __init__(self, func, args=()):
        super(MyThread, self).__init__()
        self.func = func
        self.args = args

    def run(self):
        # NOTE(review): fixed 2s delay before invoking the target — presumably
        # to stagger the API call against other work; confirm it is needed.
        time.sleep(2)
        self.result = self.func(*self.args)

    def get_result(self):
        threading.Thread.join(self)  # block until run() has finished
        try:
            return self.result
        except Exception:
            # self.result is never assigned if run() raised or never ran.
            return None
# do the recognization
def api_run(im_raw):
    """OCR *im_raw* (raw image bytes) via the Baidu accurate-OCR endpoint.

    Returns the recognized lines with leading spaces stripped. Relies on the
    module-level `client` (AipOcr) having been initialized.
    """
    # Optional request params.
    options = {
        "detect_direction": "false",
        "probability": "true",
    }
    # Single billed request — the original issued a throwaway
    # basicAccurate(image) call first, doubling cost and latency.
    result = client.basicAccurate(im_raw, options)
    sentence_list = []
    for item in result['words_result']:
        # Drop leading spaces from each recognized line (same effect as the
        # original manual leading-space counter).
        sentence_list.append(item['words'].lstrip(' '))
    return sentence_list
# build the html text
def get_font(res):
    """Build an opening <font> tag whose size is derived from res.tag_X."""
    return "<font size={}> ".format(res.tag_X + 4)
# start handle the image
def handle_img(img_raw):
    """Run cloud OCR and local CTPN+CRNN detection on *img_raw* bytes, then
    print the merged, position-sorted results as HTML fragments."""
    # Cloud OCR runs concurrently with the local pipeline below.
    task = MyThread(api_run, (img_raw,))
    task.start()
    print("start api..")
    # NOTE(review): np.fromstring is deprecated (use np.frombuffer), and
    # cv2.imdecode's second argument should be an imread flag such as
    # cv2.IMREAD_COLOR, not a color-conversion code — confirm intent.
    img_array = np.fromstring(img_raw,np.uint8)
    img_cv = cv2.imdecode(img_array,cv2.COLOR_BGR2RGB)
    print("start image ctpn..")
    img,text_recs = getCharBlock(text_detector,img_cv)
    print("start image crnn..")
    att = crnnRec(model,converter,img,text_recs)
    # Replace each CRNN prediction with the corresponding API line.
    sentence_list = task.get_result()
    print(sentence_list)
    for i in range(att.__len__()):
        print(att[i].pred)
        att[i].pred = sentence_list[i]
        # Approximate per-character width, used by the position sorter.
        att[i].W = att[i].width / sentence_list[i].__len__()
    att_ex = POS.PositionHelper().__sort__(att)
    for i in range(att_ex.__len__()):
        print(get_font(att_ex[i])+ att_ex[i].pred + " </font>")
    # stop_thread(task)
# stop_thread(task)
class Myserver(SocketServer.BaseRequestHandler):
    """TCP handler that receives a framed request and runs OCR on the image.

    Wire format: two 4-byte struct 'i' lengths (user-info size, image size),
    followed by the JSON user info and then the base64-encoded image.
    """
    def handle(self):
        conn = self.request
        #conn.sendall(bytes("Start Analysis....",encoding="utf-8"))
        conn.sendall("Start Analysis....")
        total_data = ""
        ret_bytes = conn.recv(4096)
        if ret_bytes:
            if(len(ret_bytes) > 8):
                #TODO: check if the params are valid!
                # Header: two ints — user-info size and image size.
                size_user = struct.unpack('i', ret_bytes[0:4])[0]
                size_img = struct.unpack('i', ret_bytes[4:8])[0]
                # Reject images over 4 MiB before reading the body.
                if size_img > 4096 * 1024:
                    conn.sendall("img is too large!!!")
                    return
                total_data = ret_bytes
                checked = False
                # Keep receiving until header + user info + image have arrived.
                while len(total_data) < size_user + size_img + 8:
                    ret_bytes = conn.recv(4096)
                    total_data += ret_bytes
                    if not ret_bytes:
                        break
                # Authenticate once the user-info segment is complete.
                if len(total_data) > size_user + 8 and not checked:
                    try:
                        raw_user_info = total_data[8:8+size_user]
                        #user_info = json.loads(raw_user_info.decode("utf-8"))
                        user_info = json.loads(raw_user_info)
                        # Post credentials to the login endpoint; a redirect
                        # to /campgrounds indicates success.
                        try:
                            r = requests.post("http://192.168.17.131/login", user_info)
                            if (r.url == "http://192.168.17.131/campgrounds"):
                                conn.sendall("Start Analysis..")
                            else:
                                conn.sendall("permission denied")
                                return
                        except Exception as e:
                            print(e)
                            raise e
                            return
                        checked = True
                    except Exception as e:
                        #conn.sendall(bytes("error json object",encoding="utf-8"))
                        conn.sendall("error json object")
                        raise e
                        return
                # Decode and process the image payload.
                try:
                    print("start image process..")
                    raw_img = total_data[8+size_user:]
                    img = base64.b64decode(raw_img)
                    handle_img(img)
                    #conn.sendall(bytes("finished!!!",encoding="utf-8"))
                    conn.sendall("finished!!!")
                except Exception as e:
                    #conn.sendall(bytes("error image!",encoding="utf-8"))
                    conn.sendall("error image!")
                    raise e
            else:
                #conn.sendall(bytes("error params!",encoding="utf-8"))
                conn.sendall("error params!")
if __name__ == "__main__":
    # mongodb = MongoClient('localhost',27017)
    # db = mongodb.test
    # users = db.users
    """ APPID AK SK """
    # SECURITY(review): API credentials are hard-coded in source; move them to
    # environment variables / a secrets store and rotate the exposed keys.
    APP_ID = '11765015'
    API_KEY = 'aX3L3UzaL2GTxBHDyCZD4rG6S'
    SECRET_KEY = 'kjISzZhXMeLOgnEYB62vdO4gzKvAOgH7'
    client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
    # CTPN detector and CRNN recognizer are loaded once at startup.
    text_detector = ctpnSource()
    model,converter = crnnSource()
    timer=Timer()
    print("initialize finished...")
    print("Start listening..")
    # Threaded TCP server; each connection is handled by Myserver.handle.
    server = SocketServer.ThreadingTCPServer(("192.168.17.131",50007),Myserver)
    server.serve_forever()
|
class Calculator:
    """Interactive console calculator driven by a numbered operation menu."""

    def __init__(self):
        pass

    def calculate(self):
        """Loop: show the menu, read an option and two operands, print the result.

        Option 8 exits the process; invalid choices are reported. The original
        named the instance parameter `f`; `self` is the idiomatic name and
        callers (``x.calculate()``) are unaffected.
        """
        while True:
            print('\n===== CALCULATOR =====\n')
            print(' 1 -- Addition')
            print(' 2 -- Subtraction')
            print(' 3 -- Multiplication')
            print(' 4 -- Division')
            print(' 5 -- Percentage')
            print(' 6 -- Exponentiation')
            print(' 7 -- Floor Division')
            print(' 8 -- Exit\n')
            x = int(input('Choice an option: '))
            if x == 8:
                print('Byee...')
                exit()
            elif x not in range(1, 8):
                print('Invalid Option')
            else:
                a = float(input('1° Number: '))
                b = float(input('2° Number: '))
                if x == 1:
                    print('Result:', a + b)
                elif x == 2:
                    print('Result:', a - b)
                elif x == 3:
                    print('Result:', a * b)
                elif x == 4:
                    # Guard: the original crashed with ZeroDivisionError.
                    if b == 0:
                        print('Error: division by zero')
                    else:
                        print('Result:', a / b)
                elif x == 5:
                    print('Result:', int(a * b) / 100)
                elif x == 6:
                    print('Result:', a ** b)
                elif x == 7:
                    if b == 0:
                        print('Error: division by zero')
                    else:
                        print('Result:', a // b)
# Script entry: run the interactive calculator loop.
# NOTE(review): consider guarding with `if __name__ == '__main__':` so the
# module can be imported without starting the loop.
x = Calculator()
x.calculate()
|
# Arithmetic operators
# a = 2 + 1
# b = 2 - 1
# c = 2 / 1
# d = 2 * 1
# e = 2 ** 3
# f = 2 // 3
# g = 2 % 3
# Assignment operators
# h = 1
# Incrementation
# i = 20
# i += 2
# i *= 2
# j = 2
# k = 2
# # or
# Assign the value in 1 line
# j,k = 2,2
# Unary operators
# nagation concept
# l = 8
# -l
# print(-l)
# l = -l
#
# print(l)
# Relation operators
# ab = 5
# ba = 10
# print(ab < ba)
# print(ab <= ba)
# print(ab >= ba)
# print(ab > ba)
# print(ab == ba)
# print(ab != ba)
# print(ba != ab)
# Logical operators
# and , or , not
# True / fales
# and
# Boolean operator truth tables, enumerated value by value.
# and: True only when both operands are True
m1 = True and True #= True
m2 = True and False #= False
m3 = False and True #= False
m4 = False and False #= False
# or: True when at least one operand is True
m5 = True or True #= True
m6 = True or False #= True
m7 = False or True #= True
m8 = False or False #= False
print(m1)
print(m2)
print(m3)
print(m4)
print(m5)
print(m6)
print(m7)
print(m8)
from enum import Enum
class EffectType(Enum):
    """Kinds of effect an ability can apply."""

    physical = 1
    magical = 2
    healing = 3
|
from django import forms
from .models import Product
class ProductForm(forms.ModelForm):
    """ModelForm exposing the user-editable fields of Product."""

    class Meta:
        model = Product
        fields = ('title', 'description', 'price', 'thumbnail_1', 'thumbnail_2', 'thumbnail_3', 'categories', 'featured', 'show_to_friends')
# When run as a plain script (not as part of a package), add the project root
# (three directory levels up) to sys.path so `utct` imports resolve.
if __name__ == '__main__' and __package__ is None:
    import sys
    from os import path
    sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
import os
import argparse
from utct.MXNet.converter import Converter
def parse_args():
    """Parse command-line options for the MXNet-to-h5 export tool."""
    parser = argparse.ArgumentParser(
        description='Export MXNet model parameters to h5 file',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--checkpoint-dir', dest='checkpoint_dir', type=str,
                        required=True,
                        help='Destination directory with checkpoint files')
    parser.add_argument('--prefix', dest='prefix', type=str, default='mnist',
                        help='Prefix for MXNet checkpoint files')
    parser.add_argument('--epoch', dest='epoch', type=int, default=1,
                        help='Epoch for MXNet checkpoint files')
    parser.add_argument('--output', dest='dst_filepath', type=str,
                        required=True,
                        help='Output file for MXNet model parameters')
    return parser.parse_args()
def main():
    """Entry point: convert the checkpointed MXNet parameters to an h5 file."""
    opts = parse_args()
    prefix_path = os.path.join(opts.checkpoint_dir, opts.prefix)
    Converter.export_to_h5(
        checkpoint_path=prefix_path,
        checkpoint_epoch=opts.epoch,
        dst_filepath=opts.dst_filepath)


if __name__ == '__main__':
    main()
|
"""Version module for repo."""
__version__ = "8.4.2"
def version():
"""Return current version number."""
return __version__
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from .cmu_search import CmuFullImageSearch, CmuImageBackgroundSearch
from .settings import CmuSetting
def load(info):
    """Girder plugin entry point: validate settings and mount the CMU search endpoints."""
    cmuSetting = CmuSetting()
    # Touch every required setting up front so missing configuration fails at
    # load time rather than on the first request.
    for setting in cmuSetting.requiredSettings:
        cmuSetting.get(setting)
    info['apiRoot'].cmu_imagebackgroundsearch = CmuImageBackgroundSearch()
    info['apiRoot'].cmu_fullimagesearch = CmuFullImageSearch()
|
import socket, sys
t_host = "127.0.0.1"
t_port = 80
if t_host is not None:
# Crea un objeto socket que use una dirección o hostname IPv4 (IF_INET) usando UDP (SOCK_DGRAM)
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#client.connect((t_host, t_port)) --> UDP is CONNECTIONLESS, dude!
# Envía datos
req = str.encode("AABBCC")
client.sendto(req, (t_host, t_port))
resp, addr = client.recvfrom(1024)
print(resp.decode(errors="ignore"))
|
'''
integrantes:
Leonardo Daneu Lopes (8516816)
Lucas Sung Jun Hong (8124329)
'''
from parser import CTLtree
#from subprocess import call
from pyeda.inter import *
import os
import re
import string
#------------functions--------------
# Pega uma string '[ai, .. ,aj], [bx, .. ,by], ...'
# devolve uma lista de strings: lista[0] = [ai, .. ,aj]
def separaParenteses_em_grupos (str_tmp):
    """Return the contents of every non-greedy (...) group found in *str_tmp*."""
    grupo = re.compile(r"\((.*?)\)")
    return grupo.findall(str_tmp)
# devolve uma lista com todos os inteiros envolvidos em um string
def array_all_int (str):
    """Return every maximal run of digits in the input as a list of ints.

    NOTE: the parameter shadows the builtin `str`; the name is kept for
    signature compatibility with existing callers.
    """
    return [int(tok) for tok in re.findall(r"\d+", str)]
# recebe um array de inteiro
# e devolve o array com sinais
def array_muda_sinal (array_tmp, str):
    """Apply negation signs found in *str* to the integers in *array_tmp*.

    Walks the string character by character: a '-' or '~' negates the next
    digit encountered, then the sign flag resets.
    NOTE(review): the index advances once per digit *character*, so
    multi-digit numbers desynchronize it from array_all_int's per-number
    output — confirm inputs only use single-digit proposition ids.
    """
    i = 0; guarda_sinal = False
    for c in str:
        if (c == "-" or c == "~"):
            guarda_sinal = True
        if c.isdigit():
            if guarda_sinal == True:
                array_tmp[i] = -array_tmp[i]
            i = i + 1
            guarda_sinal = False
    return array_tmp
# Standard valor no dicionario
def default_ddt_value (rotulos, numeroEstados):
    """Build the initial state dictionary: every state maps to the conjunction
    of all negated propositions (~X[1] & ~X[2] & ... )."""
    # One past the largest proposition index mentioned in the labels.
    limite = int(max(array_all_int(rotulos))) + 1
    # Always starts at ~X[1], matching the original's seed term.
    negacao_total = "~X[1]" + "".join(" & ~X[{}]".format(i) for i in range(2, limite))
    return {estado: negacao_total for estado in range(numeroEstados)}
# Atualizacao do dicionario
def update_ddt_value (rotulos, numeroEstados, ddt):
    """Flip the propositions that actually hold in each state.

    For state i, every proposition listed in its label has its "~X[p]"
    occurrence in ddt[i] rewritten to "X[p]".
    """
    # One label group per state, in state order.
    arrayStr_listaEstados = separaParenteses_em_grupos (rotulos)
    for i in range(numeroEstados):
        array_tmp = ""
        # Propositions true in state i (empty list when the label is empty).
        array_tmp = array_all_int ( arrayStr_listaEstados[i] )
        if len(array_tmp) != 0:
            for j in range( len(array_tmp) ):
                str_tmp = ""  # reset the scratch string
                str_tmp_new = "X[" + str( array_tmp[j] ) + "]"
                str_tmp = "~" + str_tmp_new
                # Rewrite "~X[p]" -> "X[p]" in this state's formula.
                str_tmp = ddt[i].replace(str_tmp, str_tmp_new)
                ddt[i] = str_tmp
    return (ddt)
# Converte X para Y (ou X')
def convert_X_to_Xprime (str_tmp):
    """Rename every X variable to its primed counterpart Y (i.e. X')."""
    return str_tmp.replace("X", "Y")
# write Bs
def write_B_s (ddt):
    """Disjunction of every state formula: ddt[0] | ddt[1] | ..."""
    return " | ".join(ddt[i] for i in range(len(ddt)))
# Criamos BDD do B->
def write_B_arrow (kripke, ddt):
    """Encode the transition relation: for each Kripke pair (f, g) emit
    ddt[f] & ddt[g]' and OR all of them together."""
    kripke = ( separaParenteses_em_grupos (kripke) )
    b_arrow = ""
    for i in range( len(kripke) ):
        # Each group holds one (source, target) state pair.
        lst_tmp = array_all_int (kripke[i])
        # For pair (f, g): current-state formula AND primed next-state formula.
        str_tmp = ( ddt[ lst_tmp[0] ] +
                    " & " +
                    convert_X_to_Xprime ( ddt[ lst_tmp[1] ] ) )
        b_arrow = b_arrow + " | " + str_tmp
    b_arrow = b_arrow[3:]  # drop the leading " | "
    return (b_arrow)
# construindo agora o Bx', que devolve
# uma lista de todos os estados envolvidos
def write_array_B_prime (ddt, modeloPhi):
    """Primed formulas (X') of the states whose labelling satisfies modeloPhi.

    *modeloPhi* is a pyeda expression string; "*" is treated as AND over its
    signed literals, "+" as OR, otherwise a single literal is matched.
    Reads the module-level `numeroEstados` global.
    """
    array_b_prime = [0] * len(ddt)
    # Constant formulas pass straight through.
    if (modeloPhi == "0" or modeloPhi == "1"):
        return modeloPhi
    j = 0; b_prime = ""; guarda_operador = ""; guarda_proposicao = False;
    # Remember the last top-level connective seen in the expression.
    for c in modeloPhi:
        if (c == "+" or c == "*"):
            guarda_operador = c
    # Signed literal list of the formula.
    modeloPhi_array = array_muda_sinal ( array_all_int (modeloPhi) , modeloPhi)
    for i in range(numeroEstados):
        # Signed literal list of state i's formula.
        array_tmp = array_muda_sinal ( (array_all_int (ddt[i])) , ddt[i])
        if guarda_operador != "":
            if (guarda_operador == "*"):
                # AND: every literal of the formula must appear in the state.
                for s in range( len(modeloPhi_array) ):
                    hold = False
                    if modeloPhi_array[s] in array_tmp:
                        hold = True
                    else: break;
            if (guarda_operador == "+"):
                # OR: one matching literal suffices.
                for s in range( len(modeloPhi_array) ):
                    hold = False
                    if modeloPhi_array[s] in array_tmp:
                        hold = True; break;
        else:
            # Single literal.
            hold = False
            if modeloPhi_array[0] in array_tmp:
                hold = True
        if hold == True:
            array_b_prime[j] = convert_X_to_Xprime (ddt[i])
            j = j + 1
    # Trim the preallocated list to the j entries actually filled.
    new_array_b_prime = [0] * j
    for i in range(j):
        new_array_b_prime[i] = array_b_prime[i]
    return new_array_b_prime
# Calcula S - X (recebe X como parametro)
def calcula_S_minus_X (ddt, modeloPhi):
    """Complement set: primed formulas of every state NOT satisfying modeloPhi.

    NOTE(review): `hold` is unbound when write_array_B_prime returns an empty
    list (the inner loop never runs), raising NameError — confirm inputs
    always yield at least one match.
    """
    s_minus_x_OLD = [0] * (2 * len(ddt)); s = 0;
    array_b_prime = write_array_B_prime (ddt, modeloPhi)
    for i in range( len(ddt) ):
        # State i is kept only if its primed formula matches nothing in X.
        for j in range( len(array_b_prime) ):
            if convert_X_to_Xprime (ddt[i]) != array_b_prime[j]:
                hold = True
            else:
                hold = False; break;
        if hold == True:
            s_minus_x_OLD[s] = convert_X_to_Xprime (ddt[i])
            s = s + 1
    # Trim the preallocated buffer to the s entries actually filled.
    s_minus_x = [0] * s
    for i in range(s):
        s_minus_x[i] = s_minus_x_OLD[i]
    return (s_minus_x)
# Calculo da pre imagem fraca
def Pre_fraca (kripke, ddt, array_b_prime):
    """Weak pre-image: BDD of states with at least one successor in X'.

    *kripke* is the raw "(f,g),(h,k),..." transition string, *ddt* the state
    formula dictionary, *array_b_prime* the primed formulas of the target set.
    """
    kripke = ( separaParenteses_em_grupos (kripke) )
    len_kripke = len(kripke)
    lst1 = [0] * len_kripke
    lst2 = [0] * len_kripke
    b_arrow = ""
    #array_b_prime = write_array_B_prime (ddt, modeloPhi)
    for i in range( len_kripke ):
        # Each Kripke group is a (source, target) state pair.
        lst_tmp = array_all_int (kripke[i])
        lst1[i] = ddt[ lst_tmp[0] ]
        lst2[i] = convert_X_to_Xprime ( ddt[ lst_tmp[1] ] )
    # A transition contributes its source formula whenever its primed target
    # intersects some member of the target set.
    for i in range( len_kripke ):
        lado1 = expr2bdd( expr(lst2[i]) )
        for j in range( len(array_b_prime) ):
            lado2 = expr2bdd( expr(array_b_prime[j]) )
            str_tmp = ""
            f = (lado1) & (lado2)
            if (f.is_zero() != True):
                str_tmp = lst1[i]
                b_arrow = b_arrow + " | " + str_tmp
                #print (str_tmp)
    b_arrow = b_arrow[3:]  # drop the leading " | "
    b_arrow = expr2bdd(expr(b_arrow))
    return (b_arrow)
def Pre_forte (b_s, kripke, ddt, modeloPhi):
    """Strong pre-image, expressed as b_s | ~Pre_fraca(S - X).

    NOTE(review): Pre_fraca returns a BDD object, so concatenating it into a
    string here raises TypeError, and SAT_AF calls Pre_forte with a single
    argument — this function appears unfinished; confirm before use.
    """
    array_b_prime = calcula_S_minus_X (ddt, modeloPhi)
    b_result = b_s + " | ~(" + Pre_fraca (kripke, ddt, array_b_prime) + ")"
    return (b_result)
#-----------ALGORITMOS SAT----------------------------------------------
def SAT(phi, S):
    """Recursive CTL model checking: return the BDD of states satisfying *phi*.

    *phi* is a parsed CTL tree (kind + childs); *S* is the BDD of all states.
    Derived temporal operators are rewritten into the EX / EU / AF kernel.
    """
    if (phi.kind == "1"):
        return S
    if (phi.kind == "0"):
        return expr2bdd(expr("None"))
    if (phi.childs == None):
        # Atomic proposition: restrict S to states where it holds.
        return (expr2bdd(expr(phi.kind)) & S.restrict({bddvar(phi.kind): 1}))
    if (phi.kind == "-"):
        #return (expr2bdd(expr(phi.childs[0])) & S.restrict({phi.childs[0]: 0}))
        return(S | ~SAT(phi.childs[0], S))
    if (phi.kind == "+" and phi.childs[0] != None and phi.childs[1] != None):
        X = SAT(phi.childs[0], S)
        Y = SAT(phi.childs[1], S)
        if (X != None and Y != None):
            return(X | Y)
        else:
            return expr2bdd(expr("None"))
    if (phi.kind == "*" and phi.childs[0] != None and phi.childs[1] != None):
        X = SAT(phi.childs[0], S)
        Y = SAT(phi.childs[1], S)
        if (X != None and Y != None):
            return(X & Y)
        else:
            return expr2bdd(expr("None"))
    if (phi.kind == "AX"):
        # AX p == ~EX ~p
        return(SAT(CTLtree.parse("- EX -" + str(phi.childs[0])), S))
    if (phi.kind == "AU"):
        # A[p U q] rewritten via the EU / EG duality.
        string = "+-(EU -" + str(phi.childs[0]) + ")(*(-" + str(phi.childs[0]) + ")(-" + str(phi.childs[1]) + "))(EG -" + str(phi.childs[1]) + ")"
        return(SAT(CTLtree(string), S))
    if (phi.kind == "EX"):
        return(SAT_EX(phi.childs[0], S))
    if (phi.kind == "EU"):
        return(SAT_EU(phi.childs[0], phi.childs[1], S))
    if (phi.kind == "EF"):
        # EF p == E[1 U p]
        return(SAT(CTLtree("EU(1)(" + str(phi.childs[0]) + ")"), S))
    if (phi.kind == "EG"):
        # EG p == ~AF ~p
        return(SAT(CTLtree("- AF -" + str(phi.childs[0])), S))
    if (phi.kind == "AF"):
        return(SAT_AF(phi.childs[0], S))
    if (phi.kind == "AG"):
        # AG p == ~EF ~p
        return(SAT(CTLtree("-EF (-" + str(phi.childs[0]) + ")"), S))
def SAT_AF(phi, S):
    """Fixpoint for AF: grow Y with its strong pre-image until stable.

    NOTE(review): Pre_forte is defined with four parameters but called here
    with one — this raises TypeError as written; confirm intent.
    """
    X = S
    Y = SAT(phi, S)
    while (X != Y):
        X = Y
        Y = Y | Pre_forte(Y)
    return (Y)
def SAT_EU(phi, psi, S):
    """Fixpoint for E[phi U psi] using the weak pre-image.

    Reads the module-level `ddt` and `kripke` globals.
    """
    W = SAT(phi, S)
    Y = SAT(psi, S)
    X = S
    while (X != Y):
        X = Y
        #Y = Y | (W & Pre_fraca(Y))
        # NOTE(review): the pre-image is recomputed from str(phi) on every
        # iteration even though it does not change — could be hoisted.
        array_b_prime = write_array_B_prime (ddt, str(phi))
        Y = Y | (W & Pre_fraca (kripke, ddt, array_b_prime))
    return (Y)
def SAT_EX(phi, S):
    """EX phi: weak pre-image of the states satisfying phi.

    Reads the module-level `ddt` and `kripke` globals.
    NOTE(review): X = SAT(phi, S) is computed but never used; only str(phi)
    feeds the pre-image — confirm whether X was meant to participate.
    """
    X = SAT(phi, S)
    array_b_prime = write_array_B_prime (ddt, str(phi))
    Y = Pre_fraca(kripke, ddt, array_b_prime)
    return Y
#------------main------------------------------------------------------------
numeroEstados = int(input())
kripke = input()
rotulos = input()
formulaCTL = CTLtree(input())
interest = input()
# Build the state dictionary, then refine it with the labelling.
ddt = default_ddt_value (rotulos, numeroEstados)
ddt = update_ddt_value (rotulos, numeroEstados, ddt)
# Bs: BDD of the disjunction of all state formulas.
b_s = write_B_s (ddt)
S = expr2bdd( expr(b_s) )
# Run SAT once and reuse the result — the original re-evaluated the whole
# recursion up to four times — and use `is None` instead of `== None`.
resultado = SAT(formulaCTL, S)
if resultado is None or resultado.satisfy_one() is None:
    print("UNSAT")
else:
    print("SAT")
    print("lista de todos os estados que SAT:")
    print(list(resultado.satisfy_all()))
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 20:13:58 2018
@author: user
不定數迴圈-閏年判斷
"""
# Read years until the sentinel -9999; report whether each is a leap year.
while True:
    year = int(input())
    if year == -9999:
        break
    # Leap year: divisible by 4, except century years not divisible by 400.
    divisible_by_4 = year % 4 == 0
    century_rule_ok = year % 100 != 0 or year % 400 == 0
    if divisible_by_4 and century_rule_ok:
        print("{:} is a leap year.".format(year))
    else:
        print("{:} is not a leap year.".format(year))
#Universidade Federal Rural de Pernambuco - Dept. de Estatística e Informática
#BSI - 2019.2 - Laboratório de Informática
#Autora: Paula Priscila da C. Araujo
#Programa feito para calcular o número de metros deslocados de um robo 'o RL2' através de 2 comandos.
# Banner and usage hint (Portuguese strings preserved verbatim).
print('='*20,' CONTADOR DE DESLOCAMENTO DO RL2', '='*20)
print('Use os comandos F p/frente e T p/trás,qualquer outro será desconsiderado! \n')
while True:
    # Read a command string; only F (forward) and T (back) count.
    comandos = input('Digite os comandos desejados [SAIR p/parar]: ').upper()
    if comandos == 'SAIR':
        break
    # Net displacement = forward steps minus backward steps.
    passos_f = comandos.count('F')
    passos_t = comandos.count('T')
    saldo = passos_f - passos_t
    if saldo > 0:
        total = saldo
        print(f'O RL2 deslocará {total}m para frente\n')
    elif saldo == 0:
        print('O deslocamento do RL2 é 0\n')
    else:
        total = -saldo
        print(f'O RL2 deslocará {total}m para trás\n')
from flask import Flask, render_template

app = Flask(__name__)


@app.route('/')
def home():
    # Plain-text sanity check for the root route.
    return "Hello Myk Finally should work"


@app.route('/base')
def base():
    # Serve the base template.
    return render_template("base.html")


if __name__ == "__main__":
    # Listen on all interfaces (development server).
    app.run(host="0.0.0.0")
|
from django.conf import settings
import bs4 as bs
from urllib.request import urlopen
from urllib import parse
import string
from .models import Product, ProductImage
import requests
import tempfile
from PIL import Image
from django.core import files
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
import requests
import os
import boto3
import re
# Collection pages to scrape, keyed by product type.
collection_urls = {
    "acoustic":'https://www.someneckguitars.com/collections/acoustic',
    "electric": 'https://www.someneckguitars.com/collections/electric',
    "bass": 'https://www.someneckguitars.com/collections/bass',
    "amplifier": 'https://www.someneckguitars.com/collections/amplifiers',
    "effects": 'https://www.someneckguitars.com/collections/accessories',
    "pickup": 'https://www.someneckguitars.com/collections/pickups',
    "audio": 'https://www.someneckguitars.com/collections/audio',
}
# Product URLs that appeared in the "featured" strip of a collection page.
featured_urls = set()
def save_image_from_url(field, url):
    """Download *url* and attach it to the given Django image field.

    Also uploads the raw bytes to S3. Returns True on success, False when
    the HTTP GET did not return 200/OK.
    """
    r = requests.get(url)
    if r.status_code == requests.codes.ok:
        img_temp = NamedTemporaryFile(delete = True)
        img_temp.write(r.content)
        img_temp.flush()
        # Use the URL path (without the leading slash) as the stored filename.
        img_filename = parse.urlsplit(url).path[1:]
        print(img_filename)
        field.save(img_filename, File(img_temp), save = True)
        # FOR UPLOADING TO AWS
        # NOTE(review): bucket name is hard-coded; consider a setting.
        s3 = boto3.resource('s3')
        s3.Bucket('e-commerce-johnpooch').put_object(Key="media/images/" + img_filename, Body=r.content)
        # FOR UPLOADING TO LOCAL
        # with open(settings.MEDIA_ROOT + "/images/" + img_filename, "wb") as f:
        #     f.write(r2.content)
        return True
    return False
def scrape_urls_from_collection_page(url, type):
    """Collect every product URL from a paginated collection page.

    Returns a list of (product_url, type) tuples. Links that appear after
    the pagination block are recorded in the module-level featured_urls set
    instead. NOTE: the `type` parameter shadows the builtin.
    """
    number_of_pages = "1"
    current_page = 1
    urls = []
    print("scraping " + url)
    sauce = urlopen(url).read()
    soup = bs.BeautifulSoup(sauce, 'lxml')
    # The last "page" span (if any pagination exists) holds the page count.
    spans = soup.findAll("span", class_="page")
    last_span = None
    for last_span in spans:
        pass
    if last_span:
        number_of_pages = last_span.text
    for i in range(int(number_of_pages)):
        sauce = urlopen(url + "?page=%i" %(current_page)).read()
        soup = bs.BeautifulSoup(sauce, 'lxml')
        # Anchors before the pagination div are regular products; anchors
        # after it belong to the "featured" strip.
        cut_off = soup.find("div", class_="pagination")
        non_featured_anchors = cut_off.find_all_previous("a", class_="product-img-wrapper")
        featured_anchors = cut_off.find_all_next("a", class_="product-img-wrapper")
        for anchor in reversed(non_featured_anchors):
            urls.append(("https://www.someneckguitars.com" + anchor['href'], type))
        for anchor in reversed(featured_anchors):
            featured_urls.add("https://www.someneckguitars.com" + anchor['href'])
        current_page = current_page + 1
    return urls
def scrape_product_from_url(url_tuple):
    """Scrape one product page and persist a Product plus its gallery images.

    *url_tuple* is (product_url, type). Returns None; the Product and its
    ProductImage rows are saved as a side effect.
    """
    url, type = url_tuple
    # Four-digit years (18xx/19xx/20xx) anywhere in the description.
    regex = r"\b(18|19|20)\d{2}\b"
    # default values
    product_year = 0
    product_description = "No description available"
    # Featured status was recorded while walking the collection pages.
    if url in featured_urls:
        product_featured = True
        print("featured product found")
    else:
        product_featured = False
    sauce = urlopen(url).read()
    soup = bs.BeautifulSoup(sauce, 'lxml')
    # Product metadata comes from the OpenGraph meta tags.
    title = soup.find("meta", property="og:title")
    product_name = title['content']
    image = soup.find("meta", property="og:image")
    image_url = image['content']
    if soup.find("meta", property="og:description"):
        description = soup.find("meta", property="og:description")
        product_description = description['content']
    # First year-like number in the description is taken as the product year.
    if re.search(regex, product_description):
        product_year = re.search(regex, product_description).group(0)
    brand_header = soup.find("h2", itemprop="brand")
    product_manufacturer = brand_header.find('a').text
    price_amount = soup.find("meta", property="og:price:amount")
    product_price_amount = float(price_amount['content'].replace(',',''))
    price_currency = soup.find("meta", property="og:price:currency")
    product_price_currency = price_currency['content']
    gallery_images = []
    for image_element in soup.findAll("meta", {"property": "og:image"}):
        gallery_images.append(image_element["content"])
    p = Product(
        name = product_name,
        manufacturer = product_manufacturer.upper(),
        year = product_year,
        type = type.upper(),
        description = product_description,
        price = product_price_amount,
        # image = product_image,
        featured = product_featured
    )
    save_image_from_url(p.image, image_url)
    p.save()
    # NOTE(review): this loop variable shadows the product url above.
    for url in gallery_images:
        pi = ProductImage(
            product = p,
        )
        save_image_from_url(pi.image, url)
        pi.save()
def get_products_from_someneck():
    """Scrape every collection and save each product (with images) to the DB."""
    product_urls = []
    product_details_list = []
    # NOTE(review): these two sets are never populated or read.
    type_set = set()
    manufacturer_set = set()
    print("\n\nscraping data from https://www.someneckguitars.com\n----------------------------------------------- \n")
    for type, url in collection_urls.items():
        product_urls.extend(scrape_urls_from_collection_page(url, type))
    # Iterate in reverse order over the collected product URLs.
    for url_tuple in product_urls[::-1]:
        print('\n' + url_tuple[0].rsplit('/', 1)[-1].replace('-', ' ').title())
        # NOTE(review): scrape_product_from_url returns None, so this list
        # only accumulates None values.
        product_details = scrape_product_from_url(url_tuple)
        product_details_list.append(product_details)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'

# Soft-delete a hw_module row by id: mark it inactive and deleted, returning
# the updated row. $1 is the module id (asyncpg-style positional parameter).
delete_hw_module_element_query = """
UPDATE public.hw_module AS hwm
SET active = False, deleted = True
WHERE hwm.id = $1::BIGINT RETURNING *;
"""
|
from django.db import models
# Create your models here.
class TraditionalStrategy(models.Model):
    """Placeholder model for traditional (rule-based) strategies.

    NOTE(review): no fields are declared yet beyond the implicit id.
    """
    class Meta:
        verbose_name = verbose_name_plural = '传统策略'
class AIStrategy(models.Model):
    """Placeholder model for AI-driven strategies.

    NOTE(review): no fields are declared yet beyond the implicit id.
    """
    class Meta:
        verbose_name = verbose_name_plural = 'AI策略'
#!/usr/bin/env python3
# Author: Michael Petit
# Date: May 2018
# michael.p.petit@gmail.com
# Copywrite: MIT Commons
########################################################################################################################
import datetime
import speedtest
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import socket
import subprocess
import connectivity
import sys
from sys import platform
from logwrite import logwrite
# TODO
# Refactor classes # http://docs.python-guide.org/en/latest/writing/structure/
# try/catch everything that makes sence
# Add node to node iperf3 to check LAN performance - use arg qulaifier
# Variables
LOGFILE = "speedtest_results.txt"
DEBUG = False

# Detect the host OS; only Linux and Windows are supported.
try:
    if platform == "linux" or platform == "linux2":
        osplatform = "linux"
        if DEBUG is True:
            print("Operating system: Linux")
    elif platform == "win32":
        osplatform = "win"
        if DEBUG is True:
            print("Operating system: Windows")
    else:
        print("Unsupported operating system - exiting...")
        exit()
except Exception as err:
    # One handler replaces the original 13 duplicated per-type handlers;
    # the message format (including the exception type name) is unchanged,
    # e.g. "Error: NameError: Unable to set platform: ...".
    print("Error: {0}: Unable to set platform: {1}".format(type(err).__name__, err))
def printdot():
    """Emit a single progress dot without a newline, flushed immediately."""
    print(".", end="", flush=True)
class NetworkPerformance(object):
def __init__(self):
    """Initialize an empty measurement stamped with the current date/time."""
    # Timestamp of the measurement (split and combined forms).
    self.date = datetime.datetime.now().strftime("%y-%m-%d")
    self.time = datetime.datetime.now().strftime("%H:%M:%S")
    self.datetime = self.date + " " + self.time
    # Speedtest metrics, filled in later.
    self.isp = 0
    self.download = 0
    self.upload = 0
    self.ping = 0
    # Client endpoint ('?'/0 until measured).
    self.client = '?'
    self.client_latitude = 0
    self.client_longitude = 0
    # Selected speedtest server endpoint.
    self.server = '?'
    self.server_location = '?'
    self.server_latitude = 0
    self.server_longitude = 0
    self.hostname = socket.gethostname()
    # Traceroute hops and the slowest one observed.
    self.hops = []
    self.highhopserver = 0
    self.highhop = 0
def printout(self):
    """Dump every field of the measurement to stdout (debug aid)."""
    print ("Date: " + str(self.date))
    print ("Time: " + str(self.time))
    print ("ISP: " + str(self.isp))
    print ("Hostname: " + str(self.hostname))
    print ("Client IP: " + str(self.client))
    print ("Client lat: " + str(self.client_latitude))
    print ("Client lon: " + str(self.client_longitude))
    print ("Server: " + str(self.server))
    print ("Server Loc: " + str(self.server_location))
    print ("Server lat: " + str(self.server_latitude))
    print ("Server lon: " + str(self.server_longitude))
    print ("Download: " + str(self.download))
    print ("Upload: " + str(self.upload))
    print ("Ping: " + str(self.ping))
    print ("Hop Count: " + str(len(self.hops)))
    # Each hop prints its own details.
    if self.hops:
        for h in self.hops:
            h.printout()
    print ("High Hop Ser: " + str(self.highhopserver))
    print ("High Hop: " + str(self.highhop))
def tostring(self):
return str(self.date) + "," + \
str(self.time) + "," + \
str(self.datetime) + "," + \
str(self.isp) + "," + \
str(self.hostname) + "," + \
str(self.client) + "," + \
str(self.client_latitude) + "," + \
str(self.client_longitude) + "," + \
str(self.server) + "," + \
str(self.server_location) + "," + \
str(self.server_latitude) + "," + \
str(self.server_longitude) + "," + \
str(self.download) + "," + \
str(self.upload) + "," + \
str(self.ping) + "," + \
str(self.highhopserver) + "," + \
str(self.highhop)
def toarray(self):
arr = [
str(self.date),
str(self.time),
str(self.datetime),
str(self.isp),
str(self.hostname),
str(self.client),
str(self.client_latitude),
str(self.client_longitude),
str(self.server),
str(self.server_location),
str(self.server_latitude),
str(self.server_longitude),
self.download,
self.upload,
self.ping,
len(self.hops),
str(self.highhopserver),
self.highhop]
return arr
def sethighhop(self):
if self.hops:
for h in self.hops:
if h.highms > self.highhop:
self.highhop = h.highms
self.highhopserver = h.ip + " / " + h.name
def savedata(self):
self.logdata()
self.gdwrite()
def gdwrite(self):
if DEBUG is True:
print("Write to GoogleDocs...")
# set highhop and highhopserver if not set
if self.highhop == 0:
self.sethighhop()
# Google Docs auth and open spreadsheet
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name("servicecredentials.json", scope)
connection = gspread.authorize(credentials)
spreadsheet = connection.open("NetworkMonitorData")
# Write Speedtest data - Insert at the top of GD:Speedtest sheet
sheet = spreadsheet.worksheet("Speedtest")
arr = self.toarray()
sheet.insert_row(arr, 2, 'RAW')
# Write trace data - insert data at the top of GD:traceroute sheet
sheet = spreadsheet.worksheet("TraceRoute")
for h in reversed(self.hops):
if DEBUG is True:
h.printout()
arr = h.toarray()
arr.insert(0, str(self.server))
arr.insert(0, str(self.client))
arr.insert(0, str(self.time))
arr.insert(0, str(self.date))
sheet.insert_row(arr, 2, 'RAW')
def logdata(self):
if DEBUG is True:
print("Log data...")
with logwrite(LOGFILE) as log:
log.write(self.tostring() + "\n")
def wanspeedtest(self):
if DEBUG is True:
print("Speed test...")
st = speedtest.Speedtest()
printdot()
st.get_best_server()
printdot()
self.download = round(st.download()/1000000, 2)
self.upload = round(st.upload()/1000000, 2)
self.ping = round(st.results.ping, 2)
results = st.results
printdot()
if DEBUG is True:
print(st.results)
client = st.results.client
self.client = client.get("ip", "?")
self.client_latitude = client.get("lat", "?")
self.client_longitude = client.get("lon", "?")
self.isp = client.get("isp", "?")
server = st.results.server
self.server = server.get("host", "?")
self.server_location = server.get("name", "?")
self.server_latitude = server.get("lat", "?")
self.server_longitude = server.get("lon", "?")
if DEBUG is True:
print(results.json())
self.download = st.results.download
self.ping = st.results.ping
self.upload = st.results.upload
# print(st._opener)
# print(st._secure)
# print(st._best)
print(st.closest)
print(st.config)
print(st.servers)
# print(st._source_address)
# print(st._timeout)
def traceroute(self):
if DEBUG is True:
print ("Traceroute...")
values = self.server.split(":")
if osplatform == "linux":
strg = "paris-traceroute -n "+values[0]
elif osplatform == "win":
strg = "tracert -d -4 "+values[0]
if DEBUG is True:
print(strg)
p = subprocess.Popen(strg, shell=True, stdout=subprocess.PIPE)
while True:
line = p.stdout.readline()
if not line:
break
if DEBUG is True:
print (line)
h = NetworkHop(line)
if h.hopnum != 0:
if DEBUG is True:
print (">>>>>>> " + str(h))
self.hops.append(h)
class NetworkHop(object):
    """One hop parsed from a line of traceroute/tracert output.

    A hopnum of 0 means the line was not a hop line (header, blank, etc.)
    and the caller should discard this object.
    Depends on module-level DEBUG and osplatform defined elsewhere in file.
    """

    def __init__(self, line):
        self.hopnum = 0
        self.ip = 0
        self.name = "?"
        self.ms1 = 0
        self.ms2 = 0
        self.ms3 = 0
        self.highms = self.ms1   # highest of the 3 round-trip times (ms)
        self.msavg = 0           # rounded average of the 3 round-trip times
        # Drop the "ms" unit tokens and collapse repeated spaces so that
        # split(" ") yields clean fields.
        # NOTE(review): whitespace in the original source was mangled; the
        # two-space replace arguments are reconstructed from the intent.
        line = line.replace("ms ", " ").strip().replace("  ", " ").replace("  ", " ")
        if DEBUG is True:
            print("LINE: " + line)
        values = line.split(" ")
        if osplatform == "linux":
            if DEBUG is True:
                print("Platform is linux in NetworkHop")
            if len(values) == 4 and values[1] == "*":
                # Timed-out hop: "N * * *" — keep placeholder values.
                self.hopnum = values[0]
                self.ip = "?"
                self.name = "?"
                self.ms1 = 0
                self.ms2 = 0
                self.ms3 = 0
                self.msavg = 0
                self.highms = 0
            elif len(values) == 5:
                # Normal hop: "N ip ms1 ms2 ms3".
                self.hopnum = values[0]
                self.ip = values[1]
                self.name = self._gethostbyaddress()
                self.ms1 = float(values[2])
                self.ms2 = float(values[3])
                self.ms3 = float(values[4])
                self.msaverage()
                self.sethighms()
        elif osplatform == "win":
            if DEBUG is True:
                print("Platform is win in NetworkHop")
                print("LINE: " + line)
                print("TODO: Windows tracert")
                print("LEN: " + str(len(line)))
            # TODO: parse Windows "tracert" output, which looks like:
            #   1     *        *        *     Request timed out.
            #   2    52 ms    28 ms    38 ms  2600:1007:b027:d1bc:0:5c:4745:9940
            #   ...
            #  16   341 ms    42 ms    50 ms  ord30s26-in-x0e.1e100.net [...]
            #  Trace complete.
        else:
            print("Unsupported OS - should have never got here - yikes!")
        if DEBUG is True:
            self.printout()

    def _gethostbyaddress(self):
        """Reverse-DNS lookup of self.ip; returns "?" on failure or non-IPv4."""
        if DEBUG is True:
            print("IP: "+self.ip)
        octet = self.ip.split(".")
        if len(octet) == 4:
            try:
                name, alias, addressliet = socket.gethostbyaddr(self.ip)
                return name
            # BUG FIX: narrowed from a bare except (which would also swallow
            # KeyboardInterrupt/SystemExit). socket.herror/gaierror are
            # OSError subclasses, so OSError covers all lookup failures.
            except OSError:
                if DEBUG is True:
                    print("Socket error")
                return "?"
        else:
            if DEBUG is True:
                print("IP does not have 4 octets")
            return "?"

    def sethighms(self):
        """Set highms to the maximum of the three round-trip times."""
        if self.ms2 > self.highms:
            self.highms = self.ms2
        # BUG FIX: the original re-tested ms2 here, so ms3 was never
        # considered when it was the largest of the three.
        if self.ms3 > self.highms:
            self.highms = self.ms3

    def gethighms(self):
        """Return the highest round-trip time of this hop."""
        return self.highms

    def toarray(self):
        """Return the hop fields as a list of strings for spreadsheet rows."""
        arr = [
            str(self.hopnum),
            str(self.ip),
            str(self.name),
            str(self.ms1),
            str(self.ms2),
            # BUG FIX: the original emitted ms2 twice and never exported ms3.
            str(self.ms3),
            str(self.msavg)]
        return arr

    def printout(self):
        """Dump all hop fields to stdout."""
        print("")
        print("hopnum: " + str(self.hopnum))
        print("ip: " + str(self.ip))
        print("name: " + str(self.name))
        print("ms1: " + str(self.ms1))
        print("ms2: " + str(self.ms2))
        print("ms3: " + str(self.ms3))
        print("msavg: " + str(self.msavg))

    def msaverage(self):
        """Compute, store and return the average of the three times (2 dp)."""
        self.msavg = float((self.ms1 + self.ms2 + self.ms3)/3)
        self.msavg = round(self.msavg, 2)
        return self.msavg
def main():
    """Run one measurement cycle: connectivity check, speed test, traceroute,
    then persist the results. Prints a one-line summary on success."""
    netp = 0   # stays 0 if we fail (or return) before the object is created
    printdot()
    printdot()
    printdot()
    try:
        internet = connectivity.Connectivity()
        if internet.hasinternet() is False:
            return
        netp = NetworkPerformance()
        printdot()
        netp.wanspeedtest()
        printdot()
        netp.traceroute()
        printdot()
        netp.savedata()
        printdot()
    except Exception as error:
        print("Error: {0}".format(error))
        print(type(error))
        # BUG FIX: Exception objects have no .message attribute in Python 3;
        # accessing it raised a second AttributeError inside this handler.
        print(error.args)
    # BUG FIX: guard the summary — netp is still 0 when an exception occurred
    # before the NetworkPerformance object was created, which previously
    # crashed on netp.datetime.
    if netp:
        print("Execution complete: " + str(netp.datetime) + ": Download:" + str(netp.download) + "MBps Upload:" +
              str(netp.upload) + "MBps Latency:" + str(netp.ping) + "ms Hops:" + str(len(netp.hops)))
# Script entry point.
if __name__ == "__main__":
    main()
|
from .orbitalstatus import OrbitalStatus
from .operationalstatus import OperationalStatus
from .tle import TLE
from .catalogentry import CatalogEntry
from .source import Source
from .launchsite import LaunchSite
|
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from django.conf import settings
from django.contrib import admin
from django.urls import include, path
from django.conf.urls.static import static
# Swagger/OpenAPI schema view used by the auto-generated API doc routes below.
schema_view = get_schema_view(
    openapi.Info(
        title=f'{settings.CLIENT_DOMAIN} API',
        default_version='v1',
        description="Findy",
    ),
    public=True,
)
# URL routing: a honeypot fake admin at /admin/, the real admin at
# /findy-staff/, DRF login views, and the versioned public/accounts/messaging
# APIs. Media files are served from MEDIA_ROOT.
urlpatterns = [
    path('admin/', include('admin_honeypot.urls', namespace='admin_honeypot')),
    path('findy-staff/', admin.site.urls),
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    path(f'api/v{settings.API_VERSION}/public/', include('core.urls', namespace='core')),
    path(f'api/v{settings.API_VERSION}/accounts/', include('users.urls', namespace='users')),
    path(f'api/v{settings.API_VERSION}/messaging/', include('messaging.urls', namespace='messaging')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# django-debug-toolbar routes, only when enabled in settings.
if settings.ENABLE_DEBUG_TOOLBAR:
    import debug_toolbar
    urlpatterns += [
        path('__debug__/', include(debug_toolbar.urls)),
    ]
# API documentation UIs are only exposed when DEBUG is on.
if settings.DEBUG:
    urlpatterns += [
        path('api-docs/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
        path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
    ]
|
# Read the user's own credit-card number, then scan entered card numbers until
# either the user's card is found or 0 (sentinel) marks the end of the list.
meucartao = int(input("Digite o seu numero de cartão de crédito: "))
cartão = 1  # seeded non-zero so the while loop runs at least once
while meucartao != cartão:
    cartão = int(input("Digite o número do próximo cartão: "))
    if meucartao == cartão:
        cartão = meucartao
        print("Meu cartão na lista")
    if cartão == 0:
        cartão = meucartao  # force the loop to exit: card was not in the list
        print("Meu cartão não está na lista")
|
from django.contrib import admin
from .models import Product, ProductImage
class ProductImageInline(admin.TabularInline):
    """Tabular inline editor for a product's images (3 blank extra rows)."""
    model = ProductImage
    extra = 3
class ProductAdmin(admin.ModelAdmin):
    """Admin for Product with inline image editing."""
    inlines = [ ProductImageInline, ]
# Register Product with its customised admin.
admin.site.register(Product, ProductAdmin)
from django.forms import ModelForm
from django.core.exceptions import ValidationError
from playlist.models import Music, Composer , Genre , Radio , Album , Charts
class ComposerForm(ModelForm):
    """Form for creating/editing Composer records."""
    class Meta:
        model = Composer
        fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death']
class MusicForm(ModelForm):
    """Form for creating/editing Music records."""
    class Meta:
        model = Music
        fields = ['title', 'composer', 'summary', 'ismn', 'genre']
class GenreForm(ModelForm):
    """Form for creating/editing Genre records."""
    class Meta:
        model = Genre
        fields = ['music_name', 'Autour_name', 'date_of_make']
class RadioForm(ModelForm):
    """Form for creating/editing Radio records."""
    class Meta:
        model = Radio
        fields = ['radio_id', 'radio_name', 'radio_description']
class AlbumForm(ModelForm):
    """Form for creating/editing Album records."""
    class Meta:
        model = Album
        fields = ['album_id', 'album_name', 'album_description', 'album_music', 'album_genre']
class ChartsForm(ModelForm):
    """Form for creating/editing Charts records."""
    class Meta:
        model = Charts
        fields = ['charts_id', 'charts_name', 'charts_description']
__all__ = ['write', 'wordpress']
import os
import sys
import logging
import importlib
from zrong.base import slog, add_log_handler
import wpcmd.base
def _build(name, conf, args, parser):
    """Import the sub-command module *name* and run its build() hook."""
    importlib.import_module(name).build(conf, args, parser)
def main():
    """Entry point: configure logging, load build.conf.py, dispatch subcommand.

    Exits with status 1 (after writing a template config) on first run when
    build.conf.py does not exist yet.
    """
    add_log_handler(slog,
                    handler=logging.StreamHandler(sys.stdout),
                    debug=logging.DEBUG)
    gconf = wpcmd.base.Conf()
    # Work dir is the parent of the directory containing this file.
    workDir = os.path.abspath(
        os.path.join(os.path.split(
            os.path.abspath(__file__))[0], os.pardir))
    confFile = os.path.join(workDir, "build.conf.py")
    if os.path.exists(confFile):
        gconf.readFromFile(confFile)
    else:
        # First run: write a template config and ask the user to edit it.
        gconf.init(workDir, confFile)
        slog.info('Please modify build.conf.py!')
        exit(1)
    gargs, subParser = wpcmd.base.check_args()
    if gargs:
        _build(gargs.sub_name, gconf, gargs, subParser)
|
# Actually compute the histograms, yummy
# Dump out to netCDF
import os
import numpy
import netCDF3
# NOTE: this fragment is Python 2 (print statement below).
years = range(1982, 2009)
# Forecast start days (MMDD) for each year's runs.
days = ['1111', '1112', '1113', '1114', '1115',
        '1121', '1122', '1123', '1124', '1125',
        '1129', '1130', '1201', '1202', '1203']
# Figure out our precipitation bins
# Floor: 0.25 mm/hour
# Max: 75 mm/hour ??
# Interval: 0.25 mm/hour
# (divide by 3600 to convert mm/hour to the kg m-2 s-1 units of 'pr')
bins = numpy.arange( 0.25 / 3600.0, 75.0 / 3600.0, 0.25 / 3600.)
output = netCDF3.Dataset("mred_precip_histogram.nc", 'w')
output.createDimension("bins", len(bins) -1 )
output.createDimension("runid", len(years) * len(days) )
data = output.createVariable("count", numpy.float32, ("runid", "bins") )
data.long_name = "Grid cell count"
ncbins = output.createVariable("bins", numpy.float32, ("bins") )
ncbins.long_name = "Precipitation Bins"
ncbins.units = "kg m-2 s-1"
# Store the left edge of each bin.
ncbins[:] = bins[:-1]
cnt = 0
# One histogram row per (year, day) run, pooling data from all 6 boxes.
for year in years:
    for day in days:
        # NOTE(review): numpy.zeros(None, 'f') — presumably meant as an
        # empty seed array for numpy.append; numpy.zeros(0, 'f') would be
        # the explicit form. Confirm on the numpy version in use.
        ar = numpy.zeros( None, 'f')
        for box in range(6):
            fp = "final.prechist/box%s_pr_IMM5_%s%s03_CFS01.nc" %(box, year, day)
            # Missing box files are silently skipped.
            if not os.path.isfile(fp):
                continue
            nc = netCDF3.Dataset(fp, 'r')
            ar = numpy.append( ar, nc.variables['pr'][:] )
            nc.close()
        hist, edges = numpy.histogram(ar, bins)
        print numpy.shape(hist)
        data[cnt,:] = hist
        cnt += 1
output.close()
|
#载入必要的模块
import pygame
# Initialise pygame (required before using the font module).
pygame.init()
text = u"文字转图片"
# Choose the font face and size.
font = pygame.font.SysFont('Microsoft YaHei', 64)
# Render the text to a surface; the first colour tuple is the text colour,
# the second is the background colour.
ftext = font.render(text, True, (65, 83, 130),(255, 255, 255))
# Save the rendered surface to an image file at this path.
pygame.image.save(ftext, "image.jpg")
from _typeshed import Incomplete
# Type stub: dictionary of attribute-attribute mixing counts.
def attribute_mixing_dict(
    G, attribute, nodes: Incomplete | None = None, normalized: bool = False
): ...
# Type stub: attribute mixing matrix; mapping maps attribute values to indices.
def attribute_mixing_matrix(
    G,
    attribute,
    nodes: Incomplete | None = None,
    mapping: Incomplete | None = None,
    normalized: bool = True,
): ...
# Type stub: dictionary of degree-degree mixing counts (x/y select in/out degree).
def degree_mixing_dict(
    G,
    x: str = "out",
    y: str = "in",
    weight: Incomplete | None = None,
    nodes: Incomplete | None = None,
    normalized: bool = False,
): ...
# Type stub: degree mixing matrix; mapping maps degree values to indices.
def degree_mixing_matrix(
    G,
    x: str = "out",
    y: str = "in",
    weight: Incomplete | None = None,
    nodes: Incomplete | None = None,
    normalized: bool = True,
    mapping: Incomplete | None = None,
): ...
def mixing_dict(xy, normalized: bool = False): ...
|
"""Imports env vars into local variables for import via config.py"""
# noqa
import os
# Deployment environment name (e.g. 'development', 'production').
APP_NODE = os.environ.get('APP_NODE', 'development')
# Prefix prepended to API routes; empty by default.
API_PREFIX = os.environ.get('API_PREFIX', '')
# NOTE(review): env var values are strings, so any non-empty value —
# including "false" or "0" — makes DEBUG truthy; only an unset variable
# yields the boolean False default. Confirm consumers expect this.
DEBUG = os.environ.get('WEBHOOK_DEBUG', False)
# RabbitMQ connection settings (all default to empty strings).
RABBIT_SERVER = os.environ.get("RABBIT_SERVER", "")
RABBIT_PORT = os.environ.get("RABBIT_PORT", "")
RABBIT_USER = os.environ.get("RABBIT_USER", "")
RABBIT_PASS = os.environ.get("RABBIT_PASS", "")
|
#coding:utf-8
#!/usr/bin/env python
from game.models.inventory import inventory
from game.routine.equipment import equipment
def strengthen(request):
    """Strengthen (upgrade) a piece of equipment.

    Query params: id, is_use_gem ('yes' enables gem use), owner_team_position.
    """
    id = request.GET['id']
    isUseGem = request.GET['is_use_gem']
    usr = request.user
    isUseGem = isUseGem == 'yes'  # only the literal 'yes' counts as true
    ownerTeamPosition = int(request.GET['owner_team_position'])
    return equipment.strengthen(usr, id, ownerTeamPosition, isUseGem)
def strengthen_reset(request):
    """Reset the strengthen cooldown timer for the current user."""
    usr = request.user
    return equipment.strengthen_reset(usr)
def equip(request):
    """Equip an item onto a team member.

    Query params: team_position, equipment_id, owner_team_position.
    """
    teamPosition = int(request.GET['team_position'])
    equipmentid = request.GET['equipment_id']
    ownerTeamPosition = int(request.GET['owner_team_position'])
    usr = request.user
    return equipment.equip(usr, teamPosition, ownerTeamPosition, equipmentid)
def sell(request):
    """Sell one or more pieces of equipment.

    Reads equipment_id1..equipment_idN (N < 50) from the query string,
    stopping at the first missing key; equipment_id1 is mandatory.
    """
    equipmentid = [request.GET['equipment_id1']]
    for i in range(2, 50):
        keyname = 'equipment_id' + str(i)
        # BUG FIX: dict.has_key() was removed in Python 3 (and long
        # deprecated in Python 2); the 'in' operator is the equivalent
        # membership test on both versions.
        if keyname in request.GET:
            equipmentid.append(request.GET[keyname])
        else:
            break
    usr = request.user
    return equipment.sell(usr, equipmentid)
def degradation(request):
    """Downgrade a piece of equipment (query param: id)."""
    equipmentid = request.GET['id']
    usr = request.user
    return equipment.degradation(usr, equipmentid)
def assembly(request):
    """Assemble equipment (query param: equipmentid)."""
    usr = request.user
    equipmentid = request.GET['equipmentid']
    return equipment.assembly(usr, equipmentid)
# For each of t test cases, read a string like "9!!" (a number followed by
# one or more '!') and print the multifactorial:
# n * (n-k) * (n-2k) * ... (while the factor is > 1), where k is the number
# of trailing '!' characters.
t = int(input())
for i in range(t):
    entrada = input()
    exclamacao = entrada.index("!")      # position of the first '!'
    n = int(entrada[:exclamacao])        # the number before the '!'s
    k = int(len(entrada)) - exclamacao   # how many '!'s => step size
    dif = n
    kfact = 1
    cont = 0
    while dif > 1:
        kfact *= dif
        cont += 1
        dif = n - cont*k
    print(kfact)
|
def main():
    """Prompt for a number and run the game-of-three reduction on it."""
    number = int(input("enter a number : "))
    print(gameOfThree(number))
def gameOfThree(number):
    """Reduce *number* to 1 by repeatedly dividing by 3, printing each step.

    If the number is not divisible by 3, 1 is first added or subtracted to
    make it so (printed as "<n> + 1" / "<n> - 1"). Always returns 1.
    """
    if (number == 1):
        return 1
    # BUG FIX: use integer (floor) division instead of float '/'. The float
    # version accumulated representation error for large inputs, while the
    # printed output for valid integer inputs is unchanged.
    if (number % 3 == 0):
        print(int(number))
        return gameOfThree(number // 3)
    elif (number % 3 == 1):
        print(str(int(number)) + " - 1")
        return gameOfThree((number - 1) // 3)
    else:
        print(str(int(number)) + " + 1")
        return gameOfThree((number + 1) // 3)
# Script entry point.
if __name__ == '__main__':
    main()
|
import os.path as pth
import matplotlib.pyplot as plt
import numpy as np
import sys
from sklearn import metrics
# Get true and predicted labels of classes
def GetLabels(y_true_file, y_score_file):
    """Load true labels and predicted scores from two text files.

    Prints a message and returns ([], []) when either file is missing;
    otherwise returns the file contents as plain Python lists.
    """
    if pth.exists(y_true_file) and pth.exists(y_score_file):
        true_labels = np.loadtxt(y_true_file, delimiter=' ').tolist()
        pred_scores = np.loadtxt(y_score_file, delimiter=' ').tolist()
        return true_labels, pred_scores
    print('Cannot read files with labels!')
    return [], []
# Calculate AUC-ROC value
def CalculateAucRoc(y_true, y_score):
    """Return (fpr, tpr, auc) for the given true labels and scores."""
    fpr, tpr, _thresholds = metrics.roc_curve(y_true, y_score)
    return fpr, tpr, metrics.roc_auc_score(y_true, y_score)
# Build ROC curve
def BuildRocCurve(fpr, tpr):
    """Display the ROC curve (red) against the chance diagonal (dashed navy).

    Blocks on plt.show() until the window is closed.
    """
    plt.figure()
    plt.plot(fpr, tpr, color='red', lw=3, label='ROC curve')
    plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend(loc="lower right")
    plt.show()
# Command-line driver: expects the true-label file and the score file paths.
if (len(sys.argv) != 3): # checking of right call
    print('The call of this script looks like this:\n' +
          ' python aucroc.py y_true_file y_score_file')
else:
    y_true, y_score = GetLabels(sys.argv[1], sys.argv[2])
    if y_true and y_score: # main part of script
        fpr, tpr, auc = CalculateAucRoc(y_true, y_score)
        print('AUC-ROC for class 1: ' + str(auc))
        BuildRocCurve(fpr, tpr)
# coding: utf-8
import sys
from docplex.cp.model import CpoModel
'''
O problema da mochila é um problema de optimização combinatória.
O nome dá-se devido ao modelo de uma situação em que é necessário
preencher uma mochila com objetos de diferentes pesos e valores.
O objetivo é que se preencha a mochila com o maior valor possível,
não ultrapassando o peso máximo.
https://pt.wikipedia.org/wiki/Problema_da_mochila#0/1
'''
# Create CPO model
def knapsacCplex(mdl, k, profits, weights, n):
    """Build the 0/1 knapsack model on the given CpoModel *mdl*.

    k - knapsack capacity; profits/weights - per-item value lists of length
    n. Adds a maximisation objective and the capacity constraint to mdl
    (does not solve it).
    """
    # -----------------------------------------------------------------------------
    # Initialise data: one 0/1 decision variable per item.
    # -----------------------------------------------------------------------------
    zero_or_one = [mdl.integer_var(min=0, max=1, name="X{}: P {} W {}".format(i+1, profits[i], weights[i]))
                   for i in range(n)]  # decision variables
    # -----------------------------------------------------------------------------
    # -----------------------------------------------------------------------------
    # Build the model
    # -----------------------------------------------------------------------------
    # Objective: maximise the total profit of the selected items.
    fo = mdl.sum(p*x for p, x in zip(profits, zero_or_one))
    mdl.add(mdl.maximize(fo))
    # Subject to: total weight must not exceed the capacity k.
    restricoes = mdl.sum(w*x for w, x in zip(weights, zero_or_one))
    mdl.add(restricoes <= k)
    # -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def read_from(filepath):
    """Parse a knapsack instance file.

    The first successfully parsed line holds "n k" (item count, capacity);
    each subsequent parsed line holds "profit weight". Lines that do not
    split into exactly two numeric fields are skipped, as in the original.

    Returns (k, profits, weights, n).
    """
    n = None
    k = 0
    profits = []
    weights = []
    countline = 0  # counts only successfully parsed lines
    # BUG FIX: use a with-statement so the file handle is always closed
    # (the original opened it and never closed it), and narrow the bare
    # except to ValueError (bad split / bad number) so real errors surface.
    with open(filepath, "r") as f:
        for line in f:
            try:
                a, b = line.strip().split(' ')
                if countline == 0:
                    n, k = int(a), int(b)
                else:
                    profits.append(float(a))
                    weights.append(float(b))
                countline += 1
            except ValueError:
                pass
    return k, profits, weights, n
# Command-line driver: reads the instance from the file given as argv[1],
# builds the model and solves it with a 60 s time limit on a single worker.
# (A hard-coded sample instance — capacity 850, 50 items — was kept here,
# commented out, for reference.)
if __name__ == "__main__":
    # k = 850 # capacidade da mochila
    # profits = [360, 83, 59, 130, 431, 67, 230, 52, 93, 125, 670, 892, 600, 38, 48, 147,
    #            78, 256, 63, 17, 120, 164, 432, 35, 92, 110, 22, 42, 50, 323, 514, 28,
    #            87, 73, 78, 15, 26, 78, 210, 36, 85, 189, 274, 43, 33, 10, 19, 389, 276,
    #            312] # profits = lucros
    # weights = [7, 0, 30, 22, 80, 94, 11, 81, 70, 64, 59, 18, 0, 36, 3, 8, 15, 42, 9, 0,
    #            42, 47, 52, 32, 26, 48, 55, 6, 29, 84, 2, 4, 18, 56, 7, 29, 93, 44, 71,
    #            3, 86, 66, 31, 65, 0, 79, 20, 65, 52, 13] # weights = pesos
    # n = len(weights)
    mdl = CpoModel()
    k, profits, weights, n = read_from(sys.argv[1])
    knapsacCplex(mdl, k, profits, weights, n)
    print("\nImprimindo solução....")
    msol = mdl.solve(TimeLimit=60, Workers=1)
    if msol:
        print(msol.print_solution())
        print("Status: " + msol.get_solve_status())
    else:
        print("Nenhuma solução encontrada")
|
'''
Module for creating a line graph representing NYC restaurant grades over time by region as specified by
user.
Author: kk3175
Date: 12/8/2015
Class: DSGA1007, Assignment 10
'''
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
'''
Creates a line graph representing NYC restaurant grades over time by region.
Accepts the following arguments:
(1) Data containing the restaurant date and grade. The data must be formatted as a pandas dataframe
with the following columns (from left to right): Date, Total number of A Grades, Total number of B Grades, and Total number of C Grades.
(2) Geographical region as a string.
Saves the figure to the figures folder.
'''
def plotRestaurantGrades(data, region):
    """Plot A/B/C restaurant grade counts over time for *region*.

    data - pandas DataFrame with columns (left to right): Date, total A
           grades, total B grades, total C grades.
    region - geographical region name (str), used in the title and filename.

    Saves the figure to figures/grade_improvement_<region>.pdf.
    """
    colors = ['yellowgreen', 'magenta', 'black']
    data.plot(color = colors)
    plt.title('Restaurant Grades for %s' %region)
    plt.ylabel('Number of Restaurants')
    plt.savefig('figures/grade_improvement_%s.pdf' %region.lower())
|
# Project 1: Implementation of Go-Back-N Protocol
# Group Member: Daksh Patel ID: 104 030 031
# Group Member: Nyasha Kapfumvuti ID: 104 121 166
# Date: Mar 30th, 2018
import socket
import numpy
import time
import json
from random import randint
# NOTE(review): source indentation was mangled; nesting below is reconstructed.
acked = []       # acknowledged packets (sequence numbers)
unAcked = []     # unacknowledged packets (never used below)
ticker = 0       # 0.2 loss rate = 1/5 packets get "lost" => placed in unAcked
lostItem = 5     # every 5th item gets placed in unacked (never used below)
returnVals = []  # array of values to be returned as acks/unacks (unused)
# NOTE(review): missing () — this binds the function, not the time; unused.
timer = time.localtime
packets = []
packet = ''
server_address = ('localhost', 10000)
serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
serverSocket.bind(server_address)
serverSocket.listen(1)
print('The server is ready to receive')
# Accept one client at a time; ack each received sequence number, with a
# random chance of simulating loss/corruption.
while True:
    print('waiting for a connection')
    connection, client_address = serverSocket.accept()
    try:
        print('client connected:', client_address)
        while True:
            data = connection.recv(1024) # data arrives as a string. Need to convert this back to an array
            newPack = int(data)
            # NOTE(review): randint(0,5)==5 is a 1-in-6 chance, not the 0.2
            # loss rate stated in the comments above — confirm intent.
            if(randint(0,5) == 5):
                print('packet was lost/corrupted')
                connection.sendto(str(newPack).encode(), server_address)
            else:
                if newPack not in acked:
                    acked.append(newPack)
                    print('recieved sequence # ', str(newPack), ' successfully. Sending ack')
                    connection.sendto(str(newPack).encode(), server_address)
                    print('sent')
            ticker += 1 # loss rate leads to every nth item getting lost
            if data:
                # send acknowledgement
                # connection.sendto(str(newPack).encode(), server_address)
                print('')
            else:
                break
    finally:
        connection.close()
        print(acked)
|
# Make School OOP Coding Challenge Python Problem 3
import sys
# Create a class called Tiger.
# It should contain two instance variables: name and favoriteFood
# It should contain eat and sleep instance methods
# Write an initializer that takes a name.
class Tiger(object):
    """A tiger with a name and a fixed favourite food ("meat")."""

    def __init__(self, name):
        self.name = name
        self.favoriteFood = "meat"

    def sleep(self):
        """Print "<name> sleeps for 8 hours"."""
        # COMPAT FIX: the original used Python-2-only print statements;
        # single-argument print(...) behaves identically on Python 2 and 3.
        print("%s sleeps for 8 hours" % self.name)

    def eat(self, food):
        """Print "<name> eats <food>"; if it is the favourite food, also
        print "YUM! <name> wants more <food>"."""
        print("%s eats %s" % (self.name, food))
        if self.favoriteFood == food:
            print("YUM! %s wants more %s" % (self.name, food))
# Test the Tiger class and its instance methods
def test():
    """Read a count N, then N (name, food) line pairs from stdin, and
    exercise Tiger.eat/sleep for each pair."""
    def getline():
        # Read a line from stdin and strip whitespace
        return sys.stdin.readline().strip()
    # Get the number of animals
    animalCount = int(getline())
    # Iterate through the input
    for count in range(animalCount):
        # Get the animal's name and food
        name = getline()
        food = getline()
        # Create a Tiger object and test its instance methods
        tiger = Tiger(name)
        tiger.eat(food)
        tiger.sleep()
# Script entry point.
if __name__ == "__main__":
    test()
|
#!/usr/bin/python
#-----------------------------------------------------------------------------------------------------------------------------------------
# Script Description:
# Module to perform ray tracing to get path lengths for attenuation tomography.
# Input variables:
# Output variables:
# Created by Tom Hudson, 4th August 2020
# Notes:
# Depends upon ttcrpy - An external python module for computing travel times and ray tracing (see Nasr et al 2020)
# ttcrpy depends on vtk
#-----------------------------------------------------------------------------------------------------------------------------------------
# Import neccessary modules:
import os, sys
from re import A
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# from mayavi import mlab
# import plotly.graph_objects as go
# from plotly.subplots import make_subplots
import pickle
import time
import gc
from scipy.sparse.linalg import lsqr
from scipy.interpolate import griddata
import copy
import NonLinLocPy
import ttcrpy.rgrid as ttcrpy_rgrid
from scipy.ndimage import gaussian_filter
from scipy.stats import multivariate_normal
#----------------------------------------------- Define constants and parameters -----------------------------------------------
# Define any constants/parameters:
#----------------------------------------------- End: Define constants and parameters -----------------------------------------------
#----------------------------------------------- Define main functions -----------------------------------------------
# def batch_gradient_descent(Y, G, m_init, lr=0.01, n_iter=100):
# """Function to perform batch gradient decent to solve a system of linear equations of the form:
# Y = G.m.
# Inputs:
# Y - The Y values (1D np array).
# G - The G matrix (2D np array).
# m_init - The initial starting model.
# lr - The learning rate. Default is 0.01 (float).
# n_iter - The number of iterations to perform. Default is 100 (int).
# Returns:
# m = The model output.
# """
# # Find number of samples:
# n_samp = len(Y)
# # Perform iterations:
# m = m_init.copy() # Set initial guess
# for i in range(n_iter):
# Y_hat = np.dot(G, m) # Find Y for current model solution
# cost = # Mean-squared error cost
# m = m
class rays:
    """Class to obtain rays and path lengths for tomography, given event
    locations, station locations, and grid nodes. Describes the rays for one
    particular seismic phase.
    Note: All units are SI units, apart from km rather than m scales."""

    def __init__(self, x_node_labels, y_node_labels, z_node_labels, vel_grid, QpQs_inv_constraint_Qp_grid=None, n_threads=1):
        """Initialise the rays class.
        Inputs:
        x_node_labels - Array of x labels for vel_grid nodes, in km. (1D np array)
        y_node_labels - Array of y labels for vel_grid nodes, in km. (1D np array)
        z_node_labels - Array of z labels for vel_grid nodes, in km. (1D np array)
        vel_grid - A 3D grid describing the velocity, in km/s, for a particular
                   seismic phase. (np array, of shape x,y,z)
        Optional:
        QpQs_inv_constraint_Qp_grid - A 3D grid specifying Qp values if performing
                   constrained Qp/Qs inversion (of Wei and Wiens (2020)).
                   (np array, of shape x,y,z)
        n_threads - Number of threads to try and use.
        """
        # Assign the grids:
        self.x_node_labels = x_node_labels
        self.y_node_labels = y_node_labels
        self.z_node_labels = z_node_labels
        self.n_threads = n_threads
        self.grid = ttcrpy_rgrid.Grid3d(x_node_labels, y_node_labels, z_node_labels, cell_slowness=False, n_threads=self.n_threads)
        self.vel_grid = vel_grid
        self.QpQs_inv_constraint_Qp_grid = QpQs_inv_constraint_Qp_grid
        # Initialise other key variables:
        self.rays_coords = []  # Will store ray coords of each (xs,ys,zs) as list of arrays
        self.rays_cell_path_lengths_grids = []  # Will store ray cell path lengths for each ray
        #self.thread_no = 1 # Number of threads to use for calculations

    def find_nearest(self, array, value):
        """Return (nearest value, its index) in *array* for the given value."""
        array = np.asarray(array)
        idx = (np.abs(array - value)).argmin()
        return array[idx], idx

    def add_event_rays(self, station_coords, event_coords):
        """Trace rays for one event and append their coords to self.rays_coords.
        Required inputs:
        station_coords - An array of n x station coords for the event. (np array of shape n x 3)
        event_coords - A list of event coords (list, of len 3)"""
        # Calculate rays:
        tt, curr_event_rays = self.grid.raytrace(event_coords, station_coords, 1./self.vel_grid, return_rays=True)#, thread_no=self.thread_no)
        # Append to data stores:
        for i in range(len(curr_event_rays)):
            self.rays_coords.append(curr_event_rays[i])

    def calc_rays_cells_path_lengths(self):
        """Calculate ray cell path lengths from the stored ray coordinates.
        Appends one flattened/ravelled path-length grid per ray to
        self.rays_cell_path_lengths_grids, for use in inversions."""
        # Do some initial clearing up, if required (forget any previous
        # consolidation state so indices stay consistent):
        try:
            self.unsampled_cell_idxs
            del self.unsampled_cell_idxs
            del self.sampled_cell_idxs
            gc.collect()
        except AttributeError:
            print('')
        # Loop over all rays, assigning path lengths to grid:
        for j in range(len(self.rays_coords)):
            if (j+1) % 1000 == 0:
                print('Processing rays for ray ',j+1,'/',len(self.rays_coords))
            ray = self.rays_coords[j]
            # Create new grid and fill:
            new_cell_path_lengths_grid = np.zeros(self.vel_grid.shape)
            # Loop over individual ray sections, calculating path length for a particular cell:
            # A ray with a single coordinate has no sections, so detect that first:
            try:
                ray.shape[1]
                multi_rays_exist = True
            except IndexError:
                multi_rays_exist = False
                #print('Skipping ray, as only one set of coordinates.')
            # And calculate cell path lengths, if exists:
            if multi_rays_exist:
                for i in range(ray.shape[0] - 1):
                    # Get cell indices for current ray section:
                    val, idx_x = self.find_nearest(self.x_node_labels, ray[i, 0])
                    val, idx_y = self.find_nearest(self.y_node_labels, ray[i, 1])
                    val, idx_z = self.find_nearest(self.z_node_labels, ray[i, 2])
                    # And calculate current path length, and append to grid:
                    path_len_curr = np.sqrt(np.sum((ray[i+1,:] - ray[i,:])**2))
                    new_cell_path_lengths_grid[idx_x, idx_y, idx_z] = path_len_curr
            # Append new grid to data store:
            self.rays_cell_path_lengths_grids.append(new_cell_path_lengths_grid.ravel())
        # And convert from list to path lengths second order tensor:
        self.rays_cell_path_lengths_grids = np.array(self.rays_cell_path_lengths_grids)

    def find_number_of_ray_passes_through_cells(self):
        """Count the number of ray passes through each cell.
        Creates the object self.ray_sampling_grid."""
        # Create grid of ray sampling:
        self.ray_sampling_grid = np.zeros(self.vel_grid.shape)
        # Loop over all rays, assigning path lengths to grid:
        for j in range(len(self.rays_coords)):
            if (j+1) % 1000 == 0:
                print('Processing rays for ray ',j+1,'/',len(self.rays_coords))
            ray = self.rays_coords[j]
            # Loop over individual ray sections, counting passes per cell:
            # A ray with a single coordinate has no sections, so detect that first:
            try:
                ray.shape[1]
                multi_rays_exist = True
            except IndexError:
                multi_rays_exist = False
                #print('Skipping ray, as only one set of coordinates.')
            # And count the pass, if it exists:
            if multi_rays_exist:
                for i in range(ray.shape[0] - 1):
                    # Get cell indices for current ray section:
                    val, idx_x = self.find_nearest(self.x_node_labels, ray[i, 0])
                    val, idx_y = self.find_nearest(self.y_node_labels, ray[i, 1])
                    val, idx_z = self.find_nearest(self.z_node_labels, ray[i, 2])
                    # And increment the pass count for that cell:
                    self.ray_sampling_grid[idx_x,idx_y,idx_z] += 1

    def consolidate_rays_cell_path_lengths_grids(self):
        """Consolidate rays_cell_path_lengths_grids to only hold non-zero
        (i.e. ray-sampled) cells."""
        # Protect from running this function twice, as if we did, we would
        # lose the non-sampled array information:
        try:
            self.unsampled_cell_idxs
            run_func = False
            print('Not consolidated again, as already undetaken.')
        except AttributeError:
            run_func = True
        if run_func:
            # Find ray passes, if not already done:
            try:
                self.ray_sampling_grid
            except AttributeError:
                self.find_number_of_ray_passes_through_cells()
            # Find indices of non-sampled cells:
            ray_sampling_grid_ravelled = self.ray_sampling_grid.ravel()
            self.unsampled_cell_idxs = np.argwhere(ray_sampling_grid_ravelled == 0)[:,0]
            self.sampled_cell_idxs = np.argwhere(ray_sampling_grid_ravelled != 0)[:,0] # (And find sampled cells, for easy reconstruction later)
            # If performing constrained Qp/Qs inversion, also find indices where Qp has not been solved:
            if not self.QpQs_inv_constraint_Qp_grid is None:
                unsolved_Qp_cell_idxs = np.argwhere(np.isnan(self.QpQs_inv_constraint_Qp_grid.ravel()))[:,0]
                self.unsampled_cell_idxs = np.unique(np.append(self.unsampled_cell_idxs, unsolved_Qp_cell_idxs)) # Update unsampled indices
                all_idxs_tmp = np.arange(len(ray_sampling_grid_ravelled))
                self.sampled_cell_idxs = all_idxs_tmp[np.argwhere(np.in1d(all_idxs_tmp, self.unsampled_cell_idxs) == False)].flatten() # Update sampled indices (I.e. Find values that aren't in unsampled_cell_idxs)
            # And remove non-sampled cells from rays_cell_path_lengths_grids:
            self.rays_cell_path_lengths_grids = np.delete(self.rays_cell_path_lengths_grids, self.unsampled_cell_idxs, axis=1)
            # And find consolidated vel_grid:
            self.vel_grid_ravelled = self.vel_grid.ravel()
            self.vel_grid_ravelled = np.delete(self.vel_grid_ravelled, self.unsampled_cell_idxs, axis=0)
            # And consolidate Qp grid, if performing constrained Qp/Qs inversion:
            if not self.QpQs_inv_constraint_Qp_grid is None:
                self.QpQs_inv_constraint_Qp_grid_ravelled = self.QpQs_inv_constraint_Qp_grid.ravel()
                self.QpQs_inv_constraint_Qp_grid_ravelled = np.delete(self.QpQs_inv_constraint_Qp_grid_ravelled, self.unsampled_cell_idxs, axis=0)
            else:
                self.QpQs_inv_constraint_Qp_grid_ravelled = None
            print("Consolidated arrays by removing non-sampled cells. \n The info on these removed cells is held in: self.unsampled_cell_idxs.")
            # And tidy:
            gc.collect()

    def plot_vel_model_slice(self, slice_idx=0, slice_axis=0):
        """Plot a 2D slice through the velocity model.
        Optional inputs:
        slice_idx - The slice index to slice the model for (int)
        slice_axis - The axis to slice for (int)"""
        plt.figure()
        if slice_axis == 0:
            plt.imshow(self.vel_grid[slice_idx,:,:].transpose())
            plt.xlabel('y-direction (indices)')
            plt.ylabel('z-direction (indices)')
        elif slice_axis == 1:
            plt.imshow(self.vel_grid[:,slice_idx,:].transpose())
            plt.xlabel('x-direction (indices)')
            plt.ylabel('z-direction (indices)')
        elif slice_axis == 2:
            plt.imshow(self.vel_grid[:,:,slice_idx].transpose())
            plt.xlabel('x-direction (indices)')
            plt.ylabel('y-direction (indices)')
        plt.colorbar(label='Velocity ($m$ $s^{-1}$)')
        plt.show()

    def plot_all_ray_paths(self):
        """Plot every stored ray path in a single 3D figure (z inverted)."""
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        for r in self.rays_coords:
            ax.plot(r[:,0], r[:,1], r[:,2],'-k', alpha=0.1)
        ax.invert_zaxis()
        ax.set_title("All ray paths")
        ax.set_xlabel('X (km)')
        ax.set_ylabel('Y (km)')
        ax.set_zlabel('Z (km)')
        fig.tight_layout()
        plt.show()

    def save_ray_info(self, out_fname='ray_info.pkl'):
        """Saves ray class object to file.
        Optional inputs:
        out_fname - Path/filename to save data to (str)
        """
        # BUG FIX: use a with-statement so the pickle file is flushed and
        # closed (the original never closed the handle).
        with open(out_fname, 'wb') as f:
            pickle.dump(self, f)
        print('Saved class object to file: ',out_fname)
        print('(Note: To load, load using pickle as rb)')

    def load_ray_info(self, in_fname):
        """Loads ray class object from file.
        Required inputs:
        in_fname - Path/filename to load data from (str)
        """
        # BUG FIX: the original ignored in_fname and always opened the
        # hard-coded 'ray_info_S.pkl'; it also never closed the file.
        with open(in_fname, 'rb') as f:
            self.rays = pickle.load(f)
        return(self.rays)
class inversion:
    """Class to perform attenuation tomography inversion."""
    def __init__(self, rays):
        """Function to initialise inversion class.
        Inputs:
        rays - A rays class containing ray tracing, as in the class described in this module.
        """
        # Assign input arguments to the class object:
        self.rays = rays
        # Assign other parameters (defaults; overridden by prep/inversion functions):
        self.G = []                       # Tomography tensor (set by prep_rays_for_inversion)
        self.t_stars = []                 # t* observations (set by prep_t_stars_... or checkerboard)
        self.seismic_phase_to_use = 'P' # Can be P or S
        self.Q_stdev_filt = 400. # Standard deviation in Q filter to use
        self.inv_info_fname = "inv_info.pkl"
        # And print any info. on inversion:
        if not self.rays.QpQs_inv_constraint_Qp_grid is None:
            print("Note: Performing Qp constrained Qp/Qs inversion as rays.QpQs_inv_constraint_Qp_grid is specified.")
            print("Therefore output will be Qp/Qs.")
    def prep_rays_for_inversion(self):
        """Function to prep the ray-tracing data for the inversion.
        Calculates ray path lengths through the grid cells, removes unsampled
        cells, and builds the tomography tensor self.G (= path length / velocity,
        additionally divided by Qp if performing a Qp constrained Qp/Qs inversion)."""
        # Find ray path lengths, and the tomography tensor:
        self.rays.rays_cell_path_lengths_grids = []
        gc.collect()
        self.rays.calc_rays_cells_path_lengths()
        # Find and plot ray sampling through grid, and consolidate/remove cells with no samples:
        print("Shape before consolidation:", self.rays.rays_cell_path_lengths_grids.shape)
        self.rays.consolidate_rays_cell_path_lengths_grids()
        print("Shape after consolidation:", self.rays.rays_cell_path_lengths_grids.shape)
        # Get tomography tensor:
        self.G = self.rays.rays_cell_path_lengths_grids.copy() / self.rays.vel_grid_ravelled
        # And include 1 / Qp in tomography tensor, if performing Qp constrained Qp/Qs inversion:
        # (As in Wei and Wiens (2020) method)
        if not self.rays.QpQs_inv_constraint_Qp_grid is None:
            self.G = self.G / self.rays.QpQs_inv_constraint_Qp_grid_ravelled
        # And tidy (path lengths are no longer needed once G is formed):
        del self.rays.rays_cell_path_lengths_grids
        gc.collect()
        print('Finished data preparation for inversion')
    def prep_t_stars_from_SeisSrcMoment_for_inversion(self, moment_mags_dict_fname, seismic_phase_to_use, Q_stdev_filt=400.):
        """Function to prep t-star values found using SeisSrcMoment for inversion.
        Inputs:
        moment_mags_dict_fname - Path to the pickled SeisSrcMoment magnitudes
                                 dict, keyed by event filename then station (str)
        seismic_phase_to_use - The seismic phase to use. Can be P or S (str)
        Optional:
        Q_stdev_filt - Reject observations with Q standard deviation above
                       this value. Default is 400. (float)
        """
        # Initialise parameters input into function:
        self.moment_mags_dict_fname = moment_mags_dict_fname
        self.seismic_phase_to_use = seismic_phase_to_use
        self.Q_stdev_filt = Q_stdev_filt
        # Load in magnitudes analysis data:
        # (Note: Found using SeisSrcMoment)
        mag_dict = pickle.load(open(self.moment_mags_dict_fname, 'rb'))
        # Get all t* values, in order for event:
        # (Note: order is really important, as must correspond to ray path lengths below)
        event_fnames = list(mag_dict.keys())
        t_stars = []
        for event_fname in event_fnames:
            nonlinloc_hyp_data = NonLinLocPy.read_nonlinloc.read_hyp_file(event_fname)
            stations = list(nonlinloc_hyp_data.phase_data.keys()) #list(mag_dict[event_fname].keys())
            for station in stations:
                # Check if current station t_star exists and is positive:
                try:
                    t_star_curr = mag_dict[event_fname][station]['t_star']
                except KeyError:
                    continue
                Q_stdev_curr = mag_dict[event_fname][station]['Q_stdev']
                if t_star_curr > 0.:
                    if Q_stdev_curr < Q_stdev_filt:
                        # And check whether any current station for the current t* for the current seismic phase:
                        try:
                            nonlinloc_hyp_data.phase_data[station][seismic_phase_to_use]['StaLoc']['x']
                        except KeyError:
                            continue
                        # And append t* if criteria met:
                        t_stars.append(t_star_curr)
        self.t_stars = np.array(t_stars)
        print('Number of t_star observations to use:', len(self.t_stars))
    def reconstruct_full_threeD_grid_soln(self, m):
        """Function to reconstruct full 3D grid solution.
        Note: This is neccessary since the inversion was undertaken after consolidating/removing zero
        values from G matrix prior to the inversion.
        Inputs:
        m - The model vector output by the inversion, defined only on the
            sampled (consolidated) cells (1D np array)
        Returns:
        Q_tomo_array - An array of 1/Q values output from the inversion."""
        # Add unsampled cells back in (as zeros) then reshape solution back to 3D grid:
        m_all = np.zeros(len(self.rays.vel_grid.ravel()))
        m_all[self.rays.sampled_cell_idxs] = m
        self.Q_tomo_array = np.reshape(m_all, self.rays.vel_grid.shape)
        return(self.Q_tomo_array)
    def perform_inversion(self, lamb=1., Q_init=250., result_out_fname='', perform_diff_inv=False, diff_inv_m0=1.):
        """Function to perform the inversion, using lsqr method.
        Inputs:
        Optional:
        lamb - The damping coefficient/regularisation coefficient to use. Default is 1. (float)
        Q_init - Initial guess of Q value. Can be a single value or a 1D array of flattened
                    values describing Q for each point in the consolidated 3D grid. (float, or 1D
                    array of floats)
        result_out_fname - The path/filename to save the inversion output to. If unspecified by
                            user, will not save to file (str)
        perform_diff_inv - If True, then will invert for the difference in the model from the value <diff_inv_m0>.
                            (bool)
        diff_inv_m0 - m0 value to use if performing a difference inversion (I.e. if <perform_diff_inv> = True).
                        (float)
        Returns:
        Q_tomo_array - The reconstructed full 3D grid of 1/Q values (3D np array)
        """
        # Initialise function input parameters:
        self.lamb = lamb # Damping
        self.Q_init = Q_init # Initial guess at Q
        self.result_out_fname = result_out_fname
        # (Fix: always record the diff-inversion settings, so downstream code,
        # e.g. plot.plot_L_curve, can read them even when perform_diff_inv is False)
        self.perform_diff_inv = perform_diff_inv
        self.diff_inv_m0 = diff_inv_m0
        # perform lsqr inversion:
        x0 = np.ones(self.G.shape[1]) / self.Q_init # Initial guess
        if perform_diff_inv:
            # Perform diff inv:
            # (Eq. 7, Wei and Wiens (2020))
            t_stars_minus_diff = self.t_stars - np.dot(self.G, np.ones(self.G.shape[1])*diff_inv_m0)
            result = lsqr(self.G, t_stars_minus_diff, damp=self.lamb, show=True, x0=x0)
        else:
            # Perform inv:
            result = lsqr(self.G, self.t_stars, damp=self.lamb, show=True, x0=x0)
        self.m = result[0]
        # And save result, if specified:
        if len(result_out_fname) > 0:
            with open(self.result_out_fname, 'wb') as f:
                pickle.dump(result, f)
        # And get reconstructed inversion result:
        # (Adding back in zero values)
        self.Q_tomo_array = self.reconstruct_full_threeD_grid_soln(self.m)
        return(self.Q_tomo_array)
    def perform_multi_lambda_reg_inversion(self, lambs=[1., 0.1, 1e-2, 1e-3, 1e-4], Q_init=250.,
                                            results_out_fname_prefix='result_lsqr_lamb_',
                                            perform_diff_inv=False, diff_inv_m0=1.):
        """Function to perform inversion for mulitple damping coefficients, to find the
        optimal regualarised solution.
        Inputs:
        Optional:
        lambs - The damping coefficient/regularisation coefficients to use. (list of floats)
        Q_init - Initial guess of Q value. Can be a single value or a 1D array of flattened
                    values describing Q for each point in the consolidated 3D grid. (float, or 1D
                    array of floats)
        results_out_fname_prefix - The path/filename prefix to save the inversion output to.
                                    If unspecified by user, will not save to file (str)
        perform_diff_inv - If True, then will invert for the difference in the model from the value <diff_inv_m0>.
                            (bool)
        diff_inv_m0 - m0 value to use if performing a difference inversion (I.e. if <perform_diff_inv> = True).
                        (float)
        """
        # Initialise function input parameters:
        self.lambs = lambs # List of damping/reg. coefficients
        self.Q_init = Q_init # Initial guess at Q
        self.results_out_fname_prefix = results_out_fname_prefix
        # (Fix: always record the diff-inversion settings - see perform_inversion)
        self.perform_diff_inv = perform_diff_inv
        self.diff_inv_m0 = diff_inv_m0
        # Loop over damping coefficients, performing inversion:
        for i in range(len(self.lambs)):
            fname_out = self.results_out_fname_prefix+str(self.lambs[i])+'_'+self.seismic_phase_to_use+'.pkl'
            # Use lsqr method:
            x0 = np.ones(self.G.shape[1]) / self.Q_init # Initial guess
            if perform_diff_inv:
                # Perform diff inv:
                # (Eq. 7, Wei and Wiens (2020))
                t_stars_minus_diff = self.t_stars - np.dot(self.G, np.ones(self.G.shape[1])*diff_inv_m0)
                result = lsqr(self.G, t_stars_minus_diff, damp=self.lambs[i], show=True, x0=x0)
            else:
                # Perform inv:
                result = lsqr(self.G, self.t_stars, damp=self.lambs[i], show=True, x0=x0)
            # Save result:
            with open(fname_out, 'wb') as f:
                pickle.dump(result, f)
    def save_inversion_obj(self, out_fname='inv_info.pkl'):
        """Saves inversion class object to file.
        (Fix: docstring previously incorrectly described loading.)
        Optional inputs:
        out_fname - Path/filename to save data to (str)
        """
        self.inv_info_fname = out_fname
        try:
            with open(out_fname, 'wb') as f:
                pickle.dump(self, f)
        except OverflowError:
            # pickle cannot serialise objects > 4gb here, so retry without
            # the (potentially very large) G matrix:
            inv_out = copy.deepcopy(self)
            inv_out.G = []
            gc.collect()
            with open(out_fname, 'wb') as f:
                pickle.dump(inv_out, f)
            del inv_out
            gc.collect()
            print('Note: Failed to save G, as >4gb')
        print('Saved class object to file: ',out_fname)
        print('(Note: To load, load using pickle as rb)')
    def load_inversion_obj(self, in_fname=None):
        """Loads inversion class object from file.
        (Bug fix: previously loaded a hard-coded ray file, 'ray_info_S.pkl',
        into self.rays rather than loading an inversion object.)
        Optional inputs:
        in_fname - Path/filename to load data from. If not specified, defaults
                   to self.inv_info_fname (str)
        Returns:
        inv - The loaded inversion class object.
        """
        if in_fname is None:
            in_fname = self.inv_info_fname
        with open(in_fname, 'rb') as f:
            inv = pickle.load(f)
        return(inv)
class plot:
    """Class to plot attenuation tomography inversion results."""
    def __init__(self, rays, inv):
        """Function to initialise plot class.
        Inputs:
        rays - Ray tracing class containing info on ray paths. (class object)
        inv - Inversion class containing info on the inversion (class object)
        """
        # Assign input arguments to the class object:
        self.rays = rays
        self.inv = inv
        # Assign other parameters (phase/filter settings mirror the inversion):
        self.G = []
        self.t_stars = []
        self.seismic_phase_to_use = self.inv.seismic_phase_to_use # Can be P or S
        self.Q_stdev_filt = self.inv.Q_stdev_filt # Standard deviation in Q filter to use
    def plot_L_curve(self):
        """Function to plot L-curve analysis for choice of damping/
        regularisation parameter.
        (Loads each lsqr result saved by inv.perform_multi_lambda_reg_inversion()
        and plots solution norm vs. residual norm, annotated with the damping
        parameter.)
        """
        # Calculate 2-norms to find L-curve:
        soln_norms = np.zeros(len(self.inv.lambs))
        res_norms = np.zeros(len(self.inv.lambs))
        # (Robustness fix: perform_diff_inv may never have been assigned on the
        # inversion object, so default to False if missing):
        perform_diff_inv = getattr(self.inv, 'perform_diff_inv', False)
        for i in range(len(self.inv.lambs)):
            fname_in = self.inv.results_out_fname_prefix+str(self.inv.lambs[i])+'_'+self.inv.seismic_phase_to_use+'.pkl'
            result = pickle.load(open(fname_in, 'rb'))
            m = result[0]
            soln_norms[i] = np.sqrt(np.sum(m**2))
            if perform_diff_inv:
                t_stars_minus_diff = self.inv.t_stars - np.dot(self.inv.G, np.ones(self.inv.G.shape[1])*self.inv.diff_inv_m0)
                res_norms[i] = np.sqrt(np.sum((np.matmul(self.inv.G,m) - t_stars_minus_diff)**2))
            else:
                res_norms[i] = np.sqrt(np.sum((np.matmul(self.inv.G,m) - self.inv.t_stars)**2))
        # And plot results:
        plt.figure()
        plt.plot(res_norms, soln_norms)
        for i in range(len(self.inv.lambs)):
            plt.annotate(str(self.inv.lambs[i]), (res_norms[i], soln_norms[i]))
        plt.xlabel('Residual norms $||A x - b||_2$')
        plt.ylabel('Solution norms $|| x ||_2$')
        plt.show()
    def psuedo_threeD_interpolation(self):
        """Function to perform psuedo-3D interpolation of results.
        (Note: Currently interpolates in X-Y plane)
        (Note: Currently only interpolates for real, physical Q values (i.e. > 0))
        Returns:
        opt_Q_tomo_array_interp - The interpolated tomography array (3D np array)
        """
        # Setup requried data:
        X, Y = np.meshgrid(self.rays.x_node_labels, self.rays.y_node_labels)
        self.opt_Q_tomo_array_interp = np.zeros(self.rays.vel_grid.shape)
        # Loop over 2D planes in Z:
        for i in range(len(self.rays.z_node_labels)):
            # And select non-zeros values only:
            non_zero_idxs = np.argwhere(self.opt_Q_tomo_array[:,:,i] > 0.)
            # And check that there are some non-zero values:
            # (interpolation scheme needs at least 4 data points!)
            if non_zero_idxs.shape[0] > 4.:
                x_idxs = non_zero_idxs[:,0]
                y_idxs = non_zero_idxs[:,1]
                points = np.zeros((len(x_idxs),2))
                points[:,0] = X[x_idxs,y_idxs].ravel()
                points[:,1] = Y[x_idxs,y_idxs].ravel()
                values = self.opt_Q_tomo_array[x_idxs,y_idxs,i].ravel()
                gridded_data = griddata(points, values, (X, Y), method='linear')
                self.opt_Q_tomo_array_interp[:,:,i] = gridded_data
        # And save interpolated result:
        fname_out = 'opt_Q_tomo_array_interp_'+self.inv.seismic_phase_to_use
        np.save(fname_out, self.opt_Q_tomo_array_interp)
        print('Saved interpolated data to: ', fname_out)
        return(self.opt_Q_tomo_array_interp)
    def _interp_and_smooth(self, spatial_smooth_sigma_km):
        """Interpolates self.opt_Q_tomo_array (pseudo-3D) and optionally applies
        Gaussian spatial smoothing with sigma given in km. Sets and returns
        self.opt_Q_tomo_array_interp_smooth.
        (Private helper - this logic was previously duplicated in three methods.)"""
        # Interpolate results:
        self.opt_Q_tomo_array_interp = self.psuedo_threeD_interpolation()
        # Apply spatial filtering, if specified:
        if spatial_smooth_sigma_km > 0.:
            grid_spacing_km = self.rays.x_node_labels[1] - self.rays.x_node_labels[0] # (Note: Assumes uniform grid spacing in x,y,z)
            gauss_filt_sigma = spatial_smooth_sigma_km / grid_spacing_km
            self.opt_Q_tomo_array_interp_smooth = gaussian_filter(self.opt_Q_tomo_array_interp, sigma=gauss_filt_sigma)
        else:
            self.opt_Q_tomo_array_interp_smooth = self.opt_Q_tomo_array_interp
        return(self.opt_Q_tomo_array_interp_smooth)
    def load_opt_Q_tomo_result_interpolated_smoothed(self, inv_fname, spatial_smooth_sigma_km=0.0):
        """Function to load optimal Q tomography result from file
        and interpolate data.
        Inputs:
        inv_fname - The inversion data fname to plot data for.
        Optional:
        spatial_smooth_sigma_km - The spatial smoothing to apply, in km.
                                Applies Gaussian filtering. Default is 0.0,
                                which applies no filtering (float)
        Returns:
        opt_Q_tomo_array_interp_smooth - Optimal tomography array,
                                interpolated (and smoothed, if specified). (3D np array)
        """
        # Load optimal data:
        opt_result = pickle.load(open(inv_fname, 'rb'))
        opt_m = opt_result[0]
        # Reconstruct full model 3D grid result from data:
        # (Add unsampled cells back in then reshape solution back to 3D grid)
        self.opt_Q_tomo_array = self.inv.reconstruct_full_threeD_grid_soln(opt_m)
        # Interpolate (and optionally smooth) results:
        return(self._interp_and_smooth(spatial_smooth_sigma_km))
    def plot_inversion_result(self, inv_fname, plane='xz', plane_idx=0, spatial_smooth_sigma_km=0.0, cmap='viridis',
                                fig_out_fname='', vmin=10., vmax=1000., xlims=[], ylims=[], checkerboard_inv=None,
                                earthquakes_nonlinloc_fnames=None):
        """Plot inversion result for optimal damping parameter.
        Inputs:
        inv_fname - The inversion data fname to plot data for.
        Optional:
        plane - The plane to plot. Can be xy, xz, or yz. (str)
        plane_idx - The index of the plane to plot (int)
        spatial_smooth_sigma_km - The spatial smoothing to apply, in km.
                                Applies Gaussian filtering. Default is 0.0,
                                which applies no filtering (float)
        cmap - The matplotlib colormap to use. Default is viridis (str)
        fig_out_fname - The name of the file to save the file to, if
                        specified. Default is not to save to file. (str)
        vmin, vmax - The Q colour-scale limits (log scale). (float)
        xlims, ylims - The x and y minimum and maximum extents to plot
                        for the specified plane, in km. In format
                        [xmin, xmax] , [ymin, ymax]. Default is [],
                        which means it will use the full extent.
                        (list of two floats each)
        checkerboard_inv - Checkerboard object. If provided, will plot the
                            locations of synthetic spikes (their widths).
                            Default = None, so will not plot. (checkerboard
                            object)
        earthquakes_nonlinloc_fnames - List of NonLinLoc hyp filenames. If
                            provided, plots hypocentre locations on the
                            section. Default = None, so will not plot. (list
                            of strs)
        """
        # Load optimal data:
        opt_result = pickle.load(open(inv_fname, 'rb'))
        opt_m = opt_result[0]
        # Reconstruct full model 3D grid result from data:
        # (Add unsampled cells back in then reshape solution back to 3D grid)
        self.opt_Q_tomo_array = self.inv.reconstruct_full_threeD_grid_soln(opt_m)
        # Interpolate (and optionally smooth) results:
        self._interp_and_smooth(spatial_smooth_sigma_km)
        # Plot result:
        # (Figure aspect ratio follows the requested extents, if given)
        if len(xlims) > 0 and len(ylims) > 0:
            max_lim_tmp = np.max(np.abs(np.array((xlims, ylims))))
            xlims = np.array(xlims)
            ylims = np.array(ylims)
            fig, ax = plt.subplots(figsize=(3*((xlims[1]-xlims[0])/max_lim_tmp),(3*((ylims[1]-ylims[0])/max_lim_tmp))))
        else:
            fig, ax = plt.subplots(figsize=(8,4))
        # Specify plot limits:
        if len(xlims) > 0 and len(ylims) > 0:
            ax.set_xlim(xlims)
            ax.set_ylim(ylims)
        if plane == 'xy':
            # Plot data:
            Y, X = np.meshgrid(self.rays.y_node_labels, self.rays.x_node_labels)
            im = ax.pcolormesh(X, Y, 1./self.opt_Q_tomo_array_interp_smooth[:,:,plane_idx], vmin=vmin, vmax=vmax, norm=matplotlib.colors.LogNorm(), cmap=cmap)
            # Add text:
            ax.set_title(' '.join(("XY-plane, z =",str(self.rays.z_node_labels[plane_idx]),"km")))
            ax.set_xlabel('X (km)')
            ax.set_ylabel('Y (km)')
            # And plot checkerboard synthetic spike locations, if specified:
            if checkerboard_inv:
                for i in range(len(checkerboard_inv.spike_x_idxs)):
                    for j in range(len(checkerboard_inv.spike_y_idxs)):
                        x_tmp = checkerboard_inv.rays.x_node_labels[checkerboard_inv.spike_x_idxs[i]] + ((checkerboard_inv.rays.x_node_labels[1] - checkerboard_inv.rays.x_node_labels[0]) / 2)
                        y_tmp = checkerboard_inv.rays.y_node_labels[checkerboard_inv.spike_y_idxs[j]] + ((checkerboard_inv.rays.y_node_labels[1] - checkerboard_inv.rays.y_node_labels[0]) / 2)
                        circle_tmp = matplotlib.patches.Circle((x_tmp,y_tmp), radius=checkerboard_inv.spike_width_km/2., fill=False, edgecolor='white', linestyle='--')
                        ax.add_artist(circle_tmp)
            # And plot seismicity, if specified:
            if earthquakes_nonlinloc_fnames:
                for i in range(len(earthquakes_nonlinloc_fnames)):
                    nonlinloc_hyp_data = NonLinLocPy.read_nonlinloc.read_hyp_file(earthquakes_nonlinloc_fnames[i])
                    ax.scatter(nonlinloc_hyp_data.max_prob_hypocenter['x'], nonlinloc_hyp_data.max_prob_hypocenter['y'], s=2.5, c='k', alpha=0.5)
        elif plane == 'xz':
            # Plot data:
            Z, X = np.meshgrid(self.rays.z_node_labels, self.rays.x_node_labels)
            im = ax.pcolormesh(X, Z, 1./self.opt_Q_tomo_array_interp_smooth[:,plane_idx,:], vmin=vmin, vmax=vmax, norm=matplotlib.colors.LogNorm(), cmap=cmap)
            ax.invert_yaxis()
            # Add text:
            ax.set_title(' '.join(("XZ-plane, y =",str(self.rays.y_node_labels[plane_idx]),"km")))
            ax.set_xlabel('X (km)')
            ax.set_ylabel('Z (km)')
            # And plot checkerboard synthetic spike locations, if specified:
            if checkerboard_inv:
                for i in range(len(checkerboard_inv.spike_x_idxs)):
                    for j in range(len(checkerboard_inv.spike_z_idxs)):
                        x_tmp = checkerboard_inv.rays.x_node_labels[checkerboard_inv.spike_x_idxs[i]] + ((checkerboard_inv.rays.x_node_labels[1] - checkerboard_inv.rays.x_node_labels[0]) / 2)
                        z_tmp = checkerboard_inv.rays.z_node_labels[checkerboard_inv.spike_z_idxs[j]] + ((checkerboard_inv.rays.z_node_labels[1] - checkerboard_inv.rays.z_node_labels[0]) / 2)
                        circle_tmp = matplotlib.patches.Circle((x_tmp,z_tmp), radius=checkerboard_inv.spike_width_km/2., fill=False, edgecolor='white', linestyle='--')
                        ax.add_artist(circle_tmp)
            # And plot seismicity, if specified:
            if earthquakes_nonlinloc_fnames:
                for i in range(len(earthquakes_nonlinloc_fnames)):
                    nonlinloc_hyp_data = NonLinLocPy.read_nonlinloc.read_hyp_file(earthquakes_nonlinloc_fnames[i])
                    ax.scatter(nonlinloc_hyp_data.max_prob_hypocenter['x'], nonlinloc_hyp_data.max_prob_hypocenter['z'], s=2.5, c='k', alpha=0.5)
        elif plane == 'yz':
            # Plot data:
            Z, Y = np.meshgrid(self.rays.z_node_labels, self.rays.y_node_labels)
            im = ax.pcolormesh(Y, Z, 1./self.opt_Q_tomo_array_interp_smooth[plane_idx,:,:], vmin=vmin, vmax=vmax, norm=matplotlib.colors.LogNorm(), cmap=cmap)
            ax.invert_yaxis()
            # Add text:
            ax.set_title(' '.join(("YZ-plane, x =",str(self.rays.x_node_labels[plane_idx]),"km")))
            ax.set_xlabel('Y (km)')
            ax.set_ylabel('Z (km)')
            # And plot checkerboard synthetic spike locations, if specified:
            if checkerboard_inv:
                for i in range(len(checkerboard_inv.spike_y_idxs)):
                    for j in range(len(checkerboard_inv.spike_z_idxs)):
                        y_tmp = checkerboard_inv.rays.y_node_labels[checkerboard_inv.spike_y_idxs[i]] + ((checkerboard_inv.rays.y_node_labels[1] - checkerboard_inv.rays.y_node_labels[0]) / 2)
                        z_tmp = checkerboard_inv.rays.z_node_labels[checkerboard_inv.spike_z_idxs[j]] + ((checkerboard_inv.rays.z_node_labels[1] - checkerboard_inv.rays.z_node_labels[0]) / 2)
                        circle_tmp = matplotlib.patches.Circle((y_tmp,z_tmp), radius=checkerboard_inv.spike_width_km/2., fill=False, edgecolor='white', linestyle='--')
                        ax.add_artist(circle_tmp)
            # And plot seismicity, if specified:
            if earthquakes_nonlinloc_fnames:
                for i in range(len(earthquakes_nonlinloc_fnames)):
                    nonlinloc_hyp_data = NonLinLocPy.read_nonlinloc.read_hyp_file(earthquakes_nonlinloc_fnames[i])
                    ax.scatter(nonlinloc_hyp_data.max_prob_hypocenter['y'], nonlinloc_hyp_data.max_prob_hypocenter['z'], s=2.5, c='k', alpha=0.5)
        else:
            print('Error: Plane option', plane, 'does not exist. Exiting.')
            sys.exit()
        fig.colorbar(im, label='$Q_'+self.inv.seismic_phase_to_use+'$')
        # Save figure, if specified:
        if len(fig_out_fname) > 0:
            plt.savefig(fig_out_fname, dpi=300)
            print('Saved figure to:',fig_out_fname)
        # And show figure:
        plt.show()
    def plot_inversion_result_3D_slices(self, opt_lamb, plane='xz', spatial_smooth_sigma_km=0.0, cmap='viridis', fig_out_fname='', vmin=10, vmax=1000):
        """Plot inversion result for optimal damping parameter as a 3D plot with a
        number of 2D surfaces.
        Inputs:
        opt_lamb - The optimal damping/regularisation parameter (decided
                    upon based on L-curve analysis).
        Optional:
        plane - The plane to plot. Can be xy, xz, or yz. (str)
        spatial_smooth_sigma_km - The spatial smoothing to apply, in km.
                                Applies Gaussian filtering. Default is 0.0,
                                which applies no filtering (float)
        cmap - The matplotlib colormap to use. Default is viridis (str)
        fig_out_fname - The name of the file to save the file to, if
                        specified. Default is not to save to file. (str)
        vmin, vmax - The Q colour-scale limits (log scale). (float)
        """
        # Load optimal data:
        opt_result = pickle.load(open(self.inv.results_out_fname_prefix+str(opt_lamb)+'_'+self.seismic_phase_to_use+'.pkl', 'rb'))
        opt_m = opt_result[0]
        # Reconstruct full model 3D grid result from data:
        # (Add unsampled cells back in then reshape solution back to 3D grid)
        self.opt_Q_tomo_array = self.inv.reconstruct_full_threeD_grid_soln(opt_m)
        # Interpolate (and optionally smooth) results:
        self._interp_and_smooth(spatial_smooth_sigma_km)
        # Plot result:
        # Setup figure:
        fig = plt.figure(figsize=(6,6))
        # (Fix: fig.gca(projection=...) was removed in matplotlib >= 3.6)
        ax = fig.add_subplot(projection='3d')
        # Setup colour dimension info:
        norm = matplotlib.colors.LogNorm(vmin, vmax)
        cm = plt.cm.ScalarMappable(norm=norm, cmap=cmap)
        cm.set_array([])
        # --------- Plot xy plane: ---------
        plane_idx = int(len(self.rays.z_node_labels) / 2. )
        Y, X = np.meshgrid(self.rays.y_node_labels, self.rays.x_node_labels)
        Z = self.rays.z_node_labels[plane_idx] * np.ones(X.shape)
        # Create forth dimension to colour surfaces:
        colour_dimension = 1./self.opt_Q_tomo_array_interp_smooth[:,:,plane_idx]
        fcolors = cm.to_rgba(colour_dimension, alpha=0.2)
        # And plot:
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=fcolors, vmin=vmin, vmax=vmax, shade=False, zorder=2)
        # --------- Plot xz plane: ---------
        plane_idx = int(len(self.rays.y_node_labels) / 2. )
        Z, X = np.meshgrid(self.rays.z_node_labels, self.rays.x_node_labels)
        Y = self.rays.y_node_labels[plane_idx] * np.ones(X.shape)
        # Create forth dimension to colour surfaces:
        colour_dimension = 1./self.opt_Q_tomo_array_interp_smooth[:,plane_idx,:]
        fcolors = cm.to_rgba(colour_dimension)
        # And plot:
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=fcolors, vmin=vmin, vmax=vmax, shade=False, zorder=1)
        plt.show()
class checkerboard:
    """Class to perform checkerboard testing."""
    def __init__(self, rays):
        """Function to initialise checkerboard class.
        Inputs:
        rays - A rays class containing ray tracing, as in the class described in this module.
        """
        # Assign input arguments to the class object:
        self.rays = rays
    def find_nearest(self, array, value):
        """Returns the value and index of the element of <array> nearest to <value>."""
        array = np.asarray(array)
        idx = (np.abs(array - value)).argmin()
        return array[idx], idx
    def _find_spike_idxs(self, node_labels, spike_spacing_km):
        """Finds node indices along <node_labels> spaced approximately
        <spike_spacing_km> apart (the first spike is offset by half a spacing).
        (Private helper - this logic was previously duplicated for x, y and z.)
        Returns a list of indices into <node_labels>."""
        i = 0
        spike_idxs = []
        # Determine how many nodes one spacing covers, to stop before the grid edge:
        val, idx = self.find_nearest(node_labels, node_labels[0] + spike_spacing_km)
        spike_spacing_lim = idx + 1
        while i < len(node_labels) - spike_spacing_lim:
            if i==0:
                # Offset the first spike by half a spacing from the grid edge:
                val, idx = self.find_nearest(node_labels, node_labels[i] + spike_spacing_km/2)
            else:
                val, idx = self.find_nearest(node_labels, node_labels[i] + spike_spacing_km)
            spike_idxs.append(idx)
            i = idx
        return spike_idxs
    def create_checkerboard_spikes_grid(self, spike_spacing_km, spike_width_km, Q_background=250., spike_rel_amp=0.2, plot_out_fname='', cmap='viridis'):
        """Function to create checkerboard spikes grid, from specified
        spikes size and spacing. Sets self.inv_Q_grid (1/Q) and the spike
        index/geometry attributes used by the plotting functions.
        Inputs:
        spike_spacing_km - Spike spacing, in km. (float)
        spike_width_km - Spike half width, in km. (float)
        Optional:
        Q_background - The background Q value. Default is 250. (float)
        spike_rel_amp - The relative amplitude of the spikes above the
                        background level, Q_background. (float)
        plot_out_fname - If specified, saves the QC figure to this path (str)
        cmap - The matplotlib colormap to use for the QC figure (str)
        """
        # Specify Q grid from velocity grid:
        Q_grid = np.ones(self.rays.vel_grid.shape) * Q_background
        # Find spike x, y and z indices:
        spike_x_idxs = self._find_spike_idxs(self.rays.x_node_labels, spike_spacing_km)
        spike_y_idxs = self._find_spike_idxs(self.rays.y_node_labels, spike_spacing_km)
        spike_z_idxs = self._find_spike_idxs(self.rays.z_node_labels, spike_spacing_km)
        # Add multivariate Gaussian spikes into Q grid:
        # NOTE(review): np.meshgrid defaults to 'xy' indexing, so X, Y, Z have
        # shape (ny, nx, nz) while Q_grid has vel_grid's shape - these only
        # agree when nx == ny. TODO: confirm whether indexing='ij' is intended.
        X, Y, Z = np.meshgrid(self.rays.x_node_labels, self.rays.y_node_labels, self.rays.z_node_labels)
        spike_amp = Q_background * spike_rel_amp
        # Set coords for multivar gaussian:
        multivar_gauss_pos = np.zeros((X.shape[0],X.shape[1],X.shape[2],3))
        multivar_gauss_pos[:,:,:,0] = X
        multivar_gauss_pos[:,:,:,1] = Y
        multivar_gauss_pos[:,:,:,2] = Z
        # Loop over spike indices, adding to field:
        for i in range(len(spike_x_idxs)):
            mu_x = self.rays.x_node_labels[spike_x_idxs[i]]
            print(100*(i+1)/len(spike_x_idxs),'% complete')
            for j in range(len(spike_y_idxs)):
                mu_y = self.rays.y_node_labels[spike_y_idxs[j]]
                for k in range(len(spike_z_idxs)):
                    mu_z = self.rays.z_node_labels[spike_z_idxs[k]]
                    # Add a multivariate gaussian spike:
                    # (isotropic, with variance = spike_width_km**2)
                    rv = multivariate_normal(mean=[mu_x, mu_y, mu_z], cov=(spike_width_km**2), allow_singular=True)
                    curr_gauss_spike_vals = rv.pdf(multivar_gauss_pos)
                    # Normalise so each spike's peak amplitude equals spike_amp:
                    Q_grid = Q_grid + ( spike_amp * curr_gauss_spike_vals / np.max(curr_gauss_spike_vals) )
        del X,Y,Z,curr_gauss_spike_vals
        gc.collect()
        # Plot Q grid (QC figure, central xz section):
        print('Plotting Q grid')
        fig, ax = plt.subplots(figsize=(8,4))
        Z, X = np.meshgrid(self.rays.z_node_labels, self.rays.x_node_labels)
        im = ax.pcolormesh(X, Z, Q_grid[:,spike_y_idxs[int(len(spike_y_idxs)/2)],:], norm=matplotlib.colors.LogNorm(), cmap=cmap)
        ax.invert_yaxis()
        fig.colorbar(im, label='Q')
        ax.set_title('Q synth in')
        ax.set_xlabel('x (km)')
        ax.set_ylabel('z (km)')
        # Plot spike locations (for comparison with other plots):
        for i in range(len(spike_x_idxs)):
            for j in range(len(spike_z_idxs)):
                x_tmp = self.rays.x_node_labels[spike_x_idxs[i]] + ((self.rays.x_node_labels[1] - self.rays.x_node_labels[0]) / 2)
                z_tmp = self.rays.z_node_labels[spike_z_idxs[j]] + ((self.rays.z_node_labels[1] - self.rays.z_node_labels[0]) / 2)
                circle_tmp = matplotlib.patches.Circle((x_tmp,z_tmp), radius=spike_width_km/2., fill=False, edgecolor='white', linestyle='--')
                ax.add_artist(circle_tmp)
        if len(plot_out_fname) > 0:
            plt.savefig(plot_out_fname, dpi=300)
        plt.show()
        # And create inv Q grid (1/Q, as used in the forward calculation) and tidy up:
        self.inv_Q_grid = 1. / Q_grid
        self.spike_x_idxs = spike_x_idxs
        self.spike_y_idxs = spike_y_idxs
        self.spike_z_idxs = spike_z_idxs
        self.spike_spacing_km = spike_spacing_km
        self.spike_width_km = spike_width_km
        del Q_grid
        gc.collect()
    def create_synth_t_stars(self):
        """Creates synthetic t stars using the path lengths and velocity model
        from the rays object, and 1/Q from the checkerboard spikes input
        (created using checkerboard.create_checkerboard_spikes_grid()).
        Creates self.inv.t_stars output.
        """
        # Calculate G from path lengths, if haven't already:
        try:
            self.inv.G
            print('self.inv.G already exists, so continuing without recalculation.')
        except AttributeError:
            self.inv = inversion(self.rays)
            self.inv.prep_rays_for_inversion()
        # Consolidate Q values, to only use those that have ray paths going through them:
        inv_Q_grid_consolidated_ravelled = np.delete(self.inv_Q_grid.ravel(), self.inv.rays.unsampled_cell_idxs, axis=0)
        # And calculate synth t stars from the path lengths, vel grid and the Q grid:
        # (forward problem: t* = G . (1/Q))
        self.inv.t_stars = np.matmul(self.inv.G, inv_Q_grid_consolidated_ravelled)
        print('Number of synth t_star observations to use:', len(self.inv.t_stars))
    def perform_synth_inversion(self, lamb, Q_init=250., synth_result_out_fname=''):
        """
        Function to perform the inversion on synthetic input data.
        Inputs:
        Required:
        lamb - The damping/regularisation parameter. This should
                be the same value as used in the real data inversion.
                (float)
        Optional:
        Q_init - The initial Q value to use in the initial lsqr
                inversion conditions. This value should be equal
                to Q_background used in the synthetics for normal
                use. Default is 250. (float)
        synth_result_out_fname - The filename to save data out to.
                Default is no output. (str)
        Returns:
        synth_Q_tomo_array - An array containing the synthetic Q
                tomography result.
        """
        # Perform the inversion (only pass the output fname if one was given):
        if len(synth_result_out_fname) == 0:
            self.synth_Q_tomo_array = self.inv.perform_inversion(lamb=lamb, Q_init=Q_init)
        else:
            self.synth_Q_tomo_array = self.inv.perform_inversion(lamb=lamb, Q_init=Q_init, result_out_fname=synth_result_out_fname)
        return(self.synth_Q_tomo_array)
#----------------------------------------------- End: Define main functions -----------------------------------------------
#----------------------------------------------- Run script -----------------------------------------------
if __name__ == "__main__":
    # Add main script run commands here...
    # No batch driver is defined yet; running the module directly only
    # confirms that it imports cleanly.
    print("Finished")
|
import time
import pandas as pd
class Niswanob1:
    """Plain record holding one row of final.csv.

    Sibling heap-sort code orders instances by the `fuscat3` attribute,
    so all attribute names are kept exactly as the callers expect.
    """

    def __init__(self, Chri1, Chri2, Niswanob2, fuscat2, bitboostdotcom1, Chri4, fuscat3):
        self.bitboostdotcom2 = Chri1
        self.Chri2 = Chri2
        self.Niswanob2 = Niswanob2
        self.fuscat2 = fuscat2
        self.bitboostdotcom1 = bitboostdotcom1
        self.Chri4 = Chri4
        self.fuscat3 = fuscat3
def bitboostdotcom5(ordemo1, bitboostdotcom3, Chri3):
    """Sift the element at index `Chri3` down a max-heap of size `bitboostdotcom3`.

    `ordemo1` is a list of objects compared by their `fuscat3` attribute.
    Recurses until the heap property is restored below `Chri3`.
    """
    fuscat5 = Chri3                    # index of the largest of node + children
    ordemo3 = (2 * Chri3) + 1          # left child
    bitboostdotcom4 = (2 * Chri3) + 2  # right child
    if ordemo3 < bitboostdotcom3 and ordemo1[Chri3].fuscat3 < ordemo1[ordemo3].fuscat3:
        fuscat5 = ordemo3
    if bitboostdotcom4 < bitboostdotcom3 and ordemo1[fuscat5].fuscat3 < ordemo1[bitboostdotcom4].fuscat3:
        fuscat5 = bitboostdotcom4
    if fuscat5 != Chri3:
        ordemo1[Chri3], ordemo1[fuscat5] = ordemo1[fuscat5], ordemo1[Chri3]
        bitboostdotcom5(ordemo1, bitboostdotcom3, fuscat5)


def bitboostdotcom6(bitboostdotcom7):
    """Heap-sort `bitboostdotcom7` in place, ascending by `fuscat3`.

    Bug fix: the heap-build range used true division (`ordemo2 / 2`), which
    yields a float and raises TypeError in range() under Python 3; integer
    floor division (`//`) is required.
    """
    ordemo2 = len(bitboostdotcom7)
    # Build the max-heap bottom-up:
    for Niswanob4 in range((ordemo2 // 2) - 1, -1, -1):
        bitboostdotcom5(bitboostdotcom7, ordemo2, Niswanob4)
    # Repeatedly move the max to the end and re-heapify the prefix:
    for Niswanob4 in range(ordemo2 - 1, 0, -1):
        bitboostdotcom7[Niswanob4], bitboostdotcom7[0] = bitboostdotcom7[0], bitboostdotcom7[Niswanob4]
        bitboostdotcom5(bitboostdotcom7, Niswanob4, 0)
# Global list of Niswanob1 records, populated from final.csv and heap-sorted
# (ascending by fuscat3) by Niswanob6() at import time.
Niswanob3=[]
def Niswanob6():
    # Load every row of final.csv into a Niswanob1 record, then sort the
    # global list with the heap-sort helper bitboostdotcom6().
    Chri5=pd.read_csv('final.csv')
    bitboostdotcom8=0
    global Niswanob3
    for fuscat4 in Chri5.iterrows():
        # fuscat4 is a (row_index, row) pair; columns are read positionally
        # 0..6 — presumably matching the CSV column order; confirm against
        # the file's header.
        id=fuscat4[1][0]  # NOTE(review): shadows the builtin `id`
        fuscat6=fuscat4[1][1]
        Niswanob5=fuscat4[1][2]
        Niswanob7=fuscat4[1][3]
        fuscat8=fuscat4[1][4]
        fuscat7=fuscat4[1][5]
        bitboostdotcomA=fuscat4[1][6]
        Niswanob3.append(Niswanob1(id,fuscat6,Niswanob5,Niswanob7,fuscat8,fuscat7,bitboostdotcomA))
        bitboostdotcom8+=1  # row counter; not used after the loop
    bitboostdotcom6(Niswanob3)
# Runs at import time; requires final.csv in the working directory.
Niswanob6()
def bitboostdotcomB():
    # Return (Niswanob2, fuscat2, fuscat3) of the first record after the
    # sort, i.e. the record with the smallest fuscat3.
    global Niswanob3
    Niswanob2=Niswanob3[0].Niswanob2
    fuscat2=Niswanob3[0].fuscat2
    fuscat3=Niswanob3[0].fuscat3
    return (Niswanob2,fuscat2,fuscat3)
|
"""
v3-specific utilities.
"""
import functools
import os
import re
import shutil
from ruamel import yaml
from kyoukai.asphalt import HTTPRequestContext
# Path to the per-route rate-limit configuration, resolved from the CWD.
RATES_PATH = os.path.join(os.getcwd(), "rates.yml")
# No default user agents.
# Customize them, please.
DISALLOW_AGENTS = re.compile(r"(?:.*aiohttp/.*|.*python-requests/.*)")
# Bad useragent response text.
# Tuple shape is (body, status, headers) — presumably the framework's
# route-return convention; confirm against the Kyoukai handlers.
BAD_USERAGENT = (
    {
        "error": 400,
        "msg": "Hi! To prevent abuse of this service, it is required that you "
        "customize your user agent.",
    },
    400,
    {"Content-Type": "application/json"},
)
# Seed rates.yml from the shipped defaults on first run.
if not os.path.exists(RATES_PATH):
    shutil.copy(os.path.join(os.getcwd(), "rates.default.yml"), RATES_PATH)
with open(RATES_PATH) as r:
    # NOTE(review): ruamel's legacy load() API with the full Loader; fine
    # for a trusted local config file.
    ratelimits = yaml.load(r, Loader=yaml.Loader).get("rates")
# (user-agent regex, rates dict) pairs used by with_ratelimit below.
compiled = []
# Compile the ratelimits.
for key, val in ratelimits.items():
    compiled.append((re.compile(key), val))
# Deref as we don't use it anymore
del ratelimits
def check_default_useragents(useragent: str):
    """
    Report whether *useragent* looks like a stock HTTP-library default.

    Returns the regex match object (truthy) when the agent is disallowed
    per DISALLOW_AGENTS, otherwise None.
    """
    is_default_agent = DISALLOW_AGENTS.match(useragent)
    return is_default_agent
def with_ratelimit(bucket: str, timelimit: int = None, max_reqs: int = 0):
    """
    Defines a function to rate limit for.
    Rate limits are stored in `rates.yml`.

    `timelimit` and `max_reqs`, when truthy, override the values from the
    matched `rates.yml` entry; otherwise the matched entry's values apply.
    """
    def _rl_inner1(func):
        @functools.wraps(func)
        async def _rl_inner2(ctx: HTTPRequestContext, *args, **kwargs):
            """
            Inner ratelimit function.
            """
            if ctx.app.config["owapi_disable_ratelimits"]:
                # Don't bother with ratelimits.
                return await func(ctx, *args, **kwargs)
            # only ratelimit if we have redis. Can't make this decision in
            # outer functions because they are called before globalsettings are set
            if ctx.app.config["owapi_use_redis"]:
                import aioredis
                assert isinstance(ctx.redis, aioredis.Redis)
                # Get the IP (reverse-proxy headers first, socket address last).
                ip = (
                    ctx.request.headers.get("X-Real-IP")
                    or ctx.request.headers.get("X-Forwarded-For")
                    or ctx.request.remote_addr
                )
                # Build the per-bucket, per-IP ratelimit key.
                built = "{bucket}:{ip}:ratelimit".format(bucket=bucket, ip=ip)
                # Reject default library user agents before doing any work.
                user_agent = ctx.request.headers.get("User-Agent")
                if user_agent is None:
                    return BAD_USERAGENT
                if check_default_useragents(user_agent):
                    return BAD_USERAGENT
                # Load the rate limit based on the regular expression provided.
                for regex, rates in compiled:
                    if regex.match(user_agent):
                        break
                else:
                    # UH OH
                    raise RuntimeError("Failed to match User-Agent - did you wipe rates.yml?")
                _timelimit = timelimit or rates.get("time", 1)
                _max_reqs = max_reqs or rates.get("max_reqs", 1)
                # Redis-based ratelimiting.
                # First, check if the key even exists.
                if not (await ctx.redis.exists(built)):
                    # LPUSH, and EXPIRE it.
                    await ctx.redis.lpush(built, _max_reqs)
                    await ctx.redis.expire(built, _timelimit)
                else:
                    # LLEN it.
                    tries = await ctx.redis.llen(built)
                    # Bug fix: compare against the effective limit _max_reqs.
                    # The raw `max_reqs` parameter defaults to 0, so the old
                    # `tries >= max_reqs` check was always true and
                    # ratelimited every request on every route that did not
                    # pass an explicit max_reqs.
                    if tries >= _max_reqs:
                        # 429 You Are Being Ratelimited.
                        ttl = await ctx.redis.ttl(built)
                        if ttl == -1:
                            # Key somehow has no expiry; re-arm it so the
                            # client's Retry-After is meaningful.
                            await ctx.redis.expire(built, _timelimit)
                            ttl = _timelimit
                        return (
                            {"error": 429, "msg": "you are being ratelimited", "retry": ttl},
                            429,
                            {"Retry-After": ttl},
                        )
                # LPUSH a `1` or something onto the edge of the list.
                # The actual value doesn't matter.
                await ctx.redis.lpush(built, 1)
            # Now, await the underlying function.
            return await func(ctx, *args, **kwargs)
        return _rl_inner2
    return _rl_inner1
|
#coding:utf-8
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
import numpy as np
import matplotlib.pyplot as plt
import sys
import time
#Initialize the weights w; these also serve as the convolution kernels (filters)
def weight_variable(shape):
    """Create a trainable weight tensor (the convolution filter).

    Values are drawn from a truncated normal distribution (stddev 0.1).
    shape is [filter_height, filter_width, in_channels, out_channels],
    i.e. [kernel height, kernel width, image channels, number of kernels].
    """
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
#Initialize the bias weights b
def bias_variable(shape):
    """Create a trainable bias tensor filled with the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
#Build a convolution layer
def conv2d(x, w):
    """2-D convolution with stride 1 in every dimension.

    strides is [batch_stride, height_stride, width_stride, channels_stride].
    padding="SAME" keeps the spatial output size equal to the input size:
    missing pixels at the borders are zero-filled, so the kernel may sweep
    past the actual image edge. (With "VALID" the kernel would instead stay
    inside the image bounds, shrinking the output.)
    """
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
#Pooling layer (i.e. downsampling); max_pool takes the maximum within each window
def max_pool(x):
    """2x2 max-pooling (downsampling) with stride 2 and SAME padding.

    ksize is the pooling window; its layout matches strides. The result
    halves the spatial height and width.
    """
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
# Load the MNIST data set (labels one-hot encoded).
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
pix_num = mnist.train.images.shape[1]
label_num = mnist.train.labels.shape[1]
# Build the network.
X = tf.placeholder(tf.float32, shape=[None, pix_num])
Y = tf.placeholder(tf.float32, shape=[None, label_num])
# Reshape the flat [784] MNIST vectors to [28, 28, 1]; -1 leaves the batch
# dimension unspecified.
x_image = tf.reshape(X, [-1, 28, 28, 1])
w_conv1 = weight_variable([5, 5, 1, 32])  # 32 feature maps
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)  # first conv layer, relu = max(0, x)
h_pool1 = max_pool(h_conv1)  # first pooling layer
w_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)  # second conv layer
h_pool2 = max_pool(h_conv2)  # second pooling layer
w_fc1 = weight_variable([7 * 7 * 64, 1024])  # 1024 hidden features
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)  # first fully-connected layer
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)  # dropout layer
w_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
Y_prev = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2) + b_fc2)
# Training objective (cross-entropy).
# NOTE(review): tf.log(Y_prev) can hit log(0); softmax_cross_entropy_with_logits
# would be numerically safer but changes loss values, so it is left as-is.
loss = -tf.reduce_sum(Y * tf.log(Y_prev))
train_op = tf.train.GradientDescentOptimizer(0.0005).minimize(loss)
# correct_prediction is a boolean vector [True, False, True, ...].
# Bug fix: tf.arg_max is the deprecated alias; tf.argmax is the supported API.
correct_prediction = tf.equal(tf.argmax(Y_prev, 1), tf.argmax(Y, 1))
# tf.cast converts the booleans to floats so their mean is the accuracy.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
W_ = np.zeros([pix_num, label_num])  # NOTE(review): unused below; kept as-is
b_ = np.zeros([label_num])           # NOTE(review): unused below; kept as-is
batch_size = 8
print("start")
sys.stdout.flush()
# Bug fix: time.clock() was removed in Python 3.8; time.perf_counter() is the
# supported high-resolution timer replacement.
t = time.perf_counter()
with tf.Session() as sess:
    with tf.device("/gpu:0"):
        sess.run(tf.global_variables_initializer())
        for epoch in range(20000):
            batch = mnist.train.next_batch(batch_size)
            sess.run([train_op], feed_dict={X: batch[0], Y: batch[1], keep_prob: 0.5})
            # Report test accuracy every 10 steps (dropout disabled).
            if epoch % 10 == 0:
                batch = mnist.test.next_batch(1000)
                print("step:{} ,rate:{}, time:{}".format(epoch, sess.run(accuracy, feed_dict={X: batch[0], Y: batch[1], keep_prob: 1.0}), time.perf_counter() - t))
                t = time.perf_counter()
                sys.stdout.flush()
        # Final held-out accuracy.
        batch = mnist.test.next_batch(1000)
        print(sess.run(accuracy, feed_dict={X: batch[0], Y: batch[1], keep_prob: 1.0}))
|
import importlib
import traceback
from lib.conf.config import settings
class PluginManager(object):
    """Loads the configured collector plugins and runs them against a host
    using one of three transport modes: AGENT (local), SSH, or SALT."""

    def __init__(self, hostname=None):
        self.hostname = hostname
        self.plugin_dict = settings.PLUGINS_DICT
        self.mode = settings.MODE
        self.debug = settings.DEBUG
        # SSH credentials are only needed (and only defined) in SSH mode.
        if self.mode == "SSH":
            self.ssh_user = settings.SSH_USER
            self.ssh_port = settings.SSH_PORT
            self.ssh_pwd = settings.SSH_PWD
            self.ssh_key = settings.SSH_KEY

    def exec_plugin(self):
        """
        Import every configured plugin, run it, and collect its result.

        Returns:
            dict: plugin-key -> {"status": bool, "data": result-or-error}.
        """
        response = {}
        for k, v in self.plugin_dict.items():
            # 'basic': "src.plugins.basic.Basic"
            ret = {"status": True, "data": None}
            # Bug fix: initialise prefix before the try block. If rsplit or
            # the import raised before `prefix` was assigned, the except
            # handler's f-string hit a NameError that masked the real error.
            prefix = v
            try:
                prefix, class_module = v.rsplit(".", 1)
                m = importlib.import_module(prefix)
                cls = getattr(m, class_module)
                # Plugins may expose a factory classmethod `initial`.
                if hasattr(cls, "initial"):
                    obj = cls.initial()
                else:
                    obj = cls()
                result = obj.process(self.command, self.debug)
                ret["data"] = result
            except Exception:
                ret["status"] = False
                ret["data"] = f"[{self.hostname if self.hostname else 'AGENT'}][{prefix}]采集信息出现错误:" \
                              f"{traceback.format_exc()}"
            response[k] = ret
        return response

    def command(self, cmd):
        """Dispatch a shell command through the configured transport mode."""
        if self.mode == "AGENT":
            return self.__agent(cmd)
        elif self.mode == "SSH":
            return self.__ssh(cmd)
        elif self.mode == "SALT":
            return self.__salt(cmd)
        else:
            raise Exception("请选择AGENT/SSH/SALT模式")

    def __agent(self, cmd):
        """Run the command locally; returns its combined output as str."""
        import subprocess
        output = subprocess.getoutput(cmd)
        return output

    def __salt(self, cmd):
        """Run the command on the target host through salt's cmd.run."""
        salt_cmd = f"salt {self.hostname} cmd.run '{cmd}'"
        import subprocess
        output = subprocess.getoutput(salt_cmd)
        return output

    def __ssh(self, cmd):
        """Run the command over SSH; returns stdout as bytes."""
        import paramiko
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(hostname=self.hostname, port=self.ssh_port, username=self.ssh_user, password=self.ssh_pwd)
        stdin, stdout, stderr = ssh.exec_command(cmd)
        result = stdout.read()
        ssh.close()
        return result
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import sklearn
import zipcodes
import sys
import os
import json
import warnings
import yaml
import argparse
import pickle
import logging
from make_data import choose_features
logger = logging.getLogger(__name__)


def splitDF_cities(df, city_list):
    """Split *df* into a list of one sub-dataframe per city.

    Arguments:
        df {DataFrame} -- Dataframe containing a `city` column.
        city_list {str, list} -- Cities to extract; output order follows
            this list (first city -> index 0, and so on).

    Returns:
        list -- city_list-ordered list of per-city dataframes.
    """
    logger.info("the dataframe is appending city names for %s cities", len(city_list))
    return [df[df.city == town] for town in city_list]
def get_target(df_dict, target, save_path=None):
    """Extract the target column from every city dataframe.

    Arguments:
        df_dict {list} -- List of city-specific dataframes.
        target {str} -- Name of the target column.

    Keyword Arguments:
        save_path {str} -- Optional path to save the targets to as one CSV.
            (default: {None})

    Returns:
        yDict {list} -- List of target Series, in the same city order.
    """
    yDict = []
    for frame in df_dict:  # one target Series per city dataframe
        yDict.append(frame[target])
    if save_path is not None:  # optional saving
        # Bug fix: yDict is a plain list, which has no .to_csv — the old
        # code raised AttributeError. Concatenate the per-city targets
        # into a single Series before writing.
        pd.concat(yDict).to_csv(save_path, header=True)
        logger.info("the target data was saved to %s", save_path)
    return yDict
def split_data(XDict, yDict, path_xDict=None, path_yDict=None, train_size=0.7, test_size=0.3, random_state=24):
    """Split each city's features/target into train and test sets.

    Arguments:
        XDict {list} -- List of per-city feature dataframes.
        yDict {list} -- List of per-city target Series, same order.

    Keyword Arguments:
        path_xDict {str} -- Optional path to pickle the split feature data to. (default: {None})
        path_yDict {str} -- Optional path to pickle the split target data to. (default: {None})
        train_size {float} -- Fraction of data used for training (default: {0.7})
        test_size {float} -- Fraction of data used for testing (default: {0.3})
        random_state {int} -- Random state for reproducible splits. (default: {24})

    Returns:
        finalxDict {list} -- Per-city dicts with "train" (and, when non-empty, "test") feature data.
        finalyDict {list} -- Per-city dicts with "train" (and, when non-empty, "test") target data.
    """
    finalxDict = []
    finalyDict = []
    for i in range(len(XDict)):  # loop over the per-city dataframes
        cityX = XDict[i]  # features for city i
        cityy = yDict[i]  # target for city i
        # Bug fix: honor the train_size/test_size arguments. The old code
        # hard-coded train_size=0.7, silently ignoring both parameters.
        X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
            cityX, cityy, train_size=train_size, test_size=test_size, random_state=random_state)
        # Per-city dicts holding the train (and optional test) pieces:
        X = dict(train=X_train)
        y = dict(train=y_train)
        if len(X_test) > 0:
            X["test"] = X_test
            y["test"] = y_test
        finalxDict.append(X)
        finalyDict.append(y)
    # Optional persistence.
    # Bug fix: the results are lists of dicts of dataframes, which have no
    # .to_csv (the old code raised AttributeError). Pickle them instead
    # (pickle is already imported at module level).
    if path_xDict is not None:
        with open(path_xDict, "wb") as f:
            pickle.dump(finalxDict, f)
        logger.info("final x dictionary saved to %s", path_xDict)
    if path_yDict is not None:
        with open(path_yDict, "wb") as f:
            pickle.dump(finalyDict, f)
        logger.info("final y dictionary saved to %s", path_yDict)
    return finalxDict, finalyDict
def model_train(xDict, yDict, **kwargs):
    """Train each city model and save a dictionary of models to a pickle.

    kwargs must contain "choose_features" and "split_data" sub-dicts, which
    are forwarded to choose_features() and split_data() respectively.

    Arguments:
        xDict {Dictionary} -- Dictionary of dataframes with feature variables split into test and train, indexed by the city list.
        yDict {Dictionary} -- Dictionary of dataframes with target variable split into test and train, indexed by the city list.
    Returns:
        models {Model} -- All models of the cities
        finalxDict {Dictionary} -- Dictionary of dataframes with feature variables split into test and train, indexed by the city list.
        finalyDict {Dictionary} -- Dictionary of dataframes with target variable split into test and train, indexed by the city list.
    """
    x_final = []
    #choose features for training according to config yml file.
    for i in range(len(xDict)):
        X = xDict[i]
        X = choose_features(X, **kwargs["choose_features"])
        x_final.append(X)
    logger.info("length of x features is %s", len(x_final))
    #split data
    finalxDict, finalyDict = split_data(x_final, yDict, **kwargs["split_data"])
    #create list of models for each city
    models = []
    for i in range(len(finalxDict)):
        X = finalxDict[i]
        y = finalyDict[i]
        model = LinearRegression()
        # NOTE(review): only the first 10 feature columns are used for the
        # fit — confirm this matches the configured feature set.
        X_train = X["train"].iloc[:, 0:10]
        y_train = y["train"]
        model.fit(X_train, y_train)
        models.append(model)
    logger.info("%s models made for cities", len(models))
    return models, finalxDict, finalyDict
def model_score(models, xDict, yDict, city_list, path_results=None, **kwargs):
    """Score every city model, returning the train/test r-squared values.

    Arguments:
        models {List} -- One fitted model per city.
        xDict {List} -- Per-city dicts holding "train"/"test" feature data.
        yDict {List} -- Per-city dicts holding "train"/"test" target data.
        city_list {str, list} -- Cities in the same order as the lists above.

    Keyword Arguments:
        path_results {str} -- Where to save the results CSV (default: {None})

    Returns:
        results {DataFrame} -- r-squared of train and test per city.
    """
    r2Train_list = []
    r2Test_list = []
    # Compute r-squared on both the train and the test split of each city.
    for model, x_split, y_split in zip(models, xDict, yDict):
        r2Train_list.append(model.score(x_split["train"], y_split["train"]))
        r2Test_list.append(model.score(x_split["test"], y_split["test"]))
    # Assemble the per-city results table.
    results = pd.DataFrame(index=city_list)
    results['r2_Train'] = r2Train_list
    results['r2_Test'] = r2Test_list
    if path_results is not None:
        results.to_csv(path_results)
        logger.info("Model scoring results are saved to %s", path_results)
    return results
def format_coefs(models, columns, city_list, path_save=None):
    """Collect model coefficients into a single labelled dataframe.

    Arguments:
        models {List} -- One fitted model per city (must expose .coef_).
        columns {str, list} -- Column labels, i.e. the feature names trained on.
        city_list {str, List} -- Row labels, one per model/city.
        path_save {str} -- Optional CSV path to save the table to. (default: {None})

    Returns:
        coefdf {DataFrame} -- Coefficients, rows = cities, columns = features.
    """
    # One coefficient row per model, labelled by feature and city.
    coefdf = pd.DataFrame([m.coef_ for m in models])
    coefdf.columns = columns
    coefdf.index = city_list
    if path_save is not None:
        coefdf.to_csv(path_save)
        logger.info("coeficient data saved to %s", path_save)
    return coefdf
def run_train(args):
    """Orchestrates the training of the model using command line arguments.

    Reads the YAML config, loads the feature CSV (--input), trains one
    model per city, pickles the model list to --output, and writes the
    score/coefficient summaries as configured.

    Raises:
        ValueError: when --input or --output is missing.
    """
    with open(args.config, "r") as f:
        # Bug fix: yaml.load() without an explicit Loader is deprecated
        # (PyYAML >= 5.1) and a TypeError on PyYAML >= 6. safe_load is the
        # correct call for plain configuration data.
        config = yaml.safe_load(f)
    config_try = config['train_model']
    if args.input is not None:
        df = pd.read_csv(args.input)
        logger.info("Features for input into model loaded from %s", args.input)
    else:
        raise ValueError("Path to CSV for input data must be provided through --input for training.")
    # Per-city pipeline: split by city, extract targets, train models.
    df_dict = splitDF_cities(df, **config_try['splitDF_cities'])
    yDict = get_target(df_dict, **config_try['get_target'])
    models, xDict, yDict = model_train(df_dict, yDict, **config_try['model_train'])
    #save model to output argument path
    if args.output is not None:
        with open(args.output, "wb") as f:
            pickle.dump(models, f)
        logger.info("Trained model object saved to %s", args.output)
    else:
        raise ValueError("Path to save models must be given with --output to use for app running.")
    # Summaries: r-squared per city and the coefficient table.
    columns = xDict[0]['train'].columns
    results = model_score(models, xDict, yDict, **config_try['model_score'])
    coefdf = format_coefs(models, columns, **config_try['format_coefs'])
if __name__ == "__main__":
    # Command-line entry point: parse file paths, then train one model per
    # city via run_train().
    parser = argparse.ArgumentParser(description="Train models for each city to predict price")
    parser.add_argument('--config', help='path to yaml file with configurations', default="config.yml")
    parser.add_argument('--input', help='path to features data', default='data/data_features.csv')
    parser.add_argument("--output", default="data/model.pkl",
                        help="Path to saving models.")
    args = parser.parse_args()
run_train(args) |
# -*- coding: utf-8 -*-
from src.sentence_embedding.model import UniSkip, Encoder
from src.sentence_embedding.data_loader import DataLoader
from src.sentence_embedding.vocab import load_dictionary
from src.sentence_embedding.config import *
from torch import nn
import numpy as np
from torch.autograd import Variable
import torch
import jieba
from src.utils import singleton
@singleton
class UsableEncoder:
    """Singleton wrapper that loads a trained UniSkip model and exposes
    sentence -> skip-thought embedding via encode()."""

    def __init__(self, model_path='./sentence_embedding/saved_models/skip_best', dict_path='./sentence_embedding/data/faq.txt.pkl'):
        print("Preparing the DataLoader. Loading the word dictionary")
        # Fix: load the dictionary once. The original called
        # load_dictionary() twice for the debug print and a third time for
        # the DataLoader, tripling the disk work for the same result
        # (load_dictionary is presumably a pure file read — confirm).
        word_dict = load_dictionary(dict_path)
        print("sentence_emb.py: load dict", word_dict, "\n",
              word_dict)
        self.d = DataLoader(sentences=[''], word_dict=word_dict)
        self.encoder = None
        print("Loading encoder from the saved model at {}". format(model_path))
        model = UniSkip()
        # map_location forces weights onto CPU regardless of where they
        # were saved; moved to GPU afterwards if configured.
        model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
        self.encoder = model.encoder
        if USE_CUDA:
            self.encoder.cuda(CUDA_DEVICE)

    def encode(self, sentence):
        """Return the embedding of `sentence` as a numpy array of shape
        (-1, thought_size)."""
        sen_idx = [self.d.convert_sentence_to_indices(sentence)]
        sen_idx = torch.stack(sen_idx)
        sen_emb, _ = self.encoder(sen_idx)
        sen_emb = sen_emb.view(-1, self.encoder.thought_size)
        sen_emb = sen_emb.data.cpu().numpy()
        return np.array(sen_emb)
if __name__ == "__main__":
    # Smoke test: segment one Chinese sentence with jieba and encode it
    # using locally-saved model/dictionary files (paths relative to CWD).
    import os
    print(os.getcwd())
    model_path = './saved_models/skip_best'
    dict_path = './data/faq.txt.pkl'
    usable_encoder = UsableEncoder(model_path, dict_path)
    sentence = u'实现社会主义制度'
    # jieba.cut yields word tokens; join them with spaces as the encoder
    # expects whitespace-separated input.
    sent_seg = jieba.cut(sentence)
    sent_new = ' '.join(sent_seg)
    sent_strip = sent_new.strip()
    sentence_emb = usable_encoder.encode(sent_strip)
    print(sentence_emb)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-18 13:21
from __future__ import unicode_literals
from django.db import migrations, models
import imagekit.models.fields
class Migration(migrations.Migration):
    """Initial migration: creates the Image model (name, description,
    processed image file, and auto-managed timestamps)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
                ('desc', models.TextField()),
                # ProcessedImageField runs imagekit processing on upload.
                ('image', imagekit.models.fields.ProcessedImageField(upload_to='images')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
|
"""
A network flow multiple object tracker
Zhang et al. 2008 Section 3
This formulation can be mapped into a cost-flow network
G(X ) with source s and sink t. Given an observation set
X : for every observation xi ∈ X , create two nodes ui, vi,
create an arc (ui, vi) with cost c(ui, vi) = Ci and flow
f(ui, vi) = fi, an arc (s, ui) with cost c(s, ui) = Cen,i
and flow f(s, ui) = fen,i, and an arc (vi, t) with cost
c(vi, t) = Cex,i and flow f(vi, t) = fex,i. For every transition
Plink(xj |xi) ≠ 0, create an arc (vi, uj ) with cost
c(vi, uj ) = Ci,j and flow f(vi, uj ) = fi,j . An example
of such a graph is shown in Figure 2. Eqn.10 is equivalent
to the flow conservation constraint and Eqn.11 to the cost
of flow in G. Finding optimal association hypothesis T ∗ is
equivalent to sending the flow from source s to sink t that
minimizes the cost.
Cen = -log(Pentr(xi))
Cex = -log(Pexit(xi))
Cij = -Log(Plink(xi|xj))
Ci = log(Bi/(1-Bi))
Bi := miss detection rate of detector
Pentr = Pexit
= #traj / #hyp
Plink = Psize*Pposition*Pappearance*Ptime
Cost Strategies:
- Try dot product on the encoded state (Nilanjan)
- allows for computation of all forward passes before graph time
- Aggregate all the node pairs with edges between them,
then forward pass, the set cost
- Use engineered cost
- Use engineered + network cost
"""
import config
from lib.Database import Database
import numpy as np
import os
from os.path import isdir, isfile, join
import shutil
import traceback
import multiprocessing
import subprocess
import threading
import concurrent.futures
import tempfile
import queue
import asyncio
from scipy.optimize import golden
from skimage import io
import time
from uuid import uuid4, UUID
import pyMCFSimplex
async def main(args):
    """Dispatch CLI tracking requests.

    args[0] == "deep" routes to the DeepVelocity model paths
    ("all_models" or a single model/epoch); anything else is treated as an
    experiment UUID for a plain track_experiment run.
    """
    if len(args) < 1:
        print("What you want to track?")
        print("USING: experiment-uuid [method]")
    else:
        if args[0] == "deep":
            # Bug fix: guard against missing sub-arguments. The original
            # indexed args[1] unconditionally and raised IndexError when
            # only "deep" was supplied.
            if len(args) < 2:
                print("USING: experiment-uuid [method]")
            elif args[1] == "all_models":
                start = time.time()
                await track_allModels(*args[2:])
                # print('Total time:',time.time()-start)
            else:
                start = time.time()
                await track_model(*args[1:])
                # print('Total time:',time.time()-start)
        else:
            start = time.time()
            return await track_experiment(*args)
async def track_model(experiment_uuid, method, modelName, epoch):
    """Track one experiment using a single saved epoch of a DeepVelocity
    model; the method label is suffixed with the weight-file stem."""
    from lib.models.DeepVelocity import DeepVelocity

    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    dv = DeepVelocity()
    dv.compile()
    model = dv.probabilityNetwork
    experimentPath = os.path.join(config.training_dir, modelName)
    weightEpoch = "weightsEpoch{epoch}.h5".format(epoch=epoch)
    # Only accept the requested epoch if its weight file actually exists.
    available = [f for f in os.listdir(experimentPath) if f.startswith("weight")]
    if weightEpoch not in available:
        print("weight file not found")
        return
    model.load_weights(os.path.join(experimentPath, weightEpoch))
    methodTmp = method + weightEpoch.split(".")[0]
    print("tracking", methodTmp)
    return await track_experiment(experiment_uuid, methodTmp, model)
async def track_allModels(experiment_uuid, method, modelName):
    """Run tracking once for every saved weight epoch of a model, reusing
    one network instance and swapping its weights each iteration."""
    from lib.models.DeepVelocity import DeepVelocity

    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    dv = DeepVelocity()
    dv.compile()
    model = dv.probabilityNetwork
    experimentPath = os.path.join(config.training_dir, modelName)
    for weightEpoch in [f for f in os.listdir(experimentPath) if f.startswith("weight")]:
        model.load_weights(os.path.join(experimentPath, weightEpoch))
        methodTmp = method + weightEpoch.split(".")[0]
        print("tracking", methodTmp)
        await track_experiment(experiment_uuid, methodTmp, model)
async def track_experiment(experiment_uuid, method="Tracking", model=None):
"""
"""
verbose = False
cpus = multiprocessing.cpu_count()
loop = asyncio.get_event_loop()
db = Database()
if isinstance(model, str):
from lib.models.DeepVelocity import DeepVelocity
DV = DeepVelocity()
DV.compile()
model = DV.probabilityNetwork
weightFile = os.path.join(config.model_dir, model, ".h5")
model.load_weights(weightFile)
# Clone the experiment
try:
start = time.time()
tx, transaction = await db.transaction()
new_experiment_uuid, frame_uuid_map, track_uuid_map = await clone_experiment(
experiment_uuid, tx, testing=False, method=method
)
new_experiment_dir = os.path.join(
config.experiment_dir, str(new_experiment_uuid)
)
if verbose:
print("clone time:", time.time() - start)
# End Cloning
# 2) Perform tracking analysis
async for segment in tx.cursor(
"""
SELECT segment, number
FROM segment
WHERE experiment = $1
ORDER BY number ASC
""",
experiment_uuid,
):
if verbose:
print("tracking segment", segment["number"])
mcf_graph = MCF_GRAPH_HELPER()
mcf_graph.add_node("START")
mcf_graph.add_node("END")
Ci = -100
Cen = 150
Cex = 150
edge_data = dict()
dvEdges = []
costs = []
dvCosts = []
q = """
SELECT f1.frame as fr1, f2.frame as fr2,
t1.location as location1, t2.location as location2,
t1.bbox as bbox1, t2.bbox as bbox2,
t1.latent as latent1, t2.latent as latent2,
p1.area as area1, p2.area as area2,
p1.intensity as intensity1, p2.intensity as intensity2,
p1.radius as radius1, p2.radius as radius2,
p1.category as category1, p2.category as category2,
p1.perimeter as perimeter1, p2.perimeter as perimeter2,
tr1, tr2,
cost1,
cost2,
cost3
FROM frame f1, frame f2,track t1, track t2, particle p1, particle p2
JOIN LATERAL (
SELECT t3.track AS tr1, tr2, cost1, cost2, cost3
FROM track t3
JOIN LATERAL (
SELECT t4.track AS tr2,
((1 + (t3.latent <-> t4.latent))
*(1 + (t3.location <-> t4.location))) AS cost1,
(1 + (t3.location <-> t4.location)) AS cost2,
(1 + (t3.latent <-> t4.latent)) AS cost3
FROM track t4
WHERE t4.frame = f2.frame
ORDER BY cost1 ASC
LIMIT 5
) C ON TRUE
WHERE t3.frame = f1.frame
) E on true
WHERE f1.number = f2.number-1
AND t1.track = tr1 AND t2.track = tr2
AND t1.particle = p1.particle AND t2.particle = p2.particle
AND f1.segment = '{segment}'
AND f2.segment = '{segment}'
ORDER BY f1.number ASC;
"""
# The following uses no deep learning
## Developed on the Syncrude bead 200-300 um megaspeed camera video
q = """
SELECT f1.frame as fr1, f2.frame as fr2,
t1.location as location1, t2.location as location2,
t1.bbox as bbox1, t2.bbox as bbox2,
t1.latent as latent1, t2.latent as latent2,
p1.area as area1, p2.area as area2,
p1.intensity as intensity1, p2.intensity as intensity2,
p1.radius as radius1, p2.radius as radius2,
p1.category as category1, p2.category as category2,
p1.perimeter as perimeter1, p2.perimeter as perimeter2,
p1.major as major1, p2.major as major2,
p1.minor as minor1, p2.minor as minor2,
p1.eccentricity as eccentricity1, p2.eccentricity as eccentricity2,
p1.orientation as orientation1, p2.orientation as orientation2,
p1.solidity as solidity1, p2.solidity as solidity2,
tr1, tr2,
cost1,
cost2,
cost3
FROM frame f1, frame f2,track t1, track t2, particle p1, particle p2
JOIN LATERAL (
SELECT t3.track AS tr1, tr2, cost1, cost2, cost3
FROM track t3
JOIN LATERAL (
SELECT t4.track AS tr2,
((1 + (t3.latent <-> t4.latent))
*(1 + (t3.location <-> t4.location))) AS cost1,
(1 + (t3.location <-> t4.location)) AS cost2,
(1 + (t3.latent <-> t4.latent)) AS cost3
FROM track t4
WHERE t4.frame = f2.frame
ORDER BY cost2 ASC
LIMIT 5
) C ON TRUE
WHERE t3.frame = f1.frame
) E on true
WHERE f1.number = f2.number-1
AND t1.track = tr1 AND t2.track = tr2
AND t1.particle = p1.particle AND t2.particle = p2.particle
AND f1.segment = '{segment}'
AND f2.segment = '{segment}'
ORDER BY f1.number ASC;
"""
s = q.format(segment=segment["segment"])
async for edges in db.query(s):
if edges["tr1"] not in edge_data:
edge_data[edges["tr1"]] = {
"track": edges["tr1"],
"frame": edges["fr1"],
"location": edges["location1"],
"bbox": edges["bbox1"],
"latent": edges["latent1"],
"area": edges["area1"],
"intensity": edges["intensity1"],
"radius": edges["radius1"],
"perimeter": edges["perimeter1"],
"major": edges["major1"],
"minor": edges["minor1"],
"orientation": edges["orientation1"],
"solidity": edges["solidity1"],
"eccentricity": edges["eccentricity1"],
"category": edges["category1"],
}
edge_data[edges["tr2"]] = {
"track": edges["tr2"],
"frame": edges["fr2"],
"location": edges["location2"],
"bbox": edges["bbox2"],
"latent": edges["latent2"],
"area": edges["area2"],
"intensity": edges["intensity2"],
"radius": edges["radius2"],
"perimeter": edges["perimeter2"],
"major": edges["major2"],
"minor": edges["minor2"],
"orientation": edges["orientation2"],
"solidity": edges["solidity2"],
"eccentricity": edges["eccentricity2"],
"category": edges["category2"],
}
u1, v1 = "u_" + str(edges["tr1"]), "v_" + str(edges["tr1"])
u2, v2 = "u_" + str(edges["tr2"]), "v_" + str(edges["tr2"])
# create ui, create vi, create edge (ui, vi), cost CI(ui,vi), cap = 1
if mcf_graph.add_node(u1):
mcf_graph.add_node(v1)
# Heuristic reward for larger, darker; penalize undefined
larger = 500
darker = 0
area = edge_data[edges["tr1"]]["area"]
intensity = edge_data[edges["tr1"]]["intensity"]
nodeCi = Ci * (1 + (area / larger) * ((255 - intensity) / 255))
# if not edge_data[edges["tr1"]]["category"]:
# nodeCi = 10
# End heuristic reward
mcf_graph.add_edge(u1, v1, capacity=1, weight=int(nodeCi))
mcf_graph.add_edge("START", u1, capacity=1, weight=Cen)
mcf_graph.add_edge(v1, "END", capacity=1, weight=Cex)
if mcf_graph.add_node(u2):
mcf_graph.add_node(v2)
# Heuristic reward for larger, darker; penalize undefined
larger = 500
darker = 0
area = edge_data[edges["tr2"]]["area"]
intensity = edge_data[edges["tr2"]]["intensity"]
nodeCi = Ci * (1 + (area / larger) * ((255 - intensity) / 255))
# if not edge_data[edges["tr1"]]["category"]:
# nodeCi = 10
# End heuristic reward
mcf_graph.add_edge(u2, v2, capacity=1, weight=int(nodeCi))
mcf_graph.add_edge("START", u2, capacity=1, weight=Cen)
mcf_graph.add_edge(v2, "END", capacity=1, weight=Cex)
# Cij = -Log(Plink(xi|xj)), Plink = Psize*Pposiiton*Pappearance*Ptime
Cij = int(2 * edges["cost2"])
costs.append(Cij)
if not model:
mcf_graph.add_edge(v1, u2, weight=Cij, capacity=1)
else:
dvEdges.append((v1, u2))
if mcf_graph.n_nodes == 2: # only START and END nodes present (empty)
if verbose:
print("Nothing in segment")
continue
if model:
batch = DataBatch()
for v1, u2 in dvEdges:
v1Data = edge_data[UUID(v1[2:])]
u2Data = edge_data[UUID(u2[2:])]
loc1 = (v1Data["location"][0], v1Data["location"][1])
loc2 = (u2Data["location"][0], u2Data["location"][1])
frameFile1 = os.path.join(
config.experiment_dir,
experiment_uuid,
str(v1Data["frame"]),
"64x64.png",
)
frame1 = io.imread(frameFile1, as_grey=True)
frameFile2 = os.path.join(
config.experiment_dir,
experiment_uuid,
str(u2Data["frame"]),
"64x64.png",
)
frame2 = io.imread(frameFile2, as_grey=True)
latent1_string = v1Data["latent"][1:-1].split(",")
latent1 = [float(i) for i in latent1_string]
latent2_string = u2Data["latent"][1:-1].split(",")
latent2 = [float(i) for i in latent2_string]
batch.addLocation(loc1, loc2)
batch.addFrame(frame1, frame2)
batch.addLatent(latent1, latent2)
batch.addOutput([0.0, 0.0])
# make the batch divisible by # gpus
while len(batch) % 4:
# print(len(batch))
batch.addLocation(loc1, loc2)
batch.addFrame(frame1, frame2)
batch.addLatent(latent1, latent2)
batch.addOutput([0.0, 0.0])
batch.normParams()
batch.toNumpy()
batch.normalize(batch.normalizeParams)
probs = model.predict(batch.getInput())
for i, (v1, u2) in enumerate(dvEdges):
Cij = np.int32(min(-100 * np.log(probs[i][0]), 2147483647))
dvCosts.append(Cij)
mcf_graph.add_edge(v1, u2, weight=Cij, capacity=1)
if verbose:
print("dvCt", np.min(dvCosts), np.mean(dvCosts), np.max(dvCosts))
if verbose:
print("cost", np.min(costs), np.mean(costs), np.max(costs))
if verbose:
print("Solving min-cost-flow for segment")
demand = goldenSectionSearch(
mcf_graph.solve,
0,
mcf_graph.n_nodes // 4,
mcf_graph.n_nodes // 2,
10,
memo=None,
)
if verbose:
print("Optimal number of tracks", demand)
min_cost = mcf_graph.solve(demand)
if verbose:
print("Min cost", min_cost)
mcf_flow_dict = mcf_graph.flowdict()
mcf_graph = None
tracks = dict()
for dest in mcf_flow_dict["START"]:
new_particle_uuid = uuid4()
track = []
curr = dest
while curr != "END":
if curr[0] == "u":
old_particle_uuid = UUID(curr.split("_")[-1])
track.append(old_particle_uuid)
curr = mcf_flow_dict[curr][0]
tracks[new_particle_uuid] = track
if verbose:
print("Tracks reconstructed", len(tracks))
"""
Headers for Syncrude 2018
Frame ID,
Particle ID,
Particle Area,
Particle Velocity,
Particle Intensity,
Particle Perimeter,
X Position,
Y Position,
Major Axis Length,
Minor Axis Length,
Orientation,
Solidity,
Eccentricity.
"""
start = time.time()
particle_inserts = []
track_inserts = []
for new_particle_uuid, track in tracks.items():
mean_area = 0.0
mean_intensity = 0.0
mean_perimeter = 0.0
# mean_radius = 0.0
mean_major = 0.0
mean_minor = 0.0
mean_orientation = 0.0
mean_solidity = 0.0
mean_eccentricity = 0.0
category = []
for data in [edge_data[i] for i in track]:
mean_area += data["area"] / len(track)
mean_intensity += data["intensity"] / len(track)
mean_perimeter += data["perimeter"] / len(track)
# mean_radius += data["radius"] / len(track)
mean_major += data["major"] / len(track)
mean_minor += data["minor"] / len(track)
mean_orientation += data["orientation"] / len(track)
mean_solidity += data["solidity"] / len(track)
mean_eccentricity += data["eccentricity"] / len(track)
category.append(data["category"])
new_frame_uuid = frame_uuid_map[data["frame"]]
new_track_uuid = track_uuid_map[data["track"]]
track_inserts.append(
(
new_track_uuid,
new_frame_uuid,
new_particle_uuid,
data["location"],
data["bbox"],
data["latent"],
)
)
category = np.argmax(np.bincount(category))
particle_inserts.append(
(
new_particle_uuid,
new_experiment_uuid,
mean_area,
mean_intensity,
mean_perimeter,
mean_major,
mean_minor,
mean_orientation,
mean_solidity,
mean_eccentricity,
category,
)
)
await tx.executemany(
"""
INSERT INTO Particle (particle,
experiment,
area,
intensity,
perimeter,
major,
minor,
orientation,
solidity,
eccentricity,
category)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
""",
particle_inserts,
)
await tx.executemany(
"""
INSERT INTO Track (track, frame, particle, location, bbox, latent)
VALUES ($1, $2, $3, $4, $5, $6)
""",
track_inserts,
)
if verbose:
print("Tracks inserted", time.time() - start, "s")
except Exception as e: ### ERROR: UNDO EVERYTHING ! #################
print("Uh oh. Something went wrong")
traceback.print_exc()
await transaction.rollback()
if os.path.exists(new_experiment_dir):
shutil.rmtree(new_experiment_dir)
traceback.print_exc()
else:
################## OK: COMMIT DB TRANSACTRION ###############
if verbose:
print("made it! :)")
await transaction.commit()
return str(new_experiment_uuid)
async def clone_experiment(experiment_uuid, tx, method, testing=False):
    """Clone an existing experiment into a new one under fresh UUIDs.

    Duplicates the Experiment/Segment/Frame rows in the database (inside the
    caller's open transaction `tx`) and hard-links the experiment's on-disk
    files, so the clone shares file storage with the original.

    Args:
        experiment_uuid: UUID of the source experiment.
        tx: open async DB transaction/connection (asyncpg-style API).
        method: value recorded in the cloned Experiment's `method` column.
        testing: when True, skip all filesystem operations (DB work only).

    Returns:
        (new_experiment_uuid, frame_uuid_map, track_uuid_map), where the two
        maps translate original frame/track UUIDs to their new counterparts.

    NOTE(review): Track rows are not inserted here -- presumably the caller
    re-inserts tracks itself using track_uuid_map; confirm with callers.
    """
    # Create maps
    new_experiment_uuid = uuid4()
    experiment_path = join(config.experiment_dir, str(experiment_uuid))
    # Loose files at the experiment root, as opposed to per-frame directories.
    base_files = [
        file
        for file in os.listdir(experiment_path)
        if isfile(join(experiment_path, file))
    ]
    s = """
    SELECT frame
    FROM frame
    WHERE experiment = '{experiment}'
    """
    q = s.format(experiment=experiment_uuid)
    dbFrames = []
    async for row in tx.cursor(q):
        dbFrames.append(str(row["frame"]))
    # Frame directories actually present on disk.
    osFrames = [
        frame
        for frame in os.listdir(experiment_path)
        if isdir(join(experiment_path, frame))
    ]
    # old frame UUID -> new frame UUID
    frame_uuid_map = {UUID(f): uuid4() for f in dbFrames}
    s = """
    SELECT t.frame as frame, track
    FROM track t, frame f
    WHERE t.frame = f.frame
    AND f.experiment = '{experiment}'
    """
    q = s.format(experiment=experiment_uuid)
    dbTracks = []
    async for row in tx.cursor(q):
        dbTracks.append(str(row["track"]))
    # Track files on disk: 40 chars == 36-char UUID + 4-char extension
    # (e.g. ".png") -- presumably; confirm against the writer side.
    osTracks = [
        (frame, os.path.splitext(track))
        for frame in osFrames
        for track in os.listdir(join(experiment_path, frame))
        if len(track) == 40
    ]
    # old track UUID -> new track UUID
    track_uuid_map = {UUID(track): uuid4() for track in dbTracks}
    # tracks = [(frame, (uuid, ext))]
    # Copy data
    new_experiment_path = join(config.experiment_dir, str(new_experiment_uuid))
    if not testing:
        os.mkdir(new_experiment_path)
    # Clone the Experiment row, overriding only the UUID and method.
    await tx.execute(
        """
        INSERT INTO Experiment (experiment, day, name, method, notes)
        SELECT $1, day, name, $3, notes FROM Experiment
        WHERE experiment = $2
        """,
        new_experiment_uuid,
        experiment_uuid,
        method,
    )
    if not testing:
        # Hard-link root files and create empty new frame directories.
        for file in base_files:
            os.link(join(experiment_path, file), join(new_experiment_path, file))
        for old_frame_uuid, new_frame_uuid in frame_uuid_map.items():
            os.mkdir(join(new_experiment_path, str(new_frame_uuid)))
    segment_uuid_map = {}
    segment_insert = []
    async for s in tx.cursor(
        "SELECT segment, number FROM Segment WHERE experiment = $1", experiment_uuid
    ):
        segment_uuid = uuid4()
        segment_uuid_map[s["segment"]] = {
            "segment": segment_uuid,
            "number": s["number"],
        }
        segment_insert.append((segment_uuid, new_experiment_uuid, s["number"]))
    # Create null segment for frames with no segment.
    # A workaround until segment is improved
    # segment_uuid = uuid4()
    # segment_uuid_map[None] = {"segment": segment_uuid, "number": -1}
    # segment_insert.append((segment_uuid, new_experiment_uuid, -1))
    await tx.executemany(
        "INSERT INTO Segment (segment, experiment, number) VALUES ($1, $2, $3)",
        segment_insert,
    )
    # old frame UUID -> new segment UUID, used when cloning Frame rows below.
    frame_segment_map = {}
    async for f in tx.cursor(
        "select frame, segment From Frame WHERE experiment = $1", experiment_uuid
    ):
        frame_segment_map[f["frame"]] = segment_uuid_map[f["segment"]]["segment"]
    await tx.executemany(
        """
        INSERT INTO Frame (frame, experiment, segment, number)
        SELECT $1, $2, $3, number FROM Frame
        WHERE frame = $4
        """,
        [
            (
                frame_uuid_map[UUID(frame)],
                new_experiment_uuid,
                frame_segment_map[UUID(frame)],
                UUID(frame),
            )
            for frame in dbFrames
        ],
    )
    if not testing:
        # Hard-link each track file into its new frame directory under its
        # new track UUID, keeping the original file extension.
        for track in osTracks:
            os.link(
                join(experiment_path, track[0], "".join(track[1])),
                join(
                    new_experiment_path,
                    str(frame_uuid_map[UUID(track[0])]),
                    str(track_uuid_map[UUID(track[1][0])]) + track[1][1],
                ),
            )
    return (new_experiment_uuid, frame_uuid_map, track_uuid_map)
class MCF_GRAPH_HELPER:
    """
    Add nodes with UUID substrings to this helper,
    will manage the mapping between nodes and an integer
    name.
    Simplified for our purposes, the graph will always have a
    super-source/super-sink 'START'/'END',
    and will be assigned demand and -demand respectively.
    Simplified for our purposes, the capacity will always have
    a lower bound zero, and can not be set explicitly.
    """

    def __init__(self, verbose=False):
        # `nodes` is a bidirectional map holding BOTH directions in one dict:
        # name -> integer id AND integer id -> name.
        self.nodes = dict()
        # Edge tuples: (start_id, end_id, low, high, weight); low is always 0.
        self.edges = []
        self.n_nodes = 0
        self.n_edges = 0
        self.demand = 0
        self.verbose = verbose

    def add_node(self, k):
        """
        expects uuid
        pyMCFSimplex node names {1,2,...,n}
        returns true if the node was added, false
        if the node was added prior.
        """
        if k not in self.nodes:
            self.nodes[k] = self.n_nodes + 1
            self.nodes[self.n_nodes + 1] = k
            self.n_nodes += 1
            return True
        return False

    def add_edge(self, start, end, capacity, weight):
        """
        expects uuid start/end nodes; both must have been added already.
        """
        self.edges.append((self.nodes[start], self.nodes[end], 0, capacity, weight))
        self.n_edges += 1

    def remove(self, k):
        """Remove node `k` and its integer alias from the mapping.

        BUG FIX: previously popped from the nonexistent attribute `self.d`,
        which raised AttributeError on every call; the bidirectional map
        lives in `self.nodes`.
        NOTE: n_nodes is deliberately not decremented -- integer names
        already handed out to other nodes must stay stable.
        """
        self.nodes.pop(self.nodes.pop(k))

    def get_node(self, k):
        """
        given an integer returns {'u_', 'v_'}+str(UUID)
        given {'u_', 'v_'}+str(UUID) returns integer
        (the map stores both directions, so a single lookup covers both cases)
        """
        return self.nodes[k]

    def write(self, file):
        """
        writes graph to file (DIMACS min-cost-flow format) for pyMCFSimplex
        """
        file.write("p min %s %s\n" % (self.n_nodes, self.n_edges))
        file.write("n %s %s\n" % (self.nodes["START"], self.demand))
        file.write("n %s %s\n" % (self.nodes["END"], -self.demand))
        for (start, end, low, high, weight) in self.edges:
            file.write("a %s %s %s %s %s\n" % (start, end, low, high, weight))

    def solve(self, demand):
        """Solve the min-cost flow at the given demand.

        Returns the optimal objective value, or +inf when infeasible.
        """
        self.demand = demand
        self.mcf = pyMCFSimplex.MCFSimplex()
        # pyMCFSimplex consumes DIMACS text; round-trip through a temp file.
        fp = tempfile.TemporaryFile("w+")
        self.write(fp)
        fp.seek(0)
        inputStr = fp.read()
        fp.close()
        self.mcf.LoadDMX(inputStr)
        # solve graph
        self.mcf.SetMCFTime()
        self.mcf.SolveMCF()
        if self.mcf.MCFGetStatus() == 0:
            min_cost = self.mcf.MCFGetFO()
            if self.verbose:
                print("Optimal solution: %s" % self.mcf.MCFGetFO())
                print("Time elapsed: %s sec " % (self.mcf.TimeMCF()))
            return min_cost
        else:
            if self.verbose:
                print("Problem unfeasible!")
                print("Time elapsed: %s sec " % (self.mcf.TimeMCF()))
            return float("inf")

    def flowdict(self):
        """Return {source_name: [dest_name, ...]} for every arc carrying flow."""
        mcf_flow_dict = dict()
        # Build flowdict
        # BEGIN FROM EXAMPLE
        mmx = self.mcf.MCFmmax()
        pSn = []
        pEn = []
        startNodes = pyMCFSimplex.new_uiarray(mmx)
        endNodes = pyMCFSimplex.new_uiarray(mmx)
        self.mcf.MCFArcs(startNodes, endNodes)
        for i in range(0, mmx):
            pSn.append(pyMCFSimplex.uiarray_get(startNodes, i) + 1)
            pEn.append(pyMCFSimplex.uiarray_get(endNodes, i) + 1)
        length = self.mcf.MCFm()
        cost_flow = pyMCFSimplex.new_darray(length)
        self.mcf.MCFGetX(cost_flow)
        # END FROM EXAMPLE
        for i in range(0, length):
            startNode = pSn[i]
            endNode = pEn[i]
            flow = pyMCFSimplex.darray_get(cost_flow, i)
            if flow > 0:
                if not self.get_node(startNode) in mcf_flow_dict:
                    mcf_flow_dict[self.get_node(startNode)] = []
                mcf_flow_dict[self.get_node(startNode)].append(self.get_node(endNode))
        return mcf_flow_dict
phi = (1 + np.sqrt(5)) / 2
resphi = 2 - phi


# a and b bracket the minimum; c is an interior probe biased toward a.
def goldenSectionSearch(f, a, c, b, absolutePrecision, memo=None):
    """Recursively locate the integer minimizer of f between a and b.

    Evaluations of f are memoized in `memo` so repeated probes cost nothing.
    Returns the midpoint of the final bracket once the bracket is narrower
    than `absolutePrecision`.
    """
    memo = dict() if memo is None else memo
    if abs(a - b) < absolutePrecision:
        return int((a + b) / 2)
    # New probe between c and b, pushed toward c by the golden ratio.
    d = int(c + resphi * (b - c))
    if d not in memo:
        memo[d] = f(d)
    f_d = memo[d]
    if c not in memo:
        memo[c] = f(c)
    f_c = memo[c]
    if f_d < f_c:
        return goldenSectionSearch(f, c, d, b, absolutePrecision, memo)
    return goldenSectionSearch(f, d, c, a, absolutePrecision, memo)
class DataBatch:
    """Paired-sample container for the tracking link classifier.

    Holds parallel feature lists for two linked detections "A" and "B"
    (frame crops, latent vectors, locations) plus one output label per pair.
    Samples are accumulated as Python lists via the add* methods, converted
    with toNumpy(), then optionally normalized. Iterating a batch yields one
    ([inputs], output) pair at a time.
    """

    def __init__(self):
        self.data = dict()
        self.data["frame"] = {"A": [], "B": []}
        self.data["latent"] = {"A": [], "B": []}
        self.data["location"] = {"A": [], "B": []}
        self.data["output"] = []
        # Cursor used by the iterator protocol (__iter__/__next__).
        self.current = 0

    def copy(self):
        """
        return a copy of this databatch
        (containers are copied; the elements themselves are shared)
        """
        foo = DataBatch()
        for k, v in self.data.items():
            if isinstance(v, dict):
                for _k, _v in v.items():
                    foo.data[k][_k] = _v.copy()
            else:
                foo.data[k] = v.copy()
        return foo

    def combine(self, dataBatch):
        """
        combines two databatches using the randomMasks method

        For the single randomly selected feature, the masked samples of this
        batch are overwritten by the corresponding samples of `dataBatch`.
        Requires numpy arrays (call toNumpy() on both batches first), since
        boolean-mask assignment is used.
        """
        self.randomMasks()
        self.data["frame"]["A"][self.mask["frame"]["A"]] = dataBatch.data["frame"]["A"][
            self.mask["frame"]["A"]
        ]
        self.data["frame"]["B"][self.mask["frame"]["B"]] = dataBatch.data["frame"]["B"][
            self.mask["frame"]["B"]
        ]
        self.data["latent"]["A"][self.mask["latent"]["A"]] = dataBatch.data["latent"][
            "A"
        ][self.mask["latent"]["A"]]
        self.data["latent"]["B"][self.mask["latent"]["B"]] = dataBatch.data["latent"][
            "B"
        ][self.mask["latent"]["B"]]
        self.data["location"]["A"][self.mask["location"]["A"]] = dataBatch.data[
            "location"
        ]["A"][self.mask["location"]["A"]]
        self.data["location"]["B"][self.mask["location"]["B"]] = dataBatch.data[
            "location"
        ]["B"][self.mask["location"]["B"]]

    def randomMasks(self):
        """Build the boolean masks consumed by combine().

        One feature ("frame", "latent" or "location") is chosen at random;
        its A-mask is drawn uniformly and thresholded, and the B-mask is the
        complement, so for any sample at most one of A/B gets swapped.
        Unselected features keep empty (no-op) masks.
        """
        self.mask = dict()
        self.mask["frame"] = {"A": [], "B": []}
        self.mask["latent"] = {"A": [], "B": []}
        self.mask["location"] = {"A": [], "B": []}
        self.mask["output"] = []
        # Probability of a feature remaining unchanged... sort of
        # we'll take the complement for A B state pairs probability
        # to guarantee only either A or B are randomized
        probs = {"frame": 0.5, "latent": 0.5, "location": 0.5}
        keys = ["frame", "latent", "location"]
        selectedKeys = np.random.choice(keys, size=1, replace=False)
        for key in selectedKeys:
            prob = probs[key]
            self.mask[key]["A"] = np.random.random(len(self.data[key]["A"]))
            self.mask[key]["B"] = 1.0 - self.mask[key]["A"]
            self.mask[key]["A"][self.mask[key]["A"] >= prob] = 1
            self.mask[key]["A"][self.mask[key]["A"] < prob] = 0
            self.mask[key]["B"][self.mask[key]["B"] >= prob] = 1
            self.mask[key]["B"][self.mask[key]["B"] < prob] = 0
            self.mask[key]["A"] = np.array(self.mask[key]["A"], dtype=bool)
            self.mask[key]["B"] = np.array(self.mask[key]["B"], dtype=bool)

    def join(self, dataBatch):
        """Append all samples of `dataBatch` to this batch (list mode only)."""
        self.data["frame"]["A"].extend(dataBatch.data["frame"]["A"])
        self.data["frame"]["B"].extend(dataBatch.data["frame"]["B"])
        self.data["latent"]["A"].extend(dataBatch.data["latent"]["A"])
        self.data["latent"]["B"].extend(dataBatch.data["latent"]["B"])
        self.data["location"]["A"].extend(dataBatch.data["location"]["A"])
        self.data["location"]["B"].extend(dataBatch.data["location"]["B"])
        self.data["output"].extend(dataBatch.data["output"])

    def toNumpy(self):
        """Convert every accumulated list to a numpy array in place."""
        self.data["frame"]["A"] = np.array(self.data["frame"]["A"], dtype=float)
        self.data["frame"]["B"] = np.array(self.data["frame"]["B"], dtype=float)
        self.data["latent"]["A"] = np.array(self.data["latent"]["A"])
        self.data["latent"]["B"] = np.array(self.data["latent"]["B"])
        self.data["location"]["A"] = np.array(self.data["location"]["A"])
        self.data["location"]["B"] = np.array(self.data["location"]["B"])
        self.data["output"] = np.array(self.data["output"])

    def shuffle(self):
        """Shuffle all features with the SAME permutation.

        The RNG state is restored after each array is shuffled so that
        arrays of equal length receive an identical permutation, keeping
        samples aligned across features.
        """
        rng_state = np.random.get_state()
        for k, v in self.data.items():
            if isinstance(v, dict):
                for _k, _v in v.items():
                    np.random.shuffle(_v)
                    np.random.set_state(rng_state)
            else:
                np.random.shuffle(v)
                np.random.set_state(rng_state)

    def normParams(self):
        """Compute (mean, std) normalization params for frame and location.

        NOTE: intended to be called while the data are still Python lists
        (before toNumpy()), so `A + B` concatenates the two sides rather
        than adding them elementwise. Stores self.normalizeParams.
        """
        d = {"frame": None, "location": None}
        frameMean = np.mean(self.data["frame"]["A"] + self.data["frame"]["B"])
        frameStd = np.std(self.data["frame"]["A"] + self.data["frame"]["B"])
        locMean = np.mean(self.data["location"]["A"] + self.data["location"]["B"])
        locStd = np.std(self.data["location"]["A"] + self.data["location"]["B"])
        d["frame"] = (frameMean, frameStd)
        d["location"] = (locMean, locStd)
        self.normalizeParams = d

    def normalize(self, params):
        """Apply (x - mean) / std in place; requires numpy arrays."""
        for feature, stats in params.items():
            mean, std = stats
            self.data[feature]["A"] -= mean
            self.data[feature]["A"] /= std
            self.data[feature]["B"] -= mean
            self.data[feature]["B"] /= std

    def denormalize(self, params):
        """Invert normalize(): x * std + mean, in place."""
        for feature, stats in params.items():
            mean, std = stats
            self.data[feature]["A"] *= std
            self.data[feature]["A"] += mean
            self.data[feature]["B"] *= std
            self.data[feature]["B"] += mean

    def addInput(self, A):
        """
        eng = [locationA, latentA, frameA,
               locationB, latentB, frameB]
        """
        self.addLocation(A[0], A[3])
        self.addLatent(A[1], A[4])
        self.addFrame(A[2], A[5])

    def addDataPoint(self, d):
        """Add one sample from a dict with keys loc1/2, lat1/2, frame1/2, output."""
        self.addLocation(d["loc1"], d["loc2"])
        self.addLatent(d["lat1"], d["lat2"])
        self.addFrame(d["frame1"], d["frame2"])
        self.addOutput(d["output"])

    def getDataPoint(self, i):
        """Return sample `i` as a dict (inverse of addDataPoint)."""
        r = {
            "frame1": self.data["frame"]["A"][i],
            "frame2": self.data["frame"]["B"][i],
            "lat1": self.data["latent"]["A"][i],
            "lat2": self.data["latent"]["B"][i],
            "loc1": self.data["location"]["A"][i],
            "loc2": self.data["location"]["B"][i],
            "output": self.data["output"][i],
        }
        return r

    def getInput(self, start=None, end=None):
        """Return the model input list [locA, latA, frameA, locB, latB, frameB]."""
        engA = [
            self.data["location"]["A"][start:end],
            self.data["latent"]["A"][start:end],
            self.data["frame"]["A"][start:end],
        ]
        engB = [
            self.data["location"]["B"][start:end],
            self.data["latent"]["B"][start:end],
            self.data["frame"]["B"][start:end],
        ]
        return engA + engB

    def getOutput(self, start=None, end=None):
        return self.data["output"][start:end]

    def addOutput(self, A):
        self.data["output"].append(A)

    def addLocation(self, A, B):
        self.data["location"]["A"].append(A)
        self.data["location"]["B"].append(B)

    def addLatent(self, A, B):
        self.data["latent"]["A"].append(A)
        self.data["latent"]["B"].append(B)

    def addFrame(self, A, B):
        self.data["frame"]["A"].append(A)
        self.data["frame"]["B"].append(B)

    def setOutput(self, A):
        self.data["output"] = A

    def __len__(self):
        return len(self.data["output"])

    def __iter__(self):
        return self

    def __next__(self):
        # BUG FIX: was `self.current > len(self)`, which tried to read one
        # element past the end (IndexError) instead of stopping cleanly.
        if self.current >= len(self):
            raise StopIteration
        self.current += 1
        dataInputs = self.getInput(self.current - 1, self.current)
        dataInput = [i[0] for i in dataInputs]
        dataOutput = self.getOutput(self.current - 1, self.current)
        return (dataInput, dataOutput)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-08-21 16:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional, unique `system_name` column to the DataSource model."""

    dependencies = [
        ('fetcher', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='datasource',
            name='system_name',
            # null=True + unique=True: rows may omit the name, but any
            # non-NULL value must be unique across data sources.
            field=models.CharField(default=None, max_length=255, null=True, unique=True),
        ),
    ]
|
import sys
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.ticker
import seaborn as sns
# Output PDF path derived from this script's own filename (foo.py -> foo.pdf).
plotfile = sys.argv[0].replace('.py', '.pdf')
sns.set_style('white')
fig, ax = plt.subplots(figsize=(5, 5))
# Inclination angles from 0 up to (but excluding) 90 degrees.
inc = np.linspace(0.0, 0.5*np.pi, 500, endpoint=False)
inc_deg = np.degrees(inc)
# Grid of dimensionless radii of curvature (Rc) and conic discriminants (Tc).
Rcs = [0.5, 1.0, 2.0, 4.0, 8.0]
# NOTE(review): 1e-8 presumably stands in for Tc = 0 to avoid dividing by
# Tc in Rc_prime below -- confirm.
Tcs = [-2.0, -1.0, -0.5, 1e-8, 0.5, 1.0, 2.0]
n_Rc = len(Rcs)
n_Tc = len(Tcs)
lws = np.linspace(1.0, 2.0, n_Rc)
# Dash patterns (on/off lengths in points), one per Rc value.
dash_solid = []
dash_dashed = [3, 2]
dash_dotted = [1, 2]
dash_dot_dashed = [1, 2, 4, 2]
dash_triple_dot_dashed = [1, 2, 1, 2, 1, 2, 4, 2]
dashes = [dash_triple_dot_dashed, dash_solid,
          dash_dashed, dash_dotted, dash_dot_dashed]
lss = ['-.', '-', '--', ':', '-.']
alphas = np.linspace(1.0, 0.2, n_Rc)
cols = sns.color_palette('magma', n_colors=n_Tc)
def Rc_prime(inc, Tc, Rc):
    """Projected dimensionless radius of curvature at inclination `inc`."""
    tan2 = np.tan(inc) ** 2
    f = np.sqrt(1.0 + Tc * tan2)
    numerator = Rc * (1 + tan2)
    denominator = f * (1.0 + Rc * (f - 1.0) / Tc)
    return numerator / denominator
def Tc_prime(inc, Tc):
    """Projected conic discriminant at inclination `inc`."""
    tan2 = np.tan(inc) ** 2
    return Tc * (1.0 + tan2) / (1.0 + Tc * tan2)
# Draw one projection curve per (Rc, Tc) pair; reversed iteration so the
# legend lists values in ascending order.
for Rc, lw, alpha, dash in list(zip(Rcs, lws, alphas, dashes))[::-1]:
    for Tc, col in list(zip(Tcs, cols))[::-1]:
        # Label only the Rc == 1.0 family to keep the legend compact.
        if Rc == 1.0:
            label = fr'$T_c = {Tc:.1f}$'
        else:
            label = None
        ax.plot(Rc_prime(inc, Tc, Rc), Tc_prime(inc, Tc),
                c=col, dashes=dash, label=label)
        # ax.plot(Rc_dash(inc, Tc, Rc), Tc_dash(inc, Tc), '.', alpha=0.1, ms=4,
        #         c=col, label=label)
        # Mark the face-on (inc = 0) starting point of each curve.
        ax.plot([Rc_prime(0, Tc, Rc)], [Tc_prime(0, Tc)], 'o', c=col)
ax.legend(ncol=1, fontsize='xx-small', frameon=True)
ax.set(
    yscale='linear',
    xlim=[0.0, 8.1],
    ylim=[-5.0, 2.1],
    xlabel=r"Projected dimensionless radius of curvature: $\widetilde{R}_{c}{}'$",
    ylabel=r"Projected conic discriminant: $T_c{}'$",
)
fig.tight_layout()
fig.savefig(plotfile)
# Emit the output filename without a newline so callers can capture it.
print(plotfile, end='')
|
class Main3:
    """Prints the quotient q1 / q2 when the selector attribute `v` equals 3."""

    def condition3(self):
        """If self.v == 3, compute and print self.q1 / self.q2."""
        if self.v != 3:
            return
        quotient = self.q1 / self.q2
        print(quotient)
# Create a Division class and implement the division function |
# Generated by Django 3.2.6 on 2021-09-27 23:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the Auction model."""

    initial = True

    dependencies = [
        # Depend on whatever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Auction',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('asset_title', models.CharField(default='default_name', max_length=100)),
                ('description', models.TextField(default='default_description')),
                ('entry_price', models.FloatField(default=None)),
                ('bid', models.FloatField(default=None)),
                ('status', models.CharField(default='OPEN', max_length=20)),
                ('start_date', models.DateTimeField(auto_now_add=True)),
                ('end_date', models.DateTimeField()),
                # max_length=66 fits a 0x-prefixed 32-byte hex digest --
                # presumably an on-chain transaction hash; confirm with callers.
                ('hash', models.CharField(default=None, max_length=66, null=True)),
                ('txId', models.CharField(default=None, max_length=66, null=True)),
                ('vendor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='auction_vendor', to=settings.AUTH_USER_MODEL)),
                ('winner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='auction_winner', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
# A basic code for matrix input from user
def main(read=input, write=print):
    """Read two matrices from the user and print their element-wise sum.

    BUG FIX vs. original: the second entry banner said "matris A" while
    reading matrix B; it now says B.

    Args:
        read: callable with input()'s signature; injectable for testing.
        write: callable with print()'s signature; injectable for testing.

    Returns:
        The sum matrix as a list of lists, or None when A and B have
        incompatible dimensions (nothing to add).
    """
    write("Este programa realiza la suma de dos matrices\n ")
    R = int(read("Ingrese el numero de filas de la matrix A: "))
    C = int(read("Ingrese el numero de columnas de la matrix A: "))
    M = int(read("Ingrese el numero de filas de la matrix B: "))
    N = int(read("Ingrese el numero de columnas de la matrix B: "))
    # Matrices can only be added when their shapes agree exactly.
    if R != M or C != N:
        write()
        write("Las matrices no se pueden sumar")
        return None
    matrixA = []
    matrixB = []
    matrixC = []
    write("Ingrese matris A por pantalla ")
    write()
    for i in range(R):
        a = []
        for j in range(C):
            a.append(int(read("Elemento de A [%d,%d] ---> " % (i, j))))
        matrixA.append(a)
    write()
    # Fixed banner: this section reads matrix B (previously said "A").
    write("Ingrese matris B por pantalla ")
    write()
    for i in range(R):
        b = []
        for j in range(C):
            b.append(int(read("Elemento de B [%d,%d] ---> " % (i, j))))
        matrixB.append(b)
    write()
    write("-------Matris A-------")
    for i in range(R):
        for j in range(C):
            write(" ", matrixA[i][j], end=" ")
        write()
    write()
    write("-------Matris B-------")
    for i in range(R):
        for j in range(C):
            write(" ", matrixB[i][j], end=" ")
        write()
    write("------Matris Suma-------")
    write()
    for i in range(R):
        c = []
        for j in range(C):
            c.append(matrixA[i][j] + matrixB[i][j])
        matrixC.append(c)
    for i in range(R):
        for j in range(C):
            write(" ", matrixC[i][j], end=" ")
        write()
    return matrixC


if __name__ == "__main__":
    main()
|
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
import pathlib
from app import app
PATH = pathlib.Path(__file__).parent
# Data files live one directory level above this page module.
DATA_PATH = PATH.joinpath("../data/").resolve()
#------------- Importing ----------------
# Pre-processed job-postings dataset consumed by every figure on this page.
df = pd.read_csv(DATA_PATH.joinpath("final_df.csv"))
#------ Functions ------
def salary_analysis(df):
    """Distribution plot of the average posted salary per job family."""
    salaries = df.copy()
    salaries['Min Salary'] = salaries['Min Salary'].astype('float')
    salaries['Max Salary'] = salaries['Max Salary'].astype('float')
    # Midpoint of the posted salary band.
    salaries['Average Salary'] = salaries['Min Salary'] + (
        (salaries['Max Salary'] - salaries['Min Salary']) / 2
    )
    by_job = salaries[['Type of Job', 'Average Salary']]

    def _salaries_for(job, floor=None):
        # Average-salary series for one job family, optionally dropping
        # values at or below `floor`.
        selection = by_job[by_job['Type of Job'] == job]
        if floor is not None:
            selection = selection[selection['Average Salary'] > floor]
        return selection['Average Salary']

    # BA/DA rows below 30 are discarded -- presumably bad parses; confirm.
    hist_data = [
        _salaries_for('Business Analyst', floor=30),
        _salaries_for('Data Analyst', floor=30),
        _salaries_for('Data Engineer'),
        _salaries_for('Data Scientist'),
    ]
    group_labels = ['Business Analyst', 'Data Analyst', 'Data Engineer', 'Data Scientist']
    # Smoothed distribution curves only (no histogram bars).
    fig = ff.create_distplot(hist_data,
                             group_labels,
                             bin_size=5,
                             show_hist=False)
    fig.update_layout(
        title='Salary Range',
        xaxis_title="Salary in US dollars",
        yaxis_title="Frequency",
        legend_title="Type of Job",
        title_x=0.5)
    return fig
def location_bar(df):
    """Horizontal bar chart of the ten cities with the most job postings."""
    top_cities = df['Location'].value_counts().nlargest(n=10)
    fig = px.bar(
        y=top_cities.index,
        x=top_cities.values,
        color=top_cities.values,
        text=top_cities.values,
        color_continuous_scale='mint',
        orientation='h',
        title='Number of Job Postings per City',
    )
    fig.update_traces(
        hovertemplate='Number of Job Postings: %{x} <br>City: %{y} <extra></extra>',
        textposition='outside',
        opacity=0.7,
    )
    fig.update_layout(
        showlegend=False,
        xaxis_title="Count",
        yaxis_title="City",
        title="Top 10 Cities by Number of Job Postings",
        title_x=0.5,
    )
    # Hide the continuous color bar -- color only re-encodes the same count.
    fig.update(layout_coloraxis_showscale=False)
    return fig
def map(df):
    """US choropleth of job postings per region, colored state by state.

    NOTE(review): this function shadows the builtin `map`; renaming it would
    require updating the layout below, so it is only flagged here.
    """
    # Postings count per region.
    df_map = df.groupby('Region').size().to_frame().reset_index().rename(columns = {0:'Number of postings'})
    # State -> region lookup; merge then de-duplicate so each state appears once.
    map_df = df[['State Code', 'Region']]
    cloro_df = pd.merge(df_map,map_df).drop_duplicates().reset_index(drop = True)
    data_choropleth = dict(type='choropleth',
                           locations=cloro_df['State Code'],
                           # There are three ways to 'merge' your data with the data pre embedded in the map
                           locationmode='USA-states',
                           z= cloro_df['Number of postings'].astype(int),
                           text=cloro_df['Region'],
                           colorscale='mint'
                           )
    layout_choropleth = dict(geo=dict(scope='usa'),
                             title=dict(
                                 text='Number of Job Postings per Region',
                                 x=.5  # Title relative position according to the xaxis, range (0,1)
                             ),
                             #height=500,
                             #width=750
                             )
    fig = go.Figure(data=data_choropleth, layout=layout_choropleth)
    # Pin the color range so every region shares one fixed 0-4000 scale.
    fig.update_traces(
        hovertemplate='State: %{location} <br>Region: %{text} <br>Number of postings: %{z} <extra></extra>',
        zmin = 0,
        zmax = 4000)
    return fig
def job_bar(df):
    """Horizontal bar chart of the ten most frequent job titles.

    NOTE(review): wrapping value_counts() in a DataFrame makes the counts
    column itself be named "Job Title" (same as the source column), which is
    why x/text/color all reference "Job Title" while y takes the title
    strings from the index. Confusing but apparently deliberate -- confirm
    before refactoring.
    """
    job = px.bar(
        pd.DataFrame(df["Job Title"].value_counts().nlargest(10))["Job Title"],
        x="Job Title",
        y=pd.DataFrame(df["Job Title"].value_counts().nlargest(10)).index,
        orientation='h',
        color='Job Title',
        text="Job Title",
        title='Number of Job Postings per Job Title',
        color_continuous_scale='mint',
        labels=dict(x="Count", y="Job Title", color="Count")
        # height=500,
        # width=750
    )
    job.update_traces(hovertemplate='Number of Job Postings: %{x} <br>Job Title: %{y} <extra></extra>',
                      textposition='outside',
                      showlegend=False,
                      #marker_line_color='rgb(8,48,107)',
                      #marker_line_width=1.5,
                      opacity=0.7)
    job.update_layout(#width=800,
                      #showscale=False,
                      showlegend=False,
                      xaxis_title="Count",
                      yaxis_title="Job Title",
                      title="Top 10 Job Titles by Number of Job Postings",
                      title_x=0.5)
    # Hide the redundant continuous color bar.
    job.update(layout_coloraxis_showscale=False)
    return job
#------Layout ------
# Static page layout: title row, authors row, intro paragraph, then a 2x2 grid
# of graph cards. All four figures are built once at import time from the
# module-level DataFrame `df`.
layout = dbc.Container([
    dbc.Row([
        dbc.Col([
            html.H2("Data Science and Analytics Job Openings Interactive Dashboard", className="text-center")
        ],width=12)
    ]),
    dbc.Row([
        dbc.Col([
            html.H5("António Carvalho | Bruno Fernandes | Manuel Borges | Miguel Zina",className="text-center")
        ])
    ]),
    dbc.Row([
        html.A([
            html.H6("Over the last years, data science and analytics jobs have increased exponentially. "
            " Having this into account, we decided to explore this area and come up with the brightest insights from it to show to all "
            "interested students at NOVA IMS their opportunities. Here we can visualize maps, scatter plots, pie charts and others form of graphs from all Data scientist, "
            "analyst and engineer jobs openings. 2.5 millions of terabytes of data is created each day and thus the importance of analyzing it is crucial. "
            "We hope you find this interactive Dashboard interesting and useful. ",
            className='ml-5 mr-5 mt-5 mb-5')
        ])
    ]),
    # First row of cards: job-title bar chart and regional choropleth.
    dbc.Row([
        dbc.Col([
            dbc.Card(
                dcc.Graph(
                    id = "graph-1",
                    figure= job_bar(df)
                ), body = True,color = "#4E8975"
            )
        ],width={'size':6}, className="mb-3 mt-3"),
        dbc.Col([
            dbc.Card(
                dcc.Graph(
                    id = "graph-2",
                    figure= map(df)
                ), body = True,color = "#4E8975"
            )
        ],width={'size':6}, className="mb-3 mt-3"),
    ]),
    # Second row of cards: city bar chart and salary analysis.
    dbc.Row([
        dbc.Col([
            dbc.Card(
                dcc.Graph(
                    id = "graph-3",
                    figure= location_bar(df)
                ), body = True, color = "#4E8975"
            )
        ],width={'size':6}, className="mb-4 mt-3"),
        dbc.Col([
            dbc.Card(
                dcc.Graph(
                    id = "graph-4",
                    figure= salary_analysis(df)
                ), body = True,color = "#4E8975"
            )
        ],width={'size':6}, className="mb-4 mt-3")
    ])
],style={'background-color':'#f9fff0'} ,fluid = True)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import owncloud,request  # NOTE(review): "request" looks like a typo for "requests" and is unused below — confirm
# Default ownCloud server address and credentials used by callers of rewrite_oc.
login_url = 'http://10.20.30.25'
user_name = 'admin'
user_pass = 'admin'
# Shorthand alias for the FileInfo class; its accessors are called unbound below.
of = owncloud.FileInfo
class rewrite_oc(object):
    """Convenience wrapper around ``owncloud.Client``.

    Logs in on construction and exposes small helpers for file metadata,
    directory listings, transfers and share links. The module-level ``of``
    alias (``owncloud.FileInfo``) is used to call accessors unbound.
    """
    def __init__(self,login_url=None, user_name=None, user_pass=None):
        # Connect to the server and authenticate straight away.
        self.oc = owncloud.Client(login_url)
        self.oc.login(user_name, user_pass)
    def file_info(self,file_path):
        """Return a dict with path/name/mtime/content-type/size of one remote file."""
        self.file_content = self.oc.file_info(file_path)
        _f_info = {}
        _f_info['file_path'] = of.get_path(self.file_content)
        _f_info['file_name'] = of.get_name(self.file_content)
        _f_info['file_modify'] = of.get_last_modified(self.file_content)
        _f_info['file_type'] = of.get_content_type(self.file_content)
        _f_info['file_size'] = of.get_size(self.file_content)
        return _f_info
    def _file_list(self,file_path='/'):
        """Return (count, names) of the direct children of ``file_path``."""
        whole_rets = self.oc.list(file_path, depth=1)
        nums = len(whole_rets)
        ret = []
        for i in range(nums):
            ret.append(of.get_name(whole_rets[i]))
        return nums,ret
    def _whole_file(self):
        """Return the name of every entry in the account (recursive listing)."""
        whole_rets = self.oc.list('/', depth='infinity')
        nums = len(whole_rets)
        ret = []
        for i in range(nums):
            ret.append(of.get_name(whole_rets[i]))
        return ret
    def _whole_rets(self):
        """Return per-entry info for the whole account (recursive listing).

        NOTE(review): ``FileInfo`` exposes no documented ``get_context``
        accessor — possibly a typo for another getter; verify against the
        pyocclient API before relying on this method.
        """
        whole_rets = self.oc.list('/', depth='infinity')
        nums = len(whole_rets)
        ret = []
        for i in range(nums):
            ret.append(of.get_context(whole_rets[i]))
        return ret
    def is_dir(self,file_path):
        """Return True if the remote path is a directory."""
        file_content = self.oc.file_info(file_path)
        ret = of.is_dir(file_content)
        return ret
    def get_dir_zip(self,remote_path,local_filename):
        """Download a remote directory as a zip archive."""
        self.oc.get_directory_as_zip(remote_path,local_filename)
    def get_file(self,remote_file,local_file=None):
        """Download a single remote file."""
        self.oc.get_file(remote_file,local_file)
    def _put_file(self,target_path, local_source_file):
        """Upload one local file to ``target_path``; returns the client's result."""
        return self.oc.put_file(target_path, local_source_file)
    def _put_directory(self,remote_path, local_directory):
        """Upload a local directory tree to ``remote_path``; returns the client's result."""
        return self.oc.put_directory(remote_path, local_directory)
    def _get_process(self):
        """Return the client's upload progress."""
        return self.oc.get_upload_progress()
    def _share_file_link(self,file_path, **kwargs):
        """Create a public share link for ``file_path`` (kwargs passed through)."""
        return self.oc.share_file_with_link(file_path,**kwargs)
if __name__ == '__main__':
pass |
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import torch
from torchvision import transforms, models
import cv2
from torchvision.utils import save_image
# Pre-trained DeepLabV3 (ResNet-101) segmentation model, inference mode only.
model = models.segmentation.deeplabv3_resnet101(pretrained=True).eval()
# The 21 PASCAL VOC classes predicted by the model (index 0 = background).
labels = ['background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
# NOTE: plt.cm.get_cmap is deprecated in newer matplotlib (use plt.get_cmap);
# kept as-is for compatibility with the version this script targets.
cmap = plt.cm.get_cmap('tab20c')
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int is the documented replacement and behaves identically here.
colors = (cmap(np.arange(cmap.N)) * 255).astype(int)[:, :3].tolist()
# Fixed seed so the class->colour assignment is reproducible across runs.
np.random.seed(2020)
np.random.shuffle(colors)
colors.insert(0, [0, 0, 0]) # background color must be black
colors = np.array(colors, dtype=np.uint8)
# Build a 10px-tall strip with one 10x10 colour patch per class, plus a legend.
palette_map = np.empty((10, 0, 3), dtype=np.uint8)
legend = []
for i in range(21):
    legend.append(mpatches.Patch(color=np.array(colors[i]) / 255., label='%d: %s' % (i, labels[i])))
    c = np.full((10, 10, 3), colors[i], dtype=np.uint8)
    palette_map = np.concatenate([palette_map, c], axis=1)
plt.figure(figsize=(20, 2))
plt.legend(handles=legend)
plt.imshow(palette_map)
def segment(net, img):
    """Run semantic segmentation on one image.

    :param net: segmentation network returning a dict with an ``'out'`` tensor
                of shape (batch, 21, H, W)
    :param img: HxWx3 uint8 numpy image
    :return: (palettised PIL image resized to the input size,
              per-pixel class-index array of shape (height, width))
    """
    # Standard ImageNet normalisation expected by torchvision models.
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        ),
    ])
    input_tensor = preprocess(img)
    input_batch = input_tensor.unsqueeze(0)
    if torch.cuda.is_available():
        input_batch = input_batch.to('cuda')
        net.to('cuda')
    # BUG FIX: the function previously ignored its ``net`` parameter and used
    # the global ``model``; use the parameter so any passed network is honoured
    # (existing callers pass the global model, so behaviour is unchanged).
    output = net(input_batch)['out'][0]  # (21, height, width)
    # argmax over the class axis gives the predicted class per pixel.
    output_predictions = output.argmax(0).byte().cpu().numpy()  # (height, width)
    # Back to a PIL image at the original resolution, colourised via the
    # module-level ``colors`` palette.
    r = Image.fromarray(output_predictions).resize((img.shape[1], img.shape[0]))
    r.putpalette(colors)
    return r, output_predictions
#ㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ#
import os
import glob
import cv2 as cv
from PIL import Image
import os
raw_path = 'C:/Study/semantic-segmentation-pytorch-master/frame_frame/frame/' # source (original) image directory
token_list = os.listdir(raw_path) # entries inside the source directory
data_path = 'C:/Study/semantic-segmentation-pytorch-master/frame2/' # destination directory for resized images
# ---- resize pass -------------------
# NOTE(review): ``token`` is never used inside the loop body, so the same flat
# directory is re-processed once per entry of token_list — confirm intent.
for token in token_list:
    # source and destination directories for this pass
    image_path = raw_path + '/'
    save_path = data_path + '/'
    # every image in the source directory
    data_list = os.listdir(image_path)
    print(len(data_list))
    # resize each image and save it
    for name in data_list:
        # open the image
        im = Image.open(image_path + name)
        # resize the image
        im = im.resize((500, 300))
        # save as RGB (JPG-compatible)
        img = im.convert('RGB')
        img.save(save_path + name)
i = 0
data_list2 = os.listdir(data_path)
frame_human = data_path + '/'
# ---- segmentation pass: keep only the 'person' pixels of every frame ----
for name in data_list2:
    img = np.array(Image.open(data_path + name))
    fg_h, fg_w, _ = img.shape
    segment_map, pred = segment(model, img)
    # side-by-side preview of input and segmentation map
    fig, axes = plt.subplots(1, 2, figsize=(20, 10))
    axes[0].imshow(img)
    axes[1].imshow(segment_map)
    mask = (pred == 15).astype(float) * 255 # 15: person
    # feather the mask edges, then normalise to [0, 1]
    alpha = cv2.GaussianBlur(mask, (7, 7), 0).astype(float)
    alpha = alpha / 255. # (height, width)
    alpha = np.repeat(np.expand_dims(alpha, axis=2), 3, axis=2) # (height, width, 3)
    foreground = cv2.multiply(alpha, img.astype(float))
    i += 1
    print(i)
    #ㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ#
    # person-only frame, saved next to the resized originals
    result = foreground.astype(np.uint8)
    Image.fromarray(result).save(frame_human + name)
# #ㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ#
|
import threading
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from django.db.models import Model
from mypy_extensions import TypedDict
from .cache import element_cache, get_element_id
from .projector import get_projector_data
from .utils import get_model_from_collection_string
class ElementBase(TypedDict):
    """Required keys every autoupdate element must provide."""
    id: int
    collection_string: str
    full_data: Optional[Dict[str, Any]]  # None means the element was deleted
class Element(ElementBase, total=False):
    """
    Data container to handle one root rest element for the autoupdate, history
    and caching process.

    The fields `id`, `collection_string` and `full_data` are required, the other
    fields are optional.

    if full_data is None, it means, that the element was deleted. If reload is
    True, full_data is ignored and reloaded from the database later in the
    process.
    """
    information: List[str]  # free-form notes stored with the history entry
    restricted: bool
    user_id: Optional[int]  # user who caused the change, if any
    disable_history: bool  # skip history creation for this element
    reload: bool  # see docstring: triggers a refetch of full_data
# Wire format of one autoupdate message sent to clients.
AutoupdateFormat = TypedDict(
    "AutoupdateFormat",
    {
        "changed": Dict[str, List[Dict[str, Any]]],  # collection -> changed full_data
        "deleted": Dict[str, List[int]],  # collection -> deleted element ids
        "from_change_id": int,
        "to_change_id": int,
        "all_data": bool,  # True when the payload contains the complete dataset
    },
)
def inform_changed_data(
    instances: Union[Iterable[Model], Model],
    information: Optional[List[str]] = None,
    user_id: Optional[int] = None,
    restricted: bool = False,
) -> None:
    """
    Informs the autoupdate system and the caching system about the creation or
    update of an element.

    The argument instances can be one instance or an iterable over instances.

    History creation is enabled.
    """
    if information is None:
        information = []
    # Reduce the given instances to their root rest elements — the objects
    # that are actually serialized and sent to clients.
    root_instances = set()
    if not isinstance(instances, Iterable):
        instances = (instances,)
    for instance in instances:
        try:
            root_instances.add(instance.get_root_rest_element())
        except AttributeError:
            # Instance has no method get_root_rest_element. Just ignore it.
            pass
    # Keyed by collection string + pk so duplicates collapse to one element.
    elements: Dict[str, Element] = {}
    for root_instance in root_instances:
        key = root_instance.get_collection_string() + str(root_instance.get_rest_pk())
        elements[key] = Element(
            id=root_instance.get_rest_pk(),
            collection_string=root_instance.get_collection_string(),
            full_data=root_instance.get_full_data(),
            information=information,
            restricted=restricted,
            user_id=user_id,
        )
    bundle = autoupdate_bundle.get(threading.get_ident())
    if bundle is not None:
        # Put all elements into the autoupdate_bundle.
        bundle.update(elements)
    else:
        # Send autoupdate directly
        handle_changed_elements(elements.values())
def inform_deleted_data(
    deleted_elements: Iterable[Tuple[str, int]],
    information: List[str] = None,
    user_id: Optional[int] = None,
    restricted: bool = False,
) -> None:
    """
    Informs the autoupdate system and the caching system about the deletion of
    elements.

    History creation is enabled.
    """
    information = information if information is not None else []
    # Build deletion markers (full_data=None), keyed by collection string + id.
    elements: Dict[str, Element] = {
        collection_string + str(element_id): Element(
            id=element_id,
            collection_string=collection_string,
            full_data=None,
            information=information,
            restricted=restricted,
            user_id=user_id,
        )
        for collection_string, element_id in deleted_elements
    }
    bundle = autoupdate_bundle.get(threading.get_ident())
    if bundle is None:
        # No open bundle for this thread: send the autoupdate immediately.
        handle_changed_elements(elements.values())
    else:
        # Collect into the thread's bundle; flushed later by the middleware.
        bundle.update(elements)
def inform_changed_elements(changed_elements: Iterable[Element]) -> None:
    """
    Informs the autoupdate system about some elements. This is used just to send
    some data to all users.

    If you want to save history information, user id or disable history you
    have to put information or flag inside the elements.
    """
    # Key each element by collection string + id, exactly as the bundle does.
    elements = {
        element["collection_string"] + str(element["id"]): element
        for element in changed_elements
    }
    bundle = autoupdate_bundle.get(threading.get_ident())
    if bundle is None:
        # Send autoupdate directly
        handle_changed_elements(elements.values())
    else:
        # Put all collection elements into the autoupdate_bundle.
        bundle.update(elements)
"""
Global container for autoupdate bundles
"""
autoupdate_bundle: Dict[int, Dict[str, Element]] = {}
class AutoupdateBundleMiddleware:
    """
    Middleware to handle autoupdate bundling.

    Opens a fresh element bundle for the request's thread, lets the view
    collect elements into it, and flushes the bundle once the response is
    ready.
    """

    def __init__(self, get_response: Any) -> None:
        self.get_response = get_response
        # One-time configuration and initialization.

    def __call__(self, request: Any) -> Any:
        thread_id = threading.get_ident()
        autoupdate_bundle[thread_id] = {}
        try:
            response = self.get_response(request)
        finally:
            # BUG FIX: always remove this thread's bundle, even when the view
            # raises — otherwise the stale entry leaks and elements collected
            # by the next request on this thread get merged into it.
            bundle: Dict[str, Element] = autoupdate_bundle.pop(thread_id)
        # Flush only on the success path; on an exception the statement above
        # re-raises before this point, so no partial autoupdate is sent.
        handle_changed_elements(bundle.values())
        return response
def handle_changed_elements(elements: Iterable[Element]) -> None:
    """
    Helper function, that sends elements through a channel to the
    autoupdate system and updates the cache.

    Does nothing if elements is empty.
    """

    async def update_cache(elements: Iterable[Element]) -> int:
        """
        Async helper function to update the cache.

        Returns the change_id
        """
        cache_elements: Dict[str, Optional[Dict[str, Any]]] = {}
        for element in elements:
            element_id = get_element_id(element["collection_string"], element["id"])
            cache_elements[element_id] = element["full_data"]
        return await element_cache.change_elements(cache_elements)

    async def async_handle_collection_elements(elements: Iterable[Element]) -> None:
        """
        Async helper function to update cache and send autoupdate.
        """
        # Update cache first so the change_id exists before clients are told.
        change_id = await update_cache(elements)
        # Send autoupdate
        channel_layer = get_channel_layer()
        await channel_layer.group_send(
            "autoupdate", {"type": "send_data", "change_id": change_id}
        )
        projector_data = await get_projector_data()
        # Send projector
        channel_layer = get_channel_layer()
        await channel_layer.group_send(
            "projector",
            {
                "type": "projector_changed",
                "data": projector_data,
                "change_id": change_id,
            },
        )

    if elements:
        # Refresh full_data for elements flagged with "reload" (see Element).
        for element in elements:
            if element.get("reload"):
                model = get_model_from_collection_string(element["collection_string"])
                try:
                    instance = model.objects.get(pk=element["id"])
                except model.DoesNotExist:
                    # The instance was deleted so we set full_data explicitly to None.
                    element["full_data"] = None
                else:
                    element["full_data"] = instance.get_full_data()
        # Save history here using sync code.
        save_history(elements)
        # Update cache and send autoupdate using async code.
        async_to_sync(async_handle_collection_elements)(elements)
def save_history(elements: Iterable[Element]) -> Iterable:
    # TODO: Try to write Iterable[History] here
    """
    Thin wrapper around the call of history saving manager method.

    This is separated to patch it during tests.
    """
    # Imported here rather than at module level — presumably to avoid a
    # circular import with core.models; also keeps the wrapper patchable.
    from ..core.models import History

    return History.objects.add_elements(elements)
|
import sys

# Read a count N followed by N integers (one per line), then print them sorted
# in ascending order.
read_line = sys.stdin.readline
N = int(read_line())
List = [int(read_line()) for _ in range(N)]
Sort_List = sorted(List)
for value in Sort_List:
    print(value)
import z
import os
import csv
# Coingecko coin id -> ticker symbol.
transmap = {
    "bitcoin": "btc",
    "ethereum": "eth",
    "binancecoin": "bnb",
    "cardano": "ada",
    "polkadot": "dot",
}
# Reverse lookup: upper-case ticker symbol -> coin id.
map2 = {symbol.upper(): name for name, symbol in transmap.items()}
def getYears(date):
    """Yield every year from the year in ``date`` ("YYYY[-MM-DD]") up to and
    including ``z.YEAR``.

    BUG FIX: the original compared with ``!=`` and incremented, so a start
    year greater than z.YEAR looped forever. ``<`` yields the identical
    sequence for start years <= z.YEAR and terminates (yielding just the
    start year) otherwise.
    """
    away_year = int(date.split("-")[0])
    while away_year < z.YEAR:
        yield away_year
        away_year += 1
    yield away_year
def getFiles(astock, date = "2000"):
    """Yield the per-year split CSV path for ``astock``, one per year
    produced by getYears(date)."""
    # if astock in map2.keys():
    #     yield z.getPath("coins/{}.csv".format(astock))
    # else:
    first_letter = astock[0]
    for year in getYears(date):
        yield z.getPath("split/{}/{}_{}.csv".format(first_letter, astock, year))
def getRows(astock, date = "2000"):
    """Yield CSV rows for ``astock`` starting at ``date`` (YYYY-MM-DD).

    Files for years other than the start year are yielded wholesale; within
    the start-year file rows are skipped until the first row at or after
    ``date``. Missing/unreadable files are silently skipped.
    """
    date_year = date.split("-")[0]
    for apath in getFiles(astock, date):
        started = False
        try:
            for row in csv.DictReader(open(apath)):
                if date_year not in apath:
                    # Not the start-year file: every row qualifies.
                    yield row
                elif started:
                    yield row
                elif row['Date'] == date:
                    # Exact match for the requested start date.
                    started = True
                    yield row
                else:
                    # Compare month, then (on equal months) day, to detect the
                    # first row at or after the requested date.
                    daysplits = date.split("-")
                    cdate = row['Date']
                    cdaysplits = cdate.split("-")
                    bar = int(daysplits[1])
                    cbar = int(cdaysplits[1])
                    if bar == cbar:
                        bar = int(daysplits[2])
                        cbar = int(cdaysplits[2])
                    if cbar >= bar:
                        started = True
                        yield row
        except Exception as e:
            # Best-effort: skip years whose file is absent or malformed.
            pass
def getRowsRange(astock, count = 20000, date = "2000"):
    """Yield up to ``count`` CSV rows for ``astock`` starting at ``date``.

    Mirrors getRows' start-date detection, additionally counting qualifying
    rows and stopping the extra yields once ``count`` is reached.
    """
    date_year = date.split("-")[0]
    total = 0
    for apath in getFiles(astock, date):
        started = False
        try:
            for row in csv.DictReader(open(apath)):
                ok = False
                if date_year not in apath:
                    ok = True
                elif started:
                    ok = True
                elif row['Date'] == date:
                    started = True
                    ok = True
                else:
                    # Same month/day comparison as in getRows.
                    daysplits = date.split("-")
                    cdate = row['Date']
                    cdaysplits = cdate.split("-")
                    bar = int(daysplits[1])
                    cbar = int(cdaysplits[1])
                    if bar == cbar:
                        bar = int(daysplits[2])
                        cbar = int(cdaysplits[2])
                    if cbar >= bar:
                        started = True
                        ok = True
                        # NOTE(review): this yield duplicates the counted yield
                        # below for the first matching row (getRows has no such
                        # extra yield) — looks like a copy/paste slip; confirm.
                        yield row
                if ok:
                    total += 1
                    if total < count:
                        yield row
        except Exception as e:
            # Best-effort: skip years whose file is absent or malformed.
            pass
|
from testing import *
from testing.tests import *
from testing.assertions import *
# Each with-block grades one exercise all-or-nothing. reftest() appears to
# build a checker bound to the named function; each call presumably compares
# the tested implementation against a reference on that input — confirm
# against the `testing` package.
with all_or_nothing(), tested_function_name("find_episode_titles"):
    check = reftest()
    check('Breaking Bad')
    check('Westworld')
    check('Game of Thrones')
    check('The Wire')

with all_or_nothing(), tested_function_name("best_movie_from_year"):
    check = reftest()
    check(1968)
    check(2000)
    check(2016)

# Zero-argument exercises: a single immediate check.
with all_or_nothing(), tested_function_name("episode_count"):
    reftest()()

with all_or_nothing(), tested_function_name("series_average_ratings"):
    reftest()()
|
# Single source of truth for the package version string.
__version__ = "v0.4.9"
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 19 17:56:07 2015
@author: eejvt
Code developed by Jesus Vergara Temprado
Contact email eejvt@leeds.ac.uk
University of Leeds 2015
"""
import numpy as np
import sys
import matplotlib.pyplot as plt
#sys.path.append('C:\opencv\build\x64\vc12\bin')
import cv2
from glob import glob
import os
from scipy import stats
from scipy.optimize import curve_fit
def f(x, A, B):
    """Exponential decay model exp(A - B*x): a straight line in log space."""
    return np.exp(A - B * x)
# ---- Configuration: experiment folder and day to process (Python 2 script;
# uses print statements and raw_input) ----
folder='C:\Users\eejvt\Mace head 2015\Experiments\ul-assay\\'
day='150818'
os.chdir(folder+day)
# Each subdirectory of the day folder is one experiment run.
a=glob('*\\')
#fig=plt.figure()
if not os.path.isdir("blanks"):
    os.mkdir('blanks')
#%%
# ---- Interactively select which 'blank' runs to include ----
a_blanks=[]
#total
for file_name in a:
    if 'blank' in file_name:
        if not file_name=='blanks\\':
            print 'Use %s? \n 1:Yes 0:No'%file_name
            awnser_blank= int(raw_input())
            if awnser_blank:
                a_blanks.append(file_name)
# ---- Concatenate temperature / frozen-fraction data of all selected blanks ----
temps_total=np.array([])
ff_total=np.array([])
lam_total=np.array([])
for file_name in a_blanks:
    os.chdir(folder+day+'\\'+file_name)
    temps=np.genfromtxt('temps.csv',delimiter=',')
    ff=np.genfromtxt('ff.csv',delimiter=',')
    temps_total=np.concatenate((temps_total,temps))
    ff_total=np.concatenate((ff_total,ff))
    # Cap ff at 0.99 so -log(1-ff) stays finite.
    # NOTE(review): ff09 aliases ff, so this mutates ff in place — confirm intent.
    ff09=ff
    ff09[ff09==1]=0.99
    lam=-np.log(1-ff09)
    lam_total=np.concatenate((lam_total,lam))
# ---- Save the merged blank data ----
os.chdir(folder+day+'\\blanks')
np.savetxt('temps.csv',temps_total,delimiter=',')
np.savetxt('ff.csv',ff_total,delimiter=',')
np.savetxt('lamda.csv',lam_total,delimiter=',')
# ---- Plot the data and fit lam = exp(A - B*T) with 95% confidence band ----
fig=plt.figure()
ax=plt.subplot(111)
plt.plot(temps_total,lam_total,'o')
log_lam=np.log(lam_total)  # NOTE(review): computed but never used
plt.yscale('log')
plt.show()
popt,pcov = curve_fit(f, temps_total, lam_total)
#popt=np.array([-18.59897567, 1.10249526])
#pcov=np.array([[ 0.50795402, -0.02496729],[-0.02496729, 0.00123657]])
perr = np.sqrt(np.diag(pcov))  # 1-sigma parameter uncertainties
ci = 0.95
pp = (1. + ci) / 2.
nstd = stats.norm.ppf(pp)  # normal quantile for the two-sided 95% interval
popt_up = popt + nstd * perr
popt_dw = popt - nstd * perr
temps_plot=np.linspace(temps_total.min(),temps_total.max(),100)
lam_fitted=f(temps_plot,popt[0],popt[1])
lam_low=f(temps_plot,*popt_dw)
lam_high=f(temps_plot,*popt_up)
plt.plot(temps_plot,lam_fitted,'k-',lw=3)
plt.plot(temps_plot,lam_high,'k--')
plt.plot(temps_plot,lam_low,'k--')
plt.xlabel('Temperature')
plt.ylabel('Expected value')
plt.text(0.8, 0.95,'Function $n_s=e^{(A-B*T)}$', ha='center', va='center', transform=ax.transAxes)
plt.text(0.8, 0.9,'$A=%.6f\pm%.6f$'%(popt[0],nstd*perr[0]), ha='center', va='center', transform=ax.transAxes)
plt.text(0.8, 0.85,'$B=%.6f\pm%.6f$'%(popt[1],nstd*perr[1]), ha='center', va='center', transform=ax.transAxes)
plt.text(0.8, 0.8,'95% confidence interval', ha='center', va='center', transform=ax.transAxes)
plt.title('Blank day %s'%day)
plt.savefig('Plot')
#%%
# ---- Persist the fit parameters and their uncertainties ----
param={}
param['A']=popt[0]
param['errA']=nstd*perr[0]
param['B']=popt[1]
param['errB']=nstd*perr[1]
import csv
file_param= open('parameterization.csv', 'wb')
writer = csv.writer(file_param)
for key, value in param.items():
    writer.writerow([key, value])
file_param.close()
#%%
#%%
|
# @Time :2019/8/12 21:59
# @Author :jinbiao
import logging
logging.getLogger() |
import ctypes as C
from sigpyproc.Utils import File
from numpy.ctypeslib import as_ctypes as as_c
import numpy as np
from .ctype_helper import load_lib
lib = load_lib("libSigPyProcSpec.so")
class PowerSpectrum(np.ndarray):
    """Class to handle power spectra.

    :param input_array: 1 dimensional array of shape (nsamples)
    :type input_array: :class:`numpy.ndarray`

    :param header: observational metadata
    :type header: :class:`~sigpyproc.Header.Header`
    """
    def __new__(cls,input_array,header):
        # View the data as this class so the header travels with the array.
        obj = input_array.astype("float32").view(cls)
        obj.header = header
        return obj
    def __array_finalize__(self,obj):
        # Called by numpy for every new view/slice; propagate the header.
        if obj is None: return
        if hasattr(obj,"header"):
            self.header = obj.header
    def bin2freq(self,bin_):
        """Return centre frequency of a given bin.

        :param bin_: bin number
        :type bin_: int

        :return: frequency of bin
        :rtype: float
        """
        # Frequency resolution of the spectrum is 1/tobs.
        return (bin_)/(self.header.tobs)
    def bin2period(self,bin_):
        """Return centre period of a given bin.

        :param bin_: bin number
        :type bin_: int

        :return: period of bin
        :rtype: float
        """
        return 1/self.bin2freq(bin_)
    def freq2bin(self,freq):
        """Return nearest bin to a given frequency.

        :param freq: frequency
        :type freq: float

        :return: nearest bin to frequency
        :rtype: float
        """
        return int(round(freq*self.header.tobs))
    def period2bin(self,period):
        """Return nearest bin to a given periodicity.

        :param period: periodicity
        :type period: float

        :return: nearest bin to period
        :rtype: float
        """
        return self.freq2bin(1/period)
    def harmonicFold(self,nfolds=1):
        """Perform Lyne-Ashworth harmonic folding of the power spectrum.

        :param nfolds: number of harmonic folds to perform (def=1)
        :type nfolds: int

        :return: A list of folded spectra where the i :sup:`th` element is the spectrum folded i times.
        :rtype: :func:`list` of :class:`~sigpyproc.FourierSeries.PowerSpectrum`

        NOTE(review): the `/` divisions below rely on Python 2 integer
        division; under Python 3 they produce floats before the int()/astype
        conversions — confirm the intended semantics before porting.
        """
        sum_ar = self.copy()
        sum_ar_c = as_c(sum_ar)
        nfold1 = 0 #int(self.header.tsamp*2*self.size/maxperiod)
        folds = []
        for ii in range(nfolds):
            nharm = 2**(ii+1)
            nfoldi =int(max(1,min(nharm*nfold1-nharm/2,self.size)))
            # Bin indices of each harmonic to be summed by the C routine.
            harm_ar = np.array([int(kk*ll/float(nharm))
                                for ll in range(nharm)
                                for kk in range(1,nharm,2)]).astype("int32")
            facts_ar = np.array([(kk*nfoldi+nharm/2)/nharm for kk in range(1,nharm,2)]).astype("int32")
            # C library accumulates harmonic sums into sum_ar in place.
            lib.sumHarms(as_c(self),
                         sum_ar_c,
                         as_c(harm_ar),
                         as_c(facts_ar),
                         C.c_int(nharm),
                         C.c_int(self.size),
                         C.c_int(nfoldi))
            # Each fold doubles the effective sampling time.
            new_header = self.header.newHeader({"tsamp":self.header.tsamp*nharm})
            folds.append(PowerSpectrum(sum_ar,new_header))
        return folds
class FourierSeries(np.ndarray):
    """Class to handle output of FFT'd time series.

    :param input_array: 1 dimensional array of shape (nsamples)
    :type input_array: :class:`numpy.ndarray`

    :param header: observational metadata
    :type header: :class:`~sigpyproc.Header.Header`
    """
    def __new__(cls,input_array,header):
        # View the data as this class so the header travels with the array.
        obj = input_array.astype("float32").view(cls)
        obj.header = header
        return obj
    def __array_finalize__(self,obj):
        # Called by numpy for every new view/slice; propagate the header.
        if obj is None: return
        if hasattr(obj,"header"):
            self.header = obj.header
    def __mul__(self,other):
        if type(other) == type(self):
            if other.size != self.size:
                raise Exception("Instances must be the same size")
            else:
                # Complex multiplication of two Fourier series via the C library.
                out_ar = np.empty_like(self)
                lib.multiply_fs(as_c(self),
                                as_c(other),
                                as_c(out_ar),
                                C.c_int(self.size))
                return FourierSeries(out_ar,self.header.newHeader())
        else:
            return super(FourierSeries,self).__mul__(other)
    def __rmul__(self,other):
        # BUG FIX: the result was computed but never returned, so reversed
        # multiplication (e.g. scalar * FourierSeries) always yielded None.
        return self.__mul__(other)
    def formSpec(self,interpolated=True):
        """Form power spectrum.

        :param interpolated: flag to set nearest bin interpolation (def=True)
        :type interpolated: bool

        :return: a power spectrum
        :rtype: :class:`~sigpyproc.FourierSeries.PowerSpectrum`
        """
        # BUG FIX: floor division keeps the size an int, as required by
        # np.empty and C.c_int under Python 3 (identical value under Python 2).
        spec_ar = np.empty(self.size//2,dtype="float32")
        if interpolated:
            lib.formSpecInterpolated(as_c(self),
                                     as_c(spec_ar),
                                     C.c_int(self.size//2))
        else:
            lib.formSpec(as_c(self),
                         as_c(spec_ar),
                         C.c_int(self.size))
        return PowerSpectrum(spec_ar,self.header.newHeader())
    def iFFT(self):
        """Perform 1-D complex to real inverse FFT using FFTW3.

        :return: a time series
        :rtype: :class:`~sigpyproc.TimeSeries.TimeSeries`
        """
        # A real-to-complex FFT of N samples has N+2 floats; invert that.
        tim_ar = np.empty(self.size-2,dtype="float32")
        lib.ifft(as_c(self),
                 as_c(tim_ar),
                 C.c_int(self.size-2))
        return TimeSeries(tim_ar,self.header.newHeader())
    def rednoise(self,startwidth=6,endwidth=100,endfreq=1.0):
        """Perform rednoise removal via Presto style method.

        :param startwidth: size of initial array for median calculation
        :type startwidth: int

        :param endwidth: size of largest array for median calculation
        :type endwidth: int

        :param endfreq: remove rednoise up to this frequency
        :type endfreq: float

        :return: whitened fourier series
        :rtype: :class:`~sigpyproc.FourierSeries.FourierSeries`
        """
        out_ar = np.empty_like(self)
        # Scratch buffers for the C routine (complex and float workspaces).
        buf_c1 = np.empty(2*endwidth,dtype="float32")
        buf_c2 = np.empty(2*endwidth,dtype="float32")
        buf_f1 = np.empty(endwidth,dtype="float32")
        lib.rednoise(as_c(self),
                     as_c(out_ar),
                     as_c(buf_c1),
                     as_c(buf_c2),
                     as_c(buf_f1),
                     C.c_int(self.size//2),
                     C.c_float(self.header.tsamp),
                     C.c_int(startwidth),
                     C.c_int(endwidth),
                     C.c_float(endfreq))
        return FourierSeries(out_ar,self.header.newHeader())
    def conjugate(self):
        """Conjugate the Fourier series.

        :return: conjugated Fourier series.
        :rtype: :class:`sigpyproc.FourierSeries.FourierSeries`

        .. note::

                Function assumes that the Fourier series is the non-conjugated
                product of a real to complex FFT.
        """
        out_ar = np.empty(2*self.size-2,dtype="float32")
        lib.conjugate(as_c(self),
                      as_c(out_ar),
                      C.c_int(self.size))
        return FourierSeries(out_ar,self.header.newHeader())
    def reconProf(self,freq,nharms=32):
        """Reconstruct the time domain pulse profile from a signal and its harmonics.

        :param freq: frequency of signal to reconstruct
        :type freq: float

        :param nharms: number of harmonics to use in reconstruction (def=32)
        :type nharms: int

        :return: a pulse profile
        :rtype: :class:`sigpyproc.FoldedData.Profile`
        """
        bin_ = freq*self.header.tobs
        # Interleaved real/imaginary layout: real at even, imag at odd indices.
        real_ids = np.array([int(round(ii*2*bin_)) for ii in range(1,nharms+1)])
        imag_ids = real_ids+1
        harms = self[real_ids] + 1j*self[imag_ids]
        # Mirror the harmonics to form a Hermitian spectrum before the iFFT.
        harm_ar = np.hstack((harms,np.conj(harms[1:][::-1])))
        return Profile(abs(np.fft.ifft(harm_ar)))
    def toFile(self,filename=None):
        """Write spectrum to file in sigpyproc format.

        :param filename: name of file to write to (def=``basename.spec``)
        :type filename: str

        :return: name of file written to
        :rtype: :func:`str`
        """
        if filename is None:
            filename = "%s.spec"%(self.header.basename)
        outfile = self.header.prepOutfile(filename,nbits=32)
        self.tofile(outfile)
        return outfile.name
    def toFFTFile(self,basename=None):
        """Write spectrum to file in presto ``.fft``/``.inf`` format.

        :param basename: basename of .fft and .inf file to be written
        :type filename: str

        :return: name of files written to
        :rtype: :func:`tuple` of :func:`str`
        """
        if basename is None: basename = self.header.basename
        self.header.makeInf(outfile="%s.inf"%(basename))
        fftfile = File("%s.fft"%(basename),"w+")
        self.tofile(fftfile)
        return "%s.fft"%(basename),"%s.inf"%(basename)
from sigpyproc.TimeSeries import TimeSeries
from sigpyproc.FoldedData import Profile
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-08-08 14:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the user history models.

    Removes the foreign-key fields first, then deletes the
    UserCategoryHistory and UserEventHistory models (reverse of 0012).
    """

    dependencies = [
        ('api', '0012_usercategoryhistory_usereventhistory'),
    ]

    operations = [
        # FK fields must be removed before their models can be deleted.
        migrations.RemoveField(
            model_name='usercategoryhistory',
            name='andela_user_profile',
        ),
        migrations.RemoveField(
            model_name='usercategoryhistory',
            name='category',
        ),
        migrations.RemoveField(
            model_name='usereventhistory',
            name='andela_user_profile',
        ),
        migrations.RemoveField(
            model_name='usereventhistory',
            name='event',
        ),
        migrations.DeleteModel(
            name='UserCategoryHistory',
        ),
        migrations.DeleteModel(
            name='UserEventHistory',
        ),
    ]
|
# Print the input string ten characters per line; the final (possibly empty)
# remainder line is always printed, matching the original behaviour.
w = input()
l = len(w)
full_chunks = l // 10
for start in range(0, full_chunks * 10, 10):
    print(w[start:start + 10])
print(w[full_chunks * 10:])
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import base64
import warnings
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from wadebug import exceptions
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class WABizAPI:
    """Thin client for the WhatsApp Business API admin endpoints.

    Authenticates on construction (basic auth exchanged for a bearer token)
    and exposes helpers for the support, application-settings and webhook-CA
    endpoints. TLS verification is deliberately disabled because the API is
    typically self-hosted with a self-signed certificate.
    """

    LOGIN_USER_ENDPOINT = "/v1/users/login"
    SUPPORT_INFO_ENDPOINT = "/v1/support"
    APP_SETTINGS_ENDPOINT = "/v1/settings/application"
    WEBHOOK_CERTS_ENDPOINT = "/v1/certificates/webhooks/ca"

    def __init__(self, **kwargs):
        """Expects keyword args ``baseUrl``, ``user`` and ``password``.

        :raises ValueError: if any of the three is missing or empty.
        """
        baseUrl = kwargs.get("baseUrl")
        user = kwargs.get("user")
        password = kwargs.get("password")
        if baseUrl and user and password:
            self.api_baseUrl = baseUrl
            self.api_user = user
            self.api_password = password
            # suppress unverified https request warnings
            warnings.simplefilter("ignore", InsecureRequestWarning)
            # Logs in immediately; raises on auth or network failure.
            self.api_header = self.__gen_req_header()
        else:
            raise ValueError(
                "One or more required params (baseUrl, user, password) are missing."
            )

    def __gen_req_header(self):
        """Log in with basic auth and return the bearer-token header dict."""
        # encode(): string -> byte, to use in b64encode()
        # decode(): byte -> string, to use in header
        encoded = base64.b64encode(
            "{}:{}".format(self.api_user, self.api_password).encode()
        ).decode()
        try:
            res = requests.post(
                url=urljoin(self.api_baseUrl, self.LOGIN_USER_ENDPOINT),
                headers={"AUTHORIZATION": "Basic {}".format(encoded)},
                verify=False,  # disable ssl verification
            )
            if res.status_code == 401:
                raise exceptions.WABizAuthError(
                    "API authentication error. Please check your "
                    "configuration file (wadebug.conf.yml "
                    "in current directory)."
                )
            res = res.json()
        except requests.exceptions.RequestException as e:
            raise exceptions.WABizNetworkError(
                "Network request error. Please check your "
                "configuration (wadebug.conf.yml in current directory)."
                "\n{}".format(e)
            )
        token = res["users"][0]["token"]
        return {
            "AUTHORIZATION": "Bearer {}".format(token),
            "CONTENT_TYPE": "application/json",
        }

    def __get(self, endpoint):
        """GET ``endpoint`` and return the parsed JSON body.

        Raises a typed WABiz exception on auth, API or network errors.
        """
        try:
            res = requests.get(
                url=urljoin(self.api_baseUrl, endpoint),
                headers=self.api_header,
                verify=False,  # disable ssl verification
            )
            if res.status_code == 401:
                raise exceptions.WABizAuthError(
                    "API authentication error. Please check your configuration."
                )
            res_json = res.json()
            if not 200 <= res.status_code <= 299:
                self.__checkForErrors(res_json, endpoint)
            return res_json
        except requests.exceptions.RequestException as e:
            raise exceptions.WABizNetworkError(
                "Network request error. Please check your "
                "configuration (wadebug.conf.yml in current directory)."
                "\n{}".format(e)
            )

    def __get_raw(self, endpoint):
        """GET ``endpoint`` and return the raw response bytes."""
        res = requests.get(
            url=urljoin(self.api_baseUrl, endpoint),
            headers=self.api_header,
            verify=False,  # disable ssl verification
        )
        if res.status_code == 401:
            raise exceptions.WABizAuthError(
                "API authentication error. Please check your configuration."
            )
        if not 200 <= res.status_code <= 299:
            res_json = res.json()
            self.__checkForErrors(res_json, endpoint)
        # res.status = 200 OK
        return res.content

    def __checkForErrors(self, res_json, src_endpoint):
        """Translate an API error payload into a typed WABiz exception."""
        errors = res_json.get("errors")
        if errors is not None:
            err = errors[0]
            # BUG FIX: the error object may lack a "details" key; .get()
            # avoids a KeyError that would mask the real API error.
            details = err.get("details")
            if "code" in err and err["code"] == 1005:
                raise exceptions.WABizAccessError(
                    "This endpoint ({}) requires Admin role. "
                    "Please update the credentials in your "
                    "configuration (wadebug.conf.yml in current directory).".format(
                        src_endpoint
                    )
                )
            elif "code" in err and err["code"] == 1006:
                raise exceptions.WABizResourceNotFound(
                    "The requested resource at endpoint ({}) could not be found."
                    "\n{}".format(src_endpoint, details)
                )
            else:
                # Typo fix in the error message: "errorneous" -> "erroneous".
                raise exceptions.WABizGeneralError(
                    "The endpoint ({}) returned an erroneous response."
                    "\n{}".format(src_endpoint, details)
                )

    def get_support_info(self):
        """Return the /v1/support payload."""
        return self.__get(self.SUPPORT_INFO_ENDPOINT)

    def get_phone_number(self):
        """Return the debug_info field of the support payload."""
        res = self.get_support_info()
        return res["support"]["debug_info"]

    def get_webhook_url(self):
        """Return the configured webhook callback URL."""
        res = self.__get(self.APP_SETTINGS_ENDPOINT)
        return res["settings"]["application"]["webhooks"]["url"]

    def get_webhook_cert(self):
        """Return the webhook CA certificate bytes, or None if not configured."""
        try:
            return self.__get_raw(self.WEBHOOK_CERTS_ENDPOINT)
        except exceptions.WABizResourceNotFound:
            return None
|
import os
import pprint

pp = pprint.PrettyPrinter()

# Each subdirectory of this folder is one church; print the names of those
# whose report directory is empty.
churchs_foler = 'download/christianity/'
all_churchs = os.listdir(churchs_foler)
for c in all_churchs:
    entry = churchs_foler + c
    if not os.path.isdir(entry):
        continue
    reports = os.listdir(entry + '/')
    if not reports:
        print(c)
from oracle import *
|
#!/usr/bin/python
import mysql.connector
### Start up connections to both DBs
dbh_s12 = mysql.connector.connect(user='s12', password='jazzduck', database='s12')
cursor_s12 = dbh_s12.cursor()
dbh_gene_db = mysql.connector.connect(user='s12', password='jazzduck', database='gene_db')
cursor_gene_db = dbh_gene_db.cursor()
# Set up queries for both DBs
###############################################################################
# Queries for DB S12
# Collect Unique blast hitting genes
blast_hitting_genes_query = ("SELECT qseqid FROM Blast_output")
# collect lowest evalue blast hit per gene
query_blast_out = ("SELECT qseqid, sseqid, evalue FROM Blast_output WHERE qseqid REGEXP "'%s'" ORDER BY evalue ASC LIMIT 1;")
cursor_s12.execute(blast_hitting_genes_query)
# capture list of hitting genes and corresponding trans ids
hits_gene_list = []
for value in cursor_s12:
split_val = value[0].split("_")
if split_val[0] not in hits_gene_list:
hits_gene_list.append(split_val[0])
else:
continue
###############################################################################
# Queries for DB gene_db
# Pull out transcript seq for specfic blast outputBlast_out
transcript_seq_blast_hit = ("SELECT sequence FROM transcript WHERE trans_id=%s")
###############################################################################
# ----- Blast Output ----- #
print "Gene id", '\t', "Trans_id", '\t' "Blast hit", '\t', "E-value"', \t', "Transcript Seq"
for gene_id in hits_gene_list:
# Query lowest evalue blast hit
gene_to_search = '^' + str(gene_id)
cursor_s12.execute(query_blast_out, (gene_to_search,))
blast_hit = cursor_s12.fetchall() # three outputs: gene_id+trans_id, hit seq, and eval
# recapture trans id from lowest evalue blast hit
trans_id_val = blast_hit[0][0].split("_")
# Query transcript
cursor_gene_db.execute(transcript_seq_blast_hit, (trans_id_val[1],))
transcript_out = cursor_gene_db.fetchall()
# output in tsv
print gene_id, '\t', trans_id_val[1], '\t', blast_hit[0][1], '\t', blast_hit[0][2], '\t', transcript_out[0][0]
# Closes out connections
cursor_s12.close()
dbh_s12.close()
cursor_gene_db.close()
dbh_gene_db.close() # DONE!
|
# Generated by Django 3.0.3 on 2020-05-09 02:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow blank form values for date fields on resume models.

    NOTE(review): blank=True only relaxes form validation; without
    null=True the database columns remain NOT NULL, so saving an empty
    value will still fail at the DB level — confirm this is intended.
    """

    dependencies = [
        ('resume', '0002_auto_20200504_1223'),
    ]

    operations = [
        migrations.AlterField(
            model_name='education',
            name='passing_year',
            field=models.DateField(blank=True),
        ),
        migrations.AlterField(
            model_name='projectorjob',
            name='end_date',
            field=models.DateField(blank=True),
        ),
        migrations.AlterField(
            model_name='projectorjob',
            name='start_date',
            field=models.DateField(blank=True),
        ),
    ]
|
import os
from memory_profiler import profile
from http import HTTPStatus
from django.test import TestCase
from app.models import City, Location
class AppTestCase(TestCase):
    """End-to-end CRUD test for the city/location REST API.

    Runs the create/update/delete cycle REPEATS times (default 100) and
    asserts that every iteration completed.
    """

    @classmethod
    def setUpTestData(cls):
        # Counter of fully-completed iterations; re-initialized per run.
        cls.passed = 0

    def test_e2e(self):
        self.n = int(os.getenv('REPEATS', 100))
        self.passed = 0
        for i in range(self.n):
            self.__test_e2e()
        self.assertEqual(self.n, self.passed)

    # @profile
    def __test_e2e(self):
        # --- Add city ---
        city_data = {
            'name': 'Москва'
        }
        response = self.client.post('/api/v1/city/', data=city_data, content_type='application/json')
        # BUG FIX: QuerySet.first() returns None instead of raising
        # DoesNotExist, so the original try/except here was dead code and
        # a missing row would surface as an AttributeError. Assert
        # presence explicitly instead.
        city = City.objects.first()
        self.assertEqual(response.status_code, HTTPStatus.CREATED)
        self.assertIsNotNone(city)
        self.assertEqual(city.name, city_data['name'])

        # --- Add location ---
        location_data = {
            'city': city.id,
            'street': 'ул. Тверская',
            'support': 1
        }
        response = self.client.post('/api/v1/location/', data=location_data, content_type='application/json')
        location = Location.objects.first()
        self.assertEqual(response.status_code, HTTPStatus.CREATED)
        self.assertIsNotNone(location)
        self.assertEqual(location.street, location_data['street'])
        self.assertEqual(location.city.name, city_data['name'])

        # --- Update location ---
        location_new_data = {
            'city': city.id,
            'street': 'ул. Пушкинская',
            'support': 2,
        }
        response = self.client.put(f'/api/v1/location/{location.id}/', data=location_new_data,
                                   content_type='application/json')
        location = Location.objects.first()
        self.assertEqual(response.status_code, HTTPStatus.OK)
        self.assertIsNotNone(location)
        self.assertEqual(location.street, location_new_data['street'])
        self.assertEqual(location.city.name, city_data['name'])

        # --- Delete city: its locations must disappear with it ---
        response = self.client.delete(f'/api/v1/city/{city.id}/')
        exception = False
        try:
            Location.objects.get(id=location.id)
        except Location.DoesNotExist:
            # Expected: get() on a deleted row does raise DoesNotExist.
            exception = True
        self.assertEqual(response.status_code, HTTPStatus.NO_CONTENT)
        self.assertTrue(exception)
        self.passed += 1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/12/2 19:53
# @Author : wildkid1024
# @Site :
# @File : __init__.py
# @Software: PyCharm
from .lenet import *
from .alexnet import *
from .resnet import *
from .vgg import *
from .googlenet import *
from .mobilenet import *
from .mobilenetv2 import *
|
import os
import re
import shutil
import csv
import filecmp
def run_testsuites(benchmarks,criteria_types,prioritization_methods):
    # Run every prioritized test suite against each benchmark binary and
    # each of its faulty versions (directories named v1, v2, ...),
    # appending stdout/stderr to per-suite output files under testruns/.
    dir_pattern = re.compile(r"^v[0-9]+$")  # faulty-version directories
    for benchmark in benchmarks:
        benchmark_path = os.path.join("../benchmarks",benchmark)
        os.chdir(benchmark_path)
        # Start each benchmark from a clean baseline testruns/ directory.
        if os.path.exists('testruns'):
            shutil.rmtree('testruns')
        os.mkdir('testruns')
        for criteria in criteria_types:
            for method in prioritization_methods:
                # Create (or truncate) the output file inside every
                # version directory before replaying the suite.
                for subdir, dirs, files in os.walk(os.getcwd()):
                    for dir in dirs:
                        if dir_pattern.match(dir):
                            filename = dir+'/testruns/'+criteria+'-'+method+'-output.txt'
                            if not os.path.exists(os.path.dirname(filename)):
                                os.mkdir(os.path.dirname(filename))
                            if os.path.exists(filename):
                                os.remove(filename)
                            with open(filename, 'w'): pass
                # Replay each suite line against the baseline binary and
                # then against every faulty version's binary.
                with open(os.path.join('testsuites',criteria+'-'+method+'.txt'), 'r') as reader:
                    for line in reader.readlines():
                        os.system('./'+benchmark+' '+line.strip()+' >> ./testruns/'+criteria+'-'+method+'-output.txt 2>&1')
                        for subdir, dirs, files in os.walk(os.getcwd()):
                            for dir in dirs:
                                if dir_pattern.match(dir):
                                    os.system('./'+dir+'/'+benchmark+' '+line.strip()+' >> ./'+dir+'/testruns/'+criteria+'-'+method+'-output.txt 2>&1')
        # NOTE(review): two chdir('..') calls land in the parent of
        # ../benchmarks, not back in the starting directory — verify that
        # multi-benchmark runs resolve "../benchmarks" as intended.
        os.chdir('..')
        os.chdir('..')
def expose_faults(cur_dir, benchmarks, criteria_types, prioritization_methods):
    """Count exposed faults per (benchmark, criteria, method) combination.

    A fault is "exposed" when a faulty version's captured test output
    (in its v*/testruns/ directory) differs from the baseline run.
    Writes one CSV row per combination to expose_faults.csv in cur_dir.

    Side effects: changes the process working directory to cur_dir.
    """
    dir_pattern = re.compile(r"^v[0-9]+$")  # faulty-version directories
    os.chdir(cur_dir)
    with open('expose_faults.csv', 'w', newline='') as csvfile:
        fieldnames = ['benchmark', 'criteria', 'method', 'faults_exposed']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for benchmark in benchmarks:
            benchmark_path = os.path.join("../benchmarks", benchmark)
            for criteria in criteria_types:
                for method in prioritization_methods:
                    benchmark_run = os.path.join(benchmark_path, 'testruns/' + criteria + '-' + method + '-output.txt')
                    faults = 0
                    for subdir, dirs, files in os.walk(benchmark_path):
                        for dir in dirs:
                            if dir_pattern.match(dir):
                                faulty_run = os.path.join(benchmark_path, dir, 'testruns/' + criteria + '-' + method + '-output.txt')
                                # BUG FIX: shallow=False forces a
                                # byte-by-byte comparison; the default
                                # stat-signature check can report files
                                # with equal size/mtime as identical and
                                # miss real output differences.
                                if not filecmp.cmp(benchmark_run, faulty_run, shallow=False):
                                    faults += 1
                    writer.writerow({'benchmark': benchmark, 'criteria': criteria, 'method': method, 'faults_exposed': faults})
def solution(n, t, m, p):
    """Return the t digits spoken by player p in the base-n counting game.

    The game concatenates 0, 1, 2, ... written in base n (digits above 9
    as uppercase letters); m players take turns one character at a time,
    and player p (1-based) collects t characters.
    """
    DIGITS = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    stream = '01'  # representations of 0 and 1
    for number in range(2, 100000):
        chunk = ''
        while number:
            number, digit = divmod(number, n)
            chunk = DIGITS[digit] + chunk
        stream += chunk
    # Player p speaks every m-th character starting at index p-1.
    return stream[p - 1::m][:t]
|
import sys
import os.path

from FileCreator import FileCreator

# --- Gather user input -------------------------------------------------
sceneName = input("Please enter Scene name: ")
while not sceneName:  # scene name is mandatory
    sceneName = input("Please enter Scene name: ")
projectName = input("(Optional) Please enter Project name: ")
fullUserName = input("(Optional) Please enter User name: ")
organizationName = input("(Optional) Please enter Organization Name: ")

documentDirectory = input("Please enter Document Directory : ")
documentDirectoryPath = os.path.dirname(os.path.realpath(documentDirectory))
isDirectory = os.path.isdir(documentDirectoryPath)
# Re-prompt until an existing directory is supplied.
while not isDirectory:
    documentDirectory = input("Please enter Document Directory : ")
    # BUG FIX: recompute the path from the fresh documentDirectory input.
    # The original re-used the stale documentDirectoryPath, so the new
    # input was ignored and one invalid entry looped forever.
    documentDirectoryPath = os.path.dirname(os.path.realpath(documentDirectory))
    isDirectory = os.path.isdir(documentDirectoryPath)

# Resolve this script's location and the sibling 'Files' folder.
__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))
filesFolderPath = os.path.join(os.path.dirname(os.path.commonpath([__location__])), 'Files')

fileCreator = FileCreator(sceneName, filesFolderPath, documentDirectory, projectName, organizationName, fullUserName)
fileCreator.create()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.