text stringlengths 8 6.05M |
|---|
"""Management command tests."""
import os
import shutil
import tempfile
from django.core.management import call_command
from django.urls import reverse
from modoboa.core.tests.test_views import SETTINGS_SAMPLE
from modoboa.lib.tests import ModoTestCase
from .. import factories
class NeedDovecotUpdateTestCase(ModoTestCase):
    """Test the need-dovecot-update flag and the update_dovecot_conf command."""

    @classmethod
    def setUpTestData(cls):
        """Create test data."""
        super().setUpTestData()
        factories.populate_database()

    def setUp(self):
        """Initiate initial env: scratch dir and a cleared update flag."""
        super().setUp()
        self.workdir = tempfile.mkdtemp()
        self.localconfig.need_dovecot_update = False
        self.localconfig.save()

    def tearDown(self):
        """Reset test env."""
        shutil.rmtree(self.workdir)
        # Bug fix: previously the parent class' own cleanup was skipped.
        super().tearDown()

    def test_update_dovecot_update_state_valid_form(self):
        """Saving core parameters must flag dovecot for an update."""
        url = reverse("core:parameters")
        settings = SETTINGS_SAMPLE.copy()
        response = self.client.post(url, settings, format="json")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), "Parameters saved")
        self.localconfig.refresh_from_db()
        self.assertTrue(self.localconfig.need_dovecot_update)

    def test_update_dovecot_ldap_conf(self):
        """update_dovecot_conf must render the LDAP file and clear the flag."""
        self.localconfig.need_dovecot_update = True
        self.localconfig.save()
        self.assertTrue(self.localconfig.need_dovecot_update)
        tmp_file = os.path.join(self.workdir, "test-dovecot-ldap.conf")
        self.set_global_parameters({
            "authentication_type": "ldap",
            "ldap_dovecot_sync": True,
            "ldap_dovecot_conf_file": tmp_file,
            "ldap_server_address": "localhost",
            "ldap_server_port": "636",
            "ldap_enable_secondary_server": True,
            "ldap_secondary_server_address": "localhost2",
            "ldap_secondary_server_port": "636",
            "ldap_secured": "ssl",
            "ldap_bind_dn": "DC=test,DC=lan",
            "ldap_bind_password": "test",
            "ldap_search_base": "CN=Users,DC=test,DC=lan",
            "ldap_search_filter": "(& (objectClass=user) (|(mail=%(user)s)(sAMAccountName=%(user)s)) )"
        }, app="core")
        # Generated file checks
        call_command("update_dovecot_conf")
        self.assertTrue(os.path.exists(tmp_file))
        with open(tmp_file) as tmp:
            content = tmp.read()
        self.assertIn("uris = ldaps://localhost:636 ldaps://localhost2:636", content)
        self.assertIn("dn = \"DC=test,DC=lan\"", content)
        self.assertIn("dnpass = \'test\'", content)
        self.assertIn("base = CN=Users,DC=test,DC=lan", content)
        self.assertIn("user_filter = (& (objectClass=user) (|(mail=%u)(sAMAccountName=%u)) )", content)
        self.assertIn("pass_filter = (& (objectClass=user) (|(mail=%u)(sAMAccountName=%u)) )", content)
        self.localconfig.refresh_from_db()
        self.assertFalse(self.localconfig.need_dovecot_update)
|
"""
Label classifier
"""
# Internal libraries
from big_picture.pre_processor import pre_process
from big_picture.vectorizers import embedding_strings, tf_idf
from big_picture.label import Label
# General libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import os
import io
import gcsfs
import torch
# Encoding libraries
from sklearn.preprocessing import OneHotEncoder
# Modelling libraries
from tensorflow.keras.models import load_model
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
from tensorflow.nn import softmax
# Reports libraries
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# Label dictionary
# Mapping from model output index (one-hot column) to topic name.
labels_dict = dict(enumerate((
    'Activism',
    'Business',
    'Crime',
    'Culture',
    'Education',
    'Entertainment',
    'Health',
    'Media',
    'Other',
    'Politics',
    'Religion',
    'Science',
    'Sports',
    'Technology',
    'Trends',
    'World News',
)))
class Classifier():
    """
    Topic classifier: a dense Keras network over sentence embeddings, plus the
    per-topic Label() clusters derived from a corpus.

    Parameters
    ----------
    threshold: float
        Value between 0 and 1; a prediction is assigned to a topic when its
        probability is >= threshold.
    """

    def __init__(self, threshold=0.4):
        self.threshold = threshold
        self.model = None          # Keras model (set by fit() or load())
        self.labels = None         # {label_tag: Label()} set by divide_labels()
        self.labels_tag = None     # label names, in one-hot column order
        self.input_shape = None    # embedding dimensionality
        self.output_shape = None   # number of topics

    def fit(self, train, model='dropout', source='web', params=None, sample=None, printed=False, pre_made=False):
        '''
        Generate a model and fit it to the train data.

        Parameters
        ----------
        train : df
            DataFrame containing the data to train.
        model : str
            Name of the classification architecture to use ('dropout').
        source, params, sample, printed
            Forwarded to pre_process().

        Raises
        ------
        ValueError
            If `model` names an unknown architecture.
        '''
        ohe = OneHotEncoder()
        # Bug fix: the caller-supplied `params` used to be unconditionally
        # overwritten with the default; only fall back when none is given.
        if params is None:
            params = {'lemmatize': False}
        train = pre_process(
            train,
            source=source,
            params=params,
            sample=sample,
            printed=printed)
        X = embedding_strings(train['minor_preprocessing'])
        y = ohe.fit_transform(train[['label']]).toarray()
        # Save tags for labels to class (one-hot column order)
        self.labels_tag = ohe.categories_[0]
        es = EarlyStopping(patience=10)
        if model == 'dropout':
            self.input_shape = X.shape[1]
            self.output_shape = y.shape[1]
            self.model = initialize_class_bert_dropout(self.input_shape, self.output_shape)
        else:
            raise ValueError("Unknown model architecture: %s" % model)
        self.model.fit(
            X,
            y,
            epochs=20,
            validation_split=0.25,
            batch_size=32,
            callbacks=[es],
            verbose=1
        )

    def save(self, path):
        '''Save model weights and classifier state under `path`.

        Raises an Exception when no model has been fitted yet.
        '''
        if self.model is None:
            raise Exception('Please fit a model first')
        # Bug fix: os.makedirs() previously crashed when the directory existed.
        os.makedirs(path, exist_ok=True)
        self.model.save_weights(os.path.join(path, 'model.pkl'))
        state = {
            'labels': self.labels,
            'labels_tag': self.labels_tag,
            'threshold': self.threshold,
            'input_shape': self.input_shape,
            'output_shape': self.output_shape
        }
        with open(os.path.join(path, 'state.pkl'), 'wb') as fp:
            pickle.dump(state, fp)

    def load(self, path):
        '''Restore a classifier previously written by save().'''
        with open(os.path.join(path, 'state.pkl'), 'rb') as fp:
            state = pickle.load(fp)
        self.labels = state['labels']
        self.labels_tag = state['labels_tag']
        self.threshold = state['threshold']
        self.input_shape = state['input_shape']
        self.output_shape = state['output_shape']
        self._init()
        # Rebuild the architecture, then load the saved weights into it.
        self.model = initialize_class_bert_dropout(self.input_shape, self.output_shape)
        self.model.load_weights(os.path.join(path, 'model.pkl'))

    def _init(self):
        # Construct the sentiment-analysis tokenizer/model pair (downloads on
        # first use; kept out of __init__ so plain classification stays cheap).
        self.tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
        self.sa_model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")

    def divide_labels(self, world, source='web', params=None, sample=None, printed=False, model_name='kmeans'):
        '''
        Populates the classifier with data for clustering.

        Parameters
        ----------
        world : df
            DataFrame to predict labels from and generate world of clusters.

        Raises
        ------
        Exception
            If no model has been fitted yet.
        '''
        if self.model is None:
            raise Exception('Please fit a model first')
        world = pre_process(
            world,
            source=source,
            params=params,
            sample=sample,
            printed=printed)
        X = embedding_strings(world['minor_preprocessing'])
        results = self.model.predict(X)
        # Bucket row indices per label index, keeping predictions >= threshold.
        labels = {key: [] for key in labels_dict.keys()}
        for i, result in enumerate(results):
            for j, label_pred in enumerate(result):
                if label_pred >= self.threshold:
                    labels[j].append(i)
        # Transform the non-empty buckets into Label() instances.
        self.labels = {}
        self._init()
        for key, value in labels.items():
            if value:
                self.labels[self.labels_tag[key]] = Label(
                    world.iloc[value, :].reset_index(),
                    self.labels_tag[key],
                    tokenizer=self.tokenizer,
                    sa_model=self.sa_model,
                    model_name=model_name
                )

    def predict(self, df, source='prepared', params=None, sample=None, printed=False):
        '''Predict topics for `df` and return {label: matching cluster}.'''
        df = pre_process(
            df,
            source=source,
            params=params,
            sample=sample,
            printed=printed)
        X = embedding_strings(df['minor_preprocessing'])
        prediction = self.model.predict(X)
        # Collect every label whose probability clears the threshold.
        labels = []
        for result in prediction:
            for j, label_pred in enumerate(result):
                if label_pred >= self.threshold:
                    labels.append(self.labels_tag[j])
        if not labels:
            labels = ['Other']
        self._init()
        # Sentiment score for the first document (positive minus negative).
        sa = softmax(self.sa_model(self.tokenizer(
            df['minor_preprocessing'].iloc[0],
            return_tensors='tf',
            padding=True,
            max_length=500,  # NOTE(review): truncation length may need tuning
            truncation=True
        )).logits).numpy()
        # Bug fix: copy the slice so the assignment below does not write into
        # a pandas view (SettingWithCopy).
        output_df = df[['title', 'url', 'publishedAt', 'author', 'source']].copy()
        output_df['SA'] = sa[0][1] - sa[0][0]
        output = {}
        for label in labels:
            cluster = self.labels[label].predict(X)
            output[label] = self.labels[label].clusters[cluster]
            output[label].df = pd.concat([output_df, output[label].df], axis=0).drop_duplicates('url')
        return output

    ### Classification reports
    def reports(self, y_true, report=1, X=None):
        '''
        Print a classification report or plot a confusion matrix.

        Parameters
        ----------
        y_true : pd.Series
            Series containing the y_true used for training the model.
        report : int
            Default is 1, which prints the classification report.
            Use 0 to plot the confusion matrix for the different labels.
        X : array, optional
            Embedded features to predict on. Bug fix: the original referenced
            an undefined global `X`; it must now be passed explicitly.

        Raises
        ------
        Exception
            If no model has been fitted yet.
        ValueError
            If X is not provided.
        '''
        if self.model is None:
            raise Exception('Please fit a model first')
        if X is None:
            raise ValueError('Please provide the embedded features X')
        # Bug fix: the original stored predictions in a local `classes` but
        # then referenced the undefined attribute `self.classes`.
        classes = np.argmax(self.model.predict(X), axis=-1)
        val_dict = {val: int(idx) for idx, val in enumerate(self.labels_tag)}
        y_true = y_true.map(val_dict)
        if report:
            # Classification report
            return print(classification_report(y_true, classes))
        # Confusion-matrix plot (bug fix: `labels` was undefined here; use
        # the stored label tags for axis names).
        cm_array = confusion_matrix(y_true, classes)
        df_cm = pd.DataFrame(cm_array,
                             index=list(self.labels_tag),
                             columns=list(self.labels_tag))
        plt.figure(figsize=(15, 8))
        return sns.heatmap(df_cm, annot=True)
### Models in use
def initialize_class_bert_0(input_dim=None):
    '''Initial classifier using BERT encoding with sentence transformer.

    Parameters
    ----------
    input_dim : int
        Dimensionality of the input embeddings. Bug fix: the original read
        the undefined global `embeddings`; the size is now a parameter.

    Returns
    -------
    A compiled Keras Sequential model with a 16-way softmax head.
    '''
    if input_dim is None:
        raise ValueError('input_dim (embedding dimensionality) is required')
    model = models.Sequential()
    model.add(layers.Dense(300, activation='relu', input_dim=input_dim))
    model.add(layers.Dense(150, activation='relu'))
    model.add(layers.Dense(50, activation='relu'))
    model.add(layers.Dense(16, activation='softmax'))
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def initialize_class_bert_dropout(shape, output):
    """Build and compile the dense classification network.

    Parameters
    ----------
    shape : int
        Dimensionality of the input embeddings.
    output : int
        Number of output classes (softmax width).

    Returns
    -------
    A compiled Keras Sequential model.
    """
    hidden_sizes = (700, 512, 256, 128, 64, 32)
    model = models.Sequential()
    # Input layer fixes the embedding dimensionality.
    model.add(layers.Dense(hidden_sizes[0], activation='relu', input_dim=shape))
    # Funnel of progressively narrower ReLU layers.
    for units in hidden_sizes[1:]:
        model.add(layers.Dense(units, activation='relu'))
    model.add(layers.Dense(output, activation='softmax'))
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
|
from page.page_address import PageAddress
from page.page_login import PageLogin
from tools.read_data import read_data
class PageIn:
    """Factory helpers that hand out page objects and shared test data."""

    @classmethod
    def get_page_address(cls):
        # Build and return the address page object.
        return PageAddress()

    @classmethod
    def get_page_login(cls):
        # Build and return the login page object.
        return PageLogin()

    @classmethod
    def read_data(cls):
        # Load the shared YAML test data file.
        return read_data("data.yaml")
|
# Format Strings
# Bug fix: ported from Python 2 `print` statements to Python 3 `print()`
# calls; under Python 3 the original lines are syntax errors.
my_name = 'Kalyan Ghosh'
my_age = 27        # years
my_height = 5.7    # feet
my_weight = 60     # pounds
my_eyes = 'Black'
my_teeth = 'White'
my_hair = 'Brown'

print("Let's talk about %s." % my_name)
print("He's %f feet tall." % my_height)
print("He's %d pounds heavy." % my_weight)
print("He's got %s eyes and %s hair." % (my_eyes, my_hair))
# %d truncates the float sum, matching the original output.
print("If I add %d %d and %d I get %d." % (my_age, my_height, my_weight, my_age + my_height + my_weight))
import numpy as np
import sys
import os
import yaml
def read_yaml(path):
    """Parse the YAML file at *path* and return its contents.

    Bug fix: the original leaked the file handle; a context manager now
    closes it deterministically. FullLoader is kept — note it should only
    be used on trusted input.
    """
    with open(path, 'r') as fh:
        return yaml.load(fh, Loader=yaml.FullLoader)
def get_correct_path(relative_path):
    '''
    Resolve *relative_path* for a PyInstaller-packaged app.

    When running from a PyInstaller bundle, paths are resolved against the
    unpack directory (sys._MEIPASS); otherwise against the current directory.
    '''
    bundle_dir = getattr(sys, '_MEIPASS', None)
    if bundle_dir is None:
        bundle_dir = os.path.abspath(".")
    return os.path.join(bundle_dir, relative_path)
def load_classes(path):
    """
    Loads class labels at 'path', one label per line.

    Bug fixes: the file handle is now closed (context manager), and the last
    label is no longer dropped when the file lacks a trailing newline —
    str.splitlines() handles both trailing-newline conventions.
    """
    with open(path, "r") as fp:
        return fp.read().splitlines()
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import scipy.integrate as integrate
plt.close('all')
# ------ defining constants ----- #
# -- using mks for convenience -- #
c = 2.998e8    # speed of light, m / s
h = 6.626e-34  # Planck constant, m^2 * kg / s (J * s) — fixed "m^s" typo
k = 1.381e-23  # Boltzmann constant, J / K (bug fix: was mistyped as 1.31e-23)
b = 2.898e-3   # Wien displacement constant, m * K
# ------ FUNCTIONS ----- #
# ----- Wien's Law ----- #
def wiens(T):
    """Wien's displacement law: peak blackbody wavelength for temperature T (K).

    Uses the module-level Wien displacement constant ``b`` (m * K).
    """
    return b / T # will be in meters
# ---- Planck's Law ---- #
def planck(x,T):
    """Planck's law: blackbody spectral radiance as a function of wavelength.

    *Arguments*:
    - x = wavelength in meters (scalar or numpy array).
    - T = temperature in Kelvin.

    Uses the module-level mks constants h, c and k; broadcasts elementwise
    over numpy inputs.
    """
    return (2*h*c**2) / (x**5 * (np.exp((h*c)/(x*k*T))-1))
# ---- Integrate over Bandpass ---- #
def integrated_flux(nu, filt, flux):
    """Return the filter-weighted mean flux over a bandpass.

    *Arguments*:
    - nu = frequency grid.
    - filt = filter transmission sampled on ``nu``.
    - flux = source flux sampled on ``nu``.

    Both integrals carry a 1/nu weighting and use trapezoidal integration;
    the denominator normalises by the filter response itself.
    """
    weight = filt / nu
    numerator = integrate.trapz(weight * flux, nu)
    normalisation = integrate.trapz(weight, nu)
    return numerator / normalisation
# ---------------------- #
# reading in bandpass data
# Read the UBV bandpass table (skipping its 17-line header); columns come in
# (wavelength, transmission) pairs for each filter. Wavelengths are in A.
filts = np.loadtxt('UBV_ma06.txt',skiprows=17)
ufilt = [filts[:,0],filts[:,1]] # wavelength in A
bfilt = [filts[:,2],filts[:,3]]
vfilt = [filts[:,4],filts[:,5]]
# -- calculating colors for BB -- #
temp = [10000]  # blackbody temperature in K (length-1 list broadcasts in planck)
lam = np.arange(1e-9,4e-6,1e-9) # in m
flux = []
for filt in [ufilt,bfilt,vfilt]:
    # Interpolate the blackbody onto the filter's wavelength grid (A).
    f = interp1d(lam*1e10,planck(lam,temp))
    # Keep only rows where this filter actually has data (wavelength > 0).
    indx = np.arange(len(filt[0]))
    indx = indx[filt[0] > 0]
    bb_match = f(filt[0][indx])
    # 2.998e18 A/s / wavelength converts to frequency for the 1/nu weighting.
    flux.append(integrated_flux(2.998e18/filt[0][indx],filt[1][indx],bb_match))
flux = np.asarray(flux)
# plotting integrating example: normalised blackbody with the three bandpasses
# overplotted and a marker at each band-integrated flux.
plt.figure(figsize=(9,3))
plt.plot(lam*1e10,planck(lam,temp)/max(planck(lam,temp)),color='k',lw=2.)
plt.text(0.02,0.86,'Blackbody, T: 10$^4$ K',transform=plt.gca().transAxes,fontsize=15)
x = [3600,4350,5470]  # approximate effective wavelengths of U, B, V in A
nam = ['$U$','$B$','$V$']
count = 0
for filt in [ufilt,bfilt,vfilt]:
    indx = np.arange(len(filt[0]))
    indx = indx[filt[0] > 0]
    # Transmission scaled by 1.2 purely for visual separation on the plot.
    plt.plot(filt[0][indx],filt[1][indx]*1.2,color='k')
    plt.fill_between(filt[0][indx],filt[1][indx]*1.2,alpha=0.3,label=nam[count])
    plt.scatter(x[count],flux[count]/max(planck(lam,temp)),s=200,\
        edgecolor='k',color='C%s'%int(count))
    count += 1
plt.legend(frameon=False,fontsize=15)
plt.ylim(0.,1.5)
plt.xlim(ufilt[0][0]-100,vfilt[0][-1]+100)
plt.ylabel('flux')
plt.xlabel('wavelength [$\AA$]')
plt.gca().set_yticklabels([])  # normalised units, so hide the y tick labels
plt.tight_layout()
plt.savefig('plots-data/hw1_prob2a.pdf',dpi=200)
plt.close('all')
|
#Q.1- Print anything you want on screen.
print("anything you want on screen")

#Q.2- Join two strings using '+'. E.g.-"Acad"+"View"
a=input(" enter ur first string ")
b=input("enter ur second string ")
print(a+b)

#Q.3- Take the input of 3 variables x, y and z . Print their values on screen.
a=input(" enter ur first string ")
b=input("enter ur second string ")
# Bug fix: the third prompt previously said "second string" again.
c=input("enter ur third string ")
print(a,b,c)

#Q.4- Print "Let's get started" on screen.
print('''“Let’s get started”''')

#Q.5- Print the following values using placeholders. s="Acadview" course="Python" fees=5000
s="Acadview"
course="Python"
fees=5000
print('%s charge you %d for %s ' % (s,fees,course))

#Q.6- Find the area of circle pi = 3.14 Take radius as input from user Print the area of circle
pi=3.14
radius=int(input("enter a radius"))
print("area of circle is ",pi*radius*radius)
|
import mysql.connector

# NOTE(review): credentials are blank placeholders — supply them via config
# or environment variables; never hard-code real credentials in source.
mydb = mysql.connector.connect(
    host="",
    user="",
    password="",
    database=""
)

print("please enter your email:")
email = input()
print("please enter your password:")
password = input()

mycursor = mydb.cursor()

# SECURITY: the password is persisted in plain text. Hash it (e.g. with
# hashlib.scrypt or bcrypt) before storing in any real deployment.
# The query itself is parameterized, so it is safe from SQL injection.
sql = "INSERT INTO info (Emails, PASSWORDS) VALUES (%s, %s)"
val = (email, password)
mycursor.execute(sql, val)
mydb.commit()
# Bug fix: message typo "insterted" -> "inserted".
print(mycursor.rowcount, "record inserted.")
"""
Functions for projecting between pushbroom sensors (with known orientation/position from IMU data) and a 3D point cloud.
"""
import hylite
from hylite.project import rasterize, PMap, push_to_cloud
from scipy import spatial
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import scipy as sp
from scipy.optimize import minimize
class Pushbroom(object):
    """
    An extension of the Camera class to handle pushbroom sensors.

    A note on coordinates: hylite always considers the x-axis of an image to be in the cross-track direction, and
    the y-axis to be in the along-track (movement) direction. This is consistent with most sensor manufacturers, but
    should not be confused with the coordinates used for panoramic sensors (where x = lines at each rotation, y=cross track
    direction). Issues plotting can be resolved using the rot, flipX and flipY arguments for image.quick_plot( ... ).
    """

    def __init__(self, pos, ori, xfov, lfov, dims):
        """
        Initialise this pushbroom camera instance.

        *Arguments*:
         - pos = a list containing the position of the sensor at each frame.
         - ori = a list containing the orientation of the sensor (roll, pitch, yaw in degrees) at each frame.
         - xfov = the across-track field of view of one pixel (e.g. 0.01 degree).
         - lfov = the along-track field of view of one pixel (e.g. 0.01 degree).
         - dims = the dimensions of the resulting image. Should be (pixels_in_sensor, number_of_frames).
        """
        # store track and fov info
        self.cp = np.array(pos)  # camera position track
        self.co = np.array(ori)  # camera orientation track
        self.xfov = xfov
        self.lfov = lfov
        self.px = 2 * np.tan(0.5 * np.deg2rad(self.xfov))  # pixel pitch (across track)
        self.pl = 2 * np.tan(0.5 * np.deg2rad(self.lfov))  # pixel pitch (along track)

        # calculate and store rotation matrices
        self.R = [spatial.transform.Rotation.from_euler('xyz', [a[0] - 180, a[1], 90 - a[2]], degrees=True) for a in
                  self.co]
        # N.B. 90 degree rotation of yaw transforms to geographic coordinates (000 = North). 180 degree rotation
        # transforms camera direction from pointing up to down.

        # check and store dims
        assert dims[1] == len(pos), "Error - dims has %d lines, but only %d positions provided." % (dims[1], len(pos))
        # Bug fix: the message previously reported len(pos) instead of len(ori).
        assert dims[1] == len(ori), "Error - dims has %d lines, but only %d orientations provided." % (
            dims[1], len(ori))
        self.dims = dims

    def fudge(self, t_all, t_known, cp_known, co_known, method='quadratic'):
        """
        Apply a fudge-factor to fit IMU data to sparse points with known positions / orientations (e.g.
        from SfM results using a co-aligned camera). This (1) matches the two position datasets using the
        two time values (these should be floating point values like GPS seconds), (2) calculates the residual
        at these points, (3) interpolates these residuals between the known points and (4) subtracts them from
        the IMU track.

        *Arguments*:
         - t_all = timestamps associated with the orientation and position data in this pushbroom track.
         - t_known = an array of timestamps of shape (n,) associated with the known positions/orientations.
         - cp_known = an array of shape (n,3) containing known positions to fudge to.
         - co_known = an array of of shape (n,3) containing known orientations to fudge to.
         - method = the interpolation method to use, as defined in scipy.interpolate.interp1d. Default is 'quadratic'.

        *Returns*:
         - a fudged copy of this track.
        """
        # check shapes
        assert t_all.shape[0] == self.co.shape[0], "Error - timestamps t_all has %d entries, but %d are needed." % (t_all.shape[0], self.co.shape[0])
        assert t_known.shape[0] == cp_known.shape[0], "Error - timestamp shape (%d) != cp_known shape (%d)." % (t_known.shape[0], cp_known.shape[0])
        assert t_known.shape[0] == co_known.shape[0], "Error - timestamp shape (%d) != co_known shape (%d)." % (t_known.shape[0], co_known.shape[0])

        # get indices of main track that correspond to known frames
        S = [np.argmin(np.abs(t_all - _t)) for _t in t_known]
        if len(np.unique(S)) != len(S):
            print("Warning - duplicate matches are present; tracks probably do not overlap properly?")

        # compute errors
        e_p = cp_known - self.cp[S, :]  # error in position
        e_o = co_known - self.co[S, :]  # error in orientation

        # apply fudge to remove these (and interpolate fudge factor between known points)
        # Bug fix: `method` was previously ignored (kind was hardcoded to 'quadratic').
        cp_adj = self.cp + sp.interpolate.interp1d(t_known, e_p, bounds_error=False, axis=0, kind=method, fill_value=0)(t_all)
        co_adj = self.co + sp.interpolate.interp1d(t_known, e_o, bounds_error=False, axis=0, kind=method, fill_value=0)(t_all)

        # return a copy of this track
        return Pushbroom(cp_adj, co_adj, self.xfov, self.lfov, self.dims)

    def rebuild_R(self):
        """
        Rebuild the internal rotation matrix from the self.co arrays.
        """
        self.R = [spatial.transform.Rotation.from_euler('xyz', [a[0] - 180, a[1], 90 - a[2]], degrees=True) for a in
                  self.co]

    def apply_boresight(self, roll, pitch, yaw):
        """
        Add constant values (boresight) to the roll, pitch and yaw angles.

        *Returns*: a new Pushbroom instance with the adjusted values.
        """
        # apply boresight
        co_adj = self.co + np.array([roll, pitch, yaw])
        return Pushbroom(self.cp, co_adj, self.xfov, self.lfov, self.dims)

    def get_R(self, i=None):
        """
        Return scipy.spatial.Rotation objects that store camera orientation data.

        *Arguments*:
         i = the frame index to get a rotation object for. If None (Default) a list of all Rotations is returned.
        """
        if i is not None:
            return self.R[i]
        else:
            return self.R

    def get_axis(self, axis, i=None):
        """
        Get the local camera x (0), y (1) or z (2) axis vector from the rotation matrices.

        *Arguments*:
         - axis = 0 (x), 1 (y) or 2 (z).
         - i = the frame index to get vector for. If None (Default) a list of all vectors is returned.
        """
        # Bug fix: the 0..2 check was previously applied to the frame index i
        # instead of the axis argument.
        assert 0 <= axis < 3, "Error - axis must be 0, 1 or 2."
        if i is not None:
            return self.R[i].as_matrix()[:, axis]
        else:
            return np.array([_R.as_matrix()[:, axis] for _R in self.R])

    def get_x(self, i=None):
        """
        Get the Camera's along-track (movement) vector.

        *Arguments*:
         i = the frame index to get vector for. If None (Default) a list of all vectors is returned.
        """
        return self.get_axis(0, i)

    def get_y(self, i=None):
        """
        Get the Camera's cross-track vector.

        *Arguments*:
         i = the frame index to get vector for. If None (Default) a list of all vectors is returned.
        """
        return self.get_axis(1, i)

    def get_z(self, i=None):
        """
        Get the Camera's view vector.

        *Arguments*:
         i = the frame index to get vector for. If None (Default) a list of all vectors is returned.
        """
        return self.get_axis(2, i)

    def project_to_frame(self, cloud, i, flip=True):
        """
        Project the point cloud onto an (instantaneous) frame from this pushbroom camera.

        *Arguments*:
         - cloud = the point cloud to project. Must have points stored in cloud.xyz (HyCloud) or be a (n,3) array.
         - i = the camera frame (along track index) to use.
         - flip = True if pixel coordinates should be flipped.

        *Returns*:
         - a (n,3) array of projected points with coordinates that are:
            0. xtrack = the position (in pixels) of the points across track (pixels on the sensor)
            1. ltrack = the position (in pixels) of the points along track. Values between 0 and 1 will pass through the sensor slit.
            2. depth = the depth along the view direction of each point (in meters).
        """
        # project into camera coordinates
        if isinstance(cloud, np.ndarray):
            xyz = (cloud - self.cp[i]) @ (self.R[i].as_matrix())
        else:
            xyz = (cloud.xyz - self.cp[i]) @ (self.R[i].as_matrix())

        # calculate along-track coordinate (perspective projection perpendicular to flight line)
        ltrack = 0.5 + (
                xyz[:, 0] / xyz[:, 2]) / self.pl  # N.B. +0.5 moves from pixel-center coords to pixel-edge coords

        # calculate across-track coordinate (perspective projection onto sensor array)
        if flip:
            xtrack = (self.dims[0] / 2) - ((xyz[:, 1] / xyz[:, 2]) / self.px)
        else:
            xtrack = (self.dims[0] / 2) + ((xyz[:, 1] / xyz[:, 2]) / self.px)
        return np.array([xtrack, ltrack, xyz[:, 2]]).T

    def crop(self, start, end, image=None):
        """
        Clip this tracks IMU data to the specified frames. Note that this is not reversable.

        *Arguments*:
         - start = the start frame.
         - end = the end frame.
         - image = a HyImage instance to clip also, if provided (default is None). Clipping will
                   be applied to the y-direction.
        """
        # Bug fix: start >= 0 (cropping from the first frame was rejected).
        assert self.co.shape[0] > end and start >= 0, "Error - invalid range %d - %d (data shape is %s)" % (start, end, self.co.shape)
        assert start < end, "Error - start (%d) cannot be after end (%d)." % (start, end)
        self.co = self.co[start:end, :]
        self.cp = self.cp[start:end, :]
        self.R = self.R[start:end]
        self.dims = (self.dims[0], self.co.shape[0])
        if image is not None:
            image.data = image.data[:, start:end, :]

    def plot_waterfall(self, image=None, bands=(0, 1, 2), flipY=True, flipX=False, rot=True):
        """
        Plot IMU data and (if provided) the raw image frames.

        *Arguments*:
         - image = the waterfall image to plot. Default is None (no plot).
         - bands = the bands of the waterfall image to plot. Default is (0,1,2).
         - flipY = flip the Y axis of the image. Default is True.
         - flipX = flip the X axis of the image. Default is False.
         - rot = rotate the image by 90 degrees. Default is True.

        *Returns*:
         - fig,ax = the figure and a list of associated axes.
        """
        if image is None:
            fig, ax = plt.subplots(6, 1, figsize=(18, 12))
            # pad the axis list so indices below work with or without an image
            ax = [ax[0], ax[0], ax[1], ax[2], ax[3], ax[4], ax[5]]
        else:
            fig, ax = plt.subplots(7, 1, figsize=(18, 8))
            image.quick_plot(bands, ax=ax[0], rot=rot, flipY=flipY, flipX=flipX)
            ax[0].set_aspect('auto')  # override aspect equal, as this is probably wrong anyway
            ax[0].set_title("Waterfall")
            ax[0].set_yticks([])

        # plot IMU data
        for i, (y, l, c) in enumerate(zip(
                [self.cp[:, 0], self.cp[:, 1], self.cp[:, 2], self.co[:, 0], self.co[:, 1], self.co[:, 2]],
                ['X', 'Y', 'Z', 'Roll', 'Pitch', 'Yaw'],
                ['r', 'g', 'b', 'r', 'g', 'b'])):
            ax[i + 1].set_ylabel(l)
            ax[i + 1].plot(y, color=c)

        # clean up axes
        for a in ax:
            a.set_xticks(range(0, self.co.shape[0], 250))
            a.set_xlim(0, self.co.shape[0])
            a.grid()
        [ax[i].set_xticklabels([]) for i in range(6)]
        fig.tight_layout()
        return fig, ax

    def plot_curtain(self, scale=10, alpha=0.1, ax=None, ortho=None):
        """
        Create a curtain plot for visualising this camera track.

        *Arguments*:
         - scale = the scale of the camera basis vectors (curtain). This is in transformed coordinates.
         - alpha = the alpha value to use for the curtain. Default is 0.1.
         - ax = an external axis to plot too.
         - ortho = an orthoimage to use to project to pixel coordinates. Image must have a defined affine transform.
        """
        if ax is None:
            fig, ax = plt.subplots(1, 1, figsize=(10, 10))
            ax.set_aspect('equal')

        # get camera basis
        X = self.get_x()
        Y = self.get_y()
        Z = self.get_z()

        # build curtain
        S = []  # store camera pos for plotting
        XV = []
        YV = []
        ZV = []
        for i in range(len(self.cp)):
            # get camera position at this frame
            px = self.cp[i, 0]
            py = self.cp[i, 1]
            if ortho is not None:
                assert ortho.affine is not None, "Error - image must have an affine transform set."
                px, py = ortho.world_to_pix(px, py)
            S.append([px, py])
            if ortho is None:
                ZV += [[px, px + Z[i, 0] * scale], [py, py + Z[i, 1] * scale]]
                YV += [[px, px + Y[i, 0] * scale], [py, py + Y[i, 1] * scale]]
                XV += [[px, px + X[i, 0] * scale], [py, py + X[i, 1] * scale]]
            else:
                # we need to flip the y-axis as imshow puts the origin in the top-left.
                ZV += [[px, px + Z[i, 0] * scale], [py, py - Z[i, 1] * scale]]
                YV += [[px, px + Y[i, 0] * scale], [py, py - Y[i, 1] * scale]]
                XV += [[px, px + X[i, 0] * scale], [py, py - X[i, 1] * scale]]
        ax.plot(*ZV, color='r', lw=1, alpha=alpha, zorder=3)
        ax.plot(*YV, color='g', lw=1, alpha=alpha, zorder=2)
        ax.plot(*XV, color='b', lw=1, alpha=alpha, zorder=1)

        # plot camera track
        S = np.array(S)
        ax.cmap = ax.scatter(S[:, 0], S[:, 1], c=np.arange(0, S.shape[0]), cmap='jet', zorder=4)  # also store colorbar
        return ax.get_figure(), ax

    def plot_pose(self, i):
        """
        Plot the camera axes and scan line in world coordinates. Useful for checking camera orientation at a
        given frame (and debugging!).

        *Arguments*:
         - i = plot the i'th camera.

        *Returns*:
         - fig,ax = the matplotlib plot.
        """
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import axes3d
        from mpl_toolkits.mplot3d.art3d import Poly3DCollection

        # get local axes from rotation matrix
        M = self.get_R(i).as_matrix()
        x = M[:, 0]
        y = M[:, 1]
        z = M[:, 2]

        # plot them in 3D
        fig = plt.figure(figsize=(10, 10))
        # Bug fix: fig.gca(projection='3d') was removed in matplotlib >= 3.6;
        # use add_subplot instead.
        ax = fig.add_subplot(projection='3d')
        ax.quiver([0], [0], [0],
                  [x[0]], [x[1]], [x[2]],
                  length=1.0, normalize=False, colors='b', label='x (flight direction)')
        ax.quiver([0], [0], [0],
                  [y[0]], [y[1]], [y[2]],
                  length=1.0, normalize=False, colors='g', label='y (pixel direction)')
        ax.quiver([0], [0], [0],
                  [z[0]], [z[1]], [z[2]],
                  length=1.0, normalize=False, colors='r', label='z (view direction)')

        # add plot showing scan line
        V0 = np.array([0, 0, 0])
        V1 = z - 0.4 * y
        V2 = z + 0.4 * y
        xx = [V0[0], V1[0], V2[0], V0[0]]
        yy = [V0[1], V1[1], V2[1], V0[1]]
        zz = [V0[2], V1[2], V2[2], V0[2]]
        verts = [list(zip(xx, yy, zz))]
        ax.add_collection3d(Poly3DCollection(verts, alpha=0.2))
        ax.set_xlim(-1, 1)
        ax.set_ylim(-1, 1)
        ax.set_zlim(-1, 1)
        ax.set_xlabel("Easting")
        ax.set_ylabel("Northing")
        ax.set_zlabel("Elevation")
        ax.legend()
        return fig, ax

    def plot_strip(self, line, width=100, cloud=None, image=None, s=2, aspect='auto'):
        """
        Plot a projected strip for comparision between an image and a point cloud.

        *Arguments*:
         - line = the line ID to plot.
         - width = how many pixels to plot either side of the line. Default is 100.
         - cloud = the cloud to plot. Can be None.
         - image = the (linescanner) image to plot. Can be None.
         - s = size of rendered points in pixels. Default is 2.
         - aspect = the aspect ratio of the renders / image plot. Default is 'auto'.
        """
        # build plot
        if cloud is None and image is None:
            assert False, "At least one dataset (cloud or image) must be passed for plotting."
        elif cloud is None or image is None:
            fig, ax = plt.subplots(1, 1, figsize=(15, 5))
            iax = cax = ax
        else:
            fig, ax = plt.subplots(2, 1, figsize=(15, 5))
            cax = ax[0]
            iax = ax[1]

        # plot cloud
        if cloud is not None:
            # project onto frame
            pp = self.project_to_frame(cloud, line)
            pp[:, 1] += width / 2  # offset origin to left side of image

            # calculate point visibility (including points within width)
            # Bug fix: a second, contradictory visibility computation previously
            # overwrote the first; only the window that matches the offset is kept.
            vis = (pp[:, 0] > 0) & (pp[:, 0] < self.dims[0]) & (pp[:, 1] > 0) & (pp[:, 1] < width)

            # rasterise
            grd, z = rasterize(pp, vis, cloud.rgb, dims=(self.dims[0], width), s=s)

            # plot
            cax.imshow(np.transpose(grd, (1, 0, 2)) / 255.)
            cax.set_yticks([width / 2])
            cax.set_yticklabels(["line %d" % line])
            cax.set_aspect(aspect)
            cax.set_xticks([])
            cax.set_title("Point cloud RGB")
        if image is not None:
            # Bug fix: use iax here — ax is a single axis (not indexable) when
            # only the image was provided.
            image.quick_plot((0, 1, 2), ax=iax)
            iax.set_ylim(line - width, line + width)
            iax.set_yticks([line])
            iax.set_yticklabels(["line %d" % line])
            iax.set_aspect(aspect)
            iax.set_title("Scanner frame")
        fig.tight_layout()
        fig.show()
def project_pushbroom(image, cloud, cam, chunk=500, step=100, near_clip=10., vb=True):
    """
    Map an image acquired using a moving pushbroom scanner onto a point cloud using known
    camera position and orientations for each line of the image.

    *Arguments*:
     - image = a HyImage instance containing the data to project. This is only used to determine image dimensions.
     - cloud = the destination point cloud to project data onto.
     - cam = a pushbroom camera instance.
     - chunk = The size of chunks used to optimise the projection step. Points are culled based on the first and
               last line of each chunk prior to processing to reduce the number of projections that need to be
               performed. To reduce errors at chunk margins these chunks are padded by 50%. Default is 500.
     - step = the step to use in the masking step for each chunk. Default is 100. Reducing this will ensure no points
              are missed, but at large performance cost.
     - near_clip = the z-depth of the near clipping plane. Default is 10.0 m.
     - vb = True if a progress bar should be displayed. Default is True.
    *Returns*:
     - A hylite.project.PMap instance containing the mapping matrix between points and pixels.
    """
    # check dims match
    assert image.xdim() == cam.dims[0] \
           and image.ydim() == cam.dims[1], \
        "Error - image and camera dimensions do not match. Try rotating the image using image.rot90()."

    # store point IDs (so these don't get ruined by clipping)
    pointIDs = np.arange(0, cloud.point_count(), dtype=np.uint32)

    # build an adjacency matrix in flat form
    points = []
    pixels = []
    depths = []

    # loop through each chunk
    for c in range(0, image.ydim(), chunk):
        #######################################################
        ## Clip point cloud to chunk using coarse projections
        #######################################################
        # calculate start of frame projection and facing
        P0 = cam.project_to_frame(cloud, c)
        F0 = P0[:, 1] > 0  # true for points in front of scan line
        mask = np.full(cloud.point_count(), False)  # mask of points visible in chunk
        loop = range(c + step, c + chunk + step, step)
        if vb:
            loop = tqdm(loop, leave=False, desc="Masking chunk %d/%d" % (c / chunk + 1, image.ydim() / chunk))
        for i in loop:
            if i >= image.ydim():  # reached end of image
                continue  # use continue here rather than break to use up the rest of the progress bar so it is removed
            P1 = cam.project_to_frame(cloud, i)
            F1 = P1[:, 1] > 0  # true for points in front of scan line
            # a point was swept by the scan line between these two rows iff its
            # facing flag flipped; also require it inside the sensor's x-range
            vis = (F0 != F1)  # points visible in this frame
            vis = vis & np.logical_or((P0[:, 0] > 0) & (P0[:, 0] < cam.dims[0]),
                                      (P1[:, 0] > 0) & (P1[:, 0] < cam.dims[0]))
            mask[vis] = True  # these points should be included in final projection
            F0 = F1
            P0 = P1
        # subset points to those visible in this chunk (only)
        xyz = cloud.xyz[mask]  # get points
        pIDs = pointIDs[mask]  # store IDs in the original point cloud
        #######################################################
        ## Do per-line projections for subset point cloud
        #######################################################
        # calculate start of frame projection and facing (with subsetted cloud)
        P0 = cam.project_to_frame(xyz, c)
        F0 = P0[:, 1] > 0  # true for points in front of scan line
        loop = range(c + 1, c + chunk + 1)
        if vb:
            loop = tqdm(loop, leave=False,
                        desc="Projecting chunk %d/%d (%d points)" % (c / chunk + 1, image.ydim() / chunk, xyz.shape[0]))
        for i in loop:
            if i >= image.ydim():  # reached end of image
                continue  # use continue here rather than break to use up the rest of the progress bar so it is removed
            # calculate end of frame projection and facing
            P1 = cam.project_to_frame(xyz, i)
            F1 = P1[:, 1] > 0  # true for points in front of scan line
            # get points crossed by pushbroom
            vis = (F0 != F1)  # N.B. this culls most of the points in the cloud!
            # remove points out of field of view (but co-planar with line)
            vis = vis & np.logical_or((P0[:, 0] > 0) & (P0[:, 0] < cam.dims[0]),
                                      (P1[:, 0] > 0) & (P1[:, 0] < cam.dims[0])) \
                  & np.logical_or(P0[:, 2] > near_clip, P1[:, 2] > near_clip)
            # cull invisible points
            _PP = np.vstack([np.clip(P0[vis, 0], 0, image.xdim() - 1),
                             np.clip(P1[vis, 0], 0, image.xdim() - 1)]).T  # cast to indices
            _PP = np.sort(_PP, axis=1).astype(np.uint)  # sort so _PP[i,0]:_pp[i,1] gives the range of indices.
            _PP[:, 1] += 1  # add one to final index for slicing
            _Z = np.max([P0[vis, 2], P1[vis, 2]], axis=0)  # far depth of the two bracketing projections
            for n, pid in enumerate(pIDs[vis]):
                # add to link matrix: point pid maps to the run of row-i pixels it swept past
                px = range(int(i * image.xdim() + _PP[n, 0]), int(i * image.xdim() + _PP[n, 1]))
                pixels += px
                points += [pid] * len(px)
                depths += [_Z[n]] * len(px)
            # update left clipping plane
            F0 = F1
            P0 = P1
    # build a projection map and return it
    pmap = PMap(image.xdim(), image.ydim(), cloud.point_count(), cloud=cloud, image=image)
    pmap.set_flat(points, pixels, np.array(depths))
    return pmap
def get_corr_coef(pmap, bands=(0, 1, 2)):
    """
    Calculate the Pearson correlation coefficient between projected RGB colours
    (pmap.image[..., bands]) and pmap.cloud.rgb. Used to optimise boresight values
    against point cloud colours (slow, but worthwhile).

    *Arguments*:
     - pmap = the pmap instance containing the projection.
     - bands = indices for the red, green and blue bands of pmap.image. Default is (0,1,2).
    *Returns*:
     - the correlation of the red, green and blue bands.
    """
    assert pmap.cloud.rgb is not None, "Error - cloud must contain independent RGB information."
    reference = pmap.cloud.rgb  # 'true' RGB from SfM
    projected = push_to_cloud(pmap, bands, method='count').data  # projected RGB
    # only compare points that carry colour in both datasets
    valid = (projected > 0).any(axis=-1) & (reference > 0).any(axis=-1)
    coeffs = []
    for channel in range(3):
        r, _ = sp.stats.pearsonr(reference[valid, channel], projected[valid, channel])
        coeffs.append(r)
    return coeffs
def optimize_boresight(track, cloud, image, bands=(0, 1, 2), n=100, iv=np.array([0, 0, 0]), scale=3.,
                       coarse_eps=0.5, fine_eps=0.05, ztol=0, ftol=0,
                       vb=True, gf=True):
    """
    Applies a least-squares solver to find boresight values that result in the best correlation
    between projected RGB and RGB stored on a photogrammetric point cloud. This can be very slow,
    so use the subsample argument to significantly subsample the point cloud. It is also a good idea
    to calculate these values using a small but high-quality subset of a swath; the optimised values
    can subsequently be applied to the whole dataset.

    *Arguments*:
     - track = the pushbroom camera/track to adjust (must implement apply_boresight).
     - cloud = the cloud containing the geometry to project onto and associated RGB values.
     - image = the image containing RGB data.
     - bands = indices for the red, green and blue bands of image. Default is (0,1,2).
     - n = the subsampling factor for the pointcloud (uses cloud.xyz[::n,:]). Default is 100.
     - iv = the initial value for the boresight estimation. Default is [0,0,0].
     - scale = The size of the search space. Default is ±3 degrees from the initial value.
     - coarse_eps = the perturbation step size for the first solution.
     - fine_eps = the perturbation step size for subsequent refinement. Set <= 0 to skip.
     - ztol = the tolerance of the z-filtering step. Set to 0 to disable (default).
     - ftol = the maximum number of points per (valid) pixel. Set to 0 to disable (default).
     - vb = True if output of the least-squares solver should be printed at each state.
     - gf = True if a plot should be generated showing the search trace. Default is True.
    *Returns*:
     - track = an updated track with the boresight applied.
     - boresight = a (3,) array with optimised boresight adjustments (roll, pitch, yaw).
     - trace = the search path of the optimisation, with shape (niter,4). The last column contains
               the cost function at each iteration point.
    """
    # check cloud and build subsampled copy
    assert cloud.rgb is not None, "Error - cloud must contain independent RGB information."
    cloud = hylite.HyCloud(cloud.xyz[::n, :], rgb=cloud.rgb[::n, :])

    # setup cost function; every evaluation is appended to `trace` so the best
    # iterate can be recovered afterwards (TNC does not always end at it)
    trace = []

    def _opt(X, track, cloud, image):
        # update track
        track = track.apply_boresight(*X)
        # build projection map
        pmap = project_pushbroom(image, cloud, track, chunk=200, step=50, vb=False)
        # filter
        if ztol > 0:
            pmap.filter_occlusions( ztol )
        if ftol > 0:
            pmap.filter_footprint( ftol )
        # return cost (negated summed correlation, as minimize() minimises)
        c = -np.sum(get_corr_coef(pmap, bands=bands))
        trace.append((X[0], X[1], X[2], -c))  # add to trace
        if vb:
            print("\r%d: boresight = [%.3f,%.3f,%.3f]; correlation=%.4f." % (len(trace), X[0], X[1], X[2], -c / 3),
                  end=' ')
        return c

    # calculate bounds
    bounds = [(v - scale, v + scale) for v in iv]
    # run coarse optimisation
    res = minimize(_opt, np.array(iv), args=(track, cloud, image),
                   bounds=bounds, method='TNC', options=dict(eps=coarse_eps))
    if vb:
        print("Complete [%s]." % res.message)
    tr = np.array(trace)
    s = np.argmax(
        tr[:, -1])  # get best value from iterations (with TNC this isn't always the optimal solution for some reason?)
    # run fine optimisation, restarting from the best coarse iterate
    if fine_eps > 0:
        if vb:
            print("Running fine adjustment...")
        bounds = [(v - scale, v + scale) for v in iv]
        res = minimize(_opt, tr[s, :3], args=(track, cloud, image),
                       bounds=bounds, method='TNC', options=dict(eps=fine_eps))
        if vb:
            print("Complete [%s]." % res.message)
        tr = np.array(trace)
    tr[:, -1] /= 3  # convert sum of correlation coeffs to average (is more interpretable)
    s = np.argmax(
        tr[:, -1])  # get best value from iterations (with TNC this isn't always the optimal solution for some reason?)
    # plot?
    if gf:
        plt.plot(tr[:, 0], tr[:, 3], color='r', label='Roll')
        plt.plot(tr[:, 1], tr[:, 3], color='g', label='Pitch')
        plt.plot(tr[:, 2], tr[:, 3], color='b', label='Yaw')
        plt.scatter(tr[s, 0], tr[s, 3], color='k', zorder=10)
        plt.scatter(tr[s, 1], tr[s, 3], color='k', zorder=10)
        plt.scatter(tr[s, 2], tr[s, 3], color='k', zorder=10)
        plt.title("Iter %d: boresight = [%.3f,%.3f,%.3f]" % (tr.shape[0], tr[s, 0], tr[s, 1], tr[s, 2]))
        plt.legend()
        plt.xlabel('Value')
        plt.ylabel('Correlation')
        plt.show()
    # apply to track
    return track.apply_boresight(*tr[s, :3]), tr[s, :3], tr
# Read two integers and print their sum.
n1 = int(input('Digite um nº:'))
n2 = int(input('Digite outro nº:'))
soma = n1 + n2
print(f'A soma é: {soma}')
|
# Sum of the decimal digits of 2**1000 (Project Euler style).
n = 1000
prod = 2 ** n
ans = sum(int(digit) for digit in str(prod))
print(ans)
|
Ceci est un script parce que Simon a tout mergé.
|
# 5-10
# Prompt for a username until one is chosen that is not already taken,
# then register it (comparison is case-insensitive).
current_users = ['markfromjoberg', 'ncmbartlett', 'nakedcranium', 'naomiche', 'therealekevin']
new_user = input('Please enter your desired username: ').lower()
while new_user in current_users:
    print('Sorry that username is taken.')
    new_user = input('Please enter another username: ').lower()
current_users.append(new_user)
if new_user in current_users:
    print('Username successfully registered.')
print('\r')
print(current_users)
import random
from center import Center
from tract import Tract
class Model:
    """Redistricting model: wraps raw census tracts and launches district assignment."""

    def __init__(self, census_tracts, number_of_districts):
        # Wrap raw tract records in Tract objects.
        self.census_tracts = [Tract(tract) for tract in census_tracts]
        self.number_of_districts = number_of_districts
        # NOTE(review): subscripts Tract instances with ['population'] — assumes Tract
        # implements __getitem__; confirm, otherwise this should use the raw records.
        self.target_district_population = sum([tract['population'] for tract in self.census_tracts]) / self.number_of_districts
        self.state_center = Center.locate_center(self.census_tracts)
        # Bundle of everything the assignment step needs.
        self.model_parameters = {
            'census_tracts': self.census_tracts,
            'number_of_districts': self.number_of_districts,
            'target_district_population': self.target_district_population,
            'state_center': self.state_center
        }
        # NOTE(review): `feedback` is not defined or imported in this module —
        # presumably a QGIS processing feedback object injected elsewhere; confirm.
        feedback.pushInfo('Model Initialized')
        self.complete_district_assignment()

    # District Assignment
    def complete_district_assignment(self):
        # NOTE(review): DistrictAssignment is not imported in this module — confirm its origin.
        assignment = DistrictAssignment(self.model_parameters)
|
from PyQt5.QtCore import QAbstractListModel, Qt, pyqtSignal, pyqtSlot, QModelIndex
class BestellModel(QAbstractListModel):
    """Qt list model holding an order ("Bestellung") as dicts with 'name'/'preis'."""

    NameRole = Qt.UserRole + 1
    PreisRole = Qt.UserRole + 2
    bestellungChanged = pyqtSignal()

    def __init__(self, parent=None):
        super().__init__(parent)
        self.bestellung = []  # list of {'name': str, 'preis': str}

    def data(self, index, role=Qt.DisplayRole):
        """Return the name/price for the requested role (None otherwise)."""
        row = index.row()
        if role == BestellModel.NameRole:
            return self.bestellung[row]["name"]
        if role == BestellModel.PreisRole:
            return self.bestellung[row]["preis"]

    def rowCount(self, parent=QModelIndex()):
        return len(self.bestellung)

    def roleNames(self):
        return {
            BestellModel.NameRole: b'name',
            BestellModel.PreisRole: b'preis',
        }

    @pyqtSlot(str, str)
    def addProdukt(self, name, preis):
        """Append a product to the order."""
        self.beginInsertRows(QModelIndex(), self.rowCount(), self.rowCount())
        self.bestellung.append({'name': name, 'preis': preis})
        self.endInsertRows()

    @pyqtSlot(int, str, str)
    def editProdukt(self, row, name, preis):
        """Replace the product at `row` and notify attached views."""
        ix = self.index(row, 0)
        self.bestellung[row] = {'name': name, 'preis': preis}
        # BUG FIX: the original emitted `bestellungChanged` (declared with no
        # arguments) with three arguments, raising a TypeError at runtime; views
        # must be notified through the model's dataChanged signal instead.
        self.dataChanged.emit(ix, ix, [BestellModel.NameRole, BestellModel.PreisRole])

    @pyqtSlot(int)
    def deleteProdukt(self, row):
        """Remove the product at `row`."""
        # BUG FIX: the begin/end pair was mismatched (beginRemoveColumns with
        # endRemoveRows), which corrupts view bookkeeping; rows are removed here.
        self.beginRemoveRows(QModelIndex(), row, row)
        del self.bestellung[row]
        self.endRemoveRows()
from bibpdf import normalizer
from bibpdf.formatters import simple_format
__author__ = 'Keji Li'
def order_str(order: int) -> str:
    """Return *order* with its English ordinal suffix (1st, 2nd, 3rd, 4th, ...).

    The teens (11th, 12th, 13th) are handled as the usual exceptions.
    """
    if 11 <= order % 100 <= 13:
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(order % 10, "th")
    return "{0}{1}".format(order, suffix)
def author_list(x: list) -> str:
    """
    Format a per-author listing of papers, one author heading per entry.

    :param x: a list of authors with that last name, each list has the author info and a list of paper objects
    :type x: [((int, str, str), [(dict, int)])]
    :return: a human-readable multi-line summary, or a hint string when x is empty.
    """
    if len(x) == 0:
        return "Cannot find any author with that last name in the library, check spelling?\n"
    result_str = list()
    for idx, (author, paper_list) in enumerate(x):
        # NOTE(review): the :type: note says author[0] is an int, but .title() is a
        # str method — either the type note or this call is wrong; confirm.
        result_str.append("{0} {1}:\n".format(author[1].title(), author[0].title()))
        # NOTE(review): paper_list items are unpacked as 3-tuples here although the
        # type note says (dict, int); confirm the actual shape.
        for paper, order, role in paper_list:
            # `order` is zero-based; render as a 1-based ordinal ("1st author", ...).
            result_str.append("\t{2}: as {0} {1}, {3}\n".format(order_str(order + 1), role, paper['ID'],
                                                                simple_format.apply(paper)))
    return ''.join(result_str)
def item_list(x: list) -> str:
    """Convert a list of bibtex entry items to a string.

    :param x: a list of item objects (which are dicts)
    :type x: list[dict]
    :return: the formatted string, one entry per line
    """
    if not x:
        return "Cannot find any paper with that combination of keywords?\n"
    return ''.join("{0}\n".format(simple_format.apply(paper)) for paper in x)
def get_id(x: dict) -> str:
    """Build a citation key: normalised first author/editor surname ('anon' if
    neither exists) followed by the year, or the title's first word as fallback."""
    for role in ('author', 'editor'):
        if role in x:
            base = normalizer.apply(x[role][0][0].lower().replace(' ', ''))
            break
    else:
        base = 'anon'
    if 'year' in x:
        suffix = str(x['year'])
    else:
        suffix = x['title'].split()[0].title()
    return base + suffix
|
# Demonstrate string indexing (positive/negative) and slicing.
a = "pikachu"
for idx in (0, 5, -1, -2, -3):
    print(a[idx])
for start, stop in ((0, 3), (0, 7), (0, 9), (1, 5)):
    print(a[start:stop])
from marshmallow import Schema, fields, ValidationError
import os
# Custom validators
def must_not_be_blank(data):
    """Reject falsy values (None, '', empty containers, 0)."""
    if data:
        return
    raise ValidationError("Data not provided.")
def must_be_in_allowed_extension(extension: str):
    """Return True if *extension* occurs in the ALLOWED_EXTENSIONS env variable.

    BUG FIX: the original did `extension in os.environ.get('ALLOWED_EXTENSIONS')`
    which raises TypeError when the variable is unset; default to '' so a missing
    variable simply rejects every extension.
    NOTE(review): this is a substring test ('jp' would match 'jpg'); confirm
    whether ALLOWED_EXTENSIONS should be split on a separator instead.
    """
    return extension in os.environ.get('ALLOWED_EXTENSIONS', '')
def validate_quantity(n):
    """Validate an order quantity; raises ValidationError outside [0, 30]."""
    # NOTE(review): the message says "greater than 0" but the check permits n == 0
    # (only negative values raise) — confirm which is intended.
    if n < 0:
        raise ValidationError("Quantity must be greater than 0.")
    if n > 30:
        raise ValidationError("Quantity must not be greater than 30.")
def validate_thumbnail(thumbnail):
    """Placeholder thumbnail validator.

    TODO: not implemented — currently accepts any value.
    """
    pass
|
from copy import deepcopy
from indigox.config import BALL_DATA_FILE, INFINITY, MAX_SOLUTIONS
from indigox.misc import BondOrderAssignment, graph_to_dist_graph, node_energy
# Optional BALL backend: everything below only exists when BALLCore imports.
try:
    import BALLCore as BALL
    BALL_AVAILABLE = True
    # Map element symbols to BALL periodic-table entries (BALL's PTE keys are upper-case).
    BALL_ELEMENTS = dict(
        H=BALL.PTE['H'], He=BALL.PTE['HE'], Li=BALL.PTE['LI'],
        Be=BALL.PTE['BE'], B=BALL.PTE['B'], C=BALL.PTE['C'],
        N=BALL.PTE['N'], O=BALL.PTE['O'], F=BALL.PTE['F'],
        Ne=BALL.PTE['NE'], Na=BALL.PTE['NA'], Mg=BALL.PTE['MG'],
        Al=BALL.PTE['AL'], Si=BALL.PTE['SI'], P=BALL.PTE['P'],
        S=BALL.PTE['S'], Cl=BALL.PTE['CL'], Ar=BALL.PTE['AR'],
        K=BALL.PTE['K'], Ca=BALL.PTE['CA'], Sc=BALL.PTE['SC'],
        Ti=BALL.PTE['TI'], V=BALL.PTE['V'], Cr=BALL.PTE['CR'],
        Mn=BALL.PTE['MN'], Fe=BALL.PTE['FE'], Co=BALL.PTE['CO'],
        Ni=BALL.PTE['NI'], Cu=BALL.PTE['CU'], Zn=BALL.PTE['ZN'],
        Ga=BALL.PTE['GA'], Ge=BALL.PTE['GE'], As=BALL.PTE['AS'],
        Se=BALL.PTE['SE'], Br=BALL.PTE['BR'], Kr=BALL.PTE['KR'],
        Rb=BALL.PTE['RB'], Sr=BALL.PTE['SR'], Y=BALL.PTE['Y'],
        Zr=BALL.PTE['ZR'], Nb=BALL.PTE['NB'], Mo=BALL.PTE['MO'],
        Tc=BALL.PTE['TC'], Ru=BALL.PTE['RU'], Rh=BALL.PTE['RH'],
        Pd=BALL.PTE['PD'], Ag=BALL.PTE['AG'], Cd=BALL.PTE['CD'],
        In=BALL.PTE['IN'], Sn=BALL.PTE['SN'], Sb=BALL.PTE['SB'],
        Te=BALL.PTE['TE'], I=BALL.PTE['I'], Xe=BALL.PTE['XE'],
        Cs=BALL.PTE['CS'], Ba=BALL.PTE['BA'], La=BALL.PTE['LA'],
        Ce=BALL.PTE['CE'], Pr=BALL.PTE['PR'], Nd=BALL.PTE['ND'],
        Pm=BALL.PTE['PM'], Sm=BALL.PTE['SM'], Eu=BALL.PTE['EU'],
        Gd=BALL.PTE['GD'], Tb=BALL.PTE['TB'], Dy=BALL.PTE['DY'],
        Ho=BALL.PTE['HO'], Er=BALL.PTE['ER'], Tm=BALL.PTE['TM'],
        Yb=BALL.PTE['YB'], Lu=BALL.PTE['LU'], Hf=BALL.PTE['HF'],
        Ta=BALL.PTE['TA'], W=BALL.PTE['W'], Re=BALL.PTE['RE'],
        Os=BALL.PTE['OS'], Ir=BALL.PTE['IR'], Pt=BALL.PTE['PT'],
        Au=BALL.PTE['AU'], Hg=BALL.PTE['HG'], Tl=BALL.PTE['TL'],
        Pb=BALL.PTE['PB'], Bi=BALL.PTE['BI'], At=BALL.PTE['AT'],
        Rn=BALL.PTE['RN'], Fr=BALL.PTE['FR'], Ra=BALL.PTE['RA'],
        Ac=BALL.PTE['AC'], Th=BALL.PTE['TH'], Pa=BALL.PTE['PA'],
        U=BALL.PTE['U'], Np=BALL.PTE['NP'], Pu=BALL.PTE['PU'],
        Po=BALL.PTE['PO'], Am=BALL.PTE['AM'], Cm=BALL.PTE['CM'],
        Bk=BALL.PTE['BK'], Cf=BALL.PTE['CF'], Es=BALL.PTE['ES'],
        Fm=BALL.PTE['FM'], Md=BALL.PTE['MD'], No=BALL.PTE['NO'],
        Lr=BALL.PTE['LR'], Rf=BALL.PTE['RF'], Db=BALL.PTE['DB'],
        Sg=BALL.PTE['SG'], Bh=BALL.PTE['BH'], Hs=BALL.PTE['HS'],
        Mt=BALL.PTE['MT'],)
    # setup the bond order processor
    bop = BALL.AssignBondOrderProcessor()
    # aliases for the long option/algorithm names
    opts = BALL.AssignBondOrderProcessor.Option
    algo = BALL.AssignBondOrderProcessor.Algorithm
    # overwrite all existing bond orders and kekulise aromatic rings
    bop.options.setBool(opts.KEKULIZE_RINGS, True)
    bop.options.setBool(opts.OVERWRITE_SINGLE_BOND_ORDERS, True)
    bop.options.setBool(opts.OVERWRITE_DOUBLE_BOND_ORDERS, True)
    bop.options.setBool(opts.OVERWRITE_TRIPLE_BOND_ORDERS, True)
    bop.options.set(opts.ALGORITHM, algo.A_STAR)
    # pure penalty-based search: ignore bond lengths entirely
    bop.options.setReal(opts.BOND_LENGTH_WEIGHTING, 0)
    bop.options.setInteger(opts.MAX_NUMBER_OF_SOLUTIONS, MAX_SOLUTIONS)
    bop.options.setBool(opts.COMPUTE_ALSO_NON_OPTIMAL_SOLUTIONS, False)
    bop.options.setBool(opts.ADD_HYDROGENS, False)
    bop.options.set(opts.INIFile, str(BALL_DATA_FILE))
except ImportError:
    # BALL is optional; BallOpt.run() degrades gracefully when it is missing.
    BALL_AVAILABLE = False
class BallOpt(BondOrderAssignment):
    """Bond order assignment backed by BALL's AssignBondOrderProcessor.

    BALL does not optimise formal charges; the best of its solutions is chosen
    by re-scoring each one with this package's node energies.
    NOTE(review): uses the pre-2.4 networkx `Graph.node` attribute API, and
    relies on `self.log` presumably provided by BondOrderAssignment — confirm.
    """

    def __init__(self, G):
        # G: the molecule graph to assign bond orders for.
        self.init_G = G

    def initialise(self):
        """Build a BALL System/Molecule mirroring init_G (all bonds start at order 1)."""
        self.G = graph_to_dist_graph(self.init_G)
        self.system = BALL.System()
        self.mol = BALL.Molecule()
        self.atoms = {}
        self.bonds = []
        for a, d in self.init_G.nodes(True):
            ball_e = BALL_ELEMENTS[d['element']]
            atom = BALL.Atom()
            # atom names carry the original node index so results map back afterwards
            atom.setName(str(a))
            atom.setElement(ball_e)
            self.atoms[a] = atom
        for a, b, d in self.init_G.edges(data=True):
            bond = self.atoms[a].createBond(self.atoms[b])
            bond.setOrder(1)
            self.bonds.append(bond)
        for atom in self.atoms.values():
            self.mol.insert(atom)
        self.system.insert(self.mol)

    def run(self):
        """Run BALL and return (best_graph, best_energy).

        Falls back to all-single bonds and zero formal charges (energy INFINITY)
        when BALLCore is unavailable.
        """
        if not BALL_AVAILABLE:
            self.log.warning('BALL method is unavailable as BALLCore could not '
                             'be loaded.')
            for x in self.init_G:
                self.init_G.node[x]['formal_charge'] = 0
                for y in self.init_G[x]:
                    self.init_G[x][y]['order'] = 1
            return self.init_G, INFINITY
        else:
            self.log.warning("BALL method selected. Formal charges will not be "
                             "optimised.")
        best_ene = INFINITY * INFINITY
        best_g = None
        self.initialise()
        self.system.apply(bop)
        for i in range(bop.getNumberOfComputedSolutions()):
            bop.apply(i)
            # copy BALL's formal charges and bond orders back into self.G
            for atom in BALL.atoms(self.system):
                a = int(str(atom.getName()))
                fc = int(atom.getFormalCharge())
                self.G.node[(a,)]['fc'] = fc
            for bond in BALL.bonds(self.system):
                a = int(str(bond.getFirstAtom().getName()))
                b = int(str(bond.getSecondAtom().getName()))
                bo = int(bond.getOrder())
                if a > b:
                    a, b = b, a
                self.G.node[(a, b)]['e-'] = bo * 2
            # score this solution and keep the lowest-energy one
            i_ene = round(sum(node_energy(self.G, n) for n in self.G),5)
            if i_ene < best_ene:
                best_ene = i_ene
                best_g = self.assignment_to_graph()
        return best_g, best_ene

    def assignment_to_graph(self):
        """Translate the electron assignment in self.G back onto a copy of init_G."""
        G = deepcopy(self.init_G)
        for v in self.G:
            if len(v) == 1:
                G.node[v[0]]['formal_charge'] = 0
            if len(v) == 2:
                # two electrons per bond order
                G[v[0]][v[1]]['order'] = self.G.node[v]['e-'] // 2
        return G
|
#!/usr/bin/env python
#Bao Dang
#Assignment 1
class arraylist:
    """Fixed-capacity list backed by a pre-allocated Python list.

    Valid element positions run from 0 to self.last - 1; self.last is one past
    the final element (and is what end() returns).
    """

    def __init__(self):
        self.maxlength = 10000                    # fixed capacity
        self.elements = [None] * self.maxlength   # backing storage
        self.last = 0                             # one past the final element

    # Return the first position
    def first(self):
        return 0

    # Return the position one past the last element
    def end(self):
        return self.last

    # Return the value at position p (prints a warning when out of range)
    def retrieve(self, p):
        # BUG FIX: the original accepted p == self.last (one past the end) and
        # returned stale data; valid read positions are 0 <= p < self.last.
        if p >= self.last or p < 0:
            print("Position does not exist")
        else:
            return self.elements[p]

    # Locate the position of value x (None when absent)
    def locate(self, x):
        for q in range(self.last):
            if self.elements[q] == x:
                return q

    # Return the next position, or None past the end
    def next_cell(self, p):
        if p >= self.last or p < 0:
            return None
        else:
            return p + 1

    # Return the previous position, or None before the start
    def previous(self, p):
        if p > self.last or p <= 0:
            return None
        else:
            return p - 1

    # Insert x at position p, shifting later elements right
    def insert(self, x, p):
        if self.last >= self.maxlength:
            print("List is full")
        elif p > self.last or p < 0:
            print("Position does not exist")
        elif p == self.last:
            # appending at the end needs no shifting
            self.elements[p] = x
            self.last = self.last + 1
        else:
            self.elements[p + 1:self.last + 1] = self.elements[p:self.last]
            self.elements[p] = x
            self.last = self.last + 1

    # Delete the element at position p, shifting later elements left
    def delete(self, p):
        # BUG FIX: the original shifted the wrong way (elements[q-1] = elements[q]),
        # destroying the element at p-1 instead of removing the one at p, and wrote
        # to elements[-1] when p == 0. It also decremented last for an empty list.
        # p == self.last is still accepted and removes the final element, because
        # existing callers use delete(end()) with that meaning.
        if 0 <= p <= self.last and self.last > 0:
            for q in range(p, self.last - 1):
                self.elements[q] = self.elements[q + 1]
            self.last = self.last - 1

    # Clear the list
    def makenull(self):
        self.__init__()
if __name__ == "__main__":
    # Demo / smoke test of the arraylist operations (Python 2 print syntax).
    print "Insert 1-5"
    test = arraylist()
    # NOTE(review): each insert is at position 0, so the list reads 5,4,3,2,1.
    test.insert(1, 0)
    test.insert(2, 0)
    test.insert(3, 0)
    test.insert(4, 0)
    test.insert(5, 0)
    print "Delete the last numbers"
    test.delete(test.end())
    print "The last position is", test.end()
    print "Clear the list"
    test.makenull()
    # Rebuild the list by appending at the end each time.
    test.insert("a", 0)
    test.insert("b", 1)
    test.insert("c", 2)
    test.insert("d", 3)
    test.insert("e", 4)
    print "Create a list: a, b, c, d, e"
    print "Print the last position", test.end()
    print "Retrieve the second element", test.retrieve(1)
    print "Locate the index of b:", test.locate("b")
    print "Next position after 'd':", test.next_cell(3)
    print "Previous of 'd' index:", test.previous(test.locate("d"))
from django.shortcuts import render
def index_planner(request):
    """Render the planner landing page."""
    return render(request, 'planner/index.html')
def login_planner(request):
    """Login view for the planner app.

    TODO: not implemented — returns None, which Django rejects at runtime.
    """
    pass
from repositories.DataRepository import DataRepository
from flask import Flask, request, jsonify
from flask_socketio import SocketIO
from flask_cors import CORS
import os
import json
import time
#from datetime import datetime
import datetime
from datetime import timedelta
import threading
from subprocess import check_output
from serial import Serial,PARITY_NONE
import pygame
# Klasses
from RPi import GPIO
import spidev
from helpers.MCP3008 import MCP3008
from helpers.LCD import LCD
from helpers.SHIFTREGISTER import SHIFTREGISTER
from helpers.WS2801 import WS2801
# GPIO configuration
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)  # Broadcom pin numbering

# GPIO pin assignments
# LCD control lines
rs = 19
E = 26
# shift register lines
ds = 16
oe = 25
st_cp = 24
sh_cp = 23
mr = 18
# WS2801 LED strip (bit-banged clock/data)
sda = 21
clk = 20
# Clock: last HHMM string sent over serial (sentinel so the first send always fires)
vorige_tijd = 0000
# Serial link to the external display microcontroller
ser = Serial('/dev/ttyS0', 115200, bytesize=8, parity=PARITY_NONE, stopbits=1)
# Open hardware SPI for the MCP3008 ADC
spi = spidev.SpiDev()
# Driver objects for the attached hardware
mcp = MCP3008(spi)
shift = SHIFTREGISTER(ds,sh_cp,st_cp,mr,oe)
lcd= LCD(E,rs,shift)
ledstrip = WS2801(sda,clk,15)
# Flask application + websocket layer
app = Flask(__name__)
app.config['SECRET_KEY'] = 'Hier mag je om het even wat schrijven, zolang het maar geheim blijft en een string is'
socketio = SocketIO(app, cors_allowed_origins="*")
CORS(app)
# Program settings
delay = 60  # seconds between sensor polls
# Shared light/alarm state (mutated by the background threads)
statusLicht = "uit"  # current lamp effect ("uit" = off)
wekkerAan = 0        # 1 while an alarm is ringing
# Actions
def setup():
    """Initialise all attached hardware (shift register, LCD, LED strip)."""
    shift.setup()
    lcd.setup()
    # enable the shift register first — the LCD is driven through it
    shift.init_shift_register()
    # switch on / initialise the LCD
    lcd.init_lcd()
    ledstrip.setup()
def getTemp():
    """Background loop: sample the temperature sensor on ADC channel 0, store the
    reading and push the latest value to clients every `delay` seconds."""
    while True:
        volts = mcp.read_channel(0)
        # scale the 10-bit ADC count to degrees — TODO confirm sensor transfer function
        temp = ((volts * 460) / float(1023)) - 50
        temp = round(temp, 1)
        vandaag = datetime.datetime.now()
        vandaag = vandaag.strftime('%Y-%m-%d %H:%M:%S')
        DataRepository.insert_meting(temp,vandaag,1,1)
        # re-read from the database so clients get the canonical record
        temp = DataRepository.read_sensor_by_id(1,1)
        socketio.emit('B2F_temperatuur', temp,broadcast=True)
        time.sleep(delay)
def getTempGrafiek():
    """Background loop: push the temperature history (for the chart) every `delay` seconds."""
    while True:
        s = DataRepository.read_temperaturen(1,1)
        socketio.emit('B2F_grafiek', s,broadcast=True)
        time.sleep(delay)
def statusLight():
    """Background loop: broadcast the lamp status once per second and keep the
    module-level statusLicht in sync with the database."""
    global statusLicht
    while True:
        s = DataRepository.read_status_lamp(1)
        socketio.emit('B2F_statusLamp',s,broadcast=True)
        if(statusLicht != s["status"]):
            statusLicht = s["status"]
        time.sleep(1)
def lichtAanpassen():
    """Background loop: keep applying the current lamp effect while no alarm rings."""
    global statusLicht
    global wekkerAan
    # NOTE(review): once wekkerAan becomes 1 this loop exits and the thread is
    # never restarted — confirm that is intended.
    while wekkerAan == 0:
        lichten(statusLicht)
        time.sleep(0.1)
def checkAlarm():
    """Background loop: every second, fire any enabled alarm whose time window is
    open today and mark it as done for the day."""
    while True:
        global wekkerAan
        # Determine today's weekday, the current HH:MM and today's date stamp.
        vandaag = datetime.datetime.today()
        dag = vandaag.weekday()
        uur = vandaag.strftime("%H:%M")
        vandaag = vandaag.strftime('%Y-%m-%d 00:00:00')
        # Fetch the alarms that are switched on and have not yet fired today.
        s = DataRepository.read_alarmen_aan(1,vandaag)
        for alarm in s:
            # alarm["herhaling"] is a 7-character 0/1 string indexed by weekday:
            # 1 = ring on that day, 0 = skip.
            p = alarm["herhaling"]
            idAlarm = alarm["idAlarm"]
            tijdstip = alarm["tijdstip"]
            # Build the end of the 5-minute firing window.
            tijdstip = datetime.datetime.strptime(tijdstip, '%H:%M')
            tijdstip = tijdstip + timedelta(seconds=300)
            tijdstip = tijdstip.strftime("%H:%M")
            # Should this alarm ring today?
            if(int(p[dag]) == 1):
                # Only fire while the current time lies inside the window.
                if(uur > alarm["tijdstip"] and uur < str(tijdstip)):
                    print(wekkerAan)
                    if(wekkerAan == 0):
                        # The alarm time has just passed: fire it.
                        print("Alarm laten afgaan")
                        print(alarm["deuntje"])
                        licht = alarm["lichteffect"]
                        liedje = alarm["deuntje"]
                        liedje = "muziek/" + liedje
                        # Start the configured tune and light effect.
                        pygame.mixer.init()
                        pygame.mixer.music.set_volume(1.0)
                        pygame.mixer.music.load(liedje)
                        pygame.mixer.music.play(50)
                        # Keep ringing until the stop button (ADC channel 2) is pressed.
                        while mcp.read_channel(2) > 1000:
                            wekkerAan = 1
                            lichten(licht)
                        print("Uitzetten")
                        wekkerAan = 0
                        ledstrip.allesUit(15)
                        pygame.mixer.music.stop()
                        # Record that this alarm already rang today.
                        DataRepository.update_alarm_uit(vandaag,idAlarm)
        time.sleep(1)
def lichten(licht):
    """Drive the LED strip with the effect named by `licht` ("flikker", "kleur"
    or "gewoon"); any other value switches the strip off."""
    effecten = {
        "flikker": ledstrip.flikker,
        "kleur": ledstrip.kleur,
        "gewoon": ledstrip.gewoon,
    }
    effect = effecten.get(licht)
    if effect is None:
        ledstrip.allesUit(15)
    else:
        effect(15, 0.5, 255)
def initialLCD():
    """Show the device's IP address(es) on the LCD at boot.

    BUG FIX: the original ran str() on the bytes from check_output and then
    stripped every 'b' and 'n' character, which left repr artefacts (quotes and
    a trailing backslash from '\\n') in the displayed text. Decode the bytes and
    strip whitespace instead.
    """
    ips = check_output(['hostname', '--all-ip-addresses'])
    adressen = "IP:" + ips.decode('utf-8').strip()
    print(adressen)
    # instruction 1: presumably "clear display" — confirm against the LCD helper
    lcd.send_instruction_lcd(1)
    lcd.write_message(adressen)
    print("Lcd opgestart")
def read_serialport():
    """Read one line from the serial port and echo it when non-empty."""
    global ser
    antwoord = ser.readline()
    antwoord = antwoord.decode('utf-8')
    if antwoord != "":
        print(antwoord)
def tijd_sturen():
    """Background loop: whenever the HHMM time changes, send it over serial to the
    external clock display and echo any reply."""
    while True:
        global vorige_tijd
        nu = datetime.datetime.now()
        tijd = nu.strftime("%H%M")
        if tijd != vorige_tijd:
            ser.write(tijd.encode(encoding="utf-8"))
            read_serialport()
            vorige_tijd = tijd
        time.sleep(1)
# Threading: background threads are started from the socketio connect handler below.

# API ENDPOINTS
endpoint = '/api'


@app.route('/')
def hallo():
    """Health-check route."""
    return "Server is running, er zijn momenteel geen API endpoints beschikbaar."
@app.route(endpoint + '/alarmen', methods=['GET','POST'])
def get_alarmen():
    """GET: list all alarms of wekker 1. POST: create a new alarm from the posted fields."""
    if request.method == 'GET':
        s = DataRepository.read_alarmen(1)
        return jsonify(s), 200
    elif request.method == 'POST':
        gegevens = DataRepository.json_or_formdata(request)
        # the repository stores a full datetime; the date part is a fixed dummy
        tijdstip = gegevens["tijdstip"]
        tijdstip = "2020-01-01 " +tijdstip+":00"
        print(tijdstip)
        data = DataRepository.insert_alarm(gegevens["titel"], tijdstip, gegevens["herhaling"], gegevens["lichteffect"], gegevens["deuntje"], gegevens["wekkerID"], gegevens["status"], gegevens["kleur"], gegevens["kleurLedstrip"])
        if data is not None:
            if data > 0:
                return jsonify(status="success"), 200
            else:
                return jsonify(status=data), 204
        else:
            return jsonify(status="error"), 404
@app.route(endpoint + '/alarmen/update', methods=['PUT'])
def update_alarmen():
    """Update every field of an existing alarm (identified by idAlarm in the body)."""
    if request.method == 'PUT':
        gegevens = DataRepository.json_or_formdata(request)
        # the repository stores a full datetime; the date part is a fixed dummy
        tijdstip = gegevens["tijdstip"]
        tijdstip = "2020-01-01 " +tijdstip+":00"
        print(tijdstip)
        data = DataRepository.update_alarm_alles(gegevens["titel"], tijdstip, gegevens["herhaling"], gegevens["lichteffect"], gegevens["deuntje"], gegevens["status"], gegevens["kleur"], gegevens["kleurLedstrip"], gegevens["idAlarm"])
        if data is not None:
            if data > 0:
                return jsonify(status="success"), 200
            else:
                return jsonify(status=data), 204
        else:
            return jsonify(status="error"), 404
@app.route(endpoint + '/alarmen/<wekkerID>/<alarmID>', methods=['GET'])
def get_alarm_id(wekkerID,alarmID):
    """Return one alarm of the given wekker as JSON."""
    # the route only admits GET, so no method dispatch is needed
    alarm = DataRepository.read_alarm_by_id(alarmID, wekkerID)
    return jsonify(alarm), 200
@app.route(endpoint + '/muziekjes', methods=['GET'])
def get_muziekjes():
    """Return all available alarm tunes as JSON."""
    muziekjes = DataRepository.read_muziekjes()
    return jsonify(muziekjes), 200
@app.route(endpoint + '/alarmen/changeOnOff/<id>', methods=['PUT','GET'])
def updatetypes(id):
    """Toggle an alarm on/off; the request body must contain 'value'."""
    gegevens = DataRepository.json_or_formdata(request)
    resultaat = DataRepository.update_alarm(gegevens["value"], id)
    if resultaat is None:
        return jsonify(status="error"), 404
    if resultaat > 0:
        return jsonify(idtype=id, status="success"), 200
    return jsonify(status=resultaat), 204
@app.route(endpoint + '/smartlamp/changeOnOff/<wekkerID>', methods=['PUT','GET'])
def updateLamp(wekkerID):
    """Switch the smart lamp of the given wekker off."""
    resultaat = DataRepository.update_lamp_uit(wekkerID)
    if resultaat is None:
        return jsonify(status="error"), 404
    if resultaat > 0:
        return jsonify(wekkerID=wekkerID, status="success"), 200
    return jsonify(status=resultaat), 204
@app.route(endpoint + '/smartlamp/changeStatus/<wekkerID>/<status>', methods=['PUT','GET'])
def updateLampStatus(wekkerID,status):
    """Set the smart lamp of the given wekker to the requested effect/status."""
    resultaat = DataRepository.update_lamp_status(wekkerID, status)
    if resultaat is None:
        return jsonify(status="error"), 404
    if resultaat > 0:
        return jsonify(wekkerID=wekkerID, status="success"), 200
    return jsonify(status=resultaat), 204
@app.route(endpoint + '/alarmen/delete/<alarmID>', methods=['DELETE','GET'])
def delete_alarm_id(alarmID):
    """Delete an alarm by id.

    BUG FIX: the original declared methods=['DEL','GET']; 'DEL' is not an HTTP
    method, so real DELETE requests were answered with 405. 'GET' is kept for
    backward compatibility with the existing frontend calls.
    """
    data = DataRepository.delete_alarm(alarmID)
    if data is None:
        return jsonify(status="error"), 404
    if data > 0:
        return jsonify(alarmID=alarmID, status="success"), 200
    return jsonify(status=data), 204
# SOCKET IO
_achtergrond_gestart = False  # guard: start the background threads only once


@socketio.on('connect')
def initial_connection():
    """On first client connect: initialise hardware and start the background threads.

    BUG FIX: the original started six infinite-loop threads on EVERY client
    connect, so each page reload leaked another full set of threads; the guard
    makes hardware setup and thread start-up run exactly once.
    """
    global _achtergrond_gestart
    print('New client connect')
    if _achtergrond_gestart:
        return
    _achtergrond_gestart = True
    setup()
    threading.Thread(target=getTemp).start()
    threading.Thread(target=getTempGrafiek).start()
    threading.Thread(target=tijd_sturen).start()
    threading.Thread(target=checkAlarm).start()
    threading.Thread(target=statusLight).start()
    threading.Thread(target=lichtAanpassen).start()
    initialLCD()
@socketio.on('F2B_shutdown')
def shutdown_server():
    """Reboot the Pi on request from the frontend.

    BUG FIX: this handler was also named `initial_connection`, shadowing the
    connect handler's module-level name; renamed (the registered event,
    'F2B_shutdown', is unchanged).
    NOTE(review): `shutdown -r` reboots rather than powering off, although the
    log says 'Uitzetten' (switch off) — confirm which is intended.
    """
    print('Uitzetten')
    os.system('sudo shutdown -r now')
#
if __name__ == '__main__':
    socketio.run(app, debug=True, host='0.0.0.0')
    # NOTE(review): unreachable — socketio.run() blocks until shutdown, so this
    # second app.run() never executes.
    app.run(host="169.254.10.1", port=5000, debug=True)
|
import itertools
from collections import deque
from heapq import heappush, heappop, heapify
from Heuristic import Heuristic
from Node import Node
from Puzzle import Puzzle
class Solver:
    """Uninformed/informed search strategies over Puzzle states, tracking
    expansion statistics while searching."""

    def __init__(self):
        self.expanded_nodes: int = 0      # number of successor nodes generated
        self.max_search_depth: int = 0    # deepest depth reached so far
        self.max_frontier_size: int = 0   # largest frontier observed
@staticmethod
def backtrace(goal_node: Node):
current_node: Node = goal_node
moves: list = []
while current_node.parent is not None:
moves.append(current_node.move)
current_node = current_node.parent
return moves
"""
this function
"""
    def breadth_first_search(self, root_puzzle: Puzzle) -> tuple:
        """BFS from `root_puzzle` towards its goal_state.

        Returns (moves, goal_node, frontier, expanded_nodes, max_search_depth,
        max_frontier_size); implicitly returns None if the frontier empties.
        """
        root_node: Node = Node(root_puzzle)
        explored_nodes, queue = set(), deque([root_node])
        while queue:
            current_node: Node = queue.popleft()
            explored_nodes.add(current_node.map)
            if current_node.puzzle.state == root_puzzle.goal_state:
                return self.backtrace(current_node), current_node, queue, self.expanded_nodes, self.max_search_depth, \
                       self.max_frontier_size
            for neighbor in current_node.expand:
                self.expanded_nodes += 1
                if neighbor.map not in explored_nodes:
                    queue.append(neighbor)
                    explored_nodes.add(neighbor.map)
                    # NOTE(review): increments by 1 rather than jumping to
                    # neighbor.depth — correct only because BFS depth grows by 1.
                    if neighbor.depth > self.max_search_depth:
                        self.max_search_depth += 1
            if len(queue) > self.max_frontier_size:
                self.max_frontier_size = len(queue)
"""
this function
"""
def depth_first_search(self, root_puzzle: Puzzle) -> tuple:
root_node: Node = Node(root_puzzle)
explored_nodes, stack = set(), list([root_node])
while stack:
current_node: Node = stack.pop()
explored_nodes.add(current_node.map)
if current_node.puzzle.state == root_puzzle.goal_state:
return self.backtrace(current_node), current_node, stack, self.expanded_nodes, self.max_search_depth, \
self.max_frontier_size
for neighbor in reversed(current_node.expand):
self.expanded_nodes += 1
if neighbor.map not in explored_nodes:
stack.append(neighbor)
explored_nodes.add(neighbor.map)
if neighbor.depth > self.max_search_depth:
self.max_search_depth += 1
if len(stack) > self.max_frontier_size:
self.max_frontier_size = len(stack)
"""
this function
"""
def iterative_depth_first_search(self, root_puzzle: Puzzle) -> tuple:
count: int = 1
while True:
root_node: Node = Node(root_puzzle)
explored_nodes, stack = set(), list([root_node])
while stack:
current_node: Node = stack.pop()
explored_nodes.add(current_node.map)
if current_node.puzzle.state == root_puzzle.goal_state:
return self.backtrace(
current_node), current_node, stack, self.expanded_nodes, self.max_search_depth, \
self.max_frontier_size, count
if current_node.depth < count:
for neighbor in reversed(current_node.expand):
self.expanded_nodes += 1
if neighbor.map not in explored_nodes:
stack.append(neighbor)
explored_nodes.add(neighbor.map)
if neighbor.depth > self.max_search_depth:
self.max_search_depth += 1
if len(stack) > self.max_frontier_size:
self.max_frontier_size = len(stack)
count += 1
"""
this function
"""
def a_star(self, root_puzzle: Puzzle) -> tuple:
explored_nodes, heap, heap_entry, counter = set(), list(), {}, itertools.count()
key: int = Heuristic.manhattan_distance(root_puzzle)
root_node: Node = Node(root_puzzle, key=key)
entry: tuple = (key, root_node)
heappush(heap, entry)
heap_entry[root_node.map] = entry
while heap:
current_entry = heappop(heap)
explored_nodes.add(current_entry[1].map)
if current_entry[1].puzzle.state == root_puzzle.goal_state:
return self.backtrace(current_entry[1]), current_entry[1], heap, self.expanded_nodes, \
self.max_search_depth, self.max_frontier_size
for neighbor in current_entry[1].expand:
self.expanded_nodes += 1
neighbor.key = neighbor.cost + Heuristic.manhattan_distance(neighbor.puzzle)
entry = (neighbor.key, neighbor)
if neighbor.map not in explored_nodes:
heappush(heap, entry)
explored_nodes.add(neighbor.map)
heap_entry[neighbor.map] = entry
if neighbor.depth > self.max_search_depth:
self.max_search_depth += 1
elif neighbor.map in heap_entry and neighbor.key < heap_entry[neighbor.map][1].key:
heap_index = heap.index((heap_entry[neighbor.map][1].key,
heap_entry[neighbor.map][1]))
heap[int(heap_index)] = entry
heap_entry[neighbor.map] = entry
heapify(heap)
if len(heap) > self.max_frontier_size:
self.max_frontier_size = len(heap)
"""
this function
"""
def best_first_search(self, root_puzzle: Puzzle) -> tuple:
explored_nodes, heap, counter = set(), list(), itertools.count()
key: int = Heuristic.manhattan_distance(root_puzzle)
root_node: Node = Node(root_puzzle, key=key)
entry: tuple = (key, root_node)
heappush(heap, entry)
while heap:
current_entry = heappop(heap)
explored_nodes.add(current_entry[1].map)
if current_entry[1].puzzle.state == root_puzzle.goal_state:
return self.backtrace(current_entry[1]), current_entry[1], heap, self.expanded_nodes, \
self.max_search_depth, self.max_frontier_size
for neighbor in current_entry[1].expand:
self.expanded_nodes += 1
neighbor.key = neighbor.cost + Heuristic.manhattan_distance(neighbor.puzzle)
entry = (neighbor.key, neighbor)
if neighbor.map not in explored_nodes:
heappush(heap, entry)
explored_nodes.add(neighbor.map)
if neighbor.depth > self.max_search_depth:
self.max_search_depth += 1
if len(heap) > self.max_frontier_size:
self.max_frontier_size = len(heap)
|
from unittest import TestCase
import newMain as mn
__author__ = 'bartek'
class TestCountCenters(TestCase):
    """Unit test for newMain.countCenters.

    BUG FIX: the original was a plain module-level function taking
    'TestCase' as a parameter, so no test runner ever executed it, and its
    final 'assert iseqal' referenced an undefined name (NameError).
    """
    def test_count_centers(self):
        nodes = [mn.Node([3, 3], 1), mn.Node([1, 1], 1), mn.Node([-1, -1], 1)]
        allgroups = [nodes, nodes]
        centers = mn.countCenters(allgroups)
        # Every computed center must itself be a Node.
        for center in centers:
            assert isinstance(center, mn.Node)
|
# -*- coding: utf-8 -*-
from importlib import import_module
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from Cython.Distutils import build_ext
import numpy as np
# Cython extension modules without extra include directories.
EXT_FILES = ['c_formula_int64', 'cpp_formula_int64', 'cpp_polynomial_int64', 'cpp_prime_int64', 'simple_bigint']
# Cython extension modules that also need the NumPy C headers.
EXT_FILES_INC = ['c_linalg_int64', 'c_prime_int64']
extensions = [
    Extension("ProjectEulerToolkit.ext.{}".format(name),
              ["ProjectEulerToolkit/ext/{}.pyx".format(name)])
    for name in EXT_FILES
]
extensions.extend(
    Extension("ProjectEulerToolkit.ext.{}".format(name),
              ["ProjectEulerToolkit/ext/{}.pyx".format(name)],
              include_dirs=[np.get_include()],
              define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")])
    for name in EXT_FILES_INC
)
setup(
    cmdclass={'build_ext': build_ext},
    ext_modules=cythonize(extensions),
)
|
import turtle  # drawing module
zhufu = turtle.Turtle()  # create the Turtle object used for drawing
# BUG FIX: turtle.Screen is a function; the original assigned the function
# object itself instead of calling it, so no Screen was ever created.
chuangkou = turtle.Screen()  # create the window
turtle.screensize(400,300,"green")  # window width/height and background colour
zhufu.pencolor("green")  # set pen colour
zhufu.hideturtle()  # hide the cursor arrow
zhufu.setpos(-150,50)  # move the pen to this coordinate
zhufu.pencolor("red")  # set pen colour
if True:  # draw the digit 5
    zhufu.pensize(15)
    zhufu.left(180)
    zhufu.forward(50)
    zhufu.left(90)
    zhufu.forward(50)
    zhufu.left(90)
    zhufu.forward(50)
    zhufu.right(90)
    zhufu.forward(50)
    zhufu.right(90)
    zhufu.forward(50)
if True:  # draw the digit 2
    zhufu.penup()  # lift the pen
    zhufu.setpos(-50,50)  # move the pen to this coordinate
    zhufu.pendown()  # lower the pen
    zhufu.left(180)
    zhufu.forward(50)
    zhufu.right(90)
    zhufu.forward(50)
    zhufu.right(90)
    zhufu.forward(50)
    zhufu.left(90)
    zhufu.forward(50)
    zhufu.left(90)
    zhufu.forward(50)
    zhufu.penup()
    zhufu.setpos(100,50)
    zhufu.pendown()
if True:  # draw the digit 0
    zhufu.forward(50)
    zhufu.right(90)
    zhufu.forward(100)
    zhufu.right(90)
    zhufu.forward(50)
    zhufu.right(90)
    zhufu.forward(100)
    zhufu.penup()
    zhufu.setpos(-75,-150)
    zhufu.pendown()
    zhufu.write('I love you ',font=('times',20,'bold'))  # write the text
turtle.exitonclick()
# Estimate the Lyapunov spectrum of the QG model,
# using a limited (rank-N) ensemble.
# Inspired by EmblAUS/Lor95_Lyap.py
from common import *
from mods.QG.core import shape, step, sample_filename, dt
import mods.QG.core as mod
# NB: "Sometimes" multiprocessing does not work here.
# This may be an ipython bug (stackoverflow.com/a/45720872).
# Workarounds: 1) run the script from outside of ipython,
# or 2) disable multiprocessing by setting mod.mp = False.
mod.mp = True
sd0 = seed(5)
eps = 0.01 # ensemble rescaling
T = 1000.0
K = round(T/dt)
tt = linspace(dt,T,K)
m = np.prod(shape) # ndim (state dimension)
########################
# Main loop
########################
# Start from the last stored sample on the model attractor.
x = np.load(sample_filename)['sample'][-1]
# Init U: perturbation directions = first N canonical basis vectors.
N = 300
U = eye(m)[:N]
E = x + eps*U
LL_exp = zeros((K,N))
for k,t in enumerate(tt):
    if t%10.0==0: print(t)
    x = step(x,t,dt)
    E = step(E,t,dt)
    # Re-orthonormalise the (rescaled) perturbations with QR; the log of
    # R's diagonal accumulates the local expansion rates.
    E = (E-x).T/eps
    [Q, R] = sla.qr(E,mode='economic')
    E = x + eps*Q.T
    LL_exp[k] = log(abs(diag(R)))
# Running averages
running_LS = ( tp(1/tt) * np.cumsum(LL_exp,axis=0) )
LS = running_LS[-1]
print('Lyap spectrum estimate at t=T:')
with printoptions(precision=4): print(LS)
# Number of non-negative exponents.
n0 = sum(LS >= 0)
print('n0: ', n0)
#########################
## Plot
#########################
plt.clf()
plt.plot(tt,running_LS,lw=1,alpha=0.4)
plt.title('Lyapunov Exponent estimates')
plt.xlabel('Time')
plt.ylabel('Exponent value')
|
import gtk
try :
import hildon
except :
hildon = None
class FuelpadAbstractCombo :
    """Abstract base for all Fuelpad combo widgets.

    Cannot be instantiated; subclasses must implement fill_combo().
    """
    def __init__ ( self ) :
        raise Exception( "Instantiating abstract class %s" % self.__class__ )
    def fill_combo( self , items , active=None ) :
        # FIX: 'uninmplemented' typo corrected; NotImplementedError is the
        # idiomatic exception for abstract methods (still a subclass of
        # Exception, so existing handlers keep working).
        raise NotImplementedError( "Calling unimplemented method 'fill_combo' on class %s" % self.__class__ )
class FuelpadAbstractDBCombo ( FuelpadAbstractCombo ) :
    """Abstract combo populated from a database query.

    Subclasses provide 'key' (config key, e.g. "Car") and 'query'
    (prepared statement), plus the widget methods append_text() /
    set_active() / get_active() from their concrete gtk/hildon base.
    """
    # Optional object whose '.changed' flag is raised when the selection
    # changes (installed via set_toggle()); False means "no toggle".
    toggle = False
    def render_label ( self , row ) :
        """Build the visible label from the first two columns of a row."""
        return( "%s %s" % ( row[0] , row[1] ) )
    def fill_combo( self , db , active=None ) :
        """Populate the widget from *db* and pre-select the current row.

        NOTE(review): the 'active' parameter is immediately overwritten
        below, so callers cannot pre-set a selection through it.
        """
        active=0
        i=0
        if db.is_open() :
            for row in db.get_rows( self.query ) :
                listitemtext = self.render_label( row )
                self.append_text( listitemtext )
                # Column 2 is assumed to hold the row id -- TODO confirm
                # against the queries (ppStmtDriver/ppStmtCar).
                if row[2] == db.get_current( self.key ) :
                    active = i
                i += 1
            self.set_active( active )
    def set_toggle( self , toggle ) :
        """Register an object whose '.changed' flag mirrors selection changes."""
        self.toggle = toggle
    def changed_cb ( self , widget , database ) :
        """Selection-changed callback: persist the chosen car id immediately."""
        index = widget.get_active()
        database.currentcar = int( database.carid( index ) )
        if self.toggle :
            self.toggle.changed = True
class FuelpadAbstractListCombo ( FuelpadAbstractCombo ) :
    """Combo populated from a static list of items."""
    def fill_combo( self , items , active=None ) :
        """Append every item (stringified) and select index *active*.

        The original loop contained a tautological 'if i == active:
        active = i' no-op, removed here; behaviour is unchanged.
        NOTE(review): when 'active' is None it is passed straight to
        set_active() -- confirm the gtk/hildon widgets accept that.
        """
        for item in items :
            self.append_text( "%s" % item )
        self.set_active( active )
class FuelpadAbstractItem ( gtk.ToolItem ) :
    """Toolbar item wrapping a combo widget.

    NOTE(review): relies on the concrete subclass assigning self.combo
    BEFORE this __init__ runs (see FuelpadCarItem).
    """
    def __init__ ( self , config ) :
        gtk.ToolItem.__init__( self )
        self.add( self.combo )
    def add_to( self , parent , position ) :
        """Insert this item into *parent* (a toolbar) at *position*."""
        parent.insert( self , position )
if hildon :
    class FuelpadSelector ( hildon.TouchSelector ) :
        """Single-column text TouchSelector with combo-like accessors."""
        def __init__ ( self ) :
            hildon.TouchSelector.__init__( self , text=True )
        def set_active( self , index ) :
            # Column 0 is the only column of this selector.
            return hildon.TouchSelector.set_active( self , 0 , index )
        def get_active( self ) :
            return hildon.TouchSelector.get_active( self , 0 )
    class FuelpadDBSelector ( FuelpadSelector , FuelpadAbstractDBCombo ) :
        """Touch selector backed by a database query."""
        def __init__ ( self , config , parentCombo ) :
            # key/query come from the owning combo (e.g. FuelpadCarCombo).
            self.key = parentCombo.key
            self.query = parentCombo.query
            FuelpadSelector.__init__( self )
            self.fill_combo( config.db )
            # NOTE : registering the callback will drive permanent changes (even to DB) even with cancellation !!!
            self.connect( "changed", self.changed_cb, config.db )
        def changed_cb ( self , widget , id , database ) :
            # hildon's "changed" signal passes an extra column id; drop it.
            FuelpadAbstractDBCombo.changed_cb( self , widget , database )
    class FuelpadListSelector ( FuelpadSelector , FuelpadAbstractListCombo ) :
        """Touch selector over a static list of items."""
        def __init__ ( self , items , active ) :
            FuelpadSelector.__init__( self )
            self.fill_combo( items , active )
    class FuelpadButton ( hildon.PickerButton ) :
        """Picker button bound to a selector."""
        def __init__ ( self , title , selector ) :
            hildon.PickerButton.__init__( self , gtk.HILDON_SIZE_AUTO, hildon.BUTTON_ARRANGEMENT_VERTICAL )
            self.set_title( title )
            self.set_selector( selector )
    class FuelpadCombo ( FuelpadButton ) :
        """DB-backed combo; subclasses define self.key and self.query."""
        def __init__ ( self , config ) :
            selector = FuelpadDBSelector( config , self )
            FuelpadButton.__init__( self , self.key , selector )
        def render_label ( self , row ) :
            return self.get_selector().render_label( row )
        def set_toggle( self , toggle ) :
            self.get_selector().set_toggle( toggle )
    class FuelpadListCombo ( FuelpadButton ) :
        """Static-list combo; subclasses define self.key (used as title)."""
        def __init__ ( self , items , active=None ) :
            selector = FuelpadListSelector( items , active )
            FuelpadButton.__init__( self , self.key , selector )
    class FuelpadItem ( FuelpadAbstractItem ) :
        """Toolbar item for the hildon UI."""
        def set_action_callback( self , callback , user_data ) :
            self.combo.connect( "value-changed" , callback , user_data )
        def set_active( self , state ) :
            # BUG FIX: the original referenced an undefined name 'active'
            # (NameError at call time); pass the 'state' argument through.
            # NOTE(review): gtk.ToolItem may not define set_active with
            # this signature -- this looks copy-pasted from the
            # TouchSelector wrappers; confirm against the gtk API.
            gtk.ToolItem.set_active( self , 0 , state )
        def get_active ( self ) :
            return gtk.ToolItem.get_active( self , 0 )
else :
    class FuelpadCombo ( gtk.ComboBox , FuelpadAbstractDBCombo ) :
        """Plain gtk combo backed by a database query."""
        def __init__ ( self , config ) :
            gtk.ComboBox.__init__( self , gtk.ListStore(str) )
            cell = gtk.CellRendererText()
            gtk.ComboBox.pack_start( self , cell , True )
            gtk.ComboBox.add_attribute( self , cell , 'text' , 0 )
            self.fill_combo( config.db )
            # NOTE : If registered before filling, we must ensure we block during that phase
            self.connect( "changed", self.changed_cb, config.db )
    class FuelpadListCombo ( gtk.ComboBox , FuelpadAbstractListCombo ) :
        """Plain gtk combo over a static list of items."""
        def __init__ ( self , items , active=None ) :
            gtk.ComboBox.__init__( self , gtk.ListStore(str) )
            cell = gtk.CellRendererText()
            gtk.ComboBox.pack_start( self , cell , True )
            gtk.ComboBox.add_attribute( self , cell , 'text' , 0 )
            self.fill_combo( items , active )
    class FuelpadItem ( FuelpadAbstractItem ) :
        """Toolbar item with a separate OK button to apply the selection."""
        def __init__ ( self , config ) :
            # Must exist before FuelpadAbstractItem.__init__ adds widgets.
            self.apply = gtk.ToolButton(gtk.STOCK_OK)
            FuelpadAbstractItem.__init__( self , config )
        def add_to( self , parent , position ) :
            FuelpadAbstractItem.add_to( self , parent , position )
            parent.insert( self.apply , position )
        def set_action_callback( self , callback , user_data ) :
            self.apply.connect( "clicked" , callback , user_data )
        def set_expand( self , value ) :
            FuelpadAbstractItem.set_expand( self , value )
            self.apply.set_expand( value )
class FuelpadDriverCombo ( FuelpadCombo ) :
    """DB combo listing the drivers table."""
    def __init__ ( self , config ) :
        # The base initialiser reads self.key/self.query, so set them first.
        self.query = config.db.ppStmtDriver
        self.key = "Driver"
        FuelpadCombo.__init__( self , config )
class FuelpadCarCombo ( FuelpadCombo ) :
    """DB combo listing the cars table."""
    def __init__ ( self , config ) :
        # The base initialiser reads self.key/self.query, so set them first.
        self.query = config.db.ppStmtCar
        self.key = "Car"
        FuelpadCombo.__init__( self , config )
class FuelpadCarItem ( FuelpadItem ) :
    """Toolbar item holding the car combo."""
    def __init__ ( self , config ) :
        # self.combo must be set before FuelpadItem.__init__ (it adds it).
        self.combo = FuelpadCarCombo( config )
        self.combo.set_toggle( config )
        FuelpadItem.__init__( self , config )
    def select_combo_item ( self , model , db ) :
        """Select the model row whose label matches the current car in *db*.

        NOTE(review): calls self.set_active_iter(), but the combo widget is
        self.combo -- this looks like it should be
        self.combo.set_active_iter(iter); confirm against callers.
        The SQL is built by %-interpolation of db.currentcar (set via
        int(...) in changed_cb, so injection risk appears limited).
        """
        query = "SELECT mark,register,id FROM car WHERE id=%s" % db.currentcar
        itemtext = self.combo.render_label( db.get_rows( query )[0] )
        iter = model.get_iter_first()
        while iter :
            if model.get( iter , 0 )[0] == itemtext :
                self.set_active_iter( iter )
                return
            iter = model.iter_next( iter )
import configuration
class FuelpadUnitsystemCombo ( FuelpadListCombo ) :
    """List combo for choosing the unit system; *label* becomes the title."""
    def __init__ ( self , config , label=None ) :
        # self.key is read by the base initialiser (used as button title).
        self.key = label
        FuelpadListCombo.__init__( self , configuration.unitsystem , config.units )
class FuelpadFontsizeCombo ( FuelpadListCombo ) :
    """List combo for choosing the font size; *label* becomes the title."""
    def __init__ ( self , config , label=None ) :
        # self.key is read by the base initialiser (used as button title).
        self.key = label
        FuelpadListCombo.__init__( self , configuration.fontsizes , config.fontsize )
|
import numpy as np
import math
import matplotlib.pyplot as plt
from .baseKernel import baseKernel
from ..tools.utils import sigmoid
from ..tools.utils import timeit
# Weighted Kernel Ridge Regression
class WKRR():
    """Weighted Kernel Ridge Regression.

    Solves alpha = W^{1/2} (W^{1/2} K W^{1/2} + n*lambda*I)^{-1} W^{1/2} y.
    """
    def __init__(self):
        # Dual coefficients, set by fit().
        self.alpha_ = None
    def fit(self, K_train, y_train, w, lambda_reg=0.01):
        """Fit on a precomputed Gram matrix.

        K_train    -- (n, n) Gram matrix
        y_train    -- (n,) targets
        w          -- (n,) sample weights, or None for uniform weights
        lambda_reg -- ridge regularization strength
        Returns the fitted alpha coefficients.
        """
        n = K_train.shape[0]
        if w is None:
            w = np.ones(n)
        # W^{1/2} as a diagonal matrix.
        W12 = np.diag(np.sqrt(w))
        # Solve the linear system directly instead of forming the explicit
        # inverse: cheaper and numerically more stable than np.linalg.inv.
        A = W12.dot(K_train.dot(W12)) + n * lambda_reg * np.eye(n)
        self.alpha_ = W12.dot(np.linalg.solve(A, W12.dot(y_train)))
        return self.alpha_
class KLR(baseKernel):
    """Kernel Logistic Regression fitted by IRLS (iteratively reweighted
    least squares), each inner step solved by WKRR.

    Labels are assumed to be in {-1, +1} -- TODO confirm against callers.
    """
    def __init__(self,lambda_reg=0.01, niters=1000,tolerance=1.e-5, **kwargs):
        super(KLR, self).__init__(**kwargs)
        # Dual coefficients (one per training sample after fit).
        self.alpha_ = 0
        # A 'lambda_reg' entry in kwargs overrides the positional default.
        self.lambda_reg_ = kwargs.get('lambda_reg',lambda_reg)
        self.niters_ = niters
        self.tolerance_ = tolerance
    def get_coef(self):
        """Return the dual coefficients as a plain list."""
        return list(self.alpha_)
    def fit_use_K(self,K_train,y_train):
        '''
        K: gram matrix np.array(n_samples_train,n_samples_train)
        Y: label np.array(n_sample_train)
        niters : the number of iteration
        tolerance : stopping criteria
        lambda_reg : lambda regularization
        '''
        with timeit('Fit with Logistic Regression ', font_style='bold', bg='Red', fg='White'):
            print('lambda_reg =',self.lambda_reg_)
            n= K_train.shape[0]
            # Random start for the dual coefficients.
            self.alpha_ = np.random.rand(n)
            # solving KLP by IRLS
            for i in range(self.niters_):
                alpha_old = self.alpha_
                # Current margins K @ alpha.
                M = K_train.dot(self.alpha_)
                sig_pos, sig_neg = sigmoid(M * y_train), sigmoid(-M * y_train)
                # IRLS weights and working response (clipped for stability).
                W = sig_neg * sig_pos
                Z = M + y_train / np.maximum(sig_pos, 1.e-6)
                wkrr = WKRR()
                self.alpha_ = wkrr.fit(K_train=K_train,y_train=Z,w=W,lambda_reg=self.lambda_reg_)
                if np.linalg.norm(self.alpha_ - alpha_old) < self.tolerance_:
                    break
                if i == self.niters_-1:
                    print('Warning: please increase the number of iteration to ensure the convergence')
        return self.alpha_
    def predict_prob_use_K(self,K_test):
        '''
        K_test : gram matrix for test set np.array(n_samples_test,n_samples_train)
        return : probability for each data
        '''
        prediction = sigmoid(K_test.dot(self.alpha_))
        return prediction
    def predict_use_K(self,K_test):
        """Hard labels in {-1, +1} from a 0.5 probability threshold."""
        with timeit('Predict with Logistic Regression ', font_style='bold', bg='Red', fg='White'):
            prediction = np.array(self.predict_prob_use_K(K_test)>0.5,dtype=int)
            prediction[prediction ==0] = -1
        return prediction
# def fit(self,X_train,y_train):
# self.X_train = X_train
# K_train = self.kernel_function_(X_train, X_train)
# return self.fit_use_K(K_train, y_train)
#
# def predict(self,X_test):
# K_test = self.kernel_function_(X_test, self.X_train)
# return self.predict_use_K(K_test)
|
import re
origin = open('idf_sample/demo.idf', 'r')
for line in origin:
if "U-Factor" in line:
print re.search(r'[-+]?\d*\.\d+|\d+', line).group()
origin.close()
|
import sys
import os
# Redirect stdin to a local file with pre-recorded judge input so the
# solution can be exercised offline (AtCoder local-testing convention).
f = open("C://Users/OZ/Documents/python/atcoder/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
n=int(input())
l=list(map(int,input().split()))
sl=l[:]
sl.sort()
# a = lower middle, b = upper middle of the sorted values (n is even in
# this problem). Removing one element <= a makes b the median of the
# rest; otherwise the median is a.
a=sl[n//2-1]
b=sl[n//2]
for i in l:
    if i<=a:
        print(b)
    else:
        print(a)
|
"""
Various helper utilities for the HTCondor-ES integration
"""
import os
import pwd
import sys
import time
import errno
import socket
import logging
import smtplib
import email.mime.text
import logging.handlers
TIMEOUT_MINS = 11
def send_email_alert(recipients, subject, message):
    """
    Send a simple email alert (typically of failure).

    recipients -- list of destination addresses; no-op when empty.
    subject    -- short description, prefixed with host and timestamp.
    message    -- plain-text body.
    Failures to send are logged as warnings, never raised.
    """
    if not recipients:
        return
    msg = email.mime.text.MIMEText(message)
    msg['Subject'] = "%s - %sh: %s" % (socket.gethostname(),
                                       time.strftime("%b %d, %H:%M"),
                                       subject)
    domain = socket.getfqdn()
    uid = os.geteuid()
    pw_info = pwd.getpwuid(uid)
    # Outside CERN, fall back to the UNL host naming scheme.
    if 'cern.ch' not in domain:
        domain = '%s.unl.edu' % socket.gethostname()
    msg['From'] = '%s@%s' % (pw_info.pw_name, domain)
    # BUG FIX: the header previously listed only recipients[0] although the
    # envelope delivered to everyone; show all recipients in the To header.
    msg['To'] = ', '.join(recipients)
    try:
        sess = smtplib.SMTP('localhost')
        sess.sendmail(msg['From'], recipients, msg.as_string())
        sess.quit()
    except Exception as exn:  # pylint: disable=broad-except
        logging.warning("Email notification failed: %s", str(exn))
def time_remaining(starttime, timeout=TIMEOUT_MINS*60, positive=True):
    """
    Return the remaining time (in seconds) until starttime + timeout.

    With positive=True (default) the result is clamped at 0; otherwise a
    negative value indicates how long ago the deadline passed.
    """
    deadline = starttime + timeout
    remaining = deadline - time.time()
    if positive:
        return max(0, remaining)
    return remaining
def set_up_logging(args):
    """Configure the root logger with a rotating file handler.

    args must provide:
      log_level -- name of a logging level ("debug", "info", ...)
      log_dir   -- directory for the rotating log file (created if missing)
    Also attaches a stdout handler when running on a TTY.
    Raises ValueError if args.log_level is not a valid level name.
    """
    logger = logging.getLogger()
    log_level = getattr(logging, args.log_level.upper(), None)
    if not isinstance(log_level, int):
        # BUG FIX: report the offending input; the original interpolated
        # 'log_level', which is always None on this path.
        raise ValueError('Invalid log level: %s' % args.log_level)
    logger.setLevel(log_level)
    if log_level <= logging.INFO:
        # Quieten chatty third-party loggers by one level.
        logging.getLogger("htcondor_es.StompAMQ").setLevel(log_level + 10)
        logging.getLogger("stomp.py").setLevel(log_level + 10)
    try:
        os.makedirs(args.log_dir)
    except OSError as oserr:
        # The directory already existing is fine; anything else is not.
        if oserr.errno != errno.EEXIST:
            raise
    log_file = os.path.join(args.log_dir, 'spider_cms.log')
    filehandler = logging.handlers.RotatingFileHandler(log_file, maxBytes=100000)
    filehandler.setFormatter(
        logging.Formatter('%(asctime)s : %(name)s:%(levelname)s - %(message)s'))
    logger.addHandler(filehandler)
    if os.isatty(sys.stdout.fileno()):
        streamhandler = logging.StreamHandler(stream=sys.stdout)
        logger.addHandler(streamhandler)
|
#!/usr/bin/python3
# Sum the integers 1 through n and report the result.
n = 100
# FIX: named 'total' (not 'sum') so the builtin sum() is not shadowed,
# and computed with the builtin instead of a manual counter loop.
total = sum(range(1, n + 1))
print("1 ~ %d sum is: %d" % (n, total))
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
import tensorflow as tf
# BUG FIX: the alias was missing ('import numpy as'), a SyntaxError that
# prevented the whole script from running; np is used below for reshaping.
import numpy as np
import scipy.ndimage
import scipy.misc
# Restore the trained autoencoder graph from the latest checkpoint.
sess = tf.Session()
saver = tf.train.import_meta_graph('model.meta')
saver.restore(sess, tf.train.latest_checkpoint('./'))
graph = tf.get_default_graph()
output_img = graph.get_tensor_by_name("output_img:0")
x = graph.get_tensor_by_name("x:0")
batch_size = graph.get_tensor_by_name("batch_size:0")
batch_size_now = 50
batch = mnist.train.next_batch(50)[0]
result = sess.run(output_img, feed_dict = {x: batch, batch_size:50})
img = np.reshape(result, [50, 28, 28])
# Save original/generated image pairs side by side for inspection.
# NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2; this script
# requires an old SciPy (or porting to imageio.imwrite).
for i in range(0, batch_size_now):
    orig_img = np.reshape(batch[i], [28, 28])
    scipy.misc.imsave('./images/orig_%s.png'%i, orig_img)
    generated_img = img[i]
    scipy.misc.imsave('./images/generated_%s.png'%i, generated_img)
#    scipy.misc.imshow(orig_img)
#    scipy.misc.imshow(generated_img)
|
from django.contrib.auth.models import User
from rest_framework import mixins, status
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from account.serializers import UserSerializer
from .permissions import IsStaffOrTargetUser
class UserView(mixins.CreateModelMixin, GenericViewSet):
    """User registration endpoint: POST creates a new Django user."""
    serializer_class = UserSerializer
    model = User
    def create(self, request, *args, **kwargs):
        """Validate the payload and create the user (201 on success)."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
    def perform_create(self, serializer):
        """Create the user with a properly hashed password.

        NOTE(review): reads raw request.data instead of the validated
        serializer data, so fields bypass serializer-level cleaning, and
        missing keys raise KeyError (500) rather than a validation error.
        """
        user = User.objects.create_user(self.request.data['username'], self.request.data['email'],
                                        self.request.data['password'])
        user.first_name = self.request.data['first_name']
        user.last_name = self.request.data['last_name']
        user.save()
    def get_permissions(self):
        # allow non-authenticated user to create via POST
        # (the trailing comma makes this a 1-tuple, as DRF expects an
        # iterable of permission instances).
        return (AllowAny() if self.request.method == 'POST'
                else IsStaffOrTargetUser()),
|
def chiffre (c):
    """Return the decimal digits of c as a list of ints."""
    return [int(digit) for digit in str(c)]
print(chiffre(1234))
def liste (arr):
    """Inverse of chiffre: fold a list of digits back into an integer."""
    value = 0
    for digit in arr:
        value = value * 10 + digit
    return value
print(liste([1, 2, 3, 4]))
def decroissant (c):
    """Return the number formed by the digits of c sorted in descending order."""
    digits = chiffre(c)
    digits.sort(reverse=True)
    return liste(digits)
print(decroissant(8459))
def croissant (c):
    """Return the number formed by the digits of c sorted in ascending order."""
    digits = chiffre(c)
    digits.sort()
    return liste(digits)
print(croissant(8459))
def Kaprekar (n):
    """One Kaprekar step: digits-descending minus digits-ascending."""
    big, small = decroissant(n), croissant(n)
    return big - small
print(Kaprekar(1826))
def test_val_kaprekar (init, final, val):
    """Numbers in [init, final] whose Kaprekar step equals val."""
    return [number for number in range(init, final + 1) if Kaprekar(number) == val]
print(test_val_kaprekar(1000,9999, 0))
print(test_val_kaprekar(1000,9999, 999))
def test_id_kaprekar (init, final):
    """Fixed points of the Kaprekar step in [init, final]."""
    return [number for number in range(init, final + 1) if Kaprekar(number) == number]
print(test_id_kaprekar(1000,9999)) # Only 6174 qualifies
def Kaprekar_it (n,i):
    """Apply the Kaprekar step i times to n and return the result."""
    value = n
    # Loop variable renamed: the original reused 'i', shadowing the
    # parameter (harmless since range(i) is evaluated first, but unclear).
    for _ in range(i):
        value = Kaprekar(value)
    return value
print(Kaprekar_it(1826,2))
def chaine (n):
    """Kaprekar orbit of n, iterated until the fixed point 6174 (or 0)."""
    current = n
    sequence = [n]
    while current not in (6174, 0):
        current = Kaprekar(current)
        sequence.append(current)
    return sequence
print(chaine(1826))
def min_it (init, final, j):
    """Numbers in [init, final] whose Kaprekar chain has length exactly j."""
    return [number for number in range(init, final + 1) if len(chaine(number)) == j]
print(min_it(1000,9999, 8))
print(chaine(9006))
import unittest
from btree import BTree
class BTreeTest(unittest.TestCase):
    """Membership tests for the BTree implementation."""
    def setUp(self):
        # Fresh tree with a handful of values before every test.
        self.btree = BTree()
        for value in (10, 20, 40, 50):
            self.btree.add(value)
    def test_contains(self):
        for value in (10, 20, 40, 50):
            assert value in self.btree
    def test_does_not_contain(self):
        for missing in (100, 23423):
            assert missing not in self.btree
|
import kivy
import socket, os
import time
import download_dhaga
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.clock import Clock
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.label import Label
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.screenmanager import ScreenManager, Screen
kivy.require('1.8.0')
#To get ip of current node
x=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
try:
x.connect(("gmail.com",80))
myip=x.getsockname()[0]
except:
print "Client not connected to internet !!!!!"
exit
serverip=''
serveraddr=''
totno=''
def ifyes(instance):
    """Kivy button callback: accept the server's download offer.

    Sends a UDP 'Yes' to the server, waits for the server to set up,
    connects back over TCP, downloads the assigned byte range of the
    remote file via download_dhaga, streams the result to the server and
    removes the local copy.
    """
    global serveraddr
    clientSocket=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    try:
        clientSocket.sendto('Yes',serveraddr)
    except:
        print "UDP acceptance msg was not sent"
        return
    clientSocket.close()
    # Give the server time to collect replies before it opens its TCP port.
    time.sleep(50)
    print "Client on TCP"
    clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        clientSocket.connect((serverip,50005))
    except:
        print "Error: Client could not establish TCP connection"
        return
    print "connection established"
    print clientSocket.getsockname()
    # Protocol: first message carries the file extension, the second
    # "<url> <start> <end> <index>".
    ext=clientSocket.recv(1024)
    msg=clientSocket.recv(1024)
    print msg
    rurl,st,en,i=msg.split()
    start=int(st)
    end=int(en)
    ind=int(i)
    download_dhaga.download(rurl,start,end,ext)
    # Stream the downloaded chunk back to the server in 1 KB blocks.
    # NOTE(review): the file handle is never closed before os.remove(),
    # which fails on platforms that lock open files (e.g. Windows).
    f=open('final'+ext,'rb')
    l=f.read(1024)
    while l:
        clientSocket.send(l)
        l=f.read(1024)
    os.remove('final'+ext)
    clientSocket.close()
def ifno(instance):
    """Kivy button callback: decline the server's offer with a UDP 'No'.

    BUG FIX: the socket was created as SOCK_STREAM (TCP) but used with
    sendto(), which fails on an unconnected TCP socket; the acceptance
    path (ifyes) correctly uses SOCK_DGRAM, so do the same here.
    """
    global serveraddr
    clientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    clientSocket.sendto('No',serveraddr)
    clientSocket.close()
class MyApp(App):
    """Kivy client: waits for the server's UDP broadcast offer, then shows
    a Yes/No prompt whose buttons answer it (ifyes/ifno callbacks)."""
    def build(self):
        """Block on the broadcast, then build and return the Yes/No UI."""
        #UDP part
        global serverip
        global serveraddr
        broadSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        broadSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        broadSocket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        broadSocket.bind(('<broadcast>',50020))
        # Blocks until the server broadcasts its offer message.
        msg,serveraddr=broadSocket.recvfrom(2048)
        serverip=serveraddr[0]
        print msg
        print "Hello to this world :Testing..... "
        if msg=='':
            print "Error:No msg from server"
            return
        broadSocket.close()
        box = BoxLayout(orientation='vertical')
        label1 = Label(text=msg+'\nEnter Yes/No\n')
        btn1 = Button(text='Yes', state='normal')
        btn2 = Button(text='No', state='normal')
        btn1.bind(on_press=ifyes)
        btn2.bind(on_press=ifno)
        box.add_widget(label1)
        box.add_widget(btn1)
        box.add_widget(btn2)
        return box
#time.sleep(10)
MyApp().run()
|
from .data_augment import *
from .averageMeter import AverageMeter |
from contextlib import redirect_stdout
from io import StringIO
stream = StringIO()
# redirect_stdout context managers are reentrant and reusable, so the same
# instance can guard several independent with-blocks.
write_to_stream = redirect_stdout(stream)
with write_to_stream:
    print("This is written to the stream rather than stdout")
# Reusing the manager appends to the same StringIO buffer.
with write_to_stream:
    print("This is also written to the stream")
import math
from datetime import datetime
import numpy as np
from flask import Flask, request
from matplotlib.figure import Figure
import base64
from io import BytesIO
import matplotlib.dates as mdates
import matplotlib.ticker as tick
import yfinance as yf
app = Flask(__name__)
def smav2(data, window):
    """Simple moving average of *data* over *window* samples.

    Returns a list the same length as data; positions with fewer than
    'window' preceding samples hold math.nan.
    """
    averages = []
    running = 0
    for index, value in enumerate(data):
        running += value
        # Drop the sample that just left the window.
        if index >= window:
            running -= data[index - window]
        if index < window - 1:
            averages.append(math.nan)
        else:
            averages.append(running / window)
    return averages
def normalize(val, minVal, maxVal, newMin, newMax):
    """Linearly rescale val from [minVal, maxVal] onto [newMin, newMax]."""
    fraction = (val - minVal) / (maxVal - minVal)
    return newMin + fraction * (newMax - newMin)
@app.route('/', methods=['GET'])
def risk():
    """Render the SMA50/SMA350 risk chart for ?sym=<coin> as base64 PNG.

    Fetches the full USD price history from Yahoo Finance, computes the
    50/350-day moving-average ratio, normalises it to [0, 1] and overlays
    it on the log-scale price chart with buy/grey/sell bands.
    """
    sym = request.args.get('sym')
    ticker = request.args.get('sym') + '-USD'
    coin = yf.Ticker(ticker)
    data = coin.history(period="max")
    close_prices = [c for c in data['Close']]
    dates = [d.to_pydatetime() for d in data.index]
    # PERF FIX: compute both moving averages once. The original called
    # smav2 (an O(n) pass) inside the per-day loop, making this O(n^2).
    sma50 = smav2(close_prices, 50)
    sma350 = smav2(close_prices, 350)
    risk = [sma50[i] / sma350[i] for i in range(len(dates))]
    # Rescale the ratio to [0, 1], ignoring the nan warm-up prefix.
    minR = np.nanmin(risk)
    maxR = np.nanmax(risk)
    normalized_risk = [normalize(r, minR, maxR, 0, 1) for r in risk]
    # Chart
    fig = Figure()
    ax1 = fig.subplots()
    yLabel = sym + '/USD'
    ax1.xaxis.set_major_locator(mdates.MonthLocator(interval=6))
    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%b-%y'))
    ax1.set_yscale('log')
    ax1.set_ylabel(yLabel, color="black")
    ax1.plot(dates, close_prices, color="black")
    ax2 = ax1.twinx()
    ax2.xaxis.set_major_locator(mdates.MonthLocator(interval=6))
    ax2.xaxis.set_major_formatter(mdates.DateFormatter('%b-%y'))
    ax2.yaxis.set_ticks(np.arange(0, 1.1, 0.1))
    ax2.yaxis.set_major_formatter(tick.FormatStrFormatter('%0.1f'))
    ax2.yaxis.grid(linestyle="dashed")
    ax2.set_ylabel('RISK', color="red")
    ax2.plot(dates, normalized_risk, color="red")
    ax2.tick_params(axis='y', labelcolor="red")
    # Colour bands: green = buy (<0.3), grey zone (0.3-0.4), red = sell (>0.4).
    for i in range(10):
        if i == 3:
            # Grey Zone
            ax2.axhspan(i / 10, (i / 10) + 0.1, facecolor ='grey', alpha = 0.7)
        elif i < 3:
            # Buy Zone
            ax2.axhspan(i / 10, (i / 10) + 0.1, facecolor ='green', alpha = (i / 10) + 0.3)
        elif i > 3:
            # Sell Zone
            ax2.axhspan(i / 10, (i / 10) + 0.1, facecolor ='red', alpha = 1 - (i / 10))
    buffer = BytesIO()
    fig.savefig(buffer, format = 'png')
    data = base64.b64encode(buffer.getbuffer()).decode('ascii')
    return data
    #return f"<img src='data:image/png;base64,{data}'/>"
if __name__ == "__main__":
    # Flask development server; binds on all interfaces.
    app.run(host='0.0.0.0', debug=True)
|
from drone_model import Drone
from math import pi,cos,sin
import numpy as np
import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from trajectory_planner import Trajectories
def simulate():
    """Simulate the drone tracking a minimum-jerk trajectory and animate
    the result in a live 3D matplotlib plot."""
    def init_plot():
        # Create 3D axes with fixed world limits and return (fig, ax).
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.set_xlim3d([-1.0, 1.0])
        ax.set_xlabel('X')
        ax.set_ylim3d([-1.0, 1.0])
        ax.set_ylabel('Y')
        ax.set_zlim3d([0, 5.0])
        ax.set_zlabel('Z')
        fig.show()
        return fig, ax
    def update_plot():
        # Redraw, pace the animation by dt, then clear for the next frame.
        fig.canvas.draw()
        fig.canvas.flush_events()
        time.sleep(dt)
        ax.clear()
        ax.set_xlim3d([-1.0, 1.0])
        ax.set_xlabel('X')
        ax.set_ylim3d([-1.0, 1.0])
        ax.set_ylabel('Y')
        ax.set_zlim3d([0, 5.0])
        ax.set_zlabel('Z')
    def getRotation(X):
        # Rotation matrix from the Euler angles stored at X[6:9].
        phi = X[6]
        theta = X[7]
        psi = X[8]
        Rx = np.array([
            [1, 0, 0],
            [0,cos(phi),-sin(phi)],
            [0,sin(phi),cos(phi)]
        ])
        Ry = np.array([
            [ cos(theta),0,sin(theta)],
            [ 0, 1, 0],
            [-sin(theta),0,cos(theta)]
        ])
        Rz = np.array([
            [cos(psi),-sin(psi),0],
            [sin(psi), cos(psi),0],
            [0, 0, 1]
        ])
        # Composition order Rz*Rx*Ry -- NOTE(review): unusual ordering;
        # confirm it matches the convention inside the Drone model.
        return np.dot(Rz,np.dot(Rx,Ry))
    def plot_drone(X):
        # Draw the two cross arms as rotated segments centred on the CoG.
        x = X[0]
        y = X[1]
        z = X[2]
        arm_array = np.arange(-mDrone.L,mDrone.L+dt,dt)
        zeros = np.zeros(len(arm_array))
        R = getRotation(X)
        arm1 = []
        arm2 = []
        rot_arm1_x = []
        rot_arm1_y = []
        rot_arm1_z = []
        rot_arm2_x = []
        rot_arm2_y = []
        rot_arm2_z = []
        for i in range(len(arm_array)):
            arm1 = np.array([arm_array[i],0,0])
            arm2 = np.array([0,arm_array[i],0])
            rot_arm1 = np.matmul(R,arm1)
            rot_arm2 = np.matmul(R,arm2)
            rot_arm1_x.append(rot_arm1[0]+x)
            rot_arm1_y.append(rot_arm1[1]+y)
            rot_arm1_z.append(rot_arm1[2]+z)
            rot_arm2_x.append(rot_arm2[0]+x)
            rot_arm2_y.append(rot_arm2[1]+y)
            rot_arm2_z.append(rot_arm2[2]+z)
        arm1 = ax.plot(rot_arm1_x,rot_arm1_y,rot_arm1_z,
            color='blue',linewidth=3,antialiased=False)
        arm2 = ax.plot(rot_arm2_x,rot_arm2_y,rot_arm2_z,
            color='red',linewidth=3,antialiased=False)
    # Simulation parameters: timestep, horizon, initial and target states.
    t = 0
    dt = 0.01
    duration = 10
    time_steps = int(duration/dt)+1
    ICs = [0,1,2, # rOG
        0,0,0, # rOG_d
        0,0,0, # theta
        0,0,0] # theta_d
    X_T = [0,0,2, # rOG
        0,0,0, # rOG_d
        0,0,0, # theta
        0,0,0]
    mDrone = Drone(ICs,dt)
    traj_plan = Trajectories()
    traj_plan.setCoeff_MinJerkTraj(ICs,X_T,duration)
    fig, ax = init_plot()
    # Main loop: draw, feed the controller the reference, step dynamics.
    while mDrone.check_crash() and t<duration:
        plot_drone(mDrone.X)
        ref = traj_plan.getReferences(t)
        mDrone.updateReferences(ref)
        mDrone.update_controller()
        mDrone.solve_dynamics()
        t+=dt
        # print("t = %.2f, x = %.2f, y = %.2f, z = %.2f, x_d = %.2f, y_d = %.2f, z_d = %.2f" %
        #     (t,mDrone.X[0],mDrone.X[1],mDrone.X[2],
        #     mDrone.X[3],mDrone.X[4],mDrone.X[5]),end='\r')
        update_plot()
simulate()
def optimise_gains(i):
    """Run one tracking simulation with Kp[0] = i and return the accumulated
    squared x-tracking error J; plots the run only when i == 3.

    NOTE(review): duplicates simulate() almost entirely -- a strong
    candidate for refactoring into a shared helper.
    """
    def init_plot():
        # Create 3D axes with fixed world limits and return (fig, ax).
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.set_xlim3d([-1.0, 1.0])
        ax.set_xlabel('X')
        ax.set_ylim3d([-1.0, 1.0])
        ax.set_ylabel('Y')
        ax.set_zlim3d([0, 5.0])
        ax.set_zlabel('Z')
        fig.show()
        return fig, ax
    def update_plot():
        # Redraw, pace the animation by dt, then clear for the next frame.
        fig.canvas.draw()
        fig.canvas.flush_events()
        time.sleep(dt)
        ax.clear()
        ax.set_xlim3d([-1.0, 1.0])
        ax.set_xlabel('X')
        ax.set_ylim3d([-1.0, 1.0])
        ax.set_ylabel('Y')
        ax.set_zlim3d([0, 5.0])
        ax.set_zlabel('Z')
    def getRotation(X):
        # Rotation matrix from the Euler angles stored at X[6:9].
        phi = X[6]
        theta = X[7]
        psi = X[8]
        Rx = np.array([
            [1, 0, 0],
            [0,cos(phi),-sin(phi)],
            [0,sin(phi),cos(phi)]
        ])
        Ry = np.array([
            [ cos(theta),0,sin(theta)],
            [ 0, 1, 0],
            [-sin(theta),0,cos(theta)]
        ])
        Rz = np.array([
            [cos(psi),-sin(psi),0],
            [sin(psi), cos(psi),0],
            [0, 0, 1]
        ])
        # Composition order Rz*Rx*Ry -- NOTE(review): confirm convention.
        return np.dot(Rz,np.dot(Rx,Ry))
    def plot_drone(X):
        # Draw the two cross arms as rotated segments centred on the CoG.
        x = X[0]
        y = X[1]
        z = X[2]
        arm_array = np.arange(-mDrone.L,mDrone.L+dt,dt)
        zeros = np.zeros(len(arm_array))
        R = getRotation(X)
        arm1 = []
        arm2 = []
        rot_arm1_x = []
        rot_arm1_y = []
        rot_arm1_z = []
        rot_arm2_x = []
        rot_arm2_y = []
        rot_arm2_z = []
        for i in range(len(arm_array)):
            arm1 = np.array([arm_array[i],0,0])
            arm2 = np.array([0,arm_array[i],0])
            rot_arm1 = np.matmul(R,arm1)
            rot_arm2 = np.matmul(R,arm2)
            rot_arm1_x.append(rot_arm1[0]+x)
            rot_arm1_y.append(rot_arm1[1]+y)
            rot_arm1_z.append(rot_arm1[2]+z)
            rot_arm2_x.append(rot_arm2[0]+x)
            rot_arm2_y.append(rot_arm2[1]+y)
            rot_arm2_z.append(rot_arm2[2]+z)
        arm1 = ax.plot(rot_arm1_x,rot_arm1_y,rot_arm1_z,
            color='blue',linewidth=3,antialiased=False)
        arm2 = ax.plot(rot_arm2_x,rot_arm2_y,rot_arm2_z,
            color='red',linewidth=3,antialiased=False)
    # Simulation parameters: timestep, horizon, initial and target states.
    t = 0
    dt = 0.01
    duration = 10
    time_steps = int(duration/dt)+1
    ICs = [0,1,2, # rOG
        0,0,0, # rOG_d
        0,0,0, # theta
        0,0,0] # theta_d
    X_T = [0,0,2, # rOG
        0,0,0, # rOG_d
        0,0,0, # theta
        0,0,0]
    mDrone = Drone(ICs,dt)
    # Overwrite the gain under study with the candidate value.
    mDrone.Kp[0] = i
    traj_plan = Trajectories()
    traj_plan.setCoeff_MinJerkTraj(ICs,X_T,duration)
    if i ==3:
        fig, ax = init_plot()
    # J accumulates the squared x-position tracking error (the cost).
    J = 0
    while mDrone.check_crash() and t<duration:
        if i ==3:
            plot_drone(mDrone.X)
        ref = traj_plan.getReferences(t)
        mDrone.updateReferences(ref)
        mDrone.update_controller()
        mDrone.solve_dynamics()
        t+=dt
        J += (ref[0] - mDrone.X[0])**2
        # print("t = %.2f, x = %.2f, y = %.2f, z = %.2f, x_d = %.2f, y_d = %.2f, z_d = %.2f" %
        #     (t,mDrone.X[0],mDrone.X[1],mDrone.X[2],
        #     mDrone.X[3],mDrone.X[4],mDrone.X[5]),end='\r')
        if i ==3:
            update_plot()
    return J
# optimise_gains(3)
# cost = []
# for i in range(5):
# cost.append(simulate(i))
# print(cost)
# t_vec = []
# x_ref = []
# x_dot_ref = []
# x_dd_ref = []
# for i in range(time_steps):
# ref = traj_plan.getReferences(t)
# t+=dt
#
# t_vec.append(t)
# x_ref.append(ref[0])
# x_dot_ref.append(ref[1])
# x_dd_ref.append(ref[2])
#
# t_vec = np.array(t_vec)
# x_ref = np.array(x_ref)
# x_dot_ref = np.array(x_dot_ref)
# x_dd_ref = np.array(x_dd_ref)
# #
# # print(t_vec)
# fig, ax = plt.subplots()
# ax.plot(t_vec, x_ref)
# ax.plot(t_vec, x_dot_ref)
# ax.plot(t_vec, x_dd_ref)
# plt.show()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 13 21:55:58 2017
@author: XuGang
"""
import numpy as np
import DQN
import random
#给Card排序
class SortCards(object):
    """Pairs a candidate move with its summed rank so moves can be sorted."""

    def __init__(self, cards_combination, cards_type):
        # The raw move (list of card objects) and its move-type label.
        self.cards_combination = cards_combination
        self.cards_type = cards_type
        # Accumulated by the caller: total rank and the card names played.
        self.rank = 0
        self.cards = []
#展示扑克函数
def card_show(cards, info, n):
    """Print cards, moves or play records for debugging.

    n selects the format:
      1 -- ``cards`` is a flat list of card objects,
      2 -- ``cards`` is a list of moves (each a list of card objects),
      3 -- ``cards`` is a play record of [player, move-or-marker] pairs.
    """
    # n == 1: plain hand display.
    if n == 1:
        print(info)
        names = []
        for i in cards:
            names.append(i.name+i.color)
        print(names)
    # n == 2: candidate-move display.
    elif n == 2:
        if len(cards) == 0:
            return 0
        print(info)
        moves = []
        for i in cards:
            names = []
            for j in i:
                names.append(j.name+j.color)
            moves.append(names)
        print(moves)
    # n == 3: play-record display.
    elif n == 3:
        print(info)
        names = []
        for i in cards:
            tmp = []
            tmp.append(i[0])
            tmp_name = []
            # i[1] may be a pass/cannot-beat marker instead of a card list;
            # fall back to appending it verbatim.
            try:
                for j in i[1]:
                    tmp_name.append(j.name+j.color)
                tmp.append(tmp_name)
            except:
                tmp.append(i[1])
            names.append(tmp)
        print(names)
#在Player的next_moves中选择出牌方法
def choose(next_move_types, next_moves, last_move_type, model, cards, net):
    """Dispatch to the move-selection strategy named by *model*."""
    if model == "random":
        return choose_random(next_move_types, next_moves, last_move_type)
    elif model == "DQN":
        return choose_DQN(next_move_types, next_moves, last_move_type, cards, net)
    elif model == "manual":
        return choose_manual(next_move_types, next_moves, last_move_type, cards)
    elif model == 'little_smart':
        return choose_with_little_smart(next_move_types, next_moves, last_move_type)
#random
def choose_random(next_move_types, next_moves, last_move_type):
    """Pick a legal move uniformly at random; may pass unless opening."""
    # No playable move at all: must announce "cannot beat".
    if not next_moves:
        return "yaobuqi", []
    # Passing ("buyao") is an extra option except on the opening move.
    n_options = len(next_moves) if last_move_type == "start" else len(next_moves) + 1
    pick = np.random.randint(0, n_options)
    if pick == len(next_moves):
        return "buyao", []
    return next_move_types[pick], next_moves[pick]
#DQN
def choose_DQN(next_move_types, next_moves, last_move_type, cards, net):
    """Pick a move with the value network, exploring randomly ~10% of the time.

    Returns (move_type, move).  Keeps the original's behaviour of appending
    the "pass" option to the caller's lists when not opening.

    Bug fix: the exploration branch previously *called* choose_random but
    discarded its result and fell through to the greedy path.
    """
    # Nothing playable: must announce "cannot beat".
    if len(next_moves) == 0:
        return "yaobuqi", []
    # Exploration: with probability 11/100 play a uniformly random move so
    # training visits more states.
    prop = random.randint(1, 100)
    if prop > 89:
        return choose_random(next_move_types, next_moves, last_move_type)
    # Greedy: score every candidate (plus "pass" when legal) with the net.
    best_action = ""
    best_action_type = ""
    max_value = -999999999
    cards_table = DQN.get_table_of_cards(cards)
    if last_move_type != "start":
        next_move_types.append("buyao")
        next_moves.append([])
    for idx in range(len(next_moves)):
        move_table = DQN.get_table_of_cards(next_moves[idx])
        # Network input is hand encoding followed by move encoding.
        net_input = cards_table.copy()
        net_input.extend(move_table)
        value = net.get_value_only(net_input)
        if value > max_value:
            max_value = value
            best_action = next_moves[idx]
            best_action_type = next_move_types[idx]
    return best_action_type, best_action
#manual
def choose_manual(next_move_types, next_moves, last_move_type, cards):
    """Interactive chooser: show the hand and legal moves, then read the
    chosen move (comma-separated card indices, or 'buyao' to pass) from
    stdin until it matches one of the legal moves."""
    # Nothing can beat the last move.
    if len(next_moves) == 0:
        return "yaobuqi", []
    else:
        # Show the hand and the candidate moves.
        card_show(cards,"Your card: ", 1)
        card_show(next_moves,"Moves: ", 2)
        move_index_list = []
        for move in next_moves:
            move_index_list.append([cards.index(card) for card in move])
        print("Move index combination: ", move_index_list)
        # Prompt for input.
        print("Print the index of cards in the deck shown above, split with comma")
        input_list = input('>>>')
        # Passing is illegal on the opening move.
        while last_move_type == 'start' and input_list == 'buyao':
            print("Illegal combinations, try again!")
            input_list = input('>>>')
        if last_move_type != 'start' and input_list == 'buyao':
            return 'buyao', []
        # Parse the comma-separated indices into a concrete move.
        move_ind_list = [int(ind) for ind in input_list.split(',')]
        move = [cards[i] for i in move_ind_list]
        while move not in next_moves:
            print("Illegal combinations, try again!")
            input_list = input('>>>')
            move_ind_list = [int(ind) for ind in input_list.split(',')]
            move = [cards[i] for i in move_ind_list]
        # Return the chosen move type and cards.
        index = next_moves.index(move)
        return next_move_types[index], next_moves[index]
#little_smart
def choose_with_little_smart(next_move_types, next_moves, last_move_type):
    """Heuristic chooser: delegate to rank-based sorting of the candidates."""
    # Guard clause: nothing can beat the last move.
    if not next_moves:
        return "yaobuqi", []
    return sort_all_rank(next_moves, next_move_types, last_move_type)
#发牌
def game_init(players, playrecords, cards):
    """Shuffle the deck and deal 20 rank-sorted cards to each of two players.

    NOTE(review): the three cards at [40:43] (``Dizhupai``, the landlord
    cards) and the remaining ``left`` pile are computed but never assigned
    to anyone -- confirm whether dealing is intentionally incomplete.
    """
    # Shuffle the deck in place.
    np.random.shuffle(cards.cards)
    # Deal and sort each hand by rank.
    p1_cards = cards.cards[:20]
    p1_cards.sort(key=lambda x: x.rank)
    p2_cards = cards.cards[20:40]
    p2_cards.sort(key=lambda x: x.rank)
    Dizhupai = cards.cards[40:43]
    left = cards.cards[43:]
    players[0].cards_left = playrecords.cards_left1 = p1_cards
    players[1].cards_left = playrecords.cards_left2 = p2_cards
    #card_show(p1_cards, "1", 1)
    #card_show(p2_cards, "2", 1)
    #card_show(left, "left", 1)
#排序
def sort_all_rank(next_moves, next_move_types, last_move_type):
    """Rank candidate moves by total card rank.

    When following (not the opening move) play the cheapest candidate;
    when opening play the dearest.  With 50% probability delegate to a
    uniformly random choice instead.

    Bug fixes vs. the original:
      * the random branch now *returns* its result (it was discarded), and
      * choose_random is called with (types, moves, last) in the correct
        order (the first two arguments were swapped).
    """
    # 50% chance to play randomly instead of greedily.
    prob = random.randint(1, 4)
    if prob > 2:
        return choose_random(next_move_types, next_moves, last_move_type)
    # Score every candidate move by the sum of its card ranks.
    rank_list = {}
    for idx, cards_combination in enumerate(next_moves):
        sorted_cards = SortCards(cards_combination, next_move_types[idx])
        for card in cards_combination:
            sorted_cards.cards.append(card.name)
            sorted_cards.rank += card.rank
        rank_list[idx] = sorted_cards
    min_pai = sorted(rank_list.items(), key=lambda x: x[1].rank, reverse=False)
    max_pai = sorted(rank_list.items(), key=lambda x: x[1].rank, reverse=True)
    if last_move_type != "start":
        # Following: play the cheapest combination that beats the last move.
        return min_pai[0][1].cards_type, min_pai[0][1].cards_combination
    else:
        # Leading: open with the dearest combination.
        return max_pai[0][1].cards_type, max_pai[0][1].cards_combination
|
import argparse
import json
import time
from annotation_pipeline.pipeline import Pipeline
from annotation_pipeline.utils import get_pywren_stats
import logging
logging.basicConfig(level=logging.INFO)
if __name__ == '__main__':
    # CLI entry point: parse config paths, run the annotation pipeline and
    # report wall-clock time plus PyWren executor statistics.
    parser = argparse.ArgumentParser(description='Run annotation pipeline', usage='')
    parser.add_argument('--config', type=argparse.FileType('r'), default='config.json', help='config.json path')
    parser.add_argument('--ds', type=argparse.FileType('r'), default='metabolomics/ds_config2.json',
                        help='ds_config.json path')
    parser.add_argument('--db', type=argparse.FileType('r'), default='metabolomics/db_config2.json',
                        help='db_config.json path')
    parser.add_argument('--no-cache', dest='use_cache', action='store_false',
                        help='disable loading and saving cached cloud objects')
    parser.set_defaults(use_cache=True)
    args = parser.parse_args()
    # FileType('r') already opened the files; load the three JSON configs.
    config = json.load(args.config)
    ds_config = json.load(args.ds)
    db_config = json.load(args.db)
    pipeline = Pipeline(config, ds_config, db_config, use_cache=args.use_cache)
    start = time.time()
    # Calling the pipeline object runs all annotation stages.
    pipeline()
    results_df = pipeline.get_results()
    print(f'--- {time.time() - start:.2f} seconds ---')
    stats = get_pywren_stats()
    print(stats)
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw PASCAL dataset to TFRecord for object_detection.
Example usage:
in the research folder run:
python path/to/create_tf_record.py \
--data_dir=path/to/root_of_subfolders_of_PascalVoC \
--output_dir=path/to/generated_tfrecords_for_all_subfolders
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import os
from lxml import etree
import PIL.Image
import tensorflow as tf
import re
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
import random
import csv
import filecmp
import logging
import logging.config
import yaml
import shutil
from os import listdir
from os.path import isfile, join
flags = tf.app.flags
flags.DEFINE_string('data_dir', None, 'Root directory to raw PASCAL VOC dataset.')
flags.DEFINE_string('annotations_dir', 'Annotations',
'(Relative) path to annotations directory.')
flags.DEFINE_string('output_dir', None, 'Path to output TFRecord')
flags.DEFINE_string('label_map_file', 'pascal_label_map.pbtxt', 'Path to label map proto')
flags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore difficult instances')
flags.DEFINE_boolean('force_regenerate', False, 'If true will regenerate TFrecords '
'even if the existing TFrecords were created after the last modification of all the sub folders.')
flags.DEFINE_string('log_dir', 'logs', 'Folder where the log files should go.')
flags.DEFINE_boolean('classname_in_filename', False, 'Whether classname is in filename')
FLAGS = flags.FLAGS
RANDOM_SEED = 43
logger = None
def data_changed(root_data_dir, train_output_file):
    """Return True when the TFRecords are missing or older than the data."""
    # No previous output at all -> must (re)generate.
    if not os.path.isfile(train_output_file):
        return True
    # Compare the newest directory mtime under the data root against the
    # mtime of the existing training output file.
    newest_data = max(os.stat(d).st_mtime for d, _, _ in os.walk(root_data_dir))
    generated = os.stat(train_output_file).st_mtime
    logger.info('data folders last updated {}, training tf records last genereated {}'.format(newest_data, generated))
    return newest_data > generated
def get_label_map(root_data_dir):
    """Validate that every sub-folder ships an identical label map.

    Returns (subdirs, label_map_dict, sample_file).  label_map_dict is None
    when any sub-folder's label map differs from the first one's.

    Bug fix: the original raised NameError on the final ``return`` when
    root_data_dir had no sub-folders (sample_file/label_map_dict were
    never assigned); both now default to None.
    """
    logger.info('get_label_map root_data_dir {}'.format(root_data_dir))
    dirs = os.listdir(root_data_dir)
    sample_file = None
    label_map_dict = None
    first = True
    for subdir in dirs:
        if first:
            # The first sub-folder's label map is the reference copy.
            sample_file = os.path.join(root_data_dir, subdir, FLAGS.label_map_file)
            logger.info('base pascal label file: {}'.format(sample_file))
            first = False
            label_map_dict = label_map_util.get_label_map_dict(sample_file)
        else:
            # Every other sub-folder must ship a byte-identical label map.
            if not filecmp.cmp(sample_file, os.path.join(root_data_dir, subdir, FLAGS.label_map_file), shallow=False):
                logger.error('label map file in {} is different from {}'.format(subdir, sample_file))
                label_map_dict = None
                break
    return dirs, label_map_dict, sample_file
def split_train_val_test(data_dir,
                         annotations_dir):
    """Shuffle annotation files and split them 70/20/10 into train/val/test.

    Writes train.txt / val.txt / test.txt into data_dir and returns the
    three lists of file names.  The shuffle is seeded so the split is
    reproducible.
    """
    logger.info('spliting in {}'.format(annotations_dir))
    names = [f for f in listdir(annotations_dir) if isfile(join(annotations_dir, f))]
    random.Random(RANDOM_SEED).shuffle(names)
    total = len(names)
    cut70 = int(total * .70)
    cut90 = int(total * .90)
    splits = {
        'train.txt': names[:cut70],
        'val.txt': names[cut70:cut90],
        'test.txt': names[cut90:],
    }
    for filename, entries in splits.items():
        with open(os.path.join(data_dir, filename), 'w') as resultFile:
            for entry in entries:
                resultFile.write(entry + "\n")
    return splits['train.txt'], splits['val.txt'], splits['test.txt']
def generate_tf_for_set(set_file_list,
                        set_writer,
                        data_dir,
                        annotations_dir,
                        label_map_dict):
    """Parse each PASCAL XML annotation in set_file_list and append the
    resulting tf.Example to set_writer."""
    for idx, set_file in enumerate(set_file_list):
        if idx % 5 == 0:
            logger.debug('On image %d of %d', idx, len(set_file_list))
        path = os.path.join(annotations_dir, set_file)
        with tf.gfile.GFile(path, 'r') as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        # PASCAL XMLs have a single top-level <annotation> element.
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
        tf_example = dict_to_tf_example(data, data_dir, label_map_dict, FLAGS.ignore_difficult_instances)
        set_writer.write(tf_example.SerializeToString())
def process_one_subdir(data_dir,
                       label_map_dict,
                       train_writer,
                       val_writer,
                       test_writer):
    """Split one data folder and append its examples to the three writers."""
    annotations_dir = os.path.join(data_dir, 'Annotations')
    train, val, test = split_train_val_test(data_dir, annotations_dir)
    # Generate the three record sets in the same order as before.
    for label, file_set, writer in (('training', train, train_writer),
                                    ('validation', val, val_writer),
                                    ('testing', test, test_writer)):
        logger.info('generate %s tfrecords' % label)
        generate_tf_for_set(file_set, writer, data_dir, annotations_dir, label_map_dict)
def get_class_name_from_filename(file_name):
    """Gets the class name from a file.

    Args:
      file_name: The file name to get the class name from.
        ie. "american_pit_bull_terrier_105.jpg"

    Returns:
      A string of the class name (everything before the trailing _<digits>.jpg).
    """
    pattern = r'([A-Za-z_]+)(_[0-9]+\.jpg)'
    return re.match(pattern, file_name, re.I).group(1)
def dict_to_tf_example(data,
                       dataset_directory,
                       label_map_dict,
                       ignore_difficult_instances=False,
                       image_subdirectory='JPEGImages'):
    """Convert XML derived dict to tf.Example proto.

    Notice that this function normalizes the bounding box coordinates provided
    by the raw data.

    Args:
      data: dict holding PASCAL XML fields for a single image (obtained by
        running dataset_util.recursive_parse_xml_to_dict)
      dataset_directory: Path to root directory holding PASCAL dataset
      label_map_dict: A map from string label names to integers ids.
      ignore_difficult_instances: Whether to skip difficult instances in the
        dataset (default: False).
      image_subdirectory: String specifying subdirectory within the
        PASCAL dataset directory holding the actual image data.

    Returns:
      example: The converted tf.Example.

    Raises:
      ValueError: if the image pointed to by data['filename'] is not a valid JPEG
    """
    img_path = os.path.join(image_subdirectory, data['filename'])
    full_path = os.path.join(dataset_directory, img_path)
    # Some annotations omit the image extension; try .JPG then .jpg.
    # NOTE(review): if neither variant exists, the original path is kept and
    # the GFile open below will fail -- confirm whether that is intended.
    if not os.path.isfile(full_path):
        if os.path.isfile(full_path + '.JPG'):
            full_path = full_path + '.JPG'
        elif os.path.isfile(full_path + '.jpg'):
            full_path = full_path + '.jpg'
    logger.info('process image {}'.format(full_path))
    with tf.gfile.GFile(full_path, 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = PIL.Image.open(encoded_jpg_io)
    if image.format != 'JPEG':
        raise ValueError('Image format not JPEG')
    # SHA-256 of the raw bytes serves as a stable image key.
    key = hashlib.sha256(encoded_jpg).hexdigest()
    width = int(data['size']['width'])
    height = int(data['size']['height'])
    xmin = []
    ymin = []
    xmax = []
    ymax = []
    classes = []
    classes_text = []
    truncated = []
    poses = []
    difficult_obj = []
    logger.info('here again {}'.format(FLAGS.classname_in_filename))
    if 'object' in data:
        for obj in data['object']:
            difficult = bool(int(obj['difficult']))
            if ignore_difficult_instances and difficult:
                continue
            difficult_obj.append(int(difficult))
            # Normalize box coordinates to [0, 1] relative to image size.
            xmin.append(float(obj['bndbox']['xmin']) / width)
            ymin.append(float(obj['bndbox']['ymin']) / height)
            xmax.append(float(obj['bndbox']['xmax']) / width)
            ymax.append(float(obj['bndbox']['ymax']) / height)
            # The class label either comes from the file name (Oxford-pets
            # style) or from the XML object name.
            if FLAGS.classname_in_filename:
                class_name = get_class_name_from_filename(data['filename'])
            else:
                class_name = obj['name']
            classes_text.append(class_name.encode('utf8'))
            classes.append(label_map_dict[class_name])
            truncated.append(int(obj['truncated']))
            poses.append(obj['pose'].encode('utf8'))
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(
            data['filename'].encode('utf8')),
        'image/source_id': dataset_util.bytes_feature(
            data['filename'].encode('utf8')),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
        'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
        'image/object/truncated': dataset_util.int64_list_feature(truncated),
        'image/object/view': dataset_util.bytes_list_feature(poses),
    }))
    return example
def main(_):
    """Entry point: (re)generate train/val/test TFRecords for all sub-folders.

    Skips the work entirely if the records are newer than the data, unless
    --force_regenerate is set.
    """
    global logger
    os.makedirs(FLAGS.log_dir, exist_ok=True)
    logconf = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logging.yml')
    print('logconf {}'.format(logconf))
    if os.path.exists(logconf):
        with open(logconf, 'rt') as f:
            config = yaml.safe_load(f.read())
        logging.config.dictConfig(config)
        print('logconf loaded')
    else:
        # Bug fix: `default_level` was undefined here (NameError on the
        # fallback path); default to INFO instead.
        logging.basicConfig(level=logging.INFO)
        print('logconf fall back to default')
    logger = logging.getLogger('createTFRecord')
    root_data_dir = FLAGS.data_dir
    output_dir = FLAGS.output_dir
    logger.info('data_dir: {}'.format(root_data_dir))
    # Bug fix: this line previously logged root_data_dir as the output dir.
    logger.info('output_dir: {}'.format(output_dir))
    logger.info('classname is in filename: {}'.format(FLAGS.classname_in_filename))
    os.makedirs(output_dir, exist_ok=True)
    train_output_file = os.path.join(output_dir, 'train.record')
    val_output_file = os.path.join(output_dir, 'val.record')
    test_output_file = os.path.join(output_dir, 'test.record')
    # If tfrecords were already generated after the last data change, do nothing.
    if not FLAGS.force_regenerate and not data_changed(root_data_dir, train_output_file):
        logger.info('data directories have not changed...exiting')
        return
    # All sub-folders must share an identical label map.
    dirs, label_map_dict, label_map_file = get_label_map(root_data_dir)
    if label_map_dict is None:
        logger.error('label map file must be same in all sub folders...exiting')
        return
    train_writer = tf.python_io.TFRecordWriter(train_output_file)
    val_writer = tf.python_io.TFRecordWriter(val_output_file)
    test_writer = tf.python_io.TFRecordWriter(test_output_file)
    for subdir in dirs:
        process_one_subdir(os.path.join(root_data_dir, subdir), label_map_dict, train_writer, val_writer, test_writer)
    train_writer.close()
    val_writer.close()
    test_writer.close()
    # Ship the label map alongside the records for downstream consumers.
    shutil.copy(label_map_file, output_dir)
if __name__ == '__main__':
    # data_dir and output_dir are mandatory; tf.app.run parses the flags
    # and invokes main().
    flags.mark_flag_as_required('data_dir')
    flags.mark_flag_as_required('output_dir')
    tf.app.run()
|
# coding: utf-8
# In[1]:
import cv2
import numpy as np
import imutils
# In[2]:
# Demo: three ways to resize an image while preserving aspect ratio.
img = cv2.imread('./datasets/flower3.jpg')

# Resize to a target width.
(h, w) = img.shape[:2]
new_width = 800.0
r = new_width / w
calc_height = h * r
dim = (int(new_width), int(calc_height))
# Bug fix: the third positional argument of cv2.resize is `dst`, not the
# interpolation flag, so INTER_AREA must be passed by keyword.
resized_image1 = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
cv2.imshow("Image Resized(width)", resized_image1)
cv2.waitKey(0)

# Display the original image for comparison.
cv2.imshow('Orginal Image', img)
cv2.waitKey(0)

# Resize to a target height.
new_height = 50
r = new_height / float(h)
calc_width = int(w * r)
dim = (calc_width, new_height)
resized_image2 = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
cv2.imshow('Resized image(height)', resized_image2)
cv2.waitKey(0)

# imutils handles the aspect-ratio bookkeeping itself.
resized_image_imutils = imutils.resize(img, width=600)
cv2.imshow('Resized image(width) using imutils', resized_image_imutils)
cv2.waitKey(0)
|
from django.db import models
class NbaNews(models.Model):
    """An NBA news article: headline, author, body text and optional media."""
    # NOTE(review): default='' on a DateTimeField is not a valid datetime
    # default -- confirm whether null=True or auto_now_add was intended.
    created = models.DateTimeField(blank = True, default = '')
    # Headline and byline (each capped at 100 chars).
    title = models.CharField(max_length = 100, blank = True, default='')
    author = models.CharField(max_length = 100, blank = True, default = '')
    # Article body, capped at 1000 chars.
    context = models.CharField(max_length = 1000, blank = True, default = '')
    # Optional media attachment URLs.
    photo = models.URLField(blank = True, default = '')
    video = models.URLField(blank = True, default = '')
|
def generateShape(int):
    """Return an n x n square of '+' characters as a newline-joined string.

    For n = 3:
        +++
        +++
        +++
    An input of 0 yields the empty string.

    NOTE: the parameter is (regrettably) named ``int``, shadowing the
    builtin; the name is kept for backward compatibility with any
    keyword-argument callers.
    """
    # join() avoids the original's trailing-newline [:-1] slice trick.
    return "\n".join("+" * int for _ in range(int))
'''
I will give you an integer. Give me back a shape that is as long and wide
as the integer. The integer will be a whole number between 0 and 50.
Example
n = 3, so I expect a 3x3 square back just like below as a string:
+++
+++
+++
'''
|
import logging
from six.moves import input
from django.core.management import BaseCommand, CommandError, call_command
from elasticsearch_dsl import connections
from stretch import stretch_app
class Command(BaseCommand):
    """
    List the Stretch Indices in a Project (not in Elasticsearch)
    """
    # Allow the command to read Django settings.
    can_import_settings = True
    def handle(self, *args, **options):
        # Delegate to the 'stretch' management command's list_indices action.
        call_command('stretch', 'list_indices')
|
import difflib
import random
import re
import string
import subprocess
import sys
import time
from os import path
import requests
class Nginx:
    """Thin wrapper around the ``nginx`` binary for config management.

    Keeps a stack of pushed configurations plus the last configuration known
    to reload cleanly, so a bad config can be rolled back.

    Bug fixes vs. the original:
      * ``returncode is not 0`` compared object identity instead of value
        (SyntaxWarning on modern Python); now ``!= 0``.
      * ``last_error`` is initialised in __init__ so it always exists.
    """
    command_config_test = ["nginx", "-t"]
    command_reload = ["nginx", "-s", "reload"]
    command_start = ["nginx"]

    def __init__(self, config_file_path):
        self.config_file_path = config_file_path
        # Captured stderr of the most recent failed config test.
        self.last_error = ""
        if path.exists(config_file_path):
            with open(config_file_path) as file:
                self.last_working_config = file.read()
        else:
            # Create the file so later reads/writes cannot fail.
            with open(config_file_path, "w") as file:
                self.last_working_config = ""
        self.config_stack = [self.last_working_config]

    def start(self) -> bool:
        """Start nginx; return True on success."""
        start_result = subprocess.run(Nginx.command_start, stderr=subprocess.PIPE)
        if start_result.returncode != 0:
            print(start_result.stderr, file=sys.stderr)
        return start_result.returncode == 0

    def config_test(self) -> bool:
        """
        Test the current nginx configuration to determine whether or not it fails
        :return: true if config test is successful otherwise false
        """
        test_result = subprocess.run(Nginx.command_config_test, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if test_result.returncode != 0:
            print("Nginx configtest failed!", file=sys.stderr)
            self.last_error = test_result.stderr.decode("utf-8")
            print(self.last_error, file=sys.stderr)
            return False
        return True

    def push_config(self, config_str):
        """Write config_str, reload, and push it onto the config stack.

        On reload failure the previous top-of-stack config is restored and
        False is returned.
        """
        if config_str == self.last_working_config:
            self.config_stack.append(config_str)
            return self.reload()
        with open(self.config_file_path, "w") as file:
            file.write(config_str)
        if not self.reload():
            # Roll back to the previous top of the stack.
            with open(self.config_file_path, "w") as file:
                file.write(self.config_stack[-1])
            self.reload()
            return False
        else:
            self.config_stack.append(config_str)
            return True

    def pop_config(self):
        """Restore the most recently pushed configuration and reload."""
        with open(self.config_file_path, "w") as file:
            file.write(self.config_stack.pop())
        return self.reload()

    def forced_update(self, config_str):
        """
        Simply reload the nginx with the configuration, don't check whether or not configuration is changed or not.
        If change causes nginx to fail, revert to last working config.
        :param config_str:
        :return:
        """
        with open(self.config_file_path, "w") as file:
            file.write(config_str)
        if not self.start():
            with open(self.config_file_path, "w") as file:
                file.write(self.last_working_config)
            return False
        else:
            self.last_working_config = config_str
            return True

    def update_config(self, config_str) -> bool:
        """
        Change the nginx configuration.
        :param config_str: string containing configuration to be written into config file
        :return: true if the new config was used false if error or if the new configuration is same as previous
        """
        if config_str == self.last_working_config:
            print("Configuration not changed, skipping nginx reload")
            return False
        diff = str.join("\n", difflib.unified_diff(self.last_working_config.splitlines(),
                                                   config_str.splitlines(),
                                                   fromfile='Old Config',
                                                   tofile='New Config',
                                                   lineterm='\n'))
        with open(self.config_file_path, "w") as file:
            file.write(config_str)
        if not self.reload():
            print(diff, file=sys.stderr)
            print("ERROR: Above change made nginx to fail. Thus it's rolled back", file=sys.stderr)
            with open(self.config_file_path, "w") as file:
                file.write(self.last_working_config)
            return False
        else:
            print(diff)
            self.last_working_config = config_str
            return True

    def reload(self) -> bool:
        """
        Reload nginx so that new configurations are applied.
        :return: true if nginx reload was successful false otherwise
        """
        if self.config_test():
            reload_result = subprocess.run(Nginx.command_reload, stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
            if reload_result.returncode != 0:
                print("Nginx reload failed with exit code ", file=sys.stderr)
                print(reload_result.stderr.decode("utf-8"), file=sys.stderr)
                return False
            return True
        return False

    def verify_domain(self, domain):
        """Verify that each hostname (str or list of str) routes to this
        machine, by serving a random redirect and fetching it over HTTP.

        Returns a bool for a single str input, or the list of verified
        hostnames for a list input.

        NOTE(review): the temporary verification server block is pushed but
        never popped off the config stack -- confirm whether that is
        intended.
        """
        if isinstance(domain, str):
            domain = [domain]
        # When not filtered, one invalid domain in a list of 100 would make
        # all domains unverified due to nginx failing to start.
        domain = [x for x in domain if Nginx.is_valid_hostname(x)]
        r1 = "".join([random.choice(string.ascii_letters + string.digits) for _ in range(32)])
        r2 = "".join([random.choice(string.ascii_letters + string.digits) for _ in range(32)])
        config = '''server {
    listen 80 ;
    server_name %s;
    location /%s {
        return 301 http://$host/%s;
    }
}''' % (" ".join(domain), r1, r2)
        if self.push_config(config):
            # "nginx -s reload" only signals the reload and returns; give
            # the reload process some time to complete.
            time.sleep(2)
            success = []
            for d in domain:
                try:
                    response = requests.get("http://%s/%s" % (d, r1), allow_redirects=False)
                    if (response.is_permanent_redirect):
                        if ("Location" in response.headers):
                            if response.headers.get("Location").split("/")[-1] == r2:
                                success.append(d)
                                continue
                except requests.exceptions.ConnectionError as e:
                    pass
                print("[ERROR] Domain is not owned by this machine :" + d, file=sys.stderr)
        elif isinstance(domain, str):
            return False
        else:
            return []
        if isinstance(domain, str):
            return True
        return success

    @staticmethod
    def is_valid_hostname(hostname: str):
        """
        https://stackoverflow.com/a/33214423/2804342
        :return: True if for valid hostname False otherwise
        """
        if hostname[-1] == ".":
            # strip exactly one dot from the right, if present
            hostname = hostname[:-1]
        if len(hostname) > 253:
            return False
        labels = hostname.split(".")
        # the TLD must be not all-numeric
        if re.match(r"[0-9]+$", labels[-1]):
            return False
        allowed = re.compile(r"(?!-)[a-z0-9-]{1,63}(?<!-)$", re.IGNORECASE)
        return all(allowed.match(label) for label in labels)
|
from user.models import Client
from django.contrib.auth.backends import ModelBackend
def jwt_response_payload_handler(token, user=None, request=None):
    """Shape the JWT auth response: the token plus basic user identity."""
    payload = {
        'token': token,
        'user_id': user.id,
        'username': user.name,
    }
    return payload
|
from collections.abc import Iterable
from networkx.classes.graph import Graph, _Node
# Typed stub signatures for networkx clique algorithms (no runtime logic).

# Clique enumeration.
def enumerate_all_cliques(G: Graph[_Node]) -> Iterable[list[_Node]]: ...
def find_cliques(
    G: Graph[_Node], nodes: list[_Node] | None = ...
) -> Iterable[list[_Node]]: ...
def find_cliques_recursive(
    G: Graph[_Node], nodes: list[_Node] | None = ...
) -> Iterable[list[_Node]]: ...

# Derived clique graphs.
def make_max_clique_graph(
    G: Graph[_Node], create_using: type[Graph[_Node]] = ...
) -> Graph[_Node]: ...
def make_clique_bipartite(
    G: Graph[_Node],
    fpos: None = ...,
    create_using: type[Graph[_Node]] = ...,
    name: None = ...,
) -> Graph[_Node]: ...

# Clique statistics.
def graph_clique_number(G: Graph[_Node], cliques: list[_Node] | None = ...) -> int: ...
def graph_number_of_cliques(
    G: Graph[_Node], cliques: list[_Node] | None = ...
) -> int: ...
def node_clique_number(
    G: Graph[_Node],
    nodes: list[_Node] | None = ...,
    cliques: list[list[_Node]] | None = ...,
) -> int: ...
def number_of_cliques(
    G: Graph[_Node],
    nodes: list[_Node] | None = ...,
    cliques: list[list[_Node]] | None = ...,
) -> int: ...
def cliques_containing_node(
    G: Graph[_Node],
    nodes: list[_Node] | None = ...,
    cliques: list[list[_Node]] | None = ...,
) -> Iterable[list[_Node]]: ...
|
import socket
import select
import errno
import threading
import patterns
import re
class SocketClient(threading.Thread, patterns.Publisher):
    """Background TCP client for an IRC-style chat: a select()-driven
    reader/writer thread that forwards received messages to an attached
    IRC view object."""

    def __init__(self, HOST, PORT):
        super().__init__(daemon=True)
        self.HOST = HOST
        self.PORT = PORT
        # Connect immediately; the socket is both the read and write channel.
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((self.HOST, self.PORT))
        # Pending outbound message (bytes); empty means nothing to send.
        self.msg = b''
        self.sender = None
        # select() watch lists: always read; write only when a msg is queued.
        self.inputs = [self.s]
        self.outputs = []
        # The UI object messages are pushed to; set later via set_irc().
        self.irc = None
        self.username = ''

    def set_irc(self, irc):
        self.irc = irc

    def set_irc_username(self, username):
        self.irc.username = username

    def update(self, msg):
        # Publisher callback: hand a decoded message to the UI.
        self.irc.add_msg(msg)

    def handleRead(self, read):
        """Drain readable sockets; parse 'sender~text' frames and display
        the text under the sender's name before restoring our own."""
        for s in read:
            try:
                data = self.s.recv(1024)
                if data:
                    if self.irc:
                        # NOTE(review): pattern is not a raw string; it
                        # relies on unknown escapes passing through --
                        # consider r'(\S+)~([\S+\s?].*)'.
                        re_user = re.compile('(\S+)~([\S+\s?].*)')
                        msg = str(data, 'utf-8')
                        m = re_user.match(msg)
                        if m:
                            # Temporarily show the remote sender's name.
                            if not m.group(1) == self.username:
                                self.irc.username = m.group(1)
                            msg = m.group(2)
                        self.update(msg)
                        self.irc.username = self.username
                    if s not in self.outputs:
                        self.outputs.append(s)
            except socket.error as e:
                if e.errno == errno.ECONNRESET:
                    data = None
                else:
                    raise e

    def handleWrite(self, write):
        """Send the pending message on writable sockets; intercept NICK
        commands to track our own username."""
        for s in write:
            if self.msg:
                # NOTE(review): non-raw regex; consider r'(NICK)\W(\S+)'.
                re_nick = re.compile('(NICK)\W(\S+)')
                m_nick = re_nick.match(str(self.msg, 'utf-8'))
                if m_nick:
                    self.username = m_nick.group(2)
                    self.set_irc_username(self.username)
                self.s.send(self.msg)
                # Stop watching for writability until a new msg is queued.
                self.outputs.remove(s)
                self.msg = b''

    def run(self):
        # Thread main loop: multiplex reads and writes with select().
        while 1:
            read, write, err = self.get_ready_sockets()
            self.handleRead(read)
            self.handleWrite(write)

    def get_ready_sockets(self):
        # Blocks until the socket is readable/writable/errored.
        return select.select(self.inputs, self.outputs, self.inputs)

    def setMsg(self, msg):
        # Queue an outbound message; run() sends it once writable.
        self.sender = self.s
        self.msg = bytes(msg, 'utf-8')
|
import numpy as np
class ExtractedFeatures:
    """Fixed-capacity buffers for two feature layers plus patch positions.

    Pre-allocates room for num_items rows; append() fills rows sequentially
    and get() trims the unused tail before returning the arrays.

    Bug fix: append() now validates shapes *before* mutating the buffers,
    so a mismatched batch no longer corrupts stored data first.  The print
    statements were converted to single-argument print() calls, which
    behave identically on Python 2 and 3.
    """
    def __init__(self, num_items, dim):
        self.patches6 = np.zeros( (num_items, dim), dtype='float32' )
        self.patches7 = np.zeros( (num_items, dim), dtype='float32' )
        self.pos = np.zeros( (num_items, 2), dtype='uint16' )
        # Index of the next free row.
        self.cursor = 0
    def append(self, features6, features7, pos):
        """Copy a batch of feature rows (and their positions) into the buffers."""
        # Validate before touching the buffers.
        assert features6.shape==features7.shape, "Size mismatch between features"
        assert features6.shape[0]==pos.shape[0], "Size mismatch between features and positions"
        n = features6.shape[0]
        self.patches6[self.cursor:self.cursor+n, :] = features6
        self.patches7[self.cursor:self.cursor+n, :] = features7
        self.pos[self.cursor:self.cursor+n, :] = pos
        self.cursor += n
    def get(self):
        """Trim the buffers to the rows actually filled and return them."""
        print("Was " + str(self.patches6.shape))
        self.patches6.resize( (self.cursor, self.patches6.shape[1]) ) #Throws away unused patches
        self.patches7.resize( (self.cursor, self.patches7.shape[1]) )
        self.pos.resize( (self.cursor, self.pos.shape[1]) )
        print("Is " + str(self.patches6.shape))
        return (self.patches6, self.patches7, self.pos)
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest
from pants.backend.docker.subsystems.dockerfile_parser import rules as parser_rules
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs
from pants.backend.docker.util_rules.dockerfile import rules as dockerfile_rules
from pants.backend.python.target_types import PexBinary
from pants.backend.python.util_rules.pex import rules as pex_rules
from pants.engine.addresses import Address
from pants.engine.internals.scheduler import ExecutionError
from pants.testutil.pants_integration_test import run_pants
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Provide a RuleRunner wired with the dockerfile/pex parsing rules."""
    runner = RuleRunner(
        rules=[
            *dockerfile_rules(),
            *parser_rules(),
            *pex_rules(),
            QueryRule(DockerfileInfo, (DockerfileInfoRequest,)),
        ],
        target_types=[DockerImageTarget, PexBinary],
    )
    runner.set_options([], env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    return runner
@pytest.mark.parametrize(
    "files",
    [
        # Dockerfile provided as a source file next to the BUILD file.
        pytest.param(
            [
                ("test/BUILD", "docker_image()"),
                ("test/Dockerfile", "{dockerfile}"),
            ],
            id="source Dockerfile",
        ),
        # Dockerfile generated from inline `instructions` on the target.
        pytest.param(
            [
                ("test/BUILD", "docker_image(instructions=[{dockerfile!r}])"),
            ],
            id="generate Dockerfile",
        ),
    ],
)
def test_parsed_injectables(files: list[tuple[str, str]], rule_runner: RuleRunner) -> None:
    """Verify FROM build args and COPY sources are extracted from a Dockerfile.

    COPY sources behind ``--from=...`` must be ignored.
    """
    dockerfile_content = dedent(
        """\
        ARG BASE_IMAGE=:base
        FROM $BASE_IMAGE
        COPY some.target/binary.pex some.target/tool.pex /bin
        COPY --from=scratch this.is/ignored.pex /opt
        COPY binary another/cli.pex tool /bin
        """
    )
    rule_runner.write_files(
        {filename: content.format(dockerfile=dockerfile_content) for filename, content in files}
    )
    addr = Address("test")
    info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])
    assert info.from_image_build_args.to_dict() == {"BASE_IMAGE": ":base"}
    # The --from=scratch COPY line is excluded; last COPY's destination is not a source.
    assert info.copy_source_paths == (
        "some.target/binary.pex",
        "some.target/tool.pex",
        "binary",
        "another/cli.pex",
        "tool",
    )
def test_build_args(rule_runner: RuleRunner) -> None:
    """ARG instructions (with and without defaults) are collected as build args."""
    rule_runner.write_files(
        {
            "test/BUILD": "docker_image()",
            "test/Dockerfile": dedent(
                """\
                ARG registry
                FROM ${registry}/image:latest
                ARG OPT_A
                ARG OPT_B=default_b_value
                ENV A=${OPT_A:-A_value}
                ENV B=${OPT_B}
                """
            ),
        }
    )
    addr = Address("test")
    info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])
    # ENV lines do not contribute build args; ARG defaults are preserved.
    assert info.build_args == DockerBuildArgs.from_strings(
        "registry",
        "OPT_A",
        "OPT_B=default_b_value",
    )
def test_from_image_build_arg_names(rule_runner: RuleRunner) -> None:
    """A build arg whose default is a target address is surfaced as a FROM image arg."""
    rule_runner.write_files(
        {
            "test/upstream/BUILD": "docker_image(name='image')",
            "test/upstream/Dockerfile": "FROM upstream",
            "test/downstream/BUILD": "docker_image(name='image')",
            "test/downstream/Dockerfile": dedent(
                """\
                ARG BASE_IMAGE=test/upstream:image
                FROM ${BASE_IMAGE} AS base
                """
            ),
        }
    )
    addr = Address("test/downstream", target_name="image")
    info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])
    assert info.from_image_build_args.to_dict() == {"BASE_IMAGE": "test/upstream:image"}
def test_inconsistent_build_args(rule_runner: RuleRunner) -> None:
    """The same ARG declared twice with different defaults must be rejected."""
    rule_runner.write_files(
        {
            "test/BUILD": "docker_image(name='image')",
            "test/Dockerfile": dedent(
                """\
                FROM image1:latest
                ARG OPT_A=default_1
                FROM image2:latest
                ARG OPT_A=default_2
                """
            ),
        }
    )
    addr = Address("test", target_name="image")
    # Regex-escaped because pytest.raises(match=...) treats it as a pattern.
    err_msg = (
        r"Error while parsing test/Dockerfile for the test:image target: DockerBuildArgs: "
        r"duplicated 'OPT_A' with different values: 'default_1' != 'default_2'\."
    )
    with pytest.raises(ExecutionError, match=err_msg):
        rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])
def test_copy_source_references(rule_runner: RuleRunner) -> None:
    """COPY sources are collected; ADD lines and bare COPY are ignored."""
    rule_runner.write_files(
        {
            "test/BUILD": "docker_image()",
            "test/Dockerfile": dedent(
                """\
                FROM base
                COPY a b /
                COPY --option c/d e/f/g /h
                ADD ignored
                COPY j k /
                COPY
                """
            ),
        }
    )
    info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(Address("test"))])
    # Final path of each COPY is the destination, not a source.
    assert info.copy_source_paths == ("a", "b", "c/d", "e/f/g", "j", "k")
def test_baseimage_tags(rule_runner: RuleRunner) -> None:
    """Version tags are derived per FROM stage (explicit tag, build arg, or none)."""
    rule_runner.write_files(
        {
            "test/BUILD": "docker_image()",
            "test/Dockerfile": (
                "FROM untagged\n"
                "FROM tagged:v1.2\n"
                "FROM digest@sha256:d1f0463b35135852308ea815c2ae54c1734b876d90288ce35828aeeff9899f9d\n"
                "FROM gcr.io/tekton-releases/github.com/tektoncd/operator/cmd/kubernetes/operator:"
                "v0.54.0@sha256:d1f0463b35135852308ea815c2ae54c1734b876d90288ce35828aeeff9899f9d\n"
                "FROM $PYTHON_VERSION AS python\n"
            ),
        }
    )
    info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(Address("test"))])
    assert info.version_tags == (
        "stage0 latest",
        "stage1 v1.2",
        # Stage 2 is not pinned with a tag.
        "stage3 v0.54.0",
        "python build-arg:PYTHON_VERSION",  # Parse tag from build arg.
    )
def test_generate_lockfile_without_python_backend() -> None:
    """Regression test for https://github.com/pantsbuild/pants/issues/14876."""
    pants_args = [
        "--backend-packages=pants.backend.docker",
        "--python-resolves={'dockerfile-parser':'dp.lock'}",
        "generate-lockfiles",
        "--resolve=dockerfile-parser",
    ]
    run_pants(pants_args).assert_success()
|
def solution(s):
    """Return the middle character of *s*, or the middle two characters when
    the length is even."""
    mid = len(s) // 2
    if len(s) % 2:
        return s[mid]
    return s[mid - 1:mid + 1]


solution("ABCDEFG")
|
from django.apps import AppConfig
class AggsConfig(AppConfig):
    """Django application configuration for the ``aggs`` app."""
    name = 'aggs'
|
import sqlite3 as lite
import pandas as pd
# Build a small demo database of city/weather rows and report the cities
# whose warmest month is July.
con = lite.connect('getting_started.db')

# (name, state) rows for the cities table.
cities = (
    ('New York City', 'NY'),
    ('Boston', 'MA'),
    ('Chicago', 'IL'),
    ('Miami', 'FL'),
    ('Dallas', 'TX'),
    ('Seattle', 'WA'),
    ('Portland', 'OR'),
    ('San Francisco', 'CA'),
    ('Los Angeles', 'CA'),
    ('Las Vegas', 'NV'),
    ('Atlanta', 'GA')
)
# (city, year, warm_month, cold_month, average_high) rows.
weather = (
    ('New York City', 2013, 'July', 'January', 62),
    ('Boston', 2013, 'July', 'January', 59),
    ('Chicago', 2013, 'July', 'January', 59),
    ('Miami', 2013, 'August', 'January', 84),
    ('Dallas', 2013, 'July', 'January', 77),
    ('Seattle', 2013, 'July', 'January', 61),
    ('Portland', 2013, 'July', 'December', 63),
    ('San Francisco', 2013, 'September', 'December', 64),
    ('Los Angeles', 2013, 'September', 'December', 75),
    ('Las Vegas', 2013, 'July', 'December', 35),
    ('Atlanta', 2013, 'July', 'January', 40)
)
with con:
    cur = con.cursor()
    cur.execute("DROP TABLE IF EXISTS cities")
    cur.execute("DROP TABLE IF EXISTS weather")
    cur.execute("CREATE TABLE cities (name text, state text)")
    # Bug fix: the city column type was misspelled 'test'.
    cur.execute("CREATE TABLE weather (city text, year integer, warm_month text, cold_month text, average_high integer)")
    cur.executemany("INSERT INTO cities VALUES(?,?)", cities)
    cur.executemany("INSERT INTO weather VALUES(?,?,?,?,?)", weather)
    cur.execute("SELECT weather.city, cities.state, weather.year, weather.warm_month, weather.cold_month, weather.average_high FROM cities INNER JOIN weather ON cities.name = weather.city")
    rows = cur.fetchall()
    cols = [desc[0] for desc in cur.description]
    df = pd.DataFrame(rows, columns=cols)
    # Bug fixes: the original loop started at index 1 (dropping the first
    # city) and listed every row even though the header promises July only.
    july = df[df['warm_month'] == 'July']
    print("The cities that are warmest in July are:")
    for city, state in zip(july['city'], july['state']):
        print(" {}, {}".format(city, state))
|
#!/usr/bin/env python
#+
# Name:
# download_era_interim_sfc_pl_v2
# Purpose:
# An IDL procedure to download the majority of ERA-Interim variables on
# model levels and at the surface.
# Inputs:
# None.
# Outputs:
# netCDF files with both the pressure level and surface data
# Keywords:
# VERBOSE : Set to increase verbosity
# Author and History:
# Kyle R. Wodzicki Created 15 May 2017
#-
import os;
from datetime import datetime, timedelta;
from send_email import send_email;
from compress_netcdf_file import compress_netcdf_file;
from download_era_interim_v2 import download_era_interim_v2
def download_era_interim_sfc_pl_v2():
    """Download ERA-Interim analysis (pressure-level and surface) and
    forecast (surface) data as monthly netCDF files.

    Fixes vs. the original:
      * ``def ...();`` was invalid syntax (semicolon instead of colon);
      * ``dict.update`` returns None, so the three request dicts were all
        None — they are now built as merged copies of ``const_info``;
      * the surface downloads wrote into the pressure-level directory;
      * ``email`` was referenced at the call sites but never defined there;
      * the '/'-joined parameter strings were computed but never used.

    :return: the (pl_an, sfc_an, sfc_fc) exit statuses from :func:`download`.
    """
    email = 'wodzicki@tamu.edu'  # Address notified on repeated failures
    start_year = 2010
    start_month = 1
    date = datetime.now()
    # Timestamp for the log file names, e.g. 2017-05-15_12:00:00
    date_str = ('_'.join(str(date).split())).split('.')[0]
    base_dir = '/Volumes/Data_Rapp/ERA_Interim/'
    pl_an_dir = base_dir + 'Analysis/Pressure_Levels/'
    sfc_an_dir = base_dir + 'Analysis/Surface/'
    sfc_fc_dir = base_dir + 'Forecast/Surface/'
    pl_an_log = base_dir + 'logs/ERAI_download_pl_an_' + date_str + '.log'
    sfc_an_log = base_dir + 'logs/ERAI_download_sfc_an_' + date_str + '.log'
    sfc_fc_log = base_dir + 'logs/ERAI_download_sfc_fc_' + date_str + '.log'
    grid = '1.5'
    # Pressure levels (hPa) to download.
    pl_levels = [1000, 975, 950, 925, 900, 875, 850, 825, 800, 775, 750, 700,
                 650, 600, 550, 500, 450, 400, 350, 300, 250, 225, 200, 175,
                 150, 125, 100, 50, 10, 5, 1]
    # Analysis pressure-level variables (param.table codes).
    pl_an_var = [60.128, 129.128, 130.128, 131.128, 132.128, 133.128, 135.128,
                 138.128, 155.128, 157.128, 203.128, 246.128, 247.128, 248.128]
    # Analysis surface variables.
    sfc_an_var = [31.128, 32.128, 33.128, 34.128, 35.128, 36.128, 37.128,
                  38.128, 39.128, 40.128, 41.128, 42.128, 53.162, 54.162,
                  55.162, 56.162, 57.162, 58.162, 59.162, 60.162, 61.162,
                  62.162, 63.162, 64.162, 65.162, 66.162, 67.162, 68.162,
                  69.162, 70.162, 71.162, 72.162, 73.162, 74.162, 75.162,
                  76.162, 77.162, 78.162, 79.162, 80.162, 81.162, 82.162,
                  83.162, 84.162, 85.162, 86.162, 87.162, 88.162, 89.162,
                  90.162, 91.162, 92.162, 134.128, 136.128, 137.128, 139.128,
                  141.128, 148.128, 151.128, 164.128, 165.128, 166.128, 167.128,
                  168.128, 170.128, 173.128, 174.128, 183.128, 186.128, 187.128,
                  188.128, 198.128, 206.128, 234.128, 235.128, 236.128, 238.128]
    # Forecast surface variables.
    sfc_fc_var = [44.128, 45.128, 49.128, 50.128, 142.128, 143.128, 144.128,
                  146.128, 147.128, 159.128, 169.128, 175.128, 176.128, 177.128,
                  178.128, 179.128, 180.128, 182.128, 205.128, 208.128, 209.128,
                  210.128, 211.128, 212.128, 228.128, 231.128, 232.128, 239.128,
                  240.128, 243.128, 244.128]
    # The MARS API expects '/'-separated strings, not Python lists.
    levelist = '/'.join(str(i) for i in pl_levels)
    pl_an_param = '/'.join(str(i) for i in pl_an_var)
    sfc_an_param = '/'.join(str(i) for i in sfc_an_var)
    sfc_fc_param = '/'.join(str(i) for i in sfc_fc_var)
    const_info = {"class": "ei",
                  "dataset": "interim",
                  "expver": "1",
                  "grid": grid + '/' + grid,
                  "area": "90/0/-90/360",
                  "stream": "oper",
                  "format": 'netcdf'}
    # Each request dict is a COPY of const_info plus its own settings.
    pl_an_info = dict(const_info,
                      levtype='pl', levelist=levelist, type='an',
                      time='00:00:00/06:00:00/12:00:00/18:00:00', step='0',
                      param=pl_an_param, target='')
    sfc_an_info = dict(const_info,
                       levtype='sfc', type='an',
                       time='00:00:00/06:00:00/12:00:00/18:00:00', step='0',
                       param=sfc_an_param, target='')
    sfc_fc_info = dict(const_info,
                       levtype='sfc', type='fc',
                       time='00:00:00/12:00:00', step='6/12',
                       param=sfc_fc_param, target='')
    pl_an = download_era_interim_v2(pl_an_info, logfile=pl_an_log, verbose=True, netcdf=True)
    sfc_an = download_era_interim_v2(sfc_an_info, logfile=sfc_an_log, verbose=True, netcdf=True)
    sfc_fc = download_era_interim_v2(sfc_fc_info, logfile=sfc_fc_log, verbose=True, netcdf=True)
    x = download(pl_an, start_year, start_month, date, pl_an_dir, 'an', 'pl', email)
    y = download(sfc_an, start_year, start_month, date, sfc_an_dir, 'an', 'sfc', email)
    z = download(sfc_fc, start_year, start_month, date, sfc_fc_dir, 'fc', 'sfc', email)
    return x, y, z
def download(era_class, start_year, start_month, date, out_dir, type, leveltype, email):
    """Download and compress one monthly file at a time, retrying on failure.

    Iterates from ``start_year``/``start_month`` up to 26 weeks before
    *date*; each month is downloaded and the resulting netCDF compressed,
    with up to five attempts per month.

    Fixes vs. the original:
      * Python-2 ``100L`` long literals;
      * the ``email`` parameter was immediately clobbered by a hard-coded
        address — the caller's value is now honoured;
      * ``file`` and ``subject`` were referenced but never defined.

    :return: 1 if a month repeatedly failed, else 0.
    """
    date -= timedelta(weeks=26)  # Do not request the most recent ~6 months
    yy, mm = date.year, date.month
    while start_year * 100 + start_month <= yy * 100 + mm:
        yyyymm = str(start_year * 100 + start_month)
        target = out_dir + '_'.join([type, leveltype, yyyymm]) + '.nc'
        era_class.info['target'] = target
        era_class.info['date'] = '{:4}-{:02}'.format(start_year, start_month)
        attempt, max_attempt = 0, 5
        while attempt < max_attempt:
            era_class.download()
            if era_class.status < 2:
                # Downloaded successfully; compress (and delete the original).
                status = compress_netcdf_file(target, email=email, gzip=5, delete=True)
                if status == 3:
                    # Compression failed: remove the partial file and retry.
                    if os.path.exists(target):
                        os.remove(target)
                    attempt += 1
                else:
                    attempt = max_attempt + 1  # Success: leave the retry loop
            elif era_class.status == 2:
                # Compressed file already exists; nothing to do for this month.
                with open(era_class.logfile, 'a') as f:
                    f.write('Compressed file already exists:\n ' + target + '\n')
                attempt = max_attempt + 1
            else:
                # Any other status: remove a partial download and retry.
                if os.path.exists(target):
                    os.remove(target)
                attempt += 1
        if attempt == max_attempt:
            # Exhausted the retries: notify by email and abort.
            subject = 'ERA-Interim download failed: ' + target
            status = send_email(email, subject)
            return 1
        start_month += 1
        if start_month == 13:  # Roll over into the next year
            start_year, start_month = start_year + 1, 1
    return 0
if __name__ == "__main__":
    # NOTE(review): the parsed options are never used and the download
    # function is never invoked here; parse_args() is not even called.
    # Confirm whether this CLI stub was meant to drive the download.
    import argparse;  # Import library for parsing
    parser = argparse.ArgumentParser(description="Download ERA-Interim");  # Set the description of the script to be printed in the help doc, i.e., ./script -h
    ### Data storage keywords; https://software.ecmwf.int/wiki/display/UDOC/Data+storage+keywords
    parser.add_argument("--target", type=str, help="specifies a Unix file into which data is to be written after retrieval or manipulation.")
    exit(0);  # Exit status zero (0) on end
class Optimizer(object):
    """Abstract optimizer holding a loss variable, learning rate and an
    optional learning-rate scheduler.

    Fixes vs. the original:
      * ``loss_var`` and ``lr`` were accepted but silently discarded;
      * ``_set_prog`` assigned ``prog`` to ``_init_prog``, ignoring the
        ``init_prog`` argument entirely.
    """

    def __init__(self, loss_var, lr, lr_schedualer=None):
        self._loss_var = loss_var
        self._lr = lr
        self._prog = None
        self._lr_schedualer = lr_schedualer

    def _build(self, grad_clip=None):
        """Build the optimization ops; must be implemented by subclasses."""
        raise NotImplementedError()

    def _set_prog(self, prog, init_prog):
        """Attach the main and initialization programs.

        The scheduler, if any, is attached to the main program as well.
        """
        self._prog = prog
        self._init_prog = init_prog
        if self._lr_schedualer is not None:
            self._lr_schedualer._set_prog(prog)

    def get_cur_learning_rate(self):
        # Subclass hook; intentionally returns None in the base class.
        pass
|
#!/usr/bin/env python3
from threading import Thread, Condition
from time import sleep
'''
1115. Print FooBar Alternately
https://leetcode.com/problems/print-foobar-alternately/
'''
class FooBar(object):
    """Print "foo" then "bar" alternately *n* times from two threads.

    Coordination uses a single Condition plus ``order`` (0 = foo's turn,
    1 = bar's turn).

    Fix vs. the original: ``bar`` contained a leftover debug statement
    ``print(self.barTurn)`` that printed the lambda's repr into the output.
    """

    def __init__(self, n):
        self.n = n
        self.cond = Condition()
        self.order = 0  # 0 -> foo's turn, 1 -> bar's turn
        self.fooTurn = lambda: self.order == 0
        self.barTurn = lambda: self.order == 1

    def printFoo(self):
        print("foo")

    def printBar(self):
        print("bar")

    def foo(self, printFoo):
        """
        :type printFoo: method
        :rtype: void
        """
        for i in range(self.n):
            with self.cond:
                self.cond.wait_for(self.fooTurn)
                # printFoo() outputs "foo". Do not change or remove this line.
                printFoo()
                self.order = 1
                self.cond.notify()

    def bar(self, printBar):
        """
        :type printBar: method
        :rtype: void
        """
        for i in range(self.n):
            with self.cond:
                self.cond.wait_for(self.barTurn)
                # printBar() outputs "bar". Do not change or remove this line.
                printBar()
                self.order = 0
                # No notify needed after the final "bar": foo has finished.
                if i != (self.n - 1):
                    self.cond.notify()
if __name__ == "__main__" :
    # Demo: run foo/bar concurrently; the threads are deliberately not
    # joined, so the process exits once both finish printing.
    fb = FooBar(3)
    Thread(target=fb.foo, args=(fb.printFoo,)).start()
    Thread(target=fb.bar, args=(fb.printBar,)).start()
#!/usr/bin/env python3
import csv
from enum import Enum
import openpyxl
import utm
# Enum to differentiate between the two file formats
class Version(Enum):
    # V1: porpoise count in column 25; V2: count in column 4 with lat/lon
    # conversion (see get_count/convert_lat_lon below).
    V1 = 1
    V2 = 2
# Reads an xlsx file with harbour porpoise observations and copies it to a csv. If multiple harbour porpoises are
# spotted the row is duplicated so that each row represents one observation.
def expand(input_file, output_file, version):
    """Copy an xlsx sheet of harbour porpoise observations to a csv,
    duplicating each row once per observed animal so that every csv row
    represents exactly one observation.

    :param input_file: path of the source .xlsx workbook
    :param output_file: path of the csv file to write
    :param version: Version enum selecting the column layout
    """
    workbook = openpyxl.load_workbook(input_file)
    ws = workbook.active
    # Bug fix: newline='' is required by the csv module; without it the
    # output contains a blank line after every row on Windows.
    with open(output_file, 'w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        first = True
        row_count = 1
        for row in ws.rows:
            if first:
                if version == Version.V2:
                    convert_lat_lon(row, True)
                # Prepend the ID header and copy the header row to the csv
                id_header = ["ID"]
                header_row = [cell.value for cell in row]
                id_header.extend(header_row)
                csv_writer.writerow(id_header)
                first = False
            else:
                # Copy the row to csv, once per harbour porpoise counted.
                count = get_count(row, version)
                set_count_to_one(row, version)
                if version == Version.V2:
                    convert_lat_lon(row)
                for j in range(count):
                    # Prepend the row count as a unique observation ID.
                    row_cell = [str(row_count)]
                    row_list = [cell.value for cell in row]
                    row_cell.extend(row_list)
                    csv_writer.writerow(row_cell)
                    row_count += 1
# Convert latitude and longitude columns to UTM format.
def convert_lat_lon(row, first=False):
    """Convert the latitude/longitude cells of *row* (columns 5 and 6) to UTM
    coordinates in place; with ``first=True``, rewrite the header cells.

    NOTE(review): assumes utm.from_latlon returns (easting, northing, ...)
    — confirm against the utm package documentation.
    """
    if first:
        row[5].value = "XUTM"
        row[6].value = "YUTM"
    else:
        lat = row[5].value
        lon = row[6].value
        utm_value = utm.from_latlon(lat, lon)
        # Truncate to whole metres.
        row[5].value = int(utm_value[0])
        row[6].value = int(utm_value[1])
# Sets the harbour porpoise count to 1 in the row
def set_count_to_one(row, version):
    """Overwrite the harbour porpoise count cell of *row* with 1."""
    index = 25 if version == Version.V1 else 4
    row[index].value = 1
# Retrieves the harbour porpoise count from the row
def get_count(row, version):
    """Return the harbour porpoise count stored in *row* as an int."""
    index = 25 if version == Version.V1 else 4
    return int(row[index].value)
if __name__ == "__main__":
    # Expand both historical datasets; input/output paths are relative to
    # the working directory and must exist beforehand.
    expand("data/in/Bruinvis alle waarnemingen 1991_2013.xlsx", "data/out/bruinvis_waarnemingen_1991_2013_expanded.csv",
           Version.V1)
    expand("data/in/export_BV_waarnemingen_14270_20171025.xlsx",
           "data/out/bruinvis_waarnemingen_2013_heden_expanded.csv",
           Version.V2)
|
# Generated by Django 3.0.8 on 2020-07-15 11:29
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Comment.photo to Comment.commented_photo."""
    dependencies = [
        ('photos', '0003_auto_20200715_1105'),
    ]
    operations = [
        migrations.RenameField(
            model_name='comment',
            old_name='photo',
            new_name='commented_photo',
        ),
    ]
|
from gym.envs.registration import register
# Gym environment registrations for vizdoomgym, expressed as data:
# (env id, entry-point class, max episode steps, reward threshold or None).
_ENV_SPECS = [
    ('VizdoomBasic-v0', 'VizdoomBasic', 10000, 10.0),
    ('VizdoomCorridor-v0', 'VizdoomCorridor', 10000, 1000.0),
    ('VizdoomDefendCenter-v0', 'VizdoomDefendCenter', 10000, 10.0),
    ('VizdoomDefendLine-v0', 'VizdoomDefendLine', 10000, 15.0),
    ('VizdoomHealthGathering-v0', 'VizdoomHealthGathering', 10000, 1000.0),
    # The MyWayHome step limits must be one less than the episode_timeout
    # value set in the corresponding .cfg file.
    ('VizdoomMyWayHome-v0', 'VizdoomMyWayHome', 2099, 0.5),
    ('VizdoomMyWayHomeFixed-v0', 'VizdoomMyWayHomeFixedEnv', 2099, 0.5),
    ('VizdoomMyWayHomeFixed15-v0', 'VizdoomMyWayHomeFixed15Env', 2099, 0.5),
    ('VizdoomPredictPosition-v0', 'VizdoomPredictPosition', 10000, 0.5),
    ('VizdoomTakeCover-v0', 'VizdoomTakeCover', 10000, 750.0),
    ('VizdoomDeathmatch-v0', 'VizdoomDeathmatch', 10000, 20.0),
    ('VizdoomHealthGatheringSupreme-v0', 'VizdoomHealthGatheringSupreme', 10000, None),
]

for _env_id, _cls_name, _max_steps, _threshold in _ENV_SPECS:
    _kwargs = dict(
        id=_env_id,
        entry_point='vizdoomgym.envs:' + _cls_name,
        max_episode_steps=_max_steps,
    )
    if _threshold is not None:
        _kwargs['reward_threshold'] = _threshold
    register(**_kwargs)
|
# coding: utf-8
# シーザー暗号解くためのスクリプト
# Alphabet used for the Caesar shift.
MAPPING = [
    'a', 'b', 'c', 'd', 'e', 'f', 'g',
    'h', 'i', 'j', 'k', 'l', 'm', 'n',
    'o', 'p', 'q', 'r', 's', 't', 'u',
    'v', 'w', 'x', 'y', 'z'
]
TARGET_STRING = 'EBG KVVV vf n fvzcyr yrggre fhofgvghgvba pvcure gung ercynprf n yrggre jvgu gur yrggre KVVV yrggref nsgre vg va gur nycunorg. EBG KVVV vf na rknzcyr bs gur Pnrfne pvcure, qrirybcrq va napvrag Ebzr. Synt vf SYNTFjmtkOWFNZdjkkNH. Vafreg na haqrefpber vzzrqvngryl nsgre SYNT.'


def rot_decode(text, shift=13):
    """Shift every letter of *text* by *shift* positions (default ROT13).

    Case is preserved; any non-letter becomes a space, matching the
    original script's behaviour.

    Fixes vs. the original script body:
      * Python-2 ``print`` statements (crashed on Python 3);
      * the fixed 500-slot output buffer appended trailing padding spaces;
      * the decoding is now a single pass over the text instead of one
        pass per alphabet letter.
    """
    count = len(MAPPING)
    decoded = []
    for ch in text:
        lower = ch.lower()
        if lower in MAPPING:
            shifted = MAPPING[(MAPPING.index(lower) + shift) % count]
            decoded.append(shifted.upper() if ch.isupper() else shifted)
        else:
            decoded.append(' ')  # non-letters become spaces
    return ''.join(decoded)


if __name__ == '__main__':
    print(rot_decode(TARGET_STRING))
|
# Generated by Django 2.0.5 on 2018-06-05 10:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter RegPrice.created_at: DateField with a database index."""
    dependencies = [
        ('calculation', '0015_auto_20180605_1046'),
    ]
    operations = [
        migrations.AlterField(
            model_name='regprice',
            name='created_at',
            field=models.DateField(db_index=True, verbose_name='создано'),
        ),
    ]
|
"""Limits serializers for API v2."""
import os
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers, status
from rest_framework.exceptions import PermissionDenied, APIException
from modoboa.core.models import User
from modoboa.parameters import tools as param_tools
from ...lib import decrypt_file, get_creds_filename
from ...constants import CONNECTION_SECURITY_MODES
class PDFCredentialsSettingsSerializer(serializers.Serializer):
    """A serializer for global parameters."""
    # General
    enabled_pdfcredentials = serializers.BooleanField(default=True)
    # Document storage
    storage_dir = serializers.CharField(default="/var/lib/modoboa/pdf_credentials")
    # Security options
    delete_first_dl = serializers.BooleanField(default=True)
    generate_at_creation = serializers.BooleanField(default=True)
    # Customization options
    title = serializers.CharField(default=_("Personal account information"))
    webpanel_url = serializers.URLField()
    custom_message = serializers.CharField(
        required=False, allow_blank=True, allow_null=True)
    include_connection_settings = serializers.BooleanField(default=False)
    smtp_server_address = serializers.CharField()
    smtp_server_port = serializers.IntegerField(default=587)
    smtp_connection_security = serializers.ChoiceField(
        choices=CONNECTION_SECURITY_MODES, default="starttls")
    imap_server_address = serializers.CharField()
    imap_server_port = serializers.IntegerField(default=143)
    imap_connection_security = serializers.ChoiceField(
        choices=CONNECTION_SECURITY_MODES, default="starttls")

    def validate(self, data):
        """Check that the storage directory exists and is writable."""
        enabled = data.get("enabled_pdfcredentials", None)
        if enabled is None:
            # Not supplied: fall back to the currently stored parameter.
            enabled = param_tools.get_global_parameter("enabled_pdfcredentials")
        if not enabled:
            return data
        storage_dir = data.get("storage_dir", None)
        if storage_dir is not None:
            if not os.path.isdir(storage_dir):
                raise serializers.ValidationError(
                    _("Directory not found.")
                )
            if not os.access(storage_dir, os.W_OK):
                raise serializers.ValidationError(
                    _("Directory is not writable")
                )
        return data
class GetAccountCredentialsSerializer(serializers.Serializer):
    """A serializer for get account credential view."""
    # Primary key of the account whose credentials document is requested.
    account_id = serializers.IntegerField()

    def validate(self, data):
        """Ensure the requesting user may access the target account.

        Stores the resolved account in ``self.context`` for save().
        """
        request = self.context["request"]
        account = User.objects.get(pk=data["account_id"])
        if not request.user.can_access(account):
            raise PermissionDenied()
        self.context["account"] = account
        return data

    def save(self):
        """Decrypt the credentials document into ``self.context``.

        Side effect: when the ``delete_first_dl`` parameter is set, the
        document file is removed after the first (this) download.

        :raises APIException: if no document exists for this user.
        """
        fname = get_creds_filename(self.context["account"])
        if not os.path.exists(fname):
            raise APIException(
                _("No document available for this user"),
                status.HTTP_400_BAD_REQUEST
            )
        self.context["content"] = decrypt_file(fname)
        if param_tools.get_global_parameter("delete_first_dl"):
            os.remove(fname)
        self.context["fname"] = os.path.basename(fname)
|
from time import sleep
from appium.webdriver.webdriver import WebDriver
from appium import webdriver
from pageobject.page import Page
class MockMethodsLocator(object):
    """ Class contains locator for behaviour mocking methods. """
    # Native view: resource id of the "Getting started" sign-in button.
    getting_started_id = "com.flickr.android:id/activity_welcome_sign_button"
    # Name of the webview context hosting the login form.
    login_webview_context = 'WEBVIEW_com.flickr.android'
    # Login form locators (xpath, inside the webview).
    email_field_xpath = '//input[@id="login-email"]'
    proceed_button_xpath = '//form[@id="login-form"]/button'
    password_field_xpath = '//input[@id="login-password"]'
class MockMethods(Page):
    """ Class contains general methods.

    Fixes vs. the original:
      * ``login`` ignored its ``password`` argument and re-typed a
        hard-coded literal;
      * the trailing no-op ``self.driver.contexts`` expression was removed.

    NOTE(review): the hard-coded default credentials should be moved to
    configuration/secret storage rather than living in source.
    """

    def __init__(self, driver: WebDriver = None):
        Page.__init__(self, driver)

    def login(self, email: str = "sasasabry290@gmail.com",
              password: str = "C%D5KBSN?$w&QKv"):
        """ log in flickr using input credentials.

        :param email: Flickr Account email
        :param password: Flickr Account password
        :return:
        """
        self.driver.find_element_by_id(
            MockMethodsLocator.getting_started_id).click()
        sleep(2)
        # The login form lives in a webview; switch context to reach it.
        self.driver.switch_to.context(
            MockMethodsLocator.login_webview_context)
        email_field = self.driver.find_element_by_xpath(
            MockMethodsLocator.email_field_xpath)
        email_field.send_keys(email)
        self.driver.find_element_by_xpath(
            MockMethodsLocator.proceed_button_xpath).click()
        sleep(2)
        password_field = self.driver.find_element_by_xpath(
            MockMethodsLocator.password_field_xpath)
        # Bug fix: use the *password* parameter, not a hard-coded literal.
        password_field.send_keys(password)
        self.driver.find_element_by_xpath(
            MockMethodsLocator.proceed_button_xpath).click()
        self.driver.switch_to.context('NATIVE_APP')
|
#-*—coding:utf8-*-
import numpy as np
import gc
import re
import csv
import codecs
from decimal import *
# Read and echo the rows of r.txt as csv; exit with a message when the
# file cannot be opened.
try:
    fil_winsize = codecs.open("r.txt", "r", 'utf_8_sig')
    winsize = csv.reader(fil_winsize)
except Exception:
    # Bug fix: the Python-2 ``print`` statement form is a syntax error on
    # Python 3; use the function form.
    print("winsize_filelist open failed")
    exit()
for i in winsize:
    print(i)
if fil_winsize:
    fil_winsize.close()
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Plot every shared sensor column of the eating vs. writing datasets
    # and save one comparison figure per column.
    import os
    eat = pd.read_csv('EatingDataUnFiltered.csv')
    write = pd.read_csv('WriteData.csv')
    # First column is assumed to be an index/label column — TODO confirm.
    columns = eat.columns.tolist()[1:]
    path = "plot/comparision"
    # Bug fix: the directory previously had to "be created manually";
    # create it on demand instead.
    os.makedirs(path, exist_ok=True)
    for column in columns:
        variable1 = eat[column].values
        variable2 = write[column].values
        x = np.arange(len(eat))
        plt.cla()
        plt.plot(x, variable1, label='EatFood', color='red')
        plt.plot(x, variable2, label='Non Eat', color='green')
        plt.legend()
        plt.ylim(-3, 3)
        plt.xlabel('Activity')
        plt.ylabel(column)
        plt.title('EatFood VS Non Eat ' + column)
        plt.savefig(path + '/' + column + '.jpg')
from flask import render_template, redirect, url_for, request
from app import app, db
from app.forms import LoginForm, RegistrationForm
from app.models import User, Post
from flask_login import current_user, login_user, logout_user, login_required
from werkzeug.urls import url_parse
from datetime import datetime
@app.before_request
def before_request():
    """Record the time of the current user's latest request.

    Bug fix: anonymous visitors have no database row, so touching
    ``current_user.last_seen`` unconditionally raised on every
    unauthenticated request.
    """
    if current_user.is_authenticated:
        current_user.last_seen = datetime.utcnow()
        db.session.commit()
@app.route('/')
@app.route('/index')
@login_required
def index():
    """Render the home page (login required)."""
    return render_template('homepage.html')
@app.route('/user/<username>')
@login_required
def user(username):
    """Render a user's profile page with their posts; 404 if unknown."""
    account = User.query.filter_by(username=username).first_or_404()
    return render_template('user.html', title='User', posts=account.posts, user=account)
@app.route('/registration', methods=['GET', 'POST'])
def registration():
    """Create a new account, then send the visitor to the login page."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if not form.validate_on_submit():
        # GET request or invalid submission: (re)render the form.
        return render_template('registration.html', title='Register', form=form)
    new_user = User(username=form.username.data, email=form.email.data)
    new_user.set_password(form.password.data)
    db.session.add(new_user)
    db.session.commit()
    return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate the user and redirect to the requested page."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if user is None or not user.check_password(form.password.data):
            # Invalid credentials: returns a bare error string ("You entered
            # a wrong login/password!") instead of re-rendering the form.
            return 'Вы ввели неправильный логин/пароль!'
        login_user(user, remember=form.remember_me.data)
        next_page = request.args.get('next')
        # Only honour same-site redirect targets (open-redirect protection).
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('index')
        return redirect(next_page)
    return render_template('login.html', title='Login', form=form)
@app.route('/logout')
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect(url_for('index'))
def read_the_file(file):
    """Returns the data of the file *file*.

    Each useful line is expected to look like ``key;value1;value2``.

    Fixes vs. the original: the handle is now closed via a context manager
    even if parsing raises, and blank/malformed lines are skipped instead
    of crashing with IndexError.

    :param file: the file to read (str)
    :return: the data (dictionary mapping key -> (value1, value2))
    """
    dico = {}
    with open(file, 'r') as fh:
        for line in fh:
            elements = line.split(';')
            if len(elements) < 3:
                continue  # skip blank or malformed lines
            dico[elements[0].strip()] = (elements[1].strip(), elements[2].strip())
    return dico
# Demo: print the parsed contents of mountains.txt (the file must exist
# in the working directory).
print(read_the_file('mountains.txt'))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 14 11:59:22 2017
@author: ares
"""
import sys
import os

# Bug fix: '~' is not expanded automatically by sys.path; expanduser makes
# the entry usable. NOTE(review): the path mixes '~' with '/home/ares' —
# confirm the intended absolute location.
sys.path.append(os.path.expanduser('~/home/ares/Code/PredyNet/predynet'))
from helpers import *
import torch
from torch.autograd import Variable
import numpy as np

M = 20  # Size of Patch Array
ySize = 120
timelength = 1000
# Stack of M x M sine-wave pattern patches, one per time step, with the
# orientation rotating by 0.25 per frame.
stim = np.zeros([M, M, timelength])
for ind in range(timelength):
    stim[:, :, ind] = getSineWavePatternPatch(size=[M, M], mult=[1, 1], orientation=0.25 * ind, sf=4,
                                              phase=0, wave_type='sine', radius=[8, 8], center=[M / 2, M / 2])
|
import boto3
import os
import csv
import time
# NOTE(review): AWS credentials are configured through os.system with
# placeholder values; real keys must never be hard-coded in source — use an
# IAM role or environment configuration instead.
os.system("aws configure set aws_access_key_id XXXX(you access key)")
os.system("aws configure set aws_secret_access_key XXXX(you secret key)")
os.system("aws configure set default.region eu-west-2")
# User-data boot scripts for the EC2 worker (shell) and collector (shell2)
# instances launched by lambda_handler below.
shell = """#!/bin/bash
sudo python /home/ec2-user/1.py
"""
shell2 = """#!/bin/bash
sudo python /home/ec2-user/2.py
"""
def lambda_handler(event, context):
    """Write the job parameters to S3 and launch EC2 worker instances.

    :param event: dict with keys ``M``, ``R``, ``dev``, ``mean``, ``p``
    :param context: Lambda context object (unused)
    """
    M = int(event['M'])
    R = int(event['R'])
    deviation = float(event['dev'])
    mean = float(event['mean'])
    p = float(event['p'])
    Z = M // R          # base chunk size per worker
    Y = M // R + M % R  # last worker also takes the remainder
    # newline='' prevents csv from emitting blank rows between records.
    with open('/tmp/value.csv', 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(('p', 'mean', 'deviation', 'Z', 'Y'))
        writer.writerow((p, mean, deviation, Z, Y))
    with open('/tmp/mt.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['95%', '99%'])
    s3 = boto3.resource('s3')
    s3.meta.client.upload_file('/tmp/value.csv', 'zhousq12cw', 'value.csv')
    s3.meta.client.upload_file('/tmp/mt.csv', 'zhousq12cw', 'mt.csv')
    # stdlib call instead of shelling out to ``rm -f``; the file was just
    # created above so it is guaranteed to exist.
    os.remove('/tmp/value.csv')
    # Launch the worker fleet; the collector instance (shell2) is always
    # started, workers (shell) only when more than one instance is needed.
    client = boto3.client('ec2')
    if R == 1:
        response2 = client.run_instances(ImageId='ami-fdabbf99', MinCount=1, MaxCount=1,
                                         InstanceType='t2.micro', UserData=shell2,
                                         KeyName='mac-euwestkp')
    else:
        response = client.run_instances(ImageId='ami-fdabbf99', MinCount=1, MaxCount=R - 1,
                                        InstanceType='t2.micro', UserData=shell,
                                        KeyName='mac-euwestkp')
        time.sleep(5)
        response2 = client.run_instances(ImageId='ami-fdabbf99', MinCount=1, MaxCount=1,
                                         InstanceType='t2.micro', UserData=shell2,
                                         KeyName='mac-euwestkp')
|
import numpy as np
def main():
    """Print C++ statements seeding 100 random tree locations.

    Bug fix: the original loop started at index 1 and silently dropped the
    first of the 100 sampled points.
    """
    xs = np.random.uniform(low=-49.0, high=49.0, size=(100,))
    ys = np.random.uniform(low=-49.0, high=49.0, size=(100,))
    for x, y in zip(xs, ys):
        print("treeLocations.push_back(make_pair(%ff, %ff));" % (x, y))


main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Create report for Apache logfiles. You can use
--consolidate or --regex keys for creating report
"""
from optparse import OptionParser
def open_files(files):
    """Yield ``(path, file object)`` pairs for each path in *files*."""
    for path in files:
        yield path, open(path)
def combine_lines(files):
    """Chain the lines of every ``(name, file object)`` pair into one stream."""
    for _, handle in files:
        yield from handle
def obfuscate_ipaddr(addr):
    """Blur an IPv4 address by rounding each octet down to a multiple of 10.

    Bug fix: ``int(n) / 10`` is float division on Python 3 and produced
    octets like ``'190.0'``; floor division restores integer octets.
    """
    return '.'.join(str((int(n) // 10) * 10) for n in addr.split('.'))
if __name__ == '__main__':
    # CLI: choose the parser implementation (-r) and optionally consolidate
    # all input logs into a single report (-c).
    parser = OptionParser()
    parser.add_option('-c', '--consolidate', dest='consolidate',
                      default=False, action='store_true',
                      help='consolidate log files')
    parser.add_option('-r', '--regex', dest='regex', default=False,
                      action='store_true', help='use regex parser')
    options, args = parser.parse_args()
    logfiles = args
    # Pick the report-generator implementation at runtime.
    if options.regex:
        from apache_log_parser_regex import generate_log_report
    else:
        from apache_log_parser_split import generate_log_report
    opend_files = open_files(logfiles)
    if options.consolidate:
        # Present all files as a single pseudo-file named CONSOLIDATED.
        opend_files = (('CONSOLIDATED', combine_lines(opend_files)),)
    for filename, file_obj in opend_files:
        print('*' * 60)
        print(filename)
        print('-' * 60)
        print('%-20s%s' % ('IP ADDRESS', 'BYTES TRANSFERRED'))
        print('-' * 60)
        report_dict = generate_log_report(file_obj)
        for ip_addr, bytes in report_dict:
            print('%-20s%s' % (obfuscate_ipaddr(ip_addr), sum(bytes)))
        print('=' * 60)
import gym
import baxter_env
import pybullet as p
import pybullet_data
import numpy as np
import cv2
import time
import matplotlib.pyplot as plt
class ObsWrapper(gym.ObservationWrapper):
    """Flatten the env's dict observation into one 128x128x4 float array
    (RGB scaled by 1/255 plus a depth channel scaled by 1/10)."""
    def __init__(self, env):
        super(ObsWrapper, self).__init__(env)
        self.observation_space = gym.spaces.Box(low=-1, high=1, shape=(128, 128, 4))
    def observation(self, obs):
        """Scale, stack and clip the 'eye_view' image and 'depth' map."""
        rgb = obs['eye_view'] / 255.
        side = rgb.shape[0]
        # Max depth is assumed to be 10 (per the original comment) -- confirm.
        depth_map = obs['depth'].reshape(side, side, 1) / 10.
        stacked = np.clip(np.concatenate([rgb, depth_map], axis=-1), -1, 1)
        return stacked.reshape((128, 128, 4))
class ActWrapper(gym.ActionWrapper):
    """Map agent actions from [-1, 1] to the env's [0, 20] range, logging
    every `action_verbose`-th action."""
    def __init__(self, env, action_verbose=100):
        # Counter used only for the periodic log line below.
        self.action_steps = 0
        self.action_verbose = action_verbose
        super(ActWrapper, self).__init__(env)
        self.action_space = gym.spaces.Box(-1, 1, (6,))
    def action(self, act):
        """Rescale and clip the raw action, with periodic logging."""
        scaled = np.clip((act + 1) * 10, 0, 20)
        if self.action_steps % self.action_verbose == 0:
            print(f"actions took in action_step {self.action_steps} are: {scaled}")
        self.action_steps += 1
        return scaled
def make_env():
    """Create the baxter env wrapped with the observation and action adapters."""
    return ActWrapper(ObsWrapper(gym.make('baxter_env-v0')))
if __name__ == "__main__":
env = make_env()
s = env.reset()
num_step = 0
while True:
action = env.sample_action()
next_state, r, done, info = env.step(action)
s = next_state
print(f"Compleated step: {num_step}")
print("Reward: ", r)
print("Done: ", done)
print("Info: ", info)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.core.urlresolvers import reverse
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from azure.storage.blob import BlockBlobService
from posts.forms import UserForm, UserUpdateForm, UserChangePassword, UserUpdateGTIForm, \
HerramientaForm, HerramientaUpdateForm
from django.contrib.auth.models import User
from django.contrib import messages
from posts.models import Perfil, Herramienta
from decouple import config
def index(request):
    """Render the landing page with every tool in the catalogue."""
    context = {'herramientas': Herramienta.objects.all()}
    return render(request, 'index.html', context)
# Authentication
def login_view(request):
    """Authenticate a visitor: GET renders the form, POST attempts the login."""
    if request.user.is_authenticated():
        # Already logged in -- nothing to do.
        return redirect(reverse('catalogo:index'))
    mensaje = ''
    if request.method == 'POST':
        user = authenticate(
            username=request.POST.get('username'),
            password=request.POST.get('password'),
        )
        if user is None:
            mensaje = 'Nombre de usuario o clave no valido'
        else:
            login(request, user)
            return redirect(reverse('catalogo:index'))
    return render(request, 'login.html', {'mensaje': mensaje})
def logout_view(request):
    """Terminate the session and send the visitor back to the catalogue."""
    logout(request)
    return HttpResponseRedirect(reverse('catalogo:index'))
# Tools
def herramienta_create(request):
    """Create a new Herramienta from the submitted form.

    New records start in estado=1 (pending review) with both reviewer slots
    unassigned; they become publicly visible only after the review workflow.
    """
    if request.method == 'POST':
        form = HerramientaForm(request.POST, request.FILES)
        if form.is_valid():
            cleaned_data = form.cleaned_data
            nombre = cleaned_data.get('nombre')
            sistemaoperativo = cleaned_data.get('sistemaOperativo')
            plataforma = cleaned_data.get('plataforma')
            fichatecnica = cleaned_data.get('fichaTecnica')
            licencia = cleaned_data.get('licencia')
            estado = 1  # 1 = pending review
            revisor1 = 0  # 0 = reviewer slot unassigned
            revisor2 = 0
            autor = request.user.id
            descripcion = cleaned_data.get('descripcion')
            urlreferencia = cleaned_data.get('urlReferencia')
            logo = 'default'  # placeholder used when no logo file is uploaded
            id_anterior = 0  # 0 = not an edit of an earlier version
            logoL = True if 'logo' in request.FILES else False
            if logoL:
                # Re-assemble the uploaded logo from its chunks before sending
                # it to Azure.  NOTE(review): `file + chunk` relies on Python 2
                # str/bytes equivalence -- would raise TypeError under Python 3.
                myfile = request.FILES['logo']
                chu = myfile.chunks()
                st = ''
                file = str(st)
                for chunk in chu:
                    a = chunk
                    file = file + a
                url = guardarDarUrl(file, myfile.name)
                logo = url
            herramienta = Herramienta.objects.create(id_anterior=id_anterior, nombre=nombre,
                sistemaOperativo=sistemaoperativo, plataforma=plataforma,
                fichaTecnica=fichatecnica, licencia=licencia, estado=estado,
                revisor1=revisor1, revisor2=revisor2, autor=autor,
                descripcion=descripcion, urlReferencia=urlreferencia, logo=logo)
            # NOTE(review): objects.create() already persisted the row; this
            # second save() is redundant.
            herramienta.save()
            messages.success(request, 'Se ha creado con éxito la herramienta ' +
                herramienta.nombre + ', los cambios serán publicados hasta terminar el proceso de vigía',
                extra_tags='alert alert-success')
            return redirect(reverse('catalogo:index'))
    else:
        form = HerramientaForm()
    return render(request, 'herramienta_create.html', {'form': form})
def herramienta_update(request, pk):
    """Edit a tool by creating a NEW Herramienta row pointing back at the old one.

    The existing row is moved to estado=4 (being modified) and the new row
    starts the review workflow again (estado=1, reviewers reset,
    id_anterior=pk).  The original row is kept until the edit is published.
    """
    herramienta = Herramienta.objects.get(id=pk)
    current_logo = herramienta.logo  # reused unless a new logo is uploaded
    if request.method == 'POST':
        form = HerramientaUpdateForm(request.POST, request.FILES)
        if form.is_valid():
            # Mark the current version as "being modified" before cloning it.
            herramienta.estado = 4
            herramienta.save()
            cleaned_data = form.cleaned_data
            nombre = cleaned_data.get('nombre')
            sistemaOperativo = cleaned_data.get('sistemaOperativo')
            plataforma = cleaned_data.get('plataforma')
            fichaTecnica = cleaned_data.get('fichaTecnica')
            licencia = cleaned_data.get('licencia')
            descripcion = cleaned_data.get('descripcion')
            urlReferencia = cleaned_data.get('urlReferencia')
            logo = current_logo
            logoL = True if 'logo' in request.FILES else False
            if logoL:
                # Re-assemble uploaded logo chunks (Python 2 str concatenation).
                myfile = request.FILES['logo']
                chu = myfile.chunks()
                st = ''
                file = str(st)
                for chunk in chu:
                    a = chunk
                    file = file + a
                url = guardarDarUrl(file, myfile.name)
                logo = url
            id_anterior = pk  # link the clone back to the row it replaces
            revisor1 = 0
            revisor2 = 0
            estado = 1  # edited version must be reviewed again
            autor = request.user.id
            herramienta_n = Herramienta.objects.create(id_anterior=id_anterior, nombre=nombre,
                sistemaOperativo=sistemaOperativo, plataforma=plataforma,
                fichaTecnica=fichaTecnica, licencia=licencia, estado=estado,
                revisor1=revisor1, revisor2=revisor2, autor=autor,
                descripcion=descripcion, urlReferencia=urlReferencia, logo=logo)
            # NOTE(review): redundant save after objects.create().
            herramienta_n.save()
            messages.success(request, 'Se ha editado con éxito la herramienta '+
                herramienta.nombre+', los cambios serán publicados hasta terminar el proceso de vigía',
                extra_tags='alert alert-success')
            return render(request, 'herramienta_detail.html',{'herramienta': herramienta})
    else:
        form = HerramientaUpdateForm(instance=herramienta)
    return render(request, 'herramienta_update.html', {'form': form, 'id':pk})
def herramienta_detail(request, pk):
    """Show a tool's detail page, with behaviour depending on auth and estado.

    Observed estado values elsewhere in this module: 1 = pending review,
    2 = reviewed/pending publish, 3 = published, 4 = being modified,
    5 = retired replaced version.

    NOTE(review): several paths fall through without a return statement
    (e.g. authenticated + estado not handled, or the DoesNotExist branches),
    which makes Django raise "view didn't return an HttpResponse" -- confirm
    whether those states are actually reachable.
    """
    herramienta = Herramienta.objects.get(id=pk)
    if request.user.is_authenticated():
        # Admins (role 1) delete via POST from this page.
        if request.method == 'POST' and request.user.perfil.role == 1:
            herramienta.delete()
            messages.success(request, 'Ha eliminado con éxito a ' + herramienta.nombre,
                             extra_tags='alert alert-success')
            return redirect(reverse('catalogo:index'))
        elif herramienta.estado == 3:
            # Published version: plain detail page.
            context = {'herramienta': herramienta}
            return render(request, 'herramienta_detail.html', context)
        elif herramienta.estado == 4:
            # Version that has a pending edit: show who is editing it.
            temp = list(Herramienta.objects.all().filter(id_anterior=pk).exclude(estado=5))
            herramienta_old = temp.pop()
            autor = Perfil.objects.get(id=herramienta_old.autor)
            estado = 'en revisión' if herramienta_old.estado == 1 else 'pendiente a publicar se'
            msg = 'Esta es una versión que será modificada, ' + autor.user.first_name +\
                ' ' + autor.user.last_name + ' la ha modificado y está ' + estado +\
                '. por tal motivo no es posible crear ediciones'
            context = {'herramienta': herramienta, 'herramienta_old': herramienta_old}
            messages.warning(request,msg, extra_tags='alert alert-warning')
            return render(request, 'herramienta_detail.html', context)
        elif herramienta.estado == 1 or herramienta.estado == 2:
            # Version inside the review pipeline.
            if herramienta.id_anterior == 0:
                # Brand-new tool (not an edit of an older one).
                context = {'herramienta': herramienta}
                return render(request, 'herramienta_detail.html', context)
            else:
                try:
                    # Edit of an existing tool: show author (and reviewers once assigned).
                    herramienta_old = Herramienta.objects.get(id=herramienta.id_anterior, estado=4)
                    autor = Perfil.objects.get(id=herramienta.autor)
                    context = {'herramienta': herramienta, 'herramienta_old': herramienta_old}
                    msg = 'Esta modificación fue realizada por: ' + autor.user.first_name + ' ' + autor.user.last_name
                    if not herramienta.revisor1 == 0 and not herramienta.revisor2 == 0:
                        revisor1 = Perfil.objects.get(id=herramienta.revisor1)
                        revisor2 = Perfil.objects.get(id=herramienta.revisor2)
                        msg = msg + ' y los revisores fueron: ' + revisor1.user.first_name + ' ' +\
                            revisor1.user.last_name + ' y ' + revisor2.user.first_name \
                            + ' ' + revisor2.user.last_name
                    messages.warning(request, msg, extra_tags='alert alert-warning')
                    return render(request, 'herramienta_detail.html', context)
                except Herramienta.DoesNotExist:
                    # Inconsistent database: edit without its estado=4 parent.
                    print("Base de datos inconsistente")
                except Herramienta.MultipleObjectsReturned:
                    print("Base de datos inconsistente")
        else:
            # NOTE(review): unreachable -- estado == 3 was already handled above.
            if herramienta.estado == 3:
                context = {'herramienta': herramienta}
                return render(request, 'herramienta_detail.html', context)
    else:
        # Anonymous visitors may only see published / being-modified versions.
        if herramienta.estado == 3 or herramienta.estado == 4:
            context = {'herramienta': herramienta}
            return render(request, 'herramienta_detail.html', context)
def herramientas_vigia(request):
    """Review dashboard: tools awaiting review or publication.

    Anonymous visitors fall back to the public index of published tools.
    """
    if not request.user.is_authenticated():
        publicadas = Herramienta.objects.all().filter(estado=3)
        return render(request, 'index.html', {'herramientas': publicadas})
    # Pending review, excluding the user's own submissions and ones they
    # already claimed as first reviewer.
    pendientes_revision = (Herramienta.objects.all().filter(estado=1)
                           .exclude(autor=request.user.id)
                           .exclude(revisor1=request.user.id))
    pendientes_publicar = Herramienta.objects.all().filter(estado=2)
    return render(request, 'vigia.html', {
        'herramientas_r': pendientes_revision,
        'herramientas_p': pendientes_publicar,
    })
def herramienta_revisar(request, pk):
    """Record the current user as a reviewer of the given tool.

    The second reviewer promotes the tool to estado=2 (pending publish).
    Anonymous visitors are sent to the public index instead.
    """
    if not request.user.is_authenticated():
        publicadas = Herramienta.objects.all().filter(estado=3)
        return render(request, 'index.html', {'herramientas': publicadas})
    herramienta = Herramienta.objects.get(id=pk)
    if herramienta.revisor1 == 0:
        # First reviewer slot still free.
        herramienta.revisor1 = request.user.id
    else:
        # Second review completes the step.
        herramienta.revisor2 = request.user.id
        herramienta.estado = 2
    herramienta.save()
    messages.success(request, 'Ha revisado con éxito a '+herramienta.nombre, extra_tags='alert alert-success')
    return redirect(reverse('catalogo:vigia'))
def herramienta_publicar(request,pk):
    """Publish a reviewed tool (estado=3) and retire the version it replaces.

    Anonymous visitors are sent to the public index instead.
    """
    if not request.user.is_authenticated():
        publicadas = Herramienta.objects.all().filter(estado=3)
        return render(request, 'index.html', {'herramientas': publicadas})
    herramienta = Herramienta.objects.get(id=pk)
    herramienta.estado = 3
    messages.success(request, 'Ha sido publicado con éxito a '+herramienta.nombre, extra_tags='alert alert-success')
    if not herramienta.id_anterior == 0:
        # This was an edit: mark the superseded version as retired (estado=5).
        anterior = Herramienta.objects.get(id=herramienta.id_anterior)
        anterior.estado = 5
        anterior.save()
        herramienta.id_anterior = 0
    herramienta.save()
    return redirect(reverse('catalogo:vigia'))
#User account management
def usuario_create(request):
    """Create a Django User plus its Perfil (role and optional photo).

    The Perfil row is assumed to be created automatically when the User is
    saved (e.g. by a post_save signal) -- this view only fills it in.
    """
    if request.method == 'POST':
        form = UserForm(request.POST, request.FILES)
        if form.is_valid():
            cleaned_data = form.cleaned_data
            username = cleaned_data.get('username')
            first_name = cleaned_data.get('first_name')
            last_name = cleaned_data.get('last_name')
            password = cleaned_data.get('password')
            email = cleaned_data.get('email')
            roles = cleaned_data.get('roles')
            user_model = User.objects.create_user(username=username, password=password)
            user_model.first_name = first_name
            user_model.last_name = last_name
            user_model.email = email
            user_model.save()
            user = User.objects.get(username=username)
            profile_model = Perfil.objects.get(user_id=user.id)
            # First selected role wins.
            profile_model.role = roles[0]
            fotoSubio = True if 'foto' in request.FILES else False
            if fotoSubio:
                # Re-assemble uploaded photo chunks (Python 2 str concatenation).
                myfile = request.FILES['foto']
                chu = myfile.chunks()
                st = ''
                file = str(st)
                for chunk in chu:
                    a = chunk
                    file = file + a
                url = guardarDarUrl(file, myfile.name)
                profile_model.fotoUrl = url
            profile_model.save()
            return HttpResponseRedirect(reverse('catalogo:users_list'))
    else:
        form = UserForm()
    return render(request, 'user_form.html', {'form': form})
def user_update(request, pk):
    """Admin edit of an existing user and its Perfil; redirects to the user list.

    NOTE(review): nearly identical to user_updateGTI below except for the
    redirect target -- candidates for consolidation.
    """
    user_model = User.objects.get(id=pk)
    if request.method == 'POST':
        form = UserUpdateForm(request.POST, request.FILES, instance=user_model)
        if form.is_valid():
            cleaned_data = form.cleaned_data
            username = cleaned_data.get('username')
            first_name = cleaned_data.get('first_name')
            last_name = cleaned_data.get('last_name')
            email = cleaned_data.get('email')
            roles = cleaned_data.get('roles')
            user_model.first_name = first_name
            user_model.last_name = last_name
            user_model.email = email
            # Only touch the username when it actually changed.
            if user_model.username != username:
                user_model.username = username
            user_model.save()
            profile_model = Perfil.objects.get(user_id=user_model.id)
            fotoSubio = True if 'foto' in request.FILES else False
            if fotoSubio:
                # Re-assemble uploaded photo chunks (Python 2 str concatenation).
                myfile = request.FILES['foto']
                chu = myfile.chunks()
                st = ''
                file = str(st)
                for chunk in chu:
                    a = chunk
                    file = file + a
                url = guardarDarUrl(file, myfile.name)
                profile_model.fotoUrl = url
            profile_model.role = roles[0]
            profile_model.save()
            return HttpResponseRedirect(reverse('catalogo:users_list'))
    else:
        form = UserUpdateForm(instance=user_model)
    return render(request, 'user_update.html', {'form': form})
def user_updateGTI(request, pk):
    """Self-service edit of a user and its Perfil; redirects to the index.

    NOTE(review): nearly identical to user_update above except for the form
    class and redirect target -- candidates for consolidation.
    """
    user_model = User.objects.get(id=pk)
    if request.method == 'POST':
        form = UserUpdateGTIForm(request.POST, request.FILES, instance=user_model)
        if form.is_valid():
            cleaned_data = form.cleaned_data
            username = cleaned_data.get('username')
            first_name = cleaned_data.get('first_name')
            last_name = cleaned_data.get('last_name')
            email = cleaned_data.get('email')
            roles = cleaned_data.get('roles')
            user_model.first_name = first_name
            user_model.last_name = last_name
            user_model.email = email
            # Only touch the username when it actually changed.
            if user_model.username != username:
                user_model.username = username
            user_model.save()
            profile_model = Perfil.objects.get(user_id=user_model.id)
            fotoSubio = True if 'foto' in request.FILES else False
            if fotoSubio:
                # Re-assemble uploaded photo chunks (Python 2 str concatenation).
                myfile = request.FILES['foto']
                chu = myfile.chunks()
                st = ''
                file = str(st)
                for chunk in chu:
                    a = chunk
                    file = file + a
                url = guardarDarUrl(file, myfile.name)
                profile_model.fotoUrl = url
            profile_model.role = roles[0]
            profile_model.save()
            return HttpResponseRedirect(reverse('catalogo:index'))
    else:
        form = UserUpdateGTIForm(instance=user_model)
    return render(request, 'user_update.html', {'form': form})
def user_change_password(request):
    """Let the logged-in user change their password, keeping the session alive."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect(reverse('catalogo:login'))
    if request.method == 'POST':
        form = UserChangePassword(request.user, request.POST)
        if form.is_valid():
            user = form.save()
            # Important! Re-hash the session so the user is not logged out.
            update_session_auth_hash(request, user)
            messages.success(request, 'Su contraseña fue exitosamente cambiada!', extra_tags='alert alert-success')
            return HttpResponseRedirect(reverse('catalogo:index'))
    else:
        form = UserChangePassword(request.user)
    return render(request, 'user_change_password.html', {'form': form})
def users_list(request):
    """List every account; restricted to administrators (Perfil.role == 1).

    BUG FIX: the original returned None (an HTTP 500) for anonymous or
    non-admin visitors; they are now redirected to the login page, matching
    user_change_password's handling.
    """
    if request.user.is_authenticated() and request.user.perfil.role == 1:
        usuarios = User.objects.all()
        context = {'usuarios': usuarios}
        return render(request, 'user_list_detail.html', context)
    return HttpResponseRedirect(reverse('catalogo:login'))
# Save to AZURE
def guardarDarUrl(file, filemane):
    """Upload *file* bytes to the Azure 'pictures' container and return a URL.

    NOTE(review): `filemane` looks like a typo for `filename`, but renaming it
    could break keyword callers, so it is kept.
    SECURITY NOTE: the SAS token below is hard-coded and already expired
    (se=2018-05-19); it should come from configuration like the account keys.
    """
    baseUrl = 'https://catalogo2018storage.blob.core.windows.net/pictures/'
    sas = '?sv=2017-07-29&ss=bf&srt=co&sp=rwdlac&se=2018-05-19T00:27:02Z&st=2018-04-01T16:27:02Z&spr=https,http&sig=iJy3%2BhD2JhuYvXTRfsXT2qTM2p08tfhNGAfb%2BG5YR6w%3D'
    # Create the BlockBlockService that is used to call the Blob service for the storage account
    block_blob_service = BlockBlobService(account_name=config('ACCOUNT_NAME', default=''),
                                          account_key=config('ACCOUNT_KEY', default=''))
    # Upload the created file, use local_file_name for the blob name
    block_blob_service.create_blob_from_bytes('pictures', filemane, file)
    return baseUrl+filemane+sas
#!/usr/bin/env python3
from flask import Flask, send_from_directory
app = Flask(__name__)
@app.route("/video", methods=["GET"])
def get_movie():
    """Serve /video.mp4; conditional=True enables HTTP Range (seek) requests."""
    # SECURITY NOTE(review): serving from the filesystem root ("/") -- confirm
    # the intent; a dedicated media directory would be safer.
    return send_from_directory(
        "/",
        "video.mp4",
        conditional=True,
    )
if __name__ == '__main__':
    # SECURITY NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug
    # debugger to the network; disable debug in production.
    app.run(host='0.0.0.0', debug=True)
|
from appuser.models import Post
from django.db import models
from django.shortcuts import render
from django.views.generic import ListView, DetailView, CreateView, UpdateView
from .models import Post
from .forms import PostForm , EditForm
# Create your views here.
#def home(request):
#return render(request, 'home.html',{})
class HomeView(ListView):
    """List every Post on the home page."""
    model = Post
    template_name = 'home.html'
class ArticleDetailView(DetailView):
    """Show a single Post in full."""
    model = Post
    template_name = 'article_details.html'
class AddPostView(CreateView):
    """Create a new Post via PostForm."""
    model = Post
    form_class = PostForm
    template_name = 'add_post.html'
    #fields = '__all__'
    #fields = ('title', 'body')
class UpdatePostView(UpdateView):
    """Edit an existing Post via EditForm."""
    model = Post
    form_class = EditForm
    template_name = 'update_post.html'
    #fields = ['title', 'title_tag','body']
|
#MenuTitle: Batch Generate Fonts
# -*- coding: utf-8 -*-
__doc__="""
Batch Generate Fonts.
"""
from GlyphsApp import OTF, TTF, WOFF, WOFF2, EOT, UFO
fileFolder = "~/Desktop/files"
otf_path = "~/Desktop/export"
ttf_path = "~/Desktop/export"
ufo_path = "~/Desktop/export"
web_path = "~/Desktop/export"
OTF_AutoHint = True
TTF_AutoHint = True
RemoveOverlap = True
UseSubroutines = True
UseProductionNames = True
Web_OutlineFormat = TTF
import os
fileFolder = os.path.expanduser(fileFolder)
fileNames = os.listdir(fileFolder)
for fileName in fileNames:
if os.path.splitext(fileName)[1] == ".glyphs":
font = GSFont(os.path.join(fileFolder, fileName))
print font.familyName
for instance in font.instances:
print "== Exporting OTF =="
print instance.generate(Format=OTF, FontPath=os.path.expanduser(otf_path), AutoHint=OTF_AutoHint, RemoveOverlap=RemoveOverlap, UseSubroutines=UseSubroutines, UseProductionNames=UseProductionNames)
print
for instance in font.instances:
print "== Exporting TTF =="
print instance.generate(Format=TTF, FontPath=os.path.expanduser(ttf_path), AutoHint=TTF_AutoHint, RemoveOverlap=RemoveOverlap, UseProductionNames=UseProductionNames)
print
for instance in font.instances:
print "== Exporting Web =="
print instance.generate(Format=Web_OutlineFormat, FontPath=os.path.expanduser(web_path), AutoHint=TTF_AutoHint, RemoveOverlap=RemoveOverlap, UseSubroutines=UseSubroutines, UseProductionNames=UseProductionNames, Containers=[WOFF, WOFF2, EOT])
print
for instance in font.instances:
print "== Exporting UFO =="
print instance.generate(Format=UFO, FontPath=os.path.expanduser(ufo_path), UseProductionNames=UseProductionNames)
print |
#!/usr/local/bin/python
import json
import os
import sys
import traceback
import urllib2
import Config
def makeOpener():
    """Build a urllib2 opener pre-configured with the admin basic-auth credentials."""
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, *Config.ADMIN)
    return urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_mgr))
# Monotonically increasing counter appended to every URL as a cache-buster.
_index = 0
def readUrl(url):
    """Fetch *url* with admin auth, appending a unique query param per call.

    Returns the response body, or None on any error (logged to stdout).
    """
    try:
        global _index
        _index += 1
        sep = '&' if '?' in url else '?'
        url = '%s%s%d' % (url, sep, _index)
        return makeOpener().open(url).read()
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        print "Couldn't open URL", url
        traceback.print_exc(file=sys.stdout)
        sys.stdout.flush()
        return None
def replaceAtomic(filename, value):
    """Atomically replace *filename*'s contents with *value*.

    Writes a sibling .tmp file first, then os.rename()s it over the target so
    readers never observe a partial file.  Returns True on success.
    """
    tmpname = filename + '.tmp'
    try:
        f = open(tmpname, 'w')
        f.write(value)
        f.close()
    except:
        # NOTE(review): bare except; the half-written .tmp file is left behind.
        print "Couldn't write to", tmpname, value
        traceback.print_exc(file=sys.stdout)
        sys.stdout.flush()
        return False
    try:
        # rename() is atomic on POSIX when source and target share a filesystem.
        os.rename(tmpname, filename)
        Config.log('Wrote ' + filename)
        return True
    except:
        print "Couldn't rename", tmpname, "to", filename
        traceback.print_exc(file=sys.stdout)
        sys.stdout.flush()
        return False
def replaceJson(filename, **args):
    """Serialise the keyword arguments to JSON and atomically write them to *filename*."""
    payload = json.dumps(args)
    return replaceAtomic(filename, payload)
def readFile(f):
    """Return the full contents of file *f*, or None on any error (logged).

    NOTE(review): the file handle is never explicitly closed; CPython's
    refcounting closes it, but a with-statement would be safer.
    """
    try:
        return open(f).read()
    except:
        print "Couldn't open file", f
        traceback.print_exc(file=sys.stdout)
        sys.stdout.flush()
        return None
|
""" Given the root node of a binary search tree,
return the sum of values of all nodes with value between L and R (inclusive).
The binary search tree is guaranteed to have unique values.
Input: root = [10,5,15,3,7,null,18], L = 7, R = 15
Output: 32
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from typing import List
class TreeNode:
    """Simple binary-tree node."""
    def __init__(self, x):
        self.val = x      # node value
        self.left = None  # left child (TreeNode or None)
        self.right = None # right child (TreeNode or None)
class Solution:
    def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
        """Sum of node values within [L, R] in a BST (iterative traversal,
        pruning subtrees that cannot contain in-range values)."""
        self.sum = 0
        stack = [root]
        while stack:
            node = stack.pop()
            if node is None:
                continue
            if L <= node.val <= R:
                self.sum += node.val
            # Right subtree only holds values > node.val; visit it when the
            # range extends above this node (and symmetrically for the left).
            if node.val < R:
                stack.append(node.right)
            if node.val > L:
                stack.append(node.left)
        return self.sum
if __name__ == "__main__":
my_solution = Solution()
print(my_solution.rangeSumBST(root = [10,5,15,3,7,None,18], L = 7, R = 15))
#print(sum([1,1,2,3]))
|
import tkinter as tk
import pyglet
# Main window and status bar for the convolution demo GUI.
window = tk.Tk()
window.title('卷积实现过程')  # "Convolution implementation process"
window.geometry('900x600')
txt = tk.StringVar()
txt.set('请选择连续信号或者是离散信号')  # "Please choose a continuous or discrete signal"
Bar = tk.Label(window, textvariable=txt, width=50, height=2)
Bar.place(x=300, y=10)
# True while the continuous-signal view is active (toggled by fc()/fd() below).
Barflag = True
def d_app():  # discrete signal
    """Create the five demo-selection buttons for the discrete-signal view."""
    # Buttons are stored in globals so c_app() can destroy them on view switch.
    global d_app1
    global d_app2
    global d_app3
    global d_app4
    global d_app5
    d_app1 = tk.Button(window, text='一', width=5, height=2, command=d_a1)
    d_app1.place(x=120, y=135)
    d_app2 = tk.Button(window, text='二', width=5, height=2, command=d_a2)
    d_app2.place(x=120, y=195)
    d_app3 = tk.Button(window, text='三', width=5, height=2, command=d_a3)
    d_app3.place(x=120, y=255)
    d_app4 = tk.Button(window, text='四', width=5, height=2, command=d_a4)
    d_app4.place(x=120, y=315)
    d_app5 = tk.Button(window, text='五', width=5, height=2, command=d_a5)
    d_app5.place(x=120, y=375)
def d_a1():  # GIF animation
    """Play abc.gif in a separate pyglet window (blocks in pyglet.app.run())."""
    ag_file = "abc.gif"
    animation = pyglet.resource.animation(ag_file)
    sprite = pyglet.sprite.Sprite(animation)
    win = pyglet.window.Window(width=sprite.width, height=sprite.height)
    green = 0, 1, 0, 1
    pyglet.gl.glClearColor(*green)
    @win.event
    def on_draw():
        win.clear()
        sprite.draw()
    pyglet.app.run()
def d_a2_1():
    """Show the static abc.gif image on a fresh canvas."""
    # image_file must stay referenced globally or Tk garbage-collects the image.
    global image_file
    Demonstration = tk.Canvas(window, height='550', width='750')
    image_file = tk.PhotoImage(file="abc.gif")
    Demonstration.create_image(0, 0, anchor="nw", image=image_file)
    Demonstration.place(x=200, y=50)
def d_a2():
    """Destroy the current canvas and rebuild it via d_a2_1()."""
    global Demonstration
    global image_file
    Demonstration.destroy()
    d_a2_1()
def d_a3():
    # Placeholder: demo not implemented yet.
    pass
def d_a4():
    # Placeholder: demo not implemented yet.
    pass
def d_a5():
    # Placeholder: demo not implemented yet.
    pass
def c_app():  # continuous signal
    """Swap the discrete demo buttons for the continuous-signal demo buttons.

    NOTE(review): assumes d_app() ran first -- destroying d_app1..d_app5
    raises NameError otherwise.
    """
    d_app1.destroy()
    d_app2.destroy()
    d_app3.destroy()
    d_app4.destroy()
    d_app5.destroy()
    c_app1 = tk.Button(window, text='Ⅰ', width=5, height=2, command=c_a1)
    c_app1.place(x=120, y=135)
    c_app2 = tk.Button(window, text='Ⅱ', width=5, height=2, command=c_a2)
    c_app2.place(x=120, y=195)
    c_app3 = tk.Button(window, text='Ⅲ', width=5, height=2, command=c_a3)
    c_app3.place(x=120, y=255)
    c_app4 = tk.Button(window, text='Ⅳ', width=5, height=2, command=c_a4)
    c_app4.place(x=120, y=315)
    c_app5 = tk.Button(window, text='Ⅴ', width=5, height=2, command=c_a5)
    c_app5.place(x=120, y=375)
def c_a1():  # GIF animation
    """Play a.gif in a separate pyglet window (blocks in pyglet.app.run())."""
    ag_file = "a.gif"
    animation = pyglet.resource.animation(ag_file)
    sprite = pyglet.sprite.Sprite(animation)
    win = pyglet.window.Window(width=sprite.width, height=sprite.height)
    green = 0, 1, 0, 1
    pyglet.gl.glClearColor(*green)
    @win.event
    def on_draw():
        win.clear()
        sprite.draw()
    pyglet.app.run()
def c_a2():
    # Placeholder: demo not implemented yet.
    pass
def c_a3():
    # Placeholder: demo not implemented yet.
    pass
def c_a4():
    # Placeholder: demo not implemented yet.
    pass
def c_a5():
    # Placeholder: demo not implemented yet.
    pass
def Discrete_app():  # discrete signal
    """Show the discrete-signal start image (b.gif) and its demo buttons."""
    global image_file
    Demonstration = tk.Canvas(window, height='550', width='750')
    image_file = tk.PhotoImage(file="b.gif")
    Demonstration.create_image(0, 0, anchor="nw", image=image_file)
    Demonstration.place(x=200, y=50)
    d_app()
    # NOTE(review): runs an external script inline with this module's globals;
    # confirm hello.py exists and is trusted.
    exec(open("hello.py").read())
def Continuous_app():  # continuous signal
    """Show the continuous-signal start image (a.gif) and its demo buttons."""
    global image_file
    Demonstration = tk.Canvas(window, height='550', width='750')
    image_file = tk.PhotoImage(file="a.gif")
    Demonstration.create_image(0, 0, anchor="nw", image=image_file)
    Demonstration.place(x=200, y=50)
    c_app()
    # NOTE(review): see Discrete_app -- same inline exec of hello.py.
    exec(open("hello.py").read())
def fc():
    """Switch to the continuous view (no-op if it is already showing)."""
    global Barflag
    if Barflag == False:
        Barflag = True
        txt.set('连续信号')
        Demonstration.destroy()
        Continuous_app()
def fd():
    """Switch to the discrete view (no-op if it is already showing)."""
    global Barflag
    global Demonstration
    if Barflag == True:
        Barflag = False
        txt.set('离散信号')
        Demonstration.destroy()
        Discrete_app()
# Main-view widgets: the two switch buttons and the initial formula canvas.
Continuous = tk.Button(window, text='连续信号', width=10, height=3, command=fc)
Continuous.place(x=5, y=300)
Discrete = tk.Button(window, text='离散信号', width=10, height=3, command=fd)
Discrete.place(x=5, y=200)
Demonstration = tk.Canvas(window, height='550', width='750')
image_file = tk.PhotoImage(file="formulas.gif")
image = Demonstration.create_image(0, 0, anchor="nw", image=image_file)
Demonstration.place(x=200, y=50)
window.mainloop()
|
def example_sort(arr, example_arr):
    """Sort *arr* to follow the ordering defined by *example_arr*.

    Every element of *arr* must appear in *example_arr* (KeyError otherwise).
    The sort is stable, so duplicates keep their relative order.
    """
    rank = {}
    for position, value in enumerate(example_arr):
        rank[value] = position
    return sorted(arr, key=rank.__getitem__)
|
def main():
    """Load the tab/comma-delimited graph from test.txt, prompt for the two
    endpoints, and print the shortest path length between them."""
    graph = {}
    with open('test.txt') as fh:
        for row in fh:
            fields = row.split('\t')
            vertex = int(fields[0])
            edges = []
            for entry in fields[1:]:  # use [1:-1] for actual graph, [1:] for test
                parts = entry.split(',')
                edges.append((int(parts[0]), int(parts[1])))
            graph[vertex] = edges
    start = int(input('enter start vertex: '))
    end = int(input('enter end vertex: '))
    print('shortest path length = {}'.format(dijkstra(graph, start, end)))
def dijkstra(vDict, start, end):
"""
dijkstra(vDict, start, end):
Function to return shortest path length from start to end vertices using dijkstra's algorithm,
Given directed, weighted graph vDict = {all v: [all (u, weight) for u connected to v]}.
"""
dist = {start: 0, 'INITIAL': 9999999999}
seen = {start}
while(end not in seen):
mintail = 'INITIAL'
minweight = dist[mintail]
for tail in seen:
for head, weight in vDict[tail]:
if head not in seen and weight + dist[tail] < minweight + dist[mintail]:
minweight = weight
mintail = tail
minhead = head
if minweight == 9999999999:
return 'NO PATH FOUND'
dist[minhead] = minweight + dist[mintail]
seen.add(minhead)
return dist[end]
main() |
#!/usr/bin/python
import sys
import time
import struct
import re
import numpy as np
import katcp_wrapper
import katadc
import pyqtgraph as pg
# Bitstream selection (previously used alternatives kept for reference).
# boffile='katadc_zdok0_snap_2016_Mar_10_2010.bof.gz'
boffile='katadc_zdok0_snap_2017_Oct_23_1613.bof.gz'
# boffile='katadc_zdok1_snap_2016_Aug_18_1351.bof.gz'
# boffile='katadc_zdok1_snap_2018_Feb_08_1443.bof.gz'
# boffile='adc5g_zdok0_snap_2016_Mar_11_1753.bof.gz'
# roach = 'r1510'
# roach = 'r1511'
# roach = 'r1807'
roach = '10.32.127.33'  # ROACH board address
katcp_port = 7147
zdok = 0
# Infer which ZDOK slot the bitstream targets from its filename.
# NOTE(review): works, but the pattern should be a raw string: r'zdok(\d)'.
m = re.search('zdok(\d)', boffile)
if m:
    zdok = int(m.group(1))
def katadc_init(fpga):
    """Write the KATADC SPI setup registers on both ZDOK slots.

    NOTE(review): register values tuned for 300 MHz per the original comment
    -- confirm before reuse at other clock rates.
    """
    registers = [0x0000, 0x0001, 0x0002, 0x0003, 0x0009, 0x000A, 0x000B, 0x000E, 0x000F]
    # val = [0x7FFF, 0xBAFF, 0x007F, 0x807F, 0x03FF, 0x007F, 0x807F, 0x00FF, 0x007F]
    words = [0x7FFF, 0xB2FF, 0x007F, 0x807F, 0x03FF, 0x007F, 0x807F, 0x00FF, 0x007F]  # 300 MHz
    #if interleaved: val[4] = 0x23FF # Uncomment this line for interleaved mode
    for reg, word in zip(registers, words):
        print('Setting ADC register %04Xh to 0x%04X' % (reg, word))
        # Program both ZDOKs (this could be made smarter if needed).
        katadc.spi_write_register(fpga, 0, reg, word)
        katadc.spi_write_register(fpga, 1, reg, word)
# Connect to the board, optionally reprogram it, grab one snapshot per
# polarisation, and plot time series plus amplitude histograms.
print('Connecting to server %s on port %i ... ' % (roach, katcp_port)),
fpga = katcp_wrapper.FpgaClient(roach, katcp_port, timeout=10)
time.sleep(0.1)
if fpga.is_connected():
    print('ok')
    print('-' * 20)
else:
    print('ERROR connecting to server %s on port %i.\n' % (roach,katcp_port))
    fpga.stop()
    exit()
katadc_init(fpga)
# Pass "-s" on the command line to skip reprogramming the FPGA.
if len(sys.argv) < 2 or sys.argv[1] != '-s':
    print('Programming %s ...' % boffile),
    fpga.progdev(boffile)
    print('done')
# Initialize RF frontend
# for inp in ('I', 'Q'):
#     rf = katadc.rf_fe_get(fpga, zdok, inp)
#     if not rf['enabled']:
#         print('Enable gain in zdok%d %s to %d' % (zdok, inp, 0))
#         katadc.rf_fe_set(fpga, zdok, inp, 0)
#     else:
#         print('Already enabled:'),
#         print(rf)
# Capture raw signed-8-bit samples from both polarisations.
snap = fpga.snapshot_get('pol0', man_trig=True, man_valid=True)
pol0 = struct.unpack('%db' % snap['length'], snap['data'])
snap = fpga.snapshot_get('pol1', man_trig=True, man_valid=True)
pol1 = struct.unpack('%db' % snap['length'], snap['data'])
win = pg.GraphicsWindow('ADC SNAP')
p0_curve = win.addPlot(title='pol0 curve', row=0, col=0)
p0_curve.plot(pol0[0:1024])
y, x = np.histogram(pol0, 100)
p0_hist = win.addPlot(title='pol0 hist', row=1, col=0)
p0_hist.plot(x, y, stepMode=True, fillLevel=0, brush=(0,255,0,150))
p1_curve = win.addPlot(title='pol1 curve', row=0, col=1)
p1_curve.plot(pol1[0:1024])
y, x = np.histogram(pol1, 100)
p1_hist = win.addPlot(title='pol1 hist', row=1, col=1)
p1_hist.plot(x, y, stepMode=True, fillLevel=0, brush=(0,255,0,150))
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    # NOTE(review): QtCore is never imported; a NameError is avoided only
    # because `sys.flags.interactive != 1` short-circuits the `or` when run
    # non-interactively -- confirm and import QtCore if interactive use matters.
    if sys.flags.interactive != 1 or not hasattr(QtCore, 'PYQT_VERSION'):
        pg.QtGui.QApplication.exec_()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.debian import rules as debian_rules
from pants.backend.debian.target_types import DebianPackage
def target_types():
    """Register the target types contributed by the Debian backend."""
    return [DebianPackage]
def rules():
    """Expose every rule defined by the Debian backend package."""
    return [*debian_rules.rules()]
|
# -*- coding: utf-8 -*-
import itertools
class Solution:
    def checkZeroOnes(self, s: str) -> bool:
        """Return True iff the longest run of '1's in *s* is strictly longer
        than the longest run of '0's (missing digits count as -inf)."""
        best = {"0": float("-inf"), "1": float("-inf")}
        run_length = 0
        previous = None
        for ch in s:
            run_length = run_length + 1 if ch == previous else 1
            previous = ch
            if ch in best and run_length > best[ch]:
                best[ch] = run_length
        return best["0"] < best["1"]
if __name__ == "__main__":
solution = Solution()
assert solution.checkZeroOnes("1101")
assert not solution.checkZeroOnes("111000")
assert not solution.checkZeroOnes("110100010")
|
__author__ = 'pandazxx'
from utilities import trace
class DummyObject(object):
    """Minimal object that records its constructor arguments; used as a plain
    class attribute to contrast with the data descriptor below."""
    @trace
    def __init__(self, *args, **kwargs):
        self.__args = args      # positional ctor args (name-mangled attribute)
        self.__kwargs = kwargs  # keyword ctor args
class DecoratorExample(object):
    """Data descriptor (__get__ + __set__) used as a class attribute of
    AttrRefBase to demonstrate attribute-lookup precedence.

    NOTE(review): the value is stored on the descriptor itself, so it is
    shared by every instance of the owning class.
    """
    @trace
    def __init__(self, name, defval):
        self.__name = name  # descriptor label (not used for storage lookup)
        self.__val = defval
    @trace
    def __get__(self, instance, owner):
        return self.__val
    @trace
    def __set__(self, instance, value):
        self.__val = value
class AttrRefBase(object):
    """Demo class mixing a plain string attribute, an object-valued attribute,
    and a data descriptor; exercised by main() below."""
    class_attr1 = "class_attr1"
    class_attr2 = DummyObject('1', '2', first='first', second='second')
    decorator_attr1 = DecoratorExample(name='decorator_attr1', defval=10)
    @trace
    def __new__(cls, *args, **kwargs):
        # Delegates to object.__new__; @trace presumably logs the call -- confirm.
        return super(AttrRefBase, cls).__new__(cls, *args, **kwargs)
    @trace
    def __init__(self):
        pass
def main():
    """Print AttrRefBase's attribute-lookup behaviour before and after
    shadowing a class attribute on an instance."""
    print('AttrRefBase.class_attr2: {0}'.format(str(AttrRefBase.class_attr2)))
    print('AttrRefBase.__dict__: {0}'.format(str(AttrRefBase.__dict__)))
    print('dir(AttrRefBase): {dir}'.format(dir=dir(AttrRefBase)))
    print('isinstance(AttrRefBase, type): {0}'.format(isinstance(AttrRefBase, type)))
    arb = AttrRefBase()
    # Attribute reads: plain class attribute vs data descriptor.
    arb.class_attr1
    arb.decorator_attr1
    # Writing through the data descriptor updates the shared descriptor state.
    arb.decorator_attr1 = 20
    print('arb.class_attr2: {0}'.format(str(arb.class_attr2)))
    print('arb.__dict__: {0}'.format(str(arb.__dict__)))
    print('dir(arb): {dir}'.format(dir=dir(arb)))
    # Plain class attribute is shadowed per-instance (lands in arb.__dict__).
    arb.class_attr2 = DummyObject()
    print('arb.__dict__: {0}'.format(str(arb.__dict__)))
if __name__ == '__main__':
    main()
#I think this is slower than solve.py, actually
import socket
import time
def cubeEnding(sequence, ending):
    """Return the cube root of the `sequence`-th perfect cube whose decimal
    representation ends with the digits `ending`.

    Brute-forces prefixes i = 1, 2, ...: each candidate is the decimal
    concatenation str(i) + str(ending), tested via root3rd().  (Python 2.)
    """
    i = 0
    count = 0
    answer = 0
    while(count != sequence):
        i += 1
        # Candidate number: prefix digits followed by the required ending.
        cube = int(str(i) + str(ending))
        answer = root3rd(cube)
        if(answer != -1):
            count += 1
            print "i= " + str(i)
            print "cube= " + str(cube)
            print "count= " + str(count)
            print "sequence= " + str(sequence)
    return answer
def getSequenceNumber(question):
    """Return the integer that starts at offset 21 of *question*.

    The challenge text has a fixed 21-character preamble followed by the
    sequence number and then more prose.

    BUG FIX: the original read `question[i]` both in the loop condition and the
    body, so a question consisting entirely of digits after the preamble raised
    IndexError; this version simply stops at the first non-digit character.
    """
    #chop off the begining
    tail = question[21:]
    digits = ""
    for char in tail:
        if not char.isdigit():
            break
        digits += char
    # int('') still raises ValueError if no digits follow the preamble,
    # matching the original's failure mode.
    return int(digits)
def getEnding(question):
    """Return the 3-digit ending that follows the phrase 'digits ' in *question*."""
    #find "digits"
    pos = question.find("digits ") + len("digits ")
    return int(question[pos:pos + 3])
def root3rd(x):
    """Return the exact integer cube root of *x*, or -1 if *x* is not a
    perfect cube.

    Uses an all-integer Newton iteration, so it is exact for arbitrarily
    large integers (no float rounding).

    BUG FIX: the original iteration never converged to 0, so root3rd(0)
    wrongly returned -1 even though 0 is a perfect cube.
    """
    if x == 0:
        return 0
    y, y1 = None, 2
    while y != y1:
        y = y1
        y3 = y ** 3
        d = 2 * y3 + x
        # Newton step rounded to nearest integer.
        y1 = (y * (y3 + 2 * x) + d // 2) // d
    return y if y ** 3 == x else -1
# Connect to the challenge server and answer 99 rounds of "find the Nth cube
# ending in XYZ" questions.  (Python 2 script.)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("nullcon-e1.no-ip.org", 2000))
intro = s.recv(1024);
intro2 = s.recv(79);
for i in range(1, 100):
    print "i: " + str(i)
    question = s.recv(1024);
    if(i > 1):
        # Later rounds need a second read to collect the full question text.
        question += s.recv(1024);
    question = question[2:]
    print question
    sequence = getSequenceNumber(question)
    ending = getEnding(question)
    print sequence
    print ending
    answer = cubeEnding(sequence, ending)
    print answer
    s.sendall(str(answer) + "\n")
|
from django.urls import path, include, re_path
from api.endpoint import history_view
# URL routes for the history API views.
# NOTE(review): the regexes are unanchored ('entity', 'list'), so they
# match those substrings anywhere in the path — confirm this is intended;
# r'^entity$' is the usual Django form.
urlpatterns = [
    re_path(r'entity', history_view.HistoryView.as_view()),
    re_path(r'list', history_view.HistoryListView.as_view()),
]
|
# Celery configuration.  (A stray '|' scrape artifact at the end of the
# last line made the original a syntax error; removed.)

# Broker: SQLAlchemy transport over a local SQLite database file.
BROKER_URL = "sqla+sqlite:///vpt_tasks.db"
# NOTE(review): pickle deserializes arbitrary objects; this is only safe
# while the broker is unreachable by untrusted parties — confirm.
CELERY_ACCEPT_CONTENT = ["pickle"]
# Task results are discarded (fire-and-forget tasks).
CELERY_IGNORE_RESULT = True
# Modules the worker imports so task handlers get registered.
CELERY_IMPORTS = ("app.notifications.handlers", )
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import json
class Database:
    """Read-only accessor for the French tax data stored in data/database.json."""

    # Directory of this module, resolved once at class-creation time.
    __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    __DATA_FILEPATH = os.path.join(__location__, 'data', 'database.json')

    def __init__(self):
        """Load the 'impot' section of the JSON data file into memory."""
        self._filepath = Database.__DATA_FILEPATH
        with open(self._filepath, 'r') as file:
            data = json.load(file)
        self._impot_data = data['impot']

    def _per_year(self, key, annee):
        """Return self._impot_data[key][annee], falling back to the 2021 entry.

        *annee* may be an int or a str.  Unknown years use the '2021'
        data, mirroring the original per-method lookup behaviour.
        (Refactored: this logic was duplicated in irpp_bareme and
        plafond_quotient_familial.)
        """
        if isinstance(annee, int):
            annee = str(annee)
        table = self._impot_data[key]
        try:
            return table[annee]
        except KeyError:
            return table['2021']

    @property
    def salaire_abattement(self):
        """Flat-rate deduction applied to salary income."""
        return self._impot_data['salaire_abattement']

    def irpp_bareme(self, annee):
        """Income-tax (IRPP) bracket table for *annee* (falls back to 2021)."""
        return self._per_year('irpp_bareme', annee)

    def plafond_quotient_familial(self, annee):
        """'Quotient familial' cap for *annee* (falls back to 2021)."""
        return self._per_year('plafond_quotient_familial', annee)

    @property
    def reduction_dons(self):
        """Tax-reduction rate for charitable donations."""
        return self._impot_data['reduction']['dons']

    @property
    def reduction_syndicat(self):
        """Tax-reduction rate for union dues."""
        return self._impot_data['reduction']['syndicat']

    @property
    def prelevement_sociaux_taux(self):
        """Social-levy rate."""
        return self._impot_data['prelevement_sociaux_taux']

    @property
    def micro_foncier_taux(self):
        """Micro-foncier regime deduction rate."""
        return self._impot_data['micro_foncier']['taux']

    @property
    def micro_foncier_revenu_foncier_plafond(self):
        """Rental-income ceiling for the micro-foncier regime."""
        return self._impot_data['micro_foncier']['revenu_foncier_plafond']
|
#Tamishia Ayala
#Date: 01/27/2019
#This program will prompt the user to enter miles and then convert the user input (miles)to kilometers
# Prompt the user for a distance in miles and parse it as a float.
sMiles = input ('Enter miles: ')
float_KMs = float (sMiles)
def sMilestoKilometers(x):
    """Convert a distance in miles to kilometres (1 mile ~ 1.609 km)."""
    kilometers = x * 1.609
    return kilometers
# Convert the entered distance and report the result.
Kilom = sMilestoKilometers(float_KMs)
print ('Kilometers are ' + str(Kilom))
|
from Bio import SeqIO
import pandas as pd
# Build a mapping from each protein's accession (second field of the
# pipe-delimited FASTA header) to its sequence string.
dictseq = {}
n = 0
for n, seq_record in enumerate(SeqIO.parse("TIR.fasta", "fasta"), start=1):
    accession = seq_record.id.split("|")[1]
    dictseq[accession] = str(seq_record.seq)
    # Progress message (Chinese): "processed record n, protein ID is ...".
    print('运行至第',n,'次,蛋白质ID为',accession)
|
"""
Dieses Programm trainiert das neuronale Netz.
Dafür werden die Daten aus dem "dataset"-Verzeichnis verwendet.
Verwendung: 'python3 train-netzwerk.py'
(am besten zusamen mit 'nice' ausführen, da das Training lange
dauert und sehr rechenintensiv ist)
"""
import sys
import os
import numpy as np
from keras.models import Sequential
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Dropout, Flatten, Dense, Activation
from keras import callbacks
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
# Fix the RNG seed so runs are reproducible.
np.random.seed(42)
# '--development' / '-d' on the command line switches to a short test run.
DEV = False
argvs = sys.argv
argc = len(argvs)
if argc > 1 and (argvs[1] == "--development" or argvs[1] == "-d"):
    DEV = True
if DEV:
    epochs = 3
else:
    epochs = 20
train_data_path = './data/train'
validation_data_path = './data/validation'
"""
Parameters
"""
img_width, img_height = 150, 150
batch_size = 32
samples_per_epoch = 1000   # used as steps_per_epoch in the fit call below
validation_steps = 20
filters1 = 32              # filters in the first conv layer
filters2 = 64              # filters in the second conv layer
conv1_size = 3             # kernel size of the first conv layer
conv2_size = 2             # kernel size of the second conv layer
pool_size = 2
classes_num = 3            # number of output classes
lr = 0.0004                # RMSprop learning rate
# Small CNN: two conv/pool stages followed by a dense softmax classifier.
model = Sequential()
model.add(Conv2D(filters1, conv1_size, padding="same", input_shape=(img_width, img_height, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(data_format="channels_last", pool_size=(pool_size, pool_size)))
model.add(Conv2D(filters2, conv2_size, padding ="same"))
model.add(Activation("relu"))
# BUG FIX: this layer said data_format="channels_first", but the tensors
# flowing through the model are channels-last (input_shape ends with 3
# channels, and the first pooling layer is channels_last) — so it pooled
# over the wrong axes.
model.add(MaxPooling2D(data_format="channels_last", pool_size=(pool_size, pool_size)))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(classes_num, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(lr=lr),
              metrics=['accuracy'])
# Augment the training images (shear/zoom/mirror) and rescale pixel
# values to [0, 1]; validation images are only rescaled.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
# Stream batches of labelled images straight from the directory trees
# (one subdirectory per class).
train_generator = train_datagen.flow_from_directory(
    train_data_path,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    validation_data_path,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')
"""
Tensorboard log
"""
log_dir = './tf-log/'
tb_cb = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0)
cbks = [tb_cb]
model.fit_generator(
train_generator,
steps_per_epoch = samples_per_epoch,
epochs = epochs,
verbose = 1,
workers = 1,
use_multiprocessing = False,
validation_data = validation_generator,
callbacks = cbks,
validation_steps = validation_steps)
target_dir = './models/'
if not os.path.exists(target_dir):
os.mkdir(target_dir)
model.save('./models/model.h5')
model.save_weights('./models/weights.h5')
|
from pycaret.classification import * # Preprocessing, modelling, interpretation, deployment...
import pandas as pd # Basic data manipulation
import dabl as db # Summary plot
from sklearn.model_selection import train_test_split # Data split
from sdv.tabular import CopulaGAN # Synthetic data
from sdv.evaluation import evaluate # Evaluate synthetic data
from btb.tuning import Tunable, GCPTuner # CopulaGAN optimising
from btb.tuning import hyperparams as hp # Set hyperparameters for optimising
import joblib # Saving preparation steps
# Load the raw appointments CSV (Kaggle, May 2016 export).
original_data = pd.read_csv('KaggleV2-May-2016.csv')
# The helpers below normalise column names/dates and derive calendar
# features from the two datetime columns.
def extract_features(dataset):
    """Derive calendar feature columns from the two datetime columns, in place.

    Adds year/month/day/day-name columns for both 'AppointmentDay' and
    'RegisterDay' (plus the register hour), then drops the original
    datetime columns.  Both columns must already be datetime dtype.
    """
    appt = dataset['AppointmentDay'].dt
    dataset['Appointment_year'] = appt.year
    dataset['Appointment_month'] = appt.month
    dataset['Appointment_day'] = appt.day
    dataset['Appointment_day_name'] = appt.day_name()
    # The appointment timestamps carry no time-of-day info, so no hour column.
    reg = dataset['RegisterDay'].dt
    dataset['Register_year'] = reg.year
    dataset['Register_month'] = reg.month
    dataset['Register_day'] = reg.day
    dataset['Register_day_name'] = reg.day_name()
    dataset['Register_hour'] = reg.hour
    dataset.drop(columns=['AppointmentDay', 'RegisterDay'], inplace=True)
def convert_datetime(dataset):
    """Normalise column names and parse the two date columns, in place.

    'Handcap' (dataset typo) becomes 'Handicap', 'ScheduledDay' becomes
    'RegisterDay'; both date columns are parsed to tz-naive datetimes.
    """
    dataset.rename(columns={'Handcap': 'Handicap', "ScheduledDay": "RegisterDay"}, inplace=True)
    for column in ('AppointmentDay', 'RegisterDay'):
        dataset[column] = pd.to_datetime(dataset[column]).dt.tz_localize(None)
# Apply the renaming/date parsing to the raw data.
convert_datetime(original_data)
# Gaussian-Copula-Process tuner over the CopulaGAN hyperparameter space.
# NOTE(review): `tuner` is constructed but never consulted below — the
# CopulaGAN is built with hard-coded batch_size/epochs; confirm whether
# the tuning loop was removed intentionally.
tuner = GCPTuner(Tunable({
    'epochs': hp.IntHyperParam(min = 1, max = 2),
    'batch_size' : hp.IntHyperParam(min = 1, max = 100),
    'embedding_dim' : hp.IntHyperParam(min = 1, max = 100),
    'gen' : hp.IntHyperParam(min = 1, max = 1000),
    'dim_gen' : hp.IntHyperParam(min = 1, max = 1000)
}))
# Keep only the no-show appointments: the GAN synthesises extra examples
# of this minority class.  (The original comment about "employees that
# left" was copied from a different project.)
real = original_data[original_data["No-show"] == "Yes"]
## TRAINING LOOP START ##
# Build the CopulaGAN; AppointmentID is the table's primary key.
# (Removed a redundant `model = None` that was immediately overwritten,
# and a stray '|' scrape artifact that broke the last line.)
model = CopulaGAN(primary_key = "AppointmentID",
                  batch_size = 100,
                  epochs = 2)
# Fit the CopulaGAN on the real minority-class rows.
print("fit")
model.fit(real)
print("sample")
# Generate 40000 rows of synthetic data.
synth_data = model.sample(40000, max_retries = 300)
# Score the synthetic data against the real data (SDV evaluate metric).
score = evaluate(synthetic_data = synth_data, real_data = real)
print(score)
model.save('best_copula.pkl')
synth_data.to_csv("synth_data.csv", index = False)
# Generated by Django 3.1.5 on 2021-01-28 13:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable 'title' CharField to the pages.Diary model."""

    dependencies = [
        ('pages', '0002_auto_20210128_1355'),
    ]

    operations = [
        migrations.AddField(
            model_name='diary',
            name='title',
            # NOTE(review): verbose_name='user_id' looks copy-pasted from
            # another field — confirm it should not be 'title'.
            field=models.CharField(max_length=50, null=True, verbose_name='user_id'),
        ),
    ]
|
from tkinter import *
from tkinter.messagebox import showinfo
import echec.classe
from echec.classe import Pieces
from PIL import ImageTk, Image
import pickle
from tkinter.messagebox import *
from tkinter.simpledialog import *
class Plateau:
    """Networked two-player chess board drawn on a Tkinter canvas.

    The board is a 400x400 canvas of 50px squares.  ``dicoJeux`` maps a
    square index (0..63, row-major from the top) to a piece object, or to
    the one-space string ' ' for an empty square.  Board states are
    exchanged with the server through the ``netConn`` wrapper as pickled
    lists.  (Original indentation was lost in extraction and has been
    reconstructed from the control flow.)
    """

    def __init__(self, window, couleur, netConn, username, room):
        """Build the initial position and draw it.

        window: Tk root window.
        couleur: this client's side, 'blanc' (white) or 'noir' (black).
        netConn: network connection wrapper (provides client/send/sendBytes).
        username / room: identify this player and game on the server.
        """
        self.window = window
        self.myCouleur = couleur
        self.n = netConn
        self.playerName = username
        self.room = room
        self.plateau = Canvas(window, width =400, height =400, bg ='white')
        self.quiJoue = "blanc"   # side to move; white starts
        self.firstClick = True   # True: next click picks a piece; False: picks a square
        self.MortBlanc = {}      # captured white pieces, keyed by (x, y)
        self.MortNoir = {}       # captured black pieces, keyed by (x, y)
        # ------------------------------ WHITE ------------------------------
        "Tour"
        self.tourBA = Pieces.Pieces('tour', 'blanc', 25,25)
        self.tourBH = Pieces.Pieces('tour', 'blanc', 375,25)
        "Cavalier"
        self.cavalierBB = Pieces.Pieces('cavalier','blanc',75,25)
        self.cavalierBG = Pieces.Pieces('cavalier','blanc',325,25)
        "Fou"
        self.fouBC = Pieces.Pieces('fou','blanc',125,25)
        self.fouBF = Pieces.Pieces('fou','blanc',275,25)
        "Reine/Roi"
        self.reineB = Pieces.Pieces('reine','blanc',175,25)
        self.roiB = Pieces.Pieces('roi','blanc',225,25)
        "Pion"
        self.pionBA = Pieces.Pieces('pion','blanc',25,75)
        self.pionBB = Pieces.Pieces('pion','blanc',75,75)
        self.pionBC = Pieces.Pieces('pion','blanc',125,75)
        self.pionBD = Pieces.Pieces('pion','blanc',175,75)
        self.pionBE = Pieces.Pieces('pion','blanc',225,75)
        self.pionBF = Pieces.Pieces('pion','blanc',275,75)
        self.pionBG = Pieces.Pieces('pion','blanc',325,75)
        self.pionBH = Pieces.Pieces('pion','blanc',375,75)
        # ------------------------------ BLACK ------------------------------
        "Tour"
        self.tourNA = Pieces.Pieces('tour','noir', 25,375)
        self.tourNH = Pieces.Pieces('tour','noir', 375,375)
        "Cavalier"
        self.cavalierNB = Pieces.Pieces('cavalier','noir',75,375)
        self.cavalierNG = Pieces.Pieces('cavalier','noir',325,375)
        "Fou"
        self.fouNC = Pieces.Pieces('fou','noir',125,375);
        self.fouNF = Pieces.Pieces('fou','noir',275,375)
        "Reine/Roi"
        self.reineN = Pieces.Pieces('reine','noir',175,375)
        self.roiN = Pieces.Pieces('roi','noir',225,375)
        "Pion"
        self.pionNA = Pieces.Pieces('pion','noir',25,325)
        self.pionNB = Pieces.Pieces('pion','noir',75,325)
        self.pionNC = Pieces.Pieces('pion','noir',125,325)
        self.pionND = Pieces.Pieces('pion','noir',175,325)
        self.pionNE = Pieces.Pieces('pion','noir',225,325)
        self.pionNF = Pieces.Pieces('pion','noir',275,325)
        self.pionNG = Pieces.Pieces('pion','noir',325,325)
        self.pionNH = Pieces.Pieces('pion','noir',375,325)
        # Initialise the board dictionary (square index -> piece).
        self.dicoJeux = {0:self.tourBA,1:self.cavalierBB,2:self.fouBC,3:self.reineB,4:self.roiB,5:self.fouBF,6:self.cavalierBG,7:self.tourBH,
                         8:self.pionBA,9:self.pionBB,10:self.pionBC,11:self.pionBD,12:self.pionBE,13:self.pionBF,14:self.pionBG,15:self.pionBH,
                         48:self.pionNA,49:self.pionNB,50:self.pionNC,51:self.pionND,52:self.pionNE,53:self.pionNF,54:self.pionNG,55:self.pionNH,
                         56:self.tourNA,57:self.cavalierNB,58:self.fouNC,59:self.reineN,60:self.roiN,61:self.fouNF,62:self.cavalierNG,63:self.tourNH}
        # Middle four ranks start empty.
        for i in range(16,48):
            self.dicoJeux[i] = ' '
        # Pixel coordinates of every square centre, in square-index order.
        self.listPosition= []
        for y in range(0, 8):
            y = y*50+25
            for x in range(0, 8):
                x = x*50+25
                self.listPosition.append([x,y])
        self.pieceSelect = None
        self.plateau.delete(ALL)
        self.plateau.pack()
        self.plateau.bind('<Button-1>', self.select)
        # Draw the starting position.
        self.creationPlateau(self.dicoJeux)
        # Black starts by waiting for white's first move.
        if self.myCouleur == "noir":
            self.recep()

    def change(self):
        """Toggle whose turn it is."""
        if self.quiJoue == "blanc":
            self.quiJoue = "noir"
        else:
            self.quiJoue = "blanc"

    def select(self, event):
        """Canvas click handler implementing the two-click move flow.

        The first click selects one of the player's pieces and outlines
        the squares its ``preshot`` returns; the second click moves the
        piece there (capturing any occupant), then passes the turn and
        sends the new state to the server.
        """
        if self.quiJoue != self.myCouleur:
            showinfo("aled", "c'est pas ton tour")
        else:
            # Snap the click to the centre of its 50px square.
            x = event.x
            y = event.y
            x=event.x%50
            x=(event.x-x)+25
            y=event.y%50
            y=(event.y-y)+25
            listXY = [x,y]
            listXYR = [x,y,'red']      # capture squares are tagged 'red'
            listXYG = [x,y,'green']    # free squares are tagged 'green'
            # First click: pick the piece to move.
            if self.firstClick == True:
                self.pieceSelect = self.dicoJeux[self.listPosition.index(listXY)]
                if self.pieceSelect == " ":
                    pass
                elif self.pieceSelect.couleur != self.myCouleur:
                    showinfo("aled", "tu es les "+self.myCouleur)
                else:
                    lastItem = None
                    preshot = self.pieceSelect.preshot(self.dicoJeux, self.listPosition)
                    if preshot == None or not(preshot):
                        # No reachable square: stay in selection mode.
                        self.firstClick = True
                        showinfo("aled", "Vous ne pouvez pas déplacer cette piece")
                    else:
                        self.firstClick = False
                        # Outline every reachable square in its tag colour.
                        for item in preshot:
                            self.plateau.create_rectangle(item[0]-25,item[1]-25,item[0]+25,item[1]+25,outline=item[2], width="5")
            # Second click: pick the destination square.
            else:
                preshot = self.pieceSelect.preshot(self.dicoJeux, self.listPosition)
                if listXYR not in preshot and listXYG not in preshot:
                    # Not a highlighted square: ignore the click.
                    pass
                else:
                    self.pieceSelect.setX(x)
                    self.pieceSelect.setY(y)
                    index = self.listPosition.index(listXY)
                    # Vacate the origin square and occupy the target,
                    # storing any captured piece in the matching graveyard.
                    for key, value in self.dicoJeux.items():
                        if value == self.pieceSelect:
                            self.dicoJeux[key] = ' '
                        elif key == index:
                            if self.dicoJeux[key] == ' ':
                                pass
                            elif self.dicoJeux[key].couleur == "blanc":
                                self.MortBlanc[tuple([self.dicoJeux[key].positionX,self.dicoJeux[key].positionY])] = self.dicoJeux[key]
                            elif self.dicoJeux[key].couleur == "noir":
                                self.MortNoir[tuple([self.dicoJeux[key].positionX,self.dicoJeux[key].positionY])] = self.dicoJeux[key]
                            self.dicoJeux[key] = self.pieceSelect
                    self.creationPlateau()
                    self.firstClick = True
                    # A captured king ends the game.
                    if self.roiB in self.MortBlanc.values():
                        showinfo("aled", "le roi Blanc est mort")
                    elif self.roiN in self.MortNoir.values():
                        showinfo("aled", "le roi Noir est mort")
                    self.checkPion()
                    self.change()
                    self.envoie()

    def checkPion(self):
        """Open the promotion screen when a pawn stands on a back rank.

        The promoted pawn is replaced by one of the captured pieces,
        chosen by clicking it (see changementPieces).
        """
        # Back-rank square indices (top and bottom rows of the board).
        aled = [0,1,2,3,4,5,6,7,63,62,61,60,59,58,57,56]
        for i in aled:
            if self.dicoJeux[i] != " " and self.dicoJeux[i].type == 'pion':
                # Remember where the pawn stands; the chosen piece will be
                # placed there by changementPieces().
                self.aled = [self.dicoJeux[i].positionX, self.dicoJeux[i].positionY]
                self.plateau.destroy()
                self.mort = Canvas(self.window, width =400, height =400, bg ='grey')
                self.mort.pack()
                self.mort.bind('<Button-1>', self.changementPieces)
                # NOTE(review): the create_text calls below target
                # self.plateau, which was just destroyed — self.mort looks
                # intended.  Also `self.dicoJeux[i].couleur` is a non-empty
                # string and therefore always truthy; an explicit
                # comparison was probably meant.  TODO confirm.
                if self.dicoJeux[i].couleur and self.quiJoue == "blanc":
                    for value in self.MortBlanc.values():
                        self.plateau.create_text(
                            value.positionX ,
                            value.positionY,
                            text = value.text,
                            font = "Arial 20 bold",
                            fill='green')
                    break
                elif self.dicoJeux[i].couleur and self.quiJoue == "noir":
                    for value in self.MortNoir.values():
                        self.plateau.create_text(
                            value.positionX ,
                            value.positionY,
                            text = value.text,
                            font = "Arial 20 bold",
                            fill='red')
                    break

    def changementPieces(self, event):
        """Promotion click handler: revive the clicked captured piece.

        Moves the captured piece under the click to the promotion square
        remembered in ``self.aled``, closes the grey screen and redraws
        the board.
        """
        # Snap the click to the centre of its 50px square.
        x = event.x
        y = event.y
        x=event.x%50
        x=(event.x-x)+25
        y=event.y%50
        y=(event.y-y)+25
        listXY = tuple([x,y])
        if listXY in self.MortBlanc.keys():
            self.MortBlanc[listXY].positionX = self.aled[0]
            self.MortBlanc[listXY].positionY = self.aled[1]
            self.dicoJeux[self.listPosition.index(self.aled)] = self.MortBlanc[listXY]
            del self.MortBlanc[listXY]
            self.mort.destroy()
            # NOTE(review): the replacement canvas is never pack()ed nor
            # re-bound to self.select — verify the board stays clickable.
            self.plateau = Canvas(self.window, width =400, height =400, bg ='white')
            self.creationPlateau()
        elif listXY in self.MortNoir:
            self.MortNoir[listXY].positionX = self.aled[0]
            self.MortNoir[listXY].positionY = self.aled[1]
            self.dicoJeux[self.listPosition.index(self.aled)] = self.MortNoir[listXY]
            del self.MortNoir[listXY]
            self.mort.destroy()
            self.plateau = Canvas(self.window, width =400, height =400, bg ='white')
            self.creationPlateau()

    # Draw the checkerboard and every live piece.
    def creationPlateau(self, newDico = None):
        """Redraw the 8x8 board and all pieces on ``self.plateau``.

        White pieces are drawn as green text, black pieces as red text.
        NOTE(review): the ``newDico`` parameter is accepted but never
        used — the method always reads ``self.dicoJeux``.
        """
        for l in range(0, 8):
            for c in range(0, 8):
                if (l+c)%2 == 0:
                    fill = 'black'
                else:
                    fill = 'white'
                self.plateau.create_rectangle(l*50,c*50,l*50+50,c*50+50,fill=fill)
        for value in self.dicoJeux.values():
            if value == ' ':
                pass
            elif value.couleur == 'noir':
                self.plateau.create_text(
                    value.positionX ,
                    value.positionY,
                    text = value.text,
                    font = "Arial 20 bold",
                    fill='red')
            elif value.couleur == 'blanc':
                self.plateau.create_text(
                    value.positionX ,
                    value.positionY,
                    text = value.text,
                    font = "Arial 20 bold",
                    fill='green')

    def envoie(self):
        """Push the current board state to the server as a pickled list."""
        data = pickle.dumps(["dataGame", self.room, self.playerName, self.dicoJeux, self.quiJoue, self.MortBlanc, self.MortNoir])
        response = self.trySendServer(data)
        if response[0] == "500" :
            showerror("Une erreur est survenue", response[1])
        elif response[0] == "0":
            # Move accepted: block until the opponent's reply arrives.
            self.recep()
            pass
        else :
            print("SOME ERROR OCCURED send")

    def recep(self):
        """Block until the server pushes a board state, then apply it.

        NOTE(review): pickle.loads on bytes received from the network can
        execute arbitrary code if the peer is malicious — this assumes a
        trusted server.
        """
        print("aaaaaaaaaaaaaaa")
        brut = self.n.client.recv(2048)
        print("bbbbbbbbbbbbbbbbb")
        try :
            data = pickle.loads(brut)
        except :
            # Not a pickle: fall back to plain text.
            data = brut.decode()
        if data[0] == "0" and data[1] == "boardsInfo":
            print("================= DANS LE IF")
            # Adopt the opponent's board, turn and graveyards, then redraw.
            self.dicoJeux = data[2]
            self.quiJoue = data[3]
            self.MortBlanc = data[4]
            self.MortNoir = data[5]
            self.creationPlateau()
            pass
        print("========================= SORTIE DU IF")

    def checkConn(self):
        """Return True when the server still answers a probe message."""
        if (self.n.send("isItWorking") is None ) :
            return False
        else :
            return True

    def trySendServer(self, data):
        """Send *data* if the connection is alive.

        Returns the server response as a list (splitting on '///' when it
        arrives as a string), or a ["500", message] pair when the
        connection is down.
        """
        if self.checkConn():
            aled = self.n.sendBytes(data)
            if isinstance(aled, list):
                return aled
            else :
                return aled.split('///')
        else:
            return ["500","La connexion au serveur a échoué"]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.