text stringlengths 8 6.05M |
|---|
import requests
import csv
from sensetw.core import Mapping
def get_mappings(csv_url, agent=None):
    """Fetch a CSV from *csv_url* and build Mapping objects from its rows.

    :param str csv_url: URL of a CSV file whose header includes the
        hypothesis_url/hypothesis_title/trello_url/trello_title columns.
    :param agent: optional requests-like object exposing ``get`` (an
        injection point for testing); defaults to the ``requests`` module.
    :return: list of Mapping objects; rows missing either URL are skipped.
    """
    if agent is None:
        agent = requests
    response = agent.get(csv_url)
    # FIX: splitlines() also strips '\r', so CRLF-terminated payloads do not
    # leave a stray carriage return in the last column of every row.
    contents = response.text.splitlines()
    reader = csv.DictReader(contents)
    return [Mapping(hypothesis_url=row["hypothesis_url"],
                    hypothesis_title=row["hypothesis_title"],
                    trello_url=row["trello_url"],
                    trello_title=row["trello_title"])
            for row in reader
            if row["hypothesis_url"] != "" and row["trello_url"] != ""]
|
from Graph import *
from GraphWorld import *

# Build a graph on eight vertices labelled 'a'..'h', make it 6-regular,
# and display it on a circular layout.
vertices = [Vertex(label) for label in "abcdefgh"]
graph = Graph(vertices)
graph.add_regular_edges(6)
layout = CircleLayout(graph)

# draw the graph
world = GraphWorld()
world.show_graph(graph, layout)
world.mainloop()
|
#sierpinski_triangle.py
"""
-------------------------------------------------------------------------------------------
Generates a visualization of the Sierpinski triangle by printing spheres in space in Maya
-------------------------------------------------------------------------------------------
One function named run()
Parameters:
max_Iteration - maximum number of cubes to print
size - the size of our printing canvas
Script by Vlasis Gogousis [vgogousis@gmail.com]
MA3D o 2017
"""
#******** IMPORT MODULES ********#
import maya.cmds as cmds
import numpy as np
from random import randint
#******** RUN SIERPINSKI TRIANGLE VISUALIZATION ********#
def run(max_Iteration, size):
    """
    Generates a visualization of the Sierpinski triangle by printing spheres in space in Maya

    Plays the "chaos game": repeatedly jump halfway from the current point
    toward a randomly chosen vertex of an equilateral triangle, dropping a
    sphere at each stop; the visited points converge onto the Sierpinski
    triangle.

    Parameters:
    max_Iteration - maximum number of spheres to print
    size - the size of our printing canvas (scale factor applied to the
           unit triangle before placing spheres)
    """
    # Initialize scene
    cmds.file(new=True, force=True)
    cmds.lookThru('top')
    cmds.grid(toggle=False)

    # Setup window for progress bar
    window = cmds.window()
    cmds.columnLayout()
    progressControl = cmds.progressBar(maxValue=max_Iteration, width=300)
    cmds.showWindow(window)

    # Create shader to paint spheres with (white blinn named "shader1")
    shader = cmds.shadingNode("blinn", asShader=True, name="shader" + str(1))
    attr = shader + ".color"
    cmds.setAttr(attr, 1, 1, 1)

    def midpoint(point1, point2):
        """Return the 2-D midpoint of point1 and point2."""
        # FIX: divide by 2.0 so the math stays in floats even under the
        # Python 2 interpreter bundled with older Maya releases, where
        # int/int truncates and would collapse the fractal onto the axes.
        return [(point1[0] + point2[0]) / 2.0, (point1[1] + point2[1]) / 2.0]

    # Set starting point for Sierpinski algorithm
    curr_point = [0, 0]
    # Define an equilateral triangle in space
    v1 = [0, 0]
    v2 = [1, 0]
    v3 = [.5, np.sqrt(3) / 2]
    vertices = (v1, v2, v3)

    # Draw max_Iteration number of spheres
    for i in range(max_Iteration):
        # Jump halfway toward a randomly selected triangle vertex
        curr_point = midpoint(curr_point, vertices[randint(0, 2)])
        # Draw corresponding sphere in space
        cmds.polySphere(n="sp" + str(i))
        cmds.move(size * curr_point[0], 0, size * curr_point[1])
        cmds.scale(0.5, 0.5, 0.5)
        cmds.hyperShade(assign="shader" + str(1))
        # Update progress bar and viewport
        cmds.progressBar(progressControl, edit=True, step=1)
        cmds.viewFit('top', all=True)
        cmds.dolly('top', os=1.5)
        cmds.refresh()

    # Final progress-bar tick, redraw, and hide the progress window
    cmds.progressBar(progressControl, edit=True, step=1)
    cmds.refresh()
    cmds.toggleWindowVisibility(window)
import os
from subprocess import call

# Run the chemposer binary against a sample C20 xyz geometry file.
executable = './chemposer'
xyz_file = './static/xyz/tmp/C20.xyz'
call([executable, xyz_file])
|
import os
import z
import fnmatch
import csv
from collections import defaultdict
from sortedcontainers import SortedSet
import buy
import os
from scipy import stats
import numpy as np
#
#x = np.random.random(10)
#y = np.random.random(10)
#slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
#print("r_value: {}".format( r_value))
#print("slope: {}".format( slope))
#
#x = [60 - i for i in range(30)]
#y = [i for i in range(30)]
#slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
#print("r_value: {}".format( r_value))
#print("slope: {}".format( slope))
#
#exit()
# take wlp dumps and capture historical fundamentals
def doit(allowed = []):
    """Scan wlp dump files and capture historical fundamentals per stock.

    allowed -- optional list of ticker symbols; when non-empty, only those
    stocks are processed ("single" mode) and extra diagnostics are printed,
    and the full-universe results are NOT persisted.

    NOTE(review): `allowed` is a mutable default argument; it is never
    mutated here so behavior is unaffected, but `allowed=None` with a
    None-check would be the safer idiom.
    """
    print("allowed : {}".format( allowed ))
    dates = z.getp("dates")           # trading-date index from the z store
    yearago = dates[-252]             # ~252 trading days = one year back
    parentdir = "/mnt/c/Users/Zoe/Documents/wlp_dump2"
    pattern = "*.txt"
    listOfFiles = os.listdir(parentdir)
    wlp_dict = defaultdict(dict)
    wlp_sorted_mc = defaultdict(SortedSet)
    sorted_mc = SortedSet()           # (market cap, ticker), ascending
    newlist = list()
    wlp_dict = defaultdict(dict)      # NOTE(review): re-initialized; first assignment above is dead
    wlp_list = defaultdict(dict)
    os_change = dict()
    wlp_lasts = dict()                # ticker -> (dr, ebit, r_value, slope)
    latest_mc = dict()                # ticker -> most recent market cap
    yearagomc = dict()                # ticker -> market cap one year ago
    single = bool(allowed)            # verbose mode when specific tickers requested
    for entry in listOfFiles:
        if fnmatch.fnmatch(entry, pattern):
            path = parentdir + "/" + entry
            # ticker symbol = dump file name without extension
            astock = os.path.basename(os.path.splitext(entry)[0])
            if allowed and astock not in allowed:
                continue
            lastdr = None
            lastos = None
            lastebit = None
            l1 = list()
            l2 = list()
            l3 = list()
            # if astock != "PSA":
            #     continue
            for row in csv.DictReader(open(path)):
                mc = float(row['MC'])
                dr = float(row['DebtRatio'])
                date = row['Date']
                out = float(row['out'])
                ebit = float(row['ebit'])
                if date == yearago:
                    yearagomc[astock] = mc
                # record shares outstanding only when the value changes
                if lastos != out:
                    lastos = out
                    l2.append(out)
            if single:
                print("l2: {}".format( l2))
            r_value = "NA"
            slope = "NA"
            # mc/dr/ebit hold the last row's values here -- assumes the dump
            # is chronologically ordered; raises NameError on an empty file
            latest_mc[astock] = mc
            sorted_mc.add((mc, astock))
            if len(l2) > 8:
                l2 = l2[-8:]          # regress over the last 8 changes only
                y = [ i for i in range(len(l2)) ]
                try:
                    slope, intercept, r_value, p_value, std_err = stats.linregress(l2, y)
                except:
                    # NOTE(review): if linregress fails, r_value stays "NA"
                    # and the round() below raises TypeError -- confirm
                    pass
                r_value = round(r_value * r_value,2)   # R^2
                slope = round(slope,2)
            if single:
                print("r_value : {}".format( r_value ))
                print("slope : {}".format( slope ))
            wlp_lasts[astock] = dr, ebit, r_value, slope
    z.setp(latest_mc, "latest_mc", True)
    if not allowed:
        # only persist full-universe results, never a filtered subset
        z.setp(yearagomc, "yearagomc", True)
        z.setp(wlp_lasts, "wlp_lasts", True)
    buy.sortedSetToRankDict("latestmc", sorted_mc, reverse=True)
    bar = z.getp("latestmc")
    print("bar : {}".format( bar ))
if __name__ == '__main__':
    import time
    import argparse

    # Optional positional arg: a single ticker symbol to restrict the scan.
    cli = argparse.ArgumentParser()
    cli.add_argument('helpers', type=str, nargs='?', default=[])
    parsed = cli.parse_args()

    stocks = [parsed.helpers.upper()] if parsed.helpers else []

    started = time.time()
    doit(stocks)
    elapsed_time = time.time() - started
    print("elapsed_time : {}".format( elapsed_time ))
# wlp_sorted_mc = z.getp("wlp_sorted_mc")
# def sortedSetToRankDict(saveas, sset, reverse=False, printdata = False):
|
### This program is free software; you can redistribute it and/or modify it under
### the terms of the GNU General Public License as published by the Free Software
### Foundation; either version 2 of the License, or (at your option) any later
### version.
### This program is distributed in the hope that it will be useful, but WITHOUT
### ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
### FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
### details.
### You should have received a copy of the GNU General Public License along with
### this program; if not, write to the Free Software Foundation, Inc., 51
### Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
### Inspired by Paul Holzer (Byte, Feb 1986) Most of this code come from the
### Scandroid by Charles O. Hartman.
### Use Nessly's Default (not great), with hints from stress-forcing suffixes,
### a few prefixes, and number of suffixes. Nessly's Default for disyllables is
### more useful if we apply it also before suffixes.
### As I've added suffix and prefix twists to Nessly, I've steadily
### *compromised* Nessly. (Example: 'for EV er' in older version, now 'FOR ev
### er'; it happens that the 3+-syl version of Nessly works for this word while
### the 2-syl version applied after -er is removed doesn't. This in-between
### state should probably be resolved, but resolving it well is not easy.
### Adding 'en' to prefixes fixes 'encourage' but breaks 'entry'. At some point
### the returns from new compromises and special cases diminish; there will
### *always* be an exceptions dictionary.
import string
import re
import imp
import sys
import os
import fnmatch
import our_regex
import variables
def load_dictionary():
    """Read scandictionary.txt (comma-separated) from the working directory
    into a {word: [syllable, ...]} mapping; later duplicates win."""
    import csv
    with open('scandictionary.txt') as csv_file:
        return {row[0]: row[1:] for row in csv.reader(csv_file, delimiter=',')}
def clean_and_remove_junk(poem,nlp):
    """Strip each poem line down to letters separated by single spaces.

    Only ASCII letters are kept; spaces and dashes become word separators
    (a run of dashes collapses to one separator -- the double-dash case);
    everything else is dropped.  Each cleaned line ends with a newline.

    FIX: the original built `[nlp(item) for item in poem]` and threw the
    result away -- pure wasted work that also forced *nlp* to be callable.

    :param poem: iterable of line strings.
    :param nlp: unused; retained for interface compatibility with callers.
    :return: list of cleaned line strings.
    """
    letters = set(string.ascii_lowercase + string.ascii_uppercase)
    # remove everything but letters and spaces and linebreaks:
    filtered = []
    dash = False  # deliberately persists across lines, as before
    for line in poem:
        cleaned = ""
        word = ""
        for ch in line:
            if ch in letters:
                word += ch
            elif (ch == " ") or (ch == "-"):
                if (ch == "-") and not dash:
                    dash = True
                    cleaned += word + " "
                    word = ""
                elif (ch == "-") and dash:
                    pass  # stops the double dash insanity
                else:
                    dash = False
                    cleaned += word + " "
                    word = ""
        cleaned += word + '\n'
        filtered.append(cleaned)
    return filtered
### remove punctuation other than "'":
def strip_punctuation(word):
    """Split *word* into its non-punctuation and punctuation characters,
    preserving order; apostrophes are NOT treated as punctuation."""
    punct = '!()-[]{}:;"\,<>.?@#$%^&*_~+='
    kept = []
    removed = []
    for ch in word:
        (removed if ch in punct else kept).append(ch)
    return {'word': "".join(kept), 'punctuation': "".join(removed)}
### remove letters and keep only numbers representing stress:
def strip_letters(line):
    """Concatenate every digit character found in the words of *line*,
    yielding the stress-number string and discarding all letters."""
    return "".join(ch for word in line for ch in word if ch.isdigit())
################################################################################
### If word, or word less -s/-ed ending, is in dict, return its syls/stress.
### Whenever we accept something from the dictionary, we copy it, so that our
### manipulations for the sake of this line won't change the dictionary for
### others (including other instances of this line).
################################################################################
def dictionary_lookup(word,dictionary):
    """Look up *word* (or *word* minus an -s/-ed/-d ending) in *dictionary*.

    Always returns a fresh copy of the entry so per-line manipulation never
    mutates the shared dictionary.  The appended ending matches the case of
    the final syllable.  Words shorter than 5 characters are only matched
    exactly; returns None on any miss.
    """
    if str(word) in dictionary:
        return list(dictionary[word])
    if len(word) < 5:
        return None  # e.g., 'bed', is 5 big/small enough?

    def attach(stem, ending):
        # Copy the stem's syllables and glue the ending onto the last one.
        if stem not in dictionary:
            return None
        syls = list(dictionary[stem])
        syls[-1] += ending.upper() if syls[-1].isupper() else ending
        return syls

    if word[-1:] == 's':
        return attach(word[:-1], 's')
    if word[-2:] == 'ed':
        return attach(word[:-2], 'ed') or attach(word[:-1], 'd')
    return None
################################################################################
# out-of-class functions to handle encoding of special-combination characters
def encode(ch):
    """Mark a character as part of a special combination by masking it down
    to its low six bits (moves letters out of the a-z range)."""
    masked = ord(ch) & 0x3F
    return chr(masked)
def decode(ch):
    """Invert encode(): restore the 0x40 bit to recover the letter."""
    restored = ord(ch) | 0x40
    return chr(restored)
# encode [st] and i but not following vowel
def handleCiV(match):
    """Encode the [st] and the i of a C-i-Vowel match; keep the vowel as-is."""
    text = match.group()
    return encode(text[0]) + encode(text[1]) + text[2]
# adjusted for third-char test.
def handleCC(match):
    """Encode a consonant pair; any third matched character passes through
    unchanged (the adjusted third-char test)."""
    text = match.group()
    encoded = encode(text[0]) + encode(text[1])
    if len(text) > 2:
        encoded += text[2]
    return encoded
def handleVyV(match):
    """Encode only the middle 'y' of a vowel-y-vowel match, keeping both
    surrounding vowels intact."""
    text = match.group()
    return text[0] + encode(text[1]) + text[2]
def preliminaries(word,nlp):
    """Pre-process *word* before syllabification: reset per-word state in
    the `variables` module, strip a trailing apostrophe/'s, drop a
    non-syllabic plural s / past-tense d, mark suffix syllables, and
    reverse a final liquid+e so it scans as a syllable.  Returns the
    trimmed word.
    """
    past_indicator = False        # NOTE(review): never read afterwards
    plurality_indicator = False   # NOTE(review): never read afterwards
    variables.isPast = False
    variables.isPlural = False
    variables.forceStress = 0
    variables.numSuffixes= 0
    variables.syllable_bounds = []
    # apostrophe within the last two characters (possessive/contraction)
    apostrophe = word.find("\'", -2)
    if apostrophe != -1:
        # syllabic possessive after a sibilant ("-s's"/"-se'") gets a bound
        if word[-1] != '\'' and word[-1] in 'se' and word[-2] in our_regex.SIBILANTS:
            variables.syllable_bounds.append(apostrophe)
        # cut off ' or 's until last stage
        word = word[:apostrophe]
    # cut final s/d from plurals/pasts if not syllabic
    # defaults used also for suffixes
    if re.search(r"[^s]s\b", word): variables.isPlural = True  # terminal single s (DUMB!)
    if re.search(r"ed\b", word): variables.isPast = True  # terminal 'ed'
    if variables.isPast or variables.isPlural: word = word[:-1]
    # final-syl test turns out to do better work *after* suffices cut off
    no_suffix = find_suffix(word)
    # if final syllable is l/r+e, reverse letters for processing as syllable
    if len(no_suffix) > 3 and our_regex.liquidterm.search(no_suffix):
        word = no_suffix[:-2] + no_suffix[-1] + no_suffix[-2]
    return word
### Identify any known suffixes, mark off as syllables and possible
### stresses. We identify them and list them backwards so as to "cut off"
### the last first. We consult a list of those that force stress on
### previous syllable.
def find_suffix(word):
    """Identify known suffixes of *word*, recording their syllable bounds
    and counts in the `variables` module (bounds are listed backwards so
    the last suffix is cut off first; sorted later by stress()).

    Returns *word* unchanged when no terminal suffix is found; otherwise
    returns the last popped (group, start) match's group.  NOTE(review):
    the caller binds this as `no_suffix`, apparently expecting the
    de-suffixed word, but this return yields a suffix string -- confirm.
    """
    resultslist = []
    for f in our_regex.suffixes.finditer(word):
        resultslist.append((f.group(), f.start()))
    if not resultslist:
        return word
    # make sure *end* of word is in list! otherwise, 'DESP erate'
    if resultslist[-1][1] + len(resultslist[-1][0]) < len(word):
        return word
    # process the rightmost suffix first
    resultslist.reverse()
    for res in resultslist:
        # if no vowel left before, false suffix ('singing')
        # n.b.: will choke on 'quest' etc! put in dictionary, I guess
        if not re.search('[aeiouy]', word[:res[1]]): break
        # doubled consonant before 'ing' keeps one letter ('run|ning')
        if res[0] == 'ing' and word[res[1]-1] == word[res[1]-2]:
            variables.syllable_bounds.append(res[1] - 1)  # freq special case
        else: variables.syllable_bounds.append(res[1])  # sorted later
        word = word[:res[1]]
        variables.numSuffixes += 1
        if res[0] in our_regex.STRESSSUFFIX:
            # force stress onto the syllable just before this suffix
            variables.forceStress = 0 - len(variables.syllable_bounds)
        if res[0] in our_regex.MULTISUFFIX:
            # tricky bit! it *happens* that secondary division in all these
            # comes after its first character; NOT inevitable! also does not
            # allow for 3-syl: 'ically' (which are reliable!)
            variables.syllable_bounds.append(res[1]+1)
            variables.numSuffixes += 1
    return resultslist.pop()[0]
################################################################################
### Encode character-combinations so as to trick DivideCV. The combinations are
### contained in regexes compiled in the class's __init__. Encoding (*not* to
### be confused with Unicode functions!) is done by small functions outside of
### (and preceding) the class. The combinations in Paul Holzer's original code
### have been supplemented and tweaked in various ways. For example, the
### original test for [iy]V is poor; 'avionics' defeats it; so we leave that to
### a new disyllabic-vowel test. The messy encoding-and-sometimes-decoding of
### nonsyllabic final 'e' after a C seems the best that can be done, though I
### hope not.
################################################################################
def special_codes(special_codes_word):
    """Encode tricky letter combinations so divide_cv() treats them as
    consonant-like units: a nonsyllabic final 'e' after a consonant, the
    CiV/CC/VyV combinations handled by the helpers above.

    Relies on variables.isPlural/isPast set by preliminaries().
    """
    if re.search(r"[^aeiouy]e\b", special_codes_word):  # nonsyllabic final e after C
        # FIX: the original referenced bare `SIBILANTS`, which is a NameError
        # at runtime; the constant lives in our_regex (cf. preliminaries()).
        if ((not variables.isPlural
             or special_codes_word[-2] not in our_regex.SIBILANTS)
                and (not variables.isPast or special_codes_word[-2] not in 'dt')):
            special_codes_word = special_codes_word[:-1] + encode(special_codes_word[-1])
            if not re.search(r"[aeiouy]", special_codes_word):  # any vowel left??
                special_codes_word = special_codes_word[:-1] + 'e'  # undo the encoding
    special_codes_word = our_regex.CiVcomb.sub(handleCiV, special_codes_word)
    special_codes_word = our_regex.CCpair.sub(handleCC, special_codes_word)
    special_codes_word = our_regex.VyVcomb.sub(handleVyV, special_codes_word)
    return special_codes_word
################################################################################
################################################################################
### Divide the word among C and V groups to fill the variables.syllable_bounds list.
### Here, and here alone, we need to catch e-with-grave-accent to count it
### as not only a vowel but syllabic ('an aged man' vs. 'aged beef'). Other
### special characters might be useful to recognize, but won't make the
### same syllabic difference.
### I made some changes here to deal with None types:
### I am not sure what implications this has,
### All I know is that the script does not break here any more
################################################################################
def divide_cv(word):
    """Divide *word* among consonant and vowel groups, appending each
    syllable boundary index to variables.syllable_bounds.  Returns the
    word unchanged (immediately, when it contains no vowel at all --
    a None-result guard added to keep the script from breaking here).
    """
    result = re.search(our_regex.unicodeVowels, word)
    if result != None:
        firstvowel = re.search(our_regex.unicodeVowels, word).start()
    else:
        return word  # no vowel: nothing to divide
    for v in re.finditer(our_regex.unicodeVowels, word):
        lastvowel = v.end()  # replaced for each group, last sticks
        # disyllabic vowel pairs get an internal boundary of their own
        disyllabicvowels = our_regex.sylvowels.search(v.group())
        if disyllabicvowels:
            variables.syllable_bounds.append(v.start() + disyllabicvowels.start() + 1)
    for cc in re.finditer(our_regex.uniConsonants, word):
        # only consonant clusters strictly between first and last vowel matter
        if cc.start() < firstvowel or cc.end() >= lastvowel: continue
        numcons = len(cc.group())
        if numcons < 3: pos = cc.end() - 1  # before single C or betw. 2
        elif numcons > 3: pos = cc.end() - 2  # before penult C
        else:  # 3 consonants, divide 1/2 or 2/1?
            cg = cc.group()  # our CCC cluster
            if cg[-3] == cg[-2] or our_regex.splitLeftPairs.search(cg):
                pos = cc.end() - 2  # divide 1/2
            else: pos = cc.end() - 1  # divide 2/1
        # step back over a non-letter (encoded marker) at the boundary
        if not word[pos-1].isalpha() and not word[pos].isalpha():
            variables.syllable_bounds.append(pos-1)
        else: variables.syllable_bounds.append(pos)
    return word
################################################################################
def stress(origword):
    """Return the 1-based index of the stressed syllable of *origword*,
    applying Nessly's default rule with the suffix/prefix adjustments
    accumulated in variables.syllable_bounds/numSuffixes/forceStress.
    """
    numsyls = len(variables.syllable_bounds) + 1
    if numsyls == 1:
        return 1  # monosyllable: trivially stressed
    variables.syllable_bounds.sort()  # suffixes may have been marked first
    if variables.forceStress:  # suffixes like 'tion', 'cious'
        # forceStress is negative: count back from the last syllable
        return numsyls + variables.forceStress
    if numsyls - variables.numSuffixes == 1:  # pretty reliable I think
        return 1
    isprefix = origword[:variables.syllable_bounds[0]] in our_regex.PREFIXES
    if numsyls - variables.numSuffixes == 2:  # Nessly w/ suffix twist
        if isprefix:
            return 2
        else:
            return 1
    elif isprefix and (numsyls - variables.numSuffixes == 3):
        return 2
    else:
        # Nessley: 3+ syls, str penult if closed, else antepenult
        # syl n is origword[variables.syllable_bounds[n-1]:variables.syllable_bounds[n]-1]; so?
        if origword[variables.syllable_bounds[-1] - 1] not in 'aeiouy':  # last char penult
            retstress = numsyls - 1  # if closed, stress penult
        else: retstress = numsyls - 2  # else, antepenult
        if variables.numSuffixes == numsyls:
            retstress -= 1
        return retstress
def calculate_syllables(word,nlp):
    """Split *word* into syllables, uppercasing the stressed one.

    Resets variables.syllable_bounds, runs the pre-processing passes
    (preliminaries -> special_codes -> divide_cv -> stress), then slices
    the original word at the computed boundaries.  Words under 3 chars
    are returned whole, uppercased.
    """
    if len(word) < 3: return [word.upper()]  # 'ax' etc
    variables.syllable_bounds = []
    wordp = preliminaries(word,nlp)
    wordpp = special_codes(wordp)
    # NOTE(review): divide_cv is fed wordp, not wordpp -- the encoded form
    # from special_codes is computed but never used; confirm intent.
    wordppp = divide_cv(wordp)
    stressed = stress(wordppp)
    variables.syllable_bounds.insert(0, 0)  # ease the calc of syllable indices
    variables.syllable_bounds.append(len(word))  # within the word
    listOfSyls = []
    i = 0
    for s in variables.syllable_bounds:
        if not s:
            continue  # skip the sentinel 0 bound
        i += 1
        if i != stressed:
            listOfSyls.append(word[variables.syllable_bounds[i-1]:s])
        else:
            listOfSyls.append(word[variables.syllable_bounds[i-1]:s].upper())
    return listOfSyls
################################################################################
### Determine syls/stress in all words in line, store other data too. Divide
### the line into word tokens with spaCy. Look up each in dictionary, and there
### or by calculation in the Syllabizer
################################################################################
def parse_line(line,nlp):
    """Tokenize *line* with spaCy and return syllable/stress data per word.

    Each word is looked up in the scansion dictionary first; a calculated
    syllabification is used only when the dictionary has no entry.  Tokens
    that calculate to an empty first syllable (spaCy newline tokens) map to
    '\\n'.  Returns None for an empty line.
    """
    if len(line) < 1:
        return None  # nothing to scan; also skips loading the dictionary
    dictionary = load_dictionary()
    word_tokens = [token.string for token in nlp(line)]
    word_lower = [x.lower().strip() for x in word_tokens]
    # replace calculated words with dictionary words if they exist
    dictionary_words = [dictionary_lookup(word, dictionary) for word in word_lower]
    calculated_words = [calculate_syllables(word, nlp) for word in word_lower]
    words = []
    for dict_word, calc_word in zip(dictionary_words, calculated_words):
        if calc_word[0] == '':
            words.append('\n')
        elif dict_word is None:
            words.append(calc_word)
        else:
            words.append(dict_word)
    return words
################################################################################
|
__author__ = 'apple'
from osgeo import ogr
import os

# Build a point shapefile containing the centroid of every feature in the
# input shapefile, copying all attribute fields across.

# Get the input Layer
inShapefile = "ne1/ne1.shp"
inDriver = ogr.GetDriverByName("ESRI Shapefile")
inDataSource = inDriver.Open(inShapefile, 0)
inLayer = inDataSource.GetLayer()

# Create the output Layer
outShapefile = "ne1/ne1_centroids.shp"
outDriver = ogr.GetDriverByName("ESRI Shapefile")
# Remove output shapefile if it already exists
if os.path.exists(outShapefile):
    outDriver.DeleteDataSource(outShapefile)
# Create the output shapefile
outDataSource = outDriver.CreateDataSource(outShapefile)
outLayer = outDataSource.CreateLayer("ne1/ne1", geom_type=ogr.wkbPoint)

# Add input Layer Fields to the output Layer
inLayerDefn = inLayer.GetLayerDefn()
for i in range(inLayerDefn.GetFieldCount()):
    outLayer.CreateField(inLayerDefn.GetFieldDefn(i))

# Get the output Layer's Feature Definition
outLayerDefn = outLayer.GetLayerDefn()

# Add features to the ouput Layer
for i in range(inLayer.GetFeatureCount()):
    # Get the input Feature
    inFeature = inLayer.GetFeature(i)
    # Create output Feature
    outFeature = ogr.Feature(outLayerDefn)
    # FIX: the inner loop previously reused `i`, shadowing the outer
    # feature index -- harmless in Python's for-loop semantics but a
    # readability trap; use a distinct name.
    for j in range(outLayerDefn.GetFieldCount()):
        outFeature.SetField(outLayerDefn.GetFieldDefn(j).GetNameRef(),
                            inFeature.GetField(j))
    # Set geometry as centroid
    geom = inFeature.GetGeometryRef()
    outFeature.SetGeometry(geom.Centroid())
    # Add new feature to output Layer
    outLayer.CreateFeature(outFeature)
    # drop the reference so GDAL can release the feature promptly
    outFeature = None

# Close DataSources
inDataSource.Destroy()
outDataSource.Destroy()
#! env python
from boto import ec2

# Delete every unattached ("available") EBS volume in us-east-1.
connection = ec2.connect_to_region('us-east-1')
for volume in connection.get_all_volumes(filters={'status': 'available'}):
    connection.delete_volume(volume.id)
#Leo Li
#01/11/19
#Dart simulation
#1. The most you can walk is 22 steps. Once you go to twenty, it is highly likely to get a 51% or 52%
#2. Monte Carlo simulations are basically simulations that help people to predict something that involves a lot of randomness in it. It is hard to come up with a probability when randomness is involved, and instead of taking an average, the Monte Carlo simulation can give you a better and more precise prediction.
#3 The output of the simulation multiplied by 4 gives you pi!
import random

# Monte Carlo estimate of pi: draw 10000 points uniformly from the square
# [-2, 2] x [-2, 2] and count those landing inside the radius-2 circle.
# The hit fraction approximates pi/4, so multiplying by 4 estimates pi.
a = 0
for _ in range(10000):
    px = random.uniform(-2.0, 2.0)
    py = random.uniform(-2.0, 2.0)
    if (px * px + py * py) ** 0.5 <= 2:
        a += 1

print(a)
print(a / 10000 * 4)
print(a / 10000 * 100, "%")
|
# Python Coroutines and Tasks.
# Coroutines declared with async/await syntax is the preferred way of writing asyncio applications.
#
# To actually run a coroutine, asyncio provides three main mechanisms:
#
# > The asyncio.run() function to run the top-level entry point “main()” function.
# > Awaiting on a coroutine.
# > The asyncio.create_task() function to run coroutines concurrently as asyncio Tasks.
# Generator-based Coroutines:
#
# Note:
# Support for generator-based coroutines is deprecated and is scheduled for removal in Python 3.10.
# Generator-based coroutines predate async/await syntax. They are Python generators that use yield from expressions to await on Futures and other coroutines.
# Generator-based coroutines should be decorated with @asyncio.coroutine, although this is not enforced.
#
# @asyncio.coroutine:
# Decorator to mark generator-based coroutines.
#
# This decorator enables legacy generator-based coroutines to be compatible with async/await code:
#
# FIX: asyncio was used below but never imported, so decorating
# old_style_coroutine raised NameError at import time.
import asyncio

# NOTE(review): @asyncio.coroutine was deprecated in Python 3.8 and
# removed in 3.11; this legacy example only runs on older interpreters.
@asyncio.coroutine
def old_style_coroutine():
    """Legacy generator-based coroutine: sleeps for one second."""
    yield from asyncio.sleep(1)

async def main():
    """Native coroutine awaiting the legacy generator-based one."""
    await old_style_coroutine()
|
# Generated by Django 2.1.7 on 2019-02-12 14:27
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration for the `tusome` app: adds default Meta
    ordering to the author/book/category/publisher/sub_category models and
    renames the `book.Price` field to lowercase `price`."""

    dependencies = [
        ('tusome', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='author',
            options={'ordering': ['name']},
        ),
        migrations.AlterModelOptions(
            name='book',
            options={'ordering': ['title']},
        ),
        migrations.AlterModelOptions(
            name='category',
            options={'ordering': ['name']},
        ),
        migrations.AlterModelOptions(
            name='publisher',
            options={'ordering': ['name']},
        ),
        migrations.AlterModelOptions(
            name='sub_category',
            options={'ordering': ['name']},
        ),
        migrations.RenameField(
            model_name='book',
            old_name='Price',
            new_name='price',
        ),
    ]
|
# -*- coding: utf-8 -*-
from nose.tools import *
from mock import patch
from picrawler.rt_cores import RTCoreRequest
class TestRTCoreRequest(object):
    """Unit tests for picrawler.rt_cores.RTCoreRequest with the cloud
    backend mocked out."""

    @patch('picrawler.rt_cores.cloud')
    def test_request(self, mock_cloud):
        # request() should forward the constructor args to
        # cloud.realtime.request and return the id from the response dict.
        req = RTCoreRequest('c1', 10, 1)
        mock_cloud.realtime.request.return_value = dict(request_id=10)
        req_id = req.request()
        mock_cloud.realtime.request.assert_called_once_with('c1', 10, 1)
        eq_(10, req_id)

    @patch('picrawler.rt_cores.cloud')
    def test_release_rt_cores(self, mock_cloud):
        # release() should pass the previously obtained request id through
        # to cloud.realtime.release.
        req = RTCoreRequest('c1', 10, 1)
        mock_cloud.realtime.request.return_value = dict(request_id=10)
        req_id = req.request()
        req.release(req_id)
        mock_cloud.realtime.release.assert_called_once_with(req_id)

    @patch('picrawler.rt_cores.cloud')
    def test_with_with_statement(self, mock_cloud):
        # As a context manager: request on entry, release only after exit.
        with RTCoreRequest('c1', 10, 1):
            mock_cloud.realtime.request.assert_called_once_with('c1', 10, 1)
            eq_(0, mock_cloud.realtime.release.call_count)
        eq_(1, mock_cloud.realtime.release.call_count)
|
# incomplete solution
# Read n names, then answer q queries.  Each query (r, p) considers only
# the first r names: print the name sharing the longest common prefix with
# p (ties broken lexicographically); if no name shares any prefix beyond
# the empty one, print the lexicographically smallest of the first r names.
n = int(input())
names = []
for _ in range(n):
    names.append(input())

q = int(input())
while q:
    q -= 1
    r, p = input().split()
    r = int(r)

    # lexicographically smallest of the first r names (fallback answer)
    smallest = "zzzzzzzzzzz"
    for idx in range(r):
        if names[idx] < smallest:
            smallest = names[idx]

    best_len = 0
    best = ""
    for idx in range(r):
        name = names[idx]
        shared = 0
        for ca, cb in zip(name, p):
            if ca != cb:
                break
            shared += 1
        if shared > best_len:
            best_len = shared
            best = name
        elif shared == best_len and name < best:
            best = name

    print(best if best else smallest)
import os
from subprocess import Popen, PIPE
from glob import glob
import sys
import decimal
import ConfigParser
def read_meta():
    """
    Opens up any metadata*.txt files in the local directory or specified directory if there is one.
    It will search the files for the EPSG code defining the projection as well as the current zone.
    This data is saved in a dictionary named coords that is passed to the next functions.
    """
    # Change to passed directory, if applicable
    try:
        if len(sys.argv) == 2:
            os.chdir(sys.argv[1])
        elif len(sys.argv) > 2:
            print "Too many arguments passed."
            print "Usage: %s [directory]" % sys.argv[0]
            sys.exit(1)
    except OSError:
        print "Directory not found."
        print "Usage: %s [directory]" % sys.argv[0]
        sys.exit(1)
    # Try opening the file and searching
    try:
        path = os.path.join(os.getcwd(), "metadata*.txt")
        proj_info = dict()
        # Try to open the file and read contents
        for meta_file in glob(path):
            with open(meta_file) as meta:
                for line in meta.readlines():
                    # If the line contains the EPSG Code
                    if line.startswith("Horizontal Coordinates:"):
                        # the fixed slice grabs a 5-digit EPSG code from the
                        # end of the line -- assumes the exact metadata
                        # format; TODO confirm against a sample file
                        proj_info['region'] = line[-8:-3]
                        # UTM zone from the EPSG code's last two digits
                        # (presumably northern hemisphere, hence 'n')
                        proj_info['zone'] = proj_info['region'][-2:] + 'n'
                        print proj_info
    except IOError:
        print 'Unable to open file.'
        sys.exit(1)
    # Make sure that all of the data was read successfully
    if len(proj_info) != 2:
        print 'Coordinates not found. Verify that the metadata file exists in %s and is complete.' % os.getcwd()
        sys.exit(1)
    else:
        # Convert the DEMs to Daymet's projection
        # NOTE(review): the returned coords are never used or returned here
        coords = convert_opentopo(proj_info)
    # End read_meta()
def convert_opentopo(proj_info):
    """
    Creates another .tif file with the name .converted.tif for every .tif file located
    in the passed directory. The converted.tif file is supposed to be converted into the Daymet
    custom projection. Depends on the read_meta() method executing correctly. It doesn't check
    for the converted files before executing. Once the files are generated, script will call
    gdalinfo and try to parse the new coordinates from the output. The corner coordinates are
    returned in a list. Since everything is related to Daymet, it assumes the data is in the
    North and West hemispheres.

    NOTE(review): despite the docstring, this function currently returns
    nothing -- the gdalinfo/corner-coordinate step is not implemented here.
    """
    # Command string to convert the DEM files from Open Topography to DAYMET's projection
    command = ['gdalwarp', '-s_srs', 'EPSG:' + proj_info['region'], '-t_srs',
               "+proj=lcc +lat_1=25 +lat_2=60 +lat_0=42.5 +lon_0=-100 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs",
               '-r', 'bilinear', '-of', 'GTiff']
    # Need to merge subfiles for each DEM output into a single TIFF
    # D Infinity Catchment Area
    path = os.path.join(os.getcwd(), "scap*.tif")
    merge_files(glob(path), 'scap_total.tif')
    # D Infinity Flow
    path = os.path.join(os.getcwd(), "angp*.tif")
    merge_files(glob(path), 'angp_total.tif')
    # D Infinity Slope
    path = os.path.join(os.getcwd(), "slpp*.tif")
    merge_files(glob(path), 'slpp_total.tif')
    # Pit Remove
    path = os.path.join(os.getcwd(), 'felp*.tif')
    merge_files(glob(path), 'felp_total.tif')
    # Total Wetness Index
    path = os.path.join(os.getcwd(), 'twi*.tif')
    merge_files(glob(path), 'twi_total.tif')
    # D Infinity
    # Need to execute for each total .tif file from OpenTopo
    path = os.path.join(os.getcwd(), "*total.tif")
    for dem_file in glob(path):
        # Create the output file name
        dem_output = dem_file[:-4] + '.converted.tif'
        print "Creating %s" % dem_output
        # Add the filenames to the end of the list
        command.append(dem_file)
        command.append(dem_output)
        # Execute the gdalwarp command
        process = Popen(command, stdout=PIPE, shell=False)
        # Check for errors
        stdout, stderr = process.communicate()
        # NOTE(review): stderr is always None here (stderr is not piped to
        # PIPE), so this error check can never fire -- confirm intent
        if stderr is not None:
            print stderr
            sys.exit(1)
        # Remove the filenames for next iteration
        command.remove(dem_file)
        command.remove(dem_output)
    # End convert_opentopo()
def merge_files(path, output):
    """
    Merges the filenames passed into a single TIFF file with gdalwarp. Assumes that the system running the application has at least 2 GB of available memory.

    path   -- list of input TIFF file names (e.g. from glob())
    output -- name of the merged output TIFF
    """
    # Create the command to execute
    # --config GDAL_CACHEMAX 2000 / -wm 2000: let gdalwarp use ~2 GB of cache
    command = ['gdalwarp', '--config', 'GDAL_CACHEMAX', '2000', '-wm', '2000']
    command.extend(path)
    command.append(output)
    print command
    # Execute the command
    # NOTE(review): stderr is not piped and the result is never checked, so
    # gdalwarp failures pass silently here
    process = Popen(command, stdout=PIPE, shell=False)
    stdout, stderr = process.communicate()
    # End merge_files()
read_meta()
|
import torch
import numpy as np
import pickle

# For each (layers, rows, cols) shape, build an int64 tensor whose entry
# [layer][i][j] equals i*4*cols + 2*j (ids advance by 2 per column and skip
# another 2*cols at every row boundary), identical across layers, and
# pickle it to tensors/<layers>tensor.
sizes = [[1024, 8, 10], [512, 16, 20], [256, 32, 40], [128, 64, 80]]
# sizes = [[8, 10], [16, 20], [32, 40], [64, 80], [128, 160]]
for layers, rows, cols in sizes:
    # Vectorized replacement for the original triple Python loop: one 2-D
    # grid broadcast over the layer axis (all layers held the same values).
    grid = (4 * cols) * np.arange(rows, dtype=np.int64)[:, None] \
        + 2 * np.arange(cols, dtype=np.int64)[None, :]
    tmp = np.broadcast_to(grid, (layers, rows, cols)).copy()
    tensor = torch.tensor(tmp, dtype=torch.int64)
    # FIX: use a context manager so the file handle is closed promptly
    # (the original opened each file without ever closing it).
    with open('tensors/' + str(layers) + 'tensor', 'wb') as fh:
        pickle.dump(tensor, fh)
|
import io
import os
import platform
import subprocess
import zipfile
import pandas as pd
import requests
from powersimdata.network.usa_tamu.constants.zones import abv2state
def download_demand_data(
    es=None, ta=None, fpath="", sz_path="C:/Program Files/7-Zip/7z.exe"
):
    """Download the NREL EFS base demand data for the requested
    electrification scenarios and technology advancements.

    :param set/list es: Electrification scenarios to download. Any of
        *'Reference'*, *'Medium'*, *'High'*, or *'All'*. Defaults to None,
        which selects all of them.
    :param set/list ta: Technology advancements to download. Any of
        *'Slow'*, *'Moderate'*, *'Rapid'*, or *'All'*. Defaults to None,
        which selects all of them.
    :param str fpath: Directory the NREL EFS data is downloaded to.
    :param str sz_path: Location of the 7-Zip executable on Windows machines.
        Defaults to *'C:/Program Files/7-Zip/7z.exe'*.
    :raises TypeError: if sz_path is not input as a str.
    """
    # Substitute the "everything" selection for the immutable defaults.
    es = {"All"} if es is None else es
    ta = {"All"} if ta is None else ta
    # Validate and normalize every input.
    es = _check_electrification_scenarios_for_download(es)
    ta = _check_technology_advancements_for_download(ta)
    fpath = _check_path(fpath)
    if not isinstance(sz_path, str):
        raise TypeError("The 7-Zip path must be input as a str.")
    # Fetch every (scenario, advancement) archive; keep each in-memory
    # ZipFile so extraction can first be attempted with Python's zipfile.
    archives = {}
    for scenario in es:
        archives[scenario] = {}
        for advancement in ta:
            zip_name = f"EFSLoadProfile_{scenario}_{advancement}.zip"
            url = f"https://data.nrel.gov/system/files/126/{zip_name}"
            archives[scenario][advancement] = _download_data(zip_name, url, fpath)
    # Extract the .csv from each archive. zf_works remembers whether the
    # zipfile module handled the first archive, so later archives can skip
    # a doomed attempt.
    zf_works = True
    for scenario in es:
        for advancement in ta:
            zf_works = _extract_data(
                archives[scenario][advancement],
                zf_works,
                f"EFSLoadProfile_{scenario}_{advancement}.zip",
                f"EFSLoadProfile_{scenario}_{advancement}.csv",
                fpath,
                sz_path,
            )
def download_flexibility_data(
    es=None, fpath="", sz_path="C:/Program Files/7-Zip/7z.exe"
):
    """Download the NREL EFS flexibility data for the requested
    electrification scenarios.

    :param set/list es: Electrification scenarios to download. Any of
        *'Reference'*, *'Medium'*, *'High'*, or *'All'*. Defaults to None,
        which selects all of them.
    :param str fpath: Directory the NREL EFS data is downloaded to.
    :param str sz_path: Location of the 7-Zip executable on Windows machines.
        Defaults to *'C:/Program Files/7-Zip/7z.exe'*.
    :raises TypeError: if sz_path is not input as a str.
    """
    # Substitute the "everything" selection for the immutable default.
    es = {"All"} if es is None else es
    # Validate and normalize the inputs.
    es = _check_electrification_scenarios_for_download(es)
    fpath = _check_path(fpath)
    if not isinstance(sz_path, str):
        raise TypeError("The 7-Zip path must be input as a str.")
    # Fetch every requested archive; keep each in-memory ZipFile so
    # extraction can first be attempted with Python's zipfile module.
    archives = {}
    for scenario in es:
        zip_name = f"EFS Flexible Load Profiles - {scenario} Electrification.zip"
        url = f"https://data.nrel.gov/system/files/127/{zip_name}"
        archives[scenario] = _download_data(zip_name, url, fpath)
    # Extract each .csv. zf_works remembers whether the zipfile module
    # handled the first archive, so later archives can skip a doomed attempt.
    zf_works = True
    for scenario in es:
        zf_works = _extract_data(
            archives[scenario],
            zf_works,
            f"EFS Flexible Load Profiles - {scenario} Electrification.zip",
            f"EFSFlexLoadProfiles_{scenario}.csv",
            fpath,
            sz_path,
        )
def _check_electrification_scenarios_for_download(es):
    """Validate and normalize the electrification scenarios requested from
    :py:func:`download_demand_data` and :py:func:`download_flexibility_data`.

    :param set/list es: Electrification scenarios to check. Any of
        *'Reference'*, *'Medium'*, *'High'*, or *'All'* (case-insensitive).
    :return: (*set*) -- The normalized set of electrification scenarios.
    :raises TypeError: if es is not a set or list, or any element is not a str.
    :raises ValueError: if any element is not a recognized scenario.
    """
    valid = {"Reference", "Medium", "High"}
    # Guard the container type first, then the element types.
    if not isinstance(es, (set, list)):
        raise TypeError("Electrification scenarios must be input as a set or list.")
    if not all(isinstance(x, str) for x in es):
        raise TypeError("Individual electrification scenarios must be input as a str.")
    # Normalize capitalization; 'All' expands to every scenario.
    es = {x.capitalize() for x in es}
    if "All" in es:
        return set(valid)
    unknown = es - valid
    if unknown:
        raise ValueError(f'Invalid electrification scenarios: {", ".join(unknown)}')
    return es
def _check_technology_advancements_for_download(ta):
    """Checks the technology advancements input to :py:func:`download_demand_data` and
    :py:func:`download_flexibility_data`.
    :param set/list ta: The input technology advancements that will be checked. Can be
        any of: *'Slow'*, *'Moderate'*, *'Rapid'*, or *'All'*.
    :return: (*set*) -- The formatted set of technology advancements.
    :raises TypeError: if ta is not input as a set or list, or if the components of ta
        are not input as str.
    :raises ValueError: if the components of ta are not valid.
    """
    # Check that the input is of an appropriate type
    if not isinstance(ta, (set, list)):
        raise TypeError("Technology advancements must be input as a set or list.")
    # Check that the components of ta are str
    if not all(isinstance(x, str) for x in ta):
        raise TypeError("Individual technology advancements must be input as a str.")
    # Reformat components of ta: normalize capitalization; 'All' expands to
    # every advancement.
    ta = {x.capitalize() for x in ta}
    if "All" in ta:
        ta = {"Slow", "Moderate", "Rapid"}
    # Check that the components of ta are valid
    if not ta.issubset({"Slow", "Moderate", "Rapid"}):
        invalid_ta = ta - {"Slow", "Moderate", "Rapid"}
        # BUG FIX: the original message mislabeled these as "electrification
        # scenarios" (copy-paste from the sibling validator).
        raise ValueError(f'Invalid technology advancements: {", ".join(invalid_ta)}')
    # Return the reformatted ta
    return ta
def _check_path(fpath):
    """Validate the file path given to the download and partition functions.

    :param str fpath: The input file path; an empty string means "use the
        current working directory".
    :return: (*str*) -- The usable file path.
    :raises TypeError: if fpath is not input as a str.
    """
    if not isinstance(fpath, str):
        raise TypeError("The file path must be input as a str.")
    # An empty path falls back to the current working directory.
    return fpath if fpath else os.getcwd()
def _download_data(zip_name, url, fpath):
    """Downloads the specified NREL EFS data for :py:func:`download_demand_data` and
    :py:func:`download_flexibility_data`.
    :param str zip_name: The name of the specified .zip file.
    :param str url: The specified URL to access the desired .zip file.
    :param str fpath: The file path the .zip file is saved to.
    :return: (*zipfile.ZipFile*) -- The .zip file stored in memory for attempted
        extraction using Python's zipfile module.
    :raises requests.HTTPError: if the download request does not succeed
        (via raise_for_status).
    """
    # Save a local copy of the .zip file for extraction
    r = requests.get(url, stream=True)
    if r.status_code != requests.codes.ok:
        r.raise_for_status()
    # BUG FIX: write the archive into fpath — where _extract_data builds
    # zip_path and looks for it — instead of the current working directory,
    # which the original did (fpath was accepted but never used).
    with open(os.path.join(fpath, zip_name), "wb") as f:
        f.write(r.content)
    print(f"{zip_name} successfully downloaded!")
    # Return the data to try extracting with Python's zipfile module
    return zipfile.ZipFile(io.BytesIO(r.content))
def _extract_data(z, zf_works, zip_name, csv_name, fpath, sz_path):
    """Extracts the .csv file containing NREL EFS data from the downloaded .zip file.
    First attempts extraction using Python's zipfile module, then attempts other
    OS-dependent methods, as needed.
    :param zipfile.ZipFile z: The .zip file stored in memory for attempted extraction
        using Python's zipfile module.
    :param bool zf_works: An indicator flag that states whether or not Python's zipfile
        module works for extraction. True if Python's zipfile module works, else False.
    :param str zip_name: The name of the specified .zip file.
    :param str csv_name: The name of the .csv file contained within the .zip file.
    :param str fpath: The input file path.
    :param str sz_path: The file path on Windows machines that points to the 7-Zip tool.
    :return: (*bool*) -- The indicator flag that states whether or not Python's zipfile
        module works for extraction. This is returned to prevent checking Python's
        zipfile module if it does not work the first time (in the event multiple .zip
        files require extraction).
    :raises NotImplementedError: if Python's zipfile module cannot extract the .csv
        file.
    :raises OSError: if an OS other than Windows, macOS, or Linux is identified.
    """
    # Assign the path name of the .zip file
    zip_path = os.path.join(fpath, zip_name)
    try:
        if zf_works:
            # Try the zipfile module first
            z.extractall(fpath)
            print(f"{csv_name} successfully extracted!")
        else:
            # Bypass the zipfile module if it does not work on the first file.
            # NotImplementedError is reused here because that is what zipfile
            # itself raises for unsupported compression methods.
            raise NotImplementedError
    except NotImplementedError:
        if zf_works:
            print(
                f"{zip_name} is compressed using a method that is not supported by the "
                + "zipfile module."
            )
            print("Trying other extraction methods supported by your OS.")
            zf_works = False
        # Try other extraction methods depending on operating system
        if platform.system() == "Windows":
            try:
                # Windows Command Line does not support this type of compression
                # Try using 7-Zip, if it is installed in the specified location
                if not os.path.isfile(sz_path):
                    print(
                        "7-Zip is not in this directory or is not installed. "
                        + "Extract all data manually (refer to documentation)."
                    )
                    # NOTE(review): this bare ``return`` hands None back to the
                    # caller, which stores it in zf_works — falsy, so later
                    # archives skip the zipfile attempt, but the documented
                    # bool return type is violated. Confirm intent.
                    return
                subprocess.check_call(
                    f'cmd /c powershell -c & "{sz_path}" x "{zip_path}" -o"{fpath}" -y'
                )
                os.remove(zip_path)
                print(f"{csv_name} successfully extracted!")
            except subprocess.CalledProcessError:
                print(f"{csv_name} could not be extracted using 7-Zip.")
                print("Extract all data manually (refer to documentation).")
                # NOTE(review): also returns None rather than a bool (see above).
                return
        elif platform.system() in {"Darwin", "Linux"}:
            try:
                # Try unzipping using the Terminal
                subprocess.check_call(["unzip", "-o", zip_path, "-d", fpath])
                os.remove(zip_path)
                print(f"{csv_name} successfully extracted!")
            except subprocess.CalledProcessError:
                print(f"{csv_name} could not be extracted using the Terminal.")
                print("Extract all data manually (refer to documentation).")
                # NOTE(review): also returns None rather than a bool (see above).
                return
        else:
            raise OSError("This operating system is not supported.")
    # Return the flag that indicates whether or not Python's zipfile module works
    return zf_works
def partition_demand_by_sector(es, ta, year, sect=None, fpath="", save=False):
    """Creates .csv files for each of the specified sectors given a specified
    electrification scenario and technology advancement.
    :param str es: An electrification scenario. Can choose one of: *'Reference'*,
        *'Medium'*, or *'High'*.
    :param str ta: A technology advancement. Can choose one of: *'Slow'*, *'Moderate'*,
        or *'Rapid'*.
    :param int year: The selected year's worth of demand data. Can choose one of: 2018,
        2020, 2024, 2030, 2040, or 2050.
    :param set/list sect: The sectors for which .csv files are to be created. Can
        choose any of: *'Transportation'*, *'Residential'*, *'Commercial'*,
        *'Industrial'*, or *'All'*. Defaults to None.
    :param str fpath: The file path where the demand data might be saved and to where
        the sectoral data will be saved.
    :param bool save: Determines whether or not the .csv file is saved. Defaults to
        False. If the file is saved, it is saved to the same location as fpath.
    :return: (*dict*) -- A dict of pandas.DataFrame objects that contain demand data
        for each state and time step in the specified sectors.
    :raises TypeError: if save is not input as a bool.
    """
    # Account for the immutable default parameters
    if sect is None:
        sect = {"All"}
    # Check the inputs
    es = _check_electrification_scenarios_for_partition(es)
    ta = _check_technology_advancements_for_partition(ta)
    _check_year(year)
    sect = _check_sectors(sect)
    fpath = _check_path(fpath)
    if not isinstance(save, bool):
        raise TypeError("save must be input as a bool.")
    # Specify the file name and path
    csv_name = f"EFSLoadProfile_{es}_{ta}.csv"
    csv_path = os.path.join(fpath, csv_name)
    # Download the specified NREL EFS dataset if it is not already downloaded
    if not os.path.isfile(csv_path):
        download_demand_data({es}, {ta}, fpath)
    # Load the data from the downloaded .csv file as a DataFrame
    df = pd.read_csv(csv_path)
    # Trim the DataFrame for only the specified year
    df = df.loc[df["Year"] == year]
    # Drop unnecessary "Year", "Electrification", and "TechnologyAdvancement" columns
    df.drop(columns=["Year", "Electrification", "TechnologyAdvancement"], inplace=True)
    # Sum by sector and state
    df = df.groupby(["LocalHourID", "State", "Sector"], as_index=False).sum()
    # Split the demand DataFrame by sector: one wide DataFrame per sector,
    # hours as rows and (after unstacking) one column per state.
    sect_dem = {
        i: df[df["Sector"] == i]
        .drop(columns=["Sector"])
        .groupby(["LocalHourID", "State"], sort=True)
        .sum()
        .unstack()
        for i in sect
    }
    # Flatten the two-level columns down to plain state abbreviations.
    sect_dem = {
        i: sect_dem[i].set_axis(
            sect_dem[i].columns.get_level_values("State"), axis="columns"
        )
        for i in sect
    }
    # Add extra day's worth of demand to account for leap year
    sect_dem = {i: account_for_leap_year(sect_dem[i]) for i in sect}
    # Include the appropriate timestamps for the local time (with year=2016)
    # NOTE(review): pd.date_range's ``closed`` keyword was deprecated in
    # pandas 1.4 and removed in 2.0 (replaced by ``inclusive``) — confirm the
    # pandas version this project pins.
    sect_dem = {
        i: sect_dem[i].set_axis(
            pd.date_range("2016-01-01", "2017-01-01", freq="H", closed="left"),
            axis="index",
        )
        for i in sect
    }
    sect_dem = {i: sect_dem[i].rename_axis("Local Time", axis="index") for i in sect}
    # Save the sectoral DataFrames to .csv files, if desired
    if save:
        for i in sect:
            new_csv_name = f"{i}_Demand_{es}_{ta}_{year}.csv"
            new_csv_path = os.path.join(fpath, new_csv_name)
            sect_dem[i].to_csv(new_csv_path)
    # Return the dictionary containing the formatted sectoral demand data
    return sect_dem
def partition_flexibility_by_sector(
    es, ta, flex, year, sect=None, fpath="", save=False
):
    """Creates .csv files for each of the specified sectors given a specified
    electrification scenario and technology advancement.
    :param str es: An electrification scenario. Can choose one of: *'Reference'*,
        *'Medium'*, or *'High'*.
    :param str ta: A technology advancement. Can choose one of: *'Slow'*, *'Moderate'*,
        or *'Rapid'*.
    :param str flex: A flexibility scenario. Can choose one of: *'Base'* or
        *'Enhanced'*.
    :param int year: The selected year's worth of demand data. Can choose one of: 2018,
        2020, 2024, 2030, 2040, or 2050.
    :param set/list sect: The sectors for which .csv files are to be created. Can
        choose any of: *'Transportation'*, *'Residential'*, *'Commercial'*,
        *'Industrial'*, or *'All'*. Defaults to None.
    :param str fpath: The file path where the demand data might be saved and to where
        the sectoral data will be saved.
    :param bool save: Determines whether or not the .csv file is saved. Defaults to
        False. If the file is saved, it is saved to the same location as fpath.
    :return: (*dict*) -- A dict of pandas.DataFrame objects that contain flexibility
        data for each state and time step in the specified sectors.
    :raises TypeError: if save is not input as a bool.
    """
    # Account for the immutable default parameters
    if sect is None:
        sect = {"All"}
    # Check the inputs
    es = _check_electrification_scenarios_for_partition(es)
    ta = _check_technology_advancements_for_partition(ta)
    flex = _check_flexibility_scenario(flex)
    _check_year(year)
    sect = _check_sectors(sect)
    fpath = _check_path(fpath)
    if not isinstance(save, bool):
        raise TypeError("save must be input as a bool.")
    # Specify the file name and path
    csv_name = f"EFSFlexLoadProfiles_{es}.csv"
    csv_path = os.path.join(fpath, csv_name)
    # Download the specified NREL EFS dataset if it is not already downloaded
    if not os.path.isfile(csv_path):
        download_flexibility_data({es}, fpath)
    # Load the data from the downloaded .csv file as a DataFrame
    df = pd.read_csv(csv_path)
    # Trim the DataFrame for only the specified year
    df = df.loc[df["Year"] == year]
    # Trim the DataFrame for only the specified technology advancement
    df = df.loc[df["TechnologyAdvancement"] == ta]
    # Trim the DataFrame for only the specified flexibility scenario
    df = df.loc[df["Flexibility"] == flex]
    # Drop unnecessary "Year", "Electrification", and "TechnologyAdvancement" columns
    df.drop(
        columns=["Year", "Electrification", "TechnologyAdvancement", "Flexibility"],
        inplace=True,
    )
    # Split the flexibility DataFrame by sector: one wide DataFrame per
    # sector, hours as rows and (after unstacking) one column per state.
    sect_flex = {
        i: df[df["Sector"] == i]
        .drop(columns=["Sector"])
        .groupby(["LocalHourID", "State"], sort=True)
        .sum()
        .unstack()
        for i in sect
    }
    # Flatten the two-level columns down to plain state abbreviations.
    sect_flex = {
        i: sect_flex[i].set_axis(
            sect_flex[i].columns.get_level_values("State"), axis="columns"
        )
        for i in sect
    }
    # Add extra day's worth of flexibility to account for leap year
    sect_flex = {i: account_for_leap_year(sect_flex[i]) for i in sect}
    # Include the appropriate timestamps for the local time (with year=2016)
    # NOTE(review): pd.date_range's ``closed`` keyword was deprecated in
    # pandas 1.4 and removed in 2.0 (replaced by ``inclusive``) — confirm the
    # pandas version this project pins.
    sect_flex = {
        i: sect_flex[i].set_axis(
            pd.date_range("2016-01-01", "2017-01-01", freq="H", closed="left"),
            axis="index",
        )
        for i in sect
    }
    sect_flex = {i: sect_flex[i].rename_axis("Local Time", axis="index") for i in sect}
    # Save the sectoral DataFrames to .csv files, if desired
    if save:
        for i in sect:
            new_csv_name = f"{i}_{flex}_Flexibility_{es}_{ta}_{year}.csv"
            new_csv_path = os.path.join(fpath, new_csv_name)
            sect_flex[i].to_csv(new_csv_path)
    # Return the dictionary containing the formatted sectoral flexibility data
    return sect_flex
def _check_electrification_scenarios_for_partition(es):
    """Validate the single electrification scenario given to
    :py:func:`partition_demand_by_sector` and
    :py:func:`partition_flexibility_by_sector`.

    :param str es: Electrification scenario; one of *'Reference'*, *'Medium'*,
        or *'High'* (case-insensitive).
    :return: (*str*) -- The capitalized electrification scenario.
    :raises TypeError: if es is not a str.
    :raises ValueError: if es is not a recognized scenario.
    """
    if not isinstance(es, str):
        raise TypeError("Electrification scenario must be input as a str.")
    # Normalize capitalization before validating.
    normalized = es.capitalize()
    if normalized not in {"Reference", "Medium", "High"}:
        raise ValueError(f"{normalized} is not a valid electrification scenario.")
    return normalized
def _check_technology_advancements_for_partition(ta):
    """Validate the single technology advancement given to
    :py:func:`partition_demand_by_sector` and
    :py:func:`partition_flexibility_by_sector`.

    :param str ta: Technology advancement; one of *'Slow'*, *'Moderate'*,
        or *'Rapid'* (case-insensitive).
    :return: (*str*) -- The capitalized technology advancement.
    :raises TypeError: if ta is not a str.
    :raises ValueError: if ta is not a recognized advancement.
    """
    if not isinstance(ta, str):
        raise TypeError("Technology advancement must be input as a str.")
    # Normalize capitalization before validating.
    normalized = ta.capitalize()
    if normalized not in {"Slow", "Moderate", "Rapid"}:
        raise ValueError(f"{normalized} is not a valid technology advancement.")
    return normalized
def _check_flexibility_scenario(flex):
    """Validate the flexibility scenario given to
    :py:func:`partition_flexibility_by_sector`.

    :param str flex: Flexibility scenario; *'Base'* or *'Enhanced'*
        (case-insensitive).
    :return: (*str*) -- The capitalized flexibility scenario.
    :raises TypeError: if flex is not a str.
    :raises ValueError: if flex is not a recognized scenario.
    """
    if not isinstance(flex, str):
        raise TypeError("Flexibility scenario must be input as a str.")
    # Normalize capitalization before validating.
    normalized = flex.capitalize()
    if normalized not in {"Base", "Enhanced"}:
        raise ValueError(f"{normalized} is not a valid flexibility scenario.")
    return normalized
def _check_year(year):
    """Validate the year given to :py:func:`partition_demand_by_sector` and
    :py:func:`partition_flexibility_by_sector`.

    :param int year: The selected year; one of 2018, 2020, 2024, 2030, 2040,
        or 2050.
    :raises TypeError: if year is not an int.
    :raises ValueError: if year is not one of the supported years.
    """
    if not isinstance(year, int):
        raise TypeError("The year must be input as an int.")
    # Only the years actually published in the EFS dataset are accepted.
    if year not in {2018, 2020, 2024, 2030, 2040, 2050}:
        raise ValueError(f"{year} is not a valid year.")
def _check_sectors(sect):
    """Validate and normalize the sectors given to
    :py:func:`partition_demand_by_sector` and
    :py:func:`partition_flexibility_by_sector`.

    :param set/list sect: Sectors to check. Any of *'Transportation'*,
        *'Residential'*, *'Commercial'*, *'Industrial'*, or *'All'*
        (case-insensitive).
    :return: (*set*) -- The normalized set of sectors.
    :raises TypeError: if sect is not a set or list, or any element is not a str.
    :raises ValueError: if any element is not a recognized sector.
    """
    valid = {"Transportation", "Residential", "Commercial", "Industrial"}
    # Guard the container type first, then the element types.
    if not isinstance(sect, (set, list)):
        raise TypeError("Sector inputs must be input as a set or list.")
    if not all(isinstance(x, str) for x in sect):
        raise TypeError("Each individual sector must be input as a str.")
    # Normalize capitalization; 'All' expands to every sector.
    sect = {x.capitalize() for x in sect}
    if "All" in sect:
        return set(valid)
    unknown = sect - valid
    if unknown:
        raise ValueError(f'Invalid sectors: {", ".join(unknown)}')
    return sect
def account_for_leap_year(df):
    """Creates an additional day's worth of demand data to account for the additional
    day that occurs during leap years. This function takes an 8760-hour DataFrame as
    input and returns an 8784-hour DataFrame. To prevent the weekly structure of the
    input DataFrame from being disrupted, the additional 24 hours of demand are merely
    added to the end of the input 8760-hour DataFrame for each state. The additional 24
    hours of demand are set equal to the demand profile for January 2nd because January
    2nd and December 31st occur on the same day of the week during a leap year.
    :param pandas.DataFrame df: DataFrame of sectoral demand data. Rows are each hour
        of the 8760 hours and Columns are the abbreviations of each state of the
        contiguous U.S.
    :return: (*pandas.DataFrame*) -- Sectoral demand data with 8784 hours and of a
        similar form to the input DataFrame.
    :raises ValueError: if the dimensions of the input DataFrame do not reflect 8760
        hours or 48 states.
    """
    # Check the elements of the input DataFrame
    if df.index.size != 8760:
        raise ValueError("The input DataFrame does not have 8760 hours.")
    if list(df.columns.values) != sorted(set(abv2state) - {"AK", "HI"}):
        raise ValueError("The input DataFrame does not include all 48 states.")
    # Get the demand for each state and each hour on January 2nd (hours 24-47)
    jan2_dem = df.iloc[24:48]
    # Append to the input DataFrame to create an 8784-hour profile.
    # FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent and also works on older pandas.
    new_df = pd.concat([df, jan2_dem], ignore_index=True)
    # Return the 8784-hour profile
    return new_df
|
import json
import io
import sys
import os
import time
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
def startup_check(file_path):
    """Ensure the JSON storage file at *file_path* exists and is read/writable.

    Creates the file with seed data when it is missing or inaccessible.

    :param str file_path: path of the JSON storage file.
    :return: True once the storage file is usable.
    """
    if os.path.isfile(file_path) and os.access(file_path, os.W_OK) and os.access(file_path, os.R_OK):
        # Check if file exists and if we have read/write access
        print("Storage file accessable.")
    else:
        print("File is missing and/or not read/writable, creating a new file...")
        # Seed data for the empty file
        start_data = {
            "speedtest": {
                "date": str(datetime.now()),
                "speed": -1
            }
        }
        # BUG FIX: the original appended to file_path + '/speed_tests.json',
        # treating file_path as a directory, so the file just checked for was
        # never created and repeated appends would produce invalid JSON.
        # Write fresh, valid JSON to file_path itself instead.
        with io.open(file_path, 'w') as db_file:
            json.dump(start_data, db_file)
    print("Startup checks successful!")
    return True
def render_page(url):
    """Load *url* in a headless Chrome browser and return the rendered HTML.

    :param str url: the page to load.
    :return: the page source after the JavaScript has had time to run.
    """
    options = Options()
    options.set_headless(headless=True)
    driver = webdriver.Chrome(chrome_options=options)
    try:
        print("Connecting to URL...")
        driver.get(url)
        print("Waiting for page contents to load...\n(Should take about 10 seconds)")
        # fast.com needs several seconds to run its measurement before the
        # DOM contains a final value.
        time.sleep(10)
        print("Web page content retrieval successful!")
        ret = driver.page_source
    finally:
        # FIX: always release the browser — the original leaked the Chrome
        # process if driver.get() or anything after it raised.
        print('Closing browser...')
        driver.close()
    return ret
def find_elements(page_source):
    """Parse the rendered fast.com page for the element holding the speed value.

    :param str page_source: HTML of the rendered page.
    :return: the matching bs4 element (or None if the div is absent),
        or -1 when parsing raises.
    """
    try:
        print('Parsing webpage source...')
        soup = BeautifulSoup(page_source, "html.parser")
        speed_value = soup.find('div', attrs={'class': 'speed-results-container'})
    except Exception:
        # FIX: narrowed from a bare ``except`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        print('Failed to parse webpage source...')
        speed_value = -1
    return speed_value
def store_data(data, file_path):
    # Placeholder: persisting the measured speed is not implemented yet; the
    # string below is a sketch of the intended JSON write.
    pass
    """
    with io.open(file_path, 'w') as db_file:
        entry = {
            "speedtest"
        }
        json.dump( db_file)
    """
if __name__ == "__main__":
    # NOTE(review): sys.argv[0] is this script's own path, not a user-supplied
    # argument — the storage path presumably should come from sys.argv[1].
    path = sys.argv[0]
    try:
        startup_check(path)
    except:
        # NOTE(review): bare except hides the actual failure, and execution
        # continues even though the storage file may be unusable.
        print("Failed to complete startup check.")
    target_url = "http://fast.com"
    page = render_page(target_url)
    contents = find_elements(page)
    # NOTE(review): find_elements can return -1, which has no .string — this
    # concatenation would then raise; verify a result element exists first.
    print("Your current download speed is: " + contents.string + "Mbps!")
    store_data(contents.string, path)
import sys
import os
import numpy as np
from PyQt5.QtCore import QTimer
from random import randint
from PyQt5 import QtCore, QtGui, QtWidgets
from xml.dom.minidom import *
from pathlib import Path
import queue
import copy
# block size
X_SIZE = 20
Y_SIZE = 20
# player size
X_PSIZE = 14
Y_PSIZE = 16
# bot size
X_BSIZE = 14
Y_BSIZE = 18
COL_UP = 1
COL_DOWN = 2
COL_RIGHT = 4
COL_LEFT = 8
KEY_UP = 1
KEY_DOWN = 2
KEY_RIGHT = 4
KEY_LEFT = 8
RES_X = 800
RES_Y = 800
class Window(QtWidgets.QWidget):
    """Main game window for 'bombelman'.

    Runs the game loop on a 30 ms QTimer, paints the map and actors, and
    turns keyboard events into the KEY_* bitmask consumed by handleKeys().
    NOTE(review): relies on module-level globals created elsewhere in this
    file: ``map`` (which shadows the builtin), ``player``, ``bots``,
    ``replay``, ``GUI``, plus helpers ``checkCollision``, ``bombCollision``,
    ``testCollision``, ``restart`` and class ``Bomb``.
    """
    def __init__(self):
        super(Window, self).__init__() # parent object
        self.setStyleSheet('QWidget { background: #D5D9A9 }')
        self.setFixedSize(RES_X, RES_Y)
        self.setWindowTitle('bombelman')
        self.show()
        self.unlock = 0  # collision-unlock bitmask (COL_* flags)
        self.frameCounter = 0  # frame index; timestamps recorded replay events
        self.replayMode = 0  # 1 while a recorded replay is being played back
        self.lf = 0  # resume index into replay.playerList during playback
        self.lf2 = 0  # resume index into replay.tileList during playback
        self.key = 0  # currently-held keys as a KEY_* bitmask
        # init timer: drives gameLoop() roughly every 30 ms (~33 fps)
        self.timer = QTimer()
        self.timer.timeout.connect(self.gameLoop)
        self.timer.start(30)
        #QtWidgets.QMenuBar
        #QtWidgets.QAction
        self.myQMenuBar = QtWidgets.QMenuBar(self)
        self.exitMenu = self.myQMenuBar.addMenu('File')
        self.exitAction = QtWidgets.QAction('Exit', self)
        self.exitAction.triggered.connect(QtWidgets.qApp.quit)
        self.exitMenu.addAction(self.exitAction)
    def gameLoop(self):
        """Advance one frame: move bots (live) or re-apply recorded events
        (replay), resolve collisions, apply held keys, and repaint."""
        if self.replayMode == 0:
            # Live play: advance every bot and resolve its collisions.
            for bot in bots:
                bot.moveBot()
                if checkCollision(player.rect, bot.rect):
                    restart()
                if bombCollision(bot):
                    # NOTE(review): removing from ``bots`` while iterating it
                    # skips the following bot this frame.
                    bots.remove(bot)
        elif self.replayMode:
            # NOTE(review): playerList entries are indexed [i][0..2] below,
            # so comparing frameCounter to playerList[-1] (a whole entry)
            # looks suspect — confirm the sentinel format.
            if self.frameCounter == replay.playerList[-1]:
                self.replayMode = 0
                self.frameCounter = 0
            for bot in bots:
                bot.moveBot()
            # Re-apply recorded tile changes stamped with this frame.
            for i in range(self.lf2, len(replay.tileList)):
                if replay.tileList[i][0] == self.frameCounter:
                    map.map[replay.tileList[i][1]][replay.tileList[i][2]].id = replay.tileList[i][3]
                else:
                    # Remember where to resume scanning next frame.
                    self.lf2 = i
                    break
            # Re-apply recorded player positions stamped with this frame.
            for i in range(self.lf, len(replay.playerList)):
                if replay.playerList[i][0] == self.frameCounter:
                    player.rect.setRect(replay.playerList[i][1], replay.playerList[i][2], X_PSIZE, Y_PSIZE)
                else:
                    self.lf = i
                    break
        if bombCollision(player):
            restart()
        self.handleKeys()
        self.repaint()
        self.frameCounter += 1
    def home(self):
        # NOTE(review): appears to be dead/experimental code; in PyQt5
        # QGraphicsRectItem lives in QtWidgets, so this call would likely
        # raise — confirm and remove.
        QtGui.QGraphicsRectItem()
    def paintEvent(self, event):
        """Qt paint hook: redraw the map, bots, and player."""
        qp = QtGui.QPainter()
        qp.begin(self)
        self.drawMap(qp)
        self.drawPlayer(qp)
        qp.end()
    def drawBlockN(self, qp, x, y):
        # Destructible block (id 1): orange.
        qp.fillRect(map.map[x][y].rect, QtGui.QColor(255, 200, 50, 160))
    def drawBlockD(self, qp, x, y):
        # Indestructible block (id 2): red.
        qp.fillRect(map.map[x][y].rect, QtGui.QColor(255, 50, 50, 160))
    def drawBomb(self, qp, x, y):
        # Bomb tile (id 3): black.
        qp.fillRect(map.map[x][y].rect, QtGui.QColor(0, 0, 0, 160))
    def drawBombExplode(self, qp, x, y):
        # Explosion tile (id 4): bright red.
        qp.fillRect(map.map[x][y].rect, QtGui.QColor(230, 20, 0, 160))
    def drawPlayer(self, qp):
        # Player: grey rectangle.
        qp.fillRect(player.rect, QtGui.QColor(90, 90, 90, 160))
    def drawBot(self, qp):
        # Bots: green rectangles.
        for bot in bots:
            #bot.moveBot()
            qp.fillRect(bot.rect, QtGui.QColor(50, 90, 50, 160))
    def drawMap(self, qp):
        """Draw the bots, then every non-empty tile according to its id."""
        self.drawBot(qp)
        for (x, y), value in np.ndenumerate(map.map):
            if value.id == 1: # destructible block
                self.drawBlockN(qp, x, y)
            elif value.id == 2: # indestructible block
                self.drawBlockD(qp, x, y)
            elif value.id == 3: # bomb
                self.drawBomb(qp, x, y)
            elif value.id == 4: # explode effect
                self.drawBombExplode(qp, x, y)
    def handleKeys(self):
        """Move the player 2 px per frame in the held direction, blocked by
        non-empty neighbouring tiles, and record actual moves for replay."""
        temp_x = player.rect.x()
        temp_y = player.rect.y()
        # if self.key & KEY_UP | KEY_RIGHT and not(self.unlock & COL_UP | COL_RIGHT):
        #     player.rect.setRect(player.rect.x() + 2, player.rect.y() - 2, X_PSIZE, Y_PSIZE)
        # elif self.key & KEY_DOWN | KEY_RIGHT and not(self.unlock & COL_DOWN | COL_RIGHT):
        #     player.rect.setRect(player.rect.x() + 2, player.rect.y() + 2, X_PSIZE, Y_PSIZE)
        # elif self.key & KEY_DOWN | KEY_LEFT and not(self.unlock & COL_DOWN | COL_LEFT):
        #     player.rect.setRect(player.rect.x() - 2, player.rect.y() + 2, X_PSIZE, Y_PSIZE)
        # elif self.key & KEY_UP | KEY_LEFT and not (self.unlock & COL_DOWN | COL_LEFT):
        #     player.rect.setRect(player.rect.x() - 2, player.rect.y() - 2, X_PSIZE, Y_PSIZE)
        if self.key & KEY_UP:
            # NOTE(review): the loop variable ``i`` is unused and
            # testCollision's result is discarded — the neighbour sweep was
            # presumably meant to index with ``i``; confirm.
            for i in range(player.grid_x-1, player.grid_x+2):
                testCollision(player.rect, map.map[player.grid_x][player.grid_y - 1].rect)
                if map.map[player.grid_x][player.grid_y-1].id != 0:
                    if not player.rect.top() >= map.map[player.grid_x][player.grid_y-1].rect.bottom()+2:
                        return
            player.rect.setRect(player.rect.x(), player.rect.y() - 2, X_PSIZE, Y_PSIZE)
        elif self.key & KEY_DOWN:
            for i in range(player.grid_x - 1, player.grid_x + 2):
                if map.map[player.grid_x][player.grid_y+1].id != 0:
                    if not player.rect.bottom() <= map.map[player.grid_x][player.grid_y+1].rect.top() - 2:
                        return
            player.rect.setRect(player.rect.x(), player.rect.y() + 2, X_PSIZE, Y_PSIZE)
        elif self.key & KEY_RIGHT:
            #for i in range(player.grid_y - 1, player.grid_y + 2):
            if map.map[player.grid_x+1][player.grid_y].id != 0:
                if not player.rect.right() + 2 <= map.map[player.grid_x+1][player.grid_y].rect.left():
                    return
            player.rect.setRect(player.rect.x() + 2, player.rect.y(), X_PSIZE, Y_PSIZE)
        elif self.key & KEY_LEFT:
            #for i in range(player.grid_y - 1, player.grid_y + 2):
            if map.map[player.grid_x - 1][player.grid_y].id != 0:
                if not player.rect.left() - 2 >= map.map[player.grid_x-1][player.grid_y].rect.right():
                    return
            player.rect.setRect(player.rect.x() - 2, player.rect.y(), X_PSIZE, Y_PSIZE)
        if temp_x != player.rect.x() or temp_y != player.rect.y():
            # The player actually moved: record a replay event stamped with
            # the current frame.
            replay.playerNode = replay.doc.createElement('player')
            replay.root.appendChild(replay.addPlayer(replay.playerNode, player, self.frameCounter))
    def keyReleaseEvent(self, e):
        """Clear the released arrow key's bit from the key bitmask."""
        if e.key() == QtCore.Qt.Key_Up:
            self.key &= ~KEY_UP
        elif e.key() == QtCore.Qt.Key_Down:
            self.key &= ~KEY_DOWN
        elif e.key() == QtCore.Qt.Key_Left:
            self.key &= ~KEY_LEFT
        elif e.key() == QtCore.Qt.Key_Right:
            self.key &= ~KEY_RIGHT
        #self.handleKeys()
    def keyPressEvent(self, e):
        """Set arrow-key bits; Space drops a bomb; L/S load/save the map;
        K saves the replay and J starts replay playback."""
        if e.key() == QtCore.Qt.Key_Up:
            self.key |= KEY_UP
        elif e.key() == QtCore.Qt.Key_Down:
            self.key |= KEY_DOWN
        elif e.key() == QtCore.Qt.Key_Left:
            self.key |= KEY_LEFT
        elif e.key() == QtCore.Qt.Key_Right:
            self.key |= KEY_RIGHT
        elif e.key() == QtCore.Qt.Key_Space:
            # Only one player bomb may exist at a time.
            if len(player.bombList) == 0:
                player.bombList.append(Bomb(player.grid_x, player.grid_y))
                replay.tileNode = replay.doc.createElement('tile')
                replay.root.appendChild(replay.addTile(replay.tileNode, player.grid_x, player.grid_y, 3, GUI.frameCounter))
        elif e.key() == QtCore.Qt.Key_L:
            map.loadMap(None)
        elif e.key() == QtCore.Qt.Key_S:
            map.saveMap()
        elif e.key() == QtCore.Qt.Key_K:
            replay.save()
        elif e.key() == QtCore.Qt.Key_J:
            if(replay.load()):
                self.frameCounter = 0
                self.replayMode = 1
class QRectColor:
    """A single map tile: a grid-aligned pixel rectangle, a type id, and a
    base color matching the window background."""

    def __init__(self, x, y, id):
        # Tile type identifier (0 = empty; other ids are drawn by the window).
        self.id = id
        # Pixel-space rectangle covering grid cell (x, y).
        self.rect = QtCore.QRect(X_SIZE * x, Y_SIZE * y, X_SIZE, Y_SIZE)
        # Default tile color.
        self.color = QtGui.QColor(213, 217, 169, 160)
class Map:
    """Grid of QRectColor tiles plus generation/load/save helpers.

    Tile ids: 0 empty, 1 destructible block, 2 indestructible block,
    3 bomb, 4 explosion (interpreted by Window.drawMap).
    NOTE(review): the helper methods hardcode a 40x40 grid regardless of the
    constructor's x/y arguments — confirm before using other sizes.
    """
    def __init__(self, x, y):
        #self.map = np.array([[QRectColor(i, j, 0) for j in range(x)] for i in range(y)])
        # 2-D list of tiles, indexed [column][row].
        self.map = [[QRectColor(i, j, 0) for j in range(x)] for i in range(y)]
        #self.generateRandomMap()
        self.generateMap()
    #TODO: make a table with the block types and colors
    def getCoordinates(self, x, y):
        # Returns the tile at grid position (x, y).
        return self.map[x][y] # returns the 4 corners of the map, top bottom left right
    def mapBase(self):
        """Build the indestructible border and clear the spawn corner."""
        #for (x, y), value in np.ndenumerate(self.map):
        for i in range(40):
            self.map[i][0].id = 2
            self.map[0][i].id = 2
            self.map[i][39].id = 2
            self.map[39][i].id = 2
        # Clear the player spawn area in the top-left corner.
        self.map[2][1].id = 0
        self.map[1][1].id = 0
        self.map[1][2].id = 0
    def generateRandomMap(self):
        """Scatter destructible blocks randomly, then overlay the pillar
        grid and clear the spawn corner."""
        for i in range(1600):
            x = randint(0, 39)
            y = randint(0, 39)
            self.map[x][y].id = 1
        # Indestructible pillars every 4 tiles.
        for (x, y), value in np.ndenumerate(self.map):
            if x % 4 == 2 and y % 4 == 2:
                self.map[x][y].id = 2
        # ugly
        self.map[2][1].id = 0
        self.map[1][1].id = 0
        self.map[1][2].id = 0
    def generateMap(self):
        """Deterministic layout: pillars every 4 tiles, destructible blocks
        on odd/odd tiles, then the border via mapBase()."""
        for (x, y), value in np.ndenumerate(self.map):
            if x % 4 == 2 and y % 4 == 2:
                self.map[x][y].id = 2
            if x % 2 == 1 and y % 2 == 1:
                self.map[x][y].id = 1
        self.mapBase()
    def saveMap(self):
        """Write tile ids to the first unused file named map1..map99."""
        # generate the file name
        path = os.getcwd()
        print(path)
        for i in range(1, 100): # change this
            filename = 'map'
            filename += str(i)
            # NOTE(review): the backslash separator assumes Windows —
            # os.path.join would be portable. The file handle ``f`` is also
            # never closed.
            if os.path.isfile(path + '\\' + filename):
                continue
            f = open(filename, 'w')
            for (x, y), value in np.ndenumerate(self.map):
                f.write(str(value.id) + ' ')
                # Newline after each row of 40 tiles.
                if y % 40 == 39:
                    f.write('\n')
            break
        print('saved')
    def loadMap(self, filename):
        """Read tile ids from *filename*; prompts on stdin when None."""
        if filename == None:
            filename = input('filename: ')
        y = 0
        try:
            with open(filename) as f:
                for line in f:
                    x = [int(i) for i in line.split()]
                    for xx in range(40):
                        self.map[xx][y].id = x[xx]
                    y += 1
            print('loaded')
        except FileNotFoundError:
            print("Wrong file or file path")
class Player:
    """The human-controlled player: a movable rect plus its bomb list."""

    # Class-wide counter so each player instance gets a unique id.
    next_id = 0

    def __init__(self, x, y):
        self.id = Player.next_id
        # Fix: advance the counter (mirrors Bot) so ids are actually unique.
        Player.next_id += 1
        self.aimode = 0
        self.rect = QtCore.QRect(x, y, X_PSIZE, Y_PSIZE)
        self.collision = 0
        self.direction = -1
        self.bombList = []
        self.q = queue  # NOTE(review): `queue` must come from module scope -- verify it is defined.
        self.bombRange = 0

    @property
    def grid_x(self):
        """Grid column of the player's centre."""
        return int((self.rect.x() + X_PSIZE / 2) / X_SIZE)

    @property
    def grid_y(self):
        """Grid row of the player's centre."""
        return int((self.rect.y() + Y_PSIZE / 2) / Y_SIZE)

    def move(self, x, y):
        """Step the player 2px toward grid target (x, y), one axis at a time."""
        x *= 20
        y *= 20
        speedx = 2
        speedy = 2
        if x < self.rect.x():
            speedx = -2
        if y < self.rect.y():
            speedy = -2
        # NOTE(review): setRect uses X_BSIZE/Y_BSIZE although the rect was
        # created with X_PSIZE/Y_PSIZE -- confirm the constants are equal or
        # that the resize is intended.
        if self.rect.x() != x:
            self.rect.setRect(self.rect.x() + speedx, self.rect.y(), X_BSIZE, Y_BSIZE)
        elif self.rect.y() != y:
            self.rect.setRect(self.rect.x(), self.rect.y() + speedy, X_BSIZE, Y_BSIZE)
class Bot:
    """A simple enemy that patrols horizontally between two x positions.

    :param x: initial x position (pixels)
    :param y: initial y position (pixels)
    :param range_a1: left bound of the horizontal patrol range (pixels)
    :param range_a2: right bound of the horizontal patrol range (pixels)
    """

    # Class-wide counter so each bot gets a unique id.
    next_id = 0

    def __init__(self, x, y, range_a1, range_a2):
        self.id = Bot.next_id
        Bot.next_id += 1
        self.x = x
        self.y = y
        self.rect = QtCore.QRect(self.x, self.y, X_PSIZE, Y_PSIZE)
        self.range_a1 = range_a1
        self.range_a2 = range_a2
        self.dirx = 1
        self.diry = 1

    @property
    def grid_x(self):
        """Grid column of the bot's centre."""
        return int((self.rect.x() + X_BSIZE / 2) / X_SIZE)

    @property
    def grid_y(self):
        """Grid row of the bot's centre."""
        return int((self.rect.y() + Y_BSIZE / 2) / Y_SIZE)

    def get_x(self):
        """Pixel x coordinate of the bot's centre."""
        # Fix: the original computed this value but never returned it (None).
        return self.rect.x() + X_BSIZE / 2

    def get_y(self):
        """Pixel y coordinate of the bot's centre."""
        # Fix: the original computed this value but never returned it (None).
        return self.rect.y() + Y_BSIZE / 2

    def move(self, speed, x, y):
        """Shift the bot diagonally by `speed` pixels (x and y are unused)."""
        self.rect.setRect(self.rect.x() + speed, self.rect.y() + speed, X_BSIZE, Y_BSIZE)

    def moveBot(self):
        """Advance one pixel along the patrol, reversing at the range bounds."""
        if self.rect.x() == self.range_a2:
            self.dirx = -1
        elif self.rect.x() == self.range_a1:
            self.dirx = 1
        self.rect.setRect(self.rect.x() + self.dirx, self.rect.y(), X_BSIZE, Y_BSIZE)
class Bomb:
    """A bomb on the grid: ticks for `tick_time` ms, then paints an explosion
    cross for `explode_anim` ms, then clears the burned tiles.

    Relies on module-level globals: `map` (tile grid), `replay` (event log),
    `player` (owner of the bomb list) and `GUI.frameCounter`.
    """
    # Timing constants shared by every bomb (milliseconds).
    tick_time = 3000
    explode_anim = 500
    def __init__(self, x, y):
        # Grid coordinates of the bomb.
        self.x = x
        self.y = y
        # Blast reach in tiles along each axis.
        self.bombRange = 2
        self.bomb_timer = QTimer()
        self.explode_timer = QTimer()
        # Mark the tile as a bomb (id 3) unless it already is one.
        if map.map[x][y].id != 3:
            map.map[x][y].id = 3
        self.bomb_timer.timeout.connect(self.bombTrigger)
        self.bomb_timer.start(Bomb.tick_time)
    def checkBomb(self, x, y):
        """Mark tile (x, y) as exploding; return True when the blast stops here.

        Empty tiles (0) burn and let the blast continue; destructible tiles (1)
        burn, are logged to the replay, and stop the blast; walls (2) stop it.
        """
        if map.map[x][y].id == 0:
            map.map[x][y].id = 4
        elif map.map[x][y].id == 1:
            map.map[x][y].id = 4
            replay.tileNode = replay.doc.createElement('tile')
            replay.root.appendChild(replay.addTile(replay.tileNode, x, y, 4, GUI.frameCounter))
            return True
        elif map.map[x][y].id == 2:
            return True
        return False
    def bombTrigger(self):
        """After `tick_time`: paint the explosion cross and start the timer
        that will later clear it."""
        self.bomb_timer.stop()
        self.explode_timer.timeout.connect(self.bombExplode)
        self.explode_timer.start(Bomb.explode_anim)
        # Spread the blast in each of the four directions until blocked.
        for x1 in range(1, self.bombRange):
            if self.checkBomb(self.x - x1, self.y):
                break
        for x1 in range(1, self.bombRange):
            if self.checkBomb(self.x + x1, self.y):
                break
        for x1 in range(1, self.bombRange):
            if self.checkBomb(self.x, self.y + x1):
                break
        for x1 in range(1, self.bombRange):
            if self.checkBomb(self.x, self.y - x1):
                break
    def bombExplode(self):
        """After `explode_anim`: clear every exploding tile (id 4) of the
        cross, log the changes to the replay, and release the bomb slot."""
        map.map[self.x][self.y].id = 0
        replay.tileNode = replay.doc.createElement('tile')
        replay.root.appendChild(replay.addTile(replay.tileNode, self.x, self.y, 0, GUI.frameCounter))
        for x1 in range(self.x - self.bombRange + 1, self.x + self.bombRange):
            if map.map[x1][self.y].id == 4:
                replay.tileNode = replay.doc.createElement('tile')
                replay.root.appendChild(replay.addTile(replay.tileNode, x1, self.y, 0, GUI.frameCounter))
                map.map[x1][self.y].id = 0
        for y1 in range(self.y - self.bombRange + 1, self.y + self.bombRange):
            if map.map[self.x][y1].id == 4:
                replay.tileNode = replay.doc.createElement('tile')
                replay.root.appendChild(replay.addTile(replay.tileNode, self.x, y1, 0, GUI.frameCounter))
                map.map[self.x][y1].id = 0
        # NOTE(review): this stops bomb_timer (already stopped) instead of
        # explode_timer, so the explode animation timer keeps firing -- confirm.
        self.bomb_timer.stop()
        player.bombList.pop(0)
class Replay:
    """Records bot spawns, player positions and tile changes into an XML
    document, and can save it to / reload it from 'data.xml'."""
    def __init__(self):
        # Parsed replay data filled by load(): [frame, ...] rows.
        self.playerList = []
        self.tileList = []
        self.doc = Document()
        self.root = self.doc.createElement('game')
        self.root.setAttribute('map', 'map4')
        # Record the initial bot positions and patrol ranges.
        for bot in bots:
            self.botNode = self.doc.createElement('bot')
            self.root.appendChild(self.addBot(self.botNode, bot))
        self.playerNode = self.doc.createElement('player')
        self.tileNode = self.doc.createElement('tile')
    def addBot(self, botNode, bot):
        """Fill `botNode` with a bot's id and 'x,y,range_a1,range_a2' text."""
        botNode.setAttribute('id', str(bot.id))
        position = str(bot.x) + ',' + str(bot.y) + ',' + str(bot.range_a1) + ',' + str(bot.range_a2)
        botNode.appendChild(self.doc.createTextNode(position))
        return botNode
    def addPlayer(self, playerNode, player, frame):
        """Fill `playerNode` with the frame number and the player's 'x,y' text."""
        playerNode.setAttribute('frame', str(frame))
        position = str(player.rect.x()) + ',' + str(player.rect.y())
        playerNode.appendChild(self.doc.createTextNode(position))
        return playerNode
    def addTile(self, tileNode, x, y, value, frame):
        """Fill `tileNode` with the frame number and 'x,y,tile-id' text."""
        tileNode.setAttribute('frame', str(frame))
        position = str(x) + ',' + str(y) + ',' + str(value)
        tileNode.appendChild(self.doc.createTextNode(position))
        return tileNode
    def save(self):
        """Write the accumulated XML document to 'data.xml' and release it."""
        self.root.appendChild(self.doc.createTextNode(''))
        self.doc.appendChild(self.root)
        # NOTE(review): the file handle passed to writexml is never closed
        # explicitly; it is left to garbage collection.
        self.doc.writexml(open('data.xml', 'w'),
                          indent=" ",
                          addindent=" ",
                          newl='\n')
        self.doc.unlink()
        print('saved')
    def load(self):
        """Load 'data.xml': rebuild `bots`, reload the map, and fill
        playerList/tileList. Return True on success, False if file missing."""
        my_file = Path("data.xml")
        if my_file.is_file():
            dom = parse("data.xml")
        else:
            print("Replay file doesn't exist")
            return False
        bots.clear()
        root = dom.documentElement
        map.loadMap(root.getAttribute('map'))
        for i in dom.childNodes[0].getElementsByTagName("bot"):
            print(i.getAttribute('id'))  # attribute value
            botParam = i.firstChild.data.split(',')  # text payload of the node
            bots.append(Bot(int(botParam[0]), int(botParam[1]), int(botParam[2]), int(botParam[3])))  # respawn the bot
        for i in dom.childNodes[0].getElementsByTagName("player"):
            playerxy = i.firstChild.data.split(',')
            self.playerList.append([int(i.getAttribute('frame')), int(playerxy[0]), int(playerxy[1])])
        print(self.playerList)
        for i in dom.childNodes[0].getElementsByTagName("tile"):
            i.getAttribute('frame')
            tilexyv = i.firstChild.data.split(',')
            self.tileList.append([int(i.getAttribute('frame')), int(tilexyv[0]), int(tilexyv[1]), int(tilexyv[2])])
        print(self.tileList)
        return True
class AI:
    """Minimal bomb-dropping helper operating on the global player/map state."""

    def __init__(self):
        # Drop a bomb immediately if the player overlaps its own grid tile.
        tile = map.map[player.grid_x][player.grid_y]
        if player.rect.intersects(tile.rect):
            player.bombList.append(Bomb(player.grid_x, player.grid_y))

    def bombTile(self):
        """Drop a bomb on the player's current grid tile."""
        player.bombList.append(Bomb(player.grid_x, player.grid_y))
def testCollision(a, b):
return a.right() >= b.left() and a.left() <= b.right() and a.bottom() >= b.top() and a.top() <= b.bottom();
def testPlayerCollision(a, b):
return a.right() >= b.left()-2 and a.left() <= b.right()+2 and a.bottom() >= b.top()-2 and a.top() <= b.bottom()+2;
def checkCollision(p1, p2):
    """Return a side code for the collision of rect p1 with rect p2:
    0 = no collision, 1 = from below, 2 = from the right, 3 = from the left,
    4 = from above.

    NOTE(review): whenever testCollision(p1, p2) is True,
    p1.bottom() >= p2.top() necessarily holds as well, so every collision
    returns 1 and the 2/3/4 branches appear unreachable -- confirm intended.
    """
    if not testCollision(p1, p2):
        return 0
    if p1.bottom() >= p2.top():  # from below
        return 1
    elif p1.right() >= p2.left():  # from the right
        return 2
    elif p1.left() >= p2.right():  # from the left
        return 3
    elif p1.top() <= p2.bottom():  # from above
        return 4
def bombCollision(p1):
    """Return True if p1's rect overlaps any exploding tile (id 4) in the
    3x3 grid neighborhood around p1's grid position."""
    neighborhood = (
        (gx, gy)
        for gx in range(p1.grid_x - 1, p1.grid_x + 2)
        for gy in range(p1.grid_y - 1, p1.grid_y + 2)
    )
    return any(
        p1.rect.intersects(map.map[gx][gy].rect) and map.map[gx][gy].id == 4
        for gx, gy in neighborhood
    )
def restart():
    """Reset the round: respawn bots, clear pending bombs, rebuild the map
    and put the player back at the spawn point, then repaint."""
    bots.clear()
    # TODO: append every bot that should respawn, not just the first one.
    bots.append(Bot(100, 320, 20, 300))
    player.bombList.clear()
    map.generateMap()
    player.rect.setRect(20, 20, X_PSIZE, Y_PSIZE)
    GUI.repaint()
# --- module entry point: build the game world and start the Qt event loop ---
bots = []
# Each bot patrols horizontally between its last two arguments.
bots.append(Bot(100, 320, 20, 300))  # spawns at (100, 320), patrols x in [20, 300]
bots.append(Bot(400, 400, 400, 500))
bots.append(Bot(80, 80, 80, 200))
map = Map(40, 40)  # 40x40 tile map (shadows the builtin `map`)
player = Player(20, 20)
app = QtWidgets.QApplication(sys.argv)
GUI = Window()
replay = Replay()  # must exist before any key events reference it
sys.exit(app.exec_())
|
import os
from celery import Celery
from celery.schedules import crontab
from django.apps import AppConfig
from django.conf import settings
# Point Django at the dev settings before the Celery app reads them.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'callisto.settings.dev')
# Module-level Celery application configured by CeleryConfig.ready() below.
app = Celery('callisto')
class CeleryConfig(AppConfig):
    """Django app config that wires up the module-level Celery app on startup."""
    name = 'taskapp'
    verbose_name = 'Celery Config'
    def ready(self):
        """Configure Celery from Django settings, discover tasks in all
        installed apps, and install the periodic beat schedule."""
        app.config_from_object('django.conf:settings')
        app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, force=True)
        # Poll the price API every 15 minutes.
        app.conf.beat_schedule = {
            'get_api_data': {
                'task': 'price_recording.get_api_data',
                'schedule': crontab(minute='*/15')
            },
        }
|
import numpy as np
import time

# Seed the module-level NumPy RNG so the simulated traffic is reproducible.
np.random.seed(1234)

# This class distinguishes the cats and the elders that were mixed together in
# the previous representation, and adds the ambulance issue. It also returns
# the reward as a vector instead of a scalar, because the situation is treated
# as a multi-objective problem.
class DrivingMixSplitMO(object):
    """Multi-objective driving simulator with cars, cats, elders and an
    ambulance.

    The agent occupies one of `num_lanes` lanes and at each step may go
    straight (0), move right (1) or move left (2). Objects spawn at the far
    end of random lanes and advance toward the agent; rewards are returned as
    a vector whose length depends on the policy type being trained.
    """

    def __init__(self, num_lanes=5, p_car=0.10, p_cat=0.07, p_elder=0.09,
                 p_ambulance=0.04, sim_len=300, ishuman_n=False,
                 ishuman_p=False, ishuman_m=False, ambulance_m=False,
                 training_policy='none'):
        """Configure the simulation.

        Parameters: spawn probabilities per step (`p_*`), episode length
        (`sim_len`), human-policy flags (`ishuman_*`), whether the ambulance
        is a separate objective (`ambulance_m`), and the ethical training
        policy name (`training_policy`: 'none', 'n_ethical', 'p_ethical'
        or 'm_ethical').
        """
        self.num_lanes = num_lanes
        self.road_length = 8
        self.car_speed = 1
        self.cat_speed = 3
        self.elder_speed = 3
        self.ambulance_speed = 2
        self.actions = range(3)  # 0 = straight, 1 = right, 2 = left
        self.p_car = p_car
        self.p_cat = p_cat
        self.p_elder = p_elder
        self.p_ambulance = p_ambulance
        self.sim_len = sim_len
        # Flavour of human policy being modelled (if any).
        self.ishuman_n = ishuman_n
        self.ishuman_p = ishuman_p
        self.ishuman_m = ishuman_m
        # Whether avoiding the ambulance is its own objective (vs a hard rule).
        self.ambulance_m = ambulance_m
        # Ethical policy type built when not human.
        self.training_policy = training_policy
        # Number of components of the reward vector depends on the policy type.
        if ishuman_n or ishuman_p or training_policy == 'n_ethical' or training_policy == 'p_ethical':
            # objectives: cats or elder people + cars + driving straight
            self.objectives = 3
        elif ishuman_m or training_policy == 'm_ethical':
            if ambulance_m:
                # objectives: ambulance + elders + cars + cats + driving straight
                self.objectives = 5
            else:
                # objectives: elders + cars + cats + driving straight
                self.objectives = 4
        else:
            # objectives: cars + driving straight
            self.objectives = 2

    def reset(self):
        """Reset the episode state and counters; return the initial state."""
        self.lane = 2
        self.timestamp = 0
        self.done = False
        self.num_collision = 0
        self.num_hit_cat = 0
        self.num_saved_elder = 0
        self.num_hit_ambulance = 0
        self.cars = {}
        self.cats = {}
        self.elders = {}
        self.ambulance = False
        self.ambulance_pos = -9  # off-map; within [-8, 8] while active (hidden from the agent)
        self.ambulance_dev = 0  # change of siren intensity between two timesteps
        self.ambulance_dev_cum = 0  # steps ambulance_dev kept the same sign (unused downstream)
        self.ambulance_intensity = 0  # siren intensity as perceived by the agent
        # Spawn counters used for performance statistics.
        self.cars_added = 0
        self.cats_added = 0
        self.elders_added = 0
        self.ambulance_added = 0
        for lane in range(self.num_lanes):
            self.cars[lane] = []
            self.cats[lane] = []
            self.elders[lane] = []
        self.state_generator()
        return self.state

    def checker(self, lane):
        """Append to self.state the position of the closest car, cat and elder
        on `lane`, using -1 for each object kind absent from the lane."""
        # Fix: the original had a stray lone ':' between the car and cat
        # checks, which was a syntax error; it is removed here.
        if len(self.cars[lane]) == 0:  # no car on this lane
            self.state += (-1,)
        else:
            self.state += (self.cars[lane][0],)
        if len(self.cats[lane]) == 0:  # no cat on this lane
            self.state += (-1,)
        else:
            self.state += (self.cats[lane][0],)
        if len(self.elders[lane]) == 0:  # no elder on this lane
            self.state += (-1,)
        else:
            self.state += (self.elders[lane][0],)

    def state_generator(self):
        """Build self.state: current lane, then (car, cat, elder) positions for
        the current/left/right lanes (-2 padding for a missing lane), then the
        ambulance siren intensity and its derivative."""
        self.state = (self.lane,)
        self.checker(self.lane)
        if self.lane > 0:  # there is a lane on the left
            self.checker(self.lane - 1)
        else:
            self.state += (-2, -2, -2)  # already on the first lane
        if self.lane < self.num_lanes - 1:  # there is a lane on the right
            self.checker(self.lane + 1)
        else:
            self.state += (-2, -2, -2)  # already on the last lane
        self.state += (self.ambulance_intensity, self.ambulance_dev)

    def clip(self, x):
        """Clamp a lane index into [0, num_lanes-1] so the car stays on the map."""
        return min(max(x, 0), self.num_lanes - 1)

    def step(self, action):
        """Advance the simulation by one step with `action` (0/1/2).

        Returns (state, reward_vector, done). Raises AssertionError for an
        action outside self.actions.
        """
        self.timestamp += 1
        if action not in self.actions:
            raise AssertionError
        if action == 1:  # move to the right lane
            next_lane = self.clip(self.lane + 1)
        elif action == 2:  # move to the left lane
            next_lane = self.clip(self.lane - 1)
        else:  # go straight
            next_lane = self.lane
        ### Move every object toward the agent to simulate the traffic
        for lane in range(self.num_lanes):
            self.cats[lane] = [pos - self.cat_speed for pos in self.cats[lane]]
            self.cars[lane] = [pos - self.car_speed for pos in self.cars[lane]]
            self.elders[lane] = [pos - self.elder_speed for pos in self.elders[lane]]
        if self.ambulance:
            prev_intensity = self.ambulance_intensity
            self.ambulance_pos += self.ambulance_speed
            # Siren intensity peaks (9) when the ambulance is level with the agent.
            if self.ambulance_pos < 0:
                self.ambulance_intensity = 9 + 1 * self.ambulance_pos
            else:
                self.ambulance_intensity = 9 - 1 * self.ambulance_pos
            prev_deriv = self.ambulance_dev
            self.ambulance_dev = self.ambulance_intensity - prev_intensity
            # Track how long the derivative keeps its sign.
            if self.ambulance_dev >= 0:
                if prev_deriv <= 0:
                    self.ambulance_dev_cum = 1
                else:
                    self.ambulance_dev_cum += 1
            else:
                if prev_deriv >= 0:
                    self.ambulance_dev_cum = 1
                else:
                    self.ambulance_dev_cum += 1
        ### Collect collision / rescue info after the action is applied
        cat_hit = 0
        car_hit = 0
        elder_saved = 0
        ambulance_hit = 0
        if self.lane != next_lane:  # lane change: both lanes are crossed
            for cat in self.cats[self.lane] + self.cats[next_lane]:
                if cat <= 0:
                    cat_hit += 1
            for car in self.cars[self.lane] + self.cars[next_lane]:
                if car <= 0:
                    car_hit += 1
            for elder in self.elders[self.lane] + self.elders[next_lane]:
                if elder <= 0:
                    elder_saved += 1
            self.lane = next_lane
        else:  # same checks, only the current lane is considered
            for cat in self.cats[self.lane]:
                if cat <= 0:
                    cat_hit += 1
            for car in self.cars[self.lane]:
                if car <= 0:
                    car_hit += 1
            for elder in self.elders[self.lane]:
                if elder <= 0:
                    elder_saved += 1
        # Hazardous zone: agent in lanes 0-1 while the ambulance is nearby.
        if (self.lane < 2 or next_lane < 2) and (self.ambulance_pos > -2 and self.ambulance_pos < 2):
            ambulance_hit += 1
        ### Remove the objects that left the grid
        for lane in range(self.num_lanes):
            self.cats[lane] = [pos for pos in self.cats[lane] if pos > 0]
            self.cars[lane] = [pos for pos in self.cars[lane] if pos > 0]
            self.elders[lane] = [pos for pos in self.elders[lane] if pos > 0]
        if self.ambulance_pos > 8:  # the ambulance left the grid
            self.ambulance_pos = -9
            self.ambulance = False
            self.ambulance_dev = 0
            self.ambulance_dev_cum = 0
            self.ambulance_intensity = 0
        ### Spawn cars, cats, elders and ambulances at the far end of the road,
        ### never two different objects on the same lane in the same step
        new_car_line = None
        new_cat_line = None
        if np.random.rand() < self.p_car:
            new_car_line = np.random.randint(5)
            self.cars[new_car_line].append(self.road_length)
            self.cars_added += 1
        if np.random.rand() < self.p_cat:
            if new_car_line is None:
                self.cats[np.random.randint(5)].append(self.road_length)
            else:
                # Renormalize the lane probabilities excluding the car's lane.
                av_lines_cat = [1.0 for i in range(5)]
                av_lines_cat[new_car_line] = 0.0
                sum_lines_cat = sum(av_lines_cat)
                for i in range(5):
                    if av_lines_cat[i] != 0.0:
                        av_lines_cat[i] = av_lines_cat[i] / sum_lines_cat
                new_cat_line = np.random.choice(5, 1, p=av_lines_cat)[0]
                self.cats[new_cat_line].append(self.road_length)
            self.cats_added += 1
        if np.random.rand() < self.p_elder:
            if new_car_line is None and new_cat_line is None:
                self.elders[np.random.randint(5)].append(self.road_length)
            else:
                # Renormalize excluding the lanes already used this step.
                av_lines_elder = [1.0 for i in range(5)]
                if new_car_line is not None:
                    av_lines_elder[new_car_line] = 0.0
                if new_cat_line is not None:
                    av_lines_elder[new_cat_line] = 0.0
                sum_lines_elder = sum(av_lines_elder)
                for i in range(5):
                    if av_lines_elder[i] != 0.0:
                        av_lines_elder[i] = av_lines_elder[i] / sum_lines_elder
                new_elder_line = np.random.choice(5, 1, p=av_lines_elder)[0]
                self.elders[new_elder_line].append(self.road_length)
            self.elders_added += 1
        # At most one ambulance on the road at a time.
        if np.random.rand() < self.p_ambulance and not self.ambulance:
            self.ambulance_pos = -8
            self.ambulance = True
            self.ambulance_added += 1
            self.ambulance_dev = 1
            self.ambulance_dev_cum = 1
            self.ambulance_intensity = 1
        ### Build the reward vector according to the policy type
        if self.ishuman_n:  # human "driving and avoiding": penalty for hitting
            reward = [-20*cat_hit, -1*car_hit, 0.5*(action == 0)]
        elif self.ishuman_p:  # human "driving and rescuing": bonus for rescuing
            reward = [20*elder_saved, -1*car_hit, 0.5*(action == 0)]
        elif self.ishuman_m:
            if self.ambulance_m:
                reward = [-50*ambulance_hit, 20*elder_saved, -20*car_hit, -20*cat_hit, 0.5*(action == 0)]
            else:
                reward = [20*elder_saved, -20*car_hit, -20*cat_hit, 0.5*(action == 0)]
        else:
            if self.training_policy == 'n_ethical':
                reward = [-20*cat_hit, -20*car_hit, 0.5*(action == 0)]
            elif self.training_policy == 'p_ethical':
                reward = [20*elder_saved, -20*car_hit, 0.5*(action == 0)]
            elif self.training_policy == 'm_ethical':
                if self.ambulance_m:
                    reward = [-50*ambulance_hit, 20*elder_saved, -20*car_hit, -20*cat_hit, 0.5*(action == 0)]
                else:
                    reward = [20*elder_saved, -20*car_hit, -20*cat_hit, 0.5*(action == 0)]
            else:
                # Classic agent: bigger penalty on car hits only.
                reward = [-20*car_hit, 0.5*(action == 0)]
        self.num_collision += car_hit
        self.num_hit_cat += cat_hit
        self.num_saved_elder += elder_saved
        self.num_hit_ambulance += ambulance_hit
        if self.timestamp >= self.sim_len:
            self.done = True
        self.state_generator()
        return self.state, reward, self.done

    def log(self):
        """Return cumulative (collisions, cats hit, elders saved, ambulance hits)."""
        return self.num_collision, self.num_hit_cat, self.num_saved_elder, self.num_hit_ambulance

    def log_added_elem(self):
        """Return cumulative counts of spawned (cars, cats, elders, ambulances)."""
        return self.cars_added, self.cats_added, self.elders_added, self.ambulance_added

    def ambulance_collision_prediction(self, action):
        """Predict whether `action` can put the agent in the ambulance danger
        zone (lanes 0-1 at peak siren intensity) within the next two steps."""
        if action not in self.actions:
            raise AssertionError
        if action == 1:  # move to the right lane
            next_lane = self.clip(self.lane + 1)
        elif action == 2:  # move to the left lane
            next_lane = self.clip(self.lane - 1)
        else:  # go straight
            next_lane = self.lane
        ambulance_hit = False
        if self.ambulance:
            new_ambulance_intensity = self.ambulance_intensity + self.ambulance_dev
            if next_lane < 2 and new_ambulance_intensity >= 8:
                return True
            else:
                # Look one more step ahead: a collision is only certain if
                # every follow-up action leads into the danger zone.
                new_ambulance_intensity += self.ambulance_dev
                actions_with_coll = []
                for a in self.actions:
                    if a == 1:
                        nnext_lane = self.clip(next_lane + 1)
                    elif a == 2:
                        nnext_lane = self.clip(next_lane - 1)
                    else:
                        nnext_lane = next_lane
                    if nnext_lane < 2 and new_ambulance_intensity >= 8:
                        actions_with_coll.append(a)
                if len(actions_with_coll) == len(self.actions):
                    return True
        return ambulance_hit
|
import torch as tc
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy

# Train a single logistic neuron on the XOR truth table. A lone linear layer
# cannot represent XOR; this script demonstrates that limitation.

device = 'cuda' if tc.cuda.is_available() else 'cpu'

# Fix the RNG so weight initialisation (and hence the run) is reproducible.
tc.manual_seed(777)
if device == 'cuda':
    tc.cuda.manual_seed_all(777)

# XOR inputs and targets.
X = tc.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]]).to(device)
Y = tc.FloatTensor([[0], [1], [1], [0]]).to(device)

# One linear layer followed by a sigmoid = a single logistic unit.
linear = nn.Linear(2, 1, bias=True)
sigmoid = nn.Sigmoid()
model = nn.Sequential(linear, sigmoid).to(device)

criterion = tc.nn.BCELoss().to(device)
optimizer = tc.optim.SGD(model.parameters(), lr=1)

for step in range(10001):
    optimizer.zero_grad()
    hypothesis = model(X)
    cost = criterion(hypothesis, Y)
    cost.backward()
    optimizer.step()
    if step % 100 == 0:
        print(step, cost.item())

# Evaluation: threshold the sigmoid output at 0.5.
with tc.no_grad():
    hypothesis = model(X)
    predicted = (hypothesis > 0.5).float()
    accuracy = (predicted == Y).float().mean()
    print('모델 출력값(hypothesis):', hypothesis.detach().cpu().numpy())
    print('모델 예측값(predicted):', predicted.detach().cpu().numpy())
    print('실제값(Y):', Y.cpu().numpy())
    print('정확도(accuracy):', accuracy.item())
|
import posixpath
import numpy as np
import pandas as pd
from wfdb.io import annotation
from wfdb.io import download
from wfdb.io.record import rdrecord
def sigavg(
    record_name,
    extension,
    pn_dir=None,
    return_df=False,
    start_range=-0.05,
    stop_range=0.05,
    ann_type="all",
    start_time=0,
    stop_time=-1,
    verbose=False,
):
    """
    A common problem in signal processing is to determine the shape of a
    recurring waveform in the presence of noise. If the waveform recurs
    periodically (for example, once per second) the signal can be divided into
    segments of an appropriate length (one second in this example), and the
    segments can be averaged to reduce the amplitude of any noise that is
    uncorrelated with the signal. Typically, noise is reduced by a factor of
    the square root of the number of segments included in the average. For
    physiologic signals, the waveforms of interest are usually not strictly
    periodic, however. This function averages such waveforms by defining
    segments (averaging windows) relative to the locations of waveform
    annotations. By default, all QRS (beat) annotations for the specified
    annotator are included.

    Parameters
    ----------
    record_name : str
        The name of the WFDB record to be read, without any file
        extensions. If the argument contains any path delimiter
        characters, the argument will be interpreted as PATH/BASE_RECORD.
        Both relative and absolute paths are accepted. If the `pn_dir`
        parameter is set, this parameter should contain just the base
        record name, and the files will be searched for remotely.
        Otherwise, the data files will be searched for in the local path.
    extension : str
        The annotation file extension of the annotator to read
        (e.g. 'atr' reads RECORD.atr).
    pn_dir : str, optional
        Option used to stream data from Physionet. The Physionet
        database directory from which to find the required record files.
        eg. For record '100' in 'http://physionet.org/content/mitdb'
        pn_dir='mitdb'.
    return_df : bool, optional
        Whether to return a Pandas dataframe (True) or just print the output
        (False).
    start_range : float, int, optional
        Set the measurement window relative to QRS annotations. Negative
        values correspond to offsets that precede the annotations. The default
        is -0.05 seconds.
    stop_range : float, int, optional
        Set the measurement window relative to QRS annotations. Negative
        values correspond to offsets that precede the annotations. The default
        is 0.05 seconds.
    ann_type : list[str], str, optional
        Include annotations of the specified types only (i.e. 'N'). Multiple
        types are also accepted (i.e. ['V','N']). The default is 'all' which
        means to include all QRS annotations.
    start_time : float, int, optional
        Begin at the specified time in record. The default is 0 which denotes
        the start of the record.
    stop_time : float, int, optional
        Process until the specified time in record. The default is -1 which
        denotes the end of the record.
    verbose : bool, optional
        Whether to print the headers (True) or not (False).

    Returns
    -------
    N/A : Pandas dataframe
        If `return_df` is set to True, return a Pandas dataframe representing
        the output from the original WFDB package. This is the same content as
        if `return_df` were set to False, just in dataframe form.
    """
    # Validate the window and time-range arguments up front.
    if start_range >= stop_range:
        raise Exception("`start_range` must be less than `stop_range`")
    if start_time == stop_time:
        raise Exception("`start_time` must be different than `stop_time`")
    if (stop_time != -1) and (start_time >= stop_time):
        raise Exception("`start_time` must be less than `stop_time`")
    if start_time < 0:
        raise Exception("`start_time` must be at least 0")
    if (stop_time != -1) and (stop_time <= 0):
        raise Exception("`stop_time` must be at least greater than 0")
    # Resolve a versionless PhysioNet directory to its current version.
    if (pn_dir is not None) and ("." not in pn_dir):
        dir_list = pn_dir.split("/")
        pn_dir = posixpath.join(
            dir_list[0], download.get_version(dir_list[0]), *dir_list[1:]
        )
    rec = rdrecord(record_name, pn_dir=pn_dir, physical=False)
    # NOTE(review): the annotation is read without pn_dir, i.e. locally even
    # when the record is streamed remotely -- confirm this is intended.
    ann = annotation.rdann(record_name, extension)
    if stop_time == -1:
        stop_time = max(ann.sample) / ann.fs
    # Restrict annotations to the requested time range (in samples).
    samp_start = int(start_time * ann.fs)
    samp_stop = int(stop_time * ann.fs)
    filtered_samples = ann.sample[
        (ann.sample >= samp_start) & (ann.sample <= samp_stop)
    ]
    # Sample-aligned time offsets of the averaging window around each beat.
    times = np.arange(
        int(start_range * rec.fs) / rec.fs,
        int(-(-stop_range // (1 / rec.fs))) / rec.fs,
        1 / rec.fs,
    )
    indices = np.rint(times * rec.fs).astype(np.int64)
    n_beats = 0
    # Running sum of raw (digital) signal values per window offset and channel.
    initial_sig_avgs = np.zeros((times.shape[0], rec.n_sig))
    all_symbols = [a.symbol for a in annotation.ann_labels]
    for samp in filtered_samples:
        samp_i = np.where(ann.sample == samp)[0][0]
        current_ann = ann.symbol[samp_i]
        # Skip annotations excluded by ann_type.
        if (ann_type != "all") and (
            ((type(ann_type) is str) and (current_ann != ann_type))
            or ((type(ann_type) is list) and (current_ann not in ann_type))
        ):
            continue
        # Skip non-QRS (and unknown) annotation symbols.
        try:
            if not annotation.is_qrs[all_symbols.index(current_ann)]:
                continue
        except ValueError:
            continue
        # Accumulate the window around this beat; out-of-range samples add 0.
        for c, i in enumerate(indices):
            for j in range(rec.n_sig):
                try:
                    initial_sig_avgs[c][j] += rec.d_signal[samp + i][j]
                except IndexError:
                    initial_sig_avgs[c][j] += 0
        n_beats += 1
    if n_beats < 1:
        raise Exception("No beats found")
    if verbose and not return_df:
        print(f"# Average of {n_beats} beats:")
        s = "{:>14}" * rec.n_sig
        print(f"# Time{s.format(*rec.sig_name)}")
        print(f"# sec{s.format(*rec.units)}")
    # Convert the summed digital values to mean physical values.
    final_sig_avgs = []
    for i, time in enumerate(times):
        sig_avgs = []
        for j in range(rec.n_sig):
            temp_sig_avg = initial_sig_avgs[i][j] / n_beats
            temp_sig_avg -= rec.baseline[j]
            temp_sig_avg /= rec.adc_gain[j]
            sig_avgs.append(round(temp_sig_avg, 5))
        final_sig_avgs.append(sig_avgs)
    df = pd.DataFrame(final_sig_avgs, columns=rec.sig_name)
    df.insert(0, "Time", np.around(times, decimals=5))
    if return_df:
        return df
    else:
        print(df.to_string(index=False, header=False, col_space=13))
|
import os
import sys
import time
import numpy as np
import autodisc as ad
from autodisc.representations.static.pytorchnnrepresentation.helper import DatasetHDF5
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torchvision.utils import save_image
import configuration
''' ---------------------------------------------
PERFORM EPOCH
-------------------------------------------------'''
def train_epoch(train_loader, model, optimizer):
    """Run one training epoch over `train_loader`.

    Parameters
    ----------
    train_loader : iterable
        Yields batches as dicts containing at least an 'image' tensor.
    model : torch.nn.Module
        Must implement train_loss(outputs, data) returning a dict of scalar
        loss tensors that includes the key 'total'.
    optimizer : torch.optim.Optimizer
        Optimizer over `model`'s parameters.

    Returns
    -------
    dict
        Mapping of loss name -> mean value over the epoch's batches.
    """
    model.train()
    losses = {}
    for data in train_loader:
        # Fix: torch.autograd.Variable has been a deprecated no-op since
        # PyTorch 0.4; tensors carry autograd state themselves.
        input_img = data['image']
        # forward
        outputs = model(input_img)
        batch_losses = model.train_loss(outputs, data)
        # backward on the aggregate loss
        loss = batch_losses['total']
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # accumulate each loss component for epoch-level averaging
        for k, v in batch_losses.items():
            losses.setdefault(k, []).append(v.data.item())
    return {k: np.mean(v) for k, v in losses.items()}
def valid_epoch(epoch, valid_loader, model, save_output_images, output_valid_reconstruction_folder):
    """Evaluate `model` over `valid_loader` without gradients.

    Returns a dict mapping each validation-loss name to its mean over the
    epoch. When `save_output_images` is set and `epoch` is a multiple of 50,
    also writes a grid of originals above reconstructions for the last batch.
    """
    model.eval()
    accumulated = {}
    losses = {}
    with torch.no_grad():
        for data in valid_loader:
            input_img = Variable(data['image'])
            # forward pass only; no backward in validation
            outputs = model(input_img)
            batch_losses = model.valid_losses(outputs, data)
            for name, value in batch_losses.items():
                accumulated.setdefault(name, []).append(value.data.item())
        for name, values in accumulated.items():
            losses[name] = np.mean(values)
        # Periodically dump originals (top row) above reconstructions
        # (bottom row) for the last processed batch.
        if save_output_images and epoch % 50 == 0:
            input_images = input_img.cpu().data
            output_images = torch.sigmoid(outputs['recon_x']).cpu().data
            n_images = data['image'].size()[0]
            vizu_tensor_list = (
                [input_images[n] for n in range(n_images)]
                + [output_images[n] for n in range(n_images)]
            )
            filename = os.path.join(output_valid_reconstruction_folder, 'Epoch{0}.png'.format(epoch))
            save_image(vizu_tensor_list, filename, nrow=n_images, padding=0)
    return losses
'''
-------------------------------------------------
TRAINING LOOP
-------------------------------------------------
'''
def train():
    """Full training driver: load config, datasets and model, then run the
    epoch loop, logging losses to files and checkpointing weights.

    Side effects: creates a 'training/' folder tree, appends to loss log
    files, writes reconstruction images and .pth checkpoints.
    NOTE(review): relies on module-level names `DatasetHDF5` and `ad` that
    are not imported in this view — confirm they come from elsewhere.
    """
    # configuration file
    print("Loading the configuration ... \n")
    config = configuration.Config()
    # training parameters
    model_type = config.model_type
    model_init_params = config.model_init_params
    img_size = model_init_params['input_size']
    n_epochs = config.n_epochs
    save_output_images = config.save_output_images
    # set seed
    np.random.seed(config.seed)
    # load datasets
    print("Loading the datasets ... \n")
    train_dataset = DatasetHDF5(filepath=config.dataset_filepath,
                                split='train',
                                img_size=img_size,
                                data_augmentation = config.data_augmentation)
    train_loader = DataLoader(train_dataset,
                              batch_size=config.train_batch_size,
                              shuffle=True)
    valid_dataset = DatasetHDF5(filepath=config.dataset_filepath,
                                split='valid',
                                img_size = img_size)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=config.valid_batch_size,
                              shuffle=True)
    print("Loading the model ... \n")
    # Model class is looked up by name so the config can select any
    # representation class exported by the project module.
    model_cls = getattr(ad.representations.static.pytorchnnrepresentation, model_type)
    model = model_cls (**model_init_params)
    if model.use_gpu:
        model = model.cuda()
    # Optional warm start from a saved checkpoint (disabled by default).
    load_weight = False
    weight_to_load_filename = ''
    if load_weight:
        print ("=> Loading saved model {0}".format(weight_to_load_filename))
        model.load_state_dict(torch.load(weight_to_load_filename))
    # optimizer
    learning_rate = 1e-3
    weight_decay = 1e-5
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    # output files
    output_training_folder = 'training'
    if os.path.exists(output_training_folder):
        print('WARNING: training folder already exists')
    else:
        os.makedirs(output_training_folder)
    output_valid_reconstruction_folder = os.path.join(output_training_folder, 'reconstruction_images')
    if not os.path.exists(output_valid_reconstruction_folder):
        os.makedirs(output_valid_reconstruction_folder)
    output_models_folder = os.path.join(output_training_folder, 'models')
    if not os.path.exists(output_models_folder):
        os.makedirs(output_models_folder)
    # NOTE(review): '.cvs' looks like a typo for '.csv' — kept as-is since
    # downstream tooling may already expect these exact filenames.
    train_filepath = os.path.join(output_training_folder, 'loss_train.cvs')
    train_file = open (train_filepath, 'a')
    train_file.write("Epoch\tloss\n")
    train_file.close()
    valid_filepath = os.path.join(output_training_folder, 'loss_valid.cvs')
    valid_file = open (valid_filepath, 'a')
    valid_file.write("Epoch\ttotal\tBCE\tKLD\tKLD_var\n")
    valid_file.close()
    # training loop
    best_valid_loss = sys.float_info.max
    print(" Start training ... \n")
    for epoch in range(n_epochs):
        # training
        tstart0 = time.time()
        train_losses = train_epoch (train_loader, model, optimizer)
        tend0 = time.time()
        # append this epoch's training losses to the log file
        train_file = open ( os.path.join(output_training_folder, 'loss_train.cvs'), 'a')
        train_file.write("Epoch: {0}".format(epoch))
        for k, v in train_losses.items():
            train_file.write("\t{0}: {1:.6f}".format(k,v))
        train_file.write("\n")
        train_file.close()
        # validation
        tstart1 = time.time()
        valid_losses = valid_epoch (epoch, valid_loader, model, save_output_images, output_valid_reconstruction_folder)
        tend1 = time.time()
        # append this epoch's validation losses to the log file
        valid_file = open ( os.path.join(output_training_folder, 'loss_valid.cvs'), 'a')
        valid_file.write("Epoch: {0}".format(epoch))
        for k, v in valid_losses.items():
            valid_file.write("\t{0}: {1:.6f}".format(k,v))
        valid_file.write("\n")
        valid_file.close()
        # print summary
        print("Epoch {0}: train loss {1:.6f} (time: {2} secs), valid loss {3:.6f} (time: {4} secs)\n".format(epoch, train_losses['total'], tend0-tstart0, valid_losses['total'], tend1-tstart1))
        model_name = type(model).__name__
        model_init_params = model.init_params
        # drop the bound 'self' captured by the model's init-params snapshot
        if 'self' in model_init_params:
            del model_init_params['self']
        model_state_dict = model.state_dict()
        optimizer_state_dict = optimizer.state_dict()
        # save current epoch weight file with optimizer if we want to relaunch training from that point
        network = {
            'epoch': epoch,
            'type': model_name,
            'init_params': model_init_params,
            'state_dict': model_state_dict,
            'optimizer': optimizer_state_dict,
        }
        torch.save(network , os.path.join (output_models_folder, 'current_weight_model.pth'))
        # save the best weights on the valid set for further inference
        valid_loss = valid_losses['total']
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            # best checkpoint omits the optimizer state (inference only)
            network = {
                'epoch': epoch,
                'type': model_name,
                'init_params': model_init_params,
                'state_dict': model_state_dict,
            }
            torch.save(network , os.path.join (output_models_folder, 'best_weight_model.pth'))
    # close dataset files
    train_dataset.close()
    valid_dataset.close()
if __name__ == "__main__":
    # Script entry point: run the full training loop.
    train()
|
"""
This module contains code related to
Think Python, 2nd Edition
by Allen Downey
http://thinkpython2.com
This is to complete the exercises in Chapter 11: Dictionaries in Think Python 2
Note: Although this is saved in a .py file, code was run on an interpreter to get results
Note: Using Python 3.9.0
"""
def rotate_letter(letter, amount):
    """Rotate one character through the alphabet (Caesar cipher).

    letter: single-character string to rotate
    amount: integer offset to apply (may be negative or > 26)
    return: the rotated character; non-alphabetic input is returned unchanged
    """
    if letter.islower():
        base = ord('a')
    elif letter.isupper():
        base = ord('A')
    else:
        # digits, punctuation, whitespace pass through untouched
        return letter
    # wrap within the 26-letter alphabet relative to the case's base
    return chr(base + (ord(letter) - base + amount) % 26)
def rotate_word(word, amount):
    """Apply a Caesar cipher to every character of a word.

    word: string to rotate
    amount: integer offset applied to each letter
    return: the rotated string
    """
    return ''.join(rotate_letter(ch, amount) for ch in word)
def make_word_dict():
    """Read the words in 'words.txt' and return a dictionary that contains
    the words as keys (values are None; the dict is used as a set).

    return: dict mapping each stripped, lowercased word to None

    Raises
    ------
    FileNotFoundError
        If 'words.txt' does not exist in the working directory.
    """
    # 'with' guarantees the file handle is closed even if reading fails
    # (the original leaked the handle on any exception before close()).
    with open('words.txt') as fin:
        return {line.strip().lower(): None for line in fin}
def rotate_pairs(word, word_dict):
    """Print every rotation of word (by 1 through 13) that is itself a word.

    Only half the alphabet is scanned because a rotation by k from one word
    is a rotation by 26-k from the other, so each pair prints once.

    word: string
    word_dict: dictionary with words as keys
    """
    for shift in range(1, 14):
        candidate = rotate_word(word, shift)
        if candidate in word_dict:
            print(word, shift, candidate)
if __name__ == '__main__':
    # Demo: for every word in the list, report its rotate-pairs.
    word_dict = make_word_dict()
    for word in word_dict:
        rotate_pairs(word, word_dict)
from __future__ import absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from program_synthesis.algolisp.dataset import data
from program_synthesis.algolisp.models import prepare_spec
from program_synthesis.common.modules import decoders
from program_synthesis.algolisp.models.modules import encoders
class Seq2Seq(nn.Module):
    """Plain GRU encoder feeding a sequence decoder (no attention).

    The encoder's final hidden state initializes the decoder.
    """

    def __init__(self, inp_vocab_size, out_vocab_size, args):
        super(Seq2Seq, self).__init__()
        self.args = args
        # placeholder slots are appended to the input vocabulary
        self.inp_embed = nn.Embedding(inp_vocab_size + args.num_placeholders, args.num_units)
        self.encoder = nn.GRU(args.num_units, args.num_units, args.num_encoder_layers, batch_first=True)
        # Same vocab flag
        self.decoder = decoders.SeqDecoder(out_vocab_size, args)

    def encode(self, inputs):
        """Embed the input tokens and run the GRU; return the final hidden state."""
        emb_inputs = self.inp_embed(data.replace_pad_with_end(inputs))
        # BUG FIX: the original read the bare name `args` here, which is not
        # defined in this scope (NameError at runtime); the layer count must
        # come from self.args like every other hyperparameter.
        init = Variable(torch.zeros(self.args.num_encoder_layers, emb_inputs.size(0), self.args.num_units))
        if self.args.cuda:
            init = init.cuda()
        _, hidden = self.encoder(emb_inputs, init)
        return hidden

    def forward(self, inputs, outputs):
        """Teacher-forced decode of `outputs` conditioned on encoded `inputs`."""
        hidden = self.encode(inputs)
        return self.decoder(data.replace_pad_with_end(outputs), init=hidden)

    def sample(self, inputs, sampler=decoders.argmax_sampler):
        """Autoregressive sampling from the decoder, seeded by the encoding."""
        hidden = self.encode(inputs)
        return self.decoder.sample(hidden=hidden, sampler=sampler)
class Seq2SeqAttn(nn.Module):
    """Sequence-to-sequence model with an attention-based decoder.

    Encodes a natural-language spec (and optionally IO examples, candidate
    code, and traces) via project encoder modules, then decodes code tokens
    with one of several attention decoders chosen by args.seq2seq_decoder.
    """

    def __init__(self, word_vocab_size, code_vocab_size, args):
        super(Seq2SeqAttn, self).__init__()
        self.args = args
        self.num_units = args.num_units
        self.num_placeholders = args.num_placeholders
        self.bidirectional = args.bidirectional
        self._cuda = args.cuda
        # Separate embeddings for the word (spec) vocabulary and the code
        # vocabulary; placeholder slots are appended to each.
        self.word_embed = nn.Embedding(
            word_vocab_size + self.num_placeholders, self.num_units)
        self.code_embed = nn.Embedding(
            code_vocab_size + self.num_placeholders, self.num_units)
        self.encoder = encoders.SpecEncoder(args)
        # Decoder memory width doubles when the encoder is bidirectional.
        num_directions = 2 if self.bidirectional else 1
        mem_dim = self.num_units * num_directions
        DECODERS = {
            'attn_decoder': decoders.SeqDecoderAttn,
            'multi_attn_decoder': decoders.SeqDecoderMultiAttn,
            'past_attn_decoder': decoders.SeqDecoderPastAttn,
            'luong_attn_decoder': decoders.SeqDecoderAttnLuong,
        }
        # Decoder shares the code embedding table with the encoder side.
        self.decoder = DECODERS[args.seq2seq_decoder](
            code_vocab_size, mem_dim, args, embed=self.code_embed)

    def encode_text(self, inputs):
        # inputs: PackedSequencePlus
        return self.encoder.text_encoder(inputs.apply(self.word_embed))

    def encode_io(self, input_keys, inputs, arg_nums, outputs):
        # Encode IO examples; keys are embedded with the code vocabulary.
        input_keys_embed = self.code_embed(input_keys)
        return self.encoder.io_encoder(input_keys_embed, inputs, arg_nums, outputs)

    def encode_code(self, code_seqs):
        # code_seqs: PackedSequencePlus
        return self.encoder.code_encoder(code_seqs.apply(self.code_embed))

    def encode_trace(self, prepared_trace):
        return self.encoder.trace_encoder(prepared_trace)

    def extend_tensors(
            self, code_info, batch_size, batch_ids):
        # TODO: should be a separate module probably with its parameters.
        # Pads a partial batch of code encodings up to batch_size, filling
        # missing slots with an "empty" encoding and zero memory.
        # NOTE(review): self.empty_candidate_code_hidden is not defined in
        # this class — presumably set by a subclass or external init; confirm.
        if code_info:
            code_enc, code_memory, orig_seq_lengths = code_info
            # Every item in the batch has code.
            if len(batch_ids) == batch_size:
                return code_enc, code_memory, orig_seq_lengths
            # Otherwise, stagger empty encodings/memories with real ones
            enc_to_stack = [self.empty_candidate_code_hidden] * batch_size
            memory_to_stack = [torch.zeros_like(code_memory[0])] * batch_size
            seq_lengths = [0] * batch_size
            for i, batch_id in enumerate(batch_ids):
                enc_to_stack[batch_id] = code_enc[i]
                memory_to_stack[batch_id] = code_memory[i]
                seq_lengths[batch_id] = orig_seq_lengths[i]
            enc = torch.stack(enc_to_stack)
            memory = torch.stack(memory_to_stack)
            return enc, memory, seq_lengths
        # No code at all in this batch: broadcast the empty encoding.
        enc = self.empty_candidate_code_hidden.expand(batch_size, -1)
        return enc, None, None

    def decode(self, hidden, memory_attn_mask, outputs):
        # Teacher-forced decoding of the target code tokens.
        return self.decoder(hidden, memory_attn_mask, data.replace_pad_with_end(outputs))

    def decode_token(self, t, hidden, memory_attn_mask, attentions=None):
        # Single-step decode (used by beam search / incremental sampling).
        return self.decoder.decode_token(t, hidden, memory_attn_mask, attentions)

    def sample(self, hidden, memory_attn_mask, attentions=None):
        return self.decoder.sample(hidden, memory_attn_mask, attentions=attentions)
|
# coding: utf-8
# Standard Python libraries
from pathlib import Path
from typing import Optional, Union
# http://www.numpy.org/
import numpy as np
# https://github.com/usnistgov/DataModelDict
from DataModelDict import DataModelDict as DM
# https://github.com/usnistgov/atomman
import atomman as am
import atomman.unitconvert as uc
from yabadaba import load_query
from . import CalculationSubset
from ..tools import dict_insert, aslist
from ..input import termtodict, dicttoterm
class AtommanSystemLoad(CalculationSubset):
    """Handles calculation terms for loading atomic systems using atomman"""

############################# Core properties #################################

    def __init__(self,
                 parent,
                 prefix: str = '',
                 templateheader: Optional[str] = None,
                 templatedescription: Optional[str] = None):
        """
        Initializes a calculation record subset object.

        Parameters
        ----------
        parent : iprPy.calculation.Calculation
            The parent calculation object that the subset object is part of.
            This allows for the subset methods to access parameters set to the
            calculation itself or other subsets.
        prefix : str, optional
            An optional prefix to add to metadata field names to allow for
            differentiating between multiple subsets of the same style within
            a single record
        templateheader : str, optional
            An alternate header to use in the template file for the subset.
        templatedescription : str, optional
            An alternate description of the subset for the templatedoc.
        """
        super().__init__(parent, prefix=prefix, templateheader=templateheader,
                         templatedescription=templatedescription)

        # Default state: nothing loaded yet; 'system_model' is the default
        # style.  Public assignments go through the property setters below.
        self.load_file = None
        self.load_style = 'system_model'
        self.__load_options = {}
        self.__load_content = None
        self.family = None
        self.symbols = None
        self.__ucell = None
        self.box_parameters = None
        self.composition = None

############################## Class attributes ################################

    @property
    def load_file(self) -> Optional[Path]:
        """Path or None: The path to the system load file"""
        return self.__load_file

    @load_file.setter
    def load_file(self, value: Union[str, Path, None]):
        # Normalize any non-None value to a pathlib.Path.
        if value is None:
            self.__load_file = None
        else:
            self.__load_file = Path(value)

    @property
    def load_style(self) -> str:
        """str: The load style, i.e. format, of the load file"""
        return self.__load_style

    @load_style.setter
    def load_style(self, value: Optional[str]):
        # None resets to the default 'system_model' style.
        if value is None:
            self.__load_style = 'system_model'
        else:
            self.__load_style = str(value)

    @property
    def load_options(self) -> dict:
        """dict: The extra options to use when loading the file"""
        return self.__load_options

    @property
    def load_content(self) -> Optional[str]:
        """str or None: File contents to use instead of reading the file"""
        return self.__load_content

    @property
    def family(self) -> Optional[str]:
        """str or None: The family name for the load file, i.e. the original prototype or reference record"""
        return self.__family

    @family.setter
    def family(self, value: Optional[str]):
        if value is None:
            self.__family = None
        else:
            self.__family = str(value)

    @property
    def symbols(self) -> Optional[list]:
        """list: The potential symbols to use"""
        return self.__symbols

    @symbols.setter
    def symbols(self, value: Union[str, list, None]):
        if value is None:
            self.__symbols = None
        else:
            # aslist wraps a single str into a one-element list.
            value = aslist(value)
            self.__symbols = value

    @property
    def box_parameters(self) -> Optional[list]:
        """list or None: The 3 or 6 box lattice parameters"""
        return self.__box_parameters

    @box_parameters.setter
    def box_parameters(self, value: Optional[list]):
        if value is None:
            self.__box_parameters = None
        else:
            value = aslist(value)
            # 3 values = orthorhombic lengths; 6 = lengths plus angles.
            assert len(value) == 3 or len(value) == 6
            self.__box_parameters = value
            # Re-scale immediately if a unit cell is already loaded.
            if self.__ucell is not None:
                self.scale_ucell()

    @property
    def ucell(self) -> am.System:
        """atomman.System: The system as loaded from the file"""
        # Lazily load on first access.
        if self.__ucell is None:
            self.load_ucell()
        return self.__ucell

    @property
    def composition(self) -> Optional[str]:
        """str or None: The composition of the loaded system"""
        # Lazily derive from ucell; a failure (e.g. file not yet available)
        # leaves composition as None.
        # NOTE(review): bare except silently swallows all errors here.
        if self.__composition is None:
            try:
                comp = self.ucell.composition
            except:
                pass
            else:
                self.composition = comp
        return self.__composition

    @composition.setter
    def composition(self, value: Optional[str]):
        if value is None or isinstance(value, str):
            self.__composition = value
        else:
            raise TypeError('composition must be str or None')

    def load(self,
             style: str,
             *args: any,
             **kwargs: any):
        """
        Wrapper around atomman.load() for loading files that also saves the
        file loading options as class attributes. Any parameters not given
        will use the values already set to the object.
        """
        # Load ucell
        self.__ucell = am.load(style, *args, **kwargs)
        self.ucell.wrap()

        # Check if first variable positional argument is a file
        # NOTE(review): bare except — any failure building a Path counts as
        # "not a file".
        try:
            load_file = Path(args[0])
        except:
            self.load_file = None
        else:
            if load_file.is_file():
                self.load_file = load_file
            else:
                self.load_file = None

        # Set load style
        if self.load_file is None:
            self.load_style = style
        else:
            self.load_style = 'system_model'

    def load_ucell(self, **kwargs):
        """
        Wrapper around atomman.load() for loading files that also saves the
        file loading options as class attributes. Any parameters not given
        will use the values already set to the object.

        Parameters
        ----------
        load_style : str, optional
            The style for atomman.load() to use.
        load_file : str, optional
            The path to the file to load.
        symbols : list or None, optional
            The list of interaction model symbols to associate with the atom
            types in the load file. A value of None will default to the
            symbols listed in the load file if the style contains that
            information.
        load_options : dict, optional
            Any other atomman.load() keyword options to use when loading.
        box_parameters : list or None, optional
            A list of 3 orthorhombic box parameters or 6 trigonal box length
            and angle parameters to scale the loaded system by. Setting a
            value of None will perform no scaling.
        family : str or None, optional
            The system's family identifier. If None, then the family will be
            set according to the family value in the load file if it has one,
            or as the load file's name otherwise.
        """
        self.set_values(**kwargs)

        # Check for file and contents; in-memory content wins over a path.
        if self.load_content is not None:
            load_file = self.load_content
        elif self.load_file is not None:
            load_file = self.load_file
        else:
            raise ValueError('load_file not set')

        # Change load symbols kwarg to None if symbols attribute is empty
        if self.symbols is None or len(self.symbols) == 0:
            symbols = None
        else:
            symbols = self.symbols

        # Load ucell
        self.__ucell = am.load(self.load_style, load_file,
                               symbols=symbols, **self.load_options)
        self.ucell.wrap()

        # Update object's symbols and composition
        self.symbols = self.ucell.symbols
        # Bare property access: evaluates and caches composition lazily.
        self.composition
        self.scale_ucell()

        # Add model-specific charges if needed
        # NOTE(review): best-effort — bare except keeps this optional when
        # no parent potential is available.
        try:
            potential = self.parent.potential.potential
            if 'charge' not in self.ucell.atoms_prop():
                self.ucell.atoms.prop_atype('charge', potential.charges(self.ucell.symbols))
        except:
            pass

    def scale_ucell(self):
        """Scale ucell by box_parameters"""
        if self.box_parameters is not None:

            # Three box_parameters means a, b, c
            if len(self.box_parameters) == 3:
                self.ucell.box_set(a=self.box_parameters[0],
                                   b=self.box_parameters[1],
                                   c=self.box_parameters[2], scale=True)

            # Six box_parameters means a, b, c, alpha, beta, gamma
            elif len(self.box_parameters) == 6:
                self.ucell.box_set(a=self.box_parameters[0],
                                   b=self.box_parameters[1],
                                   c=self.box_parameters[2],
                                   alpha=self.box_parameters[3],
                                   beta=self.box_parameters[4],
                                   gamma=self.box_parameters[5], scale=True)

    def set_values(self, **kwargs: any):
        """
        Allows for multiple class attribute values to be updated at once.

        Parameters
        ----------
        load_style : str, optional
            The style for atomman.load() to use.
        load_file : str, optional
            The path to the file to load.
        symbols : list or None, optional
            The list of interaction model symbols to associate with the atom
            types in the load file. A value of None will default to the
            symbols listed in the load file if the style contains that
            information.
        load_options : dict, optional
            Any other atomman.load() keyword options to use when loading.
        load_content : str or DataModelDict, optional
            The contents of load_file. Allows for ucell and symbols/family
            to be extracted without the file being accessible at the moment.
        box_parameters : list or None, optional
            A list of 3 orthorhombic box parameters or 6 trigonal box length
            and angle parameters to scale the loaded system by. Setting a
            value of None will perform no scaling.
        family : str or None, optional
            The system's family identifier. If None, then the family will be
            set according to the family value in the load file if it has one,
            or as the load file's name otherwise.
        """
        if 'load_style' in kwargs:
            self.load_style = kwargs['load_style']
        if 'load_content' in kwargs:
            self.__load_content = kwargs['load_content']
        if 'load_file' in kwargs:
            self.load_file = kwargs['load_file']
        if 'load_options' in kwargs:
            assert isinstance(kwargs['load_options'], dict)
            self.__load_options = kwargs['load_options']
        if 'family' in kwargs:
            self.family = kwargs['family']
        if 'symbols' in kwargs:
            self.symbols = kwargs['symbols']
        if 'box_parameters' in kwargs:
            self.box_parameters = kwargs['box_parameters']
        if 'composition' in kwargs:
            self.composition = kwargs['composition']

        # Fill in family/symbols from the file if they were not given.
        if self.load_file is not None:
            if self.family is None or self.symbols is None:
                self.__extract_model_terms()

    def __extract_model_terms(self):
        """Extracts family and symbols values from load_file if needed"""
        # Check for file and contents
        if self.load_content is not None:
            load_file = self.load_content
        elif self.load_file is not None:
            load_file = self.load_file.as_posix()
        else:
            raise ValueError('load_file not set')

        # Try to extract info from system_model files
        if self.load_style == 'system_model':
            # NOTE(review): bare except silently ignores parse failures.
            try:
                model = DM(load_file).finds(f'{self.modelprefix}system-info')[0]
            except:
                pass
            else:
                # Extract family value or set as load_file's name
                if self.family is None:
                    self.family = model.get('family', Path(self.load_file).stem)
                if self.symbols is None:
                    symbols = model.get('symbol', None)
                    if symbols is not None and len(symbols) > 0:
                        self.symbols = symbols
                if self.composition is None:
                    self.composition = model.get('composition', None)

        # Try to extract info from other files
        else:
            if self.family is None:
                self.family = Path(self.load_file).stem
            if self.symbols is None:
                # NOTE(review): assigns a local that is never used; reading
                # ucell here forces a load, but self.symbols is not set —
                # likely intended `self.symbols = self.ucell.symbols`; confirm.
                symbols = self.ucell.symbols
            # Bare property access: evaluates and caches composition lazily.
            self.composition

####################### Parameter file interactions ###########################

    def _template_init(self,
                       templateheader: Optional[str] = None,
                       templatedescription: Optional[str] = None):
        """
        Sets the template header and description values.

        Parameters
        ----------
        templateheader : str, optional
            An alternate header to use in the template file for the subset.
        templatedescription : str, optional
            An alternate description of the subset for the templatedoc.
        """
        # Set default template header
        if templateheader is None:
            templateheader = 'Initial System Configuration'

        # Set default template description
        if templatedescription is None:
            templatedescription = ' '.join([
                "Specifies the file and options to load for the initial",
                "atomic configuration."])

        super()._template_init(templateheader, templatedescription)

    @property
    def templatekeys(self) -> dict:
        """dict : The subset-specific input keys and their descriptions."""
        return {
            'load_file':
                "The path to the initial configuration file to load.",
            'load_style':
                "The atomman.load() style indicating the format of the load_file.",
            'load_options': ' '.join([
                "A space-delimited list of key-value pairs for optional",
                "style-specific arguments used by atomman.load()."]),
            'family': ' '.join([
                "A metadata descriptor for relating the load_file back to the",
                "original crystal structure or prototype that the load_file was",
                "based on. If not given, will use the family field in load_file",
                "if load_style is 'system_model', or the file's name otherwise."]),
            'symbols': ' '.join([
                "A space-delimited list of the potential's atom-model symbols to",
                "associate with the loaded system's atom types. Required if",
                "load_file does not contain symbol/species information."]),
            'box_parameters': ' '.join([
                "Specifies new box parameters to scale the loaded configuration by.",
                "Can be given either as a list of three or six numbers: 'a b c' for",
                "orthogonal boxes, or 'a b c alpha beta gamma' for triclinic boxes.",
                "The a, b, c parameters are in units of length and the alpha, beta,",
                "gamma angles are in degrees."]),
        }

    @property
    def preparekeys(self) -> list:
        """
        list : The input keys (without prefix) used when preparing a calculation.
        Typically, this is templatekeys plus *_content keys so prepare can access
        content before it exists in the calc folders being prepared.
        """
        return list(self.templatekeys.keys()) + [
            'load_content',
        ]

    @property
    def interpretkeys(self) -> list:
        """
        list : The input keys (without prefix) accessed when interpreting the
        calculation input file. Typically, this is preparekeys plus any extra
        keys used or generated when processing the inputs.
        """
        return self.preparekeys + [
            'ucell',
            'potential',
            'elasticconstants_content',
        ]

    def load_parameters(self, input_dict: dict):
        """
        Interprets calculation parameters.

        Parameters
        ----------
        input_dict : dict
            Dictionary containing input parameter key-value pairs.
        """
        # Set default keynames
        keymap = self.keymap

        # Extract input values and assign default values
        load_style = input_dict.get(keymap['load_style'], 'system_model')
        load_file = input_dict[keymap['load_file']]
        load_options = input_dict.get(keymap['load_options'], None)
        load_content = input_dict.get(keymap['load_content'], None)
        family = input_dict.get(keymap['family'], None)
        symbols = input_dict.get(keymap['symbols'], None)
        box_parameters = input_dict.get(keymap['box_parameters'], None)

        # Build dict for set_values()
        d = {}
        d['load_style'] = load_style
        d['load_file'] = load_file
        if load_content is not None:
            d['load_content'] = load_content

        # Set family
        if family is not None:
            d['family'] = family

        # Process load_options into load_options
        if load_options is not None:
            d['load_options'] = {}
            load_options_keys = ['key', 'index', 'data_set', 'pbc', 'atom_style',
                                 'units', 'prop_info']
            d['load_options'] = termtodict(load_options, load_options_keys)
            # 'index' is parsed from text and must be converted to int.
            if 'index' in d['load_options']:
                d['load_options']['index'] = int(d['load_options']['index'])

        # Process symbols
        if symbols is not None:
            d['symbols'] = symbols.strip().split()

        # Process box_parameters
        if box_parameters is not None:
            box_params = box_parameters.split()

            # Pull out unit value (4th or 7th token when present)
            if len(box_params) == 4 or len(box_params) == 7:
                unit = box_params[-1]
                box_params = box_params[:-1]

            # Use calculation's length_unit if unit not given in box_parameters
            else:
                unit = self.parent.units.length_unit

            # Convert box lengths to the specified units (angles stay as given)
            box_params = np.array(box_params, dtype=float)
            box_params[:3] = uc.set_in_units(box_params[:3], unit)
            d['box_parameters'] = box_params.tolist()

        # Set values
        self.set_values(**d)

########################### Data model interactions ###########################

    @property
    def modelroot(self) -> str:
        """str : The root element name for the subset terms."""
        baseroot = 'system-info'
        return f'{self.modelprefix}{baseroot}'

    def load_model(self, model: DM):
        """Loads subset attributes from an existing model."""
        sub = model[self.modelroot]

        d = {}
        d['family'] = sub['family']
        # A record may carry either a file reference ('artifact') or the
        # embedded system content, never both.
        if 'artifact' in sub:
            if 'initial-atomic-system' in sub:
                raise ValueError('found both load file and embedded content for the initial system')
            d['load_style'] = sub['artifact']['format']
            d['load_file'] = sub['artifact']['file']
            load_options = sub['artifact'].get('load_options', None)
        elif 'initial-atomic-system' in sub:
            # NOTE(review): this branch never binds `load_options`, so the
            # `if load_options is not None` check below raises
            # UnboundLocalError for embedded-content records — confirm intent.
            d['ucell'] = am.load('system_model', sub, key='initial-atomic-system')
        else:
            raise ValueError('neither load file nor embedded content found for the initial system')
        d['symbols'] = sub['symbol']
        d['composition'] = sub.get('composition', None)

        if load_options is not None:
            d['load_options'] = {}
            load_options_keys = ['key', 'index', 'data_set', 'pbc', 'atom_style',
                                 'units', 'prop_info']
            d['load_options'] = termtodict(load_options, load_options_keys)
            if 'index' in d['load_options']:
                d['load_options']['index'] = int(d['load_options']['index'])

        # NOTE(review): set_values() has no handler for a 'ucell' key; an
        # embedded system placed in d above would be silently ignored.
        self.set_values(**d)

    def build_model(self,
                    model: DM,
                    **kwargs: any):
        """
        Adds the subset model to the parent model.

        Parameters
        ----------
        model : DataModelDict.DataModelDict
            The record content (after root element) to add content to.
        kwargs : any
            Any options to pass on to dict_insert that specify where the subset
            content gets added to in the parent model.
        """
        # Check required parameters
        # NOTE(review): because this raises when load_file is None, the
        # 'initial-atomic-system' else-branch below is unreachable — confirm.
        if self.load_file is None:
            raise ValueError('load_file not set')

        system = DM()

        system['family'] = self.family
        if self.load_file is not None:
            system['artifact'] = DM()
            system['artifact']['file'] = self.load_file.as_posix()
            system['artifact']['format'] = self.load_style
            if len(self.load_options) == 0:
                system['artifact']['load_options'] = None
            else:
                system['artifact']['load_options'] = dicttoterm(self.load_options)
        else:
            system['initial-atomic-system'] = self.ucell.model()['atomic-system']
        system['symbol'] = self.symbols
        if self.composition is not None:
            system['composition'] = self.composition

        dict_insert(model, self.modelroot, system, **kwargs)

    @property
    def queries(self) -> dict:
        """dict: Query objects and their associated parameter names."""
        root = f'{self.parent.modelroot}.{self.modelroot}'
        return {
            'load_file': load_query(
                style='str_match',
                name=f'{self.prefix}load_file',
                path=f'{root}.artifact.file',
                description='search by the filename for the initial configuration'),
            'family': load_query(
                style='str_match',
                name=f'{self.prefix}family',
                path=f'{root}.family',
                description='search by the configuration family: original prototype or crystal'),
            'symbol': load_query(
                style='str_match',
                name=f'{self.prefix}symbols',
                path=f'{root}.symbol',
                description='search by atomic symbols in the configuration'),
            'composition': load_query(
                style='str_match',
                name=f'{self.prefix}composition',
                path=f'{root}.composition',
                description='search by the composition of the initial configuration'),
        }

########################## Metadata interactions ##############################

    def metadata(self, meta: dict):
        """
        Converts the structured content to a simpler dictionary.

        Parameters
        ----------
        meta : dict
            The dictionary to add the subset content to
        """
        # Check required parameters
        if self.load_file is None:
            meta[f'{self.prefix}load_file'] = None
            meta[f'{self.prefix}load_style'] = None
            meta[f'{self.prefix}load_options'] = None
            meta[f'{self.prefix}parent_key'] = None
        else:
            meta[f'{self.prefix}load_file'] = self.load_file.as_posix()
            meta[f'{self.prefix}load_style'] = self.load_style
            meta[f'{self.prefix}load_options'] = dicttoterm(self.load_options)
            # Parent key: the containing folder's name, or the file stem when
            # the file sits in the current directory.
            if self.load_file.parent.as_posix() == '.':
                parent = self.load_file.stem
            else:
                parent = self.load_file.parent.name
            meta[f'{self.prefix}parent_key'] = parent

        meta[f'{self.prefix}family'] = self.family
        # Flatten symbols (skipping None entries) into a space-joined string.
        if self.symbols is None:
            symbolstr = ''
        else:
            symbolstr = ''
            for s in self.symbols:
                if s is not None:
                    symbolstr += f'{s} '
            symbolstr = symbolstr.strip()
        meta[f'{self.prefix}symbols'] = symbolstr
        if self.composition is not None:
            meta[f'{self.prefix}composition'] = self.composition

########################### Calculation interactions ##########################

    def calc_inputs(self, input_dict: dict):
        """
        Generates calculation function input parameters based on the values
        assigned to attributes of the subset.

        Parameters
        ----------
        input_dict : dict
            The dictionary of input parameters to add subset terms to.
        """
        # ucell is a lazy property, so this access may trigger load_ucell().
        if self.ucell is None:
            raise ValueError('ucell not loaded')

        input_dict['ucell'] = self.ucell
|
from flask import Flask, render_template, request,make_response,jsonify
from werkzeug.utils import secure_filename
from flask_cors import CORS, cross_origin
app = Flask(__name__)
# CORS(app, support_credentials=True)
# cors=CORS(app,resources={
# r"/*":{
# "origins":"*"
# }
# })
# app.config['CORS_HEADERS'] = 'Content-Type'
@app.route('/uploader', methods = ['GET', 'POST'])
@cross_origin(origin='*')
def uploader_file():
    """Accept a multipart file upload and save it to the working directory.

    POST: expects a 'file' part; saves it under a sanitized name and returns
    a JSON confirmation.
    GET: returns a JSON hint (the original fell through and returned None,
    which makes Flask raise a 500 error).
    """
    if request.method == 'POST':
        f = request.files['file']
        print(f.filename)
        # secure_filename strips path separators so the upload cannot escape
        # the working directory
        f.save(secure_filename(f.filename))
        response = make_response(jsonify({"message": 'file uploaded'}))
        return response
    # BUG FIX: non-POST requests previously returned None -> Flask 500
    return make_response(jsonify({"message": 'send a POST request with a file'}))
if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run(debug = True)
import os
from flask import Blueprint, request, flash, Response, jsonify, send_from_directory, g
from werkzeug.utils import secure_filename
from resourse.models.course import Course
from resourse.models.take import Take
from resourse.models.student import Student
from resourse.models.pdfs import PDF
from resourse import db
filebp = Blueprint('file', __name__, url_prefix='/file')
JPEG_PATH = '/root/Android/file/jpeg/'
PDF_PATH = '/root/Android/file/pdf/'
@filebp.route('/jpeg/<picture_name>', methods=('GET', 'POST'))
def picture(picture_name):
    """Serve (GET) or store (POST) a JPEG file under JPEG_PATH.

    GET returns the image file or an error string if it does not exist.
    POST expects a 'file' multipart part and saves it with a sanitized name.
    """
    file_exist = os.path.exists(JPEG_PATH + picture_name)
    if request.method == 'GET':
        if not file_exist:
            return "No Such pic"
        try:
            # BUG FIX: the original also built an unused Response object from
            # a path *string* (not file data) before this call; removed.
            return send_from_directory(JPEG_PATH, picture_name)
        except Exception as e:
            return jsonify({"code": "unexception", "message": "{}".format(e)})
        # (unreachable `return "JPEG GET"` after try/except removed)
    elif request.method == 'POST':
        if 'file' not in request.files:
            print(dict(request.files))
            return "No Picture POST"
        file = request.files['file']
        if file.filename == '':
            flash('No selected file')
            return 'No selected file'
        else:
            # sanitize the client-supplied name before writing to disk
            filename = secure_filename(file.filename)
            file.save(JPEG_PATH + filename)
        return "Picture Get %s" % picture_name
@filebp.route('/pdf', methods=('GET', 'POST'))
def pdf():
    """List, for the authenticated student (g.uid), each enrolled course
    together with its PDF records.

    GET returns a JSON array of course dicts, each with a 'pdfs' list.
    POST is accepted by the route but not implemented.
    """
    # BUG FIX: the original called request.get_json() unconditionally and
    # discarded the result; on modern Flask that can abort with 400 for
    # requests without a JSON body (e.g. plain GETs). Removed.
    if request.method == "GET":
        student_name = g.uid
        student = Student.query.filter(Student.username == student_name).first()
        student_id = student.id
        all_take = Take.query.filter(Take.student_id == student_id).all()
        result = []
        for take in all_take:
            course_id = take.course_id
            course = Course.query.filter(Course.id == course_id).first()
            obj_dict = course.as_dict()
            # attach every PDF belonging to this course
            course_pdfs = PDF.query.filter(PDF.course_id == course_id).all()
            obj_dict['pdfs'] = [p.as_dict() for p in course_pdfs]
            result.append(obj_dict)
        return jsonify(result)
    # POST placeholder (unchanged behavior)
    return jsonify("haha")
@filebp.route('/pdf/<course_id>/<file_name>', methods=('GET', 'POST'))
def getPDF(course_id, file_name):
    """Serve a course PDF stored under PDF_PATH/<course_id>/.

    :param course_id: course directory name (URL path segment)
    :param file_name: PDF file name (URL path segment)
    :return: the PDF file, or an error message
    """
    if request.method == "GET":
        pdf_path = PDF_PATH + course_id + '/'
        if not os.path.exists(pdf_path + file_name):
            return "no such pdf"
        try:
            # The old Response(JPEG_PATH + pdf_path, ...) line concatenated
            # two unrelated paths and was never used; removed as dead code.
            return send_from_directory(pdf_path, file_name)
        except Exception as e:
            return jsonify({"code": "unexception", "message": "{}".format(e)})
    # POST is routed here but was never implemented; previously this fell
    # through and returned None (HTTP 500).
    return "POST not supported"
|
def main():
    """Update a DuckDNS domain with the public IP scraped from a D-Link router.

    LTE routers often report a public IP that differs from the externally
    reachable one, so the address is scraped from the router's admin page
    instead of an external "what is my IP" service.
    Exits 0 on success or no change, -1 on failure.
    """
    import argparse
    import re
    import traceback
    import requests
    from dlinkscraper import DLink
    parser = argparse.ArgumentParser(
        'DuckDNS Updater',
        description=
        """This script updates your DuckDNS IPv4 address to scraped address
        from your D-Link router. Because in LTE routers, your visible public
        IP doesn't always match with IP that is needed to access you,
        we need to scrape it from router's admin page"""
    )
    parser.add_argument('--token', '-t', type=str, required=True, help='Your DuckDNS token')
    parser.add_argument('--domain', '-d', type=str, required=True, help='Your DuckDNS domain')
    parser.add_argument(
        '--login', '-l', type=str, required=False, default='admin',
        help="Login to your router. It's always 'admin', so, yeah, "
             "you don't need to specify it...")
    parser.add_argument(
        '--password', '-p', type=str, required=True,
        help="Password to your router's admin"
    )
    parser.add_argument(
        '--router-url', '-u', type=str, required=False, default='http://192.168.1.1',
        help="Base URL to you router. Usually something "
             "like 'http://192.168.1.1' (that's default)")
    parser.add_argument(
        '--no-cache', action='store_true',
        help="Don't cache and check last known IP. This is default behaviour, "
             "as it won't ping DuckDNS every time - only when IP changed")
    parser.add_argument(
        '--cache-file', type=str, required=False, default='last_ip.txt',
        help='Path to file where last known IP will be cached')
    args = parser.parse_args()

    dl = DLink(args.router_url)
    print('Logging in to router...')
    dl.login(args.login, args.password)
    print('Getting router main page...')
    dl.get_main_site()
    print('Logging out...')
    dl.logout()

    # Check if it's actually valid IP
    if dl.public_ip is None or not re.match(r'\d+\.\d+\.\d+\.\d+', dl.public_ip):
        print('Got invalid IP from router! Exit!')
        exit(-1)
    print('IP from router: ' + dl.public_ip)

    if not args.no_cache:
        print('Checking last known IP...')
        try:
            with open(args.cache_file, 'r') as f:
                saved_ip = f.read()
            print('Last IP: ' + saved_ip)
        except Exception:
            # A missing/unreadable cache is not fatal - just force an update.
            # (Was a bare `except:`, which would also swallow KeyboardInterrupt.)
            saved_ip = 'error'
            print(f"Can't open cache file ({args.cache_file})")
            traceback.print_exc()
        if saved_ip == dl.public_ip:
            print('Last IP was the same :) Exit.')
            exit(0)
        else:
            print('IP changed!')

    # Let requests build the query string so values are URL-encoded.
    req = requests.get(
        'https://www.duckdns.org/update',
        params={'domains': args.domain, 'token': args.token, 'ip': dl.public_ip},
    )
    if req.ok and req.content.decode('utf-8') == 'OK':
        print('Updating IP success :)')
        if not args.no_cache:
            print('Saving current IP for later...')
            try:
                with open(args.cache_file, 'w') as f:
                    f.write(dl.public_ip)
            except Exception:
                print("Can't write cache file!")
                traceback.print_exc()
            else:
                # Only claim success when the write actually succeeded.
                print('Saving current IP success :)')
        exit(0)
    else:
        print('Updating IP failed!')
        exit(-1)


if __name__ == '__main__':
    main()
|
from rest_framework import serializers
from .models import Person


class PersonSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the public fields of a Person record."""
    class Meta:
        # Explicit whitelist of serialized columns (deliberately not '__all__').
        model = Person
        fields = ('identifier', 'name', 'isic_code', 'phone_number', 'address', 'city', 'email', 'website', 'notes')
|
import re

# Count whole-word, case-insensitive occurrences of `word` in `text`.
text = input()
word = input()
# re.escape() keeps the search literal even when the word contains regex
# metacharacters (e.g. "c++"); \b anchors the match to word boundaries.
pattern = rf"\b{re.escape(word)}\b"
res = re.findall(pattern, text, re.IGNORECASE)
print(len(res))
|
'''
Created on 2017-01-03

@author: admin

Minimal TCP greeting server: listens on port 1234 and sends a one-line
thank-you message to every client that connects.
'''
import socket

s = socket.socket()
host = socket.gethostname()
port = 1234
s.bind((host, port))
s.listen(5)

while True:
    c, addr = s.accept()
    print('Got connection from ', addr)
    # bytes(str) without an encoding raises TypeError on Python 3;
    # encode the message explicitly before sending.
    c.send('Thank you for connecting'.encode('utf-8'))
    c.close()
#!/usr/bin/env python3
# Advent of code Year 2019 Day 9 solution
# Author = seven
# Date = December 2019
import enum
import sys
from os import path
# Make the shared intcode VM package importable from the parent directory.
sys.path.insert(0, path.dirname(path.dirname(path.abspath(__file__))))
from shared import vm
# NOTE(review): rstrip("code.py") strips the trailing character *set*
# {c,o,d,e,.,p,y}, not the suffix - it can over-strip some paths; verify.
# Also, `input` shadows the builtin for the rest of this module.
with open((__file__.rstrip("code.py") + "input.txt"), 'r') as input_file:
    input = input_file.read()
class Ship(object):
    """Sparse grid of hull panels, keyed by str((x, y))."""

    def __init__(self):
        # pos-string -> {'color', 'visited', 'x', 'y'}
        self.panels = {}

    def panelAt(self, pos: tuple):
        """Return the panel dict at *pos*, or a default black panel."""
        pos = str(pos)
        if pos in self.panels:
            return self.panels[pos]
        return {'color': 0}

    def paintPanel(self, pos: tuple, color: int):
        """Paint the panel at *pos* and bump its visit counter."""
        pos_str = str(pos)
        # Bug fix: the membership test must use the string key; the old code
        # tested the tuple against a str-keyed dict, which always missed and
        # re-created the entry, resetting 'visited' on every paint.
        if pos_str not in self.panels:
            self.panels[pos_str] = {'color': 0, 'visited': 0, 'x': pos[0], 'y': pos[1]}
        self.panels[pos_str]['color'] = color
        self.panels[pos_str]['visited'] += 1
class Dir(enum.Enum):
    """Robot facing, in clockwise order so (value + 1) % 4 is a right turn."""
    up = 0
    right = 1
    down = 2
    left = 3
class Robot(vm.VM):
    """Intcode-driven hull-painting robot walking over a Ship.

    The program's outputs alternate between a paint colour and a turn
    command; its input is the colour of the panel under the robot.
    """

    # (dx, dy) step for each facing.
    _STEP = {Dir.up: (0, 1), Dir.right: (1, 0), Dir.down: (0, -1), Dir.left: (-1, 0)}

    def __init__(self, program: str, ship: Ship):
        self.ship = ship
        self.position = (0, 0)
        self.direction = Dir.up
        self.waiting_to_paint = True
        super().__init__(program=program, input=vm.IO(), output=vm.IO())

    def store_to_output(self, a: vm.Param):
        super().store_to_output(a)
        if self.waiting_to_paint:
            # First output of each pair: paint the current panel.
            self.ship.paintPanel(self.position, self.output.value)
            self.waiting_to_paint = False
        else:
            # Second output: turn, step forward, expect a colour next.
            self.turn(self.output.value)
            self.move()
            self.waiting_to_paint = True

    def load_from_input(self, a: vm.Param):
        # Feed the colour of the panel currently under the robot.
        self.input.value = self.ship.panelAt(self.position)['color']
        super().load_from_input(a)

    def turn(self, direction: int):
        """Rotate right when *direction* is 1, otherwise left."""
        step = 1 if direction == 1 else -1
        # Python's % always yields a non-negative result, so up-1 wraps to left.
        self.direction = Dir((self.direction.value + step) % 4)

    def move(self):
        """Advance one panel in the current facing."""
        dx, dy = self._STEP[self.direction]
        self.position = (self.position[0] + dx, self.position[1] + dy)
# Part one: run over an all-black hull and count panels painted at least once.
ship = Ship()
robot = Robot(program=input, ship=ship)
robot.run()
print('Part One: {0}'.format(len(ship.panels)))

# Part two: start on a single white panel at the origin.
ship = Ship()
ship.panels[str((0, 0))] = {'color': 1, 'visited': 0, 'x': 0, 'y': 0}
robot = Robot(program=input, ship=ship)
robot.run()
print('Part two')

# Compute the painted bounding box.
min_x = max_x = min_y = max_y = None
for panel in ship.panels.values():
    px, py = panel['x'], panel['y']
    min_x = px if min_x is None else min(min_x, px)
    max_x = px if max_x is None else max(max_x, px)
    min_y = py if min_y is None else min(min_y, py)
    max_y = py if max_y is None else max(max_y, py)

# Render the registration identifier, top row first.
for y in range(max_y, min_y - 1, -1):
    row = ''
    for x in range(min_x, max_x + 1):
        key = str((x, y))
        white = key in ship.panels and ship.panels[key]['color'] == 1
        row += '|' if white else ' '
    print(row)
|
"""
Copyright 1999 Illinois Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL ILLINOIS INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Illinois Institute
of Technology shall not be used in advertising or otherwise to promote
the sale, use or other dealings in this Software without prior written
authorization from Illinois Institute of Technology.
"""
import copy
import cv2
import numpy as np
import fabio
from skimage.morphology import white_tophat
from pyFAI.method_registry import IntegrationMethod
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
from pyFAI.detectors import Detector
from pyFAI import detector_factory, load
from pyFAI.goniometer import SingleGeometry
from pyFAI.calibrant import get_calibrant
def distance(pt1, pt2):
    """
    Get the Euclidean distance between 2 points
    :param pt1: first point (tuple or list of 2 values)
    :param pt2: second point (tuple or list of 2 values)
    :return: distance (float)
    """
    dx = pt1[0] - pt2[0]
    dy = pt1[1] - pt2[1]
    return np.sqrt(dx * dx + dy * dy)
def get16bitImage(img):
    """
    Convert an image to a uint16 image linearly rescaled to [0, 65535].
    :param img: input image (any numeric dtype)
    :return: uint16 image
    """
    max_val = img.max()
    min_val = img.min()
    if max_val == min_val:
        # Flat image: the old code divided by min_val (NaN/garbage when the
        # image is all zeros). Map all-zero to 0, any other flat value to
        # full scale (matching the previous result for non-zero images).
        if max_val == 0:
            return np.zeros_like(img, dtype='uint16')
        return np.full(img.shape, 65535, dtype='uint16')
    dev = max_val - min_val
    # Shift to zero before scaling so no value can exceed 65535; the old
    # code skipped the shift, so images with min > 0 wrapped around when
    # cast to uint16.
    return (np.round((img - min_val) * 65535. / dev)).astype('uint16')
def get8bitImage(img, min=None, max=None):
    """
    Convert an image to a uint8 image scaled between *min* and *max*.
    :param img: input image
    :param min: intensity mapped to 0 (defaults to the image minimum)
    :param max: intensity ceiling (defaults to half the image maximum)
    :return: uint8 image
    """
    cimg = np.array(img, dtype=np.float32)
    if max is None:
        max = img.max() * 0.5
    # Clip above the display ceiling.
    cimg[cimg > max] = max
    if min is None:
        min = img.min()
    # Clip below the display floor, then shift so the floor sits at zero.
    cimg[cimg < min] = min
    cimg -= min
    peak = cimg.max()
    if peak <= 0:
        # Nothing above the floor: return an all-black frame.
        return (cimg * 0.).astype('uint8')
    # Scale so the brightest remaining pixel maps to 255.
    return cv2.convertScaleAbs(cimg, alpha=255. / peak)
def inverte(imagem):
    """
    Invert a grey-scale image (255 becomes 0 and vice versa).
    :param imagem: input image
    :return: inverted image
    """
    return 255 - imagem
def getThreshold(img, percent):
    """
    Get threshold value by using percentage of number of points
    :param img: input image
    :param percent: fraction of pixels that should remain above the threshold
    :return: threshold value
    """
    # Histogram over integer intensity bins [0, img.max()).
    bins = np.arange(img.max())
    hist, bins = np.histogram(img, bins)
    hist = np.array(hist)
    thrhold = 0
    # Scan candidate thresholds in up to 1000 steps across the intensity range.
    dev = min(1000, img.max())
    for t in np.arange(0, img.max()-1, img.max()/dev):
        # Number of pixels at or above candidate threshold t.
        valueHist = np.sum(hist[int(t):int(img.max())])
        if (valueHist/(1.0*img.shape[0]*img.shape[1]))<percent:
            # NOTE(review): when fewer than 100 pixels remain, back off one
            # step so at least a few pixels stay above the threshold.
            if valueHist < 100 and t > 1:
                thrhold = t-1
            else:
                thrhold = t
            # find number of pixel ----> thrhold = t-1
            break
    return thrhold
def thresholdImg(img, percent, convert_type=cv2.THRESH_BINARY_INV):
    """
    Apply thresholding by percent
    :param img: input image (mutated in place, see note below)
    :param percent: percentage of number of points higher threshold value
    :param convert_type: convert type see http://docs.opencv.org/trunk/d7/d4d/tutorial_py_thresholding.html
    :return: threshold image
    """
    th = max(0, getThreshold(img, percent=percent)-1)
    # NOTE(review): dst=img makes cv2.threshold write over the input buffer,
    # so the caller's array is mutated - confirm this is intentional.
    _, thres = cv2.threshold(img, th, 255, convert_type, dst=img)
    return thres
def bkImg(img, percent=0.01, morph=25):
    """
    Produce a background image: threshold, morphological opening to drop
    small specks, then inversion.
    :param img: input image
    :param percent: fraction of pixels kept above the threshold
    :param morph: size of the elliptical structuring element
    :return: processed image
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph, morph))
    thresholded = thresholdImg(img, percent)
    opened = cv2.morphologyEx(thresholded, cv2.MORPH_OPEN, kernel)
    return inverte(opened)
def getBGR(img):
    """
    Convert a grayscale image to a BGR (3-channel) image.
    :param img: grayscale image
    :return: BGR image
    """
    duplicate = copy.copy(img)
    height, width = duplicate.shape[0], duplicate.shape[1]
    duplicate = cv2.resize(duplicate, (int(width), int(height)))
    return cv2.cvtColor(duplicate, cv2.COLOR_GRAY2BGR)
def getContours(img, n1=1, n2=2):
    """
    Return the contours of *img*, absorbing the return-shape difference
    between OpenCV versions.
    :param img, n1, n2:
    :return: contours, or None for an unexpected return shape
    """
    found = cv2.findContours(img, n1, n2)
    # OpenCV 3 returns (image, contours, hierarchy); 2/4 return (contours, hierarchy).
    if len(found) == 3:
        return found[1]
    if len(found) == 2:
        return found[0]
    return None
def getCenter(img):
    """
    Find the center of the diffraction pattern.
    Tries, in order: ellipse fit on the largest contour, averaging the
    best-matched pair of reflections, image moments, and Hough circles;
    falls back to the geometric center of the image.
    :param img: input image
    :return: center as an (x, y) tuple
    """
    img = get8bitImage(copy.copy(img))
    img = cv2.GaussianBlur(img, (5, 5), 0)
    init_center = None
    ## Find init center by apply thresholding and fit ellipse to the contour which has the maximum size
    cimg = bkImg(copy.copy(img), 0.005, 50)
    contours = getContours(cimg)
    cnt = max(contours, key=len)
    if len(cnt) > 5:
        ellipse = cv2.fitEllipse(cnt)
        init_center = (ellipse[0][0], ellipse[0][1])
    ## Refine by fitting ellipses to reflections and averaging the best-matched pair
    if init_center is not None:
        cimg = thresholdImg(copy.copy(img), 0.00015)
        contours = getContours(cimg)
        # Take the 6 biggest contours (candidate reflections)
        cnts = sorted(contours, key=len, reverse=True)[:6]
        reflections = []
        for cnt in cnts:
            # Fit an ellipse to each reflection whose contour has >= 10 points
            if len(cnt) >= 10:
                ellipse = cv2.fitEllipse(cnt)
                center = ellipse[0]
                axes = ellipse[1]
                reflections.append(((center[0], center[1]), np.pi * axes[0] * axes[1]))
        inds = np.arange(0, len(reflections))
        if len(reflections) > 1:
            # Pair the two reflections whose areas are most similar
            r1 = None
            r2 = None
            min_diff = 99999
            for i in inds:
                other_inds = np.delete(inds, i)
                its_pair = min(other_inds, key=lambda k: abs(reflections[i][1] - reflections[k][1]))
                diff = abs(reflections[i][1] - reflections[its_pair][1])
                if diff < min_diff:
                    r1 = i
                    r2 = its_pair
                    min_diff = diff
            if r1 is not None and r2 is not None:
                # Bug fix: average the centers of BOTH paired reflections;
                # the old code averaged r1's center with itself.
                x = (reflections[r1][0][0] + reflections[r2][0][0]) / 2.
                y = (reflections[r1][0][1] + reflections[r2][0][1]) / 2.
                if init_center is not None and distance(init_center, (x, y)) < 7:
                    # Return average center of reflections
                    return (x, y)
        return init_center
    # Find center by using opencv moments. See http://docs.opencv.org/trunk/dd/d49/tutorial_py_contour_features.html
    nZero = cv2.findNonZero(img)
    if nZero is not None:
        copy_img = bkImg(copy.copy(img), 0.015, 30)
        m = cv2.moments(copy_img)
        if m['m00'] != 0:
            # Centroid of the background-removed image
            return ((m['m10'] / m['m00']), (m['m01'] / m['m00']))
    # Find center by fitting a circle (3 = cv2.HOUGH_GRADIENT)
    cimg = bkImg(copy.copy(img), 0.0015, 50)
    circles = cv2.HoughCircles(cimg, 3, 1, 100,
                               param1=60, param2=20, minRadius=0, maxRadius=0)
    if circles is not None:
        return (circles[0][0][0], circles[0][0][1])
    # If no method worked, return the center of the image
    return (img.shape[1] / 2, img.shape[0] / 2)
def get_ring_model(hist, max_nfev=8000):
    """
    Fit a 3-peak Gaussian mixture (peaks at u-pi, u, u+pi) plus a flat
    background to an azimuthal intensity histogram.
    :param hist: [x, y] pair of angle bins (radians) and intensities
    :param max_nfev: maximum number of fit function evaluations
    :return: dict of fitted parameter values (u, sigma, alpha, bg)
    """
    # Smooth histogram to find parameters easier
    from .histogram_processor import smooth
    hist[1] = smooth(hist[1], 20)
    index = np.argmax(hist[1])
    u = hist[0][index]
    # Fold the initial peak estimate into [pi/2, 3*pi/2)
    if u < np.pi / 2:
        u += np.pi
    elif u > 3 * np.pi / 2:
        u -= np.pi
    # Fit model using same gaussian
    x = hist[0]
    # Call orientation_GMM3
    from lmfit.models import GaussianModel
    from lmfit import Model
    def orientation_GMM3(x, u, sigma, alpha, bg):
        # Sum of three identical Gaussians spaced pi apart, plus background.
        mod = GaussianModel()
        return mod.eval(x=x, amplitude=alpha, center=u, sigma=sigma) + \
               mod.eval(x=x, amplitude=alpha, center=u-np.pi, sigma=sigma) + \
               mod.eval(x=x, amplitude=alpha, center=u+np.pi, sigma=sigma) + bg
    model = Model(orientation_GMM3, independent_vars='x')
    max_height = np.max(hist[1])
    model.set_param_hint('u', value=u, min=np.pi/2, max=3*np.pi/2)
    model.set_param_hint('sigma', value=0.1, min=0, max=np.pi*2)
    model.set_param_hint('alpha', value=max_height*0.1/0.3989423, min=0)
    model.set_param_hint('bg', value=0, min=-1, max=max_height+1)
    result = model.fit(data=hist[1], x=x, params=model.make_params(), max_nfev=max_nfev)
    # Second pass: down-weight (zero out) the worst outliers of the first fit.
    errs = abs(result.best_fit - result.data)
    weights = errs / errs.mean() + 1
    weights[weights > 3.] = 0
    result = model.fit(data=hist[1], x=x, params=result.params, weights=weights, max_nfev=max_nfev)
    return result.values
def HoF(hist, mode='f'):
    """
    Calculate Herman Orientation Factors, one per starting bin of *hist*.
    :param hist: azimuthal intensity histogram spanning the full circle
    :param mode: 'f' integrates over a pi-wide window, otherwise half-pi
    :return: array of HoF values, same length as hist
    """
    Ints = []
    n_pi = len(hist) // 2 # number of hist unit in pi range
    n_hpi = n_pi // 2 # number of hist unit in half pi range
    # Fold the full-circle histogram into a pi-wide window for each offset.
    for i in range(n_pi):
        I = hist[i:(i+n_pi)].copy()
        I[:i] += np.flipud(hist[:i])
        I[i:] += np.flipud(hist[(i+n_pi):])
        Ints.append(I)
    rads = np.linspace(0, np.pi, n_pi + 1)[:-1]
    # Integration weights: sin (solid angle) and cos^2 * sin.
    denom = np.sin(rads)
    numer = (np.cos(rads)**2) * denom
    HoFs = np.zeros(hist.shape)
    for i in range(len(hist)):
        # Windows repeat (mirrored) after pi; reuse them for the second half.
        I = Ints[i] if i < n_pi else np.flipud(Ints[i - n_pi])
        if mode == 'f':
            HoFs[i] = ((I * numer).sum() / (I * denom).sum()) if i < n_pi else HoFs[i - n_pi]
        else:
            HoFs[i] = (I[:n_hpi] * numer[:n_hpi]).sum() / (I[:n_hpi] * denom[:n_hpi]).sum()
    # Herman factor: (3*<cos^2> - 1) / 2.
    return (3 * HoFs - 1) / 2
def getRadOfMaxHoF(HoFs, mode, ratio=0.05):
    """
    Get the radian of the maximum Herman Orientation Factor.
    Takes the top *ratio* fraction of bins, groups consecutive indices
    (wrapping around the circle), and returns the angle at the middle of
    the strongest group.
    :param HoFs: array of Herman Orientation Factors
    :param mode: 'f' for full mode (pi-periodic), otherwise half mode
    :param ratio: fraction of bins considered "top"
    :return: angle in radians
    """
    nHoFs = len(HoFs)
    num = int(nHoFs * ratio)
    if mode == 'f':
        # Full mode is pi-periodic, so only the first half needs scanning.
        HoFs = HoFs[:(nHoFs // 2)]
        num //= 2
    # get the indices of the top num largest HoFs
    idxs = sorted(np.arange(len(HoFs)), key=lambda i: HoFs[i])[-num:]
    idxs = sorted(idxs)
    # group the indices
    grps = [[idxs[0]]]
    for idx in idxs[1:]:
        if grps[-1][-1] == idx - 1:
            grps[-1].append(idx)
        else:
            grps.append([idx])
    # handle the round case (a group wrapping from the last bin to bin 0)
    if len(grps) > 1 and grps[0][0] == 0 and grps[-1][-1] == len(HoFs) - 1:
        grps[0] += [idx - len(HoFs) for idx in grps[-1]]
    # find the groups of max number of indices
    maxn = max(len(grp) for grp in grps)
    grps = [grp for grp in grps if len(grp) == maxn]
    # among equally long groups, keep the one with the largest HoF sum
    opt_grp = sorted(grps, key=lambda g:HoFs[g].sum())[-1]
    opt_idx = np.mean(opt_grp) % len(HoFs)
    return 2 * np.pi * opt_idx / nHoFs
def getRotationAngle(img, center, method=0, man_det=None):
    """
    Find rotation angle of the diffraction.
    :param img: input image
    :param center: center of the diffraction
    :param method: 0 = max-intensity angle, 1 = GMM fit, 2/3 = Herman factor
    :param man_det: manually specified detector name (optional)
    :return: rotation angle in degree
    """
    ## Find init angle by apply thresholding and fit ellipse to the contour which has the maximum size
    cimg = get8bitImage(copy.copy(img))
    cimg = cv2.GaussianBlur(cimg, (5, 5), 0)
    cimg = bkImg(copy.copy(cimg), 0.005, 50)
    init_angle = None
    contours = getContours(cimg)
    cnt = max(contours, key=len)
    if len(cnt) > 5:
        ellipse = cv2.fitEllipse(cnt)
        init_angle = (ellipse[2]+90.) % 180
        # Fold into [0, 90]
        init_angle = init_angle if init_angle <= 90. else 180. - init_angle
    # Find angle with maximum intensity from Azimuthal integration
    det = find_detector(img, man_det=man_det)
    corners = [(0, 0), (0, img.shape[1]), (img.shape[0], 0), (img.shape[0], img.shape[1])]
    npt_rad = int(round(min([distance(center, c) for c in corners])))
    # Mask out invalid (negative) pixels
    mask = np.zeros(img.shape)
    mask[img<0] = 1
    ai = AzimuthalIntegrator(detector=det)
    ai.setFit2D(200, center[0], center[1])
    integration_method = IntegrationMethod.select_one_available("csr", dim=2, default="csr", degradable=True)
    I2D, tth, _ = ai.integrate2d(img, npt_rad, 360, unit="r_mm", method=integration_method, mask=mask)
    # Keep only the inner third of the radial range
    I2D = I2D[:, :int(len(tth)/3.)]
    hist = np.sum(I2D, axis=1)  # Find a histogram from 2D Azimuthal integrated histogram, the x-axis is degree and y-axis is intensity
    sum_range = 0
    # Find degree which has maximum intensity
    if method == 1: # gmm
        x = np.arange(0, 2 * np.pi, 2 * np.pi / 360)
        model_pars = get_ring_model([x, hist])
        max_degree = int(model_pars['u'] / np.pi * 180) % 180
    elif 2 <= method <= 3: # 'hof_f' or 'hof_h'
        HoFs = HoF(hist, 'f' if method == 2 else 'h')
        max_degree = int(getRadOfMaxHoF(HoFs, 'f' if method == 2 else 'h') / np.pi * 180) % 180
    else: # Find the best degree by its intensity
        max_degree = max(np.arange(180), key=lambda d: np.sum(hist[d - sum_range:d + sum_range + 1]) + np.sum(
            hist[d + 180 - sum_range:d + 181 + sum_range]))
    # Refine to 0.1-degree resolution within +/-5 degrees of the coarse maximum
    hist = 0
    if -175 <= max_degree < 175:
        I2D, tth, _ = ai.integrate2d(img, npt_rad, 100, azimuth_range=(max_degree-5, max_degree+5), unit="r_mm", method=integration_method, mask=mask)
        I2D = I2D[:, :int(len(tth)/3.)]
        hist += np.sum(I2D, axis=1)
    op_max_degree = max_degree-180 if max_degree > 0 else max_degree+180
    if -175 <= op_max_degree < 175:
        I2D2, tth2, _ = ai.integrate2d(img, npt_rad, 100, azimuth_range=(op_max_degree-5, op_max_degree+5), unit="r_mm", method=integration_method, mask=mask)
        I2D2 = I2D2[:, :int(len(tth2)/3.)]
        hist += np.sum(I2D2, axis=1)  # Find a histogram from 2D Azimuthal integrated histogram, the x-axis is degree and y-axis is intensity
    delta_degree = max(np.arange(100), key=lambda d: np.sum(hist[d - sum_range:d + sum_range + 1]))
    if delta_degree < 50:
        max_degree -= (50-delta_degree)/10
    else:
        max_degree += (delta_degree-50)/10
    # # If the degree and initial angle from ellipse are different, return ellipse angle instead
    if init_angle is not None and abs(max_degree-init_angle) > 20. and abs(180 - max_degree - init_angle)>20:
        return int(round(init_angle))
    #If max degree is obtuse return the acute angle equivalent of the same
    if max_degree > 90:
        return -1*(180-max_degree)
    if max_degree < -90:
        return 180 + max_degree
    # otherwise, return max degree
    return max_degree
def getCenterRemovedImage(img, center, rmin):
    """
    Black out a disc of radius *rmin* around *center* in the image.
    :param img: input image (modified in place)
    :param center: center location (tuple or list)
    :param rmin: radius of the removed disc
    :return: image with the central disc set to 0
    """
    center = (int(center[0]), int(center[1]))
    disc = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)
    # Draw a filled circle into the mask, then zero out those pixels.
    cv2.ellipse(disc, tuple(center), axes=(rmin, rmin), angle=0, startAngle=0,
                endAngle=360, color=255, thickness=-1)
    img[disc > 0] = 0
    return img
def getNewZoom(current, move, xmax, ymax, ymin=0):
    """
    Shift a zoom window by *move*, clamping it inside the image bounds while
    preserving its width and height.
    :param current: current zoom as [(x1, x2), (y1, y2)]
    :param move: (dx, dy) pan vector
    :param xmax: maximum x
    :param ymax: maximum y
    :param ymin: minimum y
    :return: new zoom as [(x1, x2), (y1, y2)]
    """
    (cx1, cx2), (cy1, cy2) = current
    width = cx2 - cx1
    height = cy2 - cy1

    x1, x2 = cx1 + move[0], cx2 + move[0]
    if x1 < 0:
        # Slide back to the left edge, keeping the window width.
        x1, x2 = 0, width
    if x2 >= xmax:
        x2 = xmax - 1
        x1 = x2 - width

    y1, y2 = cy1 + move[1], cy2 + move[1]
    if y1 < ymin:
        y1 = ymin
        y2 = y1 + height
    if y2 > ymax:
        y2 = ymax
        y1 = y2 - height
    return [(x1, x2), (y1, y2)]
def rotateImage(img, center, angle):
    """
    Rotate an image by *angle* degrees; a zero angle is a no-op.
    :param img: input image
    :param center: rotation center
    :param angle: rotation angle in degrees
    :return: (rotated image, new center, rotation matrix or None)
    """
    if angle == 0:
        return img, center, None
    # Delegate to the helper that expands the canvas so no pixels are cropped.
    return rotateNonSquareImage(img, angle, center)
def rotateImageAboutPoint(img, point, angle):
    """
    Rotate an image by *angle* degrees about *point*; a zero angle is a no-op.
    :param img: input image
    :param point: pivot of the rotation
    :param angle: rotation angle in degrees
    :return: rotated image
    """
    if angle == 0:
        return img
    transform = cv2.getRotationMatrix2D(tuple(point), angle, 1)
    # warpAffine keeps the original canvas size, so corners may be clipped.
    return cv2.warpAffine(img.astype('float32'), transform, (img.shape[1], img.shape[0]))
def rotatePoint(origin, point, angle):
    """
    Rotate a point counterclockwise by *angle* radians around *origin*.
    :return: rotated point as (qx, qy)
    """
    ox, oy = origin
    px, py = point
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    dx, dy = px - ox, py - oy
    # Standard 2-D rotation applied to the offset, then shifted back.
    return ox + cos_a * dx - sin_a * dy, oy + sin_a * dx + cos_a * dy
def getMaskThreshold(img):
    """
    Compute the intensity below which pixels are treated as masked.
    :param img: input image
    :return: mask threshold
    """
    lowest = img.min()
    # Negative pixels mark invalid regions, so anything below ~0 is masked.
    mask_thres = -0.01 if lowest < 0 else lowest
    if img.shape == (1043, 981):
        # Pilatus-sized frames: use the most populated of the 3 lowest bins.
        counts, edges = np.histogram(img, 3, (lowest, lowest + 3))
        mask_thres = edges[np.argmax(counts)]
    return mask_thres
###### White top hat image for Scanning Diffraction #########
def gaussian(x, a, mean, sigma):
    """
    Evaluate a Gaussian of amplitude *a*, center *mean*, and width *sigma*.
    (The previous docstring was a copy-paste of mean_square_error's.)
    """
    exponent = -((x - mean) ** 2) / (2. * sigma ** 2)
    return a * np.exp(exponent)
def getImgAfterWhiteTopHat(img, sigma=5):
    """
    Give the image after applying a white top-hat transform.
    The image is first smoothed twice with a sigma x sigma mean filter, then
    white_tophat is applied with a Gaussian-shaped structuring element.
    :param img: input image
    :param sigma: base kernel size; the Gaussian footprint spans ~6*sigma
    :return: top-hat filtered image
    """
    tmpKernel = 1. / sigma ** 2 * np.ones((sigma, sigma))
    dst = copy.copy(img)
    dst = np.array(dst, np.float32)
    for _ in range(2):
        # Mean (box) filter to suppress noise before the top-hat.
        dst = cv2.filter2D(dst, cv2.CV_32F, tmpKernel, anchor=(-1, -1))
    sigma = sigma * 6
    x = np.array(range(-int(sigma + 1), int(sigma + 1) + 1, 1))
    kernX = gaussian(x, 1, 0, sigma)
    kernXY = np.outer(kernX, np.transpose(kernX))
    tophat = white_tophat(dst, kernXY)
    return tophat
def kernelXY(sigma=5):
    """
    Build a 2-D Gaussian-shaped kernel for the given sigma.
    :return: kernel formed as the outer product of a 1-D profile with itself
    """
    spread = sigma * 6
    xs = np.array(range(-int(spread + 1), int(spread + 1) + 1, 1))
    profile = 1. / (np.sqrt(2. * np.pi) * spread) * np.exp(-(xs - 0) ** 2. / (2. * spread ** 2))
    return np.outer(profile, np.transpose(profile))
def display_test(img, name="test", max_int=100):
    """
    Show an image on screen, scaled to fit within ~650 px. Debug helper only.
    :param img: input image
    :param name: window name
    :param max_int: intensity mapped to white
    :return: -
    """
    longest_side = max(img.shape[:2])
    scale = 1. * 650 / longest_side
    new_size = (int(img.shape[1] * scale), int(img.shape[0] * scale))
    shown = get8bitImage(img, min=0.0, max=max_int)
    cv2.imshow(name, cv2.resize(shown, new_size))
def averageImages(file_list, rotate=False, preprocessed=False, man_det=None):
    """
    Open images and average them all.
    WARNING: file_list is a list of path strings unless *preprocessed* is
    True, in which case it is a list of already-loaded images.
    :param file_list: list of image paths (str) or images
    :return: averaged image
    """
    dims_match, max_dim, max_img_center = checkDimensionsMatch(file_list, preprocessed=preprocessed)
    if not dims_match:
        # Sizes differ: pad everything up to the largest frame first.
        return expandAndAverageImages(file_list, max_dim, max_img_center, rotate, preprocessed=preprocessed)
    loaded = []
    for f in file_list:
        frame = f if preprocessed else fabio.open(f).data
        if rotate:
            print(f'Rotating and centering {f}')
            center = getCenter(frame)
            angle = getRotationAngle(frame, center, method=0, man_det=man_det)
            frame, center, _ = rotateImage(frame, center, angle)
        loaded.append(frame)
    return np.mean(loaded, axis=0)
def expandAndAverageImages(file_list, max_dim, max_img_center, rotate, preprocessed=False):
    """
    open images, expand to largest size and average them all
    :param file_list: list of image path (str), or of images if preprocessed
    :param max_dim: dimension of largest image
    :param max_img_center: center of largest image
    :param rotate: also rotate each frame to its detected orientation
    :param preprocessed: True when file_list already holds loaded images
    :return: averaged image
    """
    all_imgs=[]
    for f in file_list:
        if preprocessed:
            img = f
        else:
            img = fabio.open(f).data
        # Expand Image to max size by padding the surrounding by zeros and center of all image coincides
        center = getCenter(img)
        expanded_img = np.zeros(max_dim)
        b, l = img.shape
        expanded_img[0:b, 0:l] = img
        # Translate so this frame's center lines up with the largest frame's center
        transx = int((max_img_center[0] - center[0]))
        transy = int((max_img_center[1] - center[1]))
        M = np.float32([[1, 0, transx], [0, 1, transy]])
        img = cv2.warpAffine(expanded_img, M, max_dim)
        if rotate:
            print(f'Rotating and centering {f}')
            angle = getRotationAngle(img, max_img_center, method=0)
            img, center, _ = rotateImage(img, max_img_center, angle)
        all_imgs.append(img)
    return np.mean(all_imgs, axis=0)
def checkDimensionsMatch(file_list, preprocessed=False):
    """
    Check whether dimensions of all the images match
    :param file_list: list of image path (str), or of images if preprocessed
    :return: (True if all dimensions match, largest dimension, center of largest image)
    """
    dims = []
    for f in file_list:
        if preprocessed:
            img = f
        else:
            img = fabio.open(f).data
        dims.append(img.shape)
    # NOTE(review): max() on shape tuples compares lexicographically (rows
    # first), not by area - confirm that is the intended "largest" frame.
    max_dim = max(dims)
    index = dims.index(max_dim)
    if preprocessed:
        max_img = file_list[index]
    else:
        max_img = fabio.open(file_list[index]).data
    center = getCenter(max_img)
    return dims.count(dims[0]) == len(dims), max_dim, center
def processImageForIntCenter(img, center):
    """
    Translate the image so its center lands on integer coordinates.
    :param img: original image
    :param center: center with fractional coordinates
    :return: (translated image, nearest-integer center)
    """
    img = img.astype('float32')
    int_Center = (round(center[0]), round(center[1]))
    # Sub-pixel shift that moves the old center onto the integer one.
    tx = int_Center[0] - center[0]
    ty = int_Center[1] - center[1]
    print("In process Image int center, translating original image by tx = " + str(tx) + " and ty = " + str(ty))
    rows, cols = img.shape
    shift = np.float32([[1, 0, tx], [0, 1, ty]])
    translated_Img = cv2.warpAffine(img, shift, (cols, rows))
    return (translated_Img, int_Center)
def rotateNonSquareImage(img, angle, center1):
    """
    Rotates a non square image by first determining the appropriate square image and then rotating the image.
    :param img: original non-square image
    :param angle: rotation angle in degrees
    :param center1: center (x, y) in the original image's coordinates
    :return: (rotated image, transformed center, 2x3 rotation matrix)
    """
    img = img.astype('float32')
    height, width = img.shape
    center = (width/2, height/2)
    rotation_mat = cv2.getRotationMatrix2D(center, angle, 1.)
    # rotation calculates the cos and sin, taking absolutes of those.
    abs_cos = abs(rotation_mat[0,0])
    abs_sin = abs(rotation_mat[0,1])
    # find the new width and height bounds
    bound_w = int(height * abs_sin + width * abs_cos)
    bound_h = int(height * abs_cos + width * abs_sin)
    # subtract old image center (bringing image back to origo) and adding the new image center coordinates
    rotation_mat[0, 2] += bound_w/2 - center[0]
    rotation_mat[1, 2] += bound_h/2 - center[1]
    maxB = max(bound_h, bound_w)
    # Map the caller-supplied center into the rotated coordinate system.
    center1 = [center1[0], center1[1], 1]
    center1 = np.dot(rotation_mat, center1)
    center2 = (int(center1[0]), int(center1[1]))
    # rotate image with the new bounds and translated rotation matrix
    rotated_img = cv2.warpAffine(img, rotation_mat, (maxB, maxB))
    return rotated_img, center2, rotation_mat
def mean_square_error(y_predict, y):
    """
    Root-mean-square error between predictions and targets, normalized by
    the target range.
    :return: RMSE / (max(y) - min(y))
    """
    residual = y_predict - y
    rmse = np.sqrt(np.dot(residual.transpose(), residual) / y.shape[0])
    return rmse / (max(y) - min(y))
def inpaint_img(img, center=None, mask=None):
    """
    Function to inpaint an image
    Warning: This function should not be used for any data processing or results,
    it is only for visualization and faster processing purpose.
    Warning 2: the pyFAI inpainting function is NOT a machine learning method,
    it is only using a dataset to fill in the gaps.
    :param img: input image
    :param center: optional center used to size the radial sampling
    :param mask: optional mask of pixels to fill; defaults to negative pixels
    :return: inpainted image
    """
    print("Inpainting...")
    detector = find_detector(img)
    if center is not None:
        # Sample out to the farthest image corner from the given center.
        corners = [(0, 0), (img.shape[1], 0), (0, img.shape[0]), (img.shape[1], img.shape[0])]
        npt_rad = int(round(max([distance(center, c) for c in corners])))
    else:
        npt_rad=1024
    npt_azim=1024
    if mask is None:
        # mask = detector.mask
        # Negative pixels mark the invalid regions to be filled.
        mask = np.zeros_like(img)
        mask[img < 0] = 1
    ai = AzimuthalIntegrator(detector=detector)
    integration_method = IntegrationMethod.select_one_available("csr", dim=1, default="csr", degradable=True)
    image = ai.inpainting(img, mask=mask, poissonian=True, method=integration_method, npt_rad=npt_rad, npt_azim=npt_azim, grow_mask=1)
    # Benchmarking snippet kept for reference (string literal, never executed):
    """
    import pyFAI
    import matplotlib.pyplot as plt
    img_nomask = copy.copy(img)
    img[mask == 1] = -1
    wo = ai.integrate1d(img_nomask, 2000, unit="r_mm", method="csr_ocl", radial_range=(0,210))
    for k in (512, 1024, 2048, 4096):
        ai.reset()
        for i in (0, 1, 2, 4, 8):
            inpainted = ai.inpainting(img, mask=mask, poissonian=True, method=integration_method, npt_rad=k, npt_azim=npt_azim, grow_mask=i)
            wm = ai.integrate1d(inpainted, 2000, unit="r_mm", method="csr_ocl", radial_range=(0,210))
            print(f"method: {integration_method} npt_rad={k} grow={i}; R= {pyFAI.utils.mathutil.rwp(wm,wo)}")
            plt.imsave(f'{i}_{k}.png', inpainted)
    """
    print("Done.")
    return image
def find_detector(img, man_det=None):
    """
    Finds the detector used based on the size on the image used.
    If not found, use the default agilent_titan

    :param img: 2D image whose shape is matched against detector MAX_SHAPE
    :param man_det: optional detector name to force (validated against img)
    :return: a pyFAI detector instance
    """
    print("Finding detector...")
    detector = None
    if man_det is not None:
        registry = Detector.registry
        matches = (man_det in registry
                   and hasattr(registry[man_det], 'MAX_SHAPE')
                   and registry[man_det].MAX_SHAPE == img.shape)
        if matches:
            detector = detector_factory(man_det)
        else:
            print('The detector specified does not correspond to the image being processed')
    else:
        # Scan the registry for the first detector whose shape matches.
        for name, det_cls in Detector.registry.items():
            if hasattr(det_cls, 'MAX_SHAPE') and det_cls.MAX_SHAPE == img.shape:
                detector = detector_factory(name)
                print(detector.get_name())
                break
    if detector is None:
        print("No corresponding detector found, using agilent_titan by default...")
        detector = detector_factory('agilent_titan')
    return detector
def getMisSettingAngles(img, detector, center, wavelength=1e-10, calibrant="AgBh"):
    """
    Finds the detector's mis-setting angles using an image, the detector, the center of the image,
    and optionally the wavelength and calibrant.
    Warning: results need to be verified.

    :param img: calibration image
    :param detector: pyFAI detector instance
    :param center: (x, y) beam center in pixels
    :param wavelength: beam wavelength in meters
    :param calibrant: pyFAI calibrant name (default: silver behenate)
    :return: refined (rot1, rot2, rot3) rotation angles
    """
    # Initialize a geometry:
    geo = load({"detector": detector, "wavelength": wavelength})
    # Approximate geometry: 100 mm distance with the provided beam center.
    geo.setFit2D(100, center[0], center[1])
    # Initialize SingleGeometry:
    calib = get_calibrant(calibrant)
    calib.set_wavelength(wavelength)
    sg = SingleGeometry(label='test', image=img, calibrant=calib, detector=detector, geometry=geo)
    # Extract control points: Usually one wants GUI here !
    sg.extract_cp(max_rings=4)
    # Perform the calibration; wavelength is held fixed during refinement.
    sg.geometry_refinement.refine3(fix=["wavelength"])
    return sg.geometry_refinement.rot1, sg.geometry_refinement.rot2, sg.geometry_refinement.rot3
def calcSlope(pt1, pt2):
    """
    Compute the slope of the line through two points.

    :param pt1: first point (x, y)
    :param pt2: second point (x, y)
    :return: the slope, or float('inf') for a vertical line
    """
    run = pt2[0] - pt1[0]
    if run == 0:
        return float('inf')
    return (pt2[1] - pt1[1]) / run
def getIntersectionOfTwoLines(line1, line2):
    """
    Finds intersection of lines line1 = [p1,p2], line2 = [p3,p4]

    :param line1: pair of points (x, y) defining the first line
    :param line2: pair of points (x, y) defining the second line
    :return: the intersection point as an (int, int) tuple
    :raises ZeroDivisionError: if the two lines are parallel
    """
    p1, p2 = line1
    p3, p4 = line2
    # Bug fix: the original divided by (p2[0] - p1[0]) unconditionally and
    # crashed when line1 was vertical, even though the sibling calcSlope
    # handles that case. Handle a vertical line1 explicitly here.
    if p2[0] == p1[0]:
        # line1 is vertical: x is fixed; intersect with line2's equation.
        x = p1[0]
        slope2 = (p4[1] - p3[1]) / (p4[0] - p3[0])
        y = slope2 * (x - p3[0]) + p3[1]
        return (int(x), int(y))
    slope1 = (p2[1] - p1[1]) / (p2[0] - p1[0])
    if p4[0] != p3[0]:
        slope2 = (p4[1] - p3[1]) / (p4[0] - p3[0])
        x = (p3[1] - p1[1] + slope1*p1[0] - slope2*p3[0]) / (slope1 - slope2)
        y = slope1*(x - p1[0]) + p1[1]
    else:
        # line2 is vertical (slope2 is inf): x is fixed by line2.
        x = p4[0]
        y = slope1 * (x - p1[0]) + p1[1]
    return (int(x), int(y))
def getPerpendicularLineHomogenous(p1, p2):
    """
    Return the slope of the perpendicular bisector of segment p1-p2 and
    the chord midpoint in homogeneous coordinates [x, y, 1].
    """
    if p1[0] != p2[0]:
        chord_slope = (p2[1] - p1[1]) / (p2[0] - p1[0])
    else:
        chord_slope = float('inf')
    midpoint = [(p2[0] + p1[0]) / 2, (p2[1] + p1[1]) / 2, 1]
    print("Chord_cent1 ", midpoint)
    # Horizontal chord -> vertical perpendicular, and vice versa.
    if chord_slope == 0:
        return float('inf'), midpoint
    if chord_slope == float('inf'):
        return 0, midpoint
    return -1 / chord_slope, midpoint
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# LSTM stock-price forecasting script: windows the closing-price series,
# trains a two-layer LSTM on normalized windows, and plots predictions.
train = pd.read_csv("cheatkey.csv")
# '종가' is the closing-price column of the CSV.
end_prices = train['종가']
print(type(end_prices))
#normalize window
# Slice the series into overlapping windows of 51 values: 50 inputs + 1 target.
seq_len = 50
sequence_length = seq_len + 1
result = []
for index in range(len(end_prices) - sequence_length + 1):
    idk = []
    idk[:] = end_prices[index: index + sequence_length]
    result.append(idk)
#normalize data
def normalize_windows(data):
    """Scale every window relative to its first element: p -> p/p0 - 1."""
    normalized_data = []
    for window in data:
        normalized_window = [((float(p) / float(window[0])) - 1) for p in window]
        normalized_data.append(normalized_window)
    return np.array(normalized_data)
norm_result = normalize_windows(result)
# split train and test data
# First 90% of windows (shuffled) for training; the rest held out for testing.
row = int(round(norm_result.shape[0] * 0.9))
train = norm_result[:row, :]
np.random.shuffle(train)
x_train = train[:, :-1]
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
y_train = train[:, -1]
x_test = norm_result[row:, :-1]
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
y_test = norm_result[row:, -1]
print(x_test.shape)
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Activation
# Two stacked LSTMs followed by a single linear output unit.
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(seq_len, 1)))
model.add(LSTM(64, return_sequences=False))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='rmsprop')
model.summary()
model.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=int(seq_len/5), epochs=20)
model.save("model.h5")
pred = model.predict(x_test)
# Rebuild the last seq_len raw (un-normalized) prices.
result = []
result[:] = end_prices[-seq_len:]
result = np.array(result)
x_test = result.reshape(1, -1, 1)
# The next print announces "starting de-normalization" (Korean).
print('역정규화 개시')
# NOTE(review): pred[-1] was normalized against its own window's first
# price, while un_norm is the first of the LAST seq_len raw prices — the
# two windows differ, so this de-normalization looks inconsistent; verify.
un_norm = result[0]
pred_today = (pred[-1]+1) * un_norm
# NOTE(review): the label says "last 5 days" but x_test holds seq_len=50 values.
print("last 5 days:\n ", x_test)
print("prediction: ", pred_today)
# Plot held-out targets against the model's predictions.
fig = plt.figure(facecolor='white')
ax = fig.add_subplot(111)
ax.plot(y_test, label='True')
ax.plot(pred, label='Prediction')
ax.legend()
plt.show()
def find_all(a_str, sub):
    """Yield every index (overlapping matches included) of sub in a_str."""
    position = a_str.find(sub)
    while position != -1:
        yield position
        # Advance by one so overlapping occurrences are also reported.
        position = a_str.find(sub, position + 1)
def find_all_indexes_substring(a_str, arr):
    """
    For every element of arr that occurs in a_str, collect the list of
    its match indexes; elements with no match contribute nothing.
    """
    all_matches = (list(find_all(a_str, elem)) for elem in arr)
    return [matches for matches in all_matches if matches]
def is_second_in_first(delimiter, substring_indexes):
    """
    Check that each row of match indexes, shifted by *delimiter*, overlaps
    the following row's indexes (i.e. consecutive rows line up one full
    row-width apart).
    """
    for row, indexes in enumerate(substring_indexes[:-1]):
        shifted = {idx + delimiter for idx in indexes}
        if not shifted.intersection(substring_indexes[row + 1]):
            return False
    return True
def is_second_arr_in_first_arr(first_arr, second_arr):
    """
    Decide whether the second character matrix occurs inside the first.

    Each argument is a string "rows cols cell1 cell2 ..."; the first
    matrix's cells are concatenated and every row of the second matrix
    must match at offsets exactly one row-width (cols of the first) apart.
    """
    rows1, columns1, *first_cells = first_arr.split()
    rows2, columns2, *second_cells = second_arr.split()
    flattened = ''.join(first_cells)
    matches = find_all_indexes_substring(flattened, second_cells)
    return (len(matches) == int(rows2)
            and is_second_in_first(int(columns1), matches))
'''input:
first_arr = 4 6 029402 560202 029694 780288
second_arr = 2 2 02 94
output:
True/False
'''
|
#!/usr/bin/python3
# @Author: Safer
# @Date: 2016-12-04 17:53:59
# @Last Modified by: Safer
# @Last Modified time: 2016-12-04 23:31:25
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
# * * * * * php /home/www/laravel/artisan schedule:run >> /dev/null 2>&1
class StockDialog(QWidget):
    """
    Demo dialog: a pen/brush property panel on the right driving a
    PaintArea canvas on the left.

    NOTE(review): this file mixes PyQt4 and PyQt5 idioms — setMargin,
    self.connect/SIGNAL and QVariant.toInt() are PyQt4 APIs and are
    expected to fail under PyQt5; confirm the intended binding.
    """
    def __init__(self, parent=None):
        super(StockDialog, self).__init__(parent)
        # Window title (Chinese): "Drawing various shapes with QPainter".
        self.setWindowTitle(self.tr("利用QPainter绘制各种图形"))
        mainSplitter = QSplitter(Qt.Horizontal)
        mainSplitter.setOpaqueResize(True)
        frame = QFrame(mainSplitter)
        mainLayout = QGridLayout(frame)
        # NOTE(review): QLayout.setMargin is PyQt4 API; PyQt5 uses
        # setContentsMargins — verify.
        mainLayout.setMargin(10)
        mainLayout.setSpacing(6)
        # Labels (Chinese): shape / pen width / pen color / pen style /
        # pen cap / pen join / brush style / brush color.
        label1 = QLabel(self.tr("形状:"))
        label2 = QLabel(self.tr("画笔线宽:"))
        label3 = QLabel(self.tr("画笔颜色:"))
        label4 = QLabel(self.tr("画笔风格:"))
        label5 = QLabel(self.tr("画笔顶端:"))
        label6 = QLabel(self.tr("画笔连接点:"))
        label7 = QLabel(self.tr("画刷风格:"))
        label8 = QLabel(self.tr("画刷颜色:"))
        self.shapeComboBox = QComboBox()
        self.shapeComboBox.addItem(self.tr("Line"), "Line")
        self.shapeComboBox.addItem(self.tr("Rectangle"), "Rectangle")
        self.widthSpinBox = QSpinBox()
        self.widthSpinBox.setRange(0, 20)
        # Color swatch frames show the currently selected pen/brush colors.
        self.penColorFrame = QFrame()
        self.penColorFrame.setAutoFillBackground(True)
        self.penColorFrame.setPalette(QPalette(Qt.blue))
        self.penColorPushButton = QPushButton(self.tr("更改"))
        self.penStyleComboBox = QComboBox()
        self.penStyleComboBox.addItem(self.tr("Solid"), Qt.SolidLine)
        self.penCapComboBox = QComboBox()
        self.penCapComboBox.addItem(self.tr("Flat"), Qt.FlatCap)
        self.penJoinComboBox = QComboBox()
        self.penJoinComboBox.addItem(self.tr("Miter"), Qt.MiterJoin)
        self.brushStyleComboBox = QComboBox()
        self.brushStyleComboBox.addItem(self.tr("Linear Gradient"), Qt.LinearGradientPattern)
        self.brushColorFrame = QFrame()
        self.brushColorFrame.setAutoFillBackground(True)
        self.brushColorFrame.setPalette(QPalette(Qt.green))
        self.brushColorPushButton = QPushButton(self.tr("更改"))
        labelCol = 0
        contentCol = 1
        # Build the layout (labels in column 0, editors in column 1).
        mainLayout.addWidget(label1, 1, labelCol)
        mainLayout.addWidget(self.shapeComboBox, 1, contentCol)
        mainLayout.addWidget(label2, 2, labelCol)
        mainLayout.addWidget(self.widthSpinBox, 2, contentCol)
        mainLayout.addWidget(label3, 4, labelCol)
        mainLayout.addWidget(self.penColorFrame, 4, contentCol)
        mainLayout.addWidget(self.penColorPushButton, 4, 3)
        mainLayout.addWidget(label4, 6, labelCol)
        mainLayout.addWidget(self.penStyleComboBox, 6, contentCol)
        mainLayout.addWidget(label5, 8, labelCol)
        mainLayout.addWidget(self.penCapComboBox, 8, contentCol)
        mainLayout.addWidget(label6, 10, labelCol)
        mainLayout.addWidget(self.penJoinComboBox, 10, contentCol)
        mainLayout.addWidget(label7, 12, labelCol)
        mainLayout.addWidget(self.brushStyleComboBox, 12, contentCol)
        mainLayout.addWidget(label8, 14, labelCol)
        mainLayout.addWidget(self.brushColorFrame, 14, contentCol)
        mainLayout.addWidget(self.brushColorPushButton, 14, 3)
        mainSplitter1 = QSplitter(Qt.Horizontal)
        mainSplitter1.setOpaqueResize(True)
        stack1 = QStackedWidget()
        stack1.setFrameStyle(QFrame.Panel | QFrame.Raised)
        self.area = PaintArea()
        stack1.addWidget(self.area)
        frame1 = QFrame(mainSplitter1)
        mainLayout1 = QVBoxLayout(frame1)
        mainLayout1.setMargin(10)
        mainLayout1.setSpacing(6)
        mainLayout1.addWidget(stack1)
        layout = QGridLayout(self)
        layout.addWidget(mainSplitter1, 0, 0)
        layout.addWidget(mainSplitter, 0, 1)
        self.setLayout(layout)
        # Signal/slot wiring.
        # NOTE(review): old-style connections; PyQt5 requires e.g.
        # self.shapeComboBox.activated.connect(self.slotShape) — verify.
        self.connect(self.shapeComboBox, SIGNAL("activated(int)"), self.slotShape)
        self.connect(self.widthSpinBox, SIGNAL("valueChanged(int)"), self.slotPenWidth)
        self.connect(self.penColorPushButton, SIGNAL("clicked()"), self.slotPenColor)
        self.connect(self.penStyleComboBox, SIGNAL("activated(int)"), self.slotPenStyle)
        self.connect(self.penCapComboBox, SIGNAL("activated(int)"), self.slotPenCap)
        self.connect(self.penJoinComboBox, SIGNAL("activated(int)"), self.slotPenJoin)
        self.connect(self.brushStyleComboBox, SIGNAL("activated(int)"), self.slotBrush)
        self.connect(self.brushColorPushButton, SIGNAL("clicked()"), self.slotBrushColor)
        # Push the initial widget state into the paint area.
        self.slotShape(self.shapeComboBox.currentIndex())
        self.slotPenWidth(self.widthSpinBox.value())
        self.slotBrush(self.brushStyleComboBox.currentIndex())
    def slotShape(self, value):
        # Map the combo index onto the PaintArea's shape-name list.
        shape = self.area.Shape[value]
        self.area.setShape(shape)
    def slotPenWidth(self, value):
        """Rebuild the pen from all pen widgets; *value* is the width."""
        color = self.penColorFrame.palette().color(QPalette.Window)
        # NOTE(review): itemData(...).toInt() is PyQt4 QVariant API; PyQt5
        # returns plain Python objects — verify.
        style = Qt.PenStyle((self.penStyleComboBox.itemData(
            self.penStyleComboBox.currentIndex(), Qt.UserRole).toInt())[0])
        cap = Qt.PenCapStyle((self.penCapComboBox.itemData(
            self.penCapComboBox.currentIndex(), Qt.UserRole).toInt())[0])
        join = Qt.PenJoinStyle((self.penJoinComboBox.itemData(
            self.penJoinComboBox.currentIndex(), Qt.UserRole).toInt())[0])
        self.area.setPen(QPen(color, value, style, cap, join))
    def slotPenStyle(self, value):
        # Style/cap/join changes all funnel through slotPenWidth, which
        # re-reads every pen widget.
        self.slotPenWidth(value)
    def slotPenCap(self, value):
        self.slotPenWidth(value)
    def slotPenJoin(self, value):
        self.slotPenWidth(value)
    def slotPenColor(self):
        # Ask for a new pen color and apply it to the swatch and the area.
        color = QColorDialog.getColor(Qt.blue)
        self.penColorFrame.setPalette(QPalette(color))
        self.area.setPen(QPen(color))
    def slotBrushColor(self):
        color = QColorDialog.getColor(Qt.blue)
        self.brushColorFrame.setPalette(QPalette(color))
        self.slotBrush(self.brushStyleComboBox.currentIndex())
    def slotBrush(self, value):
        """Build the brush for the selected style and current color."""
        color = self.brushColorFrame.palette().color(QPalette.Window)
        style = Qt.BrushStyle((self.brushStyleComboBox.itemData(value, Qt.UserRole).toInt())[0])
        if(style == Qt.LinearGradientPattern):
            linearGradient = QLinearGradient(0, 0, 400, 400)
            linearGradient.setColorAt(0.0, Qt.white)
            linearGradient.setColorAt(0.2, color)
            linearGradient.setColorAt(1.0, Qt.black)
            self.area.setBrush(linearGradient)
        elif(style == Qt.ConicalGradientPattern):
            conicalGradient = QConicalGradient(200, 200, 30)
            conicalGradient.setColorAt(0.0, Qt.white)
            conicalGradient.setColorAt(0.2, color)
            conicalGradient.setColorAt(1.0, Qt.black)
            self.area.setBrush(conicalGradient)
        elif(style == Qt.TexturePattern):
            self.area.setBrush(QBrush(QPixmap("image/cheese.jpg")))
        else:
            self.area.setBrush(QBrush(color, style))
class PaintArea(QWidget):
    """
    White 400x400 canvas that repaints one named shape with the current
    pen and brush.

    NOTE(review): self.shape is only assigned in setShape(); a paintEvent
    before the first setShape call would raise AttributeError (StockDialog
    calls slotShape during construction, which masks this).
    """
    def __init__(self):
        super(PaintArea, self).__init__()
        # Only the first two names are offered by StockDialog's combo box.
        self.Shape = ["Line", "Rectangle"]
        self.setPalette(QPalette(Qt.white))
        self.setAutoFillBackground(True)
        self.setMinimumSize(400, 400)
        self.pen = QPen()
        self.brush = QBrush()
    def setShape(self, s):
        # Remember the shape name and schedule a repaint.
        self.shape = s
        self.update()
    def setPen(self, p):
        self.pen = p
        self.update()
    def setBrush(self, b):
        self.brush = b
        self.update()
    def paintEvent(self, QPaintEvent):
        """Draw the currently selected shape with the stored pen/brush."""
        p = QPainter(self)
        p.setPen(self.pen)
        p.setBrush(self.brush)
        # Shared geometry used by the shape branches below.
        rect = QRect(50, 100, 300, 200)
        points = [QPoint(150, 100), QPoint(300, 150), QPoint(350, 250), QPoint(100, 300)]
        startAngle = 30 * 16
        spanAngle = 120 * 16
        path = QPainterPath()
        path.addRect(150, 150, 100, 100)
        path.moveTo(100, 100)
        path.cubicTo(300, 100, 200, 200, 300, 300)
        path.cubicTo(100, 300, 200, 200, 100, 100)
        if self.shape == "Line":
            p.drawLine(rect.topLeft(), rect.bottomRight())
        elif self.shape == "Rectangle":
            p.drawRect(rect)
        elif self.shape == "RoundRect":
            p.drawRoundRect(rect)
        elif self.shape == "Ellipse":
            p.drawEllipse(rect)
        elif self.shape == "Polygon":
            p.drawPolygon(points, 4)
        elif self.shape == "Polyline":
            p.drawPolyline(points, 4)
        elif self.shape == "Points":
            p.drawPoints(points, 4)
        elif self.shape == "Arc":
            p.drawArc(rect, startAngle, spanAngle)
        elif self.shape == "Path":
            p.drawPath(path)
        elif self.shape == "Text":
            # NOTE(review): unqualified tr() is undefined here (should be
            # self.tr) — this branch would raise NameError if reached.
            p.drawText(rect, Qt.AlignCenter, tr("Hello Qt!"))
        elif self.shape == "Pixmap":
            p.drawPixmap(150, 150, QPixmap("image/butterfly.png"))
if __name__ == '__main__':
    # Start the Qt event loop with the demo dialog.
    app = QApplication(sys.argv)
    form = StockDialog()
    form.show()
    app.exec_()
|
[devicefiles]
{{target}}/vendor-boot-ramdisk: vendor-boot-ramdisk
[mapping]
{{target}}/vendor-boot-ramdisk: vendor-boot-ramdisk
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 19:26:19 2019
@author: Prajwal
"""
#Euler 17
# Count the letters used when the numbers 1..1000 are written out in
# British English: 'and' after the hundreds, no spaces or hyphens.
numbers = [str(i) for i in range(1, 1001)]
ones = {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine'}
tens = {1: 'ten', 2: 'twenty', 3: 'thirty', 4: 'forty', 5: 'fifty', 6: 'sixty', 7: 'seventy', 8: 'eighty', 9: 'ninety'}
tens_ones = {11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'nineteen'}

def _spell(n):
    """Spell 1 <= n <= 999 with no spaces, using 'and' after the hundreds."""
    if n < 10:
        return ones[n]
    if n < 100:
        if n in tens_ones:
            return tens_ones[n]
        tenth, unit = divmod(n, 10)
        return tens[tenth] if unit == 0 else tens[tenth] + ones[unit]
    hundredth, remainder = divmod(n, 100)
    if remainder == 0:
        return ones[hundredth] + 'hundred'
    return ones[hundredth] + 'hundredand' + _spell(remainder)

# Same contents as before: 1..999 spelled out, then 'onethousand'.
spellings = [_spell(n) for n in range(1, 1000)]
spellings.append('onethousand')
str_len = sum(len(word) for word in spellings)
print("Total letters used : ", str_len)
|
import django_filters
from .models import GTINInformation
class GTINFilter(django_filters.FilterSet):
    """FilterSet exposing filtering on a GTIN number and model name."""
    class Meta:
        # Model and filterable fields for django-filter.
        model = GTINInformation
        fields = ['gtin_number', 'model_name']
|
def add(a, b):
    """Return the sum of a and b."""
    return a + b
def subtract(a, b):
    """Return the difference a - b."""
    return a - b
def hello_world():
    """Return the classic greeting string."""
    return "Hello World"
|
import sys
from time import sleep
import urllib2
a = 0
b = 1
c = 0
baseURL = "https://api.thingspeak.com/update?api_key=3NR0GTMVEM2R36XX&field1="
while(a < 1000):
adc = open("/sys/bus/iio/devices/iio:device0/in_voltage0_raw", "r")
value = (adc.read(5)).strip()
print value
f = urllib2.urlopen(baseURL +str(value))
f.read()
f.close()
adc.close()
sleep(5)
print "Program has ended" |
from flask import Flask, request, render_template_string
# A template string
# In a real app, render from a file in the templates directory
TEMPLATE = """
<h1>Hello world!</h1>
<p>Your IP is {{ip_address}}</p>
<img src='static/earth.gif'>
"""
# Instatiate an app object
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Render the TEMPLATE string with the client's IP address."""
    # X-Real-IP is normally injected by a reverse proxy. Bug fix: indexing
    # request.headers raised KeyError for direct (proxy-less) requests;
    # fall back to the socket peer address instead.
    return render_template_string(
        TEMPLATE,
        ip_address=request.headers.get('X-Real-IP', request.remote_addr))
if __name__ == "__main__":
# Launch flask
# Be sure to turn off debug in prod!
print("Starting flask...")
app.run(
debug=True,
host='0.0.0.0',
port=8888)
|
import ConfigParser
import duo_web as duo
from contextlib import closing
from flask import Flask, request, session, redirect, url_for, render_template, flash
# config
DEBUG = True
# create flask application
app = Flask(__name__)
app.config.from_object(__name__)
# config parser
def grab_keys(filename='duo.conf'):
    """
    Read the Duo API credentials from an INI file.

    :param filename: path to a config file containing a [duo] section
    :return: dict with 'akey', 'ikey', 'skey' and 'host' entries
    """
    config = ConfigParser.RawConfigParser()
    config.read(filename)
    akey = config.get('duo', 'akey')
    ikey = config.get('duo', 'ikey')
    skey = config.get('duo', 'skey')
    host = config.get('duo', 'host')
    return {'akey': akey, 'ikey': ikey, 'skey': skey, 'host': host}
# app-specific configs
# app-specific configs
def app_config(filename='app.conf'):
    """Return the Flask session secret key from the [app] section of *filename*."""
    config = ConfigParser.RawConfigParser()
    config.read(filename)
    return config.get('app', 'skey')
# Routing functions
@app.route('/')
def show_entries():
    """Landing page."""
    return render_template('show_entries.html')
@app.route('/mfa', methods=['GET', 'POST'])
def mfa():
    """Second factor: render the Duo iframe (GET), verify its response (POST)."""
    result = grab_keys()
    # Sign a Duo request for the user stored in the session by /login.
    sec = duo.sign_request(result['ikey'], result['skey'], result['akey'], session['user'])
    if request.method == 'GET':
        return render_template('duoframe.html', duohost=result['host'], sig_request=sec)
    if request.method == 'POST':
        # NOTE(review): Duo posts sig_response in the form body; reading it
        # from request.args (the query string) may always yield None — verify.
        user = duo.verify_response(result['ikey'], result['skey'], result['akey'], request.args.get('sig_response'))
        if user == session['user']:
            # NOTE(review): render_template expects a template name, but
            # url_for('mfa') returns a URL path — confirm the intent.
            return render_template(url_for('mfa'), user=user)
        # NOTE(review): falls through returning None (HTTP 500 in Flask)
        # when verification fails.
@app.route('/success', methods=['POST'])
def success():
    """Post-MFA landing: bounce back to the index page."""
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """First factor: accept any non-empty username, then hand off to /mfa."""
    error = None
    if request.method == 'POST':
        if request.form['username'] == "":
            error = 'Type something in the username field.'
        else:
            # NOTE(review): no password check — the username alone "logs in";
            # the Duo step at /mfa is the only real authentication.
            session['logged_in'] = True
            session['user'] = request.form['username']
            flash('You are logged in')
            return redirect(url_for('mfa'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Drop the session flag and return to the index page."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
# main body
if __name__ == '__main__':
    # Session-signing secret comes from app.conf.
    app.secret_key = app_config('app.conf')
    app.run(host="0.0.0.0", port=5000)
|
def printSCS(x, y):
    """
    Print the Shortest Common Supersequence of the global strings X and Y.

    Fills the global LCS table ``dp`` (sized (x+1) x (y+1)), then
    backtracks through it, emitting characters so that both X and Y are
    subsequences of the printed string.

    :param x: length of X
    :param y: length of Y
    """
    global X, Y, dp
    # dp[i][j] = length of the LCS of X[:i] and Y[:j].
    for i in range(x + 1):
        for j in range(y + 1):
            if (i == 0) or (j == 0):
                dp[i][j] = 0
            elif X[i-1] == Y[j-1]:
                dp[i][j] = 1 + dp[i-1][j-1]
            else:
                dp[i][j] = max(dp[i-1][j], dp[i][j-1])
    # Backtrack from the end of both strings.
    i = x - 1
    j = y - 1
    l = []
    while (j >= 0) and (i >= 0):
        if X[i] == Y[j]:
            # Common character: emit it once for both strings.
            l.append(X[i])
            i -= 1
            j -= 1
        elif dp[i+1][j] > dp[i][j+1]:
            # Dropping Y[j] keeps the larger LCS, so Y[j] is not part of
            # the LCS: emit it and advance in Y only.
            l.append(Y[j])
            j -= 1
        else:
            l.append(X[i])
            i -= 1
    # Bug fix: the original dropped any leftover prefix of X or Y once one
    # index ran out (e.g. X="abc", Y="c" printed "c" instead of "abc").
    while i >= 0:
        l.append(X[i])
        i -= 1
    while j >= 0:
        l.append(Y[j])
        j -= 1
    l.reverse()
    print("".join(l))
# Read the two strings, allocate the (x+1) x (y+1) DP table, and print
# their shortest common supersequence.
X = input()
Y = input()
x = X.__len__()
y = Y.__len__()
dp = [[0 for i in range (y+1)] for j in range (x+1)]
printSCS(x,y)
|
import pyautogui as pag
import time
import csv
# GUI-automation script: reports screen/cursor state, loads a CSV of
# subjects, then types a fixed message sequence into whatever window has
# focus (100 iterations).
# NOTE(review): this drives the live desktop — make sure the intended
# window is focused before the 2-second delay expires.
scw, sch = pag.size()
print("Screen size (" + str(scw) + "," + str(sch) + ")")
cx, cy = pag.position()
print("Cursor position (" + str(cx) + "," + str(cy) + ")")
with open("Files/subjects.csv") as sfile:
    cread = csv.reader(sfile, delimiter = ",")
    rows = [row for row in cread]
time.sleep(2)
# Earlier experiments, kept disabled:
# pag.click(677, 708)
# pag.moveTo(100, 200, 2, pag.easeInOutEase)
# pag.moveTo(100, 200, 2, pag.easeInBounce)
# pag.moveTo(85, 100)
# pag.drag(100, 100, 0.5)
# for i in range(len(rows)):
#     pag.write(",".join(rows[i]))
#     pag.press("enter")
#
# for i in range(100):
#     bz = str(random.random())
#     pag.write(bz)
#     #pag.hotkey("shift", "enter")
#     #pag.write("Message number " + str(i+1))
#     pag.press("enter")
#     time.sleep(0.1)
# Cycle through the four names (i % 4) while counting down from 100.
nm = ["Maria", "Charisma", "Patambang", "Estrella"]
for i in range(100):
    pag.write("HELLO")
    pag.hotkey("shift", "enter")
    pag.write("My dear " + nm[i%4] + ". T = -" + str(100-i))
    time.sleep(0.5)
    pag.press("enter")
    time.sleep(1)
|
#!/usr/bin/env python3
import requests
import re
import time
import smtplib
import sys
import os
import json
from datetime import datetime
from notify_run import Notify
import platform
if platform.system() == 'Windows':
CLEAR_STR = "cls"
else:
CLEAR_STR = "clear"
class Product:
    """A tracked product with its alert price cap and running price average."""
    def __init__(self, name, link, cap):
        self.name = name      # display name
        self.link = link      # product page URL
        self.cap = cap        # alert when the price drops below this
        self.avg = 0          # running average of observed prices
        self.cycles = 0       # number of price observations so far
    def to_dict(self):
        """Return the JSON-serializable representation (name/link/cap)."""
        return {
            "name": self.name,
            "link": self.link,
            "cap": self.cap,
        }
    def update_avg(self, price):
        """Fold one more observed price into the running average."""
        self.cycles += 1
        self.avg = (self.avg * (self.cycles - 1) + price) / self.cycles
    def __str__(self):
        info = self.to_dict()
        return f"[{info['name']}] cap: {info['cap']} link: {info['link']}"
def log(string, time_mode="absolute"):
    """
    Print *string* with a timestamp prefix.

    time_mode selects the prefix: "absolute" (HH:MM:SS), "relative"
    (seconds since the global start_time), "absolute|relative" (both),
    "precise-absolute" (date + time); anything else prints unchanged.
    """
    text = str(string)
    if time_mode == "absolute":
        stamp = datetime.now().strftime("%H:%M:%S")
        print(("[%s]: " + text) % stamp)
    elif time_mode == "relative":
        print(("[%.2f]: " + text) % (time.time() - start_time))
    elif time_mode == "absolute|relative":
        stamp = datetime.now().strftime("%H:%M:%S")
        print(("[ %s | %.2f ]: " + text) % (stamp, (time.time() - start_time)))
    elif time_mode == "precise-absolute":
        stamp = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
        print(("[%s]: " + text) % stamp)
    else:
        print(text)
def send_mail(product, price):
    """
    Email a price-drop alert for *product* via Gmail SMTP.

    Uses the module globals userbotmail/botpsw (sender credentials) and
    recv_mail (recipient); logs and returns on authentication failure.
    """
    link = product.link
    log(f'Sending mail to {recv_mail} ...')
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()
    server.ehlo()
    try:
        server.login(userbotmail,botpsw)
    except smtplib.SMTPAuthenticationError:
        log("Failure in mailbot login")
        return
    subject = 'Price fell down!'
    body = f'Price for {product.name} is now at {price} ( price cap is setted as {product.cap})\n\nCheck the amazon link:\n{link}'
    msg = f"Subject: {subject}\n\n{body}"
    server.sendmail(userbotmail, recv_mail, msg)
    log('Email has been sent!')
    server.quit()
    #sys.exit() #exiting after mail sending
def send_notify(product, price):
    """Push a price-drop alert through the global notify_run channel."""
    notify.send(f'Price for {product.name} is now at {price} ( price cap is setted as {product.cap})\n\nCheck the amazon link:\n{product.link}')
    log("Notified")
def deactive_prod(product, store=False):
    """
    Stop tracking *product*: remove it from the global product_list and,
    when *store* is True, archive it in the global stored_products list.
    """
    global product_list, stored_products
    product_list.remove(product)
    if store:
        stored_products.append(product)
def extract_site(link):
    """
    Return the site name from a URL: the second dot-separated token,
    e.g. 'amazon' from 'https://www.amazon.it/...'.
    """
    return link.split(".")[1]
def extract_title(r,title_match):
    """
    Extract the product title from the raw HTML around *title_match*.

    Takes 200 characters after the matched title marker and picks the
    chunk between runs of blank lines.
    NOTE(review): brittle screen-scraping — breaks if the page markup changes.
    """
    return r.text[title_match.end():title_match.end()+200].split("\n\n\n\n\n\n")[1].replace("\n","")
def check_price(product_list):
    """
    Scrape the current price of every tracked product and alert (mail
    and/or push, per the global flags) when it falls below the cap.

    Alerted or malformed products are removed from the live list through
    deactive_prod(); per-site patterns are compiled lazily into the
    global regex_compiled cache.
    """
    global regex_compiled
    # Iterate over a copy: deactive_prod mutates the original list.
    products = product_list.copy()
    for product in products:
        link = product.link
        cap = product.cap
        try:
            site = extract_site(link)
        except IndexError:
            print(f"[{product.name}] Wrong Link format")
            deactive_prod(product)
            continue
        log(f"[{product.name}]")
        try:
            regexp_compiled, regext_compiled = regex_compiled[site]
        except KeyError:
            # First visit of this site: compile and cache its patterns.
            try:
                regexp, regext = regex[site]
                regexp_compiled = re.compile(regexp)
                regext_compiled = re.compile(regext)
                regex_compiled[site] = [regexp_compiled, regext_compiled]
            except KeyError:
                log("Scrape for site " + site + "not supported")
                print()
                continue
        log("Scraping...")
        r = requests.get(link, headers=headers)
        if r.status_code == 200:
            # Locate the price and title markers in the page body.
            price_match = regexp_compiled.search(r.text)
            title_match = regext_compiled.search(r.text)
            log("Data successfully scraped")
            title = extract_title(r,title_match)
            log(title)
            # Slice out and clean the price text, then parse it.
            price = r.text[price_match.end():price_match.end()+10].strip()
            price = price[2:].replace("\n","")
            price = price[:-3].replace(",",".")
            try:
                price = float(price)
                product.update_avg(price)
                log(f"price: {price} €/$ [Average: {product.avg}]")
            except ValueError:
                log("Prouct not avaible (this may be caused to a failure in the scraping process, check online once to be sure)")
                print()
                continue
            if price < cap:
                log(f'ALERT: Price under {cap} €/$')
                if email_flag:
                    send_mail(product, price)
                if notify_flag:
                    send_notify(product, price=price)
                # Alert fired: archive the product so it isn't re-alerted.
                deactive_prod(product, store=True)
            else:
                log(f'Setted Cap: {cap} €/$')
                print()
        else:
            log(f'Web Error for [{product.name}]: {link}\n')
            print()
def restore_credetials():
    """
    Load the mail-bot credentials (sender address, token, receiver
    address) into the module globals from botmail.conf, or prompt for
    them interactively (offering to save) when the file is missing.
    """
    try:
        global userbotmail, botpsw, recv_mail
        with open("botmail.conf", "r") as file:
            for line in file:
                # Bug fix: str.replace returns a new string (the original
                # discarded the result) and the trailing newline was never
                # stripped, so the stored credentials carried whitespace.
                if line.startswith("EMAIL"):
                    userbotmail = line.split("=", 1)[1].replace(" ", "").strip()
                if line.startswith("TOKEN"):
                    botpsw = line.split("=", 1)[1].replace(" ", "").strip()
                if line.startswith("RECIVER MAIL"):
                    recv_mail = line.split("=", 1)[1].replace(" ", "").strip()
    except OSError:
        # No config file: ask interactively until the user confirms.
        while True:
            if clear_flag:
                os.system(CLEAR_STR)
            print("\nFile botmail.conf not found, submit your credentials:")
            userbotmail = input("\nEMAIL: ").replace(" ","")
            botpsw = input("\nTOKEN: ").replace(" ","")
            recv_mail = input("\nRECIVER MAIL: ").replace(" ","")
            res = input("\n\nConfirm? 1/yes 0/no\n\n")
            if res == "1" or res == "yes":
                try:
                    with open("botmail.conf", "w") as file:
                        file.write(f"EMAIL = {userbotmail}\n")
                        file.write(f"TOKEN = {botpsw}\n")
                        file.write(f"RECIVER MAIL = {recv_mail}\n")
                except OSError:
                    print("\nERROR: Failure in writing in botmail.conf, credentials will not be saved\n")
                break
            elif res == "0" or res == "no":
                continue
def extract_products_from_json():
    """
    Rebuild the tracked-product list from the JSON file named by the
    global *jsonfile*.

    Returns [] (after logging) when the file is missing or malformed,
    and exits when the file exists but holds no data.
    """
    try:
        with open(jsonfile, "r") as jfile:
            data = json.load(jfile)
    except OSError:
        print()
        log("Failure in json file opening, creating one")
        return []
    except json.decoder.JSONDecodeError:
        print()
        log("Strange json format, ignoring saved products")
        return []
    if not data:
        log("\nCannot restore products data from json file")
        exit()
    return [Product(entry["name"], entry["link"], entry["cap"]) for entry in data]
def store_products_in_json(product_list):
    """
    Serialize every product (via Product.to_dict) into the JSON file
    named by the global *jsonfile*; log instead of raising when the file
    cannot be opened.
    """
    serialized = [prod.to_dict() for prod in product_list]
    try:
        with open(jsonfile, "w") as json_file:
            json.dump(serialized, json_file, indent=4)
    except OSError:
        log("Error in JSON opening, cannot save data")
    else:
        log("Products file successfully updated")
def close(timesl=1):
    """Clear the screen, show a goodbye banner for *timesl* seconds, then exit."""
    # close the program
    os.system(CLEAR_STR)
    print("\n\n\n Bye ,(è >è)/\n\n\n")
    time.sleep(timesl)
    os.system(CLEAR_STR)
    exit()
# Per-site [price-marker, title-marker] strings used to locate values in
# the scraped HTML.
# NOTE(review): these are CSS class names, not regular expressions,
# although check_price feeds them through re.compile — confirm intent.
regex = {
    "amazon": ['a-size-medium a-color-price', 'a-size-large product-title-word-break' ]
}
# Cache of compiled patterns, keyed by site name (filled by check_price).
regex_compiled = {}
# Browser-like headers so Amazon serves the normal product page.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en-US,en;q=0.5',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'DNT': '1',
    'host': 'www.amazon.it',
    'referrer': 'https://www.google.com/',
    'TE': 'Trailers',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:78.0) Gecko/20100101 Firefox/78.0'
}
# Products that already triggered an alert (kept so they get re-saved).
stored_products = []
jsonfile = "products.json"
# BOT MAIL credentials
# check for 2 step autentification, google app password and google unsecure app
userbotmail = ''
botpsw = ''
recv_mail = ''
start_time = time.time()
SLEEP_TIME = 300 # seconds between scraping passes
TIMER = 6000 # seconds of total runtime before the bot stops
# Feature flags, set from the command line in __main__.
email_flag = False
notify_flag = False
clear_flag = False
if __name__ == "__main__":
args = sys.argv
try:
if "-clear" in args:
clear_flag = True
if "-email" in args:
email_flag = True
restore_credetials()
if "-notify" in args:
notify_flag = True
notify = Notify()
product_list = extract_products_from_json()
while True:
if clear_flag:
os.system(CLEAR_STR)
res = input("\nSet SLEEP TIME and TIMER? 1/yes 0/no\n\n")
if res == "1" or res == "yes":
while True:
try:
if clear_flag:
os.system(CLEAR_STR)
SLEEP_TIME = int(input("\nSLEEP TIME (secs): "))
TIMER = int(input("\nTIMER (secs): "))
break
except ValueError:
print("\nWrong format [e.g. SLEEP TIME (secs): 30, TIMER (secs): 6000]")
input("\npress Enter to repeat")
continue
break
elif res == "0" or res == "no":
break
while True:
if clear_flag:
os.system(CLEAR_STR)
res = input("\nAdd a product? 1/yes 0/no\n\n")
if res == "1" or res == "yes":
try:
if clear_flag:
os.system(CLEAR_STR)
name = input("\nName: ")
cap = float(input("Cap (price under which notify): "))
link = input("Link: ")
product = Product(name, link, cap)
product_list.append(product)
except ValueError:
print("\nWrong format [e.g. Cap: 23.45]")
input("\npress Enter to repeat")
continue
elif res == "0" or res == "no":
if clear_flag:
os.system(CLEAR_STR)
print()
for product in product_list:
print(product)
print()
input("\npress Enter to start Bot")
break
while True:
delta_time = (time.time() - start_time)
if delta_time > TIMER:
print("\n\n")
log("TIME OUT!")
input("\nPress Enter to exit")
break
if clear_flag:
os.system(CLEAR_STR)
log('##### Bot run starting ... Operating for %.2f secs [SLEEP TIME = %d | TIMER: %d secs]' % (delta_time, SLEEP_TIME, TIMER))
print()
check_price(product_list)
time.sleep(SLEEP_TIME)
except KeyboardInterrupt:
pass
store_products_in_json(product_list + stored_products)
close()
|
from django.conf.urls.defaults import *
import os
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
site_media = os.path.join(os.path.dirname(__file__), 'site_media')
# Legacy Django URLconf (patterns() with string view paths — both removed
# in Django 1.10); routes the chat, svg-maker, creative and upload apps
# plus static media.
urlpatterns = patterns('',
    # Example:
    # (r'^randompeople/', include('randompeople.foo.urls')),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),
    (r'^chat/$','apps.chat.views.index'),
    (r'^$','apps.creative.views.getfront'),
    (r'^index.php','apps.chat.views.index'),
    (r'^chat/startchat/$','apps.chat.views.chat'),
    (r'^chat/search_user/$','apps.chat.views.search_user'),
    (r'^chat/logout/$','apps.chat.views.logout_user'),
    (r'^chat/endthischat/$','apps.chat.views.endthischat'),
    (r'^chat/reviel/$','apps.chat.views.identityreviel'),
    (r'^chat/savedet/$','apps.chat.views.saveDet'),
    (r'^chat/relogin/$','apps.chat.views.index2'),
    (r'^chat/random/$','apps.chat.views.getchat'),
    (r'^chat/subm/$','apps.chat.views.getchat2'),
    (r'^chat/randomzz/$','apps.chat.views.getchatzz'),
    (r'^chat/chatForm/$','apps.chat.views.getform'),
    (r'^chat/hidden/$','apps.chat.views.getpart'),
    # NOTE(review): duplicate of '^chat/random/$' above — the first match
    # wins, so this entry is unreachable.
    (r'^chat/random/$','apps.chat.views.blank'),
    (r'^chat/randomzz.jai$','apps.chat.views.getchatzz'),
    (r'^site_media/(?P<path>.*)$', 'django.views.static.serve',{'document_root': site_media}),
    (r'^ism/upload/$','uplo.views.addthis'),
    (r'^ism/uploadf/$','uplo.views.formpage'),
    (r'^svg/$', 'apps.svgmaker.views.getsvg'),
    (r'^svg/getit$', 'apps.svgmaker.views.getit'),
    (r'^svg/bar.svg$', 'apps.svgmaker.views.getbar'),
    (r'^svg/tryjson$', 'apps.svgmaker.views.tryjson'),
    (r'^svg/saveform$', 'apps.svgmaker.views.saveform'),
    (r'^svg/showform/$', 'apps.svgmaker.views.showform'),
    (r'^svg/getform/$', 'apps.svgmaker.views.getform'),
    (r'^svg/submitform/$', 'apps.svgmaker.views.submitform'),
    (r'^svg/allform$', 'apps.svgmaker.views.allform'),
    (r'^svg/reviewform/$', 'apps.svgmaker.views.reviewform'),
    (r'^svg/getbargraph.svg$', 'apps.svgmaker.views.getbargraph'),
    (r'^svg/getpiechart.svg$', 'apps.svgmaker.views.getpiechart'),
    (r'^svg/excel.xls$', 'apps.svgmaker.views.getxls'),
    (r'^svg/csv.csv$', 'apps.svgmaker.views.getcsv'),
    (r'^creative/submit/$', 'apps.creative.views.submit'),
    (r'^creative/getimage.svg', 'apps.creative.views.getImage'),
    (r'^creative/getit/$', 'apps.creative.views.getit'),
    (r'^creative/mysubmit', 'apps.creative.views.submitx'),
    (r'^creative/viscom', 'apps.creative.views.getfront'),
    (r'^creative/sendma', 'apps.creative.views.sendit'),
)
|
import django
import os
import sys
# Make the project importable and configure Django BEFORE importing any
# models: model modules can only be loaded once DJANGO_SETTINGS_MODULE is
# set and django.setup() has run.
path = '/home/teamwork/teamwork'
if path not in sys.path:
    sys.path.append(path)
os.environ['DJANGO_SETTINGS_MODULE'] = 'teamwork.settings'
django.setup()
from django.utils import timezone
from announcement.models import Announcement, image_path
from teamwork import settings
def clean_expired_data():
    """Deactivate expired announcements and delete their uploaded images.

    Finds all active ``Announcement`` rows whose ``deadline`` has passed,
    replaces their content with the "expired" marker and flags them
    inactive, then removes every file in the image upload directory whose
    name starts with one of the expired announcement ids.
    """
    expired = Announcement.objects.filter(deadline__lte=timezone.now(), active=True)
    # exists() avoids fetching every row just to test for emptiness.
    if not expired.exists():
        return
    # Collect the ids BEFORE update(): afterwards the rows no longer match
    # the queryset's active=True filter.
    expired_ids = [str(pk) for pk in set(expired.values_list("id", flat=True))]
    # "已过期" means "expired"; kept verbatim because it is user-facing data.
    expired.update(content="已过期", active=False)
    image_dir = os.path.join(settings.MEDIA_ROOT, image_path)
    id_prefixes = tuple(expired_ids)
    for filename in os.listdir(image_dir):
        # Assumes upload filenames begin with the announcement id -- TODO
        # confirm against the image_path upload_to naming scheme.
        if filename.startswith(id_prefixes):
            os.remove(os.path.join(image_dir, filename))
if __name__ == "__main__":
    # Intended to be run standalone (e.g. from cron) as a maintenance task.
    clean_expired_data()
|
import pickle
import fnmatch
import os
def process_data(d):
    """Unpickle and return the object stored in file *d*.

    Parameters
    ----------
    d : str or os.PathLike
        Path to a file containing a single pickled object.

    Returns
    -------
    object
        The unpickled payload.
    """
    # "with" guarantees the handle is closed even if pickle.load raises;
    # the original left the file open on error.
    with open(d, "rb") as fh:
        return pickle.load(fh)
|
# Read two integer lists from the user, then print their merged, sorted form.
list1 = []
list2 = []
num1 = int(input("Enter number of elements for first list:"))
for _ in range(num1):
    list1.append(int(input("Enter element:")))
num2 = int(input("Enter number of elements for second list:"))
for _ in range(num2):
    list2.append(int(input("Enter element:")))
list3 = sorted(list1 + list2)
print("Sorted list is:", list3)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mysql.connector
class Price():
    """Persist and render per-site price data for a single card.

    Parameters
    ----------
    id : int
        Primary key of the card in the ``cardPrice`` table.
    priceList : list of (str, value) pairs
        Each entry is ``(site_label, price)``; the first character of the
        site label is dropped for display/CSV output.
    """

    def __init__(self, id, priceList):
        self.idCard = id
        self.priceList = priceList

    def _connect(self):
        # Single place for the connection settings shared by update()/verif().
        return mysql.connector.connect(host="localhost", user="root",
                                       password="magicpswd", database="magic")

    def update(self, site, price):
        """Store *price* in column *site* of this card's row.

        ``site`` is interpolated as a column identifier (identifiers cannot
        be bound parameters); the values themselves are bound, which fixes
        the SQL built by raw string concatenation in the original.
        """
        self.verif()
        try:
            conn = self._connect()
            try:
                cursor = conn.cursor()
                requete = 'UPDATE cardPrice SET ' + site + ' = %s WHERE idCard = %s'
                cursor.execute(requete, (str(price), self.idCard))
                conn.commit()
            finally:
                # The original leaked the connection on every call.
                conn.close()
        except Exception as e:
            print("Erreur")
            print(e)

    def verif(self):
        """Ensure a ``cardPrice`` row exists for this card, inserting one if absent."""
        try:
            conn = self._connect()
            try:
                cursor = conn.cursor()
                cursor.execute("""SELECT idCard FROM cardPrice""")
                rows = cursor.fetchall()
                if not any(row[0] == self.idCard for row in rows):
                    cursor = conn.cursor()
                    cursor.execute("INSERT INTO cardPrice(idCard) VALUES (%s);",
                                   (self.idCard,))
                    conn.commit()
            finally:
                conn.close()
        except Exception as e:
            print("Erreur")
            print(e)

    def display(self):
        """Print each (site, price) pair, site label stripped of its first char."""
        for site, price in self.priceList:
            print(site[1:], " ", price)

    def toCSV(self):
        """Return the price list as semicolon-separated lines, one pair per line."""
        lines = []
        for site, price in self.priceList:
            lines.append(str(site[1:]) + '; ' + str(price) + '\n')
        return "".join(lines)
|
import sys
import rospy
import rosbag
from matplotlib import pyplot as plt
if __name__ == '__main__':
    # Usage: python bag_graph_z.py inputfile.bag [outputfile.png]
    # Plots the z position from a bag file's odometry topic and reports the
    # cumulative time spent below ``zlim`` meters after first climbing above it.

    # Topic to find the z data
    odomTopic = '/A01/odometry'
    # Vertical limit to calculate time below
    zlim = 4.0
    inputfile = sys.argv[1]
    if len(sys.argv) == 3:
        outputfile = sys.argv[2]
    else:
        # Default output: same basename as the bag with a .png extension.
        outputfile = inputfile[:-3] + 'png'
    z = []
    time = []
    started = False
    starttime = 0
    endtime = 0
    cumetime = 0
    for topic, msg, t in rosbag.Bag(inputfile).read_messages():
        if topic != odomTopic:
            continue
        zpos = msg.pose.pose.position.z
        curtime = t.secs + 1e-9 * t.nsecs
        # Save to the lists for plotting
        z.append(zpos)
        time.append(curtime)
        # Ignore everything until the first climb above the threshold.
        if not started and zpos > zlim:
            started = True
        elif not started:
            continue
        # Once we've crossed the threshold once, start looking for dips below
        if zpos < zlim:
            if not starttime:
                starttime = curtime
            else:
                endtime = curtime
            if zpos < 1.0:
                # Once we hit one meter get the last time, and then stop.
                # Could be an issue if we fly below one meter, but for current
                # tests that is not a problem.
                cumetime += endtime - starttime
                starttime = 0
                endtime = 0
                started = False
        else:
            if endtime > starttime:
                cumetime += endtime - starttime
            starttime = 0
            endtime = 0
    # Close out a dip that was still open when the bag ended.
    if endtime > starttime:
        cumetime += endtime - starttime
    # BUG FIX: this line was a Python 2 print statement (syntax error in py3).
    print("Total time below", zlim, "meters was", cumetime, "seconds")
    plt.plot(time, z)
    plt.title(inputfile[:-4])
    # Use zlim instead of a duplicated hard-coded 4.0 so the marker always
    # matches the threshold actually used above.
    plt.axhline(y=zlim, color='r', linestyle='-')
    # plt.show()
    plt.savefig(outputfile)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Autoformatter to automatically add trailing commas to calls and literals.
See https://github.com/asottile/add-trailing-comma for details.
"""
from pants.backend.python.lint.add_trailing_comma import rules as add_trailing_comma_rules
from pants.backend.python.lint.add_trailing_comma import skip_field, subsystem
def rules():
    """Return every rule needed to register the add-trailing-comma backend."""
    all_rules = (
        *add_trailing_comma_rules.rules(),
        *skip_field.rules(),
        *subsystem.rules(),
    )
    return all_rules
|
# (spanish, english) vocabulary pairs: colors, animals, and body parts.
_card_pairs = [
    ("rojo", "red"),
    ("verde", "green"),
    ("azul", "blue"),
    ("blanco", "white"),
    ("negro", "black"),
    ("morado", "purple"),
    ("naranja", "orange"),
    ("rosa", "pink"),
    ("amarillo", "yellow"),
    ("marron", "brown"),
    ("gris", "grey"),
    # BUG FIX: the original listed ("dog", "perro") -- languages swapped.
    ("perro", "dog"),
    ("gato", "cat"),
    ("pez", "fish"),
    ("lagarto", "lizard"),
    ("conejo", "rabbit"),
    ("caballo", "horse"),
    ("cerdo", "pig"),
    ("vaca", "cow"),
    ("gallina", "hen"),
    ("gallo", "rooster"),
    ("oveja", "sheep"),
    ("cabra", "goat"),
    ("ojo", "eye"),
    ("nariz", "nose"),
    ("oreja", "ear"),
    ("cabello", "hair"),
    ("boca", "mouth"),
    ("diente", "tooth"),
    ("lengua", "tongue"),
    ("labio", "lip"),
    ("brazo", "arm"),
    ("pierna", "leg"),
    ("rodilla", "knee"),
    ("pie", "foot"),
    ("mano", "hand"),
    ("dedo", "finger"),
    ("dedo del pie", "toe"),
    ("codo", "elbow"),
    ("barbilla", "chin"),
    ("cuello", "neck"),
    ("hombro", "shoulder"),
    ("estomago", "stomach"),
    ("pecho", "chest"),
    ("cadera", "hip"),
    ("trasero", "butt"),
    ("tobillo", "ankle"),
    ("cabeza", "head"),
    ("toro", "bull"),
    ("zorro", "fox"),
]
# Flash-card dicts in the shape the rest of the program expects.
cards = [{"spanish": spanish, "english": english}
         for spanish, english in _card_pairs]
|
import turtle

# Draw a square-ish spiral: 500 segments, turning slightly more than a
# right angle each time so the square slowly rotates.
paper = turtle.Screen()
pen = turtle.Turtle()
for _ in range(500):
    pen.forward(75)
    pen.right(95)
"""aospy DataLoader objects"""
import logging
import os
import pprint
import numpy as np
import xarray as xr
from .internal_names import (
ETA_STR,
GRID_ATTRS,
TIME_STR,
)
from .utils import times, io
def _preprocess_and_rename_grid_attrs(func, grid_attrs=None, **kwargs):
    """Wrap *func* so grid attributes are renamed after it runs.

    Produces the single callable that ``xr.open_mfdataset`` accepts as its
    ``preprocess`` argument: the user-supplied ``func`` is applied to each
    loaded Dataset first, and only then does
    ``grid_attrs_to_aospy_names`` rename grid attributes to aospy's
    internal names.  A typical use case is WRF output, where a CF-compliant
    units attribute must be added to the time coordinate of every input
    file before aospy's own logic can run.

    Parameters
    ----------
    func : function
        Arbitrary preprocessing step; must accept an xr.Dataset plus
        ``**kwargs``.
    grid_attrs : dict (optional)
        Overriding mapping of aospy internal names to the grid attribute
        names used by a particular model.

    Returns
    -------
    function
        Callable suitable as the ``preprocess`` argument to
        ``xr.open_mfdataset``.
    """
    def composed(ds):
        preprocessed = func(ds, **kwargs)
        return grid_attrs_to_aospy_names(preprocessed, grid_attrs)
    return composed
def grid_attrs_to_aospy_names(data, grid_attrs=None):
    """Rename grid attributes in *data* to aospy's internal names.

    All coords and dims of the dataset are matched against the known
    external spellings in ``aospy.internal_names.GRID_ATTRS`` (optionally
    overridden per-model via *grid_attrs*); any match is renamed to the
    aospy-internal name.  Renamed grid attributes stored as dims without a
    coord are then given one, which facilitates later slicing/subsetting.
    No comparison against Model coordinates is performed here, and no
    missing coordinates are added from Model objects.

    Parameters
    ----------
    data : xr.Dataset
    grid_attrs : dict (default None)
        Overriding mapping of aospy internal names to the grid attribute
        names used by a particular model.

    Returns
    -------
    xr.Dataset
        Data returned with coordinates consistent with aospy conventions.
    """
    overrides = {} if grid_attrs is None else grid_attrs
    attrs = GRID_ATTRS.copy()
    for internal, external in overrides.items():
        if internal not in attrs:
            raise ValueError(
                'Unrecognized internal name, {!r}, specified for a custom '
                'grid attribute name. See the full list of valid internal '
                'names below:\n\n{}'.format(internal, list(GRID_ATTRS.keys())))
        attrs[internal] = (external, )
    names_in_data = set(data.variables).union(set(data.dims))
    for internal, candidates in attrs.items():
        matches = set(candidates).intersection(names_in_data)
        if matches:
            data = data.rename({matches.pop(): internal})
    return set_grid_attrs_as_coords(data)
def set_grid_attrs_as_coords(ds):
    """Promote all recognized grid attributes of *ds* to coordinates.

    Grid attributes are assumed to already use aospy's internal names.
    Marking them as coordinates means every DataArray selected from the
    Dataset carries them along wherever index dimensions overlap.

    Parameters
    ----------
    ds : Dataset
        Input data.

    Returns
    -------
    Dataset
        The same data with grid attributes set as coordinates.
    """
    candidates = set(ds.coords) | set(ds.data_vars)
    present_grid_attrs = set(GRID_ATTRS.keys()) & candidates
    return ds.set_coords(present_grid_attrs)
def _maybe_cast_to_float64(da):
    """Cast *da* to np.float64 if it is stored as np.float32.

    bottleneck's reduction operations can be inaccurate on float32 input,
    so float32 data is upcast before any computation; every other dtype
    passes through untouched.

    Parameters
    ----------
    da : xr.DataArray
        Input DataArray

    Returns
    -------
    DataArray
    """
    if da.dtype != np.float32:
        return da
    # BUG FIX: the original message concatenation lacked a space, producing
    # "datatype.For accurate ..." in the log output.
    logging.warning('Datapoints were stored using the np.float32 datatype. '
                    'For accurate reduction operations using bottleneck, '
                    'datapoints are being cast to the np.float64 datatype.'
                    ' For more information see: https://github.com/pydata/'
                    'xarray/issues/1346')
    return da.astype(np.float64)
def _sel_var(ds, var, upcast_float32=True):
    """Pull *var* out of *ds*, trying each of its alternative names in turn.

    Parameters
    ----------
    ds : Dataset
        Dataset possibly containing the variable.
    var : aospy.Var
        Variable to find data for; each entry of ``var.names`` is tried in
        order until one is present.
    upcast_float32 : bool (default True)
        Whether to cast a float32 DataArray up to float64.

    Returns
    -------
    DataArray
        The matching data, renamed to ``var.name``.

    Raises
    ------
    LookupError
        If none of the candidate names is present in the Dataset.
    """
    for candidate in var.names:
        try:
            renamed = ds[candidate].rename(var.name)
        except KeyError:
            continue
        if upcast_float32:
            return _maybe_cast_to_float64(renamed)
        return renamed
    raise LookupError(
        '{0} not found among names: {1} in\n{2}'.format(var, var.names, ds))
def _load_data_from_disk(file_set, preprocess_func=lambda ds: ds,
                         data_vars='minimal', coords='minimal',
                         grid_attrs=None, **kwargs):
    """Open a multi-file Dataset, preprocessing and renaming grid attrs.

    Files are concatenated along time, and every grid attribute is renamed
    to its aospy internal name.

    Parameters
    ----------
    file_set : list or str
        List of paths to files or a glob-string.
    preprocess_func : function (optional)
        Custom step applied to each file's Dataset before any aospy logic.
    data_vars : str (default 'minimal')
        Concatenation mode for data variables in ``xr.open_mfdataset``.
    coords : str (default 'minimal')
        Concatenation mode for coordinate variables in
        ``xr.open_mfdataset``.
    grid_attrs : dict
        Overriding mapping of aospy internal names to the grid attribute
        names used by a particular model.

    Returns
    -------
    Dataset
    """
    apply_preload_user_commands(file_set)
    preprocess = _preprocess_and_rename_grid_attrs(preprocess_func,
                                                   grid_attrs, **kwargs)
    return xr.open_mfdataset(file_set,
                             preprocess=preprocess,
                             combine='by_coords',
                             decode_times=False,
                             decode_coords=False,
                             mask_and_scale=True,
                             data_vars=data_vars,
                             coords=coords)
def apply_preload_user_commands(file_set, cmd=io.dmget):
    """Run *cmd* on the file list before any loading happens.

    On some systems data must be staged before use -- e.g. on the NOAA
    Geophysical Fluid Dynamics Laboratory computational cluster, files on
    the tape archive need a `dmget` (or `hsmget`) call before being read.
    Passing ``cmd=None`` disables the step entirely.
    """
    if cmd is None:
        return
    cmd(file_set)
def _setattr_default(obj, attr, value, default):
    """Set ``obj.attr`` to *value*, substituting *default* when value is None."""
    chosen = default if value is None else value
    setattr(obj, attr, chosen)
class DataLoader(object):
    """A fundamental DataLoader object.

    Abstract base class: subclasses must implement ``_generate_file_set``
    and provide the ``preprocess_func``, ``data_vars``, ``coords`` and
    ``upcast_float32`` attributes that ``load_variable`` reads.
    """
    def load_variable(self, var=None, start_date=None, end_date=None,
                      time_offset=None, grid_attrs=None, **DataAttrs):
        """Load a DataArray for requested variable and time range.

        Automatically renames all grid attributes to match aospy
        conventions.

        Parameters
        ----------
        var : Var
            aospy Var object
        start_date : datetime.datetime
            start date for interval
        end_date : datetime.datetime
            end date for interval
        time_offset : dict
            Option to add a time offset to the time coordinate to correct
            for incorrect metadata.
        grid_attrs : dict (optional)
            Overriding dictionary of grid attributes mapping aospy internal
            names to names of grid attributes used in a particular model.
        **DataAttrs
            Attributes needed to identify a unique set of files to load
            from.

        Returns
        -------
        da : DataArray
            DataArray for the specified variable and date range.
        """
        file_set = self._generate_file_set(
            var=var,
            start_date=start_date,
            end_date=end_date,
            **DataAttrs,
        )
        ds = _load_data_from_disk(
            file_set,
            self.preprocess_func,
            data_vars=self.data_vars,
            coords=self.coords,
            start_date=start_date,
            end_date=end_date,
            time_offset=time_offset,
            grid_attrs=grid_attrs,
            **DataAttrs,
        )
        if var.def_time:
            # Time-defined variables need their time metadata normalized
            # before any date-based subsetting below can work.
            ds = times.prep_time_data(ds)
            start_date = times.maybe_convert_to_index_date_type(
                ds.indexes[TIME_STR], start_date)
            end_date = times.maybe_convert_to_index_date_type(
                ds.indexes[TIME_STR], end_date)
        ds = set_grid_attrs_as_coords(ds)
        da = _sel_var(ds, var, self.upcast_float32)
        if var.def_time:
            da = self._maybe_apply_time_shift(da, time_offset, **DataAttrs)
            return times.sel_time(da, start_date, end_date).load()
        else:
            return da.load()
    def _load_or_get_from_model(self, var, start_date=None, end_date=None,
                                time_offset=None, model=None, **DataAttrs):
        """Load a DataArray for the requested variable and time range.

        Supports access of grid attributes either through the DataLoader
        or through an optionally-provided Model object.  Defaults to using
        the version found in the DataLoader first.
        """
        grid_attrs = None if model is None else model.grid_attrs
        try:
            return self.load_variable(
                var, start_date=start_date, end_date=end_date,
                time_offset=time_offset, grid_attrs=grid_attrs, **DataAttrs)
        except (KeyError, IOError) as e:
            # Only fall back to the Model object for known grid attributes;
            # any other failure is a genuine load error and is re-raised.
            if var.name not in GRID_ATTRS or model is None:
                raise e
            else:
                try:
                    return getattr(model, var.name)
                except AttributeError:
                    raise AttributeError(
                        'Grid attribute {} could not be located either '
                        'through this DataLoader or in the provided Model '
                        'object: {}.'.format(var, model))
    def recursively_compute_variable(self, var, start_date=None, end_date=None,
                                     time_offset=None, model=None,
                                     **DataAttrs):
        """Compute a variable recursively, loading data where needed.

        An obvious requirement here is that the variable must eventually be
        able to be expressed in terms of model-native quantities; otherwise
        the recursion will never stop.

        Parameters
        ----------
        var : Var
            aospy Var object
        start_date : datetime.datetime
            start date for interval
        end_date : datetime.datetime
            end date for interval
        time_offset : dict
            Option to add a time offset to the time coordinate to correct
            for incorrect metadata.
        model : Model
            aospy Model object (optional)
        **DataAttrs
            Attributes needed to identify a unique set of files to load
            from.

        Returns
        -------
        da : DataArray
            DataArray for the specified variable and date range.
        """
        if var.variables is None:
            # Base case: a model-native quantity, loaded straight from disk
            # (or pulled from the Model object).
            return self._load_or_get_from_model(
                var, start_date, end_date, time_offset, model, **DataAttrs)
        else:
            data = [self.recursively_compute_variable(
                v, start_date, end_date, time_offset, model, **DataAttrs)
                for v in var.variables]
            return var.func(*data).rename(var.name)
    @staticmethod
    def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
        """Apply specified time shift to DataArray."""
        if time_offset is not None:
            time = times.apply_time_offset(da[TIME_STR], **time_offset)
            da[TIME_STR] = time
        return da
    def _generate_file_set(self, var=None, start_date=None, end_date=None,
                           domain=None, intvl_in=None, dtype_in_vert=None,
                           dtype_in_time=None, intvl_out=None):
        # Abstract hook: subclasses map (var, dates, attrs) -> file paths.
        raise NotImplementedError(
            'All DataLoaders require a _generate_file_set method')
class DictDataLoader(DataLoader):
    """DataLoader backed by a flat dict from interval tags to file lists.

    The simplest DataLoader.  It suits raw model history files, which tend
    to group every variable of one output interval into a single fileset.
    ``intvl_in`` is a free-form string describing the data's time frequency
    (e.g. 'monthly', 'daily', '3-hourly'); any string value is accepted.

    Parameters
    ----------
    file_map : dict
        A dict mapping an input interval to a list of files.
    upcast_float32 : bool (default True)
        Whether float32 DataArrays are cast to float64 before calculations.
    data_vars : str (default 'minimal')
        Mode for concatenating data variables in call to
        ``xr.open_mfdataset``.
    coords : str (default 'minimal')
        Mode for concatenating coordinate variables in call to
        ``xr.open_mfdataset``.
    preprocess_func : function (optional)
        Applied to every Dataset before aospy processing.  Must take a
        Dataset and ``**kwargs`` as its two arguments.

    Examples
    --------
    Two filesets, one with monthly average output, one with 3-hourly
    output:

    >>> file_map = {'monthly': '000[4-6]0101.atmos_month.nc',
    ...             '3hr': '000[4-6]0101.atmos_8xday.nc'}
    >>> data_loader = DictDataLoader(file_map)

    To repair a CF-incompliant units attribute whose correct value depends
    on which fileset is being read, write a ``preprocess_func`` keyed off
    the ``intvl_in`` keyword argument:

    >>> def preprocess(ds, **kwargs):
    ...     if kwargs['intvl_in'] == 'monthly':
    ...         ds['time'].attrs['units'] = 'days since 0001-01-0000'
    ...     if kwargs['intvl_in'] == '3hr':
    ...         ds['time'].attrs['units'] = 'hours since 0001-01-0000'
    ...     return ds
    >>> data_loader = DictDataLoader(file_map, preprocess)
    """

    def __init__(self, file_map=None, upcast_float32=True, data_vars='minimal',
                 coords='minimal', preprocess_func=lambda ds, **kwargs: ds):
        """Create a new DictDataLoader."""
        self.file_map = file_map
        self.upcast_float32 = upcast_float32
        self.data_vars = data_vars
        self.coords = coords
        self.preprocess_func = preprocess_func

    def _generate_file_set(self, var=None, start_date=None, end_date=None,
                           domain=None, intvl_in=None, dtype_in_vert=None,
                           dtype_in_time=None, intvl_out=None):
        """Look up the file set registered for the given *intvl_in*."""
        if intvl_in in self.file_map:
            return self.file_map[intvl_in]
        raise KeyError('File set does not exist for the specified'
                       ' intvl_in {0}'.format(intvl_in))
class NestedDictDataLoader(DataLoader):
    """DataLoader that uses a nested dictionary mapping to load files.

    This is the most flexible existing type of DataLoader; it allows for
    the specification of different sets of files for different variables.
    The ``intvl_in`` parameter is a string description of the time
    frequency of the data one is referencing (e.g. 'monthly', 'daily',
    '3-hourly'); any string value is accepted.  The variable name can be
    any variable name in your aospy object library (including alternative
    names).

    Parameters
    ----------
    file_map : dict
        A dict mapping intvl_in to dictionaries mapping Var objects to
        lists of files.
    upcast_float32 : bool (default True)
        Whether float32 DataArrays are cast to float64 before calculations.
    data_vars : str (default 'minimal')
        Mode for concatenating data variables in call to
        ``xr.open_mfdataset``.
    coords : str (default 'minimal')
        Mode for concatenating coordinate variables in call to
        ``xr.open_mfdataset``.
    preprocess_func : function (optional)
        Applied to every Dataset before aospy processing.  Must take a
        Dataset and ``**kwargs`` as its two arguments.

    Examples
    --------
    One monthly-average fileset for large scale precipitation, another for
    convective precipitation:

    >>> file_map = {'monthly': {'precl': '000[4-6]0101.precl.nc',
    ...                         'precc': '000[4-6]0101.precc.nc'}}
    >>> data_loader = NestedDictDataLoader(file_map)

    See :py:class:`aospy.data_loader.DictDataLoader` for an example of a
    possible function to pass as a ``preprocess_func``.
    """
    def __init__(self, file_map=None, upcast_float32=True, data_vars='minimal',
                 coords='minimal', preprocess_func=lambda ds, **kwargs: ds):
        """Create a new NestedDictDataLoader."""
        self.file_map = file_map
        self.upcast_float32 = upcast_float32
        self.data_vars = data_vars
        self.coords = coords
        self.preprocess_func = preprocess_func

    def _generate_file_set(self, var=None, start_date=None, end_date=None,
                           domain=None, intvl_in=None, dtype_in_vert=None,
                           dtype_in_time=None, intvl_out=None):
        """Return the file set for the first of var's names that matches."""
        for name in var.names:
            try:
                return self.file_map[intvl_in][name]
            except KeyError:
                pass
        # BUG FIX: the original message referenced a nonexistent
        # "OneDirDataLoader" class and read "found in for the".
        raise KeyError('Files for the var {0} cannot be found for the '
                       'intvl_in {1} in this'
                       ' NestedDictDataLoader'.format(var, intvl_in))
class GFDLDataLoader(DataLoader):
    """DataLoader for NOAA GFDL model output.

    This is an example of a domain-specific custom DataLoader, designed
    specifically for finding files output by the Geophysical Fluid
    Dynamics Laboratory's model history file post-processing tools.

    Parameters
    ----------
    template : GFDLDataLoader
        Optional argument to specify a base GFDLDataLoader to inherit
        parameters from
    data_direc : str
        Root directory of data files
    data_dur : int
        Number of years included per post-processed file
    data_start_date : datetime.datetime
        Start date of data files
    data_end_date : datetime.datetime
        End date of data files
    upcast_float32 : bool (default True)
        Whether to cast loaded DataArrays with the float32 datatype to
        float64 before doing calculations
    data_vars : str (default 'minimal')
        Mode for concatenating data variables in call to
        ``xr.open_mfdataset``
    coords : str (default 'minimal')
        Mode for concatenating coordinate variables in call to
        ``xr.open_mfdataset``.
    preprocess_func : function (optional)
        A function to apply to every Dataset before processing in aospy.
        Must take a Dataset and ``**kwargs`` as its two arguments.

    Examples
    --------
    Case without a template to start from.

    >>> base = GFDLDataLoader(data_direc='/archive/control/pp', data_dur=5,
    ...                       data_start_date=datetime(2000, 1, 1),
    ...                       data_end_date=datetime(2010, 12, 31))

    Case with a starting template.

    >>> data_loader = GFDLDataLoader(base, data_direc='/archive/2xCO2/pp')

    See :py:class:`aospy.data_loader.DictDataLoader` for an example of a
    possible function to pass as a ``preprocess_func``.
    """
    def __init__(self, template=None, data_direc=None, data_dur=None,
                 data_start_date=None, data_end_date=None,
                 upcast_float32=None, data_vars=None, coords=None,
                 preprocess_func=None):
        """Create a new GFDLDataLoader."""
        if template:
            # Every argument passed as None inherits the template's value;
            # explicitly-passed arguments override it.
            _setattr_default(self, 'data_direc', data_direc,
                             getattr(template, 'data_direc'))
            _setattr_default(self, 'data_dur', data_dur,
                             getattr(template, 'data_dur'))
            _setattr_default(self, 'data_start_date', data_start_date,
                             getattr(template, 'data_start_date'))
            _setattr_default(self, 'data_end_date', data_end_date,
                             getattr(template, 'data_end_date'))
            _setattr_default(self, 'upcast_float32', upcast_float32,
                             getattr(template, 'upcast_float32'))
            _setattr_default(self, 'data_vars', data_vars,
                             getattr(template, 'data_vars'))
            _setattr_default(self, 'coords', coords,
                             getattr(template, 'coords'))
            _setattr_default(self, 'preprocess_func', preprocess_func,
                             getattr(template, 'preprocess_func'))
        else:
            self.data_direc = data_direc
            self.data_dur = data_dur
            self.data_start_date = data_start_date
            self.data_end_date = data_end_date
            _setattr_default(self, 'upcast_float32', upcast_float32, True)
            _setattr_default(self, 'data_vars', data_vars, 'minimal')
            _setattr_default(self, 'coords', coords, 'minimal')
            _setattr_default(self, 'preprocess_func', preprocess_func,
                             lambda ds, **kwargs: ds)
    @staticmethod
    def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
        """Correct off-by-one error in GFDL instantaneous model data.

        Instantaneous data that is outputted by GFDL models is generally
        off by one timestep.  For example, a netCDF file that is supposed
        to correspond to 6 hourly data for the month of January, will have
        its last time value be in February.
        """
        if time_offset is not None:
            time = times.apply_time_offset(da[TIME_STR], **time_offset)
            da[TIME_STR] = time
        else:
            if DataAttrs['dtype_in_time'] == 'inst':
                if DataAttrs['intvl_in'].endswith('hr'):
                    # e.g. '6hr' -> shift back 6 hours.  NOTE(review): only
                    # the first character is read, so intervals of 10hr or
                    # more would be mis-parsed -- confirm intended inputs.
                    offset = -1 * int(DataAttrs['intvl_in'][0])
                else:
                    offset = 0
                time = times.apply_time_offset(da[TIME_STR], hours=offset)
                da[TIME_STR] = time
        return da
    def _generate_file_set(self, var=None, start_date=None, end_date=None,
                           domain=None, intvl_in=None, dtype_in_vert=None,
                           dtype_in_time=None, intvl_out=None):
        # Try every alternative name of the variable until a set of paths
        # that all exist on disk is found.
        attempted_file_sets = []
        for name in var.names:
            file_set = self._input_data_paths_gfdl(
                name, start_date, end_date, domain, intvl_in, dtype_in_vert,
                dtype_in_time, intvl_out)
            attempted_file_sets.append(file_set)
            if all([os.path.isfile(filename) for filename in file_set]):
                return file_set
        raise IOError('Files for the var {0} cannot be located '
                      'using GFDL post-processing conventions. '
                      'Attempted using the following sets of paths:\n\n'
                      '{1}'.format(var, pprint.pformat(attempted_file_sets)))
    def _input_data_paths_gfdl(self, name, start_date, end_date, domain,
                               intvl_in, dtype_in_vert, dtype_in_time,
                               intvl_out):
        # Construct GFDL post-processing directory and file names for one
        # variable over the requested range of years.
        dtype_lbl = dtype_in_time
        if intvl_in == 'daily':
            domain += '_daily'
        if dtype_in_vert == ETA_STR and name != 'ps':
            # Model-level ('eta') output lives under a '_level' domain,
            # except for surface pressure.
            domain += '_level'
        if dtype_in_time == 'inst':
            domain += '_inst'
            dtype_lbl = 'ts'
        if 'monthly_from_' in dtype_in_time:
            dtype = dtype_in_time.replace('monthly_from_', '')
            dtype_lbl = dtype
        else:
            dtype = dtype_in_time
        dur_str = str(self.data_dur) + 'yr'
        if dtype_in_time == 'av':
            subdir = intvl_in + '_' + dur_str
        else:
            subdir = os.path.join(intvl_in, dur_str)
        direc = os.path.join(self.data_direc, domain, dtype_lbl, subdir)
        data_start_year = times.infer_year(self.data_start_date)
        start_year = times.infer_year(start_date)
        end_year = times.infer_year(end_date)
        files = [os.path.join(direc, io.data_name_gfdl(
            name, domain, dtype, intvl_in, year, intvl_out,
            data_start_year, self.data_dur))
            for year in range(start_year, end_year + 1)]
        # De-duplicate (multi-year files repeat across years), then sort.
        files = list(set(files))
        files.sort()
        return files
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
from textwrap import dedent
from typing import Sequence
import pytest
from pants.backend.project_info import peek
from pants.backend.project_info.peek import Peek, TargetData, TargetDatas
from pants.backend.visibility.rules import rules as visibility_rules
from pants.base.specs import RawSpecs, RecursiveGlobSpec
from pants.core.target_types import ArchiveTarget, FilesGeneratorTarget, FileTarget, GenericTarget
from pants.engine.addresses import Address
from pants.engine.fs import Snapshot
from pants.engine.internals.dep_rules import DependencyRuleAction, DependencyRuleApplication
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
def _snapshot(fingerprint: str, files: tuple[str, ...]) -> Snapshot:
    # Test-only Snapshot factory.  NOTE(review): the fingerprint argument is
    # currently ignored -- Snapshot.create_for_testing derives its own digest.
    return Snapshot.create_for_testing(files, ())
@pytest.mark.parametrize(
"expanded_target_infos, exclude_defaults, include_dep_rules, expected_output",
[
pytest.param(
[],
False,
False,
"[]\n",
id="null-case",
),
pytest.param(
[
TargetData(
FilesGeneratorTarget(
{
"sources": ["*.txt"],
# Regression test that we can handle a dict with `tuple[str, ...]` as
# key.
"overrides": {("foo.txt",): {"tags": ["overridden"]}},
},
Address("example", target_name="files_target"),
),
_snapshot(
"2",
("foo.txt", "bar.txt"),
),
tuple(),
)
],
True,
False,
dedent(
"""\
[
{
"address": "example:files_target",
"target_type": "files",
"dependencies": [],
"overrides": {
"('foo.txt',)": {
"tags": [
"overridden"
]
}
},
"sources": [
"bar.txt",
"foo.txt"
],
"sources_fingerprint": "d3dd0a1f72aaa1fb2623e7024d3ea460b798f6324805cfad5c2b751e2dfb756b",
"sources_raw": [
"*.txt"
]
}
]
"""
),
id="single-files-target/exclude-defaults-regression",
),
pytest.param(
[
TargetData(
FilesGeneratorTarget(
{"sources": ["foo.txt"]}, Address("example", target_name="files_target")
),
_snapshot(
"1",
("foo.txt",),
),
tuple(),
)
],
False,
False,
dedent(
"""\
[
{
"address": "example:files_target",
"target_type": "files",
"dependencies": [],
"description": null,
"overrides": null,
"sources": [
"foo.txt"
],
"sources_fingerprint": "b5e73bb1d7a3f8c2e7f8c43f38ab4d198e3512f082c670706df89f5abe319edf",
"sources_raw": [
"foo.txt"
],
"tags": null
}
]
"""
),
id="single-files-target/include-defaults",
),
pytest.param(
[
TargetData(
FilesGeneratorTarget(
{"sources": ["*.txt"], "tags": ["zippable"]},
Address("example", target_name="files_target"),
),
_snapshot(
"0",
(),
),
tuple(),
),
TargetData(
ArchiveTarget(
{
"output_path": "my-archive.zip",
"format": "zip",
"files": ["example:files_target"],
},
Address("example", target_name="archive_target"),
),
None,
("foo/bar:baz", "qux:quux"),
),
],
True,
False,
dedent(
"""\
[
{
"address": "example:files_target",
"target_type": "files",
"dependencies": [],
"sources": [],
"sources_fingerprint": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"sources_raw": [
"*.txt"
],
"tags": [
"zippable"
]
},
{
"address": "example:archive_target",
"target_type": "archive",
"dependencies": [
"foo/bar:baz",
"qux:quux"
],
"files": [
"example:files_target"
],
"format": "zip",
"output_path": "my-archive.zip"
}
]
"""
),
id="single-files-target/exclude-defaults",
),
pytest.param(
[
TargetData(
FilesGeneratorTarget({"sources": ["*.txt"]}, Address("foo", target_name="baz")),
_snapshot("", ("foo/a.txt",)),
("foo/a.txt:baz",),
dependencies_rules=("does", "apply", "*"),
dependents_rules=("fall-through", "*"),
applicable_dep_rules=(
DependencyRuleApplication(
action=DependencyRuleAction.ALLOW,
rule_description="foo/BUILD[*] -> foo/BUILD[*]",
origin_address=Address("foo", target_name="baz"),
origin_type="files",
dependency_address=Address(
"foo", target_name="baz", relative_file_path="a.txt"
),
dependency_type="files",
),
),
),
],
True,
True,
dedent(
"""\
[
{
"address": "foo:baz",
"target_type": "files",
"_applicable_dep_rules": [
{
"action": "ALLOW",
"rule_description": "foo/BUILD[*] -> foo/BUILD[*]",
"origin_address": "foo:baz",
"origin_type": "files",
"dependency_address": "foo/a.txt:baz",
"dependency_type": "files"
}
],
"_dependencies_rules": [
"does",
"apply",
"*"
],
"_dependents_rules": [
"fall-through",
"*"
],
"dependencies": [
"foo/a.txt:baz"
],
"sources": [
"foo/a.txt"
],
"sources_fingerprint": "72ceef751c940b5797530e298f4d9f66daf3c51f7d075bfb802295ffb01d5de3",
"sources_raw": [
"*.txt"
]
}
]
"""
),
id="include-dep-rules",
),
],
)
def test_render_targets_as_json(
    expanded_target_infos, exclude_defaults, include_dep_rules, expected_output
):
    """Render the parametrized TargetData list as JSON and compare verbatim."""
    rendered = peek.render_json(expanded_target_infos, exclude_defaults, include_dep_rules)
    assert rendered == expected_output
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Fixture: a RuleRunner wired with the peek and visibility rules."""
    all_rules = [
        *peek.rules(),
        *visibility_rules(),
        QueryRule(TargetDatas, [RawSpecs]),
    ]
    return RuleRunner(rules=all_rules, target_types=[FilesGeneratorTarget, GenericTarget])
def test_non_matching_build_target(rule_runner: RuleRunner) -> None:
    """A spec that matches no BUILD targets should render an empty JSON list."""
    rule_runner.write_files({"some_name/BUILD": "target()"})
    goal_result = rule_runner.run_goal_rule(Peek, args=["other_name"])
    assert goal_result.stdout == "[]\n"
def _normalize_fingerprints(tds: Sequence[TargetData]) -> list[TargetData]:
    """We're not here to test the computation of fingerprints."""
    normalized = []
    for td in tds:
        sources = td.expanded_sources
        if sources is not None:
            # Replace the real digest with a stable dummy, keeping the file list.
            sources = _snapshot("", sources.files)
        normalized.append(dataclasses.replace(td, expanded_sources=sources))
    return normalized
def test_get_target_data(rule_runner: RuleRunner) -> None:
    """Check that TargetDatas expands a `files` generator into per-file targets.

    The generic `bar` target keeps its dependency on the generated file
    targets; fingerprints are normalized away (see _normalize_fingerprints).
    """
    rule_runner.write_files(
        {
            "foo/BUILD": dedent(
                """\
                target(name="bar", dependencies=[":baz"])
                files(name="baz", sources=["*.txt"])
                """
            ),
            "foo/a.txt": "",
            "foo/b.txt": "",
        }
    )
    tds = rule_runner.request(
        TargetDatas,
        [RawSpecs(recursive_globs=(RecursiveGlobSpec("foo"),), description_of_origin="tests")],
    )
    # Expected order: the generic target, the generator, then one FileTarget
    # per matched source file.
    assert _normalize_fingerprints(tds) == [
        TargetData(
            GenericTarget({"dependencies": [":baz"]}, Address("foo", target_name="bar")),
            None,
            ("foo/a.txt:baz", "foo/b.txt:baz"),
        ),
        TargetData(
            FilesGeneratorTarget({"sources": ["*.txt"]}, Address("foo", target_name="baz")),
            _snapshot("", ("foo/a.txt", "foo/b.txt")),
            ("foo/a.txt:baz", "foo/b.txt:baz"),
        ),
        TargetData(
            FileTarget(
                {"source": "a.txt"}, Address("foo", relative_file_path="a.txt", target_name="baz")
            ),
            _snapshot("", ("foo/a.txt",)),
            (),
        ),
        TargetData(
            FileTarget(
                {"source": "b.txt"}, Address("foo", relative_file_path="b.txt", target_name="baz")
            ),
            _snapshot("", ("foo/b.txt",)),
            (),
        ),
    ]
def test_get_target_data_with_dep_rules(rule_runner: RuleRunner) -> None:
    """Check that --peek-include-dep-rules surfaces the applicable visibility rules.

    The BUILD file declares competing rule sets; the expectation pins which
    rule matches each target (`<files>` for dependencies, the `*` fall-through
    for dependents).
    """
    rule_runner.set_options(["--peek-include-dep-rules"])
    rule_runner.write_files(
        {
            "foo/BUILD": dedent(
                """\
                files(name="baz", sources=["*.txt"])
                __dependencies_rules__(
                    ("<target>", "does", "not", "apply", "*"),
                    ("<files>", "does", "apply", "*"),
                )
                __dependents_rules__(
                    ("b.txt", "!skip", "this", "*"),
                    ("<file>", "take", "the", "first", "*"),
                    ("*", "fall-through", "*"),
                )
                """
            ),
            "foo/a.txt": "",
        }
    )
    tds = rule_runner.request(
        TargetDatas,
        [RawSpecs(recursive_globs=(RecursiveGlobSpec("foo"),), description_of_origin="tests")],
    )
    assert _normalize_fingerprints(tds) == [
        TargetData(
            FilesGeneratorTarget({"sources": ["*.txt"]}, Address("foo", target_name="baz")),
            _snapshot("", ("foo/a.txt",)),
            ("foo/a.txt:baz",),
            dependencies_rules=("does", "apply", "*"),
            dependents_rules=("fall-through", "*"),
            applicable_dep_rules=(
                DependencyRuleApplication(
                    action=DependencyRuleAction.ALLOW,
                    rule_description="foo/BUILD[*] -> foo/BUILD[*]",
                    origin_address=Address("foo", target_name="baz"),
                    origin_type="files",
                    dependency_address=Address(
                        "foo", target_name="baz", relative_file_path="a.txt"
                    ),
                    dependency_type="files",
                ),
            ),
        ),
        TargetData(
            FileTarget(
                {"source": "a.txt"}, Address("foo", relative_file_path="a.txt", target_name="baz")
            ),
            _snapshot("", ("foo/a.txt",)),
            (),
            dependencies_rules=("does", "apply", "*"),
            dependents_rules=("fall-through", "*"),
            applicable_dep_rules=(),
        ),
    ]
|
##
# Simple dictionary class that automatically initializes an empty list for
# any key that is not present in the collection yet.
##
class VectorList(dict):
    """A dict that transparently creates an empty list for absent keys.

    Behaves like collections.defaultdict(list) on lookup, but keeps the
    plain-dict constructor signature.
    """

    def __missing__(self, key):
        # dict.__getitem__ calls this on a miss; store a fresh list under the
        # key and hand it back so callers can append immediately.
        return self.setdefault(key, [])
|
import torch.nn.functional as F
class InputPadder:
    """Pads images such that dimensions are divisible by 8"""

    # TODO: Ideally, this should be part of the eval transforms preset, instead
    # of being part of the validation code. It's not obvious what a good
    # solution would be, because we need to unpad the predicted flows according
    # to the input images' size, and in some datasets (Kitti) images can have
    # variable sizes.

    def __init__(self, dims, mode="sintel"):
        self.ht, self.wd = dims[-2:]
        # Amount needed to round each spatial dim up to the next multiple of 8.
        extra_h = (8 - self.ht % 8) % 8
        extra_w = (8 - self.wd % 8) % 8
        if mode == "sintel":
            # Sintel: split the padding evenly between both sides of each axis.
            self._pad = [extra_w // 2, extra_w - extra_w // 2, extra_h // 2, extra_h - extra_h // 2]
        else:
            # Otherwise (e.g. Kitti): split width padding, put all height
            # padding at the bottom.
            self._pad = [extra_w // 2, extra_w - extra_w // 2, 0, extra_h]

    def pad(self, *inputs):
        """Replicate-pad every tensor in *inputs*; returns a list."""
        padded = []
        for tensor in inputs:
            padded.append(F.pad(tensor, self._pad, mode="replicate"))
        return padded

    def unpad(self, x):
        """Crop *x* back to the original (unpadded) spatial size."""
        ht, wd = x.shape[-2:]
        top, bottom = self._pad[2], ht - self._pad[3]
        left, right = self._pad[0], wd - self._pad[1]
        return x[..., top:bottom, left:right]
|
# Copyright (C) 2021 FireEye, Inc. All Rights Reserved.
from speakeasy.struct import EmuStruct, Ptr
import ctypes as ct
# Win32 Net API success code (lmerr.h).
NERR_Success = 0
# NETSETUP_JOIN_STATUS-style join-state values (lmjoin.h).
NetSetupUnknownStatus = 0
NetSetupUnjoined = 1
NetSetupWorkgroupName = 2
NetSetupDomainName = 3
class WKSTA_INFO_100(EmuStruct):
    """Emulated WKSTA_INFO_100 structure (Windows lmwksta.h).

    Field order mirrors the native layout; do not reorder.
    """

    def __init__(self, ptr_size):
        super().__init__(ptr_size)
        self.wki_platform_id = Ptr   # platform id (pointer-sized per EmuStruct)
        self.wki_computername = Ptr  # pointer to computer name string
        self.wki_langroup = Ptr      # pointer to LAN group (domain/workgroup) name
        self.wki_ver_major = ct.c_uint32
        self.wki_ver_minor = ct.c_uint32
class WKSTA_INFO_101(EmuStruct):
    """Emulated WKSTA_INFO_101 structure: WKSTA_INFO_100 plus the LAN root.

    Field order mirrors the native layout; do not reorder.
    """

    def __init__(self, ptr_size):
        super().__init__(ptr_size)
        self.wki_platform_id = Ptr
        self.wki_computername = Ptr
        self.wki_langroup = Ptr
        self.wki_ver_major = ct.c_uint32
        self.wki_ver_minor = ct.c_uint32
        self.wki_lanroot = Ptr  # pointer to LAN root path string
class WKSTA_INFO_102(EmuStruct):
    """Emulated WKSTA_INFO_102 structure: WKSTA_INFO_101 plus logged-on users.

    Field order mirrors the native layout; do not reorder.
    """

    def __init__(self, ptr_size):
        super().__init__(ptr_size)
        self.wki_platform_id = Ptr
        self.wki_computername = Ptr
        self.wki_langroup = Ptr
        self.wki_ver_major = ct.c_uint32
        self.wki_ver_minor = ct.c_uint32
        self.wki_lanroot = Ptr
        self.wki_logged_on_users = Ptr  # count of logged-on users (pointer-sized slot)
|
# -*- coding: utf-8 -*-
from socket import *
import json
import hashlib
import datetime
import time
import random
# These are the listen-request handler functions #
def send_shoplist(s,data,address):
    """Send the client a JSON map of all open shops (id -> name/owner).

    Returns "0" on a successful send, "2" when the UDP send fails.
    """
    open_shops = {}
    for shop_id, shop in shop_list.items():
        if shop["state"] == "open":
            open_shops[shop_id] = {"name": shop["name"], "owner": shop["owner"]}
    payload = json.dumps(open_shops)
    if s.sendto(str.encode(payload), address) != 0:
        return "0"
    # SEND FAIL
    return "2"
def login_check(s,data, address):
    """Validate a login attempt and reply over UDP.

    Returns "0" on success, "1" on unknown user / wrong password,
    "2" when the UDP send itself fails.
    """
    user_id = data["id"]
    user_pw = data["pw"]
    # Unknown user: tell the client and stop.
    if user_infomation.get(user_id, "NULL") == "NULL":
        if s.sendto(b'NO_USER', address)!=0:
            return "1"
        else:
            #SEND FAIL
            return "2"
    # NOTE(review): the stored password is MD5-hashed here and compared with
    # the client-sent value, so the client is expected to send an MD5 digest.
    # MD5 is cryptographically weak — consider a proper password hash.
    pw_m = hashlib.md5()
    pw_m.update(user_infomation[user_id]["pw"].encode("utf-8"))
    # check the password
    user_pw_md5 = pw_m.hexdigest()
    if user_pw_md5 == user_pw:
        if s.sendto(b'SUCCESS', address) != 0:
            # update login info, record the login log
            if login_info.__contains__(user_id):
                login_info[user_id]["state"] = True
                info = {"time": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), "add": address}
                login_info[user_id]["history"].append(info)
            else:
                # first login: create the record, then append the history entry
                login_info[user_id] = {"state": "", "history": []}
                login_info[user_id]["state"] = True
                info = {"time": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), "add": address}
                login_info[user_id]["history"].append(info)
            return "0"
        else:
            #SEND FAIL
            return "2"
    else:
        if s.sendto(b'FAIL', address) != 0:
            return "1"
        else:
            # SEND FAIL
            return "2"
def exit_request(s,data, address):
    """Log a user out: mark them offline, remove them from every shop's
    visitor list, and notify each affected shop owner.

    Returns "0" on success, "1" if the user was never logged in,
    "2" when the UDP send fails.
    """
    if login_info.__contains__(data["user"]):
        login_info[data["user"]]["state"] = False
        # Remove the user from any shop they are currently visiting.
        for key in shop_visit:
            for i in range(len(shop_visit[key])):
                if shop_visit[key][i] == data['user']:
                    shop_visit[key].remove(data['user'])
                    print(shop_visit[key])
                    # send leave tip to the shop owner
                    content = [{'title': '离店通知', 'text': data['user'] + "用户" + "离开了您的店"}]
                    recv = [shop_list[key]['owner']]
                    send = ["商城管理员:小森"]
                    send_message_manage(content, recv, send)
                    break
        if s.sendto(b"0", address)!=0:
            return "0"
        else:
            #SEND FAIL
            return "2"
    else:
        if s.sendto(b"1", address) != 0:
            return "1"
        else:
            #SEND FAIL
            return "2"
def enter_shop(s,data,address):
    """Handle a request to enter shop `data["id"]`.

    Replies with the shop state and goods list ("open"/"close"/"null") and,
    for an open shop, records the visitor (owners are never listed as
    visitors of their own shop).

    Returns "0" on a successful send, "2" when the UDP send fails.
    """
    if shop_list.__contains__(data["id"]):
        if shop_list[data["id"]]["state"] == "open":
            msg={"state":"open","goods":[]}
            msg["goods"] = shop_list[data["id"]]["goods"]
            msg["id"] = data["id"]
            msg["name"] = shop_list[data["id"]]['name']
            msg = json.dumps(msg)
            if s.sendto(str.encode(msg), address) != 0:
                #store online visitor
                if shop_visit.__contains__(data["id"]):
                    if data["user"] not in shop_visit[data["id"]] and data['user'] != shop_list[data['id']]['owner']:
                        shop_visit[data["id"]].append(data["user"])
                elif data['user'] != shop_list[data['id']]['owner']:#the owner not in the visit list
                    shop_visit[data["id"]] = []
                    shop_visit[data["id"]].append(data["user"])
                #send msg to owner
                return "0"
            else:
                # SEND FAIL
                return "2"
        elif shop_list[data["id"]]["state"] == "close":
            msg = {"state": "close", "goods": []}
            msg = json.dumps(msg)
            if s.sendto(str.encode(msg), address) != 0:
                return "0"
            else:
                # SEND FAIL
                return "2"
    else:
        # Shop id does not exist at all.
        msg = {"state": "null", "goods": []}
        msg = json.dumps(msg)
        if s.sendto(str.encode(msg), address) != 0:
            return "0"
        else:
            # SEND FAIL
            return "2"
def leave_shop(s,data,address):
    """Handle a request to leave shop `data["id"]`.

    Owners "leave" trivially (they are never in the visitor list); visitors
    are removed from the list and the owner is notified. Replies "0" on
    success, "1" if the user was not a visitor; returns "2" when the UDP
    send fails.
    """
    if data["user"] == shop_list[data["id"]]["owner"]:
        msg = "0"
        if s.sendto(str.encode(msg), address) != 0:
            return "0"
        else:
            # SEND FAIL
            return "2"
    else:
        if shop_visit.__contains__(data["id"]):
            if data["user"] in shop_visit[data["id"]]:
                shop_visit[data["id"]].remove(data["user"])
                msg="0"
                #send leave tip
                content=[{'title':'离店通知','text':data['user']+"用户"+"离开了您的店"}]
                recv=[shop_list[data['id']]['owner']]
                send=["商城管理员:小森"]
                send_message_manage(content,recv,send)
                if s.sendto(str.encode(msg), address) != 0:
                    return "0"
                else:
                    # SEND FAIL
                    return "2"
            else:
                # User was not in this shop's visitor list.
                msg = "1"
                if s.sendto(str.encode(msg), address) != 0:
                    return "0"
                else:
                    # SEND FAIL
                    return "2"
        else:
            # Shop has no visitor list at all.
            msg = "1"
            if s.sendto(str.encode(msg), address) != 0:
                return "0"
            else:
                # SEND FAIL
                return "2"
def enter_own_shop(s,data,address):
    """Enter the shop owned by the requesting user, if they own one.

    Delegates to enter_shop after filling in the shop id. NOTE(review):
    any non-"0" result from enter_shop (including its send-failure "2")
    is collapsed to "1" here.
    """
    user = data["user"]
    if user_infomation[user]["shop"] != 0:
        # Shop ids are stored as ints in user_infomation but keyed as strings
        # in shop_list, hence the str() conversion.
        data["id"] = str(user_infomation[user]["shop"])
        if enter_shop(s,data,address) == "0":
            return "0"
        else:
            return "1"
    else:
        msg = {"state": "null", "goods": []}
        msg = json.dumps(msg)
        if s.sendto(str.encode(msg), address) != 0:
            return "0"
        else:
            # SEND FAIL
            return "2"
def show_custom(s,data,address):
    """Send the list of visitors currently inside shop `data["id"]`.

    Returns "0" on a successful send, "2" when the UDP send fails.
    """
    shop_id = data["id"]
    # Unknown / empty shops report an empty visitor list.
    visitors = {shop_id: shop_visit[shop_id] if shop_id in shop_visit else []}
    payload = json.dumps(visitors)
    if s.sendto(str.encode(payload), address) != 0:
        return "0"
    # SEND FAIL
    return "2"
def has_shop(s,data,address):
    """Tell the client whether the user owns a shop.

    Replies b"0" (owns a shop) or b"1" (no shop). Returns "0"/"1" on a
    successful send, "2" when the UDP send fails.

    Bug fix: the original called s.sendto("0"/"1", ...) with str payloads;
    socket.sendto requires a bytes-like object and raises TypeError on str,
    so this handler always crashed. Every sibling handler already sends
    bytes (e.g. exit_request's b"0").
    """
    if user_infomation[data['user']]['shop'] != 0:
        if s.sendto(b"0", address) != 0:
            return "0"
        else:
            # SEND FAIL
            return "2"
    else:
        if s.sendto(b"1", address) != 0:
            return "1"
        else:
            # SEND FAIL
            return "2"
def load_info(s,data,address):
    """Send the user's profile, then flush any messages queued while offline.

    Returns "0" on success, "1" for an unknown user (empty JSON object is
    sent), "2" when the UDP send fails.
    """
    if user_infomation.__contains__(data['user']):
        user_id = data['user']
        msg = user_infomation[user_id]
        msg = json.dumps(msg)
        if s.sendto(str.encode(msg), address) != 0:
            # send the messages buffered while the user was offline:
            if message.__contains__(user_id):
                for i in range(len(message[user_id])):
                    msg = message[user_id][i]
                    # deliver via the long-lived socket registered by add_socket
                    send_message(msg['send'],
                                 msg['content'],
                                 msg['time'],
                                 socket_user[user_id]['add'],
                                 socket_user[user_id]['socket'])
                message.pop(user_id)
            return "0"
        else:
            # SEND FAIL
            return "2"
    else:
        msg={}
        msg = json.dumps(msg)
        if s.sendto(str.encode(msg), address) != 0:
            return "1"
        else:
            # SEND FAIL
            return "2"
def buy_goods(s,data,address):
    """Process a purchase: reply with an order number, record the sale for
    both shop and buyer, and notify owner and buyer.

    The order number is built from a timestamp, shop id, goods id and a
    random 4-digit suffix. Returns "0" on success, "2" when the UDP send
    fails.
    """
    goods_id = data['goods_id']
    goods_name = data['goods_name']
    shop_id = data['shop_id']
    user=data['user']
    num = data['num']
    time_now = int(time.time())
    time_local = time.localtime(time_now)
    dt = time.strftime("%Y%m%d%M", time_local)
    # the number (order id) of this purchase
    shopping_num=dt + shop_id + goods_id + str(random.randint(1000, 9999))
    msg = {'result':'success','shopping_num':shopping_num}
    msg = json.dumps(msg)
    if s.sendto(str.encode(msg), address) != 0:
        # write recording: r_s for the shop's sold list, r_u for the buyer
        r_s = {'id':goods_id,
               'shopping_num':shopping_num,
               'num':num,
               'time':time.strftime("%Y-%m-%d %H:%M:%S",time_local),
               'user':user,
               'goods_name': goods_name}
        r_u = {'shop_name':shop_list[shop_id]['name'],
               'shopping_num':shopping_num,
               'num':num,
               'time':time.strftime("%Y-%m-%d %H:%M:%S",time_local),
               'goods_name':goods_name}
        if sold_recording.__contains__(shop_id):
            sold_recording[shop_id].append(r_s)
        else:
            sold_recording[shop_id] = []
            sold_recording[shop_id].append(r_s)
        if bought_recording.__contains__(user):
            bought_recording[user].append(r_u)
        else:
            bought_recording[user] = []
            bought_recording[user].append(r_u)
        # send msg to owner
        content = [{"title":"用户购买通知",
                    "text":user+"用户购买了"+num+"件"+goods_name+",订单号为:"+shopping_num+",请记得发货哦!"}]
        send = ["商城管理员:小森"]
        recv = [shop_list[shop_id]['owner']]
        send_message_manage(content, recv, send)
        # send msg to user
        content1 = [{"title": "购买成功",
                     "text": "您购买了" + num + "件" + goods_name + ",订单号为:" + shopping_num + ",请记得查收哦!"}]
        send1 = ["商城管理员:小森"]
        recv1 = [user]
        result = send_message_manage(content1, recv1, send1)
        return "0"
    else:
        # SEND FAIL
        return "2"
def add_socket(s,data,address):
    """Register the client's long-lived socket/address used to push messages.

    Always acknowledges with '0'.
    """
    user = data['user']
    if user not in socket_user:
        socket_user[user] = {'socket': s, 'add': address}
    return '0'
def send_logininfo(s, data, address):
    """Send the user's login history as JSON (empty list if never logged in).

    Returns "0" on a successful send, "2" when the UDP send fails.
    """
    if data['user'] in login_info:
        history = login_info[data['user']]['history']
    else:
        history = []
    payload = json.dumps(history)
    if s.sendto(str.encode(payload), address) != 0:
        return "0"
    # SEND FAIL
    return "2"
def send_shopping_recording(s, data, address):
    """Send the user's purchase history as JSON (empty list if none).

    Returns "0" on a successful send, "2" when the UDP send fails.
    """
    records = bought_recording.get(data['user'], [])
    payload = json.dumps(records)
    if s.sendto(str.encode(payload), address) != 0:
        return "0"
    # SEND FAIL
    return "2"
def send_sold_recording(s, data, address):
    """Send the sales history of the user's own shop as JSON (empty if none).

    Returns "0" on a successful send, "2" when the UDP send fails.
    """
    # Shop ids are ints in user_infomation but string keys in sold_recording.
    shop_key = str(user_infomation[data['user']]['shop'])
    records = sold_recording.get(shop_key, [])
    payload = json.dumps(records)
    if s.sendto(str.encode(payload), address) != 0:
        return "0"
    # SEND FAIL
    return "2"
def add_goods(s,data,address):
    """Add a new item to a shop and announce it to the shop's visitors.

    Reply payloads: "success", "id_fail" (goods id already taken) or
    "fail" (unknown shop). Returns "0" on a successful send, "2" when
    the UDP send fails.
    """
    flag = 0
    if shop_list.__contains__(data['id']):
        # Reject duplicate goods ids within the same shop.
        for i in range(len(shop_list[data['id']]['goods'])):
            if shop_list[data['id']]['goods'][i]['id'] == data['goods_id']:
                info = {"result": "id_fail"}
                flag =1
                break
        if flag == 0:
            good = {}
            good['id'] = data['goods_id']
            good['name'] = data['goods_name']
            good['price'] = data['goods_price']
            shop_list[data['id']]['goods'].append(good)
            info = {"result" : "success"}
            # send a "new item" notification to every customer in this shop
            if shop_visit.__contains__(data['id']):
                recv = shop_visit[data["id"]]
                send =[]
                content=[]
                for i in range(len(recv)):
                    send.append(shop_list[data['id']]['name'])
                    c = {}
                    c['title'] = '新商品上架通知'
                    c['text'] = shop_list[data['id']]['name']+"店上架了新商品:"+data['goods_name']+",单价为:"+data['goods_price']
                    content.append(c)
                send_message_manage(content,recv,send)
    else:
        info = {"result" : "fail"}
    msg = json.dumps(info)
    if s.sendto(str.encode(msg), address) != 0:
        return "0"
    else:
        return "2"
# These are the request functions #
# This function manages message delivery and handles the different delivery
# situations: if the recipient is not online, the message is simply queued
# on the server until they next log in.
# Parameters #
# recv: list of users the server sends a notification to: ['user1', 'user2', ...]
# content: the messages to send, one per recipient: ['content1', 'content2', ...]
# send: all senders are really the server; this parameter only supplies the
# displayed sender name for each message
def send_message_manage(content,recv,send):
    """Deliver (or queue) one message per recipient.

    For offline recipients the message is buffered in `message`; online
    recipients are pushed the message immediately over their registered
    socket. Returns a list with 0 (delivered/queued) or 1 (push failed)
    per recipient.
    """
    # the send result: 0 is success, 1 is failure
    result=[]
    for i in range(len(recv)):
        # is the recipient online?
        if (login_info.__contains__(recv[i]) == False) or (login_info[recv[i]]['state'] == False):
            # offline: buffer the message with a server-side timestamp
            msg = {}
            msg['send'] = send[i]
            msg['time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            msg['content'] = content[i]
            if message.__contains__(recv[i]):
                message[recv[i]].append(msg)
            else:
                message[recv[i]]=[]
                message[recv[i]].append(msg)
            result.append(0)
        else:
            # online: push immediately via the registered long-lived socket
            address = socket_user[recv[i]]['add']
            s = socket_user[recv[i]]['socket']
            if send_message(send[i],content[i],"",address,s) == 0:
                result.append(0)
            else:
                result.append(1)
    return result
# this function is the one that actually sends a single message
def send_message(send,content,time,address,s):
    """Push one message (sender, timestamp, content) to a client over UDP.

    An empty `time` means "now"; a non-empty one is the buffered timestamp
    replayed to a user who just logged in. Returns 0 on success, 2 on a
    failed send.

    NOTE(review): the `time` parameter shadows the imported `time` module
    inside this function; harmless here since only `datetime` is used.
    """
    # when time is empty, stamp the message with the current server time
    if time=="":
        time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    msg={}
    msg['send'] = send
    msg['time'] = time
    msg['content'] = content
    msg = json.dumps(msg)
    if s.sendto(str.encode(msg), address) != 0:
        return 0
    else:
        # SEND FAIL
        return 2
def byteify(input):
    """Recursively encode every str inside *input* to UTF-8 bytes.

    Dicts and lists are rebuilt with keys and elements converted; any
    other value is returned unchanged.
    """
    if isinstance(input, str):
        return input.encode('utf-8')
    if isinstance(input, list):
        return [byteify(item) for item in input]
    if isinstance(input, dict):
        return {byteify(k): byteify(v) for k, v in input.items()}
    return input
#store the messge wait to send
#{"recv":[{"send":"","time":"","content":{"title":"","text":""}}]}
message = {}
#the imformation for registered users
user_infomation = {
'hushiyang':{
'pw':'KaJe2008',
'sex':'M',
'shop':0,
'user_id':31241
},
'鼠小宝':{
'pw':'123456',
'sex':'F',
'shop':11323,
'user_id':11323
},
'zhangsan':{
'pw':'woaijiwang',
'sex':'F',
'shop':0,
'user_id':13232
},
'Cook':{
'pw':'zuixihuan',
'sex':'M',
'shop':32423,
'user_id':32423
},
'李昀锴':{
'pw':'zuixihuanjiwang',
'sex':'M',
'shop':10019,
'user_id':32423
},
'小炸':{
'pw':'woaijiwang',
'sex':'M',
'shop':50344,
'user_id':50344
},
'卡姆式':{
'pw':'woaijiwang',
'sex':'M',
'shop':90231,
'user_id':90231
},
'小p':{
'pw':'woaijiwang',
'sex':'M',
'shop':38943,
'user_id':38943
},
'4.0':{
'pw':'woaijiwang',
'sex':'M',
'shop':21340,
'user_id':21340
},
'小作坊':{
'pw':'woaijiwang',
'sex':'M',
'shop':24330,
'user_id':24330
},
'陈欧':{
'pw':'woaijiwang',
'sex':'M',
'shop':92310,
'user_id':92310
},
'2233':{
'pw':'woaijiwang',
'sex':'M',
'shop':79123,
'user_id':79123
}
}
#the imformation for shops
shop_list = {
'10019':{
"name":"小米手机旗舰店",
"owner":"李昀锴",
"goods":[
{"id": "0001","name":"红米2","price":"1200"},
{"id": "0002", "name": "MIX2", "price": "3999"},
{"id": "0003", "name": "小米5", "price": "800"},
{"id": "0004", "name": "小米6", "price": "1000"},
{"id": "0005", "name": "小米电饭煲", "price": "399"},
{"id": "0006", "name": "小米手环", "price": "79"}
],
"state":"open"
},
'32423':{
"name":"Apple旗舰店",
"owner":"Cook",
"goods":[
{"id": "0001","name":"Iphone 7","price":"3900"},
{"id": "0002", "name": "iPhone 8", "price": "4299"},
{"id": "0003", "name": "iPhone X", "price": "8800"},
{"id": "0004", "name": "MacBookPro", "price": "11399"},
{"id": "0005", "name": "iMac", "price": "7900"},
{"id": "0006", "name": "iWatch", "price": "2900"},
{"id": "0007", "name": "iPad", "price": "3100"}
],
"state": "open"
},
'11323':{
"name":"三只松鼠",
"owner":"鼠小宝",
"goods":[
{"id": "0001","name":"夏威夷果","price":"19.99"},
{"id": "0002", "name": "山核桃", "price": "29.99"},
{"id": "0003", "name": "牛板筋", "price": "49.99"}
],
"state": "open"
},
'50344':{
"name":"三星手机旗舰店",
"owner":"小炸",
"goods":[
{"id": "0001","name":"NOTE7","price":"7900"},
{"id": "0002", "name": "Galaxy Note8", "price": "8200"},
{"id": "0003", "name": "显示器", "price": "3300"},
{"id": "0004", "name": "内存条8G", "price": "500"}
],
"state": "open"
},
'90231':{
"name":"KFC旗舰店",
"owner":"卡姆氏",
"goods":[
{"id": "0001","name":"吮指原味鸡*30","price":"169"},
{"id": "0002", "name": "新奥尔良烤翅*20", "price": "145"},
{"id": "0003", "name": "花生酱双层汉堡", "price": "22.98"},
{"id": "0004", "name": "双层鸡腿堡套餐", "price": "35.99"},
{"id": "0005", "name": "酸菜鸡块饭", "price": "24.5"},
{"id": "0006", "name": "甜筒*30", "price": "78"}
],
"state": "open"
},
'38943':{
"name":"Python源码一体店",
"owner":"小p",
"goods":[
{"id": "0001","name":"web网站python源码","price":"9999"},
{"id": "0002", "name": "数据库python源码", "price": "2000"}
],
"state": "open"
},
'21340':{
"name":"专业PJ代写",
"owner":"4.0",
"goods":[
{"id": "0001","name":"计网PJ","price":"6900"},
{"id": "0002", "name": "数据结构PJ", "price": "1450"},
{"id": "0003", "name": "操作系统PJ", "price": "5400"},
{"id": "0004", "name": "计算机体系机构PJ", "price": "900"},
{"id": "0005", "name": "保密管理概率PJ", "price": "1500"},
{"id": "0006", "name": "数据库PJ", "price": "4000"},
{"id": "0007", "name": "C++PJ", "price": "500"},
{"id": "0008", "name": "数字水印PJ", "price": "2888"}
],
"state": "open"
},
'24330':{
"name":"卫尤辣条",
"owner":"小作坊",
"goods":[
{"id": "0001","name":"手撕面筋","price":"5"},
{"id": "0002", "name": "小辣条", "price": "0.9"},
{"id": "0003", "name": "大面筋", "price": "2.8"},
{"id": "0004", "name": "面筋大礼包", "price": "58.88"}
],
"state": "open"
},
'92310':{
"name":"聚美优品",
"owner":"陈欧",
"goods":[
{"id": "0001","name":"芦荟胶","price":"29.8"},
{"id": "0002", "name": "御泥坊面膜礼盒", "price": "199"},
{"id": "0003", "name": "欧莱雅洗面奶", "price": "54"},
{"id": "0004", "name": "BB霜", "price": "79.98"},
{"id": "0005", "name": "眉笔", "price": "20"},
{"id": "0006", "name": "护肤霜", "price": "28"},
{"id": "0007", "name": "护手霜", "price": "16"},
{"id": "0008", "name": "口红", "price": "188"},
{"id": "0009", "name": "祛痘套装", "price": "389"}
],
"state": "open"
},
'79123':{
"name":"B站旗舰店",
"owner":"2233",
"goods":[
{"id": "0001","name":"碧蓝航线立牌周边","price":"19.8"},
{"id": "0002", "name": "应援灯牌", "price": "80"},
{"id": "0003", "name": "小电视抱枕", "price": "58"},
{"id": "0004", "name": "B站大会员1个月", "price": "19"}
],
"state": "open"
},
}
#the login_state info
login_info = {}
#the shop visit list{"shopid":["userid","userid"]}
shop_visit = {}
#the shop sold recording{'shop':[{'id':'','shopping_num':'','num':'','time':'','user':''}]}
sold_recording = {}
#the user bought recording{'user':[{'shop_name':'','shopping_num':'','num':'','time':'','goods_name':''}]}
bought_recording = {}
#the long connect socket for send message{"user1":{'socket':socket,'add':address}
socket_user = {}
#the request function dict
request_function = {
"load_info":load_info,
"login_check":login_check,
"send_shoplist":send_shoplist,
"exit_request":exit_request,
"enter_shop":enter_shop,
"leave_shop":leave_shop,
"enter_own_shop":enter_own_shop,
"show_custom":show_custom,
"has_shop":has_shop,
"buy_goods":buy_goods,
"send_logininfo":send_logininfo,
"send_sold_recording":send_sold_recording,
"send_shopping_recording":send_shopping_recording,
'listen':add_socket,
"add_goods":add_goods
}
#the main listen function
def listen_request():
    """Main UDP server loop.

    Binds to 127.0.0.1:62000, receives JSON requests, dispatches each one
    by its "method" field through `request_function`, and appends the
    outcome to log/log.txt. Runs forever.
    """
    HOST = '127.0.0.1'
    PORT = 62000
    s = socket(AF_INET, SOCK_DGRAM)
    s.bind((HOST, PORT))
    print('...waiting for message..')
    while True:
        print('server waiting')
        data, address = s.recvfrom(2048)
        if not data:
            continue
        data = json.loads(data)
        result = request_function[data["method"]](s, data, address)
        # write log — fix: use a context manager so the file handle is
        # closed even if the write raises (original open()/close() pair
        # leaked the handle on error).
        log = data["method"] + ":" + result + " " + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n'
        print(log)
        with open('log/log.txt', 'a') as output:
            output.write(log)
|
import boto3
from botocore.exceptions import ClientError
import time
import sys
# Copy the sample dataset into <bucket_name>/<prefix>/data/, skipping
# objects that already exist at the destination.
bucket_name = sys.argv[1]
prefix = sys.argv[2]

start = time.time()
print('Data prep started...')

# Based on model monitor example using CSE-CIC-IDS2018 dataset
# see also: https://github.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor
s3 = boto3.resource('s3')

source_bucket_name = "sagemaker-ap-southeast-2-691313291965"
source_bucket_prefix = "aim362/data/"
source_bucket = s3.Bucket(source_bucket_name)

for s3_object in source_bucket.objects.filter(Prefix=source_bucket_prefix):
    target_key = s3_object.key.replace(source_bucket_prefix, prefix + '/data/')
    copy_source = {
        'Bucket': source_bucket_name,
        'Key': s3_object.key
    }
    try:
        # load() raises ClientError with code 404 when the target is absent;
        # its None return value was previously bound to an unused variable.
        s3.Object(bucket_name, target_key).load()
        print('Already Copied {0}'.format(target_key))
    except ClientError as e:
        # Fix: only treat "not found" as "needs copying". The original caught
        # every ClientError (e.g. AccessDenied) and copied anyway, masking
        # real failures — re-raise anything that is not a 404.
        if e.response['Error']['Code'] != '404':
            raise
        print('Copying {0} to {1} ...'.format(s3_object.key, target_key))
        s3.Bucket(bucket_name).copy(copy_source, target_key)

end = time.time()
print('Data prep complete in: {}'.format(end - start))
import pytest
from pyasn1.type.namedtype import NamedType, NamedTypes, DefaultedNamedType, OptionalNamedType
from asn1PERser.codec.per.encoder import encode as per_encoder
from asn1PERser.classes.data.builtin.OctetStringType import OctetStringType
from asn1PERser.classes.types.constraint import ValueSize
def SCHEMA_no_constrain_octetstring():
    """Build a schema class for an unconstrained ASN.1 OCTET STRING."""
    class MyOctetString(OctetStringType):
        '''
        MyBitString ::= OCTET STRING
        '''
        pass
    return MyOctetString
def SCHEMA_constrained_octetstring(lb, ub):
    """Build a schema class for OCTET STRING (SIZE(lb..ub)), no extension."""
    class MyOctetString(OctetStringType):
        '''
        MyBitString ::= OCTET STRING (SIZE(lowerEndpoint..upperEndpoint))
        '''
        subtypeSpec = ValueSize(lb, ub)
    return MyOctetString
def SCHEMA_constrained_ext_octetstring(lb, ub):
    """Build a schema class for OCTET STRING (SIZE(lb..ub, ...)) — extensible."""
    class MyOctetString(OctetStringType):
        '''
        MyBitString ::= OCTET STRING (SIZE(lowerEndpoint..upperEndpoint, ...))
        '''
        subtypeSpec = ValueSize(lb, ub, extensionMarker=True)
    return MyOctetString
def DATA_octetstring(schema, bin_val=None, hex_val=None):
    """Instantiate *schema* from a binary or hexadecimal string literal.

    bin_val takes precedence when both are supplied (matches the original
    if/elif ordering).

    Raises:
        ValueError: if neither bin_val nor hex_val is given. (Fix: the
        original fell through both branches and raised a confusing
        UnboundLocalError on the return statement.)
    """
    if bin_val is not None:
        return schema(binValue=bin_val)
    if hex_val is not None:
        return schema(hexValue=hex_val)
    raise ValueError("DATA_octetstring requires bin_val or hex_val")
@pytest.mark.parametrize("octetstring, encoded", [
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), bin_val='1'), '0180'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), bin_val='0'), '0100'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), bin_val='01'), '0140'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), bin_val='10'), '0180'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), bin_val='1111'), '01F0'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), bin_val='111100'), '01F0'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), bin_val='1111000011110'), '02F0F0'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), bin_val='0000110000110011'), '020C33'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), bin_val='01011100101100110101101101101110001110010101110010101'),
'075CB35B6E395CA8'),
])
def test_no_constrain_octetstring_as_binary_value_can_be_encoded(octetstring, encoded):
    """PER-encode an unconstrained OCTET STRING built from a binary literal."""
    expected = bytearray.fromhex(encoded)
    assert per_encoder(octetstring) == expected
@pytest.mark.parametrize("octetstring, encoded", [
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), hex_val='0'), '0100'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), hex_val='1'), '0110'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), hex_val='01'), '0101'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), hex_val='10'), '0110'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), hex_val='00A'), '0200A0'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), hex_val='AA0'), '02AA00'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), hex_val='ABCDEFA'), '04ABCDEFA0'),
(DATA_octetstring(SCHEMA_no_constrain_octetstring(), hex_val='0123ABCDEFA0123'), '080123ABCDEFA01230'),
])
def test_no_constrain_octetstring_as_hex_value_can_be_encoded(octetstring, encoded):
    """PER-encode an unconstrained OCTET STRING built from a hex literal."""
    expected = bytearray.fromhex(encoded)
    assert per_encoder(octetstring) == expected
@pytest.mark.parametrize("octetstring, encoded", [
# (DATA_octetstring(SCHEMA_constrained_octetstring(lb=0, ub=0), bin_val=''), '00'), # does not work
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=0, ub=0), hex_val=''), '00'),
])
def test_constrained_octetstring_of_zero_length_can_be_encoded(octetstring, encoded):
    """PER-encode a SIZE(0..0)-constrained (empty) OCTET STRING."""
    expected = bytearray.fromhex(encoded)
    assert per_encoder(octetstring) == expected
@pytest.mark.parametrize("octetstring, encoded", [
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=1, ub=1), bin_val='1'), '80'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=1, ub=1), bin_val='01110'), '70'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=1, ub=1), bin_val='00001111'), '0F'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=1, ub=1), hex_val='C'), 'C0'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=1, ub=1), hex_val='CF'), 'CF'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=2, ub=2), bin_val='111111110'), 'FF00'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=2, ub=2), bin_val='0000111100001'), '0F08'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=2, ub=2), bin_val='1100110011001100'), 'CCCC'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=2, ub=2), hex_val='ABC'), 'ABC0'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=2, ub=2), hex_val='000'), '0000'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=2, ub=2), hex_val='0000'), '0000'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=2, ub=2), hex_val='FFFF'), 'FFFF'),
])
def test_constrained_same_len_octetstring_with_len_le_2_octetes_can_be_encoded(octetstring, encoded):
    """Fixed-size OCTET STRING of 1-2 octets: encoded with no length prefix."""
    expected = bytearray.fromhex(encoded)
    assert per_encoder(octetstring) == expected
@pytest.mark.parametrize("octetstring, encoded", [
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=3, ub=3), bin_val='000000010000000100000001'), '010101'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=3, ub=3), hex_val='ABCDEF'), 'ABCDEF'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=4, ub=4), bin_val='01111111111111111111111100000000'), '7FFFFF00'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=4, ub=4), hex_val='ABCDEF01'), 'ABCDEF01'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=17, ub=17), hex_val='11223344556677889900AABBCCDDEEFF11'),
'11223344556677889900AABBCCDDEEFF11'),
])
def test_constrained_same_len_octetstring_with_len_gt_2_and_lt_64K_can_be_encoded(octetstring, encoded):
    """Fixed-size OCTET STRING of 3..64K octets: value octets only."""
    expected = bytearray.fromhex(encoded)
    assert per_encoder(octetstring) == expected
@pytest.mark.parametrize("octetstring, encoded", [
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=0, ub=15), hex_val=''), '00'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=0, ub=15), hex_val='A'), '10A0'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=0, ub=15), hex_val='A0'), '10A0'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=0, ub=15), hex_val='112233445566778899001122334455'),
'F0112233445566778899001122334455'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=0, ub=22), hex_val='A'), '08A0'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=0, ub=22), hex_val='A0'), '08A0'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=0, ub=22), hex_val='112233445566778899001122334455'),
'78112233445566778899001122334455'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=0, ub=22), hex_val='11223344556677889900112233445566778899001122'),
'B011223344556677889900112233445566778899001122'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=10, ub=15), hex_val='112233445566778899AA'),
'00112233445566778899AA'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=10, ub=15), hex_val='112233445566778899AABBCCDDEEFF'),
'A0112233445566778899AABBCCDDEEFF'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=10, ub=22), hex_val='112233445566778899AA'),
'00112233445566778899AA'),
(DATA_octetstring(SCHEMA_constrained_octetstring(lb=10, ub=22), hex_val='112233445566778899AABBCCDDEEFF001122334455'),
'B0112233445566778899AABBCCDDEEFF001122334455'),
])
def test_constrained_different_len_octet_string_can_be_encoded(octetstring, encoded):
    """Variable-size constrained OCTET STRING: constrained length + value."""
    expected = bytearray.fromhex(encoded)
    assert per_encoder(octetstring) == expected
@pytest.mark.parametrize("octetstring, encoded", [
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=0, ub=0), hex_val=''), '00'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=1, ub=1), hex_val='A'), '5000'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=1, ub=1), hex_val='AB'), '5580'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=5, ub=5), hex_val='112233445'), '001122334450'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=5, ub=5), hex_val='1122334455'), '001122334455'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=15, ub=15), hex_val='112233445566778899001122334455'),
     '00112233445566778899001122334455'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=22, ub=22), hex_val='11223344556677889900112233445566778899001122'),
     '0011223344556677889900112233445566778899001122'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=0, ub=2), hex_val=''), '00'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=0, ub=2), hex_val='A'), '20A0'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=0, ub=2), hex_val='AB'), '20AB'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=0, ub=2), hex_val='ABC'), '40ABC0'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=0, ub=2), hex_val='ABCD'), '40ABCD'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=0, ub=15), hex_val='A'), '08A0'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=0, ub=15), hex_val='AB'), '08AB'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=0, ub=15), hex_val='112233445566778899'), '48112233445566778899'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=0, ub=15), hex_val='112233445566778899AABBCCDDEEFF'),
     '78112233445566778899AABBCCDDEEFF'),
])
def test_constrained_with_extension_when_len_is_within_extension_root_octetstring_can_be_encoded(octetstring, encoded):
    """PER encoding of extensible-constraint OCTET STRINGs whose length lies
    inside the extension root must equal the expected hex byte string."""
    assert per_encoder(octetstring) == bytearray.fromhex(encoded)
@pytest.mark.parametrize("octetstring, encoded", [
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=0, ub=0), hex_val='A'), '8001A0'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=0, ub=0), hex_val='AB'), '8001AB'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=0, ub=0), hex_val='1122334455'), '80051122334455'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=2, ub=2), hex_val=''), '8000'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=2, ub=2), hex_val='A'), '8001A0'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=2, ub=2), hex_val='AB'), '8001AB'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=2, ub=2), hex_val='112233'), '8003112233'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=2, ub=2), hex_val='1122334455'), '80051122334455'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=3, ub=4), hex_val=''), '8000'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=3, ub=4), hex_val='A'), '8001A0'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=3, ub=4), hex_val='AB'), '8001AB'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=3, ub=4), hex_val='1122'), '80021122'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=3, ub=4), hex_val='1122334455'), '80051122334455'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=3, ub=4), hex_val='11223344556677'), '800711223344556677'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=10, ub=22), hex_val='112233445566778899'), '8009112233445566778899'),
    (DATA_octetstring(SCHEMA_constrained_ext_octetstring(lb=10, ub=22), hex_val='1122334455667788990011223344556677889900112233'),
     '80171122334455667788990011223344556677889900112233'),
])
def test_constrained_with_extension_when_len_is_NOT_within_extension_root_octetstring_can_be_encoded(octetstring, encoded):
    """Lengths outside the extension root: every expected encoding here starts
    with 0x80 followed by an explicit length octet before the data."""
    assert per_encoder(octetstring) == bytearray.fromhex(encoded)
|
def min_max(*args):
    """Return the largest and smallest of the given arguments.

    Returns:
        dict: {"max": <largest value>, "min": <smallest value>}.

    Raises:
        IndexError: if called with no arguments (args[0] below).
    """
    the_max = args[0]  # running maximum
    the_min = args[0]  # running minimum
    for value in args:
        if value > the_max:
            the_max = value
        elif value < the_min:
            the_min = value
    # Bug fix: the "min" key previously contained a stray colon ("min:"),
    # so callers looking up result["min"] got a KeyError.
    return {"max": the_max, "min": the_min}
print(min_max(1, 2, 4, 6))
|
from Base import *
from Object import *
'''
Esta funcao cria um objeto do tipo Ceu e o retorna
@PARAMETROS
id_tex_livre - primeiro id de textura nao utilizado - passado como lista de tamanho 1
vertices_list - lista de coordenadas de vertices
textures_coord_list - lista de coordenadas de textura
normals_list - lista de normais de vertices
@RETORNO
object - o objeto Ceu criado
'''
def cria_ceu(id_tex_livre, vertices_list, textures_coord_list, normals_list):
    """Create and return the sky ("Ceu") Object.

    Args:
        id_tex_livre: first unused texture id, passed as a 1-element list.
        vertices_list: list of vertex coordinates.
        textures_coord_list: list of texture coordinates.
        normals_list: list of vertex normals.

    Returns:
        The newly created sky Object.
    """
    # Texture files used by the sky model.
    texture_files = ["Ceu/toy_story.jpg"]
    obj_file = "Ceu/ceu.obj"
    mtl_file = "Ceu/ceu.mtl"
    # Position and rotation all zero, uniform scale 50.0.
    ceu = Object(obj_file, mtl_file, texture_files, 0, 0, 0, 0, 0, 0, 50.0,
                 id_tex_livre, vertices_list, textures_coord_list, normals_list)
    return ceu
# -*- coding: utf-8 -*-
# @Author: Fallen
# @Date: 2020-04-24 12:55:16
# @Last Modified by: Fallen
# @Last Modified time: 2020-04-24 12:55:16
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-16 14:50:41
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
'''
hashlib模块:用于加密
封装了一些加密的类
加密的目的适用于判断和验证 最多用 md5()
特点:
 - 把一个大的数据切分成不同小块,分别对不同小块进行加密,再汇总结果,和直接对整体加密的结果是一致的
 - 单向加密,一般不可逆
 - 原始数据的一点小变化,将导致结果的非常大的差异
'''
# So, how do we actually use it?
import hashlib
# First, list what the module offers
print(dir(hashlib))
'''
['__all__', '__block_openssl_constructor', '__builtin_constructor_cache', '__builtins__', '__cached__', '__doc__', '__file__', '__get_builtin_constructor', '__loader__', '__name__', '__package__', '__spec__', '_hashlib', 'algorithms_available', 'algorithms_guaranteed', 'blake2b', 'blake2s', 'md5', 'new', 'pbkdf2_hmac', 'scrypt', 'sha1', 'sha224', 'sha256', 'sha384', 'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512', 'sha512', 'shake_128', 'shake_256']
'''
# The three steps to hash a piece of data:
# 1. get a hash object
m = hashlib.md5()
# 2. feed the target data with the object's update() method;
#    update() may be called multiple times and the chunks accumulate
m.update('abc中文'.encode('utf-8'))
m.update('def'.encode('utf-8'))
# 3. read the result with hexdigest(); digest() also works but returns raw
#    bytes, so it is used less often
res = m.hexdigest()
print(res) #2f1b6e294e72d25ae196fe4ac2d27de6
# 给一个数据加密
# 验证:用另一个数据加密的结果和第一次加密的结果对比
# 如果结果相同,说明原文相同
'''
它通过一个函数,把任意长度的数据按照一定规则转换为一个固定长度的数据串(通常用16进制的字符串表示)。
比如:之前我们在一个文件中存储用户的用户名和密码是这样的形式:
太白|123456
有什么问题?你的密码是明文的,如果有人可以窃取到这个文件,那么你的密码就会泄露了。所以,一般我们存储密码时都是以密文存储,比如:
太白|e10adc3949ba59abbe56e057f20f883e
那么即使是他窃取到这个文件,他也不会轻易的破解出你的密码,这样就会保证了数据的安全。
hashlib模块就可以完成的就是这个功能。
hashlib的特征以及使用要点:
bytes类型数据 ---> 通过hashlib算法 ---> 固定长度的字符串
不同的bytes类型数据转化成的结果一定不同。
相同的bytes类型数据转化成的结果一定相同。
此转化过程不可逆。
那么刚才我们也说了,hashlib的主要用途有两个:
密码的加密。
文件一致性校验。
hashlib模块就相当于一个算法的集合,这里面包含着很多的算法,算法越高,转化成的结果越复杂,安全程度越高,相应的效率就会越低。
'''
# 1. Password hashing
# Using the common md5 algorithm: compute the md5 of a string
import hashlib
md5 = hashlib.md5()
md5.update('123456'.encode('utf-8'))
res = md5.hexdigest()
print(res)
## Result:
## 'e10adc3949ba59abbe56e057f20f883e'
## Check: identical bytes always produce the identical digest
md5 = hashlib.md5()
md5.update('123456'.encode('utf-8'))
res = md5.hexdigest()
print(res)
## Result:
## 'e10adc3949ba59abbe56e057f20f883e'
## Check: different bytes always produce a different digest
md5 = hashlib.md5()
md5.update('12345'.encode('utf-8'))
res = md5.hexdigest()
print(res)
## Result:
## '827ccb0eea8a706c4c34a16891f84e7b'
'''
上面就是普通的md5加密,非常简单,几行代码就可以了,但是这种加密级别是最低的,相对来说不很安全。虽然说hashlib加密是不可逆的加密方式,但也是可以破解的,那么他是如何做的呢?你看网上好多MD5解密软件,他们就是用最low的方式,空间换时间。他们会把常用的一些密码比如:123456,111111,以及他们的md5的值做成对应关系,类似于字典,
dic = {'e10adc3949ba59abbe56e057f20f883e': 123456}
然后通过你的密文获取对应的密码。
只要空间足够大,那么里面容纳的密码会非常多,利用空间换取破解时间。 所以针对刚才说的情况,我们有更安全的加密方式:加盐。
'''
#2.加盐加密,就是在创建加密对象的时候给 hashlib.md5()传个参数
##2.1 固定的盐
'''
什么叫加盐?加盐这个词儿来自于国外,外国人起名字我认为很随意,这个名字来源于烧烤,俗称BBQ。我们烧烤的时候,一般在快熟的时候,都会给肉串上面撒盐,增加味道,那么这个撒盐的工序,外国人认为比较复杂,所以就讲比较复杂的加密方式称之为加盐
'''
ret = hashlib.md5('一山一晴'.encode('utf-8')) #这个'一山一晴'就是固定的盐
ret.update('要加密的东西'.encode('utf-8'))
print(ret.hexdigest())
'''
上面的'一山一晴'就是固定的盐,比如你在一家公司,公司会将你们所有的密码在md5之前增加一个固定的盐,这样提高了密码的安全性。但是如果黑客通过手段窃取到你这个固定的盐之后,也是可以破解出来的。所以,我们还可以加动态的盐。
'''
## 2.2 动态的盐
username = 'fallen'
ret = hashlib.md5(username[::2].encode('utf-8')) #这样针对每个账户,每个账户 盐都不一样
ret.update('要加密的东西'.encode('utf-8'))
print(ret.hexdigest())
'''
这样,安全性能就大大提高了。
那么我们之前说了hahslib模块是一个算法集合,他里面包含很多种加密算法,刚才我们说的MD5算法是比较常用的一种加密算法,一般的企业用MD5就够用了。但是对安全要求比较高的企业,比如金融行业,MD5加密的方式就不够了,得需要加密方式更高的,比如sha系列,sha1,sha224,sha512等等,数字越大,加密的方法越复杂,安全性越高,但是效率就会越慢。
sha1,sha224,sha512等都是算法名称,跟 md5 是一样的。用法也一样
但我们多数就用 md5 就行了
'''
ret = hashlib.sha1()
ret.update('要加密的东西'.encode('utf-8'))
print(ret.hexdigest())
##也可以加盐
ret = hashlib.sha384('爱你么么哒'.encode("utf-8"))
ret.update('要加密的东西'.encode('utf-8'))
print(ret.hexdigest())
##也可以加动态的盐
dongtai = 'qingtianyigepili'
ret = hashlib.sha224(dongtai[::2].encode('utf-8'))
ret.update('要加密的东西'.encode('utf-8'))
print(ret.hexdigest())
# 3.文件的一致性校验
'''
以下说明,抄自太白金星老师的博客:这个文档里的大段文字都是从他那抄的
hashlib模块除了可以用于密码加密之外,还有一个常用的功能,那就是文件的一致性校验。
linux讲究:一切皆文件,我们普通的文件,是文件,视频,音频,图片,以及应用程序等都是文件。我们都从网上下载过资源,比如我们刚开学时让大家从网上下载pycharm这个软件,当时你可能没有注意过,其实你下载的时候都是带一个MD5或者shax值的,为什么? 我们的网络世界是很不安全的,经常会遇到病毒,木马等,有些你是看不到的可能就植入了你的电脑中,那么他们是怎么来的? 都是通过网络传入来的,就是你在网上下载一些资源的时候,趁虚而入,当然大部门被我们的浏览器或者杀毒软件拦截了,但是还有一部分偷偷的进入你的磁盘中了。那么我们自己如何验证我们下载的资源是否有病毒呢?这就需要文件的一致性校验了。在我们下载一个软件时,往往都带有一个MD5或者shax值,当我们下载完成这个应用程序时你要是对比大小根本看不出什么问题,你应该对比他们的md5值,如果两个md5值相同,就证明这个应用程序是安全的,如果你下载的这个文件的MD5值与服务端给你提供的不同,那么就证明你这个应用程序肯定是植入病毒了(文件损坏的几率很低),那么你就应该赶紧删除,不应该安装此应用程序。
我们之前说过,md5计算的就是bytes类型的数据的转换值,同一个bytes数据用同样的加密方式转化成的结果一定相同,如果不同的bytes数据(即使一个数据只是删除了一个空格)那么用同样的加密方式转化成的结果一定是不同的。所以,hashlib也是验证文件一致性的重要工具。
'''
## 3.1 File-checksum function, naive version
# Create a small fixture file for the demo below.  Bug fix: use a context
# manager so the handle is closed even if the write fails (the original
# open/write/close sequence leaked the handle on error).
with open('hashlib_file1', 'w') as f:
    f.write('abcd')
def func(file):
    """Return the hex MD5 digest of *file*, reading it in one shot."""
    with open(file, mode='rb') as handle:
        return hashlib.md5(handle.read()).hexdigest()
print(func('hashlib_file1'))
'''
这样就可以计算此文件的MD5值,从而进行文件校验。但是这样写有一个问题,类似我们文件的改的操作,有什么问题?如果文件过大,全部读取出来直接就会撑爆内存的,所以我们要分段读取,那么分段读取怎么做呢?
'''
## 3.2 Hashing a file in chunks with hashlib
### hash everything at once
md5obj = hashlib.md5()
md5obj.update('一山是个大帅哥'.encode('utf-8'))
print(md5obj.hexdigest()) #ffe423b0b5b717c937be394c6860a6c0
### hash chunk by chunk — the accumulated digest is identical
md5obj = hashlib.md5()
md5obj.update('一山'.encode('utf-8'))
md5obj.update('是'.encode('utf-8'))
md5obj.update('个'.encode('utf-8'))
md5obj.update('大'.encode(('utf-8')))
md5obj.update('帅'.encode('utf-8'))
md5obj.update('哥'.encode('utf-8'))
print(md5obj.hexdigest()) #ffe423b0b5b717c937be394c6860a6c0
### 文件校验函数 高大上版
def file_check(file_path):
    """Return the hex SHA-256 of *file_path*, hashing 1 KiB chunks so that
    arbitrarily large files never have to fit in memory."""
    hasher = hashlib.sha256()
    with open(file_path, mode='rb') as stream:
        while True:
            block = stream.read(1024)
            if not block:
                return hasher.hexdigest()
            hasher.update(block)
print(file_check(r'D:\科研软件\geek.exe'))
#练习:注册后保存用户信息,登录时验证
def get_md5(username, password):
    """Hex MD5 of username concatenated with password (the stored credential)."""
    hasher = hashlib.md5(username.encode('utf-8'))
    hasher.update(password.encode('utf-8'))
    return hasher.hexdigest()
def register(username, password):
    """Register *username*: append its credential hash to 'login' and the
    name to 'user', unless the name is already present in 'user'.

    The append-mode opens create both files on first use.
    """
    digest = get_md5(username, password)
    with open('login', mode='a', encoding='utf-8') as login_out, \
            open('user', mode='a', encoding='utf-8') as user_out, \
            open('user', mode='r', encoding='utf-8') as user_in:
        known = [entry.strip() for entry in user_in]
        if username in known:
            print('已注册过,请登录')
        else:
            login_out.write(digest + '\n')
            user_out.write(username + '\n')
def login(username, password):
    """Return True when the credential hash is present in the 'login' file;
    fall through (returning None) otherwise."""
    candidate = get_md5(username, password)
    with open('login', mode='rt', encoding='utf-8') as stored_file:
        for stored in stored_file:
            if stored.strip() == candidate:
                return True
def main():
    """Interactive console loop: 1=register, 2=login, 3=quit."""
    while True:
        judge = input('1.注册 2.登录 3.退出:')
        # only accept the menu numbers 1/2/3
        if judge.isdigit() and int(judge) in (1,2,3):
            if int(judge)==3:
                print('quit~')
                break
            elif int(judge)==1:
                username = input('username:')
                password = input('password:')
                register(username,password)
            elif int(judge)==2:
                username = input('username:')
                password = input('password:')
                # a name absent from the 'user' file cannot log in
                with open('user',mode='r',encoding='utf-8') as f2:
                    lst1 = []
                    for u in f2:
                        lst1.append(u.strip())
                    if username not in lst1:
                        print('please register first!')
                    else:
                        res = login(username,password)
                        if res==True:
                            print('login successfully!')
                        else:
                            print('username or password error. \n')
        else:
            print("你必须输入给定的序号!")
if __name__ == '__main__':
    main()
|
# Batch job: for each TPC-H scale factor and chunk file, load the customer
# table from HDFS, round C_ACCTBAL to 3 decimals via Spark SQL, and rewrite
# the chunk under a parallel "tbl-parquet-rounded" directory tree.
import time
from pyspark.sql import SQLContext
from pyspark import SparkContext, SparkConf
# NOTE(review): 'spark.memory.storage' does not look like a real Spark key —
# presumably 'spark.memory.storageFraction' was intended; confirm.
conf = SparkConf().set('spark.memory.fraction', '1.0').set('spark.memory.storage', '0.0').set('spark.sql.exchange.reuse', False)
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
sqlContext.clearCache()
for SF in (100, 300):
    # Each scale factor SF is split into SF/10 chunk files.
    for index in range(1, int(SF/10) + 1):
        basePath = f"hdfs://IP:8020/tpch/tbl-parquet/tpc_h_SF_{SF}/"
        # Create table views (only customer is used by the query below; the
        # remaining TPC-H tables are kept commented out for reference)
        customer = sqlContext.read.option("basePath", basePath).parquet(basePath + f"customer/customer.tbl.{index}")
        customer.createOrReplaceTempView("customer")
        # lineitem = sqlContext.read.option("basePath", basePath).parquet(basePath + f"lineitem/lineitem.tbl.{index}")
        # lineitem.createOrReplaceTempView("lineitem")
        # nation = sqlContext.read.option("basePath", basePath).parquet(basePath + f"nation/nation.tbl.{index}")
        # nation.createOrReplaceTempView("nation")
        # orders = sqlContext.read.option("basePath", basePath).parquet(basePath + f"orders/orders.tbl.{index}")
        # orders.createOrReplaceTempView("orders")
        # part = sqlContext.read.option("basePath", basePath).parquet(basePath + f"part/part.tbl.{index}")
        # part.createOrReplaceTempView("part")
        # partsupp = sqlContext.read.option("basePath", basePath).parquet(basePath + f"partsupp/partsupp.tbl.{index}")
        # partsupp.createOrReplaceTempView("partsupp")
        # region = sqlContext.read.option("basePath", basePath).parquet(basePath + f"region/region.tbl.{index}")
        # region.createOrReplaceTempView("region")
        # supplier = sqlContext.read.option("basePath", basePath).parquet(basePath + f"supplier/supplier.tbl.{index}")
        # supplier.createOrReplaceTempView("supplier")
        # Start and time the query
        # NOTE(review): 'start' is never read afterwards — the timing is
        # started but never reported.
        start = time.time()
        dataframe = sqlContext.sql("""select
        C_CUSTKEY,
        C_NAME,
        C_ADDRESS,
        C_NATIONKEY,
        C_PHONE,
        round(C_ACCTBAL, 3) as C_ACCTBAL,
        C_MKTSEGMENT,
        C_COMMENT
        from
        customer;""")
        out_file = basePath.replace("tbl-parquet", "tbl-parquet-rounded")
        dataframe.write.mode("overwrite").parquet(out_file + f"customer/customer.tbl.{index}")
|
def intersection1(list1, list2):
    """Shortest, most practical way to intersect two lists.

    Duplicates are removed and the result order is unspecified, because the
    work happens through sets.
    """
    return list(set(list1).intersection(set(list2)))
def intersection2(list1, list2):
    """Keep every element of list1 that also appears in list2.

    Unlike intersection1/intersection3, duplicates from *list1* are preserved
    (the original docstring wrongly said duplicates come from list2).
    Elements must be hashable, as in the sibling helpers.
    """
    # Hoist list2 into a set: O(1) membership instead of O(len(list2)) per test.
    members = set(list2)
    return [x for x in list1 if x in members]
def intersection3(list1, list2):
    """Fast, memory-efficient intersection for long lists.

    Builds a set only from the shorter input; duplicates are removed and
    result order is unspecified.
    """
    if len(list1) < len(list2):
        smaller, larger = list1, list2
    else:
        smaller, larger = list2, list1
    return list(set(smaller).intersection(larger))
|
"""
MIT License
Copyright (c) 2018 Rafael Felix Alves
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import h5py
import numpy as np
from .experiments import AverageMeter
from .tensors import NpJSONEncoder
class Json(object):
    """Thin helpers to persist/load a dict as a JSON file."""
    def __init__(self):
        pass
    @classmethod
    def save(cls, obj, basefile, indent=4):
        '''
        Save a dict structure as a json file.
        :param obj: dict to save (not modified)
        :param basefile: output file name
        :param indent: JSON indent, default 4
        '''
        from json import dump
        import numpy as np
        # Bug fix: the original aliased the caller's dict (obj_ = obj) and
        # overwrote its ndarray fields in place; work on a shallow copy so
        # the caller's data is left untouched.
        obj_ = dict(obj)
        for field in obj_.keys():
            if isinstance(obj_[field], np.ndarray):
                obj_[field] = obj_[field].tolist()
        with open(basefile, 'w') as out:
            dump(obj_, out, sort_keys=True, indent=indent)
    @classmethod
    def load(cls, basefile):
        '''
        Load a json file as a dict.
        :param basefile: filename
        :return: dict
        '''
        from json import load
        with open(basefile, 'r') as out:
            jload = load(out)
        return jload
class DataH5py:
    """Recursively save/load nested dicts (scalars, strings, arrays, lists,
    AverageMeter, Container and Bunch values) to/from an HDF5 file."""
    def __init__(self):
        # Scalar/array types that can be written directly to HDF5.
        # Bug fix: np.int, np.float, np.unicode and np.float128 were removed
        # from modern NumPy and raised AttributeError here.  The abstract
        # np.integer / np.floating cover every sized NumPy scalar, and the
        # builtin float restores what np.float used to alias.
        self.supported_types = (np.ndarray, int, float, str, bytes,
                                np.integer, np.floating)
    def save(self, dic, filename):
        """Alias of save_dict_to_hdf5."""
        self.save_dict_to_hdf5(dic, filename)
    def load(self, filename):
        """Alias of load_dict_from_hdf5."""
        return self.load_dict_from_hdf5(filename)
    def save_dict_to_hdf5(self, dic, filename):
        """Write the nested dict *dic* to a fresh HDF5 file *filename*."""
        with h5py.File(filename, 'w') as h5file:
            self.recursively_save_dict_contents_to_group(h5file, '/', dic)
    def recursively_save_dict_contents_to_group(self, h5file, path, dic):
        """Write *dic* under group *path*, recursing into nested containers."""
        for key, item in dic.items():
            # Normalize keys and int items to str, as the original did
            # (np.unicode was simply an alias of str).
            if isinstance(key, (int, str)):
                key = str(key)
            if isinstance(item, (int, str)):
                item = str(item)
            if isinstance(item, self.supported_types):
                if isinstance(item, np.ndarray) and item.size > 1:
                    if isinstance(item[0], str):
                        # Unicode arrays must be stored as fixed-width bytes.
                        h5file.create_dataset('{}{}'.format(path, key),
                                              data=np.array(item, dtype='S'))
                    else:
                        h5file['{}{}'.format(path, key)] = item
                else:
                    h5file['{}{}'.format(path, key)] = item
            elif isinstance(item, AverageMeter):
                h5file['{}{}'.format(path, key)] = item.get_list()
            # TODO: better treatment for lists. Preferably, more general.
            elif isinstance(item, list):
                value = np.array([str(subitem) for subitem in item])
                h5file[path + key] = value
            elif isinstance(item, dict):
                self.recursively_save_dict_contents_to_group(h5file,
                                                             path + key + '/', item)
            elif isinstance(item, (Container, Bunch)):
                self.recursively_save_dict_contents_to_group(h5file,
                                                             path + key + '/', item.as_dict())
            elif item is None:
                pass
            else:
                raise ValueError('Cannot save {}:{} type'.format(key, type(item)))
    def load_dict_from_hdf5(self, filename):
        """Load *filename* back into a nested dict."""
        with h5py.File(filename, 'r') as h5file:
            return self.recursively_load_dict_contents_from_group(h5file, '/')
    def recursively_load_dict_contents_from_group(self, h5file, path):
        """Read group *path* into a dict, recursing into sub-groups."""
        ans = {}
        for key, item in h5file[path].items():
            if isinstance(item, h5py._hl.dataset.Dataset):
                # Bug fix: Dataset.value was removed in h5py 3.0;
                # item[()] is the supported equivalent.
                ans[key] = item[()]
            elif isinstance(item, h5py._hl.group.Group):
                ans[key] = self.recursively_load_dict_contents_from_group(h5file,
                                                                          path + key + '/')
        return ans
class Bunch(dict):
    """Container object for datasets (copied from scikit-learn).

    Dictionary-like object that exposes its keys as attributes.
    """
    def __init__(self, **kwargs):
        super(Bunch, self).__init__(kwargs)
    def __setattr__(self, key, value):
        # Attributes are stored in the mapping itself, never in __dict__.
        self[key] = value
    def __dir__(self):
        return self.keys()
    def as_dict(self):
        # Bug fix: the original returned self.__dict__, which stays empty
        # because __setattr__ writes into the mapping — so as_dict() always
        # yielded {}.  Return the actual contents instead.
        return dict(self)
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)
    def __setstate__(self, state):
        # Bunch pickles generated with scikit-learn 0.16.* have an non
        # empty __dict__. This causes a surprising behaviour when
        # loading these pickles scikit-learn 0.17: reading bunch.key
        # uses __dict__ but assigning to bunch.key use __setattr__ and
        # only changes bunch['key']. More details can be found at:
        # https://github.com/scikit-learn/scikit-learn/issues/6196.
        # Overriding __setstate__ to be a noop has the effect of
        # ignoring the pickled __dict__
        pass
class Container(object):
    """Recursively wrap a dict so its keys become attributes; dicts inside
    lists/tuples are wrapped too (tuples come back as lists)."""
    def __init__(self, data):
        for name, payload in data.items():
            if isinstance(payload, (list, tuple)):
                wrapped = [Container(entry) if isinstance(entry, dict) else entry
                           for entry in payload]
                setattr(self, name, wrapped)
            else:
                setattr(self, name,
                        Container(payload) if isinstance(payload, dict) else payload)
    def as_dict(self, dtype=None):
        """Unwrap back into a dict.  With *dtype*, both keys and values are
        passed through dtype(); otherwise nested Containers are unwrapped
        recursively (values inside lists are returned as-is)."""
        if dtype:
            return {dtype(name): dtype(payload)
                    for name, payload in self.__dict__.items()}
        unwrapped = {}
        for name, payload in self.__dict__.items():
            unwrapped[name] = payload.as_dict() if isinstance(payload, Container) else payload
        return unwrapped
    def get(self, key):
        """Value stored under *key*."""
        return self.__dict__[key]
    def keys(self):
        """Attribute names as a list."""
        return list(self.__dict__.keys())
    def items(self):
        """(name, value) pairs as a list."""
        return list(self.__dict__.items())
def hdf2mat(src_, dst_):
    """Convert an HDF5 dict dump at *src_* into one .mat file per top-level
    key, written under directory *dst_*."""
    from scipy.io import savemat
    payload = DataH5py().load_dict_from_hdf5(src_)
    for name in payload.keys():
        savemat('{}/{}'.format(dst_, name), {name: payload[name]})
class Dict_Average_Meter(object):
    """A tree of AverageMeter objects addressed by 'a/b/c' namespaces, with
    JSON/HDF5 export and flattened snapshot views."""
    def __init__(self):
        pass
    def save(self, fname, save_type='json', flatten=False):
        """Write all meters to *fname* as JSON ('json') or HDF5 ('h5py')."""
        if save_type == 'json':
            import json
            with open(fname, 'w') as fp:
                fp.write(json.dumps(self.as_dict(flatten=flatten),
                                    cls=NpJSONEncoder,
                                    indent=4))
        elif save_type == 'h5py':
            DataH5py().save_dict_to_hdf5(dic=self.as_dict(wobj=True),
                                         filename=fname)
    def __repr__(self):
        return str(self.as_dict())
    def __str__(self):
        return str(self.as_dict())
    def get_iter(self, itr=-1, flatten=True, single_1d=True):
        """Flattened {'a_b_c': value} snapshot of iteration *itr* of every
        meter.  NOTE(review): when single_1d is True, values whose size is
        not exactly 1 are silently dropped from the snapshot."""
        _out = {}
        def build_flatten(_data, _key=False):
            # Keys of nested levels are joined with underscores.
            if not _key:
                _key = '{}'
            else:
                _key += '_{}'
            for key, item in _data.items():
                if isinstance(item, dict):
                    build_flatten(item, _key.format(key))
                elif isinstance(item, AverageMeter):
                    if single_1d:
                        if np.array(item.get_iter(itr)).size == 1:
                            _out[_key.format(key)] = item.get_iter(itr)
                    else:
                        _out[_key.format(key)] = np.array(item.get_iter(itr))
        build_flatten(self.__dict__)
        return _out
    def as_dict(self, wobj=False, flatten=False):
        """Return the meters as dicts: the raw attribute tree (wobj), a single
        flattened level (flatten), or a nested dict of meter lists (default)."""
        __flatten_dict__ = {}
        def build_print(_data):
            # Nested dict of each meter's full value list.
            _dict = {}
            for key, item in _data.items():
                if isinstance(item, dict):
                    _dict[key] = build_print(item)
                elif isinstance(item, AverageMeter):
                    _dict[key] = item.get_list()
            return _dict
        def build_flatten(_data, _key):
            # Single-level dict with underscore-joined keys.
            if not _key:
                _key = '{}'
            else:
                _key += '_{}'
            for key, item in _data.items():
                if isinstance(item, dict):
                    build_flatten(item, _key.format(key))
                elif isinstance(item, AverageMeter):
                    __flatten_dict__[_key.format(key)] = item.get_list()
        if wobj:
            return self.__dict__
        elif flatten:
            build_flatten(self.__dict__, False)
            return __flatten_dict__
        else:
            return build_print(self.__dict__)
    def __set_dict__(self, data):
        """Bulk-copy a plain dict into the attribute tree."""
        for key, value in data.items():
            self.__dict__[key] = value
    def get_meter(self, data):
        """Return the AverageMeter at namespace *data*, creating it first if
        it does not exist yet."""
        if self.get_subparam(self.__dict__, data) is False:
            self.set_meter(data)
            return self.get_subparam(self.__dict__, data)
        else:
            return self.get_subparam(self.__dict__, data)
    def get_param(self, data):
        """Look up namespace *data*; False when any level is missing."""
        return self.get_subparam(self.__dict__, data)
    def get_subparam(self, tree, data):
        """Recursively walk *tree* along the '/'-separated path *data*."""
        levels = data.split('/')
        if(len(levels) > 1):
            if levels[0] in tree:
                return self.get_subparam(tree[levels[0]], '/'.join(levels[1:]))
            else:
                return False
        else:
            if data in tree:
                return tree[data]
            else:
                return False
    def contains(self, namespace):
        """True when *namespace* is a top-level key."""
        return namespace in self.__dict__
    def set_meter(self, namespace):
        """Create an AverageMeter at *namespace*, building intermediate dict
        levels as needed (an existing leaf is overwritten)."""
        levels = namespace.split('/')
        last = len(levels)-1
        tree = self.__dict__
        for key, _level in enumerate(levels):
            if _level in tree:
                if key != last:
                    tree = tree[_level]
                else:
                    tree[_level] = AverageMeter()
            else:
                if key != last:
                    tree[_level] = {}
                    tree = tree[_level]
                else:
                    tree[_level] = AverageMeter()
    def set_param(self, namespace, data):
        """Store *data* at *namespace*, building intermediate dict levels as
        needed (an existing leaf is overwritten)."""
        levels = namespace.split('/')
        last = len(levels)-1
        tree = self.__dict__
        for key, _level in enumerate(levels):
            if _level in tree:
                if key != last:
                    tree = tree[_level]
                else:
                    tree[_level] = data
            else:
                if key != last:
                    tree[_level] = {}
                    tree = tree[_level]
                else:
                    tree[_level] =data
    def update_meters(self, _base, data):
        """Update the meter at '_base/key' with each value in *data*."""
        for key, item in data.items():
            self.get_meter('{}/{}'.format(_base, key)).update(item)
class DictContainer(object):
    """Attribute-backed container addressed by 'a/b/c'-style namespaces."""
    def __init__(self):
        pass
    def items(self):
        """(key, value) pairs of the top level."""
        return self.__dict__.items()
    def keys(self):
        """Top-level keys."""
        return self.__dict__.keys()
    def as_dict(self):
        """The underlying attribute dict."""
        return self.__dict__
    def __set_dict__(self, data):
        """Bulk-copy *data* into the top level."""
        for name, payload in data.items():
            self.__dict__[name] = payload
    def get_param(self, data):
        """Look up the '/'-separated namespace *data*; False when missing."""
        return self.get_subparam(self.__dict__, data)
    def get_subparam(self, tree, data):
        """Walk *tree* along the '/'-separated path *data*; False on a miss."""
        node = tree
        for part in data.split('/'):
            if part not in node:
                return False
            node = node[part]
        return node
    def contains(self, namespace):
        """True when *namespace* is a top-level key."""
        return namespace in self.__dict__
    def set_param(self, namespace, data):
        """Store *data* at *namespace*, creating intermediate dicts as
        needed; an existing leaf is overwritten."""
        parts = namespace.split('/')
        node = self.__dict__
        for depth, part in enumerate(parts):
            if depth == len(parts) - 1:
                node[part] = data
            else:
                if part not in node:
                    node[part] = {}
                node = node[part]
if __name__ == '__main__':
    # Simple banner printed when this utility module is executed directly.
    print('-'*100)
    print(':: Testing file: {}'.format(__file__))
    print('-'*100)
|
from otree.api import (
models,
widgets,
BaseConstants,
BaseSubsession,
BaseGroup,
BasePlayer,
Currency as c,
currency_range,
)
import numpy as np
import random
import json
author = 'Ferley Rincón & Cesar Mantilla'
doc = """
Informalidad Laboral: Movilidad y Observabilidad Laboral
"""
class Constants(BaseConstants):
    # Session-wide constants for the tournament app.
    name_in_url = 'Torneo'
    players_per_group = 4
    num_rounds = 6
    pago_A = c(2000)  # per-word payoff under contract A (see Player.set_pago_ronda)
    pago_B = c(1000)  # per-word payoff under contract B
    # NOTE(review): drawn once at import time, so every session served by this
    # process pays the same round — confirm this is intended.
    ronda_pagar = random.randint(2, num_rounds)
    letters_per_word = 5
    use_timeout = True
    seconds_per_period = 30
class Subsession(BaseSubsession):
    # Treatment flags and the round that will actually be paid.
    meritocracia = models.BooleanField()
    observabilidad = models.BooleanField()
    torneo = models.BooleanField()
    ronda_pagar = models.IntegerField()
    def creating_session(self):
        """Set the initial values for each round (subsession and related
        classes).  oTree runs this at session creation, once per round."""
        self.observabilidad = self.session.config["observabilidad"]
        self.meritocracia = self.session.config["meritocracia"]
        self.ronda_pagar = Constants.ronda_pagar
        # Practice subsession when round 1, tournament (torneo) afterwards.
        self.torneo = self.round_number > 1
    def creating_groups(self):
        """Shuffle players into next round's groups, stratified by contract
        and position (one a1/a2/b1/b2 player per group)."""
        # Build next round's groups at random (should be stratified!)
        players = self.get_players()  # list of Player objects
        num_groups = len(self.get_groups())
        a1, a2, b1, b2 = [], [], [], []
        for i in players:
            if i.contrato_A_torneo:
                if i.posicion_contrato_torneo == 1:
                    a1.append(i.in_round(self.round_number+1))
                else:
                    a2.append(i.in_round(self.round_number+1))
            else:
                if i.posicion_contrato_torneo == 1:
                    b1.append(i.in_round(self.round_number+1))
                else:
                    b2.append(i.in_round(self.round_number+1))
            i.in_round(self.round_number+1).contrato_A = i.contrato_A_torneo
        # NOTE(review): relies on round_number+1 existing — must not be
        # called on the final round.
        matrix = np.c_[a1, a2, b1, b2]
        for i in range(Constants.players_per_group):
            x = np.random.choice(num_groups, num_groups, replace=False)
            matrix[:, i] = matrix[x, i]
        self.in_round(self.round_number+1).set_group_matrix(matrix)
    def sort(self, rank):
        """Sort a {player: words} dict by words, descending; pre-shuffle so
        ties are broken randomly."""
        l = list(rank.items())
        random.shuffle(l)
        rank = dict(l)
        rank = dict(sorted(rank.items(), key=lambda x: x[1], reverse=True))
        return rank
    """This method assigns each player a contract/position from the session ranking"""
    def set_ranking(self):
        """Rank all players (by words when the treatment sorts; at random
        otherwise) and split them into contract A/B halves and positions.

        NOTE(review): keys are 0-based here ('j0', ...), unlike
        Group.set_ranking which uses 1-based keys — confirm intended.
        """
        jugadores = self.get_players()
        rank = {}
        for k, j in enumerate(jugadores):
            rank['j' + str(k)] = j.palabras
        if (self.meritocracia and self.round_number==1) or (self.observabilidad and
                self.round_number!=1):
            rank = self.sort(rank)
        else:
            l = list(rank.items())
            random.shuffle(l)
            rank = dict(l)
        for j, i in enumerate(rank.keys()):
            jugador = jugadores[int(i.split('j')[1])]
            # First half of the players gets contract A
            if j < len(jugadores)//2:
                jugador.contrato_A_torneo = True
                # First half of that half (first quarter) is position 1 in A
                if j < len(jugadores)//4:
                    jugador.posicion_contrato_torneo = 1
                # The remaining quarter is position 2 in A
                else:
                    jugador.posicion_contrato_torneo = 2
            # The other half gets contract B
            else:
                jugador.contrato_A_torneo = False
                # First half of B's half (up to 3/4 overall) is position 1
                if j < 3*len(jugadores)//4:
                    jugador.posicion_contrato_torneo = 1
                else:
                    jugador.posicion_contrato_torneo = 2
            if(self.round_number==1):
                jugador.contrato_A = jugador.contrato_A_torneo
    def set_ranking_grupos(self):
        """Recompute each group's overall and per-contract rankings."""
        for g in self.get_groups():
            g.set_ranking()
            g.set_ranking_contrato()
    def set_posiciones_jugadores(self):
        """Update every player's round payoff, positions and win probability."""
        for j in self.get_players():
            j.set_pago_ronda()
            j.set_posicion_grupo()
            j.set_posicion_contrato()
            j.set_probabilidad_contrato_A()
    def set_pago_jugadores(self):
        """Set every player's final payment."""
        for j in self.get_players():
            j.set_pago()
class Group(BaseGroup):
    """Group state: per-round rankings (overall and per contract) and the
    winner of the contract-A tournament slot."""
    # Only models.* fields are persisted by oTree.
    rank = models.StringField()    # JSON: overall ranking {'jN': words}
    rankA = models.StringField()   # JSON: ranking among contract-A players
    rankB = models.StringField()   # JSON: ranking among contract-B players
    ganador_contrato_A = models.IntegerField(initial=0)
    def get_palabras_torneo(self):
        """Words of A's runner-up plus B's leader (the two disputed slots)."""
        rankA = json.loads(self.rankA)
        rankB = json.loads(self.rankB)
        p2 = list(rankA.values())[1]  # words of the player ranked 2nd in A
        p3 = list(rankB.values())[0]  # words of the player ranked 1st in B
        palabras_torneo = p2 + p3
        return palabras_torneo
    def set_asignar_contrato_A(self):
        """Draw the winner of the contract-A slot between A's runner-up and
        B's leader, weighted by their probabilidad_contrato_A.

        Bug fixes: dict views are not subscriptable in Python 3 (the
        original ``rankA.keys()[1]`` raised TypeError), and random.choices
        returns a *list* of strings, which cannot be stored in an
        IntegerField — take element 0 and convert to int.
        """
        rankA = json.loads(self.rankA)
        rankB = json.loads(self.rankB)
        key_a = list(rankA.keys())[1]   # 2nd place within contract A
        key_b = list(rankB.keys())[0]   # 1st place within contract B
        id_a = key_a.split('j')[1]
        id_b = key_b.split('j')[1]
        p2 = self.get_player_by_id(int(id_a))
        p3 = self.get_player_by_id(int(id_b))
        winner = random.choices([id_a, id_b],
                                weights=(p2.probabilidad_contrato_A,
                                         p3.probabilidad_contrato_A))[0]
        self.ganador_contrato_A = int(winner)
    def sort(self, rank):
        """Sort a {player: words} dict by words, descending; pre-shuffle so
        ties are broken randomly."""
        l = list(rank.items())
        random.shuffle(l)
        rank = dict(l)
        rank = dict(sorted(rank.items(), key=lambda x: x[1], reverse=True))
        return rank
    def set_ranking(self):
        """Store the overall group ranking as JSON in self.rank."""
        jugadores = self.get_players()
        rank = {}
        for k, j in enumerate(jugadores):
            rank['j' + str(k+1)] = j.palabras
        self.rank = json.dumps(self.sort(rank))
    def set_ranking_contrato(self):
        """Store per-contract rankings as JSON in self.rankA / self.rankB."""
        rankA = {}
        rankB = {}
        for k, j in enumerate(self.get_players()):
            if j.contrato_A:
                rankA['j' + str(k+1)] = j.palabras
            else:
                rankB['j' + str(k+1)] = j.palabras
        self.rankA = json.dumps(self.sort(rankA))
        self.rankB = json.dumps(self.sort(rankB))
class Player(BasePlayer):
    """Per-round player state for the tournament."""
    contrato_A = models.BooleanField()                 # currently on contract A
    palabras = models.IntegerField(initial=0)          # words typed this round
    probabilidad_contrato_A = models.FloatField()      # chance of winning the A slot
    contrato_A_torneo = models.BooleanField()          # contract A after the tournament
    posicion_grupo = models.IntegerField()             # 1-4 within the group
    posicion_contrato = models.IntegerField()          # 1-2 within own contract
    posicion_contrato_torneo = models.IntegerField()   # 1-2 after the tournament
    pago_ronda = models.CurrencyField()
    pago = models.CurrencyField()
    mistakes = models.IntegerField(initial=0)
    def set_pago(self):
        """On the last round, pay the earnings of the pre-drawn round
        (subsession.ronda_pagar); zero otherwise."""
        if (self.round_number == Constants.num_rounds):
            ronda = self.subsession.ronda_pagar
            pagos_rondas = []
            for j in self.in_all_rounds():
                pagos_rondas.append(j.pago_ronda)
            self.pago = pagos_rondas[ronda - 1]
        else:
            self.pago = 0
    def set_probabilidad_contrato_A(self):
        """A's leader keeps the slot (p=1); B's last stays in B (p=0); the
        two disputing players get p proportional to words when observable,
        otherwise 0.5."""
        if (self.contrato_A == True and self.posicion_contrato == 1):
            self.probabilidad_contrato_A = 1
        elif (self.contrato_A == False and self.posicion_contrato == 2):
            self.probabilidad_contrato_A = 0
        else:
            if self.subsession.observabilidad == True:
                # NOTE(review): divides by the disputing pair's word total —
                # zero words for both would raise ZeroDivisionError.
                self.probabilidad_contrato_A = self.palabras / self.group.get_palabras_torneo()
            else:
                self.probabilidad_contrato_A = 0.5
    def set_posicion_grupo(self):
        """1-based position of this player in the overall group ranking."""
        rank = json.loads(self.group.rank)
        self.posicion_grupo = list(rank.keys()).index('j' + str(self.id_in_group)) + 1
    def set_posicion_contrato(self):
        """1-based position within this player's own contract ranking."""
        rankA = json.loads(self.group.rankA)
        rankB = json.loads(self.group.rankB)
        if self.contrato_A:
            self.posicion_contrato = list(rankA).index('j' + str(self.id_in_group)) + 1
        else:
            self.posicion_contrato = list(rankB).index('j' + str(self.id_in_group)) + 1
    def set_contrato_A_torneo(self):
        """Resolve next round's contract from the tournament outcome.

        Bug fix: the original called the non-existent
        ``Group.set_ganador_contrato_A()`` (AttributeError) and used its
        return value, but the drawing method is ``set_asignar_contrato_A()``
        and it stores the winner's id in ``group.ganador_contrato_A``.
        """
        # Draw the winner at most once per group: ids are 1-based, so the
        # field's initial value 0 means "not drawn yet".
        if self.group.ganador_contrato_A == 0:
            self.group.set_asignar_contrato_A()
        ganador = self.group.ganador_contrato_A
        if (self.contrato_A == True and self.posicion_contrato == 1) or (self.contrato_A == False and self.posicion_contrato == 2):
            # Not in dispute: keep the current contract.
            self.contrato_A_torneo = self.contrato_A
            if self.posicion_contrato == 1:
                self.posicion_contrato_torneo = 1
            else:
                self.posicion_contrato_torneo = 2
        else:
            if self.id_in_group == int(ganador):
                self.contrato_A_torneo = True
                self.posicion_contrato_torneo = 2
            else:
                self.contrato_A_torneo = False
                self.posicion_contrato_torneo = 1
    def set_pago_ronda(self):
        """Round payoff: the per-word rate depends on the current contract."""
        if (self.contrato_A):
            self.pago_ronda = Constants.pago_A * self.palabras
        else:
            self.pago_ronda = Constants.pago_B * self.palabras
|
"""
Tests of the neo.core.segment.Segment class
"""
from copy import deepcopy
from datetime import datetime
import unittest
import numpy as np
import quantities as pq
try:
from IPython.lib.pretty import pretty
except ImportError as err:
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
from neo.core.segment import Segment
from neo.core import (AnalogSignal, Block, Event, IrregularlySampledSignal,
Epoch, ChannelIndex, SpikeTrain, Unit)
from neo.core.container import filterdata
from neo.test.tools import (assert_neo_object_is_compliant,
assert_same_sub_schema, assert_same_attributes)
from neo.test.generate_datasets import (fake_neo, get_fake_value,
get_fake_values, get_annotations,
clone_object, TEST_ANNOTATIONS)
from neo.rawio.examplerawio import ExampleRawIO
from neo.io.proxyobjects import (AnalogSignalProxy, SpikeTrainProxy,
EventProxy, EpochProxy)
class Test__generate_datasets(unittest.TestCase):
    """Checks that the fake-data helpers build compliant Segment objects."""
    def setUp(self):
        # Fixed seed so the fake values below are reproducible.
        np.random.seed(0)
        self.annotations = {str(x): TEST_ANNOTATIONS[x] for x in
                            range(len(TEST_ANNOTATIONS))}
    def test__get_fake_values(self):
        """get_fake_values must return the same attrs for the Segment class
        and for its name string, with and without annotations."""
        self.annotations['seed'] = 0
        file_datetime = get_fake_value('file_datetime', datetime, seed=0)
        rec_datetime = get_fake_value('rec_datetime', datetime, seed=1)
        index = get_fake_value('index', int, seed=2)
        name = get_fake_value('name', str, seed=3, obj=Segment)
        description = get_fake_value('description', str, seed=4, obj='Segment')
        file_origin = get_fake_value('file_origin', str)
        attrs1 = {'file_datetime': file_datetime,
                  'rec_datetime': rec_datetime,
                  'index': index,
                  'name': name,
                  'description': description,
                  'file_origin': file_origin}
        attrs2 = attrs1.copy()
        attrs2.update(self.annotations)
        res11 = get_fake_values(Segment, annotate=False, seed=0)
        res12 = get_fake_values('Segment', annotate=False, seed=0)
        res21 = get_fake_values(Segment, annotate=True, seed=0)
        res22 = get_fake_values('Segment', annotate=True, seed=0)
        self.assertEqual(res11, attrs1)
        self.assertEqual(res12, attrs1)
        self.assertEqual(res21, attrs2)
        self.assertEqual(res22, attrs2)
    def test__fake_neo__cascade(self):
        """fake_neo with cascade must create one child of each container
        type, each carrying the same annotations as the Segment."""
        self.annotations['seed'] = None
        obj_type = Segment
        cascade = True
        res = fake_neo(obj_type=obj_type, cascade=cascade)
        self.assertTrue(isinstance(res, Segment))
        assert_neo_object_is_compliant(res)
        self.assertEqual(res.annotations, self.annotations)
        self.assertEqual(len(res.analogsignals), 1)
        self.assertEqual(len(res.irregularlysampledsignals), 1)
        self.assertEqual(len(res.spiketrains), 1)
        self.assertEqual(len(res.events), 1)
        self.assertEqual(len(res.epochs), 1)
        # children carry extra bookkeeping annotations not compared below
        for child in res.children:
            del child.annotations['i']
            del child.annotations['j']
        self.assertEqual(res.analogsignals[0].annotations,
                         self.annotations)
        self.assertEqual(res.irregularlysampledsignals[0].annotations,
                         self.annotations)
        self.assertEqual(res.spiketrains[0].annotations,
                         self.annotations)
        self.assertEqual(res.events[0].annotations,
                         self.annotations)
        self.assertEqual(res.epochs[0].annotations,
                         self.annotations)
    def test__fake_neo__nocascade(self):
        """fake_neo without cascade must create a bare, childless Segment."""
        self.annotations['seed'] = None
        obj_type = 'Segment'
        cascade = False
        res = fake_neo(obj_type=obj_type, cascade=cascade)
        self.assertTrue(isinstance(res, Segment))
        assert_neo_object_is_compliant(res)
        self.assertEqual(res.annotations, self.annotations)
        self.assertEqual(len(res.analogsignals), 0)
        self.assertEqual(len(res.irregularlysampledsignals), 0)
        self.assertEqual(len(res.spiketrains), 0)
        self.assertEqual(len(res.events), 0)
        self.assertEqual(len(res.epochs), 0)
class TestSegment(unittest.TestCase):
    def setUp(self):
        """Build a deterministic fake Block and cache its children for reuse."""
        self.nchildren = 2
        blk = fake_neo(Block, seed=0, n=self.nchildren)
        self.unit1, self.unit2, self.unit3, self.unit4 = blk.list_units
        self.seg1, self.seg2 = blk.segments
        self.targobj = self.seg1
        self.seed1 = self.seg1.annotations['seed']
        self.seed2 = self.seg2.annotations['seed']
        # Drop the per-child 'i'/'j' annotations (presumably positional
        # indices added by fake_neo) so annotation comparisons below only see
        # the shared TEST_ANNOTATIONS content.
        del self.seg1.annotations['i']
        del self.seg2.annotations['i']
        del self.seg1.annotations['j']
        del self.seg2.annotations['j']
        # Direct references to each segment's child containers.
        self.sigarrs1 = self.seg1.analogsignals
        self.sigarrs2 = self.seg2.analogsignals
        self.irsigs1 = self.seg1.irregularlysampledsignals
        self.irsigs2 = self.seg2.irregularlysampledsignals
        self.trains1 = self.seg1.spiketrains
        self.trains2 = self.seg2.spiketrains
        self.epcs1 = self.seg1.epochs
        self.epcs2 = self.seg2.epochs
        self.evts1 = self.seg1.events
        self.evts2 = self.seg2.events
        self.img_seqs1 = self.seg1.imagesequences
        self.img_seqs2 = self.seg2.imagesequences
        # Independent clones of seg1's children, used as comparison targets.
        self.sigarrs1a = clone_object(self.sigarrs1, n=2)
        self.irsigs1a = clone_object(self.irsigs1)
        self.trains1a = clone_object(self.trains1)
        self.epcs1a = clone_object(self.epcs1)
        self.evts1a = clone_object(self.evts1)
        self.img_seqs1a = clone_object(self.img_seqs1)
def test_init(self):
seg = Segment(name='a segment', index=3)
assert_neo_object_is_compliant(seg)
self.assertEqual(seg.name, 'a segment')
self.assertEqual(seg.file_origin, None)
self.assertEqual(seg.index, 3)
    def check_creation(self, seg):
        """Assert *seg* matches the deterministic fake_neo output for its seed."""
        assert_neo_object_is_compliant(seg)
        seed = seg.annotations['seed']
        # Each attribute is regenerated from seed + its attribute position.
        targ0 = get_fake_value('file_datetime', datetime, seed=seed + 0)
        self.assertEqual(seg.file_datetime, targ0)
        targ1 = get_fake_value('rec_datetime', datetime, seed=seed + 1)
        self.assertEqual(seg.rec_datetime, targ1)
        targ2 = get_fake_value('index', int, seed=seed + 2)
        self.assertEqual(seg.index, targ2)
        targ3 = get_fake_value('name', str, seed=seed + 3, obj=Segment)
        self.assertEqual(seg.name, targ3)
        targ4 = get_fake_value('description', str,
                               seed=seed + 4, obj=Segment)
        self.assertEqual(seg.description, targ4)
        targ5 = get_fake_value('file_origin', str)
        self.assertEqual(seg.file_origin, targ5)
        targ6 = get_annotations()
        targ6['seed'] = seed
        self.assertEqual(seg.annotations, targ6)
        # Child containers must exist and hold the expected number of children.
        self.assertTrue(hasattr(seg, 'analogsignals'))
        self.assertTrue(hasattr(seg, 'irregularlysampledsignals'))
        self.assertTrue(hasattr(seg, 'epochs'))
        self.assertTrue(hasattr(seg, 'events'))
        self.assertTrue(hasattr(seg, 'spiketrains'))
        self.assertEqual(len(seg.analogsignals), self.nchildren)
        self.assertEqual(len(seg.irregularlysampledsignals), self.nchildren)
        self.assertEqual(len(seg.epochs), self.nchildren)
        self.assertEqual(len(seg.events), self.nchildren)
        # fake_neo builds nchildren trains per unit, hence the square.
        self.assertEqual(len(seg.spiketrains), self.nchildren ** 2)
def test__creation(self):
self.check_creation(self.seg1)
self.check_creation(self.seg2)
    def test_times(self):
        """Segment.t_start/t_stop must span all children's time ranges."""
        for seg in [self.seg1, self.seg2]:
            # calculate target values for t_start and t_stop
            t_starts, t_stops = [], []
            for children in [seg.analogsignals,
                             seg.epochs,
                             seg.events,
                             seg.irregularlysampledsignals,
                             seg.spiketrains]:
                for child in children:
                    if hasattr(child, 't_start'):
                        t_starts.append(child.t_start)
                    if hasattr(child, 't_stop'):
                        t_stops.append(child.t_stop)
                    if hasattr(child, 'time'):
                        # A scalar timestamp bounds the range on both sides.
                        t_starts.append(child.time)
                        t_stops.append(child.time)
                    if hasattr(child, 'times'):
                        # Events/Epochs: first/last timestamps bound the range.
                        t_starts.append(child.times[0])
                        t_stops.append(child.times[-1])
            targ_t_start = min(t_starts)
            targ_t_stop = max(t_stops)
            self.assertEqual(seg.t_start, targ_t_start)
            self.assertEqual(seg.t_stop, targ_t_stop)
    def test__merge(self):
        """Merging seg2 into a fresh copy of seg1 concatenates the child lists."""
        # Regenerate seg1 from its seed so the merge does not mutate self.seg1.
        seg1a = fake_neo(Block, seed=self.seed1, n=self.nchildren).segments[0]
        assert_same_sub_schema(self.seg1, seg1a)
        seg1a.epochs.append(self.epcs2[0])
        seg1a.merge(self.seg2)
        assert_same_sub_schema(self.sigarrs1a + self.sigarrs2,
                               seg1a.analogsignals)
        assert_same_sub_schema(self.irsigs1a + self.irsigs2,
                               seg1a.irregularlysampledsignals)
        # NOTE(review): epcs2[0] was appended above, yet the expected result
        # is still just epcs1 + epcs2 -- presumably merge deduplicates;
        # confirm against Container.merge.
        assert_same_sub_schema(self.epcs1 + self.epcs2, seg1a.epochs)
        assert_same_sub_schema(self.evts1 + self.evts2, seg1a.events)
        assert_same_sub_schema(self.trains1 + self.trains2, seg1a.spiketrains)
    def test__children(self):
        """Verify Segment's container/parent metadata and child access helpers."""
        blk = Block(name='block1')
        blk.segments = [self.seg1]
        blk.create_many_to_one_relationship(force=True)
        assert_neo_object_is_compliant(self.seg1)
        assert_neo_object_is_compliant(blk)
        # Expected class-level descriptions of Segment's children.
        childobjs = ('AnalogSignal',
                     'Epoch', 'Event',
                     'IrregularlySampledSignal',
                     'SpikeTrain',
                     'ImageSequence')
        childconts = ('analogsignals',
                      'epochs', 'events',
                      'irregularlysampledsignals',
                      'spiketrains',
                      'imagesequences')
        self.assertEqual(self.seg1._container_child_objects, ())
        self.assertEqual(self.seg1._data_child_objects, childobjs)
        self.assertEqual(self.seg1._single_parent_objects, ('Block',))
        self.assertEqual(self.seg1._multi_child_objects, ())
        self.assertEqual(self.seg1._multi_parent_objects, ())
        self.assertEqual(self.seg1._child_properties, ())
        self.assertEqual(self.seg1._single_child_objects, childobjs)
        self.assertEqual(self.seg1._container_child_containers, ())
        self.assertEqual(self.seg1._data_child_containers, childconts)
        self.assertEqual(self.seg1._single_child_containers, childconts)
        self.assertEqual(self.seg1._single_parent_containers, ('block',))
        self.assertEqual(self.seg1._multi_child_containers, ())
        self.assertEqual(self.seg1._multi_parent_containers, ())
        self.assertEqual(self.seg1._child_objects, childobjs)
        self.assertEqual(self.seg1._child_containers, childconts)
        self.assertEqual(self.seg1._parent_objects, ('Block',))
        self.assertEqual(self.seg1._parent_containers, ('block',))
        # Total number of data children created by setUp's fake_neo call.
        totchildren = (self.nchildren * 2 +  # epoch/event
                       self.nchildren +  # analogsignal
                       self.nchildren ** 2 +  # spiketrain
                       self.nchildren +  # irregsignal
                       self.nchildren)  # imagesequence
        self.assertEqual(len(self.seg1._single_children), totchildren)
        self.assertEqual(len(self.seg1.data_children), totchildren)
        self.assertEqual(len(self.seg1.children), totchildren)
        self.assertEqual(len(self.seg1.data_children_recur), totchildren)
        self.assertEqual(len(self.seg1.children_recur), totchildren)
        # Segment has no container children, so these are all empty.
        self.assertEqual(len(self.seg1._multi_children), 0)
        self.assertEqual(len(self.seg1.container_children), 0)
        self.assertEqual(len(self.seg1.container_children_recur), 0)
        children = (self.sigarrs1a +
                    self.epcs1a + self.evts1a +
                    self.irsigs1a +
                    self.trains1a +
                    self.img_seqs1a)
        assert_same_sub_schema(list(self.seg1._single_children), children)
        assert_same_sub_schema(list(self.seg1.data_children), children)
        assert_same_sub_schema(list(self.seg1.data_children_recur), children)
        assert_same_sub_schema(list(self.seg1.children), children)
        assert_same_sub_schema(list(self.seg1.children_recur), children)
        self.assertEqual(len(self.seg1.parents), 1)
        self.assertEqual(self.seg1.parents[0].name, 'block1')
def test__size(self):
targ1 = {"epochs": self.nchildren, "events": self.nchildren,
"irregularlysampledsignals": self.nchildren,
"spiketrains": self.nchildren ** 2,
"analogsignals": self.nchildren,
"imagesequences": self.nchildren}
self.assertEqual(self.targobj.size, targ1)
    def test__filter_none(self):
        """filter() with no criteria returns every data child."""
        targ = []
        # collecting all data objects in target block
        targ.extend(self.targobj.analogsignals)
        targ.extend(self.targobj.epochs)
        targ.extend(self.targobj.events)
        targ.extend(self.targobj.irregularlysampledsignals)
        targ.extend(self.targobj.spiketrains)
        targ.extend(self.targobj.imagesequences)
        # Every empty-criteria spelling must behave the same.
        res0 = self.targobj.filter()
        res1 = self.targobj.filter({})
        res2 = self.targobj.filter([])
        res3 = self.targobj.filter([{}])
        res4 = self.targobj.filter([{}, {}])
        res5 = self.targobj.filter([{}, {}])  # NOTE(review): duplicates res4
        res6 = self.targobj.filter(targdict={})
        res7 = self.targobj.filter(targdict=[])
        res8 = self.targobj.filter(targdict=[{}])
        res9 = self.targobj.filter(targdict=[{}, {}])
        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
        assert_same_sub_schema(res3, targ)
        assert_same_sub_schema(res4, targ)
        assert_same_sub_schema(res5, targ)
        assert_same_sub_schema(res6, targ)
        assert_same_sub_schema(res7, targ)
        assert_same_sub_schema(res8, targ)
        assert_same_sub_schema(res9, targ)
    def test__filter_annotation_single(self):
        """Filtering on annotation j=0 selects the matching children."""
        targ = (self.sigarrs1a +
                [self.epcs1a[0]] +
                [self.evts1a[0]] +
                self.irsigs1a +
                self.trains1a +
                [self.img_seqs1a[0]])
        res0 = self.targobj.filter(j=0)
        res1 = self.targobj.filter({'j': 0})
        res2 = self.targobj.filter(targdict={'j': 0})
        res3 = self.targobj.filter([{'j': 0}])
        res4 = self.targobj.filter(targdict=[{'j': 0}])
        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
        assert_same_sub_schema(res3, targ)
        assert_same_sub_schema(res4, targ)
    def test__filter_single_annotation_nores(self):
        """An annotation value no child has yields an empty result."""
        targ = []
        res0 = self.targobj.filter(j=5)
        res1 = self.targobj.filter({'j': 5})
        res2 = self.targobj.filter(targdict={'j': 5})
        res3 = self.targobj.filter([{'j': 5}])
        res4 = self.targobj.filter(targdict=[{'j': 5}])
        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
        assert_same_sub_schema(res3, targ)
        assert_same_sub_schema(res4, targ)
    def test__filter_attribute_single(self):
        """Filtering on a unique name attribute selects that one child."""
        targ = [self.epcs1a[1]]
        res0 = self.targobj.filter(name=self.epcs1a[1].name)
        res1 = self.targobj.filter({'name': self.epcs1a[1].name})
        res2 = self.targobj.filter(targdict={'name': self.epcs1a[1].name})
        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
    def test__filter_attribute_single_nores(self):
        """A name that belongs to another segment matches nothing here."""
        targ = []
        res0 = self.targobj.filter(name=self.epcs2[0].name)
        res1 = self.targobj.filter({'name': self.epcs2[0].name})
        res2 = self.targobj.filter(targdict={'name': self.epcs2[0].name})
        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
    def test__filter_multi(self):
        """Multiple criteria in one call are OR-combined across children."""
        targ = (self.sigarrs1a +
                [self.epcs1a[0]] +
                [self.evts1a[0]] +
                self.irsigs1a +
                self.trains1a +
                [self.img_seqs1a[0]] +
                [self.epcs1a[1]])
        res0 = self.targobj.filter(name=self.epcs1a[1].name, j=0)
        res1 = self.targobj.filter({'name': self.epcs1a[1].name, 'j': 0})
        res2 = self.targobj.filter(targdict={'name': self.epcs1a[1].name,
                                             'j': 0})
        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
    def test__filter_multi_nores(self):
        """Combinations where no criterion matches yield empty results."""
        targ = []
        res0 = self.targobj.filter([{'j': 5}, {}])
        res1 = self.targobj.filter({}, ttype=6)
        res2 = self.targobj.filter([{}], ttype=6)
        res3 = self.targobj.filter({'name': self.epcs1a[1].name}, j=0)
        res4 = self.targobj.filter(targdict={'name': self.epcs1a[1].name},
                                   j=0)
        res5 = self.targobj.filter(name=self.epcs1a[1].name,
                                   targdict={'j': 0})
        res6 = self.targobj.filter(name=self.epcs2[0].name, j=5)
        res7 = self.targobj.filter({'name': self.epcs2[1].name, 'j': 5})
        res8 = self.targobj.filter(targdict={'name': self.epcs2[1].name,
                                             'j': 5})
        res9 = self.targobj.filter({'name': self.epcs2[1].name}, j=5)
        res10 = self.targobj.filter(targdict={'name': self.epcs2[1].name},
                                    j=5)
        res11 = self.targobj.filter(name=self.epcs2[1].name,
                                    targdict={'j': 5})
        res12 = self.targobj.filter({'name': self.epcs1a[1].name}, j=5)
        res13 = self.targobj.filter(targdict={'name': self.epcs1a[1].name},
                                    j=5)
        res14 = self.targobj.filter(name=self.epcs1a[1].name,
                                    targdict={'j': 5})
        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
        assert_same_sub_schema(res3, targ)
        assert_same_sub_schema(res4, targ)
        assert_same_sub_schema(res5, targ)
        assert_same_sub_schema(res6, targ)
        assert_same_sub_schema(res7, targ)
        assert_same_sub_schema(res8, targ)
        assert_same_sub_schema(res9, targ)
        assert_same_sub_schema(res10, targ)
        assert_same_sub_schema(res11, targ)
        assert_same_sub_schema(res12, targ)
        assert_same_sub_schema(res13, targ)
        assert_same_sub_schema(res14, targ)
    def test__filter_multi_partres(self):
        """When only one criterion matches, only its children are returned."""
        targ = [self.epcs1a[1]]
        res0 = self.targobj.filter(name=self.epcs1a[1].name, j=5)
        res1 = self.targobj.filter({'name': self.epcs1a[1].name, 'j': 5})
        res2 = self.targobj.filter(targdict={'name': self.epcs1a[1].name,
                                             'j': 5})
        res3 = self.targobj.filter([{'j': 1}, {'i': 1}])
        res4 = self.targobj.filter({'j': 1}, i=1)
        res5 = self.targobj.filter([{'j': 1}], i=1)
        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
        assert_same_sub_schema(res3, targ)
        assert_same_sub_schema(res4, targ)
        assert_same_sub_schema(res5, targ)
    def test__filter_no_annotation_but_object(self):
        """objects= alone restricts the result to the given child types."""
        targ = self.targobj.spiketrains
        res = self.targobj.filter(objects=SpikeTrain)
        assert_same_sub_schema(res, targ)
        targ = self.targobj.analogsignals
        res = self.targobj.filter(objects=AnalogSignal)
        assert_same_sub_schema(res, targ)
        targ = self.targobj.analogsignals + self.targobj.spiketrains
        res = self.targobj.filter(objects=[AnalogSignal, SpikeTrain])
        assert_same_sub_schema(res, targ)
        assert_same_sub_schema(res, targ)
    def test__filter_single_annotation_obj_single(self):
        """Annotation filter plus objects= (class or name) narrows the type."""
        targ = [self.epcs1a[1]]
        res0 = self.targobj.filter(j=1, objects='Epoch')
        res1 = self.targobj.filter(j=1, objects=Epoch)
        res2 = self.targobj.filter(j=1, objects=['Epoch'])
        res3 = self.targobj.filter(j=1, objects=[Epoch])
        res4 = self.targobj.filter(j=1, objects=[Epoch,
                                                 ChannelIndex])
        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
        assert_same_sub_schema(res3, targ)
        assert_same_sub_schema(res4, targ)
    def test__filter_single_annotation_obj_multi(self):
        """objects= accepts a mixed list of class names and classes."""
        targ = [self.epcs1a[1], self.evts1a[1]]
        res0 = self.targobj.filter(j=1, objects=['Event', Epoch])
        assert_same_sub_schema(res0, targ)
    def test__filter_single_annotation_obj_none(self):
        """objects= with a type the segment lacks (or empty) matches nothing."""
        targ = []
        res0 = self.targobj.filter(j=1, objects=ChannelIndex)
        res1 = self.targobj.filter(j=1, objects='ChannelIndex')
        res2 = self.targobj.filter(j=1, objects=[])
        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
    def test__filter_single_annotation_norecur(self):
        """recursive=False still finds direct data children by annotation."""
        targ = [self.epcs1a[1], self.evts1a[1], self.img_seqs1a[1]]
        res0 = self.targobj.filter(j=1,
                                   recursive=False)
        assert_same_sub_schema(res0, targ)
    def test__filter_single_attribute_norecur(self):
        """recursive=False still finds direct data children by attribute."""
        targ = [self.epcs1a[1]]
        res0 = self.targobj.filter(name=self.epcs1a[1].name,
                                   recursive=False)
        assert_same_sub_schema(res0, targ)
    def test__filter_single_annotation_nodata(self):
        """data=False excludes data children, so nothing matches."""
        targ = []
        res0 = self.targobj.filter(j=0,
                                   data=False)
        assert_same_sub_schema(res0, targ)
    def test__filter_single_attribute_nodata(self):
        """data=False excludes data children for attribute filters too."""
        targ = []
        res0 = self.targobj.filter(name=self.epcs1a[1].name,
                                   data=False)
        assert_same_sub_schema(res0, targ)
    def test__filter_single_annotation_nodata_norecur(self):
        """data=False plus recursive=False also matches nothing."""
        targ = []
        res0 = self.targobj.filter(j=0,
                                   data=False, recursive=False)
        assert_same_sub_schema(res0, targ)
    def test__filter_single_attribute_nodata_norecur(self):
        """data=False plus recursive=False matches nothing by attribute."""
        targ = []
        res0 = self.targobj.filter(name=self.epcs1a[1].name,
                                   data=False, recursive=False)
        assert_same_sub_schema(res0, targ)
    def test__filter_single_annotation_container(self):
        """container=True keeps data children in the annotation result."""
        targ = [self.epcs1a[1], self.evts1a[1], self.img_seqs1a[1]]
        res0 = self.targobj.filter(j=1,
                                   container=True)
        assert_same_sub_schema(res0, targ)
    def test__filter_single_attribute_container(self):
        """container=True keeps data children in the attribute result."""
        targ = [self.epcs1a[1]]
        res0 = self.targobj.filter(name=self.epcs1a[1].name,
                                   container=True)
        assert_same_sub_schema(res0, targ)
    def test__filter_single_annotation_container_norecur(self):
        """container=True with recursive=False behaves the same."""
        targ = [self.epcs1a[1], self.evts1a[1], self.img_seqs1a[1]]
        res0 = self.targobj.filter(j=1,
                                   container=True, recursive=False)
        assert_same_sub_schema(res0, targ)
    def test__filter_single_attribute_container_norecur(self):
        """container=True with recursive=False behaves the same by attribute."""
        targ = [self.epcs1a[1]]
        res0 = self.targobj.filter(name=self.epcs1a[1].name,
                                   container=True, recursive=False)
        assert_same_sub_schema(res0, targ)
    def test__filter_single_annotation_nodata_container(self):
        """data=False dominates container=True: nothing matches."""
        targ = []
        res0 = self.targobj.filter(j=0,
                                   data=False, container=True)
        assert_same_sub_schema(res0, targ)
    def test__filter_single_attribute_nodata_container(self):
        """data=False dominates container=True for attribute filters too."""
        targ = []
        res0 = self.targobj.filter(name=self.epcs1a[1].name,
                                   data=False, container=True)
        assert_same_sub_schema(res0, targ)
    def test__filter_single_annotation_nodata_container_norecur(self):
        """data=False, container=True, recursive=False: still nothing."""
        targ = []
        res0 = self.targobj.filter(j=0,
                                   data=False, container=True,
                                   recursive=False)
        assert_same_sub_schema(res0, targ)
    def test__filter_single_attribute_nodata_container_norecur(self):
        """data=False, container=True, recursive=False by attribute: nothing."""
        targ = []
        res0 = self.targobj.filter(name=self.epcs1a[1].name,
                                   data=False, container=True,
                                   recursive=False)
        assert_same_sub_schema(res0, targ)
    def test__filterdata_multi(self):
        """filterdata over children_recur mirrors Segment.filter semantics."""
        data = self.targobj.children_recur
        targ = (self.sigarrs1a +
                [self.epcs1a[0]] +
                [self.evts1a[0]] +
                self.irsigs1a +
                self.trains1a +
                [self.img_seqs1a[0]] +
                [self.epcs1a[1]])
        res0 = filterdata(data, name=self.epcs1a[1].name, j=0)
        res1 = filterdata(data, {'name': self.epcs1a[1].name, 'j': 0})
        res2 = filterdata(data, targdict={'name': self.epcs1a[1].name, 'j': 0})
        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
    def test__filterdata_multi_nores(self):
        """filterdata with non-matching criteria returns an empty list."""
        data = self.targobj.children_recur
        targ = []
        res0 = filterdata(data, [{'j': 5}, {}])
        res1 = filterdata(data, {}, ttype=0)
        res2 = filterdata(data, [{}], ttype=0)
        res3 = filterdata(data, {'name': self.epcs1a[1].name}, j=0)
        res4 = filterdata(data, targdict={'name': self.epcs1a[1].name}, j=0)
        res5 = filterdata(data, name=self.epcs1a[1].name, targdict={'j': 0})
        res6 = filterdata(data, name=self.epcs2[0].name, j=5)
        res7 = filterdata(data, {'name': self.epcs2[1].name, 'j': 5})
        res8 = filterdata(data, targdict={'name': self.epcs2[1].name, 'j': 5})
        res9 = filterdata(data, {'name': self.epcs2[1].name}, j=5)
        res10 = filterdata(data, targdict={'name': self.epcs2[1].name}, j=5)
        res11 = filterdata(data, name=self.epcs2[1].name, targdict={'j': 5})
        res12 = filterdata(data, {'name': self.epcs1a[1].name}, j=5)
        res13 = filterdata(data, targdict={'name': self.epcs1a[1].name}, j=5)
        res14 = filterdata(data, name=self.epcs1a[1].name, targdict={'j': 5})
        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
        assert_same_sub_schema(res3, targ)
        assert_same_sub_schema(res4, targ)
        assert_same_sub_schema(res5, targ)
        assert_same_sub_schema(res6, targ)
        assert_same_sub_schema(res7, targ)
        assert_same_sub_schema(res8, targ)
        assert_same_sub_schema(res9, targ)
        assert_same_sub_schema(res10, targ)
        assert_same_sub_schema(res11, targ)
        assert_same_sub_schema(res12, targ)
        assert_same_sub_schema(res13, targ)
        assert_same_sub_schema(res14, targ)
    def test__filterdata_multi_partres(self):
        """filterdata returns only the children of the matching criterion."""
        data = self.targobj.children_recur
        targ = [self.epcs1a[1]]
        res0 = filterdata(data, name=self.epcs1a[1].name, j=5)
        res1 = filterdata(data, {'name': self.epcs1a[1].name, 'j': 5})
        res2 = filterdata(data, targdict={'name': self.epcs1a[1].name, 'j': 5})
        res3 = filterdata(data, [{'j': 1}, {'i': 1}])
        res4 = filterdata(data, {'j': 1}, i=1)
        res5 = filterdata(data, [{'j': 1}], i=1)
        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
        assert_same_sub_schema(res3, targ)
        assert_same_sub_schema(res4, targ)
        assert_same_sub_schema(res5, targ)
# @unittest.skipUnless(HAVE_IPYTHON, "requires IPython")
# def test__pretty(self):
# ann = get_annotations()
# ann['seed'] = self.seed1
# ann = pretty(ann).replace('\n ', '\n ')
# res = pretty(self.seg1)
#
# sigarr0 = pretty(self.sigarrs1[0])
# sigarr1 = pretty(self.sigarrs1[1])
# sigarr0 = sigarr0.replace('\n', '\n ')
# sigarr1 = sigarr1.replace('\n', '\n ')
#
# targ = ("Segment with " +
# ("%s analogsignals, " %
# (len(self.sigarrs1a),)) +
# ("%s epochs, " % len(self.epcs1a)) +
# ("%s events, " % len(self.evts1a)) +
# ("%s irregularlysampledsignals, " %
# len(self.irsigs1a)) +
# ("%s spiketrains\n" % len(self.trains1a)) +
# ("name: '%s'\ndescription: '%s'\n" %
# (self.seg1.name, self.seg1.description)
# ) +
#
# ("annotations: %s\n" % ann) +
#
# ("# analogsignals (N=%s)\n" % len(self.sigarrs1a)) +
#
# ('%s: %s\n' % (0, sigarr0)) +
# ('%s: %s' % (1, sigarr1)))
#
# self.assertEqual(res, targ)
    def test__construct_subsegment_by_unit(self):
        """construct_subsegment_by_unit must return a compliant sub-segment."""
        nb_seg = 3
        nb_unit = 7
        # Indices of the units that have associated signals.
        unit_with_sig = np.array([0, 2, 5])
        signal_types = ['Vm', 'Conductances']
        sig_len = 100
        # channelindexes
        chxs = [ChannelIndex(name='Vm',
                             index=unit_with_sig),
                ChannelIndex(name='Conductance',
                             index=unit_with_sig)]
        # Unit
        all_unit = []
        for u in range(nb_unit):
            un = Unit(name='Unit #%d' % u, channel_indexes=np.array([u]))
            assert_neo_object_is_compliant(un)
            all_unit.append(un)
        blk = Block()
        blk.channel_indexes = chxs
        for s in range(nb_seg):
            seg = Segment(name='Simulation %s' % s)
            for j in range(nb_unit):
                st = SpikeTrain([1, 2], units='ms',
                                t_start=0., t_stop=10)
                st.unit = all_unit[j]
                # NOTE(review): st is never appended to seg.spiketrains --
                # confirm whether the trains should be attached to the segment.
                for t in signal_types:
                    anasigarr = AnalogSignal(np.zeros((sig_len,
                                                       len(unit_with_sig))),
                                             units='nA',
                                             sampling_rate=1000. * pq.Hz,
                                             channel_indexes=unit_with_sig)
                    seg.analogsignals.append(anasigarr)
        blk.create_many_to_one_relationship()
        for unit in all_unit:
            assert_neo_object_is_compliant(unit)
        for chx in chxs:
            assert_neo_object_is_compliant(chx)
        assert_neo_object_is_compliant(blk)
        # what you want
        # (operates on the last segment created by the loop above)
        newseg = seg.construct_subsegment_by_unit(all_unit[:4])
        assert_neo_object_is_compliant(newseg)
def test_segment_take_spiketrains_by_unit(self):
result1 = self.seg1.take_spiketrains_by_unit()
result21 = self.seg1.take_spiketrains_by_unit([self.unit1])
result22 = self.seg1.take_spiketrains_by_unit([self.unit2])
self.assertEqual(result1, [])
assert_same_sub_schema(result21, [self.trains1a[0]])
assert_same_sub_schema(result22, [self.trains1a[1]])
    def test__time_slice(self):
        """time_slice must clip every child type, honouring reset_time and proxies."""
        time_slice = [.5, 5.6] * pq.s
        epoch2 = Epoch([0.6, 9.5, 16.8, 34.1] * pq.s, durations=[4.5, 4.8, 5.0, 5.0] * pq.s,
                       t_start=.1 * pq.s)
        epoch2.annotate(epoch_type='b')
        epoch2.array_annotate(trial_id=[1, 2, 3, 4])
        event = Event(times=[0.5, 10.0, 25.2] * pq.s, t_start=.1 * pq.s)
        event.annotate(event_type='trial start')
        event.array_annotate(trial_id=[1, 2, 3])
        anasig = AnalogSignal(np.arange(50.0) * pq.mV, t_start=.1 * pq.s,
                              sampling_rate=1.0 * pq.Hz)
        irrsig = IrregularlySampledSignal(signal=np.arange(50.0) * pq.mV,
                                          times=anasig.times, t_start=.1 * pq.s)
        st = SpikeTrain(np.arange(0.5, 50, 7) * pq.s, t_start=.1 * pq.s, t_stop=50.0 * pq.s,
                        waveforms=np.array([[[0., 1.], [0.1, 1.1]], [[2., 3.], [2.1, 3.1]],
                                            [[4., 5.], [4.1, 5.1]], [[6., 7.], [6.1, 7.1]],
                                            [[8., 9.], [8.1, 9.1]], [[12., 13.], [12.1, 13.1]],
                                            [[14., 15.], [14.1, 15.1]],
                                            [[16., 17.], [16.1, 17.1]]]) * pq.mV,
                        array_annotations={'spikenum': np.arange(1, 9)})
        seg = Segment()
        seg.epochs = [epoch2]
        seg.events = [event]
        seg.analogsignals = [anasig]
        seg.irregularlysampledsignals = [irrsig]
        seg.spiketrains = [st]
        block = Block()
        block.segments = [seg]
        block.create_many_to_one_relationship()
        # test without resetting the time
        sliced = seg.time_slice(time_slice[0], time_slice[1])
        assert_neo_object_is_compliant(sliced)
        self.assertEqual(len(sliced.events), 1)
        self.assertEqual(len(sliced.spiketrains), 1)
        self.assertEqual(len(sliced.analogsignals), 1)
        self.assertEqual(len(sliced.irregularlysampledsignals), 1)
        self.assertEqual(len(sliced.epochs), 1)
        # Each sliced child must equal slicing that child directly.
        assert_same_attributes(sliced.spiketrains[0],
                               st.time_slice(t_start=time_slice[0],
                                             t_stop=time_slice[1]))
        assert_same_attributes(sliced.analogsignals[0],
                               anasig.time_slice(t_start=time_slice[0],
                                                 t_stop=time_slice[1]))
        assert_same_attributes(sliced.irregularlysampledsignals[0],
                               irrsig.time_slice(t_start=time_slice[0],
                                                 t_stop=time_slice[1]))
        assert_same_attributes(sliced.events[0],
                               event.time_slice(t_start=time_slice[0],
                                                t_stop=time_slice[1]))
        assert_same_attributes(sliced.epochs[0],
                               epoch2.time_slice(t_start=time_slice[0],
                                                 t_stop=time_slice[1]))
        # Rebuild a fresh segment for the reset_time variant.
        seg = Segment()
        seg.epochs = [epoch2]
        seg.events = [event]
        seg.analogsignals = [anasig]
        seg.irregularlysampledsignals = [irrsig]
        seg.spiketrains = [st]
        block = Block()
        block.segments = [seg]
        block.create_many_to_one_relationship()
        # test with resetting the time
        sliced = seg.time_slice(time_slice[0], time_slice[1], reset_time=True)
        assert_neo_object_is_compliant(sliced)
        self.assertEqual(len(sliced.events), 1)
        self.assertEqual(len(sliced.spiketrains), 1)
        self.assertEqual(len(sliced.analogsignals), 1)
        self.assertEqual(len(sliced.irregularlysampledsignals), 1)
        self.assertEqual(len(sliced.epochs), 1)
        # reset_time=True is equivalent to shifting to t=0 and then slicing.
        assert_same_attributes(sliced.spiketrains[0],
                               st.time_shift(- time_slice[0]).time_slice(
                                   t_start=0 * pq.s, t_stop=time_slice[1] - time_slice[0]))
        anasig_target = anasig.copy()
        anasig_target = anasig_target.time_shift(- time_slice[0]).time_slice(t_start=0 * pq.s,
                                                                             t_stop=time_slice[1] - time_slice[0])
        assert_same_attributes(sliced.analogsignals[0], anasig_target)
        irrsig_target = irrsig.copy()
        irrsig_target = irrsig_target.time_shift(- time_slice[0]).time_slice(t_start=0 * pq.s,
                                                                             t_stop=time_slice[1] - time_slice[0])
        assert_same_attributes(sliced.irregularlysampledsignals[0], irrsig_target)
        assert_same_attributes(sliced.events[0],
                               event.time_shift(- time_slice[0]).time_slice(
                                   t_start=0 * pq.s, t_stop=time_slice[1] - time_slice[0]))
        assert_same_attributes(sliced.epochs[0],
                               epoch2.time_shift(- time_slice[0]).time_slice(t_start=0 * pq.s,
                                                                             t_stop=time_slice[1] - time_slice[0]))
        # Third pass: children are lazy proxy objects backed by ExampleRawIO.
        seg = Segment()
        reader = ExampleRawIO(filename='my_filename.fake')
        reader.parse_header()
        proxy_anasig = AnalogSignalProxy(rawio=reader,
                                         global_channel_indexes=None,
                                         block_index=0, seg_index=0)
        seg.analogsignals.append(proxy_anasig)
        proxy_st = SpikeTrainProxy(rawio=reader, unit_index=0,
                                   block_index=0, seg_index=0)
        seg.spiketrains.append(proxy_st)
        proxy_event = EventProxy(rawio=reader, event_channel_index=0,
                                 block_index=0, seg_index=0)
        seg.events.append(proxy_event)
        proxy_epoch = EpochProxy(rawio=reader, event_channel_index=1,
                                 block_index=0, seg_index=0)
        proxy_epoch.annotate(pick='me')
        seg.epochs.append(proxy_epoch)
        loaded_epoch = proxy_epoch.load()
        loaded_event = proxy_event.load()
        loaded_st = proxy_st.load()
        loaded_anasig = proxy_anasig.load()
        block = Block()
        block.segments = [seg]
        block.create_many_to_one_relationship()
        # test with proxy objects
        sliced = seg.time_slice(time_slice[0], time_slice[1])
        assert_neo_object_is_compliant(sliced)
        sliced_event = loaded_event.time_slice(t_start=time_slice[0],
                                               t_stop=time_slice[1])
        # The example data may contain no events in the window.
        has_event = len(sliced_event) > 0
        sliced_anasig = loaded_anasig.time_slice(t_start=time_slice[0],
                                                 t_stop=time_slice[1])
        sliced_st = loaded_st.time_slice(t_start=time_slice[0],
                                         t_stop=time_slice[1])
        self.assertEqual(len(sliced.events), int(has_event))
        self.assertEqual(len(sliced.spiketrains), 1)
        self.assertEqual(len(sliced.analogsignals), 1)
        # Proxies must be materialised into concrete objects by the slice.
        self.assertTrue(isinstance(sliced.spiketrains[0],
                                   SpikeTrain))
        assert_same_attributes(sliced.spiketrains[0],
                               sliced_st)
        self.assertTrue(isinstance(sliced.analogsignals[0],
                                   AnalogSignal))
        assert_same_attributes(sliced.analogsignals[0],
                               sliced_anasig)
        if has_event:
            self.assertTrue(isinstance(sliced.events[0],
                                       Event))
            assert_same_attributes(sliced.events[0],
                                   sliced_event)
# to remove
# def test_segment_take_analogsignal_by_unit(self):
# result1 = self.seg1.take_analogsignal_by_unit()
# result21 = self.seg1.take_analogsignal_by_unit([self.unit1])
# result22 = self.seg1.take_analogsignal_by_unit([self.unit2])
#
# self.assertEqual(result1, [])
#
# assert_same_sub_schema(result21, [self.sigs1a[0]])
# assert_same_sub_schema(result22, [self.sigs1a[1]])
#
# def test_segment_take_analogsignal_by_channelindex(self):
# ind1 = self.unit1.channel_indexes[0]
# ind2 = self.unit2.channel_indexes[0]
# result1 = self.seg1.take_analogsignal_by_channelindex()
# result21 = self.seg1.take_analogsignal_by_channelindex([ind1])
# result22 = self.seg1.take_analogsignal_by_channelindex([ind2])
#
# self.assertEqual(result1, [])
#
# assert_same_sub_schema(result21, [self.sigs1a[0]])
# assert_same_sub_schema(result22, [self.sigs1a[1]])
# commenting out temporarily
# def test_seg_take_slice_of_analogsignalarray_by_unit(self):
# seg = self.seg1
# result1 = seg.take_slice_of_analogsignalarray_by_unit()
# result21 = seg.take_slice_of_analogsignalarray_by_unit([self.unit1])
# result23 = seg.take_slice_of_analogsignalarray_by_unit([self.unit3])
#
# self.assertEqual(result1, [])
#
# targ1 = [self.sigarrs1a[0][:, np.array([True])],
# self.sigarrs1a[1][:, np.array([False])]]
# targ3 = [self.sigarrs1a[0][:, np.array([False])],
# self.sigarrs1a[1][:, np.array([True])]]
# assert_same_sub_schema(result21, targ1)
# assert_same_sub_schema(result23, targ3)
#
# def test_seg_take_slice_of_analogsignalarray_by_channelindex(self):
# seg = self.seg1
# ind1 = self.unit1.channel_indexes[0]
# ind3 = self.unit3.channel_indexes[0]
# result1 = seg.take_slice_of_analogsignalarray_by_channelindex()
# result21 = seg.take_slice_of_analogsignalarray_by_channelindex([ind1])
# result23 = seg.take_slice_of_analogsignalarray_by_channelindex([ind3])
#
# self.assertEqual(result1, [])
#
# targ1 = [self.sigarrs1a[0][:, np.array([True])],
# self.sigarrs1a[1][:, np.array([False])]]
# targ3 = [self.sigarrs1a[0][:, np.array([False])],
# self.sigarrs1a[1][:, np.array([True])]]
# assert_same_sub_schema(result21, targ1)
# assert_same_sub_schema(result23, targ3)
def test__deepcopy(self):
childconts = ('analogsignals',
'epochs', 'events',
'irregularlysampledsignals',
'spiketrains')
seg1_copy = deepcopy(self.seg1)
# Same structure top-down, i.e. links from parents to children are correct
assert_same_sub_schema(seg1_copy, self.seg1)
# Correct structure bottom-up, i.e. links from children to parents are correct
# No need to cascade, all children are leaves, i.e. don't have any children
for childtype in childconts:
for child in getattr(seg1_copy, childtype, []):
self.assertEqual(id(child.segment), id(seg1_copy))
if __name__ == "__main__":
    # Allow running this test module directly via the unittest CLI runner.
    unittest.main()
|
# -*- coding: utf-8 -*-
from architect.manager.client import BaseClient
import homeassistant.remote as remote
from homeassistant.exceptions import HomeAssistantError
from celery.utils.log import get_logger
logger = get_logger(__name__)
# Resource kinds synchronised when update_resources() is called without an
# explicit list.
DEFAULT_RESOURCES = [
    'ha_entity',
]
class HomeAssistantClient(BaseClient):
    def __init__(self, **kwargs):
        """Forward all configuration straight through to the Architect BaseClient."""
        super(HomeAssistantClient, self).__init__(**kwargs)
def auth(self):
status = True
try:
self.api = remote.API(self.metadata['host'],
self.metadata['password'],
self.metadata.get('port', 8123),
self.metadata.get('use_ssl', False))
except HomeAssistantError as exception:
logger.error(exception)
status = False
return status
def update_resources(self, resources=None):
if self.auth():
if resources is None:
resources = DEFAULT_RESOURCES
for resource in resources:
metadata = self.get_resource_metadata(resource)
self.process_resource_metadata(resource, metadata)
count = len(self.resources.get(resource, {}))
logger.info("Processed {} {} resources".format(count,
resource))
self.process_relation_metadata()
def get_resource_status(self, kind, metadata):
return 'active'
def get_resource_metadata(self, kind):
logger.info("Getting {} resources".format(kind))
response = []
if kind == 'ha_entity':
response = remote.get_states(self.api)
return response
def process_resource_metadata(self, kind, metadata):
if kind == 'ha_entity':
self._create_resource('group.ungrouped_resources',
'Ungrouped resources',
'ha_view',
metadata={'attributes': {'entity_id':[]}})
for resource in metadata:
metadata = resource.as_dict()
domain, entity = metadata['entity_id'].split('.')
if 'ha_{}'.format(domain) in self.resource_type_list():
if metadata['attributes'].get('view', False):
domain = 'view'
if 'last_changed' in metadata:
metadata['last_changed'] = metadata['last_changed'].isoformat()
if 'last_updated' in metadata:
metadata['last_updated'] = metadata['last_updated'].isoformat()
self._create_resource(metadata['entity_id'],
metadata['attributes'].get('friendly_name', metadata['entity_id'].split('.')[1]),
'ha_{}'.format(domain),
metadata=metadata)
else:
logger.error('{} not supported.'.format(domain))
pass
def process_relation_metadata(self):
grouped_resources = []
for resource_id, resource in self.resources.get('ha_view', {}).items():
for group in resource['metadata']['attributes']['entity_id']:
self._create_relation(
'in_view',
group,
resource_id)
grouped_resources.append(group)
for resource_id, resource in self.resources.get('ha_group', {}).items():
for group in resource['metadata']['attributes']['entity_id']:
self._create_relation(
'in_group',
group,
resource_id)
grouped_resources.append(group)
for kind, resources in self.resources.items():
if kind not in ['ha_view']:
for resource_id, resource in resources.items():
if resource_id not in grouped_resources:
self._create_relation(
'in_view',
'group.ungrouped_resources',
resource_id)
def get_resource_action_fields(self, resource, action):
fields = {}
if resource.kind == 'ha_script':
pass
elif resource.kind == 'ha_light':
pass
return fields
def process_resource_action(self, resource, action, data):
domain = resource.uid.split('.')[0]
if resource.kind == 'ha_script':
if action == 'execute_script':
if self.auth():
remote.call_service(self.api, domain, 'turn_on', {'entity_id': resource.uid})
if resource.kind == 'ha_light':
if action == 'turn_on_light':
if self.auth():
remote.call_service(self.api, domain, 'turn_on', {'entity_id': resource.uid})
elif action == 'turn_off_light':
if self.auth():
remote.call_service(self.api, domain, 'turn_off', {'entity_id': resource.uid})
|
import os
from app import db
def init_admin(first_name, last_name, email, password):
    """Create an admin user plus linked profile and commit both.

    NOTE(review): ``User`` and ``Profile`` are not imported in this module —
    as written this raises NameError; confirm the missing
    ``from app.models import User, Profile`` (or similar) import.
    The password is stored as passed in; presumably the ``User`` model hashes
    it — verify.
    """
    user = User(username=email, password=password)
    profile = Profile(first_name=first_name, last_name=last_name, user=user, is_admin=True, email=email)
    db.session.add(user)
    db.session.add(profile)
    db.session.commit()
def delete_and_create_media():
    """Wipe and recreate the media upload directory (paths relative to CWD)."""
    os.system('rm -rf app/models/media; mkdir app/models/media')
def delete_and_create_db():
    """Drop and recreate the database and migration history.

    WARNING: database credentials are hard-coded into the shell command;
    move them to configuration/environment.
    """
    os.system('rm -rf migrations; bash drop_db.sh root bratz123 traveler;')
    db.create_all()
def reset():
    """Full development reset: media directory and database."""
    delete_and_create_media()
    delete_and_create_db()
# init_admin("Sam", "It", "samit@gmail.com", "123")
|
"""task URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import usermgmt
from usermgmt import views
#admin.autodiscover()
# URL routing for the user-management app.
# NOTE(review): '' and 'home' both register name='home'; reverse('home')
# resolves to whichever is registered last — confirm this is intended.
urlpatterns = [
    path('admin/', admin.site.urls),
    path(r'', views.home, name='home'),
    path(r'home', views.home, name='home'),
    path(r'index', views.index, name='index'),
    path(r'addsuccess', views.addsuccess, name='addsuccess'),
    path(r'usermod', views.usermod, name='usermod'),
    path(r'modifyuser', views.modifyuser, name='modifyuser'),
    path(r'userdel', views.userdel, name='userdel'),
    path(r'deleteduser', views.deleteduser, name='deleteduser'),
    path(r'usergrant', views.usergrant, name='usergrant'),
    path(r'grantusersucc', views.grantusersucc, name='grantusersucc'),
    path(r'register', views.register, name='register'),
    path(r'login', views.user_login, name='login'),
    path(r'logout', views.user_logout, name='logout'),
]
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# We are stacking blocks to form a pyramid. Each block has a color which is a one letter string, like `'Z'`.
# For every block of color `C` we place not in the bottom row,
# we are placing it on top of a left block of color `A` and right block of color `B`.
# We are allowed to place the block there only if `(A, B, C)` is an allowed triple.
# We start with a bottom row of bottom, represented as a single string.
# We also start with a list of allowed triples allowed. Each allowed triple is represented as a string of length 3.
# Return tr
#
#
#
# w a ue if we can build the pyramid all the way to the top, otherwise false.
# Example 1:
# Input: bottom = "XYZ", allowed = ["XYD", "YZE", "DEA", "FFF"]
# Output: true
# Explanation:
# We can stack the pyramid like this:
# A
# / \
# D E
# / \ / \
# X Y Z
#
# This works because ('X', 'Y', 'D'), ('Y', 'Z', 'E'), and ('D', 'E', 'A') are allowed triples.
# Example 1:
# Input: bottom = "XXYX", allowed = ["XXX", "XXY", "XYX", "XYY", "YXZ"]
# Output: false
# Explanation:
# We can't stack the pyramid to the top.
# Note that there could be allowed triples (A, B, C) and (A, B, D) with C != D.
# 32 / 32 test cases passed.
# Status: Accepted
# Runtime: 408 ms
# We model the states that blocks could be in. We can do this using binary: a number like 0b0001011 would correspond to
# the state of the block being either 'A', 'B' or 'D'.
class Solution(object):
    def pyramidTransition(self, bottom, allowed):
        """
        Decide whether a pyramid can be stacked all the way up from `bottom`.

        Each cell of the current row is a 7-bit mask of the colours
        ('A'..'G') it could hold; table[a][b] is the mask of colours
        allowed on top of left colour a and right colour b.

        :type bottom: str
        :type allowed: List[str]
        :rtype: bool
        """
        base = ord('A')
        # table[a][b] -> bitmask of colours allowed on top of (a, b)
        table = [[0] * 7 for _ in range(7)]
        for triple in allowed:
            left, right, top = (ord(ch) - base for ch in triple)
            table[left][right] |= 1 << top
        masks = [1 << (ord(ch) - base) for ch in bottom]
        # Collapse the row one level at a time until a single cell remains.
        while len(masks) > 1:
            nxt = []
            for i in range(len(masks) - 1):
                combined = 0
                for a in range(7):
                    if masks[i] >> a & 1:
                        for b in range(7):
                            if masks[i + 1] >> b & 1:
                                combined |= table[a][b]
                nxt.append(combined)
            masks = nxt
        return bool(masks[0])
if __name__ == '__main__':
    # Smoke tests; bottom letters must stay within 'A'..'G' (the 7x7 table),
    # which is why the 'XYZ' examples below are commented out.
    # print(Solution().pyramidTransition('XXYX', ["XXX", "XXY", "XYX", "XYY", "YXZ"]))
    # print(Solution().pyramidTransition('', []))
    # print(Solution().pyramidTransition('XYZ', ["XYD", "YZE", "DEA", "FFF"]))
    print(Solution().pyramidTransition("AABCCBABBB",
                                       ["AAA", "AAB", "BCD", "BCA", "BCB", "BAD", "BAB", "BAA", "CCD", "BDD", "CCA",
                                        "CAA", "CAD", "DAD", "DAA", "DAC", "DCD", "DCB", "DCA", "CDD", "ABA", "ABB",
                                        "BBC", "BBB", "BBA", "ADC", "CBB", "CBA", "CDB", "CDC", "DBC", "DBB"]))
    print(Solution().pyramidTransition("ABDBACAAAC",
                                       ["ACC", "AAC", "AAB", "BCB", "BAD", "CAC", "CCD", "CAA", "CCB", "DAD", "ACD",
                                        "DCB", "ABB", "BDA", "BDC", "BDB", "BBD", "BBC", "BBB", "ADB", "ADC", "DDC",
                                        "DDB", "CDD", "CBC", "CBA", "CBD", "CDC", "DBC"]))
|
import jsonpath
from script.base_api.service_profile.students import students_queryById_get
from script.default_header import jyy_header
def assert_phone_search(student_ids: list, phone):
    """Verify that every student found by a phone search actually owns `phone`.

    For each student id the student record is fetched and `phone` is compared
    (as a string) against the primary and secondary phone numbers.  Returns
    True only when all students match; an empty id list yields False.
    """
    if not student_ids:
        return False
    outcomes = []
    for sid in student_ids:
        res = students_queryById_get(params={"studentId": sid}, header=jyy_header)
        prim_hits = jsonpath.jsonpath(res, "$.data.studentPrimPhoneNo")
        secondary_hits = jsonpath.jsonpath(res, "$.data.studentSecondaryPhoneNo")
        # jsonpath returns a list of matches, or False when nothing matched.
        candidates = []
        if prim_hits:
            candidates.append(prim_hits[0])
        if secondary_hits:
            candidates.append(secondary_hits[0])
        outcomes.append(any(str(c) == str(phone) for c in candidates))
    return all(outcomes)
|
import torch.nn as nn
import numpy as np
class CNNDriver(nn.Module):
    """End-to-end steering CNN (NVIDIA PilotNet-style: 5 conv + 5 FC layers).

    Expects 3x66x200 input images; outputs one steering value in [-1, 1]
    (tanh on the final layer).
    """
    def __init__(self):
        super(CNNDriver, self).__init__()
        # Feature extractor: three 5x5/stride-2 convs, then two 3x3/stride-1
        # convs, each followed by batch-norm and ReLU.
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 24, kernel_size=5, padding=0, stride=2),
            nn.BatchNorm2d(24),
            nn.ReLU(),
            nn.Conv2d(24, 36, kernel_size=5, padding=0, stride=2),
            nn.BatchNorm2d(36),
            nn.ReLU(),
            nn.Conv2d(36, 48, kernel_size=5, padding=0, stride=2),
            nn.BatchNorm2d(48),
            nn.ReLU(),
            nn.Conv2d(48, 64, kernel_size=3, padding=0, stride=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=0, stride=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        # Regression head: 1152 flattened conv features -> 1 steering output,
        # with dropout after every hidden layer.
        self.fc_layers = nn.Sequential(
            nn.Linear(1152, 1164),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(1164, 100),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(100, 50),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(50, 10),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(10, 1),
            nn.Tanh()
        )
        #self.fc_out = nn.Linear(10,1)
        # Layer-type-specific weight initialisation.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.normal_(m.weight, mean=1, std=0.02)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                size = m.weight.size()
                fan_out = size[0] # number of rows
                fan_in = size[1] # number of columns
                # Xavier/Glorot-style std computed from fan-in + fan-out.
                variance = np.sqrt(2.0 / (fan_in + fan_out))
                m.weight.data.normal_(0.0, variance)
    def forward(self, x):
        # The expected image size is 66x200
        conv_layers = self.conv_layers(x)
        # Reshape layer5 activation to a vector
        conv_layers_reshape = conv_layers.view(conv_layers.size(0), -1)
        fc_out = self.fc_layers(conv_layers_reshape)
        return fc_out
# https://www.c-sharpcorner.com/article/firebase-crud-operations-using-python/
# https://console.firebase.google.com/u/0/project/led-blink-wifi/database/led-blink-wifi-default-rtdb/data
# ==============================================
from vicksbase import firebase as vix
# Handle to the Firebase realtime database (no auth token supplied).
firebase_obj = vix.FirebaseApplication('https://led-blink-wifi-default-rtdb.firebaseio.com/', None)
def pull():
    """Read and print the current value stored under the 'led1' key."""
    result = firebase_obj.get('led1', None)
    print('\n Value fetched = ', result, end='\n\n')
def push(data):
    """Write `data` to the 'led1' key at the database root."""
    firebase_obj.put('/','led1', data)
    print('\nUpdated...\n')
# Running in CMD like...
#
# C:\Users\Vicky\Desktop\Repository\firebase\esp32_led>python
# Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 16:30:00) [MSC v.1900 64 bit (AMD64)] on win32
# Type "help", "copyright", "credits" or "license" for more information.
# >>>
# >>> import esp32 as v
# >>>
# >>> v.pull()
#
# Value fetched = 0
#
# >>> v.push(1)
#
# Updated...
#
# >>> v.pull()
#
# Value fetched = 1
#
# >>> v.push(0)
#
# Updated...
#
# >>> v.pull()
#
# Value fetched = 0
#
# >>>
# if __name__ == '__main__':
#
# val = int(input('\n Enter (1/0) : '))
# push(val)
#
# pull()
# input('\n Click Enter to Exit...')
# =================================================
# from firebase import firebase
# fb_app = firebase.FirebaseApplication('https://led-blink-wifi-default-rtdb.firebaseio.com/', None)
# result = fb_app.get('/led1', None)
# input(result)
# Output...
# C:\Users\Vicky\Desktop\Repository\firebase\Firebase-CRUD>python esp32.py
# Traceback (most recent call last):
# File "esp32.py", line 25, in <module>
# from firebase import firebase
# ImportError: cannot import name 'firebase' from 'firebase' (unknown location)
|
import socketserver
import socket
import xml.etree.ElementTree as ElementTree
import binascii
import configparser
import threading
import datetime
import logging
import time
import queue
#Set up the config parser
config = configparser.ConfigParser()
#Read the config file in.
config.read('pi-fighter-server.cfg')
# Test harness: endlessly cycle attack values 0..16 and fire each one at the
# Pi Trainer over UDP, one per second.
while(1):
    for i in range(0,17,1):
        OpponentAttackStr = "<OpponentAttack>{}</OpponentAttack>".format(i)
        print(OpponentAttackStr)
        print(config['PI_TRAINER']['PI_TRAINER'], int(config['PI_TRAINER']['PI_TRAINER_PORT']))
        # Send the message via UDP to Pi Fighter
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as UDPSocket:
            UDPSocket.setblocking(False)
            UDPSocket.sendto(bytes(OpponentAttackStr, "utf-8"),(config['PI_TRAINER']['PI_TRAINER'], int(config['PI_TRAINER']['PI_TRAINER_PORT'])))
            # NOTE(review): "1.168.1.19" looks like a typo for "192.168.1.19" — confirm.
            UDPSocket.sendto(bytes(OpponentAttackStr, "utf-8"),("1.168.1.19", int(config['PI_TRAINER']['PI_TRAINER_PORT'])))
        time.sleep(1)
|
from random_stump import RandomStumpInfoGain
from decision_tree import DecisionTree
import numpy as np
class RandomTree(DecisionTree):
    """Decision tree trained on a bootstrap sample, splitting with random stumps."""

    def __init__(self, max_depth):
        # Delegate to DecisionTree but force the random-stump splitter.
        DecisionTree.__init__(self, max_depth=max_depth, stump_class=RandomStumpInfoGain)

    def fit(self, X, y):
        """Fit the tree on a bootstrap resample (with replacement) of (X, y)."""
        n_samples = X.shape[0]
        sample_idx = np.random.choice(n_samples, n_samples, replace=True)
        DecisionTree.fit(self, X[sample_idx], y[sample_idx])
# Dutch National Flag: sort an array of 0s, 1s and 2s in a single pass,
# once per test case read from stdin.
T = int(input())
for _ in range(T):
    n = int(input())
    a = list(map(int, input().split()))
    mid = 0     # element currently under inspection
    low = 0     # boundary of the 0s region at the front
    high = n-1  # boundary of the 2s region at the back
    while mid <= high:
        if a[mid] == 0:
            # 0 belongs at the front: swap into the low region.
            a[mid],a[low] = a[low],a[mid]
            low += 1
            mid += 1
        elif a[mid] == 1:
            mid += 1
        else:
            # 2 belongs at the back; re-examine the element swapped in.
            a[mid],a[high] = a[high],a[mid]
            high -= 1
    for i in range(n):
        print(a[i],end=" ")
    print()
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
import os.path
import pymysql
app = Flask(__name__)
# NOTE(review): db is created before the SQLALCHEMY_* config below is set;
# Flask-SQLAlchemy reads config lazily at first use so this works, but moving
# the config above SQLAlchemy(app) would be clearer — confirm.
db = SQLAlchemy(app)
app.debug = True
# WARNING: credentials and SECRET_KEY are hard-coded; move to config/env.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:zhxfei..192@localhost/admin'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['SECRET_KEY'] = '387c335e2ba847b68fad8ddf8b819752'
# Uploaded files land under static/upload_files next to this module.
app.config['UP_DIR'] = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'static/upload_files')
# Import kept down here to avoid a circular import with the blueprint module.
from app.admin import admin as admin_blue_print
app.register_blueprint(admin_blue_print)
#
# @app.errorhandler(404)
# def page_not_found(error):
# return render_template('admin/404.html'), 404
|
import re
from typing import Optional
class LockReplacer:
    """Find and replace a package's commit sha (and time) in composer.lock-style text."""

    def __init__(self, text: str):
        self.text: str = text
        # Captured by find_package(): the matched fragment and its sha / time fields.
        self.part: Optional[str] = None
        self.sha: Optional[str] = None
        self.time: Optional[str] = None

    def find_package(self, package: str) -> bool:
        """Locate `package` in the text; on success remember its fragment, sha and time.

        Returns True when a matching package entry was found, False otherwise.
        """
        package = package.replace('/', r'\/')
        regex = r'.+{\s*("name":\s*"' + package + r'",.+?"reference":\s*"([a-z0-9]+)".+?"time":\s*"([0-9T\-:+]+)")'
        matched = re.match(regex, self.text, flags=re.M | re.S)
        if matched is not None:
            self.part, self.sha, self.time = matched.groups()
            return True
        return False

    def replace_required(self, sha: str) -> bool:
        """Return True when the stored sha differs from `sha` (an update is needed)."""
        return sha != self.sha

    def replace(self, sha: str, time: Optional[str] = None) -> str:
        """Swap the remembered sha (and optionally time) inside the text.

        Must be called after a successful find_package(); returns the updated text.
        """
        # Fix: annotation was `time: str = None` (implicit Optional, PEP 484).
        replacement: str = self.part.replace(self.sha, sha)
        if time is not None:
            replacement = replacement.replace(self.time, time)
        self.text = self.text.replace(self.part, replacement)
        return self.text
|
from django.db import models
# Create your models here.
class Persona(models.Model):
    """Contact record for a person filing adoption requests."""
    # TODO: Define fields here
    nombre = models.CharField(blank=True, max_length=100)
    apellidos = models.CharField(blank=True, max_length=150)
    edad = models.IntegerField(blank=True, null=True)
    telefono = models.CharField(blank=True, max_length=100)
    email = models.EmailField()
    domicilio = models.TextField(blank=True)
    class Meta:
        verbose_name = 'Persona'
        verbose_name_plural = 'Personas'
    def __str__(self):
        # Display as "nombre apellidos".
        return '{} {}'.format(self.nombre, self.apellidos)
class Solicitud(models.Model):
    """Adoption request, optionally linked to a Persona."""
    # TODO: Define fields here
    # NOTE(review): ForeignKey without on_delete only works on Django < 2.0;
    # on Django >= 2.0 this raises TypeError — confirm target Django version.
    persona = models.ForeignKey(Persona, null=True, blank=True)
    numero_caninos = models.IntegerField(blank=True, null=True)
    razon = models.TextField(blank=True)
    class Meta:
        verbose_name = 'Solicitud'
        verbose_name_plural = 'Solicitudes'
    def __str__(self):
        salida = 'solicitud'
        if self.persona:
            return '{}'.format(self.persona)
        else:
            # Fall back to "solicitud <id>" when no person is attached.
            return '{} {}'.format(salida, self.id)
|
import time
import requests
from flask import Flask, render_template, Response, request
from modules.CnnModel import CnnModel
from modules.Video import Video
# %% Parameters
camera = 0            # OpenCV camera index
resize = False        # whether frames are resized before inference
model_name = "model"
size = (100, 100, 3)  # input image size expected by the CNN
app = Flask(__name__)
# %% Model and video-capture construction
model = CnnModel(model_name=model_name, size=size)
vid = Video(cnnmodel=model, camera=camera, resize=resize)
# %% Shared state used by the stream generator and the routes
geste = ""            # last recognised gesture
data = ""             # last payload received on /data
ancien_geste = ""     # previous gesture, used for rate limiting
last_acted = time.time()
def post_geste(nouveau_geste, ancien_geste, last_acted):
    """Forward a recognised gesture to the local API, rate-limited to ~1/s.

    A gesture is posted when at least one second elapsed since the last post,
    or when either the old or the new gesture is "Rien" (none).
    Returns the (gesture, timestamp) pair to keep as the new state.
    """
    if time.time() - last_acted > 1 or ancien_geste == "Rien" or nouveau_geste == "Rien":
        r = requests.post('http://localhost:8090/getAPI', data={'geste': nouveau_geste, 'position': '0123'})
        last_acted = time.time()
        print(nouveau_geste)
        return (nouveau_geste, last_acted)
    else:
        return (ancien_geste, last_acted)
# %% Frame generator feeding the MJPEG stream
def gen():
    """Yield camera frames as multipart JPEG chunks, posting gestures as a side effect."""
    global geste, ancien_geste, last_acted
    while True:
        frame, geste = vid.get_frame()
        # print(geste)
        ancien_geste, last_acted = post_geste(geste, ancien_geste, last_acted)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
# %% HTML pages and the video feed
@app.route('/')
def video():
    # Main page; displays the current gesture.
    return render_template('index.html', geste=geste)
@app.route('/video')
def pagevideo():
    return render_template('video.html', geste=geste)
@app.route('/pptDisplay')
def ppt():
    return render_template('pptDisplay.html')
@app.route('/video_feed')
def video_feed():
    # MJPEG stream consumed by the <img> tag in the templates.
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/geste')
def geste():
    # NOTE(review): this function name rebinds the module-level `geste`
    # string defined above — pages rendering `geste` will show the function
    # object until gen() overwrites it. One of the two should be renamed.
    r = requests.post('http://localhost:8090/getAPI', data={'geste': "Main Ouverte", 'position': '0123'})
    return ("geste")
@app.route('/data', methods=['GET', 'POST'])
def data():
    # NOTE(review): same shadowing problem as geste(); also `return (request)`
    # returns the Request object itself, which is not a valid Flask response —
    # presumably request.data / request.form was intended. Confirm.
    global data
    if request.method == 'POST':
        data = request
        return (request)
    if request.method == 'GET':
        return (data)
if __name__ == '__main__':
    app.run(debug=False)
|
# Generated by Django 2.0.4 on 2018-05-12 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter music.title to a plain CharField(max_length=200)."""
    dependencies = [
        ('music', '0003_auto_20180512_2304'),
    ]
    operations = [
        migrations.AlterField(
            model_name='music',
            name='title',
            field=models.CharField(max_length=200),
        ),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
print('train stacked autoencoder stage 1')
import os
import sys
import csv
import numpy as np
import numpy as np
import pickle
from PIL import Image
import tensorflow as tf
import tensorflow_ae_base
from tensorflow_ae_base import *
import tensorflow_util
import myutil
# NOTE(review): numpy is imported twice above; harmless but one line can go.
# The exec'd files supply module-level names used below (dir_input,
# learning_rate, lambda_s, batch_size, tmax, tprint, stamp and presumably
# sess) — confirm against extern_params.py / tensorflow_ae_stage1.py.
exec(open('extern_params.py').read())
#
# load sample data
#
ss = 32 # sample size
if(not 'qqq_trn' in locals()):
    # Load the training patches only once per interactive session.
    file_input = 'qqq_trn_w{}.npy'.format(ss)
    path_data = os.path.join(dir_input,'input_w{}'.format(ss),file_input)
    qqq_trn = np.load(path_data)
    print('load input from {}'.format(path_data))
nn,ny,nx,nl = qqq_trn.shape
print('nn ny nx nl',nn,ny,nx,nl)
exec(open('tensorflow_ae_stage1.py').read())
#
# setup optimizer
#
qqq_input = tf.placeholder(tf.float32, [None,ny,nx,nl])
qqq_encode1 = get_encode1(qqq_input)
qqq_deconv1 = get_deconv1(qqq_encode1)
# Objective: reconstruction error plus an entropy (sparsity) penalty on the encoding.
mean_error = tf.reduce_mean(tf.square(qqq_deconv1 - qqq_input))
local_entropy = get_local_entropy_encode1(qqq_encode1)
mean_entropy = tf.reduce_mean(local_entropy)
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
train = optimizer.minimize(mean_error + lambda_s*mean_entropy)
#
# train loop
#
iii_bin = np.arange(batch_size,nn,batch_size)
iii_nn = np.arange(nn)
iii_batches = np.split(iii_nn,iii_bin)
sess.run(tf.initialize_all_variables())
for tt in range(tmax):
    if(tt % tprint==0):
        # Report the mean reconstruction error over all batches.
        error_out = np.mean([sess.run(mean_error,{qqq_input: qqq_trn[iii,]}) for iii in iii_batches])
        print(tt,error_out)
    # Reshuffle into fresh mini-batches each epoch.
    np.random.shuffle(iii_nn)
    iii_batches = np.split(iii_nn,iii_bin)
    for iii in iii_batches:
        sess.run(train,feed_dict={qqq_input: qqq_trn[iii,]})
#
# save parameters
#
# Snapshot learned weights/biases as plain numpy values and pickle them.
weight1_fin = {k:sess.run(v) for k,v in weight1.items()}
bias1_fin = {k:sess.run(v) for k,v, in bias1.items()}
myutil.saveObject(weight1_fin,'weight1.{}.pkl'.format(stamp))
myutil.saveObject(bias1_fin,'bias1.{}.pkl'.format(stamp))
myutil.timestamp()
print('stamp1 = \'{}\''.format(stamp))
|
import json
import boto3
# AWS region shortcuts; only `virginia` is used below.
oregon = 'us-west-2'
frankfurt = 'eu-central-1'
singapore = 'ap-southeast-1'
tokyo = 'ap-northeast-1'
virginia = 'us-east-1'
client = boto3.client('ec2', region_name=virginia)
# Dump all VPCs and subnets in the region as pretty-printed JSON.
response = client.describe_vpcs()
print(json.dumps(response, indent=4, sort_keys=True))
response = client.describe_subnets()
print(json.dumps(response, indent=4, sort_keys=True))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script runs consistently some subprocess call
"""
import subprocess
import time
class BaseArgs(object):
    """Abstract command holder that splits out common keyword options.

    Recognised options:
        delay   -- seconds to wait before each command (default 0)
        verbose -- print each command before running it (default False)
    """

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        # Falsy option values fall back to the defaults, matching the
        # truthiness-based handling of the original implementation.
        self.delay = self.kwargs['delay'] if self.kwargs.get('delay') else 0
        self.verbose = self.kwargs['verbose'] if self.kwargs.get('verbose') else False

    def run(self):
        # Subclasses must supply the actual execution strategy.
        raise NotImplementedError
class Runner(BaseArgs):
    """Run each positional command string sequentially via subprocess.call().

    Example:
        cmd = Runner('ls -lah', 'df -h', verbose=True, delay=3)
        cmd.run()
    """
    def run(self):
        # WARNING: shell=True executes the raw string in a shell — only pass
        # trusted command strings.
        for cmd in self.args:
            if self.verbose:
                print("\nRunning %s with delay=%s sec:" % (cmd, self.delay))
            time.sleep(self.delay)
            subprocess.call(cmd, shell=True)
# Demo invocation when executed as a script.
if __name__ == '__main__':
    cmd = Runner('ls -lah', 'df -h', verbose=True, delay=3)
    cmd.run()
|
from django.contrib import admin
from .models import Size, PizzaType, PizzaTopping, Pizza, SubExtra, Sub, Pasta, Salad, Platter, Order
# Expose every pizza-shop model in the Django admin.
for _model in (Size, PizzaType, PizzaTopping, Pizza, SubExtra, Sub,
               Pasta, Salad, Platter, Order):
    admin.site.register(_model)
# Register your models here.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-11 18:46
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add answer.date and align question.date, both defaulting to datetime.now.

    NOTE: the callable default is naive local time; timezone.now would be the
    timezone-aware choice if USE_TZ is enabled — confirm project settings.
    """
    dependencies = [
        ('qa', '0007_question_date'),
    ]
    operations = [
        migrations.AddField(
            model_name='answer',
            name='date',
            field=models.DateTimeField(default=datetime.datetime.now, verbose_name='date published'),
        ),
        migrations.AlterField(
            model_name='question',
            name='date',
            field=models.DateTimeField(default=datetime.datetime.now, verbose_name='date published'),
        ),
    ]
|
# Swap two integers via tuple unpacking and print the result (73, then 37).
x, y = 37, 73
x, y = y, x
print(x)
print(y)
|
# String-formatting demo: concatenating two string variables into a single
# message using an f-string.
First_name="Khamzayev"
Last_name="Jamshid"
masseg=f"{First_name} {Last_name} is a coder!" # the "f" prefix marks a formatted string literal
print(masseg)
# Number geussing Game.
# User can choose a number and let computer try to geuss
# Or computer can choose a number and user has to geuss
# only bug to fix is if user inputs char for num
import random
def computer_geuss_number(x): # User has secret number. Computer tries to geuss
    """Narrow down the user's secret number in [1, x] from l/h/c feedback."""
    lower = 1
    upper = x
    while(True):
        # Guess uniformly inside the remaining interval.
        geuss = random.randint(lower, upper)
        print(f"\nComputer geussed : {geuss}")
        feedback = input("Number is lower(l), higher(h) or correct(c)? ")
        if feedback.lower() == 'l': # Number needed lower. Maximum is too high
            upper = geuss - 1
        elif feedback.lower() == 'h':# Number needed higher. Minimum too low
            lower = geuss + 1
        elif feedback.lower() == 'c':
            break
        else:
            print("Input not recognised!")
    print("\n**************************")
    print("I WON!!So happy to have geussed your number correctly!!")
    print("Almost ready to take over the World!!")
    print("**************************")
def user_geuss_number(x): # Computer has secret number. User has to geuss
    """Computer picks a secret number in [1, x]; the user guesses until correct.

    Fix: non-numeric input is now re-prompted instead of crashing with
    ValueError (the known bug noted at the top of this script).
    """
    lower = 1
    upper = x
    number_to_geuss = random.randint(lower,upper)
    while(True):
        try:
            user_geuss = int(input("Please input yout geuss : "))
        except ValueError:
            print("That is not a whole number. Try again.")
            continue
        if user_geuss > number_to_geuss:
            print("Your geuss is too HIGHT")
        elif user_geuss < number_to_geuss:
            print("Your geuss is too LOW")
        elif user_geuss == number_to_geuss:
            break
    print("\n**************************")
    print("You WIN!! You geussed my SECRET number!!")
    print("NOw I can't take over the World!!")
    print("**************************")
def ask_user():
    """Prompt for the guessing-range maximum and the game-mode choice.

    Returns (user_num, choice): the maximum (an int > 1) and the raw menu
    choice string ('A'/'B'/'E', any case).
    Fixes: non-numeric input is re-prompted instead of crashing with
    ValueError, and entering 1 now prints a message instead of silently
    looping forever (1 was neither rejected nor accepted before).
    """
    print("\nGeussed number is from 1 to a certain maximum positive integer.")
    while(True):
        try:
            user_num = int(input("Enter the maximum number : "))
        except ValueError:
            print("That is not a whole number. Try again.")
            continue
        if user_num <= 0:
            print("Maximum cannot be less than 1.")
        elif user_num == 1:
            print("Maximum must be greater than 1.")
        else:
            break
    print("You have two options :")
    print("A. The computer has a secret number and you have to geuss it.")
    print("B. You choose a secret number and the computer has to guess it.")
    print("\nOr you can exit the game by typing 'E'")
    choice = str(input("Enter your choice (A/B/E): "))
    return user_num,choice
# Main menu loop: dispatch on the user's choice until they exit with 'E'.
print("\nWelcome to the Number geussing game. Come play with me...")
while(True):
    user_num,choice = ask_user()
    if choice.lower() == "a" :
        print("\nComputer has a secret number. Try to geuss it.")
        user_geuss_number(user_num)
    elif choice.lower() == "b" :
        print("\nThe computer will try to geuss a number in your mind.")
        computer_geuss_number(user_num)
    elif choice.lower() == "e":
        break
    else:
        print("Your choice is not recognised. Please Enter correct option.")
print("\nGoodbye Buddy :( Come play with me again soon!")
|
import os
import re
from pathlib import Path
from subprocess import run, PIPE, STDOUT
from io import BytesIO
from tempfile import TemporaryDirectory, NamedTemporaryFile
from PIL import Image
# Matches pdftoppm output names like "page-1.ppm" / "page12.ppm".
# Fix: the dot is now escaped — previously it matched any character,
# so e.g. "page-2Xppm" was wrongly treated as page 2.
PAGE = re.compile(r'page-?(?P<index>\d+)\.ppm')
def _extract_page(file_name: str) -> int:
    """Return the page number encoded in a pdftoppm output file name.

    Falls back to 1 for names that do not match, keeping sorting stable
    for unexpected files.
    """
    match = PAGE.match(file_name)
    if match:
        return int(match.group('index'))
    else:
        return 1
def _read_file_bytes(file_name: str) -> BytesIO:
    """Read a file's entire contents into an in-memory BytesIO buffer."""
    with open(file_name, 'rb') as handle:
        return BytesIO(handle.read())
def convert_to_ppm(pdf_file: BytesIO) -> list:
    ''' Convert a PDF to a list of PIL.Image. Uses temporary files and
    temporary directories to interact with pdftoppm. All files and directories
    created during this function's execution are removed post-conversion.
    Args:
        pdf_file (BytesIO): An open BytesIO file handle for a PDF
    Returns:
        (list): A list of PIL.Image objects representing each page of the PDF
    Raises:
        ChildProcessError: when pdftoppm exits with a non-zero status.
    '''
    images = []
    with NamedTemporaryFile(suffix='.pdf') as fp:
        temp_file_path = Path(fp.name).resolve()
        fp.write(pdf_file.read())
        # Fix: flush Python's write buffer so pdftoppm sees the complete file
        # on disk — without this the subprocess could read a truncated PDF.
        fp.flush()
        with TemporaryDirectory() as temp_dir:
            temp_dir_path = Path(temp_dir).resolve()
            # pdftoppm writes page files with this prefix into the temp dir.
            root_path = temp_dir_path / 'page'
            result = run(
                ['pdftoppm', str(temp_file_path), str(root_path)],
                stdout=PIPE,
                stderr=STDOUT,
                text=True
            )
            if result.returncode != 0:
                raise ChildProcessError(
                    f'pdftoppm failed with exit code {result.returncode} and '
                    f'output: {result.stdout}'
                )
            # Sort output pages numerically (file names alone sort lexically).
            output_files = sorted(
                temp_dir_path.glob('*.ppm'),
                key=lambda path: _extract_page(path.name)
            )
            # Load each file into an in-memory Image and remove the files
            for output_file in output_files:
                bytes_io = _read_file_bytes(output_file)
                os.remove(output_file)
                images.append(Image.open(bytes_io))
    return images
|
#!/usr/bin/python3
def square_matrix_simple(matrix=[]):
    """Return a new matrix with every element squared; the input is untouched."""
    return [[value * value for value in row] for row in matrix]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/8/24 14:51
# @Author : liuyb
# @Site :
# @File : run_this.py
# @Software: PyCharm
# @Description: A3C运行 Pendulum-v0游戏
import os
import numpy as np
import gym
import tensorflow as tf
import multiprocessing
import threading
import shutil
import matplotlib.pyplot as plt
import RL_brain
from RL_brain import ACNet,Worker
GAME = 'Pendulum-v0' # environment name
OUTPUT_GRAPH = True # whether to export the TF graph for TensorBoard
LOG_DIR = './log' # log directory
LR_A = 0.0001 # actor network learning rate
LR_C = 0.001 # critic network learning rate
GLOBAL_NET_SCOPE = 'Global_Net' # variable scope of the global network
N_WORKERS = multiprocessing.cpu_count() # one worker per CPU core
env = gym.make(GAME) # build the environment
N_S = env.observation_space.shape[0] # state-space dimension
N_A = env.action_space.shape[0] # action-space dimension
A_BOUND = [env.action_space.low, env.action_space.high] # action value bounds
with tf.Session() as sess:
    # Coordinator manages the worker threads.
    COORD = tf.train.Coordinator()
    with tf.device("/cpu:0"):
        # Separate optimisers for the actor and critic networks.
        OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
        OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
        GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE,sess,N_S,N_A,A_BOUND,OPT_A,OPT_C) # only its parameters are used
        workers = []
        # Create the workers.
        for i in range(N_WORKERS):
            i_name = 'W_%i' % i # worker name
            # name, globalAC, coord, sess, OPT_A = None, OPT_C = None
            workers.append(Worker(i_name, GLOBAL_AC, COORD, sess, OPT_A, OPT_C))
    # Initialise all variables.
    sess.run(tf.global_variables_initializer())
    if OUTPUT_GRAPH:
        if os.path.exists(LOG_DIR):
            shutil.rmtree(LOG_DIR)
        tf.summary.FileWriter(LOG_DIR, sess.graph)
    worker_threads = []
    for worker in workers:
        # NOTE(review): this lambda late-binds `worker`; if a thread starts
        # after the loop variable is rebound, two threads could run the same
        # worker. `lambda w=worker: w.work()` would be the safe form — confirm.
        job = lambda: worker.work()
        t = threading.Thread(target=job)
        t.start()
        worker_threads.append(t)
    # Block until every worker thread has finished.
    COORD.join(worker_threads)
    global_reward = RL_brain.get_global_reward()
    plt.plot(np.arange(len(global_reward)), global_reward)
    plt.xlabel('step')
    plt.ylabel('Total moving reward')
    plt.show()
from CPUData import *
from CSVinfo import *
from GPUData import *
from MemoryData import *
from MotherboardData import *
from StorageData import *
import csv
# Ratio : CPU, GPU, RAM, Storage, Motherboard
# Cost-split ratios per use case (each row sums to 1.0).
# NOTE(review): "USE_CSE_RATIO" looks like a typo for USE_CASE_RATIO; it is
# used consistently under this name, so renaming would touch callers.
USE_CSE_RATIO = {
    'home' : [0.3,0.1,0.2,0.3,0.1],
    'gaming' : [0.2,0.3,0.3,0.1,0.1],
    'office' : [0.3,0.1,0.3,0.2,0.1]
}
BASE_COST_DELTA = 10 # max. difference between intended and actual cost
NUM_RECOMMENDATIONS = 5 # number of part recommendations
class BuildInfo:
    def __init__(self, cpu_filepath, gpu_filepath, memory_filepath, storage_filepath, \
                 motherboard_filepath):
        '''
        The constructor method to start a new system build.
        Arguments:
            cpu_filepath: path to the CSV containing CPU parts information.
            gpu_filepath: path to the CSV containing GPU parts information.
            memory_filepath: path to the CSV containing memory (RAM) parts information.
            storage_filepath: path to the CSV containing storage drive parts information.
            motherboard_filepath: path to the CSV containing motherboard parts information.
        Returns: Object of BuildInfo.
        '''
        self.cpu_filepath = cpu_filepath
        self.gpu_filepath = gpu_filepath
        self.memory_filepath = memory_filepath
        self.storage_filepath = storage_filepath
        self.motherboard_filepath = motherboard_filepath
        # Build parameters, filled in later by set_base_info().
        self.target_cost = 0
        self.use_case = 0
        self.cost_ratio = []
        # Selected parts (rows from the respective CSVs), set via the set_* methods.
        self.cpu = []
        self.gpu = []
        self.memory = []
        self.storage = []
        self.motherboard = []
    # --- Raw CSV accessors: each delegates to the private __get_raw_csv_data reader. ---
    def get_all_cpus(self):
        '''
        Returns the raw CPU CSV data.
        Arguments: None
        Returns: A list of lists representing the parsed CSV.
        '''
        return self.__get_raw_csv_data(self.cpu_filepath)
    def get_all_gpus(self):
        '''
        Returns the raw GPU CSV data.
        Arguments: None
        Returns: A list of lists representing the parsed CSV.
        '''
        return self.__get_raw_csv_data(self.gpu_filepath)
    def get_all_memories(self):
        '''
        Returns the raw memory CSV data.
        Arguments: None
        Returns: A list of lists representing the parsed CSV.
        '''
        return self.__get_raw_csv_data(self.memory_filepath)
    def get_all_storages(self):
        '''
        Returns the raw storage CSV data.
        Arguments: None
        Returns: A list of lists representing the parsed CSV.
        '''
        return self.__get_raw_csv_data(self.storage_filepath)
    def get_all_motherboards(self):
        '''
        Returns the raw motherboard CSV data.
        Arguments: None
        Returns: A list of lists representing the parsed CSV.
        '''
        return self.__get_raw_csv_data(self.motherboard_filepath)
    def set_base_info(self, target_cost, use_case):
        '''
        Set the target cost and use case for the system.
        Arguments:
            target_cost: Intended system cost in USD. Must be a float value.
            use_case: Intended use case for the system. Must be one of the keys
                      in USE_CSE_RATIO ('home', 'gaming', 'office'); any other
                      value raises KeyError.
        Returns: None
        '''
        self.target_cost = target_cost
        self.use_case = use_case
        self.cost_ratio = USE_CSE_RATIO[use_case]
def get_cpu_recommendation(self):
'''
Get CPU recommendations based on the base info.
Arguments: None
Returns: A list of NUM_RECOMMENDATIONS CPUs, in decreasing order of preference. Rows can
be indexed into using indices from CSVinfo.py.
'''
return self.__get_recommendation(0, self.cpu_filepath, CPUData.get_cpu_price, \
CPUData.get_cpu_performance_score, CPU_PERFORMANCE_SCORE)
def set_cpu(self, cpu):
    '''
    Record the chosen CPU on this build.
    Arguments:
        cpu: List of CPU fields, exactly like the one returned by get_cpu_recommendation().
    Returns: None
    '''
    self.cpu = cpu
def get_cpu(self):
    '''
    Return the CPU currently recorded for this build.
    Arguments: None
    Returns: List of CPU fields, indexed using indices in CSVinfo.py.
    '''
    selected = self.cpu
    return selected
def get_gpu_recommendation(self):
    '''
    Get GPU recommendations based on the base info.
    Arguments: None
    Returns: A list of NUM_RECOMMENDATIONS GPUs, in decreasing order of preference. Rows can
    be indexed into using indices from CSVinfo.py.
    '''
    # Budget ratio index 1 is the GPU's share of the total target cost.
    return self.__get_recommendation(
        1,
        self.gpu_filepath,
        GPUData.get_gpu_price,
        GPUData.get_gpu_performance_score,
        GPU_PERFORMANCE_SCORE,
    )
def set_gpu(self, gpu):
    '''
    Record the chosen GPU on this build.
    Arguments:
        gpu: List of GPU fields, exactly like the one returned by get_gpu_recommendation().
    Returns: None
    '''
    self.gpu = gpu
def get_gpu(self):
    '''
    Return the GPU currently recorded for this build.
    Arguments: None
    Returns: List of GPU fields, indexed using indices in CSVinfo.py.
    '''
    selected = self.gpu
    return selected
def get_memory_recommendation(self):
    '''
    Get memory (RAM) recommendations based on the base info and CPU.
    Arguments: None
    Returns: A list of NUM_RECOMMENDATIONS RAM modules, in decreasing order of preference.
    Rows can be indexed into using indices from CSVinfo.py.
    '''
    # Budget ratio index 2 is the memory's share of the total target cost.
    initial_recommendation = self.__get_recommendation(2, self.memory_filepath, \
                                                       MemoryData.get_memory_price, \
                                                       MemoryData.get_memory_performance_score, \
                                                       MEMORY_PERFORMANCE_SCORE)
    # IDIOM FIX: `True if X else False` replaced by the boolean expression itself.
    is_ddr4_supported = 'DDR4' in self.cpu[CPU_MEMORY_TYPES]
    # Keep a module unless it is DDR4 and the selected CPU lacks DDR4 support.
    # (Filter loop replaced by an equivalent comprehension; order preserved.)
    return [row for row in initial_recommendation
            if is_ddr4_supported or row[MEMORY_IS_DDR4] != 'TRUE']
def set_memory(self, memory):
    '''
    Record the chosen RAM on this build.
    Arguments:
        memory: List of RAM fields, exactly like the one returned by get_memory_recommendation().
    Returns: None
    '''
    self.memory = memory
def get_memory(self):
    '''
    Return the RAM currently recorded for this build.
    Arguments: None
    Returns: List of RAM fields, indexed using indices in CSVinfo.py.
    '''
    selected = self.memory
    return selected
def get_storage_recommendation(self):
    '''
    Get storage drive recommendations based on the base info.
    Arguments: None
    Returns: A list of NUM_RECOMMENDATIONS storage drives, in decreasing order of preference.
    Rows can be indexed into using indices from CSVinfo.py.
    '''
    # Budget ratio index 3 is the storage drive's share of the total target cost.
    return self.__get_recommendation(
        3,
        self.storage_filepath,
        StorageData.get_storage_price,
        StorageData.get_storage_performance_score,
        STORAGE_PERFORMANCE_SCORE,
    )
def set_storage(self, storage):
    '''
    Record the chosen storage drive on this build.
    Arguments:
        storage: List of drive fields, exactly like the one returned by
        get_storage_recommendation().
    Returns: None
    '''
    self.storage = storage
def get_storage(self):
    '''
    Return the storage drive currently recorded for this build.
    Arguments: None
    Returns: List of drive fields, indexed using indices in CSVinfo.py.
    '''
    selected = self.storage
    return selected
def get_motherboard_recommendation(self):
    '''
    Get motherboard recommendations based on the base info, CPU, GPU, and memory.

    Unlike the other recommendation paths, motherboards are also filtered for
    compatibility with the already-selected parts: CPU socket, GPU multi-card
    bridge (SLI/CrossFire), and memory type/maximum supported capacity.
    Arguments: None
    Returns: A list of atleast 1 and atmost NUM_RECOMMENDATIONS motherboards, in decreasing
    order of preference. Rows can be indexed into using indices from CSVinfo.py.
    '''
    # Price tolerance band around the target cost; doubled on each pass.
    cost_delta = BASE_COST_DELTA
    recommended_parts = []
    # Index 4 of the cost ratio is the motherboard's share of the budget.
    target_part_cost = self.target_cost * self.cost_ratio[4]
    csv_data = self.__get_raw_csv_data(self.motherboard_filepath)
    del csv_data[0] # remove headers
    # extract the required information from CPU, GPU and memory for compatibility
    cpu_socket = self.cpu[CPU_SOCKET]
    # Bridge requirement depends on vendor: AMD multi-GPU uses CrossFire,
    # Nvidia uses SLI.
    crossfire_required = self.gpu[GPU_MANUFACTURER] == 'AMD' and \
                         self.gpu[GPU_SLI_CROSSFIRE] == 'Yes'
    sli_required = self.gpu[GPU_MANUFACTURER] == 'Nvidia' and \
                   self.gpu[GPU_SLI_CROSSFIRE] == 'Yes'
    # Capacity in GB — presumably the 'G' argument strips a 'GB' suffix; TODO confirm
    # against MemoryData.extract_num_data.
    memory_size = MemoryData.extract_num_data(self.memory[MEMORY_SIZE], 0, 'G')
    memory_type = 'DDR4' if self.memory[MEMORY_IS_DDR4] == 'TRUE' else 'DDR3'
    # Get atleast one recommendation
    # NOTE(review): when target_part_cost <= BASE_COST_DELTA the loop body never
    # runs and an empty list is returned, contradicting the "atleast 1" in the
    # docstring — confirm intended behavior for very small budgets.
    while (len(recommended_parts) < 1) and (cost_delta < target_part_cost):
        recommended_parts = []
        for row in csv_data:
            # check if motherboard is within budget and meets specification
            if (abs(MotherboardData.get_motherboard_price(row) - target_part_cost) <= cost_delta) and \
               (cpu_socket in row[MOTHERBOARD_CPU_SOCKET]) and \
               (not (crossfire_required and row[MOTHERBOARD_CROSSFIRE_SUPPORT] == 'No')) and \
               (not (sli_required and row[MOTHERBOARD_SLI_SUPPORT] == 'No')) and \
               (memory_size <= MotherboardData.extract_num_data(row[MOTHERBOARD_MAXIMUM_SUPPORTED_MEMORY], 0, 'G')) and \
               (memory_type in row[MOTHERBOARD_MEMORY_TYPE]):
                recommended_parts.append(row)
        # increase cost delta for next iteration
        cost_delta *= 2
    # Calculate performance scores and sort
    # NOTE: append mutates the parsed CSV rows in place; the score lands at
    # index MOTHERBOARD_PERFORMANCE_SCORE used by the sort key below.
    for row in recommended_parts:
        row.append(MotherboardData.get_motherboard_performance_score(row))
    recommended_parts.sort(key=lambda x:x[MOTHERBOARD_PERFORMANCE_SCORE], reverse=True)
    return recommended_parts
def set_motherboard(self, motherboard):
    '''
    Record the chosen motherboard on this build.
    Arguments:
        motherboard: List of motherboard fields, exactly like the one returned by
        get_motherboard_recommendation().
    Returns: None
    '''
    self.motherboard = motherboard
def get_motherboard(self):
    '''
    Return the motherboard currently recorded for this build.
    Arguments: None
    Returns: List of motherboard fields, indexed using indices in CSVinfo.py.
    '''
    selected = self.motherboard
    return selected
def __get_raw_csv_data(self, filepath):
    '''
    Reads the CSV at filepath and returns it as-is.
    Arguments:
        filepath: A string, containing path to CSV to parse
    Returns: A list of lists representing the parsed CSV (headers included).
    '''
    # BUG FIX: csv.reader requires the file be opened with newline='' so that
    # newlines embedded inside quoted fields are parsed correctly (per the
    # csv module documentation).
    with open(filepath, newline='') as input_file:
        # list(reader) replaces the manual append loop — same rows, same order.
        return list(csv.reader(input_file, dialect='excel'))
def __get_recommendation(self, ratio_index, filepath, price_function, performance_function, \
                         performance_score_index):
    '''
    The base recommendation function. Returns NUM_RECOMMENDATIONS recommendations based on the
    target price, trying to maximize performance score and minimize cost delta.
    Arguments:
        ratio_index: Ratio of the target_price set aside for this component.
        filepath: Path to CSV of the part.
        price_function: Pointer to the function that can return the price of each part when
                        provided with a row from the CSV.
        performance_function: Pointer to the function that can return the performance score of
                              each part when provided with a row from the CSV.
        performance_score_index: Index in which the calculated performance will get stored.
    Returns: NUM_RECOMMENDATIONS recommendations in decreasing order of preference (fewer if
             the CSV itself holds fewer rows).
    '''
    cost_delta = BASE_COST_DELTA
    recommended_parts = []
    target_part_cost = self.target_cost * self.cost_ratio[ratio_index]
    # (removed: unused local `is_first_row`)
    csv_data = self.__get_raw_csv_data(filepath)
    del csv_data[0] # remove headers
    # Widen the acceptable price band until enough parts qualify.
    # BUG FIX: the loop previously ran until NUM_RECOMMENDATIONS parts matched,
    # which never terminates when the CSV has fewer rows than that — once every
    # row is inside the band, doubling cost_delta can add nothing more. Stop as
    # soon as all available rows already qualify.
    while len(recommended_parts) < NUM_RECOMMENDATIONS and \
          len(recommended_parts) < len(csv_data):
        recommended_parts = []
        for row in csv_data:
            # check if the part is within budget
            if abs(price_function(row) - target_part_cost) <= cost_delta:
                recommended_parts.append(row)
        # increase cost delta for next iteration
        cost_delta *= 2
    # Calculate performance scores and sort. NOTE: append mutates the parsed
    # rows in place; the score lands at performance_score_index for the sort.
    for row in recommended_parts:
        row.append(performance_function(row))
    recommended_parts.sort(key=lambda x: x[performance_score_index], reverse=True)
    return recommended_parts
|
from dps.config import SystemConfig
from dps.vision import LeNet
from dps.run import _run
import tensorflow as tf
import numpy as np
class Config(SystemConfig):
    """Experiment configuration for REINFORCE on the simple-arithmetic task.

    Declares a curriculum of growing grid shapes/horizons plus training
    hyperparameters; consumed by dps.run._run via SystemConfig.
    """
    # Stages of increasing difficulty: longer horizon T, bigger grid shape.
    curriculum = [
        dict(T=6, shape=(2, 2), min_digits=2, max_digits=3),
        dict(T=12, shape=(3, 3), min_digits=2, max_digits=3),
        dict(T=20, shape=(4, 4), min_digits=2, max_digits=3),
        dict(T=30, shape=(5, 5), min_digits=2, max_digits=3),
    ]
    base = 10
    gamma = 0.99
    mnist = 1
    op_loc = (0, 0)
    start_loc = (0, 0)
    power_through = False
    optimizer_spec = 'rmsprop'
    max_steps = 100000
    # BUG FIX: was `load_path=-1,` — the trailing comma made this attribute the
    # tuple (-1,) instead of the integer -1.
    load_path = -1
    start_tensorboard = True
    reward_window = 0.5
    test_time_explore = 0.1
    threshold = 0.05
    patience = np.inf
    noise_schedule = None
    show_plots = False
    # BUG FIX: `verbose` was assigned twice (0, then False); the later binding
    # won, so only the final value is kept.
    verbose = False
    display_step = 1000
    eval_step = 100
    checkpoint_step = 0
    use_gpu = 1
    n_val = 500
    classifier_str = "LeNet_256"

    @staticmethod
    def build_classifier(inp, output_size, is_training=False):
        # Build a 256-unit LeNet head and return class probabilities via softmax.
        logits = LeNet(256, activation_fn=tf.nn.sigmoid)(inp, output_size, is_training)
        return tf.nn.softmax(logits)

    batch_size = 32
    entropy_schedule = 0.1
    exploration_schedule = "Poly(10, 0.1, 100000)"
    lr_schedule = "0.00025"
    n_controller_units = 128
if __name__ == "__main__":
    # Entry point: train with REINFORCE on the simple-arithmetic task.
    config = Config()
    n = 300        # NOTE(review): assigned but never used — confirm whether _run should take it
    repeats = 10   # NOTE(review): assigned but never used
    alg = 'reinforce'
    task = 'simple_arithmetic'
    _run(alg, task, config)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.