text stringlengths 8 6.05M |
|---|
#2020F_hw5_submissions problem 1
#I pledge my honor that I have abided by the Stevens honor system -Maya O
def main():
    """Prompt for n numbers and print the list of their squares."""
    def square(nums):
        # Square every element of *nums*. The original version ignored its
        # parameter and closed over the outer list `x` instead, and its loop
        # variable shadowed the argument name.
        return [v ** 2 for v in nums]
    n = int(input("How many numbers would you like to square? "))
    x = []
    for _ in range(n):
        # `list` previously shadowed the builtin; renamed to `value`
        value = float(input("Enter number: "))
        x.append(value)
    print("The given numbers squared are:", square(x))
main()
|
# Generated by Django 2.1.2 on 2018-11-12 03:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for this app: creates the Dataset and DatasetDetail tables.

    Despite `initial = True`, it depends on the `datawarehouse` app because
    DatasetDetail holds a foreign key to `datawarehouse.Mahasiswa`.
    """

    initial = True

    dependencies = [
        ('datawarehouse', '0008_auto_20181110_1606'),
    ]

    operations = [
        migrations.CreateModel(
            name='Dataset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # "nama" = name; "persen_training/testing" = train/test split percentages
                ('nama', models.CharField(max_length=100)),
                ('persen_training', models.IntegerField()),
                ('persen_testing', models.IntegerField()),
                # audit timestamps, maintained automatically by Django
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['nama'],
            },
        ),
        migrations.CreateModel(
            name='DatasetDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # "jenis" = kind; boolean flag (semantics defined by the app code)
                ('jenis', models.BooleanField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                # rows are deleted together with their parent Dataset / Mahasiswa
                ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ann.Dataset')),
                ('mahasiswa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='datawarehouse.Mahasiswa')),
            ],
            options={
                'ordering': ['jenis', 'mahasiswa'],
            },
        ),
    ]
|
# @see https://adventofcode.com/2015/day/5
from lib.helper import filetolist
import re
# puzzle input: one candidate string per line of day5_input.txt
words = filetolist('day5_input.txt')
def part1(w: list):
n = 0
for l in w:
if re.search(r'([aeiou].*){3,}', l) and re.search(r'([a-z])\1+', l) and not re.search(r'(?:ab|cd|pq|xy)+', l):
n += 1
return n
def part2(w: list):
    """Count "nice" strings: a letter pair appearing twice without overlap,
    plus a letter that repeats with exactly one letter between."""
    pair_twice = re.compile(r'([a-z][a-z]).*\1+')
    aba_shape = re.compile(r'([a-z])[a-z]\1+')
    return sum(1 for line in w if pair_twice.search(line) and aba_shape.search(line))
# report the number of "nice" strings under each part's rules
print('------------ PART 01 -------------')
nice = part1(words)
print('Matched', nice, 'nice strings')
print('\n------------ PART 02 -------------')
nice = part2(words)
print('Matched', nice, 'nice strings')
|
from __future__ import annotations
import typing as T
from pathlib import Path
import subprocess
import shutil
import json
import os
import tempfile
import importlib.resources
from ..web import git_download
__all__ = ["exe", "build", "find_library"]
def exe() -> str:
    """Locate the cmake executable, print its version, and return its path.

    Raises FileNotFoundError when cmake is not on PATH.
    """
    cmake_path = shutil.which("cmake")
    if not cmake_path:
        raise FileNotFoundError("CMake not found.")

    # first line of `cmake --version` looks like: "cmake version 3.x.y"
    first_line = subprocess.check_output([cmake_path, "--version"], text=True).split("\n")[0]
    cmake_version = first_line.split(" ")[2]

    print("Using CMake", cmake_version)

    return cmake_path
def build(
    source_dir: Path,
    build_dir: Path,
    *,
    config_args: list[str] | None = None,
    build_args: list[str] | None = None,
    wipe: bool = False,
    env: T.Mapping[str, str] | None = None,
    run_test: bool = False,
    dryrun: bool = False,
    install: bool = True,
):
    """build and install with CMake

    Parameters
    ----------
    source_dir: top-level CMake source directory (contains CMakeLists.txt)
    build_dir: CMake build directory
    config_args: extra arguments for the configure step
    build_args: extra arguments for the build step
    wipe: delete CMakeCache.txt first to force a fresh configure
    env: environment for the configure step
    run_test: run ctest after building
    dryrun: print the build command instead of running build/test/install
    install: run `cmake --install` after building

    Note: defaults that are None are now annotated `| None` — the previous
    bare `list[str] = None` annotations were incorrect per PEP 484.
    """
    cmake = exe()

    cache_file = build_dir / "CMakeCache.txt"
    if wipe and cache_file.is_file():
        cache_file.unlink()

    # %% Configure
    cmd = [cmake, f"-B{build_dir}", f"-S{source_dir}"]
    if config_args:
        cmd += config_args
    subprocess.check_call(cmd, env=env)

    # %% Build
    cmd = [cmake, "--build", str(build_dir), "--parallel"]
    if build_args:
        cmd += build_args
    if dryrun:
        print("DRYRUN: would have run\n", " ".join(cmd))
        return None

    subprocess.check_call(cmd)

    if run_test:
        subprocess.check_call(["ctest", "--output-on-failure"], cwd=str(build_dir))

    if install:
        subprocess.check_call([cmake, "--install", str(build_dir)])
def find_library(lib_name: str, lib_path: list[str], env: T.Mapping[str, str]) -> bool:
    """
    check if library exists with CMake

    lib_name must have the appropriate upper and lower case letter as would be used
    directly in CMake.

    Works by writing a throwaway CMake project that does find_package(lib_name)
    and returning whether its configure step succeeded.
    """
    cmake = exe()

    # locate this package's bundled CMake modules so find_package can use them
    with importlib.resources.path("gemini3d.cmake", "FindLAPACK.cmake") as f:
        mod_path = Path(f).parent

    cmake_template = """
cmake_minimum_required(VERSION 3.15)
project(dummy LANGUAGES C Fortran)

"""
    if mod_path.is_dir():
        cmake_template += f'list(APPEND CMAKE_MODULE_PATH "{mod_path.as_posix()}")\n'
    cmake_template += f"find_package({lib_name} REQUIRED)\n"

    # e.g. "LAPACK COMPONENTS ..." -> "find-LAPACK"
    build_dir = f"find-{lib_name.split(' ', 1)[0]}"

    # not context_manager to avoid Windows PermissionError on context exit for Git subdirs
    d = tempfile.TemporaryDirectory()
    r = Path(d.name)
    (r / "CMakeLists.txt").write_text(cmake_template)

    cmd = [cmake, "-S", str(r), "-B", str(r / build_dir)] + lib_path
    # use cwd= to avoid spilling temporary files into current directory if ancient CMake used
    # also avoids bugs if there is a CMakeLists.txt in the current directory
    ret = subprocess.run(cmd, env=env, cwd=str(r))

    # best-effort cleanup; see Windows note above
    try:
        d.cleanup()
    except PermissionError:
        pass

    return ret.returncode == 0
def get_gemini_root() -> Path:
    """Resolve the (desired) top-level Gemini3D directory.

    GEMINI_ROOT takes precedence; GEMINI3D_ROOT is the fallback.
    Raises EnvironmentError when neither is set (or both are empty).
    """
    gem_root = os.environ.get("GEMINI_ROOT") or os.environ.get("GEMINI3D_ROOT")
    if not gem_root:
        raise EnvironmentError(
            "Please set environment variable GEMINI_ROOT to (desired) top-level Gemini3D directory."
            "If Gemini3D is not already there, PyGemini will download and build Gemini3D there."
        )
    return Path(gem_root).expanduser()
def build_gemini3d(targets: list[str]):
    """
    build targets from gemini3d program

    Specify environment variable GEMINI_ROOT to reuse existing development code

    Parameters
    ----------
    targets: CMake target name(s) to build; a bare str is promoted to a list.

    Raises
    ------
    RuntimeError: when a requested target's executable cannot be found after the build.
    """
    if isinstance(targets, str):
        targets = [targets]

    gem_root = get_gemini_root()
    src_dir = Path(gem_root).expanduser()

    # first use: fetch the Gemini3D sources at the pinned tag
    if not (src_dir / "CMakeLists.txt").is_file():
        jmeta = json.loads(importlib.resources.read_text("gemini3d", "libraries.json"))
        git_download(src_dir, repo=jmeta["gemini3d"]["git"], tag=jmeta["gemini3d"]["tag"])

    build_dir = src_dir / "build"

    build(
        src_dir,
        build_dir,
        run_test=False,
        install=False,
        config_args=["-DBUILD_TESTING:BOOL=false"],
        build_args=["--target", *targets],
    )

    # verify each target produced an executable.
    # FIX: the local variable previously shadowed the module-level exe() function,
    # and the candidate directories were a set literal (nondeterministic order);
    # a tuple keeps the single-config layout checked first.
    for t in targets:
        exe_path = None
        for n in ("build", "build/Debug", "build/Release"):
            exe_path = shutil.which(t, path=str(src_dir / n))
            if exe_path:
                break
        if not exe_path:
            raise RuntimeError(f"{t} not found in {build_dir}")
|
# = [0,10,20,40]
#L[::-1]
#[40, 20, 10, 0]
def reverse(text):
    """Return *text* with its characters in reverse order."""
    # iterative equivalent of the original tail-first recursion
    out = ""
    for ch in text:
        out = ch + out
    return out

print(reverse("Alex"))
|
from __future__ import division
from Module3 import *
from Module1 import *
import math
import random
import sys
import os
import itertools
import operator
import random
import string
import nltk
from nltk.corpus import brown as bw # corpus for different genres
from nltk.corpus import wordnet as wn # corpus for structured words
from nltk.corpus import stopwords # corpus for stopwords
from nltk.corpus import gutenberg as gut # corpus for e-books
from nltk.corpus import nps_chat as chat # corpus for chats
from nltk.corpus import inaugural as president_speeches # corpus for news/speeches
#function to find mod in a list
def most_common(L):
    """Return the most frequent element of L; ties go to the earliest-seen element."""
    # decorate each value with its position, then group equal values together
    decorated = sorted((value, pos) for pos, value in enumerate(L))
    grouped = itertools.groupby(decorated, key=operator.itemgetter(0))

    def _score(entry):
        # score = (occurrence count, -first index): max() then prefers the
        # higher count and, on a tie, the value whose first occurrence is earliest
        _, run = entry
        total = 0
        first = len(L)
        for _, pos in run:
            total += 1
            first = min(first, pos)
        return total, -first

    return max(grouped, key=_score)[0]
# function finds all possible lookhead letter combinations according to given order
def manp_possbls( order ):
lst=[]
for k in range(0,pow(26,order)):
count = 1
comb = ''
for letter in range(0,order):
comb = comb + get_char(((int(k/(pow(26,order-count))))%26)+1)
count+=1
lst.append([comb])
return lst
#function finds index respective character
def get_char(index):
letter_string = string.ascii_lowercase
return letter_string[index-1]
#os.system('CLS')
# tuning constants; ALPHA and BETA are used by length_dist / hierarchy_dist below.
# ETA, PHI and DELTA are not referenced in the visible portion of this script.
ALPHA = 0.2   # decay rate for path-length similarity (exp(-ALPHA * dist))
BETA = 0.45   # depth scaling in hierarchy_dist's tanh-style squashing
ETA = 0.4
PHI = 0.2
DELTA = 0.85
#def mod_2(SEEDS, DOB, ORDER, FAV_MOVIE_GENRE, NEWS_FLAG, FAV_HOBBY, OCCUPATION, CHAT_FLAG, BOOK_FLAG)
#mod_2(SEEDS, DOB, ORDER, FAV_MOVIE_GENRE, NEWS_FLAG, FAV_HOBBY, OCCUPATION, CHAT_FLAG, BOOK_FLAG):
# Markov chain order used by the password generator below
ORDER = 2
print "Enter the following choices: "
print "Your name: "
nname = raw_input();
print "Enter a keyword of your choice: "
kkey = raw_input();
print "Enter your DOB: "
DOB = raw_input();
print "Enter your favourite move genre: \n\t1.adventure\n\t2.humor\n\t3.mystery\n\t4.romance\n\t5.science_fiction\n"
FAV_MOVIE_GENRE = int(raw_input())
print "Do you like news??(y/n): "
NEWS_FLAG = raw_input()
print "Enter your favourite hobby: "
FAV_HOBBY = raw_input()
print "Enter your occupation: "
OCCUPATION = raw_input()
print "Do you like chatting??(y/n): "
CHAT_FLAG = raw_input()
print "Do you like reading books??(y/n): "
BOOK_FLAG = raw_input()
SEEDS = sha512(nname,kkey)
#test_seed = int(SEEDS)
def get_best_synset_pair(word_1, word_2):
    """
    Choose the pair with highest path similarity among all pairs.
    Mimics pattern-seeking behavior of humans.

    Returns (None, None) when either word has no WordNet synsets, or when no
    pair has a defined path similarity.
    """
    synsets_1 = wn.synsets(word_1)
    synsets_2 = wn.synsets(word_2)
    if len(synsets_1) == 0 or len(synsets_2) == 0:
        return None, None

    max_sim = -1.0
    best_pair = None, None
    for synset_1 in synsets_1:
        for synset_2 in synsets_2:
            sim = wn.path_similarity(synset_1, synset_2)
            # FIX: path_similarity may return None (e.g. cross-POS pairs);
            # comparing None > float relied on Python 2 ordering and raises
            # TypeError on Python 3, so guard the comparison explicitly.
            if sim is not None and sim > max_sim:
                max_sim = sim
                best_pair = synset_1, synset_2
    return best_pair
def length_dist(synset_1, synset_2):
    """
    Return a measure of the length of the shortest path in the semantic
    ontology (Wordnet in our case as well as the paper's) between two
    synsets, mapped to a similarity via exp(-ALPHA * distance).

    Returns 0.0 when either synset is None.
    (FIX: removed the dead `l_dist = sys.maxint` initializer -- every branch
    reassigns l_dist, and sys.maxint does not exist on Python 3.)
    """
    if synset_1 is None or synset_2 is None:
        return 0.0
    if synset_1 == synset_2:
        # if synset_1 and synset_2 are the same synset return 0
        l_dist = 0.0
    else:
        wset_1 = set([str(x.name()) for x in synset_1.lemmas()])
        wset_2 = set([str(x.name()) for x in synset_2.lemmas()])
        if len(wset_1.intersection(wset_2)) > 0:
            # if synset_1 != synset_2 but there is word overlap, return 1.0
            l_dist = 1.0
        else:
            # just compute the shortest path between the two
            l_dist = synset_1.shortest_path_distance(synset_2)
            if l_dist is None:
                l_dist = 0.0
    # normalize path length to the range (0, 1]
    return math.exp(-ALPHA * l_dist)
def hierarchy_dist(synset_1, synset_2):
    """
    Return a measure of depth in the ontology to model the fact that
    nodes closer to the root are broader and have less semantic similarity
    than nodes further away from the root.

    Returns sys.maxsize when either synset is None (the original used the
    Python-2-only sys.maxint; on 64-bit CPython the value is the same).
    """
    if synset_1 is None or synset_2 is None:
        return sys.maxsize
    if synset_1 == synset_2:
        # return the depth of one of synset_1 or synset_2
        h_dist = max([x[1] for x in synset_1.hypernym_distances()])
    else:
        # find the max depth of least common subsumer
        hypernyms_1 = {x[0]: x[1] for x in synset_1.hypernym_distances()}
        hypernyms_2 = {x[0]: x[1] for x in synset_2.hypernym_distances()}
        lcs_candidates = set(hypernyms_1.keys()).intersection(
            set(hypernyms_2.keys()))
        if len(lcs_candidates) > 0:
            lcs_dists = []
            for lcs_candidate in lcs_candidates:
                # FIX: dict.has_key() is Python-2-only; `in` works on both
                lcs_d1 = hypernyms_1[lcs_candidate] if lcs_candidate in hypernyms_1 else 0
                lcs_d2 = hypernyms_2[lcs_candidate] if lcs_candidate in hypernyms_2 else 0
                lcs_dists.append(max([lcs_d1, lcs_d2]))
            h_dist = max(lcs_dists)
        else:
            h_dist = 0
    # tanh-style squashing of the depth into [0, 1)
    return ((math.exp(BETA * h_dist) - math.exp(-BETA * h_dist)) /
            (math.exp(BETA * h_dist) + math.exp(-BETA * h_dist)))
def word_similarity(word_1, word_2):
    """Combined word similarity: path-length score times hierarchy-depth score
    of the best WordNet synset pair for the two words."""
    best_1, best_2 = get_best_synset_pair(word_1, word_2)
    return length_dist(best_1, best_2) * hierarchy_dist(best_1, best_2)
#def mod_2(SEEDS, DOB, ORDER, FAV_MOVIE_GENRE, NEWS_FLAG, FAV_HOBBY, OCCUPATION, CHAT_FLAG, BOOK_FLAG):
###############################################################################
##################### -----WORD SIMILARITY FUNCTIONS----- #####################
# Parameters to the algorithm. Currently set to values that was reported
# in the paper to produce "best" results.
###############################################################################
###############################################################################
# word-frequency bookkeeping (not used in the visible portion of the script)
brown_freqs = dict()
N = 0
# working copy of the seed; 4 decimal digits are consumed per shuffle below
test_seed = int(SEEDS)
print "seeds extracted..."
# menu number -> Brown corpus category name
movie_genres = {
    '1':'adventure',
    '2':'humor',
    '3':'mystery',
    '4':'romance',
    '5':'science_fiction'
}
#shakespeare_books = {
#
#        '1':'14',
#        '2':'15',
#        '3':'16',
#        }
#FAV_MOVIE_GENRE = int(raw_input('Q1. What is your favourite movie type?(1, 2, 3, 4 or 5): \
#                                \n\t1.Adventure\n\t2.Humor\n\t3.Mystery\
#                                \n\t4.Romatic\n\t5.Science Fiction\n'))     #----brown(genres)------
#NEWS_FLAG = raw_input('Q2. Do you like news?(Y/N) : ')
# normalize the y/n answers so the comparisons below only test for 'n'
NEWS_FLAG = NEWS_FLAG.lower() #----- inaugral---------
#FAV_HOBBY = raw_input('Q3. Which is your favourite hobby? : ')  #-----brown(hobby)------
FAV_HOBBY = FAV_HOBBY.lower()
#OCCUPATION = raw_input('Q4. What is your occupation? : ')   #-----brown(all words)--
OCCUPATION = OCCUPATION.lower()
#CHAT_FLAG = raw_input('Q5. Do you like chatting?(Y/N) : ')  #------nps_chat---------
CHAT_FLAG = CHAT_FLAG.lower()
#BOOK_FLAG = raw_input('Q6. Do you like Shakespeares\' books?(Y/N) : ')  #-----gutenberg---------
#BOOK_FLAG = raw_input('Q6. Do you like reading books?(Y/N) : ')  #-----gutenberg---------
BOOK_FLAG = BOOK_FLAG.lower()
#if BOOK_FLAG == 'y':
#    FAV_BOOK = int(raw_input('Which book will you prefer? :(1, 2 or 3) \
#                             \n\t1.Julius Caesar\n\t2.Hamlet\n\t3.Macbeth\n'))
# per-topic candidate word pools, filled by the extraction sections below
related_words_MOVIE = []
related_words_NEWS = []
related_words_HOBBY = []
related_words_OCCUPATION = []
related_words_CHAT = []
related_words_BOOK = []
################## Filtering of stopwords from corpuses ##################
stop = set(stopwords.words('english'))
##--------------------------- MOVIE ---------------------------
#
#movie_cat = movie_genres.get(str(FAV_MOVIE_GENRE))
#movie_words = list(bw.words(categories = movie_cat))
#
#R1=random.getstate()[1][(long(test_seed%10000))%625]
#random.shuffle(movie_words, lambda: 1/R1) # deterministic shuffling using seeds
#test_seed/=10000
#
#filtered_movie_words = list(set(movie_words)-stop)
#[related_words_MOVIE.append(i) for i in filtered_movie_words if len(i)>4]
#related_words_MOVIE = related_words_MOVIE[0:2000]
#
##------------------------------------------------------------
#--------------------------- NEWS ---------------------------
print "starting extracted news data...\n"
if NEWS_FLAG != 'n':
#    print "processing news!!!"
    news_words = list(president_speeches.words())
    # deterministic shuffle: 4 seed digits pick an entry of the Mersenne state,
    # which becomes the constant "random" factor for shuffle (Python 2 API:
    # random.shuffle's second argument was removed in Python 3.11)
    R0=random.getstate()[1][(long(test_seed%10000))%625]
    random.shuffle(news_words, lambda: 1/R0) # deterministic shuffling using seeds
    test_seed/=10000
    # drop stopwords and very short tokens, keep the first 2000 candidates
    filtered_news_words = list(set(news_words)-stop)
    [related_words_NEWS.append(i) for i in filtered_news_words if len(i)>3]
    related_words_NEWS = related_words_NEWS[0:2000]
print "finished extracted news data...\n"
#------------------------------------------------------------
#--------------------------- CHAT ---------------------------
print "starting extracted chat data...\n"
if CHAT_FLAG != 'n':
    chat_words = list(chat.words())
    R1=random.getstate()[1][(long(test_seed%10000))%625]
    random.shuffle(chat_words, lambda: 1/R1) # deterministic shuffling using seeds
    test_seed/=10000
    filtered_chat_words = list(set(chat_words)-stop)
    [related_words_CHAT.append(i) for i in filtered_chat_words]
    related_words_CHAT = related_words_CHAT[0:2000]
print "finished extracted chat data...\n"
#------------------------------------------------------------
#if BOOK_FLAG != 'n':
#    book_words = gut.words(gut.fileids()[int(shakespeare_books.get(str(FAV_BOOK)))])
#    filtered_book_words = set(book_words)-stop
#    filtered_book_words = list(filtered_book_words)
#---------------------------- BOOK --------------------------
print "starting extracted book data...\n"
if BOOK_FLAG != 'n':
    book_words = list(gut.words())
    R2=random.getstate()[1][(long(test_seed%10000))%625]
    random.shuffle(book_words, lambda: 1/R2) # deterministic shuffling using seeds
    test_seed/=10000
    filtered_book_words = list(set(book_words)-stop)
    [related_words_BOOK.append(i) for i in filtered_book_words if len(i)>3]
    related_words_BOOK = related_words_BOOK[0:2000]
print "finished extracted book data...\n"
#------------------------------------------------------------
############################ Filtering done #################################
#############################################################################
######################### Filtering by Similarity ###########################
#------------------------------ Occupation ---------------------------------
print "starting extracted occupation data...\n"
general_words_OCC = list(bw.words())
related_words_with_similarity_OCC=[]
R3=random.getstate()[1][(long(test_seed%10000))%625]
random.shuffle(general_words_OCC, lambda: 1/R3) # deterministic shuffling using seeds
test_seed/=10000
filtered_general_words_OCC = list(set(general_words_OCC)-stop)
print "processing occupation..."
# score each candidate word against the user's occupation; keep [score, word]
for word_OCC in filtered_general_words_OCC[:2000]:
    # NOTE(review): str.lower() returns a new string; this result is discarded,
    # so the candidate is NOT actually lowercased here
    word_OCC.lower()
    if wn.synsets(word_OCC) == [] or len(word_OCC)<5:
        continue
    else:
        related_words_with_similarity_OCC.append([word_similarity(OCCUPATION,word_OCC),word_OCC])
# sort + reverse => highest-similarity words first
related_words_with_similarity_OCC.sort()
related_words_with_similarity_OCC.reverse()
for i in related_words_with_similarity_OCC:
    related_words_OCCUPATION.append(i[1])
print "finished extracted occupation data...\n"
#--------------------------------------------------------------------------
#------------------------------ Hobby ---------------------------------
print "starting extracted hobby data...\n"
general_words_HOBBY = list(bw.words(categories='hobbies'))
related_words_with_similarity_HOBBY=[]
R4=random.getstate()[1][(long(test_seed%10000))%625]
random.shuffle(general_words_HOBBY, lambda: 1/R4) # deterministic shuffling using seeds
test_seed/=10000
filtered_general_words_HOBBY = list(set(general_words_HOBBY)-stop)
for word_HOBBY in filtered_general_words_HOBBY[:2000]:
    word_HOBBY.lower()   # NOTE(review): result discarded, see occupation loop
    if wn.synsets(word_HOBBY) == [] or len(word_HOBBY)<4:
        continue
    else:
        related_words_with_similarity_HOBBY.append([word_similarity(FAV_HOBBY,word_HOBBY),word_HOBBY])
related_words_with_similarity_HOBBY.sort()
related_words_with_similarity_HOBBY.reverse()
for i in related_words_with_similarity_HOBBY:
    related_words_HOBBY.append(i[1])
print "finished extracted hobby data...\n"
#--------------------------------------------------------------------------
#------------------------------ Movie Genres ------------------------------
print "starting extracted movie genre data...\n"
mov_cat = movie_genres.get(str(FAV_MOVIE_GENRE))
general_words_MOVIE = list(bw.words(categories = mov_cat))
related_words_with_similarity_MOVIE=[]
# WordNet has no 'science_fiction' entry; compare against 'gadgets' instead
if mov_cat == 'science_fiction':
    mov_cat = 'gadgets'
R5=random.getstate()[1][(long(test_seed%10000))%625]
random.shuffle(general_words_MOVIE, lambda: 1/R5) # deterministic shuffling using seeds
test_seed/=10000
filtered_general_words_MOVIE = list(set(general_words_MOVIE)-stop)
for word_MOVIE in filtered_general_words_MOVIE[:2000]:
    word_MOVIE.lower()   # NOTE(review): result discarded, see occupation loop
    if wn.synsets(word_MOVIE) == [] or len(word_MOVIE)<5:
        continue
    else:
        print word_MOVIE
        related_words_with_similarity_MOVIE.append([word_similarity(mov_cat,word_MOVIE),word_MOVIE])
related_words_with_similarity_MOVIE.sort()
related_words_with_similarity_MOVIE.reverse()
for i in related_words_with_similarity_MOVIE:
    related_words_MOVIE.append(i[1])
print "finished extracted movie genre data...\n"
#--------------------------------------------------------------------------
#------------------------- MIXED NEW CORPORA -----------------------------
# merge every topic pool into one candidate list and shuffle it twice
print "making mixed corpora\n"
MIXED = []
MIXED.extend(related_words_BOOK)
MIXED.extend(related_words_CHAT)
MIXED.extend(related_words_HOBBY)
MIXED.extend(related_words_MOVIE)
MIXED.extend(related_words_NEWS)
MIXED.extend(related_words_OCCUPATION)
R6=random.getstate()[1][(long(test_seed%10000))%625]
random.shuffle(MIXED, lambda: 1/R6) # deterministic shuffling using seeds
test_seed/=10000
R7=random.getstate()[1][(long(test_seed%10000))%625]
random.shuffle(MIXED, lambda: 1/R7) # deterministic shuffling using seeds (Reshuffling)
test_seed/=10000
print "mixed corpora successfully...\n"
#-------------------------------------------------------------------------
######markov implementation##########
print "applying markov assumption...\n"
similar_data = MIXED
order=int(ORDER)
hash_now=str(SEEDS)
# determining the random factor for seeding in random generator
# (digit sum of the hash, scaled into [0, 1); shadows the builtin sum)
sum=0
for i in list(hash_now):
    sum = sum + int(i)
# converting to floating point for more accurate precision
sum=float(sum)
sum = sum/((len(hash_now))*10)
# order 0: pick a word directly by the seed-derived index
if order==0:
    random.shuffle(similar_data, lambda: sum) # passing seeds with actual similar words list to random organization of words
    index = float(len(similar_data))*sum
    word = similar_data[int(index)-1]
    tmp = sum
    word = word.encode('ascii','ignore')
    if len(word) <= 4: #Filteration on retrieved password for length constraints
        # nudge the index by a halving bias until a word longer than 4 is found
        bias = (1-sum)/2
        while bias>.01:
            tmp = tmp + bias
            index = float(len(similar_data))*(tmp)
            word = similar_data[int(index)-1]
            if len(''.join(word)) > 4:
                break;
            bias = bias/2
# leftover debug counters (only used in the commented-out prints below)
ccc=0
ccc1=0
ccc2=0
ccc3=0
# order >= 1: build a lookahead table per letter combination, then walk it
if order>=1:
    possbl_comb = manp_possbls(order)
    for comb in range(0,pow(26,order)):
#        print ccc
#        print ".\n"
#        ccc=ccc+1
        # collect, for each combination, the letters that follow it in the corpus
        for occur in similar_data:
#            print ccc1
#            ccc1=ccc1+1
#            print "\n"
            occur = occur.encode('ascii','ignore')
            ind_match = occur.find(possbl_comb[comb][0]) # matching order combination string
            if ind_match != -1 and (ind_match+order) < len(occur):
                possbl_comb[comb].append(occur[ind_match+order]) # storing matches as they matches in words
        print "In finding phras"
#        print ccc
#        ccc=ccc+1
        # count follower frequencies and prune the rarest ones (seed-dependent cutoff)
        tmp_list = []
        for i in range(0,26):
#            print ccc2
#            ccc2=ccc2+1
#            print "\n"
            tmp_char=get_char(i+1)
            tmp_list.append([possbl_comb[comb][1:].count(tmp_char),tmp_char]) # manipulating a temporary count for extracting only top most frequencies
        tmp_list.sort()
        for k in range(0,(26-int(sum*26))):
#            print ccc3
#            ccc3=ccc3+1
#            print "\n"
            if tmp_list[k][0] != 0:
                while tmp_list[k][1] in possbl_comb[comb]:possbl_comb[comb].remove(tmp_list[k][1])
    # walk the chain: start from a seed-chosen word prefix and append one
    # follower per hash segment until the target length is reached
    len_psswd=int(sum*10+2)
    word = similar_data[(int(sum*len(similar_data)))-1].encode('ascii','ignore')
    word = word[:order]
    for k in range(0,len_psswd-order):
        tmp_range = int(len(hash_now)/(len_psswd-1))
        tmp_hash = hash_now[k*tmp_range:(k+1)*tmp_range]
        hash_mod = tmp_hash.count(most_common(tmp_hash))
        fraction_mod = float(float(hash_mod)/tmp_range)
        for i in possbl_comb:
            if word[-order:] == i[0] and len(i[1:]) != 0:
                word=word+i[1:][int(fraction_mod*(len(i)-1))]
                break
#Applying modification to maintain length complexities in password
# short results are padded with four seed-selected DOB digits (prefix or suffix)
print "markov finished...\n"
if len(word) <= 7:
    if sum>0.45:
        word = ''.join(list(word)+ list(DOB[(int(sum*len(DOB))-1)%len(DOB)]) + list(DOB[(int(sum*len(DOB)))%len(DOB)]) + list(DOB[(int(sum*len(DOB))+1)%len(DOB)]) + list(DOB[(int(sum*len(DOB))+2)%len(DOB)]))
    if sum<=0.45:
        word =''.join(list(DOB[(int(sum*len(DOB))-1)%len(DOB)]) + list(DOB[(int(sum*len(DOB)))%len(DOB)]) + list(DOB[(int(sum*len(DOB))+1)%len(DOB)]) + list(DOB[(int(sum*len(DOB))+2)%len(DOB)]) + list(word))
print "=====>GENERATED PASSWORD<=====\n"
# module_substitute presumably applies character substitutions (from the star imports)
print module_substitute(word,int(test_seed))
print "\n================================\n"
'''
#function to find mod in a list
def most_common(L):
# get an iterable of (item, iterable) pairs
SL = sorted((x, i) for i, x in enumerate(L))
# print 'SL:', SL
groups = itertools.groupby(SL, key=operator.itemgetter(0))
# auxiliary function to get "quality" for an item
def _auxfun(g):
item, iterable = g
count = 0
min_index = len(L)
for _, where in iterable:
count += 1
min_index = min(min_index, where)
# print 'item %r, count %r, minind %r' % (item, count, min_index)
return count, -min_index
# pick the highest-count/earliest item
return max(groups, key=_auxfun)[0]
# function finds all possible lookhead letter combinations according to given order
def manp_possbls( order ):
lst=[]
for k in range(0,pow(26,order)):
count = 1
comb = ''
for letter in range(0,order):
comb = comb + get_char(((int(k/(pow(26,order-count))))%26)+1)
count+=1
lst.append([comb])
return lst
#function finds index respective character
def get_char(index):
letter_string = string.ascii_lowercase
return letter_string[index-1]
'''
'''
#mod_2(SEEDS, DOB, ORDER, FAV_MOVIE_GENRE, NEWS_FLAG, FAV_HOBBY, OCCUPATION, CHAT_FLAG, BOOK_FLAG):
print "Enter the following choices: "
print "Your name: "
nname = raw_input();
print "Enter a keyword of your choice: "
kkey = raw_input();
print "Enter your DOB: "
ddob = raw_input();
print "Enter your favourite move genre: \n\t1.adventure\n\t2.humor\n\t3.mystery\n\t4.romance\n\t5.science_fiction\n"
ggenre = int(raw_input())
print "Do you like news??(y/n): "
nnews = raw_input()
print "Enter your favourite hobby: "
hhobby = raw_input()
print "Enter your occupation: "
ooccupation = raw_input()
print "Do you like chatting??(y/n): "
cchat = raw_input()
print "Do you like reading books??(y/n): "
bboks = raw_input()
hash_value = sha512(nname,kkey)
print mod_2(int(hash_value),ddob,2,ggenre,nnews,hhobby,ooccupation,cchat,bboks)'''
#print mod_2(int(hash_value),ddob,2,ggenre,nnews,hhobby,ooccupation,cchat,bboks)
|
"""Tests for the `data_loader` module."""
import os
import pytest
import a2d2.data_loader as data_loader
@pytest.mark.skipif(not os.path.exists("a2d2.tfrecord"), reason="needs access to a tfrecord")
def test_simple():
    """Tests loading a local tfrecord file."""
    batch_size = 16
    reader = data_loader.A2D2TFRecordReader("a2d2.tfrecord", batch_size)
    dataset = reader.get_dataset()
    # pull a single (images, labels) batch from the pipeline
    images, labels = next(dataset.as_numpy_iterator())
    print(images.shape)
    print(labels.shape)
    # the leading (batch) dimension of both tensors must match the requested size
    assert images.shape[0] == batch_size
    assert labels.shape[0] == batch_size
|
# coding: utf-8
# In[50]:
import cv2
import numpy as np
import matplotlib.pyplot as plt
# In[51]:
# load the demo image (hard-coded local path)
img = cv2.imread('/home/padmach/data/pyimagesearch/flower3.jpg')
#cv2.imshow('', img)
#cv2.waitKey(0)
# In[52]:
# grayscale + light Gaussian blur to suppress noise before thresholding
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3,3),0)
#cv2.imshow('', blurred)
#cv2.waitKey(0)
# In[53]:
#Apply inverse thresholding; pixel value > T set to 0 and pixel value < T set to 255
#T = 200 # threshold value
(T, threshInv) = cv2.threshold(blurred, 110, 255, cv2.THRESH_BINARY_INV)
cv2.imshow('Threshold Binary Inverse', threshInv)
cv2.waitKey(0)
# In[54]:
#Apply Normal thresholding
(T, thresh) = cv2.threshold(blurred, 120, 255, cv2.THRESH_BINARY)
cv2.imshow('Threshold Binary', thresh)
cv2.waitKey(0)
# In[49]:
#Visualize masked regions in the image
cv2.imshow('Output', cv2.bitwise_and(img, img, mask= threshInv))
cv2.waitKey(0)
# In[56]:
#Applying Otsu's method of thresholding (threshold value chosen automatically)
(T_otsu, threshInvOtsus) = cv2.threshold(blurred, 0, 255, cv2.THRESH_OTSU)
cv2.imshow('Threshold Otsu', threshInvOtsus)
cv2.waitKey(0)
# In[57]:
print('Otsus thresholding value {}'.format(T_otsu))
# In[58]:
#Visualize the masked regions
cv2.imshow('Masked Region Otsu thresholding', cv2.bitwise_and(img, img, mask=threshInvOtsus))
cv2.waitKey(0)
# In[64]:
# license-plate example: same grayscale -> blur -> Otsu pipeline
license_plate = cv2.imread('/home/padmach/data/pyimagesearch/adaptive_threhsold_license_plate.png')
gray_license_plate = cv2.cvtColor(license_plate, cv2.COLOR_BGR2GRAY)
license_blurred = cv2.GaussianBlur(gray_license_plate, (3,3),0)
(T_license_otsu, license_threshold_otsu) = cv2.threshold(license_blurred, 0, 255, cv2.THRESH_OTSU)
cv2.imshow('License plate using Otsu', license_threshold_otsu)
cv2.waitKey(0)
# In[65]:
#Final output of visualizing the license plate
cv2.imshow('Output', cv2.bitwise_and(license_plate, license_plate, mask=license_threshold_otsu))
cv2.waitKey(0)
# In[63]:
#Applying adaptiveThreshold
#adaptive threshold achieves better results (per-neighborhood mean, 25x25 block, C=15)
thresh_adaptive = cv2.adaptiveThreshold(license_blurred,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,25,15)
cv2.imshow('Adaptive Threshold Image', thresh_adaptive)
cv2.waitKey(0)
# In[ ]:
|
import itertools
import csv
import random
total_size = 500
from sparkpost_impl import send_email
def pick_one_person(combination, fields, csvFile):
    """Return the email (column 1) of the first row whose demographic columns
    match *combination*, or None when no row matches.

    FIX: the original `continue` only skipped one field comparison (not the
    row), its `matched` flag was never used, and the email was returned for
    the FIRST row unconditionally.

    NOTE(review): the original hard-codes fields = [3, 4], overriding the
    caller's argument; preserved for backward compatibility -- confirm intent.
    """
    fields = [3, 4]
    for row in csvFile:
        matched = True
        for idx, field in enumerate(fields):
            if row[field] != combination[idx]:
                # one mismatching column disqualifies the whole row
                matched = False
                break
        if matched:
            return row[1]
    return None
def form_groups(fields, number_of_groups, csv_file_stream, analyzed_result):
    """Randomly assign ~total_size people into `number_of_groups` groups,
    drawing from demographic combinations, then email each hard-coded team
    member the list of the other team members.

    Python 2 code: uses csv_file.next(), xrange and integer division.
    """
    csv_file_stream.seek(0)
    csv_file = csv.reader(csv_file_stream)
    headers = csv_file.next()  # skip the header row (Python 2 iterator API)
    combinations = []
    for field in fields:
        combinations.append(analyzed_result[field].keys())
    # cartesian product of the per-field value sets = all demographic combos
    combinations = list(itertools.product(*combinations))
    group_size = total_size / number_of_groups  # Python 2 integer division
    output_groups = []
    for i in xrange(number_of_groups):
        output_groups.append([])
        for j in xrange(group_size):
            selected_person = None
            # NOTE(review): loops forever if pick_one_person keeps returning
            # None after the csv iterator is exhausted -- verify upstream
            while selected_person == None:
                random_group = random.randrange(0, len(combinations))
                selected_person = pick_one_person(combinations[random_group], fields, csv_file)
            output_groups[i].append(selected_person)
    # hard-coded core team: each member is emailed the rest of the team
    team_group_members = ['joemanley201@gmail.com', 'sweetha.k.kumar@gmail.com', 'melvix_2020@yahoo.co.in', 'sriniavireddy@gmail.com']
    for team_member in team_group_members:
        temp_group = team_group_members[:]
        temp_group.remove(team_member)
        send_email(team_member, temp_group)
    return output_groups
import numpy as np
import random
import torch
class RandomFault:
    """Callable that injects random faults into a weight tensor.

    The tensor is flattened, `num_faults` values are overwritten according to
    `fault_type`, and the original shape is restored.

    NOTE(review): `seed` is stored but never used to seed the RNG, and
    num_faults = len(w) * total_bits * frac can exceed len(w) for large frac
    (total_bits models per-weight bit count) -- confirm intended semantics.
    """

    def __init__(self, layer_mask=None, seed=0, frac=0, random_addrs=False, fault_type="uniform", int_bits=2, frac_bits=6):
        super(RandomFault, self).__init__()
        self.frac = frac                      # fraction of bits to fault
        self.random_addrs = random_addrs      # pick fault locations at random
        self.random_seed = seed
        self.fault_type = fault_type          # uniform | normal | sign | percent | bit
        self.int_bits = int_bits              # fixed-point integer bits
        self.frac_bits = frac_bits            # fixed-point fractional bits
        self.total_bits = frac_bits + int_bits

    def __call__(self, w):
        def quantize(q, v):
            # in-place fixed-point quantization of v to (int_bits, frac_bits)
            (qi, qf) = q
            (imin, imax) = (-np.exp2(qi - 1), np.exp2(qi - 1) - 1)
            fdiv = (np.exp2(-qf))
            v.div_(fdiv).round_().mul_(fdiv)
            v.clamp_(min=imin, max=imax)

        def bit_inject(output, thres, n_bits):
            # bit-level injection not implemented yet
            pass

        def _inject(w):
            addrs = list(range(len(w)))
            if self.random_addrs:
                np.random.shuffle(addrs)
            num_faults = int(len(w) * self.total_bits * self.frac)
            # fault locations: the first num_faults (possibly shuffled) addresses
            fault_addrs = addrs[:num_faults]

            # Generating random values with np.random (vectorized) is much faster
            # than python random.random
            faults = None
            if self.fault_type == "uniform":
                min_w = torch.min(w).detach().numpy()
                max_w = torch.max(w).detach().numpy()
                faults = np.random.uniform(min_w, max_w, num_faults)
            elif self.fault_type == "normal":
                mean, sigma = np.mean(w), np.std(w)
                faults = np.random.normal(mean, sigma, num_faults)
            elif self.fault_type == "sign":
                # -1 means flip sign, 1 means maintain sign (50% chance each).
                # FIX: read the weight at the selected address, not index i,
                # and keep the products in a list instead of truncating them
                # into the integer ndarray of signs.
                signs = np.random.choice([-1, 1], num_faults)
                faults = [signs[i] * w[fault_addrs[i]] for i in range(num_faults)]
            elif self.fault_type == "percent":
                # -1 means decrease by percent, 1 means increase; fixed at 10%
                percent = 0.1
                signs = np.random.choice([-1, 1], num_faults)
                faults = [w[fault_addrs[i]] * (1 + signs[i] * percent) for i in range(num_faults)]
            elif self.fault_type == "bit":
                # Eventually we should make sure we're not hitting the same bit.
                # fine for now though
                bit_inject(w, self.frac, (self.int_bits, self.frac_bits))
            else:
                assert False, "Fault type: %s is invalid" % self.fault_type

            if self.fault_type != "bit" and num_faults > 0:
                # FIX: write to the selected addresses; the original wrote to
                # indices 0..num_faults-1, silently ignoring fault_addrs and
                # making random_addrs a no-op.
                for i, addr in enumerate(fault_addrs):
                    w[addr] = faults[i]
            return w

        ########################################
        size = w.size()
        w = w.flatten()
        return _inject(w).view(size)
|
# Generated by Django 3.0.6 on 2020-05-31 20:33
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds `research.attended` as a many-to-many to the user model.

    Follows 0006_remove_research_attended, so this presumably re-introduces
    the field with a new definition -- verify against migration 0006.
    """

    dependencies = [
        # swappable reference so the migration works with a custom user model
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('research', '0006_remove_research_attended'),
    ]

    operations = [
        migrations.AddField(
            model_name='research',
            name='attended',
            field=models.ManyToManyField(blank=True, related_name='attended_research', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
from collections import OrderedDict

# sample data: name -> value
epic_dict = {'Jack': 5, 'Bill': 14, 'Katy': 3, 'Jess': 33, 'Alex': 4}

# sort the (key, value) pairs by value
# (to sort by key instead, use key=lambda item: item[0])
sorted_dict = sorted(epic_dict.items(), key=lambda item: item[1])

# an OrderedDict preserves the sorted order; iterate it to print the keys
x = OrderedDict(sorted_dict)
for elemento in x:
    print(elemento)
# a = list(map(int, input().split()))  # alternative: read the list from stdin
a = [1, 1, 4, 2, 1, 3]
b = sorted(a)
# Count the positions where the list disagrees with its sorted order.
c = sum(1 for x, y in zip(a, b) if x != y)
print(c)
import sys
def main():
    """Read an integer from the input file, add one, write it to the output file.

    Command line: script <option> <input-file> <output-file>.
    Bug fix: the original called `inf.close` / `outf.close` without
    parentheses, so the files were never closed; `with` now guarantees it.
    """
    script = sys.argv[0]
    option = sys.argv[1]
    inputFile = sys.argv[2]
    outputFile = sys.argv[3]
    print(f' \n script {script} wird ')
    print(f' \n mit option {option} \n ')
    print(f' input file is "{inputFile}" ')
    print(f' output file is "{outputFile}" ')
    with open(inputFile, 'r') as inf:
        a = int(inf.readline())
    b = a + 1
    with open(outputFile, 'w') as outf:
        outf.write(str(b))
if __name__ == '__main__':
    main()
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Teacher, Student
def teacher_list(request):
    """Render the list of all teachers, ordered alphabetically by full name."""
    teachers = Teacher.objects.all().order_by('full_name')
    context = {'teachers': teachers}
    return render(request, 'teacher/list.html', context)
|
# This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the 'limit' plugin."""
import unittest
from test.helper import TestHelper
class LimitPluginTest(unittest.TestCase, TestHelper):
    """Unit tests for LimitPlugin
    Note: query prefix tests do not work correctly with `run_with_output`.
    """
    def setUp(self):
        # Fresh beets library with the plugin loaded for every test.
        self.setup_beets()
        self.load_plugins("limit")
        # we'll create an even number of tracks in the library
        self.num_test_items = 10
        assert self.num_test_items % 2 == 0
        # Track numbers are 1-based so the range filters below are easy to state.
        for item_no, item in \
            enumerate(self.add_item_fixtures(count=self.num_test_items)):
            item.track = item_no + 1
            item.store()
        # our limit tests will use half of this number
        self.num_limit = self.num_test_items // 2
        # e.g. "'<5'" -- the quoted query-prefix form of the limit.
        self.num_limit_prefix = "".join(["'", "<", str(self.num_limit), "'"])
        # a subset of tests has only `num_limit` results, identified by a
        # range filter on the track number
        self.track_head_range = "track:.." + str(self.num_limit)
        self.track_tail_range = "track:" + str(self.num_limit + 1) + ".."
    def tearDown(self):
        self.unload_plugins()
        self.teardown_beets()
    def test_no_limit(self):
        """Returns all when there is no limit or filter."""
        result = self.run_with_output("lslimit")
        self.assertEqual(result.count("\n"), self.num_test_items)
    def test_lslimit_head(self):
        """Returns the expected number with `lslimit --head`."""
        result = self.run_with_output("lslimit", "--head", str(self.num_limit))
        self.assertEqual(result.count("\n"), self.num_limit)
    def test_lslimit_tail(self):
        """Returns the expected number with `lslimit --tail`."""
        result = self.run_with_output("lslimit", "--tail", str(self.num_limit))
        self.assertEqual(result.count("\n"), self.num_limit)
    def test_lslimit_head_invariant(self):
        """Returns the expected number with `lslimit --head` and a filter."""
        result = self.run_with_output(
            "lslimit", "--head", str(self.num_limit), self.track_tail_range)
        self.assertEqual(result.count("\n"), self.num_limit)
    def test_lslimit_tail_invariant(self):
        """Returns the expected number with `lslimit --tail` and a filter."""
        result = self.run_with_output(
            "lslimit", "--tail", str(self.num_limit), self.track_head_range)
        self.assertEqual(result.count("\n"), self.num_limit)
    def test_prefix(self):
        """Returns the expected number with the query prefix."""
        result = self.lib.items(self.num_limit_prefix)
        self.assertEqual(len(result), self.num_limit)
    def test_prefix_when_correctly_ordered(self):
        """Returns the expected number with the query prefix and filter when
        the prefix portion (correctly) appears last."""
        correct_order = self.track_tail_range + " " + self.num_limit_prefix
        result = self.lib.items(correct_order)
        self.assertEqual(len(result), self.num_limit)
    # NOTE(review): method name has a typo ("ordred"); renaming would be safe
    # since unittest discovers tests by the "test" prefix only.
    def test_prefix_when_incorrectly_ordred(self):
        """Returns no results with the query prefix and filter when the prefix
        portion (incorrectly) appears first."""
        incorrect_order = self.num_limit_prefix + " " + self.track_tail_range
        result = self.lib.items(incorrect_order)
        self.assertEqual(len(result), 0)
def suite():
    """Collect this module's tests into a unittest suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
import math
# Piecewise f(x): |x| for x <= 0, x^2 for 0 < x < 2, and 4 for x >= 2.
x = float(input("x= "))
if x <= 0:
    x = (-1) * x
    print("f(x): ", x)
elif x < 2:
    x = x * x
    print("f(x): ", x)
else:
    x = 4
    print("f(x): ", x)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from dataclasses import dataclass
from pants.backend.helm.target_types import (
HelmChartFieldSet,
HelmChartMetaSourceField,
HelmChartSourcesField,
)
from pants.core.target_types import FileSourceField, ResourceSourceField
from pants.core.util_rules import source_files
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.fs import Digest, DigestSubset, MergeDigests, PathGlobs, Snapshot
from pants.engine.internals.native_engine import RemovePrefix
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
DependenciesRequest,
HydratedSources,
HydrateSourcesRequest,
SourcesField,
Target,
Targets,
)
@dataclass(frozen=True)
class HelmChartRootRequest(EngineAwareParameter):
    """Request to locate the source root of a Helm chart from its metadata field."""
    source: HelmChartMetaSourceField
    def debug_hint(self) -> str | None:
        return self.source.address.spec
@dataclass(frozen=True)
class HelmChartRoot:
    """Directory that contains the chart's hydrated metadata file."""
    path: str
@rule(desc="Detect Helm chart source root")
async def find_chart_source_root(request: HelmChartRootRequest) -> HelmChartRoot:
    """Hydrate the chart metadata field and return its containing directory."""
    source = await Get(
        HydratedSources,
        HydrateSourcesRequest(
            request.source, for_sources_types=[HelmChartMetaSourceField], enable_codegen=True
        ),
    )
    # The metadata field is expected to hydrate to exactly one file.
    assert len(source.snapshot.files) == 1
    return HelmChartRoot(os.path.dirname(source.snapshot.files[0]))
@dataclass(frozen=True)
class HelmChartSourceFilesRequest(EngineAwareParameter):
    """Request for the source files that make up a Helm chart.

    The include_* flags choose which additional source kinds are hydrated
    alongside the chart's own sources (see `valid_sources_types`).
    """
    field_set: HelmChartFieldSet
    include_resources: bool
    include_files: bool
    include_metadata: bool
    @classmethod
    def create(
        cls,
        target: Target,
        *,
        include_resources: bool = True,
        include_files: bool = False,
        include_metadata: bool = True,
    ) -> HelmChartSourceFilesRequest:
        """Build a request from a target, deriving the field set automatically."""
        return cls.for_field_set(
            HelmChartFieldSet.create(target),
            include_resources=include_resources,
            include_files=include_files,
            include_metadata=include_metadata,
        )
    @classmethod
    def for_field_set(
        cls,
        field_set: HelmChartFieldSet,
        *,
        include_resources: bool = True,
        include_files: bool = False,
        include_metadata: bool = True,
    ) -> HelmChartSourceFilesRequest:
        """Build a request from an already-created field set."""
        return cls(
            field_set=field_set,
            include_resources=include_resources,
            include_files=include_files,
            include_metadata=include_metadata,
        )
    @property
    def sources_fields(self) -> tuple[SourcesField, ...]:
        """Fields to hydrate: the chart sources, plus metadata when requested."""
        fields: list[SourcesField] = [self.field_set.sources]
        if self.include_metadata:
            fields.append(self.field_set.chart)
        return tuple(fields)
    @property
    def valid_sources_types(self) -> tuple[type[SourcesField], ...]:
        """Source field types acceptable for hydration, per the include flags."""
        types: list[type[SourcesField]] = [HelmChartSourcesField]
        if self.include_metadata:
            types.append(HelmChartMetaSourceField)
        if self.include_resources:
            types.append(ResourceSourceField)
        if self.include_files:
            types.append(FileSourceField)
        return tuple(types)
    def debug_hint(self) -> str | None:
        return self.field_set.address.spec
@dataclass(frozen=True)
class HelmChartSourceFiles:
    """All of a chart's files, re-rooted at the chart directory."""
    snapshot: Snapshot
    # Files that were not under a source root and so kept their original paths.
    unrooted_files: tuple[str, ...]
async def _strip_chart_source_root(
    source_files: SourceFiles, chart_root: HelmChartRoot
) -> Snapshot:
    """Remove the chart-root prefix from rooted files; keep unrooted files as-is."""
    if not source_files.snapshot.files:
        return source_files.snapshot
    if source_files.unrooted_files:
        # Only files under a source root can have the chart root stripped.
        rooted_files = set(source_files.snapshot.files) - set(source_files.unrooted_files)
        rooted_files_snapshot = await Get(
            Snapshot, DigestSubset(source_files.snapshot.digest, PathGlobs(rooted_files))
        )
    else:
        rooted_files_snapshot = source_files.snapshot
    resulting_snapshot = await Get(
        Snapshot, RemovePrefix(rooted_files_snapshot.digest, chart_root.path)
    )
    if source_files.unrooted_files:
        # Add unrooted files back in
        unrooted_digest = await Get(
            Digest,
            DigestSubset(source_files.snapshot.digest, PathGlobs(source_files.unrooted_files)),
        )
        resulting_snapshot = await Get(
            Snapshot, MergeDigests([resulting_snapshot.digest, unrooted_digest])
        )
    return resulting_snapshot
@rule
async def get_helm_source_files(request: HelmChartSourceFilesRequest) -> HelmChartSourceFiles:
    """Gather a chart's files (own sources plus non-chart dependencies), chart-rooted."""
    chart_root, dependencies = await MultiGet(
        Get(HelmChartRoot, HelmChartRootRequest(request.field_set.chart)),
        Get(Targets, DependenciesRequest(request.field_set.dependencies)),
    )
    # First request: everything the flags allow (with codegen), including
    # sources from non-chart dependency targets. Second: only the chart's
    # own sources, without codegen.
    source_files, original_sources = await MultiGet(
        Get(
            SourceFiles,
            SourceFilesRequest(
                sources_fields=[
                    *request.sources_fields,
                    *(
                        tgt.get(SourcesField)
                        for tgt in dependencies
                        if not HelmChartFieldSet.is_applicable(tgt)
                    ),
                ],
                for_sources_types=request.valid_sources_types,
                enable_codegen=True,
            ),
        ),
        Get(
            SourceFiles,
            SourceFilesRequest([request.field_set.sources], enable_codegen=False),
        ),
    )
    # Re-root both sets at the chart directory and merge them.
    stripped_source_files = await _strip_chart_source_root(source_files, chart_root)
    stripped_original_sources = await _strip_chart_source_root(original_sources, chart_root)
    all_files_snapshot = await Get(
        Snapshot, MergeDigests([stripped_source_files.digest, stripped_original_sources.digest])
    )
    return HelmChartSourceFiles(
        snapshot=all_files_snapshot,
        unrooted_files=(*source_files.unrooted_files, *original_sources.unrooted_files),
    )
def rules():
    """Expose this module's rules together with the source-files helper rules."""
    all_rules = list(collect_rules())
    all_rules.extend(source_files.rules())
    return all_rules
|
from django.shortcuts import render
# Create your views here.
def chatbotview(request):
    """Render the chatbot page, passing a fixed greeting as `quest`."""
    question = "Hello"
    context = {'quest': question}
    return render(request, "bot.html", context)
# The identity comprehension over range(10) is just list(range(10)).
newlist = list(range(10))
print(newlist)
# Generated by Django 2.2.4 on 2019-09-07 08:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a required `lot` foreign key to entry_schedule."""
    dependencies = [
        ('residents', '0001_initial'),
        ('visitors', '0008_auto_20190907_1559'),
    ]
    operations = [
        migrations.AddField(
            model_name='entry_schedule',
            name='lot',
            # default=2 only backfills existing rows during this migration;
            # preserve_default=False drops the default afterwards.
            field=models.ForeignKey(default=2, on_delete=django.db.models.deletion.CASCADE, to='residents.Lot'),
            preserve_default=False,
        ),
    ]
|
#|default_exp p00_opencv_cl
# sudo pacman -S python-opencv rocm-opencl-runtime python-mss
import time
import numpy as np
import cv2 as cv
import mss
# Capture a 960x540 screen region with mss, boost local contrast with CLAHE
# on the lightness channel in LAB space, and display the result at ~60 fps.
# (Generated code -- the heavy parenthesisation comes from the generator.)
start_time=time.time()
debug=True
_code_git_version="eb9657d970d6d5e734ec4ea64a9209136d8c70bd"
_code_repository="https://github.com/plops/cl-py-generator/tree/master/example/105_amd_opencv/source/"
_code_generation_time="18:26:16 of Sunday, 2023-04-02 (GMT+1)"
print("{} nil cv.ocl.haveOpenCL()={}".format(((time.time())-(start_time)), cv.ocl.haveOpenCL()))
loop_time=time.time()
# Contrast-limited adaptive histogram equalization.
clahe=cv.createCLAHE(clipLimit=(7.0 ), tileGridSize=(12,12,))
with mss.mss() as sct:
    loop_start=time.time()
    count=0
    while (True):
        count += 1
        # Grab the region starting at y=160 with half of 1920x1080 resolution.
        img=np.array(sct.grab(dict(top=160, left=0, width=((1920)//(2)), height=((1080)//(2)))))
        lab=cv.cvtColor(img, cv.COLOR_RGB2LAB)
        lab_planes=cv.split(lab)
        # Equalize only the L (lightness) plane, then reassemble and convert back.
        lclahe=clahe.apply(lab_planes[0])
        lab=cv.merge([lclahe, lab_planes[1], lab_planes[2]])
        imgr=cv.cvtColor(lab, cv.COLOR_LAB2RGB)
        cv.imshow("screen", imgr)
        delta=((time.time())-(loop_time))
        # Sleep toward a ~60 Hz frame budget (minus a small epsilon).
        target_period=((((1)/((60. ))))-((1.00e-4)))
        if ( ((delta)<(target_period)) ):
            time.sleep(((target_period)-(delta)))
        fps=((1)/(delta))
        fps_wait=((1)/(((time.time())-(loop_time))))
        loop_time=time.time()
        # Report the measured frame rate every 100 frames.
        if ( ((0)==(((count)%(100)))) ):
            print("{} nil fps={} fps_wait={}".format(((time.time())-(start_time)), fps, fps_wait))
        # Press 'q' in the window to quit.
        if ( ((ord("q"))==(cv.waitKey(1))) ):
            cv.destroyAllWindows()
            break
def reverse_digits(num):
    """Print the decimal digits of `num` in reverse order, one per line.

    Non-positive input prints nothing (matches the original behaviour,
    which stopped before recording any digit).
    Fixes: the original was Python-2-only (`print digit`, `raw_input`) and
    `num = num/10` would be float division on Python 3.
    """
    reverse_list = []
    while num > 0:
        reverse_list.append(num % 10)
        num //= 10  # floor division drops the last digit
    for digit in reverse_list:
        print(digit)
if __name__ == '__main__':
    num = int(input("Give me a number, and I'll reverse it for you!"))
    reverse_digits(num)
|
import sys
from assignment3 import ConfigDict
cc = ConfigDict('config_file.txt')
if len(sys.argv) == 3:
    # Invoked as: script <key> <value> -> store the pair.
    key = sys.argv[1]
    value = sys.argv[2]
    print('writing data {} {}'.format(key, value))  # fixed typo: "wrting"
    cc[key] = value
else:
    # No key/value supplied -> dump everything currently stored.
    print('reading data')
    for key in cc.keys():
        print('  {} = {}'.format(key, cc[key]))
print(cc['sql_query'])
print(cc['email_to'])
cc['database'] = 'mysql_managed'
print(cc['database'])
|
# web评分服务端
# coding:utf-8
from flask import Flask, render_template, request, redirect, url_for, make_response, jsonify
import os
import cv2
from keras.models import Sequential
from keras.models import load_model
import numpy as np
import time
from datetime import timedelta
def sc(imagePath,current):
    """Detect faces in the image at `imagePath`, score each with the global
    Keras `model`, annotate the frame, and save it to static/images2/<current>.png.
    """
    global model
    #imagePath=q.get()
    frame = cv2.imread(imagePath)
    sh = frame.shape
    print(sh)
    if sh[0] > 1079: # image too tall: shrink it, keeping the aspect ratio
        frame = cv2.resize(frame, (int(sh[1] * 850 / sh[0]),850 ), interpolation=cv2.INTER_AREA)
        print(frame.shape)
        print("change size 1")
    elif sh[1] > 1920: # image too wide: shrink it, keeping the aspect ratio
        frame = cv2.resize(frame, (1500, int(sh[0]*1500/sh[1])), interpolation=cv2.INTER_AREA)
        print(frame.shape)
        print("change size 2")
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    # Detect faces in the image
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    print("Found {0} faces!".format(len(faces)))
    print(frame.shape)
    print(faces)
    for (x, y, w, h) in faces:
        # Crop the face and resize to the model's 220x220 input.
        new_image = frame[y:y + h, x:x + w]
        new_image = cv2.resize(new_image, (220, 220), interpolation=cv2.INTER_CUBIC)
        new_image = np.array([new_image]) # (1,220,220,3)
        print(new_image.shape)
        # k=Modle(model,new_image)
        # NOTE: dividing by 25 is required to match the input scaling used
        # when the network was trained -- keep them consistent.
        k = model.predict((new_image / 25), batch_size=None, verbose=0, steps=None)
        print(k)
        print("!!!!!")
        #j = model.predict((new_image / 25), batch_size=None, verbose=0, steps=None)
        #print (j)
        # Draw the bounding box with the rounded score above it.
        text = str(round(k[0][0],3))
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3)
        cv2.putText(frame, text, (x, y), cv2.FONT_HERSHEY_DUPLEX, 1.5, (0, 0, 255), 1)
    cv2.imwrite('static/images2/'+current+'.png',frame)
    #cv2.imshow('frame', frame)
    #cv2.waitKey(0)
    cv2.destroyAllWindows()
    print("end!!")
# Upload whitelist: only these image extensions are accepted.
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'JPG', 'PNG', 'bmp'])
def allowed_file(filename):
    """Return True if `filename` has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
app = Flask(__name__)
# Set the static-file cache expiry to 1 second so freshly scored images show up.
app.send_file_max_age_default = timedelta(seconds=1)
# @app.route('/upload', methods=['POST', 'GET'])
@app.route('/upload', methods=['POST', 'GET']) # register the upload route
def upload():
    """GET: show the upload form. POST: validate, save, and score the image."""
    if request.method == 'POST':
        # Unix timestamp used as a unique-ish name for the saved files.
        current = str(round(time.time()))
        f = request.files['file']
        if not (f and allowed_file(f.filename)):
            return jsonify({"error": 1001, "msg": "请检查上传的图片类型,仅限于png、PNG、jpg、JPG、bmp"})
        user_input = request.form.get("name")
        basepath = os.path.dirname(__file__) # directory containing this file
        #upload_path = os.path.join(basepath, 'static/images', secure_filename(f.filename))
        # NOTE: the target folder must already exist, or saving fails.
        upload_path = os.path.join(basepath, 'static/images',current+".png")
        f.save(upload_path)
        print(upload_path)
        # Optionally convert the image format/name with OpenCV:
        # img = cv2.imread(upload_path)
        # cv2.imwrite(os.path.join(basepath, 'static/images', 'test.jpg'), img)
        sc(upload_path,current)
        return render_template('upload_ok.html', userinput=user_input, val1=current)
    return render_template('upload.html')
if __name__ == '__main__':
    # Load the scoring model once at startup and warm it up with a dummy
    # prediction so the first real request is fast.
    model = Sequential()
    model = load_model('DenseNet121_2_model.h5')
    print("111111111111111111111111")
    test = np.zeros((1, 220, 220, 3))
    k = model.predict(test, batch_size=None, verbose=0, steps=None)
    print(k)
    app.run(host='localhost', port=8987, debug=False)
# 错误、调试和测试:错误处理、调试、单元测试和文档测试
# 錯誤處理 try 。。except 。。finally
import unittest
import logging
# Demo: ZeroDivisionError is raised, caught, and `finally` still runs.
try:
    print('try ...')
    r = 10 / 0  # raises ZeroDivisionError; the next line never executes
    print('result:', r)
except ZeroDivisionError as e:
    print('except:', e)
finally:
    print('finally...')
print('end')
# When some code may fail, run it under try; each except clause handles one
# specific error type.
try:
    print('try...')
    r = 10 / int('a')  # int('a') raises ValueError before the division
    print('result:', r)
except ValueError as e:
    print('ValueError:', e)
except ZeroDivisionError as e:
    print('ZeroDivisionError:', e)
finally:
    print('finally...')
print('END')
# Every Python error type inherits from BaseException; mind parent/child
# relationships -- e.g. UnicodeError is a subclass of ValueError.
# A try...except also catches errors raised several calls deep.
def foo(s):
    return 10 / int(s)
def bar(s):
    return foo(s) * 2
def main():
    # bar('0') divides by zero inside foo; caught here, two frames up.
    try:
        bar('0')
    except Exception as e:
        print('ERROR', e)
    finally:
        print('finally..')
# If an error is never caught it propagates upward until the interpreter
# prints a traceback and exits the program.
# Recording errors:
# import logging
def foo(s):
    return 10/int(s)
def bar(s):
    return foo(s)*2
def main():
    try:
        bar('0')
    except Exception as e:
        # logging.exception records the error with its traceback, and the
        # program keeps running afterwards.
        logging.exception(e)
main()
# Raising errors: derive a domain-specific exception and raise it yourself.
class FooError(ValueError):
    pass
def foo(s):
    n = int(s)
    if n==0:
        raise FooError('invalid value:%s' % s)
    return 10 /n
# Assertions: anywhere print() is used just for checking, assert can replace it.
def foo(s):
    n = int(s)
    # NOTE: assert is stripped under `python -O`; raise for real validation.
    assert n != 0, 'n is zero'
    return 10 / n
def main():
    foo('0')
# logging can also replace print; unlike assert it does not abort the
# program, and its output can be routed to a file.
s = '0'
n = int(s)
logging.info('n = %d' % n)
print(10 / n)  # NOTE(review): n == 0 here, so this line raises ZeroDivisionError
# logging lets you choose a severity per record: debug, info, warning,
# error -- listed from least to most severe.
# pdb starts the Python debugger for single-step execution:
# (Pdb) l
# s = '0'
# n = int(s)
# print(10 / n)
# Unit-testing subject: a dict whose keys double as attributes.
class Dict(dict):
    """A dict subclass allowing d.key as an alias for d['key']."""
    def __init__(self, **kw):
        super().__init__(**kw)
    def __getattr__(self, key):
        # Called only when normal attribute lookup fails.
        if key in self:
            return self[key]
        raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
    def __setattr__(self, key, value):
        # Store every attribute assignment as a dict entry.
        self[key] = value
# import unittest
class TestDict(unittest.TestCase):
    """Unit tests for Dict: construction, item/attribute access, and errors."""
    def test_init(self):
        d = Dict(a=1, b='test')
        self.assertEqual(d.a, 1)
        self.assertEqual(d.b, 'test')
        self.assertTrue(isinstance(d, dict))
    def test_key(self):
        d = Dict()
        d['key'] = 'value'
        self.assertEqual(d.key, 'value')
    def test_attr(self):
        d = Dict()
        d.key = 'value'
        self.assertTrue('key' in d)
        self.assertEqual(d['key'], 'value')
    def test_KeyError(self):
        d = Dict()
        with self.assertRaises(KeyError):
            value = d['empty']
    def test_AttrError(self):
        d = Dict()
        with self.assertRaises(AttributeError):
            value = d.empty
# Run the unit tests
if __name__ == '__main__':
    unittest.main()
# setUp/tearDown run before and after every individual test method.
# NOTE(review): this redefinition appears after unittest.main() above, which
# exits the interpreter when run as a script, so it is unreachable there.
class TestDict(unittest.TestCase):
    def setUp(self):
        print('setup...')
    def tearDown(self):
        print('tearDown...')
import time
# Inspect time.struct_time: the full record, its type, and single fields.
localtime = time.localtime(time.time())
print(localtime)
print(type(localtime))
print(localtime.tm_year)
print(localtime.tm_mon)
# localtime() uses the current time when called without an argument.
print(time.localtime())
|
# Settings from
# Pajonk, Oliver, et al.
# "A deterministic filter for non-Gaussian Bayesian estimation—applications to dynamical system estimation with noisy measurements."
# Physica D: Nonlinear Phenomena 241.7 (2012): 775-788.
#
# More interesting settings: mods.Lorenz84.harder
from common import *
from mods.Lorenz84.core import step, dfdx
from mods.Lorenz63.liveplotting import LP_setup
m = 3  # state dimension
p = m  # observation dimension (direct, full observations below)
day = 0.05/6 * 24 # coz dt=0.05 <--> 6h in "model time scale"
t = Chronology(0.05,dkObs=1,T=200*day,BurnIn=10*day)
m = 3  # NOTE(review): duplicate of the assignment above
# Dynamics: Lorenz-84 step with its Jacobian, no model noise.
f = {
    'm'    : m,
    'model': step,
    'jacob': dfdx,
    'noise': 0
    }
X0 = GaussRV(C=0.01,m=m) # Decreased from Pajonk's C=1.
# Observation operator: identity with observation noise variance 0.1.
h = {
    'm'    : p,
    'model': Id_op(),
    'jacob': Id_mat(p),
    'noise': 0.1,
    }
other = {'name': os.path.relpath(__file__,'mods/')}
HMM = HiddenMarkovModel(f,h,t,X0,**other)
HMM.liveplotting = LP_setup(arange(m))
####################
# Suggested tuning
####################
# cfgs += ExtKF(infl=2)
# cfgs += EnKF('Sqrt',N=3,infl=1.01)
# cfgs += PartFilt(reg=1.0, N=100, NER=0.4) # add reg!
# cfgs += PartFilt(reg=1.0, N=1000, NER=0.1) # add reg!
|
# extract_overlapping_QME_data.py
# by Cody Moser (10/13/2014)
# cody.moser@amec.com
# AMEC
# Description: extracts overlapping QME (non-missing) data
#from two time series in a .csv file
#import script modules
import os
import csv
#USER INPUT SECTION
input_csv = r'P:\\NWS\\MBRFC\\QME\\MUSM8\\MUSM8_MSBM8_Monthly_QME.csv'
output_csv = r'P:\\NWS\\MBRFC\\QME\\MUSM8\\MUSM8_MSBM8_Monthly_QME_Overlap.csv'
#END USER INPUT SECTION
# `with` guarantees both files are closed (the original never closed them).
with open(input_csv, 'r') as input_file, open(output_csv, 'w') as output_file:
    for line in input_file:
        #line.replace('/', '-')
        if "GMT" in line or "QME" in line:
            # Header rows: copy through once. (The original's separate `if`
            # statements wrote a header line twice whenever it also lacked
            # the -999 missing-data flag.)
            output_file.write(line)
        elif "-999" not in line:
            # Data rows where neither series has missing (-999) values.
            output_file.write(line)
print('Script Complete')
|
from src.output.DefaultOutput import DefaultOutput
class CliOutput(DefaultOutput):
    """Console implementation of the output interface: prints updates to stdout."""
    def frequencyUpdated(self, value):
        # print's default separator supplies the spaces around `value`.
        print("Frequency updated: ", value, " kHz\n")
"""
1 Кредитование
Создать программу которая посчитает кредит для потребителя по формуле
Month = (Summ * Proc * (1 + Proc)^Years) / (12 * ((1 + Proc)^Years – 1))
Где:
Month - размер месячной выплаты;
Summ - сумма займа (кредита);
Proc - процент банка, выраженный в долях единицы (т. е. если 20%, то будет 0.2).
Years - количество лет, на которые берется займ.
All - сумма выплат за весь период кредитования.
Вывод должен быть похож на:
Сколько хотите взять денег: s
Под какой процент вам их дают: p
Насколько лет берете: n
Ваш месячный платеж составит: m
За весь период вы заплатите: ALL
"""
def monthly_payment(summ, proc, years):
    """Annuity payment: Month = (Summ*Proc*(1+Proc)**Years) / (12*((1+Proc)**Years - 1)).

    `summ` is the loan amount, `proc` the yearly rate as a fraction
    (20% -> 0.2), `years` the term. Bug fix: the original used `*years`
    where the formula requires the power `**years`.
    """
    growth = (1 + proc) ** years
    return (summ * proc * growth) / (12 * (growth - 1))
if __name__ == '__main__':
    summ = float(input("Сколько хотите взять денег: \n"))
    proc = float(input("Под какой процент вам их дают(если 20%, то будет 0.2): \n"))
    years = float(input("Насколько лет берете: \n"))
    month = monthly_payment(summ, proc, years)
    print("Ваш месячный платеж составит: ", month)
    print("За весь период вы заплатите: ", month*12*years)
    input("Для продложения нажмите Enter \n")
|
# Generated by Django 3.0.6 on 2020-06-04 13:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add audio-feature and metadata fields to Song; give `bpm` a default of 0."""
    dependencies = [
        ('musicRun', '0003_auto_20200604_1334'),
    ]
    operations = [
        migrations.AddField(
            model_name='song',
            name='artists',
            field=models.CharField(default='none', max_length=64),
        ),
        migrations.AddField(
            model_name='song',
            name='danceability',
            field=models.FloatField(default=0),
        ),
        migrations.AddField(
            model_name='song',
            name='energy',
            field=models.FloatField(default=0),
        ),
        migrations.AddField(
            model_name='song',
            name='name',
            field=models.CharField(default='0', max_length=64),
        ),
        migrations.AddField(
            model_name='song',
            name='valence',
            field=models.FloatField(default=0),
        ),
        migrations.AlterField(
            model_name='song',
            name='bpm',
            field=models.IntegerField(default=0),
        ),
    ]
|
from telegram.ext.filters import BaseFilter
from .models import Chat
class GroupFilters(object):
    """Telegram message filters scoped to chats registered in the database."""
    class _AllowedGroups(BaseFilter):
        """Accept a message only if its chat id exists in the Chat table."""
        name = 'GroupFilters.allowed_groups'
        def filter(self, message):
            chat_id = message.chat.id
            # `True if x else False` was redundant; bool() is the idiomatic form.
            return bool(Chat.objects.get_or_none(chat_id=chat_id))
    allowed_groups = _AllowedGroups()
# Definition for singly-linked list.
class ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """
        Add two numbers whose digits are stored most-significant first.
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode (most-significant digit first, or None if both empty)
        """
        # Collect digits into stacks so we can consume them least-significant first.
        digits1, digits2 = [], []
        while l1:
            digits1.append(l1.val)
            l1 = l1.next
        while l2:
            digits2.append(l2.val)
            l2 = l2.next
        head = None
        carry = 0
        while digits1 or digits2:
            total = carry
            if digits1:
                total += digits1.pop()
            if digits2:
                total += digits2.pop()
            # Prepend the current digit. Bug fix: the original used `/` for
            # the carry, which produces floats on Python 3; `//` keeps ints.
            node = ListNode(total % 10)
            node.next = head
            head = node
            carry = total // 10
        if carry:
            node = ListNode(carry)
            node.next = head
            head = node
        return head
|
import setuptools
from sdcli.config import Config
# Runtime dependencies -- declared once and reused in install_requires below
# (the original duplicated them in a hard-coded list).
EXTENSIONS = {
    'Click',
    'colorama'
}
setuptools.setup(
    name=Config.NAME,
    version=Config.VERSION,
    author=Config.AUTHOR,
    author_email=Config.EMAIL,
    packages=setuptools.find_packages(),
    license=Config.LICENSE,
    description=Config.DESCRIPTION,
    long_description=open('README.md').read(),
    install_requires=sorted(EXTENSIONS),
    python_requires='>=3.7',
    platforms=['linux', 'macos'],
    entry_points={
        'console_scripts': [
            'sdcli = sdcli.__main__:main',
        ]
    },
    project_urls={
        'Code': Config.SOCIAL_MEDIAS['github']
    }
)
# Python3 default encoding is UTF-8
# 統一發票兌獎:
# 統一發票是一個八位數字(整數)的對獎方式,有一個特別獎號,一個特獎號及三個頭獎獎號,和數個增開六獎(三位數)
# 獎金則是根據下面規則給付:
# 特別獎:和特別獎號碼完全相同 獎金10000000 元
# 特獎:和特獎號碼完全相同 獎金 2000000 元
# 頭獎:和頭獎號碼完全相同 獎金 200000 元
# 二獎:和頭獎號碼最後 7 位數字相同,獎金 40000 元
# 三獎:和頭獎號碼最後 6 位數字相同,獎金 10000 元
# 四獎:和頭獎號碼最後 5 位數字相同,獎金 4000 元
# 五獎:和頭獎號碼最後 4 位數字相同,獎金 1000 元
# 六獎:和頭獎號碼最後 3 位數字相同,獎金 200 元
# 增開六獎:和增開六獎號碼完全相同 獎金 200 元
#
# 程式執行輸出畫面舉例如下:
# 特別獎號碼
# 請輸入第一個號碼:41275633
# 特獎號碼
# 請輸入第一個號碼:56188690
# 頭獎號碼
# 請輸入第一個號碼:68285428
# 請輸入第二個號碼:12031965
# 請輸入第三個號碼:92658431
# 使用者發票號碼
# 請輸入第1個號碼:41275633
# 請輸入第2個號碼:92631965
# 請輸入第3個號碼:51868431
# 獎金 10005000 元
#
# Copyright (c) 2016 by Hawk Sun
# No constant value in Python, just name a variable in all capital letters to differentiate from normal
# variable
DEBUG = 1  # set to 0 to silence the running-total debug prints
SPECIAL_PRICE_COUNT = 1  # how many special-prize numbers to enter
GRAND_PRICE_COUNT = 1  # how many grand-prize numbers to enter
FIRST_PRICE_COUNT = 3  # how many first-prize numbers to enter
EXTRA_SIXTH_PRICE_CNT = 3  # how many extra sixth-prize (3-digit) numbers
RECEIPT_LEN = 8  # a receipt number has 8 digits
SUCCESS = 8
# Suffix comparisons per first-prize number: full 8 digits down to the last 3.
DIGITS_COMPARE_TIMES = RECEIPT_LEN - 3 + 1
chtDigits = ["一", "二", "三","四","五","六","七","八","九"]  # Chinese ordinals for prompts
# Payouts: special, grand, 1st..6th prize, extra sixth prize.
price = [10000000, 2000000, 200000, 40000, 10000, 4000, 1000, 200, 200]
total = 0  # accumulated winnings
def InputPriceReceiptNo(COUNT):
    """Prompt for COUNT winning receipt numbers and return them as a list.

    Fix: the accumulator was named `list`, shadowing the builtin.
    """
    numbers = []  # create an empty list
    for loop in range (0, COUNT):
        numbers.append(input("請輸入第" + chtDigits[loop] + "個號碼:"))
    return numbers
def InputUserReceiptNo():
    """Prompt repeatedly for user receipt numbers; stop on a blank line.

    Returns the list of entered (non-blank) numbers. Fix: the accumulator
    was named `list`, shadowing the builtin.
    """
    numbers = []  # create an empty list
    loop = 0
    strUserNo = "Begin"
    while (strUserNo != ""):
        loop += 1
        strUserNo = input("請輸入第" + str(loop) + "個號碼:")
        if (strUserNo !="") : numbers.append(strUserNo)
    return numbers
print("特別獎號碼")
specialPrice = InputPriceReceiptNo(SPECIAL_PRICE_COUNT) # create a global list named specialPrice
print("特獎號碼")
grandPrice = InputPriceReceiptNo(GRAND_PRICE_COUNT) # create a global list named grandPrice
print("頭獎號碼")
firstPrice = InputPriceReceiptNo(FIRST_PRICE_COUNT) # create a global list named firstPrice
print("增開六獎號碼")
extra6Price = InputPriceReceiptNo(EXTRA_SIXTH_PRICE_CNT) # create a global list named extra6Price
print("使用者發票號碼")
userNo = InputUserReceiptNo()
#if DEBUG
# specialPrice = ["91909013"]
# grandPrice = ["95976127"]
# firstPrice = ["54845444", "41876525", "86331065"]
# userNo = ["91909013","95976127","41876525","16331065","22845444"]
# extra6Price = ["013", "444", "555"]
for loop1 in range(0, len(userNo)):
# 特別獎獎金計算
for loop2 in range(0, SPECIAL_PRICE_COUNT):
if (userNo[loop1] == specialPrice[loop2]): # win
total += price[0]
if DEBUG:
print("(特別獎)累積獎金%d元\n" % total)
# loop2
for loop2 in range(0, GRAND_PRICE_COUNT):
if (userNo[loop1] == grandPrice[loop2]): # win
total += price[1]
if DEBUG:
print("(特獎)累積獎金%d元\n" % total)
# loop2
# 頭獎~六獎 獎金計算
for loop2 in range(0, FIRST_PRICE_COUNT):
for loop3 in range(0, DIGITS_COMPARE_TIMES):
if ((userNo[loop1][loop3:]) == (firstPrice[loop2][loop3:])): # win
total += price[2 + loop3]
if DEBUG:
print("userNo[%d][%d:] = %s, firstPrice[%d][%d:] = %s\n"
% (loop1, loop3, userNo[loop1][loop3:], loop2, loop3, firstPrice[loop2][loop3:]))
print("(頭獎~六獎)累積獎金%d元\n" % total)
break # exit the loop3, means to compare the next number
for loop2 in range(0, EXTRA_SIXTH_PRICE_CNT):
print(userNo[loop1][-3:])
if (userNo[loop1][-3:] == extra6Price[loop2]): # win
total += price[-1]
if DEBUG:
print("(增開六獎)累積獎金%d元\n" % total)
# loop2
# loop1
print("\n獎金%d元" % total, end=" ")
|
from flask import Blueprint
from flask import jsonify
from flask import request
from google.oauth2 import service_account
from google.auth.transport.requests import AuthorizedSession
from google.cloud import datastore
from google.cloud import bigquery
from google.cloud import storage
from google.cloud.exceptions import BadRequest
import logging
import uuid
import json
import urllib3
import urllib
import socket
import requests
import os
import dataflow_pipeline.massive as pipeline
import cloud_storage_controller.cloud_storage_controller as gcscontroller
import datetime
import time
import sys
probando_api = Blueprint('probando_api', __name__)
@probando_api.route("/prueba")
def prueba_api():
    """Look up a person's contact record in BigQuery by `cedula` (id number).

    Query string: ?cedula=<id>. Returns the matched row as JSON, or a
    plain prompt when no cedula is supplied.
    """
    reload(sys)
    sys.setdefaultencoding('utf8')  # Python 2 era workaround; drop on Python 3
    cedula = request.args.get('cedula')
    if cedula is None:
        return 'Ingrese una cedula'
    else:
        client = bigquery.Client()
        # Parameterized query. The original interpolated `cedula` directly
        # into the SQL string, which allowed SQL injection.
        QUERY = ('SELECT * FROM `contento-bi.linea_directa.prueba_api` '
                 'WHERE cedula = @cedula')
        job_config = bigquery.QueryJobConfig()
        job_config.query_parameters = [
            bigquery.ScalarQueryParameter('cedula', 'STRING', str(cedula)),
        ]
        query_job = client.query(QUERY, job_config=job_config)
        rows = query_job.result()
        result = {}
        for row in rows:
            result["identificador"] = row.identificador
            result["tipo"] = row.tipo
            result["nombre"] = row.nombre
            result["dir1"] = row.dir1
            result["dir2"] = row.dir2
            result["dir3"] = row.dir3
            result["dir4"] = row.dir4
            result["barr1"] = row.barr1
            result["barr2"] = row.barr2
            result["barr3"] = row.barr3
            result["barr4"] = row.barr4
            result["cd1"] = row.cd1
            result["cd2"] = row.cd2
            result["cd3"] = row.cd3
            result["cd4"] = row.cd4
            result["fexpedicion"] = row.fexpedicion
            result["fechanacimiento"] = row.fechanacimiento
            result["fijo"] = row.fijo
            result["Cel"] = row.Cel
            result["direccion"] = row.direccion
            result["Barrio"] = row.Barrio
            result["ciudad"] = row.ciudad
            result["pevidente1"] = row.pevidente1
            result["pevidente2"] = row.pevidente2
            result["revidente1"] = row.revidente1
            result["revidente2"] = row.revidente2
        respuesta = jsonify(result)
        return respuesta
|
import requests
# r = requests.get("http://www.amazon.cn/gp/product/B01M8L5Z3Y")
# print(r.status_code)
# print(r.encoding)
# print(r.request.headers)
# kv = {'user-agent': 'Mozilla/5.0'}
# r = requests.get("http://www.amazon.cn/gp/product/B01M8L5Z3Y", headers=kv)
# print(r.request.headers)
# print(r.text)
def getAmazonItemPage(url):
    """Fetch an Amazon product page and return characters 1000-3000 of the HTML.

    Returns the string "产生异常" ("an exception occurred") on any request
    failure, matching the original contract.
    """
    try:
        kv = {'user-agent': 'Mozilla/5.0'}  # Amazon rejects the default UA
        r = requests.get(url, headers=kv)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text[1000:3000]
    except requests.RequestException:
        # The original bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; catch only request-related errors.
        return "产生异常"
if __name__ == '__main__':
    url = "http://www.amazon.cn/gp/product/B01M8L5Z3Y"
    print(getAmazonItemPage(url))
|
import json
from elasticsearch import Elasticsearch
from elasticsearch import helpers
# Target cluster and index for the bulk re-index.
es_host = ''  # NOTE(review): empty -- must be filled in before running
index_name = 'terms-lookup'
def read_json_dump(path='/home/dandric/terms-lookup-1.json'):
    """Load an Elasticsearch search dump and return its list of hits.

    :param path: JSON file produced by an ES search; the default keeps the
        original hard-coded location for existing callers.
    """
    with open(path) as data_file:
        print('Starting JSON Loading...')
        data = json.load(data_file)
        hits = data['hits']['hits']
        return hits
def bootstrap_es():
    """Create an Elasticsearch client for the configured `es_host`."""
    es = Elasticsearch(hosts=[es_host])
    return es
def hit_to_action(hit):
    """Convert one ES search hit into a bulk-index action for `index_name`."""
    action = {
        '_index': index_name,
        '_type': hit['_type'],
        '_id': hit['_id'],
        '_source': hit['_source'],
    }
    return action
def index_hits(es, hits):
    """Bulk-index `hits` (converted to actions) with retries and a long timeout."""
    print('Starting Bulk Index')
    actions = map(hit_to_action, hits)
    helpers.bulk(es, actions, max_retries=5, request_timeout=120)
def run():
    """Read the dump file, connect to the cluster, and re-index everything."""
    hits = read_json_dump()
    es = bootstrap_es()
    index_hits(es, hits)
run()
|
from flask import Flask, render_template
import os
import sys
from flask import request
from random import randint
import tact_util as t_util
app = Flask(__name__)
@app.route('/')
def home():
    """Serve the landing page with the lookup form."""
    return render_template('index.html')
@app.route('/result', methods=['POST'])
def result():
    """Handle the lookup form: resolve country details for the posted name."""
    name = request.form.get('name')
    # request.form.get returns None when the field is missing; the original
    # then crashed on None.lower() with an AttributeError (HTTP 500).
    name = (name or '').lower()
    country_details = t_util.get_country_details(name)
    result = {
        'apiresult' : 0,
        'apimessage' : 'OK',
        'name' : name,
        'country_details' : country_details
    }
    return render_template('index.html', result=result)
if __name__ == "__main__":
app.run()
|
"""
Result analysis.py
Observing results of the vowel elimination algorithm in different time bands. Results stored in file with _Analysis.csv
extension.
Author: Rishabh Brajabasi
Date: 2nd May 2017
"""
# Input: per-file vowel-elimination results; output: a per-time-band summary
# written next to the input with an _Analysis.csv suffix.
file_name_template_1 = 'F:\Projects\Active Projects\Project Intern_IITB\Vowel Evaluation PE V6\\Vowel_Evaluation_V6_Test_7.csv'
results_vowel = open(file_name_template_1)  # The csv file where the results are saved
file_name_template_2 = file_name_template_1[:-4] + '_Analysis.csv'
results_analysis = open(file_name_template_2, 'w')  # The csv file where the results are saved
# Split the CSV into rows, then rows into columns; drop the header row and
# the empty trailing row produced by the final newline.
data = results_vowel.read()
one = data.split('\n')
two = []
for i in range(len(one)):
    two.append(one[i].split(','))
two.pop(0)
two.pop(-1)
results_analysis.write('Start time' + ',' + 'End time' + ',' + 'Count' + ',' + 'Precision' + ',' + 'Recall' + ',' + 'Files' + '\n')
def results(analyze, time_1, time_2):
    """Aggregate precision/recall for rows whose duration falls inside
    the (time_1, time_2) band and append one summary row to the global
    results_analysis CSV.

    analyze -- list of CSV rows (lists of strings). Columns used:
               0 file name, 5 duration, 6 precision, 7 recall, 8 status.
    """
    count = 0
    precision = []
    recall = []
    names = []
    # Bug fix: iterate over the argument itself rather than the module
    # global `one` (len(analyze) == len(one) - 2 after header/footer pops,
    # but depending on the global was fragile).
    for element in range(len(analyze)):
        if analyze[element][8] == 'Fine':
            if time_1 < float(analyze[element][5]) < time_2:
                count += 1
                precision.append(float(analyze[element][6]))
                recall.append(float(analyze[element][7]))
                names.append(analyze[element][0])
    # Bug fixes: report the band actually analysed (time_1/time_2) instead
    # of the module globals start_time/end_time, and avoid a
    # ZeroDivisionError when no rows fall into the band.
    avg_precision = sum(precision) / len(precision) if precision else 0.0
    avg_recall = sum(recall) / len(recall) if recall else 0.0
    results_analysis.write(str(time_1) + ',' + str(time_2) + ',' + str(count) + ',' + str(avg_precision) + ',' + str(avg_recall) + ',' + str(names) + ',' + '\n')
# Sweep 0.5-second duration bands from 0.0 to 12.0 seconds, then one
# catch-all band for everything from 12 to 60 seconds. This replaces 25
# copy-pasted start_time/end_time/results(...) triples with a loop; the
# module-level start_time/end_time globals are still assigned each
# iteration, so behaviour is identical for any code that reads them.
bands = [(i * 0.5, (i + 1) * 0.5) for i in range(24)]
bands.append((12.0, 60.0))
for start_time, end_time in bands:
    results(two, start_time, end_time)
|
# Generated by Django 3.0.3 on 2020-05-09 22:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional 'short_descriptions' text field to the Case model."""
    dependencies = [
        ('cases', '0003_auto_20200509_2240'),
    ]
    operations = [
        migrations.AddField(
            model_name='case',
            name='short_descriptions',
            # verbose_name '简介' is the Chinese admin label ("brief intro").
            field=models.TextField(blank=True, null=True, verbose_name='简介'),
        ),
    ]
|
import os
import sys
import subprocess
import shutil
import hashlib
import fam
sys.path.insert(0, 'scripts')
sys.path.insert(0, os.path.join("tools", "phyldog"))
sys.path.insert(0, os.path.join("tools", "trees"))
import experiments as exp
import link_file_from_gene_tree as phyldog_link
import sequence_model
import rescale_bl
import analyze_tree
from ete3 import Tree
from ete3 import SeqGroup
def generate_jprime_species(species, output, seed):
    """Run jprime HostTreeGen to simulate a species tree into `output`.

    species -- the species_time_interval parameter (see __main__ syntax)
    output  -- directory receiving the species.* files
    seed    -- RNG seed for reproducibility
    """
    print("species tree...")
    # NOTE(review): species_parameters_file is computed but never used.
    species_parameters_file = os.path.join(output, "SpeciesTreeParameters.tsv")
    command = []
    command.append("java")
    command.append("-jar")
    command.append(exp.jprime_jar)
    command.append("HostTreeGen")
    command.append("-nox")
    command.append(str(species))
    command.append("-s")
    command.append(str(seed))
    command.append("-min")
    command.append("5")
    command.append("1")
    command.append("0")
    command.append(os.path.join(output, "species"))
    subprocess.check_call(command)
    species_tree = os.path.join(output, "species.pruned.tree")
    # Workaround: rewrite one specific scientific-notation branch length
    # as plain decimal so downstream parsers accept the tree.
    subprocess.check_call(["sed", "-i", "s/1.443047701658439E-4/0.0001443047701658439/g", species_tree])
    print(open(os.path.join(output, "species.pruned.tree")).read())
def generate_jprime_genome(families, dup_rate, loss_rate, transfer_rate, output, seed):
    """Run jprime GuestTreeGen once per family to simulate gene trees
    along the previously generated pruned species tree.

    Each family i uses seed i + seed and writes output/<i>_gene.* files.
    """
    species_tree = os.path.join(output, "species.pruned.tree")
    for index in range(families):
        print("gene " + str(index) + "/" + str(families))
        command = [
            "java", "-jar", exp.jprime_jar,
            "GuestTreeGen",
            # "-nox",
            "-max", "5000000",
            "-s", str(index + seed),
            species_tree,
            str(dup_rate),
            str(loss_rate),
            str(transfer_rate),
            os.path.join(output, str(index) + "_gene"),
        ]
        subprocess.check_call(command)
def generate_seqgen_sequence(families, sites, model, output, seed):
    """Run seq-gen on every pruned gene tree to simulate alignments.

    Writes one output/<i>.fasta per family; substitution-model parameter
    sets cycle through the samples provided by sequence_model for the
    given model name.
    """
    model_samples = sequence_model.get_model_samples(model)
    seqgen_model_cmd_samples = [sequence_model.model_to_seqgen_cmd(m) for m in model_samples]
    for i in range(0, families):
        print("sequence " + str(i) + "/" + str(families))
        jprime_tree = os.path.join(output, str(i) + "_gene.pruned.tree")
        sequence_file = os.path.join(output, str(i) + ".fasta")
        # seq-gen flags: -l sites, -a gamma shape, -i invariant-site
        # proportion, -z per-family seed; -of presumably selects fasta
        # output — confirm against the seq-gen manual.
        command = []
        command.append(exp.seq_gen_exec)
        command.append("-l")
        command.append(str(sites))
        command.append("-of")
        command.append(jprime_tree)
        command.append("-a")
        command.append("1.0")
        command.append("-i")
        command.append("0.1")
        command.append("-z")
        command.append(str(int(i) + int(seed)))
        command.extend(seqgen_model_cmd_samples[i % len(seqgen_model_cmd_samples)])
        with open(sequence_file, "w") as writer:
            subprocess.check_call(command, stdout=writer)
def build_mapping_file(jprime_mapping, phyldog_mapping, treerecs_mapping, family):
    """Convert a jprime leafmap (gene<TAB>species per line) into the two
    mapping formats used downstream.

    treerecs: one "gene species" line per gene.
    phyldog:  one "species:gene1;gene2;..." line per species.
    Gene names are made family-unique via rename_with_family.
    """
    # Bug fix: the original opened three files and never closed any of
    # them; use context managers so the writers are flushed and closed.
    with open(jprime_mapping) as reader:
        lines = reader.readlines()
    dico = {}
    with open(phyldog_mapping, "w") as phyldog_writer, \
            open(treerecs_mapping, "w") as treerecs_writer:
        for line in lines:
            split = line.split("\t")
            split[1] = split[1].rstrip()
            split[0] = rename_with_family(split[0], family)
            treerecs_writer.write(split[0] + " " + split[1] + "\n")
            if (not split[1] in dico):
                dico[split[1]] = []
            dico[split[1]].append(split[0])
        for species, genes in dico.items():
            phyldog_writer.write(species + ":" + ";".join(genes) + "\n")
def rename_with_family(zombi_gene_name, family):
    """Make a gene name unique across families.

    Underscores in both parts are replaced by the 'UUU' sentinel and the
    family name is appended with the same separator.
    """
    escaped_gene = zombi_gene_name.replace("_", "UUU")
    escaped_family = family.replace("_", "UUU")
    return "UUU".join([escaped_gene, escaped_family])
"""
Rename all nodes from the zombi gene tree (see rename_with_family)
"""
def copy_and_rename_tree(src, dest, family):
tree = Tree(src, 1)
for node in tree.traverse("postorder"):
node.name = rename_with_family(node.name, family)
open(dest, "w").write(tree.write())
"""
Rename all taxa in the zombi alignments (see rename_with_family)
"""
def copy_and_rename_alignment(src, dest, family):
seqs = SeqGroup(open(src).read()) #, format="phylip_relaxed")
new_seqs = SeqGroup()
for entry in seqs.get_entries():
new_seqs.set_seq(rename_with_family(entry[0], family), entry[1])
open(dest, "w").write(new_seqs.write())
def jprime_to_families(jprime, out):
    """Convert a raw jprime output directory into the per-family layout
    expected by the `fam` module (species tree, true trees, alignments
    and phyldog/treerecs mapping files).

    Families with an empty/near-empty pruned gene tree are skipped.
    """
    fam.init_top_directories(out)
    families = []
    for genetree_base in os.listdir(jprime):
        if (not "gene.pruned.tree" in genetree_base):
            continue
        genetree = os.path.join(jprime, genetree_base)
        # Skip families whose pruned tree is effectively empty (< 2 bytes).
        if (os.path.getsize(genetree) < 2):
            continue
        family_number = genetree_base.split("_")[0]
        family = family_number + "_pruned"
        families.append(family)
    fam.init_families_directories(out, families)
    # species tree
    species = os.path.join(jprime, "species.pruned.tree")
    shutil.copyfile(species, fam.get_species_tree(out))
    for family in families:
        family_number = family.split("_")[0]
        # true trees
        gene_tree = os.path.join(jprime, family_number + "_gene.pruned.tree")
        # alignment
        alignment = os.path.join(jprime, family_number + ".fasta")
        # true trees (renamed per family)
        copy_and_rename_tree(gene_tree, fam.get_true_tree(out, family), family)
        # alignment (taxa renamed per family)
        copy_and_rename_alignment(alignment, fam.get_alignment(out, family), family)
        # link file: jprime leafmap -> phyldog and treerecs mapping formats
        jprime_mapping = os.path.join(jprime, family_number + "_gene.pruned.leafmap")
        phyldog_mapping = fam.get_mappings(out, family)
        treerecs_mapping = fam.get_treerecs_mappings(out, family)
        build_mapping_file(jprime_mapping, phyldog_mapping, treerecs_mapping, family)
def rescale_trees(jprime_output, families, bl_factor):
    """Strip jprime annotations from each pruned gene tree and rescale
    its branch lengths by bl_factor (all edits are in place)."""
    for i in range(0, families):
        tree = os.path.join(jprime_output, str(i) + "_gene.pruned.tree")
        # Remove bracketed [...] annotation blocks.
        subprocess.check_call(["sed", "-i", "s/\[[^][]*\]//g", tree])
        # Drop internal-node labels between ')' and ':'.
        subprocess.check_call(["sed", "-i", "s/)[^:]*:/):/g", tree])
        rescale_bl.rescale_bl(tree, tree, bl_factor)
        # Presumably strips the ')1:' support markers rescale_bl writes
        # back — confirm against rescale_bl's output format.
        subprocess.check_call(["sed", "-i", "s/)1:/):/g", tree])
def get_output(tag, species, families, sites, model, bl_factor, dup_rate, loss_rate, transfer_rate, perturbation):
    """Build the canonical dataset directory name encoding every
    simulation parameter (prefix 'jsimdtl' when transfers are enabled,
    'jsim' otherwise)."""
    prefix = "jsimdtl" if float(transfer_rate) != 0.0 else "jsim"
    parts = [
        prefix,
        tag,
        "s" + str(species) + "_f" + str(families),
        "sites" + str(sites),
        model,
        "bl" + str(bl_factor),
        "d" + str(dup_rate) + "_l" + str(loss_rate),
        "t" + str(transfer_rate),
        "p" + str(perturbation),
    ]
    return "_".join(parts)
def generate_jprime(tag, species, families, sites, model, bl_factor, dup_rate, loss_rate, transfer_rate, perturbation, root_output, seed):
    """Full simulation pipeline: species tree -> gene trees -> branch-length
    rescaling -> sequences -> per-family dataset directory.

    A temporary directory named after the md5 of all parameters is used
    so concurrent runs with different parameters cannot collide; it is
    finally renamed to the canonical name from get_output().
    """
    to_hash = str(tag) + str(species) + str(families) + str(sites) + model + str(bl_factor) + str(dup_rate) + str(loss_rate) + str(transfer_rate) + str(seed) + str(perturbation)
    md5 = hashlib.md5(to_hash.encode())
    output = os.path.join(root_output, "jprime_temp_" + str(md5.hexdigest()))
    # Remove any stale temp dir from a previous failed run.
    shutil.rmtree(output, True)
    print("Writing output in " + output)
    os.makedirs(output)
    jprime_output = os.path.join(output, "jprime")
    os.makedirs(jprime_output)
    # Record the exact invocation parameters next to the raw jprime output.
    with open(os.path.join(jprime_output, "jprime_script_params.txt"), "w") as writer:
        writer.write(tag + " ")
        writer.write(str(species) + " " + str(families) + " ")
        writer.write(str(sites) + " " + str(model) + " ")
        writer.write(str(bl_factor)+ " " + str(dup_rate) + " ")
        writer.write(str(loss_rate) + " " + str(transfer_rate) + " " + str(perturbation) + " " + output)
        writer.write(" " + str(seed))
    generate_jprime_species(species, jprime_output, seed)
    generate_jprime_genome(families, dup_rate, loss_rate, transfer_rate, jprime_output, seed)
    rescale_trees(jprime_output, families, bl_factor)
    generate_seqgen_sequence(families, sites, model, jprime_output, seed)
    print("jprime output: " + jprime_output)
    jprime_to_families(jprime_output, output)
    # The final directory name records the actual taxa count of the
    # pruned species tree, not the requested `species` parameter.
    species_nodes = analyze_tree.get_tree_taxa_number(os.path.join(jprime_output, "species.pruned.tree"))
    new_output = os.path.join(root_output, get_output(tag, species_nodes, families, sites, model, bl_factor, dup_rate, loss_rate, transfer_rate, perturbation))
    shutil.move(output, new_output)
    fam.perturbate_species_tree(new_output, perturbation)
    fam.postprocess_datadir(new_output)
    print("Final output directory: " + new_output)
    print("")
if (__name__ == "__main__"):
    # argv layout: 1 tag, 2 species_time_interval, 3 families, 4 sites,
    # 5 model, 6 bl_scaler, 7 dup_rate, 8 loss_rate, 9 transfer_rate,
    # 10 species_perturbation, 11 output, 12 seed.
    # Bug fix: the model name is argv[5] (argv[4] is the site count), so
    # validate argv[5] against the known model sample names.
    if (len(sys.argv) != 13 or not (sys.argv[5] in sequence_model.get_model_sample_names())):
        if (len(sys.argv) != 13):
            print("Invalid number of parameters")
        print("Syntax: python generate_jprime.py tag species_time_interval families sites model bl_scaler dup_rate loss_rate transfer_rate species_perturbation output seed")
        print("model should be one of " + str(sequence_model.get_model_sample_names()))
        exit(1)
    tag = sys.argv[1]
    species = int(sys.argv[2])
    families = int(sys.argv[3])
    sites = int(sys.argv[4])
    model = sys.argv[5]
    bl_factor = float(sys.argv[6])
    dup_rate = float(sys.argv[7])
    loss_rate = float(sys.argv[8])
    transfer_rate = float(sys.argv[9])
    perturbation = float(sys.argv[10])
    output = sys.argv[11]
    seed = int(sys.argv[12])
    generate_jprime(tag, species, families, sites, model, bl_factor, dup_rate, loss_rate, transfer_rate, perturbation, output, seed)
|
#!/usr/bin/env python
# coding: utf-8
# In[195]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridSpec
# Jupyter-exported script: the magic below only works when run inside IPython.
get_ipython().run_line_magic('matplotlib', 'notebook')
plt.style.available  # NOTE(review): no-op expression — its value is discarded
plt.style.use('seaborn-colorblind');
# In[196]:
#DATASET 1 - MAPUTO
# Load the Maputo CPI workbook; the first 4 rows are headers/preamble.
df3=pd.read_excel(r'C:\Users\bulule\Downloads\IPC_DE_Maputo.xlsx', skiprows=4,index_col=False)
df4=pd.DataFrame(df3)
# Normalise column headers and the description column by stripping spaces.
df4.columns=df4.columns.str.replace(' ','')
df4['Descrição']=df4['Descrição'].str.replace(' ','')
plt.style.use(style='fast')
df4=df4.drop([0,1])
#Slice on the homologous variation (rows 19-24 = years 2009-2014)
df_var_homologa=df4.loc[19:24,:]
df_var_homologa=df_var_homologa.drop(['Descrição','Unnamed:1'],axis=1)
df_var_homologa=df_var_homologa.set_index([[1,2,3,4,5,6], 'Ano'])
df_var_homologa.index=[2009, 2010, 2011, 2012, 2013, 2014]
# Transpose so months become the index, then add a row-wise mean column.
df_var_homologaT=df_var_homologa.T
df_var_homologaT['average_inflation_rate']=df_var_homologaT.mean(axis=1)
df_var_homologa=df_var_homologaT.T
df_maputo_homol=df_var_homologaT
#Slice on the annual average rate (rows 25 onward)
df4_variacao_media=df4.loc[25:,:]
df4_variacao_media=df4_variacao_media.drop(['Descrição','Unnamed:1'],axis=1)
df4_variacao_media=df4_variacao_media.set_index([[1,2,3,4,5,6], 'Ano'])
df4_variacao_media.index=[2009,2010,2011,2012,2013,2014]
# Bug fix: the transpose was assigned twice on consecutive lines; the
# redundant duplicate assignment has been removed.
df4_variacao_mediaT=df4_variacao_media.T
df4_variacao_mediaT['avg_rate']=df4_variacao_mediaT.mean(axis=1)
df4_variacao_media=df4_variacao_mediaT.T
df_maputo_media=df4_variacao_mediaT
# In[197]:
#DATASET2 - - BEIRA
# Load the Beira CPI workbook; skip the 8 preamble rows and 42 footer rows.
df_Beira_2018=pd.read_excel(r'C:\Users\bulule\Downloads\IPCBeira_Quadros_Dezembro18.xls', skiprows=8,skipfooter=42, index_col=False)
#Slicing the average inflation (rows 13 onward = years 2016-2018)
df_Beira_infl=df_Beira_2018.loc[13:,:]
df_Beira_infl=df_Beira_infl.drop(['Descrição','Unnamed: 1'],axis=1)
df_Beira_infl=df_Beira_infl.set_index([[1,2,3], 'Ano'])
df_Beira_infl.index=[2016,2017,2018]
# Transpose so months become the index, then add a row-wise mean column.
df_Beira_inflT=df_Beira_infl.T
df_Beira_inflT['avg_inflation']=df_Beira_inflT.mean(axis=1)
df_Beira_infl=df_Beira_inflT.T
df_Beira_media=df_Beira_inflT
#Slicing the homologous variation (rows 10-12 = years 2016-2018)
df_Beira_homol=df_Beira_2018.loc[10:12,:]
df_Beira_homol=df_Beira_homol.drop(['Descrição','Unnamed: 1'], axis=1)
df_Beira_homol=df_Beira_homol.set_index([[1,2,3], 'Ano'])
df_Beira_homol.index=[2016,2017,2018]
df_Beira_homolT=df_Beira_homol.T
df_Beira_homolT['avg_homol']=df_Beira_homolT.mean(axis=1)
df_Beira_homol=df_Beira_homolT
# In[200]:
#
# Two stacked subplots: Beira on top, Maputo below.
plt.figure(figsize=(8,6))
gspec = gridSpec.GridSpec(2,1)
ax_beira=plt.subplot(gspec[0,0])
ax_maputo=plt.subplot(gspec[1,0])
plt.subplots_adjust(hspace=0.9)
# NOTE(review): the month labels come from the Maputo table but are also
# applied to the Beira axis — confirm both tables share the same months.
months=df_var_homologaT.index.values
#Plot Maputo
ax_maputo.plot(months,df_maputo_homol['average_inflation_rate'],'o-',linewidth=1,alpha=0.75, label='average_homologous_rate')
ax_maputo.plot(months,df_maputo_media['avg_rate'],'o-',linewidth=1,alpha=0.75, label='average_inflation_rate')
ax_maputo.set_title('Homologius variation vs average variation in Maputo Province [2009-2014]\n')
ax_maputo.grid(True)
# NOTE(review): set_xticklabels without set_xticks can misalign labels.
ax_maputo.set_xticklabels(months)
ax_maputo.set_ylabel('Variation in %')
ax_maputo.set_xlabel('MONTHS')
ax_maputo.legend()
#Plot Beira
ax_beira.plot(months, df_Beira_homol['avg_homol'],'o--',linewidth=1,alpha=0.75, label='average_homologous_rate')
ax_beira.plot(months, df_Beira_media['avg_inflation'],'o--',linewidth=1,alpha=0.75, label='average_inflation_rate')
ax_beira.set_title('Homologius variation vs average variation in Beira Province [2016-2018]\n')
ax_beira.grid(True)
ax_beira.set_xticklabels(months)
ax_beira.set_ylabel('Variation in %')
ax_beira.set_xlabel('MONTHS')
ax_beira.legend()
# In[ ]:
# In[ ]:
|
# Generated by Django 3.2.9 on 2021-12-06 16:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Update Event.description's help text to mention Markdown support."""
    dependencies = [
        ('osmcal', '0028_user_home_location'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='description',
            field=models.TextField(blank=True, help_text='Tell people what the event is about and what they can expect. You may use <a href="https://daringfireball.net/projects/markdown/syntax" target="_blank">Markdown</a> in this field.', null=True),
        ),
    ]
|
class Solution(object):
    def bitwiseComplement(self, num):
        """Return the complement of num: flip every bit of its minimal
        binary representation (e.g. 5 = 101 -> 010 = 2; 0 -> 1).

        :type num: int
        :rtype: int
        """
        # bin(num) is '0b...', so len(bin(num)) - 2 is the bit width
        # (bin(0) == '0b0' counts as one bit). XOR with the all-ones
        # mask of that width flips exactly those bits.
        bits = len(bin(num)) - 2
        mask = (1 << bits) - 1
        return mask ^ num
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import svm, neural_network, naive_bayes
from sklearn.linear_model import Perceptron
from sklearn import preprocessing
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Load the training attributes and their 30-day outcomes.
Attributes = pd.read_csv("hm_hospitales_covid_structured_30d_train.csv")
Outcomes = pd.read_csv("split_train_export_30d.csv")
# Derive the row count from the data instead of hard-coding 1834 so the
# script keeps working when the training split changes size.
Output_format = {'PATIENT ID': Attributes['PATIENT ID'], 'hospital_outcome': np.zeros(len(Attributes), dtype=int)}
Output = pd.DataFrame(Output_format)
Data = Attributes.drop(labels=['PATIENT ID', 'admission_datetime'], axis='columns')
# Encode categorical columns numerically.
Data.loc[Data['sex'] == 'FEMALE', 'sex'] = 0
Data.loc[Data['sex'] == 'MALE', 'sex'] = 1
Data.loc[Data['ed_diagnosis'] == 'sx_breathing_difficulty', 'ed_diagnosis'] = 1
Data.loc[Data['ed_diagnosis'] == 'sx_others', 'ed_diagnosis'] = 2
# flu / fever / cough are pooled into a single code.
Data.loc[Data['ed_diagnosis'] == 'sx_flu', 'ed_diagnosis'] = 3
Data.loc[Data['ed_diagnosis'] == 'sx_fever', 'ed_diagnosis'] = 3
Data.loc[Data['ed_diagnosis'] == 'sx_cough', 'ed_diagnosis'] = 3
# Impute missing values with each column's median.
Data = Data.fillna(Data.median())
Data['hospital_outcome'] = Outcomes['hospital_outcome']
# NOTE(review): unusual outlier rule — flags |x - median| > std + 1
# (not k*std); flagged cells are re-imputed with the median. Confirm intent.
outliers = (Data - Data.median()).abs() > Data.std() + 1
Data[outliers] = np.nan
Data.fillna(Data.median(), inplace=True)
# Min-max scale every column (including the binary outcome) into [0, 1].
normalization = Data.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
normalization_scaled = min_max_scaler.fit_transform(normalization)
Data = pd.DataFrame(normalization_scaled, columns=Data.columns)
# Split into deceased / survived cohorts for exploratory plotting.
Dead_data = Data.loc[Data['hospital_outcome'] == 1]
# Male_Dead = Dead_data.loc[Dead_data['sex'] == 1]
# Female_Dead = Dead_data.loc[Dead_data['sex'] == 0]
#Dead_data = Dead_data.sample(n = 150)
Alive_data = Data.loc[Data['hospital_outcome'] == 0]
# Male_Alive = Alive_data.loc[Alive_data['sex'] == 1]
# Female_Alive = Alive_data.loc[Alive_data['sex'] == 0]
# The commented-out blocks below are leftover exploratory experiments
# (Box-Cox normalisation, per-feature mortality stats, 2-D scatter plots);
# note several contain typo'd column names (e.g. 'lab_s1odium').
# pt = preprocessing.PowerTransformer(method='box-cox')
# pt.fit(Dead_data[['age', 'lab_s1odium']])
# Dead_normal = pt.transform(Dead_data[['age', 'lab_1sodium']])
# Dead_data['normal_age'] = Dead_normal[:,0]
# Dead_data['normal_sodium'] = Dead_normal[:,1]
# pt.fit(Alive_data[['age', 'lab_1sodium']])
# Alive_normal = pt.transform(Alive_data[['age', 'lab_so1dium']])
# Alive_data['normal_age'] = Alive_normal[:,0]
# Alive_data['normal_sodium'] = Alive_normal[:,1]
# print('Rate of value = 1:', len(Data.loc[Data['pmhx_diabetes'] == 1]) / len(Data['pmhx_diabetes']))
# print('mortality rate where value == 1:', len(Dead_data.loc[Dead_data['pmhx_diabetes'] == 1]) / len(Data.loc[Data['pmhx_diabetes'] == 1]))
# print('mortality rate where value == 0:', len(Dead_data.loc[Dead_data['pmhx_diabetes'] == 0]) / len(Data.loc[Data['pmhx_diabetes'] == 0]))
# print('overall mean, std, max, min', Data['pmhx_activecancer'].mean(), Data['lab_ddimer'].std(), Data['lab_ddimer'].max(), Data['lab_ddimer'].min())
# print('deceased mean, std, max, min', Dead_data['pmhx_activecancer'].mean(), Dead_data['pmhx_activecancer'].std(), Dead_data['lab_ddimer'].max(), Dead_data['lab_ddimer'].min())
# print('survivors mean, std, max, min', Alive_data['pmhx_activecancer'].mean(), Alive_data['lab_ddimer'].std(), Alive_data['lab_ddimer'].max(), Alive_data['lab_ddimer'].min())
# plt.scatter(Alive_data['vitals_temp_ed_first'], Alive_data['vitals_sp1o2_ed_first'], color='red', alpha=0.4)
# plt.scatter(Dead_data['vitals_temp_ed_first'], Dead_data['vitals_spo2_1ed_first'], color='blue')
# plt.xlabel('vitals_temp_ed_first', fontsize=14)
# plt.ylabel('vitals_spo2_e1d_first', fontsize=14)
# plt.grid(True)
# plt.show()
# 3-D scatter: age vs lymphocyte percentage vs urea, by outcome.
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter3D(Dead_data['age'], Dead_data['lab_lymphocyte_percentage'], Dead_data['lab_urea'])
ax.scatter3D(Alive_data['age'], Alive_data['lab_lymphocyte_percentage'], Alive_data['lab_urea'], alpha=0.3)
ax.set_xlabel('age')
ax.set_ylabel('lab_lymphocyte_percentage')
ax.set_zlabel('lab_urea')
plt.show() |
import rest_framework.urls
from django.contrib import admin
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from indig.views import IndicativosViewSet, index, sobre
# DRF router exposing the Indicativos API under /api/.
router = DefaultRouter()
router.register('indicativos', IndicativosViewSet)
urlpatterns = [
    path('', index, name='index'),
    path('sobre/', sobre, name='sobre'),
    path('api/', include(router.urls)),
    # Admin is mounted at /master/ instead of the default /admin/.
    path('master/', admin.site.urls),
    path('indig-auth/', include(rest_framework.urls, namespace='rest_framework'))
]
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""Helpers for interacting with the mailserver MySQL database (Python 2 / web.py)."""
# NOTE(review): hashlib and uuid are imported but unused in this module.
import hashlib
import uuid
import web
# NOTE(review): database credentials are hard-coded in source; consider
# loading them from environment variables or a config file.
db = web.database(dbn='mysql', db='mailserver',
        host='192.168.56.101', port=3306,
        user='root', pw='mdsmds', charset='utf8')
def create_virtual_user():
    # TODO: not implemented yet.
    pass
def delete_virtual_user():
    # TODO: not implemented yet.
    pass
def update_virtual_user():
    # TODO: not implemented yet.
    pass
def get_virtual_users_all():
    """Return every row of virtual_users as a list of dicts."""
    t = db.query('select * from virtual_users')
    raws = []
    for i in t:
        raws.append(dict(i))
        print i  # Python 2 print statement; echoes each row (debug output)
    return raws
def get_virtual_user():
    # TODO: not implemented yet.
    pass
def create_virtual_domain():
    # TODO: not implemented yet.
    pass
def delete_virtual_domain():
    # TODO: not implemented yet.
    pass
def update_virtual_domain():
    # TODO: not implemented yet.
    pass
def get_virtual_domains_all():
    """Return every row of virtual_domains as a list of dicts."""
    t = db.query('select * from virtual_domains')
    raws = []
    for i in t:
        raws.append(dict(i))
        print i  # Python 2 print statement; echoes each row (debug output)
    return raws
def get_virtual_domain():
    # TODO: not implemented yet.
    pass
def create_virtual_aliases():
    # TODO: not implemented yet.
    pass
def delete_virtual_aliases():
    # TODO: not implemented yet.
    pass
def update_virtual_aliases():
    # TODO: not implemented yet.
    pass
def get_virtual_alias():
    # TODO: not implemented yet.
    pass
def get_virtual_aliases_all():
    """Return every row of virtual_aliases as a list of dicts."""
    t = db.query('select * from virtual_aliases')
    raws = []
    for i in t:
        raws.append(dict(i))
        print i  # Python 2 print statement; echoes each row (debug output)
    return raws
if __name__ == '__main__':
    # Smoke test: dump all three tables (Python 2 print statements).
    print get_virtual_users_all()
    print get_virtual_domains_all()
    print get_virtual_aliases_all()
|
#!/usr/bin/env python3
from flatactors import Actor
from interpretor import InterpretorActor
from irc import IRCMainActor
from logger import LoggerActor
class MasterActor(Actor):
    """Root actor: spawns the interpretor, IRC and logger child actors."""
    def constructor(self):
        # Presumably marks this actor non-daemonic so the process stays
        # alive after the main thread returns — confirm against flatactors.Actor.
        self.daemon = False
    def initialize(self):
        # Spawn the three service actors as direct children; their given
        # names are used as-is (no family-name prefix).
        self.make_babies(
            ('interpretor', InterpretorActor),
            ('irc', IRCMainActor),
            ('logger', LoggerActor),
            use_family_name=False
        )
if __name__ == "__main__":
main_actor = MasterActor(None, 'master')
|
from django.core import signing
from django.views.generic import ListView, TemplateView, View
from django.http import Http404
from django.shortcuts import redirect, get_object_or_404
from django.contrib import messages
from django.utils.translation import ugettext as _
from trueskill import rate_1vs1
from ranking import Ranking
from seasons.models import Season, PlayerSeason
from misc.utils import get_two_random
class PlayerListView(ListView):
    """Paginated ranking of one season's players, ordered by TrueSkill mu."""
    template_name = 'players/ranking.html'
    context_object_name = 'players_ranking'
    model = PlayerSeason
    paginate_by = 50
    def get_queryset(self):
        """Return (rank, PlayerSeason) pairs for the requested season.

        Raises Http404 when the season doesn't exist or has no players.
        """
        season = get_object_or_404(Season, abbr=self.kwargs.get('season'))
        qs = super().get_queryset().filter(season=season)
        if not qs.exists():
            raise Http404
        # Create players ranking keyed on the TrueSkill mu rating.
        ranking = Ranking(qs, start=1, key=lambda x: x.rating_mu)
        return list(ranking) # Not really a queryset, but helps with pagination
    def get_context_data(self, **kwargs):
        # Expose the season abbreviation to the template.
        kwargs['season'] = self.kwargs['season']
        return super().get_context_data(**kwargs)
class PlayerVoteModalView(TemplateView):
    """Voting modal: presents two random players from a season, plus
    signed payloads encoding each possible vote outcome."""
    template_name = 'players/vote.html'
    def get_context_data(self, **kwargs):
        # Get two random players from the given season
        season = get_object_or_404(Season, abbr=self.kwargs.get('season'))
        qs = PlayerSeason.objects.filter(season=season)
        player_season_a, player_season_b = get_two_random(qs)
        kwargs['player_season_a'] = player_season_a
        kwargs['player_season_b'] = player_season_b
        # Create signed keys (verified with max_age=30 in PlayerVoteSaveView,
        # so a vote link only works for 30 seconds).
        # First player listed is the winner; a tie payload carries a third
        # element (True) so the save view can tell the cases apart.
        kwargs['player_season_a_key'] = signing.dumps(
            (player_season_a.pk, player_season_b.pk)
        )
        kwargs['player_season_b_key'] = signing.dumps(
            (player_season_b.pk, player_season_a.pk)
        )
        kwargs['tie_key'] = signing.dumps(
            (player_season_a.pk, player_season_b.pk, True)
        )
        return super().get_context_data(**kwargs)
class PlayerVoteSaveView(View):
    """Apply one vote: unpack the signed payload, update both players'
    TrueSkill ratings, and redirect back to the season ranking."""
    def get(self, request, *args, **kwargs):
        try:
            # Payload expires 30 seconds after it was signed.
            data = signing.loads(self.kwargs['signed_data'], max_age=30)
        except signing.SignatureExpired:
            messages.info(request, _("Sorry, but your vote link expired. "
                                     "Feel free to vote again."))
            return redirect('index')
        except signing.BadSignature:
            # Tampered payload: treat as not found.
            raise Http404
        # data[0] is the winner (or first player of a tie), data[1] the other.
        player_a = get_object_or_404(PlayerSeason, pk=data[0])
        rating_a = player_a.get_rating()
        player_b = get_object_or_404(PlayerSeason, pk=data[1])
        rating_b = player_b.get_rating()
        # Cross-season votes are invalid.
        if player_a.season != player_b.season:
            raise Http404
        if len(data) == 2:
            # First one is the winner
            rating_a, rating_b = rate_1vs1(rating_a, rating_b)
            player_a.votes_win += 1
        else:
            # Tie (payload carries a third element)
            rating_a, rating_b = rate_1vs1(rating_a, rating_b, drawn=True)
            player_a.votes_tie += 1
            player_b.votes_tie += 1
        # Save new ratings
        player_a.rating_mu = rating_a.mu
        player_a.rating_sigma = rating_a.sigma
        player_a.save()
        player_b.rating_mu = rating_b.mu
        player_b.rating_sigma = rating_b.sigma
        player_b.save()
        messages.success(request, _("Thanks for voting!"))
        return redirect('ranking', player_a.season)
|
from django.conf.urls import patterns, include, url
from django.contrib.auth.views import login, logout, password_change, password_change_done
# NOTE(review): patterns() was removed in Django 1.10 — this module
# targets an older Django. The views-prefix argument is unused because
# the url() below references the imported `login` view directly.
urlpatterns = patterns('jaber.accounts.views',
    url(r'^login', login, {'template_name': 'accounts/login.html', 'extra_context':{}}, name='login'),
)
|
from PIL import Image

# Load the image, report its basic properties (format, dimensions,
# colour mode) and open it in the default system viewer.
img = Image.open('img.JPG')
width, height = img.size
print(img.format, "%dx%d" % (width, height), img.mode)
img.show()
|
# -*- coding: utf-8 -*-
# @Author: zjx
# @Date : 2018/7/27
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
chrome_options = Options()
chrome_options.add_argument('--headless')  # run Chrome without a window
# NOTE(review): the chrome_options= keyword is deprecated in Selenium 4
# (use options=); kept as-is for the version this was written against.
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.get('http://www.baidu.com')
# Search Baidu for the term (Chinese for "programmer") and screenshot the results.
driver.find_element_by_id("kw").send_keys(u"程序员")
driver.find_element_by_id("su").click()
time.sleep(3)  # crude fixed wait for results; WebDriverWait would be more robust
driver.save_screenshot("程序员.png")
|
from sys import stdin
class Expression:
    """A claimed arithmetic fact: two operands and the asserted result."""

    def __init__(self, n1, n2, result):
        # Store the operands and the claimed result verbatim.
        self.n1, self.n2, self.result = n1, n2, result
def main():
    """Judge students' answers about arithmetic expressions.

    For each test case: read t expressions "n1 n2=result", then t answers
    "name expr_index relation" where relation is '+', '-', '*' or 'I'
    (impossible). Students whose relation does not hold are reported,
    sorted by name; special messages cover the all-pass / none-pass cases.
    """
    for line in stdin:
        t = int(line)
        ls = []
        for _ in range(t):
            vals, res = input().split('=')
            n1, n2 = map(int, vals.split())
            ls.append(Expression(n1, n2, int(res)))
        names = []
        for _ in range(t):
            n, e, r = input().split()
            e = int(e) - 1  # expression indices are 1-based in the input
            add = ls[e].n1 + ls[e].n2 == ls[e].result
            sub = ls[e].n1 - ls[e].n2 == ls[e].result
            mul = ls[e].n1 * ls[e].n2 == ls[e].result
            imp = not (add or sub or mul)
            # Bug fix: the original if-chain left `ans` unbound (NameError)
            # for any unexpected relation token; an explicit mapping lookup
            # fails with a clear KeyError instead.
            ans = {'+': add, '-': sub, '*': mul, 'I': imp}[r]
            if not ans:
                names.append(n)
        if not names:
            print("You Shall All Pass!")
        elif len(names) == t:
            print("None Shall Pass!")
        else:
            # Space-separated, sorted, single trailing newline — identical
            # to the original per-element end='' printing.
            print(' '.join(sorted(names)))
if __name__ == "__main__":
main() |
from django.db.models import Count
from rest_framework.viewsets import ModelViewSet
from rest_framework.filters import SearchFilter
from django_filters.rest_framework import DjangoFilterBackend
from .models import Dog, Breed
from .serializers import DogSerializer, CreateDogSerializer, BreedSerializer
from .permissions import IsAdminOrReadOnly
from .paginations import DefaultPagination
from . import filters
class BreedViewSet(ModelViewSet):
    """CRUD API for breeds; read-only unless the requester is an admin.

    dogs_count is annotated up front so serializers can expose it without
    issuing a COUNT query per row.
    """
    queryset = Breed.objects.annotate(dogs_count=Count('dogs')).all()
    serializer_class = BreedSerializer
    permission_classes = (IsAdminOrReadOnly,)
    filter_backends = (DjangoFilterBackend,)
    filterset_class = filters.BreedFilter
class DogViewSet(ModelViewSet):
    """CRUD API for dogs; paginated, filterable, searchable by breed name."""
    queryset = Dog.objects.all().select_related('breed')  # avoid N+1 on breed
    pagination_class = DefaultPagination
    permission_classes = (IsAdminOrReadOnly,)
    filter_backends = (DjangoFilterBackend, SearchFilter, )
    # NOTE(review): this reuses BreedFilter on the Dog queryset — looks
    # like it should be a Dog-specific filterset; confirm against filters.py.
    filterset_class = filters.BreedFilter
    search_fields = ['breed__name']
    def get_serializer_class(self):
        # Creation uses a slimmer serializer; everything else the full one.
        if self.request.method == 'POST':
            return CreateDogSerializer
        return DogSerializer
|
#find a column that has a value given a database and a table
import cx_Oracle
import os
import re
import sys
# Python 2 script: locate which column of a table contains a given value.
# raw_input('Which databaese do you ')
os.environ['ORACLE_HOME'] = '/oracle_64/orahome11g/'
os.environ['LD_LIBRARY_PATH'] = '/oracle_64/orahome11g/lib'
# NOTE(review): credentials are hard-coded in source; consider moving them
# to environment variables or a config file.
construct = 'qad/mfg@ny-oracle-ts-01.Yurman.com:1521/dwtest01' #TEST
#construct = 'qad/mfg@ny-oracle-pr-02.Yurman.com:1521/dwprod01' # PRODUCTION
conn = cx_Oracle.connect(construct)
cursorOracle = conn.cursor()
cursorOracle2 = conn.cursor()
print "Please enter a table name- like DW_COMMON.USER_DIMENSION"
# The interactive table prompt is disabled; the table is hard-coded below.
#table_to_query = str(raw_input("Tell me your table: " )).upper()
table_to_query = "DW_COMMON.USER_DIMENSION"
print table_to_query
just_table_name = table_to_query.split('.')[1]
print "Please enter what you are looking for - everything is cast into a String including numbers like 3"
query_string = str(raw_input("Tell me your string: " )).upper()
print query_string
print "You are looking for " + str(query_string).upper() + " in the table " + str(table_to_query).upper() + "."
try:
    # Pull every row of the target table up front.
    select_statement = ("select * from " + str(table_to_query) )
    selectResults = cursorOracle.execute(select_statement)
except Exception:
    print "table not found"
    sys.exit()
try:
    # Fetch the table's column names so matches can be reported by column.
    column_select_query = ("select column_name from DBA_TAB_COLS where table_name = \'" + str(just_table_name) ) + "\'"
    print column_select_query
    print "type of column query " + str(type(column_select_query))
    column_results = cursorOracle2.execute(column_select_query)
    print "type(column_results) " + str(type(column_results))
    print "column_results XXXX " + str(column_results)
except Exception:
    print "XXXX Columns not found"
    sys.exit()
print "++++++++++++++++++++"
print column_results
print "///////////////////////////////////////\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\"
# Normalise cursor rows like ('NAME',) into bare column-name strings.
clean_columns =[]
for i in column_results:
    print "Column name is " + str(i).strip(',').strip('(,)').replace("'", "")
    x = str(i).strip(',').strip('(,)').replace("'", "")
    clean_columns.append(x)
print "clean_columns " + str(clean_columns)
entire_row =[]
matches = []
combine_lists = []
final_results = {}
# Scan every row: pair each cell with its column name and regex-search
# the query string against the cell (case-insensitive via upper-casing).
# NOTE(review): query_string is used as a raw regex pattern — special
# characters should probably be re.escape()d; confirm intent.
for entire_row in selectResults:
    #zip() columns and the row_element
    #print "entire_row " + str(entire_row)
    #print "combined_list " + str(combine_lists)
    combine_lists = zip(clean_columns, entire_row )
    for row_element in combine_lists:
        #print "testing " + str(row_element).upper()
        #print type(row_element)
        if (re.search(str(query_string).upper(), str(row_element[1]).upper())):
            print "Found match!!! " + str(query_string).upper() + " " + str(row_element[1]).upper()
            print "Found a match in table " + str(table_to_query) + " column " + str(row_element[0]).upper()
            # Only the last matching column per table is kept (dict key overwrite).
            final_results[str(table_to_query).upper()] = str(row_element[0]).upper()
print "-----------JJJJJ--------------"
print final_results
# The triple-quoted strings below are dead/disabled code kept by the
# original author (their own note says it is buggy); preserved verbatim.
'''
Columns do not match order also buggy code
'''
print "4444444444444444444444444"
#print column_select_query
print "555555555555555555555555555"
#print selectResults, column_results
# Disabled bulk-insert experiment (KWI retail sales load) — not executed.
'''
# Iterate through all rows in Data Frame
# for row in df.iterrows():
for row_index, row in df.iterrows():
	# Set up ticketNumber with "H" preappended
	fullTicketNumber = 'H' + str(ticketNumber)
	ticketNumber += 1
	vDtTransDate = row['DtTransDate']
	vDtTransDate = str(vDtTransDate).split()
	# Transform into correct format
	vDtTransDate = "to_char(to_date('" + vDtTransDate[0] + "','YYYY-MM-DD'),'DD-MON-YYYY')"
	# Logic for DOCUMENT_TYPE
	if (row['Type'] == 'S'):
		DOCUMENT_TYPE = 'SLC'
	elif (row['Type'] == 'R'):
		DOCUMENT_TYPE = 'CRD'
	else:
		DOCUMENT_TYPE = 'XXX'
	# Set Price and Quantity
	price = row['Price']
	quantity = row['Qty']
	# Set up UPC CODE
	upcCode = row['UPC']
	# Fix bad UPC CODES
	if (len(str(upcCode)) < 12 or (str(upcCode).isdigit() == False)):
		upcCode = '883932627398'
	# Set up KWI UPC Code- cut off end and beginning
	kwiUpcCode = str(upcCode)
	kwiUpcCode = kwiUpcCode[:-1]
	kwiUpcCode = kwiUpcCode[1:]
	# Insert Statement
	insertStatement = ("insert into RETAIL_MART_MERGE.RETAIL_SALES_HISTORY_CUSTOM " +
	"(BATCH_NUMBER, TICKET_NUMBER, LINE_NUMBER, SALESPERSON, STORE_LOCATION, TRANSACTION_DATE, SYSTEM_DATE, DOCUMENT_TYPE, " +
	"SPLIT_SALE, CUSTOMER_NUMBER, UNIT_PRICE, QUANTITY_SOLD, EXT_VALUE_SOLD_USD, EXT_COST_SOLD, TAXABLE_FLAG, TAX_VALUE, STATION_NUMBER, OWNERSHIP, LOAD_DATE, CURRENT_UNIT_RETAIL_PRICE, " +
	"DISCOUNT_CODE, SALE_COUNT, TICKET_REFERENCE, RETAIL_UNIT_PRICE, UPC_CODE, EXT_STD_COST_SOLD, KWI_UPC_CODE, KWI_EMPLOYEE_NBR, COMPANY, LOCAL_EXT_VALUE_SOLD) " +
	"values " +
	"('KWI', " + # BATCH_NUMBER
	"'" + fullTicketNumber + "', " + # TICKET_NUMBER
	"'1', " + # Line_number
	"'300101', " + # SALESPERSON
	"'269350', " + # STORE_LOCATION
	" " + vDtTransDate + ", " + # TRANSACTION_DATE
	" " + vDtTransDate + ", " + # SYSTEM_DATE
	"'" + DOCUMENT_TYPE + "', " + # DOCUMENT_TYPE
	"'N','0', " + # SPLIT_SALE and CUSTOMER_NUMBER
	"'" + str(price) + "', " + # UNIT_PRICE
	"'" + str(quantity) + "'," + # QUANTITY_SOLD
	"'" + str(price) + "'," + # EXT_VALUE_SOLD_USD
	"'" + str(price) + "'," + # EXT_COST_SOLD
	"'Y','" + str(price) + "', " + # TAXABLE_FLAG and TAX_VALUE
	"'1', 'PD', trunc(sysdate)," + # STATION_NUMBER and OWNERSHIP and LOAD_DATE
	"'" + str(price) + "'," + # CURRENT_UNIT_RETAIL_PRICE
	"'0','0','0'," + # DISCOUNT_CODE, SALE_COUNT, TICKET_REFERENCE
	"'" + str(price) + "', " + # RETAIL_UNIT_PRICE
	"'" + str(upcCode) + "'," + # UPC_CODE
	"'" + str(price) + "'," + # EXT_STD_COST_SOLD
	"'" + str(kwiUpcCode) + "', " + # KWI_UPC_CODE
	"'300101', '269', " + # KWI_EMPLOYEE_NBR and COMPANY
	"'" + str(price) + "')") # LOCAL_EXT_VALUE_SOLD
	# Load Data
	productionValue = cursorOracle.execute(insertStatement)
	conn.commit()
'''
# Clean up database connections (conn.close() follows on the next line).
cursorOracle.close()
cursorOracle2.close()
conn.close() |
#! /usr/bin/env python3
# ---------------------------------------------------------------------------- #
# fn_c_heuristic_wrapper.py #
# #
# By - jacksonwb #
# Created: Wednesday December 1969 4:00:00 pm #
# Modified: Tuesday Sep 2019 3:02:07 pm #
# Modified By: jacksonwb #
# ---------------------------------------------------------------------------- #
import ctypes
import os
LIB = 'lib'
def fn_c_linear_conflicts(size, n_map, goal):
	"""Compute the linear-conflict heuristic via the compiled C library.

	Flattens the row-iterable puzzle state *n_map* and the *goal* state into
	``size**2``-element C int arrays and calls ``linearConflictDist`` from
	``fn_c_lib.so`` (expected in the sibling ``lib`` directory).

	Exits the interpreter with a message if the shared library has not been
	built yet.
	"""
	lib_path = os.path.join(os.path.split(os.path.dirname(__file__))[0], LIB)
	lib = os.path.join(lib_path, 'fn_c_lib.so')
	if not os.path.exists(lib):
		exit("Make C binaries to use fast heuristics!")
	fn_heur_lib = ctypes.CDLL(lib)
	# Flatten both 2-D states row-major into flat lists for the ctypes arrays
	# (was two manual append loops).
	n_ar = [v for row in n_map for v in row]
	g_ar = [v for row in goal for v in row]
	return fn_heur_lib.linearConflictDist((ctypes.c_int * (size ** 2))(*n_ar),
		(ctypes.c_int * (size ** 2))(*g_ar), size)
|
from django.contrib import admin
from .models import Position, Job
class PositionAdmin(admin.ModelAdmin):
    """Django admin changelist configuration for the Position model."""
    list_display = ('id', 'name')  # columns shown in the changelist table
    list_display_links = ('id', 'name')  # both columns link to the edit page
    search_fields = ['name']  # enables the admin search box, matching on name
    list_per_page = 10  # paginate the changelist at 10 rows
# Register the models with the admin site; Job gets the default ModelAdmin.
admin.site.register(Position, PositionAdmin)
admin.site.register(Job)
|
import os
import re
import shutil
from PIL import Image
from PIL import ImageOps
from tqdm import tqdm
import time
def ResizeToSquare(path):
    """Load the image at *path*, shrink it, and paste it onto a 1536x1536
    square canvas filled with a corner sample of the image.

    Returns the composited PIL Image; the file on disk is not modified.
    """
    newWidth = 1536
    newHeight = 1536
    shrinkImg = Image.open(path)
    # Image.LANCZOS is the same filter as the deprecated Image.ANTIALIAS,
    # which was removed in Pillow 10.
    shrinkImg.thumbnail((768, 1024), Image.LANCZOS)
    emptyImg = Image.new("RGB", (newWidth, newHeight))
    # Fill the canvas with a pixel sampled near the image corner so the
    # padding blends with the (presumably uniform) background -- TODO confirm
    # the inputs really have uniform backgrounds.
    emptyImg.paste(shrinkImg.getpixel((1,1)), [0,0,newWidth,newHeight])
    emptyImg.paste(shrinkImg, (int(newWidth * 0.85) // 3, 200))
    # Removed emptyImg.show(): it launched an OS image viewer for every file,
    # which spams/blocks during batch runs of the walker below.
    return emptyImg
counter = 0
# Source directory of images to pad and shrink. Raw strings prevent the
# Windows backslashes from being read as escape sequences.
dirPath = r"D:\FjongImages\Items on hanger"
#dirPath = r"D:\DeepFashionDatsets\img_highres"
for (root, dirs, files) in tqdm(os.walk(dirPath, topdown=False)):
    for i, file in enumerate(files):
        if file.endswith(".jpg"):
            counter += 1
            # os.path.join replaces root + "\\" + file and also copes with
            # forward-slash paths.
            paddedImage = ResizeToSquare(os.path.join(root, file))
            # Image.LANCZOS == the removed Image.ANTIALIAS filter.
            paddedImage.thumbnail((1024, 1024), Image.LANCZOS)
            # Backslashes doubled: the old literal contained the invalid
            # escape "\S". The resulting path (including the existing
            # "Processsing" directory-name spelling) is unchanged.
            paddedImage.save(f"DatasetProcesssing\\ShrunkClothing\\{counter}.jpg", "JPEG")
|
import sys
import click
from ai.backend.cli.interaction import ask_yn
from ai.backend.client.session import Session
from ai.backend.client.func.domain import (
_default_list_fields,
_default_detail_fields,
)
# from ai.backend.client.output.fields import domain_fields
from . import admin
from ..pretty import print_info
from ..types import CLIContext
@admin.group()
def domain():
    """
    Domain administration commands.
    """
    # Click group entry point; subcommands attach themselves below via
    # @domain.command(). The group body itself does nothing.
@domain.command()
@click.pass_obj
@click.argument('name', type=str)
def info(ctx: CLIContext, name: str) -> None:
    """
    Show the information about the given domain.
    """
    with Session() as session:
        try:
            # Fetch and render in one step; any failure (lookup or
            # rendering) is reported uniformly below.
            ctx.output.print_item(
                session.Domain.detail(name=name),
                _default_detail_fields,
            )
        except Exception as e:
            ctx.output.print_error(e)
            sys.exit(1)
@domain.command()
@click.pass_obj
def list(ctx: CLIContext) -> None:
    """
    List and manage domains.
    (admin privilege required)
    """
    with Session() as session:
        try:
            # Fetch all domains and render them as a table; any failure in
            # either step is reported and the process exits non-zero.
            fetched = session.Domain.list()
            ctx.output.print_list(fetched, _default_list_fields)
        except Exception as e:
            ctx.output.print_error(e)
            sys.exit(1)
@domain.command()
@click.pass_obj
@click.argument('name', type=str, metavar='NAME')
@click.option('-d', '--description', type=str, default='',
              help='Description of new domain')
@click.option('-i', '--inactive', is_flag=True,
              help='New domain will be inactive.')
@click.option('--total-resource-slots', type=str, default='{}',
              help='Set total resource slots.')
@click.option('--allowed-vfolder-hosts', type=str, multiple=True,
              help='Allowed virtual folder hosts.')
@click.option('--allowed-docker-registries', type=str, multiple=True,
              help='Allowed docker registries.')
def add(ctx: CLIContext, name, description, inactive, total_resource_slots,
        allowed_vfolder_hosts, allowed_docker_registries):
    """
    Add a new domain.

    NAME: Name of new domain.
    """
    with Session() as session:
        try:
            # Mutation result is a dict with at least 'ok' and 'msg' keys
            # (see the checks below).
            data = session.Domain.create(
                name,
                description=description,
                is_active=not inactive,  # the CLI exposes the inverse flag
                total_resource_slots=total_resource_slots,
                allowed_vfolder_hosts=allowed_vfolder_hosts,
                allowed_docker_registries=allowed_docker_registries,
            )
        except Exception as e:
            # Transport/validation failure before the mutation completed.
            ctx.output.print_mutation_error(
                e,
                item_name='domain',
                action_name='add',
            )
            sys.exit(1)
        if not data['ok']:
            # The server processed the request but the mutation failed.
            ctx.output.print_mutation_error(
                msg=data['msg'],
                item_name='domain',
                action_name='add',
            )
            sys.exit(1)
        ctx.output.print_mutation_result(
            data,
            item_name='domain',
        )
@domain.command()
@click.pass_obj
@click.argument('name', type=str, metavar='NAME')
@click.option('--new-name', type=str, help='New name of the domain')
@click.option('--description', type=str, help='Description of the domain')
@click.option('--is-active', type=bool, help='Set domain inactive.')
@click.option('--total-resource-slots', type=str,
              help='Update total resource slots.')
@click.option('--allowed-vfolder-hosts', type=str, multiple=True,
              help='Allowed virtual folder hosts.')
@click.option('--allowed-docker-registries', type=str, multiple=True,
              help='Allowed docker registries.')
def update(ctx: CLIContext, name, new_name, description, is_active, total_resource_slots,
           allowed_vfolder_hosts, allowed_docker_registries):
    """
    Update an existing domain.

    NAME: Name of the domain to update.
    """
    with Session() as session:
        try:
            # Unset options arrive as None and are forwarded as-is --
            # presumably meaning "leave unchanged"; confirm in Domain.update.
            data = session.Domain.update(
                name,
                new_name=new_name,
                description=description,
                is_active=is_active,
                total_resource_slots=total_resource_slots,
                allowed_vfolder_hosts=allowed_vfolder_hosts,
                allowed_docker_registries=allowed_docker_registries,
            )
        except Exception as e:
            # Transport/validation failure before the mutation completed.
            ctx.output.print_mutation_error(
                e,
                item_name='domain',
                action_name='update',
            )
            sys.exit(1)
        if not data['ok']:
            # The server processed the request but the mutation failed.
            ctx.output.print_mutation_error(
                msg=data['msg'],
                item_name='domain',
                action_name='update',
            )
            sys.exit(1)
        ctx.output.print_mutation_result(
            data,
            extra_info={
                'name': name,
            },
        )
@domain.command()
@click.pass_obj
@click.argument('name', type=str, metavar='NAME')
def delete(ctx: CLIContext, name):
    """
    Deactivate an existing domain (soft delete; see `purge` for real removal).

    NAME: Name of a domain to deactivate.
    """
    with Session() as session:
        try:
            data = session.Domain.delete(name)
        except Exception as e:
            # Transport/validation failure before the mutation completed.
            ctx.output.print_mutation_error(
                e,
                item_name='domain',
                action_name='deletion',
            )
            sys.exit(1)
        if not data['ok']:
            # The server processed the request but the mutation failed.
            ctx.output.print_mutation_error(
                msg=data['msg'],
                item_name='domain',
                action_name='deletion',
            )
            sys.exit(1)
        ctx.output.print_mutation_result(
            data,
            extra_info={
                'name': name,
            },
        )
@domain.command()
@click.pass_obj
@click.argument('name', type=str, metavar='NAME')
def purge(ctx: CLIContext, name):
    """
    Delete an existing domain.

    NAME: Name of a domain to delete.
    """
    with Session() as session:
        try:
            # Destructive operation: require interactive confirmation first.
            if not ask_yn():
                print_info('Cancelled')
                sys.exit(1)
            result = session.Domain.purge(name)
        except Exception as e:
            ctx.output.print_mutation_error(
                e,
                item_name='domain',
                action_name='purge',
            )
            sys.exit(1)
        if result['ok']:
            ctx.output.print_mutation_result(
                result,
                extra_info={'name': name},
            )
        else:
            ctx.output.print_mutation_error(
                msg=result['msg'],
                item_name='domain',
                action_name='purge',
            )
            sys.exit(1)
|
from datetime import datetime
from db_config import db, ma
from models.probe_monitoring import probe_model
from sqlalchemy import Column, Integer, ForeignKey
class PerformanceAnalysis(db.Model):
    """Performance-analysis record for a probe/site over a time window."""
    __tablename__ = "performance_analysis"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Analysis window boundaries; both default to row-creation time.
    start_date = db.Column(db.DateTime, nullable=False, default=datetime.now)
    end_date = db.Column(db.DateTime, nullable=False, default=datetime.now)
    region = db.Column(db.String(32))
    site_name = db.Column(db.String(32))
    probe = db.Column(db.String(32))
    app_type = db.Column(db.String(32))
    # Fix: the insert default used local time (datetime.now) while the
    # update hook used UTC (datetime.utcnow), so updated rows silently
    # switched timezones. Use local time for both, consistent with the
    # start_date/end_date defaults above.
    timestamp = db.Column(
        db.DateTime, default=datetime.now, onupdate=datetime.now
    )
class Applications(db.Model):
    """Application observed by a monitoring probe."""
    __tablename__ = 'applications'
    # Explicit sequence makes ids start at 201.
    application_id = db.Column(db.Integer, db.Sequence('seq_reg_id', start=201, increment=1), primary_key=True)
    # FK to the owning probe. NOTE(review): declared with the plain
    # sqlalchemy Column/Integer, unlike the db.Column fields above --
    # confirm this mix is intentional.
    probe_id = Column(Integer, ForeignKey(probe_model.ProbeMonitoring.probe_id))
    application_name = db.Column(db.String(50))
class PerformanceAnalysisSchema(ma.Schema):
    """Marshmallow serialization schema for PerformanceAnalysis rows."""
    class Meta:
        # Fields exposed in the serialized output.
        fields = ('id', 'start_date', 'end_date', 'region', 'site_name', 'probe', 'app_type', 'timestamp')
class ApplicationSchema(ma.Schema):
    """Marshmallow serialization schema for Applications rows."""
    class Meta:
        # Fields exposed in the serialized output.
        fields = ('application_id', 'probe_id', 'application_name',)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from twitter import *
# Twitter OAuth credentials. The "X" values are placeholders and must be
# replaced with real keys before sendTweet can authenticate.
CONSUMER_KEY = "X"
CONSUMER_SECRET = "X"
TOKEN_KEY = "X"
TOKEN_SECRET = "X"
def sendTweet(message):
    """Post *message* as a Twitter status update; failures are ignored.

    Best-effort by design: tweeting is non-critical for the caller, so
    network/auth/API errors must not propagate.
    """
    try:
        t = Twitter(auth=OAuth(TOKEN_KEY, TOKEN_SECRET,CONSUMER_KEY, CONSUMER_SECRET))
        t.statuses.update(status=message)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; the deliberate best-effort swallow is kept.
        pass
#!/usr/bin/env python
# Module-wide logging verbosity, toggled by the -v command-line flag
# (a second -v enables VVERBOSE).
VERBOSE = False;
VVERBOSE = False;
import sys;
import time;
# The recursive DFS/IDFS can exceed the default limit (1000) on deep graphs.
sys.setrecursionlimit(200000);
class Graph:
    """Graph stored as a dense adjacency matrix; a nonzero entry p[i][j]
    is the weight of the directed edge i -> j."""

    def __init__(self, num_vertices = 0):
        if VERBOSE:
            sys.stderr.write("Creating Graph instance with " + str(num_vertices) + " vertices\n")
        # range (not the py2-only xrange): consistent with IDFS/IIDS in this
        # file and compatible with both Python 2 and 3.
        self.p = [[0 for x in range(num_vertices)] for x in range(num_vertices)]

    def is_edge(self, i, j):
        """Return True if there is an edge i -> j (logging it when VERBOSE)."""
        is_edge = self.p[i][j] != 0
        if VERBOSE:
            pad = len(str(self.num_nodes()))
            sys.stderr.write(str(i).rjust(pad) + (" --> " if is_edge else " X ") + str(j).rjust(pad) + "\n")
        return is_edge

    def get_adjacent(self, i):
        """Return the list of nodes j reachable by an edge i -> j."""
        edges = []
        for j in range(self.num_nodes()):
            if self.p[i][j] != 0:
                edges.append(j)
        if VVERBOSE:
            sys.stderr.write("Adjacent nodes to " + str(i) + ": " + str(edges) + "\n")
        elif VERBOSE:
            sys.stderr.write("Adjacent nodes to " + str(i) + ": " + str(len(edges)) + "\n")
        return edges

    def set_edge(self, i, j, value):
        """Set the weight of edge i -> j (0 removes the edge)."""
        self.p[i][j] = value

    def num_nodes(self):
        """Number of vertices. Fix: len(self.p) instead of len(self.p[0]),
        which raised IndexError for an empty (0-vertex) graph; the matrix is
        square, so the value is unchanged otherwise."""
        return len(self.p)

    def __str__(self):
        return str(self.p)
def DFS(graph, vertex = 0, visited = None, depth = -1, is_iterative = False):
    """Recursive depth-first traversal, printing each vertex on first visit.

    depth < 0 means unlimited; depth == 0 stops descending. When
    is_iterative is set (used by IDS), already-visited vertices are
    re-entered so later, deeper passes can still make progress.
    Mutates *visited* in place (allocated here when None).
    """
    if visited is None:
        # range (not py2-only xrange), consistent with IDFS/IIDS below.
        visited = [False for x in range(graph.num_nodes())]
    if not visited[vertex]:
        visited[vertex] = True
        sys.stdout.write(str(vertex) + " ")
        sys.stdout.flush()
    for w in graph.get_adjacent(vertex):
        if w is vertex:
            # Skip self-loops. NOTE(review): identity (`is`) comparison on
            # ints relies on small-int caching -- works for these indices.
            continue
        if depth != 0 and (is_iterative or not visited[w]):
            DFS(graph, w, visited, depth - 1, is_iterative)
def IDS(graph):
    """Iterative-deepening search: repeat depth-limited DFS from vertex 0
    with an increasing depth limit until every vertex has been visited."""
    # range (not py2-only xrange), consistent with IDFS/IIDS in this file.
    visited = [False for x in range(graph.num_nodes())]
    for depth in range(graph.num_nodes()):
        if all(visited):
            break
        # is_iterative=True lets DFS re-enter visited vertices to go deeper.
        DFS(graph, 0, visited, depth, True)
def IDFS(graph, vertex = 0, visited = None, depth = -1):
    """Depth-limited DFS that returns the unvisited frontier vertices seen
    at the depth cutoff (IIDS uses the returned list as its next queue).

    Mutates *visited* in place (allocated here when None).
    """
    if VERBOSE:
        sys.stderr.write("IDFS(vertex=" + str(vertex) + ")\n")
    if visited is None:
        visited = [False for x in range(graph.num_nodes())]
    # Fix: remembered_vertices was only assigned on the not-yet-visited
    # path, so reaching an already-visited vertex with depth != 0 broke at
    # the return. Initializing it here makes that case return an empty
    # frontier (and terminates recursion on cycles).
    remembered_vertices = []
    if not visited[vertex]:
        visited[vertex] = True
        sys.stdout.write(str(vertex) + " ")
        sys.stdout.flush()
        for w in graph.get_adjacent(vertex):
            if w is vertex:
                continue
            if depth != 0:
                remembered_vertices.extend(IDFS(graph, w, visited, depth - 1))
            elif not visited[w]:
                remembered_vertices.append(w)
    return remembered_vertices
def IIDS(graph, vertex = 0):
    """Iterative frontier expansion: repeatedly run depth-0 IDFS on the head
    of a deduplicated work queue until the queue drains."""
    visited = [False for _ in range(graph.num_nodes())]
    pending = [vertex]
    if not all(visited):
        while len(pending) > 0:
            head = pending.pop(0)
            # Depth 0 visits only `head` and returns its unvisited neighbors.
            pending.extend(IDFS(graph, head, visited, 0))
            pending = f10(pending)
            if VERBOSE:
                sys.stderr.write("queue " + str(pending) + "\n")
def BFS(graph, vertex = 0, visited = None):
    """Breadth-first traversal marking vertices visited level by level.

    Produces no output (the per-vertex printing was already disabled).
    NOTE(review): the *visited* parameter is accepted for signature symmetry
    with DFS but is unconditionally reinitialized below.
    """
    # range (not py2-only xrange), consistent with IDFS/IIDS in this file.
    visited = [False for x in range(graph.num_nodes())]
    queue = [vertex]
    while len(queue) > 0:
        vertex = queue.pop(0)
        visited[vertex] = True
        for w in graph.get_adjacent(vertex):
            if not visited[w] and w is not vertex:
                queue.append(w)
        # Deduplicate the queue after each level expansion.
        queue = f10(queue)
def BFS_orig(graph, vertex = 0, visited = None):
    """Classic BFS that also tracks the path to each vertex, printing each
    vertex when it is first enqueued."""
    # maintain a queue of paths
    queue = []
    visited = [False for x in range(graph.num_nodes())]
    # push the first path into the queue
    queue.append([vertex])
    visited[vertex] = True
    sys.stdout.write(str(vertex) + " ")
    sys.stdout.flush()
    while queue:
        # get the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        # enumerate all adjacent nodes, construct a new path and push it into the queue
        for adjacent in graph.get_adjacent(node):
            if not visited[adjacent]:
                visited[adjacent] = True
                # Fix: was a Python-2-only `print str(adjacent),` statement;
                # write in the same style as the rest of this file.
                sys.stdout.write(str(adjacent) + " ")
                sys.stdout.flush()
                new_path = list(path)
                new_path.append(adjacent)
                queue.append(new_path)
def f8(seq): # Dave Kirby
    """Return seq as a list with duplicates removed, keeping first occurrences."""
    seen = set()
    out = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out
def f10(seq): # Andrew Dalke
    """Order-preserving dedup: materialize the _f10 generator into a list."""
    deduped = _f10(seq)
    return list(deduped)
def _f10(seq):
seen = set()
for x in seq:
if x in seen:
continue
seen.add(x)
yield x
def ReadFile(file):
    """Parse a comma-separated adjacency-matrix file into a list of int rows.

    Blank or whitespace-only tokens (e.g. from trailing commas or the
    line's newline) are skipped.
    """
    # `with` guarantees the handle is closed even if parsing below raises.
    with open(file, 'r') as f:
        lines = f.readlines()
    arrs = []
    for line in lines:
        arr = []
        for token in line.split(','):
            token = token.strip()
            # Fix: the old check was len(token) > 0 on the raw token, so a
            # trailing comma left a "\n" token and int() raised ValueError.
            if token:
                arr.append(int(token))
        arrs.append(arr)
    return arrs
if __name__ == '__main__':
    # Command-line driver (Python 2 script: print statements and xrange).
    # Selects which traversals to run on the adjacency matrix read from the
    # input file and times each one.
    do_dfs = False;
    do_bfs = False;
    do_ids = False;
    do_iids = False;
    input_file = 'input.txt';
    if len(sys.argv) == 1:
        print "Usage: %s [-dfs] [-bfs] [-ids] [-iids] [-v]" % (sys.argv[0]);
        sys.exit(0);
    # Parse flags; any unrecognized argument is taken as the input file name.
    for i in xrange(1, len(sys.argv)):
        arg = sys.argv[i];
        if arg == "-dfs":
            do_dfs = True;
        elif arg == "-bfs":
            do_bfs = True;
        elif arg == "-ids":
            do_ids = True;
        elif arg == "-iids":
            do_iids = True;
        elif arg == "--all":
            do_dfs = True;
            do_bfs = True;
            do_ids = True;
            do_iids = True;
        elif arg == "-v":
            # A second -v upgrades verbose to very-verbose.
            if VERBOSE:
                VVERBOSE = True;
            else:
                VERBOSE = True;
        else:
            input_file = arg;
    # Build the graph from the comma-separated adjacency matrix.
    data = ReadFile(input_file);
    num_vertices = len(data[0]);
    g = Graph(num_vertices);
    for i in xrange(num_vertices):
        for j in xrange(num_vertices):
            g.set_edge(i, j, data[i][j]);
    # Run and wall-clock-time each selected traversal.
    if do_dfs:
        start_time = time.time();
        sys.stdout.write("DFS\n");
        DFS(g);
        end_time = time.time();
        print("\n DFS Time: %g" % (end_time - start_time));
        sys.stdout.flush();
    if do_bfs:
        start_time = time.time();
        sys.stdout.write("BFS\n");
        BFS(g);
        end_time = time.time();
        print("\n BFS Time: %g" % (end_time - start_time));
        sys.stdout.flush();
    if do_ids:
        start_time = time.time();
        sys.stdout.write("IDS\n");
        IDS(g);
        end_time = time.time();
        print("\n IDS Time: %g" % (end_time - start_time));
        sys.stdout.flush();
    if do_iids:
        start_time = time.time();
        sys.stdout.write("IIDS\n");
        IIDS(g);
        end_time = time.time();
        print("\nIIDS Time: %g" % (end_time - start_time));
        sys.stdout.flush();
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Interfaz.ui'
#
# Created by: PyQt5 UI code generator 5.8
#
# WARNING! All changes made in this file will be lost!
from fractions import Fraction
from PyQt5 import QtCore, QtWidgets, QtGui
from PyQt5.QtWidgets import *
from crearSecuenciaResultados import matrizToString, writeToFile
from OperacionesElementales import *
import sys
from registroLog import *
class Ui_TareaAlgebra(object):
matrizEnInterfaz = []
matrizActiva = []
matrizReal = []
filas = 5
columnas = 5
primeraMat = True
def setupUi(self, TareaAlgebra):
TareaAlgebra.setObjectName("TareaAlgebra")
TareaAlgebra.resize(524, 551)
self.centralwidget = QtWidgets.QWidget(TareaAlgebra)
self.centralwidget.setObjectName("centralwidget")
self.FilasGroupBox = QtWidgets.QGroupBox(self.centralwidget)
self.FilasGroupBox.setGeometry(QtCore.QRect(10, 10, 181, 31))
self.FilasGroupBox.setObjectName("FilasGroupBox")
self.filas2 = QtWidgets.QRadioButton(self.FilasGroupBox)
self.filas2.setGeometry(QtCore.QRect(10, 10, 31, 17))
self.filas2.setChecked(False)
self.filas2.setObjectName("filas2")
self.filas3 = QtWidgets.QRadioButton(self.FilasGroupBox)
self.filas3.setGeometry(QtCore.QRect(50, 10, 31, 17))
self.filas3.setObjectName("filas3")
self.filas4 = QtWidgets.QRadioButton(self.FilasGroupBox)
self.filas4.setGeometry(QtCore.QRect(90, 10, 31, 17))
self.filas4.setObjectName("filas4")
self.filas5 = QtWidgets.QRadioButton(self.FilasGroupBox)
self.filas5.setGeometry(QtCore.QRect(130, 10, 41, 17))
self.filas5.setChecked(True)
self.filas5.setObjectName("filas5")
self.ColumnasGroupBox = QtWidgets.QGroupBox(self.centralwidget)
self.ColumnasGroupBox.setGeometry(QtCore.QRect(10, 50, 181, 31))
self.ColumnasGroupBox.setObjectName("ColumnasGroupBox")
self.col2 = QtWidgets.QRadioButton(self.ColumnasGroupBox)
self.col2.setGeometry(QtCore.QRect(10, 10, 31, 17))
self.col2.setChecked(False)
self.col2.setObjectName("col2")
self.col3 = QtWidgets.QRadioButton(self.ColumnasGroupBox)
self.col3.setGeometry(QtCore.QRect(50, 10, 31, 17))
self.col3.setObjectName("col3")
self.col4 = QtWidgets.QRadioButton(self.ColumnasGroupBox)
self.col4.setGeometry(QtCore.QRect(90, 10, 31, 17))
self.col4.setObjectName("col4")
self.col5 = QtWidgets.QRadioButton(self.ColumnasGroupBox)
self.col5.setGeometry(QtCore.QRect(130, 10, 41, 17))
self.col5.setChecked(True)
self.col5.setObjectName("col5")
self.nuevaMatrizButt = QtWidgets.QPushButton(self.centralwidget)
self.nuevaMatrizButt.setGeometry(QtCore.QRect(20, 440, 141, 23))
self.nuevaMatrizButt.setObjectName("nuevaMatrizButt")
self.sResutadosButt = QtWidgets.QPushButton(self.centralwidget)
self.sResutadosButt.setGeometry(QtCore.QRect(20, 480, 141, 23))
self.sResutadosButt.setObjectName("sResutadosButt")
self.FIL1COL1 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL1COL1.setGeometry(QtCore.QRect(210, 10, 51, 91))
self.FIL1COL1.setObjectName("FIL1COL1")
self.DEMF1C1 = QtWidgets.QLineEdit(self.FIL1COL1)
self.DEMF1C1.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF1C1.setObjectName("DEMF1C1")
self.line = QtWidgets.QFrame(self.FIL1COL1)
self.line.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.NUMF1C1 = QtWidgets.QLineEdit(self.FIL1COL1)
self.NUMF1C1.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF1C1.setObjectName("NUMF1C1")
self.DEMF1C1.raise_()
self.NUMF1C1.raise_()
self.line.raise_()
self.DEMF1C1.raise_()
self.line.raise_()
self.NUMF1C1.raise_()
self.FIL1COL2 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL1COL2.setGeometry(QtCore.QRect(270, 10, 51, 91))
self.FIL1COL2.setObjectName("FIL1COL2")
self.DEMF1C2 = QtWidgets.QLineEdit(self.FIL1COL2)
self.DEMF1C2.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF1C2.setObjectName("DEMF1C2")
self.line_12 = QtWidgets.QFrame(self.FIL1COL2)
self.line_12.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_12.setFrameShape(QtWidgets.QFrame.HLine)
self.line_12.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_12.setObjectName("line_12")
self.NUMF1C2 = QtWidgets.QLineEdit(self.FIL1COL2)
self.NUMF1C2.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF1C2.setObjectName("NUMF1C2")
self.FIL1COL3 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL1COL3.setGeometry(QtCore.QRect(330, 10, 51, 91))
self.FIL1COL3.setObjectName("FIL1COL3")
self.DEMF1C3 = QtWidgets.QLineEdit(self.FIL1COL3)
self.DEMF1C3.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF1C3.setObjectName("DEMF1C3")
self.line_13 = QtWidgets.QFrame(self.FIL1COL3)
self.line_13.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_13.setFrameShape(QtWidgets.QFrame.HLine)
self.line_13.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_13.setObjectName("line_13")
self.NUMF1C3 = QtWidgets.QLineEdit(self.FIL1COL3)
self.NUMF1C3.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF1C3.setObjectName("NUMF1C3")
self.FIL1COL4 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL1COL4.setGeometry(QtCore.QRect(390, 10, 51, 91))
self.FIL1COL4.setObjectName("FIL1COL4")
self.DEMF1C4 = QtWidgets.QLineEdit(self.FIL1COL4)
self.DEMF1C4.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF1C4.setObjectName("DEMF1C4")
self.line_14 = QtWidgets.QFrame(self.FIL1COL4)
self.line_14.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_14.setFrameShape(QtWidgets.QFrame.HLine)
self.line_14.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_14.setObjectName("line_14")
self.NUMF1C4 = QtWidgets.QLineEdit(self.FIL1COL4)
self.NUMF1C4.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF1C4.setObjectName("NUMF1C4")
self.FIL1COL5 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL1COL5.setGeometry(QtCore.QRect(450, 10, 51, 91))
self.FIL1COL5.setObjectName("FIL1COL5")
self.DEMF1C5 = QtWidgets.QLineEdit(self.FIL1COL5)
self.DEMF1C5.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF1C5.setObjectName("DEMF1C5")
self.line_15 = QtWidgets.QFrame(self.FIL1COL5)
self.line_15.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_15.setFrameShape(QtWidgets.QFrame.HLine)
self.line_15.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_15.setObjectName("line_15")
self.NUMF1C5 = QtWidgets.QLineEdit(self.FIL1COL5)
self.NUMF1C5.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF1C5.setObjectName("NUMF1C5")
self.FIL2COL1 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL2COL1.setGeometry(QtCore.QRect(210, 110, 51, 91))
self.FIL2COL1.setObjectName("FIL2COL1")
self.DEMF2C1 = QtWidgets.QLineEdit(self.FIL2COL1)
self.DEMF2C1.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF2C1.setObjectName("DEMF2C1")
self.line_16 = QtWidgets.QFrame(self.FIL2COL1)
self.line_16.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_16.setFrameShape(QtWidgets.QFrame.HLine)
self.line_16.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_16.setObjectName("line_16")
self.NUMF2C1 = QtWidgets.QLineEdit(self.FIL2COL1)
self.NUMF2C1.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF2C1.setObjectName("NUMF2C1")
self.FIL2COL2 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL2COL2.setGeometry(QtCore.QRect(270, 110, 51, 91))
self.FIL2COL2.setObjectName("FIL2COL2")
self.DEMF2C2 = QtWidgets.QLineEdit(self.FIL2COL2)
self.DEMF2C2.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF2C2.setObjectName("DEMF2C2")
self.line_17 = QtWidgets.QFrame(self.FIL2COL2)
self.line_17.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_17.setFrameShape(QtWidgets.QFrame.HLine)
self.line_17.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_17.setObjectName("line_17")
self.NUMF2C2 = QtWidgets.QLineEdit(self.FIL2COL2)
self.NUMF2C2.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF2C2.setObjectName("NUMF2C2")
self.FIL2COL3 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL2COL3.setGeometry(QtCore.QRect(330, 110, 51, 91))
self.FIL2COL3.setObjectName("FIL2COL3")
self.DEMF2C3 = QtWidgets.QLineEdit(self.FIL2COL3)
self.DEMF2C3.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF2C3.setObjectName("DEMF2C3")
self.line_18 = QtWidgets.QFrame(self.FIL2COL3)
self.line_18.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_18.setFrameShape(QtWidgets.QFrame.HLine)
self.line_18.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_18.setObjectName("line_18")
self.NUMF2C3 = QtWidgets.QLineEdit(self.FIL2COL3)
self.NUMF2C3.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF2C3.setObjectName("NUMF2C3")
self.FIL2COL4 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL2COL4.setGeometry(QtCore.QRect(390, 110, 51, 91))
self.FIL2COL4.setObjectName("FIL2COL4")
self.DEMF2C4 = QtWidgets.QLineEdit(self.FIL2COL4)
self.DEMF2C4.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF2C4.setObjectName("DEMF2C4")
self.line_19 = QtWidgets.QFrame(self.FIL2COL4)
self.line_19.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_19.setFrameShape(QtWidgets.QFrame.HLine)
self.line_19.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_19.setObjectName("line_19")
self.NUMF2C4 = QtWidgets.QLineEdit(self.FIL2COL4)
self.NUMF2C4.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF2C4.setObjectName("NUMF2C4")
self.FIL2COL5 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL2COL5.setGeometry(QtCore.QRect(450, 110, 51, 91))
self.FIL2COL5.setObjectName("FIL2COL5")
self.DEMF2C5 = QtWidgets.QLineEdit(self.FIL2COL5)
self.DEMF2C5.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF2C5.setObjectName("DEMF2C5")
self.line_20 = QtWidgets.QFrame(self.FIL2COL5)
self.line_20.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_20.setFrameShape(QtWidgets.QFrame.HLine)
self.line_20.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_20.setObjectName("line_20")
self.NUMF2C5 = QtWidgets.QLineEdit(self.FIL2COL5)
self.NUMF2C5.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF2C5.setObjectName("NUMF2C5")
self.FIL3COL1 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL3COL1.setGeometry(QtCore.QRect(210, 210, 51, 91))
self.FIL3COL1.setObjectName("FIL3COL1")
self.DEMF3C1 = QtWidgets.QLineEdit(self.FIL3COL1)
self.DEMF3C1.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF3C1.setObjectName("DEMF3C1")
self.line_21 = QtWidgets.QFrame(self.FIL3COL1)
self.line_21.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_21.setFrameShape(QtWidgets.QFrame.HLine)
self.line_21.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_21.setObjectName("line_21")
self.NUMF3C1 = QtWidgets.QLineEdit(self.FIL3COL1)
self.NUMF3C1.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF3C1.setObjectName("NUMF3C1")
self.FIL3COL2 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL3COL2.setGeometry(QtCore.QRect(270, 210, 51, 91))
self.FIL3COL2.setObjectName("FIL3COL2")
self.DEMF3C2 = QtWidgets.QLineEdit(self.FIL3COL2)
self.DEMF3C2.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF3C2.setObjectName("DEMF3C2")
self.line_22 = QtWidgets.QFrame(self.FIL3COL2)
self.line_22.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_22.setFrameShape(QtWidgets.QFrame.HLine)
self.line_22.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_22.setObjectName("line_22")
self.NUMF3C2 = QtWidgets.QLineEdit(self.FIL3COL2)
self.NUMF3C2.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF3C2.setObjectName("NUMF3C2")
self.FIL3COL3 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL3COL3.setGeometry(QtCore.QRect(330, 210, 51, 91))
self.FIL3COL3.setObjectName("FIL3COL3")
self.DEMF3C3 = QtWidgets.QLineEdit(self.FIL3COL3)
self.DEMF3C3.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF3C3.setObjectName("DEMF3C3")
self.line_23 = QtWidgets.QFrame(self.FIL3COL3)
self.line_23.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_23.setFrameShape(QtWidgets.QFrame.HLine)
self.line_23.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_23.setObjectName("line_23")
self.NUMF3C3 = QtWidgets.QLineEdit(self.FIL3COL3)
self.NUMF3C3.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF3C3.setObjectName("NUMF3C3")
self.FIL3COL4 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL3COL4.setGeometry(QtCore.QRect(390, 210, 51, 91))
self.FIL3COL4.setObjectName("FIL3COL4")
self.DEMF3C4 = QtWidgets.QLineEdit(self.FIL3COL4)
self.DEMF3C4.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF3C4.setObjectName("DEMF3C4")
self.line_24 = QtWidgets.QFrame(self.FIL3COL4)
self.line_24.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_24.setFrameShape(QtWidgets.QFrame.HLine)
self.line_24.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_24.setObjectName("line_24")
self.NUMF3C4 = QtWidgets.QLineEdit(self.FIL3COL4)
self.NUMF3C4.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF3C4.setObjectName("NUMF3C4")
self.FIL3COL5 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL3COL5.setGeometry(QtCore.QRect(450, 210, 51, 91))
self.FIL3COL5.setObjectName("FIL3COL5")
self.DEMF3C5 = QtWidgets.QLineEdit(self.FIL3COL5)
self.DEMF3C5.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF3C5.setObjectName("DEMF3C5")
self.line_25 = QtWidgets.QFrame(self.FIL3COL5)
self.line_25.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_25.setFrameShape(QtWidgets.QFrame.HLine)
self.line_25.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_25.setObjectName("line_25")
self.NUMF3C5 = QtWidgets.QLineEdit(self.FIL3COL5)
self.NUMF3C5.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF3C5.setObjectName("NUMF3C5")
self.FIL4COL1 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL4COL1.setGeometry(QtCore.QRect(210, 310, 51, 91))
self.FIL4COL1.setObjectName("FIL4COL1")
self.DEMF4C1 = QtWidgets.QLineEdit(self.FIL4COL1)
self.DEMF4C1.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF4C1.setObjectName("DEMF4C1")
self.line_26 = QtWidgets.QFrame(self.FIL4COL1)
self.line_26.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_26.setFrameShape(QtWidgets.QFrame.HLine)
self.line_26.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_26.setObjectName("line_26")
self.NUMF4C1 = QtWidgets.QLineEdit(self.FIL4COL1)
self.NUMF4C1.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF4C1.setObjectName("NUMF4C1")
self.FIL4COL2 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL4COL2.setGeometry(QtCore.QRect(270, 310, 51, 91))
self.FIL4COL2.setObjectName("FIL4COL2")
self.DEMF4C2 = QtWidgets.QLineEdit(self.FIL4COL2)
self.DEMF4C2.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF4C2.setObjectName("DEMF4C2")
self.line_27 = QtWidgets.QFrame(self.FIL4COL2)
self.line_27.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_27.setFrameShape(QtWidgets.QFrame.HLine)
self.line_27.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_27.setObjectName("line_27")
self.NUMF4C2 = QtWidgets.QLineEdit(self.FIL4COL2)
self.NUMF4C2.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF4C2.setObjectName("NUMF4C2")
self.FIL4COL3 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL4COL3.setGeometry(QtCore.QRect(330, 310, 51, 91))
self.FIL4COL3.setObjectName("FIL4COL3")
self.DEMF4C3 = QtWidgets.QLineEdit(self.FIL4COL3)
self.DEMF4C3.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF4C3.setObjectName("DEMF4C3")
self.line_28 = QtWidgets.QFrame(self.FIL4COL3)
self.line_28.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_28.setFrameShape(QtWidgets.QFrame.HLine)
self.line_28.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_28.setObjectName("line_28")
self.NUMF4C3 = QtWidgets.QLineEdit(self.FIL4COL3)
self.NUMF4C3.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF4C3.setObjectName("NUMF4C3")
self.FIL4COL4 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL4COL4.setGeometry(QtCore.QRect(390, 310, 51, 91))
self.FIL4COL4.setObjectName("FIL4COL4")
self.DEMF4C4 = QtWidgets.QLineEdit(self.FIL4COL4)
self.DEMF4C4.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF4C4.setObjectName("DEMF4C4")
self.line_29 = QtWidgets.QFrame(self.FIL4COL4)
self.line_29.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_29.setFrameShape(QtWidgets.QFrame.HLine)
self.line_29.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_29.setObjectName("line_29")
self.NUMF4C4 = QtWidgets.QLineEdit(self.FIL4COL4)
self.NUMF4C4.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF4C4.setObjectName("NUMF4C4")
self.FIL4COL5 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL4COL5.setGeometry(QtCore.QRect(450, 310, 51, 91))
self.FIL4COL5.setObjectName("FIL4COL5")
self.DEMF4C5 = QtWidgets.QLineEdit(self.FIL4COL5)
self.DEMF4C5.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF4C5.setObjectName("DEMF4C5")
self.line_30 = QtWidgets.QFrame(self.FIL4COL5)
self.line_30.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_30.setFrameShape(QtWidgets.QFrame.HLine)
self.line_30.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_30.setObjectName("line_30")
self.NUMF4C5 = QtWidgets.QLineEdit(self.FIL4COL5)
self.NUMF4C5.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF4C5.setObjectName("NUMF4C5")
self.FIL5COL1 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL5COL1.setGeometry(QtCore.QRect(210, 410, 51, 91))
self.FIL5COL1.setObjectName("FIL5COL1")
self.DEMF5C1 = QtWidgets.QLineEdit(self.FIL5COL1)
self.DEMF5C1.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF5C1.setObjectName("DEMF5C1")
self.line_31 = QtWidgets.QFrame(self.FIL5COL1)
self.line_31.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_31.setFrameShape(QtWidgets.QFrame.HLine)
self.line_31.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_31.setObjectName("line_31")
self.NUMF5C1 = QtWidgets.QLineEdit(self.FIL5COL1)
self.NUMF5C1.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF5C1.setObjectName("NUMF5C1")
self.FIL5COL2 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL5COL2.setGeometry(QtCore.QRect(270, 410, 51, 91))
self.FIL5COL2.setObjectName("FIL5COL2")
self.DEMF5C2 = QtWidgets.QLineEdit(self.FIL5COL2)
self.DEMF5C2.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF5C2.setObjectName("DEMF5C2")
self.line_32 = QtWidgets.QFrame(self.FIL5COL2)
self.line_32.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_32.setFrameShape(QtWidgets.QFrame.HLine)
self.line_32.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_32.setObjectName("line_32")
self.NUMF5C2 = QtWidgets.QLineEdit(self.FIL5COL2)
self.NUMF5C2.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF5C2.setObjectName("NUMF5C2")
self.FIL5COL3 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL5COL3.setGeometry(QtCore.QRect(330, 410, 51, 91))
self.FIL5COL3.setObjectName("FIL5COL3")
self.DEMF5C3 = QtWidgets.QLineEdit(self.FIL5COL3)
self.DEMF5C3.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF5C3.setObjectName("DEMF5C3")
self.line_33 = QtWidgets.QFrame(self.FIL5COL3)
self.line_33.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_33.setFrameShape(QtWidgets.QFrame.HLine)
self.line_33.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_33.setObjectName("line_33")
self.NUMF5C3 = QtWidgets.QLineEdit(self.FIL5COL3)
self.NUMF5C3.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF5C3.setObjectName("NUMF5C3")
self.FIL5COL4 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL5COL4.setGeometry(QtCore.QRect(390, 410, 51, 91))
self.FIL5COL4.setObjectName("FIL5COL4")
self.DEMF5C4 = QtWidgets.QLineEdit(self.FIL5COL4)
self.DEMF5C4.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF5C4.setObjectName("DEMF5C4")
self.line_34 = QtWidgets.QFrame(self.FIL5COL4)
self.line_34.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_34.setFrameShape(QtWidgets.QFrame.HLine)
self.line_34.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_34.setObjectName("line_34")
self.NUMF5C4 = QtWidgets.QLineEdit(self.FIL5COL4)
self.NUMF5C4.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF5C4.setObjectName("NUMF5C4")
self.FIL5COL5 = QtWidgets.QGroupBox(self.centralwidget)
self.FIL5COL5.setEnabled(True)
self.FIL5COL5.setGeometry(QtCore.QRect(450, 410, 51, 91))
self.FIL5COL5.setObjectName("FIL5COL5")
self.DEMF5C5 = QtWidgets.QLineEdit(self.FIL5COL5)
self.DEMF5C5.setGeometry(QtCore.QRect(10, 60, 31, 20))
self.DEMF5C5.setObjectName("DEMF5C5")
self.line_35 = QtWidgets.QFrame(self.FIL5COL5)
self.line_35.setGeometry(QtCore.QRect(10, 45, 31, 16))
self.line_35.setFrameShape(QtWidgets.QFrame.HLine)
self.line_35.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_35.setObjectName("line_35")
self.NUMF5C5 = QtWidgets.QLineEdit(self.FIL5COL5)
self.NUMF5C5.setGeometry(QtCore.QRect(10, 30, 31, 20))
self.NUMF5C5.setObjectName("NUMF5C5")
self.OperacionesElementalesGrupBox = QtWidgets.QGroupBox(self.centralwidget)
self.OperacionesElementalesGrupBox.setGeometry(QtCore.QRect(10, 90, 181, 331))
self.OperacionesElementalesGrupBox.setObjectName("OperacionesElementalesGrupBox")
self.FilaxfilasGroupBox = QtWidgets.QGroupBox(self.OperacionesElementalesGrupBox)
self.FilaxfilasGroupBox.setGeometry(QtCore.QRect(10, 20, 161, 81))
self.FilaxfilasGroupBox.setObjectName("FilaxfilasGroupBox")
self.filaA1 = QtWidgets.QLineEdit(self.FilaxfilasGroupBox)
self.filaA1.setGeometry(QtCore.QRect(10, 20, 41, 20))
self.filaA1.setObjectName("filaA1")
self.filaA2 = QtWidgets.QLineEdit(self.FilaxfilasGroupBox)
self.filaA2.setGeometry(QtCore.QRect(110, 20, 41, 20))
self.filaA2.setObjectName("filaA2")
self.RealizarTipoA = QtWidgets.QPushButton(self.FilaxfilasGroupBox)
self.RealizarTipoA.setGeometry(QtCore.QRect(10, 50, 75, 23))
self.RealizarTipoA.setObjectName("RealizarTipoA")
self.label = QtWidgets.QLabel(self.FilaxfilasGroupBox)
self.label.setGeometry(QtCore.QRect(60, 20, 41, 16))
self.label.setObjectName("label")
self.groupBox_27 = QtWidgets.QGroupBox(self.OperacionesElementalesGrupBox)
self.groupBox_27.setGeometry(QtCore.QRect(10, 110, 161, 101))
self.groupBox_27.setObjectName("groupBox_27")
self.label_2 = QtWidgets.QLabel(self.groupBox_27)
self.label_2.setGeometry(QtCore.QRect(10, 20, 21, 16))
self.label_2.setObjectName("label_2")
self.filaB = QtWidgets.QLineEdit(self.groupBox_27)
self.filaB.setGeometry(QtCore.QRect(10, 40, 21, 20))
self.filaB.setObjectName("filaB")
self.label_3 = QtWidgets.QLabel(self.groupBox_27)
self.label_3.setGeometry(QtCore.QRect(40, 40, 21, 21))
font = QtGui.QFont()
font.setPointSize(18)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.constanteB = QtWidgets.QLineEdit(self.groupBox_27)
self.constanteB.setGeometry(QtCore.QRect(60, 40, 21, 20))
self.constanteB.setObjectName("constanteB")
self.label_4 = QtWidgets.QLabel(self.groupBox_27)
self.label_4.setGeometry(QtCore.QRect(60, 20, 51, 16))
self.label_4.setObjectName("label_4")
self.RealizarTipoB = QtWidgets.QPushButton(self.groupBox_27)
self.RealizarTipoB.setGeometry(QtCore.QRect(10, 70, 75, 23))
self.RealizarTipoB.setObjectName("RealizarTipoB")
self.groupBox_28 = QtWidgets.QGroupBox(self.OperacionesElementalesGrupBox)
self.groupBox_28.setGeometry(QtCore.QRect(10, 220, 161, 101))
self.groupBox_28.setObjectName("groupBox_28")
self.label_5 = QtWidgets.QLabel(self.groupBox_28)
self.label_5.setGeometry(QtCore.QRect(10, 20, 21, 16))
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(self.groupBox_28)
self.label_6.setGeometry(QtCore.QRect(130, 20, 21, 16))
self.label_6.setObjectName("label_6")
self.label_7 = QtWidgets.QLabel(self.groupBox_28)
self.label_7.setGeometry(QtCore.QRect(60, 20, 51, 16))
self.label_7.setObjectName("label_7")
self.label_8 = QtWidgets.QLabel(self.groupBox_28)
self.label_8.setGeometry(QtCore.QRect(100, 40, 21, 21))
font = QtGui.QFont()
font.setPointSize(18)
self.label_8.setFont(font)
self.label_8.setObjectName("label_8")
self.label_9 = QtWidgets.QLabel(self.groupBox_28)
self.label_9.setGeometry(QtCore.QRect(40, 40, 21, 21))
font = QtGui.QFont()
font.setPointSize(18)
self.label_9.setFont(font)
self.label_9.setObjectName("label_9")
#
self.label_10 = QtWidgets.QLabel(self.groupBox_28)
self.label_10.setGeometry(QtCore.QRect(42, 50, 50, 25))
font = QtGui.QFont()
font.setPointSize(20)
self.label_10.setFont(font)
self.label_10.setObjectName("label_10")
#
self.filaC1 = QtWidgets.QLineEdit(self.groupBox_28)
self.filaC1.setGeometry(QtCore.QRect(10, 40, 21, 20))
self.filaC1.setObjectName("filaC1")
self.constanteC = QtWidgets.QLineEdit(self.groupBox_28)
self.constanteC.setGeometry(QtCore.QRect(70, 40, 21, 20))
self.constanteC.setObjectName("constanteC")
self.filaC2 = QtWidgets.QLineEdit(self.groupBox_28)
self.filaC2.setGeometry(QtCore.QRect(130, 40, 21, 20))
self.filaC2.setObjectName("filaC2")
self.RealizarTipoC = QtWidgets.QPushButton(self.groupBox_28)
self.RealizarTipoC.setGeometry(QtCore.QRect(10, 70, 75, 23))
self.RealizarTipoC.setObjectName("RealizarTipoC")
TareaAlgebra.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(TareaAlgebra)
self.menubar.setGeometry(QtCore.QRect(0, 0, 524, 20))
self.menubar.setObjectName("menubar")
TareaAlgebra.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(TareaAlgebra)
self.statusbar.setObjectName("statusbar")
self.sResutadosButt.setEnabled(False)
self.RealizarTipoA.setEnabled(False)
self.RealizarTipoB.setEnabled(False)
self.RealizarTipoC.setEnabled(False)
TareaAlgebra.setStatusBar(self.statusbar)
self.retranslateUi(TareaAlgebra)
QtCore.QMetaObject.connectSlotsByName(TareaAlgebra)
self.home()
def retranslateUi(self, TareaAlgebra):
    """Install all user-visible strings on the UI widgets.

    Qt Designer normally emits one setTitle/setText call per widget; the
    5x5 grid of matrix cells and the size radio buttons are filled here
    with loops instead of ~80 near-identical generated lines. Behavior
    is unchanged: each cell group box FILxCOLy is titled "(x,y)", each
    denominator field DEMFxCy shows "1" and each numerator field NUMFxCy
    shows "0".
    """
    _translate = QtCore.QCoreApplication.translate
    TareaAlgebra.setWindowTitle(_translate("TareaAlgebra", "Tarea Programada Parte1"))
    self.FilasGroupBox.setTitle(_translate("TareaAlgebra", "Filas"))
    self.ColumnasGroupBox.setTitle(_translate("TareaAlgebra", "Columnas"))
    # Radio buttons selecting the matrix size (2..5 rows / columns).
    for n in range(2, 6):
        getattr(self, "filas%d" % n).setText(_translate("TareaAlgebra", str(n)))
        getattr(self, "col%d" % n).setText(_translate("TareaAlgebra", str(n)))
    self.nuevaMatrizButt.setText(_translate("TareaAlgebra", "Crear Matriz"))
    self.sResutadosButt.setText(_translate("TareaAlgebra", "Secuencia de Resultados"))
    # 5x5 grid of fraction cells: title "(fila,col)", denominator "1", numerator "0".
    for fila in range(1, 6):
        for col in range(1, 6):
            getattr(self, "FIL%dCOL%d" % (fila, col)).setTitle(
                _translate("TareaAlgebra", "(%d,%d)" % (fila, col)))
            getattr(self, "DEMF%dC%d" % (fila, col)).setText(_translate("TareaAlgebra", "1"))
            getattr(self, "NUMF%dC%d" % (fila, col)).setText(_translate("TareaAlgebra", "0"))
    self.OperacionesElementalesGrupBox.setTitle(_translate("TareaAlgebra", "Operaciones Elementales"))
    self.FilaxfilasGroupBox.setTitle(_translate("TareaAlgebra", "Cambiar fila por fila"))
    self.RealizarTipoA.setText(_translate("TareaAlgebra", "Realizar"))
    self.label.setText(_translate("TareaAlgebra", "<----->"))
    self.groupBox_27.setTitle(_translate("TareaAlgebra", "Fila por Constante"))
    self.label_2.setText(_translate("TareaAlgebra", "Fila"))
    self.label_3.setText(_translate("TareaAlgebra", "*"))
    self.label_4.setText(_translate("TareaAlgebra", "Constante"))
    self.RealizarTipoB.setText(_translate("TareaAlgebra", "Realizar"))
    self.groupBox_28.setTitle(_translate("TareaAlgebra", "Tipo C"))
    self.label_5.setText(_translate("TareaAlgebra", "Fila"))
    self.label_6.setText(_translate("TareaAlgebra", "Fila"))
    self.label_7.setText(_translate("TareaAlgebra", "Constante"))
    self.label_8.setText(_translate("TareaAlgebra", "*"))
    self.label_9.setText(_translate("TareaAlgebra", "+"))
    self.label_10.setText(_translate("TareaAlgebra", "-"))
    self.RealizarTipoC.setText(_translate("TareaAlgebra", "Realizar"))
def resetMatriz(self, enabled):
    """Enable or disable every cell widget of the 5x5 on-screen matrix.

    Args:
        enabled: forwarded to ``setEnabled()`` on each cell group box.

    The parameter was previously named ``bool``, shadowing the builtin;
    every call site in this module passes it positionally, so the rename
    is interface-safe.
    """
    for fila in self.matrizEnInterfaz:
        for celda in fila:
            celda.setEnabled(enabled)
def setEnableMatriz(self):
    """Start a new matrix of the selected size and enter editing mode.

    Reads the row/column radio buttons (defaulting to 5 when none of the
    2-4 buttons is checked), stores the size in self.filas/self.columnas,
    disables the cells outside the chosen size, toggles the action
    buttons and resets the results log.
    """
    self.primeraMat = True  # next elementary operation will log the initial matrix
    self.nuevoLog()  # truncate Resultados.txt for the new session
    self.resetMatriz(True)  # re-enable all 25 cells before masking the unused ones
    matriz = self.matrizEnInterfaz
    numCol = 5
    numFil = 5
    if (self.col2.isChecked()):
        numCol = 2
    elif (self.col3.isChecked()):
        numCol = 3
    elif (self.col4.isChecked()):
        numCol = 4
    else:
        pass  # col5 selected (or nothing) -> keep the default of 5
    if (self.filas2.isChecked()):
        numFil = 2
    elif (self.filas3.isChecked()):
        numFil = 3
    elif (self.filas4.isChecked()):
        numFil = 4
    else:
        pass  # filas5 selected (or nothing) -> keep the default of 5
    self.filas = numFil
    self.columnas = numCol
    # Disable every whole row below the selected row count...
    while (numFil < 5):
        for i in range(0, 5):
            matriz[numFil][i].setEnabled(False)
        numFil += 1
    # ...and every column to the right of the selected column count.
    for i in range(0, 5):
        tmp = numCol
        while (tmp < 5):
            matriz[i][tmp].setEnabled(False)
            tmp += 1
    # Switch the UI into editing mode: size pickers off, operations on.
    self.nuevaMatrizButt.setEnabled(False)
    self.nuevaMatrizButt.setText("Nueva Matriz")
    self.RealizarTipoA.setEnabled(True)
    self.RealizarTipoB.setEnabled(True)
    self.RealizarTipoC.setEnabled(True)
    self.FilasGroupBox.setEnabled(False)
    self.ColumnasGroupBox.setEnabled(False)
    self.sResutadosButt.setEnabled(True)
    self.setMatrizActiva()
def setMatrizActiva(self):
    """Build self.matrizActiva as the filas x columnas sub-grid of the UI
    cell widgets, and pre-create one empty row per fila in self.matrizReal."""
    for fila in range(self.filas):
        self.matrizActiva.insert(fila, [])
        self.matrizReal.insert(fila, [])
        fila_widgets = self.matrizActiva[fila]
        for col in range(self.columnas):
            fila_widgets.append(self.matrizEnInterfaz[fila][col])
def generarMatrizReal(self):
    """Read the UI cells into self.matrizReal as a matrix of Fractions.

    Each cell group box exposes children in Qt creation order
    [denominator QLineEdit, separator line, numerator QLineEdit], so the
    value of a cell is Fraction(children[2].text(), children[0].text()).

    Raises ValueError on non-integer text and ZeroDivisionError on a zero
    denominator; callers (tipoA/tipoB/tipoC) wrap this in try/except.

    Changes vs. original: the debug print of every cell was dropped, and
    the remove-while-iterating cleanup loop (which skips elements) was
    replaced by a safe filter into a new list.
    """
    self.matrizReal = []
    for i in range(self.filas):
        fila = []
        for j in range(self.columnas):
            hijos = self.matrizActiva[i][j].children()
            fila.append(Fraction(int(hijos[2].text()), int(hijos[0].text())))
        self.matrizReal.append(fila)
    # Defensive: drop any empty rows (cannot occur while columnas >= 1).
    self.matrizReal = [fila for fila in self.matrizReal if fila]
def actualizarMatrizUI(self, matriz):
    """Write ``matriz`` back into the UI cells as numerator/denominator text.

    Each value is coerced through Fraction so the displayed pair is always
    in lowest terms.
    """
    for fila in range(self.filas):
        for col in range(self.columnas):
            hijos = self.matrizActiva[fila][col].children()
            valor = Fraction(matriz[fila][col])
            hijos[2].setText(str(valor.numerator))
            hijos[0].setText(str(valor.denominator))
def tipoA(self):
    """Elementary row operation type A: swap two rows.

    Reads the 1-based row numbers from the two line edits, logs the
    starting matrix once per session, swaps the rows and refreshes the
    UI. Any failure (non-integer input, row out of range, zero
    denominator in a cell) is reported in a warning dialog.
    """
    try:
        self.resetMatriz(False)
        self.nuevaMatrizButt.setEnabled(True)
        self.FilasGroupBox.setEnabled(True)
        self.ColumnasGroupBox.setEnabled(True)
        self.generarMatrizReal()
        filaCambio1 = int(self.filaA1.text()) - 1  # UI rows are 1-based
        filaCambio2 = int(self.filaA2.text()) - 1
        self.guardarPrimMat()
        self.matrizReal = intercambiarFilas(self.matrizReal, filaCambio1, filaCambio2)
        self.actualizarMatrizUI(self.matrizReal)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit escape.
        self.showMessageBox("Error", "Imposible realizar")
def tipoB(self):
    """Elementary row operation type B: multiply a row by a constant.

    Reads the 1-based row number and the integer constant, logs the
    starting matrix once per session, scales the row and refreshes the
    UI. Failures are reported in a warning dialog.
    """
    try:
        self.generarMatrizReal()
        self.resetMatriz(False)
        self.nuevaMatrizButt.setEnabled(True)
        self.FilasGroupBox.setEnabled(True)
        self.ColumnasGroupBox.setEnabled(True)
        filaC = int(self.filaB.text()) - 1  # UI rows are 1-based
        constante = int(self.constanteB.text())
        self.guardarPrimMat()
        self.matrizReal = multiplicarPorConstante(self.matrizReal, filaC, constante)
        self.actualizarMatrizUI(self.matrizReal)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit escape.
        self.showMessageBox("Error", "Imposible realizar")
def tipoC(self):
    """Elementary row operation type C: add a multiple of one row to another.

    Reads the target row, source row and integer constant (all from the
    UI, rows 1-based), logs the starting matrix once per session, applies
    the operation and refreshes the UI. Failures are reported in a
    warning dialog.
    """
    try:
        self.generarMatrizReal()
        self.resetMatriz(False)
        self.nuevaMatrizButt.setEnabled(True)
        self.FilasGroupBox.setEnabled(True)
        self.ColumnasGroupBox.setEnabled(True)
        filaS = int(self.filaC1.text()) - 1  # UI rows are 1-based
        filaC = int(self.filaC2.text()) - 1
        constante = int(self.constanteC.text())
        self.guardarPrimMat()
        self.matrizReal = sumaDeFilas(self.matrizReal, filaS, filaC, constante)
        self.actualizarMatrizUI(self.matrizReal)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit escape.
        self.showMessageBox("Error", "Imposible realizar")
def showMessageBox(self, title, message):
    """Pop up a modal warning dialog with the given title and message."""
    aviso = QMessageBox()
    aviso.setIcon(QMessageBox.Warning)
    aviso.setWindowTitle(title)
    aviso.setText(message)
    aviso.setStandardButtons(QMessageBox.Ok)
    aviso.exec_()
def home(self):
    """Build the 5x5 widget matrix and wire button signals to handlers.

    A stray no-op statement (``self.FIL1COL1.children()`` with its result
    discarded) was removed.
    """
    self.matrizEnInterfaz = [
        [self.FIL1COL1, self.FIL1COL2, self.FIL1COL3, self.FIL1COL4, self.FIL1COL5],
        [self.FIL2COL1, self.FIL2COL2, self.FIL2COL3, self.FIL2COL4, self.FIL2COL5],
        [self.FIL3COL1, self.FIL3COL2, self.FIL3COL3, self.FIL3COL4, self.FIL3COL5],
        [self.FIL4COL1, self.FIL4COL2, self.FIL4COL3, self.FIL4COL4, self.FIL4COL5],
        [self.FIL5COL1, self.FIL5COL2, self.FIL5COL3, self.FIL5COL4, self.FIL5COL5],
    ]
    self.nuevaMatrizButt.clicked.connect(self.setEnableMatriz)
    self.RealizarTipoA.clicked.connect(self.tipoA)
    self.RealizarTipoB.clicked.connect(self.tipoB)
    self.RealizarTipoC.clicked.connect(self.tipoC)
    self.sResutadosButt.clicked.connect(self.mostrarLog)
def guardarPrimMat(self):
    """Log the starting matrix once per editing session.

    ``self.primeraMat`` is set by setEnableMatriz(); the first elementary
    operation writes the initial matrix to the log and clears the flag.
    """
    if self.primeraMat:  # idiomatic truth test instead of '== True'
        writeToFile(matrizToString(self.matrizReal))
        self.primeraMat = False
def mostrarLog(self):
    """Open the sequence-of-results window."""
    # Creating the Log UI; kept as an attribute so the window object
    # is not garbage-collected while it is shown.
    self.logV = Log(self)
def nuevoLog(self):
    """Truncate Resultados.txt and write the session header line.

    Uses a ``with`` block so the handle is closed even if write() fails;
    mode "w" truncates exactly like the previous "w+" (whose read
    capability was never used).
    """
    with open("Resultados.txt", "w") as fileHandle:
        fileHandle.write("<-------------------------------------------------- Inicio --------------------------------------------------->\n\n")
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, build the main window
    # from the generated Ui class, show it and run the event loop until exit.
    app = QtWidgets.QApplication(sys.argv)
    TareaAlgebra = QtWidgets.QMainWindow()
    ui = Ui_TareaAlgebra()
    ui.setupUi(TareaAlgebra)
    TareaAlgebra.show()
    sys.exit(app.exec_())
|
from pathlib import Path
import numpy as np
import pytest
from npe2 import DynamicPlugin
from npe2.manifest.contributions import SampleDataURI
import napari
from napari.layers._source import Source
from napari.viewer import ViewerModel
def test_sample_hook(builtins, tmp_plugin: DynamicPlugin):
    """Open npe2-contributed sample data through the viewer.

    Covers: an unknown sample key raising KeyError, a function-based
    sample contribution, a URI-based sample (the napari logo), and
    kwargs forwarding to the sample function.
    """
    viewer = ViewerModel()
    NAME = tmp_plugin.name
    KEY = 'random data'
    # Before the contribution is registered, opening the sample must fail.
    with pytest.raises(KeyError, match=f"Plugin {NAME!r} does not provide"):
        viewer.open_sample(NAME, KEY)

    @tmp_plugin.contribute.sample_data(key=KEY)
    def _generate_random_data(shape=(512, 512)):
        # Function-based sample: returns (data, metadata) layer tuples.
        data = np.random.rand(*shape)
        return [(data, {'name': KEY})]

    # URI-based sample pointing at an image file shipped with napari.
    LOGO = str(Path(napari.__file__).parent / 'resources' / 'logo.png')
    tmp_plugin.manifest.contributions.sample_data.append(
        SampleDataURI(uri=LOGO, key='napari logo', display_name='Napari logo')
    )
    assert len(viewer.layers) == 0
    viewer.open_sample(NAME, KEY)
    # Function-based samples carry no path/reader, only the sample origin.
    assert viewer.layers[-1].source == Source(
        path=None, reader_plugin=None, sample=(NAME, KEY)
    )
    assert len(viewer.layers) == 1
    viewer.open_sample(NAME, 'napari logo')
    # URI-based samples record the file path and the reading plugin.
    assert viewer.layers[-1].source == Source(
        path=LOGO, reader_plugin='napari', sample=(NAME, 'napari logo')
    )
    # test calling with kwargs
    viewer.open_sample(NAME, KEY, shape=(256, 256))
    assert len(viewer.layers) == 3
    assert viewer.layers[-1].source == Source(sample=(NAME, KEY))
|
class Cat:
    """A cat identified by its name."""

    def __init__(self, name: str) -> None:
        self.name = name  # display name used in eat()

    def eat(self) -> None:
        """Print that this cat eats fish (message text is Chinese)."""
        print('%s 吃鱼' % self.name)
# Demo: create two cats and have each print its eating message.
cat = Cat('TOM')
cat.eat()
jery = Cat('jery')
jery.eat()
#!/usr/bin/env python3
# Concatenate every trace file of each ADFA-LD attack category into one
# "<category>_train" file inside the Attack_Data_Master directory.
#
# Fixes relative to the original script:
#   * shebang typo ("/usr/bin/enc pyton3") corrected;
#   * the subfolder path no longer accumulates across iterations (the old
#     code kept appending to `dir`, producing base/Adduser_1/Adduser_2/...);
#   * paths are joined explicitly instead of chdir-ing around;
#   * files are opened read-only with `with` so handles are always closed
#     (the old "r+" mode also needed write permission for no reason);
#   * the builtin name `dir` is no longer shadowed.
import os

BASE_DIR = "/home/pirateking/Desktop/Sys_Programming_Lab/ADFA-LD/ADFA-LD/Attack_Data_Master"

# Category number (1..6, as in the original menu) -> folder/file prefix.
CATEGORIES = {
    1: "Adduser",
    2: "Hydra_FTP",
    3: "Hydra_SSH",
    4: "Java_Meterpreter",
    5: "Meterpreter",
    6: "Web_Shell",
}

for folder, prefix in CATEGORIES.items():
    concat = ""
    # Subfolders are named "<prefix>_1" .. "<prefix>_7".
    for subfolder in range(1, 8):
        subdir = os.path.join(BASE_DIR, "%s_%d" % (prefix, subfolder))
        # sorted() makes the concatenation order deterministic
        # (os.listdir order is filesystem-dependent).
        for filename in sorted(os.listdir(subdir)):
            with open(os.path.join(subdir, filename), "r") as trace:
                concat += trace.read()
    with open(os.path.join(BASE_DIR, prefix + "_train"), "w") as out:
        out.write(concat)
|
import datetime
from django.db.models import Count
# Сериализаторы
from rest_framework import filters
from rest_framework import generics, viewsets
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.pagination import PageNumberPagination
from django.shortcuts import get_object_or_404
from collections import OrderedDict
from rest_framework import status
from django_filters.rest_framework import DjangoFilterBackend
from .models import IndividualImplementationAcademicPlan, WorkProgramInWorkProgramChangeInDisciplineBlockModule,\
DisciplineBlockModuleInDisciplineBlock, ElectiveWorkProgramInWorkProgramChangeInDisciplineBlockModule
from .serializers import IndividualImplementationAcademicPlanSerializer,CreateIndividualImplementationAcademicPlanSerializer,\
ShortIndividualImplementationAcademicPlanSerializer, WorkProgramInWorkProgramChangeInDisciplineBlockModuleSerializer, \
DisciplineBlockModuleInDisciplineBlockSerializer, ElectiveWorkProgramInWorkProgramChangeInDisciplineBlockModuleSerializer
# CreateElectiveWorkProgramInWorkProgramChangeInDisciplineBlockModuleSerializer
from ..folders_ans_statistic.models import IndividualImplementationAcademicPlanInFolder
class IndividualImplementationAcademicPlansSet(viewsets.ModelViewSet):
    """Read/create API for individual implementations of academic plans.

    ``retrieve`` post-processes the serialized nested plan so that only
    the work programs / modules actually chosen for this individual
    implementation remain in the response.
    """
    queryset = IndividualImplementationAcademicPlan.objects.all()
    serializer_class = IndividualImplementationAcademicPlanSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter, DjangoFilterBackend)
    filterset_fields = ['implementation_of_academic_plan__academic_plan__educational_profile',
                        'implementation_of_academic_plan__field_of_study__title',
                        'implementation_of_academic_plan__field_of_study__number',
                        'implementation_of_academic_plan__academic_plan__discipline_blocks_in_academic_plan__modules_in_discipline_block__change_blocks_of_work_programs_in_modules__work_program__prerequisites__name',
                        'implementation_of_academic_plan__academic_plan__discipline_blocks_in_academic_plan__modules_in_discipline_block__change_blocks_of_work_programs_in_modules__work_program__outcomes__name',
                        ]
    http_method_names = ['get', 'post']

    def get_serializer_class(self):
        # Lighter serializer for list views; the write serializer for
        # create/update; the full nested serializer otherwise.
        if self.action == 'list':
            return ShortIndividualImplementationAcademicPlanSerializer
        if self.action in ('create', 'update'):
            return CreateIndividualImplementationAcademicPlanSerializer
        return IndividualImplementationAcademicPlanSerializer

    def retrieve(self, request, *args, **kwargs):
        """Serialize one implementation and strip options not chosen by the user.

        For every "Optionally" change block only the chosen work program is
        kept; "Facultativ" blocks are annotated with a ``changed`` flag;
        specialization modules other than the chosen one are removed.

        Changes vs. original: debug prints removed; bare excepts narrowed
        to ``Exception``; the module-deletion loop now compensates for
        earlier deletions (the offset counter existed but was never
        applied, so the wrong modules were removed whenever more than one
        was marked); the redundant dict<->OrderedDict round-trip dropped
        (plain dicts preserve insertion order).
        """
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        newdata = dict(serializer.data)
        for discipline_block in newdata['implementation_of_academic_plan']['academic_plan']['discipline_blocks_in_academic_plan']:
            delete_module = []
            k = 0
            for module in discipline_block['modules_in_discipline_block']:
                for change_block in module['change_blocks_of_work_programs_in_modules']:
                    if change_block['change_type'] == "Optionally":
                        # Keep only the work program recorded as chosen for
                        # this implementation; mark the others for deletion.
                        delete = []
                        for i, work_program in enumerate(change_block['work_program']):
                            try:
                                chosen_id = WorkProgramInWorkProgramChangeInDisciplineBlockModule.objects.get(
                                    individual_implementation_of_academic_plan=newdata['id'],
                                    work_program_change_in_discipline_block_module=change_block['id'],
                                ).work_program.id
                                if work_program['id'] != chosen_id:
                                    delete.append(i)
                            except Exception:
                                # No (or ambiguous) choice recorded: keep all options.
                                pass
                        # Delete from the end so earlier deletions do not
                        # shift the indices of later ones.
                        for i in reversed(delete):
                            del change_block['work_program'][i]
                    if change_block['change_type'] == "Facultativ":
                        try:
                            elective = ElectiveWorkProgramInWorkProgramChangeInDisciplineBlockModule.objects.get(
                                individual_implementation_of_academic_plan=newdata['id'],
                                work_program_change_in_discipline_block_module=change_block['id'],
                            )
                            if change_block['id'] == elective.work_program_change_in_discipline_block_module.id:
                                change_block.update({"changed": True})
                        except Exception:
                            change_block.update({"changed": False})
                if module['type'] == "specialization_module":
                    try:
                        chosen_module_id = DisciplineBlockModuleInDisciplineBlock.objects.get(
                            individual_implementation_of_academic_plan=newdata['id'],
                            discipline_block=discipline_block['id'],
                        ).discipline_block_module.id
                        if module['id'] != chosen_module_id:
                            delete_module.append(k)
                    except Exception:
                        # No choice recorded for this block: keep all modules.
                        pass
                k += 1
            # BUG FIX: compensate each deletion for the ones already done
            # (indices in delete_module are ascending).
            for offset, i in enumerate(delete_module):
                del discipline_block['modules_in_discipline_block'][i - offset]
        try:
            # Attach the rating this user gave the route in their folder, if any.
            newdata.update({"rating": IndividualImplementationAcademicPlanInFolder.objects.get(
                individual_implementation_of_academic_plan__pk=self.kwargs['pk'],
                folder__owner=self.request.user).route_rating})
            newdata.update({"id_rating": IndividualImplementationAcademicPlanInFolder.objects.get(
                individual_implementation_of_academic_plan__pk=self.kwargs['pk'],
                folder__owner=self.request.user).id})
        except Exception:
            newdata.update({"rating": False})
        return Response(newdata, status=status.HTTP_200_OK)
class IndividualImplementationAcademicPlanForUser(generics.ListAPIView):
    """Paginated list of the requesting user's individual implementations."""
    serializer_class = ShortIndividualImplementationAcademicPlanSerializer
    permission_classes = [IsAuthenticated]
    pagination_class = PageNumberPagination

    def list(self, request, **kwargs):
        """Return all implementations belonging to the requesting user.

        BUG FIX: the paginated page was computed but the *full* queryset
        was serialized, so every page carried every record; the page is
        now the thing that gets serialized.
        """
        queryset = IndividualImplementationAcademicPlan.objects.filter(user=self.request.user)
        page = self.paginate_queryset(queryset)
        serializer = ShortIndividualImplementationAcademicPlanSerializer(page, many=True)
        return self.get_paginated_response(serializer.data)
@api_view(['POST'])
@permission_classes((IsAuthenticated, ))
def SaveImplementationAcademicPlans(request):
    """Attach every implementation listed in ``implementation_set`` to the
    requesting user."""
    for implementation_pk in request.data.get('implementation_set'):
        IndividualImplementationAcademicPlan.objects.filter(
            pk=implementation_pk
        ).update(user=request.user)
    return Response("null", status=status.HTTP_200_OK)
class WorkProgramInWorkProgramChangeInDisciplineBlockModuleSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints over chosen work programs in change blocks."""
    queryset = WorkProgramInWorkProgramChangeInDisciplineBlockModule.objects.all()
    serializer_class = WorkProgramInWorkProgramChangeInDisciplineBlockModuleSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter)
class DisciplineBlockModuleInDisciplineBlockSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints over chosen modules in discipline blocks."""
    queryset = DisciplineBlockModuleInDisciplineBlock.objects.all()
    serializer_class = DisciplineBlockModuleInDisciplineBlockSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter)
class ElectiveWorkProgramInWorkProgramChangeInDisciplineBlockModuleSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints over elective work-program choices."""
    queryset = ElectiveWorkProgramInWorkProgramChangeInDisciplineBlockModule.objects.all()
    serializer_class = ElectiveWorkProgramInWorkProgramChangeInDisciplineBlockModuleSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter)
class ElectiveWorkProgramInWorkProgramChangeInDisciplineBlockModuleCreateAPIView(generics.CreateAPIView):
    """Bulk-create elective work-program links from ``request.data['electives']``."""
    serializer_class = ElectiveWorkProgramInWorkProgramChangeInDisciplineBlockModuleSerializer
    queryset = ElectiveWorkProgramInWorkProgramChangeInDisciplineBlockModule.objects.all()
    filter_backends = [filters.SearchFilter, filters.OrderingFilter]
    search_fields = ['description']
    #permission_classes = [IsRpdDeveloperOrReadOnly]
    def post(self, request):
        # Validate and save each elective payload in turn.
        # NOTE(review): with raise_exception=True, invalid data raises a 400
        # before is_valid() can return False, so the errors response below is
        # effectively dead code, and only the last serializer's data is
        # returned on success — confirm whether a list response was intended.
        for data in request.data['electives']:
            serializer = ElectiveWorkProgramInWorkProgramChangeInDisciplineBlockModuleSerializer(data=data)
            if serializer.is_valid(raise_exception=True):
                serializer.save()
        return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from typing import Any
from pants.backend.codegen.protobuf.lint.buf.skip_field import SkipBufLintField
from pants.backend.codegen.protobuf.lint.buf.subsystem import BufSubsystem
from pants.backend.codegen.protobuf.target_types import (
ProtobufDependenciesField,
ProtobufSourceField,
)
from pants.core.goals.lint import LintResult, LintTargetsRequest, Partitions
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.core.util_rules.external_tool import DownloadedExternalTool, ExternalToolRequest
from pants.core.util_rules.source_files import SourceFilesRequest
from pants.core.util_rules.stripped_source_files import StrippedSourceFiles
from pants.engine.fs import Digest, MergeDigests
from pants.engine.platform import Platform
from pants.engine.process import FallibleProcessResult, Process
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import FieldSet, Target, TransitiveTargets, TransitiveTargetsRequest
from pants.util.logging import LogLevel
from pants.util.meta import classproperty
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class BufFieldSet(FieldSet):
    """Field set selecting protobuf source targets for `buf lint`."""
    required_fields = (ProtobufSourceField,)

    sources: ProtobufSourceField
    dependencies: ProtobufDependenciesField

    @classmethod
    def opt_out(cls, tgt: Target) -> bool:
        # Targets that set the skip field are excluded from linting.
        return tgt.get(SkipBufLintField).value
class BufLintRequest(LintTargetsRequest):
    """Lint request tying BufFieldSet targets to the Buf subsystem."""
    field_set_type = BufFieldSet
    tool_subsystem = BufSubsystem  # type: ignore[assignment]

    @classproperty
    def tool_name(cls) -> str:
        # Human-readable tool name.
        return "buf lint"

    @classproperty
    def tool_id(cls) -> str:
        # Stable machine identifier for this lint tool.
        return "buf-lint"
@rule
async def partition_buf(
    request: BufLintRequest.PartitionRequest[BufFieldSet], buf: BufSubsystem
) -> Partitions[BufFieldSet, Any]:
    """Group all requested field sets into one partition, or none when the
    user configured buf lint to be skipped."""
    if buf.lint_skip:
        return Partitions()
    return Partitions.single_partition(request.field_sets)
@rule(desc="Lint with buf lint", level=LogLevel.DEBUG)
async def run_buf(
    request: BufLintRequest.Batch[BufFieldSet, Any], buf: BufSubsystem, platform: Platform
) -> LintResult:
    """Run `buf lint` over the batch's protobuf sources.

    All transitive protobuf sources are merged into the sandbox
    (presumably so buf can resolve proto imports — the tool only reports
    on the files passed via --path, which are the batch's own sources).
    """
    transitive_targets = await Get(
        TransitiveTargets,
        TransitiveTargetsRequest((field_set.address for field_set in request.elements)),
    )
    # Every protobuf file reachable from the batch, stripped of source roots.
    all_stripped_sources_request = Get(
        StrippedSourceFiles,
        SourceFilesRequest(
            tgt[ProtobufSourceField]
            for tgt in transitive_targets.closure
            if tgt.has_field(ProtobufSourceField)
        ),
    )
    # Only the files this batch actually asks to have linted.
    target_stripped_sources_request = Get(
        StrippedSourceFiles,
        SourceFilesRequest(
            (field_set.sources for field_set in request.elements),
            for_sources_types=(ProtobufSourceField,),
            enable_codegen=True,
        ),
    )
    download_buf_get = Get(DownloadedExternalTool, ExternalToolRequest, buf.get_request(platform))
    config_files_get = Get(
        ConfigFiles,
        ConfigFilesRequest,
        buf.config_request,
    )
    # Resolve the four independent requests concurrently.
    target_sources_stripped, all_sources_stripped, downloaded_buf, config_files = await MultiGet(
        target_stripped_sources_request,
        all_stripped_sources_request,
        download_buf_get,
        config_files_get,
    )
    # One sandbox digest holding sources, the tool binary, and any config.
    input_digest = await Get(
        Digest,
        MergeDigests(
            (
                target_sources_stripped.snapshot.digest,
                all_sources_stripped.snapshot.digest,
                downloaded_buf.digest,
                config_files.snapshot.digest,
            )
        ),
    )
    config_arg = ["--config", buf.config] if buf.config else []
    process_result = await Get(
        FallibleProcessResult,
        Process(
            argv=[
                downloaded_buf.exe,
                "lint",
                *config_arg,
                *buf.lint_args,
                "--path",
                ",".join(target_sources_stripped.snapshot.files),
            ],
            input_digest=input_digest,
            description=f"Run buf lint on {pluralize(len(request.elements), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    return LintResult.create(request, process_result)
def rules():
    """All rule entries needed to register `buf lint` with the engine."""
    collected = list(collect_rules())
    collected.extend(BufLintRequest.rules())
    return collected
|
#This problem was asked by Airbnb.
#Given a list of integers, write a function that returns the largest
#sum of non-adjacent numbers. Numbers can be 0 or negative.
#For example, [2, 4, 6, 2, 5] should return 13, since
#we pick 2, 6, and 5. [5, 1, 1, 5] should return 10, since we pick 5 and 5.
def largest_non_adjacent(arr):
    """Return the largest sum obtainable from non-adjacent elements of *arr*.

    Picking nothing is always allowed, so the result is never negative
    (an empty or all-negative list yields 0).  O(n) time, O(n) space.
    """
    if not arr:
        # Fix: the original called max([]) here and raised ValueError.
        return 0
    if len(arr) <= 2:
        # With at most two elements only one of them can be picked.
        return max(0, max(arr))
    cached_result = [0] * len(arr)
    cached_result[0] = max(0, arr[0])
    cached_result[1] = max(cached_result[0], arr[1])
    for i in range(2, len(arr)):
        # Either take arr[i] plus the best sum two positions back,
        # or skip arr[i] and keep the previous best.
        cached_result[i] = max(arr[i] + cached_result[i - 2], cached_result[i - 1])
    return cached_result[-1]


if __name__ == '__main__':
    # Python 3 print call (the original used a Python 2 print statement).
    print(largest_non_adjacent([2, 4, 6, 2, 5]))
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from twisted.enterprise import adbapi
import MySQLdb
import MySQLdb.cursors
# 导入时间处理库
import datetime
import time
# 导入json/csv模块
import json
import csv
# 导入pymysql模块
import pymysql
class XiaohuaPipeline(object):
    """Persist joke items into the MySQL ``joke`` table through a Twisted
    adbapi connection pool, so inserts do not block the spider."""
    def __init__(self):
        # Store into MySQL.
        dbargs = dict(
            host='127.0.0.1',
            db='test',
            user='root',
            passwd='root',
            charset='utf8',
            cursorclass=MySQLdb.cursors.DictCursor,
            use_unicode=True,
        )
        self.dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
    def process_item(self, item, spider):
        # Schedule the insert on the pool's worker thread; the item is
        # returned unchanged for the next pipeline stage.
        self.dbpool.runInteraction(self.insert_into_table, item)
        return item
    def insert_into_table(self, conn, item):
        # NOTE(review): `conn` is the transaction/cursor object supplied by
        # runInteraction, hence the direct .execute() call.
        publish = int(time.time())
        created = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        conn.execute(
            'insert into joke(content,publish,created_at,updated_at) values(%s, %s, %s, %s)',
            (item['content'], publish, created, created))
class HuatuPipeline(object):
    """Append every scraped item to ``huatu.json`` as one JSON object per line."""
    def __init__(self):
        # Output file is truncated at the start of each crawl.
        self.filename = open('huatu.json', 'w', encoding='utf-8')
    def process_item(self, item, spider):
        """Serialize the item (non-ASCII kept as-is) and write it out."""
        serialized = json.dumps(dict(item), ensure_ascii=False)
        self.filename.write(serialized + ",\n")
        return item
    def close_spider(self, spider):
        # Release the file handle when the spider finishes.
        self.filename.close()
class YhbCSVPipeline(object):
    """Append scraped question items to ``myproject.csv`` as dict rows."""
    def __init__(self):
        # Open for appending.  newline='' lets the csv module manage line
        # endings itself; without it (as in the original) every row is
        # followed by a blank line on Windows.
        self.f = open("myproject.csv", "a", newline='', encoding='utf-8')
        # Field names must match the keys of the items the spider yields.
        self.fieldnames = ["title", "option", "answer", "analysis"]
        self.writer = csv.DictWriter(self.f, fieldnames=self.fieldnames)
        # Write the header row once, when the pipeline is constructed.
        self.writer.writeheader()
    def process_item(self, item, spider):
        # Write one item as a CSV row and pass it to the next stage.
        self.writer.writerow(item)
        return item
    def close_spider(self, spider):
        # Scrapy invokes ``close_spider`` (not ``close``); delegate so the
        # file handle is actually released when the spider finishes.
        self.close(spider)
    def close(self, spider):
        self.f.close()
class YhbPipeline(object):
    """Insert question items into the MySQL table ``yhb_questions``."""
    def __init__(self):
        dbparams = {
            'host': '127.0.0.1',
            'port': 3306,
            'user': 'root',
            'password': 'root',
            'database': 'pythonDB',
            'charset': 'utf8'
        }
        self.conn = pymysql.connect(**dbparams)
        self.cursor = self.conn.cursor()
        # Lazily-built INSERT statement; see the ``sql`` property below.
        self._sql = None
    def process_item(self, item, spider):
        # Record the insertion timestamp in MySQL DATETIME format.
        times = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        self.cursor.execute(self.sql, (
            item['title'], item['answer'], json.dumps(item['analysis']), str(item['question_option']), times))
        self.conn.commit()
        return item
    @property
    def sql(self):
        """Build the INSERT statement once and cache it.

        The original had a second, unreachable ``return self._sql`` after
        this one; the dead line has been removed.
        """
        if not self._sql:
            self._sql = """
            insert into yhb_questions(id,title,answer,analysis,question_option,create_time) values(null,%s,%s,%s,%s,%s)
            """
        return self._sql
class BooksPipeline(object):
    """Store Douban book items in the MySQL table ``douban_books``.

    NOTE(review): the table is dropped and recreated every time the pipeline
    is constructed, so each crawl starts from an empty table — confirm this
    is intentional.
    """
    def __init__(self):
        self.con = pymysql.connect(user="root", passwd="root", host="localhost", db="pythonDB", charset="utf8")
        self.cur = self.con.cursor()
        # Recreate the target table from scratch for this crawl.
        self.cur.execute('drop table if exists douban_books')
        self.cur.execute("create table douban_books(id int auto_increment primary key,book_name varchar(200),book_star varchar(244),book_pl varchar(244),book_author varchar(200),book_publish varchar(200),book_date varchar(200),book_price varchar(200))")
    def process_item(self, item, spider):
        # Insert one book row; id is auto-increment (NULL placeholder).
        self.cur.execute("insert into douban_books(id,book_name,book_star,book_pl,book_author,book_publish,book_date,book_price) values(NULL,%s,%s,%s,%s,%s,%s,%s)", (item['book_name'], item['book_star'], item['book_pl'], item['book_author'], item['book_publish'], item['book_date'], item['book_price']))
        self.con.commit()
        return item
import logging
from description import Description
from slipnet import slipnet
from workspaceStructure import WorkspaceStructure
class WorkspaceObject(WorkspaceStructure):
    """A letter or group living inside one of the workspace's strings
    (Copycat architecture).

    Holds the object's descriptions and its structural attachments (bonds,
    group, correspondence), plus the derived importance / salience /
    unhappiness measures that codelets use to decide what to work on next.
    """
    def __init__(self, workspaceString):
        WorkspaceStructure.__init__(self)
        self.string = workspaceString
        #self.string.objects += [ self ]
        # Descriptions attached to this object.
        self.descriptions = []
        self.extrinsicDescriptions = []
        # Structural attachments (bonds / group / correspondence).
        self.incomingBonds = []
        self.outgoingBonds = []
        self.bonds = []
        self.group = None
        self.changed = None
        self.correspondence = None
        # Importance / salience bookkeeping (recomputed by updateValue).
        self.clampSalience = False
        self.rawImportance = 0.0
        self.relativeImportance = 0.0
        self.leftBond = None
        self.rightBond = None
        self.newAnswerLetter = False
        self.name = ''
        self.replacement = None
        # Position of the object within its owning string.
        self.rightStringPosition = 0
        self.leftStringPosition = 0
        self.leftmost = False
        self.rightmost = False
        self.intraStringSalience = 0.0
        self.interStringSalience = 0.0
        self.totalSalience = 0.0
        self.intraStringUnhappiness = 0.0
        self.interStringUnhappiness = 0.0
        self.totalUnhappiness = 0.0
    def __str__(self):
        return 'object'
    def spansString(self):
        """Whether this object covers the whole string (both leftmost and rightmost)."""
        return self.leftmost and self.rightmost
    def addDescription(self, descriptionType, descriptor):
        """Attach a new Description(descriptionType, descriptor) to this object."""
        description = Description(self, descriptionType, descriptor)
        logging.info("Adding description: %s to %s" % (description, self))
        self.descriptions += [description]
    def addDescriptions(self, descriptions):
        """Add each description not already present, then rebuild workspace structures."""
        #print 'addDescriptions 1'
        #print 'add %d to %d of %s' % (len(descriptions),len(self.descriptions), self.string.string)
        copy = descriptions[:] # in case we add to our own descriptions, which turns the loop infinite
        for description in copy:
            #print '%d addDescriptions 2 %s ' % (len(descriptions),description)
            logging.info('might add: %s' % description)
            if not self.containsDescription(description):
                #print '%d addDescriptions 3 %s ' % (len(descriptions),description)
                self.addDescription(description.descriptionType, description.descriptor)
                #print '%d addDescriptions 4 %s ' % (len(descriptions),description)
            else:
                logging.info("Won't add it")
        #print '%d added, have %d ' % (len(descriptions),len(self.descriptions))
        # Imported here to avoid a circular import at module load time.
        from workspace import workspace
        workspace.buildDescriptions(self)
    def __calculateIntraStringHappiness(self):
        # Happiness within the object's own string: full if it spans the
        # string, else its group's strength, else a share of its bonds' strength.
        if self.spansString():
            return 100.0
        if self.group:
            return self.group.total_strength
        bondStrength = 0.0
        for bond in self.bonds:
            bondStrength += bond.total_strength
        divisor = 6.0
        if self.spansString(): # XXX then we have already returned
            divisor = 3.0
        return bondStrength / divisor
    def __calculateRawImportance(self):
        """Calculate the raw importance of this object.
        Which is the sum of all relevant descriptions"""
        result = 0.0
        for description in self.descriptions:
            if description.descriptionType.fully_active():
                result += description.descriptor.activation
            else:
                # Inactive description types contribute only a small fraction.
                result += description.descriptor.activation / 20.0
        if self.group:
            # Objects inside a group matter less individually.
            result *= 2.0 / 3.0
        if self.changed:
            result *= 2.0
        return result
    def updateValue(self):
        """Recompute importance, unhappiness and salience from current structures."""
        self.rawImportance = self.__calculateRawImportance()
        intraStringHappiness = self.__calculateIntraStringHappiness()
        self.intraStringUnhappiness = 100.0 - intraStringHappiness
        interStringHappiness = 0.0
        if self.correspondence:
            interStringHappiness = self.correspondence.total_strength
        self.interStringUnhappiness = 100.0 - interStringHappiness
        #logging.info("Unhappy: %s"%self.interStringUnhappiness)
        averageHappiness = (intraStringHappiness + interStringHappiness) / 2
        self.totalUnhappiness = 100.0 - averageHappiness
        if self.clampSalience:
            self.intraStringSalience = 100.0
            self.interStringSalience = 100.0
        else:
            # Imported here to avoid a circular import at module load time.
            from formulas import weightedAverage
            # Salience mixes importance and unhappiness with opposite weights
            # for the intra-string and inter-string cases.
            self.intraStringSalience = weightedAverage(((self.relativeImportance, 0.2), (self.intraStringUnhappiness, 0.8)))
            self.interStringSalience = weightedAverage(((self.relativeImportance, 0.8), (self.interStringUnhappiness, 0.2)))
        self.totalSalience = (self.intraStringSalience + self.interStringSalience) / 2.0
        logging.info('Set salience of %s to %f = (%f + %f)/2' % (
            self.__str__(), self.totalSalience, self.intraStringSalience, self.interStringSalience))
    def isWithin(self, other):
        """Whether this object's span lies inside *other*'s span."""
        return self.leftStringPosition >= other.leftStringPosition and self.rightStringPosition <= other.rightStringPosition
    def relevantDescriptions(self):
        """Descriptions whose description type is fully active in the slipnet."""
        return [d for d in self.descriptions if d.descriptionType.fully_active()]
    def morePossibleDescriptions(self, node):
        # Base objects offer nothing extra; subclasses may override.
        return []
    def getPossibleDescriptions(self, descriptionType):
        """Slipnet nodes that could describe this object for *descriptionType*."""
        logging.info('getting possible descriptions for %s' % self)
        descriptions = []
        # Imported here to avoid a circular import at module load time.
        from group import Group
        for link in descriptionType.instanceLinks:
            node = link.destination
            if node == slipnet.first and self.hasDescription(slipnet.letters[0]):
                descriptions += [node]
            if node == slipnet.last and self.hasDescription(slipnet.letters[-1]):
                descriptions += [node]
            # Number nodes describe groups by their member count.
            i = 1
            for number in slipnet.numbers:
                if node == number and isinstance(self, Group) and len(self.objectList) == i:
                    descriptions += [node]
                i += 1
            if node == slipnet.middle and self.middleObject():
                descriptions += [node]
        s = ''
        for d in descriptions:
            s = '%s, %s' % (s, d.get_name())
        logging.info(s)
        return descriptions
    def containsDescription(self, sought):
        """Whether an equal (type, descriptor) description is already attached."""
        soughtType = sought.descriptionType
        soughtDescriptor = sought.descriptor
        for d in self.descriptions:
            if soughtType == d.descriptionType and soughtDescriptor == d.descriptor:
                return True
        return False
    def hasDescription(self, slipnode):
        """Whether any attached description uses *slipnode* as its descriptor."""
        return [d for d in self.descriptions if d.descriptor == slipnode] and True or False
    def middleObject(self):
        # XXX only works if string is 3 chars long
        # as we have access to the string, why not just " == len / 2" ?
        objectOnMyRightIsRightmost = objectOnMyLeftIsLeftmost = False
        for objekt in self.string.objects:
            if objekt.leftmost and objekt.rightStringPosition == self.leftStringPosition - 1:
                objectOnMyLeftIsLeftmost = True
            if objekt.rightmost and objekt.leftStringPosition == self.rightStringPosition + 1:
                objectOnMyRightIsRightmost = True
        return objectOnMyRightIsRightmost and objectOnMyLeftIsLeftmost
    def distinguishingDescriptor(self, descriptor):
        """Whether no other object of the same type (ie. letter or group) has the same descriptor"""
        if descriptor == slipnet.letter:
            return False
        if descriptor == slipnet.group:
            return False
        for number in slipnet.numbers:
            if number == descriptor:
                return False
        return True
    def relevantDistinguishingDescriptors(self):
        """Descriptors of relevant descriptions that are also distinguishing."""
        return [d.descriptor for d in self.relevantDescriptions() if self.distinguishingDescriptor(d.descriptor)]
    def getDescriptor(self, descriptionType):
        """The description attached to this object of the specified description type."""
        descriptor = None
        logging.info("\nIn %s, trying for type: %s" % (self, descriptionType.get_name()))
        for description in self.descriptions:
            logging.info("Trying description: %s" % description)
            if description.descriptionType == descriptionType:
                return description.descriptor
        return descriptor
    def getDescriptionType(self, sought_description):
        """The description_type attached to this object of the specified description"""
        for description in self.descriptions:
            if description.descriptor == sought_description:
                return description.descriptionType
        description = None
        return description
    def getCommonGroups(self, other):
        """Objects of the same string whose span contains both self and *other*."""
        return [o for o in self.string.objects if self.isWithin(o) and other.isWithin(o)]
    def letterDistance(self, other):
        """Distance between the two spans' nearest edges (0 when they overlap)."""
        if other.leftStringPosition > self.rightStringPosition:
            return other.leftStringPosition - self.rightStringPosition
        if self.leftStringPosition > other.rightStringPosition:
            return self.leftStringPosition - other.rightStringPosition
        return 0
    def letterSpan(self):
        """How many letter positions this object covers (inclusive)."""
        return self.rightStringPosition - self.leftStringPosition + 1
    def beside(self, other):
        """Whether the two objects are adjacent within the same string."""
        if self.string != other.string:
            return False
        if self.leftStringPosition == other.rightStringPosition + 1:
            return True
        return other.leftStringPosition == self.rightStringPosition + 1
|
# coding=utf-8
import os
import sys
import unittest
from time import sleep
from selenium import webdriver
from selenium.common.exceptions import NoAlertPresentException, NoSuchElementException
sys.path.append(os.environ.get('PY_DEV_HOME'))
from webTest_pro.common.initData import init
from webTest_pro.common.model.baseActionAdd import user_login, \
add_cfg_listener_lessons
from webTest_pro.common.model.baseActionDel import del_cfg_listener_lessons
from webTest_pro.common.model.baseActionSearch import search_cfg_listener_lessons
from webTest_pro.common.model.baseActionModify import update_ClassOver
from webTest_pro.common.logger import logger, T_INFO
# Python 2 idiom: reload(sys) re-exposes sys.setdefaultencoding (hidden at
# startup) so the process-wide default encoding can be forced to UTF-8.
reload(sys)
sys.setdefaultencoding("utf-8")
# Shared login credentials loaded from the test-data initializer.
loginInfo = init.loginInfo
# Template fixtures: two entries per template type (plain and 480p variant).
hdk_lesson_cfgs = [{'name': u'互动课模板'}, {'name': u'互动_课模板480p'}]
jp_lesson_cfgs = [{'name': u'精品课'}, {'name': u'精品_课480p'}]
conference_cfgs = [{'name': u'会议'}, {'name': u'会_议480p'}]
speaker_lesson_cfgs = [{'name': u'主讲下课'}, {'name': u'主讲_下课_1'}]
listener_lesson_cfgs = [{'name': u'听讲下课'}, {'name': u'听讲_下课_1'}]
# Rename pairs for the update test: each template is renamed to the other.
classOverData = [{'name': u'下课模板测试数据', 'searchName': u'听讲下课'},
                 {'name': u'听讲下课', 'searchName': u'下课模板测试数据'}]
class listenserCfgsMgr(unittest.TestCase):
    '''Listener "class over" template management tests (Selenium, Python 2).'''
    def setUp(self):
        # Build a local Chrome driver or a remote (Selenium Grid) one
        # depending on the configured execution environment.
        if init.execEnv['execType'] == 'local':
            T_INFO(logger,"\nlocal exec testcase")
            self.driver = webdriver.Chrome()
            self.driver.implicitly_wait(8)
            self.verificationErrors = []
            self.accept_next_alert = True
            T_INFO(logger,"start tenantmanger...")
        else:
            T_INFO(logger,"\nremote exec testcase")
            browser = webdriver.DesiredCapabilities.CHROME
            self.driver = webdriver.Remote(command_executor=init.execEnv['remoteUrl'], desired_capabilities=browser)
            self.driver.implicitly_wait(8)
            self.verificationErrors = []
            self.accept_next_alert = True
            T_INFO(logger,"start tenantmanger...")
    def tearDown(self):
        # Close the browser and fail the test if any soft verification failed.
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
        T_INFO(logger,"tenantmanger end!")
    def test_add_cfg_listener_lessons(self):
        '''Add the listener class-over templates.'''
        print "exec:test_add_cfg_listener_lessons..."
        driver = self.driver
        user_login(driver, **loginInfo)
        for listener_lesson_cfg in listener_lesson_cfgs:
            add_cfg_listener_lessons(driver, **listener_lesson_cfg)
            # Expect the "add succeeded" toast message.
            self.assertEqual(u"添加成功!", driver.find_element_by_css_selector(".layui-layer-content").text)
            sleep(0.5)
        print "exec:test_add_cfg_speaker_lessons success."
    def test_bsearch_cfg_listener_lessons(self):
        '''Search the listener class-over template records.'''
        print "exec:test_search_cfg_listener_lessons"
        driver = self.driver
        user_login(driver, **loginInfo)
        for listener_lesson_cfg in listener_lesson_cfgs:
            search_cfg_listener_lessons(driver, **listener_lesson_cfg)
            # The template name must appear in the third column of the results table.
            self.assertEqual(listener_lesson_cfg['name'],
                             driver.find_element_by_xpath("//table[@id='listeningclasstable']/tbody/tr/td[3]").text)
            print "exec: test_bsearch_cfg_listener_lessons success."
            sleep(0.5)
    def test_bupdate_cfg_listener_lessons(self):
        '''Rename the listener class-over templates (there and back).'''
        print "exec:test_search_cfg_listener_lessons"
        driver = self.driver
        user_login(driver, **loginInfo)
        for listener_lesson_cfg in classOverData:
            update_ClassOver(driver, **listener_lesson_cfg)
            print "exec: test_bsearch_cfg_listener_lessons success."
            sleep(0.5)
    def test_del_cfg_listener_lessons(self):
        '''Delete the listener class-over templates (confirm).'''
        print "exec:test_del_cfg_listener_lessons..."
        driver = self.driver
        user_login(driver, **loginInfo)
        for listener_lesson_cfg in listener_lesson_cfgs:
            del_cfg_listener_lessons(driver, **listener_lesson_cfg)
            sleep(1.5)
            # Expect the "delete succeeded" toast message.
            self.assertEqual(u"删除成功!", driver.find_element_by_css_selector(".layui-layer-content").text)
            sleep(0.5)
        print "exec:test_del_cfg_listener_lessons success."
    def is_element_present(self, how, what):
        # True when the element can be located, False otherwise.
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e:
            return False
        return True
    def is_alert_present(self):
        # True when a JavaScript alert is currently open.
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException as e:
            return False
        return True
    def close_alert_and_get_its_text(self):
        # Accept or dismiss the open alert (per accept_next_alert) and
        # return its text; always re-arm accept_next_alert afterwards.
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True
if __name__ == '__main__':
    # Discover and run every test case in this module.
    unittest.main()
|
import os
import boto3
import json
from boto3.dynamodb.types import TypeDeserializer
def lambda_handler(event, context):
    """
    Takes a message from the DynamoDB stream, deserializes each NewImage,
    and publishes it as JSON to the SNS Topic for broadcast.

    Fixes over the original: TypeDeserializer was misspelled
    (``TypeDesiralizer``), the record image was read from the event-name
    string instead of the record, and ``raw_items()`` / ``desializer``
    were undefined names.
    """
    sns = boto3.resource("sns")
    topic = sns.Topic(os.environ['TOPIC'])
    deserializer = TypeDeserializer()
    for record in event['Records']:
        # NewImage holds DynamoDB-typed attribute values ({"S": ...} etc.);
        # deserialize each into a plain Python value before publishing.
        raw_item = record['dynamodb']['NewImage']
        item = {k: deserializer.deserialize(v) for k, v in raw_item.items()}
        topic.publish(
            Message=json.dumps(item)
        )
    return {
        "statusCode": 200,
        "body": json.dumps({
            "message": "Success",
        }),
    }
|
#!/usr/bin/python
import json
import xlwt
from xlwt import *
import xlrd
## write the header row in the worksheet.
def writeHeaders(sheet, rubric):
    """Write the worksheet's two header rows.

    Row 0 gets three merged cells naming the rubric's groupings; row 1 gets
    the fixed metadata/summary column titles followed by the per-criterion
    titles produced by addItems (existence "e" then count "c").
    """
    header_font = Font()
    header_font.name = "Calibri"
    header_font.bold = True
    style = XFStyle()
    style.font = header_font
    # Row 0: three merged 5-column cells (10-14, 15-19, 20-24) per rubric.
    if (rubric == "CSW"):
        group_labels = ("CSWAdditionalQueryables", "CSWCoreQueryables", "CSWCoreReturnables")
    elif (rubric == "UMM-C"):
        group_labels = ("UMM-C.Required", "UMM-C.Highly Recommended", "UMM-C.Recommended")
    else:
        group_labels = ("DataCite3.1Mandatory", "DataCite3.1Recommended", "DataCite3.1Optional")
    for index, label in enumerate(group_labels):
        start_col = 10 + 5 * index
        sheet.write_merge(0, 0, start_col, start_col + 4, label)
    # Row 1: fixed titles, written left to right starting at column 0.
    col = 0
    for title in ("Data Center", "RecordName", "Dialect", "Date", "RubricType",
                  "RubricVersion", "RubricMax", "Exist", "Count", "Signature",
                  "Max", "DialectMax", "Exist", "Count", "Spiral %",
                  "Max", "DialectMax", "Exist", "Count", "Spiral %",
                  "Max", "DialectMax", "Exist", "Count", "Spiral %"):
        col = addTitle(sheet, title, col, style)
    # Per-criterion titles: existence columns first, then count columns.
    col = addItems(sheet, col, style, "e", rubric)
    col = addItems(sheet, col, style, "c", rubric)
def addItems(sheet,col,style,val,rubric):
    """Write the per-criterion column titles ("<section>.<item>." + val)
    starting at *col* and return the next free column index.

    *val* is "e" (existence) or "c" (count).  Which criteria are emitted is
    rubric-dependent: the nested runs below only apply to the named rubrics.
    NOTE(review): the nesting of the rubric-specific runs reconstructs the
    original control flow — confirm against the upstream file.
    """
    col = addTitle(sheet,"1.1."+val,col,style)
    col = addTitle(sheet,"1.2."+val,col,style)
    col = addTitle(sheet,"1.3."+val,col,style)
    col = addTitle(sheet,"1.4."+val,col,style)
    col = addTitle(sheet,"1.5."+val,col,style)
    col = addTitle(sheet,"1.6."+val,col,style)
    col = addTitle(sheet,"1.7."+val,col,style)
    col = addTitle(sheet,"1.8."+val,col,style)
    if ((rubric == "CSW") or (rubric == "UMM-C")):
        col = addTitle(sheet,"1.9."+val,col,style)
        col = addTitle(sheet,"1.10."+val,col,style)
    if (rubric == "UMM-C"):
        col = addTitle(sheet,"1.11."+val,col,style)
        col = addTitle(sheet,"1.12."+val,col,style)
        col = addTitle(sheet,"1.13."+val,col,style)
        col = addTitle(sheet,"1.14."+val,col,style)
        col = addTitle(sheet,"1.15."+val,col,style)
        col = addTitle(sheet,"1.16."+val,col,style)
        col = addTitle(sheet,"1.17."+val,col,style)
        col = addTitle(sheet,"1.18."+val,col,style)
        col = addTitle(sheet,"1.19."+val,col,style)
        col = addTitle(sheet,"1.20."+val,col,style)
        col = addTitle(sheet,"1.21."+val,col,style)
        col = addTitle(sheet,"1.22."+val,col,style)
        col = addTitle(sheet,"1.23."+val,col,style)
        col = addTitle(sheet,"1.24."+val,col,style)
        col = addTitle(sheet,"1.25."+val,col,style)
        col = addTitle(sheet,"1.26."+val,col,style)
        col = addTitle(sheet,"1.27."+val,col,style)
        col = addTitle(sheet,"1.28."+val,col,style)
        col = addTitle(sheet,"1.29."+val,col,style)
        col = addTitle(sheet,"1.30."+val,col,style)
        col = addTitle(sheet,"1.31."+val,col,style)
        col = addTitle(sheet,"1.32."+val,col,style)
        col = addTitle(sheet,"1.33."+val,col,style)
        col = addTitle(sheet,"1.34."+val,col,style)
        col = addTitle(sheet,"1.35."+val,col,style)
        col = addTitle(sheet,"1.36."+val,col,style)
        col = addTitle(sheet,"1.37."+val,col,style)
    col = addTitle(sheet,"2.1."+val,col,style)
    col = addTitle(sheet,"2.2."+val,col,style)
    col = addTitle(sheet,"2.3."+val,col,style)
    col = addTitle(sheet,"2.4."+val,col,style)
    col = addTitle(sheet,"2.5."+val,col,style)
    col = addTitle(sheet,"2.6."+val,col,style)
    if ((rubric == "CSW") or (rubric == "DCITE")):
        col = addTitle(sheet,"2.7."+val,col,style)
        col = addTitle(sheet,"2.8."+val,col,style)
        col = addTitle(sheet,"2.9."+val,col,style)
        col = addTitle(sheet,"2.10."+val,col,style)
    if (rubric == "CSW"):
        col = addTitle(sheet,"2.11."+val,col,style)
        col = addTitle(sheet,"2.12."+val,col,style)
        col = addTitle(sheet,"2.13."+val,col,style)
        col = addTitle(sheet,"2.14."+val,col,style)
        col = addTitle(sheet,"2.15."+val,col,style)
    col = addTitle(sheet,"3.1."+val,col,style)
    col = addTitle(sheet,"3.2."+val,col,style)
    col = addTitle(sheet,"3.3."+val,col,style)
    col = addTitle(sheet,"3.4."+val,col,style)
    col = addTitle(sheet,"3.5."+val,col,style)
    col = addTitle(sheet,"3.6."+val,col,style)
    if ((rubric == "CSW") or (rubric == "UMM-C")):
        col = addTitle(sheet,"3.7."+val,col,style)
        col = addTitle(sheet,"3.8."+val,col,style)
        col = addTitle(sheet,"3.9."+val,col,style)
    if (rubric == "UMM-C"):
        col = addTitle(sheet,"3.10."+val,col,style)
        col = addTitle(sheet,"3.11."+val,col,style)
        col = addTitle(sheet,"3.12."+val,col,style)
        col = addTitle(sheet,"3.13."+val,col,style)
        col = addTitle(sheet,"3.14."+val,col,style)
        col = addTitle(sheet,"3.15."+val,col,style)
        col = addTitle(sheet,"3.16."+val,col,style)
        col = addTitle(sheet,"3.17."+val,col,style)
        col = addTitle(sheet,"3.18."+val,col,style)
        col = addTitle(sheet,"3.19."+val,col,style)
        col = addTitle(sheet,"3.20."+val,col,style)
        col = addTitle(sheet,"3.21."+val,col,style)
        col = addTitle(sheet,"3.22."+val,col,style)
        col = addTitle(sheet,"3.23."+val,col,style)
        col = addTitle(sheet,"3.24."+val,col,style)
        col = addTitle(sheet,"3.25."+val,col,style)
    return col
def addTitle(sheet, title, col, style):
    """Write *title* into the title row (row 1) at column *col* with *style*
    and return the index of the next free column."""
    title_row = 1
    sheet.write(title_row, col, title, style)
    return col + 1
|
"""
inputFile = open("Day11_SeatingSystem/InputTest2.txt","r")
Lines = inputFile.readlines()
input_list = []
for line in Lines:
currentInput = line.strip()
input_list.append(currentInput)
for row_number in range(len(input_list)):
print(row_number)
for row_number in range(len(input_list[0])):
print(row_number)
print(len(input_list))
#print(len(input_list[0]))
"""
## TEST 2
def find_number_occupied_seats(input_list):
    """Count the occupied seats ('#') in the seating layout.

    Fixes a bug in the original, which indexed the module-level global
    ``current_layout`` instead of the ``input_list`` parameter (and would
    crash on an empty layout via ``input_list[0]``).
    """
    counter_occupied_seats = 0
    for row in input_list:
        # Works for both string rows ('#.#') and list rows (['#','.','#']).
        counter_occupied_seats += row.count("#")
    return counter_occupied_seats
#current_layout = ['#.##.', 'LL##L', 'L##.L', 'LLLL.']
#print(find_number_occupied_seats(current_layout))
## TEST 3
def next_state(current_layout, row_position, column_position, max_row, max_column):
    """Return the next value of one cell under the part-1 seating rules.

    max_row / max_column are the highest *valid* row and column indices.
    Rules: floor (".") never changes; an empty seat ("L") with zero
    occupied neighbours becomes occupied; an occupied seat ("#") with
    four or more occupied neighbours becomes empty; otherwise unchanged.
    """
    seat = current_layout[row_position][column_position]
    # Floor tiles never change.
    if seat == ".":
        return "."
    # Count occupied seats among the up-to-eight adjacent cells.
    occupied_neighbours = 0
    for delta_row, delta_col in ((-1, -1), (-1, 0), (-1, 1),
                                 (0, -1),           (0, 1),
                                 (1, -1),  (1, 0),  (1, 1)):
        r = row_position + delta_row
        c = column_position + delta_col
        if 0 <= r <= max_row and 0 <= c <= max_column and current_layout[r][c] == "#":
            occupied_neighbours += 1
    if seat == "L" and occupied_neighbours == 0:
        return "#"
    if seat == "#" and occupied_neighbours > 3:
        return "L"
    return seat
# Demo driver: apply one generation of the seating rules to a small layout.
current_layout = [['#', '#', '#'], ['#', '.', '.'], ['#', '.', '#']]
# next_state treats max_row/max_column as the highest VALID index, so they
# must be len - 1.  The original passed len itself, which made the bounds
# checks admit an out-of-range neighbour and raise IndexError on border cells.
max_row = len(current_layout) - 1
max_column = len(current_layout[0]) - 1
print(max_row)
print(max_column)
#print(next_state(current_layout,2,2,2,2))
for row_number in range(len(current_layout)):
    for column_number in range(len(current_layout[0])):
        print(f"Current datapoint is: row_number: {row_number} & column_number: {column_number}")
        new_value = next_state(current_layout, row_number, column_number, max_row, max_column)
        print(new_value)
import requests
import json
import time
import queue
class Bot():
    """A RASA chatbot participant reachable through a local REST webhook.

    Every bot except the "Scrum Master" is primed at construction time with
    a greeting so that its RASA slots get instantiated.
    """
    def __init__(self, name, port, mediator):
        self.name = name
        self.port = port
        self.url = 'http://localhost:'+str(port)+'/webhooks/rest_custom/webhook'
        self.mediator = mediator
        if(name != "Scrum Master"):
            self.__instance_chatbot()
    def __str__(self):
        return str(self.name)
    def get_name(self):
        return self.name
    def get_port(self):
        return self.port
    def get_url(self):
        return self.url
    def __eq__(self, other):
        # Two bots are equal when both name and port match.
        return ((self.get_name() == other.get_name()) and (self.get_port() == other.get_port()))
    def __hash__(self):
        return hash(self.__key())
    def __key(self):
        return self.name
    def __instance_chatbot(self):
        # Prime the RASA bot's slots by greeting it once.
        data = {"sender": self.get_name(), "message": "Hola "+ self.get_name(), "metadata": { "flag": 1 , "toMe": 1}}
        x = requests.post(self.get_url(), json = data)
    def send_message(self, msg, sender, flag=1, toMe=1):
        """
        msg = message to send
        sender = who sends the message
        flag = 0 -> the bot does not answer (it interprets this is not the last message it must receive)
        flag = 1 -> the bot answers (its policy interprets this as the last message it must receive)
        toMe = 1 -> the bots interpret "I am the addressee"
        toMe = 0 -> the bots interpret "I am not the addressee"
        Returns [concatenated reply text, addressee name] or None on HTTP error.
        """
        data = {"sender": sender.get_name(), "message": msg, "metadata": { "flag": flag, "toMe": toMe} }
        x = requests.post(self.get_url(), json = data)
        rta = x.json()
        text = ""
        if(rta != [] ):
            while(len(rta) > 1): # The last element of rta is the name of the addressee
                text += rta.pop(0)['text'] + ". "
            text = [text]
            text.append(rta.pop(0)['text'])
        else:
            text = ['','None']
        if x.status_code == 200:
            return text
        else:
            print(x.raw)
            return None
    def notifyAll(self, msg, destino):
        # Fixed: use the mediator injected at construction time instead of
        # the module-level global `mediator` the original read by accident.
        self.mediator.notifyAll(self, msg, destino)
    def notifyAllMeeting(self, msg, dev):
        # Same fix as notifyAll: route through the injected mediator.
        for d in dev:
            self.mediator.notifyAll(self, msg, d)
class Mediator():
    """Routes every message between bots: broadcasts a message, gathers the
    replies, and recursively relays them to keep the conversation going."""
    def __init__(self,name,scrum=None,developers=None):
        self.name = name
        self.scrum = scrum # list of the Scrum Masters
        self.developers = developers # list of the developers
    def set_developers(self, devs):
        self.developers = devs
    def set_scrum(self, scrums):
        self.scrum = scrums
    def notifyAll(self,origen:Bot, message, destino:Bot):
        """Broadcast *message* from *origen*; collect every non-empty reply,
        forward them to *origen*, then recurse on the final reply."""
        answer_queue = [] # list of the replies the mediator receives
        metiches = {} # a dict {key = bot, value = [reply, who it was addressed to]}
        for sc in self.scrum:
            if(sc != origen):
                if(sc == destino):
                    rta = sc.send_message(message, origen, toMe=1)
                    if (rta[0] != ''): # rta = [message, sender]
                        answer_queue.append([sc, rta[0]])
                else:
                    rta = sc.send_message(message, origen, toMe=0)
                    if(rta[0] != ''): # someone who wasn't supposed to reply did
                        metiches[sc] = rta
                        answer_queue.append([sc, rta[0]])
        for dev in self.developers: # walk the dev list asking each to produce a reply
            if(dev != origen):
                if(dev == destino):
                    rta = dev.send_message(message, origen, toMe=1)
                    if(rta[0] != ''):
                        answer_queue.append([dev, rta[0]])
                else:
                    rta = dev.send_message(message, origen, toMe=0)
                    if(rta[0] != ''): # someone who wasn't supposed to reply did
                        metiches[dev] = rta # {key = dev, value = [reply, addressee]}
                        answer_queue.append([dev, rta[0]])
        # At this point answer_queue holds every reply to 'message'.
        #print("---printing the answer list for control purposes---")
        #print(answer_queue)
        while (len(answer_queue) > 1):
            """
            now we walk the reply list forwarding everything to whoever
            triggered this notifyAll invocation
            """
            prox_sms = answer_queue.pop(0) # prox_sms = [dev/scrum, reply]
            print(prox_sms[0].get_name() + ": " + prox_sms[1])
            answer_dev = origen.send_message(prox_sms[1], prox_sms[0], flag=0, toMe= 1) # flag=0 so the bot does not answer
        if(len(answer_queue) == 1):
            prox_sms = answer_queue.pop(0) # prox_sms = [dev/scrum, reply]
            print(prox_sms[0].get_name() + ": " + prox_sms[1])
            answer_dev = origen.send_message(prox_sms[1], prox_sms[0], flag=1, toMe= 1)
            # answer_dev starts as '[{"recipient_id":"Emiliano","text":"..."},
            # {"recipient_id":"Emiliano","text":"sended to"}]'
            # send_message cleans it up into:
            # answer_dev = ["<reply text>", "<addressee name>"]
            # so answer_dev[0] is the bot's reply and answer_dev[1] names
            # the bot it is addressed to.
            bot = self.give_me_bot(answer_dev[1])
            print(str(origen.get_name()) + ": " + str(bot.get_name()) + ", " + answer_dev[0])
            if(bot in metiches.keys() and bot != destino):
                self.notifyAll(bot, metiches[bot], origen)
            else:
                self.notifyAll(origen, answer_dev[0], bot)
    def give_me_bot(self,name):
        # Return the Bot object associated with *name* (devs first, then scrum).
        for dev in self.developers:
            if(dev.get_name() == name):
                return dev
        for sc in self.scrum:
            if(sc.get_name() == name):
                return sc
# Delay between messages so the exchange doesn't run flat out (seconds).
delay = 0.5
mediator = Mediator("mediator")
# Ports where the chatbots must already be running.
#port_EMI = 5005
#port_MATI = 5006
#port_PEDRO = 5008
#port_SM = 5007
emi = Bot("Emiliano", 5005, mediator)
matiB = Bot("MatiasB", 5006, mediator)
sm = Bot("MatiasG", 5007, mediator)
pedro = Bot("Pedro", 5008, mediator)
mediator.set_developers([emi,matiB,pedro])
mediator.set_scrum([sm])
# Kick off the conversation: the Scrum Master asks Pedro about yesterday.
sm.notifyAll("Con que trabajaste el dia de ayer?",pedro)
#sm.notifyAllMeeting("Con que trabajaste ayer?",[pedro,emi,matiB])
"""
esto corta cuando se vacia la queue o todos lanzan ''. Para que lancen '' los dev's cuando
hay una interrupción, luego del agradecimiento que dispare un action_listen entonces no se
agregaria nada a queue
para mentirle a rasa: si en la cola tenes mas de un elemento es porque tenes una "interrupcion"
osea te respondio uno que tenia que 'Escuchar' por lo tanto a Rasa le podes mentir diciendole
al segundo bot "Hubo una interrumcion" o algo por el estilo y que él internamente resuelva
eso y se plantee la nueva pregunta que va a hacerle. Ejemplo: esta hablando Emi - Scrum, Emi dice
tuve un problema entonces por casualidad un DEV dice 'Resolvelo así..' y al mismo tiempo el Scrum
dice algo como 'Que pena...', en la cola tenes 2 elementos, lanzas primero el del DEV
para que siga ese hilo conversacional el dev con Emi (gracias por la ayuda bla bla bla)
y al ser recursivo cuando esto vuelva va a seguir teniendo un elemento la queue, el mensaje del SC
entonces si vos invertis el mensaje, es decir ahora se lo mandas al Scrum en vez a Emi pero con
una especie de clave o algo por el estilo le estas avisando al SC que hubo una "interrupcion"
en su conversacion, por lo que su logica interna resolverá que hacer, si preguntar otra cosa o lo q se le cante
Codigo viejo:
scrum_master = answer_queue.pop(0)##un paso más adelante de como habia quedado tras la interrupcion
rta = scrum_master[1].send_message(scrum_master[0], "Respondeme")
#answer_queue.append([rta,scrum_master[0]])
scrum_master[1].notifyAll(rta, self, scrum_master[0]) #entro en recurrencia al notifyAll del SM
"""
# Mando el mensaje inicial simulando que soy el chatbot 1
#print("Scrum Master: Buenas")
#actual_dev = [emi, escuchador, escuchador]
#actual_dev_name = emi.get_name()
#puerto destino, msj a enviar, chatbot destino
#message_c1 tiene la respuesta del send_message
#message_emi = send_message("Buenas Emiliano", emi, emi) #Chatbot Emiliano
#message_mati = send_message("Buenas MatiasB", matiB, matiB) #Chatbot MatiasB
#message_pedro = send_message("Buenas Pedro", pedro, pedro) #Chatbot Pedro
#print(message_emi)
#print(message_mati)
"""
lista_msg = [message_emi, message_mati, message_pedro]
msg_to_send = lista_msg.pop(0)
adios = ["Hasta mañana, que le vaya bien", "Hasta la proxima, que vaya bien",
"hasta mañana","chau,hasta luego","buenas noches","adios","hasta la proxima","que tengas un buen dia",
"nos vemos luego","chau chau", "nv bro","Nos vemos mañana","Nos re vimos mañana","Nos re vimos perrito" ]
# Loop infinito de los chatbots mandandose mensajes entre si, la conversacion se imprime en consola desde la funcion send_message
while True:
if(not msg_to_send in adios):
message_sm = send_message(msg_to_send, sm, sm)
time.sleep(delay)
message_emi = send_message(message_sm, actual_dev[0], emi)
message_mati = send_message(message_sm, actual_dev[1], matiB)
message_pedro = send_message(message_sm, actual_dev[2], pedro)
#Si emi es el primero que esta hablando, Mati no va a responder hasta que Emi se despida
#Cuando Emi se despide, se hace el pop de la lista de mensajes
#y queda el primer mensaje que mati envio para continuar la conversacion con el
#verificar con quien habla y cambiar el msg_to_send
if(actual_dev[0].equals(emi)):
msg_to_send = message_emi
elif (actual_dev[1].equals(matiB)):
msg_to_send = message_mati
else:
msg_to_send = message_pedro
else:
if (len(lista_msg) == 2):
msg_to_send = lista_msg.pop(0)
actual_dev[0] = escuchador
actual_dev[1] = matiB
elif (len(lista_msg) == 1):
msg_to_send = lista_msg.pop(0)
actual_dev[1] = escuchador
actual_dev[2] = pedro
else:
break
""" |
# Print the numbers between 1 and 100 that ARE divisible by a given number.
# (The original header said "not divisible", contradicting both the printed
# message and the loop's behavior; the loop also stopped at 99.)
n = int(input("Enter the number"))
print('The numbers between 1-100 that are divisible by %d are:' % n)
for i in range(1, 101):  # 101 so that 100 itself is included
    if i % n == 0:
        print(i, end=" ")
|
#!/usr/bin/env python
# Plot, across every experiment archive in a directory, the average number of
# positive and negative components per iteration, with a min/max envelope.
# Usage: arg1 = archive path, arg2 = score file name, arg3 = iteration count.
import sys
import os
import matplotlib.pyplot as plt
import class_analyse_tools as tools
iteration = list()
nbr_pos_vect = list()  # one positive-component series per experiment folder
nbr_neg_vect = list()  # one negative-component series per experiment folder
if len(sys.argv) != 4 :
    print("Usage : \narg1 : archive path")
    print("arg2 : name of file with the scores")
    print("arg3 : number of iteration")
    sys.exit(1)
# Load and sort each experiment's component counts.
# NOTE(review): assumes sys.argv[1] ends with a path separator — confirm callers.
for arch_exp in os.listdir(sys.argv[1]) :
    archive_folder = sys.argv[1] + arch_exp + "/"
    iteration, nbr_pos, nbr_neg \
        = tools.load_nbr_comp(archive_folder + sys.argv[2])
    iteration, tabs = tools.sort_data(iteration, nbr_pos, nbr_neg)
    nbr_pos = tabs[0]
    nbr_neg = tabs[1]
    nbr_pos_vect.append(nbr_pos)
    nbr_neg_vect.append(nbr_neg)
# Average across experiments; min/max bound the shaded envelope below.
aver_pos, min_pos, max_pos = tools.average_vector(nbr_pos_vect)
aver_neg, min_neg, max_neg = tools.average_vector(nbr_neg_vect)
# `iteration` still holds the last experiment's axis; truncate to match the averages.
iteration = iteration[:len(aver_pos)]
fig, ax1 = plt.subplots(1,sharex=True)
ax1.plot(iteration,aver_pos,'g-',label='number of positive components',linewidth=2)
ax1.plot(iteration,aver_neg,'r-',label='number of negative components',linewidth=2)
ax1.plot(iteration,min_pos,'g-',iteration,max_pos,'g-',linewidth=.5)
ax1.plot(iteration,min_neg,'r-',iteration,max_neg,'r-',linewidth=.5)
# Shade between the envelope bounds and the average for each sign.
ax1.fill_between(iteration,min_pos,aver_pos,facecolor='green',alpha=.5)
ax1.fill_between(iteration,max_pos,aver_pos,facecolor='green',alpha=.5)
ax1.fill_between(iteration,min_neg,aver_neg,facecolor='red',alpha=.5)
ax1.fill_between(iteration,max_neg,aver_neg,facecolor='red',alpha=.5)
# ax1.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=2, borderaxespad=0.,fontsize=25)
#
ax1.set_ylabel('number of components',fontsize=25)
ax1.set_xlabel('number of iteration',fontsize=25)
ax1.set_aspect('auto')
ax1.tick_params(labelsize=20)
ax1.set_xlim([0,int(sys.argv[3])])
# ax1.set_ylim([0,int(sys.argv[3])])
plt.show()
|
def score(test):
    """Score a 0/1 answer sheet: each correct answer (1) is worth one point
    more than the previous consecutive correct answer (1, 2, 3, ...); a
    wrong answer resets the streak."""
    total = 0
    streak = 0
    for answer in test:
        if answer == 1:
            streak += 1
            total += streak
        else:
            streak = 0
    return total
# Read the count N (part of the input format but never used), then the
# space-separated 0/1 answers, and print their consecutive-streak score.
N = int(input())
test = list(map(int, input().split()))
print(score(test))
|
#!/usr/bin/python
# Benchmark Google Drive (API v2) upload/download round-trips for CSV files
# of three sizes (10K / 25K / 100K records), printing the elapsed time of
# each step. Python 2 script (raw_input, time.clock).
import httplib2
import pprint
import time
from apiclient.discovery import build
from apiclient.http import MediaFileUpload
from oauth2client.client import OAuth2WebServerFlow

# Copy your credentials from the console.
# SECURITY NOTE: OAuth client credentials are hard-coded here; prefer loading
# them from the environment or a config file kept out of version control.
CLIENT_ID = '135248680417-jvna7sa41ae8vbfq5kgqb6q5ubfovkj9.apps.googleusercontent.com'
CLIENT_SECRET = 'NZja3IU0VgpljFal_LrapMo7'
#CLIENT_ID = '418865297255-s1i8272rntvgnq72abatg08eqqtpkpep.apps.googleusercontent.com'
#CLIENT_SECRET = '3A2cpzIEUf6SSL9RUPwp7x6O'
# Check https://developers.google.com/drive/scopes for all available scopes
OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive'
# Redirect URI for installed apps
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
# Directory holding the 10K/25K/100K CSV fixtures
BASE_DIR = 'D:\\books\\semester4\\Cloud_Computing\\Project5-GWE\\'


def authorize():
    """Run the installed-app OAuth flow and return an authorized Drive service."""
    flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, OAUTH_SCOPE,
                               redirect_uri=REDIRECT_URI)
    authorize_url = flow.step1_get_authorize_url()
    print('Go to the following link in your browser: ' + authorize_url)
    code = raw_input('Enter verification code: ').strip()
    credentials = flow.step2_exchange(code)
    # Create an httplib2.Http object and authorize it with our credentials
    http = credentials.authorize(httplib2.Http())
    return build('drive', 'v2', http=http)


def benchmark(drive_service, title):
    """Upload BASE_DIR/<title>.csv to Drive, download it back to
    <title>-download.csv, and print the elapsed time of each direction.

    The download timing deliberately includes writing the local copy,
    matching the original measurement.
    """
    media_body = MediaFileUpload(BASE_DIR + title + '.csv',
                                 mimetype='text/csv', resumable=True)
    body = {
        'title': title,
        'description': 'A test document',
        'mimeType': 'text/csv'
    }
    start = time.clock()
    file = drive_service.files().insert(body=body, media_body=media_body).execute()
    end = time.clock()
    print("uploading " + title + ": " + str(end - start))
    downloadUrl = file.get('downloadUrl')
    start = time.clock()
    resp, content = drive_service._http.request(downloadUrl)
    if resp.status == 200:
        with open(title + '-download.csv', 'wb') as outstream:
            outstream.write(content)
    end = time.clock()
    print("downloading " + title + ": " + str(end - start))


drive_service = authorize()
for size in ('10K', '25K', '100K'):
    benchmark(drive_service, size)
|
# public function to use
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseBadRequest
import os.path
from captcha.tools import GenCaptcha
from captcha.models import CaptchaStore
def set_captcha(key):
    """Generate a new captcha for `key`, persist its answer, and return the
    rendered image bytes.

    If a CaptchaStore row for `key` already exists its answer is replaced;
    otherwise a new row is created.
    """
    generator = GenCaptcha()
    answer, image_buffer = generator.create_img()
    try:
        # Reuse the existing row for this key if there is one.
        store = CaptchaStore.objects.get(key=key)
        store.answer = answer
        store.save()
    except ObjectDoesNotExist:
        # First captcha for this key: create a fresh row.
        store = CaptchaStore(key=key, answer=answer)
        store.save()
    return image_buffer.read()
def check(key, ans):
    """Return True iff `ans` (case-insensitive) matches the stored captcha
    answer for `key`. The stored captcha is single-use: it is deleted on any
    lookup, whether the answer was right or wrong."""
    CaptchaStore.clean_expire()
    try:
        entry = CaptchaStore.objects.get(key=key)
    except ObjectDoesNotExist:
        return False
    # Consume the captcha before reporting the result (one-shot semantics).
    entry.delete()
    return entry.answer == ans.lower()
|
# Relative densities: Honey, Water, Alcohol, Oil.
DENSITY = {'H': 1.36, 'W': 1, 'A': 0.87, 'O': 0.8}


def separate_liquids(glass):
    """Let the mixed liquids in `glass` settle out by density.

    `glass` is a rectangular grid (list of equal-length rows, each a string
    or list of the density keys above). Returns a grid of the same shape as
    lists of cells, rows ordered top-to-bottom from least to most dense.
    Returns [] for an empty glass.
    """
    if not glass:
        return []
    width = len(glass[0])
    cells = sorted((cell for row in glass for cell in row),
                   key=lambda liquid: DENSITY[liquid])
    # range (not Python-2-only xrange) keeps this valid on both Python 2 and 3
    return [cells[i:i + width] for i in range(0, len(cells), width)]
|
def max(xs):
    """Return the largest element of the non-empty list `xs`, recursively.

    NOTE: intentionally shadows the builtin `max` (kept for the demo below).
    """
    if len(xs) == 1:
        return xs[0]
    rest_max = max(xs[1:])
    if xs[0] > rest_max:
        return xs[0]
    return rest_max
# Demo (Python 2 print statements):
print max([1]) # => 1
print max([1,2,10,3,4]) # => 10
|
# Prepend a 0-based line index to every line of compiledv2.txt, writing the
# result as "<index>,<original line>" CSV rows.
# `with` guarantees both handles are closed even if the loop raises
# (the original leaked the output handle on error).
with open('/Users/alejo/Projects/ActiveLearning/Data/compiledv2_Headers.csv', 'w') as fout, \
        open('/Users/alejo/Projects/ActiveLearning/Data/compiledv2.txt', 'r') as f:
    for i, line in enumerate(f):
        fout.write("{0},{1}".format(i, line))
|
import os
import pandas as pd
import numpy as np
import datetime
import gc
class Dataset(object):
    """Loader / feature builder for the Elo merchant-category Kaggle data.

    Wraps the raw CSVs (train/test, historical and new-merchant
    transactions), builds aggregated per-card features, and can export the
    data in libffm text format.
    """

    def __init__(self, train_path='train.csv', test_path='test.csv',
                 hist_trans_path='historical_transactions.csv',
                 new_trans_path='new_merchant_transactions.csv',
                 new_merc_path='merchants.csv', base_dir='../data'):
        """Resolve all CSV paths relative to `base_dir`."""
        self.train_path = os.path.join(base_dir, train_path)
        self.test_path = os.path.join(base_dir, test_path)
        self.hist_trans_path = os.path.join(base_dir, hist_trans_path)
        self.new_trans_path = os.path.join(base_dir, new_trans_path)
        self.new_merc_path = os.path.join(base_dir, new_merc_path)
        self.base_dir = base_dir

    def load_train(self):
        """Read the train CSV (parsing first_active_month). Returns None
        (after printing a message) if the file is missing."""
        print('load train data ...')
        if not os.path.isfile(self.train_path):
            print('{} - train path not found ! '.format(self.train_path))
            return
        return pd.read_csv(self.train_path, parse_dates=['first_active_month'])

    def set_outlier_col(self, df_train):
        """Add an 'outliers' column in-place: 1 where target < -30, else 0."""
        print('set train outlier ...')
        df_train['outliers'] = 0
        df_train.loc[df_train['target'] < -30, 'outliers'] = 1
        print('set outlier successfully')

    def load_test(self):
        """Read the test CSV (parsing first_active_month). Returns None
        (after printing a message) if the file is missing."""
        print('load test data ... ')
        if not os.path.isfile(self.test_path):
            print('{} - test path not found ! '.format(self.test_path))
            return
        return pd.read_csv(self.test_path, parse_dates=['first_active_month'])

    def get_new_columns(self, name, aggs):
        """Build '<name>_<col>_<agg>' names for a groupby-agg spec dict."""
        return [name + '_' + k + '_' + agg for k in aggs.keys() for agg in aggs[k]]

    def fill_hist_missing(self, df_hist_trans, df_new_merchant_trans):
        """Fill known-missing transaction fields in-place with fixed fallbacks."""
        print('filling the missing value in hist ...')
        for df in [df_hist_trans, df_new_merchant_trans]:
            df['category_2'].fillna(1.0, inplace=True)
            df['category_3'].fillna('A', inplace=True)
            df['merchant_id'].fillna('M_ID_00a6ca8a8a', inplace=True)

    def load_hist_new_merchant(self):
        """Load both transaction tables, derive date features, map Y/N flags
        to 1/0, and downcast dtypes.

        Returns (df_hist_trans, df_new_merchant_trans), or None if any
        required file is missing.
        """
        print('load history data ...')
        if not os.path.isfile(self.hist_trans_path):
            print('hist trans path not found ! ')
            return
        if not os.path.isfile(self.new_merc_path):
            print('new merchant path not found !')
            return
        if not os.path.isfile(self.new_trans_path):
            print('new hist trans path not found !')
            return
        df_hist_trans = pd.read_csv(self.hist_trans_path)
        df_new_merchant_trans = pd.read_csv(self.new_trans_path)
        self.fill_hist_missing(df_hist_trans, df_new_merchant_trans)
        for df in [df_hist_trans, df_new_merchant_trans]:
            df['purchase_date'] = pd.to_datetime(df['purchase_date'])
            df['year'] = df['purchase_date'].dt.year
            df['weekofyear'] = df['purchase_date'].dt.weekofyear
            df['month'] = df['purchase_date'].dt.month
            df['dayofweek'] = df['purchase_date'].dt.dayofweek
            df['weekend'] = (df.purchase_date.dt.weekday >= 5).astype(int)
            df['hour'] = df['purchase_date'].dt.hour
            df['authorized_flag'] = df['authorized_flag'].map({'Y': 1, 'N': 0})
            df['category_1'] = df['category_1'].map({'Y': 1, 'N': 0})
            # https://www.kaggle.com/c/elo-merchant-category-recommendation/discussion/73244
            df['month_diff'] = ((datetime.datetime.today() - df['purchase_date']).dt.days) // 30
            df['month_diff'] += df['month_lag']
        print('reduce hist_trans & new_merchant_trans memory usage...')
        self.reduce_mem_usage(df_hist_trans)
        self.reduce_mem_usage(df_new_merchant_trans)
        return df_hist_trans, df_new_merchant_trans

    def agg1(self, df_hist_trans, df_new_merchant_trans):
        """Aggregate both transaction tables per card_id.

        Returns (df_hist_trans_group, df_new_trans_group), each one row per
        card_id with 'hist_*' / 'new_hist_*' aggregate columns plus derived
        purchase-date span/average/recency features.
        """
        # --- historical transactions ---
        aggs = {}
        for col in ['month', 'hour', 'weekofyear', 'dayofweek', 'year', 'subsector_id', 'merchant_id',
                    'merchant_category_id']:
            aggs[col] = ['nunique']
        aggs['purchase_amount'] = ['sum', 'max', 'min', 'mean', 'var']
        aggs['installments'] = ['sum', 'max', 'min', 'mean', 'var']
        aggs['purchase_date'] = ['max', 'min']
        aggs['month_lag'] = ['max', 'min', 'mean', 'var']
        aggs['month_diff'] = ['mean']
        aggs['authorized_flag'] = ['sum', 'mean']
        aggs['weekend'] = ['sum', 'mean']
        aggs['category_1'] = ['sum', 'mean']
        aggs['card_id'] = ['size']
        for col in ['category_2', 'category_3']:
            # mean purchase amount per category value (target-style encoding)
            df_hist_trans[col + '_mean'] = df_hist_trans.groupby([col])['purchase_amount'].transform('mean')
            aggs[col + '_mean'] = ['mean']
        new_columns = self.get_new_columns('hist', aggs)
        df_hist_trans_group = df_hist_trans.groupby('card_id').agg(aggs)
        df_hist_trans_group.columns = new_columns
        df_hist_trans_group.reset_index(drop=False, inplace=True)
        df_hist_trans_group['hist_purchase_date_diff'] = (
            df_hist_trans_group['hist_purchase_date_max'] - df_hist_trans_group['hist_purchase_date_min']).dt.days
        df_hist_trans_group['hist_purchase_date_average'] = df_hist_trans_group['hist_purchase_date_diff'] / \
            df_hist_trans_group['hist_card_id_size']
        df_hist_trans_group['hist_purchase_date_uptonow'] = (
            datetime.datetime.today() - df_hist_trans_group['hist_purchase_date_max']).dt.days
        # --- new-merchant transactions (same spec, minus authorized_flag) ---
        aggs = {}
        for col in ['month', 'hour', 'weekofyear', 'dayofweek', 'year', 'subsector_id', 'merchant_id',
                    'merchant_category_id']:
            aggs[col] = ['nunique']
        aggs['purchase_amount'] = ['sum', 'max', 'min', 'mean', 'var']
        aggs['installments'] = ['sum', 'max', 'min', 'mean', 'var']
        aggs['purchase_date'] = ['max', 'min']
        aggs['month_lag'] = ['max', 'min', 'mean', 'var']
        aggs['month_diff'] = ['mean']
        aggs['weekend'] = ['sum', 'mean']
        aggs['category_1'] = ['sum', 'mean']
        aggs['card_id'] = ['size']
        for col in ['category_2', 'category_3']:
            df_new_merchant_trans[col + '_mean'] = df_new_merchant_trans.groupby([col])['purchase_amount'].transform(
                'mean')
            aggs[col + '_mean'] = ['mean']
        new_columns = self.get_new_columns('new_hist', aggs)
        df_new_trans_group = df_new_merchant_trans.groupby('card_id').agg(aggs)
        df_new_trans_group.columns = new_columns
        df_new_trans_group.reset_index(drop=False, inplace=True)
        df_new_trans_group['new_hist_purchase_date_diff'] = (
            df_new_trans_group['new_hist_purchase_date_max'] - df_new_trans_group['new_hist_purchase_date_min']).dt.days
        df_new_trans_group['new_hist_purchase_date_average'] = df_new_trans_group['new_hist_purchase_date_diff'] / \
            df_new_trans_group['new_hist_card_id_size']
        df_new_trans_group['new_hist_purchase_date_uptonow'] = (
            datetime.datetime.today() - df_new_trans_group['new_hist_purchase_date_max']).dt.days
        return df_hist_trans_group, df_new_trans_group

    def combine_all_features(self, train_list, test_list):
        """Left-join extra per-card feature CSVs (paired train/test files in
        `train_list`/`test_list`) onto train/test, writing train_all.csv and
        test_all.csv under base_dir."""
        train_df = self.load_train()
        test_df = self.load_test()
        for file in zip(train_list, test_list):
            train_path = os.path.join(self.base_dir, file[0])
            test_path = os.path.join(self.base_dir, file[1])
            if os.path.isfile(train_path) and os.path.isfile(test_path):
                new_train_file = pd.read_csv(train_path)
                new_test_file = pd.read_csv(test_path)
                # only take columns we don't already have, plus the join key
                attach_features = [_f for _f in new_test_file.columns.values if _f not in train_df.columns.values]
                attach_features.append('card_id')
                train_df = train_df.merge(new_train_file[attach_features], on="card_id", how="left")
                test_df = test_df.merge(new_test_file[attach_features], on="card_id", how="left")
        train_df.to_csv(os.path.join(self.base_dir, "train_all.csv"))
        test_df.to_csv(os.path.join(self.base_dir, "test_all.csv"))

    def convert_feature_to_outlier_mean(self, df_train, df_test):
        """Target-encode feature_1..3 in-place with the per-value mean
        outlier rate learned from df_train.

        BUG FIX: this previously grouped on 'outlier', but set_outlier_col
        creates the column as 'outliers', so the lookup raised KeyError.
        """
        for f in ['feature_1', 'feature_2', 'feature_3']:
            feature_mapping = df_train.groupby([f])['outliers'].mean()
            df_train[f] = df_train[f].map(feature_mapping)
            df_test[f] = df_test[f].map(feature_mapping)

    def preprocess_train_test(self, df_train, df_test):
        """Flag outliers on df_train, derive date features on both frames,
        and target-encode feature_1..3 (all in-place)."""
        self.set_outlier_col(df_train)
        # add date related attr
        for df in [df_train, df_test]:
            df['first_active_month'] = pd.to_datetime(df['first_active_month'])
            df['dayofweek'] = df['first_active_month'].dt.dayofweek
            df['weekofyear'] = df['first_active_month'].dt.weekofyear
            df['month'] = df['first_active_month'].dt.month
            df['elapsed_time'] = (datetime.datetime.today() - df['first_active_month']).dt.days
            df['hist_first_buy'] = (df['hist_purchase_date_min'] - df['first_active_month']).dt.days
            df['new_hist_first_buy'] = (df['new_hist_purchase_date_min'] - df['first_active_month']).dt.days
            for f in ['hist_purchase_date_max', 'hist_purchase_date_min', 'new_hist_purchase_date_max',
                      'new_hist_purchase_date_min']:
                # datetime64[ns] -> seconds since epoch (int ns * 1e-9)
                df[f] = df[f].astype(np.int64) * 1e-9
            df['card_id_total'] = df['new_hist_card_id_size'] + df['hist_card_id_size']
            df['purchase_amount_total'] = df['new_hist_purchase_amount_sum'] + df['hist_purchase_amount_sum']
        self.convert_feature_to_outlier_mean(df_train, df_test)

    def reduce_mem_usage(self, df, verbose=True):
        """Downcast numeric columns in-place to the smallest dtype that holds
        their value range; returns df for convenience."""
        numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
        start_mem = df.memory_usage().sum() / 1024 ** 2
        for col in df.columns:
            col_type = df[col].dtypes
            if col_type in numerics:
                c_min = df[col].min()
                c_max = df[col].max()
                if str(col_type)[:3] == 'int':
                    if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                        df[col] = df[col].astype(np.int8)
                    elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                        df[col] = df[col].astype(np.int16)
                    elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                        df[col] = df[col].astype(np.int32)
                    elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                        df[col] = df[col].astype(np.int64)
                else:
                    if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                        df[col] = df[col].astype(np.float16)
                    elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                        df[col] = df[col].astype(np.float32)
                    else:
                        df[col] = df[col].astype(np.float64)
        end_mem = df.memory_usage().sum() / 1024 ** 2
        if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (
            start_mem - end_mem) / start_mem))
        return df

    def preprocess(self, reload=False, version='1.0'):
        """Build (or, with reload=True, reuse already-merged) train/test
        frames and return (train_X, train_Y, test, features, cate_features)."""
        df_train = self.load_train()
        df_test = self.load_test()
        if not reload:
            if version == '1.0':
                df_hist_trans, df_new_merchant_trans = self.load_hist_new_merchant()
                df_hist_trans_group, df_new_trans_group = self.agg1(df_hist_trans, df_new_merchant_trans)
                df_train = df_train.merge(df_hist_trans_group, on='card_id', how='left')
                df_test = df_test.merge(df_hist_trans_group, on='card_id', how='left')
                # free the large intermediates as soon as they are merged
                del df_hist_trans_group
                gc.collect()
                df_train = df_train.merge(df_new_trans_group, on='card_id', how='left')
                df_test = df_test.merge(df_new_trans_group, on='card_id', how='left')
                del df_new_trans_group
                gc.collect()
                del df_hist_trans
                gc.collect()
                del df_new_merchant_trans
                gc.collect()
                self.preprocess_train_test(df_train, df_test)
                df_train.to_csv('df_train_agg1.csv', index=False)
                df_test.to_csv('df_test_agg1.csv', index=False)
        train_Y = df_train['target']
        test = df_test
        del df_train['target']
        train_X = df_train
        features = [c for c in df_train.columns if c not in ['card_id', 'first_active_month', 'outliers', 'Unnamed: 0']]
        cate_features = [c for c in features if 'feature_' in c]
        return train_X, train_Y, test, features, cate_features

    def _write_ffm_rows(self, df, path, catdict, catcode, currentcode, tag):
        """Serialize `df` to `path` in libffm format
        ('<label> <field>:<index>:<value> ...').

        catdict maps column -> 0 (numeric) / 1 (categorical); catcode maps
        column -> {value: code} and is extended as new values are seen.
        Returns the updated running code counter.
        """
        num_rows = len(df)
        with open(path, 'w') as text_file:
            for index, row in enumerate(range(num_rows)):
                if ((index % 100000) == 0):
                    print(tag + ' Row', index)
                datastring = ""
                datarow = df.iloc[row].to_dict()
                datastring += str(datarow['target'])
                for i, x in enumerate(catdict.keys()):
                    if catdict[x] == 0:
                        # numeric feature: emit its raw value
                        datastring = datastring + " " + str(i) + ":" + str(i) + ":" + str(datarow[x])
                    else:
                        # categorical: assign a fresh code the first time a value is seen
                        if x not in catcode:
                            catcode[x] = {}
                            currentcode += 1
                            catcode[x][datarow[x]] = currentcode
                        elif datarow[x] not in catcode[x]:
                            currentcode += 1
                            catcode[x][datarow[x]] = currentcode
                        code = catcode[x][datarow[x]]
                        datastring = datastring + " " + str(i) + ":" + str(int(code)) + ":1"
                datastring += '\n'
                text_file.write(datastring)
        return currentcode

    def format_transformer(self, unwanted=None, fields=None,
                           train_file_name='alltrainffm.txt',
                           test_file_name='alltestffm.txt',
                           numeric_features=None, cate_feature=None):
        '''
        :param unwanted: unwanted list (currently overridden internally)
        :param fields: which fields to select; None selects everything
        :param train_file_name: training output file name
        :param test_file_name: test output file name
        :param numeric_features: columns to treat as numeric (all others categorical)
        :param cate_feature: unused, kept for interface compatibility
        :return: None; writes train/test files in libffm format
        <label> <field>:<feature>:<value> ..., where label is the target value
        '''
        train_df = self.load_train()
        test_df = self.load_test()
        unwanted = ['card_id', 'first_active_month', 'target']
        # add a dummy target so train and test share a schema
        test_df.insert(test_df.shape[1], 'target', 0)
        train_test_df = pd.concat([train_df, test_df])
        train_test_df = train_test_df.reset_index(drop=True)
        # candidate feature columns
        features = []
        if fields != None:
            for col in train_test_df.columns.values:
                if col in fields and col not in unwanted:
                    features.append(col)
        else:
            for col in train_test_df.columns.values:
                if col in unwanted:
                    continue
                else:
                    features.append(col)
        # Bucket high-cardinality columns into 30 bins so they can be treated
        # as categorical.
        # BUG FIX: the original used .loc[:n]/.loc[n:] — label slices are
        # inclusive, so the first test row leaked into the train slice.
        n_train = train_df.shape[0]
        for col in features:
            train_no = len(train_test_df[col].iloc[:n_train].unique())
            test_no = len(train_test_df[col].iloc[n_train:].unique())
            if train_no >= 30 or test_no >= 30:
                train_test_df.loc[:, col] = pd.cut(train_test_df.loc[:, col], 30, labels=False)
        train = train_test_df.iloc[:n_train].copy()
        test = train_test_df.iloc[n_train:].copy()
        categories = features
        print(categories)
        if numeric_features != None:
            numerics = numeric_features
        else:
            numerics = []
        currentcode = len(numerics)
        catdict = {}
        catcode = {}
        for x in numerics:
            catdict[x] = 0
        for x in categories:
            catdict[x] = 1
        print(catdict)
        currentcode = self._write_ffm_rows(train, os.path.join(self.base_dir, train_file_name),
                                           catdict, catcode, currentcode, 'Train')
        # BUG FIX: the test file previously re-serialized rows from `train`
        currentcode = self._write_ffm_rows(test, os.path.join(self.base_dir, test_file_name),
                                           catdict, catcode, currentcode, 'Test')
        print('successfully transform the data to libSvm format ...')

    def ffmtxt2csv(self, file_name='../submission/ffmoutput.txt', dest_name='../submission/ffmoutput.csv'):
        """Convert raw ffm predictions (one value per line) into a Kaggle
        submission CSV with card_id + target columns."""
        test_df = pd.read_csv(self.test_path)
        submission = pd.read_csv(file_name)
        self.recoverdf(submission)
        final_submission = pd.DataFrame({'card_id': test_df['card_id']})
        final_submission['target'] = submission.loc[:, submission.columns.values[0]]
        final_submission.to_csv(dest_name, index=False)

    def recoverdf(self, df):
        """The headerless prediction file loses its first value to read_csv's
        header parsing; push it back in as the first row (in-place)."""
        first_value = df.columns.values[0]
        if 'target' not in first_value:
            df.loc[-1] = float(first_value)
            df.index = df.index + 1
            df.sort_index(inplace=True)
# test
# test: merge the extra per-card feature files into train_all.csv / test_all.csv
if __name__ == '__main__':
    # dataset = Dataset('train.csv','test.csv')
    #
    # train_X, train_Y, test = dataset.preprocess(reload=True)
    dataset = Dataset('df_train_agg1.csv','df_test_agg1.csv')
    #dataset.ffmtxt2csv()
    dataset.combine_all_features(['train_clean.csv','train_agg_id1.csv','df_train_agg1.csv'],
                                 ['test_clean.csv','test_agg_id1.csv', 'df_test_agg1.csv'])
|
from rest_framework import serializers
from .models import Bike_model, Bike, Bike_rent
class Bike_modelSerializer(serializers.ModelSerializer):
    """Serialize every field of Bike_model."""
    class Meta:
        model = Bike_model
        fields = '__all__'
class BikeSerializer(serializers.ModelSerializer):
    """Serialize every field of Bike."""
    class Meta:
        model = Bike
        fields = '__all__'
class Bike_rentSerializer(serializers.ModelSerializer):
    """Serialize every field of Bike_rent."""
    class Meta:
        model = Bike_rent
        fields = '__all__'
# This file will define our database structure and provide methods to
# access our database
# Sets up database
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from json import dumps
Base = declarative_base()
# Class for Races. Each race has a unique id, name, and faction
class Race(Base):
    """A playable race: composite key (name, id), its faction, and the
    classes available to it."""
    __tablename__ = 'races'
    name = Column(String(20), nullable = False, primary_key = True)
    id = Column(Integer, nullable = False, primary_key = True)
    faction = Column(String(10), nullable = False)
    # NOTE(review): String(10) looks short for a list of playable classes — confirm
    playableClasses = Column(String(10), nullable = False)
    description = Column(String(500))
    def __repr__(self):
        return "<Race: {0}>".format(self.name)
# Class for Class. Each class has a name, power type, and roles available to that class.
class Class(Base):
    """A character class: unique name, its power/resource type, and the
    roles it can fill."""
    __tablename__ = 'classes'
    name = Column(String(20), nullable = False, primary_key = True)
    powerType = Column(String(20), nullable = False)
    roles = Column(String(50))
    def __repr__(self):
        return "<Class Name: {0}>".format(self.name)
# Class for Factions. The two factions have a name and description.
class Faction(Base):
    """One of the two factions: unique name plus a description."""
    __tablename__ = 'factions'
    name = Column(String(20), nullable = False, primary_key = True)
    description = Column(String(400))
    def __repr__(self):
        return "<Faction: {0}>".format(self.name)
# Class for Role. The three roles have a name and description.
class Role(Base):
    """One of the three group roles: unique name plus a description."""
    __tablename__ = 'roles'
    name = Column(String(20), nullable = False, primary_key = True)
    description = Column(String(400))
    def __repr__(self):
        return "<Role: {0}>".format(self.name)
# Class for Battlegroup. Each battlegroup has a name.
class Battlegroup(Base):
    """A battlegroup, identified solely by its name."""
    __tablename__ = 'battlegroups'
    name = Column(String(20), nullable = False, primary_key = True)
    def __repr__(self):
        return "<Battlegroup Name: {0}>".format(self.name)
class Stat(Base):
    """An arena statistic: composite key (name, statistic) plus description."""
    __tablename__ = 'stats'
    name = Column(String(40), nullable = False, primary_key = True)
    statistic = Column(String(100), nullable = False, primary_key = True)
    description = Column(String(50))
    def __repr__(self):
        return "<Stat Name: {0} -- Stat: {1}>".format(self.name, self.statistic)
# Represents the database and our interaction with it
class Db:
def __init__(self, refresh = False):
engineName = 'sqlite:///test.db' # Uses in-memory database
self.engine = create_engine(engineName)
self.metadata = Base.metadata
self.metadata.bind = self.engine
if refresh:
self.metadata.drop_all(bind=self.engine)
self.metadata.create_all(bind=self.engine)
Session = sessionmaker(bind=self.engine)
self.session = Session()
# Base commit method for our database
def commit(self):
self.session.commit()
# Base rollback method for our database
def rollback(self):
self.session.rollback()
########## METHODS FOR RACE CLASS ###########
# Returns the list of all races
def getRaces(self):
return self.session.query(Race).all()
# Returns a specific race when given a race name
def getRace(self, name):
return self.session.query(Race)\
.filter_by(name=name)\
.one_or_none()
# This method adds a new race to our database when given name, id, faction, and description
# Returns the Race object that got added
def addRace(self, name, id, faction, playableClasses, description):
newRace = Race(name=name, id=id, faction=faction, playableClasses=playableClasses, description=description)
self.session.add(newRace)
return newRace
def deleteRace(self, race):
self.session.delete(race)
########## METHODS FOR CLASS CLASS ###########
# Returns the list of all classes
def getClasses(self):
return self.session.query(Class).all()
# Returns a specific class when given a class name
def getClass(self, name):
return self.session.query(Class)\
.filter_by(name=name)\
.one_or_none()
# This method adds a new class to our database when given a className and powerType
# Returns the Class object that got added
def addClass(self, name, powerType, roles):
newClass = Class(name=name, powerType=powerType, roles=roles)
self.session.add(newClass)
return newClass
def deleteClass(self, class_name):
self.session.delete(class_name)
########## METHODS FOR FACTION CLASS ###########
# This method returns the list of factions in WoW
def getFactions(self):
return self.session.query(Faction).all()
# This method returns information on one of the two factions
# User gives name of which faction they want to learn about
def getFaction(self, name):
return self.session.query(Faction)\
.filter_by(name=name)\
.one_or_none()
# This method adds a new faction to the database with the given name
# and description, then returns the new faction that was created
def addFaction(self, name, description):
    """Create a Faction, stage it on the session, and return it (not yet committed)."""
    newFaction = Faction(name=name, description=description)
    self.session.add(newFaction)
    return newFaction
def deleteFaction(self, faction):
    """Stage the given Faction object for deletion (applied on commit)."""
    self.session.delete(faction)
########## METHODS FOR ROLE CLASS ###########
# This method returns the list of roles in WoW
def getRoles(self):
    """Return every Role row in the database."""
    return self.session.query(Role).all()
# This method returns information on one of the three roles
# User gives name of which role they want to learn about
def getRole(self, name):
    """Fetch the Role named *name*; returns None when no such role exists."""
    matching = self.session.query(Role).filter_by(name=name)
    return matching.one_or_none()
# This method adds a new role to the database with the given name
# and description, then returns the new role that was created
def addRole(self, name, description):
    """Create a Role, stage it on the session, and return it (not yet committed)."""
    newRole = Role(name=name, description=description)
    self.session.add(newRole)
    return newRole
def deleteRole(self, role):
    """Stage the given Role object for deletion (applied on commit)."""
    self.session.delete(role)
########## METHODS FOR BATTLEGROUP CLASS ###########
# This methods returns the list of battlegroups in WoW
def getBattlegroups(self):
    """Return every Battlegroup row in the database."""
    return self.session.query(Battlegroup).all()
# This method allows us to add a battlegroup to our database
def addBattlegroup(self, name):
    """Create a Battlegroup, stage it on the session, and return it (not yet committed)."""
    newbg = Battlegroup(name=name)
    self.session.add(newbg)
    return newbg
########## METHODS FOR STAT CLASS ###########
# This method returns the list of all arena stats in our database
def getStats(self):
    """Return every Stat row in the database."""
    return self.session.query(Stat).all()
# This method returns a specific statistic when given a name of a stat
def getStat(self, name):
    """Fetch the Stat named *name*; returns None when no such stat exists."""
    matching = self.session.query(Stat).filter_by(name=name)
    return matching.one_or_none()
# This method adds a new arena stat to our database
def addStat(self, name, stat, description):
    """Create a Stat (its "statistic" column holds *stat*), stage it, and return it."""
    newStat = Stat(name=name, statistic=stat, description=description,)
    self.session.add(newStat)
    return newStat
def deleteStat(self, stat):
    """Stage the given Stat object for deletion (applied on commit)."""
    self.session.delete(stat)
import pandas as pd  # NOTE(review): unused in this fragment -- confirm before removing
print("Remove this")  # leftover debug output -- the message itself asks for removal
name = "Ismet"
age = 29
from PIL import Image
import numpy as np
import random
import math
class DBSCAN():
    """Density-based clustering of the black pixels of a bitmap image.

    radis: neighbourhood radius in pixels (presumably "radius" -- name kept
           as it is part of the public interface).
    less:  minimum number of dense neighbours for a pixel to count as part of
           a cluster (the classic DBSCAN "minPts") -- TODO confirm semantics.

    valImgArr holds per-pixel labels: -1 = unlabelled, 0 = background/noise,
    1..value-1 = cluster ids.  newImgArr is the RGB visualisation.
    """
    def __init__(self, fileName, radis, less):
        self.img = Image.open(fileName)
        self.radis = radis
        self.less = less
        # --- Image
        self.width, self.height = self.img.size
        self.imgArr = np.array(self.img)
        self.valImgArr = np.full((self.height, self.width), -1)
        self.newImgArr = np.full((self.height, self.width, 3), -1)
        # --- Image Value
        self.value = 1  # next cluster label to assign
        self.valArr = [0]  # valArr[label] = pixel count of that cluster
    def Start(self):
        """Run the labelling scan, print the label count, build the RGB array."""
        self.Scan()
        print(self.value)
        self.TransToRGB()
    def CreateNewImage(self, fileName):
        """Save the colourised cluster image under *fileName*."""
        cimg = Image.fromarray(self.newImgArr.astype('uint8')).convert('RGB')
        cimg.save(fileName)
    # --- Hiding Layer
    def CalculateRadis(self, y, x, targetY, targetX):
        """Euclidean distance between two pixels, truncated to int."""
        return int(math.sqrt(math.pow((y - targetY), 2) + math.pow((x - targetX), 2)))
    def Scan(self):
        """Two passes: assign labels, then propagate neighbour labels."""
        for y in range (self.height):
            for x in range (self.width):
                rVal = self.ScanNearValue(y, x)
                # Non-black pixel: background.
                if (self.imgArr[y, x]).any():
                    self.valImgArr[y, x] = 0
                # Black but not dense enough: treated as noise/background.
                elif (not(self.imgArr[y, x]).any() and not(self.PassCircle(y, x))):
                    self.valImgArr[y, x] = 0
                # Dense black pixel with no labelled neighbour: start a cluster.
                elif (not(self.imgArr[y, x]).any() and self.PassCircle(y, x) and rVal == -1):
                    self.valImgArr[y, x] = self.value
                    self.valArr.append(1)
                    self.value += 1
                # Dense black pixel next to a labelled one: join that cluster.
                elif (not(self.imgArr[y, x]).any() and self.PassCircle(y, x) and rVal != -1):
                    self.valImgArr[y, x] = rVal
                    self.valArr[rVal] += 1
        # Second pass: re-propagate neighbour labels so adjacent regions that
        # ended up with different labels converge.
        for y in range (self.height):
            for x in range (self.width):
                rVal = self.ScanNearValue(y, x)
                if (self.imgArr[y, x]).any():
                    self.valImgArr[y, x] = 0
                elif (not(self.imgArr[y, x]).any() and self.valImgArr[y, x] != rVal):
                    self.valImgArr[y, x] = rVal
    def PassCircle(self, centerY, centerX):
        """True when at least *less* black pixels lie within *radis* of the centre."""
        counter = 0
        status = False
        for y in range (centerY - self.radis, centerY + self.radis + 1):
            for x in range (centerX - self.radis, centerX + self.radis + 1):
                if (y < 0 or y >= self.height or x < 0 or x >= self.width):
                    continue
                elif (not(self.imgArr[y, x]).any() and self.CalculateRadis(centerY, centerX, y, x) <= self.radis):
                    counter += 1
                if (counter >= self.less):
                    status = True
                    break
            if (status):
                break
        return status
    def ScanNearValue(self, centerY, centerX):
        """Label of the first labelled black neighbour within *radis*, else -1."""
        rVal = -1
        for y in range (centerY - self.radis, centerY + self.radis + 1):
            for x in range (centerX - self.radis, centerX + self.radis + 1):
                if (y < 0 or y >= self.height or x < 0 or x >= self.width or (y == centerY and x == centerX)):
                    continue
                elif (self.valImgArr[y, x] != -1 and not(self.imgArr[y, x]).any() and self.CalculateRadis(centerY, centerX, y, x) <= self.radis):
                    rVal = self.valImgArr[y, x]
                    break
            if (rVal != -1):
                break
        return rVal
    def TransToRGB(self):
        """Paint background white, then each non-empty cluster a random colour."""
        for y in range (self.height):
            for x in range (self.width):
                if (self.valImgArr[y, x] == 0):
                    for cir in range (3):
                        self.newImgArr[y, x, cir] = 255
        for pVal in range (1, self.value):
            if (self.valArr[pVal] != 0):
                self.Render(pVal, random.randint(1,255), random.randint(1,255), random.randint(1,255))
    def Render(self, val, r, g, b):
        """Set every pixel labelled *val* to colour (r, g, b)."""
        for y in range (self.height):
            for x in range (self.width):
                if (self.valImgArr[y, x] == val):
                    self.newImgArr[y, x, 0] = r
                    self.newImgArr[y, x, 1] = g
                    self.newImgArr[y, x, 2] = b
# --- --- ---
if __name__ == "__main__":
    # Each entry: (source bitmap, neighbourhood radius, density threshold,
    # output bitmap).  Add "./test2.bmp" entries here to reproduce the
    # second batch of conversions.
    runs = (
        ("./test.bmp", 1, 5, "./convert_1_5.bmp"),
        ("./test.bmp", 1, 7, "./convert_1_7.bmp"),
        ("./test.bmp", 3, 5, "./convert_3_5.bmp"),
        ("./test.bmp", 5, 35, "./convert_5_35.bmp"),
    )
    for src, radis, less, dst in runs:
        scanner = DBSCAN(src, radis, less)
        scanner.Start()
        scanner.CreateNewImage(dst)
#!usr/bin/env python3
import csv
import os
import sys
from datetime import datetime
import db_secrets
import database as db
import google_drive as gd
def migrate_gd_to_db(file_id_list, table):
    """
    Downloads data from Google Sheets and uploads it to the database.
    Argument passed to this function must be a Google Drive
    file id inside a list
    Examples: file_id_list[-1:] or file_id_list[-2:]

    Relies on the module-level `drive` (Google Drive session) and `con`
    (database connection) objects created below.
    """
    for file in file_id_list:
        # Download file from Google Drive and save it as temporary .csv file
        drive.download_temp_file(file["id"])
        with open(drive.filename) as csv_file:  # Open temporary .csv file
            filereader = csv.reader(csv_file, delimiter=',')
            # Get the date of last query in database
            # Date is in first column
            last_query_date = con.get_last_date(table)[0]
            # Read data from row and save it in dictionary
            for row in filereader:
                if row[0] == '':  # Skip first row if it contains column titles
                    continue
                air_data = {
                    'date': datetime.strptime(row[0], '%B %d, %Y at %I:%M%p'),
                    'pm25': row[1],
                    'tvoc': row[2],
                    'co2': row[3],
                    'temp': row[4],
                    'hum': row[5],
                }
                # If date in database is smaller than in file
                # or there is no record yet in database then
                # write row to database
                if (
                    (last_query_date is None)
                    or (last_query_date < air_data['date'])
                ):
                    print(f"Writing data from {air_data['date']}")
                    con.add_data(air_data, table)  # Write row data to database
        os.remove(drive.filename)  # Remove temporary file
# Google Drive folder that holds the exported sensor sheets.
FOLDER_ID = '1LYKq8vxWBQrS-nlUuAW_53Dtw86vzRS4'
os.chdir(os.path.dirname(sys.argv[0]))  # Change working directory to the script's own
con = db.Connection(  # Connect to database
    db_secrets.DATABASE['host'],
    db_secrets.DATABASE['user'],
    db_secrets.DATABASE['passwd'],
    db_secrets.DATABASE['database'])
drive = gd.GoogleDriveSession(FOLDER_ID)  # Connect to Google Drive folder
file_list = drive.get_file_list()  # Get all files from folder
# Migrate data from the two most recent files only.
migrate_gd_to_db(file_list[-2:], db_secrets.DATABASE['table'])
con.close_connection()  # Close connection with database
|
try:
import Tkinter as tk
from Tkinter import *
except ImportError:
import tkinter as tk
from tkinter import *
from base_input import BaseInputPage
from utils.paths import isValidPath
class DirectoryInputPage(BaseInputPage, object):
    """Wizard page collecting input/output directories and the T1 /
    lesion-mask filename identifiers.

    All values live in StringVars/BooleanVar owned by *controller*; this
    page only lays out the widgets and validates before advancing.
    """
    def __init__(self, parent, controller, frame_number):
        BaseInputPage.__init__(self, parent, controller, frame_number)
        lf_inputs = tk.LabelFrame(self, text='Inputs', font='Helvetica 14 bold', padx=15)
        lf_inputs.grid(row=self.starting_row+1, column=0, columnspan=100, sticky='nsew', padx=30, pady=30, ipadx=10, ipady=10)
        lf_inputs.grid_rowconfigure(0, weight=1)
        lf_inputs.grid_columnconfigure(0, weight=1)
        # Row 0: input directory (label + entry + chooser button).
        lb_input = Label(lf_inputs, text="1. Input Directory")
        lb_input.grid(row=0, column=0, sticky="W", pady=3)
        button1 = tk.Button(lf_inputs, text='Select', command=lambda : self.chooseDir(self, controller, controller.sv_input_dir, 'input directory'))
        button1.grid(row=0, column=91, sticky='W', padx=5, pady=3)
        en_input_dir = Entry(lf_inputs, textvariable=controller.sv_input_dir, width = 50)
        en_input_dir.grid(row=0, column=1, columnspan=90, sticky="W", pady=3)
        # Row 1: output directory.
        lb_output = Label(lf_inputs, text="2. Output Directory")
        lb_output.grid(row=1, column=0, sticky="W", pady=3)
        button2 = tk.Button(lf_inputs, text='Select', command=lambda : self.chooseDir(self, controller, controller.sv_output_dir, 'output directory'))
        button2.grid(row=1, column=91, sticky='E', padx=5, pady=3)
        en_output_dir = Entry(lf_inputs, textvariable=controller.sv_output_dir, width = 50)
        en_output_dir.grid(row=1, column=1, columnspan=90, sticky="W", pady=3)
        # Rows 2-3: filename identifiers used to find T1 and lesion-mask files.
        lb_t1_identifier = Label(lf_inputs, text="3. T1 Identifier")
        lb_t1_identifier.grid(row=2, column=0, sticky="W", pady=3)
        en_t1_identifier = Entry(lf_inputs, textvariable=controller.sv_t1_id, width = 50)
        en_t1_identifier.grid(row=2, column=1, columnspan=90, sticky="W", pady=3)
        lb_lm_identifier = Label(lf_inputs, text="4. Lesion Mask Identifier")
        lb_lm_identifier.grid(row=3, column=0, sticky="W", pady=(3, 20))
        en_lm_identifier = Entry(lf_inputs, textvariable=controller.sv_lesion_mask_id, width = 50)
        en_lm_identifier.grid(row=3, column=1, columnspan=90, sticky="W", pady=(3, 20))
        # Row 4: same-anatomical-space checkbox.
        lb_same_anatomical_space = Label(lf_inputs, text="My T1 and Lesion masks are in the same anatomical space.")
        lb_same_anatomical_space.grid(row=4, column=0, columnspan=90, sticky="W", padx=10, pady=(3, 20))
        chk_same_anatomical_space = tk.Checkbutton(lf_inputs, variable=controller.b_same_anatomical_space)
        chk_same_anatomical_space.grid(row=4, column=91, sticky='W', pady=(3, 20))
    def setFrameTitle(self):
        """Set the page heading shown above the form."""
        self.title.set('Please indicate the following')
    def moveToNextPage(self):
        """Advance only when both directories are valid and both identifiers non-empty."""
        input_dir = self.controller.sv_input_dir.get()
        output_dir = self.controller.sv_output_dir.get()
        if not isValidPath(input_dir.strip()) or not isValidPath(output_dir.strip()):
            self.setRequiredInputError('Directory inputs are invalid')
            return
        if not self.controller.sv_lesion_mask_id.get().strip()\
            or not self.controller.sv_t1_id.get().strip():
            self.setRequiredInputError()
            return
        else:
            super(DirectoryInputPage, self).moveToNextPage()
    def checkValues(self, controller):
        """Debug helper: dump the current form values to stdout."""
        # BUG FIX: these were Python 2 `print` statements -- a SyntaxError on
        # Python 3 even though the module imports Tkinter for both major
        # versions.  print() works identically on both.
        print(controller.sv_input_dir.get())
        print(controller.sv_output_dir.get())
        print(controller.run_normalize_status.get())
#!/usr/bin/env python
__author__ = "Alessandro Coppe"
'''
Create a single VCF from multiple VCFs from Mutect2, Strelka2 and Varscan2.
Parameters:
- v (--vcfs): the list of VCFs separated by ,
- d (--directory): the directory containing the VCFs
'''
import argparse
import os.path
import sys
def check_that_vcf_exists(vcf_path):
    """Exit the program with an explanatory message unless *vcf_path* is an
    existing regular file.

    sys.exit() with a string prints it to stderr and exits with status 1.
    """
    if not os.path.isfile(vcf_path):
        # Fixed grammar of the user-facing error message ("file do not exists").
        sys.exit("{} file does not exist".format(vcf_path))
def main():
    """Merge several VCFs into one stream on stdout.

    The header is taken from the first VCF only; data lines are deduplicated
    on their first two tab-separated columns (CHROM, POS), first occurrence
    wins.
    """
    parser = argparse.ArgumentParser(description="Join SNPs VCFs from Mutect2, Strelka2 and Varscan2")
    parser.add_argument('-v', '--vcfs', action='store', type=str, help="The list of VCFs to join, separated by ,", required=True)
    parser.add_argument('-d', '--directory', action='store', type=str, help="The directory containing the VCFs", required=False, default=".")
    args = parser.parse_args()
    vcfs_directory = args.directory
    vcfs = args.vcfs.split(",")
    # Create the file path for every VCF
    vcfs = [os.path.join(vcfs_directory, vcf) for vcf in vcfs]
    # Check that every VCF exists before emitting anything
    for vcf in vcfs:
        check_that_vcf_exists(vcf)
    already_included_keys = set()
    first_header = True
    for vcf in vcfs:
        with open(vcf, 'r') as f:
            for line in f:
                if line.startswith('#'):
                    # BUG FIX: header lines of the second and later files used
                    # to fall through to the dedup branch and be re-printed as
                    # if they were variant rows; headers are now only emitted
                    # from the first file.
                    if first_header:
                        print(line[:-1])
                else:
                    # Deduplicate on (CHROM, POS).
                    key = ",".join(line.split("\t")[0:2])
                    if key not in already_included_keys:
                        already_included_keys.add(key)
                        print(line[:-1])
        first_header = False
# Script entry point.
if __name__ == "__main__":
    main()
|
import csv
import datetime as dt
import json
import logging
import math
import pickle
import sys
import traceback
from collections import defaultdict
import numpy as np
import luminometers
from fitResultReader import fitResultReader
from luminometers import *
from vdmUtilities import makeCorrString
import os
# [in Hz]
# LHC revolution frequency, used to convert rates <-> per-orbit counts.
LHC_revolution_frequency = 11245
pi = math.pi  # short alias used in the cross-section formulas below
class XsecCalculationOptions:
    """Namespace of the allowed configuration values (used for validation)."""
    class LuminometerOptions:
        # Luminometer types with predefined defaults (see LuminometerDefaults).
        LuminometerTypes = ("HF", "PCC", "Vtx", "BCM1F", "PLT")
        WhatIsMeasured = ("CountsPerTime","Counts")
        NormalizationGraphs = ("None", "CurrentProduct")
        OldNormalizationAvailable = ("Yes", "No")
    class FormulaOptions:
        # Cross-section formulas implemented (or stubbed) below.
        FormulaToUse = ("1D-Gaussian-like", "2D-like", "numerical-Integration")
def xsecFormula_1DGaussianLike(CapSigmaX, CapSigmaY, peakX, peakY):
    """Visible cross section from a 1D-Gaussian-like scan fit.

    Each argument is a [value, error] pair.  CapSigma values are scaled by
    1000 so the returned cross section comes out in microbarn, and the
    approximation peakX ~ peakY ~ 0.5*(peakX+peakY) is used.

    Returns (xsec, xsecErr).
    """
    # BUG FIX: the original scaled the caller's CapSigma lists *in place*,
    # silently corrupting them for any later use; work on locals instead.
    sigX = CapSigmaX[0] * 1000
    sigXErr = CapSigmaX[1] * 1000
    sigY = CapSigmaY[0] * 1000
    sigYErr = CapSigmaY[1] * 1000
    # with approximation peakX ~ peakY ~ 0.5(peakX+peakY)
    xsec = math.pi * sigX * sigY * (peakX[0] + peakY[0])
    # Relative uncertainties added in quadrature.
    relErrSq = (sigXErr * sigXErr / sigX / sigX +
                sigYErr * sigYErr / sigY / sigY +
                (peakX[1] * peakX[1] + peakY[1] * peakY[1]) /
                (peakX[0] + peakY[0]) / (peakX[0] + peakY[0]))
    xsecErr = math.sqrt(relErrSq) * xsec
    return xsec, xsecErr
def xsecFormula_2DLike(fitResult):
    """Placeholder for the 2D-fit cross-section formula (not implemented).

    The original body returned the undefined name `xsec`, which surfaced as
    a confusing NameError; fail loudly and explicitly instead.
    """
    raise NotImplementedError("2D-like cross-section formula is not implemented yet")
def xsecFormula_numericalIntegration(fitFunc):
    """Placeholder for the numerically-integrated cross section (not implemented).

    The original body returned the undefined name `xsec`, which surfaced as
    a confusing NameError; fail loudly and explicitly instead.
    """
    raise NotImplementedError("numerical-integration cross-section formula is not implemented yet")
def CalculateCalibrationConstant(configFile):
    """Compute per-bunch visible cross sections (calibration constants).

    configFile is either a path to a JSON config or an already-parsed dict.
    Writes a CSV, a pickle and a log of excluded BCIDs under the analysis
    results directory, and returns the CSV table (list of rows).
    NOTE: this module uses Python 2 syntax (print statements, int division).
    """
    # check that options chosen in json do actually exist
    # if non-standard luminometer chosen, check that all options provided are allowed, i.e. are in LuminometerOptions
    # either use xsec as returned by function, for "Counts", or xsec/LHC_frequency, for "CountsPerTime"
    if type(configFile)==str:
        config=open(configFile)
        ConfigInfo = json.load(config)
        config.close()
    else:
        ConfigInfo = configFile
    Fill = ConfigInfo['Fill']
    AnalysisDir = ConfigInfo['AnalysisDir']
    Luminometer = ConfigInfo['Luminometer']
    Corr = ConfigInfo['Corr']
    InputFitResultsFile = ConfigInfo['InputFitResultsFile']
    # NOTE(review): str.strip removes a *set of characters* from both ends,
    # not a suffix -- this only behaves as intended while the file name has
    # no leading/trailing characters from "FitResuls.pkl"; verify inputs.
    fit = InputFitResultsFile.strip("FitResults.pkl")
    if 'CapSigmaInput' in ConfigInfo:
        CapSigmaInput = ConfigInfo['CapSigmaInput']
    corrFull = makeCorrString(Corr)
    InputFitResultsFile = './' + AnalysisDir + "/" + Luminometer + "/results/" + corrFull + "/" + InputFitResultsFile
    OutputDir = './' + AnalysisDir + '/' + Luminometer + '/results/' + corrFull + '/'
    predefinedTypes = XsecCalculationOptions.LuminometerOptions.LuminometerTypes
    oldNormAvailable = False
    WhatIsMeasured = ConfigInfo['LuminometerSettings']['WhatIsMeasured']
    NormalizationGraphs = ConfigInfo['LuminometerSettings']['NormalizationGraphs']
    OldNormAvailable = ConfigInfo['LuminometerSettings']['OldNormAvailable']
    # Known luminometers may say "default" to pull their predefined settings.
    if Luminometer in predefinedTypes:
        defaults = LuminometerDefaults(Luminometer)
        if WhatIsMeasured == "default":
            WhatIsMeasured = defaults.WhatIsMeasured
        if NormalizationGraphs== "default":
            NormalizationGraphs = defaults.NormalizationGraphs
        if OldNormAvailable == "default":
            OldNormAvailable = defaults.OldNormAvailable
    print "defaults ", WhatIsMeasured, NormalizationGraphs, OldNormAvailable
    Total_inel_Xsec = ConfigInfo['Total_inel_Xsec']
    FormulaToUse = ConfigInfo['FormulaToUse']
    Scanpairs = ConfigInfo['Scanpairs']
    fitResult = fitResultReader(InputFitResultsFile)
    CapSigmaDict = fitResult.getFitParam("CapSigma")
    CapSigmaErrDict = fitResult.getFitParam("CapSigmaErr")
    # Optionally take CapSigma from a different fit-result file.
    if 'CapSigmaInput' in ConfigInfo:
        fitResult2 = fitResultReader(CapSigmaInput)
        CapSigmaDict = fitResult2.getFitParam("CapSigma")
        CapSigmaErrDict = fitResult2.getFitParam("CapSigmaErr")
    peakDict = fitResult.getFitParam("peak")
    peakErrDict = fitResult.getFitParam("peakErr")
    fitstatusDict = fitResult.getFitParam("fitStatus")
    chi2Dict = fitResult.getFitParam("chi2")
    ndofDict = fitResult.getFitParam('ndof')
    table =[]
    csvtable = []
    # SBIL columns are only produced when beam-current data exists for the fill.
    BeamCurrents_path = './' + AnalysisDir + '/cond/BeamCurrents_' + str(Fill) + '.json'
    addsbil = os.path.exists(BeamCurrents_path)
    if addsbil:
        csvtable.append(["XscanNumber_YscanNumber","Type", "BCID", "xsec", "xsecErr", "SBIL", 'SBILErr'])
        table.append(["XscanNumber_YscanNumber","Type", "BCID", "xsec", "xsecErr", "SBIL", 'SBILErr'])
    else:
        csvtable.append(["XscanNumber_YscanNumber","Type", "BCID", "xsec", "xsecErr"])
        table.append(["XscanNumber_YscanNumber","Type", "BCID", "xsec", "xsecErr"])
    logbuffer="CalculateCalibrationConstant - excluded BCIDs\n"
    if addsbil:
        with open(BeamCurrents_path) as f:
            beamdata = json.load(f)
    for entry in Scanpairs:
        XscanNumber = entry[0]
        YscanNumber = entry[1]
        XYbxlist=[]
        if addsbil:
            # Per-bunch beam currents taken at the middle step of each scan
            # (len(s)/2 is Python 2 integer division).
            s1 = beamdata['Scan_' + str(XscanNumber)]
            b1 = [0 for i in range(3654)]
            b2 = [0 for i in range(3654)]
            bcx1 = {i[0]:i[1] for i in s1[len(s1)/2]['fbctB1'].items()}
            bcx2 = {i[0]:i[1] for i in s1[len(s1)/2]['fbctB2'].items()}
            s2 = beamdata['Scan_' + str(YscanNumber)]
            b1 = [0 for i in range(3654)]
            b2 = [0 for i in range(3654)]
            bcy1 = {i[0]:i[1] for i in s2[len(s2)/2]['fbctB1'].items()}
            bcy2 = {i[0]:i[1] for i in s2[len(s2)/2]['fbctB2'].items()}
        xsec = defaultdict(float)
        xsecErr = defaultdict(float)
        xsecDict = defaultdict(dict)
        xsecErrDict = defaultdict(dict)
        XscanID = 'Scan_'+str(XscanNumber)
        YscanID = 'Scan_'+str(YscanNumber)
        XY_ID = 'Scan_'+str(XscanNumber) + '_'+str(YscanNumber)
        logbuffer=logbuffer+"Scanpair:"+XY_ID+"\n"
        logbuffer=logbuffer+"BCIDs excluded because they are filled only in Scan_X or only in Scan_Y\n"
        logbuffer=logbuffer+"ScanID: list of excluded BCIDs\n"
        XexclBX=[]
        YexclBX=[]
        # Only BCIDs present in both the X scan and the Y scan are usable.
        for bx in CapSigmaDict[XscanID]:
            if bx in CapSigmaDict[YscanID]:
                XYbxlist.append(bx)
            else:
                XexclBX.append(bx)
        for bx in CapSigmaDict[YscanID]:
            if bx not in CapSigmaDict[XscanID]:
                YexclBX.append(bx)
        # BCIDs are stored as strings; sort them numerically.
        temp = [int(i) for i in XYbxlist if i != 'sum']
        temp.sort()
        temp = [str(i) for i in temp]
        # temp.append('sum')
        XYbxlist = temp
        logbuffer=logbuffer+XscanID+":"+str(XexclBX)+"\n"
        logbuffer=logbuffer+YscanID+":"+str(YexclBX)+"\n"
        for bx in XYbxlist:
            # [value, error] pairs for each fitted parameter.
            CapSigmaX = [CapSigmaDict[XscanID][bx], CapSigmaErrDict[XscanID][bx]]
            CapSigmaY = [CapSigmaDict[YscanID][bx], CapSigmaErrDict[YscanID][bx]]
            peakX = [peakDict[XscanID][bx], peakErrDict[XscanID][bx]]
            peakY = [peakDict[YscanID][bx], peakErrDict[YscanID][bx]]
            # need to replace with something that takes FormulaToUse as argument and applies selected formula
            if FormulaToUse == "1D-Gaussian-like":
                value, err = xsecFormula_1DGaussianLike(CapSigmaX, CapSigmaY, peakX, peakY)
            if WhatIsMeasured == "CountsPerTime":
                # Rates were measured per unit time: divide out the revolution frequency.
                value = value/LHC_revolution_frequency
                err = err/LHC_revolution_frequency
            xsec[bx] = value
            xsecErr[bx] = err
            if fitstatusDict[XscanID][bx] >0:
                print "fitstatus Xscan for bx", bx, fitstatusDict[XscanID][bx]
            if fitstatusDict[YscanID][bx] >0:
                print "fitstatus Yscan for bx", bx, fitstatusDict[YscanID][bx]
            if addsbil:
                # Single-bunch instantaneous luminosity and its propagated error.
                sbil = (LHC_revolution_frequency*(peakX[0]*bcx1[bx]*bcx2[bx] + peakY[0]*bcy1[bx]*bcy2[bx]))/(1e22*2*xsec[bx])
                sbilerr = (LHC_revolution_frequency/(1e22*2*xsec[bx])) * math.sqrt(
                    (peakX[1] * bcx1[bx]*bcx2[bx])**2 + (peakY[1] * bcy1[bx]*bcy2[bx])**2 +
                    (xsecErr[bx] * (peakX[0]*bcx1[bx]*bcx2[bx] + peakY[0]*bcy1[bx]*bcy2[bx])/xsec[bx])**2)
                row = [str(XscanNumber)+"_"+str(YscanNumber), "XY", bx, xsec[bx], xsecErr[bx], sbil, sbilerr]
            else:
                row = [str(XscanNumber)+"_"+str(YscanNumber), "XY", bx, xsec[bx], xsecErr[bx]]
            table.append(row)
            csvtable.append(row)
    # need to name output file such that fit function name in file name
    csvfile = open(OutputDir+'/LumiCalibration_'+ Luminometer+ '_'+ fit + str(Fill)+'.csv', 'wb')
    writer = csv.writer(csvfile)
    writer.writerows(csvtable)
    csvfile.close()
    with open(OutputDir+'/LumiCalibration_'+ Luminometer+ '_'+ fit + str(Fill)+'.pkl', 'wb') as f:
        pickle.dump(table, f)
    excldata=open(OutputDir+'/LumiCalibration_'+ Luminometer+ '_'+ fit + str(Fill)+'.log','w')
    excldata.write(logbuffer)
    excldata.close()
    return csvtable
if __name__ == '__main__':
    # Config path is the single positional argument.
    configFile = sys.argv[1]
    # One timestamped log file per run under Automation/Logs/.
    logging.basicConfig(filename="Automation/Logs/calibrationconst_" +
                        dt.datetime.now().strftime('%y%m%d%H%M%S') + '.log', level=logging.DEBUG)
    CalculateCalibrationConstant(configFile)
|
import sys
sys.path.append('..')
import BTreeNode
"""
create tree
4
5 9
6 7 11
"""
root = BTreeNode.BTreeNode(4)
root.left = BTreeNode.BTreeNode(5)
root.left.left = BTreeNode.BTreeNode(6)
root.left.right = BTreeNode.BTreeNode(7)
root.right = BTreeNode.BTreeNode(9)
root.right.right = BTreeNode.BTreeNode(11)
"""
create tree
'r'
'r1' 'r2'
'r3' 'r4'
"""
root1 = BTreeNode.BTreeNode('r')
root1.left = BTreeNode.BTreeNode('r1')
root1.right = BTreeNode.BTreeNode('r2')
root1.left.right = BTreeNode.BTreeNode('r3')
root1.right.left = BTreeNode.BTreeNode('r4')
# Recursive solution
def inOrder(root):
    """Print the tree's values in in-order (left subtree, node, right subtree)."""
    if root == None:
        return
    inOrder(root.left)
    print(root.val)
    inOrder(root.right)
# Non recursive solution - 1
def inOrder_nr(root):
    """Print the tree in in-order without recursion, using an explicit stack.

    BUG FIX: the stack used to be a module-level list shared by every call,
    so an interrupted or interleaved traversal left stale nodes behind and
    corrupted the next call.  It is now local to the function.
    """
    stck = []
    currRoot = root
    done = False
    while not done:
        if currRoot != None:
            # Descend as far left as possible, remembering the ancestors.
            stck.append(currRoot)
            currRoot = currRoot.left
        else:
            if len(stck) > 0:
                # backtrack: visit the node, then walk its right subtree
                currRoot = stck.pop()
                print(currRoot.val)
                currRoot = currRoot.right
            else:
                done = True
# Non recursive solution - 2
def inOrder_nr2(root):
    """Iterative in-order traversal that returns the values as a list.

    Every internal node is pushed twice (once unexpanded, once after its
    children are scheduled), leaves once, so the number of pushes is between
    n and 2n and the complexity is O(n).
    """
    if root == None:
        return []
    visited = []
    # Second tuple element is True once the node's children were scheduled.
    pending = [(root, False)]
    while pending:
        node, expanded = pending.pop()
        if expanded or (node.left == None and node.right == None):
            # Leaf, or a node whose children were already pushed in
            # RIGHT, PARENT, LEFT order (so they pop back LEFT, PARENT,
            # RIGHT): emit its value now for an in-order sequence.
            visited.append(node.val)
        else:
            if node.right != None:
                pending.append((node.right, False))
            pending.append((node, True))
            if node.left != None:
                pending.append((node.left, False))
    return visited
# Exercise all three traversals on both sample trees; each should emit the
# same in-order sequence.
print("="*10,"Tree 1","="*10)
print("-"*10,"Recursive","-"*10)
inOrder(root)
print("-"*10,"Non-Recursive","-"*10)
inOrder_nr(root)
print("-"*10,"Non-Recursive","-"*10)
print(inOrder_nr2(root))
print("="*10,"Tree 2","="*10)
print("-"*10,"Recursive","-"*10)
inOrder(root1)
print("-"*10,"Non-Recursive","-"*10)
inOrder_nr(root1)
print("-"*10,"Non-Recursive","-"*10)
print(inOrder_nr2(root1))
# Read ten numbers from the user, echoing each one back except zeros
# (continue skips the print for a zero).
for i in range(10):
    n=int(input("Enter the number"))
    if n==0:
        continue
    print(n)
print("Thank you")
# Read n and n integers, then print them rearranged so the values rise and
# then fall (even-index elements of the sorted list ascending, odd-index
# elements descending; for odd n the largest element sits in the middle).
# NOTE(review): purpose inferred from the construction -- confirm intent.
n = int(input())
arr = list(map(int,input().strip().split()))[:n]
arr.sort()
if n % 2 == 0:
    stor1 = []
    stor2 = []
    for i in range(n):
        if i % 2 == 0:
            stor1.append(arr[i])
        else:
            stor2.append(arr[i])
    stor2.reverse()
    res = stor1 + stor2
elif n % 2 == 1:
    stor3 = []
    stor4 = []
    # Distribute all but the last (largest) element, then insert it mid-list.
    for j in range(n-1):
        if j % 2 == 0:
            stor3.append(arr[j])
        else:
            stor4.append(arr[j])
    stor4.reverse()
    res = stor3 + stor4
    res.insert((n//2),arr[n-1])
print(*res,sep=" ")
# Copyright 2022 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allows to connect to PASQAL's cloud platform to run sequences."""
from __future__ import annotations
import copy
import json
from dataclasses import fields
from typing import Any, Type, cast
import backoff
import numpy as np
import pasqal_cloud
from pasqal_cloud.device.configuration import (
BaseConfig,
EmuFreeConfig,
EmuTNConfig,
)
from pulser import Sequence
from pulser.backend.config import EmulatorConfig
from pulser.backend.remote import (
JobParams,
RemoteConnection,
RemoteResults,
SubmissionStatus,
)
from pulser.devices import Device
from pulser.json.abstract_repr.deserializer import deserialize_device
from pulser.result import Result, SampledResult
# Maps each cloud emulator type to the configuration class its backend expects.
EMU_TYPE_TO_CONFIG: dict[pasqal_cloud.EmulatorType, Type[BaseConfig]] = {
    pasqal_cloud.EmulatorType.EMU_FREE: EmuFreeConfig,
    pasqal_cloud.EmulatorType.EMU_TN: EmuTNConfig,
}
# Maximum retries for any cloud call before giving up.
MAX_CLOUD_ATTEMPTS = 5
# Retries any exception with Fibonacci backoff, capped at 60 s between tries.
backoff_decorator = backoff.on_exception(
    backoff.fibo, Exception, max_tries=MAX_CLOUD_ATTEMPTS, max_value=60
)
def _make_json_compatible(obj: Any) -> Any:
"""Makes an object compatible with JSON serialization.
For now, simply converts Numpy arrays to lists, but more can be added
as needed.
"""
class NumpyEncoder(json.JSONEncoder):
def default(self, o: Any) -> Any:
if isinstance(o, np.ndarray):
return o.tolist()
return json.JSONEncoder.default(self, o)
# Serializes with the custom encoder and then deserializes back
return json.loads(json.dumps(obj, cls=NumpyEncoder))
class PasqalCloud(RemoteConnection):
    """Manager of the connection to PASQAL's cloud platform.

    The cloud connection enables to run sequences on simulators or on real
    QPUs.

    Args:
        username: Your username in the PASQAL cloud platform.
        password: The password for your PASQAL cloud platform account.
        project_id: The project ID associated to the account.
        kwargs: Additional arguments to provide to the pasqal_cloud.SDK()
    """

    def __init__(
        self,
        username: str = "",
        password: str = "",
        project_id: str = "",
        **kwargs: Any,
    ):
        """Initializes a connection to the Pasqal cloud platform."""
        # Backwards compatibility: "group_id" was the old name of project_id.
        project_id_ = project_id or kwargs.pop("group_id", "")
        self._sdk_connection = pasqal_cloud.SDK(
            username=username,
            password=password,
            project_id=project_id_,
            **kwargs,
        )

    def submit(self, sequence: Sequence, **kwargs: Any) -> RemoteResults:
        """Submits the sequence for execution on a remote Pasqal backend."""
        if not sequence.is_measured():
            bases = sequence.get_addressed_bases()
            if len(bases) != 1:
                raise ValueError(
                    "The measurement basis can't be implicitly determined "
                    "for a sequence not addressing a single basis."
                )
            # The copy prevents changing the input sequence
            sequence = copy.deepcopy(sequence)
            sequence.measure(bases[0])
        emulator = kwargs.get("emulator", None)
        # Strip Numpy arrays etc. so the payload is JSON-serializable.
        job_params: list[JobParams] = _make_json_compatible(
            kwargs.get("job_params", [])
        )
        if emulator is None:
            # Submitting to a real QPU: the sequence's device must be one
            # the remote connection currently offers.
            available_devices = self.fetch_available_devices()
            # TODO: Could be better to check if the devices are
            # compatible, even if not exactly equal
            if sequence.device not in available_devices.values():
                raise ValueError(
                    "The device used in the sequence does not match any "
                    "of the devices currently available through the remote "
                    "connection."
                )
            # TODO: Validate the register layout
        if sequence.is_parametrized() or sequence.is_register_mappable():
            # Building with each job's variables validates them early,
            # before anything is sent to the cloud.
            for params in job_params:
                vars = params.get("variables", {})
                sequence.build(**vars)
        configuration = self._convert_configuration(
            config=kwargs.get("config", None), emulator=emulator
        )
        create_batch_fn = backoff_decorator(self._sdk_connection.create_batch)
        batch = create_batch_fn(
            serialized_sequence=sequence.to_abstract_repr(),
            jobs=job_params or [],  # type: ignore[arg-type]
            emulator=emulator,
            configuration=configuration,
            wait=False,
        )
        # Recover the submission order of the jobs: match each requested
        # job's (runs, variables) against the batch's jobs, consuming each
        # job id at most once.
        jobs_order = []
        if job_params:
            for job_dict in job_params:
                for job in batch.jobs.values():
                    if (
                        job.id not in jobs_order
                        and job_dict["runs"] == job.runs
                        and job_dict.get("variables", None) == job.variables
                    ):
                        jobs_order.append(job.id)
                        break
                else:
                    raise RuntimeError(
                        f"Failed to find job ID for {job_dict}."
                    )
        return RemoteResults(batch.id, self, jobs_order or None)

    @backoff_decorator
    def fetch_available_devices(self) -> dict[str, Device]:
        """Fetches the devices available through this connection."""
        abstract_devices = self._sdk_connection.get_device_specs_dict()
        return {
            name: cast(Device, deserialize_device(dev_str))
            for name, dev_str in abstract_devices.items()
        }

    def _fetch_result(
        self, submission_id: str, jobs_order: list[str] | None
    ) -> tuple[Result, ...]:
        # For now, the results are always sampled results
        get_batch_fn = backoff_decorator(self._sdk_connection.get_batch)
        batch = get_batch_fn(id=submission_id)
        seq_builder = Sequence.from_abstract_repr(batch.sequence_builder)
        reg = seq_builder.get_register(include_mappable=True)
        all_qubit_ids = reg.qubit_ids
        meas_basis = seq_builder.get_measurement_basis()
        results = []
        # Respect the original submission order when it is known.
        jobs = (
            (batch.jobs[job_id] for job_id in jobs_order)
            if jobs_order
            else batch.jobs.values()
        )
        for job in jobs:
            vars = job.variables
            size: int | None = None
            if vars and "qubits" in vars:
                size = len(vars["qubits"])
            assert job.result is not None, "Failed to fetch the results."
            results.append(
                SampledResult(
                    # slice(None) keeps every qubit id when size is unknown.
                    atom_order=all_qubit_ids[slice(size)],
                    meas_basis=meas_basis,
                    bitstring_counts=job.result,
                )
            )
        return tuple(results)

    @backoff_decorator
    def _get_submission_status(self, submission_id: str) -> SubmissionStatus:
        """Gets the status of a submission from its ID."""
        batch = self._sdk_connection.get_batch(id=submission_id)
        return SubmissionStatus[batch.status]

    def _convert_configuration(
        self,
        config: EmulatorConfig | None,
        emulator: pasqal_cloud.EmulatorType | None,
    ) -> pasqal_cloud.BaseConfig | None:
        """Converts a backend configuration into a pasqal_cloud.BaseConfig."""
        if emulator is None or config is None:
            return None
        emu_cls = EMU_TYPE_TO_CONFIG[emulator]
        backend_options = config.backend_options.copy()
        pasqal_config_kwargs = {}
        # Fields declared on the emulator's config class are passed directly.
        for field in fields(emu_cls):
            pasqal_config_kwargs[field.name] = backend_options.pop(
                field.name, field.default
            )
        # We pass the remaining backend options to "extra_config"
        if backend_options:
            pasqal_config_kwargs["extra_config"] = backend_options
        if emulator == pasqal_cloud.EmulatorType.EMU_TN:
            # EMU_TN expects a time step rather than a sampling rate.
            pasqal_config_kwargs["dt"] = 1.0 / config.sampling_rate
        return emu_cls(**pasqal_config_kwargs)
|
from django.shortcuts import render
from django.core.serializers import serialize
from django.views.generic import View
from django.http import HttpResponse
from django.http import JsonResponse
from .mixins import CSRFExempt,render_to_response,is_json
from .models import StuData
from .forms import StuForm
import json
# Create your views here.
def sample(request):
    """Tiny smoke-test endpoint returning a static JSON payload."""
    return JsonResponse({"Hello There": "Wassup "})
class StuDataDetail(View):
    """GET /<id>: return one StuData record serialized as JSON."""

    def get(self, request, id, *args, **kwargs):
        record = StuData.objects.get(id=id)
        return render_to_response(data=record.serialize(), status=200)
class StuDataList(View):
    """GET: return every StuData record, newest id first, as JSON."""

    def get(self, request, *args, **kwargs):
        records = StuData.objects.order_by('-id')  # descending id's
        return render_to_response(data=records.serialize(), status=200)
class StuDataPost(CSRFExempt,View):
    """POST: create a StuData record from form data (CSRF exempt)."""
    def post(self,request,*args,**kwargs):
        form=StuForm(request.POST)
        if form.is_valid():
            obj=form.save(commit=True)
            #obj_data=obj.serialize()
            #print(obj_data)
            data={"message":"Succesfull"}
            #return HttpResponse(json.dumps(data),content_type="application/json",status=200)
            return render_to_response(data=json.dumps(data),status=200)
        if form.errors:
            # NOTE(review): the error response also uses status 200, although
            # the commented-out line above suggests 402 was once intended --
            # confirm what API clients expect before changing.
            data={"message":"did not work properly!"}
            #return HttpResponse(json.dumps(data),content_type="application/json",status=402)
            return render_to_response(data=json.dumps(data),status=200)
class StuDataDelete(CSRFExempt, View):
    """DELETE /<id>: remove one StuData record (CSRF exempt)."""

    def get_object(self, id):
        # First matching row, or None when the id is unknown.
        matches = StuData.objects.filter(id=id)
        if matches.count() > 0:
            return matches.first()
        return None

    def delete(self, request, id, *args, **kwargs):
        target = self.get_object(id)
        if target is None:
            return render_to_response(json.dumps({"message": "No Such ID"}), status=404)
        target.delete()
        return render_to_response(json.dumps({"message": f"{id} deleted successfully"}), status=200)
class StuDataUpdate(CSRFExempt,View):
    """PUT /<id>: partially update a StuData record (CSRF exempt).

    The request body is JSON; keys present in it overwrite the record's
    current serialized values before re-validating through StuForm.
    """
    def get_object(self,id):
        # First matching row, or None when the id is unknown.
        qs=StuData.objects.filter(id=id)
        if qs.count()>0:
            return qs.first()
        return None
    def put(self,request,id,*args,**kwargs):
        obj=self.get_object(id)
        if obj is None:
            err_data=json.dumps({"message":"No Such ID"})
            return render_to_response(err_data,status=404)
        # Start from the record's current values so omitted fields keep them.
        data=json.loads(obj.serialize())
        passed_data=json.loads(request.body)
        print(passed_data)
        for key,value in passed_data.items():
            data[key]=value
        form=StuForm(data,instance=obj)
        if form.is_valid():
            obj=form.save(commit=True)
            obj_data=obj.serialize()
            return render_to_response(obj_data,status=200)
        if form.errors:
            data=json.dumps(form.errors)
            return render_to_response(data,status=404)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.