import xml.etree.ElementTree as ET
import os
import traceback
import json
def xml_getter(xml_file_path, xml_string):
tree = ET.parse(xml_file_path)
root = tree.getroot()
item = root.find(xml_string)
return item.text
def xml_parser(xml_file_path):
#return "Ishmeet"
if os.path.exists("errorFile.txt"):
os.remove(r"errorFile.txt")
dict_output = {}
# Get Admission and Discharge Date
adm_discharge_value = ""
adm_discharge_date = r"{http://www.edifecs.com/xdata/200}Loop-2000B/" \
r"{http://www.edifecs.com/xdata/200}Loop-2300/" \
r"{http://www.edifecs.com/xdata/200}Segment-DTP_1/" \
r"{http://www.edifecs.com/xdata/200}Element-1251"
try:
adm_discharge_value = xml_getter(xml_file_path, adm_discharge_date)
except AttributeError:
with open("errorFile.txt", 'w') as fileobj:
fileobj.write("Unable to extract Admit and Discharge Date")
except Exception:
with open("errorFile.txt", 'w') as fileobj:
fileobj.write(traceback.format_exc())
dict_output["adm_discharge_value"] = adm_discharge_value
print(f'Adm and Discharge Date = {dict_output["adm_discharge_value"]}')
# Member ID
member_id_value = ""
member_id_path = r"{http://www.edifecs.com/xdata/200}Loop-2000B/" \
r"{http://www.edifecs.com/xdata/200}Loop-2010BA/" \
r"{http://www.edifecs.com/xdata/200}Segment-NM1/" \
r"{http://www.edifecs.com/xdata/200}Element-67"
try:
member_id_value = xml_getter(xml_file_path, member_id_path)
except AttributeError:
with open("errorFile.txt", 'w') as fileobj:
fileobj.write("Unable to extract MemberID")
except Exception:
with open("errorFile.txt", 'w') as fileobj:
fileobj.write(traceback.format_exc())
dict_output["MemberID"] = member_id_value
print(dict_output["MemberID"])
# Provider Name (Institution)
provider_name_value = ""
provider_name_path = r"{http://www.edifecs.com/xdata/200}Loop-2000A/" \
r"{http://www.edifecs.com/xdata/200}Loop-2010AA/" \
r"{http://www.edifecs.com/xdata/200}Segment-NM1/" \
r"{http://www.edifecs.com/xdata/200}Element-1035"
try:
provider_name_value = xml_getter(xml_file_path, provider_name_path)
except AttributeError:
with open("errorFile.txt", 'w') as fileobj:
fileobj.write("Unable to extract Provider NAme")
except Exception:
with open("errorFile.txt", 'w') as fileobj:
fileobj.write(traceback.format_exc())
dict_output["ProviderName"] = provider_name_value
print(dict_output["ProviderName"])
# Billed Amount
billed_amount_value = ""
billed_amount_path = r"{http://www.edifecs.com/xdata/200}Loop-2000B/" \
r"{http://www.edifecs.com/xdata/200}Loop-2300/" \
r"{http://www.edifecs.com/xdata/200}Segment-CLM/" \
r"{http://www.edifecs.com/xdata/200}Element-782"
try:
billed_amount_value = xml_getter(xml_file_path, billed_amount_path)
except AttributeError:
with open("errorFile.txt", 'w') as fileobj:
fileobj.write("Unable to extract Billed Amount")
except Exception:
with open("errorFile.txt", 'w') as fileobj:
fileobj.write(traceback.format_exc())
dict_output["BilledAmount"] = billed_amount_value
print(dict_output["BilledAmount"])
# State
state_value = ""
state_path = r"{http://www.edifecs.com/xdata/200}Loop-2000A/" \
r"{http://www.edifecs.com/xdata/200}Loop-2010AA/" \
r"{http://www.edifecs.com/xdata/200}Segment-N4/" \
r"{http://www.edifecs.com/xdata/200}Element-156"
try:
state_value = xml_getter(xml_file_path, state_path)
except AttributeError:
with open("errorFile.txt", 'w') as fileobj:
fileobj.write("Unable to extract State")
except Exception:
with open("errorFile.txt", 'w') as fileobj:
fileobj.write(traceback.format_exc())
dict_output["State"] = state_value
print(dict_output["State"])
return json.dumps(dict_output)
# xml_parser(r"C:\Users\ibindra\Desktop\Institutional Data.xml")
|
# dataset link https://www.kaggle.com/subhassing/exploring-consumer-complaint-data/data
# Step 1 Import the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import string
from nltk.stem import SnowballStemmer
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
import sklearn.feature_extraction.text as text
from sklearn import model_selection, preprocessing,linear_model, naive_bayes, metrics, svm
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
import os
from textblob import TextBlob
from textblob import Word
from io import StringIO
import seaborn as sns
# Step 2 Importing the data
Data = pd.read_csv("consumer_complaints.csv",encoding='latin-1')
# Step 3 Data understanding
# Selecting required columns and rows
Data = Data[['product', 'consumer_complaint_narrative']]
Data = Data[pd.notnull(Data['consumer_complaint_narrative'])]
# See top 5 rows
Data.head()
# Factorizing the category column
Data['category_id'] = Data['product'].factorize()[0]
Data.head()
# Check the distribution of complaints by category
Data.groupby('product').consumer_complaint_narrative.count()
# Let's plot it and see
fig = plt.figure(figsize=(8,6))
Data.groupby('product').consumer_complaint_narrative.count().plot.bar(ylim=0)
plt.show()
# Step 4 Splitting the data
train_x, valid_x, train_y, valid_y = model_selection.train_test_split(Data['consumer_complaint_narrative'], Data['product'])
# Step 5 Feature engineering using TF-IDF
encoder = preprocessing.LabelEncoder() # Encode labels with value between 0 and n_classes-1.
train_y = encoder.fit_transform(train_y)
valid_y = encoder.transform(valid_y)  # reuse the encoding fitted on the training labels
tfidf_vect = TfidfVectorizer(analyzer='word',token_pattern=r'\w{1,}', max_features=5000)
tfidf_vect.fit(Data['consumer_complaint_narrative'])
xtrain_tfidf = tfidf_vect.transform(train_x)
xvalid_tfidf = tfidf_vect.transform(valid_x)
# Step 6 Model building and evaluation
'''
Here we build a linear classifier on word-level TF-IDF vectors, using the
classifier's default hyperparameters. Parameters such as C, max_iter, or solver
can be changed to obtain better results (a sketch follows the accuracy check below).
'''
model = linear_model.LogisticRegression().fit(xtrain_tfidf, train_y)
accuracy = metrics.accuracy_score(model.predict(xvalid_tfidf),valid_y)
print ("Accuracy: ", accuracy)
# Classification report
print(metrics.classification_report(valid_y, model.predict(xvalid_tfidf), target_names=encoder.classes_))  # use the encoder's class order so labels line up
#confusion matrix
conf_mat = confusion_matrix(valid_y, model.predict(xvalid_tfidf))
# Vizualizing confusion matrix
category_id_df = Data[['product', 'category_id']].drop_duplicates().sort_values('category_id')
category_to_id = dict(category_id_df.values)
id_to_category = dict(category_id_df[['category_id','product']].values)
fig, ax = plt.subplots(figsize=(8,6))
sns.heatmap(conf_mat, annot=True, fmt='d', cmap="BuPu",xticklabels=category_id_df[['product']].values,yticklabels=category_id_df[['product']].values)
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
# Step 7 prediction
# Prediction example
texts = ["This company refuses to provide me verification and validation of debt per my right under the FDCPA.I do not believe this debt is mine."]
text_features = tfidf_vect.transform(texts)
predictions = model.predict(text_features)
print(texts)
print(" - Predicted as: '{}'".format(id_to_category[predictions[0]]))
# -------------------------------------------------------------------------
'''
To increase the accuracy, we can do the following things:
• Reiterate the process with different algorithms like
Random Forest, SVM, GBM, Neural Networks, Naive
Bayes.
• Deep learning techniques like RNN and LSTM can also be used.
• In each of these algorithms, there are so many
parameters to be tuned to get better results. It can be
easily done through Grid search, which will basically
try out all possible combinations and give the best out.
'''
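# Hedged sketch of the grid search idea above: the parameter grid is an illustrative
# assumption and the search is limited to LogisticRegression for brevity.
from sklearn.model_selection import GridSearchCV
param_grid = {'C': [0.1, 1, 10], 'solver': ['liblinear', 'lbfgs']}
grid = GridSearchCV(linear_model.LogisticRegression(max_iter=1000), param_grid, cv=3)
grid.fit(xtrain_tfidf, train_y)
print("Best params:", grid.best_params_)
print("Grid search accuracy:", metrics.accuracy_score(grid.predict(xvalid_tfidf), valid_y))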
|
#!/bin/python3
from sys import stdin
for i in stdin:
print(i.replace("\n",""))
|
import random
import json
class Igra:
def __init__(self, znesek_stave):
self.zgodovina = []
self.znesek_stave = float(znesek_stave)
def poslji_stave(self, stavljene_stevilke):
dobljena_stevilka = random.randint(0, 36)
self.zgodovina.append(dobljena_stevilka)
return self.rezultat_stav(stavljene_stevilke)
def stava_na_eno_stevilko(self, stavljene_stevilke):
dobicek = 0
znesek_stave = self.znesek_stave
for stava in stavljene_stevilke:
if int(stava) <= 36 and int(stava) >= 0:
if int(self.zgodovina[-1]) == int(stava):
dobicek += 35 * float(znesek_stave)
else:
dobicek += (-1) * float(znesek_stave)
return dobicek
def stava_na_polovicko(self, stavljene_stevilke):
znesek_stave = self.znesek_stave
dobicek = 0
for stava in stavljene_stevilke:
if int(stava) == 41 or int(stava) == 42:
if int(stava) == 41:
if int(self.zgodovina[-1]) in range(1, 19):
dobicek += float(znesek_stave)
else:
dobicek += (-1) * float(znesek_stave)
else:
if int(self.zgodovina[-1]) in range(19, 37):
dobicek += float(znesek_stave)
else:
dobicek += (-1) * float(znesek_stave)
return dobicek
def stava_na_barvo(self, stavljene_stevilke):
znesek_stave = self.znesek_stave
dobicek = 0
for stava in stavljene_stevilke:
if int(stava) == 70 or int(stava) == 50:
if int(stava) == 70:
if int(self.zgodovina[-1]) in {1, 3, 5, 7, 9, 12, 14, 16, 18, 19, 21, 23, 25, 27, 30, 32, 34}:
dobicek += float(znesek_stave)
else:
dobicek += (-1) * float(znesek_stave)
else:
if self.zgodovina[-1] in {2, 4, 6, 8, 10, 11, 13, 15, 17, 20, 22, 24, 26, 28, 29, 31, 33, 35}:
dobicek += float(znesek_stave)
else:
dobicek += (-1) * float(znesek_stave)
return dobicek
def stava_na_sodo_liho(self, stavljene_stevilke):
znesek_stave = self.znesek_stave
dobicek = 0
for stava in stavljene_stevilke:
if int(stava) == 40 or int(stava) == 60:
if int(stava) == 40:
if int(self.zgodovina[-1]) % 2 == 0:
dobicek += float(znesek_stave)
else:
dobicek += (-1) * float(znesek_stave)
else:
if int(self.zgodovina[-1]) % 2 != 0:
dobicek += float(znesek_stave)
else:
dobicek += (-1) * float(znesek_stave)
return dobicek
def stava_na_ducat(self, stavljene_stevilke):
znesek_stave = self.znesek_stave
dobicek = 0
for stava in stavljene_stevilke:
if int(stava) == 43 or int(stava) == 44 or int(stava) == 45:
if int(stava) == 43:
if int(self.zgodovina[-1]) in range(1, 13):
dobicek += 2 * float(znesek_stave)
else:
dobicek += (-1) * float(znesek_stave)
elif int(stava) == 44:
if int(self.zgodovina[-1]) in range(13, 25):
dobicek += 2 * float(znesek_stave)
else:
dobicek += (-1) * float(znesek_stave)
else:
if int(self.zgodovina[-1]) in range(25, 37):
dobicek += 2 * float(znesek_stave)
else:
dobicek += (-1) * float(znesek_stave)
return dobicek
def stava_na_vrstico(self, stavljene_stevilke):
znesek_stave = self.znesek_stave
dobicek = 0
for stava in stavljene_stevilke:
if int(stava) == 46 or int(stava) == 47 or int(stava) == 48:
if int(stava) == 46:
if int(self.zgodovina[-1]) in range(1, 35, 3):
dobicek += 2 * float(znesek_stave)
else:
dobicek += (-1) * float(znesek_stave)
elif int(stava) == 47:
if int(self.zgodovina[-1]) in range(2, 36, 3):
dobicek += 2 * float(znesek_stave)
else:
dobicek += (-1) * float(znesek_stave)
else:
if int(self.zgodovina[-1]) in range(3, 37, 3):
dobicek += 2 * float(znesek_stave)
else:
dobicek += (-1) * float(znesek_stave)
return dobicek
def pridobi_stevilo_stav(self, stavljene_stevilke):
stevilo_trenutnih_stav = 0
for stava in stavljene_stevilke:
stevilo_trenutnih_stav += 1
return stevilo_trenutnih_stav
def pridobi_vrednost_trenutnih_stav(self, stavljene_stevilke):
return float(self.znesek_stave * self.pridobi_stevilo_stav(stavljene_stevilke))
def rezultat_stav(self, stavljene_stevilke):
dobicek = self.stava_na_barvo(stavljene_stevilke) + self.stava_na_ducat(stavljene_stevilke) + self.stava_na_polovicko(stavljene_stevilke) + self.stava_na_vrstico(stavljene_stevilke) + self.stava_na_eno_stevilko(stavljene_stevilke) + self.stava_na_sodo_liho(stavljene_stevilke)
return dobicek
def je_dovolj_denarja(igralec, znesek_stave):
return (igralec.stanje_na_racunu - znesek_stave) >= 0
def preveri_ce_je_stevilka(cifra):
x = cifra
if x.isnumeric() == True:
return True
else:
return False
class Igralec:
def __init__(self, znesek_pologa):
self.stanje_na_racunu = int(znesek_pologa)
self.znesek_stave = 0
self.igra = Igra(self.znesek_stave)
def dodaj(self, dobicek):
self.stanje_na_racunu += float(dobicek)
|
# Because no separate visited structure is created and maps itself is used for memoization,
# maps[0][0] = 1 means (0, 0) can be pushed onto the queue one extra time (see the sketch after this function).
def solution(maps):
N, M = len(maps) - 1, len(maps[0]) - 1
direction = [[0, 1], [1, 0], [0, -1], [-1, 0]]
queue = [[0, 0]]
maps[0][0] = 1
while queue:
X, Y = queue.pop(0)
Count = maps[X][Y]
# finish
if X == N and Y == M:
return maps[-1][-1]
if maps[X][Y] != 0:
for d in direction:
x, y = X + d[0], Y + d[1]
if 0 <= x <= N and 0 <= y <= M:
if maps[x][y] == 1:
maps[x][y] += Count
queue.append([x, y])
return -1
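# Hedged sketch of the variant hinted at in the comment above: a separate visited
# structure (plus a deque) so (0, 0) cannot be enqueued twice. It is assumed to give
# the same answers for valid inputs; it is not the original submission.
from collections import deque

def solution_with_visited(maps):
    N, M = len(maps) - 1, len(maps[0]) - 1
    visited = [[False] * (M + 1) for _ in range(N + 1)]
    queue = deque([(0, 0, 1)])  # (x, y, path length including the start cell)
    visited[0][0] = True
    while queue:
        X, Y, dist = queue.popleft()
        if X == N and Y == M:
            return dist
        for dx, dy in ((0, 1), (1, 0), (0, -1), (-1, 0)):
            x, y = X + dx, Y + dy
            if 0 <= x <= N and 0 <= y <= M and maps[x][y] == 1 and not visited[x][y]:
                visited[x][y] = True
                queue.append((x, y, dist + 1))
    return -1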
# Incorrect solution
# Because memoization is not used, it fails to find the minimum over all paths
def solution(maps):
answer = 0
N, M = len(maps) - 1, len(maps[0]) - 1
visitor = maps[:]
direction = [[0, 1], [1, 0], [0, -1], [-1, 0]]
queue = [[0, 0]]
while queue:
print(queue)
X, Y = queue.pop(0)
# finish
if X == N and Y == M:
return answer
if visitor[X][Y] == 1:
visitor[X][Y] = 0
answer += 1
inp = 0
for d in direction:
x, y = X + d[0], Y + d[1]
if 0 <= x <= N and 0 <= y <= M:
if visitor[x][y] == 1:
inp += 1
queue.append([x, y])
answer -= (inp - 1)
print(answer)
return -1
|
#!/usr/bin/env python
""" Arguments:
-h = Display help and exit
-o = Encrypt or Decrypt
-k = Keyword/Passphrase
-f = File for encryption
The vigenere cipher is very similar to the Caesar cipher but much more secure.
This is due to the use of multiple cipher alphabets instead of just one (see Caesar cipher)
The message is encrypted with the use of a vigenere square and a passphrase,
the passphrase tells the individual which alphabet to use and the switch alphabets each time
VIGENERE SQUARE:
b c d e f g h i j k l m n o p q r s t u v w x y z a
c d e f g h i j k l m n o p q r s t u v w x y z a b
d e f g h i j k l m n o p q r s t u v w x y z a b c
e f g h i j k l m n o p q r s t u v w x y z a b c d
f g h i j k l m n o p q r s t u v w x y z a b c d e
g h i j k l m n o p q r s t u v w x y z a b c d e f
h i j k l m n o p q r s t u v w x y z a b c d e f g
i j k l m n o p q r s t u v w x y z a b c d e f g h
j k l m n o p q r s t u v w x y z a b c d e f g h i
k l m n o p q r s t u v w x y z a b c d e f g h i j
l m n o p q r s t u v w x y z a b c d e f g h i j k
m n o p q r s t u v w x y z a b c d e f g h i j k l
n o p q r s t u v w x y z a b c d e f g h i j k l m
o p q r s t u v w x y z a b c d e f g h i j k l m n
p q r s t u v w x y z a b c d e f g h i j k l m n o
q r s t u v w x y z a b c d e f g h i j k l m n o p
r s t u v w x y z a b c d e f g h i j k l m n o p q
s t u v w x y z a b c d e f g h i j k l m n o p q r
t u v w x y z a b c d e f g h i j k l m n o p q r s
u v w x y z a b c d e f g h i j k l m n o p q r s t
v w x y z a b c d e f g h i j k l m n o p q r s t u
w x y z a b c d e f g h i j k l m n o p q r s t u v
x y z a b c d e f g h i j k l m n o p q r s t u v w
y z a b c d e f g h i j k l m n o p q r s t u v w x
z a b c d e f g h i j k l m n o p q r s t u v w x y
a b c d e f g h i j k l m n o p q r s t u v w x y z
As you can see, this cipher is not overly complex, but it will still provide security.
If your key is as long as your message and COMPLETELY random, it is a one-time pad,
which is unbreakable. Since there are no patterns for analysis, it is impossible to
decrypt.
Example, Message = hello world, key = man:
h e l l o w o r l d
m a n m a n m a n m
-------------------
s d x w n i z q x o
The only repetition is the first 'l' of hello and the 'l' of world, but that is due to
the short key length.
"""
import sys
import optparse
print '''**********************************************
Copyright (c) 2010, William Zebulon Farmer
All rights reserved.
Licensed with the BSD License.
**********************************************
'''
def main():
opts = get_args()
message = get_message(opts.filename)
final_message = vigenere_shift(message, opts.key, opts.operation)
if opts.operation == 'encrypt':
encrypt_write(final_message, opts.filename)
elif opts.operation == 'decrypt':
decrypt_write(final_message, opts.filename)
def vigenere_shift(message, key, operation):
key_list = []
letter = 0
ciphertext = []
for byte in key:
if operation == 'decrypt':
key_list.append(-ord(byte))
else:
key_list.append(ord(byte))
for byte in message:
chr_enc = shift_message(byte, key_list[letter])
ciphertext.append(chr_enc)
letter += 1
if letter == len(key):
letter = 0
final_message = ''.join(ciphertext)
return final_message
def encrypt_write(final_message, filename):
if filename != None:
file_open = open(filename + '.enc', mode = 'w')
file_open.write(final_message)
file_open.close()
else:
print final_message
def decrypt_write(final_message, filename):
if filename != None:
file_open = open(filename[0:-4], mode = 'w')
file_open.write(final_message)
file_open.close()
else:
print final_message
def shift_message(message, shift):
''' Uses ASCII codes to identify letter and shift it however many it was shifted
Depending on whether or not file operations were enabled, it will also write the file
'''
end_list = []
end_list.append(chr((ord(message) + shift) % 128)) # mod 128 due to limits of ASCII
final = ''.join(end_list)
return final
def get_message(filename):
if filename != None:
read_file = filename
open_file = open(read_file, mode = 'r')
message = open_file.read()
open_file.close()
else:
message = raw_input('Enter Message: ')
return message
def get_args():
global opts
global args
parser = optparse.OptionParser(usage = 'Usage: %prog <options>', version = 'Version 1.0')
parser.add_option('-o', '--operation', action = 'store', type = 'string', default = None, help = 'Encrypt or decrypt the message')
parser.add_option('-k', '--key' , action = 'store', type = 'string', default = None, help = 'The keyword for encryption or decryption')
parser.add_option('-f', '--filename' , action = 'store', type = 'string', default = None, help = 'This is the name of the read/write file')
opts, args = parser.parse_args()
if opts.operation == None:
opts.operation = raw_input('(E)ncrypt or (D)ecrypt: ')
if opts.operation.lower() in ['encrypt', 'e']:
opts.operation = 'encrypt'
elif opts.operation.lower() in ['decrypt', 'd']:
opts.operation = 'decrypt'
else:
parser.print_help() # invalid operation
sys.exit(1)
if opts.key == None:
opts.key = raw_input('Enter Keyword: ')
return opts
if __name__ == "__main__":
sys.exit(main())
|
import pickle
import os.path
class Personal_data:
def __init__(self, name):
self.name = name
self.info = ['None']
def add_info(self, info):
if 'None' in self.info:
del self.info[self.info.index('None')]
self.info.append(info)
def privetstvie():
print('\nChoose an option:\n'
'"1" Read the file\n'
'"2" Save a new contact to the file\n'
'"3" Add information to a contact\n'
'"4" Change contact information\n'
'"5" Rename a contact\n'
'"6" Delete a contact\n'
'"7" Check the list / look up contact information\n'
'"s" Save\n'
'"w" Quit\n')
#print(os.listdir())
if 'txt1.txt' in os.listdir():
with open('txt1.txt', 'rb') as f:
contacts = pickle.load(f)
else :
with open('txt1.txt', 'wb') as f:
pickle.dump("", f)
contacts = {}
def check(a):
if a in contacts: return True
else:
print('That name does not exist')
return False
def main():
print('\n------- Welcome to My Address Book! -------\n')
while True:
privetstvie()
vibor = input()
if vibor == '1':
for i in contacts:
print(i + ":" )
print(*contacts[i].info, sep='\n', end='\n\n')
elif vibor == '2':
name = input("Enter name:")
contacts[name] = Personal_data(name)
elif vibor == '3':
name = input('Enter Name of contact :')
if check(name):
information = input('Enter information :')
contacts[name].add_info(information)
elif vibor == '4':
name = input('Enter name:')
if check(name):
old_info = input("Enter old info:")
new_info = input("Enter new info:")
if old_info in contacts[name].info:
contacts[name].info[contacts[name].info.index(old_info)] = new_info
elif vibor == '5':
old_key = input("Enter old name:")
if check(old_key):
new_key = input("Enter new name:")
contacts[new_key] = contacts.pop(old_key)
elif vibor == '6':
name = input("Enter name to delete:")
if check(name):
del contacts[name]
elif vibor == '7':
name = input("Enter name to find:")
if check(name): print('Info :', *contacts[name].info)
elif vibor == 's':
with open('txt1.txt', 'wb') as f:
pickle.dump(contacts, f)
print("Game saved")
elif vibor == 'w': break
else :
print("Try Again")
continue
main()
with open('txt1.txt', 'wb') as f:
pickle.dump(contacts, f)
|
first_name = "三"
last_name = "张"
print(first_name + last_name)
print(last_name + first_name)
|
# Generated by Django 2.2.13 on 2020-07-10 08:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0054_remove_about_title'),
]
operations = [
migrations.DeleteModel(
name='About',
),
migrations.DeleteModel(
name='Contact',
),
]
|
from django.contrib.sites.models import Site
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
_thread_locals = local()
def get_current_request():
""" returns the request object for this thead """
return getattr(_thread_locals, "request", None)
def get_current_site():
request = get_current_request()
try:
from django.conf import settings
site = Site.objects.get(pk = settings.SITE_ID)
except BaseException, e:
site = None
if request:
from urlparse import urlparse
domain = urlparse(request.get_host())[2]
try:
site = Site.objects.get(domain = domain)
except BaseException, e:
pass
return site
def get_current_user():
""" returns the current user, if exist, otherwise returns None """
request = get_current_request()
if request and hasattr(request, 'user') and request.user.is_authenticated():
return request.user
return None
def set_thread_var(name, value):
setattr(_thread_locals, name, value)
def get_thread_var(name, default = None):
return getattr(_thread_locals, name, default)
|
def handleRequestForm(request_form, transform_field_config):
"""
Handles POST/GET data from a user who is filtering BSB data.
The transform config specifies which transforms apply to which fields;
returns the corresponding MongoDB query for the data the user wants.
:param request_form: dict, transform_field_config: dict
:return: mongo_query: dict
"""
input_query = dict(request_form)
mongo_query = {"$and": list()}
print("raw_input", input_query)
mongo_date_queries = {}
mongo_name_queries = {}
for fieldName, fieldData in input_query.items():
#print(fieldName)
mongo_field_query = str(fieldData)
if len(mongo_field_query) < 1:
continue
for transform in transform_field_config[fieldName]:
if transform == "CommaString":
mongo_field_query = handleCommaString(mongo_field_query)
if transform == "DateString":
mongo_field_query = handleDateString(mongo_field_query)
if transform == "DropdownString":
mongo_field_query = handleDropdownString(mongo_field_query)
if fieldName in ["playerID", "orderDetailID"]:
if len(mongo_field_query) > 0:
mongo_query["$and"].append({fieldName: {
"$in": mongo_field_query
}
})
if fieldName in ["region"]:
if mongo_field_query != "All Regions":
mongo_query["$and"].append({fieldName: {
"$in": [mongo_field_query]
}})
if fieldName in ["playerName", "userName"]:
print(fieldName, mongo_field_query)
nameType = fieldName.split("Name")[0]
mongo_name_queries[nameType] = mongo_name_queries.get(nameType, {
"FirstName": list(),
"LastName": list()
})
for name_query in mongo_field_query:
if len(name_query.split(" ")) > 1:
firstName, lastName = " ".join(name_query.split(" ")[:-1]), name_query.split(" ")[-1]
mongo_name_queries[nameType]["FirstName"].append(firstName)
mongo_name_queries[nameType]["LastName"].append(lastName)
else:
lastName = name_query
mongo_name_queries[nameType]["LastName"].append(lastName)
if fieldName in ["payment_Date_Start", "payment_Date_End", "order_Date_Start", "order_Date_End"]:
dateFieldMeta = fieldName.split("_")
dateType, dateStartEnd = dateFieldMeta[0]+"Date", dateFieldMeta[2]
mongo_date_queries[dateType] = mongo_date_queries.get(dateType, dict())
if dateStartEnd == "Start":
mongo_date_queries[dateType]["$gte"] = mongo_field_query
if dateStartEnd == "End":
mongo_date_queries[dateType]["$lt"] = mongo_field_query
print(mongo_name_queries)
if len(mongo_name_queries) > 0:
for k, v in mongo_name_queries.items():
for key, val in v.items():
if len(val) > 0:
mongo_query["$and"].append({k+key: {
"$in": val
}})
for k, v in mongo_date_queries.items():
if len(mongo_query["$and"]) > 0:
mongo_query["$and"].append({k:v})
if len(mongo_query["$and"]) < 1:
mongo_query.pop("$and")
for k,v in mongo_date_queries.items():
mongo_query[k] = mongo_date_queries[k]
print("mongo_query", mongo_query)
return mongo_query
def handleCommaString(comma_string):
"""
Takes an input string for a field with multiple values separated by commas and
returns a list of valid strings. Strips whitespace and handles invalid inputs.
:param comma_string: str
:return: list_of_str
"""
return [x.strip() for x in comma_string.split(",") if len(x.strip()) > 0]
def handleDateString(date_string):
return date_string
def handleDropdownString(dropdown_string):
return dropdown_string
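# Hedged usage sketch for handleRequestForm: the field names and transform config
# below are illustrative assumptions, not the application's real configuration.
if __name__ == "__main__":
    sample_form = {"playerID": "101, 102", "region": "EU", "payment_Date_Start": "2020-01-01"}
    sample_config = {"playerID": ["CommaString"], "region": ["DropdownString"], "payment_Date_Start": ["DateString"]}
    print(handleRequestForm(sample_form, sample_config))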
|
import tensorflow as tf
import Model_hyperparameters as p
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from IPython.display import display
from seaborn import color_palette
import cv2
def batch_norm(inputs,training, data_format):
"""Performs a batch normalization using a standard set of parameters."""
return tf.layers.batch_normalization(inputs=inputs,axis=1 if data_format=='channels_first' else 3,
momentum= p._BATCH_NORM_DECAY,epsilon=p._BATCH_NORM_EPSILON,scale=True,training=training)
def fixed_padding(inputs,kernel_size,data_format):
"""ResNet implementation of fixed padding.
Pads the input along the spatial dimensions independently of input size.
Args:
inputs: Tensor input to be padded.
kernel_size: The kernel to be used in the conv2d or max_pool2d.
data_format: The input format.
Returns:
A tensor with the same format as the input.
"""
pad_total = kernel_size-1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
[pad_beg, pad_end],
[pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs
def conv2d_fixed_padding(inputs,filters,kernel_size,data_format,strides=1):
"""Strided 2-D convolution with explicit padding."""
if strides > 1:
inputs = fixed_padding(inputs,kernel_size,data_format)
return tf.layers.conv2d(inputs,filters,kernel_size,strides,padding=('SAME' if strides == 1 else 'VALID'),
use_bias=False,data_format = data_format)
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
x = [1, 2, 3]
y = [1, 2, 3]
W = tf.Variable(10, dtype=tf.float32)
X = tf.constant(x, dtype=tf.float32)
Y = tf.constant(y, dtype=tf.float32)
hx = W * X
cost = tf.reduce_mean(tf.square(hx - Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train = optimizer.minimize(cost)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for i in np.arange(100):
sess.run(train)
print(i, sess.run(cost))
print(sess.run(W))
|
"""
Interface between python and arduino for live data logger
Author: James Keaveney
19/05/2015
"""
import time
import csv
import serial
import sys
import os  # needed for os.path.isfile in save_data
import cPickle as pickle
import numpy as np
import matplotlib.pyplot as plt
nchannels = 2 # number of total channels (time axis + ADC channels)
datalen = 2000 # numbers in each array that serial.print does in arduino
class SerialDataPlotter:
"""
class for interfacing with the Arduino Data Logger
The data logger runs on an Arduino DUE; the sketch is "SixChannelLogger.ino"
and should also be in this directory
"""
def __init__(self,recording_time=1,verbose=True):
self.recording_time = recording_time
self.verbose = verbose
self.time_axis = None
def get_data(self):
"""
Initialise serial port and listen for data until timeout.
Convert the bytestream into numpy arrays for each channel
Returns:
two 1D numpy arrays: the time axis and ADC channel 1
"""
# setup serial port - it's the native USB port so baudrate is irrelevant,
# the data is always transferred at full USB speed
ser = serial.Serial(
port='COM4',
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.recording_time # seconds - should be the same amount of time as the arduino will send data for + 1
)
#testing - repeat serial read to confirm data arrays are always predictable
#n_reps = 2
#for i in range(n_reps):
st = time.clock()
#sync
self._handshake(ser)
#get data
data = ser.readline() # this number should be larger than the number of
# bytes that will actually be sent
ser.close() # close serial port
et = time.clock() - st
if self.verbose:
print 'Elapsed time reading data (s): ', et
#make string into list of strings, comma separated
data_list = data.split(',')
# remove new line characters (are any present?)
#data_list = filter(lambda a: a != '\n', data_list)
# make list of strings into 1D numpy array of floats (ignore last point as it's an empty string)
data_array = np.array([float(i) for i in data_list[:-1]])
if self.verbose:
print 'Length of array:', len(data_array)
# reshape array into 3D array
data_array_3d = data_array.reshape(-1,nchannels,datalen)
# then separate 1d arrays
self.time_axis = data_array_3d[0][0]
for i in range(1,len(data_array_3d)):
self.time_axis = np.append(self.time_axis, data_array_3d[i][0])
# convert time axis into ms, and zero the axis
self.time_axis = (self.time_axis - self.time_axis[0])/1e3
self.channel1 = data_array_3d[0][1]
for i in range(1,len(data_array_3d)):
self.channel1 = np.append(self.channel1, data_array_3d[i][1])
if self.verbose:
print 'Data acquisition complete.'
return self.time_axis,self.channel1
def _handshake(self,serialinst):
""" Send/receive pair of bytes to synchronize data gathering """
nbytes = serialinst.write('A') # can write anything here, just a single byte (any ASCII char)
if self.verbose:
print 'Wrote bytes to serial port: ', nbytes
#wait for byte to be received before returning
st = time.clock()
byte_back = serialinst.readline()
et = time.clock()
if self.verbose:
print 'Received handshake data from serial port: ',byte_back
print 'Time between send and receive: ',et-st
def save_data(self,filename):
""" Save generated data to pickle file for use later """
if self.time_axis is not None:
timestamp = time.strftime("-%Y-%m-%d-%H%M")
full_filename = filename + timestamp + '.pkl'
#check if the file already exists - either overwrite or append
if os.path.isfile(full_filename):
print '\n\n WARNING - FILE ALREADY EXISTS !!!'
if raw_input('Overwrite (y/n)?') in ['y','Y']:
pass
else:
full_filename = full_filename[:-4] + '_new.pkl'
with open(full_filename,'wb') as fileobj:
pickle.dump((self.time_axis, self.channel1), fileobj)
if self.verbose:
print 'Output saved'
else:
print 'No data to save yet'
def load_data(self,full_filename):
""" Load previously generated and pickled data and return it """
with open(full_filename,'rb') as fileobj:
self.time_axis, self.channel1 = pickle.load(fileobj)
return self.time_axis, self.channel1
def cleanup(self):
# delete serial port instance?
pass
def main():
""" Grab data once and save it to file, with current timestamp """
SR = SerialDataPlotter(recording_time=6)
filename = "TestData"
t, C1 = SR.get_data()  # get_data returns the time axis and a single ADC channel
SR.save_data(filename)
if __name__ == '__main__':
main()
|
import pulp
class CitrusError(pulp.PulpError):
pass
class NonBinaryVariableError(CitrusError):
pass
class MissingProblemReference(CitrusError):
pass
def assert_binary(var):
if var.isBinary(): return
if var.isConstant() and (var.value() == 1 or var.value() == 0): return
raise NonBinaryVariableError(var.name)
def assert_same_problem(x, y):
if x._problem is not y._problem:
raise CitrusError('Variables must be associated with the same problem.')
return x # return for use in reduce(assert_same_problem, (x, y, z))
|
#!/usr/bin/env python3
"""
Extract a list of AP names from VisualRF Building XML
"""
from argparse import ArgumentParser
import xml.etree.ElementTree as ET
def process_building(building):
data = {}
name = building.attrib['name']
sites = building.findall("site")
for site in sites:
floor, aps = process_site(site)
data[floor] = aps
return (name, data)
def process_site(site):
floor = int(float(site.attrib['floor']))
aps = site.findall("ap")
names = [process_ap(ap) for ap in aps]
names = list(filter(lambda name: name != "NETWORK SWITCHES"
and name != "", names))
return (floor, names)
def process_ap(ap):
return ap.attrib['name']
def main():
# Parse arguments
arg_parser = ArgumentParser(description='Extract AP names')
arg_parser.add_argument('-x', '--xml', action='store', required=True,
help='Path to XML file')
settings = arg_parser.parse_args()
tree = ET.parse(settings.xml)
buildings = tree.findall("./building")
data = {}
for building in buildings:
name, floors = process_building(building)
data[name] = floors
print(data)
if __name__ == '__main__':
main()
|
"""
7. Write a program that calculates the area of a square,
then shows double that area to the user.
"""
def calc_quad(l):
return l ** 2
if __name__ == '__main__':
assert calc_quad(1) == 1
assert calc_quad(2) == 4
assert calc_quad(3) == 9
assert calc_quad(4) == 16
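    # Hedged sketch of the second half of the exercise above (showing double the area);
    # the prompt string and the use of input() are illustrative assumptions.
    side = float(input('Square side: '))
    print('Double the area:', 2 * calc_quad(side))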
|
import smtplib
from email.message import EmailMessage
import os.path
from os import path
import requests
from bs4 import BeautifulSoup
from lxml import html
from selenium import webdriver
import time
import os
import sys
#"pip3 install secure-smtplib"
# bs4 mac terminal command: "pip3 install beautifulsoup4"
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import stdiomask
import re
from datetime import datetime
from datetime import timedelta
from datetime import timezone
import json
import pytz
class assignment:
def __init__(self, link, driver):
self.url = link
self.link = str(link[0:29] + "api/v1/" +
link[29:len(link)])
self.driver = driver
self.driver.get(self.link)
self.pageSource = driver.find_element_by_xpath(
"/html/body/pre").text
self.pageJson = json.loads(str(self.pageSource))
temp = self.pageJson['due_at']
tempDate = datetime.strptime(temp, '%Y-%m-%dT%H:%M:%SZ')
oldZone = pytz.timezone("Zulu")
newZone = pytz.timezone("US/Eastern")
localTimeS = oldZone.localize(tempDate)
self.dueDate = localTimeS.astimezone(newZone)
self.delta = self.dueDate - datetime.now(timezone.utc)
self.name = self.pageJson['name']
self.courseId = self.pageJson['course_id']
self.points = self.pageJson['points_possible']
self.assignmentId = self.pageJson['assignment_group_id']
def calcDelta(self):
self.delta = self.dueDate - datetime.now(timezone.utc)
return self.delta
def __str__(self):
date = str(self.delta)
temp = date.split(",")
days = ""
hours = ""
minutes = ""
notDays = ""
if(len(temp) == 1):
notDays = temp[0]
hours = notDays[0:notDays.index(":")]
minutes = notDays[notDays.index(":") + 1:notDays.index(":") + 3]
else:
notDays = temp[1]
days = temp[0]
hours = notDays[0:notDays.index(":")]
minutes = notDays[notDays.index(":") + 1:notDays.index(":") + 3]
hours = int(hours)
minutes = int(minutes)
easyReadTimeRemaining = ""
if(days!=""):
easyReadTimeRemaining = days + ", "
if(hours > 0):
easyReadTimeRemaining = easyReadTimeRemaining + str(hours) + " hours, "
if(minutes > 0):
easyReadTimeRemaining = easyReadTimeRemaining + str(minutes) + " minutes"
return "DUE IN: " + easyReadTimeRemaining
def assignmentName(self):
return self.name
def assignmentUrl(self):
return self.url
def assignmentInfo(self):
info = str(self.assignmentId) + ", " + str(self.name) + \
", " + str(self.dueDate) + ", " + str(self.points) + \
", " + str(self.link) + ", " + str(self.courseId)
return info
# PATH = os.path.join(sys.path[0], "chromedriver")
# options = webdriver.ChromeOptions()
# options.add_argument('--lang=en_US')
# options.headless = False
# options.detach = True
# driver = webdriver.Chrome(PATH, options=options)
# # driver.get("http://njit.instructure.com/login/saml")
# # username = driver.find_element_by_name("j_username")
# # Password = driver.find_element_by_name("j_password")
# # username.send_keys("kj323")
# # Password.send_keys("inf@#123UCID+_")
# # username.send_keys(Keys.RETURN)
# # checkbox = driver.find_element_by_id("accept")
# # checkbox.click()
# # accept = driver.find_element_by_id("submitbtn")
# # accept.click()
# # links = ["https://njit.instructure.com/courses/14901/assignments/92908",
# # "https://njit.instructure.com/courses/13721/assignments/88626"]
# # a = assignment(
# # "https://njit.instructure.com/courses/14901/assignments/92908", driver)
# # links = assignmentLinks()
# # lis = assignmentList(links)
# # # l = []
# # # for link in links:
# # # a = assignment(link, driver)
# # l.append(a)
# for assign in lis:
# print(assign)
|
#!/usr/bin/python
import datetime
print(datetime.datetime.now())
|
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404
from django.views import View
from analytics.models import ClickEvent
from .models import fesURL
from .forms import SubmitUrlForm
# Create your views here.
class HomeView(View):
def get(self,request,*args,**kwargs):
the_form=SubmitUrlForm()
context= {
"title": "FES.co",
"form": the_form,
}
return render(request,"shortener/home.html",context) #by default django looks within the apps for template
def post(self,request,*args,**kwargs):
form=SubmitUrlForm(request.POST)
context= {
"title": "FES.co",
"form": form,
}
template="shortener/home.html"
if form.is_valid():
new_url=form.cleaned_data.get("url")
obj,created=fesURL.objects.get_or_create(url=new_url)
context= {
"object":obj,
"created":created,
}
if created:
template="shortener/success.html"
else:
template="shortener/already_exists.html"
return render(request,template,context)
class URLRedirectView(View):
def get(self, request, shortcode=None, *args, **kwargs):
qs = fesURL.objects.filter(shortcode__iexact=shortcode)
if qs.count() != 1 and not qs.exists():
raise Http404
obj = qs.first()
print(ClickEvent.objects.create_event(obj))
return HttpResponseRedirect(obj.url)
'''
def fes_redirect_view(request,shortcode=None,*args,**kwargs):
#print(shortcode)
obj=get_object_or_404(fesURL,shortcode=shortcode)
obj_url=obj.url
# try:
# obj=fesURL.objects.get(shortcode=shortcode)
# except:
# obj=fesURL.objects.all.first() #first item in that query set
# obj_url=None
# qs=fesURL.objects.filter(shortcode__iexact=shortcode.upper())
# if qs.exists() and qs.count()==1:
# obj=qs.first()
# obj_url=obj.url
return HttpResponse("hello {sc}".format(sc=obj_url))
'''
|
# -*- coding: utf-8 -*-
__author__ = 'benywon'
from public_functions import *
cc=load_file('03-04-wikiQA-MAP_0.710809035076_MRR+0.727666480753.pickle')
print cc
print cc
print cc
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class utm_did_numbers(models.Model):
_name = 'utm.did_numbers'
_description = 'utm.did_numbers'
did_number = fields.Char(string='Numero DID')
source_id = fields.Many2one('utm.source', string='Origen')
medium_id = fields.Many2one('utm.medium', string='Medio')
campaign_id = fields.Many2one('utm.campaign', string='Campaña')
company_id = fields.Many2one('res.company', string='Companía')
country_id = fields.Many2one('res.country', string='País')
user_id = fields.Many2one('res.users')
team_id = fields.Many2one('crm.team')
datetime = fields.Datetime(string='Fecha')
|
# coding: utf-8
# flake8: noqa
from __future__ import absolute_import
# import models into model package
from swagger_server.models.body import Body
from swagger_server.models.http_problems import HTTPProblems
from swagger_server.models.inline_response200 import InlineResponse200
from swagger_server.models.trabajo import Trabajo
from swagger_server.models.trabajo_links import TrabajoLinks
from swagger_server.models.trabajo_links_trabajos import TrabajoLinksTrabajos
|
from flask import Blueprint, request, jsonify, Response
from ..controller import Pekerja_pekerjaan
from flask_cors import cross_origin
import json
from ..controller.utils import upload_file
pekerja_pekerjaan_routes = Blueprint('Pekerja_pekerjaan', __name__)
@pekerja_pekerjaan_routes.route("/all", methods=['GET'])
@cross_origin()
def get_all():
pekerja_pekerjaan = Pekerja_pekerjaan.get_all()
return jsonify(pekerja_pekerjaan)
@pekerja_pekerjaan_routes.route("/all/<pekerjaan_id>", methods=['GET'])
@cross_origin()
def get_all_by_pekerjaan_id(pekerjaan_id:int):
pekerja_pekerjaan = Pekerja_pekerjaan.get_all_by_pekerjaan_id(pekerjaan_id)
return jsonify(pekerja_pekerjaan)
@pekerja_pekerjaan_routes.route("/add", methods=['POST'])#add sesuai dengan ID dan id employee yang ada dalam database Pekerja
@cross_origin()
def add():
pekerja_id = request.form.get('pekerja_id')
pekerjaan_id = request.form.get('pekerjaan_id')
position = request.form.get('position')
nama = request.form.get('nama')
type_status = request.form.get('status')
due_date = request.form.get('due_date')
path = request.files['file_path']
path_file = upload_file(path)
pekerja_pekerjaan = Pekerja_pekerjaan.add(nama,pekerja_id,pekerjaan_id,position,type_status,path_file,due_date)
return jsonify(pekerja_pekerjaan)
@pekerja_pekerjaan_routes.route("/update", methods=['PUT'])
@cross_origin()
def update_by_id():
id_pekerjaan = request.json.get('id_pekerjaan')
nama_job = request.json.get('nama_job')
id_pekerja = request.json.get('id_pekerja')
id_reviewer = request.json.get('id_reviewer')
document_terkait = request.json.get('document_terkait')
object_job = request.json.get('object_job')
pekerja_pekerjaan = Pekerja_pekerjaan.update_by_id(id_pekerjaan,nama_job,id_pekerja,document_terkait,object_job,id_reviewer)
return jsonify(pekerja_pekerjaan)
@pekerja_pekerjaan_routes.route("/delete", methods=['POST'])
@cross_origin()
def delete_by_id():
pekerjaan_id = request.json.get('pekerjaan_id')
pekerja_pekerjaan = Pekerja_pekerjaan.delete_by_id(pekerjaan_id)
return jsonify(pekerja_pekerjaan)
|
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from paypal.standard.ipn.models import PayPalIPN
from paypal.standard.ipn.signals import valid_ipn_received
from .test_ipn import IPN_POST_PARAMS, IPNUtilsMixin, MockedPostbackMixin
@override_settings(ROOT_URLCONF='paypal.standard.ipn.tests.test_urls')
class AdminTest(MockedPostbackMixin, IPNUtilsMixin, TestCase):
def setUp(self):
super(AdminTest, self).setUp()
User = get_user_model()
user = User.objects.create_superuser(username="admin",
email="admin@example.com",
password="password")
self.user = user
def test_verify_action(self):
PayPalIPN._postback = lambda self: b"Internal Server Error"
self.paypal_post(IPN_POST_PARAMS)
ipn_obj = PayPalIPN.objects.get()
self.assertEqual(ipn_obj.flag, True)
url = reverse('admin:ipn_paypalipn_changelist')
self.assertTrue(self.client.login(username='admin',
password='password'))
response = self.client.get(url)
self.assertContains(response, IPN_POST_PARAMS['txn_id'])
self.got_signal = False
self.signal_obj = None
def handle_signal(sender, **kwargs):
self.got_signal = True
self.signal_obj = sender
valid_ipn_received.connect(handle_signal)
PayPalIPN._postback = lambda self: b"VERIFIED"
response_2 = self.client.post(url,
{'action': 'reverify_flagged',
'_selected_action': [str(ipn_obj.id)]})
response_3 = self.client.get(response_2['Location'])
self.assertContains(response_3,
"1 IPN object(s) re-verified")
ipn_obj = PayPalIPN.objects.get()
self.assertEqual(ipn_obj.flag, False)
self.assertTrue(self.got_signal)
|
def gcd(s,v):
if(v==0):
return s
else:
return gcd(v,s%v)
s1,v1=map(int,input().split())
LCM=(s1*v1)/gcd(s1,v1)
print(int(LCM))
|
from django.http import HttpResponse,HttpResponseRedirect
from django.shortcuts import render
from website.models import *
name = "shaligram.prajapat@gmail.com"
def login(request):
if request.method == 'GET':
return render(request, 'pbas/index.html')
if request.method == 'POST':
current_user_object = Userinfo.objects.all().filter(user_id = request.POST['user_id'])
if current_user_object:
if current_user_object[0].pwd == request.POST['pwd']:
#return HttpResponse('login')
return render(request, 'pbas/yearModal.html')
#return HttpResponseRedirect(request, 'pbas/yearModal.html')
def signup_action(request):
if request.POST['userID'] and request.POST['regPass'] and request.POST['confirmPass']:
''' -----fetching the data from the user and saving it in database---- '''
data = Userinfo(user_id= request.POST['userID'], pwd = request.POST['regPass'])
data.save()
return render(request, 'pbas/index.html')
#return render_to_response('fileupload/upload.html', {'form': c['UploadFileForm']}, RequestContext(request))
def home_page(request):
return render(request, 'pbas/home.html')
def myfriends(request):
myfriends_list = Myfriend.objects.all()
context = {'myfriends_list':myfriends_list}
return render(request, 'website/myfriends.html', context)
|
# Generated by Django 2.2.6 on 2020-05-27 15:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catalog', '0005_auto_20200527_1727'),
]
operations = [
migrations.DeleteModel(
name='Test',
),
]
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c, sensor
from esphome.const import CONF_ID
DEPENDENCIES = ['i2c']
CONF_I2C_ADDR = 0x01
empty_i2c_component_ns = cg.esphome_ns.namespace('empty_i2c_component')
EmptyI2CComponent = empty_i2c_component_ns.class_('EmptyI2CComponent', cg.Component, i2c.I2CDevice)
CONFIG_SCHEMA = cv.Schema({
cv.GenerateID(): cv.declare_id(EmptyI2CComponent)
}).extend(cv.COMPONENT_SCHEMA).extend(i2c.i2c_device_schema(CONF_I2C_ADDR))
def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
yield cg.register_component(var, config)
yield i2c.register_i2c_device(var, config)
|
# Generated by Django 2.2.7 on 2019-11-17 17:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tssite', '0004_auto_20191117_1714'),
]
operations = [
migrations.AlterField(
model_name='teacher',
name='mname',
field=models.CharField(default=None, max_length=10),
),
]
|
for i in range(1000000000, 1000000000000000001):
for j in range(1, )
|
# Generated by Django 2.1.7 on 2019-03-28 10:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0009_images'),
]
operations = [
migrations.AddField(
model_name='post',
name='image_2',
field=models.ImageField(blank=True, upload_to='post_pics'),
),
migrations.AddField(
model_name='post',
name='image_3',
field=models.ImageField(blank=True, upload_to='post_pics'),
),
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(blank=True, upload_to='post_pics'),
),
]
|
from lib import *
def make_datapath_list(rootpath):
image_path_template = os.path.join(rootpath, 'JPEGImages', '%s.jpg')
annotation_path_template = os.path.join(rootpath, 'Annotations', '%s.xml')
train_id_names = os.path.join(rootpath, 'ImageSets/Main/train.txt')
val_id_names = os.path.join(rootpath, 'ImageSets/Main/val.txt')
train_img_list = list()
train_annotation_list = list()
for line in open(train_id_names):
file_id = line.strip()  # remove the newline character and surrounding spaces
img_path = (image_path_template % file_id)  # substitute each file_id into the %s of the template
anno_path = (annotation_path_template % file_id)  # substitute each file_id into the %s of the template
train_img_list.append(img_path)
train_annotation_list.append(anno_path)
val_img_list = list()
val_annotation_list = list()
for line in open(val_id_names):
file_id = line.strip()  # remove the newline character and surrounding spaces
img_path = (image_path_template % file_id)  # substitute each file_id into the %s of the template
anno_path = (annotation_path_template % file_id)  # substitute each file_id into the %s of the template
val_img_list.append(img_path)
val_annotation_list.append(anno_path)
return train_img_list, train_annotation_list, val_img_list, val_annotation_list
if __name__ == '__main__':
rootpath = './data/VOCdevkit/VOC2012/'
train_img_list, train_annotation_list, val_img_list, val_annotation_list = make_datapath_list(rootpath)
print(len(train_img_list))
print(train_img_list[0])
|
import logging
import importlib
import argparse
import inspect
import pkgutil
import sys
import colorama
from sv2.helpers import get_public_class, get_public_members
dev_log = logging.getLogger("dev")
fh = logging.FileHandler("/tmp/sv2.log")
fh.setLevel(logging.DEBUG)
dev_log.addHandler(fh)
user_log = logging.getLogger("user")
user_log.setLevel(logging.WARNING)
class Issue:
def __init__(self, msg, long_msg):
self.msg = msg
self.long_msg = long_msg
class WontRun:
def __init__(self, reason, name):
self.reason = reason
self.name = name
class Report:
def __init__(self, name):
self.name = name
self.issues = []
self.reasons = []
self.ex = None
def new_issue(self, msg, long_msg=""):
self.issues.append(Issue(msg, long_msg))
def wont_run(self, reason):
caller = inspect.stack()[1][3]
if caller == "makes_sense":
name = self.name
else:
name = "{}.{}".format(self.name, caller)
self.reasons.append(WontRun(reason, name))
def exception(self, ex):
self.ex = ex
class ReportManager:
# TODO: Maybe we should add the possibility to set a level of priority
def __init__(self, verbose):
self._reports = []
self._verbose = verbose
self._exceptions = False
def add_report(self, r):
self._reports.append(r)
def _print_reasons(self, r):
print(colorama.Fore.BLUE, end="")
for i in r.reasons:
print("\t\"{}\" check did not run ({})".format(i.name, i.reason))
def print(self):
ret_val = 0
counter = 0
colorama.init()
for r in self._reports:
not_executed = len(r.reasons) == 1 and r.reasons[0].name == r.name
if not self._verbose and not_executed:
counter += 1
continue
something_to_print = r.ex or len(r.reasons) > 0 or len(r.issues) > 0
if len(r.issues) > 0:
ret_val = 1
if something_to_print or self._verbose:
print(colorama.Fore.WHITE, end="")
print("Reports for \"{}\":".format(r.name))
if not something_to_print and self._verbose:
print(colorama.Fore.GREEN, end='')
print("\t\"{}\" NO issues found".format(r.name))
elif r.ex:
self._exceptions = True
print(colorama.Fore.RED, end='')
print("\t\"{}\" returned exception: {}".format(r.name, r.ex))
elif not_executed:
self._print_reasons(r)
else:
if self._verbose:
self._print_reasons(r)
if not self._verbose:
counter += len(r.reasons)
print(colorama.Fore.YELLOW, end="")
for i in r.issues:
print("\t{}".format(i.msg))
if (self._verbose or something_to_print) and r != self._reports[-1]:
print("")
print(colorama.Style.RESET_ALL, end="")
if self._exceptions:
ret_val = 2
print(
"Exceptions ocurred, check log file on /tmp/sv2.log for more information")
if counter > 0:
print(
"\n{} checkers couldn't run, use --verbose to see more information".format(counter))
return ret_val
def setup_args():
parser = argparse.ArgumentParser()
g = parser.add_mutually_exclusive_group()
g.add_argument('--list-checkers', action="store_true",
help='List available checkers')
g.add_argument('--list-all-checkers', action="store_true",
help='List all available checkers')
parser.add_argument('--force', action='store_true',
help="Force the execution of checks (for debugging purpose only)")
parser.add_argument('--select', nargs='+',
help='Select checkers to be run')
parser.add_argument('--exclude', nargs='+',
help='Exclude the given checkers')
parser.add_argument('--verbose', action='store_true',
help="Tell which checkers had no issues and which ones won't run")
return parser
def get_available_checkers():
m = importlib.import_module("sv2_checkers")
return [s[1] for s in pkgutil.walk_packages(m.__path__)]
def import_checker(name):
return importlib.import_module("sv2_checkers." + name)
def import_checkers(l):
return [import_checker(m) for m in l]
def retrieve_checker_methods(module):
methods_list = []
for c in get_public_class(module):
for member in get_public_members(getattr(module, c)):
methods_list.append(member)
return methods_list
def list_checkers(l):
print("LIST OF AVAILABLE CHECKERS")
for module in import_checkers(l):
name = module.__name__.split(".")[1]
summary = module.summary
print(colorama.Fore.WHITE, end='')
print("\t{}: ".format(name), end='')
print(colorama.Fore.BLUE, end='')
print(summary)
print(colorama.Style.RESET_ALL, end="")
def list_all_checkers(l):
print("LIST OF ALL AVAILABLE CHECKERS")
for module in import_checkers(l):
summary = module.summary
name = module.__name__.split(".")[1]
print(colorama.Fore.WHITE, end="")
print("\tList of {} checks".format(name))
print(colorama.Fore.BLUE, end="")
for member in retrieve_checker_methods(module):
print("\t\t", member)
print(colorama.Style.RESET_ALL, end="")
def run_checkers(checkers, r_manager, opts, force):
for c in checkers:
name = c.__name__.split(".")[-1]
r = Report(name)
try:
if force or c.makes_sense(r):
c.run(r, opts[name])
except Exception as ex:
dev_log.exception(ex)
r.exception(ex)
r_manager.add_report(r)
return r_manager
def initialize_checkers_options(checkers):
checkers_options = {}
for i in checkers:
checkers_options[i] = {"exclude_list": [], "select_list": []}
return checkers_options
def main():
parser = setup_args()
args = parser.parse_args()
checkers = get_available_checkers()
checkers_options = initialize_checkers_options(checkers)
if args.exclude:
for i in args.exclude:
if "." in i:
name, check = i.split(".")
checkers_options[name]["exclude_list"].append(check)
else:
checkers.remove(i)
if args.select:
checkers = []
# TODO: Check that provided checkers are valid
for i in args.select:
if "." in i:
name, check = i.split(".")
checkers_options[name]["select_list"].append(check)
if name not in checkers:
checkers.append(name)
else:
checkers.append(i)
if args.list_checkers:
list_checkers(checkers)
return 0
elif args.list_all_checkers:
list_all_checkers(checkers)
return 0
else:
reports = ReportManager(args.verbose)
checkers_modules = import_checkers(checkers)
run_checkers(checkers_modules, reports, checkers_options, args.force)
return reports.print()
if __name__ == "__main__":
sys.exit(main())
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import pandas as pd
import datetime
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
df = pd.read_csv("assets/data.csv")
# Dates
dates = []
for _date in df["Date"]:
date = datetime.datetime.strptime(_date,"%Y-%m-%d %H:%M:%S").date()
dates.append(date)
# Open price
open_price = df["Open"].values
# Close price
close_price =df["Close"].values
# Predicted price
predict_price =df["Predict"].values
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
html.H2(children="Python学習~株価の反映~",
style={
"textAlign": "center",
"color": "blue"
}),
html.Div(children=[
dcc.Graph(
id="open_graph",
figure={
"data":[
go.Scatter(
x=dates,
y=open_price,
mode="lines",
name="オープン価格",
opacity=0.7,
yaxis="y1"
),
go.Scatter(
x=dates,
y=close_price,
mode="lines",
name="クローズ価格",
opacity=0.7,
yaxis="y1"
),
go.Scatter(
x=dates,
y=predict_price,
mode="lines",
name="予測価格",
opacity=0.7,
yaxis="y1"
)
]
}
)
])
])
if __name__ == '__main__':
app.run_server(debug=True)
|
from django.shortcuts import render
from django.shortcuts import HttpResponse
from math import factorial
def index(request):
return HttpResponse("<h1>welcome to views of dp5app</h1>")
def home(request):
return render(request,"dp5app/main.html",{'pname':"abhilash"})
def fact(request,n):
n=int(n)
return HttpResponse("<h1>factorial of value {} is:{}</h1>".format(n,factorial(n)))
def child(request):
return render(request,"child.html")
|
from flask_testing import TestCase
from .context import slopespot
import os
from slopespot.app import app, db
from slopespot.model import Mountain
class TestMountain(TestCase):
def create_app(self):
return app
def setUp(self):
self.db = db
self.db.create_all()
def tearDown(self):
self.db.session.remove()
self.db.session.commit()
self.db.drop_all()
def test_adding_mountain(self):
m = Mountain(name='Whiteface')
self.db.session.add(m)
self.db.session.commit()
self.assertEqual(1, len(m.query.all()))
|
# -*- coding: utf-8 -*-
"""
@author: melkarmo
"""
S = [1,2,5,10,20,50,100,200,500,1000,2000,5000,10000] # list of available coin denominations
# The following function returns the minimum of a and b,
# allowing either a or b to be the value "infini" (infinity).
def mini(a,b):
if a == "infini" :
return b
elif b == "infini" :
return a
else :
return min(a, b)
def monnaie(S,M):
    mat = [ [0 for j in range(M+1)] for k in range(len(S)+1)] # matrix of optimal solutions
    memoire = [ [0 for j in range(M+1)] for k in range(len(S)+1)] # memoization matrix (which choice was made)
    t = [0 for j in range (len(S))] # solution list (coin count per denomination)
    # --- double iteration ---
for i in range(0, len(S)+1):
for m in range(0, M+1):
if m == 0:
mat[i][m] = 0
elif i == 0:
mat[i][m] = "infini"
else:
ajout = 0
if (m - S[i-1]) >= 0 :
ajout = 1 + mat[i][(m - S[i-1])]
else :
ajout = "infini"
declin = 0
if i >= 1 :
declin = mat[i-1][m]
else :
declin = "infini"
mat[i][m] = mini(ajout, declin)
                # --- memoization ---
if mat[i][m] != "infini":
if mat[i][m] == ajout :
memoire[i][m] = "ajout"
elif mat[i][m] == declin :
memoire[i][m] = "declin"
    # --- solution reconstruction ---
    x, y = M, len(S)
while x > 0 and y > 0:
if memoire[y][x] == "ajout":
t[y-1] += 1
x = x - S[y-1]
elif memoire[y][x] == "declin":
y = y -1
return mat[len(S)][M], t
# --- tests ---
print(monnaie(S,389))
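# For M = 389 and the stock S above, the optimum should use 8 coins
# (200 + 100 + 50 + 20 + 10 + 5 + 2 + 2), so the call above is expected to
# print 8 together with the per-denomination count list t.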
|
import json
from country_codes import get_country_code
import pygal.maps.world
from pygal.style import RotateStyle as RS
from pygal.style import LightColorizedStyle as LCS
# Load the population data into a list
filename='population.json'
with open(filename) as file:
pop_data=json.load(file)
# Collect each country's 2016 population
cc_populations={}
for pop_dict in pop_data:
if pop_dict['Year'] == 2016:
country = pop_dict['Country Name']
population = int(float(pop_dict['Value']))
code=get_country_code(country)
if code:
cc_populations[code]=population
# Split the countries into three groups by population
cc_pops_1,cc_pops_2,cc_pops_3={},{},{}
for cc,pop in cc_populations.items():
if pop<10000000:
cc_pops_1[cc]=pop
elif pop<1000000000:
cc_pops_2[cc]=pop
else:
cc_pops_3[cc]=pop
wm_style=RS("#336699",base_style=LCS)
wm=pygal.maps.world.World(style=wm_style)
wm.title = 'World Population in 2016, by Country'
wm.add('0-10m',cc_pops_1)
wm.add("10m-1bn",cc_pops_2)
wm.add(">1bn",cc_pops_3)
wm.render_to_file('world_population.svg')
|
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
from .lookahead import *
from .symbol import *
from .structures import *
from .parsing import *
from .parsing_structure import *
from .interface import *
from .compact_all import *
from . import utils
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
import os
from revolver import directory as dir
from revolver import core
def repository_name():
command = "grep 'url' .git/config | cut -d':' -f2"
name = core.local(command, capture=True)
# Get the basename (e.g. github/repo => repo)
name = name.split("/")[-1]
# Also strip the sometimes optional .git extension
if name.endswith(".git"):
name = name[:-4]
return name
def create_archive(revision):
tmp_folder = dir.temp_local()
tmp_tar = os.path.join(tmp_folder, 'repo.tar.gz')
core.local(
'git archive --format=tar %(rev)s | gzip > %(tar)s'
% {'rev': revision, 'tar': tmp_tar}
)
return tmp_tar
def revparse(revision):
return core.local('git rev-parse %s' % revision, capture=True)
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Endpoint interfaces for working with streams.
The endpoint interfaces in this module provide endpoint interfaces suitable for
connecting streams to USB endpoints.
"""
from amaranth import *
from usb_protocol.types import USBDirection
from ...stream import SuperSpeedStreamInterface
from ..protocol.endpoint import SuperSpeedEndpointInterface
class SuperSpeedStreamInEndpoint(Elaboratable):
""" Endpoint interface that transmits a simple data stream to a host.
This interface is suitable for a single bulk or interrupt endpoint.
This endpoint interface will automatically generate ZLPs when a stream packet would end without
a short data packet. If the stream's ``last`` signal is tied to zero, then a continuous stream of
maximum-length-packets will be sent with no inserted ZLPs.
This implementation is double buffered; and can store a single packet's worth of data while transmitting
a second packet. Bursting is currently not supported.
Attributes
----------
stream: SuperSpeedStreamInterface, input stream
Full-featured stream interface that carries the data we'll transmit to the host.
interface: SuperSpeedEndpointInterface
Communications link to our USB device.
Parameters
----------
endpoint_number: int
The endpoint number (not address) this endpoint should respond to.
max_packet_size: int
The maximum packet size for this endpoint. Should match the wMaxPacketSize provided in the
USB endpoint descriptor.
"""
SEQUENCE_NUMBER_BITS = 5
def __init__(self, *, endpoint_number, max_packet_size=1024):
self._endpoint_number = endpoint_number
self._max_packet_size = max_packet_size
#
# I/O port
#
self.stream = SuperSpeedStreamInterface()
self.interface = SuperSpeedEndpointInterface()
def elaborate(self, platform):
m = Module()
interface = self.interface
handshakes_in = interface.handshakes_in
handshakes_out = interface.handshakes_out
# Parameters for later use.
data_width = len(self.stream.data)
bytes_per_word = data_width // 8
buffer_depth = self._max_packet_size // bytes_per_word
#
        # Transceiver sequencing.
#
# Keep track of the sequence number used as we're transmitting.
sequence_number = Signal(self.SEQUENCE_NUMBER_BITS)
# Create a signal equal to the next sequence number; for easy comparisons.
next_sequence_number = Signal.like(sequence_number)
m.d.comb += next_sequence_number.eq(sequence_number + 1)
# Advance the sequence number after transmission, or reset it when the endpoint is reset.
advance_sequence = Signal()
with m.If(interface.ep_reset):
m.d.ss += sequence_number.eq(0)
with m.Elif(advance_sequence):
m.d.ss += sequence_number.eq(next_sequence_number)
#
# Transmit buffer.
#
        # Our USB connection imposes a few requirements on our stream:
        # 1) we must be able to transmit packets at a full rate; i.e. ``valid``
# must be asserted from the start to the end of our transfer; and
# 2) we must be able to re-transmit data if a given packet is not ACK'd.
#
# Accordingly, we'll buffer a full USB packet of data, and then transmit
        # it once either 1) our buffer is full, or 2) the transfer ends (last=1).
#
# This implementation is double buffered; so a buffer fill can be pipelined
# with a transmit.
#
ping_pong_toggle = Signal()
# We'll create two buffers; so we can fill one as we empty the other.
# Since each buffer will be used for every other transaction, we'll use a simple flag to identify
# which of our "ping-pong" buffers is currently being targeted.
buffer = Array(Memory(width=data_width, depth=buffer_depth, name=f"transmit_buffer_{i}") for i in range(2))
buffer_write_ports = Array(buffer[i].write_port(domain="ss") for i in range(2))
buffer_read_ports = Array(buffer[i].read_port(domain="ss", transparent=False) for i in range(2))
m.submodules.read_port_0, m.submodules.read_port_1 = buffer_read_ports
m.submodules.write_port_0, m.submodules.write_port_1 = buffer_write_ports
# Create values equivalent to the buffer numbers for our read and write buffer; which switch
# whenever we swap our two buffers.
write_buffer_number = ping_pong_toggle
read_buffer_number = ~ping_pong_toggle
# Create a shorthand that refers to the buffer to be filled; and the buffer to send from.
# We'll call these the Read and Write buffers.
buffer_write = buffer_write_ports[write_buffer_number]
buffer_read = buffer_read_ports[read_buffer_number]
# Buffer state tracking:
# - Our ``fill_count`` keeps track of how much data is stored in a given buffer.
# - Our ``stream_ended`` bit keeps track of whether the stream ended while filling up
# the given buffer. This indicates that the buffer cannot be filled further; and, when
# ``generate_zlps`` is enabled, is used to determine if the given buffer should end in
# a short packet; which determines whether ZLPs are emitted.
buffer_fill_count = Array(Signal(range(0, self._max_packet_size + 1)) for _ in range(2))
buffer_stream_ended = Array(Signal(name=f"stream_ended_in_buffer{i}") for i in range(2))
# Create shortcuts to active fill_count / stream_ended signals for the buffer being written.
write_fill_count = buffer_fill_count[write_buffer_number]
write_stream_ended = buffer_stream_ended[write_buffer_number]
# Create shortcuts to the fill_count / stream_ended signals for the packet being sent.
read_fill_count = buffer_fill_count[read_buffer_number]
read_stream_ended = buffer_stream_ended[read_buffer_number]
# Keep track of our current send position; which determines where we are in the packet.
send_position = Signal(range(0, self._max_packet_size + 1))
# Shortcut names.
in_stream = self.stream
out_stream = self.interface.tx
# We're ready to receive data iff we have space in the buffer we're currently filling.
m.d.comb += [
in_stream.ready.eq((write_fill_count + 4 <= self._max_packet_size) & ~write_stream_ended),
buffer_write.en.eq(in_stream.valid.any() & in_stream.ready),
]
# Increment our fill count whenever we accept new data;
# based on the number of valid bits we have.
with m.If(buffer_write.en):
with m.Switch(in_stream.valid):
with m.Case(0b0001):
m.d.ss += write_fill_count.eq(write_fill_count + 1)
with m.Case(0b0011):
m.d.ss += write_fill_count.eq(write_fill_count + 2)
with m.Case(0b0111):
m.d.ss += write_fill_count.eq(write_fill_count + 3)
with m.Case(0b1111):
m.d.ss += write_fill_count.eq(write_fill_count + 4)
# If the stream ends while we're adding data to the buffer, mark this as an ended stream.
with m.If(in_stream.last & buffer_write.en):
m.d.ss += write_stream_ended.eq(1)
# Use our memory's two ports to capture data from our transfer stream; and two to emit packets
# into our packet stream. Since we'll never receive to anywhere else, or transmit to anywhere else,
# we can just unconditionally connect these.
m.d.comb += [
# We'll only ever -write- data from our input stream...
buffer_write_ports[0].data .eq(in_stream.payload),
buffer_write_ports[0].addr .eq(write_fill_count >> 2),
buffer_write_ports[1].data .eq(in_stream.payload),
buffer_write_ports[1].addr .eq(write_fill_count >> 2),
# ... and we'll only ever -send- data from the Read buffer; in the SEND_PACKET state.
buffer_read.addr .eq(send_position),
]
#
# Transmit controller.
#
# Stores whether the last packet transmitted was a ZLP. This bit of state determines how
        # retransmission behaves.
last_packet_was_zlp = Signal()
# Stores whether we'll need to send an ERDY packet before we send any additional data.
# If we send an NRDY packet indicating that we have no data for the host, the host will
# stop polling this endpoint until an ERDY packet is sent [USB3.2r1: 8.10.1]. We'll need
# to send an ERDY packet to have it resume polling.
erdy_required = Signal()
# Shortcut for when we need to deal with an in token.
# Note that, for USB3, an IN token is an ACK that contains a non-zero ``number_of_packets``.
is_to_us = (handshakes_in.endpoint_number == self._endpoint_number)
is_in_token = (handshakes_in.number_of_packets != 0)
ack_received = handshakes_in.ack_received & is_to_us
in_token_received = ack_received & is_in_token
with m.FSM(domain='ss'):
# WAIT_FOR_DATA -- We don't yet have a full packet to transmit, so we'll capture data
# to fill the our buffer. At full throughput, this state will never be reached after
# the initial post-reset fill.
with m.State("WAIT_FOR_DATA"):
# We can't yet send data; so we'll send an NRDY transaction packet.
with m.If(in_token_received):
m.d.comb += handshakes_out.send_nrdy .eq(1)
m.d.ss += erdy_required .eq(1)
# If we have valid data that will end our packet, we're no longer waiting for data.
# We'll now wait for the host to request data from us.
packet_complete = (write_fill_count + 4 >= self._max_packet_size)
will_end_packet = packet_complete | in_stream.last
with m.If(in_stream.valid & will_end_packet):
# If we've just finished a packet, we now have data we can send!
with m.If(packet_complete | in_stream.last):
m.d.ss += [
# We're now ready to take the data we've captured and _transmit_ it.
# We'll swap our read and write buffers.
ping_pong_toggle.eq(~ping_pong_toggle),
# Mark our current stream as no longer having ended.
read_stream_ended .eq(0)
]
# If we've already sent an NRDY token, we'll need to request an IN token
# before the host will be willing to send us one.
with m.If(erdy_required | in_token_received):
m.next = "REQUEST_IN_TOKEN"
# Otherwise, we can wait for an IN token directly.
with m.Else():
m.next = "WAIT_TO_SEND"
# REQUEST_IN_TOKEN -- we now have at least a buffer full of data to send; but
# we've sent a NRDY token to the host; and thus the host is no longer polling for data.
# We'll send an ERDY token to the host, in order to request it poll us again.
with m.State("REQUEST_IN_TOKEN"):
# Send our ERDY token...
m.d.comb += handshakes_out.send_erdy.eq(1)
# ... and once that send is complete, move on to waiting for an IN token.
with m.If(handshakes_out.done):
m.next = "WAIT_TO_SEND"
# WAIT_TO_SEND -- we now have at least a buffer full of data to send; we'll
# need to wait for an IN token to send it.
with m.State("WAIT_TO_SEND"):
# Once we get an IN token, move to sending a packet.
with m.If(in_token_received):
# If we have a packet to send, send it.
with m.If(read_fill_count):
m.next = "SEND_PACKET"
m.d.ss += [
last_packet_was_zlp .eq(0)
]
# Otherwise, we entered a transmit path without any data in the buffer.
with m.Else():
# ... send a ZLP...
m.d.comb += interface.tx_zlp.eq(1)
# ... and clear the need to follow up with one, since we've just sent a short packet.
m.d.ss += [
read_stream_ended .eq(0),
last_packet_was_zlp .eq(1)
]
# We've now completed a packet send; so wait for it to be acknowledged.
m.next = "WAIT_FOR_ACK"
# SEND_PACKET -- we now have enough data to send _and_ have received an IN token.
# We can now send our data over to the host.
with m.State("SEND_PACKET"):
m.d.comb += [
# Apply our general transfer information.
interface.tx_direction .eq(USBDirection.IN),
interface.tx_sequence_number .eq(sequence_number),
interface.tx_length .eq(read_fill_count),
interface.tx_endpoint_number .eq(self._endpoint_number),
]
with m.If(~out_stream.valid.any() | out_stream.ready):
# Once we emitted a word of data for our receiver, move to the next word in our packet.
m.d.ss += send_position .eq(send_position + 1)
m.d.comb += buffer_read.addr .eq(send_position + 1)
                    # We're on our last word whenever the next word would contain the end of our data.
first_word = (send_position == 0)
last_word = ((send_position + 1) << 2 >= read_fill_count)
m.d.ss += [
# Block RAM often has a large clock-to-dout delay; register the output to
# improve timings.
out_stream.payload .eq(buffer_read.data),
# Let our transmitter know the packet boundaries.
out_stream.first .eq(first_word),
out_stream.last .eq(last_word),
]
# Figure out which bytes of our stream are valid. Normally; this is all of them,
# but the last word is a special case, which we'll have to handle based on how
# many bytes we expect to be valid in the word.
with m.If(last_word):
# We can figure out how many bytes are valid by looking at the last two bits of our
# count; which happen to be the mod-4 remainder.
with m.Switch(read_fill_count[0:2]):
# If we're evenly divisible by four, all four bytes are valid.
with m.Case(0):
m.d.ss += out_stream.valid.eq(0b1111)
                            # Otherwise, our remainder tells us how many bytes are valid.
with m.Case(1):
m.d.ss += out_stream.valid.eq(0b0001)
with m.Case(2):
m.d.ss += out_stream.valid.eq(0b0011)
with m.Case(3):
m.d.ss += out_stream.valid.eq(0b0111)
# For every word that's not the last one, we know that all bytes are valid.
with m.Else():
m.d.ss += out_stream.valid.eq(0b1111)
# If we've just sent our last word, we're now ready to wait for a response
# from our host.
with m.If(last_word):
m.next = 'WAIT_FOR_ACK'
# WAIT_FOR_ACK -- We've just sent a packet; but don't know if the host has
# received it correctly. We'll wait to see if the host ACKs.
with m.State("WAIT_FOR_ACK"):
# We're done transmitting data.
m.d.ss += out_stream.valid.eq(0)
# Reset our send-position for the next data packet.
m.d.ss += send_position .eq(0)
m.d.comb += buffer_read.addr.eq(0)
# In USB3, an ACK handshake can act as an ACK, an error indicator, and/or an IN token.
# This helps to maximize bus bandwidth, but means we have to handle each case carefully.
with m.If(ack_received):
# Figure out how the sequence advertisement in our ACK relates to our current sequence number.
sequence_advancing = (handshakes_in.next_sequence == next_sequence_number)
# Our simplest case is actually when an error occurs, which is indicated by receiving
# an ACK packet with Retry set to `1`. For now, we'll also treat a repeated sequence number
# as an indication that we need to re-try the given packet.
with m.If(handshakes_in.retry_required | ~sequence_advancing):
# In this case, we'll re-transmit the relevant data, either by sending another ZLP...
with m.If(last_packet_was_zlp):
m.d.comb += [
interface.tx_zlp.eq(1),
advance_sequence.eq(1),
]
# ... or by moving right back into sending a data packet.
with m.Else():
m.next = 'SEND_PACKET'
# Otherwise, if our ACK contains the next sequence number, then this is an acknowledgement
# of the previous packet [USB3.2r1: 8.12.1.2].
with m.Else():
# We no longer need to keep the data that's been acknowledged; clear it.
m.d.ss += read_fill_count.eq(0)
# Figure out if we'll need to follow up with a ZLP. If we have ZLP generation enabled,
# we'll make sure we end on a short packet. If this is max-packet-size packet _and_ our
# transfer ended with this packet; we'll need to inject a ZLP.
follow_up_with_zlp = \
(read_fill_count == self._max_packet_size) & read_stream_ended
# If we're following up with a ZLP, we have two cases, depending on whether this ACK
# is also requesting another packet.
with m.If(follow_up_with_zlp):
                            # If the host is also requesting another packet, we can send said ZLP immediately,
                            # and then continue waiting for the next ACK.
with m.If(is_in_token):
# ... send a ZLP...
m.d.comb += [
interface.tx_zlp.eq(1),
advance_sequence.eq(1),
]
# ... and clear the need to follow up with one, since we've just sent a short packet.
m.d.ss += [
read_stream_ended .eq(0),
last_packet_was_zlp .eq(1)
]
# Otherwise, we'll wait for an attempt to send data before we generate a ZLP.
with m.Else():
m.next = "WAIT_TO_SEND"
# Otherwise, there's a possibility we already have a packet-worth of data waiting
# for us in our "write buffer", which we've been filling in the background.
# If this is the case, we'll flip which buffer we're working with, and then
# ready ourselves for transmit.
packet_completing = in_stream.valid & (write_fill_count + 4 >= self._max_packet_size)
with m.Elif(~in_stream.ready | packet_completing):
m.d.comb += [
advance_sequence .eq(1),
]
m.d.ss += [
ping_pong_toggle .eq(~ping_pong_toggle),
read_stream_ended .eq(0),
]
with m.If(is_in_token):
m.d.ss += [
last_packet_was_zlp .eq(0)
]
m.next = "SEND_PACKET"
with m.Else():
m.next = "WAIT_TO_SEND"
# If neither of the above conditions are true; we now don't have enough data to send.
# We'll wait for enough data to transmit.
with m.Else():
m.next = "WAIT_FOR_DATA"
return m
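# A minimal usage sketch (illustrative only; the surrounding gateware, the `ss`
# clock domain, and the exact device-side API are assumptions not shown in this file):
#
#   ep = SuperSpeedStreamInEndpoint(endpoint_number=1, max_packet_size=1024)
#   m.submodules.stream_in_ep = ep
#   # ... connect a data producer to ``ep.stream`` and attach ``ep.interface`` to
#   # the device's endpoint multiplexer (e.g. via an add_endpoint-style helper).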
|
from .server import Hardwire
from .signal import Signal
|
from typing import Iterable, Sequence
from ai.backend.client.output.fields import keypair_resource_policy_fields
from ai.backend.client.output.types import FieldSpec
from .base import api_function, BaseFunction
from ..session import api_session
__all__ = (
    'KeypairResourcePolicy',
)
_default_list_fields = (
keypair_resource_policy_fields['name'],
keypair_resource_policy_fields['created_at'],
keypair_resource_policy_fields['total_resource_slots'],
keypair_resource_policy_fields['max_concurrent_sessions'],
keypair_resource_policy_fields['max_vfolder_count'],
keypair_resource_policy_fields['max_vfolder_size'],
keypair_resource_policy_fields['idle_timeout'],
keypair_resource_policy_fields['max_containers_per_session'],
keypair_resource_policy_fields['allowed_vfolder_hosts'],
)
_default_detail_fields = (
keypair_resource_policy_fields['name'],
keypair_resource_policy_fields['created_at'],
keypair_resource_policy_fields['total_resource_slots'],
keypair_resource_policy_fields['max_concurrent_sessions'],
keypair_resource_policy_fields['max_vfolder_count'],
keypair_resource_policy_fields['max_vfolder_size'],
keypair_resource_policy_fields['idle_timeout'],
keypair_resource_policy_fields['max_containers_per_session'],
keypair_resource_policy_fields['allowed_vfolder_hosts'],
)
class KeypairResourcePolicy(BaseFunction):
"""
Provides interactions with keypair resource policy.
"""
def __init__(self, access_key: str):
self.access_key = access_key
@api_function
@classmethod
async def create(cls, name: str,
default_for_unspecified: int,
total_resource_slots: int,
max_concurrent_sessions: int,
max_containers_per_session: int,
max_vfolder_count: int,
max_vfolder_size: int,
idle_timeout: int,
allowed_vfolder_hosts: Sequence[str],
fields: Iterable[str] = None) -> dict:
"""
Creates a new keypair resource policy with the given options.
You need an admin privilege for this operation.
"""
if fields is None:
fields = ('name',)
q = 'mutation($name: String!, $input: CreateKeyPairResourcePolicyInput!) {' \
+ \
' create_keypair_resource_policy(name: $name, props: $input) {' \
' ok msg resource_policy { $fields }' \
' }' \
'}'
q = q.replace('$fields', ' '.join(fields))
variables = {
'name': name,
'input': {
'default_for_unspecified': default_for_unspecified,
'total_resource_slots': total_resource_slots,
'max_concurrent_sessions': max_concurrent_sessions,
'max_containers_per_session': max_containers_per_session,
'max_vfolder_count': max_vfolder_count,
'max_vfolder_size': max_vfolder_size,
'idle_timeout': idle_timeout,
'allowed_vfolder_hosts': allowed_vfolder_hosts,
},
}
data = await api_session.get().Admin._query(q, variables)
return data['create_keypair_resource_policy']
@api_function
@classmethod
async def update(cls, name: str,
default_for_unspecified: int,
total_resource_slots: int,
max_concurrent_sessions: int,
max_containers_per_session: int,
max_vfolder_count: int,
max_vfolder_size: int,
idle_timeout: int,
allowed_vfolder_hosts: Sequence[str]) -> dict:
"""
Updates an existing keypair resource policy with the given options.
You need an admin privilege for this operation.
"""
q = 'mutation($name: String!, $input: ModifyKeyPairResourcePolicyInput!) {' \
+ \
' modify_keypair_resource_policy(name: $name, props: $input) {' \
' ok msg' \
' }' \
'}'
variables = {
'name': name,
'input': {
'default_for_unspecified': default_for_unspecified,
'total_resource_slots': total_resource_slots,
'max_concurrent_sessions': max_concurrent_sessions,
'max_containers_per_session': max_containers_per_session,
'max_vfolder_count': max_vfolder_count,
'max_vfolder_size': max_vfolder_size,
'idle_timeout': idle_timeout,
'allowed_vfolder_hosts': allowed_vfolder_hosts,
},
}
data = await api_session.get().Admin._query(q, variables)
return data['modify_keypair_resource_policy']
@api_function
@classmethod
async def delete(cls, name: str) -> dict:
"""
Deletes an existing keypair resource policy with given name.
You need an admin privilege for this operation.
"""
q = 'mutation($name: String!) {' \
+ \
' delete_keypair_resource_policy(name: $name) {' \
' ok msg' \
' }' \
'}'
variables = {
'name': name,
}
data = await api_session.get().Admin._query(q, variables)
return data['delete_keypair_resource_policy']
@api_function
@classmethod
async def list(
cls,
fields: Sequence[FieldSpec] = _default_list_fields,
) -> Sequence[dict]:
'''
Lists the keypair resource policies.
You need an admin privilege for this operation.
'''
q = 'query {' \
' keypair_resource_policies {' \
' $fields' \
' }' \
'}'
q = q.replace('$fields', ' '.join(f.field_ref for f in fields))
data = await api_session.get().Admin._query(q)
return data['keypair_resource_policies']
@api_function
async def info(
self,
name: str,
fields: Sequence[FieldSpec] = _default_detail_fields,
) -> dict:
"""
Returns the resource policy's information.
:param fields: Additional per-agent query fields to fetch.
.. versionadded:: 19.03
"""
q = 'query($name: String) {' \
' keypair_resource_policy(name: $name) {' \
' $fields' \
' }' \
'}'
q = q.replace('$fields', ' '.join(f.field_ref for f in fields))
variables = {
'name': name,
}
data = await api_session.get().Admin._query(q, variables)
return data['keypair_resource_policy']
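# Illustrative usage sketch (hypothetical; assumes an active API session so that
# ``api_session.get()`` resolves, e.g. inside the SDK's async Session context):
#
#   policies = await KeypairResourcePolicy.list()
#   detail = await KeypairResourcePolicy('ACCESS-KEY').info('default')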
|
#!/usr/bin/python
#
# -*- coding: utf-8 -*-
import sys, re, hashlib
hashes = raw_input('\nPlease specify hash value: ')
wordlist = raw_input('\nPlease specify wordlist path: ')
words = open(wordlist, "r")
words = words.readlines()
print "\n",len(words),"words"
for word in words:
    hashed = hashlib.md5(word[:-1])
    value = hashed.hexdigest()
    if hashes == value:
        print "Password is: " + word, "\n"
        sys.exit(0)
print "Sorry....Hash not found\n"
sys.exit(0)
|
# -*- coding: utf-8 -*-
import urlparse
from django.shortcuts import render, redirect, get_object_or_404
from django import forms
from shootr.core.models import Bundle, Screenshot
from shootr.core.utils import make_screenshot
class ScreenshotForm(forms.Form):
urls = forms.CharField(widget=forms.Textarea(attrs={'class': 'span12'}), label=u'Имена сайтов', )
def clean_urls(self):
urls = [
self.clean_single_url(url) for url in \
self.cleaned_data['urls'].strip().split()
]
return urls
def clean_single_url(self, url):
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if scheme != 'http':
scheme = 'http'
if not netloc:
netloc, path = path, ''
result = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
return result
def save(self):
bundle = Bundle.objects.create()
for url in self.cleaned_data['urls']:
shot = Screenshot.objects.create(bundle=bundle, url=url)
make_screenshot(shot)
return bundle
def index(request):
form = ScreenshotForm(request.POST or None)
if form.is_valid():
bundle = form.save()
return redirect('get_bundle', bundle.id)
return render(request, 'core/index.html', {'form': form})
def get_bundle(request, bundle_id):
bundle = get_object_or_404(Bundle, pk=bundle_id)
return render(request, 'core/get_bundle.html', {'bundle': bundle})
|
n = int(input('Input n: '))
# square
print('square')
for i in range(n):
print('*' * n, end='')
print()
# triangle
print()
print('triangle')
start_point = 2
triangle_height = n // 2
if n % 2 != 0:
triangle_height = (n + 1) // 2
start_point = 1
for i in range(triangle_height):
print(' ' * ((n - start_point) // 2) + '*' * start_point)
start_point += 2
# rhombus
print()
print('rhombus')
start_point = 2
triangle_height = n // 2
if n % 2 != 0:
triangle_height = (n + 1) // 2
start_point = 1
for i in range(triangle_height):
print(' ' * ((n - start_point) // 2) + '*' * start_point)
if i < triangle_height - 1:
start_point += 2
for i in range(triangle_height - 1, 0, -1):
start_point -= 2
print(' ' * ((n - start_point) // 2) + '*' * start_point)
# fir
print()
print('fir')
start_point = 2
triangle_height = n // 2
if n % 2 != 0:
triangle_height = (n + 1) // 2
start_point = 1
next_point = start_point
for j in range(3):
start_point = next_point
if j == 0:
for i in range(triangle_height):
print(' ' * ((n - start_point) // 2) + '*' * start_point)
if i < triangle_height - 1:
start_point += 2
else:
start_point+=2
for i in range(1, triangle_height):
print(' ' * ((n - start_point) // 2) + '*' * start_point)
if i < triangle_height - 1:
start_point += 2
# stairs
print()
print('stairs')
for i in range(n):
print('*' * (i+1)*2, end='')
print()
print('*' * (i + 1) * 2, end='')
print()
|
import os
import json
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.abspath(__file__)
for __ in range(3):
BASE_DIR = os.path.dirname(BASE_DIR)
CONFIG = {}
config_file = os.path.join(BASE_DIR, 'config.json')
if config_file and os.path.isfile(config_file):
with open(config_file) as f:
CONFIG = json.loads(f.read())
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = CONFIG.get('django_secret_key')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'ncdjango',
'rest_framework',
'tastypie',
'djcelery', # Celery backend, on linux we should use django_celery_results
#'django_celery_results',
'landscapesim'
)
MIDDLEWARE_CLASSES = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'landscapesim_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'landscapesim_project.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': CONFIG.get('db_name', 'landscapesim'),
'USER': CONFIG.get('db_user', 'landscapesim'),
'PASSWORD': CONFIG.get('db_password'),
'HOST': CONFIG.get('db_host', '127.0.0.1')
}
}
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 100
}
NC_INSTALLED_INTERFACES = (
'ncdjango.interfaces.data',
'ncdjango.interfaces.arcgis_extended',
'ncdjango.interfaces.arcgis',
'landscapesim.tiles'
)
# pdfkit binary
WKHTMLTOPDF_BIN = CONFIG.get('WKHTMLTOPDF_BIN', 'wkhtmltopdf')
CELERY_TRACK_STARTED = True
CELERY_ROUTES = {
'landscapesim.async.tasks.post_process_results': {'queue': 'periodic-tasks'},
'landscapesim.async.tasks.look_for_new_scenario': {'queue': 'periodic-tasks'},
'landscapesim.async.tasks.cleanup_temp_files': {'queue': 'periodic-tasks'},
'landscapesim.async.tasks.run_model': {'queue': 'run-model'}
}
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import math
import mox
from pants.base.hash_utils import Sharder, hash_all, hash_file
from pants.util.contextutil import temporary_file
class TestHashUtils(mox.MoxTestBase):
def setUp(self):
super(TestHashUtils, self).setUp()
self.digest = self.mox.CreateMockAnything()
def test_hash_all(self):
self.digest.update('jake')
self.digest.update('jones')
self.digest.hexdigest().AndReturn('42')
self.mox.ReplayAll()
self.assertEqual('42', hash_all(['jake', 'jones'], digest=self.digest))
def test_hash_file(self):
self.digest.update('jake jones')
self.digest.hexdigest().AndReturn('1137')
self.mox.ReplayAll()
with temporary_file() as fd:
fd.write('jake jones')
fd.close()
self.assertEqual('1137', hash_file(fd.name, digest=self.digest))
def test_compute_shard(self):
# Spot-check a couple of values, to make sure compute_shard doesn't do something
# completely degenerate.
self.assertEqual(31, Sharder.compute_shard('', 42))
self.assertEqual(35, Sharder.compute_shard('foo', 42))
self.assertEqual(5, Sharder.compute_shard('bar', 42))
def test_compute_shard_distribution(self):
# Check that shard distribution isn't obviously broken.
nshards = 7
mean_samples_per_shard = 10000
nsamples = nshards * mean_samples_per_shard
distribution = [0] * nshards
for n in range(0, nsamples):
shard = Sharder.compute_shard(str(n), nshards)
distribution[shard] += 1
variance = sum([(x - mean_samples_per_shard) ** 2 for x in distribution]) / nshards
stddev = math.sqrt(variance)
# We arbitrarily assert that a stddev of less than 1% of the mean is good enough
# for sanity-checking purposes.
self.assertLess(stddev, 100)
def test_sharder(self):
def check(spec, expected_shard, expected_nshards):
sharder = Sharder(spec)
self.assertEquals(expected_shard, sharder.shard)
self.assertEquals(expected_nshards, sharder.nshards)
def check_bad_spec(spec):
self.assertRaises(Sharder.InvalidShardSpec, lambda: Sharder(spec))
check('0/1', 0, 1)
check('0/2', 0, 2)
check('1/2', 1, 2)
check('0/100', 0, 100)
check('99/100', 99, 100)
check_bad_spec('0/0')
check_bad_spec('-1/0')
check_bad_spec('0/-1')
check_bad_spec('1/1')
check_bad_spec('2/1')
check_bad_spec('100/100')
check_bad_spec('1/2/3')
check_bad_spec('/1')
check_bad_spec('1/')
check_bad_spec('/')
check_bad_spec('foo/1')
check_bad_spec('1/foo')
|
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.preprocessing import OneHotEncoder
import numpy as np
import mglearn
import matplotlib.pylab as plt
X, y = mglearn.datasets.make_wave(n_samples=100)
line = np.linspace(-3, 3, 1000, endpoint=False).reshape(-1, 1)
bins = np.linspace(-3, 3, 11)
encoder = OneHotEncoder(sparse=False)
which_bin = np.digitize(X, bins=bins)
encoder.fit(which_bin)
X_binned = encoder.transform(which_bin)
line_binned = encoder.transform(np.digitize(line, bins=bins))
reg = LinearRegression().fit(X_binned, y)
plt.plot(line, reg.predict(line_binned), label='binned regression')
reg = DecisionTreeRegressor(min_samples_split=3).fit(X_binned, y)
plt.plot(line, reg.predict(line_binned), '--', label='decision tree')
plt.plot(X[:, 0], y, 'o', c='k')
plt.vlines(bins, -3, 3, linewidth=1, alpha=.2)
plt.legend(loc="best")
plt.ylabel("regression output")
plt.xlabel("feature")
plt.show()
|
from randimal import Randimal, DisplayOption
if __name__ == "__main__":
randimal1 = Randimal(2)
print("Default: " + randimal1.get())
randimal1 = Randimal(2, displayOption=DisplayOption.LOWERCASE_HYPHENATED)
print("LCH: " + randimal1.get())
randimal1 = Randimal(2, displayOption=DisplayOption.CAMELCASE_HYPHENATED)
print("CCH: " + randimal1.get())
randimal1 = Randimal(2, displayOption=DisplayOption.CAMELCASE_SPACED)
print("CCS: " + randimal1.get())
randimal1 = Randimal(2, displayOption=DisplayOption.LOWERCASE_SPACED)
print("LCS: " + randimal1.get())
|
def get_char(text,pos):
if pos<0 or pos>=len(text):
return None
c=text[pos]
if c>='0' and c<='9':
return 'DIGIT'
return c
def scan(text,transitions,accepts,start):
pos = 0
state = start
while True :
c=get_char(text,pos)
if state in transitions and c in transitions[state]:
state = transitions[state][c]
pos+=1
else:
if state in accepts:
return {'token': accepts[state],'lexeme':text[:pos]}
return None
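# The transition table below encodes a small DFA for floating-point literals:
#   s0 - start state
#   s1 - digits seen, no dot yet
#   s2 - a leading dot seen, no digit yet
#   s3 - accepting state: digits followed by a dot, or a dot followed by digits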
transitions = {
's0':{'DIGIT':'s1','.':'s2'},
's1':{'.':'s3','DIGIT':'s1'},
's2':{'DIGIT':'s3'},
's3':{'DIGIT':'s3'}
}
accepts = {'s3':'FLOAT_TOKEN'}
#text = input('Give')
#m=scan(text,transitions,accepts,'s0')
#print(m)
for test in ['12.456','6789.','.66998','1234','.']:
m = scan(test,transitions,accepts,'s0')
print("Testing '{}'\nResult: {}\n".format(test,m))
|
from .cls_dataset import ClsDataset
from .transform import train_transform, val_transform
|
from Request import *
from Client import Client
class RequestManager:
def __init__(self):
self.requests = [
Handshake,
Authentication,
ListUsers,
SendMessage,
Logout,
Unknown
]
def get_request(self, client: Client, raw_request: str):
raw_request = raw_request.strip()
for request_class in self.requests:
if request_class.is_of_type(raw_request):
return request_class(client, raw_request)
return Unknown(client, None)
|
from rest_framework import viewsets
from photos.models import Photo, Comment
from photos.serializers import PhotoSerializer, CommentSerializer
class PhotoViewSet(viewsets.ModelViewSet):
queryset = Photo.objects.all()
serializer_class = PhotoSerializer
def get_queryset(self):
return Photo.objects.filter(user=self.kwargs['users_pk'])
class CommentViewSet(viewsets.ModelViewSet):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
# def get_queryset(self):
# return Comment.objects.filter(user=self.kwargs['photo_pk'])
|
# coding: utf-8
from __future__ import print_function, absolute_import
import logging
import re
import json
import requests
import uuid
import time
import os
import argparse
import uuid
import datetime
import socket
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.io.filesystems import FileSystems
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
TABLE_SCHEMA = (
'idkey:STRING, '
'fecha:STRING, '
'ZONA:STRING, '
'CODIGO_DE_CIUDAD:STRING, '
'CEDULA_CIUDADANIA:STRING, '
'CODIGO_INTERNO:STRING, '
'TIPO_COMPRADORA:STRING, '
'CUSTOMER_CLASS:STRING, '
'CUPO:STRING, '
'NUMERO_DE_OBLIGACION:STRING, '
'VALOR_FACTURA:STRING, '
'FECHA_FACTURA:STRING, '
'FECHA_VENCIMIENTO:STRING, '
'VALOR_SALDO_EN_CARTERA:STRING, '
'DIAS_DE_VENCIMIENTO:STRING, '
'CAMPANA_ORIGINAL:STRING, '
'ULTIMA_CAMPANA:STRING, '
'CODIGO:STRING, '
'NOMBRE1:STRING, '
'APELLIDOS:STRING, '
'TELEFONO_1:STRING, '
'CELULAR1:STRING, '
'TEL_CEL_2:STRING, '
'E_MAIL:STRING, '
'AUTORIZO_ENVIO_DE_MENSAJES_DE_TEXTO_A_MI_CELULAR_SI_NO:STRING, '
'AUTORIZO_CORREOS_DE_VOZ_A_MI_CELULAR_SI_NO:STRING, '
'AUTORIZO_ENVIO_DE_E_MAIL_SI_NO:STRING, '
'DIRECCION1:STRING, '
'BARRIO1:STRING, '
'CIUDAD1:STRING, '
'DEPARTAMENTO1:STRING, '
'DIRECCION2:STRING, '
'BARRIO2:STRING, '
'CIUDAD2:STRING, '
'DEPARTAMENTO2:STRING, '
'NOMBRE2:STRING, '
'APELLIDO1:STRING, '
'PARENTESCO1:STRING, '
'CELULAR2:STRING, '
'NOMBRE3:STRING, '
'APELLIDO2:STRING, '
'PARENTESCO2:STRING, '
'TELEFONO2:STRING, '
'CELULAR3:STRING, '
'DIRECCION3:STRING, '
'CIUDAD3:STRING, '
'DEPARTAMENTO3:STRING, '
'NOMBRE4:STRING, '
'APELLIDO3:STRING, '
'TELEFONO3:STRING, '
'CELULAR4:STRING, '
'DIRECCION4:STRING, '
'CIUDAD4:STRING, '
'DEPARTAMENTO4:STRING, '
'NOMBRE5:STRING, '
'APELLIDO4:STRING, '
'DIRECCION5:STRING, '
'TELEFONO4:STRING, '
'CELULAR5:STRING, '
'CIUDAD5:STRING, '
'DEPARTAMENTO5:STRING, '
'ABOGAD:STRING, '
'DIVSION:STRING, '
'PAIS:STRING, '
'FECHA_DE_PROXIMA_CONFERENCIA:STRING, '
'CODIGO_DE_GESTION:STRING, '
'FECHA_DE_GESTION:STRING, '
'FECHA_DE_PROMESA_DE_PAGO:STRING '
)
# ?
class formatearData(beam.DoFn):
def __init__(self, mifecha):
super(formatearData, self).__init__()
self.mifecha = mifecha
def process(self, element):
# print(element)
arrayCSV = element.split(';')
tupla= {'idkey' : str(uuid.uuid4()),
# 'fecha' : datetime.datetime.today().strftime('%Y-%m-%d'),
'fecha' : self.mifecha,
'ZONA' : arrayCSV[0].replace('"',''),
'CODIGO_DE_CIUDAD' : arrayCSV[1].replace('"',''),
'CEDULA_CIUDADANIA' : arrayCSV[2].replace('"',''),
'CODIGO_INTERNO' : arrayCSV[3].replace('"',''),
'TIPO_COMPRADORA' : arrayCSV[4].replace('"',''),
'CUSTOMER_CLASS' : arrayCSV[5].replace('"',''),
'CUPO' : arrayCSV[6].replace('"',''),
'NUMERO_DE_OBLIGACION' : arrayCSV[7].replace('"',''),
'VALOR_FACTURA' : arrayCSV[8].replace('"',''),
'FECHA_FACTURA' : arrayCSV[9].replace('"',''),
'FECHA_VENCIMIENTO' : arrayCSV[10].replace('"',''),
'VALOR_SALDO_EN_CARTERA' : arrayCSV[11].replace('"',''),
'DIAS_DE_VENCIMIENTO' : arrayCSV[12].replace('"',''),
'CAMPANA_ORIGINAL' : arrayCSV[13].replace('"',''),
'ULTIMA_CAMPANA' : arrayCSV[14].replace('"',''),
'CODIGO' : arrayCSV[15].replace('"',''),
'NOMBRE1' : arrayCSV[16].replace('"',''),
'APELLIDOS' : arrayCSV[17].replace('"',''),
'TELEFONO_1' : arrayCSV[18].replace('"',''),
'CELULAR1' : arrayCSV[19].replace('"',''),
'TEL_CEL_2' : arrayCSV[20].replace('"',''),
'E_MAIL' : arrayCSV[21].replace('"',''),
'AUTORIZO_ENVIO_DE_MENSAJES_DE_TEXTO_A_MI_CELULAR_SI_NO' : arrayCSV[22].replace('"',''),
'AUTORIZO_CORREOS_DE_VOZ_A_MI_CELULAR_SI_NO' : arrayCSV[23].replace('"',''),
'AUTORIZO_ENVIO_DE_E_MAIL_SI_NO' : arrayCSV[24].replace('"',''),
'DIRECCION1' : arrayCSV[25].replace('"',''),
'BARRIO1' : arrayCSV[26].replace('"',''),
'CIUDAD1' : arrayCSV[27].replace('"',''),
'DEPARTAMENTO1' : arrayCSV[28].replace('"',''),
'DIRECCION2' : arrayCSV[29].replace('"',''),
'BARRIO2' : arrayCSV[30].replace('"',''),
'CIUDAD2' : arrayCSV[31].replace('"',''),
'DEPARTAMENTO2' : arrayCSV[32].replace('"',''),
'NOMBRE2' : arrayCSV[33].replace('"',''),
'APELLIDO1' : arrayCSV[34].replace('"',''),
'PARENTESCO1' : arrayCSV[35].replace('"',''),
'CELULAR2' : arrayCSV[36].replace('"',''),
'NOMBRE3' : arrayCSV[37].replace('"',''),
'APELLIDO2' : arrayCSV[38].replace('"',''),
'PARENTESCO2' : arrayCSV[39].replace('"',''),
'TELEFONO2' : arrayCSV[40].replace('"',''),
'CELULAR3' : arrayCSV[41].replace('"',''),
'DIRECCION3' : arrayCSV[42].replace('"',''),
'CIUDAD3' : arrayCSV[43].replace('"',''),
'DEPARTAMENTO3' : arrayCSV[44].replace('"',''),
'NOMBRE4' : arrayCSV[45].replace('"',''),
'APELLIDO3' : arrayCSV[46].replace('"',''),
'TELEFONO3' : arrayCSV[47].replace('"',''),
'CELULAR4' : arrayCSV[48].replace('"',''),
'DIRECCION4' : arrayCSV[49].replace('"',''),
'CIUDAD4' : arrayCSV[50].replace('"',''),
'DEPARTAMENTO4' : arrayCSV[51].replace('"',''),
'NOMBRE5' : arrayCSV[52].replace('"',''),
'APELLIDO4' : arrayCSV[53].replace('"',''),
'DIRECCION5' : arrayCSV[54].replace('"',''),
'TELEFONO4' : arrayCSV[55].replace('"',''),
'CELULAR5' : arrayCSV[56].replace('"',''),
'CIUDAD5' : arrayCSV[57].replace('"',''),
'DEPARTAMENTO5' : arrayCSV[58].replace('"',''),
'ABOGAD' : arrayCSV[59].replace('"',''),
'DIVSION' : arrayCSV[60].replace('"',''),
'PAIS' : arrayCSV[61].replace('"',''),
'FECHA_DE_PROXIMA_CONFERENCIA' : arrayCSV[62].replace('"','')
}
return [tupla]
def run(archivo, mifecha):
gcs_path = "gs://ct-leonisa" #Definicion de la raiz del bucket
gcs_project = "contento-bi"
mi_runer = ("DirectRunner", "DataflowRunner")[socket.gethostname()=="contentobi"]
pipeline = beam.Pipeline(runner=mi_runer, argv=[
"--project", gcs_project,
"--staging_location", ("%s/dataflow_files/staging_location" % gcs_path),
"--temp_location", ("%s/dataflow_files/temp" % gcs_path),
"--output", ("%s/dataflow_files/output" % gcs_path),
"--setup_file", "./setup.py",
"--max_num_workers", "5",
"--subnetwork", "https://www.googleapis.com/compute/v1/projects/contento-bi/regions/us-central1/subnetworks/contento-subnet1"
# "--num_workers", "30",
# "--autoscaling_algorithm", "NONE"
])
# lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181206 1100.csv", skip_header_lines=1)
#lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181129 0800.csv", skip_header_lines=1)
lines = pipeline | 'Lectura de Archivo' >> ReadFromText(archivo, skip_header_lines=1)
transformed = (lines | 'Formatear Data' >> beam.ParDo(formatearData(mifecha)))
# lines | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_prej_small", file_name_suffix='.csv',shard_name_template='')
# transformed | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_seg", file_name_suffix='.csv',shard_name_template='')
#transformed | 'Escribir en Archivo' >> WriteToText("gs://ct-bancolombia/info-segumiento/info_carga_banco_seg",file_name_suffix='.csv',shard_name_template='')
transformed | 'Escritura a BigQuery leonisa' >> beam.io.WriteToBigQuery(
gcs_project + ":leonisa.prejuridico",
schema=TABLE_SCHEMA,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
)
# transformed | 'Borrar Archivo' >> FileSystems.delete('gs://ct-avon/prejuridico/AVON_INF_PREJ_20181111.TXT')
# 'Eliminar' >> FileSystems.delete (["archivos/Info_carga_avon.1.txt"])
jobObject = pipeline.run()
# jobID = jobObject.job_id()
return ("Corrio Full HD")
|
import speech_recognition as sr
import datetime
import wikipedia
import webbrowser
import main
import components.weatherInfo as weatherInfo
import components.getNews as getNews
def takeCommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
print(f"User said: {query}\n")
except Exception as e:
print("Say Again...")
return "None"
return query
def commands():
query = takeCommand().lower()
if 'wikipedia' in query:
main.speak('Searching Wikipedia...')
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=2)
print(results)
main.speak("According to wikipedia")
main.speak(results)
elif 'weather in' in query or 'temperature in' in query:
main.speak("Searching weather...")
try:
city_name = query.split("in ")[1]
response = weatherInfo.getInfo(city_name)
main.speak(response)
except Exception as e:
main.speak("Sorry, I couldn't get it...")
elif 'read news' in query:
main.speak("reading news...")
news = getNews.news()
print(news)
main.speak(news)
elif 'open youtube' in query:
webbrowser.open('youtube.com')
elif 'open google' in query:
webbrowser.open("google.com")
elif 'what is the time' in query or 'tell me the time' in query:
currentTime = datetime.datetime.now().strftime("%H:%M:%S")
main.speak(f"The current time is {currentTime}")
elif query == 'good' or query == 'very good' or query == 'you are very good':
main.speak("Ohh thank you so much... ")
elif 'how to stop you' in query:
main.speak("Just say stop to stop me")
elif 'stop' == query:
main.speak("Thank you for having me.\nHave a Good Day")
exit()
|
import sys  # to read command-line arguments
import requests  # for downloads
import os  # for directory creation
import shutil  # for file handling
from tqdm import tqdm  # progress bar used during downloads
from bs4 import BeautifulSoup as bs  # to search for specific elements in a web page
def telechargement(url, repertoire):
"""
Telecharge toutes les pages d'un chapitre donne par son url `url` et
stocke les pages dans le repertoire `repertoire`
"""
### Creation du repertoire sous la forme nomManga/numChapitre ###
os.makedirs(repertoire) #permet de creer le repertoire chapitre dans le repertoir nommanga
##############################
j=1
continu = True
while continu:
urlfinal= url + str(j)
        ### Extract the image URL from the page at the given URL ###
reponse1=requests.get(urlfinal)
if reponse1.ok:
soup = bs(reponse1.content, "html.parser")
for img in tqdm(soup.find_all("img"), "Telechargement Page"+ str(j)):
img_url = img.attrs.get("src")
if not img_url or "mangapanda.com/" not in img_url:
                # so this is not the manga page image
continue
###############################################
reponse2 = requests.get(img_url, stream=True)
reponse2.raw.decode_content = True
            ### File name ###
ext= str(j) + ".jpg"
filename = os.path.join(repertoire, ext )
######################
with open(filename, "wb") as f:
shutil.copyfileobj(reponse2.raw, f)
j += 1
else:
continu = False
urldebase= "https://www.mangapanda.com/"
urls = list()
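# Usage sketch (hypothetical invocations; the manga name is just an example):
#   python <this_script>.py naruto 12      -> download chapter 12
#   python <this_script>.py naruto 1,3,7   -> download chapters 1, 3 and 7
#   python <this_script>.py naruto 5-9     -> download chapters 5 through 9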
if len(sys.argv) < 3:
sys.exit("Ce programme nécessite 2 arguments")
else:
    # get the manga name
    nomManga = sys.argv[1].lower()
    # append the manga name to the site URL
    urldebase = urldebase + nomManga  # from here urldebase == "https://www.mangapanda.com/<mangaName>"
if "," in sys.argv[2]:
        chapitres = sys.argv[2].split(',')  # split the string into a list using the given separator
        for chapitre in chapitres:  # for each chapter in the list, build the URL of that
            # chapter's page by appending to the base URL, and store it in the list of URLs
            # (i.e. add every chapter given as a parameter)
urls.append(urldebase+ "/" + chapitre + "/")
else:
if "-" in sys.argv[2]:
chapitres = sys.argv[2].split("-")
            # add the chapters given as parameters
for chapitre in range(int( chapitres[0] ) , int( chapitres[1] ) + 1 ):
urls.append(urldebase+ "/" + str(chapitre) + "/")
else:
            # in this case a single chapter was given as a parameter
urls.append( urldebase+ "/" + sys.argv[2] + "/")
testManga = requests.get(urldebase)  # fetch the page to check that the manga exists
if testManga.ok: ### the manga given as a parameter exists ###
    ## download the requested chapters
for url in urls:
        # the directory path is obtained by stripping everything up to ".com/",
        # so the directory name is the second element returned by url.split('.com/')
telechargement( url, url.split(".com/")[1] )
print("Téléchargement reussi!")
else:
sys.exit("Manga introuvable dans ce site!")
|
import pandas as pd
import os
import json
import logging
import sys
logging.basicConfig(filename = 'logs.log')
#This function will create customer_level_features.csv
def customer_level_features(read_from_train, read_from_test, write_to):
customer_level_features = ['customer_number',
'avg_of_all_delays',
'avg_of_invoices_closed',
'payment_count_quarter_q1',
'payment_count_quarter_q2',
'payment_count_quarter_q3',
'payment_count_quarter_q4',
'invoice_count_quarter_q1',
'invoice_count_quarter_q2',
'invoice_count_quarter_q3',
'invoice_count_quarter_q4',
'L1_perc',
'L2_perc',
'L3_perc',
'M_perc',
'H_perc'
]
data = pd.read_csv(r''+read_from_train)
data2 = pd.read_csv(r''+read_from_test)
customer_level_dataset=pd.DataFrame()
dataset = data[customer_level_features].append(data2[customer_level_features], ignore_index=True)
for i in dataset['customer_number'].unique():
customer_level_dataset = customer_level_dataset.append(dataset[dataset['customer_number'] == i][customer_level_features].iloc[0], ignore_index=True)
customer_level_dataset.rename(columns={'customer_number':'object_value','H_perc':'H','L1_perc':'L1','L2_perc':'L2','L3_perc':'L3','M_perc':'M'},inplace=True)
if customer_level_dataset['object_value'].dtype==float:
customer_level_dataset['object_value'] = customer_level_dataset['object_value'].astype('int')
customer_level_dataset.to_csv(write_to,index=False)
logging.warning('Customer Level Features Created.')
def subsets_json(subset):
    invoices = []
    for invoice_number in subset['invoice_number']:
        invoice = subset[subset['invoice_number'] == invoice_number]
        invoices.append({"invoice_amount": invoice['invoice_amount'].values[0], "invoice_number": str(invoice_number).split('.')[0], "invoice_date": invoice['invoice_date'].values[0]})
    return invoices
def raw_json_creation(read_from, read_from_subsets,write_to):
data = pd.read_csv(r''+read_from) # raw csv
subsets_predictions = pd.read_csv(read_from_subsets) # predictions
temp = data.groupby('payment_id').agg({'customer_number':'nunique'}).reset_index()
invalid_payments = temp[temp['customer_number'] > 1]['payment_id'].unique()
print(invalid_payments)
data = data[~data['payment_id'].isin(invalid_payments)]
top_header = data.groupby('payment_id')[['customer_number','unique_invoice_count','payment_amount','payment_date']].max().reset_index()
payments = []
predictions = pd.DataFrame()
for index, row in top_header.iterrows():
if row['payment_id'] not in subsets_predictions['payment_id'].unique():
            print('payment ' + str(row['payment_id']) + ' not found')
continue
subset_dict = {}
subset_dict["customer_number"] = str(row['customer_number'])
subset_dict["unique_invoice_count"] = row['unique_invoice_count']
subset_dict["payment_amount"] = row['payment_amount']
subset_dict["primaryKey"]=int(row['payment_id'])
subset_dict["payment_date"]=str(row['payment_date'])
subset_dict["items"] = []
items = []
subsets = data[data['payment_id']==row['payment_id']]
for subset_number in subsets['subset_number'].unique():
abc = len(subsets_predictions[(subsets_predictions['payment_id']==row['payment_id']) & (subsets_predictions['subset_number']==subset_number)])
if abc == 0:
# print('subset ' + str(subset_number) + ' not found for payment ' + str(row['payment_id']))
continue
items.append({"subsetId":int(subset_number),"subset":subsets_json(subsets[subsets['subset_number']==subset_number])})
predictions=predictions.append(subsets_predictions[(subsets_predictions['payment_id']==row['payment_id']) & (subsets_predictions['subset_number']==subset_number)],ignore_index=True)
subset_dict['items']=items
payments.append(subset_dict)
final_json={"data":payments}
write_to_ = write_to+'raw_data_json.json'
with open(write_to_, 'w') as fp:
json.dump(final_json, fp)
predictions.rename(columns={'output':'actual','predictions':'output','H_perc':'H','L1_perc':'L1','L2_perc':'L2','L3_perc':'L3','M_perc':'M','pred_proba_0':'probability(0)','pred_proba_1':'probability(1)'},inplace=True)
predictions.to_csv(write_to+'raw_predictions.csv',index=False)
#This function will divide the data to 70% train and 30% test
def rivana_testing(read_from, testing_data_path, write_to_raw_csv):
raw_fields = ['customer_number','payment_id','payment_amount','payment_date','invoice_number','invoice_amount','invoice_date','subset_number','unique_invoice_count','output']
raw_csv = pd.DataFrame()
testing_data = pd.read_csv(testing_data_path)
testing_payment_ids = testing_data['payment_id'].unique()
for i in os.listdir(read_from):
dataset = pd.read_csv(r''+read_from+'/' + str(i), sep=',', index_col=0)
dataset['invoice_number']=dataset['invoice']
dataset['invoice_amount']=dataset['amount']
dataset=dataset[dataset['payment_id'].isin(testing_payment_ids)]
raw_csv=raw_csv.append(dataset[raw_fields],ignore_index=True)
raw_csv.to_csv(write_to_raw_csv)
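# Expected invocation (sketch; the script name below is hypothetical):
#   python rivana_testing.py <account_id> <accounts_root_path>
#   e.g. python rivana_testing.py 42 /root/accounts
# All paths used below are derived from these two arguments.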
if __name__ == '__main__':
acct_id = str(sys.argv[1])
#path = "/root/accounts"
path = str(sys.argv[2])
read_from = path+'/account_'+acct_id+'/customer_subsets_features'
testing_data_path = path+"/account_"+acct_id+"/train_test_splitted/test_30.csv"
write_to_raw_csv = path+"/account_"+acct_id+"/rivana_test/raw_data.csv"
# This function will generate all the Rivana Testing Files
rivana_testing(read_from, testing_data_path, write_to_raw_csv)
read_from = write_to_raw_csv
write_to = path+"/account_"+acct_id+"/rivana_test/"
read_from_subsets = path+"/account_"+acct_id+"/predictions/predictions.csv"
# This function will create JSON input files
raw_json_creation(read_from, read_from_subsets, write_to)
# This will generate aggregate file of customer level features
read_from_train = path+"/account_"+acct_id+"/train_test_splitted/train_70.csv"
read_from_test = path+"/account_"+acct_id+"/train_test_splitted/test_30.csv"
write_to = path+'/account_'+acct_id+'/rivana_test/customer_level_features.csv'
#This will generate aggregate file of customer level features
customer_level_features(read_from_train, read_from_test,write_to)
|
#
# @lc app=leetcode.cn id=13 lang=python3
#
# [13] Roman to Integer
#
# @lc code=start
class Solution:
def romanToInt(self, s: str) -> int:
# 3999/3999 cases passed (40 ms)
# Your runtime beats 97.24 % of python3 submissions
# Your memory usage beats 71.67 % of python3 submissions (14.9 MB)
d = {"I": 1, "V": 5, "X": 10, "L": 50, "C":100, "D":500, "M":1000}
res = 0
pos = 0
while pos < len(s) - 1:
if d[s[pos]] >= d[s[pos+1]]:
res += d[s[pos]]
pos += 1
else:
res += d[s[pos+1]]
res -= d[s[pos]]
pos += 2
if pos == len(s) - 1:
res += d[s[pos]]
return res
# @lc code=end
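# Example (sketch): Solution().romanToInt("MCMXCIV") should return 1994.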
|
class Matrix:
def __init__(self, width, height):
self.width = width
self.height = height
def edge_values(self):
lowest = float('inf')
highest = float('-inf')
for x, y in self:
if self[x][y] < lowest:
lowest = self[x][y]
if self[x][y] > highest:
highest = self[x][y]
return lowest, highest
def __iter__(self):
return MatrixIterator(self)
    def __getitem__(self, x):
        return self.rows[x]
class MatrixIterator:
def __init__(self, matrix):
self.max_x = matrix.width - 1
self.max_y = matrix.height - 1
self.x = 0
self.y = 0
def __next__(self):
x = self.x
y = self.y
if y > self.max_y:
raise StopIteration
if x >= self.max_x:
self.x = 0
self.y += 1
else:
self.x += 1
return x, y
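# A minimal usage sketch (it assumes the zero-initialised backing grid added in __init__ above):
# fill a 2x3 matrix, confirm iteration visits every (x, y) pair once, then read the extremes.
if __name__ == "__main__":
    m = Matrix(2, 3)
    for x, y in m:
        m[x][y] = x * 10 + y
    print(list(m))           # [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]
    print(m.edge_values())   # (0, 12)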
|
from _typeshed import Incomplete
from collections.abc import Generator
def bfs_edges(
G,
source,
reverse: bool = False,
depth_limit: Incomplete | None = None,
sort_neighbors: Incomplete | None = None,
) -> Generator[Incomplete, Incomplete, None]: ...
def bfs_tree(
G,
source,
reverse: bool = False,
depth_limit: Incomplete | None = None,
sort_neighbors: Incomplete | None = None,
): ...
def bfs_predecessors(
G,
source,
depth_limit: Incomplete | None = None,
sort_neighbors: Incomplete | None = None,
) -> Generator[Incomplete, None, None]: ...
def bfs_successors(
G,
source,
depth_limit: Incomplete | None = None,
sort_neighbors: Incomplete | None = None,
) -> Generator[Incomplete, None, None]: ...
def bfs_layers(G, sources) -> Generator[Incomplete, None, None]: ...
def descendants_at_distance(G, source, distance): ...
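# These stubs mirror networkx.algorithms.traversal.breadth_first_search; a minimal usage
# sketch against the real library (not part of the stub file) would look like:
#     import networkx as nx
#     G = nx.path_graph(5)
#     list(nx.bfs_edges(G, source=0))        # [(0, 1), (1, 2), (2, 3), (3, 4)]
#     dict(nx.bfs_successors(G, source=0))   # {0: [1], 1: [2], 2: [3], 3: [4]}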
|
import boto3
def create_loadbalancer(lb_name, vpc, protocol):
"""
    A function to create an application load balancer
"""
client = boto3.client('elbv2', region_name='ap-south-1')
conn = boto3.client('ec2', region_name='ap-south-1')
    # get the VPC's subnets to attach to the load balancer (an ALB needs at least two, in different AZs)
response = conn.describe_subnets(
Filters=[
{
'Name': 'vpc-id',
'Values': [
vpc
]
}
]
)['Subnets']
subnets = []
for res in response:
subnets.append(res['SubnetId'])
# create the load balancer
response = client.create_load_balancer(
Name=lb_name,
Subnets=subnets,
Type='application'
)['LoadBalancers']
# get load balancer arn to create the target group
lb_arn = response[0]['LoadBalancerArn']
    # create the target group
tg_arn = create_target_group('tg1', protocol, vpc)
# create the listener that points to the target group
response = client.create_listener(
LoadBalancerArn=lb_arn,
Protocol=protocol,
Port=80,
DefaultActions=[
{
'Type': 'forward',
'TargetGroupArn': tg_arn
}
],
)
def create_target_group(tg_name, protocol, vpc):
"""
A function to create the target group
"""
tg = boto3.client('elbv2', region_name='ap-south-1')
# create the target groups
response = tg.create_target_group(
Name=tg_name,
        Protocol=protocol,
VpcId=vpc,
Port=80,
TargetType='instance'
)['TargetGroups']
# get the target
tg_arn = response[0]['TargetGroupArn']
    # register the running EC2 instances (environment-specific IDs) with the target group
response = tg.register_targets(
TargetGroupArn=tg_arn,
Targets=[
{
'Id': 'i-074c56ce8b59e515f',
},
{
'Id': 'i-08eb4e570fa4cdf6b',
},
],
)
return tg_arn
create_loadbalancer("Loadbalancer1", 'vpc-7d587715', "HTTP")
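# A follow-up sketch (assumption: same region and the 'tg1' target group created above)
# to confirm the registered instances are becoming healthy behind the load balancer.
elb = boto3.client('elbv2', region_name='ap-south-1')
tg_arn = elb.describe_target_groups(Names=['tg1'])['TargetGroups'][0]['TargetGroupArn']
for desc in elb.describe_target_health(TargetGroupArn=tg_arn)['TargetHealthDescriptions']:
    print(desc['Target']['Id'], desc['TargetHealth']['State'])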
|
from textblob import TextBlob
from rake_nltk import Rake
import time
import collections
import json
import re
# Sample RSS Feed for testing purposes
rssData = '''
{
"title": "3 Questions: Why are student-athletes amateurs?",
"author": "Peter Dizikes | MIT News Office",
"description": "MIT Professor Jennifer Light digs into the history of the idea that students aren\u2019t part of the labor force.",
"url": "http://news.mit.edu/2019/jennifer-light-student-athletes-0325"
}
'''
def extractKeyword(text):
# Extract keywords from text and return maximum 3
r = Rake()
r.extract_keywords_from_text(cleanText(text))
resultKeyword = r.frequency_dist
Keyword = list(collections.Counter(resultKeyword))
Keyword=[x for x in Keyword if len(x)>2]
if(len(Keyword)>2):
return Keyword[:3]
else:
return Keyword[:2]
def extractSentiment(text):
# Get polarity values and return sentiment type
analysis = TextBlob(cleanText(text))
if analysis.sentiment.polarity > 0:
return 'positive'
elif analysis.sentiment.polarity == 0:
return 'neutral'
else:
return 'negative'
def cleanText(text):
    # RegEx substitution to strip @mentions, URLs and other non-alphanumeric noise from the description
    return ' '.join(re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", text).split())
def parseJSON(data):
# Load JSON RSS Feed input
data = json.loads(data)
    # Add a timestamp key, the sentiment and the keywords to the digested RSS feed
    # The _id field is used because these JSON documents are meant to be stored in MongoDB.
    data['_id'] = time.time()
# Only if description exists we should apply the NLP analysis
if data["description"]:
data['sentiment'] = extractSentiment(data["description"])
data['keyword'] = extractKeyword(data["description"])
else:
data['sentiment'] = ""
data['keyword'] = ""
# Re-construct JSON and return output
parsedFeed = json.dumps(data, indent = 2)
return parsedFeed
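# A minimal end-to-end sketch using the sample feed defined above (it assumes the NLTK
# corpora needed by rake_nltk, e.g. stopwords/punkt, have been downloaded).
if __name__ == "__main__":
    print(parseJSON(rssData))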
|
# pylint: disable=missing-docstring
''' Tasks module.
'''
import io
from behave.configuration import Configuration
from behave.formatter.base import StreamOpener
from behave.runner import Runner
from celery import Celery
from testsuite.application import create_app
from testsuite.extensions import db, socketio
from testsuite.models.testrun import TestRun
# Create and configure celery task runner
celery = Celery()
celery.config_from_object('testsuite.celeryconfig')
@celery.task
def run_tests(room, vendor, tags, override):
app = create_app()
with app.app_context():
test_run = TestRun(vendor, tags)
db.session.add(test_run)
db.session.commit()
def on_snapshot(snapshot, plan):
test_run.save_snapshot(snapshot, plan)
socketio.emit('snapshot', test_run.event, room=room)
db.session.commit()
def on_payload(payload):
socketio.emit('payload', payload, room=room)
try:
output = io.StringIO()
output_stream = StreamOpener(stream=output)
config = Configuration(
outputs=[output_stream],
format=['json.chunked'],
on_snapshot=on_snapshot,
on_payload=on_payload,
vendor=vendor,
override=override,
command_args=[],
tags=[','.join(tags)],
)
runner = Runner(config)
runner.run()
except Exception as err: # pylint: disable=broad-except
import traceback
traceback.print_exc()
socketio.emit('global_error', str(err), room=room)
finally:
socketio.emit('tests_complete', room=room)
|
import bpy
from photogrammetry_importer.photogrammetry_import_op import ImportMeshroom
from photogrammetry_importer.photogrammetry_import_op import ImportOpenMVG
from photogrammetry_importer.photogrammetry_import_op import ImportOpenSfM
from photogrammetry_importer.photogrammetry_import_op import ImportColmap
from photogrammetry_importer.photogrammetry_import_op import ImportNVM
from photogrammetry_importer.photogrammetry_import_op import ImportOpen3D
from photogrammetry_importer.photogrammetry_import_op import ImportPLY
from photogrammetry_importer.photogrammetry_export_op import ExportNVM
from photogrammetry_importer.photogrammetry_export_op import ExportColmap
# Import Functions
def colmap_import_operator_function(self, context):
self.layout.operator(ImportColmap.bl_idname, text="Colmap Import (model/workspace)")
def meshroom_import_operator_function(self, context):
self.layout.operator(ImportMeshroom.bl_idname, text="Meshroom Import (.sfm/.json/.mg)")
def open3d_import_operator_function(self, context):
self.layout.operator(ImportOpen3D.bl_idname, text="Open3D Import (.log/.json)")
def opensfm_import_operator_function(self, context):
self.layout.operator(ImportOpenSfM.bl_idname, text="OpenSfM Import (.json)")
def openmvg_import_operator_function(self, context):
self.layout.operator(ImportOpenMVG.bl_idname, text="OpenMVG / Regard3D Import (.json)")
def ply_import_operator_function(self, context):
self.layout.operator(ImportPLY.bl_idname, text="Point Cloud PLY Import (.ply)")
def visualsfm_import_operator_function(self, context):
self.layout.operator(ImportNVM.bl_idname, text="VisualSfM Import (.nvm)")
# Export Functions
def colmap_export_operator_function(self, context):
self.layout.operator(ExportColmap.bl_idname, text="Colmap Export (folder)")
def visualsfm_export_operator_function(self, context):
self.layout.operator(ExportNVM.bl_idname, text="VisualSfM Export (.nvm)")
# Define register/unregister Functions
def bl_idname_to_bpy_types_name(bl_idname, bpy_types_prefix):
assert bpy_types_prefix in ['IMPORT', 'EXPORT']
bl_idname_suffix = bl_idname.split('.')[1]
return bpy_types_prefix + '_SCENE_OT_' + bl_idname_suffix
def is_registered(import_or_export_operator, operator_type):
assert operator_type in ['IMPORT', 'EXPORT']
return hasattr(
bpy.types,
bl_idname_to_bpy_types_name(import_or_export_operator.bl_idname, operator_type))
def register_importer(condition, importer, append_function):
# https://blenderartists.org/t/find-out-if-a-class-is-registered/602335
if condition:
if not is_registered(importer, operator_type='IMPORT'):
bpy.utils.register_class(importer)
bpy.types.TOPBAR_MT_file_import.append(append_function)
def unregister_importer(importer, append_function):
if is_registered(importer, operator_type='IMPORT'):
bpy.utils.unregister_class(importer)
bpy.types.TOPBAR_MT_file_import.remove(append_function)
def register_exporter(condition, exporter, append_function):
# https://blenderartists.org/t/find-out-if-a-class-is-registered/602335
if condition:
if not is_registered(exporter, operator_type='EXPORT'):
bpy.utils.register_class(exporter)
bpy.types.TOPBAR_MT_file_export.append(append_function)
def unregister_exporter(exporter, append_function):
if is_registered(exporter, operator_type='EXPORT'):
bpy.utils.unregister_class(exporter)
bpy.types.TOPBAR_MT_file_export.remove(append_function)
def register_importers(import_prefs):
register_importer(import_prefs.colmap_importer_bool, ImportColmap, colmap_import_operator_function)
register_importer(import_prefs.meshroom_importer_bool, ImportMeshroom, meshroom_import_operator_function)
register_importer(import_prefs.open3d_importer_bool, ImportOpen3D, open3d_import_operator_function)
register_importer(import_prefs.opensfm_importer_bool, ImportOpenSfM, opensfm_import_operator_function)
register_importer(import_prefs.openmvg_importer_bool, ImportOpenMVG, openmvg_import_operator_function)
register_importer(import_prefs.ply_importer_bool, ImportPLY, ply_import_operator_function)
register_importer(import_prefs.visualsfm_importer_bool, ImportNVM, visualsfm_import_operator_function)
def unregister_importers():
unregister_importer(ImportColmap, colmap_import_operator_function)
unregister_importer(ImportMeshroom, meshroom_import_operator_function)
unregister_importer(ImportOpen3D, open3d_import_operator_function)
unregister_importer(ImportOpenSfM, opensfm_import_operator_function)
unregister_importer(ImportOpenMVG, openmvg_import_operator_function)
unregister_importer(ImportPLY, ply_import_operator_function)
unregister_importer(ImportNVM, visualsfm_import_operator_function)
def register_exporters(export_prefs):
register_exporter(export_prefs.colmap_exporter_bool, ExportColmap, colmap_export_operator_function)
register_exporter(export_prefs.visualsfm_exporter_bool, ExportNVM, visualsfm_export_operator_function)
def unregister_exporters():
unregister_exporter(ExportColmap, colmap_export_operator_function)
unregister_exporter(ExportNVM, visualsfm_export_operator_function)
|
def function(values):
    # print the smallest value in the sequence
    print(min(values))
numbers = [8, 6, 4, 8, 4, 50, 2, 7]
function(numbers)
|
# I pledge my honor that I have abided by the Stevens Honor System
def main():
    infileName = input("What file are the names in? ")
    outfileName = input("Place capitalized names in this file: ")
infile=open(infileName, "r")
outfile = open(outfileName, "w")
for i in infile:
n= i.title()
print(n, file=outfile)
infile.close()
outfile.close()
main()
|
import os
import csv
import json
import torch
from torchtext.utils import download_from_url, extract_archive
from torchtext.datasets import text_classification
from tqdm import tqdm
from tokenizer import NLTKTokenizer
os.makedirs('data', exist_ok=True)
def load_csv(path, tokenize_fn):
"""
Yields iterator of tokenized and tagged data
"""
with open(path, 'r') as f:
csvreader = csv.reader(f)
for row in csvreader:
label = int(row[0]) - 1
tokens, tags = tokenize_fn(' '.join(row[1:]))
yield label, tokens, tags
def get_dataset(name, tokenize_fn, root='data'):
"""
Downloads and extracts dataset
Gets iterators over dataset
"""
dataset_tar = download_from_url(text_classification.URLS[name], root=root)
extracted_files = extract_archive(dataset_tar)
for fname in extracted_files:
if fname.endswith('train.csv'):
train_csv_path = fname
if fname.endswith('test.csv'):
test_csv_path = fname
train_iterator = load_csv(train_csv_path, tokenize_fn)
test_iterator = load_csv(test_csv_path, tokenize_fn)
return train_iterator, test_iterator
def save_data(path, data, field_names):
"""
Saves data List[Tuple] to jsonlines format
"""
with open(path, 'w+') as f:
for example in tqdm(data, desc='Saving data to jsonl...'):
assert len(example) == len(field_names)
_example = dict()
for field, name in zip(example, field_names):
_example[name] = field
json.dump(_example, f)
f.write('\n')
tokenizer = NLTKTokenizer(lower=True, max_length=250, sos_token='<sos>',
eos_token='<eos>')
yelp_train, yelp_test = get_dataset('YelpReviewPolarity', tokenizer.tokenize)
save_data('data/yelp_train.jsonl', yelp_train, ['label', 'tokens', 'tags'])
save_data('data/yelp_test.jsonl', yelp_test, ['label', 'tokens', 'tags'])
amazon_train, amazon_test = get_dataset('AmazonReviewPolarity', tokenizer.tokenize)
save_data('data/amazon_train.jsonl', amazon_train, ['label', 'tokens', 'tags'])
save_data('data/amazon_test.jsonl', amazon_test, ['label', 'tokens', 'tags'])
torch.save(tokenizer, 'tokenizer_no_vocab.pt')
|
from functions import *
show_personal_info("Matti Meikäläinen", "Sodankylä", "Ohjelmistosuunnittelija")
|
import tempfile
import urllib.request
from datetime import date, timedelta
from enum import Enum, unique
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.gis.db.models.functions import Distance
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.files import File
from django.db import models, transaction
from django.db.models import JSONField
from django.db.models.fields.related_descriptors import (
create_reverse_many_to_one_manager,
)
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.urls import reverse
from django_extensions.db.models import TimeStampedModel
from storages.backends.s3boto3 import S3Boto3Storage
from uk_election_ids.datapackage import ID_REQUIREMENTS, VOTING_SYSTEMS
from uk_election_timetables.calendars import Country
from uk_election_timetables.election_ids import (
NoSuchElectionTypeError,
from_election_id,
)
from uk_geo_utils.models import Onspd
from .managers import PrivateElectionsManager, PublicElectionsManager
class ElectionCancellationReason(models.TextChoices):
NO_CANDIDATES = "NO_CANDIDATES", "No candidates"
EQUAL_CANDIDATES = "EQUAL_CANDIDATES", "Equal candidates to seats"
UNDER_CONTESTED = "UNDER_CONTESTED", "Fewer candidates than seats"
CANDIDATE_DEATH = "CANDIDATE_DEATH", "Death of a candidate"
class ElectionType(models.Model):
name = models.CharField(blank=True, max_length=100)
election_type = models.CharField(blank=True, max_length=100, unique=True)
def __str__(self):
return self.name
class ElectionSubType(models.Model):
name = models.CharField(blank=True, max_length=100)
election_type = models.ForeignKey(
"ElectionType", related_name="subtype", on_delete=models.CASCADE
)
election_subtype = models.CharField(blank=True, max_length=100)
ValidationError = ValueError
def __str__(self):
return "{} ({})".format(self.name, self.election_type)
class ElectedRole(models.Model):
"""
M2M through table between Organisation <-> ElectionType that defines
the role of the job that the elected person will have. e.g:
"Councillor for Trumpton" or "Mayor of London"
"""
election_type = models.ForeignKey("ElectionType", on_delete=models.CASCADE)
organisation = models.ForeignKey(
"organisations.Organisation",
related_name="electedrole",
on_delete=models.CASCADE,
)
elected_title = models.CharField(blank=True, max_length=255)
elected_role_name = models.CharField(blank=True, max_length=255)
def __str__(self):
return "{} ({})".format(self.elected_title, self.organisation)
@unique
class ModerationStatuses(Enum):
suggested = "Suggested"
rejected = "Rejected"
approved = "Approved"
deleted = "Deleted"
class ModerationStatus(models.Model):
short_label = models.CharField(
blank=False,
max_length=32,
primary_key=True,
choices=[(x, x.value) for x in ModerationStatuses],
)
long_label = models.CharField(blank=False, max_length=100)
def __str__(self):
return self.short_label
DEFAULT_STATUS = ModerationStatuses.suggested.value
class Election(TimeStampedModel):
"""
An election.
This model should contain everything needed to make the election ID,
plus extra information about this election.
"""
election_id = models.CharField(
blank=True, null=True, max_length=250, unique=True
)
tmp_election_id = models.CharField(blank=True, null=True, max_length=250)
election_title = models.CharField(blank=True, max_length=255)
election_type = models.ForeignKey(ElectionType, on_delete=models.CASCADE)
election_subtype = models.ForeignKey(
ElectionSubType, null=True, on_delete=models.CASCADE
)
poll_open_date = models.DateField(blank=True, null=True)
organisation = models.ForeignKey(
"organisations.Organisation", null=True, on_delete=models.CASCADE
)
elected_role = models.ForeignKey(
ElectedRole, null=True, on_delete=models.CASCADE
)
division = models.ForeignKey(
"organisations.OrganisationDivision",
null=True,
on_delete=models.CASCADE,
)
division_geography = models.ForeignKey(
"organisations.DivisionGeography",
null=True,
blank=True,
on_delete=models.CASCADE,
)
organisation_geography = models.ForeignKey(
"organisations.OrganisationGeography",
null=True,
blank=True,
on_delete=models.CASCADE,
)
seats_contested = models.IntegerField(blank=True, null=True)
seats_total = models.IntegerField(blank=True, null=True)
group = models.ForeignKey(
"Election",
null=True,
related_name="_children_qs",
on_delete=models.CASCADE,
)
requires_voter_id = models.CharField(
max_length=100,
null=True,
choices=[
(req, ID_REQUIREMENTS[req]["name"]) for req in ID_REQUIREMENTS
],
)
def get_children(self, manager):
"""
        This method allows us to call with a manager instance or a string
i.e both: obj.get_children('private_objects') and
obj.get_children(Election.public_objects)
are supported.
This will return a 'children' RelatedManager
with the relevant filters applied.
"""
for m in self._meta.managers:
if m.name == manager or m == manager:
child_manager_cls = create_reverse_many_to_one_manager(
m.__class__, self._meta.get_field("_children_qs")
)
return child_manager_cls(self)
raise ValueError("Unknown manager {}".format(manager))
group_type = models.CharField(
blank=True, max_length=100, null=True, db_index=True
)
voting_system = models.CharField(
max_length=100,
null=True,
choices=[(vs, VOTING_SYSTEMS[vs]["name"]) for vs in VOTING_SYSTEMS],
)
explanation = models.ForeignKey(
"elections.Explanation",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
metadata = models.ForeignKey(
"elections.MetaData", null=True, blank=True, on_delete=models.SET_NULL
)
current = models.BooleanField(null=True, db_index=True)
"""
## Statuses
Elections can have various statuses.
We track these in `ModerationStatus`. Using this model we can
get the moderation history of each object, including the current
status.
However this query is somewhat slow, and most of the time (e.g
for public use) we want to filter on the current status.
Because of this, we denormalize the current status into a
`current_status` field.
election.moderation_statuses.all() is not a terribly useful call
to reference directly because it just gives us a list of all the
statuses an election object has ever been assigned
    (but not when they were assigned or which is the most recent).
`ModerationHistory.objects.all().filter(election=self).latest().status`
will get the latest status, but this should always be the same as
`self.current_status`.
"""
moderation_statuses = models.ManyToManyField(
ModerationStatus, through="ModerationHistory"
)
# Don't modify this field directly. Add a ModerationStatus event and save it
# to change this value.
current_status = models.CharField(
blank=False,
max_length=32,
choices=[(x, x.value) for x in ModerationStatuses],
default=DEFAULT_STATUS,
db_index=True,
)
# where did we hear about this election
# (not necessarily the Notice of Election)
source = models.CharField(blank=True, max_length=1000)
# Notice of Election document
notice = models.ForeignKey(
"elections.Document",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="notice_election_set",
)
# optional FK to a SnoopedElection record
snooped_election = models.ForeignKey(
"election_snooper.SnoopedElection",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
cancelled = models.BooleanField(default=False)
cancellation_notice = models.ForeignKey(
"elections.Document",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="cancellation_election_set",
)
cancellation_reason = models.CharField(
max_length=16,
null=True,
blank=True,
choices=ElectionCancellationReason.choices,
default=None,
)
replaces = models.ForeignKey(
"Election",
null=True,
blank=True,
related_name="_replaced_by",
on_delete=models.CASCADE,
)
tags = JSONField(default=dict, blank=True)
@property
def replaced_by(self):
if len(self._replaced_by.all()) == 0:
return None
if len(self._replaced_by.all()) == 1:
return self._replaced_by.all()[0]
raise AttributeError("Election should only have one replacement")
"""
Note that order is significant here.
The first manager we define is the default. See:
https://docs.djangoproject.com/en/1.11/topics/db/managers/#modifying-a-manager-s-initial-queryset
public_objects might seem like the 'safe' default here, but there are a
number of places where Django implicitly uses the default manager
(e.g: /admin, dumpdata, etc).
Using public_objects as the default can lead to some strange bugs.
For the most part, not having a .objects forces us to make a choice
about what we are exposing when we query the model but there are
    some places where django/DRF/etc are "clever" and silently use the default.
We need to be careful about this. e.g:
class ElectionListView(ListView):
model = Election
and ensure we override get_queryset().
"""
private_objects = PrivateElectionsManager()
public_objects = PublicElectionsManager()
class Meta:
ordering = ("election_id",)
def get_absolute_url(self):
return reverse("single_election_view", args=(self.election_id,))
@property
def get_current(self):
model_current = getattr(self, "current", None)
if model_current is not None:
return model_current
recent_past = date.today() - timedelta(days=settings.CURRENT_PAST_DAYS)
return self.poll_open_date >= recent_past
def get_ballots(self):
"""
If self has a group_type this returns all ballots that are descended from self.
        If self doesn't have a group_type (i.e. is a 'ballot') it returns None.
"""
if self.group_type:
group, date = self.election_id.rsplit(".", 1)
return Election.public_objects.filter(
election_id__startswith=group + ".",
election_id__endswith=date,
group_type=None,
)
return None
@property
def group_seats_contested(self):
"""
Returns the sum of the seats_contested property on all ballots that are
descended from the election, unless self is a ballot, in which case
self.seats_contested is returned.
It's likely there are election groups where not every ballot has had
seats_contested filled in, so treat with care.
"""
if self.group_type:
return (
self.get_ballots()
.aggregate(models.Sum("seats_contested"))
.get("seats_contested__sum")
)
return self.seats_contested
def __str__(self):
return self.get_id()
def get_example_postcode(self):
if not self.group_type and self.geography:
return (
Onspd.objects.filter(location__within=self.geography.geography)
.filter(
location__dwithin=(self.geography.geography.centroid, 0.08)
)
.annotate(
distance=Distance(
"location", self.geography.geography.centroid
)
)
.order_by("distance")
.first()
)
return None
@property
def get_timetable(self):
country_map = {
"WLS": Country.WALES,
"ENG": Country.ENGLAND,
"NIR": Country.NORTHERN_IRELAND,
"SCT": Country.SCOTLAND,
"GBN": None,
}
area = self.division or self.organisation
if not area:
return None
territory_code = area.territory_code or self.organisation.territory_code
if not territory_code:
return None
try:
timetable = from_election_id(
self.election_id, country=country_map[territory_code]
).timetable
except NoSuchElectionTypeError:
timetable = None
return timetable
def get_id(self):
if self.election_id:
return self.election_id
return self.tmp_election_id
@property
def geography(self):
if self.identifier_type == "ballot" and self.division:
return self.division_geography
return self.organisation_geography
@property
def identifier_type(self):
if not self.group_type:
return "ballot"
return self.group_type
def get_division_geography(self):
if self.division_geography:
return self.division_geography
if self.identifier_type == "ballot" and self.division:
# attach geography by division if possible
try:
return self.division.geography
except ObjectDoesNotExist:
pass
return None
@property
def ynr_link(self):
if self.identifier_type in ["organisation", "ballot"]:
return (
"https://candidates.democracyclub.org.uk/elections/{}".format(
self.election_id
)
)
return None
@property
def whocivf_link(self):
if self.identifier_type in ["organisation", "ballot"]:
return "https://whocanivotefor.co.uk/elections/{}".format(
self.election_id
)
return None
def get_organisation_geography(self):
if self.organisation_geography:
return self.organisation_geography
try:
if self.identifier_type == "ballot":
if self.division:
return None
# Try to attach geography by organisation
# (e.g: for Mayors, PCCs etc)
if not self.division and self.organisation:
return self.organisation.get_geography(self.poll_open_date)
# if the election is an 'organisation group'
# attach geography by organisation
if self.group_type == "organisation" and not self.division:
return self.organisation.get_geography(self.poll_open_date)
except ObjectDoesNotExist:
pass
return None
def get_admin_url(self):
"""
Build URL to the election in the admin
"""
viewname = (
f"admin:{self._meta.app_label}_{self._meta.model_name}_change"
)
return reverse(viewname=viewname, kwargs={"object_id": self.pk})
def clean(self):
if not self.identifier_type == "ballot" and self.cancelled:
raise ValidationError(
"Can't set a group to cancelled. Only a ballot can be cancelled"
)
if not self.cancelled and self.cancellation_notice:
raise ValidationError(
"Only a cancelled election can have a cancellation notice"
)
if not self.cancelled and self.cancellation_reason:
raise ValidationError(
"Only a cancelled election can have a cancellation reason"
)
@transaction.atomic
def save(self, *args, **kwargs):
# used later to determine if we should look for ballots
created = not self.pk
status = kwargs.pop("status", None)
user = kwargs.pop("user", None)
notes = kwargs.pop("notes", "")[:255]
self.division_geography = self.get_division_geography()
self.organisation_geography = self.get_organisation_geography()
if not self.group_id and self.group:
try:
group_model = Election.private_objects.get(
election_id=self.group.election_id
)
except Election.DoesNotExist:
group_model = self.group.save(*args, **kwargs)
self.group = group_model
super().save(*args, **kwargs)
if (
status
and status != DEFAULT_STATUS
and status != self.current_status
):
event = ModerationHistory(
election=self, status_id=status, user=user, notes=notes
)
event.save()
# if the object was created return here to save on unnecessary
# db queries
if created:
return
# otherwise check if we have related ballots
ballots = self.get_ballots()
if ballots:
# if so update the modified date on them so that we import
# the changes made on the parent election
ballots.update(modified=self.modified)
@receiver(post_save, sender=Election, dispatch_uid="init_status_history")
def init_status_history(sender, instance, **kwargs):
if not ModerationHistory.objects.all().filter(election=instance).exists():
event = ModerationHistory(election=instance, status_id=DEFAULT_STATUS)
event.save(initial_status=True)
class ModerationHistory(TimeStampedModel):
election = models.ForeignKey(Election, on_delete=models.CASCADE)
status = models.ForeignKey(ModerationStatus, on_delete=models.CASCADE)
user = models.ForeignKey(
User, null=True, blank=True, on_delete=models.SET_NULL
)
notes = models.CharField(blank=True, max_length=255)
def save(self, **kwargs):
# if this is the initial status no need to update the related election
# so return early. This is because the default status is identical on
# both this model and the Election model
if kwargs.pop("initial_status", False):
return super().save(**kwargs)
# save the related election to update the modified timestamp so that it
# is found by the importer looking for recent changes
if self.election.current_status != self.status.short_label:
self.election.current_status = self.status.short_label
self.election.save()
super().save(**kwargs)
return None
class Meta:
verbose_name_plural = "Moderation History"
get_latest_by = "modified"
ordering = ("election", "-modified")
class Explanation(models.Model):
description = models.CharField(blank=False, max_length=100)
explanation = models.TextField()
def __str__(self):
return self.description
class MetaData(models.Model):
description = models.CharField(blank=False, max_length=100)
data = JSONField()
class Meta:
verbose_name_plural = "MetaData"
def __str__(self):
return self.description
class PdfS3Storage(S3Boto3Storage):
default_content_type = "application/pdf"
default_acl = "public-read"
class Document(models.Model):
source_url = models.URLField(max_length=1000)
uploaded_file = models.FileField(
max_length=1000, upload_to="", storage=PdfS3Storage()
)
def archive_document(self, url, election_id):
# copy a notice of election document to our s3 bucket
# because it won't stay on the council website forever
filename = url.split("/")[-1]
if filename == "":
filename = "Notice_of_Election"
with tempfile.NamedTemporaryFile() as tmp:
urllib.request.urlretrieve(url, tmp.name)
self.uploaded_file.save(
"%s/%s" % (election_id, filename), File(tmp)
)
return self.uploaded_file
|
i = 'SsNn'
soma = maior = menor = c = cont = 0
while i not in 'Nn':
    c = int(input('Enter a number: '))
    i = input('Do you want to continue [Y/N]? ').upper()
    cont += 1
    soma += c
    if cont == 1:
        maior = menor = c
    else:
        if c > maior:
            maior = c
        if c < menor:
            menor = c
print('You entered {} numbers and the average was {:.2f}'.format(cont, soma / cont))
print('The largest number entered was {} and the smallest was {}'.format(maior, menor))
|
'''
Created on 01-08-2013
@author: klangner
'''
from collections import defaultdict
from bluenotepad.notepad.models import Notepad, DailyStats
from bluenotepad.notepad.parser import parseReportModel
from bluenotepad.settings import FILE_STORAGE
from bluenotepad.storage.log import read_sessions
from datetime import timedelta, datetime
from django.core.management.base import BaseCommand
import gzip
import os
class Command(BaseCommand):
args = '<...>'
help = 'Aggregate statistics'
def handle(self, *args, **options):
        notepads = Notepad.objects.all()
today = datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
yesterday = today - timedelta(days=1)
file_date = yesterday.strftime("%Y-%m-%d")
counter = 0
for notepad in notepads:
filename = FILE_STORAGE + notepad.uuid + "/" + file_date + ".log"
sessions = read_sessions(filename)
stats = DailyStats(notepad=notepad)
stats.day = yesterday.date()
stats.session_count = len(sessions)
stats.event_count = sum([len(events) for events in sessions.itervalues()])
stats.report_data = self.createReport(sessions, notepad)
stats.save()
if len(sessions) > 0:
counter += 1
self.compressLog(filename)
self.stdout.write('processed: %d notepads\n' % (counter))
def createReport(self, sessions, notepad):
report = ''
event_count = sum([len(events) for events in sessions.itervalues()])
variables = parseReportModel(notepad.report_model)
data = defaultdict(int)
for records in sessions.itervalues():
for record in records:
for var_name, var_events in variables.iteritems():
if record['event'] in var_events:
data[var_name] += 1
for key, value in data.iteritems():
report += ('%s: %d (%.2f%%)\n' % (key, value, (value*100.0)/event_count))
return report
def compressLog(self, filename):
if os.path.exists(filename):
f_in = open(filename, 'rb')
f_out = gzip.open(filename + '.gz', 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
|
from generators.KruskalGenerator import KruskalGenerator
import logging
class KruskalWithLoopsGenerator(KruskalGenerator):
def __init__(self):
KruskalGenerator.__init__(self)
self.log = logging.getLogger(__name__)
self.foo = lambda: self.random.random() <= 1/(2*self.size)
# @Override
def __create_loops__(self, wall, size):
# Don't want too many loops, so we set the probability for a cell to remove both walls to 1/(2*size)
if self.foo():
wall.remove()
def set_probability_function(self, foo):
self.foo = foo
|
import numpy as np
import pandas as pd
import sklearn
import sklearn.preprocessing
import scipy.stats
import tensorflow.keras as keras
df = pd.read_csv('WISDM_clean.csv')
# .copy() so the in-place scaling below doesn't trip pandas' SettingWithCopyWarning
df_train = df[df['user_id'] <= 30].copy()
df_test = df[df['user_id'] > 30].copy()
# Norm
scale_columns = ['x_axis', 'y_axis', 'z_axis']
scaler = sklearn.preprocessing.RobustScaler()
scaler = scaler.fit(df_train[scale_columns])
df_train.loc[:, scale_columns] = scaler.transform(
df_train[scale_columns].to_numpy()
)
df_test.loc[:, scale_columns] = scaler.transform(
df_test[scale_columns].to_numpy()
)
def create_dataset(X, y, time_steps=1, step=1):
Xs, ys = [], []
for i in range(0, len(X) - time_steps, step):
v = X.iloc[i:(i + time_steps)].values
labels = y.iloc[i: i + time_steps]
Xs.append(v)
ys.append(scipy.stats.mode(labels)[0][0])
return np.array(Xs), np.array(ys).reshape(-1, 1)
TIME_STEPS = 200
STEP = 40
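# Sliding-window framing: each sample is a (TIME_STEPS, 3) block of accelerometer
# readings, consecutive windows overlap by TIME_STEPS - STEP = 160 rows, and each
# window's label is the modal activity within it (via scipy.stats.mode above).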
X_train, y_train = create_dataset(
df_train[['x_axis', 'y_axis', 'z_axis']],
df_train.activity,
TIME_STEPS,
STEP
)
X_test, y_test = create_dataset(
df_test[['x_axis', 'y_axis', 'z_axis']],
df_test.activity,
TIME_STEPS,
STEP
)
print(X_train.shape, y_train.shape)
enc = sklearn.preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
enc = enc.fit(y_train)
y_train = enc.transform(y_train)
y_test = enc.transform(y_test)
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(8, (2, 2), input_shape=(200, 3, 1)))
# output shape: (199, 2, 8) after the 2x2 'valid' convolution
#Dense
model.add(keras.layers.Flatten())
# flattened: 199 * 2 * 8 = 3184 features
model.add(keras.layers.Dense(units=512, activation='relu'))
model.add(keras.layers.Dropout(rate=0.5))
model.add(keras.layers.Dense(units=128, activation='relu'))
model.add(keras.layers.Dense(y_train.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
history = model.fit(
X_train.reshape(-1, X_train.shape[1], X_train.shape[2], 1), y_train,
epochs=20,
batch_size=64,
validation_split=0.1,
shuffle=True
)
model.summary()
model.save("cnn.h5")
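# A minimal evaluation sketch on the held-out users (user_id > 30); the reshape
# mirrors the one used for training above.
loss, acc = model.evaluate(
    X_test.reshape(-1, X_test.shape[1], X_test.shape[2], 1), y_test, verbose=0
)
print(f"test loss={loss:.4f} acc={acc:.4f}")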
|
import sys
sys.setrecursionlimit(10000)
import functools
def rodcut(tup):
    rodlength, cutlengths = tup
    cutlengths = list(set(cutlengths))
    if rodlength < min(cutlengths):
        return 0
    mincutl = min(cutlengths)
@functools.lru_cache()
def f(rodlength):
nonlocal mincutl
# if rodlength < minimum cutlengths return 0 which is invalid
if rodlength < mincutl: return 0
# set curmax to 0, the whole operation on the rod could be invalid
# so we set to 0
curmax = 0
for cutlength in cutlengths:
if rodlength >= cutlength:
if rodlength == cutlength:
curmax = max(1, curmax)
else:
ret = f(rodlength - cutlength)
if ret == 0:
curmax = max(curmax, 0)
else:
curmax = max(1 + ret, curmax)
return curmax
    return f(rodlength)
def rodcutdp(tup):
rodlength, cutlengths = tup
cutlengths = sorted(list(set(cutlengths)))
mincutl = min(cutlengths)
if rodlength < mincutl: return 0
dp = [[0]*(len(cutlengths) + 1) for _ in range(rodlength + 1)]
for i in range(1, (rodlength + 1)):
for j in range(1, len(cutlengths) + 1):
if i < mincutl:
# if rodlength < mincut length set to invalid
dp[i][j] = 0
else:
# check what nextrodlength would be if we make cut
nextrodlength = i - cutlengths[j - 1]
if nextrodlength == 0:
# if nextrodlength would be zero then v1 is 1
# because we will have 1 piece
v1 = 1
elif nextrodlength < 0:
# if it would be negative, then v1 is obv invalid
v1 = 0
else:
# if it does have a length more than 0
# check if that newrodlength with the first j cutlengths is valid or not
v1 = dp[nextrodlength][j]
if v1 == 0:
v1 = 0
else:
# if its valid we have dp[nextrodlength][j] + 1 pieces
v1 = v1 + 1
# check max pieces if we don't use this cutlength
v2 = dp[i][j - 1]
dp[i][j] = max(v1, v2)
return dp[-1][-1]
ans = rodcutdp((5, [5,3,2]))
ans2 = rodcutdp((4,[2,1,1]))
ans3 = rodcutdp((4000,[1,2,3]))
ans4 = rodcutdp((3,[1,2]))
ans5 = rodcutdp((7,[5,5,2]))
ansr = rodcut((5, [5,3,2]))
ans2r = rodcut((4,[2,1,1]))
ans3r = rodcut((4000,[1,2,3]))
ans4r = rodcut((3,[1,2]))
ans5r = rodcut((7,[5,5,2]))
x = 2
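# A minimal consistency check (assumption: the memoized and DP versions should agree;
# the 4000-length case is left out to keep the recursive variant quick).
for case in [(5, [5, 3, 2]), (4, [2, 1, 1]), (3, [1, 2]), (7, [5, 5, 2])]:
    assert rodcut(case) == rodcutdp(case)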
|
def base(time):
text=fin.readline().rstrip('\n')
b=[]
for i in xrange(2,37):
try:
a=int(text,i)
for j in xrange(2,37):
try:
c=int(str(a),10)
b.append(c)
except ValueError:
pass
except ValueError:
pass
b.sort()
print b
fout.write("Case #{}: {}\n".format(time+1,b[0]))
if __name__ == '__main__':
fin=open('in.txt','r')
fout=open('out.txt','w')
for time in xrange(0,int(fin.readline().rstrip('\n'))):
base(time)
|
# Generated by Django 3.2.3 on 2021-05-19 16:57
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Game',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Name', models.CharField(max_length=264)),
('Size', models.IntegerField()),
('Developer', models.CharField(max_length=256)),
('Description', models.CharField(max_length=1024)),
('cover_image', models.CharField(max_length=256)),
('Gameplay_images', models.CharField(max_length=256)),
('poster', models.CharField(max_length=256)),
('price', models.IntegerField()),
],
),
]
|
from selenium.webdriver.support.select import Select
from Pages.base_page import BasePage
from Utils.locators import *
class ListBoxPage(BasePage):
def __init__(self, driver):
self.locator = ListBoxLocators
super().__init__(driver)
def pick_value_by_index_list1(self, index):
selections = self.driver.find_element(*self.locator.list1_selections)
list_values = Select(selections)
list_values.select_by_index(index)
def pick_value_by_index_list2(self, index):
selections = self.driver.find_element(*self.locator.list2_selections)
list_values = Select(selections)
list_values.select_by_index(index)
def click_add_button(self):
self.driver.find_element(*self.locator.add_button).click()
def click_add_all_button(self):
self.driver.find_element(*self.locator.addAll_button).click()
def click_remove_all_button(self):
self.driver.find_element(*self.locator.removeAll_button).click()
def click_remove_button(self):
self.driver.find_element(*self.locator.remove_button).click()
|
def areaOf():
figure = input('Area of (C)ircle or area of R(ectangle)? ').upper()
if figure == 'C':
r = int(input('Enter a radius: '))
area = 3.14*r**2
return 'Area of circle is ' + str(area)
elif figure == 'R':
w = int(input('Enter a width: '))
        h = int(input('Enter a height: '))
area = w * h
return 'Area of rectangle is ' + str(area)
def rootOf():
    '''
    Reads the coefficients a, b, c (int or float) from input and returns the two
    roots of the quadratic equation a*x**2 + b*x + c = 0, rounded to 2 decimals.
    '''
try:
a = int(input())
b = int(input())
c = int(input())
except:
return "It's not quadratic equation."
delta = b ** 2 - 4 * a * c
    x1 = (-b + delta**.5)/(2*a)
    x2 = (-b - delta**.5)/(2*a)
return round(x1, 2), round(x2, 2)
def factorial():
'''
returns a factorial of a given number
'''
result = 1
try:
n = int(input())
except:
return 'error'
if n < 0:
return 'error'
else:
for i in range(n):
result *= (i+1)
return result
def fibo(n, fiboList=None):
    '''
    int -> list
    returns a list of the first n Fibonacci numbers
    '''
    # avoid a mutable default argument so repeated calls don't accumulate results
    if fiboList is None:
        fiboList = [1, 1]
    fib = 0
    for i in range(n - 2):
        fib = fiboList[i] + fiboList[i + 1]
        fiboList.append(fib)
    return fiboList
def powerSeries(stopNum):
    '''
    int -> float
    sums the alternating series (-1)**(i+1)/(2*i-1) for i in [1, stopNum) and
    multiplies the result by 4 (the Leibniz approximation of pi)
    '''
sum = 0
for i in range(1,stopNum):
sum += (-1)**(i+1)/(2*i-1)
return sum*4
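# For example, fibo(10) -> [1, 1, 2, 3, 5, 8, 13, 21, 34, 55], and powerSeries(100000)
# approaches math.pi (about 3.1416) via the Leibniz series.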
|
import os
import sys
import numpy as np
import cv2
import imgdb
import imgdata
import nnmpl
import pickle
OPIS = """recognizephone.py <mode> <database> <method> <images>
mode:
    learn - train on the folder passed as the images parameter
    check - classify the single image passed as the images parameter
database:
    name of the data file - the serialized neural-network object
method:
    histogram - use a colour histogram for recognition
    empirdist - use the empirical distribution function
image:
    a folder or an image, depending on the selected mode
"""
if __name__ == "__main__":
if len(sys.argv) < 4:
print("Nie podano parametrów.")
print(OPIS)
sys.exit(-1)
if sys.argv[1] == "learn":
print("Learning..")
nndatabase = sys.argv[2]
fproc = sys.argv[3]
imgPrefix = sys.argv[4]
if fproc != "histogram" and fproc != "empirdist":
print("Nie poprawny parametr: " + fproc)
print(OPIS)
sys.exit(-1)
if imgPrefix[-1] != "/":
imgPrefix += "/"
firms = imgdb.readBase(imgPrefix)
if len(firms) == 0:
print("Nie prawidłowy folder z obrazami.")
print(OPIS)
sys.exit(-1)
models = imgdb.createListOfModels(firms)
Ymod = np.eye(len(models))
X = np.ndarray(shape=(0,3*256))
y = np.ndarray(shape=(0,len(models)))
for i,(firm,model) in enumerate(models):
# for firm,model in models:
for imgFile in firms[firm][model]:
print(imgFile)
print(str(i) + " " + firm + ":" + model)
img = cv2.imread(imgPrefix+imgFile, cv2.IMREAD_COLOR)
if fproc == "histogram":
dataBGR = imgdata.histBGR(img)
elif fproc == "empirdist":
dataBGR = imgdata.backProjectBGR(img)
vector = np.concatenate( ( dataBGR["b"], dataBGR["g"], dataBGR["r"]), axis=0).T
X = np.concatenate( (X, vector), axis=0)
y = np.concatenate( (y,[Ymod[i]]), axis=0)
nnmpl.learn(X.astype(float), y.astype(float), nndatabase)
with open('models', 'wb') as handle:
pickle.dump(models, handle, protocol=pickle.HIGHEST_PROTOCOL)
elif sys.argv[1] == "check":
print("Checking..")
nndatabase = sys.argv[2]
fproc = sys.argv[3]
img = cv2.imread(sys.argv[4], cv2.IMREAD_COLOR)
if isinstance(img, np.ndarray) == 0:
print("Nie prawidłowy obraz.")
print(OPIS)
sys.exit(-1)
models = []
with open('models', 'rb') as fp:
models = pickle.load(fp)
if len(models) == 0:
print("Baza modeli jest uszkodzona")
sys.exit(-1)
if fproc == "histogram":
dataBGR = imgdata.histBGR(img)
elif fproc == "empirdist":
dataBGR = imgdata.backProjectBGR(img)
vector = np.concatenate( ( dataBGR["b"], dataBGR["g"], dataBGR["r"]), axis=0).T
klasyfikacja = nnmpl.classify(vector.astype(float), nndatabase)
idx = np.argmax(klasyfikacja)
print("Wektor klasyfikacji: ", klasyfikacja, "\n")
print("Wykryto obraz z telefonu: " + models[idx][0] + ", model: " + models[idx][1])
|
#!/usr/bin/env python3
from random import randint
tries = 0
number = randint(1, 100)
print("Rate zwischen 1 und 100!")
while True:
try:
guess = int(input(f'Versuch #{tries+1}:'))
tries += 1
if guess < number:
print("Zahl ist zu klein, du Volltrottel.")
elif guess > number:
print("Zahl ist groß, du Idiot!")
else:
print(f"Richtig, das war Spitze! du hast die Zahl {number} nur {tries} Versuche gebraucht!")
break
except ValueError:
print("Ungültige Eingabe!")
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import shlex
from textwrap import dedent
import pytest
from pants.backend.shell.target_types import (
ShellCommandRunTarget,
ShellCommandTarget,
ShellCommandTestTarget,
ShellSourcesGeneratorTarget,
)
from pants.backend.shell.util_rules.shell_command import (
GenerateFilesFromShellCommandRequest,
RunShellCommand,
ShellCommandProcessFromTargetRequest,
)
from pants.backend.shell.util_rules.shell_command import rules as shell_command_rules
from pants.core.goals.run import RunRequest
from pants.core.target_types import ArchiveTarget, FilesGeneratorTarget, FileSourceField
from pants.core.target_types import rules as core_target_type_rules
from pants.core.util_rules import archive, source_files
from pants.core.util_rules.adhoc_process_support import AdhocProcessRequest
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import Address
from pants.engine.environment import EnvironmentName
from pants.engine.fs import EMPTY_SNAPSHOT, DigestContents
from pants.engine.internals.native_engine import IntrinsicError
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.process import Process, ProcessExecutionFailure
from pants.engine.target import (
GeneratedSources,
GenerateSourcesRequest,
MultipleSourcesField,
TransitiveTargets,
TransitiveTargetsRequest,
)
from pants.testutil.rule_runner import QueryRule, RuleRunner, engine_error
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*archive.rules(),
*shell_command_rules(),
*source_files.rules(),
*core_target_type_rules(),
QueryRule(GeneratedSources, [GenerateFilesFromShellCommandRequest]),
QueryRule(Process, [AdhocProcessRequest]),
QueryRule(Process, [EnvironmentName, ShellCommandProcessFromTargetRequest]),
QueryRule(RunRequest, [RunShellCommand]),
QueryRule(SourceFiles, [SourceFilesRequest]),
QueryRule(TransitiveTargets, [TransitiveTargetsRequest]),
],
target_types=[
ShellCommandTarget,
ShellCommandRunTarget,
ShellCommandTestTarget,
ShellSourcesGeneratorTarget,
ArchiveTarget,
FilesGeneratorTarget,
],
)
rule_runner.set_options([], env_inherit={"PATH"})
return rule_runner
def assert_shell_command_result(
rule_runner: RuleRunner,
address: Address,
expected_contents: dict[str, str],
) -> None:
generator_type: type[GenerateSourcesRequest] = GenerateFilesFromShellCommandRequest
target = rule_runner.get_target(address)
result = rule_runner.request(GeneratedSources, [generator_type(EMPTY_SNAPSHOT, target)])
assert result.snapshot.files == tuple(expected_contents)
contents = rule_runner.request(DigestContents, [result.snapshot.digest])
for fc in contents:
assert fc.content == expected_contents[fc.path].encode()
def assert_logged(caplog, expect_logged=None):
if expect_logged:
assert len(caplog.records) == len(expect_logged)
for idx, (lvl, msg) in enumerate(expect_logged):
log_record = caplog.records[idx]
assert msg in log_record.message
assert lvl == log_record.levelno
else:
assert not caplog.records
def test_sources_and_files(rule_runner: RuleRunner) -> None:
MSG = ["Hello shell_command", ", nice cut."]
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
shell_command(
name="hello",
execution_dependencies=[":build-utils", ":files"],
tools=[
"bash",
"cat",
"env",
"mkdir",
"tee",
],
output_files=["message.txt"],
output_directories=["res"],
command="./script.sh",
root_output_directory=".",
)
files(
name="files",
sources=["*.txt"],
)
shell_sources(name="build-utils")
"""
),
"src/intro.txt": MSG[0],
"src/outro.txt": MSG[1],
"src/script.sh": (
"#!/usr/bin/env bash\n"
"mkdir res && cat *.txt > message.txt && cat message.txt | tee res/log.txt"
),
}
)
# Set script.sh mode to rwxr-xr-x.
rule_runner.chmod("src/script.sh", 0o755)
RES = "".join(MSG)
assert_shell_command_result(
rule_runner,
Address("src", target_name="hello"),
expected_contents={
"message.txt": RES,
"res/log.txt": RES,
},
)
def test_quotes_command(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
shell_command(
name="quotes",
tools=["echo", "tee"],
command='echo "foo bar" | tee out.log',
output_files=["out.log"],
root_output_directory=".",
)
"""
),
}
)
assert_shell_command_result(
rule_runner,
Address("src", target_name="quotes"),
expected_contents={"out.log": "foo bar\n"},
)
def test_chained_shell_commands(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/a/BUILD": dedent(
"""\
shell_command(
name="msg",
tools=["echo"],
output_files=["../msg"],
command="echo 'shell_command:a' > ../msg",
)
"""
),
"src/b/BUILD": dedent(
"""\
shell_command(
name="msg",
tools=["cp", "echo"],
output_files=["../msg"],
command="echo 'shell_command:b' >> ../msg",
execution_dependencies=["src/a:msg"],
)
"""
),
}
)
assert_shell_command_result(
rule_runner,
Address("src/a", target_name="msg"),
expected_contents={"src/msg": "shell_command:a\n"},
)
assert_shell_command_result(
rule_runner,
Address("src/b", target_name="msg"),
expected_contents={"src/msg": "shell_command:a\nshell_command:b\n"},
)
def test_chained_shell_commands_with_workdir(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/a/BUILD": dedent(
"""\
shell_command(
name="msg",
tools=["echo"],
output_files=["msg"],
command="echo 'shell_command:a' > msg",
workdir="/",
)
"""
),
"src/b/BUILD": dedent(
"""\
shell_command(
name="msg",
tools=["cp", "echo"],
output_files=["msg"],
command="echo 'shell_command:b' >> msg",
execution_dependencies=["src/a:msg"],
workdir="/",
)
"""
),
}
)
assert_shell_command_result(
rule_runner,
Address("src/a", target_name="msg"),
expected_contents={"msg": "shell_command:a\n"},
)
assert_shell_command_result(
rule_runner,
Address("src/b", target_name="msg"),
expected_contents={"msg": "shell_command:a\nshell_command:b\n"},
)
def test_side_effecting_command(caplog, rule_runner: RuleRunner) -> None:
caplog.set_level(logging.INFO)
caplog.clear()
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
shell_command(
name="side-effect",
command="echo 'server started' && echo 'warn msg' >&2",
tools=["echo"],
log_output=True,
)
"""
),
}
)
assert_shell_command_result(
rule_runner,
Address("src", target_name="side-effect"),
expected_contents={},
)
assert_logged(
caplog,
[
(logging.INFO, "server started\n"),
(logging.WARNING, "warn msg\n"),
],
)
def test_tool_search_path_stable(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
shell_command(
name="paths",
command="mkdir subdir; cd subdir; ls .",
tools=["cd", "ls", "mkdir"],
)
"""
),
}
)
assert_shell_command_result(
rule_runner,
Address("src", target_name="paths"),
expected_contents={},
)
def test_shell_command_masquerade_as_a_files_target(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
shell_command(
name="content-gen",
command="echo contents > contents.txt",
tools=["echo"],
output_files=["contents.txt"],
root_output_directory=".",
)
"""
),
}
)
src_contents = rule_runner.get_target(Address("src", target_name="content-gen"))
sources = rule_runner.request(
SourceFiles,
[
SourceFilesRequest(
(src_contents[MultipleSourcesField],),
enable_codegen=True,
for_sources_types=(FileSourceField,),
)
],
)
assert sources.files == ("contents.txt",)
assert sources.unrooted_files == sources.files
contents = rule_runner.request(DigestContents, [sources.snapshot.digest])
assert len(contents) == 1
fc = contents[0]
assert fc.path == "contents.txt"
assert fc.content == b"contents\n"
def test_package_dependencies(caplog, rule_runner: RuleRunner) -> None:
caplog.set_level(logging.INFO)
caplog.clear()
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
shell_command(
name="msg-gen",
command="echo message > msg.txt",
tools=["echo"],
output_files=["msg.txt"],
)
archive(
name="msg-archive",
format="zip",
files=[":msg-gen"],
)
shell_command(
name="test",
command="ls",
tools=["ls"],
log_output=True,
execution_dependencies=[":msg-archive"],
)
"""
),
}
)
assert_shell_command_result(
rule_runner, Address("src", target_name="test"), expected_contents={}
)
assert_logged(
caplog,
[
(logging.INFO, "msg-archive.zip\n"),
],
)
def test_execution_dependencies(caplog, rule_runner: RuleRunner) -> None:
caplog.set_level(logging.INFO)
caplog.clear()
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
shell_command(
name="a1",
command="echo message > msg.txt",
output_files=["msg.txt"],
workdir="/",
)
shell_command(
name="a2",
tools=["cat"],
command="cat msg.txt > msg2.txt",
execution_dependencies=[":a1",],
output_files=["msg2.txt",],
workdir="/",
)
# Fails because runtime dependencies are not exported
# transitively
shell_command(
name="expect_fail_1",
tools=["cat"],
command="cat msg.txt",
execution_dependencies=[":a2",],
workdir="/",
)
# Fails because `output_dependencies` are not available at runtime
shell_command(
name="expect_fail_2",
tools=["cat"],
command="cat msg.txt",
execution_dependencies=(),
output_dependencies=[":a1"],
workdir="/",
)
# Fails because `output_dependencies` are not available at runtime
shell_command(
name="expect_fail_3",
tools=["cat"],
command="cat msg.txt",
output_dependencies=[":a1"],
workdir="/",
)
# Fails because execution dependencies are not fetched transitively
# even if the root is requested through `output_dependencies`
shell_command(
name="expect_fail_4",
tools=["cat"],
command="cat msg.txt",
output_dependencies=[":a2"],
workdir="/",
)
# Succeeds because `a1` and `a2` are requested directly
shell_command(
name="expect_success_1",
tools=["cat"],
command="cat msg.txt msg2.txt > output.txt",
execution_dependencies=[":a1", ":a2",],
output_files=["output.txt"],
workdir="/",
)
                # Succeeds because `a1` and `a2` are requested directly and `output_dependencies`
# are made available at runtime
shell_command(
name="expect_success_2",
tools=["cat"],
command="cat msg.txt msg2.txt > output.txt",
execution_dependencies=[":a1", ":a2",],
output_dependencies=[":a1", ":a2",],
output_files=["output.txt"],
workdir="/",
)
"""
),
}
)
for i in range(1, 5):
with engine_error(ProcessExecutionFailure):
assert_shell_command_result(
rule_runner, Address("src", target_name=f"expect_fail_{i}"), expected_contents={}
)
assert_shell_command_result(
rule_runner,
Address("src", target_name="expect_success_1"),
expected_contents={"output.txt": "message\nmessage\n"},
)
assert_shell_command_result(
rule_runner,
Address("src", target_name="expect_success_2"),
expected_contents={"output.txt": "message\nmessage\n"},
)
@pytest.mark.parametrize(
("workdir", "expected_boot"),
[
(None, "cd src; "),
(".", "cd src; "),
("/", ""),
("src/with space'n quote", """cd 'src/with space'\"'\"'n quote'; """),
("./with space'n quote", """cd 'src/with space'\"'\"'n quote'; """),
],
)
def test_run_shell_command_request(
rule_runner: RuleRunner, workdir: None | str, expected_boot: str
) -> None:
rule_runner.write_files(
{
"src/BUILD": dedent(
f"""\
run_shell_command(
name="test",
command="some cmd string",
workdir={workdir!r},
)
"""
),
}
)
args = ("bash", "-c", expected_boot + "some cmd string", "pants run src:test --")
tgt = rule_runner.get_target(Address("src", target_name="test"))
run = RunShellCommand.create(tgt)
request = rule_runner.request(RunRequest, [run])
assert len(args) == len(request.args)
# handle the binary name specially, because the path may differ
assert args[0] in request.args[0]
for arg, request_arg in zip(args[1:], request.args[1:]):
assert arg == request_arg
@pytest.mark.parametrize(
("tool_name", "should_succeed"),
(
("python3.8", True),
("cd", False),
("floop", False),
),
)
def test_path_populated_with_tools(
caplog, rule_runner: RuleRunner, tool_name: str, should_succeed: bool
) -> None:
caplog.set_level(logging.INFO)
caplog.clear()
rule_runner.write_files(
{
"src/BUILD": dedent(
f"""\
shell_command(
name="tools-populated",
tools=["which", "{tool_name}"],
command='which {tool_name}',
log_output=True,
)
"""
)
}
)
try:
assert_shell_command_result(
rule_runner,
Address("src", target_name="tools-populated"),
expected_contents={},
)
except ExecutionError as exerr:
if should_succeed:
raise exerr
if should_succeed:
assert caplog.records[0].msg.strip().endswith("python3.8")
else:
# `which` is silent in `bash` when nothing is found
assert not caplog.records
def test_shell_command_boot_script(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
shell_command(
name="boot-script-test",
tools=[
"python3.8",
],
command="./command.script",
workdir=".",
)
"""
),
}
)
tgt = rule_runner.get_target(Address("src", target_name="boot-script-test"))
res = rule_runner.request(Process, [ShellCommandProcessFromTargetRequest(tgt)])
assert "bash" in res.argv[0]
assert res.argv[1] == "-c"
assert res.argv[2].startswith("cd src &&")
assert "bash -c" in res.argv[2]
assert res.argv[2].endswith(shlex.quote("./command.script") + " src:boot-script-test")
assert "PATH" in res.env
def test_shell_command_boot_script_in_build_root(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"BUILD": dedent(
"""\
shell_command(
name="boot-script-test",
tools=[
"python3.8",
],
command="./command.script",
)
"""
),
}
)
tgt = rule_runner.get_target(Address("", target_name="boot-script-test"))
res = rule_runner.request(Process, [ShellCommandProcessFromTargetRequest(tgt)])
assert "bash" in res.argv[0]
assert res.argv[1] == "-c"
assert "bash -c" in res.argv[2]
assert res.argv[2].endswith(shlex.quote("./command.script") + " //:boot-script-test")
def test_shell_command_extra_env_vars(caplog, rule_runner: RuleRunner) -> None:
caplog.set_level(logging.INFO)
caplog.clear()
rule_runner.set_options([], env={"FOO": "foo"}, env_inherit={"PATH"})
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
shell_command(
name="extra-env-test",
tools=["echo"],
extra_env_vars=["FOO", "HELLO=world", "BAR"],
command='echo FOO="$FOO" HELLO="$HELLO" BAR="$BAR"',
log_output=True,
)
"""
)
}
)
assert_shell_command_result(
rule_runner,
Address("src", target_name="extra-env-test"),
expected_contents={},
)
assert_logged(caplog, [(logging.INFO, "FOO=foo HELLO=world BAR=\n")])
def test_relative_directories(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
shell_command(
name="quotes",
tools=["echo"],
command='echo foosh > ../foosh.txt',
output_files=["../foosh.txt"],
)
"""
),
}
)
assert_shell_command_result(
rule_runner,
Address("src", target_name="quotes"),
expected_contents={"foosh.txt": "foosh\n"},
)
def test_relative_directories_2(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
shell_command(
name="quotes",
tools=["echo"],
command='echo foosh > ../newdir/foosh.txt',
output_files=["../newdir/foosh.txt"],
)
"""
),
}
)
assert_shell_command_result(
rule_runner,
Address("src", target_name="quotes"),
expected_contents={"newdir/foosh.txt": "foosh\n"},
)
def test_cannot_escape_build_root(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
shell_command(
name="quotes",
tools=["echo"],
command='echo foosh > ../../invalid.txt',
output_files=["../../invalid.txt"],
)
"""
),
}
)
with engine_error(IntrinsicError):
assert_shell_command_result(
rule_runner,
Address("src", target_name="quotes"),
expected_contents={"../../invalid.txt": "foosh\n"},
)
def test_missing_tool_called(
caplog,
rule_runner: RuleRunner,
) -> None:
caplog.set_level(logging.INFO)
caplog.clear()
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
shell_command(
name="gerald-is-not-here",
command="gerald hello",
log_output=True,
)
"""
)
}
)
with pytest.raises(ExecutionError):
assert_shell_command_result(
rule_runner,
Address("src", target_name="gerald-is-not-here"),
expected_contents={},
)
assert "requires the names of any external commands" in caplog.text
def test_env_vars(rule_runner: RuleRunner) -> None:
envvar_value = "clang"
rule_runner.write_files(
{
"src/BUILD": dedent(
f"""\
shell_command(
name="envvars",
tools=[],
command='echo $ENVVAR > out.log',
output_files=["out.log"],
extra_env_vars=["ENVVAR={envvar_value}"],
root_output_directory=".",
)
"""
),
}
)
assert_shell_command_result(
rule_runner,
Address("src", target_name="envvars"),
expected_contents={"out.log": f"{envvar_value}\n"},
)
_DEFAULT = object()
@pytest.mark.parametrize(
("workdir", "expected_dir"),
[
("src", "/src"),
(".", "/src"),
("./", "/src"),
("./dst", "/src/dst"),
("/", ""),
("", ""),
("/src", "/src"),
("/dst", "/dst"),
(None, "/src"),
],
)
def test_working_directory_special_values(
rule_runner: RuleRunner, workdir: str | None, expected_dir: str
) -> None:
rule_runner.write_files(
{
"src/BUILD": dedent(
f"""\
shell_command(
name="workdir",
tools=['sed'],
command='echo $PWD | sed s@^{{chroot}}@@ > out.log',
workdir={workdir!r},
output_files=["out.log"],
root_output_directory=".",
)
"""
),
}
)
assert_shell_command_result(
rule_runner,
Address("src", target_name="workdir"),
expected_contents={"out.log": f"{expected_dir}\n"},
)
|
#!/usr/bin/python3
from datetime import datetime
from utils.date import to_python_date_format
class TimeSeriesRow:
"""
Represents each row in the TimeSeries that will be analyzed by time_series_analysis_service
Attributes
- date (datetime) : date in the row
- value (float) : corresponding value
"""
def __init__(self, date: datetime, value: float):
self.date = date
self.value = value
def get_date(self) -> datetime:
return self.date
def get_value(self) -> float:
return self.value
def __eq__(self, other):
return isinstance(other, TimeSeriesRow) and self.date == other.date and int(self.value) == int(other.value)
def to_json(self, date_format: str) -> dict:
return dict(date=self.date.strftime(to_python_date_format(date_format)),
value=float(self.value))
@classmethod
def from_json(cls, data: dict, date_format: str):
date = datetime.strptime(data["date"], to_python_date_format(date_format))
return cls(date, float(data["value"]))
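# A minimal usage sketch; the "yyyy-MM-dd" format string and the way
# to_python_date_format maps it to a strptime pattern are assumptions for
# illustration, not part of this module.
if __name__ == "__main__":
    row = TimeSeriesRow(datetime(2020, 1, 31), 42.0)
    as_json = row.to_json("yyyy-MM-dd")
    assert TimeSeriesRow.from_json(as_json, "yyyy-MM-dd") == row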
|
from . import models
from django import forms
from captcha.fields import CaptchaField
class VideoForm(forms.ModelForm):
captcha = CaptchaField(label='captcha')
class Meta:
model = models.Video
fields = ['title', 'description', 'url']
widgets = {
            'title': forms.TextInput(attrs={'class': 'form-control'}),
'description': forms.Textarea(attrs={'class': 'form-control'}),
'url': forms.TextInput(attrs={'class': 'form-control'}),
}
class ArticleForm(forms.ModelForm):
captcha = CaptchaField(label='captcha')
class Meta:
model = models.Article
fields = ['title', 'description', 'part_1', 'part_2', 'part_3']
widgets = {
            'title': forms.TextInput(attrs={'class': 'form-control'}),
'description': forms.Textarea(attrs={'class': 'form-control'}),
'part_1': forms.Textarea(attrs={'class': 'form-control'}),
'part_2': forms.Textarea(attrs={'class': 'form-control'}),
'part_3': forms.Textarea(attrs={'class': 'form-control'}),
}
|
# LEVEL 21
# (zip from previous level)
import bz2
import zipfile
import zlib
with zipfile.ZipFile('data/level_20.zip') as myzip:
for zi in myzip.infolist():
print(zi)
# print(zi.comment)
with myzip.open('readme.txt', 'r', pwd=b'redavni') as zf:
# print(myzip.getinfo('readme.txt'))
for l in zf:
print(l)
with myzip.open('package.pack', 'r', pwd=b'redavni') as zf2:
data = zf2.read()
known = [b'x\x9c', b'BZ']
stop = False
message = ''
accum = 0
while not stop:
decompressed = 0
while data[:2] == b'x\x9c':
decompressed += 1
data = zlib.decompress(data)
message += ' '
# print('zlib:', decompressed)
accum += decompressed
decompressed = 0
while data[:3] == b'BZh':
decompressed += 1
data = bz2.decompress(data)
message += '*'
# print('bz2:', decompressed)
accum += decompressed
if data[:2] not in known:
data = data[::-1]
# print('inv', accum)
message += '\n'
accum = 0
if data[:2] not in known:
stop = True
print(len(data))
print(data[:20])
print(data[-20:])
print(message)
print(len(message))
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.python.util_rules import pex
from pants.backend.python.util_rules.lockfile_diff import _generate_python_lockfile_diff
from pants.core.goals.generate_lockfiles import GenerateLockfileResult, LockfileDiff
from pants.engine.rules import rule
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, QueryRule, RuleRunner
@rule
async def helper_fixture(result: GenerateLockfileResult) -> LockfileDiff:
return await _generate_python_lockfile_diff(result.digest, result.resolve_name, result.path)
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
helper_fixture,
*pex.rules(),
QueryRule(LockfileDiff, [GenerateLockfileResult]),
]
)
rule_runner.set_options([], env_inherit=PYTHON_BOOTSTRAP_ENV)
return rule_runner
def test_load_lockfile(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"reqs/test.lock": lockfile_contents["old"]})
snapshot = rule_runner.make_snapshot({"reqs/test.lock": lockfile_contents["new"]})
lockfile = GenerateLockfileResult(
digest=snapshot.digest,
resolve_name="testing",
path="reqs/test.lock",
diff=None,
)
diff = rule_runner.request(LockfileDiff, [lockfile])
assert diff.path == "reqs/test.lock"
assert diff.resolve_name == "testing"
assert {req: tuple(map(str, vers)) for req, vers in diff.upgraded.items()} == dict(
cowsay=("4.0", "5.0")
)
lockfile_contents = dict(
old=dedent(
"""\
// This lockfile was autogenerated by Pants. To regenerate, run:
//
// ./pants run build-support/bin/generate_builtin_lockfiles.py"
//
// --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
// {
// "version": 3,
// "valid_for_interpreter_constraints": [
// "CPython==3.9.*"
// ],
// "generated_with_requirements": [
// "cowsay<5"
// ],
// "manylinux": "manylinux2014",
// "requirement_constraints": [],
// "only_binary": [],
// "no_binary": []
// }
// --- END PANTS LOCKFILE METADATA ---
{
"allow_builds": true,
"allow_prereleases": false,
"allow_wheels": true,
"build_isolation": true,
"constraints": [],
"locked_resolves": [
{
"locked_requirements": [
{
"artifacts": [
{
"algorithm": "sha256",
"hash": "2594b11d6624fff4bf5147b6bdd510ada54a7b5b4e3f2b15ac2a6d3cf99e0bf8",
"url": "https://files.pythonhosted.org/packages/b7/65/38f31ef16efc312562f68732098d6f7ba3b2c108a4aaa8ac8ba673ee0871/cowsay-4.0-py2.py3-none-any.whl"
},
{
"algorithm": "sha256",
"hash": "a9e1e5f957054010b7faa6406deb5f6aa5cb674498118bbbed0151f92c2dc20e",
"url": "https://files.pythonhosted.org/packages/e8/15/fcfe67988ffd8e6256363174ca78fca86c927f8e6e618fd178f95a97d4d6/cowsay-4.0.tar.gz"
}
],
"project_name": "cowsay",
"requires_dists": [],
"requires_python": null,
"version": "4.0"
}
],
"platform_tag": null
}
],
"path_mappings": {},
"pex_version": "2.1.116",
"pip_version": "20.3.4-patched",
"prefer_older_binary": false,
"requirements": [
"cowsay<5"
],
"requires_python": [
"==3.9.*"
],
"resolver_version": "pip-2020-resolver",
"style": "universal",
"target_systems": [
"linux",
"mac"
],
"transitive": true,
"use_pep517": null
}
"""
),
new=dedent(
"""\
// This lockfile was autogenerated by Pants. To regenerate, run:
//
// ./pants run build-support/bin/generate_builtin_lockfiles.py"
//
// --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
// {
// "version": 3,
// "valid_for_interpreter_constraints": [
// "CPython==3.9.*"
// ],
// "generated_with_requirements": [
// "cowsay"
// ],
// "manylinux": "manylinux2014",
// "requirement_constraints": [],
// "only_binary": [],
// "no_binary": []
// }
// --- END PANTS LOCKFILE METADATA ---
{
"allow_builds": true,
"allow_prereleases": false,
"allow_wheels": true,
"build_isolation": true,
"constraints": [],
"locked_resolves": [
{
"locked_requirements": [
{
"artifacts": [
{
"algorithm": "sha256",
"hash": "c00e02444f5bc7332826686bd44d963caabbaba9a804a63153822edce62bbbf3",
"url": "https://files.pythonhosted.org/packages/6b/b8/9f497fd045d74fe21d91cbe8debae0b451229989e35b539d218547d79fc6/cowsay-5.0.tar.gz"
}
],
"project_name": "cowsay",
"requires_dists": [],
"requires_python": null,
"version": "5.0"
}
],
"platform_tag": null
}
],
"path_mappings": {},
"pex_version": "2.1.116",
"pip_version": "20.3.4-patched",
"prefer_older_binary": false,
"requirements": [
"cowsay"
],
"requires_python": [
"==3.9.*"
],
"resolver_version": "pip-2020-resolver",
"style": "universal",
"target_systems": [
"linux",
"mac"
],
"transitive": true,
"use_pep517": null
}
"""
),
)
|
import time
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from 基于文本内容的垃圾短信识别.data_process import data_process
start = time.time()
data_str, data_after_stop, labels = data_process()
# Split the data into training and test sets
# The test set is 20% of the data
data_tr, data_te, labels_tr, labels_te = train_test_split(data_str, labels, test_size=0.20)
"""
--------------------------------- Model ------------------------------------
"""
# Convert the text into term-frequency counts
countVectorizer = CountVectorizer()
# Build the TF-IDF weight matrix for the training samples
data_tr = countVectorizer.fit_transform(data_tr)
x_tr = TfidfTransformer().fit_transform(data_tr.toarray()).toarray()
# TF-IDF weight matrix for the test samples
# Share the feature dimensions via vocabulary=countVectorizer.vocabulary_
data_te = CountVectorizer(vocabulary=countVectorizer.vocabulary_).fit_transform(data_te)
x_te = TfidfTransformer().fit_transform(data_te.toarray()).toarray()
# Create the model
model = GaussianNB()
# Train the model
model.fit(x_tr, labels_tr)
# Evaluate the model
acr = model.score(x_te, labels_te)
# ans=model.predict(x_te)
print("Accuracy:", acr)
# print(ans)
end = time.time()
print('Runtime:', end - start)
|
# Generated by Django 2.1.7 on 2019-03-11 16:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('milliard', '0013_auto_20190311_1734'),
]
operations = [
migrations.RenameField(
model_name='player',
old_name='help_peole',
new_name='help_people',
),
]
|
from CallBackOperator import CallBackOperator
from SignalGenerationPackage.DynamicPointsDensitySignal.DynamicPointsDensityUIParameters import DynamicPointsDensityUIParameters
class EndTimeCallBackOperator(CallBackOperator):
def __init__(self, model):
super().__init__(model)
# overridden
def ConnectCallBack(self, window):
self.window = window
self.setup_callback_and_synchronize_slider(
validator_min=DynamicPointsDensityUIParameters.EndTimeSliderMin,
validator_max=DynamicPointsDensityUIParameters.EndTimeSliderMax,
validator_accuracy=DynamicPointsDensityUIParameters.EndTimeLineEditAccuracy,
line_edit=window.EndTimelineEdit,
slider_min=DynamicPointsDensityUIParameters.EndTimeSliderMin,
slider_max=DynamicPointsDensityUIParameters.EndTimeSliderMax,
slider=window.EndTimehorizontalSlider,
update_slider_func=self.update_end_time_slider,
update_line_edit_func=self.update_end_time_line_edit
)
def update_end_time_slider(self):
self.update_slider(
line_edit=self.window.EndTimelineEdit,
slider=self.window.EndTimehorizontalSlider,
calc_constant=DynamicPointsDensityUIParameters.EndTimeCalcConstant
)
def update_end_time_line_edit(self):
self.update_line_edit(
line_edit=self.window.EndTimelineEdit,
slider=self.window.EndTimehorizontalSlider,
calc_constant=DynamicPointsDensityUIParameters.EndTimeCalcConstant,
update_model_func=self.update_end_time
)
def update_end_time(self, val):
self.model.EndTime = val
|
import numpy as np
class QAgent:
def __init__(self, nbins, state_space, action_space, epsilon=0.1, gamma=0.9, alpha=0.1):
self.epsilon = epsilon # exploration probability
self.gamma = gamma # discount factor
self.alpha = alpha # learning rate
self.nbins = nbins # for discretizing state space
self.state_space = state_space
self.action_space = action_space
self.q_table = {}
def preprocess_state(self, state):
env_low = self.state_space.low
env_high = self.state_space.high
# truncate bins where range is too large
env_low[env_low < -4] = -4
env_high[env_high > 4] = 4
states = []
for i in range(len(state)):
env_dx = (env_high[i] - env_low[i])/self.nbins[i]
x = int((state[i] - env_low[i])/env_dx)
if x >= self.nbins[i]:
x -= 1
states.append(x)
return tuple(states)
def get_epsilon(self, i):
return self.epsilon
def sample_action(self, state, eps=0):
if np.random.uniform(0,1) < eps:
return np.random.choice(self.action_space.n)
return np.argmax(self.get_q_value(state))
def get_q_value(self, state, action=None):
if state not in self.q_table:
self.q_table[state] = np.zeros(self.action_space.n)
if action is None:
return self.q_table[state]
else:
return self.q_table[state][action]
def update(self, state, action, reward, next_state, done):
q_delta = reward - self.get_q_value(state, action)
if not done:
q_delta += self.gamma*np.max(self.get_q_value(next_state))
self.q_table[state][action] += self.alpha*q_delta
def run(self, env, nepisodes=200):
scores = np.zeros((nepisodes,))
for i in range(nepisodes):
done = False
state = self.preprocess_state(env.reset())
while not done:
action = self.sample_action(state, self.get_epsilon(i))
next_state, reward, done, info = env.step(action)
next_state = self.preprocess_state(next_state)
self.update(state, action, reward, next_state, done)
state = next_state
scores[i] += reward
return scores
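# Hedged usage sketch: assumes the classic `gym` API where env.reset() returns the
# raw observation and env.step() returns a 4-tuple, which is what run() above
# expects; the CartPole environment and the bin counts are illustrative choices.
if __name__ == "__main__":
    import gym
    env = gym.make("CartPole-v0")
    agent = QAgent(nbins=(6, 6, 6, 6),
                   state_space=env.observation_space,
                   action_space=env.action_space)
    scores = agent.run(env, nepisodes=200)
    print("mean reward:", scores.mean())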
|
import turtle
class Polygon:
def __init__(self, sides, name, size=100, color="black", line_thickness=2):
self.sides = sides
self.name = name
self.size = size
self.color = color
self.line_thickness = line_thickness
self.interior_angles = (self.sides-2)*180
self.angle = self.interior_angles/self.sides
def draw(self):
turtle.color(self.color)
turtle.pensize(self.line_thickness)
for i in range(self.sides):
turtle.forward(self.size)
turtle.right(180-self.angle)
class Square(Polygon):
def __init__(self, size=100, color="black", line_thickness=3):
super().__init__(4, "Square", size, color, line_thickness)
def draw(self):
turtle.begin_fill()
super().draw()
turtle.end_fill()
square = Square(100, color="#123abc", line_thickness=5)
pentagon = Polygon(5, "Pentagon", 100)
hexagon = Polygon(6, "Hexagon", color="orange", line_thickness=10)
print(square.sides)
print(square.name)
print(square.interior_angles)
print(square.angle)
square.draw()
# pentagon.draw()
# hexagon.draw()
turtle.done()
|
import torch
from utils.model_utils import get_mask_from_lengths
def get_loss(outputs, targets, lengths, max_len=None):
mask = get_mask_from_lengths(lengths, max_len)
# Remove SOS character from the beginning of target sequence
loss = torch.nn.functional.cross_entropy(outputs[:, :-1, :].transpose(1, 2),
targets[:, 1:], reduction='none')
loss = loss.masked_fill(mask[:, 1:], 0).sum(dim=-1) / (lengths - 1)
return loss.mean()
def get_binf_loss(outputs, targets, lengths, max_len=None):
mask = get_mask_from_lengths(lengths, max_len).unsqueeze(-1).repeat(1, 1, targets.size(-1))
# Remove SOS vector from the beginning of target sequence
loss = torch.nn.functional.binary_cross_entropy_with_logits(outputs[:, :-1, :],
targets[:, 1:, :],
reduction='none')
loss = loss.masked_fill(mask[:, 1:, :], 0).sum(dim=-1).sum(dim=-1) / (lengths - 1)
return loss.mean()
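# Hedged smoke test; the tensor shapes are arbitrary, and get_mask_from_lengths is
# assumed to return a (batch, max_len) bool mask that is True at padded positions,
# matching the masked_fill calls above.
if __name__ == "__main__":
    outputs = torch.randn(2, 5, 7)           # (batch, time, num_classes)
    targets = torch.randint(0, 7, (2, 5))    # (batch, time), position 0 is SOS
    lengths = torch.tensor([5, 3])
    print(get_loss(outputs, targets, lengths, max_len=5))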
|
# -*- coding: utf-8 -*-
"""Tests for simple_history extensions."""
from json import dumps, loads
from django.contrib.auth.models import User
from webplatformcompat.history import Changeset
from webplatformcompat.models import Browser
from .base import APITestCase
class TestBaseMiddleware(APITestCase):
"""Test the HistoryChangesetRequestMiddleware.
This should be tested by a subclass that defines:
* namespace - the API namespace, such as v1
* api_data - A function returning data in the API format
"""
__test__ = False # Don't test outside of a versioned API
content_type = 'application/vnd.api+json'
def url(self, changeset):
"""Return the test URL."""
return (
self.api_reverse('browser-list') +
'?use_changeset=%s' % changeset.id)
def test_post_with_changeset(self):
self.login_user()
changeset = Changeset.objects.create(user=self.user)
url = self.url(changeset)
response = self.client.post(
url, dumps(self.api_data()), content_type=self.content_type)
self.assertEqual(201, response.status_code, response.data)
browser = Browser.objects.get()
history = browser.history.get()
self.assertEqual(changeset.id, history.history_changeset_id)
def test_post_with_changeset_wrong_user(self):
self.login_user()
other = User.objects.create(username='other')
changeset = Changeset.objects.create(user=other)
url = self.url(changeset)
response = self.client.post(
url, dumps(self.api_data()), content_type=self.content_type)
self.assertEqual(400, response.status_code)
expected = {
'errors': {
'changeset': (
'Changeset %s has a different user.' % changeset.id)
}
}
self.assertDataEqual(expected, loads(response.content.decode('utf-8')))
def test_post_with_closed_changeset(self):
self.login_user()
changeset = Changeset.objects.create(user=self.user, closed=True)
url = self.url(changeset)
response = self.client.post(
url, dumps(self.api_data()), content_type=self.content_type)
self.assertEqual(400, response.status_code)
expected = {
'errors': {
'changeset': 'Changeset %s is closed.' % changeset.id}}
self.assertDataEqual(expected, loads(response.content.decode('utf-8')))
def test_post_with_error_not_json_api(self):
self.login_user()
changeset = Changeset.objects.create(user=self.user, closed=True)
url = self.url(changeset)
data = {'slug': 'firefox', 'name': '{"en": "Firefox"}'}
response = self.client.post(url, data)
self.assertEqual(400, response.status_code)
expected = 'Changeset %s is closed.' % changeset.id
self.assertDataEqual(expected, response.content.decode('utf-8'))
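# A hedged sketch of the versioned subclass the docstring above calls for; the v1
# namespace and the api_data payload here are assumptions for illustration only:
#
#     class TestV1Middleware(TestBaseMiddleware):
#         __test__ = True
#         namespace = 'v1'
#
#         def api_data(self):
#             return {'browsers': {'slug': 'firefox', 'name': {'en': 'Firefox'}}}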
|
"Handle command"
import threading
import main
import config as cf
import re
command_list = {'stop': lambda x: main.stop()}
user_command_list = {}
class Command(threading.Thread):
"Handle command"
def __init__(self):
threading.Thread.__init__(self)
self.running = True
def run(self):
"run"
print("Start Command System")
# wait for handle commands
while self.running:
if not self.running:
return
str1 = input("> ")
command = str1.split()
if not command:
continue
command_list.get(
command[0],
lambda cmd: print("Not found command \"%s\", \"help\"" % cmd))(command)
def stop(self):
"stop"
print("stop command system")
self.running = False
def load_commands():
"load commands"
# exit command
command_list.update({'quit': lambda x: main.stop()})
command_list.update({'exit': lambda x: main.stop()})
command_list.update({'help': lambda x: help()})
# load custom commands
custom_commands = cf.config["Command"]["custom_commands"]
for k in custom_commands.items():
call = lambda x, y=k[1]: y
add_user_command("/" + k[0], call)
add_user_command("/help", user_help)
def add_command(keyword, callback):
"Add command handler"
command_list.update({keyword: callback})
def add_user_command(keyword, callback):
"Add command handler for user input"
print("Add user command \"" + keyword + "\"")
user_command_list.update({keyword: callback})
def user_command_handler(command, reply_handler):
"chatbot api call this to handler user commands"
check = r'^/[a-zA-Z_]*'
match = re.match(check, command)
if not command or not match:
return
command_name = match.group(0)
reply = user_command_list.get(
command_name,
lambda cmd, command_name=command_name:
("Not found command \"%s\", /help" % command_name))(command)
reply_handler(reply)
print(command)
print(reply)
def user_help(command):
result = "**Command list**\n"
for k in user_command_list:
result += k + '\n'
return result
def help():
for k in command_list:
print(k)
def start_command_system():
"run command system"
__command__ = Command()
__command__.start()
@main.stop_handler
def stop():
"stop"
__command__.stop()
|
from django.urls import path
from AdminApp.views import *
urlpatterns = [
path('', admin),
path('get_user_form/<id>/', get_user_form),
path('user/add/', add_user),
path('get_category/<_id>/', get_category),
path('get_user/<id>/', get_user),
path('delete_user/', delete_user),
path('category/', list_category),
path('category/<category_id>/all/', list_good),
path('category/delete/<_id>/', delete_category),
path('category/add/', add_category),
path('category/edit/<_id>/', edit_category),
path('good/', list_good),
path('good/delete/<_id>/', delete_good),
path('good/add/', add_good),
path('good/edit/<_id>/', edit_good),
path('get_good/<_id>/', get_good),
path('menu/', list_menu),
path('menu/delete/<_id>/', delete_menu),
path('menu/add/', add_menu),
path('menu/edit/<_id>/', edit_menu),
path('get_menu/<_id>/', get_menu),
path('album/', list_album),
path('album/delete/<_id>/', delete_album),
path('album/add/', add_album),
path('album/edit/<_id>/', edit_album),
path('get_album/<_id>/', get_album),
path('image/delete/', delete_image),
path('upload/<_id>/', upload),
path('price/', list_price),
path('price/delete/<_id>/', delete_price),
path('price/add/', add_price),
path('price/edit/<_id>/', edit_price),
path('get_price/<_id>/', get_price),
path('news/', list_news),
path('news/delete/<_id>/', delete_news),
path('news/add/', add_news),
path('news/edit/<_id>/', edit_news),
path('get_news/<_id>/', get_news),
path('about/', list_organization),
path('about/delete/<_id>/', delete_organization),
path('about/edit/<_id>/', edit_organization),
path('get_organization/<_id>/', get_organization),
path('contact/', list_contact),
path('contact/delete/<_id>/', delete_contact),
path('contact/add/', add_contact),
path('contact/edit/<_id>/', edit_contact),
path('get_contact/<_id>/', get_contact),
]
|
CONFIG = """
version: '2'
networks:
byfn:
services:
"""
CA_TEMPLATE = """
ca0:
image: hyperledger/fabric-ca:$IMAGE_TAG
environment:
- FABRIC_CA_HOME=/etc/hyperledger/fabric-ca-server
- FABRIC_CA_SERVER_CA_NAME=ca-org1
- FABRIC_CA_SERVER_TLS_ENABLED=true
- FABRIC_CA_SERVER_TLS_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem
- FABRIC_CA_SERVER_TLS_KEYFILE=/etc/hyperledger/fabric-ca-server-config/${BYFN_CA1_PRIVATE_KEY}
- FABRIC_CA_SERVER_PORT=7054
ports:
- "TEMP_PORT:TEMP_PORT"
command: sh -c 'fabric-ca-server start --ca.certfile /etc/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --ca.keyfile /etc/hyperledger/fabric-ca-server-config/${BYFN_CA1_PRIVATE_KEY} -b admin:adminpw -d'
volumes:
- ./crypto-config/peerOrganizations/org1.example.com/ca/:/etc/hyperledger/fabric-ca-server-config
container_name: ca_peerOrg1
networks:
- byfn
"""
if __name__ == '__main__':
    # Minimal sketch of an entry point, assuming the intent was to render the
    # templates above into a docker-compose file; the 7054 port and the output
    # filename are assumptions, not part of the original script.
    compose = CONFIG + CA_TEMPLATE.replace("TEMP_PORT", "7054")
    with open("docker-compose-ca.yaml", "w") as f:
        f.write(compose)
|
import pytest
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from openslides.core.config import config
from openslides.core.models import Projector, Tag
from openslides.users.models import User
from openslides.utils.autoupdate import inform_changed_data
from openslides.utils.test import TestCase
from tests.common_groups import GROUP_ADMIN_PK, GROUP_DELEGATE_PK
from ..helpers import count_queries
@pytest.mark.django_db(transaction=False)
def test_projector_db_queries():
"""
Tests that only the following db queries are done:
    * 1 request to get the list of all projectors,
* 1 request to get the list of the projector defaults.
"""
for index in range(10):
Projector.objects.create(name=f"Projector{index}")
assert count_queries(Projector.get_elements) == 2
@pytest.mark.django_db(transaction=False)
def test_tag_db_queries():
"""
Tests that only the following db queries are done:
    * 1 request to get the list of all tags.
"""
for index in range(10):
Tag.objects.create(name=f"tag{index}")
assert count_queries(Tag.get_elements) == 1
@pytest.mark.django_db(transaction=False)
def test_config_db_queries():
"""
Tests that only the following db queries are done:
    * 1 request to get the list of all config values.
"""
config.save_default_values()
assert count_queries(Tag.get_elements) == 1
class ProjectorViewSet(TestCase):
"""
Tests (currently just parts) of the ProjectorViewSet.
"""
def setUp(self):
self.client = APIClient()
self.client.login(username="admin", password="admin")
def test_create(self):
response = self.client.post(
reverse("projector-list"), {"name": "test_name_efIOLJHF32f&EF)NG3fw"}
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# pk=1 should be the default projector and pk=2 the new one
self.assertEqual(Projector.objects.all().count(), 2)
self.assertTrue(Projector.objects.filter(pk=2).exists())
projector = Projector.objects.get(pk=2)
self.assertEqual(projector.name, "test_name_efIOLJHF32f&EF)NG3fw")
self.assertEqual(projector.elements, [{"name": "core/clock", "stable": True}])
def test_create_no_data(self):
response = self.client.post(reverse("projector-list"))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(Projector.objects.all().count(), 1)
def test_no_permission(self):
admin = User.objects.get(username="admin")
admin.groups.add(GROUP_DELEGATE_PK)
admin.groups.remove(GROUP_ADMIN_PK)
inform_changed_data(admin)
response = self.client.post(reverse("projector-list"))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(Projector.objects.all().count(), 1)
class Projection(TestCase):
"""
Tests the projection view.
"""
def setUp(self):
self.client = APIClient()
self.client.login(username="admin", password="admin")
self.projector = Projector.objects.get(pk=1) # the default projector
def test_add_element(self):
elements = [{"name": "core/clock"}]
response = self.client.post(
reverse("projector-project", args=[self.projector.pk]),
{"elements": elements},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.projector = Projector.objects.get(pk=1)
self.assertEqual(self.projector.elements, elements)
self.assertEqual(self.projector.elements_preview, [])
self.assertEqual(self.projector.elements_history, [])
def test_add_element_without_name(self):
response = self.client.post(
reverse("projector-project", args=[self.projector.pk]),
{"elements": [{}]},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.projector = Projector.objects.get(pk=1)
self.assertEqual(self.projector.elements, [])
self.assertEqual(self.projector.elements_preview, [])
self.assertEqual(self.projector.elements_history, [])
def test_no_permissions(self):
admin = User.objects.get(username="admin")
admin.groups.add(GROUP_DELEGATE_PK)
admin.groups.remove(GROUP_ADMIN_PK)
inform_changed_data(admin)
response = self.client.post(
reverse("projector-project", args=[self.projector.pk]), {}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_remove_element(self):
self.projector.elements = [{"name": "core/clock"}]
self.projector.save()
response = self.client.post(
reverse("projector-project", args=[self.projector.pk]),
{"elements": []},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.projector = Projector.objects.get(pk=1)
self.assertEqual(self.projector.elements, [])
self.assertEqual(self.projector.elements_preview, [])
self.assertEqual(self.projector.elements_history, [])
def test_add_element_to_history(self):
element = [{"name": "core/clock"}]
response = self.client.post(
reverse("projector-project", args=[self.projector.pk]),
{"append_to_history": element},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.projector = Projector.objects.get(pk=1)
self.assertEqual(self.projector.elements, [])
self.assertEqual(self.projector.elements_preview, [])
self.assertEqual(self.projector.elements_history, [element])
def test_remove_last_history_element(self):
element1 = [{"name": "core/clock"}]
element2 = [{"name": "motions/motion"}]
self.projector.elements_history = [element1, element2]
self.projector.save()
response = self.client.post(
reverse("projector-project", args=[self.projector.pk]),
{"delete_last_history_element": True},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.projector = Projector.objects.get(pk=1)
self.assertEqual(self.projector.elements, [])
self.assertEqual(self.projector.elements_preview, [])
self.assertEqual(self.projector.elements_history, [element1])
def test_set_preview(self):
elements = [{"name": "core/clock"}]
response = self.client.post(
reverse("projector-project", args=[self.projector.pk]),
{"preview": elements},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.projector = Projector.objects.get(pk=1)
self.assertEqual(self.projector.elements, [])
self.assertEqual(self.projector.elements_preview, elements)
self.assertEqual(self.projector.elements_history, [])
|
import random
import time
debug = 0
def playerReset():
global low
global high
global playerResponse
global randomNum
low = 1
high = 100
playerResponse = ''
randomNum = random.randint(low, high)
def startNow():
global low
global high
global playerResponse
global randomNum
print("Please enter h, l, or y. h means the guess was too high. l means the guess was too low. y means the guess was correct.")
print("-----")
if debug == 1:
print(playerResponse)
print(low)
print(high)
print(randomNum)
while playerResponse != "y":
print ("Is it", randomNum, "?")
playerResponse = input("Please enter h, l, or y. ")
if playerResponse == "h":
            high = randomNum - 1
randomNum = random.randint(low, high)
elif playerResponse == "l":
            low = randomNum + 1
randomNum = random.randint(low, high)
elif playerResponse == "y":
print("Ya! I found your number.")
time.sleep(0.5)
playAgain = input("Would you like to play again? y / n ")
if playAgain == "y":
print("-----")
time.sleep(0.1)
playerReset()
startNow()
else:
print("Goodbye")
else:
print("Please answer with only l, h, or y.")
playerReset()
startNow()
|
"""empty message
Revision ID: 486ed7f7d877
Revises: None
Create Date: 2015-09-25 09:06:58.365000
"""
# revision identifiers, used by Alembic.
revision = '486ed7f7d877'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('templates',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=256), nullable=True),
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('name', sa.String(length=128), nullable=True),
sa.Column('phone', sa.String(length=128), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('address', sa.String(length=64), nullable=True),
sa.Column('city', sa.String(length=64), nullable=True),
sa.Column('country', sa.String(length=64), nullable=True),
sa.Column('about_me', sa.Text(), nullable=True),
sa.Column('confirmed', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_name'), 'users', ['name'], unique=False)
op.create_index(op.f('ix_users_phone'), 'users', ['phone'], unique=False)
op.create_table('cards',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('company', sa.String(length=256), nullable=True),
sa.Column('name', sa.String(length=256), nullable=True),
sa.Column('job', sa.String(length=256), nullable=True),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('phone', sa.String(length=64), nullable=True),
sa.Column('address', sa.String(length=256), nullable=True),
sa.Column('website', sa.String(length=256), nullable=True),
sa.Column('logo', sa.String(length=256), nullable=True),
sa.Column('city', sa.String(length=256), nullable=True),
sa.Column('country', sa.String(length=256), nullable=True),
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('template_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['template_id'], ['templates.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_cards_address'), 'cards', ['address'], unique=False)
op.create_index(op.f('ix_cards_city'), 'cards', ['city'], unique=False)
op.create_index(op.f('ix_cards_company'), 'cards', ['company'], unique=False)
op.create_index(op.f('ix_cards_country'), 'cards', ['country'], unique=False)
op.create_index(op.f('ix_cards_description'), 'cards', ['description'], unique=False)
op.create_index(op.f('ix_cards_email'), 'cards', ['email'], unique=False)
op.create_index(op.f('ix_cards_job'), 'cards', ['job'], unique=False)
op.create_index(op.f('ix_cards_logo'), 'cards', ['logo'], unique=False)
op.create_index(op.f('ix_cards_name'), 'cards', ['name'], unique=False)
op.create_index(op.f('ix_cards_phone'), 'cards', ['phone'], unique=False)
op.create_index(op.f('ix_cards_website'), 'cards', ['website'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_cards_website'), table_name='cards')
op.drop_index(op.f('ix_cards_phone'), table_name='cards')
op.drop_index(op.f('ix_cards_name'), table_name='cards')
op.drop_index(op.f('ix_cards_logo'), table_name='cards')
op.drop_index(op.f('ix_cards_job'), table_name='cards')
op.drop_index(op.f('ix_cards_email'), table_name='cards')
op.drop_index(op.f('ix_cards_description'), table_name='cards')
op.drop_index(op.f('ix_cards_country'), table_name='cards')
op.drop_index(op.f('ix_cards_company'), table_name='cards')
op.drop_index(op.f('ix_cards_city'), table_name='cards')
op.drop_index(op.f('ix_cards_address'), table_name='cards')
op.drop_table('cards')
op.drop_index(op.f('ix_users_phone'), table_name='users')
op.drop_index(op.f('ix_users_name'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
op.drop_table('templates')
### end Alembic commands ###
|