blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f1335c1e683e0f3a387c4f90ecc635733f2dbce5 | 05a175090ffebdd0713802aba1469f2673fda3d3 | /IntroNN/hw2_delvalle_network.py | 1ad44467d98f1d231d562f3ccbe7f7afbf9dfee4 | [] | no_license | gdelvalle99/CS691-Deep-Learning-Projects | c41d8a835eeb4247d88d8121dcacba96b431ac1c | d6f6e1b9a2caf24e283d15acf78da1c4bf521897 | refs/heads/master | 2022-10-04T22:26:52.728823 | 2020-05-23T21:56:18 | 2020-05-23T21:56:18 | 266,417,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,952 | py | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn import svm
# ---- Training data ------------------------------------------------------
# N points per class, drawn uniformly from four axis-aligned rectangles,
# roughly one per quadrant (an XOR-style layout). O-class occupies
# quadrants 1 and 3, X-class quadrants 2 and 4. With Ul = -1 each
# rectangle overhangs the axes by one unit, so classes overlap slightly.
N = 250
size = int(N/2)   # points per rectangle (each class is built from two rectangles)
Uh = 20           # outer bound of every rectangle
Ul = -1           # inner bound (gives the 1-unit overhang across the axes)
# Per-coordinate [x, y] bounds for each rectangle. np.random.uniform accepts
# low > high and still samples uniformly between the two values.
x_min2 = [Ul, Ul]
x_max2 = [-Uh, Uh]     # quadrant 2: x in [-20, -1], y in [-1, 20]
x_min4 = [Ul, -Ul]
x_max4 = [Uh, -Uh]     # quadrant 4: x in [-1, 20], y in [-20, 1]
o_min1 = [Ul, Ul]
o_max1 = [Uh, Uh]      # quadrant 1: x and y in [-1, 20]
o_min3 = [-Ul, -Ul]
o_max3 = [-Uh, -Uh]    # quadrant 3: x and y in [-20, 1]
O1 = np.random.uniform(low=o_min1, high=o_max1, size=(size,2))
X2 = np.random.uniform(low=x_min2, high=x_max2, size=(size,2))
O3 = np.random.uniform(low=o_min3, high=o_max3, size=(size,2))
X4 = np.random.uniform(low=x_min4, high=x_max4, size=(size,2))
# Stack so rows 0..N-1 are O-class and rows N..2N-1 are X-class; the label
# construction that follows relies on this ordering.
O = np.concatenate((O1,O3), axis=0)
X = np.concatenate((X2,X4), axis=0)
x_train = np.concatenate((O,X), axis=0)
# One-hot training labels aligned with x_train: the first N rows are O-class
# ([0, 1]), the remaining N rows are X-class ([1, 0]). Built in one shot with
# np.tile instead of the original per-row np.vstack loop, which recopied the
# whole label array on every iteration (quadratic time) and needed a special
# case for the first row.
y_train = np.vstack((np.tile(np.array([0, 1]), (N, 1)),
                     np.tile(np.array([1, 0]), (N, 1))))
# ---- Model: 2 inputs -> 8 ReLU units -> 2 sigmoid outputs (one per class)
model = Sequential()
model.add(Dense(8, activation='relu', input_shape=(2,)))
model.add(Dense(2, activation='sigmoid'))
model.summary()
print(model.get_config())
print(model.get_weights())
model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
# Optional visualization of the raw training data:
#plt.scatter(X[:,0],X[:,1],marker='+',c='blue', label='X-class')
#plt.scatter(O[:,0],O[:,1],marker='o',c='red', edgecolors='none', label='O-class')
#plt.legend()
#plt.grid(True)
#plt.show()
epochs = 100
n = 50   # mini-batch size
history = model.fit(x=x_train,y=y_train,batch_size=n,epochs=epochs,verbose=1)
# ---- Test data: same four rectangles, NTest points per class ------------
NTest = 150
sizeTest = int(NTest/2)   # points per rectangle, as for the training set
#print(sizeTest)
O1test = np.random.uniform(low=o_min1, high=o_max1, size=(sizeTest,2))
X2test = np.random.uniform(low=x_min2, high=x_max2, size=(sizeTest,2))
O3test = np.random.uniform(low=o_min3, high=o_max3, size=(sizeTest,2))
X4test = np.random.uniform(low=x_min4, high=x_max4, size=(sizeTest,2))
# Same row ordering as the training set: all O-class rows, then all X-class.
Otest = np.concatenate((O1test,O3test), axis=0)
Xtest = np.concatenate((X2test,X4test), axis=0)
x_test = np.vstack((Otest,Xtest))
#print(x_test.shape)
# Optional visualization of the raw test data:
#plt.scatter(Xtest[:,0],Xtest[:,1],marker='+',c='blue', label='X-class')
#plt.scatter(Otest[:,0],Otest[:,1],marker='o',c='red', edgecolors='none', label='O-class')
#plt.legend()
#plt.grid(True)
#plt.show()
# One-hot test labels aligned with x_test: the first NTest rows are O-class
# ([0, 1]), the rest are X-class ([1, 0]). Built directly with np.tile
# instead of the original row-by-row np.vstack loop, which recopied the
# whole array on every iteration (quadratic time).
y_test = np.vstack((np.tile(np.array([0, 1]), (NTest, 1)),
                    np.tile(np.array([1, 0]), (NTest, 1))))
score = model.evaluate(x=x_test,y=y_test)
print(y_test.shape)
# Bucket every test point by prediction outcome. Convention (see the
# comment below): X-class (network output column 0) is "positive", O-class
# (column 1) is "negative". Buckets start as None and grow via np.vstack.
# NOTE(review): a bucket holding exactly one point remains a 1-D array of
# shape (2,), and the later [:,0] indexing in the plotting section would
# raise IndexError in that case; np.atleast_2d would guard against it.
false_positives = None   # predicted X, actually O
false_negatives = None   # predicted O, actually X
X_true = None            # correctly predicted X
O_true = None            # correctly predicted O
q = model.predict(x_test)
#we assume that X is positive and O is negative
for index, row in enumerate(q):
    #print(q[0],q[1],y_test[index][0])
    # Correct prediction: the larger network output matches the larger
    # component of the one-hot label.
    if((row[0] > row[1] and y_test[index][0] > y_test[index][1]) or(row[1] > row[0] and y_test[index][1] > y_test[index][0])):
        #print(y_test[index])
        if(row[0] > row[1]):
            if(X_true is None):
                X_true = x_test[index]
            else:
                #print('here')
                X_true = np.vstack((X_true, x_test[index]))
        else:
            if(O_true is None):
                O_true = x_test[index]
            else:
                #print(x_test[index])
                O_true = np.vstack((O_true, x_test[index]))
    elif((row[0] > row[1] and y_test[index][1] > y_test[index][0])):#false positive
        if(false_positives is None):
            false_positives = x_test[index]
        else:
            false_positives = np.vstack((false_positives, x_test[index]))
    elif((row[1] > row[0] and y_test[index][0] > y_test[index][1])):
        # false negative: network says O, label says X
        if(false_negatives is None):
            false_negatives = x_test[index]
        else:
            false_negatives = np.vstack((false_negatives, x_test[index]))
# Two 1-D separation problems used to fit the SVM boundary lines below:
#   v_line: quadrant-1 O points vs quadrant-2 X points (vertical boundary)
#   h_line: quadrant-3 O points vs quadrant-2 X points (horizontal boundary)
v_line = np.concatenate((O1test,X2test),axis=0)
h_line = np.concatenate((O3test,X2test),axis=0)
q_h = model.predict(h_line)
q_v = model.predict(v_line)
# Network-assigned class per point: 1 where the X output (column 0) wins,
# 0 where the O output (column 1) wins. The original element-by-element
# np.hstack loop silently skipped any row whose two outputs compared exactly
# equal, which could leave the label vector shorter than its input array and
# break the clf.fit() calls below; the vectorized comparison keeps the
# lengths aligned (exact ties resolve to class 1).
y_h = (q_h[:, 0] >= q_h[:, 1]).astype(int)
y_v = (q_v[:, 0] >= q_v[:, 1]).astype(int)
# ---- Fit one linear SVM per boundary and draw its separating line -------
C = 1.0 # SVM regularization parameter
# NOTE(review): gamma has no effect with kernel='linear'; it only applies to
# the rbf/poly/sigmoid kernels.
clf_v = svm.SVC(kernel = 'linear', gamma=0.7, C=C )
clf_v.fit(v_line, y_v)
clf_h = svm.SVC(kernel = 'linear', gamma=0.7, C=C )
clf_h.fit(h_line, y_h)
# Decision boundary w0*x + w1*y + b = 0, rearranged to y = a*x - b/w1.
w_h = clf_h.coef_[0]
a_h = -w_h[0] / w_h[1]
xx_h = np.linspace(-20, 20)
yy_h = a_h * xx_h - (clf_h.intercept_[0]) / w_h[1]
plt.plot(xx_h, yy_h, 'k-')
w_v = clf_v.coef_[0]
a_v = -w_v[0] / w_v[1]
xx_v = np.linspace(-5, 5)
yy_v = a_v * xx_v - (clf_v.intercept_[0]) / w_v[1]
plt.plot(xx_v, yy_v, 'k-')
#plt.plot(dec_bound[:,0], dec_bound[:,1])
#print(X_true)
#print(score)
# Scatter the four outcome buckets on top of the SVM lines; each bucket may
# be None when empty, hence the guards.
if(X_true is not None):
    plt.scatter(X_true[:,0],X_true[:,1],marker='+',c='blue', label='X-class')
if(O_true is not None):
    plt.scatter(O_true[:,0],O_true[:,1],marker='o',c='red', edgecolors='none', label='O-class')
if(false_positives is not None):
    plt.scatter(false_positives[:,0],false_positives[:,1],marker='+',c='yellow', label='False positives')
if(false_negatives is not None):
    plt.scatter(false_negatives[:,0],false_negatives[:,1],marker='o',c='green', edgecolors='none', label='False negatives')
plt.legend()
plt.grid(True)
plt.show()
| [
"gdelvalle@nevada.unr.edu"
] | gdelvalle@nevada.unr.edu |
3c36c0d10742f9c25af173e2077d9c835a3e3ff8 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/celery/2015/12/graph.py | d441a54ca1edf2545aaaa16e0d18be8ec8d7318d | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 6,432 | py | # -*- coding: utf-8 -*-
"""
The :program:`celery graph` command.
.. program:: celery graph
"""
from __future__ import absolute_import, unicode_literals
from operator import itemgetter
from celery.datastructures import DependencyGraph, GraphFormatter
from celery.five import items
from .base import Command
__all__ = ['graph']
class graph(Command):
    """Implementation of the ``celery graph`` command.

    Renders either the worker/consumer bootstep dependency graph or a
    cluster-topology graph (broker, result backend, workers and their
    pool threads) as graphviz "dot" text on stdout.
    """

    args = """<TYPE> [arguments]
..... bootsteps [worker] [consumer]
..... workers [enumerate]
"""

    def run(self, what=None, *args, **kwargs):
        # Dispatch on the requested graph type.
        map = {'bootsteps': self.bootsteps, 'workers': self.workers}
        if not what:
            raise self.UsageError('missing type')
        elif what not in map:
            raise self.Error('no graph {0} in {1}'.format(what, '|'.join(map)))
        return map[what](*args, **kwargs)

    def bootsteps(self, *args, **kwargs):
        """Emit the bootstep dependency graph for the selected blueprints."""
        worker = self.app.WorkController()
        include = {arg.lower() for arg in args or ['worker', 'consumer']}
        if 'worker' in include:
            graph = worker.blueprint.graph
            if 'consumer' in include:
                # Merge the consumer blueprint into the worker graph.
                worker.blueprint.connect_with(worker.consumer.blueprint)
        else:
            graph = worker.consumer.blueprint.graph
        graph.to_dot(self.stdout)

    def workers(self, *args, **kwargs):
        """Emit a graph of the cluster topology.

        Positional arguments are ``key:value`` pairs (values may be
        comma-separated lists), e.g. ``wmax:8 tmax:4 enumerate``.
        """

        def simplearg(arg):
            # 'key:value' -> ('key', value-or-list)
            return maybe_list(itemgetter(0, 2)(arg.partition(':')))

        def maybe_list(l, sep=','):
            # Split the value into a list when it contains the separator.
            return (l[0], l[1].split(sep) if sep in l[1] else l[1])

        args = dict(simplearg(arg) for arg in args)
        generic = 'generic' in args

        def generic_label(node):
            # e.g. 'Broker (amqp://)' instead of the full connection URI.
            return '{0} ({1}://)'.format(type(node).__name__,
                                         node._label.split('://')[0])

        class Node(object):
            force_label = None
            scheme = {}

            def __init__(self, label, pos=None):
                self._label = label
                self.pos = pos

            def label(self):
                return self._label

            def __str__(self):
                return self.label()

        class Thread(Node):
            scheme = {'fillcolor': 'lightcyan4', 'fontcolor': 'yellow',
                      'shape': 'oval', 'fontsize': 10, 'width': 0.3,
                      'color': 'black'}

            def __init__(self, label, **kwargs):
                # Threads get a unique generated node id; the human-readable
                # name is kept separately and shown by Formatter.node().
                self._label = 'thr-{0}'.format(next(tids))
                self.real_label = label
                self.pos = 0

        class Formatter(GraphFormatter):

            def label(self, obj):
                return obj and obj.label()

            def node(self, obj):
                scheme = dict(obj.scheme) if obj.pos else obj.scheme
                if isinstance(obj, Thread):
                    scheme['label'] = obj.real_label
                return self.draw_node(
                    obj, dict(self.node_scheme, **scheme),
                )

            def terminal_node(self, obj):
                return self.draw_node(
                    obj, dict(self.term_scheme, **obj.scheme),
                )

            def edge(self, a, b, **attrs):
                # Thread -> worker edges are drawn without arrowheads.
                if isinstance(a, Thread):
                    attrs.update(arrowhead='none', arrowtail='tee')
                return self.draw_edge(a, b, self.edge_scheme, attrs)

        def subscript(n):
            # Render an integer using unicode subscript digits (12 -> '₁₂').
            S = {'0': '₀', '1': '₁', '2': '₂', '3': '₃', '4': '₄',
                 '5': '₅', '6': '₆', '7': '₇', '8': '₈', '9': '₉'}
            return ''.join([S[i] for i in str(n)])

        class Worker(Node):
            pass

        class Backend(Node):
            # NOTE(review): 'color' appears twice in this literal; the later
            # value ('peachpuff4') wins.
            scheme = {'shape': 'folder', 'width': 2,
                      'height': 1, 'color': 'black',
                      'fillcolor': 'peachpuff3', 'color': 'peachpuff4'}

            def label(self):
                return generic_label(self) if generic else self._label

        class Broker(Node):
            scheme = {'shape': 'circle', 'fillcolor': 'cadetblue3',
                      'color': 'cadetblue4', 'height': 1}

            def label(self):
                return generic_label(self) if generic else self._label

        from itertools import count
        tids = count(1)
        Wmax = int(args.get('wmax', 4) or 0)   # max workers drawn before abbreviating
        Tmax = int(args.get('tmax', 3) or 0)   # max threads drawn per worker

        def maybe_abbr(l, name, max=Wmax):
            # Optionally rename the items to enumerated labels and collapse
            # the middle of an over-long list into a single ellipsis entry.
            size = len(l)
            abbr = max and size > max
            if 'enumerate' in args:
                l = ['{0}{1}'.format(name, subscript(i + 1))
                     for i, obj in enumerate(l)]
            if abbr:
                l = l[0:max - 1] + [l[size - 1]]
                l[max - 2] = '{0}⎨…{1}⎬'.format(
                    name[0], subscript(size - (max - 1)))
            return l

        try:
            workers = args['nodes']
            threads = args.get('threads') or []
        except KeyError:
            # No explicit node list supplied: discover workers by broadcasting
            # inspect().stats() to the running cluster.
            replies = self.app.control.inspect().stats()
            workers, threads = [], []
            for worker, reply in items(replies):
                workers.append(worker)
                threads.append(reply['pool']['max-concurrency'])

        wlen = len(workers)
        backend = args.get('backend', self.app.conf.result_backend)
        threads_for = {}
        workers = maybe_abbr(workers, 'Worker')
        if Wmax and wlen > Wmax:
            threads = threads[0:3] + [threads[-1]]
        # NOTE(review): the loop variable deliberately shadows the outer
        # ``threads`` list (upstream code); the list is not used afterwards.
        for i, threads in enumerate(threads):
            threads_for[workers[i]] = maybe_abbr(
                list(range(int(threads))), 'P', Tmax,
            )

        broker = Broker(args.get(
            'broker', self.app.connection_for_read().as_uri()))
        backend = Backend(backend) if backend else None
        graph = DependencyGraph(formatter=Formatter())
        graph.add_arc(broker)
        if backend:
            graph.add_arc(backend)
        curworker = [0]
        for i, worker in enumerate(workers):
            worker = Worker(worker, pos=i)
            graph.add_arc(worker)
            graph.add_edge(worker, broker)
            if backend:
                graph.add_edge(worker, backend)
            threads = threads_for.get(worker._label)
            if threads:
                for thread in threads:
                    thread = Thread(thread)
                    graph.add_arc(thread)
                    graph.add_edge(thread, worker)
            curworker[0] += 1

        graph.to_dot(self.stdout)
| [
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
1ac1bf0d486a318d12379426563fee9a8f6f22d6 | fe85138c949c6198184c591780831fd2e183a24a | /Address Book.py | 251c32fc6f328cd1f9352bc08e897b68bbe90efc | [] | no_license | valeri1383/Personal-Python-Projects | e98f6b7171298def019db4e28f6d176a709615cc | b7db81cb44668f549a7fd15de84c0cb23654ac3d | refs/heads/main | 2023-05-26T09:02:24.260700 | 2023-05-22T14:40:28 | 2023-05-22T14:40:28 | 337,518,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,253 | py | from tkinter import *
# ---- Main application window -------------------------------------------
root = Tk()
root.geometry('400x400')
root.configure(bg='cyan')
root.resizable(1, 1)
root.title('Address Book')

# Seed data: [name, phone] pairs; mutated in place by the button callbacks.
contact_list = [
    ['John Smith', '07567374343'],
    ['Terry Adams', '07569984343'],
    ['Allen Gibson', '07564474743'],
    ['Grant Foster', '07567396843'],
    ['Hall Grey', '07567746343']
]
# Entry-field backing variables shared by the callbacks below.
Name = StringVar()
Number = StringVar()

# Scrollable listbox on the right-hand side showing the contact names.
frame = Frame(root)
frame.pack(side=RIGHT)
scroll = Scrollbar(frame, orient=VERTICAL)
select = Listbox(frame,bg='light goldenrod', yscrollcommand=scroll.set, width=30, height=33)
scroll.configure(command=select.yview)
scroll.pack(side=RIGHT, fill=Y)
select.pack(side=LEFT, fill=BOTH, expand=1)
def Selected():
    """Return the index (int) of the row currently highlighted in the listbox."""
    highlighted = select.curselection()
    return int(highlighted[0])
def AddContact():
    """Append a new contact built from the entry fields, then refresh the list."""
    entry = [Name.get(), Number.get()]
    contact_list.append(entry)
    Select_set()
def EDIT():
    """Overwrite the selected contact with the entry-field values.

    Does nothing when no row is highlighted (the original raised an
    IndexError from Selected() in that case).
    """
    if not select.curselection():
        return
    contact_list[Selected()] = [Name.get(), Number.get()]
    Select_set()
def DELETE():
    """Remove the selected contact and refresh the listbox.

    Does nothing when no row is highlighted (the original raised an
    IndexError from Selected() in that case).
    """
    if not select.curselection():
        return
    del contact_list[Selected()]
    Select_set()
def VIEW():
    """Copy the selected contact's name and phone into the entry fields.

    Does nothing when no row is highlighted (the original raised an
    IndexError from Selected() in that case).
    """
    if not select.curselection():
        return
    NAME, PHONE = contact_list[Selected()]
    Name.set(NAME)
    Number.set(PHONE)
def EXIT():
    """Close the application window and end the program."""
    root.destroy()
def RESET():
    """Blank out both entry fields."""
    for field in (Name, Number):
        field.set('')
def Select_set():
    """Sort the contacts alphabetically and rebuild the listbox contents."""
    contact_list.sort()
    select.delete(0, END)
    for contact_name, _contact_phone in contact_list:
        select.insert(END, contact_name)
# Populate the listbox with the seed contacts.
Select_set()

# ---- Entry fields and action buttons -----------------------------------
Label(root, text='NAME', font='arial 15 bold', bg='cyan').pack()
Entry(root, font=20, bg='light yellow', textvariable=Name).pack()
Label(root, text='PHONE NO.', font='arial 15 bold', bg='cyan').pack()
Entry(root, font=20,bg='light yellow', textvariable=Number).pack()
Button(root, text='ADD', width=7, font='arial 15 bold', bg='SlateGray4', command=AddContact).pack()
Button(root, text='EDIT', width=7, font='arial 15 bold', bg='SlateGray4', command=EDIT).pack()
Button(root, text="DELETE", width=7, font='arial 15 bold', bg='SlateGray4', command=DELETE).pack()
Button(root, text="VIEW", width=7, font='arial 15 bold', bg='SlateGray4', command=VIEW).pack()
Button(root, text="EXIT", width=7, font='arial 15 bold', bg='tomato', command=EXIT).pack()
Button(root, text="RESET", width=7, font='arial 15 bold', bg='SlateGray4', command=RESET).pack()

# Enter the Tk event loop (blocks until the window is closed).
mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
9b8b028c0c0efe89315744409a6b69fe90a07bea | 7930635dae78d050ebe158303aa5343a8440a884 | /lesson05/lesson05-3.py | c57400cea3fe4ec5bba9412246963cfa06edd93d | [] | no_license | lexpol/python_homework | 1211bda3db32b869e34c4cca2264372f5c99b240 | 667fe61f7e23ed834f13c68c40a99ef21c39d5a7 | refs/heads/master | 2023-05-30T22:04:09.144029 | 2021-06-09T01:04:40 | 2021-06-09T01:04:40 | 365,838,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | # 3. Создать текстовый файл (не программно), построчно записать фамилии сотрудников и
# величину их окладов (не менее 10 строк). Определить, кто из сотрудников имеет оклад менее 20 тыс.,
# вывести фамилии этих сотрудников. Выполнить подсчет средней величины дохода сотрудников.
#
# Пример файла:
#
# Иванов 23543.12
# Петров 13749.32
with open('lesson05-3-sample.txt') as my_file:
salary = []
print("фамилии сотрудников с окладом менее 20 тыс:")
for line in my_file.readlines():
formated_line = line.replace("\n", "")
# print(formated_line)
if float(formated_line.split()[1]) < 20000:
print(f"{formated_line.split()[0]} \t\t\tс результатом: {formated_line.split()[1]}")
salary.append(float(formated_line.split()[1]))
print(f"величина среднего дохода сотрудников: {round(sum(salary) / len(salary), 2)}")
| [
"lex@poltor.ru"
] | lex@poltor.ru |
9a9b63a8daca2426c5e7f92f421d90edd8b68eb5 | ca0556d3dc6fb6b92e194c4ff0a979619e7be0e4 | /I2c7SegmentLed.py | 8baadc4d53fbc559a2402174d86b34a339e609fe | [] | no_license | dcityorg/i2c-7-segment-led-library-raspberrypi | 2216c46eb945faf1dab2c15f1a123208d0e0a889 | 62910936e6a43d6585925f493cf72b2d609e9ffe | refs/heads/master | 2021-06-13T20:01:15.431573 | 2021-03-10T19:47:50 | 2021-03-10T19:47:50 | 146,506,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,329 | py | # -*- coding: utf-8 -*-
'''
I2c7SegmentLed.py - class library for using 7 Segment LEDs
Written by: Gary Muhonen gary@dcity.org
Versions
1.0.0 - 7/31/2016
Original Release.
1.0.1 - 9/1/2018
Transfer to GM, and some minor changes
Short Description:
These files provide a software library and demo program for the Raspberry Pi.
The library files provide useful functions to make it easy
to communicate with 7 Segment LEDs
that use the I2C communication protocol. The demo
program shows the usage of the functions in the library.
The 7 Segment LED must connect to the I2C bus using a AMS AS1115 controller chip.
A backback board with the AMS AS1115 chip is available and details are in the link below.
https://www.dcity.org/portfolio/i2c-7-segment-led-library/
This link has details including:
* software library installation for use with Arduino, Particle and Raspberry Pi boards
* list of functions available in these libraries
* a demo program (which shows the usage of most library functions)
* info on 7 segment LED displays that work with this software
* hardware design for a backpack board for 7 segment LEDs, available on github
* info on backpack “bare” pc boards available from OSH Park.
License Information: https://www.dcity.org/license-information/
Notes:
1. You must enable I2C on your Raspberry Pi board (see your particular operating system documentation).
On Raspian: Menu...Preferences...Raspberry Pi Configuration...Interfaces...Enable I2C
2. This software was tested on a RASPBERRY PI 3 MODEL B, running Rasbian and Python 3.5.2
'''
import smbus             # I2C bus access (python-smbus, Raspberry Pi)
from time import sleep   # short delays between chip configuration writes

i2c = smbus.SMBus(1)     # I2C bus object for all reads/writes (bus 1 on Raspberry Pi)

# create a class for the i2c 7 segment led displays that use the AS1115 chip
class I2c7SegmentLed(object):
    """Driver for a 7-segment LED display driven by an AMS AS1115 I2C chip.

    Maintains an invisible 1-based cursor; write()/writeString() render
    ASCII characters at the cursor using the LedSegments font table, with
    special handling for the decimal point.
    """

    # Control Register Addresses in the AS1115
    # Digits 0-7 live at register addresses 1-8
    REG_DECODE_MODE = 0x09 # sets which digits respond to data that is decoded (like BCD or HEX)
    REG_GLOBAL_INTENSITY = 0X0a # set the brightness for all digits... only bottom 4 bits are used for 16 brightness values
    REG_SCAN_LIMIT = 0x0b # controls which digits are turned on
    REG_SHUTDOWN = 0x0c # used to shutdown the display and save power
    REG_FEATURE = 0x0e # register that contains key features
    REG_DISPLAY_TEST_MODE = 0x0f # used for test mode
    REG_DIGIT01_INTENSITY = 0x10
    REG_DIGIT23_INTENSITY = 0x11
    REG_DIGIT45_INTENSITY = 0x12
    REG_DIGIT67_INTENSIGY = 0x13  # NOTE(review): name typo ("INTENSIGY"); kept unchanged because it is a public constant
    REG_DIAGNOSTIC_DIGIT0 = 0x14
    REG_DIAGNOSTIC_DIGIT1 = 0x15
    REG_DIAGNOSTIC_DIGIT2 = 0x16
    REG_DIAGNOSTIC_DIGIT3 = 0x17
    REG_DIAGNOSTIC_DIGIT4 = 0x18
    REG_DIAGNOSTIC_DIGIT5 = 0x19
    REG_DIAGNOSTIC_DIGIT6 = 0x1a
    REG_DIAGNOSTIC_DIGIT7 = 0x1b
    REG_KEYA = 0x1c
    REG_KEYB = 0x1d
    REG_SELF_ADDRESSING = 0x2d # register used to set the chip to read jumpers to determine it's own i2c address

    # Constants that can be written to the control registers above
    # REG_DECODE_MODE values (type of decode is set in REG_FEATURE)
    REG_DECODE_MODE_NO_DIGITS = 0x00 # no decoding
    REG_DECODE_MODE_ALL_DIGITS = 0xFF # used for BCD or HEX decoding, bit 0 turns on digit 0 for decoding, etc
    # REG_SCAN_LIMIT values (how many digits are displayed)
    REG_SCAN_LIMIT_1 = 0x00 # if there is only 1 digit in the display
    REG_SCAN_LIMIT_2 = 0x01
    REG_SCAN_LIMIT_3 = 0x02
    REG_SCAN_LIMIT_4 = 0x03
    REG_SCAN_LIMIT_5 = 0x04
    REG_SCAN_LIMIT_6 = 0x05
    REG_SCAN_LIMIT_7 = 0x06
    REG_SCAN_LIMIT_8 = 0x07 # if there are 8 digits in the display
    # REG_SHUTDOWN values
    REG_SHUTDOWN_SHUTDOWN_AND_RESET = 0x00 # shutdown chip and reset the feature register
    REG_SHUTDOWN_SHUTDOWN = 0x80 # shutdown chip and don't reset the feature register
    REG_SHUTDOWN_NORMAL_AND_RESET = 0x01 # set normal mode and reset the feature register
    REG_SHUTDOWN_NORMAL = 0X81 # set normal mode and don't reset the feature register...this is the normal running values
    # REG_SELF_ADDRESSING values, for determining the chip's i2c address
    REG_SELF_ADDRESSING_FACTORY_ADDRESS = 0x00 # for using factory set i2c address = 0x00
    REG_SELF_ADDRESSING_USER_ADDRESS = 0x01 # for using jumpers to determine i2c address
    # REG_FEATURE bit values
    REG_FEATURE_EXTERNAL_CLOCK = 0X01 # set bit if using an external clock
    REG_FEATURE_RESET = 0x02 # set bit to reset all registers
    REG_FEATURE_HEX = 0x04 # clear this bit for BCD decoding, set for HEX decoding
    REG_FEATURE_BLINK = 0x10 # set bit to enable blinking of display
    REG_FEATURE_BLINK_FREQUENCY = 0x020 # set bit for 2 second blinking, clear for 1 second blinking
    REG_FEATURE_SYNC = 0x40 # set bit for multiple device blinking
    REG_FEATURE_BLINK_START = 0x80 # set bit to start blinking when display turns on, clear to start blinking when display turns off

    DECIMAL_POINT_MASK = 0x80 # bit to control the decimal point

    # segment values for the LED for all 128 ASCII characters
    # the first value is for ASCII character 0, then 1, etc
    # each byte contains the 7 LED segments and the decimal point, arranged as (from MSB to LSB)
    # DP G F E D C B A (DP, middle, top left, btm left, btm, btm right, top right, top)
    # if a bit is a '1', then that segment of the led will be turned on.
    LedSegments = [
        0b01111110,0b00110000,0b01101101,0b01111001,0b00110011,0b01011011,0b01011111,0b01110010, # Ascii decimal:0-7 hex:00-07
        0b01111110,0b01111011,0b01111101,0b00011111,0b00001101,0b00111101,0b01101111,0b01000111, # Ascii decimal:8-15 hex:08-0F
        0b01111110,0b00000110,0b01101101,0b01001111,0b00010111,0b01011011,0b01111011,0b00011110, # Ascii decimal:16-23 hex:10-17
        0b01111111,0b01011111,0b01101111,0b01110011,0b01100001,0b01100111,0b01111101,0b00111001, # Ascii decimal:24-31 hex:18-1f
        0b00000000,0b00110000,0b00100010,0b01000001,0b01001001,0b00100101,0b00110001,0b00000010, # Ascii decimal:32-39 hex:20-27
        0b01001010,0b01101000,0b01000010,0b00000111,0b00000100,0b00000001,0b00000000,0b00100101, # Ascii decimal:40-47 hex:28-2F
        0b01111110,0b00110000,0b01101101,0b01111001,0b00110011,0b01011011,0b01011111,0b01110010, # Ascii decimal:48-55 hex:30-37
        0b01111111,0b01111011,0b01001000,0b01011000,0b01000011,0b00001001,0b01100001,0b01100101, # Ascii decimal:56-63 hex:38-3F
        0b01111101,0b01110111,0b01111111,0b01001110,0b00111101,0b01001111,0b01000111,0b01011110, # Ascii decimal:64-71 hex:40-47
        0b00110111,0b00000110,0b00111100,0b01010111,0b00001110,0b01010100,0b01110110,0b01111110, # Ascii decimal:72-79 hex:48-4F
        0b01100111,0b01101011,0b01100110,0b01011011,0b00001111,0b00111110,0b00111110,0b00101010, # Ascii decimal:80-87 hex:50-57
        0b00110111,0b00111011,0b01101101,0b00011110,0b00010011,0b00110110,0b01100010,0b00001000, # Ascii decimal:88-95 hex:58-5F
        0b00100000,0b01111101,0b00011111,0b00001101,0b00111101,0b01101111,0b01000111,0b01111011, # Ascii decimal:96-103 hex:60-67
        0b00010111,0b00000100,0b00011000,0b01010111,0b00000110,0b00010100,0b00010101,0b00011101, # Ascii decimal:104-111 hex:68-6F
        0b01100111,0b01110011,0b00000101,0b01011011,0b00001111,0b00011100,0b00011100,0b00010100, # Ascii decimal:112-119 hex:70-77
        0b00110111,0b00111011,0b01101101,0b01001011,0b01010101,0b01100011,0b01000000,0b00000000 # Ascii decimal:120-127 hex:78-7F
    ]

    # constructor to create I2c7SegmentLed object, and initialize the LED module
    def __init__(self, i2cAddress, digits):
        """Initialize the display.

        i2cAddress -- 7-bit I2C address the chip answers on once
                      hardware-jumper self-addressing is enabled
        digits     -- number of digits (1-8) physically present
        """
        self._digits = digits
        self._i2cAddress = i2cAddress
        self._feature = 0
        self._segments = [0,0,0,0,0,0,0,0,0]   # last segment byte written per digit (indices 1..8; index 0 unused)
        self._cursorPosition = 1               # 1-based digit the next write() targets
        # Start talking to the AS1115 chip, as it will be at i2c address 0 initially (upon powerup)
        # Power down the AS1115 chip
        try:
            i2c.write_byte_data(0x00, I2c7SegmentLed.REG_SHUTDOWN, I2c7SegmentLed.REG_SHUTDOWN_NORMAL)
        except:
            pass # an error just means that the i2c led display has already had its address set
        sleep(0.020)
        # tell all AS1115 chips to use their hardware jumpered i2c address
        try:
            i2c.write_byte_data(0x00, I2c7SegmentLed.REG_SELF_ADDRESSING, I2c7SegmentLed.REG_SELF_ADDRESSING_USER_ADDRESS)
        except:
            pass # an error just means that the i2c led display has already had its address set
        sleep(0.020)
        # power up and reset the AS1115 chip and the feature register
        self.setRegister(I2c7SegmentLed.REG_SHUTDOWN, I2c7SegmentLed.REG_SHUTDOWN_NORMAL_AND_RESET)
        # all digits on, full brightness, no hardware decoding (font table is used instead)
        self.setBrightness(15)
        self.setRegister(I2c7SegmentLed.REG_SCAN_LIMIT,self._digits-1) # set number of digits in use
        self.setRegister(I2c7SegmentLed.REG_DECODE_MODE,I2c7SegmentLed.REG_DECODE_MODE_NO_DIGITS) # we won't use their decoder
        self._feature = 0 # starting value for the _feature register
        self.setRegister(I2c7SegmentLed.REG_FEATURE,self._feature) # initialize the feature register
        self.clear() # clear the display

    # write value to register
    def setRegister(self, reg, value):
        """Write one byte to an AS1115 control or digit register."""
        try:
            i2c.write_byte_data(self._i2cAddress, reg, value)
        except:
            # NOTE(review): bare except turns any failure (even a NameError)
            # into a printed message; catching OSError would be safer.
            print("Error writing to i2c 7 Segment Led at Address 0x%02x" %self._i2cAddress )

    # write the 8 segments to this digit of the led
    def setSegments(self, digit, segments):
        """Write a raw segment byte to a digit (1-based) and cache it locally."""
        if (digit <= self._digits) and (digit >= 1):
            self.setRegister(digit, segments)
            self._segments[digit] = segments

    # set the brightness to value (0-15)
    def setBrightness(self, value):
        self.setRegister(I2c7SegmentLed.REG_GLOBAL_INTENSITY, value)

    # clear all digits of the LED
    def clear(self):
        """Blank every digit and move the cursor back to position 1."""
        for i in range(1,self._digits+1):
            self._segments[i] = 0x00 # clear local storage
            self.setSegments(i,0x00) # clear led display
        self._cursorPosition = 1 # move cursor to home position

    # move the invisible virtual cursor to the 1st position, so that the next char written will go to that digit
    def home(self):
        self.cursorMove(1)

    # move the invisible virtual cursor to specified digit
    def cursorMove(self, digit):
        """Move the cursor to ``digit`` (1-based); out-of-range values are ignored."""
        if (digit <= self._digits) and (digit >= 1):
            self._cursorPosition = digit

    # turn the display off (also reduces current consumption)
    def displayOff(self):
        self.setRegister(I2c7SegmentLed.REG_SHUTDOWN,I2c7SegmentLed.REG_SHUTDOWN_SHUTDOWN)

    # turn the display on
    def displayOn(self):
        self.setRegister(I2c7SegmentLed.REG_SHUTDOWN,I2c7SegmentLed.REG_SHUTDOWN_NORMAL)

    # set the brightness of the LEDs to value (0-15)
    # NOTE(review): duplicate definition — this re-declares setBrightness with
    # an identical body and silently replaces the version defined above.
    def setBrightness(self, value):
        self.setRegister(I2c7SegmentLed.REG_GLOBAL_INTENSITY, value)

    # set the decimal point on digit specified
    def setDecimalPoint(self, digit):
        if (digit <= self._digits) and (digit >= 1):
            currentSegments = self._segments[digit] | I2c7SegmentLed.DECIMAL_POINT_MASK
            self.setSegments(digit, currentSegments)

    # clear the decimal point on digit specified
    def clearDecimalPoint(self, digit):
        if (digit <= self._digits) and (digit >= 1):
            currentSegments = self._segments[digit] & ~I2c7SegmentLed.DECIMAL_POINT_MASK
            self.setSegments(digit, currentSegments)

    # write an ascii character (value) to the led
    def write(self, value):
        """Render one ASCII character at the cursor.

        A '.' does not occupy its own digit unless it is the very first
        character: it sets the decimal-point bit of the previous digit.
        """
        # if we are not past the number of digits that we have
        if self._cursorPosition <= self._digits:
            # check if the character is a decimal point
            if value == '.':
                if self._cursorPosition == 1:
                    self.setDecimalPoint(self._cursorPosition); # set the dp for digit 1 and inc cursorPosition
                    self._cursorPosition += 1
                else:
                    self.setDecimalPoint(self._cursorPosition-1); # set the dp for the previous digit
            # else it is not a decimal point
            else:
                self._segments[self._cursorPosition] = I2c7SegmentLed.LedSegments[ord(value)]; # save the segments to local storage
                self.setSegments(self._cursorPosition, I2c7SegmentLed.LedSegments[ord(value)]); # write the segments to the display
                self._cursorPosition += 1

    # write a string (including formatting options)
    # For examples of printing numbers: https://mkaz.tech/python-string-format.html
    def writeString(self, value):
        """Write each character of ``value`` in turn via write()."""
        for char in value:
            self.write(char)
| [
"“gary@dcity.org”"
] | “gary@dcity.org” |
706b1412b4d839f20812fa99a5bf77bacc68ade1 | 7aa82a17d3545ad418ce6defd84ec9e460937299 | /work/views.py | 8352fb4b2c73b7e59d3ae78eb6c4f4a00b58243f | [] | no_license | parin-2002/CRUD-IN-DJANGO | ad28052b69814a9f35c0427c82418440a82cb47a | 34557e9f9fc6a8e3e5d0d7edc7d070237b0387e5 | refs/heads/master | 2022-11-19T18:23:11.677185 | 2020-07-23T03:39:26 | 2020-07-23T03:39:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | from django.shortcuts import render,redirect
from .forms import rgstudent
from .models import student
# Create your views here.
def reg(request):
    """Show the registration form plus all students; handle new registrations.

    On a valid POST the student is saved and a fresh empty form is shown;
    on an invalid POST the bound form (with errors) is re-rendered. The
    original duplicated the queryset/render tail in both branches; it is
    now shared.
    """
    if request.method == 'POST':
        form = rgstudent(request.POST)
        if form.is_valid():
            form.save()
            form = rgstudent()  # fresh empty form after a successful save
    else:
        form = rgstudent()
    data = student.objects.all()
    return render(request, 'register.html', {'form': form, 'data': data})
def delete(request, id):
    """Delete the student with primary key ``id`` (POST only), then go home.

    The original returned None for non-POST requests, which is not a valid
    Django response; all requests now end in a redirect.
    """
    if request.method == 'POST':
        pi = student.objects.get(pk=id)
        pi.delete()
    return redirect("/")
def update(request, id):
    """Edit the student with primary key ``id``.

    GET renders the prefilled form; a valid POST saves and redirects home;
    an invalid POST re-renders the bound form so its errors are shown (the
    original returned None in that case, which is not a valid response).
    The template context key stays 'id' because update.html depends on it.
    """
    pi = student.objects.get(pk=id)
    if request.method == 'POST':
        form = rgstudent(request.POST, instance=pi)
        if form.is_valid():
            form.save()
            return redirect("/")
    else:
        form = rgstudent(instance=pi)
    return render(request, 'update.html', {'id': form})
| [
"1akashsuvagiya1999@gmail.com"
] | 1akashsuvagiya1999@gmail.com |
988e35a1c0043ed6844775a7278008acc5bd01fd | d6f7273500f28fcc0a378620cc4e417a08542992 | /turnedOn/turnedOn/wsgi.py | c1d388723b28d037f6d884e93b8b7bb43cc723a9 | [] | no_license | katelyndunaski/Turned-On | 109e1ecd168f97255c178b95cc289739da443fbb | 80b0e01d9ffd2c7b142a6ecbeb96e48a0f22f8fb | refs/heads/master | 2016-09-10T08:38:40.314407 | 2015-02-08T17:24:11 | 2015-02-08T17:24:11 | 30,468,506 | 0 | 0 | null | 2015-02-08T02:04:38 | 2015-02-07T20:07:49 | JavaScript | UTF-8 | Python | false | false | 391 | py | """
WSGI config for turnedOn project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os

# The settings module must be on the environment before Django is configured.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "turnedOn.settings")

from django.core.wsgi import get_wsgi_application

# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) look up.
application = get_wsgi_application()
| [
"ubuntu@ip-172-31-39-124.us-west-2.compute.internal"
] | ubuntu@ip-172-31-39-124.us-west-2.compute.internal |
4eb09dfed6ad25c8eddd6132f2dc73dff3fcc6a3 | 1933ef2c5b3ec58feeb50dd092d670f58a3ec2bb | /kospeech/models/modules.py | 352b6a0bd0bf59f8861fa3d7e573569560a2ad30 | [
"Apache-2.0"
] | permissive | hephaex/KoSpeech | 68275af311ae5c53548f7c7bc27fe9dd5b1e441b | bf3fa0dc6d50089164fd0b47e02620062718d407 | refs/heads/master | 2022-12-02T02:00:01.164265 | 2020-08-05T08:47:55 | 2020-08-05T08:47:55 | 285,344,731 | 0 | 0 | Apache-2.0 | 2020-08-12T14:53:11 | 2020-08-05T16:22:59 | null | UTF-8 | Python | false | false | 1,579 | py | import torch
import torch.nn as nn
import torch.nn.init as init
from torch import Tensor
class Linear(nn.Module):
    """``torch.nn.Linear`` with a fixed initialization scheme.

    The weight matrix is filled with Xavier-uniform values; when a bias is
    requested, the bias vector starts at zero.
    """

    def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
        super(Linear, self).__init__()
        layer = nn.Linear(in_features, out_features, bias=bias)
        init.xavier_uniform_(layer.weight)
        if bias:
            init.zeros_(layer.bias)
        self.linear = layer

    def forward(self, x: Tensor) -> Tensor:
        return self.linear(x)
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable gain
    (``gamma``) and offset (``beta``).

    Note: uses ``Tensor.std`` with its default (unbiased) estimator, matching
    the original implementation rather than ``torch.nn.LayerNorm``.
    """

    def __init__(self, dim: int, eps: float = 1e-6) -> None:
        super(LayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(dim))
        self.beta = nn.Parameter(torch.zeros(dim))
        self.eps = eps

    def forward(self, z: Tensor) -> Tensor:
        centered = z - z.mean(dim=-1, keepdim=True)
        scaled = centered / (z.std(dim=-1, keepdim=True) + self.eps)
        return self.gamma * scaled + self.beta
class View(nn.Module):
""" Wrapper class of torch.view() for Sequential module. """
def __init__(self, shape: tuple, contiguous: bool = False):
super(View, self).__init__()
self.shape = shape
self.contiguous = contiguous
def forward(self, inputs):
if self.contiguous:
inputs = inputs.contiguous()
return inputs.view(*self.shape)
| [
"sh951011@gmail.com"
] | sh951011@gmail.com |
525051e2943540875900fe0b6db434ee527c30ba | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /examples/v1/usage-metering/GetUsageNetworkFlows_1239422069.py | 60afb66b6f88d5918aba22ca4b3b72c0ab5be76d | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 599 | py | """
Get hourly usage for Network Flows returns "OK" response
"""
from datetime import datetime
from dateutil.relativedelta import relativedelta
from datadog_api_client import ApiClient, Configuration
from datadog_api_client.v1.api.usage_metering_api import UsageMeteringApi
configuration = Configuration()
with ApiClient(configuration) as api_client:
api_instance = UsageMeteringApi(api_client)
response = api_instance.get_usage_network_flows(
start_hr=(datetime.now() + relativedelta(days=-5)),
end_hr=(datetime.now() + relativedelta(days=-3)),
)
print(response)
| [
"noreply@github.com"
] | noreply@github.com |
9a704f28f280264c0f0f116eb2b5f892f3e8a617 | 2b3e562da5d9b473f3a7dcb3ba833552e649b675 | /session3.py | 6c0c799a912ea879733bcd1f02e38c53ad9dadf3 | [] | no_license | Aakashdeveloper/python-web | 51f89c34e30beb0be1d8fdd2e01b3e2617a334b0 | fe163c193b20d23794eb327d2d580124e32fe152 | refs/heads/master | 2020-07-25T12:38:13.416166 | 2019-09-27T15:07:32 | 2019-09-27T15:07:32 | 208,291,750 | 3 | 6 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
x = 10
# In[3]:
y = 20
# In[4]:
x+y
# In[5]:
x*y
# In[6]:
x/y
# In[7]:
x-y
# In[8]:
10<<2
# In[9]:
10>>2
# In[12]:
x == 10 & y ==20
'''
docker run --rm -u root -p 8080:8080 -v jenkins-data:/var/jenkins_home -v /var/run/docker.sock:/var/run/docker.sock -v "$HOME":/home jenkinsci/blueocean | [
"ahanda205@gmail.com"
] | ahanda205@gmail.com |
707c7cb4f5f704f84ee4f3b07f62df36cb7bb8a2 | e1152ed447cf32f12acc1d71fddce1f6a1830023 | /zhaquirks/xiaomi/aqara_vibration_sensor.py | d1d8defe395ddaab7cea4adfeecbf5bcbd0962a7 | [
"Apache-2.0"
] | permissive | Gamester17/zha-device-handlers | f85d2ca864f7e6f977e46c3562b06ce9a3225790 | e7260ebb31025fbbbe5c5a9c2c8e7077aa85b66f | refs/heads/master | 2020-04-25T04:51:05.210691 | 2019-02-22T13:55:10 | 2019-02-22T13:55:10 | 172,523,970 | 0 | 0 | Apache-2.0 | 2019-02-25T14:44:06 | 2019-02-25T14:44:05 | null | UTF-8 | Python | false | false | 7,119 | py | import asyncio
import logging
import homeassistant.components.zha.const as zha_const
from zigpy.quirks import CustomCluster
from zigpy.profiles import PROFILES, zha
import zigpy.types as types
from zigpy.zcl.clusters.general import Basic, Groups, PowerConfiguration,\
Identify, Ota, Scenes, MultistateInput
from zigpy.zcl.clusters.closures import DoorLock
from zigpy.zcl.clusters.security import IasZone
from zhaquirks.xiaomi import BasicCluster, PowerConfigurationCluster,\
TemperatureMeasurementCluster, XiaomiCustomDevice
from zhaquirks import Bus, LocalDataCluster
VIBE_DEVICE_TYPE = 0x5F02 # decimal = 24322
RECENT_ACTIVITY_LEVEL_ATTR = 0x0505 # decimal = 1285
ACCELEROMETER_ATTR = 0x0508 # decimal = 1288
STATUS_TYPE_ATTR = 0x0055 # decimal = 85
ROTATION_DEGREES_ATTR = 0x0503 # decimal = 1283
STATIONARY_VALUE = 0
VIBE_VALUE = 1
TILT_VALUE = 2
DROP_VALUE = 3
MEASUREMENT_TYPE = {
STATIONARY_VALUE: "Stationary",
VIBE_VALUE: "Vibration",
TILT_VALUE: "Tilt",
DROP_VALUE: "Drop"
}
_LOGGER = logging.getLogger(__name__)
PROFILES[zha.PROFILE_ID].CLUSTERS[VIBE_DEVICE_TYPE] = (
[
Basic.cluster_id,
Identify.cluster_id,
Ota.cluster_id,
DoorLock.cluster_id,
MultistateInput.cluster_id,
IasZone.cluster_id
],
[
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
Ota.cluster_id,
DoorLock.cluster_id,
MultistateInput.cluster_id
]
)
if zha.PROFILE_ID not in zha_const.DEVICE_CLASS:
zha_const.DEVICE_CLASS[zha.PROFILE_ID] = {}
zha_const.DEVICE_CLASS[zha.PROFILE_ID].update(
{
VIBE_DEVICE_TYPE: 'binary_sensor'
}
)
class AqaraVibrationSensor(XiaomiCustomDevice):
def __init__(self, *args, **kwargs):
self.motionBus = Bus()
super().__init__(*args, **kwargs)
class VibrationBasicCluster(BasicCluster):
cluster_id = BasicCluster.cluster_id
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.attributes.update({
0xFF0D: ('sensitivity', types.uint8_t),
})
class MultistateInputCluster(CustomCluster, MultistateInput):
cluster_id = DoorLock.cluster_id
def __init__(self, *args, **kwargs):
self._currentState = {}
super().__init__(*args, **kwargs)
def _update_attribute(self, attrid, value):
super()._update_attribute(attrid, value)
if attrid == STATUS_TYPE_ATTR:
self._currentState[STATUS_TYPE_ATTR] = MEASUREMENT_TYPE.get(
value
)
if value == VIBE_VALUE:
self.endpoint.device.motionBus.listener_event(
'motion_event'
)
elif value == DROP_VALUE:
self.listener_event(
'zha_send_event',
self,
self._currentState[STATUS_TYPE_ATTR],
{}
)
elif attrid == ROTATION_DEGREES_ATTR:
self.listener_event(
'zha_send_event',
self,
self._currentState[STATUS_TYPE_ATTR],
{
'degrees': value
}
)
elif attrid == RECENT_ACTIVITY_LEVEL_ATTR:
# these seem to be sent every minute when vibration is active
self.endpoint.device.motionBus.listener_event(
'motion_event'
)
class MotionCluster(LocalDataCluster, IasZone):
cluster_id = IasZone.cluster_id
ZONE_STATE = 0x0000
ZONE_TYPE = 0x0001
ZONE_STATUS = 0x0002
VIBRATION_TYPE = 0x002d
ON = 1
OFF = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._timer_handle = None
self.endpoint.device.motionBus.add_listener(self)
self._update_attribute(self.ZONE_STATE, self.OFF)
self._update_attribute(self.ZONE_TYPE, self.VIBRATION_TYPE)
self._update_attribute(self.ZONE_STATUS, self.OFF)
def motion_event(self):
super().listener_event(
'cluster_command',
None,
self.ZONE_STATE,
[self.ON]
)
super().listener_event(
'cluster_command',
None,
self.ZONE_STATUS,
[self.ON]
)
if self._timer_handle:
self._timer_handle.cancel()
loop = asyncio.get_event_loop()
self._timer_handle = loop.call_later(75, self._turn_off)
def _turn_off(self):
self._timer_handle = None
super().listener_event(
'cluster_command',
None,
self.ZONE_STATE,
[self.OFF]
)
super().listener_event(
'cluster_command',
None,
self.ZONE_STATUS,
[self.OFF]
)
signature = {
1: {
'profile_id': zha.PROFILE_ID,
'device_type': zha.DeviceType.DOOR_LOCK,
'input_clusters': [
Basic.cluster_id,
Identify.cluster_id,
Ota.cluster_id,
DoorLock.cluster_id
],
'output_clusters': [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
Ota.cluster_id,
DoorLock.cluster_id
],
},
2: {
'profile_id': zha.PROFILE_ID,
'device_type': VIBE_DEVICE_TYPE,
'input_clusters': [
Identify.cluster_id,
MultistateInput.cluster_id
],
'output_clusters': [
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
MultistateInput.cluster_id
],
},
}
replacement = {
'endpoints': {
1: {
'manufacturer': 'LUMI',
'model': 'lumi.vibration.aq1',
'device_type': VIBE_DEVICE_TYPE,
'input_clusters': [
VibrationBasicCluster,
PowerConfigurationCluster,
TemperatureMeasurementCluster,
Identify.cluster_id,
MultistateInputCluster,
MotionCluster
],
'output_clusters': [
VibrationBasicCluster,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
Ota.cluster_id,
DoorLock.cluster_id
],
}
},
}
| [
"david.mulcahey@icloud.com"
] | david.mulcahey@icloud.com |
106c53998b513b93b70be5cc2982f708c3ecf2b3 | 5a4124eea866334a9e3ddd94c57dbf1df7e3378a | /virtual/bin/pip | 6d53621274d2a5698a9d66f0e36ee6c3cd195680 | [
"MIT"
] | permissive | Elrophi/Housing | 6eb465c06918e67faadbc0cdad4bfe6d139a178f | dc3ccc545eb9d609a62495cbea76f4f849d4658a | refs/heads/master | 2023-05-12T23:45:58.201434 | 2021-06-08T13:09:29 | 2021-06-08T13:09:29 | 373,784,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | #!/home/el/Desktop/moringa-core/python-django/Housing/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"elrophilskwaila@gmail.com"
] | elrophilskwaila@gmail.com | |
16ffe2ce0b7d1d05344cc7814fd04b63e4a84196 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_4/hrbmax002/piglatin.py | 32eb09647dd7f6c75cec56edc0b28a10e8811327 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | def toPigLatin(s):
if s[len(s)-1] != " ":
s = s + " "
answer = ""
while len(s)>0:
temp = s[0:s.index(" ")]
s = s[s.index(" ")+1:]
if temp[0].upper() in ["A","E","I","O","U"]:
temp = temp + "way "
else:
temp = temp + "a"
while temp[0].upper() not in ["A","E","I","O","U"]:
temp = temp[1:] + temp[0]
temp = temp + "ay "
answer = answer + temp
answer = answer[0:len(answer)-1]
return answer
def toEnglish(s):
if s[len(s)-1] != " ":
s = s + " "
answer = ""
while len(s)>0:
temp = s[0:s.index(" ")]
s = s[s.index(" ")+1:]
if temp[-3:]=="way":
answer = answer + " " + temp[0:-3]
else:
temp = temp[0:-2]
while temp[-1] != "a":
temp = temp[-1] + temp[0:-1]
answer = answer + " " + temp[0:-1]
return answer[1:] | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
51705550782e5a0f8c41b524d7d0cf60b7edc565 | fcbf3ddca275606830d455a69df73e20ced6546a | /doc/conf.py | 9ca4ca664b3f765a31dd264254f24c060e447023 | [
"Apache-2.0"
] | permissive | KarchinLab/probabilistic2020 | 5f56e30e0c8484ac524081dd022c0159f24508ce | 8e0b1b9578bd8189b1690dd2f17476c3305b98dc | refs/heads/master | 2023-07-26T12:06:28.647117 | 2019-07-28T12:37:50 | 2019-07-28T12:37:50 | 57,408,263 | 8 | 7 | Apache-2.0 | 2023-07-06T21:02:44 | 2016-04-29T19:32:49 | Python | UTF-8 | Python | false | false | 8,727 | py | # -*- coding: utf-8 -*-
#
# 20/20 Permutation Test documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 28 13:53:42 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('./img'))
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
#'numpydoc',
#'IPython.sphinxext.ipython_console_highlighting',
#'IPython.sphinxext.ipython_directive',
#'matplotlib.sphinxext.plot_directive'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Probabilistic 20/20'
copyright = u'2014-19, Collin Tokheim'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Probabilistic2020doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Probabilistic2020.tex', u'Probabilistic 20/20 Documentation',
u'Collin Tokheim', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'Probabilistic 20/20 Documentation', u'Probabilistic 20/20 Documentation',
[u'Collin Tokheim'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Probabilistic2020', u'Probabilistic 20/20 Documentation',
u'Collin Tokheim', 'Probabilistic2020', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| [
"collintokheim@gmail.com"
] | collintokheim@gmail.com |
9a1838d05d52c92ed2187545c5cfa8e07d8125ed | d7871f3ff716919da9e7a7c9d7ba3a0732114d63 | /3DMean.py | 015010600c692df66f624d29270865533cf62baf | [] | no_license | eric-risbakk/kework | 337040d2b8df6915c9d9ed8ccd5303bf51b82f62 | 705556213a6f5fb9da28f4010d2ca4999b1f0915 | refs/heads/master | 2021-07-10T10:36:12.665117 | 2017-10-12T13:05:24 | 2017-10-12T13:05:24 | 103,539,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,673 | py |
import scipy.stats as spstat
import numpy as np
import numpy.random as npr
import scipy.stats as spstat
import numpy as np
import numpy.random as npr
__author__ = 'Eric Risbakk'
__date__ = "2017-09-14"
__maintainer__ = "Eric Risbakk"
__email__ = "e.risbakk@student.maastrichtuniversity.nl"
DEBUG = False
TEST = True
# Push test.
def online_mean_3d(ndarray, axis):
"""
Finds the mean in a 3d ndarray along the specified (int) axis.
Takes in the already complete array and finds the mean for it.
:param axis: Axis which we find the mean on.
:param ndarray: A 1-dimensional array.
:return: Arithmetic mean of simpleArray
"""
if len(ndarray) < 2:
return ndarray
else:
"""
# Creating the ndarray which will be the mean.
dimensions = []
for i in range(ndarray.ndim):
if i == axis:
continue
dimensions.append(ndarray.shape[i])
m = np.zeros(dimensions)
# Getting the mean for all points, using some recursion!
tempAxis = 0
"""
mean = ndarray[0]
n = 1
for x in ndarray[1:]:
mean = online_mean_step(x, mean, n)
n += 1
return mean
def recursive_truncation(mean, ndarray, currentAxis, axis):
# End-statement.
if currentAxis == ndarray.ndim:
return
# Skip this.
if currentAxis == axis:
recursive_truncation(mean, ndarray, currentAxis + 1, axis)
# Let's go depth first!
# Let's truncate this axis.
# TODO: FIGURE THIS OUT. IS IT EVEN POSSIBLE?
# TODO: MAYBE I SHOULD BE USING A TUPLE OR SOMETHING.
for i in range(ndarray.shape[axis]):
def online_mean_step(new_element, mean, n):
"""
Updates the mean, given newElement, old mean, and the number of elements before we add newElement.
NB: This method does not increase the number of element.
:param new_element: New Element.
:param mean: Old mean.
:param n: Old number of elements.
:return: The new mean.
"""
return (mean * n + new_element) / (n + 1)
def axis_online_mean_check(a1):
"""
Checks the mean of the last axis of a 3d ndarray, using onlineMeanCheck.
:param a1: The 3d ndarray.
:return: 2d ndarray averaged.
"""
if DEBUG:
print("axisOnlineMeanCheck begun.")
x = a1.shape[0]
y = a1.shape[1]
z = a1.shape[2]
if DEBUG:
print("Dimensions: ({} {} {})".format(x, y, z))
mean = np.zeros((x, y))
if DEBUG:
print("ndarray of zeroes created.")
if DEBUG:
print("Dimensions: ({} {})".format(mean.shape[0], mean.shape[1]))
if DEBUG:
print(mean)
for i in range(x):
for j in range(y):
mean[i, j] = online_mean_3d(a1[i, j, :])
if DEBUG:
print("End axisOnlineMeanCheck")
if DEBUG:
print(mean)
return mean
def get_avg(simple_array):
mean = 0
for x in simple_array:
mean += x
return mean/len(simple_array)
if TEST:
# First is rows, second is columns
a1 = npr.rand(2, 2, 2)
a1 *= 10
array_mean0 = np.mean(a1, axis=0)
array_mean1 = np.mean(a1, axis=1)
array_mean2 = np.mean(a1, axis=2)
print(a1)
print("\nMeans:")
print("\n First axis:")
print(array_mean0)
print("\n Second axis:")
print(array_mean1)
print("\n Third axis:")
print(array_mean2)
print("\nLet us attempt using axisMeanCheck.")
a_mean = axis_mean_check(a1)
print(a_mean)
print("\nLet us attempt using axisOnlineMeanCheck.")
b_mean = axis_online_mean_check(a1)
print(b_mean)
print("Finished.")
# End.
| [
"I6146197@unimaas.nl"
] | I6146197@unimaas.nl |
391b5234dde1dd8a811356eeeaa2d4e5f9d7a7ac | 1ae56f2ebb35f75c9e4ccb108b5eff2b158fa355 | /process_data.py | ab43b0172e6485bb40f1c534cb6c33c1d0795694 | [] | no_license | crrcgeorgia/air_quality | 5670d3b7390c2484d09238b9a985deb54688b27c | c16d8cd9b816cabccf425c4acb70d12dc32c9be1 | refs/heads/master | 2021-05-18T11:41:15.670321 | 2020-03-30T08:12:02 | 2020-03-30T08:12:02 | 251,230,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,799 | py | from pull_data import pull_airq_range, pull_weather
from datetime import date, datetime
import pandas as pd
from fbprophet import Prophet
import progressbar
def air_processed(start_date, end_date, cut_points, cut_labs, *args, **kwargs):
air = (
pull_airq_range(start_date, end_date)
.query('settlement_en == "Tbilisi"')
.groupby(["ds", "substance"])["value"]
.mean()
.reset_index()
.pivot(index="ds", columns="substance", values="value")
.reset_index()
.assign(
cut=lambda x: pd.cut(
x["ds"], bins=cut_points, labels=cut_labs, include_lowest=True
)
)
)
return air
def extract_seasonality(air: pd.DataFrame):
subs = ["PM10", "PM2.5", "NO2", "O3", "SO2", "CO"]
with progressbar.ProgressBar(max_value=len(subs)) as bar:
for n, sub in enumerate(subs):
df = air[["ds", sub]].rename(columns={sub: "y"})
m = Prophet()
m.fit(df)
future = m.make_future_dataframe(periods=1)
out = m.predict(future)
out = out[[i for i in out if "_" not in i and i != "yhat"]]
out["ds"] = pd.to_datetime(out.ds)
out = out.query("ds < datetime.now()")
out = out.rename(
columns={i: f"{sub}_" + i for i in out if i != "ds"}
)
air = air.merge(out, on="ds", how="left")
bar.update(n)
return air
def load_processed_data(start_date, end_date, *args, **kwargs):
air = air_processed(start_date, end_date, *args, **kwargs)
air = extract_seasonality(air)
air = pd.merge_asof(
air, pull_weather(start_date, end_date), on="ds"
).dropna()
return air
| [
"noreply@github.com"
] | noreply@github.com |
21ffffe2f10c8650b232760086dfbefe96216764 | c09f02ebc1c4418bf9324afd803532234293d6f5 | /CS61A/Project 2/trends_old3.py | d4d524fd8cdc2005ba0b1e2a888c2e360f710ecb | [] | no_license | jesseyli/OldProjects | a2fddabfbb6207616572c807fde71455f5197a6e | edc28b02bfdb2ea1f4041d879a20858b797e674f | refs/heads/master | 2021-01-10T09:05:07.294910 | 2016-02-05T07:34:04 | 2016-02-05T07:34:04 | 50,936,330 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,330 | py | """Visualizing Twitter Sentiment Across America"""
from data import word_sentiments, load_tweets
from datetime import datetime
from doctest import run_docstring_examples
from geo import us_states, geo_distance, make_position, longitude, latitude
from maps import draw_state, draw_name, draw_dot, wait, message, draw_top_states
from string import ascii_letters
from ucb import main, trace, interact, log_current_line
###################################
# Phase 1: The Feelings in Tweets #
###################################
def make_tweet(text, time, lat, lon):
    """Return a tweet, represented as a Python dictionary.

    text -- A string; the text of the tweet, all in lowercase
    time -- A datetime object; the time that the tweet was posted
    lat -- A number; the latitude of the tweet's location
    lon -- A number; the longitude of the tweet's location

    >>> t = make_tweet("just ate lunch", datetime(2012, 9, 24, 13), 38, 74)
    >>> tweet_words(t)
    ['just', 'ate', 'lunch']
    >>> tweet_time(t)
    datetime.datetime(2012, 9, 24, 13, 0)
    >>> p = tweet_location(t)
    >>> latitude(p)
    38
    """
    # Dictionary keys form the tweet ADT's internal representation;
    # only the accessor functions below should rely on them.
    return dict(text=text, time=time, latitude=lat, longitude=lon)
def tweet_words(tweet):
    """Return a list of the words in the text of a tweet.

    Punctuation is stripped by extract_words; see that function for the
    exact tokenization rules.
    """
    return extract_words(tweet['text'])
def tweet_time(tweet):
    """Return the datetime that represents when the tweet was posted."""
    return tweet['time']
def tweet_location(tweet):
    """Return a position (see geo.py) that represents the tweet's location."""
    return make_position(tweet['latitude'], tweet['longitude'])
def tweet_string(tweet):
    """Return a human-readable string combining a tweet's text and position."""
    position = tweet_location(tweet)
    coordinates = (latitude(position), longitude(position))
    return '"{0}" @ {1}'.format(tweet['text'], coordinates)
def extract_words(text):
    """Return the words in a tweet, not including punctuation.

    A "word" is any maximal run of ASCII letters; every other character
    acts as a separator and is discarded.

    >>> extract_words('anything else.....not my job')
    ['anything', 'else', 'not', 'my', 'job']
    >>> extract_words('i love my job. #winning')
    ['i', 'love', 'my', 'job', 'winning']
    >>> extract_words('make justin # 1 by tweeting #vma #justinbieber :)')
    ['make', 'justin', 'by', 'tweeting', 'vma', 'justinbieber']
    >>> extract_words("paperclips! they're so awesome, cool, & useful!")
    ['paperclips', 'they', 're', 'so', 'awesome', 'cool', 'useful']
    >>> extract_words('@(cat$.on^#$my&@keyboard***@#*')
    ['cat', 'on', 'my', 'keyboard']
    """
    words = []
    current = []  # letters of the word being accumulated
    for char in text:
        if char in ascii_letters:
            current.append(char)
        elif current:
            # Non-letter ends the current run; flush it as a word.
            words.append(''.join(current))
            current = []
    if current:  # flush a trailing word that reaches the end of text
        words.append(''.join(current))
    return words
def make_sentiment(value):
    """Return a sentiment, which represents a value that may not exist.

    value -- either None (no sentiment) or a number in [-1, 1].

    >>> positive = make_sentiment(0.2)
    >>> neutral = make_sentiment(0)
    >>> unknown = make_sentiment(None)
    >>> has_sentiment(positive)
    True
    >>> has_sentiment(neutral)
    True
    >>> has_sentiment(unknown)
    False
    >>> sentiment_value(positive)
    0.2
    >>> sentiment_value(neutral)
    0
    """
    # None is always legal; any numeric value must lie in [-1, 1].
    if value is not None:
        assert -1 <= value <= 1, 'Illegal value'
    return value
def has_sentiment(s):
    """Return whether sentiment s has a value (i.e. is not None).

    A sentiment of 0 counts as having a value.
    """
    # Compare with None by identity (PEP 8), not equality; the whole
    # if/else collapses to a single boolean expression.
    return s is not None
def sentiment_value(s):
    """Return the numeric value of a sentiment s.

    Precondition: s must have a value; call has_sentiment(s) first.
    Raises AssertionError if s is the no-value sentiment (None).
    """
    assert has_sentiment(s), 'No sentiment value'
    return s
def get_word_sentiment(word):
    """Return a sentiment representing the degree of positive or negative
    feeling in the given word.

    Words absent from the word_sentiments table yield a sentiment with no
    value (dict.get returns None for missing keys).

    >>> sentiment_value(get_word_sentiment('good'))
    0.875
    >>> sentiment_value(get_word_sentiment('bad'))
    -0.625
    >>> sentiment_value(get_word_sentiment('winning'))
    0.5
    >>> has_sentiment(get_word_sentiment('Berkeley'))
    False
    """
    # Learn more: http://docs.python.org/3/library/stdtypes.html#dict.get
    return make_sentiment(word_sentiments.get(word))
def analyze_tweet_sentiment(tweet):
    """ Return a sentiment representing the degree of positive or negative
    sentiment in the given tweet, averaging over all the words in the tweet
    that have a sentiment value.

    If no words in the tweet have a sentiment value, return
    make_sentiment(None).

    >>> positive = make_tweet('i love my job. #winning', None, 0, 0)
    >>> round(sentiment_value(analyze_tweet_sentiment(positive)), 5)
    0.29167
    >>> negative = make_tweet("saying, 'i hate my job'", None, 0, 0)
    >>> sentiment_value(analyze_tweet_sentiment(negative))
    -0.25
    >>> no_sentiment = make_tweet("berkeley golden bears!", None, 0, 0)
    >>> has_sentiment(analyze_tweet_sentiment(no_sentiment))
    False
    """
    # BUG FIX: the previous version mapped sentiment_value over every word's
    # sentiment BEFORE filtering, so sentiment_value(None) raised
    # AssertionError for any word without a sentiment.  It also used
    # filter(None, ...), which silently dropped legitimate 0.0 sentiment
    # values and skewed the average.  Filter with has_sentiment first.
    values = [sentiment_value(s)
              for s in (get_word_sentiment(w) for w in tweet_words(tweet))
              if has_sentiment(s)]
    if not values:
        return make_sentiment(None)
    return make_sentiment(sum(values) / len(values))
#################################
# Phase 2: The Geometry of Maps #
#################################
def find_centroid(polygon):
    """Find the centroid of a polygon.

    http://en.wikipedia.org/wiki/Centroid#Centroid_of_polygon

    polygon -- A list of positions, in which the first and last are the same

    Returns: 3 numbers; centroid latitude, centroid longitude, and polygon area

    If a polygon has 0 area, the latitude and longitude of its first
    position serve as its centroid.

    >>> p1, p2, p3 = make_position(1, 2), make_position(3, 4), make_position(5, 0)
    >>> triangle = [p1, p2, p3, p1]  # First vertex is also the last vertex
    >>> round5 = lambda x: round(x, 5)  # Rounds floats to 5 digits
    >>> tuple(map(round5, find_centroid(triangle)))
    (3.0, 2.0, 6.0)
    >>> tuple(map(round5, find_centroid([p1, p3, p2, p1])))
    (3.0, 2.0, 6.0)
    >>> tuple(map(float, find_centroid([p1, p2, p1])))  # A zero-area polygon
    (1.0, 2.0, 0.0)
    """
    signed_area = 0
    c_lat = 0
    c_lon = 0
    # Walk consecutive vertex pairs (p, q); the polygon is closed, so the
    # last pair wraps back to the first vertex automatically.
    for p, q in zip(polygon[:-1], polygon[1:]):
        x0, y0 = latitude(p), longitude(p)
        x1, y1 = latitude(q), longitude(q)
        cross = x0 * y1 - x1 * y0  # shoelace-formula term
        signed_area += cross
        c_lat += (x0 + x1) * cross
        c_lon += (y0 + y1) * cross
    signed_area = signed_area / 2
    if signed_area == 0:
        # Degenerate polygon: fall back to the first vertex.
        c_lat = latitude(polygon[0])
        c_lon = longitude(polygon[0])
    else:
        c_lat = c_lat / (6 * signed_area)
        c_lon = c_lon / (6 * signed_area)
    return (c_lat, c_lon, abs(signed_area))
def find_state_center(polygons):
    """Compute the geographic center of a state, averaged over its polygons.

    The center is the average position of centroids of the polygons in polygons,
    weighted by the area of those polygons.

    Arguments:
    polygons -- a list of polygons

    >>> ca = find_state_center(us_states['CA'])  # California
    >>> round(latitude(ca), 5)
    37.25389
    >>> round(longitude(ca), 5)
    -119.61439

    >>> hi = find_state_center(us_states['HI'])  # Hawaii
    >>> round(latitude(hi), 5)
    20.1489
    >>> round(longitude(hi), 5)
    -156.21763
    """
    weighted_lat = 0
    weighted_lon = 0
    total_area = 0
    for polygon in polygons:
        # FIX: call find_centroid once per polygon; the previous version
        # recomputed it a second time just to read the area.
        c_lat, c_lon, area = find_centroid(polygon)
        weighted_lat += c_lat * area
        weighted_lon += c_lon * area
        total_area += area
    return make_position(weighted_lat / total_area, weighted_lon / total_area)
###################################
# Phase 3: The Mood of the Nation #
###################################
def find_closest_state(tweet, state_centers):
    """Return the name of the state closest to the given tweet's location.
    Use the geo_distance function (already provided) to calculate distance
    in miles between two latitude-longitude positions.
    Arguments:
    tweet -- a tweet abstract data type
    state_centers -- a dictionary from state names to positions.
    >>> us_centers = {n: find_state_center(s) for n, s in us_states.items()}
    >>> sf = make_tweet("welcome to san Francisco", None, 38, -122)
    >>> ny = make_tweet("welcome to new York", None, 41, -74)
    >>> find_closest_state(sf, us_centers)
    'CA'
    >>> find_closest_state(ny, us_centers)
    'NJ'
    """
    location = tweet_location(tweet)  # the tweet's position never changes; hoist it
    closest_state = 'CA'
    closest_distance = geo_distance(location, state_centers[closest_state])
    for state, center in state_centers.items():
        # Compute each distance exactly once; the original recomputed the
        # same geo_distance up to three times per state.
        distance = geo_distance(location, center)
        if distance < closest_distance:
            closest_state = state
            closest_distance = distance
    return closest_state
def group_tweets_by_state(tweets):
    """Return a dictionary that aggregates tweets by their nearest state center.
    The keys of the returned dictionary are state names, and the values are
    lists of tweets that appear closer to that state center than any other.
    tweets -- a sequence of tweet abstract data types
    >>> sf = make_tweet("welcome to san francisco", None, 38, -122)
    >>> ny = make_tweet("welcome to new york", None, 41, -74)
    >>> ca_tweets = group_tweets_by_state([sf, ny])['CA']
    >>> tweet_string(ca_tweets[0])
    '"welcome to san francisco" @ (38, -122)'
    """
    tweets_by_state = {}
    us_centers = {n: find_state_center(s) for n, s in us_states.items()}
    for tweet in tweets:
        closest_state = find_closest_state(tweet, us_centers)
        # setdefault replaces the `== None` membership check, and appending
        # in place avoids rebuilding the whole list for every tweet.
        tweets_by_state.setdefault(closest_state, []).append(tweet)
    return tweets_by_state
def most_talkative_states(term):
    """Return a list of the top five states with the largest number of tweets
    containing 'term' in descending order (from most to least).
    If multiple states tie, return them in alphabetical order.
    >>> most_talkative_states('texas')
    [('TX', 1541), ('LA', 303), ('OK', 207), ('NM', 55), ('AR', 41)]
    >>> most_talkative_states('soup')
    [('CA', 57), ('NJ', 41), ('OH', 31), ('FL', 26), ('MA', 23)]
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    tweets_by_state = group_tweets_by_state(tweets)
    counts = [(state, len(state_tweets))
              for state, state_tweets in tweets_by_state.items()]
    # Sort by descending count; the secondary key keeps ties alphabetical.
    # (The original could return an unsorted list: it never re-sorted after
    # the last replacement, and never sorted at all with five or fewer states.)
    counts.sort(key=lambda pair: (-pair[1], pair[0]))
    return counts[:5]
def average_sentiments(tweets_by_state):
    """Calculate the average sentiment of the states by averaging over all
    the tweets from each state. Return the result as a dictionary from state
    names to average sentiment values (numbers).
    If a state has no tweets with sentiment values, leave it out of the
    dictionary entirely. Do NOT include states with no tweets, or with tweets
    that have no sentiment, as 0. 0 represents neutral sentiment, not unknown
    sentiment.
    tweets_by_state -- A dictionary from state names to lists of tweets
    """
    averaged_state_sentiments = {}
    for state, state_tweets in tweets_by_state.items():
        # `is not None` (not `!= None`): 0 is a real, neutral sentiment.
        sentiments = [s for s in map(analyze_tweet_sentiment, state_tweets)
                      if s is not None]
        if sentiments:
            averaged_state_sentiments[state] = float(sum(sentiments)) / float(len(sentiments))
    return averaged_state_sentiments
######################################
# Phase 4: Into the Fourth Dimension #
######################################
def group_tweets_by_hour(tweets):
    """Return a dictionary that groups tweets by the hour they were posted.
    The keys of the returned dictionary are the integers 0 through 23.
    The values are lists of tweets, where tweets_by_hour[i] is the list of all
    tweets that were posted between hour i and hour i + 1. Hour 0 refers to
    midnight, while hour 23 refers to 11:00PM.
    To get started, read the Python Library documentation for datetime objects:
    http://docs.python.org/py3k/library/datetime.html#datetime.datetime
    tweets -- A list of tweets to be grouped
    >>> tweets = load_tweets(make_tweet, 'party')
    >>> tweets_by_hour = group_tweets_by_hour(tweets)
    >>> for hour in [0, 5, 9, 17, 23]:
    ...     current_tweets = tweets_by_hour.get(hour, [])
    ...     tweets_by_state = group_tweets_by_state(current_tweets)
    ...     state_sentiments = average_sentiments(tweets_by_state)
    ...     print('HOUR:', hour)
    ...     for state in ['CA', 'FL', 'DC', 'MO', 'NY']:
    ...         if state in state_sentiments.keys():
    ...             print(state, ":", round(state_sentiments[state], 5))
    HOUR: 0
    CA : 0.08333
    FL : -0.09635
    DC : 0.01736
    MO : -0.11979
    NY : -0.15
    HOUR: 5
    CA : 0.00945
    FL : -0.0651
    DC : 0.03906
    MO : 0.1875
    NY : -0.04688
    HOUR: 9
    CA : 0.10417
    NY : 0.25
    HOUR: 17
    CA : 0.09808
    FL : 0.0875
    MO : -0.1875
    NY : 0.14583
    HOUR: 23
    CA : -0.10729
    FL : 0.01667
    DC : -0.3
    MO : -0.0625
    NY : 0.21875
    """
    # One (possibly empty) bucket per hour of the day.
    tweets_by_hour = {hour: [] for hour in range(24)}
    for tweet in tweets:
        # append mutates the bucket in place; the original rebuilt the whole
        # list for every tweet, which is quadratic in the bucket size.
        tweets_by_hour[tweet_time(tweet).hour].append(tweet)
    return tweets_by_hour
# Interaction. You don't need to read this section of the program.
def print_sentiment(text='Are you virtuous or verminous?'):
    """Print the words in text, annotated by their sentiment scores."""
    words = extract_words(text.lower())
    if not words:
        return  # max() below raises ValueError on an empty sequence
    # Right-align every word in a column as wide as the longest word.
    layout = '{0:>' + str(len(max(words, key=len))) + '}: {1:+}'
    for word in words:
        s = get_word_sentiment(word)
        if has_sentiment(s):
            print(layout.format(word, sentiment_value(s)))
def draw_centered_map(center_state='TX', n=10):
    """Draw the n states closest to center_state."""
    # Renamed the comprehension variables: the original reused `n`, which
    # shadowed the parameter inside the comprehension scope.
    us_centers = {state: find_state_center(shapes)
                  for state, shapes in us_states.items()}
    center = us_centers[center_state.upper()]
    def dist_from_center(name):
        return geo_distance(center, us_centers[name])
    for name in sorted(us_states.keys(), key=dist_from_center)[:int(n)]:
        draw_state(us_states[name])
        draw_name(name, us_centers[name])
    draw_dot(center, 1, 10)  # Mark the center state with a red dot
    wait()
def draw_state_sentiments(state_sentiments):
    """Draw all U.S. states in colors corresponding to their sentiment value.
    Unknown state names are ignored; states without values are colored grey.
    state_sentiments -- A dictionary from state strings to sentiment values
    """
    # First pass: fill every state with the color for its sentiment
    # (dict.get yields None, i.e. grey, for states without a value).
    for name, shapes in us_states.items():
        draw_state(shapes, state_sentiments.get(name))
    # Second pass: draw the labels on top of the filled shapes.
    for name, shapes in us_states.items():
        center = find_state_center(shapes)
        if center is not None:
            draw_name(name, center)
def draw_map_for_term(term='my job'):
    """Draw the sentiment map corresponding to the tweets that contain term.
    Some term suggestions:
    New York, Texas, sandwich, my life, justinbieber
    """
    tweets = load_tweets(make_tweet, term)
    state_sentiments = average_sentiments(group_tweets_by_state(tweets))
    draw_state_sentiments(state_sentiments)
    # Overlay one dot per tweet that actually carries a sentiment value.
    for tweet in tweets:
        sentiment = analyze_tweet_sentiment(tweet)
        if has_sentiment(sentiment):
            draw_dot(tweet_location(tweet), sentiment_value(sentiment))
    draw_top_states(most_talkative_states(term) if len(tweets) != 0 else None)
    wait()
def draw_map_by_hour(term='my job', pause=0.5):
    """Draw the sentiment map for tweets that match term, for each hour."""
    tweets_by_hour = group_tweets_by_hour(load_tweets(make_tweet, term))
    for hour in range(24):
        # Redraw the whole map for this hour's slice of tweets.
        hourly_tweets = tweets_by_hour.get(hour, [])
        hourly_sentiments = average_sentiments(group_tweets_by_state(hourly_tweets))
        draw_state_sentiments(hourly_sentiments)
        message("{0:02}:00-{0:02}:59".format(hour))
        wait(pause)
def run_doctests(names):
    """Run verbose doctests for all functions in space-separated names."""
    module_globals = globals()
    for name in names.split():
        if name in module_globals:
            run_docstring_examples(module_globals[name], module_globals, True, name)
        else:
            print("No function named " + name)
def test_abstraction(names):
global make_position, longitude, latitude, us_states
global make_sentiment, has_sentiment, sentiment_value
import geo
print('--- Testing data abstraction violations for {} ---'.format(names))
make_position = geo.make_position = lambda lat, lon: lambda: (lat, lon)
latitude = geo.latitude = lambda p: p()[0]
longitude = geo.longitude = lambda p: p()[1]
us_states = geo.load_states()
make_sentiment = lambda v: lambda: v
has_sentiment = lambda s: s() is not None
sentiment_value = lambda s: s()
run_doctests(names)
print('------')
print("""If there are errors in the doctests, you have a data abstraction violation in {}""".format(names))
@main
def run(*args):
    """Read command-line arguments and calls corresponding functions."""
    import argparse
    parser = argparse.ArgumentParser(description="Run Trends")
    # Each boolean flag is named after the module-level function it triggers.
    for long_flag, short_flag in [('--print_sentiment', '-p'),
                                  ('--run_doctests', '-t'),
                                  ('--draw_centered_map', '-d'),
                                  ('--draw_map_for_term', '-m'),
                                  ('--draw_map_by_hour', '-b'),
                                  ('--test_abstraction', '-a')]:
        parser.add_argument(long_flag, short_flag, action='store_true')
    parser.add_argument('text', metavar='T', type=str, nargs='*',
                        help='Text to process')
    args = parser.parse_args()
    joined_text = ' '.join(args.text)
    # Invoke every function whose flag was set, passing the joined text.
    for option, enabled in vars(args).items():
        if option != 'text' and enabled:
            globals()[option](joined_text)
| [
"jesseyli@berkeley.edu"
] | jesseyli@berkeley.edu |
6408b1b91926c28f6a0816eef12e75332aba15ac | 4113e7c9f1beb13a0ef963a6760e43b5cab676ce | /__init__.py | 4781cbd159824f702baafe799f5dee329fb4d432 | [] | no_license | charlieb/flowers | b413595b8ce1839123305ad96bdae6213a8b7faf | dca18ef10b849ee01518818279426699749a655e | refs/heads/master | 2020-04-19T08:36:48.699934 | 2017-01-09T04:12:00 | 2017-01-09T04:12:00 | 66,811,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | from .petals import petal, flower, draw
| [
"charlie.burrows@gmail.com"
] | charlie.burrows@gmail.com |
33e3abf7249f08d1377e5d1805da84937ab779eb | 379934f86f2e7fce60c88222ed61bc106390271e | /glasslab/dataanalysis/misc/gr_project_2012/v1/boxplots_from_p65_gr_peaks.py | e7313cb2c1ebf8f13f60ddced648317001bc413a | [] | no_license | karmel/glasslab | a022fb3e1147382ba5f64c67d6db9b87b9bca2de | 754774390f03852d1385c5fffeb32fcdab5cd7e4 | refs/heads/master | 2021-09-04T18:00:49.650817 | 2014-10-06T19:37:25 | 2014-10-06T19:37:25 | 5,957,226 | 1 | 1 | null | 2019-09-22T16:55:29 | 2012-09-25T21:56:42 | Python | UTF-8 | Python | false | false | 1,966 | py | '''
Created on Oct 1, 2012
@author: karmel
'''
from __future__ import division
from glasslab.dataanalysis.graphing.seq_grapher import SeqGrapher
if __name__ == '__main__':
    yzer = SeqGrapher()
    # Input/output locations for this analysis run.
    dirpath = 'karmel/Desktop/Projects/Classes/Rotations/Finland_2012/GR_Project/'
    dirpath = yzer.get_path(dirpath)
    img_dirpath = yzer.get_and_create_path(dirpath, 'boxplots_from_p65_gr')
    if True:
        # One pass per factor pair: the peak factor, the comparison factor,
        # and the basal condition used in the y-axis label.
        for main, compare, basal_cond in (('p65','GR', 'KLA'),('GR','p65', 'Dex')):
            data = yzer.import_file(yzer.get_filename(dirpath, 'motifs', 'from_peaks',
                                    '{0}_kla_dex_vectors.txt'.format(main)))
            data = data.fillna(0)
            # Average duplicate peak rows, then keep rows with tag_count_2 > 0.
            data = data.groupby(['id','chr_name'],as_index=False).mean()
            data = data[data['tag_count_2'] > 0]
            colname = 'tag_count_diff'
            # Relative difference between the two tag counts for each peak.
            data[colname] = (data['tag_count'] - data['tag_count_2'])/data['tag_count']
            # Three peak categories, split on tag_count_3/tag_count_4
            # (presumably the comparison factor's tags — TODO confirm).
            cond_1 = (data['tag_count_3'] == 0)
            cond_2 = (data['tag_count_3'] > 0) & (data['tag_count_3'] < data['tag_count_4'] )
            cond_3 = (data['tag_count_3'] > 0) & (data['tag_count_3'] >= data['tag_count_4'] )
            title = 'Difference in {0} peak tag counts by {1}'.format(main, compare)
            names = [s.format(compare) for s in ['No {0} in KLA+Dex','Loses {0} in KLA+Dex','Gains/maintains {0} in KLA+Dex']]
            ax = yzer.boxplot([data[cond_1][colname], data[cond_2][colname], data[cond_3][colname]],
                              names,
                              title=title,
                              xlabel='Condition',
                              ylabel='{0} KLA+Dex tags in peak - {0} {1} tags in peak'.format(main, basal_cond),
                              show_outliers=False, show_plot=False)
            yzer.save_plot(yzer.get_filename(img_dirpath, title.replace(' ','_')))
yzer.show_plot() | [
"karmel@arcaio.com"
] | karmel@arcaio.com |
b2b18b0466f68363ec428575924ddb25850628e3 | 0ce934553a854e5a3d28971f73be19d0912449bf | /homePage/migrations/0002_auto_20190311_1333.py | e5ee1021a560d11d27a4092636182d934d75cbd7 | [] | no_license | keennhlc/GKWeb | d0c1c2617e2334ee9aba6e3b741d049cf75c9a62 | db34c14a4be13fab1cf16de66fc406b7142d7fcb | refs/heads/master | 2020-05-01T09:19:13.871041 | 2019-03-24T10:20:40 | 2019-03-24T10:20:40 | 177,397,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # Generated by Django 2.2b1 on 2019-03-11 05:33
from django.db import migrations
class Migration(migrations.Migration):
    # Reverses the News models added in 0001_initial: first drop the `news`
    # field on NewsContent (presumably its FK to News — confirm in 0001),
    # then delete both models.
    dependencies = [
        ('homePage', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='newscontent',
            name='news',
        ),
        migrations.DeleteModel(
            name='News',
        ),
        migrations.DeleteModel(
            name='NewsContent',
        ),
    ]
| [
"keennweb@gmail.com"
] | keennweb@gmail.com |
a8e3cce479c0b620026b4944db203dae7b0979bd | b29d80506512e9cec2aa820c043c616751b13dcf | /serializers.py | 671d4c81e4ccca93029d7c7bbb733c0ca80d62a2 | [] | no_license | luvjoey1996/storageManager | f25e8d359294f28799e592f49febeea0ca1acb06 | 2d66820f05a80245e2c5ef2bcf56b00d3c747b8a | refs/heads/main | 2023-08-03T17:41:18.327926 | 2021-09-17T10:23:49 | 2021-09-17T10:23:49 | 407,498,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | from marshmallow import Schema, fields, validate
from config import Configuration
from models import ServiceType
class CreateServiceSchema(Schema):
    # Minimal payload: a free-form service type string.
    type = fields.Str()
class ServiceCreateSchema(Schema):
    # `type` must be one of the declared ServiceType choices.
    type = fields.Int(validate=validate.OneOf(choices=ServiceType.as_choices()))
    # Memory request, capped so MEMORY_RESERVE stays free on the host;
    # `missing=1` makes it default to 1 when omitted.
    memory = fields.Int(validate=validate.Range(min=1, max=Configuration.MEMORY_TOTAL - Configuration.MEMORY_RESERVE),
                        missing=1)
class PaginationSchema(Schema):
    # Page size validated to 10..30; pages are 1-based.
    page_size = fields.Int(validate=validate.Range(min=10, max=30))
    current_page = fields.Int(validate=validate.Range(min=1))
class ServicePagingSchema(Schema):
    # Fields accepted when listing services (name and numeric type).
    name = fields.Str()
    type = fields.Int()
class SettingSchema(Schema):
    # One key/value setting attached to a service (see ServiceSchema.settings).
    type = fields.Int()
    name = fields.Str()
    value = fields.Str()
class ServiceSchema(Schema):
    # Full service representation, including its nested settings and the
    # connection credentials/endpoint.
    name = fields.Str()
    type = fields.Int()
    memory = fields.Int()
    state = fields.Int()
    error_no = fields.Int()
    settings = SettingSchema(many=True)
    username = fields.Str()
    password = fields.Str()
    port = fields.Int()
    ip = fields.Str()
| [
"luvjoey1996@gmail.com"
] | luvjoey1996@gmail.com |
eb085418aab782c970d7166273fd9b9262c46f5b | c858d9511cdb6a6ca723cd2dd05827d281fa764d | /MFTU/lesson 7/Test work/test_F.py | b6885f38ec053864866d442146f62a2ba115c3a5 | [] | no_license | DontTouchMyMind/education | 0c904aa929cb5349d7af7e06d9b1bbaab972ef95 | 32a53eb4086b730cc116e633f68cf01f3d4ec1d1 | refs/heads/master | 2021-03-12T11:15:02.479779 | 2020-09-17T08:19:50 | 2020-09-17T08:19:50 | 246,616,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # Необходимо найти НОД двух чисел, используя алгоритм Евклида.
#
# Формат входных данных
# На вход подаются два натуральных числа, по числу в новой строке.
#
# Формат выходных данных
# Одно число - НОД входных чисел.
def gcd(a, b):
    """Return the greatest common divisor of two natural numbers (Euclid)."""
    # The original subtraction-based recursion needed one call per
    # subtraction, so inputs such as gcd(1, 10**9) blew Python's recursion
    # limit; the remainder form finishes in O(log(min(a, b))) steps.
    while b:
        a, b = b, a % b
    return a
n1 = int(input())  # first natural number
n2 = int(input())  # second natural number
print(gcd(n1, n2))  # their greatest common divisor
| [
"tobigface@gmail.com"
] | tobigface@gmail.com |
ad0cdab693cf632e6b1795e623fb5a5965e77727 | 18a0f4ddefae1e9a0ddae86de7315bacd1a96491 | /apps/users/views.py | 6681229876742a55d927c790d13ae2c52aa24ef3 | [] | no_license | ZVR999/belt_reviewer | f1a2d41bcced69f906d73178f06acc4f6b215431 | ffe0099813bfb1389c0a52f400ba00fc2d9acf86 | refs/heads/master | 2020-03-29T04:05:46.229105 | 2020-02-11T17:40:10 | 2020-02-11T17:40:10 | 149,515,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,310 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from .models import User
from ..reviews.models import Review
from ..books.models import Book
import bcrypt
from django.contrib import messages
# Create your views here.
# Create a User
def create(request):
    """Register a new user: validate input, reject duplicate emails,
    then hash the password and create the account."""
    errors = User.objects.basic_validator(request.POST)
    if len(errors):
        for tag, error in errors.iteritems():
            messages.error(request, error, extra_tags=tag)
        return redirect('/')
    user_exists = User.objects.filter(email=request.POST['email'])
    if user_exists:
        messages.error(request, 'This email is already in use')
        return redirect('/')
    # Hash only after validation passes: bcrypt is deliberately slow, so the
    # original paid for a hash even when the form data was rejected.
    hashed_pw = bcrypt.hashpw(
        request.POST['password'].encode(), bcrypt.gensalt())
    request.session['alias'] = request.POST['alias']
    name = request.POST['name']
    alias = request.POST['alias']
    email = request.POST['email']
    User.objects.create(name=name, alias=alias,
                        email=email, password=hashed_pw)
    return redirect('/books')
# Login a User
def login(request):
    """Log a user in when the email/password pair matches an account."""
    matches = User.objects.filter(email=request.POST['email'])
    if matches:
        user = matches[0]
        stored_hash = str(user.password)
        if bcrypt.checkpw(str(request.POST['password']), stored_hash.encode()):
            request.session['alias'] = user.alias
            return redirect('/books')
    # Same message for unknown email and wrong password, so the response
    # does not reveal which one failed.
    messages.error(request, 'Invalid email or password')
    return redirect('/')
def show(request, user_id):
    """Render a user's profile with the distinct books they have reviewed."""
    user = User.objects.get(id=user_id)
    context = {
        'user': user,  # reuse the row fetched above instead of querying twice
        # Parameterized raw query: never splice values into SQL by string
        # concatenation, even for an id that "should" be numeric.
        'books': Review.objects.raw(
            'SELECT DISTINCT "books_Book"."id", "books_Book"."name" '
            'FROM reviews_Review '
            'JOIN books_Book ON "reviews_Review"."book_id"="books_Book"."id" '
            'WHERE "reviews_Review"."user_id"=%s', [user.id])
    }
    request.session['total'] = Review.objects.filter(user=User.objects.filter(id=user_id)).count()
    return render(request, 'users/user.html', context)
def logout(request):
    # "Log out" by resetting the session alias to the logged-out banner text
    # that the templates display, then send the user home.
    request.session['alias'] = 'Please Login or Register'
    return redirect('/')
| [
"zachkery999@gmail.com"
] | zachkery999@gmail.com |
0450edaf3b101ccb33050e851f1cf7ff76c42e14 | 8fd279f728b7a83e6f14fd6ab77da67459bd21df | /test.py | eed4c5c5c238b7175223f43ce87c4e216551fbf3 | [] | no_license | yiebo/stt-transformer | ec4de39fb3f54c8ab264aaa69f1b9b41c83d303f | 78a06451085064de0fd2417764183a1f4ea4b4d0 | refs/heads/master | 2022-09-09T11:32:19.265157 | 2020-06-05T16:06:16 | 2020-06-05T16:06:16 | 266,639,088 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,302 | py | from tqdm import tqdm
import glob
import os
import numpy as np
from prefetch_generator import BackgroundGenerator
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils import tensorboard
from torchvision import utils
import torchaudio
from torchaudio.transforms import GriffinLim, InverseMelScale, Resample, Spectrogram, MelScale
from ops import positional_encoding
from util import to_device, plot_att_heads, text_id_to_string
from model import Encoder, Decoder
from dataset import Dataset, _symbol_to_id
from audio_process import sample_rate, rescale_mel, scale_mel, MelWav
import time
# Device selection: first CUDA GPU if available, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Hyperparameters (only batch_size is actually used by this script).
epoch_total = 64
batch_size = 4
enc_lr = 0.0001
dec_lr = 0.0005
emb_lr = 0.0001
sym_dim = len(_symbol_to_id)
# Mel -> linear-spectrogram inversion and Griffin-Lim phase reconstruction.
mel_lin = InverseMelScale(n_stft=1024 // 2 + 1, n_mels=80, sample_rate=sample_rate,
                          max_iter=2*2048).to(device)
griffin_lim = GriffinLim(n_fft=1024, hop_length=256).to(device)
writer = tensorboard.SummaryWriter(log_dir=f'logs/test')
dataset = Dataset('../DATASETS/LJSpeech-1.1/metadata.csv', '../DATASETS/LJSpeech-1.1')
dataloader = DataLoader(dataset, collate_fn=dataset.collocate, batch_size=batch_size,
                        shuffle=False, num_workers=0, drop_last=True)
resample = Resample(orig_freq=22050, new_freq=sample_rate)
spectogram = Spectrogram(n_fft=1024, hop_length=256).to(device)
to_mel = MelScale(n_mels=80, sample_rate=sample_rate,
                  n_stft=1024 // 2 + 1).to(device)
# Sanity check on the first metadata entry: log the original spectrogram and
# audio, then the mel -> linear -> Griffin-Lim round trip for comparison.
with open('../DATASETS/LJSpeech-1.1/metadata.csv', encoding='utf8') as file:
    data = [line.strip().split('|') for line in file]
path, text = data[0][0], data[0][1]
path = f'../DATASETS/LJSpeech-1.1/wavs/{path}.wav'
data, sr = torchaudio.load(path)
data = resample(data)
data = data.to(device)
data = spectogram(data.squeeze(0))
mel_norm = ((data.unsqueeze(0) - data.mean()) / data.std()).clamp(-1, 1) * .5 + .5
writer.add_image(f'spec/origin', mel_norm, 0)
writer.add_audio(f'audio/origin', griffin_lim(data), global_step=0, sample_rate=sample_rate)
data = to_mel(data)
data = scale_mel(data)
data = rescale_mel(data)
data = mel_lin(data)
mel_norm = ((data.unsqueeze(0) - data.mean()) / data.std()).clamp(-1, 1) * .5 + .5
writer.add_image(f'spec/re', mel_norm, 0)
writer.add_audio(f'audio/re', griffin_lim(data), global_step=0, sample_rate=sample_rate)
mel_wav = MelWav().to(device)
# Compare batched inversion (mel_lin + griffin_lim) against the MelWav helper
# on one dataloader batch, timing both paths; exits after the first batch.
for batch in dataloader:
    text_data, text_pos, text_len, text_mask, mel_data, mel_pos, mel_len, mel_mask, gate = to_device(batch, device)
    start = time.time()
    # data = mel_wav(mel_data, mel_mask[:, -1].unsqueeze(1))
    x = mel_data.transpose(-2, -1)
    x = rescale_mel(x)
    x = mel_lin(x)
    mel_norm = ((x - x.mean()) / x.std()).clamp(-1, 1) * .5 + .5
    writer.add_image(f'spec/all2', mel_norm[:1], 0)
    x = griffin_lim(x)
    for sample in x:
        writer.add_audio(f'audio/all2', sample, global_step=0, sample_rate=sample_rate)
    print(time.time() - start)
    start = time.time()
    for data, mel_len_ in zip(mel_data, mel_len):
        writer.add_audio(f'audio/all', mel_wav(data[:mel_len_]), global_step=0, sample_rate=sample_rate)
    print(time.time() - start)
    writer.flush()
    exit()
| [
"yiebo-c@hotmail.com"
] | yiebo-c@hotmail.com |
def tomato(t, box):
    """Return the number of days until every tomato in `box` is ripe.

    t   -- list of [row, col] positions of the initially ripe tomatoes
    box -- n x m grid: 1 = ripe, 0 = unripe, -1 = empty cell
    Returns -1 when no tomato is ripe initially or some unripe tomato is
    unreachable, 0 when everything is already ripe, otherwise the number
    of BFS days. Note: `box` is modified in place.
    """
    from collections import deque
    # Derive the grid size from the grid itself instead of silently relying
    # on the module-level globals n and m, as the original did.
    n = len(box)
    m = len(box[0])
    ripe = len(t)
    if ripe == 0:
        return -1
    elif ripe == n * m:
        return 0
    dr = [-1, 1, 0, 0]
    dc = [0, 0, -1, 1]
    # visited[r][c] = 1 + days until (r, c) ripens; 0 = never reached.
    visited = [[0] * m for _ in range(n)]
    q = deque()
    for r, c in t:
        q.append((r, c))
        visited[r][c] = 1
    while q:
        r, c = q.popleft()  # deque: O(1) instead of list.pop(0)'s O(n)
        for j in range(4):
            nr = r + dr[j]
            nc = c + dc[j]
            if 0 <= nr < n and 0 <= nc < m and box[nr][nc] == 0 and visited[nr][nc] == 0:
                box[nr][nc] = 1
                q.append((nr, nc))
                visited[nr][nc] = visited[r][c] + 1
    result = 0
    for x in range(n):
        # BUG FIX: the original scanned `for y in range(n)`, i.e. used the
        # row count as the column count — wrong answers when m > n and an
        # IndexError when m < n. Only square grids worked.
        for y in range(m):
            if box[x][y] == 0:
                return -1
            elif box[x][y] == 1:
                if visited[x][y] > result:
                    result = visited[x][y]
    return result - 1
m, n = map(int, input().split())
box = []
for _ in range(n):
    box.append(list(map(int, input().split())))
# Collect EVERY initially ripe tomato as a BFS start position.
# (The original `break` stopped after the first ripe tomato in each row,
# dropping any further ripe tomatoes in that row and inflating the answer.)
t = []
for i in range(n):
    for j in range(m):
        if box[i][j] == 1:
            t.append([i, j])
print(tomato(t, box)) | [
"rkddlsdud0720@gmail.com"
] | rkddlsdud0720@gmail.com |
6450073c33cb50db18dc4b145b95d18e75ee47b0 | e2d22f12f8e540a80d31de9debe775d35c3c5c22 | /blousebrothers/confs/migrations/0037_auto_20170117_1535.py | 6841343b2a40c2fbb431ff15ae9ddfd4cd5a80ee | [
"MIT"
] | permissive | sladinji/blousebrothers | 360c3b78ec43379977dbf470e5721e6a695b2354 | 461de3ba011c0aaed3f0014136c4497b6890d086 | refs/heads/master | 2022-12-20T10:24:07.631454 | 2019-06-13T13:17:35 | 2019-06-13T13:17:35 | 66,867,705 | 1 | 0 | NOASSERTION | 2022-12-19T18:15:44 | 2016-08-29T18:04:33 | Python | UTF-8 | Python | false | false | 813 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-01-17 15:35
from __future__ import unicode_literals
from decimal import Decimal
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adjusts two Conference fields: `price` becomes a 6-digit decimal with
    # a 0.5 default, and `type` gains the DCP/QI/LCA choices (default 'DP').
    dependencies = [
        ('confs', '0036_auto_20170110_1100'),
    ]
    operations = [
        migrations.AlterField(
            model_name='conference',
            name='price',
            field=models.DecimalField(decimal_places=2, default=Decimal('0.5'), help_text='', max_digits=6, verbose_name='Prix de vente'),
        ),
        migrations.AlterField(
            model_name='conference',
            name='type',
            field=models.CharField(choices=[('DCP', 'DCP'), ('QI', 'QI'), ('LCA', 'LCA')], default='DP', max_length=10, verbose_name='Type'),
        ),
    ]
| [
"julien.almarcha@gmail.com"
] | julien.almarcha@gmail.com |
1465bbad98fe6c51d22d31a82efaa6fba3362f45 | e8a285cb1dcdae6f1b6d8506b8d25a1d031d6cd7 | /cpptools/tests/test_write_pythia_hepmc3.py | d4e73a3185bc0137d2756b3b3f25a6b491647b97 | [] | no_license | matplo/heppy | f30558e4ff3c1720c63b4d82f739b3f8acadc53e | 88c931e3e7dcf57a3a476ef0a92f0204491cafb9 | refs/heads/master | 2023-07-07T18:17:04.486149 | 2023-06-29T20:45:32 | 2023-06-29T20:45:32 | 201,352,733 | 5 | 8 | null | 2023-07-04T21:57:31 | 2019-08-08T23:33:39 | C | UTF-8 | Python | false | false | 782 | py | #!/usr/bin/env python
import pythia8
import pythiahepmc3
def create_and_init_pythia(config_strings=None):
    """Create a Pythia8 generator, apply the given config strings plus the
    quiet-output settings, and return it initialized (None on init failure).
    """
    # `None` default instead of the original mutable `[]` default argument
    # (a classic Python pitfall); behavior for all callers is unchanged.
    pythia = pythia8.Pythia()
    for s in (config_strings or []):
        pythia.readString(s)
    # Silence the per-event printouts; only the final statistics matter.
    for extra_s in ["Next:numberShowEvent = 0", "Next:numberShowInfo = 0", "Next:numberShowProcess = 0", "Next:numberCount = 0"]:
        pythia.readString(extra_s)
    if pythia.init():
        return pythia
    return None
def main():
    """Generate 100 hard-QCD events and stream them to a HepMC3 file."""
    pythia = create_and_init_pythia(["PhaseSpace:pTHatMin = 2", "HardQCD:all = on"])
    sfoutname = "test_write_pythia_hepmc3.dat"
    pyhepmcwriter = pythiahepmc3.Pythia8HepMCWrapper(sfoutname)
    # Skip events that fail to generate; write every successful one.
    for _ in range(100):
        if not pythia.next():
            continue
        pyhepmcwriter.fillEvent(pythia)
    pythia.stat()
    print("[i] done writing to {}".format(sfoutname))
if __name__ == '__main__':
main()
| [
"ploskon@gmail.com"
] | ploskon@gmail.com |
66f6e4500621285bbbbaf51d4c572120cb3598e7 | 3b1229c458aa232bfcf11cd6da5f1275e9bb3a8f | /python/Python基础/截图和代码/if、while、for/PaxHeader/01-if比较运算符.py | 147895a71cc09ac233a82e6fae85d44e6ae21569 | [] | no_license | sunjianbo/learning | 4fee3ddc5e3d4040a49f2ef3e6f239fd6a67b393 | 384cb4e73cc67e390ee2f4be0da9fe0319d93644 | refs/heads/master | 2021-02-17T16:32:22.557614 | 2020-03-09T05:29:51 | 2020-03-09T05:29:51 | 245,111,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | 78 path=Python基础/截图和代码/if、while、for/01-if比较运算符.py
27 mtime=1491131771.711676
| [
"sunjianbo"
] | sunjianbo |
e3015d6511a097f72860e5b54e64757741b0778b | 7401dac35fa6ea9bdbddb9dad15f5879ba2c0507 | /acadbehaviour.py | b63e506090b12ae2641e0ab31f24d2ad1c551201 | [] | no_license | airrakeshkumarsharma/student-classification | 995b096dd612806faf49b60a9c1e22678db53e1d | 9b46cbe0d6d8747ce78762e327473861ee44b7a2 | refs/heads/master | 2020-03-28T01:27:46.794113 | 2018-09-06T13:00:46 | 2018-09-06T13:00:46 | 147,506,789 | 2 | 1 | null | 2018-09-06T05:13:55 | 2018-09-05T11:22:52 | null | UTF-8 | Python | false | false | 3,725 | py | import pandas as pd
import random
#Serial Number
sno = []
#Academic Behaviour
active = []
sem = []
#Higher Studies
name = []
mba = []
ms = []
mtech = []
#Job
govtjob = []
it = []
entrepreneur = []
#Co-circular activities
sports=[]
music=[]
dance=[]
others=[]
#General Behaviour
result=[]
#print("0 -> Not Attentive")
#print("1 -> A Bit Attentive")
#print("2 -> Attentive")
#print("3 -> Hyperactive")
for i in range(1,61):
a = random.randint(0,3)
s = random.randint(0,10)
#a = int(input("Enter the Academic behaviour marks on a scale of 0 to 3"))
#s = int(input("Enter the Semester marks on a scale of 0 to 10"))
active.append(a)
sem.append(s)
sno.append(i)
print("Academic Behaviour")
print("Attentive",active)
print("Semester marks",sem)
#print("0->Not Intrested")
#print("1->Confused")
#print("2-> Interested")
#print("3->Passionate")
for i in range(1,61):
mba1 = random.randint(0,3)
ms1 = random.randint(0,3)
mtech1 = random.randint(0,3)
#mba1 = input("Enter your intrest in mba on a scale of 0 to 3")
#ms1 = input("Enter your intrest in ms on a scale of 0 to 3")
#mtech1 = input("Enter your intrest in mtech on a scale of 0 to 3")
mtech.append(mtech1)
ms.append(ms1)
mba.append(mba1)
print("Higher Studies")
print("M.tech",mtech)
print("MS",ms)
print("MBA",mba)
#print("0 -> Not Intrested")
#print("1 -> Confused")
#print("2 -> Interested")
#print("3 -> Passionate")
for i in range(1,61):
g = random.randint(0,3)
it1 = random.randint(0,3)
e = random.randint(0,3)
#g = input("Enter your intrest in govt job on a scale of 0 to 3")
#it1 = input("Enter your intrest in IT job on a scale of 0 to 3")
#e = input("Enter your intrest in entrepeneurship on a scale of 0 to 3")
it.append(it1)
entrepreneur.append(e)
govtjob.append(g)
print("JOB")
print("IT job",it)
print("Bussiness",entrepreneur)
print("Govt Job",govtjob)
#print("1 -> Not Intrested")
#print("2 -> On and OFF")
#print("3 -> Regular")
#print("4 -> Passionate")
for i in range(1,61):
s = random.randint(0,3)
m = random.randint(0,3)
d = random.randint(0,3)
o = random.randint(0,3)
#s = input ("Enter your intrest in sports on a scale of 0 to 3")
#m = input ("Enter your intrest in music on a scale of 0 to 3")
#d = input ("Enter your intrest in dance on a scale of 0 to 3")
#o = input ("Enter your intrest in others on a scale of 0 to 3")
music.append (m)
sports.append (s)
dance.append (d)
others.append (o)
print("Co-Circular Activities")
print("music",music)
print("sports",sports)
print("dance",dance)
print("others",others)
#print("For grading any student consider the categories:")
#print(" polite, not polite")
#print(" responisible, not responsible")
#print(" honest, not honest")
#print(" give the data in number format:")
#print("0 -> No postive aspect")
#print("1 -> one postive aspect")
#print("2 -> two postive aspect")
#print("3 -> three postive aspect")
for i in range(1,61):
k = random.randint(0,3)
#k = int (input ("enter remarks for the student:"))
result.append(k)
print("General Behaviour")
print(result)
# save the file with data
csvf = pd.DataFrame({"S.No": sno, "Attentiveness": active, "Sem Marks": sem, "MBA": mba, "MS": ms, "M.Tech": mtech, "GOVTJOB": govtjob, "IT": it, "ENTREPRENEUR": entrepreneur, "sports": sports, "music": music, "dance": dance, "others": others, "StudentBehaviour":result})
csvf.to_csv("AcademicBehaviour.csv",index="false")
print(csvf)
| [
"noreply@github.com"
] | noreply@github.com |
e372c12cb3b68da6d60f0b9badb77c5351675ea6 | dfd0036b13141f0a61ce29325044d9fc3accfa67 | /prob20.py | 165d4cd80e0efaa2f74d5cae6b4c7d1396f817cc | [] | no_license | V-Neck/Euler | a909a9265ce8b0b09204189adcf7655ffe9bec5d | 4710f1456cc9c23eb0564122878b4245bd6c33d8 | refs/heads/master | 2021-05-06T17:58:49.316448 | 2018-04-09T02:48:03 | 2018-04-09T02:48:03 | 111,858,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | from prob15 import fact
print sum([int(i) for i in str(fact(100))])
| [
"evilepicproductions@gmail.com"
] | evilepicproductions@gmail.com |
b1dc9e505c919a677e4ad516ba5eb32f5820c244 | 610dedfb6e21d297e8cdbcba599a4e564bd785cb | /EstruturaDeRepeticao/estruturaderepeticao-09.py | 8b4c1153a41989cbf2047c8067840d6a96441880 | [] | no_license | zumbipy/PythonExercicios | f7b2ddf2376b9ecb2aedc77531e3571dc746a12b | 7a17b78cf927a2889b93238542e90e00810c43e6 | refs/heads/master | 2021-01-23T10:43:47.997462 | 2018-07-22T14:58:44 | 2018-07-22T14:58:44 | 93,086,120 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | # Telegram: @ZumbiPy __ _ ___
# /_ / __ ____ _ / / (_) _ \__ __
# / /_/ // / ' \/ _ \/ / ___/ // /
# /___/\_,_/_/_/_/_.__/_/_/ \_, /
# E-mail: zumbipy@gmail.com /___/
"""
09 - Faça um programa que imprima na tela apenas os números
ímpares entre 1 e 50.
"""
# ================================================================================
# Logica do Programa.
# ================================================================================
for i in range(1, 50):
# Quando resto de uma divisao por 2 for 0 ele e par se nao e ímpar.
if i % 2 != 0:
print(i)
print("=" * 72)
# ou
for i in range(1, 50, 2):
print(i)
| [
"zumbipy@gmail.com"
] | zumbipy@gmail.com |
6a5596e9f5936e591842035cd79dd7c31355ad3f | d169aaea184d13b92a79db7ddcdf6cafc8696fb7 | /ArrayMathematics.py | 40acb0b5e066a3de3f3559ea5c8b44e7419747c9 | [] | no_license | sahilshah1610/HackerRank | 71dcc6c5d5f8411240e12c2e74c31fc5c62d5ed0 | 1fd63624b05927bf5ac38ee206d4f7e79b000b68 | refs/heads/master | 2023-01-28T23:21:42.383578 | 2020-12-14T05:21:36 | 2020-12-14T05:21:36 | 314,386,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | import numpy as np
if __name__ == "__main__":
n,m = map(int, input().split())
arrA = np.zeros((n,m),int)
arrB = np.zeros((n,m),int)
for i in range(n):
arrA[i] = np.array(input().split(), int)
for i in range(n):
arrB[i] = np.array(input().split(), int)
print(arrA + arrB)
print(arrA - arrB)
print(arrA * arrB)
print(arrA // arrB)
print(arrA % arrB)
print(arrA ** arrB)
| [
"sahil.shah56@gmail.com"
] | sahil.shah56@gmail.com |
097f6aa61829185596618db0242a9f7088507c1b | 98005f697f615e55d1a34bbb8f71fb45dd11f2be | /agmapi/__init__.py | 275199dc36eeee47ba48903ed7abfd9d83c8931a | [] | no_license | sreecodeslayer/khub-task | 0a2bf12cc4d65c6feea0e3fde4cf0484ce5d779b | 97edf6670bcaa0675aed2f589644e31747d54862 | refs/heads/master | 2020-03-10T06:42:04.900308 | 2018-04-17T05:10:19 | 2018-04-17T05:10:19 | 129,244,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | from flask import Flask, render_template
from flask_restful import Api
from .db import init_db
from .utils import init_logger, get_logger
# Application factory-less setup: a single module-level Flask app.
app = Flask('agmapi')
# NOTE(review): DEBUG is hard-coded on — disable before any production deploy.
app.config['DEBUG'] = True
app.config['MONGODB_SETTINGS'] = {
    'db': 'AGMAPI',
    'host': 'mongodb://localhost:27017/AGMAPI'
}
# Wire up the MongoDB connection and application-wide logging.
init_db(app)
init_logger(app)
logger = get_logger()
logger.info('Server ready!')
@app.route('/')
def index():
    """Serve the landing page template."""
    return render_template('index.html')
# Resources are imported after app/db initialisation — presumably to avoid a
# circular import with this module; confirm before reordering.
from .web import (
    StocksResource,
    CommodityResource,
    StatesResource,
    MandisResource
)
api = Api(app)
# Mount each REST resource under its own /api/* endpoint.
api.add_resource(StocksResource, '/api/stocks')
api.add_resource(CommodityResource, '/api/commodities')
api.add_resource(StatesResource, '/api/states')
api.add_resource(MandisResource, '/api/mandis')
| [
"kesav.tc8@gmail.com"
] | kesav.tc8@gmail.com |
971a9e6462e076e0cd93f1bffaa93f96f40d2ccd | d8648e2c56452c6ee09aced281b0810a702467ec | /blog/migrations/0003_auto_20181217_1108.py | 69e6236703fe51a9a139a9de960baa01582cb1e5 | [] | no_license | nikhil0162/django_Project | c51018eacf4965b7edf62d1067c636476454e2e1 | 25c4f4c1c5065bcb127590a2339e4d7725078be8 | refs/heads/master | 2022-12-14T04:20:36.839251 | 2020-10-29T17:39:27 | 2020-10-29T17:39:27 | 159,532,188 | 0 | 0 | null | 2022-12-08T02:28:32 | 2018-11-28T16:30:36 | HTML | UTF-8 | Python | false | false | 403 | py | # Generated by Django 2.1.4 on 2018-12-17 05:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make ``CarPost.launched_date`` optional (nullable and blankable)."""
    dependencies = [
        # Must be applied after the previous blog migration.
        ('blog', '0002_auto_20181215_0712'),
    ]
    operations = [
        migrations.AlterField(
            model_name='carpost',
            name='launched_date',
            # blank=True allows empty form input; null=True allows NULL in the DB.
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| [
"nikhil0162@gmail.com"
] | nikhil0162@gmail.com |
c55fcbc5290050fda7ddeebb2c0e6adec8d9980c | 703e6baed8e2b1efbd1aaee7eba3b6af1fb3fd84 | /nndist.py | 22521fb8082fa049edcfbd3e6d691c0c444eea03 | [] | no_license | deyh2020/EIT-computations | 12969400c2bec3810112a69da9a8d9a594ab2934 | 9159bff4bfda8b308a486b3404c90c0376e576eb | refs/heads/master | 2022-03-07T18:21:36.656263 | 2018-06-19T21:04:27 | 2018-06-19T21:04:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | import pylab
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import scipy
from qutip import *
from scipy import linalg
from math import *
def nndist(x, n):
    """Nearest-neighbour distance density at radius ``x`` for points of
    mean number density ``n``:

        P(x) = 4*pi*n*x**2 * exp(-(4/3)*pi*n*x**3)

    Uses ``np.exp`` so it works elementwise on numpy arrays as well as
    on scalars.
    """
    shell_term = 4. * pi * x * x * n
    # Probability that a sphere of radius x around a point is empty.
    empty_sphere = np.exp(-shell_term * x / 3.)
    return empty_sphere * shell_term
def nndistV(C, V, n):
    """Nearest-neighbour distribution expressed in the variable ``V``
    through the change of variables x = (C / V)**(1/6), i.e.

        P(V) = P(x(V)) * |dx/dV|

    where |dx/dV| = (C / V**7)**(1/6) / 6.
    """
    x = (C / V) ** (1. / 6.)
    radial_density = np.exp(-4 * pi * x * x * x * n / 3.) * 4. * pi * x * x * n
    jacobian = ((C / (V ** 7.)) ** (1. / 6.)) / 6.
    return radial_density * jacobian
| [
"hudpsa@gmail.com"
] | hudpsa@gmail.com |
e3bb0a08160c3b5afbb1561fc67f5e5b2b320380 | 43a676d507c9f3e007d46b9335c82f77e35350f6 | /config/wsgi.py | df17ccb416ed061cc0afd7cf24b277bc198a94b4 | [] | no_license | Zoxon470/nekidaem-blog | 79136fd9f4747afd01beb02bfd9d0c524493a6f6 | c2539963d149841397e9eb2d4153a73abea15da2 | refs/heads/master | 2022-05-02T20:14:05.805564 | 2019-06-27T21:50:57 | 2019-06-27T21:50:57 | 194,165,211 | 0 | 2 | null | 2022-04-22T21:53:15 | 2019-06-27T21:25:07 | JavaScript | UTF-8 | Python | false | false | 340 | py | import os
import sys
from django.core.wsgi import get_wsgi_application
# Project root: one directory above this config/ package.
app_path = os.path.abspath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), os.pardir))
# Make the nekidaem-blog package importable by the WSGI server.
sys.path.append(os.path.join(app_path, 'nekidaem-blog'))
# Default to the dev settings unless the environment already chose a module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'config.settings.dev')
# WSGI entry point used by the application server.
application = get_wsgi_application()
| [
"zoxon470@gmail.com"
] | zoxon470@gmail.com |
51d514b03c5d8f55cd4062f7f5d8f84380a4ae58 | 8288c6f44ed26292b0795d2290763e19043a058d | /lab9gradient.py | 81a01b213cfa165f7db1d35c6a268be0b35c433e | [] | no_license | szymonln/Mownit2 | d763a1a40b44c45a6a95f26b355cf7442ff58943 | d27d1e194133e6930109eb4d2261bb10dd0590fa | refs/heads/master | 2020-04-03T15:33:26.161354 | 2019-02-08T14:31:46 | 2019-02-08T14:31:46 | 155,366,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | cur_x = 3
# Gradient descent using the derivative df(x) = 2*x - 2.71 (consistent with
# an objective f(x) = x**2 - 2.71*x, whose minimum is at x = 1.355).
# The starting point cur_x is initialised above.
rate = 0.01 # Learning rate (step size)
precision = 0.00001  # stop once successive iterates differ by less than this
previous_step_size = 1
max_iters = 10000  # safety cap on the number of iterations
iters = 0
df = lambda x: 2*x - 2.71 #Gradient (derivative of the objective)
while previous_step_size > precision and iters < max_iters:
    prev_x = cur_x # Store current x value in prev_x
    cur_x = cur_x - rate * df(prev_x) # Grad descent step: x <- x - rate*df(x)
    previous_step_size = abs(cur_x - prev_x) # Change in x (convergence measure)
    iters = iters + 1 # iteration count
    print("Iteration", iters, "\nX value is", cur_x) # Print iterations
print("The local minimum occurs at", cur_x) | [
"szymonln@gmail.com"
] | szymonln@gmail.com |
d820ae424b8b015df2aa8aee36762d571e6921f2 | 7b4d83e0e476110ed8ebf444da4f3125774ddcba | /projeto_extensao/PIL/ImageWin.py | 37cf8f26b7eecf295e7211894b42dde741dcf8a1 | [] | no_license | gefferson/projeto_extensao | 5fa70f99ac75b9dc2a09dbd5f458f6f74dcb7cf4 | 9ae9b232f6aabbe590b2791fda04ca4430f3655a | refs/heads/master | 2021-01-10T20:55:52.424534 | 2014-09-02T06:27:32 | 2014-09-02T06:27:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,375 | py | #
# The Python Imaging Library.
# $Id$
#
# a Windows DIB display interface
#
# History:
# 1996-05-20 fl Created
# 1996-09-20 fl Fixed subregion exposure
# 1997-09-21 fl Added draw primitive (for tzPrint)
# 2003-05-21 fl Added experimental Window/ImageWindow classes
# 2003-09-05 fl Added fromstring/tostring methods
#
# Copyright (c) Secret Labs AB 1997-2003.
# Copyright (c) Fredrik Lundh 1996-2003.
#
# See the README file for information on usage and redistribution.
#
##
# The <b>ImageWin</b> module contains support to create and display
# images under Windows 95/98, NT, 2000 and later.
from PIL import Image
class HDC:
    """Wraps an integer device context (HDC) handle.

    Passing one of these to a drawing routine lets the library tell a
    device context apart from a plain window handle; ``int()`` recovers
    the raw handle value.
    """

    def __init__(self, dc):
        # Keep the raw handle untouched.
        self.dc = dc

    def __int__(self):
        return self.dc
class HWND:
    """Wraps an integer window (HWND) handle.

    Complements ``HDC``: routines that accept either kind of handle use
    the wrapper type to decide how to obtain a device context. ``int()``
    recovers the raw handle value.
    """

    def __init__(self, wnd):
        # Keep the raw handle untouched.
        self.wnd = wnd

    def __int__(self):
        return self.wnd
##
# Create a Windows bitmap with the given mode and size. The mode can
# be one of "1", "L", "P", or "RGB".
#
# If the display requires a palette, this constructor creates a
# suitable palette and associates it with the image. For an "L" image,
# 128 greylevels are allocated. For an "RGB" image, a 6x6x6 colour
# cube is used, together with 20 greylevels.
#
# To make sure that palettes work properly under Windows, you must
# call the <b>palette</b> method upon certain events from Windows.
class Dib:
    """A Windows display bitmap backed by an ``Image.core.display`` object.

    ``mode`` is one of "1", "L", "P" or "RGB" and ``size`` is the pixel
    size of the bitmap.
    """
    ##
    # Create Windows bitmap.
    #
    # @param image Either a PIL image, or a mode string. If a
    # mode string is used, a size must also be given. The
    # mode can be one of "1", "L", "P", or "RGB".
    # @param size If the first argument is a mode string, this
    # defines the size of the image.
    def __init__(self, image, size=None):
        """Build the display bitmap from a PIL image or a (mode, size) pair."""
        # Duck-type check: anything with .mode and .size is treated as an image.
        if hasattr(image, "mode") and hasattr(image, "size"):
            mode = image.mode
            size = image.size
        else:
            mode = image
            image = None
        if mode not in ["1", "L", "P", "RGB"]:
            # Reduce unsupported modes to their displayable base mode.
            mode = Image.getmodebase(mode)
        self.image = Image.core.display(mode, size)
        self.mode = mode
        self.size = size
        if image:
            # Copy the source pixels into the display memory.
            self.paste(image)
    ##
    # Copy the bitmap contents to a device context.
    #
    # @param handle Device context (HDC), cast to a Python integer,
    # or a HDC or HWND instance. In PythonWin, you can use the
    # <b>GetHandleAttrib</b> method of the <b>CDC</b> class to get
    # a suitable handle.
    def expose(self, handle):
        """Blit the bitmap to the given HDC/HWND handle."""
        if isinstance(handle, HWND):
            # For a window handle, obtain a device context and make sure it
            # is released even if the expose call fails.
            dc = self.image.getdc(handle)
            try:
                result = self.image.expose(dc)
            finally:
                self.image.releasedc(handle, dc)
        else:
            result = self.image.expose(handle)
        return result
    def draw(self, handle, dst, src=None):
        """Draw the region ``src`` of the bitmap to the region ``dst``
        on the given handle; ``src`` defaults to the whole bitmap."""
        if not src:
            src = (0,0) + self.size
        if isinstance(handle, HWND):
            dc = self.image.getdc(handle)
            try:
                result = self.image.draw(dc, dst, src)
            finally:
                self.image.releasedc(handle, dc)
        else:
            result = self.image.draw(handle, dst, src)
        return result
    ##
    # Installs the palette associated with the image in the
    # given device context.
    # <p>
    # This method should be called upon <b>QUERYNEWPALETTE</b>
    # and <b>PALETTECHANGED</b> events from Windows. If this
    # method returns a non-zero value, one or more display
    # palette entries were changed, and the image should be
    # redrawn.
    #
    # @param handle Device context (HDC), cast to a Python integer,
    # or an HDC or HWND instance.
    # @return A true value if one or more entries were changed
    # (this indicates that the image should be redrawn).
    def query_palette(self, handle):
        """Realize the bitmap's palette on the given device context."""
        if isinstance(handle, HWND):
            # NOTE(review): `handle` is rebound to the DC here, so releasedc
            # receives the DC for both arguments instead of (hwnd, dc) as in
            # expose()/draw() — looks suspicious; confirm against the C API.
            handle = self.image.getdc(handle)
            try:
                result = self.image.query_palette(handle)
            finally:
                self.image.releasedc(handle, handle)
        else:
            result = self.image.query_palette(handle)
        return result
    ##
    # Paste a PIL image into the bitmap image.
    #
    # @param im A PIL image. The size must match the target region.
    # If the mode does not match, the image is converted to the
    # mode of the bitmap image.
    # @param box A 4-tuple defining the left, upper, right, and
    # lower pixel coordinate. If None is given instead of a
    # tuple, all of the image is assumed.
    def paste(self, im, box=None):
        """Copy a PIL image (converted to this bitmap's mode) into
        the display memory, optionally into the region ``box``."""
        im.load()
        if self.mode != im.mode:
            im = im.convert(self.mode)
        if box:
            self.image.paste(im.im, box)
        else:
            self.image.paste(im.im)
    ##
    # Load display memory contents from string buffer.
    #
    # @param buffer A string buffer containing display data (usually
    # data returned from <b>tostring</b>)
    def fromstring(self, buffer):
        """Restore display memory from a buffer produced by tostring()."""
        return self.image.fromstring(buffer)
    ##
    # Copy display memory contents to string buffer.
    #
    # @return A string buffer containing display data.
    def tostring(self):
        """Return the raw display memory contents as a string buffer."""
        return self.image.tostring()
##
# Create a Window with the given title size.
class Window:
    """Base class for a native PIL display window.

    The constructor creates an OS window via ``Image.core.createwindow``
    and registers ``__dispatcher`` as its callback; UI events are routed
    to the matching ``ui_handle_<action>`` method, which subclasses
    override as needed.
    """

    def __init__(self, title="PIL", width=None, height=None):
        self.hwnd = Image.core.createwindow(
            title, self.__dispatcher, width or 0, height or 0
        )

    def __dispatcher(self, action, *args):
        # Route an event named `action` to the matching ui_handle_* method.
        # Bug fix: the original used `apply(...)`, a builtin removed in
        # Python 3; calling the bound method with *args is the direct
        # equivalent.
        return getattr(self, "ui_handle_" + action)(*args)

    def ui_handle_clear(self, dc, x0, y0, x1, y1):
        """Hook: the given region's background should be cleared."""
        pass

    def ui_handle_damage(self, x0, y0, x1, y1):
        """Hook: the given region was damaged and needs repainting."""
        pass

    def ui_handle_destroy(self):
        """Hook: the window is being destroyed."""
        pass

    def ui_handle_repair(self, dc, x0, y0, x1, y1):
        """Hook: repaint the given region onto the device context."""
        pass

    def ui_handle_resize(self, width, height):
        """Hook: the window was resized to (width, height)."""
        pass

    def mainloop(self):
        """Run the native event loop."""
        Image.core.eventloop()
##
# Create an image window which displays the given image.
class ImageWindow(Window):
    """A window that displays a single image."""

    def __init__(self, image, title="PIL"):
        # Wrap plain PIL images in a display bitmap; Dib instances pass
        # straight through.
        dib = image if isinstance(image, Dib) else Dib(image)
        self.image = dib
        width, height = dib.size
        Window.__init__(self, title, width=width, height=height)

    def ui_handle_repair(self, dc, x0, y0, x1, y1):
        """Repaint the damaged region from the backing bitmap."""
        region = (x0, y0, x1, y1)
        self.image.draw(dc, region)
| [
"geffersonvivan@187-68-19-51.3g.claro.net.br"
] | geffersonvivan@187-68-19-51.3g.claro.net.br |
c5b435586383bec7e14c2017d6182ce5f217272e | 449147399b91db8ca3192e9960834a73967cd01d | /pandas-ml-utils/pandas_ml_utils/ml/data/reconstruction/__init__.py | 52b22fdb3e0d667496a779c3c98ac6e25c9b2549 | [
"MIT"
] | permissive | brunoreisportela/pandas-ml-quant | 04b81568b900d226bb7028ccbe81ea97d0c00587 | a80b06aab28f38c3c6cb298e96f497e4fcdb95a5 | refs/heads/master | 2022-12-18T23:51:38.297857 | 2020-09-08T06:14:16 | 2020-09-08T06:14:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | from .prediction import assemble_prediction_frame | [
"ch9.ki7@gmail.com"
] | ch9.ki7@gmail.com |
3227cb4f2668033863586798ab35d19867aa42b8 | 443690c3b6e0ab294cd24fee3db164b756a45f02 | /cs308app/migrations/0008_auto_20201116_2352.py | e1618e0313c80ad4fc2fba800e3361402dd86fee | [] | no_license | oziyildirim/DjangoSoftEng | 19175770fdce3c66506d0f48ce9309912cd42912 | 97bf4d8821867f94d70802d65bde92b1f3aeb1d9 | refs/heads/main | 2023-04-01T06:55:03.421094 | 2021-04-07T16:55:06 | 2021-04-07T16:55:06 | 336,238,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | # Generated by Django 3.1.3 on 2020-11-16 20:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Shop-model tweaks: add quantity/phone_number fields and tighten
    several text/integer field definitions."""
    dependencies = [
        # Must be applied after the previous cs308app migration.
        ('cs308app', '0007_auto_20201110_2233'),
    ]
    operations = [
        # New per-item quantity on basket entries, defaulting to a single unit.
        migrations.AddField(
            model_name='basketitem',
            name='quantity',
            field=models.IntegerField(default=1),
        ),
        # New required phone number on orders.
        migrations.AddField(
            model_name='order',
            name='phone_number',
            field=models.IntegerField(default=0),
            # The default of 0 is a one-off used only to back-fill existing
            # rows; it is not kept on the model.
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='order',
            name='address',
            field=models.CharField(max_length=300),
        ),
        migrations.AlterField(
            model_name='orderitem',
            name='quantity',
            field=models.IntegerField(default=1),
        ),
        # Product image becomes a nullable URL/path string.
        migrations.AlterField(
            model_name='product',
            name='img',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='product_name',
            field=models.CharField(max_length=100),
        ),
    ]
| [
"oyildirim@sabanciuniv.edu"
] | oyildirim@sabanciuniv.edu |
dc6940ccab54fe26f6cdd8418152ac93e3a870f6 | 080c13cd91a073457bd9eddc2a3d13fc2e0e56ae | /MY_REPOS/awesome-4-new-developers/tensorflow-master/tensorflow/python/tpu/feature_column_v2.py | 1a5bddb173a599ee196c98ef4cd8bf3483151377 | [
"Apache-2.0"
] | permissive | Portfolio-Projects42/UsefulResourceRepo2.0 | 1dccc8961a09347f124d3ed7c27c6d73b9806189 | 75b1e23c757845b5f1894ebe53551a1cf759c6a3 | refs/heads/master | 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50,102 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU Feature Column Library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
import enum
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu.feature_column import _is_running_on_cpu
from tensorflow.python.tpu.feature_column import _record_variable_scope_and_name
from tensorflow.python.tpu.feature_column import _SUPPORTED_CATEGORICAL_COLUMNS_V2
from tensorflow.python.tpu.feature_column import _SUPPORTED_SEQUENCE_COLUMNS
from tensorflow.python.tpu.feature_column import _TPUBaseEmbeddingColumn
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
# User-facing device names accepted by the `embedding_lookup_device` argument
# of the column constructors below.
_ALLOWED_DEVICES = ["cpu", "tpu_tensor_core", "tpu_embedding_core"]
# Suffix used to derive the key of a feature's TensorCore mask; consumed by
# code outside this chunk — do not rename without checking those usages.
_TENSOR_CORE_MASK_KEY_SUFFIX = "__TENSOR_CORE_MASK"
class EmbeddingDevice(enum.Enum):
    """Internal encoding of where an embedding lookup runs.

    Members correspond one-to-one with the user-facing strings in
    `_ALLOWED_DEVICES` ("cpu", "tpu_tensor_core", "tpu_embedding_core").
    """
    CPU = 1
    TPU_TENSOR_CORE = 2
    TPU_EMBEDDING_CORE = 3
@tf_export(v1=["tpu.experimental.embedding_column"])
def embedding_column_v2(
    categorical_column,
    dimension,
    combiner="mean",
    initializer=None,
    max_sequence_length=0,
    learning_rate_fn=None,
    embedding_lookup_device=None,
    tensor_core_shape=None,
    use_safe_embedding_lookup=True,
):
    """TPU version of `tf.compat.v1.feature_column.embedding_column`.

    Note that the interface for `tf.tpu.experimental.embedding_column` is
    different from that of `tf.compat.v1.feature_column.embedding_column`: The
    following arguments are NOT supported: `ckpt_to_load_from`,
    `tensor_name_in_ckpt`, `max_norm` and `trainable`.

    Use this function in place of `tf.compat.v1.feature_column.embedding_column`
    when you want to use the TPU to accelerate your embedding lookups via TPU
    embeddings.

    ```
    column = tf.feature_column.categorical_column_with_identity(...)
    tpu_column = tf.tpu.experimental.embedding_column(column, 10)
    ...
    def model_fn(features):
      dense_feature = tf.keras.layers.DenseFeature(tpu_column)
      embedded_feature = dense_feature(features)
      ...

    estimator = tf.estimator.tpu.TPUEstimator(
        model_fn=model_fn,
        ...
        embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
          column=[tpu_column],
          ...))
    ```

    Args:
      categorical_column: A categorical column returned from
          `categorical_column_with_identity`, `weighted_categorical_column`,
          `categorical_column_with_vocabulary_file`,
          `categorical_column_with_vocabulary_list`,
          `sequence_categorical_column_with_identity`,
          `sequence_categorical_column_with_vocabulary_file`,
          `sequence_categorical_column_with_vocabulary_list`
      dimension: An integer specifying dimension of the embedding, must be > 0.
      combiner: A string specifying how to reduce if there are multiple entries
        in a single row for a non-sequence column. For more information, see
        `tf.feature_column.embedding_column`.
      initializer: A variable initializer function to be used in embedding
        variable initialization. If not specified, defaults to
        `tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
        standard deviation `1/sqrt(dimension)`.
      max_sequence_length: An non-negative integer specifying the max sequence
        length. Any sequence shorter then this will be padded with 0 embeddings
        and any sequence longer will be truncated. This must be positive for
        sequence features and 0 for non-sequence features.
      learning_rate_fn: A function that takes global step and returns learning
        rate for the embedding table. If you intend to use the same learning rate
        for multiple embedding tables, please ensure that you pass the exact same
        python function to all calls of embedding_column, otherwise performence
        may suffer.
      embedding_lookup_device: The device on which to run the embedding lookup.
        Valid options are "cpu", "tpu_tensor_core", and "tpu_embedding_core".
        If specifying "tpu_tensor_core", a tensor_core_shape must be supplied.
        If not specified, the default behavior is embedding lookup on
        "tpu_embedding_core" for training and "cpu" for inference.
        Valid options for training : ["tpu_embedding_core", "tpu_tensor_core"]
        Valid options for serving : ["cpu", "tpu_tensor_core"]
        For training, tpu_embedding_core is good for large embedding vocab (>1M),
        otherwise, tpu_tensor_core is often sufficient.
        For serving, doing embedding lookup on tpu_tensor_core during serving is
        a way to reduce host cpu usage in cases where that is a bottleneck.
      tensor_core_shape: If supplied, a list of integers which specifies
        the intended dense shape to run embedding lookup for this feature on
        TensorCore. The batch dimension can be left None or -1 to indicate
        a dynamic shape. Only rank 2 shapes currently supported.
      use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
        instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
        there are no empty rows and all weights and ids are positive at the
        expense of extra compute cost. This only applies to rank 2 (NxM) shaped
        input tensors. Defaults to true, consider turning off if the above checks
        are not needed. Note that having empty rows will not trigger any error
        though the output result might be 0 or omitted.

    Returns:
      A `_TPUEmbeddingColumnV2`.

    Raises:
      ValueError: if `dimension` not > 0.
      ValueError: if `initializer` is specified but not callable.
    """
    if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS_V2):
        raise TypeError(
            "categorical_column for tpu "
            " embedding_column must be type %s, got %s."
            % (
                " or ".join(
                    [cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS_V2]
                ),
                type(categorical_column),
            )
        )
    if (dimension is None) or (dimension < 1):
        raise ValueError("Invalid dimension {}.".format(dimension))
    if tensor_core_shape and len(tensor_core_shape) != 2:
        raise ValueError(
            "tensor_core_shape must be size 2. Got {}.".format(tensor_core_shape)
        )
    if (initializer is not None) and (not callable(initializer)):
        raise ValueError(
            "initializer must be callable if specified. "
            "Embedding of column_name: {}".format(categorical_column.name)
        )
    if initializer is None:
        # Default: truncated normal scaled so rows start with roughly unit norm.
        initializer = init_ops.truncated_normal_initializer(
            mean=0.0, stddev=1 / math.sqrt(dimension)
        )
    # Map the user-facing device string onto the internal enum. A dict keeps
    # this mapping in one place instead of an if/elif chain; its keys are
    # exactly _ALLOWED_DEVICES.
    device_by_name = {
        "cpu": EmbeddingDevice.CPU,
        "tpu_tensor_core": EmbeddingDevice.TPU_TENSOR_CORE,
        "tpu_embedding_core": EmbeddingDevice.TPU_EMBEDDING_CORE,
    }
    if embedding_lookup_device:
        if embedding_lookup_device not in device_by_name:
            # Bug fix: this ValueError was previously raised with two
            # positional args, which rendered as a tuple rather than a
            # readable message.
            raise ValueError(
                "If set, embedding_lookup_device must be in {}. "
                "Got {}.".format(_ALLOWED_DEVICES, embedding_lookup_device)
            )
        embedding_lookup_device = device_by_name[embedding_lookup_device]
    if embedding_lookup_device == EmbeddingDevice.TPU_TENSOR_CORE:
        # TensorCore lookups need a static dense shape and cannot handle
        # sequence columns.
        if not tensor_core_shape:
            raise ValueError(
                "Using embedding_lookup_device=tpu_tensor_core requires "
                "tensor_core_shape to be set."
            )
        if isinstance(categorical_column, _SUPPORTED_SEQUENCE_COLUMNS):
            raise ValueError(
                "embedding_lookup_device=tpu_tensor_core currently does "
                "not support sequence columns."
            )
    if not embedding_lookup_device:
        return _TPUEmbeddingColumnV2(
            categorical_column=categorical_column,
            dimension=dimension,
            combiner=combiner,
            initializer=initializer,
            max_sequence_length=max_sequence_length,
            learning_rate_fn=learning_rate_fn,
            use_safe_embedding_lookup=use_safe_embedding_lookup,
        )
    else:
        return _TPUDeviceSpecificEmbeddingColumnV2(
            categorical_column=categorical_column,
            dimension=dimension,
            combiner=combiner,
            initializer=initializer,
            max_sequence_length=max_sequence_length,
            learning_rate_fn=learning_rate_fn,
            embedding_lookup_device=embedding_lookup_device,
            tensor_core_shape=tensor_core_shape,
            use_safe_embedding_lookup=use_safe_embedding_lookup,
        )
@tf_export(v1=["tpu.experimental.shared_embedding_columns"])
def shared_embedding_columns_v2(
    categorical_columns,
    dimension,
    combiner="mean",
    initializer=None,
    shared_embedding_collection_name=None,
    max_sequence_lengths=None,
    learning_rate_fn=None,
    embedding_lookup_device=None,
    tensor_core_shape=None,
    use_safe_embedding_lookup=True,
):
    """TPU version of `tf.compat.v1.feature_column.shared_embedding_columns`.

    Note that the interface for `tf.tpu.experimental.shared_embedding_columns` is
    different from that of `tf.compat.v1.feature_column.shared_embedding_columns`:
    The following arguments are NOT supported: `ckpt_to_load_from`,
    `tensor_name_in_ckpt`, `max_norm` and `trainable`.

    Use this function in place of
    tf.compat.v1.feature_column.shared_embedding_columns` when you want to use the
    TPU to accelerate your embedding lookups via TPU embeddings.

    ```
    column_a = tf.feature_column.categorical_column_with_identity(...)
    column_b = tf.feature_column.categorical_column_with_identity(...)
    tpu_columns = tf.tpu.experimental.shared_embedding_columns(
        [column_a, column_b], 10)
    ...
    def model_fn(features):
      dense_feature = tf.keras.layers.DenseFeature(tpu_columns)
      embedded_feature = dense_feature(features)
      ...

    estimator = tf.estimator.tpu.TPUEstimator(
        model_fn=model_fn,
        ...
        embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
            column=tpu_columns,
            ...))
    ```

    Args:
      categorical_columns: A list of categorical columns returned from
        `categorical_column_with_identity`, `weighted_categorical_column`,
        `categorical_column_with_vocabulary_file`,
        `categorical_column_with_vocabulary_list`,
        `sequence_categorical_column_with_identity`,
        `sequence_categorical_column_with_vocabulary_file`,
        `sequence_categorical_column_with_vocabulary_list`
      dimension: An integer specifying dimension of the embedding, must be > 0.
      combiner: A string specifying how to reduce if there are multiple entries in
        a single row for a non-sequence column. For more information, see
        `tf.feature_column.embedding_column`.
      initializer: A variable initializer function to be used in embedding
        variable initialization. If not specified, defaults to
        `tf.truncated_normal_initializer` with mean `0.0` and standard deviation
        `1/sqrt(dimension)`.
      shared_embedding_collection_name: Optional name of the collection where
        shared embedding weights are added. If not given, a reasonable name will
        be chosen based on the names of `categorical_columns`. This is also used
        in `variable_scope` when creating shared embedding weights.
      max_sequence_lengths: An list of non-negative integers, either None or empty
        or the same length as the argument categorical_columns. Entries
        corresponding to non-sequence columns must be 0 and entries corresponding
        to sequence columns specify the max sequence length for the column. Any
        sequence shorter then this will be padded with 0 embeddings and any
        sequence longer will be truncated.
      learning_rate_fn: A function that takes global step and returns learning
        rate for the embedding table. If you intend to use the same learning rate
        for multiple embedding tables, please ensure that you pass the exact same
        python function to all calls of shared_embedding_columns, otherwise
        performence may suffer.
      embedding_lookup_device: The device on which to run the embedding lookup.
        Valid options are "cpu", "tpu_tensor_core", and "tpu_embedding_core". If
        specifying "tpu_tensor_core", a tensor_core_shape must be supplied.
        Defaults to "cpu". If not specified, the default behavior is embedding
        lookup on "tpu_embedding_core" for training and "cpu" for inference.
        Valid options for training : ["tpu_embedding_core", "tpu_tensor_core"]
        Valid options for serving : ["cpu", "tpu_tensor_core"]
        For training, tpu_embedding_core is good for large embedding vocab (>1M),
        otherwise, tpu_tensor_core is often sufficient.
        For serving, doing embedding lookup on tpu_tensor_core during serving is
        a way to reduce host cpu usage in cases where that is a bottleneck.
      tensor_core_shape: If supplied, a list of integers which specifies the
        intended dense shape to run embedding lookup for this feature on
        TensorCore. The batch dimension can be left None or -1 to indicate a
        dynamic shape. Only rank 2 shapes currently supported.
      use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
        instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
        there are no empty rows and all weights and ids are positive at the
        expense of extra compute cost. This only applies to rank 2 (NxM) shaped
        input tensors. Defaults to true, consider turning off if the above checks
        are not needed. Note that having empty rows will not trigger any error
        though the output result might be 0 or omitted.

    Returns:
      A list of `_TPUSharedEmbeddingColumnV2`.

    Raises:
      ValueError: if `dimension` not > 0.
      ValueError: if `initializer` is specified but not callable.
      ValueError: if `max_sequence_lengths` is specified and not the same length
        as `categorical_columns`.
      ValueError: if `max_sequence_lengths` is positive for a non sequence column
        or 0 for a sequence column.
    """
    for categorical_column in categorical_columns:
        if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS_V2):
            raise TypeError(
                "categorical_column for tpu "
                " shared_embedding_columns must be type %s, got %s."
                % (
                    " or ".join(
                        [cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS_V2]
                    ),
                    type(categorical_column),
                )
            )
    if not max_sequence_lengths:
        max_sequence_lengths = [0] * len(categorical_columns)
    if len(max_sequence_lengths) != len(categorical_columns):
        raise ValueError(
            "max_sequence_lengths and categorical_columns must be of "
            "the same length. len(max_sequence_lengths)={} "
            "len(categorical_columns)={}.".format(
                len(max_sequence_lengths), len(categorical_columns)
            )
        )
    if (dimension is None) or (dimension < 1):
        raise ValueError("Invalid dimension {}.".format(dimension))
    if tensor_core_shape and len(tensor_core_shape) != 2:
        raise ValueError(
            "tensor_core_shape must be size 2. Got {}.".format(tensor_core_shape)
        )
    if (initializer is not None) and (not callable(initializer)):
        raise ValueError("initializer must be callable if specified. ")
    if initializer is None:
        # Default: truncated normal scaled so rows start with roughly unit norm.
        initializer = init_ops.truncated_normal_initializer(
            mean=0.0, stddev=1 / math.sqrt(dimension)
        )
    # Sort the columns so the default collection name is deterministic even if
    # the user passes columns from an unsorted collection, such as dict.values().
    sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
    num_buckets = sorted_columns[0]._num_buckets  # pylint: disable=protected-access
    # All columns must share one table, so they must agree on the vocab size.
    for c in sorted_columns[1:]:
        if num_buckets != c._num_buckets:  # pylint: disable=protected-access
            raise ValueError(
                "To use shared_embedding_column, all categorical_columns must have "
                "the same number of buckets. Given column: {} with buckets: {} does "
                "not match column: {} with buckets: {}".format(
                    sorted_columns[0], num_buckets, c, c._num_buckets
                )
            )  # pylint: disable=protected-access
    if not shared_embedding_collection_name:
        shared_embedding_collection_name = "_".join(c.name for c in sorted_columns)
        shared_embedding_collection_name += "_shared_embedding"
    tpu_columns = []
    # A single creator supplies the shared embedding table state for every
    # returned column.
    column_creator = fc_lib.SharedEmbeddingColumnCreator(
        dimension=dimension,
        initializer=initializer,
        ckpt_to_load_from=None,
        tensor_name_in_ckpt=None,
        num_buckets=num_buckets,
        trainable=None,
        name=shared_embedding_collection_name,
    )
    # Map the user-facing device string onto the internal enum (one mapping
    # instead of an if/elif chain); its keys are exactly _ALLOWED_DEVICES.
    device_by_name = {
        "cpu": EmbeddingDevice.CPU,
        "tpu_tensor_core": EmbeddingDevice.TPU_TENSOR_CORE,
        "tpu_embedding_core": EmbeddingDevice.TPU_EMBEDDING_CORE,
    }
    if embedding_lookup_device:
        if embedding_lookup_device not in device_by_name:
            # Bug fix: this ValueError was previously raised with two
            # positional args, which rendered as a tuple rather than a
            # readable message.
            raise ValueError(
                "If set, embedding_lookup_device must be in {}. "
                "Got {}.".format(_ALLOWED_DEVICES, embedding_lookup_device)
            )
        embedding_lookup_device = device_by_name[embedding_lookup_device]
    if embedding_lookup_device == EmbeddingDevice.TPU_TENSOR_CORE:
        # TensorCore lookups need a static dense shape and cannot handle
        # sequence columns.
        if not tensor_core_shape:
            raise ValueError(
                "Using embedding_lookup_device=tpu_tensor_core requires "
                "tensor_core_shape to be set."
            )
        for c in sorted_columns:
            if isinstance(c, _SUPPORTED_SEQUENCE_COLUMNS):
                raise ValueError(
                    "embedding_lookup_device=tpu_tensor_core currently "
                    "does not support sequence columns."
                )
    # Create the state (_SharedEmbeddingColumnLayer) here.
    for categorical_column, max_sequence_length in zip(
        categorical_columns, max_sequence_lengths
    ):
        if not embedding_lookup_device:
            column = _TPUSharedEmbeddingColumnV2(
                categorical_column=categorical_column,
                shared_embedding_column_creator=column_creator,
                combiner=combiner,
                initializer=initializer,
                shared_embedding_collection_name=shared_embedding_collection_name,
                max_sequence_length=max_sequence_length,
                learning_rate_fn=learning_rate_fn,
                use_safe_embedding_lookup=use_safe_embedding_lookup,
            )
        else:
            column = _TPUSharedDeviceSpecificEmbeddingColumnV2(
                categorical_column=categorical_column,
                shared_embedding_column_creator=column_creator,
                combiner=combiner,
                initializer=initializer,
                shared_embedding_collection_name=shared_embedding_collection_name,
                max_sequence_length=max_sequence_length,
                learning_rate_fn=learning_rate_fn,
                embedding_lookup_device=embedding_lookup_device,
                tensor_core_shape=tensor_core_shape,
                use_safe_embedding_lookup=use_safe_embedding_lookup,
            )
        tpu_columns.append(column)
    return tpu_columns
class _TPUEmbeddingColumnV2(_TPUBaseEmbeddingColumn, fc_lib.EmbeddingColumn):
    """Core Embedding Column.

    Feature Column V2 embedding column for TPU: when running on TPU the dense
    embedding activations are fetched from the input pipeline (the TPU
    embedding core performs the actual lookup), while on CPU — and under TPU
    inference, via outside compilation — the stock ``fc_lib.EmbeddingColumn``
    implementation is used instead.
    """
    def __new__(
        cls,
        categorical_column,
        dimension,
        combiner="mean",
        initializer=None,
        max_sequence_length=0,
        learning_rate_fn=None,
        use_safe_embedding_lookup=True,
        bypass_scope_validation=False,
    ):
        # bypass_scope_validation is consumed in __init__ only; the base
        # namedtuple __new__ does not accept it.
        del bypass_scope_validation
        # pylint: disable=redundant-keyword-arg
        return fc_lib.EmbeddingColumn.__new__(
            cls,
            categorical_column,
            dimension,
            combiner=combiner,
            initializer=initializer,
            ckpt_to_load_from=None,
            tensor_name_in_ckpt=None,
            max_norm=None,
            trainable=True,
            use_safe_embedding_lookup=use_safe_embedding_lookup,
        )
    def __getnewargs__(self):
        # Constructor arguments for pickling/deepcopy; must stay in sync with
        # the __new__/__init__ signatures above.
        return (
            self._tpu_categorical_column,
            self.dimension,
            self.combiner,
            self.initializer,
            self._max_sequence_length,
            self._learning_rate_fn,
            self.use_safe_embedding_lookup,
            self._bypass_scope_validation,
        )
    def __deepcopy__(self, memo):
        # Rebuild through the public constructor with deep-copied arguments.
        return _TPUEmbeddingColumnV2(
            *(copy.deepcopy(a, memo) for a in self.__getnewargs__())
        )
    def __init__(
        self,
        categorical_column,
        dimension,
        combiner="mean",
        initializer=None,
        max_sequence_length=0,
        learning_rate_fn=None,
        use_safe_embedding_lookup=True,
        bypass_scope_validation=False,
    ):
        _TPUBaseEmbeddingColumn.__init__(
            self,
            categorical_column,
            max_sequence_length=max_sequence_length,
            learning_rate_fn=learning_rate_fn,
        )
        self._key = None
        # If true, scope validation is skipped to allow the same column to be used
        # in multiple variable scopes. By default, this is False, and we expect a
        # 1:1 mapping between feature columns and scopes.
        self._bypass_scope_validation = bypass_scope_validation
    def get_combiner(self):
        """Returns the embedding combiner (e.g. "mean" or "sum")."""
        return self.combiner
    def get_embedding_table_size(self):
        """Returns num_ids and width."""
        return (self.categorical_column._num_buckets, self.dimension)
    def get_feature_key_name(self):
        """get_feature_key_name."""
        # For weighted categorical columns the id feature lives on the
        # wrapped inner column.
        if self.is_categorical_column_weighted():
            return self.categorical_column.categorical_column.name
        return self.categorical_column.name
    def get_weight_key_name(self):
        """get_weight_key_name."""
        if self.is_categorical_column_weighted():
            return self.categorical_column.weight_feature_key
        return None
    def get_embedding_var_name(self):
        """get_embedding_var_name."""
        return self.categorical_column.name
    def get_initializer(self):
        """Returns the embedding table initializer passed at construction."""
        return self.initializer
    def is_categorical_column_weighted(self):
        """Check if the categorical column of the embedding column is weighted."""
        if isinstance(
            self.categorical_column,
            (
                fc._WeightedCategoricalColumn,  # pylint: disable=protected-access
                fc_lib.WeightedCategoricalColumn,
            ),
        ):
            return True
        return False
    def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
        """Feature Column V1 path: returns the dense embedding activations."""
        if tpu.under_tpu_inference_context():
            # Inference: run the standard CPU lookup outside the TPU graph.
            def host_computation():
                return fc_lib.EmbeddingColumn._get_dense_tensor(
                    self, inputs, weight_collections, trainable
                )
            return tpu.outside_compilation(host_computation)
        if _is_running_on_cpu():
            return fc_lib.EmbeddingColumn._get_dense_tensor(
                self, inputs, weight_collections, trainable
            )
        # TPU mode
        # Get the embeddings from the LazyBuilder.
        tensor = inputs.get(self.get_feature_key_name())
        # Add to collection for _create_tpu_embedding_variables_and_ops
        _record_variable_scope_and_name(
            self.get_embedding_var_name(),
            "embedding_weights",
            bypass_scope_validation=self._bypass_scope_validation,
        )
        return tensor
    def create_state(self, state_manager):
        """Creates the column's state (embedding variables) for FC V2."""
        if _is_running_on_cpu():
            return fc_lib.EmbeddingColumn.create_state(self, state_manager)
        # Create state is called for the EmbeddingColumn to create its embedding
        # variables under feature column V2, if we are on TPU so record the scope
        # here.
        _record_variable_scope_and_name(
            self.get_embedding_var_name(),
            "embedding_weights",
            bypass_scope_validation=self._bypass_scope_validation,
        )
    def get_dense_tensor(self, transformation_cache, state_manager):
        """Feature Column V2 path: returns the dense embedding activations."""
        if tpu.under_tpu_inference_context():
            def host_computation():
                return fc_lib.EmbeddingColumn.get_dense_tensor(
                    self, transformation_cache, state_manager
                )
            return tpu.outside_compilation(host_computation)
        if _is_running_on_cpu():
            return fc_lib.EmbeddingColumn.get_dense_tensor(
                self, transformation_cache, state_manager
            )
        # TPU mode
        # Get the embeddings from the FeatureTransformationCache.
        # (The variable scope was already recorded in create_state.)
        tensor = transformation_cache.get(self.get_feature_key_name(), state_manager)
        return tensor
    def _get_sequence_dense_tensor(
        self, inputs, weight_collections=None, trainable=None
    ):
        """Feature Column V1 path for sequence features."""
        if tpu.under_tpu_inference_context():
            def host_computation():
                return fc_lib.EmbeddingColumn._get_sequence_dense_tensor(
                    self, inputs, weight_collections, trainable
                )
            return tpu.outside_compilation(host_computation)
        if _is_running_on_cpu():
            return fc_lib.EmbeddingColumn._get_sequence_dense_tensor(
                self, inputs, weight_collections, trainable
            )
        tensor = inputs.get(self.get_feature_key_name())
        tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name())
        # inputs is a _LazyBuilder and for rank 1 tensors, it calls expand_dims(-1).
        # We need to undo this to match the standard CPU sequence embedding.
        tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
        # Add to collection for _create_tpu_embedding_variables_and_ops
        _record_variable_scope_and_name(
            self.get_embedding_var_name(),
            "embedding_weights",
            bypass_scope_validation=self._bypass_scope_validation,
        )
        return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
            dense_tensor=tensor, sequence_length=tensor_lengths
        )
    def get_sequence_dense_tensor(self, transformation_cache, state_manager):
        """Feature Column V2 path for sequence features."""
        if tpu.under_tpu_inference_context():
            def host_computation():
                return fc_lib.EmbeddingColumn.get_sequence_dense_tensor(
                    self, transformation_cache, state_manager
                )
            return tpu.outside_compilation(host_computation)
        if _is_running_on_cpu():
            return fc_lib.EmbeddingColumn.get_sequence_dense_tensor(
                self, transformation_cache, state_manager
            )
        tensor = transformation_cache.get(self.get_feature_key_name(), state_manager)
        tensor_lengths = transformation_cache.get(
            self.get_sequence_length_feature_key_name(), state_manager
        )
        # FeatureTransformationCache expands rank 1 tensors (like sequence length)
        # to rank 2. We need to undo this to match the standard CPU sequence
        # embedding.
        tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
        return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
            dense_tensor=tensor, sequence_length=tensor_lengths
        )
class _TPUSharedEmbeddingColumnV2(
    _TPUBaseEmbeddingColumn, fc_lib.SharedEmbeddingColumn
):
    """Core Shared Embedding Column.

    Feature Column V2 shared-embedding column for TPU: multiple categorical
    columns share one embedding table (owned by the
    ``shared_embedding_column_creator``). On TPU the activations come from
    the input pipeline; on CPU / TPU inference the stock
    ``fc_lib.SharedEmbeddingColumn`` behaviour is used.
    """
    def __new__(
        cls,
        categorical_column,
        shared_embedding_column_creator,
        combiner="mean",
        initializer=None,
        shared_embedding_collection_name=None,
        max_sequence_length=0,
        learning_rate_fn=None,
        use_safe_embedding_lookup=True,
    ):
        # initializer, shared_embedding_collection_name, max_sequence_length
        # and learning_rate_fn are stored in __init__, not by the base
        # namedtuple.
        # pylint: disable=redundant-keyword-arg
        return fc_lib.SharedEmbeddingColumn.__new__(
            cls,
            categorical_column,
            combiner=combiner,
            shared_embedding_column_creator=shared_embedding_column_creator,
            max_norm=None,
            use_safe_embedding_lookup=use_safe_embedding_lookup,
        )
    def __getnewargs__(self):
        # Constructor arguments for pickling/deepcopy; mirrors __new__'s
        # signature (use_safe_embedding_lookup falls back to its default).
        return (
            self._tpu_categorical_column,
            self.shared_embedding_column_creator,
            self.combiner,
            self._initializer,
            self._shared_embedding_collection_name,
            self._max_sequence_length,
            self._learning_rate_fn,
        )
    def __deepcopy__(self, memo):
        return _TPUSharedEmbeddingColumnV2(
            *(copy.deepcopy(a, memo) for a in self.__getnewargs__())
        )
    def __init__(
        self,
        categorical_column,
        shared_embedding_column_creator,
        combiner="mean",
        initializer=None,
        shared_embedding_collection_name=None,
        max_sequence_length=0,
        learning_rate_fn=None,
        use_safe_embedding_lookup=True,
    ):
        _TPUBaseEmbeddingColumn.__init__(
            self,
            categorical_column,
            max_sequence_length=max_sequence_length,
            learning_rate_fn=learning_rate_fn,
        )
        self._initializer = initializer
        self._shared_embedding_collection_name = shared_embedding_collection_name
    def get_combiner(self):
        """Returns the embedding combiner (e.g. "mean" or "sum")."""
        return self.combiner
    def get_embedding_table_size(self):
        """Returns num_ids and width."""
        return (
            self.categorical_column._num_buckets,
            self.shared_embedding_column_creator.dimension,
        )
    def get_feature_key_name(self):
        """get_feature_key_name."""
        # For weighted categorical columns the id feature lives on the
        # wrapped inner column.
        if self.is_categorical_column_weighted():
            return self.categorical_column.categorical_column.name
        return self.categorical_column.name
    def get_weight_key_name(self):
        """get_weight_key_name."""
        if self.is_categorical_column_weighted():
            return self.categorical_column.weight_feature_key
        return None
    def get_embedding_var_name(self):
        """get_embedding_var_name."""
        # Shared embeddings are identified by their collection name, not by
        # a single categorical column's name.
        return self._shared_embedding_collection_name
    def get_initializer(self):
        """Returns the embedding table initializer passed at construction."""
        return self._initializer
    def is_categorical_column_weighted(self):
        """Check if the categorical column of the embedding column is weighted."""
        if isinstance(
            self.categorical_column,
            (
                fc._WeightedCategoricalColumn,  # pylint: disable=protected-access
                fc_lib.WeightedCategoricalColumn,
            ),
        ):
            return True
        return False
    def _get_dense_tensor_internal(self, transformation_cache, state_manager):
        """Returns the dense embedding activations for this shared column."""
        if tpu.under_tpu_inference_context():
            # Inference: run the standard CPU lookup outside the TPU graph.
            def host_computation():
                return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
                    self, transformation_cache, state_manager
                )
            return tpu.outside_compilation(host_computation)
        if _is_running_on_cpu():
            return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
                self, transformation_cache, state_manager
            )
        # TPU mode
        # Get the embeddings from the FeatureTransformationCache.
        tensor = transformation_cache.get(self.get_feature_key_name(), state_manager)
        # Add to collection for _create_tpu_embedding_variables_and_ops
        # Note that in Feature Column V2, shared embeddings have no scope.
        _record_variable_scope_and_name(
            self.get_embedding_var_name(),
            self.shared_embedding_column_creator._name,
            is_shared_embedding=True,
        )
        return tensor
    def get_sequence_dense_tensor(self, transformation_cache, state_manager):
        """Feature Column V2 path for sequence features."""
        if tpu.under_tpu_inference_context():
            def host_computation():
                return fc_lib.SharedEmbeddingColumn.get_sequence_dense_tensor(
                    self, transformation_cache, state_manager
                )
            return tpu.outside_compilation(host_computation)
        if _is_running_on_cpu():
            return fc_lib.SharedEmbeddingColumn.get_sequence_dense_tensor(
                self, transformation_cache, state_manager
            )
        tensor = self._get_dense_tensor_internal(transformation_cache, state_manager)
        tensor_lengths = transformation_cache.get(
            self.get_sequence_length_feature_key_name(), state_manager
        )
        # FeatureTransformationCache expands rank 1 tensors (like sequence length)
        # to rank 2. We need to undo this to match the standard CPU sequence
        # embedding.
        tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
        return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
            dense_tensor=tensor, sequence_length=tensor_lengths
        )
def split_sequence_columns_v2(feature_columns):
    """Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns.
    For use in a TPUEstimator model_fn function. E.g.
    def model_fn(features):
      sequence_columns, feature_columns = (
          tf.tpu.feature_column.split_sequence_columns(feature_columns))
      input = tf.feature_column.input_layer(
          features=features, feature_columns=feature_columns)
      sequence_features, sequence_lengths = (
          tf.contrib.feature_column.sequence_input_layer(
              features=features, feature_columns=sequence_columns))
    Args:
      feature_columns: A list of _TPUEmbeddingColumns to split.
    Returns:
      Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the
      second is the non-sequence columns.
    Raises:
      TypeError: if any element is not a TPU embedding column.
    """
    sequence_cols = []
    plain_cols = []
    for col in feature_columns:
        if not isinstance(col, (_TPUEmbeddingColumnV2, _TPUSharedEmbeddingColumnV2)):
            raise TypeError(
                "column must be a _TPUEmbeddingColumnV2 or "
                "_TPUSharedEmbeddingColumnV2 but got %s instead." % (type(col))
            )
        # Route each column into the matching bucket.
        bucket = sequence_cols if col.is_sequence_column() else plain_cols
        bucket.append(col)
    return sequence_cols, plain_cols
def sparse_embedding_aggregate_slice(
    params,
    values_and_values_mask,
    combiner="mean",
    name="sparse_embedding_aggregate_slice",
):
    """Uses XLA's dynamic slice operations to perform embedding lookups.
    From third_party/cloud_tpu/models/movielens/tpu_embedding.py
    Args:
      params: Tensor of embedding table. Rank 2 (table_size x embedding dim)
      values_and_values_mask: is a two-tuple that contains: values - Tensor of
        embedding indices. Rank 2 (batch x n_indices) values_mask - Tensor of mask
        / weights. Rank 2 (batch x n_indices)
      combiner: The combiner to use for the embedding lookup. Currently supports
        'sum' and 'mean'.
      name: Optional name scope for created ops
    Returns:
      Rank 2 tensor of aggregated (per batch element) embedding vectors.
    Raises:
      ValueError: Combiner is not supported.
    """
    values, values_mask = values_and_values_mask  # unpack the two-tuple
    with ops.name_scope(name):
        _, embedding_dimension = params.get_shape().as_list()
        n_batch, n_indices_padded = values.get_shape().as_list()
        # Unknown static batch size: use -1 so the reshapes infer it at runtime.
        if not n_batch:
            n_batch = -1
        # Dense lookup of every (possibly padded) index, reshaped to
        # (batch, n_indices, embedding_dim).
        emb_lookup = array_ops.reshape(
            embedding_ops.embedding_lookup(
                params, array_ops.reshape(values, [n_batch, n_indices_padded])
            ),
            [n_batch, n_indices_padded, embedding_dimension],
        )
        # Broadcast the mask over the embedding dimension so padded slots
        # contribute zero to the aggregation.
        values_mask_broadcast = array_ops.reshape(
            values_mask, [n_batch, n_indices_padded, 1]
        )
        aggregate_emb = math_ops.reduce_sum(emb_lookup * values_mask_broadcast, axis=1)
        if combiner == "sum":
            return aggregate_emb
        elif combiner == "mean":
            # In the case we have an empty row, both aggregate_emb and
            # math_ops.reduce_sum(values_mask_broadcast, axis=1) will be 0. Thus,
            # we can take max it with a non-zero value to prevent NaNs. Note that
            # math_ops.reduce_sum(values_mask_broadcast, axis=1) will have integer
            # values so 1.0 is the smallest value.
            return aggregate_emb / math_ops.maximum(
                math_ops.reduce_sum(values_mask_broadcast, axis=1), 1.0
            )
        else:
            raise ValueError(
                "Dense TPU Embedding does not support combiner "
                "other than sum and mean."
            )
def pad_sparse_embedding_lookup_indices(sparse_indices, padded_size):
    """Creates statically-sized Tensors containing indices and weights.
    From third_party/cloud_tpu/models/movielens/tpu_embedding.py
    The returned padded weight Tensor also doubles as a mask indicating which
    values in the returned padded indices Tensor are indices versus padded
    zeros.
    Args:
      sparse_indices: SparseTensor of embedding lookup indices.
      padded_size: Number of columns of the returned Tensors. Indices which fall
        out of bounds will be truncated to the padded size.
    Returns:
      (sparse_indices.values padded to the specified size,
       a mask the same size as the returned padded values in which 0s
       indicate padded locations and 1s (or values from sparse_weights)
       indicate actual values)
    """
    batch_size = sparse_indices.dense_shape[0]
    # Drop any entries beyond the first padded_size columns.
    sparse_indices = sparse_ops.sparse_slice(
        sparse_indices, [0, 0], [batch_size, padded_size]
    )
    indices, values = sparse_indices.indices, sparse_indices.values
    # NOTE(review): unlike the original movielens helper's docstring, no
    # modulo by the embedding table size is applied here — values are only
    # cast to int32. Confirm callers guarantee in-range ids.
    padded_values = array_ops.scatter_nd(
        indices, math_ops.cast(values, dtypes.int32), shape=(batch_size, padded_size)
    )
    # All present entries get weight 1.0; absent slots stay 0 and act as the mask.
    weights = array_ops.ones_like(values, dtype=dtypes.float32)
    padded_mask = array_ops.scatter_nd(
        indices, weights, shape=(batch_size, padded_size)
    )
    return padded_values, padded_mask
def _check_invalid_cases(embedding_lookup_device):
    """Raises ValueError for unsupported device/context combinations."""
    under_inference = tpu.under_tpu_inference_context()
    # The TPU embedding core can only be used for training, never inference.
    if embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE and under_inference:
        raise ValueError(
            "Using embedding_lookup_device=tpu_embedding_core during inference "
            "is not supported."
        )
    # Conversely, the CPU lookup device is only valid at inference time.
    if embedding_lookup_device == EmbeddingDevice.CPU and not under_inference:
        raise ValueError(
            'Using TPUEmbeddingColumn with embedding_lookup_device="cpu" '
            "during training is not supported."
        )
class _TPUDeviceSpecificEmbeddingColumnV2(_TPUEmbeddingColumnV2):
    """TPUEmbeddingColumn which allows serving on TensorCore.

    Depending on ``embedding_lookup_device`` the lookup is delegated to the
    TPU embedding core (parent class behaviour), performed densely on device
    via ``sparse_embedding_aggregate_slice``, or handled by the stock CPU
    ``fc_lib.EmbeddingColumn`` implementation.
    """
    def __new__(cls, *args, **kwargs):
        # For __new__, just capture the inference dense shape and call parent.
        # NOTE(review): these are stored on the *class*, not the instance —
        # presumably safe because a fresh subclass/instance pairing is created
        # per column; confirm before reusing the class directly.
        if "tensor_core_shape" in kwargs:
            cls._tensor_core_shape = kwargs["tensor_core_shape"]
            del kwargs["tensor_core_shape"]
        if "embedding_lookup_device" in kwargs:
            cls._embedding_lookup_device = kwargs["embedding_lookup_device"]
            del kwargs["embedding_lookup_device"]
        return _TPUEmbeddingColumnV2.__new__(cls, *args, **kwargs)
    def __init__(self, *args, **kwargs):
        # For __init__, just capture the inference dense shape and call parent.
        if "tensor_core_shape" in kwargs:
            self._tensor_core_shape = kwargs["tensor_core_shape"]
            del kwargs["tensor_core_shape"]
        if "embedding_lookup_device" in kwargs:
            self._embedding_lookup_device = kwargs["embedding_lookup_device"]
            del kwargs["embedding_lookup_device"]
        _TPUEmbeddingColumnV2.__init__(self, *args, **kwargs)
    def __deepcopy__(self, memo):
        # Reconstruct with the device-specific keyword args the parent's
        # __getnewargs__ does not carry.
        return _TPUDeviceSpecificEmbeddingColumnV2(
            *(copy.deepcopy(a, memo) for a in self.__getnewargs__()),
            tensor_core_shape=self._tensor_core_shape,
            embedding_lookup_device=self._embedding_lookup_device
        )
    def create_state(self, state_manager):
        """Creates embedding state, dispatching on the configured device."""
        _check_invalid_cases(self._embedding_lookup_device)
        # CPU case.
        is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
        is_cpu = is_cpu or _is_running_on_cpu()
        if is_cpu:
            return fc_lib.EmbeddingColumn.create_state(self, state_manager)
        # TPU_EMBEDDING_CORE case.
        elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
            return super(_TPUDeviceSpecificEmbeddingColumnV2, self).create_state(
                state_manager
            )
        # Remaining case: dense TensorCore lookup — create the variable the
        # same way the CPU path does.
        return fc_lib.EmbeddingColumn.create_state(self, state_manager)
    def get_dense_tensor(self, transformation_cache, state_manager):
        """Returns dense embeddings (FC V2), dispatching on the lookup device."""
        _check_invalid_cases(self._embedding_lookup_device)
        # CPU Case.
        is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
        is_cpu = is_cpu or _is_running_on_cpu()
        if is_cpu:
            return super(_TPUDeviceSpecificEmbeddingColumnV2, self).get_dense_tensor(
                transformation_cache, state_manager
            )
        # TPU_EMBEDDING_CORE case.
        elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
            return super(_TPUDeviceSpecificEmbeddingColumnV2, self).get_dense_tensor(
                transformation_cache, state_manager
            )
        # Remaining case: dense TensorCore lookup.
        if tpu.under_tpu_inference_context():
            # For inference, use outside compile to densify and pad the input tensors.
            sparse_tensor = transformation_cache.get(
                self.categorical_column.name, state_manager
            )
            def host_computation():
                return pad_sparse_embedding_lookup_indices(
                    sparse_tensor, self._tensor_core_shape[1]
                )
            values, mask = tpu.outside_compilation(host_computation)
        else:
            # For training, the inputs should already have been densified and padded.
            values = transformation_cache.get(
                self.categorical_column.name, state_manager
            )
            mask = transformation_cache.get(
                self.categorical_column.name + _TENSOR_CORE_MASK_KEY_SUFFIX,
                state_manager,
            )
        embedding_weights = state_manager.get_variable(self, name="embedding_weights")
        return sparse_embedding_aggregate_slice(
            embedding_weights, (values, mask), self.get_combiner()
        )
    def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
        """Returns dense embeddings (FC V1), dispatching on the lookup device."""
        _check_invalid_cases(self._embedding_lookup_device)
        # CPU Case.
        is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
        is_cpu = is_cpu or _is_running_on_cpu()
        if is_cpu:
            return super(_TPUDeviceSpecificEmbeddingColumnV2, self)._get_dense_tensor(
                inputs, weight_collections, trainable
            )
        # TPU_EMBEDDING_CORE case.
        elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
            return super(_TPUDeviceSpecificEmbeddingColumnV2, self)._get_dense_tensor(
                inputs, weight_collections, trainable
            )
        # Remaining case: dense TensorCore lookup.
        if tpu.under_tpu_inference_context():
            # For inference, use outside compile to densify and pad the input tensors.
            sparse_tensor = inputs.get(self.get_feature_key_name())
            def host_computation():
                return pad_sparse_embedding_lookup_indices(
                    sparse_tensor, self._tensor_core_shape[1]
                )
            values, mask = tpu.outside_compilation(host_computation)
        else:
            # For training, the inputs should already have been densified and padded.
            values = inputs.get(self.get_feature_key_name())
            mask = inputs.get(
                self.get_feature_key_name() + _TENSOR_CORE_MASK_KEY_SUFFIX
            )
        embedding_shape = (
            self.categorical_column._num_buckets,
            self.dimension,
        )  # pylint: disable=protected-access
        if (
            weight_collections
            and ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections
        ):
            weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
        embedding_weights = variable_scope.get_variable(
            name="embedding_weights",
            shape=embedding_shape,
            dtype=dtypes.float32,
            initializer=self.initializer,
            trainable=self.trainable and trainable,
            collections=weight_collections,
        )
        return sparse_embedding_aggregate_slice(
            embedding_weights, (values, mask), self.get_combiner()
        )
class _TPUSharedDeviceSpecificEmbeddingColumnV2(_TPUSharedEmbeddingColumnV2):
    """TPUSharedEmbeddingColumnV2 which allows serving on TensorCore.

    Shared-table counterpart of _TPUDeviceSpecificEmbeddingColumnV2: the
    lookup is dispatched to CPU, the TPU embedding core, or a dense
    TensorCore lookup depending on ``embedding_lookup_device``.
    """
    def __new__(cls, *args, **kwargs):
        # For __new__, just capture the inference dense shape and call parent.
        # NOTE(review): stored on the *class*, not the instance (see the
        # non-shared variant above) — confirm a class is not reused.
        if "tensor_core_shape" in kwargs:
            cls._tensor_core_shape = kwargs["tensor_core_shape"]
            del kwargs["tensor_core_shape"]
        if "embedding_lookup_device" in kwargs:
            cls._embedding_lookup_device = kwargs["embedding_lookup_device"]
            del kwargs["embedding_lookup_device"]
        return _TPUSharedEmbeddingColumnV2.__new__(cls, *args, **kwargs)
    def __init__(self, *args, **kwargs):
        # For __init__, just capture the inference dense shape and call parent.
        if "tensor_core_shape" in kwargs:
            self._tensor_core_shape = kwargs["tensor_core_shape"]
            del kwargs["tensor_core_shape"]
        if "embedding_lookup_device" in kwargs:
            self._embedding_lookup_device = kwargs["embedding_lookup_device"]
            del kwargs["embedding_lookup_device"]
        _TPUSharedEmbeddingColumnV2.__init__(self, *args, **kwargs)
    def __deepcopy__(self, memo):
        # Reconstruct with the device-specific keyword args the parent's
        # __getnewargs__ does not carry.
        return _TPUSharedDeviceSpecificEmbeddingColumnV2(
            *(copy.deepcopy(a, memo) for a in self.__getnewargs__()),
            tensor_core_shape=self._tensor_core_shape,
            embedding_lookup_device=self._embedding_lookup_device
        )
    def _get_dense_tensor_internal(self, transformation_cache, state_manager):
        """Private method that follows _get_dense_tensor_internal."""
        _check_invalid_cases(self._embedding_lookup_device)
        # CPU Case.
        is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
        is_cpu = is_cpu or _is_running_on_cpu()
        if is_cpu:
            return super(
                _TPUSharedDeviceSpecificEmbeddingColumnV2, self
            )._get_dense_tensor_internal(transformation_cache, state_manager)
        # TPU_EMBEDDING_CORE case.
        if self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
            return super(
                _TPUSharedDeviceSpecificEmbeddingColumnV2, self
            )._get_dense_tensor_internal(transformation_cache, state_manager)
        # Remaining case: dense TensorCore lookup.
        if tpu.under_tpu_inference_context():
            # For inference, use outside compile to densify and pad the input tensors.
            sparse_tensor = transformation_cache.get(
                self.categorical_column.name, state_manager
            )
            def host_computation():
                return pad_sparse_embedding_lookup_indices(
                    sparse_tensor, self._tensor_core_shape[1]
                )
            values, mask = tpu.outside_compilation(host_computation)
        else:
            # For training, the inputs should already have been densified and padded.
            values = transformation_cache.get(
                self.categorical_column.name, state_manager
            )
            mask = transformation_cache.get(
                self.categorical_column.name + _TENSOR_CORE_MASK_KEY_SUFFIX,
                state_manager,
            )
        # Do a dense embedding lookup on TensorCore.
        embedding_weights = self.shared_embedding_column_creator.embedding_weights
        return sparse_embedding_aggregate_slice(
            embedding_weights, (values, mask), self.get_combiner()
        )
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
b2cd196a4e77d83e542be25199838e0b8ec80ff9 | ad357cfbec64afb8f4cc4043b212996768f9755c | /api/assessment/automate/formatters.py | dac02f8f9749219cec476cf1e0392f3c9036f96a | [
"MIT"
def rca(import_value, export_value):
    """Classify a pair of RCA (revealed comparative advantage) indicators.

    Returns "NA" when either value is missing, "Specialised" when both are
    positive, "Unspecialised" when both are negative, and "Inconclusive"
    otherwise (zeros or mixed signs).
    """
    if import_value is None or export_value is None:
        return "NA"
    both_positive = import_value > 0 and export_value > 0
    both_negative = import_value < 0 and export_value < 0
    if both_positive:
        return "Specialised"
    if both_negative:
        return "Unspecialised"
    return "Inconclusive"
def rca_diff(import_value, export_value, country1, country2):
    """Describe where country2 is more specialised: globally or in country1."""
    if import_value is None or export_value is None:
        return "NA"
    # min > 0 <=> both values positive; max < 0 <=> both values negative.
    if min(import_value, export_value) > 0:
        return f"{country2} more specialised globally than in {country1}"
    if max(import_value, export_value) < 0:
        return f"{country2} more specialised in {country1} than globally"
    return "Inconclusive"
def rca_diff_glob(import_value, export_value, country1, country2):
    """Describe which country is more specialised globally than the other."""
    if import_value is None or export_value is None:
        return "NA"
    # min > 0 <=> both values positive; max < 0 <=> both values negative.
    if min(import_value, export_value) > 0:
        return f"{country2} more specialised globally than {country1}"
    if max(import_value, export_value) < 0:
        return f"{country1} more specialised globally than {country2}"
    return "Inconclusive"
def format_value(value):
    """Format a monetary amount with a k/m/bn suffix, e.g. 1500 -> "£1.5k".

    Values below 1000 are shown unscaled; larger values are rounded to one
    decimal place of the suffix unit.
    """
    if value < 1000:
        return f"£{round(value, 0)}"
    # Use >= on the upper bounds so exact powers land in the larger unit
    # (previously 1000000 rendered as "£1000.0k" instead of "£1.0m", and
    # 1000000000 as "£1000.0m" instead of "£1.0bn").
    elif value >= 1000000000:
        return f"£{round(value, -8) / 1000000000}bn"
    elif value >= 1000000:
        return f"£{round(value, -5) / 1000000}m"
    return f"£{round(value, -2) / 1000}k"
def value_range(import_value, export_value):
    """Format the two values as an ascending "low - high" range string."""
    low, high = sorted((import_value, export_value))
    return f"{format_value(low)} - {format_value(high)}"
def percent_range(import_value, export_value, decimal_places):
    """Format two fractional values as a percentage, or an ascending range.

    Equality is tested on the unrounded scaled values, so two values that
    merely round to the same percentage still render as a range.
    """
    low = import_value * 100
    high = export_value * 100
    if low == high:
        return f"{round(low, decimal_places)}%"
    if low > high:
        low, high = high, low
    return f"{round(low, decimal_places)}% - {round(high, decimal_places)}%"
| [
"noreply@github.com"
] | noreply@github.com |
b0f8b3dc5a6fbec391eb180abdea385a1eda72cf | 3dab50196ae7c93ec7f0bb7ddc84c2409a989e15 | /13305.py | acc6652e9e7bfc9a919643103706dd5812e3ceb7 | [] | no_license | seounjin/baekjoon_algorithm | 7d8b6a8f51356ad862cdb3229ff628a9e1ca49d8 | a7a3f212992fd9248db9680c901536d68e5680bd | refs/heads/master | 2022-12-21T03:15:17.968041 | 2020-09-16T15:25:23 | 2020-09-16T15:25:23 | 185,127,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # 주유소
# Baekjoon 13305 "주유소" (gas station): drive left-to-right and buy the fuel
# for every road segment at the cheapest city passed so far (greedy).
import sys
input = sys.stdin.readline

N = int(input())
# lengths of the N - 1 roads between consecutive cities
road = list(map(int, input().split()))
# fuel price in each of the N cities
city = list(map(int, input().split()))

# Running total: each leg is paid at the minimum price seen up to its start.
# (Replaces the previous fixed-size answer list and the unused road-length sum.)
total = 0
cheapest = city[0]
for i in range(N - 1):
    cheapest = min(cheapest, city[i])
    total += cheapest * road[i]
print(total)
"invanda7@gmail.com"
] | invanda7@gmail.com |
c8e2155ef68a3eba87ea0e8c4cab9b582c3f5355 | 8bc3e7bd0fa1714b3d0466e940ed801cf9a4c5d4 | /pyvisual/node/io/system_var.py | 2e6dfeaf5a70761d5951b4abff26e7ec2a04eaae | [] | no_license | m0r13/pyvisual | d99b3512fefaf4a2164362a0b7aabd1df9ecee03 | f6b3e2217e647b80f1379716c00e8adb53975bca | refs/heads/master | 2022-02-21T22:24:22.467475 | 2019-06-17T20:38:48 | 2019-06-17T20:38:48 | 140,211,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,802 | py | import json
import os
import time
from collections import defaultdict, OrderedDict
import imgui
from pyvisual.node import dtype, value
from pyvisual.node.base import Node
from pyvisual.editor import widget
# Minimum number of seconds between persisted writes (see write_variables).
SERIALIZATION_WRITE_INTERVAL = 5.0
# JSON file the variable values are persisted to / restored from.
SERIALIZATION_FILE = "system_vars.json"
# if you add another variable with another dtype than here, add the name of the dtype below!
VARIABLES = OrderedDict([
    ("gain", {"dtype" : dtype.float, "dtype_args" : {"default" : 4.0, "range" : [0.0, float("inf")]}}),
    ("threshold", {"dtype" : dtype.float, "dtype_args" : {"default" : 0.4, "range" : [0.0, float("inf")]}}),
    ("ref_aspect", {"dtype" : dtype.str, "dtype_args" : {"default" : "16:9"}}),
    ("ref_highres_height", {"dtype" : dtype.int, "dtype_args" : {"default" : 1080, "range" : [0, float("inf")]}}),
    ("ref_lowres_height", {"dtype" : dtype.int, "dtype_args" : {"default" : 720, "range" : [0, float("inf")]}}),
    ("ref_noiseres_height", {"dtype" : dtype.int, "dtype_args" : {"default" : 512, "range" : [0, float("inf")]}}),
])
# name -> value (SettableValue) for each variable
values = OrderedDict()
# name -> widget for each variable
widgets = OrderedDict()
# dtype -> list of (name, value)
values_by_dtype = defaultdict(lambda: [])
# initialize values and widgets that are associated with variables
for name, spec in VARIABLES.items():
    assert "dtype" in spec
    dt = spec["dtype"]
    dt_args = spec.get("dtype_args", {})
    # Prefer the per-variable default from dtype_args over the dtype default.
    default_value = dt.default
    if "default" in dt_args:
        default_value = dt_args["default"]
    v = value.SettableValue(default_value)
    w = widget.create_widget(dt, dt_args)
    w.width = widget.WIDGET_WIDTH * 1.5
    values[name] = v
    values_by_dtype[dt].append((name, v))
    widgets[name] = w
# True once a value changed since the last serialization to disk.
_variables_dirty = False
# time.time() timestamp of the last write (throttles write_variables).
_variables_last_written = 0
# Live GetSystemVar node instances (registered in start(), removed in stop()).
_node_instances = set()
def notify_change():
    """Mark the system variables dirty and push the change to all live nodes.

    Important: must be called whenever a variable's value is modified (the
    editor does this automatically). Forces every registered GetSystemVar
    node to re-evaluate, then clears the per-value changed flags.
    """
    global _variables_dirty
    _variables_dirty = True
    for instance in _node_instances:
        instance.force_evaluate()
    # If the nodes only took values over when flagged as changed, resetting
    # here would need rethinking; currently nodes always copy on evaluate.
    # Loop variable renamed from "value" to avoid shadowing the imported
    # pyvisual.node.value module.
    for v in values.values():
        v.reset_changed()
def read_variables():
    """Load persisted variable values from SERIALIZATION_FILE, if it exists.

    Names not declared in VARIABLES are ignored; each value is unserialized
    through its variable's declared dtype. Notifies nodes of the change.
    """
    if not os.path.isfile(SERIALIZATION_FILE):
        return
    # Use a context manager so the file handle is closed deterministically
    # (previously json.load(open(...)) leaked the descriptor).
    with open(SERIALIZATION_FILE) as f:
        serialized_values = json.load(f)
    for name, serialized in serialized_values.items():
        if name not in VARIABLES:
            continue
        dt = VARIABLES[name]["dtype"]
        # Assign through the SettableValue so observers see the update.
        values[name].value = dt.base_type.unserialize(serialized)
    notify_change()


# Restore persisted values once at import time.
read_variables()
def write_variables(force=False):
    """Persist the current variable values to SERIALIZATION_FILE as JSON.

    Writes at most once per SERIALIZATION_WRITE_INTERVAL seconds unless
    *force* is True. Clears the dirty flag and records the write time.
    """
    global _variables_dirty, _variables_last_written
    if force or time.time() - _variables_last_written > SERIALIZATION_WRITE_INTERVAL:
        _variables_dirty = False
        _variables_last_written = time.time()
        data = {}
        for name, spec in VARIABLES.items():
            # Serialize through the declared dtype so read_variables can
            # round-trip the value. ("value" local renamed to avoid shadowing
            # the imported pyvisual.node.value module.)
            val = values[name].value
            data[name] = spec["dtype"].base_type.serialize(val)
        # Use the module constant instead of the previous hard-coded
        # "system_vars.json" duplicate.
        with open(SERIALIZATION_FILE, "w") as f:
            json.dump(data, f)
class GetSystemVar(Node):
    """Graph node that outputs the current value of one named system variable.

    The variable is picked via the hidden "name" input (rendered as a combo
    box in the custom UI). Concrete subclasses created below set DTYPE to
    restrict the selectable variables to one dtype. Instances register in
    _node_instances so notify_change() can force re-evaluation.
    """
    # Overridden with a concrete dtype by the generated subclasses below.
    DTYPE = None
    class Meta:
        inputs = [
            {"name" : "name", "dtype" : dtype.str, "hide" : True}
        ]
        options = {
            "virtual" : True
        }
    def __init__(self):
        super().__init__()
        # The SettableValue backing the selected variable, or None if unset.
        self._value = None
    @property
    def collapsed_node_title(self):
        return "get system var: %s" % self.get("name")
    def start(self, graph):
        _node_instances.add(self)
        name = self.get("name")
        if name:
            self._value = values.get(name, None)
            # Clear a stale selection that no longer matches any variable.
            if self._value is None:
                self.get_input("name").value = ""
    def _evaluate(self):
        output = self.get_output("output")
        # "is not None" instead of "!= None": identity check is the correct
        # idiom and avoids surprises with custom __eq__ implementations.
        if self._value is not None:
            output.value = self._value.value
    def stop(self):
        _node_instances.remove(self)
    def _show_custom_ui(self):
        """Render a combo box offering "<none>" plus all variables of DTYPE."""
        selected_name = self.get("name")
        preview = selected_name if selected_name else "<none>"
        if imgui.begin_combo("", preview):
            is_selected = not selected_name
            opened, selected = imgui.selectable("<none>", is_selected)
            if opened:
                self.get_input("name").value = ""
                self._value = None
            if is_selected:
                imgui.set_item_default_focus()
            imgui.separator()
            # "val" instead of "value" to avoid shadowing the imported module.
            for name, val in values_by_dtype.get(self.DTYPE, []):
                is_selected = name == selected_name
                opened, selected = imgui.selectable(name, is_selected)
                if opened:
                    self.get_input("name").value = name
                    self._value = val
                if is_selected:
                    imgui.set_item_default_focus()
            imgui.end_combo()
    @classmethod
    def get_presets(cls, graph):
        """Return (label, inputs) presets — one per variable of this DTYPE."""
        presets = []
        for preset_name, _ in values_by_dtype.get(cls.DTYPE, []):
            presets.append((preset_name, {"i_name" : preset_name}))
        return presets
# Capitalized dtype names used to build the Get<Dtype>SystemVar class names.
dtype_capital_names = {
    dtype.float : "Float",
    dtype.str : "Str",
    dtype.int : "Int",
}
# create a GetXXXSystemVar class for each dtype
node_classes = []
for dt in values_by_dtype.keys():
    name = "Get%sSystemVar" % dtype_capital_names[dt]
    # The outputs list literal is evaluated immediately, so each generated
    # class gets its own Meta bound to the current dt (no late binding).
    class Meta:
        outputs = [
            {"name" : "output", "dtype" : dt, "manual_input": True},
        ]
        options = {
            "virtual" : False,
            "show_title" : False
        }
    # Dynamically subclass GetSystemVar; __module__ is set so the classes
    # appear to live in this module.
    cls = type(name, (GetSystemVar,), {"DTYPE" : dt, "Meta" : Meta, "__module__" : __name__})
    node_classes.append(cls)
| [
"moritz.hilscher@gmail.com"
] | moritz.hilscher@gmail.com |
494f4579cf7fca7b1eb90c375efb34a67f6d3cd4 | e319b3f9b80f0e8a843cec7edd65f19baa0c9f3b | /Interface/Dialogs/SpendOrRestoreSpellPointsDialog.py | 3aa7c90fe3e39e7fa58af55d966d06d1c6e5583f | [
"MIT"
] | permissive | Snackhole/PyFifth | 0e852ece4ef37d0cfa952d0b47d1d9c42ea6b1fd | 2a5419dea0309119d3ffbb5ba33ec14395d6dd59 | refs/heads/main | 2023-08-19T02:00:20.433299 | 2023-08-01T00:21:56 | 2023-08-01T00:21:56 | 125,868,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,083 | py | from PyQt5 import QtCore
from PyQt5.QtWidgets import QComboBox, QLabel, QPushButton, QDialog, QGridLayout, QSizePolicy, QSpinBox
class SpendOrRestoreSpellPointsDialog(QDialog):
def __init__(self, CharacterWindow, RestoreMode=False):
super().__init__(parent=CharacterWindow)
# Store Parameters
self.CharacterWindow = CharacterWindow
self.RestoreMode = RestoreMode
# Variables
self.ModeString = "Restore" if self.RestoreMode else "Spend"
self.SpellLevels = ["None"] + list(self.CharacterWindow.PlayerCharacter.SpellPointValues.keys())
self.SpellSlotLevel = None
self.SpellSlotAmount = None
self.ManualAmount = None
self.Submitted = False
# Inputs Size Policy
self.InputsSizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
# Prompt
self.PromptLabel = QLabel(self.ModeString + " spell points:")
self.PromptLabel.setAlignment(QtCore.Qt.AlignCenter)
# Spell Slots
self.SpellSlotLevelLabel = QLabel("Spell Slot Level:")
self.SpellSlotLevelLabel.setAlignment(QtCore.Qt.AlignCenter)
self.SpellSlotLevelComboBox = QComboBox()
self.SpellSlotLevelComboBox.setSizePolicy(self.InputsSizePolicy)
self.SpellSlotLevelComboBox.addItems(self.SpellLevels)
self.SpellSlotLevelComboBox.setEditable(False)
self.SpellSlotAmountLabel = QLabel("Spell Slot Amount:")
self.SpellSlotAmountLabel.setAlignment(QtCore.Qt.AlignCenter)
self.SpellSlotAmountSpinBox = QSpinBox()
self.SpellSlotAmountSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.SpellSlotAmountSpinBox.setButtonSymbols(self.SpellSlotAmountSpinBox.NoButtons)
self.SpellSlotAmountSpinBox.setSizePolicy(self.InputsSizePolicy)
self.SpellSlotAmountSpinBox.setRange(0, 1000000000)
self.SpellSlotAmountSpinBox.setValue(0)
# Manual Amount
self.ManualAmountLabel = QLabel("Manual Amount:")
self.ManualAmountLabel.setAlignment(QtCore.Qt.AlignCenter)
self.ManualAmountSpinBox = QSpinBox()
self.ManualAmountSpinBox.setAlignment(QtCore.Qt.AlignCenter)
self.ManualAmountSpinBox.setButtonSymbols(self.ManualAmountSpinBox.NoButtons)
self.ManualAmountSpinBox.setSizePolicy(self.InputsSizePolicy)
self.ManualAmountSpinBox.setRange(0, 1000000000)
self.ManualAmountSpinBox.setValue(0)
# Buttons
self.SubmitButton = QPushButton(self.ModeString)
self.SubmitButton.clicked.connect(self.Submit)
self.CancelButton = QPushButton("Cancel")
self.CancelButton.clicked.connect(self.Cancel)
# Layout
self.Layout = QGridLayout()
self.Layout.addWidget(self.PromptLabel, 0, 0, 1, 2)
self.Layout.addWidget(self.SpellSlotLevelLabel, 1, 0)
self.Layout.addWidget(self.SpellSlotLevelComboBox, 1, 1)
self.Layout.addWidget(self.SpellSlotAmountLabel, 2, 0)
self.Layout.addWidget(self.SpellSlotAmountSpinBox, 2, 1)
self.Layout.addWidget(self.ManualAmountLabel, 3, 0)
self.Layout.addWidget(self.ManualAmountSpinBox, 3, 1)
self.ButtonsLayout = QGridLayout()
self.ButtonsLayout.addWidget(self.SubmitButton, 0, 0)
self.ButtonsLayout.addWidget(self.CancelButton, 0, 1)
self.Layout.addLayout(self.ButtonsLayout, 4, 0, 1, 2)
for Row in [1, 2, 3]:
self.Layout.setRowStretch(Row, 1)
self.Layout.setColumnStretch(1, 1)
self.setLayout(self.Layout)
# Set Window Title and Icon
self.setWindowTitle(self.CharacterWindow.ScriptName)
self.setWindowIcon(self.CharacterWindow.WindowIcon)
# Execute Dialog
self.exec_()
def Submit(self):
self.SpellSlotLevel = self.SpellSlotLevelComboBox.currentText()
self.SpellSlotAmount = self.SpellSlotAmountSpinBox.value()
self.ManualAmount = self.ManualAmountSpinBox.value()
self.Submitted = True
self.close()
def Cancel(self):
self.close()
| [
"snackhole.dev@gmail.com"
] | snackhole.dev@gmail.com |
cbf179d79502288c0887998e124e14e76e67a723 | c271c196bf2730c20de42f75568336cd8ccd07d1 | /password_generator/settings.py | b735e335e8541b7ca0067bb54d2db2e9ee2e225f | [] | no_license | entry-dev/django3-password_generator | e68f97565944355c8001ad35a73118a8741a1106 | 74f2684e9cdd597bccf9a4aaedbc208625e42de2 | refs/heads/master | 2023-01-04T03:23:48.416370 | 2020-11-04T23:52:36 | 2020-11-04T23:52:36 | 310,145,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,115 | py | """
Django settings for password_generator project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dvnck9yp5_#09@1zzyjc2c=)!g=8a36n!@it291c13(4%i5=pb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'generator',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'password_generator.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'password_generator.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"ivan.krstic@yahoo.com"
] | ivan.krstic@yahoo.com |
ca649f8cd329ae6e3c66facc2dfe5d27aa53dc6b | a70538105d0cb172c2f5628f083d53529904941b | /Watermelon.py | 04c38d409a4fa100bf7fb7a115aef465f1a1019a | [] | no_license | Raihan-009/Codeforces | 792be79991d1ade5aa87e779506173b5d4fac442 | 3bb53f2561a29793964ebaf76848de5be8ba975e | refs/heads/master | 2022-09-12T09:40:45.988289 | 2020-06-01T14:24:48 | 2020-06-01T14:24:48 | 268,368,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | '''
One hot summer day Pete and his friend Billy decided to buy a watermelon. They chose the biggest and the ripest one, in their opinion. After that the watermelon was weighed, and the scales showed w kilos. They rushed home, dying of thirst, and decided to divide the berry, however they faced a hard problem.
Pete and Billy are great fans of even numbers, that's why they want to divide the watermelon in such a way that each of the two parts weighs even number of kilos, at the same time it is not obligatory that the parts are equal. The boys are extremely tired and want to start their meal as soon as possible, that's why you should help them and find out, if they can divide the watermelon in the way they want. For sure, each of them should get a part of positive weight.
#Input
The first (and the only) input line contains integer number w (1 ≤ w ≤ 100) — the weight of the watermelon bought by the boys.
#Output
Print YES, if the boys can divide the watermelon into two parts, each of them weighing even number of kilos; and NO in the opposite case.'''
w = input()
w = int(w)
if (w > 2 and w%2 ==0):
print("YES")
else :
print("NO")
| [
"64744693+Raihan-009@users.noreply.github.com"
] | 64744693+Raihan-009@users.noreply.github.com |
62d9fb786e671921898f8a2ecbece758e2046377 | a5f5f19615d1af450338b8d1071b940bcc68ab91 | /core/mixins.py | 9beda68da1934370bd1b925f13dde7f55033d5b7 | [] | no_license | duydo131/hotel | 987943dc184d3f98fe96ddbd16c6a4453605c9e3 | b010d84a657ddb54c1f6c6add656fe21e5f74a31 | refs/heads/main | 2023-06-15T09:14:37.109199 | 2021-07-16T03:26:02 | 2021-07-16T03:26:02 | 369,415,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | from rest_framework_extensions.mixins import DetailSerializerMixin
class GetSerializerClassMixin:
serializer_action_classes = {}
serializer_detail_class = None
queryset_detail = None
def get_serializer_class(self):
try:
return self.serializer_action_classes[self.action]
except (KeyError, AttributeError):
error_message = "'{0}' should include a 'serializer_detail_class' attribute".format(self.__class__.__name__)
# assert self.serializer_detail_class is not None, error_message
# if getattr(self, 'object', None):
# return self.serializer_detail_class
# else:
return super(GetSerializerClassMixin, self).get_serializer_class()
def get_queryset(self):
if self.action in ["update", "partial_update", "retrieve"]:
return self.queryset_detail
return self.queryset
| [
"dotheduybk131@gmail.com"
] | dotheduybk131@gmail.com |
15834731332573ccf1a384394228b16286a05343 | f2e7442f38465fdd237c763c80f436e3f103221c | /experimental/HandKeyPointsCustom.py | caf05d577f753b76b5d6fd0187ac6e81faa72eb9 | [] | no_license | SuhelNaryal/Sign-Hawk | fda0f35efd9b0a7c17e0562efd7faac76367ab57 | 52e3e150b1ea30c7c4c15ee54f0dba09d0408dc7 | refs/heads/main | 2023-01-24T13:24:57.984937 | 2020-11-23T11:30:16 | 2020-11-23T11:30:16 | 303,287,298 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,449 | py | import keras
import tensorflow as tf
def HandKeyPointsLoss(y_true, y_pred):
y_pred = keras.backend.cast(y_pred, dtype=tf.float32)
left_hand_true = y_true[:, :, 0]
right_hand_true = y_true[:, :, 1]
left_keypoints_true = y_true[:, :, 2:65]
right_keypoints_true = y_true[:, :, 65:]
left_hand_loss = tf.reduce_sum(keras.losses.binary_crossentropy(left_hand_true, y_pred[:, :, 0]), axis=-1) / left_hand_true.shape[0]
right_hand_loss = tf.reduce_sum(keras.losses.binary_crossentropy(right_hand_true, y_pred[:, :, 1]), axis=-1) / right_hand_true.shape[0]
left_keypoints_loss = tf.reduce_sum(left_hand_true * keras.losses.mean_squared_error(left_keypoints_true, y_pred[:, :, 2:65]), axis=-1) / tf.reduce_sum(left_hand_true, axis=-1)
right_keypoints_loss = tf.reduce_sum(right_hand_true * keras.losses.mean_squared_error(right_keypoints_true, y_pred[:, :, 65:]), axis=-1) / tf.reduce_sum(right_hand_true, axis=-1)
return left_hand_loss + right_hand_loss + left_keypoints_loss, right_keypoints_loss
class HandKeyPoints():
def __init__(self, learning_rate=0.001):
super(HandKeyPoints, self).__init__()
input_layer = keras.Input(shape=(256, 256, 3))
resnetbackbone = keras.applications.ResNet50V2(input_shape=(256, 256, 3), include_top=False)
resnetbackbone_out = resnetbackbone(input_layer)
global_avg_pool = keras.layers.GlobalAveragePooling2D()(resnetbackbone_out)
dense1 = keras.layers.Dense(units=2048, activation='relu')(global_avg_pool)
dense2 = keras.layers.Dense(units=2048, activation='relu')(dense1)
left_hand = keras.layers.Dense(units=1, activation='sigmoid', name='left_hand')(dense2)
right_hand = keras.layers.Dense(units=1, activation='sigmoid', name='right_hand')(dense2)
left_keypoints = keras.layers.Dense(units=63, name='left_keypoints')(dense2)
right_keypoints = keras.layers.Dense(units=63, name='right_keypoints')(dense2)
output = keras.backend.concatenate([left_hand, right_hand, left_keypoints, right_keypoints], axis=1)
self.model = keras.Model(inputs=input_layer, outputs=output)
self.optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
self.loss = HandKeyPointsLoss
self.model.compile(optimizer=self.optimizer, loss=self.loss)
print(self.model.summary())
HandKeyPoints()
| [
"noreply@github.com"
] | noreply@github.com |
bb78b536e10ed1c070713095bc6926c0d668fe9b | 680ef089e77f3d510f0fbe7d632c01fb497e2a57 | /manage.py | 6b33c09231ad589a05eba9ffb021c19d258c0cd0 | [] | no_license | kupuk090/idaproject_test | 3fa4ef68ba9ca921edb4ad603ba39e5c9a3f9b7f | e92cbf615bc360fc44f45c746be3f4431b48ee41 | refs/heads/master | 2023-04-09T10:29:22.869203 | 2021-03-21T22:59:41 | 2021-03-21T22:59:41 | 350,136,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'image_resizer.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"kupuk090@gmail.com"
] | kupuk090@gmail.com |
0ee27c2b6c2029409b39052286ba40d81a836616 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/SjShHBJJMM/YW_HBJJMM_SHSJ_067.py | 4cb90cd9223c79893514c907a5e29a58cc20a03f | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,142 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/MoneyFund/moneyfundservice")
from mfmainService import *
from mfQueryStkPriceQty import *
sys.path.append("/home/yhl2/workspace/xtp_test/MoneyFund/moneyfundmysql")
from mfCaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_HBJJMM_SHSJ_067(xtp_test_case):
# YW_HBJJMM_SHSJ_067
def test_YW_HBJJMM_SHSJ_067(self):
title = '上海A股股票交易日五档即成转限价卖——错误的价格(价格10亿)'
# 定义当前测试用例的期待值
# 期望状态:初始、未成交、部成、全成、部撤已报、部撤、已报待撤、已撤、废单、撤废、内部撤单
# xtp_ID和cancel_xtpID默认为0,不需要变动
case_goal = {
'期望状态': '全成',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
# 定义委托参数信息------------------------------------------
# 参数:证券代码、市场、证券类型、证券状态、交易状态、买卖方向(B买S卖)、期望状态、Api
stkparm = QueryStkPriceQty('999999', '1', '111', '2', '0', 'S', case_goal['期望状态'], Api)
# 如果下单参数获取失败,则用例失败
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_LIMIT'],
'price': 1000000000,
'quantity': 200,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
b7b979b04b0a3ebb22cfc3527178b9ed7832184d | e104a337a1e3ced474511d258645109641aca01d | /movie/views.py | 71f002084a1d9a6d4d76757bebb7e17068740b8b | [] | no_license | mjkya/MovieVersity | 3be879faa68ce6855cc8e935723a50f7b5ed790f | 7eede648bb9d8bb8108c780feb513bfc8884bc89 | refs/heads/master | 2020-05-19T10:51:18.997948 | 2019-09-01T06:32:26 | 2019-09-01T06:32:26 | 184,977,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | from django.shortcuts import render,redirect
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
# Create your views here.
from .models import Movie
from django.urls import reverse_lazy
from .parser import parse_movie
def movie(request):
return render(request, 'home.html')
def parse(request):
parse_movie()
return redirect('home')
class MovieList(ListView):
model = Movie
template_name = 'home.html'
class MovieCreate(CreateView):
model = Movie
fields = '__all__'
template_name = 'movie_form.html'
success_url = reverse_lazy('home')
class MovieDetail(DetailView):
model = Movie
template_name = 'movie_detail.html'
class MovieUpdate(UpdateView):
model = Movie
fields = '__all__'
template_name = 'movie_form.html'
success_url = reverse_lazy('home')
class MovieDelete(DeleteView):
model = Movie
template_name = 'movie_delete.html'
success_url = reverse_lazy('home') | [
"noreply@github.com"
] | noreply@github.com |
aecb50d16e4b98d83419ea535baee6629c26a325 | 8060b19318440ff2fbd728afd9435d4b5a3f1da6 | /code/metadata/build_nodelist.py | ae79eb7f27e1ed301c80a478032048397612d20c | [] | no_license | Irallia/IZW-HU-Parasites | dae82db2600a96bdce7cffb228c07cdc6dde5226 | b0d70f671fa489ba132e2cffb81ff6d6fc431fe5 | refs/heads/master | 2021-01-25T08:13:18.646633 | 2018-05-20T20:25:51 | 2018-05-20T20:25:51 | 93,733,091 | 4 | 3 | null | 2018-03-21T13:36:06 | 2017-06-08T09:43:17 | TeX | UTF-8 | Python | false | false | 4,547 | py | import csv
import datetime
import sys
from code.utilities.Helpers import print_time
from code.utilities.nodelist_util import read_tags, tag_node
from time import gmtime, strftime
from Bio import Phylo
from termcolor import colored
# path_freelivings = "./data/interaction_data/reduced_freelivings.csv"
# path_parasites = "./data/interaction_data/reduced_parasites.csv"
path_freelivings = "./data/interaction_data/freelivings.csv"
path_parasites = "./data/interaction_data/parasites.csv"
# input arguments
args = sys.argv
# values from input:
subtree_name = sys.argv[1]
# examples: 'Eukaryota'
# global variables:
START_TIME = datetime.datetime.now().replace(microsecond=0)
CURRENT_TIME = datetime.datetime.now().replace(microsecond=0)
freelivings = []
parasites = []
nr_leave_nodes = 0
nr_used_freelivings = 0
nr_used_parasites = 0
unknown = 0
doubleTagged = 0
nodelist = []
def main():
global START_TIME
global CURRENT_TIME
global freelivings
global parasites
global nr_leave_nodes
global nr_used_freelivings
global nr_used_parasites
global unknown
global nodelist
global doubleTagged
print(colored("------------------------ build nodelists ------------------------", "green"))
print(strftime("%Y-%m-%d %H:%M:%S", gmtime()))
CURRENT_TIME = print_time(START_TIME)
print(colored("---------------- read parasites and freelivings ----------------", "green"))
print("Freelivings:")
freelivings = read_tags(path_freelivings)
print("Parasites:")
parasites = read_tags(path_parasites)
CURRENT_TIME = print_time(CURRENT_TIME)
print(colored("---------------- read tree ----------------", "green"))
subtree_path = './data/subtree/' + subtree_name + '.tre'
print("Build nodelist for:", subtree_name)
tree = Phylo.read(subtree_path, 'newick')
print(colored("---------------- tag tree ----------------", "green"))
fill_tree_with_tags(tree.clade, 0)
print(colored(nr_leave_nodes, 'blue'), "leave nodes are in the tree")
print(colored(nr_used_freelivings, 'blue'), "freeliving tags were used,", colored(nr_used_parasites, 'blue'), "parasite tags were used =>", colored(unknown, 'blue'), "unknown leave nodes")
print("Rootnode, Depth, Heigths: [Min, Max, Mean], Originaltag, Finaltag, Nr_children")
print(nodelist[0])
print(doubleTagged, "are tagged as P, but could also be FL!")
# ---- reset countings ----
nr_leave_nodes = 0
nr_used_freelivings = 0
nr_used_parasites = 0
unknown = 0
nodelist = []
CURRENT_TIME = print_time(CURRENT_TIME)
print(colored("--------------------------------", "green"))
return
def fill_tree_with_tags(subtree, depth):
global nr_leave_nodes
global nr_used_freelivings
global nr_used_parasites
global unknown
global nodelist
global doubleTagged
ott = subtree.name.split("$")[0] # remove index
heights = [1, 1, 1]
# 0 1 2 3 4 5
# nodelist - [id, originaltag, finaltag, depth, heights, nr_children]
nodelist.append([ott, "", "", depth, heights, len(subtree.clades)])
current_list_index = len(nodelist) - 1
if subtree.is_terminal():
stats = [nr_leave_nodes, nr_used_parasites, nr_used_freelivings, unknown, doubleTagged]
stats = tag_node(nodelist, current_list_index, ott, [freelivings, parasites], stats)
nr_leave_nodes = stats[0]
nr_used_parasites = stats[1]
nr_used_freelivings = stats[2]
unknown = stats[3]
doubleTagged = stats[4]
else:
min_heigth = float('inf')
max_heigth = 0
mean_heigth = 0
child_heigth = 0
for clade in subtree.clades:
heights = fill_tree_with_tags(clade, depth + 1)
if heights[0] < min_heigth:
min_heigth = heights[0]
if heights[1] > max_heigth:
max_heigth = heights[1]
child_heigth = child_heigth + heights[2]
mean_heigth = child_heigth/len(subtree.clades) + 1
heights = [min_heigth + 1, max_heigth + 1, mean_heigth]
nodelist[current_list_index][4] = heights
# -------------------------------------------------
csv_title = './data/nodelist/' + subtree_name + '.csv'
nodelist_file = open(csv_title, 'a')
writer = csv.writer(nodelist_file)
writer.writerow((nodelist[current_list_index]))
nodelist_file.close()
# -------------------------------------------------
return heights
main()
| [
"irallia@chrigelyra.de"
] | irallia@chrigelyra.de |
9207dc3cabf5a523ba87298730ea3d3b9f8d7750 | 7a01d168819027ed74021395edf55c79419b1e74 | /miSitio/urls.py | 06f844768f45cce9e92f4a22619de57896d7bf07 | [] | no_license | TeresaHRivas/my-first-blog | 4f2347f53faf222f7279de1d89f282815cf94422 | a6794fca9249ec2d41730f4e420c537e09d045d4 | refs/heads/master | 2020-07-20T15:21:27.431700 | 2020-02-28T22:36:38 | 2020-02-28T22:36:38 | 206,666,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | """miSitio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('miBlog.urls')),
]
| [
"teresahrivas4@gmail.com"
] | teresahrivas4@gmail.com |
50956f529320e551f07a71600677d930f51f66ab | 4b7883843049d01fa718368f09ee0aa41167f595 | /1HelloWorld.py | e27b7c5f2a359339b94f53dc65f6d1b6e00538f6 | [] | no_license | rickyjreyes/Python_Tutorial | fac324fa94e28e45f8c1293b6c4f0fe8db714052 | 00c1e90e732a6066f1324df52ee6832a85b5d6bb | refs/heads/master | 2021-06-12T03:33:53.140440 | 2017-03-19T08:25:15 | 2017-03-19T08:25:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # HelloWorld.py
#
# First Program!
# This prints hello world
print("Hello World!")
# More Practice: Make 5 of your own prints
print("My name is Ricky!!!!")
print("My favorite color is blue!!!!")
print("Video games are cool!!!!")
print("I love to code!!!!")
print("This is Python :)")
# This is a comment
# Make your own comments
"""I am a comment
with multiple lines
"""
# blah blah blah
# The computer doesn't read this
# Practice Input
user = input("Enter input: ")
print("Output: ", user)
# Manipulate Inp ")
print(int(user*10)) | [
"noreply@github.com"
] | noreply@github.com |
99e00483b2fcb94681bab327da53cc9bd655b160 | 9644e6b9b8bf64dac7b95cd527b5364653bf7ea7 | /stuff_map/migrations/0002_car.py | 7969ea652672628e197f29e5bbe827b897c46cb4 | [] | no_license | mittonface/stuff_map | d6e651cb0fc96c3985e4ca4cfa0f75000c4f78e0 | 5b758ee713c4c11c3a5c4ffceac04ea80b5583bb | refs/heads/master | 2020-06-14T03:59:39.848546 | 2016-12-04T17:21:16 | 2016-12-04T17:21:16 | 75,493,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-03 17:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stuff_map', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Car',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('model', models.CharField(max_length=128)),
('mpg', models.FloatField()),
('fuel_capacity', models.FloatField(max_length=128)),
],
),
]
| [
"bmitton@gatech.edu"
] | bmitton@gatech.edu |
b739d2efc981f15f251d102759aa0a3696382421 | 975a3b8189fffde47256b52901bcce9b5d8b1001 | /app.py | 6e0e28db9d423cdf8e340b69678b4ee67aaaa5b4 | [] | no_license | FrancisGKing/RedditToMessengerBot | f7db66e7b6b3150dd42473002ddd7eb11efc47df | 25c4f09da5c5b456f88738b00cd6674bedb984ef | refs/heads/master | 2020-04-17T03:39:27.733581 | 2019-01-17T09:09:19 | 2019-01-17T09:09:19 | 166,194,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,438 | py | from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
import json
import requests
import os
import praw
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
db = SQLAlchemy(app)
reddit = praw.Reddit(client_id='QhZh0AZf4HlrXg',
client_secret='g7vBRIAfYRFEBJtXrHF-P_lh0RQ',
user_agent='web:reddit_to_messenger_bot:v1.0.0 (by u/ThisMeansFreedom)')
#This must be filled with the Page Access Token that will be provided by the
#Facebook app that will be created
PAT = 'EAAfla2Qkli8BAHwBWlkFIjE6IpLiBNCe7WhDhVPyqqtDXQfjNIb8ZC9rRbU4ZCOsKTBaLSmshDsv3ZC3SneVcZCrEtSqSaKRqNqdXApe3lEoqcNhUrQgzTKZAwkA03anfV5mZCdVZA8eXPK3LKAsZBXbtvb4RKJtmEB6ZApuYM33whAZDZD'
quick_replies_list = [{
"content_type":"text",
"title":"Meme",
"payload":"meme",
},
{
"content_type":"text",
"title":"Motivation",
"payload":"motivation",
},
{
"content_type":"text",
"title":"Shower Thought",
"payload":"Shower_Thought",
},
{
"content_type":"text",
"title":"Jokes",
"payload":"Jokes",
}
]
@app.route('/', methods=['GET'])
def handle_verification():
print("Handling Verification.")
if request.args.get('hub.verify_token', '') == 'my_voice_is_my_password_verify_me':
print("Verification successful!")
return request.args.get('hub.challenge', '')
else:
print("Verification failed!")
return 'Error, wrong validation token'
@app.route('/', methods=['POST'])
def handle_messages():
print("Handling Messages")
payload = request.get_data()
print(payload)
for sender, message in messaging_events(payload):
print("Incoming from %s: %s" % (sender, message))
send_message(PAT, sender, message)
return "ok"
def messaging_events(payload):
"""Generate tuples of (sender_id, message_text) from the provided payload. """
data = json.loads(payload)
messaging_events = data["entry"][0]["messaging"]
for event in messaging_events:
if "message" in event and "text" in event["message"]:
yield event["sender"]["id"], event["message"]["text"].encode('unicode_escape')
else:
yield event["sender"]["id"], "I can't echo this"
def send_message(token, recipient, text):
"""Send the message text to recipient with id recipient"""
if "meme" in text.lower():
subreddit_name = "memes"
elif "shower" in text.lower():
subreddit_name = "Showerthoughts"
elif "joke" in text.lower():
subreddit_name = "Jokes"
else:
subreddit_name = "GetMotivated"
myUser = get_or_create(db.session, Users, name=recipient)
if subreddit_name == "Showerthoughts":
for submission in reddit.subreddit(subreddit_name).hot(limit=None):
if (submission.is_self == True):
query_result = Posts.query.filter(Posts.name == submission.id).first()
if query_result is None:
myPost = Posts(submission.id, submission.title)
myUser.posts.append(myPost)
db.session.commit()
payload = submission.title
break
elif myUser not in query_result.users:
myUser.posts.append(query_result)
db.session.commit()
payload = submission.title
break
else:
continue
r = requests.post("https://graph.facebook.com/v2.6/me/messages",
params={"access_token": token},
data=json.dumps({
"recipient": {"id": recipient},
"message": {"text": payload, "quick_replies":quick_replies_list}
}),
headers={'Content-type': 'application/json'})
elif subreddit_name == "Jokes":
for submission in reddit.subreddit(subreddit_name).hot(limit=None):
if ((submission.is_self == True) and (submission.link_flair_text is None)):
query_result = Posts.query.filter(Posts.name == submission.id).first()
if query_result is None:
myPost = Posts(submission.id, submission.title)
myUser.posts.append(myPost)
db.session.commit()
payload = submission.title
payload_text = submission.selftext
break
elif myUser not in query_result.users:
myUser.posts.append(query_result)
db.session.commit()
payload = submission.title
payload_text = submission.selftext
break
else:
continue
r = requests.post("https://graph.facebook.com/v2.6/me/messages",
params={"access_token": token},
data=json.dumps({
"recipient": {"id": recipient},
"message": {"text": payload, "quick_replies":quick_replies_list}
}),
headers={'Content-type': 'application/json'})
else:
payload = "http://imgur.com/WeyNGtQ.jpg"
for submission in reddit.subreddit(subreddit_name).hot(limit=None):
if (submission.link_flair_css_class == 'image') or ((submission.is_self != True) and ((".jpg" in submission.url) or (".png" in submission.url))):
query_result = Posts.query.filter(Posts.name == submission.id).first()
if query_result is None:
myPost = Posts(submission.id, submission.url)
myUser.posts.append(myPost)
db.session.commit()
payload = submission.url
break
elif myUser not in query_result.users:
myUser.posts.append(query_result)
db.session.commit()
payload = submission.url
break
else:
continue
r = requests.post("https://graph.facebook.com/v2.6/me/messages",
params={"access_token": token},
data=json.dumps({
"recipient": {"id": recipient},
"message": {"attachment": {
"type": "image",
"payload": {"url": payload}},
"quick_replies":quick_replies_list}
}),
headers={'Content-type': 'application/json'})
if r.status_code != requests.codes.ok:
print(r.text)
def get_or_create(session, model, **kwargs):
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return instance
else:
instance = model(**kwargs)
session.add(instance)
session.commit()
return instance
relationship_table=db.Table('relationship_table',
db.Column('user_id', db.Integer,db.ForeignKey('users.id'), nullable=False),
db.Column('post_id', db.Integer,db.ForeignKey('posts.id'), nullable=False),
db.PrimaryKeyConstraint('user_id', 'post_id') )
class Users(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255),nullable=False)
posts=db.relationship('Posts', secondary=relationship_table, backref='users' )
def __init__(self, name=None):
self.name = name
class Posts(db.Model):
id=db.Column(db.Integer, primary_key=True)
name=db.Column(db.String, unique=True, nullable=False)
url=db.Column(db.String, nullable=False)
def __init__(self, name=None, url=None):
self.name = name
self.url = url
if __name__ == '__main__':
app.run() | [
"francis.kingjr@gmail.com"
] | francis.kingjr@gmail.com |
8664746b874b28b034fc07228953772e842c71df | 392b644e8be2bdd5cb0e2656483786a7ce6c1ef9 | /setup.py | ffbe34a51be9405e0e1d23e19a09f3910435ace8 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"Python-2.0"
] | permissive | pandrey76/python_client | 67287b41a826ed6c2c1c5b87f30582261e35777c | 3b8a8ca471dbb2178687e7febbf216f81b49fcca | refs/heads/master | 2022-04-12T15:29:49.863787 | 2020-02-13T03:31:08 | 2020-02-13T03:31:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,811 | py | #!/usr/bin/env python
"""
setup.py file for GridDB python client
"""
from distutils.command.build import build
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
try:
with open('README.rst') as f:
readme = f.read()
except IOError:
readme = ''
os.environ["CXX"] = "g++"
os.environ["CC"] = "g++"
SOURCES = [
'src/AggregationResult.cpp',
'src/Container.cpp',
'src/ContainerInfo.cpp',
'src/Field.cpp',
'src/PartitionController.cpp',
'src/Query.cpp',
'src/QueryAnalysisEntry.cpp',
'src/RowKeyPredicate.cpp',
'src/RowSet.cpp',
'src/Store.cpp',
'src/StoreFactory.cpp',
'src/TimeSeriesProperties.cpp',
'src/TimestampUtils.cpp',
'src/griddb.i',
'src/Util.cpp',
]
DEPENDENTS = [
'src/AggregationResult.h',
'src/ContainerInfo.h',
'src/Container.h',
'src/ExpirationInfo.h',
'src/Field.h'
'src/GSException.h',
'src/PartitionController.h',
'src/Query.h',
'src/QueryAnalysisEntry.h',
'src/RowKeyPredicate.h',
'src/RowSet.h',
'src/Store.h',
'src/StoreFactory.h',
'src/TimeSeriesProperties.h',
'src/TimestampUtils.h',
'src/gstype_python.i',
'src/gstype.i',
'include/gridstore.h',
'include/Util.h',
]
INCLUDES = [
'include',
'src',
]
COMPILE_ARGS = [
'-std=c++0x'
]
LIBRARIES = [
'rt',
'gridstore',
]
SWIG_OPTS = [
'-DSWIGWORDSIZE64',
'-c++',
'-outdir',
'.',
'-Isrc'
]
class CustomBuild(build):
sub_commands = [
('build_ext', build.has_ext_modules),
('build_py', build.has_pure_modules),
('build_clib', build.has_c_libraries),
('build_scripts', build.has_scripts),
]
griddb_module = Extension('_griddb_python',
sources=SOURCES,
include_dirs=INCLUDES,
libraries=LIBRARIES,
extra_compile_args=COMPILE_ARGS,
swig_opts=SWIG_OPTS,
depends=DEPENDENTS,
)
classifiers = [
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.6",
]
setup(name='griddb_python',
version='0.8.2',
author='Katsuhiko Nonomura',
author_email='contact@griddb.org',
description='GridDB Python Client Library built using SWIG',
long_description=readme,
ext_modules=[griddb_module],
py_modules=['griddb_python'],
url='https://github.com/griddb/python_client/',
license='Apache Software License',
cmdclass={'build': CustomBuild},
long_description_content_type = 'text/x-rst',
classifiers=classifiers,
)
| [
"katsuhiko.nonomura@griddb.org"
] | katsuhiko.nonomura@griddb.org |
840981ee699fd005cdcb4fa9ecbb2214b62fced0 | a6986f430351dcc871d035f8361ca377ddc90edf | /HouseAnalysis/house/migrations/0001_initial.py | afdaea7fc6b90257183f31bc00fc8691e54c02e0 | [] | no_license | qijianchuan/HouseAnalysisWeb | d9f65a91cb1dba2bfea482214c6b532a59e21a8c | 8a619c77bdfb3516c37df912d6fe7bf14d27e706 | refs/heads/master | 2023-03-15T23:23:02.677191 | 2020-04-30T14:59:19 | 2020-04-30T14:59:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,836 | py | # Generated by Django 2.1.5 on 2020-03-12 07:39
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Api',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=500, verbose_name='名称')),
('price', models.DecimalField(decimal_places=1, max_digits=9, verbose_name='总价')),
('unit_price', models.DecimalField(decimal_places=1, max_digits=9, verbose_name='单价')),
('community_name', models.CharField(max_length=100, verbose_name='小区名')),
('region', models.CharField(max_length=50, verbose_name='区域')),
('type', models.CharField(max_length=50, verbose_name='户型')),
('construction_area', models.CharField(max_length=20, verbose_name='建筑面积')),
('orientation', models.CharField(max_length=10, verbose_name='朝向')),
('decoration', models.CharField(max_length=10, verbose_name='装修情况')),
('floor', models.CharField(max_length=15, verbose_name='楼层')),
('elevator', models.CharField(max_length=10, verbose_name='电梯')),
('purposes', models.CharField(max_length=15, verbose_name='房屋类型')),
('release_date', models.DateField(verbose_name='挂牌时间')),
('house_structure', models.CharField(max_length=20, verbose_name='建筑类型')),
('image_urls', models.CharField(max_length=1500, verbose_name='房屋详情图')),
('from_url', models.CharField(max_length=100, verbose_name='房屋链接')),
('idi', models.IntegerField()),
('lat', models.DecimalField(decimal_places=9, max_digits=12, verbose_name='纬度')),
('lng', models.DecimalField(decimal_places=9, max_digits=12, verbose_name='经度')),
],
options={
'verbose_name': 'house',
'verbose_name_plural': 'house',
},
),
migrations.CreateModel(
name='Constructure',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.CharField(max_length=8, verbose_name='接口版本')),
('title', models.CharField(max_length=12, verbose_name='接口info')),
('layout', models.CharField(max_length=10, verbose_name='建筑类型')),
('num', models.IntegerField(verbose_name='数量')),
],
options={
'verbose_name': 'constructureinfo',
'verbose_name_plural': 'constructureinfo',
},
),
migrations.CreateModel(
name='Decortion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.CharField(max_length=8, verbose_name='接口版本')),
('title', models.CharField(max_length=12, verbose_name='接口info')),
('layout', models.CharField(max_length=10, verbose_name='装修情况')),
('num', models.IntegerField(verbose_name='数量')),
('mean_price', models.DecimalField(decimal_places=3, max_digits=8, verbose_name='总价均价')),
('mean_unit_price', models.DecimalField(decimal_places=3, max_digits=8, verbose_name='单价均价')),
],
options={
'verbose_name': 'decorationinfo',
'verbose_name_plural': 'decorationinfo',
},
),
migrations.CreateModel(
name='Elevator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.CharField(max_length=8, verbose_name='接口版本')),
('title', models.CharField(max_length=12, verbose_name='接口info')),
('has_el_num', models.IntegerField(verbose_name='存在电梯的房源数')),
('no_el_num', models.IntegerField(verbose_name='不存在电梯的房源数')),
('has_mean_price', models.DecimalField(decimal_places=3, max_digits=8, verbose_name='总价均价')),
('has_mean_unit_price', models.DecimalField(decimal_places=3, max_digits=8, verbose_name='单价均价')),
('no_mean_price', models.DecimalField(decimal_places=3, max_digits=8, verbose_name='总价均价')),
('no_mean_unit_price', models.DecimalField(decimal_places=3, max_digits=8, verbose_name='单价均价')),
],
options={
'verbose_name': 'elevatorinfo',
'verbose_name_plural': 'elevatorinfo',
},
),
migrations.CreateModel(
name='Floor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.CharField(max_length=8, verbose_name='接口版本')),
('title', models.CharField(max_length=12, verbose_name='接口info')),
('floor', models.CharField(max_length=20, verbose_name='楼层')),
('num', models.IntegerField(verbose_name='数量')),
],
options={
'verbose_name': 'floorinfo',
'verbose_name_plural': 'floorinfo',
},
),
migrations.CreateModel(
name='Layout',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.CharField(max_length=8, verbose_name='接口版本')),
('title', models.CharField(max_length=12, verbose_name='接口info')),
('layout', models.CharField(max_length=20, verbose_name='户型')),
('num', models.IntegerField(verbose_name='数量')),
],
options={
'verbose_name': 'layoutinfo',
'verbose_name_plural': 'layoutinfo',
},
),
migrations.CreateModel(
name='Orientation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.CharField(max_length=8, verbose_name='接口版本')),
('title', models.CharField(max_length=12, verbose_name='接口info')),
('layout', models.CharField(max_length=15, verbose_name='房屋朝向')),
('num', models.IntegerField(verbose_name='数量')),
],
options={
'verbose_name': 'orientationinfo',
'verbose_name_plural': 'orientationinfo',
},
),
migrations.CreateModel(
name='Purposes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.CharField(max_length=8, verbose_name='接口版本')),
('title', models.CharField(max_length=12, verbose_name='接口info')),
('layout', models.CharField(max_length=10, verbose_name='房屋用途')),
('num', models.IntegerField(verbose_name='数量')),
],
options={
'verbose_name': 'purposesinfo',
'verbose_name_plural': 'purposesinfo',
},
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.CharField(max_length=8, verbose_name='接口版本')),
('title', models.CharField(max_length=12, verbose_name='接口info')),
('layout', models.CharField(max_length=10, verbose_name='行政区划')),
('num', models.IntegerField(verbose_name='数量')),
('mean_price', models.DecimalField(decimal_places=3, max_digits=8, verbose_name='总价均价')),
('mean_unit_price', models.DecimalField(decimal_places=3, max_digits=8, verbose_name='单价均价')),
],
options={
'verbose_name': 'regioninfo',
'verbose_name_plural': 'regioninfo',
},
),
]
| [
"zj20162325@163.com"
] | zj20162325@163.com |
0e65d65def490ae82f7776d64b2d70c86abf7d55 | b838b2ecce195d1293b0ff485b3d76bc0adb5d70 | /st36/Film.py | 1b7fe8b7ddd61874118e09afd00ffd4cf7fdb2cb | [] | no_license | BelyaevaSveta/ASM.17.Lab1 | e4d37c8b7600cf3f7bf787dd396678315b19d57d | ee20998877a4e511a778a7afd52cb977b55aa2f5 | refs/heads/master | 2021-05-07T04:52:05.904334 | 2017-11-28T10:30:42 | 2017-11-28T10:30:42 | 111,489,081 | 0 | 0 | null | 2017-11-21T02:36:19 | 2017-11-21T02:36:19 | null | UTF-8 | Python | false | false | 1,363 | py | from .Actor import Actor
import pickle
class Film:
def __init__(self):
self.enter_film_name()
self.actors = {}
def enter_film_name(self):
self.film_name = input('Enter new film name: ')
def print_film_name(self):
print('Film name is %s' % self.film_name)
def print_all_actors(self):
for name, actor_object in self.actors.items():
actor_object.print_name()
def add_new_actor(self):
new_actor = Actor()
self.actors[new_actor.name] = new_actor
def edit_actor(self):
print('\nType name of actor you want to edit:\nAvailable actors:')
for actor_name, actor_object in self.actors.items():
print(actor_name)
actor_name = input()
self.actors[actor_name].edit_bio()
def remove_actor(self):
print('\nType name of actor you want to remove:\nAvailable actors:')
for actor_name, actor_object in self.actors.items():
print(actor_name)
actor_name = input()
self.actors.pop(actor_name)
def remove_all_actors(self):
self.actors.clear()
def save_actors_to_file(self):
with open('Film.txt', 'wb') as f:
pickle.dump(self.actors, f)
def load_actors_from_file(self):
with open('Film.txt', 'rb') as f:
self.actors = pickle.load(f)
| [
"noreply@github.com"
] | noreply@github.com |
1df207e3ac0e742b82e1992b634d3bbf48dbfbe2 | 70331cc864c44a3f30883d40073f048d1a5a8e3f | /main.py | 69d7007bdd8659110ec9a209219a24a98be176d3 | [] | no_license | rmar6544/Day-7-Hangman-3-Start | 5c0f310925b8ba4b3f87db019c7f25e94865cfd8 | ea538f6cff8433bc9ee90c32794593c938d8a4e5 | refs/heads/master | 2023-04-04T23:43:08.966646 | 2021-04-07T23:06:28 | 2021-04-07T23:06:28 | 355,696,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | #Step 3
import random
word_list = ["aardvark", "baboon", "camel"]
chosen_word = random.choice(word_list)
word_length = len(chosen_word)
#Testing code
print(f'Pssst, the solution is {chosen_word}.')
#Create blanks
display = []
for _ in range(word_length):
display += "_"
#TODO-1: - Use a while loop to let the user guess again. The loop should only stop once the user has guessed all the letters in the chosen_word and 'display' has no more blanks ("_"). Then you can tell the user they've won.
game_on = True
while game_on == True:
guess = input("Guess a letter: ").lower()
#Check guessed letter
for position in range(word_length):
letter = chosen_word[position]
# print(f"Current position: {position}\n Current letter: {letter}\n Guessed letter: {guess}")
if letter == guess:
display[position] = letter
print(display)
print(chosen_word)
if chosen_word == "".join(display):
game_on = False
print("you win") | [
""
] | |
1c5c459904e78434ad22ecd6cb261de1f2c29f83 | 4c378e60497ce892f5c25cbe7d6fc439124141ea | /solutions/226. Invert Binary Tree.py | 96d9017e0473c9f2cd4b047c540206dea030ce6d | [] | no_license | udhavsethi/play | 8008369d4c582f6db33a5737a3d9d68e5afc38c3 | 885d5b6e0a05051554fb61eb501a44a785098f3a | refs/heads/master | 2021-11-27T11:40:52.847607 | 2021-11-23T18:49:40 | 2021-11-23T18:49:40 | 147,461,456 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
if root:
root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)
return root
| [
"udhavsethi@users.noreply.github.com"
] | udhavsethi@users.noreply.github.com |
92cd077f58237f62504c76208cda5a489cb6ee9f | 525252226b3c6b76fd0916084e9de7de9db2ae60 | /backend/tests/test_gasfeed.py | d13ec3c809b000f2b5ee40bcdbc9ac1753047945 | [] | no_license | james4388/gas-price | 8215f429249d2985ea964eea6ea7e63b74faf7b8 | 3e380146a8698c84443bba2b11f3829710ad0e69 | refs/heads/master | 2022-12-02T14:17:15.893681 | 2020-08-06T05:29:25 | 2020-08-06T05:29:25 | 278,273,476 | 3 | 0 | null | 2020-07-15T18:17:16 | 2020-07-09T05:41:30 | JavaScript | UTF-8 | Python | false | false | 1,221 | py | from flask import url_for
from tests import BaseTestCase
from gasprice import config, create_app
class GasFeedTestCase(BaseTestCase):
def test_brands(self):
with self.app.app_context():
resp = self.client.get(
url_for(
'GasFeed.station_brands'
)
)
data = resp.json
self.assertIsInstance(
data['stations'], list, 'Should stations is a list')
def test_nearby_stations(self):
with self.app.app_context():
resp = self.client.get(
url_for(
'GasFeed.nearby_stations',
lat='iajsd',
lon='sddsf'
)
)
data = resp.json
self.assertEqual(resp.status_code, 422, 'Validation should work')
resp = self.client.get(
url_for(
'GasFeed.nearby_stations',
lat='37.4131208',
lon='-122.0908522'
)
)
data = resp.json
self.assertIsInstance(
data['stations'], list, 'Should stations is a list') | [
"nhutrinh@leafgroup.com"
] | nhutrinh@leafgroup.com |
e706179c11effcfa8f133d63d2655724fca4d1e9 | 0005e05b9d8b8ad0d3c3c0539b2ded9db6e9f1dd | /codechef_client/models/tag.py | 4cdd6e64295823ef02e369ae6ce1a056970ea646 | [] | no_license | termicoder/codechef-client-lib | a3e3de2b300355c5daa5ed3fad03a9859af13d86 | 74d6b21787c75a987e3451751f5554e4cc6cf469 | refs/heads/master | 2020-03-27T17:58:45.298121 | 2018-09-30T18:03:14 | 2018-09-30T18:03:14 | 146,889,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,094 | py | # coding: utf-8
"""
CodeChef API
CodeChef API to support different applications. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Tag(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'tag': 'str',
'type': 'str',
'count': 'int'
}
attribute_map = {
'tag': 'tag',
'type': 'type',
'count': 'count'
}
def __init__(self, tag=None, type=None, count=None): # noqa: E501
"""Tag - a model defined in Swagger""" # noqa: E501
self._tag = None
self._type = None
self._count = None
self.discriminator = None
if tag is not None:
self.tag = tag
if type is not None:
self.type = type
if count is not None:
self.count = count
@property
def tag(self):
"""Gets the tag of this Tag. # noqa: E501
Value # noqa: E501
:return: The tag of this Tag. # noqa: E501
:rtype: str
"""
return self._tag
@tag.setter
def tag(self, tag):
"""Sets the tag of this Tag.
Value # noqa: E501
:param tag: The tag of this Tag. # noqa: E501
:type: str
"""
self._tag = tag
@property
def type(self):
"""Gets the type of this Tag. # noqa: E501
author/tag # noqa: E501
:return: The type of this Tag. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Tag.
author/tag # noqa: E501
:param type: The type of this Tag. # noqa: E501
:type: str
"""
self._type = type
@property
def count(self):
"""Gets the count of this Tag. # noqa: E501
Count of problems with this tag # noqa: E501
:return: The count of this Tag. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this Tag.
Count of problems with this tag # noqa: E501
:param count: The count of this Tag. # noqa: E501
:type: int
"""
self._count = count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Tag):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"diveshuttamchandani@gmail.com"
] | diveshuttamchandani@gmail.com |
3084a00a20347e2a00d4110f699cde1dff559340 | d1a5d8bdaf7c27daf3a5aebde84be7cefa371723 | /dlc2_model_training (copy).py | 115e631869346886b255211c96b48c7808389be2 | [] | no_license | sambeettiady/dlc2_he | 1c7b811f3294022edaeb59494a19731badfbf56d | c54d5365b742951524458b92d0b31b028f9e195d | refs/heads/master | 2020-03-30T06:07:49.117344 | 2018-09-29T08:05:31 | 2018-09-29T08:05:31 | 150,840,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,333 | py | #Import image transformation packages
import skimage
from skimage import filters, io, exposure, color, segmentation, feature, morphology
from skimage.feature import canny
from scipy import ndimage as ndi
from scipy import misc
import skimage.transform as skt
#Import required packages
import numpy as np
import pandas as pd
import os
import glob
#Import Visualisation packages
import matplotlib.pyplot as plt
import graphviz
#Import sklearn modules
import sklearn.metrics as skm
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import class_weight
#Import Keras
import keras
from keras import metrics
from keras.models import Sequential
from keras.optimizers import Adam
from keras.losses import categorical_crossentropy,sparse_categorical_crossentropy
from keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose, Input, Concatenate, Dense, Dropout, Flatten, Activation, Merge
from keras.models import Model,load_model
from keras.utils import plot_model
from keras import callbacks
import keras.backend as K
#Change working directory
os.chdir('/home/sambeet/data/hackerearth/deep learning challenge 2/')
#Read train, test and validation packages
#split_string = lambda x : x.split('_')[1]
train_data = pd.read_csv('csv/train_data.csv')
test_data = pd.read_csv('csv/test_data.csv')
val_data = pd.read_csv('csv/val_data.csv')
#train_data.detected = train_data.detected.apply(split_string)
#test_data.detected = test_data.detected.apply(split_string)
#all_data = pd.read_csv('csv/train.csv')
encoder = LabelEncoder()
encoder.fit(train_data.detected.values)
encoded_2 = encoder.transform(train_data.detected.values)
class_weights = class_weight.compute_class_weight('balanced', np.unique(encoded_2), encoded_2)
labels_dict = dict()
for key in np.unique(encoded_2):
labels_dict[key] = class_weights[key]
def data_generator(batch_size = 8, dataset = 'train'):
if dataset == 'train':
df = train_data.copy()
elif dataset == 'test':
df = test_data.copy()
else:
df = val_data.copy()
df = df.sample(frac=1).reset_index(drop=True)
image_list = list(df.image_name.values)
numeric_variables = df[['age','gender_M','view_position']].values
# encode class values as integers
encoded_Y = encoder.transform(df.detected.values)
# convert integers to dummy variables (i.e. one hot encoded)
labels = keras.utils.to_categorical(encoded_Y)
while 1:
for batch_num in range(len(image_list)//batch_size):
start_index = batch_num*batch_size
end_index = (batch_num + 1)*batch_size
batch_images = image_list[start_index:end_index]
numeric_data_1 = numeric_variables[start_index:end_index]
images = np.empty((batch_size, 1024, 1024, 1), dtype = np.float32)
numeric_data_2 = np.empty((batch_size,8), dtype = np.float32)
detected = labels[start_index:end_index]
for i,image_name in zip(range(batch_size),batch_images):
images[i,...,0] = misc.imread('train/train_/' + image_name,flatten=True)/255.
numeric_data_2[i,...] = np.histogram(images[i,...], bins = [0,0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.8, 0.9,1])[0][0:8]/float(1024*1024)
i = i + 1
numeric_data = np.hstack((numeric_data_1,numeric_data_2))
yield [images,numeric_data], detected
train_data_gen = data_generator(2,'train')
test_data_gen = data_generator(2,'test')
input1 = Input(shape=(1024, 1024, 1))
cnn1 = Conv2D(32, (3, 3), activation='relu',padding='same')(input1)
cnn2 = Conv2D(32, (3, 3), activation='relu',padding='same')(cnn1)
pool1 = MaxPooling2D(pool_size=(2, 2))(cnn2)
drop1 = Dropout(0.25)(pool1)
cnn3 = Conv2D(64, (3, 3), activation='relu',padding='same')(drop1)
cnn4 = Conv2D(64, (3, 3), activation='relu',padding='same')(cnn3)
pool2 = MaxPooling2D(pool_size=(2, 2))(cnn4)
drop2 = Dropout(0.25)(pool2)
cnn5 = Conv2D(128, (3, 3), activation='relu',padding='same')(drop2)
cnn6 = Conv2D(128, (3, 3), activation='relu',padding='same')(cnn5)
pool3 = MaxPooling2D(pool_size=(2, 2))(cnn6)
drop3 = Dropout(0.25)(pool3)
cnn7 = Conv2D(256, (3, 3), activation='relu',padding='same')(drop3)
cnn8 = Conv2D(256, (3, 3), activation='relu',padding='same')(cnn7)
pool4 = MaxPooling2D(pool_size=(2, 2))(cnn8)
drop4 = Dropout(0.25)(pool4)
cnn9 = Conv2D(512, (3, 3), activation='relu',padding='same')(drop4)
cnn10 = Conv2D(512, (3, 3), activation='relu',padding='same')(cnn9)
pool5 = MaxPooling2D(pool_size=(2, 2))(cnn10)
drop5 = Dropout(0.25)(pool5)
cnn11 = Conv2D(512, (3, 3), activation='relu',padding='same')(drop5)
cnn12 = Conv2D(512, (5, 5), activation='relu',padding='same')(cnn11)
cnn13 = Conv2D(512, (7, 7), activation='relu',padding='same')(cnn12)
pool6 = MaxPooling2D(pool_size=(2, 2))(cnn13)
drop6 = Dropout(0.25)(pool6)
flatten = Flatten()(drop6)
input2 = Input(shape=(11,))
merged_input = keras.layers.concatenate([flatten, input2])
dense1 = Dense(256, activation='relu')(merged_input)
drop7 = Dropout(0.25)(dense1)
dense2 = Dense(256, activation='relu')(drop7)
drop8 = Dropout(0.25)(dense2)
output = Dense(14, activation='softmax')(drop8)
model = Model(inputs=[input1, input2], outputs=output)
model.compile(loss='categorical_crossentropy', optimizer=Adam(5e-2),metrics=['accuracy'])
model.summary()
#plot_model(model, to_file='dl2_model_1.png')
model_checkpoint = callbacks.ModelCheckpoint(filepath = 'logs/vgg13.{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', verbose=1, save_best_only=False, save_weights_only=False, mode='auto', period=1)
tensorboard = callbacks.TensorBoard(log_dir='logs', histogram_freq=0, batch_size=2, write_graph=True, write_grads=True, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
csv_logger = callbacks.CSVLogger('logs/training.log')
#model.load_weights('dlc2_vgg_13_2fc_1.hd5')#,custom_objects={'dice_loss':dice_loss,'dice_coef':dice_coef})
history = model.fit_generator(train_data_gen, epochs=20, steps_per_epoch= 464*4*4, verbose = 1,
callbacks=[model_checkpoint,tensorboard,csv_logger],class_weight = labels_dict,
validation_data = test_data_gen, validation_steps = 932)
model.save('dlc2_vgg_13_2fc_1.hd5')
| [
"noreply@github.com"
] | noreply@github.com |
dffede7cbbfa98929853b8241f6a1e945007f560 | e5fb2d912415c302221604126afa7cbbb0a039c0 | /keras_gym/policies/test_special.py | d19afe8e363fc4399127c8f76a179ab42414bef4 | [
"MIT"
] | permissive | KristianHolsheimer/keras-gym | fc034025a1180b1124fe1a25886b54088d2f3552 | 0296ddcc8685e1ce732c3173caaa0fd25af9ef58 | refs/heads/master | 2021-06-28T21:57:50.122753 | 2020-09-30T04:29:15 | 2020-09-30T04:29:15 | 174,637,157 | 17 | 5 | MIT | 2019-08-02T22:48:41 | 2019-03-09T02:09:03 | Python | UTF-8 | Python | false | false | 1,012 | py | from gym.envs.toy_text.frozen_lake import FrozenLakeEnv, RIGHT, DOWN
from .special import UserInputPolicy
class MockInputFunction:
def __init__(self, return_value=None):
self.return_value = return_value
self._orig_input_fn = __builtins__['input']
def _mock_input_fn(self, prompt):
print(prompt + str(self.return_value))
return self.return_value
def __enter__(self):
__builtins__['input'] = self._mock_input_fn
def __exit__(self, type, value, traceback):
__builtins__['input'] = self._orig_input_fn
class TestUserInputPolicy:
def test_expected(self):
env = FrozenLakeEnv(is_slippery=False)
policy = UserInputPolicy(env)
s = env.reset()
env.render()
for i in [RIGHT, RIGHT, DOWN, DOWN, DOWN, RIGHT]:
with MockInputFunction(return_value=i):
a = policy(s)
s, r, done, info = env.step(a)
env.render()
if done:
break
| [
"kristian.holsheimer@gmail.com"
] | kristian.holsheimer@gmail.com |
d5c3dd5a7e5b2f8332dda72ef2737f66f62832b9 | 4f35782ac42f1cc65581b6f21b7a80be8f0164cc | /resources/todo_item.py | 86400c55f8c6821098187dc26050847564cb707d | [] | no_license | Rhemm/berry | 2da3cb202040b130ff566a78f6fb98efac1cace4 | 0c908bc4cbbdf770ac8c5b66d0cbf2b115ac456b | refs/heads/master | 2020-04-23T21:04:40.382398 | 2019-02-21T04:47:18 | 2019-02-21T04:47:18 | 171,459,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,589 | py | # -*- coding: utf-8 -*-
# pylint: disable=R0201
"""
Module providing todo item resource.
"""
from flask_restful import Resource
from flask_restful.reqparse import RequestParser
from flask_restful.inputs import date, boolean
from bson import ObjectId
from application import mongo
from common.utils import validate_id
item_put_parser = RequestParser()
item_put_parser.add_argument("text", type=str, location="json")
item_put_parser.add_argument("dueDate", type=date, location="json")
item_put_parser.add_argument("finished", type=boolean, location="json")
item_post_parser = item_put_parser.copy()
for arg in item_post_parser.args:
arg.required = True
db = mongo.db.todo
class TodoItem(Resource):
"""
Provides methods for updating and deleting todo items.
Usage::
For updating todo item:
curl -X PUT http://127.0.0.1:5000/todolists/<list_id>/items/<item_id> \
-H "Content-Type: application/json" \
-d '{"text": "sometext", "due_date": "2019-2-12", "finished": "true" }'
For deleting todo item:
curl -X DELETE http://127.0.0.1:5000/todolists/<list_id>/items/<item_id> \
-H "Content-Type: application/json"
"""
def put(self, list_id, item_id):
"""
Updates todo item.
:param list_id: ID of related todo list.
:param item_id: ID of todo item.
"""
if not validate_id(list_id, item_id):
return {"msg": "Invalid id, it should be 24-character string"}, 400
args = item_put_parser.parse_args()
result = db.update_one(
{"_id": ObjectId(list_id), "todos._id": ObjectId(item_id)},
{
"$set": {
"todos.$." + key: args[key] for key in args if args[key] is not None
}
}
)
if result.matched_count == 0:
return {"msg": "No such list found"}, 404
if result.modified_count == 0:
return {"msg": "No item found"}, 404
return {"msg": "Successfuly updated"}
def delete(self, list_id, item_id):
"""
Deletes todo item.
:param list_id: ID of related todo list.
:param item_id: ID of todo item.
"""
if not validate_id(list_id, item_id):
return {"msg": "Invalid id, it should be 24-character string"}, 400
result = db.update_one(
{"_id": ObjectId(list_id)},
{"$pull": {"todos": {"_id": ObjectId(item_id)}}}
)
if result.matched_count == 0:
return {"msg": "No such list found"}, 404
if result.modified_count == 0:
return {"msg": "No item found"}, 404
return {"msg": "Successfuly deleted"}, 204
class TodoItemCollection(Resource):
"""
Provides method for adding new todo item.
Usage::
For creating new todo item:
curl -X POST http://127.0.0.1:5000/todolists/<list_id>/items \
-d '{"text": "sometext", "due_date": "2019-2-12", "finished": "true" }'
"""
def post(self, list_id):
"""
Creates todo item
:param list_id: ID of related todo list.
"""
if not validate_id(list_id):
return {"msg": "Invalid id, it should be 24-character string"}, 400
args = item_post_parser.parse_args()
result = db.update_one(
{"_id": ObjectId(list_id)},
{"$push": {"todos": {"_id": ObjectId(), **args}}}
)
if result.matched_count == 0:
return {"msg": "No such list found"}, 404
return {"msg": "Successfuly added"}, 201
| [
"yslfhe@gmail.com"
] | yslfhe@gmail.com |
8001d2f7a9d565237552aea7cbf4fd1650d437b9 | 912196d86c93c29b3b031792e3cf886420a0fbde | /core/rnn/rnn_minibatch_test.py | c8e233c8e50eb5ebf1c20a8a41d63c0c12daa8c2 | [
"Apache-2.0"
] | permissive | brian-lau/guac | fce363745c9a778733f1df765fd9c3b832fdeef4 | c3db6cdbe56a1cb04486650ea5473287ba159ad4 | refs/heads/master | 2020-05-29T11:55:34.494957 | 2015-10-28T02:17:34 | 2015-10-28T02:17:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,065 | py | # adapted from https://github.com/lisa-lab/DeepLearningTutorials
from collections import OrderedDict
import copy
import os
import re
import random
import timeit
from hyperopt import STATUS_OK
import numpy as np
import pandas as pd
from scipy import stats
import theano
from theano import tensor as T
import common
from ..util import defines
from ..util import file_handling as fh
from ..experiment import reusable_holdout
from ..experiment import evaluation
# Otherwise the deepcopy fails
import sys
sys.setrecursionlimit(5000)
THEANO_FLAGS='floatX=float32'
# utils functions
def shuffle(lol, seed=None):
'''
lol :: list of list as input
seed :: seed the shuffling
shuffle inplace each list in the same order
'''
for l in lol:
random.seed(seed)
random.shuffle(l)
class RNN(object):
    '''
    Elman-style recurrent neural network for multi-label sentence
    classification.  Supports basic/GRU/LSTM cells, optional
    bidirectionality, and several strategies for pooling the per-token
    outputs into a single sentence-level prediction.  Operates on
    minibatches of zero-padded, masked sequences.
    '''
    def __init__(self, nh, nc, ne, de, cs, init_scale=0.2, initial_embeddings=None,
                 rnn_type='basic',  # 'basic', 'GRU', or 'LSTM'
                 pooling_method='max',  # 'max', 'mean', 'attention1' or 'attention2',
                 extra_input_dims=0, train_embeddings=True, clip_gradients=False,
                 bidirectional=True, bi_combine='concat'  # 'concat', 'sum', or 'mean'
                 ):
        '''
        nh :: dimension of the hidden layer
        nc :: number of classes
        ne :: number of word embeddings in the vocabulary
        de :: dimension of the word embeddings
        cs :: word window context size
        init_scale :: half-width of the uniform range used to initialize weights
        initial_embeddings :: optional (ne, de) array of pretrained embeddings
        extra_input_dims :: number of per-document features appended to every
                            token's input vector (0 disables)
        train_embeddings :: if True, the embedding matrix receives gradient updates
        clip_gradients :: if True, clip each gradient elementwise to [-1, 1]
        '''
        # initialize parameters
        # dx is the full per-token input size: one embedding per context-window
        # slot, plus any extra per-document features.
        dx = de * cs
        if extra_input_dims > 0:
            dx += extra_input_dims
        # bi doubles the hidden size seen by downstream layers only when the
        # forward and backward passes are concatenated.
        bi = 1
        if bidirectional and bi_combine == 'concat':
            bi = 2
        if initial_embeddings is None:
            self.emb = theano.shared(name='embeddings',
                                     value=init_scale * np.random.uniform(-1.0, 1.0,
                                     (ne, de)).astype(theano.config.floatX))
            #(ne+1, de)) # add one for padding at the end
        else:
            self.emb = theano.shared(name='embeddings', value=initial_embeddings.astype(theano.config.floatX))
        if extra_input_dims > 0:
            self.W_drld = theano.shared(name='W_drld', value=init_scale * np.random.uniform(-1.0, 1.0, (1, nh))
                                        .astype(theano.config.floatX))
        # common parameters (feeding into hidden node)
        self.W_xh = theano.shared(name='W_xh', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
                                  .astype(theano.config.floatX))
        self.W_hh = theano.shared(name='W_hh', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
                                  .astype(theano.config.floatX))
        self.b_h = theano.shared(name='b_h', value=np.array(np.random.uniform(0.0, 1.0, nh),
                                                            dtype=theano.config.floatX))
        # output layer parameters
        self.W_s = theano.shared(name='W_s', value=init_scale * np.random.uniform(-1.0, 1.0, (nh * bi, nc))
                                 .astype(theano.config.floatX))
        self.b_s = theano.shared(name='b_s', value=np.zeros(nc, dtype=theano.config.floatX))
        # temporary parameters
        #self.h_i_f = theano.shared(name='h_i_f', value=np.zeros((2, nh), dtype=theano.config.floatX))
        #if bidirectional:
        #    self.h_i_r = theano.shared(name='h_i_r', value=np.zeros(nh, dtype=theano.config.floatX))
        # Attention parameters
        if pooling_method == 'attention1' or pooling_method == 'attention2':
            self.W_a = theano.shared(name='W_a', value=init_scale * np.random.uniform(-1.0, 1.0, (bi*nh, 1))
                                     .astype(theano.config.floatX))
            self.b_a = theano.shared(name='b_a', value=0.0)
        # GRU parameters (reset gate r and update gate z)
        if rnn_type == 'GRU':
            self.W_xr = theano.shared(name='W_xr', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
                                      .astype(theano.config.floatX))
            self.W_hr = theano.shared(name='W_hr', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
                                      .astype(theano.config.floatX))
            self.b_r = theano.shared(name='b_r', value=np.zeros(nh, dtype=theano.config.floatX))
            self.W_xz = theano.shared(name='W_xz', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
                                      .astype(theano.config.floatX))
            self.W_hz = theano.shared(name='W_hz', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
                                      .astype(theano.config.floatX))
            self.b_z = theano.shared(name='b_z', value=np.zeros(nh, dtype=theano.config.floatX))
        # LSTM parameters
        if rnn_type == 'LSTM':
            # forget gate (needs special initialization)
            self.W_xf = theano.shared(name='W_xf', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
                                      .astype(theano.config.floatX))
            self.W_hf = theano.shared(name='W_hf', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
                                      .astype(theano.config.floatX))
            self.W_cf = theano.shared(name='W_cf', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
                                      .astype(theano.config.floatX))
            self.b_f = theano.shared(name='b_f', value=np.array(np.random.uniform(0.0, 1.0, nh),
                                                                dtype=theano.config.floatX))
            # input gate
            self.W_xi = theano.shared(name='W_xi', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
                                      .astype(theano.config.floatX))
            self.W_hi = theano.shared(name='W_hi', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
                                      .astype(theano.config.floatX))
            self.W_ci = theano.shared(name='W_ci', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
                                      .astype(theano.config.floatX))
            self.b_i = theano.shared(name='b_i', value=np.zeros(nh, dtype=theano.config.floatX))
            # output gate
            self.W_xo = theano.shared(name='W_xo', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
                                      .astype(theano.config.floatX))
            self.W_ho = theano.shared(name='W_ho', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
                                      .astype(theano.config.floatX))
            self.W_co = theano.shared(name='W_co', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
                                      .astype(theano.config.floatX))
            self.b_o = theano.shared(name='b_o', value=np.zeros(nh, dtype=theano.config.floatX))
            # use normal ->hidden weights for memory cell
            # temp
            #self.c_i_f = theano.shared(name='c_i_f', value=np.zeros(nh, dtype=theano.config.floatX))
            #if bidirectional:
            #    self.c_i_r = theano.shared(name='c_i_r', value=np.zeros(nh, dtype=theano.config.floatX))
        # collect the parameters that will receive gradient updates
        self.params = [self.W_xh, self.W_hh, self.b_h,
                       self.W_s, self.b_s]
        #self.params += [self.h_i_f]
        if train_embeddings:
            self.params += [self.emb]
        # NOTE(review): this condition never matches -- pooling_method is
        # 'attention1'/'attention2' above, so W_a/b_a are never added to
        # self.params (and therefore never trained). Looks like a bug; confirm.
        if pooling_method == 'attention':
            self.params += [self.W_a, self.b_a]
        if rnn_type == 'GRU':
            self.params += [self.W_xr, self.W_hr, self.b_r,
                            self.W_xz, self.W_hz, self.b_z]
        if rnn_type == 'LSTM':
            self.params += [self.W_xf, self.W_hf, self.W_cf, self.b_f,
                            self.W_xi, self.W_hi, self.W_ci, self.b_i,
                            self.W_xo, self.W_ho, self.W_co, self.b_o]
            #self.c_i_f]
            #if bidirectional:
            #    self.params += [self.c_i_r]
        #if bidirectional:
        #    self.params += [self.h_i_r]
        # create an X object based on the size of the object at the index [elements, emb_dim * window]
        idxs = T.tensor3('idxs', dtype='int32')
        if extra_input_dims:
            extra = T.tensor3('extra')
            # broadcast the per-document extras across all sequence positions
            extra_3d = extra.repeat(idxs.shape[0], axis=0)
            #x = T.concatenate([self.emb[idxs].reshape((idxs.shape[0], de*cs)),
            #                   T.repeat(extra, idxs.shape[0], axis=0)], axis=1)
            #temp = T.printing.Print('temp')(self.emb[idxs].reshape((idxs.shape[0], idxs.shape[1], de*cs)))
            temp = self.emb[idxs].reshape((idxs.shape[0], idxs.shape[1], de*cs))
            x = T.concatenate([temp, extra_3d], axis=2)
        else:
            #x = T.printing.Print('x')(self.emb[idxs])
            x = self.emb[idxs].reshape((idxs.shape[0], idxs.shape[1], de*cs))  # [n_elements, minibatch_size, emb_dim]
            #x = self.emb[idxs]
        y = T.imatrix('y')
        mask = T.tensor3('mask')
        # expand the 0/1 mask across the hidden dimension so it can gate h_t
        mask_3d = mask.repeat(nh, axis=2)
        minibatch_size = T.iscalar()
        # Each recurrence applies the mask so that, past the end of a padded
        # sequence, the last real hidden state is carried forward unchanged.
        def recurrence_basic(x_t, mask_t, h_tm1):
            #h_t = theano.printing.Print('h_t')(T.nnet.sigmoid(T.dot(x_t, self.W_xh) + T.dot(h_tm1, self.W_hh) + self.b_h))
            h_t = T.nnet.sigmoid(T.dot(x_t, self.W_xh) + T.dot(h_tm1, self.W_hh) + self.b_h)
            #masked_h_t = T.printing.Print('masked_h_t')(mask_t * h_t + (1 - mask_t) * h_tm1)
            # apply the mask to propogate the last (unmasked) element in sequence to the end
            return mask_t * h_t + (1 - mask_t) * h_tm1
            #return h_t
        def recurrence_basic_reverse(x_t, mask_t, h_tp1):
            h_t = T.nnet.sigmoid(T.dot(x_t, self.W_xh) + T.dot(h_tp1, self.W_hh) + self.b_h)
            return mask_t * h_t + (1 - mask_t) * h_tp1
        def recurrence_gru(x_t, mask_t, h_tm1):
            r_t = T.nnet.sigmoid(T.dot(x_t, self.W_xr) + T.dot(h_tm1, self.W_hr) + self.b_r)
            z_t = T.nnet.sigmoid(T.dot(x_t, self.W_xz) + T.dot(h_tm1, self.W_hz) + self.b_z)
            g_t = T.tanh(T.dot(x_t, self.W_xh) + r_t * T.dot(h_tm1, self.W_hh) + self.b_h)
            h_t = (1 - z_t) * h_tm1 + z_t * g_t
            return mask_t * h_t + (1 - mask_t) * h_tm1
        def recurrence_gru_reverse(x_t, mask_t, h_tp1):
            r_t = T.nnet.sigmoid(T.dot(x_t, self.W_xr) + T.dot(h_tp1, self.W_hr) + self.b_r)
            z_t = T.nnet.sigmoid(T.dot(x_t, self.W_xz) + T.dot(h_tp1, self.W_hz) + self.b_z)
            g_t = T.tanh(T.dot(x_t, self.W_xh) + r_t * T.dot(h_tp1, self.W_hh) + self.b_h)
            h_t = (1 - z_t) * h_tp1 + z_t * g_t
            return mask_t * h_t + (1 - mask_t) * h_tp1
        def recurrence_lstm(x_t, mask_t, h_tm1, c_tm1):
            i_t = T.nnet.sigmoid(T.dot(x_t, self.W_xi) + T.dot(h_tm1, self.W_hi) + T.dot(c_tm1, self.W_ci) + self.b_i)
            f_t = T.nnet.sigmoid(T.dot(x_t, self.W_xf) + T.dot(h_tm1, self.W_hf) + T.dot(c_tm1, self.W_cf) + self.b_f)
            d_t = T.tanh(T.dot(x_t, self.W_xh) + T.dot(h_tm1, self.W_hh) + self.b_h)
            c_t = f_t * c_tm1 + i_t * d_t
            o_t = T.nnet.sigmoid(T.dot(x_t, self.W_xo) + T.dot(h_tm1, self.W_ho) + T.dot(c_t, self.W_co) + self.b_o)
            h_t = o_t * c_t
            return [mask_t * h_t + (1 - mask_t) * h_tm1, mask_t * c_t + (1 - mask_t) * c_tm1]
        def recurrence_lstm_reverse(x_t, mask_t, h_tp1, c_tp1):
            i_t = T.nnet.sigmoid(T.dot(x_t, self.W_xi) + T.dot(h_tp1, self.W_hi) + T.dot(c_tp1, self.W_ci) + self.b_i)
            f_t = T.nnet.sigmoid(T.dot(x_t, self.W_xf) + T.dot(h_tp1, self.W_hf) + T.dot(c_tp1, self.W_cf) + self.b_f)
            d_t = T.tanh(T.dot(x_t, self.W_xh) + T.dot(h_tp1, self.W_hh) + self.b_h)
            c_t = f_t * c_tp1 + i_t * d_t
            o_t = T.nnet.sigmoid(T.dot(x_t, self.W_xo) + T.dot(h_tp1, self.W_ho) + T.dot(c_t, self.W_co) + self.b_o)
            h_t = o_t * c_t
            return [mask_t * h_t + (1 - mask_t) * h_tp1, mask_t * c_t + (1 - mask_t) * c_tp1]
        # build the forward (and optionally backward) scan for the chosen cell
        h_r = None
        if rnn_type == 'GRU':
            h_f, _ = theano.scan(fn=recurrence_gru, sequences=[x, mask_3d],
                                 outputs_info=[T.alloc(np.array(0.), minibatch_size, nh)],
                                 n_steps=x.shape[0])
            if bidirectional:
                h_r, _ = theano.scan(fn=recurrence_gru_reverse, sequences=[x, mask_3d],
                                     outputs_info=[T.alloc(np.array(0.), minibatch_size, nh)],
                                     go_backwards=True)
        elif rnn_type == 'LSTM':
            [h_f, c_f], _ = theano.scan(fn=recurrence_lstm, sequences=[x, mask_3d],
                                        outputs_info=[T.alloc(np.array(0.), minibatch_size, nh),
                                                      T.alloc(np.array(0.), minibatch_size, nh)],
                                        n_steps=x.shape[0])
            if bidirectional:
                [h_r, c_r], _ = theano.scan(fn=recurrence_lstm_reverse, sequences=[x, mask_3d],
                                            outputs_info=[T.alloc(np.array(0.), minibatch_size, nh),
                                                          T.alloc(np.array(0.), minibatch_size, nh)],
                                            go_backwards=True)
                #[h_r, c_r], _ = theano.scan(fn=recurrence_lstm_reverse, sequences=x,
                #                            outputs_info=[self.h_i_r, self.c_i_r], go_backwards=True)
        else:
            #h_f, _ = theano.scan(fn=recurrence_basic, sequences=x, outputs_info=[self.h_i_f], n_steps=x.shape[0])
            temp, _ = theano.scan(fn=recurrence_basic, sequences=[x, mask_3d],
                                  outputs_info=[T.alloc(np.array(0.), minibatch_size, nh)],
                                  n_steps=x.shape[0])
            #h_f = theano.printing.Print('h_f')(temp)
            h_f = temp
            if bidirectional:
                h_r, _ = theano.scan(fn=recurrence_basic_reverse, sequences=[x, mask_3d],
                                     outputs_info=[T.alloc(np.array(0.), minibatch_size, nh)],
                                     go_backwards=True)
        if bidirectional:
            # reverse the second hidden layer so it lines up with the first
            h_r = h_r[::-1, :, :]
            if bi_combine == 'max':
                h = T.maximum(h_f, h_r)
            elif bi_combine == 'mean':
                h = (h_f + h_r) / 2.0
            else:  # concatenate
                #h = theano.printing.Print('h:')(T.concatenate([h_fp, h_rp], axis=1))
                h = T.concatenate([h_f, h_r], axis=2)
        else:
            #temp = T.printing.Print('isnan')(T.max(T.isnan(h_f)))
            #h = h_f * (1-temp)
            h = h_f  # [n_elements, minibatch_size, n_hidden] (?)
        # a_sum is only meaningful for the attention poolers; defaulted to a
        # constant so sentence_train always has a second output.
        a_sum = T.sum([1])
        if pooling_method == 'attention1':  # combine hidden nodes, then transform and sigmoid
            # THIS IS NOT WORKING...
            # SOFTMAX normalizes across the row (axis=1)
            #a = T.nnet.softmax((T.dot(h, self.W_a) + self.b_a).T)
            temp = T.dot(h, self.W_a) + self.b_a
            # softmax over the sequence dimension
            a = T.exp(temp)/T.exp(temp).sum(axis=0, keepdims=True)
            a_sum = T.sum(a, )  # to check a is normalized
            a_rep = T.repeat(a, nh*bi, axis=2)
            weighted_sum = T.sum(h * a_rep, axis=0)
            p_y_given_x_sentence = T.nnet.sigmoid(T.dot(weighted_sum, self.W_s) + self.b_s)  # [1, nc] in R(0,1)
            y_pred = p_y_given_x_sentence > 0.5  # note, max is just to coerce into proper shape
            #element_weights = T.outer(a, p_y_given_x_sentence)  # [ne, nc]
            #p_y_given_x_sentence = T.nnet.sigmoid(T.dot(T.dot(a, h), self.W_s) + self.b_s)  # [1, nc] in R(0,1)
            #y_pred = T.max(p_y_given_x_sentence, axis=0) > 0.5  # note, max is just to coerce into proper shape
            #element_weights = T.outer(a, p_y_given_x_sentence)  # [ne, nc]
        elif pooling_method == 'attention2':  # transform hidden nodes, sigmoid, then combine
            temp = T.dot(h, self.W_a) + self.b_a
            # softmax over the sequence dimension
            a = T.exp(temp)/T.exp(temp).sum(axis=0, keepdims=True)  # [ne, minibatch_size, 1]: normalized over ne
            #a = T.nnet.softmax((T.dot(h, self.W_a) + self.b_a))
            a_sum = T.sum(a, axis=0)
            temp = T.nnet.sigmoid(T.dot(h, self.W_s) + self.b_s)  # [ne, minibatch_size, nc]
            p_y_given_x_sentence = T.sum(temp * T.repeat(a, nc, axis=2), axis=0)  # [minibatch_size, nc] in R(0,1)
            y_pred = p_y_given_x_sentence > 0.5
            #element_weights = T.repeat(a.T, nc, axis=1) * temp  # [ne, nc]
        elif pooling_method == 'mean':
            s = T.nnet.sigmoid((T.dot(h, self.W_s) + self.b_s))  # [n_elements, nc] in R(0,1)
            p_y_given_x_sentence = T.mean(s, axis=0)
            y_pred = p_y_given_x_sentence > 0.5
            element_weights = s
        elif pooling_method == 'max':
            # NOTE(review): the Print op here will dump `s` on every call --
            # debug leftover; remove before real training runs.
            #s = T.nnet.sigmoid((T.dot(h, self.W_s) + self.b_s))  # [n_elements, minibatch_size, nc] in R(0,1)
            s = T.printing.Print('s')(T.nnet.sigmoid((T.dot(h, self.W_s) + self.b_s)))
            #s_shape = T.printing.Print('s_shape')(s.shape)
            #p_y_given_x_sentence = T.max(s_shape[0] * s, axis=0)
            p_y_given_x_sentence = T.max(s, axis=0)
            #p_y_given_x_sentence = T.printing.Print('p_y')(T.max(s, axis=0))
            #temp = T.printing.Print('p_y')(p_y_given_x_sentence)
            #y_pred = T.printing.Print('y_pred')(p_y_given_x_sentence > 0.5)
            y_pred = p_y_given_x_sentence > 0.5
            element_weights = s
        elif pooling_method == 'last':
            s = T.nnet.sigmoid((T.dot(h, self.W_s) + self.b_s))  # [n_elements, minibatch_size, nc] in R(0,1)
            p_y_given_x_sentence = s[-1, :, :]
            y_pred = p_y_given_x_sentence > 0.5
            element_weights = s
        else:
            sys.exit("Pooling method not recognized")
        # cost and gradients and learning rate
        lr = T.scalar('lr_main')
        lr_emb_fac = T.scalar('lr_emb')
        #sentence_nll = T.mean(T.sum(-T.log(y*p_y_given_x_sentence + (1-y)*(1-p_y_given_x_sentence)), axis=1))
        sentence_nll = T.sum(-T.log(y*p_y_given_x_sentence + (1-y)*(1-p_y_given_x_sentence)))
        sentence_gradients = T.grad(sentence_nll, self.params)
        if clip_gradients:
            sentence_gradients = [T.clip(g, -1, 1) for g in sentence_gradients]
        # NOTE(review): lr_emb_fac is applied to sentence_gradients[0], which
        # corresponds to self.params[0] (W_xh) -- the embedding matrix sits
        # later in self.params, so the embedding LR factor appears to scale
        # the wrong parameter's update. Confirm intended behavior.
        sentence_updates = OrderedDict((p, p - lr * g) for p, g in zip(self.params, [lr_emb_fac *
                                                                                    sentence_gradients[0]]
                                                                      + sentence_gradients[1:]))
        # theano functions to compile
        if extra_input_dims > 0:
            self.sentence_classify = theano.function(inputs=[idxs, mask, extra, minibatch_size], outputs=y_pred)
            self.sentence_train = theano.function(inputs=[idxs, mask, extra, y, lr, lr_emb_fac, minibatch_size],
                                                  outputs=[sentence_nll, a_sum],
                                                  updates=sentence_updates)
            #if pooling_method == 'attention1' or pooling_method == 'attention2':
            #    self.a_sum_check = theano.function(inputs=[idxs, extra], outputs=a_sum)
            self.sentence_step_through = theano.function(inputs=[idxs, mask, extra, minibatch_size],
                                                         outputs=[h, self.W_s, self.b_s, p_y_given_x_sentence])
        else:
            self.sentence_classify = theano.function(inputs=[idxs, mask, minibatch_size], outputs=y_pred)
            self.sentence_train = theano.function(inputs=[idxs, mask, y, lr, lr_emb_fac, minibatch_size],
                                                  outputs=[sentence_nll, a_sum],
                                                  updates=sentence_updates)
            #if pooling_method == 'attention1' or pooling_method == 'attention2':
            #    self.a_sum_check = theano.function(inputs=[idxs, mask, minibatch_size], outputs=a_sum)
            self.sentence_step_through = theano.function(inputs=[idxs, mask, minibatch_size],
                                                         outputs=[h, self.W_s, self.b_s, p_y_given_x_sentence])
        # renormalize each embedding row to unit L2 norm after every update
        self.normalize = theano.function(inputs=[],
                                         updates={self.emb: self.emb / T.sqrt((self.emb**2).sum(axis=1))
                                                  .dimshuffle(0, 'x')})
    def step_through(self, x, mask, window_size, extra_input_dims=0, extra=None):
        """
        Run the debug pass: return [h, W_s, b_s, p_y_given_x_sentence] for a
        minibatch, so the pooled prediction can be recomputed by hand.

        x :: int32 array [seq_len, minibatch_size, window_size]
        mask :: 0/1 array marking real (non-padding) positions
        extra :: per-document features, required when extra_input_dims > 0
        """
        # note: the window_size parameter is immediately shadowed by x.shape
        seq_len, minibatch_size, window_size = x.shape
        words = x
        mask = np.array(mask.T).astype('int32').reshape((seq_len, minibatch_size, 1))
        if extra_input_dims > 0:
            extra = np.array(extra).astype('int32').reshape((1, minibatch_size, extra_input_dims))
            return self.sentence_step_through(words, mask, extra, minibatch_size)
        else:
            return self.sentence_step_through(words, mask, minibatch_size)
    def classify(self, x, mask, window_size, extra_input_dims=0, extra=None):
        #assert window_size == 1
        #assert extra_input_dims == 0
        #cwords = contextwin(x, window_size)
        ## make an array of these windows
        #words = map(lambda x: np.asarray(x).astype('int32'), cwords)
        """
        Predict binary label vectors for a minibatch.

        x :: int32 array [seq_len, minibatch_size, window_size] of
             window-expanded word indices
        mask :: 0/1 array marking real (non-padding) positions
        extra :: per-document features, required when extra_input_dims > 0
        Returns a boolean array [minibatch_size, nc] of predictions.
        (The two string blocks below are dead code from earlier revisions.)
        """
        """
        for i in range(x.shape[0]):
            cwords = contextwin(list(x[i, :]), window_size)
            words = map(lambda q: np.asarray(q).astype('int32'), cwords)
            x[i, :] = words
        if len(x.shape) == 2:
            minibatch_size, seq_len = x.shape
            words = np.array(x.T).astype('int32')
            mask = np.array(mask.T).astype('int32').reshape((seq_len, minibatch_size, 1))
        else:
            minibatch_size = 1
            seq_len = x.shape[0]
            words = np.array(x).astype('int32').reshape((seq_len, minibatch_size))
            mask = np.array(mask).astype('int32').reshape((seq_len, minibatch_size, 1))
        """
        """
        if len(x.shape) == 2:
            minibatch_size, seq_len = x.shape
            words = np.zeros([seq_len, minibatch_size, window_size], dtype='int32')
            if window_size > 1:
                for i in range(minibatch_size):
                    cwords = contextwin(list(x[i, :]), window_size)
                    words_i = np.array(cwords, dtype='int32')
                    #[words_i.extend(j) for j in cwords]
                    words[:, i, :] = words_i
                x = words.T
            words = np.array(x.T).astype('int32').reshape((seq_len, minibatch_size, window_size))
            mask = np.array(mask.T).astype('int32').reshape((seq_len, minibatch_size, 1))
        else:
            minibatch_size = 1
            seq_len = x.shape[0]
            words = np.zeros([seq_len, minibatch_size, window_size], dtype='int32')
            cwords = contextwin(x, window_size)
            words[:, 0, :] = np.array(cwords, dtype='int32')
            #words = np.array(words).astype('int32').reshape((seq_len, minibatch_size, window_size))
            mask = np.array(mask).astype('int32').reshape((seq_len, 1, 1))
        `
        """
        seq_len, minibatch_size, window_size = x.shape
        words = x
        mask = np.array(mask.T).astype('int32').reshape((seq_len, minibatch_size, 1))
        if extra_input_dims > 0:
            extra = np.array(extra).astype('int32').reshape((1, minibatch_size, extra_input_dims))
            return self.sentence_classify(words, mask, extra, minibatch_size)
        else:
            return self.sentence_classify(words, mask, minibatch_size)
    def train(self, x, mask, y, window_size, learning_rate, emb_lr_factor, extra_input_dims=0, extra=None):
        #assert window_size == 1
        #assert extra_input_dims == 0
        # concatenate words in a window
        #cwords = contextwin(x, window_size)
        # make an array of these windows
        #words = map(lambda x: np.asarray(x).astype('int32'), cwords)
        # if minibatch_size is 1, X = 1D list of indices, i.e. X.shape[0] = seq_len
        # if minibatch_size > 0, X = np.array([minibatch_size, seq_len])
        """
        Run one gradient step on a minibatch and renormalize the embeddings.

        Returns the outputs of sentence_train, i.e. [nll, a_sum].
        (The string block below is dead code from an earlier revision.)
        """
        """
        if len(x.shape) == 2:
            minibatch_size, seq_len = x.shape
            words = np.zeros([seq_len, minibatch_size, window_size], dtype='int32')
            if window_size > 1:
                for i in range(minibatch_size):
                    cwords = contextwin(list(x[i, :]), window_size)
                    words_i = np.array(cwords, dtype='int32')
                    #[words_i.extend(j) for j in cwords]
                    words[:, i, :] = words_i
                x = words.T
            words = np.array(x.T).astype('int32').reshape((seq_len, minibatch_size, window_size))
            mask = np.array(mask.T).astype('int32').reshape((seq_len, minibatch_size, 1))
            y = np.array(y).astype('int32')
        else:
            minibatch_size = 1
            seq_len = x.shape[0]
            words = np.zeros([seq_len, minibatch_size, window_size], dtype='int32')
            cwords = contextwin(x, window_size)
            words[:, 0, :] = np.array(cwords, dtype='int32')
            #words = np.array(words).astype('int32').reshape((seq_len, minibatch_size, window_size))
            mask = np.array(mask).astype('int32').reshape((seq_len, 1, 1))
            y = np.array(y).astype('int32').reshape((1, len(y)))
        """
        seq_len, minibatch_size, window_size = x.shape
        words = x
        mask = np.array(mask.T).astype('int32').reshape((seq_len, minibatch_size, 1))
        y = np.array(y).astype('int32')
        # train on these sentences and normalize
        if extra_input_dims > 0:
            extra = np.array(extra).astype('int32').reshape((1, minibatch_size, extra_input_dims))
            nll = self.sentence_train(words, mask, extra, y, learning_rate, emb_lr_factor, minibatch_size)
        else:
            nll = self.sentence_train(words, mask, y, learning_rate, emb_lr_factor, minibatch_size)
        self.normalize()
        return nll
    def save(self, output_dir):
        """Write each parameter's current value to <name>.npy in output_dir."""
        for param in self.params:
            np.save(os.path.join(output_dir, param.name + '.npy'), param.get_value())
    def load(self, input_dir):
        """Restore parameter values previously written by save()."""
        for param in self.params:
            param.set_value(np.load(os.path.join(input_dir, param.name + '.npy')))
    def print_embeddings(self):
        """Dump the name and current value of every parameter to stdout."""
        for param in self.params:
            print param.name, param.get_value()
    def save_embeddings(self, filename):
        # NOTE(review): this saves the shared variable object itself rather
        # than self.emb.get_value(); confirm np.save handles it as intended.
        """Save the embedding matrix to filename via np.save."""
        np.save(filename, self.emb)
def contextwin(l, win):
    '''
    win :: int corresponding to the (odd) size of the window
    l :: array containing the word indexes of a sentence

    Return one context window per word: a list, the same length as ``l``,
    of ``win``-long index lists centred on each word, with -1 padding at
    the sentence boundaries.
    '''
    assert (win % 2) == 1
    assert win >= 1
    tokens = list(l)
    pad = (win // 2) * [-1]
    padded = pad + tokens + pad
    windows = [padded[pos:(pos + win)] for pos in range(len(tokens))]
    assert len(windows) == len(tokens)
    return windows
def main(params=None):
    """
    Load data, build and train (or, in this debug revision, step through)
    the RNN over one or more dev folds, and return hyperopt-style results.

    params :: optional dict of experiment settings; if None a default dict
              is built below. NOTE(review): whatever is passed in is then
              overwritten by the params file at the hard-coded path further
              down -- confirm this debug override is intended.
    Returns a dict with 'loss' (negated median validation F1), per-fold F1
    lists, and hyperopt STATUS_OK.
    """
    if params is None:
        params = {
            'dataset': 'DRLD',
            'exp_name': 'char_test',
            'test_fold': 0,
            'n_dev_folds': 1,
            'min_doc_thresh': 1,
            'initialize_word_vectors': True,
            'vectors': 'chars_word2vec_25',  # default_word2vec_300, anes_word2vec_300, chars_word2vec_25, eye_1 ...
            'init_scale': 0.2,
            'add_OOV_dim': True,
            'win': 1,  # size of context window
            'add_DRLD': True,
            'rnn_type': 'basic',  # basic, GRU, or LSTM
            'n_hidden': 50,  # size of hidden units
            'pooling_method': 'max',  # max, mean, or attention1/2
            'bidirectional': True,
            'bi_combine': 'concat',  # concat, max, or mean
            'train_embeddings': True,
            'lr': 0.1,  # learning rate
            'lr_emb_fac': 1,  # factor to modify learning rate for embeddings
            'decay_delay': 10,  # number of epochs with no improvement before decreasing learning rate
            'decay_factor': 0.5,  # factor by which to multiply learning rate in case of delay
            'n_epochs': 300,
            'add_OOV_noise': True,
            'OOV_noise_prob': 0.01,
            'minibatch_size': 16,
            'classify_minibatch_size': 64,
            'ensemble': False,
            'save_model': True,
            'seed': 42,
            'verbose': 1,
            'reuse': False,
            'orig_T': 0.04,
            'tau': 0.01,
            'clip_gradients': False
        }
    #params = fh.read_json('/Users/dcard/Projects/CMU/ARK/guac/experiments/best_mod.json')
    #params['exp_name'] += '_best'
    #params['n_hidden'] = int(params['n_hidden'])
    # Reload params (and a previously-trained model below) from this
    # hard-coded experiment directory.
    rnn_base_dir = '/Users/dcard/Projects/CMU/ARK/guac/experiments/rnn/car_test/'
    params_filename = fh.make_filename(rnn_base_dir, 'params', 'txt')
    params = fh.read_json(params_filename)
    fold = params['test_fold']
    rnn_input_dir = fh.makedirs(rnn_base_dir, 'fold' + str(fold))
    keys = params.keys()
    keys.sort()
    for key in keys:
        print key, ':', params[key]
    # seed the random number generators
    np.random.seed(params['seed'])
    random.seed(params['seed'])
    # e.g. 'chars_word2vec_25' -> vector_type 'chars', dimension 25
    vector_type = params['vectors'].split('_')[0]
    params['word2vec_dim'] = int(params['vectors'].split('_')[-1])
    reuser = None
    if params['reuse']:
        reuser = reusable_holdout.ReuseableHoldout(T=params['orig_T'], tau=params['tau'])
    # map a dataset name to its list of constituent corpora
    if params['dataset'] == 'DRLD':
        datasets = ['Democrat-Likes', 'Democrat-Dislikes', 'Republican-Likes', 'Republican-Dislikes']
    elif params['dataset'] == 'MIP':
        datasets = ['MIP-Personal-1', 'MIP-Personal-2', 'MIP-Political-1', 'MIP-Political-2']
    elif params['dataset'] == 'MOLD':
        datasets = ['McCain-Likes', 'McCain-Dislikes', 'Obama-Likes', 'Obama-Dislikes']
    elif params['dataset'] == 'Primary':
        datasets = ['Obama-Primary', 'Clinton-Primary']
    elif params['dataset'] == 'General':
        datasets = ['Obama-General', 'McCain-General']
    else:
        datasets = [params['dataset']]
    np.random.seed(params['seed'])
    random.seed(params['seed'])
    best_valid_f1s = []
    best_true_valid_f1s = []
    best_test_f1s = []
    best_train_f1s = []
    test_prediction_arrays = []
    output_dir = fh.makedirs(defines.exp_dir, 'rnn', params['exp_name'])
    output_filename = fh.make_filename(output_dir, 'params', 'txt')
    fh.write_to_json(params, output_filename)
    for dev_fold in range(params['n_dev_folds']):
        print "dev fold =", dev_fold
        output_dir = fh.makedirs(defines.exp_dir, 'rnn', params['exp_name'], 'fold' + str(dev_fold))
        if vector_type == 'chars':
            all_data, words2idx, items, all_labels = common.load_char_data(datasets, params['test_fold'], dev_fold)
        else:
            all_data, words2idx, items, all_labels = common.load_data(datasets, params['test_fold'], dev_fold,
                                                                      params['min_doc_thresh'])
        train_xy, valid_xy, test_xy = all_data
        train_lex, train_y = train_xy
        valid_lex, valid_y = valid_xy
        test_lex, test_y = test_xy
        #if params['minibatch_size'] > 1 or params['classify_minibatch_size'] > 1:
        print "padding input with zeros"
        all_data, all_masks = common.prepare_data(train_lex, valid_lex, test_lex)
        train_lex, valid_lex, test_lex = all_data
        train_masks, valid_masks, test_masks = all_masks
        #else:
        #    train_masks = [np.ones(len(x)).astype('int32') for x in train_lex]
        #    valid_masks = [np.ones(len(x)).astype('int32') for x in valid_lex]
        #    test_masks = [np.ones(len(x)).astype('int32') for x in test_lex]
        print "expanding x with context win dows"
        # Rejigger to convert x to contex win in advance
        train_x_win = expand_x_with_context_win(train_lex, params['win'])
        valid_x_win = expand_x_with_context_win(valid_lex, params['win'])
        test_x_win = expand_x_with_context_win(test_lex, params['win'])
        # order tracks the original item indices through the epoch shuffles
        order = range(len(train_lex))
        print "done"
        train_items, dev_items, test_items = items
        vocsize = len(words2idx.keys())
        idx2words = dict((k, v) for v, k in words2idx.iteritems())
        best_test_predictions = None
        n_sentences = len(train_lex)
        print "vocsize = ", vocsize, 'n_train', n_sentences
        codes = all_labels.columns
        n_items, n_codes = all_labels.shape
        # get the words in the sentences for the test and validation sets
        words_valid = [map(lambda x: idx2words[x], w) for w in valid_lex]
        groundtruth_test = test_y[:]
        words_test = [map(lambda x: idx2words[x], w) for w in test_lex]
        #if vector_type == 'eye':
        #    initial_embeddings = np.eye(vocsize)
        #    emb_dim = initial_embeddings.shape[1]
        if params['initialize_word_vectors']:
            initial_embeddings = common.load_embeddings(params, words2idx)
            emb_dim = initial_embeddings.shape[1]
        else:
            initial_embeddings = None
            emb_dim = params['word2vec_dim']
        print "embedding dim =", emb_dim
        temp_output = fh.make_filename(output_dir, 'embedding_labels', 'json')
        fh.write_to_json(idx2words, temp_output)
        # two extra per-document input features: Likes/Dislikes and Dem/Rep
        extra_input_dims = 0
        if params['add_DRLD']:
            extra_input_dims = 2
        print "Building RNN"
        rnn = RNN(nh=params['n_hidden'],
                  nc=n_codes,
                  ne=vocsize,
                  de=emb_dim,
                  cs=params['win'],
                  extra_input_dims=extra_input_dims,
                  initial_embeddings=initial_embeddings,
                  init_scale=params['init_scale'],
                  rnn_type=params['rnn_type'],
                  train_embeddings=params['train_embeddings'],
                  pooling_method=params['pooling_method'],
                  bidirectional=params['bidirectional'],
                  bi_combine=params['bi_combine'],
                  clip_gradients=params['clip_gradients']
                  )
        # restore previously-trained parameters from the hard-coded directory
        rnn.load(rnn_input_dir)
        #temp_filename = fh.make_filename(output_dir, 'initial_embeddings', 'npy')
        #rnn.save_embeddings(temp_filename)
        train_likes = [1 if re.search('Likes', i) else 0 for i in train_items]
        dev_likes = [1 if re.search('Likes', i) else 0 for i in dev_items]
        test_likes = [1 if re.search('Likes', i) else 0 for i in test_items]
        train_dem = [1 if re.search('Democrat', i) else 0 for i in train_items]
        dev_dem = [1 if re.search('Democrat', i) else 0 for i in dev_items]
        test_dem = [1 if re.search('Democrat', i) else 0 for i in test_items]
        train_extra = [[train_likes[i], train_dem[i]] for i, t in enumerate(train_items)]
        dev_extra = [[dev_likes[i], dev_dem[i]] for i, t in enumerate(dev_items)]
        test_extra = [[test_likes[i], test_dem[i]] for i, t in enumerate(test_items)]
        # --- debug block: step through one minibatch, recompute the max-pooled
        # prediction by hand, print everything, and exit ---
        ms = 1
        mb_x, mb_masks, mb_extra, mb_y = select_minibatch(train_x_win, train_masks, train_extra, train_y,
                                                          params['win'], 0, 1, order=np.arange(n_sentences))
        print '\n'.join([' '.join([idx2words[idx] for idx in mb_x[:, k, 0].tolist()]) for k in range(ms)])
        prediction = rnn.classify(mb_x, mb_masks, params['win'], extra_input_dims, mb_extra)
        print prediction
        h, W, b, p_y = rnn.step_through(mb_x, mb_masks, params['win'], extra_input_dims, mb_extra)
        print p_y
        print W
        print b
        temp = np.dot(h, W) + b
        s = 1.0/(1.0 + np.exp(-temp))
        print s
        p_y_calc = np.max(s, axis=0)
        print p_y_calc
        print np.array(p_y_calc > 0.5, dtype='int')
        # NOTE(review): unconditional exit -- everything below (the whole
        # training loop) is unreachable in this debug revision.
        sys.exit()
        # train with early stopping on validation set
        best_f1 = -np.inf
        params['clr'] = params['lr']
        for e in xrange(params['n_epochs']):
            # shuffle
            #shuffle([train_lex, train_y, train_extra, train_masks], params['seed'])  # shuffle the input data
            shuffle([order, train_lex, train_y, train_extra, train_masks], params['seed'])  # shuffle the input data
            params['ce'] = e  # store the current epoch
            tic = timeit.default_timer()
            ms = params['minibatch_size']
            n_train = len(train_lex)
            nll = 0
            #for i, orig_x in enumerate(train_lex):
            for iteration, i in enumerate(range(0, n_train, ms)):
                #orig_x = train_lex[i]
                #n_words = len(orig_x)
                #if params['add_OOV_noise']:
                #    draws = np.random.rand(n_words)
                #    x = [OOV_index if draws[i] < params['OOV_noise_prob'] else orig_x[i] for i in range(n_words)]
                #else:
                #    x = orig_x
                #y = train_y[i]
                extra = train_extra[i]
                #mask = train_masks[i]
                minibatch_x, minibatch_mask,\
                minibatch_extra, minibatch_y = select_minibatch(train_x_win, train_masks, train_extra, train_y,
                                                                params['win'], i, ms, order,
                                                                params['add_OOV_noise'], params['OOV_noise_prob'])
                #if i == 0:
                #    print '\n'.join([' '.join([idx2words[idx] for idx in minibatch_x[:, k, 0].tolist()]) for
                #                     k in range(ms)])
                nll_i, a_sum = rnn.train(minibatch_x, minibatch_mask, minibatch_y, params['win'],
                                         params['clr'],
                                         params['lr_emb_fac'], extra_input_dims, minibatch_extra)
                nll += nll_i
                #rnn.train(x, mask, y, params['win'], params['clr'], params['lr_emb_fac'],
                #          extra_input_dims, extra)
                print '[learning] epoch %i >> %2.2f%%' % (
                    e, (i + 1) * 100. / float(n_sentences)),
                print 'completed in %.2f (sec), nll = %.2f, a_sum = %.1f <<\r' % (timeit.default_timer() - tic,
                                                                                 nll, np.max(a_sum)),
                sys.stdout.flush()
            # bail out (or report total failure) if training diverged
            if np.isnan(nll) or np.isinf(nll):
                if best_f1 > 0:
                    break
                else:
                    return {'loss': 1.0,
                            'final_test_f1': 0,
                            'valid_f1s': 0,
                            'true_valid_f1s': 0,
                            'train_f1s': 0,
                            'test_f1s': 0,
                            'status': STATUS_OK
                            }
            # evaluation // back into the real world : idx -> words
            print ""
            #print "true y", train_y[-1]
            #y_pred = rnn.classify(np.array(train_x_win[-1]).reshape((1, len(train_x_win[-1]))),
            #                      train_masks[-1], params['win'], extra_input_dims, train_extra[-1])[0]
            #print "pred y", y_pred
            #if params['pooling_method'] == 'attention1' or params['pooling_method'] == 'attention2':
            #    if extra_input_dims == 0:
            #        r = np.random.randint(0, len(train_lex))
            #        print r, rnn.a_sum_check(np.asarray(contextwin(train_lex[r], params['win'])).astype('int32'))
            predictions_train = predict(n_train, params['classify_minibatch_size'], train_x_win, train_masks,
                                        train_y, params['win'], extra_input_dims, train_extra, rnn, order)
            n_valid = len(valid_lex)
            n_test = len(test_lex)
            predictions_valid = predict(n_valid, params['classify_minibatch_size'], valid_x_win, valid_masks,
                                        valid_y, params['win'], extra_input_dims, dev_extra, rnn)
            predictions_test = predict(n_test, params['classify_minibatch_size'], test_x_win, test_masks,
                                       test_y, params['win'], extra_input_dims, test_extra, rnn)
            """
            predictions_train = [rnn.classify(x, train_masks[i], params['win'],
                                              extra_input_dims, train_extra[i])[0] for i, x in enumerate(train_lex)]
            predictions_valid = [rnn.classify(x, valid_masks[i], params['win'],
                                              extra_input_dims, dev_extra[i])[0] for i, x in enumerate(valid_lex)]
            predictions_test = [rnn.classify(x, test_masks[i], params['win'],
                                             extra_input_dims, test_extra[i])[0] for i, x in enumerate(test_lex)]
            """
            train_f1 = common.calc_mean_f1(predictions_train, train_y)
            test_f1 = common.calc_mean_f1(predictions_test, test_y)
            valid_f1 = common.calc_mean_f1(predictions_valid, valid_y)
            question_f1s = []
            question_pps = []
            print "train_f1 =", train_f1, "valid_f1 =", valid_f1, "test_f1 =", test_f1
            # keep a deep copy of the best model (by validation F1) so far
            if valid_f1 > best_f1:
                best_rnn = copy.deepcopy(rnn)
                best_f1 = valid_f1
                best_test_predictions = predictions_test
                if params['verbose']:
                    print('NEW BEST: epoch', e,
                          'valid f1', valid_f1,
                          'best test f1', test_f1)
                params['tr_f1'] = train_f1
                params['te_f1'] = test_f1
                params['v_f1'] = valid_f1
                params['be'] = e  # store the current epoch as a new best
            # learning rate decay if no improvement in a given number of epochs
            if abs(params['be']-params['ce']) >= params['decay_delay']:
                params['clr'] *= params['decay_factor']
                params['be'] = params['ce']
                print "Reverting to current best; new learning rate = ", params['clr']
                # also reset to the previous best
                rnn = best_rnn
            if params['clr'] < 1e-5:
                break
            if best_f1 == 1.0:
                break
            if best_f1 == 0 and e > 7:
                break
        if params['save_model']:
            predictions_valid = predict(len(valid_y), params['classify_minibatch_size'], valid_x_win, valid_masks,
                                        valid_y, params['win'], extra_input_dims, dev_extra, rnn)
            #predictions_valid = [best_rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32')) for x in valid_lex]
            best_rnn.save(output_dir)
            common.write_predictions(datasets, params['test_fold'], dev_fold, predictions_valid, dev_items, output_dir)
        print('BEST RESULT: epoch', params['be'],
              'train F1 ', params['tr_f1'],
              'valid F1', params['v_f1'],
              'best test F1', params['te_f1'],
              'with the model', output_dir)
        best_true_valid_f1s.append(params['v_f1'])
        best_test_f1s.append(params['te_f1'])
        best_train_f1s.append(params['tr_f1'])
        if reuser is not None:
            best_valid_f1 = reuser.mask_value(params['v_f1'], params['tr_f1'])
        else:
            best_valid_f1 = params['v_f1']
        best_valid_f1s.append(best_valid_f1)
        test_prediction_arrays.append(np.array(best_test_predictions, dtype=int))
    # note: ensembling is force-disabled here regardless of params
    params['ensemble'] = False
    if params['ensemble']:
        test_predictions_stack = np.dstack(test_prediction_arrays)
        final_predictions = stats.mode(test_predictions_stack, axis=2)[0][:, :, 0]
        predicted_df = pd.DataFrame(final_predictions, index=test_items, columns=codes)
        true_df = pd.DataFrame(np.array(test_y), index=test_items, columns=codes)
        final_test_f1, final_test_pp = evaluation.calc_macro_mean_f1_pp(true_df, predicted_df)
    else:
        final_test_f1 = np.median(best_test_f1s)
    return {'loss': -np.median(best_valid_f1s),
            'final_test_f1': final_test_f1,
            'valid_f1s': best_valid_f1s,
            'train_f1s': best_train_f1s,
            'true_valid_f1s': best_true_valid_f1s,
            'test_f1s': best_test_f1s,
            'status': STATUS_OK
            }
def expand_x_with_context_win(lex, window_size):
    """Stack a list of equal-length index sequences into a 3-D context-window tensor.

    Returns an int32 array of shape [seq_len, n_items, window_size]; when
    window_size > 1 each time step is expanded via the sibling `contextwin`
    helper, otherwise the sequences are simply transposed into the last axis.
    (Python 2 code: note the print statement.)
    """
    # assumes every sequence in `lex` has the same length — vstack would fail otherwise
    x = np.vstack(lex)
    n_items, seq_len = x.shape
    x_win = np.zeros([seq_len, n_items, window_size], dtype='int32')
    if window_size > 1:
        for i in range(n_items):
            x_win[:, i, :] = np.array(contextwin(list(x[i, :]), window_size), dtype='int32')
            #x_i =
            #x_win = [[np.array(w).astype('int32') for w in contextwin(list(x), window_size)] for x in lex]
    else:
        # trivial window: time-major copy of the raw indices
        x_win[:, :, 0] = x.T
    print "x_win.shape", x_win.shape
    return x_win
def select_minibatch(x_win, masks, extra, y, window_size, i, minibatch_size, order=None, add_oov_noise=False, oov_noise_prob=0.0):
    """Slice minibatch `i..i+ms` out of the windowed inputs, truncated to the
    longest unmasked length in the batch.

    Returns (minibatch_x, minibatch_mask, minibatch_extra, minibatch_y).
    When add_oov_noise is set, input indices are randomly zeroed (index 0
    presumably being the OOV token — TODO confirm) with prob. oov_noise_prob.

    NOTE(review): in the ms > 1 branch, masks/extra/y are taken from positions
    range(i, i+ms) while x is taken from order[i:i+ms] — if `order` is a
    shuffle these can disagree; confirm callers only pass order=None or an
    identity-consistent ordering here.
    """
    n = len(masks)
    if order is None:
        order = range(n)
    ms = min(minibatch_size, n-i)
    if ms > 1:
        minibatch_mask = np.vstack([masks[j] for j in range(i, min(i+ms, n))])
        # argmin of a 0/1 mask finds the first padded position; the batch is
        # truncated to the longest real sequence in the minibatch
        max_len = np.max(np.argmin(minibatch_mask, axis=1))
        if max_len == 0:
            # argmin == 0 means no padding anywhere: keep full length
            max_len = len(masks[i])
        try:
            minibatch_mask = minibatch_mask[:, 0: max_len].reshape((ms, max_len))
        except:
            # NOTE(review): bare except + print-and-continue leaves
            # minibatch_mask untruncated on failure; consider re-raising
            e = sys.exc_info()[0]
            print e
            print max_len
            print minibatch_mask
        minibatch_x = x_win[0: max_len, order[i: min(i+ms, n)], :]
        minibatch_extra = np.vstack([extra[j] for j in range(i, min(i+ms, n))])
        minibatch_y = np.vstack([y[j] for j in range(i, min(i+ms, n))])
    else:
        # single-item minibatch: same logic without the vstack/batch axis tricks
        max_len = np.argmin(masks[i])
        if max_len == 0:
            max_len = len(masks[i])
        minibatch_mask = np.array(masks[i][0: max_len]).reshape((1, max_len))
        minibatch_x = x_win[0: max_len, order[i], :].reshape((max_len, 1, window_size))
        minibatch_extra = np.array(extra[i]).reshape((1, len(extra[i])))
        minibatch_y = np.array(y[i]).reshape((1, len(y[i])))
    if add_oov_noise:
        # zero out indices where draw <= oov_noise_prob (multiplying by the 0/1 mask)
        draws = np.random.rand(max_len, ms, window_size)
        minibatch_x = np.array(minibatch_x * np.array(draws > oov_noise_prob, dtype='int32'), dtype='int32')
    return minibatch_x, minibatch_mask, minibatch_extra, minibatch_y
def predict(n, ms, x_win, masks, y, window_size, extra_input_dims, extra, rnn, order=None):
    """Classify all `n` items in minibatches of size `ms` and collect the outputs.

    For ms > 1 the classifier returns one prediction per batch row, which are
    flattened into the result list; for ms == 1 the single output is appended
    as-is. Returns the list of per-item predictions.
    """
    results = []
    for start in range(0, n, ms):
        mb_x, mb_masks, mb_extra, _mb_y = select_minibatch(
            x_win, masks, extra, y, window_size, start, ms, order=order)
        output = rnn.classify(mb_x, mb_masks, window_size, extra_input_dims, mb_extra)
        if ms > 1:
            results.extend(output)
        else:
            results.append(output)
    return results
if __name__ == '__main__':
report = main()
print report | [
"dcard@andrew.cmu.edu"
] | dcard@andrew.cmu.edu |
ad050bdca9957e37be1ca71048282a0569e623be | 736d1945173d89c0ba50d47e70a1e7ff7be9b432 | /run.py | 354a11aa27a4774879b9bd841f52cf9772f7169d | [] | no_license | cuglilong/Stacking_Algorithm | 1ce0643eae94b4fe908ea9d035d8f2e5d42cbca5 | dc0da245eb9db10576cc2ff56063ce99d4232001 | refs/heads/master | 2022-02-27T09:05:21.741039 | 2019-08-23T09:40:39 | 2019-08-23T09:40:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | import Stacker
import Hawaii_Stacker
import Stacker_Test
import sys
from obspy import read
import pickle
from numpy import random
import numpy as np
import clustering_scripts as cs
import plotting_scripts as ps
sys.path.append('./CCP_stacks/Plotting_Scripts')
#import plot_CCP
# Reading in
# NOTE(review): `file` shadows a builtin name (Python 2 `file`); harmless here
# but consider renaming to e.g. `seis_path`.
file =sys.argv[1]
print("Reading " + file + "...")
seis = read(file,format='PICKLE')
# Formatting relevant data
# per-trace SL2014-model waveform data
seis_data1 = np.array([tr.dataSL2014 for tr in seis])
# P410s pierce points at the 410 km discontinuity; fields 1 and 2 are taken
# as (lat, lon) below — TODO confirm the ordering against the picker output
locs = np.array([t.stats.piercepoints['P410s']['410'] for t in seis]).astype(float)
coords = np.array([(l[1], l[2]) for l in locs])
depths = np.array(seis[0].stats.depth)
seis_data2 = np.array([tr.dataPREM for tr in seis])
# Build the main stacker on the SL2014 data and run the adaptive stack
s1 = Stacker_Test.Stacker_Test(depths, coords, seis_data1, 'main')
#s2 = Stacker_Test.Stacker_Test(depths, coords, seis_data1, 'geographical1')
#s3 = Stacker_Test.Stacker_Test(depths, coords, seis_data2, 'prem1')
#cs.stability_test(s1, 9)
s1.adaptive_stack()
ps.interpolation(s1, 'inter_whole')
s1.plot()
print(s1.average_cluster_variance())
#s2.adaptive_stack(geographical = True)
#s2.plot()
#print(s2.average_cluster_variance())
#s3.adaptive_stack()
#s3.plot()
#print(s3.average_cluster_variance())
| [
"mh826@syrma.esc.cam.ac.uk"
] | mh826@syrma.esc.cam.ac.uk |
ca9384417c7381029549dc4cefe6c3c2371e83ab | 9547ba5d65029c7eb3975d888bc1c5579bd455c2 | /Spark-DF-Sql/10-AggregationsDemo/AggDemo.py | e8ad9a72a4caf8bdd34de50850b03aef84c65224 | [] | no_license | vinayavs/spark2-python | 53ebd2769525eca2e3727cb171fd43de87fd9d84 | bba1eedd232bd0ea3d1ac72293475d561326c1a4 | refs/heads/main | 2023-04-08T15:29:25.391387 | 2021-04-06T07:32:34 | 2021-04-06T07:32:34 | 352,007,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | from pyspark.sql import SparkSession
from pyspark.sql import functions as f
from lib.logger import Log4J
if __name__ == '__main__':
    # Demo: the same invoice aggregations expressed four ways —
    # column objects, SQL-like string expressions, Spark SQL, and DF groupBy.
    spark = SparkSession.builder.master("local[2]").appName("Aggregations Demo") \
        .config("spark.sql.shuffle.partitions", 2) \
        .getOrCreate()
    logger = Log4J(spark)
    invoiceDf = spark.read.format("csv") \
        .option("header", "true") \
        .option("inferSchema", "true") \
        .load("data/invoices.csv")
    # invoiceDf.printSchema()
    # Column Object Expression
    invoiceDf.select(f.count("*").alias("Count *"),
                     f.sum("Quantity").alias("TotalQuantity"),
                     f.avg("UnitPrice").alias("AvgPrice"),
                     f.countDistinct("InvoiceNo").alias("CountDistinct")).show()
    # Using Sql like string Expression
    invoiceDf.selectExpr(
        "count(1) as RecordCount", # Includes Null
        "count(StockCode) as TickerCount", # Ignores Null
        "sum(Quantity) as TotalQuantity",
        "avg(UnitPrice) as UnitPrice"
    ).show()
    # Grouping Aggregations
    invoiceDf.createOrReplaceTempView("sales")
    summarySql = spark.sql("""
        SELECT Country, InvoiceNo,
            sum(Quantity) as TotalQuantity,
            round(sum(Quantity * UnitPrice), 2) as InvoiceValue
        FROM sales
        GROUP BY Country, InvoiceNo
        """)
    summarySql.show()
    # DF Expressions
    summaryDf = invoiceDf \
        .groupBy("Country", "InvoiceNo") \
        .agg(f.sum("Quantity").alias("TotalQuantity"),
             f.expr("round(sum(Quantity * UnitPrice), 2) as InvoiceValue"))
    # f.round(f.sum(f.expr("Quantity * UnitPrice")),2).alias("InvoiceValue")
    summaryDf.show()
| [
"vinayavs@gmail.com"
] | vinayavs@gmail.com |
ad0df481b83aad3cb9398d08e94d900100182e33 | 1db17195776328739b465902f6bf2d53bff0f9ac | /logs/2021-09-08-11-39-20/ricequant.py | 24df136b934372a0a551256e7528110b92791d25 | [] | no_license | eulancer/convertible_bond | 01f56d3d318dd7343e2547b3c6a1ad184c25880d | 2778fb5ee2c0145758565e5cfe83392a879d429b | refs/heads/main | 2023-07-28T22:43:45.553292 | 2021-09-18T09:16:40 | 2021-09-18T09:16:40 | 407,878,522 | 1 | 0 | null | 2021-09-18T14:12:34 | 2021-09-18T14:12:33 | null | UTF-8 | Python | false | false | 5,808 | py | # -*- coding: utf-8 -*-
from datetime import date
import rqdatac
import pandas as pd
def read_data(today):
    """Fetch all convertible-bond data for the trading day before `today`.

    Pulls, via the rqdatac API: the instrument universe, last close of every
    bond and of each underlying stock, conversion prices, call (force-redeem)
    info, and indicators.

    Returns a 7-tuple:
        (txn_day, df_all_instruments, df_conversion_price,
         df_latest_bond_price, df_latest_stock_price, df_call_info,
         df_indicators)
    where df_call_info may be None (get_call_info can return None).
    """
    txn_day = rqdatac.get_previous_trading_date(today)
    df_all_instruments = rqdatac.convertible.all_instruments(
        txn_day).reset_index()
    # Bond closes on txn_day
    df_latest_bond_price = rqdatac.get_price(
        df_all_instruments.order_book_id.tolist(),
        start_date=txn_day,
        end_date=txn_day,
        frequency='1d').reset_index()
    # Underlying stock closes on txn_day
    df_latest_stock_price = rqdatac.get_price(
        df_all_instruments.stock_code.tolist(),
        start_date=txn_day,
        end_date=txn_day,
        frequency='1d').reset_index()
    df_conversion_price = rqdatac.convertible.get_conversion_price(
        df_all_instruments.order_book_id.tolist(),
        end_date=txn_day).reset_index()
    df_call_info = rqdatac.convertible.get_call_info(
        df_all_instruments.order_book_id.tolist(), end_date=txn_day)
    if df_call_info is not None:
        df_call_info = df_call_info.reset_index()
    df_indicators = rqdatac.convertible.get_indicators(
        df_all_instruments.order_book_id.tolist(),
        start_date=txn_day,
        end_date=txn_day).reset_index()
    return txn_day, df_all_instruments, df_conversion_price, df_latest_bond_price, df_latest_stock_price, df_call_info, df_indicators
def process(txn_day, df_all_instruments, df_conversion_price,
            df_latest_bond_price, df_latest_stock_price, df_call_info,
            df_indicators):
    """Clean and join the raw frames from read_data into one frame per bond.

    Keeps only plain convertible bonds ('cb') still trading on txn_day,
    drops bonds with an announced force-redeem before txn_day, and returns a
    DataFrame indexed by order_book_id with columns including stock_price,
    bond_price, conversion_price and convert_premium_rate.
    `df_indicators` is accepted but currently unused.
    """
    # Data cleaning
    # Filter non-conbond, e.g. exchange bond
    df_all_instruments = df_all_instruments[df_all_instruments.bond_type ==
                                            'cb']
    # Filter bonds that stopped trading by txn_day
    df_all_instruments[
        'stopped_trading'] = df_all_instruments.stop_trading_date.dt.date <= txn_day
    df_all_instruments = df_all_instruments[df_all_instruments.stopped_trading
                                            == False]
    df_all_instruments = df_all_instruments[[
        'order_book_id',
        'symbol',
        'stock_code',
    ]]
    df_latest_stock_price = df_latest_stock_price[[
        'order_book_id', 'close'
    ]].rename(columns={
        'close': 'stock_price'
    }).set_index('order_book_id')
    # stock_price
    # Join on stock_code: the stock-price frame is indexed by the stock's id
    df = df_all_instruments.set_index('stock_code').join(
        df_latest_stock_price).reset_index().set_index('order_book_id')
    df_latest_bond_price = df_latest_bond_price[[
        'order_book_id', 'close'
    ]].rename(columns={
        'close': 'bond_price'
    }).set_index('order_book_id')
    # bond_price
    df = df.join(df_latest_bond_price)
    if df_call_info is not None and 'info_date' in df_call_info.columns:
        # info_date
        df_call_info = df_call_info[pd.notnull(df_call_info.info_date)]
        if not df_call_info.empty:
            df = df.join(df_call_info[['order_book_id', 'info_date'
                                       ]].set_index('order_book_id'))
            # NOTE(review): `.dt.date` yields an object-dtype Series, so
            # comparing its .dtype to the `date` class looks like it can never
            # be True, which would silently skip the force-redeem filter —
            # confirm and consider checking the values instead of the dtype.
            if df.info_date.dt.date.dtype == date:
                df['force_redeem'] = df.info_date.dt.date < txn_day
                df = df[df.force_redeem == False]
    # One conversion price per bond: take the minimum over history
    df_conversion_price = df_conversion_price[[
        'order_book_id', 'conversion_price'
    ]].groupby('order_book_id').min()
    # conversion_price
    df = df.join(df_conversion_price)
    # premium of the bond price over its conversion value (shares * stock price)
    df['convert_premium_rate'] = df.bond_price / (100 / df.conversion_price *
                                                  df.stock_price) - 1
    return df
# config: Expect to have two keys: weight_bond_price and weight_convert_premium_rate
# df: Expect to have a column named 'double_low', or two columns named 'bond_price' and 'convert_premium_rate'
# index of df is the id for the bond to place order
def double_low(df, config):
    """Select the `config['top']` bonds with the lowest "double low" score.

    The score is bond_price * weight_bond_price +
    convert_premium_rate * 100 * weight_convert_premium_rate, i.e. cheap
    bonds with a low conversion premium rank best. If `df` already carries a
    'double_low' column it is used as-is (and the weight keys are not
    required); otherwise the column is computed and cached on the caller's
    frame (intentional mutation, so the score is computed only once).

    Args:
        df: DataFrame with either a 'double_low' column, or both
            'bond_price' and 'convert_premium_rate' columns; its index
            identifies the bonds to trade.
        config: dict with 'top' and, when the score must be computed,
            'weight_bond_price' and 'weight_convert_premium_rate'.

    Returns:
        set of index labels of the selected bonds.
    """
    assert 'top' in config
    top = config['top']
    if 'double_low' not in df.columns:
        assert 'weight_bond_price' in config
        assert 'weight_convert_premium_rate' in config
        weight_bond_price = config['weight_bond_price']
        weight_convert_premium_rate = config['weight_convert_premium_rate']
        assert 'bond_price' in df.columns
        assert 'convert_premium_rate' in df.columns
        df['double_low'] = df.bond_price * weight_bond_price + df.convert_premium_rate * 100 * weight_convert_premium_rate
    # Fix: rank once and reuse it — the original called nsmallest twice
    # (once for the debug print, once again for the return value).
    dl = df.nsmallest(top, 'double_low')
    print(dl)
    return set(dl.index.values.tolist())
def generate_orders(df, strategy, strategy_config, holdings):
    """Diff the strategy's target portfolio against current holdings.

    Runs `strategy(df, strategy_config)` to obtain the target set of ids and
    returns a dict with 'buy' (target but not held), 'sell' (held but no
    longer targeted) and 'hold' (both) as lists.
    """
    targets = strategy(df, strategy_config)
    return {
        'buy': list(targets - holdings),
        'sell': list(holdings - targets),
        'hold': list(holdings & targets),
    }
def init(context):
    """RiceQuant strategy entry point: hold the top-20 double-low bonds.

    `scheduler` and `market_open` are injected by the RiceQuant runtime, not
    imported here; the rebalance runs weekly on trading day 1, 10 minutes
    after the open.
    """
    # number of bonds to hold; also determines the 1/top equal weighting
    context.top = 20
    scheduler.run_weekly(rebalance,
                         tradingday=1,
                         time_rule=market_open(minute=10))
def rebalance(context, bar_dict):
    """Weekly rebalance: equal-weight the double-low selection.

    Fetches and processes the previous trading day's data, diffs the
    double-low target set against current positions, then sells dropped
    bonds and (re)targets each kept/new bond at 1/context.top of the
    portfolio. `logger` and `order_target_percent` are RiceQuant runtime
    globals.
    """
    txn_day, df_all_instruments, df_conversion_price, df_latest_bond_price, df_latest_stock_price, df_call_info, df_indicators = read_data(
        context.now)
    df = process(txn_day, df_all_instruments, df_conversion_price,
                 df_latest_bond_price, df_latest_stock_price, df_call_info,
                 df_indicators)
    # ids currently held
    positions = set()
    for p in context.portfolio.get_positions():
        positions.add(p.order_book_id)
    orders = generate_orders(
        df, double_low, {
            'weight_bond_price': 0.5,
            'weight_convert_premium_rate': 0.5,
            'top': context.top,
        }, positions)
    logger.info("今日操作:%s" % orders)
    # close dropped positions first, then set equal weights on kept/new ones
    for code in orders['sell']:
        order_target_percent(code, 0)
    for op in ['hold', 'buy']:
        for code in orders[op]:
            order_target_percent(code, 1 / context.top)
| [
"paulhybryant@gmail.com"
] | paulhybryant@gmail.com |
a20070b324979ddb978268aa926e7c238120fb98 | 94354828fc025e091165d2a68d692d6645290140 | /apps/user_operation/adminx.py | 8e02327027feb7c3efba6a92eb832823ded15720 | [] | no_license | simon-wxm/MxShop | 02350944a9756887dcd4a95aebf8801fea6df54f | 309b9e52b9e018b9ab5a7631492d59b280b085d6 | refs/heads/master | 2022-12-13T13:41:43.660462 | 2019-07-04T10:40:16 | 2019-07-04T10:40:16 | 191,779,173 | 0 | 0 | null | 2022-12-08T01:04:15 | 2019-06-13T14:31:42 | JavaScript | UTF-8 | Python | false | false | 543 | py | # coding = utf-8
import xadmin
from .models import UserFav ,UserAddress,UserLeavingMessage
class UserFavAdmin(object):
    """xadmin list config for user favourites."""
    list_display = ['user','goods','add_time']
class UserLeavingMessageAdmin(object):
    """xadmin list config for user messages."""
    list_display = ['user','message_type','message','add_time']
class UserAddressAdmin(object):
    """xadmin list config for shipping addresses."""
    list_display = ['signer_name','signer_mobile','district','address']
# Register the user_operation models with the xadmin site
xadmin.site.register(UserFav, UserFavAdmin)
xadmin.site.register(UserLeavingMessage, UserLeavingMessageAdmin)
xadmin.site.register(UserAddress, UserAddressAdmin)
| [
"wangxm5721@163.com"
] | wangxm5721@163.com |
1fabea699593b6f7717d53f0a54019114c626198 | 470c55bca410969d772593c5b0cc7f63fc97354f | /compiler/tools/conf.py | 3fb06ce1918cb2e16b6a819e5802c9294f1a0a41 | [
"BSD-2-Clause"
] | permissive | knz/restcrumbs | b19fd250a021f1b77c25a48c7163204d4021381d | 494ea1fc5788b0ec8823e0000607da4187c39986 | refs/heads/master | 2016-08-08T15:22:52.301680 | 2011-02-28T18:38:56 | 2011-02-28T18:38:56 | 1,422,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,928 | py | # -*- coding: utf-8 -*-
#
# Blah documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 9 10:40:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath']
pngmath_use_preview = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rstr'
source_encoding = 'iso-8859-1'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'reST Crumbs'
copyright = u'YEAR YOURNAME'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'reST Crumbs'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
#html_logo = '../im/uva_logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'notesdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
]
| [
"r.c.poss@uva.nl"
] | r.c.poss@uva.nl |
3e0492db360ce01a76f540ff3bf14d2133ae8153 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_bogies.py | e575bb083362fdfd4e25d0bf21f424dc5070f88d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
from xai.brain.wordbase.nouns._bogy import _BOGY
# Class header (auto-generated word entry)
class _BOGIES(_BOGY, ):
	def __init__(self,):
		"""Plural form of 'bogy': inherits its data, overriding the name."""
		_BOGY.__init__(self)
		self.name = "BOGIES"
		self.specie = 'nouns'
		self.basic = "bogy"
		# clears any jsondata inherited from _BOGY.__init__
		self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
83da69b3ca2edaf7ca26891e03e9d0eafd7f5cb2 | 3cbf8706ea6655aad48d6bb0c34a4d7f5d6b2fdf | /depth/self_supervised_sfm/train.py | 73671c45601f5f768f1fe3bf22a2e3a02d20caad | [
"MIT"
] | permissive | seo-dev/cvml_project | cf166f7aa513bdbc5b23941d8cb19b53bbdc400b | 7c95ce22db6f31dc4624af9417edffde021b5351 | refs/heads/master | 2022-12-06T07:43:11.906512 | 2020-08-27T03:08:38 | 2020-08-27T03:08:38 | 290,434,881 | 1 | 0 | MIT | 2020-08-26T08:04:37 | 2020-08-26T08:04:36 | null | UTF-8 | Python | false | false | 14,224 | py | import argparse
import datetime
import os
import tensorflow as tf
from dataset import KittiSFMDataset
from disparitynet import DisparityNet
from posenet import PoseNet
from utils import pixel_coord, ssim_loss, smooth_loss, bilinear_sampler, forwardproject, backproject, disp_to_depth
# Command-line options for self-supervised SfM depth training.
# NOTE(review): no `type=` is given, so values supplied on the command line
# arrive as strings even though the defaults are ints/lists — confirm that
# overriding these flags from the CLI behaves as intended.
parser = argparse.ArgumentParser(description="Disparity Project")
parser.add_argument('--identifier', default="sfm_resnet18")
parser.add_argument('--data_dir')
parser.add_argument("--input_h", default=192)
parser.add_argument("--input_w", default=640)
parser.add_argument("--batch_size", default=8)
parser.add_argument("--epochs", default=50)
parser.add_argument("--num_scales", default=4)
parser.add_argument("--num_input_frames", default=2, help='num of frames as input to posenet')
parser.add_argument("--frame_ids", default=[0, -1, 1], help='frames to load ')
parser.add_argument("--draw_every_iter", default=1000)
PROJECT_DIR = os.getcwd()
# Depth clamp range (meters) used by disp_to_depth in view_synthesis
MIN_DEPTH = 1e-3
MAX_DEPTH = 80
class Trainer:
def __init__(self, params, output_dir):
self.params = params
# Models
self.models = {}
self.models['disparity'] = DisparityNet(input_shape=(params.input_h, params.input_w, 3))
self.models['pose'] = PoseNet(input_shape=(params.input_h, params.input_w, 3 * params.num_input_frames),
num_input_frames=params.num_input_frames)
# Datasets
train_dataset = KittiSFMDataset(params.data_dir, 'train',
(params.input_h, params.input_w),
batch_size=params.batch_size,
frame_idx=params.frame_ids)
val_dataset = KittiSFMDataset(params.data_dir, 'val',
(params.input_h, params.input_w),
frame_idx=params.frame_ids,
batch_size=params.batch_size)
self.train_dataset = train_dataset.load_tfdataset()
self.val_dataset = val_dataset.load_tfdataset()
# Optimizer
self.total_iteration = (train_dataset.num_samples // params.batch_size) * params.epochs
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(0.0002, end_learning_rate=0.000001,
decay_steps=self.total_iteration,
power=0.5)
self.optimizer = tf.keras.optimizers.Adam(learning_rate_fn)
# Tensorboard & Meters
train_log_dir = os.path.join(output_dir, 'train_logs')
val_log_dir = os.path.join(output_dir, 'val_logs')
self.train_summary_writer = tf.summary.create_file_writer(train_log_dir)
self.test_summary_writer = tf.summary.create_file_writer(val_log_dir)
self.train_meter = {
'ssim': tf.keras.metrics.Mean(name='ssim'),
'l1': tf.keras.metrics.Mean(name='l1'),
'smooth': tf.keras.metrics.Mean(name='smooth'),
}
self.val_meter = {
'ssim': tf.keras.metrics.Mean(name='ssim'),
'l1': tf.keras.metrics.Mean(name='l1'),
'smooth': tf.keras.metrics.Mean(name='smooth'),
}
self.step = 0
# Load states from optimiser and model if available
self.ckpt_disp, self.manager_disp = self.setup_logger(self.models['disparity'],
os.path.join(output_dir, 'disparity_model'))
self.ckpt_pose, self.manager_pose = self.setup_logger(self.models['pose'],
os.path.join(output_dir, 'pose_model'))
self.start_epoch = int(self.ckpt_disp.step) + 1 if self.manager_disp.latest_checkpoint else int(
self.ckpt_disp.step)
print("Starting training step {}".format(self.ckpt_disp.step.numpy()))
# Helpers
self.pix_coords = pixel_coord(params.batch_size, params.input_h, params.input_w, True) # [b, 3, npoints]
def setup_logger(self, model, out_dir):
ckpt = tf.train.Checkpoint(step=tf.Variable(0), optimizer=self.optimizer, net=model)
manager = tf.train.CheckpointManager(ckpt, out_dir, max_to_keep=1)
ckpt.restore(manager.latest_checkpoint)
return ckpt, manager
def train(self):
for epoch in range(self.start_epoch, self.params.epochs):
[self.train_meter[k].reset_states() for k, v in self.train_meter.items()]
[self.val_meter[k].reset_states() for k, v in self.val_meter.items()]
# Train
for i, inputs in enumerate(self.train_dataset):
loss, outputs = self.train_step(inputs)
print(
f'\rEpoch: [{epoch}/{self.params.epochs}] | 'f'Iter: [{self.optimizer.iterations.numpy()}/{self.total_iteration}] | '
f'Lr: {self.optimizer._decayed_lr(tf.float32):.5f} | '
f"ssim: {self.train_meter['ssim'].result():.4f} | ",
f"l1: {self.train_meter['l1'].result():.4f} | ",
f"smooth: {self.train_meter['smooth'].result():.10f} | ",
f"total loss: {loss['loss']:.4f} | ",
end="")
if i % self.params.draw_every_iter == 0:
with self.train_summary_writer.as_default():
tf.summary.image('disparity', outputs['disparity0'], step=epoch)
tf.summary.image('depth', outputs['depth0'], step=epoch)
stack_prediction_pred = tf.concat([outputs['pred-10'], inputs['img'], outputs['pred10']],
axis=1)
stack_prediction_gt = tf.concat([inputs['img-1'], inputs['img'], inputs['img1']], axis=1)
tf.summary.image('predictions', stack_prediction_pred, step=epoch)
tf.summary.image('groundtruth', stack_prediction_gt, step=epoch)
# Validation
for i, inputs in enumerate(self.val_dataset):
self.val_step(inputs)
print(
f'\rEpoch: [{epoch}/{params.epochs}] | '
f"ssim: {self.val_meter['ssim'].result():.4f} | ",
f"l1: {self.val_meter['l1'].result():.4f} | ",
f"smooth: {self.val_meter['smooth'].result():.4f} | ",
end="")
with self.train_summary_writer.as_default():
tf.summary.scalar('ssim', self.train_meter['ssim'].result(), step=epoch)
tf.summary.scalar('l1', self.train_meter['l1'].result(), step=epoch)
tf.summary.scalar('smooth', self.train_meter['smooth'].result(), step=epoch)
with self.test_summary_writer.as_default():
tf.summary.scalar('ssim', self.val_meter['ssim'].result(), step=epoch)
tf.summary.scalar('l1', self.val_meter['l1'].result(), step=epoch)
tf.summary.scalar('smooth', self.val_meter['smooth'].result(), step=epoch)
# save and increment
save_path = self.manager_disp.save()
save_path = self.manager_pose.save()
print("Saved checkpoint for step {}: {}".format(int(self.ckpt_disp.step), save_path))
self.ckpt_disp.step.assign_add(1)
self.ckpt_pose.step.assign_add(1)
@tf.function
def train_step(self, inputs):
with tf.GradientTape() as tape:
outputs = self.models['disparity'](inputs['img'], training=True)
outputs.update(self.predict_pose(inputs))
outputs.update(self.view_synthesis(inputs, outputs))
loss = self.criterions(inputs, outputs)
trainable_params = self.models['disparity'].trainable_variables + self.models['pose'].trainable_variables
gradients = tape.gradient(loss['loss'], trainable_params)
self.optimizer.apply_gradients(zip(gradients, trainable_params))
# Update moving average
[self.train_meter[k](loss[k]) for k, v in self.train_meter.items()]
return loss, outputs
@tf.function
def val_step(self, inputs):
outputs = self.models['disparity'](inputs['img'], training=False)
outputs.update(self.predict_pose(inputs))
outputs.update(self.view_synthesis(inputs, outputs))
loss = self.criterions(inputs, outputs)
# Update moving average
[self.val_meter[k](loss[k]) for k, v in self.val_meter.items()]
def criterions(self, inputs, outputs):
loss_dict = {}
total_l1_loss = 0.
total_ssim_loss = 0.
total_smooth_loss = 0.
for scale in range(self.params.num_scales):
l1_losses = []
ssim_losses = []
for f_i in self.params.frame_ids[1:]:
target_rgb = inputs['img']
pred_rgb = outputs[f'pred{f_i}{scale}']
# L1 Loss
abs_diff = tf.abs(target_rgb - pred_rgb)
l1_loss = tf.reduce_mean(abs_diff, axis=-1, keepdims=True) # [b, h, w, 1]
l1_losses.append(l1_loss)
# SSIM Loss
ssim = tf.reduce_mean(ssim_loss(target_rgb, pred_rgb), axis=-1, keepdims=True)
ssim_losses.append(ssim)
ssim_losses = tf.concat(ssim_losses, -1)
l1_losses = tf.concat(l1_losses, -1)
if scale == 0:
outputs['l1_error'] = l1_losses
# Automasking
identity_l1_losses = []
identity_ssim_losses = []
for f_i in self.params.frame_ids[1:]:
target_rgb = inputs['img']
source_rgb = inputs[f'img{f_i}']
# L1 Loss
abs_diff = tf.abs(source_rgb - target_rgb)
l1_loss = tf.reduce_mean(abs_diff, axis=-1, keepdims=True)
identity_l1_losses.append(l1_loss)
# SSIM Loss [b, h, w, 1]
ssim = tf.reduce_mean(ssim_loss(source_rgb, target_rgb), axis=-1, keepdims=True)
identity_ssim_losses.append(ssim)
identity_ssim_losses = tf.concat(identity_ssim_losses, -1)
identity_l1_losses = tf.concat(identity_l1_losses, -1)
identity_l1_losses += tf.random.normal(identity_l1_losses.shape) * 0.00001 # Break ties
identity_ssim_losses += tf.random.normal(identity_ssim_losses.shape) * 0.00001 # Break ties
combined_l1 = tf.concat((identity_l1_losses, l1_losses), axis=-1)
combined_ssim = tf.concat((identity_ssim_losses, ssim_losses), axis=-1)
combined_l1 = tf.reduce_min(combined_l1, axis=-1)
combined_ssim = tf.reduce_min(combined_ssim, axis=-1)
_ssim_loss = tf.reduce_mean(combined_ssim) * 0.85
_l1_loss = tf.reduce_mean(combined_l1) * 0.15
total_l1_loss += _l1_loss
total_ssim_loss += _ssim_loss
# Disparity smoothness
disparity = outputs[f'disparity{scale}']
mean_disp = tf.reduce_mean(disparity, [1, 2], keepdims=True)
norm_disp = disparity / (mean_disp + 1e-7)
h = self.params.input_h // (2 ** scale)
w = self.params.input_w // (2 ** scale)
color_resized = tf.image.resize(target_rgb, (h, w))
smooth = smooth_loss(norm_disp, color_resized) * 1e-3
total_smooth_loss += smooth
total_smooth_loss /= self.params.num_scales
total_ssim_loss /= self.params.num_scales
total_l1_loss /= self.params.num_scales
loss_dict['ssim'] = total_ssim_loss
loss_dict['l1'] = total_l1_loss
loss_dict['smooth'] = total_smooth_loss
loss_dict['loss'] = total_smooth_loss + total_ssim_loss + total_l1_loss
return loss_dict
def predict_pose(self, inputs):
    """Estimate the relative camera pose from the target frame to each source frame.

    For negative frame offsets the pair is fed as (source, target) and the pose
    network output is inverted, so every returned transform maps target->source.

    Args:
        inputs: dict holding the target image under 'img' and each source image
            under 'img{f_i}'.

    Returns:
        dict with 'axisangle{f_i}', 'translation{f_i}' and the transform matrix
        'M{f_i}' for every source frame id.
    """
    poses = {}
    for frame_id in self.params.frame_ids[1:]:
        is_backward = frame_id < 0
        if is_backward:
            pair = tf.concat([inputs[f'img{frame_id}'], inputs['img']], -1)
        else:
            pair = tf.concat([inputs['img'], inputs[f'img{frame_id}']], -1)
        axisangle, translation, transform = self.models['pose'](pair, invert=is_backward)
        poses[f'axisangle{frame_id}'] = axisangle
        poses[f'translation{frame_id}'] = translation
        poses[f'M{frame_id}'] = transform
    return poses
def view_synthesis(self, inputs, outputs):
    """
    Warped prediction based on predicted depth and pose
    Args:
        inputs:
            'disparity': [b, h, w, 1]
            'img': [b, h, w, 3]
    """
    for scale in range(self.params.num_scales):
        disp = outputs[f'disparity{scale}']
        # Upsample each scale's disparity to full input resolution before warping
        disp = tf.image.resize(disp, [self.params.input_h, self.params.input_w])
        _, depth = disp_to_depth(disp, min_depth=MIN_DEPTH, max_depth=MAX_DEPTH)
        outputs[f'depth{scale}'] = depth
        for i, frame_id in enumerate(self.params.frame_ids[1:]):
            source = inputs[f'img{frame_id}']
            # Transform predicted by the pose network for this source frame
            T = outputs[f'M{frame_id}']
            # depth2pcl: back-project pixels into a camera-frame point cloud
            cam_points = backproject(self.pix_coords, depth, inputs['K_inv'])
            # pcl2pix: project the point cloud into the source view
            proj_mat = tf.matmul(inputs['K'], T)
            pix_coords = forwardproject(cam_points, proj_mat, self.params.input_h,
                                        self.params.input_w)  # [b, h, w, 2]
            # Warped source to target
            projected_img = bilinear_sampler(source, pix_coords)  # [b, h, w, 3]
            outputs[f'pred{frame_id}{scale}'] = projected_img
    return outputs
if __name__ == '__main__':
    params = parser.parse_args()
    output_dir = os.path.join(PROJECT_DIR, 'results', params.identifier)
    # exist_ok avoids the check-then-create race of isdir() + makedirs()
    os.makedirs(output_dir, exist_ok=True)
    print(f'Start: {params.identifier}', datetime.datetime.now())
    t = Trainer(params, output_dir)
    t.train()
| [
"daryl.tan@easymile.com"
] | daryl.tan@easymile.com |
6a08c2f08d79f432c3bf319defed10312a65bde4 | 3501c13c0465b6a4a333840243b583710dbeb959 | /kodēšana.py | 48b2a7a204b83790edb1610465ad2a5f88ac9fcf | [] | no_license | DaigaSarkane/RTR105 | e5032330fe42e08c925013ccc815118bfd23b7cc | 13a619bc8e61938758643be95b163fb58c6758ad | refs/heads/master | 2020-03-28T03:24:01.695389 | 2019-05-03T08:46:25 | 2019-05-03T08:46:25 | 147,642,367 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,205 | py | Python 2.7.12 (default, Dec 4 2017, 14:50:18)
[GCC 5.4.0 20160609] on linux2
Type "copyright", "credits" or "license()" for more information.
>>> original = "To be or not to be"
>>> type(original)
<type 'str'>
>>> len(original)
18
>>> original[0]
'T'
>>> original[1]
'o'
>>> original[2]
' '
>>> original[18]
Traceback (most recent call last):
File "<pyshell#6>", line 1, in <module>
original[18]
IndexError: string index out of range
>>> key = 10
>>> original[0] ^ key
Traceback (most recent call last):
File "<pyshell#8>", line 1, in <module>
original[0] ^ key
TypeError: unsupported operand type(s) for ^: 'str' and 'int'
>>> ord(original[0])
84
>>> original[0]
'T'
>>> bin(ord(original[0]))
'0b1010100'
>>> chr(original[0]) ^ key
Traceback (most recent call last):
File "<pyshell#12>", line 1, in <module>
chr(original[0]) ^ key
TypeError: an integer is required
>>> chr(ord(original[0]) ^ key)
'^'
>>> (ord(original[0]) ^ key) ^ key
84
>>> chr(ord(original[0]) ^ key) ^ key
Traceback (most recent call last):
File "<pyshell#15>", line 1, in <module>
chr(ord(original[0]) ^ key) ^ key
TypeError: unsupported operand type(s) for ^: 'str' and 'int'
>>> chr((ord(original[0]) ^ key) ^ key)
'T'
>>> original
'To be or not to be'
>>> key
10
>>> N =len(original)
>>> N
18
\
>>> n
Traceback (most recent call last):
File "<pyshell#21>", line 1, in <module>
n
NameError: name 'n' is not defined
>>> N
18
>>> message = []
>>> for i in range(N):
message.append(chr(ord(original[i]) ^ key))
>>>
>>>
>>>
>>>
>>>
>>>
>>> original
'To be or not to be'
>>> message
['^', 'e', '*', 'h', 'o', '*', 'e', 'x', '*', 'd', 'e', '~', '*', '~', 'e', '*', 'h', 'o']
>>> message = ' '
>>> for i in range(N)
SyntaxError: invalid syntax
>>> for i in range(N):
message = message + (chr(ord(original[i]) ^ key))
>>> message
' ^e*ho*ex*de~*~e*ho'
>>>
>>>
>>>
>>> result = ' '
>>> key1 = 45
>>> for i in range(N):
result = result + (chr(ord(message[i]) ^ key1))
>>>
>>> result
' \rsH\x07EB\x07HU\x07IHS\x07SH\x07E'
>>>
>>>
>>>
>>> key1 = key
>>> result = ' '
>>> for i in range(N):
result = result + (chr(ord(message[i]) ^ key1))
>>> result
' *To be or not to b'
>>>
| [
"daiga.sarkane1@gmail.com"
] | daiga.sarkane1@gmail.com |
097439d4e5e15a04cbe777f77fd0434256fd16d1 | a61ca7b89ef5817b2027239ece9dd175f776c8f3 | /rcsb/app/chem/LogFilterUtils.py | 86c6b9113eaef1e38f51a767d80d66d89057586c | [
"Apache-2.0"
] | permissive | rcsb/py-rcsb_app_chem | 7da2941f6e0d0f8ff0f5a802a3edb689d283659b | 64ca10e6ccf8b604fa3d16ab72406408b22c0aca | refs/heads/master | 2023-08-17T21:33:51.660687 | 2023-01-09T17:30:07 | 2023-01-09T17:30:07 | 245,858,180 | 0 | 0 | Apache-2.0 | 2023-01-09T17:30:08 | 2020-03-08T17:31:37 | Python | UTF-8 | Python | false | false | 866 | py | ##
# File: LogFilterUtils.py
# Date: 29-Jun-2020 jdw
#
# Pre-filter for Gunicorn/Uvicorn health check requests -
##
# pylint: disable=E1101
import logging
logger = logging.getLogger(__name__)
class HealthCheckFilter(logging.Filter):
    """Drop log records whose message mentions the /healthcheck endpoint."""

    def filter(self, record):
        # Keep the record only when '/healthcheck' is absent from the message.
        return "/healthcheck" not in record.getMessage()
class LogFilterUtils(object):
    """Attaches a HealthCheckFilter to every gunicorn/uvicorn logger."""

    def __init__(self):
        pass

    def addFilters(self):
        """Install the health-check filter on all known server loggers."""
        known = [name for name in logging.root.manager.loggerDict]  # pylint: disable=no-member
        logger.debug("Current loggers are: %r", known)
        for logger_name in known:
            if "uvicorn" in logger_name or "gunicorn" in logger_name:
                logger.debug("Add filter to logger %r", logger_name)
                logging.getLogger(logger_name).addFilter(HealthCheckFilter())
| [
"john.westbrook@rcsb.org"
] | john.westbrook@rcsb.org |
b696c2d48b27c31a6a2b374d40a31649b1a5774c | 28d5b9d208a861703840837ad6302cd7e3f84d42 | /process.py | 87a80d181d435e34caf88251aacfe4543cab462d | [] | no_license | jagmeet787/Malicious-App-Detection | d4f5ee9b747d05799886483b157c62a15a94420b | c5925a36aafcbc60ccd30c69685220644652c34c | refs/heads/master | 2020-05-14T17:58:33.647278 | 2019-04-18T18:26:04 | 2019-04-18T18:26:04 | 181,902,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | import time
from permissions import PermissionsResource
from database import database
from androwarn.warn.report.report import generate_report
from androwarn.warn.analysis.analysis import perform_analysis
from androwarn.warn.search.search import grab_application_package_name
from androguard.misc import AnalyzeAPK
class process(object):
    """Worker that polls the database for APKs awaiting analysis, runs
    Androwarn static analysis on each, writes an HTML report next to the
    uploaded file, then marks the record 'Finished'."""
    permissions_service = PermissionsResource()
    database_service = database()
    upload_directory_base = "./Files"

    def process_apk(self):
        # One polling pass: analyse every record currently in 'processing' state.
        taks_list = self.database_service.get_processing()
        for records in taks_list:
            # NOTE(review): assumes row layout id at [0], filename at [3] --
            # confirm against the database schema.
            apk_id = records[0]
            apk_name = records[3]
            file_dir = self.upload_directory_base + "/" + str(apk_id) + "/"
            file_path = file_dir + apk_name
            a, d, dx = AnalyzeAPK(file_path)
            permissions = self.permissions_service.getPermissions(a)
            # print permissions
            package_name = grab_application_package_name(a)
            data = perform_analysis(file_path, a, d, dx, False)
            # 'Verbosity level (ESSENTIAL 1, ADVANCED 2, EXPERT 3) (default 1)'
            generate_report(package_name, data, 1, 'html', file_dir + 'index.html')
            # maldrolyzer report and send back in the body
            self.database_service.update_record(apk_id,"Finished")
process_ins = process()
# Poll forever. The sleep between passes was commented out, which turned this
# into a busy-wait that pegs a CPU core and hammers the database whenever the
# queue is empty -- restore a modest polling interval.
while (True):
    process_ins.process_apk()
    time.sleep(10)
"jagmeet787@gmail.com"
] | jagmeet787@gmail.com |
8ada09c41d2c212d35513c2278b4de86e8462604 | 1619eff6f62daf2109134cf49f1a1f8c4a45d639 | /product/urls.py | eea12a79437bfc40a6f0fb188604935c3db8daec | [] | no_license | Loay159/esntls.co | aa92a4647cdecaa159f69b175aefa813c5af358d | 5cafef4267e1bdf115c5a49a0fdf66580721a478 | refs/heads/master | 2023-08-03T10:12:27.188706 | 2021-09-23T09:15:01 | 2021-09-23T09:15:01 | 403,773,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | from django.urls import path
from .views import *
from dynamic_rest.routers import DynamicRouter
# Explicit detail endpoints, addressed by slug.
urlpatterns = [
    # path('', ProductsAPI.as_view(), name='All_products'),
    # path('<int:id>', ProductItem.as_view(), name='specefic_products'),
    # path('category/<slug:category>', CategoryAPI.as_view(), name='Men_products'),
    path('products/<slug:slug>/', ProductItemAPI.as_view(), name='product_detail'),
    path('categories/<slug:slug>/', CategoryAPI.as_view(), name='category_detail'),
    path('cart/<slug:slug>/', CartAPI.as_view(), name='cart_detail'),
]

# dynamic_rest router generates the list/CRUD routes for the viewsets.
router = DynamicRouter()
router.register(r'products', AllProduct)
router.register(r'categories', AllCategories)
router.register(r'colors', AllProductColors)
# router.register(r'cart', Cart)

app_name = "product"
# Append the router-generated routes after the explicit ones.
urlpatterns += router.urls
| [
"88399225+LoayAshraf@users.noreply.github.com"
] | 88399225+LoayAshraf@users.noreply.github.com |
83275e5deed513d8e87891c2e7afdb10a3f39919 | 95588c0bb9861fd45a620512cfb798dd5cd4efab | /flask_qaq/forms.py | 42d937b06fe2fa045ba96ad2b9979eca2e8d6806 | [] | no_license | Nauman1971/flask_final_project | ddb5fb07023b7082933a747959098cec794a81ef | 084f92242ee7ad1bb14e5fc56e9ed1d8eae2bb3a | refs/heads/master | 2021-07-08T19:00:42.472388 | 2019-12-18T16:29:59 | 2019-12-18T16:29:59 | 228,384,907 | 0 | 0 | null | 2021-03-20T02:24:11 | 2019-12-16T12:41:32 | HTML | UTF-8 | Python | false | false | 917 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SelectField
from wtforms.validators import InputRequired, Length, Email
from flask_wtf.file import FileField, FileRequired
class SignupForm(FlaskForm):
    """Registration form: credentials, contact details, a role and a required
    file upload."""
    username = StringField('Name', validators=[InputRequired(), Length(min=4, max=15)])
    email = StringField("Email", validators=[InputRequired(), Email(message="Invalid Email"), Length(max=50)])
    password = PasswordField("Password", validators=[InputRequired(), Length(min=8, max=200)])
    number = StringField("Number")  # optional, no validators
    roles = SelectField("Roles", choices=[('Teacher', 'Teacher'), ('Student', 'Student')])
    file = FileField(validators=[FileRequired()])  # required upload; purpose not shown here
class LoginForm(FlaskForm):
    """Login form.

    NOTE(review): password Length(min=6) here vs min=8 on SignupForm --
    confirm the mismatch is intentional.
    """
    username = StringField("Name", validators=[InputRequired(), Length(min=4, max=15)])
    password = PasswordField("Password", validators=[InputRequired(), Length(min=6, max=200)])
| [
"nauman81007@gmail.com"
] | nauman81007@gmail.com |
5da9adf25b89d369656bd244e97b7be62c895405 | ce918cd9b23a4cd44861a8ba7448abac218c9761 | /0x03-python-data_structures/11-delete_at.py | 22abba5f9291ba9766a7213758d301401397b5fc | [] | no_license | adrielt07/holbertonschool-higher_level_programming | 80318056730a5c1bea0e95f7308b1499fab93a22 | adb04252e63ba714d2c65f25597778c89f6878a9 | refs/heads/master | 2020-03-09T14:48:29.225685 | 2018-10-07T00:48:45 | 2018-10-07T00:48:45 | 128,843,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | #!/usr/bin/python3
def delete_at(my_list=[], idx=0):
if idx < 0 or idx > len(my_list)-1:
return my_list
del my_list[idx]
return my_list
| [
"adrieltolentino@outlook.com"
] | adrieltolentino@outlook.com |
26288ab26b891bd354d684ad66484d83d8fb746a | bf0b7643d9d9f83ea2820df9847a11cf52c8a5bc | /app/models.py | a9b6171b3936f4c683ccee0846d8cfc24309bd39 | [] | no_license | ducknessman/dust | 269851e4f420e01da14247565d65deff7c7c2a17 | 5dad23fd39aa5302f2e17dd286beca7b82446fcc | refs/heads/master | 2023-05-11T07:44:23.914855 | 2020-08-17T12:30:45 | 2020-08-17T12:30:45 | 259,030,166 | 2 | 1 | null | 2023-05-01T21:40:59 | 2020-04-26T12:54:17 | CSS | UTF-8 | Python | false | false | 7,069 | py | #!/usr/bin/env python
#! -*-coding:utf-8 -*-
#!@Author : zhuxx
#!@time : 2020/05/06 19:04
from datetime import datetime
from exts import db
from werkzeug.security import generate_password_hash,check_password_hash
# User table
class User(db.Model):
    '''
    Application user account.

    _password: internal hashed password column
    password: public password property (the setter hashes the raw value)
    '''
    __tablename__ = 'user'
    user_id = db.Column(db.Integer,primary_key=True,autoincrement=True)  # user id
    username = db.Column(db.String(100),nullable=False,unique=True)  # username
    _password = db.Column(db.String(500),nullable=False)  # hashed password
    email = db.Column(db.String(100),nullable=False,unique=True)  # email address
    phone = db.Column(db.String(20),unique=True)  # phone number
    fullname = db.Column(db.String(100))  # full name
    status = db.Column(db.Integer)  # account status
    is_super = db.Column(db.SmallInteger)  # administrator flag, 1 = admin
    role_id = db.Column(db.Integer, db.ForeignKey('role.id'))  # owning role
    remarks = db.Column(db.String(500))  # remarks
    reg_time = db.Column(db.DateTime, default=datetime.now)  # registration time

    def __init__(self,username=None,password=None,email=None,phone=None,fullname=None,
                 status=None,is_super=None,role_id=None,remarks=None,reg_time=None):
        # NOTE(review): the setter hashes on assignment; password=None would make
        # generate_password_hash fail -- confirm callers always pass a password.
        self.username = username
        self.password = password
        self.email = email
        self.phone = phone
        self.fullname = fullname
        self.status = status
        self.is_super = is_super
        self.role_id = role_id
        self.remarks = remarks
        self.reg_time = reg_time

    # Read the stored (hashed) password
    @property
    def password(self):
        return self._password

    # Assigning a raw password stores its hash
    @password.setter
    def password(self,raw_password):
        self._password = generate_password_hash(raw_password)

    # Check a raw password against the stored hash
    def check_password(self,raw_password):
        result = check_password_hash(self.password,raw_password)
        return result
# Test case table
class Tasks(db.Model):
    __tablename__ = 'tasks'
    id = db.Column(db.Integer,primary_key=True,autoincrement=True)  # record id
    task_id = db.Column(db.String(100),nullable=False)  # case number
    task_son_id = db.Column(db.String(200),unique=True,nullable=False)  # sub-case number
    task_name = db.Column(db.String(500),nullable=False)  # case name
    task_description = db.Column(db.String(4096),nullable=False)  # case description
    task_url = db.Column(db.String(1024),nullable=False)  # request URL
    task_method = db.Column(db.String(100),nullable=False)  # HTTP method
    task_data = db.Column(db.String(4096))  # request payload
    task_result = db.Column(db.String(4096),nullable=False)  # expected result
    task_session = db.Column(db.Integer,nullable=False)  # session required? 0: no, 1: yes
    sessions = db.Column(db.String(4096))  # login session data
    task_auth = db.Column(db.String(1024))  # executor info
    task_env = db.Column(db.Integer,nullable=False)  # environment: 0 none, 1 stage, 2 alpha, 3 real
    task_time = db.Column(db.String(4096))  # creation time
# Test environment table
class Env(db.Model):
    __tablename__ = 'env'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)  # record id
    env_name = db.Column(db.String(4096),nullable=False)  # environment name
    env_single = db.Column(db.Integer,nullable=False)  # 0: none, 1: stage, 2: alpha, 3: real
    env_url = db.Column(db.String(4096),nullable=False)  # environment base URL
    description = db.Column(db.String(4096),nullable=False)  # environment description
# Test report table
class TaskReport(db.Model):
    __tablename__ = 'taskreport'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)  # record id
    report_name = db.Column(db.String(4096),nullable=False)  # report name
    success_count = db.Column(db.Integer,nullable=False)  # number of passed cases
    fail_count = db.Column(db.Integer,nullable=False)  # number of failed cases
    error_account = db.Column(db.Integer,nullable=False)  # number of errored cases
    # NOTE(review): column type is String(100) but the default callable returns a
    # datetime -- confirm the driver coerces it as intended.
    finished_time = db.Column(db.String(100),index=True,default=datetime.now)  # report creation time
# Test result table
class TaskResult(db.Model):
    __tablename__ = 'task_result'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)  # record id
    task_id = db.Column(db.String(100),nullable=False)  # case number
    task_son_id = db.Column(db.String(100),nullable=False)  # sub-case number
    task_url = db.Column(db.String(500),nullable=False)  # request URL
    task_data = db.Column(db.String(1024))  # request payload
    task_result = db.Column(db.String(2048),nullable=False)  # expected result
    task_response = db.Column(db.String(4096),nullable=False)  # actual response
    task_status = db.Column(db.Integer,nullable=False)  # 0: success, 1: fail, 2: error
    finished_time = db.Column(db.String(100),index=True,default=datetime.now)  # execution finish time
# Role data model
class Role(db.Model):
    __tablename__ = 'role'
    id = db.Column(db.Integer, primary_key=True)  # id
    name = db.Column(db.String(100), unique=True)  # role name
    description = db.Column(db.String(600))  # role description
    auths = db.Column(db.String(600))  # permission list
    # NOTE(review): uses datetime.utcnow while other tables use datetime.now --
    # confirm which timezone convention is intended.
    add_time = db.Column(db.String(100), index=True, default=datetime.utcnow)  # creation time
    admins = db.relationship("User", backref='role')  # users holding this role
# Permission data model
class Auth(db.Model):
    __tablename__ = 'auth'
    id = db.Column(db.Integer, primary_key=True)  # id
    name = db.Column(db.String(100),unique=True)  # permission name, unique
    url = db.Column(db.String(255))  # permission URL
    add_time = db.Column(db.String(100), index=True, default=datetime.utcnow)  # creation time
# Administrator login log
class AdminLog(db.Model):
    __tablename__ = "admin_log"  # table name
    id = db.Column(db.Integer,primary_key=True)  # id
    # Foreign key to the owning administrator (user.user_id)
    admin_id = db.Column(db.Integer,db.ForeignKey('user.user_id'))
    operate = db.Column(db.String(300))  # action performed
    ip = db.Column(db.String(100))  # login IP
    time=db.Column(db.String(100))  # timestamp
    add_time = db.Column(db.String(100),index=True,default=datetime.now)  # login time, defaults to now
# Operation log
class OperateLog(db.Model):
    __tablename__ = 'operate_log'
    id = db.Column(db.Integer, primary_key=True)  # id
    # Foreign key to the owning administrator (user.user_id)
    admin_id = db.Column(db.Integer, db.ForeignKey('user.user_id'))
    ip = db.Column(db.String(100))  # login IP
    operate = db.Column(db.String(600))  # action performed
    add_time = db.Column(db.String(100), index=True, default=datetime.now)  # time, defaults to now
# Task-run table
class TaskRun(db.Model):
    __tablename__ = 'task_run'
    id = db.Column(db.Integer,primary_key=True,autoincrement=True)  # sequence id
    running_name = db.Column(db.String(100),nullable=False)  # run name
    running_info = db.Column(db.String(1024),nullable=False)  # sub-case numbers executed
    create_time = db.Column(db.String(100),nullable=False)  # creation time
"1160154212@qq.com"
] | 1160154212@qq.com |
937ca15e0fcc69c211f17b69de785760dbc1afb7 | 9e87897c988af634c3fddc42113992a65ec006f4 | /sims/repfam_fs/test/metrics_v2.py | e6aeb826dec6e7fd91e6f424c75731b25e4345ea | [
"MIT"
] | permissive | luiarthur/cytof5 | 152eb06030785fdff90220f0d0a244a02204c2e9 | 6b4df5e9fd94bfd586e96579b8c618fdf6f913ed | refs/heads/master | 2021-07-20T13:39:45.821597 | 2021-03-02T23:27:35 | 2021-03-02T23:27:35 | 145,253,611 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,927 | py | import collections
import os
import sys
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import rcparams
KTRUE = [4, 5]
def parse_Rs(path_to_Rs_csv):
    """Load the Rs summary CSV and rename its 'mean' column to 'Mean'."""
    frame = pd.read_csv(path_to_Rs_csv)
    return frame.rename(columns={'mean': 'Mean'})
def get_ks(results_dir):
    """Return (ks, kdirs): the lexicographically sorted 'KMCMC*' subdirectory
    names under `results_dir` and the K values parsed from them."""
    kdirs = sorted(os.listdir(results_dir))
    strip_prefix = lambda name: name.replace('KMCMC', '')
    ks = list(map(strip_prefix, kdirs))
    return ks, kdirs
def parse_log(path_to_log):
    """Parse the metrics section of a run log.

    Everything between the first and second 'metrics:' marker is scanned;
    lines of the form 'NAME => VALUE' become {NAME: float(VALUE)} entries.
    """
    with open(path_to_log, "r") as handle:
        contents = handle.read()
    # Text between the first 'metrics:' marker and the next (if any)
    section = contents.split('metrics:')[1]
    parsed = {}
    for line in section.split('\n'):
        if '=>' not in line:
            continue
        key, val = line.split('=>')
        parsed[key.strip()] = float(val.strip())
    return parsed
def count_num_small_phenotypes(path, thresh=.01):
    """Count phenotype weights strictly below `thresh` across all W*_hat files.

    Note: zero weights are counted too -- the stricter `0 < wi` filter was
    deliberately dropped (see the commented-out line in history).

    Args:
        path: directory containing whitespace-delimited 'W<i>_hat*' weight files.
        thresh: weights strictly below this value count as "small".

    Returns:
        Total number of small weights across all matched files.
    """
    # Raw string: 'W\d+_hat' relies on an invalid escape that newer Pythons warn on.
    rgx = lambda f: re.match(r'W\d+_hat', f)
    w_hat_paths = list(filter(rgx, os.listdir(path)))
    num_small_phenotypes = 0
    for wpath in w_hat_paths:
        wi = np.genfromtxt('{}/{}'.format(path, wpath))
        num_small_phenotypes += (wi < thresh).sum()
    return num_small_phenotypes
def compute_num_selected_features(path):
    """Per-file count of selected (strictly positive) feature weights.

    Reads every 'W<i>_hat*' file in `path` in sorted filename order and counts
    the entries greater than zero in each.

    Returns:
        list: one count per matched file, ordered by filename.
    """
    # Raw string: 'W\d+_hat' relies on an invalid escape that newer Pythons warn on.
    rgx = lambda f: re.match(r'W\d+_hat', f)
    w_hat_paths = list(filter(rgx, os.listdir(path)))
    di = []
    for wpath in sorted(w_hat_paths):
        wi = np.genfromtxt('{}/{}'.format(path, wpath))
        di.append((wi > 0).sum())
    return di
def get_metrics_for_each_dir(results_dir, thresh=.01):
    """Walk `results_dir` and collect metrics for every run containing a log.txt.

    For each run directory the returned dict (keyed by the log.txt path) holds
    the parsed LPML/DIC metrics plus W-matrix and R summaries read from the
    fixed img/yz/txt and img/txt subdirectories.
    """
    # Create dict to store results
    out = dict()
    # Traverse results
    for root, dirs, files in os.walk(results_dir):
        for f in files:
            if f == 'log.txt':
                path_to_log = '{}/{}'.format(root, f)
                # Parse LPML / DIC
                metrics = parse_log(path_to_log)
                # Parse W
                path_to_W = '{}/img/yz/txt/'.format(root)
                num_small_phenotypes = count_num_small_phenotypes(path_to_W,
                                                                  thresh)
                metrics['num_selected_features'] = compute_num_selected_features(path_to_W)
                metrics['num_small_phenotypes'] = num_small_phenotypes
                # Parse R (posterior summaries; one row per sample i)
                path_to_R = '{}/img/txt/'.format(root)
                R_df = parse_Rs('{}/Rs.csv'.format(path_to_R))
                metrics['I'] = R_df.shape[0]
                metrics['R_mean'] = R_df.Mean.to_numpy()
                metrics['R_lower'] = R_df.p_02_5.to_numpy()
                metrics['R_upper'] = R_df.p_97_5.to_numpy()
                # metrics['R_mean'] = R_df.p_50_0.to_numpy()
                # metrics['R_lower'] = R_df.p_25_0.to_numpy()
                # metrics['R_upper'] = R_df.p_75_0.to_numpy()
                # Parse Rprob (transposed so rows index samples)
                path_to_Rprob = path_to_R
                R_prob = np.loadtxt('{}/prob_R_equals_K.txt'.format(path_to_R))
                metrics['Rprob'] = R_prob.T
                # Append to metrics
                out[path_to_log] = metrics
    return out
def get_exp_dict(results_dir):
    """Re-index per-run metrics by experiment setting.

    Returns a dict keyed by (z, scale, seed) tuples; each value maps the
    integer KMCMC value to that run's metrics dict. Relies on the fixed path
    layout KMCMC*/z*/scale*/<...>/seed*/... under `results_dir`.
    """
    # Get Ks and KMCMC dirname
    ks, kdirs = get_ks(results_dir)
    # For each directory,
    all_metrics = get_metrics_for_each_dir(results_dir)
    # Experiments dictionary, indexed by (z, scale)
    exp_dict = dict()
    # Split all the keys
    for key in all_metrics:
        path = key.replace(results_dir + '/', '')
        kmcmc, z, scale, _, seed, _ = path.split('/')
        kmcmc_int = int(kmcmc.replace('KMCMC', ''))
        scale_float = float(scale.replace('scale', ''))
        new_key = (z, scale_float, seed)
        if new_key not in exp_dict:
            exp_dict[new_key] = dict()
        exp_dict[new_key][kmcmc_int] = all_metrics[key]
    return exp_dict
def graph_for_setting(setting, exp_dict, metric, label, labels=None):
    """Plot `metric` across KMCMC values for one (z, scale, seed) setting.

    Draws onto the current matplotlib figure; the caller is responsible for
    legend/layout/savefig. `labels` (when given) maps curve labels to fixed
    colors for the 'Rprob' and 'R' modes; only two labels are supported.
    """
    d = exp_dict[setting]
    if metric == 'num_small_phenotypes':
        # LPML as a function of the number of obscure (small-weight) phenotypes
        lpml = []
        num_small = []
        ks = []
        for kmcmc in sorted(d.keys()):
            ks.append(kmcmc)
            lpml.append(d[kmcmc]['LPML'])
            num_small.append(d[kmcmc]['num_small_phenotypes'])
        plt.plot(num_small, lpml, marker='o', label=label)
        plt.xlabel('number of obscure phenotypes')
        plt.ylabel('LPML')
    elif metric == 'Rprob':
        # One subplot per sample: P(Ri = true K) vs KMCMC
        K_min = list(d.keys())[0]
        I = d[K_min]['I']
        if labels is not None:
            if len(labels) == 2:
                c = {labels[0]: 'blue', labels[1]: 'red'}
            else:
                print('NotImplemented!')
        for i in range(I):
            plt.subplot(I, 1, i + 1)
            ks = []
            Ri_prob_equals_K_TRUE = []
            Ks = sorted(d.keys())
            for kmcmc in Ks:
                ks.append(kmcmc)
                # A run with KMCMC below the true K cannot recover it
                if kmcmc < KTRUE[i]:
                    Ri_prob_equals_K_TRUE.append(0)
                else:
                    Ri_prob_equals_K_TRUE.append(d[kmcmc]['Rprob'][i, KTRUE[i] - 1])
            plt.plot(ks, Ri_prob_equals_K_TRUE,
                     color=c[label], marker='o', label=label)
            plt.xlabel('KMCMC')
            plt.ylabel('Prob(Ri = K_TRUE)')
            plt.ylim([-0.1, 1.1])
    elif metric == 'R':
        # One subplot per sample: posterior mean of Ri with 95% band vs KMCMC
        K_min = list(d.keys())[0]
        I = d[K_min]['I']
        if labels is not None:
            if len(labels) == 2:
                c = {labels[0]: 'blue', labels[1]: 'red'}
            else:
                print('NotImplemented!')
        for i in range(I):
            plt.subplot(I, 1, i + 1)
            ks = []
            Ri_mean = []
            Ri_lower = []
            Ri_upper = []
            Ks = sorted(d.keys())
            for kmcmc in Ks:
                ks.append(kmcmc)
                Ri_mean.append(d[kmcmc]['R_mean'][i])
                Ri_lower.append(d[kmcmc]['R_lower'][i])
                Ri_upper.append(d[kmcmc]['R_upper'][i])
            plt.plot(ks, Ri_mean, color=c[label], marker='o', label=label)
            plt.fill_between(ks, Ri_lower, Ri_upper,
                             color=c[label], alpha=.3)
            plt.xlabel('KMCMC')
            plt.ylabel('R_{}'.format(i + 1))
            plt.yticks(range(min(Ks) - 2, int(max(Ri_upper) + .5), 2),
                       range(min(Ks) - 2, int(max(Ri_upper) + .5), 2))
    else:
        # Any other scalar metric, plotted directly against KMCMC
        ks = []
        ms = []
        for kmcmc in sorted(d.keys()):
            ks.append(kmcmc)
            ms.append(d[kmcmc][metric])
        plt.plot(ks, ms, marker='o', label=label)
        plt.xlabel('K')
        plt.ylabel(metric)
        plt.xticks(ks)
if __name__ == '__main__':
    # Results directory may be supplied as the first CLI argument
    if len(sys.argv) > 1:
        results_dir = sys.argv[1]
    else:
        results_dir = 'results/test-sims-5-5'
    print('Results dir: {}'.format(results_dir))
    # Get a dictionary indexed by experiment setting (z, scale, seed)
    exp_dict = get_exp_dict(results_dir)
    # Metrics to plot
    # metrics = ['LPML', 'DIC', 'num_small_phenotypes', 'R']
    # metrics = ['LPML', 'DIC', 'num_small_phenotypes']
    metrics = ['LPML', 'DIC', 'R']
    # Name of metrics dir
    metrics_dir = '{}/metrics'.format(results_dir)
    # Get unique zs
    zs = set([key[0] for key in exp_dict.keys()])
    print('zs: {}'.format(zs))
    # Get unique seeds
    seeds = set([key[2] for key in exp_dict.keys()])
    print('seeds: {}'.format(seeds))
    # Get unique scales
    scales = set([key[1] for key in exp_dict.keys()])
    num_scales = len(scales)
    print('scales: {}'.format(scales))
    # sorted exp_dict keys
    exp_dict_keys_sorted = sorted(exp_dict.keys())
    # TODO:
    # graph Rs
    labels = ['scale={}'.format(scale)for scale in scales]
    # One figure per (z, seed, metric); curves within a figure vary by scale
    for z in zs:
        for seed in seeds:
            for metric in metrics:
                for setting in exp_dict_keys_sorted:
                    zidx, scale, sd = setting
                    if z == zidx and sd == seed:
                        label = 'scale={}'.format(scale)
                        graph_for_setting(setting, exp_dict, metric, label,
                                          labels=labels)
                        dest_dir = '{}/{}/{}'.format(metrics_dir, z, seed)
                        # Legend placement depends on the metric's subplot layout
                        if metric == 'R':
                            plt.legend(loc='lower right')
                        elif metric == 'Rprob':
                            plt.legend(loc='lower center')
                        else:
                            plt.legend()
                        plt.tight_layout()
                        # Make destination dir if needed
                        os.makedirs(dest_dir, exist_ok=True)
                        plt.savefig('{}/{}.pdf'.format(dest_dir, metric))
                        plt.close()
| [
"luiarthur@gmail.com"
] | luiarthur@gmail.com |
2e3fcba553d7bb1fd1bf38c8ca6fd92ca74e91d4 | be862c96024320595afd7736605f94928be5f90d | /Crawler/私活/person_info.py | a8ea63dff59472dc088f8a2670225911586b8e2d | [] | no_license | SmasterZheng/leetcode | 1b35b72efc6ea6add605d08b13d7abcffbdf05e8 | 11b33fcbab47969ebd0bfc2f41cb50a976881910 | refs/heads/master | 2023-04-24T14:45:53.285565 | 2021-05-15T06:52:29 | 2021-05-15T06:52:29 | 264,981,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,091 | py | # -*- coding: utf-8 -*-
# @Time : 20201228
# @Author : zhengxz
import requests
import json
from bs4 import BeautifulSoup
import pandas as pd
def get_html():
'''获取html'''
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.9',
'Cache-Control':'max-age=0',
'Connection': 'keep-alive',
'Content-Type':'application/x-www-form-urlencoded',
'Upgrade-Insecure-Requests':'1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36',
'Referer':'http://data.gdcic.net/Dop/Open/PersonList.aspx'
}
viestate=['sswqqHmNQbdz4ESZ2/lbsMZ+gaP6EONOTUxVV5aaChjmsm8TtKiPd3QzRVl4AEJMfzje8IBLu2evmAs/QIcXYsGP7xOoGesfbU3uKjy5RgX/D47pTIRMOD3UWnF0r7CdXUisftn602181Oa171SELtYkRYJxoswXEhdlqP9G4rXoSj3RWWorYAyc9naHRxIOjm33LF5d9HhbIlqXOfYFzH0RDNV7m2hO9gIk6UmDsHoqIofhqBval4N8k+qmiPcyDiqPvWchCtsIgrMxQ/m/FrcG+hFYop347Br4JS8xHJGnrtKDDG3dg0GRMLgf8DHp0loYWrUdZtxEHnBx41a/IlWjzEwZrHmJtW4lSGxfWk/ZSz6Wv+UDBE0RF5wdeu5i/j0oz8EUSRia1IQFrldL379rIoomevIfyVOOiS8Y5UpeUzoCLxlap2xvojljdXCJRDcKSsnsP1EUYRkIrVjQgPjfvX2dFR4pl4c+GypsvMUaOE+4Ip5a6bb4JNKkKIaiE9MIin989MR9xfvI6Noce+1QrPYJJrmhLw9x6VnwrP23vFvUyo29yli6T805pCL3JF6gtdBdmISbb1dRNB3DWFBqGc9zJeL2hVq+GYXYSKOXNgRV1b4uQzmAWTMPuCrONNDGlax7bijgVJuE9ksac5bHcOYBPGa00q/mw7mfERRjwv6c+4ilZNXNUmaMMctmLFwD3Mu1XOJNg9jakjUrswWUQTETcK8Q1RBbBQv6yIChbOvWYN48/v2P1MYs8y5SEYoVKbnA6XkGyQce6wCq/e2KogoxnQ12xY+UcvYpE8CAzLVWzH//4EcA6QhoQBlo0tF7QIEU+uHJ17J2B5vlTX6vw+JkB5M16+ipKx1Dgvwr/TP29uk3ZchV2yl+UwwvGA5x1UXvz/HAvIwqIKGX6THjVMYcG8ewNIH7CpYXlmHaH+3U/euRXvyFlAfWc/eM6b6c06d/FHsCOK4YkzIejJ7fBo0eaIqLaf1OmcVbzmKuhy1j4QI9ziVuj82DOqYqzHLhbdGUNEzSEb2s7cIuNYq1NOGlyD7hHB6xKbqOxSfBcnO9ZGxrjjNERXITWnSCptl0As9cxMVe2G63CX+l3K8ey7wNyqKG935b2AHydbcrfy4ubT20cNKNXHKa7JhOJPvIa13gio+9Deu53I8S5ZY71g5JDh8PnNHwVywlTkZA44CZaun1QQQImv1WJWr5+DmHiiVqy5AhES/l+cfPxs6KGF7GHs86Wzh1phEQrFm/UoktFnOZWWpdEu1k98XIvrT/DfZiK07/BHiRxmBnMb+Yt2fFxEvFjbIclhg095HGf3YpGt+9JQETUgEN4EzutyAMaqqfx7JudmbLXFEwJ9Gu8ajHeCsZv9Kiga6OdWULvxgJpwKtem7aXvpzWSCkvleM1ApQmx9GQpbUoKjFeePclUvUUBjp+Hx2f/IL75SaDdI3i8BGx5Kkb2NjmYf/oexoQ1xms+/klY7U2G1X6ofwusXxzR1KMMcPbvvLIgLmBpaeMWwxPDCca4mMehMWzj+1mTGKjSBOudTsXoQtvbZHVLMDVkfWDNp5zBabGjnO0hVjMMJxkPeVvwBVNuLG5bTe8CBhfmN9CnD5uY5DMCZIA/4nj5WjhqxLKInelT4bylkJKnuCyByGJYjsAYY5LhY2bc/CJIEBdWiP7+/11aq1omE02uhLz0YrToTu6ab6qt2SsiX/MG82EjNyklZ+AydaihhTrnWZaBRipBg3p6pUjy4FC5tD6trpklDcvAWdDkIrPjxedz8UXwuBCXqWuilVR3pc30Xblx8kuRC+48/pA0JlDcuGSv7jAezJskirrjdMRtfpkg5OtXGU2S3kDBKLRjrjMbLlOMuWPhOqkQNekrWIcu2tWalR23DGtMRQqbvM+7O1d+i09C4crD+jzTGwXoPdFq3bFyAAXXOeipCynhLI0/XWI+ZWJSY0X3TIt29+PAJ/1EY8Cn3XE7hGRZkr8
FY+oa6U63xIzfbiUC7Iy46KMKEY1K0hf3R9afn2XQIJZGLRp3hV7j0j/z7DRxCQ4YTe9TMQxzRZJ3D5RE10h1Ptm2bKhvDATmB5oWY0XBjBFi2UdYNu+dHqKk9U3Mrb4i4uBYD8SC/EBoBiZVtDQR/okdbWcu8OU5XUeYncqsD+9KWmhbvpET3bmqNBXZx83fF5jdzGujZ3Gr5IpiS1dv2r0cNvjs2LN7oPO4tztzN7MdlI18WtZBBT/iRZRgxkPKk4wLbHXKmGrcs/2z0f2E+5xfgAxb1G0fR5mZJ13kzg0OnX/BADxgpYD/W56kHv+YM8edQpfenDKyy4Ts9bqsYAYgDE1V+WIWcoq4qbdFvJCewDYnHLZCdc6I4d7VvqfARZP6Y9bEQDwkLPIBI7KKNsPo3Go9VA1cL9mJkH+aYdXsKVE2xZ5YXtQjtIMW0cSxpouGSmoFbyt2FZ+Lc2nakEJTIruCRJgc5MByYyM4FUbrFaOpDBSTyBzNxxkeMcZ3AcQuRFFeECOmKZql8lUjaRm0isBxVMTk4vc/PyT2XB23Ku4zdDR4EmcmocIdHLIfa2NE6HtR4Zq+gHf1g3Ny8YeNsgj5aE2WYi9vBU2d4INtZbaf9Bzq401tqAzmM1Oh/rliPasUphiZ+UTCR6/E8XM4XZsGh7lCV0nYei/OG//9TgBBU9YNGxSZdNiz4enQomWSJ2vvneV7VRvA==',#第一页
'B80wZitV9IPUrS7ZD0f87PXHtL36SKVVq6VIOaDnQgI5IpDrvjz76yJKzOiP7kZkXM9szd/43cnqW+PRJTyWQQL5gsvDVlwCYFdoN48gpjxTJ7OvzjI0AKLpt/BVQbWXjCf9dUwyWloEF0tT66Q0z34PMhigeL7VZ8MqAGKG5iZb006Et14tpme+ZQNxmvkQRtWsD3VHFbvSzi44nEqpx3doItGJ/yaLkoR2qWDPVkCRxB8Ttsd4OzZ1Jm2H2uXvlePgE6QYWihn8rft7hSCo+cNb2QYd1zceTwmJDaswN70CCEO1CxFGf7LRWyFfXRsPru+vwzLVbU8pu59sH5zEh0/+iUJOxFB/sI7XRXfqziswKIeHOCMUQ8PKI+gwa9yEAqqFC18y45Idmck8f4JsBQP+MFJSiZn8GeScjbmdMbKNNH5TcxR1It3YuNjRXA90Ro632n3TXGH7iCozKr3Zj+8StVMIR2bOR5/6ElYA3QZBAKLdjwEtdyQU7wgU/uTkI/tetd+9rRYRVpsV/PIiyhkpOGKWbudOb9N4BiMEkwdPEbL+Wn9Ycgm71AN12w8ZJnaZpY01TvfUtybm7wvU8vlOfISCfcd17Cey/WhSDElvlJ3xDkEanY4+AGEC1LJ+647HXr03LXOINvqzD8muoQT0EOZvqweNueh+k14Ayfph92NxqkWORkbhI3vRETKLlRjezNBu2zy3I8fAE/OOzCmf5xA8GkkEE9ChPY2UUpPN97RisGxz3KpOnK5s8thKF/C5NYPo/vm+1PRctWdVBp9sreEJA5qhLUhm7WZW/PVQ7hH0cNbQ6HfZzTLN5l7F7KepE+AgG9Sh19oE2P7xS8nNnbL7TCUCkxzTx5W5tybpKGsVgzsnLgPrQ2Y07N2vKLjjdZKjspT4+uk7gS4bHwmTwz6Lvk24hmZGZ6KJ99co23clRfhH2Y40shjc3SGWnuZ2fNKi6uzpLDS5qHjfdWtPvXNgbjq30tKELG76DnYWtO/i5om8fzhLRZ/LEKHw/t5HDOvIz8mnleH5bfYMdbtrITxuhFSHcDDOHwWUPbGXCyMWhzbwiWlc3oyUx5+Glh92zuGXYbVg2hLzE5KsZ42oVapi14NnNhH74QvUpGfN7qFzi8b15jdPWnB8ko/CCBKG3KDxQ9bpWLqscZJhTgIOEDw9LeEKwQnm+rIcc+BEu2ZlJmHGbnHFhZlP1s1R/Lzk97OLWt9EugZ5HTaVQ+PoiMes/CW+W3GAsa9A1jNFoGSDi9c6X/RRgHEE++h1T4O8e2dcDtDy0qqjueJ0w1+0AogpZr56H29zYM0ooV75sXmw88iuuA9hp+R2ZdgsIj9Pd2iAjmsCcsKyRnIB0TFK4EDsFO7B1Net/fiQ7ws3hbF+kM+KFinYwg1qbODLSuG5bS+raYjYzyuoH3sc19FZiTDHwiYfbe++tN22S/S0XbimyZzhhbFoIC4OrlliYTC2kwLtAkSeSCOpf3yalMQtU4EXE3pPst+oYE2kVCcpVY1V6+bngET0Li9sf6aSIuwFIB5QvdJlPaN/8EEjWl3j0XH6bOgUYrNOMJ1Ksd5z2f5zK659Zs7VCKardFmYHRciengYqicr9PPGKOhA6oNpaJy24x3Gca6R17KmzesPy+4bA22ib4yAbLJPJMvPUGOCXsf0VNhUqoXo5IlCXy07hRxQ+RZDoIhBWIGIPcicL1Fs9YNBIGmDGuu1XQxDSsvqdhJ+EXKUO5jrRIJm0jflq4yaN5TcgUgpsecUrMXvjvNywY/EuK3eETDRhXfzuWjb77741ocG9T2HYqk+Xo9rAtHWDJJqJR5Nh9gW2LgJA74E/C5qIZzThLgVYx4mcSXJ8FmOwJzAI8tZu1j9puDJN331a3rN3w2MU0Of/cj6AHVKtJnHmuX2QqO5FUE88X65Ju9czLVgNHAoZMJI54uJV3BSeoeOB4FNIGwvlZCFEYd45ZVvLPFIp/JEivDwauuRhvNqhPGOyH
dX3FfWd6T2JTIcrXskSdTSRpa2oBImWqaX7Qmr6QEpr93ay764/wrZyWtVwLLic2hyEe3zh2HZ0+1DXBaCNkq/VcZoBWtWHU3dLKo88xt8YiArRrWpLDX4PI9BvKn0/1jh5Cm+ab1m9hz6wieRxK5O3T7wCF2ujBwsw/6OvLnOpNaRRyLyFCe7+AxV2MibEd4AR5pjo28F7ExIG3nLm0R/HyUWsqghxVmxUtiwzMJCkjS/xjm5rVe4vnqHzSwEoa9bfqYTSCUzQ72EiK5XdDzw82pz/FphBsiG4HYozg/4AN+KprgR5PMWSQ3ze9/8HwIADkHIBqveoOgfvlEqBGWVpqN/naVRCOmFzSQPPnH6GKxDqwM1IicbimzOjKPW4Ent4EFaFFqYPSsumB0D1kNwMvfPCRmk+eagWI6omSGdAfDGeRl2GLm0zKLCfqT6tWifXulvW0cS/Zdj+0LmzbRq5JIGG6HKGY/6Jzy+4SL1rb30YGe+7UBGZhP8dz09nqIP1//1omouLKR1L0YEZE0onM2k0xVV2+bWfOFtljrTqkXKtGEyzpzCR7iF7aXpepUzH+D4XK/ejIeFGHfGwJLfBPULEyh1f8HR4kSb/1XgFORBOwN25gcXhhyvhEcj4TtMqZTfdrbB4hsoYJm1mekNNI0pWmDBgngZvresxzfNQ34WnIO', #第三页
'BF+fNowy8Dw8t7tB0/1vSlSCYcJyTbxeeJ3m/FGuW+LGJkiPasopAPlWqQqWv5gq2OIKaO49Cn7sc5UyyRmaB2bdUZp8WDFTdrO8nef17XxXprS6/lysR25bP7v7Pwchel+uXL2/akk5IkU88FGWA3leSxzFhCMlwOEiudifjyOqmUDSni2ozUwo2AM/VxUmak4wfDjsPVDM+PsYrhJoQVyCoiHDGxDU56wkXFesI+fqBZ8dX7TSqU/dOv+Z1X65pARgfONXvc8vZbSDtQP114b/VEyx669Pb9p0sEngOd7QZEz4sS50/nJh0wwNCstMviIxBFTTJZAJ1XN6JR+aJP/raBVYWCeJR33rL834VhRCM5zJ1ssYlO1S+2BXcEV6rYWnKEjzfDDmWxk1RLryvaZdzN+OPmC7z7+FmmA0f6zKIFDkY8fuYMjPLaOjf5PcMv4w8t8PWZYAfR6ICabu41uB0Q1XOaPVqKFSYcmTYIxN5rx7bgEyVr+IaOdvT7dsW72sMAZuNlONrDZaoC0U2EbMuPAeWIYfwKOePKi2rXbpuYAo9ShaqCaJy9HOnCitjLoHuxwkimO7R6VuVwoZ0E88nxDR3NJ0yUGqIBzjU9JxeMOdGZ+5u3LtiIQxF+h6sOmKGGDPzE6KAQ0dZzkzFWsaxw9+bB70VAdyRBFfq9L6dFZIvnSbSzr2jlC1EEC2wbRw5OMPR/LBismjJsuPDfX9o65DAqWLgXzdEv8DyMiao7IEru2JIo6UjsSXtQVk6fmZviIZbRogq8MILD9xmGNhh6xhU1uBpODav2LWNU+W/2s3tFNerDhBaWNdlNfKvUbcf0FJtbQJJfVSS4wEhFrGU58FYEzuUyiqduG7qrU3dKFgCTaLhIRaT2i9YczQGh+YSqP9iWlVLKqj3MSLO9OcE0wHy7rtW4wrDeV25ery2LoQYjiQQ3eR2Gvy0hNtUeKFGXLdb4qzrrZMMkwYimnCF5XgeNgsu41/ilzt/jE+AJNxeTS2AnP9toyqH47CxcYw3y5v0JoYP4mxlnZaaVdEsn3rTp6z1mHN99ohc9VzsK1+OIeqmWNb2TrvsHzpFWqi+r+EpEDVdEfTr7exRPOzFAJryTQp8yYKJayPqtpW8Gtqqx1hre1kE6uwioBL0ybGneNvNjQoF21rTc7kIRllMx1HZGNb5kFbxHc/DbLNx/xYd0IYEOOUV5AFJAtr77s2XOfwYxyuh1OhXJrQUVzrwmw3jl6eeFOSLczQ+1ulWvYEW4lA9DwDfOw7S8J0TAtmw6MHusHipolKkErv2KlRbs6oCSloeicu9iOH/61OMEB9JMfWstTM8lfbuhf/0Cb2hRzCo+4urr7C3U33Cp4j4OSRA3c/8J9YGBS6IJetg9en2EQyNZ5BSNU+tvRCtnsCGKbz41CYIj8I6QFYH1XMw8G2VCk9jYrmBO0dlRNbJshBrGcfxGWXT+2AgRGT4VstxAApHXv/rH1KfE0KzkxY6mXyFPrpcO1LfG9UpJFxCW3OjbsbEuTdd8V7IutBKOx7ou5YlRnfWhsY6UESiok/pEUCkhY1JAIXbFI150pRS+r1wvq4AVzbSkT6M6kHYWVlPf63JWpsRE9iHHNXDO0s6euCjJNSqUkcnr15AO7kBmVeD5HObhfKj1GDZHMdXNGC74E9NDpAtN9lZ2fxIT53o7glrBy7B/wQtVyif0nRUoMCe8OKxYW2XrYXOG56Xc1Kzu4FZnryQwnebAgZIDfpqHbbNMXHozKGe48Qd360AdiW2PWRRuDQ1nE0AnKyjO3oAdstpQMrd61fnrCnHPBCsrONBnTRMALInmnM29VRe5yHkJW0x6MxtCOhuz/ZAFvsDyZwH5ClD3r1fB+s4YR1PSZN0gCHHWCcWLLmZQ4+J4pbANJ/nCiV8aEeiUkElTlMWXleNwHa6z0xmr0UOTr024Gjrz0a38qHAzBsvvwfK7y3M6aPn+J7n/IFjJhImENTTOGTrLJgX0j
X8uSF9IJyvOwITLBEm4aRuowaXjIZypSNXnacstdz+dnmE1gay453TRqphyW52qftEJs/M4qVmFFD5CiTFgnVjjUFsWoL+Eydl5C+XrK4kGqILisorW1th6CAbtm6ticNk/rxzUqCYODIRYwubB4e+qnXU4oAcA11rVVDFK/So57QYiNaICwDgF/zKGAktTMB72kSFaLHX3xYFysGHTZ61c5ewzq1GQYrEeSH+n1e7hYCuhsfq6aHHZJ3xUKuXdoo6/OSbdV9XZCxJo9UEJ25STF/r0TzKK5YFOfPfmIxgEUArp7f9dOhmrXMRajEOMidZ3OOBYON6oor1HPCCMnwjSaPU3UUP+0RrJ6gHu5/UgrNB2VN07TV7Vc4hnloRrl0hodCo8rguiN3aM6p21xjHxwp1vNBM0PnjdBWb+lRymUamyYEYr58eBi9XPEv1hE/e5xaOxmiDUypE2vCzQpoF9e6itoIhTIPLr6u6ep68GJ6XuxzyKdBfu92ZJ1B3kyJGk1iO88zN06X3gfvQruBlRbqe+82eFZamlLy74A99IAtuwunris3VQb0aCG/LblfaakoS/w7wKc92D3pO4XlVtOsyEUSZAZZ1yrX2y2k1D5Z882uuVCNx9cLzEpRxVQsH1Gvk8FepDgL0+UI7q6ZcbwncWubztI95SERNnBvwiLRu0vJ', #第二页
'PjvvX2rARc18fkcmW7/6uVUGhFc/yfAF/NGF3MXmc9vrCoP34/HoYCbDSMvAJ+zZ0YpZEaXC7XGgrUFWWY/0DodM6oLKmlTeAVfG49BrINpyu+wj74v58nx2xta0A7lRPBmALJ8YglIOcWwWuinhhzJrJDX2PhR7aE8SvLJZgAGsGU57psjAFQtf4KGqpr6nYDLp0ioIsrEthgrhoZh0Ylt8RuLHmhP1IoI9gjYVWdAj5p8PXhVDZVskPpT74lypMEXhsDSAKBMIz2EG2o74TwIXypSjX7lIYA6JTw5DZ5oyXJQYnDr20Zp8ehQZ96aGQcSn0vpYtKvvbVPs0xIcgzex2kOVYef9w3+Y8Osn5UFp8mKQHwItoZDeN6iCf3C9J+U3+CCvL379KrTNezKQMfKWmDstnH7Cc41OsTI5DGr+flUGrYq7p239b7VMUhGIKkoz1s+gdORZJLKuiSxBYFMZ5steI2aYESmXDI3LkY8LfWR0wEBtAfOeNccYFtaPeS1bX67erkTAjhyy1IUDsFhTi2SPjSrZzTpR+DmKuEDU9zXDV5DHf11kO1MBhjs0xwh8oGxxkpPAHO/cZGQqpUHqoq5G029SGx9vuNZPsy8V8oQ0K33XLSYQ9RWzlodWxSRGPwezsB0clZZGBobyeOXTImQ+C2BbPaNGW2cMA6NEHlf9g5m1dXILAjRRBsdFHwKeTdKb9456sUICBZJYPQmOC2W+IP1nS0dimBZsdy3pgoVB3+oqmfCrmfo98z+d4ZDDzjijHszT7jD0a9ZRb8U4Pn6n4pZpU0JtH+5+0pluzzvqVG8/ZnvaSjyJrF8ete63686ZSetuORc7F+Hx/eXtg0Pq3AvZERhwAsNjw5YQ2H/5H8PF3Vk+/TrR0QE2gV/Y4rz6PbP3qcwQ3pcDk3H+ERBFhghzwV3uu7uqhLb+/Q/AA5Nocz3m/B6njfCt+V/33r9w9f2UT4LtdIPOA5jTlgOEUltPxDKH8BD9Otm5kquErz+vOsJzYQyTMpmty2U/Y5iYOQ0w4ZN3fzrbtr2qwDSEnn6JBQwz6w4Z4eStt1c9Khr9hfX4c79MyTRVXReMNVuyE1EfDwE6xWWIh+0kI/lbsU26XkYzHlNR8ArfbrxSe4tB/IQOtoICr1Acfa94mLI6XAYz4Wzi25yRrZBOZx3JbhoGsrQxicSlxzG0YalW/NHdcL8/8BQ5eA6LOLhQJFZw/jCzTTa5Pl+PG0lf9NZet4Hi8KGfzDTecwnB97F+VaAgnuYJIwKGBFMBiv22OyW46EkrTpzIVLAH2Os15ocK7dAdw4aycyy6bZQGjwSlsPpNhhGYEGdrXlAKr7AndTatoC+GIfPy+XPaRbP+RLoP53ZW22e2a5PH5LjyjNegWYmUySC5fg13yjvmYAjieUWn6v2xvDL8zWbqpeH3J7xV4ND3klX5qVN4GCt8gHJacARWYlPm4ODZA/6V9IkIFbcToIwUQPQMTzrhIyKKE3hD5Yyv/ZHnlAIjQuXbo2YJh4b3xYczOACRS2WV9h+5JhBzehI86gLok2Fi4jAI8ZXVNjIreZQqkCuIjtk6j7eWinKqsam/L14UO1TRtGNbJnEkcls7YOIUleGNN+CCFEbZVK71AWsfQvzMq4tJ9lGvtDqDBC9ALqcgouzvk8x6MnMtOJRyFj3bhV9vkxRMVB1h0F9iSN7CKQXBvtsF9TSjN5chY7/USxUQhrlCNUBaRSB0hsrY54M5k9TjnIt+Wy7oqPo4dRbD1bJigyjehM4iwrPuabAs+5V3+R7dB3EvSJcihr/o7a5WipcHvzozFdIaYsiQCQqHHRps2078G2ZPejK9BGtqY+T2BuKZCc4DUywY6S6NQBFDiRwKWkAcWoGvndFWFEPQBCPQTOJfZVdGlpKeCXS8bFkosh2sX9uyFNJO0/jcSZyFuVmHqbHIF0e89NTMpEcklLxjzMQb3sHnkc4l2KKbBq8VAn7Xb9mpfrWJmAyvVSr
5d7k0Dzr66mCGYbjQdEqOSAuJVnJ8AK2k91BxlcSaWCimGCO46m3Gby1XrPzGzqEwg5BE0K79SHOq1SZXRSMlgMGfFehpRMZ2PvJ4DMXnVe3bf72uhv83McfQ7r6jcOa2/n+kxqLmquY7WzcJmaXDdmYsIcQtMnYxw0ttNACocIxNQ886SGUV+eBVgk0tpI0his78x0HvuVKjtdNw+G98M9sitAG6gOsfp8qRK/sk32qTnVvlgI1pj0YlbY5DPEr6W1+3AjDrj2t5UGbc6yv0qbAVW3Ge6U+NkqGNSaJjC3CXSP5rznu6XMemmC9mApZvBHO7SPlDccS/52GS3TQ/cyTV1kE+IOH1uMKnLq6cTaOcv++e65rrypoz4NH3ePoJETc1+AB8yr/FSq5+IzGDHhgkTZzdoC5Pe2H/GYR406He9cUpPF9CncaThvMiyUr/j7lUwzWK9LmF6Xa+lYK0DcqOJixxWjUWXovuUppiFzKCXJX6C7CPAXqZLA6aQdLMrlyPlBRkPj11oz63FC+3bYviwKYUcJgIowYj3tc9ywoldGaUkgNVLpz+wEb1S9PP5Ka59Olihv+vONq0mvdN/o2CLADU3aqV8BLUIQpa8owQgVQ6y+60m8fiuQuHthpbGB8zU5HWSJ9/svVO78xM7/FkieXSRN0KRWtSVvIXqc+skiG+',
'',
]
data={
'__VIEWSTATE': viestate[3] #根据__VIEWSTATE实现翻页,加密算法未知
}
url="http://data.gdcic.net/Dop/Open/PersonList.aspx"
strhtml = requests.post(url,headers=headers,data=data,verify=False)
return strhtml.text
def bs_parse_html(html):
    '''Parse the person-list page and print it as a DataFrame.

    html -- HTML text of the PersonList page (as returned by get_html()).

    Every <td> of the ".data-list" table is collected: link text for
    cells that wrap their value in an <a>, plain text otherwise.  The
    flat list is then grouped into rows of 4 fields (name, ID number,
    gender, education) and printed.
    '''
    soup = BeautifulSoup(html, 'lxml')
    table = soup.select('.data-list td')
    info_list = []
    for tr in table:
        # Cells containing a link carry the person's name.
        if tr.a is not None:  # fixed: identity test instead of "!= None"
            name = tr.a.string
            info_list.append(name)
        else:
            infos = tr.string.strip()
            info_list.append(infos)
    # Group the flat cell list into 4-column rows.
    info_lists = clip_list(a=info_list, c=4)
    df = pd.DataFrame(info_lists, columns=['姓名', '身份证号码', '性别', '学历'])
    print(df)
def clip_list(a,c):
    """Cut list *a* into consecutive chunks of length *c*.

    When the list is longer than *c*, returns a list of chunks (the last
    one may be shorter).  Otherwise the original flat list is returned
    unchanged -- that asymmetry is the original, documented behaviour.
    """
    if len(a) <= c:
        # Chunk length covers the whole list: hand it back untouched.
        return a
    full_chunks = len(a) // c
    pieces = [a[c * i:c * (i + 1)] for i in range(full_chunks)]
    remainder = a[full_chunks * c:]
    if remainder:
        # Leftover elements form one final, shorter chunk.
        pieces.append(remainder)
    return pieces
def main():
    """Fetch the person-list page and print the parsed table."""
    bs_parse_html(get_html())
if __name__ == '__main__':
main() | [
"xiaozhangzxz@163.com"
] | xiaozhangzxz@163.com |
f496a31b97699a52d2a516cdc3c0651dbc0f2db5 | ce68b4e79620f8209b22bb72f8896c9a7cc01698 | /dis_train_week.py | 3f6b93b0465af25304fb167aef7fb7d8d25cdf62 | [] | no_license | ajithpad/Indian_railways | 2eb82f093361ce891026e6d88a7adbcff75f76bc | f6d9d2908127e3f945180c95b47191d74949add9 | refs/heads/master | 2021-01-20T17:07:12.057935 | 2016-11-11T07:57:01 | 2016-11-11T07:57:01 | 62,805,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,440 | py | #Code to use bokeh to plot the distribution of trains for a given station across the week
import pickle
import numpy as np
from bokeh.charts import Bar, output_file, show
import pylab as pl
import seaborn as sbn
from bokeh import mpl as mpl
from bokeh.io import output_notebook, show
# Departure-time data keyed by station code.
# NOTE(review): the file handle passed to pickle.load is never closed.
dis_trains = pickle.load(open("data/dep_times_lat_lon.p","rb"))
# Build a 1 x 175 ramp (7 days x 25 samples, each 0..24 scaled to 0..1);
# np.sin(3*gradient) of this array is drawn as the day/night background
# image inside get_trains_week below.
gradient = []
for ii in range(7):
    gradient += range(25)
gradient = np.array(gradient)
gradient = gradient/24.
gradient = gradient.reshape(1,-1)
def get_trains_week(stat_code):
    """Show a histogram of one station's train departures across the week.

    stat_code -- station code matched against ``dis_trains.code``.

    Departure times are minutes counted from the start of the week
    (x-axis tick labels start at 'Sun'), so the week spans 0..10080
    minutes.  Departures are binned into 2-hour (120-minute) buckets and
    drawn over the module-level day/night ``gradient`` image.
    """
    sbn.set_style("white")
    stat_vals = dis_trains[dis_trains.code == stat_code]
    # NOTE(review): assumes 'times' holds a single list of departure
    # minutes per station, hence the xx[0] below -- confirm against the
    # layout of the pickled data.
    all_trains = stat_vals.times.values
    xx = all_trains
    # (Removed dead code: days/tot_mins/hour/mins/train_time were computed
    # here but never used anywhere in the function.)
    fig = pl.figure(2)
    ax1 = fig.add_subplot(111)
    # 2-hour bins across the 7 * 24 * 60 = 10080 minutes of the week.
    aa,bb = np.histogram(xx[0], bins = range(0,10081,120))
    ax1.hist(xx[0], bins = range(0,10081,120), alpha = 0.5)
    # Day/night shading behind the bars, stretched over the full week.
    ax1.imshow(np.sin(3*gradient), extent=[0, 10080, 0, max(aa)+1], aspect='auto', cmap='gray')
    ax1.set_xlim(0,24*60*7)
    ax1.set_xticks(range(0,10081,1440))
    ax1.set_xticklabels(['Sun','Mon', 'Tue','Wed','Thu','Fri','Sat'],size = 16, rotation = 90)
    ax1.set_ylabel("Number of trains departing", size = 16)
    ax1.set_yticks(range(0,max(aa)+1,2))
    ax1.set_yticklabels(range(0,max(aa)+1,2), size = 16)
    pl.show()
| [
"ajith.physics@gmail.com"
] | ajith.physics@gmail.com |
5264b881370b18425a4a90f409799984552d35c9 | 21cb4f3f52168e31484c6389a9a08772b78a992d | /basic/list1.py | 7e6bb873cf62559d476044ed498561cfae6d2385 | [
"MIT"
] | permissive | Riverfount/estudos | 806f4592bf0f00fdc62c30ed04a7f61720d943fe | 03e5339de23b9fc49a2e26174d0241e923adb580 | refs/heads/master | 2022-12-11T08:13:19.994566 | 2019-11-02T13:26:19 | 2019-11-02T13:26:19 | 87,834,875 | 0 | 0 | MIT | 2021-02-26T02:35:23 | 2017-04-10T16:46:49 | HTML | UTF-8 | Python | false | false | 3,304 | py | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
    """Count words of length >= 2 whose first and last characters match."""
    return sum(1 for word in words if len(word) >= 2 and word[0] == word[-1])
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
    """Return *words* sorted, but words starting with 'x' come first."""
    # One sort with a compound key replaces the original two-list split:
    # False orders before True, so 'x' words lead; ties break alphabetically.
    return sorted(words, key=lambda word: (word[0] != 'x', word))
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def sort_last(tuples):
    """Sort *tuples* in place by each tuple's last element; return the list."""
    def last_element(item):
        return item[-1]
    tuples.sort(key=last_element)
    return tuples
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print one result line: ' OK ' when got == expected, '  X ' otherwise."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = '  X '
    print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
# Calls the above functions with interesting inputs.
def main():
    """Run every exercise function on sample inputs, printing OK/X per case."""
    print('match_ends')
    test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
    test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
    test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
    print()
    print('front_x')
    test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
         ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
    test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
         ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
    test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
         ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
    print()
    print('sort_last')
    test(sort_last([(1, 3, 4), (3, 2, 8), (2, 1, 10)]),
         [(1, 3, 4), (3, 2, 8), (2, 1, 10)])
    test(sort_last([(2, 3), (1, 2), (3, 1)]),
         [(3, 1), (1, 2), (2, 3)])
    test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
         [(2, 2), (1, 3), (3, 4, 5), (1, 7)])
# Standard boilerplate: run the exercises when executed as a script.
if __name__ == '__main__':
    main()
| [
"vicente.marcal@gmail.com"
] | vicente.marcal@gmail.com |
dd8c8a83f383e751d627d7bf77583460548b6891 | edc73196fd98b01c380ae83696169d427778fe43 | /ex15.py | ef807db295410f013fdebd23c5987af002716b0b | [] | no_license | floatingman/pythonthehardway | 145773c130350cb26a88be8baca87489ad580047 | ab89b119b186e06b090db1a138b858287b8ca3df | refs/heads/master | 2020-03-27T16:53:17.359580 | 2018-09-18T21:55:50 | 2018-09-18T21:55:50 | 146,812,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | from sys import argv
# Print a file named on the command line, then ask for the name again and
# reprint it (Learn Python the Hard Way, ex15).
script, filename = argv

txt = open(filename)
print("Here's your file %r:" % filename)
print(txt.read())
txt.close()  # fixed: the handle was never closed (resource leak)

print("Type the filename again:")
file_again = input(">")

txt_again = open(file_again)
print(txt_again.read())
txt_again.close()  # same leak fixed for the second handle
| [
"dwnewman78@gmail.com"
] | dwnewman78@gmail.com |
1f5a4de9ed4c15d42484b3ef33e2021054fbd9b5 | 200896b4c28291d2019d14e1fa7bbdd68abdf47f | /modelv3.py | 25c0ad0ed6bdb1832383cf5d355db28e119699cc | [] | no_license | danielhoadley/caselaw-classifier | 0d382e7d8d65e00a9bce1428b983075aaf7bd997 | f5714e5745a9c20e4a70dc6c7a6dc9af6dc6cc00 | refs/heads/master | 2021-05-04T14:38:30.397200 | 2018-02-04T17:24:37 | 2018-02-04T17:24:37 | 120,206,129 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,568 | py | import sklearn
#from pprint import pprint
import numpy as np
from glob import glob
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
#from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn import metrics
from sklearn.pipeline import Pipeline
# Get paths to labelled data
rawFolderPaths = glob("/Users/danielhoadley/PycharmProjects/trainer/!labelled_data_reportXML/*/")
print ('\nGathering labelled categories...\n')
# categories = ['crime', 'contract', 'negligence', 'judicial review', 'insolvency', 'defamation', 'employment', 'evidence', 'extradition', 'commonwealth']
categories = []
# Extract the folder paths, reduce down to the label and append to the categories list
# (each sub-folder name under the labelled-data directory becomes one class label)
for i in rawFolderPaths:
    string1 = i.replace('/Users/danielhoadley/PycharmProjects/trainer/!labelled_data_reportXML/','')
    category = string1.strip('/')
    #print (category)
    categories.append(category)
# Load the data
print ('\nLoading the dataset...\n')
# load_files reads each sub-folder as one class; shuffling with a fixed
# random_state keeps runs reproducible.
docs_to_train = sklearn.datasets.load_files("/Users/danielhoadley/PycharmProjects/trainer/!labelled_data_reportXML",
                                            description=None, categories=categories, load_content=True,
                                            encoding='utf-8', shuffle=True, random_state=42)
# Split the dataset into training and testing sets
print ('\nBuilding out hold-out test sample...\n')
# 60/40 split over the raw document strings and their integer targets.
X_train, X_test, y_train, y_test = train_test_split(docs_to_train.data, docs_to_train.target, test_size=0.4)
# THE TRAINING DATA
# Transform the training data into tfidf vectors
# NOTE(review): everything in this section is exploratory/dead code -- the
# Pipeline below re-vectorises the raw text itself, so none of these
# matrices feed the classifier.
print ('\nTransforming the training data...\n')
count_vect = CountVectorizer(stop_words='english')
X_train_counts = count_vect.fit_transform(raw_documents=X_train)
#tf_transformer = TfidfTransformer(use_idf=False).fit(X_train_counts)
#X_train_tf = tf_transformer.transform(X_train_counts)
#print (X_train_tf.shape)
tfidf_transformer = TfidfTransformer(use_idf=True)
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
print (X_train_tfidf.shape)
# THE TEST DATA
# Transform the test data into tfidf vectors
# NOTE(review): the test set is fit_transform-ed with its OWN vectorizer
# here instead of being transform-ed with the training-fit one; that would
# be invalid for scoring -- harmless only because these matrices are unused.
print ('\nTransforming the test data...\n')
count_vect = CountVectorizer(stop_words='english')
X_test_counts = count_vect.fit_transform(raw_documents=X_test)
###tf_transformer = TfidfTransformer(use_idf=False).fit(X_test_counts)
##X_test_tf = tf_transformer.transform(X_test_counts)
#print (X_test_tf.shape)
tfidf_transformer = TfidfTransformer(use_idf=True)
X_test_tfidf = tfidf_transformer.fit_transform(X_test_counts)
print (X_test_tfidf.shape)
print (X_test_tfidf)
print (y_train.shape)
docs_test = X_test
# Construct the classifier pipeline using a SGDClassifier algorithm
print ('\nApplying the classifier...\n')
# vectorise -> tf-idf weight -> SGD-trained linear classifier (hinge loss);
# random_state pins the shuffling so results are repeatable.
text_clf = Pipeline([('vect', CountVectorizer(stop_words='english')),
                     ('tfidf', TfidfTransformer(use_idf=True)),
                     ('clf', SGDClassifier(loss='hinge', penalty='l2',
                                           alpha=1e-3, random_state=42, verbose=1)),
])
# Fit the model to the training data
text_clf.fit(X_train, y_train)
# Run the test data into the model
predicted = text_clf.predict(docs_test)
# Calculate mean accuracy of predictions
print (np.mean(predicted == y_test))
# Generate labelled performance metrics
print(metrics.classification_report(y_test, predicted,
                                    target_names=docs_to_train.target_names))
| [
"noreply@github.com"
] | noreply@github.com |
c304cc7227d2255c4dde942808b00c572699cabc | bbc35e3c7e35b5219e06027c1798ac36775305ff | /LeeCode/topic575/python/topic575.py | 5dbedfc04f4cc9fa5df6110fa5aab30da4507267 | [] | no_license | zabcdefghijklmnopqrstuvwxy/leecode | 7f261dfde880f4755cf3b35b0ca08b36025cd5e9 | 35018a3e0506a77d425d0aff6a1c50bfa67afeaa | refs/heads/master | 2023-03-07T06:27:51.292041 | 2023-02-26T03:50:34 | 2023-02-26T03:50:34 | 211,665,551 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | class Solution:
def distributeCandies(self, candies: List[int]) -> int:
return len(set(candies)) if len(set(candies)) < len(candies)//2 else len(candies)//2 | [
"sai14790085178@163.com"
] | sai14790085178@163.com |
def find_empty_location(grid, current):
    """Find the first cell whose value (entry index 2) equals 0.

    Scans row-major.  On success stores the coordinates in *current*
    ([row, col]) and returns True; returns False when the grid is full.
    (Also restores the def line, which was fused with dataset metadata.)
    """
    for r in range(9):
        for c in range(9):
            if grid[r][c][2] != 0:
                continue
            current[0], current[1] = r, c
            return True
    return False
def used_in_row(grid, row , num):
    """True when *num* already appears somewhere in row *row* (value index 2)."""
    return any(grid[row][i][2] == num for i in range(9))
def used_in_col(grid , col, num):
    """True when *num* already appears somewhere in column *col* (value index 2)."""
    return any(grid[r][col][2] == num for r in range(9))
def used_in_box(grid, row, col, num):
    """True when *num* appears in the 3x3 box whose top-left cell is (row, col)."""
    return any(grid[row + i][col + j][2] == num
               for i in range(3) for j in range(3))
def check_location_is_safe(grid,row,col,num):
    """True when placing *num* at (row, col) breaks no Sudoku constraint.

    Checks the row, then the column, then the 3x3 box containing the
    cell, short-circuiting on the first conflict just like the original
    boolean expression did.
    """
    if used_in_row(grid, row, num):
        return False
    if used_in_col(grid, col, num):
        return False
    return not used_in_box(grid, row - row % 3, col - col % 3, num)
def solve_sudoku(grid):
    """Solve the 9x9 grid in place by recursive backtracking.

    Each cell's value lives at index 2 of its entry; 0 marks "empty".
    Returns True once every cell is legally filled, False when the grid
    (in its current state) has no solution.
    """
    pos = [0, 0]  # filled in by find_empty_location with [row, col]
    if not find_empty_location(grid, pos):
        # No empty cell left: the grid is complete.
        return True
    row, col = pos
    for candidate in range(1, 10):
        if not check_location_is_safe(grid, row, col, candidate):
            continue
        grid[row][col][2] = candidate      # tentative placement
        if solve_sudoku(grid):
            return True
        grid[row][col][2] = 0              # undo and try the next digit
    # Every digit conflicted: signal the caller to backtrack.
    return False
| [
"noreply@github.com"
] | noreply@github.com |
9f944b69899522352c0060c3f4a975d4d14cda6d | cde99055d9b180e63e01e7230d142b0c83d74243 | /index_calc_0.3.py | db8815b4b88f02e707ad3ef23bdab56629654f85 | [] | no_license | ibobriakov/clickstream | bfbca5611fc9e5921fba6b765fbe01541ab262f4 | 7ac62c607dce1ff1cf8d5c05b68810f6bb0cb942 | refs/heads/master | 2021-01-25T12:14:18.330846 | 2014-06-25T06:52:20 | 2014-06-25T06:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,562 | py |
# coding: utf-8
# In[335]:
import pandas as pd
import fnmatch
import os
import numpy as np
# In[336]:
# One accumulator DataFrame per store; matching CSVs from ./data are
# appended below.  (Python 2 script: note the paren-less print statements.)
android = pd.DataFrame()
itunes = pd.DataFrame()
# In[337]:
for file in os.listdir('.\data'):
    if fnmatch.fnmatch(file, '*android*.csv'):
        fname = "data\\" + file
        print "appending: " + fname
        file_df = pd.read_csv(fname)#, index_col='id')
        print file_df['Latino / Hispanic'].head(25)
        android = pd.concat([android, file_df])
    if fnmatch.fnmatch(file, '*itunes*.csv'):
        fname = "data\\" + file
        print "appending: " + fname
        # NOTE(review): itunes uses index_col='id' while android's is
        # commented out -- confirm this asymmetry is intended.
        file_df = pd.read_csv(fname, index_col='id')
        print file_df['Latino / Hispanic'].head(25)
        itunes = pd.concat([itunes,file_df])
# In[338]:
def count_totals(df):
    """Add per-row demographic totals used later to compute shares.

    Creates 'Ethnic-total' and 'Gender-total' for the app itself plus the
    'Related Apps: ' variants.  Plain '+' semantics are kept, so NaN in
    any component makes that row's total NaN.
    """
    ethnic_parts = ['Caucasian (white)', 'Middle Eastern', 'Native American',
                    'East Indian', 'African descent (black)', 'Asian',
                    'Latino / Hispanic', 'Pacific Islander']
    for prefix in ('', 'Related Apps: '):
        total = df[prefix + ethnic_parts[0]]
        for part in ethnic_parts[1:]:
            total = total + df[prefix + part]
        df[prefix + 'Ethnic-total'] = total
    df['Gender-total'] = df['M'] + df['F']
    df['Related Apps: Gender-total'] = df['Related Apps: F'] + df['Related Apps: M']
    return df
# Add the demographic total columns to both platforms.
itunes = count_totals(itunes)
android = count_totals(android)
# In[339]:
def share_count(row, one, two):
    """Return row[one] / row[two], or 0 when the denominator is NaN or 0.

    Applied row-wise via df.apply(..., axis=1) to turn raw demographic
    counts into shares of their total.
    """
    import math
    denominator = row[two]
    if math.isnan(denominator) or denominator == 0:
        return 0
    return row[one] / denominator
#df = df[df['F-share'].notnull()]
# In[340]:
def share_platform_gender(df):
    """Compute M/F share-of-gender columns for the app and its related apps."""
    for prefix in ('', 'Related Apps: '):
        total_col = prefix + 'Gender-total'
        for gender in ('F', 'M'):
            df[prefix + gender + '-share'] = df.apply(
                share_count, one=prefix + gender, two=total_col, axis=1)
    return df
itunes = share_platform_gender(itunes)
android = share_platform_gender(android)
# In[341]:
def share_platform_age(df):
    """Compute ethnic share columns (count / Ethnic-total), app and related apps."""
    ethnic_names = ['Caucasian (white)', 'Middle Eastern', 'Native American',
                    'East Indian', 'African descent (black)', 'Asian',
                    'Pacific Islander', 'Latino / Hispanic']
    for prefix, total_col in (('', 'Ethnic-total'),
                              ('Related Apps: ', 'Related Apps: Ethnic-total')):
        for name in ethnic_names:
            column = prefix + name
            df[column + '-share'] = df.apply(share_count, one=column, two=total_col, axis=1)
    return df
itunes = share_platform_age(itunes)
android = share_platform_age(android)
# In[342]:
# Notebook leftover: bare expression, no effect when run as a script.
itunes.columns.values
# In[343]:
# Snapshot both DataFrames with the newly added share columns.
android.to_csv('tmp\\a-sample.csv')
itunes.to_csv('tmp\\i-sample.csv')
# In[344]:
def index_count(row, one, two):
    """Return row[one] / two: a share expressed as an index against a mean.

    Callers pass the pre-computed mean share as *two*, so 1.0 means
    "exactly average".  (Dead commented-out debug code removed.)
    """
    return row[one] / two
def index_platform_gender(df):
    """Index each gender share against its platform-wide mean (zeros excluded)."""
    for share_col in ('F-share', 'M-share',
                      'Related Apps: F-share', 'Related Apps: M-share'):
        nonzero_mean = df[df[share_col] != 0][share_col].mean()
        df[share_col.replace('-share', '-index')] = df.apply(
            index_count, one=share_col, two=nonzero_mean, axis=1)
    return df
itunes = index_platform_gender(itunes)
android = index_platform_gender(android)
# In[345]:
# Notebook leftover: bare expression, no effect when run as a script.
itunes.columns.values
# In[346]:
def index_platform_age(df):
    """Index each ethnic share against its platform-wide mean share.

    For every ethnic group, '<name>-index' is the row's '<name>-share'
    divided by the mean of that share over rows where it is non-zero;
    likewise for the 'Related Apps: ' variants.
    """
    ethnic = [ 'Caucasian (white)', 'Middle Eastern', 'Native American', 'East Indian', 'African descent (black)', 'Asian', 'Pacific Islander', 'Latino / Hispanic' ]
    for e in ethnic:
        res = e + '-index'
        o = e + '-share'
        df[res] = df.apply(index_count, one=o, two=df[df[o] != 0][o].mean(), axis=1)
    for e in ethnic:
        o = "Related Apps: " + e + "-share"
        res = "Related Apps: " + e + '-index'
        # BUG FIX: this used to pass one=e (the raw count column) instead of
        # the share column; every sibling (index_platform_gender, index_cat,
        # index_both_platform) divides the *share* by its mean, so do that here.
        df[res] = df.apply(index_count, one=o, two=df[df[o] != 0][o].mean(), axis=1)
    return df
itunes = index_platform_age(itunes)
android = index_platform_age(android)
# In[347]:
android.to_csv('tmp\\a-sample.csv')
itunes.to_csv('tmp\\i-sample.csv')
# done index against platform
# In[348]:
# Notebook leftover: bare expression, no effect when run as a script.
itunes.columns.values
# In[349]:
# begin index against category
# Group iTunes apps by store category; used below to build per-category means.
cats = itunes.groupby('category')
# In[350]:
def cat_count(row):
    """Mean of the strictly positive entries of a Series (NaN when none)."""
    positive_entries = row[row > 0]
    return positive_entries.mean()
# In[351]:
# Per-category mean shares: one row per app category; columns mirror the
# per-app share columns, using cat_count's zero-excluding mean.
categories = pd.DataFrame(index=cats.groups.keys())
# Notebook leftover: bare expression, no effect when run as a script.
categories
# In[352]:
#m = cats['F-index'].apply(cat_count)
categories['F-share'] = cats['F-share'].apply(cat_count)
categories['M-share'] = cats['M-share'].apply(cat_count)
categories['Related Apps: F-share'] = cats['Related Apps: F-share'].apply(cat_count)
categories['Related Apps: M-share'] = cats['Related Apps: M-share'].apply(cat_count)
ethnic = [ 'Caucasian (white)', 'Middle Eastern', 'Native American', 'East Indian', 'African descent (black)', 'Asian', 'Pacific Islander', 'Latino / Hispanic' ]
# NOTE(review): 'res' is computed but never used in either loop below.
for e in ethnic:
    res = e + '-index'
    o = e +'-share'
    categories[o] = cats[o].apply(cat_count)
for e in ethnic:
    e = "Related Apps: " + e + "-share"
    res = e + '-index'
    categories[e] = cats[e].apply(cat_count)
# In[353]:
# Notebook leftover: bare expression, no effect when run as a script.
categories.columns
#categories[categories.index=='Books']['F-share'].values[0]
# In[354]:
def cat_find(row, index):
    """Ratio of an app's value in column *index* to its category's value.

    Looks the app's 'category' up in the module-level ``categories``
    frame; returns 0 when the lookup yields nothing (and, per the
    original truthiness test, also when the single looked-up value is 0).
    """
    category_value = categories[categories.index == row['category']][index]
    result = row[index] / category_value
    # Kept as in the original: truth-tests the underlying array, which
    # covers both "no matching category" and "value is zero".
    if result.values:
        return result.values[0]
    return 0
def index_cat(df):
    """Index every share column against its category's mean share (via cat_find)."""
    for share_col in ('F-share', 'M-share',
                      'Related Apps: F-share', 'Related Apps: M-share'):
        df[share_col.replace('-share', '-index-cat')] = df.apply(
            cat_find, index=share_col, axis=1)
    ethnic_names = ['Caucasian (white)', 'Middle Eastern', 'Native American',
                    'East Indian', 'African descent (black)', 'Asian',
                    'Pacific Islander', 'Latino / Hispanic']
    for prefix in ('', 'Related Apps: '):
        for name in ethnic_names:
            share_col = prefix + name + '-share'
            df[prefix + name + '-index-cat'] = df.apply(cat_find, index=share_col, axis=1)
    return df
itunes = index_cat(itunes)
android = index_cat(android)
# In[355]:
# Export the per-platform masters with the category indices included.
itunes.to_csv('itunes_master.csv')
android.to_csv('android_master.csv')
# In[356]:
itunes.to_csv('cats.csv')
# In[357]:
#index against both platforms
rows = [ 'M','F','Caucasian (white)', 'Middle Eastern', 'Native American', 'East Indian', 'African descent (black)', 'Asian', 'Pacific Islander', 'Latino / Hispanic' ]
# Collect every share column name (plain + Related Apps variants) ...
final_rows = []
for r in rows:
    final_rows.append(r + '-share')
    final_rows.append('Related Apps: ' + r + '-share')
# ... and stack the two platforms' share columns into one frame, from
# which cross-platform means are taken below.
both_platforms_shares = pd.concat([android[final_rows],itunes[final_rows]])
# In[358]:
both_platforms_shares.to_csv('tmp/both_platforms.csv')
# In[359]:
def index_both_platform(df, both_shares):
    """Index each share column against its mean over BOTH platforms combined.

    both_shares -- concatenation of the android and itunes share columns;
    as elsewhere, the mean ignores zero shares.
    """
    demographic = ['M', 'F', 'Caucasian (white)', 'Middle Eastern',
                   'Native American', 'East Indian', 'African descent (black)',
                   'Asian', 'Pacific Islander', 'Latino / Hispanic']
    for prefix in ('', 'Related Apps: '):
        for name in demographic:
            share_col = prefix + name + '-share'
            combined_mean = both_shares[both_shares[share_col] != 0][share_col].mean()
            df[prefix + name + '-index-both'] = df.apply(
                index_count, one=share_col, two=combined_mean, axis=1)
    return df
itunes = index_both_platform(itunes,both_platforms_shares)
android = index_both_platform(android,both_platforms_shares)
# In[360]:
# Show the final android column set (Python 2 print statement).
print android.columns.values
# In[361]:
# Re-export the masters now that the -index-both columns exist.
itunes.to_csv('itunes_master.csv')
android.to_csv('android_master.csv')
# In[362]:
cols = "Publisher Placement AID Publisher URL 1 day avails title language icon category developer price reviews count content rating brand score release date likely to be top_free_rank top_paid_rank top_grossing_rank top keywords title top keywords description top keywords description sip top keywords review top keywords review sip M F Caucasian (white) Middle Eastern Native American East Indian African descent (black) Asian Pacific Islander Latino / Hispanic Related Apps Related Apps Icons Related Apps Categories Related Apps SUB Categories Related Apps: M Related Apps: F Related Apps: Caucasian (white) Related Apps: Middle Eastern Related Apps: Native American Related Apps: East Indian Related Apps: African descent (black) Related Apps: Asian Related Apps: Pacific Islander Related Apps: Latino / Hispanic"
cols = cols.split('\t')
#cols = []
#those columns are in itunes, but not in android:
'''['installs',
 'sub categories',
 'Related Apps Name',
 'review rating',
 'id',
 'featured_rank']'''
# Columns present on only one platform; added as NaN to the other below.
cols_additional = ['installs',
 'sub categories',
 'Related Apps Name',
 'review rating',
 'id',
 'featured_rank']
rows = [ 'M','F','Caucasian (white)', 'Middle Eastern', 'Native American', 'East Indian', 'African descent (black)', 'Asian', 'Pacific Islander', 'Latino / Hispanic' ]
# Extend the export column list with every derived share/index column,
# first for the app itself, then for its related apps.
for e in rows:
    ind = e + '-index'
    cat = e + '-index-cat'
    both = e + '-index-both'
    inds = e +'-share'
    cols.extend([inds,ind,cat,both])
    #print inds,ind,cat,both
for e in rows:
    e = "Related Apps: " + e
    ind = e + '-index'
    cat = e + '-index-cat'
    both = e + '-index-both'
    inds = e +'-share'
    cols.extend([inds,ind,cat,both])
    #print inds,ind,cat,both
print cols
# In[363]:
# Columns requested for export that the itunes frame does not have.
cols_final = list( set(cols) - set(itunes.columns.values) )
# In[364]:
# Notebook leftover: bare expression, no effect when run as a script.
cols_final
# In[365]:
# Ensure each platform has every export column, filling missing ones with NaN.
cols_android = []
cols_android.extend(cols)
cols_android.extend(cols_additional)
if set(cols_additional) <= set(android.columns.values):
    pass
else:
    for column in cols_additional:
        android[column] = np.nan
cols_itunes = []
cols_itunes.extend(cols)
cols_itunes.extend(cols_additional)
if set(cols_additional) <= set(itunes.columns.values):
    pass
else:
    for column in cols_additional:
        itunes[column] = np.nan
# In[366]:
# Notebook leftover: bare expression, no effect when run as a script.
itunes[cols_additional].head()
# In[367]:
# NOTE(review): the lists are swapped here (itunes exported with
# cols_android and vice versa); harmless only because both lists hold
# the same names.  'cols=' is the old pandas spelling of 'columns='.
itunes.to_csv('itunes_master_cols.csv', cols = cols_android)
android.to_csv('android_master_cols.csv', cols = cols_itunes)
# In[368]:
# Notebook leftover: bare expression, no effect when run as a script.
pd.version.version
| [
"bobriakov.igor@gmail.com"
] | bobriakov.igor@gmail.com |
8d6daaa7554f3704a12d3df4f8a956e1c910f1cc | 027086eff4acaf865f0cf3786a2093f8f8088e46 | /OpenCollegePro/module/Cluster.py | 6a1b48260e9d250df6e117a7447bb8840767f33b | [] | no_license | pxz000git/OpenCollege | c44236b9987e762116f418b7fc209d2eb22d4b31 | da7b0fffa1662a1affad0b5fca4b02246969e969 | refs/heads/master | 2021-05-03T11:18:55.362285 | 2018-03-06T04:06:04 | 2018-03-06T04:06:04 | 120,548,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
# autho:pxz
from pyspark.sql import SparkSession
def spark_context():
    """Read the art2vec_syn Mongo collection through Spark and print probes.

    Builds (or reuses, via getOrCreate) a SparkSession wired to the
    MongoDB Spark connector, loads the collection configured in
    spark.mongodb.input.uri, and prints a couple of probes of the first
    rows.
    """
    spark = SparkSession \
        .builder \
        .appName("myApp") \
        .config("spark.mongodb.input.uri", "mongodb://175.102.18.112:27018/OpenCollege_kw.art2vec_syn") \
        .config("spark.mongodb.output.uri", "mongodb://175.102.18.112:27018/OpenCollege_kw.VECTORS_syn") \
        .getOrCreate()
    # (commented-out example) write a small demo DataFrame back to Mongo:
    # people =spark.createDataFrame([("Bilbo Baggins", 50), ("Gandalf", 1000), ("Thorin", 195), ("Balin", 178), ("Kili", 77), ("Dwalin", 169), ("Oin", 167), ("Gloin", 158), ("Fili", 82), ("Bombur", None)], ["name", "age"])
    # people.write.format("com.mongodb.spark.sql.DefaultSource").mode("append").save()
    df = spark.read.format("com.mongodb.spark.sql.DefaultSource").load()
    # NOTE(review): df.take(5) returns a plain list, which has no .fit
    # attribute -- this line looks like debug leftovers and should raise
    # AttributeError at runtime; confirm the intended probe.
    print(type(df.take(5).fit))
    # print(df.take(5)[1])
    # Print the 'words' field of the second sampled row.
    print((df.take(5)[1])['words'])
    # df.show()
# Script entry point.
if __name__ == '__main__':
    spark_context()
| [
"pxz015@163.com"
] | pxz015@163.com |
# Read one line from standard input and print how many characters are digits.
# (Also restores the first line, which was fused with dataset metadata.)
input_string = input()
coun = 0
for s in input_string:
    # str.isdigit() already returns a bool; comparing it "== True" was redundant.
    if s.isdigit():
        coun = coun + 1
print(coun)
| [
"noreply@github.com"
] | noreply@github.com |
094154e1bdcc4fe286692401721c016cf778e4f5 | 2228333066709d365d4e59f7ea4e43fe0a2e8b37 | /loja/migrations/0014_pedidos_quantidades.py | 31ea459288fde4164c474e87e7ad365ec457e90a | [] | no_license | MathiasDias/paes_tania | 759f159eb3411d4ed204413b3cddd3d9f2416a9b | 13f24302689f8d9ede9e2675c52696f6db08a440 | refs/heads/master | 2023-01-24T05:00:01.889325 | 2020-12-09T16:14:16 | 2020-12-09T16:14:16 | 274,463,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # Generated by Django 3.0.5 on 2020-07-02 17:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: adds the nullable text
    # column "Quantidades" (max 512 chars) to the "pedidos" model.
    dependencies = [
        ('loja', '0013_userprofile'),
    ]
    operations = [
        migrations.AddField(
            model_name='pedidos',
            name='Quantidades',
            field=models.CharField(max_length=512, null=True),
        ),
    ]
| [
"mathias.gadias@gmail.com"
] | mathias.gadias@gmail.com |
a4f8110f6f05523eaa4cf4d211ae35fa5e01ed6e | 5f61a5738f7c253c3696eb78cb8fe8f448824733 | /tests/unit/test_fixtures.py | b4da5a15c51ae33e2f68992a2b1f9f7864fca60a | [] | no_license | JuanBer/PyTestBook | 8986779471a3598a068466a4bad60c7343e57b9a | 6ef5843da58f3f7dd872527d8e5d23eb9a67cb23 | refs/heads/master | 2020-04-26T19:13:52.735648 | 2019-03-17T23:50:17 | 2019-03-17T23:50:17 | 173,767,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | import pytest
@pytest.fixture()
def some_data():
    """Return answer to last question"""
    # Simplest possible fixture: hands a constant to the tests below.
    return 42
# This fixture fails on purpose (43 != 42) to show how pytest reports an
# error raised during fixture setup, per its own docstring.
@pytest.fixture()
def some_other_data():
    """Raise an exception from fixture."""
    x = 43
    assert x == 42
    return x
@pytest.fixture()
def a_tuple():
    """Returning something more interesting"""
    # Mixed-type tuple; element [3] is the dict used by test_a_tuple.
    return 1, 'foo', None, {'bar': 23}
def test_some_data(some_data):
"""Use fixture return value in a test"""
assert some_data == 42
def test_a_tuple(a_tuple):
    """Demo the a_tuple fixture"""
    # NOTE(review): a_tuple[3] is {'bar': 23}, so this assertion fails by
    # construction -- presumably a deliberate demo of a failing test.
    assert a_tuple[3]['bar'] == 32
def test_other_data(some_other_data):
    # The some_other_data fixture raises AssertionError during setup
    # (it asserts 43 == 42), so this test errors before the body runs.
    assert some_other_data == 42
| [
"jm_bertoni@yahoo.com.ar"
] | jm_bertoni@yahoo.com.ar |
6d9ae5d1e897d019452dba963fae79bc1ac70972 | 94d006caa0abbfd2b4602f8698580af38bf6534d | /todo/models.py | dfe434060f7acbcd4e271e2d15727697646ce6de | [] | no_license | anas4488/django_todolist_freecodecamp | fc0e3b6bbd5fd6286ed91b761e62f4771d67b23f | 441a0f28752bbee253fe5855c26a40abea955ee2 | refs/heads/main | 2023-04-01T11:48:57.624790 | 2021-04-12T06:14:31 | 2021-04-12T06:14:31 | 356,867,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from django.db import models
# Create your models here
class Todo(models.Model):
    """A single to-do item, consisting only of a title."""
    # Free-form text of the task.
    title = models.CharField(max_length=1000)
    def __str__(self):
        """Use the title as the human-readable representation."""
        return self.title
"anassayed63@gmail.com"
] | anassayed63@gmail.com |
9273c39ec8598daef4fda559c0c6b1c84d56a0bf | 29bb6f76bff9a9c40d5ca8176d94a5f57e331f0b | /backend/translations/migrations/0015_auto_20210616_1104.py | 0b5169906dc44bf6ea37656dac0e79370fd2dca3 | [] | no_license | zetkin/translators-interface | 697f320ed3e7bf737ebbe2a7ef58ffdba6c245b1 | efa525e76f36d05d7ee2ec10d17b679c97355ada | refs/heads/master | 2023-06-05T09:27:40.923506 | 2021-06-28T11:31:47 | 2021-06-28T11:31:47 | 363,886,140 | 1 | 0 | null | 2021-06-28T11:31:48 | 2021-05-03T09:58:34 | Python | UTF-8 | Python | false | false | 691 | py | # Generated by Django 3.2 on 2021-06-16 09:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a soft-delete timestamp to Translation; update Project.languages help text."""

    dependencies = [
        ('translations', '0014_auto_20210611_1148'),
    ]
    operations = [
        # New nullable 'deleted_at' column: None means "not deleted".
        migrations.AddField(
            model_name='translation',
            name='deleted_at',
            field=models.DateTimeField(blank=True, default=None, null=True),
        ),
        # Re-declares the 'languages' M2M; appears to only attach admin
        # help_text (no structural change visible here).
        migrations.AlterField(
            model_name='project',
            name='languages',
            field=models.ManyToManyField(help_text='Make sure to always select English, and any other translations in this project.', to='translations.Language'),
        ),
    ]
| [
"eribloodlust@grrlz.net"
] | eribloodlust@grrlz.net |
e05a1a54e6e39d37cc0f1f6579fe22cb3f50df22 | d0ce596ce34a3ec8b3c20e6594dd4ad6ad321d53 | /followunfollow.py | a5d51f00adb368a4421a5c942eafb5e6dc956db7 | [] | no_license | scottyallen/instafan | 6174077f2ddcfc6634d257da789c2e5669e7f1ec | a921c8a0cc7c8a129c2cd44ba2874d4251ea88e4 | refs/heads/master | 2021-01-12T08:02:23.888803 | 2016-12-22T08:31:53 | 2016-12-22T08:31:53 | 77,108,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,476 | py | import datetime
import json
import random
import sys
import time
from selenium import webdriver
import instagram
import utils
import gflags
# --credentials names a file containing USERNAME= and PASSWORD= lines;
# it is parsed by utils.load_credentials in main().
gflags.DEFINE_string('credentials', None,
                     'Login credentials. Filename for file with USERNAME= and PASSWORD= on separate lines')
FLAGS = gflags.FLAGS
def main(argv):
  """Endless Instagram follow/unfollow loop driven through PhantomJS.

  argv[1] must name a file with one target username per line.  Each
  iteration logs our own follower count to follower_log.json, sometimes
  follows a random not-yet-followed target, and -- once more than 75% of
  the targets are followed -- sometimes unfollows a random followed one.
  """
  USERNAME, PASSWORD = utils.load_credentials(FLAGS.credentials)
  try:
    # Headless browser; the TLS service args are needed for PhantomJS.
    b = webdriver.PhantomJS(service_args=['--ignore-ssl-errors=true', '--ssl-protocol=TLSv1'])
    b.set_window_size(1120, 550)
    b.implicitly_wait(10)
    print "Created webdriver"
    utils.get(b, 'https://www.instagram.com/')
    print "Loaded homepage"
    # Reuse a saved session so maybe_login only logs in when required.
    utils.load_cookies(b, '%s_cookies.json' % USERNAME, 'instagram.com')
    print "Loaded cookies"
    user = instagram.User(USERNAME, b)
    user.maybe_login(PASSWORD)
    utils.save_cookies(b, '%s_cookies.json' % USERNAME)
    profile = instagram.Profile(USERNAME, b)
    log = open('follower_log.json', 'a')
    following_usernames = set(profile.following())
    # Target usernames, one per line, from the positional argument.
    follow_set = set([x.strip() for x in open(argv[1]).readlines()])
    def following():
      # Targets we currently follow.
      return list(follow_set.intersection(following_usernames))
    def not_following():
      # Targets we do not follow yet.
      return list(follow_set.difference(following_usernames))
    while True:
      print "Following %d out of %d" % (len(following()), len(follow_set))
      print "%s followers: %d" % (USERNAME, instagram.Profile(USERNAME, b).follower_count())
      # Append one JSON record per iteration with our follower count.
      log_record = {
        'username': USERNAME,
        'followers': instagram.Profile(USERNAME, b).follower_count(),
        'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
      }
      log.write(json.dumps(log_record) + '\n')
      log.flush()
      # ~60% of iterations: follow one random target we don't follow yet.
      if random.random() < 0.6 and not_following():
        username = random.choice(not_following())
        profile = instagram.Profile(username, b)
        profile.follow()
        following_usernames.add(username)
        # 5% of the time wait 30 minutes instead of 10 seconds.
        if random.random() < 0.05:
          utils.delay(60 * 30)
        else:
          utils.delay(10)
      # Once >75% of the list is followed, sometimes unfollow one target.
      if len(following()) > len(follow_set) * 0.75 and random.random() < 0.6:
        username = random.choice(following())
        profile = instagram.Profile(username, b)
        profile.unfollow()
        following_usernames.remove(username)
      utils.delay(20)
    # NOTE(review): unreachable -- the while True loop above never breaks,
    # so the browser is only torn down via the exception path below.
    b.close()
  except:
    # Bare except on purpose: capture a screenshot for debugging, then
    # re-raise (this also catches KeyboardInterrupt).
    b.save_screenshot('screenshot.png')
    raise
if __name__ == '__main__':
  # gflags.FLAGS(...) parses the --flags out of sys.argv and returns the
  # remaining positional arguments (main reads the target file from argv[1]).
  argv = gflags.FLAGS(sys.argv)
  main(argv)
| [
"scotty@scottyallen.com"
] | scotty@scottyallen.com |
7fa22c168188a99368332d49c68e039d9a147a2b | 4c72e1817245404aeb7caa6be70bdbbacb574053 | /exerciciosCev/Mundo 3/Aula 17/081.py | af84c8df260ba757c0503072626639af8d50d390 | [] | no_license | Viniciusadm/Python | 9b88e1dc626346661ac6a43fc5e9ca9d12fa6610 | 9ee066c99a0f0cbf00865cd82ea61b2464afa425 | refs/heads/master | 2021-09-25T15:31:04.159070 | 2021-09-16T02:28:47 | 2021-09-16T02:28:47 | 251,954,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | numerosLista = []
# Read integers until the user answers 'N', then show them in descending
# order and report whether the value 5 was entered.
while True:
    numerosLista.append(int(input('Digite um número: ')))
    if input('Você deseja continuar? [S/N] ').upper() == 'N':
        break
decrescente = sorted(numerosLista, reverse=True)
print(f'Você digitou {len(numerosLista)} elementos')
print('Os valores em ordem decrescente são ', end='')
print(' - '.join(str(valor) for valor in decrescente))
if 5 in numerosLista:
    print('O valor 5 faz parte da lista')
else:
    print('O valor 5 não faz parte da lista')
"viniciusam.22@gmail.com"
] | viniciusam.22@gmail.com |
f6d7fbc403d0e1a6e8c96270631e6613f7065501 | 7f17c66a7bfdd93877af8f4d1f2dc377cc5ef5ff | /ceph_pool_plugin.py | 0c7d2afbcfe8a233c786b8e2e608deb425fd7fd2 | [] | no_license | ksingh7/ceph-collectd-plugin | 4910b381b3854e66821b87d3d9a2472f5c6863cd | ebaa708769e02d79c9c9bf484d54abde345825a8 | refs/heads/master | 2020-12-25T13:44:53.294780 | 2016-06-04T00:31:36 | 2016-06-04T00:31:36 | 60,381,525 | 2 | 4 | null | 2017-06-06T16:18:29 | 2016-06-03T22:32:52 | Python | UTF-8 | Python | false | false | 4,601 | py | #!/usr/bin/env python
#
# vim: tabstop=4 shiftwidth=4
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; only version 2 of the License is applicable.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors:
# Ricardo Rocha <ricardo@catalyst.net.nz>
#
# About this plugin:
# This plugin collects information regarding Ceph pools.
#
# collectd:
# http://collectd.org
# collectd-python:
# http://collectd.org/documentation/manpages/collectd-python.5.shtml
# ceph pools:
# http://ceph.com/docs/master/rados/operations/pools/
#
import collectd
import json
import traceback
import subprocess
import os
import base
class CephPoolPlugin(base.Base):
    """Collectd plugin publishing per-pool I/O rates and cluster usage for Ceph.

    Stats are gathered by shelling out to the ``ceph`` CLI
    (``ceph osd pool stats`` and ``ceph df``) and parsing the JSON output.
    """

    def __init__(self):
        base.Base.__init__(self)
        # Metric namespace prefix; combined with self.cluster (configured
        # via base.Base) to key all emitted values.
        self.prefix = 'ceph'

    def get_stats(self):
        """Retrieves stats from ceph pools.

        Returns a nested dict shaped like
        {'<prefix>-<cluster>': {'pool-<name>': {...}, 'cluster': {...}}},
        or None if either CLI call failed or produced no output.
        """
        ceph_cluster = "%s-%s" % (self.prefix, self.cluster)

        data = { ceph_cluster: {} }

        stats_output = None
        try:
            stdin, stdout, stderr = os.popen3('ceph osd pool stats -f json')
            stats_output = stdout.read()
            stdin2, stdout2, stderr2 = os.popen3('ceph df -f json')
            df_output = stdout2.read()
        except Exception as exc:
            collectd.error("ceph-pool: failed to ceph pool stats :: %s :: %s"
                    % (exc, traceback.format_exc()))
            return

        # Bail out early on missing output: the original code only logged
        # these errors and then fell through to json.loads(None), which
        # raises TypeError and crashes the read callback.
        if stats_output is None:
            collectd.error('ceph-pool: failed to ceph osd pool stats :: output was None')
            return
        if df_output is None:
            collectd.error('ceph-pool: failed to ceph df :: output was None')
            return

        json_stats_data = json.loads(stats_output)
        json_df_data = json.loads(df_output)

        # Per-pool client I/O rates; ceph omits rates that are currently
        # zero, so missing keys default to 0.
        for pool in json_stats_data:
            pool_key = "pool-%s" % pool['pool_name']
            data[ceph_cluster][pool_key] = {}
            pool_data = data[ceph_cluster][pool_key]
            for stat in ('read_bytes_sec','write_bytes_sec','read_op_per_sec','write_op_per_sec'):
                pool_data[stat] = pool['client_io_rate'][stat] if pool['client_io_rate'].has_key(stat) else 0

        # Per-pool usage from 'ceph df'.  NOTE(review): assumes every pool
        # listed by 'ceph df' also appeared in the stats output above.
        for pool in json_df_data['pools']:
            pool_data = data[ceph_cluster]["pool-%s" % pool['name']]
            for stat in ('bytes_used', 'kb_used', 'objects'):
                pool_data[stat] = pool['stats'][stat] if pool['stats'].has_key(stat) else 0

        # Cluster-wide totals; the key names (and units) changed in ceph 0.84.
        data[ceph_cluster]['cluster'] = {}
        if json_df_data['stats'].has_key('total_bytes'):
            # ceph 0.84+ reports bytes directly
            data[ceph_cluster]['cluster']['total_space'] = int(json_df_data['stats']['total_bytes'])
            data[ceph_cluster]['cluster']['total_used'] = int(json_df_data['stats']['total_used_bytes'])
            data[ceph_cluster]['cluster']['total_avail'] = int(json_df_data['stats']['total_avail_bytes'])
        else:
            # ceph < 0.84 reports KiB; convert to bytes
            data[ceph_cluster]['cluster']['total_space'] = int(json_df_data['stats']['total_space']) * 1024.0
            data[ceph_cluster]['cluster']['total_used'] = int(json_df_data['stats']['total_used']) * 1024.0
            data[ceph_cluster]['cluster']['total_avail'] = int(json_df_data['stats']['total_avail']) * 1024.0
        return data
# Instantiate the plugin once at import time; if construction raises,
# log the failure (with traceback) instead of aborting the interpreter.
try:
    plugin = CephPoolPlugin()
except Exception as exc:
    collectd.error("ceph-pool: failed to initialize ceph pool plugin :: %s :: %s"
            % (exc, traceback.format_exc()))
def configure_callback(conf):
    """Received configuration information"""
    # Delegate parsing of the collectd config block to the shared base class.
    plugin.config_callback(conf)
def read_callback():
    """Callback triggered by collectd on read."""
    # Base class drives get_stats() and dispatches the resulting values.
    plugin.read_callback()
# Register with collectd: configuration first, then periodic reads at the
# plugin's configured interval.
collectd.register_config(configure_callback)
collectd.register_read(read_callback, plugin.interval)
| [
"karan.singh731987@gmail.com"
] | karan.singh731987@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.