from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError
from typing import Optional, List, Tuple
import gzip
import zlib
def decompress_gzip(content: bytes) -> bytes:
"""
Decompress content compressed with gzip.
:param content: content to decompress
:return:
"""
try:
return gzip.decompress(content)
except OSError:
return b''
def decompress_deflate(content: bytes) -> bytes:
    """
    Decompress deflate-compressed content.
    :param content: content to decompress
    :return: decompressed bytes, or an empty byte string on failure
    """
    try:
        return zlib.decompress(content)
    except zlib.error:
        try:
            # Some servers send raw deflate data without the zlib header.
            return zlib.decompress(content, -zlib.MAX_WBITS)
        except zlib.error:
            return b''
class ContentFetcher:
"""
Responsible for fetching content from a given URL.
"""
decompressors = {
"gzip": decompress_gzip,
"deflate": decompress_deflate
}
def __init__(self, user_agents: List[str]):
"""
:param user_agents: list of user agent strings to cycle through for request headers.
"""
self.user_agents = user_agents
self.user_agent_index = 0
def get_next_user_agent(self) -> Optional[str]:
"""
Retrieves the next user agent. The code will cycle through the provided user agents
sequentially.
:return: a string representing a user agent, or None if no user agents were provided
"""
if len(self.user_agents) == 0:
return None
user_agent = self.user_agents[self.user_agent_index]
self.user_agent_index = (self.user_agent_index + 1) % len(self.user_agents)
return user_agent
def decompress_content(self, content: bytes, encoding: str) -> bytes:
"""
Attempt to decompress the content given the encoding
:param content: content to decompress
:param encoding: encoding as extracted from response header
:return: decompressed bytes if successful, empty byte string if not
"""
if encoding in self.decompressors:
return self.decompressors[encoding](content)
return b''
def retrieve_page(self, url: str) -> str:
"""
Attempts to fetch the web page at the provided url, returning its contents as a string.
:param url: the url to fetch from.
:return: contents of fetched web page, or empty string if fetching failed
"""
request = self.construct_request(url)
try:
response = urlopen(request)
return self.handle_response(response.getheaders(), response.read())
except (HTTPError, URLError) as e:
print(e)
return ""
def construct_request(self, url: str) -> Request:
"""
Build the HTTP request for the URL given.
:param url: url to request as a string
:return: an urllib Request object
"""
url_request = Request(url)
user_agent = self.get_next_user_agent()
if user_agent is not None:
url_request.add_header("User-Agent", user_agent)
url_request.add_header("Accept", "text/html")
url_request.add_header("Accept-Encoding", "gzip, deflate")
return url_request
    def handle_response(self, headers: List[Tuple[str, str]], content: bytes) -> str:
"""
Handle the HTTP response, including any encoding and compression.
:param headers: list of tuples representing the response headers
:param content: bytes of content
:return: string containing the decoded, decompressed page contents
"""
        encoding = None
        for name, value in headers:
            if name == "Content-Encoding":
                encoding = value
                break
        if encoding is not None:
            content = self.decompress_content(content, encoding)
        return content.decode("utf-8", "ignore")
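# Example usage (a minimal sketch; the user-agent string and URL are illustrative):
if __name__ == "__main__":
    fetcher = ContentFetcher(["Mozilla/5.0 (example-agent)"])
    print(fetcher.retrieve_page("https://example.com")[:200])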
|
import pandas as pd
def scale_profile(profile, weight):
"""Scale hourly profile using a list of monthly weights.
:param pandas.DataFrame profile: hourly profile.
:param list weight: list of monthly weights.
:return: (*pandas.DataFrame*) -- scaled hourly profile.
:raises TypeError: if profile is not a time series or weight is not a list.
:raises ValueError: if frequency of time series is not 1h or size of weight is
not 12
"""
if not isinstance(profile, pd.Series):
raise TypeError("profile must be a pandas.Series object")
if not isinstance(weight, list):
raise TypeError("weight must be a list")
if pd.infer_freq(profile.index) != "H":
raise ValueError("frequency of time series must be 1h")
if len(weight) != 12:
raise ValueError("the list of weight must have exactly 12 elements")
monthly_profile = profile.resample("M").sum(min_count=24 * 28)
monthly_factor = [t / p for t, p in zip(weight, monthly_profile.values)]
hourly_factor = (
pd.Series(
monthly_factor,
index=pd.date_range(profile.index.min(), periods=12, freq="MS"),
)
.resample("H")
.ffill()
.reindex(profile.index, method="ffill")
)
return profile * hourly_factor
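# Example usage (a sketch; the flat profile and 1..12 weights are illustrative,
# and a pandas version that reports hourly frequency as "H" is assumed):
if __name__ == "__main__":
    index = pd.date_range("2016-01-01", "2016-12-31 23:00", freq="H")
    profile = pd.Series(1.0, index=index)
    scaled = scale_profile(profile, list(range(1, 13)))
    print(scaled.resample("M").sum())  # each monthly total now equals its weight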
|
# Copyright (c) 2021 kamyu. All rights reserved.
#
# Google Code Jam 2021 Round 3 - Problem A. Build-A-Pair
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000436142/0000000000813aa8
#
# Time: O((N/(2b) + 1)^b * b^2 * N), b = 10, passes in PyPy2 but not in Python2
# Space: O(b)
#
from operator import mul
def greedy(n, l, count, order):  # Time: O(N)
    for i in order(xrange(len(count))):
if count[i] == 0:
continue
common = min(l, count[i])
l -= common
count[i] -= common
for _ in xrange(common):
n = 10*n + i
return n
def odd_case(count): # Time: O(N)
d = next(d for d in xrange(1, len(count)) if count[d])
count[d] -= 1
remain = sum(count)
A = greedy(d, remain//2, count, lambda x: x)
B = greedy(0, remain//2, count, reversed)
return A-B
def mask_to_count(count, choice, mask): # Time: O(b)
new_count = [0]*BASE
for k, v in enumerate(choice):
mask, cnt = divmod(mask, v)
new_count[k] = cnt*2+count[k]%2
return new_count
def even_case(count): # Time: O((N/(2b) + 1)^b * b^2 * N)
choice = [0]*BASE
for k, v in enumerate(count):
choice[k] = v//2+1
total = reduce(mul, (v for v in choice if v))
result = float("inf")
for mask in xrange(total): # enumerate all possible prefixes
# N/2 + b >= (c0+1) + (c1+1) + ... + (c(b-1)+1) >= b * ((c0+1)*(c1+1)*...*(c(b-1)+1))^(1/b)
# (c0+1)*(c1+1)*...*(c(b-1)+1) <= (N/(2b) + 1)^b
# mask loops at most O((N/(2b) + 1)^b) times
has_prefix = True
new_count = mask_to_count(count, choice, mask)
if all(new_count[k] == count[k] for k in xrange(1, len(count))): # no digit other than 0 is chosen
if new_count[0] != count[0]: # invalid
continue
has_prefix = False
candidates = [k for k, v in enumerate(new_count) if v and (k or has_prefix)]
if not candidates:
return 0
if len(candidates) == 1:
continue
remain = sum(new_count)
for i in xrange(1, len(candidates)): # O(b^2) times
for j in xrange(i):
tmp_count = new_count[:]
tmp_count[candidates[i]] -= 1
tmp_count[candidates[j]] -= 1
A = greedy(candidates[i], remain//2-1, tmp_count, lambda x: x) # Time: O(N)
B = greedy(candidates[j], remain//2-1, tmp_count, reversed) # Time: O(N)
result = min(result, A-B)
return result
def build_a_pair():
    D = raw_input().strip()
    count = [0]*BASE
    for c in D:
        count[int(c)] += 1
return odd_case(count) if sum(count)%2 == 1 else even_case(count)
BASE = 10
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, build_a_pair())
|
# coding=utf-8
'''
"Alarm" player. Can be called multiple times; repeated calls with the same value have no effect.
The single interface:
play(wav_filename) loops a wav audio file (must not be 'default')
play('default') plays the Windows beep sound; the pattern can be configured in beep.conf
    (no sound has been observed on Win7 even though music playback works; it may also be a sound card issue)
play('') changes nothing
play(None) stops playback
The built-in win32 system sounds can also be used:
'SystemAsterisk' Asterisk
'SystemExclamation' Exclamation
'SystemExit' Exit Windows
'SystemHand' Critical Stop
'SystemQuestion' Question
'SystemDefault'
'''
import winsound
from sine.threads import ReStartableThread
from exception import ClientException
_list = []
def _init():
def create_beep(Hz, last):
        def __func_beep():
            winsound.Beep(Hz, last)
        return __func_beep
import time
def create_sleep(last):
def __func_sleep():
time.sleep(last)
return __func_sleep
import data
from initUtil import warn
import sine.propertiesReader as reader
    # read in the beep pattern configuration
beep_filename = 'beep.conf'
default_pattern = [(600,50),(200,),(600,50),(300,)]
lines = []
try:
data.useDefault(data.data['location'], beep_filename)
lines = reader.readAsList(data.data['location'].join(beep_filename))
except Exception, e:
warn('load beep pattern from file', beep_filename, 'failed, will use default value.', e)
beep_pattern = default_pattern
try:
if 'beep_pattern' not in locals():
beep_pattern = []
for i, (s, unuse) in enumerate(lines):
array = s.split(',')
if len(array) > 1:
frequency = int(array[0].strip())
if (frequency < 37 or frequency > 32767):
                    raise ClientException('frequency must be between 37 and 32767, but got ' + str(frequency))
duration = int(array[1].strip())
                if (duration <= 0):
                    raise ClientException('duration must be positive, but got ' + str(duration))
                if (duration > 10000):
                    raise ClientException('duration is too big (more than 10000), but got ' + str(duration))
beep_pattern.append((frequency, duration))
else:
last = int(array[0].strip())
                if (last <= 0):
                    raise ClientException('last must be positive, but got ' + str(last))
beep_pattern.append((last,))
except Exception, e:
warn('parse beep pattern failed, will use default value.', e)
beep_pattern = default_pattern
for s in beep_pattern:
if len(s) > 1:
_list.append(create_beep(s[0], s[1]))
else:
_list.append(create_sleep(s[0] / 1000.0))
_init()
def _alarm(stop_event):
while 1:
for func in _list:
if stop_event.is_set():
return
func()
return
_name = None # never the empty string ''
_beep = 'default'
_alarmThread = ReStartableThread(target=_alarm)
def play(name):
global _name
if _name == name or name == '':
return
    if _name != None: # if something is playing, stop the current beep or music
_alarmThread.stop()
winsound.PlaySound(None, winsound.SND_PURGE)
if name != None:
if name == _beep or not isLegal(name):
_alarmThread.start()
else:
            # play a system sound, or play a wav file by absolute path (the latter takes priority)
winsound.PlaySound(name, winsound.SND_ALIAS | winsound.SND_ASYNC | winsound.SND_LOOP)
winsound.PlaySound(name, winsound.SND_FILENAME | winsound.SND_ASYNC | winsound.SND_LOOP)
_name = name
return
_extraLegal = [
'',
_beep,
'SystemAsterisk',
'SystemExclamation',
'SystemExit',
'SystemHand',
'SystemQuestion',
'SystemDefault']
def isLegal(name):
    '''Check whether the audio file exists or the name is one of the legal system values above.'''
import os
if name in _extraLegal:
return True
if os.path.isfile(name):
return True
if os.path.isfile(name + '.wav'):
return True
return False
def assertLegal(name):
if not isLegal(name):
        raise ClientException('wav file \'' + name + '\' or \'' + name + '.wav\' does not exist and is not a system sound')
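# Example usage (a sketch; 'alarm.wav' is an illustrative file name):
# play('default')    # loop the configured beep pattern
# play('alarm.wav')  # loop a wav file, if it exists
# play(None)         # stop playback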
|
# create deterministic fixed keys according to the specified parameter sizes
raise NotImplementedError("module not complete")
from hashlib import sha512
from epqcrypto.utilities import deterministic_random, bytes_to_integer, modular_inverse
from epqcrypto.asymmetric.trapdoor import Q, INVERSE_SIZE, SHIFT, K_SIZE, A_SHIFT, S_SIZE, E_SHIFT, MASK
TEST_VECTOR_SEED = "\x01" * 16
TEST_VECTOR_NONCE = "\x00" * 16
TEST_VECTOR_HASH = sha512
def increment_nonce(nonce):
    output = bytearray(nonce)
    for index, byte in enumerate(output):
        output[index] = (byte + 1) & 0xFF
        if output[index] != 0:
            break
    else:
        raise ValueError("Counter Roll-over")
    return bytes(output)
def random_integer(size, seed=TEST_VECTOR_SEED, nonce=TEST_VECTOR_NONCE, hash_function=TEST_VECTOR_HASH):
random_bits = deterministic_random(size, seed, nonce, hash_function)
assert len(random_bits) == size
return bytes_to_integer(bytearray(random_bits))
def generate_private_key(inverse_size=INVERSE_SIZE, k_size=K_SIZE, q=Q, shift=SHIFT, seed=TEST_VECTOR_SEED, nonce=TEST_VECTOR_NONCE):
""" usage: generate_private_key(inverse_size=INVERSE_SIZE, k_size=K_SIZE, q=Q, shift=SHIFT,
seed=TEST_VECTOR_SEED, nonce=TEST_VECTOR_NONCE) => private_key
Returns the integer(s) that constitute a private key. """
while True:
inverse = random_integer(inverse_size, seed, nonce) << shift
nonce = increment_nonce(nonce)
k = random_integer(k_size, seed, nonce)
nonce = increment_nonce(nonce)
try:
modular_inverse(inverse, q + k)
except ValueError:
continue
else:
break
return inverse, q + k
def generate_public_key(private_key, q=Q, a_shift=A_SHIFT):
""" usage: generate_public_key(private_key, q=Q, a_shift=A_SHIFT) => public_key
Returns the integer that constitutes a public key. """
ai, q_k = private_key
a = modular_inverse(ai, q_k)
return (a >> a_shift) << a_shift
def generate_keypair(inverse_size=INVERSE_SIZE, k_size=K_SIZE, q=Q, shift=SHIFT,
seed=TEST_VECTOR_SEED, nonce=TEST_VECTOR_NONCE):
""" usage: generate_keypair(invers_size=INVERSE_SIZE,
q_size=Q_SIZE, k_size=K_SIZE,
seed=TEST_VECTOR_SEED, nonce=TEST_VECTOR_NONCE) => public_key, private_key
Returns a public key and a private key. """
private_key = generate_private_key(inverse_size, k_size, q, shift, seed, nonce)
public_key = generate_public_key(private_key, q)
return public_key, private_key
|
import tensorflow as tf
import numpy as np
phi_up = np.load('./Data/Channel/Phase_uplink_init.npy')
diag_Phi_up = tf.linalg.diag(phi_up)
print(diag_Phi_up.shape)
a = np.array([[1, 2, 3], [1, 2, 3]])    # shape (2, 3)
b = np.array([[1, 2], [1, 1], [1, 0]])  # shape (3, 2)
c = a @ b  # matrix product; elementwise a*b would raise, since (2, 3) and (3, 2) do not broadcast
print(c)
|
from django.db import models
# Create your models here.
from django.utils import timezone
# Model definition
class Post(models.Model): # Defines the model class, inheriting from Django's models.Model
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(
default=timezone.now)
published_date = models.DateTimeField(
blank=True, null=True)
    def publish(self): # Method to publish the post
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
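# Example usage (a sketch, assuming an existing auth User instance `user`):
# post = Post(author=user, title="Hello", text="First post")
# post.publish()  # sets published_date to now and saves the post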
|
from models import db, Post, User, Tag, PostTag
from app import app
from random import randint
db.drop_all()
db.create_all()
users = [
User(first_name='Sara', last_name='Sanders'),
User(first_name='John', last_name='Doe'),
User(first_name='Diane', last_name='Diedrich'),
User(first_name='Robert', last_name='Reiner'),
User(first_name='Michael', last_name='MacGillicudy'),
User(first_name='Donald', last_name='Dunn'),
User(first_name='Daniel', last_name='Dunn')
]
posts = [
Post(title='My first puppy', content="Look at my puppy. Isn't he so cute?", poster_id=randint(1, len(users))),
    Post(title='Movie Review: Lighthouse', content="My dad didn't understand it, but I thought it was great.", poster_id=randint(1, len(users))),
Post(title='Oregon Trip', content="The fam and I went up to Oregon last week, it was pretty cool.", poster_id=randint(1, len(users))),
Post(title='Back Propagation in Neural Nets', content="I couldn't tell you mathematically how they work, but I guess I understand it.", poster_id=randint(1, len(users))),
Post(title='My latest tortoise', content="I just got another tortoise. Isn't she so cute?", poster_id=randint(1, len(users))),
Post(title='When is TES6 coming?', content="Seriously, does anyone know? DM me if you've got the scoop.", poster_id=randint(1, len(users))),
Post(title='Found something while cycling', content="It's a baby garter snake. He tried to bite me, but he was too small.", poster_id=randint(1, len(users))),
Post(title='Teaching my cat how to skateboard', content="Everyone thought I was crazy, but I finally did it. Who's laughing now, huh?", poster_id=randint(1, len(users)))
]
tags = [
Tag(name="winning"), Tag(name='pets'), Tag(name='summer'), Tag(name='science'), Tag(name='video games')
]
db.session.add_all(users)
db.session.commit()
db.session.add_all(posts)
db.session.commit()
db.session.add_all(tags)
db.session.commit()
posts[0].tags.append(tags[1])
posts[2].tags.append(tags[2])
posts[3].tags.append(tags[3])
posts[4].tags.append(tags[1])
posts[4].tags.append(tags[2])
posts[5].tags.append(tags[4])
posts[6].tags.append(tags[2])
posts[6].tags.append(tags[0])
posts[7].tags.append(tags[0])
posts[7].tags.append(tags[1])
db.session.add_all(posts)
db.session.commit()
|
def palindrome(original):
    """Return True if original is a non-empty palindrome, else False."""
    if original == "":
        return False
    return original == original[::-1]
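# Example usage:
# palindrome("racecar")  # True
# palindrome("hello")    # False
# palindrome("")         # False (the empty string counts as a non-palindrome here)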
|
def quicksort(items):
    if len(items) > 1:
        l, e, g = partition(items)
        return quicksort(l) + e + quicksort(g)
    else:
        return items
def partition(items):
    pivot = items[len(items) - 1]
    l = []  # elements less than the pivot
    e = []  # elements equal to the pivot
    g = []  # elements greater than the pivot
    for item in items:
        if item > pivot:
            g.append(item)
        elif item < pivot:
            l.append(item)
        else:
            e.append(item)
    return l, e, g
if __name__ == "__main__":
    items = [1, 5, 7, 8, 3, 6, 8, 0, 9]
    print(quicksort(items))
|
import sys,os
sys.path.append(os.getcwd())
from appium import webdriver
import pytest
from Page_Object_Pro.Page.sms import Send_Sms
from Page_Object_Pro.Base.base import Base
class Test_Search:
def setup_class(self):
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '8.0.0'
desired_caps['deviceName'] = 'A5RNW18208010252'
desired_caps['appPackage'] = 'com.android.mms'
desired_caps['appActivity'] = '.ui.ConversationList'
self.driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", desired_caps)
        self.teb_obj = Base(self.driver)
        self.search_obj = Send_Sms(self.driver)
def teardown_class(self):
self.driver.quit()
@pytest.mark.parametrize("phone",['123456789'])
def test_add_input_phone(self,phone):
self.search_obj.add_sms_btn()
self.search_obj.accept_user_input(phone)
@pytest.mark.parametrize("text", ['123456789'])
def test_input_sms(self,text):
self.search_obj.send_sms_input(text)
if __name__ == '__main__':
pytest.main()
|
def find_needle(haystack, needle):
    # an empty needle matches at position 0 by convention
    if len(needle) == 0:
        return 0
    words = haystack.split(" ")
    if needle not in words:
        return -1
    for i in range(len(words)):
        if words[i] == needle:
            return i
haystack = raw_input()
needle = raw_input().strip(" ")
print find_needle(haystack, needle)
|
#!/bin/env python
# encoding:utf-8
'''
#=============================================================================
# FileName: exception.py
# Desc:
# Author: Crow
# Email: lrt_no1@163.com
# HomePage: @_@"
# Version: 0.0.1
# LastChange: 2016-05-24 10:03:32
# History:
#=============================================================================
'''
class AppBaseException(Exception):
pass
class AppLookupException(AppBaseException):
pass
class AppLoadException(AppBaseException):
pass
#--------------------------------------------------------------
class ExceShellCommandException(Exception):
pass
#--------------------------------------------------------------
class ServerException(Exception):
pass
class TcpServerException(Exception):
pass
|
#! /usr/bin/env python3
#03_schaltjahre.py
jahr = int(input("Enter a year: "))
if (jahr%400 == 0) or (jahr%4 == 0) and not (jahr%100 == 0):
    print("It is a leap year!")
else:
    print("It is NOT a leap year!!!")
input("Press ENTER to exit.")
|
import os
from keras.models import Model, Sequential
from keras.layers import Input, Activation, Flatten, Conv2D, Dense, MaxPooling2D, UpSampling2D, Concatenate, Dropout, AlphaDropout
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.regularizers import l2
from make_parallel import make_parallel
class ClassyCoder(object):
    def __init__(self, input_shape=(64, 64, 1), num_categories=5, verbose=False):
"""
https://keras.io/getting-started/functional-api-guide/#multi-input-and-multi-output-models
https://keras.io/getting-started/functional-api-guide/#shared-layers
https://blog.keras.io/building-autoencoders-in-keras.html
"""
input_img = Input(shape=input_shape, name="main_input")
self.verbose = verbose
if self.verbose:
print "Network input shape is", input_img.get_shape()
x = Conv2D(32, (3, 3), padding='same', activity_regularizer=l2(10e-8))(input_img)
x = LeakyReLU(alpha=0.05)(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(16, (3, 3), padding='same', activity_regularizer=l2(10e-8))(x)
x = LeakyReLU(alpha=0.05)(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), padding='same', activity_regularizer=l2(10e-8))(x)
x = LeakyReLU(alpha=0.05)(x)
x = BatchNormalization()(x)
encoded = MaxPooling2D((2, 2), padding='same', name="encoded")(x)
encoding_shape = encoded.get_shape()
encoding_dims = int(encoding_shape[1] * encoding_shape[2] * encoding_shape[3])
if self.verbose:
print "Encoding shape is", encoding_shape, "(", encoding_dims, "dimensions )"
# at this point the representation is (n, n, 8) i.e. (n*n*8)-dimensional
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)
# next, declare the auto_encoding output side
n = 0;
n +=1; ae = Conv2D(8, (3, 3), padding='same')(encoded)
n +=1; ae = LeakyReLU(alpha=0.05)(ae)
n +=1; ae = UpSampling2D((2, 2))(ae)
n +=1; ae = Conv2D(16, (3, 3), padding='same')(ae)
n +=1; ae = LeakyReLU(alpha=0.05)(ae)
n +=1; ae = UpSampling2D((2, 2))(ae)
n +=1; ae = Conv2D(32, (3, 3), padding='same')(ae)
n +=1; ae = LeakyReLU(alpha=0.05)(ae)
n +=1; ae = UpSampling2D((2, 2))(ae)
n +=1; decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(ae)
if self.verbose:
print "Decoder output shape is", decoded.get_shape()
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='mse', metrics=['mae'])
# use right side of architecture encoded input to construct an image
encoded_input = Input(shape=(int(encoding_shape[1]),int(encoding_shape[2]),int(encoding_shape[3])))
deco = encoded_input
for l in range(-n, 0):
deco = autoencoder.layers[l](deco)
decoder = Model(encoded_input, deco)
# and then, the classifier
n = 0
n +=1; cl = Flatten()(encoded)
n +=1; cl = Dense(encoding_dims, activation='selu')(cl)
n +=1; cl = AlphaDropout(0.1)(cl)
n +=1; cl = Dense(512, activation='selu')(cl)
n +=1; cl = AlphaDropout(0.1)(cl)
n +=1; cl = Dense(256, activation='selu')(cl)
n +=1; cl = AlphaDropout(0.1)(cl)
n +=1; cl = Dense(128, activation='selu')(cl)
n +=1; classified = Dense(num_categories, activation='softmax')(cl)
if self.verbose:
print "Classifier output shape is", classified.get_shape()
# provide classification on images
imageclassifier = Model(input_img, classified)
imageclassifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['mae'] )
# and classifications of encoded representations
fc = encoded_input
for l in range(-n, 0):
fc = imageclassifier.layers[l](fc)
featureclassifier = Model(encoded_input, fc)
featureclassifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
# complete model (1 input, 2 outputs)
classycoder = Model(inputs=[input_img], outputs=[decoded, classified])
classycoder.compile(
optimizer='adam',
loss=['mse', 'categorical_crossentropy'],
loss_weights=[0.618, 0.618],
metrics=['mae', 'acc'])
#helpful to know this
self.encoding_dims = encoding_dims
self.encoding_shape = encoding_shape
        # Receives an image and encodes it into its latent space representation
self.encoder = encoder
# direct pipe from input_image to its reconstruction
self.autoencoder = autoencoder
# reconstructs an image from its encoded representation
self.decoder = decoder
# direct pipe from input_image to its classification
self.imageclassifier = imageclassifier
# direct pipe from encoded representation to classification
self.featureclassifier = featureclassifier
# multiple output model, train this for the rest to work.
self.classycoder = classycoder
def save(self, path):
self.classycoder.save_weights(path)
def load(self, path):
if self.verbose:
print "Loading weights", path
self.classycoder.load_weights(path)
def path(self):
return os.path.dirname(os.path.realpath(__file__))
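# Training sketch (illustrative; assumes x is an (N, 64, 64, 1) image array and
# y is an (N, 5) one-hot label array):
# cc = ClassyCoder(input_shape=(64, 64, 1), num_categories=5, verbose=True)
# cc.classycoder.fit(x, [x, y], epochs=10, batch_size=32)
# codes = cc.encoder.predict(x)                # latent representations
# probs = cc.featureclassifier.predict(codes)  # classify from latent codes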
|
from sys import argv,exit
import os.path
print("argvLen:", len(argv))
# print(argv[0])
def count(file_path):
"""count the line, word, and char of a given file"""
line_count = word_count = char_count = 0
with open(file_path, mode="r", encoding="utf8") as file:
# file= open(file_path,'r')
print("file opened")
for line in file:
line_count += 1
char_count += len(line)
word_count += len(line.split())
# file.close()
return line_count, word_count, char_count
if len(argv) < 2:
print("***ERROR: not enough arguments")
exit(1)
else:
# for i in range(1, len(argv)):
for i in argv[1:]:
print("file path:", argv[1])
# if not os.path.isfile(argv[1]):
if not os.path.exists(i):
print("file not exists:", i)
else:
# l, w, c = count(i)
# print("l:", l, "\tw:", w, "\tc:", c, "\tf:", i.split("\\")[-1])
print("%sl %sw %sc " % count(i), "\tf:", i.split("\\")[-1])
|
import csv, os
# import information about the relations between recoders and indices
def get_fdr_indices(recoders):
mydir = '..'+os.sep+'recoder_index'+os.sep
files = os.listdir(mydir)
for filename in files:
reader = csv.reader(file(mydir+filename, 'rb'), delimiter='\t')
recoderType = filename.split('.')[0]
recoders[recoderType] = {}
for line in reader:
recoders[recoderType][line[0].strip()] = line[2].strip()
return recoders
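# Example usage (a sketch; assumes tab-delimited index files under ../recoder_index/):
# recoders = get_fdr_indices({})
# print(recoders.keys())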
|
from dataclasses import dataclass
@dataclass
class Result:
name: str
math: int
science: int
english: int
def score():
results = [
Result("Alice", 100, 65, 57),
Result("Bob", 45, 98, 100),
Result("Charley", 50, 50, 50)]
return results
def max_point(student):
m_score = 0
s_score = 0
e_score = 0
m_name = ""
s_name = ""
e_name = ""
for x in student:
if x.math >= m_score:
m_score = x.math
m_name = x.name
if x.science >= s_score:
s_score = x.science
s_name = x.name
if x.english >= e_score:
e_score = x.english
e_name = x.name
return f"数学:{m_name}、物理:{s_name}、英語:{e_name}"
def total_max_point(student):
total = 0
name = ""
for x in student:
if (x.math + x.science + x.english) >= total:
total = x.math + x.science + x.english
name = x.name
return f"総合:{name}"
print(max_point(score()))
print(total_max_point(score()))
|
import math
import os
import random
import re
import sys
class SinglyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, node_data):
node = SinglyLinkedListNode(node_data)
if not self.head:
self.head = node
else:
self.tail.next = node
self.tail = node
def print_singly_linked_list(node, sep, fptr):
while node:
fptr.write(str(node.data))
node = node.next
if node:
fptr.write(sep)
def length(head):
c=0
while head:
c+=1
head=head.next
return c
def findMergeNode(head1, head2):
m=length(head1)
n=length(head2)
    # if the second list is longer, there is no need to swap (head2 should end up as the longer list)
if n>m:
d=n-m
    # the first list is longer (or equal), so swap to make head2 the longer one
else:
temp=head1
head1=head2
head2=temp
d=m-n
    # now head2 always points at the longer list
for i in range(d):
head2=head2.next
while head1 and head2:
if head1==head2:
return head1.data
head1=head1.next
head2=head2.next
return None
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
tests = int(input())
for tests_itr in range(tests):
index = int(input())
llist1_count = int(input())
llist1 = SinglyLinkedList()
for _ in range(llist1_count):
llist1_item = int(input())
llist1.insert_node(llist1_item)
llist2_count = int(input())
llist2 = SinglyLinkedList()
for _ in range(llist2_count):
llist2_item = int(input())
llist2.insert_node(llist2_item)
ptr1 = llist1.head
ptr2 = llist2.head
for i in range(llist1_count):
if i < index:
ptr1 = ptr1.next
for i in range(llist2_count):
if i != llist2_count-1:
ptr2 = ptr2.next
ptr2.next = ptr1
result = findMergeNode(llist1.head, llist2.head)
fptr.write(str(result) + '\n')
fptr.close()
|
"""
CSSE1001 Assignment 3
Semester 1, 2017
"""
import tkinter as tk
from tkinter import messagebox
import random
import winsound
import json
import model
import view
import highscores
from game_regular import RegularGame
# # For alternative game modes
from game_make13 import Make13Game
from game_lucky7 import Lucky7Game
from game_unlimited import UnlimitedGame
from highscores import HighScoreManager
__author__ = "<Wayne>"
__version__ = "1.1.2"
# Once you have created your basic gui (LoloApp), you can delete this class
# and replace it with the following:
# from base import BaseLoloApp
class BaseLoloApp:
"""Base class for a simple Lolo game."""
def __init__(self, master, game=None, grid_view=None):
"""Constructor
Parameters:
master (tk.Tk|tk.Frame): The parent widget.
game (model.AbstractGame): The game to play. Defaults to a
game_regular.RegularGame.
grid_view (view.GridView): The view to use for the game. Optional.
Raises:
ValueError: If grid_view is supplied, but game is not.
"""
self._master = master
        # Game
        if game is None:
            if grid_view is not None:
                raise ValueError("A grid view cannot be given without a game.")
            game = RegularGame(types=3)
        self._game = game
        # Grid View
        if grid_view is None:
            grid_view = view.GridView(master, self._game.grid.size())
self._grid_view = grid_view
self._grid_view.pack()
self._grid_view.draw(self._game.grid, self._game.find_connections())
# Events
self.bind_events()
def bind_events(self):
"""Binds relevant events."""
self._grid_view.on('select', self.activate)
self._game.on('game_over', self.game_over)
self._game.on('score', self.score)
def create_animation(self, generator, delay=200, func=None, callback=None):
"""Creates a function which loops through a generator using the tkinter
after method to allow for animations to occur
Parameters:
generator (generator): The generator yielding animation steps.
delay (int): The delay (in milliseconds) between steps.
func (function): The function to call after each step.
callback (function): The function to call after all steps.
Return:
(function): The animation runner function.
"""
def runner():
try:
value = next(generator)
self._master.after(delay, runner)
if func is not None:
func()
except StopIteration:
if callback is not None:
callback()
return runner
def activate(self, position):
"""Attempts to activate the tile at the given position.
Parameters:
position (tuple<int, int>): Row-column position of the tile.
Raises:
IndexError: If position cannot be activated.
"""
# Magic. Do not touch.
if position is None:
return
if self._game.is_resolving():
return
if position in self._game.grid:
if not self._game.can_activate(position):
hell = IndexError("Cannot activate position {}".format(position))
raise hell # he he
def finish_move():
self._grid_view.draw(self._game.grid,
self._game.find_connections())
def draw_grid():
self._grid_view.draw(self._game.grid)
animation = self.create_animation(self._game.activate(position),
func=draw_grid,
callback=finish_move)
animation()
def remove(self, *positions):
"""Attempts to remove the tiles at the given positions.
Parameters:
*positions (tuple<int, int>): Row-column position of the tile.
Raises:
IndexError: If position cannot be activated.
"""
        if not positions:
return
if self._game.is_resolving():
return
def finish_move():
self._grid_view.draw(self._game.grid,
self._game.find_connections())
def draw_grid():
self._grid_view.draw(self._game.grid)
animation = self.create_animation(self._game.remove(*positions),
func=draw_grid,
callback=finish_move)
animation()
def reset(self):
"""Resets the game."""
raise NotImplementedError("Abstract method")
def game_over(self):
"""Handles the game ending."""
raise NotImplementedError("Abstract method") # no mercy for stooges
def score(self, score):
"""Handles change in score.
Parameters:
score (int): The new score.
"""
# Normally, this should raise the following error:
# raise NotImplementedError("Abstract method")
# But so that the game can work prior to this method being implemented,
# we'll just print some information.
# Sometimes I believe Python ignores all my comments :(
print("Score is now {}.".format(score))
print("Don't forget to override the score method!")
# Note: # score can also be retrieved through self._game.get_score()
# Define your classes here
class LoloApp(BaseLoloApp):
"""Class for a Lolo game."""
def __init__(self, master, game=None, grid_view=None, playername='None'):
"""Constructor
Parameters:
master (tk.Tk|tk.Frame): The parent widget.
game (model.AbstractGame): The game to play. Defaults to a
game_regular.RegularGame.
grid_view (view.GridView): The view to use for the game. Optional.
Raises:
ValueError: If grid_view is supplied, but game is not.
"""
self._master = master
self._player_name = playername
self._game = game
self._lightning_count = 2
self._grid_view = grid_view
self._file = playername + '_' + self._game.get_name() + '_save.json'
self._savegame = SaveGame(self._file, game.get_name(), self._lightning_count)
self._loadgame = {}
self._logo_frame = tk.Frame(self._master)
self._logo_frame.pack(side=tk.TOP, fill=tk.BOTH)
self._statusBar = tk.Frame(self._master)
self._statusBar.pack(side=tk.TOP, fill=tk.BOTH, padx=10)
super().__init__(self._master, self._game, self._grid_view)
self._master.title('Lolo :: ' + self._game.get_name() + ' Game')
self._menubar = tk.Menu(self._master)
self._master.config(menu=self._menubar)
filemenu = tk.Menu(self._menubar, tearoff=0)
self._menubar.add_cascade(label="File", menu=filemenu)
filemenu.add_command(label="New Game", command=self.reset)
filemenu.add_command(label="Save", command=lambda: self._savegame.record(self._game.get_score(), self._game, self._player_name, self._lightning_count))
filemenu.add_command(label="Load", command=self.loadgame)
filemenu.add_command(label="Exit", command=quit)
self.filename = None
self._logo = LoloLogo(self._logo_frame)
self._sb = StatusBar(self._statusBar, self._game, self._player_name)
self._sb.set_game(self._game.get_name())
self._sb.set_score(self._game.get_score())
self._lightning_frame = tk.Frame(self._master)
self._lightning_frame.pack(side=tk.BOTTOM)
self._lightningbt = tk.Button(self._lightning_frame, text="Lightning ({})".format(self._lightning_count), command=self.lightning)
self._lightningbt.pack(side=tk.BOTTOM)
self._lightning_on = False
self._round_count = 0
self._master.bind("<KeyPress>", self.keyboardevent)
self.reset()
def bind_events(self):
"""Binds relevant events."""
self._grid_view.on('select', self.activate)
self._game.on('game_over', self.game_over)
def loadgame(self):
"""Load saved game from json file."""
try:
self._loadgame = self._savegame.load()
except json.JSONDecodeError:
messagebox.showinfo(title="Load", message="Load Failed")
if self._loadgame == {}:
messagebox.showinfo(title="Load", message="Load Failed")
else:
self._game = RegularGame.deserialize(self._loadgame["grid"])
self._game.set_score(self._loadgame["score"])
self._sb.set_score(self._loadgame["score"])
self._grid_view.draw(self._game.grid, self._game.find_connections())
self._lightning_count = self._loadgame["lighting"]
if self._lightning_count <= 0:
self._lightningbt.config(text="Lightning ({})".format(self._lightning_count), state=tk.DISABLED)
self._lightning_on = False
else:
self._lightningbt.config(text="Lightning ({})".format(self._lightning_count), state=tk.NORMAL)
messagebox.showinfo(title="Load", message="Load Successful")
def keyboardevent(self, event):
"""
Bind keyboard event
        Press Control+n to start a new game, press Control+l to activate the lightning function.
Parameters:
event (event): The keyboard event.
event.state == 4 is <Control>
"""
if event.state == 4 and event.keysym == "n":
self.reset()
elif event.state == 4 and event.keysym == "l":
self.lightning()
def reset(self):
"""Reset the game (start a new game)."""
self._game.reset()
self._sb.set_game(self._game.get_name())
self._sb.set_score(self._game.get_score())
self._grid_view.draw(self._game.grid, self._game.find_connections())
self._lightning_count = 2
self._lightningbt.config(text="Lightning ({})".format(self._lightning_count), state=tk.NORMAL)
def activate(self, position):
"""Attempts to activate the tile at the given position.
Parameters:
position (tuple<int, int>): Row-column position of the tile.
"""
try:
            # check whether the lightning function is activated
if self._lightning_on:
self.remove(position)
self._lightning_on = False
if self._lightning_count == 0:
self._lightningbt.config(text="Lightning ({})".format(self._lightning_count), state=tk.DISABLED)
else:
self._lightningbt.config(text="Lightning ({})".format(self._lightning_count), state=tk.NORMAL)
else:
super().activate(position)
self._sb.set_score(self._game.get_score())
self._round_count += 1
self.lightning_gain()
except IndexError:
messagebox.showinfo(title="Invalid Activation", message="Cannot activate position {}".format(position))
def game_over(self):
"""Handles the game ending."""
self.save_record()
self._lightningbt.config(text="Lightning ({})".format(self._lightning_count), state=tk.DISABLED)
self._lightning_on = False
messagebox.showinfo(title="Game Over", message="Your final score is {}".format(self._game.get_score()))
def lightning(self):
"""Define the lighting function."""
if self._lightning_count > 0:
self._lightningbt.config(text="Lightning On".format(self._lightning_count), state=tk.DISABLED)
self._lightning_on = True
self._lightning_count -= 1
# self.save_record()
else:
self._lightningbt.config(text="Lightning ({})".format(self._lightning_count), state=tk.DISABLED)
self._lightning_on = False
def lightning_gain(self):
"""When meet some conditions can gain extra lighting chances."""
if self._round_count % 20 == 0:
print(self._game.get_score() % 20)
self._lightning_count += 1
self._lightningbt.config(text="Lightning ({})".format(self._lightning_count), state=tk.NORMAL)
def save_record(self):
"""Save game record."""
record = HighScoreManager()
record.record(self._game.get_score(), self._game, self._player_name)
class SaveGame(HighScoreManager):
"""Class for game saving"""
def __init__(self, filename, gamemode, lightning_count):
"""Constructs a game save using the provided json file.
Parameters:
filename (str): The name of the json file which stores the game
information.
gamemode (str): The name of the game mode to load game from.
            lightning_count (int): The number of remaining lightning chances.
"""
self._file = filename
self._gamemode = gamemode
self._lighting_count = lightning_count
self._datasave = {}
self._dataload = {}
def _load_json(self):
"""Loads the game save json file."""
try:
with open(self._file) as file:
try:
data = json.load(file)
except json.JSONDecodeError:
# Failed to decode the json file
# Default to empty leaderboard
data = {}
except IOError:
# Could not locate the json file
# Default to empty leaderboard
data = {}
return data
def load(self):
"""Loads the game save information from the game save file.
"""
self._dataload = self._load_json()
return self._dataload
def save(self):
"""Saves the information of current game to the file."""
with open(self._file, "w") as file:
file.write(json.dumps(self._datasave))
messagebox.showinfo(title="Save", message="Save Successful")
def record(self, score, grid, name=None, lighting_count=2):
"""Makes a record of a game save based on the score, grid, name and lightning count.
Parameters:
score (int): The current score of the game.
grid (LoloGrid): A grid to be serialized into the file.
name (str): The name of the player.
            lighting_count (int): The number of remaining lightning chances.
"""
self._datasave = {"score": score, "name": str(name), "grid": grid.serialize(), "lighting": lighting_count}
print(self._datasave)
self.save()
class StatusBar(tk.Frame):
"""Class for setting the status bar in lolo game."""
def __init__(self, statusbar, game, playername):
"""Constructor
Parameters:
            statusbar (tk.Tk|tk.Frame): The parent widget.
game (model.AbstractGame): The game to play. Defaults to a
game_regular.RegularGame.
playername (string): The name of current player.
"""
super().__init__()
self._statusBar = statusbar
self._game = game
self._player_name = playername
self._game_label = tk.Label(self._statusBar, text='Abstract Mode')
self._game_label.pack(side=tk.LEFT)
self._score_label = tk.Label(self._statusBar, text='Score: 0')
self._score_label.pack(side=tk.RIGHT)
self._name_label = tk.Label(self._statusBar, text='Player: {} '.format(self._player_name))
self._name_label.pack(side=tk.RIGHT)
def set_game(self, game_mode):
"""Set game mode label"""
self._game_label.config(text=game_mode+' Mode')
def set_score(self, score):
"""Set current score label"""
self._score_label.config(text='Score: '+str(score))
class LoloLogo(tk.Canvas):
"""Class for setting the game logo."""
def __init__(self, logo_frame):
"""Constructor
Parameters:
logo_frame (tk.Tk|tk.Frame): The parent widget.
"""
super().__init__()
self._logo_frame = logo_frame
self._canvas = tk.Canvas(self._logo_frame, width=332, height=130)
self.draw()
self._canvas.pack(side=tk.TOP)
def draw(self):
"""Draw the logo"""
self._canvas.create_line(30, 10, 30, 100, 80, 100, width=25, fill='#9966ff')
self._canvas.create_oval(100, 45, 155, 100, width=25, outline='#9966ff')
self._canvas.create_line(195, 10, 195, 100, 245, 100, width=25, fill='#9966ff')
self._canvas.create_oval(265, 45, 320, 100, width=25, outline='#9966ff')
class ObjectiveGame(RegularGame):
"""Objective game of Lolo.
Join groups of three or more until max tiles are formed. Join max tiles to
destroy all surrounding tiles."""
GAME_NAME = "Objective"
def __init__(self, size=(8, 8), types=3, min_group=3,
max_tile_value=50, max_tile_type='max', normal_weight=20,
max_weight=2, animation=True, autofill=True):
"""Constructor
Parameters:
size (tuple<int, int>): The number of (rows, columns) in the game.
types (int): The number of types of basic tiles.
min_group (int): The minimum number of tiles required for a
connected group to be joinable.
normal_weight (int): The relative weighted probability that a basic
tile will be generated.
max_weight (int): The relative weighted probability that a maximum
tile will be generated.
animation (bool): If True, animation will be enabled.
autofill (bool): Automatically fills the grid iff True.
"""
self._file = "objective.json"
self._data = {}
self.load()
self._sizex = 6
self._sizey = 6
self._size = (self._sizex, self._sizey)
self._types = types
self._min_group = min_group
self._starting_grid = []
self._objectives = []
self._limit = 90
self.decode()
super().__init__(self._size, self._types, self._min_group,
max_tile_value, max_tile_type, normal_weight,
max_weight, animation, autofill)
def _load_json(self):
"""Loads the objective json file."""
with open(self._file, 'r') as file:
data = json.load(file)
return data
def load(self):
"""Loads the objective information from the objective file into the
manager.
"""
self._data = self._load_json()
print(self._data)
def decode(self):
"""Decode objective information"""
self._types = self._data["types"]
self._sizex = self._data["sizex"]
self._sizey = self._data["sizey"]
self._size = (self._sizex, self._sizey)
self._min_group = self._data["min_group"]
self._limit = self._data["limit"]
class ObjectiveGameMode(LoloApp):
"""Class for a objective game."""
def __init__(self, master, game=ObjectiveGame(), grid_view=None, playername=None):
"""Constructor
Parameters:
master (tk.Tk|tk.Frame): The parent widget.
game (model.AbstractGame): The game to play. Defaults to a
ObjectiveGame.
grid_view (view.GridView): The view to use for the game. Optional.
playername (string): The current player's name. Default is None.
"""
self._master = master
self._game = game
print(self._game)
super().__init__(self._master, self._game, grid_view, playername)
self._master.title('Lolo :: ' + self._game.get_name() + ' Game')
class MainWindow():
"""Loading Screen."""
def __init__(self, master):
"""Constructor
Parameters:
master (tk.Tk|tk.Frame): The parent widget.
"""
self._master = master
self._game_mode = RegularGame()
        # Background music; only works on Windows
winsound.PlaySound("./Nier.wav", winsound.SND_FILENAME|winsound.SND_ASYNC)
# Logo frame
logo_frame = tk.Frame(self._master)
logo_frame.pack(side=tk.TOP)
logo = LoloLogo(logo_frame)
# Input frame
input_frame = tk.Frame(self._master)
input_frame.pack(side=tk.TOP, pady=10)
name_label = tk.Label(input_frame, text="Your Name: ")
name_label.pack(side=tk.LEFT)
self._name_text = tk.Entry(input_frame)
self._name_text.pack(side=tk.LEFT)
# Button Frame
button_frame = tk.Frame(self._master)
button_frame.pack(side=tk.LEFT, expand=True, padx=100)
bt_playgame = tk.Button(button_frame, text="New Game", command=self.startgame)
bt_playgame.pack(side=tk.TOP, ipadx=100, pady=30)
bt_selectmode = tk.Button(button_frame, text="Game Mode", command=self.gamemodewindow)
bt_selectmode.pack(side=tk.TOP, ipadx=100, pady=30)
bt_selectmode = tk.Button(button_frame, text="Objective Mode", command=self.startobjectivegame)
bt_selectmode.pack(side=tk.TOP, ipadx=100, pady=30)
bt_highscore = tk.Button(button_frame, text="High Score", command=self.highscorewindow)
bt_highscore.pack(side=tk.TOP, ipadx=100, pady=30)
bt_exitgame = tk.Button(button_frame, text="Exit Game", command=quit)
bt_exitgame.pack(side=tk.TOP, ipadx=100, pady=30)
# Auto play frame
self._autogame_frame = tk.Frame(self._master)
self._autogame_frame.pack(side=tk.RIGHT, expand=True, padx=20, pady=10)
self._auto_game2 = AutoPlayingGame(self._autogame_frame, RegularGame())
def startgame(self):
"""Start selected Lolo game."""
if str(self._name_text.get()) != "":
self._auto_game2._grid_view.off('resolve', self._auto_game2.resolve)
root = tk.Toplevel()
app = LoloApp(root, self._game_mode, None, str(self._name_text.get()))
else:
messagebox.showinfo(title="No Name", message="Please input a name!")
def startobjectivegame(self):
"""Start Objective Lolo game."""
if str(self._name_text.get()) != "":
self._auto_game2._grid_view.off('resolve', self._auto_game2.resolve)
root = tk.Toplevel()
app = LoloApp(root, ObjectiveGame(), None, str(self._name_text.get()))
else:
messagebox.showinfo(title="No Name", message="Please input a name!")
def gamemodewindow(self):
"""Show game mode select window."""
gm = GameModeWindow(self)
def highscorewindow(self):
"""Show high score window."""
hs = HighScoreWindow()
class AutoPlayingGame(BaseLoloApp):
"""Class for auto play Lolo game."""
def __init__(self, master, game=None, grid_view=None):
"""Constructor
Parameters:
master (tk.Tk|tk.Frame): The parent widget.
game (model.AbstractGame): The game to play. Defaults to a
game_regular.RegularGame.
grid_view (view.GridView): The view to use for the game. Optional.
"""
super().__init__(master, game, grid_view)
self._move_delay = 1000
self.resolve()
def bind_events(self):
"""Binds relevant events."""
self._game.on('resolve', self.resolve)
self._game.off('score', self.score)
self._grid_view.off('select', self.activate)
self._game.on('game_over', self.game_over)
def resolve(self, delay=None):
"""Makes a move after a given movement delay."""
if delay is None:
delay = self._move_delay
self._master.after(delay, self.move)
def move(self):
"""Finds a connected tile randomly and activates it."""
connections = list(self._game.find_groups())
if connections:
# pick random valid move
cells = list()
for connection in connections:
for cell in connection:
cells.append(cell)
self.activate(random.choice(cells))
else:
self.game_over()
def score(self, score):
"""Handles the score."""
pass
def reset(self):
"""Handles the reset."""
pass
def game_over(self):
"""Handles the game ending."""
self._game.reset()
self._grid_view.draw(self._game.grid, self._game.find_connections())
self.resolve(self._move_delay)
class HighScoreWindow(MainWindow):
"""High Score Screen."""
def __init__(self):
"""Constructor."""
score = highscores.HighScoreManager()
highestdata = score.get_sorted_data()[0]
highestgrid = highestdata['grid']
highestname = highestdata['name']
highestscore = highestdata['score']
game = RegularGame.deserialize(highestgrid)
data = score.get_sorted_data()
name_list = []
score_list = []
for i in range(len(data)):
name_list.append(data[i]['name'])
score_list.append(str(data[i]['score']))
print(name_list)
score_window = tk.Toplevel()
score_window.title("High Scores :: Lolo")
bestplayer = tk.Label(score_window, text="Best Player: {} with {} points!".format(highestname, highestscore))
bestplayer.pack(side=tk.TOP)
sw = BaseLoloApp(score_window, game)
sw._grid_view.off('select', sw.activate)
leaderboard = tk.Label(score_window, text="Leaderboard")
leaderboard.pack(side=tk.TOP)
name_frame = tk.Frame(score_window)
name_frame.pack(side=tk.LEFT)
score_frame = tk.Frame(score_window)
score_frame.pack(side=tk.RIGHT)
# Create leaderboard
for text in name_list:
tk.Label(name_frame, text=text).pack(side=tk.TOP, anchor=tk.W)
for score in score_list:
tk.Label(score_frame, text=score).pack(side=tk.TOP, anchor=tk.E)
class GameModeWindow(MainWindow):
"""Game mode selection window."""
def __init__(self, mw):
"""Constructor
Parameters:
mw (MainWindow): Mainwindow element.
"""
self._mw = mw
self._gamemode_root = tk.Toplevel()
self._gamemode_root.title("Game Modes :: Lolo")
text = [("Regular", 1),
("Make 13", 2),
("Lucky 7", 3),
("Unlimited", 4)]
gamemode_frame = tk.Frame(self._gamemode_root)
gamemode_frame.pack(side=tk.LEFT)
self._gamemode_dict = {1: RegularGame(), 2: Make13Game(), 3: Lucky7Game(), 4: UnlimitedGame()}
# Find the default value
self._gamemode_dict_reverse = {type(RegularGame()): 1, type(Make13Game()): 2, type(Lucky7Game()): 3, type(UnlimitedGame()): 4}
self._save = self._gamemode_dict_reverse[type(self._mw._game_mode)]
self._game_select = tk.IntVar()
self._game_select.set(self._save)
for t, v in text:
tk.Radiobutton(gamemode_frame,
text=t,
variable=self._game_select,
value=v,
command=lambda: self.showauto(self._gamemode_dict[self._game_select.get()])
).pack(side=tk.TOP, padx=200, pady=30)
tk.Button(gamemode_frame, text="Save", command=self.setgame).pack(side=tk.TOP, padx=200, pady=30)
self._game_frame = tk.Frame(self._gamemode_root)
self._game_frame.pack(side=tk.RIGHT, expand=True, padx=20, pady=10)
self._auto_game = AutoPlayingGame(self._game_frame, self._gamemode_dict[self._game_select.get()])
def setgame(self):
"""Set game mode."""
self._mw._game_mode = self._gamemode_dict[self._game_select.get()]
print(self._mw._game_mode)
self._auto_game._game.off('resolve', self._auto_game.resolve)
def showauto(self, gamemode):
"""Show auto playing game with selected mode."""
print(gamemode)
self._auto_game._game.off('resolve', self._auto_game.resolve)
self._game_frame.destroy()
self._game_frame = tk.Frame(self._gamemode_root)
self._game_frame.pack(side=tk.RIGHT, expand=True, padx=20, pady=10)
self.startplay(gamemode)
def startplay(self, gamemode):
"""Change auto playing game with selected mode."""
self._auto_game = AutoPlayingGame(self._game_frame, gamemode)
def main():
# Your GUI instantiation code here
root = tk.Tk()
root.title("LoLo")
main_window = MainWindow(root)
root.mainloop()
if __name__ == "__main__":
main()
|
from django.db import models
# Create your models here.
from healthapp.utils.models import BaseModel
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
from abc import abstractmethod
from pathspec.gitignore import GitIgnorePattern
from pathspec.pathspec import PathSpec
from pants.util.dirutil import fast_relpath
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class ProjectTree(AbstractClass):
"""Represents project tree which is used to locate and read build files.
Has two implementations: one backed by file system and one backed by SCM.
"""
class InvalidBuildRootError(Exception):
"""Raised when the build_root specified to a ProjectTree is not valid."""
class AccessIgnoredPathError(Exception):
"""Raised when accessing a path which is ignored by pants"""
def __init__(self, build_root, ignore_patterns=None):
if not os.path.isabs(build_root):
raise self.InvalidBuildRootError('ProjectTree build_root {} must be an absolute path.'.format(build_root))
self.build_root = os.path.realpath(build_root)
logger.debug('ProjectTree ignore_patterns: %s', ignore_patterns)
self.ignore = PathSpec.from_lines(GitIgnorePattern, ignore_patterns if ignore_patterns else [])
@abstractmethod
def _glob1_raw(self, dir_relpath, glob):
"""Returns a list of paths in path that match glob."""
@abstractmethod
def _listdir_raw(self, relpath):
"""Return the names of paths in the given directory."""
@abstractmethod
def _isdir_raw(self, relpath):
"""Returns True if path is a directory."""
@abstractmethod
def _isfile_raw(self, relpath):
"""Returns True if path is a file."""
@abstractmethod
def _exists_raw(self, relpath):
"""Returns True if path exists."""
@abstractmethod
def _content_raw(self, file_relpath):
"""Returns the content for file at path."""
@abstractmethod
def _relative_readlink_raw(self, relpath):
"""Execute `readlink` for the given path, which may result in a relative path."""
@abstractmethod
def _lstat_raw(self, relpath):
"""Without following symlinks, returns a PTStat object for the path, or None."""
@abstractmethod
def _walk_raw(self, relpath, topdown=True):
"""Walk the file tree rooted at `path`. Works like os.walk but returned root value is relative path."""
def glob1(self, dir_relpath, glob):
"""Returns a list of paths in path that match glob and are not ignored."""
if self.isignored(dir_relpath, directory=True):
return []
matched_files = self._glob1_raw(dir_relpath, glob)
return self.filter_ignored(matched_files, dir_relpath)
def listdir(self, relpath):
"""Return the names of paths which are in the given directory and not ignored."""
if self.isignored(relpath, directory=True):
self._raise_access_ignored(relpath)
names = self._listdir_raw(relpath)
return self.filter_ignored(names, relpath)
def isdir(self, relpath):
"""Returns True if path is a directory and is not ignored."""
if self._isdir_raw(relpath):
if not self.isignored(relpath, directory=True):
return True
return False
def isfile(self, relpath):
"""Returns True if path is a file and is not ignored."""
if self.isignored(relpath):
return False
return self._isfile_raw(relpath)
def exists(self, relpath):
"""Returns True if path exists and is not ignored."""
if self.isignored(self._append_slash_if_dir_path(relpath)):
return False
return self._exists_raw(relpath)
def content(self, file_relpath):
"""
Returns the content for file at path. Raises exception if path is ignored.
Raises exception if path is ignored.
"""
if self.isignored(file_relpath):
self._raise_access_ignored(file_relpath)
return self._content_raw(file_relpath)
def relative_readlink(self, relpath):
"""
Execute `readlink` for the given path, which may result in a relative path.
Raises exception if path is ignored.
"""
if self.isignored(self._append_slash_if_dir_path(relpath)):
self._raise_access_ignored(relpath)
return self._relative_readlink_raw(relpath)
def lstat(self, relpath):
"""
Without following symlinks, returns a PTStat object for the path, or None
Raises exception if path is ignored.
"""
if self.isignored(self._append_slash_if_dir_path(relpath)):
self._raise_access_ignored(relpath)
return self._lstat_raw(relpath)
def walk(self, relpath, topdown=True):
"""
        Walk the file tree rooted at `path`. Works like os.walk, but the returned root value is a relative path.
Ignored paths will not be returned.
"""
for root, dirs, files in self._walk_raw(relpath, topdown):
matched_dirs = self.ignore.match_files([os.path.join(root, "{}/".format(d)) for d in dirs])
matched_files = self.ignore.match_files([os.path.join(root, f) for f in files])
for matched_dir in matched_dirs:
dirs.remove(fast_relpath(matched_dir, root).rstrip('/'))
for matched_file in matched_files:
files.remove(fast_relpath(matched_file, root))
yield root, dirs, files
def readlink(self, relpath):
link_path = self.relative_readlink(relpath)
if os.path.isabs(link_path):
raise IOError('Absolute symlinks not supported in {}: {} -> {}'.format(
self, relpath, link_path))
# In order to enforce that this link does not escape the build_root, we join and
# then remove it.
abs_normpath = os.path.normpath(os.path.join(self.build_root,
os.path.dirname(relpath),
link_path))
return fast_relpath(abs_normpath, self.build_root)
def isignored(self, relpath, directory=False):
"""Returns True if path matches pants ignore pattern."""
relpath = self._relpath_no_dot(relpath)
if directory:
relpath = self._append_trailing_slash(relpath)
match_result = list(self.ignore.match_files([relpath]))
return len(match_result) > 0
def filter_ignored(self, path_list, prefix=''):
"""Takes a list of paths and filters out ignored ones."""
prefix = self._relpath_no_dot(prefix)
prefixed_path_list = [self._append_slash_if_dir_path(os.path.join(prefix, item)) for item in path_list]
ignored_paths = list(self.ignore.match_files(prefixed_path_list))
if len(ignored_paths) == 0:
return path_list
return [fast_relpath(f, prefix).rstrip('/') for f in
[path for path in prefixed_path_list if path not in ignored_paths]
]
def _relpath_no_dot(self, relpath):
return relpath.lstrip('./') if relpath != '.' else ''
def _raise_access_ignored(self, relpath):
"""Raises exception when accessing ignored path."""
raise self.AccessIgnoredPathError('The path {} is ignored in {}'.format(relpath, self))
def _append_trailing_slash(self, relpath):
"""Add a trailing slash if not already has one."""
return relpath if relpath.endswith('/') or len(relpath) == 0 else relpath + '/'
def _append_slash_if_dir_path(self, relpath):
"""For a dir path return a path that has a trailing slash."""
if self._isdir_raw(relpath):
return self._append_trailing_slash(relpath)
return relpath
class PTStat(datatype('PTStat', ['ftype'])):
"""A simple 'Stat' facade that can be implemented uniformly across SCM and posix backends.
:param ftype: Either 'file', 'dir', or 'link'.
"""
PTSTAT_FILE = PTStat('file')
PTSTAT_DIR = PTStat('dir')
PTSTAT_LINK = PTStat('link')
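# Usage sketch (illustrative; a concrete subclass, e.g. a filesystem-backed
# FileSystemProjectTree, must supply the *_raw methods):
# tree = FileSystemProjectTree('/abs/build/root', ignore_patterns=['*.pyc'])
# tree.isignored('foo/bar.pyc')  # True
# tree.listdir('foo')            # names under foo/, minus ignored entries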
|
"""
Definition of urls for DjangoWebProject.
"""
from datetime import datetime
from django.conf.urls import patterns, url, include
# Uncomment the next lines to enable the admin:
# from django.conf.urls import include
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
url('', include('social.apps.django_app.urls', namespace='social')),
url(r'^logout/$', 'directory.views.logout', name='logout'),
url(r'^home/(?P<parameter>.*)/', 'directory.views.home', name='home'),
url(r'^home/', 'directory.views.home', name='home'),
url(r'^createuser/', 'directory.views.createuser', name='createuser'),
url(r'^edituser/', 'directory.views.edituser', name='edituser'),
url(r'^profile/(?P<username>.+)/$', 'directory.views.profile', name='profile'),
url(r'^profile/', 'directory.views.profile', name='profile'),
url(r'^search/$', 'directory.views.search', name='search'),
url(r'', 'directory.views.login', name='login'),
#url(r'^logout/$', 'directory.views.logout', name='logout'),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
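# Hedged aside (illustrative, commented out so it does not shadow the
# urlpatterns above): on Django >= 2.0 the `patterns()` helper and string view
# paths are gone, and the two profile routes would instead be written as:
# from django.urls import path, re_path
# from directory import views
# urlpatterns = [
#     re_path(r'^profile/(?P<username>.+)/$', views.profile, name='profile'),
#     path('profile/', views.profile, name='profile'),
# ]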
|
import sqlite3
import csv
import pandas as pd
####Code to query the local table####
con = sqlite3.connect('MY_SAMPLE_SQLITE.db')
db_df = pd.read_sql_query('SELECT distinct M.MEMBER_ID, \
C.PaidAmt, C.billedamt, C.AllowedAmt, C.PcpProvNbr, C.VendorNumber, \
P.ProviderName, \
V.VendorNumber, V.VendorName \
FROM Claims as C LEFT JOIN Members as M ON C.Member_Id=M.MEMBER_ID \
LEFT JOIN Providers as P ON C.PcpProvNbr=P.ProviderID \
LEFT JOIN Vendors as V ON C.VendorNumber=V.VendorNumber \
where C.MEMBER_ID= "88C783D6-88FD-88B4-88F3-8865875A6A54" \
', con)
print (db_df)
#db_df.to_csv('database.csv', index=False) #write the query results to a csv
con.close()
#Alternatively, you can use DB Browser for SQLite (https://sqlitebrowser.org/dl/#macos)
# to access this locally created SQLite DB with a GUI for querying and other database operations.
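# Hedged variant (illustrative): the same lookup with a bound parameter instead
# of an inlined literal; '?' placeholders are native to sqlite3, and pandas
# forwards `params` to the driver, which avoids quoting/injection issues.
def query_member(con, member_id):
    return pd.read_sql_query(
        'SELECT DISTINCT M.MEMBER_ID, C.PaidAmt, C.billedamt, C.AllowedAmt '
        'FROM Claims AS C LEFT JOIN Members AS M ON C.Member_Id = M.MEMBER_ID '
        'WHERE C.MEMBER_ID = ?',
        con, params=(member_id,))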
|
from embedly import Embedly
from api_keys import embedly_key
from classes import Link
from google.appengine.ext import db
import datetime
import re
import feedparser
from bs4 import BeautifulSoup
import uuid
from classes import Email
configuration = {"vote_deflator":2}
def embedly_link(URL):
#Takes a URL, returns a populated Link object
#Returns "error" if Embedly API fails
client = Embedly(embedly_key)
response = client.extract(URL)
try:
#Get link information from Embedly
headline = response['title']
URL = clean_url(response['url'])
summary = response['description']
source = response['provider_name']
path = page_path(headline)
keywords_response = response['keywords']
keyword1 = str(keywords_response[0]['name'])
keyword2 = str(keywords_response[1]['name'])
keyword3 = str(keywords_response[2]['name'])
keyword4 = str(keywords_response[3]['name'])
keyword5 = str(keywords_response[4]['name'])
#Create Link object with info from Embedly
link = Link(headline = headline, url = URL, source = source, summary = summary,
path = path, keyword1 = keyword1, keyword2 = keyword2, keyword3 = keyword3,
keyword4 = keyword4, keyword5 = keyword5, votes = 0)
return link
    except Exception:
return "error"
def clean_url(url):
if "?" in url:
return url.split("?")[0]
else:
return url
def is_link_in_database(URL):
#Returns Link object if URL already in Link database, else returns False
qry = db.Query(Link).filter("url =", URL)
result = qry.get()
if result:
return result
else:
return False
def is_email_in_database(email):
#Returns Email object if email already in database, else returns False
qry = db.Query(Email).filter("email =", email)
result = qry.get()
if result:
return result
else:
return False
def hotness(link):
#Calculate hotness score for a Link, update hotness attribute, return Link
#Doesn't put the Link to the database; that needs to be done with returned object
#Hotness = (# of days between creation of link and origin date 1/23/16)
# + (number of votes, divided by vote_deflator variable in configuration)
origin = datetime.datetime(2016, 1, 23)
diff = link.created - origin
days = float(diff.days) + (float(diff.seconds)/float(86400))
link.hotness = float(days) + (float(link.votes)/configuration['vote_deflator'])
return link
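#Worked example (illustrative numbers): with the 1/23/16 origin and
#vote_deflator == 2, a link created 10 days and 12 hours after the origin with
#4 votes scores 10.5 + 4/2 = 12.5 -- every two votes buy one extra day of recency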
def upvote(URL):
#Takes URL, increments votes on Link object by one, updates hotness, updates database
qry = db.Query(Link).filter("url =", URL)
link = qry.get()
link.votes = link.votes + 1
hotness(link)
link.put()
def session_voting(handler, URL):
    #When a user clicks upvote, checks whether the user has already upvoted this link in the current session
    #If not, marks the link as voted in the session and upvotes it
has_voted = handler.session.get(URL)
if not has_voted:
handler.session[URL] = True
upvote(URL)
def page_path(headline):
    #Returns today's date, hyphenated, followed by the lowercase headline, hyphenated
return str(datetime.date.today()) + "-" + "-".join(re.findall("[a-zA-Z]+", headline)).lower()
def rss_parse(source):
#Take a source object with an RSS feed and enter new links into db
feed = feedparser.parse(source.rss)
#If source has exclude words, break exclude_words string field into
#list of words that exclude a link if in headline
if source.exclude_words:
exclude_words = source.exclude_words.split(":")
else:
exclude_words = ""
#Check what the last link from this source last time was
last_link = source.last_link
source.last_link = feed.entries[0][source.link_tag]
for a in feed.entries:
if a[source.link_tag] == last_link:
break
else:
#If there are no exclude_words in the headline
if not any(word in a['title'].lower() for word in exclude_words):
#Hit the Embedly API
response = embedly_link(a[source.link_tag])
if response != "error":
database_response = is_link_in_database(response.url)
if database_response == False:
#Calculate a hotness score for the new link
response = hotness(response)
#Set submitted_by attribute to name of the rss feed
response.submitted_by = source.name
#add to database
response.put()
else:
#If link is already in the database, get that link
if source.name not in database_response.submitted_by:
#add a vote
database_response.votes = database_response.votes + 1
#recalculate hotness
database_response = hotness(database_response)
#add source name to the submitted_by source list
database_response.submitted_by = database_response.submitted_by + ":" + source.name
#update database
database_response.put()
source.put()
### Pseudo code to cut down on RSS db calls
### Each source has a "last seen link" attribute -- the most recent link from the last time you hit it
### for each rss source: while entry not equal to source.last_seen_link: parse. stop the loop when you hit the last_seen_link
def blog_rec_parser(source):
#Takes an RSS feed, returns list of URLs to posts mentioned in link roundup posts
feed = feedparser.parse(source.rss)
url_list = []
for entry in feed.entries:
headline = entry['title']
if source.include_words in headline:
#If the post's headline mentions terms indicating it's a link roundup post
post = entry.content[0]['value']
            soup = BeautifulSoup(post, 'html.parser')
#if soup.body is not None:
for link in soup.find_all('a'):
url = link.get('href')
if url != source.exclude_words and source.include_words.replace(' ', '-').lower() not in url:
                    #If exclude words aren't present and it isn't just a link to the site's homepage, add it to the list
url_list.append(url)
return url_list
def aggregator_parse(list_of_urls, source_name):
#Takes list of URLs, name of the aggregator as a string. Enters in db if appropriate
for a in list_of_urls:
response = embedly_link(a)
if response != "error":
#If the link is NOT already in the Link database, put it there
qry = is_link_in_database(response.url)
if qry == False:
#If link not in database, calculate hotness, add to db
response = hotness(response)
response.submitted_by = source_name
response.put()
elif source_name not in qry.submitted_by:
            #If link is in database but it wasn't submitted by this feed
qry.votes = qry.votes + 1
qry = hotness(qry)
            #add this source name to the submitted_by source list
qry.submitted_by = qry.submitted_by + ":" + source_name
qry.put()
def is_email_address(email):
#Checks if string looks like an email
email_regex = re.compile(r"[^@]+@[^@]+\.[^@]+")
if email_regex.match(email):
return True
else:
return False
def create_email_key():
unique = False
    while not unique:
email_key = str(uuid.uuid4())
qry = db.Query(Email).filter("email_key =", email_key)
if not qry.get():
unique = True
return email_key
|
""" Fault class and processing methods. """
import os
import glob
import warnings
import numpy as np
import pandas as pd
from numba import prange, njit
from sklearn.decomposition import PCA
from sklearn.neighbors import NearestNeighbors
from skimage.morphology import skeletonize
from scipy.ndimage import measurements, binary_erosion, binary_dilation, generate_binary_structure, binary_fill_holes
from ...batchflow.notifier import Notifier
from .horizon import Horizon
from .fault_triangulation import make_triangulation, triangle_rasterization
from ..plotters import show_3d
from ..geometry import SeismicGeometry
class Fault(Horizon):
""" Contains points of fault.
Initialized from `storage` and `geometry`.
Storage can be one of:
- csv-like file in CHARISMA, REDUCED_CHARISMA or FAULT_STICKS format.
- ndarray of (N, 3) shape.
- hdf5 file as a binary mask for cube.
"""
#pylint: disable=attribute-defined-outside-init
FAULT_STICKS = ['INLINE', 'iline', 'xline', 'cdp_x', 'cdp_y', 'height', 'name', 'number']
COLUMNS = ['iline', 'xline', 'height', 'name', 'number']
def from_file(self, path, transform=True, direction=None, **kwargs):
""" Init from path to either CHARISMA, REDUCED_CHARISMA or FAULT_STICKS csv-like file
from .npy or .hdf5 file with points.
"""
path = self.field.make_path(path, makedirs=False)
self.path = path
self.name = os.path.basename(path)
ext = os.path.splitext(path)[1][1:]
if ext == 'npz':
npzfile = np.load(path, allow_pickle=False)
points = npzfile['points']
transform = False
nodes = None if len(npzfile['nodes']) == 0 else npzfile['nodes']
self.format = 'file-npz'
elif ext == 'hdf5':
cube = SeismicGeometry(path, **kwargs).file_hdf5['cube']
points = np.stack(np.where(np.array(cube) == 1)).T #TODO: get points in chunks
transform = False
nodes = None
self.format = 'file-hdf5'
else:
points, nodes = self.csv_to_points(path, **kwargs)
self.format = 'file-csv'
self.from_points(points, transform, **kwargs)
if nodes is not None:
self.from_points(nodes, transform, dst='nodes', reset=None, **kwargs)
if direction is None:
if len(self.points) > 0:
self.direction = 0 if self.points[:, 0].ptp() > self.points[:, 1].ptp() else 1
else:
self.direction = 0
elif isinstance(direction, int):
self.direction = direction
elif isinstance(direction[self.field.short_name], int):
self.direction = direction[self.field.short_name]
else:
self.direction = direction[self.field.short_name][self.name]
def csv_to_points(self, path, fix=False, **kwargs):
""" Get point cloud array from file values. """
df = self.read_file(path)
if df is not None:
df = self.recover_lines_from_cdp(df)
sticks = self.read_sticks(df, self.name, fix)
if len(sticks) > 0:
sticks = self.sort_sticks(sticks)
points = self.interpolate_3d(sticks, **kwargs)
nodes = np.concatenate(sticks.values)
return points, nodes
return np.zeros((0, 3)), np.zeros((0, 3))
@classmethod
def read_file(cls, path):
""" Read data frame with sticks. """
with open(path, encoding='utf-8') as file:
line_len = len([item for item in file.readline().split(' ') if len(item) > 0])
if line_len == 3:
names = Horizon.REDUCED_CHARISMA_SPEC
elif line_len == 8:
names = cls.FAULT_STICKS
elif line_len >= 9:
names = Horizon.CHARISMA_SPEC
elif line_len == 0:
return None
else:
raise ValueError('Fault labels must be in FAULT_STICKS, CHARISMA or REDUCED_CHARISMA format.')
return pd.read_csv(path, sep=r'\s+', names=names)
def recover_lines_from_cdp(self, df):
""" Fix broken iline and crossline coordinates. If coordinates are out of the cube, 'iline' and 'xline'
        will be inferred from 'cdp_x' and 'cdp_y'. """
i_bounds = [self.field.ilines_offset, self.field.ilines_offset + self.field.cube_shape[0]]
x_bounds = [self.field.xlines_offset, self.field.xlines_offset + self.field.cube_shape[1]]
i_mask = np.logical_or(df.iline < i_bounds[0], df.iline >= i_bounds[1])
x_mask = np.logical_or(df.xline < x_bounds[0], df.xline >= x_bounds[1])
_df = df[np.logical_and(i_mask, x_mask)]
coords = np.rint(self.field.geometry.cdp_to_lines(_df[['cdp_x', 'cdp_y']].values)).astype(np.int32)
df.loc[np.logical_and(i_mask, x_mask), ['iline', 'xline']] = coords
return df
@classmethod
def read_sticks(cls, df, name=None, fix=False):
""" Transform initial fault dataframe to array of sticks. """
if 'number' in df.columns: # fault file has stick index
col = 'number'
        elif df.iline.iloc[0] == df.iline.iloc[1]: # there are stick points with the same iline
col = 'iline'
        elif df.xline.iloc[0] == df.xline.iloc[1]: # there are stick points with the same xline
col = 'xline'
else:
raise ValueError('Wrong format of sticks: there is no column to group points into sticks.')
df = df.sort_values('height')
sticks = df.groupby(col).apply(lambda x: x[Horizon.COLUMNS].values).reset_index(drop=True)
if fix:
# Remove sticks with horizontal parts.
mask = sticks.apply(lambda x: len(np.unique(np.array(x)[:, 2])) == len(x))
if not mask.all():
warnings.warn(f'{name}: Fault has horizontal parts of sticks.')
sticks = sticks.loc[mask]
# Remove sticks with one node.
mask = sticks.apply(len) > 1
if not mask.all():
warnings.warn(f'{name}: Fault has one-point sticks.')
sticks = sticks.loc[mask]
# Filter faults with one stick.
if len(sticks) == 1:
                warnings.warn(f'{name}: Fault has only one stick')
                sticks = pd.Series(dtype=object)
return sticks
def sort_sticks(self, sticks):
""" Order sticks with respect of fault direction. Is necessary to perform following triangulation. """
pca = PCA(1)
coords = pca.fit_transform(np.array([stick[0][:2] for stick in sticks.values]))
indices = np.array([i for _, i in sorted(zip(coords, range(len(sticks))))])
return sticks.iloc[indices]
def interpolate_3d(self, sticks, width=1, **kwargs):
""" Interpolate fault sticks as a surface. """
triangles = make_triangulation(sticks)
points = []
for triangle in triangles:
res = triangle_rasterization(triangle, width)
points += [res]
return np.concatenate(points, axis=0)
def add_to_mask(self, mask, locations=None, **kwargs):
""" Add fault to background. """
mask_bbox = np.array([[locations[0].start, locations[0].stop],
[locations[1].start, locations[1].stop],
[locations[2].start, locations[2].stop]],
dtype=np.int32)
points = self.points
if (self.bbox[:, 1] < mask_bbox[:, 0]).any() or (self.bbox[:, 0] >= mask_bbox[:, 1]).any():
return mask
insert_fault_into_mask(mask, points, mask_bbox)
return mask
@classmethod
def check_format(cls, path, verbose=False):
""" Find errors in fault file.
Parameters
----------
path : str
path to file or glob expression
        verbose : bool
            If True, report files that were successfully read.
"""
for filename in glob.glob(path):
try:
df = cls.read_file(filename)
except ValueError:
print(filename, ': wrong format')
else:
if 'name' in df.columns and len(df.name.unique()) > 1:
                    print(filename, ': fault file must be split.')
elif len(cls.read_sticks(df)) == 1:
                    print(filename, ': fault has only one stick')
elif any(cls.read_sticks(df).apply(len) == 1):
                    print(filename, ': fault has a one-point stick')
elif verbose:
print(filename, ': OK')
@classmethod
def split_file(cls, path, dst):
""" Split file with multiple faults into separate files. """
if dst and not os.path.isdir(dst):
os.makedirs(dst)
df = pd.read_csv(path, sep=r'\s+', names=cls.FAULT_STICKS)
df.groupby('name').apply(cls.fault_to_csv, dst=dst)
@classmethod
def fault_to_csv(cls, df, dst):
""" Save separate fault to csv. """
df.to_csv(os.path.join(dst, df.name), sep=' ', header=False, index=False)
def dump_points(self, path):
""" Dump points. """
path = self.field.make_path(path, name=self.short_name, makedirs=False)
if os.path.exists(path):
npzfile = np.load(path, allow_pickle=False)
points = np.concatenate([npzfile['points'], self.points], axis=0)
if self.nodes is not None:
nodes = np.concatenate([npzfile['nodes'], self.nodes], axis=0)
else:
nodes = npzfile['nodes']
else:
points = self.points
nodes = self.nodes if self.nodes is not None else np.zeros((0, 3), dtype=np.int32)
        # `np.savez` treats keyword arguments as arrays to store, so an
        # `allow_pickle=False` kwarg would be saved as a bogus array; drop it
        np.savez(path, points=points, nodes=nodes)
def split_faults(self, **kwargs):
""" Split file with faults points into separate connected faults.
Parameters
----------
**kwargs
Arguments for `split_faults` function.
"""
array = np.zeros(self.field.shape)
array[self.points[:, 0], self.points[:, 1], self.points[:, 2]] = 1
return self.from_mask(array, cube_shape=self.field.shape, field=self.field, **kwargs)
def show_3d(self, n_sticks=100, n_nodes=10, z_ratio=1., zoom_slice=None, show_axes=True,
width=1200, height=1200, margin=20, savepath=None, **kwargs):
""" Interactive 3D plot. Roughly, does the following:
- select `n` points to represent the horizon surface
- triangulate those points
- remove some of the triangles on conditions
- use Plotly to draw the tri-surface
Parameters
----------
n_sticks : int
Number of sticks for each fault.
n_nodes : int
Number of nodes for each stick.
z_ratio : int
Aspect ratio between height axis and spatial ones.
zoom_slice : tuple of slices or None.
Crop from cube to show. If None, the whole cube volume will be shown.
show_axes : bool
Whether to show axes and their labels.
width, height : int
Size of the image.
margin : int
Added margin from below and above along height axis.
savepath : str
Path to save interactive html to.
kwargs : dict
Other arguments of plot creation.
"""
title = f'Fault `{self.name}` on `{self.field.displayed_name}`'
aspect_ratio = (self.i_length / self.x_length, 1, z_ratio)
axis_labels = (self.field.index_headers[0], self.field.index_headers[1], 'DEPTH')
if zoom_slice is None:
zoom_slice = [slice(0, i) for i in self.field.shape]
zoom_slice[-1] = slice(self.h_min, self.h_max)
margin = [margin] * 3 if isinstance(margin, int) else margin
x, y, z, simplices = self.make_triangulation(n_sticks, n_nodes, zoom_slice)
show_3d(x, y, z, simplices, title, zoom_slice, None, show_axes, aspect_ratio,
axis_labels, width, height, margin, savepath, **kwargs)
def make_triangulation(self, n_sticks, n_nodes, slices, **kwargs):
""" Create triangultaion of fault.
Parameters
----------
n_sticks : int
Number of sticks to create.
n_nodes : int
Number of nodes for each stick.
slices : tuple
Region to process.
Returns
-------
x, y, z, simplices
            `x`, `y` and `z` are numpy.ndarrays of triangle vertices; `simplices` is an (N, 3) array where each row
            represents a triangle, and its elements are indices of the points that form that triangle's vertices.
"""
points = self.points.copy()
for i in range(3):
points = points[points[:, i] <= slices[i].stop]
points = points[points[:, i] >= slices[i].start]
if len(points) <= 3:
return None, None, None, None
sticks = get_sticks(points, n_sticks, n_nodes)
simplices = make_triangulation(sticks, True)
coords = np.concatenate(sticks)
return coords[:, 0], coords[:, 1], coords[:, 2], simplices
@classmethod
def from_mask(cls, array, field=None, chunk_size=None, threshold=None, overlap=1, pbar=False,
cube_shape=None, fmt='mask'):
""" Label faults in an array.
Parameters
----------
array : numpy.ndarray or SeismicGeometry
binary mask of faults or array of coordinates.
field : Field or None
Where the fault is.
chunk_size : int
size of chunks to apply `measurements.label`.
threshold : float or None
threshold to drop small faults.
overlap : int
size of overlap to join faults from different chunks.
pbar : bool
progress bar
cube_shape : tuple
            shape of cube. If fmt='mask', it can be inferred from the array.
fmt : str
if 'mask', array is a binary mask of faults. If 'points', array consists of coordinates of fault points.
Returns
-------
numpy.ndarray
array of shape (n_faults, ) where each item is array of fault points of shape (N_i, 3).
"""
# TODO: make chunks along xlines
if isinstance(array, SeismicGeometry):
array = array.file_hdf5
chunk_size = chunk_size or len(array)
if chunk_size == len(array):
overlap = 0
if cube_shape is None and fmt == 'points':
raise ValueError("If fmt='points', cube_shape must be specified")
cube_shape = cube_shape or array.shape
if fmt == 'mask':
chunks = [(start, array[start:start+chunk_size]) for start in range(0, cube_shape[0], chunk_size-overlap)]
total = len(chunks)
else:
def _chunks():
for start in range(0, cube_shape[0], chunk_size-overlap):
chunk = np.zeros((chunk_size, *cube_shape[1:]))
points = array[array[:, 0] < start+chunk_size]
points = points[points[:, 0] >= start]
chunk[points[:, 0]-start, points[:, 1], points[:, 2]] = 1
yield (start, chunk)
chunks = _chunks()
total = len(range(0, cube_shape[0], chunk_size-overlap))
prev_overlap = np.zeros((0, *cube_shape[1:]))
labels = np.zeros((0, 4), dtype='int32')
n_objects = 0
for start, item in Notifier(pbar, total=total)(chunks):
chunk_labels, new_objects = measurements.label(item, structure=np.ones((3, 3, 3))) # labels for new chunk
chunk_labels[chunk_labels > 0] += n_objects # shift all values to avoid intersecting with previous labels
new_overlap = chunk_labels[:overlap]
if len(prev_overlap) > 0:
coords = np.where(prev_overlap > 0)
if len(coords[0]) > 0:
# while there are the same objects with different labels repeat procedure
while (new_overlap != prev_overlap).any():
# find overlapping objects and change labels in chunk
chunk_transform = {k: v for k, v in zip(new_overlap[coords], prev_overlap[coords]) if k != v}
for k, v in chunk_transform.items():
chunk_labels[chunk_labels == k] = v
new_overlap = chunk_labels[:overlap]
# find overlapping objects and change labels in processed part of cube
labels_transform = {k: v for k, v in zip(prev_overlap[coords], new_overlap[coords]) if k != v}
for k, v in labels_transform.items():
labels[labels[:, 3] == k, 3] = v
prev_overlap[prev_overlap == k] = v
prev_overlap = chunk_labels[-overlap:]
chunk_labels = chunk_labels[overlap:]
nonzero_coord = np.where(chunk_labels)
chunk_labels = np.stack([*nonzero_coord, chunk_labels[nonzero_coord]], axis = -1)
chunk_labels[:, 0] += start
labels = np.concatenate([labels, chunk_labels])
n_objects += new_objects
labels = labels[np.argsort(labels[:, 3])]
labels = np.array(np.split(labels[:, :-1], np.unique(labels[:, 3], return_index=True)[1][1:]), dtype=object)
sizes = faults_sizes(labels)
labels = sorted(zip(sizes, labels), key=lambda x: x[0], reverse=True)
if threshold:
labels = [item for item in labels if item[0] >= threshold]
if field is not None:
labels = [Fault(item[1].astype('int32'), name=f'fault_{i}', field=field)
for i, item in Notifier(pbar)(enumerate(labels))]
return labels
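    # Hedged usage sketch (synthetic mask, illustrative parameters):
    #   mask = np.zeros((128, 128, 128), dtype='int8')
    #   mask[40:80, 10:100, 60] = 1
    #   faults = Fault.from_mask(mask, chunk_size=64, threshold=10, pbar=False)
    # With `field=None`, each item is a `(size, points)` pair sorted by size.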
@classmethod
def skeletonize_faults(cls, prediction, axis=0, threshold=0.1, pbar=True):
""" Make faults from binary mask. """
prediction_cube = SeismicGeometry(prediction) if isinstance(prediction, str) else prediction
processed_faults = np.zeros(prediction_cube.cube_shape)
for i in Notifier(pbar)(range(prediction_cube.cube_shape[axis])):
slices = [slice(None)] * 2
slices[axis] = i
slices = tuple(slices)
struct = generate_binary_structure(2, 10)
prediction = prediction_cube.load_slide(i, axis=axis)
dilation = binary_dilation(prediction > threshold, struct)
holes = binary_fill_holes(dilation, struct)
erosion = binary_erosion(holes, generate_binary_structure(2, 1))
processed_faults[slices] = binary_dilation(skeletonize(erosion, method='lee'))
return cls.from_mask(processed_faults, prediction_cube, chunk_size=100, pbar=pbar)
@classmethod
def remove_predictions_on_bounds(cls, image, prediction, window=30, dilation=30, padding=True, fill_value=0):
""" Remove predictions from cube bounds. """
dilation = [dilation] * image.ndim if isinstance(dilation, int) else dilation
if padding:
pad_width = [(0, 0)] * image.ndim
pad_width[-1] = (window // 2, window // 2)
image = np.pad(image, pad_width=pad_width)
shape = (*image.shape[:-1], image.shape[-1] - window + window % 2, window)
strides = (*image.strides, image.strides[-1])
strided = np.lib.stride_tricks.as_strided(image, shape, strides=strides)
if padding:
mask = strided.min(axis=-1) == strided.max(axis=-1)
else:
            mask = np.ones_like(image, dtype=bool)
slices = [slice(None)] * image.ndim
            slices[-1] = slice(window // 2, -window // 2 + 1)
            mask[tuple(slices)] = strided.min(axis=-1) == strided.max(axis=-1)
for i, width in enumerate(dilation):
slices = [[slice(None) for _ in range(image.ndim)] for _ in range(2)]
for _ in range(1, width):
slices[0][i] = slice(1, None)
slices[1][i] = slice(None, -1)
            mask[tuple(slices[0])] = np.logical_or(mask[tuple(slices[0])], mask[tuple(slices[1])])
            mask[tuple(slices[1])] = np.logical_or(mask[tuple(slices[0])], mask[tuple(slices[1])])
prediction[mask] = fill_value
return prediction
def faults_sizes(labels):
""" Compute sizes of faults.
Parameters
----------
labels : numpy.ndarray
array of shape (N, 4) where the first 3 columns are coordinates of points and the last one
is for labels
Returns
-------
sizes : numpy.ndarray
"""
sizes = []
for array in labels:
i_len = (array[:, 0].max() - array[:, 0].min())
x_len = (array[:, 1].max() - array[:, 1].min())
sizes += [(i_len ** 2 + x_len ** 2) ** 0.5]
return np.array(sizes)
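# Worked example (illustrative): a fault whose points span 30 ilines and
# 40 xlines gets size sqrt(30**2 + 40**2) = 50, i.e. the diagonal of its
# bounding box in the spatial plane.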
def get_sticks(points, n_sticks, n_nodes):
""" Get sticks from fault which is represented as a cloud of points.
Parameters
----------
points : np.ndarray
Fault points.
n_sticks : int
Number of sticks to create.
n_nodes : int
Number of nodes for each stick.
Returns
-------
numpy.ndarray
Array of sticks. Each item of array is a stick: sequence of 3D points.
"""
pca = PCA(1)
pca.fit(points)
axis = 0 if np.abs(pca.components_[0][0]) > np.abs(pca.components_[0][1]) else 1
column = points[:, 0] if axis == 0 else points[:, 1]
step = max((column.max() - column.min()) // (n_sticks + 1), 1)
points = points[np.argsort(points[:, axis])]
projections = np.split(points, np.unique(points[:, axis], return_index=True)[1][1:])[::step]
res = []
for p in projections:
points_ = thicken_line(p).astype(int)
loc = p[0, axis]
if len(points_) > 3:
nodes = approximate_points(points_[:, [1-axis, 2]], n_nodes)
nodes_ = np.zeros((len(nodes), 3))
nodes_[:, [1-axis, 2]] = nodes
nodes_[:, axis] = loc
res += [nodes_]
return res
def thicken_line(points):
""" Make thick line. """
points = points[np.argsort(points[:, -1])]
splitted = np.split(points, np.unique(points[:, -1], return_index=True)[1][1:])
return np.stack([np.mean(item, axis=0) for item in splitted], axis=0)
def approximate_points(points, n_points):
""" Approximate points by stick. """
pca = PCA(1)
array = pca.fit_transform(points)
step = (array.max() - array.min()) / (n_points - 1)
initial = np.arange(array.min(), array.max() + step / 2, step)
indices = np.unique(nearest_neighbors(initial.reshape(-1, 1), array.reshape(-1, 1), 1))
return points[indices]
def nearest_neighbors(values, all_values, n_neighbors=10):
""" Find nearest neighbours for each `value` items in `all_values`. """
nn = NearestNeighbors(n_neighbors=n_neighbors).fit(all_values)
return nn.kneighbors(values)[1].flatten()
@njit(parallel=True)
def insert_fault_into_mask(mask, points, mask_bbox):
""" Add new points into binary mask. """
#pylint: disable=not-an-iterable
for i in prange(len(points)):
point = points[i]
if (point[0] >= mask_bbox[0][0]) and (point[0] < mask_bbox[0][1]):
if (point[1] >= mask_bbox[1][0]) and (point[1] < mask_bbox[1][1]):
if (point[2] >= mask_bbox[2][0]) and (point[2] < mask_bbox[2][1]):
mask[point[0] - mask_bbox[0][0], point[1] - mask_bbox[1][0], point[2] - mask_bbox[2][0]] = 1
|
import time
from functools import wraps

def time_cost(func):
    @wraps(func)  # preserve the wrapped function's name and docstring
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        print('function {} cost: {}s'.format(func.__name__, round(end - start, 2)))
        return result
    return wrapper
@time_cost
def foo():
time.sleep(2)
if __name__ == '__main__':
foo()
|
from tensorflow.keras.models import Model, load_model, save_model
import tensorflow.compat.v1.keras.backend as K
from tensorflow.keras.layers import Dense, Activation, Input, LeakyReLU, Dropout, GaussianNoise, concatenate
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
import numpy as np
from os.path import join, exists
import pandas as pd
from scipy.stats import rv_histogram, norm
from lorenz_gan.gan import Interpolate1D, unnormalize_data, normalize_data, ConcreteDropout, Split1D, Scale, AutoScale
from sklearn.linear_model import LinearRegression
import yaml
class SubModelGAN(object):
def __init__(self, model_path=None):
self.model_path = model_path
self.model_path_start = "/".join(model_path.split("/")[:-1])
self.model_config = model_path.split("/")[-1].split("_")[2]
self.model = load_model(self.model_path, custom_objects={"Interpolate1D": Interpolate1D,
"ConcreteDropout": ConcreteDropout,
"Split1D": Split1D,
"Scale": Scale,
"AutoScale": AutoScale})
self.pred_func = K.function(self.model.input + [K.learning_phase()], [self.model.output])
self.x_scaling_file = join(self.model_path_start, "gan_X_scaling_values_{0}.csv".format(self.model_config))
self.y_scaling_file = join(self.model_path_start, "gan_Y_scaling_values_{0}.csv".format(self.model_config))
self.y_scaling_values = pd.read_csv(self.y_scaling_file, index_col="Channel")
self.x_scaling_values = pd.read_csv(self.x_scaling_file, index_col="Channel")
def predict(self, cond_x, random_x, train_mode=1):
norm_x = normalize_data(np.expand_dims(cond_x, axis=2), scaling_values=self.x_scaling_values)[0]
predictions = unnormalize_data(self.pred_func([norm_x[:, :, 0], random_x, train_mode])[0],
self.y_scaling_values)[:, :, 0]
if predictions.shape[1] > 1:
predictions = predictions.sum(axis=1)
else:
predictions = predictions.ravel()
return predictions
def predict_batch(self, cond_x, random_x, batch_size=8, stochastic=0):
norm_x = normalize_data(np.expand_dims(cond_x, axis=2), scaling_values=self.x_scaling_values)[0]
batch_indices = np.arange(0, norm_x.shape[0], batch_size, dtype=np.int32)
batch_indices = np.append(batch_indices, norm_x.shape[0])
predictions = np.zeros((norm_x.shape[0], self.model.output.shape[1].value))
print("Start batches", cond_x.shape[0])
for b, batch_index in enumerate(batch_indices[:-1]):
predictions[batch_index:
batch_indices[b + 1]] = unnormalize_data(self.pred_func([norm_x[batch_index:
batch_indices[b + 1],
:, 0],
random_x[batch_index:
batch_indices[b + 1]],
stochastic])[0],
self.y_scaling_values)[:, :, 0]
print("End batches", cond_x.shape[0])
if predictions.shape[1] > 1:
predictions = predictions.sum(axis=1)
else:
predictions = predictions.ravel()
return predictions
class SubModelHist(object):
def __init__(self, num_x_bins=20, num_u_bins=20):
self.num_x_bins = num_x_bins
self.num_u_bins = num_u_bins
self.x_bins = None
self.u_bins = None
self.model = None
def fit(self, cond_x, u):
x_bins = np.linspace(cond_x.min(), cond_x.max(), self.num_x_bins)
        u_bins = np.linspace(u.min(), u.max(), self.num_u_bins)
self.model, self.x_bins, self.u_bins = np.histogram2d(cond_x, u, bins=(x_bins, u_bins))
def predict(self, cond_x, random_x):
cond_x_filtered = np.where(cond_x > self.x_bins.max(), self.x_bins.max(), cond_x)
cond_x_filtered = np.where(cond_x < self.x_bins.min(), self.x_bins.min(), cond_x_filtered)
random_percentile = norm.cdf(random_x)
sampled_u = np.zeros(cond_x.shape)
for c, cond_x_val in enumerate(cond_x_filtered):
            # searchsorted returns an edge index; shift and clip it into the valid
            # bin range, then sample u from the histogram row of that x bin
            x_bin = np.clip(np.searchsorted(self.x_bins, cond_x_val) - 1, 0, self.model.shape[0] - 1)
            sampled_u[c] = rv_histogram((self.model[x_bin[0], :], self.u_bins)).ppf(random_percentile[c])
return sampled_u.ravel()
class AR1RandomUpdater(object):
def __init__(self, corr=0, noise_sd=1):
self.corr = corr
self.noise_sd = noise_sd
def fit(self, data):
self.corr = np.corrcoef(data[:-1], data[1:])[0, 1]
self.noise_sd = np.sqrt(1 - self.corr ** 2)
def update(self, random_values, rs=np.random.RandomState(3232)):
return self.corr * random_values + rs.normal(size=random_values.shape,
loc=0, scale=self.noise_sd)
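    def variance_demo(self, n=100000, steps=10):
        """Hedged sanity check (illustrative, not used by the pipeline): with
        noise_sd = sqrt(1 - corr**2), unit-variance input stays unit-variance,
        since Var(corr * z + e) = corr**2 + (1 - corr**2) = 1."""
        rs = np.random.RandomState(0)
        z = rs.normal(size=n)
        for _ in range(steps):
            z = self.update(z, rs)
        return np.var(z)  # stays near 1.0 when noise_sd = sqrt(1 - corr**2)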
class SubModelPoly(object):
def __init__(self, num_terms=3, noise_type="additive"):
self.num_terms = num_terms
self.model = LinearRegression()
self.noise_type = noise_type
def fit(self, cond_x, u):
x_terms = np.zeros((cond_x.shape[0], self.num_terms))
for p in range(1, self.num_terms + 1):
x_terms[:, p - 1] = cond_x ** p
self.model.fit(x_terms, u)
def predict(self, cond_x, random_x):
x_terms = np.zeros((cond_x.shape[0], self.num_terms))
for p in range(1, self.num_terms + 1):
x_terms[:, p - 1:p] = cond_x ** p
sampled_u = self.model.predict(x_terms).reshape(cond_x.shape)
if self.noise_type == "additive":
sampled_u += random_x
elif self.noise_type == "multiplicative":
sampled_u = (1 + random_x) * sampled_u
return sampled_u.ravel()
class SubModelPolyAdd(object):
def __init__(self, num_terms=3):
self.num_terms = num_terms
self.model = LinearRegression()
self.res_sd = 0
self.corr = 1
def fit(self, x, u):
x_terms = np.zeros((x.shape[0], self.num_terms))
for p in range(1, self.num_terms + 1):
x_terms[:, p - 1] = x[:, 0] ** p
self.model.fit(x_terms, u)
u_mean = self.model.predict(x_terms)
residuals = u - u_mean
self.corr = np.corrcoef(residuals[1:], residuals[:-1])[0, 1]
self.res_sd = np.std(residuals)
print("Poly Add Corr:", self.corr)
print("Poly Add Res SD:", self.res_sd)
def predict(self, x, residuals=None, predict_residuals=True):
if residuals is None:
residuals = np.zeros(x.shape[0])
x_terms = np.zeros((x.shape[0], self.num_terms))
for p in range(1, self.num_terms + 1):
x_terms[:, p - 1] = x[:, 0] ** p
u_mean = self.model.predict(x_terms).ravel()
u_res = self.corr * residuals + \
self.res_sd * np.sqrt(1 - self.corr ** 2) * np.random.normal(size=residuals.shape)
u_res = u_res.ravel()
if predict_residuals:
return u_mean, u_res
else:
return u_mean
def predict_mean(self, x):
x_terms = np.zeros((x.shape[0], self.num_terms))
for p in range(1, self.num_terms + 1):
x_terms[:, p - 1] = x[:, 0] ** p
return self.model.predict(x_terms).ravel()
def predict_res(self, residuals):
u_res = self.corr * residuals + \
self.res_sd * np.sqrt(1 - self.corr ** 2) * np.random.normal(size=residuals.shape)
u_res = u_res.ravel()
return u_res
class SubModelANN(object):
def __init__(self, inputs=1, hidden_layers=2, hidden_neurons=8,
activation="selu", l2_weight=0.01, learning_rate=0.001, loss="mse",
noise_sd=1, beta_1=0.9, model_path=None, dropout_alpha=0.5,
num_epochs=10, batch_size=1024, verbose=0, model_config=0, **kwargs):
self.config = dict(inputs=inputs,
hidden_layers=hidden_layers,
hidden_neurons=hidden_neurons,
activation=activation,
l2_weight=l2_weight,
learning_rate=learning_rate,
loss=loss,
noise_sd=noise_sd,
beta_1=beta_1,
dropout_alpha=dropout_alpha,
model_path=model_path,
num_epochs=num_epochs,
batch_size=batch_size,
verbose=verbose,
model_config=model_config,
corr=1,
res_sd=0)
if model_path is None:
nn_input = Input((inputs, ))
nn_model = nn_input
for h in range(hidden_layers):
nn_model = Dense(hidden_neurons, kernel_regularizer=l2(l2_weight))(nn_model)
if activation == "leaky":
nn_model = LeakyReLU(0.1)(nn_model)
else:
nn_model = Activation(activation)(nn_model)
if dropout_alpha > 0:
nn_model = Dropout(dropout_alpha)(nn_model)
if noise_sd > 0:
nn_model = GaussianNoise(noise_sd)(nn_model)
nn_model = Dense(1)(nn_model)
self.model = Model(nn_input, nn_model)
self.model.compile(Adam(lr=learning_rate, beta_1=beta_1), loss=loss)
self.x_scaling_file = None
self.x_scaling_values = None
self.sample_predict = K.function([self.model.input, K.learning_phase()], [self.model.output])
        elif isinstance(model_path, str):
model_path_start = join(*model_path.split("/")[:-1])
self.model = load_model(model_path)
self.sample_predict = K.function([self.model.input, K.learning_phase()], [self.model.output])
self.x_scaling_file = join(model_path_start,
"ann_config_{0:04d}_scale.csv".format(self.config["model_config"]))
self.x_scaling_values = pd.read_csv(self.x_scaling_file, index_col="Channel")
def fit(self, cond_x, u):
norm_x, self.x_scaling_values = normalize_data(cond_x,
scaling_values=self.x_scaling_values)
self.model.fit(norm_x, u, batch_size=self.config["batch_size"], epochs=self.config["num_epochs"],
verbose=self.config["verbose"])
u_mean = self.model.predict(norm_x).ravel()
residuals = u.ravel() - u_mean
self.config["corr"] = float(np.corrcoef(residuals[1:], residuals[:-1])[0, 1])
self.config["res_sd"] = float(np.std(residuals))
def predict(self, cond_x, residuals=None, predict_residuals=False):
norm_x = normalize_data(cond_x, scaling_values=self.x_scaling_values)[0]
u_mean = self.sample_predict([norm_x, 0])[0].ravel()
if predict_residuals:
u_total = self.sample_predict([norm_x, 1])[0].ravel()
u_res = u_total - u_mean
return u_mean, u_res
else:
return u_mean
def predict_mean(self, cond_x):
norm_x = normalize_data(cond_x, scaling_values=self.x_scaling_values)[0]
u_mean = self.sample_predict([norm_x, 0])[0].ravel()
return u_mean
def predict_res(self, residuals):
u_res = self.config["corr"] * residuals + \
self.config["res_sd"] * np.sqrt(1 - self.config["corr"] ** 2) * np.random.normal(size=residuals.shape)
u_res = u_res.ravel()
return np.zeros(u_res.shape)
def save_model(self, out_path):
out_config_file = join(out_path, "ann_config_{0:04d}_opts.yaml".format(self.config["model_config"]))
with open(out_config_file, "w") as out_config:
yaml.dump(self.config, out_config)
model_file = join(out_path, "ann_config_{0:04d}_model.nc".format(self.config["model_config"]))
save_model(self.model, model_file)
self.x_scaling_file = join(out_path, "ann_config_{0:04d}_scale.csv".format(self.config["model_config"]))
self.x_scaling_values.to_csv(self.x_scaling_file, index_label="Channel")
class SubModelANNRes(object):
"""
Artificial Neural Network Parameterization with separate mean and residual models.
Args:
mean_inputs (int): number of inputs to mean model (default 1)
hidden_layers (int): number of hidden layers in each model (default 2)
hidden_neurons (int): number of hidden neurons in each hidden layer (default 8)
noise_sd (float): standard deviation of the GaussianNoise layers (default 1)
beta_1 (float): controls the beta_1 parameter in the Adam optimizer
model_path (str or None): Path to existing model object. If not specified or if model file not found,
new model is created from scratch.
dropout_alpha (float): Proportion of input neurons set to 0.
num_epochs (int): The number of epochs (iterations through training data) performed during training.
batch_size (int): Number of training examples sampled for each network update
val_split (float): Proportion of training examples used to split training and validation data
verbose (int): Level of text output during training.
model_config (int): Configuration number to keep saved files consistent.
"""
def __init__(self, mean_inputs=1, hidden_layers=2, hidden_neurons=8,
activation="selu", l2_weight=0.01, learning_rate=0.001, mean_loss="mse",
res_loss="kullback_leibler_divergence",
noise_sd=1, beta_1=0.9, model_path=None, dropout_alpha=0.5,
num_epochs=10, batch_size=1024, val_split=0.5, verbose=0, model_config=0):
self.config = dict(mean_inputs=mean_inputs,
hidden_layers=hidden_layers,
hidden_neurons=hidden_neurons,
activation=activation,
l2_weight=l2_weight,
learning_rate=learning_rate,
mean_loss=mean_loss,
res_loss=res_loss,
noise_sd=noise_sd,
beta_1=beta_1,
dropout_alpha=dropout_alpha,
model_path=model_path,
num_epochs=num_epochs,
batch_size=batch_size,
verbose=verbose,
model_config=model_config,
val_split=val_split)
        # `model_path` may be None for a brand-new model; fall back to the
        # current directory so the `join` calls below do not fail
        model_dir = model_path if model_path is not None else "."
        mean_model_file = join(model_dir, "annres_config_{0:04d}_mean.nc".format(self.config["model_config"]))
        res_model_file = join(model_dir, "annres_config_{0:04d}_res.nc".format(self.config["model_config"]))
        self.x_scaling_file = join(model_dir, "annres_scaling_values_{0:04d}.csv".format(model_config))
if model_path is None or not exists(mean_model_file):
nn_input = Input((mean_inputs,))
nn_model = nn_input
for h in range(hidden_layers):
nn_model = Dense(hidden_neurons, kernel_regularizer=l2(l2_weight))(nn_model)
if activation == "leaky":
nn_model = LeakyReLU(0.1)(nn_model)
else:
nn_model = Activation(activation)(nn_model)
nn_model = Dense(1)(nn_model)
nn_res_input_x = Input((mean_inputs,))
nn_res_input_res = Input((1,))
nn_res = concatenate([nn_res_input_x, nn_res_input_res])
for h in range(hidden_layers):
nn_res = Dense(hidden_neurons, kernel_regularizer=l2(l2_weight))(nn_res)
if activation == "leaky":
nn_res = LeakyReLU(0.1)(nn_res)
else:
nn_res = Activation(activation)(nn_res)
nn_res = Dropout(dropout_alpha)(nn_res)
nn_res = GaussianNoise(noise_sd)(nn_res)
nn_res = Dense(1)(nn_res)
self.mean_model = Model(nn_input, nn_model)
self.mean_model.compile(Adam(lr=learning_rate, beta_1=beta_1), loss=mean_loss)
self.res_model = Model([nn_res_input_x, nn_res_input_res], nn_res)
self.res_model.compile(Adam(lr=learning_rate, beta_1=beta_1), loss=res_loss)
self.x_scaling_values = None
        elif isinstance(model_path, str):
self.mean_model = load_model(mean_model_file)
self.res_model = load_model(res_model_file)
self.x_scaling_values = pd.read_csv(self.x_scaling_file, index_col="Channel")
self.res_predict = K.function(self.res_model.input + [K.learning_phase()], [self.res_model.output])
def fit(self, cond_x, u):
split_index = int(cond_x.shape[0] * self.config["val_split"])
norm_x, self.x_scaling_values = normalize_data(cond_x,
scaling_values=self.x_scaling_values)
self.x_scaling_values.to_csv(self.x_scaling_file, index_label="Channel")
self.mean_model.fit(norm_x[:split_index], u[:split_index], batch_size=self.config["batch_size"],
epochs=self.config["num_epochs"],
verbose=self.config["verbose"])
mean_preds = self.mean_model.predict(norm_x[split_index:]).ravel()
residuals = u[split_index:] - mean_preds
self.res_model.fit([norm_x[split_index:-1], residuals[:-1].reshape(-1, 1)],
residuals[1:], batch_size=self.config["batch_size"],
epochs=self.config["num_epochs"],
verbose=self.config["verbose"])
def predict(self, cond_x, residuals, predict_residuals=True):
norm_x = normalize_data(cond_x, scaling_values=self.x_scaling_values)[0]
u_mean = self.mean_model.predict(norm_x).ravel()
u_res = self.res_predict([norm_x, residuals, 1])[0].ravel()
if predict_residuals:
return u_mean, u_res
else:
return u_mean
def save_model(self, out_path):
out_config_file = join(out_path, "annres_config_{0:04d}_opts.yaml".format(self.config["model_config"]))
with open(out_config_file, "w") as out_config:
yaml.dump(self.config, out_config)
mean_model_file = join(out_path, "annres_config_{0:04d}_mean.nc".format(self.config["model_config"]))
res_model_file = join(out_path, "annres_config_{0:04d}_res.nc".format(self.config["model_config"]))
save_model(self.mean_model, mean_model_file)
save_model(self.res_model, res_model_file)
self.x_scaling_values.to_csv(self.x_scaling_file, index_label="Channel")
def load_ann_model(model_config_file):
"""
Load Artificial Neural Network model from config yaml file
Args:
model_config_file: The full or relative path to the config file with name formatted "annres_config_0000.yaml"
Returns:
SubModelANN or SubModelANNRes
"""
model_type = model_config_file.split("/")[-1].split("_")[0]
with open(model_config_file) as config_file:
        config = yaml.safe_load(config_file)
if model_type == "ann":
model = SubModelANN(**config)
else:
model = SubModelANNRes(**config)
return model
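# Hedged usage sketch (path and data are illustrative):
#   model = load_ann_model("models/ann_config_0003_opts.yaml")
#   u_mean = model.predict_mean(cond_x)
# The model type is read from the filename prefix ("ann" vs "annres").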
|
vowels = ('h', 'i', 'i')   # a plain tuple (the name is arbitrary)
print(vowels)
# Tuples are immutable, so item assignment raises a TypeError:
# vowels[3] = 'g'
# print(vowels)
word = ('word')                # parentheses alone do not make a tuple...
realWord = ('word',)           # ...the trailing comma does
realWordTuple = tuple('word')  # tuple() of a string splits it into characters
# print(type(vowels))
print(type(word))           # <class 'str'>
print(type(realWord))       # <class 'tuple'>
print(type(realWordTuple))  # <class 'tuple'> -> ('w', 'o', 'r', 'd')
|
# -*- coding: utf-8 -*-
import argparse
import base64
import errno
import logging
import os
import shlex
import socket
from collections import OrderedDict
from contextlib import contextmanager
from gettext import gettext
from streamlink import plugins, Streamlink
from streamlink.compat import (
is_py2,
parse_qsl,
unquote,
urlparse,
)
from streamlink.exceptions import (
FatalPluginError,
NoPluginError,
PluginError,
StreamError,
)
from streamlink.plugin import PluginOptions
from streamlink.stream import RTMPStream
from streamlink.stream.dash import DASHStream
from streamlink.stream.ffmpegmux import MuxedStream
from .compat import BaseHTTPRequestHandler, HTTPServer, ThreadingMixIn
from .constants import CONFIG_FILES, PLUGINS_DIR, STREAM_SYNONYMS
from .mirror_argparser import build_parser
from .shared import logger
ACCEPTABLE_ERRNO = (
errno.ECONNABORTED,
errno.ECONNRESET,
errno.EINVAL,
errno.EPIPE,
)
try:
ACCEPTABLE_ERRNO += (errno.WSAECONNABORTED,)
except AttributeError:
pass # Not windows
log = logging.getLogger('streamlink.liveproxy-server')
class TempData(object):
pass
class LiveProxyStreamlink(Streamlink):
def load_builtin_plugins(self):
if not hasattr(TempData, '_loaded_plugins'):
self.load_plugins(plugins.__path__[0])
TempData._loaded_plugins = self.plugins.copy()
else:
self.plugins = TempData._loaded_plugins.copy()
if is_py2:
# Python 2.7
for plugin in self.plugins.itervalues():
plugin.session = self
else:
for plugin in iter(self.plugins.values()):
plugin.session = self
# copy of - from .utils import ignored
@contextmanager
def ignored(*exceptions):
try:
yield
except exceptions:
pass
def resolve_stream_name(streams, stream_name):
'''Returns the real stream name of a synonym.'''
if stream_name in STREAM_SYNONYMS and stream_name in streams:
for name, stream in streams.items():
if stream is streams[stream_name] and name not in STREAM_SYNONYMS:
return name
return stream_name
def format_valid_streams(plugin, streams):
'''Formats a dict of streams.
Filters out synonyms and displays them next to
the stream they point to.
Streams are sorted according to their quality
(based on plugin.stream_weight).
'''
delimiter = ', '
validstreams = []
for name, stream in sorted(streams.items(),
key=lambda stream: plugin.stream_weight(stream[0])):
if name in STREAM_SYNONYMS:
continue
        def synonymfilter(n):
            return stream is streams[n] and n != name
synonyms = list(filter(synonymfilter, streams.keys()))
if len(synonyms) > 0:
joined = delimiter.join(synonyms)
name = '{0} ({1})'.format(name, joined)
validstreams.append(name)
return delimiter.join(validstreams)
def setup_args(parser, arglist=None, config_files=None, ignore_unknown=True):
    '''Parses arguments.'''
    # Avoid mutable default arguments: a shared default list would accumulate
    # '@config_file' entries across calls
    arglist = list(arglist) if arglist is not None else []
    config_files = config_files if config_files is not None else []
    # Load arguments from config files
    for config_file in filter(os.path.isfile, config_files):
        arglist.insert(0, '@' + config_file)
args, unknown = parser.parse_known_args(arglist)
if unknown and not ignore_unknown:
msg = gettext('unrecognized arguments: %s')
parser.error(msg % ' '.join(unknown))
# Force lowercase to allow case-insensitive lookup
if args.stream:
args.stream = [stream.lower() for stream in args.stream]
# force --url as args.url
if args.url_param:
args.url = args.url_param
return args
def load_plugins(session, dirs):
'''Attempts to load plugins from a list of directories.'''
dirs = [os.path.expanduser(d) for d in dirs]
for directory in dirs:
if os.path.isdir(directory):
session.load_plugins(directory)
else:
log.info('Plugin path {0} does not exist or is not '
'a directory!', directory)
def setup_config_args(session, args, parser, arglist):
config_files = []
if args.url:
with ignored(NoPluginError):
plugin = session.resolve_url(args.url)
config_files += ['{0}.{1}'.format(fn, plugin.module) for fn in CONFIG_FILES]
if args.config:
# We want the config specified last to get highest priority
config_files += list(reversed(args.config))
else:
# Only load first available default config
for config_file in filter(os.path.isfile, CONFIG_FILES):
config_files.append(config_file)
break
if config_files:
args = setup_args(parser, arglist, config_files, ignore_unknown=True)
return args
def setup_plugins(session, args):
'''Loads any additional plugins.'''
if args.plugin_dirs:
PLUGINS_DIR.extend(args.plugin_dirs)
load_plugins(session, PLUGINS_DIR)
def setup_http_session(session, args):
'''Sets the global HTTP settings, such as proxy and headers.'''
if args.http_proxy:
session.set_option('http-proxy', args.http_proxy)
if args.https_proxy:
session.set_option('https-proxy', args.https_proxy)
if args.http_cookie:
session.set_option('http-cookies', dict(args.http_cookie))
if args.http_header:
session.set_option('http-headers', dict(args.http_header))
if args.http_query_param:
session.set_option('http-query-params', dict(args.http_query_param))
if args.http_ignore_env:
session.set_option('http-trust-env', False)
if args.http_no_ssl_verify:
session.set_option('http-ssl-verify', False)
if args.http_disable_dh:
session.set_option('http-disable-dh', True)
if args.http_ssl_cert:
session.set_option('http-ssl-cert', args.http_ssl_cert)
if args.http_ssl_cert_crt_key:
session.set_option('http-ssl-cert', tuple(args.http_ssl_cert_crt_key))
if args.http_timeout:
session.set_option('http-timeout', args.http_timeout)
if args.http_cookies:
session.set_option('http-cookies', args.http_cookies)
if args.http_headers:
session.set_option('http-headers', args.http_headers)
if args.http_query_params:
session.set_option('http-query-params', args.http_query_params)
def setup_options(session, args):
'''Sets streamlink options.'''
if args.hls_live_edge:
session.set_option('hls-live-edge', args.hls_live_edge)
if args.hls_segment_attempts:
session.set_option('hls-segment-attempts', args.hls_segment_attempts)
if args.hls_playlist_reload_attempts:
session.set_option('hls-playlist-reload-attempts', args.hls_playlist_reload_attempts)
if args.hls_segment_threads:
session.set_option('hls-segment-threads', args.hls_segment_threads)
if args.hls_segment_timeout:
session.set_option('hls-segment-timeout', args.hls_segment_timeout)
if args.hls_segment_ignore_names:
session.set_option('hls-segment-ignore-names', args.hls_segment_ignore_names)
if args.hls_timeout:
session.set_option('hls-timeout', args.hls_timeout)
if args.hls_audio_select:
session.set_option('hls-audio-select', args.hls_audio_select)
if args.hls_start_offset:
session.set_option('hls-start-offset', args.hls_start_offset)
if args.hls_duration:
session.set_option('hls-duration', args.hls_duration)
if args.hls_live_restart:
session.set_option('hls-live-restart', args.hls_live_restart)
if args.hds_live_edge:
session.set_option('hds-live-edge', args.hds_live_edge)
if args.hds_segment_attempts:
session.set_option('hds-segment-attempts', args.hds_segment_attempts)
if args.hds_segment_threads:
session.set_option('hds-segment-threads', args.hds_segment_threads)
if args.hds_segment_timeout:
session.set_option('hds-segment-timeout', args.hds_segment_timeout)
if args.hds_timeout:
session.set_option('hds-timeout', args.hds_timeout)
if args.http_stream_timeout:
session.set_option('http-stream-timeout', args.http_stream_timeout)
if args.ringbuffer_size:
session.set_option('ringbuffer-size', args.ringbuffer_size)
if args.rtmp_proxy:
session.set_option('rtmp-proxy', args.rtmp_proxy)
if args.rtmp_rtmpdump:
session.set_option('rtmp-rtmpdump', args.rtmp_rtmpdump)
if args.rtmp_timeout:
session.set_option('rtmp-timeout', args.rtmp_timeout)
if args.stream_segment_attempts:
session.set_option('stream-segment-attempts', args.stream_segment_attempts)
if args.stream_segment_threads:
session.set_option('stream-segment-threads', args.stream_segment_threads)
if args.stream_segment_timeout:
session.set_option('stream-segment-timeout', args.stream_segment_timeout)
if args.stream_timeout:
session.set_option('stream-timeout', args.stream_timeout)
if args.ffmpeg_ffmpeg:
session.set_option('ffmpeg-ffmpeg', args.ffmpeg_ffmpeg)
if args.ffmpeg_verbose:
session.set_option('ffmpeg-verbose', args.ffmpeg_verbose)
if args.ffmpeg_verbose_path:
session.set_option('ffmpeg-verbose-path', args.ffmpeg_verbose_path)
if args.ffmpeg_video_transcode:
session.set_option('ffmpeg-video-transcode', args.ffmpeg_video_transcode)
if args.ffmpeg_audio_transcode:
session.set_option('ffmpeg-audio-transcode', args.ffmpeg_audio_transcode)
session.set_option('subprocess-errorlog', args.subprocess_errorlog)
session.set_option('subprocess-errorlog-path', args.subprocess_errorlog_path)
session.set_option('locale', args.locale)
def setup_plugin_args(session, parser):
'''Sets Streamlink plugin options.'''
plugin_args = parser.add_argument_group('Plugin options')
for pname, plugin in session.plugins.items():
defaults = {}
for parg in plugin.arguments:
plugin_args.add_argument(parg.argument_name(pname), **parg.options)
defaults[parg.dest] = parg.default
plugin.options = PluginOptions(defaults)
def setup_plugin_options(session, args, plugin):
'''Sets Streamlink plugin options.'''
pname = plugin.module
required = OrderedDict({})
for parg in plugin.arguments:
if parg.options.get('help') != argparse.SUPPRESS:
if parg.required:
required[parg.name] = parg
value = getattr(args, parg.namespace_dest(pname))
session.set_plugin_option(pname, parg.dest, value)
# if the value is set, check to see if any of the required arguments are not set
if parg.required or value:
try:
for rparg in plugin.arguments.requires(parg.name):
required[rparg.name] = rparg
except RuntimeError:
log.error('{0} plugin has a configuration error and the arguments '
'cannot be parsed'.format(pname))
break
if required:
for req in required.values():
if not session.get_plugin_option(pname, req.dest):
prompt = req.prompt or 'Enter {0} {1}'.format(pname, req.name)
session.set_plugin_option(pname, req.dest,
plugin.input_ask_password(prompt)
if req.sensitive else
plugin.input_ask(prompt))
def main_play(HTTPBase, arglist, redirect=False):
parser = build_parser()
args = setup_args(parser, arglist, ignore_unknown=True)
# create a new session for every request
session = LiveProxyStreamlink()
log.info('User-Agent: {0}'.format(HTTPBase.headers.get('User-Agent', '???')))
log.info('Client: {0}'.format(HTTPBase.client_address))
log.info('Address: {0}'.format(HTTPBase.address_string()))
setup_plugins(session, args)
setup_plugin_args(session, parser)
# call setup args again once the plugin specific args have been added
args = setup_args(parser, arglist, ignore_unknown=True)
args = setup_config_args(session, args, parser, arglist)
logger.root.setLevel(args.loglevel)
setup_http_session(session, args)
if args.url:
setup_options(session, args)
try:
plugin = session.resolve_url(args.url)
setup_plugin_options(session, args, plugin)
log.info('Found matching plugin {0} for URL {1}',
plugin.module, args.url)
plugin_args = []
for parg in plugin.arguments:
value = plugin.get_option(parg.dest)
if value:
plugin_args.append((parg, value))
if plugin_args:
log.debug('Plugin specific arguments:')
for parg, value in plugin_args:
log.debug(' {0}={1} ({2})'.format(parg.argument_name(plugin.module),
value if not parg.sensitive else ('*' * 8),
parg.dest))
if redirect is True:
streams = session.streams(
args.url,
stream_types=['hls', 'http'])
else:
streams = session.streams(
args.url,
stream_types=args.stream_types,
sorting_excludes=args.stream_sorting_excludes)
except FatalPluginError as err:
log.error('FatalPluginError {0}', str(err))
HTTPBase._headers(404, 'text/html', connection='close')
return
except NoPluginError:
log.error('No plugin can handle URL: {0}', args.url)
HTTPBase._headers(404, 'text/html', connection='close')
return
except PluginError as err:
log.error('PluginError {0}', str(err))
HTTPBase._headers(404, 'text/html', connection='close')
return
if not streams:
log.error('No playable streams found on this URL: {0}', args.url)
HTTPBase._headers(404, 'text/html', connection='close')
return
if args.default_stream and not args.stream:
args.stream = args.default_stream
if not args.stream:
args.stream = ['best']
stream_ended = False
validstreams = format_valid_streams(plugin, streams)
for stream_name in args.stream:
if stream_name in streams:
log.info('Available streams: {0}', validstreams)
            # Decides what to do with the selected stream.
stream_name = resolve_stream_name(streams, stream_name)
stream = streams[stream_name]
# Find any streams with a '_alt' suffix and attempt
# to use these in case the main stream is not usable.
alt_streams = list(filter(lambda k: stream_name + '_alt' in k,
sorted(streams.keys())))
for stream_name in [stream_name] + alt_streams:
stream = streams[stream_name]
stream_type = type(stream).shortname()
log.info('Opening stream: {0} ({1})', stream_name,
stream_type)
if isinstance(stream, (RTMPStream)):
log.info('RTMP streams '
'might not work on every platform.')
elif isinstance(stream, (MuxedStream, DASHStream)):
log.info('FFmpeg streams (dash, muxed) '
'might not work on every platform.')
# 301
if redirect is True:
log.info('301 - URL: {0}'.format(stream.url))
HTTPBase.send_response(301)
HTTPBase.send_header('Location', stream.url)
HTTPBase.end_headers()
log.info('301 - done')
stream_ended = True
break
# play
try:
fd = stream.open()
except StreamError as err:
log.error('Could not open stream: {0}'.format(err))
continue
cache = 4096
HTTPBase._headers(200, 'video/unknown')
try:
log.debug('Pre-buffering {0} bytes'.format(cache))
while True:
buff = fd.read(cache)
if not buff:
log.error('No Data for buff!')
break
HTTPBase.wfile.write(buff)
HTTPBase.wfile.close()
except socket.error as e:
if isinstance(e.args, tuple):
if e.errno == errno.EPIPE:
# remote peer disconnected
log.info('Detected remote disconnect')
else:
log.error(str(e))
else:
log.error(str(e))
fd.close()
log.info('Stream ended')
fd = None
stream_ended = True
break
if not stream_ended:
HTTPBase._headers(404, 'text/html', connection='close')
return
else:
err = ('The specified stream(s) \'{0}\' could not be '
'found'.format(', '.join(args.stream)))
log.error('{0}.\n Available streams: {1}',
err, validstreams)
HTTPBase._headers(404, 'text/html', connection='close')
return
def arglist_from_query(path):
old_data = parse_qsl(urlparse(path).query)
arglist = []
for k, v in old_data:
if k == 'q':
# backwards compatibility --q
k = 'default-stream'
arglist += ['--{0}'.format(unquote(k)), unquote(v)]
return arglist
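# Example (illustrative): a request path such as
#   '/play/?url=https://foo.bar&q=worst'
# becomes the argument list
#   ['--url', 'https://foo.bar', '--default-stream', 'worst']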
class HTTPRequest(BaseHTTPRequestHandler):
def log_message(self, format, *args):
# log.debug('%s - %s' % (self.address_string(), format % args))
pass
def _headers(self, status, content, connection=False):
self.send_response(status)
self.send_header('Server', 'LiveProxy')
self.send_header('Content-type', content)
if connection:
self.send_header('Connection', connection)
self.end_headers()
def do_HEAD(self):
'''Respond to a HEAD request.'''
self._headers(404, 'text/html', connection='close')
def do_GET(self):
'''Respond to a GET request.'''
if self.path.startswith(('/play/', '/streamlink/')):
# http://127.0.0.1:53422/play/?url=https://foo.bar&q=worst
arglist = arglist_from_query(self.path)
main_play(self, arglist)
elif self.path.startswith(('/301/', '/streamlink_301/')):
# http://127.0.0.1:53422/301/?url=https://foo.bar&q=worst
arglist = arglist_from_query(self.path)
main_play(self, arglist, redirect=True)
elif self.path.startswith(('/base64/')):
# http://127.0.0.1:53422/base64/STREAMLINK-COMMANDS/
base64_path = urlparse(self.path).path.split('/')
arglist = shlex.split(base64.b64decode(base64_path[2]).decode('UTF-8'))
if arglist[0].lower() == 'streamlink':
arglist = arglist[1:]
main_play(self, arglist)
else:
self._headers(404, 'text/html', connection='close')
class Server(HTTPServer):
'''HTTPServer class with timeout.'''
timeout = 5
def finish_request(self, request, client_address):
"""Finish one request by instantiating RequestHandlerClass."""
try:
self.RequestHandlerClass(request, client_address, self)
except socket.error as err:
if err.errno not in ACCEPTABLE_ERRNO:
raise
class ThreadedHTTPServer(ThreadingMixIn, Server):
'''Handle requests in a separate thread.'''
allow_reuse_address = True
daemon_threads = True
__all__ = [
'HTTPRequest',
'ThreadedHTTPServer',
]
|
# Smearing schemes for delta function
# Intended for use with fc_direct.py for calculation of
# free-carrier direct absorption transitions
#
# Methods:
# w0gauss : standard Gaussian smearing
# w1gauss : FD smearing
# sig_nk# : variable smearing for Dirac-delta involving e_{nk} - e_{m,k+q}
# TODO: implement band velocity for variable smearing....
# with/without inclusion of symmetry?
# mpdelta : Methfessel-Paxton; order n = 0 is equivalent to Gaussian smearing
# MP smearing can supposedly extract more physical meaning and is
# the default in VASP; in practice, n = 1 or 2 is sufficient.
import numpy as np
# standard gaussian
def w0gauss(x):
sqrtpm1 = 1.0/1.7724538509055160 # 1/sqrt(pi)
arg = np.min([500,x**2.0]) # cap the exponent so exp() stays well-behaved
return sqrtpm1 * np.exp( -arg )
def mpdelta(x,n):
""" x : argument
n : order of MP; n = 0 equivalent to Gaussian smearing
adapted from DELSTP subroutine in dos.F of VASP 5.4.1"""
A = 1.0/np.sqrt(np.pi)
K = 0
H1 = 1.0
H2 = 2.0*x
D = A
# coefficients of the Hermite polynomials
for i in range(n):
A = A/(-4.0*(i+1))
K = K + 1
H3 = H1
H1 = H2
H2 = 2.0*x*H2 - 2*K*H3
K = K + 1
H3 = H1
H1 = H2
H2 = 2.0*x*H2 - 2*K*H3
D = D + A*H1
D = D*np.exp(-(x*x))
return D
# FD smearing
def fd(x):
""" Fermi-Dirac occupation factor:
x : (ebnd - efermi)/T
"""
return 1/(np.exp(x)+1)
def fermidirac(ebnd,efermi,T):
""" Fermi-Dirac occupation factor:
ebnd: energy (in eV) of band of interest
efermi: Fermi energy (in eV)
T: temperature (in eV) """
energy = ebnd-efermi
return 1/(np.exp(energy/T)+1)
# adaptive smearing
def sig_nk(nk1,nk2,nk3,vk,aa):
dF1 = np.abs(vk[0] * 1.0/nk1)
dF2 = np.abs(vk[1] * 1.0/nk2)
dF3 = np.abs(vk[2] * 1.0/nk3)
sig_nk = aa * np.sqrt(np.abs(dF1**2 + dF2**2 + dF3**2))
return sig_nk
##
def sig_nk1(nk1,nk2,nk3,vk,aa):
dF1 = np.abs(vk[0] * 1.0)
dF2 = np.abs(vk[1] * 1.0)
dF3 = np.abs(vk[2] * 1.0)
sig_nk1 = aa * np.sqrt(np.abs(dF1**2 + dF2**2 + dF3**2))
return sig_nk1
##
def sig_nk2(nk1,nk2,nk3,vk,aa):
dF1 = np.abs(vk[0] * 1.0/nk1)
dF2 = np.abs(vk[1] * 1.0/nk2)
dF3 = np.abs(vk[2] * 1.0/nk3)
sig_nk2 = aa * np.max([dF1,dF2,dF3])
return sig_nk2
##
def sig_nk3(nk1,nk2,nk3,vk,aa):
dF1 = np.abs(vk[0] * 1.0/nk1)
dF2 = np.abs(vk[1] * 1.0/nk2)
dF3 = np.abs(vk[2] * 1.0/nk3)
sig_nk3 = aa * np.min([dF1,dF2,dF3])
return sig_nk3
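# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the grid sizes,
# velocity vector, and scale factor below are assumptions for demonstration:
#   xs = np.linspace(-3.0, 3.0, 7)
#   print([w0gauss(x) for x in xs])        # Gaussian kernel
#   print([mpdelta(x, 1) for x in xs])     # MP order 1; n = 0 reproduces w0gauss
#   print([fd(x) for x in xs])             # Fermi-Dirac factor
#   print(sig_nk(8, 8, 8, [0.1, 0.2, 0.3], 1.0))  # adaptive width on an 8x8x8 k-grid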
|
# Generated by Django 2.2 on 2019-04-24 22:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0003_auto_20190424_2246'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='user_name',
),
migrations.AddField(
model_name='user',
name='email',
field=models.EmailField(default=' ', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='user',
name='name',
field=models.CharField(default=' ', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='user',
name='password',
field=models.CharField(default=' ', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='user',
name='telephone',
field=models.CharField(default=' ', max_length=100),
preserve_default=False,
),
]
|
#!/usr/bin/env python
# WS server that sends messages at random intervals
import asyncio
import datetime
import random
import websockets
import redis
from dotenv import dotenv_values
config = dotenv_values("../.env")
WEBSOCKET_HOST = config["WEBSOCKET_HOST"]
WEBSOCKET_PORT = config["WEBSOCKET_PORT"]
HOST_REDIS = config["HOST_REDIS"]
PORT_REDIS = config["PORT_REDIS"]
redis_service = redis.StrictRedis(host=HOST_REDIS, port=PORT_REDIS, db=0)
def reversed_iterator(iterable):
return reversed(list(iterable))
async def time(websocket, path):
redis_service.flushdb()
while True:
for key in redis_service.scan_iter("display_log:*"):
msg = str(redis_service.get(key))
print(msg)
await websocket.send(msg)
redis_service.delete(key)
distance_log = redis_service.get("distance_log")
if distance_log is not None:
msg = str(distance_log)
print(msg)
await websocket.send(msg)
#redis_service.delete("distance_log")
await asyncio.sleep(0.1) # yield to the event loop so this busy loop cannot starve other tasks
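# Hypothetical smoke test from another shell (key name follows the pattern
# scanned above; the payload is an assumption):
#   redis-cli -h <HOST_REDIS> -p <PORT_REDIS> set display_log:1 '{"msg": "hello"}'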
async def websocketserver():
async with websockets.serve(time, WEBSOCKET_HOST, WEBSOCKET_PORT):
await asyncio.Future() # run forever
asyncio.run(websocketserver())
|
import torch
import copy
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import networkFiles as NF
import numpy as np
import logging
# Module ToDo List
# Do we want to add weight decay as a hyperparameter?
# What is a sensible value for the weight_decay
# Add function to generate different types of masks
def generateDictionary_Hyperopt(N_models, model_type, layers, input_size, hidden_size, image_size, loss_fn, dtype):
# Set up model dictionaries with meta entries that store key properties of the model
modelBlock = {"Meta": {"Model_Type": model_type, "Loss_Function": loss_fn, "Layers": layers,
"Epochs_Trained": 0, "Type": dtype, "N": image_size}}
resultBlock = {}
# Generate a vector of hyperparameters for the number of models
lr_vec = np.power(10, (np.random.uniform(-2.5, -5, N_models)))
#weight_decay_vec = np.power(10, (np.random.uniform(-2.5, -5, N_models)))
batch_size_vec = np.around(np.random.uniform(32, 256, N_models))
batch_size_vec = batch_size_vec.astype(int)
for i in range(N_models):
modelBlock[i] = {}
modelInit(modelBlock, model_type, i, input_size, hidden_size, layers, image_size)
modelBlock[i]["Model"].type(dtype)
modelBlock[i]["Learning"] = lr_vec[i]
modelBlock[i]["Batch"] = np.asscalar(batch_size_vec[i])
modelBlock[i]["Weight_Decay"] = 0.0 #np.asscalar(weight_decay_vec[i])
modelBlock[i]["Optimizer"] = optim.Adam(modelBlock[i]["Model"].parameters(),
lr = modelBlock[i]["Learning"], weight_decay = modelBlock[i]["Weight_Decay"])
modelBlock[i]["Loss"] = 100.0
modelBlock[i]["Accuracy"] = 1.0
resultBlock["Meta"] = {"Total_Epochs": 0}
resultBlock[i] = {"Hyperparameter":{}}
resultBlock[i]["Hyperparameter"]["Learning"] = lr_vec[i]
resultBlock[i]["Hyperparameter"]["Batch"] = np.asscalar(batch_size_vec[i])
resultBlock[i]["Hyperparameter"]["Weight_Decay"] = np.asscalar(batch_size_vec[i])
resultBlock[i]["Hyperparameter"]["Max_Epoch"] = 0
return modelBlock, resultBlock
def generateDictionary_Exp(N_models, model_type, layers, input_size, hidden_size, image_size, loss_fn, dtype, hyperparameter):
modelBlock = {}
resultBlock = {}
# These are the parameters for the distribution of path lengths passed to generate samples
# n is the longest path we want to generate uniformly over
n = 25
distribution = np.ones(n)/n
modelBlock["Meta"] = {"Model_Type": model_type, "Epochs_Trained": 0,
"Type": dtype, "N": image_size, "Distribution": distribution, "Layers": layers, "Loss_Function": loss_fn,
"Input": input_size, "Hidden": hidden_size}
lr = hyperparameter[model_type][layers]["Learning"]
batch_size = hyperparameter[model_type][layers]["Batch"]
weight_decay = hyperparameter[model_type][layers]["Weight_Decay"]
modelBlock["Meta"]["Learning"] = lr
modelBlock["Meta"]["Batch"] = batch_size
modelBlock["Meta"]["Weight_Decay"] = weight_decay
for i in range(N_models):
modelBlock[i] = {}
# Note that here we need to pass i in rather than layers
modelInit(modelBlock, model_type, i, input_size, hidden_size, layers, image_size)
modelBlock[i]["Model"].type(dtype)
modelBlock[i]["Learning"] = lr
modelBlock[i]["Batch"] = batch_size
modelBlock[i]["Optimizer"] = optim.Adam(modelBlock[i]["Model"].parameters(), lr = lr, weight_decay = weight_decay)
modelBlock[i]["Loss"] = 100.0
modelBlock[i]["Accuracy"] = 1.0
resultBlock[i] = {}
# Then in the actual code, results get saved as resultBlock[layer][model id][epoch] = {Dictionary of results}
return modelBlock, resultBlock
def modelInit(modelBlock, model_type, key, input_size, hidden_size, layers, image_size):
logger = logging.getLogger(__name__)
if (model_type == "FixedWeights"):
num_nodes = image_size**2
modelBlock[key]["Model"] = NF.PropagationOnly_FixedWeights(num_nodes, layers, num_nodes*5, image_size)
elif(model_type == "SharedPixel"):
num_nodes = image_size**2
modelBlock[key]["Model"] = NF.PropagationOnly_SharedPixel(num_nodes, layers, num_nodes*5, image_size)
else:
logger.warning('Model type not recognized')
def convertStateDict(modelBlock):
# The deep copy is important here. If not done, we end up modifying the original modelBlock
modelBlock_State = copy.deepcopy(modelBlock)
for key, val in modelBlock.items():
if (key != "Meta"):
model = modelBlock[key]["Model"].state_dict()
optimizer = modelBlock[key]["Optimizer"].state_dict()
modelBlock_State[key]["Model"] = model
modelBlock_State[key]["Optimizer"] = optimizer
return modelBlock_State
def loadStateDict(modelBlock_State):
modelBlock = copy.deepcopy(modelBlock_State)
model_type = modelBlock['Meta']['Model_Type']
input_size = modelBlock['Meta']['Input']
hidden_size = modelBlock['Meta']['Hidden']
layers = modelBlock['Meta']['Layers']
image_size = modelBlock['Meta']['N']
#lr = modelBlock['Meta']['Learning']
#weight_decay = modelBlock['Meta']['Weight_Decay']
for key, val in modelBlock.items():
if (key != "Meta"):
modelInit(modelBlock, model_type, key, input_size, hidden_size, layers, image_size)
#modelBlock[key]["Optimizer"] = optim.Adam(modelBlock[key]["Model"].parameters(), lr = lr, weight_decay = weight_decay)
modelBlock[key]["Model"].load_state_dict(modelBlock_State[key]["Model"])
#modelBlock[key]["Optimizer"].load_state_dict(modelBlock_State[key]["Optimizer"])
return modelBlock
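# Illustrative round trip (not part of the original module): persist a
# modelBlock via state dicts and restore it. Assumes `modelBlock` was built
# by one of the generateDictionary_* functions above; note loadStateDict
# re-creates models and does not restore optimizer state.
#   state = convertStateDict(modelBlock)
#   torch.save(state, 'modelBlock.pt')
#   restored = loadStateDict(torch.load('modelBlock.pt'))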
|
# Copied from OpenAI baselines
# Modified by Kenneth Marino
from abc import ABC, abstractmethod
from baselines import logger
import numpy as np
from gym import spaces
from collections import OrderedDict
from multiprocessing import Process, Pipe
from baselines.common.tile_images import tile_images
import pdb
class AlreadySteppingError(Exception):
"""
Raised when an asynchronous step is running while
step_async() is called again.
"""
def __init__(self):
msg = 'already running an async step'
Exception.__init__(self, msg)
class NotSteppingError(Exception):
"""
Raised when an asynchronous step is not running but
step_wait() is called.
"""
def __init__(self):
msg = 'not running an async step'
Exception.__init__(self, msg)
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions, step_mask):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
@abstractmethod
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions, step_mask=None):
# By default, we always step
if step_mask is None:
step_mask = np.ones([len(actions), 1])
self.step_async(actions, step_mask)
self.step_mask = step_mask
return self.step_wait()
def render(self, mode='human'):
logger.warn('Render not defined for %s'%self)
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
class VecEnvWrapper(VecEnv):
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
VecEnv.__init__(self,
num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions, step_mask):
self.venv.step_async(actions, step_mask)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self):
self.venv.render()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
shapes, dtypes = {}, {}
self.keys = []
obs_space = env.observation_space
if isinstance(obs_space, spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
subspaces = obs_space.spaces
else:
subspaces = {None: obs_space}
for key, box in subspaces.items():
shapes[key] = box.shape
dtypes[key] = box.dtype
self.keys.append(key)
self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }
self.buf_dones = np.zeros((self.num_envs,), dtype=bool)  # np.bool was removed in NumPy 1.24
self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
def step_async(self, actions, step_mask):
listify = True
try:
if len(actions) == self.num_envs:
listify = False
except TypeError:
pass
if not listify:
self.actions = actions
else:
assert self.num_envs == 1, "actions {} is either not a list or has a wrong size - cannot match to {} environments".format(actions, self.num_envs)
self.actions = [actions]
self.step_mask = step_mask
def step_wait(self):
for e in range(self.num_envs):
action = self.actions[e]
if isinstance(self.envs[e].action_space, spaces.Discrete):
action = int(action)
# If step_mask is 1, step normally
if self.step_mask[e].item() > 0:
obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(action)
if self.buf_dones[e]:
obs = self.envs[e].reset()
# If zero, just fill with dummy values
else:
obs = float('inf') * np.ones(self.envs[e].observation_space.shape) # This value should never be assigned anywhere
self.buf_rews[e] = 0
self.buf_dones[e] = True
self.buf_infos[e] = {}
self._save_obs(e, obs)
return (np.copy(self._obs_from_buf()), np.copy(self.buf_rews), np.copy(self.buf_dones),
self.buf_infos.copy())
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
self._save_obs(e, obs)
return self._obs_from_buf()
def close(self):
return
def render(self, mode='human'):
return [e.render(mode=mode) for e in self.envs]
def _save_obs(self, e, obs):
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
def _obs_from_buf(self):
if self.keys==[None]:
return self.buf_obs[None]
else:
return self.buf_obs
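# Minimal usage sketch for DummyVecEnv (illustrative; assumes the `gym`
# package with the standard 'CartPole-v1' registration is available):
#   import gym
#   venv = DummyVecEnv([lambda: gym.make('CartPole-v1') for _ in range(4)])
#   obs = venv.reset()
#   actions = [venv.action_space.sample() for _ in range(venv.num_envs)]
#   obs, rews, dones, infos = venv.step(actions)  # step_mask defaults to all ones
#   venv.close()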
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'dummy_step':
ob = float('inf') * np.ones(env.observation_space.shape) # This value should never be assigned anywhere
reward = 0
done = True
info = {}
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
env.close()
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions, step_mask):
for remote, action, proc in zip(self.remotes, actions, range(len(actions))):
if step_mask[proc].item() > 0:
remote.send(('step', action))
else:
remote.send(('dummy_step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode='human'):
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
bigimg = tile_images(imgs)
if mode == 'human':
import cv2
cv2.imshow('vecenv', bigimg[:,:,::-1])
cv2.waitKey(1)
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scanner for the Bucket acls rules engine."""
import itertools
from google.cloud.security.common.util import log_util
from google.cloud.security.common.data_access import bucket_dao
from google.cloud.security.common.gcp_type.resource import ResourceType
from google.cloud.security.scanner.audit import buckets_rules_engine
from google.cloud.security.scanner.scanners import base_scanner
LOGGER = log_util.get_logger(__name__)
class BucketsAclScanner(base_scanner.BaseScanner):
"""Pipeline to Bucket acls data from DAO"""
def __init__(self, global_configs, scanner_configs, snapshot_timestamp,
rules):
"""Initialization.
Args:
global_configs (dict): Global configurations.
scanner_configs (dict): Scanner configurations.
snapshot_timestamp (str): Timestamp, formatted as YYYYMMDDTHHMMSSZ.
rules (str): Fully-qualified path and filename of the rules file.
"""
super(BucketsAclScanner, self).__init__(
global_configs,
scanner_configs,
snapshot_timestamp,
rules)
self.rules_engine = buckets_rules_engine.BucketsRulesEngine(
rules_file_path=self.rules,
snapshot_timestamp=self.snapshot_timestamp)
self.rules_engine.build_rule_book(self.global_configs)
@staticmethod
def _flatten_violations(violations):
"""Flatten RuleViolations into a dict for each RuleViolation member.
Args:
violations (list): The RuleViolations to flatten.
Yields:
dict: Iterator of RuleViolations as a dict per member.
"""
for violation in violations:
violation_data = {}
violation_data['role'] = violation.role
violation_data['entity'] = violation.entity
violation_data['email'] = violation.email
violation_data['domain'] = violation.domain
violation_data['bucket'] = violation.bucket
violation_data['project_number'] = violation.project_number
yield {
'resource_id': violation.resource_id,
'resource_type': violation.resource_type,
'rule_index': violation.rule_index,
'rule_name': violation.rule_name,
'violation_type': violation.violation_type,
'violation_data': violation_data
}
def _output_results(self, all_violations):
"""Output results.
Args:
all_violations (list): All violations
"""
all_violations = self._flatten_violations(all_violations)
self._output_results_to_db(all_violations)
def _find_violations(self, bucket_data):
"""Find violations in the policies.
Args:
bucket_data (list): Buckets to find violations in
Returns:
list: All violations.
"""
bucket_data = itertools.chain(*bucket_data)
all_violations = []
LOGGER.info('Finding bucket acl violations...')
for (bucket, bucket_acl) in bucket_data:
LOGGER.debug('%s => %s', bucket, bucket_acl)
violations = self.rules_engine.find_policy_violations(
bucket_acl)
LOGGER.debug(violations)
all_violations.extend(violations)
return all_violations
@staticmethod
def _get_resource_count(project_policies, buckets_acls):
"""Get resource count for org and project policies.
Args:
project_policies (list): project policies from inventory.
buckets_acls (list): bucket acls from inventory.
Returns:
dict: Resource count map
"""
resource_counts = {
ResourceType.PROJECT: len(project_policies),
ResourceType.BUCKETS_ACL: len(buckets_acls),
}
return resource_counts
def _get_bucket_acls(self):
"""Get bucket acls from data source.
Returns:
list: List of bucket acls.
"""
buckets_acls = (bucket_dao
.BucketDao(self.global_configs)
.get_buckets_acls('buckets_acl',
self.snapshot_timestamp))
return buckets_acls
def _retrieve(self):
"""Runs the data collection.
Returns:
list: Bucket ACL data.
"""
buckets_acls_data = []
project_policies = {}
buckets_acls = self._get_bucket_acls()
buckets_acls_data.append(buckets_acls.iteritems())
buckets_acls_data.append(project_policies.iteritems())
return buckets_acls_data
def run(self):
"""Run the scanner: retrieve data, find violations, and output results."""
buckets_acls_data = self._retrieve()
all_violations = self._find_violations(buckets_acls_data)
self._output_results(all_violations)
|
from rest_framework import generics
from .models import Author, Book
from .serializers import AuthorSerializer, BookSerializer
class AuthorListAPIView(generics.ListCreateAPIView):
queryset = Author.objects.all()
serializer_class = AuthorSerializer
class AuthorDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
queryset = Author.objects.all()
serializer_class = AuthorSerializer
class BookListAPIView(generics.ListCreateAPIView):
queryset = Book.objects.all()
serializer_class = BookSerializer
class BookDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
queryset = Book.objects.all()
serializer_class = BookSerializer
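# Hypothetical URLconf wiring for these views (route names are assumptions,
# not taken from the project):
#   from django.urls import path
#   urlpatterns = [
#       path('authors/', AuthorListAPIView.as_view()),
#       path('authors/<int:pk>/', AuthorDetailAPIView.as_view()),
#       path('books/', BookListAPIView.as_view()),
#       path('books/<int:pk>/', BookDetailAPIView.as_view()),
#   ]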
|
# Generated by Django 2.2.6 on 2019-11-30 10:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('work', '0047_auto_20191130_1013'),
]
operations = [
migrations.AlterField(
model_name='progressqty',
name='status',
field=models.CharField(blank=True, choices=[('completed', 'completed'), ('ongoing', 'ongoing'), ('not started', 'not started'), ('canceled', 'canceled')], max_length=200, null=True, verbose_name='completed or others'),
),
migrations.AlterField(
model_name='progressqtyextra',
name='status',
field=models.CharField(blank=True, choices=[('completed', 'completed'), ('ongoing', 'ongoing'), ('not started', 'not started'), ('canceled', 'canceled')], max_length=100, null=True),
),
]
|
from django.conf.urls import url
from .views import CafeCoordinates
from .views import ItemDetail
from .views import CafeName
from .views import CafeList
from .views import CafeDetail
from .views import OrderCreation
from .views import OrdersList
from .views import ChangingOrderStatus
urlpatterns = [
url(r'^cafes/$', CafeList.as_view()),
url(r'^cafes/(?P<pk>[0-9]+)/$', CafeDetail.as_view()),
url(r'^cafes/get_cafe_by_name', CafeName.as_view()),
url(r'^cafes/get_cafe_by_coord', CafeCoordinates.as_view()),
url(r'^cafes/get_item_by_id', ItemDetail.as_view()),
url(r'^cafes/create_order', OrderCreation.as_view()),
url(r'^cafes/get_orders', OrdersList.as_view()),
url(r'^cafes/change_order_status', ChangingOrderStatus.as_view()),
]
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ScraperAuchanItem(scrapy.Item):
id = scrapy.Field()
name = scrapy.Field()
desc = scrapy.Field()
price = scrapy.Field()
pricePerUnit = scrapy.Field()
composition = scrapy.Field()
|
import webapp2
from views.ViewHandler import ViewHandler
from models.AppUserModel import *
import urlparse
import json
"""
Base class for webapp2
Base logged in authentication and common variables created here
"""
class BaseHandler(webapp2.RequestHandler):
def __init__(self, request, response):
# calling super class constructor
super(BaseHandler, self).__init__(request=request, response=response)
# initializing user property with current user. Returns None if not logged in
self.user = users.get_current_user()
# parsing request url for obtaining base url
parsed = urlparse.urlparse(self.request.url)
base_url = urlparse.urlunparse((parsed.scheme, parsed.netloc, "", "", "", ""))
# creating login/logout url
if self.user:
# if user is logged in, create logout url
url = users.create_logout_url(self.request.uri)
self.appUser = AppUserMethods.fetch_user(self.user.email())
else:
# else create login url.
self.appUser = None
url = users.create_login_url(self.request.uri)
self.redirect(url)
# default template values common for all templates.
# creating view object
self.view = ViewHandler(self.response, base_url, self.user, url)
def send_json_object(self, response_object):
self.response.headers['content-type'] = 'application/json'  # was text/plain, but the body is JSON
self.response.write(json.dumps(response_object))
|
#!/usr/bin/env python3
from ev3dev2.motor import MoveSteering, MoveTank, MediumMotor, LargeMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C, OUTPUT_D
from ev3dev2.sensor.lego import TouchSensor, ColorSensor, GyroSensor
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4
import xml.etree.ElementTree as ET
import threading
import time
from sys import stderr
gyro = GyroSensor(INPUT_1)
steering_drive = MoveSteering(OUTPUT_B, OUTPUT_C)
tank_block = MoveTank(OUTPUT_B, OUTPUT_C)
largeMotor_Left= LargeMotor(OUTPUT_B)
largeMotor_Right= LargeMotor(OUTPUT_C)
#_________________________________________________________________________________________________________________________________
def StraightGyro_current(stop, speed, rotations):
print("In StraightGyro_current", file=stderr)
current_degrees = largeMotor_Left.position
rotations = rotations * 360
target_rotations= current_degrees + rotations
target = gyro.angle
current_gyro_reading = target
# print("Current Gyro Reading: {}".format(current_gyro_reading))
while float(current_degrees) < target_rotations: # while the current rotation count is below the target
if stop():
break
#recording the gyro reading and the current rotations
current_gyro_reading=gyro.angle
current_degrees = largeMotor_Left.position
# if the gyro reading is smaller than the target (drifting to the right)
if current_gyro_reading < target:
correction = target - current_gyro_reading # correction = target reading - current reading
correction = correction * .25 # use a quarter of the correction
steering_drive.on(steering = -correction , speed = speed) # steer by the correction
# if the gyro reading is larger than the target (drifting to the left)
if current_gyro_reading > target:
correction = target - current_gyro_reading # correction = target reading - current reading
correction = correction * .25 # use a quarter of the correction
steering_drive.on(steering = -correction , speed = speed) # steer by the correction
# if the current gyro = the target just continue straight
if current_gyro_reading == target:
steering_drive.on(steering = 0 , speed = speed)
#if the current rotations is larger than the target break which will stop the loop
if float(current_degrees) >= target_rotations:
break
if stop():
break
tank_block.off()
print('Leaving StraightGyro_current', file=stderr)
#stopProcessing=False
#StraightGyro_current(lambda:stopProcessing, speed=30, rotations=3)
|
from rest_framework import viewsets
from .serializers import CoffeeShopSerializer, NewsletterSerializer, BookSerializer
from ..models import CoffeeShop, Newsletter,Book
from rest_framework.response import Response
class CoffeeShopViewSet(viewsets.ModelViewSet):
queryset = CoffeeShop.objects.all()
serializer_class = CoffeeShopSerializer
class NewsletterViewSet(viewsets.ModelViewSet):
queryset = Newsletter.objects.all()
serializer_class = NewsletterSerializer
class BookViewSet(viewsets.ModelViewSet):
queryset = Book.objects.all()
serializer_class = BookSerializer
class CoffeeShopBooksViewSet(viewsets.ModelViewSet):
queryset = Book.objects.all()
serializer_class = BookSerializer
def get_queryset(self):
# look up books for the shop id captured by the URLconf; filtering directly
# avoids the IndexError the old shop[0] lookup raised for an unknown shop
shop_id = self.request.parser_context["kwargs"]["id"]
return Book.objects.filter(shop__id=shop_id)
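# Note: this viewset expects the URLconf to capture an `id` kwarg, e.g.
# (hypothetical route, not taken from the project):
#   path('coffeeshops/<int:id>/books/', CoffeeShopBooksViewSet.as_view({'get': 'list'}))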
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gio
from gi.repository.GdkPixbuf import Pixbuf
class tagOverlay(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Tag Overlay")
self.set_border_width(10)
hb = Gtk.HeaderBar(title="Tag Overlay")
self.set_titlebar(hb)
closebutton = Gtk.Button()
closebutton.set_relief(Gtk.ReliefStyle.NONE)
img = Gtk.Image.new_from_icon_name("window-close-symbolic", Gtk.IconSize.MENU)
closebutton.set_image(img)
closebutton.connect("clicked", Gtk.main_quit)
hb.pack_end(closebutton)
hbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.add(hbox)
listbox = Gtk.ListBox()
listbox.add(self.tagName())
listbox.add(self.taggedField())
listbox.add(self.tagDescription())
listbox.add(self.bottomBttn())
hbox.pack_start(listbox, False, True, 0)
def tagName(self):
row = Gtk.ListBoxRow() # row widget, matching bottomBttn below
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
row.add(hbox)
vbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=15)
hbox.pack_start(vbox, False, True, 0)
label2 = Gtk.Label()
label2.set_markup("Tag Name")
vbox.pack_start(label2, False, True, 0)
entry1 = Gtk.Entry()
entry1.set_text('Tag Name')
vbox.pack_start(entry1, False, True, 0)
return row
def taggedField(self):
row = Gtk.ListBoxRow() # row widget, matching bottomBttn below
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
row.add(hbox)
vbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=15)
hbox.pack_start(vbox, False, True, 0)
label2 = Gtk.Label()
label2.set_markup("Tagged Field")
vbox.pack_start(label2, False, True, 0)
entry1 = Gtk.Entry()
entry1.set_text('Tagged Field Name')
vbox.pack_start(entry1, False, True, 0)
return row
def tagDescription(self):
row = Gtk.ListBoxRow() # row widget, matching bottomBttn below
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
row.add(hbox)
vbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=15)
hbox.pack_start(vbox, False, True, 0)
label2 = Gtk.Label()
label2.set_markup("Tag Description")
vbox.pack_start(label2, False, True, 0)
entry1 = Gtk.Entry()
entry1.set_text('Tag Description')
vbox.pack_start(entry1, False, True, 0)
return row
def bottomBttn(self):
row = Gtk.ListBoxRow()
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=30)
row.add(hbox)
btn = Gtk.Button.new_with_label("Save")
hbox.pack_start(btn, True, True, 0)
btn = Gtk.Button.new_with_label("Cancel")
hbox.pack_start(btn, True, True, 0)
return row
window = tagOverlay()
window.show_all()
Gtk.main()
|
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
import doubleml as dml
from ._utils import draw_smpls
from ._utils_plr_manual import fit_plr_multitreat, boot_plr_multitreat
@pytest.fixture(scope='module',
params=range(2))
def idx(request):
return request.param
@pytest.fixture(scope='module',
params=[Lasso(alpha=0.1),
RandomForestRegressor(max_depth=2, n_estimators=10)])
def learner(request):
return request.param
@pytest.fixture(scope='module',
params=['IV-type', 'partialling out'])
def score(request):
return request.param
@pytest.fixture(scope='module',
params=['dml1', 'dml2'])
def dml_procedure(request):
return request.param
@pytest.fixture(scope='module',
params=[1, 3])
def n_rep(request):
return request.param
@pytest.fixture(scope='module')
def dml_plr_multitreat_fixture(generate_data_bivariate, generate_data_toeplitz, idx, learner,
score, dml_procedure, n_rep):
boot_methods = ['normal']
n_folds = 2
n_rep_boot = 483
# collect data
if idx == 0:
data = generate_data_bivariate
else:
assert idx == 1
data = generate_data_toeplitz
x_cols = data.columns[data.columns.str.startswith('X')].tolist()
d_cols = data.columns[data.columns.str.startswith('d')].tolist()
# Set machine learning methods for m & g
ml_g = clone(learner)
ml_m = clone(learner)
np.random.seed(3141)
obj_dml_data = dml.DoubleMLData(data, 'y', d_cols, x_cols)
dml_plr_obj = dml.DoubleMLPLR(obj_dml_data,
ml_g, ml_m,
n_folds, n_rep,
score=score,
dml_procedure=dml_procedure)
dml_plr_obj.fit()
np.random.seed(3141)
y = data['y'].values
x = data.loc[:, x_cols].values
d = data.loc[:, d_cols].values
n_obs = len(y)
all_smpls = draw_smpls(n_obs, n_folds, n_rep)
res_manual = fit_plr_multitreat(y, x, d,
clone(learner), clone(learner),
all_smpls, dml_procedure, score,
n_rep=n_rep)
res_dict = {'coef': dml_plr_obj.coef,
'coef_manual': res_manual['theta'],
'se': dml_plr_obj.se,
'se_manual': res_manual['se'],
'boot_methods': boot_methods}
for bootstrap in boot_methods:
np.random.seed(3141)
boot_theta, boot_t_stat = boot_plr_multitreat(
y, d,
res_manual['thetas'], res_manual['ses'],
res_manual['all_g_hat'], res_manual['all_m_hat'],
all_smpls, score,
bootstrap, n_rep_boot, n_rep)
np.random.seed(3141)
dml_plr_obj.bootstrap(method=bootstrap, n_rep_boot=n_rep_boot)
res_dict['boot_coef' + bootstrap] = dml_plr_obj.boot_coef
res_dict['boot_t_stat' + bootstrap] = dml_plr_obj.boot_t_stat
res_dict['boot_coef' + bootstrap + '_manual'] = boot_theta
res_dict['boot_t_stat' + bootstrap + '_manual'] = boot_t_stat
return res_dict
@pytest.mark.ci
def test_dml_plr_multitreat_coef(dml_plr_multitreat_fixture):
assert np.allclose(dml_plr_multitreat_fixture['coef'],
dml_plr_multitreat_fixture['coef_manual'],
rtol=1e-9, atol=1e-4)
@pytest.mark.ci
def test_dml_plr_multitreat_se(dml_plr_multitreat_fixture):
assert np.allclose(dml_plr_multitreat_fixture['se'],
dml_plr_multitreat_fixture['se_manual'],
rtol=1e-9, atol=1e-4)
@pytest.mark.ci
def test_dml_plr_multitreat_boot(dml_plr_multitreat_fixture):
for bootstrap in dml_plr_multitreat_fixture['boot_methods']:
assert np.allclose(dml_plr_multitreat_fixture['boot_coef' + bootstrap],
dml_plr_multitreat_fixture['boot_coef' + bootstrap + '_manual'],
rtol=1e-9, atol=1e-4)
assert np.allclose(dml_plr_multitreat_fixture['boot_t_stat' + bootstrap],
dml_plr_multitreat_fixture['boot_t_stat' + bootstrap + '_manual'],
rtol=1e-9, atol=1e-4)
|
import requests
import pandas as pd
import environ
env = environ.Env()
environ.Env.read_env() # reading .env file
cert_cert = env('CERT')
cert_key = env('KEY')
online_edu_link = 'https://online.edu.ru/api/'
def get_data():
"""
Collecting platforms to DataFrame
"""
cert_cert = env('CERT')
cert_key = env('KEY')
print(cert_cert, cert_key)
platforms = requests.get(online_edu_link+'partners/v0/platform',
cert=(cert_cert, cert_key))
platforms_list = platforms.json()['rows']
global_id = []
title = []
for platform in platforms_list:
global_id.append(platform['global_id'])
title.append(platform['title'])
data_Platform = pd.DataFrame(list(zip(global_id, title)),
columns=['platform_id', 'title'])
"""
Collecting institutions to DataFrame
"""
rightholders = requests.get(online_edu_link+'partners/v0/rightholder',
cert=(cert_cert, cert_key))
rightholders_list = rightholders.json()['rows']
global_id = []
short_title = []
for rightholder in rightholders_list:
global_id.append(rightholder['global_id'])
short_title.append(rightholder['short_title'])
data_Rigthholder = pd.DataFrame(list(zip(global_id, short_title)),
columns=['institution_id', 'title'])
"""
Collecting onlinecourses to DataFrame
"""
# collect links to all pages with online courses
course_link = online_edu_link+'courses/v0/course/'
total_count_courses = requests.get(course_link,
cert=(cert_cert, cert_key)).json()['total_count']
count_courses = len(requests.get(course_link,
cert=(cert_cert, cert_key)).json()['results'])
print('Total online courses:', total_count_courses)
pages = round(total_count_courses / count_courses) + 1
print('Number of pages:', pages)
course_links = []
course_links.append(course_link)
for i in range(1, pages):
new_link = course_link + '?page=' + str(i)
course_links.append(new_link)
# collect the ids of all online courses
course_id = []
for link in course_links:
course = requests.get(link,
cert=(cert_cert, cert_key))
courses_list = course.json()['results']
for i in courses_list:
course_id.append(i['global_id'])
print('Length of course_id:', len(course_id))
title = []
description = []
institution = []
platform = []
language = []
started_at = []
created_at = []
record_end_at = []
finished_at = []
rating = []
experts_rating = []
visitors_number = []
total_visitors_number = []
duration = []
volume = []
intensity_per_week = []
content = []
lectures_number = []
external_url = []
has_certificate = []
credits = []
requirements = []
learning_outcomes = []
competences = []
course_id_field_of_study = []
field_of_study = []
course_id_transfer = []
field_of_study_transfer = []
institution_transfer = []
# collect data for each online course
course_link = online_edu_link+'courses/v0/course/'
for i in range(0, len(course_id)):
current_course_link = course_link + str(course_id[i])
current_course = requests.get(current_course_link,
cert=(cert_cert, cert_key))
course_json = current_course.json() # parse the response once instead of re-parsing for every field
title.append(course_json['title'])
description.append(course_json['description'])
institution.append(course_json['institution_id'])
platform.append(course_json['partner_id'])
language.append(course_json['language'])
started_at.append(course_json['started_at'])
created_at.append(course_json['created_at'])
record_end_at.append(course_json['record_end_at'])
finished_at.append(course_json['finished_at'])
rating.append(course_json['rating'])
experts_rating.append(course_json['experts_rating'])
visitors_number.append(course_json['visitors_number'])
total_visitors_number.append(course_json['total_visitors_number'])
duration.append(course_json['duration'])
volume.append(course_json['volume'])
intensity_per_week.append(course_json['intensity_per_week'])
content.append(course_json['content'])
lectures_number.append(course_json['lectures_number'])
external_url.append(course_json['registry_url'])
has_certificate.append(course_json['has_certificate'])
credits.append(course_json['credits'])
try:
if len(course_json['requirements']) != 0:
requirements.append(course_json['requirements'][0])
else:
requirements.append('null')
except (KeyError, TypeError):
# append a placeholder instead of 'continue': skipping here would leave
# the per-course lists with different lengths and misalign the DataFrame
requirements.append('null')
try:
if len(course_json['learning_outcomes']) != 0:
learning_outcomes.append(course_json['learning_outcomes'][0])
else:
learning_outcomes.append('null')
except (KeyError, TypeError):
learning_outcomes.append('null')
try:
if len(course_json['competences']) != 0:
competences.append(course_json['competences'])
else:
competences.append('null')
except (KeyError, TypeError):
competences.append('')
"""
#Collecting onlinecourse&field_of_study to DataFrame
"""
for j in course_json['directions']:
field_of_study.append(j)
course_id_field_of_study.append(course_id[i])
"""
Collecting onlinecourse&credit to DataFrame
"""
for j in course_json['transfers']:
course_id_transfer.append(course_id[i])
field_of_study_transfer.append(j['direction_id'])
institution_transfer.append(j['institution_id'])
data_OnlineCourse = pd.DataFrame(list(zip(course_id, title, description, institution, platform, language,
started_at, created_at, record_end_at, finished_at, rating,
experts_rating, visitors_number, total_visitors_number, duration, volume,
intensity_per_week, content, lectures_number, external_url,
has_certificate, credits, requirements, learning_outcomes, competences)),
columns=['course_id', 'title', 'description', 'institution_id', 'platform_id',
'language', 'started_at', 'created_at', 'record_end_at', 'finished_at',
'rating', 'experts_rating', 'visitors_number', 'total_visitors_number',
'duration', 'volume', 'intensity_per_week', 'content', 'lectures_number',
'external_url', 'has_certificate', 'credits', 'requirements',
'learning_outcomes', 'competences'])
print(data_OnlineCourse.shape)
data_OnlineCourse['id_course'] = data_OnlineCourse.index
print(data_OnlineCourse.shape)
data_CourseFieldOfStudy = pd.DataFrame(list(zip(course_id_field_of_study, field_of_study)),
columns=['course_id', 'field_of_study'])
data_CourseCredit = pd.DataFrame(list(zip(course_id_transfer, institution_transfer, field_of_study_transfer)),
columns=['course_id', 'institution_id', 'field_of_study'])
"""
Adding new id as FK to OnlineCourse
"""
data_Platform['id_platform'] = data_Platform.index
data_Rigthholder['id_institution'] = data_Rigthholder.index
data_online_course_platform = pd.merge(data_OnlineCourse, data_Platform, how='left', on='platform_id')
data_online_course_platform_inst = pd.merge(data_online_course_platform, data_Rigthholder, how='left',
on='institution_id')
data_online_course_platform_inst.started_at = pd.to_datetime(data_online_course_platform_inst['started_at'],
format='%Y-%m-%d', errors='ignore')
data_online_course_platform_inst.finished_at = pd.to_datetime(data_online_course_platform_inst['finished_at'],
format='%Y-%m-%d', errors='ignore')
data_OnlineCourse = data_online_course_platform_inst.copy()
data_OnlineCourse = data_OnlineCourse.fillna('null')
print('final shape', data_OnlineCourse.shape)
"""
Adding new id as FK to CourseFieldOfStudy
"""
data_CourseFieldOfStudy = pd.merge(data_CourseFieldOfStudy, data_OnlineCourse[['id_course', 'course_id']],
how='left', on='course_id')
"""
Adding new id as FK to CourseCredit
"""
data_CourseCredit = pd.merge(data_CourseCredit, data_OnlineCourse[['id_course', 'course_id']],
how='left', on='course_id')
data_CourseCredit = pd.merge(data_CourseCredit, data_Rigthholder, how='left', on='institution_id')
return data_Platform, data_Rigthholder, data_OnlineCourse, data_CourseFieldOfStudy, data_CourseCredit
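# Illustrative entry point (not part of the original module); requires the
# CERT and KEY paths in .env:
#   frames = get_data()
#   names = ['Platform', 'Rightholder', 'OnlineCourse',
#            'CourseFieldOfStudy', 'CourseCredit']
#   for name, df in zip(names, frames):
#       print(name, df.shape)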
|
def checker(datelist):
longmonths = [1, 3 , 5, 7 , 8 , 10 , 12]
date = True
if len(datelist) != 3:
date = False
else:
month , day , year = datelist
month = int(month)
day = int(day)
year = int(year)
if month > 12 or month < 1 or day < 1 or day > 31 or year < 1:
date = False
elif month not in longmonths and month != 2 and day == 31:
date = False
elif month == 2 and day > 29:
date = False
# Feb 29 only exists in leap years: divisible by 4, except centuries not divisible by 400
elif month == 2 and day == 29 and not ((year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)):
date = False
if date == False:
print( month,"/",day,"/",year , "is a false date")
else:
print( month,"/",day,"/",year, "is a valid date")
def main():
datelist = []
x = input("enter a date separated by /'s, formatted XX/XX/XXXX : ")
datelist = x.split("/")
checker(datelist)
main()
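# Quick sanity checks (illustrative):
#   checker('2/29/2020'.split('/'))   # valid date (leap year)
#   checker('2/29/2019'.split('/'))   # false date
#   checker('6/31/2021'.split('/'))   # false date (June has 30 days)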
|
def solution(arrangement):
# Iron-bar laser puzzle: '()' is a laser; other parentheses are bar ends.
# Collapse every laser '()' into a single marker 'r' first.
laser = arrangement.replace("()", "r")
cutting = [] # stack of currently open bars
count = 0
for i in laser:
if i == "r":
# a laser cuts every bar that is currently open
count += len(cutting)
elif i == "(":
cutting.append("(")
elif i == ")":
# a bar ends: its final piece falls off
cutting.pop()
count += 1
return count
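# Worked example (verified by hand-tracing the loop above):
#   solution("()(((()())(())()))(())")  # -> 17 cut pieces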
|
import json
import unittest
import responses
import pyyoutube
class ApiCaptionsTest(unittest.TestCase):
BASE_PATH = "testdata/apidata/captions/"
BASE_URL = "https://www.googleapis.com/youtube/v3/captions"
with open(BASE_PATH + "captions_by_video.json", "rb") as f:
CAPTIONS_BY_VIDEO = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "captions_filter_by_id.json", "rb") as f:
CAPTIONS_FILTER_ID = json.loads(f.read().decode("utf-8"))
def setUp(self) -> None:
self.api_with_access_token = pyyoutube.Api(access_token="token")
def testGetCaptionByVideo(self) -> None:
video_id = "oHR3wURdJ94"
# test parts
with self.assertRaises(pyyoutube.PyYouTubeException):
self.api_with_access_token.get_captions_by_video(
video_id=video_id,
parts="id,not_part",
)
# test by video
with responses.RequestsMock() as m:
m.add("GET", self.BASE_URL, json=self.CAPTIONS_BY_VIDEO)
res = self.api_with_access_token.get_captions_by_video(
video_id=video_id,
parts="id,snippet",
return_json=True,
)
self.assertEqual(len(res["items"]), 2)
self.assertEqual(res["items"][0]["snippet"]["videoId"], video_id)
# test filter id
with responses.RequestsMock() as m:
m.add("GET", self.BASE_URL, json=self.CAPTIONS_FILTER_ID)
res = self.api_with_access_token.get_captions_by_video(
video_id=video_id,
parts=["id", "snippet"],
caption_id="SwPOvp0r7kd9ttt_XhcHdZthMwXG7Z0I",
)
self.assertEqual(len(res.items), 1)
self.assertEqual(res.items[0].snippet.videoId, video_id)
|
# Tests for clear_tokens.py
from dotenv import load_dotenv, find_dotenv
from pathlib import Path
import json
import os
import pymysql
import traceback
import time
import sys
import re
import subprocess
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(path + "/../../cron")
sys.path.append(path + "/../../")
from clear_tokens import clear_tokens
from reviewgramdb import connect_to_db, execute_insert, execute_update, select_and_fetch_first_column
load_dotenv(find_dotenv()) # load .env so TOKEN_CLEANUP_TIME is available to os.getenv below
def test_clear_tokens():
timestamp = int(time.time())
clear_tokens()
cleanupTime = int(os.getenv("TOKEN_CLEANUP_TIME"))
assert cleanupTime != 0
con = connect_to_db()
with con:
execute_update(con, "TRUNCATE TABLE `token_to_user_id`" , [])
execute_insert(con, "INSERT `token_to_chat_id`(`ID`,`TOKEN`, `CHAT_ID`,`TSTAMP`) VALUES (0, '111', 9999, 0)" , [])
execute_insert(con, "INSERT `token_to_user_id`(`ID`,`TOKEN`, `USER_ID`,`TSTAMP`) VALUES (0, '111', 9999, 0)" , [])
clear_tokens()
con = connect_to_db()
with con:
cnt = select_and_fetch_first_column(con, "SELECT COUNT(*) FROM `token_to_chat_id` WHERE `TOKEN` IN (SELECT `TOKEN` FROM `token_to_user_id` WHERE " + str(timestamp) + " - TSTAMP >= " + str(cleanupTime) + ")", [])
assert cnt == 0
cnt = select_and_fetch_first_column(con, "SELECT COUNT(*) FROM `token_to_user_id` WHERE " + str(timestamp) + " - TSTAMP >= " + str(cleanupTime), [])
assert cnt == 0
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
class ListInstance(object):
"""
List instance attributes using __dict__
"""
def __str__(self):
return '<Instance of %s, address %s:\n%s>' % (
self.__class__.__name__, id(self), self.__attrnames())
def __attrnames(self):
result = ''
for attr in sorted(self.__dict__):
result += '\tname: %s = %s\n' % (attr, self.__dict__[attr]) # accumulate; reassignment kept only the last attribute
return result
class ListInherited(object):
"""
List inherited attributes using dir
"""
def __str__(self):
return '<Instance of %s, address %s:\n%s>' % (
self.__class__.__name__, id(self), self.__attrnames())
def __attrnames(self):
result = ''
for attr in dir(self):
if attr[:2] == '__' and attr[-2:] == '__':
result += '\tname: %s = <>\n' % attr
else:
result += '\tname: %s = %s\n' % (attr, getattr(self, attr))
return result
class ListTree(object):
"""列出类树中每个对象的属性"""
def __str__(self):
self.__visited = {}
return '<Instance of {0}, address {1}:\n{2}{3}>'.format(
self.__class__.__name__,
id(self),
self.__attrnames(self, 0),
self.__listclass(self.__class__, 4))
def __listclass(self, aClass, indent):
dots = '.' * indent
if aClass in self.__visited:
return '\n{0}<Class {1}:, address {2}: (see above)>\n'.format(
dots, aClass.__name__, id(aClass))
else:
self.__visited[aClass] = True
genabove = (self.__listclass(c, indent + 4)
for c in aClass.__bases__)
return '\n{0}<Class {1}, address {2}:\n{3}{4}{5}>\n'.format(
dots,
aClass.__name__,
id(aClass),
self.__attrnames(aClass, indent),
''.join(genabove),
dots)
def __attrnames(self, obj, indent):
spaces = ' ' * (indent + 4)
result = ''
for attr in sorted(obj.__dict__):
if attr.startswith('__') and attr.endswith('__'):
result += spaces + '{0} = <>\n'.format(attr)
else:
result += spaces + \
'{0} = {1}\n'.format(attr, getattr(obj, attr))
return result
if __name__ == "__main__":
# List instance attributes using __dict__ -----------------------------------------------
print('-' * 40, 'List instance attributes using __dict__', '-' * 40)
class Spam(ListInstance):
def __init__(self):
self.data1 = 'food'
# for classes inheriting from ListInstance, printing automatically triggers __str__
a = Spam()
print(a)
# it can also be converted to a string without printing; the interactive echo still uses the default format
str(a) # this form is meant for the interactive interpreter
# use ListInstance as a mix-in
class Super(object):
def __init__(self):
self.data1 = 'spam'
def ham(self):
pass
class Sub(Super, ListInstance):
def __init__(self):
Super.__init__(self)
self.data2 = 'eggs'
self.data3 = 42
def spam(self):
pass
b = Sub()
print(b)
# List inherited attributes using dir --------------------------------------------------
print('-' * 40, 'List inherited attributes using dir', '-' * 40)
class Sub2(Super, ListInherited):
def __init__(self):
Super.__init__(self)
self.data2 = 'eggs'
self.data3 = 42
def spam(self):
pass
c = Sub2()
print(c)
# List the attributes of every object in the class tree -------------------------------------------------
print('-' * 40, 'List the attributes of every object in the class tree', '-' * 40)
class Sub3(Super, ListTree):
def __init__(self):
Super.__init__(self)
self.data2 = 'eggs'
self.data3 = 42
def spam(self):
pass
d = Sub3()
print(d)
|
import turtle as t
t.setup(600,600,300,200)
# canvas width and height, and its horizontal/vertical offsets from the top-left corner of the screen
t.width(2)
t.color('black')
t.left(45)
for i in range(2):
t.fd(150)
t.left(90)
# the arc is tangent to the current heading
t.circle(150,360/8)
t.left(90)
t.fd(150)
t.right(45)
t.seth(135)
for j in range(2):
t.fd(150)
t.left(90)
t.circle(150,360/8)
t.left(90)
t.fd(150)
t.right(45)
t.done()
|
#!/usr/bin/env python
import argparse
import requests
import sys
twitter_url = 'http://search.twitter.com/search.json?q=from:{username}'
parser = argparse.ArgumentParser(description='Fetch some tweets.')
parser.add_argument('--username', '-u', dest='username',
help='Twitter username to fetch', required=False)
def main(args):
## If a username wasn't passed in, prompt the user
username = args.username
if username is None:
print('What username would you like to display?')
username = input('> ')
message = '\nMost recent tweets from @{0}'.format(username)
print(message)
print('=' * len(message.strip()))
## Fetch the user's feed
response = requests.get(twitter_url.format(username=username))
data = response.json() # in requests >= 1.0, .json() is a method rather than a property
## No results? I has a sad. :(
if not data['results']:
print('\nSorry bub, nothing to display for @{0}\n'.format(username))
return
## Print the most recent tweets
for tweet in data['results'][:5]:
print(tweet['created_at'])
print(tweet['text'])
print('')
if __name__ == '__main__':
sys.exit(main(parser.parse_args()))
|
from django.core.management.base import BaseCommand
from review.models import Comment, Review
from titles.models import Category, Genre, Title
from users.models import CustomUser
from utils.csv_to_db import fill_db
class Command(BaseCommand):
help = 'Fill db'
def handle(self, *args, **options):
model_config = (
('data/users.csv', CustomUser),
('data/genre.csv', Genre),
('data/category.csv', Category),
('data/titles.csv', Title),
('data/genre_title.csv', Title.genre.through),
('data/review.csv', Review),
('data/comments.csv', Comment),
)
for config in model_config:
fill_db(*config)
|
# Generated by Django 3.0.6 on 2020-05-22 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Find',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('father_name', models.CharField(blank=True, max_length=50, null=True)),
('mother_name', models.CharField(blank=True, max_length=50, null=True)),
('age', models.IntegerField(blank=True, null=True)),
('image', models.ImageField(upload_to='')),
('state', models.CharField(max_length=50)),
('district', models.CharField(max_length=50)),
('city_or_village', models.CharField(max_length=100)),
# IntegerField takes no max_length; 12-digit Aadhaar numbers overflow IntegerField; a PAN is not an email address
('PIN_code', models.IntegerField()),
('aadhaar_no', models.BigIntegerField(blank=True, null=True)),
('pan_no', models.CharField(blank=True, max_length=10, null=True)),
('description', models.TextField(blank=True, max_length=500, null=True)),
],
),
migrations.CreateModel(
name='Lost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('father_name', models.CharField(blank=True, max_length=50, null=True)),
('mother_name', models.CharField(blank=True, max_length=50, null=True)),
('age', models.IntegerField(blank=True, null=True)),
('image', models.ImageField(upload_to='')),
('state', models.CharField(max_length=50)),
('district', models.CharField(max_length=50)),
('city_or_village', models.CharField(max_length=100)),
('PIN_code', models.IntegerField()),
('aadhaar_no', models.BigIntegerField(blank=True, null=True)),
('pan_no', models.CharField(blank=True, max_length=10, null=True)),
('description', models.TextField(blank=True, max_length=500, null=True)),
],
),
]
|
from django.urls import reverse_lazy
from django.views.generic import FormView
from django.contrib.auth import login as auth_login
from member.forms import SignupModelForm
__all__ = [
'SignupView',
]
class SignupView(FormView):
template_name = 'member/signup.html'
form_class = SignupModelForm
success_url = reverse_lazy('diary:month_calendar')
def form_valid(self, form):
user = form.save()
auth_login(self.request, user, backend='django.contrib.auth.backends.ModelBackend')
return super(SignupView, self).form_valid(form)
|
#!/usr/bin/python
# Preprocess labels, non-text and text features
import FeatureActualExtraction
import constants
from PreprocessingFunctions import label_transform, label_inv_transform, nonTextFeature_transform, nonTextFeature_nvalues, checkLabelsNFeatures
from sklearn.preprocessing import OneHotEncoder
#Features (non-text)
print('transforming actual features (non-text)...')
#train
FLfitConfFeaturesNonTextActual = nonTextFeature_transform(FeatureActualExtraction.FLconfidentialityNonTextActualFeatures)
CDfitConfFeaturesNonTextActual = nonTextFeature_transform(FeatureActualExtraction.CDconfidentialityNonTextActualFeatures)
RPfitConfFeaturesNonTextActual = nonTextFeature_transform(FeatureActualExtraction.RPconfidentialityNonTextActualFeatures)
RGfitConfFeaturesNonTextActual = nonTextFeature_transform(FeatureActualExtraction.RGconfidentialityNonTextActualFeatures)
FLfitIntFeaturesNonTextActual = nonTextFeature_transform(FeatureActualExtraction.FLintegrityNonTextActualFeatures)
CDfitIntFeaturesNonTextActual = nonTextFeature_transform(FeatureActualExtraction.CDintegrityNonTextActualFeatures)
RPfitIntFeaturesNonTextActual = nonTextFeature_transform(FeatureActualExtraction.RPintegrityNonTextActualFeatures)
RGfitIntFeaturesNonTextActual = nonTextFeature_transform(FeatureActualExtraction.RGintegrityNonTextActualFeatures)
FLfitAvailFeaturesNonTextActual = nonTextFeature_transform(FeatureActualExtraction.FLavailabilityNonTextActualFeatures)
CDfitAvailFeaturesNonTextActual = nonTextFeature_transform(FeatureActualExtraction.CDavailabilityNonTextActualFeatures)
RPfitAvailFeaturesNonTextActual = nonTextFeature_transform(FeatureActualExtraction.RPavailabilityNonTextActualFeatures)
RGfitAvailFeaturesNonTextActual = nonTextFeature_transform(FeatureActualExtraction.RGavailabilityNonTextActualFeatures)
print 'loading encoder model...'
from sklearn.externals import joblib
# filenames (encoders)
en_1 = constants.directory + '/FLconfEncoder.pkl'
en_2 = constants.directory + '/CDconfEncoder.pkl'
en_3 = constants.directory + '/RPconfEncoder.pkl'
en_4 = constants.directory + '/RGconfEncoder.pkl'
en_5 = constants.directory + '/FLintEncoder.pkl'
en_6 = constants.directory + '/CDintEncoder.pkl'
en_7 = constants.directory + '/RPintEncoder.pkl'
en_8 = constants.directory + '/RGintEncoder.pkl'
en_9 = constants.directory + '/FLavailEncoder.pkl'
en_10 = constants.directory + '/CDavailEncoder.pkl'
en_11 = constants.directory + '/RPavailEncoder.pkl'
en_12 = constants.directory + '/RGavailEncoder.pkl'
#one-hot encoding
FLconf_enc = joblib.load(en_1)
CDconf_enc = joblib.load(en_2)
RPconf_enc = joblib.load(en_3)
RGconf_enc = joblib.load(en_4)
FLint_enc = joblib.load(en_5)
CDint_enc = joblib.load(en_6)
RPint_enc = joblib.load(en_7)
RGint_enc = joblib.load(en_8)
FLavail_enc = joblib.load(en_9)
CDavail_enc = joblib.load(en_10)
RPavail_enc = joblib.load(en_11)
RGavail_enc = joblib.load(en_12)
print 'preprocessing actual features (non-text)...'
#transform actual values
FLtransConfFeaturesNonTextActual = FLconf_enc.transform(FLfitConfFeaturesNonTextActual).toarray()
CDtransConfFeaturesNonTextActual = CDconf_enc.transform(CDfitConfFeaturesNonTextActual).toarray()
RPtransConfFeaturesNonTextActual = RPconf_enc.transform(RPfitConfFeaturesNonTextActual).toarray()
RGtransConfFeaturesNonTextActual = RGconf_enc.transform(RGfitConfFeaturesNonTextActual).toarray()
FLtransIntFeaturesNonTextActual = FLint_enc.transform(FLfitIntFeaturesNonTextActual).toarray()
CDtransIntFeaturesNonTextActual = CDint_enc.transform(CDfitIntFeaturesNonTextActual).toarray()
RPtransIntFeaturesNonTextActual = RPint_enc.transform(RPfitIntFeaturesNonTextActual).toarray()
RGtransIntFeaturesNonTextActual = RGint_enc.transform(RGfitIntFeaturesNonTextActual).toarray()
FLtransAvailFeaturesNonTextActual = FLavail_enc.transform(FLfitAvailFeaturesNonTextActual).toarray()
CDtransAvailFeaturesNonTextActual = CDavail_enc.transform(CDfitAvailFeaturesNonTextActual).toarray()
RPtransAvailFeaturesNonTextActual = RPavail_enc.transform(RPfitAvailFeaturesNonTextActual).toarray()
RGtransAvailFeaturesNonTextActual = RGavail_enc.transform(RGfitAvailFeaturesNonTextActual).toarray()
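# Illustration (assumed data layout, not part of this pipeline): each loaded
# encoder is a OneHotEncoder fitted on integer-coded categorical columns, so
# transform() yields a sparse indicator matrix, e.g.
#   enc = OneHotEncoder()
#   enc.fit([[0, 1], [1, 0]])
#   enc.transform([[0, 1]]).toarray()  # -> [[1., 0., 0., 1.]]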
# #feature selection, to reduce overall dimensionality and computational time - TODO
#Features - text - TODO
|
#Create a form using input and raw_input
#name
#last name
#year of birth
#Favorite singer band
#favorite movie
#best friend
#calculate current age
#print everything in a string concatenating variables
#print the name 10 times
name = raw_input('What is your name? ')
lastname = raw_input('What is your last name? ')
yearofbirth = input('When were you born? ')
favoritesingerorband = raw_input('What is your favorite singer or band? ')
favoritemovie = raw_input('What is your favorite movie? ')
bestfriend = raw_input('Who is your best friend? ')
print 'You are', 2015 - yearofbirth, 'years old'
print 'Your name is', name, lastname, '. You were born in', yearofbirth, '. Your favorite singer or band is', favoritesingerorband, '. Your favorite movie is', favoritemovie, 'and your best friend is', bestfriend
print name*10
|
# -*- coding: utf-8 -*-
import os
import time
import socket
import subprocess
from wifi import Cell
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.conf import settings
from .models import Info, Robot
from .robot_server import Server, find_local_ip, check_url, robot_logs
from .wpa_wifi import Network, Fileconf
# Create your views here.
#create context the first time
try :
context
except NameError :
context = {'valid' : False}
def check_context():
if not context['valid'] : start()
#function to load the global context
def start():
robot = Robot.objects.get(alive=True)
context['server_snap'] = Server('snap',robot)
context['server_jupyter'] = Server('jupyter',robot)
context['server_rest'] = Server('http',robot)
context.update({'info' : Info.objects.get(), 'robot' : robot , 'url_for_index' : '/','valid' : True})
def index(request):
check_context()
# Adding new context specific to the view here :
rest = request.GET.get('rest','go')
if rest=='stop' :
context['server_rest'].stop()
context.update({ 'url_for_index' : '/'})
context['server_snap'].stop()
context['server_jupyter'].stop(8989)
context.update({ 'message' : None})
return render(request, 'app1/index.html',context)
def snap(request):
check_context()
# Adding new context specific to the view here :
context['server_jupyter'].stop(8989)
context['server_snap'].start()
for i in range(20):
if check_url('http://localhost:6969') :
break
time.sleep(1)
iframe_src = '/static/snap/snap.html#open:http://'+find_local_ip()+':6969/snap-blocks.xml'
context.update({'iframe_src' : iframe_src })
return render(request, 'app1/base-iframe.html', context)
def jupyter(request):
check_context()
# Adding new context specific to the view here :
context['server_snap'].stop()
token = context['server_jupyter'].start(get='token')
for i in range(10):
if check_url('http://localhost:8989') :
break
time.sleep(1)
iframe_src = 'http://{}:8989/?token={}'.format(find_local_ip(),token)
context.update({'iframe_src' : iframe_src })
return render(request, 'app1/base-iframe.html', context)
def monitor(request):
check_context()
context['server_rest'].start()
for i in range(10):
if check_url('http://localhost:8080') :
break
time.sleep(1)
iframe_src = '/static/monitor/'+context['robot'].brand.lower()+'-'+context['robot'].creature.lower()+'.html#open=http://'+find_local_ip()+':8080'
context.update({'iframe_src' : iframe_src, 'url_for_index' : '/?rest=stop' })
return render(request, 'app1/base-iframe.html', context)
def rest(request):
check_context()
rest_action = request.POST.get('rest_action',False)
if rest_action=='stop': context['server_rest'].stop()
else : context['server_rest'].start()
context.update({ 'logs_rest' : '/rest/raw/', 'url_rest' : '/rest/state/'})
return render(request, 'app1/rest.html', context)
def rest_state(request):
return HttpResponse(context['server_rest'].state())
def rest_raw(request):
raw=''
if context['server_rest'].daemon.pid==-1 : return HttpResponse(raw)
with open(os.path.join(settings.LOG_ROOT, context['server_rest'].daemon.logfile+
context['server_rest'].daemon.type+'_'+context['robot'].creature+'_'+context['robot'].type+'.log'), 'r') as log:
u = log.readlines()
for l in u :
try :
raw += l+'<br>'
except UnicodeDecodeError:
pass
return HttpResponse(raw)
def configuration(request):
check_context()
try:
connect = subprocess.check_output(['iwgetid', '-r'])
except :
connect = 'none'
pass
try :
# works only on linux system
wifi = list(Cell.all('wlan0'))
conf = Fileconf.from_file('/etc/wpa_supplicant/wpa_supplicant.conf')
except :
# give fake values on windows platform
context.update({'ip' : find_local_ip(),'hostname' : socket.gethostname(),
'wifi' : [{ 'ssid' : 'test network' , 'quality' : '0/70' , 'encrypted' : 'secure' },{ 'ssid' : 'reseau test2' , 'quality' : '0/70' , 'encrypted' : 'secure' }], 'conf' : [{'ssid' : 'reseau test', 'opts' : {'priority' : '1'}},{'ssid' : 'reseau test2', 'opts' : {'priority' : '5'}},], 'connect' : 'reseau test' })
pass
else :
# Adding new context specific to the view here :
context.update({'ip' : find_local_ip(),'hostname' : socket.gethostname(),
'wifi' : wifi, 'conf' : conf.network_list, 'connect' : connect })
return render(request, 'app1/settings.html', context)
def wifi_add(request):
try :
# works only on linux system
conf = Fileconf.from_file('/etc/wpa_supplicant/wpa_supplicant.conf')
except :
# give fake values on windows platform
pass
return HttpResponseRedirect('/settings')
wifi_ssid = request.POST['wifi_ssid']
wifi_psk = request.POST['wifi_psk']
wifi_priority = request.POST['wifi_priority']
opts = {}
if wifi_psk != '' : opts = { 'psk' : wifi_psk, }
if wifi_priority != 'Aucune' : opts.update({'priority' : wifi_priority})
(res, msg) = conf.add(wifi_ssid, **opts)
if res : conf.make_new()
message = { 'ok' : None, 'ssid' : "Wrong network name !", 'psk' : "Wrong password !"}
context.update({ 'message' : message[msg], 'category' : 'warning'})
return HttpResponseRedirect('/settings')
def wifi_suppr(request):
try :
# works only on linux system
conf = Fileconf.from_file('/etc/wpa_supplicant/wpa_supplicant.conf')
except :
        # give fake values on windows platform
pass
return HttpResponseRedirect('/settings')
wifi_ssid = request.POST['wifi_ssid']
res = conf.suppr(wifi_ssid)
if res : conf.make_new()
message = { True : "Network deleted" , False : "Can't suppress the network !"}
context.update({ 'message' : message[res], 'category' : 'success'})
return HttpResponseRedirect('/settings')
def wifi_restart(request):
try :
res1 = subprocess.call(['sudo', 'ifdown', 'wlan0'])
time.sleep(1)
res2 = subprocess.call(['sudo', 'ifup', 'wlan0'])
except :
# return on fail (windows)
pass
return HttpResponseRedirect('/settings')
if res1 == 0 and res2 == 0 : context.update({ 'message' : 'Wifi restarted', 'category' : 'success'})
else : context.update({ 'message' : 'Unable to restart wifi', 'category' : 'warning'})
return HttpResponseRedirect('/settings')
def logs(request):
check_context()
snap = context['server_snap'].state()
jupyter = context['server_jupyter'].state()
rest = context['server_rest'].state()
context.update({'url_logs' : '/logs/raw/', 'snap' : snap, 'jupyter' : jupyter, 'rest' : rest})
return render(request, 'app1/logs.html', context)
def rawlogs(request):
raw = robot_logs(context['robot'])
return HttpResponse(raw)
def reboot(request):
try :
command = '(sleep 2 ; sudo reboot) &'
subprocess.call(command, shell=True)
except :
# return on fail (windows)
pass
return HttpResponseRedirect('/')
def shutdown(request):
try :
subprocess.call(['sudo', 'halt'])
except :
# return on fail (windows)
pass
return HttpResponseRedirect('/')
|
import json
from django.db.models.aggregates import Count
from django.shortcuts import get_object_or_404
from rest_framework import generics
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny, IsAdminUser, IsAuthenticated
from rest_framework.response import Response
from dataprocessing.models import User
from workprogramsapp.expertise.models import Expertise
from workprogramsapp.models import WorkProgram, WorkProgramInFieldOfStudy, AcademicPlan, DisciplineBlock, \
DisciplineBlockModule, WorkProgramChangeInDisciplineBlockModule, ImplementationAcademicPlan, FieldOfStudy, \
СertificationEvaluationTool
from workprogramsapp.statistic.serializers import WorkProgramInFieldOfStudySerializerForStatistic, \
WorkProgramSerializerForStatistic, SuperShortWorkProgramSerializer, WorkProgramSerializerForStatisticExtended, \
AcademicPlansDescriptionWpSerializer, WorkProgramPrerequisitesAndOutcomesSerializer, \
WorkProgramDescriptionOnlySerializer, \
ImplementationAcademicPlanWpStatisticSerializer
from workprogramsapp.workprogram_additions.models import StructuralUnit
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def EmptyStringWp(request):
"""
    API request to list work programs (WPs) that have no string id
"""
empty_wp = (WorkProgramInFieldOfStudy.objects.filter(work_program__editors__isnull=False,
id_str_up__isnull=True)).distinct()
serializer = WorkProgramInFieldOfStudySerializerForStatistic(empty_wp, many=True)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def WpWithoutAP(request):
"""
    API request to list WPs that are missing from any academic plan (AP)
"""
empty_wp = (WorkProgram.objects.filter(zuns_for_wp=None,
editors__isnull=False)).distinct()
serializer = WorkProgramSerializerForStatistic(empty_wp, many=True)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def WpWithSimilarCode(request):
"""
    API request to list WPs that share the same discipline code
"""
wp_counter_code = WorkProgram.objects.all().values('discipline_code').annotate(
total=Count('discipline_code')).filter(total__gt=1)
print(wp_counter_code)
similar_codes = []
for wp in wp_counter_code:
similar_codes.append(wp['discipline_code'])
similar_wp = WorkProgram.objects.filter(discipline_code__in=similar_codes).order_by("discipline_code")
serializer = WorkProgramSerializerForStatistic(similar_wp, many=True)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def SimpleStatistic(request):
"""
    API request for assorted statistics on WPs and users
"""
registered_users = User.objects.count()
rpd_users = User.objects.filter(editors__isnull=False).distinct().count()
on_expertise = Expertise.objects.filter(expertise_status="EX").count()
approved = Expertise.objects.filter(expertise_status="AC").count()
in_work = Expertise.objects.filter(expertise_status="WK").count() + WorkProgram.objects.filter(
expertise_with_rpd__isnull=True).distinct().count()
editors_rpd = WorkProgram.objects.filter(editors__isnull=False).count()
return Response(
{
"registered_users": registered_users,
"users_in_rpd": rpd_users,
"rpd_with_editors": editors_rpd,
"rpd_on_expertise": on_expertise,
"rpd_approved": approved,
"rpd_in_work": in_work
}
)
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def WpWithoutStructuralUnit(request):
"""
    API request to list WPs that have no structural unit
"""
wp_without_unit = WorkProgram.objects.filter(structural_unit__isnull=True)
serializer = WorkProgramSerializerForStatistic(wp_without_unit, many=True)
# print(serializer.data)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def StructuralUnitWp(request):
"""
    API request to list WPs by structural unit; can be filtered via query string parameters
    Filter field: status - WP status
    Values: EX - under expertise, AC - approved, WK - in progress
    Example request:
    https://op.itmo.ru/api/statistic/structural/workprogram?status=EX - all WPs from structural units that are under expertise
"""
try:
status_filter = request.query_params["status"]
except KeyError:
status_filter = ""
units = StructuralUnit.objects.all()
result = []
for unit in units:
if status_filter == "WK":
needed_wp = (WorkProgram.objects.filter(expertise_with_rpd__isnull=True,
structural_unit=unit) | WorkProgram.objects.filter(
expertise_with_rpd__expertise_status__contains=status_filter,
structural_unit=unit)).distinct()
elif status_filter == "":
needed_wp = WorkProgram.objects.filter(structural_unit=unit).distinct()
else:
needed_wp = WorkProgram.objects.filter(expertise_with_rpd__expertise_status__contains=status_filter,
structural_unit=unit).distinct()
serializer = WorkProgramSerializerForStatistic(needed_wp, many=True)
result.append({"id": unit.id,
"title": unit.title,
"work_programs": serializer.data})
return Response(result)
@api_view(['GET'])
@permission_classes((IsAdminUser,))
def FieldOfStudyPlanToISU(request, pk):
"""
    Convert our data into ISU-like data
"""
"""code = request.data.get('field_of_study_code')
year = request.data.get('year')
academic_plan_id = request.data.get('academic_plan_id')"""
implementation_list = []
all_imp = ImplementationAcademicPlan.objects.all()
imp_len = all_imp.count()
from_len = pk * 20
end_len = from_len + 20 if from_len + 20 < imp_len else imp_len
# all_imp = all_imp[from_len:end_len]
for implementation in all_imp:
academic_plan = AcademicPlan.objects.get(pk=implementation.academic_plan.id)
field_of_study = FieldOfStudy.objects.get(pk=implementation.field_of_study.id)
wp_isu_list = []
for block in DisciplineBlock.objects.filter(academic_plan=academic_plan):
for module in DisciplineBlockModule.objects.filter(descipline_block=block):
for change in WorkProgramChangeInDisciplineBlockModule.objects.filter(discipline_block_module=module):
for wp_field in WorkProgramInFieldOfStudy.objects.filter(
work_program_change_in_discipline_block_module=change):
for wp in WorkProgram.objects.filter(zuns_for_wp=wp_field):
try:
struct_unit = StructuralUnit.objects.get(pk=wp.structural_unit.id)
except AttributeError:
struct_unit = None
isu_id = None
struct_title = None
if wp.language == "ru":
language = "Русский"
elif wp.language == "en":
language = "Английский"
elif wp.language == "kz":
language = "Казахский"
elif wp.language == "de":
language = "Немецкий"
else:
language = "Русский/Английский"
wp_isu_list.append(
{"УНИКАЛЬНЫЙ_КОД": wp.discipline_code,
"ИД_ИМПЛЕМЕНТАЦИЯ_АНАЛИТИКА": implementation.id,
"ИД_УП_АНАЛИТИКА": academic_plan.id,
"ИД_РПД_АНАЛИТИКА": wp.id,
"ИД_УП": academic_plan.ap_isu_id,
                     # PLAN TYPE
# "НАПР_ИД": ,
"НС_ИД": implementation.ns_id,
"ШИФР_НАПРАВЛЕНИЯ": field_of_study.number,
"НАПРАВЛЕНИЕ_ПОДГОТОВКИ": field_of_study.title,
"ОП_ИД": implementation.op_isu_id,
"ОБРАЗОВАТЕЛЬНАЯ_ПРОГРАММА": academic_plan.educational_profile,
# "ФАК_ИД": 768
"ФАКУЛЬТЕТ": field_of_study.faculty,
"СРОК_ОБУЧЕНИЯ": 4.0,
# "ВУЗ_ПАРТНЕР": null,
# "СТРАНА_ВУЗА_ПАРТНЕРА": null,
# "ЯЗЫК_ОБУЧЕНИЯ": language,
# "ВОЕННАЯ_КАФЕДРА": 1,
# "ОБЩАЯ_ТРУДОЕМКОСТЬ": "240 з.е.",
# "ОГНП_ИД": int(wp.subject_code.split(".")[1]),
# "ОГНП": "Фотоника"
"ГОД_НАБОРА": implementation.year,
# "БЛОК_ИД": 1
"НАИМЕНОВАНИЕ_БЛОКА": block.name,
# "МОДУЛЬ_ИД": 2
"НАИМЕНОВАНИЕ_МОДУЛЯ": module.name,
"ИД_СТР_УП": wp_field.id_str_up,
# "ВЫБОР": 0,
# "НОМЕР_ПО_ПЛАНУ": "1",
"ДИС_ИД": wp.wp_isu_id,
"ЗЕ": [token for token in
change.credit_units.split(',')] if change.credit_units else None,
"ЭКЗ": [cerf.semester for cerf in
СertificationEvaluationTool.objects.filter(work_program=wp, type=1)],
"ДИФ_ЗАЧЕТ": [cerf.semester for cerf in
СertificationEvaluationTool.objects.filter(work_program=wp, type=2)],
"ЗАЧЕТ": [cerf.semester for cerf in
СertificationEvaluationTool.objects.filter(work_program=wp, type=3)],
"КП": [cerf.semester for cerf in
СertificationEvaluationTool.objects.filter(work_program=wp, type=4)],
"ЛЕК": [float(token) for token in
wp.lecture_hours.split(",")] if wp.lecture_hours else [],
"ЛАБ": [float(token) for token in
wp.lab_hours.split(",")] if wp.lecture_hours else [],
"ПРАКТ": [float(token) for token in
wp.practice_hours.split(",")] if wp.lecture_hours else [],
"ДИСЦИПЛИНА": wp.title,
"ИД_ИСПОЛНИТЕЛЯ_ДИС": struct_unit.isu_id if struct_unit else None,
"ИСПОЛНИТЕЛЬ_ДИС": struct_unit.title if struct_unit else None,
"ЯЗЫК_ДИСЦИПЛИНЫ": language
}
)
implementation_list.append(wp_isu_list)
print("step complete")
# print(serializer.data)
print(len(implementation_list))
print(ImplementationAcademicPlan.objects.all().count())
with open('ap_all.json', 'w', encoding="utf-8") as file:
file.write(json.dumps(implementation_list, ensure_ascii=False, indent=4)) # use `json.loads` to do the reverse
file.close()
    return Response("I really love chocolate nuts")
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def AllWpShort(request):
wp = WorkProgram.objects.all()
serializer = SuperShortWorkProgramSerializer(wp, many=True)
return Response(serializer.data)
class WorkProgramDetailsWithApAndSemesters(generics.ListAPIView):
"""
    Filtered query for WPs in structural units
    -----------------------------------------------------
    Required parameters:
    structural_unit_id - ids of the structural units whose WPs are requested; may be repeated
    Optional parameters:
    year - year of the academic plan in which the WP runs; may be repeated
    semester - semester in which the WP runs
    status - WP status (EX - under expertise, AC - approved, WK - in progress), single value only
    Example request:
    http://127.0.0.1:8000/api/statistic/structural/workprogram_extend?structural_unit_id=5&semester=5&year=2020&year=2019
    All WPs from structural unit 5, running in semester 5, for the 2020 and 2019 academic plans
"""
queryset = WorkProgram.objects.all()
serializer_class = WorkProgramSerializerForStatisticExtended
permission_classes = [IsAuthenticated]
def get_queryset(self):
print(self.request.query_params)
status_filter = self.request.query_params["status"] if "status" in self.request.query_params else ""
structural_unit_id = self.request.query_params.getlist(
"structural_unit_id") if "structural_unit_id" in self.request.query_params else []
year = self.request.query_params.getlist("year") if "year" in self.request.query_params \
else [x for x in range(2000, 2050)]
semester = self.request.query_params.getlist("semester") if "semester" in self.request.query_params else [-1]
cred_regex = r""
structural_unit_id = [int(x) for x in structural_unit_id]
print(structural_unit_id)
for i in range(12):
if str(i + 1) in semester:
cred_regex += "[^0]\.[0-9],\s"
else:
cred_regex += "(([0-9]\.[0-9])|[0]),\s"
cred_regex = cred_regex[:-3]
print(cred_regex)
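        # For example, with semester == ["5"] the built regex demands a
        # non-zero credit value ("[^0]\.[0-9]") in position 5 and accepts
        # "([0-9]\.[0-9])|[0]" in the other eleven comma-separated positions.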
if status_filter == "WK":
needed_wp = (WorkProgram.objects.filter(expertise_with_rpd__isnull=True,
zuns_for_wp__work_program_change_in_discipline_block_module__discipline_block_module__descipline_block__academic_plan__academic_plan_in_field_of_study__year__in=year,
structural_unit__in=structural_unit_id,
zuns_for_wp__zuns_for_wp__ze_v_sem__iregex=cred_regex) |
WorkProgram.objects.filter(
expertise_with_rpd__expertise_status__contains=status_filter,
zuns_for_wp__work_program_change_in_discipline_block_module__discipline_block_module__descipline_block__academic_plan__academic_plan_in_field_of_study__year__in=year,
structural_unit__in=structural_unit_id,
zuns_for_wp__zuns_for_wp__ze_v_sem__iregex=cred_regex)).distinct()
elif status_filter == "":
needed_wp = WorkProgram.objects.filter(structural_unit__in=structural_unit_id,
zuns_for_wp__work_program_change_in_discipline_block_module__discipline_block_module__descipline_block__academic_plan__academic_plan_in_field_of_study__year__in=year,
zuns_for_wp__zuns_for_wp__ze_v_sem__iregex=cred_regex).distinct()
else:
needed_wp = WorkProgram.objects.filter(expertise_with_rpd__expertise_status__contains=status_filter,
zuns_for_wp__work_program_change_in_discipline_block_module__discipline_block_module__descipline_block__academic_plan__academic_plan_in_field_of_study__year__in=year,
structural_unit__in=structural_unit_id,
zuns_for_wp__zuns_for_wp__ze_v_sem__iregex=cred_regex).distinct()
return needed_wp
class OneAcademicPlanWithDescriptionWp(generics.RetrieveAPIView):
"""
    Retrieve a specific academic plan by its id, with all WP descriptions
"""
queryset = AcademicPlan.objects.all()
serializer_class = AcademicPlansDescriptionWpSerializer
permission_classes = [IsAuthenticated]
class AllAcademicPlanWithDescriptionWp(generics.ListAPIView):
"""
    Retrieve all academic plans with all WP descriptions
"""
queryset = AcademicPlan.objects.all()
serializer_class = AcademicPlansDescriptionWpSerializer
permission_classes = [IsAuthenticated]
class GetPrerequisitesAndOutcomesOfWpByStrUP(generics.RetrieveAPIView):
"""
    Retrieve the prerequisites and outcomes of a WP by its STR_UP_ID
"""
queryset = WorkProgram.objects.all()
serializer_class = WorkProgramPrerequisitesAndOutcomesSerializer
permission_classes = [AllowAny]
def get_queryset(self):
pk = self.kwargs['pk']
return WorkProgram.objects.filter(zuns_for_wp__zuns_for_wp__id_str_up=pk)
def get_object(self):
queryset = self.get_queryset()
obj = get_object_or_404(queryset)
self.check_object_permissions(self.request, obj)
return obj
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def EditorsByWPStatuses(request):
"""
    Editors with information on the statuses of their WPs (AC: accepted, EX: under expertise,
    WK: needs revision, NO_EXP: never sent for expertise)
"""
editors_status_list = []
editors = User.objects.filter(editors__isnull=False).distinct()
for editor in editors:
expertise_of_editor = list(Expertise.objects.filter(work_program__editors=editor).distinct().values(
"expertise_status").annotate(total=Count("expertise_status")))
no_exp = {'expertise_status': 'NO_EXP', 'total': int(
WorkProgram.objects.filter(expertise_with_rpd__isnull=True, editors=editor).distinct().count())}
        if no_exp['total'] > 0:
            expertise_of_editor.append(no_exp)
editors_status_list.append(
{
"editor": {"id": editor.id, "name": editor.first_name + " " + editor.last_name, },
"statuses_count": expertise_of_editor
}
)
return Response(editors_status_list)
class GetAllWPsByEditor(generics.ListAPIView):
"""
    Lists all WPs of the editor with the given id
"""
queryset = WorkProgram.objects.all()
serializer_class = WorkProgramDescriptionOnlySerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
pk = self.kwargs['pk']
return WorkProgram.objects.filter(editors__pk=pk)
class GetAllWPsWithEmptyField(generics.ListAPIView):
"""
    List all WPs that have a specific empty field.
    Set the `field` query parameter to choose which empty field to filter on.
    Currently supported fields:
    ED - editors
    LANG - language
    --------------------------------------------------------
    Example: list all WPs without editors:
    /api/statistic/workprogram/empty_field_wp?field=ED
"""
queryset = WorkProgram.objects.all()
serializer_class = WorkProgramDescriptionOnlySerializer
permission_classes = [IsAuthenticated]
    def get_queryset(self):
        field = self.request.query_params.get("field", "")
        if field == "ED":
            return WorkProgram.objects.filter(editors__isnull=True)
        if field == "LANG":
            return WorkProgram.objects.filter(language__isnull=True)
        return WorkProgram.objects.none()
class AllAcademicPlansWpExpertiseStatisticView(generics.ListAPIView):
queryset = ImplementationAcademicPlan.objects.all()
serializer_class = ImplementationAcademicPlanWpStatisticSerializer
permission_classes = [AllowAny]
|
import os
import json
from functools import reduce
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nltk
import nltk.stem as ns
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
texts = []
for root,dirs,files in os.walk('./script'):
for file in files:
with open('./script/{0}'.format(file)) as f:
texts.append((file[:-4],f.read()))
js = open("./glove.6B.50d_word2id.json", encoding='utf-8')
setting = json.load(js)
for title,text in texts:
#if n <27:
# continue
print(title)
sentences = nltk.sent_tokenize(text)
word_tags = [nltk.pos_tag(nltk.word_tokenize(sent)) for sent in sentences]
n_list = ['NN','NNP','NNS']
N =[word[0] for i in range(len(word_tags)) for word in word_tags[i] if word[1] in n_list]
N = list(set(N))
v_list = ['VB','VBD','VBG','VBN','VBP','VBZ']
V= [word[0] for i in range(len(word_tags)) for word in word_tags[i] if word[1] in v_list]
lemmatizer = ns.WordNetLemmatizer()
n_lemma = [lemmatizer.lemmatize(word, pos='n') for word in N]
v_lemma = [lemmatizer.lemmatize(word, pos='v') for word in V]
n_lemma_lower = [noun.lower() for noun in n_lemma]
v_lemma_lower = [verb.lower() for verb in v_lemma]
v_lemma_lower = list(set(v_lemma_lower))
print(len(v_lemma_lower))
    #drop verbs that are missing from the glove json vocabulary, then map the
    #survivors to their vocabulary ids (keeps words and ids aligned for plotting)
    v_lemma_lower = [w for w in v_lemma_lower if w in setting]
    print(len(v_lemma_lower))
    v_word2id = [(w, setting[w]) for w in v_lemma_lower]
    print(len(v_word2id))
pre_train = np.load("./glove.6B.50d_mat.npy", allow_pickle=True, encoding="latin1")
#pre_train = np.loadtxt("./glove.42B.300d.txt", encoding="latin1")
X = map(lambda x: pre_train[x], [v[1] for v in v_word2id])
Y = reduce(lambda x, y: np.vstack((x, y)), X)
print(Y.shape)
pca = PCA(n_components=2)
Z = pca.fit_transform(Y)
pos = pd.DataFrame()
pos['X'] = Z[:, 0]
pos['Y'] = Z[:, 1]
#plt.scatter(pos['X'], pos['Y'], )
estimator = KMeans(n_clusters=5)
estimator.fit(Z)
j = 0
x = []
y = []
    # x and y coordinates of the points to plot
for j in range(len(Z)):
x.append(Z[j:j + 1, 0])
y.append(Z[j:j + 1, 1])
#print(x)
#print(y)
    label_pred = estimator.labels_  # cluster label for each point
    centroids = estimator.cluster_centers_  # cluster centers
    inertia = estimator.inertia_  # sum of squared distances to the closest centers
mark = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '<r', 'pr']
    # in 'or', the 'o' draws circles and the 'r' means red; the rest follow the same pattern
plt.figure(figsize=(30, 30))
color = 0
j = 0
for i in label_pred:
plt.plot(x[j], y[j], mark[i], markersize=5)
j += 1
    # label the scatter points with their words
with open('log.txt', 'a') as f: f.writelines(' '.join(v_lemma_lower))
#v_lemma_lower.reverse()
print('=' * 20)
print(len(Z))
print(len(v_lemma_lower))
for k in range(len(Z)):
plt.text(x[k], y[k], v_lemma_lower[k])
if not os.path.exists('./img'):
os.mkdir('./img')
plt.savefig('./img/{0}_50_tai.jpg'.format(title))
plt.show()
plt.close()
js.close()
|
from abc import ABC, abstractmethod
class AbstractFactory(ABC):
@abstractmethod
def create_product_a(self): pass
@abstractmethod
def create_product_b(self): pass
class ConcreteFactoryOne(AbstractFactory):
def create_product_a(self):
return ConcreteProductA()
def create_product_b(self):
return ConcreteProductB()
class ConcreteFactoryTwo(AbstractFactory):
def create_product_a(self):
return ConcreteProductATwo()
def create_product_b(self):
return ConcreteProductBTwo()
class AbstractProductA(ABC):
@abstractmethod
def interface_a(self): pass
class ConcreteProductA(AbstractProductA):
def interface_a(self): pass
class ConcreteProductATwo(AbstractProductA):
def interface_a(self): pass
class AbstractProductB(ABC):
@abstractmethod
def interface_b(self): pass
class ConcreteProductB(AbstractProductB):
def interface_b(self):
pass
class ConcreteProductBTwo(AbstractProductB):
def interface_b(self):
pass
if __name__ == '__main__':
    for factory_cls in (ConcreteFactoryOne, ConcreteFactoryTwo):
        factory = factory_cls()
        product_a = factory.create_product_a()
        product_b = factory.create_product_b()
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by 'bens3' on 2013-06-21.
Copyright (c) 2013 'bens3'. All rights reserved.
python test_csv.py MongoTestTask --local-scheduler
"""
import sys
import os
import luigi
from ke2mongo.tasks.csv import CSVTask
from ke2mongo.tests.tasks.mongo_test import MongoTestTask
class CSVTestTask(CSVTask):
"""
    Class for exporting KE Mongo data to CSV
    This requires that all mongo files have been imported into MongoDB
"""
# Date to process
mongo_db = 'test'
collection_name = 'test'
# date = None
columns = [
('_id', '_id', 'int32'),
('SummaryData', 'SummaryData', 'string:100')
]
query = {}
def requires(self):
yield MongoTestTask()
def process_dataframe(self, m, df):
print df['SummaryData']
return df
|
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
import pandas as pd  # for data manipulation and analysis
import numpy as np  # for data vectors and n-dimensional arrays
import matplotlib.pyplot as plt  # for generating plots
import seaborn as sns  # for data visualization
import platform
def Carga():
style = ttk.Style()
style.configure("TLabelframe", background="#A9E2F3")
labelCarga.grid(column =0, row =1, padx=10,pady=5)
buttonCarga = ttk.Button(labelCarga, text="Upload", command=fileDialog)
buttonCarga.grid(column =1, row =1)
def fileDialog():
global labelArch, filename, datosAnalizar
if (platform.system() != "Windows"):
filename = filedialog.askopenfilename(initialdir=".",title="Selecciona un archivo")
else:
        filename = filedialog.askopenfilename(initialdir=".", title="Selecciona un archivo", filetypes=(("csv", "*.csv"), ("All files", "*")))
labelArch=ttk.Label(labelCarga, text="Se seleccionó archivo:")
labelArch.grid(column =2, row =1)
label=ttk.Label(labelCarga, text="")
label.grid(column =3, row =1)
label.configure(text = filename)
    try:
        datosAnalizar = pd.read_csv(filename)
    except Exception:
        messagebox.showerror('Error', 'Archivo no válido, seleccione uno nuevo')
        return
    EtapaAnalisis()
def EtapaAnalisis():
labelEDA.configure(text="Analizando archivo "+filename)
labelAnalisis = ttk.LabelFrame(labelEDA, text="Datos Disponibles")
labelAnalisis.grid(column =1, row =1)
tabla=ttk.Treeview(labelAnalisis,columns=tuple(datosAnalizar.columns))
for i in range(len(datosAnalizar.index.values)-1,len(datosAnalizar.index.values)-101,-1):
tabla.insert("",0,text=i,value=tuple(datosAnalizar.values[i]))
for i in range(100,0,-1):
tabla.insert("",0,text=i,value=tuple(datosAnalizar.values[i]))
tabla.pack()
labelData = ttk.LabelFrame(labelEDA, text="Descripción de Datos")
labelData.grid(column =1, row =3)
def EDA():
style = ttk.Style()
style.configure("TLabelframe", background="#A9E2F3")
labelEDA.grid(column =0, row =1, padx=10,pady=5)
def Window():
global labelEDA, labelCarga, tabla
ventana=Tk()
ventana.title("Mineria de Datos")
if (platform.system() == "Windows"):
ventana.iconbitmap("./MD.ico")
ventana.geometry("700x450")
ventana.config(bg="#cfeafa")
notebook = ttk.Notebook()
labelCarga=ttk.LabelFrame(text="Ingresa el archivo para analizar")
labelEDA=ttk.LabelFrame(text="Analizando Archivo")
Carga()
EDA()
notebook.add(labelCarga, text="Carga Datos", padding=20)
notebook.add(labelEDA, text="AnalisisDatos", padding=20)
notebook.pack(padx=10, pady=10)
ventana.mainloop()
if __name__=="__main__":
wind = Window()
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the jumpingOnClouds function below.
def jumpingOnClouds(c):
    #the first and last elements are always 0
    #possible last three elements: 010, 000, 100 (110 cannot occur)
    #once we reach the third-to-last cloud, we need one more jump
    #from the fourth-to-last cloud (0010, 0000, 0100)
    #we always need exactly two more jumps
i = 0
count = 0
while i < len(c)-2:
if i == len(c)-3:
count += 1
break
elif i == len(c)-4:
count += 2
break
else:
if c[i+2] == 0:
i += 2
count += 1
else:
count += 1
i += 1
return max(count, 1) #there is always at least one step
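#quick sanity checks (hand-traced against the function above):
#jumpingOnClouds([0, 0, 1, 0, 0, 1, 0]) -> 4
#jumpingOnClouds([0, 0, 0, 0, 1, 0]) -> 3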
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
c = list(map(int, input().rstrip().split()))
result = jumpingOnClouds(c)
fptr.write(str(result) + '\n')
fptr.close()
|
"""
COG INVASION ONLINE
Copyright (c) CIO Team. All rights reserved.
@file ScreenshotHandler.py
@author Maverick Liberty
@date April 19, 2016
@desc System used to combat problems that occur when taking
screenshots in the same thread as everything else is running in.
"""
from datetime import datetime
from panda3d.core import Filename
from direct.interval.IntervalGlobal import Sequence, Wait, Func
from threading import Thread
import os
FILEPATH = 'screenshots/'
flashSeq = Sequence()
flashSfx = None
# Let's make sure the screenshots directory exists.
if not os.path.exists(FILEPATH[:-1]):
os.makedirs(FILEPATH[:-1])
def __doEffects():
    global flashSfx, flashSeq
if not flashSfx:
flashSfx = base.loadSfx('phase_4/audio/sfx/Photo_shutter.ogg')
flashSeq = Sequence(
Func(flashSfx.play),
Func(base.transitions.setFadeColor, 1, 1, 1),
Func(base.transitions.fadeOut, 0.1),
Wait(0.1),
Func(base.transitions.fadeIn, 0.1),
Wait(0.1),
Func(base.transitions.setFadeColor, 0, 0, 0),
)
flashSeq.start()
def __saveScreenshot(shot):
now = datetime.now().strftime(FILEPATH + 'screenshot-%a-%b-%d-%Y-%I-%M-%S-%f')
shot.write(Filename(now + '.png'))
return
def takeScreenshot(win = None, effect = True):
if not win:
win = base.win
shot = win.getScreenshot()
thread = Thread(target = __saveScreenshot, args = (shot,))
thread.start()
#if effect:
# __doEffects()
|
#!/usr/bin/env python
"""
decrypts stored passwords used by Psi messenger
(psi-im.org)
the idea is from
https://www.georglutz.de/blog/2005/07/01/recover-lost-jabber-passwords-in-psis-config-files/
Copyright © 2019 Jose Riha <jose1711 gmail com>
This work is free. You can redistribute it and/or modify it under the
terms of the Do What The Fuck You Want To Public License, Version 2,
as published by Sam Hocevar. See http://www.wtfpl.net/ for more details.
"""
import xml.etree.ElementTree as ET
import argparse
from itertools import cycle
description = '''
Decrypt passwords from accounts.xml file
provided as an argument. Account jids and
passwords are sent to stdout.
Example:
%(prog)s ~/.config/psi/profiles/default/accounts.xml
'''
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('accounts_file', nargs=1, help='path to accounts.xml file')
args = parser.parse_args()
tree = ET.parse(args.accounts_file[0])
root = tree.getroot()
ns = {'psi': 'http://psi-im.org/options'}
def decodePassword(password, jid):
result = ''
jid = cycle(jid)
for n1 in range(0, len(password), 4):
x = int(password[n1:n1+4], 16)
result += chr(x ^ ord(next(jid)))
return result
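# Illustration only: the inverse of decodePassword (a hypothetical helper,
# not part of Psi itself) -- XOR each character with the cycled jid and emit
# four hex digits per character; assumes characters fit in four hex digits.
def encodePassword(plaintext, jid):
    jid = cycle(jid)
    return ''.join('%04x' % (ord(ch) ^ ord(next(jid))) for ch in plaintext)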
for el in root.findall('.//*/psi:password/..', ns):
try:
password = el.find('./psi:password', ns).text
jid = el.find('./psi:jid', ns).text
except AttributeError:
continue
print(jid, decodePassword(password, jid))
|
ck = ""
cs = ""
ak = ""
ast = ""
recent_id = 0
recent_date = ""
recent_date1 = ""
|
class Event:
    """A named event that fires `delay` time units after `time`."""
    def __init__(self, time, delay, name):
        self.time = time            # creation time
        self.when = time + delay    # absolute time at which the event fires
        self.delay = delay
        self.name = name
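# Illustrative usage: an event created at time 10 with delay 5 fires at 15:
#   e = Event(10, 5, "tick")
#   e.when  # -> 15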
|
from email.mime.text import MIMEText
class Mail():
def __init__(self, to, fromAddr, subject, message, cc=None, bcc=None):
self.to = to
self.fromAddr = fromAddr
self.message = message
self.subject = subject
self.cc = cc
self.bcc = bcc
    @property
    def email(self):
        msg = MIMEText(self.message, 'plain', 'utf-8')
        msg['From'] = self.fromAddr
        msg['Subject'] = self.subject
        msg['To'] = self.to
        if self.cc:
            msg['Cc'] = self.cc
        # Bcc is deliberately left out of the headers; pass it to the SMTP
        # envelope when sending so those recipients stay hidden.
        return msg
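# Illustrative usage (sending via smtplib is assumed and not part of this module):
#   mail = Mail('to@example.com', 'from@example.com', 'Hi', 'Hello!', cc='cc@example.com')
#   print(mail.email.as_string())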
|
from typing import List


class Solution:
def dfs(self, curr, M, mark):
if mark[curr]==True:
return
mark[curr] = True
#find all friends
for i in range(len(M)):
if M[curr][i]==1 and mark[i]==False:
self.dfs(i, M, mark)
def findCircleNum(self, M: List[List[int]]) -> int:
#DFS
        #There are N people in total; from each person, DFS through every
        #connected friend; when a component is exhausted, increment the circle
        #count and move on to the next unvisited person
N = len(M)
mark = [False]*N
count = 0
for i in range(N):
if mark[i]==False:
self.dfs(i, M, mark)
count +=1
return count
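# Illustrative check: people 0 and 1 are friends, person 2 is alone,
# so there are two friend circles:
#   Solution().findCircleNum([[1, 1, 0], [1, 1, 0], [0, 0, 1]])  # -> 2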
|
#using SBS(Summation-Based Selection)
from representative_score import rep_score_sentence
import config
import os
import nltk.data
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'-f',
'--factor',
    type = float,
    default = 1.0,
help = 'factor of threshold for summary'
)
args = parser.parse_args()
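# Illustrative invocation (script name assumed): python sbs_summary.py -f 1.2
# keeps only sentences scoring at least 1.2x the blog's mean SBS score.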
start_time = time.time()
#fetching scraped txt blogs
blogs = []
for file in os.listdir():
if file.endswith(".txt"):
blogs.append(str(file))
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
for blog in blogs:
    blog_file = open(blog)
    summary = open(blog[:-4] + '_summary.txt', 'w+')
    data = blog_file.read()
    blog_file.close()
    sentences = tokenizer.tokenize(data)
    #successfully split the blog into sentences
    score_file = open("sbs_score.txt", "w+")
    #per-blog accumulators, so each summary uses its own threshold
    sentence_sbs = dict()
    overall_sbs = 0
    for sentence in sentences:
        print("Old sentence : " + sentence)
        sbs_score = rep_score_sentence(sentence, config.tau)
        score_file.write(str(sbs_score) + '\n')
        overall_sbs = overall_sbs + sbs_score
        sentence_sbs[sentence] = sbs_score
    #the threshold is the mean SBS score over the blog's sentences
    threshold = overall_sbs / len(sentences)
    for key in sentence_sbs:
        if sentence_sbs[key] >= args.factor * threshold:
            summary.write(key)
    summary.close()
    score_file.close()
end_time = time.time()
print("Execution time : " + str(end_time - start_time))
|
from torch_lib.Dataset import *
from torch_lib.Model import Model, OrientationLoss
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision.models import vgg
from torch.utils import data
import os
# Training itself does not need the camera calibration parameters, but the
# DetectedObject class is also used on the test set, where they are required.
# For compatibility the training set therefore loads a global cal_matrix,
# even though it has no real effect here.
def main():
# hyper parameters
epochs = 100
    batch_size = 8  # number of samples per training batch
alpha = 0.6
w = 0.4
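    # The total loss assembled below follows a MultiBin-style formulation
    # (inferred from the code): loss = alpha * dim_loss + conf_loss + w * orient_loss,
    # where alpha weights the dimension loss and w the orientation loss.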
print("Loading all detected objects in dataset...")
    # locate the training set; the directory defaults to ./Kitti/training/
train_path = os.path.abspath(os.path.dirname(__file__)) + os.path.sep + 'Kitti' + os.path.sep + 'training' + os.path.sep
    # runs Dataset's __init__
dataset = Dataset(train_path)
    # shuffle=True shuffles the data; num_workers is the number of loader worker processes
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 6}
generator = data.DataLoader(dataset, **params)
my_vgg = vgg.vgg19_bn(pretrained=True)
model = Model(features=my_vgg.features)
opt_SGD = torch.optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)
conf_loss_func = nn.CrossEntropyLoss()
dim_loss_func = nn.MSELoss()
    # the orientation loss uses a custom loss function
orient_loss_func = OrientationLoss
# load any previous weights
model_path = os.path.abspath(os.path.dirname(__file__)) + os.path.sep + 'weights' + os.path.sep
latest_model = None
first_epoch = 0
if not os.path.isdir(model_path):
os.mkdir(model_path)
else:
try:
latest_model = [x for x in sorted(os.listdir(model_path)) if x.endswith('.pkl')][-1]
except:
pass
if latest_model is not None:
        checkpoint = torch.load(model_path + latest_model, map_location=torch.device('cpu'))  # load the latest checkpoint
model.load_state_dict(checkpoint['model_state_dict'])
opt_SGD.load_state_dict(checkpoint['optimizer_state_dict'])
first_epoch = checkpoint['epoch']
loss = checkpoint['loss']
print('Found previous checkpoint: %s at epoch %s' % (latest_model, first_epoch))
print('Resuming training....')
total_num_batches = int(len(dataset) / batch_size)
for epoch in range(first_epoch + 1, epochs + 1):
curr_batch = 0
passes = 0
for local_batch, local_labels in generator:
            # Orientation holds the cos/sin of the difference between the angle
            # and the center of the bin the angle falls into; bins the angle
            # does not fall into get an orientation of (0, 0)
truth_orient = local_labels['Orientation'].float()
            # Confidence records which bin the label's angle falls into; with
            # 2 bins configured, Confidence is a 1x2 vector per label row
truth_conf = local_labels['Confidence'].long()
            # ground-truth dimensions from the label, with the class mean subtracted
truth_dim = local_labels['Dimensions'].float()
local_batch = local_batch.float()
            # feed the batch through the model to get predictions
[orient, conf, dim] = model(local_batch)
orient_loss = orient_loss_func(orient, truth_orient, truth_conf)
dim_loss = dim_loss_func(dim, truth_dim)
            # take the index where truth_conf equals 1
truth_conf = torch.max(truth_conf, dim=1)[1]
conf_loss = conf_loss_func(conf, truth_conf)
loss_theta = conf_loss + w * orient_loss
loss = alpha * dim_loss + loss_theta
opt_SGD.zero_grad()
loss.backward()
opt_SGD.step()
if passes % 10 == 0:
print("--- epoch %s | batch %s/%s --- [loss: %s]" % (epoch, curr_batch, total_num_batches, loss.item()))
passes = 0
passes += 1
curr_batch += 1
# save after every 10 epochs
if epoch % 10 == 0:
name = model_path + 'epoch_%s.pkl' % epoch
print("====================")
print("Done with epoch %s!" % epoch)
print("Saving weights as %s ..." % name)
torch.save({'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': opt_SGD.state_dict(), 'loss': loss}, name)
print("====================")
if __name__ == '__main__':
main()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os.path
import subprocess
from textwrap import dedent
import pytest
from pants.backend.go import target_type_rules
from pants.backend.go.goals import package_binary
from pants.backend.go.goals.package_binary import GoBinaryFieldSet
from pants.backend.go.target_types import GoBinaryTarget, GoModTarget, GoPackageTarget
from pants.backend.go.testutil import gen_module_gomodproxy
from pants.backend.go.util_rules import (
assembly,
build_pkg,
build_pkg_target,
first_party_pkg,
go_mod,
import_analysis,
link,
sdk,
third_party_pkg,
)
from pants.core.goals.package import BuiltPackage
from pants.engine.addresses import Address
from pants.engine.rules import QueryRule
from pants.engine.target import Target
from pants.testutil.rule_runner import RuleRunner, engine_error
@pytest.fixture()
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*assembly.rules(),
*import_analysis.rules(),
*package_binary.rules(),
*build_pkg.rules(),
*build_pkg_target.rules(),
*first_party_pkg.rules(),
*go_mod.rules(),
*link.rules(),
*target_type_rules.rules(),
*third_party_pkg.rules(),
*sdk.rules(),
QueryRule(BuiltPackage, (GoBinaryFieldSet,)),
],
target_types=[
GoBinaryTarget,
GoModTarget,
GoPackageTarget,
],
)
rule_runner.set_options([], env_inherit={"PATH"})
return rule_runner
def build_package(rule_runner: RuleRunner, binary_target: Target) -> BuiltPackage:
field_set = GoBinaryFieldSet.create(binary_target)
result = rule_runner.request(BuiltPackage, [field_set])
rule_runner.write_digest(result.digest)
return result
def test_package_simple(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"go.mod": dedent(
"""\
module foo.example.com
go 1.17
"""
),
"main.go": dedent(
"""\
package main
import (
"fmt"
)
func main() {
fmt.Println("Hello world!")
}
"""
),
"BUILD": dedent(
"""\
go_mod(name='mod')
go_package(name='pkg')
go_binary(name='bin')
"""
),
}
)
binary_tgt = rule_runner.get_target(Address("", target_name="bin"))
built_package = build_package(rule_runner, binary_tgt)
assert len(built_package.artifacts) == 1
assert built_package.artifacts[0].relpath == "bin"
result = subprocess.run([os.path.join(rule_runner.build_root, "bin")], stdout=subprocess.PIPE)
assert result.returncode == 0
assert result.stdout == b"Hello world!\n"
def test_package_third_party_requires_main(rule_runner: RuleRunner) -> None:
import_path = "pantsbuild.org/go-sample-for-test"
version = "v0.0.1"
fake_gomod = gen_module_gomodproxy(
version,
import_path,
(
(
"pkg/hello/hello.go",
dedent(
"""\
package hello
import "fmt"
func Hello() {
fmt.Println("Hello world!")
}
"""
),
),
(
"cmd/hello/main.go",
dedent(
f"""\
package main
import "{import_path}/pkg/hello"
func main() {{
hello.Hello()
}}
"""
),
),
),
)
fake_gomod.update(
{
"BUILD": dedent(
f"""\
go_mod(name='mod')
go_binary(name="bin", main='//:mod#{import_path}/pkg/hello')
"""
),
"go.mod": dedent(
f"""\
module go.example.com/foo
go 1.16
require (
\t{import_path} {version}
)
"""
),
}
)
rule_runner.write_files(fake_gomod)
rule_runner.set_options(
[
"--go-test-args=-v -bench=.",
f"--golang-subprocess-env-vars=GOPROXY=file://{rule_runner.build_root}/go-mod-proxy",
"--golang-subprocess-env-vars=GOSUMDB=off",
],
env_inherit={"PATH"},
)
binary_tgt = rule_runner.get_target(Address("", target_name="bin"))
with engine_error(ValueError, contains="but uses package name `hello` instead of `main`"):
build_package(rule_runner, binary_tgt)
def test_package_third_party_can_run(rule_runner: RuleRunner) -> None:
import_path = "pantsbuild.org/go-sample-for-test"
version = "v0.0.1"
fake_gomod = gen_module_gomodproxy(
version,
import_path,
(
(
"pkg/hello/hello.go",
dedent(
"""\
package hello
import "fmt"
func Hello() {
fmt.Println("Hello world!")
}
"""
),
),
(
"cmd/hello/main.go",
dedent(
f"""\
package main
import "{import_path}/pkg/hello"
func main() {{
hello.Hello()
}}
"""
),
),
),
)
fake_gomod.update(
{
"BUILD": dedent(
f"""\
go_mod(name='mod')
go_binary(name="bin", main='//:mod#{import_path}/cmd/hello')
"""
),
"go.mod": dedent(
f"""\
module go.example.com/foo
go 1.16
require (
\t{import_path} {version}
)
"""
),
}
)
rule_runner.write_files(fake_gomod)
rule_runner.set_options(
[
"--go-test-args=-v -bench=.",
f"--golang-subprocess-env-vars=GOPROXY=file://{rule_runner.build_root}/go-mod-proxy",
"--golang-subprocess-env-vars=GOSUMDB=off",
],
env_inherit={"PATH"},
)
binary_tgt = rule_runner.get_target(Address("", target_name="bin"))
built_package = build_package(rule_runner, binary_tgt)
assert len(built_package.artifacts) == 1
assert built_package.artifacts[0].relpath == "bin"
result = subprocess.run([os.path.join(rule_runner.build_root, "bin")], stdout=subprocess.PIPE)
assert result.returncode == 0
assert result.stdout == b"Hello world!\n"
def test_package_with_dependencies(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"lib/lib.go": dedent(
"""\
package lib
import (
"fmt"
"rsc.io/quote"
)
func Quote(s string) string {
return fmt.Sprintf(">> %s <<", s)
}
func GoProverb() string {
return quote.Go()
}
"""
),
"lib/BUILD": "go_package()",
"main.go": dedent(
"""\
package main
import (
"fmt"
"foo.example.com/lib"
)
func main() {
fmt.Println(lib.Quote("Hello world!"))
fmt.Println(lib.GoProverb())
}
"""
),
"go.mod": dedent(
"""\
module foo.example.com
go 1.17
require (
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c // indirect
rsc.io/quote v1.5.2
rsc.io/sampler v1.3.0 // indirect
)
"""
),
"go.sum": dedent(
"""\
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:qgOY6WgZOaTkIIMiVjBQcw93ERBE4m30iBm00nkL0i8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y=
rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=
rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
"""
),
"BUILD": dedent(
"""\
go_mod(name='mod')
go_package(name='pkg')
go_binary(name='bin')
"""
),
}
)
binary_tgt = rule_runner.get_target(Address("", target_name="bin"))
built_package = build_package(rule_runner, binary_tgt)
assert len(built_package.artifacts) == 1
assert built_package.artifacts[0].relpath == "bin"
result = subprocess.run([os.path.join(rule_runner.build_root, "bin")], stdout=subprocess.PIPE)
assert result.returncode == 0
assert result.stdout == (
b">> Hello world! <<\n"
b"Don't communicate by sharing memory, share memory by communicating.\n"
)
|
import tkinter as tk
import data
data.saveFilesToList()
# creating the root or 'master/parent' window
root = tk.Tk()
app_name = root.title("TakeNote")
# width
window_width = 600
# height
window_height = 400
# windows x position
window_x_pos = int((root.winfo_screenwidth() / 2) - (window_width / 2))
# windows y position
window_y_pos = int((root.winfo_screenheight() / 2) - (window_height / 2))
# using the previous four variables to set the windows dimension and position
root.geometry(str(window_width) + 'x' + str(window_height) + '+' + str(window_x_pos) + '+' + str(window_y_pos))
main_frame = tk.Frame(root)
main_frame.pack()
file_tree_frame = tk.Frame(main_frame, bd=0, highlightthickness=0)
file_tree_frame.pack(side='left', anchor='nw')
file_tree_canvas = tk.Canvas(file_tree_frame, width=150, height=600,
highlightthickness=0, bg="#000", scrollregion=(0, 0, 500, 500))
file_tree_canvas.config(width=150, height=600)
horizontal_scrollbar = tk.Scrollbar(file_tree_frame, orient="horizontal")
horizontal_scrollbar.pack(side='bottom', fill='x')
horizontal_scrollbar.config(command=file_tree_canvas.xview)
vertical_scrollbar = tk.Scrollbar(file_tree_frame, orient='vertical')
vertical_scrollbar.pack(side='right', fill='y')
vertical_scrollbar.config(command=file_tree_canvas.yview)
file_tree_canvas.config(xscrollcommand=horizontal_scrollbar.set, yscrollcommand=vertical_scrollbar.set)
file_tree_canvas.pack(side='left', anchor='nw', fill='both', expand=False)
file_tree_button_frame = tk.Frame(file_tree_canvas, width=150, height=400)
file_tree_button_frame.pack(side='left', anchor='nw')
# the configure_interior and the configure_canvas are copied from
# stack overflow to help with the button display with scrolling
def configureInterior(event):
# update the scrollbars to match the size of the inner frame
size = (file_tree_button_frame.winfo_reqwidth(), file_tree_button_frame.winfo_reqheight())
file_tree_canvas.config(scrollregion="0 0 %s %s" % size)
if file_tree_button_frame.winfo_reqwidth() != file_tree_canvas.winfo_width():
# update the canvas's width to fit the inner frame
file_tree_canvas.config(width=file_tree_canvas.winfo_reqwidth())
file_tree_button_window = file_tree_canvas.create_window(0, 0, window=file_tree_button_frame, anchor='nw')
file_tree_button_frame.bind('<Configure>', configureInterior)
def configureCanvas(event):
if file_tree_button_frame.winfo_reqwidth() != file_tree_canvas.winfo_width():
# update the inner frame's width to fill the canvas
file_tree_canvas.itemconfigure(file_tree_button_window, width=file_tree_canvas.winfo_width())
file_tree_canvas.bind('<Configure>', configureCanvas)
file_tree_button_list = []
working_file_page_frame = tk.Frame(main_frame, bd=0, highlightthickness=0)
working_file_page_frame.pack(side='left', anchor='nw')
# Creating working file
working_file = tk.Text(working_file_page_frame, width=600, wrap='word')
working_file.pack(side='bottom', anchor='w', fill='y')
# setting a label to tell user what to enter in the text entry
working_file_name_label = tk.Label(working_file_page_frame, text='File Name:')
working_file_name_label.pack(side='left', anchor='w')
# Creating a string object which helps store the user input
file_name_given_by_user = tk.StringVar()
def saveFile():
"""
this function helps retrieve the user input using the the
string object we created in the previous line and then
outputs the data
:return:
"""
working_file_name_given = file_name_given_by_user.get()
working_file_data = working_file.get(1.0, 'end-1c')
print('File name: ' + working_file_name_given)
print('File data: ' + working_file_data)
print("Done")
data.updateDataBase(working_file_name_given, working_file_data)
# add new button if it is a new file
if data.permissionToCreateButton():
FileBtn(working_file_name_given)
def deleteFile():
working_file_name_given = file_name_given_by_user.get()
working_file_data = working_file.get(1.0, 'end-1c')
print('Deleting File name: ' + working_file_name_given)
print('Deleting File data: ' + working_file_data)
print("Done")
data.deleteFileFromDatabase(working_file_name_given, working_file_data)
for btn in file_tree_button_list:
if btn.f_name == working_file_name_given:
btn.btn.pack_forget()
file_tree_button_list.remove(btn)
# Creating an entry for the user to set the file's name
file_name = tk.Entry(working_file_page_frame, textvariable=file_name_given_by_user)
file_name.pack(side='left', anchor='w')
# Creating this button to save the file and file's name
save_file_btn = tk.Button(working_file_page_frame, text='Save', command=saveFile)
save_file_btn.pack(side='left', anchor='w')
# Creating this button to handle file deletion
delete_file_btn = tk.Button(working_file_page_frame, text='Delete', command=deleteFile)
delete_file_btn.pack(side='right', anchor='e')
class FileBtn:
def __init__(self, f_name):
self.f_name = f_name
self.btn = tk.Button(file_tree_button_frame, text=self.f_name)
self.btn.config(command=lambda: self.callback())
self.btn.pack()
def callback(self):
for file in data.all_files_and_titles:
if file[0] == self.f_name:
file_name.delete(0, 'end')
file_name.insert(0, file[0])
working_file.delete('1.0', 'end')
working_file.insert('1.0', file[1])
# Creating all the file tree buttons
for f in data.all_files_and_titles:
file_btn = FileBtn(f[0])
file_tree_button_list.append(file_btn)
# running the app
root.mainloop()
# close off the database
data.closeDataBase()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 00:16:07 2020
@author: shaun
"""
import numpy as np
from gaussxw import gaussxw
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
#grab sample points and weights from Legendre polynomials
N=10
x,w=gaussxw(N)
#define the integrand with inputs x,y and x0 and y0
#x0 and y0 indicate your location in the matrix
def integrand(x,y,x0,y0):
q=100
density=q*np.sin(2*np.pi*x/0.05)*np.sin(2*np.pi*y/0.05)
f=density/(((x-x0)**2+(y-y0)**2)**0.5)
return f
def function(x0,y0):
#sets the bound of charge distribution to be a 10 cm by 10 cm square in the center of the matrix
F=Gmulti(-0.05,0.05,-0.05,0.05,integrand,x0,y0)
return F
#take the double integral with Gauss-Legendre quadrature
def Gmulti(a,b,c,d,f,x0,y0):
global N
global x
global w
#rescale x and weights to the domain
xp=0.5*(b-a)*x + 0.5*(b+a)
wpx=0.5*(b-a)*w
yp=0.5*(d-c)*x + 0.5*(d+c)
wpy=0.5*(d-c)*w
s=0
for ym in range(0,N):
#find the value of the function at every y and multiply it by the weights to get the sum
for xm in range(0,N):
#find the value of the function at every x and multiply it by the weights to get the sum
s+=wpy[ym]*wpx[xm]*f(xp[xm],yp[ym],x0,y0)
return s
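#sanity check (illustrative): integrating f = 1 over the same square returns
#its area, since the rescaled Gauss-Legendre weights sum to (b-a) per axis:
#   Gmulti(-0.05, 0.05, -0.05, 0.05, lambda x, y, x0, y0: 1.0, 0, 0)  # ~0.01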
#set size of matrix
size=100
#define the range and domain
X=np.linspace(-0.5,0.5,size)
Y=np.linspace(0.5,-0.5,size)
A=np.empty([size,size],float)
#specify the size of matrix grids
boxsize=abs(X[1]-X[0])
limit=0.05
#fill A with the potential given the location of the grid
for row in range(0,len(Y)):
print("percent done "+ str(float(row)/len(Y)))
for col in range(0,len(X)):
if(-limit<X[col]<limit and -limit<Y[row]<limit):
A[row][col]=0
else:
A[row][col]=function(X[col],Y[row])
#create heat map
fig = go.Figure(data=go.Heatmap(
z=A,
x=X,
y=Y,
zmin=-0.01,
zmax=0.01,
zauto=False,
hoverongaps = False)
)
fig.update_layout(
xaxis_title="X meters",
yaxis_title="Y meters",
    title='Electric Potential of continuous charge distribution'
)
fig.show()
#find the gradient of A to get the electric field plot using quiver
print(A)
# NOTE: the potential can be negative here, so np.log yields NaN for those
# entries; the log is only used to compress the dynamic range near the charges
A=np.log(A)
v, u = np.gradient(A, boxsize, boxsize)
figure1=plt.figure()
ax = figure1.add_subplot()
ax.set_xlabel("X meters")
ax.set_ylabel("Y meters")
q = ax.quiver(X, Y, u, v)
figure1.suptitle("Eletric Field of continous charge distributions")
plt.show()
|
#!/usr/bin/env python
"""
Proxy class for XML-RPC
"""
__author__ = "Mezhenin Artoym <mezhenin@cs.karelia.ru>"
__version__ = "$Revision: 0.1 $"
__date__ = "$Date: 2010/01/10 $"
__copyright__ = ""
__license__ = "GPLv2"
import xmlrpclib
import httplib
class ProxyedTransp (xmlrpclib.Transport):
"""
To access an XML-RPC server through a proxy, you need to define a
custom transport.
This is example from the official documentation.
http://docs.python.org/library/xmlrpclib.html
"""
def set_proxy(self, proxy):
"""
Actually I do not know what this funct. is for...
"""
self.proxy = proxy
def make_connection(self, host):
"""
... sorry guys ...
"""
self.realhost = host
return httplib.HTTP(self.proxy)
def send_request(self, connection, handler, request_body):
"""
... I thought that the xml-rps should have its
own class for the proxy ...
"""
connection.putrequest("POST", 'http://%s%s' % (self.realhost, handler))
def send_host(self, connection, host):
"""
... but it is not :^(
"""
#connection.putheader('Host', self.realhost) - officail doc
connection.putheader(self.realhost)
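if __name__ == "__main__":
    # Usage sketch following the same official documentation example this
    # class is based on (the proxy address and endpoint are placeholders):
    transport = ProxyedTransp()
    transport.set_proxy("proxy.example.com:8080")
    server = xmlrpclib.ServerProxy("http://time.xmlrpc.com/RPC2",
                                   transport=transport)
    print server.currentTime.getCurrentTime()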
|
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('autores/', views.AutorSearchFormListView.as_view(), name = 'autor-list'),
path('autores/novo/', views.AutorCreateView.as_view(), name = 'autor-create'),
path('autores/<int:pk>/', views.AutorUpdateView.as_view(), name = 'autor-update'),
path('autores/remover/<int:pk>/', views.AutorDeleteView.as_view(), name = 'autor-delete'),
path('livros/', views.LivroSearchFormListView.as_view(), name = 'livro-list'),
path('livros/novo/', views.LivroCreateView.as_view(), name = 'livro-create'),
path('livros/<int:pk>/', views.LivroUpdateView.as_view(), name = 'livro-update'),
path('livros/remover/<int:pk>/', views.LivroDeleteView.as_view(), name = 'livro-delete'),
path('livros.json', views.LivroJsonListView.as_view(), name = 'livro-json-list'),
path('autores.json', views.AutorJsonListView.as_view(), name = 'autor-json-list'),
path('autores/taken/', views.autor_nome_registrado, name = 'autor-taken'),
url(r'^login/$', auth_views.LoginView.as_view(), name = 'login'),
    url(r'^logout/$', auth_views.LogoutView.as_view(), name = 'logout'),
url(r'^admin/', admin.site.urls),
]
|
# from tensorflow.examples.tutorials.mnist import input_data
from data_helper import load_data
import numpy as np
def load():
mnist = input_data.read_data_sets('data/', one_hot=False)
return mnist.train.images, mnist.train.labels
# return np.ones([55000, 784]), np.ones([55000, ])
def EM(imgs, tags):
"""
imgs: [784, 55000]
tags: [1 , 55000]
"""
# Random guess
print(np.shape(imgs))
print(np.shape(tags))
dimension, img_num = np.shape(imgs)
lambda_arr = np.ones([10, 1]) / 10
prob_arr = np.ones([10, dimension]) / len(imgs)
responsibility = np.empty([10, img_num])
print(prob_arr[:, 400])
for epoch in range(10):
# E step
for i in range(10):
for j in range(img_num):
                sub_prob = 1
                _ = prob_arr[i, :] ** imgs[:, j] * (1 - prob_arr[i, :]) ** (1 - imgs[:, j])
                for k in range(dimension):
                    sub_prob *= _[k]
                    # periodically rescale to keep the running product from
                    # underflowing to zero; the factor cancels when the
                    # responsibilities are normalized below
                    if k % 150 == 0:
                        sub_prob *= 1e+150
                responsibility[i][j] = lambda_arr[i][0] * sub_prob
for i in range(img_num):
responsibility[:, i] /= np.sum(responsibility[:, i])
print(np.unique(responsibility))
# M step
for i in range(10):
lambda_arr[i][0] = np.sum(responsibility[i, :]) / img_num
for i in range(dimension):
for j in range(10):
                print(np.sum(responsibility[j, :] * imgs[i, :]))
prob_arr[j][i] = np.sum(responsibility[j, :] * imgs[i, :]) / np.sum(responsibility[j, :])
print(prob_arr[:, 400])
# Evaluation
print(tags)
print(np.argmax(responsibility, axis=0))
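# A vectorized, log-space alternative to the E-step loops above (our sketch,
# not the original author's method; `eps` guards against log(0)):
def e_step_log(imgs, lambda_arr, prob_arr, eps=1e-10):
    # imgs: [dimension, img_num]; prob_arr: [10, dimension]; lambda_arr: [10, 1]
    p = np.clip(prob_arr, eps, 1.0 - eps)
    # log P(x | component) for every (component, image) pair: [10, img_num]
    log_lik = np.log(p).dot(imgs) + np.log(1.0 - p).dot(1.0 - imgs)
    log_post = np.log(lambda_arr) + log_lik
    log_post -= log_post.max(axis=0, keepdims=True)  # stabilize before exp
    post = np.exp(log_post)
    return post / post.sum(axis=0, keepdims=True)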
if __name__ == '__main__':
(train_x, train_y), (test_x, test_y) = load_data()
    # Binarize
train_x = train_x[:500]
train_y = train_y[:500]
for i in range(np.shape(train_x)[0]):
for j in range(np.shape(train_x)[1]):
if train_x[i][j] > 127.5:
train_x[i][j] = 1.0
else:
train_x[i][j] = 0.0
# EM
    EM(train_x.T, train_y)  # pixels were already binarized to {0, 1} above
|
#!/usr/local/bin/python3.8
print ('pass condition')
# The pass keyword is a no-op statement: it does nothing, but it lets us define the else branch without writing any code
name = input('Enter your name: ')
if name == 'bill':
print ('Hello bill')
else:
pass # pass = do nothing
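# `pass` is also commonly used to stub out functions and classes that will be
# filled in later (our examples):
def not_implemented_yet():
    pass
class PlaceholderConfig:
    pass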
|
# coding: utf-8
"""Parser for KumaScript used in compatibility data.
KumaScript is a macro system used on MDN:
https://github.com/mozilla/kumascript
KumaScript uses a JS-like syntax. The source is stored as pages on MDN:
https://developer.mozilla.org/en-US/docs/Template:SpecName
KumaScript can query the database, do math, and generate text using all the
power of JavaScript. It's slow, so it is rendered server-side and cached.
The unrendered version of a page can be accessed by asking for the raw version:
https://developer.mozilla.org/en-US/docs/Web/CSS/display
https://developer.mozilla.org/en-US/docs/Web/CSS/display?raw
The MDN importer needs to recognize KumaScript templates in the raw page, and:
1. For valid KumaScript, extract data and/or render HTML
2. For invalid KumaScript, generate an error
3. For unknown KumaScript, generate a different error
The Compat API will not support KumaScript.
"""
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.utils.six import text_type
from django.utils.text import get_text_list
from parsimonious.grammar import Grammar
from parsimonious.nodes import Node
from .data import Data
from .html import HTMLInterval, HTMLText, HTMLVisitor, html_grammar_source
from .utils import format_version
kumascript_grammar_source = html_grammar_source + r"""
#
# KumaScript tokens
#
kumascript = ks_esc_start ks_name ks_arglist? ks_esc_end
ks_esc_start = "{{" _
ks_name = ~r"(?P<content>[^\(\}\s]*)\s*"s
ks_arglist = ks_func_start ks_arg ks_arg_rest* ks_func_end
ks_func_start = "(" _
ks_func_arg = _ "," _
ks_func_end = _ ")" _
ks_esc_end = "}}" _
ks_arg = (double_quoted_text / single_quoted_text / ks_bare_arg)
ks_bare_arg = ~r"(?P<content>.*?(?=[,)]))"
ks_arg_rest = ks_func_arg ks_arg
#
# WhyNoSpec block
whynospec = _ whynospec_start whynospec_content whynospec_end
whynospec_start = ks_esc_start ~r"WhyNoSpecStart"s _ ks_esc_end _
whynospec_content = ~r".*?(?={{\s*WhyNoSpecEnd)"s
whynospec_end = ks_esc_start ~r"WhyNoSpecEnd"s _ ks_esc_end _
#
# Add KumaScript to text
#
text_token = whynospec / kumascript / text_item
text_item = ~r"(?P<content>(?:[^{<]|{(?!{))+)"s
"""
kumascript_grammar = Grammar(kumascript_grammar_source)
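# Illustrative example of what the grammar recognizes (our note; the `_` and
# quoted-text rules are defined in html_grammar_source). A macro such as
#     {{CompatGeckoDesktop("1.9")}}
# should match the `kumascript` rule, e.g.:
#     kumascript_grammar['kumascript'].parse('{{CompatGeckoDesktop("1.9")}}')
# parsimonious raises a ParseError for text that does not match.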
SCOPES = set((
'specification name',
'specification maturity',
'specification description',
'compatibility feature',
'compatibility support',
'footnote',
))
MDN_DOMAIN = 'https://developer.mozilla.org'
MDN_DOCS = MDN_DOMAIN + '/en-US/docs'
@python_2_unicode_compatible
class KumaScript(HTMLText):
"""A KumaScript macro."""
def __init__(self, args=None, scope=None, **kwargs):
"""Initialize components of a KumaScript macro."""
super(KumaScript, self).__init__(**kwargs)
self.args = args or []
self.scope = scope or '(unknown scope)'
def arg(self, pos):
"""Return argument, or None if not enough arguments."""
try:
return self.args[pos]
except IndexError:
return None
def __str__(self):
"""Create the programmer debug string."""
args = []
for arg in self.args:
if '"' in arg:
quote = "'"
else:
quote = '"'
args.append('{0}{1}{0}'.format(quote, arg))
if args:
argtext = '(' + ', '.join(args) + ')'
else:
argtext = ''
name = getattr(self, 'name', 'KumaScript')
return '{{{{{}{}}}}}'.format(name, argtext)
def to_html(self):
"""Convert to HTML. Default is an empty string."""
return ''
def _make_issue(self, issue_slug, **extra_kwargs):
"""Create an importer issue with standard KumaScript parameters."""
assert self.scope
kwargs = {'name': self.name, 'args': self.args, 'scope': self.scope,
'kumascript': str(self)}
kwargs.update(extra_kwargs)
return (issue_slug, self.start, self.end, kwargs)
class UnknownKumaScript(KumaScript):
"""An unknown KumaScript macro."""
def __init__(self, name, **kwargs):
"""Initialize name of an unknown KumaScript macro."""
super(UnknownKumaScript, self).__init__(**kwargs)
self.name = name
@property
def known(self):
return False
@property
def issues(self):
"""Return the list of issues with this KumaScript in this scope."""
return super(UnknownKumaScript, self).issues + [
self._make_issue('unknown_kumascript')]
class KnownKumaScript(KumaScript):
"""Base class for known KumaScript macros."""
min_args = 0
max_args = 0
arg_names = []
expected_scopes = SCOPES
def __init__(self, args=None, scope=None, **kwargs):
"""Validate arg count of a known KumaScript macro."""
super(KnownKumaScript, self).__init__(**kwargs)
self.args = args or []
self.scope = scope or '(unknown scope)'
assert self.max_args >= self.min_args
assert len(self.arg_names) == self.max_args
@property
def known(self):
return True
@property
def name(self):
return getattr(self, 'canonical_name', self.__class__.__name__)
def _validate(self):
"""Return validation issues or empty list."""
issues = []
count = len(self.args)
if count < self.min_args or count > self.max_args:
extra = {
'max': self.max_args, 'min': self.min_args, 'count': count,
'arg_names': self.arg_names}
if self.max_args == 0:
arg_spec = 'no arguments'
else:
if self.max_args == self.min_args:
arg_range = 'exactly {0} argument{1}'.format(
self.max_args, '' if self.max_args == 1 else 's')
else:
arg_range = 'between {0} and {1} arguments'.format(
self.min_args, self.max_args)
names = []
for pos, name in enumerate(self.arg_names):
if pos > self.min_args:
names.append('[{}]'.format(name))
else:
names.append(name)
arg_spec = '{} ({})'.format(arg_range, ', '.join(names))
extra['arg_spec'] = arg_spec
if count == 1:
extra['arg_count'] = '1 argument'
else:
extra['arg_count'] = '{0} arguments'.format(count)
issues.append(self._make_issue('kumascript_wrong_args', **extra))
assert not (self.expected_scopes - SCOPES)
if self.scope not in self.expected_scopes:
expected = get_text_list(sorted(self.expected_scopes))
issues.append(self._make_issue(
'unexpected_kumascript', expected_scopes=expected))
return issues
@property
def issues(self):
return super(KumaScript, self).issues + self._validate()
class Bug(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:Bug
min_args = max_args = 1
arg_names = ['number']
canonical_name = 'bug'
expected_scopes = set(('footnote',))
def __init__(self, **kwargs):
"""
Initialize Bug.
{{bug}} macro takes 3 arguments, but only the 1-argument version is
supported.
"""
super(Bug, self).__init__(**kwargs)
self.number = self.arg(0)
def to_html(self):
return (
'<a href="https://bugzilla.mozilla.org/show_bug.cgi?id={number}">'
'bug {number}</a>').format(number=self.number)
class CompatKumaScript(KnownKumaScript):
"""Base class for KumaScript specifying a browser version."""
min_args = max_args = 1
expected_scopes = set(('compatibility support', ))
def to_html(self):
return self.version
class CompatBasicKumaScript(CompatKumaScript):
"""Base class for KumaScript specifying the actual browser version."""
def __init__(self, **kwargs):
super(CompatBasicKumaScript, self).__init__(**kwargs)
self.version = format_version(self.arg(0))
class CompatAndroid(CompatBasicKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatAndroid
arg_names = ['AndroidVersion']
class CompatChrome(CompatBasicKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatChrome
arg_names = ['ChromeVer']
class CompatGeckoDesktop(CompatKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatGeckoDesktop
arg_names = ['GeckoVersion']
geckoversion_to_version = {
'1': '1.0',
'1.0': '1.0',
'1.7 or earlier': '1.0',
'1.7': '1.0',
'1.8': '1.5',
'1.8.1': '2.0',
'1.9': '3.0',
'1.9.1': '3.5',
'1.9.1.4': '3.5.4',
'1.9.2': '3.6',
'1.9.2.4': '3.6.4',
'1.9.2.5': '3.6.5',
'1.9.2.9': '3.6.9',
'2': '4.0',
'2.0': '4.0',
}
def __init__(self, **kwargs):
super(CompatGeckoDesktop, self).__init__(**kwargs)
self.gecko_version = self.arg(0)
@property
def version(self):
try:
return self.geckoversion_to_version[self.gecko_version]
except KeyError:
try:
nversion = float(self.gecko_version)
except ValueError:
return None
if nversion >= 5:
return '{:1.1f}'.format(nversion)
else:
return None
@property
def issues(self):
issues = super(CompatGeckoDesktop, self).issues
if self.version is None:
issues.append(
('compatgeckodesktop_unknown', self.start, self.end,
{'version': self.gecko_version}))
return issues
class CompatGeckoFxOS(CompatKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatGeckoFxOS
max_args = 2
arg_names = ['GeckoVersion', 'VersionOverride']
def __init__(self, **kwargs):
super(CompatGeckoFxOS, self).__init__(**kwargs)
self.gecko_version = self.arg(0)
        self.override = self.arg(1)
# TODO: Replace with KumaScript logic
try:
nversion = float(self.gecko_version)
except ValueError:
nversion = -1
over = self.override
self.bad_version = False
self.bad_override = False
if (0 <= nversion < 19) and over in (None, '1.0'):
self.version = '1.0'
elif (0 <= nversion < 21) and over == '1.0.1':
self.version = '1.0.1'
elif (0 <= nversion < 24) and over in ('1.1', '1.1.0', '1.1.1'):
self.version = '1.1'
elif (19 <= nversion < 27) and over in (None, '1.2'):
self.version = '1.2'
elif (27 <= nversion < 29) and over in (None, '1.3'):
self.version = '1.3'
elif (29 <= nversion < 31) and over in (None, '1.4'):
self.version = '1.4'
elif (31 <= nversion < 33) and over in (None, '2.0'):
self.version = '2.0'
elif (33 <= nversion < 35) and over in (None, '2.1'):
self.version = '2.1'
elif (35 <= nversion < 38) and over in (None, '2.2'):
self.version = '2.2'
elif (nversion < 0 or nversion >= 38):
self.version = over
self.bad_version = True
        else:
            self.version = over
            self.bad_override = True
@property
def issues(self):
issues = super(CompatGeckoFxOS, self).issues
if self.bad_version:
issues.append(
('compatgeckofxos_unknown', self.start, self.end,
{'version': self.gecko_version}))
if self.bad_override:
issues.append(
('compatgeckofxos_override', self.start, self.end,
{'override': self.override, 'version': self.gecko_version}))
return issues
class CompatGeckoMobile(CompatKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatGeckoMobile
arg_names = ['GeckoVersion']
def __init__(self, **kwargs):
super(CompatGeckoMobile, self).__init__(**kwargs)
self.gecko_version = self.arg(0)
@property
def version(self):
nversion = self.gecko_version.split('.', 1)[0]
if nversion == '2':
return '4.0'
else:
return '{}.0'.format(nversion)
class CompatIE(CompatBasicKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatIE
arg_names = ['IEver']
class CompatNightly(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatNightly
max_args = 1
arg_names = ['browser']
expected_scopes = set(('compatibility support',))
class CompatNo(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatNo
expected_scopes = set(('compatibility support',))
class CompatOpera(CompatBasicKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatOpera
arg_names = ['OperaVer']
class CompatOperaMobile(CompatBasicKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatOperaMobile
arg_names = ['OperaVer']
class CompatSafari(CompatBasicKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatSafari
arg_names = ['SafariVer']
class CompatUnknown(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatUnknown
expected_scopes = set(('compatibility support',))
class CompatVersionUnknown(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatVersionUnknown
expected_scopes = set(('compatibility support',))
class CompatibilityTable(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:CompatibilityTable
expected_scopes = set()
class KumaHTMLElement(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:HTMLElement
min_args = max_args = 1
arg_names = ['ElementName']
canonical_name = 'HTMLElement'
expected_scopes = set((
'compatibility feature', 'compatibility support', 'footnote',
'specification description'))
def __init__(self, **kwargs):
super(KumaHTMLElement, self).__init__(**kwargs)
self.element_name = self.arg(0)
def to_html(self):
if ' ' in self.element_name:
fmt = '<code>{}</code>'
else:
fmt = '<code><{}></code>'
return fmt.format(self.element_name)
class SpecKumaScript(KnownKumaScript):
"""Base class for Spec2 and SpecName."""
def __init__(self, data=None, **kwargs):
super(SpecKumaScript, self).__init__(**kwargs)
self.mdn_key = self.arg(0)
self.spec = None
self.data = data or Data()
if self.mdn_key:
self.spec = self.data.lookup_specification(self.mdn_key)
def to_html(self):
if self.spec:
name = self.spec.name['en']
else:
name = self.mdn_key or '(None)'
return 'specification {}'.format(name)
class Spec2(SpecKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:Spec2
min_args = max_args = 1
arg_names = ['SpecKey']
expected_scopes = set(('specification maturity',))
def _validate(self):
issues = super(Spec2, self)._validate()
if self.mdn_key and not self.spec:
issues.append(
('unknown_spec', self.start, self.end, {'key': self.mdn_key}))
return issues
class SpecName(SpecKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:SpecName
min_args = 1
max_args = 3
arg_names = ['SpecKey', 'Anchor', 'AnchorName']
expected_scopes = set(('specification name', 'specification description'))
def __init__(self, **kwargs):
super(SpecName, self).__init__(**kwargs)
self.subpath = self.arg(1)
self.section_name = self.arg(2)
if self.spec:
self.section_id = self.data.lookup_section_id(
self.spec.id, self.subpath)
else:
self.section_id = None
def _validate(self):
issues = super(SpecName, self)._validate()
if self.mdn_key and not self.spec:
issues.append(
('unknown_spec', self.start, self.end, {'key': self.mdn_key}))
if not self.mdn_key and len(self.args):
issues.append(self._make_issue('specname_blank_key'))
return issues
class CSSBox(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:cssbox
min_args = max_args = 1
arg_names = ['PropertyName']
canonical_name = 'cssbox'
expected_scopes = set()
class XRefBase(KnownKumaScript):
"""Base class for cross-reference KumaScript."""
expected_scopes = set((
'compatibility feature', 'specification description', 'footnote'))
def __init__(self, **kwargs):
super(XRefBase, self).__init__(**kwargs)
self.url = None
self.display = None
self.linked = self.scope in ('specification description', 'footnote')
def to_html(self):
"""Convert macro to link or plain text."""
assert self.display
if self.linked:
assert self.url
return '<a href="{}"><code>{}</code></a>'.format(
self.url, self.display)
else:
return '<code>{}</code>'.format(self.display)
class CSSxRef(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:cssxref
min_args = 1
max_args = 3
arg_names = ['APIName', 'DisplayName', 'Anchor']
canonical_name = 'cssxref'
def __init__(self, **kwargs):
super(CSSxRef, self).__init__(**kwargs)
self.api_name = self.arg(0)
self.display_name = self.arg(1)
self.anchor = self.arg(2)
self.construct_crossref(
self.api_name, self.display_name, self.anchor)
def construct_crossref(self, api_name, display_name, anchor=None):
self.url = '{}/Web/CSS/{}{}'.format(
MDN_DOCS, api_name, anchor or '')
self.display = display_name or api_name
class DeprecatedInline(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:deprecated_inline
canonical_name = 'deprecated_inline'
expected_scopes = set(('compatibility feature',))
class DOMEventXRef(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:domeventxref
min_args = max_args = 1
arg_names = ['api_name']
canonical_name = 'domeventxref'
def __init__(self, **kwargs):
"""Initialize DOMEventXRef.
Only implements the subset of domeventxref used on current pages.
"""
super(DOMEventXRef, self).__init__(**kwargs)
self.api_name = self.arg(0)
assert '()' not in self.api_name
self.url = '{}/DOM/DOM_event_reference/{}'.format(
MDN_DOCS, self.api_name)
self.display = self.api_name
class DOMException(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:exception
min_args = max_args = 1
arg_names = ['exception_id']
canonical_name = 'exception'
def __init__(self, **kwargs):
super(DOMException, self).__init__(**kwargs)
self.exception_id = self.arg(0)
self.url = '{}/Web/API/DOMException#{}'.format(
MDN_DOCS, self.exception_id)
self.display = self.exception_id
class DOMxRef(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:domxref
min_args = 1
max_args = 2
arg_names = ['DOMPath', 'DOMText']
canonical_name = 'domxref'
def __init__(self, **kwargs):
super(DOMxRef, self).__init__(**kwargs)
self.dom_path = self.arg(0)
self.dom_text = self.arg(1)
path = self.dom_path.replace(' ', '_').replace('()', '')
if '.' in path and '..' not in path:
path = path.replace('.', '/')
path = path[0].upper() + path[1:]
self.url = '{}/Web/API/{}'.format(MDN_DOCS, path)
self.display = self.dom_text or self.dom_path
class EmbedCompatTable(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:EmbedCompatTable
min_args = max_args = 1
arg_names = ['slug']
expected_scopes = set(('footnote',))
class Event(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:event
min_args = 1
max_args = 2
arg_names = ['api_name', 'display_name']
canonical_name = 'event'
def __init__(self, **kwargs):
super(Event, self).__init__(**kwargs)
self.api_name = self.arg(0)
self.display_name = self.arg(1)
self.url = '{}/Web/Events/{}'.format(MDN_DOCS, self.api_name)
self.display = self.display_name or self.api_name
class ExperimentalInline(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:experimental_inline
canonical_name = 'experimental_inline'
expected_scopes = set(('compatibility feature',))
class GeckoRelease(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:geckoRelease
min_args = max_args = 1
arg_names = ['release']
canonical_name = 'geckoRelease'
expected_scopes = set(('footnote',))
early_versions = {
'1.8': ('Firefox 1.5', 'Thunderbird 1.5', 'SeaMonkey 1.0'),
'1.8.1': ('Firefox 2', 'Thunderbird 2', 'SeaMonkey 1.1'),
'1.9': ('Firefox 3',),
'1.9.1': ('Firefox 3.5', 'Thunderbird 3.0', 'SeaMonkey 2.0'),
'1.9.1.4': ('Firefox 3.5.4',),
'1.9.2': ('Firefox 3.6', 'Thunderbird 3.1', 'Fennec 1.0'),
'1.9.2.4': ('Firefox 3.6.4',),
'1.9.2.5': ('Firefox 3.6.5',),
'1.9.2.9': ('Firefox 3.6.9',),
'2.0b2': ('Firefox 4.0b2',),
'2.0b4': ('Firefox 4.0b4',),
'2': ('Firefox 4', 'Thunderbird 3.3', 'SeaMonkey 2.1'),
'2.0': ('Firefox 4', 'Thunderbird 3.3', 'SeaMonkey 2.1'),
'2.1': ('Firefox 4 Mobile',),
}
firefoxos_name = 'Firefox OS {}'
firefoxos_versions = {
'18.0': ('1.0.1', '1.1'),
'26.0': ('1.2',),
'28.0': ('1.3',),
'30.0': ('1.4',),
'32.0': ('2.0',),
}
release_names = (
'Firefox {rnum}', 'Thunderbird {rnum}', 'SeaMonkey 2.{snum}')
def __init__(self, **kwargs):
super(GeckoRelease, self).__init__(**kwargs)
raw_version = self.arg(0)
self.gecko_version = raw_version
self.and_higher = False
if raw_version.endswith('+'):
self.gecko_version = raw_version[:-1]
self.and_higher = True
if self.gecko_version in self.early_versions:
self.releases = self.early_versions[self.gecko_version]
else:
vnum = float(self.gecko_version)
assert vnum >= 5.0
rnum = '{:.1f}'.format(vnum)
snum = int(vnum) - 3
self.releases = [
name.format(rnum=rnum, snum=snum)
for name in self.release_names]
for fxosnum in self.firefoxos_versions.get(rnum, []):
self.releases.append(self.firefoxos_name.format(fxosnum))
def to_html(self):
plus = '+' if self.and_higher else ''
return '(' + ' / '.join([rel + plus for rel in self.releases]) + ')'
class HTMLAttrXRef(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:htmlattrxref
min_args = 1
max_args = 2
arg_names = ['attribute', 'element']
canonical_name = 'htmlattrxref'
def __init__(self, **kwargs):
super(HTMLAttrXRef, self).__init__(**kwargs)
self.attribute = self.arg(0)
self.element = self.arg(1)
self.text = self.arg(2)
if self.element:
self.url = '{}/Web/HTML/Element/{}'.format(MDN_DOCS, self.element)
else:
self.url = '{}/Web/HTML/Global_attributes'.format(MDN_DOCS)
self.url += '#attr-' + self.attribute.lower()
self.display = self.attribute.lower()
class JSxRef(XRefBase):
# https://developer.mozilla.org/en-US/docs/Template:jsxref
min_args = 1
max_args = 2
arg_names = ['API name', 'display name']
canonical_name = 'jsxref'
def __init__(self, **kwargs):
"""
Initialize JSxRef.
{{jsxref}} macro can take 4 arguments, but only handling first two.
"""
super(JSxRef, self).__init__(**kwargs)
self.api_name = self.arg(0)
self.display_name = self.arg(1)
path_name = self.api_name.replace('.prototype.', '/').replace('()', '')
if path_name.startswith('Global_Objects/'):
path_name = path_name.replace('Global_Objects/', '', 1)
if '.' in path_name and '...' not in path_name:
path_name = path_name.replace('.', '/')
self.url = '{}/Web/JavaScript/Reference/Global_Objects/{}'.format(
MDN_DOCS, path_name)
self.display = self.display_name or self.api_name
class NonStandardInline(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:non-standard_inline
canonical_name = 'non-standard_inline'
expected_scopes = set(('compatibility feature',))
class NotStandardInline(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:not_standard_inline
canonical_name = 'not_standard_inline'
expected_scopes = set(('compatibility feature',))
class ObsoleteInline(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:obsolete_inline
canonical_name = 'obsolete_inline'
expected_scopes = set(('compatibility feature',))
class PropertyPrefix(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:property_prefix
min_args = max_args = 1
arg_names = ['Prefix']
canonical_name = 'property_prefix'
expected_scopes = set(('compatibility support',))
def __init__(self, **kwargs):
super(PropertyPrefix, self).__init__(**kwargs)
self.prefix = self.arg(0)
class WebkitBug(KnownKumaScript):
# https://developer.mozilla.org/en-US/docs/Template:WebkitBug
min_args = max_args = 1
arg_names = ['number']
expected_scopes = set(('footnote',))
def __init__(self, **kwargs):
super(WebkitBug, self).__init__(**kwargs)
self.number = self.arg(0)
def to_html(self):
return (
'<a href="https://bugs.webkit.org/show_bug.cgi?id={number}">'
'WebKit bug {number}</a>').format(number=self.number)
class WhyNoSpecBlock(HTMLInterval):
"""Psuedo-element for {{WhyNoSpecStart}}/{{WhyNoSpecEnd}} block.
Stand-alone {{WhyNoSpecStart}} and {{WhyNoSpecEnd}} elements will be
treated as unknown kumascript.
https://developer.mozilla.org/en-US/docs/Template:WhyNoSpecStart
https://developer.mozilla.org/en-US/docs/Template:WhyNoSpecEnd
"""
expected_scopes = set()
def __init__(self, scope=None, **kwargs):
super(WhyNoSpecBlock, self).__init__(**kwargs)
self.scope = scope
def to_html(self, drop_tag=None):
return ''
class XrefCSSBase(CSSxRef):
"""Base class for xref_cssXXX macros."""
min_args = max_args = 0
arg_names = []
def __init__(self, **kwargs):
super(XrefCSSBase, self).__init__(**kwargs)
self.construct_crossref(*self.xref_args)
class XrefCSSAngle(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssangle
canonical_name = 'xref_cssangle'
xref_args = ('angle', '<angle>')
class XrefCSSColorValue(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_csscolorvalue
canonical_name = 'xref_csscolorvalue'
xref_args = ('color_value', '<color>')
class XrefCSSGradient(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssgradient
canonical_name = 'xref_cssgradient'
xref_args = ('gradient', '<gradient>')
class XrefCSSImage(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssimage
canonical_name = 'xref_cssimage'
xref_args = ('image', '<image>')
class XrefCSSInteger(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssinteger
canonical_name = 'xref_cssinteger'
xref_args = ('integer', '<integer>')
class XrefCSSLength(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_csslength
canonical_name = 'xref_csslength'
xref_args = ('length', '<length>')
class XrefCSSNumber(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssnumber
canonical_name = 'xref_cssnumber'
xref_args = ('number', '<number>')
class XrefCSSPercentage(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_csspercentage
canonical_name = 'xref_csspercentage'
xref_args = ('percentage', '<percentage>')
class XrefCSSString(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssstring
canonical_name = 'xref_cssstring'
xref_args = ('string', '<string>')
class XrefCSSVisual(XrefCSSBase):
# https://developer.mozilla.org/en-US/docs/Template:xref_cssvisual
canonical_name = 'xref_cssvisual'
xref_args = ('Media/Visual', '<visual>')
class BaseKumaVisitor(HTMLVisitor):
"""Extract HTML structure from a MDN Kuma raw fragment.
Extracts KumaScript, with special handling if it is known.
"""
scope = None
def __init__(self, **kwargs):
super(BaseKumaVisitor, self).__init__(**kwargs)
self._kumascript_proper_names = None
def _visit_multi_block(self, node, children):
"""Visit a 1-or-more block of tokens."""
assert children
tokens = self.flatten(children)
assert tokens
for token in tokens:
assert isinstance(token, HTMLInterval)
return tokens
def flatten(self, nested_list):
result = []
for item in nested_list:
if isinstance(item, list):
result.extend(self.flatten(item))
else:
result.append(item)
return result
def _visit_multi_token(self, node, children):
"""Visit a single HTMLInterval or list of HTMLIntervals."""
assert len(children) == 1
item = children[0]
if isinstance(item, HTMLInterval):
return item
else:
for subitem in item:
assert isinstance(subitem, HTMLInterval), subitem
if len(item) == 1:
return item[0]
else:
return item
visit_html_block = _visit_multi_block
visit_html_element = _visit_multi_token
visit_text_block = _visit_multi_block
visit_text_token = _visit_multi_token
known_kumascript = {
'Bug': Bug,
'CompatAndroid': CompatAndroid,
'CompatChrome': CompatChrome,
'CompatGeckoDesktop': CompatGeckoDesktop,
'CompatGeckoFxOS': CompatGeckoFxOS,
'CompatGeckoMobile': CompatGeckoMobile,
'CompatIE': CompatIE,
'CompatNightly': CompatNightly,
'CompatNo': CompatNo,
'CompatOpera': CompatOpera,
'CompatOperaMobile': CompatOperaMobile,
'CompatSafari': CompatSafari,
'CompatUnknown': CompatUnknown,
'CompatVersionUnknown': CompatVersionUnknown,
'CompatibilityTable': CompatibilityTable,
'EmbedCompatTable': EmbedCompatTable,
'HTMLElement': KumaHTMLElement,
'Spec2': Spec2,
'SpecName': SpecName,
'WebkitBug': WebkitBug,
'cssbox': CSSBox,
'cssxref': CSSxRef,
'deprecated_inline': DeprecatedInline,
'domeventxref': DOMEventXRef,
'domxref': DOMxRef,
'event': Event,
'exception': DOMException,
'experimental_inline': ExperimentalInline,
'geckoRelease': GeckoRelease,
'htmlattrxref': HTMLAttrXRef,
'jsxref': JSxRef,
'non-standard_inline': NonStandardInline,
'not_standard_inline': NotStandardInline,
'obsolete_inline': ObsoleteInline,
'property_prefix': PropertyPrefix,
'xref_cssangle': XrefCSSAngle,
'xref_csscolorvalue': XrefCSSColorValue,
'xref_cssgradient': XrefCSSGradient,
'xref_cssimage': XrefCSSImage,
'xref_cssinteger': XrefCSSInteger,
'xref_csslength': XrefCSSLength,
'xref_cssnumber': XrefCSSNumber,
'xref_csspercentage': XrefCSSPercentage,
'xref_cssstring': XrefCSSString,
'xref_cssvisual': XrefCSSVisual,
}
def _kumascript_lookup(self, name):
"""
Get the proper name and class for a KumaScript name.
MDN does case-insensitive matching of KumaScript names.
"""
if self._kumascript_proper_names is None:
self._kumascript_proper_names = {}
for k in self.known_kumascript.keys():
self._kumascript_proper_names[k.lower()] = k
proper_name = self._kumascript_proper_names.get(name.lower())
return self.known_kumascript.get(proper_name)
def visit_kumascript(self, node, children):
"""Process a KumaScript macro."""
esc0, name, arglist, esc1 = children
assert isinstance(name, text_type), type(name)
if isinstance(arglist, Node):
assert arglist.start == arglist.end
args = []
else:
assert isinstance(arglist, list), type(arglist)
assert len(arglist) == 1
args = arglist[0]
assert isinstance(args, list), type(args)
if args == ['']:
args = []
ks_cls = self._kumascript_lookup(name)
init_args = {'args': args, 'scope': self.scope}
if ks_cls is None:
ks_cls = UnknownKumaScript
init_args['name'] = name
if issubclass(ks_cls, SpecKumaScript):
init_args['data'] = self.data
return self.process(ks_cls, node, **init_args)
visit_ks_name = HTMLVisitor._visit_content
def visit_ks_arglist(self, node, children):
f0, arg0, argrest, f1 = children
args = [arg0]
if isinstance(argrest, Node):
# No additional args
assert argrest.start == argrest.end
else:
for _, arg in argrest:
args.append(arg)
# Force to strings
arglist = []
for arg in args:
if arg is None:
arglist.append('')
else:
arglist.append(text_type(arg))
return arglist
def visit_ks_arg(self, node, children):
assert isinstance(children, list)
assert len(children) == 1
item = children[0]
assert isinstance(item, text_type)
return item or None
visit_ks_bare_arg = HTMLVisitor._visit_content
def visit_whynospec(self, node, children):
return self.process(WhyNoSpecBlock, node, scope=self.scope)
class KumaVisitor(BaseKumaVisitor):
"""Extract HTML structure from a MDN Kuma raw fragment.
Include extra policy for scraping pages for the importer:
- Converts <span>content</span> to "content", with issues
- Validate and cleanup <a> tags
- Keeps <div id="foo">, for detecting compat divs
- Keeps <td colspan=# rowspan=#>, for detecting spanning compat cells
- Keeps <th colspan=#>, for detecting spanning compat headers
- Keeps <h2 id="id" name="name">, for warning on mismatch
- Raises issues on all other attributes
"""
_default_attribute_actions = {None: 'ban'}
def visit_a_open(self, node, children):
"""Validate and cleanup <a> open tags."""
actions = self._default_attribute_actions.copy()
actions['href'] = 'must'
actions['title'] = 'drop'
actions['class'] = 'keep'
converted = self._visit_open(node, children, actions)
# Convert relative links to absolute links
attrs = converted.attributes.attrs
if 'href' in attrs:
href = attrs['href'].value
if href and href[0] == '/':
attrs['href'].value = MDN_DOMAIN + href
# Drop class attribute, warning if unexpected
if 'class' in attrs:
class_attr = attrs.pop('class')
for value in class_attr.value.split():
if value in ('external', 'external-icon'):
pass
else:
self.add_issue(
'unexpected_attribute', class_attr, node_type='a',
ident='class', value=value,
expected='the attribute href')
return converted
def visit_div_open(self, node, children):
"""Retain id attribute of <div> tags."""
actions = self._default_attribute_actions.copy()
actions['id'] = 'keep'
return self._visit_open(node, children, actions)
def visit_td_open(self, node, children):
"""Retain colspan and rowspan attributes of <td> tags."""
actions = self._default_attribute_actions.copy()
actions['colspan'] = 'keep'
actions['rowspan'] = 'keep'
return self._visit_open(node, children, actions)
def visit_th_open(self, node, children):
"""Retain colspan attribute of <th> tags."""
actions = self._default_attribute_actions.copy()
actions['colspan'] = 'keep'
return self._visit_open(node, children, actions)
def _visit_hn_open(self, node, children, actions=None, **kwargs):
"""Retain id and name attributes of <h#> tags."""
actions = self._default_attribute_actions.copy()
actions['id'] = 'keep'
actions['name'] = 'keep'
return self._visit_open(node, children, actions, **kwargs)
visit_h1_open = _visit_hn_open
visit_h2_open = _visit_hn_open
visit_h3_open = _visit_hn_open
visit_h4_open = _visit_hn_open
visit_h5_open = _visit_hn_open
visit_h6_open = _visit_hn_open
|
def distance_hamming(mot1, mot2):
    distance = 0
    L = len(mot1)
    L2 = len(mot2)
    if L == L2:
        for i in range(L):
            if mot1[i] != mot2[i]:
                distance += 1
        return distance
    else:
        print('Warning: the two words must be the same length!')
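# Quick self-check with a classic example (our addition): 'karolin' and
# 'kathrin' differ in exactly three positions.
assert distance_hamming('karolin', 'kathrin') == 3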
A = input('Enter a first word: ')
B = input('Enter a second word with the same number of letters: ')
X = distance_hamming(A, B)
print('The Hamming distance between', A, 'and', B, '=', X)
|
__author__ = 'bartek'
import ios
import myIos
import matplotlib.pyplot as plt
data = []
xpoints = []
ypoints = []
def save(Filename='out.csv'):
global data
myIos.write_data(data, Filename)
return True
def plotData(data):
for value in data:
xpoints.append(value[0])
ypoints.append(value[1])
plt.plot(xpoints,ypoints,'ro')
plt.show()
def generate():
global data
data = myIos.genRandomGaussianGroups(500,5)
plotData(data)
def plotSaved():
data = myIos.read_data('out.csv')
plotData(data)
def joe():
print 'bwanakubwa'
def menu(argument):
if argument =='s':
save()
elif argument =='n':
generate()
elif argument == 'p':
plotSaved()
elif argument == 'k':
joe()
elif argument =='e':
pass
# data = ios.read_data('out.csv')
k = "n"
while k != 'e':
k = raw_input('Save data (\'s\'), generate new set(\'n\'), plot saved data(\'p\'), exit(\'e\')?')
menu(k)
|
"""
WEB FRAME 配置文件
"""
# frame ip ='0.0.0.0'
frame_ip = '0.0.0.0'
frame_port = 8080
DEBUG = True
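# Typical consumption of these settings (our sketch; the server object and
# module name are hypothetical):
#   import config
#   server.run(host=config.frame_ip, port=config.frame_port, debug=config.DEBUG)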
|
import soundLayer
soundLayer = soundLayer.SoundLayer()  # note: rebinds the module name to a SoundLayer instance
globalStream=None
def loop():
while soundLayer.should_play():
pass
#This function schedules a sound.
#@path The path to the sound file to play
#@startOffset Time to wait before playing the sound
#@tag A textual tag to attach to the sound
def scheduleSound(path, startOffset=0, tag=""):
    # NOTE: startOffset and tag are not implemented yet; the sound is
    # loaded and played immediately
    soundLayer.load_sound(path)
    soundLayer.play_sound(path)
#This function loops a sound.
#@path The path to the sound file to play
#@startOffset Time to wait before playing the sound
#@loop The number of loops to play the sound for (this can be overridden if the duration has been reached)
#@duration How long the sound should be looped for (this can be overridden if the number of loops is reached before)
def loopSound(path, startOffset=0, loop=1, duration=1000, tag=""):
pass
#This function fades the volume of all the given tags to 0 over the given period of time
#@duration How long it should take to fade the volumes to 0
def volumeFadeTags(tag, duration=1000):
pass
#This function plays a chord progression.
#@startChord The chord to start the progression with
#@endChord The chord to end the progression with
#@interval How long each chord should be played for
def playProgression(startChord, endChord, interval=500):
pass
def playChord(chord, duration=500):
pass
def callFunction(functionName, args=[], duration=400, startOffset=0):
pass
###
def lowPassFilter():
pass
|
from math import floor
from copy import copy
def max_heapify(nlist, index):
    # In a 1-indexed heap, LEFT = 2 * parent and RIGHT = 2 * parent + 1;
    # the expressions below convert those to zero-based indices.
left = (index+1) * 2 - 1
right = (index+1) * 2
largest = index
if left < len(nlist) and nlist[left] > nlist[largest]:
largest = left
if right < len(nlist) and nlist[right] > nlist[largest]:
largest = right
if largest != index:
nlist[largest], nlist[index] = nlist[index], nlist[largest]
max_heapify(nlist, largest)
# heap property is that parent must always be larger than children
# Always remember that heap arrays are always 1 indexed
def build_max_heap(nlist):
# Because 1 element nodes are guaranteed to satisfy the heap
# property we can simply start from size/2 - 1 (ie the first left node)
for i in reversed(range(floor(len(nlist)/2))):
max_heapify(nlist, i)
# Not the most efficient way to perform a merge sort
# because we are creating so many arrays per iteration
# Ideally you would pass boundary markers start and end
# and perform a heap sort within the specified region alone
def heapsort(nlist):
result = []
current = copy(nlist)
for i in range(len(nlist)):
build_max_heap(current)
result.append(current[0])
current = current[1:]
return result
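# A sketch of the in-place variant suggested in the comment above: sift within
# a shrinking boundary instead of copying arrays (names here are ours). Note
# that it sorts ascending, while heapsort() above returns descending order.
def heapsort_inplace(nlist):
    def sift_down(heap_size, index):
        left, right = 2 * index + 1, 2 * index + 2
        largest = index
        if left < heap_size and nlist[left] > nlist[largest]:
            largest = left
        if right < heap_size and nlist[right] > nlist[largest]:
            largest = right
        if largest != index:
            nlist[largest], nlist[index] = nlist[index], nlist[largest]
            sift_down(heap_size, largest)
    # build a max-heap over the whole list
    for i in reversed(range(len(nlist) // 2)):
        sift_down(len(nlist), i)
    # repeatedly swap the max to the end and re-sift the shrunken heap
    for end in range(len(nlist) - 1, 0, -1):
        nlist[0], nlist[end] = nlist[end], nlist[0]
        sift_down(end, 0)
    return nlist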
if __name__ == '__main__':
import random
n = 10
a = random.sample(range(n), n)
a = heapsort(a)
print(a)
|
# -*- coding:utf-8 -*-
# Author: Jorden Hai
class School(object):
def __init__(self,name,addr):
self.name = name
self.addr = addr
self.students = []
self.grades = []
self.staffs = []
self.courses = []
    def create_course(self, course_kind):
        self.courses.append(course_kind)
class Course(object):
def __init__(self,type,price,time,addr):
self.type = type
self.price = price
self.time = time
self.addr = addr
class Schoolmember(object):
pass
class Teacher(Schoolmember):
pass
class Student(Schoolmember):
pass
|
"""Setup script."""
import re
from os import path
from io import open
from setuptools import setup, find_packages
__encode__ = 'utf8'
DISTNAME = 'pyss3'
DESCRIPTION = ("Python package that implements the SS3 text classifier (with "
"visualizations tools for XAI)")
AUTHOR = 'Sergio Burdisso'
AUTHOR_EMAIL = 'sergio.burdisso@gmail.com, sburdisso@unsl.edu.ar'
URL = "https://github.com/sergioburdisso/pyss3"
LICENSE = "MIT License"
CLASSIFIERS = ['Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Visualization',
'Operating System :: OS Independent']
__cwd__ = path.abspath(path.dirname(__file__))
__readme_file__ = path.join(__cwd__, 'README.md')
with open(__readme_file__, encoding=__encode__) as readme:
LONG_DESCRIPTION = readme.read()
_version_re__ = r"__version__\s*=\s*['\"]([^'\"]+)['\"]"
__init_file__ = path.join(__cwd__, '%s/__init__.py' % DISTNAME)
with open(__init_file__, encoding=__encode__) as __init__py:
VERSION = re.search(_version_re__, __init__py.read()).group(1)
if __name__ == "__main__":
setup(name=DISTNAME,
version=VERSION,
maintainer=AUTHOR,
maintainer_email=AUTHOR_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
packages=find_packages(),
package_data={DISTNAME: ['resources/**/*', 'resources/**/**/*']},
include_package_data=True,
classifiers=CLASSIFIERS,
python_requires='>=2.7',
install_requires=['six',
'cython',
'scikit-learn[alldeps]>=0.20',
'tqdm>=4.8.4',
'matplotlib',
'iterative-stratification'],
          tests_require=['pytest',
                         'pytest-mock',
                         'pytest-cov>=2.5',
                         'codecov',
'flake8',
'six',
'cython',
'scikit-learn[alldeps]>=0.20',
'tqdm>=4.8.4',
'matplotlib',
'iterative-stratification'],
extras_require={
':python_version >= "3"': ['stylecloud'],
},
entry_points={'console_scripts': ['pyss3=pyss3.cmd_line:main']})
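# Typical usage of this script (our note): install the package locally with
#   pip install .
# or build source/wheel distributions with
#   python setup.py sdist bdist_wheel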
|
#Creating Training Sample
f = open('ts.txt', 'w')
training_classes = ['drone', 'fighterjet', 'helicopter', 'missile',
                    'passengerplane', 'rocket']
for label in training_classes:
    for i in range(1, 1001):
        f.write('./datasetnb/' + label + ' (' + str(i) + ').jpg-' + label + '\n')
f.close()
#Creating Testing Sample
f2 = open('tss.txt', 'w')
testing_prefixes = ['ddd', 'fff', 'hhh', 'mmm', 'ppp', 'rrr']
for prefix in testing_prefixes:
    for i in range(1, 401):
        f2.write('./datasetnb/' + prefix + ' (' + str(i) + ').jpg' + '\n')
f2.close()
|
from abc import ABC
from colorama import Fore, Style
class AbstractConsole(ABC):
@staticmethod
def color(color: str, text: str) -> str:
return color + text + Style.RESET_ALL
@staticmethod
def alert(text: str) -> str:
return AbstractConsole.color(Fore.RED, text)
@staticmethod
def warning(text: str) -> str:
return AbstractConsole.color(Fore.YELLOW, text)
@staticmethod
def success(text: str) -> str:
return AbstractConsole.color(Fore.GREEN, text)
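# Usage sketch (our example; calling colorama.init() first may be needed on
# Windows for the ANSI codes to render):
if __name__ == "__main__":
    print(AbstractConsole.alert("something failed"))
    print(AbstractConsole.warning("check the configuration"))
    print(AbstractConsole.success("all good"))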
|
from deck import Deck
from players.human_cli_player import Player
from functools import reduce
import random
from pprint import pprint
from copy import deepcopy
import colorama
from colorama import Fore, Back, Style
class Log:
def __init__(self):
self.__states = []
def log_state(self, state):
self.__states.append(deepcopy(state))
return len(self.__states)
def get_length(self):
return len(self.__states)
def get_state(self, n):
return deepcopy(self.__states[n].copy())
class Hanabi:
players = []
deck = None
discard_pile = []
clock = 8
fuse = 3
fireworks = {'green': 0,
'white': 0,
'red': 0,
'yellow': 0,
'blue': 0}
last_round_counter = 0
def __init__(self, n_players):
        self.turn_number = 0
        # shadow the class-level defaults so separate games don't share state
        self.players = []
        self.fireworks = {'green': 0,
                          'white': 0,
                          'red': 0,
                          'yellow': 0,
                          'blue': 0}
self.discard_pile = []
self.clock = 8
self.fuse = 3
self.last_round_counter = 0
self.last_round = False
self.hints = {}
self.available_hints = []
self.last_action_taken = None
self.deck = Deck()
self.deck.shuffle()
self.n_players = n_players
self.player_hands = {}
for i in range(n_players):
self.players.append(Player(i))
self.hints[i] = []
if n_players <= 3:
n_cards = 5
else:
n_cards = 4
for p in self.players:
#p.hand = [None]*n_cards
self.player_hands[p.player_number] = [None]*n_cards
for card_num in range(n_cards):
#self.draw_card(p, card_num)
#p.draw(self.deck)
self.draw_card(p.player_number, card_num)
self.current_player = random.randint(0,n_players-1)
for p in self.players:
p.log_observed_state(self.observed_state(p.player_number))
# log = Log()
# log.log_state(self.game_state())
def game_state(self):
state = {'fireworks': self.fireworks,
'clock': self.clock,
'fuse': self.fuse,
'last_round': self.last_round,
'last_round_counter': self.last_round_counter,
'current_player': self.current_player,
'turn_number': self.turn_number,
'cards_left': self.deck.cards_left(),
'player_hands': self.player_hands,
'discard_pile': self.discard_pile,
'last_action_taken': self.last_action_taken
}
return state
def observed_state(self, player_number):
observed_hands = deepcopy(self.player_hands)
masked_hand = list(map(lambda x: x != None, self.player_hands[player_number]))
observed_hands[player_number] = masked_hand
state = {'fireworks': self.fireworks,
'clock': self.clock,
'fuse': self.fuse,
'last_round': self.last_round,
'last_round_counter': self.last_round_counter,
'current_player': self.current_player,
'turn_number': self.turn_number,
'cards_left': self.deck.cards_left(),
'player_hands': observed_hands,
'discard_pile': self.discard_pile,
'hints': self.hints,
'last_action_taken': self.last_action_taken
}
return deepcopy(state)
def next_turn(self):
if self.last_round:
print("LAST ROUND")
self.last_round_counter += 1
# Print game state info
print("\n")
print("-"*20)
print("Player #{}'s turn.\n".format(self.current_player))
print("Time left: {}".format(self.clock))
print("Fuse length: {}\n".format(self.fuse))
print("Discarded/played cards: {}\n".format(self.discard_pile))
print("{} cards left in deck. \n".format(self.deck.cards_left()))
#print("\nFireworks: {}\n".format(self.fireworks))
self.print_fireworks()
# print("\nOther players' hands:")
# for p in self.players:
# if p.player_number == self.current_player:
# pass
# else:
# print("Player #{}: {}".format(p.player_number, self.player_hands[p.player_number]))
print("")
self.print_observed_hands(self.current_player)
print("")
#print("\nHints given:")
#pprint(self.hints)
self.print_hints()
# update hint options
#self.hint_options = self.generate_hints(self.current_player)
player_action = self.players[self.current_player].take_turn()
self.take_action(self.current_player, player_action)
self.current_player = (self.current_player + 1) % self.n_players
self.turn_number += 1
for p in self.players:
p.log_observed_state(self.observed_state(p.player_number))
def take_action(self, player_number, action):
if action['action'] == 'discard_card':
card_num = action['action_data']
self.discard_card(player_number, card_num)
elif action['action'] == 'play_card':
card_num = action['action_data']
self.play_card(player_number, card_num)
elif action['action'] == 'give_hint':
# hint_num = action['num']
# hint = self.hint_options[hint_num]
# self.hints[hint['player']].append({'cards': hint['cards'], 'card_type': hint['card_type']})
hint = action['action_data']
if hint not in self.hint_options(player_number):
print("Invalid Hint.")
return -1
else:
self.hints[hint['player']].append({'cards': hint['cards'], 'card_type': hint['card_type']})
self.clock -= 1
print("Hint: {} given. 1 hour lost.".format(hint))
else:
print("Error. Invalid Action")
self.last_action_taken = {'player': player_number,
'action': action['action'],
'action_data': action['action_data']}
def hint_options(self, player_number):
hint_options = []
for p_num, hand in self.player_hands.items():
if p_num == player_number:
continue
card_types = set([i[0] for i in hand] + [i[1] for i in hand])
for t in card_types:
card_nums = []
if type(t) == str:
for card_num, card in enumerate(hand):
if card[0] == t:
card_nums.append(card_num)
else:
for card_num, card in enumerate(hand):
if card[1] == t:
card_nums.append(card_num)
hint = {'player': p_num,
'cards': card_nums,
'card_type': t}
hint_options.append(hint)
return hint_options
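    # Example of a single generated hint (illustrative values): telling
    # player 2 that cards 0 and 3 of their hand are red looks like
    #   {'player': 2, 'cards': [0, 3], 'card_type': 'red'}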
# def give_hint(self, player_number, hint):
# self.hints[player_number].append(hint)
# self.clock -= 1
# def discard_card(self, player, card_num):
# card = player.hand[card_num]
# self.discard_pile.append(card)
# print("Card {} discarded.".format(card))
# self.clock = min(self.clock + 1, 8)
# self.draw_card(player, card_num)
def discard_card(self, player_number, card_num):
card = self.player_hands[player_number][card_num]
self.discard_pile.append(card)
print("Card {} discarded.".format(card))
self.clock = min(self.clock + 1, 8)
self.draw_card(player_number, card_num)
# def draw_card(self, player, card_num):
# # draw new card and put it in the place of the old one
# if self.deck.cards_left() > 0:
# player.hand[card_num] = self.deck.pop()
# else:
# player.hand[card_num] = None
# print("No more cards in deck. No card drawn")
# pass
# # remove hints relating to the old card
# player_hints = self.hints[player.player_number]
# new_hints = []
# for h in player_hints:
# try:
# h[0].remove(card_num)
# except:
# pass
# if len(h[0]) == 0:
# continue
# else:
# new_hints.append(h)
# self.hints[player.player_number] = new_hints
# #print(self.hints[player.player_number])
def draw_card(self, player_number, card_num):
# draw new card and put it in the place of the old one
if self.deck.cards_left() > 0:
self.player_hands[player_number][card_num] = self.deck.pop()
else:
self.player_hands[player_number][card_num] = None
print("No more cards in deck. No card drawn")
return 0
# print("hints for player {}".format(player.player_number))
#
# print(player_hints)
# remove hints relating to the old card
print("removing hints")
print("card_num: {}".format(card_num))
player_hints = self.hints[player_number]
print("player_hints: {}".format(player_hints))
new_hints = []
        for h in player_hints:
            try:
                h['cards'].remove(card_num)
            except ValueError:
                # this hint does not mention the drawn card; keep it as-is
                pass
            if len(h['cards']) == 0:
                continue
            else:
                new_hints.append(h)
        self.hints[player_number] = new_hints
        print(self.hints[player_number])
# def play_card(self, player, card_num):
# # play the card
# card = player.hand[card_num]
# self.discard_pile.append(card)
# stack_count = self.fireworks[card[0]]
# if card[1] == stack_count + 1:
# self.fireworks[card[0]] += 1
# print("Card {} successfully added".format(card))
# print("Fireworks: {}".format(self.fireworks))
# # 5 - firework complete
# if self.fireworks[card[0]] == 5:
# print("{} firework complete! 1 hour has been added to the clock.".format(card[0]))
# self.clock = min(self.clock + 1, 8)
# else:
# print("Card {} does not match any fireworks.".format(card))
# print("Card discarded and fuse has been shortened. Fuse: {}".format(self.fuse))
# self.fuse -= 1
# self.draw_card(player, card_num)
def play_card(self, player_number, card_num):
# play the card
card = self.player_hands[player_number][card_num]
self.discard_pile.append(card)
stack_count = self.fireworks[card[0]]
if card[1] == stack_count + 1:
self.fireworks[card[0]] += 1
print("Card {} successfully added".format(card))
print("Fireworks: {}".format(self.fireworks))
# 5 - firework complete
if self.fireworks[card[0]] == 5:
print("{} firework complete! 1 hour has been added to the clock.".format(card[0]))
self.clock = min(self.clock + 1, 8)
else:
print("Card {} does not match any fireworks.".format(card))
self.fuse -= 1
print("Card discarded and fuse has been shortened. Fuse: {}".format(self.fuse))
self.draw_card(player_number, card_num)
def end_game(self):
print("*"*20)
final_score = reduce(lambda x,y: x+y, self.fireworks.values())
print("Game Over. Final Score: {}".format(final_score))
if final_score <= 5:
print("Horrible.")
elif final_score <= 10:
print("Mediocre.")
elif final_score <= 15:
print("Honorable Attempt")
elif final_score <= 20:
print("Excellent, crowd pleasing.")
elif final_score <= 24:
print("Amazing, they will be talking about it for weeks!")
elif final_score >= 25:
print("Legendary, everyone left speechless, stars in their eyes!")
return final_score
def get_color_tag(self, color):
if color == 'white':
return Style.BRIGHT + Fore.WHITE
elif color == 'yellow':
return Style.BRIGHT + Fore.YELLOW
elif color == 'red':
return Style.BRIGHT + Fore.RED
elif color == 'green':
return Style.BRIGHT + Fore.GREEN
elif color == 'blue':
return Style.BRIGHT + Fore.BLUE
def print_fireworks(self):
print("Fireworks: ", end =' ')
print(Style.BRIGHT + Fore.YELLOW + 'yellow:{}'.format(self.fireworks['yellow']) + Style.RESET_ALL, end = ' ')
print(Style.BRIGHT + Fore.RED + 'red:{}'.format(self.fireworks['red']) + Style.RESET_ALL, end = ' ')
print(Style.BRIGHT + Fore.WHITE + 'white:{}'.format(self.fireworks['white']) + Style.RESET_ALL, end = ' ')
print(Style.BRIGHT + Fore.GREEN + 'green:{}'.format(self.fireworks['green']) + Style.RESET_ALL, end = ' ')
print(Style.BRIGHT + Fore.BLUE + 'blue:{}'.format(self.fireworks['blue']) + Style.RESET_ALL)
def print_observed_hands(self, player_number):
print("Hands:")
for p in self.players:
print(" Player #{}: ".format(p.player_number), end = '')
if p.player_number == self.current_player:
print(list(map(lambda x: x != None, self.player_hands[player_number])), end='')
else:
for c in self.player_hands[p.player_number]:
if c != None:
color_tag = self.get_color_tag(c[0])
print(color_tag + c[0] + " " + str(c[1]) + Style.RESET_ALL, end = ' ')
else:
print("None", end = ' ')
print("")
def print_hints(self):
def hint_str(hint):
if len(hint['cards']) == 1:
return ("Card {} is {}.".format( hint['cards'], hint['card_type']))
else:
return ("Cards {} are {}.".format(hint['cards'], hint['card_type']))
print("Hints Given:")
for p, hints in self.hints.items():
print(" Player #{}: ".format(p), end = ' ')
for h in hints:
print(hint_str(h), end = ' ')
print("")
def game_end_check(game):
return (game.fuse == 0
or game.last_round_counter == game.n_players
or reduce(lambda x,y: x+y, game.fireworks.values()) == 25)
if __name__ == "__main__":
colorama.init()
print("Hanabi")
print("Race the clock... build the fireworks... launch your rockets!\n")
try:
n_players = int(input("# players (2-5): "))
except:
n_players = None
    while n_players is None or n_players < 2 or n_players > 5:
print("Wrong # of players.")
try:
n_players = int(input("# players (2-5): "))
except:
n_players = None
game = Hanabi(n_players)
while not game_end_check(game):
game.last_round = (game.deck.cards_left() == 0)
game.next_turn()
game.end_game()
|
from django.urls import path
import computerapp.views as views
app_name = 'computerapp'
urlpatterns = [
    path('computer/', views.computer, name='computer'),
]
|
# -*- python -*-
# Assignment: Compare Arrays
# Write a program that compares two lists and prints a message depending on whether the inputs are identical.
# Your program should be able to accept and compare two lists: list_one and list_two.
# - If both lists are identical print "The lists are the same".
# - If they are not identical print "The lists are not the same."
# Try the following test cases for lists one and two:
list1_one = [1,2,5,6,2]
list1_two = [1,2,5,6,2]
list2_one = [1,2,5,6,5]
list2_two = [1,2,5,6,5,3]
list3_one = [1,2,5,6,5,16]
list3_two = [1,2,5,6,5]
list4_one = ['celery','carrots','bread','milk']
list4_two = ['celery','carrots','bread','cream']
######################################################################
def compare_lists(l1, l2):
    print("Debug: l1 =", l1)
    print("Debug: l2 =", l2)
    if l1 == l2:
        print("The lists ARE the same.")
    else:
        print("The lists are NOT the same.")
compare_lists(list1_one, list1_two)
compare_lists(list2_one, list2_two)
compare_lists(list3_one, list3_two)
compare_lists(list4_one, list4_two)
|
from datetime import datetime
from decimal import Decimal, ROUND_HALF_DOWN
from functools import reduce
from flask import Blueprint, request
from marshmallow import fields, validate
from sqlalchemy import func, text
import grant.utils.admin as admin
import grant.utils.auth as auth
from grant.ccr.models import CCR, ccrs_schema, ccr_schema
from grant.comment.models import Comment, user_comments_schema, admin_comments_schema, admin_comment_schema
from grant.email.send import generate_email, send_email
from grant.extensions import db
from grant.milestone.models import Milestone
from grant.parser import body, query, paginated_fields
from grant.proposal.models import (
Proposal,
ProposalArbiter,
ProposalContribution,
proposals_schema,
proposal_schema,
user_proposal_contributions_schema,
admin_proposal_contribution_schema,
admin_proposal_contributions_schema,
)
from grant.rfp.models import RFP, admin_rfp_schema, admin_rfps_schema
from grant.user.models import User, admin_users_schema, admin_user_schema
from grant.utils import pagination
from grant.utils.enums import (
ProposalStatus,
ProposalStage,
ContributionStatus,
ProposalArbiterStatus,
MilestoneStage,
RFPStatus,
CCRStatus
)
from grant.utils.misc import make_url, make_explore_url
from .example_emails import example_email_args
blueprint = Blueprint('admin', __name__, url_prefix='/api/v1/admin')
def make_2fa_state():
return {
"isLoginFresh": admin.is_auth_fresh(),
"has2fa": admin.has_2fa_setup(),
"is2faAuthed": admin.admin_is_2fa_authed(),
"backupCodeCount": admin.backup_code_count(),
"isEmailVerified": auth.is_email_verified(),
}
def make_login_state():
return {
"isLoggedIn": admin.admin_is_authed(),
"is2faAuthed": admin.admin_is_2fa_authed()
}
@blueprint.route("/checklogin", methods=["GET"])
def loggedin():
return make_login_state()
@blueprint.route("/login", methods=["POST"])
@body({
"username": fields.Str(required=False, missing=None),
"password": fields.Str(required=False, missing=None)
})
def login(username, password):
if auth.auth_user(username, password):
if admin.admin_is_authed():
return make_login_state()
return {"message": "Username or password incorrect."}, 401
@blueprint.route("/refresh", methods=["POST"])
@body({
"password": fields.Str(required=True)
})
def refresh(password):
if auth.refresh_auth(password):
return make_login_state()
else:
return {"message": "Username or password incorrect."}, 401
@blueprint.route("/2fa", methods=["GET"])
def get_2fa():
if not admin.admin_is_authed():
return {"message": "Must be authenticated"}, 403
return make_2fa_state()
@blueprint.route("/2fa/init", methods=["GET"])
def get_2fa_init():
admin.throw_on_2fa_not_allowed()
return admin.make_2fa_setup()
@blueprint.route("/2fa/enable", methods=["POST"])
@body({
"backupCodes": fields.List(fields.Str(), required=True),
"totpSecret": fields.Str(required=True),
"verifyCode": fields.Str(required=True)
})
def post_2fa_enable(backup_codes, totp_secret, verify_code):
admin.throw_on_2fa_not_allowed()
admin.check_and_set_2fa_setup(backup_codes, totp_secret, verify_code)
db.session.commit()
return make_2fa_state()
@blueprint.route("/2fa/verify", methods=["POST"])
@body({
"verifyCode": fields.Str(required=True)
})
def post_2fa_verify(verify_code):
admin.throw_on_2fa_not_allowed(allow_stale=True)
admin.admin_auth_2fa(verify_code)
db.session.commit()
return make_2fa_state()
@blueprint.route("/logout", methods=["GET"])
def logout():
admin.logout()
return {
"isLoggedIn": False,
"is2faAuthed": False
}
@blueprint.route("/stats", methods=["GET"])
@admin.admin_auth_required
def stats():
user_count = db.session.query(func.count(User.id)).scalar()
proposal_count = db.session.query(func.count(Proposal.id)).scalar()
ccr_pending_count = db.session.query(func.count(CCR.id)) \
.filter(CCR.status == CCRStatus.PENDING) \
.scalar()
proposal_pending_count = db.session.query(func.count(Proposal.id)) \
.filter(Proposal.status == ProposalStatus.PENDING) \
.scalar()
proposal_no_arbiter_count = db.session.query(func.count(Proposal.id)) \
.join(Proposal.arbiter) \
.filter(Proposal.status == ProposalStatus.LIVE) \
.filter(ProposalArbiter.status == ProposalArbiterStatus.MISSING) \
.filter(Proposal.stage != ProposalStage.CANCELED) \
.filter(Proposal.accepted_with_funding == True) \
.scalar()
proposal_milestone_payouts_count = db.session.query(func.count(Proposal.id)) \
.join(Proposal.milestones) \
.filter(Proposal.status == ProposalStatus.LIVE) \
.filter(Proposal.stage != ProposalStage.CANCELED) \
.filter(Milestone.stage == MilestoneStage.ACCEPTED) \
.scalar()
# Count contributions on proposals that didn't get funded for users who have specified a refund address
# contribution_refundable_count = db.session.query(func.count(ProposalContribution.id)) \
# .filter(ProposalContribution.refund_tx_id == None) \
# .filter(ProposalContribution.staking == False) \
# .filter(ProposalContribution.status == ContributionStatus.CONFIRMED) \
# .join(Proposal) \
# .filter(or_(
# Proposal.stage == ProposalStage.FAILED,
# Proposal.stage == ProposalStage.CANCELED,
# )) \
# .join(ProposalContribution.user) \
# .join(UserSettings) \
# .filter(UserSettings.refund_address != None) \
# .scalar()
return {
"userCount": user_count,
"ccrPendingCount": ccr_pending_count,
"proposalCount": proposal_count,
"proposalPendingCount": proposal_pending_count,
"proposalNoArbiterCount": proposal_no_arbiter_count,
"proposalMilestonePayoutsCount": proposal_milestone_payouts_count,
"contributionRefundableCount": 0,
}
# USERS
@blueprint.route('/users/<user_id>', methods=['DELETE'])
@admin.admin_auth_required
def delete_user(user_id):
user = User.query.filter(User.id == user_id).first()
if not user:
return {"message": "No user matching that id"}, 404
db.session.delete(user)
db.session.commit()
return {"message": "ok"}, 200
@blueprint.route("/users", methods=["GET"])
@query(paginated_fields)
@admin.admin_auth_required
def get_users(page, filters, search, sort):
filters_workaround = request.args.getlist('filters[]')
page = pagination.user(
schema=admin_users_schema,
query=User.query,
page=page,
filters=filters_workaround,
search=search,
sort=sort,
)
return page
@blueprint.route('/users/<id>', methods=['GET'])
@admin.admin_auth_required
def get_user(id):
user_db = User.query.filter(User.id == id).first()
if user_db:
user = admin_user_schema.dump(user_db)
user_proposals = Proposal.query.filter(Proposal.team.any(id=user['userid'])).all()
user['proposals'] = proposals_schema.dump(user_proposals)
user_comments = Comment.get_by_user(user_db)
user['comments'] = user_comments_schema.dump(user_comments)
contributions = ProposalContribution.get_by_userid(user_db.id)
contributions_dump = user_proposal_contributions_schema.dump(contributions)
user["contributions"] = contributions_dump
return user
return {"message": f"Could not find user with id {id}"}, 404
@blueprint.route('/users/<user_id>', methods=['PUT'])
@body({
"silenced": fields.Bool(required=False, missing=None),
"banned": fields.Bool(required=False, missing=None),
"bannedReason": fields.Str(required=False, missing=None),
"isAdmin": fields.Bool(required=False, missing=None),
})
@admin.admin_auth_required
def edit_user(user_id, silenced, banned, banned_reason, is_admin):
user = User.query.filter(User.id == user_id).first()
if not user:
return {"message": f"Could not find user with id {id}"}, 404
if silenced is not None:
user.set_silenced(silenced)
if banned is not None:
if banned and not banned_reason: # if banned true, provide reason
return {"message": "Please include reason for banning"}, 417
user.set_banned(banned, banned_reason)
if is_admin is not None:
user.set_admin(is_admin)
db.session.commit()
return admin_user_schema.dump(user)
# ARBITERS
@blueprint.route("/arbiters", methods=["GET"])
@query({
"search": fields.Str(required=False, missing=None)
})
@admin.admin_auth_required
def get_arbiters(search):
results = []
error = None
    if not search or len(search) < 3:
        error = 'search query must be at least 3 characters long'
else:
users = User.query.filter(
User.email_address.ilike(f'%{search}%') | User.display_name.ilike(f'%{search}%')
).order_by(User.display_name).all()
results = admin_users_schema.dump(users)
return {
'results': results,
'search': search,
'error': error
}
@blueprint.route('/arbiters', methods=['PUT'])
@body({
"proposalId": fields.Int(required=True),
"userId": fields.Int(required=True),
})
@admin.admin_auth_required
def set_arbiter(proposal_id, user_id):
proposal = Proposal.query.filter(Proposal.id == proposal_id).first()
if not proposal:
return {"message": "Proposal not found"}, 404
for member in proposal.team:
if member.id == user_id:
return {"message": "Cannot set proposal team member as arbiter"}, 400
if proposal.is_failed:
return {"message": "Cannot set arbiter on failed proposal"}, 400
if proposal.version == '2' and not proposal.accepted_with_funding:
return {"message": "Cannot set arbiter, proposal has not been accepted with funding"}, 400
user = User.query.filter(User.id == user_id).first()
if not user:
return {"message": "User not found"}, 404
# send email
code = user.email_verification.code
send_email(user.email_address, 'proposal_arbiter', {
'proposal': proposal,
'proposal_url': make_url(f'/proposals/{proposal.id}'),
'accept_url': make_url(f'/email/arbiter?code={code}&proposalId={proposal.id}'),
})
proposal.arbiter.user = user
proposal.arbiter.status = ProposalArbiterStatus.NOMINATED
db.session.add(proposal.arbiter)
db.session.commit()
return {
'proposal': proposal_schema.dump(proposal),
'user': admin_user_schema.dump(user)
}, 200
# PROPOSALS
@blueprint.route("/proposals", methods=["GET"])
@query(paginated_fields)
@admin.admin_auth_required
def get_proposals(page, filters, search, sort):
filters_workaround = request.args.getlist('filters[]')
page = pagination.proposal(
schema=proposals_schema,
query=Proposal.query.filter(Proposal.status.notin_([ProposalStatus.ARCHIVED])),
page=page,
filters=filters_workaround,
search=search,
sort=sort,
)
return page
@blueprint.route('/proposals/<id>', methods=['GET'])
@admin.admin_auth_required
def get_proposal(id):
proposal = Proposal.query.filter(Proposal.id == id).first()
if proposal:
return proposal_schema.dump(proposal)
return {"message": f"Could not find proposal with id {id}"}, 404
@blueprint.route('/proposals/<id>', methods=['DELETE'])
@admin.admin_auth_required
def delete_proposal(id):
return {"message": "Not implemented."}, 400
@blueprint.route('/proposals/<proposal_id>/discussion', methods=['PUT'])
@body({
"isOpenForDiscussion": fields.Bool(required=True),
"rejectReason": fields.Str(required=False, missing=None)
})
@admin.admin_auth_required
def open_proposal_for_discussion(proposal_id, is_open_for_discussion, reject_reason=None):
proposal = Proposal.query.get(proposal_id)
if not proposal:
return {"message": "No Proposal found."}, 404
proposal.approve_discussion(is_open_for_discussion, reject_reason)
db.session.commit()
return proposal_schema.dump(proposal)
@blueprint.route('/proposals/<id>/approve-kyc', methods=['PUT'])
@admin.admin_auth_required
def approve_proposal_kyc(id):
proposal = Proposal.query.get(id)
if not proposal:
return {"message": "No proposal found."}, 404
proposal.kyc_approved = True
db.session.add(proposal)
db.session.commit()
return proposal_schema.dump(proposal)
@blueprint.route('/proposals/<id>/adjust-funder', methods=['PUT'])
@body({
"fundedByZomg": fields.Bool(required=True),
})
@admin.admin_auth_required
def adjust_funder(id, funded_by_zomg):
proposal = Proposal.query.get(id)
if not proposal:
return {"message": "No proposal found."}, 404
proposal.funded_by_zomg = funded_by_zomg
db.session.add(proposal)
db.session.commit()
return proposal_schema.dump(proposal)
@blueprint.route('/proposals/<id>/accept', methods=['PUT'])
@body({
"isAccepted": fields.Bool(required=True),
"withFunding": fields.Bool(required=False, missing=None),
"changesRequestedReason": fields.Str(required=False, missing=None)
})
@admin.admin_auth_required
def accept_proposal(id, is_accepted, with_funding, changes_requested_reason):
proposal = Proposal.query.get(id)
if not proposal:
return {"message": "No proposal found."}, 404
if is_accepted:
proposal.accept_proposal(with_funding)
if with_funding:
Milestone.set_v2_date_estimates(proposal)
else:
proposal.request_changes_discussion(changes_requested_reason)
db.session.add(proposal)
db.session.commit()
return proposal_schema.dump(proposal)
@blueprint.route('/proposals/<proposal_id>/reject_permanently', methods=['PUT'])
@body({
"rejectReason": fields.Str(required=True, missing=None)
})
@admin.admin_auth_required
def reject_permanently_proposal(proposal_id, reject_reason):
proposal = Proposal.query.get(proposal_id)
if not proposal:
return {"message": "No proposal found."}, 404
reject_permanently_statuses = [
ProposalStatus.REJECTED,
ProposalStatus.PENDING
]
if proposal.status not in reject_permanently_statuses:
return {"message": "Proposal status is not REJECTED or PENDING."}, 401
proposal.status = ProposalStatus.REJECTED_PERMANENTLY
proposal.reject_reason = reject_reason
db.session.add(proposal)
db.session.commit()
for user in proposal.team:
send_email(user.email_address, 'proposal_rejected_permanently', {
'user': user,
'proposal': proposal,
'proposal_url': make_url(f'/proposals/{proposal.id}'),
'admin_note': reject_reason,
'profile_rejected_url': make_url('/profile?tab=rejected'),
})
return proposal_schema.dump(proposal)
@blueprint.route('/proposals/<proposal_id>/resolve', methods=['PUT'])
@admin.admin_auth_required
def resolve_changes_discussion(proposal_id):
proposal = Proposal.query.get(proposal_id)
if not proposal:
return {"message": "No proposal found"}, 404
proposal.resolve_changes_discussion()
db.session.add(proposal)
db.session.commit()
return proposal_schema.dump(proposal)
@blueprint.route('/proposals/<id>/accept/fund', methods=['PUT'])
@admin.admin_auth_required
def change_proposal_to_accepted_with_funding(id):
proposal = Proposal.query.filter_by(id=id).first()
if not proposal:
return {"message": "No proposal found."}, 404
    if proposal.accepted_with_funding:
        return {"message": "Proposal already accepted with funding."}, 400
    if proposal.version != '2':
        return {"message": "Only version two proposals can be accepted with funding"}, 400
    if proposal.status not in (ProposalStatus.LIVE, ProposalStatus.APPROVED):
        return {"message": "Only live or approved proposals can be modified by this endpoint"}, 400
proposal.update_proposal_with_funding()
Milestone.set_v2_date_estimates(proposal)
db.session.add(proposal)
db.session.commit()
return proposal_schema.dump(proposal)
@blueprint.route('/proposals/<id>/cancel', methods=['PUT'])
@admin.admin_auth_required
def cancel_proposal(id):
proposal = Proposal.query.filter_by(id=id).first()
if not proposal:
return {"message": "No proposal found."}, 404
proposal.cancel()
db.session.add(proposal)
db.session.commit()
return proposal_schema.dump(proposal)
@blueprint.route("/proposals/<id>/milestone/<mid>/paid", methods=["PUT"])
@body({
"txId": fields.Str(required=True),
})
@admin.admin_auth_required
def paid_milestone_payout_request(id, mid, tx_id):
proposal = Proposal.query.filter_by(id=id).first()
if not proposal:
return {"message": "No proposal matching id"}, 404
if not proposal.is_funded:
return {"message": "Proposal is not fully funded"}, 400
for ms in proposal.milestones:
if ms.id == int(mid):
is_final_milestone = False
ms.mark_paid(tx_id)
db.session.add(ms)
db.session.flush()
# check if this is the final ms, and update proposal.stage
num_paid = reduce(lambda a, x: a + (1 if x.stage == MilestoneStage.PAID else 0), proposal.milestones, 0)
if num_paid == len(proposal.milestones):
is_final_milestone = True
proposal.stage = ProposalStage.COMPLETED # WIP -> COMPLETED
db.session.add(proposal)
db.session.flush()
db.session.commit()
# email TEAM that payout request was PAID
amount = Decimal(ms.payout_percent) * Decimal(proposal.target) / 100
for member in proposal.team:
send_email(member.email_address, 'milestone_paid', {
'proposal': proposal,
'milestone': ms,
'amount': amount,
'tx_explorer_url': make_explore_url(tx_id),
'proposal_milestones_url': make_url(f'/proposals/{proposal.id}?tab=milestones'),
})
# email FOLLOWERS that milestone was accepted
proposal.send_follower_email(
"followed_proposal_milestone",
email_args={"milestone": ms},
url_suffix="?tab=milestones",
)
if not is_final_milestone:
Milestone.set_v2_date_estimates(proposal)
db.session.commit()
return proposal_schema.dump(proposal), 200
return {"message": "No milestone matching id"}, 404
# EMAIL
@blueprint.route('/email/example/<type>', methods=['GET'])
@admin.admin_auth_required
def get_email_example(type):
email = generate_email(type, example_email_args.get(type))
if email['info'].get('subscription'):
# Unserializable, so remove
email['info'].pop('subscription', None)
return email
# CCRs
@blueprint.route("/ccrs", methods=["GET"])
@query(paginated_fields)
@admin.admin_auth_required
def get_ccrs(page, filters, search, sort):
filters_workaround = request.args.getlist('filters[]')
page = pagination.ccr(
schema=ccrs_schema,
query=CCR.query,
page=page,
filters=filters_workaround,
search=search,
sort=sort,
)
return page
@blueprint.route('/ccrs/<ccr_id>', methods=['DELETE'])
@admin.admin_auth_required
def delete_ccr(ccr_id):
ccr = CCR.query.filter(CCR.id == ccr_id).first()
if not ccr:
return {"message": "No CCR matching that id"}, 404
db.session.delete(ccr)
db.session.commit()
return {"message": "ok"}, 200
@blueprint.route('/ccrs/<id>', methods=['GET'])
@admin.admin_auth_required
def get_ccr(id):
ccr = CCR.query.filter(CCR.id == id).first()
if ccr:
return ccr_schema.dump(ccr)
return {"message": f"Could not find ccr with id {id}"}, 404
@blueprint.route('/ccrs/<ccr_id>/accept', methods=['PUT'])
@body({
"isAccepted": fields.Bool(required=True),
"rejectReason": fields.Str(required=False, missing=None)
})
@admin.admin_auth_required
def approve_ccr(ccr_id, is_accepted, reject_reason=None):
ccr = CCR.query.filter_by(id=ccr_id).first()
if ccr:
rfp_id = ccr.approve_pending(is_accepted, reject_reason)
if is_accepted:
return {"rfpId": rfp_id}, 201
else:
return ccr_schema.dump(ccr)
return {"message": "No CCR found."}, 404
@blueprint.route('/ccrs/<ccr_id>/reject_permanently', methods=['PUT'])
@body({
"rejectReason": fields.Str(required=True, missing=None)
})
@admin.admin_auth_required
def reject_permanently_ccr(ccr_id, reject_reason):
ccr = CCR.query.get(ccr_id)
if not ccr:
return {"message": "No CCR found."}, 404
reject_permanently_statuses = [
CCRStatus.REJECTED,
CCRStatus.PENDING
]
if ccr.status not in reject_permanently_statuses:
return {"message": "CCR status is not REJECTED or PENDING."}, 401
ccr.status = CCRStatus.REJECTED_PERMANENTLY
ccr.reject_reason = reject_reason
db.session.add(ccr)
db.session.commit()
send_email(ccr.author.email_address, 'ccr_rejected_permanently', {
'user': ccr.author,
'ccr': ccr,
'admin_note': reject_reason,
'profile_rejected_url': make_url('/profile?tab=rejected')
})
return ccr_schema.dump(ccr)
# Requests for Proposal
@blueprint.route('/rfps', methods=['GET'])
@admin.admin_auth_required
def get_rfps():
rfps = RFP.query.all()
return admin_rfps_schema.dump(rfps)
@blueprint.route('/rfps', methods=['POST'])
@body({
"title": fields.Str(required=True),
"brief": fields.Str(required=True),
"content": fields.Str(required=True),
"bounty": fields.Str(required=False, missing=0),
"matching": fields.Bool(required=False, missing=False),
"dateCloses": fields.Int(required=False, missing=None)
})
@admin.admin_auth_required
def create_rfp(date_closes, **kwargs):
rfp = RFP(
**kwargs,
date_closes=datetime.fromtimestamp(date_closes) if date_closes else None,
)
db.session.add(rfp)
db.session.commit()
return admin_rfp_schema.dump(rfp), 200
@blueprint.route('/rfps/<rfp_id>', methods=['GET'])
@admin.admin_auth_required
def get_rfp(rfp_id):
rfp = RFP.query.filter(RFP.id == rfp_id).first()
if not rfp:
return {"message": "No RFP matching that id"}, 404
return admin_rfp_schema.dump(rfp)
@blueprint.route('/rfps/<rfp_id>', methods=['PUT'])
@body({
"title": fields.Str(required=True),
"brief": fields.Str(required=True),
"status": fields.Str(required=True, validate=validate.OneOf(choices=RFPStatus.list())),
"content": fields.Str(required=True),
"bounty": fields.Str(required=False, allow_none=True, missing=None),
"matching": fields.Bool(required=False, default=False, missing=False),
"dateCloses": fields.Int(required=False, missing=None),
})
@admin.admin_auth_required
def update_rfp(rfp_id, title, brief, content, bounty, matching, date_closes, status):
rfp = RFP.query.filter(RFP.id == rfp_id).first()
if not rfp:
return {"message": "No RFP matching that id"}, 404
# Update fields
rfp.title = title
rfp.brief = brief
rfp.content = content
rfp.matching = matching
rfp.bounty = bounty
rfp.date_closes = datetime.fromtimestamp(date_closes) if date_closes else None
# Update timestamps if status changed
if rfp.status != status:
if status == RFPStatus.LIVE and not rfp.date_opened:
rfp.date_opened = datetime.now()
if status == RFPStatus.CLOSED:
rfp.date_closed = datetime.now()
rfp.status = status
db.session.add(rfp)
db.session.commit()
return admin_rfp_schema.dump(rfp)
@blueprint.route('/rfps/<rfp_id>', methods=['DELETE'])
@admin.admin_auth_required
def delete_rfp(rfp_id):
rfp = RFP.query.filter(RFP.id == rfp_id).first()
if not rfp:
return {"message": "No RFP matching that id"}, 404
db.session.delete(rfp)
db.session.commit()
return {"message": "ok"}, 200
# Contributions
@blueprint.route('/contributions', methods=['GET'])
@query(paginated_fields)
@admin.admin_auth_required
def get_contributions(page, filters, search, sort):
filters_workaround = request.args.getlist('filters[]')
page = pagination.contribution(
page=page,
schema=admin_proposal_contributions_schema,
filters=filters_workaround,
search=search,
sort=sort,
)
return page
@blueprint.route('/contributions', methods=['POST'])
@body({
"proposalId": fields.Int(required=True),
"userId": fields.Int(required=True),
"status": fields.Str(required=True, validate=validate.OneOf(choices=ContributionStatus.list())),
"amount": fields.Str(required=True),
"txId": fields.Str(required=False, missing=None)
})
@admin.admin_auth_required
def create_contribution(proposal_id, user_id, status, amount, tx_id):
# Some fields set manually since we're admin, and normally don't do this
contribution = ProposalContribution(
proposal_id=proposal_id,
user_id=user_id,
amount=amount,
)
contribution.status = status
contribution.tx_id = tx_id
db.session.add(contribution)
db.session.flush()
# TODO: should this stay?
contribution.proposal.set_pending_when_ready()
db.session.commit()
return admin_proposal_contribution_schema.dump(contribution), 200
@blueprint.route('/contributions/<contribution_id>', methods=['GET'])
@admin.admin_auth_required
def get_contribution(contribution_id):
contribution = ProposalContribution.query.filter(ProposalContribution.id == contribution_id).first()
if not contribution:
return {"message": "No contribution matching that id"}, 404
return admin_proposal_contribution_schema.dump(contribution), 200
@blueprint.route('/contributions/<contribution_id>', methods=['PUT'])
@body({
"proposalId": fields.Int(required=False, missing=None),
"userId": fields.Int(required=False, missing=None),
"status": fields.Str(required=False, missing=None, validate=validate.OneOf(choices=ContributionStatus.list())),
"amount": fields.Str(required=False, missing=None),
"txId": fields.Str(required=False, missing=None),
"refundTxId": fields.Str(required=False, allow_none=True, missing=None),
})
@admin.admin_auth_required
def edit_contribution(contribution_id, proposal_id, user_id, status, amount, tx_id, refund_tx_id):
contribution = ProposalContribution.query.filter(ProposalContribution.id == contribution_id).first()
if not contribution:
return {"message": "No contribution matching that id"}, 404
had_refund = contribution.refund_tx_id
# do not allow editing certain fields on contributions once a proposal has become funded
if (proposal_id or user_id or status or amount or tx_id) and contribution.proposal.is_funded:
return {"message": "Cannot edit contributions to fully-funded proposals"}, 400
# Proposal ID (must belong to an existing proposal)
if proposal_id:
proposal = Proposal.query.filter(Proposal.id == proposal_id).first()
if not proposal:
return {"message": "No proposal matching that id"}, 400
contribution.proposal_id = proposal_id
# User ID (must belong to an existing user or 0 to unset)
if user_id is not None:
if user_id == 0:
contribution.user_id = None
else:
user = User.query.filter(User.id == user_id).first()
if not user:
return {"message": "No user matching that id"}, 400
contribution.user_id = user_id
# Status (must be in list of statuses)
if status:
if not ContributionStatus.includes(status):
return {"message": "Invalid status"}, 400
contribution.status = status
# Amount (must be a Decimal parseable)
if amount:
try:
contribution.amount = str(Decimal(amount))
        except Exception:
return {"message": "Amount could not be parsed as number"}, 400
# Transaction ID (no validation)
if tx_id is not None:
contribution.tx_id = tx_id
# Refund TX ID (no validation)
if refund_tx_id is not None:
contribution.refund_tx_id = refund_tx_id
db.session.add(contribution)
db.session.flush()
# TODO: should this stay?
contribution.proposal.set_pending_when_ready()
db.session.commit()
return admin_proposal_contribution_schema.dump(contribution), 200
# Comments
@blueprint.route('/comments', methods=['GET'])
@query(paginated_fields)
@admin.admin_auth_required
def get_comments(page, filters, search, sort):
filters_workaround = request.args.getlist('filters[]')
page = pagination.comment(
page=page,
filters=filters_workaround,
search=search,
sort=sort,
schema=admin_comments_schema
)
return page
@blueprint.route('/comments/<comment_id>', methods=['PUT'])
@body({
"hidden": fields.Bool(required=False, missing=None),
"reported": fields.Bool(required=False, missing=None),
})
@admin.admin_auth_required
def edit_comment(comment_id, hidden, reported):
comment = Comment.query.filter(Comment.id == comment_id).first()
if not comment:
return {"message": "No comment matching that id"}, 404
if hidden is not None:
comment.hide(hidden)
if reported is not None:
comment.report(reported)
db.session.commit()
return admin_comment_schema.dump(comment)
# Financials
@blueprint.route("/financials", methods=["GET"])
@admin.admin_auth_required
def financials():
nfmt = '999999.99999999' # smallest unit of ZEC
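    # raw-SQL helpers below: each builds a SUM(...) aggregate over ZEC amounts matching a WHERE clause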
def sql_pc(where: str):
return f"SELECT SUM(TO_NUMBER(amount, '{nfmt}')) FROM proposal_contribution WHERE {where}"
def sql_pc_p(where: str):
return f'''
SELECT SUM(TO_NUMBER(amount, '{nfmt}'))
FROM proposal_contribution as pc
INNER JOIN proposal as p ON pc.proposal_id = p.id
LEFT OUTER JOIN "user" as u ON pc.user_id = u.id
LEFT OUTER JOIN user_settings as us ON u.id = us.user_id
WHERE {where}
'''
def sql_ms(where: str):
return f'''
SELECT SUM(TO_NUMBER(ms.payout_percent, '999')/100 * TO_NUMBER(p.target, '999999.99999999'))
FROM milestone as ms
INNER JOIN proposal as p ON ms.proposal_id = p.id
WHERE p.version = '2' AND {where}
'''
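    # execute a scalar aggregate query; a NULL sum is normalized to Decimal(0)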
def ex(sql: str):
        res = db.engine.execute(text(sql))
        val = res.scalar()
        return (val if val else Decimal(0)).normalize()
def gen_quarter_date_range(year, quarter):
if quarter == 1:
return f"{year}-1-1", f"{year}-3-31"
if quarter == 2:
return f"{year}-4-1", f"{year}-6-30"
if quarter == 3:
return f"{year}-7-1", f"{year}-9-30"
if quarter == 4:
return f"{year}-10-1", f"{year}-12-31"
# contributions = {
# 'total': str(ex(sql_pc("status = 'CONFIRMED' AND staking = FALSE"))),
# 'staking': str(ex(sql_pc("status = 'CONFIRMED' AND staking = TRUE"))),
# 'funding': str(ex(sql_pc_p("pc.status = 'CONFIRMED' AND pc.staking = FALSE AND p.stage = 'FUNDING_REQUIRED'"))),
# 'funded': str(
# ex(sql_pc_p("pc.status = 'CONFIRMED' AND pc.staking = FALSE AND p.stage in ('WIP', 'COMPLETED')"))),
# # should have a refund_address
# 'refunding': str(ex(sql_pc_p(
# '''
# pc.status = 'CONFIRMED' AND
# pc.staking = FALSE AND
# pc.refund_tx_id IS NULL AND
# p.stage IN ('CANCELED', 'FAILED') AND
# us.refund_address IS NOT NULL
# '''
# ))),
# # here we don't care about current refund_address of user, just that there has been a refund_tx_id
# 'refunded': str(ex(sql_pc_p(
# '''
# pc.status = 'CONFIRMED' AND
# pc.staking = FALSE AND
# pc.refund_tx_id IS NOT NULL AND
# p.stage IN ('CANCELED', 'FAILED')
# '''
# ))),
# # if there is no user, or the user hasn't any refund_address
# 'donations': str(ex(sql_pc_p(
# '''
# pc.status = 'CONFIRMED' AND
# pc.staking = FALSE AND
# pc.refund_tx_id IS NULL AND
# (pc.user_id IS NULL OR us.refund_address IS NULL) AND
# p.stage IN ('CANCELED', 'FAILED')
# '''
# ))),
# 'gross': str(ex(sql_pc_p("pc.status = 'CONFIRMED' AND pc.refund_tx_id IS NULL"))),
# }
po_due = ex(sql_ms("ms.stage = 'ACCEPTED'")) # payments accepted but not yet marked as paid
po_paid = ex(sql_ms("ms.stage = 'PAID'")) # will catch paid ms from all proposals regardless of status/stage
# expected payments
po_future = ex(sql_ms("ms.stage IN ('IDLE', 'REJECTED', 'REQUESTED') AND p.stage IN ('WIP', 'COMPLETED')"))
po_total = po_due + po_paid + po_future
now = datetime.now()
start_year = 2019
end_year = now.year
payouts_by_quarter = {}
for year in range(start_year, end_year + 1):
payouts_by_quarter[f"{year}"] = {}
year_total = 0
for quarter in range(1, 5):
begin, end = gen_quarter_date_range(year, quarter)
payouts = ex(sql_ms(f"ms.stage = 'PAID' AND (ms.date_paid BETWEEN '{begin}' AND '{end}')"))
payouts_by_quarter[f"{year}"][f"q{quarter}"] = str(payouts)
year_total += payouts
payouts_by_quarter[f"{year}"]["year_total"] = str(year_total)
payouts = {
'total': str(po_total),
'due': str(po_due),
'paid': str(po_paid),
'future': str(po_future),
}
grants = {
'total': '0',
'matching': '0',
'bounty': '0',
}
def add_str_dec(a: str, b: str):
return str((Decimal(a) + Decimal(b)).quantize(Decimal('0.001'), rounding=ROUND_HALF_DOWN))
proposals = Proposal.query.filter_by(version='2')
for p in proposals:
# CANCELED proposals excluded, though they could have had milestones paid out with grant funds
if p.stage in [ProposalStage.WIP, ProposalStage.COMPLETED]:
# matching
matching = Decimal(p.contributed) * Decimal(p.contribution_matching)
remaining = max(Decimal(p.target) - Decimal(p.contributed), Decimal('0.0'))
if matching > remaining:
matching = remaining
# bounty
bounty = Decimal(p.contribution_bounty)
remaining = max(Decimal(p.target) - (matching + Decimal(p.contributed)), Decimal('0.0'))
if bounty > remaining:
bounty = remaining
grants['matching'] = add_str_dec(grants['matching'], matching)
grants['bounty'] = add_str_dec(grants['bounty'], bounty)
grants['total'] = add_str_dec(grants['total'], matching + bounty)
return {
'grants': grants,
'payouts': payouts,
'payouts_by_quarter': payouts_by_quarter
}
|
from django.shortcuts import get_object_or_404
from django.views import generic
from django.utils import timezone
from .models import News, Category
class BlogIndexView(generic.ListView):
model = News
#paginate_by = 12
template_name = 'news/index.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['now'] = timezone.now()
context.update({
'news_list': News.objects.order_by('-created_on')
})
return context
class DetailedView(generic.DetailView):
model = News
template_name = "news/detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['now'] = timezone.now()
return context
class CategoryListView(generic.ListView):
model = Category
template_name = "news/cat_index.html"
def get_context_data(self, **kwargs):
category = get_object_or_404(Category, cat_slug=self.kwargs.get('cat_slug'))
context = super().get_context_data(**kwargs)
context.update({
'news_list': News.objects.order_by('-created_on').filter(category_id=category.id),
            'cat': category,
})
return context
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 19:24:02 2020
@author: shaun
"""
import numpy as np
def eulerstep(yn,tn,f,h):
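    # one explicit (forward) Euler step: y_{n+1} = y_n + h*f(y_n, t_n)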
yn1=yn+h*f(yn,tn)
return yn1
def nonlinE(yn,tn1,h):
    # backward (implicit) Euler for y' = -3*t*y/(1+t) + 2*(1+t)**3*exp(-t):
    # y_{n+1} = y_n + h*f(y_{n+1}, t_{n+1}) has been solved for y_{n+1} by hand,
    # giving the closed-form top/bottom expression below
top=yn+h*2.0*((1.0+tn1)**3.0)*np.e**(-tn1)
bottom=1.0+h*((3.0*tn1)/(1.0+tn1))
yn1=top/bottom
return yn1
def trapezoidalE(yn,tn1,tn,h):
    # trapezoidal rule for the same ODE, solved for y_{n+1} by hand
A= yn+(h/2.0)*2*((1+tn1)**3)*(np.e**(-tn1))
    B=(h/2.0)*(-3*tn*yn)/(1+tn)
C=(h/2.0)*2*((1+tn)**3)*(np.e**(-tn))
top=A+B+C
bottom=1+(h/2.0)*((3*tn1)/(1+tn1))
yn1=top/bottom
return yn1
def rk2step(yn,tn,f,h):
    # midpoint (second-order Runge-Kutta) step: evaluate f at the half step
    y1=yn+(h/2.0)*f(yn,tn)
    yn1=yn+h*f(y1,tn+h/2.0)
    return yn1
def rk4step(yn,tn,f,h):
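    # classic fourth-order Runge-Kutta step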
a=1/2.0
k1=h*f(yn,tn)
k2=h*f(yn+a*k1,tn+h/2.0)
k3=h*f(yn+a*k2,tn+h/2.0)
k4=h*f(yn+a*k3,tn+h)
yn1=yn+(1/6)*k1+(1/3)*(k2+k3)+(1/6)*k4
return yn1
def eulerE(y0,a,b,f,h):
y=[y0]
t=[a]
end=a
while(end<b):
newy=eulerstep(y[-1],t[-1],f,h)
y.append(newy)
t.append(t[-1]+h)
end+=h
return t,y
def eulerI(y0,a,b,h):
y=[y0]
t=[a,a+h]
end=a
while(end<b):
newy=nonlinE(y[-1],t[-1],h)
y.append(newy)
t.append(t[-1]+h)
end+=h
del t[-1]
return t,y
def eulerT(y0,a,b,h):
y=[y0]
t=[a,a+h]
end=a
while(end<b):
newy=trapezoidalE(y[-1],t[-1],t[-2],h)
y.append(newy)
t.append(t[-1]+h)
end+=h
del t[-1]
return t,y
def rk2(y0,a,b,f,h):
y=[y0]
t=[a]
end=a
while(end<b):
newy=rk2step(y[-1],t[-1],f,h)
y.append(newy)
t.append(t[-1]+h)
end+=h
return t,y
def rk4(y0,a,b,f,h):
y=[y0]
t=[a]
end=a
while(end<b):
newy=rk4step(y[-1],t[-1],f,h)
y.append(newy)
t.append(t[-1]+h)
end+=h
return t,y
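# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original
# file). The algebra in nonlinE/trapezoidalE corresponds to the test problem
#     y' = -3*t*y/(1+t) + 2*(1+t)**3 * exp(-t),   y(0) = 1,
# whose exact solution is y(t) = (1+t)**3 * exp(-t); that problem is
# inferred from the implicit formulas above, not stated in the source.
if __name__ == "__main__":
    def f(y, t):
        return -3.0*t*y/(1.0+t) + 2.0*((1.0+t)**3.0)*np.exp(-t)
    t, y = rk4(1.0, 0.0, 2.0, f, 0.1)
    exact = [((1.0+ti)**3.0)*np.exp(-ti) for ti in t]
    print("rk4 max abs error:", max(abs(yi - ei) for yi, ei in zip(y, exact)))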
|
import json
intentionFile = 'Intentions_fr.json'
# Load the JSON intent file
def load_intent(rep = ""):
with open(rep + intentionFile) as json_data:
intents = json.load(json_data)
return intents
def intents_list():
    listing = ""
    intents = load_intent()
    for i, intent in enumerate(intents['intents']):
        listing = listing + " - " + str(i) + " : " + intent['tag'] + "<br>"
    return listing
def write_pattern(sentence,index,rep = ""):
with open(rep + intentionFile) as json_data:
intents = json.load(json_data)
intents['intents'][index]['newPatters'].append(sentence)
json_file = open(rep + intentionFile, "w+")
json_file.write(json.dumps(intents,ensure_ascii = False, sort_keys=True, indent=4))
json_file.close()
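# Minimal usage sketch (added for illustration; the sentence and intent
# index 0 are hypothetical, and assume the JSON shape implied above:
# {"intents": [{"tag": ..., "newPatters": [...]}, ...]}).
if __name__ == "__main__":
    print(intents_list())
    write_pattern("Bonjour, comment vas-tu ?", 0)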
|
#Leo Li
#09/27/18
#Description: the task for this program is to take two values, base and exponent, from the user,
# then create a list of the powers of that base from the 0th power (1) up to the entered exponent,
# in ascending order. For example, base 2 with exponent 5 gives [1, 2, 4, 8, 16, 32].
a = []
x = int(input("\nplease enter the base\n\n>>"))
y = int(input("\nplease enter the exponent\n\n>>"))
z = 0
while z < y + 1:  # while z is no larger than the exponent, keep adding terms
a.append(x**z)
z+=1
print("\n",a)
|
from openerp.osv import fields, osv
from logging import getLogger
_logger = getLogger(__name__)
class syncjob_chargetype_category_mapping(osv.osv):
_name = 'syncjob.chargetype.category.mapping'
_description = "Chargetype"
    _columns = {
        'chargetype_name': fields.char('Chargetype Name', required=True),
        'category_id': fields.many2one('product.category', 'Category Id', required=True),
    }
    def create(self, cr, uid, values, context=None):
        # upsert: update the existing mapping for this category if one exists,
        # otherwise create a new record
        res = self.search(cr, uid, [('category_id', '=', values['category_id'])], limit=1)
        if res:
            self.write(cr, uid, res, values, context)
            dept = res[0]
        else:
            dept = super(syncjob_chargetype_category_mapping, self).create(cr, uid, values, context)
        return dept
|
"""empty message
Revision ID: 91b16dc2fd74
Revises: d03c91f3038d
Create Date: 2021-02-01 17:00:23.721765
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '91b16dc2fd74'
down_revision = 'd03c91f3038d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('proposal', sa.Column('funded_by_zomg', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('proposal', 'funded_by_zomg')
# ### end Alembic commands ###
|