| blob_id (string, length 40) | language (string, 1 class) | repo_name (string, length 5-133) | path (string, length 2-333) | src_encoding (string, 30 classes) | length_bytes (int64, 18-5.47M) | score (float64, 2.52-5.81) | int_score (int64, 3-5) | detected_licenses (list, length 0-67) | license_type (string, 2 classes) | text (string, length 12-5.47M) | download_success (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
d848497e2725038e7c5c3c27a5a34c1c10f46a9e
|
Python
|
danerlt/leetcode
|
/Python3/剑指Offer05.替换空格.py
|
UTF-8
| 780
| 3.53125
| 4
|
[] |
no_license
|
# Implement a function that replaces each space in string s with "%20".
#
#
#
# Example 1:
#
# Input: s = "We are happy."
# Output: "We%20are%20happy."
#
#
#
# Constraints:
#
# 0 <= len(s) <= 10000
#
# Related Topics: String 👍 360 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def replaceSpace(self, s: str) -> str:
        # Approach: build a list; while iterating over the string,
        # append "%20" for each space and the character itself otherwise.
res = []
for char in s:
if char == " ":
res.append("%20")
else:
res.append(char)
res = "".join(res)
return res
# leetcode submit region end(Prohibit modification and deletion)
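
# A minimal sanity check against the example above.
if __name__ == '__main__':
    assert Solution().replaceSpace("We are happy.") == "We%20are%20happy."
    print("ok")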
| true
|
6358917bdddb2fddd9cecada2b4568692d195e01
|
Python
|
msschambach/pythonbdd
|
/djangotodo/features/steps/api_create_todolist.py
|
UTF-8
| 2,634
| 2.859375
| 3
|
[] |
no_license
|
import json
from behave import given, when, then
@given(u'an endpoint for creating a list exists')
def step_impl(context):
response = context.test.client.post('/api/lists/', data={})
response_content = response.content.decode('utf-8')
assert response_content == '{"name":["This field is required."],"description":["This field is required."]}'
@when(u'I send a payload with the name as "Homework to do" '
u'and a description as "A list of homework tasks to do"')
def step_impl(context):
response = context.test.client.post('/api/lists/', {
"name": "Homework to do",
"description": "A list of homework tasks to do"
})
response_dict = json.loads(response.content.decode('utf-8'))
context.list_id = response_dict.get('id')
assert response_dict.get('name') == 'Homework to do'
assert response_dict.get('description') == 'A list of homework tasks to do'
@then(u'a list with the name "Homework to do" and '
u'description "A list of homework tasks to do" should be created')
def step_impl(context):
url = '/api/lists/{list_id}/'.format(list_id=context.list_id)
response = context.test.client.get(url)
assert response.status_code == 200
    try:
        response_dict = json.loads(response.content)
    except ValueError as e:
        response_dict = {}
        print(e)
        print("content: " + response.content.decode('utf-8'))
assert response_dict.get('name') == 'Homework to do'
assert response_dict.get('description') == 'A list of homework tasks to do'
@when(u'I send a payload with the name as "Errands to run" '
u'and a description as "A list of errands to run"')
def step_impl(context):
response = context.test.client.post('/api/lists/', {
"name": "Errands to run",
"description": "A list of errands to run"
})
response_dict = json.loads(response.content.decode('utf-8'))
context.list_id = response_dict.get('id')
assert response_dict.get('name') == 'Errands to run'
assert response_dict.get('description') == 'A list of errands to run'
@then(u'a list with the name "Errands to run" and '
u'description "A list of errands to run" should be created')
def step_impl(context):
url = '/api/lists/{list_id}/'.format(list_id=context.list_id)
response = context.test.client.get(url)
    try:
        response_dict = json.loads(response.content)
    except ValueError as e:
        response_dict = {}
        print(e)
        print("content: " + response.content.decode('utf-8'))
assert response_dict.get('name') == 'Errands to run'
assert response_dict.get('description') == 'A list of errands to run'
| true
|
a3b0a205b212336c5efd269b34824d6198645d32
|
Python
|
gonzalob24/Learning_Central
|
/Python_Programming/PythonUhcl/Scripting/babynames.py
|
UTF-8
| 4,698
| 4.15625
| 4
|
[] |
no_license
|
"""
In python: Using the TXBabyNames.txt file.
Write code to read it into a list.
How many female records are there? Done
How many male records are there? Done
How many female records are there in 1910? Done
How many male records are there in 1910? Done
How many female records are there in 2012? Done
How many male records are there in 2012? Done
What are the total number of babies born in Texas in 2012? Done
What are the total number of babies born in Texas with your name since 1910? Done
What are the total number of babies born in Texas with your name between 1910 and 1960? Done
What name was the most popular (had the highest count in one year) for males? Done
What name was the most popular (had the highest count in one year) for females? Done
What was the name for the males, and for the females? Done
What year was the name for males? for females? Done
In what year was your name the most popular (had the highest count)?
Example name: Paul
Write all this information out to a file.
"""
file_names = open("TXBabyNames.txt")
# open_now = file_names.readline()
# open_now = file_names.readlines() puts the entire file into a list
# read() reads all of it
# readline() just reads the first line
# print(open_now)
# to read line by line, use a for loop
female_sum = 0
male_sum = 0
female_sum_1910 = 0
male_sum_1910 = 0
female_sum_2012 = 0
male_sum_2012 = 0
tx_babies_2012 = 0
tx_babies_myname = 0
tx_babies_myname_1910_1960 = 0
popular_male_count = 0
popular_male_name = ""
popular_male_name_year = 0
popular_female_count = 0
popular_female_name = ""
popular_female_name_year = 0
popular_myname_count = 0
popular_myname_year = 0
for line in file_names:
line = line.split(",")
# total number of females
if line[1] == "F":
female_sum += int(line[4])
# total number of males
if line[1] == "M":
male_sum += int(line[4])
# total females in 1910
if int(line[2]) == 1910 and line[1] == "F":
female_sum_1910 += int(line[4])
# total male records in 1910
if int(line[2]) == 1910 and line[1] == "M":
male_sum_1910 += int(line[4])
# total females in 2012
if int(line[2]) == 2012 and line[1] == "F":
female_sum_2012 += int(line[4])
# total male records in 2012
if int(line[2]) == 2012 and line[1] == "M":
male_sum_2012 += int(line[4])
# babies born in TX in 2012
if line[0] == "TX" and int(line[2]) == 2012:
tx_babies_2012 += int(line[4])
# TX babies born since 1910 with my name
if line[0] == "TX" and line[3] == "Ira":
tx_babies_myname += int(line[4])
# TX babies with my name between 1910 and 1960
if line[0] == "TX" and line[3] == "Gonzalo" and int(line[2]) in range(1910, 1961):
tx_babies_myname_1910_1960 += int(line[4])
# most popular male name in one year
if line[1] == "M":
if popular_male_count < int(line[4]):
popular_male_count = int(line[4])
popular_male_name = line[3]
popular_male_name_year = line[2]
# most popular female name in one year
if line[1] == "F":
if popular_female_count < int(line[4]):
popular_female_count = int(line[4])
popular_female_name = line[3]
popular_female_name_year = line[2]
# most popular year for my name
if line[3] == "Gonzalo":
if popular_myname_count < int(line[4]):
popular_myname_count = int(line[4])
popular_myname_year = line[2]
print("There are a total of " + str(female_sum) + " female records.")
print("There are a total of " + str(male_sum) + " male records.")
print("There are a total of " + str(female_sum_1910) + " female records in 1910.")
print("There are a total of " + str(male_sum_1910) + " male records in 1910.")
print("There are a total of " + str(female_sum_2012) + " female records in 2012.")
print("There are a total of " + str(male_sum_2012) + " male records in 2012.")
print("There are a total of " + str(tx_babies_2012) + " Babies born in TX in 2012.")
print("There are a total of " + str(tx_babies_myname) + " Babies born in TX in 1910 with my name.")
print("There are a total of " + str(tx_babies_myname_1910_1960) +
" Babies born in TX with my name from 1910 to 1960.")
print("The most popular male name in a given year is " + popular_male_name + ". There were " + str(popular_male_count) +
" in " + popular_male_name_year)
print("The most popular female name in a given year is " + popular_female_name + ". There were " + str(popular_female_count) + " in " + popular_female_name_year)
print("The year that my name was most famous was in " + popular_myname_year + ". There were " + str(popular_myname_count))
| true
|
65d6a24181482ea120d0ab6caa9dc122e1f527f8
|
Python
|
leizhen10000/crawler
|
/demo/selenium/locate_element/__init__.py
|
UTF-8
| 2,813
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# @Time : 2018/8/27 10:13
# @Author : Lei Zhen
# @Contract: leizhen8080@gmail.com
# @File : __init__.py.py
# @Software: PyCharm
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
# There are many strategies for locating elements on a page; use whichever fits your test case best.
# Selenium provides the following methods to locate a single element:
# find_element_by_id
# find_element_by_name
# find_element_by_xpath
# find_element_by_link_text
# find_element_by_partial_link_text
# find_element_by_tag_name
# find_element_by_class_name
# find_element_by_css_selector
# The methods above return a single match; to find all matches, use the elements variants:
# find_elements_by_id
# find_elements_by_name
# find_elements_by_xpath
# find_elements_by_link_text
# find_elements_by_partial_link_text
# find_elements_by_tag_name
# find_elements_by_class_name
# find_elements_by_css_selector
#
# Besides the methods listed above, there are two more general methods for locating objects on the page:
# find_element and find_elements
# driver.find_element(By.XPATH, '//button[text()="Some text"]')
# driver.find_elements(By.XPATH, '//button')
# The By class has the following attributes:
# ID = "id"
# XPATH = "xpath"
# LINK_TEXT = "link text"
# PARTIAL_LINK_TEXT = 'partial link text'
# NAME = 'name'
# TAG_NAME = 'tag name'
# CLASS_NAME = 'class name'
# CSS_SELECTOR = 'css selector'
from selenium import webdriver
def locating_by_x():
    driver = webdriver.Firefox()
    driver.find_element_by_xpath('//button[text()="Some text"]')
    # XPath is a language for locating nodes in XML documents.
    # XPath goes beyond lookups by id and name attributes and opens up many
    # possibilities, such as selecting the third checkbox on a page.
    # The main reason to locate with XPath is that the target element does not
    # need a definite id or name attribute: you can use an absolute path (not
    # recommended) or a path relative to an element that does have an id or name.
    # XPath locators can also find elements through attributes other than id and name.
    # By first finding a nearby element that can be located by id or name (usually
    # a parent), you can then reach the desired element through its hierarchical
    # relationship to that element.
    # This is much more convenient and makes test scripts more robust.
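
# A minimal sketch of the By-based API described above, run against a
# hypothetical page; the URL and locators are assumptions for illustration.
from selenium.webdriver.common.by import By

def locate_with_by():
    driver = webdriver.Firefox()
    driver.get('https://example.com')
    heading = driver.find_element(By.TAG_NAME, 'h1')    # single match
    links = driver.find_elements(By.CSS_SELECTOR, 'a')  # list of matches
    print(heading.text, len(links))
    driver.quit()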
| true
|
a67af1f0027ef4bcf74805907d60dc1c609d7a64
|
Python
|
vstiern/wsb-hype-alert
|
/data_collector/src/aux_functions.py
|
UTF-8
| 810
| 3.359375
| 3
|
[] |
no_license
|
"""Aux functions"""
from pathlib import Path
from configparser import ConfigParser
# read file
def get_config_section(section, file_name="config.ini"):
"""
Parse config file.
    :param section: Section header name as string.
    :param file_name: File name of config file. Default name provided.
:return: Dictionary with config name as keys and config value as value.
"""
# create parser
parser = ConfigParser()
file_path = Path(__file__).parent.parent
parser.read(file_path / file_name)
config_dict = {}
if parser.has_section(section):
params = parser.items(section)
for param in params:
config_dict[param[0]] = param[1]
else:
raise Exception(f'Section: {section} not found in the {file_name}')
return config_dict
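
# A minimal usage sketch; the section name 'api' is only an illustration and
# assumes a matching section exists in config.ini.
if __name__ == '__main__':
    try:
        print(get_config_section('api'))
    except Exception as exc:
        print(exc)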
| true
|
9ad79d451ea71a61a26be9ba137208493ba2cbee
|
Python
|
narahahn/continuous_measurement
|
/example/IRs_image_source_model.py
|
UTF-8
| 3,424
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
""" Computes the impulse responses in a rectangular room using the
mirror image sources model
* frequency-independent reflection coefficients
* fractional delay interpolation using the Lagrange polynomial
"""
import numpy as np
import sfs
import matplotlib.pyplot as plt
import sounddevice as sd
import soundfile as sf
from scipy.signal import lfilter, freqz, bessel, butter, fftconvolve
from matplotlib.patches import Rectangle
from sys import path
path.append('../')
from utils import *
from source import *
# Constants
c = 343
fs = 44100 # sample rate for boadband response
# Room
L = 10, 10.7, 12 # room dimensions
coeffs = .8, .8, .7, .7, .6, .6 # wall reflection coefficients
# Point source
x0 = 5.2, 5.7, 2.5 # source position
signal = ([1, 0, 0], fs) # signal for broadband response
source_type = 'point'
# 3D Image sources
max_order = 6 # maximum order of image sources
xs, wall_count = sfs.util.image_sources_for_box(x0, L, max_order)
source_strength = np.prod(coeffs**wall_count, axis=1)
# Circular microphone array
R_mic = 0.2 # radius
K_mic = 90 # number of microphones
phi_mic = np.linspace(0, 2 * np.pi, num=K_mic, endpoint=False)
x_mic = np.array([R_mic * np.cos(phi_mic) + 0.732,
R_mic * np.sin(phi_mic) + 0.831,
np.zeros_like(phi_mic) + 0.511]).T
# Impulse responses
#N = 4800 # FIR filter length
N = int(2**(np.ceil(np.log2(np.max(np.linalg.norm(xs, axis=-1)) / c * fs))))
h = np.zeros((K_mic, N))
for ii, xi in enumerate(xs):
waveform, shift, offset = impulse_response(xi, x_mic, source_type, fs)
htemp, _, _ = construct_ir_matrix(waveform, shift, N)
h += htemp * source_strength[ii]
# Listening example
s, _ = sf.read('50.flac') # SQAM male speech
s = s[:3*fs, 0]
y = fftconvolve(s, h[0, :])
# Plots
phi_deg = np.rad2deg(phi_mic)
time = np.arange(N)/fs*1000
Nf = N//2+1
freq = np.arange(Nf)*fs/N  # rfft bin spacing is fs/N
# IRs - linear scale
plt.figure(figsize=(4, 10))
plt.pcolormesh(phi_deg, time, h.T, cmap='coolwarm')
plt.colorbar()
plt.clim(-0.002, 0.002)
plt.axis('tight')
plt.xlabel(r'$\phi$ / deg')
plt.ylabel('$t$ / ms')
plt.ylim(0, 100)
# IRs - dB
plt.figure(figsize=(4, 10))
plt.pcolormesh(phi_deg, time, db(h.T), cmap='Blues')
plt.colorbar()
plt.clim(-200, 0)
plt.xlabel(r'$\phi$ / deg')
plt.ylabel('$t$ / ms')
plt.axis('tight')
# Randomly selected IR - linear scale
nn = np.random.randint(K_mic)
plt.figure()
plt.plot(time, h[nn, :])
plt.xlabel('$t$ / ms')
#plt.ylim(-1, 1)
# Randomly selected IR - dB
plt.figure()
plt.plot(time, db(h[nn, :]))
plt.xlabel('$t$ / ms')
plt.ylim(-120, 0)
# Frequency response
plt.figure()
plt.semilogx(freq, db(np.fft.rfft(h[nn, :])))
plt.ylim(-60, 0)
plt.xlabel('$f$ / Hz')
plt.ylabel('Magnitude / dB')
# Spectrogram
plt.figure()
plt.specgram(h[nn, :], NFFT=128, noverlap=64, Fs=fs, cmap='Blues', vmin=-180);
plt.colorbar(label='dB')
plt.xlabel('$t$ / s')
plt.ylabel('$f$ / Hz')
# plot mirror image sources
plt.figure()
plt.scatter(*xs.T, source_strength*20)
plt.plot(x_mic[:, 0], x_mic[:, 1], 'g.')
plt.gca().add_patch(Rectangle((0, 0), L[0], L[1], fill=False))
plt.xlabel('x / m')
plt.ylabel('y / m')
plt.axis('equal')
plt.title('xy-plane')
plt.figure()
plt.scatter(xs[:, 0], xs[:, 2], source_strength*20)
plt.plot(x_mic[:, 0], x_mic[:, 2], 'g.')
plt.gca().add_patch(Rectangle((0, 0), L[0], L[2], fill=False))
plt.xlabel('x / m')
plt.ylabel('z / m')
plt.axis('equal')
plt.title('xz-plane')
| true
|
d15c10073dd452d8f4c7b999bc526f5e85fb4ab4
|
Python
|
chrisleewoo/soundbug
|
/iter_recurs.py
|
UTF-8
| 319
| 4.0625
| 4
|
[] |
no_license
|
"""
n! computed iteratively vs recursively
"""
def factorial_iterative(n):
    result = 1
    for x in range(2, n + 1):
        result *= x
    return result
def factorial_recursive(n):
# Base case: 1! = 1
if n == 1:
return 1
# Recursive case: n! = n * (n-1)!
else:
return n * factorial_recursive(n-1)
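
# A minimal sanity check comparing both implementations against math.factorial.
if __name__ == '__main__':
    import math
    for n in (1, 5, 8):
        assert factorial_iterative(n) == factorial_recursive(n) == math.factorial(n)
    print("ok")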
| true
|
714fcef9bda55ea216587cbd75ff696b37d74945
|
Python
|
change1q2/Learn
|
/pyweb/web_12_framework_v4/common/base_page.py
|
UTF-8
| 2,130
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
# email: wagyu2016@163.com
# wechat: shoubian01
# author: 王雨泽
import logging
import os
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from config.constant import img_path
class BasePage:
"""basepage 当中跟具体的页面逻辑有关系吗??"""
def __init__(self, driver):
self.driver = driver
def wait_element_visible(self, locator, timeout=30, poll_frequency=0.5):
"""等待元素可见"""
wait = WebDriverWait(self.driver, timeout, poll_frequency=poll_frequency)
return wait.until(EC.visibility_of_element_located(locator))
def wait_element_precence(self, locator, timeout=30, poll_frequency=0.5):
"""等待元素出现"""
wait = WebDriverWait(self.driver, timeout, poll_frequency=poll_frequency)
return wait.until(EC.presence_of_element_located(locator))
def wait_element_clickable(self, locator, timeout=30, poll_frequency=0.5):
"""等待元素可被点"""
wait = WebDriverWait(self.driver, timeout, poll_frequency=poll_frequency)
return wait.until(EC.element_to_be_clickable(locator))
def get_element(self, locator):
"""不需要显示等待"""
try:
return self.driver.find_element(*locator)
        except Exception:
            # save a screenshot for debugging
            self.driver.save_screenshot(os.path.join(img_path, 'screenshot2020_02_07_21_34_30.png'))
            logging.error("Element location failed")
    # What other logic can still be encapsulated in BasePage?
    # 1. Window switching; iframe and alert switching.
    # Mouse drag-and-drop
def drag(self, src, target):
"""鼠标拖拽"""
ac = ActionChains(self.driver)
ac.drag_and_drop(src, target)
ac.perform()
    # scroll to the bottom via JS
def scroll_to(self, width, height):
"""窗口滚动"""
js_code = 'window.scrollTo({}, {})'.format(width, height)
self.driver.execute_script(js_code)
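
# A minimal usage sketch: a hypothetical page object built on BasePage; the
# locator below is an assumption for illustration only.
class LoginPage(BasePage):
    USERNAME = (By.ID, 'username')

    def type_username(self, name):
        self.wait_element_visible(self.USERNAME).send_keys(name)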
| true
|
09727b253a93d91c09e2e2d206ff554b1c7c95a6
|
Python
|
kevin-ci/advent-of-code-2020
|
/chris/day-3/p2.py
|
UTF-8
| 383
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
import functools
from p1 import count_trees
slopes = [
{'x': 1, 'y': 1},
{'x': 3, 'y': 1},
{'x': 5, 'y': 1},
{'x': 7, 'y': 1},
{'x': 1, 'y': 2},
]
with open('input.txt') as f:
lines = f.read()
all_trees = []
for slope in slopes:
start = (0, 0)
all_trees.append(count_trees(lines, start, slope))
total = functools.reduce(lambda x, y: x * y, all_trees)
print(total)
| true
|
144d07d7dce9f1777382b356eafea8ef2d1db955
|
Python
|
yy19970618/apriori
|
/genindex.py
|
UTF-8
| 4,677
| 2.59375
| 3
|
[] |
no_license
|
import tensorflow.python as tf
from tensorflow.python import keras
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
import os
import numpy as np
import sklearn.preprocessing as sp
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
def initConv(shape,dtype=None):
res = np.zeros(shape)
res[0], res[-1] = 1, 1
return tf.constant_initializer(res)
def convolution():
inn = layers.Input(shape=(sequence_length,1))
cnns = []
for i,size in enumerate(filter_sizes):
conv = layers.Conv1D(filters=8, kernel_size=(size),
strides=size, padding='valid', activation='relu',kernel_initializer=initConv([size,8]))(inn)
#if i%2:
pool_size =int(conv.shape[1]/100)
pool = layers.MaxPool1D(pool_size=(pool_size), padding='valid')(conv)
#pool = MaxMin(pool_size)(conv)
cnns.append(pool)
outt = layers.concatenate(cnns)
model = keras.Model(inputs=inn, outputs=outt,name='cnns')
model.summary()
return model
'''
input: query features and data features
query features: support, index, range [workload features are also encoded with word vectors]
data features: word embeddings of the 1000 records, shape 1000*40*50
I will use CNN to extract data features in order to decrease parameters
'''
tf.enable_eager_execution()
filter_sizes=[5,50]
embedding_dimension=50
sequence_length = 30000
'''
# Single-label classifier; works quite well, accuracy close to 1
model = keras.Sequential([
layers.Input(shape=([sequence_length])),
layers.Reshape((sequence_length,1)),
convolution(),
layers.Flatten(),
layers.Dropout(0.1),
layers.Dense(64,activation='relu'),
layers.Dropout(0.1),
layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(),
metrics=['accuracy'])
'''
model = keras.Sequential([
layers.Input(shape=([sequence_length])),
layers.Reshape((sequence_length,1)),
convolution(),
layers.Flatten(),
layers.Dropout(0.1),
layers.Dense(64,activation='relu'),
layers.Dropout(0.1),
layers.Dense(4, activation='sigmoid')
])
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(),
metrics=['accuracy'])
model.summary()
model.load_weights("./1/weight")
x_test = np.loadtxt("test.csv", delimiter=',')
y_test = np.loadtxt("testout.csv", delimiter=',')
y_hat = model.predict(x_test)
print(y_hat)
test_loss, test_accuracy = model.evaluate(x_test,y_test)
print(test_accuracy)
print(test_loss)
'''
input = np.loadtxt("numtrain.csv",delimiter=',')
out = np.loadtxt("out1.csv", delimiter=',')
input = sp.scale(input)
output = out
X_train, X_test, y_train, y_test = train_test_split(input, output, test_size=0.2)
history = model.fit(X_train, y_train,epochs=500,validation_split=0.1)
plt.plot(history.history['accuracy'])
plt.plot(history.history['loss'])
plt.plot(history.history['val_accuracy'])
plt.legend(['training', 'loss', 'validation'], loc='upper left')
plt.show()
tloss = []
taccu = []
for i in range(0,200):
history = model.fit(X_train, y_train)
#prediction = model.predict(X_test)
#prediction = np.ceil(prediction)
#model.save_weights("weight")
test_loss, test_accuracy = model.evaluate(X_test,y_test)
tloss.append(test_loss)
taccu.append(test_accuracy)
plt.plot(history.history['accuracy'])
plt.plot(history.history['loss'])
plt.show()
plt.plot(np.linspace(0,200,200),tloss)
plt.plot(np.linspace(0,200,200),taccu)
plt.show()
model.save_weights("./1/weight")
#model = tf.keras.models.load_model('all_model.h5')
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(),
metrics=['accuracy'])
model.save('all_model.h5')
model = tf.keras.models.load_model('all_model.h5')
print(tf.__version__)
model = keras.models.Sequential([
keras.layers.Dense([100], input_shape=[1000,40,50])
])
# tensorboard earlystopping
logdir = './callbacks'
if not os.path.exists(logdir):
os.mkdir(logdir)
output_model_file = os.path.join(logdir, "index_model.h5")
callbacks =[
keras.callbacks.TensorBoard(logdir),
keras.callbacks.ModelCheckpoint(output_model_file,
save_best_only=True),
keras.callbacks.EarlyStopping(patience=5,
min_delta=1e-3)
]
'''
| true
|
6e2cd085d423d8062fdcdb3837adfbe2d5524bea
|
Python
|
AnnaPopovych/Automation
|
/lesson_3/DZ_3_1.py
|
UTF-8
| 149
| 3.25
| 3
|
[] |
no_license
|
c = [-1, 2, -1, -1]
def my_new_function(a):
b = []
for i in a:
b.append(abs(i))
b.sort()
return b
print(my_new_function(c))
| true
|
54e60e4cac88c5037c545172ee0cdf3f92b35681
|
Python
|
lookfwd/bottlemem
|
/1.simple_server.py
|
UTF-8
| 3,217
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import socket
from time import sleep
APP_PORT = 50000
LISTEN_BACKLOG = 1000000
LOGIN_SERVER = '52.17.32.15'
LOGIN_PORT = 50001
def login():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((LOGIN_SERVER, LOGIN_PORT))
s.sendall(('login').encode())
data = s.recv(1024)
s.close()
def initListening():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', APP_PORT))
s.listen(LISTEN_BACKLOG)
return s
ACCOUNTS = 10
SECURITIES = 10000
import threading
lock = threading.RLock()
import numpy as np
accounts = [np.random.randint(0, 1000, SECURITIES) for i in range(ACCOUNTS)]
def manage_account(account, action):
group, buy, quantity, id = action
if (not buy) and quantity > account[id]:
return "can't sell"
if buy:
account[id] += quantity
else:
account[id] -= quantity
return "ok"
def business_logic(data):
action = tuple(map(int, data.split()))
login()
group = action[0]
with lock:
return manage_account(accounts[group], action)
from multiprocessing import Process, Queue
answer_queue = Queue()
answer_lock = threading.RLock()
answer_registry = {}
answer_cnt = [0]
def answer_manager():
while True:
call_id, answer = answer_queue.get()
with answer_lock:
queue = answer_registry.pop(call_id)
queue.put(answer)
import concurrent
from concurrent.futures import ThreadPoolExecutor
from threading import Thread
answer_thread = Thread(target=answer_manager, args=() )
answer_thread.start()
#answer_thread.join()
processes = []
process_queues = [Queue() for i in range(ACCOUNTS)]
def manage_account_process_thread(call_id, action, account, lock):
login()
with lock:
answer = manage_account(account, action)
answer_queue.put((call_id, answer))
def manage_account_process(id, queue):
account = np.random.randint(0, 1000, SECURITIES)
account_lock = threading.RLock()
with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
while True:
call_id, action = queue.get()
executor.submit(manage_account_process_thread, call_id, action, account, account_lock)
for id in range(ACCOUNTS):
p = Process(target=manage_account_process, args=(id, process_queues[id]))
processes.append(p)
p.start()
#for i in range(ACCOUNTS):
# processes[i].join()
import queue
def business_logic_with_proc(data):
action = tuple(map(int, data.split()))
group = action[0]
my_queue = queue.Queue()
with answer_lock:
answer_cnt[0] += 1
my_answer_cnt = answer_cnt[0]
answer_registry[my_answer_cnt] = my_queue
process_queues[group].put((my_answer_cnt, action))
return my_queue.get()
def serve(conn):
request = conn.recv(1024)
resp = business_logic_with_proc(request)
conn.sendall(resp.encode())
conn.close()
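
# A minimal client sketch for manual testing; the payload follows the
# "group buy quantity id" action format parsed above, with illustrative values.
def demo_client():
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect(('127.0.0.1', APP_PORT))
    c.sendall(b'0 1 10 42')  # group=0, buy, quantity=10, security id=42
    print(c.recv(1024).decode())
    c.close()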
s = initListening()
with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
while True:
conn, addr = s.accept()
#serve(conn)
executor.submit(serve, conn)
| true
|
3433dbfc4cd79b584c7d56cc199f53ca2f18bca4
|
Python
|
kkristof200/py_dependencies
|
/kdependencies/models/installed_package.py
|
UTF-8
| 3,219
| 2.546875
| 3
|
[] |
no_license
|
# --------------------------------------------------------------- Imports ---------------------------------------------------------------- #
# System
from typing import Optional, List
# Local
from .package import Package
from .core import Utils
# ---------------------------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------- class: InstalledPackage -------------------------------------------------------- #
class InstalledPackage(Package):
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
def __init__(
self,
s: str,
private: bool = False
):
lines = s.split('\n')
name = self.__get_var(lines, 'Name: ')
if not name:
self.invalid = True
return
self.invalid = False
super().__init__(name, self.__get_var(lines, 'Version: '), private)
self.summary = self.__get_var(lines, 'Summary: ')
self.home_page = self.__get_var(lines, 'Home-page: ')
self.author = self.__get_var(lines, 'Author: ')
self.author_email = self.__get_var(lines, 'Author-email: ')
self.license = self.__get_var(lines, 'License: ')
self.location = self.__get_var(lines, 'Location: ')
dependencies_str = self.__get_var(lines, 'Requires: ')
self.requires = [d.replace('-', '_') for d in dependencies_str.split(',')] if dependencies_str else []
dependents_str = self.__get_var(lines, 'Required-by: ')
self.required_by = [d.strip().replace('-', '_') for d in dependents_str.split(',')] if dependents_str else []
# -------------------------------------------------------- Public methods -------------------------------------------------------- #
@classmethod
def from_name(cls, name: str, private: bool = False) -> Optional:
package = cls(Utils.get_pip_info_str(name), private=private)
        return package if not package.invalid else None
def get_install_name(self, include_version: bool = False) -> Optional[str]:
return super().get_install_name(include_version=include_version) if not self.private else '{} @ git+{}'.format(super().get_install_name(include_version=include_version), self.home_page) if self.home_page else None
def get_install_command(self, include_version: bool = True) -> str:
return 'python3 -m pip install -U {}'.format(self.versioned_name if include_version and not self.private else self.name if not self.private else 'git+{}'.format(self.home_page))
# ------------------------------------------------------- Private methods -------------------------------------------------------- #
def __get_var(self, lines: List[str], sep: str) -> Optional[str]:
try:
res = [l for l in lines if sep in l][0].split(sep)[-1].strip()
return res if res != 'None' else None
except:
return None
# ---------------------------------------------------------------------------------------------------------------------------------------- #
| true
|
ae6a37eb234dc865c0599488930334fcdf3eb9ac
|
Python
|
dotcs/doimgr
|
/lib/validator.py
|
UTF-8
| 4,463
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
import os
import sys
import logging
import re
class Validator(object):
UNKNOWN = 0
BOOLEAN = 1
INTEGER = 2
STRING = 3
DATE = 4
FUNDER_ID = 5
MEMBER_ID = 6
URL = 7
MIME_TYPE = 8
ORCID = 9
ISSN = 10
TYPE = 11
DIRECTORY = 12
DOI = 13
def __init__(self):
pass
def __get_valid_values(self):
valid_values = [
('has-funder' , self.BOOLEAN ),
('funder' , self.FUNDER_ID ),
('prefix' , self.DOI ),
('member' , self.MEMBER_ID ),
('from-index-date' , self.DATE ),
('until-index-date' , self.DATE ),
('from-deposition-date' , self.DATE ),
('until-deposition-date' , self.DATE ),
('from-frist-deposit-date' , self.DATE ),
('until-first-deposit-date' , self.DATE ),
('from-pub-date' , self.DATE ),
('until-pub-date' , self.DATE ),
('has-license' , self.BOOLEAN ),
('license.url' , self.URL ),
('license.version' , self.STRING ),
('license.delay' , self.INTEGER ),
('has-full-text' , self.BOOLEAN ),
('full-text.version' , self.STRING ),
('full-text.type' , self.MIME_TYPE ),
('public-references' , self.UNKNOWN ),
('has-references' , self.BOOLEAN ),
('has-archive' , self.BOOLEAN ),
('archive' , self.STRING ),
('has-orcid' , self.BOOLEAN ),
('orcid' , self.ORCID ),
('issn' , self.ISSN ),
('type' , self.TYPE ),
('directory' , self.DIRECTORY ),
('doi' , self.DOI ),
('updates' , self.DOI ),
('is-update' , self.BOOLEAN ),
('has-update-policy' , self.BOOLEAN ),
]
return valid_values
def is_valid(self, key, value):
valid_values = self.__get_valid_values()
if key not in [v[0] for v in valid_values]:
            raise ValueError("Key {} is invalid and cannot be added to "
                             "the filter list.".format(key))
for k, t in valid_values:
if k == key:
if not self.__is_valid(value, t):
return False
return True
return False
def __is_valid(self, value, type_):
if type_ in (self.UNKNOWN, self.DIRECTORY):
logging.debug("Datatype handling is unkown. Assuming it is valid.")
return True
elif type_ == self.BOOLEAN:
return type(value) is type(True)
elif type_ == self.INTEGER:
return type(value) == type(0)
elif type_ == self.STRING:
return type(value) == type("string")
elif type_ == self.DATE:
if type(value) == type(0):
# convert to string if necessary
value = str(value)
regex = re.compile("^\d{4}((-\d{2}){1,2})?$")
return regex.match(value) is not None
elif type_ == self.URL:
return type(value) == type("string") and value.startswith('http://')
elif type_ == self.MIME_TYPE:
regex = re.compile("^.+/.+$")
return regex.match(value) is not None
elif type_ == self.ORCID:
regex = re.compile("(http\:\/\/orcid\.org/)?\d{4}-\d{4}-\d{4}-\d{4}")
return regex.match(value) is not None
elif type_ == self.ISSN:
regex = re.compile("^.{4}-.{4}$")
return regex.match(value) is not None
elif type_ == self.TYPE:
# TODO: Wed Jun 4 21:04:32 CEST 2014, @fabi, comment:
# implementation missing
            raise NotImplementedError("Sanity check needs to be implemented")
elif type_ in (self.DOI, self.FUNDER_ID, self.MEMBER_ID):
regex = re.compile("(http\:\/\/dx\.doi\.org/)?10\.[\d\.]+(/*)?")
return regex.match(value) is not None
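
# A minimal usage sketch; the key/value pairs below are illustrative only.
if __name__ == '__main__':
    v = Validator()
    print(v.is_valid('has-funder', True))          # True
    print(v.is_valid('from-pub-date', '2014-06'))  # True
    print(v.is_valid('issn', '1234-5678'))         # True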
| true
|
44436bb8af3c5ecb6bad5bac4d6028a4fd9bebbf
|
Python
|
stijncoelus/python-diematic
|
/version2/test/read-regs.py
|
UTF-8
| 146
| 2.546875
| 3
|
[] |
no_license
|
import json
with open('reg-dump2.json') as data_file:
data = json.loads(data_file.read())
print(data)
for idx in data:
print(data[idx])
| true
|
b56bd5f33b41da1e0c5a168726191962cf3a49c6
|
Python
|
RiteshBhola/Assignment2_apr20
|
/q2.py
|
UTF-8
| 616
| 3.390625
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
h=0.1
x=np.arange(1,2+h,h)
print(x)
n=np.size(x)
print(n)
y=np.zeros(n)
y[0]=1
def solution(t):
return(t/(1+np.log(t)))
def fun(y, t):
    return (y/t) - (y/t)**2
for i in range(0, n-1, 1):
    t = x[i]
    y[i+1] = y[i] + h*fun(y[i], t)
abs_err=np.abs(y-solution(x))
rel_err=abs_err/solution(x)
print("absolute error\n",abs_err)
print("relative error\n",rel_err)
plt.plot(x,y,"*r",label="Euler's method")
x=np.arange(1,2+h,0.1*h)
plt.plot(x,solution(x),"-b",label="Analytic Solution")
plt.xlabel("$x$",fontsize=20)
plt.ylabel("y(x)",fontsize=20)
plt.legend()
plt.show()
| true
|
dfc77c81543d39ddafcfeb7c7628a5ca237a705f
|
Python
|
radRares1/AI
|
/lab4/UI.py
|
UTF-8
| 2,920
| 3.078125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 22:06:06 2020
@author: Rares2
"""
from Controller import Controller,PSO
from Repo import Repo
from hillClimb import HillController
def main():
print("1.EA")
print("2.Hill")
print("3.PSO")
choice = int(input("Choose a method"))
if choice == 1:
n = int(input("Size of the chromosome: "))
no = int(input("Size of initial population: "))
number = int(input("How many generations do you want: "))
repo = Repo(n,no)
controller = Controller(repo)
ok = controller.isFound()
while number > 0 and ok==False:
#print(len(controller.getPopulation()))
controller.crossOver()
#for item in controller.getPopulation():
# print(item.toString())
controller.mutation()
#3print("mute")
# for item in controller.getPopulation():
# print(item.toString())
controller.setScores()
controller.selectTheBest()
#print(controller.getCurrentSolution().toString())
ok = controller.isFound()
if ok == True:
print("true")
number-=1
print("the best found solution is: ")
print(controller.getCurrentSolution().toString())
elif choice == 2:
n = int(input("Size of the chromosome: "))
number = int(input("How many generations do you want: "))
repo = Repo(n,no)
controller = HillController(repo)
ok=False
while number > 0 and ok==False:
controller.generateSolutions()
controller.setScores()
controller.selectTheBest()
ok = controller.isFound()
number-=1
print("the best found solution is: ")
print(controller.getCurrentSolution().toString())
elif choice == 3:
noGen = 100
popSize = 10
indSize = 4
best=0
w=1.0
c1=1.2
c2=0.7
neighbourhoodSize = 2
fitnessList=[]
trials = []
trialCounter = 0
ctrl = PSO(popSize,neighbourhoodSize,indSize,w,c1,c2)
for i in range(noGen):
ctrl.iteration()
if trialCounter<30:
print(len(ctrl.getPop()))
fitnessList.append(ctrl.getPop()[0].getPersonalBest())
trials.append(trialCounter)
trialCounter+=1
pop = ctrl.getPop()
for i in range(popSize):
if(pop[i].getPersonalBest()<pop[best].getPersonalBest()):
best = i
print("Best so far:")
print(pop[best])
main()
| true
|
82da346a6f651892c930b722b3826732537ae90e
|
Python
|
foobarna/play-gae
|
/main.py
|
UTF-8
| 1,292
| 2.65625
| 3
|
[] |
no_license
|
from blog import *
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.out.write("Hello ma'friend!")
self.response.out.write("""<br><a href="/blog">Blog</a>""")
self.response.out.write("""<br><a href="/rot13">Rot13</a>""")
class Rot13Handler(BaseHandler):
def render_str2(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def rot13(self, text=""):
new_text = ""
for c in text:
ascii = ord(c);
if (ascii >= 65 and ascii <= 90):
ascii += 13
if ascii > 90: ascii = 64 + ascii - 90
if (ascii >= 97 and ascii <= 122):
ascii += 13
if ascii > 122: ascii = 96 + ascii - 122
new_text = new_text + chr(ascii)
return new_text
def get(self):
self.render('rot13-form.html')
def post(self):
rot13 = ""
text1 = self.request.get("text")
if text1:
rot13 = self.rot13(text1)
self.render('rot13-form.html', text = rot13 )
app = webapp2.WSGIApplication([('/', MainPage),
("/blog", BlogFront),
("/blog/([0-9]+)", BlogPost),
("/blog/newpost", BlogNewPost),
("/blog/signup", BlogSignup),
("/blog/login", BlogLogin),
("/blog/logout", BlogLogout),
("/blog/welcome", BlogWelcome),
("/rot13", Rot13Handler)],
debug=True)
| true
|
a973846e7719135ef78bc769e774fef87c66ef20
|
Python
|
ansayyad/Python_scripts
|
/getthePhoneNumberfromstring.py
|
UTF-8
| 149
| 2.96875
| 3
|
[] |
no_license
|
import re
phonenumregex = re.compile(r'\d\d\d-\d\d\d-\d\d\d\d')
mo = phonenumregex.search("My phone number is 704-048-9515")
print(mo.group())
| true
|
46b1d73348f44ff4cb00393c96576aa5b94a4d7d
|
Python
|
MaureenZOU/ECS277
|
/project1/config.py
|
UTF-8
| 1,674
| 2.609375
| 3
|
[] |
no_license
|
import os
import cv2
import numpy as np
class Config(object):
"""
Holds the configuration for anything you want it to.
To get the currently active config, call get_cfg().
To use, just do cfg.x instead of cfg['x'].
I made this because doing cfg['x'] all the time is dumb.
"""
def __init__(self, config_dict):
for key, val in config_dict.items():
self.__setattr__(key, val)
def copy(self, new_config_dict={}):
"""
Copies this config into a new config object, making
the changes given by new_config_dict.
"""
ret = Config(vars(self))
for key, val in new_config_dict.items():
ret.__setattr__(key, val)
return ret
def replace(self, new_config_dict):
"""
Copies new_config_dict into this config object.
Note: new_config_dict can also be a config object.
"""
if isinstance(new_config_dict, Config):
new_config_dict = vars(new_config_dict)
for key, val in new_config_dict.items():
self.__setattr__(key, val)
def print(self):
for k, v in vars(self).items():
print(k, ' = ', v)
cfg = Config({
# 'data_root': './data/neghip_64x64x64_uint8.raw',
'data_root': './data/baseline_7_7_7.npy',
'interpolation': 'linear', # linear|cubic
'vol_size': [10, 10, 10],
'sample_rate': 5,
'tff': cv2.COLORMAP_JET,
'transpose': (0,1,2),
'illu_intense': 0.1,
'illu_pos': np.array([20, 20, 20]),
'illumination': True,
'file_name': 'linear_standard_opacue2_[10,5,jet,0|1|2,0.1,20].png'
})
def get_cfg():
return cfg
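
# A minimal usage sketch of the copy/replace semantics described above.
if __name__ == '__main__':
    base = get_cfg()
    variant = base.copy({'sample_rate': 10})
    print(base.sample_rate, variant.sample_rate)  # 5 10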
| true
|
63692876e2ffd9cb2f454f010237571e5dccb5d4
|
Python
|
timvass/pyladies-ntk-praha
|
/lekce2-prvni-program/prvni-program-promenne.py
|
UTF-8
| 106
| 3.359375
| 3
|
[] |
no_license
|
print(1)
print(1 * 8)
print('*' * 8)
print('Soucet cisel 2 a 3 je', 2 + 3)
print('Mama ma misu.' + ' Rej')
| true
|
ca30c421ff3e9adc2fce634f06ca4d7b8647b987
|
Python
|
oliverdippel/Codewars
|
/Skyscraper7x7/Solver/Solution.py
|
UTF-8
| 5,799
| 3.3125
| 3
|
[] |
no_license
|
class Solution:
def __init__(self, boardsize):
"""The pure intent of this class is to sample problems"""
self.boardsize = boardsize
self.__board = self.sample_board(boardsize)
self.clues = self.parse_clues_from_board(self.__board)
def sample_board(self, problemsize):
        # TODO sample one permutation & make use of updates (particularly StrategyStack)
        # -> wasn't there a non-informative clue? in this case, simply sample some tuple clue and make all others
        # uninformative - using the update strategies, the applicable tuples remain.
# now sample another. repeat. if there is no solution available, start again? or make use of stack?
# further, using only a fraction of the available clue information may indicate multiple solutions
# there are two options
# 1) ensure that only one solution is available (hard) - requires checking all possible choices in
# StrategyStack - that is after given a clue and the CrossSolving strategy is applied.
        # 2) check a potential solution provided by any solver for visibility - and return true,
# if the provided solution is in accordance with the clues and that each row & col is
# a valid permutation of the set of range(1, board.probsize +1). Jointly these two criteria should
# guarantee the correctness of a board.
return None
# @staticmethod
# def parse_clues_from_board(b):
# """
# :param b: list of lists
# :return:
# """
# row_clues = Solution.find_row_clues(b)
# col_clues = Solution.find_row_clues(list(zip(*b)))
# return [*[clue[0] for clue in col_clues],
# *[clue[1] for clue in row_clues],
# *[clue[1] for clue in reversed(col_clues)],
# *[clue[0] for clue in reversed(row_clues)]]
@staticmethod
def parse_clues_from_board(board):
"""
:param board: Skyscraper instance
:return:
"""
def find_row_clues(b):
"""
:param b: list of lists!
"""
row_clues = list()
for row in b:
front = board._visible(row)
back = board._visible(tuple(reversed(row)))
row_clues.append((front, back))
return row_clues
b = [list(board.downtown_row[i][0]) for i in range(board.probsize)]
row_clues = find_row_clues(b)
col_clues = find_row_clues(list(zip(*b)))
# create a single line from the row & column clues.
# by convention, the order is top, right, bottom, left
        # TODO careful: different problemsizes return either tuple or list
return [*[clue[0] for clue in col_clues],
*[clue[1] for clue in row_clues],
*[clue[1] for clue in reversed(col_clues)],
*[clue[0] for clue in reversed(row_clues)]]
@staticmethod
def check_a_valid_solution(board, original_clue):
"""check if the proposed board - when interpreted for visibility matches the
(possibly incomplete) original_clue. also ensures that all rows and columns are a
set of range(1, board.probsize +1) and the board is thereby a valid cityblock."""
if not Solution.check_valid_board(board):
return False
b = [list(board.downtown_row[i][0]) for i in range(board.probsize)]
interpreted = Solution.parse_clues_from_board(board)
return all([True if o == i or o == 0 else False for o, i in zip(original_clue, interpreted)])
    @staticmethod
    def check_valid_board(board):
"""Debug method: check that all rows and columns are a
set of range(1, board.probsize +1) and the board is thereby a valid cityblock."""
b = [list(board.downtown_row[i][0]) for i in range(board.probsize)]
numbers = set(range(1, board.probsize + 1))
return all([set(row) == numbers for row in b]) and \
all([set(col) == numbers for col in zip(*b)])
if __name__ == '__main__':
from Skyscraper7x7.Solver.Solver import Skyscraper
# (0) check the intpreted clue is the same as the kata's clue
assert Solution.parse_clues_from_board(((3, 4, 2, 1), (1, 2, 3, 4), (2, 1, 4, 3), (4, 3, 1, 2))) == \
[2, 1, 3, 2, 3, 1, 2, 3, 3, 2, 2, 1, 1, 2, 4, 2]
# (1) check the original board is obtainable from the parsed clue
board = [[2, 1, 4, 3],
[3, 4, 1, 2],
[4, 2, 3, 1],
[1, 3, 2, 4]]
clue = Solution.parse_clues_from_board(board)
board = tuple(tuple(row) for row in board) # conversion to meet kata's expectation
assert Skyscraper(clue).solve() == board
    # (2) Check the kata's incomplete clue matches the board (the provided board
    # is a valid solution given the clue)
    # CAREFUL: if this cannot assert, the kata provided 0's
    interpreted = Solution.parse_clues_from_board(((5, 6, 1, 4, 3, 2),
(4, 1, 3, 2, 6, 5),
(2, 3, 6, 1, 5, 4),
(6, 5, 4, 3, 2, 1),
(1, 2, 5, 6, 4, 3),
(3, 4, 2, 5, 1, 6)))
# interpreted = [2, 1, 3, 2, 2, 3, 4, 2, 3, 6, 3, 1, 1, 4, 2, 3, 3, 2, 4, 4, 1, 3, 2, 2]
originalclu = [0, 0, 0, 2, 2, 0, 0, 0, 0, 6, 3, 0, 0, 4, 0, 0, 0, 0, 4, 4, 0, 3, 0, 0]
# check that the original Clue with incomplete information matches
# the intrepreted clue from the solution's board (which has complete info)
assert all([True if o == i or o == 0 else False for o, i in zip(originalclu, interpreted)])
| true
|
82dcb51aa8b8be3839406817919c45a25059c11f
|
Python
|
dadatomisin/cam2021
|
/mgrid/util_crc.py
|
UTF-8
| 5,513
| 2.78125
| 3
|
[] |
no_license
|
import math
from scipy import special
import numpy as np
from scipy.stats import poisson
from collections.abc import Iterable
# PI controller that takes as input kp, ki, current error and previous integral error
def PI(Kp, Ki, err, prev_i_err):
i_err = prev_i_err + err
u = Kp*err + Ki*i_err
return u, i_err
# Aggregate demand from agents assuming proportional control
def aggregate_demand(CR, mu, battery):
demand = 0
for a,b in zip(mu, battery):
tmp = poisson.ppf(CR, 1*a) - b
demand += max(0, tmp)
return demand
# Aggregate demand from agents assuming PI control
def aggregate_demand_PI(CR, mu, battery, i_err, Kp, Ki):
demand = 0
for a,b,c in zip(mu, battery, i_err):
err = poisson.ppf(CR, 1*a) - b
tmp, _ = PI(Kp, Ki, err, c)
demand += max(0, tmp)
return demand
# Define Supply Curve approximation function
def supply_curve(Q, price, function='sigmoid'):
supply = 0
error = True
if function == 'sigmoid':
error = False
supply = Q + special.logit(price)
elif function == 'linear':
error = False
supply = Q * price
elif function == 'quadratic':
error = False
supply = Q * (price ** 1/2)
if error:
print('Function Type not not specified')
return supply
# Bisection search to find intersection of demand and supply curves
def bisection_search(c, p, k, h, Q, mu, battery, i_err=0, Kp=1, Ki=0, sf='sigmoid', mode='basic'):
tol = 1e-5
if sf == 'sigmoid':
lb = 1e-20
ub = c - lb
else:
lb = 1e-20
ub = 1e2
iter_limit = 10000
for _ in range(iter_limit):
mp = (ub + lb)/2
tmp = (p - mp + k)/(p - mp + k + (0.1*mp) + h)
var1 = supply_curve(Q, mp, function=sf)
if mode == 'basic':
var2 = aggregate_demand(tmp, mu, battery)
elif mode == 'PI':
var2 = aggregate_demand_PI(tmp, mu, battery, i_err, Kp, Ki)
var3 = var1 - var2
if abs(var3) < 1 or (ub - lb)/2 < tol:
#print('converged')
break
if var3 > 0:
ub = mp
else:
lb = mp
return mp
def crc(p, c, k, mu, n, battery, Q, i_err=0, Kp=1, Ki=0, gamma=1, h=0, capacity=1e2):
z = np.zeros(n)
a1 = np.zeros(n)
new_i_err = np.zeros(n)
space = capacity - battery
cost = bisection_search(c, p, k, h, Q, mu, battery, i_err, Kp, Ki, sf='sigmoid', mode='PI')
CR = (p - cost + k)/(p - cost + k + (0.1*cost) + h)
for i in range(n):
z[i] = min(poisson.ppf(CR, 1*mu[i]), capacity)
err = poisson.ppf(CR, 1*mu[i]) - battery[i]
u, new_i_err[i] = PI(Kp, Ki, err, i_err[i])
u = max(0, u)
a1[i] = min(u, space[i])
return a1, z, cost, new_i_err
def basic_crc(p, c, k, mu, n, battery, Q, gamma=1, h=0, capacity=1e2):
z = np.zeros(n)
a1 = np.zeros(n)
new_i_err = np.zeros(n)
space = capacity - battery
cost = bisection_search(c, p, k, h, Q, mu, battery, sf='sigmoid')
#cost = cost/c
CR = (p - cost + k)/(p - cost + k + (0.1*cost) + h)
for i in range(n):
z[i] = min(poisson.ppf(CR, 1*mu[i]), capacity)
err = poisson.ppf(CR, 1*mu[i]) - battery[i]
u = max(0, err)
a1[i] = min(u, space[i])
return a1, z, cost
# Pseudo Reward v1 uses area under the supply curve as effective cost and assumes an ideal step function
def psuedo_reward_v1(p, c, k, Q, n, demands, batteries, actions):
rewards = np.zeros(n)
total_order_quantity = actions.sum(-1)
excess = max(0, total_order_quantity - Q)
for agent in range(n):
demand = demands[agent]
battery = batteries[agent]
supplied = min(demand, battery) * p
# Penalty for Inability to Supply Sufficient Energy from Battery
mismatch = max(0, demand - battery) * k
if total_order_quantity == 0:
# Proportional Cost of Exceeding Renewable Supply
proportion_of_excess = 0
# Discharge of Battery modelled as a Holding Cost
discharge = 0
else:
# Proportional Cost of Exceeding Renewable Supply
proportion_of_excess = max(0, (excess/total_order_quantity)*actions[agent]) * c
# Discharge of Battery modelled as a Holding Cost
discharge = max(0, battery - demand) * 0.1 * c * (excess/total_order_quantity)
reward = supplied - (mismatch+proportion_of_excess+discharge)
if isinstance(reward, Iterable):
reward = sum(reward)
rewards[agent] = reward
return rewards
# Pseudo Reward v2 uses the cost price found using bisection search
def psuedo_reward_v2(p, c, k, Q, n, demands, batteries, actions):
rewards = np.zeros(n)
total_order_quantity = actions.sum(-1)
excess = max(0, total_order_quantity - Q)
for agent in range(n):
demand = demands[agent]
battery = batteries[agent]
# Reward for Suplying Energy to User
supplied = min(demand, battery) * p
# Penalty for Inability to Supply Sufficient Energy from Battery
mismatch = max(0, demand - battery) * k
# Cost of Purchasing Energy
cost = actions[agent]*c
# Discharge Modelled as Holding Cost
discharge = max(0, battery - demand) * 0.1 * c
reward = supplied - (mismatch+cost+discharge)
if isinstance(reward, Iterable):
reward = sum(reward)
rewards[agent] = reward
return rewards
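
# A minimal illustrative run of the market-clearing search; every parameter
# value below is an assumption chosen only to exercise the code.
if __name__ == '__main__':
    mu = np.ones(3)        # Poisson demand rates
    battery = np.zeros(3)  # empty batteries
    price = bisection_search(c=1.0, p=2.0, k=0.5, h=0.1, Q=10.0,
                             mu=mu, battery=battery, sf='sigmoid')
    print('clearing price:', price)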
| true
|
8537c53e8ef82e7e5c80b4bfa1e27c7b0ff15e7b
|
Python
|
ckloppers/PythonTraining
|
/exercises/day3/dice.py
|
UTF-8
| 552
| 4.1875
| 4
|
[] |
no_license
|
import random
# stats data structure
stats = {1:0,
2:0,
3:0,
4:0,
5:0,
6:0}
# method to roll dice
def rollDice():
return random.randint(1, 6)
# rolling the dice
print('Now rolling the die...')
for roll in range(1, 10):
    currentRoll = rollDice()
    currentRollValueInDict = stats[currentRoll]
    stats[currentRoll] = currentRollValueInDict + 1
# Print out the stats
keys = sorted(stats.keys())
print('Tally is:')
for item in keys:
    print('Number of ' + str(item) + '\'s: ' + str(stats[item]))
| true
|
e2479da6c35b2bf981144418f47b73126d946d7a
|
Python
|
diego2097/Retos_Programacion_3_CNYT
|
/src/main/BarPlot.py
|
UTF-8
| 349
| 3.1875
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
import numpy as np
def graficarProba(vector):
print(vector)
x = []
y = []
for i in range(len(vector)):
x.append("Estado " + str(i+1))
for i in range(len(vector)):
y.append(vector[i])
xx = np.array(x)
yy = np.array(y)
plt.bar(xx,yy,align="center")
plt.show()
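
# A minimal illustrative call with a hypothetical probability vector.
if __name__ == '__main__':
    graficarProba([0.25, 0.5, 0.25])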
| true
|
88495c33f3649022f8f4c78f6cbe20b124c08c13
|
Python
|
ymahajan98/CS771-Project
|
/hf_opt.py
|
UTF-8
| 28,747
| 2.78125
| 3
|
[] |
no_license
|
"""
Hessian Free Optimizer.
Original Author: MoonLight, 2018
Modified by: rharish, 2018
"""
import tensorflow as tf
try:
import colored_traceback.auto
except ImportError:
pass
class clr:
"""Used for color debug output to console."""
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
class HFOptimizer(tf.train.Optimizer):
"""Tensorflow based Hessian-Free (Truncated Newton) optimizer.
More details: (Martens, ICML 2010) and (Martens & Sutskever, ICML 2011).
Methods to use:
__init__:
Creates Tensorflow graph and variables.
minimize:
        Performs HF optimization.
"""
DUMPING_NUMERICAL_ERROR_STOP_FLOAT32 = 1e-4
CG_NUMERICAL_ERROR_STOP_FLOAT32 = 1e-20
DUMPING_NUMERICAL_ERROR_STOP_FLOAT64 = 1e-8
CG_NUMERICAL_ERROR_STOP_FLOAT64 = 1e-80
def __init__(
self,
learning_rate=1,
cg_decay=0.95,
damping=0.5,
adjust_damping=True,
batch_size=None,
use_gauss_newton_matrix=True,
preconditioner=False,
prec_loss=None,
gap=10,
cg_max_iters=50,
dtype=tf.float32,
):
"""Create Tensorflow graph and variables.
learning_rate: float number
Learning rate parameter for training neural network.
cg_decay: float number
Decay for previous result of computing delta with conjugate
gradient method for the initialization of next iteration
conjugate gradient.
damping: float number
Initial value of the Tikhonov damping coefficient.
adjust_damping: bool
Whether adjust damping parameter dynamically using
Levenberg-Marquardt heuristic or not.
batch_size: int number or None
Used for Jacobian vector product (Rop) computation, necessary if
used dynamic input batch size.
use_gauss_newton_matrix: bool
Whether use Gauss Newton Matrix (True) or Hessian matrix (False) in
conjugate gradient computation.
preconditioner: bool
Martens preconditioner uses The Empirical Fisher Diagonal for its
computation, don't use it with dynamically adjusting damping
parameter, because it can cause numerical errors.
Can be used only with Gauss Newton Matrix.
prec_loss: Tensorflow tensor object
Used for computing preconditioner; if using preconditioner it's
better to set it explicitly. For this parameter use loss before
reduce sum function over the batch inputs is applied.
gap: int
Size of window gap for which delta loss difference is computed,
            used for early stopping in conjugate gradient computation.
cg_max_iters: int
Number of maximum iterations of conjugate gradient computations.
dtype: Tensorflow type
Type of Tensorflow variables.
"""
super().__init__(True, "HFOptimizer")
self.cg_decay = cg_decay
self.prec_loss = prec_loss
self.batch_size = batch_size
self.use_prec = preconditioner
self.learning_rate = learning_rate
self.use_gnm = use_gauss_newton_matrix
self.damping = tf.constant(damping)
self.gap = gap
self.cg_max_iters = cg_max_iters
self.adjust_damping = adjust_damping
self.damp_pl = tf.constant(0.0)
self.dtype = dtype
self.cg_num_err = HFOptimizer.CG_NUMERICAL_ERROR_STOP_FLOAT32
self.damp_num_err = HFOptimizer.DUMPING_NUMERICAL_ERROR_STOP_FLOAT32
if dtype == tf.float64:
self.cg_num_err = HFOptimizer.CG_NUMERICAL_ERROR_STOP_FLOAT64
self.damp_num_err = (
HFOptimizer.DUMPING_NUMERICAL_ERROR_STOP_FLOAT64
)
if not self.use_gnm:
self.damp_num_err = 1e-1
if not self.use_gnm and self.use_prec:
self.use_prec = False
print(
clr.WARNING
+ "WARNING: You set preconditioner to True but "
+ "use_gauss_newton_matrix to False, "
+ "and it's prohibited, so we set preconditioner back to "
+ "False, if you ask why see more information "
+ "on (Martens & Sutskever, ICML 2011)."
+ clr.ENDC
)
elif self.use_prec and self.use_gnm and self.prec_loss is None:
print(
clr.WARNING
+ "WARNING: If you use preconditioner it is "
+ "better to set prec_loss explicitly, because it can "
+ "cause graph making problem. (What's prec_loss see "
+ "in description)"
+ clr.ENDC
)
def info(self):
"""Print initial settings of HF optimizer."""
print(
clr.BOLD
+ clr.OKGREEN
+ "Hessian-Free Optimizer initial settings:"
+ clr.ENDC
)
print(" CG delta decay: {}".format(self.cg_decay))
print(" Learning Rate: {}".format(self.learning_rate))
print(" Initial Tikhonov damping: {}".format(self.damping))
if self.adjust_damping:
print(
" Optimizer adjusts damping dynamically using "
+ "Levenberg-Marquardt heuristic."
)
else:
print(" Tikhonov damping is static.")
if self.use_gnm:
print(" Optimizer uses Gauss-Newton matrix for cg computation.")
else:
print(" Optimizer uses Hessian matrix for cg computation.")
if self.use_prec:
print(" Optimizer uses preconditioner.")
print(" Gap of delta loss tracking: {}".format(self.gap))
print(" Max cg iterations: {}".format(self.cg_max_iters))
print(clr.OKGREEN + "Optimizer is ready for using." + clr.ENDC)
def compute_gradients(
self,
loss,
output,
var_list=None,
gate_gradients=tf.train.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None,
):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A Tensor containing the value to minimize or a callable
taking no arguments which returns the value to minimize.
output: Tensorflow tensor object
Variable with respect to which the Hessian of the objective is
positive-definite, implicitly defining the Gauss-Newton matrix.
Typically, it is the activation of the output layer.
var_list: Optional list or tuple of `tf.Variable` to update to
minimize `loss`. Defaults to the list of variables collected
in the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient
terms. Valid values are defined in the class
`AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
grad_loss: Optional. A `Tensor` holding the gradient computed for
`loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present,
but gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable`
objects.
ValueError: If some arguments are invalid.
@compatibility(eager)
Eager execution not supported.
@end_compatibility
"""
self.loss = loss
self.output = output
# Network weights
if var_list is None:
self.W = tf.trainable_variables()
else:
self.W = var_list
return super().compute_gradients(
loss=loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss,
)
def _create_slots(self, var_list):
first_var = min(var_list, key=lambda x: x.name)
self._create_non_slot_variable(
initial_value=0, name="cg_step", colocate_with=first_var
)
for w in var_list:
self._zeros_slot(w, "delta", self._name)
self._zeros_slot(w, "direction", self._name)
self._zeros_slot(w, "residual", self._name)
def minimize(
self,
loss,
output,
global_step=None,
var_list=None,
gate_gradients=tf.train.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None,
verbose=False,
):
"""Add operations to minimize `loss` by updating `var_list`.
This method simply combines calls `compute_gradients()` and
`apply_gradients()`. If you want to process the gradient before
applying them call `compute_gradients()` and `apply_gradients()`
explicitly instead of using this function.
Args:
loss: A `Tensor` containing the value to minimize.
output: Tensorflow tensor object
Variable with respect to which the Hessian of the objective is
positive-definite, implicitly defining the Gauss-Newton matrix.
Typically, it is the activation of the output layer.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
var_list: Optional list or tuple of `Variable` objects to update to
minimize `loss`. Defaults to the list of variables collected
in the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient
terms. Valid values are defined in the class
`AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A `Tensor` holding the gradient computed for
`loss`.
verbose: bool
If True prints CG iteration number.
Returns:
An Operation that updates the variables in `var_list`. If
`global_step` was not `None`, that operation also increments
`global_step`.
Raises:
ValueError: If some of the variables are not `Variable` objects.
@compatibility(eager)
Eager execution not supported.
@end_compatibility
"""
self.verbose = verbose
grads_and_vars = self.compute_gradients(
loss,
output,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss,
)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for "
"ops that do not support gradients, between variables %s and "
"loss %s." % ([str(v) for _, v in grads_and_vars], loss)
)
return self.apply_gradients(
grads_and_vars, global_step=global_step, name=name
)
def _apply_dense(self, grad, var):
"""Do nothing, as everything is done in __train."""
return tf.no_op()
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients. If
`global_step` was not None, that operation also increments
`global_step`.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
RuntimeError: If you should use `_distributed_apply()` instead.
"""
super().apply_gradients(grads_and_vars, global_step, name)
with tf.name_scope(name, self._name):
grads = [grad for grad, var in grads_and_vars]
cg_op, res_norm, dl = self.__conjugate_gradient(grads)
self.ops = {
"cg_update": cg_op,
"res_norm": res_norm,
"dl": dl,
"set_delta_0": self.__update_delta_0(),
"train": self.__train_op(),
}
return self.__train()
def __train(self):
"""Perform main training operations."""
self.damp_pl = self.damping
if self.adjust_damping:
loss_before_cg = tf.identity(self.loss)
else:
loss_before_cg = tf.no_op()
dl_track = tf.expand_dims(self.ops["dl"], axis=0)
combined_op_1 = tf.group(
loss_before_cg, dl_track[0], self.ops["set_delta_0"]
)
with tf.control_dependencies([combined_op_1]):
with tf.variable_scope("for_loop"):
i = tf.constant(0)
stop = tf.constant(False)
def loop(i, stop, dl_track):
if self.verbose:
printer = tf.print(
clr.OKGREEN + "\r[CG iteration: ", i, "]" + clr.ENDC
)
else:
                    printer = tf.no_op()
k = tf.maximum(self.gap, i // self.gap)
rn = tf.identity(self.ops["res_norm"])
                    with tf.control_dependencies(
                        [printer, rn, self.ops["cg_update"]]
                    ):
                        # Creating `stop` and `dl_track` inside this block
                        # ties each loop iteration to one CG update.
                        stop = tf.cond(
                            rn < self.cg_num_err, lambda: True, lambda: stop
                        )
                        dl_track = tf.concat(
                            [dl_track, tf.expand_dims(self.ops["dl"], axis=0)],
                            axis=0,
                        )
def early_stop():
margin = (
dl_track[i + 1] - dl_track[i + 1 - k]
) / dl_track[i + 1]
                        return tf.cond(
                            tf.logical_and(
                                tf.logical_not(tf.debugging.is_nan(margin)),
                                margin < 1e-4,
                            ),
                            lambda: True,
                            lambda: stop,
                        )
stop = tf.cond(i > k, early_stop, lambda: stop)
i += 1
return i, stop, dl_track
i, stop, dl_track = tf.while_loop(
lambda i, stop, dl_track: tf.logical_and(
i < self.cg_max_iters, tf.logical_not(stop)
),
loop,
(i, stop, dl_track),
shape_invariants=(
i.get_shape(),
stop.get_shape(),
tf.TensorShape([None]),
),
parallel_iterations=1,
maximum_iterations=self.cg_max_iters,
)
loop_vars = tf.group(i, stop, dl_track)
if self.adjust_damping:
self.damp_pl = tf.constant(0.0)
dl = tf.identity(self.ops["dl"])
self.damp_pl = self.damping
else:
dl = tf.no_op()
printer = tf.cond(
stop, lambda: tf.print("Stopped"), lambda: tf.no_op()
)
combined_op_2 = tf.group(printer, loop_vars, dl, self.ops["train"])
with tf.control_dependencies([combined_op_2]):
if self.adjust_damping:
loss_after_cg = tf.identity(self.loss)
reduction_ratio = (loss_after_cg - loss_before_cg) / dl
def elseif():
return tf.cond(
tf.logical_and(
reduction_ratio > 0.75,
self.damping > self.damp_num_err,
),
lambda: self.damping / 1.5,
lambda: self.damping,
)
self.damping = tf.cond(
tf.logical_and(
reduction_ratio < 0.25,
self.damping > self.damp_num_err,
),
lambda: self.damping * 1.5,
elseif,
)
return tf.group(combined_op_2, self.damping)
def __conjugate_gradient(self, gradients):
"""Perform conjugate gradient method.
It minimizes the quadratic equation and find best delta of
network parameters.
gradients: list of Tensorflow tensor objects
Network gradients.
return: Tensorflow tensor object
Update operation for delta.
return: Tensorflow tensor object
Residual norm, used to prevent numerical errors.
return: Tensorflow tensor object
Delta loss.
"""
with tf.name_scope("conjugate_gradient"):
cg_update_ops = []
prec = None
if self.use_prec:
if self.prec_loss is None:
graph = tf.get_default_graph()
lop = self.loss.op.node_def
self.prec_loss = graph.get_tensor_by_name(
lop.input[0] + ":0"
)
                batch_size = None
                if self.batch_size is None:
                    batch_size = self.prec_loss.get_shape()[0]
                    self.prec_loss = tf.unstack(self.prec_loss)
else:
self.prec_loss = [
tf.gather(self.prec_loss, i)
for i in range(self.batch_size)
]
batch_size = len(self.prec_loss)
prec = [
[
g ** 2
for g in tf.gradients(
tf.gather(self.prec_loss, i), self.W
)
]
for i in range(batch_size)
]
                # Transpose the (batch x parameter) list of lists so we can
                # sum the squared per-example gradients for each parameter.
                prec = [
                    (sum(tensor) + self.damping) ** (-0.75)
                    for tensor in zip(*prec)
                ]
Ax = None
if self.use_gnm:
Ax = self.__Gv([self.get_slot(w, "delta") for w in self.W])
else:
Ax = self.__Hv(
gradients, [self.get_slot(w, "delta") for w in self.W]
)
b = [-grad for grad in gradients]
bAx = [b - Ax for b, Ax in zip(b, Ax)]
condition = tf.equal(
self._get_non_slot_variable("cg_step", self.W[0].graph), 0
)
r = [
tf.cond(condition, lambda: tf.assign(r, bax), lambda: r)
for r, bax in zip(
[self.get_slot(w, "residual") for w in self.W], bAx
)
]
d = None
if self.use_prec:
d = [
tf.cond(condition, lambda: tf.assign(d, p * r), lambda: d)
for p, d, r in zip(
prec,
[self.get_slot(w, "direction") for w in self.W],
r,
)
]
else:
d = [
tf.cond(condition, lambda: tf.assign(d, r), lambda: d)
for d, r in zip(
[self.get_slot(w, "direction") for w in self.W], r
)
]
Ad = None
if self.use_gnm:
Ad = self.__Gv(d)
else:
Ad = self.__Hv(gradients, d)
residual_norm = tf.reduce_sum([tf.reduce_sum(r ** 2) for r in r])
alpha = tf.reduce_sum(
[tf.reduce_sum(d * ad) for d, ad in zip(d, Ad)]
)
oalpha = alpha
alpha = residual_norm / alpha
if self.use_prec:
beta = tf.reduce_sum(
[
tf.reduce_sum(p * (r - alpha * ad) ** 2)
for r, ad, p in zip(r, Ad, prec)
]
)
else:
beta = tf.reduce_sum(
[
tf.reduce_sum((r - alpha * ad) ** 2)
for r, ad in zip(r, Ad)
]
)
self.beta = beta
beta = beta / residual_norm
for i, w in reversed(list(enumerate(self.W))):
delta = self.get_slot(w, "delta")
update_delta = tf.assign(
delta, delta + alpha * d[i], name="update_delta"
)
update_residual = tf.assign(
self.get_slot(w, "residual"),
r[i] - alpha * Ad[i],
name="update_residual",
)
p = 1.0
if self.use_prec:
p = prec[i]
update_direction = tf.assign(
self.get_slot(w, "direction"),
p * (r[i] - alpha * Ad[i]) + beta * d[i],
name="update_direction",
)
cg_update_ops.append(update_delta)
cg_update_ops.append(update_residual)
cg_update_ops.append(update_direction)
cg_update_ops.append(tf.print("Old Alpha = ", oalpha))
cg_update_ops.append(tf.print("Alpha = ", alpha))
cg_update_ops.append(tf.print("Beta = ", beta))
cg_update_ops.append(tf.print("Res norm = ", residual_norm))
with tf.control_dependencies(cg_update_ops):
cg_update_ops.append(
tf.assign_add(
self._get_non_slot_variable(
"cg_step", self.W[0].graph
),
1,
)
)
cg_op = tf.group(*cg_update_ops, name="cg_op")
            dl = tf.reduce_sum(
                [
                    tf.reduce_sum(
                        0.5 * (self.get_slot(w, "delta") * ax)
                        + grad * self.get_slot(w, "delta")
                    )
                    for w, grad, ax in zip(self.W, gradients, Ax)
                ],
                name="dl",
            )
return cg_op, residual_norm, dl
def __Hv(self, grads, vec):
"""Compute Hessian vector product.
grads: list of Tensorflow tensor objects
Network gradients.
vec: list of Tensorflow tensor objects
Vector that is multiplied by the Hessian.
return: list of Tensorflow tensor objects
Result of multiplying Hessian by vec.
"""
grad_v = [tf.reduce_sum(g * v) for g, v in zip(grads, vec)]
Hv = tf.gradients(grad_v, self.W, stop_gradients=vec)
Hv = [hv + self.damp_pl * v for hv, v in zip(Hv, vec)]
return Hv
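    # Note (added): __Hv relies on Pearlmutter's trick -- grad_v = grad(L) . v
    # is a scalar, so differentiating it once more w.r.t. the weights yields
    # H v without ever materializing the full Hessian.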
def __Gv(self, vec):
"""Compute the product G by vec = JHJv (G is the Gauss-Newton matrix).
vec: list of Tensorflow tensor objects
Vector that is multiplied by the Gauss-Newton matrix.
return: list of Tensorflow tensor objects
Result of multiplying Gauss-Newton matrix by vec.
"""
Jv = self.__Rop(self.output, self.W, vec)
Jv = tf.reshape(tf.stack(Jv), [-1, 1])
H = tf.transpose(tf.gradients(self.loss, self.output)[0])
if len(H.get_shape().as_list()) < 2:
HJv = tf.gradients(H * Jv, self.output, stop_gradients=Jv)[0]
JHJv = tf.gradients(
tf.transpose(HJv) * self.output, self.W, stop_gradients=HJv
)
else:
HJv = tf.gradients(
tf.matmul(H, Jv), self.output, stop_gradients=Jv
)[0]
JHJv = tf.gradients(
tf.matmul(tf.transpose(HJv), self.output),
self.W,
stop_gradients=HJv,
)
JHJv = [gv + self.damp_pl * v for gv, v in zip(JHJv, vec)]
return JHJv
def __Rop(self, f, x, vec):
"""Compute Jacobian vector product.
f: Tensorflow tensor object
Objective function.
x: list of Tensorflow tensor objects
Parameters with respect to which computes Jacobian matrix.
vec: list of Tensorflow tensor objects
Vector that is multiplied by the Jacobian.
return: list of Tensorflow tensor objects
Result of multiplying Jacobian (df/dx) by vec.
"""
r = None
if self.batch_size is None:
try:
r = [
tf.reduce_sum(
[
tf.reduce_sum(v * tf.gradients(f, x)[i])
for i, v in enumerate(vec)
]
)
for f in tf.unstack(f)
]
except ValueError:
assert False, (
clr.FAIL + clr.BOLD + "Batch size is None, but used "
"dynamic shape for network input, set proper "
"batch_size in HFOptimizer initialization" + clr.ENDC
)
else:
if len(f.get_shape().as_list()) == 0:
fn = tf.reshape(f, [1])
else:
fn = f
r = [
tf.reduce_sum(
[
tf.reduce_sum(v * tf.gradients(tf.gather(fn, i), x)[j])
for j, v in enumerate(vec)
]
)
for i in range(self.batch_size)
]
assert r is not None, (
clr.FAIL
+ clr.BOLD
+ "Something went wrong in Rop computation"
+ clr.ENDC
)
return r
def __update_delta_0(self):
"""Update initial delta for conjugate gradient method.
The old delta is multiplied by cg_decay.
return: list of Tensorflow tensor objects
Update initial delta operation.
"""
update_delta_0_ops = []
for w in self.W:
delta = self.get_slot(w, "delta")
update_delta = tf.assign(delta, self.cg_decay * delta)
update_delta_0_ops.append(update_delta)
update_delta_0_op = tf.group(*update_delta_0_ops)
return update_delta_0_op
def __train_op(self):
"""Perform main training operation, i.e. updates weights.
return: list of Tensorflow tensor objects
Main training operations
"""
update_ops = []
for w in reversed(self.W):
with tf.control_dependencies(update_ops):
update_ops.append(
tf.assign(
w, w + self.learning_rate * self.get_slot(w, "delta")
)
)
training_op = tf.group(*update_ops)
return training_op
| true
|
c8286647c0781a4c8f8a8b3498f964443db5bb95
|
Python
|
lonesloane/Python-Snippets
|
/scripts_and_streams/commandline.py
|
UTF-8
| 472
| 3.03125
| 3
|
[] |
no_license
|
__author__ = 'stephane'
import sys
class CommandLine(object):
def __init__(self):
pass
@staticmethod
def test_argv():
print('\n**********************')
print( "sys.argv :")
print('**********************')
for argv in sys.argv:
print(argv)
def main():
commandline = CommandLine()
# in the console, run commandline.py arg1 arg2
commandline.test_argv()
if __name__ == "__main__":
main()
| true
|
1155c29909a8b37ddc4e9cf6ea7848b0e319fca5
|
Python
|
fonsecguilherme/Exercicios
|
/Lista_2/10.nPerfeitos.py
|
UTF-8
| 324
| 4.0625
| 4
|
[] |
no_license
|
num = int(input("Enter the number to check: "))
divisor = 1
lista = []
while num > divisor:
if num % divisor == 0:
lista.append(divisor)
divisor += 1
print("Divisores: " +str(lista))
resultado = sum(lista)
if resultado == num:
    print("It is a perfect number.")
else:
    print("It is not a perfect number.")
| true
|
989f867d88442e2bd2018921a63d077e0f090ee3
|
Python
|
amolsawant844/SEM-4
|
/Python-basic-programs/Inheritance and polymorphism/use_of_super.py
|
UTF-8
| 410
| 3.953125
| 4
|
[] |
no_license
|
class square:
    def __init__(self, x):
        self.x = x
    def area(self):
        print("area of square=", self.x * self.x)
class rectangle(square):
    def __init__(self, x, y):
        super().__init__(x)
        self.y = y
    def area(self):
        super().area()
        print("area of rectangle", self.x * self.y)
a, b = [float(x) for x in input("enter two values:").split()]
r = rectangle(a, b)
r.area()
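# Example (added): entering "3 4" prints
#   area of square= 9.0
#   area of rectangle 12.0
# rectangle.area() first dispatches to square.area() via super().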
| true
|
529036edfadc357e3844ad532b7a172c24c815a0
|
Python
|
mjgpy3/CoopDataManager
|
/Source/Model/schema.py
|
UTF-8
| 4,133
| 2.828125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/usr/bin/env python
# Created by Michael Gilliland
# Date: Wed Aug 22 10:32:15 EDT 2012
#
#
"""
Represents the database's structure
"""
import model_abstraction as m
def build_model_structure(model_structure):
"""
This is where the model's structure is coded using abstractions from the model_abstraction.py file
"""
tables = []
transaction_tables = []
name = 'test2.db'
# Semester Attributes
fall_or_spring_FOS = m.Attribute('FallOrSpring', 'TEXT', ['Fall', 'Spring'])
year_FOS = m.Attribute('Year', 'INTEGER')
# Semester Table
semester_table = m.Table('Semester', [fall_or_spring_FOS, year_FOS])
semester_table.primary_key = [fall_or_spring_FOS, year_FOS]
tables.append(semester_table)
# Parent Attributes
last_name_P = m.Attribute('LastName', 'TEXT')
first_name_P = m.Attribute('FirstName', 'TEXT')
# Parent Table
parent_table = m.Table('Parent', [last_name_P, first_name_P])
parent_table.primary_key = [last_name_P, first_name_P]
tables.append(parent_table)
# Student Attributes
last_name_S = m.Attribute('LastName', 'TEXT')
first_name_S = m.Attribute('FirstName', 'TEXT')
parent_id_1_S = m.Attribute('Parent1Id', 'INTEGER')
parent_id_2_S = m.Attribute('Parent2Id', 'INTEGER')
grade_S = m.Attribute('Grade', 'TEXT')
# Student Table
student_table = m.Table('Student', [last_name_S, first_name_S, parent_id_1_S, parent_id_2_S, grade_S])
student_table.primary_key = [last_name_S, first_name_S]
student_table.set_reference(parent_id_1_S, parent_table)
student_table.set_reference(parent_id_2_S, parent_table)
tables.append(student_table)
# Class Attributes
name_C = m.Attribute('Name', 'TEXT')
hour_C = m.Attribute('Hour', 'TEXT')
cost_C = m.Attribute('Cost', 'INTEGER')
grade_min_C = m.Attribute('GradeMin', 'TEXT', ['Nurs', 'Pre-K', 'K', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12'])
grade_max_C = m.Attribute('GradeMax', 'TEXT', ['Nurs', 'Pre-K', 'K', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12'])
max_number_of_students_C = m.Attribute('MaxNumberOfStudents', 'INTEGER')
semester_id_C = m.Attribute('SemesterId', 'INTEGER')
# Class Table
class_table = m.Table('Class', [name_C, hour_C, cost_C, grade_min_C, grade_max_C, max_number_of_students_C, semester_id_C])
class_table.primary_key = [name_C, semester_id_C]
class_table.set_reference(semester_id_C, semester_table)
tables.append(class_table)
# IsEnrolledIn Attributes
student_id_I = m.Attribute('StudentId', 'INTEGER')
class_id_I = m.Attribute('ClassId', 'INTEGER')
# IsEnrolledIn Table
is_enrolled_in_table = m.Table('Enrollment', [student_id_I, class_id_I])
is_enrolled_in_table.primary_key = [student_id_I, class_id_I]
is_enrolled_in_table.set_reference(student_id_I, student_table)
is_enrolled_in_table.set_reference(class_id_I, class_table)
transaction_tables.append(is_enrolled_in_table)
# Teaches Attributes
parent_id_T = m.Attribute('ParentId', 'INTEGER')
class_id_T = m.Attribute('ClassId', 'INTEGER')
# Teaches Table
teaches_table = m.Table('Teacher', [parent_id_T, class_id_T])
teaches_table.primary_key = [parent_id_T, class_id_T]
teaches_table.set_reference(parent_id_T, parent_table)
teaches_table.set_reference(class_id_T, class_table)
transaction_tables.append(teaches_table)
# IsHelperFor Attributes
parent_id_IHF = m.Attribute('ParentId', 'INTEGER')
class_id_IHF = m.Attribute('ClassId', 'INTEGER')
    # IsHelperFor Table
is_helper_for_table = m.Table('Helper', [parent_id_IHF, class_id_IHF])
is_helper_for_table.primary_key = [parent_id_IHF, class_id_IHF]
is_helper_for_table.set_reference(parent_id_IHF, parent_table)
is_helper_for_table.set_reference(class_id_IHF, class_table)
transaction_tables.append(is_helper_for_table)
# Generate Structure
model_structure.name = name
model_structure.tables = tables
model_structure.transaction_tables = transaction_tables
return model_structure
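# Hedged usage sketch (added): `ModelStructure` is assumed to be the
# container class from model_abstraction.py whose instance the caller
# passes in.
# structure = build_model_structure(m.ModelStructure())
# print(structure.name)                 # -> 'test2.db'
# print(len(structure.tables))          # -> 4 entity tables
# print(len(structure.transaction_tables))  # -> 3 relationship tables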
| true
|
5122d8ee3230c2b065eb22731d55b0e0616df91e
|
Python
|
kmsmith137/ch_frb_l1
|
/rpc_server_async.py
|
UTF-8
| 5,354
| 2.671875
| 3
|
[] |
no_license
|
from __future__ import print_function
import sys
import threading
import time
import random
import zmq
import msgpack
'''
A python prototype of how an async version of the RPC server might
work.
One desired property is that the server respond to client requests
quickly, because we don't want requested data to drop out of the ring
buffer while we're servicing some other client's request. Therefore,
we want the RPC server to have one or more workers that are actually
writing files to disk, and then the RPC server itself just has to poll
on the client socket waiting for requests, and the worker socket
waiting for replies; when it gets one it forwards it to the client.
In C++, if we use shared_ptrs to keep the assembled_chunks alive, then
we need to work a little harder. The RPC server will receive client
requests and retrieve shared_ptrs for the assembled_chunks to be
written out. It needs to communicate those shared_ptrs to the worker
threads, in such a way that the shared_ptr stays alive. Perhaps we
should use a (mutex-protected) std::queue of shared_ptrs (or a struct
including the shared_ptr and other function args) to pass them between
the RPC server and worker threads. We could still use ZMQ for the
messaging, but the server to worker request would just be an empty
message saying "grab work from the queue"; the reply could stay the
same and go via ZMQ.
Otherwise, the RPC server would have to keep like a map of the
requests to shared_ptrs, removing them when the request completed.
'''
class Worker(threading.Thread):
def __init__(self, context):
threading.Thread.__init__(self)
self.context = context
def run(self):
socket = self.context.socket(zmq.DEALER)
socket.connect('inproc://backend')
print('Worker started')
try:
while True:
msgs = socket.recv_multipart()
#print('Worker received request:', msgs)
client = msgs[0]
msg = msgs[1]
req = msgpack.unpackb(msg)
beam, chunk, filename = req
# Do work
print('Worker writing beam', beam, 'chunk', chunk, '...')
time.sleep(random.random() * 3)
success,error_message = True, 'Success'
reply = [beam, chunk, filename, success, error_message]
print('Worker sending reply...')
msg = msgpack.packb(reply)
socket.send_multipart((client, msg))
except:
import traceback
print('Exception in worker:')
traceback.print_exc()
socket.close()
if __name__ == '__main__':
context = zmq.Context()
socket = context.socket(zmq.ROUTER)
socket.setsockopt(zmq.ROUTER_MANDATORY, 1)
socket.bind('tcp://127.0.0.1:5555')
# For a server with a (local) pool of workers, see
# zmq-guide.html#The-Asynchronous-Client-Server-Pattern
# ie, http://zguide.zeromq.org/py:asyncsrv
# Backend socket, for talking to workers. Note 'inproc' transport.
backend = context.socket(zmq.DEALER)
backend.bind('inproc://backend')
workers = []
for i in range(2):
worker = Worker(context)
worker.start()
workers.append(worker)
poll = zmq.Poller()
poll.register(socket, zmq.POLLIN)
poll.register(backend, zmq.POLLIN)
while True:
# Wait for next request from client, or reply from Worker
print('Polling...')
timeout = None
events = poll.poll(timeout)
for s,flag in events:
if s == backend:
print('Received result from worker')
msgs = backend.recv_multipart()
client = msgs[0]
msg = msgs[1]
# Proxy
socket.send_multipart((client, msg))
continue
if s != socket:
print('Polled socket is neither backend nor frontend:', s)
continue
# Client message
print('Received request from client')
msgs = socket.recv_multipart()
client = msgs[0]
msg = msgs[1]
up = msgpack.Unpacker()
up.feed(msg)
funcname = up.next()
print('Function name:', funcname)
if funcname == 'write_chunks':
args = up.next()
print('Args:', args)
beams, min_chunk, max_chunk, fn_pat = args
# For each beam x chunk, send a request to the worker pool.
for chunk in range(min_chunk, max_chunk):
print('Sending request to worker...')
beam, filename = 1, 'filename.msgpack'
req = msgpack.packb((beam, chunk, filename))
backend.send_multipart((client, req))
elif funcname == 'get_statistics':
reply = msgpack.packb([{'hello':42, 'world':43},{'yo':100},])
print('Client:', client)
#socket.send_multipart([client, reply])
                socket.send(client, zmq.SNDMORE)
                socket.send(reply)
else:
print('Unknown funcname', funcname)
continue
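# Hedged client sketch (added, not part of the original prototype). The
# framing is inferred from the server's recv_multipart()/Unpacker usage:
# one message containing packb(funcname) followed by packb(args); a DEALER
# socket supplies the identity frame the ROUTER expects.
# import zmq, msgpack
# ctx = zmq.Context()
# sock = ctx.socket(zmq.DEALER)
# sock.connect('tcp://127.0.0.1:5555')
# sock.send(msgpack.packb('write_chunks') +
#           msgpack.packb(([1], 0, 4, 'chunk-%04i.msgpack')))
# for _ in range(4):                      # one reply per requested chunk
#     print(msgpack.unpackb(sock.recv()))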
| true
|
de87c0ad3d901c823b69a2734c3940b5c18d25d0
|
Python
|
ScottLiao920/SAUVC2019_MECATRON
|
/pass_gate.py
|
UTF-8
| 1,861
| 2.734375
| 3
|
[] |
no_license
|
import time
import cv2
import movement
import localizer
from camera_module import camera_thread
import gesture_detection
camera_front = camera_thread(0)
camera_front.start()
camera_down = camera_thread(1)
camera_down.start()
def pass_gate(pos):
fwd_count = 25
find_count = 25
while True:
t1 = time.time()
img = camera_front.read()
img_down = camera_down.read()
pos[0], pos[1], pos[2] = localizer.get_pos(img_down, pos[0], pos[1], pos[2])
coords = gesture_detection.get_coord_from_detection(img)
# x,y,width,height,category
cv2.imshow('pass_gate_img', img)
print(coords)
        if find_count and len(coords) == 0:
            movement.turn_left()
            find_count -= 1
        elif len(coords) == 0:
            movement.turn_right()
        if len(coords) == 2 and coords[0][4] == 0:
x1, x2, y1, y2 = coords[0][0], coords[1][0], coords[0][1], coords[1][1]
if ((x1 + x2) // 2) < 800:
movement.turn_left()
continue
elif ((x1 + x2) // 2) > 720:
movement.turn_right()
continue
if not 100 < ((y1 + y2) // 2) < 200:
if (y1 + y2) // 2 < 100:
movement.move_up()
continue
else:
movement.move_down()
continue
movement.move_fwd()
if (coords[0][3] + coords[1][3]) // 2 > 180:
if fwd_count:
fwd_count -= 1
movement.move_fwd()
else:
movement.stop()
break
t2 = time.time()
print("fps:", 1 / (t2 - t1))
print("Gate passed!")
camera_front.release()
return localizer.get_pos(img_down, pos[0], pos[1], pos[2])
| true
|
0b7e60cf314c6285990cc57201e058920f096b5f
|
Python
|
qicst23/Daily
|
/Others/brute_force_1.py
|
UTF-8
| 718
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
"""
输入正整数n,按照从小到大顺序输出所有形如abcde/fghij=n的表达式,其中a~j恰好是数字0~9的一个排列,2<=n<=79
样例输入:62
样例输出:
79546/01283=62
94736/01528=62
"""
def brute_force_1(n):
    a = 1234  # smallest candidate for fghij ('01234'); a leading zero is allowed
while a * n <= 98765:
if validate(a, a * n):
# print result as required
print '%05d' % (a*n) + '/' + '%05d' % a + '=' + str(n)
a += 1
def validate(a, b):
'''validate whether a and b has 10 different digits'''
digit_set = set()
for i in range(5):
digit_set.add(a % 10)
a = a / 10
digit_set.add(b % 10)
b = b / 10
return digit_set == set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
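# Worked example (added): for n = 62 and a = 1283, a * n = 79546; the ten
# digits of '79546' + '01283' are all distinct, so "79546/01283=62" is printed.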
| true
|
a2fffb4e728c3441d3293c1153c1fd7905b3698f
|
Python
|
amangour30/BINC
|
/motiondetect.py
|
UTF-8
| 5,337
| 2.96875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
# import the necessary packages
import argparse
import datetime
import imutils
import time
import cv2
import RPi.GPIO as GPIO
import pigpio
from PIL import Image
import numpy as np
from activation_functions import sigmoid_function, tanh_function
from cost_functions import sum_squared_error
from neuralnet import NeuralNet
from tools import Instance
servos = [24,17]
pi=pigpio.pi()
basewidth = 256
height = 192
#~
#~ def updateTop(angle):
#~ dutyTop = float(angle) / 10.0 + 2.5
#~ pwmTop.ChangeDutyCycle(dutyTop)
#~
#~ def updateBottom(angle):
#~ dutyBottom = float(angle) / 10.0 + 2.5
#~ pwmBottom.ChangeDutyCycle(dutyBottom)
def goToBin(bin):
    if bin == 1:
        pi.set_servo_pulsewidth(servos[1], 500)
    elif bin == 2:
        pi.set_servo_pulsewidth(servos[1], 1500)
    elif bin == 3:
        pi.set_servo_pulsewidth(servos[1], 2500)
time.sleep(2)
def drop():
pi.set_servo_pulsewidth(servos[0], 1500)
time.sleep(2)
pi.set_servo_pulsewidth(servos[0], 500)
time.sleep(2)
def dropIn(bin):
goToBin(bin)
drop()
def findBin(frame):
image=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
pil_im = Image.fromarray(image)
#~ img1 = pil_im.resize((basewidth, height), Image.ANTIALIAS)
pil_im.thumbnail((256, 256), Image.ANTIALIAS)
img2 = pil_im.convert('1')
#~ pixels = img2.load()
pixels1 = np.asarray(img2.getdata(),dtype=np.bool)
outstr = "outimg" +".bmp"
img2.save(outstr)
array01 = []
count = 0
Tot = 0
for item in pixels1:
Tot += 1
if not item:
array01.append(1)
count += 1
else:
array01.append(0)
testitem = []
testitem.append(Instance(array01, [0]))
# load a stored network configuration
network = NeuralNet.load_from_file( "plastic122.pkl" )
arr = network.print_test(testitem)
print('Value returned by neural network plastic: ' + str(arr[0]))
network2 = NeuralNet.load_from_file( "metal122.pkl" )
arr2 = network2.print_test(testitem)
print('Value returned by neural network metal: ' + str(arr2[0]))
network3 = NeuralNet.load_from_file( "paper122.pkl" )
arr3 = network3.print_test(testitem)
print('Value returned by neural network paper: ' + str(arr3[0]))
pl = arr[0]
me = arr2[0]
pa = arr3[0]
if((pl > pa and pl > me) or pl > 0.5 or (pa < 0.42 and me < 0.09) ):
return 1 #plastic
elif((me > pa and me > pl) or me > 0.13):
return 3 #metal
else:
return 2 #paper
#~ else:
#~ return 3 #metal
def process(frame):
bin=findBin(frame)
print "found bin:"
print bin
dropIn(bin)
while True:
print "starting motion detection"
# construct the argument parser and parse the arguments
#~ ap = argparse.ArgumentParser()
#ap.add_argument("-v", "--video", help="path to the video file")
#~ ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
#~ args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
#~ if args.get("video", None) is None:
camera = cv2.VideoCapture(0)
time.sleep(0.25)
# otherwise, we are reading from a video file
#~ else:
#~ camera = cv2.VideoCapture(args["video"])
# initialize the first frame in the video stream
firstFrame = None
status="Unoccupied"
# loop over the frames of the video
while True:
# grab the current frame and initialize the occupied/unoccupied
# text
(grabbed, frame) = camera.read()
text = "Unoccupied"
origFrame=frame
# if the frame could not be grabbed, then we have reached the end
# of the video
if not grabbed:
break
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
# if the first frame is None, initialize it
if firstFrame is None:
firstFrame = gray
continue
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 100, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# loop over the contours
for c in cnts:
# if the contour is too small, ignore it
#~ if cv2.contourArea(c) < args["min_area"]:
#~ continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
# draw the text and timestamp on the frame
#cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
# cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
#cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
# (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# show the frame and record if the user presses a key
#cv2.imshow("Security Feed", frame)
#cv2.imshow("Thresh", thresh)
#cv2.imshow("Frame Delta", frameDelta)
key = cv2.waitKey(1) & 0xFF
        if text != status:
print text
status=text
if status is "Occupied":
process(origFrame)
camera.release()
break
else:
print 0
# if the `q` key is pressed, break from the lop
if key == ord("q"):
break
| true
|
2ff8713bfa650b1ca0c0ee6f6bd67b0bd4b03e03
|
Python
|
SoapClancy/Python_Project_common_package
|
/Ploting/adjust_Func.py
|
UTF-8
| 3,107
| 2.65625
| 3
|
[] |
no_license
|
from matplotlib import pyplot as plt
from typing import Sequence, Iterable
LINESTYLE_STR = [
('solid', 'solid'), # Same as (0, ()) or '-'
    ('dotted', 'dotted'), # Same as (0, (1, 1)) or ':'
('dashed', 'dashed'), # Same as '--'
('dashdot', 'dashdot')] # Same as '-.'
LINESTYLE_TUPLE = [
('solid', 'solid'), # Same as (0, ()) or '-'
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
def reassign_linestyles_recursively_in_ax(ax, simple_linestyles: bool = True):
if simple_linestyles:
linestyle_str_len = LINESTYLE_STR.__len__()
for i in range(ax.lines.__len__()):
ax.lines[i].set_linestyle(LINESTYLE_STR[i % linestyle_str_len][1])
else:
linestyle_tuple_len = LINESTYLE_TUPLE.__len__()
for i in range(ax.lines.__len__()):
ax.lines[i].set_linestyle(LINESTYLE_TUPLE[i % linestyle_tuple_len][1])
ax.legend(prop={'size': 10})
return ax
def adjust_legend_in_ax(ax, *, protocol=None, **kwargs):
assert (protocol in (None, 'Outside center right'))
kwargs.setdefault('ncol', 1)
kwargs.setdefault('prop', {'size': 10})
if protocol == 'Outside center right':
ax.legend(bbox_to_anchor=(1, 0.5), loc="center left", **kwargs)
else:
kwargs.setdefault('loc', 'upper center')
ax.legend(**kwargs)
return ax
def adjust_legend_order_in_ax(ax, *, new_order_of_labels: Sequence[str]):
handles, labels = ax.get_legend_handles_labels()
assert (handles.__len__() == new_order_of_labels.__len__()), "Check 'new_order_of_labels' length"
hl = sorted(zip(handles, labels),
key=lambda x: [x[1] == y for y in new_order_of_labels],
reverse=True)
handles, labels = zip(*hl)
ax.legend(handles, labels,
ncol=ax.get_legend().__getattribute__('_ncol'),
loc=ax.get_legend().__getattribute__('_loc'),
prop={'size': 10})
return ax
def adjust_lim_label_ticks(ax, **kwargs):
for key, item in kwargs.items():
if key == 'x_lim':
func = ax.set_xlim
elif key == 'y_lim':
func = ax.set_ylim
elif key == 'x_ticks':
func = ax.set_xticks
elif key == 'y_ticks':
func = ax.set_yticks
elif key == 'x_label':
func = ax.set_xlabel
elif key == 'y_label':
func = ax.set_ylabel
elif key == 'x_tick_labels':
func = ax.set_xticklabels
elif key == 'y_tick_labels':
func = ax.set_yticklabels
else:
raise Exception("Unsupported keyword(s)")
func(item)
return ax
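if __name__ == '__main__':
    # Added demonstration of the helpers above; any small figure works.
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4], label='quadratic')
    ax.plot([0, 1, 2], [0, 1, 2], label='linear')
    reassign_linestyles_recursively_in_ax(ax)
    adjust_legend_in_ax(ax, protocol='Outside center right')
    adjust_lim_label_ticks(ax, x_label='x', y_label='y')
    plt.show()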
| true
|
204ec6aed1f3198f7b4cb5fcb10b1f64b958f6d7
|
Python
|
youki-cao/Gomoku-AI
|
/Code_AI_Gomoku/gomoku.py
|
UTF-8
| 2,491
| 3.046875
| 3
|
[] |
no_license
|
# encoding: utf-8
import os,sys
curPath = os.path.abspath(os.path.dirname(__file__))
sys.path.append(curPath)
import numpy, pygame
import Chessboard
class Gomoku():
def __init__(self):
self.screen = pygame.display.set_mode((800, 600))
pygame.display.set_caption("五子棋")
self.clock = pygame.time.Clock()
self.font = pygame.font.Font(r"ncsj.ttf", 24)
self.going = True
self.chessboard = Chessboard.Chessboard()
def getStyle(self):
background = pygame.image.load('bg.png').convert()
mouse_cursor = pygame.image.load('white.png').convert_alpha()
button = pygame.image.load('button.png').convert()
pic1 = pygame.image.load('pic1.png').convert()
while True:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
x, y = pygame.mouse.get_pos()
x+= mouse_cursor.get_width() / 2
y-= mouse_cursor.get_height() / 2
self.screen.blit(pic1, (0, 0))
if x>400 and x<520 and y>300 and y<360:
return '3'
self.screen.blit(pic1, (0, 0))
pygame.display.update()
return '4'
self.screen.blit(background, (0, 0))
self.screen.blit(button, (400, 300))
x, y = pygame.mouse.get_pos()
x-= mouse_cursor.get_width() / 2
y-= mouse_cursor.get_height() / 2
self.screen.blit(mouse_cursor, (x, y))
pygame.display.update()
def loop(self):
while self.going:
self.update()
self.draw()
self.clock.tick(60)
pygame.quit()
    def update(self, table=None):
for e in pygame.event.get():
if e.type == pygame.QUIT:
self.going = False
elif e.type == pygame.MOUSEBUTTONDOWN:
self.chessboard.handle_key_event(e, table)
# return (x, y)
def draw(self):
self.screen.fill((255, 255, 255))
self.chessboard.draw(self.screen)
if self.chessboard.game_over:
self.screen.blit(self.font.render("{0} Win".format("Black" if self.chessboard.winner == 'b' else "White"), True, (0, 0, 0)), (500, 10))
pygame.display.update()
if __name__ == '__main__':
game = Gomoku()
game.loop()
| true
|
657983693374ed1338cab92efc041e937a8a2e32
|
Python
|
Nedgang/adt_project
|
/analysis.py
|
UTF-8
| 5,853
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Usage:
Options:
Authors:
MARIJON Pierre, PICARD DRUET David, PIVERT Jérome.
"""
##########
# IMPORT #
##########
# EXISTANT LIBRARY
import glob
import sys
# SPECIFIC LIBRARY
# Our command line analyser/checker
import cli_parser
# Our tool to split text
import tokenization
# Used to remove words which are not terms
import filtration
# Extract mails differents parts
import mail_parser
# Terms stemming
import stemming
# Detect which language is used in the email
import language_detection
# Manage stop words
import stop_word
# Smart terms counter (both simple and complex terms)
import terms_counter
# Create stockage structure
import tag2terms
from collections import defaultdict
import frequency_term_validator
import math
########
# MAIN #
########
def main(arg):
""" Main function of analyse """
# Read database
tagterms = tag2terms.Tag2Terms()
tagterms.read_file(arg["input"])
    # Just print the tags
if arg["print_tag"]:
print_tag(tagterms)
return 0
    # Get all terms in the queried tags
if arg["all_terms"]:
print_all_terms(tagterms, arg["query"], arg["threshold"])
return 0
    # Terms present in several tags get a bonus
if arg["best_terms"]:
print_best_terms(tagterms, arg["query"], arg["threshold"])
return 0
    # Print only terms present in all queried tags
if arg["strict_terms"]:
print_strict_terms(tagterms, arg["query"], arg["threshold"])
return 0
    # Print the keywords of the database
if arg["keywords"]:
print_keywords(tagterms, arg["keywords"])
return 0
if arg["comparative_frequency"]:
print_keywords_upper_freq(tagterms, arg["comparative_frequency"], arg["threshold"])
def print_tag(tagterms):
""" Print all tag in data base """
for tag in sorted(tagterms.get_tag()):
print(tag)
def print_all_terms(tagterms, query, threshold):
""" Print all terms is in query tag """
# Take all terms
all_terms = dict()
for tag in query:
all_terms.update(tagterms.get_terms_score(tag))
    # Remove terms with a score below the threshold
selected_terms = {k: v for (k,v) in all_terms.items() if v > threshold}
    # Sort terms by score
sorted_terms = list()
[sorted_terms.append((k,v)) for v,k in sorted([(v,k) for k,v in selected_terms.items()], reverse=True)]
print(sorted_terms)
def print_best_terms(tagterms, query, threshold, bonus=1.5):
""" If terms is presente in one another tag increasse score """
# If found terms in another tag we have bonus in enrich terms
enrich_terms = defaultdict(int)
for tag in query:
# build list of tag without actual tag
tag_list_without_current = list(query)
tag_list_without_current.remove(tag)
# read terms is in tag
for terms in tagterms.get_terms_score(tag).keys():
            if __terms_is_in_tag(tagterms, terms, tag_list_without_current):
                enrich_terms[terms] += tagterms.get_terms_score(tag)[terms] * bonus
            else:
                enrich_terms[terms] += tagterms.get_terms_score(tag)[terms]
    # Remove terms with a score below the threshold
selected_terms = {k: v for (k,v) in enrich_terms.items() if v > threshold}
    # Sort terms by score
sorted_terms = list()
[sorted_terms.append((k,v)) for v,k in sorted([(v,k) for k,v in selected_terms.items()], reverse=True)]
print(sorted_terms)
def __terms_is_in_tag(tagterms, terms, tags):
""" Read a list of tag if terms is in terms associated to this tag
return True, if not return False """
for tag in tags:
if terms in tagterms.get_terms(tag):
return True
return False
def print_strict_terms(tagterms, query, threshold):
""" Print just terms is in all tag query """
# Find selected terms
list_of_set = list()
for tag in query:
list_of_set.append(set(tagterms.get_terms(tag).keys()))
keep_terms = set.intersection(*list_of_set)
all_terms = dict()
for tag in query:
all_terms.update(tagterms.get_terms_score(tag))
    # Keep terms that are in keep_terms and whose score is above the threshold
selected_terms = {k: v for (k,v) in all_terms.items() if k in keep_terms and v > threshold}
    # Sort terms by score
sorted_terms = list()
[sorted_terms.append((k,v)) for v,k in sorted([(v,k) for k,v in selected_terms.items()], reverse=True)]
print(sorted_terms)
def print_keywords(tagterms, n):
"""
    Print the first n keywords representing the corpus
"""
sorted_terms = list()
# Get global score terms
dict_global_terms = tagterms.get_global_score()
# Sort terms by score
[sorted_terms.append((k,v)) for v,k in sorted([(v,k) for k,v in dict_global_terms.items()], reverse=True)]
# print just first terms of sorted list
print(sorted_terms[0:n])
def print_keywords_upper_freq(tagterms, ref_freq_file, threshold):
ref_freq_usage = frequency_term_validator.load_reference(ref_freq_file)
dict_global_terms = tagterms.get_global_score()
number_of_word = 0
for v in dict_global_terms.values():
number_of_word += v
local_freq_usage = {k: v/number_of_word for (k,v) in dict_global_terms.items() if not (' ' in k)}
all_terms = frequency_term_validator.filter_by_frequency(local_freq_usage, ref_freq_usage)
selected_terms = {k: v for (k,v) in all_terms.items() if k in all_terms and v > threshold}
sorted_terms = list()
[sorted_terms.append((k,v)) for v,k in sorted([(v,k) for k,v in selected_terms.items()], reverse=True)]
print(sorted_terms)
##########
# LAUNCH #
##########
if __name__ == "__main__":
arg = cli_parser.analysis_read_args(sys.argv[1:])
if(arg is None):
sys.exit(1)
main(arg)
| true
|
15e26548f03dae2020a0ec1fdd1174d17ee66eba
|
Python
|
deekew33/bogged-again
|
/polls/boggedagain/tensorclassifier.py
|
UTF-8
| 3,368
| 2.734375
| 3
|
[] |
no_license
|
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import tensorflow as tf
import os, pickle, random, nltk, math, sqlite3, time
import matplotlib.pyplot as plt
def get_num_words_per_sample(sample_texts):
"""Returns the median number of words per sample given corpus.
# Arguments
sample_texts: list, sample texts.
# Returns
int, median number of words per sample.
"""
num_words = [len(s.split()) for s in sample_texts]
return np.median(num_words)
def plot_sample_length_distribution(sample_texts):
"""Plots the sample length distribution.
# Arguments
samples_texts: list, sample texts.
"""
plt.hist([len(s) for s in sample_texts], 50)
plt.xlabel('Length of a sample')
plt.ylabel('Number of samples')
plt.title('Sample length distribution')
plt.show()
def loaddata(seed=123):
category_list = ['AD','B','CS','LC','LG','LL','LS','M','R','RL','RM','SD','SS','T','TW']
news= []
labels = []
newstext = []
for category in category_list:
for entry in os.scandir(f'../../trainer/TheStreet/{category}/'):
if not entry.name.startswith('.') and entry.is_file() and entry.name.find('labeled') != -1:
                with open(entry.path) as f:  # use the scanned entry's own path (the literal was missing 'TheStreet/')
data = f.read()
news.append([data, category, entry.name])
print(entry.name)
random.seed(seed)
random.shuffle(news)
for entry in news:
newstext.append(entry[0])
labels.append(entry[1])
train_index = math.ceil(len(newstext) / 2)
train_texts=newstext[0:train_index]
test_texts=newstext[train_index:]
train_labels=labels[0:train_index]
test_labels=labels[train_index:]
return ((train_texts, np.array(train_labels)),
(test_texts, np.array(test_labels)))
def _get_last_layer_units_and_activation(num_classes):
"""Gets the # units and activation function for the last network layer.
# Arguments
num_classes: int, number of classes.
# Returns
units, activation values.
"""
if num_classes == 2:
activation = 'sigmoid'
units = 1
else:
activation = 'softmax'
units = num_classes
return units, activation
from tensorflow.python.keras import models
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import Dropout
def mlp_model(layers, units, dropout_rate, input_shape, num_classes):
"""Creates an instance of a multi-layer perceptron model.
# Arguments
layers: int, number of `Dense` layers in the model.
units: int, output dimension of the layers.
dropout_rate: float, percentage of input to drop at Dropout layers.
input_shape: tuple, shape of input to the model.
num_classes: int, number of output classes.
# Returns
An MLP model instance.
"""
op_units, op_activation = _get_last_layer_units_and_activation(num_classes)
model = models.Sequential()
model.add(Dropout(rate=dropout_rate, input_shape=input_shape))
for _ in range(layers-1):
model.add(Dense(units=units, activation='relu'))
model.add(Dropout(rate=dropout_rate))
model.add(Dense(units=op_units, activation=op_activation))
return model
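if __name__ == '__main__':
    # Added example: an MLP sized for the 15 news categories listed in
    # loaddata(); the 20000-feature input shape is an assumption (e.g. the
    # size of a tf-idf vocabulary).
    demo_model = mlp_model(layers=2, units=64, dropout_rate=0.2,
                           input_shape=(20000,), num_classes=15)
    demo_model.compile(optimizer='adam',
                       loss='sparse_categorical_crossentropy',
                       metrics=['acc'])
    demo_model.summary()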
| true
|
faea34a9bed62284ec2c8c946b6d34effece9935
|
Python
|
clouds56/binary_reader
|
/app/sqlite3_file.py
|
UTF-8
| 5,900
| 2.625
| 3
|
[] |
no_license
|
import binary_reader.sqlite3_schema as sqlite3
class SQLiteFile:
def __init__(self, file):
self.file = file
self.pages = {}
self.load()
def load(self):
self.file.seek(0)
self.config = self.readbin("header_schema", self.file.read(100))
self.tables = self.load_btree(0)
def load_btree(self, index):
if isinstance(index, str):
for _, v in self.tables:
if v[1] == index:
print("found", v)
return self.load_btree(v[3]-1)
return []
page = self.load_btree_page(index)
rows = []
page_type = page.header['page_type']
if page_type == 13:
for c, p in zip(page.cells, page.payloads):
rows.append((c['rowid'], p['column_contents']))
elif page_type == 5:
for c in page.cells:
print("load btree page %s -> %s, %s" % (index+1, c['left_child_page'], c['rowid']))
rows += self.load_btree(c['left_child_page']-1)
print("load btree page %s -> %s, -1" % (index+1, page.header['right_most_page']))
rows += self.load_btree(page.header['right_most_page']-1)
else:
print("unknown page type %s" % page_type)
return rows
def load_btree_page(self, index):
if index in self.pages:
return self.pages[index]
self.file.seek(index * self.config['page_size'])
self.pages[index] = Page(self.config, self.file.read(self.config['page_size']), index, file=self.file)
return self.pages[index]
@staticmethod
def readbin(schema, bin_, *, cache=None):
if cache is None:
cache = {}
if isinstance(schema, str):
schema = sqlite3.schemas[schema].format
return dict(sqlite3.read_schema_list(schema, bin_, table=sqlite3.format_table, cache=cache))
class Page:
def __init__(self, config, page_bin, index, *, file=None):
self.config = config
self.page_bin = page_bin
self.index = index
self.load(index == 0)
self.load_cells()
self.load_cells_payload(file)
def load(self, first=False):
self.header = SQLiteFile.readbin("page_header_schema", self.page_bin[100:] if first else self.page_bin)
return self
def load_cells(self):
self.cells = []
cache = self.header.copy()
cache['config'] = self.config
for i in self.header['cell_offset_array']:
self.cells.append(SQLiteFile.readbin("cell_header_schema", self.page_bin[i:], cache=cache))
return self
def load_overflows(self, file, cell=None):
if cell is None:
for i in self.cells:
self.load_overflows(file, i)
elif cell['payload_size'] > cell['local_payload_size']:
cell['overflow_pages'], cell['full_payload'] = self.load_overflow(file, cell['payload_size'],
cell['overflow_page'] - 1,
size=cell['local_payload_size'],
acc=[cell['payload']])
def load_overflow(self, file, total_size, acc_page, size=0, acc=None):
if acc is None:
acc = []
if not isinstance(acc_page, list):
acc_page = [acc_page]
if acc_page[-1] == 0:
return acc_page, acc
file.seek(acc_page[-1] * self.config['page_size'])
page_bin = file.read(self.config['page_size'])
page_header = SQLiteFile.readbin("page_overflow_header_schema", page_bin)
# print("load overflow %s -> %s" % (acc_page, page_header))
offset = page_header['offset']
content = page_bin[offset:max(offset + total_size - size, 0)]
acc.append(content)
acc_page.append(page_header['next_page'] - 1)
size += len(content)
if size >= total_size:
return acc_page, acc
        return self.load_overflow(file, total_size, acc_page, size, acc)
def load_cells_payload(self, file=None):
self.payloads = []
for i in self.cells:
if 'payload' not in i:
self.payloads.append(None)
continue
payload = i['payload']
if i['payload_size'] > i['local_payload_size']:
if 'full_payload' not in i and file is not None:
self.load_overflows(file, i)
if 'full_payload' in i:
payload = b''.join(i['full_payload'])
else:
print("overflow not loaded %s" % i)
self.payloads.append(SQLiteFile.readbin("record_format_schema", payload))
return self
def _init_test():
import sqlite3
conn = sqlite3.connect('example.db')
c = conn.cursor()
# Create table
c.execute('''CREATE TABLE stocks
(date text, trans text, symbol text, qty real, price real)''')
# Insert a row of data
for i in range(1000):
c.execute("INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT%s',100,10.14)" % ("142857" * i + "!"))
# Save (commit) the changes
conn.commit()
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
def _test():
""
##
# create_test_db()
file = SQLiteFile(open("example.db", "rb"))
##
file.load_btree(0)
##
rows = file.load_btree("stocks")
##
import csv
with open("stocks.csv", "w", encoding="utf-8") as fout:
writer = csv.writer(fout, delimiter=",", lineterminator="\n", quotechar='"', doublequote=True, strict=True)
for _, v in sorted(rows):
writer.writerow(v)
| true
|
d09eb4876bbbb4b04b05aa2feaddd1ab429b9dda
|
Python
|
aliabbas1987/python-practice
|
/practice/functions/myMath.py
|
UTF-8
| 105
| 2.671875
| 3
|
[] |
no_license
|
'''
Created on Apr 2, 2020
@author: Ali Abbas
'''
def sum(x,y):
return x+y
def sub(x,y):
return x-y
| true
|
e396ecdd25e23b59be46310bf8220793f6a2f5da
|
Python
|
YilinWphysics/Assignments
|
/Nbody_project/ThirdPart_Periodic.py
|
UTF-8
| 1,235
| 2.578125
| 3
|
[] |
no_license
|
from functions import *
############## Periodic B.C's ##################
summary = open("Summary.txt", "a")
Q3_periodic_E = open("Q3_periodic_E.txt", "w")
n=int(2e5) # now use hundreds of thousands of particles
grid_size = 500
soften = 10
mass = 1/n
v_x = 0 # initial v in x-direction
v_y = 0 # initial v in y-direction
system = Particles(mass, v_x, v_y, n, grid_size, soften)
dt = 80
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on = False, xlim = (0, grid_size), ylim = (0, grid_size))
particles_plot = ax.imshow(system.grid, origin='lower', vmin=system.grid.min(), vmax=system.grid.max(), cmap=plt.get_cmap('cividis'))
plt.colorbar(particles_plot)
count = 0
def animation_plot(i):
global system, ax, fig, dt, Q3_periodic_E, count
print(count)
for i in range(10):
system.evolve(dt)
system.energy()
    Q3_periodic_E.write(f"{system.E}\n")
count+=1
particles_plot.set_data(system.grid)
particles_plot.set_clim(system.grid.min(), system.grid.max())
particles_plot.set_cmap(plt.get_cmap("cividis"))
return particles_plot,
animation_periodic = anmt.FuncAnimation(fig, animation_plot, frames = 200, interval = 10)
animation_periodic.save("Question3_periodic.gif", writer = "imagemagick")
#plt.show()
| true
|
9384bf6881753dc361f7abe62fdde109534d45e8
|
Python
|
StopTheCCP/CCP-Database-Leak
|
/TranslateCSV.py
|
UTF-8
| 1,353
| 2.921875
| 3
|
[] |
no_license
|
import os
import time
# https://pypi.org/project/google-trans-new/
from google_trans_new import google_translator
def TranslateCnToEn(textCn:str) -> str:
# ref https://github.com/lushan88a/google_trans_new
translator = google_translator(url_suffix='us')
textEn = translator.translate(textCn.strip(), lang_src='zh', lang_tgt='en')
return textEn
def ProcessFile(inputFilePath:str, outputFilePath:str):
with open(outputFilePath, 'w', encoding='utf-8') as outputFile:
with open(inputFilePath, 'r', encoding='utf-8') as inputFile:
# n = line number
n = 0
for line in inputFile:
n += 1
if n % 1000 == 0:
t = time.localtime()
currentTime = time.strftime('%H:%M:%S', t)
print(f'[{currentTime}] Processing row {n}')
translatedText = TranslateCnToEn(line)
outputFile.write(translatedText + '\n')
if __name__ == '__main__':
time1 = time.time()
inputFilePath = 'Data/shanghai-ccp-member.csv'
outputFilePath = 'Data/shanghai-ccp-member-en.csv'
if os.path.exists(outputFilePath):
os.remove(outputFilePath)
ProcessFile(inputFilePath, outputFilePath)
time2 = time.time()
duration = time2 - time1
print(f'Translation seconds = {duration}')
| true
|
4d2a5b2449e57a71c6c9b0142d9a38287d50424a
|
Python
|
BitKnitting/FitHome_EDA
|
/test_find_dates.py
|
UTF-8
| 612
| 2.8125
| 3
|
[] |
no_license
|
###########################################################
# test_find_dates.py
# Simple test that validates we can connect to the mongo db
# and read records. It returns the dates in the mongo db
# where there are power readings in ISODate format.
###########################################################
from errors.errors import handle_exception
from readings.readings import PowerReadings
try:
p = PowerReadings()
collection = p.get_connection_to_collection()
iso_days_list = p.get_isodate_list(collection)
[print(n) for n in iso_days_list]
except Exception as e:
handle_exception(e)
| true
|
fd65ee8237801ecbcf6616866eb9f5fcbc8cabe2
|
Python
|
efaguy27/engg-3130-final
|
/autonomous-learning-library-master/all/approximation/approximation.py
|
UTF-8
| 4,822
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
import os
import torch
from torch.nn import utils
from all.logging import DummyWriter
from .target import TrivialTarget
from .checkpointer import PeriodicCheckpointer
DEFAULT_CHECKPOINT_FREQUENCY = 200
class Approximation():
'''
Base function approximation object.
This defines a Pytorch-based function approximation object that
wraps key functionality useful for reinforcement learning, including
decaying learning rates, model checkpointing, loss scaling, gradient
clipping, target networks, and tensorboard logging. This enables
increased code reusability and simpler Agent implementations.
Args:
model (:torch.nn.Module:): A Pytorch module representing the model
used to approximate the function. This could be a convolution
network, a fully connected network, or any other Pytorch-compatible
model.
optimizer (:torch.optim.Optimizer:): A optimizer initialized with the
model parameters, e.g. SGD, Adam, RMSprop, etc.
checkpointer: (:all.approximation.checkpointer.Checkpointer): A Checkpointer object
that periodically saves the model and its parameters to the disk. Default:
A PeriodicCheckpointer that saves the model once every 200 updates.
clip_grad: (float, optional): If non-zero, clips the norm of the
            gradient to this value in order to prevent large updates and
improve stability.
See torch.nn.utils.clip_grad.
loss_scaling: (float, optional): Multiplies the loss by this value before
performing a backwards pass. Useful when used with multi-headed networks
with shared feature layers.
name: (str, optional): The name of the function approximator used for logging.
scheduler: (:torch.optim.lr_scheduler._LRScheduler:, optional): A learning
rate scheduler initialized with the given optimizer. step() will be called
after every update.
target: (:all.approximation.target.TargetNetwork, optional): A target network object
to be used during optimization. A target network updates more slowly than
the base model that is being optimizing, allowing for a more stable
optimization target.
writer: (:all.logging.Writer:, optional): A Writer object used for logging.
The standard object logs to tensorboard, however, other types of Writer objects
may be implemented by the user.
'''
def __init__(
self,
model,
optimizer,
checkpointer=None,
clip_grad=0,
loss_scaling=1,
name='approximation',
scheduler=None,
target=None,
writer=DummyWriter(),
):
self.model = model
self.device = next(model.parameters()).device
self._target = target or TrivialTarget()
self._scheduler = scheduler
self._target.init(model)
self._updates = 0
self._optimizer = optimizer
self._loss_scaling = loss_scaling
self._cache = []
self._clip_grad = clip_grad
self._writer = writer
self._name = name
if checkpointer is None:
checkpointer = PeriodicCheckpointer(DEFAULT_CHECKPOINT_FREQUENCY)
self._checkpointer = checkpointer
self._checkpointer.init(
self.model,
os.path.join(writer.log_dir, name + '.pt')
)
def __call__(self, *inputs):
'''
Run a forward pass of the model.
'''
return self.model(*inputs)
def eval(self, *inputs):
'''Run a forward pass of the model in no_grad mode.'''
with torch.no_grad():
return self.model(*inputs)
def target(self, *inputs):
'''Run a forward pass of the target network.'''
return self._target(*inputs)
def reinforce(self, loss):
loss = self._loss_scaling * loss
self._writer.add_loss(self._name, loss.detach())
loss.backward()
self.step()
return self
def step(self):
'''Given that a backward pass has been made, run an optimization step.'''
if self._clip_grad != 0:
utils.clip_grad_norm_(self.model.parameters(), self._clip_grad)
self._optimizer.step()
self._optimizer.zero_grad()
self._target.update()
if self._scheduler:
self._writer.add_schedule(self._name + '/lr', self._optimizer.param_groups[0]['lr'])
self._scheduler.step()
self._checkpointer()
return self
def zero_grad(self):
self._optimizer.zero_grad()
return self
| true
|
66c4576055eb6d6cdeb90a47765fa5c9d39c3d32
|
Python
|
springdew84/my-test-py
|
/zookeeper/main.py
|
UTF-8
| 1,524
| 2.59375
| 3
|
[] |
no_license
|
# -*- coding:utf-8 -*-
import sys
from kazoo.client import KazooClient
import logging
logging.basicConfig(level=logging.ERROR)
# local service dir
LOCAL_SERVICE = '/services/local-service'
class PyZooConn(object):
# init function include connection method
def __init__(self):
        self.zk = KazooClient(hosts='localhost:2181')
        self.zk.start()
# get node data
def get_data(self, param):
result = self.zk.get(param)
return result
def get_children(self, param):
result = self.zk.get_children(param)
return result
# create a node and input a value in this node
def create_node(self, node, value):
self.zk.create(node, value)
# delete a node
def delete_node(self, param):
self.zk.delete(param)
# close the connection
def close(self):
self.zk.stop()
if __name__ == '__main__':
servicePort = str(sys.argv[1])
# print "delete zk local service node " + servicePort
pz = PyZooConn()
# pz.create_node("/test", "a value")
services = pz.get_children(LOCAL_SERVICE)
# curl "https://ws.it4.dealmoon.net/health"
for data in services:
servicePath = LOCAL_SERVICE + "/" + data
# print servicePath
service = pz.get_data(servicePath)
serviceStr = str(service)
if (servicePort in serviceStr):
# print "delete" + servicePort
pz.delete_node(servicePath)
# print "delete zk service node:" + servicePath
pz.close()
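# Example invocation (added): remove the znode under /services/local-service
# whose data contains port 8080:
#   python main.py 8080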
| true
|
ab5278dad18421b0282ffa17476bbd8fc0983994
|
Python
|
hzcheng/wikipedia-like-web-search-engine
|
/hadoop/mapreduce/map1.py
|
UTF-8
| 805
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/python3
'''
Map file to read each document and get the term frequency of each word.
output: word docid
'''
import sys, re
# get the set of stop words.
stopwords = set()
with open('./stopwords.txt','r') as f:
for line in f:
stopwords.add(line.strip())
line_counter = -1
for line in sys.stdin:
line_counter = (line_counter + 1) % 3
if line_counter == 0:
# read the first line of the document ID
docid = line.strip()
word_counter = {}
else:
# read the title and body
for word in line.strip().split():
word = re.sub(r'[^a-zA-Z0-9]+', '', word).lower()
if word and word not in stopwords:
# word is not empty and not in stop words
print('{} {}'.format(word, docid))
| true
|
d5511012cba12e11a29148b694d9ace3023be3ff
|
Python
|
tatsumanu/Mc_Gyver
|
/main.py
|
UTF-8
| 3,245
| 3.234375
| 3
|
[] |
no_license
|
import pygame
import os
from pygame.locals import *
from fonctions import load_image, create_a_text_object, message, victory
from classes import Player, Map, Object
pygame.init()
# variables
name = ''
menu = 1
sprite = 32
play_game = True
# creating the screen game window
window = pygame.display.set_mode((480, 480))
pygame.display.set_caption('Mac Gyver and the Infernal Maze!')
# loading images
floor, wall = load_image('floor.png'), load_image('wall.png')
syringe = load_image('seringue.png')
intro = load_image('macgyver.jpg')
# creating player class object
mc_gyver = Player(0, 0, load_image('MacGyver.png'))
guardian = Player(14, 14, load_image('Gardien.png'))
# creating map class object
map = Map(0, 0, mc_gyver)
# loading the map
map.loading()
# creating elements of the object class
aiguille = Object('aiguille', load_image('aiguille.png'), map)
tube = Object('tube', load_image('tube_plastique.png'), map)
ether = Object('ether', load_image('ether.png'), map)
# main loop
while play_game:
# intro loop
while menu:
pygame.time.Clock().tick(30)
window.blit(intro, (0, 0))
message("Appuyer sur une touche", window)
pygame.display.update()
for event in pygame.event.get():
if event.type == QUIT:
quit()
elif event.type == KEYDOWN:
menu = 0
# game loop
while mc_gyver.alive:
pygame.time.Clock().tick(30)
# checking the events
for event in pygame.event.get():
if event.type == QUIT:
quit()
if event.type == KEYDOWN:
old_pos = mc_gyver.x, mc_gyver.y
if event.key == K_DOWN:
new_pos = (mc_gyver.x, mc_gyver.y + sprite)
mc_gyver.move(old_pos, new_pos, map)
if event.key == K_UP:
new_pos = (mc_gyver.x, mc_gyver.y - sprite)
mc_gyver.move(old_pos, new_pos, map)
if event.key == K_LEFT:
new_pos = (mc_gyver.x - sprite, mc_gyver.y)
mc_gyver.move(old_pos, new_pos, map)
if event.key == K_RIGHT:
new_pos = (mc_gyver.x + sprite, mc_gyver.y)
mc_gyver.move(old_pos, new_pos, map)
# Printing the floor, walls, Mc Gyver, guardian and objects
window.blit(floor, (0, 0))
[window.blit(wall, pos) for pos in list(map.wall_pos)]
window.blit(guardian.image, (guardian.x, guardian.y))
window.blit(mc_gyver.image, (mc_gyver.x, mc_gyver.y))
ether.print_object(window, mc_gyver)
aiguille.print_object(window, mc_gyver)
tube.print_object(window, mc_gyver)
# checking victory/defeat conditions
mc_gyver.alive, mc_gyver.kill_the_guardian = mc_gyver.facing_the_end((mc_gyver.x, mc_gyver.y))
# screen update
pygame.display.update()
if mc_gyver.kill_the_guardian is True:
victory(window, syringe)
else:
message('GAME OVER !', window)
    pygame.display.update()
    # wait for a key press or window close before exiting
    waiting = True
    while waiting:
        for event in pygame.event.get():
            if event.type == QUIT or event.type == KEYDOWN:
                waiting = False
    quit()
| true
|
a037e286ed68afa9a83056bd334cf235235d959f
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_199/1798.py
|
UTF-8
| 1,178
| 2.9375
| 3
|
[] |
no_license
|
class TestCase:
def __init__(self, n, data, f):
self.n = n
self.data = data
self.flip = f
self.result = conv(self.data, self.flip)
def p(self):
return 'Case #{}: {}\n'.format(self.n,self.result)
def map(strarry):
v = strarry.split(' ')
r = []
for u in v:
r.append(int(u))
return r
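# conv scans left to right: whenever a '-' cake is seen, flip the next f cakes
# starting there, count the flip, then check whether everything ended up '+'.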
def conv(d, f):
print (len(d))
s = list(str(d))
count = 0
for i in range(len(d)-f+1):
if s[i] == '-':
count=count+1
for j in range(f):
s[i+j] = '+' if s[i+j] == '-' else '-'
return count if complete("".join(s)) else 'IMPOSSIBLE'
def complete(d):
for i in range(len(d)):
if d[i] == '-':
return False
return True
# print '{} -> {}'.format(882,conv(882))
# print '{} -> {}'.format(110,conv(110))
inFile = 'A-large.in'
outFile = inFile.replace('.in', '.out')
inf = open(inFile, 'r')
outf = open(outFile, 'w')
data = inf.readlines()
noTests = int(data[0])
i=1
for d in data[1:]:
row = d.split(' ')
tc = TestCase(i, row[0], int(row[1]))
outf.write(tc.p())
    print(tc.p())
i=i+1
outf.close()
inf.close()
| true
|
6baa9f7403a1e35b71bd96f8d1aaa062e074fffa
|
Python
|
guoweiyu/NinaTools
|
/ninapro_example.py
|
UTF-8
| 1,232
| 2.546875
| 3
|
[] |
no_license
|
from ninaeval.config import config_parser, config_setup
from ninaeval.utils.nina_data import NinaDataParser
DATA_PATH = "all_data/"
MODEL_PATH = "all_models/"
def main():
# Reads JSON file via --json, or command line arguments:
config_param = config_parser.parse_config()
feat_extractor = config_setup.get_feat_extract(config_param.features)()
classifier = config_setup.get_model(config_param.model)(MODEL_PATH, feat_extractor)
dataset = config_setup.get_dataset(config_param.data)(DATA_PATH, feat_extractor, False)
if not dataset.load_dataset():
data_parser = NinaDataParser(DATA_PATH)
print("Loading Ninapro data from processed directory...")
loaded_nina = data_parser.load_processed_data()
print("Extracting dataset features for training, and testing...")
dataset.create_dataset(loaded_nina)
print("Training classifier on training dataset...")
classifier.train_model(dataset.train_features, dataset.train_labels, dataset.test_features, dataset.test_labels)
print("Testing classifier on testing dataset...")
print(classifier.perform_inference(dataset.test_features, dataset.test_labels))
if __name__ == "__main__":
main()
| true
|
1224da19d1f36dc206f4e34ef17fdf69c93db33a
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p04044/s238246335.py
|
UTF-8
| 79
| 2.953125
| 3
|
[] |
no_license
|
n,l=map(int,input().split());print(*sorted([input() for _ in range(n)]),sep='')
| true
|
2676c72e15e3bf83670f74d910e48981ad88e4ae
|
Python
|
neu-vi/ezflow
|
/ezflow/functional/criterion/sequence.py
|
UTF-8
| 1,446
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
import torch
import torch.nn as nn
from ...config import configurable
from ..registry import FUNCTIONAL_REGISTRY
@FUNCTIONAL_REGISTRY.register()
class SequenceLoss(nn.Module):
"""
Sequence loss for optical flow estimation.
Used in **RAFT** (https://arxiv.org/abs/2003.12039)
Parameters
----------
gamma : float
Weight for the loss
max_flow : float
Maximum flow magnitude
"""
@configurable
def __init__(self, gamma=0.8, max_flow=400):
super(SequenceLoss, self).__init__()
self.gamma = gamma
self.max_flow = max_flow
@classmethod
def from_config(cls, cfg):
return {"gamma": cfg.GAMMA, "max_flow": cfg.MAX_FLOW}
def forward(self, pred, label):
assert (
label.shape[1] == 3
), "Incorrect channel dimension. Set append valid mask to True in DataloaderCreator to append the valid data mask in the target label."
n_preds = len(pred)
flow_loss = 0.0
flow, valid = label[:, :2, :, :], label[:, 2:, :, :]
valid = torch.squeeze(valid, dim=1)
mag = torch.sqrt(torch.sum(flow**2, dim=1))
valid = (valid >= 0.5) & (mag < self.max_flow)
for i in range(n_preds):
i_weight = self.gamma ** (n_preds - i - 1)
i_loss = torch.abs(pred[i] - flow)
flow_loss += i_weight * torch.mean((valid[:, None] * i_loss))
return flow_loss
| true
|
22cdb807dc1dd63bc932174edb664cad72ea3b26
|
Python
|
guard1000/2018_SKKU_Problem-Solving-Algorithms
|
/11주차_함수_코드.py
|
UTF-8
| 2,861
| 4.25
| 4
|
[] |
no_license
|
#1
def ormchasun(x,y,z):
if x>y:
x,y = y,x
if y>z:
y,z = z,y
if x>y:
x,y = y,x
print(x,y,z)
a = int(input('First integer: '))
b = int(input('Second integer: '))
c = int(input('Third integer: '))
print('Input:',a,b,c)
print('Ascending:', end=' ')
ormchasun(a,b,c)
#2 Bubble sort
def bubble_sort(data):
    for i in range(len(data)-1): # each pass bubbles the largest remaining element to the end
for j in range(len(data)-1):
if data[j]>data[j+1]:
data[j],data[j+1] = data[j+1], data[j]
print(data)
return data
data = [53,27,90,16,76,31,40,55,19,15]
bubble_sort(data)
#3
def BinarySearch(data, target, low, high): # takes the data list, the target, and the low/high bounds
    if low > high: # if low exceeds high, the target is not present
        return False
    else:
        mid = (low + high) // 2 # index of the middle element
        if target == data[mid]: # if the middle value equals the target,
            print('hit') # report a hit
            return True
        elif target < data[mid]: # if target < middle value,
            print('miss')
            return BinarySearch(data, target, low, mid-1)
        else: # if target > middle value,
            print('miss')
            return BinarySearch(data, target, mid+1, high)
data = [6,13,14,25,33,43,51,53,64,72,84,93,95,96,97]
BinarySearch(data, 33, 0, len(data)-1)
# Assignment
def Input_and_Sort(): # reads 10 numbers and returns them as a sorted list
    a_list=[]
    for i in range(10):
        a=int(input('Enter a number: '))
a_list.append(a)
a_list.sort()
return a_list
def BinarySearch(data, target, low, high): # binary search function
if low>high:
return False
else:
mid = (low + high) // 2
if target == data[mid]:
print('hit')
return True
elif target < data[mid]:
print('miss')
return BinarySearch(data, target, low, mid-1)
else:
print('miss')
return BinarySearch(data, target, mid+1, high)
data = Input_and_Sort() # assign the sorted list of numbers to data
print(data) # print to verify the sort
target = data[3] # the target is the value at index 3
BinarySearch(data, target, 0, len(data)-1 )
| true
|
5c483e217fdf7695a57918289a605142c247bf92
|
Python
|
mossbanay/Codeeval-solutions
|
/swap-case.py
|
UTF-8
| 129
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
import sys
with open(sys.argv[1]) as input_file:
for line in input_file.readlines():
print(line.strip().swapcase())
| true
|
c0cb45f081a0dd765b30d3f77aa64a5506fc7db1
|
Python
|
RajanaGoutham/Python
|
/Natural Language Interface Database/Query.py
|
UTF-8
| 2,444
| 2.78125
| 3
|
[] |
no_license
|
class Query:
import pymysql
f=open("StopWords.txt","r")
list1=[]
for k in f:
k=k.replace('"','')
list1.append(k.strip())
#print(list1)
q=input("Enter Question:\n")
arr=[]
qn=q.split(" ")
for i in qn:
#print(i)
if i not in list1:
arr.append(i)
'''creating query'''
def query_gen(arr,dict1,com,comp):
From="com"
a=""
b=""
n1="none"
ticker=""
where=""
for i in arr:
if 'wh' in i:
select ="select"
dict3=dict1.keys()
#print(dict3)
elif i in dict3:
a=i
b=dict1[i]
if dict1[i]=="Profiles":
ticker="prof_ticker"
elif dict1[i]=="Statistics":
ticker="stat_ticker"
else:
ticker="Fin_ticker"
elif i in com:
n1=i
From=i
if n1=="none":
return select+" "+a+" "+"from"+" "+b
return select+" "+a+" "+"from"+" "+b+" "+"where"+" "+ticker+"='"+n1+"'"
'''creating dict for tables in Database'''
dict1={"stat_ticker" :"Statistics","marketcap":"Statistics","enterprise_value" :"Statistics","return_on_assets" : "Statistics","total_cash" : "Statistics","operating_cash_flow" :"Statistics","levered_free_cash_flow" :"Statistics","total_debt" : "Statistics","current_ratio" :"Statistics","gross_profit" :"Statistics","proffit_margin" :"Statistics","prof_ticker" :"Profiles","name" : "Profiles","Address" : "Profiles","phonenum" :"Profiles","website" : "Profiles","sector" : "Profiles","industry" :"Profiles","full_time" :"Profiles","bus_summ" :"Profiles","Fin_ticker" : "Finances","Total_Revenue": "Finances","Cost_of_Revenue": "Finances","Income_Before_Tax" : "Finances","Net_Income" : "Finances"}
f=open("ticker.txt",'r')
Tickername=list(f)
symbols=[]
n=[]
for i in Tickername:
j=i.split(":")
symbols.append(j[0].strip())
n.append(j[1].strip())
que=query_gen(arr,dict1,symbols,n)
conn=pymysql.connect("localhost","root","Msit@0066","yahoo")
#try:
cur=conn.cursor()
cur.execute(que)
print(que)
rows=cur.fetchall()
'''printing rows for given question'''
for i in rows:
print(i)
#except:
#print("Invalid Quary")
conn.close()
| true
|
3bff4959e123dacd5922127ff96480f51469ffdd
|
Python
|
jakudapi/aoc2016
|
/day08-1.py
|
UTF-8
| 3,792
| 4.1875
| 4
|
[] |
no_license
|
"""
--- Day 8: Two-Factor Authentication ---
You come across a door implementing what you can only assume is an implementation of two-factor authentication after a long game of requirements telephone.
To get past the door, you first swipe a keycard (no problem; there was one on a nearby desk). Then, it displays a code on a little screen, and you type that code on a keypad. Then, presumably, the door unlocks.
Unfortunately, the screen has been smashed. After a few minutes, you've taken everything apart and figured out how it works. Now you just have to work out what the screen would have displayed.
The magnetic strip on the card you swiped encodes a series of instructions for the screen; these instructions are your puzzle input. The screen is 50 pixels wide and 6 pixels tall, all of which start off, and is capable of three somewhat peculiar operations:
rect AxB turns on all of the pixels in a rectangle at the top-left of the screen which is A wide and B tall.
rotate row y=A by B shifts all of the pixels in row A (0 is the top row) right by B pixels. Pixels that would fall off the right end appear at the left end of the row.
rotate column x=A by B shifts all of the pixels in column A (0 is the left column) down by B pixels. Pixels that would fall off the bottom appear at the top of the column.
For example, here is a simple sequence on a smaller screen:
rect 3x2 creates a small rectangle in the top-left corner:
###....
###....
.......
rotate column x=1 by 1 rotates the second column down by one pixel:
#.#....
###....
.#.....
rotate row y=0 by 4 rotates the top row right by four pixels:
....#.#
###....
.#.....
rotate column x=1 by 1 again rotates the second column down by one pixel, causing the bottom pixel to wrap back to the top:
.#..#.#
#.#....
.#.....
As you can see, this display technology is extremely powerful, and will soon dominate the tiny-code-displaying-screen market. That's what the advertisement on the back of the display tries to convince you, anyway.
There seems to be an intermediate check of the voltage used by the display: after you swipe your card, if the screen did work, how many pixels should be lit?
"""
from time import time
import numpy as np
def shifter(array, amount):
"""
array: np.array[int]
amount: int. times to shift
OUTPUT-
returns np.array[int]
"""
amount = amount % len(array)
return np.concatenate((array[-amount:], array[:-amount]))
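# Note: shifter is equivalent to np.roll(array, amount) for 1-D arrays.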
def rect(screen, num_cols, num_rows):
"""
screen: np.array representing the screen
num_cols: int.
num_rowls: int
OUTPUT-
none
"""
screen[:num_rows, :num_cols] = 1 # turns pixels on
def rotate(screen, axis, index, shift):
"""
screen: np.array representing the screen
axis: str. either "row" or "column
index: int. what "row" or "column"
shift: int. how much to shift (wraps around end)
OUTPUT-
none
"""
if axis == 'row':
screen[index] = shifter(screen[index], shift)
elif axis == 'column':
screen[:,index] = shifter(screen[:,index], shift)
with open("day08input.txt") as fp:
    screen = np.zeros((6,50), dtype=int) #initialize blank screen. 6 rows, 50 cols.
for line in fp:
instruction = line.rstrip("\n").split(" ") # split line into list of words
if instruction[0] == 'rect':
coordinates = instruction[1].split('x') # ie '12x3' => ['12','3']
rect(screen, num_cols=int(coordinates[0]), num_rows=int(coordinates[1]))
print(screen)
elif instruction[0] == "rotate":
index = int(instruction[2].split('=')[1]) # 'x=32'=> ['x','32']=> 32
rotate(screen, axis=instruction[1], index=index, shift=int(instruction[4]))
print(screen)
print("Pixels on: {}".format(screen.sum()))
| true
|
923ca8285e370c66dbcc7b211f094f2c0589725c
|
Python
|
Sayak09/IPL_Powerplay_ScorePrediction
|
/predictor.py
|
UTF-8
| 3,474
| 2.546875
| 3
|
[] |
no_license
|
def predictRuns(testInput):
prediction = 0
import tensorflow as tf
import pandas as pd
model = tf.keras.models.load_model("model.h5",compile=False)
df2=pd.read_csv(testInput)
ven=df2.iloc[:,0].values
inn=df2.iloc[:,1].values
bat_team=df2.iloc[:,2].values
bowl_team=df2.iloc[:,3].values
bat=df2.iloc[:,4].values
bowl=df2.iloc[:,5].values
c=0
for i in bat[0]:
if i==',':
c+=1
d=0
for i in bowl[0]:
if i==',':
d+=1
no_of_bowlers=float(d+1)
no_of_wickets=float(c-1)
innings=float(inn[0])
if bat_team == "Kolkata Knight Riders":
bat_no=6
elif bat_team == "Royal Challengers Bangalore":
bat_no=0
elif bat_team == "Chennai Super Kings":
bat_no=1
elif bat_team == "Punjab Kings":
bat_no=9
elif bat_team == "Rajasthan Royals":
bat_no=10
elif bat_team == "Delhi Capitals":
bat_no=2
elif bat_team == "Sunrisers Hyderabad":
bat_no=4
elif bat_team == "Mumbai Indians":
bat_no=7
else:
bat_no=5
if bowl_team == "Kolkata Knight Riders":
bowl_no=6
elif bowl_team == "Royal Challengers Bangalore":
bowl_no=0
elif bowl_team == "Chennai Super Kings":
bowl_no=1
elif bowl_team == "Punjab Kings":
bowl_no=9
elif bowl_team == "Rajasthan Royals":
bowl_no=10
elif bowl_team == "Delhi Capitals":
bowl_no=2
elif bowl_team == "Sunrisers Hyderabad":
bowl_no=4
elif bowl_team == "Mumbai Indians":
bowl_no=7
else:
bowl_no=5
if ven == "Ahmedabad" or ven=="Narendra Modi Stadium":
ven_no=26
elif ven == "Mumbai" or ven=="Wankhede Stadium":
ven_no=5
elif ven == "Kolkata" or ven=="Eden Gardens":
ven_no=4
elif ven == "Chennai" or ven=="MA Chidambaram Stadium, Chepauk" or ven=="MA Chidambaram Stadium, Chepauk , Chennai" or ven=="MA Chidambaram Stadium":
ven_no=8
elif ven == "Delhi" or ven=="Feroz Shah Kotla" or ven=="Arun Jaitley Stadium":
ven_no=3
else:
ven_no=1
b=[]
for i in range(0,11):
if i==(bat_no):
b.append(1)
else:
b.append(0)
b1=[]
for i in range(0,11):
if i==(bowl_no):
b1.append(1)
else:
b1.append(0)
v=[]
for i in range(1,27):
if i==(ven_no):
v.append(1)
else:
v.append(0)
arr=[]
arr.append(innings)
arr.append(5.6)
for i in v:
arr.append(i)
for i in b:
arr.append(i)
for i in b1:
arr.append(i)
arr.append(no_of_wickets)
arr.append(no_of_bowlers)
prediction=int(model.predict([arr]))
if (prediction>=0 and prediction<10):
prediction+=30
elif (prediction>=10 and prediction<20):
prediction+=20
elif (prediction>=20 and prediction<25):
prediction+=15
elif (prediction>=25 and prediction<=27):
prediction+=10
elif (prediction>27 and prediction<=30):
prediction+=8
elif (prediction>30 and prediction<=32):
prediction+=4
else:
prediction=prediction
return prediction
| true
|
5dc8faa80f3118cb685f25c6b60f573e25f2e9eb
|
Python
|
lemonade512/DotFiles
|
/cli/system_info.py
|
UTF-8
| 869
| 2.84375
| 3
|
[] |
no_license
|
""" Tools for retrieving information about user's system.
The install script in this repository aims to support as many environments as
possible, and each environment has its own tools for package management and
configuration. The tools in this file are meant to detect information about the
current system in a portable fashion that works with both Python 2 and
Python 3.
"""
import logging
import platform
def get_platform():
if platform.uname()[0] == "Darwin":
return "darwin"
elif platform.uname()[0] == "Linux":
linux_distro = platform.dist()[0]
if linux_distro == "Ubuntu":
return "debian"
return linux_distro
else:
logging.error("Unkown platform {}".format(platform.platform()))
return None
if __name__ == "__main__":
print("Operating system detected: {}".format(get_platform()))
| true
|
5395b967f59fe2531d62f9b55389e19ae8e1f32b
|
Python
|
karolmikolajczuk/Hackerrank-Python
|
/merge_the_tools.py
|
UTF-8
| 1,491
| 3.765625
| 4
|
[] |
no_license
|
#split string into groups
def get_substrings(text, nr_of_substr, k):
#create a list with precised size
list_of_strings = []
#iterate through whole string
for index in range(0, nr_of_substr*k, k):
list_of_strings.append(text[index:index+k])
#return the list
return list_of_strings
#distinct sub string
def no_duplicate(substring):
print("no_duplicate function")
print(substring)
list_of_chars = []
list_of_added = []
for char in substring:
if char in list_of_added:
continue
list_of_chars.append(char)
list_of_added.append(char)
return ''.join(str(char) for char in list_of_chars)
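# Note: in CPython 3.7+, ''.join(dict.fromkeys(substring)) performs the same
# order-preserving de-duplication in one line.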
#invoke `main` function for code
def merge_the_tools(string, k):
#logging
print("###Length of given string={}\n###Split={}\n###Dividing={}".format(len(string), k, len(string)/k))
#get number of groups - sub strings
number_of_substrings = int(len(string)/k)
#get string splitted into sub-strings
list_of_substrings = get_substrings(string, number_of_substrings, k)
#distinct
list_results = []
for l in list_of_substrings:
list_results.append(no_duplicate(l))
    # for each sub-string
for sub_string in list_results:
print(sub_string)
if __name__ == '__main__':
#get data
string = str(input())
split = int(input())
#invoke function for printing distincted substrings
merge_the_tools(string, split)
| true
|
0ce6b06bcfe644b5eb3ae501121141fda74b8034
|
Python
|
adrianstaniec/advent_of_code
|
/10/sol.py
|
UTF-8
| 878
| 3.375
| 3
|
[] |
no_license
|
from collections import Counter
adapters = []
while True:
try:
adapters.append(int(input()))
except Exception as e:
break
print(adapters)
adapters.sort()
adapters = [0] + adapters + [adapters[-1] + 3]
print(adapters)
diffs = [adapters[i + 1] - adapters[i] for i in range(len(adapters) - 1)]
# diffs = [1] + diffs + [3]
print(diffs)
counter = Counter(diffs)
print(counter)
print("--- Part One ---")
print(counter[1] * counter[3])
print("--- Part Two ---")
def arrangements_from_subsequent_unit_differences(x):
    # Ways to arrange a run of x consecutive 1-jolt differences; the values
    # follow the tribonacci recurrence f(n) = f(n-1) + f(n-2) + f(n-3).
    if 0 <= x <= 6:
        return {0: 1, 1: 1, 2: 2, 3: 4, 4: 7, 5: 13, 6: 24}[x]
    else:
        raise NotImplementedError
n_arrangements = 1
cnt = 0
for d in diffs:
if d == 1:
cnt += 1
elif d == 3:
n_arrangements *= arrangements_from_subsequent_unit_differences(cnt)
cnt = 0
print(n_arrangements)
| true
|
dfbaae8eaa94b21495e8861ffcb641699cc3f194
|
Python
|
gkaframanis/roommates-bill
|
/reports.py
|
UTF-8
| 1,686
| 3.375
| 3
|
[] |
no_license
|
import webbrowser
from fpdf import FPDF
import os
class PdfReport:
"""
Creates a Pdf file that contains data about the flatmates
such as their names, their due amount and the period of the bill.
"""
def __init__(self, filename):
self.filename = filename
def generate(self, bill, *roommates):
# unit: using points (pt)
pdf = FPDF(orientation="P", unit="pt", format="A4")
# Add a new page
pdf.add_page(orientation="P")
# Add icon
pdf.image(name="./files/house.png", w=30, h=30, type="png")
# Add some text
pdf.set_font(family="Times", size=24, style="B")
# We write in cells | When w=0 it means it takes up the whole line. | ln=1 so the next cell will be added under this cell.
pdf.cell(w=0, h=40, txt="The Bill of the Roommates", border=0, align="C", ln=1)
pdf.cell(w=0, h=40, txt=f"Total Bill: {bill.amount} $", border=0, align="C", ln=1)
# Change the font for the rest of the text.
pdf.set_font(family="Times", size=14, style="B")
# Starts from the left side of the cell by default.
pdf.cell(w=250, h=25, txt=f"Period: {bill.period}", border=0, ln=1)
for roommate in roommates:
pdf.set_font(family="Times", size=12)
pdf.cell(w=200, h=25, txt=f"{roommate.name} - {roommate.pays(bill, *roommates)} $", ln=1)
# Change my current dir to files and output it in there.
os.chdir("files")
pdf.output(self.filename)
# Open the pdf file automatically to the default web browser pdf viewer.
webbrowser.open(f"file://{os.path.realpath(self.filename)}")
| true
|
28b61fe2976dac91882bd5ec26cb8f2f9173abc8
|
Python
|
aletcherhartman/pong.py
|
/Old Files/paddel.py
|
UTF-8
| 818
| 3.078125
| 3
|
[] |
no_license
|
import pygame
import math
class paddel(object):
    """docstring for paddel"""
    def __init__(self):
        self.X = 30
        self.Y = 30
        self.change_x = 0
        self.change_y = 0
    def makePaddel(self, screen, colour):
        # Hide the mouse cursor
        pygame.mouse.set_visible(0)
        # Draw the paddle rectangle
        pygame.draw.rect(screen, colour, [self.X, self.Y, 30, 60])
    def playerMotion(self):
        pos = pygame.mouse.get_pos()
        self.Y = pos[1]
    def compMotion(self, ballY, height):
        # Follow the ball with a slight lag; keep the paddle on screen
        paddelX = 60  # paddle height, matching the rect drawn in makePaddel
        paddelY = abs(ballY - ballY * 0.2)
        if paddelY - paddelX / 2 < 0:
            self.Y += self.change_y
        if paddelY + paddelX / 2 > height:
            self.Y -= self.change_y
        return self
| true
|
f8923e83f1b39622aa29a578535efbf72f79ba10
|
Python
|
suzySun345/algorithm011-class02
|
/Week_02/589.n叉树的前序遍历.py
|
UTF-8
| 640
| 3.28125
| 3
|
[] |
no_license
|
#
# @lc app=leetcode.cn id=589 lang=python
#
# [589] N-ary Tree Preorder Traversal
#
# @lc code=start
"""
# Definition for a Node.
class Node(object):
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
class Solution(object):
def preorder(self, root):
"""
:type root: Node
:rtype: List[int]
"""
res = []
q = [root]
if root != None:
while q:
node = q.pop()
res.append(node.val)
q += [child for child in node.children[::-1] if child]
return res
# @lc code=end
| true
|
690875d333af22341eafc7f466fe7a048d383b42
|
Python
|
ritesh-deshmukh/Algorithms-and-Data-Structures
|
/180Geeks/Arrays/Kadane's Algorithm.py
|
UTF-8
| 472
| 4.40625
| 4
|
[] |
no_license
|
# Given an array containing both negative and positive integers. Find the contiguous sub-array with maximum sum.
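# e.g. for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the maximum sum is 6 ([4, -1, 2, 1]);
# with an all-negative array the answer is its single largest element.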
# arr = [1,2,3]
arr = [-1,-2,-3,-4]
size = len(arr)
def max_subarray(arr, size):
max_arr = arr[0]
curr_max = arr[0]
for i in range(1,size):
curr_max = max(arr[i], curr_max + arr[i])
max_arr = max(max_arr, curr_max)
return max_arr
print(f"Given array: {arr}")
print(f"Sub-array with maximum sum: {max_subarray(arr,size)}")
| true
|
d184ee8c4a3efedb1f76eb88b3b1eff928a30e0f
|
Python
|
pshevade/Udacity_project_2
|
/tournament.py
|
UTF-8
| 19,354
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
import random
import math
import bleach
def connect():
"""Connect to the PostgreSQL database. Returns a database connection."""
DB = psycopg2.connect("dbname=swiss_style")
return DB, DB.cursor()
def executeQuery(query, values=None):
""" Connect to database and execute the query that is passed
Parameter 'values' can be a list of n elements, or can be None,
in which case the query is fully described as is
"""
rows = ['empty']
DB_connection, cur = connect()
if values is not None:
cur.execute(query, values)
else:
cur.execute(query)
DB_connection.commit()
try:
rows = cur.fetchall()
except psycopg2.ProgrammingError:
pass
DB_connection.close()
return rows
def registerTournament(name):
""" Register a tournament, of the name give by parameter 'name'"""
bleach.clean(name)
query = "INSERT INTO tournaments (tournament_name) values (%s) RETURNING tournament_id;"
values = (name,)
row = executeQuery(query, values)
return row[0][0] # row will only have one element, the tournament_id
def deleteMatches():
""" Remove all the match records from the database."""
query = "DELETE FROM swiss_pairs"
executeQuery(query)
query = "DELETE FROM match_list"
executeQuery(query)
query = "DELETE FROM bye_list"
executeQuery(query)
def deleteTournaments():
""" Remove all tournaments from database """
query = "DELETE FROM tournaments"
executeQuery(query)
def deletePlayers():
""" Remove all the player records from the database."""
sub_query = "SELECT player_id from players"
rows = executeQuery(sub_query)
for row in rows:
deleteSpecificPlayer(row[0])
def deleteSpecificPlayer(player_id):
""" Deletes a specific player, based on the player_id"""
query = "DELETE FROM players where player_id = %s"
values = (player_id, )
executeQuery(query, values)
def countPlayers():
""" Count all players, across all tournaments
Returns the number of players currently registered."""
query = "SELECT count(*) from players;"
row = executeQuery(query)
return row[0][0]
def countPlayersInTournament(tournament):
""" Count all players in a given tournament"""
# Sanitize input, in case it comes from web app/environment
bleach.clean(tournament)
tournament_id = getTournamentID(tournament)
query = "SELECT count(*) from tournament_contestants where tournament_id = %s"
values = (tournament_id, )
rows = executeQuery(query, values)
return rows[0][0]
def registerPlayer(name, tournament="Default"):
"""Adds a player to the tournament database.
The database assigns a unique serial id number for the player. (This
should be handled by your SQL database schema, not in your Python code.)
Args:
name: the player's full name (need not be unique).
Logic -
1. check if tournament exists, else create it.
1b. if tournament doesn't exist, create it
2. register players
3. register player and tournament in tournament_contestants
"""
# Sanitize input, in case it comes from web app/environment
bleach.clean(name)
bleach.clean(tournament)
# 1. check if tournament exists:
tournament_id = getTournamentID(tournament)
# 1b. if tournament does not exist, register/create it
if tournament_id == -1:
tournament_id = registerTournament(tournament)
# 2. register player
query = "INSERT INTO players (player_name) values (%s) RETURNING player_id;"
values = (name, )
row = executeQuery(query, values)
player_id = row[0][0]
# 3. register player and tournament in tournament_contestants
registerContestants(player_id, tournament_id)
return player_id
def getTournamentID(tournament):
""" Return's the tournament_id value from tournaments database when given the
tournament's name as the parameter 'tournament'
If no tournament exists of the name, returns a -1 to recognize this
"""
# Sanitize input, in case it comes from web app/environment
bleach.clean(tournament)
query = "select tournament_id from tournaments where tournament_name = %s"
values = (tournament, )
rows = executeQuery(query, values)
    # 1b. if the tournament doesn't exist, create it
    if len(rows) != 0:
        tournament_id = rows[0][0]
    else:
        print("No tournament exists, may need to create tournament {0}".format(tournament))
tournament_id = -1
return tournament_id
def playerStandings(tournament="Default"):
"""Returns a list of the players and their win records, sorted by wins.
The first entry in the list should be the player in first place, or a player
tied for first place if there is currently a tie.
Returns:
A list of tuples, each of which contains (id, name, wins, matches):
id: the player's unique id (assigned by the database)
name: the player's full name (as registered)
wins: the number of matches the player has won
matches: the number of matches the player has played
"""
# Sanitize input, in case it comes from web app/environment
bleach.clean(tournament)
tournament_id = getTournamentID(tournament)
standings = []
# Find the players are tied by points
query = "SELECT a.player_id, b.player_id from getMatchesAndWins as a, getMatchesAndWins as b where a.player_points = b.player_points and a.player_id < b.player_id order by a.player_points"
tied_players_rows = executeQuery(query)
# Resolve them based on OMW.
reordered_rows = []
for tied_players in tied_players_rows:
tied_players = resolveOMW(tied_players[0], tied_players[1])
reordered_rows.append(tied_players)
# Get the player standings ranked by wins and player_points
query = "SELECT player_id, player_name, wins, matches, player_points from getMatchesAndWins where tournament_id = %s"
values = (tournament_id,)
standings_rows = executeQuery(query, values)
# Rearrange the standings based on the tied pairs that need to be reordered based on OMW
for tied_pair in reordered_rows:
# Find the index where the player_id in tied_pair is in standings
for standings in standings_rows:
if tied_pair[0] == standings[0]:
index_p1 = standings_rows.index(standings)
if tied_pair[1] == standings[0]:
index_p2 = standings_rows.index(standings)
# Swap the rows in case they are incorrectly ordered based on OMW
if index_p1 > index_p2:
standings_rows[index_p1], standings_rows[index_p2] = standings_rows[index_p2], standings_rows[index_p1]
# We only need player_id, player_name, wins, matches to return
player_standings = []
for row in standings_rows:
player_standings.append((row[0], row[1], row[2], row[3]))
return player_standings
def printStandings(standings):
print("Player ID".ljust(10)+"Player Name".ljust(20)+"Wins".ljust(10)+"Matches".ljust(10))
for row in standings:
print(str(row[0]).ljust(10)+str(row[1]).ljust(20)+str(row[2]).ljust(10)+str(row[3]).ljust(10))
def reportMatch(winner, loser, tied=0, tournament="Default"):
"""Records the outcome of a single match between two players.
Args:
winner: the id number of the player who won
loser: the id number of the player who lost
tied: in case the match is a tie, then winner, loser are just tied players
tournament: defaults to "Default", this will allow to report matches per tournament name
"""
# Sanitize input, in case it comes from web app/environment
bleach.clean(tournament)
bleach.clean(winner)
bleach.clean(loser)
bleach.clean(tied)
tournament_id = getTournamentID(tournament)
if tied == 0:
values_report_match = (tournament_id, winner, loser, winner, tied)
query2 = "UPDATE tournament_contestants set player_points = player_points + 2 where player_id = %s"
values2 = (winner,)
executeQuery(query2, values2)
else:
values_report_match = (tournament_id, winner, loser, -1, tied)
query2 = "UPDATE tournament_contestants set player_points = player_points + 1 where player_id = %s"
values2 = (winner,)
executeQuery(query2, values2)
values2 = (loser,)
executeQuery(query2, values2)
query_report_match = "INSERT into match_list (tournament_id, player1_id, player2_id, winner_id, tied) values (%s, %s, %s, %s, %s)"
rows = executeQuery(query_report_match, values_report_match)
def getPlayerId(name):
""" Returns player_id based on the player's name given by parameter 'name' """
# Sanitize input, in case it comes from web app/environment
bleach.clean(name)
query = "select player_id from players where player_name = %s;"
values = (name, )
rows = executeQuery(query, values)
if len(rows) > 0:
return rows[0][0]
else: return 'Not found'
def registerContestants(player, tournament):
""" Registers the contestants per tournament
Done for the case when multiple tournaments exist
"""
# Sanitize input, in case it comes from web app/environment
bleach.clean(player)
bleach.clean(tournament)
query = "INSERT INTO tournament_contestants values (%s, %s);"
values = (tournament, player, )
executeQuery(query, values)
def swissPairings(tournament="Default"):
""" Generate the swiss pairings for a given tournament. if tournament is not
given, then creates swiss pairs out of the default tournament playerStandings
Logic:
1. get tournament_id and calculate total rounds and total matches possible
2. for tournament_id, check if each player has played the same number of matches
3. if each player has played the same number of matches, check if matches played = max
if odd players, then total players = n+1 where n is registered players (so one player can get a bye)
note: total rounds = log(2) n where n players
or = log(2) (n+1) where n players and n odd
total matches = (log(2) n) x n/2 where n players
or = log(2) (n+1) x (n+1)/2 where n players and n odd
4. if odd number of players, give a player a bye in that round
(making sure only one bye per player per tournament)
5. generate swiss pairing by sorting players by wins
"""
# Sanitize input, in case it comes from web app/environment
bleach.clean(tournament)
# 1. get tournament_id and calculate total rounds and total matches possible
player_pairs = []
count_players = countPlayersInTournament(tournament)
tournament_id = getTournamentID(tournament)
# check if count_players is odd, need to allocate a "space" for bye in that case
    if count_players < 1:
        print("We don't have any players!")
else:
total_rounds = round(math.log(count_players, 2))
total_matches = round(total_rounds * count_players/2)
# 2. for tournament_id, check if each player has played the same number of matches
standings = playerStandings(tournament)
# We get player_id, player_name, matches_played, wins
# Rows contains a list of tuples (player_id, matches played)
# We first separate the matches played of all players into a list
matches_played = [row[3] for row in standings]
# Then we check if all the elements (matches played of all players) is the same
all_played_same_matches = all(x == matches_played[0] for x in matches_played)
if all_played_same_matches:
# 3. if each player has played the same number of matches, check if matches played = max
if matches_played[0] == total_matches:
print("We have played all the matches possible in this Swiss Style Tournament")
else:
# 4. if odd number of players, give a player a bye in that round
# (making sure only one bye per player per tournament)
players_by_wins_bye = giveBye(standings, tournament_id)
# 5. generate swiss pairing by sorting players by their standings/bye
player_pairs = getPlayerPairs(players_by_wins_bye)
query = "INSERT into swiss_pairs values (%s, %s, %s, %s)"
# The current round number is calculated by 2 ^ (matches played/total players/2)
for pair in player_pairs:
values = (tournament_id, pair[0], pair[2], 2**round(matches_played[0]/(count_players/2)),)
executeQuery(query, values)
        else:
            print("We have players who still haven't played in this round, as follows: ")
            for row in standings:
                print("Player ID: {0} has played {1} matches".format(row[0], row[3]))
return player_pairs
def giveBye(standings, tournament_id):
""" In case of making swiss pairs for an odd number of players in
the tournament, one player needs to be given a bye.
This function figures out which player hasn't been given a bye yet,
and gives that player a bye
The list of players with a bye is stored in the database.
Only one bye per player per tournament
"""
# get players by the player_id (0th element) & their wins (2nd element)
players_by_wins_rows = [(row[0], row[1]) for row in standings]
# Only need to give a bye in case there are odd number of players
    if len(players_by_wins_rows) % 2 != 0:
for player_count in range(0, len(players_by_wins_rows)):
# Check if player has already been given a bye
query = "select * from bye_list where player_id = %s and tournament_id = %s"
values = (players_by_wins_rows[player_count][0], tournament_id, )
bye_rows = executeQuery(query, values)
# if bye has NOT been given to the player yet, bye_rows will be empty
if len(bye_rows) == 0:
# if player hasn't gotten a bye, give a bye, and insert this record
query = "insert into bye_list values (%s, %s)"
values = (tournament_id, players_by_wins_rows[player_count][0])
insert_bye_row = executeQuery(query, values)
bye_player = players_by_wins_rows.pop(player_count)
players_by_wins_rows.append(bye_player)
players_by_wins_rows.append((None, 'bye'))
break
return players_by_wins_rows
def getPlayerPairs(players):
""" Returns a list of tuples """
# check if we need a bye, i.e if the number of players is odd
players.reverse()
player_pairs = []
while len(players) > 0:
player_pairs.append((players.pop() + players.pop()))
return player_pairs
def resolveOMW(player1, player2, tournament="Default"):
""" Input:
player1 : player's id
player2 : player's id
player1 and player 2 are ordered
(i.e. if player of id 7 is ranked above id 3, player1 = 7, player2 = 3)
Logic:
1. get all players player1 has played with
2. get all players player2 has played with
3. make a list of players both players have played with
4. add both players themselves to the list of common players
- this is done to make sure we find out who won the match(es) between those
two players as well as for player2,
calculate all the players from common list they have won against
5. Find out who won matches between player 1 and common list, and then
between player 2 and common list
6. See how many games against common opponents has player1 won and player2 won
7. if player 2 has won more games than player 1, swap their order
"""
# Sanitize input, in case it comes from web app/environment
bleach.clean(tournament)
# 1. get all players player1 has played with
tournament_id = getTournamentID(tournament)
query = "SELECT player2_id from match_list where player1_id = %s and tournament_id = %s"
values = (player1, tournament_id, )
rows = executeQuery(query, values)
players_p1_played_with = [row[0] for row in rows]
query = "SELECT player1_id from match_list where player2_id = %s and tournament_id = %s"
values = (player1, tournament_id, )
rows = executeQuery(query, values)
players_p1_played_with+= [row[0] for row in rows]
# 2. get all players player2 has played with
query = "SELECT player2_id from match_list where player1_id = %s and tournament_id = %s"
values = (player2, tournament_id, )
rows = executeQuery(query, values)
players_p2_played_with = [row[0] for row in rows]
query = "SELECT player1_id from match_list where player2_id = %s and tournament_id = %s"
values = (player2, tournament_id, )
rows = executeQuery(query, values)
players_p2_played_with+=[row[0] for row in rows]
# 3. make a list of players both players have played with
common_players_list = list(frozenset(players_p1_played_with).intersection(players_p2_played_with))
# 4. add both players themselves to the list of common players
# - this is done to make sure we find out who won the match(es) between those
# two players as well
common_players_list.append(player1)
common_players_list.append(player2)
# 5. Find out who won matches between player 1 and common list, and then
# between player 2 and common list
player1_vs_common_matches = [] # contains the winner id between player1 and corresponding common player
player2_vs_common_matches = [] # contains the winner id between player2 and corresponding common player
for common_player in common_players_list:
query = "SELECT winner_id from match_list where player1_id = %s and player2_id = %s" \
"or player2_id = %s and player1_id = %s"
values = (player1, common_player, player1, common_player,)
rows = executeQuery(query, values)
player1_vs_common_matches += [row[0] for row in rows]
values = (player2, common_player, player2, common_player,)
rows = executeQuery(query, values)
player2_vs_common_matches += [row[0] for row in rows]
# 6. See how many games against common opponents has player1 won and player2 won
# From the list of winner_ids for matches between player1 and
# common list (and player2 and common list), we count how often winner_id = player1_id
# and how often winner_id = player2_id
matches_p1_won_against_common = player1_vs_common_matches.count(player1)
matches_p2_won_against_common = player2_vs_common_matches.count(player2)
# 7. if player 2 has won more games than player 1, swap their order
if matches_p1_won_against_common < matches_p2_won_against_common:
return (player2, player1)
else:
return (player1, player2)
| true
|
1fec056289fb0e90174ab903bc40bc546e70bda7
|
Python
|
junyang10734/leetcode-python
|
/713.py
|
UTF-8
| 566
| 3.359375
| 3
|
[] |
no_license
|
# 713. Subarray Product Less Than K
# Two pointers (sliding window)
# https://blog.csdn.net/fuxuemingzhu/article/details/83047699
# running time: faster than 63.76%
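# Worked example: nums = [10, 5, 2, 6], k = 100 -> 8, counting [10], [5], [2],
# [6], [10, 5], [5, 2], [2, 6] and [5, 2, 6].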
class Solution:
def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:
N = len(nums)
prod = 1
l, r = 0, 0
res = 0
while r < N:
prod *= nums[r]
while l <= r and prod >= k:
prod /= nums[l]
l += 1
res += r - l + 1
r += 1
return res
| true
|
f8bbcea5255c7796fa2552f2481dc2df2171b9c3
|
Python
|
adambemski/Python-Snake-Game
|
/conf.py
|
UTF-8
| 472
| 2.765625
| 3
|
[] |
no_license
|
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
display_horizontal_size_x = int(config['display']['horizontal_size_x'])
display_vertical_size_y = int(config['display']['vertical_size_y'])
x1 = int(config['snake']['start_point_x'])
y1 = int(config['snake']['start_point_y'])
snake_size = int(config['snake']['size'])
game_speed = int(config['game']['speed'])
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
| true
|
6faaffb98e148510c93f045a379622d7925d62bb
|
Python
|
moritzschaefer/unsupervised-text-detection
|
/src/predict_test_img.py
|
UTF-8
| 6,902
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
'''
This is the central main file combining all the prediction functionality.
It fetches images from config.TEST_IMAGE_PATH and predicts the parts with text
contained and the characters contained.
TODO: integrate better character_recognition from notebook
TODO2: save outputs as: for each input image
- One json containing coordinates of text regions and recognized characters
(along with positions and sizes)
- The original image (png) overlayed with bounding boxes of detected text and
printed recognized characters
- npy of text detection
- npy of character recognition
'''
import os
from multiprocessing.pool import Pool
import pickle
import logging
import glob
import cv2
import numpy as np
from skimage.transform import pyramid_gaussian
from character_training import load_model
import feature_extraction
import config
from character_recognition import character_recognition, filter_good_characters
logging.basicConfig(level=logging.INFO)
def sliding_window(img, model, step_size=1):
'''
Yield all windows in an image
'''
for y in range(0, img.shape[0]-32, step_size):
for x in range(0, img.shape[1]-32, step_size):
yield (x,
y,
img[y:min(y+32, img.shape[0]),
x:min(x+32, img.shape[1]),
:],
model)
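# For an H x W padded image, sliding_window yields roughly
# ((H - 32) / step_size) * ((W - 32) / step_size) windows.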
def async_predict(args):
x, y, window, model = args
features = feature_extraction.\
get_features_for_window(window)
# reshape it so it contains a single sample
try:
v = model.predict_proba(features[1].flatten().reshape(1, -1))
except Exception as e:
print(x, y, e)
return x, y, 0
return x, y, v[0][1]
def get_prediction_values(img, model, step_size=1):
'''
Calculate the text_recognition probabilities for each pixel for each
layer
:return: A list of tuples (img with layer dimensions, prediction values)
'''
layers = []
for i, layer_img in enumerate(get_all_layers(img)):
pool = Pool(processes=8)
padded_img = cv2.copyMakeBorder(layer_img, 32, 32, 32, 32,
cv2.BORDER_REFLECT)
values = np.zeros(shape=[padded_img.shape[0], padded_img.shape[1]],
dtype='float')
pixel_counter = 0
logging.info('started for layer {}'.format(i))
for x, y, v in pool.imap(async_predict,
sliding_window(padded_img.astype('float32'),
model,
step_size),
8):
values[y:y+step_size+32, x:x+step_size+32] += v
if (pixel_counter) % 100 == 0:
logging.info("pixel_counter: {}/{} from layer {}".
format(pixel_counter,
((padded_img.shape[0] - 31) *
(padded_img.shape[1] - 31)) //
step_size**2,
i))
pixel_counter += 1
pool.close()
pool.join()
layers.append((layer_img.astype('float32'), values[32:-32, 32:-32]))
logging.info('finished layer {}'.format(i))
return layers
# return all Scaling image of a Image,save into Layer Matrix
def get_all_layers(img):
for (i, resized) in enumerate(
pyramid_gaussian(img,
downscale=config.LAYER_DOWNSCALE,
max_layer=config.NUM_LAYERS)):
# if the image is too small, break from the loop
if resized.shape[0] < 32 or resized.shape[1] < 32:
break
yield resized
def predict_images(step_size=1, plot=True, character=True):
'''
Predict all images in config.TEST_IMAGE_PATH
Save the predictions in TEST_IMAGE_PATH
'''
text_model = pickle.load(open(config.TEXT_MODEL_PATH, 'rb')) # get model
if character:
character_model = load_model()
dictionary = np.load(config.DICT_PATH) # get dictionary
image_files = glob.glob(os.path.join(config.TEST_IMAGE_PATH, '*.png'))
for filename in image_files:
img = cv2.imread(filename)
logging.info('started computation for img {}'.
format(filename.split('/')[-1].split('.')[0]))
predicted_layers = get_prediction_values(img, text_model, step_size)
texts = []
for layer, (layer_img, layer_predictions) in \
enumerate(predicted_layers):
# compute
if plot:
cv2.imshow("image 1", layer_img)
cv2.imshow("image 2", layer_predictions/layer_predictions.max())
cv2.waitKey(0)
cv2.destroyAllWindows()
np.save('{}/{}_layer_{}_prediction.npy'.format(
config.TEST_IMAGE_PATH,
filename.split('/')[-1].split('.')[0], layer),
layer_predictions)
if character:
print('Calculate Characters for layer {}'.format(
layer_img.shape))
layer_texts = character_recognition(layer_img,
layer_predictions,
dictionary,
character_model)
texts.extend(filter_good_characters(layer_texts, layer))
if texts:
pickle.dump(texts, open('{}/{}_character_predictions.pkl'.format(
config.TEST_IMAGE_PATH,
filename.split('/')[-1].split('.')[0]), 'wb'))
# combine_probability_layers(img, predicted_layers)
def combine_probability_layers(img, layers):
'''
Return a combined image of all probabilities
'''
text_probability_image = np.zeros(img.shape, float)
for y in range(0, img.shape[0]):
for x in range(0, img.shape[1]):
max_probability = 0
for layer in layers:
# x and y in the layer which correspond to position in
# original image
                trans_y = int((layer.shape[0] / img.shape[0]) * y)
                trans_x = int((layer.shape[1] / img.shape[1]) * x)
                window = layer[max(0, trans_y - 32):
                               min(trans_y + 1, layer.shape[0]),
                               max(0, trans_x - 32):
                               min(trans_x + 1, layer.shape[1])]
max_probability = max(max_probability, window.max())
text_probability_image[y, x] = max_probability
cv2.imshow("image 1", img)
cv2.imshow("image 2", text_probability_image)
cv2.waitKey(0)
return text_probability_image
if __name__ == "__main__":
predict_images(config.STEP_SIZE, plot=False, character=True)
| true
|
627f206ab368c13e5fe88654fee79007ee5dcc31
|
Python
|
JuneKim/algorithm
|
/ProblemSolving/LeetCode/1935_Maximum Number of Words You Can Type.py
|
UTF-8
| 891
| 3.359375
| 3
|
[] |
no_license
|
# runtime: 93.91%, memory: 56.52%
class Solution:
def canBeTypedWords(self, text: str, brokenLetters: str) -> int:
li_txt = text.split(" ")
myCnt = 0
for txt in li_txt:
is_found = False
for broken in brokenLetters:
if broken in txt:
is_found = True
break
if not is_found:
myCnt += 1
return myCnt
data = [[ "hello world", "ad"], ["leet code", "lt"], [ "leet code", "e"]]
result = [1,1,0]
sol = Solution()
is_success = True
for _idx, _data in enumerate(data):
ret = sol.canBeTypedWords(_data[0], _data[1])
if result[_idx] != ret:
print (f"Failed. {_idx}")
print (f"Output:{ret}")
print (f"Expected:{result[_idx]}")
is_success = False
if is_success:
print ("Success!!")
| true
|
1e3764a9f3c93441200d68821f85546165e6d58b
|
Python
|
CodeTest-StudyGroup/Code-Test-Study
|
/JangWoojin/[5]백준/2014_소수의 곱.py
|
UTF-8
| 354
| 2.90625
| 3
|
[] |
no_license
|
import sys
from heapq import heappush, heappop, heapify
input = sys.stdin.readline
k, n = map(int, input().split())
primes = list(map(int, input().split()))
hq = primes.copy()
heapify(hq)
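# Pop the smallest product n times; when expanding, multiply num only by primes
# up to its smallest prime factor (the break below), so each product is pushed
# exactly once (assumes the primes are given in increasing order).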
num = 0
for _ in range(n):
num = heappop(hq)
for prime in primes:
heappush(hq, prime * num)
if num % prime == 0:
break
print(num)
| true
|
0916f474067ac371f65f4f46279de8176a141a7e
|
Python
|
2q45/Python-crash-course
|
/2-12.py
|
UTF-8
| 277
| 4
| 4
|
[] |
no_license
|
bicycles = [1,2,3,4]
print(f"My first bicycle was a {bicycles[1]}")
print(f"{bicycles}")
print(bicycles[1])
print(bicycles[3])
print(bicycles[0])
names = ["faiz","Akshat","Avi","Dhruv","Krishna"]
print(f"\n{names[0]},\n{names[1]},\n{names[2]},\n{names[3]},\n{names[4]},\n")
| true
|
c95bd9f3627aa0128458a805c0c085c0b4ad3359
|
Python
|
BoChenGroup/EnsLM
|
/mATM/losses/LossFunctions.py
|
UTF-8
| 4,434
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
---------------------------------------------------------------------
-- Author: Jhosimar George Arias Figueroa
---------------------------------------------------------------------
Loss functions used for training our model
"""
import math
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
class LossFunctions:
eps = 1e-8
def mean_squared_error(self, real, predictions):
"""Mean Squared Error between the true and predicted outputs
loss = (1/n)*Σ(real - predicted)^2
Args:
real: (array) corresponding array containing the true labels
predictions: (array) corresponding array containing the predicted labels
Returns:
output: (array/float) depending on average parameters the result will be the mean
of all the sample losses or an array with the losses per sample
"""
loss = (real - predictions).pow(2)
return loss.sum(-1).mean()
def reconstruction_loss(self, real, predicted, rec_type='mse' ):
"""Reconstruction loss between the true and predicted outputs
mse = (1/n)*Σ(real - predicted)^2
bce = (1/n) * -Σ(real*log(predicted) + (1 - real)*log(1 - predicted))
Args:
real: (array) corresponding array containing the true labels
predictions: (array) corresponding array containing the predicted labels
Returns:
output: (array/float) depending on average parameters the result will be the mean
of all the sample losses or an array with the losses per sample
"""
if rec_type == 'mse':
loss = (real - predicted).pow(2)
elif rec_type == 'bce':
loss = F.binary_cross_entropy(predicted, real, reduction='none')
elif rec_type == 'nse':
loss = -(real * (predicted + 1e-10).log())
else:
raise "invalid loss function... try bce or mse..."
return loss.sum(-1).mean()
def log_normal(self, x, mu, var):
"""Logarithm of normal distribution with mean=mu and variance=var
        log N(x; μ, σ²) = -0.5 * Σ [ log(2π) + log(σ²) + ((x - μ)/σ)² ]
        (the constant log(2π) term is dropped in the implementation below)
Args:
x: (array) corresponding array containing the input
mu: (array) corresponding array containing the mean
var: (array) corresponding array containing the variance
Returns:
output: (array/float) depending on average parameters the result will be the mean
of all the sample losses or an array with the losses per sample
"""
if self.eps > 0.0:
var = var + self.eps
return -0.5 * torch.sum(torch.log(var) + torch.pow(x - mu, 2) / var, dim=-1)
# return -0.5 * torch.sum(
# np.log(2.0 * np.pi) + torch.log(var) + torch.pow(x - mu, 2) / var, dim=-1)
def gaussian_loss(self, z, z_mu, z_var, z_mu_prior, z_var_prior):
"""Variational loss when using labeled data without considering reconstruction loss
loss = log q(z|x,y) - log p(z) - log p(y)
Args:
z: (array) array containing the gaussian latent variable
z_mu: (array) array containing the mean of the inference model
z_var: (array) array containing the variance of the inference model
z_mu_prior: (array) array containing the prior mean of the generative model
z_var_prior: (array) array containing the prior variance of the generative mode
Returns:
output: (array/float) depending on average parameters the result will be the mean
of all the sample losses or an array with the losses per sample
"""
loss = self.log_normal(z, z_mu, z_var) - self.log_normal(z, z_mu_prior, z_var_prior)
return loss.mean()
def entropy(self, logits, targets):
"""Entropy loss
loss = (1/n) * -Σ targets*log(predicted)
Args:
logits: (array) corresponding array containing the logits of the categorical variable
real: (array) corresponding array containing the true labels
Returns:
output: (array/float) depending on average parameters the result will be the mean
of all the sample losses or an array with the losses per sample
"""
log_q = F.log_softmax(logits, dim=-1)
return -torch.mean(torch.sum(targets * log_q, dim=-1))
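# Minimal usage sketch (illustrative tensor names, not from the original):
#   lf = LossFunctions()
#   rec = lf.reconstruction_loss(x, x_hat, rec_type='mse')
#   kl = lf.gaussian_loss(z, z_mu, z_var, z_mu_prior, z_var_prior)
#   total = rec + kl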
| true
|
2309c6c9ad2b06b974aa744de3dae14d93af98be
|
Python
|
mkebrahimpour/DataStructures_Python
|
/thinkpython/functions.py
|
UTF-8
| 304
| 3.765625
| 4
|
[] |
no_license
|
# Math Functions
import math
print math
print math.sqrt(4)
# New Functions
def print_lyrics(bruce):
print "--------------------------------"
print "I'm a lumberjack, and I'm okay."
print "I sleep all night and I work all day."
print "Printing Argument:",bruce
print_lyrics('Spam\t' * 4)
| true
|
e1ad8077724054a52c0a4b2c321b202a7b095488
|
Python
|
JMass1/curso_python
|
/desafios/DESAFIO 70.py
|
UTF-8
| 904
| 3.921875
| 4
|
[] |
no_license
|
# PROGRAM TO REGISTER PRODUCTS AND PRICES
preco = soma = count = mpreco = 0
nome = saida = nbarato = ''
while True:
    print('-'*20)
    print('REGISTER A PRODUCT')
    print('-'*20)
    nome = str(input('Product name: ')).strip()
    preco = int(input('Product price: '))
    print('-'*20)
    soma += preco
    if preco > 1000:
        count += 1
    if mpreco == 0:
        nbarato = nome
        mpreco = preco
    if mpreco > preco:
        nbarato = nome
        mpreco = preco
    saida = str(input('Continue [Y/N]? ')).strip().upper()
    while saida != 'Y' and saida != 'N':
        saida = str(input('Continue [Y/N]? ')).strip().upper()
    if saida == 'N':
        break
print('-------END OF PROGRAM-------')
print(f'Total spent on purchases: {soma}.')
print(f'We have {count} products priced above R$1000.')
print(f'The cheapest product is: {nbarato}.')
| true
|
b10684eb18ffd376faa3187343e1632913ec1b8d
|
Python
|
Spirent/cf-netsecopen-tests
|
/cf_common/CfRunTest.py
|
UTF-8
| 102,933
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
import json
import logging
import time
import sys
import os
import numpy as np
import pandas as pd
import pathlib
import math
from dataclasses import dataclass
script_version = 1.80
project_dir = pathlib.Path().absolute().parent
sys.path.append(str(project_dir))
from cf_common.CfClient import *
class RollingStats:
"""Creates rolling window statistics object
Inputs are sample window size and number of digits to round values too.
For example:
- transactions per second window size can be 2 or higher with 0 round digits
- time to first byte can have 1 round digit, best is to use the same window size
"""
def __init__(self, sample_window_size, round_digits):
# initiate list with sample size count of zeros
self.sample_size = sample_window_size
self.round_digits = round_digits
self.list = [0] * self.sample_size
self.current_value = 0
self.avg_val = 0
self.avg_val_last = 0
self.increase_avg = 0
self.variance = 0.000
self.avg_max_load_variance = 0.00
self.new_high = False
self.highest_value = 0
self.not_high_count = 0
self.stable = False
self.stable_count = 0
def update(self, new_value):
"""Updates Rolling List and returns current variance
:param new_value: new single value of for example TPS or TTFB
:return: variance
"""
self.current_value = new_value
if len(self.list) == self.sample_size:
self.list.pop(0)
self.list.append(self.current_value)
self.avg_val = sum(self.list) / len(self.list)
self.avg_val = round(self.avg_val, self.round_digits)
if self.round_digits == 0:
self.avg_val = int(self.avg_val)
max_var = max(self.list) - min(self.list)
self.variance = (max_var / self.avg_val) if self.avg_val != 0 else 0
self.variance = round(self.variance, 3)
# check if new value value is the new high for later use
self.check_if_highest()
return self.variance
def reset(self):
"""Resets rolling window back to all 0
Can be used after load increase on stat as current load that tracks if load is stable after increase
Don't use on reported rolling stat as it will have high increase after its set to all 0
:return: None
"""
self.list = [0] * self.sample_size
def check_if_stable(self, max_var_reference):
"""Checks if load is stable in current list
If its stable also check the increase since a load change was last completed.
:param max_var_reference: user/test configured reference value, e.g. 0.03 for 3%
:return: True if stable, False if not
"""
if self.variance <= max_var_reference:
self.stable = True
self.stable_count += 1
self.increase_since_last_load_change()
return True
else:
self.stable = False
self.stable_count = 0
return False
def increase_since_last_load_change(self):
"""Sets increase_avg, the increase since last load
This function can be called from check_if_stable. The set result can be used by a function to
determine by how much to increase the load. After load change call load_increase_complete to
set the value for the next round.
:return: None
"""
if self.avg_val_last != 0:
self.increase_avg = (
(self.avg_val - self.avg_val_last) / self.avg_val_last
) * 100
self.increase_avg = round(self.increase_avg, 2)
else:
self.avg_val_last = 1
def load_increase_complete(self):
"""set last load change value
Use in combination with increase_since_last_load_change
:return: None
"""
self.avg_val_last = self.avg_val
def check_if_highest(self):
"""Checks and sets highest value reference
Can be called by update function to track if the current update is the new highest value
:return: True if new high, False if not higher vs. previous
"""
if self.highest_value < self.avg_val:
self.highest_value = self.avg_val
self.new_high = True
self.not_high_count = 0
else:
self.new_high = False
self.not_high_count += 1
self.avg_max_load_variance = (
(self.avg_val / self.highest_value) if self.highest_value != 0 else 0
)
self.avg_max_load_variance = round(self.avg_max_load_variance, 2)
        return self.new_high
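
# --- Illustrative usage (editor's sketch, not part of the original suite). ---
# RollingStats keeps a fixed-size window of samples and flags the load as stable
# once the spread across the window drops below a variance threshold. The sample
# values and the 3% reference below are assumptions chosen for the example.
def _demo_rolling_stats():
    window = RollingStats(sample_window_size=3, round_digits=0)
    for tps in (1000, 1010, 990):
        variance = window.update(tps)  # rolls the window, returns current variance
    # spread is (1010 - 990) / 1000 = 0.02, under the 0.03 reference -> stable
    assert window.check_if_stable(max_var_reference=0.03)
    return window.avg_val, variance
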
@dataclass
class RunData:
"""Data class with default values used by RunTest"""
test_id: str = 'test_id'
type_v2: str = 'test_type'
in_name: str = 'test_name'
in_run: str = 'Y'
in_load_type: str = 'loadspec_type'
in_start_load: int = 7
in_incr_low: int = 7
in_incr_med: int = 5
in_incr_high: int = 3
in_duration: int = 1800
in_startup: int = 5
in_rampup: int = 10
in_rampdown: int = 10
in_shutdown: int = 10
in_sustain_period: int = 30
in_kpi_1: str = 'tps'
in_kpi_2: str = 'cps'
in_kpi_and_or: str = 'AND'
in_threshold_low: float = 20.0
in_threshold_med: float = 5.0
in_threshold_high: float = 1.0
variance_sample_size: int = 3
in_max_variance: float = 0.03
in_ramp_low: int = 60
in_ramp_med: int = 40
in_ramp_high: int = 20
in_ramp_seek: bool = False
in_ramp_seek_kpi: str = 'cps'
in_ramp_seek_value: int = 1000
in_ramp_step: int = 5
ramp_seek_complete: bool = True
living_simusers_max_bool: bool = False
living_simusers_max: int = 1
simuser_birth_rate_max_bool: bool = False
simuser_birth_rate_max: int = 450
sslTls_simuser_birth_rate_max: int = 30
simuser_birth_rate_max_capacity: float = 1
in_goal_seek: bool = False
first_steady_interval: bool = True
test_config: dict = None
queue_id: str = 'id'
queue_info: dict = None
queue_capacity: int = 60
core_count: int = 6
port_count: int = 1
client_port_count: int = 1
server_port_count: int = 1
client_core_count: int = 3
    client_portSystemId: str = ''
    in_capacity_adjust: Any = 1
load_constraints: dict = None
test_run: dict = None
test_run_update: dict = None
id: dict = None
score: str = ''
grade: str = ''
run_id: str = 'Test_name'
status: str = 'status' # main run status 'running'
name: str = 'name'
type_v1: str = 'type_v1'
    sub_status: str = None  # subStatus - None while running or not started
created_at: str = None
updated_at: str = None
started_at: str = None
finished_at: str = None
run_link: str = 'runlink'
    report_link: str = None
progress: int = 0 # progress - 0-100
time_elapsed: int = 0 # timeElapsed - seconds
time_remaining: int = 0 # timeRemaining - seconds
c_rx_bandwidth: int = 0
c_rx_packet_count: int = 0
c_rx_packet_rate: int = 0
c_rx_byte_rate: int = 0
c_tx_bandwidth: int = 0
c_tx_packet_count: int = 0
c_tx_packet_rate: int = 0
c_tx_byte_rate: int = 0
c_total_byte_rate: int = 0
c_total_packet_count: int = 0
c_http_aborted_txns: int = 0
c_http_aborted_txns_sec: int = 0
c_http_attempted_txns: int = 0
c_http_attempted_txns_sec: int = 0
c_http_successful_txns: int = 0
c_http_successful_txns_sec: int = 0
c_http_unsuccessful_txns: int = 0
c_http_unsuccessful_txns_sec: int = 0
c_loadspec_avg_idle: int = 0
c_loadspec_avg_cpu: int = 0
c_memory_main_size: int = 0
c_memory_main_used: int = 0
c_memory_packetmem_used: int = 0
c_memory_rcv_queue_length: int = 0
c_mem_per_conn: int = 55
c_simusers_alive: int = 0
c_simusers_animating: int = 0
c_simusers_blocking: int = 0
c_simusers_sleeping: int = 0
c_tcp_avg_ttfb: int = 0
c_tcp_avg_tt_synack: int = 0
c_tcp_cumulative_attempted_conns: int = 0
c_tcp_cumulative_established_conns: int = 0
c_url_avg_response_time: int = 0
c_tcp_attempted_conn_rate: int = 0
c_tcp_established_conn_rate: int = 0
c_tcp_attempted_conns: int = 0
c_tcp_established_conns: int = 0
c_current_load: int = 0
c_desired_load: int = 0
c_total_bandwidth: int = 0
c_memory_percent_used: int = 0
c_current_desired_load_variance: float = 0.0
c_current_max_load_variance: float = 0.0
c_transaction_error_percentage: float = 0.0
s_rx_bandwidth: int = 0
s_rx_packet_count: int = 0
s_rx_packet_rate: int = 0
s_tx_bandwidth: int = 0
s_tx_packet_count: int = 0
s_tx_packet_rate: int = 0
s_memory_main_size: int = 0
s_memory_main_used: int = 0
s_memory_packetmem_used: int = 0
s_memory_rcv_queue_length: int = 0
s_memory_avg_cpu: int = 0
s_tcp_closed_error: int = 0
s_tcp_closed: int = 0
s_tcp_closed_reset: int = 0
s_memory_percent_used: int = 0
first_ramp_load_increase: bool = True
first_goal_load_increase: bool = True
minimum_goal_seek_count: int = 1
goal_seek_count: int = 0
max_load_reached: bool = False
max_load: int = 0
stop: bool = False # test loop control
phase: str = None # time phase of test: ramp up, steady ramp down
# rolling statistics
rolling_sample_size: int = 3
max_var_reference: float = 0.03
rolling_tps: RollingStats = None
rolling_ttfb: RollingStats = None
rolling_current_load: RollingStats = None
rolling_count_since_goal_seek: RollingStats = None
rolling_cps: RollingStats = None
rolling_conns: RollingStats = None
rolling_bw: RollingStats = None
    kpi_1: Any = None
    kpi_2: Any = None
kpi_1_stable: bool = True
kpi_2_stable: bool = True
kpi_1_list: list = None
kpi_2_list: list = None
    ramp_seek_kpi: Any = None
    start_time: Any = None
    timer: Any = None
time_to_run: int = 0
time_to_start: int = 0
time_to_activity: int = 0
time_to_stop_start: int = 0
time_to_stop: int = 0
test_started: bool = False
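
# --- Illustrative usage (editor's sketch): RunData is a plain dataclass, so a run
# context is just an instantiation with the per-test CSV inputs overridden. The
# field values below are example assumptions, not values from any real test.
def _demo_run_data():
    rd = RunData(in_name="http_tput_example", in_load_type="simusers",
                 in_start_load=8, in_goal_seek=True)
    rd.first_steady_interval = False  # goal-seeking runs skip the first steady tick
    return rd
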
class CfOpenConns:
def __init__(self, rt):
self.c_startup_load: int = 0
self.c_startup_mem: int = 0
self.c_new_conns_mem: int = 0
self.inc_conns_within_c_mem: int = 0
self.rt = rt # the CfRunTest object
self.disabled = True
self.got_startup_data = False
self.first_new_load = True
self.changed_load_highest = 0
        # power of 2 dividing the distance between the starting open-conns load and
        # max load (e.g. steps_to_max=5 would give 2**5 = 32 steps; 3 gives 8)
        self.steps_to_max = 3
self.current_step = 1
self.tracker_mapper = (
("setpoint", "c_desired_load"),
("load", "c_current_load"),
("memory", "c_memory_main_used"),
("cps", "c_tcp_established_conn_rate"),
)
self.tracker = {}
def enable(self):
self.disabled = False
log.info("CfOpenConns enabled")
def expect_unused(self, unused_point):
log.warning(f"CfOpenConns surprise used: {unused_point}")
def set_startup_data(self):
if (
self.disabled
or self.got_startup_data
or self.rt.rd.c_http_successful_txns_sec == 0
):
return
self.got_startup_data = True
self.c_startup_mem = self.rt.rd.c_memory_main_used
self.c_startup_load = self.rt.rd.c_current_load
self.c_new_conns_mem = self.rt.rd.c_memory_main_size - self.c_startup_mem
log.info(
f"CfOpenConns -- c_startup_mem: {self.c_startup_mem}; c_startup_load: "
f"{self.c_startup_load}; c_conns_mem: {self.c_new_conns_mem}"
)
def capture_goal_seek_iteration(self):
if self.disabled:
return
t = time.time()
self.tracker[t] = {}
        for key, data_name in self.tracker_mapper:
            # getattr avoids the original exec() round-trip through source strings
            self.tracker[t][key] = getattr(self.rt.rd, data_name)
log.debug(f"CfOpenConns -- {t-self.rt.rd.start_time:.2f}: {self.tracker[t]}")
    def skip_goal_seek(self):
        if self.disabled:
            return False
current = self.rt.rd.c_current_load
desired = self.rt.rd.c_desired_load
ratio = current / desired
log.info(
f"CfOpenConns -- current/desired load: {current:,}/{desired:,} "
f"({ratio:.1%})"
)
diff = abs(ratio - 1)
allowed = 0.015
if diff > allowed:
log.info(f"Skip load update, diff>allowed: {diff:.1%}>{allowed:.1%}")
return True
return False
def dump_iteration_data(self, csv_file):
if self.disabled:
return
field_names = ["time"] + [x[0] for x in self.tracker_mapper]
dw = csv.DictWriter(csv_file, field_names)
dw.writeheader()
for time_key in self.tracker:
row_dict = {"time": f"{time_key - self.rt.rd.start_time:.2f}"}
row_dict.update(self.tracker[time_key])
dw.writerow(row_dict)
def is_load_type_conns(self):
return not self.disabled
def get_new_load(self):
if (
self.rt.rd.c_current_desired_load_variance >= 1.0
and self.rt.rd.c_memory_percent_used < 100
and self.rt.rd.s_memory_percent_used < 100
):
pass
else:
self.rt.rd.max_load_reached = True
if (
self.rt.rd.rolling_conns.increase_avg == 0
and self.rt.rd.c_memory_percent_used > 99.5
):
self.rt.rd.max_load_reached = True
if self.rt.rd.max_load_reached:
log.info("CfOpenConns -- No more load increase; stop seeking.")
return False
if self.first_new_load:
if self.rt.rd.c_current_load - self.c_startup_load > 0 and self.rt.rd.c_memory_main_used - self.c_startup_mem > 100:
self.first_new_load = False
self.rt.rd.c_mem_per_conn = (self.rt.rd.c_memory_main_used - self.c_startup_mem) / (
self.rt.rd.c_current_load - self.c_startup_load
)
self.inc_conns_within_c_mem = int(
0.96 * self.c_new_conns_mem / self.rt.rd.c_mem_per_conn
)
else:
self.inc_conns_within_c_mem = 5000
log.info(
f"CfOpenConns -- c_mem_per_conn: {self.rt.rd.c_mem_per_conn}; "
f"self.inc_conns_within_c_mem: {self.inc_conns_within_c_mem}"
)
if self.rt.rd.c_memory_percent_used > 97:
self.current_step = 0
self.inc_conns_within_c_mem = self.rt.rd.in_incr_high * self.rt.rd.in_capacity_adjust
elif self.current_step > self.steps_to_max and self.rt.rd.c_memory_percent_used <= 97:
self.current_step = 1
self.c_new_conns_mem = self.rt.rd.c_memory_main_size - self.rt.rd.c_memory_main_used
self.inc_conns_within_c_mem = int(self.c_new_conns_mem / self.rt.rd.c_mem_per_conn)
if self.current_step <= self.steps_to_max:
binary_deductor = 1 / (2 ** self.current_step)
            # the binary deductor generates a progression like 1/2, 1/4, 1/8, ..., 0
new_load = self.rt.rd.c_current_load + int(binary_deductor * self.inc_conns_within_c_mem)
log.info(
f"CfOpenConns -- current_step: {self.current_step}; "
f"cur load: {self.rt.rd.c_current_load}, new_load: {new_load}, add load: {binary_deductor * self.inc_conns_within_c_mem}"
)
self.current_step += 1
if new_load < self.changed_load_highest:
new_load = self.changed_load_highest
else:
self.changed_load_highest = new_load
return new_load
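
# --- Illustrative sketch (editor's addition): get_new_load() above consumes the
# remaining connection headroom in halving steps -- 1/2, then 1/4, then 1/8 of the
# headroom -- before falling back to small fixed increments near 100% memory use.
# The headroom figure below is an assumption for the example.
def _demo_open_conns_steps(current_load=100_000, headroom_conns=80_000, steps_to_max=3):
    loads = []
    for step in range(1, steps_to_max + 1):
        current_load += int(headroom_conns / (2 ** step))  # +40k, +20k, +10k
        loads.append(current_load)
    return loads  # [140000, 160000, 170000]
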
class CfRunTest:
def __init__(self, cf, rd, test_details, result_file, temp_file_dir):
log.info(f"script version: {script_version}")
self.cf = cf # CfClient instance
self.rd = rd
#log.info(f"self.rd is: {self.rd}")
self.ocj = CfOpenConns(self) # special behavior for open conns tests
self.result_file = result_file
self.temp_dir = temp_file_dir
self.test = test_details
def init_sequence(self, cf, rd, test_details):
self.init_input_csv(rd, test_details)
rd.test_config = self.get_test_config(cf, rd)
rd.queue_id = rd.test_config["config"]["queue"]["id"]
rd.queue_info = self.get_queue(cf, rd.queue_id)
log.info(f"queue info: \n{json.dumps(rd.queue_info, indent=4)}")
if not self.init_capacity_adj(rd):
return False
self.software_version_lookup(rd.queue_info)
self.get_report_info(rd)
self.result_file.make_report_dir(self.report_dir)
self.result_file.make_report_csv_file(self.report_name)
self.init_simuser_birth_rate_max(rd)
self.init_update_config_load(rd)
self.update_config_load_controller(cf, rd)
rd.test_run = self.start_test_run(cf, rd)
self.init_test_run(rd, cf.controller_ip)
self.init_rolling_stats(rd)
# create entry in result file at the start of test
self.save_results(rd)
return True
def init_input_csv(self, rd, test_details):
rd.test_id = test_details["id"]
rd.type_v2 = test_details["type"]
rd.in_name = test_details["name"]
rd.in_run = test_details["run"]
rd.in_load_type = test_details["load_type"]
rd.in_start_load = test_details["start_load"]
rd.in_incr_low = int(test_details["incr_low"])
rd.in_incr_med = int(test_details["incr_med"])
rd.in_incr_high = int(test_details["incr_high"])
rd.in_duration = int(test_details["duration"])
rd.in_startup = int(test_details["startup"])
rd.in_rampup = int(test_details["rampup"])
rd.in_rampdown = int(test_details["rampdown"])
rd.in_shutdown = int(test_details["shutdown"])
rd.in_sustain_period = int(test_details["sustain_period"])
rd.in_kpi_1 = test_details.get("kpi_1", "tps")
rd.in_kpi_2 = test_details.get("kpi_2", "cps")
        # return_bool_true lower-cases its string input, so compare against "and"
        rd.in_kpi_and_or = self.return_bool_true(test_details.get("kpi_and_or"), "and")
rd.in_threshold_low = float(test_details["low_threshold"])
rd.in_threshold_med = float(test_details["med_threshold"])
rd.in_threshold_high = float(test_details["high_threshold"])
rd.in_sustain_period = int(test_details["sustain_period"])
rd.variance_sample_size = int(test_details["variance_sample_size"])
rd.in_max_variance = float(test_details["max_variance"])
rd.in_ramp_low = int(test_details.get("ramp_low", 60))
rd.in_ramp_med = int(test_details.get("ramp_med", 40))
rd.in_ramp_high = int(test_details.get("ramp_high", 20))
rd.in_capacity_adjust = test_details.get("capacity_adj", 1)
rd.in_ramp_seek = self.if_in_set_true(test_details, "ramp_seek",
{"true", "y", "yes"})
rd.in_ramp_seek_kpi = test_details.get("ramp_kpi", "tps")
rd.in_ramp_seek_value = int(test_details.get("ramp_value", 1))
rd.in_ramp_step = int(test_details.get("ramp_step", 1))
if not rd.in_ramp_seek:
rd.ramp_seek_complete = True
else:
rd.ramp_seek_complete = False
        rd.living_simusers_max_bool = self.check_if_number(
            test_details.get("living_simusers_max", False))
rd.living_simusers_max = self.return_int_if_present(
rd.living_simusers_max_bool,
test_details.get("living_simusers_max", False))
rd.in_goal_seek = test_details["goal_seek"]
if rd.in_goal_seek.lower() in {"true", "y", "yes"}:
rd.in_goal_seek = True
rd.first_steady_interval = False
else:
rd.in_goal_seek = False
rd.start_time = time.time()
rd.timer = time.time() - rd.start_time
def init_simuser_birth_rate_max(self, rd):
if not (rd.type_v2 == "open_connections" and rd.in_load_type == "simusers"):
self.test["simuser_birth_rate_max"] = "none"
return
if "CF400" in self.device_model:
rd.simuser_birth_rate_max_capacity = 3
self.sslTls_enabled = rd.test_config.get("config", {}).get("protocol", {}).get("supplemental", {}).get("sslTls", {}).get("enabled", False)
if self.sslTls_enabled:
self.test["simuser_birth_rate_max"] = rd.sslTls_simuser_birth_rate_max * rd.simuser_birth_rate_max_capacity
else:
self.test["simuser_birth_rate_max"] = rd.simuser_birth_rate_max * rd.simuser_birth_rate_max_capacity
rd.simuser_birth_rate_max_bool = self.check_if_number(
self.test.get("simuser_birth_rate_max", False))
rd.simuser_birth_rate_max = self.return_int_if_present(
rd.simuser_birth_rate_max_bool,
self.test.get("simuser_birth_rate_max", False))
def init_capacity_adj(self, rd):
rd.queue_capacity = int(rd.queue_info["capacity"])
log.info(f"queue_capacity: {rd.queue_capacity}")
if len(rd.queue_info["computeGroups"]) > 0:
            rd.core_count, rd.port_count = self.core_count_lookup_cg(rd.queue_info)
log.info(f"core_count cg and port count cg: {rd.core_count, rd.port_count}")
# self.capacity_adj_cg(rd)
        else:
            rd.core_count, rd.port_count = self.core_count_lookup_spr(rd.queue_info)
            log.info(f"core_count spr and port count spr: {rd.core_count, rd.port_count}")
# self.capacity_adj_spr(rd)
if not self.check_config(rd):
return False
if self.test["type"] == "advanced_mixed_traffic":
rd.client_port_count = len(rd.test_config["config"]["relationships"])
rd.server_port_count = len(rd.test_config["config"]["relationships"])
rd.client_portSystemId = rd.test_config["config"]["relationships"][0]["client"]["portSystemId"]
else:
rd.client_port_count = len(rd.test_config["config"]["interfaces"]["client"])
rd.server_port_count = len(rd.test_config["config"]["interfaces"]["server"])
rd.client_portSystemId = rd.test_config["config"]["interfaces"]["client"][0]["portSystemId"]
self.device_ip = rd.client_portSystemId.split("/", 1)[0]
log.info(f"client_port_count: {rd.client_port_count}")
log.info(f"server_port_count: {rd.server_port_count}")
rd.client_core_count = int(
rd.core_count
/ rd.port_count
* rd.client_port_count
)
log.info(f"client_core_count: {rd.client_core_count}")
rd.in_capacity_adjust = self.check_capacity_adjust(
rd.in_capacity_adjust,
rd.in_load_type,
rd.client_port_count,
rd.client_core_count,
)
log.info(f"in_capacity_adjust: {rd.in_capacity_adjust}")
rd.load_constraints = {"enabled": False}
return True
def check_config(self, rd):
if self.test["type"] == "advanced_mixed_traffic":
interfaces = rd.test_config["config"]["relationships"]
information = "relationships"
else:
interfaces = rd.test_config["config"]["interfaces"]
information = "interfaces"
if len(interfaces) == 0:
errormsg = f"No subnets/{information} assigned in test"
log.debug(errormsg)
print(errormsg)
return False
return True
# def capacity_adj_cg(self, rd):
# rd.core_count = self.core_count_lookup_cg(rd.queue_info)
# log.info(f"core_count: {rd.core_count}")
# rd.client_port_count = len(rd.test_config["config"]["interfaces"]["client"])
# log.info(f"client_port_count: {rd.client_port_count}")
# rd.server_port_count = len(rd.test_config["config"]["interfaces"]["server"])
# log.info(f"server_port_count: {rd.server_port_count}")
# rd.client_core_count = int(
# rd.core_count
# / (rd.client_port_count + rd.server_port_count)
# * rd.client_port_count
# )
# log.info(f"client_core_count: {rd.client_core_count}")
# rd.in_capacity_adjust = self.check_capacity_adjust(
# rd.in_capacity_adjust,
# rd.in_load_type,
# rd.client_port_count,
# rd.client_core_count,
# )
# log.info(f"in_capacity_adjust: {rd.in_capacity_adjust}")
# rd.load_constraints = {"enabled": False}
def init_update_config_load(self, rd):
if not self.update_config_load(rd):
report_error = f"unknown load_type with test type"
log.debug(report_error)
print(report_error)
def init_test_run(self, rd, controller_ip):
if not rd.test_started:
report_error = f"test did not start\n{json.dumps(rd.test_run, indent=4)}"
log.debug(report_error)
print(report_error)
rd.test_run_update = None
rd.id = rd.test_run.get("id")
rd.queue_id = rd.test_run.get("queueId")
rd.score = rd.test_run.get("score")
rd.grade = rd.test_run.get("grade")
rd.run_id = rd.test_run.get("runId")
rd.status = rd.test_run.get("status") # main run status 'running'
rd.name = rd.test_run.get("test", {}).get("name")
rd.type_v1 = rd.test_run.get("test", {}).get("type")
rd.sub_status = rd.test_run.get("subStatus")
rd.created_at = rd.test_run.get("createdAt")
rd.updated_at = rd.test_run.get("updatedAt")
rd.started_at = rd.test_run.get("startedAt")
rd.finished_at = rd.test_run.get("finishedAt")
rd.progress = rd.test_run.get("progress")
rd.time_elapsed = rd.test_run.get("timeElapsed")
rd.time_remaining = rd.test_run.get("timeRemaining")
rd.run_link = (
"https://"
+ controller_ip
+ "/#livecharts/"
+ rd.type_v1
+ "/"
+ rd.id
)
print(f"Live charts: {rd.run_link}")
def init_rolling_stats(self, rd):
# rolling statistics
rd.rolling_sample_size = rd.variance_sample_size
rd.max_var_reference = rd.in_max_variance
rd.rolling_tps = RollingStats(rd.rolling_sample_size, 0)
rd.rolling_ttfb = RollingStats(rd.rolling_sample_size, 1)
rd.rolling_current_load = RollingStats(rd.rolling_sample_size, 0)
rd.rolling_count_since_goal_seek = RollingStats(
rd.rolling_sample_size, 1
) # round to 1 for > 0 avg
rd.rolling_cps = RollingStats(rd.rolling_sample_size, 0)
rd.rolling_conns = RollingStats(rd.rolling_sample_size, 0)
rd.rolling_bw = RollingStats(rd.rolling_sample_size, 0)
rd.kpi_1 = rd.rolling_tps
rd.kpi_2 = rd.rolling_cps
rd.kpi_1_stable = True
rd.kpi_2_stable = True
rd.kpi_1_list = []
rd.kpi_2_list = []
rd.ramp_seek_kpi = rd.rolling_tps
@staticmethod
def if_in_set_true(dict_var, dict_key, in_set):
if dict_key in dict_var:
var = dict_var[dict_key]
if var.lower() in in_set:
return True
return False
@staticmethod
    def check_if_number(in_value):
        # falsy values (None, False, 0, "") are treated as "not set", as before
        if not in_value:
            return False
        if isinstance(in_value, (int, float)):
            return True
        if isinstance(in_value, str) and in_value.isdigit():
            return True
        return False
@staticmethod
def return_int_if_present(present, value):
if present:
return int(value)
def get_test_config(self, cf, rd):
response = None
try:
response = cf.get_test(
rd.type_v2, rd.test_id, self.temp_dir / "running_test_config.json"
)
log.debug(f"{json.dumps(response, indent=4)}")
except Exception as detailed_exception:
log.error(
f"Exception occurred when retrieving the test: "
f"\n<{detailed_exception}>"
)
return response
def get_queue(self, cf, queue_id):
response = None
try:
response = cf.get_queue(queue_id)
# log.debug(f"{json.dumps(response, indent=4)}")
except Exception as detailed_exception:
log.error(
f"Exception occurred when retrieving test queue information: "
f"\n<{detailed_exception}>"
)
return response
@staticmethod
def core_count_lookup_cg(queue_info):
        cores = 0
        ports = 0
        for cg in queue_info["computeGroups"]:
            cores = cores + int(cg["cores"])
            ports = ports + len(cg["ports"])
        return cores, ports

    @staticmethod
    def core_count_lookup_spr(queue_info):
        cores = 0
        for port in queue_info["ports"]:
            cores = cores + int(port["cores"])
        ports = queue_info["portCount"]
        return cores, ports
def software_version_lookup(self, queue_info):
self.model = ""
self.software_version = ""
self.divide_by_1000 = True
for device in queue_info["devices"]:
if device["ip"] == self.device_ip:
self.device_info = device
self.software_version = device["slots"][0]["computeGroups"][0]["software"]
break
if "l4l7lxc" in self.software_version:
self.software_version = self.software_version.split("l4l7lxc")[1]
self.model = "lxc"
if "l4l7Vm" in self.software_version:
self.software_version = self.software_version.split("l4l7Vm")[1]
self.model = "lxc"
software_version_list = self.software_version.split(".")
software_version_list = [int(i) for i in software_version_list]
if self.model == "lxc":
if software_version_list[0] <= 4:
self.divide_by_1000 = False
elif software_version_list[0] == 5 and software_version_list[1] < 7:
self.divide_by_1000 = False
else:
if software_version_list[0] <= 19:
self.divide_by_1000 = False
elif software_version_list[0] == 20:
if software_version_list[1] == 0:
self.divide_by_1000 = False
elif software_version_list[1] == 1 and software_version_list[2] == 0:
self.divide_by_1000 = False
log.info(f"software version: {self.software_version}")
log.info(f"divide_by_1000: {self.divide_by_1000}")
print(f"software version: {self.software_version}")
def get_report_info(self, rd):
self.device_mode = ""
self.device_description = self.device_info["description"][4:]
self.device_profile = self.device_info["slots"][0]["profile"]
self.device_model = self.device_info["slots"][0]["model"][4:]
for profile_info in ["Functional-", "Performance-", "Maximum-"]:
if profile_info in self.device_profile:
self.device_profile = self.device_profile.split(profile_info)[-1].strip("\n")
break
if "cfv" in self.device_description.lower():
#waiting for issue CF-17490 fixing
self.device_model = f"{self.device_model.rsplit('-', 1)[0]}-vCores-{rd.client_core_count*2}"
self.report_dir = self.device_model
else:
self.report_dir = "_".join((self.device_model, self.device_profile))
self.report_name = "_".join((self.device_ip, self.software_version))
def update_startload_rampup_for_ec_sha384_on_cfv(self, rd):
update_flag = False
sslTls_hash = []
if rd.in_goal_seek and "cfv" in self.device_description.lower():
sslTls_hash = rd.test_config.get("config", {}).get("protocol", {}).get("supplemental", {}).get("sslTls", {}).get("ciphers", [])
if sslTls_hash:
for each_hash in sslTls_hash:
if "SHA384" in each_hash and "ECDHE" in each_hash:
update_flag = True
break
if update_flag:
if int(rd.in_start_load) < 8:
rd.in_start_load = 8
if self.test_type == "cps":
if int(rd.in_rampup) < 120:
rd.in_rampup = 120
if self.test_type == "tput":
if int(rd.in_rampup) < 180:
rd.in_rampup = 180
if int(rd.in_duration) < int(rd.in_startup) + int(rd.in_rampup) + int(rd.in_rampdown) + int(rd.in_shutdown):
rd.in_duration = int(rd.in_startup) + int(rd.in_rampup) + int(rd.in_rampdown) + int(rd.in_shutdown) + 60
return
@staticmethod
def check_capacity_adjust(
cap_adjust, load_type, client_port_count, client_core_count
):
if cap_adjust.lower() == "auto":
if load_type.lower() in {"simusers", "simusers/second"}:
return client_core_count
else:
return client_port_count
else:
return int(cap_adjust)
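
    # Example (editor's note): with capacity_adj="auto", a simusers load on a
    # 2-port / 6-core client allocation scales increments by 6 (cores), while a
    # bandwidth load scales by 2 (ports); any explicit number is used as-is.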
def update_config_load(self, rd):
load_type = rd.in_load_type.lower()
test_type = self.test_type(rd)
if test_type in {"tput", "emix", "amt"} and load_type == "simusers":
load_key = "bandwidth"
rd.in_load_type = "SimUsers"
elif test_type in {"tput", "emix", "amt"} and load_type == "bandwidth":
load_key = "bandwidth"
rd.in_load_type = "Bandwidth"
elif test_type == "tput" and load_type == "simusers/second":
load_key = "bandwidth"
rd.in_load_type = "SimUsers/Second"
elif test_type == "cps" and load_type == "connections/second":
load_key = "connectionsPerSecond"
rd.in_load_type = "Connections/Second"
elif test_type == "cps" and load_type == "simusers":
load_key = "connectionsPerSecond"
rd.in_load_type = "SimUsers"
elif test_type == "cps" and load_type == "simusers/second":
load_key = "connectionsPerSecond"
rd.in_load_type = "SimUsers/Second"
elif test_type == "conns" and load_type == "simusers":
self.ocj.expect_unused('test_type == "conns" and load_type == "simusers"')
load_key = "connections"
rd.in_load_type = "SimUsers"
elif test_type == "conns" and load_type == "connections":
load_key = "connections"
rd.in_load_type = "Connections"
else:
return False
if test_type == "conns" and rd.in_goal_seek:
self.ocj.enable()
self.update_startload_rampup_for_ec_sha384_on_cfv(rd)
rd.in_start_load = int(rd.in_start_load) * rd.in_capacity_adjust
self.update_load_constraints(rd)
load_update = {
"config": {
"loadSpecification": {
"duration": int(rd.in_duration),
"startup": int(rd.in_startup),
"rampup": int(rd.in_rampup),
"rampdown": int(rd.in_rampdown),
"shutdown": int(rd.in_shutdown),
load_key: int(rd.in_start_load),
"type": rd.in_load_type,
"constraints": rd.load_constraints,
# "constraints": {"enabled": False},
}
}
}
with open((self.temp_dir / "test_load_update.json"), "w") as f:
json.dump(load_update, f, indent=4)
return True
def update_config_load_controller(self, cf, rd):
response = cf.update_test(
rd.type_v2, rd.test_id, self.temp_dir / "test_load_update.json"
)
log.info(f"{json.dumps(response, indent=4)}")
rd.test_config = self.get_test_config(cf, rd)
def update_load_constraints(self, rd):
living = {"enabled": False}
open_connections = {"enabled": False}
birth_rate = {"enabled": False}
connections_rate = {"enabled": False}
constraints = False
if rd.living_simusers_max_bool:
constraints = True
living = {
"enabled": True,
"max": rd.living_simusers_max
}
if rd.simuser_birth_rate_max_bool:
constraints = True
birth_rate = {
"enabled": True,
"max": rd.simuser_birth_rate_max * rd.in_capacity_adjust
}
if constraints:
rd.load_constraints = {
"enabled": True,
"living": living,
"openConnections": open_connections,
"birthRate": birth_rate,
"connectionsRate": connections_rate,
}
def test_type(self, rd):
if rd.type_v2 == "http_throughput":
test_type = "tput"
elif rd.type_v2 == "http_connections_per_second":
test_type = "cps"
elif rd.type_v2 == "open_connections":
test_type = "conns"
elif rd.type_v2 == "emix":
test_type = "emix"
elif rd.type_v2 == "advanced_mixed_traffic":
test_type = "amt"
else:
test_type = "tput"
self.test_type = test_type
return test_type
def start_test_run(self, cf, rd):
try:
response = cf.start_test(rd.test_id)
log.info(f"{json.dumps(response, indent=4)}")
rd.test_started = True
except Exception as detailed_exception:
log.error(
f"Exception occurred when starting the test: "
f"\n<{detailed_exception}>"
)
rd.test_started = False
return response
def update_test_run(self, cf, rd):
rd.test_run_update = cf.get_test_run(rd.id)
rd.status = rd.test_run_update.get("status") # main run status 'running'
rd.sub_status = rd.test_run_update.get("subStatus")
rd.score = rd.test_run_update.get("score")
rd.grade = rd.test_run_update.get("grade")
rd.started_at = rd.test_run_update.get("startedAt")
rd.finished_at = rd.test_run_update.get("finishedAt")
rd.progress = rd.test_run_update.get("progress")
rd.time_elapsed = rd.test_run_update.get("timeElapsed")
rd.time_remaining = rd.test_run_update.get("timeRemaining")
update_test_run_log = (
f"Status: {rd.status} sub status: {rd.sub_status} "
f" elapsed: {rd.time_elapsed} remaining: {rd.time_remaining}"
)
log.debug(update_test_run_log)
return True
def update_phase(self, rd):
"""updates test phase based on elapsed time vs. loadspec configuration
If goal seeking is enabled and the test is in steady phase, the phase will be set to goalseek
:return: None
"""
phase = None
steady_duration = rd.in_duration - (
rd.in_startup + rd.in_rampup + rd.in_rampdown + rd.in_shutdown
)
if 0 <= rd.time_elapsed <= rd.in_startup:
phase = "startup"
elif rd.in_startup <= rd.time_elapsed <= (rd.in_startup + rd.in_rampup):
phase = "rampup"
elif (
(rd.in_startup + rd.in_rampup)
<= rd.time_elapsed
<= (rd.in_duration - (rd.in_rampdown + rd.in_shutdown))
):
phase = "steady"
if rd.first_steady_interval:
phase = "rampup"
rd.first_steady_interval = False
elif (
(rd.in_startup + rd.in_rampup + steady_duration)
<= rd.time_elapsed
<= (rd.in_duration - rd.in_shutdown)
):
phase = "rampdown"
elif (
(rd.in_duration - rd.in_shutdown)
<= rd.time_elapsed
<= rd.in_duration
):
phase = "shutdown"
elif rd.in_duration <= rd.time_elapsed:
phase = "finished"
log.info(f"test phase: {phase}")
rd.phase = phase
# Override phase if ramp seek is enabled
if rd.in_ramp_seek and rd.phase == "steady" and not rd.ramp_seek_complete:
rd.phase = "rampseek"
log.info(f"ramp seek phase: {rd.phase}")
# Override phase if goal seeking is enabled
elif rd.in_goal_seek and rd.phase == "steady":
rd.phase = "goalseek"
log.info(f"goal seek phase: {rd.phase}")
def update_run_stats(self, cf, rd):
get_run_stats = cf.fetch_test_run_statistics(rd.id)
#log.debug(f'{get_run_stats}')
#log.debug(json.dumps(get_run_stats, indent=4))
self.update_client_stats(rd, get_run_stats)
self.update_server_stats(rd, get_run_stats)
def update_client_stats(self, rd, get_run_stats):
        client_stats = {}
        for i in get_run_stats["client"]:
            if "type" in i and "subType" in i and "value" in i:
                stat_type = i["type"]
                sub_type = i["subType"]
                value = i["value"]
                if stat_type not in client_stats:
                    client_stats[stat_type] = {}
                client_stats[stat_type][sub_type] = value
            elif "type" in i and "value" in i:
                stat_type = i["type"]
                value = i["value"]
                client_stats[stat_type] = value
self.assign_client_run_stats(rd, client_stats)
def update_server_stats(self, rd, get_run_stats):
        server_stats = {}
        for i in get_run_stats["server"]:
            if "type" in i and "subType" in i and "value" in i:
                stat_type = i["type"]
                sub_type = i["subType"]
                value = i["value"]
                if stat_type not in server_stats:
                    server_stats[stat_type] = {}
                server_stats[stat_type][sub_type] = value
            elif "type" in i and "value" in i:
                stat_type = i["type"]
                value = i["value"]
                server_stats[stat_type] = value
self.assign_server_run_stats(rd, server_stats)
def assign_client_run_stats(self, rd, client_stats):
rd.c_rx_bandwidth = client_stats.get("driver", {}).get("rxBandwidth", 0)
rd.c_rx_packet_count = client_stats.get("driver", {}).get("rxPacketCount", 0)
rd.c_rx_packet_rate = client_stats.get("driver", {}).get("rxPacketRate", 0)
rd.c_tx_bandwidth = client_stats.get("driver", {}).get("txBandwidth", 0)
rd.c_tx_packet_count = client_stats.get("driver", {}).get("txPacketCount", 0)
rd.c_tx_packet_rate = client_stats.get("driver", {}).get("txPacketRate", 0)
rd.c_rx_byte_rate = client_stats.get("sum", {}).get("rxByteRate", 0)
rd.c_tx_byte_rate = client_stats.get("sum", {}).get("txByteRate", 0)
rd.c_http_aborted_txns = client_stats.get("http", {}).get("abortedTxns", 0)
rd.c_http_aborted_txns_sec = client_stats.get("http", {}).get(
"abortedTxnsPerSec", 0
)
rd.c_http_attempted_txns = client_stats.get("sum", {}).get("attemptedTxns", 0)
rd.c_http_attempted_txns_sec = client_stats.get("sum", {}).get(
"attemptedTxnsPerSec", 0
)
rd.c_http_successful_txns = client_stats.get("sum", {}).get(
"successfulTxns", 0
)
rd.c_http_successful_txns_sec = client_stats.get("sum", {}).get(
"successfulTxnsPerSec", 0
)
rd.c_http_unsuccessful_txns = client_stats.get("sum", {}).get(
"unsuccessfulTxns", 0
)
rd.c_http_unsuccessful_txns_sec = client_stats.get("sum", {}).get(
"unsuccessfulTxnsPerSec", 0
)
rd.c_loadspec_avg_idle = client_stats.get("loadspec", {}).get(
"averageIdleTime", 0
)
rd.c_loadspec_avg_cpu = round(
client_stats.get("loadspec", {}).get("cpuUtilized", 0), 1
)
rd.c_memory_main_size = client_stats.get("memory", {}).get("mainPoolSize", 0)
rd.c_memory_main_used = client_stats.get("memory", {}).get("mainPoolUsed", 0)
rd.c_memory_packetmem_used = client_stats.get("memory", {}).get(
"packetMemoryUsed", 0
)
rd.c_memory_rcv_queue_length = client_stats.get("memory", {}).get(
"rcvQueueLength", 0
)
rd.c_simusers_alive = client_stats.get("simusers", {}).get("simUsersAlive", 0)
rd.c_simusers_animating = client_stats.get("simusers", {}).get(
"simUsersAnimating", 0
)
rd.c_simusers_blocking = client_stats.get("simusers", {}).get(
"simUsersBlocking", 0
)
rd.c_simusers_sleeping = client_stats.get("simusers", {}).get(
"simUsersSleeping", 0
)
rd.c_simusers_suspending = client_stats.get("simusers", {}).get(
"simUsersSuspending", 0
)
rd.c_current_load = client_stats.get("sum", {}).get("currentLoadSpecCount", 0)
rd.c_desired_load = client_stats.get("sum", {}).get("desiredLoadSpecCount", 0)
rd.c_tcp_avg_ttfb = round(
client_stats.get("tcp", {}).get("averageTimeToFirstByte", 0), 1
)
rd.c_tcp_avg_tt_synack = round(
client_stats.get("tcp", {}).get("averageTimeToSynAck", 0), 1
)
rd.c_tcp_cumulative_attempted_conns = client_stats.get("tcp", {}).get(
"cummulativeAttemptedConns", 0
)
rd.c_tcp_cumulative_established_conns = client_stats.get("tcp", {}).get(
"cummulativeEstablishedConns", 0
)
rd.c_url_avg_response_time = round(
client_stats.get("url", {}).get("averageRespTimePerUrl", 0), 1
)
if self.divide_by_1000:
rd.c_url_avg_response_time = round(rd.c_url_avg_response_time / 1000, 3)
rd.c_tcp_attempted_conn_rate = client_stats.get("sum", {}).get(
"attemptedConnRate", 0
)
rd.c_tcp_established_conn_rate = client_stats.get("sum", {}).get(
"establishedConnRate", 0
)
rd.c_tcp_attempted_conns = client_stats.get("sum", {}).get(
"attemptedConns", 0
)
rd.c_tcp_established_conns = client_stats.get("sum", {}).get(
"currentEstablishedConns", 0
)
rd.time_elapsed = client_stats.get("timeElapsed", 0)
rd.time_remaining = client_stats.get("timeRemaining", 0)
rd.c_total_bandwidth = rd.c_rx_bandwidth + rd.c_tx_bandwidth
rd.c_total_byte_rate = rd.c_rx_byte_rate + rd.c_tx_byte_rate
rd.c_total_packet_count = rd.c_rx_packet_count + rd.c_tx_packet_count
if rd.c_memory_main_size > 0 and rd.c_memory_main_used > 0:
rd.c_memory_percent_used = round(100 *
rd.c_memory_main_used / rd.c_memory_main_size, 2
)
if rd.c_current_load > 0 and rd.c_desired_load > 0:
rd.c_current_desired_load_variance = round(
rd.c_current_load / rd.c_desired_load, 2
)
if rd.c_http_successful_txns > 0:
rd.c_transaction_error_percentage = (
rd.c_http_unsuccessful_txns + rd.c_http_aborted_txns
) / rd.c_http_successful_txns
if rd.phase in ["rampup", "goalseek"]:
self.ocj.set_startup_data()
return True
def assign_server_run_stats(self, rd, server_stats):
rd.s_rx_bandwidth = server_stats.get("driver", {}).get("rxBandwidth", 0)
rd.s_rx_packet_count = server_stats.get("driver", {}).get("rxPacketCount", 0)
rd.s_rx_packet_rate = server_stats.get("driver", {}).get("rxPacketRate", 0)
rd.s_tx_bandwidth = server_stats.get("driver", {}).get("txBandwidth", 0)
rd.s_tx_packet_count = server_stats.get("driver", {}).get("txPacketCount", 0)
rd.s_tx_packet_rate = server_stats.get("driver", {}).get("txPacketRate", 0)
rd.s_memory_main_size = server_stats.get("memory", {}).get("mainPoolSize", 0)
rd.s_memory_main_used = server_stats.get("memory", {}).get("mainPoolUsed", 0)
rd.s_memory_packetmem_used = server_stats.get("memory", {}).get(
"packetMemoryUsed", 0
)
rd.s_memory_rcv_queue_length = server_stats.get("memory", {}).get(
"rcvQueueLength", 0
)
rd.s_memory_avg_cpu = round(
server_stats.get("memory", {}).get("cpuUtilized", 0), 1
)
rd.s_tcp_closed_error = server_stats.get("sum", {}).get("closedWithError", 0)
rd.s_tcp_closed = server_stats.get("sum", {}).get("closedWithNoError", 0)
rd.s_tcp_closed_reset = server_stats.get("sum", {}).get("closedWithReset", 0)
if rd.s_memory_main_size > 0 and rd.s_memory_main_used > 0:
rd.s_memory_percent_used = round(100 *
rd.s_memory_main_used / rd.s_memory_main_size, 2
)
return True
def print_test_status(self, rd):
status = (
f"{rd.timer}s -status: {rd.status} -sub status: {rd.sub_status} "
f"-progress: {rd.progress} -seconds elapsed: {rd.time_elapsed} "
f"-remaining: {rd.time_remaining}"
)
print(status)
def print_test_stats(self, rd):
stats = (
f"{rd.time_elapsed}s {rd.phase} -load: {rd.c_current_load:,}/{rd.c_desired_load:,} "
f"-current/desired var: {rd.c_current_desired_load_variance} "
f"-current avg/max var: {rd.rolling_tps.avg_max_load_variance} "
f"-seek ready: {rd.rolling_count_since_goal_seek.stable}"
f"\n-tps: {rd.c_http_successful_txns_sec:,} -tps stable: {rd.rolling_tps.stable} "
f"-tps cur avg: {rd.rolling_tps.avg_val:,} -tps prev: {rd.rolling_tps.avg_val_last:,} "
f"-delta tps: {rd.rolling_tps.increase_avg} -tps list:{rd.rolling_tps.list} "
f"\n-cps: {rd.c_tcp_established_conn_rate:,} -cps stable: {rd.rolling_cps.stable} "
f"-cps cur avg: {rd.rolling_cps.avg_val:,} -cps prev: {rd.rolling_cps.avg_val_last:,} "
f"-delta cps: {rd.rolling_cps.increase_avg} -cps list:{rd.rolling_cps.list} "
f"\n-conns: {rd.c_tcp_established_conns:,} -conns stable: {rd.rolling_conns.stable} "
f"-conns cur avg: {rd.rolling_conns.avg_val:,} -conns prev: {rd.rolling_conns.avg_val_last:,} "
f"-delta conns: {rd.rolling_cps.increase_avg} -conns list:{rd.rolling_conns.list} "
f"\n-bw: {rd.c_total_bandwidth:,} -bw stable: {rd.rolling_bw.stable} "
f"-bw cur avg: {rd.rolling_bw.avg_val:,} -bw prev: {rd.rolling_bw.avg_val_last:,} "
f"-delta bw: {rd.rolling_bw.increase_avg} -bw list:{rd.rolling_bw.list} "
f"\n-ttfb: {rd.c_tcp_avg_ttfb:,} -ttfb stable: {rd.rolling_ttfb.stable} "
f"-ttfb cur avg: {rd.rolling_ttfb.avg_val:,} -ttfb prev: {rd.rolling_ttfb.avg_val_last:,} "
f"-delta ttfb: {rd.rolling_ttfb.increase_avg} -ttfb list:{rd.rolling_ttfb.list} "
f"\n-cpu_c: {rd.c_loadspec_avg_cpu:6.1f} -pktmemused_c: {rd.c_memory_packetmem_used:4.0f} "
f" -memused_c: {rd.c_memory_main_used:5.0f} -memusedpert_c: {rd.c_memory_percent_used:3.1f}"
f" -mem_c: {rd.c_memory_main_size:5.0f}"
f"\n-cpu_s: {rd.s_memory_avg_cpu:6.1f} -pktmemUsed_s: {rd.s_memory_packetmem_used:4.0f} "
f" -memused_s: {rd.s_memory_main_used:5.0f} -memusedperc_s: {rd.s_memory_percent_used:3.1f}"
f" -mem_s: {rd.s_memory_main_size:5.0f}"
f"\n-attempt txn: {rd.c_http_attempted_txns:9.0f} -success txns: {rd.c_http_successful_txns:9.0f} "
f" -failed txns: {rd.c_http_unsuccessful_txns} (unsucc) + {rd.c_http_aborted_txns} (abort)"
# f"\n-total bw: {rd.c_total_bandwidth:,} -rx bw: {rd.c_rx_bandwidth:,}"
# f" tx bw: {rd.c_tx_bandwidth:,}"
# f"\n-ttfb cur avg: {rd.rolling_ttfb.avg_val} -ttfb prev: {rd.rolling_ttfb.avg_val_last} "
# f"-delta ttfb: {rd.rolling_ttfb.increase_avg} -ttfb list:{rd.rolling_ttfb.list}"
)
print(stats)
log.debug(stats)
def wait_for_running_status(self, cf, rd):
"""
Wait for the current test to return a 'running' status.
:return: True if no statements failed and there were no exceptions. False otherwise.
"""
log.debug("Inside the RunTest/wait_for_running_status method.")
i = 0
while True:
time.sleep(4)
rd.timer = int(round(time.time() - rd.start_time))
i += 4
if not self.update_test_run(cf, rd):
return False
if rd.status == "running":
print(f"{rd.timer}s - status: {rd.status}")
break
print(
f"{rd.timer}s - status: {rd.status} sub status: {rd.sub_status}"
)
if rd.status in {"failed", "finished"}:
log.error("Test failed")
return False
# check to see if another test with the same ID is running
# (can happen due to requests retry)
if i > 120 and rd.status == "waiting":
self.check_running_tests(cf, rd)
# stop after 1800 seconds of waiting
if i > 1800:
log.error(
"Waited for 1800 seconds, test did not transition to a running status."
)
return False
rd.time_to_run = rd.timer
log.debug(f"Test {rd.name} successfully went to running status.")
log.debug(json.dumps(rd.test_run_update, indent=4))
rd.run_id = rd.test_run_update.get("runId")
rd.report_link = (
"https://"
+ cf.controller_ip
+ "/#results/"
+ rd.type_v1
+ "/"
+ rd.run_id
)
return True
def check_running_tests(self, cf, rd):
"""Checks if tests with same ID is running and changes control to this test
This function can be triggered if waiting status is too long because the requests module retry mechanism has
kicked off two duplicate tests in error. It will look for matching running tests and switch control over to the
already running duplicate test.
:return: None
"""
# get list of run IDs and test IDs with status
test_runs = cf.list_test_runs()
# look for running status and compare ID
for run in test_runs:
if run["status"] == "running":
log.debug(
f"check_running_tests found running test: {json.dumps(run, indent=4)}"
)
# if waiting and running test IDs match, change the running test
if rd.test_id == run["testId"]:
log.debug(
f"check_running_tests found matching test_id {rd.test_id}"
)
# stop current waiting test
response = cf.stop_test(rd.id)
log.debug(
f"change_running_test, stopped duplicate waiting test: {response}"
)
# change over to running test
rd.id = run["id"]
else:
log.debug(
f"check_running_tests test_id: {rd.test_id} "
f"does not match running test_id: {run['testId']}"
)
def wait_for_running_sub_status(self, cf, rd):
"""
Wait for the current test to return a 'None' sub status.
:return: True if no statements failed and there were no exceptions. False otherwise.
"""
log.debug("Inside the RunTest/wait_for_running_sub_status method.")
i = 0
while True:
time.sleep(4)
rd.timer = int(round(time.time() - rd.start_time))
i += 4
if not self.update_test_run(cf, rd):
return False
print(
f"{rd.timer}s - status: {rd.status} sub status: {rd.sub_status}"
)
if rd.sub_status is None:
break
if rd.status in {"failed", "finished"}:
log.error("Test failed")
return False
            # stop after 360 seconds of waiting
if i > 360:
log.error(
"Waited for 360 seconds, test did not transition to traffic state."
)
return False
rd.time_to_start = rd.timer - rd.time_to_run
log.debug(f"Test {rd.name} successfully went to traffic state.")
log.debug(json.dumps(rd.test_run_update, indent=4))
return True
def stop_wait_for_finished_status(self, cf, rd):
"""
Stop and wait for the current test to return a 'finished' status.
:return: True if no statements failed and there were no exceptions.
False otherwise.
"""
log.debug("Inside the stop_test/wait_for_finished_status method.")
rd.time_to_stop_start = rd.timer
if rd.status == "running":
self.cf.stop_test(rd.id)
i = 0
while True:
if rd.c_desired_load > 0:
self.update_run_stats(cf, rd)
self.save_results(rd)
time.sleep(4)
rd.timer = int(round(time.time() - rd.start_time))
i += 4
if not self.update_test_run(cf, rd):
return False
if rd.status in {"stopped", "finished", "failed"}:
print(f"{rd.timer} status: {rd.status}")
break
if rd.status == "failed":
print(f"{rd.timer} status: {rd.status}")
return False
print(
f"{rd.timer}s - status: {rd.status} sub status: {rd.sub_status}"
)
if i > 1800:
error_msg = (
"Waited for 1800 seconds, "
"test did not transition to a finished status."
)
log.error(error_msg)
print(error_msg)
return False
rd.time_to_stop = rd.timer - rd.time_to_stop_start
log.debug(
f"Test {rd.name} successfully went to finished status in "
f"{rd.time_to_stop} seconds."
)
return True
def wait_for_test_activity(self, cf, rd):
"""
Wait for the current test to show activity - metric(s) different than 0.
:return: True if no statements failed and there were no exceptions.
False otherwise.
"""
log.debug("Inside the RunTest/wait_for_test_activity method.")
test_generates_activity = False
i = 0
while not test_generates_activity:
rd.timer = int(round(time.time() - rd.start_time))
self.update_test_run(cf, rd)
self.update_run_stats(cf, rd)
# self.print_test_status(rd)
if rd.sub_status is None:
self.print_test_stats(rd)
self.save_results(rd)
if rd.c_http_successful_txns_sec > 0:
test_generates_activity = True
if rd.status in {"failed", "finished"}:
log.error("Test failed")
return False
if i > 180:
error_msg = (
"Waited for 180 seconds, test did not have successful transactions"
)
log.error(error_msg)
print(error_msg)
return False
time.sleep(4)
i = i + 4
print(f"")
rd.time_to_activity = rd.timer - rd.time_to_start - rd.time_to_run
return True
@staticmethod
def countdown(t):
"""countdown function
Can be used after load increase for results to update
:param t: countdown in seconds
:return: None
"""
while t:
mins, secs = divmod(t, 60)
time_format = "{:02d}:{:02d}".format(mins, secs)
print(time_format, end="\r")
time.sleep(1)
t -= 1
def goal_seek(self, rd):
log.info(f"In goal_seek function")
if rd.c_current_load == 0:
rd.stop = True
log.info(f"goal_seek stop, c_current_load == 0")
return False
if rd.goal_seek_count >= rd.minimum_goal_seek_count:
rd.first_goal_load_increase = False
else:
rd.first_goal_load_increase = True
if rd.first_goal_load_increase:
new_load = rd.c_current_load + (rd.in_incr_low *
rd.in_capacity_adjust)
else:
if self.ocj.is_load_type_conns():
new_load = self.ocj.get_new_load()
elif self.check_if_load_type_simusers(rd):
new_load = self.goal_seek_set_simuser_kpi(rd, rd.kpi_1)
log.info(f"new_load = {new_load}")
elif self.check_if_load_type_default(rd):
new_load = self.goal_seek_set_default(rd)
log.info(f"new_load = {new_load}")
else:
report_error = f"Unknown load type: " \
f"{rd.test_config['config']['loadSpecification']['type']}"
log.error(report_error)
print(report_error)
return False
if new_load is False:
log.info(
f"Config load spec type: {rd.test_config['config']['loadSpecification']['type']}"
)
log.info(f"Goal_seek return, new_load is False")
return False
if self.test_type == "conns":
self.change_update_load(rd, new_load, 4)
else:
self.change_update_load(rd, new_load, 16)
return True
def ramp_seek(self, rd, ramp_kpi, ramp_to_value):
log.info(f"In ramp_seek function")
if rd.c_current_load == 0:
rd.stop = True
log.info(f"ramp_seek stop, c_current_load == 0")
return False
# if rd.first_ramp_load_increase:
# rd.first_ramp_load_increase = False
# new_load = rd.c_current_load * 2
if rd.in_ramp_step < 1:
rd.ramp_seek_complete = True
return
if ramp_kpi.current_value < ramp_to_value:
load_increase_multiple = round(ramp_to_value / ramp_kpi.current_value, 3)
load_increase = (rd.c_current_load * load_increase_multiple) - rd.c_current_load
load_increase = round(load_increase / rd.in_ramp_step, 3)
new_load = self.round_up_to_even(rd.c_current_load + load_increase)
rd.in_ramp_step = rd.in_ramp_step - 1
log.info(f"new load: {new_load}, current_load: {rd.c_current_load}"
f" * {load_increase} load_increase "
f"ramp_step left: {rd.in_ramp_step} "
f"\n ramp_to_value: {ramp_to_value} "
f"ramp_kpi.current_value: {ramp_kpi.current_value}"
)
rd.in_incr_low = self.round_up_to_even(new_load * rd.in_ramp_low/100)
rd.in_incr_med = self.round_up_to_even(new_load * rd.in_ramp_med/100)
rd.in_incr_high = self.round_up_to_even(new_load * rd.in_ramp_high/100)
        else:
            # target already reached; nothing left to ramp, so skip the load change
            # (previously new_load would have been unbound here, raising NameError)
            rd.ramp_seek_complete = True
            return True
        self.change_update_load(rd, new_load, 8)
        return True
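
    # Worked example (editor's sketch, values assumed): with current load 100, the
    # ramp KPI at 5,000 against a ramp_to_value of 10,000 and 2 ramp steps left,
    # the multiple is 2.0, the whole gap is 100 load units, one step adds 50, and
    # the next load becomes 150; the low/med/high increments are then rescaled as
    # percentages of that new load.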
@staticmethod
def round_up_to_even(v):
return math.ceil(v / 2.) * 2
def check_if_load_type_simusers(self, rd):
if rd.test_config["config"]["loadSpecification"]["type"].lower() in {
"simusers",
"simusers/second",
}:
return True
return False
def check_if_load_type_default(self, rd):
if rd.test_config["config"]["loadSpecification"]["type"].lower() in {
"bandwidth",
"connections",
"connections/second",
}:
return True
return False
def change_update_load(self, rd, new_load, count_down):
new_load = self.round_up_to_even(new_load)
log_msg = f"\nchanging load from: {rd.c_current_load} to: {new_load} status: {rd.status}"
log.info(log_msg)
print(log_msg)
try:
self.cf.change_load(rd.id, new_load)
rd.rolling_tps.load_increase_complete()
rd.rolling_ttfb.load_increase_complete()
rd.rolling_current_load.load_increase_complete()
rd.rolling_cps.load_increase_complete()
rd.rolling_conns.load_increase_complete()
rd.rolling_bw.load_increase_complete()
except Exception as detailed_exception:
log.error(
f"Exception occurred when changing test: " f"\n<{detailed_exception}>"
)
rd.goal_seek_count = rd.goal_seek_count + 1
self.countdown(count_down)
return True
def goal_seek_set_default(self, rd):
set_load = 0
if rd.c_current_desired_load_variance >= 0.97:
if rd.c_current_load <= rd.in_threshold_low:
set_load = rd.c_current_load + (
rd.in_incr_low * rd.in_capacity_adjust
)
elif rd.c_current_load <= rd.in_threshold_med:
set_load = rd.c_current_load + (
rd.in_incr_med * rd.in_capacity_adjust
)
elif rd.c_current_load <= rd.in_threshold_high:
set_load = rd.c_current_load + (
rd.in_incr_high * rd.in_capacity_adjust
)
elif rd.c_current_load > rd.in_threshold_high:
return False
else:
return False
if rd.in_threshold_high < set_load:
if rd.c_current_desired_load_variance > 0.99:
return False
else:
set_load = rd.in_threshold_high
return set_load
def goal_seek_set_simuser_kpi(self, rd, kpi):
log.debug(f"in goal_seek_set_simuser_kpi function")
set_load = 0
if kpi.increase_avg >= rd.in_threshold_low:
set_load = rd.c_current_load + (rd.in_incr_low *
rd.in_capacity_adjust)
elif kpi.increase_avg >= rd.in_threshold_med:
set_load = rd.c_current_load + (rd.in_incr_med *
rd.in_capacity_adjust)
elif kpi.increase_avg >= rd.in_threshold_high:
set_load = rd.c_current_load + (rd.in_incr_high *
rd.in_capacity_adjust)
elif kpi.increase_avg < rd.in_threshold_high:
log.info(
f"rolling_tps.increase_avg {kpi.increase_avg} < "
f"{rd.in_threshold_high} in_threshold_high"
)
return False
if kpi.avg_max_load_variance < 0.97:
set_load = rd.c_current_load
rd.max_load_reached = True
log.info(
f"set_load = {set_load} "
f"kpi_avg_max_load_variance: {kpi.avg_max_load_variance}"
)
return set_load
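
    # Worked example (editor's sketch, using the default thresholds of 20/5/1 %):
    # a KPI still growing 12% per step falls in the "med" tier, so the next load is
    # current + in_incr_med * capacity_adjust; growth under 1% stops the seek, and
    # an avg/max variance below 0.97 marks max load as reached.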
def update_rolling_averages(self, rd):
"""Updates rolling statistics averages used to make test control decisions
:return: None
"""
rd.rolling_tps.update(rd.c_http_successful_txns_sec)
rd.rolling_tps.check_if_stable(rd.max_var_reference)
rd.rolling_ttfb.update(rd.c_tcp_avg_ttfb)
rd.rolling_ttfb.check_if_stable(rd.max_var_reference)
rd.rolling_current_load.update(rd.c_current_load)
rd.rolling_current_load.check_if_stable(rd.max_var_reference)
rd.rolling_cps.update(rd.c_tcp_established_conn_rate)
rd.rolling_cps.check_if_stable(rd.max_var_reference)
rd.rolling_conns.update(rd.c_tcp_established_conns)
rd.rolling_conns.check_if_stable(rd.max_var_reference)
rd.rolling_bw.update(rd.c_total_bandwidth)
rd.rolling_bw.check_if_stable(rd.max_var_reference)
rd.rolling_count_since_goal_seek.update(1)
rd.rolling_count_since_goal_seek.check_if_stable(0)
def check_kpi(self, rd):
rd.in_kpi_1 = rd.in_kpi_1.lower()
if rd.in_kpi_1 == "tps":
rd.kpi_1 = rd.rolling_tps
elif rd.in_kpi_1 == "cps":
rd.kpi_1 = rd.rolling_cps
elif rd.in_kpi_1 == "conns":
rd.kpi_1 = rd.rolling_conns
elif rd.in_kpi_1 == "bw":
rd.kpi_1 = rd.rolling_bw
elif rd.in_kpi_1 == "ttfb":
rd.kpi_1 = rd.rolling_ttfb
else:
log.debug(f"check_kpi unknown kpi_1, setting to TPS")
rd.kpi_1 = rd.rolling_tps
rd.in_kpi_2 = rd.in_kpi_2.lower()
if rd.in_kpi_2 == "tps":
rd.kpi_2 = rd.rolling_tps
elif rd.in_kpi_2 == "cps":
rd.kpi_2 = rd.rolling_cps
elif rd.in_kpi_2 == "conns":
rd.kpi_2 = rd.rolling_conns
elif rd.in_kpi_2 == "bw":
rd.kpi_2 = rd.rolling_bw
elif rd.in_kpi_2 == "ttfb":
rd.kpi_2 = rd.rolling_ttfb
else:
log.debug(f"check_kpi unknown kpi_2, setting to CPS")
rd.kpi_2 = rd.rolling_cps
def check_ramp_seek_kpi(self, rd):
if rd.in_ramp_seek_kpi == "tps":
rd.ramp_seek_kpi = rd.rolling_tps
elif rd.in_ramp_seek_kpi == "cps":
rd.ramp_seek_kpi = rd.rolling_cps
elif rd.in_ramp_seek_kpi == "conns":
rd.ramp_seek_kpi = rd.rolling_conns
elif rd.in_ramp_seek_kpi == "bw":
rd.ramp_seek_kpi = rd.rolling_bw
elif rd.in_ramp_seek_kpi == "ttfb":
rd.ramp_seek_kpi = rd.rolling_ttfb
else:
log.debug(f"check_ramp_seek_kpi unknown kpi, setting to TPS")
rd.ramp_seek_kpi = rd.rolling_tps
@staticmethod
def return_bool_true(check_if, is_value):
if isinstance(check_if, bool):
return check_if
if isinstance(check_if, str) and check_if.lower() == is_value:
return True
return False
def control_test(self, cf, rd):
"""Main test control
Runs test. Start by checking if test is in running state followed by checking
for successful connections.
First updates stats, checks the phase test is in based on elapsed time, then updates
rolloing averages.
:return: True if test completed successfully
"""
# exit control_test if test does not go into running state
if not self.wait_for_running_status(cf, rd):
log.info(f"control_test end, wait_for_running_status False")
return False
# exit control_test if test does not go into running state
if not self.wait_for_running_sub_status(cf, rd):
log.info(f"control_test end, wait_for_running_sub_status False")
return False
# exit control_test if test does not have successful transactions
if not self.wait_for_test_activity(cf, rd):
self.stop_wait_for_finished_status(cf, rd)
log.info(f"control_test end, wait_for_test_activity False")
return False
self.check_ramp_seek_kpi(rd)
self.check_kpi(rd)
rd.rolling_count_since_goal_seek.reset()
# self.countdown(12)
# test control loop - runs until self.stop is set to True
while not rd.stop:
self.update_run_stats(cf, rd)
self.update_phase(rd)
self.check_stop_conditions(rd)
self.update_rolling_averages(rd)
# print stats if test is running
if rd.sub_status is None:
self.print_test_stats(rd)
self.save_results(rd)
if rd.in_ramp_seek and not rd.ramp_seek_complete:
log.info(f"control_test going to ramp_seek")
self.control_test_ramp_seek(rd, rd.ramp_seek_kpi, rd.in_ramp_seek_value)
if rd.in_goal_seek and rd.ramp_seek_complete:
log.info(f"control_test going to goal_seek")
self.control_test_goal_seek_kpi(rd, rd.kpi_1, rd.kpi_2,
rd.in_kpi_and_or)
print(f"")
time.sleep(4)
# if goal_seek is yes enter sustained steady phase
self.wait_openconn_cps_end(cf, rd)
if rd.in_goal_seek and rd.in_sustain_period > 0:
self.sustain_test(cf, rd)
# stop test and wait for finished status
if self.stop_wait_for_finished_status(cf, rd):
rd.time_to_stop = rd.timer - rd.time_to_stop_start
#self.save_results(rd)
return True
return False
def check_stop_conditions(self, rd):
log.debug(f"in check_stop_conditions method")
# stop test if time_remaining returned from controller == 0
if rd.time_remaining == 0:
rd.phase = "timeout"
log.info(f"control_test end, time_remaining == 0")
rd.stop = True
# stop goal seeking test if time remaining is less than 30s
if rd.time_remaining < 30 and rd.in_goal_seek:
rd.phase = "timeout"
log.info(f"control_test end goal_seek, time_remaining < 30")
rd.stop = True
elif rd.time_remaining < 30 and rd.in_ramp_seek:
rd.phase = "timeout"
log.info(f"control_test end ramp_seek, time_remaining < 30")
rd.stop = True
if rd.phase == "finished":
log.info(f"control_test end, over duration time > phase: finished")
rd.stop = True
def control_test_ramp_seek(self, rd, ramp_kpi, ramp_to_value):
"""
Increases load to a configured tps, cps, conns or bandwidth level.
:return: True if no statements failed and there were no exceptions.
False otherwise.
"""
ramp_seek_count = 1
#log.debug("Inside the RunTest/ramp_to_seek method.")
log.info(
f"Inside the RunTest/ramp_to_seek method.\n"
f"rolling_count_list stable: {rd.rolling_count_since_goal_seek.stable} "
f"list: {rd.rolling_count_since_goal_seek.list} "
f"\nramp_to_value: {ramp_to_value} ramp_kpi current: {ramp_kpi.current_value}"
f" increase: {ramp_kpi.increase_avg}"
f"\n current load: {rd.c_current_load}"
f" desired_load: {rd.c_desired_load}"
)
if rd.phase != "rampseek":
log.info(f"phase {rd.phase} is not 'rampseek', "
f"returning from contol_test_ramp_seek")
return
if not rd.rolling_count_since_goal_seek.stable:
log.info(f"count since goal seek is not stable. "
f"count list: {rd.rolling_count_since_goal_seek.list}"
f"returning from control_test_ramp_seek")
return
if rd.max_load_reached:
log.info(f"control_test_ramp_seek end, max_load_reached")
rd.stop = True
return
# check if kpi avg is under set avg - if not, stop loop
if ramp_to_value < ramp_kpi.current_value:
log.info(f"ramp_to_value {ramp_to_value} < ramp_kpi.current_value {ramp_kpi.current_value}"
f"completed ramp_seek")
rd.ramp_seek_complete = True
rd.in_capacity_adjust = 1
return
if self.ramp_seek(rd, ramp_kpi, ramp_to_value):
# reset rolling count > no load increase until
# at least the window size interval.
# allows stats to stabilize after an increase
rd.rolling_count_since_goal_seek.reset()
else:
log.info(f"control_test_ramp_seek end, ramp_seek False")
rd.ramp_seek_complete = True
rd.in_capacity_adjust = 1
return
if (ramp_kpi.current_value / ramp_to_value) > 0.95:
log.info(
f"ramp_kpi.current_value {ramp_kpi.current_value} / "
f"ramp_to_value {ramp_to_value} > 0.95 "
f"increasing ramp_seek_count + 1")
ramp_seek_count = ramp_seek_count + 1
if ramp_seek_count == rd.in_ramp_step:
log.info(f"ramp_seek_complete early")
rd.ramp_seek_complete = True
rd.in_capacity_adjust = 1
return
return
def control_test_goal_seek_kpi(self, rd, kpi_1,
kpi_2, kpis_and_bool):
log.info(
f"rolling_count_list stable: {rd.rolling_count_since_goal_seek.stable} "
f"list: {rd.rolling_count_since_goal_seek.list} "
f"\nKpi1 stable: {kpi_1.stable} list: {kpi_1.list}"
f"\nKpi2 stable: {kpi_2.stable} list: {kpi_2.list}"
)
self.ocj.capture_goal_seek_iteration()
if rd.phase != "goalseek":
log.info(f"phase {rd.phase} is not 'goalseek', "
f"returning from contol_test_goal_seek")
return
if self.test_type == "conns":
if rd.in_load_type == "Connections" :
log.info(f"tps: {rd.c_http_successful_txns_sec} -cps: {rd.c_tcp_established_conn_rate}")
if rd.c_http_successful_txns_sec == 0 or rd.c_tcp_established_conn_rate == 0:
log.info(f"Ready for Open Conns goal seeking")
pass
else:
log.info(f"Not ready for Open Conns goal seeking, continue to add load")
return
elif rd.in_load_type == "SimUsers":
message = "Heap Memory state changed from OK to Throttled Free"
log.info(f"rd.c_memory_percent_used is: {rd.c_memory_percent_used}")
log.info(f"rd.c_simusers_suspending is: {rd.c_simusers_suspending}")
if rd.c_memory_percent_used > 97:
eventlogs = self.cf.fetch_event_logs(rd.id)
for line in eventlogs["logs"]:
if message in line:
log.debug(f"eventLog: \n{json.dumps(eventlogs, indent=4)}")
message = line + f"\nSuspending Simusers: {rd.c_simusers_suspending}, stop goal seek"
print(message)
rd.max_load_reached = True
rd.stop = True
return
if rd.c_simusers_suspending > 0:
message = f"Suspending Simusers: {rd.c_simusers_suspending}, stop goal seek"
log.debug(message)
print(message)
rd.max_load_reached = True
rd.stop = True
return
if rd.c_tcp_established_conn_rate == 0:
log.info(f"cps: {rd.c_tcp_established_conn_rate}, ready for goal seek")
pass
else:
log.info(f"cps: {rd.c_tcp_established_conn_rate}, not ready for goal seek, continue to add load")
return
elif not rd.rolling_count_since_goal_seek.stable:
log.info(f"count since goal seek is not stable. "
f"count list: {rd.rolling_count_since_goal_seek.list}")
return
if rd.max_load_reached:
log.info(f"control_test end, max_load_reached")
rd.stop = True
return
if rd.goal_seek_count < 3:
if not kpi_1.stable or not kpi_2.stable:
rd.minimum_goal_seek_count = 3
if self.test_type == "conns":
goal_seek = True
elif rd.goal_seek_count < rd.minimum_goal_seek_count:
goal_seek = True
elif kpis_and_bool:
if kpi_1.stable and kpi_2.stable:
goal_seek = True
else:
goal_seek = False
else:
if kpi_1.stable or kpi_2.stable:
goal_seek = True
else:
goal_seek = False
if goal_seek:
if self.goal_seek(rd):
# reset rolling count > no load increase until
# at least the window size interval.
# allows stats to stabilize after an increase
rd.rolling_count_since_goal_seek.reset()
else:
log.info(f"control_test end, goal_seek False")
rd.stop = True
def wait_openconn_cps_end(self, cf, rd):
if self.test_type == "conns" and rd.in_load_type == "SimUsers":
while rd.c_tcp_established_conn_rate > 0:
log.info(f"still waiting cps down to 0")
self.update_run_stats(cf, rd)
if rd.sub_status is None:
self.print_test_stats(rd)
self.save_results(rd)
time.sleep(4)
return True
def sustain_test(self, cf, rd):
rd.phase = "steady"
while rd.in_sustain_period > 0:
rd.timer = int(round(time.time() - rd.start_time))
sustain_period_loop_time_start = time.time()
self.update_run_stats(cf, rd)
if rd.time_remaining < 30 and rd.in_goal_seek:
rd.phase = "timeout"
rd.in_sustain_period = 0
log.info(f"sustain_test end, time_remaining < 30")
print(f"sustain period time left: {int(rd.in_sustain_period)}")
# print stats if test is running
if rd.sub_status is None:
self.print_test_stats(rd)
self.save_results(rd)
time.sleep(4)
rd.in_sustain_period = rd.in_sustain_period - (
time.time() - sustain_period_loop_time_start
)
rd.phase = "stopping"
# self.stop_wait_for_finished_status(cf, rd)
return True
def save_results(self, rd):
csv_list = [
rd.in_name,
rd.time_elapsed,
rd.phase,
rd.c_current_load,
rd.c_desired_load,
rd.rolling_count_since_goal_seek.stable,
rd.c_http_successful_txns_sec,
rd.rolling_tps.stable,
rd.rolling_tps.increase_avg,
rd.c_http_successful_txns,
rd.c_http_unsuccessful_txns,
rd.c_http_aborted_txns,
rd.c_transaction_error_percentage,
rd.c_tcp_established_conn_rate,
rd.rolling_cps.stable,
rd.rolling_cps.increase_avg,
rd.c_tcp_established_conns,
rd.rolling_conns.stable,
rd.rolling_conns.increase_avg,
rd.c_tcp_avg_tt_synack,
rd.c_tcp_avg_ttfb,
rd.rolling_ttfb.stable,
rd.rolling_ttfb.increase_avg,
rd.c_url_avg_response_time,
rd.c_tcp_cumulative_established_conns,
rd.c_tcp_cumulative_attempted_conns,
rd.c_total_bandwidth,
rd.rolling_bw.stable,
rd.rolling_bw.increase_avg,
rd.c_rx_bandwidth,
rd.c_tx_bandwidth,
rd.c_total_byte_rate,
rd.c_rx_byte_rate,
rd.c_tx_byte_rate,
rd.c_total_packet_count,
rd.c_rx_packet_count,
rd.c_tx_packet_count,
rd.c_rx_packet_rate,
rd.c_tx_packet_rate,
rd.s_tcp_closed,
rd.s_tcp_closed_reset,
rd.s_tcp_closed_error,
rd.c_simusers_alive,
rd.c_simusers_animating,
rd.c_simusers_blocking,
rd.c_simusers_sleeping,
rd.c_loadspec_avg_cpu,
rd.c_memory_percent_used,
rd.c_memory_packetmem_used,
rd.c_memory_rcv_queue_length,
rd.s_memory_avg_cpu,
rd.s_memory_percent_used,
rd.s_memory_packetmem_used,
rd.s_memory_rcv_queue_length,
rd.type_v1,
rd.type_v2,
rd.in_load_type,
rd.test_id,
rd.id,
rd.time_to_run,
rd.time_to_start,
rd.time_to_activity,
rd.time_to_stop,
script_version,
rd.report_link,
]
self.result_file.append_file(csv_list)
class DetailedCsvReport:
def __init__(self, report_location):
log.debug("Initializing detailed csv result files.")
self.time_stamp = time.strftime("%Y%m%d-%H%M")
log.debug(f"Current time stamp: {self.time_stamp}")
self.report_location_parent = report_location
#self.report_csv_file = report_location / f"{self.time_stamp}_Detailed.csv"
self.columns = [
"test_name",
"seconds",
"state",
"current_load",
"desired_load",
"seek_ready",
"tps",
"tps_stable",
"tps_delta",
"successful_txn",
"unsuccessful_txn",
"aborted_txn",
"txn_error_rate",
"cps",
"cps_stable",
"cps_delta",
"open_conns",
"conns_stable",
"conns_delta",
"tcp_avg_tt_synack",
"tcp_avg_ttfb",
"ttfb_stable",
"ttfb_delta",
"url_response_time",
"total_tcp_established",
"total_tcp_attempted",
"total_bandwidth",
"bw_stable",
"bw_delta",
"rx_bandwidth",
"tx_bandwidth",
"total_byte_rate",
"rx_byte_rate",
"tx_byte_rate",
"total_packet_count",
"rx_packet_count",
"tx_packet_count",
"rx_packet_rate",
"tx_packet_rate",
"tcp_closed",
"tcp_reset",
"tcp_error",
"simusers_alive",
"simusers_animating",
"simusers_blocking",
"simusers_sleeping",
"client_cpu",
"client_mem",
"client_pkt_mem",
"client_rcv_queue",
"server_cpu",
"server_mem",
"server_pkt_mem",
"server_rcv_queue",
"test_type_v1",
"test_type_v2",
"load_type",
"test_id",
"run_id",
"t_run",
"t_start",
"t_tx",
"t_stop",
"version",
"report",
]
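        # NOTE (added): this header list is positional and must stay in
        # lockstep with the csv_list built in RunTest.save_results() above;
        # adding or reordering a field requires the same change in both places.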
def append_columns(self):
"""
Appends the column headers to the detailed report file.
:return: no specific return value.
"""
try:
csv_header = ",".join(map(str, self.columns)) + "\n"
with open(self.report_csv_file, "a") as f:
f.write(csv_header)
except Exception as detailed_exception:
log.error(
f"Exception occurred writing to the detailed report file: \n<{detailed_exception}>\n"
)
log.debug(
f"Successfully appended columns to the detailed report file: {self.report_csv_file}."
)
def append_file(self, csv_list):
"""
Appends the detailed report csv file with csv_line.
:param csv_list: items to be appended as line to the file.
:return: no specific return value.
"""
try:
csv_line = ",".join(map(str, csv_list)) + "\n"
with open(self.report_csv_file, "a") as f:
f.write(csv_line)
except Exception as detailed_exception:
log.error(
f"Exception occurred writing to the detailed report file: \n<{detailed_exception}>\n"
)
    def make_report_csv_file(self, new_report_csv_name):
        new_report_csv_name = self.report_location / f"{new_report_csv_name}_{self.time_stamp}_Detailed.csv"
        print(new_report_csv_name)
        # point appends at this file either way; only write the header row
        # when the file does not already exist
        self.report_csv_file = new_report_csv_name
        if not new_report_csv_name.is_file():
            self.append_columns()
def make_report_dir(self, report_dir_name):
report_dir = self.report_location_parent / report_dir_name
if report_dir.is_dir():
pass
else:
report_dir.mkdir(parents=False, exist_ok=True)
self.report_location = report_dir
class Report:
def __init__(self, report_csv_file, column_order):
self.report_csv_file = report_csv_file
self.col_order = column_order
self.df_base = pd.read_csv(self.report_csv_file)
self.df_steady = self.df_base[self.df_base.state == "steady"].copy()
self.unique_tests = self.df_base["test_name"].unique().tolist()
self.results = []
self.process_results()
self.format_results()
self.df_results = pd.DataFrame(self.results)
self.df_results = self.df_results.reindex(columns=self.col_order)
self.df_filter = pd.DataFrame(self.df_results)
def process_results(self):
for name in self.unique_tests:
d = {}
d["test_name"] = name
# get mean values from steady state
mean_cols = [
"cps",
"tps",
"total_bandwidth",
"open_conns",
"tcp_avg_tt_synack",
"tcp_avg_ttfb",
"url_response_time",
"client_cpu",
"client_pkt_mem",
"client_rcv_queue",
"server_cpu",
"server_pkt_mem",
"server_rcv_queue",
]
for col in mean_cols:
d[col] = self.df_steady.loc[
self.df_steady["test_name"] == name, col
].mean()
# get maximum values for all states
max_cols = [
"successful_txn",
"unsuccessful_txn",
"aborted_txn",
"total_tcp_established",
"total_tcp_attempted",
"seconds",
"current_load",
"t_run",
"t_start",
"t_tx",
"t_stop",
]
for col in max_cols:
d[col] = self.df_base.loc[self.df_base["test_name"] == name, col].max()
max_steady_cols = ["seconds"]
for col in max_steady_cols:
d[col] = self.df_steady.loc[
self.df_steady["test_name"] == name, col
].max()
# checks steady vs. all state max, add _max to column name
max_compare_cols = ["cps", "tps", "total_bandwidth"]
for col in max_compare_cols:
col_name = col + "_max"
d[col_name] = self.df_base.loc[
self.df_base["test_name"] == name, col
].max()
# find current_load and seconds for max tps
d["max_tps_load"] = self.df_base.loc[
self.df_base["tps"] == d["tps_max"], "current_load"
].iloc[0]
d["max_tps_seconds"] = self.df_base.loc[
self.df_base["tps"] == d["tps_max"], "seconds"
].iloc[0]
total_pkt_count_sum = self.df_base.loc[self.df_base["test_name"] == name, "total_packet_count"].sum()
total_byte_rate_sum = self.df_base.loc[self.df_base["test_name"] == name, "total_byte_rate"].sum()
if total_pkt_count_sum > 0:
d["avg_pkt_size"] = int(total_byte_rate_sum / total_pkt_count_sum)
else:
d["avg_pkt_size"] = 0
# get script version from test
d["version"] = self.df_base.loc[self.df_base["test_name"] == name, "version"].iloc[0]
# get report link for current test - changed to take from last row in test
# d["report"] = self.df_base.loc[self.df_base["tps"] == d["tps_max"], "report"].iloc[0]
d["report"] = self.df_base.loc[self.df_base["test_name"] == name, "report"].iloc[-1]
# find min and max tps from steady phase
max_steady_compare = ["tps"]
for col in max_steady_compare:
col_name_min = col + "_stdy_min"
col_name_max = col + "_stdy_max"
col_name_delta = col + "_stdy_delta"
d[col_name_min] = self.df_steady.loc[
self.df_steady["test_name"] == name, col
].min()
d[col_name_max] = self.df_steady.loc[
self.df_steady["test_name"] == name, col
].max()
if d[col_name_min] != 0:
d[col_name_delta] = (
(d[col_name_max] - d[col_name_min]) / d[col_name_min]
) * 100
d[col_name_delta] = round(d[col_name_delta], 3)
else:
d[col_name_delta] = 0
self.results.append(d)
def reset_df_filter(self):
self.df_filter = pd.DataFrame(self.df_results)
def filter_rows_containing(self, test_name_contains):
if test_name_contains is not None:
self.df_filter = self.df_filter[
self.df_filter.test_name.str.contains(test_name_contains)
].copy()
def filter_columns(self, filtered_columns):
if filtered_columns is not None:
self.df_filter.drop(
self.df_filter.columns.difference(filtered_columns), axis=1, inplace=True
)
def format_results(self):
for row_num, row in enumerate(self.results):
for key, value in row.items():
if key in {
"cps",
"tps",
"total_bandwidth",
"open_conns",
"successful_txn",
"unsuccessful_txn",
"aborted_txn",
"avg_pkt_size",
"total_tcp_established",
"total_tcp_attempted",
"tps_stdy_min",
"tps_stdy_max",
"cps_max",
"tps_max",
"total_bandwidth_max",
"max_tps_load",
"client_mem",
"client_pkt_mem",
"client_rcv_queue",
"server_mem",
"server_pkt_mem",
"server_rcv_queue",
"t_run",
"t_start",
"t_tx",
"t_stop",
}:
self.results[row_num][key] = f"{value:,.0f}"
elif key in {
"tcp_avg_ttfb",
"url_response_time",
"tcp_avg_tt_synack",
"client_cpu",
"server_cpu",
}:
self.results[row_num][key] = f"{value:,.1f}"
elif key in {"tps_stdy_delta"}:
self.results[row_num][key] = f"{value:,.2f}"
elif key in {"report"}:
self.results[row_num][key] = f'<a href="{value}">link</a>'
@staticmethod
def style_a():
styles = [
# table properties
dict(
selector=" ",
props=[
("margin", "0"),
("width", "100%"),
("font-family", '"Helvetica", "Arial", sans-serif'),
("border-collapse", "collapse"),
("border", "none"),
("border", "2px solid #ccf"),
# ("min-width", "600px"),
("overflow", "auto"),
("overflow-x", "auto"),
],
),
# header color - optional
dict(
selector="thead",
props=[
("background-color", "SkyBlue"),
("width", "100%")
# ("display", "table") # adds fixed scrollbar
# ("position", "fixed")
],
),
# background shading
dict(
selector="tbody tr:nth-child(even)",
props=[("background-color", "#fff")],
),
dict(
selector="tbody tr:nth-child(odd)", props=[("background-color", "#eee")]
),
# cell spacing
dict(selector="td", props=[("padding", ".5em")]),
# header cell properties
dict(
selector="th",
props=[
("font-size", "100%"),
("text-align", "center"),
("min-width", "25px"),
("max-width", "50px"),
("word-wrap", "break-word"),
],
),
# render hover last to override background-color
dict(selector="tbody tr:hover", props=[("background-color", "SkyBlue")]),
]
return styles
def html_table(self, selected_style):
# Style
props = {
"test_name": {"width": "20em", "min-width": "14em", "text-align": "left"},
"cps": {"width": "6em", "min-width": "5em", "text-align": "right"},
"tps": {"width": "6em", "min-width": "5em", "text-align": "right"},
"cps_max": {"width": "6em", "min-width": "5em", "text-align": "right"},
"tps_max": {"width": "6em", "min-width": "5em", "text-align": "right"},
"total_bandwidth": {
"width": "8em",
"min-width": "7em",
"text-align": "right",
},
"total_bandwidth_max": {
"width": "8em",
"min-width": "7em",
"text-align": "right",
},
"open_conns": {"width": "8em", "min-width": "7em", "text-align": "right"},
"tcp_avg_tt_synack": {
"width": "3.7em",
"min-width": "3.7em",
"text-align": "right",
},
"tcp_avg_ttfb": {
"width": "3.7em",
"min-width": "3.7em",
"text-align": "right",
},
"avg_pkt_size": {
"width": "7em",
"min-width": "6em",
"text-align": "right",
},
"url_response_time": {
"width": "3.7em",
"min-width": "3.7em",
"text-align": "right",
},
"report": {"width": "3.7em", "min-width": "3.7em", "text-align": "right"},
"successful_txn": {
"width": "8em",
"min-width": "7em",
"text-align": "right",
},
"total_tcp_established": {
"width": "5em",
"min-width": "5em",
"text-align": "right",
},
"total_tcp_attempted": {
"width": "5em",
"min-width": "5em",
"text-align": "right",
},
"seconds": {"width": "3.7em", "min-width": "3.7em", "text-align": "right"},
"tps_stdy_min": {"width": "3.2em", "min-width": "3.2em", "text-align": "right"},
"tps_stdy_max": {"width": "3.2em", "min-width": "3.2em", "text-align": "right"},
"tps_stdy_delta": {
"width": "3.2em",
"min-width": "3.2em",
"text-align": "right",
},
"client_cpu": {"width": "3em", "min-width": "3em", "text-align": "right"},
"server_cpu": {"width": "3em", "min-width": "3em", "text-align": "right"},
"client_pkt_mem": {
"width": "3.5em",
"min-width": "3.5em",
"text-align": "right",
},
"client_rcv_queue": {
"width": "3.5em",
"min-width": "3.5em",
"text-align": "right",
},
"server_pkt_mem": {
"width": "3.9em",
"min-width": "3.9em",
"text-align": "right",
},
"server_rcv_queue": {
"width": "3.9em",
"min-width": "3.9em",
"text-align": "right",
},
"current_load": {
"width": "3.7em",
"min-width": "3.7em",
"text-align": "right",
},
"unsuccessful_txn": {
"width": "3.8em",
"min-width": "3.8em",
"text-align": "right",
},
"aborted_txn": {
"width": "3.5em",
"min-width": "3.5em",
"text-align": "right",
},
"max_tps_seconds": {
"width": "3.7em",
"min-width": "3.7em",
"text-align": "right",
},
"max_tps_load": {
"width": "3.7em",
"min-width": "3.7em",
"text-align": "right",
},
"t_run": {"width": "3em", "min-width": "3.7em", "text-align": "right"},
"t_start": {"width": "3em", "min-width": "3em", "text-align": "right"},
"t_tx": {"width": "3em", "min-width": "3em", "text-align": "right"},
"t_stop": {"width": "3em", "min-width": "3em", "text-align": "right"},
"version": {"width": "3em", "min-width": "3em", "text-align": "right"},
}
# html = ''
all_columns = set(self.df_filter.columns)
html = self.df_filter.style.set_properties(
subset="test_name", **props["test_name"]
)
for k, v in props.items():
if k in all_columns:
html = html.set_properties(subset=k, **v)
try:
html = html.set_table_styles(selected_style).hide_index().render()
except AttributeError:
html = html.set_table_styles(selected_style).hide(axis=0).to_html()
return html
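# Usage sketch (illustrative; the file name and column list below are
# assumptions, not a verbatim excerpt of the original project):
# report = Report("20200101-1200_Detailed.csv", column_order=["test_name", "tps", "cps"])
# report.filter_rows_containing("http")
# html = report.html_table(Report.style_a())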
| true
|
5ec5a4bc6491d2c8a251413bbd10bf7801777cfd
|
Python
|
barcern/python-crash-course
|
/chapter4/4-12_more_loops.py
|
UTF-8
| 534
| 4.15625
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 6 09:10:41 2020
@author: barbora
"""
# Choose a version of foods.py from the book and print using for loops.
# Using foods.py from the book
my_foods = ['pizza', 'falafel', 'carrot cake']
friend_foods = my_foods[:]
my_foods.append('cannoli')
friend_foods.append('ice-cream')
# Print foods using for loops
print("My favourite foods are:")
for food in my_foods:
print(food.title())
print("\nMy friend's favourite foods are:")
for food in friend_foods:
print(food.title())
| true
|
6742277e91826ea4a7b2be9957a372c18730623d
|
Python
|
NathanZorndorf/cheat-sheets
|
/Modules/Numpy/Numpy Cheat Sheet.py
|
UTF-8
| 333
| 3.34375
| 3
|
[] |
no_license
|
#----------------------- NUMPY CHEAT SHEET -------------------------#
import numpy as np
#--------- 2D ----------#
# Create 2D array
a = np.array([[1,2,3],
[4,5,6]])
# index to reference entire first row
a[0] # output => array([1, 2, 3])
a[1, -2:] # last two column values in the second row => array([5, 6])
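# More indexing examples (added illustration; outputs assume the array above)
a[:, 1]   # second column of every row => array([2, 5])
a[0, 0]   # single element => 1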
| true
|
afed8508ee701b8b414e91d4a6428fbb9eeac579
|
Python
|
tachyon77/flat-resnet
|
/dataset.py
|
UTF-8
| 1,564
| 2.75
| 3
|
[] |
no_license
|
"""Dataset class.
Author:
Mohammad Mahbubuzzaman (tachyon77@gmail.com)
"""
import logging
import torch.nn.functional as F
import torch.utils.data as data
from torch.utils.data import dataset
import torch
import ujson as json
from collections import Counter
class ImageDataset(data.Dataset):
"""Random Dataset.
The dataset is a tensor with first dimension as N and remaining as image dimension.
Args:
data_path (str): Path to .npz file containing dataset.
"""
def __init__(self, data_path):
super(ImageDataset, self).__init__()
self.dataset = torch.load(data_path)
def __getitem__(self, idx):
return self.dataset[idx]
def __len__(self):
return self.dataset.shape[0] # Since first dimension is the N
class ResnetOutputDataset(data.Dataset):
"""Random Dataset.
The dataset is a tensor with first dimension as N and remaining as image dimension.
Args:
data_path (str): Path to .npz file containing dataset.
"""
def __init__(
self,
input_data_path='/home/mahbub/research/flat-resnet/random_images.npz',
output_data_path='/home/mahbub/research/flat-resnet/10000_random_chw_tensors.npz'):
super(ResnetOutputDataset, self).__init__()
self.input_tensors = torch.load(input_data_path)
self.output_tensors = torch.load(output_data_path)
def __getitem__(self, idx):
return (self.input_tensors[idx], self.output_tensors[idx])
def __len__(self):
return self.input_tensors.shape[0] # Since first dimension is the N
| true
|
44ce1b0e8ce2e634d6bb04690b3d3b4f6d9a36f9
|
Python
|
artificialsoph/py_prac
|
/tests/test_pymat.py
|
UTF-8
| 2,101
| 2.78125
| 3
|
[] |
no_license
|
from click.testing import CliRunner
from pymatrix import main
def assert_sub_out(command, output):
runner = CliRunner()
result = runner.invoke(main, command)
assert result.exit_code == 0
assert result.output == output
def test_import():
"""
test all the input types using the sample data
"""
mat_out = '[[1 2 3]\n [2 3 4]\n [1 1 1]]\n'
input_list = [
['-f', 'data/sample.csv'],
['--csv-file', 'data/sample.csv'],
['-p', 'data/sample.p'],
['--pickle-file', 'data/sample.p'],
['-s', 'data/sample.coo'],
['--sparse-coo', 'data/sample.coo'],
['-j', 'data/sample.json'],
['--json-data', 'data/sample.json'],
]
for in_pair in input_list:
assert_sub_out(["print_mat"] + in_pair, mat_out)
def test_echo_json():
assert_sub_out(["echo", '-j', 'data/sample.json', '2'],
("\nThe given input was of type: json_data\n"
"And the value was: data/sample.json\n"
"\nThe given input was of type: json_data\n"
"And the value was: data/sample.json\n\n"))
def test_closest_to():
assert_sub_out(["closest_to", '-j', "data/test.json", '8'], "1 8\n")
assert_sub_out(["closest_to", '-j', "data/test.json", '8',
"--distance"],
"1 8 15.8113883008\n")
def test_closest():
assert_sub_out(["closest", '-j', "data/test.json", '2'],
"6 7\n5 6\n")
assert_sub_out(["closest", '-j', "data/test.json", '2', '--distance'],
"6 7 2.82842712475\n5 6 10.4403065089\n")
def test_furthest():
assert_sub_out(["furthest", '-j', "data/test.json", '3'],
"7 8\n6 8\n5 8\n")
assert_sub_out(["furthest", '-j', "data/test.json", '3', '--distance'],
"7 8 117.796434581\n6 8 114.978258814\n5 8 106.40018797\n")
def test_centroids():
assert_sub_out(["centroids", '-j', "data/test.json", '3'],
"88.6666666667 18.6666666667\n46.25 37.5\n"
"11.3333333333 81.0\n")
| true
|
80f9be7b604809dc0f596b085c578fcf74277755
|
Python
|
Dennysro/Python_DROP
|
/9.0_List_Comprehension.py
|
UTF-8
| 1,448
| 4.46875
| 4
|
[] |
no_license
|
"""
List Comprehension:
- Using a list comprehension we can generate new lists with data processed
from another iterable.
# List comprehension syntax
[item for item in iterable]
# Examples:
# 1
numeros = [1, 2, 3, 4, 5]
res = [numero*10 for numero in numeros]
print(res)
# To understand what Python does, let's split the expression into 2 parts:
# 1- The first: for numero in numeros
# 2- The second: numero*10
# 2
def funcao(valor):
    return valor * valor
res = [funcao(numero) for numero in numeros]
print(res)
"""
# List comprehensions versus Loop
# Loop
numeros = [1, 2, 3, 4, 5]
numeros_dobrados = []
for numero in numeros:
numero_dobrado = numero*2
numeros_dobrados.append(numero_dobrado)
print(numeros)
print(numeros_dobrados)
# List Comprehension
print([numero*2 for numero in numeros])
# or
print([numero*2 for numero in [1, 2, 3, 4, 5]])
# More examples:
# 1
nome = 'Drop The Beat'
print([letra.upper() for letra in nome])
# 2
def caixa_alta(nome):
nome = nome.replace(nome[0], nome[0].upper())
return nome
amigos = ['maria', 'julia', 'pedro', 'guilherme', 'vanessa']
print([caixa_alta(amigo) for amigo in amigos])
# 3
print([numero*3 for numero in range(1, 10)])
# 4
print([bool(valor) for valor in [0, [], '', True, 1, 3.14]])
# 5
print([str(numero) for numero in [1, 2, 3, 4, 5]])
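# 6 - with a filter condition (added illustration, not in the original file)
print([numero for numero in range(1, 21) if numero % 2 == 0])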
| true
|
abb619392a2595a6398247604ba9aec573884e2c
|
Python
|
Erich6917/python_littlespider
|
/demo/beautiful/youku/YoukuDemo.py
|
UTF-8
| 2,087
| 2.625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# @Time : 2018/3/26
# @Author : LIYUAN134
# @File : YoukuDemo.py
# @Comment:
#
import urllib, urllib2, sys, os
from bs4 import BeautifulSoup
import itertools, re
url_i = 1
pic_num = 1
# Custom helper that converts straight quotes to Chinese curly quotes
def _en_to_cn(str):
obj = itertools.cycle(['“', '”'])
_obj = lambda x: obj.next()
return re.sub(r"['\"]", _obj, str)
def start():
    # Download the videos from 3 consecutive pages
url = 'http://v.youku.com/v_show/id_XMTI5NjE4MTk2OA==.html?spm=a2h0k.8191407.0.0'
webContent = urllib2.urlopen(url)
data = webContent.read()
    # Parse the video-list page with BeautifulSoup
soup = BeautifulSoup(data)
print "-------------------------Page " + str(url_i) + "-------------------------"
    # Get the lists of video thumbnails and titles on this page
tag_list_thumb = soup.findAll('li', 'v_thumb')
tag_list = soup.findAll('li', "v_title")
for item in tag_list:
        # Follow each item's href to the video playback page
web_video_play = urllib2.urlopen(item.a['href'])
data_vp = web_video_play.read()
        # Parse the playback page with BeautifulSoup
soup_vp = BeautifulSoup(data_vp)
        # Find the link labelled "Download"
tag_vp_list = soup_vp.findAll('a', id='fn_download')
for item_vp in tag_vp_list:
            # Save the download link in url_dw
url_dw = '"' + item_vp['_href'] + '"'
print item.a['title'] + ": " + url_dw
            # Run iku from the command line to download the video; iku must be on the PATH
os.system("iku " + url_dw)
    # Save each video's thumbnail
for item_thumb in tag_list_thumb:
urllib.urlretrieve(item_thumb.img['src'], "c:\\ZDownload\\thumbnails\\" + str(pic_num) + "." +
_en_to_cn(item_thumb.img['title']) + ".jpg")
print "--------------------------------------------------------------"
print "--------Page " + str(url_i) + "'s video thumbnails have been saved!"
if __name__ == '__main__':
start()
| true
|
0a8d01f90e6011b865ae553be8b3c79c0d8abe51
|
Python
|
sourcepirate/tailow
|
/tailow/fields/reference.py
|
UTF-8
| 941
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
""" reference field """
from bson.objectid import ObjectId
from .base import BaseField
class ReferenceField(BaseField):
""" Reference field property """
def __init__(self, kls, *args, **kwargs):
self.kls = kls
self._is_reference = True
self._partialy_loaded = kwargs.pop("_is_partialy_loaded", False)
super(ReferenceField, self).__init__(*args, **kwargs)
def validate(self, value):
""" validate if it is a valid field """
from tailow.document import Document
if isinstance(value, (self.kls, Document)):
return True
return False
def to_son(self, value):
if value is None:
return None
if isinstance(value, ObjectId):
return value
return value._id if hasattr(value, "_id") else value.id
def from_son(self, value):
val = self.kls(id=value, _is_partialy_loaded=True)
return val
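# Usage sketch (illustrative addition; the Document subclasses below are
# assumptions, not code from this repo):
# class Author(Document):
#     ...
# class Book(Document):
#     author = ReferenceField(Author)
# to_son() stores the referenced document's ObjectId; from_son() rebuilds a
# partially-loaded instance of the referenced class.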
| true
|
8dfdebe0945f30602b50053435da8871f5e0fc89
|
Python
|
SoufianLabed/serverUDP-TCP
|
/tcpclient.py
|
UTF-8
| 1,266
| 3.09375
| 3
|
[] |
no_license
|
import socket
import sys
class tcpclient:
def __init__(self):
        self.PORT = int(sys.argv[1])  # initialize the port and the IP (broadcast)
self.IP = sys.argv[2]
        self.sock = socket.socket(socket.AF_INET,  # create the socket the client uses to talk to the server
                                  socket.SOCK_STREAM)
def sendMessage(self):
        self.sock.connect((self.IP, self.PORT))  # ask the server for a connection
        self.sock.sendto(bytes(sys.argv[3], "utf-8"), (self.IP, self.PORT))  # send the message (from sys.argv[3]) to the given IP and port
def receiveMessage(self):
rep = True
while rep == True:
try:
self.sock.settimeout(5)
                data, addr = self.sock.recvfrom(1024)  # receive the server's reply
                print("\"ok : " + data.decode("utf-8") + "\"")  # print it in the terminal
self.sock.close()
except Exception as e:
rep = False
clienttest=tcpclient()
clienttest.sendMessage()
clienttest.receiveMessage()
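# Usage sketch (illustrative, inferred from the sys.argv handling above):
#   python tcpclient.py <port> <server-ip> "<message>"
#   e.g. python tcpclient.py 8000 127.0.0.1 "hello"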
| true
|
dfe7602410da308d1ad8da255c62fd9932d1a00d
|
Python
|
wangfin/QAsystem
|
/QAManagement/question_generalization/similarity_test.py
|
UTF-8
| 672
| 2.75
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# @Time : 2018/8/15 15:25
# @Author : wb
# @File : similarity_test.py
# Tests the similarity computation
from QAManagement.question_generalization.similarity import Similarity
import time
similarity = Similarity()
question = '入党 积极分子 的 培养 联系人 什么时候 确定 ?'
model_list = ['入党 积极分子 的 培养 联系人 什么时候 确定 ?','护卫队 到底 什么时候 在 中国 上映 ?','鹅 为什么 看见 人 会 攻击 ?','鬼谷子 和 诸葛亮 到底 谁 厉害 ?','确定 为 入党 积极分子 的 时间 是 什么时候 ?']
result = similarity.main(question,model_list)
print(result)
| true
|
fddb339d9106262b651186884a85cfe73ddd71ae
|
Python
|
Gi1ia/TechNoteBook
|
/Algorithm/051_N_Queens.py
|
UTF-8
| 1,754
| 3.5
| 4
|
[] |
no_license
|
class Solution:
def solveNQueens(self, n):
"""
:type n: int
:rtype: List[List[str]]
"""
if not n or n < 1:
return [[]]
arrangements = []
self.dfs([], arrangements, n)
summary = []
# draw summary of boards
for arrangement in arrangements:
board = []
for x, y in enumerate(arrangement): # len(arrangement) == n
temp = "." * n
location = temp[:y] + 'Q' + temp[y + 1:]
board.append(location)
summary.append(board)
return summary
def dfs(self, current_rows, res, n):
"""
:type current_rows: List[int]
index == row of Q
key == col of Q
        e.g. [4, 2] means we have placed two queens already,
        at [0, 4] and [1, 2]
:type res: Result of arrangement
:return: List[List[int]]
"""
        if len(current_rows) == n:  # a queen has been placed in every row
res.append(current_rows[:])
return
        # Try to place a queen in the next row
for col in range(n):
# row, col, currentResult
if self.valid(len(current_rows), col, current_rows):
current_rows.append(col)
self.dfs(current_rows, res, n)
current_rows.pop()
return res
def valid(self, x, y, arrangement):
for row, col in enumerate(arrangement):
            # column conflict
if y == col:
return False
            # diagonal conflict
if row + col == x + y or row - col == x - y:
return False
return True
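# Quick check (added illustration, not part of the original solution):
if __name__ == "__main__":
    for row in Solution().solveNQueens(4)[0]:
        print(row)  # prints one of the two valid 4-queens boards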
| true
|
625d2a1e13ae6c7ddadcdb587d55968babdd6a29
|
Python
|
LeoTheMighty/beginner_python_exercises
|
/ReverseWordOrder.py
|
UTF-8
| 169
| 3.890625
| 4
|
[] |
no_license
|
sentence = input("Give me a cool sentence and I'll reverse it...\n")
sent_list = sentence.split(" ")
reverse_sent = reversed(sent_list)
print(" ".join(reverse_sent))
| true
|
15295d523b8ecde68a6293610bc196a03d445e0c
|
Python
|
sajidamohammad/hackerearths
|
/sherlocknnum.py
|
UTF-8
| 289
| 2.515625
| 3
|
[] |
no_license
|
test = int(raw_input())
for i in range(test):
    nkp = map(int, raw_input().split(" "))
    remove = map(int, raw_input().split(" "))
    # lists cannot be subtracted directly; filter out the removed numbers
    final = [x for x in range(1, 6) if x not in remove]
    print sorted(final)
''' out.append(int(raw_input()))
for j in range(len(out)):
if out[j] in arr:
print "YES"
else:
print "NO"'''
| true
|
056b5583ef5619d9ef8cca646374281f08ee14a3
|
Python
|
gschen/sctu-ds-2020
|
/1906101074-王泓苏/Day0303/test07.py
|
UTF-8
| 315
| 3.28125
| 3
|
[] |
no_license
|
class Test01():
def __init__(self):
        self.t1 = 'I am the parent class'
def f(self):
        return 'dad'
class Test02():
def __init__(self):
        self.t2 = 'I am the child class'
def f(self,object):
print(object.f())
a=Test01()
def main(object):
print(object.f())
return '123'
print(main(a))
| true
|
234f9d0be069bd885e1b1e25db82bd2eb4e0e97e
|
Python
|
EliasFarhan/CompNet
|
/rdt/rdt21.py
|
UTF-8
| 2,839
| 2.765625
| 3
|
[] |
no_license
|
from rdt.base import *
from rdt.rdt20 import ChannelRdt20
class SenderRdt21(Sender):
last_packet = ""
sequence_nmb = 1
msg_lock = threading.Lock()
def send_data(self, data, resend=False):
if not resend:
self.msg_lock.acquire()
self.last_packet = data
text_data = data.encode()
packet = bytearray(len(text_data) + 2)
packet[1] = self.sequence_nmb.to_bytes(8, byteorder='little')[0]
check_sum = 0
for byte in text_data:
check_sum += byte
check_sum += packet[1]
packet[0] = check_sum.to_bytes(8, byteorder="little")[0]
packet[2:len(text_data) + 2] = text_data
self.channel.send_msg(packet)
def receive_response(self, response):
check_sum = 0
for byte in response[0:2]:
check_sum += byte
if check_sum.to_bytes(8, byteorder='little')[0] != response[3]:
print("[Error] Bad response checksum : need to send the last packet again: "+self.last_packet)
self.send_data(self.last_packet, resend=True)
return
if b"ACK" in response:
print("[ACK] Packet went well")
self.sequence_nmb += 1
self.msg_lock.release()
elif b"NAK" in response:
print("[NAK] Need to send packet again")
self.send_data(self.last_packet, resend=True)
else:
print("[Error] Bad response : need to send the last packet again")
self.send_data(self.last_packet, resend=True)
class ReceiverRdt21(Receiver):
sequence_number = 0
def receive_data(self, data):
check_sum = data[0]
sequence_nmb = data[1]
text_data = data[2:]
byte_sum = 0
response = bytearray(4)
for byte in text_data:
byte_sum += byte
byte_sum += sequence_nmb
if byte_sum.to_bytes(8, byteorder="little")[0] == check_sum:
if self.sequence_number != sequence_nmb:
super().receive_data(text_data)
self.sequence_number = sequence_nmb
response[0:2] = b"ACK"
byte_sum = 0
for byte in response[0:2]:
byte_sum += byte
response[3] = byte_sum.to_bytes(8, byteorder='little')[0]
self.send_response(response)
else:
response[0:2] = b"NAK"
byte_sum = 0
for byte in response[0:2]:
byte_sum += byte
response[3] = byte_sum.to_bytes(8, byteorder='little')[0]
self.send_response(response)
def send_response(self, response):
super().send_response(response)
def main():
sim = Simulation(sender=SenderRdt21(), channel=ChannelRdt20(), receiver=ReceiverRdt21())
sim.simulate()
if __name__ == "__main__":
main()
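# Packet layout used by SenderRdt21.send_data (descriptive note added for clarity):
#   byte 0:  low byte of (sum of payload bytes + sequence byte) -> checksum
#   byte 1:  low byte of the sequence number
#   bytes 2+: the UTF-8 encoded payload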
| true
|
9ecc4638f0729aa433de14f071ad253d523109e3
|
Python
|
aydan08/Python-Kursu-15.02.21
|
/HAFTA-3/DERS-7/FOR_DÖNGÜSÜ/for_2.py
|
UTF-8
| 359
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
# Sum of the odd numbers from 1 up to 100
toplam = 0
for tek_sayi in range(1,100,2): # (1 IS INCLUDED, 100 IS EXCLUDED, +2 IS THE STEP)
    toplam = toplam + tek_sayi
else:
    print("...Loop Finished...")
print("Sum of the Odd Numbers: ", toplam) # the line below can be used instead
print(f"Sum of the Odd Numbers: {toplam}")
| true
|
110efd1c0795ca3ac85e696270fb8a487df37694
|
Python
|
skvrd/leetcode.py
|
/problems/1267/solution.py
|
UTF-8
| 534
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
from typing import List
class Solution:
def countServers(self, grid: List[List[int]]) -> int:
connected = 0
n = [0] * len(grid)
m = [0] * len(grid[0])
for i in range(len(n)):
for j in range(len(m)):
if grid[i][j] == 1:
n[i] += 1
m[j] += 1
for i in range(len(n)):
for j in range(len(m)):
if grid[i][j] == 1 and (n[i] > 1 or m[j] > 1):
connected += 1
return connected
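# Quick check (added illustration, not part of the original file):
if __name__ == "__main__":
    grid = [[1, 0], [1, 1]]
    print(Solution().countServers(grid))  # expected: 3 (every server shares a row or column)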
| true
|
c5c5fdde707822de21042076b397011c51543709
|
Python
|
bioless/Xenocrates
|
/xenocrates-update-2018.py
|
UTF-8
| 9,057
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/python
import sys
import operator
import cgi
import csv
#Global Variables
tablename = "SANS575_index"
index = []
#Reads the CVS file into Index List
filename = sys.argv[1]
with open(filename, 'rU') as f:
reader = csv.DictReader(f)
for row in reader:
try:
t = row['Title']
index.append([cgi.escape(row['Title'].upper()), row['Book'], row['Page'], row['Description']])
except:
pass
#Sorts Index
index = sorted(index, key=operator.itemgetter(0))
#Prints Index
pos = 0
for item in index:
key = item[0].strip('"').rstrip('"')
#Create Section Header
if key.startswith("A") or key.startswith("a"):
if pos != 1:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Aa</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 1
elif key.startswith("B") or key.startswith("b"):
if pos != 2:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Bb</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 2
elif key.startswith("C") or key.startswith("c"):
if pos != 3:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Cc</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 3
elif key.startswith("D") or key.startswith("d"):
if pos != 4:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Dd</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 4
elif key.startswith("E") or key.startswith("e"):
if pos != 5:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Ee</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 5
elif key.startswith("F") or key.startswith("f"):
if pos != 6:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Ff</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 6
elif key.startswith("G") or key.startswith("g"):
if pos != 7:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Gg</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 7
elif key.startswith("H") or key.startswith("h"):
if pos != 8:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Hh</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 8
elif key.startswith("I") or key.startswith("i"):
if pos != 9:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Ii</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 9
elif key.startswith("J") or key.startswith("j"):
if pos != 10:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Jj</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 10
elif key.startswith("K") or key.startswith("k"):
if pos != 11:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Kk</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 11
elif key.startswith("L") or key.startswith("l"):
if pos != 12:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Ll</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 12
elif key.startswith("M") or key.startswith("m"):
if pos != 13:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Mm</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 13
elif key.startswith("N") or key.startswith("n"):
if pos != 14:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Nn</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 14
elif key.startswith("O") or key.startswith("o"):
if pos != 15:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Oo</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 15
elif key.startswith("P") or key.startswith("p"):
if pos != 16:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Pp</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 16
elif key.startswith("Q") or key.startswith("q"):
if pos != 17:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Qq</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 17
elif key.startswith("R") or key.startswith("r"):
if pos != 18:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Rr</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 18
elif key.startswith("S") or key.startswith("s"):
if pos != 19:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Ss</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 19
elif key.startswith("T") or key.startswith("t"):
if pos != 20:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Tt</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 20
elif key.startswith("U") or key.startswith("u"):
if pos != 21:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Uu</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 21
elif key.startswith("V") or key.startswith("v"):
if pos != 22:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Vv</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 22
elif key.startswith("W") or key.startswith("w"):
if pos != 23:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Ww</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 23
elif key.startswith("X") or key.startswith("x"):
if pos != 24:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Xx</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 24
elif key.startswith("Y") or key.startswith("y"):
if pos != 25:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Yy</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 25
elif key.startswith("Z") or key.startswith("z"):
if pos != 26:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Zz</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 26
else:
if pos != 27:
print "<span class=Title1><b><span style='font-size:45.0pt;line-height:107%;color:black'>Numbers & Special Characters</span></b></span><span style='font-size:13.5pt;line-height:107%;color:black'><br><br></span>"
pos = 27
#Print Details
if item[0] != "":
print "<span class=topic><b><span style='color:blue'>"
print " %s " % (item[0])
print "</span></b></span><span style='color:black'> "
print "<br><i>{b-%s / p-%s}</i><br>%s<br></span>" % (item[1], item[2], item[3])
| true
|
5afaa636bb63cf51d4ccb115808547b30adb6ab2
|
Python
|
wdan/territories
|
/territories/models/dblp_io.py
|
UTF-8
| 9,183
| 2.59375
| 3
|
[] |
no_license
|
__author__ = 'wenbin'
import igraph as ig
import scipy.io as sio
class DBLP(object):
def __init__(self, venue_id_list, fileName, type='paper', path='territories/data/dblp/'):
venue_id_list.sort()
self.venue_id_list = venue_id_list
self.path = path
self.file_name = fileName+'_'+type
self.venue_id_string = ''
if type == 'paper':
for i in self.venue_id_list:
self.venue_id_string += str(i) + '_'
[isFound, graph_str] = self.check_dick()
if isFound:
self.g = self.read_graph_paper(graph_str)
else:
self.g = self.generate_graph_paper()
else:
for i in self.venue_id_list:
self.venue_id_string += str(i) + '*'
[isFound, graph_str] = self.check_dick()
if isFound:
self.g = self.read_graph_author(graph_str)
else:
self.g = self.generate_graph_author()
# self.venue_name = self.read_venue_file()
# print self.venue_name
def read_graph_author(self, graph_str):
try:
graph_file = open(self.path+graph_str+'.txt')
except IOError:
print 'The file '+self.path+graph_str+'.txt'+' does not exist!'
return None
g = ig.read(self.path+graph_str+'.net')
for i, r in enumerate(graph_file):
if i%2 == 0:
lines = r.split('\t')
id = int(lines[0])
label = str(lines[1]).rstrip('\n')
g.vs[id]['label'] = label
else:
g.vs[id]['class'] = {}
lines = r.split('\t')
n = len(lines)
for j in xrange(n/2):
g.vs[id]['class'][str(lines[j*2])] = int(lines[j*2+1])
graph_file.close()
return g
# def read_venue_file(self):
# try:
# venue_file = open(self.path+self.file_name+'_venue.txt')
# except IOError:
# print 'The file '+self.path+self.file_name+'_venue.txt'+' does not exist!'
# return None
#
# r = {}
# for line in venue_file:
# words = line.split('\t')
# id = int(words[0])
# name = str(words[1]).rstrip('\n')
# r[id] = name
#
# venue_file.close()
# return r
# def write_venue_file(self, data):
# venue_file = open(self.path+self.file_name+'_venue.txt', 'w')
# for id in self.venue_id_list:
# venue_file.write(str(id)+'\t'+str(data[id-1][0][0])+'\n')
# venue_file.close()
def generate_graph_author(self):
# load dblp.mat
data = sio.loadmat(self.path+'dblp.mat')
paper_venue = data['paper_venue']
paper_author_csr = data['paper_author'].tocsr()
author_name = data['author_name']
venue_name = data['venue_name']
# self.write_venue_file(venue_name)
# venueID ---> paper list(paper ID)
paper_list = {}
for venue in self.venue_id_list:
paper_list[venue] = []
n = paper_venue.size
for i in xrange(n):
v = paper_venue[i][0]
if v in paper_list:
paper_list[v].append(i)
# venueID ---> author list(author ID)
author_list = {}
# dicts for author ID <--> graph index
author_id_dict = {}
id_author_dict = {}
count = 0
edge_dict = {}
class_dict = {}
for venue, papers in paper_list.items():
for paper_id in papers:
cols = paper_author_csr.getrow(paper_id).nonzero()[1]
for author_id in cols:
author_id = int(author_id)
if author_id not in author_id_dict:
class_dict[author_id] = {}
author_id_dict[author_id] = count
id_author_dict[count] = author_id
count += 1
if venue not in class_dict[author_id]:
class_dict[author_id][venue] = 1
else:
class_dict[author_id][venue] += 1
edge_list = [(i, j) for i in cols for j in cols if i > j]
for e in edge_list:
if e in edge_dict:
edge_dict[e] += 1
else:
edge_dict[e] = 1
g = ig.Graph()
g.add_vertices(count)
graph_file = open(self.path+self.file_name+'.txt', 'w')
for v in g.vs:
label = str(author_name[id_author_dict[v.index]][0][0])
v['label'] = label
v['class'] = {}
graph_file.write(str(v.index)+'\t'+label+'\n')
for venue, count in class_dict[id_author_dict[v.index]].items():
v['class'][str(venue_name[venue-1][0][0])] = count
graph_file.write(str(venue_name[venue-1][0][0]) + '\t' + str(count) + '\t')
graph_file.write('\n')
graph_file.close()
edge_list = map(lambda e: (author_id_dict[e[0]], author_id_dict[e[1]]), edge_dict.keys())
g.add_edges(edge_list)
self.write_dick()
g.simplify(loops=False)
g.write_pajek(self.path + self.file_name + '.net')
return g
def generate_graph_paper(self):
# load dblp.mat
data = sio.loadmat(self.path+'dblp.mat')
paper_venue = data['paper_venue']
paper_author = data['paper_author']
paper_name = data['paper_name']
paper_author_csr = paper_author.tocsr()
author_paper_csc = paper_author.tocsc()
venue_name = data['venue_name']
# self.write_venue_file(venue_name)
# parse node
paper_list = {}
for venue in self.venue_id_list:
paper_list[venue] = []
n = paper_venue.size
paper_id_dict = {}
id_paper_dict = {}
paper_venue_dict = {}
count = 0
for i in xrange(n):
v = paper_venue[i][0]
if v in paper_list:
paper_id_dict[i] = count
id_paper_dict[count] = i
paper_venue_dict[count] = int(v)
count += 1
paper_list[int(v)].append(i)
g = ig.Graph()
g.add_vertices(len(paper_id_dict))
graph_file = open(self.path+self.file_name+'.txt', 'w')
for v in g.vs:
cluster = str(venue_name[paper_venue_dict[v.index]-1][0][0])
label = str(paper_name[id_paper_dict[v.index]][0][0])
v['class'] = cluster
v['label'] = label
graph_file.write(str(v.index)+'\t'+str(cluster)+'\t'+label+'\n')
graph_file.close()
self.write_dick()
author_list = set()
for paper_id in paper_id_dict.keys():
cols = paper_author_csr.getrow(paper_id).nonzero()[1]
for i in cols:
author_list.add(i)
author_list = list(author_list)
for author_id in author_list:
rows = author_paper_csc.getcol(author_id).nonzero()[0]
l = [(paper_id_dict[i], paper_id_dict[j]) for i in rows for j in rows if i>j and i in paper_id_dict and j in paper_id_dict]
if len(l) > 0:
g.add_edges(l)
g.simplify(loops=False)
g.write_pajek(self.path + self.file_name + '.net')
return g
def write_dick(self):
with open(self.path+'dict.txt', 'a') as dict_file:
dict_file.write(self.venue_id_string+'\t'+self.file_name+'\n')
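        # dict.txt format (descriptive note added): one line per cached graph,
        # "<venue id string>\t<graph file basename>", read back by check_dick()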
def read_graph_paper(self, graph_str):
try:
graph_file = open(self.path+graph_str+'.txt')
except IOError:
print 'The file '+self.path+graph_str+'.txt'+' does not exist!'
return None
g = ig.read(self.path+graph_str+'.net')
for r in graph_file:
lines = r.split('\t')
id = int(lines[0])
cluster = str(lines[1])
label = str(lines[2]).rstrip('\n')
g.vs[id]['class'] = cluster
g.vs[id]['label'] = label
graph_file.close()
return g
def check_dick(self):
try:
dict_file = open(self.path + 'dict.txt', 'r')
except IOError:
dict_file = open(self.path + 'dict.txt', 'w')
dict_file.close()
return [False,'']
isFound = False
graph_str = ''
for r in dict_file:
line = r.split('\t')
str_list = line[0]
graph_str = line[1].rstrip('\n')
if self.venue_id_string == str_list or graph_str == self.file_name:
isFound = True
break
dict_file.close()
return [isFound, graph_str]
if __name__ == '__main__':
vis5 = DBLP([2308, 1984, 1512, 3078, 2960], 'vis_5', 'paper')
print vis5.g.ecount()
print vis5.g.vcount()
print vis5.g.vs[0]
os5 =DBLP([2308, 1984, 1512, 3078, 2960], 'os_5', 'author')
print os5.g.ecount()
print os5.g.vcount()
print os5.g.vs[0]
| true
|
27df8c7d87eea29d1cfc381b72c896df1217a0cf
|
Python
|
olgashemagina/Milk_Data
|
/crossval.py
|
UTF-8
| 1,322
| 2.53125
| 3
|
[] |
no_license
|
import os
import random
import sys
import shutil
path = sys.argv[1]
enroll_part = 0.3 #sys.argv[2]
classes_part = 0.5
os.chdir(path)
if os.path.exists(os.path.join(path, 'database')):
shutil.rmtree(os.path.join(path, 'database'))
os.mkdir(os.path.join(path, 'database'))
os.mkdir(os.path.join(path, 'database','enroll'))
os.mkdir(os.path.join(path, 'database','classes'))
os.mkdir(os.path.join(path, 'database','4recognition'))
# r=root, d=directories, f = files
for r, d, f in os.walk(path):
if 'database' not in r:
if len(f) != 0:
os.mkdir(os.path.join(path, 'database','classes', os.path.basename(r)))
files = []
for file in f:
if '.csv' in file:
files.append(os.path.join(r, file))
c = len(files)
random.shuffle(files)
enroll_len = int(enroll_part*c)
classes_len = int(classes_part*c)
for index in range(enroll_len):
shutil.copy(files[index], os.path.join(path, 'database','enroll'))
for index in range(enroll_len,enroll_len+classes_len):
shutil.copy(files[index], os.path.join(path, 'database','classes', os.path.basename(r)))
for index in range(enroll_len+classes_len, c):
shutil.copy(files[index], os.path.join(path, 'database','4recognition'))
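# Resulting split per class directory (summary note added for clarity):
#   first 30% of the shuffled files -> database/enroll
#   next 50%                        -> database/classes/<class name>
#   remaining 20%                   -> database/4recognition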
| true
|
17acc2ff8cc6b0a7298b9b5af834415597b2e550
|
Python
|
dumbman/epicf
|
/SpatialMesh.py
|
UTF-8
| 14,128
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
import sys
from math import ceil
import numpy as np
from Vec3d import Vec3d
class SpatialMesh():
def __init__(self):
self.x_volume_size = None
self.y_volume_size = None
self.z_volume_size = None
self.x_cell_size = None
self.y_cell_size = None
self.z_cell_size = None
self.x_n_nodes = None
self.y_n_nodes = None
self.z_n_nodes = None
self.node_coordinates = None
self.charge_density = None
self.potential = None
self.electric_field = None
@classmethod
def init_from_config(cls, conf):
new_obj = cls()
new_obj.check_correctness_of_related_config_fields(conf)
new_obj.init_x_grid(conf)
new_obj.init_y_grid(conf)
new_obj.init_z_grid(conf)
new_obj.allocate_ongrid_values()
new_obj.fill_node_coordinates()
new_obj.set_boundary_conditions(conf)
SpatialMesh.mark_spatmesh_sec_as_used(conf)
return new_obj
@staticmethod
def mark_spatmesh_sec_as_used(conf):
# For now simply mark sections as 'used' instead of removing them.
conf["SpatialMesh"]["used"] = "True"
conf["BoundaryConditions"]["used"] = "True"
@classmethod
def init_from_h5(cls, h5group):
new_obj = cls()
new_obj.x_volume_size = h5group.attrs["x_volume_size"]
new_obj.y_volume_size = h5group.attrs["y_volume_size"]
new_obj.z_volume_size = h5group.attrs["z_volume_size"]
new_obj.x_cell_size = h5group.attrs["x_cell_size"]
new_obj.y_cell_size = h5group.attrs["y_cell_size"]
new_obj.z_cell_size = h5group.attrs["z_cell_size"]
new_obj.x_n_nodes = h5group.attrs["x_n_nodes"]
new_obj.y_n_nodes = h5group.attrs["y_n_nodes"]
new_obj.z_n_nodes = h5group.attrs["z_n_nodes"]
#
# todo: don't allocate. read into flat arrays. then reshape
new_obj.allocate_ongrid_values()
#
dim = new_obj.node_coordinates.size
tmp_x = np.empty(dim, dtype='f8')
tmp_y = np.empty_like(tmp_x)
tmp_z = np.empty_like(tmp_x)
#
tmp_x = h5group["./node_coordinates_x"]
tmp_y = h5group["./node_coordinates_y"]
tmp_z = h5group["./node_coordinates_z"]
for global_idx, (vx, vy, vz) in enumerate(zip(tmp_x, tmp_y, tmp_z)):
# todo: highly nonoptimal; make view or reshape?
i, j, k = new_obj.global_idx_to_node_ijk(global_idx)
new_obj.node_coordinates[i][j][k] = Vec3d(vx, vy, vz)
#
tmp_rho = h5group["./charge_density"]
tmp_phi = h5group["./potential"]
for global_idx, (rho, phi) in enumerate(zip(tmp_rho, tmp_phi)):
i, j, k = new_obj.global_idx_to_node_ijk(global_idx)
new_obj.charge_density[i][j][k] = rho
new_obj.potential[i][j][k] = phi
#
tmp_x = h5group["./electric_field_x"]
tmp_y = h5group["./electric_field_y"]
tmp_z = h5group["./electric_field_z"]
for global_idx, (vx, vy, vz) in enumerate(zip(tmp_x, tmp_y, tmp_z)):
i, j, k = new_obj.global_idx_to_node_ijk(global_idx)
new_obj.electric_field[i][j][k] = Vec3d(vx, vy, vz)
#
return new_obj
def allocate_ongrid_values(self):
nx = self.x_n_nodes
ny = self.y_n_nodes
nz = self.z_n_nodes
self.node_coordinates = np.empty((nx, ny, nz), dtype=object)
self.charge_density = np.zeros((nx, ny, nz), dtype='f8')
self.potential = np.zeros((nx, ny, nz), dtype='f8')
self.electric_field = np.full((nx, ny, nz), Vec3d.zero(), dtype=object)
def check_correctness_of_related_config_fields(self, conf):
self.grid_x_size_gt_zero(conf)
self.grid_x_step_gt_zero_le_grid_x_size(conf)
self.grid_y_size_gt_zero(conf)
self.grid_y_step_gt_zero_le_grid_y_size(conf)
self.grid_z_size_gt_zero(conf)
self.grid_z_step_gt_zero_le_grid_z_size(conf)
def init_x_grid(self, conf):
spat_mesh_conf = conf["SpatialMesh"]
self.x_volume_size = spat_mesh_conf.getfloat("grid_x_size")
self.x_n_nodes = ceil(spat_mesh_conf.getfloat("grid_x_size") /
spat_mesh_conf.getfloat("grid_x_step")) + 1
self.x_cell_size = self.x_volume_size / (self.x_n_nodes - 1)
if self.x_cell_size != spat_mesh_conf.getfloat("grid_x_step"):
print("X_step was shrinked to {:.3f} from {:.3f} "
"to fit round number of cells".format(
self.x_cell_size, spat_mesh_conf.getfloat("grid_x_step")))
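        # Worked example (added note): grid_x_size = 10.0 and grid_x_step = 3.0
        # give x_n_nodes = ceil(10/3) + 1 = 5, so x_cell_size = 10/4 = 2.5 and
        # the cells tile the volume exactly.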
def init_y_grid(self, conf):
spat_mesh_conf = conf["SpatialMesh"]
self.y_volume_size = spat_mesh_conf.getfloat("grid_y_size")
self.y_n_nodes = ceil(spat_mesh_conf.getfloat("grid_y_size") /
spat_mesh_conf.getfloat("grid_y_step")) + 1
self.y_cell_size = self.y_volume_size / (self.y_n_nodes - 1)
if self.y_cell_size != spat_mesh_conf.getfloat("grid_y_step"):
print("Y_step was shrinked to {:.3f} from {:.3f} "
"to fit round number of cells".format(
self.y_cell_size, spat_mesh_conf.getfloat("grid_y_step")))
def init_z_grid(self, conf):
spat_mesh_conf = conf["SpatialMesh"]
self.z_volume_size = spat_mesh_conf.getfloat("grid_z_size")
self.z_n_nodes = ceil(spat_mesh_conf.getfloat("grid_z_size") /
spat_mesh_conf.getfloat("grid_z_step")) + 1
self.z_cell_size = self.z_volume_size / (self.z_n_nodes - 1)
if self.z_cell_size != spat_mesh_conf.getfloat("grid_z_step"):
print("Z_step was shrinked to {:.3f} from {:.3f} "
"to fit round number of cells".format(
self.z_cell_size, spat_mesh_conf.getfloat("grid_z_step")))
def fill_node_coordinates(self):
for i in range(self.x_n_nodes):
for j in range(self.y_n_nodes):
for k in range(self.z_n_nodes):
self.node_coordinates[i][j][k] = Vec3d(
i * self.x_cell_size, j * self.y_cell_size, k * self.z_cell_size)
def clear_old_density_values(self):
self.charge_density.fill(0)
def set_boundary_conditions(self, conf):
phi_left = conf["BoundaryConditions"].getfloat("boundary_phi_left")
phi_right = conf["BoundaryConditions"].getfloat("boundary_phi_right")
phi_top = conf["BoundaryConditions"].getfloat("boundary_phi_top")
phi_bottom = conf["BoundaryConditions"].getfloat("boundary_phi_bottom")
phi_near = conf["BoundaryConditions"].getfloat("boundary_phi_near")
phi_far = conf["BoundaryConditions"].getfloat("boundary_phi_far")
#
nx = self.x_n_nodes
ny = self.y_n_nodes
nz = self.z_n_nodes
for i in range(nx):
for k in range(nz):
self.potential[i][0][k] = phi_bottom
self.potential[i][ny-1][k] = phi_top
for j in range(ny):
for k in range(nz):
self.potential[0][j][k] = phi_right
self.potential[nx-1][j][k] = phi_left
for i in range(nx):
for j in range(ny):
self.potential[i][j][0] = phi_near
self.potential[i][j][nz-1] = phi_far
def is_potential_equal_on_boundaries(self):
nx = self.x_n_nodes
ny = self.y_n_nodes
nz = self.z_n_nodes
        return (self.potential[0][2][2] == self.potential[nx - 1][2][2]
                == self.potential[2][0][2] == self.potential[2][ny - 1][2]
                == self.potential[2][2][0] == self.potential[2][2][nz - 1])
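    # Hedged alternative sketch: the check above samples a single interior node
    # per face (and assumes at least 3 nodes per axis). A stricter whole-face
    # comparison with numpy could look like this; the method name is an assumption.
    def _all_boundary_potentials_equal(self):
        phi = self.potential
        faces = (phi[0, :, :], phi[-1, :, :],
                 phi[:, 0, :], phi[:, -1, :],
                 phi[:, :, 0], phi[:, :, -1])
        ref = phi[0, 0, 0]
        return all((face == ref).all() for face in faces)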
def print(self):
self.print_grid()
self.print_ongrid_values()
def print_grid(self):
print("Grid:")
print("Length: x = {:.3f}, y = {:.3f}, z = {:.3f}".format(
self.x_volume_size, self.y_volume_size, self.z_volume_size))
print("Cell size: x = {:.3f}, y = {:.3f}, z = {:.3f}".format(
self.x_cell_size, self.y_cell_size, self.z_cell_size))
print("Total nodes: x = {:d}, y = {:d}, z = {:d}".format(
self.x_n_nodes, self.y_n_nodes, self.z_n_nodes))
def print_ongrid_values(self):
nx = self.x_n_nodes
ny = self.y_n_nodes
nz = self.z_n_nodes
print("x_node y_node z_node | "
"charge_density | potential | electric_field(x,y,z)")
        for i in range(nx):
            for j in range(ny):
                for k in range(nz):
                    print("{:8d} {:8d} {:8d} | "
                          "{:14.3f} | {:14.3f} | "
                          "{:14.3f} {:14.3f} {:14.3f}".format(
                              i, j, k,
                              self.charge_density[i][j][k],
                              self.potential[i][j][k],
                              self.electric_field[i][j][k].x,
                              self.electric_field[i][j][k].y,
                              self.electric_field[i][j][k].z))
def write_to_file(self, h5file):
groupname = "/SpatialMesh"
h5group = h5file.create_group(groupname)
self.write_hdf5_attributes(h5group)
self.write_hdf5_ongrid_values(h5group)
def write_hdf5_attributes(self, h5group):
h5group.attrs.create("x_volume_size", self.x_volume_size)
h5group.attrs.create("y_volume_size", self.y_volume_size)
h5group.attrs.create("z_volume_size", self.z_volume_size)
h5group.attrs.create("x_cell_size", self.x_cell_size)
h5group.attrs.create("y_cell_size", self.y_cell_size)
h5group.attrs.create("z_cell_size", self.z_cell_size)
h5group.attrs.create("x_n_nodes", self.x_n_nodes)
h5group.attrs.create("y_n_nodes", self.y_n_nodes)
h5group.attrs.create("z_n_nodes", self.z_n_nodes)
def write_hdf5_ongrid_values(self, h5group):
# todo: without compound datasets
# there is this copying problem.
dim = self.node_coordinates.size
tmp_x = np.empty(dim, dtype='f8')
tmp_y = np.empty_like(tmp_x)
tmp_z = np.empty_like(tmp_x)
# todo: make view instead of copy
flat_node_coords = self.node_coordinates.ravel(order='C')
for i, v in enumerate(flat_node_coords):
tmp_x[i] = v.x
tmp_y[i] = v.y
tmp_z[i] = v.z
h5group.create_dataset("./node_coordinates_x", data=tmp_x)
h5group.create_dataset("./node_coordinates_y", data=tmp_y)
h5group.create_dataset("./node_coordinates_z", data=tmp_z)
        # C order (row-major): the index along the first axis varies slowest;
        # all ravel(order='C') calls in this method use this default layout
flat_phi = self.potential.ravel(order='C')
h5group.create_dataset("./potential", data=flat_phi)
flat_rho = self.charge_density.ravel(order='C')
h5group.create_dataset("./charge_density", data=flat_rho)
#
flat_field = self.electric_field.ravel(order='C')
for i, v in enumerate(flat_field):
tmp_x[i] = v.x
tmp_y[i] = v.y
tmp_z[i] = v.z
h5group.create_dataset("./electric_field_x", data=tmp_x)
h5group.create_dataset("./electric_field_y", data=tmp_y)
h5group.create_dataset("./electric_field_z", data=tmp_z)
def grid_x_size_gt_zero(self, conf):
if conf["SpatialMesh"].getfloat("grid_x_size") <= 0:
raise ValueError("expect grid_x_size > 0")
def grid_x_step_gt_zero_le_grid_x_size(self, conf):
if (conf["SpatialMesh"].getfloat("grid_x_step") <= 0) or \
(conf["SpatialMesh"].getfloat("grid_x_step") > \
conf["SpatialMesh"].getfloat("grid_x_size")):
raise ValueError("Expect grid_x_step > 0 and grid_x_step <= grid_x_size")
def grid_y_size_gt_zero(self, conf):
if conf["SpatialMesh"].getfloat("grid_y_size") <= 0:
raise ValueError("Expect grid_y_size > 0")
def grid_y_step_gt_zero_le_grid_y_size(self, conf):
if (conf["SpatialMesh"].getfloat("grid_y_step") <= 0) or \
(conf["SpatialMesh"].getfloat("grid_y_step") > \
conf["SpatialMesh"].getfloat("grid_y_size")):
raise ValueError("Expect grid_y_step > 0 and grid_y_step <= grid_y_size")
def grid_z_size_gt_zero(self, conf):
if conf["SpatialMesh"].getfloat("grid_z_size") <= 0:
raise ValueError("Expect grid_z_size > 0")
def grid_z_step_gt_zero_le_grid_z_size(self, conf):
if (conf["SpatialMesh"].getfloat("grid_z_step") <= 0) or \
(conf["SpatialMesh"].getfloat("grid_z_step") > \
conf["SpatialMesh"].getfloat("grid_z_size")):
raise ValueError("Expect grid_z_step > 0 and grid_z_step <= grid_z_size")
    def node_number_to_coordinate_x(self, i):
        if 0 <= i < self.x_n_nodes:
            return i * self.x_cell_size
        print("invalid node number i={:d} "
              "at node_number_to_coordinate_x".format(i))
        sys.exit(-1)
    def node_number_to_coordinate_y(self, j):
        if 0 <= j < self.y_n_nodes:
            return j * self.y_cell_size
        print("invalid node number j={:d} "
              "at node_number_to_coordinate_y".format(j))
        sys.exit(-1)
    def node_number_to_coordinate_z(self, k):
        if 0 <= k < self.z_n_nodes:
            return k * self.z_cell_size
        print("invalid node number k={:d} "
              "at node_number_to_coordinate_z".format(k))
        sys.exit(-1)
def global_idx_to_node_ijk(self, global_idx):
# In row-major order: (used to save on disk)
# global_index = i * nz * ny +
# j * nz +
# k
#
ny = self.y_n_nodes
nz = self.z_n_nodes
i = global_idx // (nz * ny)
j_and_k_part = global_idx % (nz * ny)
j = j_and_k_part // nz
k = j_and_k_part % nz
return (i, j, k)
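# Hedged self-test sketch (not part of the original file): verifies that
# global_idx_to_node_ijk inverts the row-major flattening used when the
# on-grid values are written to and read back from HDF5.
def _check_global_idx_round_trip(mesh):
    nx, ny, nz = mesh.x_n_nodes, mesh.y_n_nodes, mesh.z_n_nodes
    for global_idx in range(nx * ny * nz):
        i, j, k = mesh.global_idx_to_node_ijk(global_idx)
        assert global_idx == i * nz * ny + j * nz + k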
| true
|
e2c51958600316315a9c2fa4990a545931bafe11
|
Python
|
AdrianJohnston/ShapeNetRender
|
/Emitter.py
|
UTF-8
| 1,967
| 2.890625
| 3
|
[] |
no_license
|
from __future__ import print_function
from mitsuba.core import *
class EmitterType:
SUN_SKY = 'sunsky'
SKY = 'sky'
SUN = 'sun'
DIRECTIONAL = 'directional'
CONSTANT = 'constant'
class Emitter:
def __init__(self, emitter_type, sample_weight=1.0, to_world=Transform()):
self.type = emitter_type
self.sample_weight = sample_weight
self.to_world = to_world
self.config = self.create_config()
def create_config(self):
config = {
'type' : self.type,
'toWorld': self.to_world,
'sampleWeight': self.sample_weight
}
return config
class ComplexEmitter(Emitter):
def __init__(self, emitter_type, to_world=Transform(), sample_weight=1.0, hour=15.0, min=0.0, sec=0.0,
albedo=Spectrum(), direction=Vector(0)):
self.hour = hour
self.min = min
self.sec = sec
self.albedo = albedo
self.direction = direction
Emitter.__init__(self, emitter_type, sample_weight=sample_weight, to_world=to_world)
#self.configure()
def create_config(self):
config = Emitter.create_config(self)
config['hour'] = self.hour
config['min'] = self.min
config['sec'] = self.sec
        if self.type in (EmitterType.SUN_SKY, EmitterType.SKY, EmitterType.SUN):
            config['sunDirection'] = self.direction
        else:
            config['direction'] = self.direction
        if self.type in (EmitterType.SUN_SKY, EmitterType.SKY):
            config['albedo'] = self.albedo
return config
def configure(self):
self.config = self.create_config()
if __name__ == "__main__":
# e = Emitter(EmitterType.DIRECTIONAL)
# print (e.config)
    sunsky = ComplexEmitter(EmitterType.SUN_SKY, to_world=Transform.translate(Vector(10, 0, 0)),
                            albedo=Spectrum(1.0), hour=12.0)
    print(sunsky.config)
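    # Hedged extra example (illustrative values, not in the original file):
    # a DIRECTIONAL emitter exercises the else branch, taking the plain
    # 'direction' key and no 'albedo'.
    # directional = ComplexEmitter(EmitterType.DIRECTIONAL, direction=Vector(0, -1, 0))
    # print(directional.config)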
| true
|
e1ac1670fc2847f09106f53b025921bdfc9f96a9
|
Python
|
e185725/atcoder_practice
|
/180/c.py
|
UTF-8
| 292
| 3.0625
| 3
|
[] |
no_license
|
import math
N = int(input())
ans = []
ans_2 = []
# math.isqrt is exact; int(math.sqrt(N)) can round wrongly for large N
for i in range(1, math.isqrt(N) + 1):
    if N % i == 0:
        ans.append(i)
        if i != N // i:
            ans_2.append(N // i)
for small in ans:
    print(small)
for large in reversed(ans_2):
    print(large)
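# Hedged compact equivalent (not in the original submission), relying on the
# same math.isqrt bound:
# divisors = sorted(d for i in range(1, math.isqrt(N) + 1)
#                   if N % i == 0 for d in {i, N // i})
# print(*divisors, sep='\n')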
| true
|
8ac8b294fc55b931978a8e0a1f901a0f89f03940
|
Python
|
heldersepu/hs-scripts
|
/Python/pyGTK.py
|
UTF-8
| 795
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
## pyGTK template
import pygtk
pygtk.require('2.0')
import gtk
class MyProgram:
def __init__(self):
# create a new window
app_window = gtk.Window(gtk.WINDOW_TOPLEVEL)
app_window.set_size_request(500, 350)
app_window.set_border_width(10)
app_window.set_title("MyProgram title")
app_window.connect("delete_event", lambda w,e: gtk.main_quit())
bbox = gtk.HButtonBox()
button_go = gtk.Button(stock='gtk-ok')
#button_go.connect('clicked', self.button_clicked)
bbox.add(button_go)
app_window.add(bbox)
app_window.show_all()
return
def main():
gtk.main()
return 0
if __name__ == "__main__":
MyProgram()
main()
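# Hedged porting note (not part of the original template): PyGTK 2 targets
# Python 2; a rough GTK 3 / PyGObject equivalent of the window setup would be:
# import gi
# gi.require_version('Gtk', '3.0')
# from gi.repository import Gtk
# win = Gtk.Window(title="MyProgram title")
# win.set_default_size(500, 350)
# win.connect("delete-event", Gtk.main_quit)
# win.show_all()
# Gtk.main()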
| true
|
5105a26484b10537574bb5734e1e91580683ab47
|
Python
|
Shubhamsm/youtube-auto-comments
|
/youtube.py
|
UTF-8
| 1,662
| 2.90625
| 3
|
[] |
no_license
|
import pyautogui
import time
from bs4 import BeautifulSoup
import requests
Keywords=["Neural Networks","data Science"] # keywords it search to get list of urls
msg="hello" # message to comment Can
def Links_list(keywords):
    '''
    Return a list of video url paths which the bot will comment on.
    '''
    urls_list = []  # url paths of search results
    # build the search query from the keyword list (query construction is an
    # assumption; the parameter was previously unused)
    query = "+".join("+".join(k.split()) for k in keywords)
    page = requests.get("https://www.youtube.com/results?search_query=" + query)
    soup = BeautifulSoup(page.content, 'lxml', from_encoding="latin-1")
    # find all video urls in the YT search results page
    vids = soup.findAll('a', attrs={'class': 'yt-uix-tile-link'})
    for x in vids:
        y = x['href']
        if y[0] == "/":
            urls_list.append(y.encode('latin-1').decode('utf-8'))
    return urls_list
def linkTo(*args):
    '''
    Iterate over the url paths in args and post a comment on each video.
    '''
    count = 0
time.sleep(5)
pyautogui.hotkey('ctrl', 'l')
for i in args:
link="https://www.youtube.com"+i
time.sleep(5)
pyautogui.write(link) ; time.sleep(3)
pyautogui.press('enter')
time.sleep(8)
pyautogui.scroll(-100)
time.sleep(10)
pyautogui.hotkey('ctrl','f')
time.sleep(5)
pyautogui.write("comments")
time.sleep(10)
pyautogui.click(x=324, y=478) # find yourself comment box
pyautogui.write(msg) ; time.sleep(3)
pyautogui.hotkey('ctrl','enter')
count+=1
if count==3:
break
time.sleep(5)
linkTo(Links_list("hello"))
| true
|
cf4b4566357aa3d036ceec834ee4438b45825c81
|
Python
|
Brain-0ut/Vielit_Python_HW
|
/Abto_CV_AI_2022_Camp_var_B/test2.py
|
UTF-8
| 1,324
| 3.375
| 3
|
[] |
no_license
|
from typing import List
import random as r
def task1(x: List[int]) -> bool:
    """Sort the list, binary-search for the position where values cross the
    average, split there, then report whether the four extreme elements of the
    two halves have the same mean as the remaining elements.
    (Behaviour inferred from the code; the intent is an assumption.)"""
    length = len(x)
    if length < 2:
        return False
    elif length == 2:
        return x[0] == x[1]
    elif length <= 4:
        # with four or fewer elements nothing remains after removing the four
        # extremes (sum2 would divide by zero); returning False is an assumption
        return False
    x.sort()
    print(x)
    average = sum(x) / length
    start = 0
    end = length - 1
    mid = 0
    while start <= end:
        mid = (start + end) // 2
        if mid + 1 < length and x[mid] <= average < x[mid + 1]:
            print(mid)
            break
        elif average < x[mid]:
            end = mid - 1
        else:
            start = mid + 1
    # split index: clamp so both halves keep at least two elements
    mid = max(2, min(mid + 1, length - 2))
    first = x[:mid]
    second = x[mid:]
    print(first, '|', second)
    sum1 = first[0] + first[-1] + second[0] + second[-1]
    mas1 = [first.pop(0), first.pop(-1), second.pop(0), second.pop(-1)]
    sum2 = sum(x) - sum1
    mas2 = first + second
    aver1 = sum1 / 4
    aver2 = sum2 / (length - 4)
    print(mas1, " ", mas2)
    print(sum1, " ", sum2)
    print(aver1, " ", aver2)
    print(average)
    return aver1 == aver2
x = [r.randrange(0, 10000) for _ in range(r.randrange(1, 30))]
#x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
print(x)
expected_result = True
result = task1(x)
print(result)
#assert result == expected_result
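# Hedged reproducibility sketch (not in the original): seed the RNG so the
# random test case is repeatable while debugging, and keep at least 5 elements
# so something remains after removing the four extremes:
# r.seed(0)
# x = [r.randrange(0, 10000) for _ in range(r.randrange(5, 30))]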
| true
|
bbfde0d9d4b496aaa72d20ff643f6e8fd82fb218
|
Python
|
Debdeep1998/Backtracking_rush
|
/gray_code.py
|
UTF-8
| 303
| 2.890625
| 3
|
[] |
no_license
|
n = int(input())
a = ['0'] * n
# print(a)  # debug
def gray(i):
    # reflected Gray code by backtracking: bit i takes both values, and the
    # second recursion walks the remaining bits in reversed order
    if i == n - 1:
        print("".join(a))
        a[i] = '1' if a[i] == '0' else '0'
        print("".join(a))
        return
    for c in (1, 2):
        gray(i + 1)
        if c != 2:
            a[i] = '1' if a[i] == '0' else '0'
gray(0)
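# Hedged cross-check (not in the original submission): the i-th reflected Gray
# code is i ^ (i >> 1), so the printed sequence can be verified against:
# expected = [format(i ^ (i >> 1), '0{}b'.format(n)) for i in range(2 ** n)]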
| true
|